CombinedText stringlengths 4 3.42M |
|---|
package kite
import (
"github.com/streadway/amqp"
"io"
"koding/tools/log"
"sync"
)
// connection adapts a pair of AMQP channels (one consuming, one publishing)
// to a byte-stream style connection for a single client.
type connection struct {
messageChannel *amqp.Channel // channel the consumer was registered on
messageStream <-chan amqp.Delivery // deliveries consumed from the queue
publishChannel *amqp.Channel // channel used to publish replies
replyExchange string // exchange replies are published to
bufferedMessage []byte // unread remainder of the last delivery body
closed bool // set by Close; guarded by closeMutex
closers []io.Closer // extra resources closed together with the connection
closeMutex sync.Mutex // guards closed and serializes Write against Close
}
// newConnection starts consuming from queue on consumeConn and prepares a
// publish channel on publishConn for replies to replyExchange.
// It panics if the consumer cannot be registered. createChannel is a
// package-level helper defined elsewhere in this package.
func newConnection(queue, replyExchange string, consumeConn, publishConn *amqp.Connection) *connection {
messageChannel := createChannel(consumeConn)
// args: queue, consumer tag (server-generated), autoAck=true, exclusive,
// noLocal, noWait, table
messageStream, err := messageChannel.Consume(queue, "", true, false, false, false, nil)
if err != nil {
panic(err)
}
return &connection{
messageChannel: messageChannel,
messageStream: messageStream,
publishChannel: createChannel(publishConn),
replyExchange: replyExchange,
bufferedMessage: make([]byte, 0),
closers: make([]io.Closer, 0),
}
}
// Read implements io.Reader over the AMQP delivery stream. It returns the
// next chunk of the current delivery body, fetching the next delivery when
// the buffer is empty. A closed stream or a "disconnected" routing key
// yields io.EOF.
func (conn *connection) Read(p []byte) (int, error) {
	// Loop rather than a single receive: io.Reader implementations are
	// discouraged from returning 0, nil, which a delivery with an empty
	// body would otherwise cause.
	for len(conn.bufferedMessage) == 0 {
		message, ok := <-conn.messageStream
		if !ok || message.RoutingKey == "disconnected" {
			return 0, io.EOF
		}
		conn.bufferedMessage = message.Body
		log.Debug("Read", message.Body)
	}
	n := copy(p, conn.bufferedMessage)
	conn.bufferedMessage = conn.bufferedMessage[n:]
	return n, nil
}
// Write implements io.Writer by publishing p to the reply exchange.
// Writes after Close are silently dropped (reporting 0, nil) so late
// replies do not crash anything. Publish failures are returned to the
// caller instead of panicking: an io.Writer must report failures through
// its error result, and a panic here would take the whole server down
// for a single failed publish.
func (conn *connection) Write(p []byte) (int, error) {
	conn.closeMutex.Lock()
	defer conn.closeMutex.Unlock()
	if conn.closed {
		return 0, nil
	}
	log.Debug("Write", p)
	// mandatory=false, immediate=false: fire-and-forget publish
	err := conn.publishChannel.Publish(conn.replyExchange, "reply-client-message", false, false, amqp.Publishing{Body: p})
	if err != nil {
		return 0, err
	}
	return len(p), nil
}
// Close shuts the connection down: it marks it closed, closes all
// registered closers and both AMQP channels. It is idempotent.
func (conn *connection) Close() error {
	conn.closeMutex.Lock()
	defer conn.closeMutex.Unlock()
	if conn.closed {
		return nil // already closed
	}
	conn.closed = true
	// Workaround for a bug in the amqp library that can deadlock the
	// server: drain any deliveries still in flight so the library's
	// consumer goroutine is never blocked on an unread channel.
	go func() {
		for range conn.messageStream {
		}
	}()
	for _, closer := range conn.closers {
		closer.Close()
	}
	conn.messageChannel.Close()
	conn.publishChannel.Close()
	return nil
}

// notifyClose registers closer to be closed together with the connection.
// Taking closeMutex removes the data race with Close iterating over
// conn.closers; if the connection is already closed the closer is closed
// immediately so it cannot leak.
func (conn *connection) notifyClose(closer io.Closer) {
	conn.closeMutex.Lock()
	defer conn.closeMutex.Unlock()
	if conn.closed {
		closer.Close()
		return
	}
	conn.closers = append(conn.closers, closer)
}
go: Workaround for bug in amqp library that causes server deadlock.
package kite
import (
"github.com/streadway/amqp"
"io"
"koding/tools/log"
"sync"
)
// connection adapts a pair of AMQP channels (one consuming, one publishing)
// to a byte-stream style connection for a single client.
type connection struct {
messageChannel *amqp.Channel // channel the consumer was registered on
messageStream <-chan amqp.Delivery // deliveries consumed from the queue
publishChannel *amqp.Channel // channel used to publish replies
replyExchange string // exchange replies are published to
bufferedMessage []byte // unread remainder of the last delivery body
closed bool // set by Close; guarded by closeMutex
closers []io.Closer // extra resources closed together with the connection
closeMutex sync.Mutex // guards closed and serializes Write against Close
}
// newConnection starts consuming from queue on consumeConn and prepares a
// publish channel on publishConn for replies to replyExchange.
// It panics if the consumer cannot be registered. createChannel is a
// package-level helper defined elsewhere in this package.
func newConnection(queue, replyExchange string, consumeConn, publishConn *amqp.Connection) *connection {
messageChannel := createChannel(consumeConn)
// args: queue, consumer tag (server-generated), autoAck=true, exclusive,
// noLocal, noWait, table
messageStream, err := messageChannel.Consume(queue, "", true, false, false, false, nil)
if err != nil {
panic(err)
}
return &connection{
messageChannel: messageChannel,
messageStream: messageStream,
publishChannel: createChannel(publishConn),
replyExchange: replyExchange,
bufferedMessage: make([]byte, 0),
closers: make([]io.Closer, 0),
}
}
// Read implements io.Reader over the AMQP delivery stream. It returns the
// next chunk of the current delivery body, fetching the next delivery when
// the buffer is empty. A closed stream or a "disconnected" routing key
// yields io.EOF.
func (conn *connection) Read(p []byte) (int, error) {
	// Loop rather than a single receive: io.Reader implementations are
	// discouraged from returning 0, nil, which a delivery with an empty
	// body would otherwise cause.
	for len(conn.bufferedMessage) == 0 {
		message, ok := <-conn.messageStream
		if !ok || message.RoutingKey == "disconnected" {
			return 0, io.EOF
		}
		conn.bufferedMessage = message.Body
		log.Debug("Read", message.Body)
	}
	n := copy(p, conn.bufferedMessage)
	conn.bufferedMessage = conn.bufferedMessage[n:]
	return n, nil
}
// Write implements io.Writer by publishing p to the reply exchange.
// Writes after Close are silently dropped (reporting 0, nil) so late
// replies do not crash anything. Publish failures are returned to the
// caller instead of panicking: an io.Writer must report failures through
// its error result, and a panic here would take the whole server down
// for a single failed publish.
func (conn *connection) Write(p []byte) (int, error) {
	conn.closeMutex.Lock()
	defer conn.closeMutex.Unlock()
	if conn.closed {
		return 0, nil
	}
	log.Debug("Write", p)
	// mandatory=false, immediate=false: fire-and-forget publish
	err := conn.publishChannel.Publish(conn.replyExchange, "reply-client-message", false, false, amqp.Publishing{Body: p})
	if err != nil {
		return 0, err
	}
	return len(p), nil
}
// Close shuts the connection down: it marks it closed, drains the delivery
// stream, closes all registered closers and both AMQP channels. It is
// idempotent.
func (conn *connection) Close() error {
	conn.closeMutex.Lock()
	defer conn.closeMutex.Unlock()
	if conn.closed {
		return nil // already closed
	}
	conn.closed = true
	// Workaround for a bug in the amqp library that can deadlock the
	// server: drain any deliveries still in flight so the library's
	// consumer goroutine is never blocked on an unread channel.
	go func() {
		for range conn.messageStream {
		}
	}()
	for _, closer := range conn.closers {
		closer.Close()
	}
	conn.messageChannel.Close()
	conn.publishChannel.Close()
	return nil
}

// notifyClose registers closer to be closed together with the connection.
// Taking closeMutex removes the data race with Close iterating over
// conn.closers; if the connection is already closed the closer is closed
// immediately so it cannot leak.
func (conn *connection) notifyClose(closer io.Closer) {
	conn.closeMutex.Lock()
	defer conn.closeMutex.Unlock()
	if conn.closed {
		closer.Close()
		return
	}
	conn.closers = append(conn.closers, closer)
}
|
package python
import (
"encoding/json"
"log"
"os"
"os/exec"
"path/filepath"
"github.com/kr/fs"
"sourcegraph.com/sourcegraph/srclib/toolchain"
"sourcegraph.com/sourcegraph/srclib/unit"
)
// Scan a directory, listing all source units.
//
// srcdir is the directory to scan, repoURI identifies the repository (used
// to look up hand-maintained special cases), repoSubdir is currently unused.
func Scan(srcdir string, repoURI string, repoSubdir string) ([]*unit.SourceUnit, error) {
	// Some repositories have hand-maintained unit lists; use those verbatim.
	if units, isSpecial := specialUnits[repoURI]; isSpecial {
		return units, nil
	}
	// Ask pydep for the Python packages rooted under srcdir.
	cmd := exec.Command("pydep-run.py", "list", srcdir)
	cmd.Stderr = os.Stderr
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	var pkgs []*pkgInfo
	if err := json.NewDecoder(stdout).Decode(&pkgs); err != nil {
		return nil, err
	}
	if err := cmd.Wait(); err != nil {
		return nil, err
	}
	// Keep track of all files that have been successfully discovered, so the
	// loose-script scan below does not claim files already owned by a package.
	discoveredScripts := make(map[string]bool)
	units := make([]*unit.SourceUnit, len(pkgs))
	for i, pkg := range pkgs {
		units[i] = pkg.SourceUnit()
		units[i].Files = pythonSourceFiles(pkg.RootDir, discoveredScripts)
		reqs, err := requirements(pkg.RootDir)
		if err != nil {
			return nil, err
		}
		// Distinct index name so the outer loop variable i is not shadowed.
		deps := make([]interface{}, len(reqs))
		for j, req := range reqs {
			deps[j] = req
		}
		units[i].Dependencies = deps
	}
	// Scan for independent scripts, appending to the current set of source units.
	scripts := pythonSourceFiles(srcdir, discoveredScripts)
	if len(scripts) > 0 {
		scriptsUnit := unit.SourceUnit{
			Name:  "PythonProgram",
			Type:  "PythonProgram",
			Files: scripts,
			Dir:   ".",
			Ops:   map[string]*toolchain.ToolRef{"depresolve": nil, "graph": nil},
		}
		// Requirements for loose scripts are best-effort; failures are ignored.
		reqs, err := requirements(srcdir)
		if err == nil {
			deps := make([]interface{}, len(reqs))
			for j, req := range reqs {
				deps[j] = req
			}
			scriptsUnit.Dependencies = deps
		}
		units = append(units, &scriptsUnit)
	}
	return units, nil
}
// requirements shells out to pydep to list the dependencies declared for
// the source unit rooted at unitDir, dropping any that lack a repo URL
// (and logging a warning naming the dropped keys).
func requirements(unitDir string) ([]*requirement, error) {
	cmd := exec.Command("pydep-run.py", "dep", unitDir)
	cmd.Stderr = os.Stderr
	out, err := cmd.Output()
	if err != nil {
		return nil, err
	}
	var parsed []*requirement
	if err := json.Unmarshal(out, &parsed); err != nil {
		return nil, err
	}
	kept, ignored := pruneReqs(parsed)
	if len(ignored) > 0 {
		keys := make([]string, len(ignored))
		for i, req := range ignored {
			keys[i] = req.Key
		}
		log.Printf("(warn) ignoring dependencies %v because repo URL absent", keys)
	}
	return kept, nil
}
// pythonSourceFiles returns all *.py files under dir (paths relative to
// dir) that are not yet present in discoveredScripts, recording every file
// it returns so subsequent scans skip it.
func pythonSourceFiles(dir string, discoveredScripts map[string]bool) (files []string) {
	walker := fs.Walk(dir)
	for walker.Step() {
		if walker.Err() != nil {
			continue
		}
		if walker.Stat().IsDir() || filepath.Ext(walker.Path()) != ".py" {
			continue
		}
		rel, _ := filepath.Rel(dir, walker.Path())
		if discoveredScripts[rel] {
			continue
		}
		files = append(files, rel)
		discoveredScripts[rel] = true
	}
	return
}

// pruneReqs partitions reqs into those with a clone URL (kept) and those
// without (ignored); dependencies lacking a repo URL cannot be resolved.
func pruneReqs(reqs []*requirement) (kept, ignored []*requirement) {
	for _, req := range reqs {
		if req.RepoURL == "" {
			ignored = append(ignored, req)
		} else {
			kept = append(kept, req)
		}
	}
	return
}
Shortened script unit name to '.'
package python
import (
"encoding/json"
"log"
"os"
"os/exec"
"path/filepath"
"github.com/kr/fs"
"sourcegraph.com/sourcegraph/srclib/toolchain"
"sourcegraph.com/sourcegraph/srclib/unit"
)
// Scan a directory, listing all source units.
//
// srcdir is the directory to scan, repoURI identifies the repository (used
// to look up hand-maintained special cases), repoSubdir is currently unused.
func Scan(srcdir string, repoURI string, repoSubdir string) ([]*unit.SourceUnit, error) {
	// Some repositories have hand-maintained unit lists; use those verbatim.
	if units, isSpecial := specialUnits[repoURI]; isSpecial {
		return units, nil
	}
	// Ask pydep for the Python packages rooted under srcdir.
	cmd := exec.Command("pydep-run.py", "list", srcdir)
	cmd.Stderr = os.Stderr
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	var pkgs []*pkgInfo
	if err := json.NewDecoder(stdout).Decode(&pkgs); err != nil {
		return nil, err
	}
	if err := cmd.Wait(); err != nil {
		return nil, err
	}
	// Keep track of all files that have been successfully discovered, so the
	// loose-script scan below does not claim files already owned by a package.
	discoveredScripts := make(map[string]bool)
	units := make([]*unit.SourceUnit, len(pkgs))
	for i, pkg := range pkgs {
		units[i] = pkg.SourceUnit()
		units[i].Files = pythonSourceFiles(pkg.RootDir, discoveredScripts)
		reqs, err := requirements(pkg.RootDir)
		if err != nil {
			return nil, err
		}
		// Distinct index name so the outer loop variable i is not shadowed.
		deps := make([]interface{}, len(reqs))
		for j, req := range reqs {
			deps[j] = req
		}
		units[i].Dependencies = deps
	}
	// Scan for independent scripts, appending to the current set of source units.
	scripts := pythonSourceFiles(srcdir, discoveredScripts)
	if len(scripts) > 0 {
		scriptsUnit := unit.SourceUnit{
			Name:  ".",
			Type:  "PythonProgram",
			Files: scripts,
			Dir:   ".",
			Ops:   map[string]*toolchain.ToolRef{"depresolve": nil, "graph": nil},
		}
		// Requirements for loose scripts are best-effort; failures are ignored.
		reqs, err := requirements(srcdir)
		if err == nil {
			deps := make([]interface{}, len(reqs))
			for j, req := range reqs {
				deps[j] = req
			}
			scriptsUnit.Dependencies = deps
		}
		units = append(units, &scriptsUnit)
	}
	return units, nil
}
// requirements shells out to pydep to list the dependencies declared for
// the source unit rooted at unitDir, dropping any that lack a repo URL
// (and logging a warning naming the dropped keys).
func requirements(unitDir string) ([]*requirement, error) {
	cmd := exec.Command("pydep-run.py", "dep", unitDir)
	cmd.Stderr = os.Stderr
	out, err := cmd.Output()
	if err != nil {
		return nil, err
	}
	var parsed []*requirement
	if err := json.Unmarshal(out, &parsed); err != nil {
		return nil, err
	}
	kept, ignored := pruneReqs(parsed)
	if len(ignored) > 0 {
		keys := make([]string, len(ignored))
		for i, req := range ignored {
			keys[i] = req.Key
		}
		log.Printf("(warn) ignoring dependencies %v because repo URL absent", keys)
	}
	return kept, nil
}
// pythonSourceFiles returns all *.py files under dir (paths relative to
// dir) that are not yet present in discoveredScripts, recording every file
// it returns so subsequent scans skip it.
func pythonSourceFiles(dir string, discoveredScripts map[string]bool) (files []string) {
	walker := fs.Walk(dir)
	for walker.Step() {
		if walker.Err() != nil {
			continue
		}
		if walker.Stat().IsDir() || filepath.Ext(walker.Path()) != ".py" {
			continue
		}
		rel, _ := filepath.Rel(dir, walker.Path())
		if discoveredScripts[rel] {
			continue
		}
		files = append(files, rel)
		discoveredScripts[rel] = true
	}
	return
}

// pruneReqs partitions reqs into those with a clone URL (kept) and those
// without (ignored); dependencies lacking a repo URL cannot be resolved.
func pruneReqs(reqs []*requirement) (kept, ignored []*requirement) {
	for _, req := range reqs {
		if req.RepoURL == "" {
			ignored = append(ignored, req)
		} else {
			kept = append(kept, req)
		}
	}
	return
}
|
// Copyright 2012 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package queue
import (
"encoding/gob"
"errors"
"io"
"net"
"sync/atomic"
"time"
)
// The size of buffered channels created by ChannelFromWriter.
const ChanSize = 32
// Message represents the message stored in the queue.
//
// A message is specified by an action and a slice of strings, representing
// arguments to the action.
//
// For example, the action "regenerate apprc" could receive one argument: the
// name of the app for which the apprc file will be regenerated.
type Message struct {
Action string // what should be done with the message
Args []string // arguments for the action
Visits int // incremented each time the message is re-queued via PutBack
}
// ChannelFromWriter returns a channel from a given io.WriteCloser.
//
// Every time a Message is sent to the channel, it gets written to the writer
// in gob format. ChannelFromWriter also returns a channel for errors in
// writing. You can use a select for error checking:
//
// ch, errCh := ChannelFromWriter(w)
// // use ch
// select {
// case err := <-errCh:
// // treat the error
// case <-time.After(5e9):
// // no error after 5 seconds
// }
//
// Please notice that there is no deadline for the writing. You can obviously
// ignore errors, if they are not significant for you.
//
// Whenever you close the message channel (and you should, to make it clear
// that you will not send any messages to the channel anymore), the error
// channel will get automatically closed, as will the WriteCloser.
//
// Both channels are buffered by ChanSize.
func ChannelFromWriter(w io.WriteCloser) (chan<- Message, <-chan error) {
msgChan := make(chan Message, ChanSize)
errChan := make(chan error, ChanSize)
// write runs until msgChan is closed, then closes errChan and w.
go write(w, msgChan, errChan)
return msgChan, errChan
}
// write encodes every Message received from ch to w in gob format,
// reporting encoding failures on errCh. Once ch is closed it closes the
// writer and then errCh.
func write(w io.WriteCloser, ch <-chan Message, errCh chan<- error) {
	defer close(errCh)
	defer w.Close()
	enc := gob.NewEncoder(w)
	for msg := range ch {
		err := enc.Encode(msg)
		if err == nil {
			continue
		}
		errCh <- err
	}
}
// Server is the server that hosts the queue. It receives messages and
// process them.
type Server struct {
listener net.Listener // accepts client connections
pairs chan pair // decoded messages (or read errors) from all clients
close chan int // signals a blocked Message() caller that the server closed
closed int32 // accessed atomically; non-zero once Close has run
}
// StartServer starts a new queue server from a local address.
//
// The address must be a TCP address, in the format host:port (for example,
// [::1]:8080 or 192.168.254.10:2020).
func StartServer(laddr string) (*Server, error) {
	listener, err := net.Listen("tcp", laddr)
	if err != nil {
		return nil, errors.New("Could not start server: " + err.Error())
	}
	server := &Server{
		listener: listener,
		pairs:    make(chan pair, ChanSize),
		close:    make(chan int, 1),
	}
	// The accept loop runs until the server is closed.
	go server.loop()
	return server, nil
}
// handle handles a new client, decoding gob-encoded messages from conn and
// pushing each (message, error) pair onto qs.pairs until a decode error
// (including io.EOF on disconnect) terminates the loop. Pairs are dropped
// once the server is closed.
func (qs *Server) handle(conn net.Conn) {
var err error
decoder := gob.NewDecoder(conn)
for err == nil {
var msg Message
err = decoder.Decode(&msg)
// The final iteration also forwards the terminating error, so a
// Message() caller can observe the disconnect.
if atomic.LoadInt32(&qs.closed) == 0 {
qs.pairs <- pair{message: msg, err: err}
}
}
}
// loop accepts connections until the server is closed, spawning one handle
// goroutine per client. Temporary accept errors are retried; a permanent
// *net.OpError (e.g. listener closed) terminates the loop.
func (qs *Server) loop() {
for atomic.LoadInt32(&qs.closed) == 0 {
conn, err := qs.listener.Accept()
if err != nil {
if e, ok := err.(*net.OpError); ok && !e.Temporary() {
return
}
continue
}
go qs.handle(conn)
}
}
// Message returns the first available message in the queue, or an error if
// it fails to read the message, or times out while waiting for one.
//
// If timeout is negative, this method will wait nearly forever for the
// arriving of a message or an error.
func (qs *Server) Message(timeout time.Duration) (Message, error) {
	if timeout < 0 {
		// "Nearly forever": 1<<62 nanoseconds is roughly 146 years.
		timeout = 1 << 62
	}
	select {
	case p := <-qs.pairs:
		if p.err == io.EOF {
			return p.message, errors.New("EOF: client disconnected.")
		}
		return p.message, p.err
	case <-qs.close:
		return Message{}, errors.New("Server is closed.")
	case <-time.After(timeout):
		return Message{}, errors.New("Timed out waiting for the message.")
	}
}
// PutBack puts a message back in the queue. It should be used when a message
// got using Message method cannot be processed yet. You put it back in the
// queue for processing later.
func (qs *Server) PutBack(message Message) {
	if atomic.LoadInt32(&qs.closed) != 0 {
		return // dropped: the server is closed
	}
	message.Visits++
	qs.pairs <- pair{message: message}
}

// Addr returns the address of the server.
func (qs *Server) Addr() string {
	addr := qs.listener.Addr()
	return addr.String()
}
// Close closes the server, closing the underlying listener. It returns an
// error if the server was already closed, otherwise the listener's close
// error (if any).
func (qs *Server) Close() error {
// The CompareAndSwap guarantees the shutdown sequence runs at most once.
if !atomic.CompareAndSwapInt32(&qs.closed, 0, 1) {
return errors.New("Server already closed.")
}
err := qs.listener.Close()
// Wake one blocked Message() caller with a "closed" pair, and signal the
// buffered close channel for a later caller.
qs.pairs <- pair{err: errors.New("Server is closed.")}
qs.close <- 1
return err
}
// Dial is used to connect to a queue server.
//
// It returns three values: the channel to which messages should be sent, the
// channel where the client will get errors from the server during writing of
// messages and an error, that will be non-nil in case of failure to connect to
// the queue server.
//
// Whenever the message channel gets closed, the connection with the remote
// server will be closed.
func Dial(addr string) (chan<- Message, <-chan error, error) {
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return nil, nil, errors.New("Could not dial to " + addr + ": " + err.Error())
	}
	// Named errCh so the local variable does not shadow the errors package.
	messages, errCh := ChannelFromWriter(conn)
	return messages, errCh, nil
}

// pair is a pair of message and error.
type pair struct {
	message Message // the decoded message; zero-valued when only err is set
	err     error   // decode/transport error associated with the message
}
queue: improve docs for PutBack
// Copyright 2012 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package queue
import (
"encoding/gob"
"errors"
"io"
"net"
"sync/atomic"
"time"
)
// The size of buffered channels created by ChannelFromWriter.
const ChanSize = 32
// Message represents the message stored in the queue.
//
// A message is specified by an action and a slice of strings, representing
// arguments to the action.
//
// For example, the action "regenerate apprc" could receive one argument: the
// name of the app for which the apprc file will be regenerated.
type Message struct {
Action string // what should be done with the message
Args []string // arguments for the action
Visits int // incremented each time the message is re-queued via PutBack
}
// ChannelFromWriter returns a channel from a given io.WriteCloser.
//
// Every time a Message is sent to the channel, it gets written to the writer
// in gob format. ChannelFromWriter also returns a channel for errors in
// writing. You can use a select for error checking:
//
// ch, errCh := ChannelFromWriter(w)
// // use ch
// select {
// case err := <-errCh:
// // treat the error
// case <-time.After(5e9):
// // no error after 5 seconds
// }
//
// Please notice that there is no deadline for the writing. You can obviously
// ignore errors, if they are not significant for you.
//
// Whenever you close the message channel (and you should, to make it clear
// that you will not send any messages to the channel anymore), the error
// channel will get automatically closed, as will the WriteCloser.
//
// Both channels are buffered by ChanSize.
func ChannelFromWriter(w io.WriteCloser) (chan<- Message, <-chan error) {
msgChan := make(chan Message, ChanSize)
errChan := make(chan error, ChanSize)
// write runs until msgChan is closed, then closes errChan and w.
go write(w, msgChan, errChan)
return msgChan, errChan
}
// write encodes every Message received from ch to w in gob format,
// reporting encoding failures on errCh. Once ch is closed it closes the
// writer and then errCh.
func write(w io.WriteCloser, ch <-chan Message, errCh chan<- error) {
	defer close(errCh)
	defer w.Close()
	enc := gob.NewEncoder(w)
	for msg := range ch {
		err := enc.Encode(msg)
		if err == nil {
			continue
		}
		errCh <- err
	}
}
// Server is the server that hosts the queue. It receives messages and
// process them.
type Server struct {
listener net.Listener // accepts client connections
pairs chan pair // decoded messages (or read errors) from all clients
close chan int // signals a blocked Message() caller that the server closed
closed int32 // accessed atomically; non-zero once Close has run
}
// StartServer starts a new queue server from a local address.
//
// The address must be a TCP address, in the format host:port (for example,
// [::1]:8080 or 192.168.254.10:2020).
func StartServer(laddr string) (*Server, error) {
	listener, err := net.Listen("tcp", laddr)
	if err != nil {
		return nil, errors.New("Could not start server: " + err.Error())
	}
	server := &Server{
		listener: listener,
		pairs:    make(chan pair, ChanSize),
		close:    make(chan int, 1),
	}
	// The accept loop runs until the server is closed.
	go server.loop()
	return server, nil
}
// handle handles a new client, decoding gob-encoded messages from conn and
// pushing each (message, error) pair onto qs.pairs until a decode error
// (including io.EOF on disconnect) terminates the loop. Pairs are dropped
// once the server is closed.
func (qs *Server) handle(conn net.Conn) {
var err error
decoder := gob.NewDecoder(conn)
for err == nil {
var msg Message
err = decoder.Decode(&msg)
// The final iteration also forwards the terminating error, so a
// Message() caller can observe the disconnect.
if atomic.LoadInt32(&qs.closed) == 0 {
qs.pairs <- pair{message: msg, err: err}
}
}
}
// loop accepts connections until the server is closed, spawning one handle
// goroutine per client. Temporary accept errors are retried; a permanent
// *net.OpError (e.g. listener closed) terminates the loop.
func (qs *Server) loop() {
for atomic.LoadInt32(&qs.closed) == 0 {
conn, err := qs.listener.Accept()
if err != nil {
if e, ok := err.(*net.OpError); ok && !e.Temporary() {
return
}
continue
}
go qs.handle(conn)
}
}
// Message returns the first available message in the queue, or an error if
// it fails to read the message, or times out while waiting for one.
//
// If timeout is negative, this method will wait nearly forever for the
// arriving of a message or an error.
func (qs *Server) Message(timeout time.Duration) (Message, error) {
	if timeout < 0 {
		// "Nearly forever": 1<<62 nanoseconds is roughly 146 years.
		timeout = 1 << 62
	}
	select {
	case p := <-qs.pairs:
		if p.err == io.EOF {
			return p.message, errors.New("EOF: client disconnected.")
		}
		return p.message, p.err
	case <-qs.close:
		return Message{}, errors.New("Server is closed.")
	case <-time.After(timeout):
		return Message{}, errors.New("Timed out waiting for the message.")
	}
}
// PutBack puts a message back in the queue. It should be used when a message
// returned by the Message method cannot be processed yet. You put it back in
// the queue for processing later.
func (qs *Server) PutBack(message Message) {
	if atomic.LoadInt32(&qs.closed) != 0 {
		return // dropped: the server is closed
	}
	message.Visits++
	qs.pairs <- pair{message: message}
}

// Addr returns the address of the server.
func (qs *Server) Addr() string {
	addr := qs.listener.Addr()
	return addr.String()
}
// Close closes the server, closing the underlying listener. It returns an
// error if the server was already closed, otherwise the listener's close
// error (if any).
func (qs *Server) Close() error {
// The CompareAndSwap guarantees the shutdown sequence runs at most once.
if !atomic.CompareAndSwapInt32(&qs.closed, 0, 1) {
return errors.New("Server already closed.")
}
err := qs.listener.Close()
// Wake one blocked Message() caller with a "closed" pair, and signal the
// buffered close channel for a later caller.
qs.pairs <- pair{err: errors.New("Server is closed.")}
qs.close <- 1
return err
}
// Dial is used to connect to a queue server.
//
// It returns three values: the channel to which messages should be sent, the
// channel where the client will get errors from the server during writing of
// messages and an error, that will be non-nil in case of failure to connect to
// the queue server.
//
// Whenever the message channel gets closed, the connection with the remote
// server will be closed.
func Dial(addr string) (chan<- Message, <-chan error, error) {
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return nil, nil, errors.New("Could not dial to " + addr + ": " + err.Error())
	}
	// Named errCh so the local variable does not shadow the errors package.
	messages, errCh := ChannelFromWriter(conn)
	return messages, errCh, nil
}

// pair is a pair of message and error.
type pair struct {
	message Message // the decoded message; zero-valued when only err is set
	err     error   // decode/transport error associated with the message
}
|
package machine
import (
"fmt"
"sort"
)
// Machine ties a Vendor (inventory and dispensing) to a ChangeMaker (coin
// handling) to model a complete vending machine.
type Machine struct {
vendor Vendor
changeMaker ChangeMaker
}
// NewMachine builds a Machine from its two collaborators.
func NewMachine(vendor Vendor, changeMaker ChangeMaker) *Machine {
return &Machine{vendor, changeMaker}
}
// Purchase the item in the specific slot and accept the given coins as payment.
// Return success of the purchase, and associated change or a full refund in the
// event of a failure.
func (m *Machine) Purchase(choice string, payment Change) (*Item, Change, error) {
	slot, err := m.vendor.Pick(choice)
	if err != nil {
		// Unknown choice: refund everything.
		return nil, payment, &ChoiceUnavailableError{choice, err.Error()}
	}
	paid := payment.Value()
	price := slot.Price()
	if paid < price {
		// Not enough money inserted: full refund.
		return nil, payment, &UnderpaidError{choice, price, paid}
	}
	change, err := m.changeMaker.MakeChange(paid - price)
	if err != nil {
		// Cannot make exact change: refund everything.
		return nil, payment, &ChoiceUnavailableError{choice, err.Error()}
	}
	item, err := m.vendor.Dispense(slot)
	if err != nil {
		// Out of stock, out of order, etc.: refund, propagate the raw error.
		return nil, payment, err
	}
	return item, change, nil
}
// Describe returns one VendingItem per slot, sorted by choice code.
func (m *Machine) Describe() []VendingItem {
	listing := m.vendor.List()
	items := make([]VendingItem, 0, len(listing))
	for choice, slot := range listing {
		items = append(items, VendingItem{
			Choice:    choice,
			Item:      slot.ItemName(),
			Price:     slot.Price(),
			Available: slot.Available(),
		})
	}
	sort.Sort(byChoice(items))
	return items
}
// VendingItem describes one purchasable slot for display purposes.
type VendingItem struct {
	Choice    string // slot selection code
	Item      string // item name
	Price     int    // price in cents
	Available bool   // whether the slot currently has stock
}

// String renders the item as VendingItem[choice "name" <price>c], with an
// " OUT OF STOCK" suffix when the slot is unavailable.
func (v VendingItem) String() string {
	suffix := ""
	if !v.Available {
		suffix = " OUT OF STOCK"
	}
	return fmt.Sprintf("VendingItem[%s \"%s\" %dc%s]", v.Choice, v.Item, v.Price, suffix)
}

// byChoice sorts VendingItems by their selection code.
type byChoice []VendingItem

func (v byChoice) Len() int           { return len(v) }
func (v byChoice) Swap(i, j int)      { v[i], v[j] = v[j], v[i] }
func (v byChoice) Less(i, j int) bool { return v[i].Choice < v[j].Choice }
// ChoiceUnavailableError reports that a selection could not be served,
// carrying the underlying reason text.
type ChoiceUnavailableError struct {
	choice string
	reason string
}

// Error implements the error interface.
func (e *ChoiceUnavailableError) Error() string {
	const format = "Sorry, your choice \"%s\" is currently unavailable for reason: %v"
	return fmt.Sprintf(format, e.choice, e.reason)
}
// UnderpaidError reports a purchase attempt where the coins inserted did
// not cover the slot price.
type UnderpaidError struct {
	choice string // the slot the customer selected
	price  int    // slot price in cents
	paid   int    // amount actually inserted, in cents
}

// Error implements the error interface.
//
// Fix: the second sentence used to be passed as a format *argument*, so the
// verbs no longer lined up with choice/price/paid and the message rendered
// with %!d(string=...) noise plus an extra-argument warning. Concatenating
// the two sentences into one format string lets the three verbs consume the
// three fields.
func (e *UnderpaidError) Error() string {
	return fmt.Sprintf(
		"Your choice \"%s\" costs %dc, you only paid %dc. "+
			"Please insert the correct amount and try again",
		e.choice, e.price, e.paid)
}
Fix broken Sprintf in UnderpaidError
package machine
import (
"fmt"
"sort"
)
// Machine ties a Vendor (inventory and dispensing) to a ChangeMaker (coin
// handling) to model a complete vending machine.
type Machine struct {
vendor Vendor
changeMaker ChangeMaker
}
// NewMachine builds a Machine from its two collaborators.
func NewMachine(vendor Vendor, changeMaker ChangeMaker) *Machine {
return &Machine{vendor, changeMaker}
}
// Purchase the item in the specific slot and accept the given coins as payment.
// Return success of the purchase, and associated change or a full refund in the
// event of a failure.
func (m *Machine) Purchase(choice string, payment Change) (*Item, Change, error) {
	slot, err := m.vendor.Pick(choice)
	if err != nil {
		// Unknown choice: refund everything.
		return nil, payment, &ChoiceUnavailableError{choice, err.Error()}
	}
	paid := payment.Value()
	price := slot.Price()
	if paid < price {
		// Not enough money inserted: full refund.
		return nil, payment, &UnderpaidError{choice, price, paid}
	}
	change, err := m.changeMaker.MakeChange(paid - price)
	if err != nil {
		// Cannot make exact change: refund everything.
		return nil, payment, &ChoiceUnavailableError{choice, err.Error()}
	}
	item, err := m.vendor.Dispense(slot)
	if err != nil {
		// Out of stock, out of order, etc.: refund, propagate the raw error.
		return nil, payment, err
	}
	return item, change, nil
}
// Describe returns one VendingItem per slot, sorted by choice code.
func (m *Machine) Describe() []VendingItem {
	listing := m.vendor.List()
	items := make([]VendingItem, 0, len(listing))
	for choice, slot := range listing {
		items = append(items, VendingItem{
			Choice:    choice,
			Item:      slot.ItemName(),
			Price:     slot.Price(),
			Available: slot.Available(),
		})
	}
	sort.Sort(byChoice(items))
	return items
}
// VendingItem describes one purchasable slot for display purposes.
type VendingItem struct {
	Choice    string // slot selection code
	Item      string // item name
	Price     int    // price in cents
	Available bool   // whether the slot currently has stock
}

// String renders the item as VendingItem[choice "name" <price>c], with an
// " OUT OF STOCK" suffix when the slot is unavailable.
func (v VendingItem) String() string {
	suffix := ""
	if !v.Available {
		suffix = " OUT OF STOCK"
	}
	return fmt.Sprintf("VendingItem[%s \"%s\" %dc%s]", v.Choice, v.Item, v.Price, suffix)
}

// byChoice sorts VendingItems by their selection code.
type byChoice []VendingItem

func (v byChoice) Len() int           { return len(v) }
func (v byChoice) Swap(i, j int)      { v[i], v[j] = v[j], v[i] }
func (v byChoice) Less(i, j int) bool { return v[i].Choice < v[j].Choice }
// ChoiceUnavailableError reports that a selection could not be served,
// carrying the underlying reason text.
type ChoiceUnavailableError struct {
	choice string
	reason string
}

// Error implements the error interface.
func (e *ChoiceUnavailableError) Error() string {
	const format = "Sorry, your choice \"%s\" is currently unavailable for reason: %v"
	return fmt.Sprintf(format, e.choice, e.reason)
}
// UnderpaidError reports a purchase attempt where the coins inserted did
// not cover the slot price.
type UnderpaidError struct {
	choice string // the slot the customer selected
	price  int    // slot price in cents
	paid   int    // amount actually inserted, in cents
}

// Error implements the error interface.
func (e *UnderpaidError) Error() string {
	const format = "Your choice \"%s\" costs %dc, you only paid %dc. " +
		"Please insert the correct amount and try again"
	return fmt.Sprintf(format, e.choice, e.price, e.paid)
}
|
package terraform
import (
"fmt"
"log"
"github.com/hashicorp/hcl2/hcl"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/convert"
"github.com/zclconf/go-cty/cty/gocty"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/config/hcl2shim"
)
// EvalDeleteOutput is an EvalNode implementation that deletes an output
// from the state.
type EvalDeleteOutput struct {
	Addr addrs.OutputValue
}

// TODO: test
// Eval removes the output named by n.Addr from the current module's state.
// It is a no-op when there is no state, or no module state for this path.
func (n *EvalDeleteOutput) Eval(ctx EvalContext) (interface{}, error) {
	state, lock := ctx.State()
	if state == nil {
		return nil, nil
	}
	// Hold the write lock while mutating the module's outputs.
	lock.Lock()
	defer lock.Unlock()
	if mod := state.ModuleByPath(ctx.Path()); mod != nil {
		delete(mod.Outputs, n.Addr.Name)
	}
	return nil, nil
}
// EvalWriteOutput is an EvalNode implementation that writes the output
// for the given name to the current state.
type EvalWriteOutput struct {
Addr addrs.OutputValue // address of the output value to write
Sensitive bool // marks the stored output as sensitive
Expr hcl.Expression // expression whose value becomes the output
// ContinueOnErr allows interpolation to fail during Input
ContinueOnErr bool
}
// TODO: test
// Eval evaluates the output expression and records the result in the
// module state, coercing it into the legacy string/list/map storage forms.
func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) {
	// This has to run before we have a state lock, since evaluation also
	// reads the state
	val, diags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, nil)
	// We'll handle errors below, after we have loaded the module.
	state, lock := ctx.State()
	if state == nil {
		return nil, fmt.Errorf("cannot write state to nil state")
	}
	// Get a write lock so we can access this instance
	lock.Lock()
	defer lock.Unlock()
	// Look for the module state. If we don't have one, create it.
	mod := state.ModuleByPath(ctx.Path())
	if mod == nil {
		mod = state.AddModule(ctx.Path())
	}
	// handling the interpolation error
	if diags.HasErrors() {
		if n.ContinueOnErr || flagWarnOutputErrors {
			log.Printf("[ERROR] Output interpolation %q failed: %s", n.Addr.Name, diags.Err())
			// if we're continuing, make sure the output is included, and
			// marked as unknown
			mod.Outputs[n.Addr.Name] = &OutputState{
				Type:  "string",
				Value: config.UnknownVariableValue,
			}
			return nil, EvalEarlyExitError{}
		}
		return nil, diags.Err()
	}
	ty := val.Type()
	switch {
	case ty.IsPrimitiveType():
		// For now we record all primitive types as strings, for compatibility
		// with our existing state formats.
		// FIXME: Revise the state format to support any type.
		var valueTyped string
		switch {
		case !val.IsKnown():
			// Legacy handling of unknown values as a special string.
			valueTyped = config.UnknownVariableValue
		case val.IsNull():
			// State doesn't currently support null, so we'll save as empty string.
			valueTyped = ""
		default:
			strVal, err := convert.Convert(val, cty.String)
			if err != nil {
				// Should never happen, because all primitives can convert to string.
				return nil, fmt.Errorf("cannot marshal %#v for storage in state: %s", val, err)
			}
			err = gocty.FromCtyValue(strVal, &valueTyped)
			if err != nil {
				// Should never happen, because we already converted to string.
				return nil, fmt.Errorf("cannot marshal %#v for storage in state: %s", val, err)
			}
		}
		mod.Outputs[n.Addr.Name] = &OutputState{
			Type:      "string",
			Sensitive: n.Sensitive,
			Value:     valueTyped,
		}
	case ty.IsListType() || ty.IsTupleType() || ty.IsSetType():
		// For now we'll use our legacy storage forms for list-like types.
		// This produces a []interface{}.
		valueTyped := hcl2shim.ConfigValueFromHCL2(val)
		mod.Outputs[n.Addr.Name] = &OutputState{
			Type:      "list",
			Sensitive: n.Sensitive,
			Value:     valueTyped,
		}
	case ty.IsMapType() || ty.IsObjectType():
		// For now we'll use our legacy storage forms for map-like types.
		// This produces a map[string]interface{}.
		valueTyped := hcl2shim.ConfigValueFromHCL2(val)
		mod.Outputs[n.Addr.Name] = &OutputState{
			Type:      "map",
			Sensitive: n.Sensitive,
			Value:     valueTyped,
		}
	case ty == cty.DynamicPseudoType || !val.IsWhollyKnown():
		// Values of the dynamic pseudo-type are always unknown, so
		// !IsWhollyKnown should already catch them; the explicit type check
		// is a redundant belt-and-braces guard until the underlying cty
		// behavior is confirmed. While we're still using our existing state
		// format, we can't represent partially-unknown values properly, so
		// we'll just stub the whole thing out.
		// FIXME: After the state format is revised, remove this special case
		// and just store the unknown value directly.
		mod.Outputs[n.Addr.Name] = &OutputState{
			Type:      "unknown",
			Sensitive: n.Sensitive,
			Value:     hcl2shim.UnknownVariableValue,
		}
	default:
		return nil, fmt.Errorf("output %s is not a valid type (%s)", n.Addr.Name, ty.FriendlyName())
	}
	return nil, nil
}
core: EvalWriteOutput handle dynamic pseudo-type
This should actually have been caught by !val.IsWhollyKnown, since
DynamicVal is always unknown, but something isn't working quite right here
and so for now we'll redundantly check also if it's of the dynamic
pseudo-type, and then revisit cty later to see if there's a real bug
hiding down there.
package terraform
import (
"fmt"
"log"
"github.com/hashicorp/hcl2/hcl"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/convert"
"github.com/zclconf/go-cty/cty/gocty"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/config/hcl2shim"
)
// EvalDeleteOutput is an EvalNode implementation that deletes an output
// from the state.
type EvalDeleteOutput struct {
	Addr addrs.OutputValue
}

// TODO: test
// Eval removes the output named by n.Addr from the current module's state.
// It is a no-op when there is no state, or no module state for this path.
func (n *EvalDeleteOutput) Eval(ctx EvalContext) (interface{}, error) {
	state, lock := ctx.State()
	if state == nil {
		return nil, nil
	}
	// Hold the write lock while mutating the module's outputs.
	lock.Lock()
	defer lock.Unlock()
	if mod := state.ModuleByPath(ctx.Path()); mod != nil {
		delete(mod.Outputs, n.Addr.Name)
	}
	return nil, nil
}
// EvalWriteOutput is an EvalNode implementation that writes the output
// for the given name to the current state.
type EvalWriteOutput struct {
Addr addrs.OutputValue // address of the output value to write
Sensitive bool // marks the stored output as sensitive
Expr hcl.Expression // expression whose value becomes the output
// ContinueOnErr allows interpolation to fail during Input
ContinueOnErr bool
}
// TODO: test

// Eval evaluates the output expression and records the result in the module
// state under n.Addr.Name, mapping cty types onto the legacy state output
// representation: primitives become "string", list/tuple/set become "list",
// map/object become "map", and anything not wholly known becomes "unknown".
// Returns an error for a nil state, an invalid output type, or (unless
// ContinueOnErr/flagWarnOutputErrors is set) an evaluation failure.
func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) {
	// This has to run before we have a state lock, since evaluation also
	// reads the state
	val, diags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, nil)
	// We'll handle errors below, after we have loaded the module.

	state, lock := ctx.State()
	if state == nil {
		return nil, fmt.Errorf("cannot write state to nil state")
	}

	// Get a write lock so we can access this instance
	lock.Lock()
	defer lock.Unlock()

	// Look for the module state. If we don't have one, create it.
	mod := state.ModuleByPath(ctx.Path())
	if mod == nil {
		mod = state.AddModule(ctx.Path())
	}

	// handling the interpolation error
	if diags.HasErrors() {
		if n.ContinueOnErr || flagWarnOutputErrors {
			log.Printf("[ERROR] Output interpolation %q failed: %s", n.Addr.Name, diags.Err())
			// if we're continuing, make sure the output is included, and
			// marked as unknown
			mod.Outputs[n.Addr.Name] = &OutputState{
				Type:  "string",
				Value: config.UnknownVariableValue,
			}
			return nil, EvalEarlyExitError{}
		}
		return nil, diags.Err()
	}

	ty := val.Type()
	switch {
	case ty.IsPrimitiveType():
		// For now we record all primitive types as strings, for compatibility
		// with our existing state formats.
		// FIXME: Revise the state format to support any type.
		var valueTyped string
		switch {
		case !val.IsKnown():
			// Legacy handling of unknown values as a special string.
			valueTyped = config.UnknownVariableValue
		case val.IsNull():
			// State doesn't currently support null, so we'll save as empty string.
			valueTyped = ""
		default:
			strVal, err := convert.Convert(val, cty.String)
			if err != nil {
				// Should never happen, because all primitives can convert to string.
				return nil, fmt.Errorf("cannot marshal %#v for storage in state: %s", val, err)
			}
			err = gocty.FromCtyValue(strVal, &valueTyped)
			if err != nil {
				// Should never happen, because we already converted to string.
				return nil, fmt.Errorf("cannot marshal %#v for storage in state: %s", val, err)
			}
		}
		mod.Outputs[n.Addr.Name] = &OutputState{
			Type:      "string",
			Sensitive: n.Sensitive,
			Value:     valueTyped,
		}
	case ty.IsListType() || ty.IsTupleType() || ty.IsSetType():
		// For now we'll use our legacy storage forms for list-like types.
		// This produces a []interface{}.
		valueTyped := hcl2shim.ConfigValueFromHCL2(val)
		mod.Outputs[n.Addr.Name] = &OutputState{
			Type:      "list",
			Sensitive: n.Sensitive,
			Value:     valueTyped,
		}
	case ty.IsMapType() || ty.IsObjectType():
		// For now we'll use our legacy storage forms for map-like types.
		// This produces a map[string]interface{}.
		valueTyped := hcl2shim.ConfigValueFromHCL2(val)
		mod.Outputs[n.Addr.Name] = &OutputState{
			Type:      "map",
			Sensitive: n.Sensitive,
			Value:     valueTyped,
		}
	case ty == cty.DynamicPseudoType || !val.IsWhollyKnown():
		// The DynamicPseudoType check should be redundant with
		// !val.IsWhollyKnown (DynamicVal is always unknown), but it is kept
		// as a belt-and-braces check.
		// While we're still using our existing state format, we can't represent
		// partially-unknown values properly, so we'll just stub the whole
		// thing out.
		// FIXME: After the state format is revised, remove this special case
		// and just store the unknown value directly.
		mod.Outputs[n.Addr.Name] = &OutputState{
			Type:      "unknown",
			Sensitive: n.Sensitive,
			Value:     hcl2shim.UnknownVariableValue,
		}
	default:
		return nil, fmt.Errorf("output %s is not a valid type (%s)", n.Addr.Name, ty.FriendlyName())
	}

	return nil, nil
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Copyright (c) 2015 Klaus Post
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
import (
"fmt"
"io"
"math"
)
const (
	NoCompression       = 0
	BestSpeed           = 1
	BestCompression     = 9
	DefaultCompression  = -1
	ConstantCompression = -2 // Does only Huffman encoding

	logWindowSize = 15
	windowSize    = 1 << logWindowSize
	windowMask    = windowSize - 1

	logMaxOffsetSize = 15  // Standard DEFLATE
	minMatchLength   = 4   // The smallest match that the compressor looks for
	maxMatchLength   = 258 // The longest match for the compressor
	minOffsetSize    = 1   // The shortest offset that makes any sense

	// The maximum number of tokens we put into a single flate block, just to
	// stop things from getting too large.
	maxFlateBlockTokens = 1 << 14
	maxStoreBlockSize   = 65535
	hashBits            = 17 // After 17 performance degrades
	hashSize            = 1 << hashBits
	hashMask            = (1 << hashBits) - 1
	hashShift           = (hashBits + minMatchLength - 1) / minMatchLength
	maxHashOffset       = 1 << 24

	skipNever = math.MaxInt32
)
// useSSE42 selects the crc32sse-based code paths in init when true.
// NOTE(review): presumably set during package init via CPU feature
// detection elsewhere in the package — confirm.
var useSSE42 bool

// compressionLevel holds the tuning parameters for one compression level.
type compressionLevel struct {
	good, lazy, nice, chain, fastSkipHashing, level int
}

// Compression levels have been rebalanced from zlib deflate defaults
// to give a bigger spread in speed and compression.
// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/
var levels = []compressionLevel{
	{}, // 0
	// Level 1+2 uses snappy algorithm - values not used
	{0, 0, 0, 0, 0, 1},
	{0, 0, 0, 0, 0, 2},
	// For levels 3-6 we don't bother trying with lazy matches.
	// Lazy matching is at least 30% slower, with only a 1.5% increase.
	{4, 0, 8, 4, 4, 3},
	{4, 0, 12, 6, 5, 4},
	{6, 0, 24, 16, 6, 5},
	{8, 0, 32, 32, 7, 6},
	// Levels 7-9 use increasingly more lazy matching
	// and increasingly stringent conditions for "good enough".
	{4, 8, 16, 16, skipNever, 7},
	{6, 16, 32, 64, skipNever, 8},
	{32, 258, 258, 4096, skipNever, 9},
}
// hashid is the element type of the hash chain tables (hashHead/hashPrev).
// Stored values are window indexes offset by hashOffset, so the zero value
// can be distinguished from a valid entry.
type hashid uint32

// compressor holds all deflate state for one stream.
type compressor struct {
	compressionLevel

	w          *huffmanBitWriter
	bulkHasher func([]byte, []hash) // hashes many positions at once (oldBulkHash or crc32sseAll)

	// compression algorithm
	fill func(*compressor, []byte) int // copy data to window
	step func(*compressor)             // process window
	sync bool                          // requesting flush

	// Input hash chains
	// hashHead[hashValue] contains the largest inputIndex with the specified hash value
	// If hashHead[hashValue] is within the current window, then
	// hashPrev[hashHead[hashValue] & windowMask] contains the previous index
	// with the same hash value.
	chainHead  int
	hashHead   []hashid
	hashPrev   []hashid
	hashOffset int

	// input window: unprocessed data is window[index:windowEnd]
	index         int
	window        []byte
	windowEnd     int
	blockStart    int  // window index where current tokens start
	byteAvailable bool // if true, still need to process window[index-1].

	// queued output tokens
	tokens tokens

	// deflate state
	length         int
	offset         int
	hash           hash
	maxInsertIndex int
	err            error
	ii             uint16 // position of last match, intended to overflow to reset.

	snap      snappyEnc
	hashMatch [maxMatchLength + minMatchLength]hash
}

// hash is the value type produced by the position hash functions
// (oldHash and the bulk hashers).
type hash int32
// fillDeflate copies input into the sliding window and returns the number of
// bytes consumed. When the window would overflow it shifts the upper half
// down by windowSize and adjusts index/windowEnd/blockStart to match; when
// hashOffset grows past maxHashOffset it rebases every hash-chain entry so
// the offsets stay small.
func (d *compressor) fillDeflate(b []byte) int {
	if d.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
		// shift the window by windowSize
		copy(d.window, d.window[windowSize:2*windowSize])
		d.index -= windowSize
		d.windowEnd -= windowSize
		if d.blockStart >= windowSize {
			d.blockStart -= windowSize
		} else {
			// blockStart points into the discarded half; mark it invalid.
			d.blockStart = math.MaxInt32
		}
		d.hashOffset += windowSize
		if d.hashOffset > maxHashOffset {
			// Rebase all chain entries so hashOffset returns to 1.
			// Entries at or below delta fall out of range and are zeroed.
			delta := d.hashOffset - 1
			d.hashOffset -= delta
			d.chainHead -= delta
			for i, v := range d.hashPrev {
				if int(v) > delta {
					d.hashPrev[i] = hashid(int(v) - delta)
				} else {
					d.hashPrev[i] = 0
				}
			}
			for i, v := range d.hashHead {
				if int(v) > delta {
					d.hashHead[i] = hashid(int(v) - delta)
				} else {
					d.hashHead[i] = 0
				}
			}
		}
	}
	n := copy(d.window[d.windowEnd:], b)
	d.windowEnd += n
	return n
}
// writeBlock emits the queued tokens as one deflate block covering
// window[blockStart:index], then advances blockStart. Nothing is written
// for an empty, non-final block.
func (d *compressor) writeBlock(tok tokens, index int, eof bool) error {
	if index <= 0 && !eof {
		return nil
	}

	var window []byte
	if d.blockStart <= index {
		window = d.window[d.blockStart:index]
	}
	d.blockStart = index
	d.w.writeBlock(tok, eof, window)
	return d.w.err
}
// writeBlockSkip writes the current block and uses the number of tokens
// to determine if the block should be stored on no matches, or
// only huffman encoded.
func (d *compressor) writeBlockSkip(tok tokens, index int, eof bool) error {
	if index <= 0 && !eof {
		return nil
	}

	if d.blockStart > index {
		// No window bytes are available for this block; emit tokens only.
		d.w.writeBlock(tok, eof, nil)
	} else {
		window := d.window[d.blockStart:index]
		switch {
		case tok.n == len(window) && !eof:
			// Every input byte became a literal token: store the raw bytes.
			d.writeStoredBlock(window)
		case tok.n > len(window)-10:
			// Fewer than 10 literals removed: plain Huffman coding.
			d.w.writeBlockHuff(eof, window)
		default:
			// Write a dynamic huffman block.
			d.w.writeBlockDynamic(tok, eof, window)
		}
	}
	d.blockStart = index
	return d.w.err
}
// fillWindow will fill the current window with the supplied
// dictionary and calculate all hashes.
// This is much faster than doing a full encode.
// Should only be used after a start/reset.
func (d *compressor) fillWindow(b []byte) {
	// Do not fill window if we are in store-only mode,
	// use constant or Snappy compression.
	switch d.compressionLevel.level {
	case 0, 1, 2:
		return
	}
	// If we are given too much, cut it.
	if len(b) > windowSize {
		b = b[len(b)-windowSize:]
	}
	// Add all to window.
	n := copy(d.window[d.windowEnd:], b)

	// Calculate 256 hashes at the time (more L1 cache hits)
	loops := (n + 256 - minMatchLength) / 256
	for j := 0; j < loops; j++ {
		startindex := j * 256
		// Overlap chunks by minMatchLength-1 so every position gets a hash.
		end := startindex + 256 + minMatchLength - 1
		if end > n {
			end = n
		}
		tocheck := d.window[startindex:end]
		dstSize := len(tocheck) - minMatchLength + 1
		if dstSize <= 0 {
			continue
		}
		dst := d.hashMatch[:dstSize]
		d.bulkHasher(tocheck, dst)
		var newH hash
		for i, val := range dst {
			di := i + startindex
			newH = val & hashMask
			// Get previous value with the same hash.
			// Our chain should point to the previous value.
			d.hashPrev[di&windowMask] = d.hashHead[newH]
			// Set the head of the hash chain to us.
			d.hashHead[newH] = hashid(di + d.hashOffset)
		}
		d.hash = newH
	}
	// Update window information.
	d.windowEnd += n
	d.index = n
}
// Try to find a match starting at index whose length is greater than prevSize.
// We only look at chainCount possibilities before giving up.
// pos = d.index, prevHead = d.chainHead-d.hashOffset, prevLength=minMatchLength-1, lookahead
func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
	// Cap the match length by the available lookahead.
	minMatchLook := maxMatchLength
	if lookahead < minMatchLook {
		minMatchLook = lookahead
	}

	win := d.window[0 : pos+minMatchLook]

	// We quit when we get a match that's at least nice long
	nice := len(win) - pos
	if d.nice < nice {
		nice = d.nice
	}

	// If we've got a match that's good enough, only look in 1/4 the chain.
	tries := d.chain
	length = prevLength
	if length >= d.good {
		tries >>= 2
	}

	// wEnd is the byte just past the current best match; a candidate can only
	// beat `length` if its byte at that offset matches, so compare it first.
	wEnd := win[pos+length]
	wPos := win[pos:]
	minIndex := pos - windowSize

	for i := prevHead; tries > 0; tries-- {
		if wEnd == win[i+length] {
			n := matchLen(win[i:], wPos, minMatchLook)

			if n > length && (n > minMatchLength || pos-i <= 4096) {
				length = n
				offset = pos - i
				ok = true
				if n >= nice {
					// The match is good enough that we don't try to find a better one.
					break
				}
				wEnd = win[pos+n]
			}
		}
		if i == minIndex {
			// hashPrev[i & windowMask] has already been overwritten, so stop now.
			break
		}
		// Walk the chain to the previous position with the same hash.
		i = int(d.hashPrev[i&windowMask]) - d.hashOffset
		if i < minIndex || i < 0 {
			break
		}
	}
	return
}
// Try to find a match starting at index whose length is greater than prevSize.
// We only look at chainCount possibilities before giving up.
// Identical to findMatch except that it compares candidates with
// matchLenSSE4 instead of matchLen.
// pos = d.index, prevHead = d.chainHead-d.hashOffset, prevLength=minMatchLength-1, lookahead
func (d *compressor) findMatchSSE(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
	// Cap the match length by the available lookahead.
	minMatchLook := maxMatchLength
	if lookahead < minMatchLook {
		minMatchLook = lookahead
	}

	win := d.window[0 : pos+minMatchLook]

	// We quit when we get a match that's at least nice long
	nice := len(win) - pos
	if d.nice < nice {
		nice = d.nice
	}

	// If we've got a match that's good enough, only look in 1/4 the chain.
	tries := d.chain
	length = prevLength
	if length >= d.good {
		tries >>= 2
	}

	// wEnd is the byte just past the current best match; a candidate can only
	// beat `length` if its byte at that offset matches, so compare it first.
	wEnd := win[pos+length]
	wPos := win[pos:]
	minIndex := pos - windowSize

	for i := prevHead; tries > 0; tries-- {
		if wEnd == win[i+length] {
			n := matchLenSSE4(win[i:], wPos, minMatchLook)

			if n > length && (n > minMatchLength || pos-i <= 4096) {
				length = n
				offset = pos - i
				ok = true
				if n >= nice {
					// The match is good enough that we don't try to find a better one.
					break
				}
				wEnd = win[pos+n]
			}
		}
		if i == minIndex {
			// hashPrev[i & windowMask] has already been overwritten, so stop now.
			break
		}
		// Walk the chain to the previous position with the same hash.
		i = int(d.hashPrev[i&windowMask]) - d.hashOffset
		if i < minIndex || i < 0 {
			break
		}
	}
	return
}
// writeStoredBlock writes buf as a single uncompressed (stored) block,
// returning any writer error.
func (d *compressor) writeStoredBlock(buf []byte) error {
	d.w.writeStoredHeader(len(buf), false)
	if d.w.err != nil {
		return d.w.err
	}
	d.w.writeBytes(buf)
	return d.w.err
}
// oldHash is the hash function used when no native crc32 calculation
// or similar is present. It combines the first minMatchLength (4) bytes
// of b into a single hash value by shifted addition.
func oldHash(b []byte) hash {
	h := hash(b[0]) << (hashShift * 3)
	h += hash(b[1]) << (hashShift * 2)
	h += hash(b[2]) << hashShift
	h += hash(b[3])
	return h
}
// oldBulkHash will compute hashes for every position of b using the same
// rolling scheme as oldHash, writing one hash per position into dst.
// dst must have at least len(b)-minMatchLength+1 elements.
func oldBulkHash(b []byte, dst []hash) {
	if len(b) < minMatchLength {
		return
	}
	h := oldHash(b)
	dst[0] = h
	end := len(b) - minMatchLength + 1
	for i := 1; i < end; i++ {
		// Roll the hash forward by one byte.
		h = (h << hashShift) + hash(b[i+3])
		dst[i] = h
	}
}
// matchLen returns the number of matching bytes in a and b
// up to length 'max'. Both slices must be at least 'max'
// bytes in size.
func matchLen(a, b []byte, max int) int {
	n := 0
	for n < max && a[n] == b[n] {
		n++
	}
	return n
}
// initDeflate allocates the window, hash tables and token buffer and resets
// all match-search state for hash-chain based compression (levels 4-9).
func (d *compressor) initDeflate() {
	d.window = make([]byte, 2*windowSize)
	d.hashHead = make([]hashid, hashSize)
	d.hashPrev = make([]hashid, windowSize)
	d.tokens.tokens = make([]token, maxFlateBlockTokens+1)

	d.hashOffset = 1
	d.length = minMatchLength - 1
	d.offset = 0
	d.byteAvailable = false
	d.index = 0
	d.hash = 0
	d.chainHead = -1

	// Pick the bulk hashing implementation for this CPU.
	if useSSE42 {
		d.bulkHasher = crc32sseAll
	} else {
		d.bulkHasher = oldBulkHash
	}
}
// deflate consumes the window, emitting match tokens found via the hash
// chains and literal tokens otherwise, and flushing a block whenever
// maxFlateBlockTokens have accumulated.
// Assumes that d.fastSkipHashing != skipNever,
// otherwise use deflateLazy
func (d *compressor) deflate() {
	// Sanity enables additional runtime tests.
	// It's intended to be used during development
	// to supplement the currently ad-hoc unit tests.
	const sanity = false

	// Wait for more input unless a flush was requested.
	if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
		return
	}

	d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
	if d.index < d.maxInsertIndex {
		d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask
	}

	for {
		if sanity && d.index > d.windowEnd {
			panic("index > windowEnd")
		}
		lookahead := d.windowEnd - d.index
		if lookahead < minMatchLength+maxMatchLength {
			if !d.sync {
				return
			}
			if sanity && d.index > d.windowEnd {
				panic("index > windowEnd")
			}
			if lookahead == 0 {
				// Input exhausted during a flush: emit any queued tokens.
				if d.tokens.n > 0 {
					if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil {
						return
					}
					d.tokens.n = 0
				}
				return
			}
		}
		if d.index < d.maxInsertIndex {
			// Update the hash
			d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask
			ch := d.hashHead[d.hash]
			d.chainHead = int(ch)
			d.hashPrev[d.index&windowMask] = ch
			d.hashHead[d.hash] = hashid(d.index + d.hashOffset)
		}
		d.length = minMatchLength - 1
		d.offset = 0
		minIndex := d.index - windowSize
		if minIndex < 0 {
			minIndex = 0
		}

		if d.chainHead-d.hashOffset >= minIndex && lookahead > minMatchLength-1 {
			if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
				d.length = newLength
				d.offset = newOffset
			}
		}
		if d.length >= minMatchLength {
			d.ii = 0
			// There was a match at the previous step, and the current match is
			// not better. Output the previous match.
			// "d.length-3" should NOT be "d.length-minMatchLength", since the format always assume 3
			d.tokens.tokens[d.tokens.n] = matchToken(uint32(d.length-3), uint32(d.offset-minOffsetSize))
			d.tokens.n++
			// Insert in the hash table all strings up to the end of the match.
			// index and index-1 are already inserted. If there is not enough
			// lookahead, the last two strings are not inserted into the hash
			// table.
			if d.length <= d.fastSkipHashing {
				var newIndex int
				newIndex = d.index + d.length
				// Calculate missing hashes
				end := newIndex
				if end > d.maxInsertIndex {
					end = d.maxInsertIndex
				}
				end += minMatchLength - 1
				startindex := d.index + 1
				if startindex > d.maxInsertIndex {
					startindex = d.maxInsertIndex
				}
				tocheck := d.window[startindex:end]
				dstSize := len(tocheck) - minMatchLength + 1
				if dstSize > 0 {
					dst := d.hashMatch[:dstSize]
					oldBulkHash(tocheck, dst)
					var newH hash
					for i, val := range dst {
						di := i + startindex
						newH = val & hashMask
						// Get previous value with the same hash.
						// Our chain should point to the previous value.
						d.hashPrev[di&windowMask] = d.hashHead[newH]
						// Set the head of the hash chain to us.
						d.hashHead[newH] = hashid(di + d.hashOffset)
					}
					d.hash = newH
				}
				d.index = newIndex
			} else {
				// For matches this long, we don't bother inserting each individual
				// item into the table.
				d.index += d.length
				if d.index < d.maxInsertIndex {
					d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask
				}
			}
			if d.tokens.n == maxFlateBlockTokens {
				// The block includes the current character
				if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil {
					return
				}
				d.tokens.n = 0
			}
		} else {
			// No usable match: emit literals, skipping further ahead the
			// longer we go without finding matches (d.ii grows).
			d.ii++
			end := d.index + int(d.ii>>uint(d.fastSkipHashing)) + 1
			if end > d.windowEnd {
				end = d.windowEnd
			}
			for i := d.index; i < end; i++ {
				d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i]))
				d.tokens.n++
				if d.tokens.n == maxFlateBlockTokens {
					if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil {
						return
					}
					d.tokens.n = 0
				}
			}
			d.index = end
		}
	}
}
// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
// meaning it always has lazy matching on: a match found at one position is
// only emitted if the next position does not yield a better one.
func (d *compressor) deflateLazy() {
	// Sanity enables additional runtime tests.
	// It's intended to be used during development
	// to supplement the currently ad-hoc unit tests.
	const sanity = false

	// Wait for more input unless a flush was requested.
	if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
		return
	}

	d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
	if d.index < d.maxInsertIndex {
		d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask
	}

	for {
		if sanity && d.index > d.windowEnd {
			panic("index > windowEnd")
		}
		lookahead := d.windowEnd - d.index
		if lookahead < minMatchLength+maxMatchLength {
			if !d.sync {
				return
			}
			if sanity && d.index > d.windowEnd {
				panic("index > windowEnd")
			}
			if lookahead == 0 {
				// Flush current output block if any.
				if d.byteAvailable {
					// There is still one pending token that needs to be flushed
					d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
					d.tokens.n++
					d.byteAvailable = false
				}
				if d.tokens.n > 0 {
					if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
						return
					}
					d.tokens.n = 0
				}
				return
			}
		}
		if d.index < d.maxInsertIndex {
			// Update the hash
			d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask
			ch := d.hashHead[d.hash]
			d.chainHead = int(ch)
			d.hashPrev[d.index&windowMask] = ch
			d.hashHead[d.hash] = hashid(d.index + d.hashOffset)
		}
		// Remember the match from the previous position for lazy evaluation.
		prevLength := d.length
		prevOffset := d.offset
		d.length = minMatchLength - 1
		d.offset = 0
		minIndex := d.index - windowSize
		if minIndex < 0 {
			minIndex = 0
		}

		if d.chainHead-d.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
			if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
				d.length = newLength
				d.offset = newOffset
			}
		}
		if prevLength >= minMatchLength && d.length <= prevLength {
			// There was a match at the previous step, and the current match is
			// not better. Output the previous match.
			d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
			d.tokens.n++
			// Insert in the hash table all strings up to the end of the match.
			// index and index-1 are already inserted. If there is not enough
			// lookahead, the last two strings are not inserted into the hash
			// table.
			var newIndex int
			newIndex = d.index + prevLength - 1
			// Calculate missing hashes
			end := newIndex
			if end > d.maxInsertIndex {
				end = d.maxInsertIndex
			}
			end += minMatchLength - 1
			startindex := d.index + 1
			if startindex > d.maxInsertIndex {
				startindex = d.maxInsertIndex
			}
			tocheck := d.window[startindex:end]
			dstSize := len(tocheck) - minMatchLength + 1
			if dstSize > 0 {
				dst := d.hashMatch[:dstSize]
				oldBulkHash(tocheck, dst)
				var newH hash
				for i, val := range dst {
					di := i + startindex
					newH = val & hashMask
					// Get previous value with the same hash.
					// Our chain should point to the previous value.
					d.hashPrev[di&windowMask] = d.hashHead[newH]
					// Set the head of the hash chain to us.
					d.hashHead[newH] = hashid(di + d.hashOffset)
				}
				d.hash = newH
			}
			d.index = newIndex
			d.byteAvailable = false
			d.length = minMatchLength - 1
			if d.tokens.n == maxFlateBlockTokens {
				// The block includes the current character
				if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
					return
				}
				d.tokens.n = 0
			}
		} else {
			// Reset, if we got a match this run.
			if d.length >= minMatchLength {
				d.ii = 0
			}
			// We have a byte waiting. Emit it.
			if d.byteAvailable {
				d.ii++
				d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
				d.tokens.n++
				if d.tokens.n == maxFlateBlockTokens {
					if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
						return
					}
					d.tokens.n = 0
				}
				d.index++
				// If we have a long run of no matches, skip additional bytes
				// Resets when d.ii overflows after 64KB.
				if d.ii > 31 {
					n := int(d.ii >> 6)
					for j := 0; j < n; j++ {
						if d.index >= d.windowEnd-1 {
							break
						}
						d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
						d.tokens.n++
						if d.tokens.n == maxFlateBlockTokens {
							if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
								return
							}
							d.tokens.n = 0
						}
						d.index++
					}
					// Flush last byte
					d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
					d.tokens.n++
					d.byteAvailable = false
					// d.length = minMatchLength - 1 // not needed, since d.ii is reset above, so it should never be > minMatchLength
					if d.tokens.n == maxFlateBlockTokens {
						if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
							return
						}
						d.tokens.n = 0
					}
				}
			} else {
				d.index++
				d.byteAvailable = true
			}
		}
	}
}
// deflateSSE is the SSE 4.2 variant of deflate: it consumes the window,
// emitting match tokens found via the hash chains (crc32sse hashing,
// findMatchSSE) and literal tokens otherwise, flushing a block whenever
// maxFlateBlockTokens have accumulated.
// Assumes that d.fastSkipHashing != skipNever,
// otherwise use deflateLazySSE
func (d *compressor) deflateSSE() {
	// Sanity enables additional runtime tests.
	// It's intended to be used during development
	// to supplement the currently ad-hoc unit tests.
	const sanity = false

	// Wait for more input unless a flush was requested.
	if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
		return
	}

	d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
	if d.index < d.maxInsertIndex {
		// Seed with crc32sse for consistency with the in-loop hashing below
		// and with deflateLazySSE. This previously used oldHash, which would
		// insert chain entries under hash values the crc32sse-based lookups
		// could never produce.
		d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
	}

	for {
		if sanity && d.index > d.windowEnd {
			panic("index > windowEnd")
		}
		lookahead := d.windowEnd - d.index
		if lookahead < minMatchLength+maxMatchLength {
			if !d.sync {
				return
			}
			if sanity && d.index > d.windowEnd {
				panic("index > windowEnd")
			}
			if lookahead == 0 {
				// Input exhausted during a flush: emit any queued tokens.
				if d.tokens.n > 0 {
					if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil {
						return
					}
					d.tokens.n = 0
				}
				return
			}
		}
		if d.index < d.maxInsertIndex {
			// Update the hash
			d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
			ch := d.hashHead[d.hash]
			d.chainHead = int(ch)
			d.hashPrev[d.index&windowMask] = ch
			d.hashHead[d.hash] = hashid(d.index + d.hashOffset)
		}
		d.length = minMatchLength - 1
		d.offset = 0
		minIndex := d.index - windowSize
		if minIndex < 0 {
			minIndex = 0
		}

		if d.chainHead-d.hashOffset >= minIndex && lookahead > minMatchLength-1 {
			if newLength, newOffset, ok := d.findMatchSSE(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
				d.length = newLength
				d.offset = newOffset
			}
		}
		if d.length >= minMatchLength {
			d.ii = 0
			// There was a match at the previous step, and the current match is
			// not better. Output the previous match.
			// "d.length-3" should NOT be "d.length-minMatchLength", since the format always assume 3
			d.tokens.tokens[d.tokens.n] = matchToken(uint32(d.length-3), uint32(d.offset-minOffsetSize))
			d.tokens.n++
			// Insert in the hash table all strings up to the end of the match.
			// index and index-1 are already inserted. If there is not enough
			// lookahead, the last two strings are not inserted into the hash
			// table.
			if d.length <= d.fastSkipHashing {
				var newIndex int
				newIndex = d.index + d.length
				// Calculate missing hashes
				end := newIndex
				if end > d.maxInsertIndex {
					end = d.maxInsertIndex
				}
				end += minMatchLength - 1
				startindex := d.index + 1
				if startindex > d.maxInsertIndex {
					startindex = d.maxInsertIndex
				}
				tocheck := d.window[startindex:end]
				dstSize := len(tocheck) - minMatchLength + 1
				if dstSize > 0 {
					dst := d.hashMatch[:dstSize]
					crc32sseAll(tocheck, dst)
					var newH hash
					for i, val := range dst {
						di := i + startindex
						newH = val & hashMask
						// Get previous value with the same hash.
						// Our chain should point to the previous value.
						d.hashPrev[di&windowMask] = d.hashHead[newH]
						// Set the head of the hash chain to us.
						d.hashHead[newH] = hashid(di + d.hashOffset)
					}
					d.hash = newH
				}
				d.index = newIndex
			} else {
				// For matches this long, we don't bother inserting each individual
				// item into the table.
				d.index += d.length
				if d.index < d.maxInsertIndex {
					d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
				}
			}
			if d.tokens.n == maxFlateBlockTokens {
				// The block includes the current character
				if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil {
					return
				}
				d.tokens.n = 0
			}
		} else {
			// No usable match: emit literals, skipping further ahead the
			// longer we go without finding matches (d.ii grows).
			d.ii++
			end := d.index + int(d.ii>>uint(d.fastSkipHashing)) + 1
			if end > d.windowEnd {
				end = d.windowEnd
			}
			for i := d.index; i < end; i++ {
				d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i]))
				d.tokens.n++
				if d.tokens.n == maxFlateBlockTokens {
					if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil {
						return
					}
					d.tokens.n = 0
				}
			}
			d.index = end
		}
	}
}
// deflateLazySSE is the same as deflateSSE, but with d.fastSkipHashing ==
// skipNever, meaning it always has lazy matching on: a match found at one
// position is only emitted if the next position does not yield a better one.
func (d *compressor) deflateLazySSE() {
	// Sanity enables additional runtime tests.
	// It's intended to be used during development
	// to supplement the currently ad-hoc unit tests.
	const sanity = false

	// Wait for more input unless a flush was requested.
	if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
		return
	}

	d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
	if d.index < d.maxInsertIndex {
		d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
	}

	for {
		if sanity && d.index > d.windowEnd {
			panic("index > windowEnd")
		}
		lookahead := d.windowEnd - d.index
		if lookahead < minMatchLength+maxMatchLength {
			if !d.sync {
				return
			}
			if sanity && d.index > d.windowEnd {
				panic("index > windowEnd")
			}
			if lookahead == 0 {
				// Flush current output block if any.
				if d.byteAvailable {
					// There is still one pending token that needs to be flushed
					d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
					d.tokens.n++
					d.byteAvailable = false
				}
				if d.tokens.n > 0 {
					if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
						return
					}
					d.tokens.n = 0
				}
				return
			}
		}
		if d.index < d.maxInsertIndex {
			// Update the hash
			d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
			ch := d.hashHead[d.hash]
			d.chainHead = int(ch)
			d.hashPrev[d.index&windowMask] = ch
			d.hashHead[d.hash] = hashid(d.index + d.hashOffset)
		}
		// Remember the match from the previous position for lazy evaluation.
		prevLength := d.length
		prevOffset := d.offset
		d.length = minMatchLength - 1
		d.offset = 0
		minIndex := d.index - windowSize
		if minIndex < 0 {
			minIndex = 0
		}

		if d.chainHead-d.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
			if newLength, newOffset, ok := d.findMatchSSE(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
				d.length = newLength
				d.offset = newOffset
			}
		}
		if prevLength >= minMatchLength && d.length <= prevLength {
			// There was a match at the previous step, and the current match is
			// not better. Output the previous match.
			d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
			d.tokens.n++
			// Insert in the hash table all strings up to the end of the match.
			// index and index-1 are already inserted. If there is not enough
			// lookahead, the last two strings are not inserted into the hash
			// table.
			var newIndex int
			newIndex = d.index + prevLength - 1
			// Calculate missing hashes
			end := newIndex
			if end > d.maxInsertIndex {
				end = d.maxInsertIndex
			}
			end += minMatchLength - 1
			startindex := d.index + 1
			if startindex > d.maxInsertIndex {
				startindex = d.maxInsertIndex
			}
			tocheck := d.window[startindex:end]
			dstSize := len(tocheck) - minMatchLength + 1
			if dstSize > 0 {
				dst := d.hashMatch[:dstSize]
				crc32sseAll(tocheck, dst)
				var newH hash
				for i, val := range dst {
					di := i + startindex
					newH = val & hashMask
					// Get previous value with the same hash.
					// Our chain should point to the previous value.
					d.hashPrev[di&windowMask] = d.hashHead[newH]
					// Set the head of the hash chain to us.
					d.hashHead[newH] = hashid(di + d.hashOffset)
				}
				d.hash = newH
			}
			d.index = newIndex
			d.byteAvailable = false
			d.length = minMatchLength - 1
			if d.tokens.n == maxFlateBlockTokens {
				// The block includes the current character
				if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
					return
				}
				d.tokens.n = 0
			}
		} else {
			// Reset, if we got a match this run.
			if d.length >= minMatchLength {
				d.ii = 0
			}
			// We have a byte waiting. Emit it.
			if d.byteAvailable {
				d.ii++
				d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
				d.tokens.n++
				if d.tokens.n == maxFlateBlockTokens {
					if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
						return
					}
					d.tokens.n = 0
				}
				d.index++
				// If we have a long run of no matches, skip additional bytes
				// Resets when d.ii overflows after 64KB.
				if d.ii > 31 {
					n := int(d.ii >> 6)
					for j := 0; j < n; j++ {
						if d.index >= d.windowEnd-1 {
							break
						}
						d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
						d.tokens.n++
						if d.tokens.n == maxFlateBlockTokens {
							if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
								return
							}
							d.tokens.n = 0
						}
						d.index++
					}
					// Flush last byte
					d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
					d.tokens.n++
					d.byteAvailable = false
					// d.length = minMatchLength - 1 // not needed, since d.ii is reset above, so it should never be > minMatchLength
					if d.tokens.n == maxFlateBlockTokens {
						if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
							return
						}
						d.tokens.n = 0
					}
				}
			} else {
				d.index++
				d.byteAvailable = true
			}
		}
	}
}
// fillStore appends input bytes to the window buffer for store-only mode
// and returns how many were consumed.
func (d *compressor) fillStore(b []byte) int {
	copied := copy(d.window[d.windowEnd:], b)
	d.windowEnd += copied
	return copied
}
// store flushes the buffered window as a single uncompressed stored block
// and empties the buffer.
func (d *compressor) store() {
	if n := d.windowEnd; n > 0 {
		d.err = d.writeStoredBlock(d.window[:n])
	}
	d.windowEnd = 0
}
// fillHuff will fill the buffer with data for huffman-only compression.
// The number of bytes copied is returned.
func (d *compressor) fillHuff(b []byte) int {
	copied := copy(d.window[d.windowEnd:], b)
	d.windowEnd += copied
	return copied
}
// storeHuff will compress and store the currently added data,
// if enough has been accumulated or we are at the end of the stream.
// Any error that occurred will be in d.err
func (d *compressor) storeHuff() {
	// Only emit once the buffer is full or a flush/close was requested,
	// and never for an empty buffer.
	full := d.windowEnd >= maxStoreBlockSize
	if !full && !d.sync {
		return
	}
	if d.windowEnd == 0 {
		return
	}
	d.w.writeBlockHuff(false, d.window[:d.windowEnd])
	d.err = d.w.err
	d.windowEnd = 0
}
// storeSnappy will compress and store the currently added data,
// if enough has been accumulated or we are at the end of the stream.
// Any error that occurred will be in d.err
func (d *compressor) storeSnappy() {
	// We only compress if we have maxStoreBlockSize.
	if d.windowEnd < maxStoreBlockSize {
		if !d.sync {
			return
		}
		// Handle extremely small sizes.
		if d.windowEnd < 128 {
			if d.windowEnd == 0 {
				return
			}
			if d.windowEnd <= 32 {
				// Too small to benefit from encoding: store raw.
				d.err = d.writeStoredBlock(d.window[:d.windowEnd])
				d.tokens.n = 0
				d.windowEnd = 0
			} else {
				d.w.writeBlockHuff(false, d.window[:d.windowEnd])
				d.err = d.w.err
			}
			// NOTE(review): the stored-block branch above already cleared
			// these two fields; repeating it here is redundant but harmless.
			d.tokens.n = 0
			d.windowEnd = 0
			return
		}
	}

	d.snap.Encode(&d.tokens, d.window[:d.windowEnd])
	// If we made zero matches, store the block as is.
	if d.tokens.n == d.windowEnd {
		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
		// If we removed less than 1/16th, huffman compress the block.
	} else if d.tokens.n > d.windowEnd-(d.windowEnd>>4) {
		d.w.writeBlockHuff(false, d.window[:d.windowEnd])
		d.err = d.w.err
	} else {
		d.w.writeBlockDynamic(d.tokens, false, d.window[:d.windowEnd])
		d.err = d.w.err
	}
	d.tokens.n = 0
	d.windowEnd = 0
}
// write will add input byte to the stream.
// Unless an error occurs all bytes will be consumed.
func (d *compressor) write(b []byte) (n int, err error) {
	if d.err != nil {
		return 0, d.err
	}
	total := len(b)
	remaining := b
	for len(remaining) > 0 {
		// Process whatever is already buffered, then consume more input.
		d.step(d)
		consumed := d.fill(d, remaining)
		remaining = remaining[consumed:]
		if d.err != nil {
			return 0, d.err
		}
	}
	return total, d.err
}
// syncFlush compresses all pending input and emits an empty stored block
// (the DEFLATE "sync marker"), equivalent to zlib's Z_SYNC_FLUSH.
// Returns the first error encountered.
func (d *compressor) syncFlush() error {
	d.sync = true
	// NOTE: on a pre-existing error we return with d.sync still true;
	// the compressor is already unusable at that point.
	if d.err != nil {
		return d.err
	}
	// Force the current step to process whatever is buffered.
	d.step(d)
	if d.err == nil {
		// Zero-length stored block acts as the sync marker.
		d.w.writeStoredHeader(0, false)
		d.w.flush()
		d.err = d.w.err
	}
	d.sync = false
	return d.err
}
// init configures the compressor for the given level, selecting the
// fill/step strategy pair:
//   - NoCompression: stored blocks only.
//   - ConstantCompression: huffman-only blocks.
//   - 1-3: snappy-style fast matching.
//   - 4-9 (and DefaultCompression -> 5): classic deflate matching,
//     with SSE4.2 hashing variants when available.
// Returns an error for levels outside [-2, 9].
func (d *compressor) init(w io.Writer, level int) (err error) {
	d.w = newHuffmanBitWriter(w)
	switch {
	case level == NoCompression:
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillStore
		d.step = (*compressor).store
	case level == ConstantCompression:
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillHuff
		d.step = (*compressor).storeHuff
	case level >= 1 && level <= 3:
		d.snap = newSnappy(level)
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillHuff
		d.step = (*compressor).storeSnappy
		// +1 so the token buffer can never overflow on an all-literal block.
		d.tokens.tokens = make([]token, maxStoreBlockSize+1)
	case level == DefaultCompression:
		level = 5
		fallthrough
	case 4 <= level && level <= 9:
		d.compressionLevel = levels[level]
		d.initDeflate()
		d.fill = (*compressor).fillDeflate
		if d.fastSkipHashing == skipNever {
			// Lazy matching variants.
			if useSSE42 {
				d.step = (*compressor).deflateLazySSE
			} else {
				d.step = (*compressor).deflateLazy
			}
		} else {
			// Greedy (skip-hashing) variants.
			if useSSE42 {
				d.step = (*compressor).deflateSSE
			} else {
				d.step = (*compressor).deflate
			}
		}
	default:
		return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
	}
	return nil
}
// Used for zeroing the hash slice in 256-entry chunks.
var hzeroes [256]hashid
// reset the state of the compressor so it can be reused with a new
// destination writer, keeping the configured level/strategy.
func (d *compressor) reset(w io.Writer) {
	d.w.reset(w)
	d.sync = false
	d.err = nil
	// We only need to reset a few things for Snappy.
	if d.snap != nil {
		d.snap.Reset()
		d.windowEnd = 0
		d.tokens.n = 0
		return
	}
	switch d.compressionLevel.chain {
	case 0:
		// level was NoCompression or ConstantCompresssion.
		d.windowEnd = 0
	default:
		d.chainHead = -1
		// Zero hashHead in chunks; its length need not be a multiple of 256.
		for s := d.hashHead; len(s) > 0; {
			n := copy(s, hzeroes[:])
			s = s[n:]
		}
		// hashPrev has length windowSize (a multiple of 256), so fixed strides work.
		for s := d.hashPrev; len(s) > 0; s = s[len(hzeroes):] {
			copy(s, hzeroes[:])
		}
		// Offset 1 so that a zero table entry means "no position recorded".
		d.hashOffset = 1
		d.index, d.windowEnd = 0, 0
		d.blockStart, d.byteAvailable = 0, false
		d.tokens.n = 0
		d.length = minMatchLength - 1
		d.offset = 0
		d.hash = 0
		d.ii = 0
		d.maxInsertIndex = 0
	}
}
// close flushes any remaining input through the compressor and writes the
// final (empty, EOF-marked) stored block, then flushes the bit writer.
func (d *compressor) close() error {
	if d.err != nil {
		return d.err
	}
	d.sync = true
	d.step(d)
	if d.err != nil {
		return d.err
	}
	d.w.writeStoredHeader(0, true)
	if d.w.err != nil {
		return d.w.err
	}
	d.w.flush()
	return d.w.err
}
// NewWriter returns a new Writer compressing data at the given level.
// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression);
// higher levels typically run slower but compress more.
// Level 0 (NoCompression) does not attempt any compression; it only adds the
// necessary DEFLATE framing.
// Level -1 (DefaultCompression) uses the default compression level.
// Level -2 (ConstantCompression) will use Huffman compression only, giving
// a very fast compression for all types of input, but sacrificing considerable
// compression efficiency.
//
// If level is in the range [-2, 9] then the error returned will be nil.
// Otherwise the error returned will be non-nil.
func NewWriter(w io.Writer, level int) (*Writer, error) {
	zw := new(Writer)
	if err := zw.d.init(w, level); err != nil {
		return nil, err
	}
	return zw, nil
}
// NewWriterDict is like NewWriter but initializes the new
// Writer with a preset dictionary. The returned Writer behaves
// as if the dictionary had been written to it without producing
// any compressed output. The compressed data written to w
// can only be decompressed by a Reader initialized with the
// same dictionary.
func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
	// Wrap w so Reset can later detect the dictionary case.
	zw, err := NewWriter(&dictWriter{w}, level)
	if err != nil {
		return nil, err
	}
	zw.d.fillWindow(dict)
	zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method.
	return zw, err
}
// dictWriter is a transparent wrapper around the destination writer,
// used as a type marker so Writer.Reset can tell whether the Writer
// was created with NewWriterDict.
type dictWriter struct {
	w io.Writer
}
// Write forwards to the wrapped writer unchanged.
func (w *dictWriter) Write(b []byte) (n int, err error) {
	return w.w.Write(b)
}
// A Writer takes data written to it and writes the compressed
// form of that data to an underlying writer (see NewWriter).
type Writer struct {
	d    compressor
	dict []byte // preset dictionary copy, kept so Reset can re-apply it
}
// Write writes data to w, which will eventually write the
// compressed form of data to its underlying writer.
func (w *Writer) Write(data []byte) (n int, err error) {
	return w.d.write(data)
}
// Flush flushes any pending compressed data to the underlying writer.
// It is useful mainly in compressed network protocols, to ensure that
// a remote reader has enough data to reconstruct a packet.
// Flush does not return until the data has been written.
// If the underlying writer returns an error, Flush returns that error.
//
// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
func (w *Writer) Flush() error {
	// For more about flushing:
	// http://www.bolet.org/~pornin/deflate-flush.html
	return w.d.syncFlush()
}
// Close flushes and closes the writer, emitting the final EOF block.
// It does not close the underlying writer.
func (w *Writer) Close() error {
	return w.d.close()
}
// Reset discards the writer's state and makes it equivalent to
// the result of NewWriter or NewWriterDict called with dst
// and w's level and dictionary.
func (w *Writer) Reset(dst io.Writer) {
	if dw, ok := w.d.w.w.(*dictWriter); ok {
		// w was created with NewWriterDict: retarget the wrapper and
		// re-prime the window with the saved dictionary.
		dw.w = dst
		w.d.reset(dw)
		w.d.fillWindow(w.dict)
	} else {
		// w was created with NewWriter
		w.d.reset(dst)
	}
}
// ResetDict discards the writer's state and makes it equivalent to
// the result of NewWriter or NewWriterDict called with dst
// and w's level, but sets a specific dictionary.
// Note: the dict slice is retained, not copied — TODO confirm callers
// do not mutate it afterwards.
func (w *Writer) ResetDict(dst io.Writer, dict []byte) {
	w.dict = dict
	w.d.reset(dst)
	w.d.fillWindow(w.dict)
}
Level 4-7: Always attempt Huffman compression.
Always at least attempt to Huffman compress the blocks.
Also, use a variable threshold instead of a fixed 10 literals.
// Copyright 2009 The Go Authors. All rights reserved.
// Copyright (c) 2015 Klaus Post
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
import (
"fmt"
"io"
"math"
)
const (
	NoCompression       = 0
	BestSpeed           = 1
	BestCompression     = 9
	DefaultCompression  = -1
	ConstantCompression = -2 // Does only Huffman encoding
	logWindowSize = 15
	windowSize    = 1 << logWindowSize
	windowMask    = windowSize - 1
	logMaxOffsetSize = 15  // Standard DEFLATE
	minMatchLength   = 4   // The smallest match that the compressor looks for
	maxMatchLength   = 258 // The longest match for the compressor
	minOffsetSize    = 1   // The shortest offset that makes any sense
	// The maximum number of tokens we put into a single flate block, just to
	// stop things from getting too large.
	maxFlateBlockTokens = 1 << 14
	maxStoreBlockSize   = 65535
	hashBits            = 17 // After 17 performance degrades
	hashSize            = 1 << hashBits
	hashMask            = (1 << hashBits) - 1
	hashShift           = (hashBits + minMatchLength - 1) / minMatchLength
	maxHashOffset       = 1 << 24
	skipNever = math.MaxInt32 // sentinel: greedy skip-hashing disabled (lazy matching)
)
// useSSE42 selects the crc32-SSE4.2 hashing variants when true;
// presumably set by platform-detection code elsewhere in the package.
var useSSE42 bool
// compressionLevel holds the tuning knobs for one deflate level.
type compressionLevel struct {
	good, lazy, nice, chain, fastSkipHashing, level int
}
// Compression levels have been rebalanced from zlib deflate defaults
// to give a bigger spread in speed and compression.
// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/
var levels = []compressionLevel{
	{}, // 0
	// Level 1+2 uses snappy algorithm - values not used
	{0, 0, 0, 0, 0, 1},
	{0, 0, 0, 0, 0, 2},
	// For levels 3-6 we don't bother trying with lazy matches.
	// Lazy matching is at least 30% slower, with 1.5% increase.
	{4, 0, 8, 4, 4, 3},
	{4, 0, 12, 6, 5, 4},
	{6, 0, 24, 16, 6, 5},
	{8, 0, 32, 32, 7, 6},
	// Levels 7-9 use increasingly more lazy matching
	// and increasingly stringent conditions for "good enough".
	{4, 8, 16, 16, skipNever, 7},
	{6, 16, 32, 64, skipNever, 8},
	{32, 258, 258, 4096, skipNever, 9},
}
// hashid is an entry in the hash chain tables: a window position biased
// by hashOffset, with 0 meaning "no position recorded".
type hashid uint32
// compressor carries all per-stream deflate state.
type compressor struct {
	compressionLevel
	w          *huffmanBitWriter
	bulkHasher func([]byte, []hash)
	// compression algorithm
	fill func(*compressor, []byte) int // copy data to window
	step func(*compressor)             // process window
	sync bool                          // requesting flush
	// Input hash chains
	// hashHead[hashValue] contains the largest inputIndex with the specified hash value
	// If hashHead[hashValue] is within the current window, then
	// hashPrev[hashHead[hashValue] & windowMask] contains the previous index
	// with the same hash value.
	chainHead  int
	hashHead   []hashid
	hashPrev   []hashid
	hashOffset int
	// input window: unprocessed data is window[index:windowEnd]
	index         int
	window        []byte
	windowEnd     int
	blockStart    int  // window index where current tokens start
	byteAvailable bool // if true, still need to process window[index-1].
	// queued output tokens
	tokens tokens
	// deflate state
	length         int
	offset         int
	hash           hash
	maxInsertIndex int
	err            error
	ii             uint16 // position of last match, intended to overflow to reset.
	snap           snappyEnc
	hashMatch      [maxMatchLength + minMatchLength]hash
}
// hash is a raw (possibly unmasked) hash value of a minMatchLength prefix.
type hash int32
// fillDeflate copies input into the sliding window, shifting the window
// back by windowSize when it would overflow, and rebasing all recorded
// hash-table positions accordingly. Returns the number of bytes consumed.
func (d *compressor) fillDeflate(b []byte) int {
	if d.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
		// shift the window by windowSize
		copy(d.window, d.window[windowSize:2*windowSize])
		d.index -= windowSize
		d.windowEnd -= windowSize
		if d.blockStart >= windowSize {
			d.blockStart -= windowSize
		} else {
			// blockStart fell off the window; mark it clearly invalid.
			d.blockStart = math.MaxInt32
		}
		d.hashOffset += windowSize
		if d.hashOffset > maxHashOffset {
			// Rebase the offset so hashid values stay small; entries that
			// would go non-positive are cleared to 0 ("no position").
			delta := d.hashOffset - 1
			d.hashOffset -= delta
			d.chainHead -= delta
			for i, v := range d.hashPrev {
				if int(v) > delta {
					d.hashPrev[i] = hashid(int(v) - delta)
				} else {
					d.hashPrev[i] = 0
				}
			}
			for i, v := range d.hashHead {
				if int(v) > delta {
					d.hashHead[i] = hashid(int(v) - delta)
				} else {
					d.hashHead[i] = 0
				}
			}
		}
	}
	n := copy(d.window[d.windowEnd:], b)
	d.windowEnd += n
	return n
}
// writeBlock emits the queued tokens as one deflate block covering
// window[blockStart:index], then advances blockStart. A no-op when there
// is nothing to write and eof is false.
func (d *compressor) writeBlock(tok tokens, index int, eof bool) error {
	if index <= 0 && !eof {
		return nil
	}
	var window []byte
	if d.blockStart <= index {
		window = d.window[d.blockStart:index]
	}
	d.blockStart = index
	d.w.writeBlock(tok, eof, window)
	return d.w.err
}
// writeBlockSkip writes the current block and uses the number of tokens
// to decide the cheapest encoding: huffman-only when almost nothing was
// matched, dynamic-huffman otherwise.
func (d *compressor) writeBlockSkip(tok tokens, index int, eof bool) error {
	if index <= 0 && !eof {
		return nil
	}
	if d.blockStart > index {
		// No valid window slice available; emit tokens without one.
		d.w.writeBlock(tok, eof, nil)
		d.blockStart = index
		return d.w.err
	}
	window := d.window[d.blockStart:index]
	d.blockStart = index
	// If we removed less than a 64th of all literals
	// we huffman compress the block.
	if tok.n > len(window)-(tok.n>>6) {
		d.w.writeBlockHuff(eof, window)
	} else {
		// Write a dynamic huffman block.
		d.w.writeBlockDynamic(tok, eof, window)
	}
	return d.w.err
}
// fillWindow will fill the current window with the supplied
// dictionary and calculate all hashes.
// This is much faster than doing a full encode.
// Should only be used after a start/reset.
func (d *compressor) fillWindow(b []byte) {
	// Do not fill window if we are in store-only mode,
	// use constant or Snappy compression.
	switch d.compressionLevel.level {
	case 0, 1, 2:
		return
	}
	// If we are given too much, cut it.
	if len(b) > windowSize {
		b = b[len(b)-windowSize:]
	}
	// Add all to window.
	n := copy(d.window[d.windowEnd:], b)
	// Calculate 256 hashes at the time (more L1 cache hits)
	loops := (n + 256 - minMatchLength) / 256
	for j := 0; j < loops; j++ {
		startindex := j * 256
		// Overlap chunks by minMatchLength-1 so no prefix is missed.
		end := startindex + 256 + minMatchLength - 1
		if end > n {
			end = n
		}
		tocheck := d.window[startindex:end]
		dstSize := len(tocheck) - minMatchLength + 1
		if dstSize <= 0 {
			continue
		}
		dst := d.hashMatch[:dstSize]
		d.bulkHasher(tocheck, dst)
		var newH hash
		for i, val := range dst {
			di := i + startindex
			newH = val & hashMask
			// Get previous value with the same hash.
			// Our chain should point to the previous value.
			d.hashPrev[di&windowMask] = d.hashHead[newH]
			// Set the head of the hash chain to us.
			d.hashHead[newH] = hashid(di + d.hashOffset)
		}
		d.hash = newH
	}
	// Update window information.
	d.windowEnd += n
	d.index = n
}
// findMatch tries to find a match starting at pos whose length is greater
// than prevLength, walking the hash chain for at most d.chain candidates.
// pos = d.index, prevHead = d.chainHead-d.hashOffset, prevLength=minMatchLength-1, lookahead
// Returns the best (length, offset) found and whether anything beat prevLength.
func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
	minMatchLook := maxMatchLength
	if lookahead < minMatchLook {
		minMatchLook = lookahead
	}
	win := d.window[0 : pos+minMatchLook]
	// We quit when we get a match that's at least nice long
	nice := len(win) - pos
	if d.nice < nice {
		nice = d.nice
	}
	// If we've got a match that's good enough, only look in 1/4 the chain.
	tries := d.chain
	length = prevLength
	if length >= d.good {
		tries >>= 2
	}
	// wEnd: the byte a candidate must match at position length to possibly
	// be longer than the current best — a cheap pre-filter.
	wEnd := win[pos+length]
	wPos := win[pos:]
	minIndex := pos - windowSize
	for i := prevHead; tries > 0; tries-- {
		if wEnd == win[i+length] {
			n := matchLen(win[i:], wPos, minMatchLook)
			// Long offsets only accepted for matches longer than minMatchLength.
			if n > length && (n > minMatchLength || pos-i <= 4096) {
				length = n
				offset = pos - i
				ok = true
				if n >= nice {
					// The match is good enough that we don't try to find a better one.
					break
				}
				wEnd = win[pos+n]
			}
		}
		if i == minIndex {
			// hashPrev[i & windowMask] has already been overwritten, so stop now.
			break
		}
		i = int(d.hashPrev[i&windowMask]) - d.hashOffset
		if i < minIndex || i < 0 {
			break
		}
	}
	return
}
// findMatchSSE is findMatch with the SSE4.2-assisted matchLenSSE4
// comparison; the chain-walking logic is otherwise identical.
// pos = d.index, prevHead = d.chainHead-d.hashOffset, prevLength=minMatchLength-1, lookahead
func (d *compressor) findMatchSSE(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
	minMatchLook := maxMatchLength
	if lookahead < minMatchLook {
		minMatchLook = lookahead
	}
	win := d.window[0 : pos+minMatchLook]
	// We quit when we get a match that's at least nice long
	nice := len(win) - pos
	if d.nice < nice {
		nice = d.nice
	}
	// If we've got a match that's good enough, only look in 1/4 the chain.
	tries := d.chain
	length = prevLength
	if length >= d.good {
		tries >>= 2
	}
	// Cheap pre-filter: candidate must match at the current best length.
	wEnd := win[pos+length]
	wPos := win[pos:]
	minIndex := pos - windowSize
	for i := prevHead; tries > 0; tries-- {
		if wEnd == win[i+length] {
			n := matchLenSSE4(win[i:], wPos, minMatchLook)
			if n > length && (n > minMatchLength || pos-i <= 4096) {
				length = n
				offset = pos - i
				ok = true
				if n >= nice {
					// The match is good enough that we don't try to find a better one.
					break
				}
				wEnd = win[pos+n]
			}
		}
		if i == minIndex {
			// hashPrev[i & windowMask] has already been overwritten, so stop now.
			break
		}
		i = int(d.hashPrev[i&windowMask]) - d.hashOffset
		if i < minIndex || i < 0 {
			break
		}
	}
	return
}
// writeStoredBlock emits buf as one stored (uncompressed, non-final)
// DEFLATE block: header first, then the raw bytes.
func (d *compressor) writeStoredBlock(buf []byte) error {
	d.w.writeStoredHeader(len(buf), false)
	if d.w.err != nil {
		return d.w.err
	}
	d.w.writeBytes(buf)
	return d.w.err
}
// oldHash is the hash function used when no native crc32 calculation
// or similar is present. It folds the first four bytes of b using
// Horner's scheme, which equals b[0]<<(3*hashShift) + b[1]<<(2*hashShift)
// + b[2]<<hashShift + b[3].
func oldHash(b []byte) hash {
	h := hash(b[0])
	h = h<<hashShift + hash(b[1])
	h = h<<hashShift + hash(b[2])
	return h<<hashShift + hash(b[3])
}
// oldBulkHash will compute hashes using the same algorithm as oldHash,
// writing one rolling hash per starting position into dst.
func oldBulkHash(b []byte, dst []hash) {
	if len(b) < minMatchLength {
		return
	}
	h := oldHash(b)
	dst[0] = h
	last := len(b) - minMatchLength + 1
	for i := 1; i < last; i++ {
		// Roll the hash forward by one byte.
		h = (h << hashShift) + hash(b[i+3])
		dst[i] = h
	}
}
// matchLen returns the number of matching bytes in a and b
// up to length 'max'. Both slices must be at least 'max'
// bytes in size.
func matchLen(a, b []byte, max int) int {
	b = b[:max]
	for i := range b {
		if a[i] != b[i] {
			return i
		}
	}
	return max
}
// initDeflate allocates the window, hash tables and token buffer for the
// classic deflate levels (4-9) and resets the matcher state.
func (d *compressor) initDeflate() {
	d.window = make([]byte, 2*windowSize)
	d.hashHead = make([]hashid, hashSize)
	d.hashPrev = make([]hashid, windowSize)
	d.tokens.tokens = make([]token, maxFlateBlockTokens+1)
	d.hashOffset = 1
	d.length = minMatchLength - 1
	d.offset = 0
	d.byteAvailable = false
	d.index = 0
	d.hash = 0
	d.chainHead = -1
	// Prefer the SSE4.2 bulk hasher when the CPU supports it.
	if useSSE42 {
		d.bulkHasher = crc32sseAll
	} else {
		d.bulkHasher = oldBulkHash
	}
}
// deflate is the greedy matching step: every match found is emitted
// immediately. Assumes that d.fastSkipHashing != skipNever,
// otherwise use deflateLazy.
func (d *compressor) deflate() {
	// Sanity enables additional runtime tests.
	// It's intended to be used during development
	// to supplement the currently ad-hoc unit tests.
	const sanity = false
	// Wait for a full lookahead unless a flush/close was requested.
	if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
		return
	}
	d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
	if d.index < d.maxInsertIndex {
		d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask
	}
	for {
		if sanity && d.index > d.windowEnd {
			panic("index > windowEnd")
		}
		lookahead := d.windowEnd - d.index
		if lookahead < minMatchLength+maxMatchLength {
			if !d.sync {
				return
			}
			if sanity && d.index > d.windowEnd {
				panic("index > windowEnd")
			}
			if lookahead == 0 {
				// End of input for this step: flush queued tokens.
				if d.tokens.n > 0 {
					if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil {
						return
					}
					d.tokens.n = 0
				}
				return
			}
		}
		if d.index < d.maxInsertIndex {
			// Update the hash
			d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask
			ch := d.hashHead[d.hash]
			d.chainHead = int(ch)
			d.hashPrev[d.index&windowMask] = ch
			d.hashHead[d.hash] = hashid(d.index + d.hashOffset)
		}
		d.length = minMatchLength - 1
		d.offset = 0
		minIndex := d.index - windowSize
		if minIndex < 0 {
			minIndex = 0
		}
		if d.chainHead-d.hashOffset >= minIndex && lookahead > minMatchLength-1 {
			if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
				d.length = newLength
				d.offset = newOffset
			}
		}
		if d.length >= minMatchLength {
			d.ii = 0
			// There was a match at the previous step, and the current match is
			// not better. Output the previous match.
			// "d.length-3" should NOT be "d.length-minMatchLength", since the format always assume 3
			d.tokens.tokens[d.tokens.n] = matchToken(uint32(d.length-3), uint32(d.offset-minOffsetSize))
			d.tokens.n++
			// Insert in the hash table all strings up to the end of the match.
			// index and index-1 are already inserted. If there is not enough
			// lookahead, the last two strings are not inserted into the hash
			// table.
			if d.length <= d.fastSkipHashing {
				var newIndex int
				newIndex = d.index + d.length
				// Calculate missing hashes
				end := newIndex
				if end > d.maxInsertIndex {
					end = d.maxInsertIndex
				}
				end += minMatchLength - 1
				startindex := d.index + 1
				if startindex > d.maxInsertIndex {
					startindex = d.maxInsertIndex
				}
				tocheck := d.window[startindex:end]
				dstSize := len(tocheck) - minMatchLength + 1
				if dstSize > 0 {
					dst := d.hashMatch[:dstSize]
					oldBulkHash(tocheck, dst)
					var newH hash
					for i, val := range dst {
						di := i + startindex
						newH = val & hashMask
						// Get previous value with the same hash.
						// Our chain should point to the previous value.
						d.hashPrev[di&windowMask] = d.hashHead[newH]
						// Set the head of the hash chain to us.
						d.hashHead[newH] = hashid(di + d.hashOffset)
					}
					d.hash = newH
				}
				d.index = newIndex
			} else {
				// For matches this long, we don't bother inserting each individual
				// item into the table.
				d.index += d.length
				if d.index < d.maxInsertIndex {
					d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask
				}
			}
			if d.tokens.n == maxFlateBlockTokens {
				// The block includes the current character
				if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil {
					return
				}
				d.tokens.n = 0
			}
		} else {
			// No match: emit literals, skipping ahead faster the longer we
			// go without a match (ii counts misses).
			d.ii++
			end := d.index + int(d.ii>>uint(d.fastSkipHashing)) + 1
			if end > d.windowEnd {
				end = d.windowEnd
			}
			for i := d.index; i < end; i++ {
				d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i]))
				d.tokens.n++
				if d.tokens.n == maxFlateBlockTokens {
					if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil {
						return
					}
					d.tokens.n = 0
				}
			}
			d.index = end
		}
	}
}
// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
// meaning it always has lazy matching on: a match is held back one byte in
// case the next position yields a longer one.
func (d *compressor) deflateLazy() {
	// Sanity enables additional runtime tests.
	// It's intended to be used during development
	// to supplement the currently ad-hoc unit tests.
	const sanity = false
	if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
		return
	}
	d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
	if d.index < d.maxInsertIndex {
		d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask
	}
	for {
		if sanity && d.index > d.windowEnd {
			panic("index > windowEnd")
		}
		lookahead := d.windowEnd - d.index
		if lookahead < minMatchLength+maxMatchLength {
			if !d.sync {
				return
			}
			if sanity && d.index > d.windowEnd {
				panic("index > windowEnd")
			}
			if lookahead == 0 {
				// Flush current output block if any.
				if d.byteAvailable {
					// There is still one pending token that needs to be flushed
					d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
					d.tokens.n++
					d.byteAvailable = false
				}
				if d.tokens.n > 0 {
					if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
						return
					}
					d.tokens.n = 0
				}
				return
			}
		}
		if d.index < d.maxInsertIndex {
			// Update the hash
			d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask
			ch := d.hashHead[d.hash]
			d.chainHead = int(ch)
			d.hashPrev[d.index&windowMask] = ch
			d.hashHead[d.hash] = hashid(d.index + d.hashOffset)
		}
		// Remember the match found at the previous position (lazy evaluation).
		prevLength := d.length
		prevOffset := d.offset
		d.length = minMatchLength - 1
		d.offset = 0
		minIndex := d.index - windowSize
		if minIndex < 0 {
			minIndex = 0
		}
		if d.chainHead-d.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
			if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
				d.length = newLength
				d.offset = newOffset
			}
		}
		if prevLength >= minMatchLength && d.length <= prevLength {
			// There was a match at the previous step, and the current match is
			// not better. Output the previous match.
			d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
			d.tokens.n++
			// Insert in the hash table all strings up to the end of the match.
			// index and index-1 are already inserted. If there is not enough
			// lookahead, the last two strings are not inserted into the hash
			// table.
			var newIndex int
			newIndex = d.index + prevLength - 1
			// Calculate missing hashes
			end := newIndex
			if end > d.maxInsertIndex {
				end = d.maxInsertIndex
			}
			end += minMatchLength - 1
			startindex := d.index + 1
			if startindex > d.maxInsertIndex {
				startindex = d.maxInsertIndex
			}
			tocheck := d.window[startindex:end]
			dstSize := len(tocheck) - minMatchLength + 1
			if dstSize > 0 {
				dst := d.hashMatch[:dstSize]
				oldBulkHash(tocheck, dst)
				var newH hash
				for i, val := range dst {
					di := i + startindex
					newH = val & hashMask
					// Get previous value with the same hash.
					// Our chain should point to the previous value.
					d.hashPrev[di&windowMask] = d.hashHead[newH]
					// Set the head of the hash chain to us.
					d.hashHead[newH] = hashid(di + d.hashOffset)
				}
				d.hash = newH
			}
			d.index = newIndex
			d.byteAvailable = false
			d.length = minMatchLength - 1
			if d.tokens.n == maxFlateBlockTokens {
				// The block includes the current character
				if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
					return
				}
				d.tokens.n = 0
			}
		} else {
			// Reset, if we got a match this run.
			if d.length >= minMatchLength {
				d.ii = 0
			}
			// We have a byte waiting. Emit it.
			if d.byteAvailable {
				d.ii++
				d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
				d.tokens.n++
				if d.tokens.n == maxFlateBlockTokens {
					if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
						return
					}
					d.tokens.n = 0
				}
				d.index++
				// If we have a long run of no matches, skip additional bytes
				// Resets when d.ii overflows after 64KB.
				if d.ii > 31 {
					n := int(d.ii >> 6)
					for j := 0; j < n; j++ {
						if d.index >= d.windowEnd-1 {
							break
						}
						d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
						d.tokens.n++
						if d.tokens.n == maxFlateBlockTokens {
							if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
								return
							}
							d.tokens.n = 0
						}
						d.index++
					}
					// Flush last byte
					d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
					d.tokens.n++
					d.byteAvailable = false
					// d.length = minMatchLength - 1 // not needed, since d.ii is reset above, so it should never be > minMatchLength
					if d.tokens.n == maxFlateBlockTokens {
						if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
							return
						}
						d.tokens.n = 0
					}
				}
			} else {
				d.index++
				d.byteAvailable = true
			}
		}
	}
}
// deflateSSE is the greedy step using SSE4.2 crc32 hashing and
// matchLenSSE4-based match search. Assumes that d.fastSkipHashing != skipNever,
// otherwise use deflateLazySSE.
func (d *compressor) deflateSSE() {
	// Sanity enables additional runtime tests.
	// It's intended to be used during development
	// to supplement the currently ad-hoc unit tests.
	const sanity = false
	if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
		return
	}
	d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
	if d.index < d.maxInsertIndex {
		// NOTE(review): this priming uses oldHash while the loop below uses
		// crc32sse; looks like a dead store (d.hash is recomputed under the
		// same guard before first use) — confirm before changing.
		d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask
	}
	for {
		if sanity && d.index > d.windowEnd {
			panic("index > windowEnd")
		}
		lookahead := d.windowEnd - d.index
		if lookahead < minMatchLength+maxMatchLength {
			if !d.sync {
				return
			}
			if sanity && d.index > d.windowEnd {
				panic("index > windowEnd")
			}
			if lookahead == 0 {
				// End of input for this step: flush queued tokens.
				if d.tokens.n > 0 {
					if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil {
						return
					}
					d.tokens.n = 0
				}
				return
			}
		}
		if d.index < d.maxInsertIndex {
			// Update the hash
			d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
			ch := d.hashHead[d.hash]
			d.chainHead = int(ch)
			d.hashPrev[d.index&windowMask] = ch
			d.hashHead[d.hash] = hashid(d.index + d.hashOffset)
		}
		d.length = minMatchLength - 1
		d.offset = 0
		minIndex := d.index - windowSize
		if minIndex < 0 {
			minIndex = 0
		}
		if d.chainHead-d.hashOffset >= minIndex && lookahead > minMatchLength-1 {
			if newLength, newOffset, ok := d.findMatchSSE(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
				d.length = newLength
				d.offset = newOffset
			}
		}
		if d.length >= minMatchLength {
			d.ii = 0
			// There was a match at the previous step, and the current match is
			// not better. Output the previous match.
			// "d.length-3" should NOT be "d.length-minMatchLength", since the format always assume 3
			d.tokens.tokens[d.tokens.n] = matchToken(uint32(d.length-3), uint32(d.offset-minOffsetSize))
			d.tokens.n++
			// Insert in the hash table all strings up to the end of the match.
			// index and index-1 are already inserted. If there is not enough
			// lookahead, the last two strings are not inserted into the hash
			// table.
			if d.length <= d.fastSkipHashing {
				var newIndex int
				newIndex = d.index + d.length
				// Calculate missing hashes
				end := newIndex
				if end > d.maxInsertIndex {
					end = d.maxInsertIndex
				}
				end += minMatchLength - 1
				startindex := d.index + 1
				if startindex > d.maxInsertIndex {
					startindex = d.maxInsertIndex
				}
				tocheck := d.window[startindex:end]
				dstSize := len(tocheck) - minMatchLength + 1
				if dstSize > 0 {
					dst := d.hashMatch[:dstSize]
					crc32sseAll(tocheck, dst)
					var newH hash
					for i, val := range dst {
						di := i + startindex
						newH = val & hashMask
						// Get previous value with the same hash.
						// Our chain should point to the previous value.
						d.hashPrev[di&windowMask] = d.hashHead[newH]
						// Set the head of the hash chain to us.
						d.hashHead[newH] = hashid(di + d.hashOffset)
					}
					d.hash = newH
				}
				d.index = newIndex
			} else {
				// For matches this long, we don't bother inserting each individual
				// item into the table.
				d.index += d.length
				if d.index < d.maxInsertIndex {
					d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
				}
			}
			if d.tokens.n == maxFlateBlockTokens {
				// The block includes the current character
				if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil {
					return
				}
				d.tokens.n = 0
			}
		} else {
			// No match: emit literals, accelerating the skip as misses accumulate.
			d.ii++
			end := d.index + int(d.ii>>uint(d.fastSkipHashing)) + 1
			if end > d.windowEnd {
				end = d.windowEnd
			}
			for i := d.index; i < end; i++ {
				d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i]))
				d.tokens.n++
				if d.tokens.n == maxFlateBlockTokens {
					if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil {
						return
					}
					d.tokens.n = 0
				}
			}
			d.index = end
		}
	}
}
// deflateLazySSE is deflateLazy using SSE4.2 crc32 hashing and
// findMatchSSE; used when d.fastSkipHashing == skipNever and SSE4.2
// is available. (Header comment originally said "deflateLazy".)
func (d *compressor) deflateLazySSE() {
	// Sanity enables additional runtime tests.
	// It's intended to be used during development
	// to supplement the currently ad-hoc unit tests.
	const sanity = false
	if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
		return
	}
	d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
	if d.index < d.maxInsertIndex {
		d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
	}
	for {
		if sanity && d.index > d.windowEnd {
			panic("index > windowEnd")
		}
		lookahead := d.windowEnd - d.index
		if lookahead < minMatchLength+maxMatchLength {
			if !d.sync {
				return
			}
			if sanity && d.index > d.windowEnd {
				panic("index > windowEnd")
			}
			if lookahead == 0 {
				// Flush current output block if any.
				if d.byteAvailable {
					// There is still one pending token that needs to be flushed
					d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
					d.tokens.n++
					d.byteAvailable = false
				}
				if d.tokens.n > 0 {
					if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
						return
					}
					d.tokens.n = 0
				}
				return
			}
		}
		if d.index < d.maxInsertIndex {
			// Update the hash
			d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
			ch := d.hashHead[d.hash]
			d.chainHead = int(ch)
			d.hashPrev[d.index&windowMask] = ch
			d.hashHead[d.hash] = hashid(d.index + d.hashOffset)
		}
		// Remember the match found at the previous position (lazy evaluation).
		prevLength := d.length
		prevOffset := d.offset
		d.length = minMatchLength - 1
		d.offset = 0
		minIndex := d.index - windowSize
		if minIndex < 0 {
			minIndex = 0
		}
		if d.chainHead-d.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
			if newLength, newOffset, ok := d.findMatchSSE(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
				d.length = newLength
				d.offset = newOffset
			}
		}
		if prevLength >= minMatchLength && d.length <= prevLength {
			// There was a match at the previous step, and the current match is
			// not better. Output the previous match.
			d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
			d.tokens.n++
			// Insert in the hash table all strings up to the end of the match.
			// index and index-1 are already inserted. If there is not enough
			// lookahead, the last two strings are not inserted into the hash
			// table.
			var newIndex int
			newIndex = d.index + prevLength - 1
			// Calculate missing hashes
			end := newIndex
			if end > d.maxInsertIndex {
				end = d.maxInsertIndex
			}
			end += minMatchLength - 1
			startindex := d.index + 1
			if startindex > d.maxInsertIndex {
				startindex = d.maxInsertIndex
			}
			tocheck := d.window[startindex:end]
			dstSize := len(tocheck) - minMatchLength + 1
			if dstSize > 0 {
				dst := d.hashMatch[:dstSize]
				crc32sseAll(tocheck, dst)
				var newH hash
				for i, val := range dst {
					di := i + startindex
					newH = val & hashMask
					// Get previous value with the same hash.
					// Our chain should point to the previous value.
					d.hashPrev[di&windowMask] = d.hashHead[newH]
					// Set the head of the hash chain to us.
					d.hashHead[newH] = hashid(di + d.hashOffset)
				}
				d.hash = newH
			}
			d.index = newIndex
			d.byteAvailable = false
			d.length = minMatchLength - 1
			if d.tokens.n == maxFlateBlockTokens {
				// The block includes the current character
				if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
					return
				}
				d.tokens.n = 0
			}
		} else {
			// Reset, if we got a match this run.
			if d.length >= minMatchLength {
				d.ii = 0
			}
			// We have a byte waiting. Emit it.
			if d.byteAvailable {
				d.ii++
				d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
				d.tokens.n++
				if d.tokens.n == maxFlateBlockTokens {
					if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
						return
					}
					d.tokens.n = 0
				}
				d.index++
				// If we have a long run of no matches, skip additional bytes
				// Resets when d.ii overflows after 64KB.
				if d.ii > 31 {
					n := int(d.ii >> 6)
					for j := 0; j < n; j++ {
						if d.index >= d.windowEnd-1 {
							break
						}
						d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
						d.tokens.n++
						if d.tokens.n == maxFlateBlockTokens {
							if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
								return
							}
							d.tokens.n = 0
						}
						d.index++
					}
					// Flush last byte
					d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
					d.tokens.n++
					d.byteAvailable = false
					// d.length = minMatchLength - 1 // not needed, since d.ii is reset above, so it should never be > minMatchLength
					if d.tokens.n == maxFlateBlockTokens {
						if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
							return
						}
						d.tokens.n = 0
					}
				}
			} else {
				d.index++
				d.byteAvailable = true
			}
		}
	}
}
// fillStore copies as much of b as fits into the window buffer and
// advances the window end marker, returning the number of bytes consumed.
func (d *compressor) fillStore(b []byte) int {
	copied := copy(d.window[d.windowEnd:], b)
	d.windowEnd += copied
	return copied
}
// store writes the buffered window contents as a single stored
// (uncompressed) block and empties the window. Any error lands in d.err.
func (d *compressor) store() {
	if d.windowEnd != 0 {
		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
	}
	d.windowEnd = 0
}
// fillHuff will fill the buffer with data for huffman-only compression.
// The number of bytes copied is returned.
func (d *compressor) fillHuff(b []byte) int {
	copied := copy(d.window[d.windowEnd:], b)
	d.windowEnd += copied
	return copied
}
// storeHuff will compress and store the currently added data,
// if enough has been accumulated or we at the end of the stream.
// Any error that occurred will be in d.err
func (d *compressor) storeHuff() {
	// Wait for a full block unless this is a sync/final flush.
	full := d.windowEnd >= maxStoreBlockSize
	if !full && !d.sync {
		return
	}
	// Nothing buffered: nothing to emit.
	if d.windowEnd == 0 {
		return
	}
	d.w.writeBlockHuff(false, d.window[:d.windowEnd])
	d.err = d.w.err
	d.windowEnd = 0
}
// storeSnappy will compress and store the currently added data,
// if enough has been accumulated or we at the end of the stream.
// Any error that occurred will be in d.err
//
// Fixes vs. original: the doc comment wrongly named this storeHuff, and the
// windowEnd <= 32 branch reset d.tokens.n/d.windowEnd twice (once inside the
// branch and again immediately after it).
func (d *compressor) storeSnappy() {
	// We only compress if we have maxStoreBlockSize.
	if d.windowEnd < maxStoreBlockSize {
		if !d.sync {
			return
		}
		// Handle extremely small sizes.
		if d.windowEnd < 128 {
			if d.windowEnd == 0 {
				return
			}
			if d.windowEnd <= 32 {
				// Too small to be worth entropy coding: emit stored.
				d.err = d.writeStoredBlock(d.window[:d.windowEnd])
			} else {
				d.w.writeBlockHuff(false, d.window[:d.windowEnd])
				d.err = d.w.err
			}
			d.tokens.n = 0
			d.windowEnd = 0
			return
		}
	}
	d.snap.Encode(&d.tokens, d.window[:d.windowEnd])
	// If we made zero matches, store the block as is.
	if d.tokens.n == d.windowEnd {
		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
		// If we removed less than 1/16th, huffman compress the block.
	} else if d.tokens.n > d.windowEnd-(d.windowEnd>>4) {
		d.w.writeBlockHuff(false, d.window[:d.windowEnd])
		d.err = d.w.err
	} else {
		d.w.writeBlockDynamic(d.tokens, false, d.window[:d.windowEnd])
		d.err = d.w.err
	}
	d.tokens.n = 0
	d.windowEnd = 0
}
// write will add input byte to the stream.
// Unless an error occurs all bytes will be consumed.
func (d *compressor) write(b []byte) (n int, err error) {
	if d.err != nil {
		return 0, d.err
	}
	n = len(b)
	remaining := b
	for len(remaining) > 0 {
		// Process what is buffered, then take in more input.
		d.step(d)
		remaining = remaining[d.fill(d, remaining):]
		if d.err != nil {
			return 0, d.err
		}
	}
	return n, d.err
}
// syncFlush performs the equivalent of zlib's Z_SYNC_FLUSH: it compresses
// any pending input and then emits an empty stored block so a decompressor
// can consume everything written so far.
// Returns the first error recorded on the compressor or bit writer.
func (d *compressor) syncFlush() error {
	d.sync = true
	if d.err != nil {
		// Note: d.sync is left set on this early-error path.
		return d.err
	}
	d.step(d)
	if d.err == nil {
		// Empty stored block acts as the sync marker.
		d.w.writeStoredHeader(0, false)
		d.w.flush()
		d.err = d.w.err
	}
	d.sync = false
	return d.err
}
// init configures the compressor for the requested level, selecting the
// fill/step strategy pair that implements it. It returns an error for
// levels outside [-2, 9].
func (d *compressor) init(w io.Writer, level int) (err error) {
	d.w = newHuffmanBitWriter(w)
	// DefaultCompression (-1) is an alias for level 5.
	if level == DefaultCompression {
		level = 5
	}
	switch {
	case level == NoCompression:
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillStore
		d.step = (*compressor).store
	case level == ConstantCompression:
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillHuff
		d.step = (*compressor).storeHuff
	case level >= 1 && level <= 3:
		// Snappy-style matching for the fastest levels.
		d.snap = newSnappy(level)
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillHuff
		d.step = (*compressor).storeSnappy
		d.tokens.tokens = make([]token, maxStoreBlockSize+1)
	case level >= 4 && level <= 9:
		d.compressionLevel = levels[level]
		d.initDeflate()
		d.fill = (*compressor).fillDeflate
		if d.fastSkipHashing == skipNever {
			if useSSE42 {
				d.step = (*compressor).deflateLazySSE
			} else {
				d.step = (*compressor).deflateLazy
			}
		} else if useSSE42 {
			d.step = (*compressor).deflateSSE
		} else {
			d.step = (*compressor).deflate
		}
	default:
		return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
	}
	return nil
}
// hzeroes is a zeroed scratch block used to clear the hash tables in
// 256-entry chunks via copy; see (*compressor).reset.
var hzeroes [256]hashid
// reset the state of the compressor so the Writer can be reused for a
// new stream on a new destination.
func (d *compressor) reset(w io.Writer) {
	d.w.reset(w)
	d.sync = false
	d.err = nil
	// We only need to reset a few things for Snappy.
	if d.snap != nil {
		d.snap.Reset()
		d.windowEnd = 0
		d.tokens.n = 0
		return
	}
	switch d.compressionLevel.chain {
	case 0:
		// level was NoCompression or ConstantCompression.
		d.windowEnd = 0
	default:
		d.chainHead = -1
		// Zero both hash tables in chunks via the shared scratch block.
		// copy bounds each chunk itself, so this is safe for any table
		// length; the previous hashPrev loop advanced by a fixed
		// len(hzeroes) stride and would panic if len(d.hashPrev) were
		// not a multiple of 256.
		for s := d.hashHead; len(s) > 0; {
			n := copy(s, hzeroes[:])
			s = s[n:]
		}
		for s := d.hashPrev; len(s) > 0; {
			n := copy(s, hzeroes[:])
			s = s[n:]
		}
		d.hashOffset = 1
		d.index, d.windowEnd = 0, 0
		d.blockStart, d.byteAvailable = 0, false
		d.tokens.n = 0
		d.length = minMatchLength - 1
		d.offset = 0
		d.hash = 0
		d.ii = 0
		d.maxInsertIndex = 0
	}
}
// close finishes the stream: it compresses any pending data, writes the
// final (empty, last-marked) stored block and flushes the bit writer.
func (d *compressor) close() error {
	if d.err != nil {
		return d.err
	}
	d.sync = true
	d.step(d)
	if d.err != nil {
		return d.err
	}
	// Final empty stored block carries the BFINAL marker.
	d.w.writeStoredHeader(0, true)
	if d.w.err != nil {
		return d.w.err
	}
	d.w.flush()
	return d.w.err
}
// NewWriter returns a new Writer compressing data at the given level.
// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression);
// higher levels typically run slower but compress more.
// Level 0 (NoCompression) does not attempt any compression; it only adds the
// necessary DEFLATE framing.
// Level -1 (DefaultCompression) uses the default compression level.
// Level -2 (ConstantCompression) will use Huffman compression only, giving
// a very fast compression for all types of input, but sacrificing considerable
// compression efficiency.
//
// If level is in the range [-2, 9] then the error returned will be nil.
// Otherwise the error returned will be non-nil.
func NewWriter(w io.Writer, level int) (*Writer, error) {
	zw := new(Writer)
	if err := zw.d.init(w, level); err != nil {
		return nil, err
	}
	return zw, nil
}
// NewWriterDict is like NewWriter but initializes the new
// Writer with a preset dictionary. The returned Writer behaves
// as if the dictionary had been written to it without producing
// any compressed output. The compressed data written to w
// can only be decompressed by a Reader initialized with the
// same dictionary.
func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
	// Wrap the destination so Reset can recognise a dict-based Writer.
	wrapped := &dictWriter{w}
	zw, err := NewWriter(wrapped, level)
	if err != nil {
		return nil, err
	}
	zw.d.fillWindow(dict)
	// Keep a copy of the dictionary so Reset can replay it.
	zw.dict = append(zw.dict, dict...)
	return zw, err
}
// dictWriter wraps the user's writer; its presence is used by
// Writer.Reset to recognise a Writer created via NewWriterDict.
type dictWriter struct {
	w io.Writer
}
// Write forwards p unchanged to the wrapped writer.
func (w *dictWriter) Write(p []byte) (n int, err error) {
	n, err = w.w.Write(p)
	return
}
// A Writer takes data written to it and writes the compressed
// form of that data to an underlying writer (see NewWriter).
type Writer struct {
	d    compressor
	dict []byte // copy of the preset dictionary, replayed by Reset (NewWriterDict only).
}
// Write writes data to w, which will eventually write the
// compressed form of data to its underlying writer.
func (w *Writer) Write(data []byte) (n int, err error) {
	n, err = w.d.write(data)
	return
}
// Flush flushes any pending compressed data to the underlying writer.
// It is useful mainly in compressed network protocols, to ensure that
// a remote reader has enough data to reconstruct a packet.
// Flush does not return until the data has been written.
// If the underlying writer returns an error, Flush returns that error.
//
// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
func (w *Writer) Flush() error {
	// Background on flushing: http://www.bolet.org/~pornin/deflate-flush.html
	err := w.d.syncFlush()
	return err
}
// Close flushes remaining data, writes the final block and closes the writer.
func (w *Writer) Close() error {
	err := w.d.close()
	return err
}
// Reset discards the writer's state and makes it equivalent to
// the result of NewWriter or NewWriterDict called with dst
// and w's level and dictionary.
func (w *Writer) Reset(dst io.Writer) {
	dw, ok := w.d.w.w.(*dictWriter)
	if !ok {
		// w was created with NewWriter: plain reset.
		w.d.reset(dst)
		return
	}
	// w was created with NewWriterDict: retarget the wrapper and
	// replay the saved dictionary.
	dw.w = dst
	w.d.reset(dw)
	w.d.fillWindow(w.dict)
}
// ResetDict discards the writer's state and makes it equivalent to
// the result of NewWriter or NewWriterDict called with dst
// and w's level, but sets a specific dictionary.
func (w *Writer) ResetDict(dst io.Writer, dict []byte) {
	w.dict = dict
	w.d.reset(dst)
	w.d.fillWindow(dict)
}
|
package storage
import (
"context"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/pkg/errors"
log "gopkg.in/inconshreveable/log15.v2"
"gopkg.in/yaml.v2"
"github.com/lxc/lxd/lxd/backup"
"github.com/lxc/lxd/lxd/cluster/request"
"github.com/lxc/lxd/lxd/db"
deviceConfig "github.com/lxc/lxd/lxd/device/config"
"github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/instance/instancetype"
"github.com/lxc/lxd/lxd/lifecycle"
"github.com/lxc/lxd/lxd/locking"
"github.com/lxc/lxd/lxd/migration"
"github.com/lxc/lxd/lxd/operations"
"github.com/lxc/lxd/lxd/project"
"github.com/lxc/lxd/lxd/revert"
"github.com/lxc/lxd/lxd/state"
"github.com/lxc/lxd/lxd/storage/drivers"
"github.com/lxc/lxd/lxd/storage/filesystem"
"github.com/lxc/lxd/lxd/storage/memorypipe"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/instancewriter"
"github.com/lxc/lxd/shared/ioprogress"
"github.com/lxc/lxd/shared/logger"
"github.com/lxc/lxd/shared/logging"
)
// unavailablePools tracks pools that could not be mounted on this member;
// entries are added by the Mount revert path and cleared on successful Mount.
// Guarded by unavailablePoolsMu.
var unavailablePools = make(map[string]struct{})

// unavailablePoolsMu protects unavailablePools.
// Idiom fix: declare the mutex by its useful zero value rather than
// assigning a composite literal (sync.Mutex{}).
var unavailablePoolsMu sync.Mutex
// lxdBackend implements a storage pool backend, pairing the pool's
// database record with its low-level storage driver.
type lxdBackend struct {
	driver drivers.Driver
	id     int64           // Pool ID in the database.
	db     api.StoragePool // Pool record as loaded from the database.
	name   string
	state  *state.State
	logger logger.Logger
	nodes  map[int64]db.StoragePoolNode // Per cluster member pool state, keyed by member ID.
}
// ID returns the storage pool's database record ID.
func (b *lxdBackend) ID() int64 {
	return b.id
}
// Name returns the storage pool name.
func (b *lxdBackend) Name() string {
	return b.name
}
// Description returns the storage pool description from its database record.
func (b *lxdBackend) Description() string {
	return b.db.Description
}
// Status returns the storage pool status as recorded in the database
// (the cluster-wide status; see LocalStatus for this member's view).
func (b *lxdBackend) Status() string {
	return b.db.Status
}
// LocalStatus returns storage pool status of the local cluster member.
func (b *lxdBackend) LocalStatus() string {
	unavailablePoolsMu.Lock()
	defer unavailablePoolsMu.Unlock()
	// Report unavailability without touching b.db.Status: the pool may be
	// recovered later, so the condition must not be persisted.
	if _, unavailable := unavailablePools[b.name]; unavailable {
		return api.StoragePoolStatusUnvailable
	}
	node, exists := b.nodes[b.state.Cluster.GetNodeID()]
	if !exists {
		return api.StoragePoolStatusUnknown
	}
	return db.StoragePoolStateToAPIStatus(node.State)
}
// ToAPI returns the storage pool as an API representation
// (a copy of the pool's database record).
func (b *lxdBackend) ToAPI() api.StoragePool {
	return b.db
}
// Driver returns the low-level storage pool driver.
func (b *lxdBackend) Driver() drivers.Driver {
	return b.driver
}
// MigrationTypes returns the migration transport method preferred when sending a migration,
// based on the migration method requested by the driver's ability.
// It defers entirely to the underlying driver.
func (b *lxdBackend) MigrationTypes(contentType drivers.ContentType, refresh bool) []migration.Type {
	return b.driver.MigrationTypes(contentType, refresh)
}
// Create creates the storage pool layout on the storage device.
// For remote drivers, notification requests (clientType != ClientTypeNormal)
// only create the local directory structure; the initiating member performs
// the full remote setup.
// NOTE(review): the previous comment referred to a "localOnly" parameter
// that is not part of this signature.
func (b *lxdBackend) Create(clientType request.ClientType, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"config": b.db.Config, "description": b.db.Description, "clientType": clientType})
	logger.Debug("create started")
	defer logger.Debug("create finished")
	revert := revert.New()
	defer revert.Fail()
	path := drivers.GetPoolMountPath(b.name)
	if shared.IsDir(path) {
		return fmt.Errorf("Storage pool directory %q already exists", path)
	}
	// Create the storage path.
	err := os.MkdirAll(path, 0711)
	if err != nil {
		return errors.Wrapf(err, "Failed to create storage pool directory %q", path)
	}
	revert.Add(func() { os.RemoveAll(path) })
	if b.driver.Info().Remote && clientType != request.ClientTypeNormal {
		if !b.driver.Info().MountedRoot {
			// Create the directory structure.
			err = b.createStorageStructure(path)
			if err != nil {
				return err
			}
		}
		// Dealing with a remote storage pool, we're done now.
		revert.Success()
		return nil
	}
	// Validate config.
	err = b.driver.Validate(b.db.Config)
	if err != nil {
		return err
	}
	// Create the storage pool on the storage device.
	err = b.driver.Create()
	if err != nil {
		return err
	}
	// Mount the storage pool.
	ourMount, err := b.driver.Mount()
	if err != nil {
		return err
	}
	// We expect the caller of create to mount the pool if needed, so we should unmount after
	// storage struct has been created.
	if ourMount {
		defer b.driver.Unmount()
	}
	// Create the directory structure.
	err = b.createStorageStructure(path)
	if err != nil {
		return err
	}
	revert.Success()
	return nil
}
// GetVolume returns a drivers.Volume containing copies of the supplied
// volume config and the pool's config.
func (b *lxdBackend) GetVolume(volType drivers.VolumeType, contentType drivers.ContentType, volName string, volConfig map[string]string) drivers.Volume {
	// Clone both config maps so internal modifications cannot leak back
	// to the caller or to the pool's own record.
	volConfigCopy := make(map[string]string, len(volConfig))
	for key, value := range volConfig {
		volConfigCopy[key] = value
	}
	poolConfigCopy := make(map[string]string, len(b.db.Config))
	for key, value := range b.db.Config {
		poolConfigCopy[key] = value
	}
	return drivers.NewVolume(b.driver, b.name, volType, contentType, volName, volConfigCopy, poolConfigCopy)
}
// GetResources returns utilisation information about the pool,
// as reported by the underlying driver.
func (b *lxdBackend) GetResources() (*api.ResourcesStoragePool, error) {
	logger := logging.AddContext(b.logger, nil)
	logger.Debug("GetResources started")
	defer logger.Debug("GetResources finished")
	return b.driver.GetResources()
}
// IsUsed returns whether the storage pool is used by any volumes or profiles (excluding image volumes).
func (b *lxdBackend) IsUsed() (bool, error) {
	// Fetch all users of the storage pool from the database.
	var poolUsedBy []string
	err := b.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
		var err error
		poolUsedBy, err = tx.GetStoragePoolUsedBy(b.name, false)
		return err
	})
	if err != nil {
		return false, err
	}
	for _, entry := range poolUsedBy {
		// Images are never considered a user of the pool.
		if !strings.HasPrefix(entry, "/1.0/images/") {
			return true, nil
		}
	}
	return false, nil
}
// Update updates the pool config, applying driver-level changes where the
// pool is active and persisting the new config/description to the database.
func (b *lxdBackend) Update(clientType request.ClientType, newDesc string, newConfig map[string]string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"newDesc": newDesc, "newConfig": newConfig})
	logger.Debug("Update started")
	defer logger.Debug("Update finished")
	// Validate config.
	err := b.driver.Validate(newConfig)
	if err != nil {
		return err
	}
	// Diff the configurations.
	changedConfig, userOnly := b.detectChangedConfig(b.db.Config, newConfig)
	// Only allow changing the pool source while the local state is still pending.
	_, sourceChanged := changedConfig["source"]
	if sourceChanged && b.LocalStatus() != api.StoragePoolStatusPending {
		return fmt.Errorf("Pool source cannot be changed when not in pending state")
	}
	// Apply changes to local node if both global pool and node are not pending and non-user config changed.
	// Otherwise just apply changes to DB (below) ready for the actual global create request to be initiated.
	if len(changedConfig) > 0 && b.Status() != api.StoragePoolStatusPending && b.LocalStatus() != api.StoragePoolStatusPending && !userOnly {
		err = b.driver.Update(changedConfig)
		if err != nil {
			return err
		}
	}
	// Update the database if something changed and we're in ClientTypeNormal mode.
	if clientType == request.ClientTypeNormal && (len(changedConfig) > 0 || newDesc != b.db.Description) {
		err = b.state.Cluster.UpdateStoragePool(b.name, newDesc, newConfig)
		if err != nil {
			return err
		}
	}
	return nil
}
// Delete removes the pool: unmounts or cleans up remote storage for
// notification requests, otherwise deletes leftover image volumes and the
// low-level storage, then removes the local mountpoint directory.
func (b *lxdBackend) Delete(clientType request.ClientType, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"clientType": clientType})
	logger.Debug("Delete started")
	defer logger.Debug("Delete finished")
	// If completely gone, just return
	path := shared.VarPath("storage-pools", b.name)
	if !shared.PathExists(path) {
		return nil
	}
	if clientType != request.ClientTypeNormal && b.driver.Info().Remote {
		if b.driver.Info().MountedRoot {
			_, err := b.driver.Unmount()
			if err != nil {
				return err
			}
		} else {
			// Remote storage may have leftover entries caused by
			// volumes that were moved or deleted while a particular system was offline.
			err := os.RemoveAll(path)
			if err != nil {
				return err
			}
		}
	} else {
		// Remove any left over image volumes.
		// This can occur during partial image unpack or if the storage pool has been recovered from an
		// instance backup file and the image volume DB records were not restored.
		// If non-image volumes exist, we don't delete them, even if they can then prevent the storage pool
		// from being deleted, because they should not exist by this point and we don't want to end up
		// removing an instance or custom volume accidentally.
		// Errors listing volumes are ignored, as we should still try and delete the storage pool.
		vols, _ := b.driver.ListVolumes()
		for _, vol := range vols {
			if vol.Type() == drivers.VolumeTypeImage {
				err := b.driver.DeleteVolume(vol, op)
				if err != nil {
					return errors.Wrapf(err, "Failed deleting left over image volume %q (%s)", vol.Name(), vol.ContentType())
				}
				logger.Warn("Deleted left over image volume", log.Ctx{"volName": vol.Name(), "contentType": vol.ContentType()})
			}
		}
		// Delete the low-level storage.
		err := b.driver.Delete(op)
		if err != nil {
			return err
		}
	}
	// Delete the mountpoint.
	err := os.Remove(path)
	if err != nil && !os.IsNotExist(err) {
		return errors.Wrapf(err, "Failed to remove directory %q", path)
	}
	return nil
}
// Mount mounts the storage pool and ensures its directory structure exists.
// On any failure the pool is marked locally unavailable (see LocalStatus);
// on success any such mark is cleared.
func (b *lxdBackend) Mount() (bool, error) {
	logger := logging.AddContext(b.logger, nil)
	logger.Debug("Mount started")
	defer logger.Debug("Mount finished")
	revert := revert.New()
	defer revert.Fail()
	// First revert action: record the pool as unavailable if anything below fails.
	revert.Add(func() {
		unavailablePoolsMu.Lock()
		unavailablePools[b.Name()] = struct{}{}
		unavailablePoolsMu.Unlock()
	})
	path := drivers.GetPoolMountPath(b.name)
	// Create the storage path if needed.
	if !shared.IsDir(path) {
		err := os.MkdirAll(path, 0711)
		if err != nil {
			return false, fmt.Errorf("Failed to create storage pool directory %q: %w", path, err)
		}
	}
	ourMount, err := b.driver.Mount()
	if err != nil {
		return false, err
	}
	if ourMount {
		revert.Add(func() { b.Unmount() })
	}
	// Create the directory structure (if needed) after mounted.
	err = b.createStorageStructure(path)
	if err != nil {
		return false, err
	}
	revert.Success()
	// Ensure pool is marked as available now its mounted.
	unavailablePoolsMu.Lock()
	delete(unavailablePools, b.Name())
	unavailablePoolsMu.Unlock()
	return ourMount, nil
}
// Unmount unmounts the storage pool via the driver; the boolean reports
// whether an unmount was actually performed.
func (b *lxdBackend) Unmount() (bool, error) {
	logger := logging.AddContext(b.logger, nil)
	logger.Debug("Unmount started")
	defer logger.Debug("Unmount finished")
	return b.driver.Unmount()
}
// ApplyPatch runs the requested patch at both backend and driver level.
func (b *lxdBackend) ApplyPatch(name string) error {
	// Run early backend patches.
	if patch, ok := lxdEarlyPatches[name]; ok {
		if err := patch(b); err != nil {
			return err
		}
	}
	// Run the driver patch itself.
	if err := b.driver.ApplyPatch(name); err != nil {
		return err
	}
	// Run late backend patches.
	if patch, ok := lxdLatePatches[name]; ok {
		if err := patch(b); err != nil {
			return err
		}
	}
	return nil
}
// ensureInstanceSymlink creates a symlink in the instance directory to the instance's mount path
// if doesn't exist already.
func (b *lxdBackend) ensureInstanceSymlink(instanceType instancetype.Type, projectName string, instanceName string, mountPath string) error {
	if shared.IsSnapshot(instanceName) {
		return fmt.Errorf("Instance must not be snapshot")
	}
	symlinkPath := InstancePath(instanceType, projectName, instanceName, false)
	// Remove any old symlinks left over by previous bugs that may point to a different pool.
	if shared.PathExists(symlinkPath) {
		if err := os.Remove(symlinkPath); err != nil {
			return errors.Wrapf(err, "Failed to remove symlink %q", symlinkPath)
		}
	}
	// Create new symlink.
	if err := os.Symlink(mountPath, symlinkPath); err != nil {
		return errors.Wrapf(err, "Failed to create symlink from %q to %q", mountPath, symlinkPath)
	}
	return nil
}
// removeInstanceSymlink removes a symlink in the instance directory to the instance's mount path.
func (b *lxdBackend) removeInstanceSymlink(instanceType instancetype.Type, projectName string, instanceName string) error {
	symlinkPath := InstancePath(instanceType, projectName, instanceName, false)
	// Nothing to do if the symlink is already gone.
	if !shared.PathExists(symlinkPath) {
		return nil
	}
	if err := os.Remove(symlinkPath); err != nil {
		return errors.Wrapf(err, "Failed to remove symlink %q", symlinkPath)
	}
	return nil
}
// ensureInstanceSnapshotSymlink creates a symlink in the snapshot directory to the instance's
// snapshot path if doesn't exist already.
func (b *lxdBackend) ensureInstanceSnapshotSymlink(instanceType instancetype.Type, projectName string, instanceName string) error {
	// Check we can convert the instance to the volume type needed.
	volType, err := InstanceTypeToVolumeType(instanceType)
	if err != nil {
		return err
	}
	parentName, _, _ := shared.InstanceGetParentAndSnapshotName(instanceName)
	snapshotSymlink := InstancePath(instanceType, projectName, parentName, true)
	volStorageName := project.Instance(projectName, parentName)
	snapshotTargetPath := drivers.GetVolumeSnapshotDir(b.name, volType, volStorageName)
	// Remove any old symlinks left over by previous bugs that may point to a different pool.
	if shared.PathExists(snapshotSymlink) {
		if err := os.Remove(snapshotSymlink); err != nil {
			return errors.Wrapf(err, "Failed to remove symlink %q", snapshotSymlink)
		}
	}
	// Create new symlink.
	if err := os.Symlink(snapshotTargetPath, snapshotSymlink); err != nil {
		return errors.Wrapf(err, "Failed to create symlink from %q to %q", snapshotTargetPath, snapshotSymlink)
	}
	return nil
}
// removeInstanceSnapshotSymlinkIfUnused removes the symlink in the snapshot directory to the
// instance's snapshot path if the snapshot path is missing. It is expected that the driver will
// remove the instance's snapshot path after the last snapshot is removed or the volume is deleted.
func (b *lxdBackend) removeInstanceSnapshotSymlinkIfUnused(instanceType instancetype.Type, projectName string, instanceName string) error {
	// Check we can convert the instance to the volume type needed.
	volType, err := InstanceTypeToVolumeType(instanceType)
	if err != nil {
		return err
	}
	parentName, _, _ := shared.InstanceGetParentAndSnapshotName(instanceName)
	snapshotSymlink := InstancePath(instanceType, projectName, parentName, true)
	volStorageName := project.Instance(projectName, parentName)
	snapshotTargetPath := drivers.GetVolumeSnapshotDir(b.name, volType, volStorageName)
	// Keep the symlink while the snapshot parent directory still exists.
	if shared.PathExists(snapshotTargetPath) {
		return nil
	}
	if !shared.PathExists(snapshotSymlink) {
		return nil
	}
	if err := os.Remove(snapshotSymlink); err != nil {
		return errors.Wrapf(err, "Failed to remove symlink %q", snapshotSymlink)
	}
	return nil
}
// instanceRootVolumeConfig returns the instance's root volume config,
// with any size properties from the instance's root disk device applied on top.
func (b *lxdBackend) instanceRootVolumeConfig(inst instance.Instance) (map[string]string, error) {
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return nil, err
	}
	volDBType, err := VolumeTypeToDBType(volType)
	if err != nil {
		return nil, err
	}
	// Load the volume record from the database.
	_, dbVol, err := b.state.Cluster.GetLocalStoragePoolVolume(inst.Project(), inst.Name(), volDBType, b.ID())
	if err != nil {
		if err == db.ErrNoSuchObject {
			return nil, errors.Wrapf(err, "Volume doesn't exist for %q on pool %q", project.Instance(inst.Project(), inst.Name()), b.Name())
		}
		return nil, err
	}
	// Get the root disk device config.
	_, rootDiskConf, err := shared.GetRootDiskDevice(inst.ExpandedDevices().CloneNative())
	if err != nil {
		return nil, err
	}
	// Size properties from the root disk device take precedence over the stored volume config.
	for _, key := range []string{"size", "size.state"} {
		if rootDiskConf[key] != "" {
			dbVol.Config[key] = rootDiskConf[key]
		}
	}
	return dbVol.Config, nil
}
// FillInstanceConfig populates the supplied instance volume config map with any defaults based on the storage
// pool and instance type being used.
func (b *lxdBackend) FillInstanceConfig(inst instance.Instance, config map[string]string) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name()})
	logger.Debug("FillInstanceConfig started")
	defer logger.Debug("FillInstanceConfig finished")
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}
	contentType := InstanceContentType(inst)
	// Get the volume name on storage.
	volStorageName := project.Instance(inst.Project(), inst.Name())
	// GetVolume clones the supplied config, so defaults are filled on an internal copy.
	vol := b.GetVolume(volType, contentType, volStorageName, config)
	if err := b.driver.FillVolumeConfig(vol); err != nil {
		return err
	}
	// Copy the filled defaults back into the caller's map.
	for key, value := range vol.Config() {
		config[key] = value
	}
	return nil
}
// CreateInstance creates an empty instance volume on the pool, creates the
// instance symlink and defers template application to first start.
func (b *lxdBackend) CreateInstance(inst instance.Instance, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name()})
	logger.Debug("CreateInstance started")
	defer logger.Debug("CreateInstance finished")
	if b.Status() == api.StoragePoolStatusPending {
		return fmt.Errorf("Specified pool is not fully created")
	}
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}
	// revert is flipped to false on success; until then any failure path
	// cleans up everything created so far via DeleteInstance.
	revert := true
	defer func() {
		if !revert {
			return
		}
		b.DeleteInstance(inst, op)
	}()
	contentType := InstanceContentType(inst)
	// Find the root device config for instance.
	rootDiskConf, err := b.instanceRootVolumeConfig(inst)
	if err != nil {
		return err
	}
	// Get the volume name on storage.
	volStorageName := project.Instance(inst.Project(), inst.Name())
	vol := b.GetVolume(volType, contentType, volStorageName, rootDiskConf)
	err = b.driver.CreateVolume(vol, nil, op)
	if err != nil {
		return err
	}
	err = b.ensureInstanceSymlink(inst.Type(), inst.Project(), inst.Name(), vol.MountPath())
	if err != nil {
		return err
	}
	err = inst.DeferTemplateApply(instance.TemplateTriggerCreate)
	if err != nil {
		return err
	}
	revert = false
	return nil
}
// CreateInstanceFromBackup restores a backup file onto the storage device. Because the backup file
// is unpacked and restored onto the storage device before the instance is created in the database
// it is necessary to return two functions; a post hook that can be run once the instance has been
// created in the database to run any storage layer finalisations, and a revert hook that can be
// run if the instance database load process fails that will remove anything created thus far.
func (b *lxdBackend) CreateInstanceFromBackup(srcBackup backup.Info, srcData io.ReadSeeker, op *operations.Operation) (func(instance.Instance) error, revert.Hook, error) {
	logger := logging.AddContext(b.logger, log.Ctx{"project": srcBackup.Project, "instance": srcBackup.Name, "snapshots": srcBackup.Snapshots, "optimizedStorage": *srcBackup.OptimizedStorage})
	logger.Debug("CreateInstanceFromBackup started")
	defer logger.Debug("CreateInstanceFromBackup finished")
	// Get the volume name on storage.
	volStorageName := project.Instance(srcBackup.Project, srcBackup.Name)
	// Get the instance type.
	instanceType, err := instancetype.New(string(srcBackup.Type))
	if err != nil {
		return nil, nil, err
	}
	// Get the volume type.
	volType, err := InstanceTypeToVolumeType(instanceType)
	if err != nil {
		return nil, nil, err
	}
	contentType := drivers.ContentTypeFS
	if volType == drivers.VolumeTypeVM {
		contentType = drivers.ContentTypeBlock
	}
	// We don't know the volume's config yet as tarball hasn't been unpacked.
	// We will apply the config as part of the post hook function returned if driver needs to.
	vol := b.GetVolume(volType, contentType, volStorageName, nil)
	revert := revert.New()
	defer revert.Fail()
	// Unpack the backup into the new storage volume(s).
	volPostHook, revertHook, err := b.driver.CreateVolumeFromBackup(vol, srcBackup, srcData, op)
	if err != nil {
		return nil, nil, err
	}
	if revertHook != nil {
		revert.Add(revertHook)
	}
	err = b.ensureInstanceSymlink(instanceType, srcBackup.Project, srcBackup.Name, vol.MountPath())
	if err != nil {
		return nil, nil, err
	}
	revert.Add(func() {
		b.removeInstanceSymlink(instanceType, srcBackup.Project, srcBackup.Name)
	})
	if len(srcBackup.Snapshots) > 0 {
		err = b.ensureInstanceSnapshotSymlink(instanceType, srcBackup.Project, srcBackup.Name)
		if err != nil {
			return nil, nil, err
		}
		revert.Add(func() {
			b.removeInstanceSnapshotSymlinkIfUnused(instanceType, srcBackup.Project, srcBackup.Name)
		})
	}
	// Update pool information in the backup.yaml file.
	err = vol.MountTask(func(mountPath string, op *operations.Operation) error {
		return backup.UpdateInstanceConfigStoragePool(b.state.Cluster, srcBackup, mountPath)
	}, op)
	if err != nil {
		return nil, nil, errors.Wrapf(err, "Error updating backup file")
	}
	var postHook func(instance.Instance) error
	// Create a post hook function that will use the instance (that will be created) to setup a new volume
	// containing the instance's root disk device's config so that the driver's post hook function can access
	// that config to perform any post instance creation setup.
	postHook = func(inst instance.Instance) error {
		logger.Debug("CreateInstanceFromBackup post hook started")
		defer logger.Debug("CreateInstanceFromBackup post hook finished")
		// Get the root disk device config.
		rootDiskConf, err := b.instanceRootVolumeConfig(inst)
		if err != nil {
			return err
		}
		// Get the volume name on storage.
		volStorageName := project.Instance(inst.Project(), inst.Name())
		volType, err := InstanceTypeToVolumeType(inst.Type())
		if err != nil {
			return err
		}
		contentType := InstanceContentType(inst)
		// If the driver returned a post hook, run it now.
		if volPostHook != nil {
			// Initialise new volume containing root disk config supplied in instance.
			vol := b.GetVolume(volType, contentType, volStorageName, rootDiskConf)
			err = volPostHook(vol)
			if err != nil {
				return err
			}
		}
		// Apply quota config from root device if its set. Should be done after driver's post hook if set
		// so that any volume initialisation has been completed first.
		// NOTE(review): the quota below operates on the OUTER `vol` captured by this
		// closure (created above with a nil config), not the per-hook vol built with
		// rootDiskConf — confirm this is intentional.
		if rootDiskConf["size"] != "" {
			size := rootDiskConf["size"]
			logger.Debug("Applying volume quota from root disk config", log.Ctx{"size": size})
			allowUnsafeResize := false
			if vol.Type() == drivers.VolumeTypeContainer {
				// Enable allowUnsafeResize for container imports so that filesystem resize
				// safety checks are avoided in order to allow more imports to succeed when
				// otherwise the pre-resize estimated checks of resize2fs would prevent
				// import. If there is truly insufficient size to complete the import the
				// resize will still fail, but its OK as we will then delete the volume
				// rather than leaving it in a corrupted state. We don't need to do this
				// for non-container volumes (nor should we) because block volumes won't
				// error if we shrink them too much, and custom volumes can be created at
				// the correct size immediately and don't need a post-import resize step.
				allowUnsafeResize = true
			}
			err = b.driver.SetVolumeQuota(vol, size, allowUnsafeResize, op)
			if err != nil {
				// The restored volume can end up being larger than the root disk config's size
				// property due to the block boundary rounding some storage drivers use. As such
				// if the restored volume is larger than the config's size and it cannot be shrunk
				// to the equivalent size on the target storage driver, don't fail as the backup
				// has still been restored successfully.
				if errors.Cause(err) == drivers.ErrCannotBeShrunk {
					logger.Warn("Could not apply volume quota from root disk config as restored volume cannot be shrunk", log.Ctx{"size": rootDiskConf["size"]})
				} else {
					return errors.Wrapf(err, "Failed applying volume quota to root disk")
				}
			}
			// Apply the filesystem volume quota (only when main volume is block).
			if vol.IsVMBlock() {
				vmStateSize := rootDiskConf["size.state"]
				// Apply default VM config filesystem size if main volume size is specified and
				// no custom vmStateSize is specified. This way if the main volume size is empty
				// (i.e removing quota) then this will also pass empty quota for the config
				// filesystem volume as well, allowing a former quota to be removed from both
				// volumes.
				if vmStateSize == "" && size != "" {
					vmStateSize = deviceConfig.DefaultVMBlockFilesystemSize
				}
				logger.Debug("Applying filesystem volume quota from root disk config", log.Ctx{"size.state": vmStateSize})
				fsVol := vol.NewVMBlockFilesystemVolume()
				err := b.driver.SetVolumeQuota(fsVol, vmStateSize, allowUnsafeResize, op)
				if errors.Cause(err) == drivers.ErrCannotBeShrunk {
					logger.Warn("Could not apply VM filesystem volume quota from root disk config as restored volume cannot be shrunk", log.Ctx{"size": rootDiskConf["size"]})
				} else if err != nil {
					return fmt.Errorf("Failed applying filesystem volume quota to root disk: %w", err)
				}
			}
		}
		return nil
	}
	revert.Success()
	return postHook, revertHook, nil
}
// CreateInstanceFromCopy copies an instance volume and optionally its snapshots to new volume(s).
// Source and target may be on the same pool (direct driver-level copy) or on different pools, in
// which case the migration subsystem is used over an in-memory pipe. On failure the partially
// created target volume is removed by the revert handler.
func (b *lxdBackend) CreateInstanceFromCopy(inst instance.Instance, src instance.Instance, snapshots bool, allowInconsistent bool, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name(), "src": src.Name(), "snapshots": snapshots})
	logger.Debug("CreateInstanceFromCopy started")
	defer logger.Debug("CreateInstanceFromCopy finished")

	if b.Status() == api.StoragePoolStatusPending {
		return fmt.Errorf("Specified pool is not fully created")
	}

	if inst.Type() != src.Type() {
		return fmt.Errorf("Instance types must match")
	}

	// Copying a running VM would require live migration, which is not implemented.
	if src.Type() == instancetype.VM && src.IsRunning() {
		return errors.Wrap(drivers.ErrNotImplemented, "Unable to perform VM live migration")
	}

	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}

	volDBType, err := VolumeTypeToDBType(volType)
	if err != nil {
		return err
	}

	contentType := InstanceContentType(inst)

	// Get the root disk device config.
	rootDiskConf, err := b.instanceRootVolumeConfig(inst)
	if err != nil {
		return err
	}

	// Get the volume name on storage.
	volStorageName := project.Instance(inst.Project(), inst.Name())

	// Initialise a new volume containing the root disk config supplied in the new instance.
	vol := b.GetVolume(volType, contentType, volStorageName, rootDiskConf)

	if b.driver.HasVolume(vol) {
		return fmt.Errorf("Cannot create volume, already exists on target storage")
	}

	// Setup reverter.
	revert := revert.New()
	defer revert.Fail()

	// Get the source storage pool.
	srcPool, err := GetPoolByInstance(b.state, src)
	if err != nil {
		return err
	}

	// Some driver backing stores require that running instances be frozen during copy.
	if !src.IsSnapshot() && b.driver.Info().RunningCopyFreeze && src.IsRunning() && !src.IsFrozen() && !allowInconsistent {
		err = src.Freeze()
		if err != nil {
			return err
		}
		defer src.Unfreeze()

		// Attempt to sync the filesystem.
		// NOTE(review): return value is discarded — sync appears to be best-effort; confirm.
		filesystem.SyncFS(src.RootfsPath())
	}

	// From here on any failure must delete the (possibly partially created) target volume.
	revert.Add(func() { b.DeleteInstance(inst, op) })

	if b.Name() == srcPool.Name() {
		logger.Debug("CreateInstanceFromCopy same-pool mode detected")

		// Get the src volume name on storage.
		srcVolStorageName := project.Instance(src.Project(), src.Name())

		// We don't need to use the source instance's root disk config, so set to nil.
		srcVol := b.GetVolume(volType, contentType, srcVolStorageName, nil)

		err = b.driver.CreateVolumeFromCopy(vol, srcVol, snapshots, op)
		if err != nil {
			return err
		}
	} else {
		// We are copying volumes between storage pools so use migration system as it will
		// be able to negotiate a common transfer method between pool types.
		logger.Debug("CreateInstanceFromCopy cross-pool mode detected")

		// If we are copying snapshots, retrieve a list of snapshots from source volume.
		snapshotNames := []string{}
		if snapshots {
			snapshots, err := VolumeSnapshotsGet(b.state, src.Project(), srcPool.Name(), src.Name(), volDBType)
			if err != nil {
				return err
			}

			for _, snapshot := range snapshots {
				_, snapShotName, _ := shared.InstanceGetParentAndSnapshotName(snapshot.Name)
				snapshotNames = append(snapshotNames, snapShotName)
			}
		}

		// Negotiate the migration type to use.
		offeredTypes := srcPool.MigrationTypes(contentType, false)
		offerHeader := migration.TypesToHeader(offeredTypes...)
		migrationTypes, err := migration.MatchTypes(offerHeader, FallbackMigrationType(contentType), b.MigrationTypes(contentType, false))
		if err != nil {
			return fmt.Errorf("Failed to negotiate copy migration type: %v", err)
		}

		var srcVolumeSize int64

		// For VMs, get source volume size so that target can create the volume the same size.
		if src.Type() == instancetype.VM {
			srcVolumeSize, err = InstanceDiskBlockSize(srcPool, src, op)
			if err != nil {
				return errors.Wrapf(err, "Failed getting source disk size")
			}
		}

		ctx, cancel := context.WithCancel(context.Background())

		// Use in-memory pipe pair to simulate a connection between the sender and receiver.
		aEnd, bEnd := memorypipe.NewPipePair(ctx)

		// Run sender and receiver in separate go routines to prevent deadlocks.
		// Buffered (size 1) channels let each goroutine exit without a waiting reader.
		aEndErrCh := make(chan error, 1)
		bEndErrCh := make(chan error, 1)
		go func() {
			err := srcPool.MigrateInstance(src, aEnd, &migration.VolumeSourceArgs{
				Name:              src.Name(),
				Snapshots:         snapshotNames,
				MigrationType:     migrationTypes[0],
				TrackProgress:     true, // Do use a progress tracker on sender.
				AllowInconsistent: allowInconsistent,
			}, op)
			if err != nil {
				cancel() // Unblock the peer end of the pipe on failure.
			}
			aEndErrCh <- err
		}()

		go func() {
			err := b.CreateInstanceFromMigration(inst, bEnd, migration.VolumeTargetArgs{
				Name:          inst.Name(),
				Snapshots:     snapshotNames,
				MigrationType: migrationTypes[0],
				VolumeSize:    srcVolumeSize, // Block size setting override.
				TrackProgress: false,         // Do not use a progress tracker on receiver.
			}, op)
			if err != nil {
				cancel() // Unblock the peer end of the pipe on failure.
			}
			bEndErrCh <- err
		}()

		// Capture errors from the sender and receiver from their result channels.
		errs := []error{}
		aEndErr := <-aEndErrCh
		if aEndErr != nil {
			errs = append(errs, aEndErr)
		}

		bEndErr := <-bEndErrCh
		if bEndErr != nil {
			errs = append(errs, bEndErr)
		}

		cancel()

		if len(errs) > 0 {
			return fmt.Errorf("Create instance volume from copy failed: %v", errs)
		}
	}

	// Setup the symlinks.
	err = b.ensureInstanceSymlink(inst.Type(), inst.Project(), inst.Name(), vol.MountPath())
	if err != nil {
		return err
	}

	revert.Success()
	return nil
}
// RefreshCustomVolume refreshes custom volumes (and optionally snapshots) during the custom volume copy operations.
// Snapshots that are not present in the source but are in the destination are removed from the
// destination if snapshots are included in the synchronization.
func (b *lxdBackend) RefreshCustomVolume(projectName string, srcProjectName string, volName string, desc string, config map[string]string, srcPoolName, srcVolName string, srcVolOnly bool, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "srcProjectName": srcProjectName, "volName": volName, "desc": desc, "config": config, "srcPoolName": srcPoolName, "srcVolName": srcVolName, "srcVolOnly": srcVolOnly})
	logger.Debug("RefreshCustomVolume started")
	defer logger.Debug("RefreshCustomVolume finished")

	if b.Status() == api.StoragePoolStatusPending {
		return fmt.Errorf("Specified pool is not fully created")
	}

	// An empty source project means "same project as the destination".
	if srcProjectName == "" {
		srcProjectName = projectName
	}

	// Setup the source pool backend instance.
	var srcPool *lxdBackend
	if b.name == srcPoolName {
		srcPool = b // Source and target are in the same pool so share pool var.
	} else {
		// Source is in a different pool to target, so load the pool.
		tmpPool, err := GetPoolByName(b.state, srcPoolName)
		if err != nil {
			return err
		}

		// Convert to lxdBackend so we can access driver.
		tmpBackend, ok := tmpPool.(*lxdBackend)
		if !ok {
			return fmt.Errorf("Pool is not an lxdBackend")
		}

		srcPool = tmpBackend
	}

	// Check source volume exists and is custom type.
	_, srcVolRow, err := b.state.Cluster.GetLocalStoragePoolVolume(srcProjectName, srcVolName, db.StoragePoolVolumeTypeCustom, srcPool.ID())
	if err != nil {
		if err == db.ErrNoSuchObject {
			return fmt.Errorf("Source volume doesn't exist")
		}

		return err
	}

	// Use the source volume's config if not supplied.
	if config == nil {
		config = srcVolRow.Config
	}

	// Use the source volume's description if not supplied.
	if desc == "" {
		desc = srcVolRow.Description
	}

	contentDBType, err := VolumeContentTypeNameToContentType(srcVolRow.ContentType)
	if err != nil {
		return err
	}

	// Get the source volume's content type.
	contentType := drivers.ContentTypeFS
	if contentDBType == db.StoragePoolVolumeContentTypeBlock {
		contentType = drivers.ContentTypeBlock
	}

	// Check the target pool's driver can host custom volumes at all.
	storagePoolSupported := false
	for _, supportedType := range b.Driver().Info().VolumeTypes {
		if supportedType == drivers.VolumeTypeCustom {
			storagePoolSupported = true
			break
		}
	}

	if !storagePoolSupported {
		return fmt.Errorf("Storage pool does not support custom volume type")
	}

	// If we are copying snapshots, retrieve a list of snapshots from source volume.
	snapshotNames := []string{}
	srcSnapVols := []drivers.Volume{}
	syncSnapshots := []db.StorageVolumeArgs{}
	if !srcVolOnly {
		// Detect added/deleted snapshots.
		srcSnapshots, err := VolumeSnapshotsGet(srcPool.state, srcProjectName, srcPoolName, srcVolName, db.StoragePoolVolumeTypeCustom)
		if err != nil {
			return err
		}

		destSnapshots, err := VolumeSnapshotsGet(b.state, projectName, b.Name(), volName, db.StoragePoolVolumeTypeCustom)
		if err != nil {
			return err
		}

		var deleteSnapshots []db.StorageVolumeArgs
		syncSnapshots, deleteSnapshots = syncSnapshotsVolumeGet(srcSnapshots, destSnapshots)

		// Build the list of snapshots to transfer.
		for _, snapshot := range syncSnapshots {
			_, snapshotName, _ := shared.InstanceGetParentAndSnapshotName(snapshot.Name)
			snapshotNames = append(snapshotNames, snapshotName)

			snapVolStorageName := project.StorageVolume(projectName, snapshot.Name)
			srcSnapVol := srcPool.GetVolume(drivers.VolumeTypeCustom, contentType, snapVolStorageName, nil)
			srcSnapVols = append(srcSnapVols, srcSnapVol)
		}

		// Delete any snapshots that have disappeared or changed on the source.
		for _, snapshot := range deleteSnapshots {
			_, snapshotName, _ := shared.InstanceGetParentAndSnapshotName(snapshot.Name)
			snapVolName := fmt.Sprintf("%s/%s", volName, snapshotName)

			// Delete the snapshot.
			err = b.DeleteCustomVolumeSnapshot(projectName, snapVolName, op)
			if err != nil {
				return err
			}
		}
	}

	volStorageName := project.StorageVolume(projectName, volName)
	vol := b.GetVolume(drivers.VolumeTypeCustom, contentType, volStorageName, config)

	srcVolStorageName := project.StorageVolume(srcProjectName, srcVolName)
	srcVol := srcPool.GetVolume(drivers.VolumeTypeCustom, contentType, srcVolStorageName, srcVolRow.Config)

	if srcPool == b {
		logger.Debug("RefreshCustomVolume same-pool mode detected")

		err = b.driver.RefreshVolume(vol, srcVol, srcSnapVols, op)
		if err != nil {
			return err
		}

		// Create database entry for new storage volume snapshots.
		for _, snapshot := range syncSnapshots {
			_, snapshotName, _ := shared.InstanceGetParentAndSnapshotName(snapshot.Name)

			err = VolumeDBCreate(b.state, b, projectName, fmt.Sprintf("%s/%s", volName, snapshotName), snapshot.Description, drivers.VolumeTypeCustom, true, snapshot.Config, snapshot.ExpiryDate, contentType)
			if err != nil {
				return err
			}
		}
	} else {
		logger.Debug("RefreshCustomVolume cross-pool mode detected")

		// Negotiate the migration type to use.
		offeredTypes := srcPool.MigrationTypes(contentType, true)
		offerHeader := migration.TypesToHeader(offeredTypes...)
		migrationTypes, err := migration.MatchTypes(offerHeader, FallbackMigrationType(contentType), b.MigrationTypes(contentType, true))
		if err != nil {
			return fmt.Errorf("Failed to negotiate copy migration type: %v", err)
		}

		var volSize int64

		if contentType == drivers.ContentTypeBlock {
			// Get the src volume name on storage.
			srcVolStorageName := project.StorageVolume(srcProjectName, srcVolName)
			srcVol := srcPool.GetVolume(drivers.VolumeTypeCustom, contentType, srcVolStorageName, srcVolRow.Config)

			// Probe the source block volume's size so the target can be created to match.
			// The MountTask error must be checked: if the mount or size probe fails we would
			// otherwise silently continue with volSize == 0 and migrate with a wrong size
			// override.
			err = srcVol.MountTask(func(mountPath string, op *operations.Operation) error {
				volDiskPath, err := srcPool.driver.GetVolumeDiskPath(srcVol)
				if err != nil {
					return err
				}

				volSize, err = drivers.BlockDiskSizeBytes(volDiskPath)
				if err != nil {
					return err
				}

				return nil
			}, nil)
			if err != nil {
				return err
			}
		}

		ctx, cancel := context.WithCancel(context.Background())

		// Use in-memory pipe pair to simulate a connection between the sender and receiver.
		aEnd, bEnd := memorypipe.NewPipePair(ctx)

		// Run sender and receiver in separate go routines to prevent deadlocks.
		aEndErrCh := make(chan error, 1)
		bEndErrCh := make(chan error, 1)
		go func() {
			err := srcPool.MigrateCustomVolume(srcProjectName, aEnd, &migration.VolumeSourceArgs{
				Name:          srcVolName,
				Snapshots:     snapshotNames,
				MigrationType: migrationTypes[0],
				TrackProgress: true, // Do use a progress tracker on sender.
				ContentType:   string(contentType),
			}, op)
			if err != nil {
				cancel() // Unblock the peer end of the pipe on failure.
			}
			aEndErrCh <- err
		}()

		go func() {
			err := b.CreateCustomVolumeFromMigration(projectName, bEnd, migration.VolumeTargetArgs{
				Name:          volName,
				Description:   desc,
				Config:        config,
				Snapshots:     snapshotNames,
				MigrationType: migrationTypes[0],
				TrackProgress: false, // Do not use a progress tracker on receiver.
				ContentType:   string(contentType),
				VolumeSize:    volSize, // Block size setting override.
				Refresh:       true,
			}, op)
			if err != nil {
				cancel() // Unblock the peer end of the pipe on failure.
			}
			bEndErrCh <- err
		}()

		// Capture errors from the sender and receiver from their result channels.
		errs := []error{}
		aEndErr := <-aEndErrCh
		if aEndErr != nil {
			aEnd.Close() // Ensure the receiver end unblocks if the sender failed early.
			errs = append(errs, aEndErr)
		}

		bEndErr := <-bEndErrCh
		if bEndErr != nil {
			errs = append(errs, bEndErr)
		}

		cancel()

		if len(errs) > 0 {
			return fmt.Errorf("Refresh custom volume from copy failed: %v", errs)
		}
	}

	return nil
}
// RefreshInstance synchronises one instance's volume (and optionally snapshots) over another.
// Snapshots that are not present in the source but are in the destination are removed from the
// destination if snapshots are included in the synchronisation.
func (b *lxdBackend) RefreshInstance(inst instance.Instance, src instance.Instance, srcSnapshots []instance.Instance, allowInconsistent bool, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name(), "src": src.Name(), "srcSnapshots": len(srcSnapshots)})
	logger.Debug("RefreshInstance started")
	defer logger.Debug("RefreshInstance finished")

	if inst.Type() != src.Type() {
		return fmt.Errorf("Instance types must match")
	}

	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}

	contentType := InstanceContentType(inst)

	// Get the root disk device config.
	rootDiskConf, err := b.instanceRootVolumeConfig(inst)
	if err != nil {
		return err
	}

	// Get the volume name on storage.
	volStorageName := project.Instance(inst.Project(), inst.Name())

	// Initialise a new volume containing the root disk config supplied in the new instance.
	vol := b.GetVolume(volType, contentType, volStorageName, rootDiskConf)

	// Get the src volume name on storage.
	srcVolStorageName := project.Instance(src.Project(), src.Name())

	// We don't need to use the source instance's root disk config, so set to nil.
	srcVol := b.GetVolume(volType, contentType, srcVolStorageName, nil)

	srcSnapVols := []drivers.Volume{}
	for _, snapInst := range srcSnapshots {
		// Initialise a new volume containing the root disk config supplied in the
		// new instance. We don't need to use the source instance's snapshot root
		// disk config, so set to nil. This is because snapshots are immutable yet
		// the instance and its snapshots can be transferred between pools, so using
		// the data from the snapshot is incorrect.

		// Get the snap volume name on storage.
		snapVolStorageName := project.Instance(snapInst.Project(), snapInst.Name())
		srcSnapVol := b.GetVolume(volType, contentType, snapVolStorageName, nil)
		srcSnapVols = append(srcSnapVols, srcSnapVol)
	}

	srcPool, err := GetPoolByInstance(b.state, src)
	if err != nil {
		return err
	}

	if b.Name() == srcPool.Name() {
		logger.Debug("RefreshInstance same-pool mode detected")

		// Same pool: let the driver refresh the target volume directly from the source.
		err = b.driver.RefreshVolume(vol, srcVol, srcSnapVols, op)
		if err != nil {
			return err
		}
	} else {
		// We are copying volumes between storage pools so use migration system as it will
		// be able to negotiate a common transfer method between pool types.
		logger.Debug("RefreshInstance cross-pool mode detected")

		// Retrieve a list of snapshots we are copying.
		snapshotNames := []string{}
		for _, srcSnapVol := range srcSnapVols {
			_, snapShotName, _ := shared.InstanceGetParentAndSnapshotName(srcSnapVol.Name())
			snapshotNames = append(snapshotNames, snapShotName)
		}

		// Negotiate the migration type to use.
		offeredTypes := srcPool.MigrationTypes(contentType, true)
		offerHeader := migration.TypesToHeader(offeredTypes...)
		migrationTypes, err := migration.MatchTypes(offerHeader, FallbackMigrationType(contentType), b.MigrationTypes(contentType, true))
		if err != nil {
			return fmt.Errorf("Failed to negotiate copy migration type: %v", err)
		}

		ctx, cancel := context.WithCancel(context.Background())

		// Use in-memory pipe pair to simulate a connection between the sender and receiver.
		aEnd, bEnd := memorypipe.NewPipePair(ctx)

		// Run sender and receiver in separate go routines to prevent deadlocks.
		// Buffered (size 1) channels let each goroutine exit without a waiting reader.
		aEndErrCh := make(chan error, 1)
		bEndErrCh := make(chan error, 1)
		go func() {
			err := srcPool.MigrateInstance(src, aEnd, &migration.VolumeSourceArgs{
				Name:              src.Name(),
				Snapshots:         snapshotNames,
				MigrationType:     migrationTypes[0],
				TrackProgress:     true, // Do use a progress tracker on sender.
				AllowInconsistent: allowInconsistent,
			}, op)
			if err != nil {
				cancel() // Unblock the peer end of the pipe on failure.
			}
			aEndErrCh <- err
		}()

		go func() {
			err := b.CreateInstanceFromMigration(inst, bEnd, migration.VolumeTargetArgs{
				Name:          inst.Name(),
				Snapshots:     snapshotNames,
				MigrationType: migrationTypes[0],
				Refresh:       true,  // Indicate to receiver volume should exist.
				TrackProgress: false, // Do not use a progress tracker on receiver.
			}, op)
			if err != nil {
				cancel() // Unblock the peer end of the pipe on failure.
			}
			bEndErrCh <- err
		}()

		// Capture errors from the sender and receiver from their result channels.
		errs := []error{}
		aEndErr := <-aEndErrCh
		if aEndErr != nil {
			errs = append(errs, aEndErr)
		}

		bEndErr := <-bEndErrCh
		if bEndErr != nil {
			errs = append(errs, bEndErr)
		}

		cancel()

		if len(errs) > 0 {
			// NOTE(review): message says "Create instance volume from copy" but this is a
			// refresh — looks copy-pasted from CreateInstanceFromCopy; confirm before changing.
			return fmt.Errorf("Create instance volume from copy failed: %v", errs)
		}
	}

	err = b.ensureInstanceSymlink(inst.Type(), inst.Project(), inst.Name(), vol.MountPath())
	if err != nil {
		return err
	}

	err = inst.DeferTemplateApply(instance.TemplateTriggerCopy)
	if err != nil {
		return err
	}

	return nil
}
// imageFiller returns a function that can be used as a filler function with CreateVolume().
// The function returned will unpack the specified image archive into the specified mount path
// provided, and for VM images, a raw root block path is required to unpack the qcow2 image into.
func (b *lxdBackend) imageFiller(fingerprint string, op *operations.Operation) func(vol drivers.Volume, rootBlockPath string, allowUnsafeResize bool) (int64, error) {
	return func(vol drivers.Volume, rootBlockPath string, allowUnsafeResize bool) (int64, error) {
		// Only wire up progress reporting when an operation is supplied (op is nil
		// when the filler is used as part of pre-migration setup).
		var progress *ioprogress.ProgressTracker
		if op != nil {
			meta := make(map[string]interface{})
			onProgress := func(percent, speed int64) {
				shared.SetProgressMetadata(meta, "create_instance_from_image_unpack", "Unpack", percent, 0, speed)
				op.UpdateMetadata(meta)
			}
			progress = &ioprogress.ProgressTracker{Handler: onProgress}
		}

		// Unpack the locally cached image archive into the volume.
		srcPath := shared.VarPath("images", fingerprint)
		return ImageUnpack(srcPath, vol, rootBlockPath, b.driver.Info().BlockBacking, b.state.OS, allowUnsafeResize, progress)
	}
}
// CreateInstanceFromImage creates a new volume for an instance populated with the image requested.
// On failure caller is expected to call DeleteInstance() to clean up.
func (b *lxdBackend) CreateInstanceFromImage(inst instance.Instance, fingerprint string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name()})
	logger.Debug("CreateInstanceFromImage started")
	defer logger.Debug("CreateInstanceFromImage finished")

	if b.Status() == api.StoragePoolStatusPending {
		return fmt.Errorf("Specified pool is not fully created")
	}

	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}

	contentType := InstanceContentType(inst)

	// Get the root disk device config.
	rootDiskConf, err := b.instanceRootVolumeConfig(inst)
	if err != nil {
		return err
	}

	// Get the volume name on storage.
	volStorageName := project.Instance(inst.Project(), inst.Name())
	vol := b.GetVolume(volType, contentType, volStorageName, rootDiskConf)

	// Leave reverting on failure to caller, they are expected to call DeleteInstance().

	// If the driver doesn't support optimized image volumes then create a new empty volume and
	// populate it with the contents of the image archive.
	if !b.driver.Info().OptimizedImages {
		volFiller := drivers.VolumeFiller{
			Fingerprint: fingerprint,
			Fill:        b.imageFiller(fingerprint, op),
		}

		err = b.driver.CreateVolume(vol, &volFiller, op)
		if err != nil {
			return err
		}
	} else {
		// If the driver supports optimized images then ensure the optimized image volume has been created
		// for the image's fingerprint and that it matches the pool's current volume settings, and if not
		// recreating using the pool's current volume settings.
		err = b.EnsureImage(fingerprint, op)
		if err != nil {
			return err
		}

		// Try and load existing volume config on this storage pool so we can compare filesystems if needed.
		_, imgDBVol, err := b.state.Cluster.GetLocalStoragePoolVolume(project.Default, fingerprint, db.StoragePoolVolumeTypeImage, b.ID())
		if err != nil {
			return errors.Wrapf(err, "Failed loading image record for %q", fingerprint)
		}

		imgVol := b.GetVolume(drivers.VolumeTypeImage, contentType, fingerprint, imgDBVol.Config)

		// Derive the volume size to use for a new volume when copying from a source volume.
		// Where possible (if the source volume has a volatile.rootfs.size property), it checks that the
		// source volume isn't larger than the volume's "size" and the pool's "volume.size" setting.
		logger.Debug("Checking volume size")
		newVolSize, err := vol.ConfigSizeFromSource(imgVol)
		if err != nil {
			return err
		}

		// Set the derived size directly as the "size" property on the new volume so that it is applied.
		vol.SetConfigSize(newVolSize)
		logger.Debug("Set new volume size", log.Ctx{"size": newVolSize})

		// Proceed to create a new volume by copying the optimized image volume.
		err = b.driver.CreateVolumeFromCopy(vol, imgVol, false, op)

		// If the driver returns ErrCannotBeShrunk, this means that the cached volume that the new volume
		// is to be created from is larger than the requested new volume size, and cannot be shrunk.
		// So we unpack the image directly into a new volume rather than use the optimized snapshot.
		// This is slower but allows for individual volumes to be created from an image that are smaller
		// than the pool's volume settings.
		if errors.Cause(err) == drivers.ErrCannotBeShrunk {
			logger.Debug("Cached image volume is larger than new volume and cannot be shrunk, creating non-optimized volume")

			volFiller := drivers.VolumeFiller{
				Fingerprint: fingerprint,
				Fill:        b.imageFiller(fingerprint, op),
			}

			err = b.driver.CreateVolume(vol, &volFiller, op)
			if err != nil {
				return err
			}
		} else if err != nil {
			return err
		}
	}

	err = b.ensureInstanceSymlink(inst.Type(), inst.Project(), inst.Name(), vol.MountPath())
	if err != nil {
		return err
	}

	err = inst.DeferTemplateApply(instance.TemplateTriggerCreate)
	if err != nil {
		return err
	}

	return nil
}
// CreateInstanceFromMigration receives an instance being migrated.
// The args.Name and args.Config fields are ignored and, instance properties are used instead.
func (b *lxdBackend) CreateInstanceFromMigration(inst instance.Instance, conn io.ReadWriteCloser, args migration.VolumeTargetArgs, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name(), "args": args})
	logger.Debug("CreateInstanceFromMigration started")
	defer logger.Debug("CreateInstanceFromMigration finished")

	if b.Status() == api.StoragePoolStatusPending {
		return fmt.Errorf("Specified pool is not fully created")
	}

	// Callers must not pre-populate Config; it is derived from the instance below.
	if args.Config != nil {
		return fmt.Errorf("Migration VolumeTargetArgs.Config cannot be set")
	}

	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}

	contentType := InstanceContentType(inst)

	// Find the root device config for instance.
	rootDiskConf, err := b.instanceRootVolumeConfig(inst)
	if err != nil {
		return err
	}

	// Override args.Name and args.Config to ensure volume is created based on instance.
	args.Config = rootDiskConf
	args.Name = inst.Name()

	projectName := inst.Project()

	// If migration header supplies a volume size, then use that as block volume size instead of pool default.
	// This way if the volume being received is larger than the pool default size, the block volume created
	// will still be able to accommodate it.
	if args.VolumeSize > 0 && contentType == drivers.ContentTypeBlock {
		b.logger.Debug("Setting volume size from offer header", log.Ctx{"size": args.VolumeSize})
		args.Config["size"] = fmt.Sprintf("%d", args.VolumeSize)
	} else if args.Config["size"] != "" {
		b.logger.Debug("Using volume size from root disk config", log.Ctx{"size": args.Config["size"]})
	}

	// Get the volume name on storage.
	volStorageName := project.Instance(projectName, args.Name)

	vol := b.GetVolume(volType, contentType, volStorageName, args.Config)

	// A refresh requires the volume to already exist; a fresh create requires it not to.
	volExists := b.driver.HasVolume(vol)
	if args.Refresh && !volExists {
		return fmt.Errorf("Cannot refresh volume, doesn't exist on migration target storage")
	} else if !args.Refresh && volExists {
		return fmt.Errorf("Cannot create volume, already exists on migration target storage")
	}

	var preFiller drivers.VolumeFiller

	revert := true

	if !args.Refresh {
		// Fresh create: delete the partially received volume on any failure below.
		// (On refresh the pre-existing volume is left alone.)
		defer func() {
			if !revert {
				return
			}
			b.DeleteInstance(inst, op)
		}()

		// If the negotiated migration method is rsync and the instance's base image is
		// already on the host then setup a pre-filler that will unpack the local image
		// to try and speed up the rsync of the incoming volume by avoiding the need to
		// transfer the base image files too.
		if args.MigrationType.FSType == migration.MigrationFSType_RSYNC {
			fingerprint := inst.ExpandedConfig()["volatile.base_image"]

			// Confirm that the image is present in the project.
			_, _, err = b.state.Cluster.GetImage(fingerprint, db.ImageFilter{Project: &projectName})
			if err != db.ErrNoSuchObject && err != nil {
				return err
			}

			// Then make sure that the image is available locally too (not guaranteed in clusters).
			local := shared.PathExists(shared.VarPath("images", fingerprint))

			if err == nil && local {
				logger.Debug("Using optimised migration from existing image", log.Ctx{"fingerprint": fingerprint})

				// Populate the volume filler with the fingerprint and image filler
				// function that can be used by the driver to pre-populate the
				// volume with the contents of the image.
				preFiller = drivers.VolumeFiller{
					Fingerprint: fingerprint,
					Fill:        b.imageFiller(fingerprint, op),
				}

				// Ensure if the image doesn't yet exist on a driver which supports
				// optimized storage, then it gets created first.
				err = b.EnsureImage(preFiller.Fingerprint, op)
				if err != nil {
					return err
				}
			}
		}
	}

	err = b.driver.CreateVolumeFromMigration(vol, conn, args, &preFiller, op)
	if err != nil {
		// Close the connection so the sender doesn't block on a dead receiver.
		conn.Close()
		return err
	}

	err = b.ensureInstanceSymlink(inst.Type(), inst.Project(), inst.Name(), vol.MountPath())
	if err != nil {
		return err
	}

	if len(args.Snapshots) > 0 {
		err = b.ensureInstanceSnapshotSymlink(inst.Type(), inst.Project(), inst.Name())
		if err != nil {
			return err
		}
	}

	revert = false
	return nil
}
// RenameInstance renames the instance's root volume and any snapshot volumes.
// It renames the DB records first (snapshots then parent), then the volume on the storage
// device, then the filesystem symlinks — each step registering a revert handler so a later
// failure undoes all earlier steps.
func (b *lxdBackend) RenameInstance(inst instance.Instance, newName string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name(), "newName": newName})
	logger.Debug("RenameInstance started")
	defer logger.Debug("RenameInstance finished")

	if inst.IsSnapshot() {
		return fmt.Errorf("Instance cannot be a snapshot")
	}

	if shared.IsSnapshot(newName) {
		return fmt.Errorf("New name cannot be a snapshot")
	}

	// Check we can convert the instance to the volume types needed.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}

	volDBType, err := VolumeTypeToDBType(volType)
	if err != nil {
		return err
	}

	revert := revert.New()
	defer revert.Fail()

	// Get any snapshots the instance has in the format <instance name>/<snapshot name>.
	snapshots, err := b.state.Cluster.GetInstanceSnapshotsNames(inst.Project(), inst.Name())
	if err != nil {
		return err
	}

	if len(snapshots) > 0 {
		revert.Add(func() {
			b.removeInstanceSnapshotSymlinkIfUnused(inst.Type(), inst.Project(), newName)
			b.ensureInstanceSnapshotSymlink(inst.Type(), inst.Project(), inst.Name())
		})
	}

	// Rename each snapshot DB record to have the new parent volume prefix.
	for _, srcSnapshot := range snapshots {
		_, snapName, _ := shared.InstanceGetParentAndSnapshotName(srcSnapshot)
		newSnapVolName := drivers.GetSnapshotVolumeName(newName, snapName)
		err = b.state.Cluster.RenameStoragePoolVolume(inst.Project(), srcSnapshot, newSnapVolName, volDBType, b.ID())
		if err != nil {
			return err
		}

		revert.Add(func() {
			b.state.Cluster.RenameStoragePoolVolume(inst.Project(), newSnapVolName, srcSnapshot, volDBType, b.ID())
		})
	}

	// Rename the parent volume DB record.
	err = b.state.Cluster.RenameStoragePoolVolume(inst.Project(), inst.Name(), newName, volDBType, b.ID())
	if err != nil {
		return err
	}

	revert.Add(func() {
		b.state.Cluster.RenameStoragePoolVolume(inst.Project(), newName, inst.Name(), volDBType, b.ID())
	})

	// Rename the volume and its snapshots on the storage device.
	volStorageName := project.Instance(inst.Project(), inst.Name())
	newVolStorageName := project.Instance(inst.Project(), newName)
	contentType := InstanceContentType(inst)

	// There's no need to pass config as it's not needed when renaming a volume.
	vol := b.GetVolume(volType, contentType, volStorageName, nil)

	err = b.driver.RenameVolume(vol, newVolStorageName, op)
	if err != nil {
		return err
	}

	revert.Add(func() {
		// There's no need to pass config as it's not needed when renaming a volume.
		newVol := b.GetVolume(volType, contentType, newVolStorageName, nil)
		b.driver.RenameVolume(newVol, volStorageName, op)
	})

	// Remove old instance symlink and create new one.
	err = b.removeInstanceSymlink(inst.Type(), inst.Project(), inst.Name())
	if err != nil {
		return err
	}

	revert.Add(func() {
		b.ensureInstanceSymlink(inst.Type(), inst.Project(), inst.Name(), drivers.GetVolumeMountPath(b.name, volType, volStorageName))
	})

	err = b.ensureInstanceSymlink(inst.Type(), inst.Project(), newName, drivers.GetVolumeMountPath(b.name, volType, newVolStorageName))
	if err != nil {
		return err
	}

	revert.Add(func() {
		b.removeInstanceSymlink(inst.Type(), inst.Project(), newName)
	})

	// Remove old instance snapshot symlink and create a new one if needed.
	err = b.removeInstanceSnapshotSymlinkIfUnused(inst.Type(), inst.Project(), inst.Name())
	if err != nil {
		return err
	}

	if len(snapshots) > 0 {
		err = b.ensureInstanceSnapshotSymlink(inst.Type(), inst.Project(), newName)
		if err != nil {
			return err
		}
	}

	revert.Success()
	return nil
}
// DeleteInstance removes the instance's root volume (all snapshots need to be removed first).
func (b *lxdBackend) DeleteInstance(inst instance.Instance, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name()})
	logger.Debug("DeleteInstance started")
	defer logger.Debug("DeleteInstance finished")
	// Only parent instances are deleted here; snapshots go through DeleteInstanceSnapshot.
	if inst.IsSnapshot() {
		return fmt.Errorf("Instance must not be a snapshot")
	}
	// Check we can convert the instance to the volume types needed.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}
	volDBType, err := VolumeTypeToDBType(volType)
	if err != nil {
		return err
	}
	// Get any snapshots the instance has in the format <instance name>/<snapshot name>.
	snapshots, err := b.state.Cluster.GetInstanceSnapshotsNames(inst.Project(), inst.Name())
	if err != nil {
		return err
	}
	// Check all snapshots are already removed.
	if len(snapshots) > 0 {
		return fmt.Errorf("Cannot remove an instance volume that has snapshots")
	}
	// Get the volume name on storage.
	volStorageName := project.Instance(inst.Project(), inst.Name())
	contentType := InstanceContentType(inst)
	// There's no need to pass config as it's not needed when deleting a volume.
	vol := b.GetVolume(volType, contentType, volStorageName, nil)
	// Delete the volume from the storage device. Must come after snapshots are removed.
	// Must come before DB RemoveStoragePoolVolume so that the volume ID is still available.
	logger.Debug("Deleting instance volume", log.Ctx{"volName": volStorageName})
	// Skip the driver call if the volume is already gone on disk (e.g. a partial earlier delete).
	if b.driver.HasVolume(vol) {
		err = b.driver.DeleteVolume(vol, op)
		if err != nil {
			return errors.Wrapf(err, "Error deleting storage volume")
		}
	}
	// Remove symlinks.
	err = b.removeInstanceSymlink(inst.Type(), inst.Project(), inst.Name())
	if err != nil {
		return err
	}
	err = b.removeInstanceSnapshotSymlinkIfUnused(inst.Type(), inst.Project(), inst.Name())
	if err != nil {
		return err
	}
	// Remove the volume record from the database.
	// A missing DB record is tolerated so deletion stays idempotent after partial failures.
	err = b.state.Cluster.RemoveStoragePoolVolume(inst.Project(), inst.Name(), volDBType, b.ID())
	if err != nil && errors.Cause(err) != db.ErrNoSuchObject {
		return errors.Wrapf(err, "Error deleting storage volume from database")
	}
	return nil
}
// UpdateInstance updates an instance volume's config.
// The new config is validated against the driver first; "size" and "block.filesystem"
// cannot be changed through this path.
func (b *lxdBackend) UpdateInstance(inst instance.Instance, newDesc string, newConfig map[string]string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name(), "newDesc": newDesc, "newConfig": newConfig})
	logger.Debug("UpdateInstance started")
	defer logger.Debug("UpdateInstance finished")
	if inst.IsSnapshot() {
		return fmt.Errorf("Instance cannot be a snapshot")
	}
	// Check we can convert the instance to the volume types needed.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}
	volDBType, err := VolumeTypeToDBType(volType)
	if err != nil {
		return err
	}
	volStorageName := project.Instance(inst.Project(), inst.Name())
	contentType := InstanceContentType(inst)
	// Validate config before making any changes.
	newVol := b.GetVolume(volType, contentType, volStorageName, newConfig)
	err = b.driver.ValidateVolume(newVol, false)
	if err != nil {
		return err
	}
	// Get current config to compare what has changed.
	_, curVol, err := b.state.Cluster.GetLocalStoragePoolVolume(inst.Project(), inst.Name(), volDBType, b.ID())
	if err != nil {
		if err == db.ErrNoSuchObject {
			return errors.Wrapf(err, "Volume doesn't exist for %q on pool %q", project.Instance(inst.Project(), inst.Name()), b.Name())
		}
		return err
	}
	// Apply config changes if there are any.
	changedConfig, userOnly := b.detectChangedConfig(curVol.Config, newConfig)
	if len(changedConfig) != 0 {
		// Check that the volume's size property isn't being changed.
		if changedConfig["size"] != "" {
			return fmt.Errorf("Instance volume 'size' property cannot be changed")
		}
		// Check that the volume's block.filesystem property isn't being changed.
		if changedConfig["block.filesystem"] != "" {
			return fmt.Errorf("Instance volume 'block.filesystem' property cannot be changed")
		}
		// Get the root disk device config.
		rootDiskConf, err := b.instanceRootVolumeConfig(inst)
		if err != nil {
			return err
		}
		// Use a distinct name so the DB record in curVol (still needed for the
		// description comparison below) is not shadowed.
		curDriverVol := b.GetVolume(volType, contentType, volStorageName, rootDiskConf)
		if !userOnly {
			err = b.driver.UpdateVolume(curDriverVol, changedConfig)
			if err != nil {
				return err
			}
		}
	}
	// Update the database if something changed.
	if len(changedConfig) != 0 || newDesc != curVol.Description {
		err = b.state.Cluster.UpdateStoragePoolVolume(inst.Project(), inst.Name(), volDBType, b.ID(), newDesc, newConfig)
		if err != nil {
			return err
		}
	}
	b.state.Events.SendLifecycle(inst.Project(), lifecycle.StorageVolumeUpdated.Event(newVol, string(newVol.Type()), inst.Project(), op, nil))
	return nil
}
// UpdateInstanceSnapshot updates an instance snapshot volume's description.
// Volume config is not allowed to be updated and will return an error.
func (b *lxdBackend) UpdateInstanceSnapshot(inst instance.Instance, newDesc string, newConfig map[string]string, op *operations.Operation) error {
	l := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name(), "newDesc": newDesc, "newConfig": newConfig})
	l.Debug("UpdateInstanceSnapshot started")
	defer l.Debug("UpdateInstanceSnapshot finished")
	// Only snapshot instances may be updated through this path.
	if !inst.IsSnapshot() {
		return fmt.Errorf("Instance must be a snapshot")
	}
	// Map the instance type onto the driver-level and DB-level volume types.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}
	volDBType, err := VolumeTypeToDBType(volType)
	if err != nil {
		return err
	}
	// Snapshot volumes only permit their description to change; the helper rejects config edits.
	return b.updateVolumeDescriptionOnly(inst.Project(), inst.Name(), volDBType, newDesc, newConfig, op)
}
// MigrateInstance sends an instance volume for migration.
// The args.Name field is ignored and the name of the instance is used instead.
func (b *lxdBackend) MigrateInstance(inst instance.Instance, conn io.ReadWriteCloser, args *migration.VolumeSourceArgs, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name(), "args": args})
	logger.Debug("MigrateInstance started")
	defer logger.Debug("MigrateInstance finished")
	// rsync+dd can't handle running source instances
	if inst.IsRunning() && args.MigrationType.FSType == migration.MigrationFSType_BLOCK_AND_RSYNC {
		return fmt.Errorf("Rsync based migration doesn't support running virtual machines")
	}
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}
	contentType := InstanceContentType(inst)
	// Snapshots may only be transferred on the initial sync; a final sync must not include them.
	if len(args.Snapshots) > 0 && args.FinalSync {
		return fmt.Errorf("Snapshots should not be transferred during final sync")
	}
	// Get the root disk device config.
	rootDiskConf, err := b.instanceRootVolumeConfig(inst)
	if err != nil {
		return err
	}
	args.Name = inst.Name() // Override args.Name to ensure instance volume is sent.
	// Get the volume name on storage.
	volStorageName := project.Instance(inst.Project(), args.Name)
	vol := b.GetVolume(volType, contentType, volStorageName, rootDiskConf)
	// Freeze the instance only when the underlying driver doesn't support it, and allowInconsistent is not set (and it's
	// not already frozen/stopped)
	if !inst.IsSnapshot() && b.driver.Info().RunningCopyFreeze && inst.IsRunning() && !inst.IsFrozen() && !args.AllowInconsistent {
		err = inst.Freeze()
		if err != nil {
			return err
		}
		// Unfreeze only once the driver has finished sending the volume.
		defer inst.Unfreeze()
		// Attempt to sync the filesystem. Best effort; the return value is intentionally ignored.
		filesystem.SyncFS(inst.RootfsPath())
	}
	// Hand the volume to the driver to stream over conn.
	err = b.driver.MigrateVolume(vol, conn, args, op)
	if err != nil {
		return err
	}
	return nil
}
// BackupInstance creates an instance backup.
func (b *lxdBackend) BackupInstance(inst instance.Instance, tarWriter *instancewriter.InstanceTarWriter, optimized bool, snapshots bool, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name(), "optimized": optimized, "snapshots": snapshots})
	logger.Debug("BackupInstance started")
	defer logger.Debug("BackupInstance finished")
	// Translate the instance type into the matching storage volume type.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}
	// Load the root disk device config so the volume carries the right settings.
	rootDiskConf, err := b.instanceRootVolumeConfig(inst)
	if err != nil {
		return err
	}
	// Make sure the backup file embedded in the archive reflects the current config.
	err = b.UpdateInstanceBackupFile(inst, op)
	if err != nil {
		return err
	}
	// When requested, collect snapshot names (oldest first) to hand to the storage driver.
	var snapNames []string
	if snapshots {
		instSnapshots, err := inst.Snapshots()
		if err != nil {
			return err
		}
		snapNames = make([]string, 0, len(instSnapshots))
		for _, instSnapshot := range instSnapshots {
			_, snapName, _ := shared.InstanceGetParentAndSnapshotName(instSnapshot.Name())
			snapNames = append(snapNames, snapName)
		}
	}
	vol := b.GetVolume(volType, InstanceContentType(inst), project.Instance(inst.Project(), inst.Name()), rootDiskConf)
	return b.driver.BackupVolume(vol, tarWriter, optimized, snapNames, op)
}
// GetInstanceUsage returns the disk usage of the instance's root volume.
func (b *lxdBackend) GetInstanceUsage(inst instance.Instance) (int64, error) {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name()})
	logger.Debug("GetInstanceUsage started")
	defer logger.Debug("GetInstanceUsage finished")
	// Map the instance type onto a storage volume type.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return -1, err
	}
	// Config isn't required for a usage query, so pass nil rather than loading it.
	vol := b.GetVolume(volType, InstanceContentType(inst), project.Instance(inst.Project(), inst.Name()), nil)
	return b.driver.GetVolumeUsage(vol)
}
// SetInstanceQuota sets the quota on the instance's root volume.
// Returns ErrInUse if the instance is running and the storage driver doesn't support online resizing.
func (b *lxdBackend) SetInstanceQuota(inst instance.Instance, size string, vmStateSize string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name(), "size": size, "vm_state_size": vmStateSize})
	logger.Debug("SetInstanceQuota started")
	defer logger.Debug("SetInstanceQuota finished")
	// Check we can convert the instance to the volume type needed.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}
	contentType := InstanceContentType(inst)
	volStorageName := project.Instance(inst.Project(), inst.Name())
	// Apply the main volume quota.
	// There's no need to pass config as it's not needed when setting quotas.
	vol := b.GetVolume(volType, contentType, volStorageName, nil)
	err = b.driver.SetVolumeQuota(vol, size, false, op)
	if err != nil {
		return err
	}
	// Apply the filesystem volume quota (only when main volume is block).
	if vol.IsVMBlock() {
		// Apply default VM config filesystem size if main volume size is specified and no custom
		// vmStateSize is specified. This way if the main volume size is empty (i.e removing quota) then
		// this will also pass empty quota for the config filesystem volume as well, allowing a former
		// quota to be removed from both volumes.
		if vmStateSize == "" && size != "" {
			vmStateSize = deviceConfig.DefaultVMBlockFilesystemSize
		}
		fsVol := vol.NewVMBlockFilesystemVolume()
		err := b.driver.SetVolumeQuota(fsVol, vmStateSize, false, op)
		if err != nil {
			return err
		}
	}
	return nil
}
// MountInstance mounts the instance's root volume.
func (b *lxdBackend) MountInstance(inst instance.Instance, op *operations.Operation) (*MountInfo, error) {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name()})
	logger.Debug("MountInstance started")
	defer logger.Debug("MountInstance finished")
	// Revert stack unmounts the volume again if a later step fails.
	revert := revert.New()
	defer revert.Fail()
	// Check we can convert the instance to the volume type needed.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return nil, err
	}
	// Get the root disk device config.
	// An instance ID of -1 means the DB record doesn't exist yet, so there is no config to load.
	var rootDiskConf map[string]string
	if inst.ID() > -1 {
		rootDiskConf, err = b.instanceRootVolumeConfig(inst)
		if err != nil {
			return nil, err
		}
	}
	contentType := InstanceContentType(inst)
	volStorageName := project.Instance(inst.Project(), inst.Name())
	// Get the volume.
	vol := b.GetVolume(volType, contentType, volStorageName, rootDiskConf)
	err = b.driver.MountVolume(vol, op)
	if err != nil {
		return nil, err
	}
	revert.Add(func() { b.driver.UnmountVolume(vol, false, op) })
	// VMs expose a block device; ErrNotSupported (non-VM) is tolerated and leaves DiskPath empty.
	diskPath, err := b.getInstanceDisk(inst)
	if err != nil && err != drivers.ErrNotSupported {
		return nil, errors.Wrapf(err, "Failed getting disk path")
	}
	mountInfo := &MountInfo{
		DiskPath: diskPath,
	}
	revert.Success() // From here on it is up to caller to call UnmountInstance() when done.
	return mountInfo, nil
}
// UnmountInstance unmounts the instance's root volume.
func (b *lxdBackend) UnmountInstance(inst instance.Instance, op *operations.Operation) (bool, error) {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name()})
	logger.Debug("UnmountInstance started")
	defer logger.Debug("UnmountInstance finished")
	// Map the instance type onto a storage volume type.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return false, err
	}
	// Only load the root disk config for instances with a DB record;
	// an ID of -1 indicates the record hasn't been created yet.
	var rootDiskConf map[string]string
	if inst.ID() > -1 {
		rootDiskConf, err = b.instanceRootVolumeConfig(inst)
		if err != nil {
			return false, err
		}
	}
	vol := b.GetVolume(volType, InstanceContentType(inst), project.Instance(inst.Project(), inst.Name()), rootDiskConf)
	return b.driver.UnmountVolume(vol, false, op)
}
// getInstanceDisk returns the location of the disk.
func (b *lxdBackend) getInstanceDisk(inst instance.Instance) (string, error) {
	// Block disk paths only exist for virtual machines.
	if inst.Type() != instancetype.VM {
		return "", drivers.ErrNotSupported
	}
	// Map the instance type onto a storage volume type.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return "", err
	}
	// No config is needed just to resolve the block device location.
	vol := b.GetVolume(volType, InstanceContentType(inst), project.Instance(inst.Project(), inst.Name()), nil)
	// Ask the driver where the block device lives.
	return b.driver.GetVolumeDiskPath(vol)
}
// CreateInstanceSnapshot creates a snaphot of an instance volume.
// inst is the snapshot instance being created; src is the parent instance being snapshotted.
func (b *lxdBackend) CreateInstanceSnapshot(inst instance.Instance, src instance.Instance, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name(), "src": src.Name()})
	logger.Debug("CreateInstanceSnapshot started")
	defer logger.Debug("CreateInstanceSnapshot finished")
	if inst.Type() != src.Type() {
		return fmt.Errorf("Instance types must match")
	}
	if !inst.IsSnapshot() {
		return fmt.Errorf("Instance must be a snapshot")
	}
	if src.IsSnapshot() {
		return fmt.Errorf("Source instance cannot be a snapshot")
	}
	// Check we can convert the instance to the volume type needed.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}
	// Some driver backing stores require that running instances be frozen during snapshot.
	if b.driver.Info().RunningCopyFreeze && src.IsRunning() && !src.IsFrozen() {
		// Freeze the processes.
		err = src.Freeze()
		if err != nil {
			return err
		}
		defer src.Unfreeze()
		// Attempt to sync the filesystem.
		filesystem.SyncFS(src.RootfsPath())
	}
	// NOTE: the former re-check of inst.IsSnapshot() here was unreachable (already
	// guarded above) and has been removed.
	contentType := InstanceContentType(inst)
	volStorageName := project.Instance(inst.Project(), inst.Name())
	// Get the volume.
	// There's no need to pass config as it's not needed when creating volume snapshots.
	vol := b.GetVolume(volType, contentType, volStorageName, nil)
	err = b.driver.CreateVolumeSnapshot(vol, op)
	if err != nil {
		return err
	}
	// Make sure the snapshots directory symlink for the parent instance exists.
	err = b.ensureInstanceSnapshotSymlink(inst.Type(), inst.Project(), inst.Name())
	if err != nil {
		return err
	}
	return nil
}
// RenameInstanceSnapshot renames an instance snapshot.
func (b *lxdBackend) RenameInstanceSnapshot(inst instance.Instance, newName string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name(), "newName": newName})
	logger.Debug("RenameInstanceSnapshot started")
	defer logger.Debug("RenameInstanceSnapshot finished")
	// Revert stack undoes the storage rename and DB rename if a later step fails.
	revert := revert.New()
	defer revert.Fail()
	if !inst.IsSnapshot() {
		return fmt.Errorf("Instance must be a snapshot")
	}
	// newName must be a bare snapshot name, not a "<parent>/<snap>" path.
	if shared.IsSnapshot(newName) {
		return fmt.Errorf("New name cannot be a snapshot")
	}
	// Check we can convert the instance to the volume types needed.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}
	volDBType, err := VolumeTypeToDBType(volType)
	if err != nil {
		return err
	}
	parentName, oldSnapshotName, isSnap := shared.InstanceGetParentAndSnapshotName(inst.Name())
	if !isSnap {
		return fmt.Errorf("Volume name must be a snapshot")
	}
	contentType := InstanceContentType(inst)
	volStorageName := project.Instance(inst.Project(), inst.Name())
	// Rename storage volume snapshot. No need to pass config as it's not needed when renaming a volume.
	snapVol := b.GetVolume(volType, contentType, volStorageName, nil)
	err = b.driver.RenameVolumeSnapshot(snapVol, newName, op)
	if err != nil {
		return err
	}
	newVolName := drivers.GetSnapshotVolumeName(parentName, newName)
	revert.Add(func() {
		// Revert rename. No need to pass config as it's not needed when renaming a volume.
		newSnapVol := b.GetVolume(volType, contentType, project.Instance(inst.Project(), newVolName), nil)
		b.driver.RenameVolumeSnapshot(newSnapVol, oldSnapshotName, op)
	})
	// Rename DB volume record.
	err = b.state.Cluster.RenameStoragePoolVolume(inst.Project(), inst.Name(), newVolName, volDBType, b.ID())
	if err != nil {
		return err
	}
	revert.Add(func() {
		// Rename DB volume record back.
		b.state.Cluster.RenameStoragePoolVolume(inst.Project(), newVolName, inst.Name(), volDBType, b.ID())
	})
	// Ensure the backup file reflects current config.
	err = b.UpdateInstanceBackupFile(inst, op)
	if err != nil {
		return err
	}
	revert.Success()
	return nil
}
// DeleteInstanceSnapshot removes the snapshot volume for the supplied snapshot instance.
func (b *lxdBackend) DeleteInstanceSnapshot(inst instance.Instance, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name()})
	logger.Debug("DeleteInstanceSnapshot started")
	defer logger.Debug("DeleteInstanceSnapshot finished")
	parentName, snapName, isSnap := shared.InstanceGetParentAndSnapshotName(inst.Name())
	if !inst.IsSnapshot() || !isSnap {
		return fmt.Errorf("Instance must be a snapshot")
	}
	// Check we can convert the instance to the volume types needed.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}
	volDBType, err := VolumeTypeToDBType(volType)
	if err != nil {
		return err
	}
	contentType := InstanceContentType(inst)
	// Get the parent volume name on storage.
	parentStorageName := project.Instance(inst.Project(), parentName)
	// Delete the snapshot from the storage device.
	// Must come before DB RemoveStoragePoolVolume so that the volume ID is still available.
	logger.Debug("Deleting instance snapshot volume", log.Ctx{"volName": parentStorageName, "snapshotName": snapName})
	snapVolName := drivers.GetSnapshotVolumeName(parentStorageName, snapName)
	// There's no need to pass config as it's not needed when deleting a volume snapshot.
	vol := b.GetVolume(volType, contentType, snapVolName, nil)
	if b.driver.HasVolume(vol) {
		err = b.driver.DeleteVolumeSnapshot(vol, op)
		if err != nil {
			return err
		}
	}
	// Delete symlink if needed.
	err = b.removeInstanceSnapshotSymlinkIfUnused(inst.Type(), inst.Project(), inst.Name())
	if err != nil {
		return err
	}
	// Remove the snapshot volume record from the database if exists.
	// Use errors.Cause so a wrapped "no such object" error is tolerated too, matching DeleteInstance.
	err = b.state.Cluster.RemoveStoragePoolVolume(inst.Project(), drivers.GetSnapshotVolumeName(parentName, snapName), volDBType, b.ID())
	if err != nil && errors.Cause(err) != db.ErrNoSuchObject {
		return err
	}
	return nil
}
// RestoreInstanceSnapshot restores an instance snapshot.
func (b *lxdBackend) RestoreInstanceSnapshot(inst instance.Instance, src instance.Instance, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name(), "src": src.Name()})
	logger.Debug("RestoreInstanceSnapshot started")
	defer logger.Debug("RestoreInstanceSnapshot finished")
	if inst.Type() != src.Type() {
		return fmt.Errorf("Instance types must match")
	}
	// inst is the parent instance being restored into; src is the snapshot to restore from.
	if inst.IsSnapshot() {
		return fmt.Errorf("Instance must not be snapshot")
	}
	if !src.IsSnapshot() {
		return fmt.Errorf("Source instance must be a snapshot")
	}
	// Target instance must not be running.
	if inst.IsRunning() {
		return fmt.Errorf("Instance must not be running to restore")
	}
	// Check we can convert the instance to the volume type needed.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}
	contentType := InstanceContentType(inst)
	// Find the root device config for source snapshot instance.
	rootDiskConf, err := b.instanceRootVolumeConfig(inst)
	if err != nil {
		return err
	}
	// Get the volume name on storage.
	volStorageName := project.Instance(inst.Project(), inst.Name())
	_, snapshotName, isSnap := shared.InstanceGetParentAndSnapshotName(src.Name())
	if !isSnap {
		return fmt.Errorf("Volume name must be a snapshot")
	}
	// Use the source snapshot's rootfs config (as this will later be restored into inst too).
	vol := b.GetVolume(volType, contentType, volStorageName, rootDiskConf)
	err = b.driver.RestoreVolume(vol, snapshotName, op)
	if err != nil {
		// Some drivers report which existing snapshots block the restore via
		// a typed ErrDeleteSnapshots error; handle that case by removing them.
		snapErr, ok := err.(drivers.ErrDeleteSnapshots)
		if ok {
			// We need to delete some snapshots and try again.
			snaps, err := inst.Snapshots()
			if err != nil {
				return err
			}
			// Go through all the snapshots.
			for _, snap := range snaps {
				_, snapName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name())
				// Only remove the snapshots the driver listed as blocking.
				if !shared.StringInSlice(snapName, snapErr.Snapshots) {
					continue
				}
				// Delete snapshot instance if listed in the error as one that needs removing.
				err := snap.Delete(true)
				if err != nil {
					return err
				}
			}
			// Now try restoring again.
			err = b.driver.RestoreVolume(vol, snapshotName, op)
			if err != nil {
				return err
			}
			return nil
		}
		return err
	}
	return nil
}
// MountInstanceSnapshot mounts an instance snapshot. It is mounted as read only so that the
// snapshot cannot be modified.
func (b *lxdBackend) MountInstanceSnapshot(inst instance.Instance, op *operations.Operation) (*MountInfo, error) {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name()})
	logger.Debug("MountInstanceSnapshot started")
	defer logger.Debug("MountInstanceSnapshot finished")
	// Refuse to operate on anything that isn't a snapshot.
	if !inst.IsSnapshot() {
		return nil, fmt.Errorf("Instance must be a snapshot")
	}
	// Map the instance type onto a storage volume type.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return nil, err
	}
	// The snapshot volume is looked up using the root disk device config.
	rootDiskConf, err := b.instanceRootVolumeConfig(inst)
	if err != nil {
		return nil, err
	}
	vol := b.GetVolume(volType, InstanceContentType(inst), project.Instance(inst.Project(), inst.Name()), rootDiskConf)
	if _, err := b.driver.MountVolumeSnapshot(vol, op); err != nil {
		return nil, err
	}
	// VMs expose a block device; ErrNotSupported is tolerated and leaves DiskPath empty.
	diskPath, err := b.getInstanceDisk(inst)
	if err != nil && err != drivers.ErrNotSupported {
		return nil, errors.Wrapf(err, "Failed getting disk path")
	}
	return &MountInfo{DiskPath: diskPath}, nil
}
// UnmountInstanceSnapshot unmounts an instance snapshot.
func (b *lxdBackend) UnmountInstanceSnapshot(inst instance.Instance, op *operations.Operation) (bool, error) {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name()})
	logger.Debug("UnmountInstanceSnapshot started")
	defer logger.Debug("UnmountInstanceSnapshot finished")
	// Refuse to operate on anything that isn't a snapshot.
	if !inst.IsSnapshot() {
		return false, fmt.Errorf("Instance must be a snapshot")
	}
	// Map the instance type onto a storage volume type.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return false, err
	}
	// Look the volume up with the root disk device config, matching the mount path.
	rootDiskConf, err := b.instanceRootVolumeConfig(inst)
	if err != nil {
		return false, err
	}
	vol := b.GetVolume(volType, InstanceContentType(inst), project.Instance(inst.Project(), inst.Name()), rootDiskConf)
	return b.driver.UnmountVolumeSnapshot(vol, op)
}
// poolBlockFilesystem returns the filesystem used for new block device filesystems.
func (b *lxdBackend) poolBlockFilesystem() string {
	// Prefer the pool-level override when one has been configured.
	fs := b.db.Config["volume.block.filesystem"]
	if fs == "" {
		fs = drivers.DefaultFilesystem
	}
	return fs
}
// EnsureImage creates an optimized volume of the image if supported by the storage pool driver and the volume
// doesn't already exist. If the volume already exists then it is checked to ensure it matches the pools current
// volume settings ("volume.size" and "block.filesystem" if applicable). If not the optimized volume is removed
// and regenerated to apply the pool's current volume settings.
func (b *lxdBackend) EnsureImage(fingerprint string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"fingerprint": fingerprint})
	logger.Debug("EnsureImage started")
	defer logger.Debug("EnsureImage finished")
	if b.Status() == api.StoragePoolStatusPending {
		return fmt.Errorf("Specified pool is not fully created")
	}
	if !b.driver.Info().OptimizedImages {
		return nil // Nothing to do for drivers that don't support optimized images volumes.
	}
	// We need to lock this operation to ensure that the image is not being created multiple times.
	// Uses a lock name of "EnsureImage_<fingerprint>" to avoid deadlocking with CreateVolume below that also
	// establishes a lock on the volume type & name if it needs to mount the volume before filling.
	unlock := locking.Lock(drivers.OperationLockName("EnsureImage", b.name, drivers.VolumeTypeImage, "", fingerprint))
	defer unlock()
	// Load image info from database.
	_, image, err := b.state.Cluster.GetImageFromAnyProject(fingerprint)
	if err != nil {
		return err
	}
	// Derive content type from image type. Image types are not the same as instance types, so don't use
	// instance type constants for comparison.
	contentType := drivers.ContentTypeFS
	if image.Type == "virtual-machine" {
		contentType = drivers.ContentTypeBlock
	}
	// Try and load any existing volume config on this storage pool so we can compare filesystems if needed.
	// db.ErrNoSuchObject just means no cached image volume exists yet, so imgDBVol stays nil.
	_, imgDBVol, err := b.state.Cluster.GetLocalStoragePoolVolume(project.Default, fingerprint, db.StoragePoolVolumeTypeImage, b.ID())
	if err != nil {
		if err != db.ErrNoSuchObject {
			return err
		}
	}
	// Create the new image volume. No config for an image volume so set to nil.
	// Pool config values will be read by the underlying driver if needed.
	imgVol := b.GetVolume(drivers.VolumeTypeImage, contentType, fingerprint, nil)
	// If an existing DB row was found, check if filesystem is the same as the current pool's filesystem.
	// If not we need to delete the existing cached image volume and re-create using new filesystem.
	// We need to do this for VM block images too, as they create a filesystem based config volume too.
	if imgDBVol != nil {
		// Add existing image volume's config to imgVol.
		imgVol = b.GetVolume(drivers.VolumeTypeImage, contentType, fingerprint, imgDBVol.Config)
		if b.Driver().Info().BlockBacking && imgVol.Config()["block.filesystem"] != b.poolBlockFilesystem() {
			logger.Debug("Filesystem of pool has changed since cached image volume created, regenerating image volume")
			err = b.DeleteImage(fingerprint, op)
			if err != nil {
				return err
			}
			// Reset img volume variables as we just deleted the old one.
			imgDBVol = nil
			imgVol = b.GetVolume(drivers.VolumeTypeImage, contentType, fingerprint, nil)
		}
	}
	// Check if we already have a suitable volume on storage device.
	if b.driver.HasVolume(imgVol) {
		if imgDBVol != nil {
			// Work out what size the image volume should be as if we were creating from scratch.
			// This takes into account the existing volume's "volatile.rootfs.size" setting if set so
			// as to avoid trying to shrink a larger image volume back to the default size when it is
			// allowed to be larger than the default as the pool doesn't specify a volume.size.
			logger.Debug("Checking image volume size")
			newVolSize, err := imgVol.ConfigSizeFromSource(imgVol)
			if err != nil {
				return err
			}
			imgVol.SetConfigSize(newVolSize)
			// Try applying the current size policy to the existing volume. If it is the same the
			// driver should make no changes, and if not then attempt to resize it to the new policy.
			// NOTE(review): this Debug call passes bare key/value args rather than the log.Ctx{}
			// form used elsewhere in this file — confirm the logger accepts both styles.
			logger.Debug("Setting image volume size", "size", imgVol.ConfigSize())
			err = b.driver.SetVolumeQuota(imgVol, imgVol.ConfigSize(), false, op)
			if errors.Cause(err) == drivers.ErrCannotBeShrunk || errors.Cause(err) == drivers.ErrNotSupported {
				// If the driver cannot resize the existing image volume to the new policy size
				// then delete the image volume and try to recreate using the new policy settings.
				logger.Debug("Volume size of pool has changed since cached image volume created and cached volume cannot be resized, regenerating image volume")
				err = b.DeleteImage(fingerprint, op)
				if err != nil {
					return err
				}
				// Reset img volume variables as we just deleted the old one.
				imgDBVol = nil
				imgVol = b.GetVolume(drivers.VolumeTypeImage, contentType, fingerprint, nil)
			} else if err != nil {
				return err
			} else {
				// We already have a valid volume at the correct size, just return.
				return nil
			}
		} else {
			// We have an unrecorded on-disk volume, assume it's a partial unpack and delete it.
			// This can occur if LXD process exits unexpectedly during an image unpack or if the
			// storage pool has been recovered (which would not recreate the image volume DB records).
			logger.Warn("Deleting leftover/partially unpacked image volume")
			err = b.driver.DeleteVolume(imgVol, op)
			if err != nil {
				return errors.Wrapf(err, "Failed deleting leftover/partially unpacked image volume")
			}
		}
	}
	// The filler unpacks the image contents into the volume once it has been created.
	volFiller := drivers.VolumeFiller{
		Fingerprint: fingerprint,
		Fill:        b.imageFiller(fingerprint, op),
	}
	// Revert stack deletes the freshly created volume if the DB record cannot be written.
	revert := revert.New()
	defer revert.Fail()
	err = b.driver.CreateVolume(imgVol, &volFiller, op)
	if err != nil {
		return err
	}
	revert.Add(func() { b.driver.DeleteVolume(imgVol, op) })
	var volConfig map[string]string
	// If the volume filler has recorded the size of the unpacked volume, then store this in the image DB row.
	if volFiller.Size != 0 {
		volConfig = map[string]string{
			"volatile.rootfs.size": fmt.Sprintf("%d", volFiller.Size),
		}
	}
	err = VolumeDBCreate(b.state, b, project.Default, fingerprint, "", drivers.VolumeTypeImage, false, volConfig, time.Time{}, contentType)
	if err != nil {
		return err
	}
	revert.Success()
	return nil
}
// DeleteImage removes an image from the database and underlying storage device if needed.
func (b *lxdBackend) DeleteImage(fingerprint string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"fingerprint": fingerprint})
	logger.Debug("DeleteImage started")
	defer logger.Debug("DeleteImage finished")
	// Serialize with other image operations on this pool for the same fingerprint.
	unlock := locking.Lock(drivers.OperationLockName("DeleteImage", b.name, drivers.VolumeTypeImage, "", fingerprint))
	defer unlock()
	// Look the image up in the database (any project).
	_, image, err := b.state.Cluster.GetImageFromAnyProject(fingerprint)
	if err != nil {
		return err
	}
	// Image types are not the same as instance types, so don't use instance type constants.
	var contentType drivers.ContentType
	if image.Type == "virtual-machine" {
		contentType = drivers.ContentTypeBlock
	} else {
		contentType = drivers.ContentTypeFS
	}
	// Fetch the stored volume config; some drivers need it to delete the volume.
	_, storageVol, err := b.state.Cluster.GetLocalStoragePoolVolume(project.Default, fingerprint, db.StoragePoolVolumeTypeImage, b.ID())
	if err != nil {
		return err
	}
	// Remove the volume from disk first (if present), then drop the DB record.
	vol := b.GetVolume(drivers.VolumeTypeImage, contentType, fingerprint, storageVol.Config)
	if b.driver.HasVolume(vol) {
		err = b.driver.DeleteVolume(vol, op)
		if err != nil {
			return err
		}
	}
	err = b.state.Cluster.RemoveStoragePoolVolume(project.Default, fingerprint, db.StoragePoolVolumeTypeImage, b.ID())
	if err != nil {
		return err
	}
	b.state.Events.SendLifecycle(project.Default, lifecycle.StorageVolumeDeleted.Event(vol, string(vol.Type()), project.Default, op, nil))
	return nil
}
// updateVolumeDescriptionOnly is a helper function used when handling update requests for volumes
// that only allow their descriptions to be updated. If any config supplied differs from the
// current volume's config then an error is returned.
func (b *lxdBackend) updateVolumeDescriptionOnly(project string, volName string, dbVolType int, newDesc string, newConfig map[string]string, op *operations.Operation) error {
	// Fetch the current DB row so the requested changes can be diffed against it.
	_, curVol, err := b.state.Cluster.GetLocalStoragePoolVolume(project, volName, dbVolType, b.ID())
	if err != nil {
		if err == db.ErrNoSuchObject {
			return errors.Wrapf(err, "Volume doesn't exist")
		}

		return err
	}

	// Any config difference is rejected; only the description may change here.
	if newConfig != nil {
		changedConfig, _ := b.detectChangedConfig(curVol.Config, newConfig)
		if len(changedConfig) != 0 {
			return fmt.Errorf("Volume config is not editable")
		}
	}

	// Persist the new description (keeping the current config) only when it differs.
	if newDesc != curVol.Description {
		err = b.state.Cluster.UpdateStoragePoolVolume(project, volName, dbVolType, b.ID(), newDesc, curVol.Config)
		if err != nil {
			return err
		}
	}

	// Convert the DB content type into a driver content type for the event payload.
	dbContentType, err := VolumeContentTypeNameToContentType(curVol.ContentType)
	if err != nil {
		return err
	}

	contentType, err := VolumeDBContentTypeToContentType(dbContentType)
	if err != nil {
		return err
	}

	// Emit the appropriate lifecycle event depending on whether this is a snapshot.
	vol := b.GetVolume(drivers.VolumeType(curVol.Type), contentType, volName, newConfig)
	if vol.IsSnapshot() {
		b.state.Events.SendLifecycle(project, lifecycle.StorageVolumeSnapshotUpdated.Event(vol, string(vol.Type()), project, op, nil))
	} else {
		b.state.Events.SendLifecycle(project, lifecycle.StorageVolumeUpdated.Event(vol, string(vol.Type()), project, op, nil))
	}

	return nil
}
// UpdateImage updates image config. Image volumes only allow their description to be
// changed, so this delegates to the shared description-only update helper.
func (b *lxdBackend) UpdateImage(fingerprint, newDesc string, newConfig map[string]string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"fingerprint": fingerprint, "newDesc": newDesc, "newConfig": newConfig})
	logger.Debug("UpdateImage started")
	defer logger.Debug("UpdateImage finished")

	return b.updateVolumeDescriptionOnly(project.Default, fingerprint, db.StoragePoolVolumeTypeImage, newDesc, newConfig, op)
}
// CreateCustomVolume creates an empty custom volume.
func (b *lxdBackend) CreateCustomVolume(projectName string, volName string, desc string, config map[string]string, contentType drivers.ContentType, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": volName, "desc": desc, "config": config, "contentType": contentType})
	logger.Debug("CreateCustomVolume started")
	defer logger.Debug("CreateCustomVolume finished")

	// Refuse to create volumes on pools that aren't fully formed yet.
	if b.Status() == api.StoragePoolStatusPending {
		return fmt.Errorf("Specified pool is not fully created")
	}

	// Validate the supplied config against the driver using the on-storage volume name.
	volStorageName := project.StorageVolume(projectName, volName)
	vol := b.GetVolume(drivers.VolumeTypeCustom, contentType, volStorageName, config)
	err := b.driver.ValidateVolume(vol, false)
	if err != nil {
		return err
	}

	// Make sure this pool's driver supports custom volumes at all.
	supported := false
	for _, volType := range b.Driver().Info().VolumeTypes {
		if volType == drivers.VolumeTypeCustom {
			supported = true
			break
		}
	}

	if !supported {
		return fmt.Errorf("Storage pool does not support custom volume type")
	}

	// Create database entry for new storage volume.
	err = VolumeDBCreate(b.state, b, projectName, volName, desc, vol.Type(), false, vol.Config(), time.Time{}, vol.ContentType())
	if err != nil {
		return err
	}

	// Remove the DB record again if the storage-side creation below fails.
	revertDB := true
	defer func() {
		if revertDB {
			b.state.Cluster.RemoveStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
		}
	}()

	// Create the empty custom volume on the storage device.
	err = b.driver.CreateVolume(vol, nil, op)
	if err != nil {
		return err
	}

	b.state.Events.SendLifecycle(projectName, lifecycle.StorageVolumeCreated.Event(vol, string(vol.Type()), projectName, op, log.Ctx{"type": vol.Type()}))

	revertDB = false
	return nil
}
// CreateCustomVolumeFromCopy creates a custom volume from an existing custom volume.
// It copies the snapshots from the source volume by default, but can be disabled if requested.
// When source and target are in the same pool the driver's native copy is used; otherwise the
// migration subsystem streams the volume over an in-memory pipe between two goroutines.
func (b *lxdBackend) CreateCustomVolumeFromCopy(projectName string, srcProjectName string, volName string, desc string, config map[string]string, srcPoolName, srcVolName string, srcVolOnly bool, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "srcProjectName": srcProjectName, "volName": volName, "desc": desc, "config": config, "srcPoolName": srcPoolName, "srcVolName": srcVolName, "srcVolOnly": srcVolOnly})
	logger.Debug("CreateCustomVolumeFromCopy started")
	defer logger.Debug("CreateCustomVolumeFromCopy finished")

	if b.Status() == api.StoragePoolStatusPending {
		return fmt.Errorf("Specified pool is not fully created")
	}

	// The source project defaults to the target project.
	if srcProjectName == "" {
		srcProjectName = projectName
	}

	// Setup the source pool backend instance.
	var srcPool *lxdBackend
	if b.name == srcPoolName {
		srcPool = b // Source and target are in the same pool so share pool var.
	} else {
		// Source is in a different pool to target, so load the pool.
		tmpPool, err := GetPoolByName(b.state, srcPoolName)
		if err != nil {
			return err
		}

		// Convert to lxdBackend so we can access driver.
		tmpBackend, ok := tmpPool.(*lxdBackend)
		if !ok {
			return fmt.Errorf("Pool is not an lxdBackend")
		}

		srcPool = tmpBackend
	}

	// Check source volume exists and is custom type.
	_, srcVolRow, err := b.state.Cluster.GetLocalStoragePoolVolume(srcProjectName, srcVolName, db.StoragePoolVolumeTypeCustom, srcPool.ID())
	if err != nil {
		if err == db.ErrNoSuchObject {
			return fmt.Errorf("Source volume doesn't exist")
		}

		return err
	}

	// Use the source volume's config if not supplied.
	if config == nil {
		config = srcVolRow.Config
	}

	// Use the source volume's description if not supplied.
	if desc == "" {
		desc = srcVolRow.Description
	}

	contentDBType, err := VolumeContentTypeNameToContentType(srcVolRow.ContentType)
	if err != nil {
		return err
	}

	// Get the source volume's content type.
	contentType := drivers.ContentTypeFS
	if contentDBType == db.StoragePoolVolumeContentTypeBlock {
		contentType = drivers.ContentTypeBlock
	}

	// Check the target pool's driver supports custom volumes.
	storagePoolSupported := false
	for _, supportedType := range b.Driver().Info().VolumeTypes {
		if supportedType == drivers.VolumeTypeCustom {
			storagePoolSupported = true
			break
		}
	}

	if !storagePoolSupported {
		return fmt.Errorf("Storage pool does not support custom volume type")
	}

	// If we are copying snapshots, retrieve a list of snapshots from source volume.
	snapshotNames := []string{}
	if !srcVolOnly {
		snapshots, err := VolumeSnapshotsGet(b.state, srcProjectName, srcPoolName, srcVolName, db.StoragePoolVolumeTypeCustom)
		if err != nil {
			return err
		}

		for _, snapshot := range snapshots {
			_, snapShotName, _ := shared.InstanceGetParentAndSnapshotName(snapshot.Name)
			snapshotNames = append(snapshotNames, snapShotName)
		}
	}

	// If the source and target are in the same pool then use CreateVolumeFromCopy rather than
	// migration system as it will be quicker.
	if srcPool == b {
		logger.Debug("CreateCustomVolumeFromCopy same-pool mode detected")

		// Create slice to record DB volumes created if revert needed later.
		revertDBVolumes := []string{}
		defer func() {
			// Remove any DB volume rows created if we are reverting.
			for _, volName := range revertDBVolumes {
				b.state.Cluster.RemoveStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
			}
		}()

		// Get the volume name on storage.
		volStorageName := project.StorageVolume(projectName, volName)
		vol := b.GetVolume(drivers.VolumeTypeCustom, contentType, volStorageName, config)

		// Get the src volume name on storage.
		srcVolStorageName := project.StorageVolume(srcProjectName, srcVolName)
		srcVol := b.GetVolume(drivers.VolumeTypeCustom, contentType, srcVolStorageName, srcVolRow.Config)

		// Check the supplied config and remove any fields not relevant for pool type.
		err := b.driver.ValidateVolume(vol, true)
		if err != nil {
			return err
		}

		// Create database entry for new storage volume.
		err = VolumeDBCreate(b.state, b, projectName, volName, desc, vol.Type(), false, vol.Config(), time.Time{}, vol.ContentType())
		if err != nil {
			return err
		}

		revertDBVolumes = append(revertDBVolumes, volName)

		if len(snapshotNames) > 0 {
			for _, snapName := range snapshotNames {
				newSnapshotName := drivers.GetSnapshotVolumeName(volName, snapName)

				// Create database entry for new storage volume snapshot.
				err = VolumeDBCreate(b.state, b, projectName, newSnapshotName, desc, vol.Type(), true, vol.Config(), time.Time{}, vol.ContentType())
				if err != nil {
					return err
				}

				revertDBVolumes = append(revertDBVolumes, newSnapshotName)
			}
		}

		err = b.driver.CreateVolumeFromCopy(vol, srcVol, !srcVolOnly, op)
		if err != nil {
			return err
		}

		b.state.Events.SendLifecycle(projectName, lifecycle.StorageVolumeCreated.Event(vol, string(vol.Type()), projectName, op, log.Ctx{"type": vol.Type()}))

		revertDBVolumes = nil
		return nil
	}

	// We are copying volumes between storage pools so use migration system as it will be able
	// to negotiate a common transfer method between pool types.
	logger.Debug("CreateCustomVolumeFromCopy cross-pool mode detected")

	// Negotiate the migration type to use.
	offeredTypes := srcPool.MigrationTypes(contentType, false)
	offerHeader := migration.TypesToHeader(offeredTypes...)
	migrationTypes, err := migration.MatchTypes(offerHeader, FallbackMigrationType(contentType), b.MigrationTypes(contentType, false))
	if err != nil {
		return fmt.Errorf("Failed to negotiate copy migration type: %v", err)
	}

	// If we're copying block volumes, the target block volume needs to be
	// at least the size of the source volume, otherwise we'll run into
	// "no space left on device".
	var volSize int64
	if contentType == drivers.ContentTypeBlock {
		// Get the src volume name on storage.
		srcVolStorageName := project.StorageVolume(srcProjectName, srcVolName)
		srcVol := srcPool.GetVolume(drivers.VolumeTypeCustom, contentType, srcVolStorageName, srcVolRow.Config)

		// Mount the source volume to read its block device size. The MountTask error was
		// previously discarded, which could silently leave volSize at 0 and surface later
		// as a confusing "no space left on device" failure on the target.
		err = srcVol.MountTask(func(mountPath string, op *operations.Operation) error {
			volDiskPath, err := srcPool.driver.GetVolumeDiskPath(srcVol)
			if err != nil {
				return err
			}

			volSize, err = drivers.BlockDiskSizeBytes(volDiskPath)
			if err != nil {
				return err
			}

			return nil
		}, nil)
		if err != nil {
			return errors.Wrapf(err, "Failed determining source volume size")
		}
	}

	ctx, cancel := context.WithCancel(context.Background())

	// Use in-memory pipe pair to simulate a connection between the sender and receiver.
	aEnd, bEnd := memorypipe.NewPipePair(ctx)

	// Run sender and receiver in separate go routines to prevent deadlocks.
	aEndErrCh := make(chan error, 1)
	bEndErrCh := make(chan error, 1)
	go func() {
		err := srcPool.MigrateCustomVolume(srcProjectName, aEnd, &migration.VolumeSourceArgs{
			Name:          srcVolName,
			Snapshots:     snapshotNames,
			MigrationType: migrationTypes[0],
			TrackProgress: true, // Do use a progress tracker on sender.
			ContentType:   string(contentType),
		}, op)
		if err != nil {
			cancel()
		}
		aEndErrCh <- err
	}()

	go func() {
		err := b.CreateCustomVolumeFromMigration(projectName, bEnd, migration.VolumeTargetArgs{
			Name:          volName,
			Description:   desc,
			Config:        config,
			Snapshots:     snapshotNames,
			MigrationType: migrationTypes[0],
			TrackProgress: false, // Do not use a progress tracker on receiver.
			ContentType:   string(contentType),
			VolumeSize:    volSize, // Block size setting override.
		}, op)
		if err != nil {
			cancel()
		}
		bEndErrCh <- err
	}()

	// Capture errors from the sender and receiver from their result channels.
	errs := []error{}
	aEndErr := <-aEndErrCh
	if aEndErr != nil {
		aEnd.Close()
		errs = append(errs, aEndErr)
	}

	bEndErr := <-bEndErrCh
	if bEndErr != nil {
		errs = append(errs, bEndErr)
	}

	cancel()

	if len(errs) > 0 {
		return fmt.Errorf("Create custom volume from copy failed: %v", errs)
	}

	return nil
}
// MigrateCustomVolume sends a volume for migration.
func (b *lxdBackend) MigrateCustomVolume(projectName string, conn io.ReadWriteCloser, args *migration.VolumeSourceArgs, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": args.Name, "args": args})
	logger.Debug("MigrateCustomVolume started")
	defer logger.Debug("MigrateCustomVolume finished")

	// Convert the DB content type name into a driver content type.
	dbContentType, err := VolumeContentTypeNameToContentType(args.ContentType)
	if err != nil {
		return err
	}

	contentType, err := VolumeDBContentTypeToContentType(dbContentType)
	if err != nil {
		return err
	}

	// Volume config not needed to send a volume so set to nil.
	vol := b.GetVolume(drivers.VolumeTypeCustom, contentType, project.StorageVolume(projectName, args.Name), nil)

	return b.driver.MigrateVolume(vol, conn, args, op)
}
// CreateCustomVolumeFromMigration receives a volume being migrated.
// DB records for the volume (and any snapshots listed in args.Snapshots) are created first
// and are removed again by the deferred revert if the storage-side receive fails.
func (b *lxdBackend) CreateCustomVolumeFromMigration(projectName string, conn io.ReadWriteCloser, args migration.VolumeTargetArgs, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": args.Name, "args": args})
	logger.Debug("CreateCustomVolumeFromMigration started")
	defer logger.Debug("CreateCustomVolumeFromMigration finished")
	// Refuse to receive onto a pool that isn't fully created.
	if b.Status() == api.StoragePoolStatusPending {
		return fmt.Errorf("Specified pool is not fully created")
	}
	// Check the driver supports custom volumes before doing any work.
	storagePoolSupported := false
	for _, supportedType := range b.Driver().Info().VolumeTypes {
		if supportedType == drivers.VolumeTypeCustom {
			storagePoolSupported = true
			break
		}
	}
	if !storagePoolSupported {
		return fmt.Errorf("Storage pool does not support custom volume type")
	}
	// Create slice to record DB volumes created if revert needed later.
	revertDBVolumes := []string{}
	defer func() {
		// Remove any DB volume rows created if we are reverting.
		for _, volName := range revertDBVolumes {
			b.state.Cluster.RemoveStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
		}
	}()
	// Get the volume name on storage.
	volStorageName := project.StorageVolume(projectName, args.Name)
	// Check the supplied config and remove any fields not relevant for destination pool type.
	vol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(args.ContentType), volStorageName, args.Config)
	// VolumeSize is set to the actual size of the underlying block device.
	// The target should use this value if present, otherwise it might get an error like
	// "no space left on device".
	if args.VolumeSize > 0 {
		vol.SetConfigSize(fmt.Sprintf("%d", args.VolumeSize))
	}
	err := b.driver.ValidateVolume(vol, true)
	if err != nil {
		return err
	}
	// Only create a main-volume DB record when this isn't a refresh of an already
	// existing volume on storage.
	if !args.Refresh || !b.driver.HasVolume(vol) {
		// Create database entry for new storage volume.
		err = VolumeDBCreate(b.state, b, projectName, args.Name, args.Description, vol.Type(), false, vol.Config(), time.Time{}, vol.ContentType())
		if err != nil {
			return err
		}
		revertDBVolumes = append(revertDBVolumes, args.Name)
	}
	if len(args.Snapshots) > 0 {
		for _, snapName := range args.Snapshots {
			newSnapshotName := drivers.GetSnapshotVolumeName(args.Name, snapName)
			// Create database entry for new storage volume snapshot.
			err = VolumeDBCreate(b.state, b, projectName, newSnapshotName, args.Description, vol.Type(), true, vol.Config(), time.Time{}, vol.ContentType())
			if err != nil {
				return err
			}
			revertDBVolumes = append(revertDBVolumes, newSnapshotName)
		}
	}
	err = b.driver.CreateVolumeFromMigration(vol, conn, args, nil, op)
	if err != nil {
		// Close the connection so the sending end doesn't block waiting for more data.
		conn.Close()
		return err
	}
	b.state.Events.SendLifecycle(projectName, lifecycle.StorageVolumeCreated.Event(vol, string(vol.Type()), projectName, op, log.Ctx{"type": vol.Type()}))
	// Everything succeeded: clear the revert list so the deferred cleanup keeps the DB rows.
	revertDBVolumes = nil
	return nil
}
// RenameCustomVolume renames a custom volume and its snapshots.
// All DB renames (snapshots, backups, then the parent volume) are registered with a revert
// so a failure part-way through restores the original names.
func (b *lxdBackend) RenameCustomVolume(projectName string, volName string, newVolName string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": volName, "newVolName": newVolName})
	logger.Debug("RenameCustomVolume started")
	defer logger.Debug("RenameCustomVolume finished")

	if shared.IsSnapshot(volName) {
		return fmt.Errorf("Volume name cannot be a snapshot")
	}

	if shared.IsSnapshot(newVolName) {
		return fmt.Errorf("New volume name cannot be a snapshot")
	}

	revert := revert.New()
	defer revert.Fail()

	// Load the volume DB row (needed for its content type). Uses b.ID() for consistency
	// with the other DB calls in this function.
	_, volume, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
	if err != nil {
		return err
	}

	// Rename each snapshot to have the new parent volume prefix.
	snapshots, err := VolumeSnapshotsGet(b.state, projectName, b.name, volName, db.StoragePoolVolumeTypeCustom)
	if err != nil {
		return err
	}

	for _, srcSnapshot := range snapshots {
		// Local var for revert closure (the loop variable is reused each iteration, so
		// capturing srcSnapshot directly would make every revert use the last snapshot).
		srcSnapName := srcSnapshot.Name
		_, snapName, _ := shared.InstanceGetParentAndSnapshotName(srcSnapName)
		newSnapVolName := drivers.GetSnapshotVolumeName(newVolName, snapName)
		err = b.state.Cluster.RenameStoragePoolVolume(projectName, srcSnapName, newSnapVolName, db.StoragePoolVolumeTypeCustom, b.ID())
		if err != nil {
			return err
		}

		revert.Add(func() {
			b.state.Cluster.RenameStoragePoolVolume(projectName, newSnapVolName, srcSnapName, db.StoragePoolVolumeTypeCustom, b.ID())
		})
	}

	// Rename each backup to have the new parent volume prefix.
	backups, err := b.state.Cluster.GetStoragePoolVolumeBackups(projectName, volName, b.ID())
	if err != nil {
		return err
	}

	for _, br := range backups {
		backupRow := br // Local var for revert.
		_, backupName, _ := shared.InstanceGetParentAndSnapshotName(backupRow.Name)
		newVolBackupName := drivers.GetSnapshotVolumeName(newVolName, backupName)
		volBackup := backup.NewVolumeBackup(b.state, projectName, b.name, volName, backupRow.ID, backupRow.Name, backupRow.CreationDate, backupRow.ExpiryDate, backupRow.VolumeOnly, backupRow.OptimizedStorage)
		err = volBackup.Rename(newVolBackupName)
		if err != nil {
			return errors.Wrapf(err, "Failed renaming backup %q to %q", backupRow.Name, newVolBackupName)
		}

		revert.Add(func() {
			volBackup.Rename(backupRow.Name)
		})
	}

	// Rename the parent volume DB record itself.
	err = b.state.Cluster.RenameStoragePoolVolume(projectName, volName, newVolName, db.StoragePoolVolumeTypeCustom, b.ID())
	if err != nil {
		return err
	}

	revert.Add(func() {
		b.state.Cluster.RenameStoragePoolVolume(projectName, newVolName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
	})

	// Get the volume name on storage.
	volStorageName := project.StorageVolume(projectName, volName)
	newVolStorageName := project.StorageVolume(projectName, newVolName)

	// There's no need to pass the config as it's not needed when renaming a volume.
	vol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(volume.ContentType), volStorageName, nil)

	err = b.driver.RenameVolume(vol, newVolStorageName, op)
	if err != nil {
		return err
	}

	vol = b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(volume.ContentType), newVolStorageName, nil)
	b.state.Events.SendLifecycle(projectName, lifecycle.StorageVolumeRenamed.Event(vol, string(vol.Type()), projectName, op, log.Ctx{"old_name": volName}))

	revert.Success()
	return nil
}
// detectChangedConfig returns the config that has changed between current and new config maps.
// Also returns a boolean indicating whether all of the changed keys start with "user.".
// Deleted keys will be returned as having an empty string value.
func (b *lxdBackend) detectChangedConfig(curConfig, newConfig map[string]string) (map[string]string, bool) {
	changedConfig := make(map[string]string)
	userOnly := true

	// record notes a key whose value differs between the two maps.
	record := func(key string) {
		if curConfig[key] == newConfig[key] {
			return
		}

		if !strings.HasPrefix(key, "user.") {
			userOnly = false
		}

		changedConfig[key] = newConfig[key] // Empty string for deleted keys.
	}

	// Scan both maps so additions, modifications and deletions are all caught.
	for key := range curConfig {
		record(key)
	}

	for key := range newConfig {
		record(key)
	}

	return changedConfig, userOnly
}
// UpdateCustomVolume applies the supplied config to the custom volume.
// Driver-side updates happen before the DB update, so a driver failure leaves the DB row
// describing the previous (still valid) state.
func (b *lxdBackend) UpdateCustomVolume(projectName string, volName string, newDesc string, newConfig map[string]string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": volName, "newDesc": newDesc, "newConfig": newConfig})
	logger.Debug("UpdateCustomVolume started")
	defer logger.Debug("UpdateCustomVolume finished")
	// Snapshots have their own update path (UpdateCustomVolumeSnapshot).
	if shared.IsSnapshot(volName) {
		return fmt.Errorf("Volume name cannot be a snapshot")
	}
	// Get the volume name on storage.
	volStorageName := project.StorageVolume(projectName, volName)
	// Get current config to compare what has changed.
	_, curVol, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
	if err != nil {
		if err == db.ErrNoSuchObject {
			return errors.Wrapf(err, "Volume doesn't exist")
		}
		return err
	}
	// Get content type.
	dbContentType, err := VolumeContentTypeNameToContentType(curVol.ContentType)
	if err != nil {
		return err
	}
	contentType, err := VolumeDBContentTypeToContentType(dbContentType)
	if err != nil {
		return err
	}
	// Validate the proposed new config against the driver before diffing it.
	newVol := b.GetVolume(drivers.VolumeTypeCustom, contentType, volStorageName, newConfig)
	err = b.driver.ValidateVolume(newVol, false)
	if err != nil {
		return err
	}
	// Apply config changes if there are any.
	changedConfig, userOnly := b.detectChangedConfig(curVol.Config, newConfig)
	if len(changedConfig) != 0 {
		// Check that the volume's block.filesystem property isn't being changed.
		if changedConfig["block.filesystem"] != "" {
			return fmt.Errorf("Custom volume 'block.filesystem' property cannot be changed")
		}
		// Check that security.unmapped and security.shifted aren't set together.
		if shared.IsTrue(newConfig["security.unmapped"]) && shared.IsTrue(newConfig["security.shifted"]) {
			return fmt.Errorf("security.unmapped and security.shifted are mutually exclusive")
		}
		// Check for config changing that is not allowed when running instances are using it.
		if changedConfig["security.shifted"] != "" {
			// Enumerate every instance device that references this volume.
			err = VolumeUsedByInstanceDevices(b.state, b.name, projectName, curVol, true, func(dbInst db.Instance, project db.Project, profiles []api.Profile, usedByDevices []string) error {
				inst, err := instance.Load(b.state, db.InstanceToArgs(&dbInst), profiles)
				if err != nil {
					return err
				}
				// Confirm that no running instances are using it when changing shifted state.
				// (The changedConfig re-check is always true here given the enclosing if.)
				if inst.IsRunning() && changedConfig["security.shifted"] != "" {
					return fmt.Errorf("Cannot modify shifting with running instances using the volume")
				}
				return nil
			})
			if err != nil {
				return err
			}
		}
		// NOTE: this shadows the DB row variable `curVol` with a drivers.Volume until the
		// end of this block; the DB row is visible again below (e.g. curVol.Description).
		curVol := b.GetVolume(drivers.VolumeTypeCustom, contentType, volStorageName, curVol.Config)
		if !userOnly {
			// Only bother the driver when a non-"user." key changed.
			err = b.driver.UpdateVolume(curVol, changedConfig)
			if err != nil {
				return err
			}
		}
	}
	// Unset idmap keys if volume is unmapped.
	if shared.IsTrue(newConfig["security.unmapped"]) {
		delete(newConfig, "volatile.idmap.last")
		delete(newConfig, "volatile.idmap.next")
	}
	// Update the database if something changed.
	if len(changedConfig) != 0 || newDesc != curVol.Description {
		err = b.state.Cluster.UpdateStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID(), newDesc, newConfig)
		if err != nil {
			return err
		}
	}
	b.state.Events.SendLifecycle(projectName, lifecycle.StorageVolumeUpdated.Event(newVol, string(newVol.Type()), projectName, op, nil))
	return nil
}
// UpdateCustomVolumeSnapshot updates the description of a custom volume snapshot.
// Volume config is not allowed to be updated and will return an error.
func (b *lxdBackend) UpdateCustomVolumeSnapshot(projectName string, volName string, newDesc string, newConfig map[string]string, newExpiryDate time.Time, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": volName, "newDesc": newDesc, "newConfig": newConfig, "newExpiryDate": newExpiryDate})
	logger.Debug("UpdateCustomVolumeSnapshot started")
	defer logger.Debug("UpdateCustomVolumeSnapshot finished")

	// Only snapshot volumes may be updated here.
	if !shared.IsSnapshot(volName) {
		return fmt.Errorf("Volume must be a snapshot")
	}

	// Load the current snapshot DB row so the requested changes can be compared.
	volID, dbSnap, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
	if err != nil {
		if err == db.ErrNoSuchObject {
			return errors.Wrapf(err, "Volume doesn't exist")
		}

		return err
	}

	curExpiryDate, err := b.state.Cluster.GetStorageVolumeSnapshotExpiry(volID)
	if err != nil {
		return err
	}

	// Reject any config change; snapshots only allow description/expiry updates.
	if newConfig != nil {
		changedConfig, _ := b.detectChangedConfig(dbSnap.Config, newConfig)
		if len(changedConfig) != 0 {
			return fmt.Errorf("Volume config is not editable")
		}
	}

	// Persist description and/or expiry changes, keeping the current config.
	if newDesc != dbSnap.Description || newExpiryDate != curExpiryDate {
		err = b.state.Cluster.UpdateStorageVolumeSnapshot(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID(), newDesc, dbSnap.Config, newExpiryDate)
		if err != nil {
			return err
		}
	}

	vol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(dbSnap.ContentType), dbSnap.Name, dbSnap.Config)
	b.state.Events.SendLifecycle(projectName, lifecycle.StorageVolumeSnapshotUpdated.Event(vol, string(vol.Type()), projectName, op, nil))

	return nil
}
// DeleteCustomVolume removes a custom volume and its snapshots.
func (b *lxdBackend) DeleteCustomVolume(projectName string, volName string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": volName})
	logger.Debug("DeleteCustomVolume started")
	defer logger.Debug("DeleteCustomVolume finished")

	// Only parent volumes may be deleted here; snapshots have their own delete path.
	if _, _, isSnap := shared.InstanceGetParentAndSnapshotName(volName); isSnap {
		return fmt.Errorf("Volume name cannot be a snapshot")
	}

	// Delete every snapshot first; the parent volume cannot go away while they exist.
	snapshots, err := VolumeSnapshotsGet(b.state, projectName, b.name, volName, db.StoragePoolVolumeTypeCustom)
	if err != nil {
		return err
	}

	for _, snapshot := range snapshots {
		err = b.DeleteCustomVolumeSnapshot(projectName, snapshot.Name, op)
		if err != nil {
			return err
		}
	}

	// Load the volume DB row to determine its content type.
	_, poolVol, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
	if err != nil {
		return err
	}

	dbContentType, err := VolumeContentTypeNameToContentType(poolVol.ContentType)
	if err != nil {
		return err
	}

	contentType, err := VolumeDBContentTypeToContentType(dbContentType)
	if err != nil {
		return err
	}

	// There's no need to pass config as it's not needed when deleting a volume.
	volStorageName := project.StorageVolume(projectName, volName)
	vol := b.GetVolume(drivers.VolumeTypeCustom, contentType, volStorageName, nil)

	// Delete the volume from the storage device. Must come after snapshots are removed.
	if b.driver.HasVolume(vol) {
		err = b.driver.DeleteVolume(vol, op)
		if err != nil {
			return err
		}
	}

	// Remove backups directory for volume.
	backupsPath := shared.VarPath("backups", "custom", b.name, project.StorageVolume(projectName, volName))
	if shared.PathExists(backupsPath) {
		err := os.RemoveAll(backupsPath)
		if err != nil {
			return err
		}
	}

	// Finally, remove the volume record from the database.
	err = b.state.Cluster.RemoveStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
	if err != nil {
		return err
	}

	b.state.Events.SendLifecycle(projectName, lifecycle.StorageVolumeDeleted.Event(vol, string(vol.Type()), projectName, op, nil))

	return nil
}
// GetCustomVolumeDisk returns the location of the disk.
func (b *lxdBackend) GetCustomVolumeDisk(projectName, volName string) (string, error) {
	_, dbVol, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.id)
	if err != nil {
		return "", err
	}

	// No config needed to resolve the disk path, so pass nil.
	vol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(dbVol.ContentType), project.StorageVolume(projectName, volName), nil)

	return b.driver.GetVolumeDiskPath(vol)
}
// GetCustomVolumeUsage returns the disk space used by the custom volume.
func (b *lxdBackend) GetCustomVolumeUsage(projectName, volName string) (int64, error) {
	_, dbVol, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.id)
	if err != nil {
		return -1, err
	}

	// No config needed to query usage, so pass nil.
	vol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(dbVol.ContentType), project.StorageVolume(projectName, volName), nil)

	return b.driver.GetVolumeUsage(vol)
}
// MountCustomVolume mounts a custom volume.
func (b *lxdBackend) MountCustomVolume(projectName, volName string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": volName})
	logger.Debug("MountCustomVolume started")
	defer logger.Debug("MountCustomVolume finished")

	_, dbVol, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.id)
	if err != nil {
		return err
	}

	// The stored volume config is needed so the driver can apply mount options.
	vol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(dbVol.ContentType), project.StorageVolume(projectName, volName), dbVol.Config)

	return b.driver.MountVolume(vol, op)
}
// UnmountCustomVolume unmounts a custom volume.
func (b *lxdBackend) UnmountCustomVolume(projectName, volName string, op *operations.Operation) (bool, error) {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": volName})
	logger.Debug("UnmountCustomVolume started")
	defer logger.Debug("UnmountCustomVolume finished")

	_, dbVol, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.id)
	if err != nil {
		return false, err
	}

	// The stored volume config is needed so the driver can unmount correctly.
	vol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(dbVol.ContentType), project.StorageVolume(projectName, volName), dbVol.Config)

	return b.driver.UnmountVolume(vol, false, op)
}
// ImportCustomVolume takes an existing custom volume on the storage backend and ensures that the DB records,
// volume directories and symlinks are restored as needed to make it operational with LXD.
// Used during the recovery import stage. DB records are registered with a revert so a
// failure part-way through removes them again.
func (b *lxdBackend) ImportCustomVolume(projectName string, poolVol backup.Config, op *operations.Operation) error {
	if poolVol.Volume == nil {
		return fmt.Errorf("Invalid pool volume config supplied")
	}
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": poolVol.Volume.Name})
	logger.Debug("ImportCustomVolume started")
	defer logger.Debug("ImportCustomVolume finished")
	revert := revert.New()
	defer revert.Fail()
	// Create the storage volume DB records.
	err := VolumeDBCreate(b.state, b, projectName, poolVol.Volume.Name, poolVol.Volume.Description, drivers.VolumeTypeCustom, false, poolVol.Volume.Config, time.Time{}, drivers.ContentType(poolVol.Volume.ContentType))
	if err != nil {
		return errors.Wrapf(err, "Failed creating custom volume %q record in project %q", poolVol.Volume.Name, projectName)
	}
	revert.Add(func() {
		b.state.Cluster.RemoveStoragePoolVolume(projectName, poolVol.Volume.Name, db.StoragePoolVolumeTypeCustom, b.ID())
	})
	// Create the storage volume snapshot DB records.
	for _, poolVolSnap := range poolVol.VolumeSnapshots {
		// fullSnapName is declared per iteration, so it is safe to capture in the revert closure.
		fullSnapName := drivers.GetSnapshotVolumeName(poolVol.Volume.Name, poolVolSnap.Name)
		err = VolumeDBCreate(b.state, b, projectName, fullSnapName, poolVolSnap.Description, drivers.VolumeTypeCustom, true, poolVolSnap.Config, time.Time{}, drivers.ContentType(poolVolSnap.ContentType))
		if err != nil {
			return err
		}
		revert.Add(func() {
			b.state.Cluster.RemoveStoragePoolVolume(projectName, fullSnapName, db.StoragePoolVolumeTypeCustom, b.ID())
		})
	}
	// Get the volume name on storage.
	volStorageName := project.StorageVolume(projectName, poolVol.Volume.Name)
	vol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(poolVol.Volume.ContentType), volStorageName, poolVol.Volume.Config)
	// Create the mount path if needed.
	err = vol.EnsureMountPath()
	if err != nil {
		return err
	}
	// Create snapshot mount paths and snapshot parent directory if needed.
	for _, poolVolSnap := range poolVol.VolumeSnapshots {
		logger.Debug("Ensuring instance snapshot mount path", log.Ctx{"snapshot": poolVolSnap.Name})
		snapVol, err := vol.NewSnapshot(poolVolSnap.Name)
		if err != nil {
			return err
		}
		err = snapVol.EnsureMountPath()
		if err != nil {
			return err
		}
	}
	revert.Success()
	return nil
}
// CreateCustomVolumeSnapshot creates a snapshot of a custom volume.
func (b *lxdBackend) CreateCustomVolumeSnapshot(projectName, volName string, newSnapshotName string, newExpiryDate time.Time, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": volName, "newSnapshotName": newSnapshotName, "newExpiryDate": newExpiryDate})
	logger.Debug("CreateCustomVolumeSnapshot started")
	defer logger.Debug("CreateCustomVolumeSnapshot finished")

	// Quick checks: the parent must not itself be a snapshot, and the new name must be a bare
	// snapshot name (no "parent/snap" separator).
	if shared.IsSnapshot(volName) {
		return fmt.Errorf("Volume cannot be snapshot")
	}

	if shared.IsSnapshot(newSnapshotName) {
		return fmt.Errorf("Snapshot name is not a valid snapshot name")
	}

	fullSnapshotName := drivers.GetSnapshotVolumeName(volName, newSnapshotName)

	// Check snapshot volume doesn't exist already.
	_, _, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, fullSnapshotName, db.StoragePoolVolumeTypeCustom, b.ID())
	if err != db.ErrNoSuchObject {
		if err != nil {
			return err
		}

		return fmt.Errorf("Snapshot by that name already exists")
	}

	// Load parent volume information and check it exists.
	_, parentVol, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
	if err != nil {
		if err == db.ErrNoSuchObject {
			return fmt.Errorf("Parent volume doesn't exist")
		}

		return err
	}

	// Use the revert pattern (as elsewhere in this file) so the DB record created below is
	// removed again if the storage-level snapshot fails.
	revert := revert.New()
	defer revert.Fail()

	// Create database entry for new storage volume snapshot.
	err = VolumeDBCreate(b.state, b, projectName, fullSnapshotName, parentVol.Description, drivers.VolumeTypeCustom, true, parentVol.Config, newExpiryDate, drivers.ContentType(parentVol.ContentType))
	if err != nil {
		return err
	}

	revert.Add(func() {
		b.state.Cluster.RemoveStoragePoolVolume(projectName, fullSnapshotName, db.StoragePoolVolumeTypeCustom, b.ID())
	})

	volDBContentType, err := VolumeContentTypeNameToContentType(parentVol.ContentType)
	if err != nil {
		return err
	}

	contentType, err := VolumeDBContentTypeToContentType(volDBContentType)
	if err != nil {
		return err
	}

	// Get the volume name on storage.
	volStorageName := project.StorageVolume(projectName, fullSnapshotName)
	vol := b.GetVolume(drivers.VolumeTypeCustom, contentType, volStorageName, parentVol.Config)

	// Create the snapshot on the storage device.
	err = b.driver.CreateVolumeSnapshot(vol, op)
	if err != nil {
		return err
	}

	b.state.Events.SendLifecycle(projectName, lifecycle.StorageVolumeSnapshotCreated.Event(vol, string(vol.Type()), projectName, op, log.Ctx{"type": vol.Type()}))

	revert.Success()
	return nil
}
// RenameCustomVolumeSnapshot renames a custom volume.
func (b *lxdBackend) RenameCustomVolumeSnapshot(projectName, volName string, newSnapshotName string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": volName, "newSnapshotName": newSnapshotName})
	logger.Debug("RenameCustomVolumeSnapshot started")
	defer logger.Debug("RenameCustomVolumeSnapshot finished")

	// The volume being renamed must itself be a snapshot.
	parentName, oldSnapshotName, isSnap := shared.InstanceGetParentAndSnapshotName(volName)
	if !isSnap {
		return fmt.Errorf("Volume name must be a snapshot")
	}

	// The new name must be a bare snapshot name without a parent prefix.
	if shared.IsSnapshot(newSnapshotName) {
		return fmt.Errorf("Invalid new snapshot name")
	}

	// Fetch the DB record so we know the volume's content type.
	_, dbVol, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
	if err != nil {
		return err
	}

	// There's no need to pass config as it's not needed when renaming a volume.
	vol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(dbVol.ContentType), project.StorageVolume(projectName, volName), nil)

	// Rename on the storage device first.
	err = b.driver.RenameVolumeSnapshot(vol, newSnapshotName, op)
	if err != nil {
		return err
	}

	// Then rename the DB record; if that fails, undo the storage-level rename.
	newVolName := drivers.GetSnapshotVolumeName(parentName, newSnapshotName)
	err = b.state.Cluster.RenameStoragePoolVolume(projectName, volName, newVolName, db.StoragePoolVolumeTypeCustom, b.ID())
	if err != nil {
		newVol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(dbVol.ContentType), project.StorageVolume(projectName, newVolName), nil)
		b.driver.RenameVolumeSnapshot(newVol, oldSnapshotName, op)
		return err
	}

	b.state.Events.SendLifecycle(projectName, lifecycle.StorageVolumeSnapshotRenamed.Event(vol, string(vol.Type()), projectName, op, log.Ctx{"old_name": oldSnapshotName}))

	return nil
}
// DeleteCustomVolumeSnapshot removes a custom volume snapshot.
func (b *lxdBackend) DeleteCustomVolumeSnapshot(projectName, volName string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": volName})
	logger.Debug("DeleteCustomVolumeSnapshot started")
	defer logger.Debug("DeleteCustomVolumeSnapshot finished")

	if !shared.IsSnapshot(volName) {
		return fmt.Errorf("Volume name must be a snapshot")
	}

	// Load the snapshot's DB record to work out its content type.
	_, dbVol, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
	if err != nil {
		return err
	}

	dbContentType, err := VolumeContentTypeNameToContentType(dbVol.ContentType)
	if err != nil {
		return err
	}

	contentType, err := VolumeDBContentTypeToContentType(dbContentType)
	if err != nil {
		return err
	}

	// There's no need to pass config as it's not needed when deleting a volume snapshot.
	vol := b.GetVolume(drivers.VolumeTypeCustom, contentType, project.StorageVolume(projectName, volName), nil)

	// Delete the snapshot from the storage device.
	// Must come before DB RemoveStoragePoolVolume so that the volume ID is still available.
	if b.driver.HasVolume(vol) {
		err = b.driver.DeleteVolumeSnapshot(vol, op)
		if err != nil {
			return err
		}
	}

	// Remove the snapshot volume record from the database.
	err = b.state.Cluster.RemoveStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
	if err != nil {
		return err
	}

	b.state.Events.SendLifecycle(projectName, lifecycle.StorageVolumeSnapshotDeleted.Event(vol, string(vol.Type()), projectName, op, nil))

	return nil
}
// RestoreCustomVolume restores a custom volume from a snapshot.
func (b *lxdBackend) RestoreCustomVolume(projectName, volName string, snapshotName string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": volName, "snapshotName": snapshotName})
	logger.Debug("RestoreCustomVolume started")
	defer logger.Debug("RestoreCustomVolume finished")

	// Quick checks.
	if shared.IsSnapshot(volName) {
		return fmt.Errorf("Volume cannot be snapshot")
	}

	if shared.IsSnapshot(snapshotName) {
		return fmt.Errorf("Invalid snapshot name")
	}

	// Get current volume (provides both usage info and content type/config below).
	_, curVol, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
	if err != nil {
		if err == db.ErrNoSuchObject {
			return errors.Wrapf(err, "Volume doesn't exist")
		}

		return err
	}

	// Check that the volume isn't in use by running instances.
	err = VolumeUsedByInstanceDevices(b.state, b.Name(), projectName, curVol, true, func(dbInst db.Instance, project db.Project, profiles []api.Profile, usedByDevices []string) error {
		inst, err := instance.Load(b.state, db.InstanceToArgs(&dbInst), profiles)
		if err != nil {
			return err
		}

		if inst.IsRunning() {
			return fmt.Errorf("Cannot restore custom volume used by running instances")
		}

		return nil
	})
	if err != nil {
		return err
	}

	// Derive the driver-level content type from the DB record fetched above.
	// (Previously the record was fetched a second time here with identical arguments.)
	dbContentType, err := VolumeContentTypeNameToContentType(curVol.ContentType)
	if err != nil {
		return err
	}

	contentType, err := VolumeDBContentTypeToContentType(dbContentType)
	if err != nil {
		return err
	}

	// Get the volume name on storage.
	volStorageName := project.StorageVolume(projectName, volName)
	vol := b.GetVolume(drivers.VolumeTypeCustom, contentType, volStorageName, curVol.Config)

	err = b.driver.RestoreVolume(vol, snapshotName, op)
	if err != nil {
		snapErr, ok := err.(drivers.ErrDeleteSnapshots)
		if !ok {
			return err
		}

		// The driver needs some snapshots removed before it can restore; delete them and retry.
		for _, snapName := range snapErr.Snapshots {
			err := b.DeleteCustomVolumeSnapshot(projectName, fmt.Sprintf("%s/%s", volName, snapName), op)
			if err != nil {
				return err
			}
		}

		// Now try again.
		err = b.driver.RestoreVolume(vol, snapshotName, op)
		if err != nil {
			return err
		}
		// Fall through so the lifecycle event is also sent on the successful retry path
		// (previously the event was skipped after a successful retry).
	}

	b.state.Events.SendLifecycle(projectName, lifecycle.StorageVolumeRestored.Event(vol, string(vol.Type()), projectName, op, log.Ctx{"snapshot": snapshotName}))

	return nil
}
// createStorageStructure creates the base directory layout for each volume type supported by
// the pool's driver beneath the supplied path. Pre-existing directories are not an error.
func (b *lxdBackend) createStorageStructure(path string) error {
	for _, volType := range b.driver.Info().VolumeTypes {
		for _, name := range drivers.BaseDirectories[volType] {
			dirPath := filepath.Join(path, name)

			err := os.MkdirAll(dirPath, 0711)
			if err != nil && !os.IsExist(err) {
				return errors.Wrapf(err, "Failed to create directory %q", dirPath)
			}
		}
	}

	return nil
}
// UpdateInstanceBackupFile writes the instance's config to the backup.yaml file on the storage device.
func (b *lxdBackend) UpdateInstanceBackupFile(inst instance.Instance, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name()})
	logger.Debug("UpdateInstanceBackupFile started")
	defer logger.Debug("UpdateInstanceBackupFile finished")
	// We only write backup files out for actual instances.
	if inst.IsSnapshot() {
		return nil
	}
	// Immediately return if the instance directory doesn't exist yet.
	if !shared.PathExists(inst.Path()) {
		return os.ErrNotExist
	}
	// Generate the YAML.
	ci, _, err := inst.Render()
	if err != nil {
		return errors.Wrap(err, "Failed to render instance metadata")
	}
	snapshots, err := inst.Snapshots()
	if err != nil {
		return errors.Wrap(err, "Failed to get snapshots")
	}
	// Render each snapshot into its API representation for inclusion in the backup file.
	var sis []*api.InstanceSnapshot
	for _, s := range snapshots {
		si, _, err := s.Render()
		if err != nil {
			return err
		}
		sis = append(sis, si.(*api.InstanceSnapshot))
	}
	// Map the instance type to its volume type and DB type so the volume record can be looked up.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}
	volDBType, err := VolumeTypeToDBType(volType)
	if err != nil {
		return err
	}
	contentType := InstanceContentType(inst)
	_, volume, err := b.state.Cluster.GetLocalStoragePoolVolume(inst.Project(), inst.Name(), volDBType, b.ID())
	if err != nil {
		return err
	}
	// Marshal the instance, snapshot, pool and volume info into the backup config YAML.
	data, err := yaml.Marshal(&backup.Config{
		Container: ci.(*api.Instance),
		Snapshots: sis,
		Pool: &b.db,
		Volume: volume,
	})
	if err != nil {
		return err
	}
	// Get the volume name on storage.
	volStorageName := project.Instance(inst.Project(), inst.Name())
	vol := b.GetVolume(volType, contentType, volStorageName, volume.Config)
	// Update pool information in the backup.yaml file.
	// The volume is mounted for the duration of the write via MountTask.
	err = vol.MountTask(func(mountPath string, op *operations.Operation) error {
		// Write the YAML
		path := filepath.Join(inst.Path(), "backup.yaml")
		f, err := os.Create(path)
		if err != nil {
			return errors.Wrapf(err, "Failed to create file %q", path)
		}
		defer f.Close()
		// Make the file read-only for the owner (0400).
		err = f.Chmod(0400)
		if err != nil {
			return err
		}
		err = shared.WriteAll(f, data)
		if err != nil {
			return err
		}
		return nil
	}, op)
	return err
}
// CheckInstanceBackupFileSnapshots compares the snapshots on the storage device to those defined in the backup
// config supplied and returns an error if they do not match (if deleteMissing argument is false).
// If deleteMissing argument is true, then any snapshots that exist on the storage device but not in the backup
// config are removed from the storage device, and any snapshots that exist in the backup config but do not exist
// on the storage device are ignored. The remaining set of snapshots that exist on both the storage device and the
// backup config are returned. This set can be used to re-create the snapshot database entries when importing.
func (b *lxdBackend) CheckInstanceBackupFileSnapshots(backupConf *backup.Config, projectName string, deleteMissing bool, op *operations.Operation) ([]*api.InstanceSnapshot, error) {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "instance": backupConf.Container.Name, "deleteMissing": deleteMissing})
	logger.Debug("CheckInstanceBackupFileSnapshots started")
	defer logger.Debug("CheckInstanceBackupFileSnapshots finished")
	instType, err := instancetype.New(string(backupConf.Container.Type))
	if err != nil {
		return nil, err
	}
	volType, err := InstanceTypeToVolumeType(instType)
	if err != nil {
		return nil, err
	}
	// Get the volume name on storage.
	volStorageName := project.Instance(projectName, backupConf.Container.Name)
	// VM volumes use block content; everything else is treated as a filesystem volume.
	contentType := drivers.ContentTypeFS
	if volType == drivers.VolumeTypeVM {
		contentType = drivers.ContentTypeBlock
	}
	// We don't need to use the volume's config for mounting so set to nil.
	vol := b.GetVolume(volType, contentType, volStorageName, nil)
	// Get a list of snapshots that exist on storage device.
	driverSnapshots, err := vol.Snapshots(op)
	if err != nil {
		return nil, err
	}
	// A count mismatch is only fatal when reconciliation by deletion isn't allowed.
	if len(backupConf.Snapshots) != len(driverSnapshots) {
		if !deleteMissing {
			return nil, errors.Wrap(ErrBackupSnapshotsMismatch, "Snapshot count in backup config and storage device are different")
		}
	}
	// Check (and optionally delete) snapshots that do not exist in backup config.
	for _, driverSnapVol := range driverSnapshots {
		_, driverSnapOnly, _ := shared.InstanceGetParentAndSnapshotName(driverSnapVol.Name())
		inBackupFile := false
		for _, backupFileSnap := range backupConf.Snapshots {
			backupFileSnapOnly := backupFileSnap.Name
			if driverSnapOnly == backupFileSnapOnly {
				inBackupFile = true
				break
			}
		}
		if inBackupFile {
			continue
		}
		if !deleteMissing {
			return nil, errors.Wrapf(ErrBackupSnapshotsMismatch, "Snapshot %q exists on storage device but not in backup config", driverSnapOnly)
		}
		err = b.driver.DeleteVolumeSnapshot(driverSnapVol, op)
		if err != nil {
			return nil, errors.Wrapf(err, "Failed to delete snapshot %q", driverSnapOnly)
		}
		logger.Warn("Deleted snapshot as not present in backup config", log.Ctx{"snapshot": driverSnapOnly})
	}
	// Check the snapshots in backup config exist on storage device.
	existingSnapshots := []*api.InstanceSnapshot{}
	for _, backupFileSnap := range backupConf.Snapshots {
		backupFileSnapOnly := backupFileSnap.Name
		onStorageDevice := false
		for _, driverSnapVol := range driverSnapshots {
			_, driverSnapOnly, _ := shared.InstanceGetParentAndSnapshotName(driverSnapVol.Name())
			if driverSnapOnly == backupFileSnapOnly {
				onStorageDevice = true
				break
			}
		}
		if !onStorageDevice {
			if !deleteMissing {
				return nil, errors.Wrapf(ErrBackupSnapshotsMismatch, "Snapshot %q exists in backup config but not on storage device", backupFileSnapOnly)
			}
			logger.Warn("Skipped snapshot in backup config as not present on storage device", log.Ctx{"snapshot": backupFileSnap})
			continue // Skip snapshots missing on storage device.
		}
		existingSnapshots = append(existingSnapshots, backupFileSnap)
	}
	return existingSnapshots, nil
}
// ListUnknownVolumes returns volumes that exist on the storage pool but don't have records in the database.
// Returns the unknown volumes parsed/generated backup config in a slice (keyed on project name).
func (b *lxdBackend) ListUnknownVolumes(op *operations.Operation) (map[string][]*backup.Config, error) {
	// Get a list of volumes on the storage pool. We only expect to get 1 volume per logical LXD volume.
	// So for VMs we only expect to get the block volume for a VM and not its filesystem one too. This way we
	// can operate on the volume using the existing storage pool functions and let the pool then handle the
	// associated filesystem volume as needed.
	poolVols, err := b.driver.ListVolumes()
	if err != nil {
		return nil, errors.Wrapf(err, "Failed getting pool volumes")
	}

	projectVols := make(map[string][]*backup.Config)

	for _, poolVol := range poolVols {
		volType := poolVol.Type()

		// If the storage driver has returned a filesystem volume for a VM, this is a break of protocol.
		if volType == drivers.VolumeTypeVM && poolVol.ContentType() == drivers.ContentTypeFS {
			return nil, fmt.Errorf("Storage driver returned unexpected VM volume with filesystem content type (%q)", poolVol.Name())
		}

		// Dispatch to the appropriate detector for the volume's type.
		switch volType {
		case drivers.VolumeTypeVM, drivers.VolumeTypeContainer:
			err = b.detectUnknownInstanceVolume(&poolVol, projectVols, op)
			if err != nil {
				return nil, err
			}
		case drivers.VolumeTypeCustom:
			err = b.detectUnknownCustomVolume(&poolVol, projectVols, op)
			if err != nil {
				return nil, err
			}
		}
	}

	return projectVols, nil
}
// detectUnknownInstanceVolume detects if a volume is unknown and if so attempts to mount the volume and parse the
// backup stored on it. It then runs a series of consistency checks that compare the contents of the backup file to
// the state of the volume on disk, and if all checks out, it adds the parsed backup file contents to projectVols.
func (b *lxdBackend) detectUnknownInstanceVolume(vol *drivers.Volume, projectVols map[string][]*backup.Config, op *operations.Operation) error {
	volType := vol.Type()
	volDBType, err := VolumeTypeToDBType(volType)
	if err != nil {
		return err
	}
	// Split the on-storage volume name back into project and instance name.
	projectName, instName := project.InstanceParts(vol.Name())
	// Check if an entry for the instance already exists in the DB.
	instID, err := b.state.Cluster.GetInstanceID(projectName, instName)
	if err != nil && errors.Cause(err) != db.ErrNoSuchObject {
		return err
	}
	instSnapshots, err := b.state.Cluster.GetInstanceSnapshotsNames(projectName, instName)
	if err != nil {
		return err
	}
	// Check if any entry for the instance volume already exists in the DB.
	// This will return no record for any temporary pool structs being used (as ID is -1).
	volID, _, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, instName, volDBType, b.ID())
	if err != nil && errors.Cause(err) != db.ErrNoSuchObject {
		return err
	}
	// Recovery is only attempted when both the instance and volume DB records are absent.
	// A partial state (one present without the other) is reported as an error instead.
	if instID > 0 && volID > 0 {
		return nil // Instance record and storage record already exists in DB, no recovery needed.
	} else if instID > 0 {
		return fmt.Errorf("Instance %q in project %q already has instance DB record", instName, projectName)
	} else if volID > 0 {
		return fmt.Errorf("Instance %q in project %q already has storage DB record", instName, projectName)
	}
	backupYamlPath := filepath.Join(vol.MountPath(), "backup.yaml")
	var backupConf *backup.Config
	// If the instance is running, it should already be mounted, so check if the backup file
	// is already accessible, and if so parse it directly, without disturbing the mount count.
	if shared.PathExists(backupYamlPath) {
		backupConf, err = backup.ParseConfigYamlFile(backupYamlPath)
		if err != nil {
			return errors.Wrapf(err, "Failed parsing backup file %q", backupYamlPath)
		}
	} else {
		// We won't know what filesystem some block backed volumes are using, so ask the storage
		// driver to probe the block device for us (if appropriate).
		vol.SetMountFilesystemProbe(true)
		// If backup file not accessible, we take this to mean the instance isn't running
		// and so we need to mount the volume to access the backup file and then unmount.
		// This will also create the mount path if needed.
		err = vol.MountTask(func(_ string, _ *operations.Operation) error {
			backupConf, err = backup.ParseConfigYamlFile(backupYamlPath)
			if err != nil {
				return errors.Wrapf(err, "Failed parsing backup file %q", backupYamlPath)
			}
			return nil
		}, op)
		if err != nil {
			return err
		}
	}
	// Run some consistency checks on the backup file contents.
	if backupConf.Pool != nil {
		if backupConf.Pool.Name != b.name {
			return fmt.Errorf("Instance %q in project %q has pool name mismatch in its backup file (%q doesn't match's pool's %q)", instName, projectName, backupConf.Pool.Name, b.name)
		}
		if backupConf.Pool.Driver != b.Driver().Info().Name {
			return fmt.Errorf("Instance %q in project %q has pool driver mismatch in its backup file (%q doesn't match's pool's %q)", instName, projectName, backupConf.Pool.Driver, b.Driver().Name())
		}
	}
	if backupConf.Container == nil {
		return fmt.Errorf("Instance %q in project %q has no instance information in its backup file", instName, projectName)
	}
	if instName != backupConf.Container.Name {
		return fmt.Errorf("Instance %q in project %q has a different instance name in its backup file (%q)", instName, projectName, backupConf.Container.Name)
	}
	// Check the instance type recorded in the backup file matches the volume type found on storage.
	apiInstType, err := VolumeTypeToAPIInstanceType(volType)
	if err != nil {
		return errors.Wrapf(err, "Failed checking instance type for instance %q in project %q", instName, projectName)
	}
	if apiInstType != api.InstanceType(backupConf.Container.Type) {
		return fmt.Errorf("Instance %q in project %q has a different instance type in its backup file (%q)", instName, projectName, backupConf.Container.Type)
	}
	if backupConf.Volume == nil {
		return fmt.Errorf("Instance %q in project %q has no volume information in its backup file", instName, projectName)
	}
	if instName != backupConf.Volume.Name {
		return fmt.Errorf("Instance %q in project %q has a different volume name in its backup file (%q)", instName, projectName, backupConf.Volume.Name)
	}
	// Check the volume type recorded in the backup file matches the volume type found on storage.
	instVolDBType, err := VolumeTypeNameToDBType(backupConf.Volume.Type)
	if err != nil {
		return errors.Wrapf(err, "Failed checking instance volume type for instance %q in project %q", instName, projectName)
	}
	instVolType, err := VolumeDBTypeToType(instVolDBType)
	if err != nil {
		return errors.Wrapf(err, "Failed checking instance volume type for instance %q in project %q", instName, projectName)
	}
	if volType != instVolType {
		return fmt.Errorf("Instance %q in project %q has a different volume type in its backup file (%q)", instName, projectName, backupConf.Volume.Type)
	}
	// Add to volume to unknown volumes list for the project.
	if projectVols[projectName] == nil {
		projectVols[projectName] = []*backup.Config{backupConf}
	} else {
		projectVols[projectName] = append(projectVols[projectName], backupConf)
	}
	// Check snapshots are consistent between storage layer and backup config file.
	_, err = b.CheckInstanceBackupFileSnapshots(backupConf, projectName, false, nil)
	if err != nil {
		return fmt.Errorf("Instance %q in project %q has snapshot inconsistency: %v", instName, projectName, err)
	}
	// Check there are no existing DB records present for snapshots.
	for _, snapshot := range backupConf.Snapshots {
		fullSnapshotName := drivers.GetSnapshotVolumeName(instName, snapshot.Name)
		// Check if an entry for the instance already exists in the DB.
		if shared.StringInSlice(fullSnapshotName, instSnapshots) {
			return fmt.Errorf("Instance %q snapshot %q in project %q already has instance DB record", instName, snapshot.Name, projectName)
		}
		// Check if any entry for the instance snapshot volume already exists in the DB.
		// This will return no record for any temporary pool structs being used (as ID is -1).
		volID, _, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, fullSnapshotName, volDBType, b.ID())
		if err != nil && errors.Cause(err) != db.ErrNoSuchObject {
			return err
		}
		if volID > 0 {
			return fmt.Errorf("Instance %q snapshot %q in project %q already has storage DB record", instName, snapshot.Name, projectName)
		}
	}
	return nil
}
// detectUnknownCustomVolume detects if a volume is unknown and if so attempts to discover the filesystem of the
// volume (for filesystem volumes). It then runs a series of consistency checks, and if all checks out, it
// generates a simulated backup config for the custom volume and adds it to projectVols.
func (b *lxdBackend) detectUnknownCustomVolume(vol *drivers.Volume, projectVols map[string][]*backup.Config, op *operations.Operation) error {
	volType := vol.Type()
	volDBType, err := VolumeTypeToDBType(volType)
	if err != nil {
		return err
	}

	projectName, volName := project.StorageVolumeParts(vol.Name())

	// Check if any entry for the custom volume already exists in the DB.
	// This will return no record for any temporary pool structs being used (as ID is -1).
	volID, _, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, volDBType, b.ID())
	if err != nil && errors.Cause(err) != db.ErrNoSuchObject {
		return err
	}

	if volID > 0 {
		return nil // Storage record already exists in DB, no recovery needed.
	}

	// Get a list of snapshots that exist on storage device.
	snapshots, err := b.driver.VolumeSnapshots(*vol, op)
	if err != nil {
		return err
	}

	contentType := vol.ContentType()
	var apiContentType string

	if contentType == drivers.ContentTypeBlock {
		apiContentType = db.StoragePoolVolumeContentTypeNameBlock
	} else if contentType == drivers.ContentTypeFS {
		apiContentType = db.StoragePoolVolumeContentTypeNameFS

		// Detect block volume filesystem (by mounting it (if not already) with filesystem probe mode).
		if b.driver.Info().BlockBacking {
			var blockFS string
			mountPath := vol.MountPath()
			if filesystem.IsMountPoint(mountPath) {
				blockFS, err = filesystem.Detect(mountPath)
				if err != nil {
					return err
				}
			} else {
				vol.SetMountFilesystemProbe(true)
				// Check the MountTask error (previously ignored) so a failed mount or
				// filesystem detection doesn't silently record an empty filesystem.
				err = vol.MountTask(func(mountPath string, op *operations.Operation) error {
					blockFS, err = filesystem.Detect(mountPath)
					if err != nil {
						return err
					}

					return nil
				}, op)
				if err != nil {
					return err
				}
			}

			// Record detected filesystem in config.
			vol.Config()["block.filesystem"] = blockFS
		}
	} else {
		return fmt.Errorf("Unknown custom volume content type %q", contentType)
	}

	// This may not always be the correct thing to do, but seeing as we don't know what the volume's config
	// was lets take a best guess that it was the default config.
	err = b.driver.FillVolumeConfig(*vol)
	if err != nil {
		return errors.Wrapf(err, "Failed filling custom volume default config")
	}

	// Check the filesystem detected is valid for the storage driver.
	err = b.driver.ValidateVolume(*vol, false)
	if err != nil {
		return errors.Wrapf(err, "Failed custom volume validation")
	}

	backupConf := &backup.Config{
		Volume: &api.StorageVolume{
			Name:        volName,
			Type:        db.StoragePoolVolumeTypeNameCustom,
			ContentType: apiContentType,
			StorageVolumePut: api.StorageVolumePut{
				Config: vol.Config(),
			},
		},
	}

	// Populate snaphot volumes.
	for _, snapOnlyName := range snapshots {
		backupConf.VolumeSnapshots = append(backupConf.VolumeSnapshots, &api.StorageVolumeSnapshot{
			Name:        snapOnlyName, // Snapshot only name, not full name.
			Config:      vol.Config(), // Have to assume the snapshot volume config is same as parent.
			ContentType: apiContentType,
		})
	}

	// Add to volume to unknown volumes list for the project.
	if projectVols[projectName] == nil {
		projectVols[projectName] = []*backup.Config{backupConf}
	} else {
		projectVols[projectName] = append(projectVols[projectName], backupConf)
	}

	return nil
}
// ImportInstance takes an existing instance volume on the storage backend and ensures that the volume directories
// and symlinks are restored as needed to make it operational with LXD. Used during the recovery import stage.
// If the instance exists on the local cluster member then the local mount status is restored as needed.
func (b *lxdBackend) ImportInstance(inst instance.Instance, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name()})
	logger.Debug("ImportInstance started")
	defer logger.Debug("ImportInstance finished")
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}
	// Get any snapshots the instance has in the format <instance name>/<snapshot name>.
	snapshots, err := b.state.Cluster.GetInstanceSnapshotsNames(inst.Project(), inst.Name())
	if err != nil {
		return err
	}
	// Get local cluster member name.
	var nodeName string
	err = b.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
		nodeName, err = tx.GetLocalNodeName()
		return err
	})
	if err != nil {
		return errors.Wrap(err, "Failed getting local cluster member name")
	}
	// Symlink creation below is undone again unless we reach revert.Success().
	revert := revert.New()
	defer revert.Fail()
	contentType := InstanceContentType(inst)
	// Get the volume name on storage.
	volStorageName := project.Instance(inst.Project(), inst.Name())
	// Config is passed as nil here; it isn't used for the mount path operations below.
	vol := b.GetVolume(volType, contentType, volStorageName, nil)
	err = vol.EnsureMountPath()
	if err != nil {
		return err
	}
	// Only attempt to restore mount status on instance's local cluster member.
	if inst.Location() == nodeName {
		logger.Debug("Restoring local instance mount status")
		if inst.IsRunning() {
			// If the instance is running then this implies the volume is mounted, but if the LXD daemon has
			// been restarted since the DB records were removed then there will be no mount reference counter
			// showing the volume is in use. If this is the case then mount the volume to increment the
			// reference counter.
			if !vol.MountInUse() {
				_, err = b.MountInstance(inst, op)
				if err != nil {
					return errors.Wrapf(err, "Failed mounting instance")
				}
			}
		} else {
			// If the instance isn't running then try and unmount it to ensure consistent state after import.
			_, err = b.UnmountInstance(inst, op)
			if err != nil {
				return errors.Wrapf(err, "Failed unmounting instance")
			}
		}
	}
	// Create symlink.
	err = b.ensureInstanceSymlink(inst.Type(), inst.Project(), inst.Name(), vol.MountPath())
	if err != nil {
		return err
	}
	revert.Add(func() {
		// Remove symlinks.
		b.removeInstanceSymlink(inst.Type(), inst.Project(), inst.Name())
		b.removeInstanceSnapshotSymlinkIfUnused(inst.Type(), inst.Project(), inst.Name())
	})
	// Create snapshot mount paths and snapshot symlink if needed.
	if len(snapshots) > 0 {
		for _, snapName := range snapshots {
			_, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snapName)
			logger.Debug("Ensuring instance snapshot mount path", log.Ctx{"snapshot": snapOnlyName})
			snapVol, err := vol.NewSnapshot(snapOnlyName)
			if err != nil {
				return err
			}
			err = snapVol.EnsureMountPath()
			if err != nil {
				return err
			}
		}
		err = b.ensureInstanceSnapshotSymlink(inst.Type(), inst.Project(), inst.Name())
		if err != nil {
			return err
		}
	}
	revert.Success()
	return nil
}
// BackupCustomVolume exports a custom volume (optionally including its snapshots) via the
// pool driver into the supplied instance tar writer.
func (b *lxdBackend) BackupCustomVolume(projectName string, volName string, tarWriter *instancewriter.InstanceTarWriter, optimized bool, snapshots bool, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volume": volName, "optimized": optimized, "snapshots": snapshots})
	logger.Debug("BackupCustomVolume started")
	defer logger.Debug("BackupCustomVolume finished")

	// Get the volume name on storage.
	volStorageName := project.StorageVolume(projectName, volName)

	_, dbVol, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.id)
	if err != nil {
		return err
	}

	var snapNames []string
	if snapshots {
		// Get snapshots in age order, oldest first, and pass names to storage driver.
		volSnaps, err := b.state.Cluster.GetLocalStoragePoolVolumeSnapshotsWithType(projectName, volName, db.StoragePoolVolumeTypeCustom, b.id)
		if err != nil {
			return err
		}

		snapNames = make([]string, 0, len(volSnaps))
		for _, volSnap := range volSnaps {
			_, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(volSnap.Name)
			snapNames = append(snapNames, snapOnlyName)
		}
	}

	vol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(dbVol.ContentType), volStorageName, dbVol.Config)

	return b.driver.BackupVolume(vol, tarWriter, optimized, snapNames, op)
}
// CreateCustomVolumeFromBackup creates a custom volume (and its snapshots) on this pool from a
// backup tarball. DB records are created up front (and reverted on failure) before the data is
// unpacked by the storage driver.
func (b *lxdBackend) CreateCustomVolumeFromBackup(srcBackup backup.Info, srcData io.ReadSeeker, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": srcBackup.Project, "volume": srcBackup.Name, "snapshots": srcBackup.Snapshots, "optimizedStorage": *srcBackup.OptimizedStorage})
	logger.Debug("CreateCustomVolumeFromBackup started")
	defer logger.Debug("CreateCustomVolumeFromBackup finished")

	// The backup index must contain a volume config and one snapshot config per snapshot.
	if srcBackup.Config == nil || srcBackup.Config.Volume == nil {
		return fmt.Errorf("Valid volume config not found in index")
	}

	if len(srcBackup.Snapshots) != len(srcBackup.Config.VolumeSnapshots) {
		return fmt.Errorf("Valid volume snapshot config not found in index")
	}

	// Check whether we are allowed to create volumes.
	req := api.StorageVolumesPost{
		StorageVolumePut: api.StorageVolumePut{
			Config: srcBackup.Config.Volume.Config,
		},
		Name: srcBackup.Name,
	}
	err := b.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
		return project.AllowVolumeCreation(tx, srcBackup.Project, req)
	})
	if err != nil {
		return errors.Wrapf(err, "Failed checking volume creation allowed")
	}

	revert := revert.New()
	defer revert.Fail()

	// Get the volume name on storage.
	volStorageName := project.StorageVolume(srcBackup.Project, srcBackup.Name)

	// Validate config.
	vol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(srcBackup.Config.Volume.ContentType), volStorageName, srcBackup.Config.Volume.Config)

	// Strip any unsupported config keys (in case the export was made from a different type of storage pool).
	err = b.driver.ValidateVolume(vol, true)
	if err != nil {
		return err
	}

	// Create database entry for new storage volume using the validated config.
	err = VolumeDBCreate(b.state, b, srcBackup.Project, srcBackup.Name, srcBackup.Config.Volume.Description, vol.Type(), false, vol.Config(), time.Time{}, vol.ContentType())
	if err != nil {
		return err
	}

	revert.Add(func() {
		b.state.Cluster.RemoveStoragePoolVolume(srcBackup.Project, srcBackup.Name, db.StoragePoolVolumeTypeCustom, b.ID())
	})

	// Create database entries for new storage volume snapshots.
	for _, s := range srcBackup.Config.VolumeSnapshots {
		snapshot := s // Local var for revert.
		snapName := snapshot.Name

		// Due to a historical bug, the volume snapshot names were sometimes written in their full form
		// (<parent>/<snap>) rather than the expected snapshot name only form, so we need to handle both.
		if shared.IsSnapshot(snapshot.Name) {
			_, snapName, _ = shared.InstanceGetParentAndSnapshotName(snapshot.Name)
		}

		fullSnapName := drivers.GetSnapshotVolumeName(srcBackup.Name, snapName)
		snapVolStorageName := project.StorageVolume(srcBackup.Project, fullSnapName)
		snapVol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(srcBackup.Config.Volume.ContentType), snapVolStorageName, srcBackup.Config.Volume.Config)

		// Strip any unsupported config keys (in case the export was made from a different type of storage pool).
		err := b.driver.ValidateVolume(snapVol, true)
		if err != nil {
			return err
		}

		err = VolumeDBCreate(b.state, b, srcBackup.Project, fullSnapName, snapshot.Description, snapVol.Type(), true, snapVol.Config(), *snapshot.ExpiresAt, snapVol.ContentType())
		if err != nil {
			return err
		}

		revert.Add(func() {
			b.state.Cluster.RemoveStoragePoolVolume(srcBackup.Project, fullSnapName, db.StoragePoolVolumeTypeCustom, b.ID())
		})
	}

	// Unpack the backup into the new storage volume(s).
	volPostHook, revertHook, err := b.driver.CreateVolumeFromBackup(vol, srcBackup, srcData, op)
	if err != nil {
		return err
	}

	if revertHook != nil {
		revert.Add(revertHook)
	}

	// If the driver returned a post hook, return error as custom volumes don't need post hooks and we expect
	// the storage driver to understand this distinction and ensure that all activities done in the postHook
	// normally are done in CreateVolumeFromBackup as the DB record is created ahead of time.
	if volPostHook != nil {
		return fmt.Errorf("Custom volume restore doesn't support post hooks")
	}

	b.state.Events.SendLifecycle(srcBackup.Project, lifecycle.StorageVolumeCreated.Event(vol, string(vol.Type()), srcBackup.Project, op, log.Ctx{"type": vol.Type()}))

	revert.Success()
	return nil
}
lxd/storage/backend/lxd: Adds isStatusReady function to check if pool is ready for use
Checks if pool is pending or not available locally.
Signed-off-by: Thomas Parrott <6b778ce645fb0e3dde76d79eccad490955b1ae74@canonical.com>
package storage
import (
"context"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/pkg/errors"
log "gopkg.in/inconshreveable/log15.v2"
"gopkg.in/yaml.v2"
"github.com/lxc/lxd/lxd/backup"
"github.com/lxc/lxd/lxd/cluster/request"
"github.com/lxc/lxd/lxd/db"
deviceConfig "github.com/lxc/lxd/lxd/device/config"
"github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/instance/instancetype"
"github.com/lxc/lxd/lxd/lifecycle"
"github.com/lxc/lxd/lxd/locking"
"github.com/lxc/lxd/lxd/migration"
"github.com/lxc/lxd/lxd/operations"
"github.com/lxc/lxd/lxd/project"
"github.com/lxc/lxd/lxd/revert"
"github.com/lxc/lxd/lxd/state"
"github.com/lxc/lxd/lxd/storage/drivers"
"github.com/lxc/lxd/lxd/storage/filesystem"
"github.com/lxc/lxd/lxd/storage/memorypipe"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/instancewriter"
"github.com/lxc/lxd/shared/ioprogress"
"github.com/lxc/lxd/shared/logger"
"github.com/lxc/lxd/shared/logging"
)
var (
	// unavailablePools records pools that could not be mounted or are otherwise
	// unusable on this server (used as a set, keyed by pool name).
	unavailablePools = make(map[string]struct{})

	// unavailablePoolsMu guards access to unavailablePools.
	// The sync.Mutex zero value is ready to use, so no initializer is needed.
	unavailablePoolsMu sync.Mutex
)
// lxdBackend implements a storage pool backend, combining a low-level storage
// driver with the pool's database state.
type lxdBackend struct {
	driver drivers.Driver // Low-level storage driver for this pool.

	id     int64           // Storage pool database ID.
	db     api.StoragePool // Database representation of the pool.
	name   string          // Storage pool name.
	state  *state.State
	logger logger.Logger

	// nodes holds per-cluster-member pool state, keyed by cluster member (node) ID.
	nodes map[int64]db.StoragePoolNode
}
// ID returns the storage pool's database ID.
func (b *lxdBackend) ID() int64 {
	return b.id
}

// Name returns the storage pool's name.
func (b *lxdBackend) Name() string {
	return b.name
}

// Description returns the storage pool's description from its database record.
func (b *lxdBackend) Description() string {
	return b.db.Description
}

// Status returns the global (cluster-wide) storage pool status from its database record.
func (b *lxdBackend) Status() string {
	return b.db.Status
}
// LocalStatus returns the storage pool status as seen by this cluster member.
func (b *lxdBackend) LocalStatus() string {
	unavailablePoolsMu.Lock()
	defer unavailablePoolsMu.Unlock()

	// A pool flagged unavailable locally overrides the DB status. We deliberately leave
	// b.db.Status untouched, as the pool may recover later and we don't want to persist
	// the unavailable state.
	_, unavailable := unavailablePools[b.name]
	if unavailable {
		return api.StoragePoolStatusUnvailable
	}

	node, exists := b.nodes[b.state.Cluster.GetNodeID()]
	if !exists {
		return api.StoragePoolStatusUnknown
	}

	return db.StoragePoolStateToAPIStatus(node.State)
}
// isStatusReady returns an error if the pool cannot be used on this server, either
// because its creation is still pending globally or it is unavailable locally.
func (b *lxdBackend) isStatusReady() error {
	switch {
	case b.Status() == api.StoragePoolStatusPending:
		return fmt.Errorf("Specified pool is not fully created")
	case b.LocalStatus() == api.StoragePoolStatusUnvailable:
		return fmt.Errorf("Specified pool is not currently available on this server")
	}

	return nil
}
// ToAPI returns the storage pool as an API representation.
func (b *lxdBackend) ToAPI() api.StoragePool {
	return b.db
}

// Driver returns the low-level storage driver backing this pool.
func (b *lxdBackend) Driver() drivers.Driver {
	return b.driver
}

// MigrationTypes returns the migration transport methods preferred when sending a
// migration, based on what the driver reports it is capable of.
func (b *lxdBackend) MigrationTypes(contentType drivers.ContentType, refresh bool) []migration.Type {
	return b.driver.MigrationTypes(contentType, refresh)
}
// Create creates the storage pool layout on the storage device.
// For remote drivers in a cluster, only requests with ClientTypeNormal perform the remote
// storage setup; other client types just create the local directory structure, so that a
// single cluster member does the remote work.
func (b *lxdBackend) Create(clientType request.ClientType, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"config": b.db.Config, "description": b.db.Description, "clientType": clientType})
	logger.Debug("create started")
	defer logger.Debug("create finished")

	revert := revert.New()
	defer revert.Fail()

	path := drivers.GetPoolMountPath(b.name)

	// Refuse to overwrite an existing pool directory.
	if shared.IsDir(path) {
		return fmt.Errorf("Storage pool directory %q already exists", path)
	}

	// Create the storage path.
	err := os.MkdirAll(path, 0711)
	if err != nil {
		return errors.Wrapf(err, "Failed to create storage pool directory %q", path)
	}

	revert.Add(func() { os.RemoveAll(path) })

	if b.driver.Info().Remote && clientType != request.ClientTypeNormal {
		if !b.driver.Info().MountedRoot {
			// Create the directory structure.
			err = b.createStorageStructure(path)
			if err != nil {
				return err
			}
		}

		// Dealing with a remote storage pool, we're done now.
		revert.Success()
		return nil
	}

	// Validate config.
	err = b.driver.Validate(b.db.Config)
	if err != nil {
		return err
	}

	// Create the storage pool on the storage device.
	err = b.driver.Create()
	if err != nil {
		return err
	}

	// Mount the storage pool.
	ourMount, err := b.driver.Mount()
	if err != nil {
		return err
	}

	// We expect the caller of create to mount the pool if needed, so we should unmount after
	// storage struct has been created.
	if ourMount {
		defer b.driver.Unmount()
	}

	// Create the directory structure.
	err = b.createStorageStructure(path)
	if err != nil {
		return err
	}

	revert.Success()
	return nil
}
// GetVolume returns a drivers.Volume built from copies of the supplied volume config and of
// the pool's config, so driver-internal modifications cannot leak back to the caller.
func (b *lxdBackend) GetVolume(volType drivers.VolumeType, contentType drivers.ContentType, volName string, volConfig map[string]string) drivers.Volume {
	// Copy the supplied volume config.
	newConfig := make(map[string]string, len(volConfig))
	for key, value := range volConfig {
		newConfig[key] = value
	}

	// Copy the pool config.
	newPoolConfig := make(map[string]string, len(b.db.Config))
	for key, value := range b.db.Config {
		newPoolConfig[key] = value
	}

	return drivers.NewVolume(b.driver, b.name, volType, contentType, volName, newConfig, newPoolConfig)
}
// GetResources returns utilisation information about the pool, as reported by the driver.
func (b *lxdBackend) GetResources() (*api.ResourcesStoragePool, error) {
	logger := logging.AddContext(b.logger, nil)
	logger.Debug("GetResources started")
	defer logger.Debug("GetResources finished")

	return b.driver.GetResources()
}
// IsUsed reports whether any volume or profile (excluding image volumes) uses the storage pool.
func (b *lxdBackend) IsUsed() (bool, error) {
	var poolUsedBy []string

	// Fetch all users of the storage pool from the database.
	err := b.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
		var err error
		poolUsedBy, err = tx.GetStoragePoolUsedBy(b.name, false)
		return err
	})
	if err != nil {
		return false, err
	}

	for _, entry := range poolUsedBy {
		// Image volumes never count as users of the pool.
		if !strings.HasPrefix(entry, "/1.0/images/") {
			return true, nil
		}
	}

	return false, nil
}
// Update applies a new description and config to the pool, updating the driver and the
// database record as needed depending on the pool's pending state and the client type.
func (b *lxdBackend) Update(clientType request.ClientType, newDesc string, newConfig map[string]string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"newDesc": newDesc, "newConfig": newConfig})
	logger.Debug("Update started")
	defer logger.Debug("Update finished")

	// Validate config.
	err := b.driver.Validate(newConfig)
	if err != nil {
		return err
	}

	// Diff the configurations.
	changedConfig, userOnly := b.detectChangedConfig(b.db.Config, newConfig)

	// The pool source may only be changed while the local pool state is still pending.
	_, sourceChanged := changedConfig["source"]
	if sourceChanged && b.LocalStatus() != api.StoragePoolStatusPending {
		return fmt.Errorf("Pool source cannot be changed when not in pending state")
	}

	// Apply changes to local node if both global pool and node are not pending and non-user config changed.
	// Otherwise just apply changes to DB (below) ready for the actual global create request to be initiated.
	if len(changedConfig) > 0 && b.Status() != api.StoragePoolStatusPending && b.LocalStatus() != api.StoragePoolStatusPending && !userOnly {
		err = b.driver.Update(changedConfig)
		if err != nil {
			return err
		}
	}

	// Update the database if something changed and we're in ClientTypeNormal mode.
	if clientType == request.ClientTypeNormal && (len(changedConfig) > 0 || newDesc != b.db.Description) {
		err = b.state.Cluster.UpdateStoragePool(b.name, newDesc, newConfig)
		if err != nil {
			return err
		}
	}

	return nil
}
// Delete removes the pool from this server, deleting the low-level storage as well when this
// member is responsible for it (non-remote pools, or normal client requests).
func (b *lxdBackend) Delete(clientType request.ClientType, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"clientType": clientType})
	logger.Debug("Delete started")
	defer logger.Debug("Delete finished")

	// If completely gone, just return.
	path := shared.VarPath("storage-pools", b.name)
	if !shared.PathExists(path) {
		return nil
	}

	if clientType != request.ClientTypeNormal && b.driver.Info().Remote {
		if b.driver.Info().MountedRoot {
			_, err := b.driver.Unmount()
			if err != nil {
				return err
			}
		} else {
			// Remote storage may have leftover entries caused by
			// volumes that were moved or deleted while a particular system was offline.
			err := os.RemoveAll(path)
			if err != nil {
				return err
			}
		}
	} else {
		// Remove any left over image volumes.
		// This can occur during partial image unpack or if the storage pool has been recovered from an
		// instance backup file and the image volume DB records were not restored.
		// If non-image volumes exist, we don't delete them, even if they can then prevent the storage pool
		// from being deleted, because they should not exist by this point and we don't want to end up
		// removing an instance or custom volume accidentally.
		// Errors listing volumes are ignored, as we should still try and delete the storage pool.
		vols, _ := b.driver.ListVolumes()
		for _, vol := range vols {
			if vol.Type() == drivers.VolumeTypeImage {
				err := b.driver.DeleteVolume(vol, op)
				if err != nil {
					return errors.Wrapf(err, "Failed deleting left over image volume %q (%s)", vol.Name(), vol.ContentType())
				}

				logger.Warn("Deleted left over image volume", log.Ctx{"volName": vol.Name(), "contentType": vol.ContentType()})
			}
		}

		// Delete the low-level storage.
		err := b.driver.Delete(op)
		if err != nil {
			return err
		}
	}

	// Delete the mountpoint.
	err := os.Remove(path)
	if err != nil && !os.IsNotExist(err) {
		return errors.Wrapf(err, "Failed to remove directory %q", path)
	}

	return nil
}
// Mount mounts the storage pool. On success the pool is removed from the local unavailable
// pools set; on failure it is recorded there so LocalStatus reports it as unavailable.
// Returns whether this call performed the mount.
func (b *lxdBackend) Mount() (bool, error) {
	logger := logging.AddContext(b.logger, nil)
	logger.Debug("Mount started")
	defer logger.Debug("Mount finished")

	revert := revert.New()
	defer revert.Fail()

	// If anything below fails, flag the pool as unavailable on this server.
	revert.Add(func() {
		unavailablePoolsMu.Lock()
		unavailablePools[b.Name()] = struct{}{}
		unavailablePoolsMu.Unlock()
	})

	path := drivers.GetPoolMountPath(b.name)

	// Create the storage path if needed.
	if !shared.IsDir(path) {
		err := os.MkdirAll(path, 0711)
		if err != nil {
			return false, fmt.Errorf("Failed to create storage pool directory %q: %w", path, err)
		}
	}

	ourMount, err := b.driver.Mount()
	if err != nil {
		return false, err
	}

	if ourMount {
		revert.Add(func() { b.Unmount() })
	}

	// Create the directory structure (if needed) after mounted.
	err = b.createStorageStructure(path)
	if err != nil {
		return false, err
	}

	revert.Success()

	// Ensure pool is marked as available now its mounted.
	unavailablePoolsMu.Lock()
	delete(unavailablePools, b.Name())
	unavailablePoolsMu.Unlock()

	return ourMount, nil
}
// Unmount unmounts the storage pool, returning whether this call performed the unmount.
func (b *lxdBackend) Unmount() (bool, error) {
	logger := logging.AddContext(b.logger, nil)
	logger.Debug("Unmount started")
	defer logger.Debug("Unmount finished")

	return b.driver.Unmount()
}
// ApplyPatch runs the requested patch at both backend and driver level:
// early backend patch first, then the driver patch, then the late backend patch.
func (b *lxdBackend) ApplyPatch(name string) error {
	// Apply any early backend-level patch first.
	if patch, found := lxdEarlyPatches[name]; found {
		err := patch(b)
		if err != nil {
			return err
		}
	}

	// Then let the driver apply its own patch.
	err := b.driver.ApplyPatch(name)
	if err != nil {
		return err
	}

	// Finally apply any late backend-level patch.
	if patch, found := lxdLatePatches[name]; found {
		err := patch(b)
		if err != nil {
			return err
		}
	}

	return nil
}
// ensureInstanceSymlink creates (or refreshes) the symlink in the instances directory pointing
// at the instance's mount path on this pool. instanceName must not be a snapshot name.
func (b *lxdBackend) ensureInstanceSymlink(instanceType instancetype.Type, projectName string, instanceName string, mountPath string) error {
	if shared.IsSnapshot(instanceName) {
		return fmt.Errorf("Instance must not be snapshot")
	}

	symlinkPath := InstancePath(instanceType, projectName, instanceName, false)

	// Older LXD bugs could leave a symlink pointing at a different pool, so always recreate it.
	if shared.PathExists(symlinkPath) {
		err := os.Remove(symlinkPath)
		if err != nil {
			return errors.Wrapf(err, "Failed to remove symlink %q", symlinkPath)
		}
	}

	err := os.Symlink(mountPath, symlinkPath)
	if err != nil {
		return errors.Wrapf(err, "Failed to create symlink from %q to %q", mountPath, symlinkPath)
	}

	return nil
}
// removeInstanceSymlink deletes the instances directory symlink pointing at the instance's
// mount path, if it exists.
func (b *lxdBackend) removeInstanceSymlink(instanceType instancetype.Type, projectName string, instanceName string) error {
	symlinkPath := InstancePath(instanceType, projectName, instanceName, false)

	// Nothing to do if the symlink is already gone.
	if !shared.PathExists(symlinkPath) {
		return nil
	}

	err := os.Remove(symlinkPath)
	if err != nil {
		return errors.Wrapf(err, "Failed to remove symlink %q", symlinkPath)
	}

	return nil
}
// ensureInstanceSnapshotSymlink creates (or refreshes) the symlink in the snapshots directory
// pointing at the instance's snapshots path on this pool. instanceName may be a snapshot name;
// only the parent portion is used.
func (b *lxdBackend) ensureInstanceSnapshotSymlink(instanceType instancetype.Type, projectName string, instanceName string) error {
	// Derive the volume type for this instance type.
	volType, err := InstanceTypeToVolumeType(instanceType)
	if err != nil {
		return err
	}

	parentName, _, _ := shared.InstanceGetParentAndSnapshotName(instanceName)
	snapshotSymlink := InstancePath(instanceType, projectName, parentName, true)
	volStorageName := project.Instance(projectName, parentName)
	snapshotTargetPath := drivers.GetVolumeSnapshotDir(b.name, volType, volStorageName)

	// Older LXD bugs could leave a symlink pointing at a different pool, so always recreate it.
	if shared.PathExists(snapshotSymlink) {
		err = os.Remove(snapshotSymlink)
		if err != nil {
			return errors.Wrapf(err, "Failed to remove symlink %q", snapshotSymlink)
		}
	}

	err = os.Symlink(snapshotTargetPath, snapshotSymlink)
	if err != nil {
		return errors.Wrapf(err, "Failed to create symlink from %q to %q", snapshotTargetPath, snapshotSymlink)
	}

	return nil
}
// removeInstanceSnapshotSymlinkIfUnused removes the snapshots directory symlink when the
// instance's snapshots path no longer exists on the pool. The driver is expected to remove
// that path after the last snapshot is removed or the volume is deleted.
func (b *lxdBackend) removeInstanceSnapshotSymlinkIfUnused(instanceType instancetype.Type, projectName string, instanceName string) error {
	// Derive the volume type for this instance type.
	volType, err := InstanceTypeToVolumeType(instanceType)
	if err != nil {
		return err
	}

	parentName, _, _ := shared.InstanceGetParentAndSnapshotName(instanceName)
	snapshotSymlink := InstancePath(instanceType, projectName, parentName, true)
	volStorageName := project.Instance(projectName, parentName)
	snapshotTargetPath := drivers.GetVolumeSnapshotDir(b.name, volType, volStorageName)

	// Keep the symlink while the snapshots directory still exists on the pool.
	if shared.PathExists(snapshotTargetPath) {
		return nil
	}

	if shared.PathExists(snapshotSymlink) {
		err := os.Remove(snapshotSymlink)
		if err != nil {
			return errors.Wrapf(err, "Failed to remove symlink %q", snapshotSymlink)
		}
	}

	return nil
}
// instanceRootVolumeConfig returns the instance's root volume config from its storage volume
// database record, with the "size" and "size.state" keys overridden from the instance's
// expanded root disk device config when set there.
func (b *lxdBackend) instanceRootVolumeConfig(inst instance.Instance) (map[string]string, error) {
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return nil, err
	}

	volDBType, err := VolumeTypeToDBType(volType)
	if err != nil {
		return nil, err
	}

	// Get volume config.
	_, vol, err := b.state.Cluster.GetLocalStoragePoolVolume(inst.Project(), inst.Name(), volDBType, b.ID())
	if err != nil {
		// Give a more helpful error when the volume record is missing.
		if err == db.ErrNoSuchObject {
			return nil, errors.Wrapf(err, "Volume doesn't exist for %q on pool %q", project.Instance(inst.Project(), inst.Name()), b.Name())
		}

		return nil, err
	}

	// Get the root disk device config.
	_, rootDiskConf, err := shared.GetRootDiskDevice(inst.ExpandedDevices().CloneNative())
	if err != nil {
		return nil, err
	}

	// Override size property from instance root device config.
	if rootDiskConf["size"] != "" {
		vol.Config["size"] = rootDiskConf["size"]
	}

	if rootDiskConf["size.state"] != "" {
		vol.Config["size.state"] = rootDiskConf["size.state"]
	}

	return vol.Config, nil
}
// FillInstanceConfig populates the supplied instance volume config map with any defaults based
// on the storage pool and instance type being used. The supplied map is modified in place.
func (b *lxdBackend) FillInstanceConfig(inst instance.Instance, config map[string]string) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name()})
	logger.Debug("FillInstanceConfig started")
	defer logger.Debug("FillInstanceConfig finished")

	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}

	contentType := InstanceContentType(inst)

	// Get the volume name on storage.
	volStorageName := project.Instance(inst.Project(), inst.Name())

	// Fill default config in volume (creates internal copy of supplied config and modifies that).
	vol := b.GetVolume(volType, contentType, volStorageName, config)
	err = b.driver.FillVolumeConfig(vol)
	if err != nil {
		return err
	}

	// Copy filled volume config back into supplied config map.
	for k, v := range vol.Config() {
		config[k] = v
	}

	return nil
}
// CreateInstance creates an empty storage volume for a new instance, creates the instance
// symlink and queues the create-trigger template apply. On any failure after the volume
// type is resolved, the partially created instance storage is cleaned up.
func (b *lxdBackend) CreateInstance(inst instance.Instance, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name()})
	logger.Debug("CreateInstance started")
	defer logger.Debug("CreateInstance finished")

	err := b.isStatusReady()
	if err != nil {
		return err
	}

	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}

	// Use the reverter pattern (consistent with the other functions in this backend) rather
	// than a manual bool flag; on any error return below the instance storage is removed.
	revert := revert.New()
	defer revert.Fail()
	revert.Add(func() { b.DeleteInstance(inst, op) })

	contentType := InstanceContentType(inst)

	// Find the root device config for instance.
	rootDiskConf, err := b.instanceRootVolumeConfig(inst)
	if err != nil {
		return err
	}

	// Get the volume name on storage.
	volStorageName := project.Instance(inst.Project(), inst.Name())

	vol := b.GetVolume(volType, contentType, volStorageName, rootDiskConf)
	err = b.driver.CreateVolume(vol, nil, op)
	if err != nil {
		return err
	}

	err = b.ensureInstanceSymlink(inst.Type(), inst.Project(), inst.Name(), vol.MountPath())
	if err != nil {
		return err
	}

	// Queue template application for when the instance next starts.
	err = inst.DeferTemplateApply(instance.TemplateTriggerCreate)
	if err != nil {
		return err
	}

	revert.Success()
	return nil
}
// CreateInstanceFromBackup restores a backup file onto the storage device. Because the backup file
// is unpacked and restored onto the storage device before the instance is created in the database
// it is necessary to return two functions; a post hook that can be run once the instance has been
// created in the database to run any storage layer finalisations, and a revert hook that can be
// run if the instance database load process fails that will remove anything created thus far.
func (b *lxdBackend) CreateInstanceFromBackup(srcBackup backup.Info, srcData io.ReadSeeker, op *operations.Operation) (func(instance.Instance) error, revert.Hook, error) {
	logger := logging.AddContext(b.logger, log.Ctx{"project": srcBackup.Project, "instance": srcBackup.Name, "snapshots": srcBackup.Snapshots, "optimizedStorage": *srcBackup.OptimizedStorage})
	logger.Debug("CreateInstanceFromBackup started")
	defer logger.Debug("CreateInstanceFromBackup finished")

	// Get the volume name on storage.
	volStorageName := project.Instance(srcBackup.Project, srcBackup.Name)

	// Get the instance type.
	instanceType, err := instancetype.New(string(srcBackup.Type))
	if err != nil {
		return nil, nil, err
	}

	// Get the volume type.
	volType, err := InstanceTypeToVolumeType(instanceType)
	if err != nil {
		return nil, nil, err
	}

	// VM volumes are block content; everything else is filesystem content.
	contentType := drivers.ContentTypeFS
	if volType == drivers.VolumeTypeVM {
		contentType = drivers.ContentTypeBlock
	}

	// We don't know the volume's config yet as tarball hasn't been unpacked.
	// We will apply the config as part of the post hook function returned if driver needs to.
	vol := b.GetVolume(volType, contentType, volStorageName, nil)

	revert := revert.New()
	defer revert.Fail()

	// Unpack the backup into the new storage volume(s).
	volPostHook, revertHook, err := b.driver.CreateVolumeFromBackup(vol, srcBackup, srcData, op)
	if err != nil {
		return nil, nil, err
	}

	if revertHook != nil {
		revert.Add(revertHook)
	}

	err = b.ensureInstanceSymlink(instanceType, srcBackup.Project, srcBackup.Name, vol.MountPath())
	if err != nil {
		return nil, nil, err
	}

	revert.Add(func() {
		b.removeInstanceSymlink(instanceType, srcBackup.Project, srcBackup.Name)
	})

	// Create the snapshots directory symlink only when the backup contains snapshots.
	if len(srcBackup.Snapshots) > 0 {
		err = b.ensureInstanceSnapshotSymlink(instanceType, srcBackup.Project, srcBackup.Name)
		if err != nil {
			return nil, nil, err
		}

		revert.Add(func() {
			b.removeInstanceSnapshotSymlinkIfUnused(instanceType, srcBackup.Project, srcBackup.Name)
		})
	}

	// Update pool information in the backup.yaml file.
	err = vol.MountTask(func(mountPath string, op *operations.Operation) error {
		return backup.UpdateInstanceConfigStoragePool(b.state.Cluster, srcBackup, mountPath)
	}, op)
	if err != nil {
		return nil, nil, errors.Wrapf(err, "Error updating backup file")
	}

	var postHook func(instance.Instance) error

	// Create a post hook function that will use the instance (that will be created) to setup a new volume
	// containing the instance's root disk device's config so that the driver's post hook function can access
	// that config to perform any post instance creation setup.
	postHook = func(inst instance.Instance) error {
		logger.Debug("CreateInstanceFromBackup post hook started")
		defer logger.Debug("CreateInstanceFromBackup post hook finished")

		// Get the root disk device config.
		rootDiskConf, err := b.instanceRootVolumeConfig(inst)
		if err != nil {
			return err
		}

		// Get the volume name on storage.
		volStorageName := project.Instance(inst.Project(), inst.Name())

		volType, err := InstanceTypeToVolumeType(inst.Type())
		if err != nil {
			return err
		}

		contentType := InstanceContentType(inst)

		// If the driver returned a post hook, run it now.
		if volPostHook != nil {
			// Initialise new volume containing root disk config supplied in instance.
			vol := b.GetVolume(volType, contentType, volStorageName, rootDiskConf)
			err = volPostHook(vol)
			if err != nil {
				return err
			}
		}

		// Apply quota config from root device if its set. Should be done after driver's post hook if set
		// so that any volume initialisation has been completed first.
		if rootDiskConf["size"] != "" {
			size := rootDiskConf["size"]
			logger.Debug("Applying volume quota from root disk config", log.Ctx{"size": size})

			allowUnsafeResize := false

			if vol.Type() == drivers.VolumeTypeContainer {
				// Enable allowUnsafeResize for container imports so that filesystem resize
				// safety checks are avoided in order to allow more imports to succeed when
				// otherwise the pre-resize estimated checks of resize2fs would prevent
				// import. If there is truly insufficient size to complete the import the
				// resize will still fail, but its OK as we will then delete the volume
				// rather than leaving it in a corrupted state. We don't need to do this
				// for non-container volumes (nor should we) because block volumes won't
				// error if we shrink them too much, and custom volumes can be created at
				// the correct size immediately and don't need a post-import resize step.
				allowUnsafeResize = true
			}

			err = b.driver.SetVolumeQuota(vol, size, allowUnsafeResize, op)
			if err != nil {
				// The restored volume can end up being larger than the root disk config's size
				// property due to the block boundary rounding some storage drivers use. As such
				// if the restored volume is larger than the config's size and it cannot be shrunk
				// to the equivalent size on the target storage driver, don't fail as the backup
				// has still been restored successfully.
				if errors.Cause(err) == drivers.ErrCannotBeShrunk {
					logger.Warn("Could not apply volume quota from root disk config as restored volume cannot be shrunk", log.Ctx{"size": rootDiskConf["size"]})
				} else {
					return errors.Wrapf(err, "Failed applying volume quota to root disk")
				}
			}

			// Apply the filesystem volume quota (only when main volume is block).
			if vol.IsVMBlock() {
				vmStateSize := rootDiskConf["size.state"]

				// Apply default VM config filesystem size if main volume size is specified and
				// no custom vmStateSize is specified. This way if the main volume size is empty
				// (i.e removing quota) then this will also pass empty quota for the config
				// filesystem volume as well, allowing a former quota to be removed from both
				// volumes.
				if vmStateSize == "" && size != "" {
					vmStateSize = deviceConfig.DefaultVMBlockFilesystemSize
				}

				logger.Debug("Applying filesystem volume quota from root disk config", log.Ctx{"size.state": vmStateSize})

				fsVol := vol.NewVMBlockFilesystemVolume()
				err := b.driver.SetVolumeQuota(fsVol, vmStateSize, allowUnsafeResize, op)
				if errors.Cause(err) == drivers.ErrCannotBeShrunk {
					logger.Warn("Could not apply VM filesystem volume quota from root disk config as restored volume cannot be shrunk", log.Ctx{"size": rootDiskConf["size"]})
				} else if err != nil {
					return fmt.Errorf("Failed applying filesystem volume quota to root disk: %w", err)
				}
			}
		}

		return nil
	}

	revert.Success()
	return postHook, revertHook, nil
}
// CreateInstanceFromCopy copies an instance volume and optionally its snapshots to new volume(s).
// CreateInstanceFromCopy copies the root volume (and optionally snapshots) of src into a new
// volume for inst. When source and target are on the same storage pool the driver's native
// copy is used; otherwise the generic migration subsystem negotiates a transfer between the
// pools over an in-memory pipe. On failure the partially created instance volume is removed
// via the revert stack.
func (b *lxdBackend) CreateInstanceFromCopy(inst instance.Instance, src instance.Instance, snapshots bool, allowInconsistent bool, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name(), "src": src.Name(), "snapshots": snapshots})
	logger.Debug("CreateInstanceFromCopy started")
	defer logger.Debug("CreateInstanceFromCopy finished")

	err := b.isStatusReady()
	if err != nil {
		return err
	}

	// Copying across instance types (container <-> VM) is not supported.
	if inst.Type() != src.Type() {
		return fmt.Errorf("Instance types must match")
	}

	// VM volumes cannot be copied while the source VM is running.
	if src.Type() == instancetype.VM && src.IsRunning() {
		return errors.Wrap(drivers.ErrNotImplemented, "Unable to perform VM live migration")
	}

	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}

	volDBType, err := VolumeTypeToDBType(volType)
	if err != nil {
		return err
	}

	contentType := InstanceContentType(inst)

	// Get the root disk device config.
	rootDiskConf, err := b.instanceRootVolumeConfig(inst)
	if err != nil {
		return err
	}

	// Get the volume name on storage.
	volStorageName := project.Instance(inst.Project(), inst.Name())

	// Initialise a new volume containing the root disk config supplied in the new instance.
	vol := b.GetVolume(volType, contentType, volStorageName, rootDiskConf)
	if b.driver.HasVolume(vol) {
		return fmt.Errorf("Cannot create volume, already exists on target storage")
	}

	// Setup reverter.
	revert := revert.New()
	defer revert.Fail()

	// Get the source storage pool.
	srcPool, err := GetPoolByInstance(b.state, src)
	if err != nil {
		return err
	}

	// Some driver backing stores require that running instances be frozen during copy.
	if !src.IsSnapshot() && b.driver.Info().RunningCopyFreeze && src.IsRunning() && !src.IsFrozen() && !allowInconsistent {
		err = src.Freeze()
		if err != nil {
			return err
		}
		defer src.Unfreeze()

		// Attempt to sync the filesystem.
		// Best effort: a sync failure here is not treated as fatal to the copy.
		filesystem.SyncFS(src.RootfsPath())
	}

	// Clean up the new instance volume if anything below fails.
	revert.Add(func() { b.DeleteInstance(inst, op) })

	if b.Name() == srcPool.Name() {
		// Same-pool copy: delegate to the driver's optimized copy.
		logger.Debug("CreateInstanceFromCopy same-pool mode detected")

		// Get the src volume name on storage.
		srcVolStorageName := project.Instance(src.Project(), src.Name())

		// We don't need to use the source instance's root disk config, so set to nil.
		srcVol := b.GetVolume(volType, contentType, srcVolStorageName, nil)

		err = b.driver.CreateVolumeFromCopy(vol, srcVol, snapshots, op)
		if err != nil {
			return err
		}
	} else {
		// We are copying volumes between storage pools so use migration system as it will
		// be able to negotiate a common transfer method between pool types.
		logger.Debug("CreateInstanceFromCopy cross-pool mode detected")

		// If we are copying snapshots, retrieve a list of snapshots from source volume.
		snapshotNames := []string{}
		if snapshots {
			snapshots, err := VolumeSnapshotsGet(b.state, src.Project(), srcPool.Name(), src.Name(), volDBType)
			if err != nil {
				return err
			}

			for _, snapshot := range snapshots {
				_, snapShotName, _ := shared.InstanceGetParentAndSnapshotName(snapshot.Name)
				snapshotNames = append(snapshotNames, snapShotName)
			}
		}

		// Negotiate the migration type to use.
		offeredTypes := srcPool.MigrationTypes(contentType, false)
		offerHeader := migration.TypesToHeader(offeredTypes...)
		migrationTypes, err := migration.MatchTypes(offerHeader, FallbackMigrationType(contentType), b.MigrationTypes(contentType, false))
		if err != nil {
			return fmt.Errorf("Failed to negotiate copy migration type: %v", err)
		}

		var srcVolumeSize int64

		// For VMs, get source volume size so that target can create the volume the same size.
		if src.Type() == instancetype.VM {
			srcVolumeSize, err = InstanceDiskBlockSize(srcPool, src, op)
			if err != nil {
				return errors.Wrapf(err, "Failed getting source disk size")
			}
		}

		ctx, cancel := context.WithCancel(context.Background())

		// Use in-memory pipe pair to simulate a connection between the sender and receiver.
		aEnd, bEnd := memorypipe.NewPipePair(ctx)

		// Run sender and receiver in separate go routines to prevent deadlocks.
		// Both channels are buffered (size 1) so neither goroutine blocks on send.
		aEndErrCh := make(chan error, 1)
		bEndErrCh := make(chan error, 1)
		go func() {
			err := srcPool.MigrateInstance(src, aEnd, &migration.VolumeSourceArgs{
				Name:              src.Name(),
				Snapshots:         snapshotNames,
				MigrationType:     migrationTypes[0],
				TrackProgress:     true, // Do use a progress tracker on sender.
				AllowInconsistent: allowInconsistent,
			}, op)
			if err != nil {
				// Cancel the context to close the pipe and unblock the peer.
				cancel()
			}
			aEndErrCh <- err
		}()

		go func() {
			err := b.CreateInstanceFromMigration(inst, bEnd, migration.VolumeTargetArgs{
				Name:          inst.Name(),
				Snapshots:     snapshotNames,
				MigrationType: migrationTypes[0],
				VolumeSize:    srcVolumeSize, // Block size setting override.
				TrackProgress: false,         // Do not use a progress tracker on receiver.
			}, op)
			if err != nil {
				// Cancel the context to close the pipe and unblock the peer.
				cancel()
			}
			bEndErrCh <- err
		}()

		// Capture errors from the sender and receiver from their result channels.
		errs := []error{}
		aEndErr := <-aEndErrCh
		if aEndErr != nil {
			errs = append(errs, aEndErr)
		}

		bEndErr := <-bEndErrCh
		if bEndErr != nil {
			errs = append(errs, bEndErr)
		}

		// Release the pipe's context resources now both sides are done.
		cancel()

		if len(errs) > 0 {
			return fmt.Errorf("Create instance volume from copy failed: %v", errs)
		}
	}

	// Setup the symlinks.
	err = b.ensureInstanceSymlink(inst.Type(), inst.Project(), inst.Name(), vol.MountPath())
	if err != nil {
		return err
	}

	revert.Success()
	return nil
}
// RefreshCustomVolume refreshes custom volumes (and optionally snapshots) during the custom volume copy operations.
// Snapshots that are not present in the source but are in the destination are removed from the
// destination if snapshots are included in the synchronization.
func (b *lxdBackend) RefreshCustomVolume(projectName string, srcProjectName string, volName string, desc string, config map[string]string, srcPoolName, srcVolName string, srcVolOnly bool, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "srcProjectName": srcProjectName, "volName": volName, "desc": desc, "config": config, "srcPoolName": srcPoolName, "srcVolName": srcVolName, "srcVolOnly": srcVolOnly})
	logger.Debug("RefreshCustomVolume started")
	defer logger.Debug("RefreshCustomVolume finished")

	err := b.isStatusReady()
	if err != nil {
		return err
	}

	// Default to the target project when no source project is specified.
	if srcProjectName == "" {
		srcProjectName = projectName
	}

	// Setup the source pool backend instance.
	var srcPool *lxdBackend
	if b.name == srcPoolName {
		srcPool = b // Source and target are in the same pool so share pool var.
	} else {
		// Source is in a different pool to target, so load the pool.
		tmpPool, err := GetPoolByName(b.state, srcPoolName)
		if err != nil {
			return err
		}

		// Convert to lxdBackend so we can access driver.
		tmpBackend, ok := tmpPool.(*lxdBackend)
		if !ok {
			return fmt.Errorf("Pool is not an lxdBackend")
		}

		srcPool = tmpBackend
	}

	// Check source volume exists and is custom type.
	_, srcVolRow, err := b.state.Cluster.GetLocalStoragePoolVolume(srcProjectName, srcVolName, db.StoragePoolVolumeTypeCustom, srcPool.ID())
	if err != nil {
		if err == db.ErrNoSuchObject {
			return fmt.Errorf("Source volume doesn't exist")
		}

		return err
	}

	// Use the source volume's config if not supplied.
	if config == nil {
		config = srcVolRow.Config
	}

	// Use the source volume's description if not supplied.
	if desc == "" {
		desc = srcVolRow.Description
	}

	contentDBType, err := VolumeContentTypeNameToContentType(srcVolRow.ContentType)
	if err != nil {
		return err
	}

	// Get the source volume's content type.
	contentType := drivers.ContentTypeFS
	if contentDBType == db.StoragePoolVolumeContentTypeBlock {
		contentType = drivers.ContentTypeBlock
	}

	// Check the target pool's driver supports custom volumes at all.
	storagePoolSupported := false
	for _, supportedType := range b.Driver().Info().VolumeTypes {
		if supportedType == drivers.VolumeTypeCustom {
			storagePoolSupported = true
			break
		}
	}

	if !storagePoolSupported {
		return fmt.Errorf("Storage pool does not support custom volume type")
	}

	// If we are copying snapshots, retrieve a list of snapshots from source volume.
	snapshotNames := []string{}
	srcSnapVols := []drivers.Volume{}
	syncSnapshots := []db.StorageVolumeArgs{}
	if !srcVolOnly {
		// Detect added/deleted snapshots.
		srcSnapshots, err := VolumeSnapshotsGet(srcPool.state, srcProjectName, srcPoolName, srcVolName, db.StoragePoolVolumeTypeCustom)
		if err != nil {
			return err
		}

		destSnapshots, err := VolumeSnapshotsGet(b.state, projectName, b.Name(), volName, db.StoragePoolVolumeTypeCustom)
		if err != nil {
			return err
		}

		var deleteSnapshots []db.StorageVolumeArgs
		syncSnapshots, deleteSnapshots = syncSnapshotsVolumeGet(srcSnapshots, destSnapshots)

		// Build the list of snapshots to transfer.
		for _, snapshot := range syncSnapshots {
			_, snapshotName, _ := shared.InstanceGetParentAndSnapshotName(snapshot.Name)
			snapshotNames = append(snapshotNames, snapshotName)
			snapVolStorageName := project.StorageVolume(projectName, snapshot.Name)
			srcSnapVol := srcPool.GetVolume(drivers.VolumeTypeCustom, contentType, snapVolStorageName, nil)
			srcSnapVols = append(srcSnapVols, srcSnapVol)
		}

		// Delete any snapshots that have disappeared or changed on the source.
		for _, snapshot := range deleteSnapshots {
			_, snapshotName, _ := shared.InstanceGetParentAndSnapshotName(snapshot.Name)
			snapVolName := fmt.Sprintf("%s/%s", volName, snapshotName)

			// Delete the snapshot.
			err = b.DeleteCustomVolumeSnapshot(projectName, snapVolName, op)
			if err != nil {
				return err
			}
		}
	}

	volStorageName := project.StorageVolume(projectName, volName)
	vol := b.GetVolume(drivers.VolumeTypeCustom, contentType, volStorageName, config)
	srcVolStorageName := project.StorageVolume(srcProjectName, srcVolName)
	srcVol := srcPool.GetVolume(drivers.VolumeTypeCustom, contentType, srcVolStorageName, srcVolRow.Config)

	if srcPool == b {
		logger.Debug("RefreshCustomVolume same-pool mode detected")

		err = b.driver.RefreshVolume(vol, srcVol, srcSnapVols, op)
		if err != nil {
			return err
		}

		// Create database entry for new storage volume snapshots.
		for _, snapshot := range syncSnapshots {
			_, snapshotName, _ := shared.InstanceGetParentAndSnapshotName(snapshot.Name)
			err = VolumeDBCreate(b.state, b, projectName, fmt.Sprintf("%s/%s", volName, snapshotName), snapshot.Description, drivers.VolumeTypeCustom, true, snapshot.Config, snapshot.ExpiryDate, contentType)
			if err != nil {
				return err
			}
		}
	} else {
		logger.Debug("RefreshCustomVolume cross-pool mode detected")

		// Negotiate the migration type to use.
		offeredTypes := srcPool.MigrationTypes(contentType, true)
		offerHeader := migration.TypesToHeader(offeredTypes...)
		migrationTypes, err := migration.MatchTypes(offerHeader, FallbackMigrationType(contentType), b.MigrationTypes(contentType, true))
		if err != nil {
			return fmt.Errorf("Failed to negotiate copy migration type: %v", err)
		}

		var volSize int64

		if contentType == drivers.ContentTypeBlock {
			// Probe the source block volume's size so the receiver can create a matching
			// block volume. Note: the MountTask error was previously discarded, which
			// silently left volSize at 0 when the mount or size probe failed; it now
			// aborts the refresh instead.
			err = srcVol.MountTask(func(mountPath string, op *operations.Operation) error {
				volDiskPath, err := srcPool.driver.GetVolumeDiskPath(srcVol)
				if err != nil {
					return err
				}

				volSize, err = drivers.BlockDiskSizeBytes(volDiskPath)
				if err != nil {
					return err
				}

				return nil
			}, nil)
			if err != nil {
				return err
			}
		}

		ctx, cancel := context.WithCancel(context.Background())

		// Use in-memory pipe pair to simulate a connection between the sender and receiver.
		aEnd, bEnd := memorypipe.NewPipePair(ctx)

		// Run sender and receiver in separate go routines to prevent deadlocks.
		// Both channels are buffered (size 1) so neither goroutine blocks on send.
		aEndErrCh := make(chan error, 1)
		bEndErrCh := make(chan error, 1)
		go func() {
			err := srcPool.MigrateCustomVolume(srcProjectName, aEnd, &migration.VolumeSourceArgs{
				Name:          srcVolName,
				Snapshots:     snapshotNames,
				MigrationType: migrationTypes[0],
				TrackProgress: true, // Do use a progress tracker on sender.
				ContentType:   string(contentType),
			}, op)
			if err != nil {
				cancel()
			}
			aEndErrCh <- err
		}()

		go func() {
			err := b.CreateCustomVolumeFromMigration(projectName, bEnd, migration.VolumeTargetArgs{
				Name:          volName,
				Description:   desc,
				Config:        config,
				Snapshots:     snapshotNames,
				MigrationType: migrationTypes[0],
				TrackProgress: false, // Do not use a progress tracker on receiver.
				ContentType:   string(contentType),
				VolumeSize:    volSize, // Block size setting override.
				Refresh:       true,
			}, op)
			if err != nil {
				cancel()
			}
			bEndErrCh <- err
		}()

		// Capture errors from the sender and receiver from their result channels.
		errs := []error{}
		aEndErr := <-aEndErrCh
		if aEndErr != nil {
			// Close the sender end explicitly so the receiver is unblocked.
			aEnd.Close()
			errs = append(errs, aEndErr)
		}

		bEndErr := <-bEndErrCh
		if bEndErr != nil {
			errs = append(errs, bEndErr)
		}

		cancel()

		if len(errs) > 0 {
			return fmt.Errorf("Refresh custom volume from copy failed: %v", errs)
		}
	}

	return nil
}
// RefreshInstance synchronises one instance's volume (and optionally snapshots) over another.
// Snapshots that are not present in the source but are in the destination are removed from the
// destination if snapshots are included in the synchronisation.
func (b *lxdBackend) RefreshInstance(inst instance.Instance, src instance.Instance, srcSnapshots []instance.Instance, allowInconsistent bool, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name(), "src": src.Name(), "srcSnapshots": len(srcSnapshots)})
	logger.Debug("RefreshInstance started")
	defer logger.Debug("RefreshInstance finished")

	// Refreshing across instance types (container <-> VM) is not supported.
	if inst.Type() != src.Type() {
		return fmt.Errorf("Instance types must match")
	}

	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}

	contentType := InstanceContentType(inst)

	// Get the root disk device config.
	rootDiskConf, err := b.instanceRootVolumeConfig(inst)
	if err != nil {
		return err
	}

	// Get the volume name on storage.
	volStorageName := project.Instance(inst.Project(), inst.Name())

	// Initialise a new volume containing the root disk config supplied in the new instance.
	vol := b.GetVolume(volType, contentType, volStorageName, rootDiskConf)

	// Get the src volume name on storage.
	srcVolStorageName := project.Instance(src.Project(), src.Name())

	// We don't need to use the source instance's root disk config, so set to nil.
	srcVol := b.GetVolume(volType, contentType, srcVolStorageName, nil)

	srcSnapVols := []drivers.Volume{}
	for _, snapInst := range srcSnapshots {
		// Initialise a new volume containing the root disk config supplied in the
		// new instance. We don't need to use the source instance's snapshot root
		// disk config, so set to nil. This is because snapshots are immutable yet
		// the instance and its snapshots can be transferred between pools, so using
		// the data from the snapshot is incorrect.

		// Get the snap volume name on storage.
		snapVolStorageName := project.Instance(snapInst.Project(), snapInst.Name())
		srcSnapVol := b.GetVolume(volType, contentType, snapVolStorageName, nil)
		srcSnapVols = append(srcSnapVols, srcSnapVol)
	}

	srcPool, err := GetPoolByInstance(b.state, src)
	if err != nil {
		return err
	}

	if b.Name() == srcPool.Name() {
		// Same-pool refresh: delegate to the driver's native refresh.
		logger.Debug("RefreshInstance same-pool mode detected")
		err = b.driver.RefreshVolume(vol, srcVol, srcSnapVols, op)
		if err != nil {
			return err
		}
	} else {
		// We are copying volumes between storage pools so use migration system as it will
		// be able to negotiate a common transfer method between pool types.
		logger.Debug("RefreshInstance cross-pool mode detected")

		// Retrieve a list of snapshots we are copying.
		snapshotNames := []string{}
		for _, srcSnapVol := range srcSnapVols {
			_, snapShotName, _ := shared.InstanceGetParentAndSnapshotName(srcSnapVol.Name())
			snapshotNames = append(snapshotNames, snapShotName)
		}

		// Negotiate the migration type to use.
		offeredTypes := srcPool.MigrationTypes(contentType, true)
		offerHeader := migration.TypesToHeader(offeredTypes...)
		migrationTypes, err := migration.MatchTypes(offerHeader, FallbackMigrationType(contentType), b.MigrationTypes(contentType, true))
		if err != nil {
			return fmt.Errorf("Failed to negotiate copy migration type: %v", err)
		}

		ctx, cancel := context.WithCancel(context.Background())

		// Use in-memory pipe pair to simulate a connection between the sender and receiver.
		aEnd, bEnd := memorypipe.NewPipePair(ctx)

		// Run sender and receiver in separate go routines to prevent deadlocks.
		// Both channels are buffered (size 1) so neither goroutine blocks on send.
		aEndErrCh := make(chan error, 1)
		bEndErrCh := make(chan error, 1)
		go func() {
			err := srcPool.MigrateInstance(src, aEnd, &migration.VolumeSourceArgs{
				Name:              src.Name(),
				Snapshots:         snapshotNames,
				MigrationType:     migrationTypes[0],
				TrackProgress:     true, // Do use a progress tracker on sender.
				AllowInconsistent: allowInconsistent,
			}, op)
			if err != nil {
				// Cancel the context to close the pipe and unblock the peer.
				cancel()
			}
			aEndErrCh <- err
		}()

		go func() {
			err := b.CreateInstanceFromMigration(inst, bEnd, migration.VolumeTargetArgs{
				Name:          inst.Name(),
				Snapshots:     snapshotNames,
				MigrationType: migrationTypes[0],
				Refresh:       true,  // Indicate to receiver volume should exist.
				TrackProgress: false, // Do not use a progress tracker on receiver.
			}, op)
			if err != nil {
				// Cancel the context to close the pipe and unblock the peer.
				cancel()
			}
			bEndErrCh <- err
		}()

		// Capture errors from the sender and receiver from their result channels.
		errs := []error{}
		aEndErr := <-aEndErrCh
		if aEndErr != nil {
			errs = append(errs, aEndErr)
		}

		bEndErr := <-bEndErrCh
		if bEndErr != nil {
			errs = append(errs, bEndErr)
		}

		// Release the pipe's context resources now both sides are done.
		cancel()

		if len(errs) > 0 {
			return fmt.Errorf("Create instance volume from copy failed: %v", errs)
		}
	}

	// Ensure the instance symlink points at the volume's mount path.
	err = b.ensureInstanceSymlink(inst.Type(), inst.Project(), inst.Name(), vol.MountPath())
	if err != nil {
		return err
	}

	// Defer template application until the instance next starts.
	err = inst.DeferTemplateApply(instance.TemplateTriggerCopy)
	if err != nil {
		return err
	}

	return nil
}
// imageFiller builds a filler function that can be used as a filler with CreateVolume().
// The returned function unpacks the image archive identified by fingerprint into the
// volume's mount path and, for VM images, unpacks the qcow2 image into the supplied raw
// root block path.
func (b *lxdBackend) imageFiller(fingerprint string, op *operations.Operation) func(vol drivers.Volume, rootBlockPath string, allowUnsafeResize bool) (int64, error) {
	return func(vol drivers.Volume, rootBlockPath string, allowUnsafeResize bool) (int64, error) {
		var progress *ioprogress.ProgressTracker

		// Progress tracking is only wired up when an operation is supplied
		// (op is not passed when this runs as part of pre-migration setup).
		if op != nil {
			meta := make(map[string]interface{})
			handler := func(percent, speed int64) {
				shared.SetProgressMetadata(meta, "create_instance_from_image_unpack", "Unpack", percent, 0, speed)
				op.UpdateMetadata(meta)
			}
			progress = &ioprogress.ProgressTracker{Handler: handler}
		}

		imgPath := shared.VarPath("images", fingerprint)

		return ImageUnpack(imgPath, vol, rootBlockPath, b.driver.Info().BlockBacking, b.state.OS, allowUnsafeResize, progress)
	}
}
// CreateInstanceFromImage creates a new volume for an instance populated with the image requested.
// On failure caller is expected to call DeleteInstance() to clean up.
func (b *lxdBackend) CreateInstanceFromImage(inst instance.Instance, fingerprint string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name()})
	logger.Debug("CreateInstanceFromImage started")
	defer logger.Debug("CreateInstanceFromImage finished")

	err := b.isStatusReady()
	if err != nil {
		return err
	}

	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}

	contentType := InstanceContentType(inst)

	// Get the root disk device config.
	rootDiskConf, err := b.instanceRootVolumeConfig(inst)
	if err != nil {
		return err
	}

	// Get the volume name on storage.
	volStorageName := project.Instance(inst.Project(), inst.Name())
	vol := b.GetVolume(volType, contentType, volStorageName, rootDiskConf)

	// Leave reverting on failure to caller, they are expected to call DeleteInstance().

	// If the driver doesn't support optimized image volumes then create a new empty volume and
	// populate it with the contents of the image archive.
	if !b.driver.Info().OptimizedImages {
		volFiller := drivers.VolumeFiller{
			Fingerprint: fingerprint,
			Fill:        b.imageFiller(fingerprint, op),
		}

		err = b.driver.CreateVolume(vol, &volFiller, op)
		if err != nil {
			return err
		}
	} else {
		// If the driver supports optimized images then ensure the optimized image volume has been created
		// for the images's fingerprint and that it matches the pool's current volume settings, and if not
		// recreating using the pool's current volume settings.
		err = b.EnsureImage(fingerprint, op)
		if err != nil {
			return err
		}

		// Try and load existing volume config on this storage pool so we can compare filesystems if needed.
		_, imgDBVol, err := b.state.Cluster.GetLocalStoragePoolVolume(project.Default, fingerprint, db.StoragePoolVolumeTypeImage, b.ID())
		if err != nil {
			return errors.Wrapf(err, "Failed loading image record for %q", fingerprint)
		}

		imgVol := b.GetVolume(drivers.VolumeTypeImage, contentType, fingerprint, imgDBVol.Config)

		// Derive the volume size to use for a new volume when copying from a source volume.
		// Where possible (if the source volume has a volatile.rootfs.size property), it checks that the
		// source volume isn't larger than the volume's "size" and the pool's "volume.size" setting.
		logger.Debug("Checking volume size")
		newVolSize, err := vol.ConfigSizeFromSource(imgVol)
		if err != nil {
			return err
		}

		// Set the derived size directly as the "size" property on the new volume so that it is applied.
		vol.SetConfigSize(newVolSize)
		logger.Debug("Set new volume size", log.Ctx{"size": newVolSize})

		// Proceed to create a new volume by copying the optimized image volume.
		err = b.driver.CreateVolumeFromCopy(vol, imgVol, false, op)

		// If the driver returns ErrCannotBeShrunk, this means that the cached volume that the new volume
		// is to be created from is larger than the requested new volume size, and cannot be shrunk.
		// So we unpack the image directly into a new volume rather than use the optimized snapshot.
		// This is slower but allows for individual volumes to be created from an image that are smaller
		// than the pool's volume settings.
		if errors.Cause(err) == drivers.ErrCannotBeShrunk {
			logger.Debug("Cached image volume is larger than new volume and cannot be shrunk, creating non-optimized volume")

			volFiller := drivers.VolumeFiller{
				Fingerprint: fingerprint,
				Fill:        b.imageFiller(fingerprint, op),
			}

			err = b.driver.CreateVolume(vol, &volFiller, op)
			if err != nil {
				return err
			}
		} else if err != nil {
			return err
		}
	}

	// Setup the instance symlink to the volume's mount path.
	err = b.ensureInstanceSymlink(inst.Type(), inst.Project(), inst.Name(), vol.MountPath())
	if err != nil {
		return err
	}

	// Defer template application until the instance next starts.
	err = inst.DeferTemplateApply(instance.TemplateTriggerCreate)
	if err != nil {
		return err
	}

	return nil
}
// CreateInstanceFromMigration receives an instance being migrated.
// The args.Name and args.Config fields are ignored and, instance properties are used instead.
func (b *lxdBackend) CreateInstanceFromMigration(inst instance.Instance, conn io.ReadWriteCloser, args migration.VolumeTargetArgs, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name(), "args": args})
	logger.Debug("CreateInstanceFromMigration started")
	defer logger.Debug("CreateInstanceFromMigration finished")

	err := b.isStatusReady()
	if err != nil {
		return err
	}

	// args.Config is reserved for internal use here (it is overridden below with the
	// instance's own root disk config), so reject any caller-supplied value.
	if args.Config != nil {
		return fmt.Errorf("Migration VolumeTargetArgs.Config cannot be set")
	}

	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}

	contentType := InstanceContentType(inst)

	// Find the root device config for instance.
	rootDiskConf, err := b.instanceRootVolumeConfig(inst)
	if err != nil {
		return err
	}

	// Override args.Name and args.Config to ensure volume is created based on instance.
	args.Config = rootDiskConf
	args.Name = inst.Name()

	projectName := inst.Project()

	// If migration header supplies a volume size, then use that as block volume size instead of pool default.
	// This way if the volume being received is larger than the pool default size, the block volume created
	// will still be able to accommodate it.
	if args.VolumeSize > 0 && contentType == drivers.ContentTypeBlock {
		b.logger.Debug("Setting volume size from offer header", log.Ctx{"size": args.VolumeSize})
		args.Config["size"] = fmt.Sprintf("%d", args.VolumeSize)
	} else if args.Config["size"] != "" {
		b.logger.Debug("Using volume size from root disk config", log.Ctx{"size": args.Config["size"]})
	}

	// Get the volume name on storage.
	volStorageName := project.Instance(projectName, args.Name)

	vol := b.GetVolume(volType, contentType, volStorageName, args.Config)

	// For refreshes the volume must already exist; for fresh migrations it must not.
	volExists := b.driver.HasVolume(vol)
	if args.Refresh && !volExists {
		return fmt.Errorf("Cannot refresh volume, doesn't exist on migration target storage")
	} else if !args.Refresh && volExists {
		return fmt.Errorf("Cannot create volume, already exists on migration target storage")
	}

	var preFiller drivers.VolumeFiller

	revert := true

	if !args.Refresh {
		// Remove the new volume on failure (only when not refreshing an existing one).
		defer func() {
			if !revert {
				return
			}
			b.DeleteInstance(inst, op)
		}()

		// If the negotiated migration method is rsync and the instance's base image is
		// already on the host then setup a pre-filler that will unpack the local image
		// to try and speed up the rsync of the incoming volume by avoiding the need to
		// transfer the base image files too.
		if args.MigrationType.FSType == migration.MigrationFSType_RSYNC {
			fingerprint := inst.ExpandedConfig()["volatile.base_image"]

			// Confirm that the image is present in the project.
			// A missing image record (db.ErrNoSuchObject) is tolerated; any other error is fatal.
			_, _, err = b.state.Cluster.GetImage(fingerprint, db.ImageFilter{Project: &projectName})
			if err != db.ErrNoSuchObject && err != nil {
				return err
			}

			// Then make sure that the image is available locally too (not guaranteed in clusters).
			local := shared.PathExists(shared.VarPath("images", fingerprint))

			// Only use the optimised path if the image record exists AND the file is local.
			if err == nil && local {
				logger.Debug("Using optimised migration from existing image", log.Ctx{"fingerprint": fingerprint})

				// Populate the volume filler with the fingerprint and image filler
				// function that can be used by the driver to pre-populate the
				// volume with the contents of the image.
				preFiller = drivers.VolumeFiller{
					Fingerprint: fingerprint,
					Fill:        b.imageFiller(fingerprint, op),
				}

				// Ensure if the image doesn't yet exist on a driver which supports
				// optimized storage, then it gets created first.
				err = b.EnsureImage(preFiller.Fingerprint, op)
				if err != nil {
					return err
				}
			}
		}
	}

	err = b.driver.CreateVolumeFromMigration(vol, conn, args, &preFiller, op)
	if err != nil {
		// Close the connection so the sender is not left blocked on writes.
		conn.Close()
		return err
	}

	err = b.ensureInstanceSymlink(inst.Type(), inst.Project(), inst.Name(), vol.MountPath())
	if err != nil {
		return err
	}

	if len(args.Snapshots) > 0 {
		err = b.ensureInstanceSnapshotSymlink(inst.Type(), inst.Project(), inst.Name())
		if err != nil {
			return err
		}
	}

	revert = false
	return nil
}
// RenameInstance renames the instance's root volume and any snapshot volumes.
// DB records are renamed first, then the storage volumes and symlinks; every step
// pushes an inverse operation onto the revert stack so a failure part-way through
// rolls everything back.
func (b *lxdBackend) RenameInstance(inst instance.Instance, newName string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name(), "newName": newName})
	logger.Debug("RenameInstance started")
	defer logger.Debug("RenameInstance finished")

	if inst.IsSnapshot() {
		return fmt.Errorf("Instance cannot be a snapshot")
	}

	if shared.IsSnapshot(newName) {
		return fmt.Errorf("New name cannot be a snapshot")
	}

	// Check we can convert the instance to the volume types needed.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}

	volDBType, err := VolumeTypeToDBType(volType)
	if err != nil {
		return err
	}

	revert := revert.New()
	defer revert.Fail()

	// Get any snapshots the instance has in the format <instance name>/<snapshot name>.
	snapshots, err := b.state.Cluster.GetInstanceSnapshotsNames(inst.Project(), inst.Name())
	if err != nil {
		return err
	}

	if len(snapshots) > 0 {
		revert.Add(func() {
			// Restore the old snapshot symlink on failure.
			b.removeInstanceSnapshotSymlinkIfUnused(inst.Type(), inst.Project(), newName)
			b.ensureInstanceSnapshotSymlink(inst.Type(), inst.Project(), inst.Name())
		})
	}

	// Rename each snapshot DB record to have the new parent volume prefix.
	for _, srcSnapshot := range snapshots {
		// Shadow the loop variable so each revert closure captures its own copy.
		// Without this, every closure would see the final iteration's value and
		// rollback would rename the wrong snapshots.
		srcSnapshot := srcSnapshot
		_, snapName, _ := shared.InstanceGetParentAndSnapshotName(srcSnapshot)
		newSnapVolName := drivers.GetSnapshotVolumeName(newName, snapName)
		err = b.state.Cluster.RenameStoragePoolVolume(inst.Project(), srcSnapshot, newSnapVolName, volDBType, b.ID())
		if err != nil {
			return err
		}

		revert.Add(func() {
			b.state.Cluster.RenameStoragePoolVolume(inst.Project(), newSnapVolName, srcSnapshot, volDBType, b.ID())
		})
	}

	// Rename the parent volume DB record.
	err = b.state.Cluster.RenameStoragePoolVolume(inst.Project(), inst.Name(), newName, volDBType, b.ID())
	if err != nil {
		return err
	}

	revert.Add(func() {
		b.state.Cluster.RenameStoragePoolVolume(inst.Project(), newName, inst.Name(), volDBType, b.ID())
	})

	// Rename the volume and its snapshots on the storage device.
	volStorageName := project.Instance(inst.Project(), inst.Name())
	newVolStorageName := project.Instance(inst.Project(), newName)
	contentType := InstanceContentType(inst)

	// There's no need to pass config as it's not needed when renaming a volume.
	vol := b.GetVolume(volType, contentType, volStorageName, nil)

	err = b.driver.RenameVolume(vol, newVolStorageName, op)
	if err != nil {
		return err
	}

	revert.Add(func() {
		// There's no need to pass config as it's not needed when renaming a volume.
		newVol := b.GetVolume(volType, contentType, newVolStorageName, nil)
		b.driver.RenameVolume(newVol, volStorageName, op)
	})

	// Remove old instance symlink and create new one.
	err = b.removeInstanceSymlink(inst.Type(), inst.Project(), inst.Name())
	if err != nil {
		return err
	}

	revert.Add(func() {
		b.ensureInstanceSymlink(inst.Type(), inst.Project(), inst.Name(), drivers.GetVolumeMountPath(b.name, volType, volStorageName))
	})

	err = b.ensureInstanceSymlink(inst.Type(), inst.Project(), newName, drivers.GetVolumeMountPath(b.name, volType, newVolStorageName))
	if err != nil {
		return err
	}

	revert.Add(func() {
		b.removeInstanceSymlink(inst.Type(), inst.Project(), newName)
	})

	// Remove old instance snapshot symlink and create a new one if needed.
	err = b.removeInstanceSnapshotSymlinkIfUnused(inst.Type(), inst.Project(), inst.Name())
	if err != nil {
		return err
	}

	if len(snapshots) > 0 {
		err = b.ensureInstanceSnapshotSymlink(inst.Type(), inst.Project(), newName)
		if err != nil {
			return err
		}
	}

	revert.Success()
	return nil
}
// DeleteInstance removes the instance's root volume (all snapshots need to be removed first).
func (b *lxdBackend) DeleteInstance(inst instance.Instance, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name()})
	logger.Debug("DeleteInstance started")
	defer logger.Debug("DeleteInstance finished")

	if inst.IsSnapshot() {
		return fmt.Errorf("Instance must not be a snapshot")
	}

	// Check we can convert the instance to the volume types needed.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}

	volDBType, err := VolumeTypeToDBType(volType)
	if err != nil {
		return err
	}

	// Get any snapshots the instance has in the format <instance name>/<snapshot name>.
	snapshots, err := b.state.Cluster.GetInstanceSnapshotsNames(inst.Project(), inst.Name())
	if err != nil {
		return err
	}

	// Check all snapshots are already removed.
	if len(snapshots) > 0 {
		return fmt.Errorf("Cannot remove an instance volume that has snapshots")
	}

	// Get the volume name on storage.
	volStorageName := project.Instance(inst.Project(), inst.Name())

	contentType := InstanceContentType(inst)

	// There's no need to pass config as it's not needed when deleting a volume.
	vol := b.GetVolume(volType, contentType, volStorageName, nil)

	// Delete the volume from the storage device. Must come after snapshots are removed.
	// Must come before DB RemoveStoragePoolVolume so that the volume ID is still available.
	logger.Debug("Deleting instance volume", log.Ctx{"volName": volStorageName})
	if b.driver.HasVolume(vol) {
		err = b.driver.DeleteVolume(vol, op)
		if err != nil {
			return errors.Wrapf(err, "Error deleting storage volume")
		}
	}

	// Remove symlinks.
	err = b.removeInstanceSymlink(inst.Type(), inst.Project(), inst.Name())
	if err != nil {
		return err
	}

	err = b.removeInstanceSnapshotSymlinkIfUnused(inst.Type(), inst.Project(), inst.Name())
	if err != nil {
		return err
	}

	// Remove the volume record from the database.
	// A missing DB record (db.ErrNoSuchObject) is tolerated so deletion is idempotent.
	err = b.state.Cluster.RemoveStoragePoolVolume(inst.Project(), inst.Name(), volDBType, b.ID())
	if err != nil && errors.Cause(err) != db.ErrNoSuchObject {
		return errors.Wrapf(err, "Error deleting storage volume from database")
	}

	return nil
}
// UpdateInstance updates an instance volume's config.
// The new config is validated against the driver, immutable keys ("size",
// "block.filesystem") are rejected, and the driver and DB records are updated
// only when something actually changed.
func (b *lxdBackend) UpdateInstance(inst instance.Instance, newDesc string, newConfig map[string]string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name(), "newDesc": newDesc, "newConfig": newConfig})
	logger.Debug("UpdateInstance started")
	defer logger.Debug("UpdateInstance finished")

	if inst.IsSnapshot() {
		return fmt.Errorf("Instance cannot be a snapshot")
	}

	// Check we can convert the instance to the volume types needed.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}

	volDBType, err := VolumeTypeToDBType(volType)
	if err != nil {
		return err
	}

	volStorageName := project.Instance(inst.Project(), inst.Name())
	contentType := InstanceContentType(inst)

	// Validate config.
	newVol := b.GetVolume(volType, contentType, volStorageName, newConfig)
	err = b.driver.ValidateVolume(newVol, false)
	if err != nil {
		return err
	}

	// Get current config to compare what has changed.
	_, curVol, err := b.state.Cluster.GetLocalStoragePoolVolume(inst.Project(), inst.Name(), volDBType, b.ID())
	if err != nil {
		if err == db.ErrNoSuchObject {
			return errors.Wrapf(err, "Volume doesn't exist for %q on pool %q", project.Instance(inst.Project(), inst.Name()), b.Name())
		}

		return err
	}

	// Apply config changes if there are any.
	changedConfig, userOnly := b.detectChangedConfig(curVol.Config, newConfig)
	if len(changedConfig) != 0 {
		// Check that the volume's size property isn't being changed.
		if changedConfig["size"] != "" {
			return fmt.Errorf("Instance volume 'size' property cannot be changed")
		}

		// Check that the volume's block.filesystem property isn't being changed.
		if changedConfig["block.filesystem"] != "" {
			return fmt.Errorf("Instance volume 'block.filesystem' property cannot be changed")
		}

		// Get the root disk device config.
		rootDiskConf, err := b.instanceRootVolumeConfig(inst)
		if err != nil {
			return err
		}

		// Build the driver-level volume for the current config.
		// Named curDrvVol (rather than curVol) to avoid shadowing the DB volume
		// row of the same name (and different type) declared above.
		curDrvVol := b.GetVolume(volType, contentType, volStorageName, rootDiskConf)
		if !userOnly {
			err = b.driver.UpdateVolume(curDrvVol, changedConfig)
			if err != nil {
				return err
			}
		}
	}

	// Update the database if something changed.
	if len(changedConfig) != 0 || newDesc != curVol.Description {
		err = b.state.Cluster.UpdateStoragePoolVolume(inst.Project(), inst.Name(), volDBType, b.ID(), newDesc, newConfig)
		if err != nil {
			return err
		}
	}

	b.state.Events.SendLifecycle(inst.Project(), lifecycle.StorageVolumeUpdated.Event(newVol, string(newVol.Type()), inst.Project(), op, nil))

	return nil
}
// UpdateInstanceSnapshot updates an instance snapshot volume's description.
// Volume config is not allowed to be updated and will return an error.
func (b *lxdBackend) UpdateInstanceSnapshot(inst instance.Instance, newDesc string, newConfig map[string]string, op *operations.Operation) error {
	l := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name(), "newDesc": newDesc, "newConfig": newConfig})
	l.Debug("UpdateInstanceSnapshot started")
	defer l.Debug("UpdateInstanceSnapshot finished")

	// Only snapshot instances may be updated through this helper.
	if !inst.IsSnapshot() {
		return fmt.Errorf("Instance must be a snapshot")
	}

	// Map the instance type onto the storage volume type, then onto its DB representation.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}

	dbVolType, err := VolumeTypeToDBType(volType)
	if err != nil {
		return err
	}

	// Snapshots only allow their description to change; the helper enforces that.
	return b.updateVolumeDescriptionOnly(inst.Project(), inst.Name(), dbVolType, newDesc, newConfig, op)
}
// MigrateInstance sends an instance volume for migration.
// The args.Name field is ignored and the name of the instance is used instead.
func (b *lxdBackend) MigrateInstance(inst instance.Instance, conn io.ReadWriteCloser, args *migration.VolumeSourceArgs, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name(), "args": args})
	logger.Debug("MigrateInstance started")
	defer logger.Debug("MigrateInstance finished")

	// rsync+dd can't handle running source instances
	if inst.IsRunning() && args.MigrationType.FSType == migration.MigrationFSType_BLOCK_AND_RSYNC {
		return fmt.Errorf("Rsync based migration doesn't support running virtual machines")
	}

	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}

	contentType := InstanceContentType(inst)

	if len(args.Snapshots) > 0 && args.FinalSync {
		return fmt.Errorf("Snapshots should not be transferred during final sync")
	}

	// Get the root disk device config.
	rootDiskConf, err := b.instanceRootVolumeConfig(inst)
	if err != nil {
		return err
	}

	args.Name = inst.Name() // Override args.Name to ensure instance volume is sent.

	// Get the volume name on storage.
	volStorageName := project.Instance(inst.Project(), args.Name)

	vol := b.GetVolume(volType, contentType, volStorageName, rootDiskConf)

	// Freeze the instance when the driver requires a consistent (frozen) source for copies of
	// running instances, unless the caller explicitly allows an inconsistent copy or the
	// instance is already frozen/stopped.
	if !inst.IsSnapshot() && b.driver.Info().RunningCopyFreeze && inst.IsRunning() && !inst.IsFrozen() && !args.AllowInconsistent {
		err = inst.Freeze()
		if err != nil {
			return err
		}

		defer inst.Unfreeze()

		// Attempt to sync the filesystem. Best effort; any error is ignored.
		filesystem.SyncFS(inst.RootfsPath())
	}

	return b.driver.MigrateVolume(vol, conn, args, op)
}
// BackupInstance creates an instance backup.
// When snapshots is true the instance's snapshots (oldest first) are included in the backup.
// optimized selects the driver's optimized (binary) backup format where supported.
func (b *lxdBackend) BackupInstance(inst instance.Instance, tarWriter *instancewriter.InstanceTarWriter, optimized bool, snapshots bool, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name(), "optimized": optimized, "snapshots": snapshots})
	logger.Debug("BackupInstance started")
	defer logger.Debug("BackupInstance finished")

	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}

	contentType := InstanceContentType(inst)

	// Get the root disk device config.
	rootDiskConf, err := b.instanceRootVolumeConfig(inst)
	if err != nil {
		return err
	}

	// Get the volume name on storage.
	volStorageName := project.Instance(inst.Project(), inst.Name())

	// Ensure the backup file reflects current config.
	err = b.UpdateInstanceBackupFile(inst, op)
	if err != nil {
		return err
	}

	var snapNames []string
	if snapshots {
		// Get snapshots in age order, oldest first, and pass names to storage driver.
		instSnapshots, err := inst.Snapshots()
		if err != nil {
			return err
		}

		snapNames = make([]string, 0, len(instSnapshots))
		for _, instSnapshot := range instSnapshots {
			// Strip the parent prefix so the driver receives bare snapshot names.
			_, snapName, _ := shared.InstanceGetParentAndSnapshotName(instSnapshot.Name())
			snapNames = append(snapNames, snapName)
		}
	}

	vol := b.GetVolume(volType, contentType, volStorageName, rootDiskConf)

	return b.driver.BackupVolume(vol, tarWriter, optimized, snapNames, op)
}
// GetInstanceUsage returns the disk usage of the instance's root volume.
func (b *lxdBackend) GetInstanceUsage(inst instance.Instance) (int64, error) {
	l := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name()})
	l.Debug("GetInstanceUsage started")
	defer l.Debug("GetInstanceUsage finished")

	// Work out which storage volume type backs this instance type.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return -1, err
	}

	// There's no need to pass config as it's not needed when retrieving the volume usage.
	vol := b.GetVolume(volType, InstanceContentType(inst), project.Instance(inst.Project(), inst.Name()), nil)

	return b.driver.GetVolumeUsage(vol)
}
// SetInstanceQuota sets the quota on the instance's root volume.
// Returns ErrInUse if the instance is running and the storage driver doesn't support online resizing.
// For VM block volumes the associated config filesystem volume's quota is applied as well.
func (b *lxdBackend) SetInstanceQuota(inst instance.Instance, size string, vmStateSize string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name(), "size": size, "vm_state_size": vmStateSize})
	logger.Debug("SetInstanceQuota started")
	defer logger.Debug("SetInstanceQuota finished")

	// Check we can convert the instance to the volume type needed.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}

	contentType := InstanceContentType(inst)
	volStorageName := project.Instance(inst.Project(), inst.Name())

	// Apply the main volume quota.
	// There's no need to pass config as it's not needed when setting quotas.
	vol := b.GetVolume(volType, contentType, volStorageName, nil)
	err = b.driver.SetVolumeQuota(vol, size, false, op)
	if err != nil {
		return err
	}

	// Apply the filesystem volume quota (only when main volume is block).
	if vol.IsVMBlock() {
		// Apply default VM config filesystem size if main volume size is specified and no custom
		// vmStateSize is specified. This way if the main volume size is empty (i.e removing quota) then
		// this will also pass empty quota for the config filesystem volume as well, allowing a former
		// quota to be removed from both volumes.
		if vmStateSize == "" && size != "" {
			vmStateSize = deviceConfig.DefaultVMBlockFilesystemSize
		}

		fsVol := vol.NewVMBlockFilesystemVolume()
		err := b.driver.SetVolumeQuota(fsVol, vmStateSize, false, op)
		if err != nil {
			return err
		}
	}

	return nil
}
// MountInstance mounts the instance's root volume.
// On success the volume stays mounted and the caller is responsible for calling
// UnmountInstance() when done. The returned MountInfo carries the disk block device
// path where the driver exposes one (empty otherwise).
func (b *lxdBackend) MountInstance(inst instance.Instance, op *operations.Operation) (*MountInfo, error) {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name()})
	logger.Debug("MountInstance started")
	defer logger.Debug("MountInstance finished")

	// Revert unmounts the volume again if a later step fails.
	revert := revert.New()
	defer revert.Fail()

	// Check we can convert the instance to the volume type needed.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return nil, err
	}

	// Get the root disk device config.
	// Only attempted when the instance has a non-negative ID; presumably a negative ID means
	// there is no DB record to query yet — TODO confirm against callers.
	var rootDiskConf map[string]string
	if inst.ID() > -1 {
		rootDiskConf, err = b.instanceRootVolumeConfig(inst)
		if err != nil {
			return nil, err
		}
	}

	contentType := InstanceContentType(inst)
	volStorageName := project.Instance(inst.Project(), inst.Name())

	// Get the volume.
	vol := b.GetVolume(volType, contentType, volStorageName, rootDiskConf)

	err = b.driver.MountVolume(vol, op)
	if err != nil {
		return nil, err
	}
	revert.Add(func() { b.driver.UnmountVolume(vol, false, op) })

	// getInstanceDisk returns drivers.ErrNotSupported for non-VM instances; that is
	// tolerated here and leaves DiskPath empty.
	diskPath, err := b.getInstanceDisk(inst)
	if err != nil && err != drivers.ErrNotSupported {
		return nil, errors.Wrapf(err, "Failed getting disk path")
	}

	mountInfo := &MountInfo{
		DiskPath: diskPath,
	}

	revert.Success() // From here on it is up to caller to call UnmountInstance() when done.

	return mountInfo, nil
}
// UnmountInstance unmounts the instance's root volume.
func (b *lxdBackend) UnmountInstance(inst instance.Instance, op *operations.Operation) (bool, error) {
	l := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name()})
	l.Debug("UnmountInstance started")
	defer l.Debug("UnmountInstance finished")

	// Map the instance type onto the storage volume type.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return false, err
	}

	// Load the root disk device config, but only when the instance has a non-negative ID.
	var rootDiskConf map[string]string
	if inst.ID() > -1 {
		rootDiskConf, err = b.instanceRootVolumeConfig(inst)
		if err != nil {
			return false, err
		}
	}

	// Build the driver volume and unmount it (non-snapshot variant).
	vol := b.GetVolume(volType, InstanceContentType(inst), project.Instance(inst.Project(), inst.Name()), rootDiskConf)

	return b.driver.UnmountVolume(vol, false, op)
}
// getInstanceDisk returns the location of the disk.
func (b *lxdBackend) getInstanceDisk(inst instance.Instance) (string, error) {
	// Only virtual machines have a disk block device.
	if inst.Type() != instancetype.VM {
		return "", drivers.ErrNotSupported
	}

	// Map the instance type onto the storage volume type.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return "", err
	}

	// Build the volume. No config is needed just to resolve the block device location.
	volName := project.Instance(inst.Project(), inst.Name())
	vol := b.GetVolume(volType, InstanceContentType(inst), volName, nil)

	// Ask the driver where the disk block device lives.
	return b.driver.GetVolumeDiskPath(vol)
}
// CreateInstanceSnapshot creates a snaphot of an instance volume.
// inst is the snapshot instance to create; src is the (running or stopped) parent instance
// whose volume is snapshotted.
func (b *lxdBackend) CreateInstanceSnapshot(inst instance.Instance, src instance.Instance, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name(), "src": src.Name()})
	logger.Debug("CreateInstanceSnapshot started")
	defer logger.Debug("CreateInstanceSnapshot finished")

	if inst.Type() != src.Type() {
		return fmt.Errorf("Instance types must match")
	}

	if !inst.IsSnapshot() {
		return fmt.Errorf("Instance must be a snapshot")
	}

	if src.IsSnapshot() {
		return fmt.Errorf("Source instance cannot be a snapshot")
	}

	// Check we can convert the instance to the volume type needed.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}

	// Some driver backing stores require that running instances be frozen during snapshot.
	if b.driver.Info().RunningCopyFreeze && src.IsRunning() && !src.IsFrozen() {
		// Freeze the processes.
		err = src.Freeze()
		if err != nil {
			return err
		}

		defer src.Unfreeze()

		// Attempt to sync the filesystem. Best effort; any error is ignored.
		filesystem.SyncFS(src.RootfsPath())
	}

	// Note: the redundant re-check of inst.IsSnapshot() that used to live here was removed;
	// non-snapshot instances are already rejected above.
	contentType := InstanceContentType(inst)
	volStorageName := project.Instance(inst.Project(), inst.Name())

	// Get the volume.
	// There's no need to pass config as it's not needed when creating volume snapshots.
	vol := b.GetVolume(volType, contentType, volStorageName, nil)

	err = b.driver.CreateVolumeSnapshot(vol, op)
	if err != nil {
		return err
	}

	// Ensure the snapshots symlink exists for this instance.
	err = b.ensureInstanceSnapshotSymlink(inst.Type(), inst.Project(), inst.Name())
	if err != nil {
		return err
	}

	return nil
}
// RenameInstanceSnapshot renames an instance snapshot.
// newName is the bare snapshot name (not "parent/snap"). The storage volume is renamed
// first, then the DB record, with reverts registered so a failure at any later step
// undoes the earlier steps.
func (b *lxdBackend) RenameInstanceSnapshot(inst instance.Instance, newName string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name(), "newName": newName})
	logger.Debug("RenameInstanceSnapshot started")
	defer logger.Debug("RenameInstanceSnapshot finished")

	// Revert undoes the storage and DB renames if a later step fails.
	revert := revert.New()
	defer revert.Fail()

	if !inst.IsSnapshot() {
		return fmt.Errorf("Instance must be a snapshot")
	}

	// Reject "parent/snap" style names; only the snapshot part may be supplied.
	if shared.IsSnapshot(newName) {
		return fmt.Errorf("New name cannot be a snapshot")
	}

	// Check we can convert the instance to the volume types needed.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}

	volDBType, err := VolumeTypeToDBType(volType)
	if err != nil {
		return err
	}

	parentName, oldSnapshotName, isSnap := shared.InstanceGetParentAndSnapshotName(inst.Name())
	if !isSnap {
		return fmt.Errorf("Volume name must be a snapshot")
	}

	contentType := InstanceContentType(inst)
	volStorageName := project.Instance(inst.Project(), inst.Name())

	// Rename storage volume snapshot. No need to pass config as it's not needed when renaming a volume.
	snapVol := b.GetVolume(volType, contentType, volStorageName, nil)
	err = b.driver.RenameVolumeSnapshot(snapVol, newName, op)
	if err != nil {
		return err
	}

	// Full new volume name in "parent/snap" form, used for the DB record.
	newVolName := drivers.GetSnapshotVolumeName(parentName, newName)

	revert.Add(func() {
		// Revert rename. No need to pass config as it's not needed when renaming a volume.
		newSnapVol := b.GetVolume(volType, contentType, project.Instance(inst.Project(), newVolName), nil)
		b.driver.RenameVolumeSnapshot(newSnapVol, oldSnapshotName, op)
	})

	// Rename DB volume record.
	err = b.state.Cluster.RenameStoragePoolVolume(inst.Project(), inst.Name(), newVolName, volDBType, b.ID())
	if err != nil {
		return err
	}

	revert.Add(func() {
		// Rename DB volume record back.
		b.state.Cluster.RenameStoragePoolVolume(inst.Project(), newVolName, inst.Name(), volDBType, b.ID())
	})

	// Ensure the backup file reflects current config.
	err = b.UpdateInstanceBackupFile(inst, op)
	if err != nil {
		return err
	}

	revert.Success()
	return nil
}
// DeleteInstanceSnapshot removes the snapshot volume for the supplied snapshot instance.
// Storage deletion happens before the DB record is removed, and a missing DB record is
// not treated as an error.
func (b *lxdBackend) DeleteInstanceSnapshot(inst instance.Instance, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name()})
	logger.Debug("DeleteInstanceSnapshot started")
	defer logger.Debug("DeleteInstanceSnapshot finished")

	parentName, snapName, isSnap := shared.InstanceGetParentAndSnapshotName(inst.Name())
	if !inst.IsSnapshot() || !isSnap {
		return fmt.Errorf("Instance must be a snapshot")
	}

	// Check we can convert the instance to the volume types needed.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}

	volDBType, err := VolumeTypeToDBType(volType)
	if err != nil {
		return err
	}

	contentType := InstanceContentType(inst)

	// Get the parent volume name on storage.
	parentStorageName := project.Instance(inst.Project(), parentName)

	// Delete the snapshot from the storage device.
	// Must come before DB RemoveStoragePoolVolume so that the volume ID is still available.
	logger.Debug("Deleting instance snapshot volume", log.Ctx{"volName": parentStorageName, "snapshotName": snapName})

	snapVolName := drivers.GetSnapshotVolumeName(parentStorageName, snapName)

	// There's no need to pass config as it's not needed when deleting a volume snapshot.
	vol := b.GetVolume(volType, contentType, snapVolName, nil)

	if b.driver.HasVolume(vol) {
		err = b.driver.DeleteVolumeSnapshot(vol, op)
		if err != nil {
			return err
		}
	}

	// Delete symlink if needed.
	err = b.removeInstanceSnapshotSymlinkIfUnused(inst.Type(), inst.Project(), inst.Name())
	if err != nil {
		return err
	}

	// Remove the snapshot volume record from the database if exists.
	// Use errors.Cause so a wrapped "no such object" error is still tolerated,
	// consistent with the handling elsewhere in this file.
	err = b.state.Cluster.RemoveStoragePoolVolume(inst.Project(), drivers.GetSnapshotVolumeName(parentName, snapName), volDBType, b.ID())
	if err != nil && errors.Cause(err) != db.ErrNoSuchObject {
		return err
	}

	return nil
}
// RestoreInstanceSnapshot restores an instance snapshot.
// inst is the (stopped) parent instance to restore into; src is the snapshot instance to
// restore from. If the driver reports that specific snapshots block the restore
// (drivers.ErrDeleteSnapshots) those snapshots are deleted and the restore retried once.
func (b *lxdBackend) RestoreInstanceSnapshot(inst instance.Instance, src instance.Instance, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name(), "src": src.Name()})
	logger.Debug("RestoreInstanceSnapshot started")
	defer logger.Debug("RestoreInstanceSnapshot finished")

	if inst.Type() != src.Type() {
		return fmt.Errorf("Instance types must match")
	}

	if inst.IsSnapshot() {
		return fmt.Errorf("Instance must not be snapshot")
	}

	if !src.IsSnapshot() {
		return fmt.Errorf("Source instance must be a snapshot")
	}

	// Target instance must not be running.
	if inst.IsRunning() {
		return fmt.Errorf("Instance must not be running to restore")
	}

	// Check we can convert the instance to the volume type needed.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}

	contentType := InstanceContentType(inst)

	// Find the root device config for source snapshot instance.
	rootDiskConf, err := b.instanceRootVolumeConfig(inst)
	if err != nil {
		return err
	}

	// Get the volume name on storage.
	volStorageName := project.Instance(inst.Project(), inst.Name())

	// Extract the bare snapshot name from src's "parent/snap" name.
	_, snapshotName, isSnap := shared.InstanceGetParentAndSnapshotName(src.Name())
	if !isSnap {
		return fmt.Errorf("Volume name must be a snapshot")
	}

	// Use the source snapshot's rootfs config (as this will later be restored into inst too).
	vol := b.GetVolume(volType, contentType, volStorageName, rootDiskConf)
	err = b.driver.RestoreVolume(vol, snapshotName, op)
	if err != nil {
		// The driver may report that certain snapshots must be deleted before the
		// restore can proceed, listing them in the error.
		snapErr, ok := err.(drivers.ErrDeleteSnapshots)
		if ok {
			// We need to delete some snapshots and try again.
			snaps, err := inst.Snapshots()
			if err != nil {
				return err
			}

			// Go through all the snapshots.
			for _, snap := range snaps {
				_, snapName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name())
				if !shared.StringInSlice(snapName, snapErr.Snapshots) {
					continue
				}

				// Delete snapshot instance if listed in the error as one that needs removing.
				err := snap.Delete(true)
				if err != nil {
					return err
				}
			}

			// Now try restoring again.
			err = b.driver.RestoreVolume(vol, snapshotName, op)
			if err != nil {
				return err
			}

			return nil
		}

		return err
	}

	return nil
}
// MountInstanceSnapshot mounts an instance snapshot. It is mounted as read only so that the
// snapshot cannot be modified.
func (b *lxdBackend) MountInstanceSnapshot(inst instance.Instance, op *operations.Operation) (*MountInfo, error) {
	l := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name()})
	l.Debug("MountInstanceSnapshot started")
	defer l.Debug("MountInstanceSnapshot finished")

	// Only snapshot instances may be mounted through this helper.
	if !inst.IsSnapshot() {
		return nil, fmt.Errorf("Instance must be a snapshot")
	}

	// Map the instance type onto the storage volume type.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return nil, err
	}

	contentType := InstanceContentType(inst)

	// Get the root disk device config.
	rootDiskConf, err := b.instanceRootVolumeConfig(inst)
	if err != nil {
		return nil, err
	}

	// Build the driver volume for the snapshot and mount it.
	vol := b.GetVolume(volType, contentType, project.Instance(inst.Project(), inst.Name()), rootDiskConf)
	if _, err := b.driver.MountVolumeSnapshot(vol, op); err != nil {
		return nil, err
	}

	// Resolve the block device path; non-VM volumes report ErrNotSupported, which is tolerated.
	diskPath, err := b.getInstanceDisk(inst)
	if err != nil && err != drivers.ErrNotSupported {
		return nil, errors.Wrapf(err, "Failed getting disk path")
	}

	return &MountInfo{DiskPath: diskPath}, nil
}
// UnmountInstanceSnapshot unmounts an instance snapshot.
func (b *lxdBackend) UnmountInstanceSnapshot(inst instance.Instance, op *operations.Operation) (bool, error) {
	l := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name()})
	l.Debug("UnmountInstanceSnapshot started")
	defer l.Debug("UnmountInstanceSnapshot finished")

	// Only snapshot instances may be unmounted through this helper.
	if !inst.IsSnapshot() {
		return false, fmt.Errorf("Instance must be a snapshot")
	}

	// Map the instance type onto the storage volume type.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return false, err
	}

	// Get the root disk device config.
	rootDiskConf, err := b.instanceRootVolumeConfig(inst)
	if err != nil {
		return false, err
	}

	// Build the driver volume for the snapshot and unmount it.
	vol := b.GetVolume(volType, InstanceContentType(inst), project.Instance(inst.Project(), inst.Name()), rootDiskConf)

	return b.driver.UnmountVolumeSnapshot(vol, op)
}
// poolBlockFilesystem returns the filesystem used for new block device filesystems.
func (b *lxdBackend) poolBlockFilesystem() string {
	// Prefer an explicit pool-level setting, otherwise fall back to the driver default.
	fs := b.db.Config["volume.block.filesystem"]
	if fs == "" {
		return drivers.DefaultFilesystem
	}

	return fs
}
// EnsureImage creates an optimized volume of the image if supported by the storage pool driver and the volume
// doesn't already exist. If the volume already exists then it is checked to ensure it matches the pools current
// volume settings ("volume.size" and "block.filesystem" if applicable). If not the optimized volume is removed
// and regenerated to apply the pool's current volume settings.
func (b *lxdBackend) EnsureImage(fingerprint string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"fingerprint": fingerprint})
	logger.Debug("EnsureImage started")
	defer logger.Debug("EnsureImage finished")

	err := b.isStatusReady()
	if err != nil {
		return err
	}

	if !b.driver.Info().OptimizedImages {
		return nil // Nothing to do for drivers that don't support optimized images volumes.
	}

	// We need to lock this operation to ensure that the image is not being created multiple times.
	// Uses a lock name of "EnsureImage_<fingerprint>" to avoid deadlocking with CreateVolume below that also
	// establishes a lock on the volume type & name if it needs to mount the volume before filling.
	unlock := locking.Lock(drivers.OperationLockName("EnsureImage", b.name, drivers.VolumeTypeImage, "", fingerprint))
	defer unlock()

	// Load image info from database.
	_, image, err := b.state.Cluster.GetImageFromAnyProject(fingerprint)
	if err != nil {
		return err
	}

	// Derive content type from image type. Image types are not the same as instance types, so don't use
	// instance type constants for comparison.
	contentType := drivers.ContentTypeFS
	if image.Type == "virtual-machine" {
		contentType = drivers.ContentTypeBlock
	}

	// Try and load any existing volume config on this storage pool so we can compare filesystems if needed.
	// A missing record (db.ErrNoSuchObject) is tolerated: it just means no cached volume exists yet.
	_, imgDBVol, err := b.state.Cluster.GetLocalStoragePoolVolume(project.Default, fingerprint, db.StoragePoolVolumeTypeImage, b.ID())
	if err != nil {
		if err != db.ErrNoSuchObject {
			return err
		}
	}

	// Create the new image volume. No config for an image volume so set to nil.
	// Pool config values will be read by the underlying driver if needed.
	imgVol := b.GetVolume(drivers.VolumeTypeImage, contentType, fingerprint, nil)

	// If an existing DB row was found, check if filesystem is the same as the current pool's filesystem.
	// If not we need to delete the existing cached image volume and re-create using new filesystem.
	// We need to do this for VM block images too, as they create a filesystem based config volume too.
	if imgDBVol != nil {
		// Add existing image volume's config to imgVol.
		imgVol = b.GetVolume(drivers.VolumeTypeImage, contentType, fingerprint, imgDBVol.Config)

		if b.Driver().Info().BlockBacking && imgVol.Config()["block.filesystem"] != b.poolBlockFilesystem() {
			logger.Debug("Filesystem of pool has changed since cached image volume created, regenerating image volume")
			err = b.DeleteImage(fingerprint, op)
			if err != nil {
				return err
			}

			// Reset img volume variables as we just deleted the old one.
			imgDBVol = nil
			imgVol = b.GetVolume(drivers.VolumeTypeImage, contentType, fingerprint, nil)
		}
	}

	// Check if we already have a suitable volume on storage device.
	if b.driver.HasVolume(imgVol) {
		if imgDBVol != nil {
			// Work out what size the image volume should be as if we were creating from scratch.
			// This takes into account the existing volume's "volatile.rootfs.size" setting if set so
			// as to avoid trying to shrink a larger image volume back to the default size when it is
			// allowed to be larger than the default as the pool doesn't specify a volume.size.
			logger.Debug("Checking image volume size")
			newVolSize, err := imgVol.ConfigSizeFromSource(imgVol)
			if err != nil {
				return err
			}

			imgVol.SetConfigSize(newVolSize)

			// Try applying the current size policy to the existing volume. If it is the same the
			// driver should make no changes, and if not then attempt to resize it to the new policy.
			// NOTE(review): this Debug call passes bare key/value args rather than the log.Ctx{}
			// form used elsewhere in this file — confirm the logger accepts this variadic form.
			logger.Debug("Setting image volume size", "size", imgVol.ConfigSize())
			err = b.driver.SetVolumeQuota(imgVol, imgVol.ConfigSize(), false, op)
			if errors.Cause(err) == drivers.ErrCannotBeShrunk || errors.Cause(err) == drivers.ErrNotSupported {
				// If the driver cannot resize the existing image volume to the new policy size
				// then delete the image volume and try to recreate using the new policy settings.
				logger.Debug("Volume size of pool has changed since cached image volume created and cached volume cannot be resized, regenerating image volume")
				err = b.DeleteImage(fingerprint, op)
				if err != nil {
					return err
				}

				// Reset img volume variables as we just deleted the old one.
				imgDBVol = nil
				imgVol = b.GetVolume(drivers.VolumeTypeImage, contentType, fingerprint, nil)
			} else if err != nil {
				return err
			} else {
				// We already have a valid volume at the correct size, just return.
				return nil
			}
		} else {
			// We have an unrecorded on-disk volume, assume it's a partial unpack and delete it.
			// This can occur if LXD process exits unexpectedly during an image unpack or if the
			// storage pool has been recovered (which would not recreate the image volume DB records).
			logger.Warn("Deleting leftover/partially unpacked image volume")
			err = b.driver.DeleteVolume(imgVol, op)
			if err != nil {
				return errors.Wrapf(err, "Failed deleting leftover/partially unpacked image volume")
			}
		}
	}

	// The filler unpacks the image file into the new volume.
	volFiller := drivers.VolumeFiller{
		Fingerprint: fingerprint,
		Fill:        b.imageFiller(fingerprint, op),
	}

	// Revert deletes the newly created volume if the DB record cannot be added.
	revert := revert.New()
	defer revert.Fail()

	err = b.driver.CreateVolume(imgVol, &volFiller, op)
	if err != nil {
		return err
	}
	revert.Add(func() { b.driver.DeleteVolume(imgVol, op) })

	var volConfig map[string]string

	// If the volume filler has recorded the size of the unpacked volume, then store this in the image DB row.
	if volFiller.Size != 0 {
		volConfig = map[string]string{
			"volatile.rootfs.size": fmt.Sprintf("%d", volFiller.Size),
		}
	}

	err = VolumeDBCreate(b.state, b, project.Default, fingerprint, "", drivers.VolumeTypeImage, false, volConfig, time.Time{}, contentType)
	if err != nil {
		return err
	}

	revert.Success()
	return nil
}
// DeleteImage removes an image from the database and underlying storage device if needed.
func (b *lxdBackend) DeleteImage(fingerprint string, op *operations.Operation) error {
	l := logging.AddContext(b.logger, log.Ctx{"fingerprint": fingerprint})
	l.Debug("DeleteImage started")
	defer l.Debug("DeleteImage finished")

	// We need to lock this operation to ensure that the image is not being deleted multiple times.
	unlock := locking.Lock(drivers.OperationLockName("DeleteImage", b.name, drivers.VolumeTypeImage, "", fingerprint))
	defer unlock()

	// Load image info from database.
	_, image, err := b.state.Cluster.GetImageFromAnyProject(fingerprint)
	if err != nil {
		return err
	}

	// Image types are not the same as instance types, so don't use instance type constants.
	contentType := drivers.ContentTypeFS
	if image.Type == "virtual-machine" {
		contentType = drivers.ContentTypeBlock
	}

	// Load the storage volume in order to get the volume config which is needed for some drivers.
	_, storageVol, err := b.state.Cluster.GetLocalStoragePoolVolume(project.Default, fingerprint, db.StoragePoolVolumeTypeImage, b.ID())
	if err != nil {
		return err
	}

	// Delete the volume from the storage device first (if present), then its DB record.
	vol := b.GetVolume(drivers.VolumeTypeImage, contentType, fingerprint, storageVol.Config)
	if b.driver.HasVolume(vol) {
		if err := b.driver.DeleteVolume(vol, op); err != nil {
			return err
		}
	}

	if err := b.state.Cluster.RemoveStoragePoolVolume(project.Default, fingerprint, db.StoragePoolVolumeTypeImage, b.ID()); err != nil {
		return err
	}

	b.state.Events.SendLifecycle(project.Default, lifecycle.StorageVolumeDeleted.Event(vol, string(vol.Type()), project.Default, op, nil))

	return nil
}
// updateVolumeDescriptionOnly is a helper function used when handling update requests for volumes
// that only allow their descriptions to be updated. If any config supplied differs from the
// current volume's config then an error is returned.
// Note: the first parameter is named projectName (not project) so it doesn't shadow the
// imported project package.
func (b *lxdBackend) updateVolumeDescriptionOnly(projectName string, volName string, dbVolType int, newDesc string, newConfig map[string]string, op *operations.Operation) error {
	// Get current config to compare what has changed.
	_, curVol, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, dbVolType, b.ID())
	if err != nil {
		if err == db.ErrNoSuchObject {
			return errors.Wrapf(err, "Volume doesn't exist")
		}

		return err
	}

	// Reject any actual config change; only the description may be updated here.
	if newConfig != nil {
		changedConfig, _ := b.detectChangedConfig(curVol.Config, newConfig)
		if len(changedConfig) != 0 {
			return fmt.Errorf("Volume config is not editable")
		}
	}

	// Update the database if description changed. Use current config.
	if newDesc != curVol.Description {
		err = b.state.Cluster.UpdateStoragePoolVolume(projectName, volName, dbVolType, b.ID(), newDesc, curVol.Config)
		if err != nil {
			return err
		}
	}

	// Get content type.
	dbContentType, err := VolumeContentTypeNameToContentType(curVol.ContentType)
	if err != nil {
		return err
	}

	contentType, err := VolumeDBContentTypeToContentType(dbContentType)
	if err != nil {
		return err
	}

	// Build a volume purely for the lifecycle event payload (no validation happens here).
	vol := b.GetVolume(drivers.VolumeType(curVol.Type), contentType, volName, newConfig)

	if !vol.IsSnapshot() {
		b.state.Events.SendLifecycle(projectName, lifecycle.StorageVolumeUpdated.Event(vol, string(vol.Type()), projectName, op, nil))
	} else {
		b.state.Events.SendLifecycle(projectName, lifecycle.StorageVolumeSnapshotUpdated.Event(vol, string(vol.Type()), projectName, op, nil))
	}

	return nil
}
// UpdateImage updates image config.
func (b *lxdBackend) UpdateImage(fingerprint, newDesc string, newConfig map[string]string, op *operations.Operation) error {
	l := logging.AddContext(b.logger, log.Ctx{"fingerprint": fingerprint, "newDesc": newDesc, "newConfig": newConfig})
	l.Debug("UpdateImage started")
	defer l.Debug("UpdateImage finished")

	// Image volumes only permit description updates; delegate to the shared helper.
	return b.updateVolumeDescriptionOnly(project.Default, fingerprint, db.StoragePoolVolumeTypeImage, newDesc, newConfig, op)
}
// CreateCustomVolume creates an empty custom volume.
// The config is validated, the DB record created, then the volume created on storage;
// the DB record is removed again if the storage creation fails.
func (b *lxdBackend) CreateCustomVolume(projectName string, volName string, desc string, config map[string]string, contentType drivers.ContentType, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": volName, "desc": desc, "config": config, "contentType": contentType})
	logger.Debug("CreateCustomVolume started")
	defer logger.Debug("CreateCustomVolume finished")

	err := b.isStatusReady()
	if err != nil {
		return err
	}

	// Get the volume name on storage.
	volStorageName := project.StorageVolume(projectName, volName)

	// Validate config.
	vol := b.GetVolume(drivers.VolumeTypeCustom, contentType, volStorageName, config)
	err = b.driver.ValidateVolume(vol, false)
	if err != nil {
		return err
	}

	// Check the pool's driver actually supports custom volumes.
	storagePoolSupported := false
	for _, supportedType := range b.Driver().Info().VolumeTypes {
		if supportedType == drivers.VolumeTypeCustom {
			storagePoolSupported = true
			break
		}
	}

	if !storagePoolSupported {
		return fmt.Errorf("Storage pool does not support custom volume type")
	}

	// Use the revert pattern (consistent with the rest of this file) rather than an ad-hoc bool.
	revert := revert.New()
	defer revert.Fail()

	// Create database entry for new storage volume.
	err = VolumeDBCreate(b.state, b, projectName, volName, desc, vol.Type(), false, vol.Config(), time.Time{}, vol.ContentType())
	if err != nil {
		return err
	}

	// Remove the DB record again if creating the volume on storage fails.
	revert.Add(func() {
		b.state.Cluster.RemoveStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
	})

	// Create the empty custom volume on the storage device.
	err = b.driver.CreateVolume(vol, nil, op)
	if err != nil {
		return err
	}

	b.state.Events.SendLifecycle(projectName, lifecycle.StorageVolumeCreated.Event(vol, string(vol.Type()), projectName, op, log.Ctx{"type": vol.Type()}))

	revert.Success()
	return nil
}
// CreateCustomVolumeFromCopy creates a custom volume from an existing custom volume.
// It copies the snapshots from the source volume by default, but can be disabled if requested.
// Same-pool copies use the driver's CreateVolumeFromCopy directly; cross-pool
// copies go through the migration system over an in-memory pipe.
func (b *lxdBackend) CreateCustomVolumeFromCopy(projectName string, srcProjectName string, volName string, desc string, config map[string]string, srcPoolName, srcVolName string, srcVolOnly bool, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "srcProjectName": srcProjectName, "volName": volName, "desc": desc, "config": config, "srcPoolName": srcPoolName, "srcVolName": srcVolName, "srcVolOnly": srcVolOnly})
	logger.Debug("CreateCustomVolumeFromCopy started")
	defer logger.Debug("CreateCustomVolumeFromCopy finished")

	err := b.isStatusReady()
	if err != nil {
		return err
	}

	// Source project defaults to the target project.
	if srcProjectName == "" {
		srcProjectName = projectName
	}

	// Setup the source pool backend instance.
	var srcPool *lxdBackend
	if b.name == srcPoolName {
		srcPool = b // Source and target are in the same pool so share pool var.
	} else {
		// Source is in a different pool to target, so load the pool.
		tmpPool, err := GetPoolByName(b.state, srcPoolName)
		if err != nil {
			return err
		}

		// Convert to lxdBackend so we can access driver.
		tmpBackend, ok := tmpPool.(*lxdBackend)
		if !ok {
			return fmt.Errorf("Pool is not an lxdBackend")
		}

		srcPool = tmpBackend
	}

	// Check source volume exists and is custom type.
	_, srcVolRow, err := b.state.Cluster.GetLocalStoragePoolVolume(srcProjectName, srcVolName, db.StoragePoolVolumeTypeCustom, srcPool.ID())
	if err != nil {
		if err == db.ErrNoSuchObject {
			return fmt.Errorf("Source volume doesn't exist")
		}

		return err
	}

	// Use the source volume's config if not supplied.
	if config == nil {
		config = srcVolRow.Config
	}

	// Use the source volume's description if not supplied.
	if desc == "" {
		desc = srcVolRow.Description
	}

	contentDBType, err := VolumeContentTypeNameToContentType(srcVolRow.ContentType)
	if err != nil {
		return err
	}

	// Get the source volume's content type.
	contentType := drivers.ContentTypeFS
	if contentDBType == db.StoragePoolVolumeContentTypeBlock {
		contentType = drivers.ContentTypeBlock
	}

	// Check the target pool's driver supports custom volumes.
	storagePoolSupported := false
	for _, supportedType := range b.Driver().Info().VolumeTypes {
		if supportedType == drivers.VolumeTypeCustom {
			storagePoolSupported = true
			break
		}
	}

	if !storagePoolSupported {
		return fmt.Errorf("Storage pool does not support custom volume type")
	}

	// If we are copying snapshots, retrieve a list of snapshots from source volume.
	snapshotNames := []string{}
	if !srcVolOnly {
		snapshots, err := VolumeSnapshotsGet(b.state, srcProjectName, srcPoolName, srcVolName, db.StoragePoolVolumeTypeCustom)
		if err != nil {
			return err
		}

		for _, snapshot := range snapshots {
			_, snapShotName, _ := shared.InstanceGetParentAndSnapshotName(snapshot.Name)
			snapshotNames = append(snapshotNames, snapShotName)
		}
	}

	// If the source and target are in the same pool then use CreateVolumeFromCopy rather than
	// migration system as it will be quicker.
	if srcPool == b {
		logger.Debug("CreateCustomVolumeFromCopy same-pool mode detected")

		// Create slice to record DB volumes created if revert needed later.
		revertDBVolumes := []string{}
		defer func() {
			// Remove any DB volume rows created if we are reverting.
			for _, volName := range revertDBVolumes {
				b.state.Cluster.RemoveStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
			}
		}()

		// Get the volume name on storage.
		volStorageName := project.StorageVolume(projectName, volName)
		vol := b.GetVolume(drivers.VolumeTypeCustom, contentType, volStorageName, config)

		// Get the src volume name on storage.
		srcVolStorageName := project.StorageVolume(srcProjectName, srcVolName)
		srcVol := b.GetVolume(drivers.VolumeTypeCustom, contentType, srcVolStorageName, srcVolRow.Config)

		// Check the supplied config and remove any fields not relevant for pool type.
		err := b.driver.ValidateVolume(vol, true)
		if err != nil {
			return err
		}

		// Create database entry for new storage volume.
		err = VolumeDBCreate(b.state, b, projectName, volName, desc, vol.Type(), false, vol.Config(), time.Time{}, vol.ContentType())
		if err != nil {
			return err
		}

		revertDBVolumes = append(revertDBVolumes, volName)

		if len(snapshotNames) > 0 {
			for _, snapName := range snapshotNames {
				newSnapshotName := drivers.GetSnapshotVolumeName(volName, snapName)

				// Create database entry for new storage volume snapshot.
				err = VolumeDBCreate(b.state, b, projectName, newSnapshotName, desc, vol.Type(), true, vol.Config(), time.Time{}, vol.ContentType())
				if err != nil {
					return err
				}

				revertDBVolumes = append(revertDBVolumes, newSnapshotName)
			}
		}

		err = b.driver.CreateVolumeFromCopy(vol, srcVol, !srcVolOnly, op)
		if err != nil {
			return err
		}

		b.state.Events.SendLifecycle(projectName, lifecycle.StorageVolumeCreated.Event(vol, string(vol.Type()), projectName, op, log.Ctx{"type": vol.Type()}))

		// Success: disarm the DB revert.
		revertDBVolumes = nil
		return nil
	}

	// We are copying volumes between storage pools so use migration system as it will be able
	// to negotiate a common transfer method between pool types.
	logger.Debug("CreateCustomVolumeFromCopy cross-pool mode detected")

	// Negotiate the migration type to use.
	offeredTypes := srcPool.MigrationTypes(contentType, false)
	offerHeader := migration.TypesToHeader(offeredTypes...)
	migrationTypes, err := migration.MatchTypes(offerHeader, FallbackMigrationType(contentType), b.MigrationTypes(contentType, false))
	if err != nil {
		return fmt.Errorf("Failed to negotiate copy migration type: %v", err)
	}

	// If we're copying block volumes, the target block volume needs to be
	// at least the size of the source volume, otherwise we'll run into
	// "no space left on device".
	var volSize int64

	if contentType == drivers.ContentTypeBlock {
		// Get the src volume name on storage.
		srcVolStorageName := project.StorageVolume(srcProjectName, srcVolName)
		srcVol := srcPool.GetVolume(drivers.VolumeTypeCustom, contentType, srcVolStorageName, srcVolRow.Config)

		// Mount the source volume to read its effective block device size.
		// Fix: the error returned by MountTask was previously discarded, which
		// could silently leave volSize at 0 and hide mount/size failures.
		err = srcVol.MountTask(func(mountPath string, op *operations.Operation) error {
			volDiskPath, err := srcPool.driver.GetVolumeDiskPath(srcVol)
			if err != nil {
				return err
			}

			volSize, err = drivers.BlockDiskSizeBytes(volDiskPath)
			if err != nil {
				return err
			}

			return nil
		}, nil)
		if err != nil {
			return err
		}
	}

	ctx, cancel := context.WithCancel(context.Background())

	// Use in-memory pipe pair to simulate a connection between the sender and receiver.
	aEnd, bEnd := memorypipe.NewPipePair(ctx)

	// Run sender and receiver in separate go routines to prevent deadlocks.
	aEndErrCh := make(chan error, 1)
	bEndErrCh := make(chan error, 1)
	go func() {
		err := srcPool.MigrateCustomVolume(srcProjectName, aEnd, &migration.VolumeSourceArgs{
			Name:          srcVolName,
			Snapshots:     snapshotNames,
			MigrationType: migrationTypes[0],
			TrackProgress: true, // Do use a progress tracker on sender.
			ContentType:   string(contentType),
		}, op)
		if err != nil {
			cancel() // Abort the pipe so the other end unblocks.
		}
		aEndErrCh <- err
	}()

	go func() {
		err := b.CreateCustomVolumeFromMigration(projectName, bEnd, migration.VolumeTargetArgs{
			Name:          volName,
			Description:   desc,
			Config:        config,
			Snapshots:     snapshotNames,
			MigrationType: migrationTypes[0],
			TrackProgress: false, // Do not use a progress tracker on receiver.
			ContentType:   string(contentType),
			VolumeSize:    volSize, // Block size setting override.
		}, op)
		if err != nil {
			cancel() // Abort the pipe so the other end unblocks.
		}
		bEndErrCh <- err
	}()

	// Capture errors from the sender and receiver from their result channels.
	errs := []error{}
	aEndErr := <-aEndErrCh
	if aEndErr != nil {
		aEnd.Close()
		errs = append(errs, aEndErr)
	}

	bEndErr := <-bEndErrCh
	if bEndErr != nil {
		errs = append(errs, bEndErr)
	}

	cancel()

	if len(errs) > 0 {
		return fmt.Errorf("Create custom volume from copy failed: %v", errs)
	}

	return nil
}
// MigrateCustomVolume sends a volume for migration.
func (b *lxdBackend) MigrateCustomVolume(projectName string, conn io.ReadWriteCloser, args *migration.VolumeSourceArgs, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": args.Name, "args": args})
	logger.Debug("MigrateCustomVolume started")
	defer logger.Debug("MigrateCustomVolume finished")

	// Resolve the content type from its DB name in two steps.
	dbContentType, err := VolumeContentTypeNameToContentType(args.ContentType)
	if err != nil {
		return err
	}

	contentType, err := VolumeDBContentTypeToContentType(dbContentType)
	if err != nil {
		return err
	}

	// Sending a volume requires no config, so nil is passed. The effective
	// storage name includes the project prefix.
	vol := b.GetVolume(drivers.VolumeTypeCustom, contentType, project.StorageVolume(projectName, args.Name), nil)

	return b.driver.MigrateVolume(vol, conn, args, op)
}
// CreateCustomVolumeFromMigration receives a volume being migrated.
// DB records for the volume (and any snapshots) are created first and removed
// again via the deferred revert if any later step fails.
func (b *lxdBackend) CreateCustomVolumeFromMigration(projectName string, conn io.ReadWriteCloser, args migration.VolumeTargetArgs, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": args.Name, "args": args})
	logger.Debug("CreateCustomVolumeFromMigration started")
	defer logger.Debug("CreateCustomVolumeFromMigration finished")

	// Refuse to receive anything while the pool is not ready.
	err := b.isStatusReady()
	if err != nil {
		return err
	}

	// Check this pool's driver supports custom volumes at all.
	storagePoolSupported := false
	for _, supportedType := range b.Driver().Info().VolumeTypes {
		if supportedType == drivers.VolumeTypeCustom {
			storagePoolSupported = true
			break
		}
	}

	if !storagePoolSupported {
		return fmt.Errorf("Storage pool does not support custom volume type")
	}

	// Create slice to record DB volumes created if revert needed later.
	revertDBVolumes := []string{}
	defer func() {
		// Remove any DB volume rows created if we are reverting.
		for _, volName := range revertDBVolumes {
			b.state.Cluster.RemoveStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
		}
	}()

	// Get the volume name on storage.
	volStorageName := project.StorageVolume(projectName, args.Name)

	// Check the supplied config and remove any fields not relevant for destination pool type.
	vol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(args.ContentType), volStorageName, args.Config)

	// VolumeSize is set to the actual size of the underlying block device.
	// The target should use this value if present, otherwise it might get an error like
	// "no space left on device".
	if args.VolumeSize > 0 {
		vol.SetConfigSize(fmt.Sprintf("%d", args.VolumeSize))
	}

	err = b.driver.ValidateVolume(vol, true)
	if err != nil {
		return err
	}

	// Only create a new DB record when not refreshing an existing volume on
	// this pool (a refresh reuses the existing record).
	if !args.Refresh || !b.driver.HasVolume(vol) {
		// Create database entry for new storage volume.
		err = VolumeDBCreate(b.state, b, projectName, args.Name, args.Description, vol.Type(), false, vol.Config(), time.Time{}, vol.ContentType())
		if err != nil {
			return err
		}

		revertDBVolumes = append(revertDBVolumes, args.Name)
	}

	if len(args.Snapshots) > 0 {
		for _, snapName := range args.Snapshots {
			newSnapshotName := drivers.GetSnapshotVolumeName(args.Name, snapName)

			// Create database entry for new storage volume snapshot.
			err = VolumeDBCreate(b.state, b, projectName, newSnapshotName, args.Description, vol.Type(), true, vol.Config(), time.Time{}, vol.ContentType())
			if err != nil {
				return err
			}

			revertDBVolumes = append(revertDBVolumes, newSnapshotName)
		}
	}

	err = b.driver.CreateVolumeFromMigration(vol, conn, args, nil, op)
	if err != nil {
		// Close the connection so the sender doesn't block on a dead receiver.
		conn.Close()
		return err
	}

	b.state.Events.SendLifecycle(projectName, lifecycle.StorageVolumeCreated.Event(vol, string(vol.Type()), projectName, op, log.Ctx{"type": vol.Type()}))

	// Success: disarm the DB revert.
	revertDBVolumes = nil
	return nil
}
// RenameCustomVolume renames a custom volume and its snapshots.
// DB records (snapshots, backups, then the parent volume) are renamed first
// with reverts registered for each, and only then is the volume renamed on
// the storage device.
func (b *lxdBackend) RenameCustomVolume(projectName string, volName string, newVolName string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": volName, "newVolName": newVolName})
	logger.Debug("RenameCustomVolume started")
	defer logger.Debug("RenameCustomVolume finished")

	// Neither the old nor the new name may be a snapshot reference.
	if shared.IsSnapshot(volName) {
		return fmt.Errorf("Volume name cannot be a snapshot")
	}

	if shared.IsSnapshot(newVolName) {
		return fmt.Errorf("New volume name cannot be a snapshot")
	}

	revert := revert.New()
	defer revert.Fail()

	_, volume, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.id)
	if err != nil {
		return err
	}

	// Rename each snapshot to have the new parent volume prefix.
	snapshots, err := VolumeSnapshotsGet(b.state, projectName, b.name, volName, db.StoragePoolVolumeTypeCustom)
	if err != nil {
		return err
	}

	for _, srcSnapshot := range snapshots {
		// Fix: copy the snapshot name into a per-iteration local before
		// capturing it in the revert closure. Previously the closure captured
		// the shared range variable srcSnapshot, so on revert every closure
		// saw the last snapshot's name (the backups loop below already copies
		// its range variable for exactly this reason).
		srcSnapVolName := srcSnapshot.Name
		_, snapName, _ := shared.InstanceGetParentAndSnapshotName(srcSnapVolName)
		newSnapVolName := drivers.GetSnapshotVolumeName(newVolName, snapName)
		err = b.state.Cluster.RenameStoragePoolVolume(projectName, srcSnapVolName, newSnapVolName, db.StoragePoolVolumeTypeCustom, b.ID())
		if err != nil {
			return err
		}

		revert.Add(func() {
			b.state.Cluster.RenameStoragePoolVolume(projectName, newSnapVolName, srcSnapVolName, db.StoragePoolVolumeTypeCustom, b.ID())
		})
	}

	// Rename each backup to have the new parent volume prefix.
	backups, err := b.state.Cluster.GetStoragePoolVolumeBackups(projectName, volName, b.ID())
	if err != nil {
		return err
	}

	for _, br := range backups {
		backupRow := br // Local var for revert.
		_, backupName, _ := shared.InstanceGetParentAndSnapshotName(backupRow.Name)
		newVolBackupName := drivers.GetSnapshotVolumeName(newVolName, backupName)
		volBackup := backup.NewVolumeBackup(b.state, projectName, b.name, volName, backupRow.ID, backupRow.Name, backupRow.CreationDate, backupRow.ExpiryDate, backupRow.VolumeOnly, backupRow.OptimizedStorage)
		err = volBackup.Rename(newVolBackupName)
		if err != nil {
			return errors.Wrapf(err, "Failed renaming backup %q to %q", backupRow.Name, newVolBackupName)
		}

		revert.Add(func() {
			volBackup.Rename(backupRow.Name)
		})
	}

	// Rename the parent volume's own DB record last so that the snapshot and
	// backup lookups above used the old name.
	err = b.state.Cluster.RenameStoragePoolVolume(projectName, volName, newVolName, db.StoragePoolVolumeTypeCustom, b.ID())
	if err != nil {
		return err
	}

	revert.Add(func() {
		b.state.Cluster.RenameStoragePoolVolume(projectName, newVolName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
	})

	// Get the volume name on storage.
	volStorageName := project.StorageVolume(projectName, volName)
	newVolStorageName := project.StorageVolume(projectName, newVolName)

	// There's no need to pass the config as it's not needed when renaming a volume.
	vol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(volume.ContentType), volStorageName, nil)

	// Rename on the storage device after all DB records are updated.
	err = b.driver.RenameVolume(vol, newVolStorageName, op)
	if err != nil {
		return err
	}

	vol = b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(volume.ContentType), newVolStorageName, nil)
	b.state.Events.SendLifecycle(projectName, lifecycle.StorageVolumeRenamed.Event(vol, string(vol.Type()), projectName, op, log.Ctx{"old_name": volName}))

	// Success: disarm all registered reverts.
	revert.Success()
	return nil
}
// detectChangedConfig returns the config that has changed between current and new config maps.
// Also returns a boolean indicating whether all of the changed keys start with "user.".
// Deleted keys will be returned as having an empty string value.
func (b *lxdBackend) detectChangedConfig(curConfig, newConfig map[string]string) (map[string]string, bool) {
	changedConfig := make(map[string]string)
	userOnly := true

	// Record a key whose value differs between the two maps. Keys removed in
	// newConfig end up with an empty string value via the zero-value lookup.
	record := func(key string) {
		if curConfig[key] == newConfig[key] {
			return
		}

		if !strings.HasPrefix(key, "user.") {
			userOnly = false
		}

		changedConfig[key] = newConfig[key]
	}

	// Scan both maps so additions, modifications and deletions are all seen.
	for key := range curConfig {
		record(key)
	}

	for key := range newConfig {
		record(key)
	}

	return changedConfig, userOnly
}
// UpdateCustomVolume applies the supplied config to the custom volume.
// Changes are validated, restricted keys are rejected, then applied to the
// storage driver (unless only "user." keys changed) and finally to the DB.
func (b *lxdBackend) UpdateCustomVolume(projectName string, volName string, newDesc string, newConfig map[string]string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": volName, "newDesc": newDesc, "newConfig": newConfig})
	logger.Debug("UpdateCustomVolume started")
	defer logger.Debug("UpdateCustomVolume finished")

	if shared.IsSnapshot(volName) {
		return fmt.Errorf("Volume name cannot be a snapshot")
	}

	// Get the volume name on storage.
	volStorageName := project.StorageVolume(projectName, volName)

	// Get current config to compare what has changed.
	_, curVol, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
	if err != nil {
		if err == db.ErrNoSuchObject {
			return errors.Wrapf(err, "Volume doesn't exist")
		}

		return err
	}

	// Get content type.
	dbContentType, err := VolumeContentTypeNameToContentType(curVol.ContentType)
	if err != nil {
		return err
	}

	contentType, err := VolumeDBContentTypeToContentType(dbContentType)
	if err != nil {
		return err
	}

	// Validate config.
	newVol := b.GetVolume(drivers.VolumeTypeCustom, contentType, volStorageName, newConfig)
	err = b.driver.ValidateVolume(newVol, false)
	if err != nil {
		return err
	}

	// Apply config changes if there are any.
	changedConfig, userOnly := b.detectChangedConfig(curVol.Config, newConfig)
	if len(changedConfig) != 0 {
		// Check that the volume's block.filesystem property isn't being changed.
		if changedConfig["block.filesystem"] != "" {
			return fmt.Errorf("Custom volume 'block.filesystem' property cannot be changed")
		}

		// Check that security.unmapped and security.shifted aren't set together.
		if shared.IsTrue(newConfig["security.unmapped"]) && shared.IsTrue(newConfig["security.shifted"]) {
			return fmt.Errorf("security.unmapped and security.shifted are mutually exclusive")
		}

		// Check for config changing that is not allowed when running instances are using it.
		if changedConfig["security.shifted"] != "" {
			err = VolumeUsedByInstanceDevices(b.state, b.name, projectName, curVol, true, func(dbInst db.Instance, project db.Project, profiles []api.Profile, usedByDevices []string) error {
				inst, err := instance.Load(b.state, db.InstanceToArgs(&dbInst), profiles)
				if err != nil {
					return err
				}

				// Confirm that no running instances are using it when changing shifted state.
				if inst.IsRunning() && changedConfig["security.shifted"] != "" {
					return fmt.Errorf("Cannot modify shifting with running instances using the volume")
				}

				return nil
			})
			if err != nil {
				return err
			}
		}

		// NOTE: this deliberately shadows the DB row; within this scope curVol
		// is a drivers.Volume built from the pre-update config.
		curVol := b.GetVolume(drivers.VolumeTypeCustom, contentType, volStorageName, curVol.Config)
		// Only call into the storage driver when a non-"user." key changed.
		if !userOnly {
			err = b.driver.UpdateVolume(curVol, changedConfig)
			if err != nil {
				return err
			}
		}
	}

	// Unset idmap keys if volume is unmapped.
	if shared.IsTrue(newConfig["security.unmapped"]) {
		delete(newConfig, "volatile.idmap.last")
		delete(newConfig, "volatile.idmap.next")
	}

	// Update the database if something changed.
	if len(changedConfig) != 0 || newDesc != curVol.Description {
		err = b.state.Cluster.UpdateStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID(), newDesc, newConfig)
		if err != nil {
			return err
		}
	}

	b.state.Events.SendLifecycle(projectName, lifecycle.StorageVolumeUpdated.Event(newVol, string(newVol.Type()), projectName, op, nil))

	return nil
}
// UpdateCustomVolumeSnapshot updates the description of a custom volume snapshot.
// Volume config is not allowed to be updated and will return an error.
// Only the description and expiry date may differ from the current DB values.
func (b *lxdBackend) UpdateCustomVolumeSnapshot(projectName string, volName string, newDesc string, newConfig map[string]string, newExpiryDate time.Time, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": volName, "newDesc": newDesc, "newConfig": newConfig, "newExpiryDate": newExpiryDate})
	logger.Debug("UpdateCustomVolumeSnapshot started")
	defer logger.Debug("UpdateCustomVolumeSnapshot finished")

	if !shared.IsSnapshot(volName) {
		return fmt.Errorf("Volume must be a snapshot")
	}

	// Get current config to compare what has changed.
	volID, curVol, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
	if err != nil {
		if err == db.ErrNoSuchObject {
			return errors.Wrapf(err, "Volume doesn't exist")
		}

		return err
	}

	curExpiryDate, err := b.state.Cluster.GetStorageVolumeSnapshotExpiry(volID)
	if err != nil {
		return err
	}

	// Reject any effective config change; snapshot config is read-only.
	if newConfig != nil {
		changedConfig, _ := b.detectChangedConfig(curVol.Config, newConfig)
		if len(changedConfig) != 0 {
			return fmt.Errorf("Volume config is not editable")
		}
	}

	// Update the database if description changed. Use current config.
	if newDesc != curVol.Description || newExpiryDate != curExpiryDate {
		err = b.state.Cluster.UpdateStorageVolumeSnapshot(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID(), newDesc, curVol.Config, newExpiryDate)
		if err != nil {
			return err
		}
	}

	// NOTE(review): the volume for the lifecycle event is built from the raw
	// DB name (curVol.Name) rather than project.StorageVolume() as elsewhere
	// in this file — confirm this is intentional for event payloads.
	vol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(curVol.ContentType), curVol.Name, curVol.Config)
	b.state.Events.SendLifecycle(projectName, lifecycle.StorageVolumeSnapshotUpdated.Event(vol, string(vol.Type()), projectName, op, nil))

	return nil
}
// DeleteCustomVolume removes a custom volume and its snapshots.
// Order matters: snapshots first, then the volume on storage, then the
// backups directory, and the DB record last (so a partial failure leaves the
// record in place for retry).
func (b *lxdBackend) DeleteCustomVolume(projectName string, volName string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": volName})
	logger.Debug("DeleteCustomVolume started")
	defer logger.Debug("DeleteCustomVolume finished")

	_, _, isSnap := shared.InstanceGetParentAndSnapshotName(volName)
	if isSnap {
		return fmt.Errorf("Volume name cannot be a snapshot")
	}

	// Retrieve a list of snapshots.
	snapshots, err := VolumeSnapshotsGet(b.state, projectName, b.name, volName, db.StoragePoolVolumeTypeCustom)
	if err != nil {
		return err
	}

	// Remove each snapshot.
	for _, snapshot := range snapshots {
		err = b.DeleteCustomVolumeSnapshot(projectName, snapshot.Name, op)
		if err != nil {
			return err
		}
	}

	// Get the volume name on storage.
	volStorageName := project.StorageVolume(projectName, volName)

	// Get the volume.
	_, poolVol, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
	if err != nil {
		return err
	}

	// Get the content type.
	dbContentType, err := VolumeContentTypeNameToContentType(poolVol.ContentType)
	if err != nil {
		return err
	}

	contentType, err := VolumeDBContentTypeToContentType(dbContentType)
	if err != nil {
		return err
	}

	// There's no need to pass config as it's not needed when deleting a volume.
	vol := b.GetVolume(drivers.VolumeTypeCustom, contentType, volStorageName, nil)

	// Delete the volume from the storage device. Must come after snapshots are removed.
	// Skipped if the volume is already absent on storage (e.g. partial earlier delete).
	if b.driver.HasVolume(vol) {
		err = b.driver.DeleteVolume(vol, op)
		if err != nil {
			return err
		}
	}

	// Remove backups directory for volume.
	backupsPath := shared.VarPath("backups", "custom", b.name, project.StorageVolume(projectName, volName))
	if shared.PathExists(backupsPath) {
		err := os.RemoveAll(backupsPath)
		if err != nil {
			return err
		}
	}

	// Finally, remove the volume record from the database.
	err = b.state.Cluster.RemoveStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
	if err != nil {
		return err
	}

	b.state.Events.SendLifecycle(projectName, lifecycle.StorageVolumeDeleted.Event(vol, string(vol.Type()), projectName, op, nil))

	return nil
}
// GetCustomVolumeDisk returns the location of the disk.
func (b *lxdBackend) GetCustomVolumeDisk(projectName, volName string) (string, error) {
	_, dbVol, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.id)
	if err != nil {
		return "", err
	}

	// No config needed just to resolve the disk path; the storage name
	// includes the project prefix.
	vol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(dbVol.ContentType), project.StorageVolume(projectName, volName), nil)

	return b.driver.GetVolumeDiskPath(vol)
}
// GetCustomVolumeUsage returns the disk space used by the custom volume.
func (b *lxdBackend) GetCustomVolumeUsage(projectName, volName string) (int64, error) {
	_, dbVol, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.id)
	if err != nil {
		return -1, err
	}

	// No config needed just to query usage; the storage name includes the
	// project prefix.
	vol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(dbVol.ContentType), project.StorageVolume(projectName, volName), nil)

	return b.driver.GetVolumeUsage(vol)
}
// MountCustomVolume mounts a custom volume.
func (b *lxdBackend) MountCustomVolume(projectName, volName string, op *operations.Operation) error {
	l := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": volName})
	l.Debug("MountCustomVolume started")
	defer l.Debug("MountCustomVolume finished")

	_, dbVol, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.id)
	if err != nil {
		return err
	}

	// Build the driver volume from the DB record's content type and config.
	vol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(dbVol.ContentType), project.StorageVolume(projectName, volName), dbVol.Config)

	return b.driver.MountVolume(vol, op)
}
// UnmountCustomVolume unmounts a custom volume.
func (b *lxdBackend) UnmountCustomVolume(projectName, volName string, op *operations.Operation) (bool, error) {
	l := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": volName})
	l.Debug("UnmountCustomVolume started")
	defer l.Debug("UnmountCustomVolume finished")

	_, dbVol, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.id)
	if err != nil {
		return false, err
	}

	// Build the driver volume from the DB record's content type and config.
	vol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(dbVol.ContentType), project.StorageVolume(projectName, volName), dbVol.Config)

	return b.driver.UnmountVolume(vol, false, op)
}
// ImportCustomVolume takes an existing custom volume on the storage backend and ensures that the DB records,
// volume directories and symlinks are restored as needed to make it operational with LXD.
// Used during the recovery import stage.
func (b *lxdBackend) ImportCustomVolume(projectName string, poolVol backup.Config, op *operations.Operation) error {
	if poolVol.Volume == nil {
		return fmt.Errorf("Invalid pool volume config supplied")
	}

	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": poolVol.Volume.Name})
	logger.Debug("ImportCustomVolume started")
	defer logger.Debug("ImportCustomVolume finished")

	// Reverter removes any DB records created below if a later step fails.
	revert := revert.New()
	defer revert.Fail()

	// Create the storage volume DB records.
	err := VolumeDBCreate(b.state, b, projectName, poolVol.Volume.Name, poolVol.Volume.Description, drivers.VolumeTypeCustom, false, poolVol.Volume.Config, time.Time{}, drivers.ContentType(poolVol.Volume.ContentType))
	if err != nil {
		return errors.Wrapf(err, "Failed creating custom volume %q record in project %q", poolVol.Volume.Name, projectName)
	}

	revert.Add(func() {
		b.state.Cluster.RemoveStoragePoolVolume(projectName, poolVol.Volume.Name, db.StoragePoolVolumeTypeCustom, b.ID())
	})

	// Create the storage volume snapshot DB records.
	for _, poolVolSnap := range poolVol.VolumeSnapshots {
		// fullSnapName is declared per-iteration, so each revert closure
		// captures its own copy.
		fullSnapName := drivers.GetSnapshotVolumeName(poolVol.Volume.Name, poolVolSnap.Name)
		err = VolumeDBCreate(b.state, b, projectName, fullSnapName, poolVolSnap.Description, drivers.VolumeTypeCustom, true, poolVolSnap.Config, time.Time{}, drivers.ContentType(poolVolSnap.ContentType))
		if err != nil {
			return err
		}

		revert.Add(func() {
			b.state.Cluster.RemoveStoragePoolVolume(projectName, fullSnapName, db.StoragePoolVolumeTypeCustom, b.ID())
		})
	}

	// Get the volume name on storage.
	volStorageName := project.StorageVolume(projectName, poolVol.Volume.Name)
	vol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(poolVol.Volume.ContentType), volStorageName, poolVol.Volume.Config)

	// Create the mount path if needed.
	err = vol.EnsureMountPath()
	if err != nil {
		return err
	}

	// Create snapshot mount paths and snapshot parent directory if needed.
	for _, poolVolSnap := range poolVol.VolumeSnapshots {
		logger.Debug("Ensuring instance snapshot mount path", log.Ctx{"snapshot": poolVolSnap.Name})
		snapVol, err := vol.NewSnapshot(poolVolSnap.Name)
		if err != nil {
			return err
		}

		err = snapVol.EnsureMountPath()
		if err != nil {
			return err
		}
	}

	// Success: disarm the reverter.
	revert.Success()
	return nil
}
// CreateCustomVolumeSnapshot creates a snapshot of a custom volume.
// The DB record is created first (and removed via the deferred revert on
// failure), then the snapshot is taken on the storage device.
func (b *lxdBackend) CreateCustomVolumeSnapshot(projectName, volName string, newSnapshotName string, newExpiryDate time.Time, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": volName, "newSnapshotName": newSnapshotName, "newExpiryDate": newExpiryDate})
	logger.Debug("CreateCustomVolumeSnapshot started")
	defer logger.Debug("CreateCustomVolumeSnapshot finished")

	if shared.IsSnapshot(volName) {
		return fmt.Errorf("Volume cannot be snapshot")
	}

	if shared.IsSnapshot(newSnapshotName) {
		return fmt.Errorf("Snapshot name is not a valid snapshot name")
	}

	fullSnapshotName := drivers.GetSnapshotVolumeName(volName, newSnapshotName)

	// Check snapshot volume doesn't exist already.
	// Only ErrNoSuchObject means the name is free; any other error (or
	// success) is fatal.
	_, _, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, fullSnapshotName, db.StoragePoolVolumeTypeCustom, b.ID())
	if err != db.ErrNoSuchObject {
		if err != nil {
			return err
		}

		return fmt.Errorf("Snapshot by that name already exists")
	}

	// Load parent volume information and check it exists.
	_, parentVol, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
	if err != nil {
		if err == db.ErrNoSuchObject {
			return fmt.Errorf("Parent volume doesn't exist")
		}

		return err
	}

	// Create database entry for new storage volume snapshot.
	// The snapshot inherits the parent volume's description and config.
	err = VolumeDBCreate(b.state, b, projectName, fullSnapshotName, parentVol.Description, drivers.VolumeTypeCustom, true, parentVol.Config, newExpiryDate, drivers.ContentType(parentVol.ContentType))
	if err != nil {
		return err
	}

	// Arm a revert that removes the DB record if the storage snapshot fails.
	revertDB := true
	defer func() {
		if revertDB {
			b.state.Cluster.RemoveStoragePoolVolume(projectName, fullSnapshotName, db.StoragePoolVolumeTypeCustom, b.ID())
		}
	}()

	volDBContentType, err := VolumeContentTypeNameToContentType(parentVol.ContentType)
	if err != nil {
		return err
	}

	contentType, err := VolumeDBContentTypeToContentType(volDBContentType)
	if err != nil {
		return err
	}

	// Get the volume name on storage.
	volStorageName := project.StorageVolume(projectName, fullSnapshotName)
	vol := b.GetVolume(drivers.VolumeTypeCustom, contentType, volStorageName, parentVol.Config)

	// Create the snapshot on the storage device.
	err = b.driver.CreateVolumeSnapshot(vol, op)
	if err != nil {
		return err
	}

	b.state.Events.SendLifecycle(projectName, lifecycle.StorageVolumeSnapshotCreated.Event(vol, string(vol.Type()), projectName, op, log.Ctx{"type": vol.Type()}))

	// Success: disarm the DB revert.
	revertDB = false
	return nil
}
// RenameCustomVolumeSnapshot renames a custom volume.
// The snapshot is renamed on the storage device first; if the subsequent DB
// rename fails, the storage rename is rolled back.
func (b *lxdBackend) RenameCustomVolumeSnapshot(projectName, volName string, newSnapshotName string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": volName, "newSnapshotName": newSnapshotName})
	logger.Debug("RenameCustomVolumeSnapshot started")
	defer logger.Debug("RenameCustomVolumeSnapshot finished")

	parentName, oldSnapshotName, isSnap := shared.InstanceGetParentAndSnapshotName(volName)
	if !isSnap {
		return fmt.Errorf("Volume name must be a snapshot")
	}

	if shared.IsSnapshot(newSnapshotName) {
		return fmt.Errorf("Invalid new snapshot name")
	}

	_, volume, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
	if err != nil {
		return err
	}

	// Get the volume name on storage.
	volStorageName := project.StorageVolume(projectName, volName)

	// There's no need to pass config as it's not needed when renaming a volume.
	vol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(volume.ContentType), volStorageName, nil)
	err = b.driver.RenameVolumeSnapshot(vol, newSnapshotName, op)
	if err != nil {
		return err
	}

	newVolName := drivers.GetSnapshotVolumeName(parentName, newSnapshotName)
	err = b.state.Cluster.RenameStoragePoolVolume(projectName, volName, newVolName, db.StoragePoolVolumeTypeCustom, b.ID())
	if err != nil {
		// Get the volume name on storage.
		newVolStorageName := project.StorageVolume(projectName, newVolName)

		// Revert rename.
		// Best-effort rollback of the storage-level rename so storage and DB
		// stay consistent; the original DB error is returned regardless.
		newVol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(volume.ContentType), newVolStorageName, nil)
		b.driver.RenameVolumeSnapshot(newVol, oldSnapshotName, op)
		return err
	}

	// NOTE(review): the lifecycle event uses `vol`, which still carries the
	// old storage name at this point — confirm the event payload is meant to
	// reference the pre-rename volume.
	b.state.Events.SendLifecycle(projectName, lifecycle.StorageVolumeSnapshotRenamed.Event(vol, string(vol.Type()), projectName, op, log.Ctx{"old_name": oldSnapshotName}))

	return nil
}
// DeleteCustomVolumeSnapshot removes a custom volume snapshot.
func (b *lxdBackend) DeleteCustomVolumeSnapshot(projectName, volName string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": volName})
	logger.Debug("DeleteCustomVolumeSnapshot started")
	defer logger.Debug("DeleteCustomVolumeSnapshot finished")

	// The volume name must be in <parent>/<snapshot> form.
	if !shared.IsSnapshot(volName) {
		return fmt.Errorf("Volume name must be a snapshot")
	}

	// Load the snapshot volume's DB record.
	_, dbVol, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
	if err != nil {
		return err
	}

	// Translate the DB content type name into the driver-level content type.
	dbContentType, err := VolumeContentTypeNameToContentType(dbVol.ContentType)
	if err != nil {
		return err
	}

	contentType, err := VolumeDBContentTypeToContentType(dbContentType)
	if err != nil {
		return err
	}

	// Derive the on-storage volume name; config isn't needed when deleting a snapshot.
	storageName := project.StorageVolume(projectName, volName)
	vol := b.GetVolume(drivers.VolumeTypeCustom, contentType, storageName, nil)

	// Delete from the storage device before removing the DB record, as some drivers
	// still need the volume ID to be resolvable during deletion.
	if b.driver.HasVolume(vol) {
		err = b.driver.DeleteVolumeSnapshot(vol, op)
		if err != nil {
			return err
		}
	}

	// Remove the snapshot volume record from the database.
	err = b.state.Cluster.RemoveStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
	if err != nil {
		return err
	}

	b.state.Events.SendLifecycle(projectName, lifecycle.StorageVolumeSnapshotDeleted.Event(vol, string(vol.Type()), projectName, op, nil))

	return nil
}
// RestoreCustomVolume restores a custom volume from a snapshot.
func (b *lxdBackend) RestoreCustomVolume(projectName, volName string, snapshotName string, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volName": volName, "snapshotName": snapshotName})
	logger.Debug("RestoreCustomVolume started")
	defer logger.Debug("RestoreCustomVolume finished")

	// Quick checks.
	if shared.IsSnapshot(volName) {
		return fmt.Errorf("Volume cannot be snapshot")
	}

	if shared.IsSnapshot(snapshotName) {
		return fmt.Errorf("Invalid snapshot name")
	}

	// Get current volume. The record is used both for the in-use check and for its
	// content type and config below (previously it was fetched twice with identical
	// arguments; a single fetch is sufficient).
	_, curVol, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
	if err != nil {
		if err == db.ErrNoSuchObject {
			return errors.Wrapf(err, "Volume doesn't exist")
		}

		return err
	}

	// Check that the volume isn't in use by running instances.
	err = VolumeUsedByInstanceDevices(b.state, b.Name(), projectName, curVol, true, func(dbInst db.Instance, project db.Project, profiles []api.Profile, usedByDevices []string) error {
		inst, err := instance.Load(b.state, db.InstanceToArgs(&dbInst), profiles)
		if err != nil {
			return err
		}

		if inst.IsRunning() {
			return fmt.Errorf("Cannot restore custom volume used by running instances")
		}

		return nil
	})
	if err != nil {
		return err
	}

	dbContentType, err := VolumeContentTypeNameToContentType(curVol.ContentType)
	if err != nil {
		return err
	}

	contentType, err := VolumeDBContentTypeToContentType(dbContentType)
	if err != nil {
		return err
	}

	// Get the volume name on storage.
	volStorageName := project.StorageVolume(projectName, volName)
	vol := b.GetVolume(drivers.VolumeTypeCustom, contentType, volStorageName, curVol.Config)

	err = b.driver.RestoreVolume(vol, snapshotName, op)
	if err != nil {
		snapErr, ok := err.(drivers.ErrDeleteSnapshots)
		if !ok {
			return err
		}

		// We need to delete some snapshots and try again.
		for _, snapName := range snapErr.Snapshots {
			err := b.DeleteCustomVolumeSnapshot(projectName, fmt.Sprintf("%s/%s", volName, snapName), op)
			if err != nil {
				return err
			}
		}

		// Now try again. On success fall through so the lifecycle event is sent
		// (previously the retry path returned early and skipped the event).
		err = b.driver.RestoreVolume(vol, snapshotName, op)
		if err != nil {
			return err
		}
	}

	b.state.Events.SendLifecycle(projectName, lifecycle.StorageVolumeRestored.Event(vol, string(vol.Type()), projectName, op, log.Ctx{"snapshot": snapshotName}))

	return nil
}
// createStorageStructure creates the pool's base directory layout (one set of base
// directories per volume type supported by the driver) under the given path.
func (b *lxdBackend) createStorageStructure(path string) error {
	for _, volType := range b.driver.Info().VolumeTypes {
		for _, dirName := range drivers.BaseDirectories[volType] {
			dirPath := filepath.Join(path, dirName)

			// 0711 keeps volume contents private while allowing traversal.
			err := os.MkdirAll(dirPath, 0711)
			if err != nil && !os.IsExist(err) {
				return errors.Wrapf(err, "Failed to create directory %q", dirPath)
			}
		}
	}

	return nil
}
// UpdateInstanceBackupFile writes the instance's config to the backup.yaml file on the storage device.
func (b *lxdBackend) UpdateInstanceBackupFile(inst instance.Instance, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name()})
	logger.Debug("UpdateInstanceBackupFile started")
	defer logger.Debug("UpdateInstanceBackupFile finished")

	// We only write backup files out for actual instances.
	if inst.IsSnapshot() {
		return nil
	}

	// Immediately return if the instance directory doesn't exist yet.
	if !shared.PathExists(inst.Path()) {
		return os.ErrNotExist
	}

	// Generate the YAML.
	ci, _, err := inst.Render()
	if err != nil {
		return errors.Wrap(err, "Failed to render instance metadata")
	}

	snapshots, err := inst.Snapshots()
	if err != nil {
		return errors.Wrap(err, "Failed to get snapshots")
	}

	// Render each snapshot to its API representation for inclusion in the YAML.
	var sis []*api.InstanceSnapshot
	for _, s := range snapshots {
		si, _, err := s.Render()
		if err != nil {
			return err
		}

		sis = append(sis, si.(*api.InstanceSnapshot))
	}

	// Map the instance type to its volume/DB types so the volume record can be looked up.
	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}

	volDBType, err := VolumeTypeToDBType(volType)
	if err != nil {
		return err
	}

	contentType := InstanceContentType(inst)

	_, volume, err := b.state.Cluster.GetLocalStoragePoolVolume(inst.Project(), inst.Name(), volDBType, b.ID())
	if err != nil {
		return err
	}

	// Marshal the instance, its snapshots, the pool and the volume into backup config YAML.
	data, err := yaml.Marshal(&backup.Config{
		Container: ci.(*api.Instance),
		Snapshots: sis,
		Pool:      &b.db,
		Volume:    volume,
	})
	if err != nil {
		return err
	}

	// Get the volume name on storage.
	volStorageName := project.Instance(inst.Project(), inst.Name())
	vol := b.GetVolume(volType, contentType, volStorageName, volume.Config)

	// Update pool information in the backup.yaml file.
	err = vol.MountTask(func(mountPath string, op *operations.Operation) error {
		// Write the YAML
		path := filepath.Join(inst.Path(), "backup.yaml")
		f, err := os.Create(path)
		if err != nil {
			return errors.Wrapf(err, "Failed to create file %q", path)
		}

		defer f.Close()

		// Owner read-only: the file may contain sensitive instance config.
		err = f.Chmod(0400)
		if err != nil {
			return err
		}

		err = shared.WriteAll(f, data)
		if err != nil {
			return err
		}

		return nil
	}, op)

	return err
}
// CheckInstanceBackupFileSnapshots compares the snapshots on the storage device to those defined in the backup
// config supplied and returns an error if they do not match (if deleteMissing argument is false).
// If deleteMissing argument is true, then any snapshots that exist on the storage device but not in the backup
// config are removed from the storage device, and any snapshots that exist in the backup config but do not exist
// on the storage device are ignored. The remaining set of snapshots that exist on both the storage device and the
// backup config are returned. The set can be used to re-create the snapshot database entries when importing.
func (b *lxdBackend) CheckInstanceBackupFileSnapshots(backupConf *backup.Config, projectName string, deleteMissing bool, op *operations.Operation) ([]*api.InstanceSnapshot, error) {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "instance": backupConf.Container.Name, "deleteMissing": deleteMissing})
	logger.Debug("CheckInstanceBackupFileSnapshots started")
	defer logger.Debug("CheckInstanceBackupFileSnapshots finished")

	// Derive the volume type from the instance type recorded in the backup config.
	instType, err := instancetype.New(string(backupConf.Container.Type))
	if err != nil {
		return nil, err
	}

	volType, err := InstanceTypeToVolumeType(instType)
	if err != nil {
		return nil, err
	}

	// Get the volume name on storage.
	volStorageName := project.Instance(projectName, backupConf.Container.Name)

	contentType := drivers.ContentTypeFS
	if volType == drivers.VolumeTypeVM {
		contentType = drivers.ContentTypeBlock
	}

	// We don't need to use the volume's config for mounting so set to nil.
	vol := b.GetVolume(volType, contentType, volStorageName, nil)

	// Get a list of snapshots that exist on storage device.
	driverSnapshots, err := vol.Snapshots(op)
	if err != nil {
		return nil, err
	}

	// A count mismatch is only fatal in strict mode; with deleteMissing the two
	// per-direction checks below reconcile the difference.
	if len(backupConf.Snapshots) != len(driverSnapshots) {
		if !deleteMissing {
			return nil, errors.Wrap(ErrBackupSnapshotsMismatch, "Snapshot count in backup config and storage device are different")
		}
	}

	// Check (and optionally delete) snapshots that do not exist in backup config.
	for _, driverSnapVol := range driverSnapshots {
		_, driverSnapOnly, _ := shared.InstanceGetParentAndSnapshotName(driverSnapVol.Name())

		inBackupFile := false
		for _, backupFileSnap := range backupConf.Snapshots {
			backupFileSnapOnly := backupFileSnap.Name
			if driverSnapOnly == backupFileSnapOnly {
				inBackupFile = true
				break
			}
		}

		if inBackupFile {
			continue
		}

		if !deleteMissing {
			return nil, errors.Wrapf(ErrBackupSnapshotsMismatch, "Snapshot %q exists on storage device but not in backup config", driverSnapOnly)
		}

		err = b.driver.DeleteVolumeSnapshot(driverSnapVol, op)
		if err != nil {
			return nil, errors.Wrapf(err, "Failed to delete snapshot %q", driverSnapOnly)
		}

		logger.Warn("Deleted snapshot as not present in backup config", log.Ctx{"snapshot": driverSnapOnly})
	}

	// Check the snapshots in backup config exist on storage device.
	existingSnapshots := []*api.InstanceSnapshot{}
	for _, backupFileSnap := range backupConf.Snapshots {
		backupFileSnapOnly := backupFileSnap.Name

		onStorageDevice := false
		for _, driverSnapVol := range driverSnapshots {
			_, driverSnapOnly, _ := shared.InstanceGetParentAndSnapshotName(driverSnapVol.Name())
			if driverSnapOnly == backupFileSnapOnly {
				onStorageDevice = true
				break
			}
		}

		if !onStorageDevice {
			if !deleteMissing {
				return nil, errors.Wrapf(ErrBackupSnapshotsMismatch, "Snapshot %q exists in backup config but not on storage device", backupFileSnapOnly)
			}

			logger.Warn("Skipped snapshot in backup config as not present on storage device", log.Ctx{"snapshot": backupFileSnap})
			continue // Skip snapshots missing on storage device.
		}

		existingSnapshots = append(existingSnapshots, backupFileSnap)
	}

	return existingSnapshots, nil
}
// ListUnknownVolumes returns volumes that exist on the storage pool but don't have records in the database.
// Returns the unknown volumes parsed/generated backup config in a slice (keyed on project name).
func (b *lxdBackend) ListUnknownVolumes(op *operations.Operation) (map[string][]*backup.Config, error) {
	// Get a list of volumes on the storage pool. We only expect to get 1 volume per logical LXD volume.
	// So for VMs we only expect to get the block volume for a VM and not its filesystem one too. This way we
	// can operate on the volume using the existing storage pool functions and let the pool then handle the
	// associated filesystem volume as needed.
	poolVols, err := b.driver.ListVolumes()
	if err != nil {
		return nil, errors.Wrapf(err, "Failed getting pool volumes")
	}

	projectVols := make(map[string][]*backup.Config)

	for i := range poolVols {
		poolVol := &poolVols[i]
		volType := poolVol.Type()

		// If the storage driver has returned a filesystem volume for a VM, this is a break of protocol.
		if volType == drivers.VolumeTypeVM && poolVol.ContentType() == drivers.ContentTypeFS {
			return nil, fmt.Errorf("Storage driver returned unexpected VM volume with filesystem content type (%q)", poolVol.Name())
		}

		// Dispatch instance and custom volumes to their respective detectors; other
		// volume types (e.g. images) are intentionally ignored.
		switch volType {
		case drivers.VolumeTypeVM, drivers.VolumeTypeContainer:
			err = b.detectUnknownInstanceVolume(poolVol, projectVols, op)
			if err != nil {
				return nil, err
			}
		case drivers.VolumeTypeCustom:
			err = b.detectUnknownCustomVolume(poolVol, projectVols, op)
			if err != nil {
				return nil, err
			}
		}
	}

	return projectVols, nil
}
// detectUnknownInstanceVolume detects if a volume is unknown and if so attempts to mount the volume and parse the
// backup stored on it. It then runs a series of consistency checks that compare the contents of the backup file to
// the state of the volume on disk, and if all checks out, it adds the parsed backup file contents to projectVols.
func (b *lxdBackend) detectUnknownInstanceVolume(vol *drivers.Volume, projectVols map[string][]*backup.Config, op *operations.Operation) error {
	volType := vol.Type()

	volDBType, err := VolumeTypeToDBType(volType)
	if err != nil {
		return err
	}

	projectName, instName := project.InstanceParts(vol.Name())

	// Check if an entry for the instance already exists in the DB.
	instID, err := b.state.Cluster.GetInstanceID(projectName, instName)
	if err != nil && errors.Cause(err) != db.ErrNoSuchObject {
		return err
	}

	instSnapshots, err := b.state.Cluster.GetInstanceSnapshotsNames(projectName, instName)
	if err != nil {
		return err
	}

	// Check if any entry for the instance volume already exists in the DB.
	// This will return no record for any temporary pool structs being used (as ID is -1).
	volID, _, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, instName, volDBType, b.ID())
	if err != nil && errors.Cause(err) != db.ErrNoSuchObject {
		return err
	}

	// Recovery is only needed when neither record exists; partial records are an error.
	if instID > 0 && volID > 0 {
		return nil // Instance record and storage record already exists in DB, no recovery needed.
	} else if instID > 0 {
		return fmt.Errorf("Instance %q in project %q already has instance DB record", instName, projectName)
	} else if volID > 0 {
		return fmt.Errorf("Instance %q in project %q already has storage DB record", instName, projectName)
	}

	backupYamlPath := filepath.Join(vol.MountPath(), "backup.yaml")
	var backupConf *backup.Config

	// If the instance is running, it should already be mounted, so check if the backup file
	// is already accessible, and if so parse it directly, without disturbing the mount count.
	if shared.PathExists(backupYamlPath) {
		backupConf, err = backup.ParseConfigYamlFile(backupYamlPath)
		if err != nil {
			return errors.Wrapf(err, "Failed parsing backup file %q", backupYamlPath)
		}
	} else {
		// We won't know what filesystem some block backed volumes are using, so ask the storage
		// driver to probe the block device for us (if appropriate).
		vol.SetMountFilesystemProbe(true)

		// If backup file not accessible, we take this to mean the instance isn't running
		// and so we need to mount the volume to access the backup file and then unmount.
		// This will also create the mount path if needed.
		err = vol.MountTask(func(_ string, _ *operations.Operation) error {
			backupConf, err = backup.ParseConfigYamlFile(backupYamlPath)
			if err != nil {
				return errors.Wrapf(err, "Failed parsing backup file %q", backupYamlPath)
			}

			return nil
		}, op)
		if err != nil {
			return err
		}
	}

	// Run some consistency checks on the backup file contents.
	if backupConf.Pool != nil {
		if backupConf.Pool.Name != b.name {
			return fmt.Errorf("Instance %q in project %q has pool name mismatch in its backup file (%q doesn't match pool's %q)", instName, projectName, backupConf.Pool.Name, b.name)
		}

		// Compare and report the same value (Info().Name); previously the message used
		// b.Driver().Name() while the comparison used b.Driver().Info().Name.
		if backupConf.Pool.Driver != b.Driver().Info().Name {
			return fmt.Errorf("Instance %q in project %q has pool driver mismatch in its backup file (%q doesn't match pool's %q)", instName, projectName, backupConf.Pool.Driver, b.Driver().Info().Name)
		}
	}

	if backupConf.Container == nil {
		return fmt.Errorf("Instance %q in project %q has no instance information in its backup file", instName, projectName)
	}

	if instName != backupConf.Container.Name {
		return fmt.Errorf("Instance %q in project %q has a different instance name in its backup file (%q)", instName, projectName, backupConf.Container.Name)
	}

	apiInstType, err := VolumeTypeToAPIInstanceType(volType)
	if err != nil {
		return errors.Wrapf(err, "Failed checking instance type for instance %q in project %q", instName, projectName)
	}

	if apiInstType != api.InstanceType(backupConf.Container.Type) {
		return fmt.Errorf("Instance %q in project %q has a different instance type in its backup file (%q)", instName, projectName, backupConf.Container.Type)
	}

	if backupConf.Volume == nil {
		return fmt.Errorf("Instance %q in project %q has no volume information in its backup file", instName, projectName)
	}

	if instName != backupConf.Volume.Name {
		return fmt.Errorf("Instance %q in project %q has a different volume name in its backup file (%q)", instName, projectName, backupConf.Volume.Name)
	}

	instVolDBType, err := VolumeTypeNameToDBType(backupConf.Volume.Type)
	if err != nil {
		return errors.Wrapf(err, "Failed checking instance volume type for instance %q in project %q", instName, projectName)
	}

	instVolType, err := VolumeDBTypeToType(instVolDBType)
	if err != nil {
		return errors.Wrapf(err, "Failed checking instance volume type for instance %q in project %q", instName, projectName)
	}

	if volType != instVolType {
		return fmt.Errorf("Instance %q in project %q has a different volume type in its backup file (%q)", instName, projectName, backupConf.Volume.Type)
	}

	// Add the volume to the unknown volumes list for the project.
	if projectVols[projectName] == nil {
		projectVols[projectName] = []*backup.Config{backupConf}
	} else {
		projectVols[projectName] = append(projectVols[projectName], backupConf)
	}

	// Check snapshots are consistent between storage layer and backup config file.
	_, err = b.CheckInstanceBackupFileSnapshots(backupConf, projectName, false, nil)
	if err != nil {
		return fmt.Errorf("Instance %q in project %q has snapshot inconsistency: %v", instName, projectName, err)
	}

	// Check there are no existing DB records present for snapshots.
	for _, snapshot := range backupConf.Snapshots {
		fullSnapshotName := drivers.GetSnapshotVolumeName(instName, snapshot.Name)

		// Check if an entry for the instance already exists in the DB.
		if shared.StringInSlice(fullSnapshotName, instSnapshots) {
			return fmt.Errorf("Instance %q snapshot %q in project %q already has instance DB record", instName, snapshot.Name, projectName)
		}

		// Check if any entry for the instance snapshot volume already exists in the DB.
		// This will return no record for any temporary pool structs being used (as ID is -1).
		volID, _, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, fullSnapshotName, volDBType, b.ID())
		if err != nil && errors.Cause(err) != db.ErrNoSuchObject {
			return err
		}

		if volID > 0 {
			return fmt.Errorf("Instance %q snapshot %q in project %q already has storage DB record", instName, snapshot.Name, projectName)
		}
	}

	return nil
}
// detectUnknownCustomVolume detects if a volume is unknown and if so attempts to discover the filesystem of the
// volume (for filesystem volumes). It then runs a series of consistency checks, and if all checks out, it
// generates a simulated backup config for the custom volume and adds it to projectVols.
func (b *lxdBackend) detectUnknownCustomVolume(vol *drivers.Volume, projectVols map[string][]*backup.Config, op *operations.Operation) error {
	volType := vol.Type()

	volDBType, err := VolumeTypeToDBType(volType)
	if err != nil {
		return err
	}

	projectName, volName := project.StorageVolumeParts(vol.Name())

	// Check if any entry for the custom volume already exists in the DB.
	// This will return no record for any temporary pool structs being used (as ID is -1).
	volID, _, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, volDBType, b.ID())
	if err != nil && errors.Cause(err) != db.ErrNoSuchObject {
		return err
	}

	if volID > 0 {
		return nil // Storage record already exists in DB, no recovery needed.
	}

	// Get a list of snapshots that exist on storage device.
	snapshots, err := b.driver.VolumeSnapshots(*vol, op)
	if err != nil {
		return err
	}

	contentType := vol.ContentType()
	var apiContentType string

	if contentType == drivers.ContentTypeBlock {
		apiContentType = db.StoragePoolVolumeContentTypeNameBlock
	} else if contentType == drivers.ContentTypeFS {
		apiContentType = db.StoragePoolVolumeContentTypeNameFS

		// Detect block volume filesystem (by mounting it (if not already) with filesystem probe mode).
		if b.driver.Info().BlockBacking {
			var blockFS string
			mountPath := vol.MountPath()
			if filesystem.IsMountPoint(mountPath) {
				blockFS, err = filesystem.Detect(mountPath)
				if err != nil {
					return err
				}
			} else {
				vol.SetMountFilesystemProbe(true)

				// Propagate mount failures; previously the MountTask error was discarded,
				// which could silently record an empty block.filesystem below.
				err = vol.MountTask(func(mountPath string, op *operations.Operation) error {
					blockFS, err = filesystem.Detect(mountPath)
					if err != nil {
						return err
					}

					return nil
				}, op)
				if err != nil {
					return err
				}
			}

			// Record detected filesystem in config.
			vol.Config()["block.filesystem"] = blockFS
		}
	} else {
		return fmt.Errorf("Unknown custom volume content type %q", contentType)
	}

	// This may not always be the correct thing to do, but seeing as we don't know what the volume's config
	// was, let's take a best guess that it was the default config.
	err = b.driver.FillVolumeConfig(*vol)
	if err != nil {
		return errors.Wrapf(err, "Failed filling custom volume default config")
	}

	// Check the filesystem detected is valid for the storage driver.
	err = b.driver.ValidateVolume(*vol, false)
	if err != nil {
		return errors.Wrapf(err, "Failed custom volume validation")
	}

	// Simulate a backup config from the information gathered above.
	backupConf := &backup.Config{
		Volume: &api.StorageVolume{
			Name:        volName,
			Type:        db.StoragePoolVolumeTypeNameCustom,
			ContentType: apiContentType,
			StorageVolumePut: api.StorageVolumePut{
				Config: vol.Config(),
			},
		},
	}

	// Populate snapshot volumes.
	for _, snapOnlyName := range snapshots {
		backupConf.VolumeSnapshots = append(backupConf.VolumeSnapshots, &api.StorageVolumeSnapshot{
			Name:        snapOnlyName, // Snapshot only name, not full name.
			Config:      vol.Config(), // Have to assume the snapshot volume config is same as parent.
			ContentType: apiContentType,
		})
	}

	// Add the volume to the unknown volumes list for the project.
	if projectVols[projectName] == nil {
		projectVols[projectName] = []*backup.Config{backupConf}
	} else {
		projectVols[projectName] = append(projectVols[projectName], backupConf)
	}

	return nil
}
// ImportInstance takes an existing instance volume on the storage backend and ensures that the volume directories
// and symlinks are restored as needed to make it operational with LXD. Used during the recovery import stage.
// If the instance exists on the local cluster member then the local mount status is restored as needed.
func (b *lxdBackend) ImportInstance(inst instance.Instance, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": inst.Project(), "instance": inst.Name()})
	logger.Debug("ImportInstance started")
	defer logger.Debug("ImportInstance finished")

	volType, err := InstanceTypeToVolumeType(inst.Type())
	if err != nil {
		return err
	}

	// Get any snapshots the instance has in the format <instance name>/<snapshot name>.
	snapshots, err := b.state.Cluster.GetInstanceSnapshotsNames(inst.Project(), inst.Name())
	if err != nil {
		return err
	}

	// Get local cluster member name.
	var nodeName string
	err = b.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
		nodeName, err = tx.GetLocalNodeName()
		return err
	})
	if err != nil {
		return errors.Wrap(err, "Failed getting local cluster member name")
	}

	// Revert stack: undoes symlink creation below unless Success() is reached.
	revert := revert.New()
	defer revert.Fail()

	contentType := InstanceContentType(inst)

	// Get the volume name on storage.
	volStorageName := project.Instance(inst.Project(), inst.Name())

	// Config is not passed; only the mount path is needed here.
	vol := b.GetVolume(volType, contentType, volStorageName, nil)

	err = vol.EnsureMountPath()
	if err != nil {
		return err
	}

	// Only attempt to restore mount status on instance's local cluster member.
	if inst.Location() == nodeName {
		logger.Debug("Restoring local instance mount status")

		if inst.IsRunning() {
			// If the instance is running then this implies the volume is mounted, but if the LXD daemon has
			// been restarted since the DB records were removed then there will be no mount reference counter
			// showing the volume is in use. If this is the case then call mount the volume to increment the
			// reference counter.
			if !vol.MountInUse() {
				_, err = b.MountInstance(inst, op)
				if err != nil {
					return errors.Wrapf(err, "Failed mounting instance")
				}
			}
		} else {
			// If the instance isn't running then try and unmount it to ensure consistent state after import.
			_, err = b.UnmountInstance(inst, op)
			if err != nil {
				return errors.Wrapf(err, "Failed unmounting instance")
			}
		}
	}

	// Create symlink.
	err = b.ensureInstanceSymlink(inst.Type(), inst.Project(), inst.Name(), vol.MountPath())
	if err != nil {
		return err
	}

	revert.Add(func() {
		// Remove symlinks.
		b.removeInstanceSymlink(inst.Type(), inst.Project(), inst.Name())
		b.removeInstanceSnapshotSymlinkIfUnused(inst.Type(), inst.Project(), inst.Name())
	})

	// Create snapshot mount paths and snapshot symlink if needed.
	if len(snapshots) > 0 {
		for _, snapName := range snapshots {
			_, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snapName)
			logger.Debug("Ensuring instance snapshot mount path", log.Ctx{"snapshot": snapOnlyName})

			snapVol, err := vol.NewSnapshot(snapOnlyName)
			if err != nil {
				return err
			}

			err = snapVol.EnsureMountPath()
			if err != nil {
				return err
			}
		}

		err = b.ensureInstanceSnapshotSymlink(inst.Type(), inst.Project(), inst.Name())
		if err != nil {
			return err
		}
	}

	revert.Success()
	return nil
}
// BackupCustomVolume exports a custom volume (and optionally its snapshots) to tarWriter,
// using the driver's optimized format when requested.
func (b *lxdBackend) BackupCustomVolume(projectName string, volName string, tarWriter *instancewriter.InstanceTarWriter, optimized bool, snapshots bool, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": projectName, "volume": volName, "optimized": optimized, "snapshots": snapshots})
	logger.Debug("BackupCustomVolume started")
	defer logger.Debug("BackupCustomVolume finished")

	// Get the volume name on storage.
	volStorageName := project.StorageVolume(projectName, volName)

	// Load the volume DB record. Use the ID() accessor rather than the raw b.id field for
	// consistency with the other methods on this type.
	_, volume, err := b.state.Cluster.GetLocalStoragePoolVolume(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
	if err != nil {
		return err
	}

	var snapNames []string
	if snapshots {
		// Get snapshots in age order, oldest first, and pass names to storage driver.
		volSnaps, err := b.state.Cluster.GetLocalStoragePoolVolumeSnapshotsWithType(projectName, volName, db.StoragePoolVolumeTypeCustom, b.ID())
		if err != nil {
			return err
		}

		snapNames = make([]string, 0, len(volSnaps))
		for _, volSnap := range volSnaps {
			_, snapName, _ := shared.InstanceGetParentAndSnapshotName(volSnap.Name)
			snapNames = append(snapNames, snapName)
		}
	}

	vol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(volume.ContentType), volStorageName, volume.Config)

	return b.driver.BackupVolume(vol, tarWriter, optimized, snapNames, op)
}
// CreateCustomVolumeFromBackup creates a custom volume (and its snapshots) from a backup tarball stream.
// DB records are created ahead of unpacking so the driver can resolve volume IDs, and are reverted on failure.
func (b *lxdBackend) CreateCustomVolumeFromBackup(srcBackup backup.Info, srcData io.ReadSeeker, op *operations.Operation) error {
	logger := logging.AddContext(b.logger, log.Ctx{"project": srcBackup.Project, "volume": srcBackup.Name, "snapshots": srcBackup.Snapshots, "optimizedStorage": *srcBackup.OptimizedStorage})
	logger.Debug("CreateCustomVolumeFromBackup started")
	defer logger.Debug("CreateCustomVolumeFromBackup finished")

	if srcBackup.Config == nil || srcBackup.Config.Volume == nil {
		return fmt.Errorf("Valid volume config not found in index")
	}

	if len(srcBackup.Snapshots) != len(srcBackup.Config.VolumeSnapshots) {
		return fmt.Errorf("Valid volume snapshot config not found in index")
	}

	// Check whether we are allowed to create volumes.
	req := api.StorageVolumesPost{
		StorageVolumePut: api.StorageVolumePut{
			Config: srcBackup.Config.Volume.Config,
		},
		Name: srcBackup.Name,
	}
	err := b.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
		return project.AllowVolumeCreation(tx, srcBackup.Project, req)
	})
	if err != nil {
		return errors.Wrapf(err, "Failed checking volume creation allowed")
	}

	revert := revert.New()
	defer revert.Fail()

	// Get the volume name on storage.
	volStorageName := project.StorageVolume(srcBackup.Project, srcBackup.Name)

	// Validate config.
	vol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(srcBackup.Config.Volume.ContentType), volStorageName, srcBackup.Config.Volume.Config)

	// Strip any unsupported config keys (in case the export was made from a different type of storage pool).
	err = b.driver.ValidateVolume(vol, true)
	if err != nil {
		return err
	}

	// Create database entry for new storage volume using the validated config.
	err = VolumeDBCreate(b.state, b, srcBackup.Project, srcBackup.Name, srcBackup.Config.Volume.Description, vol.Type(), false, vol.Config(), time.Time{}, vol.ContentType())
	if err != nil {
		return err
	}

	revert.Add(func() {
		b.state.Cluster.RemoveStoragePoolVolume(srcBackup.Project, srcBackup.Name, db.StoragePoolVolumeTypeCustom, b.ID())
	})

	// Create database entries for new storage volume snapshots.
	for _, s := range srcBackup.Config.VolumeSnapshots {
		snapshot := s // Local var for revert.
		snapName := snapshot.Name

		// Due to a historical bug, the volume snapshot names were sometimes written in their full form
		// (<parent>/<snap>) rather than the expected snapshot name only form, so we need to handle both.
		if shared.IsSnapshot(snapshot.Name) {
			_, snapName, _ = shared.InstanceGetParentAndSnapshotName(snapshot.Name)
		}

		fullSnapName := drivers.GetSnapshotVolumeName(srcBackup.Name, snapName)
		snapVolStorageName := project.StorageVolume(srcBackup.Project, fullSnapName)
		snapVol := b.GetVolume(drivers.VolumeTypeCustom, drivers.ContentType(srcBackup.Config.Volume.ContentType), snapVolStorageName, srcBackup.Config.Volume.Config)

		// Strip any unsupported config keys (in case the export was made from a different type of storage pool).
		err := b.driver.ValidateVolume(snapVol, true)
		if err != nil {
			return err
		}

		// Guard against a missing expiry; previously *snapshot.ExpiresAt was dereferenced
		// unconditionally and panicked on backups without snapshot expiry information.
		var snapExpiry time.Time
		if snapshot.ExpiresAt != nil {
			snapExpiry = *snapshot.ExpiresAt
		}

		err = VolumeDBCreate(b.state, b, srcBackup.Project, fullSnapName, snapshot.Description, snapVol.Type(), true, snapVol.Config(), snapExpiry, snapVol.ContentType())
		if err != nil {
			return err
		}

		revert.Add(func() {
			b.state.Cluster.RemoveStoragePoolVolume(srcBackup.Project, fullSnapName, db.StoragePoolVolumeTypeCustom, b.ID())
		})
	}

	// Unpack the backup into the new storage volume(s).
	volPostHook, revertHook, err := b.driver.CreateVolumeFromBackup(vol, srcBackup, srcData, op)
	if err != nil {
		return err
	}

	if revertHook != nil {
		revert.Add(revertHook)
	}

	// If the driver returned a post hook, return error as custom volumes don't need post hooks and we expect
	// the storage driver to understand this distinction and ensure that all activities done in the postHook
	// normally are done in CreateVolumeFromBackup as the DB record is created ahead of time.
	if volPostHook != nil {
		return fmt.Errorf("Custom volume restore doesn't support post hooks")
	}

	b.state.Events.SendLifecycle(srcBackup.Project, lifecycle.StorageVolumeCreated.Event(vol, string(vol.Type()), srcBackup.Project, op, log.Ctx{"type": vol.Type()}))

	revert.Success()
	return nil
}
|
// Copyright © 2015-2016 Lawrence E. Bakst. All rights reserved.
// Originally written on a plane from SFO->EWR on 7-23-15 in about an hour.
// Based on an idea I had been mulling in my mind for years.
//
// dedup scans files or directories and calculates fingerprints hashes for them based on their contents.
// Without the -d (directory) switch dedup recursively scans the supplied directories in depth first
// order and records the hash of each file in a map of slices keyed by the hash. After the scan is
// complete, the resulting map is iterated and if any of the slices have a length of more than 1,
// then the files on that slice are all duplicates of each other.
//
// If the -d switch is supplied the hashes of files are themselves recursively hashed and the resulting
// hashes of each directory (but not the files) are recorded in the map. Again, if the length of any
// slice is more than 1 then the entire directory is duplicated.
//
// If the -r switch is supplied, when the map is scanned, any slices with a length different than
// the number of supplied directories are printed as these represent missing files. This allows
// directories to be easily compared and more than two can easily be compared.
package main
import (
"flag"
"fmt"
"github.com/tildeleb/hashland/aeshash"
_ "github.com/tildeleb/hashland/jenkins"
"hash"
"leb.io/hrff"
"log"
"os"
"regexp"
"strings"
)
// PathReader abstracts hashing the file identified by path and fi.
// NOTE(review): nothing in this file implements or accepts it — confirm callers.
type PathReader interface {
	PathRead(path string, fi os.FileInfo) (r uint64)
}
// kfe is one recorded entry: full path, size in bytes, and content hash.
type kfe struct {
	path string
	size int64
	hash uint64
}
// stat holds the counters printed by the -ps summary.
type stat struct {
	scannedFiles int64
	scannedDirs int64
	matchedFiles int64
	matchedDirs int64
}
var stats stat
var ddre *regexp.Regexp  // compiled -dd pattern; nil unless -dd given
var patre *regexp.Regexp // compiled -pat pattern; nil unless -pat given
var blockSize = flag.Int64("b", 8192, "block size")
var dirf = flag.Bool("d", false, "hash dirs")
var r = flag.Bool("r", false, "reverse sense; record non duplicated files")
var fr = flag.Bool("fr", false, "full read; read the entire file")
var pat = flag.String("pat", "", "regexp pattern to match filenames")
var dd = flag.String("dd", "", "do not descend past dirs named this")
var print = flag.Bool("p", false, "print duplicated dirs or files")
var ps = flag.Bool("ps", false, "print summary")
var pd = flag.Bool("pd", false, "print duplicates with -r")
var _fthreshold hrff.Int64 // -ft value, accepts human-readable suffixes
var _dthreshold hrff.Int64 // -dt value, accepts human-readable suffixes
var fthreshold int64 // -ft as a plain byte count
var dthreshold int64 // -dt as a plain byte count
var total int64 // total bytes across reported duplicates
var count int64 // number of reported entries
var hmap = make(map[uint64][]kfe, 100) // hash -> entries with that hash
var smap = make(map[int64][]kfe, 100) // size -> entries with that size (written, never read)
var hf hash.Hash64 // global hasher used by readFullHash
// fullName joins the directory prefix and the entry's name with a "/",
// returning just the entry name when the prefix is empty.
func fullName(path string, fi os.FileInfo) string {
	if path == "" {
		return fi.Name()
	}
	return path + "/" + fi.Name()
}
// readFullHash hashes the entire contents of the file identified by
// path/fi with the global hasher hf, reading *blockSize bytes at a time.
// Empty files hash to 0. Unreadable files abort the program.
func readFullHash(path string, fi os.FileInfo) (r uint64) {
	p := fullName(path, fi)
	if fi.Size() == 0 {
		return 0
	}
	buf := make([]byte, *blockSize)
	f, err := os.Open(p)
	if err != nil {
		panic("readFullHash: Open") // was mislabeled "readPartialHash"
	}
	defer f.Close()
	hf.Reset()
	for {
		l, err := f.Read(buf)
		if l == 0 {
			break // EOF: os.File returns (0, io.EOF) at end of file
		}
		if l < 0 || err != nil {
			log.Fatal(err)
			return
		}
		hf.Write(buf[:l])
	}
	return hf.Sum64()
}
// readPartialHash hashes a sample of the file: the whole file when it
// fits in one block, otherwise the first and last *blockSize/2 bytes.
// Empty files hash to 0. Unreadable files abort the program.
func readPartialHash(path string, fi os.FileInfo) (r uint64) {
	p := fullName(path, fi)
	if fi.Size() == 0 {
		return 0
	}
	half := *blockSize / 2
	buf := make([]byte, *blockSize)
	f, err := os.Open(p)
	if err != nil {
		panic("readPartialHash: Open")
	}
	// defer replaces the old bare Close, which was skipped on the Fatal paths.
	defer f.Close()
	l := 0
	if fi.Size() <= *blockSize {
		// Small file: read it all in one go.
		// (The Read error used to be discarded while a stale err was tested.)
		l, err = f.Read(buf)
		if err != nil {
			log.Fatal(err)
		}
	} else {
		// Large file: sample the first and last half-blocks.
		l, err = f.Read(buf[0:half])
		if err != nil {
			log.Fatal(err)
		}
		// The Seek error used to be ignored entirely.
		if _, err = f.Seek(-half, os.SEEK_END); err != nil {
			log.Fatal(err)
		}
		l2, err := f.Read(buf[half:]) // this error was discarded too
		if err != nil {
			log.Fatal(err)
		}
		lt := l + l2
		if lt != int(*blockSize) {
			fmt.Printf("blocksSize=%d, half=%d\n", *blockSize, half)
			fmt.Printf("f=%q, size=%d, l=%d, l2=%d, lt=%d\n", fi.Name(), fi.Size(), l, l2, lt)
			panic("readPartialHash: blockSize")
		}
	}
	return aeshash.Hash(buf[0:l], 0)
}
// add records *k in the global hash map under hash. append handles both
// the first entry (nil slice) and later ones, so no existence check is
// needed. The size parameter is accepted for signature symmetry only.
func add(hash uint64, size int64, k *kfe) {
	hmap[hash] = append(hmap[hash], *k)
}
func addFile(path string, fi os.FileInfo, hash uint64, size int64) {
p := fullName(path, fi)
//fmt.Printf("addFile: path=%q, fi.Name()=%q, p=%q\n", path, fi.Name(), p)
k1 := kfe{p, fi.Size(), 0}
skey := fi.Size()
// 0 length files are currently silently ignored
// they are not identical
hkey := uint64(0)
if skey > fthreshold {
if *fr {
hkey = readFullHash(path, fi)
} else {
hkey = readPartialHash(path, fi)
}
add(hkey, skey, &k1)
// smap not used
_, ok2 := smap[skey]
if !ok2 {
smap[skey] = []kfe{k1}
} else {
smap[skey] = append(smap[skey], k1)
}
}
}
// addDir records a directory entry in the global hash map, skipping
// directories whose accumulated size is at or below dthreshold.
// (Open question from the original: should dirs respect the threshold?)
func addDir(path string, fi os.FileInfo, hash uint64, size int64) {
	if size <= dthreshold {
		return
	}
	entry := kfe{fullName(path, fi), size, hash}
	add(hash, size, &entry)
}
// descend walks the directory entries fis found under path, depth first.
// For each regular file passing the size threshold and -pat filter it
// computes a hash and calls ffp (if non-nil); for each directory it
// recurses, folds the subtree hash into a rolling hash, and calls dfp
// (if non-nil). Returns the combined hash and total size of the subtree.
func descend(path string, fis []os.FileInfo,
	ffp func(path string, fis os.FileInfo, hash uint64, size int64),
	dfp func(path string, fis os.FileInfo, hash uint64, size int64)) (uint64, int64) {
	var des func(path string, fis []os.FileInfo) (uint64, int64)
	des = func(path string, fis []os.FileInfo) (uint64, int64) {
		var hash uint64
		var size, sizes int64
		var gh = aeshash.NewAES(0) // rolling hash over the child hashes
		for _, fi := range fis {
			//fmt.Printf("des: fi.Name=%q\n", fi.Name())
			switch {
			case fi.Mode()&os.ModeDir == os.ModeDir:
				stats.scannedDirs++
				// -dd: do not descend into directories matching ddre.
				if *dd != "" {
					b := ddre.MatchString(fi.Name())
					if b {
						fmt.Printf("des: skip dir=%q\n", fi.Name())
						continue
					}
				}
				p := fullName(path, fi)
				//fmt.Printf("des: dir=%q\n", p)
				d, err := os.Open(p)
				if err != nil {
					continue // unreadable directory: skip silently
				}
				fis, err := d.Readdir(-1)
				if err != nil || fis == nil {
					// NOTE(review): d is not closed on this path.
					fmt.Printf("des: can't read %q\n", fullName(path, fi))
					continue
				}
				d.Close()
				h, size := des(p, fis) // size shadows the outer declaration
				hash = h
				gh.Write64(hash)
				sizes += size
				//fmt.Printf("des: dir: path=%q, fi.Name()=%q, sizes=%d\n", path, fi.Name(), sizes)
				stats.matchedDirs++
				if dfp != nil {
					dfp(path, fi, hash, size)
				}
			case fi.Mode()&os.ModeType == 0: // regular file
				stats.scannedFiles++
				sizes += fi.Size()
				//fmt.Printf("des: file: path=%q, fi.Name()=%q, sizes=%d\n", path, fi.Name(), sizes)
				if fi.Size() > fthreshold && (*pat == "" || (*pat != "" && patre.MatchString(fi.Name()))) {
					if *fr {
						hash = readFullHash(path, fi)
					} else {
						hash = readPartialHash(path, fi)
					}
					gh.Write64(hash)
					stats.matchedFiles++
					if ffp != nil {
						// NOTE(review): this passes the outer size, which is
						// never assigned (always 0) — fi.Size() may be intended.
						ffp(path, fi, hash, size)
					}
				}
			default:
				continue // anything else (symlink, device, ...) is ignored
			}
		}
		hashes := gh.Sum64()
		//fmt.Printf("dir=%q, size=%d\n", path, sizes)
		return hashes, sizes
	}
	//fmt.Printf("des: path=%q\n", path)
	return des(path, fis)
}
// scan stats each supplied path and dispatches on its type: directories
// are walked with descend (hashing dirs with -d, files otherwise), and
// bare file arguments are hashed and printed immediately.
func scan(paths []string, ndirs int) {
	var hash uint64
	var size int64
	for _, path := range paths {
		fi, err := os.Stat(path)
		if err != nil || fi == nil {
			fmt.Printf("fi=%#v, err=%v\n", fi, err)
			panic("bad")
		}
		// prefix is the parent directory of path ("" when path has no slash).
		prefix := ""
		idx := strings.LastIndex(path, "/")
		if idx != -1 {
			prefix = path[0:idx]
		}
		switch {
		case fi.Mode()&os.ModeDir == os.ModeDir:
			fis := []os.FileInfo{fi}
			if *dirf {
				//hash, size = addDir(dir, fi)
				// -d: record directory hashes, including the top-level dir.
				hash, size = descend(prefix, fis, nil, addDir)
				//fmt.Printf("scan: add hash=0x%016x, path=%q, fi.Name()=%q\n", hash, prefix, fi.Name())
				add(hash, size, &kfe{prefix, size, hash})
			} else {
				//addDirs(path, fis)
				hash, size = descend(prefix, fis, addFile, nil)
			}
		case fi.Mode()&os.ModeType == 0:
			// Bare file argument: hash and print it directly.
			if *fr {
				hash = readFullHash(prefix, fi)
			} else {
				hash = readPartialHash(prefix, fi)
			}
			fmt.Printf("0x%016x %q\n", hash, path) // ???
			//fmt.Printf("addFile: path=%q, fi.Name()=%q\n", path, fi.Name())
		}
		if *dirf && *ps {
			fmt.Printf("# dir=%q, hash=0x%016x, files totaling %h\n", path, hash, hrff.Int64{size, "B"})
		}
	}
}
// check scans the global hash map and reports duplicates (default) or,
// with -r, entries that do not appear in all ndirs supplied trees.
// kind ("files" or "dirs") is used only in the -ps summary text.
func check(kind string, ndirs int) {
	for k, v := range hmap {
		switch {
		case *r && len(v) < ndirs && !*pd:
			// Missing: fewer copies than the number of scanned dirs.
			count++
			if *print {
				fmt.Printf("\t%q %d %d\n", v[0].path, len(v), ndirs)
			}
		case *r && len(v) > ndirs && *pd:
			// With -pd, report over-represented (duplicated) entries instead.
			count++
			if *print {
				fmt.Printf("\t%q %d %d\n", v[0].path, len(v), ndirs)
			}
		case !*r && len(v) > 1:
			// Duplicates: every entry on this slice shares the same hash.
			// (A redundant nested len(v) > 1 re-check was removed.)
			if *print {
				fmt.Printf("0x%016x ", k)
			}
			for k2, v2 := range v {
				size := hrff.Int64{v2.size, "B"}
				if k2 == 0 && *print {
					fmt.Printf("%h\n", size)
				}
				total += v2.size
				count++
				if *print {
					fmt.Printf("\t%q\n", v2.path)
				}
			}
		}
	}
	if *ps {
		if *r {
			fmt.Printf("# %d %s missing\n", count, kind)
		} else {
			fmt.Printf("# %d %s duplicated, totaling %h\n", count, kind, hrff.Int64{total, "B"})
		}
		fmt.Printf("# %d files, %d dirs scanned\n", stats.scannedFiles, stats.scannedDirs)
	}
}
// main parses flags, compiles the optional -pat and -dd regexps, scans
// the command-line paths, and reports duplicate or missing entries.
func main() {
	var kind string = "files"
	var ndirs, nfiles int
	var paths []string
	flag.Var(&_fthreshold, "ft", "file sizes <= threshhold will not be considered")
	flag.Var(&_dthreshold, "dt", "directory sizes <= threshhold will not be considered")
	hf = aeshash.NewAES(0)
	flag.Parse()
	if *pat != "" {
		re, err := regexp.Compile(*pat)
		if err != nil {
			// Was a silent return (exit 0); a bad pattern now reports why.
			log.Fatalf("bad -pat regexp %q: %v", *pat, err)
		}
		patre = re
	}
	if *dd != "" {
		re, err := regexp.Compile(*dd)
		if err != nil {
			log.Fatalf("bad -dd regexp %q: %v", *dd, err)
		}
		ddre = re
	}
	fthreshold = int64(_fthreshold.V)
	dthreshold = int64(_dthreshold.V)
	if *dirf {
		kind = "dirs"
	}
	// Count dir vs file arguments so check knows how many trees to compare.
	for _, path := range flag.Args() {
		fi, err := os.Stat(path)
		if err != nil || fi == nil {
			fmt.Printf("fi=%#v, err=%v\n", fi, err)
			panic("bad")
		}
		if fi.Mode()&os.ModeDir == os.ModeDir {
			ndirs++
		} else {
			nfiles++
		}
		paths = append(paths, path)
	}
	scan(paths, ndirs)
	check(kind, ndirs)
}
/*
1. still a bug when comparing two dirs, there are two different top level hashes
2. with -r what happens with duplicated files? The count will not be ndirs and can be higher. Could change compare
but what about 2 files in 2 dirs with a drop and an add would seem correct.
*/
fixed comment
// Copyright © 2015-2016 Lawrence E. Bakst. All rights reserved.
// Originally written on a plane from SFO->EWR on 7-23-15 in about an hour.
// Based on an idea I had been mulling in my mind for years.
//
// dedup scans files or directories and calculates fingerprints hashes for them based on their contents.
//
// Without the -d (directory) switch dedup recursively scans the supplied directories in depth first
// order and records the hash of each file in a map of slices keyed by the hash. After the scan is
// complete, the resulting map is iterated and if any of the slices have a length of more than 1,
// then the files on that slice are all duplicates of each other.
//
// If the -d switch is supplied the hashes of files are themselves recursively hashed and the resulting
// hashes of each directory (but not the files) are recorded in the map. Again, if the length of any
// slice is more than 1 then the entire directory is duplicated.
//
// If the -r switch is supplied, when the map is scanned, any slices with a length different than
// the number of supplied directories are printed as these represent missing files. This allows
// directories to be easily compared and more than two can easily be compared.
package main
import (
"flag"
"fmt"
"github.com/tildeleb/hashland/aeshash"
_ "github.com/tildeleb/hashland/jenkins"
"hash"
"leb.io/hrff"
"log"
"os"
"regexp"
"strings"
)
type PathReader interface {
PathRead(path string, fi os.FileInfo) (r uint64)
}
type kfe struct {
path string
size int64
hash uint64
}
type stat struct {
scannedFiles int64
scannedDirs int64
matchedFiles int64
matchedDirs int64
}
var stats stat
var ddre *regexp.Regexp
var patre *regexp.Regexp
var blockSize = flag.Int64("b", 8192, "block size")
var dirf = flag.Bool("d", false, "hash dirs")
var r = flag.Bool("r", false, "reverse sense; record non duplicated files")
var fr = flag.Bool("fr", false, "full read; read the entire file")
var pat = flag.String("pat", "", "regexp pattern to match filenames")
var dd = flag.String("dd", "", "do not descend past dirs named this")
var print = flag.Bool("p", false, "print duplicated dirs or files")
var ps = flag.Bool("ps", false, "print summary")
var pd = flag.Bool("pd", false, "print duplicates with -r")
var _fthreshold hrff.Int64
var _dthreshold hrff.Int64
var fthreshold int64
var dthreshold int64
var total int64
var count int64
var hmap = make(map[uint64][]kfe, 100)
var smap = make(map[int64][]kfe, 100)
var hf hash.Hash64
// fullName joins the directory prefix and the entry's name with a "/",
// returning just the entry name when the prefix is empty.
func fullName(path string, fi os.FileInfo) string {
	if path == "" {
		return fi.Name()
	}
	return path + "/" + fi.Name()
}
// readFullHash hashes the entire contents of the file identified by
// path/fi with the global hasher hf, reading *blockSize bytes at a time.
// Empty files hash to 0. Unreadable files abort the program.
func readFullHash(path string, fi os.FileInfo) (r uint64) {
	p := fullName(path, fi)
	if fi.Size() == 0 {
		return 0
	}
	buf := make([]byte, *blockSize)
	f, err := os.Open(p)
	if err != nil {
		panic("readFullHash: Open") // was mislabeled "readPartialHash"
	}
	defer f.Close()
	hf.Reset()
	for {
		l, err := f.Read(buf)
		if l == 0 {
			break // EOF: os.File returns (0, io.EOF) at end of file
		}
		if l < 0 || err != nil {
			log.Fatal(err)
			return
		}
		hf.Write(buf[:l])
	}
	return hf.Sum64()
}
// readPartialHash hashes a sample of the file: the whole file when it
// fits in one block, otherwise the first and last *blockSize/2 bytes.
// Empty files hash to 0. Unreadable files abort the program.
func readPartialHash(path string, fi os.FileInfo) (r uint64) {
	p := fullName(path, fi)
	if fi.Size() == 0 {
		return 0
	}
	half := *blockSize / 2
	buf := make([]byte, *blockSize)
	f, err := os.Open(p)
	if err != nil {
		panic("readPartialHash: Open")
	}
	// defer replaces the old bare Close, which was skipped on the Fatal paths.
	defer f.Close()
	l := 0
	if fi.Size() <= *blockSize {
		// Small file: read it all in one go.
		// (The Read error used to be discarded while a stale err was tested.)
		l, err = f.Read(buf)
		if err != nil {
			log.Fatal(err)
		}
	} else {
		// Large file: sample the first and last half-blocks.
		l, err = f.Read(buf[0:half])
		if err != nil {
			log.Fatal(err)
		}
		// The Seek error used to be ignored entirely.
		if _, err = f.Seek(-half, os.SEEK_END); err != nil {
			log.Fatal(err)
		}
		l2, err := f.Read(buf[half:]) // this error was discarded too
		if err != nil {
			log.Fatal(err)
		}
		lt := l + l2
		if lt != int(*blockSize) {
			fmt.Printf("blocksSize=%d, half=%d\n", *blockSize, half)
			fmt.Printf("f=%q, size=%d, l=%d, l2=%d, lt=%d\n", fi.Name(), fi.Size(), l, l2, lt)
			panic("readPartialHash: blockSize")
		}
	}
	return aeshash.Hash(buf[0:l], 0)
}
// add records *k in the global hash map under hash. append handles both
// the first entry (nil slice) and later ones, so no existence check is
// needed. The size parameter is accepted for signature symmetry only.
func add(hash uint64, size int64, k *kfe) {
	hmap[hash] = append(hmap[hash], *k)
}
func addFile(path string, fi os.FileInfo, hash uint64, size int64) {
p := fullName(path, fi)
//fmt.Printf("addFile: path=%q, fi.Name()=%q, p=%q\n", path, fi.Name(), p)
k1 := kfe{p, fi.Size(), 0}
skey := fi.Size()
// 0 length files are currently silently ignored
// they are not identical
hkey := uint64(0)
if skey > fthreshold {
if *fr {
hkey = readFullHash(path, fi)
} else {
hkey = readPartialHash(path, fi)
}
add(hkey, skey, &k1)
// smap not used
_, ok2 := smap[skey]
if !ok2 {
smap[skey] = []kfe{k1}
} else {
smap[skey] = append(smap[skey], k1)
}
}
}
// addDir records a directory entry in the global hash map, skipping
// directories whose accumulated size is at or below dthreshold.
// (Open question from the original: should dirs respect the threshold?)
func addDir(path string, fi os.FileInfo, hash uint64, size int64) {
	if size <= dthreshold {
		return
	}
	entry := kfe{fullName(path, fi), size, hash}
	add(hash, size, &entry)
}
// descend walks the directory entries fis found under path, depth first.
// For each regular file passing the size threshold and -pat filter it
// computes a hash and calls ffp (if non-nil); for each directory it
// recurses, folds the subtree hash into a rolling hash, and calls dfp
// (if non-nil). Returns the combined hash and total size of the subtree.
func descend(path string, fis []os.FileInfo,
	ffp func(path string, fis os.FileInfo, hash uint64, size int64),
	dfp func(path string, fis os.FileInfo, hash uint64, size int64)) (uint64, int64) {
	var des func(path string, fis []os.FileInfo) (uint64, int64)
	des = func(path string, fis []os.FileInfo) (uint64, int64) {
		var hash uint64
		var size, sizes int64
		var gh = aeshash.NewAES(0) // rolling hash over the child hashes
		for _, fi := range fis {
			//fmt.Printf("des: fi.Name=%q\n", fi.Name())
			switch {
			case fi.Mode()&os.ModeDir == os.ModeDir:
				stats.scannedDirs++
				// -dd: do not descend into directories matching ddre.
				if *dd != "" {
					b := ddre.MatchString(fi.Name())
					if b {
						fmt.Printf("des: skip dir=%q\n", fi.Name())
						continue
					}
				}
				p := fullName(path, fi)
				//fmt.Printf("des: dir=%q\n", p)
				d, err := os.Open(p)
				if err != nil {
					continue // unreadable directory: skip silently
				}
				fis, err := d.Readdir(-1)
				if err != nil || fis == nil {
					// NOTE(review): d is not closed on this path.
					fmt.Printf("des: can't read %q\n", fullName(path, fi))
					continue
				}
				d.Close()
				h, size := des(p, fis) // size shadows the outer declaration
				hash = h
				gh.Write64(hash)
				sizes += size
				//fmt.Printf("des: dir: path=%q, fi.Name()=%q, sizes=%d\n", path, fi.Name(), sizes)
				stats.matchedDirs++
				if dfp != nil {
					dfp(path, fi, hash, size)
				}
			case fi.Mode()&os.ModeType == 0: // regular file
				stats.scannedFiles++
				sizes += fi.Size()
				//fmt.Printf("des: file: path=%q, fi.Name()=%q, sizes=%d\n", path, fi.Name(), sizes)
				if fi.Size() > fthreshold && (*pat == "" || (*pat != "" && patre.MatchString(fi.Name()))) {
					if *fr {
						hash = readFullHash(path, fi)
					} else {
						hash = readPartialHash(path, fi)
					}
					gh.Write64(hash)
					stats.matchedFiles++
					if ffp != nil {
						// NOTE(review): this passes the outer size, which is
						// never assigned (always 0) — fi.Size() may be intended.
						ffp(path, fi, hash, size)
					}
				}
			default:
				continue // anything else (symlink, device, ...) is ignored
			}
		}
		hashes := gh.Sum64()
		//fmt.Printf("dir=%q, size=%d\n", path, sizes)
		return hashes, sizes
	}
	//fmt.Printf("des: path=%q\n", path)
	return des(path, fis)
}
// scan stats each supplied path and dispatches on its type: directories
// are walked with descend (hashing dirs with -d, files otherwise), and
// bare file arguments are hashed and printed immediately.
func scan(paths []string, ndirs int) {
	var hash uint64
	var size int64
	for _, path := range paths {
		fi, err := os.Stat(path)
		if err != nil || fi == nil {
			fmt.Printf("fi=%#v, err=%v\n", fi, err)
			panic("bad")
		}
		// prefix is the parent directory of path ("" when path has no slash).
		prefix := ""
		idx := strings.LastIndex(path, "/")
		if idx != -1 {
			prefix = path[0:idx]
		}
		switch {
		case fi.Mode()&os.ModeDir == os.ModeDir:
			fis := []os.FileInfo{fi}
			if *dirf {
				//hash, size = addDir(dir, fi)
				// -d: record directory hashes, including the top-level dir.
				hash, size = descend(prefix, fis, nil, addDir)
				//fmt.Printf("scan: add hash=0x%016x, path=%q, fi.Name()=%q\n", hash, prefix, fi.Name())
				add(hash, size, &kfe{prefix, size, hash})
			} else {
				//addDirs(path, fis)
				hash, size = descend(prefix, fis, addFile, nil)
			}
		case fi.Mode()&os.ModeType == 0:
			// Bare file argument: hash and print it directly.
			if *fr {
				hash = readFullHash(prefix, fi)
			} else {
				hash = readPartialHash(prefix, fi)
			}
			fmt.Printf("0x%016x %q\n", hash, path) // ???
			//fmt.Printf("addFile: path=%q, fi.Name()=%q\n", path, fi.Name())
		}
		if *dirf && *ps {
			fmt.Printf("# dir=%q, hash=0x%016x, files totaling %h\n", path, hash, hrff.Int64{size, "B"})
		}
	}
}
// check scans the global hash map and reports duplicates (default) or,
// with -r, entries that do not appear in all ndirs supplied trees.
// kind ("files" or "dirs") is used only in the -ps summary text.
func check(kind string, ndirs int) {
	for k, v := range hmap {
		switch {
		case *r && len(v) < ndirs && !*pd:
			// Missing: fewer copies than the number of scanned dirs.
			count++
			if *print {
				fmt.Printf("\t%q %d %d\n", v[0].path, len(v), ndirs)
			}
		case *r && len(v) > ndirs && *pd:
			// With -pd, report over-represented (duplicated) entries instead.
			count++
			if *print {
				fmt.Printf("\t%q %d %d\n", v[0].path, len(v), ndirs)
			}
		case !*r && len(v) > 1:
			// Duplicates: every entry on this slice shares the same hash.
			// (A redundant nested len(v) > 1 re-check was removed.)
			if *print {
				fmt.Printf("0x%016x ", k)
			}
			for k2, v2 := range v {
				size := hrff.Int64{v2.size, "B"}
				if k2 == 0 && *print {
					fmt.Printf("%h\n", size)
				}
				total += v2.size
				count++
				if *print {
					fmt.Printf("\t%q\n", v2.path)
				}
			}
		}
	}
	if *ps {
		if *r {
			fmt.Printf("# %d %s missing\n", count, kind)
		} else {
			fmt.Printf("# %d %s duplicated, totaling %h\n", count, kind, hrff.Int64{total, "B"})
		}
		fmt.Printf("# %d files, %d dirs scanned\n", stats.scannedFiles, stats.scannedDirs)
	}
}
// main parses flags, compiles the optional -pat and -dd regexps, scans
// the command-line paths, and reports duplicate or missing entries.
func main() {
	var kind string = "files"
	var ndirs, nfiles int
	var paths []string
	flag.Var(&_fthreshold, "ft", "file sizes <= threshhold will not be considered")
	flag.Var(&_dthreshold, "dt", "directory sizes <= threshhold will not be considered")
	hf = aeshash.NewAES(0)
	flag.Parse()
	if *pat != "" {
		re, err := regexp.Compile(*pat)
		if err != nil {
			// Was a silent return (exit 0); a bad pattern now reports why.
			log.Fatalf("bad -pat regexp %q: %v", *pat, err)
		}
		patre = re
	}
	if *dd != "" {
		re, err := regexp.Compile(*dd)
		if err != nil {
			log.Fatalf("bad -dd regexp %q: %v", *dd, err)
		}
		ddre = re
	}
	fthreshold = int64(_fthreshold.V)
	dthreshold = int64(_dthreshold.V)
	if *dirf {
		kind = "dirs"
	}
	// Count dir vs file arguments so check knows how many trees to compare.
	for _, path := range flag.Args() {
		fi, err := os.Stat(path)
		if err != nil || fi == nil {
			fmt.Printf("fi=%#v, err=%v\n", fi, err)
			panic("bad")
		}
		if fi.Mode()&os.ModeDir == os.ModeDir {
			ndirs++
		} else {
			nfiles++
		}
		paths = append(paths, path)
	}
	scan(paths, ndirs)
	check(kind, ndirs)
}
/*
1. still a bug when comparing two dirs, there are two different top level hashes
2. with -r what happens with duplicated files? The count will not be ndirs and can be higher. Could change compare
but what about 2 files in 2 dirs with a drop and an add would seem correct.
*/
|
// +build !providerless
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws
import (
"context"
"errors"
"fmt"
"io"
"net"
"path"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/elb"
"github.com/aws/aws-sdk-go/service/elbv2"
"github.com/aws/aws-sdk-go/service/kms"
"github.com/aws/aws-sdk-go/service/sts"
"gopkg.in/gcfg.v1"
v1 "k8s.io/api/core/v1"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
informercorev1 "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/pkg/version"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
cloudprovider "k8s.io/cloud-provider"
nodehelpers "k8s.io/cloud-provider/node/helpers"
servicehelpers "k8s.io/cloud-provider/service/helpers"
cloudvolume "k8s.io/cloud-provider/volume"
volerr "k8s.io/cloud-provider/volume/errors"
volumehelpers "k8s.io/cloud-provider/volume/helpers"
)
// NLBHealthCheckRuleDescription is the comment used on a security group rule to
// indicate that it is used for health checks
const NLBHealthCheckRuleDescription = "kubernetes.io/rule/nlb/health"
// NLBClientRuleDescription is the comment used on a security group rule to
// indicate that it is used for client traffic
const NLBClientRuleDescription = "kubernetes.io/rule/nlb/client"
// NLBMtuDiscoveryRuleDescription is the comment used on a security group rule
// to indicate that it is used for mtu discovery
const NLBMtuDiscoveryRuleDescription = "kubernetes.io/rule/nlb/mtu"
// ProviderName is the name of this cloud provider.
const ProviderName = "aws"
// TagNameKubernetesService is the tag name we use to differentiate multiple
// services. Used currently for ELBs only.
const TagNameKubernetesService = "kubernetes.io/service-name"
// TagNameSubnetInternalELB is the tag name used on a subnet to designate that
// it should be used for internal ELBs
const TagNameSubnetInternalELB = "kubernetes.io/role/internal-elb"
// TagNameSubnetPublicELB is the tag name used on a subnet to designate that
// it should be used for internet ELBs
const TagNameSubnetPublicELB = "kubernetes.io/role/elb"
// ServiceAnnotationLoadBalancerType is the annotation used on the service
// to indicate what type of Load Balancer we want. Right now, the only accepted
// value is "nlb"
const ServiceAnnotationLoadBalancerType = "service.beta.kubernetes.io/aws-load-balancer-type"
// ServiceAnnotationLoadBalancerInternal is the annotation used on the service
// to indicate that we want an internal ELB.
const ServiceAnnotationLoadBalancerInternal = "service.beta.kubernetes.io/aws-load-balancer-internal"
// ServiceAnnotationLoadBalancerProxyProtocol is the annotation used on the
// service to enable the proxy protocol on an ELB. Right now we only accept the
// value "*" which means enable the proxy protocol on all ELB backends. In the
// future we could adjust this to allow setting the proxy protocol only on
// certain backends.
const ServiceAnnotationLoadBalancerProxyProtocol = "service.beta.kubernetes.io/aws-load-balancer-proxy-protocol"
// ServiceAnnotationLoadBalancerAccessLogEmitInterval is the annotation used to
// specify access log emit interval.
const ServiceAnnotationLoadBalancerAccessLogEmitInterval = "service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval"
// ServiceAnnotationLoadBalancerAccessLogEnabled is the annotation used on the
// service to enable or disable access logs.
const ServiceAnnotationLoadBalancerAccessLogEnabled = "service.beta.kubernetes.io/aws-load-balancer-access-log-enabled"
// ServiceAnnotationLoadBalancerAccessLogS3BucketName is the annotation used to
// specify access log s3 bucket name.
const ServiceAnnotationLoadBalancerAccessLogS3BucketName = "service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name"
// ServiceAnnotationLoadBalancerAccessLogS3BucketPrefix is the annotation used
// to specify access log s3 bucket prefix.
const ServiceAnnotationLoadBalancerAccessLogS3BucketPrefix = "service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix"
// ServiceAnnotationLoadBalancerConnectionDrainingEnabled is the annotation
// used on the service to enable or disable connection draining.
const ServiceAnnotationLoadBalancerConnectionDrainingEnabled = "service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled"
// ServiceAnnotationLoadBalancerConnectionDrainingTimeout is the annotation
// used on the service to specify a connection draining timeout.
const ServiceAnnotationLoadBalancerConnectionDrainingTimeout = "service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout"
// ServiceAnnotationLoadBalancerConnectionIdleTimeout is the annotation used
// on the service to specify the idle connection timeout.
const ServiceAnnotationLoadBalancerConnectionIdleTimeout = "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout"
// ServiceAnnotationLoadBalancerCrossZoneLoadBalancingEnabled is the annotation
// used on the service to enable or disable cross-zone load balancing.
const ServiceAnnotationLoadBalancerCrossZoneLoadBalancingEnabled = "service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled"
// ServiceAnnotationLoadBalancerExtraSecurityGroups is the annotation used
// on the service to specify additional security groups to be added to ELB created
const ServiceAnnotationLoadBalancerExtraSecurityGroups = "service.beta.kubernetes.io/aws-load-balancer-extra-security-groups"
// ServiceAnnotationLoadBalancerSecurityGroups is the annotation used
// on the service to specify the security groups to be added to ELB created. Differently from the annotation
// "service.beta.kubernetes.io/aws-load-balancer-extra-security-groups", this replaces all other security groups previously assigned to the ELB.
const ServiceAnnotationLoadBalancerSecurityGroups = "service.beta.kubernetes.io/aws-load-balancer-security-groups"
// ServiceAnnotationLoadBalancerCertificate is the annotation used on the
// service to request a secure listener. Value is a valid certificate ARN.
// For more, see http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-listener-config.html
// CertARN is an IAM or CM certificate ARN, e.g. arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012
const ServiceAnnotationLoadBalancerCertificate = "service.beta.kubernetes.io/aws-load-balancer-ssl-cert"
// ServiceAnnotationLoadBalancerSSLPorts is the annotation used on the service
// to specify a comma-separated list of ports that will use SSL/HTTPS
// listeners. Defaults to '*' (all).
const ServiceAnnotationLoadBalancerSSLPorts = "service.beta.kubernetes.io/aws-load-balancer-ssl-ports"
// ServiceAnnotationLoadBalancerSSLNegotiationPolicy is the annotation used on
// the service to specify a SSL negotiation settings for the HTTPS/SSL listeners
// of your load balancer. Defaults to AWS's default
const ServiceAnnotationLoadBalancerSSLNegotiationPolicy = "service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy"
// ServiceAnnotationLoadBalancerBEProtocol is the annotation used on the service
// to specify the protocol spoken by the backend (pod) behind a listener.
// If `http` (default) or `https`, an HTTPS listener that terminates the
// connection and parses headers is created.
// If set to `ssl` or `tcp`, a "raw" SSL listener is used.
// If set to `http` and `aws-load-balancer-ssl-cert` is not used then
// a HTTP listener is used.
const ServiceAnnotationLoadBalancerBEProtocol = "service.beta.kubernetes.io/aws-load-balancer-backend-protocol"
// ServiceAnnotationLoadBalancerAdditionalTags is the annotation used on the service
// to specify a comma-separated list of key-value pairs which will be recorded as
// additional tags in the ELB.
// For example: "Key1=Val1,Key2=Val2,KeyNoVal1=,KeyNoVal2"
const ServiceAnnotationLoadBalancerAdditionalTags = "service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags"
// ServiceAnnotationLoadBalancerHCHealthyThreshold is the annotation used on
// the service to specify the number of successive successful health checks
// required for a backend to be considered healthy for traffic.
const ServiceAnnotationLoadBalancerHCHealthyThreshold = "service.beta.kubernetes.io/aws-load-balancer-healthcheck-healthy-threshold"
// ServiceAnnotationLoadBalancerHCUnhealthyThreshold is the annotation used
// on the service to specify the number of unsuccessful health checks
// required for a backend to be considered unhealthy for traffic
const ServiceAnnotationLoadBalancerHCUnhealthyThreshold = "service.beta.kubernetes.io/aws-load-balancer-healthcheck-unhealthy-threshold"
// ServiceAnnotationLoadBalancerHCTimeout is the annotation used on the
// service to specify, in seconds, how long to wait before marking a health
// check as failed.
const ServiceAnnotationLoadBalancerHCTimeout = "service.beta.kubernetes.io/aws-load-balancer-healthcheck-timeout"
// ServiceAnnotationLoadBalancerHCInterval is the annotation used on the
// service to specify, in seconds, the interval between health checks.
const ServiceAnnotationLoadBalancerHCInterval = "service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval"
// ServiceAnnotationLoadBalancerEIPAllocations is the annotation used on the
// service to specify a comma separated list of EIP allocations to use as
// static IP addresses for the NLB. Only supported on elbv2 (NLB)
const ServiceAnnotationLoadBalancerEIPAllocations = "service.beta.kubernetes.io/aws-load-balancer-eip-allocations"
// Event key when a volume is stuck on attaching state when being attached to a volume
const volumeAttachmentStuck = "VolumeAttachmentStuck"
// Indicates that a node has volumes stuck in attaching state and hence it is not fit for scheduling more pods
const nodeWithImpairedVolumes = "NodeWithImpairedVolumes"
const (
	// volumeAttachmentStatusConsecutiveErrorLimit is the number of consecutive errors we will ignore when waiting for a volume to attach/detach
	// (comment previously named a nonexistent constant).
	volumeAttachmentStatusConsecutiveErrorLimit = 10

	// Attach typically takes 2-5 seconds (average is 2). Asking before 2 seconds is just waste of API quota.
	volumeAttachmentStatusInitialDelay = 2 * time.Second
	// Detach typically takes 5-10 seconds (average is 6). Asking before 5 seconds is just waste of API quota.
	volumeDetachmentStatusInitialDelay = 5 * time.Second
	// After the initial delay, poll attach/detach with exponential backoff (2046 seconds total)
	volumeAttachmentStatusPollDelay = 2 * time.Second
	volumeAttachmentStatusFactor    = 2
	volumeAttachmentStatusSteps     = 11

	// createTag* is configuration of exponential backoff for CreateTag call. We
	// retry mainly because if we create an object, we cannot tag it until it is
	// "fully created" (eventual consistency). Starting with 1 second, doubling
	// it every step and taking 9 steps results in 255 second total waiting
	// time.
	createTagInitialDelay = 1 * time.Second
	createTagFactor       = 2.0
	createTagSteps        = 9

	// volumeCreate* is configuration of exponential backoff for created volume.
	// On a random AWS account (shared among several developers) it took 4s on
	// average, 8s max.
	volumeCreateInitialDelay  = 5 * time.Second
	volumeCreateBackoffFactor = 1.2
	volumeCreateBackoffSteps  = 10

	// Number of node names that can be added to a filter. The AWS limit is 200
	// but we are using a lower limit on purpose
	filterNodeLimit = 150
)
// awsTagNameMasterRoles is a set of well-known AWS tag names that indicate the instance is a master
// The major consequence is that it is then not considered for AWS zone discovery for dynamic volume creation.
var awsTagNameMasterRoles = sets.NewString("kubernetes.io/role/master", "k8s.io/role/master")

// backendProtocolMapping maps from a service backend protocol to the ELB
// listener protocol used to reach the backend. Note that plain "http" and
// "tcp" backends are intentionally mapped to the secure variants ("https",
// "ssl"); this table is only consulted for listeners that terminate TLS.
var backendProtocolMapping = map[string]string{
	"https": "https",
	"http":  "https",
	"ssl":   "ssl",
	"tcp":   "ssl",
}
// MaxReadThenCreateRetries sets the maximum number of attempts we will make when
// we read to see if something exists and then try to create it if we didn't find it.
// This can fail once in a consistent system if done in parallel.
// In an eventually consistent system, it could fail unboundedly.
const MaxReadThenCreateRetries = 30

// DefaultVolumeType specifies which storage to use for newly created Volumes.
// TODO: Remove when user/admin can configure volume types and thus we don't
// need hardcoded defaults.
const DefaultVolumeType = "gp2"
// Services is an abstraction over AWS, to allow mocking/other implementations.
// Each method returns a regional client for the corresponding AWS service,
// except Metadata, which talks to the instance-local metadata endpoint.
type Services interface {
	Compute(region string) (EC2, error)
	LoadBalancing(region string) (ELB, error)
	LoadBalancingV2(region string) (ELBV2, error)
	Autoscaling(region string) (ASG, error)
	Metadata() (EC2Metadata, error)
	KeyManagement(region string) (KMS, error)
}
// EC2 is an abstraction over AWS', to allow mocking/other implementations
// Note that the DescribeX functions return a list, so callers don't need to deal with paging
// TODO: Should we rename this to AWS (EBS & ELB are not technically part of EC2)
type EC2 interface {
	// Query EC2 for instances matching the filter
	DescribeInstances(request *ec2.DescribeInstancesInput) ([]*ec2.Instance, error)

	// Attach a volume to an instance
	AttachVolume(*ec2.AttachVolumeInput) (*ec2.VolumeAttachment, error)
	// Detach a volume from an instance it is attached to
	DetachVolume(request *ec2.DetachVolumeInput) (resp *ec2.VolumeAttachment, err error)
	// Lists volumes
	DescribeVolumes(request *ec2.DescribeVolumesInput) ([]*ec2.Volume, error)
	// Create an EBS volume
	CreateVolume(request *ec2.CreateVolumeInput) (resp *ec2.Volume, err error)
	// Delete an EBS volume
	DeleteVolume(*ec2.DeleteVolumeInput) (*ec2.DeleteVolumeOutput, error)
	// Modify size/type/IOPS of an existing EBS volume
	ModifyVolume(*ec2.ModifyVolumeInput) (*ec2.ModifyVolumeOutput, error)
	// List in-progress/recent volume modifications (paging handled by implementation)
	DescribeVolumeModifications(*ec2.DescribeVolumesModificationsInput) ([]*ec2.VolumeModification, error)

	// Security-group management used for ELB/node access rules.
	DescribeSecurityGroups(request *ec2.DescribeSecurityGroupsInput) ([]*ec2.SecurityGroup, error)
	CreateSecurityGroup(*ec2.CreateSecurityGroupInput) (*ec2.CreateSecurityGroupOutput, error)
	DeleteSecurityGroup(request *ec2.DeleteSecurityGroupInput) (*ec2.DeleteSecurityGroupOutput, error)
	AuthorizeSecurityGroupIngress(*ec2.AuthorizeSecurityGroupIngressInput) (*ec2.AuthorizeSecurityGroupIngressOutput, error)
	RevokeSecurityGroupIngress(*ec2.RevokeSecurityGroupIngressInput) (*ec2.RevokeSecurityGroupIngressOutput, error)

	DescribeSubnets(*ec2.DescribeSubnetsInput) ([]*ec2.Subnet, error)
	CreateTags(*ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error)

	// Route-table management used by the Routes cloud-provider interface.
	DescribeRouteTables(request *ec2.DescribeRouteTablesInput) ([]*ec2.RouteTable, error)
	CreateRoute(request *ec2.CreateRouteInput) (*ec2.CreateRouteOutput, error)
	DeleteRoute(request *ec2.DeleteRouteInput) (*ec2.DeleteRouteOutput, error)

	ModifyInstanceAttribute(request *ec2.ModifyInstanceAttributeInput) (*ec2.ModifyInstanceAttributeOutput, error)

	DescribeVpcs(input *ec2.DescribeVpcsInput) (*ec2.DescribeVpcsOutput, error)
}
// ELB is a simple pass-through of AWS' ELB client interface, which allows for testing.
// Method signatures mirror the aws-sdk-go elb.ELB client one-to-one.
type ELB interface {
	CreateLoadBalancer(*elb.CreateLoadBalancerInput) (*elb.CreateLoadBalancerOutput, error)
	DeleteLoadBalancer(*elb.DeleteLoadBalancerInput) (*elb.DeleteLoadBalancerOutput, error)
	DescribeLoadBalancers(*elb.DescribeLoadBalancersInput) (*elb.DescribeLoadBalancersOutput, error)
	AddTags(*elb.AddTagsInput) (*elb.AddTagsOutput, error)
	RegisterInstancesWithLoadBalancer(*elb.RegisterInstancesWithLoadBalancerInput) (*elb.RegisterInstancesWithLoadBalancerOutput, error)
	DeregisterInstancesFromLoadBalancer(*elb.DeregisterInstancesFromLoadBalancerInput) (*elb.DeregisterInstancesFromLoadBalancerOutput, error)
	CreateLoadBalancerPolicy(*elb.CreateLoadBalancerPolicyInput) (*elb.CreateLoadBalancerPolicyOutput, error)
	SetLoadBalancerPoliciesForBackendServer(*elb.SetLoadBalancerPoliciesForBackendServerInput) (*elb.SetLoadBalancerPoliciesForBackendServerOutput, error)
	SetLoadBalancerPoliciesOfListener(input *elb.SetLoadBalancerPoliciesOfListenerInput) (*elb.SetLoadBalancerPoliciesOfListenerOutput, error)
	DescribeLoadBalancerPolicies(input *elb.DescribeLoadBalancerPoliciesInput) (*elb.DescribeLoadBalancerPoliciesOutput, error)

	DetachLoadBalancerFromSubnets(*elb.DetachLoadBalancerFromSubnetsInput) (*elb.DetachLoadBalancerFromSubnetsOutput, error)
	AttachLoadBalancerToSubnets(*elb.AttachLoadBalancerToSubnetsInput) (*elb.AttachLoadBalancerToSubnetsOutput, error)

	CreateLoadBalancerListeners(*elb.CreateLoadBalancerListenersInput) (*elb.CreateLoadBalancerListenersOutput, error)
	DeleteLoadBalancerListeners(*elb.DeleteLoadBalancerListenersInput) (*elb.DeleteLoadBalancerListenersOutput, error)

	ApplySecurityGroupsToLoadBalancer(*elb.ApplySecurityGroupsToLoadBalancerInput) (*elb.ApplySecurityGroupsToLoadBalancerOutput, error)

	ConfigureHealthCheck(*elb.ConfigureHealthCheckInput) (*elb.ConfigureHealthCheckOutput, error)

	DescribeLoadBalancerAttributes(*elb.DescribeLoadBalancerAttributesInput) (*elb.DescribeLoadBalancerAttributesOutput, error)
	ModifyLoadBalancerAttributes(*elb.ModifyLoadBalancerAttributesInput) (*elb.ModifyLoadBalancerAttributesOutput, error)
}
// ELBV2 is a simple pass-through of AWS' ELBV2 client interface, which allows for testing.
// Method signatures mirror the aws-sdk-go elbv2.ELBV2 client one-to-one; used
// for NLB (network load balancer) support.
type ELBV2 interface {
	AddTags(input *elbv2.AddTagsInput) (*elbv2.AddTagsOutput, error)

	CreateLoadBalancer(*elbv2.CreateLoadBalancerInput) (*elbv2.CreateLoadBalancerOutput, error)
	DescribeLoadBalancers(*elbv2.DescribeLoadBalancersInput) (*elbv2.DescribeLoadBalancersOutput, error)
	DeleteLoadBalancer(*elbv2.DeleteLoadBalancerInput) (*elbv2.DeleteLoadBalancerOutput, error)

	ModifyLoadBalancerAttributes(*elbv2.ModifyLoadBalancerAttributesInput) (*elbv2.ModifyLoadBalancerAttributesOutput, error)
	DescribeLoadBalancerAttributes(*elbv2.DescribeLoadBalancerAttributesInput) (*elbv2.DescribeLoadBalancerAttributesOutput, error)

	CreateTargetGroup(*elbv2.CreateTargetGroupInput) (*elbv2.CreateTargetGroupOutput, error)
	DescribeTargetGroups(*elbv2.DescribeTargetGroupsInput) (*elbv2.DescribeTargetGroupsOutput, error)
	ModifyTargetGroup(*elbv2.ModifyTargetGroupInput) (*elbv2.ModifyTargetGroupOutput, error)
	DeleteTargetGroup(*elbv2.DeleteTargetGroupInput) (*elbv2.DeleteTargetGroupOutput, error)

	DescribeTargetHealth(input *elbv2.DescribeTargetHealthInput) (*elbv2.DescribeTargetHealthOutput, error)

	DescribeTargetGroupAttributes(*elbv2.DescribeTargetGroupAttributesInput) (*elbv2.DescribeTargetGroupAttributesOutput, error)
	ModifyTargetGroupAttributes(*elbv2.ModifyTargetGroupAttributesInput) (*elbv2.ModifyTargetGroupAttributesOutput, error)

	RegisterTargets(*elbv2.RegisterTargetsInput) (*elbv2.RegisterTargetsOutput, error)
	DeregisterTargets(*elbv2.DeregisterTargetsInput) (*elbv2.DeregisterTargetsOutput, error)

	CreateListener(*elbv2.CreateListenerInput) (*elbv2.CreateListenerOutput, error)
	DescribeListeners(*elbv2.DescribeListenersInput) (*elbv2.DescribeListenersOutput, error)
	DeleteListener(*elbv2.DeleteListenerInput) (*elbv2.DeleteListenerOutput, error)
	ModifyListener(*elbv2.ModifyListenerInput) (*elbv2.ModifyListenerOutput, error)

	WaitUntilLoadBalancersDeleted(*elbv2.DescribeLoadBalancersInput) error
}
// ASG is a simple pass-through of the Autoscaling client interface, which
// allows for testing.
type ASG interface {
	UpdateAutoScalingGroup(*autoscaling.UpdateAutoScalingGroupInput) (*autoscaling.UpdateAutoScalingGroupOutput, error)
	DescribeAutoScalingGroups(*autoscaling.DescribeAutoScalingGroupsInput) (*autoscaling.DescribeAutoScalingGroupsOutput, error)
}

// KMS is a simple pass-through of the Key Management Service client interface,
// which allows for testing.
type KMS interface {
	DescribeKey(*kms.DescribeKeyInput) (*kms.DescribeKeyOutput, error)
}

// EC2Metadata is an abstraction over the AWS metadata service.
type EC2Metadata interface {
	// Query the EC2 metadata service (used to discover instance-id etc)
	GetMetadata(path string) (string, error)
}
// AWS volume types
const (
	// Provisioned IOPS SSD
	VolumeTypeIO1 = "io1"
	// General Purpose SSD
	VolumeTypeGP2 = "gp2"
	// Cold HDD (sc1)
	VolumeTypeSC1 = "sc1"
	// Throughput Optimized HDD
	VolumeTypeST1 = "st1"
)

// AWS provisioning limits.
// Source: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html
const (
	// MinTotalIOPS is the minimum IOPS that can be requested for an io1 volume.
	MinTotalIOPS = 100
	// MaxTotalIOPS is the maximum IOPS that can be requested for an io1 volume.
	MaxTotalIOPS = 20000
)
// VolumeOptions specifies capacity and tags for a volume.
type VolumeOptions struct {
	// CapacityGB is the requested size of the volume, in GiB.
	CapacityGB int
	// Tags are applied to the created EBS volume.
	Tags map[string]string
	// VolumeType is one of the VolumeType* constants (io1, gp2, sc1, st1).
	VolumeType string
	// AvailabilityZone in which to create the volume.
	AvailabilityZone string
	// IOPSPerGB x CapacityGB will give total IOPS of the volume to create.
	// Calculated total IOPS will be capped at MaxTotalIOPS.
	IOPSPerGB int
	// Encrypted requests an encrypted volume.
	Encrypted bool
	// KmsKeyID is the fully qualified resource name to the key to use for encryption.
	// example: arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef
	KmsKeyID string
}
// Volumes is an interface for managing cloud-provisioned volumes
// TODO: Allow other clouds to implement this
type Volumes interface {
	// Attach the disk to the node with the specified NodeName
	// nodeName can be empty to mean "the instance on which we are running"
	// Returns the device (e.g. /dev/xvdf) where we attached the volume
	AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) (string, error)
	// Detach the disk from the node with the specified NodeName
	// nodeName can be empty to mean "the instance on which we are running"
	// Returns the device where the volume was attached
	DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) (string, error)

	// Create a volume with the specified options
	CreateDisk(volumeOptions *VolumeOptions) (volumeName KubernetesVolumeID, err error)
	// Delete the specified volume
	// Returns true iff the volume was deleted
	// If the volume was not found, returns (false, nil)
	DeleteDisk(volumeName KubernetesVolumeID) (bool, error)

	// Get labels to apply to volume on creation
	GetVolumeLabels(volumeName KubernetesVolumeID) (map[string]string, error)

	// Get volume's disk path from volume name
	// return the device path where the volume is attached
	GetDiskPath(volumeName KubernetesVolumeID) (string, error)

	// Check if the volume is already attached to the node with the specified NodeName
	DiskIsAttached(diskName KubernetesVolumeID, nodeName types.NodeName) (bool, error)

	// Check if disks specified in argument map are still attached to their respective nodes.
	DisksAreAttached(map[types.NodeName][]KubernetesVolumeID) (map[types.NodeName]map[KubernetesVolumeID]bool, error)

	// Expand the disk to new size
	ResizeDisk(diskName KubernetesVolumeID, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error)
}
// InstanceGroups is an interface for managing cloud-managed instance groups / autoscaling instance groups
// TODO: Allow other clouds to implement this
type InstanceGroups interface {
	// Set the size to the fixed size
	ResizeInstanceGroup(instanceGroupName string, size int) error
	// Queries the cloud provider for information about the specified instance group
	DescribeInstanceGroup(instanceGroupName string) (InstanceGroupInfo, error)
}

// InstanceGroupInfo is returned by InstanceGroups.Describe, and exposes information about the group.
type InstanceGroupInfo interface {
	// The number of instances currently running under control of this group
	CurrentSize() (int, error)
}
// Compile-time assertions that *Cloud implements the cloud-provider
// interfaces it claims to support.
var _ cloudprovider.Interface = (*Cloud)(nil)
var _ cloudprovider.Instances = (*Cloud)(nil)
var _ cloudprovider.LoadBalancer = (*Cloud)(nil)
var _ cloudprovider.Routes = (*Cloud)(nil)
var _ cloudprovider.Zones = (*Cloud)(nil)
var _ cloudprovider.PVLabeler = (*Cloud)(nil)
// Cloud is an implementation of Interface, LoadBalancer and Instances for Amazon Web Services.
type Cloud struct {
	// Regional AWS service clients (see the Services interface).
	ec2      EC2
	elb      ELB
	elbv2    ELBV2
	asg      ASG
	kms      KMS
	metadata EC2Metadata
	cfg      *CloudConfig
	region   string
	vpcID    string

	// tagging identifies resources belonging to this cluster.
	tagging awsTagging

	// The AWS instance that we are running on
	// Note that we cache some state in awsInstance (mountpoints), so we must preserve the instance
	selfAWSInstance *awsInstance

	instanceCache instanceCache

	clientBuilder cloudprovider.ControllerClientBuilder
	kubeClient    clientset.Interface

	nodeInformer informercorev1.NodeInformer
	// Extract the function out to make it easier to test
	nodeInformerHasSynced cache.InformerSynced

	eventBroadcaster record.EventBroadcaster
	eventRecorder    record.EventRecorder

	// We keep an active list of devices we have assigned but not yet
	// attached, to avoid a race condition where we assign a device mapping
	// and then get a second request before we attach the volume
	attachingMutex sync.Mutex
	attaching      map[types.NodeName]map[mountDevice]EBSVolumeID

	// state of our device allocator for each node
	deviceAllocators map[types.NodeName]DeviceAllocator
}

// Compile-time assertion that *Cloud implements the Volumes interface.
var _ Volumes = &Cloud{}
// CloudConfig wraps the settings for the AWS cloud provider.
// NOTE: Cloud config files should follow the same Kubernetes deprecation policy as
// flags or CLIs. Config fields should not change behavior in incompatible ways and
// should be deprecated for at least 2 release prior to removing.
// See https://kubernetes.io/docs/reference/using-api/deprecation-policy/#deprecating-a-flag-or-cli
// for more details.
type CloudConfig struct {
	// Global holds the [Global] section of the gcfg-format config file.
	Global struct {
		// TODO: Is there any use for this? We can get it from the instance metadata service
		// Maybe if we're not running on AWS, e.g. bootstrap; for now it is not very useful
		Zone string
		// The AWS VPC flag enables the possibility to run the master components
		// on a different aws account, on a different cloud provider or on-premises.
		// If the flag is set also the KubernetesClusterTag must be provided
		VPC string
		// SubnetID enables using a specific subnet to use for ELB's
		SubnetID string
		// RouteTableID enables using a specific RouteTable
		RouteTableID string

		// RoleARN is the IAM role to assume when interaction with AWS APIs.
		RoleARN string

		// KubernetesClusterTag is the legacy cluster id we'll use to identify our cluster resources
		KubernetesClusterTag string
		// KubernetesClusterID is the cluster id we'll use to identify our cluster resources
		KubernetesClusterID string

		//The aws provider creates an inbound rule per load balancer on the node security
		//group. However, this can run into the AWS security group rule limit of 50 if
		//many LoadBalancers are created.
		//
		//This flag disables the automatic ingress creation. It requires that the user
		//has setup a rule that allows inbound traffic on kubelet ports from the
		//local VPC subnet (so load balancers can access it). E.g. 10.82.0.0/16 30000-32000.
		DisableSecurityGroupIngress bool

		//AWS has a hard limit of 500 security groups. For large clusters creating a security group for each ELB
		//can cause the max number of security groups to be reached. If this is set instead of creating a new
		//Security group for each ELB this security group will be used instead.
		ElbSecurityGroup string

		//During the instantiation of an new AWS cloud provider, the detected region
		//is validated against a known set of regions.
		//
		//In a non-standard, AWS like environment (e.g. Eucalyptus), this check may
		//be undesirable. Setting this to true will disable the check and provide
		//a warning that the check was skipped. Please note that this is an
		//experimental feature and work-in-progress for the moment. If you find
		//yourself in an non-AWS cloud and open an issue, please indicate that in the
		//issue body.
		DisableStrictZoneCheck bool
	}
	// ServiceOverride holds the [ServiceOverride "..."] sections, which redirect
	// individual AWS services to custom endpoints. Example config:
	//
	// [ServiceOverride "1"]
	//  Service = s3
	//  Region = region1
	//  URL = https://s3.foo.bar
	//  SigningRegion = signing_region
	//  SigningMethod = signing_method
	//
	// [ServiceOverride "2"]
	//  Service = ec2
	//  Region = region2
	//  URL = https://ec2.foo.bar
	//  SigningRegion = signing_region
	//  SigningMethod = signing_method
	ServiceOverride map[string]*struct {
		Service       string
		Region        string
		URL           string
		SigningRegion string
		SigningMethod string
		SigningName   string
	}
}
// validateOverrides checks each configured ServiceOverride entry: the
// Service, Region, URL and SigningRegion fields must be non-empty (after
// whitespace trimming), and no two entries may target the same
// (service, region) pair. Service and Region are normalized in place.
func (cfg *CloudConfig) validateOverrides() error {
	if len(cfg.ServiceOverride) == 0 {
		return nil
	}
	seen := make(map[string]bool)
	for key, override := range cfg.ServiceOverride {
		// gcfg does not trim whitespace, so normalize before comparing to "".
		svc := strings.TrimSpace(override.Service)
		if svc == "" {
			return fmt.Errorf("service name is missing [Service is \"\"] in override %s", key)
		}
		// ensure the stored service name is space trimmed
		override.Service = svc

		reg := strings.TrimSpace(override.Region)
		if reg == "" {
			return fmt.Errorf("service region is missing [Region is \"\"] in override %s", key)
		}
		// ensure the stored region is space trimmed
		override.Region = reg

		if strings.TrimSpace(override.URL) == "" {
			return fmt.Errorf("url is missing [URL is \"\"] in override %s", key)
		}
		if strings.TrimSpace(override.SigningRegion) == "" {
			return fmt.Errorf("signingRegion is missing [SigningRegion is \"\"] in override %s", key)
		}

		signature := svc + "_" + reg
		if seen[signature] {
			return fmt.Errorf("duplicate entry found for service override [%s] (%s in %s)", key, svc, reg)
		}
		seen[signature] = true
	}
	return nil
}
// getResolver returns an AWS endpoint resolver that first consults the
// configured ServiceOverride entries and falls back to the SDK's default
// resolver for any (service, region) pair without an override.
func (cfg *CloudConfig) getResolver() endpoints.ResolverFunc {
	defaultResolver := endpoints.DefaultResolver()
	if len(cfg.ServiceOverride) == 0 {
		// No overrides: everything goes straight to the default resolver.
		return func(service, region string,
			optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
			return defaultResolver.EndpointFor(service, region, optFns...)
		}
	}
	return func(service, region string,
		optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
		for _, override := range cfg.ServiceOverride {
			if override.Service != service || override.Region != region {
				continue
			}
			return endpoints.ResolvedEndpoint{
				URL:           override.URL,
				SigningRegion: override.SigningRegion,
				SigningMethod: override.SigningMethod,
				SigningName:   override.SigningName,
			}, nil
		}
		return defaultResolver.EndpointFor(service, region, optFns...)
	}
}
// awsSdkEC2 is an implementation of the EC2 interface, backed by aws-sdk-go
type awsSdkEC2 struct {
	ec2 *ec2.EC2
}

// awsCloudConfigProvider is an interface to make the CloudConfig immutable
// for awsSDKProvider: the provider only needs the endpoint resolver.
type awsCloudConfigProvider interface {
	getResolver() endpoints.ResolverFunc
}

// awsSDKProvider implements Services by constructing aws-sdk-go clients with
// shared credentials, endpoint overrides and per-region retry delayers.
type awsSDKProvider struct {
	creds *credentials.Credentials
	cfg   awsCloudConfigProvider

	// mutex guards regionDelayers.
	mutex          sync.Mutex
	regionDelayers map[string]*CrossRequestRetryDelay
}
// newAWSSDKProvider builds an awsSDKProvider that issues AWS SDK clients
// using the given credentials and (immutable) cloud configuration.
func newAWSSDKProvider(creds *credentials.Credentials, cfg *CloudConfig) *awsSDKProvider {
	provider := &awsSDKProvider{
		creds:          creds,
		cfg:            cfg,
		regionDelayers: map[string]*CrossRequestRetryDelay{},
	}
	return provider
}
// addHandlers installs our standard request handlers on an AWS client:
// a kubernetes user-agent, request logging, and (if available) the
// per-region cross-request retry delayer. NOTE: handlers are pushed to the
// front of their lists, so registration order here is significant.
func (p *awsSDKProvider) addHandlers(regionName string, h *request.Handlers) {
	h.Build.PushFrontNamed(request.NamedHandler{
		Name: "k8s/user-agent",
		Fn:   request.MakeAddToUserAgentHandler("kubernetes", version.Get().String()),
	})

	h.Sign.PushFrontNamed(request.NamedHandler{
		Name: "k8s/logger",
		Fn:   awsHandlerLogger,
	})

	delayer := p.getCrossRequestRetryDelay(regionName)
	if delayer != nil {
		// Delay before signing so the signature is computed after the wait.
		h.Sign.PushFrontNamed(request.NamedHandler{
			Name: "k8s/delay-presign",
			Fn:   delayer.BeforeSign,
		})

		h.AfterRetry.PushFrontNamed(request.NamedHandler{
			Name: "k8s/delay-afterretry",
			Fn:   delayer.AfterRetry,
		})
	}

	p.addAPILoggingHandlers(h)
}
// addAPILoggingHandlers attaches request/response logging to an AWS client:
// one handler logs each outgoing API request, the other logs each response
// during validation.
func (p *awsSDKProvider) addAPILoggingHandlers(h *request.Handlers) {
	h.Send.PushBackNamed(request.NamedHandler{
		Name: "k8s/api-request",
		Fn:   awsSendHandlerLogger,
	})

	h.ValidateResponse.PushFrontNamed(request.NamedHandler{
		Name: "k8s/api-validate-response",
		Fn:   awsValidateResponseHandlerLogger,
	})
}
// getCrossRequestRetryDelay returns the CrossRequestRetryDelay for a region,
// creating it on first use. The delayer is scoped to the region, not to the
// request: once we hit an API rate limit we delay _all_ calls to that region,
// protecting the AWS account from becoming overloaded and effectively locked.
// We also log when we hit request limits.
// Note that this delays the current goroutine; this is bad behaviour and will
// likely cause k8s to become slow or unresponsive for cloud operations.
// However, this throttle is intended only as a last resort. When we observe
// this throttling, we need to address the root cause (e.g. add a delay to a
// controller retry loop).
func (p *awsSDKProvider) getCrossRequestRetryDelay(regionName string) *CrossRequestRetryDelay {
	p.mutex.Lock()
	defer p.mutex.Unlock()

	if delayer, ok := p.regionDelayers[regionName]; ok {
		return delayer
	}
	delayer := NewCrossRequestRetryDelay()
	p.regionDelayers[regionName] = delayer
	return delayer
}
// SetInformers implements the InformerUser interface by wiring up a node
// informer, letting the AWS provider serve node lookups from the shared
// informer cache instead of hitting the Kubernetes API server directly.
func (c *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) {
	klog.Infof("Setting up informers for Cloud")
	nodes := informerFactory.Core().V1().Nodes()
	c.nodeInformer = nodes
	c.nodeInformerHasSynced = nodes.Informer().HasSynced
}
func (p *awsSDKProvider) Compute(regionName string) (EC2, error) {
awsConfig := &aws.Config{
Region: ®ionName,
Credentials: p.creds,
}
awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true).
WithEndpointResolver(p.cfg.getResolver())
sess, err := session.NewSession(awsConfig)
if err != nil {
return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
}
service := ec2.New(sess)
p.addHandlers(regionName, &service.Handlers)
ec2 := &awsSdkEC2{
ec2: service,
}
return ec2, nil
}
func (p *awsSDKProvider) LoadBalancing(regionName string) (ELB, error) {
awsConfig := &aws.Config{
Region: ®ionName,
Credentials: p.creds,
}
awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true).
WithEndpointResolver(p.cfg.getResolver())
sess, err := session.NewSession(awsConfig)
if err != nil {
return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
}
elbClient := elb.New(sess)
p.addHandlers(regionName, &elbClient.Handlers)
return elbClient, nil
}
func (p *awsSDKProvider) LoadBalancingV2(regionName string) (ELBV2, error) {
awsConfig := &aws.Config{
Region: ®ionName,
Credentials: p.creds,
}
awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true).
WithEndpointResolver(p.cfg.getResolver())
sess, err := session.NewSession(awsConfig)
if err != nil {
return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
}
elbClient := elbv2.New(sess)
p.addHandlers(regionName, &elbClient.Handlers)
return elbClient, nil
}
func (p *awsSDKProvider) Autoscaling(regionName string) (ASG, error) {
awsConfig := &aws.Config{
Region: ®ionName,
Credentials: p.creds,
}
awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true).
WithEndpointResolver(p.cfg.getResolver())
sess, err := session.NewSession(awsConfig)
if err != nil {
return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
}
client := autoscaling.New(sess)
p.addHandlers(regionName, &client.Handlers)
return client, nil
}
// Metadata returns a client for the instance-local EC2 metadata service,
// with only API logging handlers attached (no region or credentials needed).
func (p *awsSDKProvider) Metadata() (EC2Metadata, error) {
	cfg := &aws.Config{
		EndpointResolver: p.cfg.getResolver(),
	}
	sess, err := session.NewSession(cfg)
	if err != nil {
		return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
	}
	metadataClient := ec2metadata.New(sess)
	p.addAPILoggingHandlers(&metadataClient.Handlers)
	return metadataClient, nil
}
func (p *awsSDKProvider) KeyManagement(regionName string) (KMS, error) {
awsConfig := &aws.Config{
Region: ®ionName,
Credentials: p.creds,
}
awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true).
WithEndpointResolver(p.cfg.getResolver())
sess, err := session.NewSession(awsConfig)
if err != nil {
return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
}
kmsClient := kms.New(sess)
p.addHandlers(regionName, &kmsClient.Handlers)
return kmsClient, nil
}
// newEc2Filter builds an EC2 API filter with the given name and values.
func newEc2Filter(name string, values ...string) *ec2.Filter {
	f := &ec2.Filter{Name: aws.String(name)}
	for _, v := range values {
		f.Values = append(f.Values, aws.String(v))
	}
	return f
}
// AddSSHKeyToAllInstances is currently not implemented.
// It always returns cloudprovider.NotImplemented.
func (c *Cloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
	return cloudprovider.NotImplemented
}

// CurrentNodeName returns the name of the current node.
// The hostname argument is ignored; the name comes from the cached
// self-instance discovered at startup.
func (c *Cloud) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) {
	return c.selfAWSInstance.nodeName, nil
}
// DescribeInstances implements EC2.DescribeInstances. It follows NextToken
// pagination so callers receive the complete, flattened instance list, and
// records call latency/outcome in the AWS metrics.
func (s *awsSdkEC2) DescribeInstances(request *ec2.DescribeInstancesInput) ([]*ec2.Instance, error) {
	instances := []*ec2.Instance{}
	start := time.Now()
	for {
		response, err := s.ec2.DescribeInstances(request)
		if err != nil {
			recordAWSMetric("describe_instance", 0, err)
			return nil, fmt.Errorf("error listing AWS instances: %q", err)
		}

		// Flatten reservations into a single instance list.
		for _, reservation := range response.Reservations {
			instances = append(instances, reservation.Instances...)
		}

		token := response.NextToken
		if aws.StringValue(token) == "" {
			break
		}
		request.NextToken = token
	}
	recordAWSMetric("describe_instance", time.Since(start).Seconds(), nil)
	return instances, nil
}
// DescribeSecurityGroups implements EC2.DescribeSecurityGroups, following
// NextToken pagination and recording call latency/outcome in the AWS metrics.
func (s *awsSdkEC2) DescribeSecurityGroups(request *ec2.DescribeSecurityGroupsInput) ([]*ec2.SecurityGroup, error) {
	groups := []*ec2.SecurityGroup{}
	start := time.Now()
	for {
		response, err := s.ec2.DescribeSecurityGroups(request)
		if err != nil {
			recordAWSMetric("describe_security_groups", 0, err)
			return nil, fmt.Errorf("error listing AWS security groups: %q", err)
		}

		groups = append(groups, response.SecurityGroups...)

		token := response.NextToken
		if aws.StringValue(token) == "" {
			break
		}
		request.NextToken = token
	}
	recordAWSMetric("describe_security_groups", time.Since(start).Seconds(), nil)
	return groups, nil
}
// AttachVolume attaches an EBS volume to an instance, recording call latency
// and outcome in the AWS metrics.
func (s *awsSdkEC2) AttachVolume(request *ec2.AttachVolumeInput) (*ec2.VolumeAttachment, error) {
	start := time.Now()
	resp, err := s.ec2.AttachVolume(request)
	recordAWSMetric("attach_volume", time.Since(start).Seconds(), err)
	return resp, err
}

// DetachVolume detaches an EBS volume from the instance it is attached to,
// recording call latency and outcome in the AWS metrics.
func (s *awsSdkEC2) DetachVolume(request *ec2.DetachVolumeInput) (*ec2.VolumeAttachment, error) {
	start := time.Now()
	resp, err := s.ec2.DetachVolume(request)
	recordAWSMetric("detach_volume", time.Since(start).Seconds(), err)
	return resp, err
}
// DescribeVolumes lists EBS volumes matching the request, following
// NextToken pagination and recording call latency/outcome in the AWS metrics.
func (s *awsSdkEC2) DescribeVolumes(request *ec2.DescribeVolumesInput) ([]*ec2.Volume, error) {
	volumes := []*ec2.Volume{}
	start := time.Now()
	for {
		response, err := s.ec2.DescribeVolumes(request)
		if err != nil {
			recordAWSMetric("describe_volume", 0, err)
			return nil, err
		}

		volumes = append(volumes, response.Volumes...)

		token := response.NextToken
		if aws.StringValue(token) == "" {
			break
		}
		request.NextToken = token
	}
	recordAWSMetric("describe_volume", time.Since(start).Seconds(), nil)
	return volumes, nil
}
// CreateVolume creates an EBS volume, recording call latency and outcome in
// the AWS metrics.
func (s *awsSdkEC2) CreateVolume(request *ec2.CreateVolumeInput) (*ec2.Volume, error) {
	start := time.Now()
	resp, err := s.ec2.CreateVolume(request)
	recordAWSMetric("create_volume", time.Since(start).Seconds(), err)
	return resp, err
}

// DeleteVolume deletes an EBS volume, recording call latency and outcome in
// the AWS metrics.
func (s *awsSdkEC2) DeleteVolume(request *ec2.DeleteVolumeInput) (*ec2.DeleteVolumeOutput, error) {
	start := time.Now()
	resp, err := s.ec2.DeleteVolume(request)
	recordAWSMetric("delete_volume", time.Since(start).Seconds(), err)
	return resp, err
}

// ModifyVolume modifies an EBS volume (size/type/IOPS), recording call
// latency and outcome in the AWS metrics.
func (s *awsSdkEC2) ModifyVolume(request *ec2.ModifyVolumeInput) (*ec2.ModifyVolumeOutput, error) {
	start := time.Now()
	resp, err := s.ec2.ModifyVolume(request)
	recordAWSMetric("modify_volume", time.Since(start).Seconds(), err)
	return resp, err
}
// DescribeVolumeModifications lists EBS volume modifications matching the
// request, following NextToken pagination and recording call latency/outcome
// in the AWS metrics.
func (s *awsSdkEC2) DescribeVolumeModifications(request *ec2.DescribeVolumesModificationsInput) ([]*ec2.VolumeModification, error) {
	requestTime := time.Now()
	results := []*ec2.VolumeModification{}
	var nextToken *string
	for {
		resp, err := s.ec2.DescribeVolumesModifications(request)
		if err != nil {
			recordAWSMetric("describe_volume_modification", 0, err)
			// Fixed error-message typo: "modifictions :" -> "modifications:".
			return nil, fmt.Errorf("error listing volume modifications: %v", err)
		}
		results = append(results, resp.VolumesModifications...)
		nextToken = resp.NextToken
		if aws.StringValue(nextToken) == "" {
			break
		}
		request.NextToken = nextToken
	}
	timeTaken := time.Since(requestTime).Seconds()
	recordAWSMetric("describe_volume_modification", timeTaken, nil)
	return results, nil
}
// DescribeSubnets lists subnets matching the request. The Subnets API is not
// paged, so a single call suffices.
func (s *awsSdkEC2) DescribeSubnets(request *ec2.DescribeSubnetsInput) ([]*ec2.Subnet, error) {
	response, err := s.ec2.DescribeSubnets(request)
	if err != nil {
		return nil, fmt.Errorf("error listing AWS subnets: %q", err)
	}
	return response.Subnets, nil
}
// CreateSecurityGroup is a direct pass-through to the EC2 API.
func (s *awsSdkEC2) CreateSecurityGroup(request *ec2.CreateSecurityGroupInput) (*ec2.CreateSecurityGroupOutput, error) {
	return s.ec2.CreateSecurityGroup(request)
}

// DeleteSecurityGroup is a direct pass-through to the EC2 API.
func (s *awsSdkEC2) DeleteSecurityGroup(request *ec2.DeleteSecurityGroupInput) (*ec2.DeleteSecurityGroupOutput, error) {
	return s.ec2.DeleteSecurityGroup(request)
}

// AuthorizeSecurityGroupIngress is a direct pass-through to the EC2 API.
func (s *awsSdkEC2) AuthorizeSecurityGroupIngress(request *ec2.AuthorizeSecurityGroupIngressInput) (*ec2.AuthorizeSecurityGroupIngressOutput, error) {
	return s.ec2.AuthorizeSecurityGroupIngress(request)
}

// RevokeSecurityGroupIngress is a direct pass-through to the EC2 API.
func (s *awsSdkEC2) RevokeSecurityGroupIngress(request *ec2.RevokeSecurityGroupIngressInput) (*ec2.RevokeSecurityGroupIngressOutput, error) {
	return s.ec2.RevokeSecurityGroupIngress(request)
}
// CreateTags tags EC2 resources, recording call latency and outcome in the
// AWS metrics.
func (s *awsSdkEC2) CreateTags(request *ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error) {
	start := time.Now()
	resp, err := s.ec2.CreateTags(request)
	recordAWSMetric("create_tags", time.Since(start).Seconds(), err)
	return resp, err
}
// DescribeRouteTables lists route tables matching the request, following
// NextToken pagination and recording call latency/outcome in the AWS metrics.
func (s *awsSdkEC2) DescribeRouteTables(request *ec2.DescribeRouteTablesInput) ([]*ec2.RouteTable, error) {
	tables := []*ec2.RouteTable{}
	start := time.Now()
	for {
		response, err := s.ec2.DescribeRouteTables(request)
		if err != nil {
			recordAWSMetric("describe_route_tables", 0, err)
			return nil, fmt.Errorf("error listing AWS route tables: %q", err)
		}

		tables = append(tables, response.RouteTables...)

		token := response.NextToken
		if aws.StringValue(token) == "" {
			break
		}
		request.NextToken = token
	}
	recordAWSMetric("describe_route_tables", time.Since(start).Seconds(), nil)
	return tables, nil
}
// CreateRoute delegates directly to the underlying EC2 client.
func (s *awsSdkEC2) CreateRoute(request *ec2.CreateRouteInput) (*ec2.CreateRouteOutput, error) {
	out, err := s.ec2.CreateRoute(request)
	return out, err
}
// DeleteRoute delegates directly to the underlying EC2 client.
func (s *awsSdkEC2) DeleteRoute(request *ec2.DeleteRouteInput) (*ec2.DeleteRouteOutput, error) {
	out, err := s.ec2.DeleteRoute(request)
	return out, err
}
// ModifyInstanceAttribute delegates directly to the underlying EC2 client.
func (s *awsSdkEC2) ModifyInstanceAttribute(request *ec2.ModifyInstanceAttributeInput) (*ec2.ModifyInstanceAttributeOutput, error) {
	out, err := s.ec2.ModifyInstanceAttribute(request)
	return out, err
}
// DescribeVpcs delegates directly to the underlying EC2 client.
func (s *awsSdkEC2) DescribeVpcs(request *ec2.DescribeVpcsInput) (*ec2.DescribeVpcsOutput, error) {
	out, err := s.ec2.DescribeVpcs(request)
	return out, err
}
// init registers the provider's metrics and the AWS cloud provider factory
// with the cloudprovider registry under ProviderName.
func init() {
	registerMetrics()
	cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) {
		// Parse the (optional) cloud config and validate any custom
		// service-endpoint overrides it declares.
		cfg, err := readAWSCloudConfig(config)
		if err != nil {
			return nil, fmt.Errorf("unable to read AWS cloud provider config file: %v", err)
		}
		if err = cfg.validateOverrides(); err != nil {
			return nil, fmt.Errorf("unable to validate custom endpoint overrides: %v", err)
		}
		sess, err := session.NewSession(&aws.Config{})
		if err != nil {
			return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
		}
		// Choose the middle credentials provider of the chain: the EC2
		// instance-role provider by default, or STS assume-role when a
		// RoleARN is configured.
		var provider credentials.Provider
		if cfg.Global.RoleARN == "" {
			provider = &ec2rolecreds.EC2RoleProvider{
				Client: ec2metadata.New(sess),
			}
		} else {
			klog.Infof("Using AWS assumed role %v", cfg.Global.RoleARN)
			provider = &stscreds.AssumeRoleProvider{
				Client:  sts.New(sess),
				RoleARN: cfg.Global.RoleARN,
			}
		}
		// Credential precedence: environment variables, then the provider
		// selected above, then shared credentials files.
		creds := credentials.NewChainCredentials(
			[]credentials.Provider{
				&credentials.EnvProvider{},
				provider,
				&credentials.SharedCredentialsProvider{},
			})
		aws := newAWSSDKProvider(creds, cfg)
		return newAWSCloud(*cfg, aws)
	})
}
// readAWSCloudConfig reads an instance of AWSCloudConfig from config reader.
// A nil reader is allowed and yields a zero-valued config.
func readAWSCloudConfig(config io.Reader) (*CloudConfig, error) {
	cfg := &CloudConfig{}
	if config == nil {
		return cfg, nil
	}
	if err := gcfg.ReadInto(cfg, config); err != nil {
		return nil, err
	}
	return cfg, nil
}
// updateConfigZone fills in cfg.Global.Zone when the config file left it
// empty, querying the EC2 metadata service if one is available. It is an
// error if no zone can be determined from either source.
func updateConfigZone(cfg *CloudConfig, metadata EC2Metadata) error {
	if cfg.Global.Zone != "" {
		return nil // already configured explicitly
	}
	if metadata != nil {
		klog.Info("Zone not specified in configuration file; querying AWS metadata service")
		az, err := getAvailabilityZone(metadata)
		if err != nil {
			return err
		}
		cfg.Global.Zone = az
	}
	if cfg.Global.Zone == "" {
		return fmt.Errorf("no zone specified in configuration file")
	}
	return nil
}
// getAvailabilityZone queries the EC2 metadata service for the availability
// zone the instance is running in.
func getAvailabilityZone(metadata EC2Metadata) (string, error) {
	const azPath = "placement/availability-zone"
	return metadata.GetMetadata(azPath)
}
// azToRegion derives the region name from a valid AZ name by dropping the
// trailing zone letter (e.g. "us-east-1a" -> "us-east-1").
// Returns an error if the az is known invalid (empty).
func azToRegion(az string) (string, error) {
	if az == "" {
		return "", fmt.Errorf("invalid (empty) AZ")
	}
	return az[:len(az)-1], nil
}
// newAWSCloud creates a new instance of AWSCloud.
// AWSProvider and instanceId are primarily for tests.
// It resolves the zone/region (from config or instance metadata), builds the
// per-service AWS clients, and initializes cluster tagging.
func newAWSCloud(cfg CloudConfig, awsServices Services) (*Cloud, error) {
	// We have some state in the Cloud object - in particular the attaching map
	// Log so that if we are building multiple Cloud objects, it is obvious!
	klog.Infof("Building AWS cloudprovider")
	metadata, err := awsServices.Metadata()
	if err != nil {
		return nil, fmt.Errorf("error creating AWS metadata client: %q", err)
	}
	// Fill in cfg.Global.Zone from instance metadata if not configured.
	err = updateConfigZone(&cfg, metadata)
	if err != nil {
		return nil, fmt.Errorf("unable to determine AWS zone from cloud provider config or EC2 instance metadata: %v", err)
	}
	zone := cfg.Global.Zone
	// A zone name is region + one trailing letter, so it must be > 1 char.
	if len(zone) <= 1 {
		return nil, fmt.Errorf("invalid AWS zone in config file: %s", zone)
	}
	regionName, err := azToRegion(zone)
	if err != nil {
		return nil, err
	}
	// Unless disabled, reject regions unknown to the SDK (and not confirmed
	// by the instance metadata service).
	if !cfg.Global.DisableStrictZoneCheck {
		if !isRegionValid(regionName, metadata) {
			return nil, fmt.Errorf("not a valid AWS zone (unknown region): %s", zone)
		}
	} else {
		klog.Warningf("Strict AWS zone checking is disabled. Proceeding with zone: %s", zone)
	}
	// Construct the regional service clients used by the provider.
	ec2, err := awsServices.Compute(regionName)
	if err != nil {
		return nil, fmt.Errorf("error creating AWS EC2 client: %v", err)
	}
	elb, err := awsServices.LoadBalancing(regionName)
	if err != nil {
		return nil, fmt.Errorf("error creating AWS ELB client: %v", err)
	}
	elbv2, err := awsServices.LoadBalancingV2(regionName)
	if err != nil {
		return nil, fmt.Errorf("error creating AWS ELBV2 client: %v", err)
	}
	asg, err := awsServices.Autoscaling(regionName)
	if err != nil {
		return nil, fmt.Errorf("error creating AWS autoscaling client: %v", err)
	}
	kms, err := awsServices.KeyManagement(regionName)
	if err != nil {
		return nil, fmt.Errorf("error creating AWS key management client: %v", err)
	}
	awsCloud := &Cloud{
		ec2:      ec2,
		elb:      elb,
		elbv2:    elbv2,
		asg:      asg,
		metadata: metadata,
		kms:      kms,
		cfg:      &cfg,
		region:   regionName,

		attaching:        make(map[types.NodeName]map[mountDevice]EBSVolumeID),
		deviceAllocators: make(map[types.NodeName]DeviceAllocator),
	}
	awsCloud.instanceCache.cloud = awsCloud
	tagged := cfg.Global.KubernetesClusterTag != "" || cfg.Global.KubernetesClusterID != ""
	if cfg.Global.VPC != "" && (cfg.Global.SubnetID != "" || cfg.Global.RoleARN != "") && tagged {
		// When the master is running on a different AWS account, cloud provider or on-premise
		// build up a dummy instance and use the VPC from the nodes account
		klog.Info("Master is configured to run on a different AWS account, different cloud provider or on-premises")
		awsCloud.selfAWSInstance = &awsInstance{
			nodeName: "master-dummy",
			vpcID:    cfg.Global.VPC,
			subnetID: cfg.Global.SubnetID,
		}
		awsCloud.vpcID = cfg.Global.VPC
	} else {
		// Normal path: identify ourselves via instance metadata.
		selfAWSInstance, err := awsCloud.buildSelfAWSInstance()
		if err != nil {
			return nil, err
		}
		awsCloud.selfAWSInstance = selfAWSInstance
		awsCloud.vpcID = selfAWSInstance.vpcID
	}
	// Cluster tagging: from explicit config if given, otherwise discovered
	// from this instance's own EC2 tags.
	if cfg.Global.KubernetesClusterTag != "" || cfg.Global.KubernetesClusterID != "" {
		if err := awsCloud.tagging.init(cfg.Global.KubernetesClusterTag, cfg.Global.KubernetesClusterID); err != nil {
			return nil, err
		}
	} else {
		// TODO: Clean up double-API query
		info, err := awsCloud.selfAWSInstance.describeInstance()
		if err != nil {
			return nil, err
		}
		if err := awsCloud.tagging.initFromTags(info.Tags); err != nil {
			return nil, err
		}
	}
	return awsCloud, nil
}
// isRegionValid accepts an AWS region name and returns if the region is a
// valid region known to the AWS SDK. Considers the region returned from the
// EC2 metadata service to be a valid region as it's only available on a host
// running in a valid AWS region.
func isRegionValid(region string, metadata EC2Metadata) bool {
	// Does the AWS SDK know about the region?
	for _, partition := range endpoints.DefaultPartitions() {
		for sdkRegion := range partition.Regions() {
			if sdkRegion == region {
				return true
			}
		}
	}

	// ap-northeast-3 is purposely excluded from the SDK because it
	// requires an access request (for more details see):
	// https://github.com/aws/aws-sdk-go/issues/1863
	if region == "ap-northeast-3" {
		return true
	}

	// Fallback to checking if the region matches the instance metadata region
	// (ignoring any user overrides). This just accounts for running an old
	// build of Kubernetes in a new region that wasn't compiled into the SDK
	// when Kubernetes was built.
	az, err := getAvailabilityZone(metadata)
	if err != nil {
		return false
	}
	metadataRegion, err := azToRegion(az)
	return err == nil && region == metadataRegion
}
// Initialize passes a Kubernetes clientBuilder interface to the cloud provider.
// It also wires up an event broadcaster/recorder so the provider can emit
// Kubernetes events (e.g. when volume attachments get stuck).
func (c *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) {
	c.clientBuilder = clientBuilder
	c.kubeClient = clientBuilder.ClientOrDie("aws-cloud-provider")
	c.eventBroadcaster = record.NewBroadcaster()
	c.eventBroadcaster.StartLogging(klog.Infof)
	c.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.kubeClient.CoreV1().Events("")})
	c.eventRecorder = c.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "aws-cloud-provider"})
}
// Clusters returns the list of clusters. Not supported by the AWS provider,
// so it always reports (nil, false).
func (c *Cloud) Clusters() (cloudprovider.Clusters, bool) {
	return nil, false
}
// ProviderName returns the cloud provider ID (the registered "aws" name).
func (c *Cloud) ProviderName() string {
	return ProviderName
}
// LoadBalancer returns an implementation of LoadBalancer for Amazon Web Services.
// The Cloud object itself implements the interface.
func (c *Cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
	return c, true
}
// Instances returns an implementation of Instances for Amazon Web Services.
// The Cloud object itself implements the interface.
func (c *Cloud) Instances() (cloudprovider.Instances, bool) {
	return c, true
}
// Zones returns an implementation of Zones for Amazon Web Services.
// The Cloud object itself implements the interface.
func (c *Cloud) Zones() (cloudprovider.Zones, bool) {
	return c, true
}
// Routes returns an implementation of Routes for Amazon Web Services.
// The Cloud object itself implements the interface.
func (c *Cloud) Routes() (cloudprovider.Routes, bool) {
	return c, true
}
// HasClusterID returns true if the cluster has a clusterID configured or
// discovered via tagging.
func (c *Cloud) HasClusterID() bool {
	return c.tagging.clusterID() != ""
}
// NodeAddresses is an implementation of Instances.NodeAddresses.
// For the local instance (or an empty name) it builds the address list from
// the EC2 metadata service; for any other node it falls back to a
// DescribeInstances lookup via extractNodeAddresses.
func (c *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error) {
	if c.selfAWSInstance.nodeName == name || len(name) == 0 {
		addresses := []v1.NodeAddress{}

		// Enumerate all attached network interfaces by MAC address.
		macs, err := c.metadata.GetMetadata("network/interfaces/macs/")
		if err != nil {
			return nil, fmt.Errorf("error querying AWS metadata for %q: %q", "network/interfaces/macs", err)
		}

		// We want the IPs to end up in order by interface (in particular, we want eth0's
		// IPs first), but macs isn't necessarily sorted in that order so we have to
		// explicitly order by device-number (device-number == the "0" in "eth0").
		macIPs := make(map[int]string)
		for _, macID := range strings.Split(macs, "\n") {
			if macID == "" {
				continue
			}
			numPath := path.Join("network/interfaces/macs/", macID, "device-number")
			numStr, err := c.metadata.GetMetadata(numPath)
			if err != nil {
				return nil, fmt.Errorf("error querying AWS metadata for %q: %q", numPath, err)
			}
			num, err := strconv.Atoi(strings.TrimSpace(numStr))
			if err != nil {
				// Skip (rather than fail on) an interface with a bad device number.
				klog.Warningf("Bad device-number %q for interface %s\n", numStr, macID)
				continue
			}
			ipPath := path.Join("network/interfaces/macs/", macID, "local-ipv4s")
			macIPs[num], err = c.metadata.GetMetadata(ipPath)
			if err != nil {
				return nil, fmt.Errorf("error querying AWS metadata for %q: %q", ipPath, err)
			}
		}

		// Emit internal IPs in device-number order (eth0 first).
		for i := 0; i < len(macIPs); i++ {
			internalIPs := macIPs[i]
			if internalIPs == "" {
				continue
			}
			for _, internalIP := range strings.Split(internalIPs, "\n") {
				if internalIP == "" {
					continue
				}
				addresses = append(addresses, v1.NodeAddress{Type: v1.NodeInternalIP, Address: internalIP})
			}
		}

		externalIP, err := c.metadata.GetMetadata("public-ipv4")
		if err != nil {
			//TODO: It would be nice to be able to determine the reason for the failure,
			// but the AWS client masks all failures with the same error description.
			klog.V(4).Info("Could not determine public IP from AWS metadata.")
		} else {
			addresses = append(addresses, v1.NodeAddress{Type: v1.NodeExternalIP, Address: externalIP})
		}

		localHostname, err := c.metadata.GetMetadata("local-hostname")
		if err != nil || len(localHostname) == 0 {
			//TODO: It would be nice to be able to determine the reason for the failure,
			// but the AWS client masks all failures with the same error description.
			klog.V(4).Info("Could not determine private DNS from AWS metadata.")
		} else {
			// local-hostname can carry extra DHCP-option domain names;
			// parseMetadataLocalHostname splits them out.
			hostname, internalDNS := parseMetadataLocalHostname(localHostname)
			addresses = append(addresses, v1.NodeAddress{Type: v1.NodeHostName, Address: hostname})
			for _, d := range internalDNS {
				addresses = append(addresses, v1.NodeAddress{Type: v1.NodeInternalDNS, Address: d})
			}
		}

		externalDNS, err := c.metadata.GetMetadata("public-hostname")
		if err != nil || len(externalDNS) == 0 {
			//TODO: It would be nice to be able to determine the reason for the failure,
			// but the AWS client masks all failures with the same error description.
			klog.V(4).Info("Could not determine public DNS from AWS metadata.")
		} else {
			addresses = append(addresses, v1.NodeAddress{Type: v1.NodeExternalDNS, Address: externalDNS})
		}

		return addresses, nil
	}

	// Not the local instance: resolve via the EC2 API.
	instance, err := c.getInstanceByNodeName(name)
	if err != nil {
		return nil, fmt.Errorf("getInstanceByNodeName failed for %q with %q", name, err)
	}
	return extractNodeAddresses(instance)
}
// parseMetadataLocalHostname parses the output of "local-hostname" metadata.
// If a DHCP option set is configured for a VPC and it has multiple domain names, GetMetadata
// returns a string containing first the hostname followed by additional domain names,
// space-separated. For example, if the DHCP option set has:
// domain-name = us-west-2.compute.internal a.a b.b c.c d.d;
//
//	$ curl http://169.254.169.254/latest/meta-data/local-hostname
//	ip-192-168-111-51.us-west-2.compute.internal a.a b.b c.c d.d
//
// It returns the hostname and the list of internal DNS names (the hostname
// itself plus the private address joined with each extra domain).
// Empty or all-whitespace metadata yields ("", nil) instead of panicking.
func parseMetadataLocalHostname(metadata string) (string, []string) {
	localHostnames := strings.Fields(metadata)
	// Fix: the previous implementation indexed localHostnames[0] without
	// checking, panicking on empty metadata.
	if len(localHostnames) == 0 {
		return "", nil
	}
	hostname := localHostnames[0]
	internalDNS := []string{hostname}

	// The private address is the first dot-separated label of the hostname
	// (e.g. "ip-192-168-111-51").
	privateAddress := strings.Split(hostname, ".")[0]
	for _, h := range localHostnames[1:] {
		internalDNSAddress := privateAddress + "." + h
		internalDNS = append(internalDNS, internalDNSAddress)
	}
	return hostname, internalDNS
}
// extractNodeAddresses maps the instance information from EC2 to an array of NodeAddresses.
// Internal IPs come first (one per in-use network interface address), then the
// public IP, private DNS (also used as hostname) and public DNS.
func extractNodeAddresses(instance *ec2.Instance) ([]v1.NodeAddress, error) {
	// Not clear if the order matters here, but we might as well indicate a sensible preference order
	if instance == nil {
		return nil, fmt.Errorf("nil instance passed to extractNodeAddresses")
	}

	addresses := []v1.NodeAddress{}

	// Internal addresses: walk every in-use network interface.
	for _, iface := range instance.NetworkInterfaces {
		if aws.StringValue(iface.Status) != ec2.NetworkInterfaceStatusInUse {
			// skip network interfaces that are not currently in use
			continue
		}
		for _, priv := range iface.PrivateIpAddresses {
			ipAddress := aws.StringValue(priv.PrivateIpAddress)
			if ipAddress == "" {
				continue
			}
			ip := net.ParseIP(ipAddress)
			if ip == nil {
				return nil, fmt.Errorf("EC2 instance had invalid private address: %s (%q)", aws.StringValue(instance.InstanceId), ipAddress)
			}
			addresses = append(addresses, v1.NodeAddress{Type: v1.NodeInternalIP, Address: ip.String()})
		}
	}

	// TODO: Other IP addresses (multiple ips)?
	if publicIP := aws.StringValue(instance.PublicIpAddress); publicIP != "" {
		ip := net.ParseIP(publicIP)
		if ip == nil {
			return nil, fmt.Errorf("EC2 instance had invalid public address: %s (%s)", aws.StringValue(instance.InstanceId), publicIP)
		}
		addresses = append(addresses, v1.NodeAddress{Type: v1.NodeExternalIP, Address: ip.String()})
	}

	if privateDNS := aws.StringValue(instance.PrivateDnsName); privateDNS != "" {
		addresses = append(addresses,
			v1.NodeAddress{Type: v1.NodeInternalDNS, Address: privateDNS},
			v1.NodeAddress{Type: v1.NodeHostName, Address: privateDNS})
	}

	if publicDNS := aws.StringValue(instance.PublicDnsName); publicDNS != "" {
		addresses = append(addresses, v1.NodeAddress{Type: v1.NodeExternalDNS, Address: publicDNS})
	}

	return addresses, nil
}
// NodeAddressesByProviderID returns the node addresses of an instances with the specified unique providerID
// This method will not be called from the node that is requesting this ID. i.e. metadata service
// and other local methods cannot be used here
func (c *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) {
	instanceID, err := KubernetesInstanceID(providerID).MapToAWSInstanceID()
	if err != nil {
		return nil, err
	}

	inst, err := describeInstance(c.ec2, instanceID)
	if err != nil {
		return nil, err
	}
	return extractNodeAddresses(inst)
}
// InstanceExistsByProviderID returns true if the instance with the given provider id still exists.
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
// A terminated instance counts as non-existent.
func (c *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
	instanceID, err := KubernetesInstanceID(providerID).MapToAWSInstanceID()
	if err != nil {
		return false, err
	}

	instances, err := c.ec2.DescribeInstances(&ec2.DescribeInstancesInput{
		InstanceIds: []*string{instanceID.awsString()},
	})
	if err != nil {
		return false, err
	}
	switch {
	case len(instances) == 0:
		return false, nil
	case len(instances) > 1:
		return false, fmt.Errorf("multiple instances found for instance: %s", instanceID)
	}

	if *instances[0].State.Name == ec2.InstanceStateNameTerminated {
		klog.Warningf("the instance %s is terminated", instanceID)
		return false, nil
	}
	return true, nil
}
// InstanceShutdownByProviderID returns true if the instance is in safe state to detach volumes
// (i.e. the EC2 instance is in the "stopped" state).
func (c *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
	instanceID, err := KubernetesInstanceID(providerID).MapToAWSInstanceID()
	if err != nil {
		return false, err
	}

	instances, err := c.ec2.DescribeInstances(&ec2.DescribeInstancesInput{
		InstanceIds: []*string{instanceID.awsString()},
	})
	if err != nil {
		return false, err
	}
	if len(instances) == 0 {
		klog.Warningf("the instance %s does not exist anymore", providerID)
		// returns false, because otherwise node is not deleted from cluster
		// false means that it will continue to check InstanceExistsByProviderID
		return false, nil
	}
	if len(instances) > 1 {
		return false, fmt.Errorf("multiple instances found for instance: %s", instanceID)
	}

	inst := instances[0]
	if inst.State == nil {
		return false, nil
	}
	// valid state for detaching volumes
	return aws.StringValue(inst.State.Name) == ec2.InstanceStateNameStopped, nil
}
// InstanceID returns the cloud provider ID of the node with the specified nodeName,
// in the form "/<zone>/<instanceid>".
// In the future it is possible to also return an endpoint as:
// <endpoint>/<zone>/<instanceid>
func (c *Cloud) InstanceID(ctx context.Context, nodeName types.NodeName) (string, error) {
	// Fast path: answer for ourselves from cached instance data.
	if c.selfAWSInstance.nodeName == nodeName {
		return "/" + c.selfAWSInstance.availabilityZone + "/" + c.selfAWSInstance.awsID, nil
	}
	inst, err := c.getInstanceByNodeName(nodeName)
	if err == cloudprovider.InstanceNotFound {
		// The Instances interface requires that we return InstanceNotFound (without wrapping)
		return "", err
	}
	if err != nil {
		return "", fmt.Errorf("getInstanceByNodeName failed for %q with %q", nodeName, err)
	}
	return "/" + aws.StringValue(inst.Placement.AvailabilityZone) + "/" + aws.StringValue(inst.InstanceId), nil
}
// InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID
// This method will not be called from the node that is requesting this ID. i.e. metadata service
// and other local methods cannot be used here
func (c *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) {
	instanceID, err := KubernetesInstanceID(providerID).MapToAWSInstanceID()
	if err != nil {
		return "", err
	}

	inst, err := describeInstance(c.ec2, instanceID)
	if err != nil {
		return "", err
	}
	return aws.StringValue(inst.InstanceType), nil
}
// InstanceType returns the type of the node with the specified nodeName.
// The local instance is answered from cached data; others need an EC2 lookup.
func (c *Cloud) InstanceType(ctx context.Context, nodeName types.NodeName) (string, error) {
	if c.selfAWSInstance.nodeName == nodeName {
		return c.selfAWSInstance.instanceType, nil
	}
	inst, err := c.getInstanceByNodeName(nodeName)
	if err != nil {
		return "", fmt.Errorf("getInstanceByNodeName failed for %q with %q", nodeName, err)
	}
	return aws.StringValue(inst.InstanceType), nil
}
// GetCandidateZonesForDynamicVolume retrieves a list of all the zones in which nodes are running
// It currently involves querying all instances
func (c *Cloud) GetCandidateZonesForDynamicVolume() (sets.String, error) {
	// We don't currently cache this; it is currently used only in volume
	// creation which is expected to be a comparatively rare occurrence.

	// TODO: Caching / expose v1.Nodes to the cloud provider?
	// TODO: We could also query for subnets, I think

	// Note: It is more efficient to call the EC2 API twice with different tag
	// filters than to call it once with a tag filter that results in a logical
	// OR. For really large clusters the logical OR will result in EC2 API rate
	// limiting.
	instances := []*ec2.Instance{}

	// Query running instances carrying the current cluster tags.
	baseFilters := []*ec2.Filter{newEc2Filter("instance-state-name", "running")}

	filters := c.tagging.addFilters(baseFilters)
	di, err := c.describeInstances(filters)
	if err != nil {
		return nil, err
	}

	instances = append(instances, di...)

	// Repeat with legacy-style tags when the cluster still uses them.
	if c.tagging.usesLegacyTags {
		filters = c.tagging.addLegacyFilters(baseFilters)
		di, err = c.describeInstances(filters)
		if err != nil {
			return nil, err
		}

		instances = append(instances, di...)
	}

	if len(instances) == 0 {
		return nil, fmt.Errorf("no instances returned")
	}

	zones := sets.NewString()

	for _, instance := range instances {
		// We skip over master nodes, if the installation tool labels them with one of the well-known master labels
		// This avoids creating a volume in a zone where only the master is running - e.g. #34583
		// This is a short-term workaround until the scheduler takes care of zone selection
		master := false
		for _, tag := range instance.Tags {
			tagKey := aws.StringValue(tag.Key)
			if awsTagNameMasterRoles.Has(tagKey) {
				master = true
			}
		}

		if master {
			klog.V(4).Infof("Ignoring master instance %q in zone discovery", aws.StringValue(instance.InstanceId))
			continue
		}

		if instance.Placement != nil {
			zone := aws.StringValue(instance.Placement.AvailabilityZone)
			zones.Insert(zone)
		}
	}

	klog.V(2).Infof("Found instances in zones %s", zones)
	return zones, nil
}
// GetZone implements Zones.GetZone, reporting the local instance's
// availability zone as the failure domain.
func (c *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
	zone := cloudprovider.Zone{
		FailureDomain: c.selfAWSInstance.availabilityZone,
		Region:        c.region,
	}
	return zone, nil
}
// GetZoneByProviderID implements Zones.GetZoneByProviderID
// This is particularly useful in external cloud providers where the kubelet
// does not initialize node data.
func (c *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) {
	instanceID, err := KubernetesInstanceID(providerID).MapToAWSInstanceID()
	if err != nil {
		return cloudprovider.Zone{}, err
	}
	inst, err := c.getInstanceByID(string(instanceID))
	if err != nil {
		return cloudprovider.Zone{}, err
	}

	return cloudprovider.Zone{
		FailureDomain: *(inst.Placement.AvailabilityZone),
		Region:        c.region,
	}, nil
}
// GetZoneByNodeName implements Zones.GetZoneByNodeName
// This is particularly useful in external cloud providers where the kubelet
// does not initialize node data.
func (c *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (cloudprovider.Zone, error) {
	inst, err := c.getInstanceByNodeName(nodeName)
	if err != nil {
		return cloudprovider.Zone{}, err
	}

	return cloudprovider.Zone{
		FailureDomain: *(inst.Placement.AvailabilityZone),
		Region:        c.region,
	}, nil
}
// mountDevice represents a mount device used when attaching an EBS volume.
// It is stored as the one- or two-letter device suffix only
// (i.e. "c", not "sdc" or "/dev/sdc").
type mountDevice string
// awsInstance caches the identifying attributes of a single EC2 instance,
// together with the EC2 client used to refresh them.
type awsInstance struct {
	ec2 EC2

	// id in AWS
	awsID string

	// node name in k8s
	nodeName types.NodeName

	// availability zone the instance resides in
	availabilityZone string

	// ID of VPC the instance resides in
	vpcID string

	// ID of subnet the instance resides in
	subnetID string

	// instance type
	instanceType string
}
// newAWSInstance creates a new awsInstance object from an EC2 instance
// description, tolerating a missing Placement block (empty AZ).
func newAWSInstance(ec2Service EC2, instance *ec2.Instance) *awsInstance {
	var az string
	if p := instance.Placement; p != nil {
		az = aws.StringValue(p.AvailabilityZone)
	}
	return &awsInstance{
		ec2:              ec2Service,
		awsID:            aws.StringValue(instance.InstanceId),
		nodeName:         mapInstanceToNodeName(instance),
		availabilityZone: az,
		instanceType:     aws.StringValue(instance.InstanceType),
		vpcID:            aws.StringValue(instance.VpcId),
		subnetID:         aws.StringValue(instance.SubnetId),
	}
}
// describeInstance gets the full information about this instance from the EC2 API.
func (i *awsInstance) describeInstance() (*ec2.Instance, error) {
	id := InstanceID(i.awsID)
	return describeInstance(i.ec2, id)
}
// getMountDevice gets the mountDevice already assigned to the volume, or assigns an unused mountDevice.
// If the volume is already assigned, this will return the existing mountDevice with alreadyAttached=true.
// Otherwise the mountDevice is assigned by finding the first available mountDevice,
// and it is returned with alreadyAttached=false.
func (c *Cloud) getMountDevice(
	i *awsInstance,
	info *ec2.Instance,
	volumeID EBSVolumeID,
	assign bool) (assigned mountDevice, alreadyAttached bool, err error) {
	deviceMappings := map[mountDevice]EBSVolumeID{}
	volumeStatus := map[EBSVolumeID]string{} // for better logging of volume status
	for _, blockDevice := range info.BlockDeviceMappings {
		// Normalize to the short one/two-letter suffix (e.g. "ba" from "/dev/xvdba").
		name := aws.StringValue(blockDevice.DeviceName)
		if strings.HasPrefix(name, "/dev/sd") {
			name = name[7:]
		}
		if strings.HasPrefix(name, "/dev/xvd") {
			name = name[8:]
		}
		if len(name) < 1 || len(name) > 2 {
			klog.Warningf("Unexpected EBS DeviceName: %q", aws.StringValue(blockDevice.DeviceName))
		}
		// Fix: skip mappings that have no EBS block rather than dereferencing
		// blockDevice.Ebs unconditionally below, which would panic with a nil
		// pointer on non-EBS (e.g. instance-store) device mappings.
		if blockDevice.Ebs == nil {
			continue
		}
		if blockDevice.Ebs.VolumeId != nil {
			volumeStatus[EBSVolumeID(*blockDevice.Ebs.VolumeId)] = aws.StringValue(blockDevice.Ebs.Status)
		}
		deviceMappings[mountDevice(name)] = EBSVolumeID(aws.StringValue(blockDevice.Ebs.VolumeId))
	}

	// We lock to prevent concurrent mounts from conflicting
	// We may still conflict if someone calls the API concurrently,
	// but the AWS API will then fail one of the two attach operations
	c.attachingMutex.Lock()
	defer c.attachingMutex.Unlock()

	// Merge in attachments that are still in progress on this node.
	for mountDevice, volume := range c.attaching[i.nodeName] {
		deviceMappings[mountDevice] = volume
	}

	// Check to see if this volume is already assigned a device on this machine
	for mountDevice, mappingVolumeID := range deviceMappings {
		if volumeID == mappingVolumeID {
			if assign {
				// DescribeInstances shows the volume as attached / detaching, while Kubernetes
				// cloud provider thinks it's detached.
				// This can happened when the volume has just been detached from the same node
				// and AWS API returns stale data in this DescribeInstances ("eventual consistency").
				// Fail the attachment and let A/D controller retry in a while, hoping that
				// AWS API returns consistent result next time (i.e. the volume is detached).
				status := volumeStatus[mappingVolumeID]
				klog.Warningf("Got assignment call for already-assigned volume: %s@%s, volume status: %s", mountDevice, mappingVolumeID, status)
				return mountDevice, false, fmt.Errorf("volume is still being detached from the node")
			}
			return mountDevice, true, nil
		}
	}

	if !assign {
		return mountDevice(""), false, nil
	}

	// Find the next unused device name
	deviceAllocator := c.deviceAllocators[i.nodeName]
	if deviceAllocator == nil {
		// we want device names with two significant characters, starting with /dev/xvdbb
		// the allowed range is /dev/xvd[b-c][a-z]
		// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html
		deviceAllocator = NewDeviceAllocator()
		c.deviceAllocators[i.nodeName] = deviceAllocator
	}
	// We need to lock deviceAllocator to prevent possible race with Deprioritize function
	deviceAllocator.Lock()
	defer deviceAllocator.Unlock()

	chosen, err := deviceAllocator.GetNext(deviceMappings)
	if err != nil {
		klog.Warningf("Could not assign a mount device.  mappings=%v, error: %v", deviceMappings, err)
		return "", false, fmt.Errorf("too many EBS volumes attached to node %s", i.nodeName)
	}

	// Record the in-progress attachment so concurrent callers see it.
	attaching := c.attaching[i.nodeName]
	if attaching == nil {
		attaching = make(map[mountDevice]EBSVolumeID)
		c.attaching[i.nodeName] = attaching
	}
	attaching[chosen] = volumeID
	klog.V(2).Infof("Assigned mount device %s -> volume %s", chosen, volumeID)

	return chosen, false, nil
}
// endAttaching removes the entry from the "attachments in progress" map
// It returns true if it was found (and removed), false otherwise
// The removal is rejected when the device has meanwhile been reassigned to a
// different volume, to avoid clobbering a concurrent attachment.
func (c *Cloud) endAttaching(i *awsInstance, volumeID EBSVolumeID, mountDevice mountDevice) bool {
	c.attachingMutex.Lock()
	defer c.attachingMutex.Unlock()

	existingVolumeID, found := c.attaching[i.nodeName][mountDevice]
	if !found {
		return false
	}
	if volumeID != existingVolumeID {
		// This actually can happen, because getMountDevice combines the attaching map with the volumes
		// attached to the instance (as reported by the EC2 API).  So if endAttaching comes after
		// a 10 second poll delay, we might well have had a concurrent request to allocate a mountpoint,
		// which because we allocate sequentially is _very_ likely to get the immediately freed volume
		klog.Infof("endAttaching on device %q assigned to different volume: %q vs %q", mountDevice, volumeID, existingVolumeID)
		return false
	}
	klog.V(2).Infof("Releasing in-process attachment entry: %s -> volume %s", mountDevice, volumeID)
	delete(c.attaching[i.nodeName], mountDevice)
	return true
}
// awsDisk pairs a Kubernetes volume name with its AWS EBS volume ID and the
// EC2 client used to operate on it.
type awsDisk struct {
	ec2 EC2

	// Name in k8s
	name KubernetesVolumeID
	// id in AWS
	awsID EBSVolumeID
}
// newAWSDisk builds an awsDisk for the given Kubernetes volume name,
// resolving the name to its AWS EBS volume ID.
func newAWSDisk(aws *Cloud, name KubernetesVolumeID) (*awsDisk, error) {
	awsID, err := name.MapToAWSVolumeID()
	if err != nil {
		return nil, err
	}
	return &awsDisk{ec2: aws.ec2, name: name, awsID: awsID}, nil
}
// isAWSErrorVolumeNotFound is a helper for describeVolume callers. It tries to
// retype the given error to an AWS error and returns true when the AWS error
// code is "InvalidVolume.NotFound", false otherwise.
// https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
func isAWSErrorVolumeNotFound(err error) bool {
	if err == nil {
		return false
	}
	awsError, ok := err.(awserr.Error)
	return ok && awsError.Code() == "InvalidVolume.NotFound"
}
// describeVolume gets the full information about this volume from the EC2 API,
// requiring exactly one matching volume.
func (d *awsDisk) describeVolume() (*ec2.Volume, error) {
	request := &ec2.DescribeVolumesInput{
		VolumeIds: []*string{d.awsID.awsString()},
	}

	volumes, err := d.ec2.DescribeVolumes(request)
	if err != nil {
		return nil, err
	}
	switch len(volumes) {
	case 0:
		return nil, fmt.Errorf("no volumes found")
	case 1:
		return volumes[0], nil
	default:
		return nil, fmt.Errorf("multiple volumes found")
	}
}
// describeVolumeModification returns the most recent modification record for
// this volume from the EC2 API.
func (d *awsDisk) describeVolumeModification() (*ec2.VolumeModification, error) {
	request := &ec2.DescribeVolumesModificationsInput{
		VolumeIds: []*string{d.awsID.awsString()},
	}
	volumeMods, err := d.ec2.DescribeVolumeModifications(request)
	if err != nil {
		return nil, fmt.Errorf("error describing volume modification %s with %v", d.awsID, err)
	}
	if len(volumeMods) == 0 {
		return nil, fmt.Errorf("no volume modifications found for %s", d.awsID)
	}
	// The last entry is the most recent modification.
	return volumeMods[len(volumeMods)-1], nil
}
// modifyVolume resizes this EBS volume to requestGiB and waits with
// exponential backoff for the modification to take effect.
// Returns the target size on immediate completion; otherwise returns
// requestGiB together with any wait error.
func (d *awsDisk) modifyVolume(requestGiB int64) (int64, error) {
	volumeID := d.awsID

	request := &ec2.ModifyVolumeInput{
		VolumeId: volumeID.awsString(),
		Size:     aws.Int64(requestGiB),
	}
	output, err := d.ec2.ModifyVolume(request)
	if err != nil {
		modifyError := fmt.Errorf("AWS modifyVolume failed for %s with %v", volumeID, err)
		return requestGiB, modifyError
	}

	volumeModification := output.VolumeModification

	if aws.StringValue(volumeModification.ModificationState) == ec2.VolumeModificationStateCompleted {
		return aws.Int64Value(volumeModification.TargetSize), nil
	}

	backoff := wait.Backoff{
		Duration: 1 * time.Second,
		Factor:   2,
		Steps:    10,
	}

	checkForResize := func() (bool, error) {
		volumeModification, err := d.describeVolumeModification()
		if err != nil {
			return false, err
		}
		// According to https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring_mods.html
		// Size changes usually take a few seconds to complete and take effect after a volume is in the Optimizing state.
		// Fix: also accept the "completed" state — a fast modification can
		// move past "optimizing" between polls, and checking only
		// "optimizing" would then time out even though the resize succeeded.
		state := aws.StringValue(volumeModification.ModificationState)
		if state == ec2.VolumeModificationStateOptimizing || state == ec2.VolumeModificationStateCompleted {
			return true, nil
		}
		return false, nil
	}
	waitWithErr := wait.ExponentialBackoff(backoff, checkForResize)
	return requestGiB, waitWithErr
}
// applyUnSchedulableTaint applies an unschedulable taint to a node after verifying
// if node has become unusable because of volumes getting stuck in attaching state.
// It also emits a warning event carrying the supplied reason.
func (c *Cloud) applyUnSchedulableTaint(nodeName types.NodeName, reason string) {
	node, fetchErr := c.kubeClient.CoreV1().Nodes().Get(string(nodeName), metav1.GetOptions{})
	if fetchErr != nil {
		klog.Errorf("Error fetching node %s with %v", nodeName, fetchErr)
		return
	}

	taint := &v1.Taint{
		Key:    nodeWithImpairedVolumes,
		Value:  "true",
		Effect: v1.TaintEffectNoSchedule,
	}
	// Best-effort: failures are logged, not propagated, since this runs as a
	// side effect of attach handling.
	err := nodehelpers.AddOrUpdateTaintOnNode(c.kubeClient, string(nodeName), taint)
	if err != nil {
		klog.Errorf("Error applying taint to node %s with error %v", nodeName, err)
		return
	}
	c.eventRecorder.Eventf(node, v1.EventTypeWarning, volumeAttachmentStuck, reason)
}
// waitForAttachmentStatus polls until the attachment status is the expected value
// ("attached" or "detached"). On success, it returns the last attachment state.
//
// DescribeVolume calls are rate limited by AWS, so transient describe errors
// are tolerated up to volumeAttachmentStatusConsecutiveErrorLimit times in a
// row before the error is returned to the caller.
func (d *awsDisk) waitForAttachmentStatus(status string) (*ec2.VolumeAttachment, error) {
	backoff := wait.Backoff{
		Duration: volumeAttachmentStatusPollDelay,
		Factor:   volumeAttachmentStatusFactor,
		Steps:    volumeAttachmentStatusSteps,
	}

	// Because of rate limiting, we often see errors from describeVolume
	// So we tolerate a limited number of failures.
	// But once we see more than 10 errors in a row, we return the error
	describeErrorCount := 0
	var attachment *ec2.VolumeAttachment
	count := 0 // number of poll iterations, used only for the final log line
	start := time.Now()
	// Delay the first poll; attach/detach rarely completes immediately.
	time.Sleep(getInitialAttachDetachDelay(status))
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		count++
		info, err := d.describeVolume()
		if err != nil {
			// The VolumeNotFound error is special -- we don't need to wait for it to repeat
			if isAWSErrorVolumeNotFound(err) {
				if status == "detached" {
					// The disk doesn't exist, assume it's detached, log warning and stop waiting
					klog.Warningf("Waiting for volume %q to be detached but the volume does not exist", d.awsID)
					stateStr := "detached"
					attachment = &ec2.VolumeAttachment{
						State: &stateStr,
					}
					return true, nil
				}
				if status == "attached" {
					// The disk doesn't exist, complain, give up waiting and report error
					klog.Warningf("Waiting for volume %q to be attached but the volume does not exist", d.awsID)
					return false, err
				}
			}
			describeErrorCount++
			if describeErrorCount > volumeAttachmentStatusConsecutiveErrorLimit {
				// report the error
				return false, err
			}
			klog.Warningf("Ignoring error from describe volume for volume %q; will retry: %q", d.awsID, err)
			return false, nil
		}
		// A successful describe resets the consecutive-error counter.
		describeErrorCount = 0
		if len(info.Attachments) > 1 {
			// Shouldn't happen; log so we know if it is
			klog.Warningf("Found multiple attachments for volume %q: %v", d.awsID, info)
		}
		attachmentStatus := ""
		for _, a := range info.Attachments {
			if attachmentStatus != "" {
				// Shouldn't happen; log so we know if it is
				klog.Warningf("Found multiple attachments for volume %q: %v", d.awsID, info)
			}
			if a.State != nil {
				attachment = a
				attachmentStatus = *a.State
			} else {
				// Shouldn't happen; log so we know if it is
				klog.Warningf("Ignoring nil attachment state for volume %q: %v", d.awsID, a)
			}
		}
		// No attachment entries at all is treated as "detached".
		if attachmentStatus == "" {
			attachmentStatus = "detached"
		}
		if attachmentStatus == status {
			// Attachment is in requested state, finish waiting
			return true, nil
		}
		// continue waiting
		klog.V(2).Infof("Waiting for volume %q state: actual=%s, desired=%s", d.awsID, attachmentStatus, status)
		return false, nil
	})
	end := time.Now()
	klog.Infof("waitForAttachmentStatus finished for volume %s %s after %d [%f]", d.awsID, status, count, end.Sub(start).Seconds())
	return attachment, err
}
// deleteVolume deletes the EBS disk backing this awsDisk.
// It returns (true, nil) when the volume was deleted, (false, nil) when the
// volume no longer exists, and a volerr.DeletedVolumeInUseError when AWS
// reports the volume as still in use.
func (d *awsDisk) deleteVolume() (bool, error) {
	request := &ec2.DeleteVolumeInput{VolumeId: d.awsID.awsString()}
	if _, err := d.ec2.DeleteVolume(request); err != nil {
		if isAWSErrorVolumeNotFound(err) {
			// Already gone; nothing to delete.
			return false, nil
		}
		if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "VolumeInUse" {
			return false, volerr.NewDeletedVolumeInUseError(err.Error())
		}
		return false, fmt.Errorf("error deleting EBS volume %q: %q", d.awsID, err)
	}
	return true, nil
}
// buildSelfAWSInstance builds the awsInstance for the EC2 instance on which
// we are running. It is called when the AWSCloud is initialized, and should
// not be called otherwise (because the awsInstance for the local instance is
// a singleton with drive mapping state).
func (c *Cloud) buildSelfAWSInstance() (*awsInstance, error) {
	if c.selfAWSInstance != nil {
		panic("do not call buildSelfAWSInstance directly")
	}

	instanceID, err := c.metadata.GetMetadata("instance-id")
	if err != nil {
		return nil, fmt.Errorf("error fetching instance-id from ec2 metadata service: %q", err)
	}

	// We want to fetch the hostname via the EC2 metadata service
	// (`GetMetadata("local-hostname")`): But see #11543 - we need to use
	// the EC2 API to get the privateDnsName in case of a private DNS zone
	// e.g. mydomain.io, because the metadata service returns the wrong
	// hostname. Once we're doing that, we might as well get all our
	// information from the instance returned by the EC2 API - it is a
	// single API call to get all the information, and it means we don't
	// have two code paths.
	instance, err := c.getInstanceByID(instanceID)
	if err != nil {
		return nil, fmt.Errorf("error finding instance %s: %q", instanceID, err)
	}
	return newAWSInstance(c.ec2, instance), nil
}
// wrapAttachError wraps the error returned by an AttachVolume request with
// additional information, if needed and possible. For "VolumeInUse" errors
// it tries to look up which instance currently holds the volume and includes
// that instance id in the returned error.
func wrapAttachError(err error, disk *awsDisk, instance string) error {
	awsError, ok := err.(awserr.Error)
	if ok && awsError.Code() == "VolumeInUse" {
		info, describeErr := disk.describeVolume()
		if describeErr != nil {
			klog.Errorf("Error describing volume %q: %q", disk.awsID, describeErr)
		} else {
			for _, a := range info.Attachments {
				if disk.awsID != EBSVolumeID(aws.StringValue(a.VolumeId)) {
					klog.Warningf("Expected to get attachment info of volume %q but instead got info of %q", disk.awsID, aws.StringValue(a.VolumeId))
				} else if aws.StringValue(a.State) == "attached" {
					// The volume is held by another instance; name it in the error.
					return fmt.Errorf("error attaching EBS volume %q to instance %q: %q. The volume is currently attached to instance %q", disk.awsID, instance, awsError, aws.StringValue(a.InstanceId))
				}
			}
		}
	}
	return fmt.Errorf("error attaching EBS volume %q to instance %q: %q", disk.awsID, instance, err)
}
// AttachDisk implements Volumes.AttachDisk
//
// It reserves a mount device on the instance, issues the EC2 AttachVolume
// call (unless an attach for this disk is already in progress), then waits
// for the attachment to reach the "attached" state. On success it returns
// the host device path ("/dev/xvdX") the volume was attached at.
func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) (string, error) {
	disk, err := newAWSDisk(c, diskName)
	if err != nil {
		return "", err
	}
	awsInstance, info, err := c.getFullInstance(nodeName)
	if err != nil {
		return "", fmt.Errorf("error finding instance %s: %q", nodeName, err)
	}
	// mountDevice will hold the device where we should try to attach the disk
	var mountDevice mountDevice
	// alreadyAttached is true if we have already called AttachVolume on this disk
	var alreadyAttached bool
	// attachEnded is set to true if the attach operation completed
	// (successfully or not), and is thus no longer in progress
	attachEnded := false
	defer func() {
		// Release the in-progress reservation once the attach has ended.
		if attachEnded {
			if !c.endAttaching(awsInstance, disk.awsID, mountDevice) {
				klog.Errorf("endAttaching called for disk %q when attach not in progress", disk.awsID)
			}
		}
	}()
	mountDevice, alreadyAttached, err = c.getMountDevice(awsInstance, info, disk.awsID, true)
	if err != nil {
		return "", err
	}
	// Inside the instance, the mountpoint always looks like /dev/xvdX (?)
	hostDevice := "/dev/xvd" + string(mountDevice)
	// We are using xvd names (so we are HVM only)
	// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html
	ec2Device := "/dev/xvd" + string(mountDevice)
	if !alreadyAttached {
		available, err := c.checkIfAvailable(disk, "attaching", awsInstance.awsID)
		if err != nil {
			klog.Error(err)
		}
		if !available {
			attachEnded = true
			return "", err
		}
		request := &ec2.AttachVolumeInput{
			Device:     aws.String(ec2Device),
			InstanceId: aws.String(awsInstance.awsID),
			VolumeId:   disk.awsID.awsString(),
		}
		attachResponse, err := c.ec2.AttachVolume(request)
		if err != nil {
			attachEnded = true
			// TODO: Check if the volume was concurrently attached?
			return "", wrapAttachError(err, disk, awsInstance.awsID)
		}
		if da, ok := c.deviceAllocators[awsInstance.nodeName]; ok {
			// Lower the priority of this device name for future allocations.
			da.Deprioritize(mountDevice)
		}
		klog.V(2).Infof("AttachVolume volume=%q instance=%q request returned %v", disk.awsID, awsInstance.awsID, attachResponse)
	}
	attachment, err := disk.waitForAttachmentStatus("attached")
	if err != nil {
		if err == wait.ErrWaitTimeout {
			// A volume stuck in "attaching" makes the node unusable; taint it.
			c.applyUnSchedulableTaint(nodeName, "Volume stuck in attaching state - node needs reboot to fix impaired state.")
		}
		return "", err
	}
	// The attach operation has finished
	attachEnded = true
	// Double check the attachment to be 100% sure we attached the correct volume at the correct mountpoint
	// It could happen otherwise that we see the volume attached from a previous/separate AttachVolume call,
	// which could theoretically be against a different device (or even instance).
	if attachment == nil {
		// Impossible?
		return "", fmt.Errorf("unexpected state: attachment nil after attached %q to %q", diskName, nodeName)
	}
	if ec2Device != aws.StringValue(attachment.Device) {
		return "", fmt.Errorf("disk attachment of %q to %q failed: requested device %q but found %q", diskName, nodeName, ec2Device, aws.StringValue(attachment.Device))
	}
	if awsInstance.awsID != aws.StringValue(attachment.InstanceId) {
		return "", fmt.Errorf("disk attachment of %q to %q failed: requested instance %q but found %q", diskName, nodeName, awsInstance.awsID, aws.StringValue(attachment.InstanceId))
	}
	return hostDevice, nil
}
// DetachDisk implements Volumes.DetachDisk
//
// It verifies the volume is attached to the given node, issues the EC2
// DetachVolume call, waits for the volume to reach the "detached" state,
// and returns the host device path the volume was detached from.
func (c *Cloud) DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) (string, error) {
	diskInfo, attached, err := c.checkIfAttachedToNode(diskName, nodeName)
	if err != nil {
		if isAWSErrorVolumeNotFound(err) {
			// Someone deleted the volume being detached; complain, but do nothing else and return success
			klog.Warningf("DetachDisk %s called for node %s but volume does not exist; assuming the volume is detached", diskName, nodeName)
			return "", nil
		}
		return "", err
	}
	// NOTE(review): this branch appears to cover the case where the volume is
	// attached to a node other than the requested one — confirm against
	// checkIfAttachedToNode's contract.
	if !attached && diskInfo.ec2Instance != nil {
		klog.Warningf("DetachDisk %s called for node %s but volume is attached to node %s", diskName, nodeName, diskInfo.nodeName)
		return "", nil
	}
	if !attached {
		// Not attached anywhere; nothing to do.
		return "", nil
	}
	awsInstance := newAWSInstance(c.ec2, diskInfo.ec2Instance)
	mountDevice, alreadyAttached, err := c.getMountDevice(awsInstance, diskInfo.ec2Instance, diskInfo.disk.awsID, false)
	if err != nil {
		return "", err
	}
	if !alreadyAttached {
		klog.Warningf("DetachDisk called on non-attached disk: %s", diskName)
		// TODO: Continue? Tolerate non-attached error from the AWS DetachVolume call?
	}
	request := ec2.DetachVolumeInput{
		InstanceId: &awsInstance.awsID,
		VolumeId:   diskInfo.disk.awsID.awsString(),
	}
	response, err := c.ec2.DetachVolume(&request)
	if err != nil {
		return "", fmt.Errorf("error detaching EBS volume %q from %q: %q", diskInfo.disk.awsID, awsInstance.awsID, err)
	}
	if response == nil {
		return "", errors.New("no response from DetachVolume")
	}
	attachment, err := diskInfo.disk.waitForAttachmentStatus("detached")
	if err != nil {
		return "", err
	}
	if da, ok := c.deviceAllocators[awsInstance.nodeName]; ok {
		// Lower the priority of this device name for future allocations.
		da.Deprioritize(mountDevice)
	}
	if attachment != nil {
		// We expect it to be nil, it is (maybe) interesting if it is not
		klog.V(2).Infof("waitForAttachmentStatus returned non-nil attachment with state=detached: %v", attachment)
	}
	if mountDevice != "" {
		c.endAttaching(awsInstance, diskInfo.disk.awsID, mountDevice)
		// We don't check the return value - we don't really expect the attachment to have been
		// in progress, though it might have been
	}
	hostDevicePath := "/dev/xvd" + string(mountDevice)
	return hostDevicePath, err
}
// CreateDisk implements Volumes.CreateDisk
//
// It maps the requested volume type / IOPS options to an EC2 CreateVolume
// request, tags the volume with the cluster's ownership tags, and waits
// until the new volume is actually available before returning its ID.
func (c *Cloud) CreateDisk(volumeOptions *VolumeOptions) (KubernetesVolumeID, error) {
	var createType string
	var iops int64
	switch volumeOptions.VolumeType {
	case VolumeTypeGP2, VolumeTypeSC1, VolumeTypeST1:
		createType = volumeOptions.VolumeType
	case VolumeTypeIO1:
		// See http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html
		// for IOPS constraints. AWS will throw an error if IOPS per GB gets out
		// of supported bounds, no need to check it here.
		createType = volumeOptions.VolumeType
		iops = int64(volumeOptions.CapacityGB * volumeOptions.IOPSPerGB)
		// Cap at min/max total IOPS, AWS would throw an error if it gets too
		// low/high.
		if iops < MinTotalIOPS {
			iops = MinTotalIOPS
		}
		if iops > MaxTotalIOPS {
			iops = MaxTotalIOPS
		}
	case "":
		createType = DefaultVolumeType
	default:
		return "", fmt.Errorf("invalid AWS VolumeType %q", volumeOptions.VolumeType)
	}
	request := &ec2.CreateVolumeInput{}
	request.AvailabilityZone = aws.String(volumeOptions.AvailabilityZone)
	request.Size = aws.Int64(int64(volumeOptions.CapacityGB))
	request.VolumeType = aws.String(createType)
	request.Encrypted = aws.Bool(volumeOptions.Encrypted)
	if len(volumeOptions.KmsKeyID) > 0 {
		request.KmsKeyId = aws.String(volumeOptions.KmsKeyID)
		// Supplying a KMS key implies encryption.
		request.Encrypted = aws.Bool(true)
	}
	if iops > 0 {
		request.Iops = aws.Int64(iops)
	}
	// Merge the caller's tags with the cluster's lifecycle-ownership tags.
	tags := volumeOptions.Tags
	tags = c.tagging.buildTags(ResourceLifecycleOwned, tags)
	var tagList []*ec2.Tag
	for k, v := range tags {
		tagList = append(tagList, &ec2.Tag{
			Key: aws.String(k), Value: aws.String(v),
		})
	}
	request.TagSpecifications = append(request.TagSpecifications, &ec2.TagSpecification{
		Tags: tagList,
		ResourceType: aws.String(ec2.ResourceTypeVolume),
	})
	response, err := c.ec2.CreateVolume(request)
	if err != nil {
		return "", err
	}
	awsID := EBSVolumeID(aws.StringValue(response.VolumeId))
	if awsID == "" {
		return "", fmt.Errorf("VolumeID was not returned by CreateVolume")
	}
	volumeName := KubernetesVolumeID("aws://" + aws.StringValue(response.AvailabilityZone) + "/" + string(awsID))
	err = c.waitUntilVolumeAvailable(volumeName)
	if err != nil {
		// AWS has a bad habit of reporting success when creating a volume with
		// encryption keys that either don't exist or have wrong permissions.
		// Such volume lives for couple of seconds and then it's silently deleted
		// by AWS. There is no other check to ensure that given KMS key is correct,
		// because Kubernetes may have limited permissions to the key.
		if isAWSErrorVolumeNotFound(err) {
			err = fmt.Errorf("failed to create encrypted volume: the volume disappeared after creation, most likely due to inaccessible KMS encryption key")
		}
		return "", err
	}
	return volumeName, nil
}
// waitUntilVolumeAvailable waits for a newly created volume to leave the
// "creating" state. The initial sleep gives AWS time to make the volume
// visible to DescribeVolumes after CreateVolume returns.
func (c *Cloud) waitUntilVolumeAvailable(volumeName KubernetesVolumeID) error {
	disk, err := newAWSDisk(c, volumeName)
	if err != nil {
		// Unreachable code
		return err
	}

	time.Sleep(5 * time.Second)

	backoff := wait.Backoff{
		Duration: volumeCreateInitialDelay,
		Factor:   volumeCreateBackoffFactor,
		Steps:    volumeCreateBackoffSteps,
	}
	return wait.ExponentialBackoff(backoff, func() (done bool, err error) {
		vol, err := disk.describeVolume()
		if err != nil {
			return true, err
		}
		if vol.State == nil {
			// No state reported yet; keep polling.
			return false, nil
		}
		switch *vol.State {
		case "available":
			// The volume is Available, it won't be deleted now.
			return true, nil
		case "creating":
			return false, nil
		default:
			return true, fmt.Errorf("unexpected State of newly created AWS EBS volume %s: %q", volumeName, *vol.State)
		}
	})
}
// DeleteDisk implements Volumes.DeleteDisk
func (c *Cloud) DeleteDisk(volumeName KubernetesVolumeID) (bool, error) {
	disk, err := newAWSDisk(c, volumeName)
	if err != nil {
		return false, err
	}

	available, err := c.checkIfAvailable(disk, "deleting", "")
	if err != nil {
		if isAWSErrorVolumeNotFound(err) {
			// The volume is already gone; treat the delete as a no-op success.
			klog.V(2).Infof("Volume %s not found when deleting it, assuming it's deleted", disk.awsID)
			return false, nil
		}
		klog.Error(err)
	}
	if !available {
		return false, err
	}
	return disk.deleteVolume()
}
// checkIfAvailable verifies that the given disk is in the "available" EC2
// state, which is required before it can be attached or deleted.
//
// Returns (true, nil) when available. When the volume is attached to some
// instance, a volerr.DanglingError carrying the attached node and device is
// returned so callers can reconcile the stale attachment. opName names the
// operation (e.g. "attaching", "deleting") and instance optionally names the
// target instance; both are used only to build error messages.
func (c *Cloud) checkIfAvailable(disk *awsDisk, opName string, instance string) (bool, error) {
	info, err := disk.describeVolume()
	if err != nil {
		klog.Errorf("Error describing volume %q: %q", disk.awsID, err)
		// if for some reason we can not describe volume we will return error
		return false, err
	}
	volumeState := aws.StringValue(info.State)
	opError := fmt.Sprintf("Error %s EBS volume %q", opName, disk.awsID)
	if len(instance) != 0 {
		// NOTE(review): %q re-quotes the entire opError prefix here, producing
		// nested quotes in the final message — confirm this is intentional.
		opError = fmt.Sprintf("%q to instance %q", opError, instance)
	}
	// Only available volumes can be attached or deleted
	if volumeState != "available" {
		// Volume is attached somewhere else and we can not attach it here
		if len(info.Attachments) > 0 {
			attachment := info.Attachments[0]
			instanceID := aws.StringValue(attachment.InstanceId)
			attachedInstance, ierr := c.getInstanceByID(instanceID)
			attachErr := fmt.Sprintf("%s since volume is currently attached to %q", opError, instanceID)
			if ierr != nil {
				klog.Error(attachErr)
				return false, errors.New(attachErr)
			}
			devicePath := aws.StringValue(attachment.Device)
			nodeName := mapInstanceToNodeName(attachedInstance)
			// Report a "dangling" attachment so the controller can detach it.
			danglingErr := volerr.NewDanglingError(attachErr, nodeName, devicePath)
			return false, danglingErr
		}
		attachErr := fmt.Errorf("%s since volume is in %q state", opError, volumeState)
		return false, attachErr
	}
	return true, nil
}
// GetLabelsForVolume gets the volume labels for a volume
func (c *Cloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) {
	ebs := pv.Spec.AWSElasticBlockStore
	// Ignore if not AWSElasticBlockStore.
	if ebs == nil {
		return nil, nil
	}
	// Ignore any volumes that are being provisioned
	if ebs.VolumeID == cloudvolume.ProvisionedVolumeName {
		return nil, nil
	}
	return c.GetVolumeLabels(KubernetesVolumeID(ebs.VolumeID))
}
// GetVolumeLabels implements Volumes.GetVolumeLabels
func (c *Cloud) GetVolumeLabels(volumeName KubernetesVolumeID) (map[string]string, error) {
	disk, err := newAWSDisk(c, volumeName)
	if err != nil {
		return nil, err
	}
	info, err := disk.describeVolume()
	if err != nil {
		return nil, err
	}

	az := aws.StringValue(info.AvailabilityZone)
	if az == "" {
		return nil, fmt.Errorf("volume did not have AZ information: %q", aws.StringValue(info.VolumeId))
	}
	region, err := azToRegion(az)
	if err != nil {
		return nil, err
	}

	// Label the volume with both its zone (failure domain) and region.
	return map[string]string{
		v1.LabelZoneFailureDomain: az,
		v1.LabelZoneRegion:        region,
	}, nil
}
// GetDiskPath implements Volumes.GetDiskPath
func (c *Cloud) GetDiskPath(volumeName KubernetesVolumeID) (string, error) {
	disk, err := newAWSDisk(c, volumeName)
	if err != nil {
		return "", err
	}
	info, err := disk.describeVolume()
	if err != nil {
		return "", err
	}
	attachments := info.Attachments
	if len(attachments) == 0 {
		return "", fmt.Errorf("No attachment to volume %s", volumeName)
	}
	// Report the device of the first attachment.
	return aws.StringValue(attachments[0].Device), nil
}
// DiskIsAttached implements Volumes.DiskIsAttached
func (c *Cloud) DiskIsAttached(diskName KubernetesVolumeID, nodeName types.NodeName) (bool, error) {
	_, attached, err := c.checkIfAttachedToNode(diskName, nodeName)
	if err == nil {
		return attached, nil
	}
	if isAWSErrorVolumeNotFound(err) {
		// The disk doesn't exist, can't be attached
		klog.Warningf("DiskIsAttached called for volume %s on node %s but the volume does not exist", diskName, nodeName)
		return false, nil
	}
	return true, err
}
// DisksAreAttached returns a map of nodes and Kubernetes volume IDs indicating
// if the volumes are attached to the node
//
// Every requested (node, disk) pair is pre-seeded as false; a pair is flipped
// to true only when the corresponding EBS volume appears in that node
// instance's block device mappings.
func (c *Cloud) DisksAreAttached(nodeDisks map[types.NodeName][]KubernetesVolumeID) (map[types.NodeName]map[KubernetesVolumeID]bool, error) {
	attached := make(map[types.NodeName]map[KubernetesVolumeID]bool)
	if len(nodeDisks) == 0 {
		return attached, nil
	}
	nodeNames := []string{}
	for nodeName, diskNames := range nodeDisks {
		for _, diskName := range diskNames {
			// Default every requested pair to "not attached".
			setNodeDisk(attached, diskName, nodeName, false)
		}
		nodeNames = append(nodeNames, mapNodeNameToPrivateDNSName(nodeName))
	}
	// Note that we get instances regardless of state.
	// This means there might be multiple nodes with the same node names.
	awsInstances, err := c.getInstancesByNodeNames(nodeNames)
	if err != nil {
		// When there is an error fetching instance information
		// it is safer to return nil and let volume information not be touched.
		return nil, err
	}
	if len(awsInstances) == 0 {
		klog.V(2).Infof("DisksAreAttached found no instances matching node names; will assume disks not attached")
		return attached, nil
	}
	// Note that we check that the volume is attached to the correct node, not that it is attached to _a_ node
	for _, awsInstance := range awsInstances {
		nodeName := mapInstanceToNodeName(awsInstance)
		diskNames := nodeDisks[nodeName]
		if len(diskNames) == 0 {
			continue
		}
		awsInstanceState := "<nil>"
		if awsInstance != nil && awsInstance.State != nil {
			awsInstanceState = aws.StringValue(awsInstance.State.Name)
		}
		if awsInstanceState == "terminated" {
			// Instance is terminated, safe to assume volumes not attached
			// Note that we keep volumes attached to instances in other states (most notably, stopped)
			continue
		}
		// Index the requested disks by EBS volume id for O(1) lookup below.
		idToDiskName := make(map[EBSVolumeID]KubernetesVolumeID)
		for _, diskName := range diskNames {
			volumeID, err := diskName.MapToAWSVolumeID()
			if err != nil {
				return nil, fmt.Errorf("error mapping volume spec %q to aws id: %v", diskName, err)
			}
			idToDiskName[volumeID] = diskName
		}
		for _, blockDevice := range awsInstance.BlockDeviceMappings {
			volumeID := EBSVolumeID(aws.StringValue(blockDevice.Ebs.VolumeId))
			diskName, found := idToDiskName[volumeID]
			if found {
				// Disk is still attached to node
				setNodeDisk(attached, diskName, nodeName, true)
			}
		}
	}
	return attached, nil
}
// ResizeDisk resizes an EBS volume in GiB increments, it will round up to the
// next GiB if arguments are not provided in even GiB increments
func (c *Cloud) ResizeDisk(
	diskName KubernetesVolumeID,
	oldSize resource.Quantity,
	newSize resource.Quantity) (resource.Quantity, error) {
	awsDisk, err := newAWSDisk(c, diskName)
	if err != nil {
		return oldSize, err
	}

	volumeInfo, err := awsDisk.describeVolume()
	if err != nil {
		return oldSize, fmt.Errorf("AWS.ResizeDisk Error describing volume %s with %v", diskName, err)
	}

	// AWS resizes in chunks of GiB (not GB)
	requestGiB := volumehelpers.RoundUpToGiB(newSize)
	newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", requestGiB))

	// If disk already if of greater or equal size than requested we return
	if aws.Int64Value(volumeInfo.Size) >= requestGiB {
		return newSizeQuant, nil
	}

	if _, err = awsDisk.modifyVolume(requestGiB); err != nil {
		return oldSize, err
	}
	return newSizeQuant, nil
}
// describeLoadBalancer gets the current load balancer state; it returns
// (nil, nil) when no load balancer with the given name exists.
func (c *Cloud) describeLoadBalancer(name string) (*elb.LoadBalancerDescription, error) {
	request := &elb.DescribeLoadBalancersInput{
		LoadBalancerNames: []*string{&name},
	}
	response, err := c.elb.DescribeLoadBalancers(request)
	if err != nil {
		if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "LoadBalancerNotFound" {
			return nil, nil
		}
		return nil, err
	}

	var ret *elb.LoadBalancerDescription
	for _, loadBalancer := range response.LoadBalancerDescriptions {
		if ret != nil {
			// Shouldn't happen; log and fall through to keep the last one.
			klog.Errorf("Found multiple load balancers with name: %s", name)
		}
		ret = loadBalancer
	}
	return ret, nil
}
// addLoadBalancerTags applies the requested key/value tags to the named
// classic ELB load balancer.
func (c *Cloud) addLoadBalancerTags(loadBalancerName string, requested map[string]string) error {
	var tags []*elb.Tag
	for k, v := range requested {
		tags = append(tags, &elb.Tag{
			Key:   aws.String(k),
			Value: aws.String(v),
		})
	}

	request := &elb.AddTagsInput{
		LoadBalancerNames: []*string{&loadBalancerName},
		Tags:              tags,
	}
	if _, err := c.elb.AddTags(request); err != nil {
		return fmt.Errorf("error adding tags to load balancer: %v", err)
	}
	return nil
}
// describeLoadBalancerv2 gets the current state of an ELBv2 load balancer;
// it returns (nil, nil) when no load balancer with the given name exists,
// and an error when a load balancer exists but none of network type.
func (c *Cloud) describeLoadBalancerv2(name string) (*elbv2.LoadBalancer, error) {
	request := &elbv2.DescribeLoadBalancersInput{
		Names: []*string{aws.String(name)},
	}
	response, err := c.elbv2.DescribeLoadBalancers(request)
	if err != nil {
		if awsError, ok := err.(awserr.Error); ok && awsError.Code() == elbv2.ErrCodeLoadBalancerNotFoundException {
			return nil, nil
		}
		return nil, fmt.Errorf("error describing load balancer: %q", err)
	}

	// AWS will not return 2 load balancers with the same name _and_ type.
	for i := range response.LoadBalancers {
		if aws.StringValue(response.LoadBalancers[i].Type) == elbv2.LoadBalancerTypeEnumNetwork {
			return response.LoadBalancers[i], nil
		}
	}
	return nil, fmt.Errorf("NLB '%s' could not be found", name)
}
// findVPCID retrieves the instance's VPC id from the EC2 metadata service.
// It walks the instance's network interfaces and returns the first VPC id
// that resolves successfully.
func (c *Cloud) findVPCID() (string, error) {
	macs, err := c.metadata.GetMetadata("network/interfaces/macs/")
	if err != nil {
		return "", fmt.Errorf("could not list interfaces of the instance: %q", err)
	}

	// loop over interfaces, first vpc id returned wins
	for _, macPath := range strings.Split(macs, "\n") {
		if macPath == "" {
			continue
		}
		vpcID, err := c.metadata.GetMetadata(fmt.Sprintf("network/interfaces/macs/%svpc-id", macPath))
		if err != nil {
			// This interface exposed no vpc-id entry; try the next one.
			continue
		}
		return vpcID, nil
	}
	return "", fmt.Errorf("could not find VPC ID in instance metadata")
}
// findSecurityGroup retrieves the specified security group from the AWS API,
// or returns nil if not found.
func (c *Cloud) findSecurityGroup(securityGroupID string) (*ec2.SecurityGroup, error) {
	// We don't apply our tag filters because we are retrieving by ID
	request := &ec2.DescribeSecurityGroupsInput{
		GroupIds: []*string{&securityGroupID},
	}
	groups, err := c.ec2.DescribeSecurityGroups(request)
	if err != nil {
		klog.Warningf("Error retrieving security group: %q", err)
		return nil, err
	}

	switch len(groups) {
	case 0:
		return nil, nil
	case 1:
		return groups[0], nil
	default:
		// This should not be possible - ids should be unique
		return nil, fmt.Errorf("multiple security groups found with same id %q", securityGroupID)
	}
}
// isEqualIntPointer reports whether two *int64 are equal: both nil, or both
// non-nil and pointing at equal values.
func isEqualIntPointer(l, r *int64) bool {
	if l == nil || r == nil {
		return l == r
	}
	return *l == *r
}
// isEqualStringPointer reports whether two *string are equal: both nil, or
// both non-nil and pointing at equal values.
func isEqualStringPointer(l, r *string) bool {
	if l == nil || r == nil {
		return l == r
	}
	return *l == *r
}
// ipPermissionExists reports whether newPermission is covered by existing:
// the ports and protocol must match, and every IP range and user/group pair
// of newPermission must be present in existing.
func ipPermissionExists(newPermission, existing *ec2.IpPermission, compareGroupUserIDs bool) bool {
	if !isEqualIntPointer(newPermission.FromPort, existing.FromPort) ||
		!isEqualIntPointer(newPermission.ToPort, existing.ToPort) ||
		!isEqualStringPointer(newPermission.IpProtocol, existing.IpProtocol) {
		return false
	}
	// Check only if newPermission is a subset of existing. Usually it has zero or one elements.
	// Not doing actual CIDR math yet; not clear it's needed, either.
	klog.V(4).Infof("Comparing %v to %v", newPermission, existing)
	if len(newPermission.IpRanges) > len(existing.IpRanges) {
		return false
	}
	for _, newRange := range newPermission.IpRanges {
		matched := false
		for _, existingRange := range existing.IpRanges {
			if isEqualStringPointer(newRange.CidrIp, existingRange.CidrIp) {
				matched = true
				break
			}
		}
		if !matched {
			return false
		}
	}
	for _, leftPair := range newPermission.UserIdGroupPairs {
		matched := false
		for _, rightPair := range existing.UserIdGroupPairs {
			if isEqualUserGroupPair(leftPair, rightPair, compareGroupUserIDs) {
				matched = true
				break
			}
		}
		if !matched {
			return false
		}
	}
	return true
}
// isEqualUserGroupPair reports whether two UserIdGroupPairs refer to the same
// security group, additionally requiring the same user id when
// compareGroupUserIDs is set.
func isEqualUserGroupPair(l, r *ec2.UserIdGroupPair, compareGroupUserIDs bool) bool {
	klog.V(2).Infof("Comparing %v to %v", *l.GroupId, *r.GroupId)
	if !isEqualStringPointer(l.GroupId, r.GroupId) {
		return false
	}
	if !compareGroupUserIDs {
		return true
	}
	return isEqualStringPointer(l.UserId, r.UserId)
}
// Makes sure the security group ingress is exactly the specified permissions
// Returns true if and only if changes were made
// The security group must already exist
func (c *Cloud) setSecurityGroupIngress(securityGroupID string, permissions IPPermissionSet) (bool, error) {
	// We do not want to make changes to the Global defined SG
	if securityGroupID == c.cfg.Global.ElbSecurityGroup {
		return false, nil
	}
	group, err := c.findSecurityGroup(securityGroupID)
	if err != nil {
		klog.Warningf("Error retrieving security group %q", err)
		return false, err
	}
	if group == nil {
		return false, fmt.Errorf("security group not found: %s", securityGroupID)
	}
	klog.V(2).Infof("Existing security group ingress: %s %v", securityGroupID, group.IpPermissions)
	actual := NewIPPermissionSet(group.IpPermissions...)
	// EC2 groups rules together, for example combining:
	//
	// { Port=80, Range=[A] } and { Port=80, Range=[B] }
	//
	// into { Port=80, Range=[A,B] }
	//
	// We have to ungroup them, because otherwise the logic becomes really
	// complicated, and also because if we have Range=[A,B] and we try to
	// add Range=[A] then EC2 complains about a duplicate rule.
	permissions = permissions.Ungroup()
	actual = actual.Ungroup()
	// Compute the set difference in both directions: rules to revoke and
	// rules to authorize.
	remove := actual.Difference(permissions)
	add := permissions.Difference(actual)
	if add.Len() == 0 && remove.Len() == 0 {
		// Already in the desired state; no API calls needed.
		return false, nil
	}
	// TODO: There is a limit in VPC of 100 rules per security group, so we
	// probably should try grouping or combining to fit under this limit.
	// But this is only used on the ELB security group currently, so it
	// would require (ports * CIDRS) > 100. Also, it isn't obvious exactly
	// how removing single permissions from compound rules works, and we
	// don't want to accidentally open more than intended while we're
	// applying changes.
	if add.Len() != 0 {
		klog.V(2).Infof("Adding security group ingress: %s %v", securityGroupID, add.List())
		request := &ec2.AuthorizeSecurityGroupIngressInput{}
		request.GroupId = &securityGroupID
		request.IpPermissions = add.List()
		_, err = c.ec2.AuthorizeSecurityGroupIngress(request)
		if err != nil {
			return false, fmt.Errorf("error authorizing security group ingress: %q", err)
		}
	}
	if remove.Len() != 0 {
		klog.V(2).Infof("Remove security group ingress: %s %v", securityGroupID, remove.List())
		request := &ec2.RevokeSecurityGroupIngressInput{}
		request.GroupId = &securityGroupID
		request.IpPermissions = remove.List()
		_, err = c.ec2.RevokeSecurityGroupIngress(request)
		if err != nil {
			return false, fmt.Errorf("error revoking security group ingress: %q", err)
		}
	}
	return true, nil
}
// Makes sure the security group includes the specified permissions
// Returns true if and only if changes were made
// The security group must already exist
func (c *Cloud) addSecurityGroupIngress(securityGroupID string, addPermissions []*ec2.IpPermission) (bool, error) {
	// We do not want to make changes to the Global defined SG
	if securityGroupID == c.cfg.Global.ElbSecurityGroup {
		return false, nil
	}

	group, err := c.findSecurityGroup(securityGroupID)
	if err != nil {
		klog.Warningf("Error retrieving security group: %q", err)
		return false, err
	}
	if group == nil {
		return false, fmt.Errorf("security group not found: %s", securityGroupID)
	}
	klog.V(2).Infof("Existing security group ingress: %s %v", securityGroupID, group.IpPermissions)

	// Collect only the permissions not already present in the group.
	changes := []*ec2.IpPermission{}
	for _, permission := range addPermissions {
		// Compare user IDs only when the candidate rule carries one.
		hasUserID := false
		for i := range permission.UserIdGroupPairs {
			if permission.UserIdGroupPairs[i].UserId != nil {
				hasUserID = true
			}
		}

		exists := false
		for _, groupPermission := range group.IpPermissions {
			if ipPermissionExists(permission, groupPermission, hasUserID) {
				exists = true
				break
			}
		}
		if !exists {
			changes = append(changes, permission)
		}
	}
	if len(changes) == 0 {
		return false, nil
	}

	klog.V(2).Infof("Adding security group ingress: %s %v", securityGroupID, changes)
	request := &ec2.AuthorizeSecurityGroupIngressInput{}
	request.GroupId = &securityGroupID
	request.IpPermissions = changes
	if _, err = c.ec2.AuthorizeSecurityGroupIngress(request); err != nil {
		klog.Warningf("Error authorizing security group ingress %q", err)
		return false, fmt.Errorf("error authorizing security group ingress: %q", err)
	}
	return true, nil
}
// Makes sure the security group no longer includes the specified permissions
// Returns true if and only if changes were made
// If the security group no longer exists, will return (false, nil)
func (c *Cloud) removeSecurityGroupIngress(securityGroupID string, removePermissions []*ec2.IpPermission) (bool, error) {
	// We do not want to make changes to the Global defined SG
	if securityGroupID == c.cfg.Global.ElbSecurityGroup {
		return false, nil
	}
	group, err := c.findSecurityGroup(securityGroupID)
	if err != nil {
		klog.Warningf("Error retrieving security group: %q", err)
		return false, err
	}
	if group == nil {
		klog.Warning("Security group not found: ", securityGroupID)
		return false, nil
	}
	// Collect only the permissions that are actually present in the group.
	changes := []*ec2.IpPermission{}
	for _, removePermission := range removePermissions {
		// Compare user IDs only when the rule to remove carries one.
		hasUserID := false
		for i := range removePermission.UserIdGroupPairs {
			if removePermission.UserIdGroupPairs[i].UserId != nil {
				hasUserID = true
			}
		}
		var found *ec2.IpPermission
		for _, groupPermission := range group.IpPermissions {
			if ipPermissionExists(removePermission, groupPermission, hasUserID) {
				found = removePermission
				break
			}
		}
		if found != nil {
			changes = append(changes, found)
		}
	}
	if len(changes) == 0 {
		// Nothing to revoke.
		return false, nil
	}
	klog.V(2).Infof("Removing security group ingress: %s %v", securityGroupID, changes)
	request := &ec2.RevokeSecurityGroupIngressInput{}
	request.GroupId = &securityGroupID
	request.IpPermissions = changes
	_, err = c.ec2.RevokeSecurityGroupIngress(request)
	if err != nil {
		klog.Warningf("Error revoking security group ingress: %q", err)
		return false, err
	}
	return true, nil
}
// Makes sure the security group exists.
// For multi-cluster isolation, name must be globally unique, for example derived from the service UUID.
// Additional tags can be specified
// Returns the security group id or error
//
// The loop below is a read-then-create with retry: if another actor creates
// the same group between our Describe and Create calls, EC2 returns
// InvalidGroup.Duplicate and we loop back to the Describe and adopt the
// existing group instead.
func (c *Cloud) ensureSecurityGroup(name string, description string, additionalTags map[string]string) (string, error) {
	groupID := ""
	attempt := 0
	for {
		attempt++

		// Note that we do _not_ add our tag filters; group-name + vpc-id is the EC2 primary key.
		// However, we do check that it matches our tags.
		// If it doesn't have any tags, we tag it; this is how we recover if we failed to tag before.
		// If it has a different cluster's tags, that is an error.
		// This shouldn't happen because name is expected to be globally unique (UUID derived)
		request := &ec2.DescribeSecurityGroupsInput{}
		request.Filters = []*ec2.Filter{
			newEc2Filter("group-name", name),
			newEc2Filter("vpc-id", c.vpcID),
		}

		securityGroups, err := c.ec2.DescribeSecurityGroups(request)
		if err != nil {
			return "", err
		}

		if len(securityGroups) >= 1 {
			if len(securityGroups) > 1 {
				klog.Warningf("Found multiple security groups with name: %q", name)
			}
			// Verify/repair our cluster-ownership tags on the existing group
			// before reusing it (recovers from a previously failed tagging).
			err := c.tagging.readRepairClusterTags(
				c.ec2, aws.StringValue(securityGroups[0].GroupId),
				ResourceLifecycleOwned, nil, securityGroups[0].Tags)
			if err != nil {
				return "", err
			}

			return aws.StringValue(securityGroups[0].GroupId), nil
		}

		createRequest := &ec2.CreateSecurityGroupInput{}
		createRequest.VpcId = &c.vpcID
		createRequest.GroupName = &name
		createRequest.Description = &description

		createResponse, err := c.ec2.CreateSecurityGroup(createRequest)
		if err != nil {
			ignore := false
			switch err := err.(type) {
			case awserr.Error:
				// Concurrent creation race: retry the describe up to
				// MaxReadThenCreateRetries times and adopt the winner's group.
				if err.Code() == "InvalidGroup.Duplicate" && attempt < MaxReadThenCreateRetries {
					klog.V(2).Infof("Got InvalidGroup.Duplicate while creating security group (race?); will retry")
					ignore = true
				}
			}
			if !ignore {
				klog.Errorf("Error creating security group: %q", err)
				return "", err
			}
			// Brief pause before retrying the describe/create cycle.
			time.Sleep(1 * time.Second)
		} else {
			groupID = aws.StringValue(createResponse.GroupId)
			break
		}
	}
	if groupID == "" {
		return "", fmt.Errorf("created security group, but id was not returned: %s", name)
	}

	err := c.tagging.createTags(c.ec2, groupID, ResourceLifecycleOwned, additionalTags)
	if err != nil {
		// If we retry, ensureClusterTags will recover from this - it
		// will add the missing tags. We could delete the security
		// group here, but that doesn't feel like the right thing, as
		// the caller is likely to retry the create
		return "", fmt.Errorf("error tagging security group: %q", err)
	}
	return groupID, nil
}
// findTag returns the value of the tag with the given key, and whether such
// a tag was found at all.
func findTag(tags []*ec2.Tag, key string) (string, bool) {
	for _, t := range tags {
		if aws.StringValue(t.Key) != key {
			continue
		}
		return aws.StringValue(t.Value), true
	}
	return "", false
}
// findSubnets returns the subnets associated with the cluster, matched by
// cluster tag. For maximal backwards compatibility, if no subnets carry the
// cluster tag, it falls back to the current instance's subnet; in the future
// that fallback will likely be treated as an error.
func (c *Cloud) findSubnets() ([]*ec2.Subnet, error) {
	request := &ec2.DescribeSubnetsInput{
		Filters: []*ec2.Filter{newEc2Filter("vpc-id", c.vpcID)},
	}
	allSubnets, err := c.ec2.DescribeSubnets(request)
	if err != nil {
		return nil, fmt.Errorf("error describing subnets: %q", err)
	}

	// Prefer subnets explicitly tagged as belonging to this cluster.
	var tagged []*ec2.Subnet
	for _, s := range allSubnets {
		if c.tagging.hasClusterTag(s.Tags) {
			tagged = append(tagged, s)
		}
	}
	if len(tagged) != 0 {
		return tagged, nil
	}

	// Fall back to the current instance subnets, if nothing is tagged
	klog.Warningf("No tagged subnets found; will fall-back to the current subnet only. This is likely to be an error in a future version of k8s.")

	fallback := &ec2.DescribeSubnetsInput{
		Filters: []*ec2.Filter{newEc2Filter("subnet-id", c.selfAWSInstance.subnetID)},
	}
	subnets, err := c.ec2.DescribeSubnets(fallback)
	if err != nil {
		return nil, fmt.Errorf("error describing subnets: %q", err)
	}
	return subnets, nil
}
// Finds the subnets to use for an ELB we are creating.
// Normal (Internet-facing) ELBs must use public subnets, so we skip private subnets.
// Internal ELBs can use public or private subnets, but if we have a private subnet we should prefer that.
//
// At most one subnet per availability zone is selected; ties are broken first
// by the internal/public ELB role tag, then lexicographically by subnet ID.
// The resulting subnet IDs are returned sorted by AZ name.
func (c *Cloud) findELBSubnets(internalELB bool) ([]string, error) {
	subnets, err := c.findSubnets()
	if err != nil {
		return nil, err
	}

	// Route tables are needed to classify each subnet as public or private.
	rRequest := &ec2.DescribeRouteTablesInput{}
	rRequest.Filters = []*ec2.Filter{newEc2Filter("vpc-id", c.vpcID)}
	rt, err := c.ec2.DescribeRouteTables(rRequest)
	if err != nil {
		return nil, fmt.Errorf("error describe route table: %q", err)
	}

	subnetsByAZ := make(map[string]*ec2.Subnet)
	for _, subnet := range subnets {
		az := aws.StringValue(subnet.AvailabilityZone)
		id := aws.StringValue(subnet.SubnetId)
		if az == "" || id == "" {
			klog.Warningf("Ignoring subnet with empty az/id: %v", subnet)
			continue
		}

		isPublic, err := isSubnetPublic(rt, id)
		if err != nil {
			return nil, err
		}
		// Internet-facing ELBs require a route to an internet gateway.
		if !internalELB && !isPublic {
			klog.V(2).Infof("Ignoring private subnet for public ELB %q", id)
			continue
		}

		existing := subnetsByAZ[az]
		if existing == nil {
			subnetsByAZ[az] = subnet
			continue
		}

		// Try to break the tie using the role tag matching this ELB type.
		var tagName string
		if internalELB {
			tagName = TagNameSubnetInternalELB
		} else {
			tagName = TagNameSubnetPublicELB
		}

		_, existingHasTag := findTag(existing.Tags, tagName)
		_, subnetHasTag := findTag(subnet.Tags, tagName)

		if existingHasTag != subnetHasTag {
			if subnetHasTag {
				subnetsByAZ[az] = subnet
			}
			continue
		}

		// If we have two subnets for the same AZ we arbitrarily choose the one that is first lexicographically.
		// TODO: Should this be an error?
		// Direct string comparison instead of strings.Compare: the stdlib
		// documents the built-in operators as clearer and faster.
		if *existing.SubnetId > *subnet.SubnetId {
			klog.Warningf("Found multiple subnets in AZ %q; choosing %q between subnets %q and %q", az, *subnet.SubnetId, *existing.SubnetId, *subnet.SubnetId)
			subnetsByAZ[az] = subnet
			continue
		}

		klog.Warningf("Found multiple subnets in AZ %q; choosing %q between subnets %q and %q", az, *existing.SubnetId, *existing.SubnetId, *subnet.SubnetId)
	}

	// Emit the chosen subnet IDs in deterministic, AZ-sorted order.
	azNames := make([]string, 0, len(subnetsByAZ))
	for key := range subnetsByAZ {
		azNames = append(azNames, key)
	}
	sort.Strings(azNames)

	subnetIDs := make([]string, 0, len(azNames))
	for _, key := range azNames {
		subnetIDs = append(subnetIDs, aws.StringValue(subnetsByAZ[key].SubnetId))
	}

	return subnetIDs, nil
}
// isSubnetPublic reports whether the given subnet routes to an internet
// gateway, i.e. is a "public" subnet.
//
// It first locates the subnet's route table — via an explicit association,
// or falling back to the VPC's main route table when there is none — and
// returns an error if no route table can be located at all.
func isSubnetPublic(rt []*ec2.RouteTable, subnetID string) (bool, error) {
	var subnetTable *ec2.RouteTable
	for _, table := range rt {
		for _, assoc := range table.Associations {
			if aws.StringValue(assoc.SubnetId) == subnetID {
				subnetTable = table
				break
			}
		}
	}

	if subnetTable == nil {
		// If there is no explicit association, the subnet will be implicitly
		// associated with the VPC's main routing table.
		for _, table := range rt {
			for _, assoc := range table.Associations {
				// Dropped the redundant `== true` comparison (staticcheck S1002).
				if aws.BoolValue(assoc.Main) {
					klog.V(4).Infof("Assuming implicit use of main routing table %s for %s",
						aws.StringValue(table.RouteTableId), subnetID)
					subnetTable = table
					break
				}
			}
		}
	}

	if subnetTable == nil {
		return false, fmt.Errorf("could not locate routing table for subnet %s", subnetID)
	}

	for _, route := range subnetTable.Routes {
		// There is no direct way in the AWS API to determine if a subnet is public or private.
		// A public subnet is one which has an internet gateway route
		// we look for the gatewayId and make sure it has the prefix of igw to differentiate
		// from the default in-subnet route which is called "local"
		// or other virtual gateway (starting with vgw)
		// or vpc peering connections (starting with pcx).
		if strings.HasPrefix(aws.StringValue(route.GatewayId), "igw") {
			return true, nil
		}
	}

	return false, nil
}
// portSets holds the ports named in a comma-separated annotation value,
// partitioned by whether each entry parsed as an integer.
type portSets struct {
	// names holds the entries that are (non-numeric) port names.
	names sets.String
	// numbers holds the entries that parsed as integer port numbers.
	numbers sets.Int64
}
// getPortSets returns a portSets structure representing port names and numbers
// that the comma-separated string describes. If the input is empty or equal to
// "*", a nil pointer is returned.
func getPortSets(annotation string) *portSets {
	// "" and "*" both mean "all ports"; signalled to callers by a nil result.
	if annotation == "" || annotation == "*" {
		return nil
	}

	ports := &portSets{
		names:   sets.NewString(),
		numbers: sets.NewInt64(),
	}
	for _, item := range strings.Split(annotation, ",") {
		// Numeric entries go into the number set; everything else is a name.
		if port, err := strconv.Atoi(item); err == nil {
			ports.numbers.Insert(int64(port))
		} else {
			ports.names.Insert(item)
		}
	}
	return ports
}
// buildELBSecurityGroupList returns list of SecurityGroups which should be
// attached to ELB created by a service. List always consist of at least
// 1 member which is an SG created for this service or a SG from the Global config.
// Extra groups can be specified via annotation, as can extra tags for any
// new groups. The annotation "ServiceAnnotationLoadBalancerSecurityGroups" allows for
// setting the security groups specified.
func (c *Cloud) buildELBSecurityGroupList(serviceName types.NamespacedName, loadBalancerName string, annotations map[string]string) ([]string, error) {
	var securityGroupID string

	if c.cfg.Global.ElbSecurityGroup != "" {
		// A globally configured SG takes precedence over per-service groups.
		securityGroupID = c.cfg.Global.ElbSecurityGroup
	} else {
		// Create a security group for the load balancer
		sgName := "k8s-elb-" + loadBalancerName
		sgDescription := fmt.Sprintf("Security group for Kubernetes ELB %s (%v)", loadBalancerName, serviceName)
		var err error
		securityGroupID, err = c.ensureSecurityGroup(sgName, sgDescription, getLoadBalancerAdditionalTags(annotations))
		if err != nil {
			klog.Errorf("Error creating load balancer security group: %q", err)
			return nil, err
		}
	}

	sgList := []string{}
	// appendCSV splits a comma-separated annotation value and appends each
	// non-empty, whitespace-trimmed entry to sgList.
	appendCSV := func(csv string) {
		for _, sg := range strings.Split(csv, ",") {
			if sg = strings.TrimSpace(sg); sg != "" {
				sgList = append(sgList, sg)
			}
		}
	}

	appendCSV(annotations[ServiceAnnotationLoadBalancerSecurityGroups])

	// If no Security Groups have been specified with the ServiceAnnotationLoadBalancerSecurityGroups annotation, we add the default one.
	if len(sgList) == 0 {
		sgList = append(sgList, securityGroupID)
	}

	appendCSV(annotations[ServiceAnnotationLoadBalancerExtraSecurityGroups])

	return sgList, nil
}
// buildListener creates a new listener from the given port, adding an SSL certificate
// if indicated by the appropriate annotations.
func buildListener(port v1.ServicePort, annotations map[string]string, sslPorts *portSets) (*elb.Listener, error) {
	lbPort := int64(port.Port)
	nodePort := int64(port.NodePort)
	portName := strings.ToLower(port.Name)

	// Default both sides to the (lowercased) service protocol.
	protocol := strings.ToLower(string(port.Protocol))
	instanceProtocol := protocol

	listener := &elb.Listener{
		InstancePort:     &nodePort,
		LoadBalancerPort: &lbPort,
	}

	certID := annotations[ServiceAnnotationLoadBalancerCertificate]
	// nil sslPorts means "all ports use SSL"; otherwise match by number or name.
	portUsesSSL := sslPorts == nil || sslPorts.numbers.Has(lbPort) || sslPorts.names.Has(portName)

	switch {
	case certID != "" && portUsesSSL:
		instanceProtocol = annotations[ServiceAnnotationLoadBalancerBEProtocol]
		if instanceProtocol == "" {
			protocol = "ssl"
			instanceProtocol = "tcp"
		} else {
			protocol = backendProtocolMapping[instanceProtocol]
			if protocol == "" {
				return nil, fmt.Errorf("Invalid backend protocol %s for %s in %s", instanceProtocol, certID, ServiceAnnotationLoadBalancerBEProtocol)
			}
		}
		listener.SSLCertificateId = &certID
	case annotations[ServiceAnnotationLoadBalancerBEProtocol] == "http":
		// Plain HTTP on both sides when requested without a certificate.
		instanceProtocol = "http"
		protocol = "http"
	}

	listener.Protocol = &protocol
	listener.InstanceProtocol = &instanceProtocol

	return listener, nil
}
// EnsureLoadBalancer implements LoadBalancer.EnsureLoadBalancer
//
// It reconciles an AWS load balancer for the given service. When the NLB
// annotation is set it provisions an elbv2 (NLB) load balancer and returns
// early; otherwise it provisions a classic ELB: listeners, subnets, the
// load balancer security group and its ingress rules, attributes, SSL
// negotiation policy, health checks, instance security-group rules, and
// backend instance registration.
func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
	annotations := apiService.Annotations
	klog.V(2).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)",
		clusterName, apiService.Namespace, apiService.Name, c.region, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, annotations)

	if apiService.Spec.SessionAffinity != v1.ServiceAffinityNone {
		// ELB supports sticky sessions, but only when configured for HTTP/HTTPS
		return nil, fmt.Errorf("unsupported load balancer affinity: %v", apiService.Spec.SessionAffinity)
	}

	if len(apiService.Spec.Ports) == 0 {
		return nil, fmt.Errorf("requested load balancer with no ports")
	}

	// Figure out what mappings we want on the load balancer
	listeners := []*elb.Listener{}
	v2Mappings := []nlbPortMapping{}

	sslPorts := getPortSets(annotations[ServiceAnnotationLoadBalancerSSLPorts])
	for _, port := range apiService.Spec.Ports {
		if port.Protocol != v1.ProtocolTCP {
			return nil, fmt.Errorf("Only TCP LoadBalancer is supported for AWS ELB")
		}
		if port.NodePort == 0 {
			klog.Errorf("Ignoring port without NodePort defined: %v", port)
			continue
		}

		if isNLB(annotations) {
			portMapping := nlbPortMapping{
				FrontendPort:     int64(port.Port),
				FrontendProtocol: string(port.Protocol),
				TrafficPort:      int64(port.NodePort),
				TrafficProtocol:  string(port.Protocol),

				// if externalTrafficPolicy == "Local", we'll override the
				// health check later
				HealthCheckPort:     int64(port.NodePort),
				HealthCheckProtocol: elbv2.ProtocolEnumTcp,
			}

			// TLS termination on the NLB when a certificate is configured and
			// this port is selected by the ssl-ports annotation (nil = all).
			certificateARN := annotations[ServiceAnnotationLoadBalancerCertificate]
			if certificateARN != "" && (sslPorts == nil || sslPorts.numbers.Has(int64(port.Port)) || sslPorts.names.Has(port.Name)) {
				portMapping.FrontendProtocol = elbv2.ProtocolEnumTls
				portMapping.SSLCertificateARN = certificateARN
				portMapping.SSLPolicy = annotations[ServiceAnnotationLoadBalancerSSLNegotiationPolicy]

				if backendProtocol := annotations[ServiceAnnotationLoadBalancerBEProtocol]; backendProtocol == "ssl" {
					portMapping.TrafficProtocol = elbv2.ProtocolEnumTls
				}
			}

			v2Mappings = append(v2Mappings, portMapping)
		}
		// NOTE(review): a classic-ELB listener is built even on the NLB path;
		// the NLB branch below returns before `listeners` is consumed, so it
		// appears unused there — confirm before relying on it.
		listener, err := buildListener(port, annotations, sslPorts)
		if err != nil {
			return nil, err
		}
		listeners = append(listeners, listener)
	}

	if apiService.Spec.LoadBalancerIP != "" {
		return nil, fmt.Errorf("LoadBalancerIP cannot be specified for AWS ELB")
	}

	instances, err := c.findInstancesForELB(nodes)
	if err != nil {
		return nil, err
	}

	sourceRanges, err := servicehelpers.GetLoadBalancerSourceRanges(apiService)
	if err != nil {
		return nil, err
	}

	// Determine if this is tagged as an Internal ELB
	// (any non-empty annotation value other than "false" enables internal mode)
	internalELB := false
	internalAnnotation := apiService.Annotations[ServiceAnnotationLoadBalancerInternal]
	if internalAnnotation == "false" {
		internalELB = false
	} else if internalAnnotation != "" {
		internalELB = true
	}

	if isNLB(annotations) {
		// NLB path: provision via elbv2 and return early.

		if path, healthCheckNodePort := servicehelpers.GetServiceHealthCheckPathPort(apiService); path != "" {
			// externalTrafficPolicy == "Local": health-check the service's
			// dedicated HTTP health-check node port instead of the traffic port.
			for i := range v2Mappings {
				v2Mappings[i].HealthCheckPort = int64(healthCheckNodePort)
				v2Mappings[i].HealthCheckPath = path
				v2Mappings[i].HealthCheckProtocol = elbv2.ProtocolEnumHttp
			}
		}

		// Find the subnets that the ELB will live in
		subnetIDs, err := c.findELBSubnets(internalELB)
		if err != nil {
			klog.Errorf("Error listing subnets in VPC: %q", err)
			return nil, err
		}

		// Bail out early if there are no subnets
		if len(subnetIDs) == 0 {
			return nil, fmt.Errorf("could not find any suitable subnets for creating the ELB")
		}

		loadBalancerName := c.GetLoadBalancerName(ctx, clusterName, apiService)
		serviceName := types.NamespacedName{Namespace: apiService.Namespace, Name: apiService.Name}

		instanceIDs := []string{}
		for id := range instances {
			instanceIDs = append(instanceIDs, string(id))
		}

		v2LoadBalancer, err := c.ensureLoadBalancerv2(
			serviceName,
			loadBalancerName,
			v2Mappings,
			instanceIDs,
			subnetIDs,
			internalELB,
			annotations,
		)
		if err != nil {
			return nil, err
		}

		// No configured source ranges means "allow from anywhere".
		sourceRangeCidrs := []string{}
		for cidr := range sourceRanges {
			sourceRangeCidrs = append(sourceRangeCidrs, cidr)
		}
		if len(sourceRangeCidrs) == 0 {
			sourceRangeCidrs = append(sourceRangeCidrs, "0.0.0.0/0")
		}

		err = c.updateInstanceSecurityGroupsForNLB(loadBalancerName, instances, sourceRangeCidrs, v2Mappings)
		if err != nil {
			klog.Warningf("Error opening ingress rules for the load balancer to the instances: %q", err)
			return nil, err
		}

		// We don't have an `ensureLoadBalancerInstances()` function for elbv2
		// because `ensureLoadBalancerv2()` requires instance Ids

		// TODO: Wait for creation?
		return v2toStatus(v2LoadBalancer), nil
	}

	// Classic ELB path from here on.

	// Determine if we need to set the Proxy protocol policy
	proxyProtocol := false
	proxyProtocolAnnotation := apiService.Annotations[ServiceAnnotationLoadBalancerProxyProtocol]
	if proxyProtocolAnnotation != "" {
		if proxyProtocolAnnotation != "*" {
			return nil, fmt.Errorf("annotation %q=%q detected, but the only value supported currently is '*'", ServiceAnnotationLoadBalancerProxyProtocol, proxyProtocolAnnotation)
		}
		proxyProtocol = true
	}

	// Some load balancer attributes are required, so defaults are set. These can be overridden by annotations.
	loadBalancerAttributes := &elb.LoadBalancerAttributes{
		AccessLog:              &elb.AccessLog{Enabled: aws.Bool(false)},
		ConnectionDraining:     &elb.ConnectionDraining{Enabled: aws.Bool(false)},
		ConnectionSettings:     &elb.ConnectionSettings{IdleTimeout: aws.Int64(60)},
		CrossZoneLoadBalancing: &elb.CrossZoneLoadBalancing{Enabled: aws.Bool(false)},
	}

	// Determine if an access log emit interval has been specified
	accessLogEmitIntervalAnnotation := annotations[ServiceAnnotationLoadBalancerAccessLogEmitInterval]
	if accessLogEmitIntervalAnnotation != "" {
		accessLogEmitInterval, err := strconv.ParseInt(accessLogEmitIntervalAnnotation, 10, 64)
		if err != nil {
			return nil, fmt.Errorf("error parsing service annotation: %s=%s",
				ServiceAnnotationLoadBalancerAccessLogEmitInterval,
				accessLogEmitIntervalAnnotation,
			)
		}
		loadBalancerAttributes.AccessLog.EmitInterval = &accessLogEmitInterval
	}

	// Determine if access log enabled/disabled has been specified
	accessLogEnabledAnnotation := annotations[ServiceAnnotationLoadBalancerAccessLogEnabled]
	if accessLogEnabledAnnotation != "" {
		accessLogEnabled, err := strconv.ParseBool(accessLogEnabledAnnotation)
		if err != nil {
			return nil, fmt.Errorf("error parsing service annotation: %s=%s",
				ServiceAnnotationLoadBalancerAccessLogEnabled,
				accessLogEnabledAnnotation,
			)
		}
		loadBalancerAttributes.AccessLog.Enabled = &accessLogEnabled
	}

	// Determine if access log s3 bucket name has been specified
	accessLogS3BucketNameAnnotation := annotations[ServiceAnnotationLoadBalancerAccessLogS3BucketName]
	if accessLogS3BucketNameAnnotation != "" {
		loadBalancerAttributes.AccessLog.S3BucketName = &accessLogS3BucketNameAnnotation
	}

	// Determine if access log s3 bucket prefix has been specified
	accessLogS3BucketPrefixAnnotation := annotations[ServiceAnnotationLoadBalancerAccessLogS3BucketPrefix]
	if accessLogS3BucketPrefixAnnotation != "" {
		loadBalancerAttributes.AccessLog.S3BucketPrefix = &accessLogS3BucketPrefixAnnotation
	}

	// Determine if connection draining enabled/disabled has been specified
	connectionDrainingEnabledAnnotation := annotations[ServiceAnnotationLoadBalancerConnectionDrainingEnabled]
	if connectionDrainingEnabledAnnotation != "" {
		connectionDrainingEnabled, err := strconv.ParseBool(connectionDrainingEnabledAnnotation)
		if err != nil {
			return nil, fmt.Errorf("error parsing service annotation: %s=%s",
				ServiceAnnotationLoadBalancerConnectionDrainingEnabled,
				connectionDrainingEnabledAnnotation,
			)
		}
		loadBalancerAttributes.ConnectionDraining.Enabled = &connectionDrainingEnabled
	}

	// Determine if connection draining timeout has been specified
	connectionDrainingTimeoutAnnotation := annotations[ServiceAnnotationLoadBalancerConnectionDrainingTimeout]
	if connectionDrainingTimeoutAnnotation != "" {
		connectionDrainingTimeout, err := strconv.ParseInt(connectionDrainingTimeoutAnnotation, 10, 64)
		if err != nil {
			return nil, fmt.Errorf("error parsing service annotation: %s=%s",
				ServiceAnnotationLoadBalancerConnectionDrainingTimeout,
				connectionDrainingTimeoutAnnotation,
			)
		}
		loadBalancerAttributes.ConnectionDraining.Timeout = &connectionDrainingTimeout
	}

	// Determine if connection idle timeout has been specified
	connectionIdleTimeoutAnnotation := annotations[ServiceAnnotationLoadBalancerConnectionIdleTimeout]
	if connectionIdleTimeoutAnnotation != "" {
		connectionIdleTimeout, err := strconv.ParseInt(connectionIdleTimeoutAnnotation, 10, 64)
		if err != nil {
			return nil, fmt.Errorf("error parsing service annotation: %s=%s",
				ServiceAnnotationLoadBalancerConnectionIdleTimeout,
				connectionIdleTimeoutAnnotation,
			)
		}
		loadBalancerAttributes.ConnectionSettings.IdleTimeout = &connectionIdleTimeout
	}

	// Determine if cross zone load balancing enabled/disabled has been specified
	crossZoneLoadBalancingEnabledAnnotation := annotations[ServiceAnnotationLoadBalancerCrossZoneLoadBalancingEnabled]
	if crossZoneLoadBalancingEnabledAnnotation != "" {
		crossZoneLoadBalancingEnabled, err := strconv.ParseBool(crossZoneLoadBalancingEnabledAnnotation)
		if err != nil {
			return nil, fmt.Errorf("error parsing service annotation: %s=%s",
				ServiceAnnotationLoadBalancerCrossZoneLoadBalancingEnabled,
				crossZoneLoadBalancingEnabledAnnotation,
			)
		}
		loadBalancerAttributes.CrossZoneLoadBalancing.Enabled = &crossZoneLoadBalancingEnabled
	}

	// Find the subnets that the ELB will live in
	subnetIDs, err := c.findELBSubnets(internalELB)
	if err != nil {
		klog.Errorf("Error listing subnets in VPC: %q", err)
		return nil, err
	}

	// Bail out early if there are no subnets
	if len(subnetIDs) == 0 {
		return nil, fmt.Errorf("could not find any suitable subnets for creating the ELB")
	}

	loadBalancerName := c.GetLoadBalancerName(ctx, clusterName, apiService)
	serviceName := types.NamespacedName{Namespace: apiService.Namespace, Name: apiService.Name}
	securityGroupIDs, err := c.buildELBSecurityGroupList(serviceName, loadBalancerName, annotations)
	if err != nil {
		return nil, err
	}
	if len(securityGroupIDs) == 0 {
		return nil, fmt.Errorf("[BUG] ELB can't have empty list of Security Groups to be assigned, this is a Kubernetes bug, please report")
	}

	{
		// Open the load balancer's security group to the allowed source
		// ranges on every service port, plus ICMP for MTU discovery.
		ec2SourceRanges := []*ec2.IpRange{}
		for _, sourceRange := range sourceRanges.StringSlice() {
			ec2SourceRanges = append(ec2SourceRanges, &ec2.IpRange{CidrIp: aws.String(sourceRange)})
		}

		permissions := NewIPPermissionSet()
		for _, port := range apiService.Spec.Ports {
			portInt64 := int64(port.Port)
			protocol := strings.ToLower(string(port.Protocol))

			permission := &ec2.IpPermission{}
			permission.FromPort = &portInt64
			permission.ToPort = &portInt64
			permission.IpRanges = ec2SourceRanges
			permission.IpProtocol = &protocol

			permissions.Insert(permission)
		}

		// Allow ICMP fragmentation packets, important for MTU discovery
		{
			permission := &ec2.IpPermission{
				IpProtocol: aws.String("icmp"),
				FromPort:   aws.Int64(3),
				ToPort:     aws.Int64(4),
				IpRanges:   ec2SourceRanges,
			}

			permissions.Insert(permission)
		}

		// Only the first security group (the one this controller manages)
		// is reconciled; the extras from annotations are left untouched.
		_, err = c.setSecurityGroupIngress(securityGroupIDs[0], permissions)
		if err != nil {
			return nil, err
		}
	}

	// Build the load balancer itself
	loadBalancer, err := c.ensureLoadBalancer(
		serviceName,
		loadBalancerName,
		listeners,
		subnetIDs,
		securityGroupIDs,
		internalELB,
		proxyProtocol,
		loadBalancerAttributes,
		annotations,
	)
	if err != nil {
		return nil, err
	}

	if sslPolicyName, ok := annotations[ServiceAnnotationLoadBalancerSSLNegotiationPolicy]; ok {
		err := c.ensureSSLNegotiationPolicy(loadBalancer, sslPolicyName)
		if err != nil {
			return nil, err
		}

		// Apply the negotiation policy to every TLS-terminating listener port.
		for _, port := range c.getLoadBalancerTLSPorts(loadBalancer) {
			err := c.setSSLNegotiationPolicy(loadBalancerName, sslPolicyName, port)
			if err != nil {
				return nil, err
			}
		}
	}

	if path, healthCheckNodePort := servicehelpers.GetServiceHealthCheckPathPort(apiService); path != "" {
		// externalTrafficPolicy == "Local": HTTP health check against the
		// service's dedicated health-check node port.
		klog.V(4).Infof("service %v (%v) needs health checks on :%d%s)", apiService.Name, loadBalancerName, healthCheckNodePort, path)
		err = c.ensureLoadBalancerHealthCheck(loadBalancer, "HTTP", healthCheckNodePort, path, annotations)
		if err != nil {
			return nil, fmt.Errorf("Failed to ensure health check for localized service %v on node port %v: %q", loadBalancerName, healthCheckNodePort, err)
		}
	} else {
		klog.V(4).Infof("service %v does not need custom health checks", apiService.Name)
		// We only configure a TCP health-check on the first port
		var tcpHealthCheckPort int32
		for _, listener := range listeners {
			if listener.InstancePort == nil {
				continue
			}
			tcpHealthCheckPort = int32(*listener.InstancePort)
			break
		}
		annotationProtocol := strings.ToLower(annotations[ServiceAnnotationLoadBalancerBEProtocol])
		var hcProtocol string
		if annotationProtocol == "https" || annotationProtocol == "ssl" {
			hcProtocol = "SSL"
		} else {
			hcProtocol = "TCP"
		}
		// there must be no path on TCP health check
		err = c.ensureLoadBalancerHealthCheck(loadBalancer, hcProtocol, tcpHealthCheckPort, "", annotations)
		if err != nil {
			return nil, err
		}
	}

	err = c.updateInstanceSecurityGroupsForLoadBalancer(loadBalancer, instances)
	if err != nil {
		klog.Warningf("Error opening ingress rules for the load balancer to the instances: %q", err)
		return nil, err
	}

	err = c.ensureLoadBalancerInstances(aws.StringValue(loadBalancer.LoadBalancerName), loadBalancer.Instances, instances)
	if err != nil {
		klog.Warningf("Error registering instances with the load balancer: %q", err)
		return nil, err
	}

	klog.V(1).Infof("Loadbalancer %s (%v) has DNS name %s", loadBalancerName, serviceName, aws.StringValue(loadBalancer.DNSName))

	// TODO: Wait for creation?
	status := toStatus(loadBalancer)
	return status, nil
}
// GetLoadBalancer is an implementation of LoadBalancer.GetLoadBalancer
func (c *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
	name := c.GetLoadBalancerName(ctx, clusterName, service)

	// NLB-annotated services are looked up through the elbv2 API.
	if isNLB(service.Annotations) {
		lb, err := c.describeLoadBalancerv2(name)
		if err != nil {
			return nil, false, err
		}
		if lb == nil {
			// The load balancer does not exist.
			return nil, false, nil
		}
		return v2toStatus(lb), true, nil
	}

	lb, err := c.describeLoadBalancer(name)
	if err != nil {
		return nil, false, err
	}
	if lb == nil {
		// The load balancer does not exist.
		return nil, false, nil
	}

	return toStatus(lb), true, nil
}
// GetLoadBalancerName is an implementation of LoadBalancer.GetLoadBalancerName
//
// Note: clusterName is currently unused; the name is derived solely from the
// service via the generic cloudprovider default.
func (c *Cloud) GetLoadBalancerName(ctx context.Context, clusterName string, service *v1.Service) string {
	// TODO: replace DefaultLoadBalancerName to generate more meaningful loadbalancer names.
	return cloudprovider.DefaultLoadBalancerName(service)
}
// toStatus converts a classic ELB description into a LoadBalancerStatus,
// populating the ingress hostname from the ELB's DNS name when present.
func toStatus(lb *elb.LoadBalancerDescription) *v1.LoadBalancerStatus {
	status := &v1.LoadBalancerStatus{}
	if hostname := aws.StringValue(lb.DNSName); hostname != "" {
		status.Ingress = []v1.LoadBalancerIngress{{Hostname: hostname}}
	}
	return status
}
// v2toStatus converts an elbv2 (NLB) load balancer into a LoadBalancerStatus.
// The ingress hostname is populated only when the load balancer has a DNS
// name and is in one of the successful states (Active or Provisioning).
func v2toStatus(lb *elbv2.LoadBalancer) *v1.LoadBalancerStatus {
	status := &v1.LoadBalancerStatus{}
	if lb == nil {
		klog.Error("[BUG] v2toStatus got nil input, this is a Kubernetes bug, please report")
		return status
	}
	// Guard against a nil State: dereferencing lb.State.Code on a partially
	// populated API response would otherwise panic.
	if lb.State == nil {
		return status
	}

	// We check for Active or Provisioning, the only successful statuses
	if aws.StringValue(lb.DNSName) != "" && (aws.StringValue(lb.State.Code) == elbv2.LoadBalancerStateEnumActive ||
		aws.StringValue(lb.State.Code) == elbv2.LoadBalancerStateEnumProvisioning) {
		var ingress v1.LoadBalancerIngress
		ingress.Hostname = aws.StringValue(lb.DNSName)
		status.Ingress = []v1.LoadBalancerIngress{ingress}
	}

	return status
}
// Returns the first security group for an instance, or nil
// We only create instances with one security group, so we don't expect multiple security groups.
// However, if there are multiple security groups, we will choose the one tagged with our cluster filter.
// Otherwise we will return an error.
func findSecurityGroupForInstance(instance *ec2.Instance, taggedSecurityGroups map[string]*ec2.SecurityGroup) (*ec2.GroupIdentifier, error) {
	instanceID := aws.StringValue(instance.InstanceId)

	// Partition the instance's groups by whether they carry our cluster tag.
	var tagged, untagged []*ec2.GroupIdentifier
	for _, group := range instance.SecurityGroups {
		groupID := aws.StringValue(group.GroupId)
		if groupID == "" {
			klog.Warningf("Ignoring security group without id for instance %q: %v", instanceID, group)
			continue
		}
		if _, ok := taggedSecurityGroups[groupID]; ok {
			tagged = append(tagged, group)
		} else {
			untagged = append(untagged, group)
		}
	}

	switch {
	case len(tagged) == 1:
		// The common case: exactly one k8s-owned group.
		return tagged[0], nil
	case len(tagged) > 1:
		// We create instances with one SG
		// If users create multiple SGs, they must tag one of them as being k8s owned
		taggedGroups := ""
		for _, v := range tagged {
			taggedGroups += fmt.Sprintf("%s(%s) ", *v.GroupId, *v.GroupName)
		}
		return nil, fmt.Errorf("Multiple tagged security groups found for instance %s; ensure only the k8s security group is tagged; the tagged groups were %v", instanceID, taggedGroups)
	case len(untagged) == 1:
		// For back-compat, we will allow a single untagged SG
		return untagged[0], nil
	case len(untagged) > 1:
		return nil, fmt.Errorf("Multiple untagged security groups found for instance %s; ensure the k8s security group is tagged", instanceID)
	}

	klog.Warningf("No security group found for instance %q", instanceID)
	return nil, nil
}
// getTaggedSecurityGroups returns, keyed by group ID, every security group
// tagged as belonging to this cluster.
func (c *Cloud) getTaggedSecurityGroups() (map[string]*ec2.SecurityGroup, error) {
	groups, err := c.ec2.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{})
	if err != nil {
		return nil, fmt.Errorf("error querying security groups: %q", err)
	}

	tagged := make(map[string]*ec2.SecurityGroup)
	for _, group := range groups {
		// Skip groups that do not carry our cluster tag.
		if !c.tagging.hasClusterTag(group.Tags) {
			continue
		}
		id := aws.StringValue(group.GroupId)
		if id == "" {
			klog.Warningf("Ignoring group without id: %v", group)
			continue
		}
		tagged[id] = group
	}
	return tagged, nil
}
// Open security group ingress rules on the instances so that the load balancer can talk to them
// Will also remove any security groups ingress rules for the load balancer that are _not_ needed for allInstances
//
// The reconciliation works in three phases:
//  1. find the (single) security group attached to the ELB,
//  2. diff the set of instance security groups that currently allow ingress
//     from that group against the set the given instances actually need,
//  3. authorize the missing rules and revoke the stale ones.
func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancerDescription, instances map[InstanceID]*ec2.Instance) error {
	// Operator opt-out: never touch instance security groups.
	if c.cfg.Global.DisableSecurityGroupIngress {
		return nil
	}
	// Determine the load balancer security group id
	loadBalancerSecurityGroupID := ""
	for _, securityGroup := range lb.SecurityGroups {
		if aws.StringValue(securityGroup) == "" {
			continue
		}
		if loadBalancerSecurityGroupID != "" {
			// We create LBs with one SG
			klog.Warningf("Multiple security groups for load balancer: %q", aws.StringValue(lb.LoadBalancerName))
		}
		// Last non-empty group wins if there are (unexpectedly) several.
		loadBalancerSecurityGroupID = *securityGroup
	}
	if loadBalancerSecurityGroupID == "" {
		return fmt.Errorf("could not determine security group for load balancer: %s", aws.StringValue(lb.LoadBalancerName))
	}
	// Get the actual list of groups that allow ingress from the load-balancer
	var actualGroups []*ec2.SecurityGroup
	{
		// "ip-permission.group-id" matches groups whose ingress rules reference
		// the LB's security group as a source.
		describeRequest := &ec2.DescribeSecurityGroupsInput{}
		describeRequest.Filters = []*ec2.Filter{
			newEc2Filter("ip-permission.group-id", loadBalancerSecurityGroupID),
		}
		response, err := c.ec2.DescribeSecurityGroups(describeRequest)
		if err != nil {
			return fmt.Errorf("error querying security groups for ELB: %q", err)
		}
		// Only manage groups owned by this cluster; leave others alone.
		for _, sg := range response {
			if !c.tagging.hasClusterTag(sg.Tags) {
				continue
			}
			actualGroups = append(actualGroups, sg)
		}
	}
	taggedSecurityGroups, err := c.getTaggedSecurityGroups()
	if err != nil {
		return fmt.Errorf("error querying for tagged security groups: %q", err)
	}
	// Open the firewall from the load balancer to the instance
	// We don't actually have a trivial way to know in advance which security group the instance is in
	// (it is probably the node security group, but we don't easily have that).
	// However, we _do_ have the list of security groups on the instance records.
	// Map containing the changes we want to make; true to add, false to remove
	instanceSecurityGroupIds := map[string]bool{}
	// Scan instances for groups we want open
	for _, instance := range instances {
		securityGroup, err := findSecurityGroupForInstance(instance, taggedSecurityGroups)
		if err != nil {
			return err
		}
		if securityGroup == nil {
			klog.Warning("Ignoring instance without security group: ", aws.StringValue(instance.InstanceId))
			continue
		}
		id := aws.StringValue(securityGroup.GroupId)
		if id == "" {
			klog.Warningf("found security group without id: %v", securityGroup)
			continue
		}
		instanceSecurityGroupIds[id] = true
	}
	// Compare to actual groups
	for _, actualGroup := range actualGroups {
		actualGroupID := aws.StringValue(actualGroup.GroupId)
		if actualGroupID == "" {
			klog.Warning("Ignoring group without ID: ", actualGroup)
			continue
		}
		adding, found := instanceSecurityGroupIds[actualGroupID]
		if found && adding {
			// We don't need to make a change; the permission is already in place
			delete(instanceSecurityGroupIds, actualGroupID)
		} else {
			// This group is not needed by allInstances; delete it
			instanceSecurityGroupIds[actualGroupID] = false
		}
	}
	// Apply the computed diff: true => authorize, false => revoke.
	for instanceSecurityGroupID, add := range instanceSecurityGroupIds {
		if add {
			klog.V(2).Infof("Adding rule for traffic from the load balancer (%s) to instances (%s)", loadBalancerSecurityGroupID, instanceSecurityGroupID)
		} else {
			klog.V(2).Infof("Removing rule for traffic from the load balancer (%s) to instance (%s)", loadBalancerSecurityGroupID, instanceSecurityGroupID)
		}
		// Rule: all protocols/ports, source = the LB's security group.
		sourceGroupID := &ec2.UserIdGroupPair{}
		sourceGroupID.GroupId = &loadBalancerSecurityGroupID
		allProtocols := "-1"
		permission := &ec2.IpPermission{}
		permission.IpProtocol = &allProtocols
		permission.UserIdGroupPairs = []*ec2.UserIdGroupPair{sourceGroupID}
		permissions := []*ec2.IpPermission{permission}
		if add {
			changed, err := c.addSecurityGroupIngress(instanceSecurityGroupID, permissions)
			if err != nil {
				return err
			}
			if !changed {
				klog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID)
			}
		} else {
			changed, err := c.removeSecurityGroupIngress(instanceSecurityGroupID, permissions)
			if err != nil {
				return err
			}
			if !changed {
				klog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID)
			}
		}
	}
	return nil
}
// EnsureLoadBalancerDeleted implements LoadBalancer.EnsureLoadBalancerDeleted.
//
// Deletion is idempotent: if the load balancer no longer exists this returns
// nil. For NLBs it deletes the ELBV2 load balancer, its target groups, and
// the NLB security group rules. For classic ELBs it deregisters the LB from
// the instance security groups, deletes the LB, then deletes the LB's own
// security group(s), retrying until AWS finishes tearing down the LB.
func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error {
	loadBalancerName := c.GetLoadBalancerName(ctx, clusterName, service)
	if isNLB(service.Annotations) {
		lb, err := c.describeLoadBalancerv2(loadBalancerName)
		if err != nil {
			return err
		}
		if lb == nil {
			klog.Info("Load balancer already deleted: ", loadBalancerName)
			return nil
		}
		// Delete the LoadBalancer and target groups
		//
		// Deleting a target group while associated with a load balancer will
		// fail. We delete the loadbalancer first. This does leave the
		// possibility of zombie target groups if DeleteLoadBalancer() fails
		//
		// * Get target groups for NLB
		// * Delete Load Balancer
		// * Delete target groups
		// * Clean up SecurityGroupRules
		{
			targetGroups, err := c.elbv2.DescribeTargetGroups(
				&elbv2.DescribeTargetGroupsInput{LoadBalancerArn: lb.LoadBalancerArn},
			)
			if err != nil {
				return fmt.Errorf("error listing target groups before deleting load balancer: %q", err)
			}
			_, err = c.elbv2.DeleteLoadBalancer(
				&elbv2.DeleteLoadBalancerInput{LoadBalancerArn: lb.LoadBalancerArn},
			)
			if err != nil {
				return fmt.Errorf("error deleting load balancer %q: %v", loadBalancerName, err)
			}
			for _, group := range targetGroups.TargetGroups {
				_, err := c.elbv2.DeleteTargetGroup(
					&elbv2.DeleteTargetGroupInput{TargetGroupArn: group.TargetGroupArn},
				)
				if err != nil {
					return fmt.Errorf("error deleting target groups after deleting load balancer: %q", err)
				}
			}
		}
		// Passing nil for ports/instances/subnets removes all rules this NLB added.
		return c.updateInstanceSecurityGroupsForNLB(loadBalancerName, nil, nil, nil)
	}
	lb, err := c.describeLoadBalancer(loadBalancerName)
	if err != nil {
		return err
	}
	if lb == nil {
		klog.Info("Load balancer already deleted: ", loadBalancerName)
		return nil
	}
	{
		// De-authorize the load balancer security group from the instances security group
		err = c.updateInstanceSecurityGroupsForLoadBalancer(lb, nil)
		if err != nil {
			klog.Errorf("Error deregistering load balancer from instance security groups: %q", err)
			return err
		}
	}
	{
		// Delete the load balancer itself
		request := &elb.DeleteLoadBalancerInput{}
		request.LoadBalancerName = lb.LoadBalancerName
		_, err = c.elb.DeleteLoadBalancer(request)
		if err != nil {
			// TODO: Check if error was because load balancer was concurrently deleted
			klog.Errorf("Error deleting load balancer: %q", err)
			return err
		}
	}
	{
		// Delete the security group(s) for the load balancer
		// Note that this is annoying: the load balancer disappears from the API immediately, but it is still
		// deleting in the background. We get a DependencyViolation until the load balancer has deleted itself
		var loadBalancerSGs = aws.StringValueSlice(lb.SecurityGroups)
		describeRequest := &ec2.DescribeSecurityGroupsInput{}
		describeRequest.Filters = []*ec2.Filter{
			newEc2Filter("group-id", loadBalancerSGs...),
		}
		response, err := c.ec2.DescribeSecurityGroups(describeRequest)
		if err != nil {
			return fmt.Errorf("error querying security groups for ELB: %q", err)
		}
		// Collect the security groups to delete
		securityGroupIDs := map[string]struct{}{}
		for _, sg := range response {
			sgID := aws.StringValue(sg.GroupId)
			if sgID == c.cfg.Global.ElbSecurityGroup {
				//We don't want to delete a security group that was defined in the Cloud Configuration.
				continue
			}
			if sgID == "" {
				klog.Warningf("Ignoring empty security group in %s", service.Name)
				continue
			}
			// Only delete groups owned by this cluster (cluster tag present).
			if !c.tagging.hasClusterTag(sg.Tags) {
				klog.Warningf("Ignoring security group with no cluster tag in %s", service.Name)
				continue
			}
			securityGroupIDs[sgID] = struct{}{}
		}
		// Loop through and try to delete them
		// Retry for up to 10 minutes; DependencyViolation is expected while the
		// LB is still tearing down in the background.
		timeoutAt := time.Now().Add(time.Second * 600)
		for {
			for securityGroupID := range securityGroupIDs {
				request := &ec2.DeleteSecurityGroupInput{}
				request.GroupId = &securityGroupID
				_, err := c.ec2.DeleteSecurityGroup(request)
				if err == nil {
					// Deleted successfully; stop retrying this group.
					delete(securityGroupIDs, securityGroupID)
				} else {
					ignore := false
					if awsError, ok := err.(awserr.Error); ok {
						if awsError.Code() == "DependencyViolation" {
							klog.V(2).Infof("Ignoring DependencyViolation while deleting load-balancer security group (%s), assuming because LB is in process of deleting", securityGroupID)
							ignore = true
						}
					}
					if !ignore {
						return fmt.Errorf("error while deleting load balancer security group (%s): %q", securityGroupID, err)
					}
				}
			}
			if len(securityGroupIDs) == 0 {
				klog.V(2).Info("Deleted all security groups for load balancer: ", service.Name)
				break
			}
			if time.Now().After(timeoutAt) {
				ids := []string{}
				for id := range securityGroupIDs {
					ids = append(ids, id)
				}
				return fmt.Errorf("timed out deleting ELB: %s. Could not delete security groups %v", service.Name, strings.Join(ids, ","))
			}
			klog.V(2).Info("Waiting for load-balancer to delete so we can delete security groups: ", service.Name)
			time.Sleep(10 * time.Second)
		}
	}
	return nil
}
// UpdateLoadBalancer implements LoadBalancer.UpdateLoadBalancer.
//
// For NLBs this verifies the load balancer still exists and then delegates to
// EnsureLoadBalancer, which reconciles the full desired state. For classic
// ELBs it re-applies the SSL negotiation policy (if annotated), syncs the
// registered backend instances with the given nodes, and reconciles the
// instance security group ingress rules.
func (c *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error {
	instances, err := c.findInstancesForELB(nodes)
	if err != nil {
		return err
	}
	loadBalancerName := c.GetLoadBalancerName(ctx, clusterName, service)
	if isNLB(service.Annotations) {
		lb, err := c.describeLoadBalancerv2(loadBalancerName)
		if err != nil {
			return err
		}
		if lb == nil {
			return fmt.Errorf("Load balancer not found")
		}
		_, err = c.EnsureLoadBalancer(ctx, clusterName, service, nodes)
		return err
	}
	lb, err := c.describeLoadBalancer(loadBalancerName)
	if err != nil {
		return err
	}
	if lb == nil {
		return fmt.Errorf("Load balancer not found")
	}
	// Re-apply the SSL negotiation policy to every TLS listener, if requested.
	if sslPolicyName, ok := service.Annotations[ServiceAnnotationLoadBalancerSSLNegotiationPolicy]; ok {
		err := c.ensureSSLNegotiationPolicy(lb, sslPolicyName)
		if err != nil {
			return err
		}
		for _, port := range c.getLoadBalancerTLSPorts(lb) {
			err := c.setSSLNegotiationPolicy(loadBalancerName, sslPolicyName, port)
			if err != nil {
				return err
			}
		}
	}
	// Register/deregister instances so the ELB backend set matches the nodes.
	// BUG FIX: this previously returned nil on a non-nil error, silently
	// swallowing instance (de)registration failures; propagate the error.
	err = c.ensureLoadBalancerInstances(aws.StringValue(lb.LoadBalancerName), lb.Instances, instances)
	if err != nil {
		return err
	}
	// Open/close security group ingress between the LB and the instances.
	return c.updateInstanceSecurityGroupsForLoadBalancer(lb, instances)
}
// Returns the instance with the specified ID.
// Errors with cloudprovider.InstanceNotFound if no instance matches, and with
// a descriptive error if the ID unexpectedly resolves to several instances.
func (c *Cloud) getInstanceByID(instanceID string) (*ec2.Instance, error) {
	instances, err := c.getInstancesByIDs([]*string{&instanceID})
	if err != nil {
		return nil, err
	}
	switch len(instances) {
	case 0:
		return nil, cloudprovider.InstanceNotFound
	case 1:
		return instances[instanceID], nil
	default:
		return nil, fmt.Errorf("multiple instances found for instance: %s", instanceID)
	}
}
// getInstancesByIDs describes the given EC2 instance IDs and returns the
// results keyed by instance ID. Instances without an ID are skipped.
// An empty input yields an empty (non-nil) map without calling AWS.
func (c *Cloud) getInstancesByIDs(instanceIDs []*string) (map[string]*ec2.Instance, error) {
	instancesByID := make(map[string]*ec2.Instance)
	if len(instanceIDs) == 0 {
		return instancesByID, nil
	}
	instances, err := c.ec2.DescribeInstances(&ec2.DescribeInstancesInput{
		InstanceIds: instanceIDs,
	})
	if err != nil {
		return nil, err
	}
	for _, instance := range instances {
		if id := aws.StringValue(instance.InstanceId); id != "" {
			instancesByID[id] = instance
		}
	}
	return instancesByID, nil
}
// getInstancesByNodeNames looks up cluster instances by their private DNS
// names, optionally restricted to the given instance states. Names are
// queried in batches of filterNodeLimit to stay under the AWS filter limit.
// Returns (nil, nil) when no instances match.
func (c *Cloud) getInstancesByNodeNames(nodeNames []string, states ...string) ([]*ec2.Instance, error) {
	names := aws.StringSlice(nodeNames)
	ec2Instances := []*ec2.Instance{}
	for start := 0; start < len(names); start += filterNodeLimit {
		stop := start + filterNodeLimit
		if stop > len(names) {
			stop = len(names)
		}
		filters := []*ec2.Filter{
			{
				Name:   aws.String("private-dns-name"),
				Values: names[start:stop],
			},
		}
		if len(states) > 0 {
			filters = append(filters, newEc2Filter("instance-state-name", states...))
		}
		batch, err := c.describeInstances(filters)
		if err != nil {
			klog.V(2).Infof("Failed to describe instances %v", nodeNames)
			return nil, err
		}
		ec2Instances = append(ec2Instances, batch...)
	}
	if len(ec2Instances) == 0 {
		klog.V(3).Infof("Failed to find any instances %v", nodeNames)
		return nil, nil
	}
	return ec2Instances, nil
}
// TODO: Move to instanceCache
// describeInstances queries EC2 with the given filters and returns only the
// instances carrying this cluster's tag.
func (c *Cloud) describeInstances(filters []*ec2.Filter) ([]*ec2.Instance, error) {
	response, err := c.ec2.DescribeInstances(&ec2.DescribeInstancesInput{
		Filters: filters,
	})
	if err != nil {
		return nil, err
	}
	var matches []*ec2.Instance
	for _, instance := range response {
		if !c.tagging.hasClusterTag(instance.Tags) {
			continue
		}
		matches = append(matches, instance)
	}
	return matches, nil
}
// mapNodeNameToPrivateDNSName maps a k8s NodeName to an AWS Instance PrivateDNSName
// This is a simple string cast
// (on AWS the node name is, by convention, the instance's private DNS name).
func mapNodeNameToPrivateDNSName(nodeName types.NodeName) string {
	return string(nodeName)
}
// mapInstanceToNodeName maps a EC2 instance to a k8s NodeName, by extracting the PrivateDNSName
// (inverse of mapNodeNameToPrivateDNSName; returns "" if the instance has no private DNS name).
func mapInstanceToNodeName(i *ec2.Instance) types.NodeName {
	return types.NodeName(aws.StringValue(i.PrivateDnsName))
}
// aliveFilter lists every EC2 instance state except "terminated"; it is used
// with the "instance-state-name" filter to exclude terminated instances when
// resolving a node name to an instance.
var aliveFilter = []string{
	ec2.InstanceStateNamePending,
	ec2.InstanceStateNameRunning,
	ec2.InstanceStateNameShuttingDown,
	ec2.InstanceStateNameStopping,
	ec2.InstanceStateNameStopped,
}
// Returns the instance with the specified node name.
// Returns nil (with a nil error) if it does not exist; errors if the name
// matches more than one non-terminated instance.
func (c *Cloud) findInstanceByNodeName(nodeName types.NodeName) (*ec2.Instance, error) {
	filters := []*ec2.Filter{
		newEc2Filter("private-dns-name", mapNodeNameToPrivateDNSName(nodeName)),
		// exclude instances in "terminated" state
		newEc2Filter("instance-state-name", aliveFilter...),
	}
	instances, err := c.describeInstances(filters)
	if err != nil {
		return nil, err
	}
	switch len(instances) {
	case 0:
		return nil, nil
	case 1:
		return instances[0], nil
	default:
		return nil, fmt.Errorf("multiple instances found for name: %s", nodeName)
	}
}
// Returns the instance with the specified node name
// Like findInstanceByNodeName, but returns error if node not found
func (c *Cloud) getInstanceByNodeName(nodeName types.NodeName) (*ec2.Instance, error) {
	var instance *ec2.Instance
	// we leverage node cache to try to retrieve node's provider id first, as
	// get instance by provider id is way more efficient than by filters in
	// aws context
	awsID, err := c.nodeNameToProviderID(nodeName)
	if err == nil {
		instance, err = c.getInstanceByID(string(awsID))
	} else {
		klog.V(3).Infof("Unable to convert node name %q to aws instanceID, fall back to findInstanceByNodeName: %v", nodeName, err)
		instance, err = c.findInstanceByNodeName(nodeName)
	}
	// Normalize "found nothing" into an explicit not-found error for callers.
	if err == nil && instance == nil {
		return nil, cloudprovider.InstanceNotFound
	}
	return instance, err
}
// getFullInstance resolves a node name into both the cached awsInstance
// wrapper and the freshly-described EC2 record. An empty nodeName refers to
// the instance this process is running on.
func (c *Cloud) getFullInstance(nodeName types.NodeName) (*awsInstance, *ec2.Instance, error) {
	if nodeName == "" {
		instance, err := c.getInstanceByID(c.selfAWSInstance.awsID)
		return c.selfAWSInstance, instance, err
	}
	instance, err := c.getInstanceByNodeName(nodeName)
	if err != nil {
		return nil, nil, err
	}
	return newAWSInstance(c.ec2, instance), instance, nil
}
// nodeNameToProviderID resolves a node name to its AWS instance ID via the
// node informer's cache (the node object's spec.providerID), avoiding an EC2
// API call. Errors if the name is empty, the informer has not synced, the
// node is unknown, or the node carries no providerID.
func (c *Cloud) nodeNameToProviderID(nodeName types.NodeName) (InstanceID, error) {
	if len(nodeName) == 0 {
		return "", fmt.Errorf("no nodeName provided")
	}
	if c.nodeInformerHasSynced == nil || !c.nodeInformerHasSynced() {
		return "", fmt.Errorf("node informer has not synced yet")
	}
	node, err := c.nodeInformer.Lister().Get(string(nodeName))
	if err != nil {
		return "", err
	}
	providerID := node.Spec.ProviderID
	if len(providerID) == 0 {
		return "", fmt.Errorf("node has no providerID")
	}
	return KubernetesInstanceID(providerID).MapToAWSInstanceID()
}
// setNodeDisk records the attach/detach state of volumeID for nodeName in the
// two-level map, lazily allocating the per-node inner map on first use.
func setNodeDisk(
	nodeDiskMap map[types.NodeName]map[KubernetesVolumeID]bool,
	volumeID KubernetesVolumeID,
	nodeName types.NodeName,
	check bool) {
	if _, ok := nodeDiskMap[nodeName]; !ok {
		nodeDiskMap[nodeName] = make(map[KubernetesVolumeID]bool)
	}
	nodeDiskMap[nodeName][volumeID] = check
}
// getInitialAttachDetachDelay returns how long to wait before the first
// poll of a volume's attachment status, based on the target state.
func getInitialAttachDetachDelay(status string) time.Duration {
	switch status {
	case "detached":
		return volumeDetachmentStatusInitialDelay
	default:
		// Attach typically takes 2-6 seconds (average is 2). Asking before 2 seconds is just waste of API quota
		return volumeAttachmentStatusInitialDelay
	}
}
UPSTREAM: <drop>: Revert "Lower DescribeVolumes polling frequency"
This reverts commit ec19d5d2910c8cb16b1e2eeab4a005a4d2a426de.
// +build !providerless
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws
import (
"context"
"errors"
"fmt"
"io"
"net"
"path"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/elb"
"github.com/aws/aws-sdk-go/service/elbv2"
"github.com/aws/aws-sdk-go/service/kms"
"github.com/aws/aws-sdk-go/service/sts"
"gopkg.in/gcfg.v1"
v1 "k8s.io/api/core/v1"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
informercorev1 "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/pkg/version"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
cloudprovider "k8s.io/cloud-provider"
nodehelpers "k8s.io/cloud-provider/node/helpers"
servicehelpers "k8s.io/cloud-provider/service/helpers"
cloudvolume "k8s.io/cloud-provider/volume"
volerr "k8s.io/cloud-provider/volume/errors"
volumehelpers "k8s.io/cloud-provider/volume/helpers"
)
// NLBHealthCheckRuleDescription is the comment used on a security group rule to
// indicate that it is used for health checks
const NLBHealthCheckRuleDescription = "kubernetes.io/rule/nlb/health"
// NLBClientRuleDescription is the comment used on a security group rule to
// indicate that it is used for client traffic
const NLBClientRuleDescription = "kubernetes.io/rule/nlb/client"
// NLBMtuDiscoveryRuleDescription is the comment used on a security group rule
// to indicate that it is used for mtu discovery
const NLBMtuDiscoveryRuleDescription = "kubernetes.io/rule/nlb/mtu"
// ProviderName is the name of this cloud provider.
const ProviderName = "aws"
// TagNameKubernetesService is the tag name we use to differentiate multiple
// services. Used currently for ELBs only.
const TagNameKubernetesService = "kubernetes.io/service-name"
// TagNameSubnetInternalELB is the tag name used on a subnet to designate that
// it should be used for internal ELBs
const TagNameSubnetInternalELB = "kubernetes.io/role/internal-elb"
// TagNameSubnetPublicELB is the tag name used on a subnet to designate that
// it should be used for internet ELBs
const TagNameSubnetPublicELB = "kubernetes.io/role/elb"
// ServiceAnnotationLoadBalancerType is the annotation used on the service
// to indicate what type of Load Balancer we want. Right now, the only accepted
// value is "nlb"
const ServiceAnnotationLoadBalancerType = "service.beta.kubernetes.io/aws-load-balancer-type"
// ServiceAnnotationLoadBalancerInternal is the annotation used on the service
// to indicate that we want an internal ELB.
const ServiceAnnotationLoadBalancerInternal = "service.beta.kubernetes.io/aws-load-balancer-internal"
// ServiceAnnotationLoadBalancerProxyProtocol is the annotation used on the
// service to enable the proxy protocol on an ELB. Right now we only accept the
// value "*" which means enable the proxy protocol on all ELB backends. In the
// future we could adjust this to allow setting the proxy protocol only on
// certain backends.
const ServiceAnnotationLoadBalancerProxyProtocol = "service.beta.kubernetes.io/aws-load-balancer-proxy-protocol"
// ServiceAnnotationLoadBalancerAccessLogEmitInterval is the annotation used to
// specify access log emit interval.
const ServiceAnnotationLoadBalancerAccessLogEmitInterval = "service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval"
// ServiceAnnotationLoadBalancerAccessLogEnabled is the annotation used on the
// service to enable or disable access logs.
const ServiceAnnotationLoadBalancerAccessLogEnabled = "service.beta.kubernetes.io/aws-load-balancer-access-log-enabled"
// ServiceAnnotationLoadBalancerAccessLogS3BucketName is the annotation used to
// specify access log s3 bucket name.
const ServiceAnnotationLoadBalancerAccessLogS3BucketName = "service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name"
// ServiceAnnotationLoadBalancerAccessLogS3BucketPrefix is the annotation used
// to specify access log s3 bucket prefix.
const ServiceAnnotationLoadBalancerAccessLogS3BucketPrefix = "service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix"
// ServiceAnnotationLoadBalancerConnectionDrainingEnabled is the annotation
// used on the service to enable or disable connection draining.
const ServiceAnnotationLoadBalancerConnectionDrainingEnabled = "service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled"
// ServiceAnnotationLoadBalancerConnectionDrainingTimeout is the annotation
// used on the service to specify a connection draining timeout.
const ServiceAnnotationLoadBalancerConnectionDrainingTimeout = "service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout"
// ServiceAnnotationLoadBalancerConnectionIdleTimeout is the annotation used
// on the service to specify the idle connection timeout.
const ServiceAnnotationLoadBalancerConnectionIdleTimeout = "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout"
// ServiceAnnotationLoadBalancerCrossZoneLoadBalancingEnabled is the annotation
// used on the service to enable or disable cross-zone load balancing.
const ServiceAnnotationLoadBalancerCrossZoneLoadBalancingEnabled = "service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled"
// ServiceAnnotationLoadBalancerExtraSecurityGroups is the annotation used
// on the service to specify additional security groups to be added to ELB created
const ServiceAnnotationLoadBalancerExtraSecurityGroups = "service.beta.kubernetes.io/aws-load-balancer-extra-security-groups"
// ServiceAnnotationLoadBalancerSecurityGroups is the annotation used
// on the service to specify the security groups to be added to ELB created. Differently from the annotation
// "service.beta.kubernetes.io/aws-load-balancer-extra-security-groups", this replaces all other security groups previously assigned to the ELB.
const ServiceAnnotationLoadBalancerSecurityGroups = "service.beta.kubernetes.io/aws-load-balancer-security-groups"
// ServiceAnnotationLoadBalancerCertificate is the annotation used on the
// service to request a secure listener. Value is a valid certificate ARN.
// For more, see http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-listener-config.html
// CertARN is an IAM or CM certificate ARN, e.g. arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012
const ServiceAnnotationLoadBalancerCertificate = "service.beta.kubernetes.io/aws-load-balancer-ssl-cert"
// ServiceAnnotationLoadBalancerSSLPorts is the annotation used on the service
// to specify a comma-separated list of ports that will use SSL/HTTPS
// listeners. Defaults to '*' (all).
const ServiceAnnotationLoadBalancerSSLPorts = "service.beta.kubernetes.io/aws-load-balancer-ssl-ports"
// ServiceAnnotationLoadBalancerSSLNegotiationPolicy is the annotation used on
// the service to specify a SSL negotiation settings for the HTTPS/SSL listeners
// of your load balancer. Defaults to AWS's default
const ServiceAnnotationLoadBalancerSSLNegotiationPolicy = "service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy"
// ServiceAnnotationLoadBalancerBEProtocol is the annotation used on the service
// to specify the protocol spoken by the backend (pod) behind a listener.
// If `http` (default) or `https`, an HTTPS listener that terminates the
// connection and parses headers is created.
// If set to `ssl` or `tcp`, a "raw" SSL listener is used.
// If set to `http` and `aws-load-balancer-ssl-cert` is not used then
// a HTTP listener is used.
const ServiceAnnotationLoadBalancerBEProtocol = "service.beta.kubernetes.io/aws-load-balancer-backend-protocol"
// ServiceAnnotationLoadBalancerAdditionalTags is the annotation used on the service
// to specify a comma-separated list of key-value pairs which will be recorded as
// additional tags in the ELB.
// For example: "Key1=Val1,Key2=Val2,KeyNoVal1=,KeyNoVal2"
const ServiceAnnotationLoadBalancerAdditionalTags = "service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags"
// ServiceAnnotationLoadBalancerHCHealthyThreshold is the annotation used on
// the service to specify the number of successive successful health checks
// required for a backend to be considered healthy for traffic.
const ServiceAnnotationLoadBalancerHCHealthyThreshold = "service.beta.kubernetes.io/aws-load-balancer-healthcheck-healthy-threshold"
// ServiceAnnotationLoadBalancerHCUnhealthyThreshold is the annotation used
// on the service to specify the number of unsuccessful health checks
// required for a backend to be considered unhealthy for traffic
const ServiceAnnotationLoadBalancerHCUnhealthyThreshold = "service.beta.kubernetes.io/aws-load-balancer-healthcheck-unhealthy-threshold"
// ServiceAnnotationLoadBalancerHCTimeout is the annotation used on the
// service to specify, in seconds, how long to wait before marking a health
// check as failed.
const ServiceAnnotationLoadBalancerHCTimeout = "service.beta.kubernetes.io/aws-load-balancer-healthcheck-timeout"
// ServiceAnnotationLoadBalancerHCInterval is the annotation used on the
// service to specify, in seconds, the interval between health checks.
const ServiceAnnotationLoadBalancerHCInterval = "service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval"
// ServiceAnnotationLoadBalancerEIPAllocations is the annotation used on the
// service to specify a comma separated list of EIP allocations to use as
// static IP addresses for the NLB. Only supported on elbv2 (NLB)
const ServiceAnnotationLoadBalancerEIPAllocations = "service.beta.kubernetes.io/aws-load-balancer-eip-allocations"
// Event key when a volume is stuck on attaching state when being attached to a volume
const volumeAttachmentStuck = "VolumeAttachmentStuck"
// Indicates that a node has volumes stuck in attaching state and hence it is not fit for scheduling more pods
const nodeWithImpairedVolumes = "NodeWithImpairedVolumes"
const (
	// volumeAttachmentStatusConsecutiveErrorLimit is the number of consecutive errors we will ignore when waiting for a volume to attach/detach
	volumeAttachmentStatusConsecutiveErrorLimit = 10
	// most attach/detach operations on AWS finish within 1-4 seconds
	// By using 1 second starting interval with a backoff of 1.8
	// we get - [1, 1.8, 3.24, 5.832000000000001, 10.4976]
	// in total we wait for 2601 seconds
	volumeAttachmentStatusInitialDelay = 1 * time.Second
	volumeAttachmentStatusFactor       = 1.8
	volumeAttachmentStatusSteps        = 13
	// createTag* is configuration of exponential backoff for CreateTag call. We
	// retry mainly because if we create an object, we cannot tag it until it is
	// "fully created" (eventual consistency). Starting with 1 second, doubling
	// it every step and taking 9 steps results in 255 second total waiting
	// time.
	createTagInitialDelay = 1 * time.Second
	createTagFactor       = 2.0
	createTagSteps        = 9
	// volumeCreate* is configuration of exponential backoff for created volume.
	// On a random AWS account (shared among several developers) it took 4s on
	// average, 8s max.
	volumeCreateInitialDelay  = 5 * time.Second
	volumeCreateBackoffFactor = 1.2
	volumeCreateBackoffSteps  = 10
	// Number of node names that can be added to a filter. The AWS limit is 200
	// but we are using a lower limit on purpose
	filterNodeLimit = 150
)
// awsTagNameMasterRoles is a set of well-known AWS tag names that indicate the instance is a master
// The major consequence is that it is then not considered for AWS zone discovery for dynamic volume creation.
var awsTagNameMasterRoles = sets.NewString("kubernetes.io/role/master", "k8s.io/role/master")
// Maps from backend protocol to ELB protocol
// NOTE(review): values appear to be the secure listener protocol used when an
// SSL certificate is configured (http/https -> https, tcp/ssl -> ssl) —
// confirm against the listener-creation call site.
var backendProtocolMapping = map[string]string{
	"https": "https",
	"http":  "https",
	"ssl":   "ssl",
	"tcp":   "ssl",
}
// MaxReadThenCreateRetries sets the maximum number of attempts we will make when
// we read to see if something exists and then try to create it if we didn't find it.
// This can fail once in a consistent system if done in parallel
// In an eventually consistent system, it could fail unboundedly
const MaxReadThenCreateRetries = 30
// DefaultVolumeType specifies which storage to use for newly created Volumes
// TODO: Remove when user/admin can configure volume types and thus we don't
// need hardcoded defaults.
const DefaultVolumeType = "gp2"
// Services is an abstraction over AWS, to allow mocking/other implementations
// Each method constructs a regional client for the corresponding AWS service.
type Services interface {
	Compute(region string) (EC2, error)
	LoadBalancing(region string) (ELB, error)
	LoadBalancingV2(region string) (ELBV2, error)
	Autoscaling(region string) (ASG, error)
	Metadata() (EC2Metadata, error)
	KeyManagement(region string) (KMS, error)
}
// EC2 is an abstraction over the AWS EC2 client, to allow mocking/other implementations
// Note that the DescribeX functions return a list, so callers don't need to deal with paging
// TODO: Should we rename this to AWS (EBS & ELB are not technically part of EC2)
type EC2 interface {
	// Query EC2 for instances matching the filter
	DescribeInstances(request *ec2.DescribeInstancesInput) ([]*ec2.Instance, error)
	// Attach a volume to an instance
	AttachVolume(*ec2.AttachVolumeInput) (*ec2.VolumeAttachment, error)
	// Detach a volume from an instance it is attached to
	DetachVolume(request *ec2.DetachVolumeInput) (resp *ec2.VolumeAttachment, err error)
	// Lists volumes
	DescribeVolumes(request *ec2.DescribeVolumesInput) ([]*ec2.Volume, error)
	// Create an EBS volume
	CreateVolume(request *ec2.CreateVolumeInput) (resp *ec2.Volume, err error)
	// Delete an EBS volume
	DeleteVolume(*ec2.DeleteVolumeInput) (*ec2.DeleteVolumeOutput, error)
	ModifyVolume(*ec2.ModifyVolumeInput) (*ec2.ModifyVolumeOutput, error)
	DescribeVolumeModifications(*ec2.DescribeVolumesModificationsInput) ([]*ec2.VolumeModification, error)
	// Security group management
	DescribeSecurityGroups(request *ec2.DescribeSecurityGroupsInput) ([]*ec2.SecurityGroup, error)
	CreateSecurityGroup(*ec2.CreateSecurityGroupInput) (*ec2.CreateSecurityGroupOutput, error)
	DeleteSecurityGroup(request *ec2.DeleteSecurityGroupInput) (*ec2.DeleteSecurityGroupOutput, error)
	AuthorizeSecurityGroupIngress(*ec2.AuthorizeSecurityGroupIngressInput) (*ec2.AuthorizeSecurityGroupIngressOutput, error)
	RevokeSecurityGroupIngress(*ec2.RevokeSecurityGroupIngressInput) (*ec2.RevokeSecurityGroupIngressOutput, error)
	DescribeSubnets(*ec2.DescribeSubnetsInput) ([]*ec2.Subnet, error)
	CreateTags(*ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error)
	// Routing (used by the routes controller)
	DescribeRouteTables(request *ec2.DescribeRouteTablesInput) ([]*ec2.RouteTable, error)
	CreateRoute(request *ec2.CreateRouteInput) (*ec2.CreateRouteOutput, error)
	DeleteRoute(request *ec2.DeleteRouteInput) (*ec2.DeleteRouteOutput, error)
	ModifyInstanceAttribute(request *ec2.ModifyInstanceAttributeInput) (*ec2.ModifyInstanceAttributeOutput, error)
	DescribeVpcs(input *ec2.DescribeVpcsInput) (*ec2.DescribeVpcsOutput, error)
}
// ELB is a simple pass-through of AWS' ELB client interface, which allows for testing
type ELB interface {
	// Load balancer lifecycle
	CreateLoadBalancer(*elb.CreateLoadBalancerInput) (*elb.CreateLoadBalancerOutput, error)
	DeleteLoadBalancer(*elb.DeleteLoadBalancerInput) (*elb.DeleteLoadBalancerOutput, error)
	DescribeLoadBalancers(*elb.DescribeLoadBalancersInput) (*elb.DescribeLoadBalancersOutput, error)
	AddTags(*elb.AddTagsInput) (*elb.AddTagsOutput, error)
	// Backend instance registration
	RegisterInstancesWithLoadBalancer(*elb.RegisterInstancesWithLoadBalancerInput) (*elb.RegisterInstancesWithLoadBalancerOutput, error)
	DeregisterInstancesFromLoadBalancer(*elb.DeregisterInstancesFromLoadBalancerInput) (*elb.DeregisterInstancesFromLoadBalancerOutput, error)
	// Policies (proxy protocol, SSL negotiation)
	CreateLoadBalancerPolicy(*elb.CreateLoadBalancerPolicyInput) (*elb.CreateLoadBalancerPolicyOutput, error)
	SetLoadBalancerPoliciesForBackendServer(*elb.SetLoadBalancerPoliciesForBackendServerInput) (*elb.SetLoadBalancerPoliciesForBackendServerOutput, error)
	SetLoadBalancerPoliciesOfListener(input *elb.SetLoadBalancerPoliciesOfListenerInput) (*elb.SetLoadBalancerPoliciesOfListenerOutput, error)
	DescribeLoadBalancerPolicies(input *elb.DescribeLoadBalancerPoliciesInput) (*elb.DescribeLoadBalancerPoliciesOutput, error)
	// Subnets, listeners, security groups, health checks, attributes
	DetachLoadBalancerFromSubnets(*elb.DetachLoadBalancerFromSubnetsInput) (*elb.DetachLoadBalancerFromSubnetsOutput, error)
	AttachLoadBalancerToSubnets(*elb.AttachLoadBalancerToSubnetsInput) (*elb.AttachLoadBalancerToSubnetsOutput, error)
	CreateLoadBalancerListeners(*elb.CreateLoadBalancerListenersInput) (*elb.CreateLoadBalancerListenersOutput, error)
	DeleteLoadBalancerListeners(*elb.DeleteLoadBalancerListenersInput) (*elb.DeleteLoadBalancerListenersOutput, error)
	ApplySecurityGroupsToLoadBalancer(*elb.ApplySecurityGroupsToLoadBalancerInput) (*elb.ApplySecurityGroupsToLoadBalancerOutput, error)
	ConfigureHealthCheck(*elb.ConfigureHealthCheckInput) (*elb.ConfigureHealthCheckOutput, error)
	DescribeLoadBalancerAttributes(*elb.DescribeLoadBalancerAttributesInput) (*elb.DescribeLoadBalancerAttributesOutput, error)
	ModifyLoadBalancerAttributes(*elb.ModifyLoadBalancerAttributesInput) (*elb.ModifyLoadBalancerAttributesOutput, error)
}
// ELBV2 is a simple pass-through of AWS' ELBV2 client interface, which allows for testing
type ELBV2 interface {
	AddTags(input *elbv2.AddTagsInput) (*elbv2.AddTagsOutput, error)
	// Load balancer lifecycle and attributes.
	CreateLoadBalancer(*elbv2.CreateLoadBalancerInput) (*elbv2.CreateLoadBalancerOutput, error)
	DescribeLoadBalancers(*elbv2.DescribeLoadBalancersInput) (*elbv2.DescribeLoadBalancersOutput, error)
	DeleteLoadBalancer(*elbv2.DeleteLoadBalancerInput) (*elbv2.DeleteLoadBalancerOutput, error)
	ModifyLoadBalancerAttributes(*elbv2.ModifyLoadBalancerAttributesInput) (*elbv2.ModifyLoadBalancerAttributesOutput, error)
	DescribeLoadBalancerAttributes(*elbv2.DescribeLoadBalancerAttributesInput) (*elbv2.DescribeLoadBalancerAttributesOutput, error)
	// Target group lifecycle, attributes, and target registration.
	CreateTargetGroup(*elbv2.CreateTargetGroupInput) (*elbv2.CreateTargetGroupOutput, error)
	DescribeTargetGroups(*elbv2.DescribeTargetGroupsInput) (*elbv2.DescribeTargetGroupsOutput, error)
	ModifyTargetGroup(*elbv2.ModifyTargetGroupInput) (*elbv2.ModifyTargetGroupOutput, error)
	DeleteTargetGroup(*elbv2.DeleteTargetGroupInput) (*elbv2.DeleteTargetGroupOutput, error)
	DescribeTargetHealth(input *elbv2.DescribeTargetHealthInput) (*elbv2.DescribeTargetHealthOutput, error)
	DescribeTargetGroupAttributes(*elbv2.DescribeTargetGroupAttributesInput) (*elbv2.DescribeTargetGroupAttributesOutput, error)
	ModifyTargetGroupAttributes(*elbv2.ModifyTargetGroupAttributesInput) (*elbv2.ModifyTargetGroupAttributesOutput, error)
	RegisterTargets(*elbv2.RegisterTargetsInput) (*elbv2.RegisterTargetsOutput, error)
	DeregisterTargets(*elbv2.DeregisterTargetsInput) (*elbv2.DeregisterTargetsOutput, error)
	// Listener lifecycle.
	CreateListener(*elbv2.CreateListenerInput) (*elbv2.CreateListenerOutput, error)
	DescribeListeners(*elbv2.DescribeListenersInput) (*elbv2.DescribeListenersOutput, error)
	DeleteListener(*elbv2.DeleteListenerInput) (*elbv2.DeleteListenerOutput, error)
	ModifyListener(*elbv2.ModifyListenerInput) (*elbv2.ModifyListenerOutput, error)
	// Blocks until the load balancers described by the input are deleted.
	WaitUntilLoadBalancersDeleted(*elbv2.DescribeLoadBalancersInput) error
}

// ASG is a simple pass-through of the Autoscaling client interface, which
// allows for testing.
type ASG interface {
	UpdateAutoScalingGroup(*autoscaling.UpdateAutoScalingGroupInput) (*autoscaling.UpdateAutoScalingGroupOutput, error)
	DescribeAutoScalingGroups(*autoscaling.DescribeAutoScalingGroupsInput) (*autoscaling.DescribeAutoScalingGroupsOutput, error)
}

// KMS is a simple pass-through of the Key Management Service client interface,
// which allows for testing.
type KMS interface {
	DescribeKey(*kms.DescribeKeyInput) (*kms.DescribeKeyOutput, error)
}

// EC2Metadata is an abstraction over the AWS metadata service.
type EC2Metadata interface {
	// Query the EC2 metadata service (used to discover instance-id etc)
	GetMetadata(path string) (string, error)
}
// AWS volume types
const (
	// Provisioned IOPS SSD
	VolumeTypeIO1 = "io1"
	// General Purpose SSD
	VolumeTypeGP2 = "gp2"
	// Cold HDD (sc1)
	VolumeTypeSC1 = "sc1"
	// Throughput Optimized HDD
	VolumeTypeST1 = "st1"
)

// AWS provisioning limits.
// Source: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html
const (
	// MinTotalIOPS is the smallest IOPS value accepted when provisioning a volume.
	MinTotalIOPS = 100
	// MaxTotalIOPS caps the IOPS computed from IOPSPerGB x CapacityGB (see VolumeOptions).
	MaxTotalIOPS = 20000
)

// VolumeOptions specifies capacity and tags for a volume.
type VolumeOptions struct {
	// Requested size of the volume, in GiB.
	CapacityGB int
	// Tags to apply to the created volume.
	Tags map[string]string
	// One of the VolumeType* constants above (e.g. "gp2", "io1").
	VolumeType string
	// Availability zone in which to create the volume.
	AvailabilityZone string
	// IOPSPerGB x CapacityGB will give total IOPS of the volume to create.
	// Calculated total IOPS will be capped at MaxTotalIOPS.
	IOPSPerGB int
	// Whether to request EBS encryption for the volume.
	Encrypted bool
	// fully qualified resource name to the key to use for encryption.
	// example: arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef
	KmsKeyID string
}
// Volumes is an interface for managing cloud-provisioned volumes
// TODO: Allow other clouds to implement this
type Volumes interface {
	// Attach the disk to the node with the specified NodeName
	// nodeName can be empty to mean "the instance on which we are running"
	// Returns the device (e.g. /dev/xvdf) where we attached the volume
	AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) (string, error)
	// Detach the disk from the node with the specified NodeName
	// nodeName can be empty to mean "the instance on which we are running"
	// Returns the device where the volume was attached
	DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) (string, error)
	// Create a volume with the specified options
	CreateDisk(volumeOptions *VolumeOptions) (volumeName KubernetesVolumeID, err error)
	// Delete the specified volume
	// Returns true iff the volume was deleted
	// If the volume was not found, returns (false, nil)
	DeleteDisk(volumeName KubernetesVolumeID) (bool, error)
	// Get labels to apply to volume on creation
	GetVolumeLabels(volumeName KubernetesVolumeID) (map[string]string, error)
	// Get volume's disk path from volume name
	// return the device path where the volume is attached
	GetDiskPath(volumeName KubernetesVolumeID) (string, error)
	// Check if the volume is already attached to the node with the specified NodeName
	DiskIsAttached(diskName KubernetesVolumeID, nodeName types.NodeName) (bool, error)
	// Check if disks specified in argument map are still attached to their respective nodes.
	DisksAreAttached(map[types.NodeName][]KubernetesVolumeID) (map[types.NodeName]map[KubernetesVolumeID]bool, error)
	// Expand the disk to new size
	ResizeDisk(diskName KubernetesVolumeID, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error)
}

// InstanceGroups is an interface for managing cloud-managed instance groups / autoscaling instance groups
// TODO: Allow other clouds to implement this
type InstanceGroups interface {
	// Set the size to the fixed size
	ResizeInstanceGroup(instanceGroupName string, size int) error
	// Queries the cloud provider for information about the specified instance group
	DescribeInstanceGroup(instanceGroupName string) (InstanceGroupInfo, error)
}

// InstanceGroupInfo is returned by InstanceGroups.Describe, and exposes information about the group.
type InstanceGroupInfo interface {
	// The number of instances currently running under control of this group
	CurrentSize() (int, error)
}
// Compile-time checks that *Cloud implements the cloudprovider interfaces it claims to.
var _ cloudprovider.Interface = (*Cloud)(nil)
var _ cloudprovider.Instances = (*Cloud)(nil)
var _ cloudprovider.LoadBalancer = (*Cloud)(nil)
var _ cloudprovider.Routes = (*Cloud)(nil)
var _ cloudprovider.Zones = (*Cloud)(nil)
var _ cloudprovider.PVLabeler = (*Cloud)(nil)

// Cloud is an implementation of Interface, LoadBalancer and Instances for Amazon Web Services.
type Cloud struct {
	// Regional AWS service clients (see the pass-through interfaces above).
	ec2      EC2
	elb      ELB
	elbv2    ELBV2
	asg      ASG
	kms      KMS
	metadata EC2Metadata
	cfg      *CloudConfig
	region   string
	vpcID    string

	// tagging identifies this cluster's resources (cluster tag / cluster ID).
	tagging awsTagging

	// The AWS instance that we are running on
	// Note that we cache some state in awsInstance (mountpoints), so we must preserve the instance
	selfAWSInstance *awsInstance

	instanceCache instanceCache

	// Kubernetes API access, wired up in Initialize / SetInformers.
	clientBuilder cloudprovider.ControllerClientBuilder
	kubeClient    clientset.Interface
	nodeInformer  informercorev1.NodeInformer

	// Extract the function out to make it easier to test
	nodeInformerHasSynced cache.InformerSynced

	eventBroadcaster record.EventBroadcaster
	eventRecorder    record.EventRecorder

	// We keep an active list of devices we have assigned but not yet
	// attached, to avoid a race condition where we assign a device mapping
	// and then get a second request before we attach the volume
	attachingMutex sync.Mutex
	attaching      map[types.NodeName]map[mountDevice]EBSVolumeID

	// state of our device allocator for each node
	deviceAllocators map[types.NodeName]DeviceAllocator
}

// Compile-time check that *Cloud implements the Volumes interface.
var _ Volumes = &Cloud{}
// CloudConfig wraps the settings for the AWS cloud provider.
// NOTE: Cloud config files should follow the same Kubernetes deprecation policy as
// flags or CLIs. Config fields should not change behavior in incompatible ways and
// should be deprecated for at least 2 release prior to removing.
// See https://kubernetes.io/docs/reference/using-api/deprecation-policy/#deprecating-a-flag-or-cli
// for more details.
type CloudConfig struct {
	// Global holds the [Global] section of the gcfg-format config file.
	Global struct {
		// TODO: Is there any use for this? We can get it from the instance metadata service
		// Maybe if we're not running on AWS, e.g. bootstrap; for now it is not very useful
		Zone string
		// The AWS VPC flag enables the possibility to run the master components
		// on a different aws account, on a different cloud provider or on-premises.
		// If the flag is set also the KubernetesClusterTag must be provided
		VPC string
		// SubnetID enables using a specific subnet to use for ELB's
		SubnetID string
		// RouteTableID enables using a specific RouteTable
		RouteTableID string
		// RoleARN is the IAM role to assume when interaction with AWS APIs.
		RoleARN string
		// KubernetesClusterTag is the legacy cluster id we'll use to identify our cluster resources
		KubernetesClusterTag string
		// KubernetesClusterID is the cluster id we'll use to identify our cluster resources
		KubernetesClusterID string
		//The aws provider creates an inbound rule per load balancer on the node security
		//group. However, this can run into the AWS security group rule limit of 50 if
		//many LoadBalancers are created.
		//
		//This flag disables the automatic ingress creation. It requires that the user
		//has setup a rule that allows inbound traffic on kubelet ports from the
		//local VPC subnet (so load balancers can access it). E.g. 10.82.0.0/16 30000-32000.
		DisableSecurityGroupIngress bool
		//AWS has a hard limit of 500 security groups. For large clusters creating a security group for each ELB
		//can cause the max number of security groups to be reached. If this is set instead of creating a new
		//Security group for each ELB this security group will be used instead.
		ElbSecurityGroup string
		//During the instantiation of an new AWS cloud provider, the detected region
		//is validated against a known set of regions.
		//
		//In a non-standard, AWS like environment (e.g. Eucalyptus), this check may
		//be undesirable. Setting this to true will disable the check and provide
		//a warning that the check was skipped. Please note that this is an
		//experimental feature and work-in-progress for the moment. If you find
		//yourself in an non-AWS cloud and open an issue, please indicate that in the
		//issue body.
		DisableStrictZoneCheck bool
	}
	// ServiceOverride maps [ServiceOverride "N"] sections to custom endpoint
	// definitions, keyed by the section label N. Example config:
	//
	// [ServiceOverride "1"]
	//  Service = s3
	//  Region = region1
	//  URL = https://s3.foo.bar
	//  SigningRegion = signing_region
	//  SigningMethod = signing_method
	//
	// [ServiceOverride "2"]
	//  Service = ec2
	//  Region = region2
	//  URL = https://ec2.foo.bar
	//  SigningRegion = signing_region
	//  SigningMethod = signing_method
	ServiceOverride map[string]*struct {
		Service       string
		Region        string
		URL           string
		SigningRegion string
		SigningMethod string
		SigningName   string
	}
}
// validateOverrides verifies every [ServiceOverride] section: Service,
// Region, URL, and SigningRegion must be non-blank, and no two sections may
// target the same (service, region) pair. Service and Region are
// space-trimmed in place so later lookups compare cleanly.
func (cfg *CloudConfig) validateOverrides() error {
	if len(cfg.ServiceOverride) == 0 {
		return nil
	}
	seen := make(map[string]bool)
	for onum, override := range cfg.ServiceOverride {
		// gcfg does not trim whitespace, so trim before comparing to "".
		service := strings.TrimSpace(override.Service)
		if service == "" {
			return fmt.Errorf("service name is missing [Service is \"\"] in override %s", onum)
		}
		override.Service = service // store the trimmed name back
		region := strings.TrimSpace(override.Region)
		if region == "" {
			return fmt.Errorf("service region is missing [Region is \"\"] in override %s", onum)
		}
		override.Region = region // store the trimmed region back
		if strings.TrimSpace(override.URL) == "" {
			return fmt.Errorf("url is missing [URL is \"\"] in override %s", onum)
		}
		if strings.TrimSpace(override.SigningRegion) == "" {
			return fmt.Errorf("signingRegion is missing [SigningRegion is \"\"] in override %s", onum)
		}
		key := service + "_" + region
		if seen[key] {
			return fmt.Errorf("duplicate entry found for service override [%s] (%s in %s)", onum, service, region)
		}
		seen[key] = true
	}
	return nil
}
// getResolver returns the endpoint resolver the AWS SDK should use. With no
// overrides configured, it simply delegates to the SDK's default resolver.
// Otherwise it returns a resolver that serves a matching ServiceOverride
// entry first and falls back to the default resolver for everything else.
func (cfg *CloudConfig) getResolver() endpoints.ResolverFunc {
	defaultResolver := endpoints.DefaultResolver()
	defaultResolverFn := func(service, region string,
		optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
		return defaultResolver.EndpointFor(service, region, optFns...)
	}
	if len(cfg.ServiceOverride) == 0 {
		return defaultResolverFn
	}

	return func(service, region string,
		optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
		// Linear scan is fine: the override list is tiny and fixed at startup.
		for _, override := range cfg.ServiceOverride {
			if override.Service == service && override.Region == region {
				return endpoints.ResolvedEndpoint{
					URL:           override.URL,
					SigningRegion: override.SigningRegion,
					SigningMethod: override.SigningMethod,
					SigningName:   override.SigningName,
				}, nil
			}
		}
		// No override matched; use the SDK's built-in endpoint table.
		return defaultResolver.EndpointFor(service, region, optFns...)
	}
}
// awsSdkEC2 is an implementation of the EC2 interface, backed by aws-sdk-go
type awsSdkEC2 struct {
	ec2 *ec2.EC2
}

// Interface to make the CloudConfig immutable for awsSDKProvider
type awsCloudConfigProvider interface {
	getResolver() endpoints.ResolverFunc
}

// awsSDKProvider builds the per-region AWS service clients (EC2, ELB, ASG,
// KMS, metadata) from shared credentials and config.
type awsSDKProvider struct {
	creds *credentials.Credentials
	cfg   awsCloudConfigProvider

	// mutex guards regionDelayers, which is populated lazily per region.
	mutex          sync.Mutex
	regionDelayers map[string]*CrossRequestRetryDelay
}

// newAWSSDKProvider returns a provider using the given credentials and
// (immutable) cloud config.
func newAWSSDKProvider(creds *credentials.Credentials, cfg *CloudConfig) *awsSDKProvider {
	return &awsSDKProvider{
		creds:          creds,
		cfg:            cfg,
		regionDelayers: make(map[string]*CrossRequestRetryDelay),
	}
}
// addHandlers wires the standard Kubernetes request handlers onto an AWS
// client's handler chain: a user-agent stamp, request logging, the
// per-region rate-limit delayer (pre-sign and after-retry hooks), and the
// API logging handlers.
func (p *awsSDKProvider) addHandlers(regionName string, h *request.Handlers) {
	h.Build.PushFrontNamed(request.NamedHandler{
		Name: "k8s/user-agent",
		Fn:   request.MakeAddToUserAgentHandler("kubernetes", version.Get().String()),
	})

	h.Sign.PushFrontNamed(request.NamedHandler{
		Name: "k8s/logger",
		Fn:   awsHandlerLogger,
	})

	delayer := p.getCrossRequestRetryDelay(regionName)
	if delayer != nil {
		// Delay before signing so the signature isn't stale by the time the
		// request is actually sent, and back off further after retries.
		h.Sign.PushFrontNamed(request.NamedHandler{
			Name: "k8s/delay-presign",
			Fn:   delayer.BeforeSign,
		})

		h.AfterRetry.PushFrontNamed(request.NamedHandler{
			Name: "k8s/delay-afterretry",
			Fn:   delayer.AfterRetry,
		})
	}

	p.addAPILoggingHandlers(h)
}

// addAPILoggingHandlers attaches request/response logging to a client's
// handler chain (used alone for the metadata client, which needs no delayer).
func (p *awsSDKProvider) addAPILoggingHandlers(h *request.Handlers) {
	h.Send.PushBackNamed(request.NamedHandler{
		Name: "k8s/api-request",
		Fn:   awsSendHandlerLogger,
	})

	h.ValidateResponse.PushFrontNamed(request.NamedHandler{
		Name: "k8s/api-validate-response",
		Fn:   awsValidateResponseHandlerLogger,
	})
}
// getCrossRequestRetryDelay returns the retry delayer for regionName,
// creating it lazily on first use. The delayer is scoped to the region, not
// to a single request: once one call hits an AWS rate limit, all calls to
// that region's APIs are delayed. That protects the AWS account from being
// throttled into an effective lockout, and rate-limit hits are logged.
// Note the delay blocks the calling goroutine, which can make cloud
// operations slow or unresponsive — this is a deliberate last resort, and
// persistent throttling should be fixed at its source (e.g. by adding a
// delay to a controller retry loop).
func (p *awsSDKProvider) getCrossRequestRetryDelay(regionName string) *CrossRequestRetryDelay {
	p.mutex.Lock()
	defer p.mutex.Unlock()

	if delayer, ok := p.regionDelayers[regionName]; ok {
		return delayer
	}
	delayer := NewCrossRequestRetryDelay()
	p.regionDelayers[regionName] = delayer
	return delayer
}
// SetInformers implements InformerUser interface by setting up informer-fed caches for aws lib to
// leverage Kubernetes API for caching
func (c *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) {
	klog.Infof("Setting up informers for Cloud")
	c.nodeInformer = informerFactory.Core().V1().Nodes()
	// Stored as a function so tests can substitute their own sync check.
	c.nodeInformerHasSynced = c.nodeInformer.Informer().HasSynced
}
func (p *awsSDKProvider) Compute(regionName string) (EC2, error) {
awsConfig := &aws.Config{
Region: ®ionName,
Credentials: p.creds,
}
awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true).
WithEndpointResolver(p.cfg.getResolver())
sess, err := session.NewSession(awsConfig)
if err != nil {
return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
}
service := ec2.New(sess)
p.addHandlers(regionName, &service.Handlers)
ec2 := &awsSdkEC2{
ec2: service,
}
return ec2, nil
}
func (p *awsSDKProvider) LoadBalancing(regionName string) (ELB, error) {
awsConfig := &aws.Config{
Region: ®ionName,
Credentials: p.creds,
}
awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true).
WithEndpointResolver(p.cfg.getResolver())
sess, err := session.NewSession(awsConfig)
if err != nil {
return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
}
elbClient := elb.New(sess)
p.addHandlers(regionName, &elbClient.Handlers)
return elbClient, nil
}
func (p *awsSDKProvider) LoadBalancingV2(regionName string) (ELBV2, error) {
awsConfig := &aws.Config{
Region: ®ionName,
Credentials: p.creds,
}
awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true).
WithEndpointResolver(p.cfg.getResolver())
sess, err := session.NewSession(awsConfig)
if err != nil {
return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
}
elbClient := elbv2.New(sess)
p.addHandlers(regionName, &elbClient.Handlers)
return elbClient, nil
}
func (p *awsSDKProvider) Autoscaling(regionName string) (ASG, error) {
awsConfig := &aws.Config{
Region: ®ionName,
Credentials: p.creds,
}
awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true).
WithEndpointResolver(p.cfg.getResolver())
sess, err := session.NewSession(awsConfig)
if err != nil {
return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
}
client := autoscaling.New(sess)
p.addHandlers(regionName, &client.Handlers)
return client, nil
}
// Metadata builds an EC2 instance-metadata client. The metadata service is
// link-local, so no region or credentials are required — only the endpoint
// resolver (for overrides) and the API logging handlers.
func (p *awsSDKProvider) Metadata() (EC2Metadata, error) {
	cfg := aws.NewConfig().WithEndpointResolver(p.cfg.getResolver())
	sess, err := session.NewSession(cfg)
	if err != nil {
		return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
	}
	metadataClient := ec2metadata.New(sess)
	p.addAPILoggingHandlers(&metadataClient.Handlers)
	return metadataClient, nil
}
func (p *awsSDKProvider) KeyManagement(regionName string) (KMS, error) {
awsConfig := &aws.Config{
Region: ®ionName,
Credentials: p.creds,
}
awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true).
WithEndpointResolver(p.cfg.getResolver())
sess, err := session.NewSession(awsConfig)
if err != nil {
return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
}
kmsClient := kms.New(sess)
p.addHandlers(regionName, &kmsClient.Handlers)
return kmsClient, nil
}
// newEc2Filter builds an EC2 API filter matching the given attribute name
// against any of the supplied values. With no values, Values stays nil.
func newEc2Filter(name string, values ...string) *ec2.Filter {
	f := &ec2.Filter{
		Name: aws.String(name),
	}
	for i := range values {
		f.Values = append(f.Values, aws.String(values[i]))
	}
	return f
}
// AddSSHKeyToAllInstances is currently not implemented.
// It always returns cloudprovider.NotImplemented.
func (c *Cloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
	return cloudprovider.NotImplemented
}

// CurrentNodeName returns the name of the current node
// (the cached node name of the instance this process runs on; the hostname
// argument is ignored).
func (c *Cloud) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) {
	return c.selfAWSInstance.nodeName, nil
}
// Implementation of EC2.Instances
// Follows NextToken pagination until all pages are consumed, flattening the
// instances nested inside each reservation, and records call metrics.
func (s *awsSdkEC2) DescribeInstances(request *ec2.DescribeInstancesInput) ([]*ec2.Instance, error) {
	// Instances are paged
	results := []*ec2.Instance{}
	var nextToken *string
	requestTime := time.Now()
	for {
		response, err := s.ec2.DescribeInstances(request)
		if err != nil {
			// Record the failure with zero duration, matching the other wrappers.
			recordAWSMetric("describe_instance", 0, err)
			return nil, fmt.Errorf("error listing AWS instances: %q", err)
		}

		// The API nests instances inside reservations; flatten them.
		for _, reservation := range response.Reservations {
			results = append(results, reservation.Instances...)
		}

		nextToken = response.NextToken
		// An empty (or nil) NextToken means this was the last page.
		if aws.StringValue(nextToken) == "" {
			break
		}
		request.NextToken = nextToken
	}

	timeTaken := time.Since(requestTime).Seconds()
	recordAWSMetric("describe_instance", timeTaken, nil)
	return results, nil
}
// Implements EC2.DescribeSecurityGroups
// Follows NextToken pagination until all pages are consumed and records
// call metrics.
func (s *awsSdkEC2) DescribeSecurityGroups(request *ec2.DescribeSecurityGroupsInput) ([]*ec2.SecurityGroup, error) {
	// Security groups are paged
	results := []*ec2.SecurityGroup{}
	var nextToken *string
	requestTime := time.Now()
	for {
		response, err := s.ec2.DescribeSecurityGroups(request)
		if err != nil {
			recordAWSMetric("describe_security_groups", 0, err)
			return nil, fmt.Errorf("error listing AWS security groups: %q", err)
		}

		results = append(results, response.SecurityGroups...)

		nextToken = response.NextToken
		// An empty (or nil) NextToken means this was the last page.
		if aws.StringValue(nextToken) == "" {
			break
		}
		request.NextToken = nextToken
	}

	timeTaken := time.Since(requestTime).Seconds()
	recordAWSMetric("describe_security_groups", timeTaken, nil)
	return results, nil
}
// AttachVolume attaches an EBS volume to an instance, recording call
// latency/error metrics under "attach_volume".
func (s *awsSdkEC2) AttachVolume(request *ec2.AttachVolumeInput) (*ec2.VolumeAttachment, error) {
	requestTime := time.Now()
	resp, err := s.ec2.AttachVolume(request)
	timeTaken := time.Since(requestTime).Seconds()
	recordAWSMetric("attach_volume", timeTaken, err)
	return resp, err
}

// DetachVolume detaches an EBS volume from an instance, recording call
// latency/error metrics under "detach_volume".
func (s *awsSdkEC2) DetachVolume(request *ec2.DetachVolumeInput) (*ec2.VolumeAttachment, error) {
	requestTime := time.Now()
	resp, err := s.ec2.DetachVolume(request)
	timeTaken := time.Since(requestTime).Seconds()
	recordAWSMetric("detach_volume", timeTaken, err)
	return resp, err
}
// DescribeVolumes lists EBS volumes, following NextToken pagination until
// all pages are consumed, and records call metrics.
func (s *awsSdkEC2) DescribeVolumes(request *ec2.DescribeVolumesInput) ([]*ec2.Volume, error) {
	// Volumes are paged
	results := []*ec2.Volume{}
	var nextToken *string
	requestTime := time.Now()
	for {
		response, err := s.ec2.DescribeVolumes(request)

		if err != nil {
			recordAWSMetric("describe_volume", 0, err)
			return nil, err
		}

		results = append(results, response.Volumes...)

		nextToken = response.NextToken
		// An empty (or nil) NextToken means this was the last page.
		if aws.StringValue(nextToken) == "" {
			break
		}
		request.NextToken = nextToken
	}

	timeTaken := time.Since(requestTime).Seconds()
	recordAWSMetric("describe_volume", timeTaken, nil)
	return results, nil
}
// CreateVolume creates an EBS volume, recording call latency/error metrics
// under "create_volume".
func (s *awsSdkEC2) CreateVolume(request *ec2.CreateVolumeInput) (*ec2.Volume, error) {
	requestTime := time.Now()
	resp, err := s.ec2.CreateVolume(request)
	timeTaken := time.Since(requestTime).Seconds()
	recordAWSMetric("create_volume", timeTaken, err)
	return resp, err
}

// DeleteVolume deletes an EBS volume, recording call latency/error metrics
// under "delete_volume".
func (s *awsSdkEC2) DeleteVolume(request *ec2.DeleteVolumeInput) (*ec2.DeleteVolumeOutput, error) {
	requestTime := time.Now()
	resp, err := s.ec2.DeleteVolume(request)
	timeTaken := time.Since(requestTime).Seconds()
	recordAWSMetric("delete_volume", timeTaken, err)
	return resp, err
}

// ModifyVolume modifies an EBS volume (e.g. resize), recording call
// latency/error metrics under "modify_volume".
func (s *awsSdkEC2) ModifyVolume(request *ec2.ModifyVolumeInput) (*ec2.ModifyVolumeOutput, error) {
	requestTime := time.Now()
	resp, err := s.ec2.ModifyVolume(request)
	timeTaken := time.Since(requestTime).Seconds()
	recordAWSMetric("modify_volume", timeTaken, err)
	return resp, err
}
// DescribeVolumeModifications lists EBS volume modifications, following
// NextToken pagination until all pages are consumed, and records call
// metrics under "describe_volume_modification".
func (s *awsSdkEC2) DescribeVolumeModifications(request *ec2.DescribeVolumesModificationsInput) ([]*ec2.VolumeModification, error) {
	requestTime := time.Now()
	results := []*ec2.VolumeModification{}
	var nextToken *string
	for {
		resp, err := s.ec2.DescribeVolumesModifications(request)
		if err != nil {
			recordAWSMetric("describe_volume_modification", 0, err)
			// Message fixed: "modifictions :" -> "modifications:"; %q matches
			// the error-wrapping style of the sibling Describe* wrappers.
			return nil, fmt.Errorf("error listing volume modifications: %q", err)
		}
		results = append(results, resp.VolumesModifications...)
		nextToken = resp.NextToken
		// An empty (or nil) NextToken means this was the last page.
		if aws.StringValue(nextToken) == "" {
			break
		}
		request.NextToken = nextToken
	}
	timeTaken := time.Since(requestTime).Seconds()
	recordAWSMetric("describe_volume_modification", timeTaken, nil)
	return results, nil
}
// DescribeSubnets lists subnets matching the request.
func (s *awsSdkEC2) DescribeSubnets(request *ec2.DescribeSubnetsInput) ([]*ec2.Subnet, error) {
	// Subnets are not paged
	response, err := s.ec2.DescribeSubnets(request)
	if err != nil {
		return nil, fmt.Errorf("error listing AWS subnets: %q", err)
	}
	return response.Subnets, nil
}

// CreateSecurityGroup is a direct pass-through to the EC2 API.
func (s *awsSdkEC2) CreateSecurityGroup(request *ec2.CreateSecurityGroupInput) (*ec2.CreateSecurityGroupOutput, error) {
	return s.ec2.CreateSecurityGroup(request)
}

// DeleteSecurityGroup is a direct pass-through to the EC2 API.
func (s *awsSdkEC2) DeleteSecurityGroup(request *ec2.DeleteSecurityGroupInput) (*ec2.DeleteSecurityGroupOutput, error) {
	return s.ec2.DeleteSecurityGroup(request)
}

// AuthorizeSecurityGroupIngress is a direct pass-through to the EC2 API.
func (s *awsSdkEC2) AuthorizeSecurityGroupIngress(request *ec2.AuthorizeSecurityGroupIngressInput) (*ec2.AuthorizeSecurityGroupIngressOutput, error) {
	return s.ec2.AuthorizeSecurityGroupIngress(request)
}

// RevokeSecurityGroupIngress is a direct pass-through to the EC2 API.
func (s *awsSdkEC2) RevokeSecurityGroupIngress(request *ec2.RevokeSecurityGroupIngressInput) (*ec2.RevokeSecurityGroupIngressOutput, error) {
	return s.ec2.RevokeSecurityGroupIngress(request)
}
// CreateTags tags EC2 resources, recording call latency/error metrics under
// "create_tags".
func (s *awsSdkEC2) CreateTags(request *ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error) {
	requestTime := time.Now()
	resp, err := s.ec2.CreateTags(request)
	timeTaken := time.Since(requestTime).Seconds()
	recordAWSMetric("create_tags", timeTaken, err)
	return resp, err
}
// DescribeRouteTables lists route tables, following NextToken pagination
// until all pages are consumed, and records call metrics.
func (s *awsSdkEC2) DescribeRouteTables(request *ec2.DescribeRouteTablesInput) ([]*ec2.RouteTable, error) {
	results := []*ec2.RouteTable{}
	var nextToken *string
	requestTime := time.Now()
	for {
		response, err := s.ec2.DescribeRouteTables(request)
		if err != nil {
			recordAWSMetric("describe_route_tables", 0, err)
			return nil, fmt.Errorf("error listing AWS route tables: %q", err)
		}

		results = append(results, response.RouteTables...)

		nextToken = response.NextToken
		// An empty (or nil) NextToken means this was the last page.
		if aws.StringValue(nextToken) == "" {
			break
		}
		request.NextToken = nextToken
	}

	timeTaken := time.Since(requestTime).Seconds()
	recordAWSMetric("describe_route_tables", timeTaken, nil)
	return results, nil
}
// CreateRoute is a direct pass-through to the EC2 API.
func (s *awsSdkEC2) CreateRoute(request *ec2.CreateRouteInput) (*ec2.CreateRouteOutput, error) {
	return s.ec2.CreateRoute(request)
}

// DeleteRoute is a direct pass-through to the EC2 API.
func (s *awsSdkEC2) DeleteRoute(request *ec2.DeleteRouteInput) (*ec2.DeleteRouteOutput, error) {
	return s.ec2.DeleteRoute(request)
}

// ModifyInstanceAttribute is a direct pass-through to the EC2 API.
func (s *awsSdkEC2) ModifyInstanceAttribute(request *ec2.ModifyInstanceAttributeInput) (*ec2.ModifyInstanceAttributeOutput, error) {
	return s.ec2.ModifyInstanceAttribute(request)
}

// DescribeVpcs is a direct pass-through to the EC2 API.
func (s *awsSdkEC2) DescribeVpcs(request *ec2.DescribeVpcsInput) (*ec2.DescribeVpcsOutput, error) {
	return s.ec2.DescribeVpcs(request)
}
// init registers the AWS metrics and the "aws" cloud provider factory. The
// factory reads and validates the cloud config, builds the credential chain
// (env vars, then instance role or assumed role, then shared credentials
// file), and constructs the Cloud.
func init() {
	registerMetrics()
	cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) {
		cfg, err := readAWSCloudConfig(config)
		if err != nil {
			return nil, fmt.Errorf("unable to read AWS cloud provider config file: %v", err)
		}

		if err = cfg.validateOverrides(); err != nil {
			return nil, fmt.Errorf("unable to validate custom endpoint overrides: %v", err)
		}

		sess, err := session.NewSession(&aws.Config{})
		if err != nil {
			return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
		}

		// Pick the mid-chain credential provider: the instance's IAM role by
		// default, or an assumed role when RoleARN is configured.
		var provider credentials.Provider
		if cfg.Global.RoleARN == "" {
			provider = &ec2rolecreds.EC2RoleProvider{
				Client: ec2metadata.New(sess),
			}
		} else {
			klog.Infof("Using AWS assumed role %v", cfg.Global.RoleARN)
			provider = &stscreds.AssumeRoleProvider{
				Client:  sts.New(sess),
				RoleARN: cfg.Global.RoleARN,
			}
		}

		creds := credentials.NewChainCredentials(
			[]credentials.Provider{
				&credentials.EnvProvider{},
				provider,
				&credentials.SharedCredentialsProvider{},
			})

		// Named sdkProvider (not "aws") so the local does not shadow the
		// imported aws package, which staticcheck flags.
		sdkProvider := newAWSSDKProvider(creds, cfg)
		return newAWSCloud(*cfg, sdkProvider)
	})
}
// readAWSCloudConfig reads an instance of AWSCloudConfig from config reader.
// A nil reader yields the zero-value config (all defaults) with no error.
func readAWSCloudConfig(config io.Reader) (*CloudConfig, error) {
	var cfg CloudConfig
	var err error

	if config != nil {
		// gcfg parses the INI-style cloud config into the struct.
		err = gcfg.ReadInto(&cfg, config)
		if err != nil {
			return nil, err
		}
	}

	return &cfg, nil
}
// updateConfigZone fills in cfg.Global.Zone from the EC2 metadata service
// when the config file leaves it empty. It errors if no zone can be
// determined from either source.
func updateConfigZone(cfg *CloudConfig, metadata EC2Metadata) error {
	if cfg.Global.Zone != "" {
		return nil // explicitly configured; nothing to do
	}
	if metadata != nil {
		klog.Info("Zone not specified in configuration file; querying AWS metadata service")
		zone, err := getAvailabilityZone(metadata)
		if err != nil {
			return err
		}
		cfg.Global.Zone = zone
	}
	if cfg.Global.Zone == "" {
		return fmt.Errorf("no zone specified in configuration file")
	}
	return nil
}
// getAvailabilityZone queries the EC2 metadata service for the availability
// zone of the instance this process runs on (e.g. "us-east-1a").
func getAvailabilityZone(metadata EC2Metadata) (string, error) {
	return metadata.GetMetadata("placement/availability-zone")
}
// azToRegion derives a region name from an availability-zone name by
// dropping the trailing zone letter (e.g. "us-east-1a" -> "us-east-1").
// An empty AZ name is rejected with an error.
func azToRegion(az string) (string, error) {
	if az == "" {
		return "", fmt.Errorf("invalid (empty) AZ")
	}
	return az[:len(az)-1], nil
}
// newAWSCloud creates a new instance of AWSCloud.
// AWSProvider and instanceId are primarily for tests
func newAWSCloud(cfg CloudConfig, awsServices Services) (*Cloud, error) {
	// We have some state in the Cloud object - in particular the attaching map
	// Log so that if we are building multiple Cloud objects, it is obvious!
	klog.Infof("Building AWS cloudprovider")

	metadata, err := awsServices.Metadata()
	if err != nil {
		return nil, fmt.Errorf("error creating AWS metadata client: %q", err)
	}

	// Fill in cfg.Global.Zone from the metadata service when unset.
	err = updateConfigZone(&cfg, metadata)
	if err != nil {
		return nil, fmt.Errorf("unable to determine AWS zone from cloud provider config or EC2 instance metadata: %v", err)
	}

	zone := cfg.Global.Zone
	if len(zone) <= 1 {
		return nil, fmt.Errorf("invalid AWS zone in config file: %s", zone)
	}

	regionName, err := azToRegion(zone)
	if err != nil {
		return nil, err
	}

	if !cfg.Global.DisableStrictZoneCheck {
		if !isRegionValid(regionName, metadata) {
			return nil, fmt.Errorf("not a valid AWS zone (unknown region): %s", zone)
		}
	} else {
		klog.Warningf("Strict AWS zone checking is disabled. Proceeding with zone: %s", zone)
	}

	// Construct all regional service clients.
	ec2, err := awsServices.Compute(regionName)
	if err != nil {
		return nil, fmt.Errorf("error creating AWS EC2 client: %v", err)
	}

	elb, err := awsServices.LoadBalancing(regionName)
	if err != nil {
		return nil, fmt.Errorf("error creating AWS ELB client: %v", err)
	}

	elbv2, err := awsServices.LoadBalancingV2(regionName)
	if err != nil {
		return nil, fmt.Errorf("error creating AWS ELBV2 client: %v", err)
	}

	asg, err := awsServices.Autoscaling(regionName)
	if err != nil {
		return nil, fmt.Errorf("error creating AWS autoscaling client: %v", err)
	}

	kms, err := awsServices.KeyManagement(regionName)
	if err != nil {
		return nil, fmt.Errorf("error creating AWS key management client: %v", err)
	}

	awsCloud := &Cloud{
		ec2:      ec2,
		elb:      elb,
		elbv2:    elbv2,
		asg:      asg,
		metadata: metadata,
		kms:      kms,
		cfg:      &cfg,
		region:   regionName,

		attaching:        make(map[types.NodeName]map[mountDevice]EBSVolumeID),
		deviceAllocators: make(map[types.NodeName]DeviceAllocator),
	}
	awsCloud.instanceCache.cloud = awsCloud

	tagged := cfg.Global.KubernetesClusterTag != "" || cfg.Global.KubernetesClusterID != ""
	if cfg.Global.VPC != "" && (cfg.Global.SubnetID != "" || cfg.Global.RoleARN != "") && tagged {
		// When the master is running on a different AWS account, cloud provider or on-premise
		// build up a dummy instance and use the VPC from the nodes account
		klog.Info("Master is configured to run on a different AWS account, different cloud provider or on-premises")
		awsCloud.selfAWSInstance = &awsInstance{
			nodeName: "master-dummy",
			vpcID:    cfg.Global.VPC,
			subnetID: cfg.Global.SubnetID,
		}
		awsCloud.vpcID = cfg.Global.VPC
	} else {
		// Normal case: discover our own instance/VPC from the metadata service.
		selfAWSInstance, err := awsCloud.buildSelfAWSInstance()
		if err != nil {
			return nil, err
		}
		awsCloud.selfAWSInstance = selfAWSInstance
		awsCloud.vpcID = selfAWSInstance.vpcID
	}

	if cfg.Global.KubernetesClusterTag != "" || cfg.Global.KubernetesClusterID != "" {
		if err := awsCloud.tagging.init(cfg.Global.KubernetesClusterTag, cfg.Global.KubernetesClusterID); err != nil {
			return nil, err
		}
	} else {
		// TODO: Clean up double-API query
		// No explicit cluster identity configured; derive it from this
		// instance's own tags.
		info, err := awsCloud.selfAWSInstance.describeInstance()
		if err != nil {
			return nil, err
		}
		if err := awsCloud.tagging.initFromTags(info.Tags); err != nil {
			return nil, err
		}
	}

	return awsCloud, nil
}
// isRegionValid accepts an AWS region name and returns if the region is a
// valid region known to the AWS SDK. Considers the region returned from the
// EC2 metadata service to be a valid region as it's only available on a host
// running in a valid AWS region.
func isRegionValid(region string, metadata EC2Metadata) bool {
	// Does the AWS SDK know about the region?
	for _, p := range endpoints.DefaultPartitions() {
		for r := range p.Regions() {
			if r == region {
				return true
			}
		}
	}

	// ap-northeast-3 is purposely excluded from the SDK because it
	// requires an access request (for more details see):
	// https://github.com/aws/aws-sdk-go/issues/1863
	if region == "ap-northeast-3" {
		return true
	}

	// Fallback to checking if the region matches the instance metadata region
	// (ignoring any user overrides). This just accounts for running an old
	// build of Kubernetes in a new region that wasn't compiled into the SDK
	// when Kubernetes was built.
	if az, err := getAvailabilityZone(metadata); err == nil {
		if r, err := azToRegion(az); err == nil && region == r {
			return true
		}
	}

	return false
}
// Initialize passes a Kubernetes clientBuilder interface to the cloud provider.
// It builds the kube client and wires up an event broadcaster/recorder
// attributed to the "aws-cloud-provider" component; events are both logged
// and posted to the API server. The stop channel is not used by this
// implementation.
func (c *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) {
	c.clientBuilder = clientBuilder
	// ClientOrDie aborts if a client cannot be constructed.
	c.kubeClient = clientBuilder.ClientOrDie("aws-cloud-provider")
	c.eventBroadcaster = record.NewBroadcaster()
	c.eventBroadcaster.StartLogging(klog.Infof)
	c.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.kubeClient.CoreV1().Events("")})
	c.eventRecorder = c.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "aws-cloud-provider"})
}
// Clusters returns the list of clusters. Cluster enumeration is not
// supported by this provider, so the second return value is always false.
func (c *Cloud) Clusters() (cloudprovider.Clusters, bool) {
	return nil, false
}
// ProviderName returns the cloud provider ID (the package-level
// ProviderName constant under which this provider is registered).
func (c *Cloud) ProviderName() string {
	return ProviderName
}
// LoadBalancer returns an implementation of LoadBalancer for Amazon Web Services.
// Cloud itself implements the interface, so it returns the receiver.
func (c *Cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
	return c, true
}
// Instances returns an implementation of Instances for Amazon Web Services.
// Cloud itself implements the interface, so it returns the receiver.
func (c *Cloud) Instances() (cloudprovider.Instances, bool) {
	return c, true
}
// Zones returns an implementation of Zones for Amazon Web Services.
// Cloud itself implements the interface, so it returns the receiver.
func (c *Cloud) Zones() (cloudprovider.Zones, bool) {
	return c, true
}
// Routes returns an implementation of Routes for Amazon Web Services.
// Cloud itself implements the interface, so it returns the receiver.
func (c *Cloud) Routes() (cloudprovider.Routes, bool) {
	return c, true
}
// HasClusterID returns true if the cluster has a clusterID configured
// (i.e. the tagging subsystem resolved a non-empty cluster ID).
func (c *Cloud) HasClusterID() bool {
	return len(c.tagging.clusterID()) > 0
}
// NodeAddresses is an implementation of Instances.NodeAddresses.
//
// For the node this provider itself runs on (or an empty name), addresses
// are assembled from the EC2 instance metadata service: per-interface
// private IPs ordered by device-number (so eth0's IPs come first), then the
// public IP, hostname and internal DNS names parsed from "local-hostname",
// and the public DNS name. For any other node, the instance is looked up
// via the EC2 API and extractNodeAddresses is used instead.
func (c *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error) {
	if c.selfAWSInstance.nodeName == name || len(name) == 0 {
		addresses := []v1.NodeAddress{}
		macs, err := c.metadata.GetMetadata("network/interfaces/macs/")
		if err != nil {
			return nil, fmt.Errorf("error querying AWS metadata for %q: %q", "network/interfaces/macs", err)
		}
		// We want the IPs to end up in order by interface (in particular, we want eth0's
		// IPs first), but macs isn't necessarily sorted in that order so we have to
		// explicitly order by device-number (device-number == the "0" in "eth0").
		macIPs := make(map[int]string)
		for _, macID := range strings.Split(macs, "\n") {
			if macID == "" {
				continue
			}
			numPath := path.Join("network/interfaces/macs/", macID, "device-number")
			numStr, err := c.metadata.GetMetadata(numPath)
			if err != nil {
				return nil, fmt.Errorf("error querying AWS metadata for %q: %q", numPath, err)
			}
			num, err := strconv.Atoi(strings.TrimSpace(numStr))
			if err != nil {
				// Tolerate a malformed device-number: skip this interface.
				klog.Warningf("Bad device-number %q for interface %s\n", numStr, macID)
				continue
			}
			ipPath := path.Join("network/interfaces/macs/", macID, "local-ipv4s")
			macIPs[num], err = c.metadata.GetMetadata(ipPath)
			if err != nil {
				return nil, fmt.Errorf("error querying AWS metadata for %q: %q", ipPath, err)
			}
		}
		// Emit internal IPs in device-number order. NOTE(review): if device
		// numbers are not contiguous from 0, devices numbered >= len(macIPs)
		// are silently skipped by this loop — confirm whether that can occur.
		for i := 0; i < len(macIPs); i++ {
			internalIPs := macIPs[i]
			if internalIPs == "" {
				continue
			}
			for _, internalIP := range strings.Split(internalIPs, "\n") {
				if internalIP == "" {
					continue
				}
				addresses = append(addresses, v1.NodeAddress{Type: v1.NodeInternalIP, Address: internalIP})
			}
		}
		// Public IP is optional; a failure here is not fatal.
		externalIP, err := c.metadata.GetMetadata("public-ipv4")
		if err != nil {
			//TODO: It would be nice to be able to determine the reason for the failure,
			// but the AWS client masks all failures with the same error description.
			klog.V(4).Info("Could not determine public IP from AWS metadata.")
		} else {
			addresses = append(addresses, v1.NodeAddress{Type: v1.NodeExternalIP, Address: externalIP})
		}
		localHostname, err := c.metadata.GetMetadata("local-hostname")
		if err != nil || len(localHostname) == 0 {
			//TODO: It would be nice to be able to determine the reason for the failure,
			// but the AWS client masks all failures with the same error description.
			klog.V(4).Info("Could not determine private DNS from AWS metadata.")
		} else {
			// local-hostname may carry extra DHCP domain names; see
			// parseMetadataLocalHostname for the format.
			hostname, internalDNS := parseMetadataLocalHostname(localHostname)
			addresses = append(addresses, v1.NodeAddress{Type: v1.NodeHostName, Address: hostname})
			for _, d := range internalDNS {
				addresses = append(addresses, v1.NodeAddress{Type: v1.NodeInternalDNS, Address: d})
			}
		}
		externalDNS, err := c.metadata.GetMetadata("public-hostname")
		if err != nil || len(externalDNS) == 0 {
			//TODO: It would be nice to be able to determine the reason for the failure,
			// but the AWS client masks all failures with the same error description.
			klog.V(4).Info("Could not determine public DNS from AWS metadata.")
		} else {
			addresses = append(addresses, v1.NodeAddress{Type: v1.NodeExternalDNS, Address: externalDNS})
		}
		return addresses, nil
	}
	// Not the local node: resolve via the EC2 API.
	instance, err := c.getInstanceByNodeName(name)
	if err != nil {
		return nil, fmt.Errorf("getInstanceByNodeName failed for %q with %q", name, err)
	}
	return extractNodeAddresses(instance)
}
// parseMetadataLocalHostname parses the output of "local-hostname" metadata.
// If a DHCP option set is configured for a VPC and it has multiple domain names, GetMetadata
// returns a string containing first the hostname followed by additional domain names,
// space-separated. For example, if the DHCP option set has:
// domain-name = us-west-2.compute.internal a.a b.b c.c d.d;
// $ curl http://169.254.169.254/latest/meta-data/local-hostname
// ip-192-168-111-51.us-west-2.compute.internal a.a b.b c.c d.d
//
// It returns the primary hostname plus the list of internal DNS names:
// the hostname itself followed by the host's short name joined with each
// additional domain. Empty or all-whitespace metadata yields ("", nil)
// instead of panicking on the missing first field.
func parseMetadataLocalHostname(metadata string) (string, []string) {
	localHostnames := strings.Fields(metadata)
	// Guard against empty metadata; indexing localHostnames[0] below
	// would otherwise panic.
	if len(localHostnames) == 0 {
		return "", nil
	}
	hostname := localHostnames[0]
	internalDNS := []string{hostname}

	// The unqualified short name, e.g. "ip-192-168-111-51".
	privateAddress := strings.Split(hostname, ".")[0]
	for _, h := range localHostnames[1:] {
		internalDNSAddress := privateAddress + "." + h
		internalDNS = append(internalDNS, internalDNSAddress)
	}
	return hostname, internalDNS
}
// extractNodeAddresses maps the instance information from EC2 to an array of NodeAddresses.
// Ordering (not clear if it matters, but it is a sensible preference): in-use
// private IPs per network interface first, then the public IP, then the
// private DNS name (also emitted as the hostname address), then the public
// DNS name. Invalid IP strings from EC2 are reported as errors.
func extractNodeAddresses(instance *ec2.Instance) ([]v1.NodeAddress, error) {
	if instance == nil {
		return nil, fmt.Errorf("nil instance passed to extractNodeAddresses")
	}

	addresses := []v1.NodeAddress{}

	// Handle internal network interfaces, skipping any that are not
	// currently in use.
	for _, iface := range instance.NetworkInterfaces {
		if aws.StringValue(iface.Status) != ec2.NetworkInterfaceStatusInUse {
			continue
		}
		for _, priv := range iface.PrivateIpAddresses {
			raw := aws.StringValue(priv.PrivateIpAddress)
			if raw == "" {
				continue
			}
			parsed := net.ParseIP(raw)
			if parsed == nil {
				return nil, fmt.Errorf("EC2 instance had invalid private address: %s (%q)", aws.StringValue(instance.InstanceId), raw)
			}
			addresses = append(addresses, v1.NodeAddress{Type: v1.NodeInternalIP, Address: parsed.String()})
		}
	}

	// TODO: Other IP addresses (multiple ips)?
	if raw := aws.StringValue(instance.PublicIpAddress); raw != "" {
		parsed := net.ParseIP(raw)
		if parsed == nil {
			return nil, fmt.Errorf("EC2 instance had invalid public address: %s (%s)", aws.StringValue(instance.InstanceId), raw)
		}
		addresses = append(addresses, v1.NodeAddress{Type: v1.NodeExternalIP, Address: parsed.String()})
	}

	if privateDNSName := aws.StringValue(instance.PrivateDnsName); privateDNSName != "" {
		addresses = append(addresses,
			v1.NodeAddress{Type: v1.NodeInternalDNS, Address: privateDNSName},
			v1.NodeAddress{Type: v1.NodeHostName, Address: privateDNSName})
	}

	if publicDNSName := aws.StringValue(instance.PublicDnsName); publicDNSName != "" {
		addresses = append(addresses, v1.NodeAddress{Type: v1.NodeExternalDNS, Address: publicDNSName})
	}

	return addresses, nil
}
// NodeAddressesByProviderID returns the node addresses of an instances with the specified unique providerID
// This method will not be called from the node that is requesting this ID. i.e. metadata service
// and other local methods cannot be used here
func (c *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) {
	// Translate the Kubernetes providerID into an AWS instance ID.
	awsID, err := KubernetesInstanceID(providerID).MapToAWSInstanceID()
	if err != nil {
		return nil, err
	}

	// Look the instance up via the EC2 API (local metadata is unavailable).
	inst, err := describeInstance(c.ec2, awsID)
	if err != nil {
		return nil, err
	}

	return extractNodeAddresses(inst)
}
// InstanceExistsByProviderID returns true if the instance with the given provider id still exists.
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
//
// A terminated instance is treated as nonexistent; an ambiguous result
// (multiple instances for one ID) is reported as an error.
func (c *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
	instanceID, err := KubernetesInstanceID(providerID).MapToAWSInstanceID()
	if err != nil {
		return false, err
	}
	request := &ec2.DescribeInstancesInput{
		InstanceIds: []*string{instanceID.awsString()},
	}
	instances, err := c.ec2.DescribeInstances(request)
	if err != nil {
		return false, err
	}
	if len(instances) == 0 {
		return false, nil
	}
	if len(instances) > 1 {
		return false, fmt.Errorf("multiple instances found for instance: %s", instanceID)
	}
	// Guard against a nil State/Name instead of dereferencing directly
	// (mirrors InstanceShutdownByProviderID); a nil state is treated as
	// "not terminated".
	if instances[0].State != nil && aws.StringValue(instances[0].State.Name) == ec2.InstanceStateNameTerminated {
		klog.Warningf("the instance %s is terminated", instanceID)
		return false, nil
	}
	return true, nil
}
// InstanceShutdownByProviderID returns true if the instance is in safe state to detach volumes
func (c *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
	instanceID, err := KubernetesInstanceID(providerID).MapToAWSInstanceID()
	if err != nil {
		return false, err
	}

	req := &ec2.DescribeInstancesInput{
		InstanceIds: []*string{instanceID.awsString()},
	}
	instances, err := c.ec2.DescribeInstances(req)
	if err != nil {
		return false, err
	}

	switch {
	case len(instances) == 0:
		klog.Warningf("the instance %s does not exist anymore", providerID)
		// returns false, because otherwise node is not deleted from cluster
		// false means that it will continue to check InstanceExistsByProviderID
		return false, nil
	case len(instances) > 1:
		return false, fmt.Errorf("multiple instances found for instance: %s", instanceID)
	}

	// Stopped is the only state considered safe for detaching volumes.
	if state := instances[0].State; state != nil {
		return aws.StringValue(state.Name) == ec2.InstanceStateNameStopped, nil
	}
	return false, nil
}
// InstanceID returns the cloud provider ID of the node with the specified nodeName.
func (c *Cloud) InstanceID(ctx context.Context, nodeName types.NodeName) (string, error) {
	// In the future it is possible to also return an endpoint as:
	// <endpoint>/<zone>/<instanceid>
	if nodeName == c.selfAWSInstance.nodeName {
		// Fast path: answer from the cached local-instance data.
		return "/" + c.selfAWSInstance.availabilityZone + "/" + c.selfAWSInstance.awsID, nil
	}

	inst, err := c.getInstanceByNodeName(nodeName)
	if err == nil {
		return "/" + aws.StringValue(inst.Placement.AvailabilityZone) + "/" + aws.StringValue(inst.InstanceId), nil
	}
	if err == cloudprovider.InstanceNotFound {
		// The Instances interface requires that we return InstanceNotFound (without wrapping)
		return "", err
	}
	return "", fmt.Errorf("getInstanceByNodeName failed for %q with %q", nodeName, err)
}
// InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID
// This method will not be called from the node that is requesting this ID. i.e. metadata service
// and other local methods cannot be used here
func (c *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) {
	// Translate the Kubernetes providerID into an AWS instance ID.
	awsID, err := KubernetesInstanceID(providerID).MapToAWSInstanceID()
	if err != nil {
		return "", err
	}

	inst, err := describeInstance(c.ec2, awsID)
	if err != nil {
		return "", err
	}

	return aws.StringValue(inst.InstanceType), nil
}
// InstanceType returns the type of the node with the specified nodeName.
func (c *Cloud) InstanceType(ctx context.Context, nodeName types.NodeName) (string, error) {
	// Fast path: the local instance's type is already cached.
	if nodeName == c.selfAWSInstance.nodeName {
		return c.selfAWSInstance.instanceType, nil
	}

	instance, err := c.getInstanceByNodeName(nodeName)
	if err != nil {
		return "", fmt.Errorf("getInstanceByNodeName failed for %q with %q", nodeName, err)
	}
	return aws.StringValue(instance.InstanceType), nil
}
// GetCandidateZonesForDynamicVolume retrieves a list of all the zones in which nodes are running
// It currently involves querying all instances
//
// Instances are filtered to the "running" state and this cluster's tags
// (plus the legacy tags when still in use), then the availability zone of
// every non-master instance is collected into a set.
func (c *Cloud) GetCandidateZonesForDynamicVolume() (sets.String, error) {
	// We don't currently cache this; it is currently used only in volume
	// creation which is expected to be a comparatively rare occurrence.
	// TODO: Caching / expose v1.Nodes to the cloud provider?
	// TODO: We could also query for subnets, I think
	// Note: It is more efficient to call the EC2 API twice with different tag
	// filters than to call it once with a tag filter that results in a logical
	// OR. For really large clusters the logical OR will result in EC2 API rate
	// limiting.
	instances := []*ec2.Instance{}
	baseFilters := []*ec2.Filter{newEc2Filter("instance-state-name", "running")}
	filters := c.tagging.addFilters(baseFilters)
	di, err := c.describeInstances(filters)
	if err != nil {
		return nil, err
	}
	instances = append(instances, di...)
	// Second query with the legacy cluster tag, if this cluster still uses
	// it (see the rate-limiting note above for why two separate calls).
	if c.tagging.usesLegacyTags {
		filters = c.tagging.addLegacyFilters(baseFilters)
		di, err = c.describeInstances(filters)
		if err != nil {
			return nil, err
		}
		instances = append(instances, di...)
	}
	if len(instances) == 0 {
		return nil, fmt.Errorf("no instances returned")
	}
	zones := sets.NewString()
	for _, instance := range instances {
		// We skip over master nodes, if the installation tool labels them with one of the well-known master labels
		// This avoids creating a volume in a zone where only the master is running - e.g. #34583
		// This is a short-term workaround until the scheduler takes care of zone selection
		master := false
		for _, tag := range instance.Tags {
			tagKey := aws.StringValue(tag.Key)
			if awsTagNameMasterRoles.Has(tagKey) {
				master = true
			}
		}
		if master {
			klog.V(4).Infof("Ignoring master instance %q in zone discovery", aws.StringValue(instance.InstanceId))
			continue
		}
		// Placement can be absent; only record a zone when it is present.
		if instance.Placement != nil {
			zone := aws.StringValue(instance.Placement.AvailabilityZone)
			zones.Insert(zone)
		}
	}
	klog.V(2).Infof("Found instances in zones %s", zones)
	return zones, nil
}
// GetZone implements Zones.GetZone. It answers from the cached data of the
// instance this provider runs on — no API call is made.
func (c *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
	return cloudprovider.Zone{
		FailureDomain: c.selfAWSInstance.availabilityZone,
		Region:        c.region,
	}, nil
}
// GetZoneByProviderID implements Zones.GetZoneByProviderID
// This is particularly useful in external cloud providers where the kubelet
// does not initialize node data.
//
// Returns the instance's availability zone as the FailureDomain and the
// provider's configured region. An instance without placement information
// yields an empty FailureDomain rather than a panic.
func (c *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) {
	instanceID, err := KubernetesInstanceID(providerID).MapToAWSInstanceID()
	if err != nil {
		return cloudprovider.Zone{}, err
	}
	instance, err := c.getInstanceByID(string(instanceID))
	if err != nil {
		return cloudprovider.Zone{}, err
	}
	zone := cloudprovider.Zone{
		Region: c.region,
	}
	// Use nil-safe access instead of dereferencing Placement directly,
	// consistent with how Placement is handled elsewhere in this file.
	if instance.Placement != nil {
		zone.FailureDomain = aws.StringValue(instance.Placement.AvailabilityZone)
	}
	return zone, nil
}
// GetZoneByNodeName implements Zones.GetZoneByNodeName
// This is particularly useful in external cloud providers where the kubelet
// does not initialize node data.
//
// Returns the instance's availability zone as the FailureDomain and the
// provider's configured region. An instance without placement information
// yields an empty FailureDomain rather than a panic.
func (c *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (cloudprovider.Zone, error) {
	instance, err := c.getInstanceByNodeName(nodeName)
	if err != nil {
		return cloudprovider.Zone{}, err
	}
	zone := cloudprovider.Zone{
		Region: c.region,
	}
	// Use nil-safe access instead of dereferencing Placement directly,
	// consistent with how Placement is handled elsewhere in this file.
	if instance.Placement != nil {
		zone.FailureDomain = aws.StringValue(instance.Placement.AvailabilityZone)
	}
	return zone, nil
}
// mountDevice represents a mount device suffix for attaching an EBS volume.
// This should be stored as a single letter (i.e. c, not sdc or /dev/sdc).
type mountDevice string
// awsInstance caches identifying and placement information for a single EC2
// instance, together with the EC2 client used to query it.
type awsInstance struct {
	ec2 EC2
	// id in AWS
	awsID string
	// node name in k8s
	nodeName types.NodeName
	// availability zone the instance resides in
	availabilityZone string
	// ID of VPC the instance resides in
	vpcID string
	// ID of subnet the instance resides in
	subnetID string
	// instance type (e.g. as reported by the EC2 API)
	instanceType string
}
// newAWSInstance creates a new awsInstance object from an EC2 API instance
// description, tolerating a missing Placement block (empty availability zone).
func newAWSInstance(ec2Service EC2, instance *ec2.Instance) *awsInstance {
	var az string
	if placement := instance.Placement; placement != nil {
		az = aws.StringValue(placement.AvailabilityZone)
	}
	return &awsInstance{
		ec2:              ec2Service,
		awsID:            aws.StringValue(instance.InstanceId),
		nodeName:         mapInstanceToNodeName(instance),
		availabilityZone: az,
		instanceType:     aws.StringValue(instance.InstanceType),
		vpcID:            aws.StringValue(instance.VpcId),
		subnetID:         aws.StringValue(instance.SubnetId),
	}
}
// describeInstance gets the full information about this instance from the
// EC2 API by delegating to the package-level describeInstance helper.
func (i *awsInstance) describeInstance() (*ec2.Instance, error) {
	return describeInstance(i.ec2, InstanceID(i.awsID))
}
// Gets the mountDevice already assigned to the volume, or assigns an unused mountDevice.
// If the volume is already assigned, this will return the existing mountDevice with alreadyAttached=true.
// Otherwise the mountDevice is assigned by finding the first available mountDevice, and it is returned with alreadyAttached=false.
func (c *Cloud) getMountDevice(
	i *awsInstance,
	info *ec2.Instance,
	volumeID EBSVolumeID,
	assign bool) (assigned mountDevice, alreadyAttached bool, err error) {
	// Build a map of device-suffix -> attached EBS volume from the EC2 view
	// of the instance's block device mappings.
	deviceMappings := map[mountDevice]EBSVolumeID{}
	volumeStatus := map[EBSVolumeID]string{} // for better logging of volume status
	for _, blockDevice := range info.BlockDeviceMappings {
		name := aws.StringValue(blockDevice.DeviceName)
		if strings.HasPrefix(name, "/dev/sd") {
			name = name[7:]
		}
		if strings.HasPrefix(name, "/dev/xvd") {
			name = name[8:]
		}
		if len(name) < 1 || len(name) > 2 {
			klog.Warningf("Unexpected EBS DeviceName: %q", aws.StringValue(blockDevice.DeviceName))
		}
		// Skip mappings without EBS details (e.g. instance-store volumes);
		// dereferencing blockDevice.Ebs below would panic for them.
		if blockDevice.Ebs == nil || blockDevice.Ebs.VolumeId == nil {
			continue
		}
		volumeStatus[EBSVolumeID(*blockDevice.Ebs.VolumeId)] = aws.StringValue(blockDevice.Ebs.Status)
		deviceMappings[mountDevice(name)] = EBSVolumeID(aws.StringValue(blockDevice.Ebs.VolumeId))
	}
	// We lock to prevent concurrent mounts from conflicting
	// We may still conflict if someone calls the API concurrently,
	// but the AWS API will then fail one of the two attach operations
	c.attachingMutex.Lock()
	defer c.attachingMutex.Unlock()
	// Overlay in-progress attachments that EC2 may not report yet.
	for mountDevice, volume := range c.attaching[i.nodeName] {
		deviceMappings[mountDevice] = volume
	}
	// Check to see if this volume is already assigned a device on this machine
	for mountDevice, mappingVolumeID := range deviceMappings {
		if volumeID == mappingVolumeID {
			if assign {
				// DescribeInstances shows the volume as attached / detaching, while Kubernetes
				// cloud provider thinks it's detached.
				// This can happened when the volume has just been detached from the same node
				// and AWS API returns stale data in this DescribeInstances ("eventual consistency").
				// Fail the attachment and let A/D controller retry in a while, hoping that
				// AWS API returns consistent result next time (i.e. the volume is detached).
				status := volumeStatus[mappingVolumeID]
				klog.Warningf("Got assignment call for already-assigned volume: %s@%s, volume status: %s", mountDevice, mappingVolumeID, status)
				return mountDevice, false, fmt.Errorf("volume is still being detached from the node")
			}
			return mountDevice, true, nil
		}
	}
	if !assign {
		return mountDevice(""), false, nil
	}
	// Find the next unused device name
	deviceAllocator := c.deviceAllocators[i.nodeName]
	if deviceAllocator == nil {
		// we want device names with two significant characters, starting with /dev/xvdbb
		// the allowed range is /dev/xvd[b-c][a-z]
		// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html
		deviceAllocator = NewDeviceAllocator()
		c.deviceAllocators[i.nodeName] = deviceAllocator
	}
	// We need to lock deviceAllocator to prevent possible race with Deprioritize function
	deviceAllocator.Lock()
	defer deviceAllocator.Unlock()
	chosen, err := deviceAllocator.GetNext(deviceMappings)
	if err != nil {
		klog.Warningf("Could not assign a mount device. mappings=%v, error: %v", deviceMappings, err)
		return "", false, fmt.Errorf("too many EBS volumes attached to node %s", i.nodeName)
	}
	// Record the in-progress attachment so concurrent callers see it.
	attaching := c.attaching[i.nodeName]
	if attaching == nil {
		attaching = make(map[mountDevice]EBSVolumeID)
		c.attaching[i.nodeName] = attaching
	}
	attaching[chosen] = volumeID
	klog.V(2).Infof("Assigned mount device %s -> volume %s", chosen, volumeID)
	return chosen, false, nil
}
// endAttaching removes the entry from the "attachments in progress" map
// It returns true if it was found (and removed), false otherwise
func (c *Cloud) endAttaching(i *awsInstance, volumeID EBSVolumeID, mountDevice mountDevice) bool {
	c.attachingMutex.Lock()
	defer c.attachingMutex.Unlock()

	current, ok := c.attaching[i.nodeName][mountDevice]
	if !ok {
		return false
	}
	if current != volumeID {
		// This actually can happen, because getMountDevice combines the attaching map with the volumes
		// attached to the instance (as reported by the EC2 API). So if endAttaching comes after
		// a 10 second poll delay, we might well have had a concurrent request to allocate a mountpoint,
		// which because we allocate sequentially is _very_ likely to get the immediately freed volume
		klog.Infof("endAttaching on device %q assigned to different volume: %q vs %q", mountDevice, volumeID, current)
		return false
	}

	klog.V(2).Infof("Releasing in-process attachment entry: %s -> volume %s", mountDevice, volumeID)
	delete(c.attaching[i.nodeName], mountDevice)
	return true
}
// awsDisk pairs a Kubernetes volume name with its AWS EBS volume ID and the
// EC2 client used to operate on it.
type awsDisk struct {
	ec2 EC2
	// Name in k8s
	name KubernetesVolumeID
	// id in AWS
	awsID EBSVolumeID
}
// newAWSDisk builds an awsDisk for the given Kubernetes volume name,
// returning an error when the name cannot be mapped to an AWS volume ID.
func newAWSDisk(aws *Cloud, name KubernetesVolumeID) (*awsDisk, error) {
	volumeID, err := name.MapToAWSVolumeID()
	if err != nil {
		return nil, err
	}
	return &awsDisk{ec2: aws.ec2, name: name, awsID: volumeID}, nil
}
// isAWSErrorVolumeNotFound is a helper for describeVolume callers. It tries
// to retype the given error to an AWS error and returns true when the AWS
// error code is "InvalidVolume.NotFound", false otherwise (including nil).
func isAWSErrorVolumeNotFound(err error) bool {
	awsError, ok := err.(awserr.Error)
	// https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
	return ok && awsError.Code() == "InvalidVolume.NotFound"
}
// describeVolume gets the full information about this volume from the EC2
// API, insisting on exactly one result for the volume ID.
func (d *awsDisk) describeVolume() (*ec2.Volume, error) {
	request := &ec2.DescribeVolumesInput{
		VolumeIds: []*string{d.awsID.awsString()},
	}

	volumes, err := d.ec2.DescribeVolumes(request)
	if err != nil {
		return nil, err
	}

	switch len(volumes) {
	case 0:
		return nil, fmt.Errorf("no volumes found")
	case 1:
		return volumes[0], nil
	default:
		return nil, fmt.Errorf("multiple volumes found")
	}
}
// describeVolumeModification fetches the modification records for this
// volume and returns the most recent one (the last element of the result).
func (d *awsDisk) describeVolumeModification() (*ec2.VolumeModification, error) {
	request := &ec2.DescribeVolumesModificationsInput{
		VolumeIds: []*string{d.awsID.awsString()},
	}

	volumeMods, err := d.ec2.DescribeVolumeModifications(request)
	if err != nil {
		return nil, fmt.Errorf("error describing volume modification %s with %v", d.awsID, err)
	}
	if len(volumeMods) == 0 {
		return nil, fmt.Errorf("no volume modifications found for %s", d.awsID)
	}

	return volumeMods[len(volumeMods)-1], nil
}
// modifyVolume resizes this EBS volume to requestGiB via the EC2
// ModifyVolume API. If the modification does not complete immediately, it
// polls with exponential backoff until the modification reaches the
// Optimizing state (at which point the size change has taken effect).
// Returns the size and any modification or wait error.
func (d *awsDisk) modifyVolume(requestGiB int64) (int64, error) {
	volumeID := d.awsID
	request := &ec2.ModifyVolumeInput{
		VolumeId: volumeID.awsString(),
		Size:     aws.Int64(requestGiB),
	}
	output, err := d.ec2.ModifyVolume(request)
	if err != nil {
		modifyError := fmt.Errorf("AWS modifyVolume failed for %s with %v", volumeID, err)
		return requestGiB, modifyError
	}
	volumeModification := output.VolumeModification
	// Fast path: some modifications complete synchronously.
	if aws.StringValue(volumeModification.ModificationState) == ec2.VolumeModificationStateCompleted {
		return aws.Int64Value(volumeModification.TargetSize), nil
	}
	// Poll with exponential backoff: 1s initial delay, doubling, 10 steps.
	backoff := wait.Backoff{
		Duration: 1 * time.Second,
		Factor:   2,
		Steps:    10,
	}
	checkForResize := func() (bool, error) {
		volumeModification, err := d.describeVolumeModification()
		if err != nil {
			return false, err
		}
		// According to https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring_mods.html
		// Size changes usually take a few seconds to complete and take effect after a volume is in the Optimizing state.
		if aws.StringValue(volumeModification.ModificationState) == ec2.VolumeModificationStateOptimizing {
			return true, nil
		}
		return false, nil
	}
	waitWithErr := wait.ExponentialBackoff(backoff, checkForResize)
	return requestGiB, waitWithErr
}
// applyUnSchedulableTaint applies a unschedulable taint to a node after verifying
// if node has become unusable because of volumes getting stuck in attaching state.
func (c *Cloud) applyUnSchedulableTaint(nodeName types.NodeName, reason string) {
	node, err := c.kubeClient.CoreV1().Nodes().Get(string(nodeName), metav1.GetOptions{})
	if err != nil {
		klog.Errorf("Error fetching node %s with %v", nodeName, err)
		return
	}

	taint := &v1.Taint{
		Key:    nodeWithImpairedVolumes,
		Value:  "true",
		Effect: v1.TaintEffectNoSchedule,
	}
	if err := nodehelpers.AddOrUpdateTaintOnNode(c.kubeClient, string(nodeName), taint); err != nil {
		klog.Errorf("Error applying taint to node %s with error %v", nodeName, err)
		return
	}
	// Surface the reason as a warning event on the node object.
	c.eventRecorder.Eventf(node, v1.EventTypeWarning, volumeAttachmentStuck, reason)
}
// waitForAttachmentStatus polls until the attachment status is the expected value
// ("attached" or "detached"). On success, it returns the last attachment state.
//
// A volume with no attachments is treated as "detached". A missing volume
// ("InvalidVolume.NotFound") immediately satisfies a wait for "detached" and
// immediately fails a wait for "attached".
func (d *awsDisk) waitForAttachmentStatus(status string) (*ec2.VolumeAttachment, error) {
	backoff := wait.Backoff{
		Duration: volumeAttachmentStatusInitialDelay,
		Factor:   volumeAttachmentStatusFactor,
		Steps:    volumeAttachmentStatusSteps,
	}
	// Because of rate limiting, we often see errors from describeVolume
	// So we tolerate a limited number of failures.
	// But once we see more than 10 errors in a row, we return the error
	describeErrorCount := 0
	var attachment *ec2.VolumeAttachment
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		info, err := d.describeVolume()
		if err != nil {
			// The VolumeNotFound error is special -- we don't need to wait for it to repeat
			if isAWSErrorVolumeNotFound(err) {
				if status == "detached" {
					// The disk doesn't exist, assume it's detached, log warning and stop waiting
					klog.Warningf("Waiting for volume %q to be detached but the volume does not exist", d.awsID)
					stateStr := "detached"
					attachment = &ec2.VolumeAttachment{
						State: &stateStr,
					}
					return true, nil
				}
				if status == "attached" {
					// The disk doesn't exist, complain, give up waiting and report error
					klog.Warningf("Waiting for volume %q to be attached but the volume does not exist", d.awsID)
					return false, err
				}
			}
			describeErrorCount++
			if describeErrorCount > volumeAttachmentStatusConsecutiveErrorLimit {
				// report the error
				return false, err
			}
			klog.Warningf("Ignoring error from describe volume for volume %q; will retry: %q", d.awsID, err)
			return false, nil
		}
		// A successful describe resets the consecutive-error counter.
		describeErrorCount = 0
		if len(info.Attachments) > 1 {
			// Shouldn't happen; log so we know if it is
			klog.Warningf("Found multiple attachments for volume %q: %v", d.awsID, info)
		}
		attachmentStatus := ""
		for _, a := range info.Attachments {
			if attachmentStatus != "" {
				// Shouldn't happen; log so we know if it is
				klog.Warningf("Found multiple attachments for volume %q: %v", d.awsID, info)
			}
			if a.State != nil {
				attachment = a
				attachmentStatus = *a.State
			} else {
				// Shouldn't happen; log so we know if it is
				klog.Warningf("Ignoring nil attachment state for volume %q: %v", d.awsID, a)
			}
		}
		if attachmentStatus == "" {
			// No attachments at all: the volume is detached.
			attachmentStatus = "detached"
		}
		if attachmentStatus == status {
			// Attachment is in requested state, finish waiting
			return true, nil
		}
		// continue waiting
		klog.V(2).Infof("Waiting for volume %q state: actual=%s, desired=%s", d.awsID, attachmentStatus, status)
		return false, nil
	})
	return attachment, err
}
// deleteVolume deletes the EBS disk. It returns (true, nil) on success,
// (false, nil) when the volume no longer exists, a DeletedVolumeInUseError
// when the volume is still attached, and a wrapped error otherwise.
func (d *awsDisk) deleteVolume() (bool, error) {
	_, err := d.ec2.DeleteVolume(&ec2.DeleteVolumeInput{VolumeId: d.awsID.awsString()})
	if err == nil {
		return true, nil
	}
	if isAWSErrorVolumeNotFound(err) {
		// Already gone: treat as a no-op rather than an error.
		return false, nil
	}
	if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "VolumeInUse" {
		return false, volerr.NewDeletedVolumeInUseError(err.Error())
	}
	return false, fmt.Errorf("error deleting EBS volume %q: %q", d.awsID, err)
}
// buildSelfAWSInstance builds the awsInstance for the EC2 instance on which we are running.
// This is called when the AWSCloud is initialized, and should not be called otherwise (because the awsInstance for the local instance is a singleton with drive mapping state)
func (c *Cloud) buildSelfAWSInstance() (*awsInstance, error) {
	// Calling this twice is a programming error; the panic enforces the
	// singleton contract described above.
	if c.selfAWSInstance != nil {
		panic("do not call buildSelfAWSInstance directly")
	}
	// Only the instance ID comes from the metadata service ...
	instanceID, err := c.metadata.GetMetadata("instance-id")
	if err != nil {
		return nil, fmt.Errorf("error fetching instance-id from ec2 metadata service: %q", err)
	}
	// We want to fetch the hostname via the EC2 metadata service
	// (`GetMetadata("local-hostname")`): But see #11543 - we need to use
	// the EC2 API to get the privateDnsName in case of a private DNS zone
	// e.g. mydomain.io, because the metadata service returns the wrong
	// hostname. Once we're doing that, we might as well get all our
	// information from the instance returned by the EC2 API - it is a
	// single API call to get all the information, and it means we don't
	// have two code paths.
	instance, err := c.getInstanceByID(instanceID)
	if err != nil {
		return nil, fmt.Errorf("error finding instance %s: %q", instanceID, err)
	}
	return newAWSInstance(c.ec2, instance), nil
}
// wrapAttachError wraps the error returned by an AttachVolume request with
// additional information, if needed and possible. For "VolumeInUse" errors
// it describes the volume to report which instance currently holds the
// attachment; all other errors fall through to a generic wrapped message.
func wrapAttachError(err error, disk *awsDisk, instance string) error {
	if awsError, ok := err.(awserr.Error); ok {
		if awsError.Code() == "VolumeInUse" {
			// NOTE: err is deliberately shadowed here; the outer err is
			// still used in the fallthrough message below.
			info, err := disk.describeVolume()
			if err != nil {
				klog.Errorf("Error describing volume %q: %q", disk.awsID, err)
			} else {
				for _, a := range info.Attachments {
					if disk.awsID != EBSVolumeID(aws.StringValue(a.VolumeId)) {
						klog.Warningf("Expected to get attachment info of volume %q but instead got info of %q", disk.awsID, aws.StringValue(a.VolumeId))
					} else if aws.StringValue(a.State) == "attached" {
						return fmt.Errorf("error attaching EBS volume %q to instance %q: %q. The volume is currently attached to instance %q", disk.awsID, instance, awsError, aws.StringValue(a.InstanceId))
					}
				}
			}
		}
	}
	return fmt.Errorf("error attaching EBS volume %q to instance %q: %q", disk.awsID, instance, err)
}
// AttachDisk implements Volumes.AttachDisk
//
// It attaches the EBS volume named by diskName to the EC2 instance backing
// nodeName and returns the device path as seen from inside the instance
// (/dev/xvdX). The attach is tracked via the attaching-state bookkeeping
// (getMountDevice/endAttaching) so concurrent attaches do not collide on a
// device name.
func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) (string, error) {
	disk, err := newAWSDisk(c, diskName)
	if err != nil {
		return "", err
	}

	awsInstance, info, err := c.getFullInstance(nodeName)
	if err != nil {
		return "", fmt.Errorf("error finding instance %s: %q", nodeName, err)
	}

	// mountDevice will hold the device where we should try to attach the disk
	var mountDevice mountDevice
	// alreadyAttached is true if we have already called AttachVolume on this disk
	var alreadyAttached bool

	// attachEnded is set to true if the attach operation completed
	// (successfully or not), and is thus no longer in progress
	attachEnded := false
	defer func() {
		// Release the in-progress reservation once the attach has ended,
		// whichever return path we took.
		if attachEnded {
			if !c.endAttaching(awsInstance, disk.awsID, mountDevice) {
				klog.Errorf("endAttaching called for disk %q when attach not in progress", disk.awsID)
			}
		}
	}()

	// Reserve a device letter for this volume; alreadyAttached signals that
	// an AttachVolume for this disk is already in flight or done, in which
	// case we skip straight to waiting for "attached".
	mountDevice, alreadyAttached, err = c.getMountDevice(awsInstance, info, disk.awsID, true)
	if err != nil {
		return "", err
	}

	// Inside the instance, the mountpoint always looks like /dev/xvdX (?)
	hostDevice := "/dev/xvd" + string(mountDevice)
	// We are using xvd names (so we are HVM only)
	// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html
	ec2Device := "/dev/xvd" + string(mountDevice)

	if !alreadyAttached {
		// NOTE(review): when !available, checkIfAvailable appears to always
		// return a non-nil err (all its false paths do) — otherwise this
		// would return failure with a nil error; confirm.
		available, err := c.checkIfAvailable(disk, "attaching", awsInstance.awsID)
		if err != nil {
			klog.Error(err)
		}
		if !available {
			attachEnded = true
			return "", err
		}
		request := &ec2.AttachVolumeInput{
			Device:     aws.String(ec2Device),
			InstanceId: aws.String(awsInstance.awsID),
			VolumeId:   disk.awsID.awsString(),
		}

		attachResponse, err := c.ec2.AttachVolume(request)
		if err != nil {
			attachEnded = true
			// TODO: Check if the volume was concurrently attached?
			return "", wrapAttachError(err, disk, awsInstance.awsID)
		}
		// Deprioritize the device letter we just used so future attaches
		// prefer other letters.
		if da, ok := c.deviceAllocators[awsInstance.nodeName]; ok {
			da.Deprioritize(mountDevice)
		}
		klog.V(2).Infof("AttachVolume volume=%q instance=%q request returned %v", disk.awsID, awsInstance.awsID, attachResponse)
	}

	attachment, err := disk.waitForAttachmentStatus("attached")
	if err != nil {
		if err == wait.ErrWaitTimeout {
			// A volume stuck in "attaching" usually indicates an impaired
			// instance; taint the node so workloads drain off it.
			c.applyUnSchedulableTaint(nodeName, "Volume stuck in attaching state - node needs reboot to fix impaired state.")
		}
		return "", err
	}

	// The attach operation has finished
	attachEnded = true

	// Double check the attachment to be 100% sure we attached the correct volume at the correct mountpoint
	// It could happen otherwise that we see the volume attached from a previous/separate AttachVolume call,
	// which could theoretically be against a different device (or even instance).
	if attachment == nil {
		// Impossible?
		return "", fmt.Errorf("unexpected state: attachment nil after attached %q to %q", diskName, nodeName)
	}
	if ec2Device != aws.StringValue(attachment.Device) {
		return "", fmt.Errorf("disk attachment of %q to %q failed: requested device %q but found %q", diskName, nodeName, ec2Device, aws.StringValue(attachment.Device))
	}
	if awsInstance.awsID != aws.StringValue(attachment.InstanceId) {
		return "", fmt.Errorf("disk attachment of %q to %q failed: requested instance %q but found %q", diskName, nodeName, awsInstance.awsID, aws.StringValue(attachment.InstanceId))
	}

	return hostDevice, nil
}
// DetachDisk implements Volumes.DetachDisk
//
// It detaches the named EBS volume from the instance backing nodeName and
// returns the host device path it had been attached at. An empty path with a
// nil error means there was nothing to detach (volume gone, or not attached
// to this node).
func (c *Cloud) DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) (string, error) {
	diskInfo, attached, err := c.checkIfAttachedToNode(diskName, nodeName)
	if err != nil {
		if isAWSErrorVolumeNotFound(err) {
			// Someone deleted the volume being detached; complain, but do nothing else and return success
			klog.Warningf("DetachDisk %s called for node %s but volume does not exist; assuming the volume is detached", diskName, nodeName)
			return "", nil
		}

		return "", err
	}

	// NOTE(review): judging by the log message, this branch fires when the
	// volume is attached to a node other than the requested one
	// (diskInfo.nodeName) — confirm against checkIfAttachedToNode.
	if !attached && diskInfo.ec2Instance != nil {
		klog.Warningf("DetachDisk %s called for node %s but volume is attached to node %s", diskName, nodeName, diskInfo.nodeName)
		return "", nil
	}

	if !attached {
		// Not attached anywhere: nothing to do.
		return "", nil
	}

	awsInstance := newAWSInstance(c.ec2, diskInfo.ec2Instance)

	// Final argument differs from AttachDisk (true there, false here) —
	// presumably it controls whether a new device letter may be assigned;
	// here we only want the existing assignment. TODO confirm.
	mountDevice, alreadyAttached, err := c.getMountDevice(awsInstance, diskInfo.ec2Instance, diskInfo.disk.awsID, false)
	if err != nil {
		return "", err
	}

	if !alreadyAttached {
		klog.Warningf("DetachDisk called on non-attached disk: %s", diskName)
		// TODO: Continue? Tolerate non-attached error from the AWS DetachVolume call?
	}

	request := ec2.DetachVolumeInput{
		InstanceId: &awsInstance.awsID,
		VolumeId:   diskInfo.disk.awsID.awsString(),
	}

	response, err := c.ec2.DetachVolume(&request)
	if err != nil {
		return "", fmt.Errorf("error detaching EBS volume %q from %q: %q", diskInfo.disk.awsID, awsInstance.awsID, err)
	}

	if response == nil {
		return "", errors.New("no response from DetachVolume")
	}

	attachment, err := diskInfo.disk.waitForAttachmentStatus("detached")
	if err != nil {
		return "", err
	}
	// Make the freed device letter preferable again for future attaches.
	if da, ok := c.deviceAllocators[awsInstance.nodeName]; ok {
		da.Deprioritize(mountDevice)
	}
	if attachment != nil {
		// We expect it to be nil, it is (maybe) interesting if it is not
		klog.V(2).Infof("waitForAttachmentStatus returned non-nil attachment with state=detached: %v", attachment)
	}

	if mountDevice != "" {
		c.endAttaching(awsInstance, diskInfo.disk.awsID, mountDevice)
		// We don't check the return value - we don't really expect the attachment to have been
		// in progress, though it might have been
	}

	hostDevicePath := "/dev/xvd" + string(mountDevice)
	return hostDevicePath, err
}
// CreateDisk implements Volumes.CreateDisk
//
// It creates a new EBS volume per volumeOptions (type, size, IOPS,
// encryption, tags), waits until AWS reports it "available", and returns its
// Kubernetes volume ID ("aws://<az>/<volume-id>").
func (c *Cloud) CreateDisk(volumeOptions *VolumeOptions) (KubernetesVolumeID, error) {
	var createType string
	var iops int64
	switch volumeOptions.VolumeType {
	case VolumeTypeGP2, VolumeTypeSC1, VolumeTypeST1:
		createType = volumeOptions.VolumeType

	case VolumeTypeIO1:
		// See http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html
		// for IOPS constraints. AWS will throw an error if IOPS per GB gets out
		// of supported bounds, no need to check it here.
		createType = volumeOptions.VolumeType
		iops = int64(volumeOptions.CapacityGB * volumeOptions.IOPSPerGB)

		// Cap at min/max total IOPS, AWS would throw an error if it gets too
		// low/high.
		if iops < MinTotalIOPS {
			iops = MinTotalIOPS
		}
		if iops > MaxTotalIOPS {
			iops = MaxTotalIOPS
		}

	case "":
		// No type requested: fall back to the provider default.
		createType = DefaultVolumeType

	default:
		return "", fmt.Errorf("invalid AWS VolumeType %q", volumeOptions.VolumeType)
	}

	request := &ec2.CreateVolumeInput{}
	request.AvailabilityZone = aws.String(volumeOptions.AvailabilityZone)
	request.Size = aws.Int64(int64(volumeOptions.CapacityGB))
	request.VolumeType = aws.String(createType)
	request.Encrypted = aws.Bool(volumeOptions.Encrypted)
	if len(volumeOptions.KmsKeyID) > 0 {
		// A KMS key implies encryption regardless of the Encrypted option.
		request.KmsKeyId = aws.String(volumeOptions.KmsKeyID)
		request.Encrypted = aws.Bool(true)
	}
	if iops > 0 {
		request.Iops = aws.Int64(iops)
	}

	// Merge the caller's tags with the cluster-ownership tags and attach
	// them at creation time via a tag specification.
	tags := volumeOptions.Tags
	tags = c.tagging.buildTags(ResourceLifecycleOwned, tags)

	var tagList []*ec2.Tag
	for k, v := range tags {
		tagList = append(tagList, &ec2.Tag{
			Key: aws.String(k), Value: aws.String(v),
		})
	}
	request.TagSpecifications = append(request.TagSpecifications, &ec2.TagSpecification{
		Tags:         tagList,
		ResourceType: aws.String(ec2.ResourceTypeVolume),
	})

	response, err := c.ec2.CreateVolume(request)
	if err != nil {
		return "", err
	}

	awsID := EBSVolumeID(aws.StringValue(response.VolumeId))
	if awsID == "" {
		return "", fmt.Errorf("VolumeID was not returned by CreateVolume")
	}
	volumeName := KubernetesVolumeID("aws://" + aws.StringValue(response.AvailabilityZone) + "/" + string(awsID))

	err = c.waitUntilVolumeAvailable(volumeName)
	if err != nil {
		// AWS has a bad habit of reporting success when creating a volume with
		// encryption keys that either don't exist or have wrong permissions.
		// Such a volume lives for a couple of seconds and then it's silently deleted
		// by AWS. There is no other check to ensure that given KMS key is correct,
		// because Kubernetes may have limited permissions to the key.
		if isAWSErrorVolumeNotFound(err) {
			err = fmt.Errorf("failed to create encrypted volume: the volume disappeared after creation, most likely due to inaccessible KMS encryption key")
		}
		return "", err
	}

	return volumeName, nil
}
// waitUntilVolumeAvailable polls a freshly created EBS volume until AWS
// reports it "available". It tolerates the transient "creating" state and
// fails fast on any other state or describe error.
func (c *Cloud) waitUntilVolumeAvailable(volumeName KubernetesVolumeID) error {
	disk, err := newAWSDisk(c, volumeName)
	if err != nil {
		// Unreachable code
		return err
	}
	// Give AWS a moment before the first describe; very young volumes may
	// not be visible yet.
	time.Sleep(5 * time.Second)

	checkAvailable := func() (bool, error) {
		vol, describeErr := disk.describeVolume()
		if describeErr != nil {
			return true, describeErr
		}
		if vol.State == nil {
			// No state reported yet; keep polling.
			return false, nil
		}
		switch *vol.State {
		case "available":
			// The volume is Available, it won't be deleted now.
			return true, nil
		case "creating":
			return false, nil
		}
		return true, fmt.Errorf("unexpected State of newly created AWS EBS volume %s: %q", volumeName, *vol.State)
	}

	backoff := wait.Backoff{
		Duration: volumeCreateInitialDelay,
		Factor:   volumeCreateBackoffFactor,
		Steps:    volumeCreateBackoffSteps,
	}
	return wait.ExponentialBackoff(backoff, checkAvailable)
}
// DeleteDisk implements Volumes.DeleteDisk
//
// It deletes the named EBS volume if it is in the "available" state. A
// volume that no longer exists is treated as already deleted (false, nil).
func (c *Cloud) DeleteDisk(volumeName KubernetesVolumeID) (bool, error) {
	disk, err := newAWSDisk(c, volumeName)
	if err != nil {
		return false, err
	}
	available, err := c.checkIfAvailable(disk, "deleting", "")
	if err != nil {
		if isAWSErrorVolumeNotFound(err) {
			klog.V(2).Infof("Volume %s not found when deleting it, assuming it's deleted", disk.awsID)
			return false, nil
		}
		// Not fatal here: availability decides whether we proceed.
		klog.Error(err)
	}
	if !available {
		return false, err
	}
	return disk.deleteVolume()
}
// checkIfAvailable returns true when the volume is in the "available" state
// and may therefore be attached or deleted. opName ("attaching"/"deleting")
// and instance are used only to build error messages. If the volume is
// attached elsewhere, a volerr dangling error carrying the other node and
// device is returned so the caller can trigger cleanup of the stale
// attachment.
func (c *Cloud) checkIfAvailable(disk *awsDisk, opName string, instance string) (bool, error) {
	info, err := disk.describeVolume()

	if err != nil {
		klog.Errorf("Error describing volume %q: %q", disk.awsID, err)
		// if for some reason we can not describe volume we will return error
		return false, err
	}

	volumeState := aws.StringValue(info.State)
	opError := fmt.Sprintf("Error %s EBS volume %q", opName, disk.awsID)
	if len(instance) != 0 {
		// NOTE(review): %q re-quotes the whole opError phrase, producing
		// escaped quotes in the final message — looks unintentional; confirm.
		opError = fmt.Sprintf("%q to instance %q", opError, instance)
	}

	// Only available volumes can be attached or deleted
	if volumeState != "available" {
		// Volume is attached somewhere else and we can not attach it here
		if len(info.Attachments) > 0 {
			// Only the first attachment is inspected.
			attachment := info.Attachments[0]
			instanceID := aws.StringValue(attachment.InstanceId)
			attachedInstance, ierr := c.getInstanceByID(instanceID)
			attachErr := fmt.Sprintf("%s since volume is currently attached to %q", opError, instanceID)
			if ierr != nil {
				klog.Error(attachErr)
				return false, errors.New(attachErr)
			}
			devicePath := aws.StringValue(attachment.Device)
			nodeName := mapInstanceToNodeName(attachedInstance)
			danglingErr := volerr.NewDanglingError(attachErr, nodeName, devicePath)
			return false, danglingErr
		}

		attachErr := fmt.Errorf("%s since volume is in %q state", opError, volumeState)
		return false, attachErr
	}

	return true, nil
}
// GetLabelsForVolume gets the volume labels for a volume
//
// Returns (nil, nil) for volumes that are not AWS EBS volumes or that are
// still being provisioned; otherwise delegates to GetVolumeLabels.
func (c *Cloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) {
	source := pv.Spec.AWSElasticBlockStore
	// Ignore if not AWSElasticBlockStore.
	if source == nil {
		return nil, nil
	}

	// Ignore any volumes that are being provisioned
	if source.VolumeID == cloudvolume.ProvisionedVolumeName {
		return nil, nil
	}

	return c.GetVolumeLabels(KubernetesVolumeID(source.VolumeID))
}
// GetVolumeLabels implements Volumes.GetVolumeLabels
//
// It describes the volume and returns topology labels for its availability
// zone and region.
func (c *Cloud) GetVolumeLabels(volumeName KubernetesVolumeID) (map[string]string, error) {
	disk, err := newAWSDisk(c, volumeName)
	if err != nil {
		return nil, err
	}
	info, err := disk.describeVolume()
	if err != nil {
		return nil, err
	}

	az := aws.StringValue(info.AvailabilityZone)
	if az == "" {
		return nil, fmt.Errorf("volume did not have AZ information: %q", aws.StringValue(info.VolumeId))
	}
	region, err := azToRegion(az)
	if err != nil {
		return nil, err
	}

	return map[string]string{
		v1.LabelZoneFailureDomain: az,
		v1.LabelZoneRegion:        region,
	}, nil
}
// GetDiskPath implements Volumes.GetDiskPath
//
// It returns the device path of the volume's first attachment, or an error
// if the volume has no attachments.
func (c *Cloud) GetDiskPath(volumeName KubernetesVolumeID) (string, error) {
	awsDisk, err := newAWSDisk(c, volumeName)
	if err != nil {
		return "", err
	}
	info, err := awsDisk.describeVolume()
	if err != nil {
		return "", err
	}
	if len(info.Attachments) == 0 {
		// Lowercase error string per Go convention (messages get wrapped by
		// callers); previously started with a capital "No".
		return "", fmt.Errorf("no attachment to volume %s", volumeName)
	}
	return aws.StringValue(info.Attachments[0].Device), nil
}
// DiskIsAttached implements Volumes.DiskIsAttached
//
// It reports whether the named volume is attached to the given node. A
// volume that no longer exists is considered not attached.
func (c *Cloud) DiskIsAttached(diskName KubernetesVolumeID, nodeName types.NodeName) (bool, error) {
	_, attached, err := c.checkIfAttachedToNode(diskName, nodeName)
	if err == nil {
		return attached, nil
	}
	if isAWSErrorVolumeNotFound(err) {
		// The disk doesn't exist, can't be attached
		klog.Warningf("DiskIsAttached called for volume %s on node %s but the volume does not exist", diskName, nodeName)
		return false, nil
	}
	// On any other failure, err on the side of "attached" so callers do not
	// prematurely treat the volume as free.
	return true, err
}
// DisksAreAttached returns a map of nodes and Kubernetes volume IDs indicating
// if the volumes are attached to the node
func (c *Cloud) DisksAreAttached(nodeDisks map[types.NodeName][]KubernetesVolumeID) (map[types.NodeName]map[KubernetesVolumeID]bool, error) {
	attached := make(map[types.NodeName]map[KubernetesVolumeID]bool)
	if len(nodeDisks) == 0 {
		return attached, nil
	}

	// Initialize every requested (node, disk) pair to false; matched
	// attachments flip individual entries to true below.
	nodeNames := []string{}
	for nodeName, diskNames := range nodeDisks {
		for _, diskName := range diskNames {
			setNodeDisk(attached, diskName, nodeName, false)
		}
		nodeNames = append(nodeNames, mapNodeNameToPrivateDNSName(nodeName))
	}

	// Note that we get instances regardless of state.
	// This means there might be multiple nodes with the same node names.
	awsInstances, err := c.getInstancesByNodeNames(nodeNames)
	if err != nil {
		// When there is an error fetching instance information
		// it is safer to return nil and let volume information not be touched.
		return nil, err
	}

	if len(awsInstances) == 0 {
		klog.V(2).Infof("DisksAreAttached found no instances matching node names; will assume disks not attached")
		return attached, nil
	}

	// Note that we check that the volume is attached to the correct node, not that it is attached to _a_ node
	for _, awsInstance := range awsInstances {
		nodeName := mapInstanceToNodeName(awsInstance)

		diskNames := nodeDisks[nodeName]
		if len(diskNames) == 0 {
			continue
		}

		awsInstanceState := "<nil>"
		if awsInstance != nil && awsInstance.State != nil {
			awsInstanceState = aws.StringValue(awsInstance.State.Name)
		}
		if awsInstanceState == "terminated" {
			// Instance is terminated, safe to assume volumes not attached
			// Note that we keep volumes attached to instances in other states (most notably, stopped)
			continue
		}

		// Map AWS volume IDs back to the caller's Kubernetes volume IDs.
		idToDiskName := make(map[EBSVolumeID]KubernetesVolumeID)
		for _, diskName := range diskNames {
			volumeID, err := diskName.MapToAWSVolumeID()
			if err != nil {
				return nil, fmt.Errorf("error mapping volume spec %q to aws id: %v", diskName, err)
			}
			idToDiskName[volumeID] = diskName
		}

		// NOTE(review): blockDevice.Ebs is dereferenced without a nil check —
		// presumably all block device mappings on these instances are EBS;
		// confirm.
		for _, blockDevice := range awsInstance.BlockDeviceMappings {
			volumeID := EBSVolumeID(aws.StringValue(blockDevice.Ebs.VolumeId))
			diskName, found := idToDiskName[volumeID]
			if found {
				// Disk is still attached to node
				setNodeDisk(attached, diskName, nodeName, true)
			}
		}
	}

	return attached, nil
}
// ResizeDisk resizes an EBS volume in GiB increments, it will round up to the
// next GiB if arguments are not provided in even GiB increments
func (c *Cloud) ResizeDisk(
	diskName KubernetesVolumeID,
	oldSize resource.Quantity,
	newSize resource.Quantity) (resource.Quantity, error) {
	disk, err := newAWSDisk(c, diskName)
	if err != nil {
		return oldSize, err
	}
	info, err := disk.describeVolume()
	if err != nil {
		return oldSize, fmt.Errorf("AWS.ResizeDisk Error describing volume %s with %v", diskName, err)
	}

	// AWS resizes in chunks of GiB (not GB)
	requestGiB := volumehelpers.RoundUpToGiB(newSize)
	newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", requestGiB))

	// Nothing to do if the volume is already at least as large as requested.
	if aws.Int64Value(info.Size) >= requestGiB {
		return newSizeQuant, nil
	}

	if _, err = disk.modifyVolume(requestGiB); err != nil {
		return oldSize, err
	}
	return newSizeQuant, nil
}
// describeLoadBalancer gets the current classic ELB state by name. A missing
// load balancer is reported as (nil, nil); if AWS returns several with the
// same name, the last one wins (with an error logged).
func (c *Cloud) describeLoadBalancer(name string) (*elb.LoadBalancerDescription, error) {
	request := &elb.DescribeLoadBalancersInput{
		LoadBalancerNames: []*string{&name},
	}
	response, err := c.elb.DescribeLoadBalancers(request)
	if err != nil {
		if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "LoadBalancerNotFound" {
			return nil, nil
		}
		return nil, err
	}

	var found *elb.LoadBalancerDescription
	for _, lb := range response.LoadBalancerDescriptions {
		if found != nil {
			klog.Errorf("Found multiple load balancers with name: %s", name)
		}
		found = lb
	}
	return found, nil
}
// addLoadBalancerTags applies the requested key/value tags to the named
// classic ELB.
func (c *Cloud) addLoadBalancerTags(loadBalancerName string, requested map[string]string) error {
	var tags []*elb.Tag
	for key, value := range requested {
		tags = append(tags, &elb.Tag{
			Key:   aws.String(key),
			Value: aws.String(value),
		})
	}

	request := &elb.AddTagsInput{
		LoadBalancerNames: []*string{&loadBalancerName},
		Tags:              tags,
	}

	if _, err := c.elb.AddTags(request); err != nil {
		return fmt.Errorf("error adding tags to load balancer: %v", err)
	}
	return nil
}
// describeLoadBalancerv2 gets the current state of the network load balancer
// (ELBv2) with the given name. A missing load balancer yields (nil, nil).
func (c *Cloud) describeLoadBalancerv2(name string) (*elbv2.LoadBalancer, error) {
	request := &elbv2.DescribeLoadBalancersInput{
		Names: []*string{aws.String(name)},
	}

	response, err := c.elbv2.DescribeLoadBalancers(request)
	if err != nil {
		if awsError, ok := err.(awserr.Error); ok && awsError.Code() == elbv2.ErrCodeLoadBalancerNotFoundException {
			return nil, nil
		}
		return nil, fmt.Errorf("error describing load balancer: %q", err)
	}

	// AWS will not return 2 load balancers with the same name _and_ type.
	for _, lb := range response.LoadBalancers {
		if aws.StringValue(lb.Type) == elbv2.LoadBalancerTypeEnumNetwork {
			return lb, nil
		}
	}

	return nil, fmt.Errorf("NLB '%s' could not be found", name)
}
// findVPCID retrieves the instance's VPC ID from the EC2 metadata service by
// walking the network interfaces; the first interface that yields a vpc-id
// wins.
func (c *Cloud) findVPCID() (string, error) {
	macs, err := c.metadata.GetMetadata("network/interfaces/macs/")
	if err != nil {
		return "", fmt.Errorf("could not list interfaces of the instance: %q", err)
	}

	// loop over interfaces, first vpc id returned wins
	for _, macPath := range strings.Split(macs, "\n") {
		if macPath == "" {
			continue
		}
		url := fmt.Sprintf("network/interfaces/macs/%svpc-id", macPath)
		vpcID, metadataErr := c.metadata.GetMetadata(url)
		if metadataErr != nil {
			// Try the next interface.
			continue
		}
		return vpcID, nil
	}
	return "", fmt.Errorf("could not find VPC ID in instance metadata")
}
// findSecurityGroup retrieves the specified security group from the AWS API,
// or returns nil if not found.
func (c *Cloud) findSecurityGroup(securityGroupID string) (*ec2.SecurityGroup, error) {
	request := &ec2.DescribeSecurityGroupsInput{
		GroupIds: []*string{&securityGroupID},
	}
	// We don't apply our tag filters because we are retrieving by ID

	groups, err := c.ec2.DescribeSecurityGroups(request)
	if err != nil {
		klog.Warningf("Error retrieving security group: %q", err)
		return nil, err
	}

	switch len(groups) {
	case 0:
		return nil, nil
	case 1:
		return groups[0], nil
	default:
		// This should not be possible - ids should be unique
		return nil, fmt.Errorf("multiple security groups found with same id %q", securityGroupID)
	}
}
// isEqualIntPointer reports whether two *int64 values are equal: two nil
// pointers are equal, a nil pointer never equals a non-nil one, and two
// non-nil pointers compare by pointee value.
func isEqualIntPointer(l, r *int64) bool {
	if l == nil || r == nil {
		// The original's second branch (`return l == nil` when r == nil) was
		// always false at that point; equal only when both are nil.
		return l == nil && r == nil
	}
	return *l == *r
}
// isEqualStringPointer reports whether two *string values are equal: two nil
// pointers are equal, a nil pointer never equals a non-nil one, and two
// non-nil pointers compare by pointee value.
func isEqualStringPointer(l, r *string) bool {
	switch {
	case l == nil:
		return r == nil
	case r == nil:
		return l == nil
	default:
		return *l == *r
	}
}
// ipPermissionExists reports whether newPermission is covered by existing.
// The comparison is asymmetric: the port range and protocol must match
// exactly, while newPermission's IP ranges and user/group pairs must each be
// a subset of existing's. When compareGroupUserIDs is true, UserIdGroupPairs
// must match on UserId as well as GroupId.
func ipPermissionExists(newPermission, existing *ec2.IpPermission, compareGroupUserIDs bool) bool {
	if !isEqualIntPointer(newPermission.FromPort, existing.FromPort) {
		return false
	}
	if !isEqualIntPointer(newPermission.ToPort, existing.ToPort) {
		return false
	}
	if !isEqualStringPointer(newPermission.IpProtocol, existing.IpProtocol) {
		return false
	}
	// Check only if newPermission is a subset of existing. Usually it has zero or one elements.
	// Not doing actual CIDR math yet; not clear it's needed, either.
	klog.V(4).Infof("Comparing %v to %v", newPermission, existing)
	if len(newPermission.IpRanges) > len(existing.IpRanges) {
		return false
	}

	// Every CIDR in newPermission must appear (as an exact string) in existing.
	for j := range newPermission.IpRanges {
		found := false
		for k := range existing.IpRanges {
			if isEqualStringPointer(newPermission.IpRanges[j].CidrIp, existing.IpRanges[k].CidrIp) {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}

	// Every user/group pair in newPermission must have a match in existing.
	for _, leftPair := range newPermission.UserIdGroupPairs {
		found := false
		for _, rightPair := range existing.UserIdGroupPairs {
			if isEqualUserGroupPair(leftPair, rightPair, compareGroupUserIDs) {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}

	return true
}
// isEqualUserGroupPair reports whether two security-group pairs refer to the
// same group; when compareGroupUserIDs is set, the owning UserId must match
// as well.
func isEqualUserGroupPair(l, r *ec2.UserIdGroupPair, compareGroupUserIDs bool) bool {
	klog.V(2).Infof("Comparing %v to %v", *l.GroupId, *r.GroupId)
	if !isEqualStringPointer(l.GroupId, r.GroupId) {
		return false
	}
	if !compareGroupUserIDs {
		return true
	}
	return isEqualStringPointer(l.UserId, r.UserId)
}
// Makes sure the security group ingress is exactly the specified permissions
// Returns true if and only if changes were made
// The security group must already exist
func (c *Cloud) setSecurityGroupIngress(securityGroupID string, permissions IPPermissionSet) (bool, error) {
	// We do not want to make changes to the Global defined SG
	if securityGroupID == c.cfg.Global.ElbSecurityGroup {
		return false, nil
	}
	group, err := c.findSecurityGroup(securityGroupID)
	if err != nil {
		klog.Warningf("Error retrieving security group %q", err)
		return false, err
	}

	if group == nil {
		return false, fmt.Errorf("security group not found: %s", securityGroupID)
	}

	klog.V(2).Infof("Existing security group ingress: %s %v", securityGroupID, group.IpPermissions)

	actual := NewIPPermissionSet(group.IpPermissions...)

	// EC2 groups rules together, for example combining:
	//
	// { Port=80, Range=[A] } and { Port=80, Range=[B] }
	//
	// into { Port=80, Range=[A,B] }
	//
	// We have to ungroup them, because otherwise the logic becomes really
	// complicated, and also because if we have Range=[A,B] and we try to
	// add Range=[A] then EC2 complains about a duplicate rule.
	permissions = permissions.Ungroup()
	actual = actual.Ungroup()

	// Compute the diff in both directions: rules to revoke and rules to add.
	remove := actual.Difference(permissions)
	add := permissions.Difference(actual)

	if add.Len() == 0 && remove.Len() == 0 {
		return false, nil
	}

	// TODO: There is a limit in VPC of 100 rules per security group, so we
	// probably should try grouping or combining to fit under this limit.
	// But this is only used on the ELB security group currently, so it
	// would require (ports * CIDRS) > 100.  Also, it isn't obvious exactly
	// how removing single permissions from compound rules works, and we
	// don't want to accidentally open more than intended while we're
	// applying changes.
	if add.Len() != 0 {
		klog.V(2).Infof("Adding security group ingress: %s %v", securityGroupID, add.List())

		request := &ec2.AuthorizeSecurityGroupIngressInput{}
		request.GroupId = &securityGroupID
		request.IpPermissions = add.List()
		_, err = c.ec2.AuthorizeSecurityGroupIngress(request)
		if err != nil {
			return false, fmt.Errorf("error authorizing security group ingress: %q", err)
		}
	}
	if remove.Len() != 0 {
		klog.V(2).Infof("Remove security group ingress: %s %v", securityGroupID, remove.List())

		request := &ec2.RevokeSecurityGroupIngressInput{}
		request.GroupId = &securityGroupID
		request.IpPermissions = remove.List()
		_, err = c.ec2.RevokeSecurityGroupIngress(request)
		if err != nil {
			return false, fmt.Errorf("error revoking security group ingress: %q", err)
		}
	}

	return true, nil
}
// Makes sure the security group includes the specified permissions
// Returns true if and only if changes were made
// The security group must already exist
func (c *Cloud) addSecurityGroupIngress(securityGroupID string, addPermissions []*ec2.IpPermission) (bool, error) {
	// We do not want to make changes to the Global defined SG
	if securityGroupID == c.cfg.Global.ElbSecurityGroup {
		return false, nil
	}

	group, err := c.findSecurityGroup(securityGroupID)
	if err != nil {
		klog.Warningf("Error retrieving security group: %q", err)
		return false, err
	}

	if group == nil {
		return false, fmt.Errorf("security group not found: %s", securityGroupID)
	}

	klog.V(2).Infof("Existing security group ingress: %s %v", securityGroupID, group.IpPermissions)

	// Keep only the permissions not already present on the group.
	changes := []*ec2.IpPermission{}
	for _, addPermission := range addPermissions {
		// Compare UserIds only when the requested permission specifies one.
		hasUserID := false
		for i := range addPermission.UserIdGroupPairs {
			if addPermission.UserIdGroupPairs[i].UserId != nil {
				hasUserID = true
			}
		}

		found := false
		for _, groupPermission := range group.IpPermissions {
			if ipPermissionExists(addPermission, groupPermission, hasUserID) {
				found = true
				break
			}
		}

		if !found {
			changes = append(changes, addPermission)
		}
	}

	if len(changes) == 0 {
		return false, nil
	}

	klog.V(2).Infof("Adding security group ingress: %s %v", securityGroupID, changes)

	request := &ec2.AuthorizeSecurityGroupIngressInput{}
	request.GroupId = &securityGroupID
	request.IpPermissions = changes
	_, err = c.ec2.AuthorizeSecurityGroupIngress(request)
	if err != nil {
		klog.Warningf("Error authorizing security group ingress %q", err)
		return false, fmt.Errorf("error authorizing security group ingress: %q", err)
	}

	return true, nil
}
// Makes sure the security group no longer includes the specified permissions
// Returns true if and only if changes were made
// If the security group no longer exists, will return (false, nil)
func (c *Cloud) removeSecurityGroupIngress(securityGroupID string, removePermissions []*ec2.IpPermission) (bool, error) {
	// We do not want to make changes to the Global defined SG
	if securityGroupID == c.cfg.Global.ElbSecurityGroup {
		return false, nil
	}

	group, err := c.findSecurityGroup(securityGroupID)
	if err != nil {
		klog.Warningf("Error retrieving security group: %q", err)
		return false, err
	}

	if group == nil {
		klog.Warning("Security group not found: ", securityGroupID)
		return false, nil
	}

	// Keep only the permissions that are actually present on the group.
	changes := []*ec2.IpPermission{}
	for _, removePermission := range removePermissions {
		// Compare UserIds only when the requested permission specifies one.
		hasUserID := false
		for i := range removePermission.UserIdGroupPairs {
			if removePermission.UserIdGroupPairs[i].UserId != nil {
				hasUserID = true
			}
		}

		// NOTE(review): on a match we revoke the caller-supplied permission
		// (removePermission), not the group's own rule (groupPermission) —
		// presumably intentional so only the requested subset of a compound
		// rule is revoked; confirm.
		var found *ec2.IpPermission
		for _, groupPermission := range group.IpPermissions {
			if ipPermissionExists(removePermission, groupPermission, hasUserID) {
				found = removePermission
				break
			}
		}

		if found != nil {
			changes = append(changes, found)
		}
	}

	if len(changes) == 0 {
		return false, nil
	}

	klog.V(2).Infof("Removing security group ingress: %s %v", securityGroupID, changes)

	request := &ec2.RevokeSecurityGroupIngressInput{}
	request.GroupId = &securityGroupID
	request.IpPermissions = changes
	_, err = c.ec2.RevokeSecurityGroupIngress(request)
	if err != nil {
		klog.Warningf("Error revoking security group ingress: %q", err)
		return false, err
	}

	return true, nil
}
// Makes sure the security group exists.
// For multi-cluster isolation, name must be globally unique, for example derived from the service UUID.
// Additional tags can be specified
// Returns the security group id or error
func (c *Cloud) ensureSecurityGroup(name string, description string, additionalTags map[string]string) (string, error) {
	groupID := ""
	attempt := 0
	// Describe-then-create loop: retries on the InvalidGroup.Duplicate race
	// where another actor creates the same group between our describe and
	// create calls.
	for {
		attempt++

		// Note that we do _not_ add our tag filters; group-name + vpc-id is the EC2 primary key.
		// However, we do check that it matches our tags.
		// If it doesn't have any tags, we tag it; this is how we recover if we failed to tag before.
		// If it has a different cluster's tags, that is an error.
		// This shouldn't happen because name is expected to be globally unique (UUID derived)
		request := &ec2.DescribeSecurityGroupsInput{}
		request.Filters = []*ec2.Filter{
			newEc2Filter("group-name", name),
			newEc2Filter("vpc-id", c.vpcID),
		}

		securityGroups, err := c.ec2.DescribeSecurityGroups(request)
		if err != nil {
			return "", err
		}

		if len(securityGroups) >= 1 {
			if len(securityGroups) > 1 {
				klog.Warningf("Found multiple security groups with name: %q", name)
			}
			// Repair missing cluster tags on the existing group before
			// returning it.
			err := c.tagging.readRepairClusterTags(
				c.ec2, aws.StringValue(securityGroups[0].GroupId),
				ResourceLifecycleOwned, nil, securityGroups[0].Tags)
			if err != nil {
				return "", err
			}

			return aws.StringValue(securityGroups[0].GroupId), nil
		}

		createRequest := &ec2.CreateSecurityGroupInput{}
		createRequest.VpcId = &c.vpcID
		createRequest.GroupName = &name
		createRequest.Description = &description

		createResponse, err := c.ec2.CreateSecurityGroup(createRequest)
		if err != nil {
			ignore := false
			switch err := err.(type) {
			case awserr.Error:
				if err.Code() == "InvalidGroup.Duplicate" && attempt < MaxReadThenCreateRetries {
					klog.V(2).Infof("Got InvalidGroup.Duplicate while creating security group (race?); will retry")
					ignore = true
				}
			}
			if !ignore {
				klog.Errorf("Error creating security group: %q", err)
				return "", err
			}
			// Back off briefly before re-running the describe.
			time.Sleep(1 * time.Second)
		} else {
			groupID = aws.StringValue(createResponse.GroupId)
			break
		}
	}
	if groupID == "" {
		return "", fmt.Errorf("created security group, but id was not returned: %s", name)
	}

	err := c.tagging.createTags(c.ec2, groupID, ResourceLifecycleOwned, additionalTags)
	if err != nil {
		// If we retry, ensureClusterTags will recover from this - it
		// will add the missing tags.  We could delete the security
		// group here, but that doesn't feel like the right thing, as
		// the caller is likely to retry the create
		return "", fmt.Errorf("error tagging security group: %q", err)
	}
	return groupID, nil
}
// findTag returns the value of the tag with the given key, and whether such
// a tag was present.
func findTag(tags []*ec2.Tag, key string) (string, bool) {
	for _, tag := range tags {
		if aws.StringValue(tag.Key) != key {
			continue
		}
		return aws.StringValue(tag.Value), true
	}
	return "", false
}
// findSubnets finds the subnets associated with the cluster, by matching tags.
// For maximal backwards compatibility, if no subnets are tagged, it will
// fall back to the current instance's subnet. However, in future this will
// likely be treated as an error.
func (c *Cloud) findSubnets() ([]*ec2.Subnet, error) {
	request := &ec2.DescribeSubnetsInput{
		Filters: []*ec2.Filter{newEc2Filter("vpc-id", c.vpcID)},
	}
	subnets, err := c.ec2.DescribeSubnets(request)
	if err != nil {
		return nil, fmt.Errorf("error describing subnets: %q", err)
	}

	var tagged []*ec2.Subnet
	for _, subnet := range subnets {
		if c.tagging.hasClusterTag(subnet.Tags) {
			tagged = append(tagged, subnet)
		}
	}
	if len(tagged) != 0 {
		return tagged, nil
	}

	// Fall back to the current instance subnets, if nothing is tagged
	klog.Warningf("No tagged subnets found; will fall-back to the current subnet only. This is likely to be an error in a future version of k8s.")

	fallback := &ec2.DescribeSubnetsInput{
		Filters: []*ec2.Filter{newEc2Filter("subnet-id", c.selfAWSInstance.subnetID)},
	}
	subnets, err = c.ec2.DescribeSubnets(fallback)
	if err != nil {
		return nil, fmt.Errorf("error describing subnets: %q", err)
	}
	return subnets, nil
}
// Finds the subnets to use for an ELB we are creating.
// Normal (Internet-facing) ELBs must use public subnets, so we skip private subnets.
// Internal ELBs can use public or private subnets, but if we have a private subnet we should prefer that.
//
// Returns at most one subnet ID per availability zone, sorted by AZ name.
func (c *Cloud) findELBSubnets(internalELB bool) ([]string, error) {
	vpcIDFilter := newEc2Filter("vpc-id", c.vpcID)

	subnets, err := c.findSubnets()
	if err != nil {
		return nil, err
	}

	// Route tables are needed to classify each subnet as public or private.
	rRequest := &ec2.DescribeRouteTablesInput{}
	rRequest.Filters = []*ec2.Filter{vpcIDFilter}
	rt, err := c.ec2.DescribeRouteTables(rRequest)
	if err != nil {
		return nil, fmt.Errorf("error describe route table: %q", err)
	}

	subnetsByAZ := make(map[string]*ec2.Subnet)
	for _, subnet := range subnets {
		az := aws.StringValue(subnet.AvailabilityZone)
		id := aws.StringValue(subnet.SubnetId)
		if az == "" || id == "" {
			klog.Warningf("Ignoring subnet with empty az/id: %v", subnet)
			continue
		}

		isPublic, err := isSubnetPublic(rt, id)
		if err != nil {
			return nil, err
		}
		if !internalELB && !isPublic {
			klog.V(2).Infof("Ignoring private subnet for public ELB %q", id)
			continue
		}

		existing := subnetsByAZ[az]
		if existing == nil {
			subnetsByAZ[az] = subnet
			continue
		}

		// Try to break the tie using a tag
		var tagName string
		if internalELB {
			tagName = TagNameSubnetInternalELB
		} else {
			tagName = TagNameSubnetPublicELB
		}

		// A subnet carrying the role tag wins over one without it.
		_, existingHasTag := findTag(existing.Tags, tagName)
		_, subnetHasTag := findTag(subnet.Tags, tagName)

		if existingHasTag != subnetHasTag {
			if subnetHasTag {
				subnetsByAZ[az] = subnet
			}
			continue
		}

		// If we have two subnets for the same AZ we arbitrarily choose the one that is first lexicographically.
		// TODO: Should this be an error.
		if strings.Compare(*existing.SubnetId, *subnet.SubnetId) > 0 {
			klog.Warningf("Found multiple subnets in AZ %q; choosing %q between subnets %q and %q", az, *subnet.SubnetId, *existing.SubnetId, *subnet.SubnetId)
			subnetsByAZ[az] = subnet
			continue
		}

		klog.Warningf("Found multiple subnets in AZ %q; choosing %q between subnets %q and %q", az, *existing.SubnetId, *existing.SubnetId, *subnet.SubnetId)
		continue
	}

	// Emit subnet IDs in deterministic (AZ-sorted) order.
	var azNames []string
	for key := range subnetsByAZ {
		azNames = append(azNames, key)
	}

	sort.Strings(azNames)

	var subnetIDs []string
	for _, key := range azNames {
		subnetIDs = append(subnetIDs, aws.StringValue(subnetsByAZ[key].SubnetId))
	}

	return subnetIDs, nil
}
// isSubnetPublic reports whether the subnet with the given ID is public,
// i.e. whether its effective route table contains an internet-gateway route.
// It returns an error when no route table can be found for the subnet.
func isSubnetPublic(rt []*ec2.RouteTable, subnetID string) (bool, error) {
	var effectiveTable *ec2.RouteTable

	// Prefer a route table explicitly associated with the subnet.
	for _, candidate := range rt {
		for _, association := range candidate.Associations {
			if aws.StringValue(association.SubnetId) == subnetID {
				effectiveTable = candidate
				break
			}
		}
	}

	// With no explicit association, the subnet is implicitly associated
	// with the VPC's main routing table.
	if effectiveTable == nil {
		for _, candidate := range rt {
			for _, association := range candidate.Associations {
				if aws.BoolValue(association.Main) {
					klog.V(4).Infof("Assuming implicit use of main routing table %s for %s",
						aws.StringValue(candidate.RouteTableId), subnetID)
					effectiveTable = candidate
					break
				}
			}
		}
	}

	if effectiveTable == nil {
		return false, fmt.Errorf("could not locate routing table for subnet %s", subnetID)
	}

	// The AWS API has no direct public/private flag for a subnet. A subnet
	// is public when its route table carries an internet gateway route: the
	// gateway ID starts with "igw", which distinguishes it from the default
	// in-subnet "local" route, virtual gateways ("vgw") and VPC peering
	// connections ("pcx").
	for _, route := range effectiveTable.Routes {
		if strings.HasPrefix(aws.StringValue(route.GatewayId), "igw") {
			return true, nil
		}
	}
	return false, nil
}
// portSets tracks the service ports referenced by an annotation, split into
// symbolic port names and numeric port values.
type portSets struct {
	// names holds ports referenced by name (non-numeric annotation items).
	names sets.String
	// numbers holds ports referenced by number.
	numbers sets.Int64
}
// getPortSets returns a portSets structure representing port names and numbers
// that the comma-separated string describes. If the input is empty or equal to
// "*", a nil pointer is returned.
func getPortSets(annotation string) *portSets {
	if annotation == "" || annotation == "*" {
		return nil
	}

	result := &portSets{
		names:   sets.NewString(),
		numbers: sets.NewInt64(),
	}
	for _, entry := range strings.Split(annotation, ",") {
		// Numeric entries are treated as port numbers; anything that does
		// not parse as an integer is treated as a port name.
		if number, err := strconv.Atoi(entry); err == nil {
			result.numbers.Insert(int64(number))
		} else {
			result.names.Insert(entry)
		}
	}
	return result
}
// buildELBSecurityGroupList returns list of SecurityGroups which should be
// attached to ELB created by a service. List always consist of at least
// 1 member which is an SG created for this service or a SG from the Global config.
// Extra groups can be specified via annotation, as can extra tags for any
// new groups. The annotation "ServiceAnnotationLoadBalancerSecurityGroups" allows for
// setting the security groups specified.
func (c *Cloud) buildELBSecurityGroupList(serviceName types.NamespacedName, loadBalancerName string, annotations map[string]string) ([]string, error) {
	securityGroupID := c.cfg.Global.ElbSecurityGroup
	if securityGroupID == "" {
		// No globally configured SG: create one dedicated to this load balancer.
		sgName := "k8s-elb-" + loadBalancerName
		sgDescription := fmt.Sprintf("Security group for Kubernetes ELB %s (%v)", loadBalancerName, serviceName)
		var err error
		securityGroupID, err = c.ensureSecurityGroup(sgName, sgDescription, getLoadBalancerAdditionalTags(annotations))
		if err != nil {
			klog.Errorf("Error creating load balancer security group: %q", err)
			return nil, err
		}
	}

	// Security groups explicitly listed in the annotation replace the default.
	sgList := []string{}
	for _, extraSG := range strings.Split(annotations[ServiceAnnotationLoadBalancerSecurityGroups], ",") {
		if trimmed := strings.TrimSpace(extraSG); len(trimmed) > 0 {
			sgList = append(sgList, trimmed)
		}
	}
	// If no Security Groups have been specified with the ServiceAnnotationLoadBalancerSecurityGroups annotation, we add the default one.
	if len(sgList) == 0 {
		sgList = append(sgList, securityGroupID)
	}

	// Extra security groups are always appended on top.
	for _, extraSG := range strings.Split(annotations[ServiceAnnotationLoadBalancerExtraSecurityGroups], ",") {
		if trimmed := strings.TrimSpace(extraSG); len(trimmed) > 0 {
			sgList = append(sgList, trimmed)
		}
	}

	return sgList, nil
}
// buildListener creates a new listener from the given port, adding an SSL certificate
// if indicated by the appropriate annotations.
func buildListener(port v1.ServicePort, annotations map[string]string, sslPorts *portSets) (*elb.Listener, error) {
	instancePort := int64(port.NodePort)
	loadBalancerPort := int64(port.Port)
	portName := strings.ToLower(port.Name)
	protocol := strings.ToLower(string(port.Protocol))
	instanceProtocol := protocol

	// SSL termination applies when a certificate is configured and this port
	// is in the annotated SSL port set (or no set was given).
	certID := annotations[ServiceAnnotationLoadBalancerCertificate]
	useSSL := certID != "" && (sslPorts == nil || sslPorts.numbers.Has(loadBalancerPort) || sslPorts.names.Has(portName))

	if useSSL {
		// The backend protocol annotation decides how traffic is forwarded
		// to the instances; default is TLS frontend with TCP backend.
		instanceProtocol = annotations[ServiceAnnotationLoadBalancerBEProtocol]
		if instanceProtocol == "" {
			protocol = "ssl"
			instanceProtocol = "tcp"
		} else {
			protocol = backendProtocolMapping[instanceProtocol]
			if protocol == "" {
				return nil, fmt.Errorf("Invalid backend protocol %s for %s in %s", instanceProtocol, certID, ServiceAnnotationLoadBalancerBEProtocol)
			}
		}
	} else if annotationProtocol := annotations[ServiceAnnotationLoadBalancerBEProtocol]; annotationProtocol == "http" {
		instanceProtocol = annotationProtocol
		protocol = "http"
	}

	listener := &elb.Listener{
		InstancePort:     &instancePort,
		LoadBalancerPort: &loadBalancerPort,
		Protocol:         &protocol,
		InstanceProtocol: &instanceProtocol,
	}
	if useSSL {
		listener.SSLCertificateId = &certID
	}
	return listener, nil
}
// EnsureLoadBalancer implements LoadBalancer.EnsureLoadBalancer
//
// It reconciles the AWS load balancer for the service: when the NLB
// annotation is set it provisions an elbv2 network load balancer and returns
// early; otherwise it provisions a classic ELB, configuring listeners,
// attributes (access logs, draining, idle timeout, cross-zone), subnets,
// security groups, health checks and registered instances.
func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
	annotations := apiService.Annotations
	klog.V(2).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)",
		clusterName, apiService.Namespace, apiService.Name, c.region, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, annotations)
	if apiService.Spec.SessionAffinity != v1.ServiceAffinityNone {
		// ELB supports sticky sessions, but only when configured for HTTP/HTTPS
		return nil, fmt.Errorf("unsupported load balancer affinity: %v", apiService.Spec.SessionAffinity)
	}
	if len(apiService.Spec.Ports) == 0 {
		return nil, fmt.Errorf("requested load balancer with no ports")
	}
	// Figure out what mappings we want on the load balancer
	listeners := []*elb.Listener{}
	v2Mappings := []nlbPortMapping{}
	sslPorts := getPortSets(annotations[ServiceAnnotationLoadBalancerSSLPorts])
	for _, port := range apiService.Spec.Ports {
		if port.Protocol != v1.ProtocolTCP {
			return nil, fmt.Errorf("Only TCP LoadBalancer is supported for AWS ELB")
		}
		// Ports without a NodePort cannot be forwarded to the instances.
		if port.NodePort == 0 {
			klog.Errorf("Ignoring port without NodePort defined: %v", port)
			continue
		}
		if isNLB(annotations) {
			portMapping := nlbPortMapping{
				FrontendPort:     int64(port.Port),
				FrontendProtocol: string(port.Protocol),
				TrafficPort:      int64(port.NodePort),
				TrafficProtocol:  string(port.Protocol),
				// if externalTrafficPolicy == "Local", we'll override the
				// health check later
				HealthCheckPort:     int64(port.NodePort),
				HealthCheckProtocol: elbv2.ProtocolEnumTcp,
			}
			// Terminate TLS at the NLB frontend when a certificate is
			// configured and this port is in the SSL port set.
			certificateARN := annotations[ServiceAnnotationLoadBalancerCertificate]
			if certificateARN != "" && (sslPorts == nil || sslPorts.numbers.Has(int64(port.Port)) || sslPorts.names.Has(port.Name)) {
				portMapping.FrontendProtocol = elbv2.ProtocolEnumTls
				portMapping.SSLCertificateARN = certificateARN
				portMapping.SSLPolicy = annotations[ServiceAnnotationLoadBalancerSSLNegotiationPolicy]
				if backendProtocol := annotations[ServiceAnnotationLoadBalancerBEProtocol]; backendProtocol == "ssl" {
					portMapping.TrafficProtocol = elbv2.ProtocolEnumTls
				}
			}
			v2Mappings = append(v2Mappings, portMapping)
		}
		listener, err := buildListener(port, annotations, sslPorts)
		if err != nil {
			return nil, err
		}
		listeners = append(listeners, listener)
	}
	if apiService.Spec.LoadBalancerIP != "" {
		return nil, fmt.Errorf("LoadBalancerIP cannot be specified for AWS ELB")
	}
	instances, err := c.findInstancesForELB(nodes)
	if err != nil {
		return nil, err
	}
	sourceRanges, err := servicehelpers.GetLoadBalancerSourceRanges(apiService)
	if err != nil {
		return nil, err
	}
	// Determine if this is tagged as an Internal ELB.
	// Any non-empty annotation value other than "false" enables internal mode.
	internalELB := false
	internalAnnotation := apiService.Annotations[ServiceAnnotationLoadBalancerInternal]
	if internalAnnotation == "false" {
		internalELB = false
	} else if internalAnnotation != "" {
		internalELB = true
	}
	if isNLB(annotations) {
		// NLB path: provision via the elbv2 API and return early.
		// With a service health-check path (externalTrafficPolicy: Local),
		// switch the health checks from TCP to HTTP on the check node port.
		if path, healthCheckNodePort := servicehelpers.GetServiceHealthCheckPathPort(apiService); path != "" {
			for i := range v2Mappings {
				v2Mappings[i].HealthCheckPort = int64(healthCheckNodePort)
				v2Mappings[i].HealthCheckPath = path
				v2Mappings[i].HealthCheckProtocol = elbv2.ProtocolEnumHttp
			}
		}
		// Find the subnets that the ELB will live in
		subnetIDs, err := c.findELBSubnets(internalELB)
		if err != nil {
			klog.Errorf("Error listing subnets in VPC: %q", err)
			return nil, err
		}
		// Bail out early if there are no subnets
		if len(subnetIDs) == 0 {
			return nil, fmt.Errorf("could not find any suitable subnets for creating the ELB")
		}
		loadBalancerName := c.GetLoadBalancerName(ctx, clusterName, apiService)
		serviceName := types.NamespacedName{Namespace: apiService.Namespace, Name: apiService.Name}
		instanceIDs := []string{}
		for id := range instances {
			instanceIDs = append(instanceIDs, string(id))
		}
		v2LoadBalancer, err := c.ensureLoadBalancerv2(
			serviceName,
			loadBalancerName,
			v2Mappings,
			instanceIDs,
			subnetIDs,
			internalELB,
			annotations,
		)
		if err != nil {
			return nil, err
		}
		// Default to allowing all sources when none were specified.
		sourceRangeCidrs := []string{}
		for cidr := range sourceRanges {
			sourceRangeCidrs = append(sourceRangeCidrs, cidr)
		}
		if len(sourceRangeCidrs) == 0 {
			sourceRangeCidrs = append(sourceRangeCidrs, "0.0.0.0/0")
		}
		err = c.updateInstanceSecurityGroupsForNLB(loadBalancerName, instances, sourceRangeCidrs, v2Mappings)
		if err != nil {
			klog.Warningf("Error opening ingress rules for the load balancer to the instances: %q", err)
			return nil, err
		}
		// We don't have an `ensureLoadBalancerInstances()` function for elbv2
		// because `ensureLoadBalancerv2()` requires instance Ids
		// TODO: Wait for creation?
		return v2toStatus(v2LoadBalancer), nil
	}
	// Classic ELB path from here on.
	// Determine if we need to set the Proxy protocol policy
	proxyProtocol := false
	proxyProtocolAnnotation := apiService.Annotations[ServiceAnnotationLoadBalancerProxyProtocol]
	if proxyProtocolAnnotation != "" {
		if proxyProtocolAnnotation != "*" {
			return nil, fmt.Errorf("annotation %q=%q detected, but the only value supported currently is '*'", ServiceAnnotationLoadBalancerProxyProtocol, proxyProtocolAnnotation)
		}
		proxyProtocol = true
	}
	// Some load balancer attributes are required, so defaults are set. These can be overridden by annotations.
	loadBalancerAttributes := &elb.LoadBalancerAttributes{
		AccessLog:              &elb.AccessLog{Enabled: aws.Bool(false)},
		ConnectionDraining:     &elb.ConnectionDraining{Enabled: aws.Bool(false)},
		ConnectionSettings:     &elb.ConnectionSettings{IdleTimeout: aws.Int64(60)},
		CrossZoneLoadBalancing: &elb.CrossZoneLoadBalancing{Enabled: aws.Bool(false)},
	}
	// Determine if an access log emit interval has been specified
	accessLogEmitIntervalAnnotation := annotations[ServiceAnnotationLoadBalancerAccessLogEmitInterval]
	if accessLogEmitIntervalAnnotation != "" {
		accessLogEmitInterval, err := strconv.ParseInt(accessLogEmitIntervalAnnotation, 10, 64)
		if err != nil {
			return nil, fmt.Errorf("error parsing service annotation: %s=%s",
				ServiceAnnotationLoadBalancerAccessLogEmitInterval,
				accessLogEmitIntervalAnnotation,
			)
		}
		loadBalancerAttributes.AccessLog.EmitInterval = &accessLogEmitInterval
	}
	// Determine if access log enabled/disabled has been specified
	accessLogEnabledAnnotation := annotations[ServiceAnnotationLoadBalancerAccessLogEnabled]
	if accessLogEnabledAnnotation != "" {
		accessLogEnabled, err := strconv.ParseBool(accessLogEnabledAnnotation)
		if err != nil {
			return nil, fmt.Errorf("error parsing service annotation: %s=%s",
				ServiceAnnotationLoadBalancerAccessLogEnabled,
				accessLogEnabledAnnotation,
			)
		}
		loadBalancerAttributes.AccessLog.Enabled = &accessLogEnabled
	}
	// Determine if access log s3 bucket name has been specified
	accessLogS3BucketNameAnnotation := annotations[ServiceAnnotationLoadBalancerAccessLogS3BucketName]
	if accessLogS3BucketNameAnnotation != "" {
		loadBalancerAttributes.AccessLog.S3BucketName = &accessLogS3BucketNameAnnotation
	}
	// Determine if access log s3 bucket prefix has been specified
	accessLogS3BucketPrefixAnnotation := annotations[ServiceAnnotationLoadBalancerAccessLogS3BucketPrefix]
	if accessLogS3BucketPrefixAnnotation != "" {
		loadBalancerAttributes.AccessLog.S3BucketPrefix = &accessLogS3BucketPrefixAnnotation
	}
	// Determine if connection draining enabled/disabled has been specified
	connectionDrainingEnabledAnnotation := annotations[ServiceAnnotationLoadBalancerConnectionDrainingEnabled]
	if connectionDrainingEnabledAnnotation != "" {
		connectionDrainingEnabled, err := strconv.ParseBool(connectionDrainingEnabledAnnotation)
		if err != nil {
			return nil, fmt.Errorf("error parsing service annotation: %s=%s",
				ServiceAnnotationLoadBalancerConnectionDrainingEnabled,
				connectionDrainingEnabledAnnotation,
			)
		}
		loadBalancerAttributes.ConnectionDraining.Enabled = &connectionDrainingEnabled
	}
	// Determine if connection draining timeout has been specified
	connectionDrainingTimeoutAnnotation := annotations[ServiceAnnotationLoadBalancerConnectionDrainingTimeout]
	if connectionDrainingTimeoutAnnotation != "" {
		connectionDrainingTimeout, err := strconv.ParseInt(connectionDrainingTimeoutAnnotation, 10, 64)
		if err != nil {
			return nil, fmt.Errorf("error parsing service annotation: %s=%s",
				ServiceAnnotationLoadBalancerConnectionDrainingTimeout,
				connectionDrainingTimeoutAnnotation,
			)
		}
		loadBalancerAttributes.ConnectionDraining.Timeout = &connectionDrainingTimeout
	}
	// Determine if connection idle timeout has been specified
	connectionIdleTimeoutAnnotation := annotations[ServiceAnnotationLoadBalancerConnectionIdleTimeout]
	if connectionIdleTimeoutAnnotation != "" {
		connectionIdleTimeout, err := strconv.ParseInt(connectionIdleTimeoutAnnotation, 10, 64)
		if err != nil {
			return nil, fmt.Errorf("error parsing service annotation: %s=%s",
				ServiceAnnotationLoadBalancerConnectionIdleTimeout,
				connectionIdleTimeoutAnnotation,
			)
		}
		loadBalancerAttributes.ConnectionSettings.IdleTimeout = &connectionIdleTimeout
	}
	// Determine if cross zone load balancing enabled/disabled has been specified
	crossZoneLoadBalancingEnabledAnnotation := annotations[ServiceAnnotationLoadBalancerCrossZoneLoadBalancingEnabled]
	if crossZoneLoadBalancingEnabledAnnotation != "" {
		crossZoneLoadBalancingEnabled, err := strconv.ParseBool(crossZoneLoadBalancingEnabledAnnotation)
		if err != nil {
			return nil, fmt.Errorf("error parsing service annotation: %s=%s",
				ServiceAnnotationLoadBalancerCrossZoneLoadBalancingEnabled,
				crossZoneLoadBalancingEnabledAnnotation,
			)
		}
		loadBalancerAttributes.CrossZoneLoadBalancing.Enabled = &crossZoneLoadBalancingEnabled
	}
	// Find the subnets that the ELB will live in
	subnetIDs, err := c.findELBSubnets(internalELB)
	if err != nil {
		klog.Errorf("Error listing subnets in VPC: %q", err)
		return nil, err
	}
	// Bail out early if there are no subnets
	if len(subnetIDs) == 0 {
		return nil, fmt.Errorf("could not find any suitable subnets for creating the ELB")
	}
	loadBalancerName := c.GetLoadBalancerName(ctx, clusterName, apiService)
	serviceName := types.NamespacedName{Namespace: apiService.Namespace, Name: apiService.Name}
	securityGroupIDs, err := c.buildELBSecurityGroupList(serviceName, loadBalancerName, annotations)
	if err != nil {
		return nil, err
	}
	if len(securityGroupIDs) == 0 {
		return nil, fmt.Errorf("[BUG] ELB can't have empty list of Security Groups to be assigned, this is a Kubernetes bug, please report")
	}
	{
		// Open ingress on the first (service-owned/global) security group for
		// every service port from the allowed source ranges.
		ec2SourceRanges := []*ec2.IpRange{}
		for _, sourceRange := range sourceRanges.StringSlice() {
			ec2SourceRanges = append(ec2SourceRanges, &ec2.IpRange{CidrIp: aws.String(sourceRange)})
		}
		permissions := NewIPPermissionSet()
		for _, port := range apiService.Spec.Ports {
			portInt64 := int64(port.Port)
			protocol := strings.ToLower(string(port.Protocol))
			permission := &ec2.IpPermission{}
			permission.FromPort = &portInt64
			permission.ToPort = &portInt64
			permission.IpRanges = ec2SourceRanges
			permission.IpProtocol = &protocol
			permissions.Insert(permission)
		}
		// Allow ICMP fragmentation packets, important for MTU discovery
		{
			permission := &ec2.IpPermission{
				IpProtocol: aws.String("icmp"),
				FromPort:   aws.Int64(3),
				ToPort:     aws.Int64(4),
				IpRanges:   ec2SourceRanges,
			}
			permissions.Insert(permission)
		}
		_, err = c.setSecurityGroupIngress(securityGroupIDs[0], permissions)
		if err != nil {
			return nil, err
		}
	}
	// Build the load balancer itself
	loadBalancer, err := c.ensureLoadBalancer(
		serviceName,
		loadBalancerName,
		listeners,
		subnetIDs,
		securityGroupIDs,
		internalELB,
		proxyProtocol,
		loadBalancerAttributes,
		annotations,
	)
	if err != nil {
		return nil, err
	}
	// Apply the SSL negotiation policy to every TLS listener, if requested.
	if sslPolicyName, ok := annotations[ServiceAnnotationLoadBalancerSSLNegotiationPolicy]; ok {
		err := c.ensureSSLNegotiationPolicy(loadBalancer, sslPolicyName)
		if err != nil {
			return nil, err
		}
		for _, port := range c.getLoadBalancerTLSPorts(loadBalancer) {
			err := c.setSSLNegotiationPolicy(loadBalancerName, sslPolicyName, port)
			if err != nil {
				return nil, err
			}
		}
	}
	if path, healthCheckNodePort := servicehelpers.GetServiceHealthCheckPathPort(apiService); path != "" {
		// Service exposes a health-check path: use an HTTP health check.
		klog.V(4).Infof("service %v (%v) needs health checks on :%d%s)", apiService.Name, loadBalancerName, healthCheckNodePort, path)
		err = c.ensureLoadBalancerHealthCheck(loadBalancer, "HTTP", healthCheckNodePort, path, annotations)
		if err != nil {
			return nil, fmt.Errorf("Failed to ensure health check for localized service %v on node port %v: %q", loadBalancerName, healthCheckNodePort, err)
		}
	} else {
		klog.V(4).Infof("service %v does not need custom health checks", apiService.Name)
		// We only configure a TCP health-check on the first port
		var tcpHealthCheckPort int32
		for _, listener := range listeners {
			if listener.InstancePort == nil {
				continue
			}
			tcpHealthCheckPort = int32(*listener.InstancePort)
			break
		}
		annotationProtocol := strings.ToLower(annotations[ServiceAnnotationLoadBalancerBEProtocol])
		var hcProtocol string
		if annotationProtocol == "https" || annotationProtocol == "ssl" {
			hcProtocol = "SSL"
		} else {
			hcProtocol = "TCP"
		}
		// there must be no path on TCP health check
		err = c.ensureLoadBalancerHealthCheck(loadBalancer, hcProtocol, tcpHealthCheckPort, "", annotations)
		if err != nil {
			return nil, err
		}
	}
	err = c.updateInstanceSecurityGroupsForLoadBalancer(loadBalancer, instances)
	if err != nil {
		klog.Warningf("Error opening ingress rules for the load balancer to the instances: %q", err)
		return nil, err
	}
	err = c.ensureLoadBalancerInstances(aws.StringValue(loadBalancer.LoadBalancerName), loadBalancer.Instances, instances)
	if err != nil {
		klog.Warningf("Error registering instances with the load balancer: %q", err)
		return nil, err
	}
	klog.V(1).Infof("Loadbalancer %s (%v) has DNS name %s", loadBalancerName, serviceName, aws.StringValue(loadBalancer.DNSName))
	// TODO: Wait for creation?
	status := toStatus(loadBalancer)
	return status, nil
}
// GetLoadBalancer is an implementation of LoadBalancer.GetLoadBalancer
func (c *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
	loadBalancerName := c.GetLoadBalancerName(ctx, clusterName, service)

	// NLB-annotated services are looked up through the elbv2 API.
	if isNLB(service.Annotations) {
		v2lb, err := c.describeLoadBalancerv2(loadBalancerName)
		if err != nil {
			return nil, false, err
		}
		if v2lb == nil {
			return nil, false, nil
		}
		return v2toStatus(v2lb), true, nil
	}

	// Everything else is a classic ELB.
	classicLB, err := c.describeLoadBalancer(loadBalancerName)
	if err != nil {
		return nil, false, err
	}
	if classicLB == nil {
		return nil, false, nil
	}
	return toStatus(classicLB), true, nil
}
// GetLoadBalancerName is an implementation of LoadBalancer.GetLoadBalancerName
//
// It delegates to cloudprovider.DefaultLoadBalancerName; clusterName and ctx
// are ignored by the current implementation.
func (c *Cloud) GetLoadBalancerName(ctx context.Context, clusterName string, service *v1.Service) string {
	// TODO: replace DefaultLoadBalancerName to generate more meaningful loadbalancer names.
	return cloudprovider.DefaultLoadBalancerName(service)
}
// toStatus converts a classic ELB description into a LoadBalancerStatus,
// exposing the load balancer's DNS name as the ingress hostname when set.
func toStatus(lb *elb.LoadBalancerDescription) *v1.LoadBalancerStatus {
	status := &v1.LoadBalancerStatus{}
	if hostname := aws.StringValue(lb.DNSName); hostname != "" {
		status.Ingress = []v1.LoadBalancerIngress{{Hostname: hostname}}
	}
	return status
}
// v2toStatus converts an elbv2 (NLB) load balancer into a LoadBalancerStatus.
// The DNS name is only surfaced while the load balancer is in one of the
// successful states (active or provisioning).
func v2toStatus(lb *elbv2.LoadBalancer) *v1.LoadBalancerStatus {
	status := &v1.LoadBalancerStatus{}
	if lb == nil {
		klog.Error("[BUG] v2toStatus got nil input, this is a Kubernetes bug, please report")
		return status
	}
	// lb.State is itself a pointer and may be nil in API responses;
	// aws.StringValue only guards against a nil *string, so dereferencing
	// lb.State.Code directly would panic. Treat a missing state as
	// not-yet-successful.
	stateCode := ""
	if lb.State != nil {
		stateCode = aws.StringValue(lb.State.Code)
	}
	// We check for Active or Provisioning, the only successful statuses
	if aws.StringValue(lb.DNSName) != "" && (stateCode == elbv2.LoadBalancerStateEnumActive ||
		stateCode == elbv2.LoadBalancerStateEnumProvisioning) {
		var ingress v1.LoadBalancerIngress
		ingress.Hostname = aws.StringValue(lb.DNSName)
		status.Ingress = []v1.LoadBalancerIngress{ingress}
	}
	return status
}
// findSecurityGroupForInstance returns the security group to use for an
// instance, or nil if none is found.
// We only create instances with one security group, so we don't expect
// multiple security groups. However, if there are multiple, we choose the one
// tagged with our cluster filter; otherwise we return an error.
func findSecurityGroupForInstance(instance *ec2.Instance, taggedSecurityGroups map[string]*ec2.SecurityGroup) (*ec2.GroupIdentifier, error) {
	instanceID := aws.StringValue(instance.InstanceId)

	// Partition the instance's groups into cluster-tagged and untagged ones.
	var tagged, untagged []*ec2.GroupIdentifier
	for _, group := range instance.SecurityGroups {
		groupID := aws.StringValue(group.GroupId)
		if groupID == "" {
			klog.Warningf("Ignoring security group without id for instance %q: %v", instanceID, group)
			continue
		}
		if _, isTagged := taggedSecurityGroups[groupID]; isTagged {
			tagged = append(tagged, group)
		} else {
			untagged = append(untagged, group)
		}
	}

	switch {
	case len(tagged) == 1:
		return tagged[0], nil
	case len(tagged) > 1:
		// We create instances with one SG; with several tagged groups we
		// cannot tell which one is ours.
		taggedGroups := ""
		for _, v := range tagged {
			taggedGroups += fmt.Sprintf("%s(%s) ", *v.GroupId, *v.GroupName)
		}
		return nil, fmt.Errorf("Multiple tagged security groups found for instance %s; ensure only the k8s security group is tagged; the tagged groups were %v", instanceID, taggedGroups)
	case len(untagged) == 1:
		// For back-compat, a single untagged SG is accepted.
		return untagged[0], nil
	case len(untagged) > 1:
		return nil, fmt.Errorf("Multiple untagged security groups found for instance %s; ensure the k8s security group is tagged", instanceID)
	}

	klog.Warningf("No security group found for instance %q", instanceID)
	return nil, nil
}
// getTaggedSecurityGroups returns all security groups tagged as being part of
// our cluster, keyed by group ID.
func (c *Cloud) getTaggedSecurityGroups() (map[string]*ec2.SecurityGroup, error) {
	groups, err := c.ec2.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{})
	if err != nil {
		return nil, fmt.Errorf("error querying security groups: %q", err)
	}

	tagged := make(map[string]*ec2.SecurityGroup)
	for _, group := range groups {
		// Skip groups that do not carry our cluster tag.
		if !c.tagging.hasClusterTag(group.Tags) {
			continue
		}
		id := aws.StringValue(group.GroupId)
		if id == "" {
			klog.Warningf("Ignoring group without id: %v", group)
			continue
		}
		tagged[id] = group
	}
	return tagged, nil
}
// Open security group ingress rules on the instances so that the load balancer can talk to them
// Will also remove any security groups ingress rules for the load balancer that are _not_ needed for allInstances
//
// The reconciliation works in three steps: determine the LB's security group,
// compute the set of instance security groups that should allow ingress from
// it, then diff against the groups that currently do and add/remove rules.
func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancerDescription, instances map[InstanceID]*ec2.Instance) error {
	if c.cfg.Global.DisableSecurityGroupIngress {
		return nil
	}
	// Determine the load balancer security group id
	loadBalancerSecurityGroupID := ""
	for _, securityGroup := range lb.SecurityGroups {
		if aws.StringValue(securityGroup) == "" {
			continue
		}
		if loadBalancerSecurityGroupID != "" {
			// We create LBs with one SG
			klog.Warningf("Multiple security groups for load balancer: %q", aws.StringValue(lb.LoadBalancerName))
		}
		// With multiple non-empty SGs the last one wins.
		loadBalancerSecurityGroupID = *securityGroup
	}
	if loadBalancerSecurityGroupID == "" {
		return fmt.Errorf("could not determine security group for load balancer: %s", aws.StringValue(lb.LoadBalancerName))
	}
	// Get the actual list of groups that allow ingress from the load-balancer
	var actualGroups []*ec2.SecurityGroup
	{
		describeRequest := &ec2.DescribeSecurityGroupsInput{}
		describeRequest.Filters = []*ec2.Filter{
			newEc2Filter("ip-permission.group-id", loadBalancerSecurityGroupID),
		}
		response, err := c.ec2.DescribeSecurityGroups(describeRequest)
		if err != nil {
			return fmt.Errorf("error querying security groups for ELB: %q", err)
		}
		for _, sg := range response {
			// Only manage groups tagged as belonging to this cluster.
			if !c.tagging.hasClusterTag(sg.Tags) {
				continue
			}
			actualGroups = append(actualGroups, sg)
		}
	}
	taggedSecurityGroups, err := c.getTaggedSecurityGroups()
	if err != nil {
		return fmt.Errorf("error querying for tagged security groups: %q", err)
	}
	// Open the firewall from the load balancer to the instance
	// We don't actually have a trivial way to know in advance which security group the instance is in
	// (it is probably the node security group, but we don't easily have that).
	// However, we _do_ have the list of security groups on the instance records.
	// Map containing the changes we want to make; true to add, false to remove
	instanceSecurityGroupIds := map[string]bool{}
	// Scan instances for groups we want open
	for _, instance := range instances {
		securityGroup, err := findSecurityGroupForInstance(instance, taggedSecurityGroups)
		if err != nil {
			return err
		}
		if securityGroup == nil {
			klog.Warning("Ignoring instance without security group: ", aws.StringValue(instance.InstanceId))
			continue
		}
		id := aws.StringValue(securityGroup.GroupId)
		if id == "" {
			klog.Warningf("found security group without id: %v", securityGroup)
			continue
		}
		instanceSecurityGroupIds[id] = true
	}
	// Compare to actual groups
	for _, actualGroup := range actualGroups {
		actualGroupID := aws.StringValue(actualGroup.GroupId)
		if actualGroupID == "" {
			klog.Warning("Ignoring group without ID: ", actualGroup)
			continue
		}
		adding, found := instanceSecurityGroupIds[actualGroupID]
		if found && adding {
			// We don't need to make a change; the permission is already in place
			delete(instanceSecurityGroupIds, actualGroupID)
		} else {
			// This group is not needed by allInstances; delete it
			instanceSecurityGroupIds[actualGroupID] = false
		}
	}
	// Apply the computed diff: add or revoke an "all protocols from the LB's
	// security group" ingress rule on each affected instance group.
	for instanceSecurityGroupID, add := range instanceSecurityGroupIds {
		if add {
			klog.V(2).Infof("Adding rule for traffic from the load balancer (%s) to instances (%s)", loadBalancerSecurityGroupID, instanceSecurityGroupID)
		} else {
			klog.V(2).Infof("Removing rule for traffic from the load balancer (%s) to instance (%s)", loadBalancerSecurityGroupID, instanceSecurityGroupID)
		}
		sourceGroupID := &ec2.UserIdGroupPair{}
		sourceGroupID.GroupId = &loadBalancerSecurityGroupID
		allProtocols := "-1"
		permission := &ec2.IpPermission{}
		permission.IpProtocol = &allProtocols
		permission.UserIdGroupPairs = []*ec2.UserIdGroupPair{sourceGroupID}
		permissions := []*ec2.IpPermission{permission}
		if add {
			changed, err := c.addSecurityGroupIngress(instanceSecurityGroupID, permissions)
			if err != nil {
				return err
			}
			if !changed {
				klog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID)
			}
		} else {
			changed, err := c.removeSecurityGroupIngress(instanceSecurityGroupID, permissions)
			if err != nil {
				return err
			}
			if !changed {
				klog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID)
			}
		}
	}
	return nil
}
// EnsureLoadBalancerDeleted implements LoadBalancer.EnsureLoadBalancerDeleted.
//
// For NLBs it deletes the load balancer, its target groups and the
// NLB-related security group rules. For classic ELBs it de-authorizes the
// LB's security group from the instances, deletes the load balancer and then
// retries deleting the LB's security groups until AWS releases the
// DependencyViolation (or a 10-minute timeout elapses).
func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error {
	loadBalancerName := c.GetLoadBalancerName(ctx, clusterName, service)
	if isNLB(service.Annotations) {
		lb, err := c.describeLoadBalancerv2(loadBalancerName)
		if err != nil {
			return err
		}
		if lb == nil {
			klog.Info("Load balancer already deleted: ", loadBalancerName)
			return nil
		}
		// Delete the LoadBalancer and target groups
		//
		// Deleting a target group while associated with a load balancer will
		// fail. We delete the loadbalancer first. This does leave the
		// possibility of zombie target groups if DeleteLoadBalancer() fails
		//
		// * Get target groups for NLB
		// * Delete Load Balancer
		// * Delete target groups
		// * Clean up SecurityGroupRules
		{
			targetGroups, err := c.elbv2.DescribeTargetGroups(
				&elbv2.DescribeTargetGroupsInput{LoadBalancerArn: lb.LoadBalancerArn},
			)
			if err != nil {
				return fmt.Errorf("error listing target groups before deleting load balancer: %q", err)
			}
			_, err = c.elbv2.DeleteLoadBalancer(
				&elbv2.DeleteLoadBalancerInput{LoadBalancerArn: lb.LoadBalancerArn},
			)
			if err != nil {
				return fmt.Errorf("error deleting load balancer %q: %v", loadBalancerName, err)
			}
			for _, group := range targetGroups.TargetGroups {
				_, err := c.elbv2.DeleteTargetGroup(
					&elbv2.DeleteTargetGroupInput{TargetGroupArn: group.TargetGroupArn},
				)
				if err != nil {
					return fmt.Errorf("error deleting target groups after deleting load balancer: %q", err)
				}
			}
		}
		// Clean up the security group rules for this NLB (last checklist item).
		return c.updateInstanceSecurityGroupsForNLB(loadBalancerName, nil, nil, nil)
	}
	lb, err := c.describeLoadBalancer(loadBalancerName)
	if err != nil {
		return err
	}
	if lb == nil {
		klog.Info("Load balancer already deleted: ", loadBalancerName)
		return nil
	}
	{
		// De-authorize the load balancer security group from the instances security group
		err = c.updateInstanceSecurityGroupsForLoadBalancer(lb, nil)
		if err != nil {
			klog.Errorf("Error deregistering load balancer from instance security groups: %q", err)
			return err
		}
	}
	{
		// Delete the load balancer itself
		request := &elb.DeleteLoadBalancerInput{}
		request.LoadBalancerName = lb.LoadBalancerName
		_, err = c.elb.DeleteLoadBalancer(request)
		if err != nil {
			// TODO: Check if error was because load balancer was concurrently deleted
			klog.Errorf("Error deleting load balancer: %q", err)
			return err
		}
	}
	{
		// Delete the security group(s) for the load balancer
		// Note that this is annoying: the load balancer disappears from the API immediately, but it is still
		// deleting in the background. We get a DependencyViolation until the load balancer has deleted itself
		var loadBalancerSGs = aws.StringValueSlice(lb.SecurityGroups)
		describeRequest := &ec2.DescribeSecurityGroupsInput{}
		describeRequest.Filters = []*ec2.Filter{
			newEc2Filter("group-id", loadBalancerSGs...),
		}
		response, err := c.ec2.DescribeSecurityGroups(describeRequest)
		if err != nil {
			return fmt.Errorf("error querying security groups for ELB: %q", err)
		}
		// Collect the security groups to delete
		securityGroupIDs := map[string]struct{}{}
		for _, sg := range response {
			sgID := aws.StringValue(sg.GroupId)
			if sgID == c.cfg.Global.ElbSecurityGroup {
				// We don't want to delete a security group that was defined in the Cloud Configuration.
				continue
			}
			if sgID == "" {
				klog.Warningf("Ignoring empty security group in %s", service.Name)
				continue
			}
			if !c.tagging.hasClusterTag(sg.Tags) {
				klog.Warningf("Ignoring security group with no cluster tag in %s", service.Name)
				continue
			}
			securityGroupIDs[sgID] = struct{}{}
		}
		// Loop through and try to delete them, retrying every 10s while AWS
		// still reports a DependencyViolation, until the timeout below.
		timeoutAt := time.Now().Add(time.Second * 600)
		for {
			for securityGroupID := range securityGroupIDs {
				request := &ec2.DeleteSecurityGroupInput{}
				request.GroupId = &securityGroupID
				_, err := c.ec2.DeleteSecurityGroup(request)
				if err == nil {
					delete(securityGroupIDs, securityGroupID)
				} else {
					ignore := false
					if awsError, ok := err.(awserr.Error); ok {
						if awsError.Code() == "DependencyViolation" {
							klog.V(2).Infof("Ignoring DependencyViolation while deleting load-balancer security group (%s), assuming because LB is in process of deleting", securityGroupID)
							ignore = true
						}
					}
					if !ignore {
						return fmt.Errorf("error while deleting load balancer security group (%s): %q", securityGroupID, err)
					}
				}
			}
			if len(securityGroupIDs) == 0 {
				klog.V(2).Info("Deleted all security groups for load balancer: ", service.Name)
				break
			}
			if time.Now().After(timeoutAt) {
				ids := []string{}
				for id := range securityGroupIDs {
					ids = append(ids, id)
				}
				return fmt.Errorf("timed out deleting ELB: %s. Could not delete security groups %v", service.Name, strings.Join(ids, ","))
			}
			klog.V(2).Info("Waiting for load-balancer to delete so we can delete security groups: ", service.Name)
			time.Sleep(10 * time.Second)
		}
	}
	return nil
}
// UpdateLoadBalancer implements LoadBalancer.UpdateLoadBalancer.
// It re-registers the backend instances for the service's load balancer and,
// for classic ELBs, re-applies the SSL negotiation policy and instance
// security groups.  NLBs are fully reconciled via EnsureLoadBalancer.
func (c *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error {
	instances, err := c.findInstancesForELB(nodes)
	if err != nil {
		return err
	}
	loadBalancerName := c.GetLoadBalancerName(ctx, clusterName, service)
	if isNLB(service.Annotations) {
		lb, err := c.describeLoadBalancerv2(loadBalancerName)
		if err != nil {
			return err
		}
		if lb == nil {
			return fmt.Errorf("Load balancer not found")
		}
		// NLBs are reconciled end-to-end by EnsureLoadBalancer.
		_, err = c.EnsureLoadBalancer(ctx, clusterName, service, nodes)
		return err
	}
	lb, err := c.describeLoadBalancer(loadBalancerName)
	if err != nil {
		return err
	}
	if lb == nil {
		return fmt.Errorf("Load balancer not found")
	}
	// Re-apply the SSL negotiation policy to every TLS port if the
	// annotation requests one.
	if sslPolicyName, ok := service.Annotations[ServiceAnnotationLoadBalancerSSLNegotiationPolicy]; ok {
		err := c.ensureSSLNegotiationPolicy(lb, sslPolicyName)
		if err != nil {
			return err
		}
		for _, port := range c.getLoadBalancerTLSPorts(lb) {
			err := c.setSSLNegotiationPolicy(loadBalancerName, sslPolicyName, port)
			if err != nil {
				return err
			}
		}
	}
	err = c.ensureLoadBalancerInstances(aws.StringValue(lb.LoadBalancerName), lb.Instances, instances)
	if err != nil {
		// FIX: previously returned nil here, silently swallowing instance
		// registration failures.
		return err
	}
	return c.updateInstanceSecurityGroupsForLoadBalancer(lb, instances)
}
// getInstanceByID returns the single EC2 instance with the given instance ID.
// It fails with cloudprovider.InstanceNotFound when no instance matches and
// errors when the lookup is ambiguous.
func (c *Cloud) getInstanceByID(instanceID string) (*ec2.Instance, error) {
	found, err := c.getInstancesByIDs([]*string{&instanceID})
	if err != nil {
		return nil, err
	}
	switch len(found) {
	case 0:
		return nil, cloudprovider.InstanceNotFound
	case 1:
		return found[instanceID], nil
	default:
		return nil, fmt.Errorf("multiple instances found for instance: %s", instanceID)
	}
}
// getInstancesByIDs describes the given instance IDs and returns them keyed
// by instance ID.  An empty input short-circuits without an API call.
func (c *Cloud) getInstancesByIDs(instanceIDs []*string) (map[string]*ec2.Instance, error) {
	result := make(map[string]*ec2.Instance)
	if len(instanceIDs) == 0 {
		return result, nil
	}
	described, err := c.ec2.DescribeInstances(&ec2.DescribeInstancesInput{
		InstanceIds: instanceIDs,
	})
	if err != nil {
		return nil, err
	}
	for _, inst := range described {
		// Skip entries with no ID rather than creating an empty key.
		if id := aws.StringValue(inst.InstanceId); id != "" {
			result[id] = inst
		}
	}
	return result, nil
}
// getInstancesByNodeNames looks up instances by private DNS name, optionally
// restricted to the given instance states.  Names are queried in batches of
// filterNodeLimit to stay within the EC2 filter-value limit.
func (c *Cloud) getInstancesByNodeNames(nodeNames []string, states ...string) ([]*ec2.Instance, error) {
	names := aws.StringSlice(nodeNames)
	var found []*ec2.Instance
	for start := 0; start < len(names); start += filterNodeLimit {
		stop := start + filterNodeLimit
		if stop > len(names) {
			stop = len(names)
		}
		filters := []*ec2.Filter{{
			Name:   aws.String("private-dns-name"),
			Values: names[start:stop],
		}}
		if len(states) > 0 {
			filters = append(filters, newEc2Filter("instance-state-name", states...))
		}
		batch, err := c.describeInstances(filters)
		if err != nil {
			klog.V(2).Infof("Failed to describe instances %v", nodeNames)
			return nil, err
		}
		found = append(found, batch...)
	}
	if len(found) == 0 {
		klog.V(3).Infof("Failed to find any instances %v", nodeNames)
		return nil, nil
	}
	return found, nil
}
// TODO: Move to instanceCache
// describeInstances runs a filtered DescribeInstances call and keeps only
// instances tagged as belonging to this cluster.
func (c *Cloud) describeInstances(filters []*ec2.Filter) ([]*ec2.Instance, error) {
	resp, err := c.ec2.DescribeInstances(&ec2.DescribeInstancesInput{
		Filters: filters,
	})
	if err != nil {
		return nil, err
	}
	var tagged []*ec2.Instance
	for _, inst := range resp {
		if c.tagging.hasClusterTag(inst.Tags) {
			tagged = append(tagged, inst)
		}
	}
	return tagged, nil
}
// mapNodeNameToPrivateDNSName maps a k8s NodeName to an AWS Instance PrivateDNSName
// This is a simple string cast; node names here are the instances' private
// DNS names (see the "private-dns-name" filters used by the lookups below).
func mapNodeNameToPrivateDNSName(nodeName types.NodeName) string {
	return string(nodeName)
}
// mapInstanceToNodeName maps a EC2 instance to a k8s NodeName, by extracting
// the instance's PrivateDnsName.
func mapInstanceToNodeName(i *ec2.Instance) types.NodeName {
	dnsName := aws.StringValue(i.PrivateDnsName)
	return types.NodeName(dnsName)
}
// aliveFilter lists every EC2 instance state except "terminated"; it is used
// as an instance-state-name filter to exclude terminated instances.
var aliveFilter = []string{
	ec2.InstanceStateNamePending,
	ec2.InstanceStateNameRunning,
	ec2.InstanceStateNameShuttingDown,
	ec2.InstanceStateNameStopping,
	ec2.InstanceStateNameStopped,
}
// findInstanceByNodeName returns the non-terminated instance whose private
// DNS name matches the node name, nil (without error) when none exists, and
// an error when the name is ambiguous.
func (c *Cloud) findInstanceByNodeName(nodeName types.NodeName) (*ec2.Instance, error) {
	dnsName := mapNodeNameToPrivateDNSName(nodeName)
	matches, err := c.describeInstances([]*ec2.Filter{
		newEc2Filter("private-dns-name", dnsName),
		// exclude instances in "terminated" state
		newEc2Filter("instance-state-name", aliveFilter...),
	})
	if err != nil {
		return nil, err
	}
	switch len(matches) {
	case 0:
		return nil, nil
	case 1:
		return matches[0], nil
	default:
		return nil, fmt.Errorf("multiple instances found for name: %s", nodeName)
	}
}
// getInstanceByNodeName returns the instance for the given node name.
// Like findInstanceByNodeName, but returns cloudprovider.InstanceNotFound
// when the node does not exist.
func (c *Cloud) getInstanceByNodeName(nodeName types.NodeName) (*ec2.Instance, error) {
	// Prefer the node cache: resolving a provider ID lets us query by
	// instance ID, which is far cheaper than a filtered describe call.
	var instance *ec2.Instance
	awsID, err := c.nodeNameToProviderID(nodeName)
	if err == nil {
		instance, err = c.getInstanceByID(string(awsID))
	} else {
		klog.V(3).Infof("Unable to convert node name %q to aws instanceID, fall back to findInstanceByNodeName: %v", nodeName, err)
		instance, err = c.findInstanceByNodeName(nodeName)
	}
	if err == nil && instance == nil {
		return nil, cloudprovider.InstanceNotFound
	}
	return instance, err
}
// getFullInstance resolves a node name to both the cached awsInstance wrapper
// and the raw EC2 instance.  An empty node name refers to the instance this
// process is running on.
func (c *Cloud) getFullInstance(nodeName types.NodeName) (*awsInstance, *ec2.Instance, error) {
	if nodeName == "" {
		self, err := c.getInstanceByID(c.selfAWSInstance.awsID)
		return c.selfAWSInstance, self, err
	}
	ec2Instance, err := c.getInstanceByNodeName(nodeName)
	if err != nil {
		return nil, nil, err
	}
	return newAWSInstance(c.ec2, ec2Instance), ec2Instance, err
}
// nodeNameToProviderID resolves a node name to its AWS instance ID via the
// node informer cache.  It errors when the informer has not synced or the
// node carries no providerID.
func (c *Cloud) nodeNameToProviderID(nodeName types.NodeName) (InstanceID, error) {
	if nodeName == "" {
		return "", fmt.Errorf("no nodeName provided")
	}
	if c.nodeInformerHasSynced == nil || !c.nodeInformerHasSynced() {
		return "", fmt.Errorf("node informer has not synced yet")
	}
	node, err := c.nodeInformer.Lister().Get(string(nodeName))
	if err != nil {
		return "", err
	}
	if node.Spec.ProviderID == "" {
		return "", fmt.Errorf("node has no providerID")
	}
	return KubernetesInstanceID(node.Spec.ProviderID).MapToAWSInstanceID()
}
// setNodeDisk records the attachment-check flag for volumeID under nodeName,
// lazily creating the per-node volume map on first use.
func setNodeDisk(
	nodeDiskMap map[types.NodeName]map[KubernetesVolumeID]bool,
	volumeID KubernetesVolumeID,
	nodeName types.NodeName,
	check bool) {
	disks, ok := nodeDiskMap[nodeName]
	if !ok || disks == nil {
		disks = make(map[KubernetesVolumeID]bool)
		nodeDiskMap[nodeName] = disks
	}
	disks[volumeID] = check
}
|
// package main provides the monstache binary
package main
import (
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"errors"
"flag"
"fmt"
"github.com/BurntSushi/toml"
"github.com/coreos/go-systemd/daemon"
"github.com/evanphx/json-patch"
"github.com/globalsign/mgo"
"github.com/globalsign/mgo/bson"
"github.com/olivere/elastic"
"github.com/robertkrimen/otto"
_ "github.com/robertkrimen/otto/underscore"
"github.com/rwynn/gtm"
"github.com/rwynn/gtm/consistent"
"github.com/rwynn/monstache/monstachemap"
"golang.org/x/net/context"
"gopkg.in/Graylog2/go-gelf.v2/gelf"
"gopkg.in/natefinch/lumberjack.v2"
"io"
"io/ioutil"
"log"
"math"
"net"
"net/http"
"net/http/pprof"
"os"
"os/signal"
"plugin"
"regexp"
"strconv"
"strings"
"sync"
"syscall"
"time"
)
// Leveled loggers; everything except errors goes to stdout.
var infoLog = log.New(os.Stdout, "INFO ", log.Flags())
var warnLog = log.New(os.Stdout, "WARN ", log.Flags())
var statsLog = log.New(os.Stdout, "STATS ", log.Flags())
var traceLog = log.New(os.Stdout, "TRACE ", log.Flags())
var errorLog = log.New(os.Stderr, "ERROR ", log.Flags())

// Optional Go plugin hooks (nil unless a mapper plugin is loaded).
var mapperPlugin func(*monstachemap.MapperPluginInput) (*monstachemap.MapperPluginOutput, error)
var filterPlugin func(*monstachemap.MapperPluginInput) (bool, error)

// Per-namespace lookup tables populated from the configuration.
var mapEnvs map[string]*executionEnv = make(map[string]*executionEnv)
var filterEnvs map[string]*executionEnv = make(map[string]*executionEnv)
var mapIndexTypes map[string]*indexTypeMapping = make(map[string]*indexTypeMapping)
var fileNamespaces map[string]bool = make(map[string]bool)
var patchNamespaces map[string]bool = make(map[string]bool)
var tmNamespaces map[string]bool = make(map[string]bool)
var routingNamespaces map[string]bool = make(map[string]bool)

// Namespaces matching these patterns are filtered out (GridFS chunk
// collections and system.* collections).
var chunksRegex = regexp.MustCompile("\\.chunks$")
var systemsRegex = regexp.MustCompile("system\\..+$")

// exitStatus is the process exit code; set elsewhere (not visible in this chunk).
var exitStatus = 0
// version is the monstache release reported to the user.
const version = "4.9.0"

// Defaults for connection, resume, and bulk-indexing options.
const mongoURLDefault string = "localhost"
const resumeNameDefault string = "default"
const elasticMaxConnsDefault int = 4
const elasticClientTimeoutDefault int = 60
const elasticMaxDocsDefault int = -1
const elasticMaxBytesDefault int = 8 * 1024 * 1024
const gtmChannelSizeDefault int = 512

// typeFromFuture is the single mapping type used on Elasticsearch 6.2+
// (see useTypeFromFuture).
const typeFromFuture string = "_doc"
const fileDownloadersDefault = 10
// deleteStrategy selects how deletes are propagated to Elasticsearch;
// parsed from the delete-strategy option (see Set below).
type deleteStrategy int

const (
	statelessDeleteStrategy deleteStrategy = iota
	statefulDeleteStrategy
	ignoreDeleteStrategy
)
// stringargs collects the values of a repeatable command line flag.
type stringargs []string

// executionEnv pairs an otto javascript VM with its loaded script; lock
// serializes calls into the VM.
type executionEnv struct {
	VM *otto.Otto
	Script string
	lock *sync.Mutex
}

// javascript describes one script/filter entry from the configuration.
type javascript struct {
	Namespace string
	Script string
	Path string
	Routing bool
}

// indexTypeMapping maps a MongoDB namespace to an Elasticsearch index and type.
type indexTypeMapping struct {
	Namespace string
	Index string
	Type string
}

// findConf holds the settings for a named query helper bound to an otto VM.
type findConf struct {
	vm *otto.Otto
	ns string
	name string
	session *mgo.Session
	byId bool
	multi bool
}

// findCall carries the state of a single query issued through a findConf.
type findCall struct {
	config *findConf
	session *mgo.Session
	query interface{}
	db string
	col string
	limit int
	sort []string
	sel map[string]int
}

// logFiles holds optional output file paths for each log level.
type logFiles struct {
	Info string
	Warn string
	Error string
	Trace string
	Stats string
}
// indexingMeta carries per-document indexing overrides (routing, index,
// version, pipeline, ...) extracted from the _meta_monstache attribute.
type indexingMeta struct {
	Routing string
	Index string
	Type string
	Parent string
	Version int64
	VersionType string
	Pipeline string
	RetryOnConflict int
}

// mongoDialSettings mirrors the [mongo-dial-settings] config table.
type mongoDialSettings struct {
	Timeout int
	Ssl bool
}

// mongoSessionSettings mirrors the [mongo-session-settings] config table.
type mongoSessionSettings struct {
	SocketTimeout int `toml:"socket-timeout"`
	SyncTimeout int `toml:"sync-timeout"`
}

// gtmSettings mirrors the [gtm-settings] config table.
type gtmSettings struct {
	ChannelSize int `toml:"channel-size"`
	BufferSize int `toml:"buffer-size"`
	BufferDuration string `toml:"buffer-duration"`
}

// httpServerCtx bundles the optional embedded HTTP server with the state it
// exposes (bulk processor stats, config, uptime).
type httpServerCtx struct {
	httpServer *http.Server
	bulk *elastic.BulkProcessor
	config *configOptions
	shutdown bool
	started time.Time
}
// configOptions is the full set of monstache options, populated from the
// TOML config file (via the toml tags) and/or command line flags.
type configOptions struct {
	// MongoDB connection and oplog-source options.
	MongoURL string `toml:"mongo-url"`
	MongoConfigURL string `toml:"mongo-config-url"`
	MongoPemFile string `toml:"mongo-pem-file"`
	MongoValidatePemFile bool `toml:"mongo-validate-pem-file"`
	MongoOpLogDatabaseName string `toml:"mongo-oplog-database-name"`
	MongoOpLogCollectionName string `toml:"mongo-oplog-collection-name"`
	MongoDialSettings mongoDialSettings `toml:"mongo-dial-settings"`
	MongoSessionSettings mongoSessionSettings `toml:"mongo-session-settings"`
	GtmSettings gtmSettings `toml:"gtm-settings"`
	Logs logFiles `toml:"logs"`
	GraylogAddr string `toml:"graylog-addr"`
	// Elasticsearch connection options.
	ElasticUrls stringargs `toml:"elasticsearch-urls"`
	ElasticUser string `toml:"elasticsearch-user"`
	ElasticPassword string `toml:"elasticsearch-password"`
	ElasticPemFile string `toml:"elasticsearch-pem-file"`
	ElasticValidatePemFile bool `toml:"elasticsearch-validate-pem-file"`
	ElasticVersion string `toml:"elasticsearch-version"`
	ResumeName string `toml:"resume-name"`
	// Namespace include/exclude regexes.
	NsRegex string `toml:"namespace-regex"`
	NsDropRegex string `toml:"namespace-drop-regex"`
	NsExcludeRegex string `toml:"namespace-exclude-regex"`
	NsDropExcludeRegex string `toml:"namespace-drop-exclude-regex"`
	ClusterName string `toml:"cluster-name"`
	Print bool `toml:"print-config"`
	Version bool
	Pprof bool
	EnableEasyJSON bool `toml:"enable-easy-json"`
	Stats bool
	IndexStats bool `toml:"index-stats"`
	StatsDuration string `toml:"stats-duration"`
	StatsIndexFormat string `toml:"stats-index-format"`
	Gzip bool
	Verbose bool
	Resume bool
	ResumeWriteUnsafe bool `toml:"resume-write-unsafe"`
	ResumeFromTimestamp int64 `toml:"resume-from-timestamp"`
	Replay bool
	DroppedDatabases bool `toml:"dropped-databases"`
	DroppedCollections bool `toml:"dropped-collections"`
	IndexFiles bool `toml:"index-files"`
	IndexAsUpdate bool `toml:"index-as-update"`
	FileHighlighting bool `toml:"file-highlighting"`
	EnablePatches bool `toml:"enable-patches"`
	FailFast bool `toml:"fail-fast"`
	IndexOplogTime bool `toml:"index-oplog-time"`
	OplogTsFieldName string `toml:"oplog-ts-field-name"`
	OplogDateFieldName string `toml:"oplog-date-field-name"`
	OplogDateFieldFormat string `toml:"oplog-date-field-format"`
	ExitAfterDirectReads bool `toml:"exit-after-direct-reads"`
	MergePatchAttr string `toml:"merge-patch-attribute"`
	// Bulk-indexing tuning.
	ElasticMaxConns int `toml:"elasticsearch-max-conns"`
	ElasticRetry bool `toml:"elasticsearch-retry"`
	ElasticMaxDocs int `toml:"elasticsearch-max-docs"`
	ElasticMaxBytes int `toml:"elasticsearch-max-bytes"`
	ElasticMaxSeconds int `toml:"elasticsearch-max-seconds"`
	ElasticClientTimeout int `toml:"elasticsearch-client-timeout"`
	ElasticMajorVersion int
	ElasticMinorVersion int
	MaxFileSize int64 `toml:"max-file-size"`
	ConfigFile string
	Script []javascript
	Filter []javascript
	Mapping []indexTypeMapping
	FileNamespaces stringargs `toml:"file-namespaces"`
	PatchNamespaces stringargs `toml:"patch-namespaces"`
	Workers stringargs
	Worker string
	DirectReadNs stringargs `toml:"direct-read-namespaces"`
	DirectReadSplitMax int `toml:"direct-read-split-max"`
	MapperPluginPath string `toml:"mapper-plugin-path"`
	EnableHTTPServer bool `toml:"enable-http-server"`
	HTTPServerAddr string `toml:"http-server-addr"`
	TimeMachineNamespaces stringargs `toml:"time-machine-namespaces"`
	TimeMachineIndexPrefix string `toml:"time-machine-index-prefix"`
	TimeMachineIndexSuffix string `toml:"time-machine-index-suffix"`
	TimeMachineDirectReads bool `toml:"time-machine-direct-reads"`
	RoutingNamespaces stringargs `toml:"routing-namespaces"`
	DeleteStrategy deleteStrategy `toml:"delete-strategy"`
	DeleteIndexPattern string `toml:"delete-index-pattern"`
	FileDownloaders int `toml:"file-downloaders"`
	PruneInvalidJSON bool `toml:"prune-invalid-json"`
}
// String renders the strategy as its numeric value (flag.Value interface).
func (arg *deleteStrategy) String() string {
	return strconv.Itoa(int(*arg))
}
// Set parses value as an integer delete strategy (flag.Value interface).
// Rewritten with an early return instead of else-after-return.
func (arg *deleteStrategy) Set(value string) error {
	i, err := strconv.Atoi(value)
	if err != nil {
		return err
	}
	*arg = deleteStrategy(i)
	return nil
}
// String renders the accumulated values (flag.Value interface).
func (args *stringargs) String() string {
	return fmt.Sprint(*args)
}
// Set appends one occurrence of a repeatable flag (flag.Value interface).
func (args *stringargs) Set(v string) error {
	*args = append(*args, v)
	return nil
}
// isSharded reports whether a mongo config server URL was supplied, which
// indicates the source deployment is a sharded cluster.
func (config *configOptions) isSharded() bool {
	sharded := config.MongoConfigURL != ""
	return sharded
}
// afterBulk is the bulk processor completion callback.  It logs transport
// failures and each failed line of the bulk response; HTTP 409 conflicts are
// logged as warnings (expected with external versioning), the rest as errors.
func afterBulk(executionId int64, requests []elastic.BulkableRequest, response *elastic.BulkResponse, err error) {
	if err != nil {
		errorLog.Printf("Bulk index request with execution ID %d failed: %s", executionId, err)
	}
	if response == nil || !response.Errors {
		return
	}
	failed := response.Failed()
	if failed == nil {
		return
	}
	errorLog.Printf("Bulk index request with execution ID %d has %d line failure/warning(s)", executionId, len(failed))
	for i, item := range failed {
		// FIX: local renamed from `json`, which shadowed the encoding/json package.
		encoded, err := json.Marshal(item)
		if err != nil {
			errorLog.Printf("Unable to marshall failed request line #%d: %s", i, err)
			continue
		}
		if item.Status == 409 {
			warnLog.Printf("Conflict request line #%d details: %s", i, string(encoded))
		} else {
			errorLog.Printf("Failed request line #%d details: %s", i, string(encoded))
		}
	}
}
// useTypeFromFuture reports whether the target Elasticsearch is new enough
// (6.2 or later) to use the single "_doc" mapping type.
func (config *configOptions) useTypeFromFuture() (use bool) {
	major, minor := config.ElasticMajorVersion, config.ElasticMinorVersion
	use = major > 6 || (major == 6 && minor >= 2)
	return
}
// parseElasticsearchVersion parses a "major.minor[...]" version string and
// records the numbers on the config.
//
// FIX: previously an error from parsing the major part (or the "major
// version 0" error) could be overwritten by a later successful parse of the
// minor part, silently returning nil with ElasticMajorVersion left at 0.
// Errors now return immediately.
func (config *configOptions) parseElasticsearchVersion(number string) (err error) {
	if number == "" {
		return errors.New("Elasticsearch version cannot be blank")
	}
	versionParts := strings.Split(number, ".")
	majorVersion, err := strconv.Atoi(versionParts[0])
	if err != nil {
		return err
	}
	config.ElasticMajorVersion = majorVersion
	if majorVersion == 0 {
		return errors.New("Invalid Elasticsearch major version 0")
	}
	if len(versionParts) > 1 {
		minorVersion, merr := strconv.Atoi(versionParts[1])
		if merr != nil {
			return merr
		}
		config.ElasticMinorVersion = minorVersion
	}
	return nil
}
// newBulkProcessor builds the main bulk indexing processor using the
// configured concurrency, batching, and flush interval settings.
func (config *configOptions) newBulkProcessor(client *elastic.Client) (bulk *elastic.BulkProcessor, err error) {
	bulkService := client.BulkProcessor().Name("monstache")
	bulkService.Workers(config.ElasticMaxConns)
	bulkService.Stats(config.Stats)
	bulkService.BulkActions(config.ElasticMaxDocs)
	bulkService.BulkSize(config.ElasticMaxBytes)
	// Idiom fix: was `config.ElasticRetry == false`.
	if !config.ElasticRetry {
		// With retries disabled, fail bulk requests immediately.
		bulkService.Backoff(&elastic.StopBackoff{})
	}
	bulkService.After(afterBulk)
	bulkService.FlushInterval(time.Duration(config.ElasticMaxSeconds) * time.Second)
	return bulkService.Do(context.Background())
}
// newStatsBulkProcessor builds a small single-worker processor used solely
// for indexing monstache's own stats documents, flushing every 5 seconds.
func (config *configOptions) newStatsBulkProcessor(client *elastic.Client) (bulk *elastic.BulkProcessor, err error) {
	svc := client.BulkProcessor().
		Name("monstache-stats").
		Workers(1).
		Stats(false).
		BulkActions(-1).
		BulkSize(-1).
		FlushInterval(5 * time.Second).
		After(afterBulk)
	return svc.Do(context.Background())
}
// needsSecureScheme reports whether any configured Elasticsearch URL is https.
func (config *configOptions) needsSecureScheme() bool {
	for _, u := range config.ElasticUrls {
		if strings.HasPrefix(u, "https") {
			return true
		}
	}
	return false
}
// newElasticClient constructs the Elasticsearch client from the configured
// URLs, credentials, scheme, retry policy, logging, and HTTP transport.
func (config *configOptions) newElasticClient() (client *elastic.Client, err error) {
	opts := []elastic.ClientOptionFunc{
		elastic.SetErrorLog(errorLog),
		elastic.SetSniff(false),
	}
	if config.needsSecureScheme() {
		opts = append(opts, elastic.SetScheme("https"))
	}
	if len(config.ElasticUrls) > 0 {
		opts = append(opts, elastic.SetURL(config.ElasticUrls...))
	} else {
		// Record the default URL so later code (e.g. the version probe)
		// has a URL to reference.
		config.ElasticUrls = append(config.ElasticUrls, elastic.DefaultURL)
	}
	if config.Verbose {
		opts = append(opts, elastic.SetTraceLog(traceLog))
	}
	if config.ElasticUser != "" {
		opts = append(opts, elastic.SetBasicAuth(config.ElasticUser, config.ElasticPassword))
	}
	if config.ElasticRetry {
		minBackoff, maxBackoff := 50*time.Millisecond, 20*time.Second
		backoff := elastic.NewExponentialBackoff(minBackoff, maxBackoff)
		opts = append(opts, elastic.SetRetrier(elastic.NewBackoffRetrier(backoff)))
	}
	httpClient, err := config.NewHTTPClient()
	if err != nil {
		return client, err
	}
	opts = append(opts, elastic.SetHttpClient(httpClient))
	return elastic.NewClient(opts...)
}
// testElasticsearchConn verifies connectivity by asking the first configured
// URL for its version, then records the parsed version on the config.
func (config *configOptions) testElasticsearchConn(client *elastic.Client) (err error) {
	number, err := client.ElasticsearchVersion(config.ElasticUrls[0])
	if err != nil {
		return
	}
	infoLog.Printf("Successfully connected to Elasticsearch version %s", number)
	return config.parseElasticsearchVersion(number)
}
// deleteIndexes removes all indexes associated with a dropped MongoDB
// database, honoring any configured index-name override for that database.
func deleteIndexes(client *elastic.Client, db string, config *configOptions) (err error) {
	pattern := strings.ToLower(db + "*")
	for ns, mapping := range mapIndexTypes {
		if strings.SplitN(ns, ".", 2)[0] != db {
			continue
		}
		if mapping.Index != "" {
			pattern = strings.ToLower(mapping.Index + "*")
		}
		break
	}
	_, err = client.DeleteIndex(pattern).Do(context.Background())
	return
}
// deleteIndex removes the index for a dropped collection namespace, honoring
// any configured index-name override.
func deleteIndex(client *elastic.Client, namespace string, config *configOptions) (err error) {
	index := namespace
	if m := mapIndexTypes[namespace]; m != nil && m.Index != "" {
		index = m.Index
	}
	_, err = client.DeleteIndex(strings.ToLower(index)).Do(context.Background())
	return err
}
// ensureFileMapping installs the "attachment" ingest pipeline used to
// extract content from base64 encoded file data stored under "file".
func ensureFileMapping(client *elastic.Client) (err error) {
	attachment := map[string]interface{}{"field": "file"}
	pipeline := map[string]interface{}{
		"description": "Extract file information",
		"processors": [1]map[string]interface{}{
			{"attachment": attachment},
		},
	}
	_, err = client.IngestPutPipeline("attachment").BodyJson(pipeline).Do(context.Background())
	return err
}
// defaultIndexTypeMapping derives the mapping used when no override is
// configured: index is the lowercased namespace; type is the collection
// name, or "_doc" on Elasticsearch 6.2+.
func defaultIndexTypeMapping(config *configOptions, op *gtm.Op) *indexTypeMapping {
	var typeName string
	if config.useTypeFromFuture() {
		typeName = typeFromFuture
	} else {
		typeName = op.GetCollection()
	}
	return &indexTypeMapping{
		Namespace: op.Namespace,
		Index:     strings.ToLower(op.Namespace),
		Type:      typeName,
	}
}
// mapIndexType resolves the index/type for an op, applying any configured
// per-namespace overrides on top of the defaults.
func mapIndexType(config *configOptions, op *gtm.Op) *indexTypeMapping {
	resolved := defaultIndexTypeMapping(config, op)
	override := mapIndexTypes[op.Namespace]
	if override == nil {
		return resolved
	}
	if override.Index != "" {
		resolved.Index = override.Index
	}
	if override.Type != "" {
		resolved.Type = override.Type
	}
	return resolved
}
// opIDToString renders a document _id as a string usable as an Elasticsearch
// document ID.  ObjectIds become hex; whole-valued floats drop the fraction;
// everything else is formatted with %v.
// Idiom fix: uses a typed switch instead of repeated type assertions.
func opIDToString(op *gtm.Op) string {
	switch id := op.Id.(type) {
	case bson.ObjectId:
		return id.Hex()
	case float64:
		if intID := int(id); id == float64(intID) {
			return fmt.Sprintf("%v", intID)
		}
		return fmt.Sprintf("%v", id)
	case float32:
		if intID := int(id); id == float32(intID) {
			return fmt.Sprintf("%v", intID)
		}
		return fmt.Sprintf("%v", id)
	default:
		return fmt.Sprintf("%v", op.Id)
	}
}
// convertSliceJavascript recursively prepares a BSON array for the otto VM,
// converting ObjectIds to their hex string form.
func convertSliceJavascript(a []interface{}) []interface{} {
	var converted []interface{}
	for _, item := range a {
		var out interface{}
		switch v := item.(type) {
		case map[string]interface{}:
			out = convertMapJavascript(v)
		case []interface{}:
			out = convertSliceJavascript(v)
		case bson.ObjectId:
			out = v.Hex()
		default:
			out = item
		}
		converted = append(converted, out)
	}
	return converted
}
// convertMapJavascript recursively prepares a BSON document for the otto VM,
// converting ObjectIds to their hex string form.
func convertMapJavascript(e map[string]interface{}) map[string]interface{} {
	converted := make(map[string]interface{})
	for key, val := range e {
		switch v := val.(type) {
		case map[string]interface{}:
			converted[key] = convertMapJavascript(v)
		case []interface{}:
			converted[key] = convertSliceJavascript(v)
		case bson.ObjectId:
			converted[key] = v.Hex()
		default:
			converted[key] = val
		}
	}
	return converted
}
// fixSlicePruneInvalidJSON recursively drops array elements that cannot be
// serialized to JSON: time.Time values with out-of-range years and NaN/Inf
// float64 values.  id is used only for log messages.
// Cleanup: the NaN and Inf branches logged identical messages and were merged.
func fixSlicePruneInvalidJSON(id string, a []interface{}) []interface{} {
	var avs []interface{}
	for _, av := range a {
		var avc interface{}
		switch achild := av.(type) {
		case map[string]interface{}:
			avc = fixPruneInvalidJSON(id, achild)
		case []interface{}:
			avc = fixSlicePruneInvalidJSON(id, achild)
		case time.Time:
			// Years outside 0-9999 break JSON date serialization.
			if year := achild.Year(); year < 0 || year > 9999 {
				warnLog.Printf("Dropping invalid time.Time value: %s for document _id: %s", achild, id)
				continue
			}
			avc = av
		case float64:
			// NaN and +/-Inf cause an error in the json serializer.
			if math.IsNaN(achild) || math.IsInf(achild, 0) {
				warnLog.Printf("Dropping invalid float64 value: %v for document _id: %s", achild, id)
				continue
			}
			avc = av
		default:
			avc = av
		}
		avs = append(avs, avc)
	}
	return avs
}
// fixPruneInvalidJSON recursively drops map entries that cannot be
// serialized to JSON: time.Time values with out-of-range years and NaN/Inf
// float64 values.  id is used only for log messages.
// Cleanup: the NaN and Inf branches logged identical messages and were merged.
func fixPruneInvalidJSON(id string, e map[string]interface{}) map[string]interface{} {
	o := make(map[string]interface{})
	for k, v := range e {
		switch child := v.(type) {
		case map[string]interface{}:
			o[k] = fixPruneInvalidJSON(id, child)
		case []interface{}:
			o[k] = fixSlicePruneInvalidJSON(id, child)
		case time.Time:
			// Years outside 0-9999 break JSON date serialization.
			if year := child.Year(); year < 0 || year > 9999 {
				warnLog.Printf("Dropping invalid time.Time value: %s for document _id: %s", child, id)
				continue
			}
			o[k] = v
		case float64:
			// NaN and +/-Inf cause an error in the json serializer.
			if math.IsNaN(child) || math.IsInf(child, 0) {
				warnLog.Printf("Dropping invalid float64 value: %v for document _id: %s", child, id)
				continue
			}
			o[k] = v
		default:
			o[k] = v
		}
	}
	return o
}
// deepExportValue converts a value that may have come from the otto VM into
// a plain Go value, recursing into maps and slices.
func deepExportValue(a interface{}) (b interface{}) {
	switch t := a.(type) {
	case otto.Value:
		ex, err := t.Export()
		// Dates are special-cased: otto renders them via String(), which is
		// parsed back into a time.Time, replacing the Export result.
		if t.Class() == "Date" {
			ex, err = time.Parse("Mon, 2 Jan 2006 15:04:05 MST", t.String())
		}
		if err == nil {
			b = deepExportValue(ex)
		} else {
			// NOTE(review): on export/parse failure the value is dropped
			// (b stays nil) — confirm this is intended.
			errorLog.Printf("Error exporting from javascript: %s", err)
		}
	case map[string]interface{}:
		b = deepExportMap(t)
	case []interface{}:
		b = deepExportSlice(t)
	default:
		b = a
	}
	return
}
// deepExportSlice exports every element of a javascript array into plain Go
// values.
func deepExportSlice(a []interface{}) []interface{} {
	var exported []interface{}
	for _, item := range a {
		exported = append(exported, deepExportValue(item))
	}
	return exported
}
// deepExportMap exports every value of a javascript object into plain Go
// values.
func deepExportMap(e map[string]interface{}) map[string]interface{} {
	exported := make(map[string]interface{}, len(e))
	for key, val := range e {
		exported[key] = deepExportValue(val)
	}
	return exported
}
// mapDataJavascript runs any configured javascript mapping functions against
// op.Data.  The global mapping (key "") runs first, then the namespace
// specific one.  A function may return a replacement object, or a falsy
// value to drop the op (op.Data is set to nil).
//
// NOTE(review): unlike filterWithScript, this does not take env.lock before
// calling into the otto VM — confirm callers invoke it single-threaded.
func mapDataJavascript(op *gtm.Op) error {
	names := []string{"", op.Namespace}
	for _, name := range names {
		if env := mapEnvs[name]; env != nil {
			arg := convertMapJavascript(op.Data)
			val, err := env.VM.Call("module.exports", arg, arg, op.Namespace)
			if err != nil {
				return err
			}
			if strings.ToLower(val.Class()) == "object" {
				data, err := val.Export()
				if err != nil {
					return err
				} else if data == val {
					// Export returning the value itself means it could not
					// be converted to a native Go value.
					return errors.New("Exported function must return an object")
				} else {
					dm := data.(map[string]interface{})
					op.Data = deepExportMap(dm)
				}
			} else {
				// Non-object returns are treated as a keep/drop boolean.
				indexed, err := val.ToBoolean()
				if err != nil {
					return err
				} else if !indexed {
					op.Data = nil
					break
				}
			}
		}
	}
	return nil
}
// mapDataGolang runs the loaded Go mapper plugin against the op, replacing
// op.Data with the plugin's document (unless passthrough is set) and
// attaching any indexing metadata under the _meta_monstache key.
// Idiom fix: `output.Passthrough == false` replaced with `!output.Passthrough`
// and nesting flattened with early returns.
func mapDataGolang(s *mgo.Session, op *gtm.Op) error {
	session := s.Copy()
	defer session.Close()
	input := &monstachemap.MapperPluginInput{
		Document:   op.Data,
		Namespace:  op.Namespace,
		Database:   op.GetDatabase(),
		Collection: op.GetCollection(),
		Operation:  op.Operation,
		Session:    session,
	}
	output, err := mapperPlugin(input)
	if err != nil {
		return err
	}
	if output == nil {
		return nil
	}
	if output.Drop {
		// nil Data signals downstream code to skip indexing this op.
		op.Data = nil
		return nil
	}
	if !output.Passthrough {
		op.Data = output.Document
	}
	meta := make(map[string]interface{})
	if output.Index != "" {
		meta["index"] = output.Index
	}
	if output.Type != "" {
		meta["type"] = output.Type
	}
	if output.Routing != "" {
		meta["routing"] = output.Routing
	}
	if output.Parent != "" {
		meta["parent"] = output.Parent
	}
	if output.Version != 0 {
		meta["version"] = output.Version
	}
	if output.VersionType != "" {
		meta["versionType"] = output.VersionType
	}
	if output.Pipeline != "" {
		meta["pipeline"] = output.Pipeline
	}
	if output.RetryOnConflict != 0 {
		meta["retryOnConflict"] = output.RetryOnConflict
	}
	if len(meta) > 0 {
		op.Data["_meta_monstache"] = meta
	}
	return nil
}
// mapData transforms op.Data via the Go plugin when one is configured,
// otherwise via any javascript mapping functions.
func mapData(session *mgo.Session, config *configOptions, op *gtm.Op) error {
	if config.MapperPluginPath == "" {
		return mapDataJavascript(op)
	}
	return mapDataGolang(session, op)
}
// prepareDataForIndexing strips internal fields from the document and
// optionally stamps oplog time fields and prunes unserializable values.
func prepareDataForIndexing(config *configOptions, op *gtm.Op) {
	data := op.Data
	if config.IndexOplogTime {
		// The high 32 bits of a Mongo timestamp are seconds since the epoch.
		seconds := int64(op.Timestamp >> 32)
		when := time.Unix(seconds, 0).UTC()
		data[config.OplogTsFieldName] = op.Timestamp
		data[config.OplogDateFieldName] = when.Format(config.OplogDateFieldFormat)
	}
	delete(data, "_id")
	delete(data, "_meta_monstache")
	if config.PruneInvalidJSON {
		op.Data = fixPruneInvalidJSON(opIDToString(op), data)
	}
}
// parseIndexMeta extracts indexing metadata attached by a mapping function
// under the _meta_monstache key.  Version defaults to the op timestamp with
// "external" versioning so replays cannot clobber newer documents.
// Idiom fix: uses typed switches instead of repeated type assertions.
func parseIndexMeta(op *gtm.Op) (meta *indexingMeta) {
	meta = &indexingMeta{
		Version:     int64(op.Timestamp),
		VersionType: "external",
	}
	m, ok := op.Data["_meta_monstache"]
	if !ok {
		return meta
	}
	switch v := m.(type) {
	case map[string]interface{}:
		meta.load(v)
	case otto.Value:
		ex, err := v.Export()
		if err == nil && ex != m {
			if attrs, ok := ex.(map[string]interface{}); ok {
				meta.load(attrs)
			} else {
				errorLog.Println("Invalid indexing metadata")
			}
		}
	default:
		errorLog.Println("Invalid indexing metadata")
	}
	return meta
}
// addFileContent reads the GridFS file identified by op.Id and stores its
// base64 encoded content under op.Data["file"].  Files larger than the
// configured max size are skipped with a warning, leaving "file" empty.
// Idiom fix: buffer.String() instead of string(buffer.Bytes()); nested
// size checks merged into one condition.
func addFileContent(s *mgo.Session, op *gtm.Op, config *configOptions) (err error) {
	session := s.Copy()
	defer session.Close()
	op.Data["file"] = ""
	var gridByteBuffer bytes.Buffer
	db, bucket :=
		session.DB(op.GetDatabase()),
		strings.SplitN(op.GetCollection(), ".", 2)[0]
	encoder := base64.NewEncoder(base64.StdEncoding, &gridByteBuffer)
	file, err := db.GridFS(bucket).OpenId(op.Id)
	if err != nil {
		return
	}
	defer file.Close()
	if config.MaxFileSize > 0 && file.Size() > config.MaxFileSize {
		warnLog.Printf("File %s md5(%s) exceeds max file size. file content omitted.",
			file.Name(), file.MD5())
		return
	}
	if _, err = io.Copy(encoder, file); err != nil {
		return
	}
	// Close flushes any final partial base64 block into the buffer.
	if err = encoder.Close(); err != nil {
		return
	}
	op.Data["file"] = gridByteBuffer.String()
	return
}
// notMonstache filters out ops on monstache's own bookkeeping database.
func notMonstache(op *gtm.Op) bool {
	database := op.GetDatabase()
	return database != "monstache"
}
// notChunks filters out ops on GridFS .chunks collections.
func notChunks(op *gtm.Op) bool {
	col := op.GetCollection()
	return !chunksRegex.MatchString(col)
}
// notConfig filters out ops on the MongoDB config database.
func notConfig(op *gtm.Op) bool {
	database := op.GetDatabase()
	return database != "config"
}
// notSystem filters out ops on system.* collections.
func notSystem(op *gtm.Op) bool {
	col := op.GetCollection()
	return !systemsRegex.MatchString(col)
}
// filterWithRegex keeps ops whose namespace matches regex.  Drop operations
// always pass so index deletions are still processed.
// Idiom fix: else-after-return collapsed into a single boolean expression.
func filterWithRegex(regex string) gtm.OpFilter {
	var validNameSpace = regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		return op.IsDrop() || validNameSpace.MatchString(op.Namespace)
	}
}
// filterDropWithRegex applies regex only to drop operations; all other ops
// pass unconditionally.
// Idiom fix: else-after-return collapsed into a single boolean expression.
func filterDropWithRegex(regex string) gtm.OpFilter {
	var validNameSpace = regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		return !op.IsDrop() || validNameSpace.MatchString(op.Namespace)
	}
}
// filterWithPlugin builds an op filter backed by the loaded Go filter
// plugin.  Only inserts and updates carrying data are consulted; a plugin
// error is logged and the op is dropped.
func filterWithPlugin() gtm.OpFilter {
	return func(op *gtm.Op) bool {
		if !(op.IsInsert() || op.IsUpdate()) || op.Data == nil {
			return true
		}
		input := &monstachemap.MapperPluginInput{
			Document:   op.Data,
			Namespace:  op.Namespace,
			Database:   op.GetDatabase(),
			Collection: op.GetCollection(),
			Operation:  op.Operation,
		}
		keep, err := filterPlugin(input)
		if err != nil {
			errorLog.Println(err)
			return false
		}
		return keep
	}
}
// filterWithScript builds an op filter that consults the configured
// javascript filter functions.  Both the global ("") and namespace-specific
// functions run; the op is kept only if every applicable function approves.
// Access to each VM is serialized through env.lock.
func filterWithScript() gtm.OpFilter {
	return func(op *gtm.Op) bool {
		var keep bool = true
		if (op.IsInsert() || op.IsUpdate()) && op.Data != nil {
			nss := []string{"", op.Namespace}
			for _, ns := range nss {
				if env := filterEnvs[ns]; env != nil {
					// Default to dropping unless the script approves.
					keep = false
					arg := convertMapJavascript(op.Data)
					env.lock.Lock()
					val, err := env.VM.Call("module.exports", arg, arg, op.Namespace)
					if err != nil {
						errorLog.Println(err)
					} else {
						if ok, err := val.ToBoolean(); err == nil {
							keep = ok
						} else {
							errorLog.Println(err)
						}
					}
					env.lock.Unlock()
				}
				if !keep {
					break
				}
			}
		}
		return keep
	}
}
// filterInverseWithRegex drops ops whose namespace matches regex.  Drop
// operations always pass.
// Idiom fix: else-after-return collapsed into a single boolean expression.
func filterInverseWithRegex(regex string) gtm.OpFilter {
	var invalidNameSpace = regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		return op.IsDrop() || !invalidNameSpace.MatchString(op.Namespace)
	}
}
// filterDropInverseWithRegex drops only drop operations whose namespace
// matches regex; all other ops pass unconditionally.
// Idiom fix: else-after-return collapsed into a single boolean expression.
func filterDropInverseWithRegex(regex string) gtm.OpFilter {
	var invalidNameSpace = regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		return !op.IsDrop() || !invalidNameSpace.MatchString(op.Namespace)
	}
}
// ensureClusterTTL creates the TTL index that expires stale cluster
// membership documents 30 seconds after their expireAt value.
func ensureClusterTTL(session *mgo.Session) error {
	idx := mgo.Index{
		Key:         []string{"expireAt"},
		Background:  true,
		ExpireAfter: time.Duration(30) * time.Second,
	}
	return session.DB("monstache").C("cluster").EnsureIndex(idx)
}
// enableProcess attempts to claim the active-worker slot for this resume
// name by inserting a membership document.  It returns false without error
// when another process already holds the slot (duplicate key).
func enableProcess(s *mgo.Session, config *configOptions) (bool, error) {
	session := s.Copy()
	defer session.Close()
	host, err := os.Hostname()
	if err != nil {
		return false, err
	}
	doc := map[string]interface{}{
		"_id":      config.ResumeName,
		"expireAt": time.Now().UTC(),
		"pid":      os.Getpid(),
		"host":     host,
	}
	col := session.DB("monstache").C("cluster")
	switch insertErr := col.Insert(doc); {
	case insertErr == nil:
		return true, nil
	case mgo.IsDup(insertErr):
		return false, nil
	default:
		return false, insertErr
	}
}
// resetClusterState removes this process's cluster membership document.
func resetClusterState(session *mgo.Session, config *configOptions) error {
	cluster := session.DB("monstache").C("cluster")
	return cluster.RemoveId(config.ResumeName)
}
// ensureEnabled reports whether this process is still the active cluster
// worker for config.ResumeName: the stored pid and host must match this
// process, in which case the membership document's expireAt is refreshed so
// the TTL index keeps the claim alive.
func ensureEnabled(s *mgo.Session, config *configOptions) (enabled bool, err error) {
	session := s.Copy()
	defer session.Close()
	col := session.DB("monstache").C("cluster")
	doc := make(map[string]interface{})
	if err = col.FindId(config.ResumeName).One(doc); err == nil {
		if doc["pid"] != nil && doc["host"] != nil {
			var hostname string
			pid := doc["pid"].(int)
			host := doc["host"].(string)
			if hostname, err = os.Hostname(); err == nil {
				enabled = (pid == os.Getpid() && host == hostname)
				if enabled {
					// Bump expireAt so the TTL index does not expire us.
					err = col.UpdateId(config.ResumeName,
						bson.M{"$set": bson.M{"expireAt": time.Now().UTC()}})
				}
			}
		}
	}
	return
}
// resumeWork primes the gtm context with the last timestamp saved under the
// configured resume name and resumes oplog tailing from that point.
func resumeWork(ctx *gtm.OpCtxMulti, session *mgo.Session, config *configOptions) {
	col := session.DB("monstache").C("monstache")
	doc := make(map[string]interface{})
	// The lookup error is deliberately ignored: on a first run no resume
	// document exists yet, doc stays empty, and tailing starts from now.
	col.FindId(config.ResumeName).One(doc)
	if doc["ts"] != nil {
		ts := doc["ts"].(bson.MongoTimestamp)
		ctx.Since(ts)
	}
	ctx.Resume()
}
// saveTimestamp upserts the most recently processed oplog timestamp under
// the configured resume name so a later run can pick up where this one
// left off. Short socket/sync timeouts keep a slow server from stalling
// the sync loop; with resume-write-unsafe the write is fire-and-forget.
func saveTimestamp(s *mgo.Session, ts bson.MongoTimestamp, config *configOptions) error {
	session := s.Copy()
	defer session.Close()
	session.SetSocketTimeout(5 * time.Second)
	session.SetSyncTimeout(5 * time.Second)
	if config.ResumeWriteUnsafe {
		session.SetSafe(nil)
	}
	col := session.DB("monstache").C("monstache")
	update := bson.M{"$set": map[string]interface{}{"ts": ts}}
	_, err := col.UpsertId(config.ResumeName, update)
	return err
}
// parseCommandLineFlags registers every supported command line flag against
// the corresponding configOptions field and parses os.Args. Zero values
// left by unset flags are later filled in from the TOML config file
// (loadConfigFile) and hard defaults (setDefaults). Returns the receiver
// for call chaining.
func (config *configOptions) parseCommandLineFlags() *configOptions {
	flag.BoolVar(&config.Print, "print-config", false, "Print the configuration and then exit")
	flag.StringVar(&config.MongoURL, "mongo-url", "", "MongoDB server or router server connection URL")
	flag.StringVar(&config.MongoConfigURL, "mongo-config-url", "", "MongoDB config server connection URL")
	flag.StringVar(&config.MongoPemFile, "mongo-pem-file", "", "Path to a PEM file for secure connections to MongoDB")
	flag.BoolVar(&config.MongoValidatePemFile, "mongo-validate-pem-file", true, "Set to boolean false to not validate the MongoDB PEM file")
	flag.StringVar(&config.MongoOpLogDatabaseName, "mongo-oplog-database-name", "", "Override the database name which contains the mongodb oplog")
	flag.StringVar(&config.MongoOpLogCollectionName, "mongo-oplog-collection-name", "", "Override the collection name which contains the mongodb oplog")
	flag.StringVar(&config.GraylogAddr, "graylog-addr", "", "Send logs to a Graylog server at this address")
	flag.StringVar(&config.ElasticVersion, "elasticsearch-version", "", "Specify elasticsearch version directly instead of getting it from the server")
	flag.StringVar(&config.ElasticUser, "elasticsearch-user", "", "The elasticsearch user name for basic auth")
	flag.StringVar(&config.ElasticPassword, "elasticsearch-password", "", "The elasticsearch password for basic auth")
	flag.StringVar(&config.ElasticPemFile, "elasticsearch-pem-file", "", "Path to a PEM file for secure connections to elasticsearch")
	flag.BoolVar(&config.ElasticValidatePemFile, "elasticsearch-validate-pem-file", true, "Set to boolean false to not validate the Elasticsearch PEM file")
	flag.IntVar(&config.ElasticMaxConns, "elasticsearch-max-conns", 0, "Elasticsearch max connections")
	flag.IntVar(&config.FileDownloaders, "file-downloaders", 0, "GridFs download go routines")
	flag.BoolVar(&config.ElasticRetry, "elasticsearch-retry", false, "True to retry failed request to Elasticsearch")
	flag.IntVar(&config.ElasticMaxDocs, "elasticsearch-max-docs", 0, "Number of docs to hold before flushing to Elasticsearch")
	flag.IntVar(&config.ElasticMaxBytes, "elasticsearch-max-bytes", 0, "Number of bytes to hold before flushing to Elasticsearch")
	flag.IntVar(&config.ElasticMaxSeconds, "elasticsearch-max-seconds", 0, "Number of seconds before flushing to Elasticsearch")
	flag.IntVar(&config.ElasticClientTimeout, "elasticsearch-client-timeout", 0, "Number of seconds before a request to Elasticsearch is timed out")
	flag.Int64Var(&config.MaxFileSize, "max-file-size", 0, "GridFs file content exceeding this limit in bytes will not be indexed in Elasticsearch")
	flag.StringVar(&config.ConfigFile, "f", "", "Location of configuration file")
	flag.BoolVar(&config.DroppedDatabases, "dropped-databases", true, "True to delete indexes from dropped databases")
	flag.BoolVar(&config.DroppedCollections, "dropped-collections", true, "True to delete indexes from dropped collections")
	flag.BoolVar(&config.Version, "v", false, "True to print the version number")
	flag.BoolVar(&config.Gzip, "gzip", false, "True to use gzip for requests to elasticsearch")
	flag.BoolVar(&config.Verbose, "verbose", false, "True to output verbose messages")
	flag.BoolVar(&config.Pprof, "pprof", false, "True to enable pprof endpoints")
	flag.BoolVar(&config.EnableEasyJSON, "enable-easy-json", false, "True to enable easy-json serialization")
	flag.BoolVar(&config.Stats, "stats", false, "True to print out statistics")
	flag.BoolVar(&config.IndexStats, "index-stats", false, "True to index stats in elasticsearch")
	flag.StringVar(&config.StatsDuration, "stats-duration", "", "The duration after which stats are logged")
	flag.StringVar(&config.StatsIndexFormat, "stats-index-format", "", "time.Time supported format to use for the stats index names")
	flag.BoolVar(&config.Resume, "resume", false, "True to capture the last timestamp of this run and resume on a subsequent run")
	flag.Int64Var(&config.ResumeFromTimestamp, "resume-from-timestamp", 0, "Timestamp to resume syncing from")
	flag.BoolVar(&config.ResumeWriteUnsafe, "resume-write-unsafe", false, "True to speedup writes of the last timestamp synched for resuming at the cost of error checking")
	flag.BoolVar(&config.Replay, "replay", false, "True to replay all events from the oplog and index them in elasticsearch")
	flag.BoolVar(&config.IndexFiles, "index-files", false, "True to index gridfs files into elasticsearch. Requires the elasticsearch mapper-attachments (deprecated) or ingest-attachment plugin")
	flag.BoolVar(&config.IndexAsUpdate, "index-as-update", false, "True to index documents as updates instead of overwrites")
	flag.BoolVar(&config.FileHighlighting, "file-highlighting", false, "True to enable the ability to highlight search times for a file query")
	flag.BoolVar(&config.EnablePatches, "enable-patches", false, "True to include an json-patch field on updates")
	flag.BoolVar(&config.FailFast, "fail-fast", false, "True to exit if a single _bulk request fails")
	flag.BoolVar(&config.IndexOplogTime, "index-oplog-time", false, "True to add date/time information from the oplog to each document when indexing")
	flag.BoolVar(&config.ExitAfterDirectReads, "exit-after-direct-reads", false, "True to exit the program after reading directly from the configured namespaces")
	flag.StringVar(&config.MergePatchAttr, "merge-patch-attribute", "", "Attribute to store json-patch values under")
	flag.StringVar(&config.ResumeName, "resume-name", "", "Name under which to load/store the resume state. Defaults to 'default'")
	flag.StringVar(&config.ClusterName, "cluster-name", "", "Name of the monstache process cluster")
	flag.StringVar(&config.Worker, "worker", "", "The name of this worker in a multi-worker configuration")
	flag.StringVar(&config.MapperPluginPath, "mapper-plugin-path", "", "The path to a .so file to load as a document mapper plugin")
	flag.StringVar(&config.NsRegex, "namespace-regex", "", "A regex which is matched against an operation's namespace (<database>.<collection>). Only operations which match are synched to elasticsearch")
	flag.StringVar(&config.NsDropRegex, "namespace-drop-regex", "", "A regex which is matched against a drop operation's namespace (<database>.<collection>). Only drop operations which match are synched to elasticsearch")
	flag.StringVar(&config.NsExcludeRegex, "namespace-exclude-regex", "", "A regex which is matched against an operation's namespace (<database>.<collection>). Only operations which do not match are synched to elasticsearch")
	flag.StringVar(&config.NsDropExcludeRegex, "namespace-drop-exclude-regex", "", "A regex which is matched against a drop operation's namespace (<database>.<collection>). Only drop operations which do not match are synched to elasticsearch")
	// Repeatable flags (flag.Var with slice-backed values).
	flag.Var(&config.DirectReadNs, "direct-read-namespace", "A list of direct read namespaces")
	flag.IntVar(&config.DirectReadSplitMax, "direct-read-split-max", 0, "Max number of times to split a collection for direct reads")
	flag.Var(&config.RoutingNamespaces, "routing-namespace", "A list of namespaces that override routing information")
	flag.Var(&config.TimeMachineNamespaces, "time-machine-namespace", "A list of direct read namespaces")
	flag.StringVar(&config.TimeMachineIndexPrefix, "time-machine-index-prefix", "", "A prefix to preprend to time machine indexes")
	flag.StringVar(&config.TimeMachineIndexSuffix, "time-machine-index-suffix", "", "A suffix to append to time machine indexes")
	flag.BoolVar(&config.TimeMachineDirectReads, "time-machine-direct-reads", false, "True to index the results of direct reads into the any time machine indexes")
	flag.Var(&config.ElasticUrls, "elasticsearch-url", "A list of Elasticsearch URLs")
	flag.Var(&config.FileNamespaces, "file-namespace", "A list of file namespaces")
	flag.Var(&config.PatchNamespaces, "patch-namespace", "A list of patch namespaces")
	flag.Var(&config.Workers, "workers", "A list of worker names")
	flag.BoolVar(&config.EnableHTTPServer, "enable-http-server", false, "True to enable an internal http server")
	flag.StringVar(&config.HTTPServerAddr, "http-server-addr", "", "The address the internal http server listens on")
	flag.BoolVar(&config.PruneInvalidJSON, "prune-invalid-json", false, "True to omit values which do not serialize to JSON such as +Inf and -Inf and thus cause errors")
	flag.Var(&config.DeleteStrategy, "delete-strategy", "Stategy to use for deletes. 0=stateless,1=stateful,2=ignore")
	flag.StringVar(&config.DeleteIndexPattern, "delete-index-pattern", "", "An Elasticsearch index-pattern to restric the scope of stateless deletes")
	flag.StringVar(&config.OplogTsFieldName, "oplog-ts-field-name", "", "Field name to use for the oplog timestamp")
	flag.StringVar(&config.OplogDateFieldName, "oplog-date-field-name", "", "Field name to use for the oplog date")
	flag.StringVar(&config.OplogDateFieldFormat, "oplog-date-field-format", "", "Format to use for the oplog date")
	flag.Parse()
	return config
}
// loadIndexTypes registers each configured namespace-to-index/type mapping
// in the global mapIndexTypes table. Index names are lowercased to satisfy
// Elasticsearch naming rules. A mapping missing its namespace, or supplying
// neither index nor type, is a fatal configuration error.
func (config *configOptions) loadIndexTypes() {
	if config.Mapping == nil {
		return
	}
	for _, m := range config.Mapping {
		if m.Namespace == "" || (m.Index == "" && m.Type == "") {
			panic("Mappings must specify namespace and at least one of index and type")
		}
		mapIndexTypes[m.Namespace] = &indexTypeMapping{
			Namespace: m.Namespace,
			Index:     strings.ToLower(m.Index),
			Type:      m.Type,
		}
	}
}
// loadFilters compiles each configured [[filter]] entry into a JavaScript
// execution environment and registers it in the global filterEnvs table
// keyed by namespace. Each filter must supply exactly one of script
// (inline) or path (file on disk); the loaded script must assign a
// function to module.exports. Any configuration problem is fatal.
func (config *configOptions) loadFilters() {
	for _, s := range config.Filter {
		if s.Script != "" || s.Path != "" {
			if s.Path != "" && s.Script != "" {
				panic("Filters must specify path or script but not both")
			}
			if s.Path != "" {
				// Load the script body from disk.
				if script, err := ioutil.ReadFile(s.Path); err == nil {
					s.Script = string(script[:])
				} else {
					panic(fmt.Sprintf("Unable to load filter at path %s: %s", s.Path, err))
				}
			}
			// Namespaces must be unique across filters.
			if _, exists := filterEnvs[s.Namespace]; exists {
				panic(fmt.Sprintf("Multiple filters with namespace: %s", s.Namespace))
			}
			// Each namespace gets its own otto VM; the mutex presumably
			// guards concurrent invocation — confirm at the call sites.
			env := &executionEnv{
				VM:     otto.New(),
				Script: s.Script,
				lock:   &sync.Mutex{},
			}
			if err := env.VM.Set("module", make(map[string]interface{})); err != nil {
				panic(err)
			}
			if _, err := env.VM.Run(env.Script); err != nil {
				panic(err)
			}
			// Contract check: module.exports must be a function.
			val, err := env.VM.Run("module.exports")
			if err != nil {
				panic(err)
			} else if !val.IsFunction() {
				panic("module.exports must be a function")
			}
			filterEnvs[s.Namespace] = env
		} else {
			panic("Filters must specify path or script attributes")
		}
	}
}
// loadScripts compiles each configured [[script]] entry into a JavaScript
// execution environment and registers it in the global mapEnvs table keyed
// by namespace. Each script must supply exactly one of script (inline) or
// path (file on disk); the loaded script must assign a function to
// module.exports. Scripts flagged with routing also mark their namespace
// in routingNamespaces. Any configuration problem is fatal.
func (config *configOptions) loadScripts() {
	for _, s := range config.Script {
		if s.Script != "" || s.Path != "" {
			if s.Path != "" && s.Script != "" {
				panic("Scripts must specify path or script but not both")
			}
			if s.Path != "" {
				// Load the script body from disk.
				if script, err := ioutil.ReadFile(s.Path); err == nil {
					s.Script = string(script[:])
				} else {
					panic(fmt.Sprintf("Unable to load script at path %s: %s", s.Path, err))
				}
			}
			// Namespaces must be unique across scripts.
			if _, exists := mapEnvs[s.Namespace]; exists {
				panic(fmt.Sprintf("Multiple scripts with namespace: %s", s.Namespace))
			}
			// Initialize the lock as loadFilters does; leaving it nil risks a
			// nil-pointer panic the first time the environment is locked.
			env := &executionEnv{
				VM:     otto.New(),
				Script: s.Script,
				lock:   &sync.Mutex{},
			}
			if err := env.VM.Set("module", make(map[string]interface{})); err != nil {
				panic(err)
			}
			if _, err := env.VM.Run(env.Script); err != nil {
				panic(err)
			}
			// Contract check: module.exports must be a function.
			val, err := env.VM.Run("module.exports")
			if err != nil {
				panic(err)
			} else if !val.IsFunction() {
				panic("module.exports must be a function")
			}
			mapEnvs[s.Namespace] = env
			if s.Routing {
				routingNamespaces[s.Namespace] = true
			}
		} else {
			panic("Scripts must specify path or script")
		}
	}
}
// loadPlugins loads the Go plugin referenced by MapperPluginPath, if any,
// wiring its required 'Map' symbol into the global mapperPlugin and its
// optional 'Filter' symbol into filterPlugin. A missing or mistyped 'Map'
// symbol is fatal; a missing 'Filter' symbol is silently skipped, but a
// mistyped one is fatal. Returns the receiver for call chaining.
func (config *configOptions) loadPlugins() *configOptions {
	if config.MapperPluginPath == "" {
		return config
	}
	p, err := plugin.Open(config.MapperPluginPath)
	if err != nil {
		panic(fmt.Sprintf("Unable to load mapper plugin %s: %s", config.MapperPluginPath, err))
	}
	mapper, err := p.Lookup("Map")
	if err != nil {
		panic(fmt.Sprintf("Unable to find symbol 'Map' in mapper plugin: %s", err))
	}
	// Single comma-ok assertion instead of a type switch followed by a
	// second assertion to the same type.
	mapFn, ok := mapper.(func(*monstachemap.MapperPluginInput) (*monstachemap.MapperPluginOutput, error))
	if !ok {
		panic(fmt.Sprintf("Plugin 'Map' function must be typed %T", mapperPlugin))
	}
	mapperPlugin = mapFn
	// 'Filter' is optional: a lookup failure leaves filterPlugin unset.
	if filter, err := p.Lookup("Filter"); err == nil {
		filterFn, ok := filter.(func(*monstachemap.MapperPluginInput) (bool, error))
		if !ok {
			panic(fmt.Sprintf("Plugin 'Filter' function must be typed %T", filterPlugin))
		}
		filterPlugin = filterFn
	}
	return config
}
// loadConfigFile merges settings from the TOML configuration file (when -f
// was given) into the receiver. Precedence: a value already set on the
// command line wins; otherwise the file value is taken. For booleans that
// default to true (e.g. dropped-databases) the file can only turn them
// off; for booleans that default to false the file can only turn them on.
// The file is also the sole source for dial/session/gtm settings, log file
// paths, scripts, filters, and index mappings. Returns the receiver for
// call chaining.
func (config *configOptions) loadConfigFile() *configOptions {
	if config.ConfigFile != "" {
		// Seed the decode target with the same defaults the flag parser
		// uses so "unset in file" is distinguishable from "explicitly set".
		var tomlConfig = configOptions{
			DroppedDatabases:     true,
			DroppedCollections:   true,
			MongoDialSettings:    mongoDialSettings{Timeout: -1},
			MongoSessionSettings: mongoSessionSettings{SocketTimeout: -1, SyncTimeout: -1},
			GtmSettings:          gtmDefaultSettings(),
		}
		if _, err := toml.DecodeFile(config.ConfigFile, &tomlConfig); err != nil {
			panic(err)
		}
		// MongoDB connection settings.
		if config.MongoURL == "" {
			config.MongoURL = tomlConfig.MongoURL
		}
		if config.MongoConfigURL == "" {
			config.MongoConfigURL = tomlConfig.MongoConfigURL
		}
		if config.MongoPemFile == "" {
			config.MongoPemFile = tomlConfig.MongoPemFile
		}
		if config.MongoValidatePemFile && !tomlConfig.MongoValidatePemFile {
			config.MongoValidatePemFile = false
		}
		if config.MongoOpLogDatabaseName == "" {
			config.MongoOpLogDatabaseName = tomlConfig.MongoOpLogDatabaseName
		}
		if config.MongoOpLogCollectionName == "" {
			config.MongoOpLogCollectionName = tomlConfig.MongoOpLogCollectionName
		}
		// Elasticsearch connection and bulk settings.
		if config.ElasticUser == "" {
			config.ElasticUser = tomlConfig.ElasticUser
		}
		if config.ElasticPassword == "" {
			config.ElasticPassword = tomlConfig.ElasticPassword
		}
		if config.ElasticPemFile == "" {
			config.ElasticPemFile = tomlConfig.ElasticPemFile
		}
		if config.ElasticValidatePemFile && !tomlConfig.ElasticValidatePemFile {
			config.ElasticValidatePemFile = false
		}
		if config.ElasticVersion == "" {
			config.ElasticVersion = tomlConfig.ElasticVersion
		}
		if config.ElasticMaxConns == 0 {
			config.ElasticMaxConns = tomlConfig.ElasticMaxConns
		}
		if config.DirectReadSplitMax == 0 {
			config.DirectReadSplitMax = tomlConfig.DirectReadSplitMax
		}
		if !config.ElasticRetry && tomlConfig.ElasticRetry {
			config.ElasticRetry = true
		}
		if config.ElasticMaxDocs == 0 {
			config.ElasticMaxDocs = tomlConfig.ElasticMaxDocs
		}
		if config.ElasticMaxBytes == 0 {
			config.ElasticMaxBytes = tomlConfig.ElasticMaxBytes
		}
		if config.ElasticMaxSeconds == 0 {
			config.ElasticMaxSeconds = tomlConfig.ElasticMaxSeconds
		}
		if config.ElasticClientTimeout == 0 {
			config.ElasticClientTimeout = tomlConfig.ElasticClientTimeout
		}
		if config.MaxFileSize == 0 {
			config.MaxFileSize = tomlConfig.MaxFileSize
		}
		if config.FileDownloaders == 0 {
			config.FileDownloaders = tomlConfig.FileDownloaders
		}
		if config.DeleteStrategy == 0 {
			config.DeleteStrategy = tomlConfig.DeleteStrategy
		}
		if config.DeleteIndexPattern == "" {
			config.DeleteIndexPattern = tomlConfig.DeleteIndexPattern
		}
		// Default-true booleans: the file may only disable them.
		if config.DroppedDatabases && !tomlConfig.DroppedDatabases {
			config.DroppedDatabases = false
		}
		if config.DroppedCollections && !tomlConfig.DroppedCollections {
			config.DroppedCollections = false
		}
		// Default-false booleans: the file may only enable them.
		if !config.Gzip && tomlConfig.Gzip {
			config.Gzip = true
		}
		if !config.Verbose && tomlConfig.Verbose {
			config.Verbose = true
		}
		if !config.Stats && tomlConfig.Stats {
			config.Stats = true
		}
		if !config.Pprof && tomlConfig.Pprof {
			config.Pprof = true
		}
		if !config.EnableEasyJSON && tomlConfig.EnableEasyJSON {
			config.EnableEasyJSON = true
		}
		if !config.IndexStats && tomlConfig.IndexStats {
			config.IndexStats = true
		}
		if config.StatsDuration == "" {
			config.StatsDuration = tomlConfig.StatsDuration
		}
		if config.StatsIndexFormat == "" {
			config.StatsIndexFormat = tomlConfig.StatsIndexFormat
		}
		if !config.IndexFiles && tomlConfig.IndexFiles {
			config.IndexFiles = true
		}
		if !config.IndexAsUpdate && tomlConfig.IndexAsUpdate {
			config.IndexAsUpdate = true
		}
		if !config.FileHighlighting && tomlConfig.FileHighlighting {
			config.FileHighlighting = true
		}
		if !config.EnablePatches && tomlConfig.EnablePatches {
			config.EnablePatches = true
		}
		if !config.PruneInvalidJSON && tomlConfig.PruneInvalidJSON {
			config.PruneInvalidJSON = true
		}
		if !config.Replay && tomlConfig.Replay {
			config.Replay = true
		}
		if !config.Resume && tomlConfig.Resume {
			config.Resume = true
		}
		if !config.ResumeWriteUnsafe && tomlConfig.ResumeWriteUnsafe {
			config.ResumeWriteUnsafe = true
		}
		if config.ResumeFromTimestamp == 0 {
			config.ResumeFromTimestamp = tomlConfig.ResumeFromTimestamp
		}
		if config.MergePatchAttr == "" {
			config.MergePatchAttr = tomlConfig.MergePatchAttr
		}
		if !config.FailFast && tomlConfig.FailFast {
			config.FailFast = true
		}
		if !config.IndexOplogTime && tomlConfig.IndexOplogTime {
			config.IndexOplogTime = true
		}
		if config.OplogTsFieldName == "" {
			config.OplogTsFieldName = tomlConfig.OplogTsFieldName
		}
		if config.OplogDateFieldName == "" {
			config.OplogDateFieldName = tomlConfig.OplogDateFieldName
		}
		if config.OplogDateFieldFormat == "" {
			config.OplogDateFieldFormat = tomlConfig.OplogDateFieldFormat
		}
		if !config.ExitAfterDirectReads && tomlConfig.ExitAfterDirectReads {
			config.ExitAfterDirectReads = true
		}
		// The resume name from the file is only honored when resuming.
		if config.Resume && config.ResumeName == "" {
			config.ResumeName = tomlConfig.ResumeName
		}
		if config.ClusterName == "" {
			config.ClusterName = tomlConfig.ClusterName
		}
		// Namespace include/exclude regexes.
		if config.NsRegex == "" {
			config.NsRegex = tomlConfig.NsRegex
		}
		if config.NsDropRegex == "" {
			config.NsDropRegex = tomlConfig.NsDropRegex
		}
		if config.NsExcludeRegex == "" {
			config.NsExcludeRegex = tomlConfig.NsExcludeRegex
		}
		if config.NsDropExcludeRegex == "" {
			config.NsDropExcludeRegex = tomlConfig.NsDropExcludeRegex
		}
		// Feature-dependent namespace lists; the loaders populate the
		// corresponding global lookup sets.
		if config.IndexFiles {
			if len(config.FileNamespaces) == 0 {
				config.FileNamespaces = tomlConfig.FileNamespaces
			}
			config.loadGridFsConfig()
		}
		if config.Worker == "" {
			config.Worker = tomlConfig.Worker
		}
		if config.GraylogAddr == "" {
			config.GraylogAddr = tomlConfig.GraylogAddr
		}
		if config.MapperPluginPath == "" {
			config.MapperPluginPath = tomlConfig.MapperPluginPath
		}
		if config.EnablePatches {
			if len(config.PatchNamespaces) == 0 {
				config.PatchNamespaces = tomlConfig.PatchNamespaces
			}
			config.loadPatchNamespaces()
		}
		if len(config.RoutingNamespaces) == 0 {
			config.RoutingNamespaces = tomlConfig.RoutingNamespaces
			config.loadRoutingNamespaces()
		}
		if len(config.TimeMachineNamespaces) == 0 {
			config.TimeMachineNamespaces = tomlConfig.TimeMachineNamespaces
			config.loadTimeMachineNamespaces()
		}
		if config.TimeMachineIndexPrefix == "" {
			config.TimeMachineIndexPrefix = tomlConfig.TimeMachineIndexPrefix
		}
		if config.TimeMachineIndexSuffix == "" {
			config.TimeMachineIndexSuffix = tomlConfig.TimeMachineIndexSuffix
		}
		if !config.TimeMachineDirectReads {
			config.TimeMachineDirectReads = tomlConfig.TimeMachineDirectReads
		}
		if len(config.DirectReadNs) == 0 {
			config.DirectReadNs = tomlConfig.DirectReadNs
		}
		if len(config.ElasticUrls) == 0 {
			config.ElasticUrls = tomlConfig.ElasticUrls
		}
		if len(config.Workers) == 0 {
			config.Workers = tomlConfig.Workers
		}
		if !config.EnableHTTPServer && tomlConfig.EnableHTTPServer {
			config.EnableHTTPServer = true
		}
		if config.HTTPServerAddr == "" {
			config.HTTPServerAddr = tomlConfig.HTTPServerAddr
		}
		// File-only settings (no command line equivalents).
		config.MongoDialSettings = tomlConfig.MongoDialSettings
		config.MongoSessionSettings = tomlConfig.MongoSessionSettings
		config.GtmSettings = tomlConfig.GtmSettings
		config.Logs = tomlConfig.Logs
		// Scripts, filters and mappings register themselves into global
		// tables as a side effect of loading.
		tomlConfig.loadScripts()
		tomlConfig.loadFilters()
		tomlConfig.loadIndexTypes()
	}
	return config
}
// newLogger builds a size-rotating file logger writing to the given path.
// Rotation policy: 500 MB per file, at most 5 backups, 28 day retention.
func (config *configOptions) newLogger(path string) *lumberjack.Logger {
	logger := &lumberjack.Logger{
		Filename:   path,
		MaxSize:    500, // megabytes
		MaxBackups: 5,
		MaxAge:     28, // days
	}
	return logger
}
// setupLogging routes the package loggers either to a Graylog GELF
// endpoint (when graylog-addr is set, which takes precedence) or to
// per-level rotating log files from the [logs] config section. Loggers
// without a configured destination keep their defaults. Returns the
// receiver for call chaining.
func (config *configOptions) setupLogging() *configOptions {
	if config.GraylogAddr == "" {
		logs := config.Logs
		if logs.Info != "" {
			infoLog.SetOutput(config.newLogger(logs.Info))
		}
		if logs.Warn != "" {
			warnLog.SetOutput(config.newLogger(logs.Warn))
		}
		if logs.Error != "" {
			errorLog.SetOutput(config.newLogger(logs.Error))
		}
		if logs.Trace != "" {
			traceLog.SetOutput(config.newLogger(logs.Trace))
		}
		if logs.Stats != "" {
			statsLog.SetOutput(config.newLogger(logs.Stats))
		}
		return config
	}
	gelfWriter, err := gelf.NewUDPWriter(config.GraylogAddr)
	if err != nil {
		errorLog.Fatalf("Error creating gelf writer: %s", err)
	}
	// All levels share the single GELF writer.
	infoLog.SetOutput(gelfWriter)
	warnLog.SetOutput(gelfWriter)
	errorLog.SetOutput(gelfWriter)
	traceLog.SetOutput(gelfWriter)
	statsLog.SetOutput(gelfWriter)
	return config
}
// loadRoutingNamespaces marks each configured routing namespace in the
// global routingNamespaces lookup set.
func (config *configOptions) loadRoutingNamespaces() *configOptions {
	for _, ns := range config.RoutingNamespaces {
		routingNamespaces[ns] = true
	}
	return config
}
// loadTimeMachineNamespaces marks each configured time machine namespace
// in the global tmNamespaces lookup set.
func (config *configOptions) loadTimeMachineNamespaces() *configOptions {
	for _, ns := range config.TimeMachineNamespaces {
		tmNamespaces[ns] = true
	}
	return config
}
// loadPatchNamespaces marks each configured patch namespace in the global
// patchNamespaces lookup set.
func (config *configOptions) loadPatchNamespaces() *configOptions {
	for _, ns := range config.PatchNamespaces {
		patchNamespaces[ns] = true
	}
	return config
}
// loadGridFsConfig marks each configured GridFS file namespace in the
// global fileNamespaces lookup set.
func (config *configOptions) loadGridFsConfig() *configOptions {
	for _, ns := range config.FileNamespaces {
		fileNamespaces[ns] = true
	}
	return config
}
// dump pretty-prints the effective configuration to the info log.
// Marshalling failures are reported on the error log instead of aborting.
func (config *configOptions) dump() {
	// Named 'out' rather than 'json' so the local does not shadow the
	// encoding/json package inside this function.
	out, err := json.MarshalIndent(config, "", " ")
	if err != nil {
		errorLog.Printf("Unable to print configuration: %s", err)
	} else {
		infoLog.Println(string(out))
	}
}
/*
If ssl=true is set on the connection string, remove that option from the
string and enable TLS explicitly, because the mgo driver does not support
the ssl option in the connection string.
*/
// parseMongoURL strips any ssl=true option from a MongoDB connection
// string and records the requirement in MongoDialSettings.Ssl instead,
// since mgo cannot parse that option itself. Every other part of the URL
// is preserved unchanged.
func (config *configOptions) parseMongoURL(inURL string) (outURL string) {
	const queryDelim string = "?"
	outURL = inURL
	hostQuery := strings.SplitN(outURL, queryDelim, 2)
	if len(hostQuery) != 2 {
		// No query string: nothing to strip.
		return
	}
	host, query := hostQuery[0], hostQuery[1]
	sslOpt := regexp.MustCompile(`ssl=true&?|&ssl=true$`)
	qstr := sslOpt.ReplaceAllString(query, "")
	if qstr == query {
		// ssl=true was not present; leave the URL untouched.
		return
	}
	config.MongoDialSettings.Ssl = true
	if qstr == "" {
		outURL = host
	} else {
		outURL = strings.Join([]string{host, qstr}, queryDelim)
	}
	return
}
// setDefaults fills in hard defaults for every setting left unset by the
// command line and the config file, derives the resume name from the
// cluster/worker names, and normalizes the MongoDB URLs (see
// parseMongoURL). Returns the receiver for call chaining.
func (config *configOptions) setDefaults() *configOptions {
	if config.MongoURL == "" {
		config.MongoURL = mongoURLDefault
	}
	if config.ClusterName != "" {
		// Clustered workers resume under "<cluster>:<worker>"; a cluster
		// without named workers resumes under the cluster name alone.
		// (The redundant re-check of ClusterName inside this branch was
		// removed; the outer condition already guarantees it is non-empty.)
		if config.Worker != "" {
			config.ResumeName = fmt.Sprintf("%s:%s", config.ClusterName, config.Worker)
		} else {
			config.ResumeName = config.ClusterName
		}
		// Cluster mode implies resuming.
		config.Resume = true
	} else if config.ResumeName == "" {
		if config.Worker != "" {
			config.ResumeName = config.Worker
		} else {
			config.ResumeName = resumeNameDefault
		}
	}
	if config.ElasticMaxConns == 0 {
		config.ElasticMaxConns = elasticMaxConnsDefault
	}
	if config.ElasticClientTimeout == 0 {
		config.ElasticClientTimeout = elasticClientTimeoutDefault
	}
	if config.MergePatchAttr == "" {
		config.MergePatchAttr = "json-merge-patches"
	}
	if config.ElasticMaxSeconds == 0 {
		// Direct reads generate bursts; flush less often in that mode.
		if len(config.DirectReadNs) > 0 {
			config.ElasticMaxSeconds = 5
		} else {
			config.ElasticMaxSeconds = 1
		}
	}
	if config.ElasticMaxDocs == 0 {
		config.ElasticMaxDocs = elasticMaxDocsDefault
	}
	if config.ElasticMaxBytes == 0 {
		config.ElasticMaxBytes = elasticMaxBytesDefault
	}
	if config.MongoURL != "" {
		config.MongoURL = config.parseMongoURL(config.MongoURL)
	}
	if config.MongoConfigURL != "" {
		config.MongoConfigURL = config.parseMongoURL(config.MongoConfigURL)
	}
	if config.HTTPServerAddr == "" {
		config.HTTPServerAddr = ":8080"
	}
	if config.StatsIndexFormat == "" {
		config.StatsIndexFormat = "monstache.stats.2006-01-02"
	}
	if config.TimeMachineIndexPrefix == "" {
		config.TimeMachineIndexPrefix = "log"
	}
	if config.TimeMachineIndexSuffix == "" {
		config.TimeMachineIndexSuffix = "2006-01-02"
	}
	if config.DeleteIndexPattern == "" {
		config.DeleteIndexPattern = "*"
	}
	if config.FileDownloaders == 0 {
		config.FileDownloaders = fileDownloadersDefault
	}
	if config.OplogTsFieldName == "" {
		config.OplogTsFieldName = "oplog_ts"
	}
	if config.OplogDateFieldName == "" {
		config.OplogDateFieldName = "oplog_date"
	}
	if config.OplogDateFieldFormat == "" {
		config.OplogDateFieldFormat = "2006/01/02 15:04:05"
	}
	return config
}
// getAuthURL copies the credentials portion ("user:pass@", anything before
// the first '@') of the primary MongoURL onto inURL. When the primary URL
// carries no credentials, inURL is returned unchanged.
// NOTE(review): the split is intentionally done on config.MongoURL rather
// than inURL — this appears designed to dial secondary servers (shard or
// config servers) with the main connection's credentials, assuming inURL
// carries none of its own; confirm against callers.
func (config *configOptions) getAuthURL(inURL string) string {
	cred := strings.SplitN(config.MongoURL, "@", 2)
	if len(cred) == 2 {
		return cred[0] + "@" + inURL
	} else {
		return inURL
	}
}
// configureMongo applies session-level settings: primary read mode plus
// any socket/sync timeouts from the [mongo-session-settings] config
// section. A value of -1 means "not configured" and leaves the driver
// default in place.
func (config *configOptions) configureMongo(session *mgo.Session) {
	session.SetMode(mgo.Primary, true)
	settings := config.MongoSessionSettings
	if settings.SocketTimeout != -1 {
		session.SetSocketTimeout(time.Duration(settings.SocketTimeout) * time.Second)
	}
	if settings.SyncTimeout != -1 {
		session.SetSyncTimeout(time.Duration(settings.SyncTimeout) * time.Second)
	}
}
// dialMongo establishes a session to the MongoDB server at inURL. When TLS
// is required (ssl=true was present in the URL, or a PEM file is
// configured) the connection is dialed manually with a tls.Config built
// from the PEM file; otherwise the plain mgo dialers are used.
// MongoDialSettings.Timeout of -1 means "not configured" (10s default on
// the TLS path, driver default otherwise).
func (config *configOptions) dialMongo(inURL string) (*mgo.Session, error) {
	ssl := config.MongoDialSettings.Ssl || config.MongoPemFile != ""
	if !ssl {
		// Plain (non-TLS) connection.
		if config.MongoDialSettings.Timeout != -1 {
			return mgo.DialWithTimeout(inURL,
				time.Duration(config.MongoDialSettings.Timeout)*time.Second)
		}
		return mgo.Dial(inURL)
	}
	tlsConfig := &tls.Config{}
	if config.MongoPemFile != "" {
		certs := x509.NewCertPool()
		ca, err := ioutil.ReadFile(config.MongoPemFile)
		if err != nil {
			return nil, err
		}
		certs.AppendCertsFromPEM(ca)
		tlsConfig.RootCAs = certs
	}
	if !config.MongoValidatePemFile {
		// Certificate validation explicitly disabled by configuration.
		tlsConfig.InsecureSkipVerify = true
	}
	dialInfo, err := mgo.ParseURL(inURL)
	if err != nil {
		return nil, err
	}
	dialInfo.Timeout = 10 * time.Second
	if config.MongoDialSettings.Timeout != -1 {
		dialInfo.Timeout = time.Duration(config.MongoDialSettings.Timeout) * time.Second
	}
	// Dial each server over TLS ourselves since mgo has no native support.
	dialInfo.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) {
		conn, err := tls.Dial("tcp", addr.String(), tlsConfig)
		if err != nil {
			errorLog.Printf("Unable to dial mongodb: %s", err)
		}
		return conn, err
	}
	session, err := mgo.DialWithInfo(dialInfo)
	if err == nil {
		session.SetSyncTimeout(1 * time.Minute)
		session.SetSocketTimeout(1 * time.Minute)
	}
	return session, err
}
// NewHTTPClient builds the HTTP client used for Elasticsearch requests,
// honoring the configured PEM file (custom CA pool), certificate
// validation toggle, gzip setting, and per-request client timeout.
func (config *configOptions) NewHTTPClient() (client *http.Client, err error) {
	tlsConfig := &tls.Config{}
	if config.ElasticPemFile != "" {
		var ca []byte
		certs := x509.NewCertPool()
		if ca, err = ioutil.ReadFile(config.ElasticPemFile); err == nil {
			certs.AppendCertsFromPEM(ca)
			tlsConfig.RootCAs = certs
		} else {
			return client, err
		}
	}
	if !config.ElasticValidatePemFile {
		// Certificate validation explicitly disabled by configuration.
		tlsConfig.InsecureSkipVerify = true
	}
	transport := &http.Transport{
		DisableCompression:  !config.Gzip,
		TLSHandshakeTimeout: 30 * time.Second,
		TLSClientConfig:     tlsConfig,
	}
	client = &http.Client{
		Timeout:   time.Duration(config.ElasticClientTimeout) * time.Second,
		Transport: transport,
	}
	return client, err
}
// doDrop reacts to database and collection drop operations by deleting the
// corresponding Elasticsearch indexes (when the matching dropped-* option
// is enabled) and then purging stored routing metadata. Metadata cleanup
// failures are logged but do not fail the operation.
func doDrop(mongo *mgo.Session, elastic *elastic.Client, op *gtm.Op, config *configOptions) (err error) {
	if db, dropped := op.IsDropDatabase(); dropped {
		if !config.DroppedDatabases {
			return
		}
		if err = deleteIndexes(elastic, db, config); err == nil {
			if e := dropDBMeta(mongo, db); e != nil {
				errorLog.Printf("Unable to delete metadata for db: %s", e)
			}
		}
		return
	}
	if col, dropped := op.IsDropCollection(); dropped {
		if !config.DroppedCollections {
			return
		}
		namespace := op.GetDatabase() + "." + col
		if err = deleteIndex(elastic, namespace, config); err == nil {
			if e := dropCollectionMeta(mongo, namespace); e != nil {
				errorLog.Printf("Unable to delete metadata for collection: %s", e)
			}
		}
	}
	return
}
// hasFileContent reports whether op belongs to a namespace whose GridFS
// file content should be ingested; always false when file indexing is off.
func hasFileContent(op *gtm.Op, config *configOptions) (ingest bool) {
	if config.IndexFiles {
		ingest = fileNamespaces[op.Namespace]
	}
	return
}
// addPatch maintains a history of RFC 7386 JSON merge patches on the
// document under config.MergePatchAttr. For updates it fetches the current
// Elasticsearch document, diffs it against the incoming data, and appends
// the resulting merge patch (with timestamp and version) to the history.
// For inserts it seeds the history with the full document as version 1.
// Direct-read and zero-timestamp operations are skipped.
func addPatch(config *configOptions, client *elastic.Client, op *gtm.Op,
	objectID string, indexType *indexTypeMapping, meta *indexingMeta) (err error) {
	var merges []interface{}
	var toJSON []byte
	if op.IsSourceDirect() {
		return nil
	}
	if op.Timestamp == 0 {
		return nil
	}
	if op.IsUpdate() {
		// Fetch the previously indexed revision to diff against, using the
		// same index/type/routing/parent resolution as indexing does.
		ctx := context.Background()
		service := client.Get()
		service.Id(objectID)
		service.Index(indexType.Index)
		service.Type(indexType.Type)
		if meta.Index != "" {
			service.Index(meta.Index)
		}
		if meta.Type != "" {
			service.Type(meta.Type)
		}
		if meta.Routing != "" {
			service.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			service.Parent(meta.Parent)
		}
		var resp *elastic.GetResult
		if resp, err = service.Do(ctx); err == nil {
			if resp.Found {
				var src map[string]interface{}
				if err = json.Unmarshal(*resp.Source, &src); err == nil {
					if val, ok := src[config.MergePatchAttr]; ok {
						merges = val.([]interface{})
						// JSON numbers decode as float64; restore the int
						// representation used when the entries were written.
						for _, m := range merges {
							entry := m.(map[string]interface{})
							entry["ts"] = int(entry["ts"].(float64))
							entry["v"] = int(entry["v"].(float64))
						}
					}
					// Diff against the document body only, not the history.
					delete(src, config.MergePatchAttr)
					var fromJSON, mergeDoc []byte
					if fromJSON, err = json.Marshal(src); err == nil {
						if toJSON, err = json.Marshal(op.Data); err == nil {
							if mergeDoc, err = jsonpatch.CreateMergePatch(fromJSON, toJSON); err == nil {
								merge := make(map[string]interface{})
								// Seconds portion of the MongoDB timestamp.
								merge["ts"] = op.Timestamp >> 32
								merge["p"] = string(mergeDoc)
								merge["v"] = len(merges) + 1
								merges = append(merges, merge)
								op.Data[config.MergePatchAttr] = merges
							}
						}
					}
				}
			} else {
				err = errors.New("Last document revision not found")
			}
		}
	} else {
		// Insert: seed the patch history with the whole document, unless a
		// history is somehow already present.
		if _, found := op.Data[config.MergePatchAttr]; !found {
			if toJSON, err = json.Marshal(op.Data); err == nil {
				merge := make(map[string]interface{})
				merge["v"] = 1
				merge["ts"] = op.Timestamp >> 32
				merge["p"] = string(toJSON)
				merges = append(merges, merge)
				op.Data[config.MergePatchAttr] = merges
			}
		}
	}
	return
}
// doIndexing builds and queues the bulk request(s) for a single insert or
// update operation: an update-with-upsert when index-as-update is enabled
// (and no pipeline/attachment is involved), otherwise a full index
// request. It also records patch history (enable-patches), persists
// routing metadata when needed, and mirrors the document into a dated
// time machine index for namespaces configured for it.
func doIndexing(config *configOptions, mongo *mgo.Session, bulk *elastic.BulkProcessor, client *elastic.Client, op *gtm.Op, ingestAttachment bool) (err error) {
	meta := parseIndexMeta(op)
	prepareDataForIndexing(config, op)
	objectID, indexType := opIDToString(op), mapIndexType(config, op)
	if config.EnablePatches {
		if patchNamespaces[op.Namespace] {
			// Patch failures are logged but never block indexing.
			if e := addPatch(config, client, op, objectID, indexType, meta); e != nil {
				errorLog.Printf("Unable to save json-patch info: %s", e)
			}
		}
	}
	if config.IndexAsUpdate && meta.Pipeline == "" && ingestAttachment == false {
		// Partial update with upsert: preserves fields added by other
		// writers instead of overwriting the whole document.
		req := elastic.NewBulkUpdateRequest()
		req.UseEasyJSON(config.EnableEasyJSON)
		req.Id(objectID)
		req.Index(indexType.Index)
		req.Type(indexType.Type)
		req.Doc(op.Data)
		req.DocAsUpsert(true)
		// Metadata overrides the namespace-derived index/type.
		if meta.Index != "" {
			req.Index(meta.Index)
		}
		if meta.Type != "" {
			req.Type(meta.Type)
		}
		if meta.Routing != "" {
			req.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			req.Parent(meta.Parent)
		}
		if meta.RetryOnConflict != 0 {
			req.RetryOnConflict(meta.RetryOnConflict)
		}
		bulk.Add(req)
	} else {
		// Full document index (overwrite).
		req := elastic.NewBulkIndexRequest()
		req.UseEasyJSON(config.EnableEasyJSON)
		req.Id(objectID)
		req.Index(indexType.Index)
		req.Type(indexType.Type)
		req.Doc(op.Data)
		if meta.Index != "" {
			req.Index(meta.Index)
		}
		if meta.Type != "" {
			req.Type(meta.Type)
		}
		if meta.Routing != "" {
			req.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			req.Parent(meta.Parent)
		}
		if meta.Version != 0 {
			req.Version(meta.Version)
		}
		if meta.VersionType != "" {
			req.VersionType(meta.VersionType)
		}
		if meta.Pipeline != "" {
			req.Pipeline(meta.Pipeline)
		}
		if meta.RetryOnConflict != 0 {
			req.RetryOnConflict(meta.RetryOnConflict)
		}
		if ingestAttachment {
			// File content goes through the ingest-attachment pipeline.
			req.Pipeline("attachment")
		}
		bulk.Add(req)
	}
	if meta.shouldSave(config) {
		// Persist routing metadata so later deletes can be routed.
		if e := setIndexMeta(mongo, op.Namespace, objectID, meta); e != nil {
			errorLog.Printf("Unable to save routing info: %s", e)
		}
	}
	if tmNamespaces[op.Namespace] {
		if op.IsSourceOplog() || config.TimeMachineDirectReads {
			t := time.Now().UTC()
			// Index name is "<prefix>.<index>.<suffix>" run through
			// time.Format, so the suffix (e.g. 2006-01-02) becomes a date.
			tmIndex := func(idx string) string {
				pre, suf := config.TimeMachineIndexPrefix, config.TimeMachineIndexSuffix
				tmFormat := strings.Join([]string{pre, idx, suf}, ".")
				return strings.ToLower(t.Format(tmFormat))
			}
			// Copy so the time machine doc can diverge from op.Data.
			data := make(map[string]interface{})
			for k, v := range op.Data {
				data[k] = v
			}
			data["_source_id"] = objectID
			// Only add oplog time fields here if prepareDataForIndexing did
			// not already add them (index-oplog-time disabled).
			if config.IndexOplogTime == false {
				secs := int64(op.Timestamp >> 32)
				t := time.Unix(secs, 0).UTC()
				data[config.OplogTsFieldName] = op.Timestamp
				data[config.OplogDateFieldName] = t.Format(config.OplogDateFieldFormat)
			}
			req := elastic.NewBulkIndexRequest()
			req.UseEasyJSON(config.EnableEasyJSON)
			req.Index(tmIndex(indexType.Index))
			req.Type(indexType.Type)
			// Route by source id so all revisions of a doc land together.
			req.Routing(objectID)
			req.Doc(data)
			if meta.Index != "" {
				req.Index(tmIndex(meta.Index))
			}
			if meta.Type != "" {
				req.Type(meta.Type)
			}
			if meta.Pipeline != "" {
				req.Pipeline(meta.Pipeline)
			}
			if ingestAttachment {
				req.Pipeline("attachment")
			}
			bulk.Add(req)
		}
	}
	return
}
// doIndex runs the mapping pipeline on op and then either indexes the
// mapped document, or — when the mapping dropped the document body on an
// update — removes the document from Elasticsearch instead.
func doIndex(config *configOptions, mongo *mgo.Session, bulk *elastic.BulkProcessor, client *elastic.Client, op *gtm.Op, ingestAttachment bool) (err error) {
	if err = mapData(mongo, config, op); err != nil {
		return
	}
	switch {
	case op.Data != nil:
		err = doIndexing(config, mongo, bulk, client, op, ingestAttachment)
	case op.IsUpdate():
		// Mapping cleared the document on an update: treat as a delete.
		doDelete(config, client, mongo, bulk, op)
	}
	return
}
// processOp dispatches a single gtm operation: drops flush and drop
// metadata, deletes remove documents, and inserts/updates either go to the
// file-download workers (GridFS content) or straight to indexing.
func processOp(config *configOptions, mongo *mgo.Session, bulk *elastic.BulkProcessor, client *elastic.Client, op *gtm.Op, fileC chan *gtm.Op) (err error) {
	switch {
	case op.IsDrop():
		// Flush pending requests before the index/collection disappears.
		bulk.Flush()
		err = doDrop(mongo, client, op, config)
	case op.IsDelete():
		doDelete(config, client, mongo, bulk, op)
	case op.Data != nil:
		if hasFileContent(op, config) {
			// Hand off to a downloader goroutine; it re-enters via fileDoneC.
			fileC <- op
		} else {
			err = doIndex(config, mongo, bulk, client, op, false)
		}
	}
	return
}
// processErr records a failed operation: the error is logged, the process
// exit status is flagged non-zero, and when fail-fast is enabled the
// process terminates immediately.
func processErr(err error, config *configOptions) {
	errorLog.Println(err)
	exitStatus = 1
	if config.FailFast {
		os.Exit(exitStatus)
	}
}
// doIndexStats enqueues a document describing the current bulk-processor
// statistics into the (lowercased, time-formatted) stats index.
//
// The hostname lookup is best effort: when os.Hostname fails the Host field
// is simply omitted. Previously that error was returned to the caller even
// though the stats request had still been queued, producing a misleading
// "Error indexing statistics" log line.
func doIndexStats(config *configOptions, bulkStats *elastic.BulkProcessor, stats elastic.BulkProcessorStats) (err error) {
	doc := make(map[string]interface{})
	t := time.Now().UTC()
	doc["Timestamp"] = t.Format("2006-01-02T15:04:05")
	if hostname, herr := os.Hostname(); herr == nil {
		doc["Host"] = hostname
	}
	doc["Pid"] = os.Getpid()
	doc["Stats"] = stats
	index := strings.ToLower(t.Format(config.StatsIndexFormat))
	typeName := "stats"
	if config.useTypeFromFuture() {
		// Elasticsearch 6.2+ uses the single "_doc" mapping type.
		typeName = typeFromFuture
	}
	req := elastic.NewBulkIndexRequest().Index(index).Type(typeName)
	req.UseEasyJSON(config.EnableEasyJSON)
	req.Doc(doc)
	bulkStats.Add(req)
	return
}
// dropDBMeta removes all stored routing metadata for every namespace in
// the given MongoDB database (used when the database is dropped).
func dropDBMeta(session *mgo.Session, db string) error {
	metaCol := session.DB("monstache").C("meta")
	_, err := metaCol.RemoveAll(bson.M{"db": db})
	return err
}
// dropCollectionMeta removes all stored routing metadata for a single
// namespace (used when the collection is dropped).
func dropCollectionMeta(session *mgo.Session, namespace string) error {
	metaCol := session.DB("monstache").C("meta")
	_, err := metaCol.RemoveAll(bson.M{"namespace": namespace})
	return err
}
// load populates the indexing metadata from the attribute map produced by
// a mapping script. String attributes are taken verbatim (any value is
// stringified with %v); version and retryOnConflict must parse as integers
// or they are silently ignored, matching the tolerant original behavior.
func (meta *indexingMeta) load(metaAttrs map[string]interface{}) {
	// attr stringifies the named attribute, reporting whether it was present.
	attr := func(key string) (string, bool) {
		if v, ok := metaAttrs[key]; ok {
			return fmt.Sprintf("%v", v), true
		}
		return "", false
	}
	if s, ok := attr("routing"); ok {
		meta.Routing = s
	}
	if s, ok := attr("index"); ok {
		meta.Index = s
	}
	if s, ok := attr("type"); ok {
		meta.Type = s
	}
	if s, ok := attr("parent"); ok {
		meta.Parent = s
	}
	if s, ok := attr("version"); ok {
		if version, err := strconv.ParseInt(s, 10, 64); err == nil {
			meta.Version = version
		}
	}
	if s, ok := attr("versionType"); ok {
		meta.VersionType = s
	}
	if s, ok := attr("pipeline"); ok {
		meta.Pipeline = s
	}
	if s, ok := attr("retryOnConflict"); ok {
		if roc, err := strconv.Atoi(s); err == nil {
			meta.RetryOnConflict = roc
		}
	}
}
// shouldSave reports whether this document's routing metadata must be
// persisted to MongoDB so a later delete can be routed correctly. Metadata
// is only saved under the stateful delete strategy, and only when at least
// one override is actually set. (Restructured to avoid else-after-return.)
func (meta *indexingMeta) shouldSave(config *configOptions) bool {
	if config.DeleteStrategy != statefulDeleteStrategy {
		return false
	}
	return meta.Routing != "" ||
		meta.Index != "" ||
		meta.Type != "" ||
		meta.Parent != "" ||
		meta.Pipeline != ""
}
// setIndexMeta upserts the routing metadata for a single document into the
// monstache.meta collection, keyed by "<namespace>.<id>", so that a later
// delete of the document can be routed to the correct index/shard.
func setIndexMeta(session *mgo.Session, namespace, id string, meta *indexingMeta) error {
	// Only split on the first dot: collection names may contain dots.
	database := strings.SplitN(namespace, ".", 2)[0]
	doc := map[string]interface{}{
		"routing":   meta.Routing,
		"index":     meta.Index,
		"type":      meta.Type,
		"parent":    meta.Parent,
		"pipeline":  meta.Pipeline,
		"db":        database,
		"namespace": namespace,
	}
	metaID := fmt.Sprintf("%s.%s", namespace, id)
	col := session.DB("monstache").C("meta")
	_, err := col.UpsertId(metaID, bson.M{"$set": doc})
	return err
}
// getIndexMeta looks up routing metadata previously stored by setIndexMeta
// for a single document and then deletes it from MongoDB: the metadata is
// only needed once, to route the delete of that document, so it is consumed
// as part of the read. Returns a zero-valued indexingMeta when no metadata
// document exists.
func getIndexMeta(session *mgo.Session, namespace, id string) (meta *indexingMeta) {
	meta = &indexingMeta{}
	col := session.DB("monstache").C("meta")
	doc := make(map[string]interface{})
	metaID := fmt.Sprintf("%s.%s", namespace, id)
	// Lookup errors (e.g. not found) are deliberately ignored; in that case
	// doc stays empty and meta keeps its zero values.
	col.FindId(metaID).One(doc)
	if doc["routing"] != nil {
		meta.Routing = doc["routing"].(string)
	}
	if doc["index"] != nil {
		// Elasticsearch index names are always lowercase.
		meta.Index = strings.ToLower(doc["index"].(string))
	}
	if doc["type"] != nil {
		meta.Type = doc["type"].(string)
	}
	if doc["parent"] != nil {
		meta.Parent = doc["parent"].(string)
	}
	if doc["pipeline"] != nil {
		meta.Pipeline = doc["pipeline"].(string)
	}
	// The source document is being deleted; its routing info is no longer
	// needed, so remove it (best effort — error intentionally ignored).
	col.RemoveId(metaID)
	return
}
// loadBuiltinFunctions registers the findId, findOne and find helper
// functions with every javascript mapping environment, all backed by the
// given MongoDB session.
func loadBuiltinFunctions(s *mgo.Session) {
	for ns, env := range mapEnvs {
		helpers := []*findConf{
			{session: s, name: "findId", vm: env.VM, ns: ns, byId: true},
			{session: s, name: "findOne", vm: env.VM, ns: ns},
			{session: s, name: "find", vm: env.VM, ns: ns, multi: true},
		}
		for _, fa := range helpers {
			if err := env.VM.Set(fa.name, makeFind(fa)); err != nil {
				panic(err)
			}
		}
	}
}
// setDatabase applies the optional "database" override from the javascript
// options object; the value must be a string.
func (fc *findCall) setDatabase(topts map[string]interface{}) (err error) {
	ov, present := topts["database"]
	if !present {
		return
	}
	if ovs, isString := ov.(string); isString {
		fc.db = ovs
		return
	}
	return errors.New("Invalid database option value")
}
// setCollection applies the optional "collection" override from the
// javascript options object; the value must be a string.
func (fc *findCall) setCollection(topts map[string]interface{}) (err error) {
	ov, present := topts["collection"]
	if !present {
		return
	}
	if ovs, isString := ov.(string); isString {
		fc.col = ovs
		return
	}
	return errors.New("Invalid collection option value")
}
// setSelect applies the optional "select" projection from the javascript
// options object. Only integer-valued entries are honored (otto exports
// javascript numbers used this way as int64); other values are ignored.
func (fc *findCall) setSelect(topts map[string]interface{}) (err error) {
	ov, present := topts["select"]
	if !present {
		return
	}
	ovsel, isMap := ov.(map[string]interface{})
	if !isMap {
		return errors.New("Invalid select option value")
	}
	for field, value := range ovsel {
		if vi, isInt := value.(int64); isInt {
			fc.sel[field] = int(vi)
		}
	}
	return
}
// setSort applies the optional "sort" override (an array of field names)
// from the javascript options object; only used for multi-document finds.
func (fc *findCall) setSort(topts map[string]interface{}) (err error) {
	ov, present := topts["sort"]
	if !present {
		return
	}
	if ovs, isStrings := ov.([]string); isStrings {
		fc.sort = ovs
		return
	}
	return errors.New("Invalid sort option value")
}
// setLimit applies the optional "limit" override from the javascript
// options object; only used for multi-document finds.
func (fc *findCall) setLimit(topts map[string]interface{}) (err error) {
	ov, present := topts["limit"]
	if !present {
		return
	}
	if ovl, isInt := ov.(int64); isInt {
		fc.limit = int(ovl)
		return
	}
	return errors.New("Invalid limit option value")
}
// setQuery exports the javascript query argument to a Go value and
// restores any hex ObjectId strings inside it to bson.ObjectId values.
func (fc *findCall) setQuery(v otto.Value) error {
	q, err := v.Export()
	if err != nil {
		return err
	}
	fc.query = fc.restoreIds(q)
	return nil
}
// setOptions parses the optional second javascript argument: an object
// that may override database, collection and select, plus — for
// multi-document finds only — sort and limit. Any export failure or
// non-object value yields a generic "Invalid options argument" error.
func (fc *findCall) setOptions(v otto.Value) (err error) {
	var opts interface{}
	if opts, err = v.Export(); err != nil {
		return errors.New("Invalid options argument")
	}
	topts, isMap := opts.(map[string]interface{})
	if !isMap {
		return errors.New("Invalid options argument")
	}
	if err = fc.setDatabase(topts); err != nil {
		return
	}
	if err = fc.setCollection(topts); err != nil {
		return
	}
	if err = fc.setSelect(topts); err != nil {
		return
	}
	if !fc.isMulti() {
		return
	}
	if err = fc.setSort(topts); err != nil {
		return
	}
	err = fc.setLimit(topts)
	return
}
// setDefaults seeds the call's database and collection from the namespace
// the mapping environment is bound to.
//
// The namespace is split on the first dot only (consistent with how
// setIndexMeta derives the database name): MongoDB collection names may
// themselves contain dots, and the previous strings.Split + ns[1] both
// truncated such collection names and would panic on a dotless namespace.
func (fc *findCall) setDefaults() {
	if fc.config.ns == "" {
		return
	}
	ns := strings.SplitN(fc.config.ns, ".", 2)
	fc.db = ns[0]
	if len(ns) > 1 {
		fc.col = ns[1]
	}
}
// getCollection returns the mgo collection the call will query, resolved
// from the (possibly option-overridden) database and collection names.
func (fc *findCall) getCollection() *mgo.Collection {
	return fc.session.DB(fc.db).C(fc.col)
}
// getVM returns the javascript VM the query result is marshalled into.
func (fc *findCall) getVM() *otto.Otto {
	return fc.config.vm
}
// getFunctionName returns the registered javascript name of this helper
// (findId, findOne or find); used in error messages.
func (fc *findCall) getFunctionName() string {
	return fc.config.name
}
// isMulti reports whether the helper returns an array of documents (find)
// rather than a single document (findId/findOne).
func (fc *findCall) isMulti() bool {
	return fc.config.multi
}
// logError logs a failure that occurred while executing the helper,
// prefixed with the helper's javascript name.
func (fc *findCall) logError(err error) {
	errorLog.Printf("Error in function %s: %s\n", fc.getFunctionName(), err)
}
// restoreIds walks an exported javascript value and converts every string
// that is a valid hex ObjectId back into a bson.ObjectId, recursing
// through arrays and objects. All other values pass through unchanged.
func (fc *findCall) restoreIds(v interface{}) interface{} {
	switch vt := v.(type) {
	case string:
		if bson.IsObjectIdHex(vt) {
			return bson.ObjectIdHex(vt)
		}
		return vt
	case []interface{}:
		var restored []interface{}
		for _, item := range vt {
			restored = append(restored, fc.restoreIds(item))
		}
		return restored
	case map[string]interface{}:
		restored := make(map[string]interface{})
		for key, value := range vt {
			restored[key] = fc.restoreIds(value)
		}
		return restored
	default:
		return v
	}
}
// execute runs the prepared query against MongoDB and converts the result
// to a javascript value. Multi-document calls (find) honor limit, sort and
// select and return an array; single-document calls (findId/findOne) honor
// only select and return one document.
func (fc *findCall) execute() (r otto.Value, err error) {
	var q *mgo.Query
	col := fc.getCollection()
	if fc.isMulti() {
		q = col.Find(fc.query)
		if fc.limit > 0 {
			q.Limit(fc.limit)
		}
		if len(fc.sort) > 0 {
			q.Sort(fc.sort...)
		}
		if len(fc.sel) > 0 {
			q.Select(fc.sel)
		}
		var docs []map[string]interface{}
		if err = q.All(&docs); err == nil {
			// Convert each document (e.g. ObjectIds, dates) into forms
			// otto can represent before handing it to the VM.
			var rdocs []map[string]interface{}
			for _, doc := range docs {
				rdocs = append(rdocs, convertMapJavascript(doc))
			}
			r, err = fc.getVM().ToValue(rdocs)
		}
	} else {
		if fc.config.byId {
			q = col.FindId(fc.query)
		} else {
			q = col.Find(fc.query)
		}
		if len(fc.sel) > 0 {
			q.Select(fc.sel)
		}
		doc := make(map[string]interface{})
		if err = q.One(doc); err == nil {
			rdoc := convertMapJavascript(doc)
			r, err = fc.getVM().ToValue(rdoc)
		}
	}
	return
}
// makeFind builds the Go function backing one of the javascript find
// helpers (findId, findOne or find). The returned closure expects a query
// as its first argument and an optional options object (database,
// collection, select, and — for multi-document finds — sort and limit) as
// its second. Errors are logged and javascript null is returned rather
// than throwing into the script.
func makeFind(fa *findConf) func(otto.FunctionCall) otto.Value {
	return func(call otto.FunctionCall) (r otto.Value) {
		var err error
		// Copy the session so concurrent script invocations do not share
		// one mgo socket; the copy is closed when the call returns.
		fc := &findCall{
			config: fa,
			session: fa.session.Copy(),
			sel: make(map[string]int),
		}
		defer fc.session.Close()
		fc.setDefaults()
		args := call.ArgumentList
		argLen := len(args)
		r = otto.NullValue()
		if argLen >= 1 {
			if argLen >= 2 {
				if err = fc.setOptions(call.Argument(1)); err != nil {
					fc.logError(err)
					return
				}
			}
			if fc.db == "" || fc.col == "" {
				fc.logError(errors.New("Find call must specify db and collection"))
				return
			}
			if err = fc.setQuery(call.Argument(0)); err == nil {
				var result otto.Value
				if result, err = fc.execute(); err == nil {
					r = result
				} else {
					fc.logError(err)
				}
			} else {
				fc.logError(err)
			}
		} else {
			fc.logError(errors.New("At least one argument is required"))
		}
		return
	}
}
// doDelete queues a bulk delete for the MongoDB document referenced by op.
// Under the stateful strategy, routing metadata saved at index time is
// consumed from MongoDB to route the delete; under the stateless strategy,
// Elasticsearch is searched for the document's index/routing; the ignore
// strategy drops the delete entirely.
//
// The ignore-strategy check is performed before the request is built so
// that no bulk request is allocated when deletes are disabled.
func doDelete(config *configOptions, client *elastic.Client, mongo *mgo.Session, bulk *elastic.BulkProcessor, op *gtm.Op) {
	if config.DeleteStrategy == ignoreDeleteStrategy {
		return
	}
	req := elastic.NewBulkDeleteRequest()
	req.UseEasyJSON(config.EnableEasyJSON)
	objectID, indexType, meta := opIDToString(op), mapIndexType(config, op), &indexingMeta{}
	req.Id(objectID)
	if !config.IndexAsUpdate {
		// External versioning by oplog timestamp guards against applying
		// an older delete over a newer index request.
		req.Version(int64(op.Timestamp))
		req.VersionType("external")
	}
	if config.DeleteStrategy == statefulDeleteStrategy {
		if routingNamespaces[""] || routingNamespaces[op.Namespace] {
			// Consumes (reads then removes) the stored routing metadata.
			meta = getIndexMeta(mongo, op.Namespace, objectID)
		}
		req.Index(indexType.Index)
		req.Type(indexType.Type)
		if meta.Index != "" {
			req.Index(meta.Index)
		}
		if meta.Type != "" {
			req.Type(meta.Type)
		}
		if meta.Routing != "" {
			req.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			req.Parent(meta.Parent)
		}
	} else if config.DeleteStrategy == statelessDeleteStrategy {
		if routingNamespaces[""] || routingNamespaces[op.Namespace] {
			// Discover where the document lives by searching for its _id
			// across the configured index pattern.
			termQuery := elastic.NewTermQuery("_id", objectID)
			searchResult, err := client.Search().FetchSource(false).Size(1).Index(config.DeleteIndexPattern).Query(termQuery).Do(context.Background())
			if err != nil {
				errorLog.Printf("Unable to delete document %s: %s", objectID, err)
				return
			}
			if searchResult.Hits != nil && searchResult.Hits.TotalHits == 1 {
				hit := searchResult.Hits.Hits[0]
				req.Index(hit.Index)
				req.Type(hit.Type)
				if hit.Routing != "" {
					req.Routing(hit.Routing)
				}
				if hit.Parent != "" {
					req.Parent(hit.Parent)
				}
			} else {
				// Zero or multiple matches: refuse to guess which document
				// to delete.
				errorLog.Printf("Failed to find unique document %s for deletion using index pattern %s", objectID, config.DeleteIndexPattern)
				return
			}
		} else {
			req.Index(indexType.Index)
			req.Type(indexType.Type)
		}
	} else {
		// Unknown strategy value: do nothing.
		return
	}
	bulk.Add(req)
}
// gtmDefaultSettings returns the default gtm tuning parameters used when
// no gtm-settings section is present in the configuration.
func gtmDefaultSettings() gtmSettings {
	var settings gtmSettings
	settings.ChannelSize = gtmChannelSizeDefault
	settings.BufferSize = 32
	settings.BufferDuration = "75ms"
	return settings
}
// notifySdFailed reports a failed systemd readiness notification. A nil
// error means systemd notification is simply unsupported in this
// environment, which is only worth mentioning in verbose mode.
func notifySdFailed(config *configOptions, err error) {
	if err != nil {
		errorLog.Printf("Systemd notification failed: %s", err)
		return
	}
	if config.Verbose {
		warnLog.Println("Systemd notification not supported (i.e. NOTIFY_SOCKET is unset)")
	}
}
// watchdogSdFailed reports a failure to determine the systemd watchdog
// interval. A nil error means the watchdog is simply not enabled, which is
// only worth mentioning in verbose mode.
func watchdogSdFailed(config *configOptions, err error) {
	if err != nil {
		errorLog.Printf("Error determining systemd WATCHDOG interval: %s", err)
		return
	}
	if config.Verbose {
		warnLog.Println("Systemd WATCHDOG not enabled")
	}
}
// serveHttp starts the embedded monitoring HTTP server and blocks until it
// stops. ListenAndServe always returns a non-nil error; it only causes a
// panic when the stop was not an intentional shutdown (ctx.shutdown is set
// before the server is asked to shut down).
func (ctx *httpServerCtx) serveHttp() {
	s := ctx.httpServer
	if ctx.config.Verbose {
		infoLog.Printf("Starting http server at %s", s.Addr)
	}
	// Record the start time so the /started endpoint can report uptime.
	ctx.started = time.Now()
	err := s.ListenAndServe()
	if !ctx.shutdown {
		panic(fmt.Sprintf("Unable to serve http at address %s: %s", s.Addr, err))
	}
}
// buildServer constructs the optional monitoring HTTP server, exposing
// /started (process uptime) and /healthz always, /stats when statistics
// are enabled, and the pprof endpoints when profiling is enabled.
func (ctx *httpServerCtx) buildServer() {
	mux := http.NewServeMux()
	mux.HandleFunc("/started", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(200)
		// time.Since is the idiomatic replacement for time.Now().Sub(...).
		data := time.Since(ctx.started).String()
		w.Write([]byte(data))
	})
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, req *http.Request) {
		w.WriteHeader(200)
		w.Write([]byte("ok"))
	})
	if ctx.config.Stats {
		mux.HandleFunc("/stats", func(w http.ResponseWriter, req *http.Request) {
			stats, err := json.MarshalIndent(ctx.bulk.Stats(), "", " ")
			if err == nil {
				w.Header().Set("Content-Type", "application/json")
				w.WriteHeader(200)
				w.Write(stats)
			} else {
				w.WriteHeader(500)
				fmt.Fprintf(w, "Unable to print statistics: %s", err)
			}
		})
	}
	if ctx.config.Pprof {
		mux.HandleFunc("/debug/pprof/", pprof.Index)
		mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
		mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
		mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
	}
	s := &http.Server{
		Addr:     ctx.config.HTTPServerAddr,
		Handler:  mux,
		ErrorLog: errorLog,
	}
	ctx.httpServer = s
}
// notifySd reports readiness to systemd (READY=1) and, when a systemd
// watchdog is configured, loops forever petting the watchdog (WATCHDOG=1)
// at half the configured interval. Returns early when notification is
// unsupported or the watchdog is disabled.
func notifySd(config *configOptions) {
	var interval time.Duration
	if config.Verbose {
		infoLog.Println("Sending systemd READY=1")
	}
	sent, err := daemon.SdNotify(false, "READY=1")
	if sent {
		if config.Verbose {
			infoLog.Println("READY=1 successfully sent to systemd")
		}
	} else {
		notifySdFailed(config, err)
		return
	}
	interval, err = daemon.SdWatchdogEnabled(false)
	if err != nil || interval == 0 {
		watchdogSdFailed(config, err)
		return
	}
	for {
		if config.Verbose {
			infoLog.Println("Sending systemd WATCHDOG=1")
		}
		sent, err = daemon.SdNotify(false, "WATCHDOG=1")
		if sent {
			if config.Verbose {
				infoLog.Println("WATCHDOG=1 successfully sent to systemd")
			}
		} else {
			notifySdFailed(config, err)
			return
		}
		// Pet at twice the required frequency for a safety margin.
		time.Sleep(interval / 2)
	}
}
// makeShardInsertHandler returns the callback gtm invokes when a new shard
// joins the cluster: it dials and configures a session for the shard so
// its oplog can be tailed as well.
func (config *configOptions) makeShardInsertHandler() gtm.ShardInsertHandler {
	return func(shardInfo *gtm.ShardInfo) (*mgo.Session, error) {
		infoLog.Printf("Adding shard found at %s\n", shardInfo.GetURL())
		shardURL := config.getAuthURL(shardInfo.GetURL())
		shard, err := config.dialMongo(shardURL)
		if err != nil {
			return nil, err
		}
		config.configureMongo(shard)
		return shard, nil
	}
}
// shutdown performs a best-effort cleanup — resetting cluster state,
// stopping the HTTP server, and flushing the bulk processors — then exits
// the process. Cleanup runs in a goroutine and is given at most timeout
// seconds; after that the process exits regardless.
//
// The previous implementation used a second goroutine plus a time.Ticker
// (which was never stopped) just to multiplex the two channels; a direct
// select on a timer is equivalent and leaks nothing.
func shutdown(timeout int, hsc *httpServerCtx, bulk *elastic.BulkProcessor, bulkStats *elastic.BulkProcessor, mongo *mgo.Session, config *configOptions) {
	infoLog.Println("Shutting down")
	closeC := make(chan bool)
	go func() {
		if config.ClusterName != "" {
			resetClusterState(mongo, config)
		}
		if hsc != nil {
			// Mark the stop as intentional so serveHttp does not panic.
			hsc.shutdown = true
			hsc.httpServer.Shutdown(context.Background())
		}
		bulk.Flush()
		if bulkStats != nil {
			bulkStats.Flush()
		}
		close(closeC)
	}()
	deadline := time.NewTimer(time.Duration(timeout) * time.Second)
	defer deadline.Stop()
	select {
	case <-closeC:
	case <-deadline.C:
	}
	os.Exit(exitStatus)
}
// handlePanic is installed via defer in main; it converts a panic into a
// logged message and a clean exit with status 1, sleeping briefly first so
// log sinks have a chance to flush.
func handlePanic() {
	r := recover()
	if r == nil {
		return
	}
	errorLog.Println(r)
	infoLog.Println("Shutting down with exit status 1 after panic.")
	time.Sleep(3 * time.Second)
	os.Exit(1)
}
func main() {
enabled := true
defer handlePanic()
config := &configOptions{
MongoDialSettings: mongoDialSettings{Timeout: -1},
MongoSessionSettings: mongoSessionSettings{SocketTimeout: -1, SyncTimeout: -1},
GtmSettings: gtmDefaultSettings(),
}
config.parseCommandLineFlags()
if config.Version {
fmt.Println(version)
os.Exit(0)
}
config.loadTimeMachineNamespaces()
config.loadRoutingNamespaces()
config.loadPatchNamespaces()
config.loadGridFsConfig()
config.loadConfigFile()
config.setDefaults()
if config.Print {
config.dump()
os.Exit(0)
}
config.setupLogging()
config.loadPlugins()
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM, syscall.SIGKILL)
mongo, err := config.dialMongo(config.MongoURL)
if err != nil {
panic(fmt.Sprintf("Unable to connect to mongodb using URL %s: %s", config.MongoURL, err))
}
if mongoInfo, err := mongo.BuildInfo(); err == nil {
infoLog.Printf("Successfully connected to MongoDB version %s", mongoInfo.Version)
} else {
infoLog.Println("Successfully connected to MongoDB")
}
defer mongo.Close()
config.configureMongo(mongo)
loadBuiltinFunctions(mongo)
elasticClient, err := config.newElasticClient()
if err != nil {
panic(fmt.Sprintf("Unable to create elasticsearch client: %s", err))
}
if config.ElasticVersion == "" {
if err := config.testElasticsearchConn(elasticClient); err != nil {
panic(fmt.Sprintf("Unable to validate connection to elasticsearch using client %s: %s",
elasticClient, err))
}
} else {
if err := config.parseElasticsearchVersion(config.ElasticVersion); err != nil {
panic(fmt.Sprintf("Elasticsearch version must conform to major.minor.fix: %s", err))
}
}
bulk, err := config.newBulkProcessor(elasticClient)
if err != nil {
panic(fmt.Sprintf("Unable to start bulk processor: %s", err))
}
defer bulk.Stop()
var bulkStats *elastic.BulkProcessor
if config.IndexStats {
bulkStats, err = config.newStatsBulkProcessor(elasticClient)
if err != nil {
panic(fmt.Sprintf("Unable to start stats bulk processor: %s", err))
}
defer bulkStats.Stop()
}
var after gtm.TimestampGenerator
if config.Resume {
after = func(session *mgo.Session, options *gtm.Options) bson.MongoTimestamp {
ts := gtm.LastOpTimestamp(session, options)
if config.Replay {
ts = bson.MongoTimestamp(0)
} else if config.ResumeFromTimestamp != 0 {
ts = bson.MongoTimestamp(config.ResumeFromTimestamp)
} else {
collection := session.DB("monstache").C("monstache")
doc := make(map[string]interface{})
collection.FindId(config.ResumeName).One(doc)
if doc["ts"] != nil {
ts = doc["ts"].(bson.MongoTimestamp)
}
}
return ts
}
} else if config.Replay {
after = func(session *mgo.Session, options *gtm.Options) bson.MongoTimestamp {
return bson.MongoTimestamp(0)
}
}
if config.IndexFiles {
if len(config.FileNamespaces) == 0 {
errorLog.Fatalln("File indexing is ON but no file namespaces are configured")
}
if err := ensureFileMapping(elasticClient); err != nil {
panic(err)
}
}
var nsFilter, filter, directReadFilter gtm.OpFilter
filterChain := []gtm.OpFilter{notMonstache, notSystem, notChunks}
filterArray := []gtm.OpFilter{}
if config.isSharded() {
filterChain = append(filterChain, notConfig)
}
if config.NsRegex != "" {
filterChain = append(filterChain, filterWithRegex(config.NsRegex))
}
if config.NsDropRegex != "" {
filterChain = append(filterChain, filterDropWithRegex(config.NsDropRegex))
}
if config.NsExcludeRegex != "" {
filterChain = append(filterChain, filterInverseWithRegex(config.NsExcludeRegex))
}
if config.NsDropExcludeRegex != "" {
filterChain = append(filterChain, filterDropInverseWithRegex(config.NsDropExcludeRegex))
}
if config.Worker != "" {
workerFilter, err := consistent.ConsistentHashFilter(config.Worker, config.Workers)
if err != nil {
panic(err)
}
filterChain = append(filterChain, workerFilter)
} else if config.Workers != nil {
panic("Workers configured but this worker is undefined. worker must be set to one of the workers.")
}
if filterPlugin != nil {
filterArray = append(filterArray, filterWithPlugin())
} else if len(filterEnvs) > 0 {
filterArray = append(filterArray, filterWithScript())
}
nsFilter = gtm.ChainOpFilters(filterChain...)
filter = gtm.ChainOpFilters(filterArray...)
directReadFilter = gtm.ChainOpFilters(filterArray...)
var oplogDatabaseName, oplogCollectionName *string
if config.MongoOpLogDatabaseName != "" {
oplogDatabaseName = &config.MongoOpLogDatabaseName
}
if config.MongoOpLogCollectionName != "" {
oplogCollectionName = &config.MongoOpLogCollectionName
}
if config.ClusterName != "" {
if err = ensureClusterTTL(mongo); err == nil {
infoLog.Printf("Joined cluster %s", config.ClusterName)
} else {
panic(fmt.Sprintf("Unable to enable cluster mode: %s", err))
}
enabled, err = enableProcess(mongo, config)
if err != nil {
panic(fmt.Sprintf("Unable to determine enabled cluster process: %s", err))
}
if !enabled {
config.DirectReadNs = stringargs{}
}
}
gtmBufferDuration, err := time.ParseDuration(config.GtmSettings.BufferDuration)
if err != nil {
panic(fmt.Sprintf("Unable to parse gtm buffer duration %s: %s", config.GtmSettings.BufferDuration, err))
}
var mongos []*mgo.Session
var configSession *mgo.Session
if config.isSharded() {
// if we have a config server URL then we are running in a sharded cluster
configSession, err = config.dialMongo(config.MongoConfigURL)
if err != nil {
panic(fmt.Sprintf("Unable to connect to mongodb config server using URL %s: %s", config.MongoConfigURL, err))
}
config.configureMongo(configSession)
// get the list of shard servers
shardInfos := gtm.GetShards(configSession)
if len(shardInfos) == 0 {
errorLog.Fatalln("Shards enabled but none found in config.shards collection")
}
// add each shard server to the sync list
for _, shardInfo := range shardInfos {
infoLog.Printf("Adding shard found at %s\n", shardInfo.GetURL())
shardURL := config.getAuthURL(shardInfo.GetURL())
shard, err := config.dialMongo(shardURL)
if err != nil {
panic(fmt.Sprintf("Unable to connect to mongodb shard using URL %s: %s", shardURL, err))
}
defer shard.Close()
config.configureMongo(shard)
mongos = append(mongos, shard)
}
} else {
mongos = append(mongos, mongo)
}
gtmOpts := >m.Options{
After: after,
Filter: filter,
NamespaceFilter: nsFilter,
OpLogDatabaseName: oplogDatabaseName,
OpLogCollectionName: oplogCollectionName,
ChannelSize: config.GtmSettings.ChannelSize,
Ordering: gtm.AnyOrder,
WorkerCount: 10,
BufferDuration: gtmBufferDuration,
BufferSize: config.GtmSettings.BufferSize,
DirectReadNs: config.DirectReadNs,
DirectReadSplitMax: config.DirectReadSplitMax,
DirectReadFilter: directReadFilter,
Log: infoLog,
}
gtmCtx := gtm.StartMulti(mongos, gtmOpts)
if config.isSharded() {
gtmCtx.AddShardListener(configSession, gtmOpts, config.makeShardInsertHandler())
}
if config.ClusterName != "" {
if enabled {
infoLog.Printf("Starting work for cluster %s", config.ClusterName)
} else {
infoLog.Printf("Pausing work for cluster %s", config.ClusterName)
gtmCtx.Pause()
}
}
timestampTicker := time.NewTicker(10 * time.Second)
if config.Resume == false {
timestampTicker.Stop()
}
heartBeat := time.NewTicker(10 * time.Second)
if config.ClusterName == "" {
heartBeat.Stop()
}
statsTimeout := time.Duration(30) * time.Second
if config.StatsDuration != "" {
statsTimeout, err = time.ParseDuration(config.StatsDuration)
if err != nil {
panic(fmt.Sprintf("Unable to parse stats duration: %s", err))
}
}
printStats := time.NewTicker(statsTimeout)
if config.Stats == false {
printStats.Stop()
}
go notifySd(config)
var hsc *httpServerCtx
if config.EnableHTTPServer {
hsc = &httpServerCtx{
bulk: bulk,
config: config,
}
hsc.buildServer()
go hsc.serveHttp()
}
doneC := make(chan int)
go func() {
<-sigs
shutdown(10, hsc, bulk, bulkStats, mongo, config)
}()
var lastTimestamp, lastSavedTimestamp bson.MongoTimestamp
var fileWg sync.WaitGroup
fileC := make(chan *gtm.Op)
fileDoneC := make(chan *gtm.Op)
for i := 0; i < config.FileDownloaders; i++ {
fileWg.Add(1)
go func() {
defer fileWg.Done()
for op := range fileC {
err := addFileContent(mongo, op, config)
if err != nil {
processErr(err, config)
}
fileDoneC <- op
}
}()
}
if len(config.DirectReadNs) > 0 {
if config.ExitAfterDirectReads {
go func() {
gtmCtx.DirectReadWg.Wait()
gtmCtx.Stop()
close(gtmCtx.OpC)
for op := range gtmCtx.OpC {
if err = processOp(config, mongo, bulk, elasticClient, op, fileC); err != nil {
processErr(err, config)
}
}
close(fileC)
fileWg.Wait()
doneC <- 30
}()
}
}
infoLog.Println("Entering event loop")
for {
select {
case timeout := <-doneC:
shutdown(timeout, hsc, bulk, bulkStats, mongo, config)
return
case <-timestampTicker.C:
if lastTimestamp > lastSavedTimestamp {
bulk.Flush()
if saveTimestamp(mongo, lastTimestamp, config); err == nil {
lastSavedTimestamp = lastTimestamp
} else {
processErr(err, config)
}
}
case <-heartBeat.C:
if config.ClusterName == "" {
break
}
if enabled {
enabled, err = ensureEnabled(mongo, config)
if !enabled {
infoLog.Printf("Pausing work for cluster %s", config.ClusterName)
gtmCtx.Pause()
bulk.Stop()
}
} else {
enabled, err = enableProcess(mongo, config)
if enabled {
infoLog.Printf("Resuming work for cluster %s", config.ClusterName)
bulk.Start(context.Background())
resumeWork(gtmCtx, mongo, config)
}
}
if err != nil {
processErr(err, config)
}
case <-printStats.C:
if !enabled {
break
}
if config.IndexStats {
if err := doIndexStats(config, bulkStats, bulk.Stats()); err != nil {
errorLog.Printf("Error indexing statistics: %s", err)
}
} else {
stats, err := json.Marshal(bulk.Stats())
if err != nil {
errorLog.Printf("Unable to log statistics: %s", err)
} else {
statsLog.Println(string(stats))
}
}
case err = <-gtmCtx.ErrC:
processErr(err, config)
case op := <-fileDoneC:
ingest := op.Data["file"] != nil
if err = doIndex(config, mongo, bulk, elasticClient, op, ingest); err != nil {
processErr(err, config)
}
case op := <-gtmCtx.OpC:
if !enabled || op == nil {
break
}
if op.IsSourceOplog() {
lastTimestamp = op.Timestamp
}
if err = processOp(config, mongo, bulk, elasticClient, op, fileC); err != nil {
processErr(err, config)
}
}
}
}
Add pipe() function for aggregation in JavaScript.
// Package main provides the monstache binary.
package main
import (
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"errors"
"flag"
"fmt"
"github.com/BurntSushi/toml"
"github.com/coreos/go-systemd/daemon"
"github.com/evanphx/json-patch"
"github.com/globalsign/mgo"
"github.com/globalsign/mgo/bson"
"github.com/olivere/elastic"
"github.com/robertkrimen/otto"
_ "github.com/robertkrimen/otto/underscore"
"github.com/rwynn/gtm"
"github.com/rwynn/gtm/consistent"
"github.com/rwynn/monstache/monstachemap"
"golang.org/x/net/context"
"gopkg.in/Graylog2/go-gelf.v2/gelf"
"gopkg.in/natefinch/lumberjack.v2"
"io"
"io/ioutil"
"log"
"math"
"net"
"net/http"
"net/http/pprof"
"os"
"os/signal"
"plugin"
"regexp"
"strconv"
"strings"
"sync"
"syscall"
"time"
)
// Loggers for the different severity levels; only errors go to stderr.
var infoLog = log.New(os.Stdout, "INFO ", log.Flags())
var warnLog = log.New(os.Stdout, "WARN ", log.Flags())
var statsLog = log.New(os.Stdout, "STATS ", log.Flags())
var traceLog = log.New(os.Stdout, "TRACE ", log.Flags())
var errorLog = log.New(os.Stderr, "ERROR ", log.Flags())
// Optional Go-plugin hooks for mapping and filtering documents.
var mapperPlugin func(*monstachemap.MapperPluginInput) (*monstachemap.MapperPluginOutput, error)
var filterPlugin func(*monstachemap.MapperPluginInput) (bool, error)
// Per-namespace javascript environments and index/type mapping overrides.
var mapEnvs map[string]*executionEnv = make(map[string]*executionEnv)
var filterEnvs map[string]*executionEnv = make(map[string]*executionEnv)
var mapIndexTypes map[string]*indexTypeMapping = make(map[string]*indexTypeMapping)
// Namespace feature toggles loaded from configuration.
var fileNamespaces map[string]bool = make(map[string]bool)
var patchNamespaces map[string]bool = make(map[string]bool)
var tmNamespaces map[string]bool = make(map[string]bool)
var routingNamespaces map[string]bool = make(map[string]bool)
// Patterns identifying GridFS chunk collections and system collections.
var chunksRegex = regexp.MustCompile("\\.chunks$")
var systemsRegex = regexp.MustCompile("system\\..+$")
// exitStatus is the process exit code; set to 1 once any operation fails.
var exitStatus = 0
// version is reported by the -version flag.
const version = "4.9.0"
// Connection, bulk-indexing and channel-sizing defaults.
const mongoURLDefault string = "localhost"
const resumeNameDefault string = "default"
const elasticMaxConnsDefault int = 4
const elasticClientTimeoutDefault int = 60
const elasticMaxDocsDefault int = -1
const elasticMaxBytesDefault int = 8 * 1024 * 1024
const gtmChannelSizeDefault int = 512
// typeFromFuture is the single mapping type used by Elasticsearch 6.2+.
const typeFromFuture string = "_doc"
const fileDownloadersDefault = 10
// deleteStrategy selects how MongoDB deletes are propagated to
// Elasticsearch; see the constants below.
type deleteStrategy int
const (
// statelessDeleteStrategy searches Elasticsearch for the document to
// discover its index/routing before deleting it.
statelessDeleteStrategy deleteStrategy = iota
// statefulDeleteStrategy uses routing metadata previously saved to
// MongoDB by setIndexMeta at index time.
statefulDeleteStrategy
// ignoreDeleteStrategy never deletes documents from Elasticsearch.
ignoreDeleteStrategy
)
// stringargs is a repeatable command-line flag collecting string values.
type stringargs []string
// executionEnv is a javascript VM together with the script loaded into it.
// lock serializes access, since otto VMs are not safe for concurrent use.
type executionEnv struct {
VM *otto.Otto
Script string
lock *sync.Mutex
}
// javascript describes a mapping or filter script bound to a namespace.
type javascript struct {
Namespace string
Script string
Path string
Routing bool
}
// indexTypeMapping maps a MongoDB namespace to an Elasticsearch
// index and type.
type indexTypeMapping struct {
Namespace string
Index string
Type string
}
// findConf is the static configuration for one of the javascript find
// helpers (findId, findOne, find).
type findConf struct {
vm *otto.Otto
ns string
name string
session *mgo.Session
byId bool
multi bool
pipe bool
}
// findCall carries the per-invocation state of a javascript find helper.
type findCall struct {
config *findConf
session *mgo.Session
query interface{}
db string
col string
limit int
sort []string
sel map[string]int
}
// logFiles holds optional file paths for each log level.
type logFiles struct {
Info string
Warn string
Error string
Trace string
Stats string
}
// indexingMeta captures per-document indexing overrides (routing, index,
// type, parent, versioning, pipeline) produced by mapping scripts.
type indexingMeta struct {
Routing string
Index string
Type string
Parent string
Version int64
VersionType string
Pipeline string
RetryOnConflict int
}
// mongoDialSettings are the dial options used when connecting to MongoDB.
type mongoDialSettings struct {
Timeout int
Ssl bool
}
// mongoSessionSettings are session-level MongoDB timeouts.
type mongoSessionSettings struct {
SocketTimeout int `toml:"socket-timeout"`
SyncTimeout int `toml:"sync-timeout"`
}
// gtmSettings tunes the gtm oplog-tailing channel and buffers.
type gtmSettings struct {
ChannelSize int `toml:"channel-size"`
BufferSize int `toml:"buffer-size"`
BufferDuration string `toml:"buffer-duration"`
}
// httpServerCtx wraps the optional monitoring HTTP server; shutdown is set
// before an intentional stop so serveHttp does not treat it as a failure.
type httpServerCtx struct {
httpServer *http.Server
bulk *elastic.BulkProcessor
config *configOptions
shutdown bool
started time.Time
}
// configOptions is the complete monstache configuration, populated from
// command-line flags and/or the TOML configuration file (toml tags give
// the file keys; untagged fields use the default lowercased name).
type configOptions struct {
MongoURL string `toml:"mongo-url"`
MongoConfigURL string `toml:"mongo-config-url"`
MongoPemFile string `toml:"mongo-pem-file"`
MongoValidatePemFile bool `toml:"mongo-validate-pem-file"`
MongoOpLogDatabaseName string `toml:"mongo-oplog-database-name"`
MongoOpLogCollectionName string `toml:"mongo-oplog-collection-name"`
MongoDialSettings mongoDialSettings `toml:"mongo-dial-settings"`
MongoSessionSettings mongoSessionSettings `toml:"mongo-session-settings"`
GtmSettings gtmSettings `toml:"gtm-settings"`
Logs logFiles `toml:"logs"`
GraylogAddr string `toml:"graylog-addr"`
ElasticUrls stringargs `toml:"elasticsearch-urls"`
ElasticUser string `toml:"elasticsearch-user"`
ElasticPassword string `toml:"elasticsearch-password"`
ElasticPemFile string `toml:"elasticsearch-pem-file"`
ElasticValidatePemFile bool `toml:"elasticsearch-validate-pem-file"`
ElasticVersion string `toml:"elasticsearch-version"`
ResumeName string `toml:"resume-name"`
NsRegex string `toml:"namespace-regex"`
NsDropRegex string `toml:"namespace-drop-regex"`
NsExcludeRegex string `toml:"namespace-exclude-regex"`
NsDropExcludeRegex string `toml:"namespace-drop-exclude-regex"`
ClusterName string `toml:"cluster-name"`
Print bool `toml:"print-config"`
Version bool
Pprof bool
EnableEasyJSON bool `toml:"enable-easy-json"`
Stats bool
IndexStats bool `toml:"index-stats"`
StatsDuration string `toml:"stats-duration"`
StatsIndexFormat string `toml:"stats-index-format"`
Gzip bool
Verbose bool
Resume bool
ResumeWriteUnsafe bool `toml:"resume-write-unsafe"`
ResumeFromTimestamp int64 `toml:"resume-from-timestamp"`
Replay bool
DroppedDatabases bool `toml:"dropped-databases"`
DroppedCollections bool `toml:"dropped-collections"`
IndexFiles bool `toml:"index-files"`
IndexAsUpdate bool `toml:"index-as-update"`
FileHighlighting bool `toml:"file-highlighting"`
EnablePatches bool `toml:"enable-patches"`
FailFast bool `toml:"fail-fast"`
IndexOplogTime bool `toml:"index-oplog-time"`
OplogTsFieldName string `toml:"oplog-ts-field-name"`
OplogDateFieldName string `toml:"oplog-date-field-name"`
OplogDateFieldFormat string `toml:"oplog-date-field-format"`
ExitAfterDirectReads bool `toml:"exit-after-direct-reads"`
MergePatchAttr string `toml:"merge-patch-attribute"`
ElasticMaxConns int `toml:"elasticsearch-max-conns"`
ElasticRetry bool `toml:"elasticsearch-retry"`
ElasticMaxDocs int `toml:"elasticsearch-max-docs"`
ElasticMaxBytes int `toml:"elasticsearch-max-bytes"`
ElasticMaxSeconds int `toml:"elasticsearch-max-seconds"`
ElasticClientTimeout int `toml:"elasticsearch-client-timeout"`
ElasticMajorVersion int
ElasticMinorVersion int
MaxFileSize int64 `toml:"max-file-size"`
ConfigFile string
Script []javascript
Filter []javascript
Mapping []indexTypeMapping
FileNamespaces stringargs `toml:"file-namespaces"`
PatchNamespaces stringargs `toml:"patch-namespaces"`
Workers stringargs
Worker string
DirectReadNs stringargs `toml:"direct-read-namespaces"`
DirectReadSplitMax int `toml:"direct-read-split-max"`
MapperPluginPath string `toml:"mapper-plugin-path"`
EnableHTTPServer bool `toml:"enable-http-server"`
HTTPServerAddr string `toml:"http-server-addr"`
TimeMachineNamespaces stringargs `toml:"time-machine-namespaces"`
TimeMachineIndexPrefix string `toml:"time-machine-index-prefix"`
TimeMachineIndexSuffix string `toml:"time-machine-index-suffix"`
TimeMachineDirectReads bool `toml:"time-machine-direct-reads"`
RoutingNamespaces stringargs `toml:"routing-namespaces"`
DeleteStrategy deleteStrategy `toml:"delete-strategy"`
DeleteIndexPattern string `toml:"delete-index-pattern"`
FileDownloaders int `toml:"file-downloaders"`
PruneInvalidJSON bool `toml:"prune-invalid-json"`
}
// String renders the delete strategy as its numeric code; it satisfies flag.Value.
func (arg *deleteStrategy) String() string {
	return strconv.Itoa(int(*arg))
}
// Set parses value as an integer delete strategy code; it satisfies flag.Value.
// Returns the strconv error unchanged when value is not an integer.
func (arg *deleteStrategy) Set(value string) error {
	i, err := strconv.Atoi(value)
	if err != nil {
		return err
	}
	*arg = deleteStrategy(i)
	return nil
}
// String renders the accumulated argument list; it satisfies flag.Value.
func (args *stringargs) String() string {
	return fmt.Sprint(*args)
}
// Set appends value to the argument list, allowing the flag to be given
// multiple times on the command line; it satisfies flag.Value.
func (args *stringargs) Set(value string) error {
	updated := append(*args, value)
	*args = updated
	return nil
}
// isSharded reports whether a MongoDB config-server URL was supplied,
// which indicates the source deployment is a sharded cluster.
func (config *configOptions) isSharded() bool {
	return len(config.MongoConfigURL) > 0
}
// afterBulk is installed as the bulk processor's after-callback.  It logs the
// overall request error, then logs each failed line item individually,
// downgrading version-conflict responses (HTTP 409, expected with external
// versioning) to warnings.
func afterBulk(executionId int64, requests []elastic.BulkableRequest, response *elastic.BulkResponse, err error) {
	if err != nil {
		errorLog.Printf("Bulk index request with execution ID %d failed: %s", executionId, err)
	}
	if response == nil || !response.Errors {
		return
	}
	failed := response.Failed()
	if failed == nil {
		return
	}
	errorLog.Printf("Bulk index request with execution ID %d has %d line failure/warning(s)", executionId, len(failed))
	for i, item := range failed {
		// Renamed from "json" to avoid shadowing the encoding/json package.
		encoded, merr := json.Marshal(item)
		if merr != nil {
			errorLog.Printf("Unable to marshall failed request line #%d: %s", i, merr)
			continue
		}
		if item.Status == 409 {
			warnLog.Printf("Conflict request line #%d details: %s", i, string(encoded))
		} else {
			errorLog.Printf("Failed request line #%d details: %s", i, string(encoded))
		}
	}
}
// useTypeFromFuture reports whether the connected Elasticsearch version
// (6.2 or later) expects the forward-compatible document type name.
func (config *configOptions) useTypeFromFuture() bool {
	major, minor := config.ElasticMajorVersion, config.ElasticMinorVersion
	return major > 6 || (major == 6 && minor >= 2)
}
// parseElasticsearchVersion splits a version string such as "6.2.4" and
// records the major and minor components on the config.  It fails on a blank
// string, an unparsable component, or a major version of 0.
func (config *configOptions) parseElasticsearchVersion(number string) error {
	if number == "" {
		return errors.New("Elasticsearch version cannot be blank")
	}
	versionParts := strings.Split(number, ".")
	majorVersion, err := strconv.Atoi(versionParts[0])
	if err != nil {
		// Bug fix: previously a later successful minor-version parse could
		// reset err to nil, silently accepting an invalid version string.
		return err
	}
	if majorVersion == 0 {
		return errors.New("Invalid Elasticsearch major version 0")
	}
	config.ElasticMajorVersion = majorVersion
	if len(versionParts) > 1 {
		minorVersion, err := strconv.Atoi(versionParts[1])
		if err != nil {
			return err
		}
		config.ElasticMinorVersion = minorVersion
	}
	return nil
}
// newBulkProcessor builds the primary bulk indexer from the configured
// worker count, flush thresholds (doc count, byte size, interval), and retry
// policy, and starts it.
func (config *configOptions) newBulkProcessor(client *elastic.Client) (bulk *elastic.BulkProcessor, err error) {
	bulkService := client.BulkProcessor().Name("monstache")
	bulkService.Workers(config.ElasticMaxConns)
	bulkService.Stats(config.Stats)
	bulkService.BulkActions(config.ElasticMaxDocs)
	bulkService.BulkSize(config.ElasticMaxBytes)
	if !config.ElasticRetry {
		// With retries disabled, fail requests immediately instead of
		// backing off and retrying.
		bulkService.Backoff(&elastic.StopBackoff{})
	}
	bulkService.After(afterBulk)
	bulkService.FlushInterval(time.Duration(config.ElasticMaxSeconds) * time.Second)
	return bulkService.Do(context.Background())
}
// newStatsBulkProcessor builds and starts a small dedicated bulk indexer used
// only for publishing monstache's own stats documents.  It flushes purely on
// a 5-second timer; the action and size thresholds are disabled with -1.
func (config *configOptions) newStatsBulkProcessor(client *elastic.Client) (*elastic.BulkProcessor, error) {
	svc := client.BulkProcessor().
		Name("monstache-stats").
		Workers(1).
		Stats(false).
		BulkActions(-1).
		BulkSize(-1).
		FlushInterval(5 * time.Second).
		After(afterBulk)
	return svc.Do(context.Background())
}
// needsSecureScheme reports whether any configured Elasticsearch URL uses
// https, in which case the client must be forced onto the https scheme.
func (config *configOptions) needsSecureScheme() bool {
	// Ranging over a nil/empty slice is a no-op, so no length guard is needed.
	for _, url := range config.ElasticUrls {
		if strings.HasPrefix(url, "https") {
			return true
		}
	}
	return false
}
// newElasticClient builds the Elasticsearch client from the configured URLs,
// credentials, scheme, retry policy, and HTTP client settings.  Sniffing is
// always disabled so the client talks only to the configured endpoints.
func (config *configOptions) newElasticClient() (client *elastic.Client, err error) {
	var clientOptions []elastic.ClientOptionFunc
	var httpClient *http.Client
	clientOptions = append(clientOptions, elastic.SetErrorLog(errorLog))
	clientOptions = append(clientOptions, elastic.SetSniff(false))
	if config.needsSecureScheme() {
		// At least one URL is https; force the client onto the https scheme.
		clientOptions = append(clientOptions, elastic.SetScheme("https"))
	}
	if len(config.ElasticUrls) > 0 {
		clientOptions = append(clientOptions, elastic.SetURL(config.ElasticUrls...))
	} else {
		// No URL configured: record the driver default on the config so later
		// code that reads ElasticUrls[0] (e.g. testElasticsearchConn) works.
		config.ElasticUrls = append(config.ElasticUrls, elastic.DefaultURL)
	}
	if config.Verbose {
		clientOptions = append(clientOptions, elastic.SetTraceLog(traceLog))
	}
	if config.ElasticUser != "" {
		clientOptions = append(clientOptions, elastic.SetBasicAuth(config.ElasticUser, config.ElasticPassword))
	}
	if config.ElasticRetry {
		// Exponential backoff between 50ms and 20s for failed requests.
		d1, d2 := time.Duration(50)*time.Millisecond, time.Duration(20)*time.Second
		retrier := elastic.NewBackoffRetrier(elastic.NewExponentialBackoff(d1, d2))
		clientOptions = append(clientOptions, elastic.SetRetrier(retrier))
	}
	httpClient, err = config.NewHTTPClient()
	if err != nil {
		return client, err
	}
	clientOptions = append(clientOptions, elastic.SetHttpClient(httpClient))
	return elastic.NewClient(clientOptions...)
}
// testElasticsearchConn verifies connectivity by fetching the server version
// from the first configured URL, then records the parsed version components
// on the config.
func (config *configOptions) testElasticsearchConn(client *elastic.Client) error {
	url := config.ElasticUrls[0]
	number, err := client.ElasticsearchVersion(url)
	if err != nil {
		return err
	}
	infoLog.Printf("Successfully connected to Elasticsearch version %s", number)
	return config.parseElasticsearchVersion(number)
}
// deleteIndexes removes all Elasticsearch indexes associated with a dropped
// MongoDB database.  If a custom index mapping exists for any namespace in
// that database, its index name (with a trailing wildcard) is used instead
// of the default database-derived pattern.
func deleteIndexes(client *elastic.Client, db string, config *configOptions) (err error) {
	index := strings.ToLower(db + "*")
	for ns, m := range mapIndexTypes {
		if parts := strings.SplitN(ns, ".", 2); parts[0] == db {
			if m.Index != "" {
				index = strings.ToLower(m.Index + "*")
			}
			break
		}
	}
	_, err = client.DeleteIndex(index).Do(context.Background())
	return
}
// deleteIndex removes the single Elasticsearch index for a dropped MongoDB
// collection, honoring any custom index name mapped for the namespace.
func deleteIndex(client *elastic.Client, namespace string, config *configOptions) error {
	index := strings.ToLower(namespace)
	if m := mapIndexTypes[namespace]; m != nil && m.Index != "" {
		index = strings.ToLower(m.Index)
	}
	_, err := client.DeleteIndex(index).Do(context.Background())
	return err
}
// ensureFileMapping installs the "attachment" ingest pipeline used by
// Elasticsearch to extract text and metadata from the base64-encoded GridFS
// content stored under the "file" field.
func ensureFileMapping(client *elastic.Client) error {
	pipeline := map[string]interface{}{
		"description": "Extract file information",
		"processors": []map[string]interface{}{
			{
				"attachment": map[string]interface{}{
					"field": "file",
				},
			},
		},
	}
	_, err := client.IngestPutPipeline("attachment").BodyJson(pipeline).Do(context.Background())
	return err
}
// defaultIndexTypeMapping derives the index/type used when no explicit
// mapping is configured: the lower-cased namespace as the index, and either
// the collection name or the forward-compatible type for Elasticsearch 6.2+.
func defaultIndexTypeMapping(config *configOptions, op *gtm.Op) *indexTypeMapping {
	typeName := op.GetCollection()
	if config.useTypeFromFuture() {
		typeName = typeFromFuture
	}
	return &indexTypeMapping{
		Namespace: op.Namespace,
		Index:     strings.ToLower(op.Namespace),
		Type:      typeName,
	}
}
// mapIndexType resolves the index/type for an operation, starting from the
// defaults and overlaying any user-configured mapping for the namespace.
func mapIndexType(config *configOptions, op *gtm.Op) *indexTypeMapping {
	mapping := defaultIndexTypeMapping(config, op)
	m := mapIndexTypes[op.Namespace]
	if m == nil {
		return mapping
	}
	if m.Index != "" {
		mapping.Index = m.Index
	}
	if m.Type != "" {
		mapping.Type = m.Type
	}
	return mapping
}
// opIDToString renders a document _id as a string suitable for use as an
// Elasticsearch document ID.  ObjectIds become hex; whole-valued floats (a
// common artifact of JavaScript numbers) are rendered without a decimal part.
func opIDToString(op *gtm.Op) string {
	// Type switch with binding avoids the repeated assertions of the original.
	switch id := op.Id.(type) {
	case bson.ObjectId:
		return id.Hex()
	case float64:
		if intID := int(id); id == float64(intID) {
			return fmt.Sprintf("%v", intID)
		}
	case float32:
		if intID := int(id); id == float32(intID) {
			return fmt.Sprintf("%v", intID)
		}
	}
	return fmt.Sprintf("%v", op.Id)
}
// convertSliceJavascript deep-copies a BSON-derived slice into a form safe to
// hand to the otto JavaScript VM, rendering ObjectIds as hex strings and
// recursing into nested maps and slices.
func convertSliceJavascript(a []interface{}) []interface{} {
	var out []interface{}
	for _, item := range a {
		switch v := item.(type) {
		case map[string]interface{}:
			out = append(out, convertMapJavascript(v))
		case []interface{}:
			out = append(out, convertSliceJavascript(v))
		case bson.ObjectId:
			out = append(out, v.Hex())
		default:
			out = append(out, item)
		}
	}
	return out
}
// convertMapJavascript deep-copies a BSON-derived document into a form safe
// to hand to the otto JavaScript VM, rendering ObjectIds as hex strings and
// recursing into nested maps and slices.
func convertMapJavascript(e map[string]interface{}) map[string]interface{} {
	converted := make(map[string]interface{})
	for key, value := range e {
		switch v := value.(type) {
		case map[string]interface{}:
			converted[key] = convertMapJavascript(v)
		case []interface{}:
			converted[key] = convertSliceJavascript(v)
		case bson.ObjectId:
			converted[key] = v.Hex()
		default:
			converted[key] = value
		}
	}
	return converted
}
// fixSlicePruneInvalidJSON returns a copy of the slice with values the JSON
// encoder cannot represent removed: NaN/Inf floats and time.Time values whose
// year falls outside 0-9999.  Nested maps and slices are pruned recursively.
// id is used only for log messages.
func fixSlicePruneInvalidJSON(id string, a []interface{}) []interface{} {
	var avs []interface{}
	for _, av := range a {
		switch achild := av.(type) {
		case map[string]interface{}:
			avs = append(avs, fixPruneInvalidJSON(id, achild))
		case []interface{}:
			avs = append(avs, fixSlicePruneInvalidJSON(id, achild))
		case time.Time:
			if year := achild.Year(); year < 0 || year > 9999 {
				// Year outside the range JSON date formats accept.
				warnLog.Printf("Dropping invalid time.Time value: %s for document _id: %s", achild, id)
				continue
			}
			avs = append(avs, av)
		case float64:
			// NaN and +/-Inf both cause the json serializer to fail, and were
			// handled by two identical branches in the original; merged here.
			if math.IsNaN(achild) || math.IsInf(achild, 0) {
				warnLog.Printf("Dropping invalid float64 value: %v for document _id: %s", achild, id)
				continue
			}
			avs = append(avs, av)
		default:
			avs = append(avs, av)
		}
	}
	return avs
}
// fixPruneInvalidJSON returns a copy of the document with values the JSON
// encoder cannot represent removed: NaN/Inf floats and time.Time values whose
// year falls outside 0-9999.  Nested maps and slices are pruned recursively.
// id is used only for log messages.
func fixPruneInvalidJSON(id string, e map[string]interface{}) map[string]interface{} {
	o := make(map[string]interface{})
	for k, v := range e {
		switch child := v.(type) {
		case map[string]interface{}:
			o[k] = fixPruneInvalidJSON(id, child)
		case []interface{}:
			o[k] = fixSlicePruneInvalidJSON(id, child)
		case time.Time:
			if year := child.Year(); year < 0 || year > 9999 {
				// Year outside the range JSON date formats accept.
				warnLog.Printf("Dropping invalid time.Time value: %s for document _id: %s", child, id)
				continue
			}
			o[k] = v
		case float64:
			// NaN and +/-Inf both cause the json serializer to fail, and were
			// handled by two identical branches in the original; merged here.
			if math.IsNaN(child) || math.IsInf(child, 0) {
				warnLog.Printf("Dropping invalid float64 value: %v for document _id: %s", child, id)
				continue
			}
			o[k] = v
		default:
			o[k] = v
		}
	}
	return o
}
// deepExportValue converts a value coming out of the otto JavaScript VM into
// a plain Go value, recursing into maps and slices.  JavaScript Date objects
// are special-cased because otto's Export does not yield a time.Time.
func deepExportValue(a interface{}) (b interface{}) {
	switch t := a.(type) {
	case otto.Value:
		ex, err := t.Export()
		if t.Class() == "Date" {
			// Re-parse the Date's string form into a time.Time, replacing
			// whatever Export produced above.
			ex, err = time.Parse("Mon, 2 Jan 2006 15:04:05 MST", t.String())
		}
		if err == nil {
			b = deepExportValue(ex)
		} else {
			// On export/parse failure the result stays nil.
			errorLog.Printf("Error exporting from javascript: %s", err)
		}
	case map[string]interface{}:
		b = deepExportMap(t)
	case []interface{}:
		b = deepExportSlice(t)
	default:
		b = a
	}
	return
}
// deepExportSlice exports each element of a JavaScript array into plain Go
// values.  An empty input yields nil, matching the original behavior (and
// its JSON rendering as null rather than []).
func deepExportSlice(a []interface{}) []interface{} {
	if len(a) == 0 {
		return nil
	}
	exported := make([]interface{}, 0, len(a))
	for _, av := range a {
		exported = append(exported, deepExportValue(av))
	}
	return exported
}
// deepExportMap exports each value of a JavaScript object into plain Go values.
func deepExportMap(e map[string]interface{}) map[string]interface{} {
	exported := make(map[string]interface{}, len(e))
	for key, value := range e {
		exported[key] = deepExportValue(value)
	}
	return exported
}
// mapDataJavascript runs the document through any configured JavaScript
// mappers: first the catch-all mapper (namespace ""), then the mapper for the
// operation's namespace.  A mapper may return an object (which replaces
// op.Data) or a falsy value (which drops the document by nilling op.Data).
// NOTE(review): unlike filterWithScript this does not take env.lock before
// calling into the otto VM — confirm mappers are never invoked concurrently.
func mapDataJavascript(op *gtm.Op) error {
	names := []string{"", op.Namespace}
	for _, name := range names {
		if env := mapEnvs[name]; env != nil {
			// Convert BSON types (e.g. ObjectId) into JS-friendly values.
			arg := convertMapJavascript(op.Data)
			val, err := env.VM.Call("module.exports", arg, arg, op.Namespace)
			if err != nil {
				return err
			}
			if strings.ToLower(val.Class()) == "object" {
				data, err := val.Export()
				if err != nil {
					return err
				} else if data == val {
					// otto returns the value itself when export fails.
					return errors.New("Exported function must return an object")
				} else {
					dm := data.(map[string]interface{})
					op.Data = deepExportMap(dm)
				}
			} else {
				// Non-object return: treat it as a boolean keep/drop flag.
				indexed, err := val.ToBoolean()
				if err != nil {
					return err
				} else if !indexed {
					op.Data = nil
					break
				}
			}
		}
	}
	return nil
}
// mapDataGolang runs the document through the Go mapper plugin.  The plugin
// may drop the document, replace it (unless Passthrough is set), and attach
// per-document indexing overrides, which are stashed on the document under
// the _meta_monstache key for later extraction by parseIndexMeta.
func mapDataGolang(s *mgo.Session, op *gtm.Op) error {
	// Give the plugin its own session copy so it cannot disturb ours.
	session := s.Copy()
	defer session.Close()
	input := &monstachemap.MapperPluginInput{
		Document:   op.Data,
		Namespace:  op.Namespace,
		Database:   op.GetDatabase(),
		Collection: op.GetCollection(),
		Operation:  op.Operation,
		Session:    session,
	}
	output, err := mapperPlugin(input)
	if err != nil {
		return err
	}
	if output == nil {
		return nil
	}
	if output.Drop {
		op.Data = nil
		return nil
	}
	if !output.Passthrough {
		op.Data = output.Document
	}
	// Collect any indexing overrides the plugin supplied.
	meta := make(map[string]interface{})
	if output.Index != "" {
		meta["index"] = output.Index
	}
	if output.Type != "" {
		meta["type"] = output.Type
	}
	if output.Routing != "" {
		meta["routing"] = output.Routing
	}
	if output.Parent != "" {
		meta["parent"] = output.Parent
	}
	if output.Version != 0 {
		meta["version"] = output.Version
	}
	if output.VersionType != "" {
		meta["versionType"] = output.VersionType
	}
	if output.Pipeline != "" {
		meta["pipeline"] = output.Pipeline
	}
	if output.RetryOnConflict != 0 {
		meta["retryOnConflict"] = output.RetryOnConflict
	}
	if len(meta) > 0 {
		op.Data["_meta_monstache"] = meta
	}
	return nil
}
// mapData transforms an operation's document, preferring the Go mapper
// plugin when one is configured and falling back to JavaScript mappers.
func mapData(session *mgo.Session, config *configOptions, op *gtm.Op) error {
	if config.MapperPluginPath == "" {
		return mapDataJavascript(op)
	}
	return mapDataGolang(session, op)
}
// prepareDataForIndexing strips bookkeeping fields from the document and,
// when configured, stamps it with the oplog timestamp/date and prunes values
// that cannot be serialized to JSON.
func prepareDataForIndexing(config *configOptions, op *gtm.Op) {
	data := op.Data
	if config.IndexOplogTime {
		// The upper 32 bits of a Mongo timestamp are seconds since the epoch.
		seconds := int64(op.Timestamp >> 32)
		when := time.Unix(seconds, 0).UTC()
		data[config.OplogTsFieldName] = op.Timestamp
		data[config.OplogDateFieldName] = when.Format(config.OplogDateFieldFormat)
	}
	delete(data, "_id")
	delete(data, "_meta_monstache")
	if config.PruneInvalidJSON {
		op.Data = fixPruneInvalidJSON(opIDToString(op), data)
	}
}
// parseIndexMeta builds indexing metadata for an operation.  The oplog
// timestamp doubles as an external version so replays cannot regress
// documents.  Metadata supplied by mappers under _meta_monstache — either a
// plain map or an otto value exporting to one — overrides the defaults.
func parseIndexMeta(op *gtm.Op) (meta *indexingMeta) {
	meta = &indexingMeta{
		Version:     int64(op.Timestamp),
		VersionType: "external",
	}
	if m, ok := op.Data["_meta_monstache"]; ok {
		// Type switch with binding avoids the repeated assertions of the
		// original.
		switch attrs := m.(type) {
		case map[string]interface{}:
			meta.load(attrs)
		case otto.Value:
			ex, err := attrs.Export()
			// otto returns the value itself when it cannot export it.
			if err == nil && ex != m {
				if exported, ok := ex.(map[string]interface{}); ok {
					meta.load(exported)
				} else {
					errorLog.Println("Invalid indexing metadata")
				}
			}
		default:
			errorLog.Println("Invalid indexing metadata")
		}
	}
	return meta
}
// addFileContent looks up the GridFS file matching the operation's _id and
// stores its base64-encoded content on the document under "file".  Files
// larger than config.MaxFileSize (when set) are skipped with a warning.
func addFileContent(s *mgo.Session, op *gtm.Op, config *configOptions) (err error) {
	session := s.Copy()
	defer session.Close()
	// Default to an empty string so the field exists even if lookup fails.
	op.Data["file"] = ""
	var gridByteBuffer bytes.Buffer
	db, bucket :=
		session.DB(op.GetDatabase()),
		strings.SplitN(op.GetCollection(), ".", 2)[0]
	encoder := base64.NewEncoder(base64.StdEncoding, &gridByteBuffer)
	file, err := db.GridFS(bucket).OpenId(op.Id)
	if err != nil {
		return
	}
	defer file.Close()
	if config.MaxFileSize > 0 && file.Size() > config.MaxFileSize {
		warnLog.Printf("File %s md5(%s) exceeds max file size. file content omitted.",
			file.Name(), file.MD5())
		return
	}
	if _, err = io.Copy(encoder, file); err != nil {
		return
	}
	// Close flushes any final partial base64 block into the buffer.
	if err = encoder.Close(); err != nil {
		return
	}
	// buffer.String() avoids the extra copy of string(buffer.Bytes()).
	op.Data["file"] = gridByteBuffer.String()
	return
}
// notMonstache filters out operations on monstache's own bookkeeping database.
func notMonstache(op *gtm.Op) bool {
	db := op.GetDatabase()
	return db != "monstache"
}
// notChunks filters out operations on GridFS chunk collections.
func notChunks(op *gtm.Op) bool {
	col := op.GetCollection()
	return !chunksRegex.MatchString(col)
}
// notConfig filters out operations on MongoDB's internal config database.
func notConfig(op *gtm.Op) bool {
	db := op.GetDatabase()
	return db != "config"
}
// notSystem filters out operations on system collections.
func notSystem(op *gtm.Op) bool {
	col := op.GetCollection()
	return !systemsRegex.MatchString(col)
}
// filterWithRegex keeps only operations whose namespace matches regex.
// Drop operations always pass so index cleanup is never filtered away.
// Panics on an invalid regex (startup-time configuration error).
func filterWithRegex(regex string) gtm.OpFilter {
	var validNameSpace = regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		if op.IsDrop() {
			return true
		}
		return validNameSpace.MatchString(op.Namespace)
	}
}
// filterDropWithRegex keeps only drop operations whose namespace matches
// regex; non-drop operations always pass.  Panics on an invalid regex
// (startup-time configuration error).
func filterDropWithRegex(regex string) gtm.OpFilter {
	var validNameSpace = regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		if !op.IsDrop() {
			return true
		}
		return validNameSpace.MatchString(op.Namespace)
	}
}
// filterWithPlugin delegates keep/drop decisions for inserts and updates to
// the loaded Go filter plugin.  Operations without data, other operation
// kinds, and documents the plugin errors on behave as in the original:
// non-applicable operations are kept, plugin errors drop the document.
func filterWithPlugin() gtm.OpFilter {
	return func(op *gtm.Op) bool {
		if !(op.IsInsert() || op.IsUpdate()) || op.Data == nil {
			return true
		}
		input := &monstachemap.MapperPluginInput{
			Document:   op.Data,
			Namespace:  op.Namespace,
			Database:   op.GetDatabase(),
			Collection: op.GetCollection(),
			Operation:  op.Operation,
		}
		ok, err := filterPlugin(input)
		if err != nil {
			errorLog.Println(err)
			return false
		}
		return ok
	}
}
// filterWithScript builds an OpFilter that consults the configured JavaScript
// filters: the catch-all filter (namespace "") and then the namespace's own
// filter.  Only inserts/updates with data are evaluated; everything else is
// kept.  Evaluation stops at the first filter that rejects the document.
func filterWithScript() gtm.OpFilter {
	return func(op *gtm.Op) bool {
		var keep bool = true
		if (op.IsInsert() || op.IsUpdate()) && op.Data != nil {
			nss := []string{"", op.Namespace}
			for _, ns := range nss {
				if env := filterEnvs[ns]; env != nil {
					// Assume rejection unless the script says otherwise, so a
					// script error drops the document.
					keep = false
					arg := convertMapJavascript(op.Data)
					// Serialize access to the shared otto VM.
					env.lock.Lock()
					val, err := env.VM.Call("module.exports", arg, arg, op.Namespace)
					if err != nil {
						errorLog.Println(err)
					} else {
						// The inner err deliberately shadows the outer one.
						if ok, err := val.ToBoolean(); err == nil {
							keep = ok
						} else {
							errorLog.Println(err)
						}
					}
					env.lock.Unlock()
				}
				if !keep {
					break
				}
			}
		}
		return keep
	}
}
// filterInverseWithRegex excludes operations whose namespace matches regex.
// Drop operations always pass so index cleanup is never filtered away.
// Panics on an invalid regex (startup-time configuration error).
func filterInverseWithRegex(regex string) gtm.OpFilter {
	var invalidNameSpace = regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		if op.IsDrop() {
			return true
		}
		return !invalidNameSpace.MatchString(op.Namespace)
	}
}
// filterDropInverseWithRegex excludes drop operations whose namespace
// matches regex; non-drop operations always pass.  Panics on an invalid
// regex (startup-time configuration error).
func filterDropInverseWithRegex(regex string) gtm.OpFilter {
	var invalidNameSpace = regexp.MustCompile(regex)
	return func(op *gtm.Op) bool {
		if !op.IsDrop() {
			return true
		}
		return !invalidNameSpace.MatchString(op.Namespace)
	}
}
// ensureClusterTTL creates the TTL index that expires stale cluster
// membership documents 30 seconds after their expireAt stamp, letting
// another process take over leadership.
func ensureClusterTTL(session *mgo.Session) error {
	index := mgo.Index{
		Key:         []string{"expireAt"},
		Background:  true,
		ExpireAfter: 30 * time.Second,
	}
	return session.DB("monstache").C("cluster").EnsureIndex(index)
}
// enableProcess attempts to claim cluster leadership by inserting a document
// keyed on the resume name.  It returns true when this process won the
// claim, and false without error when another live process already holds it
// (duplicate-key insert).
func enableProcess(s *mgo.Session, config *configOptions) (bool, error) {
	session := s.Copy()
	defer session.Close()
	host, err := os.Hostname()
	if err != nil {
		return false, err
	}
	doc := map[string]interface{}{
		"_id":      config.ResumeName,
		"expireAt": time.Now().UTC(),
		"pid":      os.Getpid(),
		"host":     host,
	}
	err = session.DB("monstache").C("cluster").Insert(doc)
	if err == nil {
		return true, nil
	}
	if mgo.IsDup(err) {
		return false, nil
	}
	return false, err
}
// resetClusterState removes this process's cluster membership document so
// another process can immediately take over leadership.
func resetClusterState(session *mgo.Session, config *configOptions) error {
	cluster := session.DB("monstache").C("cluster")
	return cluster.RemoveId(config.ResumeName)
}
// ensureEnabled checks whether this process still owns the cluster claim
// stored under the resume name.  When it does, the claim's expireAt stamp is
// refreshed so the TTL index does not reap it.
// NOTE(review): the pid/host type assertions below would panic on unexpected
// types — assumes only enableProcess ever writes this document; confirm.
func ensureEnabled(s *mgo.Session, config *configOptions) (enabled bool, err error) {
	session := s.Copy()
	defer session.Close()
	col := session.DB("monstache").C("cluster")
	doc := make(map[string]interface{})
	if err = col.FindId(config.ResumeName).One(doc); err == nil {
		if doc["pid"] != nil && doc["host"] != nil {
			var hostname string
			pid := doc["pid"].(int)
			host := doc["host"].(string)
			if hostname, err = os.Hostname(); err == nil {
				// Enabled only when the claim matches this exact process.
				enabled = (pid == os.Getpid() && host == hostname)
				if enabled {
					// Refresh the TTL stamp to keep the claim alive.
					err = col.UpdateId(config.ResumeName,
						bson.M{"$set": bson.M{"expireAt": time.Now().UTC()}})
				}
			}
		}
	}
	return
}
// resumeWork restores the multi-context's position from the saved resume
// timestamp (when one exists) and then resumes event processing.
func resumeWork(ctx *gtm.OpCtxMulti, session *mgo.Session, config *configOptions) {
	col := session.DB("monstache").C("monstache")
	doc := make(map[string]interface{})
	// The FindId error is deliberately ignored: a missing resume document
	// simply means there is no saved position to seek back to.
	col.FindId(config.ResumeName).One(doc)
	if doc["ts"] != nil {
		ts := doc["ts"].(bson.MongoTimestamp)
		ctx.Since(ts)
	}
	ctx.Resume()
}
// saveTimestamp records the most recently processed oplog timestamp under the
// resume name.  Short socket/sync timeouts keep a wedged MongoDB from
// stalling the sync loop; ResumeWriteUnsafe trades acknowledgment for speed.
func saveTimestamp(s *mgo.Session, ts bson.MongoTimestamp, config *configOptions) error {
	session := s.Copy()
	defer session.Close()
	session.SetSocketTimeout(5 * time.Second)
	session.SetSyncTimeout(5 * time.Second)
	if config.ResumeWriteUnsafe {
		// Fire-and-forget writes: no write concern is applied.
		session.SetSafe(nil)
	}
	col := session.DB("monstache").C("monstache")
	_, err := col.UpsertId(config.ResumeName, bson.M{"$set": bson.M{"ts": ts}})
	return err
}
// parseCommandLineFlags registers every command-line flag against the config
// struct's fields and parses os.Args.  Zero values are left in place for
// unset flags so loadConfigFile can later overlay TOML values without
// clobbering explicit command-line settings.  Returns the receiver to allow
// call chaining.
func (config *configOptions) parseCommandLineFlags() *configOptions {
	flag.BoolVar(&config.Print, "print-config", false, "Print the configuration and then exit")
	// MongoDB connection and oplog source options.
	flag.StringVar(&config.MongoURL, "mongo-url", "", "MongoDB server or router server connection URL")
	flag.StringVar(&config.MongoConfigURL, "mongo-config-url", "", "MongoDB config server connection URL")
	flag.StringVar(&config.MongoPemFile, "mongo-pem-file", "", "Path to a PEM file for secure connections to MongoDB")
	flag.BoolVar(&config.MongoValidatePemFile, "mongo-validate-pem-file", true, "Set to boolean false to not validate the MongoDB PEM file")
	flag.StringVar(&config.MongoOpLogDatabaseName, "mongo-oplog-database-name", "", "Override the database name which contains the mongodb oplog")
	flag.StringVar(&config.MongoOpLogCollectionName, "mongo-oplog-collection-name", "", "Override the collection name which contains the mongodb oplog")
	flag.StringVar(&config.GraylogAddr, "graylog-addr", "", "Send logs to a Graylog server at this address")
	// Elasticsearch connection, bulk-flush, and retry options.
	flag.StringVar(&config.ElasticVersion, "elasticsearch-version", "", "Specify elasticsearch version directly instead of getting it from the server")
	flag.StringVar(&config.ElasticUser, "elasticsearch-user", "", "The elasticsearch user name for basic auth")
	flag.StringVar(&config.ElasticPassword, "elasticsearch-password", "", "The elasticsearch password for basic auth")
	flag.StringVar(&config.ElasticPemFile, "elasticsearch-pem-file", "", "Path to a PEM file for secure connections to elasticsearch")
	flag.BoolVar(&config.ElasticValidatePemFile, "elasticsearch-validate-pem-file", true, "Set to boolean false to not validate the Elasticsearch PEM file")
	flag.IntVar(&config.ElasticMaxConns, "elasticsearch-max-conns", 0, "Elasticsearch max connections")
	flag.IntVar(&config.FileDownloaders, "file-downloaders", 0, "GridFs download go routines")
	flag.BoolVar(&config.ElasticRetry, "elasticsearch-retry", false, "True to retry failed request to Elasticsearch")
	flag.IntVar(&config.ElasticMaxDocs, "elasticsearch-max-docs", 0, "Number of docs to hold before flushing to Elasticsearch")
	flag.IntVar(&config.ElasticMaxBytes, "elasticsearch-max-bytes", 0, "Number of bytes to hold before flushing to Elasticsearch")
	flag.IntVar(&config.ElasticMaxSeconds, "elasticsearch-max-seconds", 0, "Number of seconds before flushing to Elasticsearch")
	flag.IntVar(&config.ElasticClientTimeout, "elasticsearch-client-timeout", 0, "Number of seconds before a request to Elasticsearch is timed out")
	flag.Int64Var(&config.MaxFileSize, "max-file-size", 0, "GridFs file content exceeding this limit in bytes will not be indexed in Elasticsearch")
	flag.StringVar(&config.ConfigFile, "f", "", "Location of configuration file")
	// Behavior toggles for dropped namespaces, logging, and diagnostics.
	flag.BoolVar(&config.DroppedDatabases, "dropped-databases", true, "True to delete indexes from dropped databases")
	flag.BoolVar(&config.DroppedCollections, "dropped-collections", true, "True to delete indexes from dropped collections")
	flag.BoolVar(&config.Version, "v", false, "True to print the version number")
	flag.BoolVar(&config.Gzip, "gzip", false, "True to use gzip for requests to elasticsearch")
	flag.BoolVar(&config.Verbose, "verbose", false, "True to output verbose messages")
	flag.BoolVar(&config.Pprof, "pprof", false, "True to enable pprof endpoints")
	flag.BoolVar(&config.EnableEasyJSON, "enable-easy-json", false, "True to enable easy-json serialization")
	flag.BoolVar(&config.Stats, "stats", false, "True to print out statistics")
	flag.BoolVar(&config.IndexStats, "index-stats", false, "True to index stats in elasticsearch")
	flag.StringVar(&config.StatsDuration, "stats-duration", "", "The duration after which stats are logged")
	flag.StringVar(&config.StatsIndexFormat, "stats-index-format", "", "time.Time supported format to use for the stats index names")
	// Resume/replay behavior across restarts.
	flag.BoolVar(&config.Resume, "resume", false, "True to capture the last timestamp of this run and resume on a subsequent run")
	flag.Int64Var(&config.ResumeFromTimestamp, "resume-from-timestamp", 0, "Timestamp to resume syncing from")
	flag.BoolVar(&config.ResumeWriteUnsafe, "resume-write-unsafe", false, "True to speedup writes of the last timestamp synched for resuming at the cost of error checking")
	flag.BoolVar(&config.Replay, "replay", false, "True to replay all events from the oplog and index them in elasticsearch")
	flag.BoolVar(&config.IndexFiles, "index-files", false, "True to index gridfs files into elasticsearch. Requires the elasticsearch mapper-attachments (deprecated) or ingest-attachment plugin")
	flag.BoolVar(&config.IndexAsUpdate, "index-as-update", false, "True to index documents as updates instead of overwrites")
	flag.BoolVar(&config.FileHighlighting, "file-highlighting", false, "True to enable the ability to highlight search times for a file query")
	flag.BoolVar(&config.EnablePatches, "enable-patches", false, "True to include an json-patch field on updates")
	flag.BoolVar(&config.FailFast, "fail-fast", false, "True to exit if a single _bulk request fails")
	flag.BoolVar(&config.IndexOplogTime, "index-oplog-time", false, "True to add date/time information from the oplog to each document when indexing")
	flag.BoolVar(&config.ExitAfterDirectReads, "exit-after-direct-reads", false, "True to exit the program after reading directly from the configured namespaces")
	flag.StringVar(&config.MergePatchAttr, "merge-patch-attribute", "", "Attribute to store json-patch values under")
	flag.StringVar(&config.ResumeName, "resume-name", "", "Name under which to load/store the resume state. Defaults to 'default'")
	flag.StringVar(&config.ClusterName, "cluster-name", "", "Name of the monstache process cluster")
	flag.StringVar(&config.Worker, "worker", "", "The name of this worker in a multi-worker configuration")
	flag.StringVar(&config.MapperPluginPath, "mapper-plugin-path", "", "The path to a .so file to load as a document mapper plugin")
	// Namespace inclusion/exclusion regexes.
	flag.StringVar(&config.NsRegex, "namespace-regex", "", "A regex which is matched against an operation's namespace (<database>.<collection>). Only operations which match are synched to elasticsearch")
	flag.StringVar(&config.NsDropRegex, "namespace-drop-regex", "", "A regex which is matched against a drop operation's namespace (<database>.<collection>). Only drop operations which match are synched to elasticsearch")
	flag.StringVar(&config.NsExcludeRegex, "namespace-exclude-regex", "", "A regex which is matched against an operation's namespace (<database>.<collection>). Only operations which do not match are synched to elasticsearch")
	flag.StringVar(&config.NsDropExcludeRegex, "namespace-drop-exclude-regex", "", "A regex which is matched against a drop operation's namespace (<database>.<collection>). Only drop operations which do not match are synched to elasticsearch")
	// Repeatable list-valued flags (flag.Var with stringargs).
	flag.Var(&config.DirectReadNs, "direct-read-namespace", "A list of direct read namespaces")
	flag.IntVar(&config.DirectReadSplitMax, "direct-read-split-max", 0, "Max number of times to split a collection for direct reads")
	flag.Var(&config.RoutingNamespaces, "routing-namespace", "A list of namespaces that override routing information")
	flag.Var(&config.TimeMachineNamespaces, "time-machine-namespace", "A list of direct read namespaces")
	flag.StringVar(&config.TimeMachineIndexPrefix, "time-machine-index-prefix", "", "A prefix to preprend to time machine indexes")
	flag.StringVar(&config.TimeMachineIndexSuffix, "time-machine-index-suffix", "", "A suffix to append to time machine indexes")
	flag.BoolVar(&config.TimeMachineDirectReads, "time-machine-direct-reads", false, "True to index the results of direct reads into the any time machine indexes")
	flag.Var(&config.ElasticUrls, "elasticsearch-url", "A list of Elasticsearch URLs")
	flag.Var(&config.FileNamespaces, "file-namespace", "A list of file namespaces")
	flag.Var(&config.PatchNamespaces, "patch-namespace", "A list of patch namespaces")
	flag.Var(&config.Workers, "workers", "A list of worker names")
	flag.BoolVar(&config.EnableHTTPServer, "enable-http-server", false, "True to enable an internal http server")
	flag.StringVar(&config.HTTPServerAddr, "http-server-addr", "", "The address the internal http server listens on")
	flag.BoolVar(&config.PruneInvalidJSON, "prune-invalid-json", false, "True to omit values which do not serialize to JSON such as +Inf and -Inf and thus cause errors")
	flag.Var(&config.DeleteStrategy, "delete-strategy", "Stategy to use for deletes. 0=stateless,1=stateful,2=ignore")
	flag.StringVar(&config.DeleteIndexPattern, "delete-index-pattern", "", "An Elasticsearch index-pattern to restric the scope of stateless deletes")
	flag.StringVar(&config.OplogTsFieldName, "oplog-ts-field-name", "", "Field name to use for the oplog timestamp")
	flag.StringVar(&config.OplogDateFieldName, "oplog-date-field-name", "", "Field name to use for the oplog date")
	flag.StringVar(&config.OplogDateFieldFormat, "oplog-date-field-format", "", "Format to use for the oplog date")
	flag.Parse()
	return config
}
// loadIndexTypes registers user-configured [[mapping]] entries in the global
// namespace-to-index/type table, panicking on incomplete entries since this
// runs once at startup.
func (config *configOptions) loadIndexTypes() {
	// Ranging over a nil slice is a no-op, so no nil guard is needed.
	for _, m := range config.Mapping {
		if m.Namespace == "" || (m.Index == "" && m.Type == "") {
			panic("Mappings must specify namespace and at least one of index and type")
		}
		mapIndexTypes[m.Namespace] = &indexTypeMapping{
			Namespace: m.Namespace,
			Index:     strings.ToLower(m.Index),
			Type:      m.Type,
		}
	}
}
// loadFilters compiles each configured [[filter]] entry into an otto VM
// environment keyed by namespace.  A filter supplies either an inline script
// or a path to one (not both) whose module.exports must evaluate to a
// function.  Misconfiguration panics since this runs once at startup.
func (config *configOptions) loadFilters() {
	for _, s := range config.Filter {
		if s.Script != "" || s.Path != "" {
			if s.Path != "" && s.Script != "" {
				panic("Filters must specify path or script but not both")
			}
			if s.Path != "" {
				if script, err := ioutil.ReadFile(s.Path); err == nil {
					s.Script = string(script[:])
				} else {
					panic(fmt.Sprintf("Unable to load filter at path %s: %s", s.Path, err))
				}
			}
			if _, exists := filterEnvs[s.Namespace]; exists {
				panic(fmt.Sprintf("Multiple filters with namespace: %s", s.Namespace))
			}
			// Filters run on the event path, so the environment carries a
			// mutex used to serialize calls into the shared VM.
			env := &executionEnv{
				VM:     otto.New(),
				Script: s.Script,
				lock:   &sync.Mutex{},
			}
			if err := env.VM.Set("module", make(map[string]interface{})); err != nil {
				panic(err)
			}
			if _, err := env.VM.Run(env.Script); err != nil {
				panic(err)
			}
			// The script must have assigned a function to module.exports.
			val, err := env.VM.Run("module.exports")
			if err != nil {
				panic(err)
			} else if !val.IsFunction() {
				panic("module.exports must be a function")
			}
			filterEnvs[s.Namespace] = env
		} else {
			panic("Filters must specify path or script attributes")
		}
	}
}
// loadScripts compiles each configured [[script]] (mapper) entry into an otto
// VM environment keyed by namespace, and records namespaces whose scripts
// provide routing.  A script supplies either an inline script or a path to
// one (not both) whose module.exports must evaluate to a function.
// Misconfiguration panics since this runs once at startup.
// NOTE(review): unlike loadFilters, no lock is set on the environment here —
// confirm mapper VMs are only ever called from a single goroutine.
func (config *configOptions) loadScripts() {
	for _, s := range config.Script {
		if s.Script != "" || s.Path != "" {
			if s.Path != "" && s.Script != "" {
				panic("Scripts must specify path or script but not both")
			}
			if s.Path != "" {
				if script, err := ioutil.ReadFile(s.Path); err == nil {
					s.Script = string(script[:])
				} else {
					panic(fmt.Sprintf("Unable to load script at path %s: %s", s.Path, err))
				}
			}
			if _, exists := mapEnvs[s.Namespace]; exists {
				panic(fmt.Sprintf("Multiple scripts with namespace: %s", s.Namespace))
			}
			env := &executionEnv{
				VM:     otto.New(),
				Script: s.Script,
			}
			if err := env.VM.Set("module", make(map[string]interface{})); err != nil {
				panic(err)
			}
			if _, err := env.VM.Run(env.Script); err != nil {
				panic(err)
			}
			// The script must have assigned a function to module.exports.
			val, err := env.VM.Run("module.exports")
			if err != nil {
				panic(err)
			} else if !val.IsFunction() {
				panic("module.exports must be a function")
			}
			mapEnvs[s.Namespace] = env
			if s.Routing {
				routingNamespaces[s.Namespace] = true
			}
		} else {
			panic("Scripts must specify path or script")
		}
	}
}
// loadPlugins opens the mapper plugin .so (when configured) and wires up its
// required Map function and optional Filter function, panicking on any
// mismatch since the process cannot run with a broken mapper.  Returns the
// receiver to allow call chaining.
func (config *configOptions) loadPlugins() *configOptions {
	if config.MapperPluginPath == "" {
		return config
	}
	p, err := plugin.Open(config.MapperPluginPath)
	if err != nil {
		panic(fmt.Sprintf("Unable to load mapper plugin %s: %s", config.MapperPluginPath, err))
	}
	mapper, err := p.Lookup("Map")
	if err != nil {
		panic(fmt.Sprintf("Unable to find symbol 'Map' in mapper plugin: %s", err))
	}
	// Type switch with binding avoids the assert-after-match of the original.
	switch m := mapper.(type) {
	case func(*monstachemap.MapperPluginInput) (*monstachemap.MapperPluginOutput, error):
		mapperPlugin = m
	default:
		panic(fmt.Sprintf("Plugin 'Map' function must be typed %T", mapperPlugin))
	}
	// The Filter symbol is optional; a lookup failure is silently ignored.
	if filter, err := p.Lookup("Filter"); err == nil {
		switch f := filter.(type) {
		case func(*monstachemap.MapperPluginInput) (bool, error):
			filterPlugin = f
		default:
			panic(fmt.Sprintf("Plugin 'Filter' function must be typed %T", filterPlugin))
		}
	}
	return config
}
// loadConfigFile merges settings from the TOML file named by ConfigFile into
// config. Command-line flags win: each file value is applied only when the
// corresponding field is still at its zero value (or, for the booleans
// checked with the "&& !toml..." pattern, still at its default-true value).
// The check order below is deliberate and must be preserved: some later
// merges (e.g. ResumeName) depend on values merged earlier (e.g. Resume).
func (config *configOptions) loadConfigFile() *configOptions {
	if config.ConfigFile != "" {
		// Seed tomlConfig with the same defaults the flags use so that
		// "unset in file" is distinguishable from an explicit zero.
		var tomlConfig = configOptions{
			DroppedDatabases: true,
			DroppedCollections: true,
			MongoDialSettings: mongoDialSettings{Timeout: -1},
			MongoSessionSettings: mongoSessionSettings{SocketTimeout: -1, SyncTimeout: -1},
			GtmSettings: gtmDefaultSettings(),
		}
		if _, err := toml.DecodeFile(config.ConfigFile, &tomlConfig); err != nil {
			panic(err)
		}
		// String and numeric options: file value fills an unset flag.
		if config.MongoURL == "" {
			config.MongoURL = tomlConfig.MongoURL
		}
		if config.MongoConfigURL == "" {
			config.MongoConfigURL = tomlConfig.MongoConfigURL
		}
		if config.MongoPemFile == "" {
			config.MongoPemFile = tomlConfig.MongoPemFile
		}
		// Default-true boolean: file can only turn it off.
		if config.MongoValidatePemFile && !tomlConfig.MongoValidatePemFile {
			config.MongoValidatePemFile = false
		}
		if config.MongoOpLogDatabaseName == "" {
			config.MongoOpLogDatabaseName = tomlConfig.MongoOpLogDatabaseName
		}
		if config.MongoOpLogCollectionName == "" {
			config.MongoOpLogCollectionName = tomlConfig.MongoOpLogCollectionName
		}
		if config.ElasticUser == "" {
			config.ElasticUser = tomlConfig.ElasticUser
		}
		if config.ElasticPassword == "" {
			config.ElasticPassword = tomlConfig.ElasticPassword
		}
		if config.ElasticPemFile == "" {
			config.ElasticPemFile = tomlConfig.ElasticPemFile
		}
		if config.ElasticValidatePemFile && !tomlConfig.ElasticValidatePemFile {
			config.ElasticValidatePemFile = false
		}
		if config.ElasticVersion == "" {
			config.ElasticVersion = tomlConfig.ElasticVersion
		}
		if config.ElasticMaxConns == 0 {
			config.ElasticMaxConns = tomlConfig.ElasticMaxConns
		}
		if config.DirectReadSplitMax == 0 {
			config.DirectReadSplitMax = tomlConfig.DirectReadSplitMax
		}
		// Default-false boolean: file can only turn it on.
		if !config.ElasticRetry && tomlConfig.ElasticRetry {
			config.ElasticRetry = true
		}
		if config.ElasticMaxDocs == 0 {
			config.ElasticMaxDocs = tomlConfig.ElasticMaxDocs
		}
		if config.ElasticMaxBytes == 0 {
			config.ElasticMaxBytes = tomlConfig.ElasticMaxBytes
		}
		if config.ElasticMaxSeconds == 0 {
			config.ElasticMaxSeconds = tomlConfig.ElasticMaxSeconds
		}
		if config.ElasticClientTimeout == 0 {
			config.ElasticClientTimeout = tomlConfig.ElasticClientTimeout
		}
		if config.MaxFileSize == 0 {
			config.MaxFileSize = tomlConfig.MaxFileSize
		}
		if config.FileDownloaders == 0 {
			config.FileDownloaders = tomlConfig.FileDownloaders
		}
		if config.DeleteStrategy == 0 {
			config.DeleteStrategy = tomlConfig.DeleteStrategy
		}
		if config.DeleteIndexPattern == "" {
			config.DeleteIndexPattern = tomlConfig.DeleteIndexPattern
		}
		if config.DroppedDatabases && !tomlConfig.DroppedDatabases {
			config.DroppedDatabases = false
		}
		if config.DroppedCollections && !tomlConfig.DroppedCollections {
			config.DroppedCollections = false
		}
		if !config.Gzip && tomlConfig.Gzip {
			config.Gzip = true
		}
		if !config.Verbose && tomlConfig.Verbose {
			config.Verbose = true
		}
		if !config.Stats && tomlConfig.Stats {
			config.Stats = true
		}
		if !config.Pprof && tomlConfig.Pprof {
			config.Pprof = true
		}
		if !config.EnableEasyJSON && tomlConfig.EnableEasyJSON {
			config.EnableEasyJSON = true
		}
		if !config.IndexStats && tomlConfig.IndexStats {
			config.IndexStats = true
		}
		if config.StatsDuration == "" {
			config.StatsDuration = tomlConfig.StatsDuration
		}
		if config.StatsIndexFormat == "" {
			config.StatsIndexFormat = tomlConfig.StatsIndexFormat
		}
		if !config.IndexFiles && tomlConfig.IndexFiles {
			config.IndexFiles = true
		}
		if !config.IndexAsUpdate && tomlConfig.IndexAsUpdate {
			config.IndexAsUpdate = true
		}
		if !config.FileHighlighting && tomlConfig.FileHighlighting {
			config.FileHighlighting = true
		}
		if !config.EnablePatches && tomlConfig.EnablePatches {
			config.EnablePatches = true
		}
		if !config.PruneInvalidJSON && tomlConfig.PruneInvalidJSON {
			config.PruneInvalidJSON = true
		}
		if !config.Replay && tomlConfig.Replay {
			config.Replay = true
		}
		if !config.Resume && tomlConfig.Resume {
			config.Resume = true
		}
		if !config.ResumeWriteUnsafe && tomlConfig.ResumeWriteUnsafe {
			config.ResumeWriteUnsafe = true
		}
		if config.ResumeFromTimestamp == 0 {
			config.ResumeFromTimestamp = tomlConfig.ResumeFromTimestamp
		}
		if config.MergePatchAttr == "" {
			config.MergePatchAttr = tomlConfig.MergePatchAttr
		}
		if !config.FailFast && tomlConfig.FailFast {
			config.FailFast = true
		}
		if !config.IndexOplogTime && tomlConfig.IndexOplogTime {
			config.IndexOplogTime = true
		}
		if config.OplogTsFieldName == "" {
			config.OplogTsFieldName = tomlConfig.OplogTsFieldName
		}
		if config.OplogDateFieldName == "" {
			config.OplogDateFieldName = tomlConfig.OplogDateFieldName
		}
		if config.OplogDateFieldFormat == "" {
			config.OplogDateFieldFormat = tomlConfig.OplogDateFieldFormat
		}
		if !config.ExitAfterDirectReads && tomlConfig.ExitAfterDirectReads {
			config.ExitAfterDirectReads = true
		}
		// ResumeName from the file only applies once Resume is known;
		// config.Resume may itself have been set from the file just above.
		if config.Resume && config.ResumeName == "" {
			config.ResumeName = tomlConfig.ResumeName
		}
		if config.ClusterName == "" {
			config.ClusterName = tomlConfig.ClusterName
		}
		if config.NsRegex == "" {
			config.NsRegex = tomlConfig.NsRegex
		}
		if config.NsDropRegex == "" {
			config.NsDropRegex = tomlConfig.NsDropRegex
		}
		if config.NsExcludeRegex == "" {
			config.NsExcludeRegex = tomlConfig.NsExcludeRegex
		}
		if config.NsDropExcludeRegex == "" {
			config.NsDropExcludeRegex = tomlConfig.NsDropExcludeRegex
		}
		// Namespace lists are merged and then loaded into the package-level
		// lookup sets immediately, while both sources are available.
		if config.IndexFiles {
			if len(config.FileNamespaces) == 0 {
				config.FileNamespaces = tomlConfig.FileNamespaces
			}
			config.loadGridFsConfig()
		}
		if config.Worker == "" {
			config.Worker = tomlConfig.Worker
		}
		if config.GraylogAddr == "" {
			config.GraylogAddr = tomlConfig.GraylogAddr
		}
		if config.MapperPluginPath == "" {
			config.MapperPluginPath = tomlConfig.MapperPluginPath
		}
		if config.EnablePatches {
			if len(config.PatchNamespaces) == 0 {
				config.PatchNamespaces = tomlConfig.PatchNamespaces
			}
			config.loadPatchNamespaces()
		}
		if len(config.RoutingNamespaces) == 0 {
			config.RoutingNamespaces = tomlConfig.RoutingNamespaces
			config.loadRoutingNamespaces()
		}
		if len(config.TimeMachineNamespaces) == 0 {
			config.TimeMachineNamespaces = tomlConfig.TimeMachineNamespaces
			config.loadTimeMachineNamespaces()
		}
		if config.TimeMachineIndexPrefix == "" {
			config.TimeMachineIndexPrefix = tomlConfig.TimeMachineIndexPrefix
		}
		if config.TimeMachineIndexSuffix == "" {
			config.TimeMachineIndexSuffix = tomlConfig.TimeMachineIndexSuffix
		}
		if !config.TimeMachineDirectReads {
			config.TimeMachineDirectReads = tomlConfig.TimeMachineDirectReads
		}
		if len(config.DirectReadNs) == 0 {
			config.DirectReadNs = tomlConfig.DirectReadNs
		}
		if len(config.ElasticUrls) == 0 {
			config.ElasticUrls = tomlConfig.ElasticUrls
		}
		if len(config.Workers) == 0 {
			config.Workers = tomlConfig.Workers
		}
		if !config.EnableHTTPServer && tomlConfig.EnableHTTPServer {
			config.EnableHTTPServer = true
		}
		if config.HTTPServerAddr == "" {
			config.HTTPServerAddr = tomlConfig.HTTPServerAddr
		}
		// Structured sub-configs come wholesale from the file; there are no
		// corresponding command-line flags for these.
		config.MongoDialSettings = tomlConfig.MongoDialSettings
		config.MongoSessionSettings = tomlConfig.MongoSessionSettings
		config.GtmSettings = tomlConfig.GtmSettings
		config.Logs = tomlConfig.Logs
		// Scripts, filters and index-type mappings can only come from the
		// file, so they are loaded from tomlConfig directly.
		tomlConfig.loadScripts()
		tomlConfig.loadFilters()
		tomlConfig.loadIndexTypes()
	}
	return config
}
// newLogger builds a rotating file logger for the given path with fixed
// rotation policy: 500 MB per file, 5 backups kept, 28 day retention.
func (config *configOptions) newLogger(path string) *lumberjack.Logger {
	logger := &lumberjack.Logger{
		Filename:   path,
		MaxSize:    500, // megabytes
		MaxBackups: 5,
		MaxAge:     28, // days
	}
	return logger
}
// setupLogging points the package loggers at their configured destinations:
// a Graylog GELF endpoint when GraylogAddr is set, otherwise per-level
// rotating log files from config.Logs. Levels without a configured file keep
// their default output.
func (config *configOptions) setupLogging() *configOptions {
	if config.GraylogAddr == "" {
		logs := config.Logs
		if logs.Info != "" {
			infoLog.SetOutput(config.newLogger(logs.Info))
		}
		if logs.Warn != "" {
			warnLog.SetOutput(config.newLogger(logs.Warn))
		}
		if logs.Error != "" {
			errorLog.SetOutput(config.newLogger(logs.Error))
		}
		if logs.Trace != "" {
			traceLog.SetOutput(config.newLogger(logs.Trace))
		}
		if logs.Stats != "" {
			statsLog.SetOutput(config.newLogger(logs.Stats))
		}
		return config
	}
	// Graylog takes precedence: every severity shares one GELF writer.
	gelfWriter, err := gelf.NewUDPWriter(config.GraylogAddr)
	if err != nil {
		errorLog.Fatalf("Error creating gelf writer: %s", err)
	}
	infoLog.SetOutput(gelfWriter)
	warnLog.SetOutput(gelfWriter)
	errorLog.SetOutput(gelfWriter)
	traceLog.SetOutput(gelfWriter)
	statsLog.SetOutput(gelfWriter)
	return config
}
// loadRoutingNamespaces registers every configured routing namespace in the
// package-level routingNamespaces set.
func (config *configOptions) loadRoutingNamespaces() *configOptions {
	for i := range config.RoutingNamespaces {
		routingNamespaces[config.RoutingNamespaces[i]] = true
	}
	return config
}
// loadTimeMachineNamespaces registers every configured time-machine
// namespace in the package-level tmNamespaces set.
func (config *configOptions) loadTimeMachineNamespaces() *configOptions {
	for i := range config.TimeMachineNamespaces {
		tmNamespaces[config.TimeMachineNamespaces[i]] = true
	}
	return config
}
// loadPatchNamespaces registers every configured patch namespace in the
// package-level patchNamespaces set.
func (config *configOptions) loadPatchNamespaces() *configOptions {
	for i := range config.PatchNamespaces {
		patchNamespaces[config.PatchNamespaces[i]] = true
	}
	return config
}
// loadGridFsConfig registers every configured GridFS file namespace in the
// package-level fileNamespaces set.
func (config *configOptions) loadGridFsConfig() *configOptions {
	for i := range config.FileNamespaces {
		fileNamespaces[config.FileNamespaces[i]] = true
	}
	return config
}
// dump pretty-prints the effective configuration to the info log, or logs a
// marshalling failure to the error log.
func (config *configOptions) dump() {
	// Named "out" rather than "json" to avoid shadowing the encoding/json
	// package inside this function.
	out, err := json.MarshalIndent(config, "", "\t")
	if err != nil {
		errorLog.Printf("Unable to print configuration: %s", err)
	} else {
		infoLog.Println(string(out))
	}
}
// parseMongoURL strips an "ssl=true" option from a MongoDB connection string
// and records it in MongoDialSettings.Ssl instead, because the mgo driver
// does not understand that option within the connection string itself.
func (config *configOptions) parseMongoURL(inURL string) (outURL string) {
	const queryDelim string = "?"
	outURL = inURL
	hostQuery := strings.SplitN(outURL, queryDelim, 2)
	if len(hostQuery) != 2 {
		// No query string present; nothing to rewrite.
		return
	}
	host, query := hostQuery[0], hostQuery[1]
	r := regexp.MustCompile(`ssl=true&?|&ssl=true$`)
	qstr := r.ReplaceAllString(query, "")
	if qstr == query {
		// ssl=true was not present; leave the URL untouched.
		return
	}
	config.MongoDialSettings.Ssl = true
	if qstr == "" {
		outURL = host
	} else {
		outURL = strings.Join([]string{host, qstr}, queryDelim)
	}
	return
}
// setDefaults fills in default values for any options still unset after
// flag parsing and config-file merging, derives the resume name from the
// cluster/worker identity, and normalizes the MongoDB URLs.
func (config *configOptions) setDefaults() *configOptions {
	if config.MongoURL == "" {
		config.MongoURL = mongoURLDefault
	}
	if config.ClusterName != "" {
		// Cluster mode implies resume; the resume name is scoped to the
		// cluster, plus the worker name when one is configured.
		// (The original re-tested ClusterName here redundantly.)
		if config.Worker != "" {
			config.ResumeName = fmt.Sprintf("%s:%s", config.ClusterName, config.Worker)
		} else {
			config.ResumeName = config.ClusterName
		}
		config.Resume = true
	} else if config.ResumeName == "" {
		if config.Worker != "" {
			config.ResumeName = config.Worker
		} else {
			config.ResumeName = resumeNameDefault
		}
	}
	if config.ElasticMaxConns == 0 {
		config.ElasticMaxConns = elasticMaxConnsDefault
	}
	if config.ElasticClientTimeout == 0 {
		config.ElasticClientTimeout = elasticClientTimeoutDefault
	}
	if config.MergePatchAttr == "" {
		config.MergePatchAttr = "json-merge-patches"
	}
	if config.ElasticMaxSeconds == 0 {
		// Direct reads batch more aggressively, so flush less frequently.
		if len(config.DirectReadNs) > 0 {
			config.ElasticMaxSeconds = 5
		} else {
			config.ElasticMaxSeconds = 1
		}
	}
	if config.ElasticMaxDocs == 0 {
		config.ElasticMaxDocs = elasticMaxDocsDefault
	}
	if config.ElasticMaxBytes == 0 {
		config.ElasticMaxBytes = elasticMaxBytesDefault
	}
	// MongoURL is guaranteed non-empty at this point (defaulted above), so
	// it is normalized unconditionally.
	config.MongoURL = config.parseMongoURL(config.MongoURL)
	if config.MongoConfigURL != "" {
		config.MongoConfigURL = config.parseMongoURL(config.MongoConfigURL)
	}
	if config.HTTPServerAddr == "" {
		config.HTTPServerAddr = ":8080"
	}
	if config.StatsIndexFormat == "" {
		config.StatsIndexFormat = "monstache.stats.2006-01-02"
	}
	if config.TimeMachineIndexPrefix == "" {
		config.TimeMachineIndexPrefix = "log"
	}
	if config.TimeMachineIndexSuffix == "" {
		config.TimeMachineIndexSuffix = "2006-01-02"
	}
	if config.DeleteIndexPattern == "" {
		config.DeleteIndexPattern = "*"
	}
	if config.FileDownloaders == 0 {
		config.FileDownloaders = fileDownloadersDefault
	}
	if config.OplogTsFieldName == "" {
		config.OplogTsFieldName = "oplog_ts"
	}
	if config.OplogDateFieldName == "" {
		config.OplogDateFieldName = "oplog_date"
	}
	if config.OplogDateFieldFormat == "" {
		config.OplogDateFieldFormat = "2006/01/02 15:04:05"
	}
	return config
}
// getAuthURL copies the credential prefix (everything before the first "@")
// from config.MongoURL onto inURL, so shard/config-server URLs inherit the
// main connection's credentials. If MongoURL carries no credentials, inURL
// is returned unchanged.
func (config *configOptions) getAuthURL(inURL string) string {
	cred := strings.SplitN(config.MongoURL, "@", 2)
	if len(cred) != 2 {
		return inURL
	}
	return cred[0] + "@" + inURL
}
// configureMongo forces the session into primary-read mode and applies the
// configured socket/sync timeouts. A setting of -1 means "leave the driver
// default in place".
func (config *configOptions) configureMongo(session *mgo.Session) {
	session.SetMode(mgo.Primary, true)
	if st := config.MongoSessionSettings.SocketTimeout; st != -1 {
		session.SetSocketTimeout(time.Duration(st) * time.Second)
	}
	if st := config.MongoSessionSettings.SyncTimeout; st != -1 {
		session.SetSyncTimeout(time.Duration(st) * time.Second)
	}
}
// dialMongo establishes a MongoDB session for inURL. When SSL is requested
// (explicitly, or implied by a configured PEM file) the connection is dialed
// manually over TLS because mgo has no native TLS support; otherwise a plain
// dial, with an optional timeout, is used.
func (config *configOptions) dialMongo(inURL string) (*mgo.Session, error) {
	ssl := config.MongoDialSettings.Ssl || config.MongoPemFile != ""
	if ssl {
		tlsConfig := &tls.Config{}
		if config.MongoPemFile != "" {
			// Trust only the CA certificates provided in the PEM file.
			certs := x509.NewCertPool()
			if ca, err := ioutil.ReadFile(config.MongoPemFile); err == nil {
				certs.AppendCertsFromPEM(ca)
			} else {
				return nil, err
			}
			tlsConfig.RootCAs = certs
		}
		// Check to see if we don't need to validate the PEM
		if config.MongoValidatePemFile == false {
			// Turn off validation
			tlsConfig.InsecureSkipVerify = true
		}
		dialInfo, err := mgo.ParseURL(inURL)
		if err != nil {
			return nil, err
		}
		// Default dial timeout is 10s; MongoDialSettings.Timeout (-1 means
		// unset) overrides it.
		dialInfo.Timeout = time.Duration(10) * time.Second
		if config.MongoDialSettings.Timeout != -1 {
			dialInfo.Timeout = time.Duration(config.MongoDialSettings.Timeout) * time.Second
		}
		// Each server connection is dialed over TLS by hand.
		dialInfo.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) {
			conn, err := tls.Dial("tcp", addr.String(), tlsConfig)
			if err != nil {
				errorLog.Printf("Unable to dial mongodb: %s", err)
			}
			return conn, err
		}
		session, err := mgo.DialWithInfo(dialInfo)
		if err == nil {
			// Generous initial timeouts; configureMongo may tighten them.
			session.SetSyncTimeout(1 * time.Minute)
			session.SetSocketTimeout(1 * time.Minute)
		}
		return session, err
	}
	if config.MongoDialSettings.Timeout != -1 {
		return mgo.DialWithTimeout(inURL,
			time.Duration(config.MongoDialSettings.Timeout)*time.Second)
	}
	return mgo.Dial(inURL)
}
// NewHTTPClient builds the HTTP client used to talk to Elasticsearch,
// honoring the optional CA PEM file, certificate-validation toggle, gzip
// setting and client timeout from the configuration.
func (config *configOptions) NewHTTPClient() (client *http.Client, err error) {
	tlsConfig := &tls.Config{}
	if config.ElasticPemFile != "" {
		// Trust only the CA certificates provided in the PEM file.
		var ca []byte
		certs := x509.NewCertPool()
		if ca, err = ioutil.ReadFile(config.ElasticPemFile); err == nil {
			certs.AppendCertsFromPEM(ca)
			tlsConfig.RootCAs = certs
		} else {
			return client, err
		}
	}
	if !config.ElasticValidatePemFile {
		// Validation disabled: accept any server certificate.
		tlsConfig.InsecureSkipVerify = true
	}
	transport := &http.Transport{
		DisableCompression:  !config.Gzip,
		TLSHandshakeTimeout: 30 * time.Second,
		TLSClientConfig:     tlsConfig,
	}
	client = &http.Client{
		Timeout:   time.Duration(config.ElasticClientTimeout) * time.Second,
		Transport: transport,
	}
	return client, err
}
// doDrop reacts to database/collection drop operations from the oplog by
// deleting the corresponding Elasticsearch indexes (when configured to do
// so) and then cleaning up any routing metadata stored in MongoDB. Metadata
// cleanup failures are logged but do not fail the drop.
func doDrop(mongo *mgo.Session, elastic *elastic.Client, op *gtm.Op, config *configOptions) (err error) {
	if db, drop := op.IsDropDatabase(); drop {
		if config.DroppedDatabases {
			if err = deleteIndexes(elastic, db, config); err == nil {
				if e := dropDBMeta(mongo, db); e != nil {
					errorLog.Printf("Unable to delete metadata for db: %s", e)
				}
			}
		}
	} else if col, drop := op.IsDropCollection(); drop {
		if config.DroppedCollections {
			if err = deleteIndex(elastic, op.GetDatabase()+"."+col, config); err == nil {
				if e := dropCollectionMeta(mongo, op.GetDatabase()+"."+col); e != nil {
					errorLog.Printf("Unable to delete metadata for collection: %s", e)
				}
			}
		}
	}
	return
}
// hasFileContent reports whether op's namespace is configured for GridFS
// file-content indexing; always false when file indexing is disabled.
func hasFileContent(op *gtm.Op, config *configOptions) (ingest bool) {
	return config.IndexFiles && fileNamespaces[op.Namespace]
}
// addPatch maintains a JSON merge-patch history on the indexed document in
// the attribute named by config.MergePatchAttr. For updates it fetches the
// previously indexed revision from Elasticsearch, computes a merge patch
// against the new data and appends it to the history; for inserts it records
// the full document as revision 1. Direct-read ops and ops without a
// timestamp are skipped entirely.
func addPatch(config *configOptions, client *elastic.Client, op *gtm.Op,
	objectID string, indexType *indexTypeMapping, meta *indexingMeta) (err error) {
	var merges []interface{}
	var toJSON []byte
	if op.IsSourceDirect() {
		return nil
	}
	if op.Timestamp == 0 {
		return nil
	}
	if op.IsUpdate() {
		ctx := context.Background()
		service := client.Get()
		service.Id(objectID)
		service.Index(indexType.Index)
		service.Type(indexType.Type)
		// Per-document metadata overrides the mapped index/type/routing.
		if meta.Index != "" {
			service.Index(meta.Index)
		}
		if meta.Type != "" {
			service.Type(meta.Type)
		}
		if meta.Routing != "" {
			service.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			service.Parent(meta.Parent)
		}
		var resp *elastic.GetResult
		if resp, err = service.Do(ctx); err == nil {
			if resp.Found {
				var src map[string]interface{}
				if err = json.Unmarshal(*resp.Source, &src); err == nil {
					if val, ok := src[config.MergePatchAttr]; ok {
						merges = val.([]interface{})
						// JSON numbers decode as float64; normalize the
						// ts/v fields of prior entries back to ints.
						for _, m := range merges {
							entry := m.(map[string]interface{})
							entry["ts"] = int(entry["ts"].(float64))
							entry["v"] = int(entry["v"].(float64))
						}
					}
					// Diff the previous source without its patch history.
					delete(src, config.MergePatchAttr)
					var fromJSON, mergeDoc []byte
					if fromJSON, err = json.Marshal(src); err == nil {
						if toJSON, err = json.Marshal(op.Data); err == nil {
							if mergeDoc, err = jsonpatch.CreateMergePatch(fromJSON, toJSON); err == nil {
								merge := make(map[string]interface{})
								// The upper 32 bits of a MongoDB timestamp
								// hold the unix seconds.
								merge["ts"] = op.Timestamp >> 32
								merge["p"] = string(mergeDoc)
								merge["v"] = len(merges) + 1
								merges = append(merges, merge)
								op.Data[config.MergePatchAttr] = merges
							}
						}
					}
				}
			} else {
				err = errors.New("Last document revision not found")
			}
		}
	} else {
		// Insert: seed the history with the full document as revision 1,
		// unless a history attribute is somehow already present.
		if _, found := op.Data[config.MergePatchAttr]; !found {
			if toJSON, err = json.Marshal(op.Data); err == nil {
				merge := make(map[string]interface{})
				merge["v"] = 1
				merge["ts"] = op.Timestamp >> 32
				merge["p"] = string(toJSON)
				merges = append(merges, merge)
				op.Data[config.MergePatchAttr] = merges
			}
		}
	}
	return
}
// doIndexing queues the bulk request(s) that index op's document: the main
// index/update request, optional routing-metadata persistence in MongoDB,
// and an optional additional time-machine (history) index request. Per-
// document metadata (index, type, routing, parent, version, pipeline) is
// applied on top of the statically mapped index/type.
func doIndexing(config *configOptions, mongo *mgo.Session, bulk *elastic.BulkProcessor, client *elastic.Client, op *gtm.Op, ingestAttachment bool) (err error) {
	meta := parseIndexMeta(op)
	prepareDataForIndexing(config, op)
	objectID, indexType := opIDToString(op), mapIndexType(config, op)
	if config.EnablePatches {
		// Patch history failures are logged but never block indexing.
		if patchNamespaces[op.Namespace] {
			if e := addPatch(config, client, op, objectID, indexType, meta); e != nil {
				errorLog.Printf("Unable to save json-patch info: %s", e)
			}
		}
	}
	if config.IndexAsUpdate && meta.Pipeline == "" && ingestAttachment == false {
		// Upsert path: pipelines and attachments are not supported on
		// update requests, so those fall through to the index path below.
		req := elastic.NewBulkUpdateRequest()
		req.UseEasyJSON(config.EnableEasyJSON)
		req.Id(objectID)
		req.Index(indexType.Index)
		req.Type(indexType.Type)
		req.Doc(op.Data)
		req.DocAsUpsert(true)
		if meta.Index != "" {
			req.Index(meta.Index)
		}
		if meta.Type != "" {
			req.Type(meta.Type)
		}
		if meta.Routing != "" {
			req.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			req.Parent(meta.Parent)
		}
		if meta.RetryOnConflict != 0 {
			req.RetryOnConflict(meta.RetryOnConflict)
		}
		bulk.Add(req)
	} else {
		req := elastic.NewBulkIndexRequest()
		req.UseEasyJSON(config.EnableEasyJSON)
		req.Id(objectID)
		req.Index(indexType.Index)
		req.Type(indexType.Type)
		req.Doc(op.Data)
		if meta.Index != "" {
			req.Index(meta.Index)
		}
		if meta.Type != "" {
			req.Type(meta.Type)
		}
		if meta.Routing != "" {
			req.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			req.Parent(meta.Parent)
		}
		if meta.Version != 0 {
			req.Version(meta.Version)
		}
		if meta.VersionType != "" {
			req.VersionType(meta.VersionType)
		}
		if meta.Pipeline != "" {
			req.Pipeline(meta.Pipeline)
		}
		if meta.RetryOnConflict != 0 {
			req.RetryOnConflict(meta.RetryOnConflict)
		}
		if ingestAttachment {
			// The attachment pipeline overrides any configured pipeline.
			req.Pipeline("attachment")
		}
		bulk.Add(req)
	}
	if meta.shouldSave(config) {
		// Persist routing metadata so later deletes can be routed.
		if e := setIndexMeta(mongo, op.Namespace, objectID, meta); e != nil {
			errorLog.Printf("Unable to save routing info: %s", e)
		}
	}
	if tmNamespaces[op.Namespace] {
		if op.IsSourceOplog() || config.TimeMachineDirectReads {
			t := time.Now().UTC()
			// Time-machine index names are prefix.index.suffix, with the
			// suffix interpreted as a time format of the current time.
			tmIndex := func(idx string) string {
				pre, suf := config.TimeMachineIndexPrefix, config.TimeMachineIndexSuffix
				tmFormat := strings.Join([]string{pre, idx, suf}, ".")
				return strings.ToLower(t.Format(tmFormat))
			}
			// Copy op.Data so history-only fields do not leak into the
			// primary index request above.
			data := make(map[string]interface{})
			for k, v := range op.Data {
				data[k] = v
			}
			data["_source_id"] = objectID
			if config.IndexOplogTime == false {
				secs := int64(op.Timestamp >> 32)
				t := time.Unix(secs, 0).UTC()
				data[config.OplogTsFieldName] = op.Timestamp
				data[config.OplogDateFieldName] = t.Format(config.OplogDateFieldFormat)
			}
			req := elastic.NewBulkIndexRequest()
			req.UseEasyJSON(config.EnableEasyJSON)
			req.Index(tmIndex(indexType.Index))
			req.Type(indexType.Type)
			// History entries are routed by source document id.
			req.Routing(objectID)
			req.Doc(data)
			if meta.Index != "" {
				req.Index(tmIndex(meta.Index))
			}
			if meta.Type != "" {
				req.Type(meta.Type)
			}
			if meta.Pipeline != "" {
				req.Pipeline(meta.Pipeline)
			}
			if ingestAttachment {
				req.Pipeline("attachment")
			}
			bulk.Add(req)
		}
	}
	return
}
// doIndex runs op's data through the mapping stage and either indexes the
// result or, for updates whose mapping produced no document, deletes the
// previously indexed document.
func doIndex(config *configOptions, mongo *mgo.Session, bulk *elastic.BulkProcessor, client *elastic.Client, op *gtm.Op, ingestAttachment bool) (err error) {
	if err = mapData(mongo, config, op); err != nil {
		return
	}
	if op.Data != nil {
		err = doIndexing(config, mongo, bulk, client, op, ingestAttachment)
	} else if op.IsUpdate() {
		doDelete(config, client, mongo, bulk, op)
	}
	return
}
// processOp dispatches a single oplog/direct-read operation: drops flush the
// bulk processor first, deletes queue a delete request, and documents with
// file content are handed to the file-indexing workers via fileC.
func processOp(config *configOptions, mongo *mgo.Session, bulk *elastic.BulkProcessor, client *elastic.Client, op *gtm.Op, fileC chan *gtm.Op) (err error) {
	switch {
	case op.IsDrop():
		// Flush pending requests before removing the target index.
		bulk.Flush()
		err = doDrop(mongo, client, op, config)
	case op.IsDelete():
		doDelete(config, client, mongo, bulk, op)
	case op.Data != nil:
		if hasFileContent(op, config) {
			fileC <- op
		} else {
			err = doIndex(config, mongo, bulk, client, op, false)
		}
	}
	return
}
// processErr records a processing error: it marks the eventual exit status
// as failed, logs the error, and terminates the process immediately when
// fail-fast mode is enabled.
func processErr(err error, config *configOptions) {
	exitStatus = 1
	errorLog.Println(err)
	if config.FailFast {
		os.Exit(exitStatus)
	}
}
// doIndexStats queues a document with the current bulk-processor statistics,
// host, pid and timestamp into the (time-formatted) stats index. A hostname
// lookup failure is returned but does not prevent the stats from being
// queued without the Host field.
func doIndexStats(config *configOptions, bulkStats *elastic.BulkProcessor, stats elastic.BulkProcessorStats) (err error) {
	t := time.Now().UTC()
	doc := map[string]interface{}{
		"Timestamp": t.Format("2006-01-02T15:04:05"),
		"Pid":       os.Getpid(),
		"Stats":     stats,
	}
	var hostname string
	if hostname, err = os.Hostname(); err == nil {
		doc["Host"] = hostname
	}
	index := strings.ToLower(t.Format(config.StatsIndexFormat))
	typeName := "stats"
	if config.useTypeFromFuture() {
		typeName = typeFromFuture
	}
	req := elastic.NewBulkIndexRequest().Index(index).Type(typeName)
	req.UseEasyJSON(config.EnableEasyJSON)
	req.Doc(doc)
	bulkStats.Add(req)
	return
}
// dropDBMeta removes all monstache routing metadata stored for database db.
func dropDBMeta(session *mgo.Session, db string) (err error) {
	_, err = session.DB("monstache").C("meta").RemoveAll(bson.M{"db": db})
	return
}
// dropCollectionMeta removes all monstache routing metadata stored for the
// given "db.collection" namespace.
func dropCollectionMeta(session *mgo.Session, namespace string) (err error) {
	_, err = session.DB("monstache").C("meta").RemoveAll(bson.M{"namespace": namespace})
	return
}
// load populates meta from a document's metadata attribute map. String
// attributes are stringified with %v; version and retryOnConflict are parsed
// as integers and silently skipped when unparseable.
func (meta *indexingMeta) load(metaAttrs map[string]interface{}) {
	// Table of attribute name -> destination field for the string-valued
	// attributes; assignments are independent, so map order is irrelevant.
	stringAttrs := map[string]*string{
		"routing":     &meta.Routing,
		"index":       &meta.Index,
		"type":        &meta.Type,
		"parent":      &meta.Parent,
		"versionType": &meta.VersionType,
		"pipeline":    &meta.Pipeline,
	}
	for attr, dest := range stringAttrs {
		if v, ok := metaAttrs[attr]; ok {
			*dest = fmt.Sprintf("%v", v)
		}
	}
	if v, ok := metaAttrs["version"]; ok {
		if version, err := strconv.ParseInt(fmt.Sprintf("%v", v), 10, 64); err == nil {
			meta.Version = version
		}
	}
	if v, ok := metaAttrs["retryOnConflict"]; ok {
		if roc, err := strconv.Atoi(fmt.Sprintf("%v", v)); err == nil {
			meta.RetryOnConflict = roc
		}
	}
}
// shouldSave reports whether this document's indexing metadata must be
// persisted to MongoDB so a later delete can be routed correctly. Only the
// stateful delete strategy needs saved metadata, and only when at least one
// routing-relevant attribute is set.
func (meta *indexingMeta) shouldSave(config *configOptions) bool {
	if config.DeleteStrategy != statefulDeleteStrategy {
		return false
	}
	return meta.Routing != "" ||
		meta.Index != "" ||
		meta.Type != "" ||
		meta.Parent != "" ||
		meta.Pipeline != ""
}
// setIndexMeta upserts the routing metadata for one indexed document into
// the monstache.meta collection, keyed by "namespace.id", so that a later
// delete of the same document can be routed to the correct index/shard.
func setIndexMeta(session *mgo.Session, namespace, id string, meta *indexingMeta) error {
	metaID := fmt.Sprintf("%s.%s", namespace, id)
	doc := map[string]interface{}{
		"routing":   meta.Routing,
		"index":     meta.Index,
		"type":      meta.Type,
		"parent":    meta.Parent,
		"pipeline":  meta.Pipeline,
		"db":        strings.SplitN(namespace, ".", 2)[0],
		"namespace": namespace,
	}
	col := session.DB("monstache").C("meta")
	_, err := col.UpsertId(metaID, bson.M{"$set": doc})
	return err
}
// getIndexMeta fetches the routing metadata previously saved for one
// document (see setIndexMeta) and then DELETES it — the metadata is one-shot,
// consumed by the delete that needs it. A missing record yields a zero-value
// meta; the Find error is deliberately ignored.
func getIndexMeta(session *mgo.Session, namespace, id string) (meta *indexingMeta) {
	meta = &indexingMeta{}
	col := session.DB("monstache").C("meta")
	doc := make(map[string]interface{})
	metaID := fmt.Sprintf("%s.%s", namespace, id)
	col.FindId(metaID).One(doc)
	if doc["routing"] != nil {
		meta.Routing = doc["routing"].(string)
	}
	if doc["index"] != nil {
		// Elasticsearch index names are always lowercase.
		meta.Index = strings.ToLower(doc["index"].(string))
	}
	if doc["type"] != nil {
		meta.Type = doc["type"].(string)
	}
	if doc["parent"] != nil {
		meta.Parent = doc["parent"].(string)
	}
	if doc["pipeline"] != nil {
		meta.Pipeline = doc["pipeline"].(string)
	}
	// Metadata is no longer needed once the document is deleted.
	col.RemoveId(metaID)
	return
}
// loadBuiltinFunctions installs the built-in MongoDB query helpers (findId,
// findOne, find, pipe) into every mapping script's JavaScript VM, bound to
// that script's namespace and the shared session.
func loadBuiltinFunctions(s *mgo.Session) {
	for ns, env := range mapEnvs {
		// One findConf per helper; the flags select the query variant.
		configs := []*findConf{
			{session: s, name: "findId", vm: env.VM, ns: ns, byId: true},
			{session: s, name: "findOne", vm: env.VM, ns: ns},
			{session: s, name: "find", vm: env.VM, ns: ns, multi: true},
			{session: s, name: "pipe", vm: env.VM, ns: ns, multi: true, pipe: true},
		}
		for _, fa := range configs {
			if err := env.VM.Set(fa.name, makeFind(fa)); err != nil {
				panic(err)
			}
		}
	}
}
// setDatabase overrides the target database when the options map carries a
// string "database" entry; a non-string value is an error.
func (fc *findCall) setDatabase(topts map[string]interface{}) (err error) {
	ov, ok := topts["database"]
	if !ok {
		return
	}
	ovs, isString := ov.(string)
	if !isString {
		return errors.New("Invalid database option value")
	}
	fc.db = ovs
	return
}
// setCollection overrides the target collection when the options map carries
// a string "collection" entry; a non-string value is an error.
func (fc *findCall) setCollection(topts map[string]interface{}) (err error) {
	ov, ok := topts["collection"]
	if !ok {
		return
	}
	ovs, isString := ov.(string)
	if !isString {
		return errors.New("Invalid collection option value")
	}
	fc.col = ovs
	return
}
// setSelect applies a projection from the "select" option. Only integer
// values are honored (otto exports JS numbers used as flags as int64);
// non-integer entries are silently skipped, a non-map value is an error.
func (fc *findCall) setSelect(topts map[string]interface{}) (err error) {
	ov, ok := topts["select"]
	if !ok {
		return
	}
	ovsel, isMap := ov.(map[string]interface{})
	if !isMap {
		return errors.New("Invalid select option value")
	}
	for k, v := range ovsel {
		if vi, isInt := v.(int64); isInt {
			fc.sel[k] = int(vi)
		}
	}
	return
}
// setSort stores the sort specification from the "sort" option; the value
// must export as a slice of strings, otherwise an error is returned.
func (fc *findCall) setSort(topts map[string]interface{}) (err error) {
	ov, ok := topts["sort"]
	if !ok {
		return
	}
	ovs, isStrings := ov.([]string)
	if !isStrings {
		return errors.New("Invalid sort option value")
	}
	fc.sort = ovs
	return
}
// setLimit stores the result limit from the "limit" option; the value must
// export as an int64, otherwise an error is returned.
func (fc *findCall) setLimit(topts map[string]interface{}) (err error) {
	ov, ok := topts["limit"]
	if !ok {
		return
	}
	ovl, isInt := ov.(int64)
	if !isInt {
		return errors.New("Invalid limit option value")
	}
	fc.limit = int(ovl)
	return
}
// setQuery exports the JavaScript query argument to a Go value and converts
// any hex ObjectId strings inside it back to bson ObjectIds before storing.
func (fc *findCall) setQuery(v otto.Value) (err error) {
	var q interface{}
	if q, err = v.Export(); err != nil {
		return
	}
	fc.query = fc.restoreIds(q)
	return
}
// setOptions applies the optional second argument of a find call: database,
// collection and select for all variants, plus sort and limit for the
// multi-document variants. A value that does not export to a string-keyed
// map yields an error.
func (fc *findCall) setOptions(v otto.Value) (err error) {
	opts, exportErr := v.Export()
	if exportErr != nil {
		return errors.New("Invalid options argument")
	}
	topts, ok := opts.(map[string]interface{})
	if !ok {
		return errors.New("Invalid options argument")
	}
	if err = fc.setDatabase(topts); err != nil {
		return
	}
	if err = fc.setCollection(topts); err != nil {
		return
	}
	if err = fc.setSelect(topts); err != nil {
		return
	}
	// Sort and limit only make sense for multi-result calls.
	if !fc.isMulti() {
		return
	}
	if err = fc.setSort(topts); err != nil {
		return
	}
	err = fc.setLimit(topts)
	return
}
// setDefaults seeds the call's database and collection from the script's
// configured namespace ("db.collection"). SplitN with a limit of 2 is used
// so that collection names containing dots (legal in MongoDB) are kept
// intact rather than truncated at the second dot.
func (fc *findCall) setDefaults() {
	if fc.config.ns != "" {
		ns := strings.SplitN(fc.config.ns, ".", 2)
		fc.db = ns[0]
		fc.col = ns[1]
	}
}
// getCollection resolves the mgo collection for the call's current db/col.
func (fc *findCall) getCollection() *mgo.Collection {
	return fc.session.DB(fc.db).C(fc.col)
}
// getVM returns the otto JavaScript VM this call executes in.
func (fc *findCall) getVM() *otto.Otto {
	return fc.config.vm
}
// getFunctionName returns the JavaScript-facing helper name (e.g. "find").
func (fc *findCall) getFunctionName() string {
	return fc.config.name
}
// isMulti reports whether the helper returns multiple documents.
func (fc *findCall) isMulti() bool {
	return fc.config.multi
}
// isPipe reports whether the helper runs an aggregation pipeline.
func (fc *findCall) isPipe() bool {
	return fc.config.pipe
}
// logError logs err, attributed to the helper function being executed.
func (fc *findCall) logError(err error) {
	errorLog.Printf("Error in function %s: %s\n", fc.getFunctionName(), err)
}
// restoreIds walks an exported JavaScript value and converts every string
// that is a valid hex ObjectId back into a bson.ObjectId, recursing through
// slices and maps. All other values pass through unchanged.
func (fc *findCall) restoreIds(v interface{}) (r interface{}) {
	switch vt := v.(type) {
	case string:
		r = v
		if bson.IsObjectIdHex(vt) {
			r = bson.ObjectIdHex(vt)
		}
	case []interface{}:
		var restored []interface{}
		for _, item := range vt {
			restored = append(restored, fc.restoreIds(item))
		}
		r = restored
	case map[string]interface{}:
		restored := make(map[string]interface{})
		for key, val := range vt {
			restored[key] = fc.restoreIds(val)
		}
		r = restored
	default:
		r = v
	}
	return
}
// execute runs the prepared query against MongoDB and converts the result
// into an otto value for the script. Pipe calls run an aggregation; multi
// calls run a find with optional limit/sort/select; single calls return one
// document, by id or by query.
func (fc *findCall) execute() (r otto.Value, err error) {
	var q *mgo.Query
	col := fc.getCollection()
	if fc.isMulti() {
		if fc.isPipe() {
			pipe := col.Pipe(fc.query)
			var docs []map[string]interface{}
			if err = pipe.All(&docs); err == nil {
				// Convert bson-specific types to JS-friendly values.
				var rdocs []map[string]interface{}
				for _, doc := range docs {
					rdocs = append(rdocs, convertMapJavascript(doc))
				}
				r, err = fc.getVM().ToValue(rdocs)
			}
		} else {
			q = col.Find(fc.query)
			// Options are applied only when explicitly provided.
			if fc.limit > 0 {
				q.Limit(fc.limit)
			}
			if len(fc.sort) > 0 {
				q.Sort(fc.sort...)
			}
			if len(fc.sel) > 0 {
				q.Select(fc.sel)
			}
			var docs []map[string]interface{}
			if err = q.All(&docs); err == nil {
				var rdocs []map[string]interface{}
				for _, doc := range docs {
					rdocs = append(rdocs, convertMapJavascript(doc))
				}
				r, err = fc.getVM().ToValue(rdocs)
			}
		}
	} else {
		// Single-document variants: findId queries by _id directly.
		if fc.config.byId {
			q = col.FindId(fc.query)
		} else {
			q = col.Find(fc.query)
		}
		if len(fc.sel) > 0 {
			q.Select(fc.sel)
		}
		doc := make(map[string]interface{})
		if err = q.One(doc); err == nil {
			rdoc := convertMapJavascript(doc)
			r, err = fc.getVM().ToValue(rdoc)
		}
	}
	return
}
// makeFind builds the JavaScript-callable closure for one find helper. Each
// invocation copies the mgo session (closed on return), applies namespace
// defaults, parses the optional options argument, then the required query
// argument, and executes. All failures are logged to the error log and
// surface to the script as a JS null return value.
func makeFind(fa *findConf) func(otto.FunctionCall) otto.Value {
	return func(call otto.FunctionCall) (r otto.Value) {
		var err error
		fc := &findCall{
			config:  fa,
			session: fa.session.Copy(),
			sel:     make(map[string]int),
		}
		defer fc.session.Close()
		fc.setDefaults()
		args := call.ArgumentList
		argLen := len(args)
		// null is the default result for every failure path below.
		r = otto.NullValue()
		if argLen >= 1 {
			// Second argument, when present, is the options map.
			if argLen >= 2 {
				if err = fc.setOptions(call.Argument(1)); err != nil {
					fc.logError(err)
					return
				}
			}
			if fc.db == "" || fc.col == "" {
				fc.logError(errors.New("Find call must specify db and collection"))
				return
			}
			if err = fc.setQuery(call.Argument(0)); err == nil {
				var result otto.Value
				if result, err = fc.execute(); err == nil {
					r = result
				} else {
					fc.logError(err)
				}
			} else {
				fc.logError(err)
			}
		} else {
			fc.logError(errors.New("At least one argument is required"))
		}
		return
	}
}
// doDelete queues a bulk delete request for the document referenced by op,
// resolving the target index/type/routing according to the configured
// delete strategy: stateful consults (and consumes) metadata saved in
// MongoDB; stateless searches Elasticsearch for the document; ignore (or
// any unknown strategy) queues nothing.
func doDelete(config *configOptions, client *elastic.Client, mongo *mgo.Session, bulk *elastic.BulkProcessor, op *gtm.Op) {
	// Nothing to build when deletes are ignored; checked before allocating
	// the request.
	if config.DeleteStrategy == ignoreDeleteStrategy {
		return
	}
	req := elastic.NewBulkDeleteRequest()
	req.UseEasyJSON(config.EnableEasyJSON)
	objectID, indexType, meta := opIDToString(op), mapIndexType(config, op), &indexingMeta{}
	req.Id(objectID)
	if !config.IndexAsUpdate {
		// External versioning by oplog timestamp keeps out-of-order
		// deletes from resurrecting documents.
		req.Version(int64(op.Timestamp))
		req.VersionType("external")
	}
	if config.DeleteStrategy == statefulDeleteStrategy {
		if routingNamespaces[""] || routingNamespaces[op.Namespace] {
			// One-shot metadata lookup (removes the record as it reads).
			meta = getIndexMeta(mongo, op.Namespace, objectID)
		}
		req.Index(indexType.Index)
		req.Type(indexType.Type)
		if meta.Index != "" {
			req.Index(meta.Index)
		}
		if meta.Type != "" {
			req.Type(meta.Type)
		}
		if meta.Routing != "" {
			req.Routing(meta.Routing)
		}
		if meta.Parent != "" {
			req.Parent(meta.Parent)
		}
	} else if config.DeleteStrategy == statelessDeleteStrategy {
		if routingNamespaces[""] || routingNamespaces[op.Namespace] {
			// Locate the (single) indexed copy by _id to recover its
			// index, type, routing and parent.
			termQuery := elastic.NewTermQuery("_id", objectID)
			searchResult, err := client.Search().FetchSource(false).Size(1).Index(config.DeleteIndexPattern).Query(termQuery).Do(context.Background())
			if err != nil {
				errorLog.Printf("Unable to delete document %s: %s", objectID, err)
				return
			}
			if searchResult.Hits != nil && searchResult.Hits.TotalHits == 1 {
				hit := searchResult.Hits.Hits[0]
				req.Index(hit.Index)
				req.Type(hit.Type)
				if hit.Routing != "" {
					req.Routing(hit.Routing)
				}
				if hit.Parent != "" {
					req.Parent(hit.Parent)
				}
			} else {
				errorLog.Printf("Failed to find unique document %s for deletion using index pattern %s", objectID, config.DeleteIndexPattern)
				return
			}
		} else {
			req.Index(indexType.Index)
			req.Type(indexType.Type)
		}
	} else {
		// Unknown strategy: queue nothing.
		return
	}
	bulk.Add(req)
}
// gtmDefaultSettings returns the default gtm tailing parameters: the default
// channel size, a 32-op buffer, and a 75ms buffer flush interval.
func gtmDefaultSettings() gtmSettings {
	return gtmSettings{
		ChannelSize:    gtmChannelSizeDefault,
		BufferSize:     32,
		BufferDuration: "75ms",
	}
}
// notifySdFailed reports a failed systemd READY/WATCHDOG notification. A nil
// err means sd_notify was simply unsupported (no NOTIFY_SOCKET), which is
// only worth a warning in verbose mode.
func notifySdFailed(config *configOptions, err error) {
	if err != nil {
		errorLog.Printf("Systemd notification failed: %s", err)
		return
	}
	if config.Verbose {
		warnLog.Println("Systemd notification not supported (i.e. NOTIFY_SOCKET is unset)")
	}
}
// watchdogSdFailed reports a failure to determine the systemd watchdog
// interval. A nil err means the watchdog is simply not enabled, which is
// only worth a warning in verbose mode.
func watchdogSdFailed(config *configOptions, err error) {
	if err != nil {
		errorLog.Printf("Error determining systemd WATCHDOG interval: %s", err)
		return
	}
	if config.Verbose {
		warnLog.Println("Systemd WATCHDOG not enabled")
	}
}
// serveHttp runs the embedded HTTP server until it stops. ListenAndServe
// always returns a non-nil error; that is only fatal when the server was
// not shut down deliberately (ctx.shutdown unset).
func (ctx *httpServerCtx) serveHttp() {
	s := ctx.httpServer
	if ctx.config.Verbose {
		infoLog.Printf("Starting http server at %s", s.Addr)
	}
	// started feeds the /started uptime endpoint.
	ctx.started = time.Now()
	err := s.ListenAndServe()
	if !ctx.shutdown {
		panic(fmt.Sprintf("Unable to serve http at address %s: %s", s.Addr, err))
	}
}
// buildServer constructs the HTTP mux exposing uptime (/started), health
// (/healthz), and — when enabled — bulk statistics and pprof endpoints,
// then stores the resulting server on ctx for serveHttp to run.
func (ctx *httpServerCtx) buildServer() {
	mux := http.NewServeMux()
	mux.HandleFunc("/started", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(200)
		uptime := (time.Now().Sub(ctx.started)).String()
		w.Write([]byte(uptime))
	})
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, req *http.Request) {
		w.WriteHeader(200)
		w.Write([]byte("ok"))
	})
	if ctx.config.Stats {
		mux.HandleFunc("/stats", func(w http.ResponseWriter, req *http.Request) {
			stats, err := json.MarshalIndent(ctx.bulk.Stats(), "", "    ")
			if err != nil {
				w.WriteHeader(500)
				fmt.Fprintf(w, "Unable to print statistics: %s", err)
				return
			}
			w.Header().Set("Content-Type", "application/json")
			w.WriteHeader(200)
			w.Write(stats)
		})
	}
	if ctx.config.Pprof {
		mux.HandleFunc("/debug/pprof/", pprof.Index)
		mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
		mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
		mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
	}
	ctx.httpServer = &http.Server{
		Addr:     ctx.config.HTTPServerAddr,
		Handler:  mux,
		ErrorLog: errorLog,
	}
}
// notifySd tells systemd the process is ready (READY=1) and, when the
// systemd watchdog is enabled, keeps petting it (WATCHDOG=1) at half the
// configured interval. Runs until notification fails; intended to be
// launched as a goroutine.
func notifySd(config *configOptions) {
	var interval time.Duration
	if config.Verbose {
		infoLog.Println("Sending systemd READY=1")
	}
	sent, err := daemon.SdNotify(false, "READY=1")
	if !sent {
		notifySdFailed(config, err)
		return
	}
	if config.Verbose {
		infoLog.Println("READY=1 successfully sent to systemd")
	}
	interval, err = daemon.SdWatchdogEnabled(false)
	if err != nil || interval == 0 {
		watchdogSdFailed(config, err)
		return
	}
	for {
		if config.Verbose {
			infoLog.Println("Sending systemd WATCHDOG=1")
		}
		sent, err = daemon.SdNotify(false, "WATCHDOG=1")
		if !sent {
			notifySdFailed(config, err)
			return
		}
		if config.Verbose {
			infoLog.Println("WATCHDOG=1 successfully sent to systemd")
		}
		time.Sleep(interval / 2)
	}
}
// makeShardInsertHandler returns a gtm callback that dials a newly
// discovered shard (using the configured credentials) and hands the
// session back to gtm so the shard's oplog is tailed too.
func (config *configOptions) makeShardInsertHandler() gtm.ShardInsertHandler {
	return func(shardInfo *gtm.ShardInfo) (*mgo.Session, error) {
		infoLog.Printf("Adding shard found at %s\n", shardInfo.GetURL())
		shardURL := config.getAuthURL(shardInfo.GetURL())
		shard, err := config.dialMongo(shardURL)
		if err != nil {
			return nil, err
		}
		config.configureMongo(shard)
		return shard, nil
	}
}
// shutdown flushes outstanding bulk requests, resets cluster state, and
// stops the HTTP server, then exits with exitStatus. If cleanup does not
// finish within timeout seconds the process exits anyway.
func shutdown(timeout int, hsc *httpServerCtx, bulk *elastic.BulkProcessor, bulkStats *elastic.BulkProcessor, mongo *mgo.Session, config *configOptions) {
	infoLog.Println("Shutting down")
	closeC := make(chan bool)
	go func() {
		if config.ClusterName != "" {
			resetClusterState(mongo, config)
		}
		if hsc != nil {
			hsc.shutdown = true
			hsc.httpServer.Shutdown(context.Background())
		}
		bulk.Flush()
		if bulkStats != nil {
			bulkStats.Flush()
		}
		close(closeC)
	}()
	// Wait for cleanup, but never longer than the timeout. The previous
	// implementation spun up an extra goroutine around a never-stopped
	// time.Ticker whose two select cases were identical; a single select
	// with time.After is equivalent and does not leak the ticker.
	select {
	case <-closeC:
	case <-time.After(time.Duration(timeout) * time.Second):
	}
	os.Exit(exitStatus)
}
// handlePanic recovers from a panic anywhere under main, logs the cause,
// waits briefly so logs can drain, and exits with status 1.
func handlePanic() {
	r := recover()
	if r == nil {
		return
	}
	errorLog.Println(r)
	infoLog.Println("Shutting down with exit status 1 after panic.")
	time.Sleep(3 * time.Second)
	os.Exit(1)
}
// main is the monstache entry point: it parses configuration, connects
// to MongoDB and Elasticsearch, wires the gtm oplog/direct-read event
// source to the Elasticsearch bulk processor, and runs the event loop
// until a signal or direct-read completion triggers shutdown.
func main() {
	enabled := true
	defer handlePanic()
	// Configuration precedence: command-line flags, then config file,
	// then computed defaults.
	config := &configOptions{
		MongoDialSettings:    mongoDialSettings{Timeout: -1},
		MongoSessionSettings: mongoSessionSettings{SocketTimeout: -1, SyncTimeout: -1},
		GtmSettings:          gtmDefaultSettings(),
	}
	config.parseCommandLineFlags()
	if config.Version {
		fmt.Println(version)
		os.Exit(0)
	}
	config.loadTimeMachineNamespaces()
	config.loadRoutingNamespaces()
	config.loadPatchNamespaces()
	config.loadGridFsConfig()
	config.loadConfigFile()
	config.setDefaults()
	if config.Print {
		config.dump()
		os.Exit(0)
	}
	config.setupLogging()
	config.loadPlugins()
	// NOTE(review): SIGKILL cannot be caught or handled by a process;
	// listing it here is a no-op. Only SIGINT/SIGTERM reach the shutdown
	// goroutine below.
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM, syscall.SIGKILL)
	// Connect to MongoDB.
	mongo, err := config.dialMongo(config.MongoURL)
	if err != nil {
		panic(fmt.Sprintf("Unable to connect to mongodb using URL %s: %s", config.MongoURL, err))
	}
	if mongoInfo, err := mongo.BuildInfo(); err == nil {
		infoLog.Printf("Successfully connected to MongoDB version %s", mongoInfo.Version)
	} else {
		infoLog.Println("Successfully connected to MongoDB")
	}
	defer mongo.Close()
	config.configureMongo(mongo)
	loadBuiltinFunctions(mongo)
	// Connect to Elasticsearch and start the bulk processor(s).
	elasticClient, err := config.newElasticClient()
	if err != nil {
		panic(fmt.Sprintf("Unable to create elasticsearch client: %s", err))
	}
	if config.ElasticVersion == "" {
		if err := config.testElasticsearchConn(elasticClient); err != nil {
			panic(fmt.Sprintf("Unable to validate connection to elasticsearch using client %s: %s",
				elasticClient, err))
		}
	} else {
		if err := config.parseElasticsearchVersion(config.ElasticVersion); err != nil {
			panic(fmt.Sprintf("Elasticsearch version must conform to major.minor.fix: %s", err))
		}
	}
	bulk, err := config.newBulkProcessor(elasticClient)
	if err != nil {
		panic(fmt.Sprintf("Unable to start bulk processor: %s", err))
	}
	defer bulk.Stop()
	var bulkStats *elastic.BulkProcessor
	if config.IndexStats {
		bulkStats, err = config.newStatsBulkProcessor(elasticClient)
		if err != nil {
			panic(fmt.Sprintf("Unable to start stats bulk processor: %s", err))
		}
		defer bulkStats.Stop()
	}
	// Decide where in the oplog to start: saved resume point, full
	// replay, or an explicit timestamp.
	var after gtm.TimestampGenerator
	if config.Resume {
		after = func(session *mgo.Session, options *gtm.Options) bson.MongoTimestamp {
			ts := gtm.LastOpTimestamp(session, options)
			if config.Replay {
				ts = bson.MongoTimestamp(0)
			} else if config.ResumeFromTimestamp != 0 {
				ts = bson.MongoTimestamp(config.ResumeFromTimestamp)
			} else {
				// Fall back to the resume point previously saved by
				// saveTimestamp in monstache.monstache.
				collection := session.DB("monstache").C("monstache")
				doc := make(map[string]interface{})
				collection.FindId(config.ResumeName).One(doc)
				if doc["ts"] != nil {
					ts = doc["ts"].(bson.MongoTimestamp)
				}
			}
			return ts
		}
	} else if config.Replay {
		after = func(session *mgo.Session, options *gtm.Options) bson.MongoTimestamp {
			return bson.MongoTimestamp(0)
		}
	}
	if config.IndexFiles {
		if len(config.FileNamespaces) == 0 {
			errorLog.Fatalln("File indexing is ON but no file namespaces are configured")
		}
		if err := ensureFileMapping(elasticClient); err != nil {
			panic(err)
		}
	}
	// Build the op filter chains: namespace regexes and worker hashing go
	// into nsFilter; plugin/script filters apply to both the oplog stream
	// and direct reads.
	var nsFilter, filter, directReadFilter gtm.OpFilter
	filterChain := []gtm.OpFilter{notMonstache, notSystem, notChunks}
	filterArray := []gtm.OpFilter{}
	if config.isSharded() {
		filterChain = append(filterChain, notConfig)
	}
	if config.NsRegex != "" {
		filterChain = append(filterChain, filterWithRegex(config.NsRegex))
	}
	if config.NsDropRegex != "" {
		filterChain = append(filterChain, filterDropWithRegex(config.NsDropRegex))
	}
	if config.NsExcludeRegex != "" {
		filterChain = append(filterChain, filterInverseWithRegex(config.NsExcludeRegex))
	}
	if config.NsDropExcludeRegex != "" {
		filterChain = append(filterChain, filterDropInverseWithRegex(config.NsDropExcludeRegex))
	}
	if config.Worker != "" {
		workerFilter, err := consistent.ConsistentHashFilter(config.Worker, config.Workers)
		if err != nil {
			panic(err)
		}
		filterChain = append(filterChain, workerFilter)
	} else if config.Workers != nil {
		panic("Workers configured but this worker is undefined. worker must be set to one of the workers.")
	}
	if filterPlugin != nil {
		filterArray = append(filterArray, filterWithPlugin())
	} else if len(filterEnvs) > 0 {
		filterArray = append(filterArray, filterWithScript())
	}
	nsFilter = gtm.ChainOpFilters(filterChain...)
	filter = gtm.ChainOpFilters(filterArray...)
	directReadFilter = gtm.ChainOpFilters(filterArray...)
	var oplogDatabaseName, oplogCollectionName *string
	if config.MongoOpLogDatabaseName != "" {
		oplogDatabaseName = &config.MongoOpLogDatabaseName
	}
	if config.MongoOpLogCollectionName != "" {
		oplogCollectionName = &config.MongoOpLogCollectionName
	}
	// Cluster mode: join the cluster and determine whether this process
	// is the currently enabled worker; paused workers skip direct reads.
	if config.ClusterName != "" {
		if err = ensureClusterTTL(mongo); err == nil {
			infoLog.Printf("Joined cluster %s", config.ClusterName)
		} else {
			panic(fmt.Sprintf("Unable to enable cluster mode: %s", err))
		}
		enabled, err = enableProcess(mongo, config)
		if err != nil {
			panic(fmt.Sprintf("Unable to determine enabled cluster process: %s", err))
		}
		if !enabled {
			config.DirectReadNs = stringargs{}
		}
	}
	gtmBufferDuration, err := time.ParseDuration(config.GtmSettings.BufferDuration)
	if err != nil {
		panic(fmt.Sprintf("Unable to parse gtm buffer duration %s: %s", config.GtmSettings.BufferDuration, err))
	}
	var mongos []*mgo.Session
	var configSession *mgo.Session
	if config.isSharded() {
		// if we have a config server URL then we are running in a sharded cluster
		configSession, err = config.dialMongo(config.MongoConfigURL)
		if err != nil {
			panic(fmt.Sprintf("Unable to connect to mongodb config server using URL %s: %s", config.MongoConfigURL, err))
		}
		config.configureMongo(configSession)
		// get the list of shard servers
		shardInfos := gtm.GetShards(configSession)
		if len(shardInfos) == 0 {
			errorLog.Fatalln("Shards enabled but none found in config.shards collection")
		}
		// add each shard server to the sync list
		for _, shardInfo := range shardInfos {
			infoLog.Printf("Adding shard found at %s\n", shardInfo.GetURL())
			shardURL := config.getAuthURL(shardInfo.GetURL())
			shard, err := config.dialMongo(shardURL)
			if err != nil {
				panic(fmt.Sprintf("Unable to connect to mongodb shard using URL %s: %s", shardURL, err))
			}
			defer shard.Close()
			config.configureMongo(shard)
			mongos = append(mongos, shard)
		}
	} else {
		mongos = append(mongos, mongo)
	}
	// Start tailing the oplog (and running direct reads) on all sessions.
	gtmOpts := &gtm.Options{
		After:               after,
		Filter:              filter,
		NamespaceFilter:     nsFilter,
		OpLogDatabaseName:   oplogDatabaseName,
		OpLogCollectionName: oplogCollectionName,
		ChannelSize:         config.GtmSettings.ChannelSize,
		Ordering:            gtm.AnyOrder,
		WorkerCount:         10,
		BufferDuration:      gtmBufferDuration,
		BufferSize:          config.GtmSettings.BufferSize,
		DirectReadNs:        config.DirectReadNs,
		DirectReadSplitMax:  config.DirectReadSplitMax,
		DirectReadFilter:    directReadFilter,
		Log:                 infoLog,
	}
	gtmCtx := gtm.StartMulti(mongos, gtmOpts)
	if config.isSharded() {
		gtmCtx.AddShardListener(configSession, gtmOpts, config.makeShardInsertHandler())
	}
	if config.ClusterName != "" {
		if enabled {
			infoLog.Printf("Starting work for cluster %s", config.ClusterName)
		} else {
			infoLog.Printf("Pausing work for cluster %s", config.ClusterName)
			gtmCtx.Pause()
		}
	}
	// Periodic tickers; each is stopped immediately when its feature is
	// disabled so its case in the select below never fires.
	timestampTicker := time.NewTicker(10 * time.Second)
	if config.Resume == false {
		timestampTicker.Stop()
	}
	heartBeat := time.NewTicker(10 * time.Second)
	if config.ClusterName == "" {
		heartBeat.Stop()
	}
	statsTimeout := time.Duration(30) * time.Second
	if config.StatsDuration != "" {
		statsTimeout, err = time.ParseDuration(config.StatsDuration)
		if err != nil {
			panic(fmt.Sprintf("Unable to parse stats duration: %s", err))
		}
	}
	printStats := time.NewTicker(statsTimeout)
	if config.Stats == false {
		printStats.Stop()
	}
	go notifySd(config)
	var hsc *httpServerCtx
	if config.EnableHTTPServer {
		hsc = &httpServerCtx{
			bulk:   bulk,
			config: config,
		}
		hsc.buildServer()
		go hsc.serveHttp()
	}
	doneC := make(chan int)
	go func() {
		<-sigs
		shutdown(10, hsc, bulk, bulkStats, mongo, config)
	}()
	var lastTimestamp, lastSavedTimestamp bson.MongoTimestamp
	// Pool of downloader goroutines that attach GridFS file content to an
	// op before it is indexed (results come back on fileDoneC).
	var fileWg sync.WaitGroup
	fileC := make(chan *gtm.Op)
	fileDoneC := make(chan *gtm.Op)
	for i := 0; i < config.FileDownloaders; i++ {
		fileWg.Add(1)
		go func() {
			defer fileWg.Done()
			for op := range fileC {
				err := addFileContent(mongo, op, config)
				if err != nil {
					processErr(err, config)
				}
				fileDoneC <- op
			}
		}()
	}
	if len(config.DirectReadNs) > 0 {
		if config.ExitAfterDirectReads {
			go func() {
				gtmCtx.DirectReadWg.Wait()
				gtmCtx.Stop()
				// NOTE(review): OpC is closed here on the consumer side so
				// the drain loop below terminates; confirm gtmCtx.Stop()
				// guarantees no further sends, otherwise this can panic.
				close(gtmCtx.OpC)
				for op := range gtmCtx.OpC {
					if err = processOp(config, mongo, bulk, elasticClient, op, fileC); err != nil {
						processErr(err, config)
					}
				}
				close(fileC)
				fileWg.Wait()
				doneC <- 30
			}()
		}
	}
	infoLog.Println("Entering event loop")
	for {
		select {
		case timeout := <-doneC:
			shutdown(timeout, hsc, bulk, bulkStats, mongo, config)
			return
		case <-timestampTicker.C:
			if lastTimestamp > lastSavedTimestamp {
				bulk.Flush()
				// NOTE(review): saveTimestamp's return value is discarded;
				// the err tested here is the stale outer variable, so a
				// failed save can still advance lastSavedTimestamp.
				// Probably intended: if err = saveTimestamp(...); err == nil.
				if saveTimestamp(mongo, lastTimestamp, config); err == nil {
					lastSavedTimestamp = lastTimestamp
				} else {
					processErr(err, config)
				}
			}
		case <-heartBeat.C:
			if config.ClusterName == "" {
				break
			}
			// Cluster heartbeat: the enabled process renews its claim;
			// a disabled process attempts to take over.
			if enabled {
				enabled, err = ensureEnabled(mongo, config)
				if !enabled {
					infoLog.Printf("Pausing work for cluster %s", config.ClusterName)
					gtmCtx.Pause()
					bulk.Stop()
				}
			} else {
				enabled, err = enableProcess(mongo, config)
				if enabled {
					infoLog.Printf("Resuming work for cluster %s", config.ClusterName)
					bulk.Start(context.Background())
					resumeWork(gtmCtx, mongo, config)
				}
			}
			if err != nil {
				processErr(err, config)
			}
		case <-printStats.C:
			if !enabled {
				break
			}
			if config.IndexStats {
				if err := doIndexStats(config, bulkStats, bulk.Stats()); err != nil {
					errorLog.Printf("Error indexing statistics: %s", err)
				}
			} else {
				stats, err := json.Marshal(bulk.Stats())
				if err != nil {
					errorLog.Printf("Unable to log statistics: %s", err)
				} else {
					statsLog.Println(string(stats))
				}
			}
		case err = <-gtmCtx.ErrC:
			processErr(err, config)
		case op := <-fileDoneC:
			// File content download finished; index with ingest enabled
			// only when content was actually attached.
			ingest := op.Data["file"] != nil
			if err = doIndex(config, mongo, bulk, elasticClient, op, ingest); err != nil {
				processErr(err, config)
			}
		case op := <-gtmCtx.OpC:
			if !enabled || op == nil {
				break
			}
			if op.IsSourceOplog() {
				lastTimestamp = op.Timestamp
			}
			if err = processOp(config, mongo, bulk, elasticClient, op, fileC); err != nil {
				processErr(err, config)
			}
		}
	}
}
|
package rados
// #cgo LDFLAGS: -lrados
// #include <errno.h>
// #include <stdlib.h>
// #include <rados/librados.h>
import "C"
import "unsafe"
import "time"
// PoolStat represents Ceph pool statistics.
// Fields mirror the librados rados_pool_stat_t counters (see GetPoolStats).
type PoolStat struct {
	// space used in bytes
	Num_bytes uint64
	// space used in KB
	Num_kb uint64
	// number of objects in the pool
	Num_objects uint64
	// number of clones of objects
	Num_object_clones uint64
	// num_objects * num_replicas
	Num_object_copies uint64
	// mirrors librados num_objects_missing_on_primary
	Num_objects_missing_on_primary uint64
	// number of objects found on no OSDs
	Num_objects_unfound uint64
	// number of objects replicated fewer times than they should be
	// (but found on at least one OSD)
	Num_objects_degraded uint64
	// mirrors librados num_rd (reads)
	Num_rd uint64
	// mirrors librados num_rd_kb (KB read)
	Num_rd_kb uint64
	// mirrors librados num_wr (writes)
	Num_wr uint64
	// mirrors librados num_wr_kb (KB written)
	Num_wr_kb uint64
}

// ObjectStat represents an object stat information
type ObjectStat struct {
	// current length in bytes
	Size uint64
	// last modification time
	ModTime time.Time
}

// IOContext represents a context for performing I/O within a pool.
type IOContext struct {
	// ioctx is the underlying librados I/O context handle.
	ioctx C.rados_ioctx_t
}
// Pointer returns a uintptr representation of the IOContext.
// It exposes the raw librados handle for interop with other cgo code.
func (ioctx *IOContext) Pointer() uintptr {
	return uintptr(ioctx.ioctx)
}
// Write writes len(data) bytes to the object with key oid starting at byte
// offset offset. It returns an error, if any. Writing an empty slice is a
// no-op (the previous version panicked indexing data[0]).
func (ioctx *IOContext) Write(oid string, data []byte, offset uint64) error {
	if len(data) == 0 {
		return nil
	}
	c_oid := C.CString(oid)
	defer C.free(unsafe.Pointer(c_oid))
	ret := C.rados_write(ioctx.ioctx, c_oid,
		(*C.char)(unsafe.Pointer(&data[0])),
		(C.size_t)(len(data)),
		(C.uint64_t)(offset))
	return GetRadosError(ret)
}
// WriteFull writes len(data) bytes to the object with key oid.
// The object is filled with the provided data. If the object exists,
// it is atomically truncated and then written. It returns an error, if any.
//
// NOTE(review): calling WriteFull with an empty data slice panics on
// data[0]; confirm whether truncate-to-zero should be supported here
// before adding a guard, since an early return would skip the truncate.
func (ioctx *IOContext) WriteFull(oid string, data []byte) error {
	c_oid := C.CString(oid)
	defer C.free(unsafe.Pointer(c_oid))
	ret := C.rados_write_full(ioctx.ioctx, c_oid,
		(*C.char)(unsafe.Pointer(&data[0])),
		(C.size_t)(len(data)))
	return GetRadosError(ret)
}
// Append appends len(data) bytes to the object with key oid.
// The object is appended with the provided data. If the object exists,
// it is atomically appended to. It returns an error, if any.
// Appending an empty slice is a no-op (the previous version panicked
// indexing data[0]).
func (ioctx *IOContext) Append(oid string, data []byte) error {
	if len(data) == 0 {
		return nil
	}
	c_oid := C.CString(oid)
	defer C.free(unsafe.Pointer(c_oid))
	ret := C.rados_append(ioctx.ioctx, c_oid,
		(*C.char)(unsafe.Pointer(&data[0])),
		(C.size_t)(len(data)))
	return GetRadosError(ret)
}
// Read reads up to len(data) bytes from the object with key oid starting at
// byte offset offset. It returns the number of bytes read and an error, if
// any. A zero-length buffer reads nothing and returns immediately.
func (ioctx *IOContext) Read(oid string, data []byte, offset uint64) (int, error) {
	if len(data) == 0 {
		return 0, nil
	}
	c_oid := C.CString(oid)
	defer C.free(unsafe.Pointer(c_oid))
	ret := C.rados_read(
		ioctx.ioctx,
		c_oid,
		(*C.char)(unsafe.Pointer(&data[0])),
		(C.size_t)(len(data)),
		(C.uint64_t)(offset))
	if ret < 0 {
		return 0, GetRadosError(ret)
	}
	return int(ret), nil
}
// Delete deletes the object with key oid. It returns an error, if any.
func (ioctx *IOContext) Delete(oid string) error {
	c_oid := C.CString(oid)
	defer C.free(unsafe.Pointer(c_oid))
	ret := C.rados_remove(ioctx.ioctx, c_oid)
	return GetRadosError(ret)
}
// Truncate resizes the object with key oid to size size. If the operation
// enlarges the object, the new area is logically filled with zeroes. If the
// operation shrinks the object, the excess data is removed. It returns an
// error, if any.
func (ioctx *IOContext) Truncate(oid string, size uint64) error {
	c_oid := C.CString(oid)
	defer C.free(unsafe.Pointer(c_oid))
	ret := C.rados_trunc(ioctx.ioctx, c_oid, (C.uint64_t)(size))
	return GetRadosError(ret)
}
// Destroy informs librados that the I/O context is no longer in use.
// Resources associated with the context may not be freed immediately, and the
// context should not be used again after calling this method.
// Note: the handle is not nilled out here, so any later method call on this
// IOContext would pass a destroyed handle to librados.
func (ioctx *IOContext) Destroy() {
	C.rados_ioctx_destroy(ioctx.ioctx)
}
// GetPoolStats returns a set of statistics about the pool associated with
// this I/O context. (The doc comment previously referred to the unrelated
// Stat method.)
func (ioctx *IOContext) GetPoolStats() (stat PoolStat, err error) {
	c_stat := C.struct_rados_pool_stat_t{}
	ret := C.rados_ioctx_pool_stat(ioctx.ioctx, &c_stat)
	if ret < 0 {
		return PoolStat{}, RadosError(int(ret))
	}
	// Copy each librados counter into the exported Go struct.
	return PoolStat{
		Num_bytes:                      uint64(c_stat.num_bytes),
		Num_kb:                         uint64(c_stat.num_kb),
		Num_objects:                    uint64(c_stat.num_objects),
		Num_object_clones:              uint64(c_stat.num_object_clones),
		Num_object_copies:              uint64(c_stat.num_object_copies),
		Num_objects_missing_on_primary: uint64(c_stat.num_objects_missing_on_primary),
		Num_objects_unfound:            uint64(c_stat.num_objects_unfound),
		Num_objects_degraded:           uint64(c_stat.num_objects_degraded),
		Num_rd:                         uint64(c_stat.num_rd),
		Num_rd_kb:                      uint64(c_stat.num_rd_kb),
		Num_wr:                         uint64(c_stat.num_wr),
		Num_wr_kb:                      uint64(c_stat.num_wr_kb),
	}, nil
}
// GetPoolName returns the name of the pool associated with the I/O context.
// It retries with a doubled buffer until librados stops reporting ERANGE
// (buffer too small).
func (ioctx *IOContext) GetPoolName() (name string, err error) {
	buf := make([]byte, 128)
	for {
		ret := C.rados_ioctx_get_pool_name(ioctx.ioctx,
			(*C.char)(unsafe.Pointer(&buf[0])), C.unsigned(len(buf)))
		if ret == -C.ERANGE {
			// Buffer too small: grow and retry. (Replaces the magic -34.)
			buf = make([]byte, len(buf)*2)
			continue
		} else if ret < 0 {
			return "", RadosError(ret)
		}
		name = C.GoStringN((*C.char)(unsafe.Pointer(&buf[0])), ret)
		return name, nil
	}
}
// ObjectListFunc is the type of the function called for each object visited
// by ListObjects. oid is the name of the visited object.
type ObjectListFunc func(oid string)
// ListObjects lists all of the objects in the pool associated with the I/O
// context, and calls the provided listFn function for each object, passing
// to the function the name of the object.
func (ioctx *IOContext) ListObjects(listFn ObjectListFunc) error {
	var ctx C.rados_list_ctx_t
	ret := C.rados_objects_list_open(ioctx.ioctx, &ctx)
	if ret < 0 {
		return RadosError(ret)
	}
	defer func() { C.rados_objects_list_close(ctx) }()
	for {
		var c_entry *C.char
		ret := C.rados_objects_list_next(ctx, &c_entry, nil)
		if ret == -C.ENOENT {
			// End of the listing. (Replaces the magic -2; the trailing
			// unreachable panic("invalid state") has been removed.)
			return nil
		} else if ret < 0 {
			return RadosError(ret)
		}
		listFn(C.GoString(c_entry))
	}
}
// Stat returns the size of the object and its last modification time.
func (ioctx *IOContext) Stat(object string) (stat ObjectStat, err error) {
	var c_psize C.uint64_t
	var c_pmtime C.time_t
	c_object := C.CString(object)
	defer C.free(unsafe.Pointer(c_object))
	ret := C.rados_stat(
		ioctx.ioctx,
		c_object,
		&c_psize,
		&c_pmtime)
	if ret < 0 {
		return ObjectStat{}, RadosError(int(ret))
	}
	return ObjectStat{
		Size:    uint64(c_psize),
		ModTime: time.Unix(int64(c_pmtime), 0),
	}, nil
}
// GetXattr gets an xattr with key `name` into data, returning the number of
// bytes read or an error if not successful. A zero-length buffer reads
// nothing (the previous version panicked indexing data[0]).
func (ioctx *IOContext) GetXattr(object string, name string, data []byte) (int, error) {
	if len(data) == 0 {
		return 0, nil
	}
	c_object := C.CString(object)
	c_name := C.CString(name)
	defer C.free(unsafe.Pointer(c_object))
	defer C.free(unsafe.Pointer(c_name))
	ret := C.rados_getxattr(
		ioctx.ioctx,
		c_object,
		c_name,
		(*C.char)(unsafe.Pointer(&data[0])),
		(C.size_t)(len(data)))
	if ret < 0 {
		return 0, GetRadosError(ret)
	}
	return int(ret), nil
}
// SetXattr sets an xattr for an object with key `name` with value as `data`.
// An empty value is passed to librados as a NULL pointer with length zero,
// mirroring how SetOmap handles empty values; the previous version panicked
// indexing data[0].
func (ioctx *IOContext) SetXattr(object string, name string, data []byte) error {
	c_object := C.CString(object)
	c_name := C.CString(name)
	defer C.free(unsafe.Pointer(c_object))
	defer C.free(unsafe.Pointer(c_name))
	var c_data *C.char
	if len(data) > 0 {
		c_data = (*C.char)(unsafe.Pointer(&data[0]))
	}
	ret := C.rados_setxattr(
		ioctx.ioctx,
		c_object,
		c_name,
		c_data,
		(C.size_t)(len(data)))
	return GetRadosError(ret)
}
// ListXattrs lists all the xattrs for an object. Since xattrs are k-v
// pairs, this function returns a map of k-v pairs on success, an error
// code on failure.
func (ioctx *IOContext) ListXattrs(oid string) (map[string][]byte, error) {
	c_oid := C.CString(oid)
	defer C.free(unsafe.Pointer(c_oid))
	var it C.rados_xattrs_iter_t
	ret := C.rados_getxattrs(ioctx.ioctx, c_oid, &it)
	if ret < 0 {
		return nil, GetRadosError(ret)
	}
	defer func() { C.rados_getxattrs_end(it) }()
	m := make(map[string][]byte)
	for {
		var c_name, c_val *C.char
		var c_len C.size_t
		ret := C.rados_getxattrs_next(it, &c_name, &c_val, &c_len)
		if ret < 0 {
			return nil, GetRadosError(ret)
		}
		// rados api returns a null name,val & 0-length upon
		// end of iteration
		if c_name == nil {
			return m, nil // stop iteration
		}
		// NOTE: c_name/c_val point into memory owned by the xattr
		// iterator and are released by rados_getxattrs_end. The previous
		// version deferred C.free on them inside the loop, which both
		// accumulated defers and freed library-owned memory after the
		// iterator had already been ended.
		m[C.GoString(c_name)] = C.GoBytes(unsafe.Pointer(c_val), (C.int)(c_len))
	}
}
// RmXattr removes the xattr with key `name` from object `oid`.
func (ioctx *IOContext) RmXattr(oid string, name string) error {
	c_oid := C.CString(oid)
	c_name := C.CString(name)
	defer C.free(unsafe.Pointer(c_oid))
	defer C.free(unsafe.Pointer(c_name))
	return GetRadosError(C.rados_rmxattr(ioctx.ioctx, c_oid, c_name))
}
// SetOmap appends the map `pairs` to the omap `oid`.
// Keys are copied into C memory (freed via defers); values are passed by
// pointer directly into the Go byte slices.
func (ioctx *IOContext) SetOmap(oid string, pairs map[string][]byte) error {
	c_oid := C.CString(oid)
	defer C.free(unsafe.Pointer(c_oid))
	var s C.size_t
	var c *C.char
	ptrSize := unsafe.Sizeof(c)
	// Three parallel C arrays: key pointers, value pointers, value lengths.
	c_keys := C.malloc(C.size_t(len(pairs)) * C.size_t(ptrSize))
	c_values := C.malloc(C.size_t(len(pairs)) * C.size_t(ptrSize))
	c_lengths := C.malloc(C.size_t(len(pairs)) * C.size_t(unsafe.Sizeof(s)))
	defer C.free(unsafe.Pointer(c_keys))
	defer C.free(unsafe.Pointer(c_values))
	defer C.free(unsafe.Pointer(c_lengths))
	i := 0
	for key, value := range pairs {
		// key
		c_key_ptr := (**C.char)(unsafe.Pointer(uintptr(c_keys) + uintptr(i)*ptrSize))
		*c_key_ptr = C.CString(key)
		defer C.free(unsafe.Pointer(*c_key_ptr))
		// value and its length
		c_value_ptr := (**C.char)(unsafe.Pointer(uintptr(c_values) + uintptr(i)*ptrSize))
		var c_length C.size_t
		if len(value) > 0 {
			*c_value_ptr = (*C.char)(unsafe.Pointer(&value[0]))
			c_length = C.size_t(len(value))
		} else {
			// Empty values are passed as NULL with length zero.
			*c_value_ptr = nil
			c_length = C.size_t(0)
		}
		// NOTE(review): the lengths array is indexed with ptrSize as its
		// element stride; that is only correct while sizeof(size_t) ==
		// sizeof(char*) (true on common 64-bit platforms) — confirm.
		c_length_ptr := (*C.size_t)(unsafe.Pointer(uintptr(c_lengths) + uintptr(i)*ptrSize))
		*c_length_ptr = c_length
		i++
	}
	op := C.rados_create_write_op()
	C.rados_write_op_omap_set(
		op,
		(**C.char)(c_keys),
		(**C.char)(c_values),
		(*C.size_t)(c_lengths),
		C.size_t(len(pairs)))
	ret := C.rados_write_op_operate(op, ioctx.ioctx, c_oid, nil, 0)
	C.rados_release_write_op(op)
	return GetRadosError(ret)
}
// OmapListFunc is the type of the function called for each omap key
// visited by ListOmapValues. value is the raw bytes stored under key.
type OmapListFunc func(key string, value []byte)
// ListOmapValues iterates on a set of keys and their values from an omap.
// `startAfter`: iterate only on the keys after this specified one
// `filterPrefix`: iterate only on the keys beginning with this prefix
// `maxReturn`: iterate no more than `maxReturn` key/value pairs
// `listFn`: the function called at each iteration
func (ioctx *IOContext) ListOmapValues(oid string, startAfter string, filterPrefix string, maxReturn int64, listFn OmapListFunc) error {
	c_oid := C.CString(oid)
	c_start_after := C.CString(startAfter)
	c_filter_prefix := C.CString(filterPrefix)
	c_max_return := C.uint64_t(maxReturn)
	defer C.free(unsafe.Pointer(c_oid))
	defer C.free(unsafe.Pointer(c_start_after))
	defer C.free(unsafe.Pointer(c_filter_prefix))
	op := C.rados_create_read_op()
	// Release the read op on every path; the previous version leaked it
	// (and the omap iterator) on each of the error returns below.
	defer C.rados_release_read_op(op)
	var c_iter C.rados_omap_iter_t
	var c_prval C.int
	C.rados_read_op_omap_get_vals(
		op,
		c_start_after,
		c_filter_prefix,
		c_max_return,
		&c_iter,
		&c_prval,
	)
	ret := C.rados_read_op_operate(op, ioctx.ioctx, c_oid, 0)
	if int(c_prval) != 0 {
		return RadosError(int(c_prval))
	} else if int(ret) != 0 {
		return GetRadosError(ret)
	}
	// The iterator is only valid once the read op has succeeded; it is
	// ended (LIFO) before the read op is released.
	defer C.rados_omap_get_end(c_iter)
	for {
		var c_key *C.char
		var c_val *C.char
		var c_len C.size_t
		ret = C.rados_omap_get_next(c_iter, &c_key, &c_val, &c_len)
		if int(ret) != 0 {
			return GetRadosError(ret)
		}
		// A NULL key marks the end of the iteration.
		if c_key == nil {
			return nil
		}
		listFn(C.GoString(c_key), C.GoBytes(unsafe.Pointer(c_val), C.int(c_len)))
	}
}
// GetOmapValues fetches a set of keys and their values from an omap and
// returns them as a map.
// `startAfter`: retrieve only the keys after this specified one
// `filterPrefix`: retrieve only the keys beginning with this prefix
// `maxReturn`: retrieve no more than `maxReturn` key/value pairs
func (ioctx *IOContext) GetOmapValues(oid string, startAfter string, filterPrefix string, maxReturn int64) (map[string][]byte, error) {
	omap := map[string][]byte{}
	collect := func(key string, value []byte) {
		omap[key] = value
	}
	err := ioctx.ListOmapValues(oid, startAfter, filterPrefix, maxReturn, collect)
	return omap, err
}
// GetAllOmapValues fetches all the keys and their values from an omap and
// returns them as a map.
// `startAfter`: retrieve only the keys after this specified one
// `filterPrefix`: retrieve only the keys beginning with this prefix
// `iteratorSize`: internal number of keys to fetch during a read operation
func (ioctx *IOContext) GetAllOmapValues(oid string, startAfter string, filterPrefix string, iteratorSize int64) (map[string][]byte, error) {
	omap := map[string][]byte{}
	omapSize := 0
	for {
		// Page through the omap, advancing startAfter to the last key
		// seen so the next call resumes where this one stopped.
		err := ioctx.ListOmapValues(
			oid, startAfter, filterPrefix, iteratorSize,
			func(key string, value []byte) {
				omap[key] = value
				startAfter = key
			},
		)
		if err != nil {
			return omap, err
		}
		// End of omap: a page that adds no new keys means we are done.
		if len(omap) == omapSize {
			break
		}
		omapSize = len(omap)
	}
	return omap, nil
}
// RmOmapKeys removes the specified `keys` from the omap `oid`.
func (ioctx *IOContext) RmOmapKeys(oid string, keys []string) error {
	c_oid := C.CString(oid)
	defer C.free(unsafe.Pointer(c_oid))
	var c *C.char
	ptrSize := unsafe.Sizeof(c)
	// Build a C array of key pointers; each CString is freed via defer.
	c_keys := C.malloc(C.size_t(len(keys)) * C.size_t(ptrSize))
	defer C.free(unsafe.Pointer(c_keys))
	i := 0
	for _, key := range keys {
		c_key_ptr := (**C.char)(unsafe.Pointer(uintptr(c_keys) + uintptr(i)*ptrSize))
		*c_key_ptr = C.CString(key)
		defer C.free(unsafe.Pointer(*c_key_ptr))
		i++
	}
	op := C.rados_create_write_op()
	C.rados_write_op_omap_rm_keys(
		op,
		(**C.char)(c_keys),
		C.size_t(len(keys)))
	ret := C.rados_write_op_operate(op, ioctx.ioctx, c_oid, nil, 0)
	C.rados_release_write_op(op)
	return GetRadosError(ret)
}
// CleanOmap clears the omap `oid`.
func (ioctx *IOContext) CleanOmap(oid string) error {
	c_oid := C.CString(oid)
	defer C.free(unsafe.Pointer(c_oid))
	op := C.rados_create_write_op()
	defer C.rados_release_write_op(op)
	C.rados_write_op_omap_clear(op)
	ret := C.rados_write_op_operate(op, ioctx.ioctx, c_oid, nil, 0)
	return GetRadosError(ret)
}
// Iter is a stateful iterator over the object names in a pool, created by
// IOContext.Iter and advanced with Next.
type Iter struct {
	// ctx is the librados listing handle; released by Close.
	ctx C.rados_list_ctx_t
	// err holds the last iteration error, surfaced via Err.
	err error
	// entry is the most recently retrieved object name, surfaced via Value.
	entry string
}

// IterToken marks a position within an iteration, for use with Iter.Seek.
type IterToken uint32
// Iter returns an iterator object that can be used to list the object names in the current pool
func (ioctx *IOContext) Iter() (*Iter, error) {
	iter := Iter{}
	if cerr := C.rados_objects_list_open(ioctx.ioctx, &iter.ctx); cerr < 0 {
		return nil, GetRadosError(cerr)
	}
	return &iter, nil
}
// Token returns a token marking the current position of the iterator, as
// reported by rados_objects_list_get_pg_hash_position. To be used in
// combination with Iter.Seek().
func (iter *Iter) Token() IterToken {
	return IterToken(C.rados_objects_list_get_pg_hash_position(iter.ctx))
}
// Seek moves the iterator to the position marked by token, as previously
// returned by Token.
func (iter *Iter) Seek(token IterToken) {
	C.rados_objects_list_seek(iter.ctx, C.uint32_t(token))
}
// Next retrieves the next object name in the pool/namespace iterator.
// Upon a successful invocation (return value of true), the Value method should
// be used to obtain the name of the retrieved object name. When the iterator is
// exhausted, Next returns false. The Err method should used to verify whether the
// end of the iterator was reached, or the iterator received an error.
//
// Example:
// iter := pool.Iter()
// defer iter.Close()
// for iter.Next() {
// fmt.Printf("%v\n", iter.Value())
// }
// return iter.Err()
//
func (iter *Iter) Next() bool {
centry := (*C.char)(C.calloc(1, 1024))
defer C.free(unsafe.Pointer(centry))
if cerr := C.rados_objects_list_next(iter.ctx, ¢ry, nil); cerr < 0 {
iter.err = GetRadosError(cerr)
return false
}
iter.entry = C.GoString(centry)
return true
}
// Value returns the current value of the iterator (an object name), valid
// only after a successful call to Next; it is empty once an error occurred.
func (iter *Iter) Value() string {
	if iter.err == nil {
		return iter.entry
	}
	return ""
}
// Err checks whether the iterator has encountered an error. A
// RadosErrorNotFound — presumably marking the normal end of the listing,
// as with the ENOENT check in ListObjects — is reported as nil.
func (iter *Iter) Err() error {
	if iter.err == RadosErrorNotFound {
		return nil
	}
	return iter.err
}
// Close closes the iterator cursor on the server. Be aware that iterators
// are not closed automatically at the end of iteration.
func (iter *Iter) Close() {
	C.rados_objects_list_close(iter.ctx)
}
rados: Fix iterator memory allocation; allocating a c_entry is not necessary, as it is performed directly by the library.
package rados
// #cgo LDFLAGS: -lrados
// #include <errno.h>
// #include <stdlib.h>
// #include <rados/librados.h>
import "C"
import "unsafe"
import "time"
// PoolStat represents Ceph pool statistics.
// Fields mirror the librados rados_pool_stat_t counters (see GetPoolStats).
type PoolStat struct {
	// space used in bytes
	Num_bytes uint64
	// space used in KB
	Num_kb uint64
	// number of objects in the pool
	Num_objects uint64
	// number of clones of objects
	Num_object_clones uint64
	// num_objects * num_replicas
	Num_object_copies uint64
	// mirrors librados num_objects_missing_on_primary
	Num_objects_missing_on_primary uint64
	// number of objects found on no OSDs
	Num_objects_unfound uint64
	// number of objects replicated fewer times than they should be
	// (but found on at least one OSD)
	Num_objects_degraded uint64
	// mirrors librados num_rd (reads)
	Num_rd uint64
	// mirrors librados num_rd_kb (KB read)
	Num_rd_kb uint64
	// mirrors librados num_wr (writes)
	Num_wr uint64
	// mirrors librados num_wr_kb (KB written)
	Num_wr_kb uint64
}

// ObjectStat represents an object stat information
type ObjectStat struct {
	// current length in bytes
	Size uint64
	// last modification time
	ModTime time.Time
}

// IOContext represents a context for performing I/O within a pool.
type IOContext struct {
	// ioctx is the underlying librados I/O context handle.
	ioctx C.rados_ioctx_t
}
// Pointer returns a uintptr representation of the IOContext.
// It exposes the raw librados handle for interop with other cgo code.
func (ioctx *IOContext) Pointer() uintptr {
	return uintptr(ioctx.ioctx)
}
// Write writes len(data) bytes to the object with key oid starting at byte
// offset offset. It returns an error, if any. Writing an empty slice is a
// no-op (the previous version panicked indexing data[0]).
func (ioctx *IOContext) Write(oid string, data []byte, offset uint64) error {
	if len(data) == 0 {
		return nil
	}
	c_oid := C.CString(oid)
	defer C.free(unsafe.Pointer(c_oid))
	ret := C.rados_write(ioctx.ioctx, c_oid,
		(*C.char)(unsafe.Pointer(&data[0])),
		(C.size_t)(len(data)),
		(C.uint64_t)(offset))
	return GetRadosError(ret)
}
// WriteFull writes len(data) bytes to the object with key oid.
// The object is filled with the provided data. If the object exists,
// it is atomically truncated and then written. It returns an error, if any.
//
// NOTE(review): calling WriteFull with an empty data slice panics on
// data[0]; confirm whether truncate-to-zero should be supported here
// before adding a guard, since an early return would skip the truncate.
func (ioctx *IOContext) WriteFull(oid string, data []byte) error {
	c_oid := C.CString(oid)
	defer C.free(unsafe.Pointer(c_oid))
	ret := C.rados_write_full(ioctx.ioctx, c_oid,
		(*C.char)(unsafe.Pointer(&data[0])),
		(C.size_t)(len(data)))
	return GetRadosError(ret)
}
// Append appends len(data) bytes to the object with key oid.
// The object is appended with the provided data. If the object exists,
// it is atomically appended to. It returns an error, if any.
// Appending an empty slice is a no-op (the previous version panicked
// indexing data[0]).
func (ioctx *IOContext) Append(oid string, data []byte) error {
	if len(data) == 0 {
		return nil
	}
	c_oid := C.CString(oid)
	defer C.free(unsafe.Pointer(c_oid))
	ret := C.rados_append(ioctx.ioctx, c_oid,
		(*C.char)(unsafe.Pointer(&data[0])),
		(C.size_t)(len(data)))
	return GetRadosError(ret)
}
// Read reads up to len(data) bytes from the object with key oid starting at
// byte offset offset. It returns the number of bytes read and an error, if
// any. A zero-length buffer reads nothing and returns immediately.
func (ioctx *IOContext) Read(oid string, data []byte, offset uint64) (int, error) {
	if len(data) == 0 {
		return 0, nil
	}
	c_oid := C.CString(oid)
	defer C.free(unsafe.Pointer(c_oid))
	ret := C.rados_read(
		ioctx.ioctx,
		c_oid,
		(*C.char)(unsafe.Pointer(&data[0])),
		(C.size_t)(len(data)),
		(C.uint64_t)(offset))
	if ret < 0 {
		return 0, GetRadosError(ret)
	}
	return int(ret), nil
}
// Delete deletes the object with key oid. It returns an error, if any.
func (ioctx *IOContext) Delete(oid string) error {
	c_oid := C.CString(oid)
	defer C.free(unsafe.Pointer(c_oid))
	// rados_remove returns 0 on success or a negative errno.
	return GetRadosError(C.rados_remove(ioctx.ioctx, c_oid))
}
// Truncate resizes the object with key oid to size size. If the operation
// enlarges the object, the new area is logically filled with zeroes. If the
// operation shrinks the object, the excess data is removed. It returns an
// error, if any.
func (ioctx *IOContext) Truncate(oid string, size uint64) error {
	c_oid := C.CString(oid)
	defer C.free(unsafe.Pointer(c_oid))
	return GetRadosError(C.rados_trunc(ioctx.ioctx, c_oid, (C.uint64_t)(size)))
}
// Destroy informs librados that the I/O context is no longer in use.
// Resources associated with the context may not be freed immediately, and the
// context should not be used again after calling this method.
func (ioctx *IOContext) Destroy() {
	C.rados_ioctx_destroy(ioctx.ioctx)
}
// GetPoolStats returns a set of statistics about the pool associated with
// this I/O context. (The previous doc comment incorrectly named the method
// "Stat".)
func (ioctx *IOContext) GetPoolStats() (stat PoolStat, err error) {
	c_stat := C.struct_rados_pool_stat_t{}
	ret := C.rados_ioctx_pool_stat(ioctx.ioctx, &c_stat)
	if ret < 0 {
		return PoolStat{}, RadosError(int(ret))
	}
	// Copy each C counter into the exported Go struct.
	return PoolStat{
		Num_bytes: uint64(c_stat.num_bytes),
		Num_kb: uint64(c_stat.num_kb),
		Num_objects: uint64(c_stat.num_objects),
		Num_object_clones: uint64(c_stat.num_object_clones),
		Num_object_copies: uint64(c_stat.num_object_copies),
		Num_objects_missing_on_primary: uint64(c_stat.num_objects_missing_on_primary),
		Num_objects_unfound: uint64(c_stat.num_objects_unfound),
		Num_objects_degraded: uint64(c_stat.num_objects_degraded),
		Num_rd: uint64(c_stat.num_rd),
		Num_rd_kb: uint64(c_stat.num_rd_kb),
		Num_wr: uint64(c_stat.num_wr),
		Num_wr_kb: uint64(c_stat.num_wr_kb),
	}, nil
}
// GetPoolName returns the name of the pool associated with the I/O context.
// The name buffer starts at 128 bytes and is doubled until librados stops
// reporting -ERANGE (buffer too small).
func (ioctx *IOContext) GetPoolName() (name string, err error) {
	// librados reports a too-small buffer with -ERANGE.
	const radosErrorRange = -34
	buf := make([]byte, 128)
	for {
		ret := C.rados_ioctx_get_pool_name(ioctx.ioctx,
			(*C.char)(unsafe.Pointer(&buf[0])), C.unsigned(len(buf)))
		if ret == radosErrorRange {
			buf = make([]byte, len(buf)*2)
			continue
		} else if ret < 0 {
			return "", RadosError(ret)
		}
		// ret is the length of the name actually written into buf.
		name = C.GoStringN((*C.char)(unsafe.Pointer(&buf[0])), ret)
		return name, nil
	}
}
// ObjectListFunc is the type of the function called for each object visited
// by ListObjects. It receives the object's name.
type ObjectListFunc func(oid string)
// ListObjects lists all of the objects in the pool associated with the I/O
// context, and calls the provided listFn function for each object, passing
// to the function the name of the object.
func (ioctx *IOContext) ListObjects(listFn ObjectListFunc) error {
	// librados signals end of iteration with -ENOENT.
	const radosErrorNoEnt = -2
	var ctx C.rados_list_ctx_t
	ret := C.rados_objects_list_open(ioctx.ioctx, &ctx)
	if ret < 0 {
		return RadosError(ret)
	}
	defer func() { C.rados_objects_list_close(ctx) }()
	for {
		var c_entry *C.char
		ret := C.rados_objects_list_next(ctx, &c_entry, nil)
		if ret == radosErrorNoEnt {
			return nil
		} else if ret < 0 {
			return RadosError(ret)
		}
		listFn(C.GoString(c_entry))
	}
	// Note: the previous version ended with an unreachable
	// panic("invalid state") after this infinite loop; it has been removed.
}
// Stat returns the size of the object and its last modification time.
func (ioctx *IOContext) Stat(object string) (stat ObjectStat, err error) {
	c_object := C.CString(object)
	defer C.free(unsafe.Pointer(c_object))

	var c_psize C.uint64_t
	var c_pmtime C.time_t
	if ret := C.rados_stat(ioctx.ioctx, c_object, &c_psize, &c_pmtime); ret < 0 {
		return ObjectStat{}, RadosError(int(ret))
	}
	// Translate the C out-parameters into the exported Go struct.
	stat.Size = uint64(c_psize)
	stat.ModTime = time.Unix(int64(c_pmtime), 0)
	return stat, nil
}
// GetXattr gets an xattr with key `name` from object `object` into data; it
// returns the number of bytes read or an error if not successful.
func (ioctx *IOContext) GetXattr(object string, name string, data []byte) (int, error) {
	c_object := C.CString(object)
	c_name := C.CString(name)
	defer C.free(unsafe.Pointer(c_object))
	defer C.free(unsafe.Pointer(c_name))
	// Guard against &data[0] panicking on an empty buffer.
	var c_data *C.char
	if len(data) > 0 {
		c_data = (*C.char)(unsafe.Pointer(&data[0]))
	}
	ret := C.rados_getxattr(
		ioctx.ioctx,
		c_object,
		c_name,
		c_data,
		(C.size_t)(len(data)))
	if ret < 0 {
		return 0, GetRadosError(ret)
	}
	return int(ret), nil
}
// SetXattr sets an xattr for an object with key `name` with value as `data`.
func (ioctx *IOContext) SetXattr(object string, name string, data []byte) error {
	c_object := C.CString(object)
	c_name := C.CString(name)
	defer C.free(unsafe.Pointer(c_object))
	defer C.free(unsafe.Pointer(c_name))
	// Guard against &data[0] panicking on an empty value; a zero-length
	// xattr is still set.
	var c_data *C.char
	if len(data) > 0 {
		c_data = (*C.char)(unsafe.Pointer(&data[0]))
	}
	ret := C.rados_setxattr(
		ioctx.ioctx,
		c_object,
		c_name,
		c_data,
		(C.size_t)(len(data)))
	return GetRadosError(ret)
}
// ListXattrs lists all the xattrs for an object. Since xattrs are
// a k-v pair, this function returns a map of k-v pairs on
// success, error code on failure.
func (ioctx *IOContext) ListXattrs(oid string) (map[string][]byte, error) {
	c_oid := C.CString(oid)
	defer C.free(unsafe.Pointer(c_oid))
	var it C.rados_xattrs_iter_t
	ret := C.rados_getxattrs(ioctx.ioctx, c_oid, &it)
	if ret < 0 {
		return nil, GetRadosError(ret)
	}
	defer func() { C.rados_getxattrs_end(it) }()
	m := make(map[string][]byte)
	for {
		var c_name, c_val *C.char
		var c_len C.size_t
		// The name/value buffers are owned by the iterator and released
		// by rados_getxattrs_end — do not free them here. (The previous
		// code deferred C.free on c_name/c_val before they were assigned;
		// since defer arguments are evaluated immediately, those calls
		// only ever freed nil while piling up one defer per iteration.)
		ret := C.rados_getxattrs_next(it, &c_name, &c_val, &c_len)
		if ret < 0 {
			return nil, GetRadosError(ret)
		}
		// rados api returns a null name,val & 0-length upon
		// end of iteration
		if c_name == nil {
			return m, nil // stop iteration
		}
		m[C.GoString(c_name)] = C.GoBytes(unsafe.Pointer(c_val), (C.int)(c_len))
	}
}
// RmXattr removes an xattr with key `name` from object `oid`.
func (ioctx *IOContext) RmXattr(oid string, name string) error {
	c_oid := C.CString(oid)
	c_name := C.CString(name)
	defer C.free(unsafe.Pointer(c_oid))
	defer C.free(unsafe.Pointer(c_name))
	ret := C.rados_rmxattr(
		ioctx.ioctx,
		c_oid,
		c_name)
	return GetRadosError(ret)
}
// SetOmap appends the map `pairs` to the omap `oid`.
// Keys, value pointers, and value lengths are marshalled into three
// manually laid-out C arrays and submitted as a single write op.
func (ioctx *IOContext) SetOmap(oid string, pairs map[string][]byte) error {
	c_oid := C.CString(oid)
	defer C.free(unsafe.Pointer(c_oid))
	var s C.size_t
	var c *C.char
	ptrSize := unsafe.Sizeof(c)
	// Parallel C arrays: key pointers, value pointers, value lengths.
	c_keys := C.malloc(C.size_t(len(pairs)) * C.size_t(ptrSize))
	c_values := C.malloc(C.size_t(len(pairs)) * C.size_t(ptrSize))
	c_lengths := C.malloc(C.size_t(len(pairs)) * C.size_t(unsafe.Sizeof(s)))
	defer C.free(unsafe.Pointer(c_keys))
	defer C.free(unsafe.Pointer(c_values))
	defer C.free(unsafe.Pointer(c_lengths))
	i := 0
	for key, value := range pairs {
		// key
		c_key_ptr := (**C.char)(unsafe.Pointer(uintptr(c_keys) + uintptr(i)*ptrSize))
		*c_key_ptr = C.CString(key)
		// Deferring here is safe: the argument is evaluated now, right
		// after assignment, so each C string is freed at function exit.
		defer C.free(unsafe.Pointer(*c_key_ptr))
		// value and its length
		c_value_ptr := (**C.char)(unsafe.Pointer(uintptr(c_values) + uintptr(i)*ptrSize))
		var c_length C.size_t
		if len(value) > 0 {
			*c_value_ptr = (*C.char)(unsafe.Pointer(&value[0]))
			c_length = C.size_t(len(value))
		} else {
			*c_value_ptr = nil
			c_length = C.size_t(0)
		}
		// NOTE(review): the lengths array is strided by ptrSize, not by
		// unsafe.Sizeof(s); correct only where sizeof(size_t) equals the
		// pointer size (true on common 32/64-bit targets) — confirm.
		c_length_ptr := (*C.size_t)(unsafe.Pointer(uintptr(c_lengths) + uintptr(i)*ptrSize))
		*c_length_ptr = c_length
		i++
	}
	op := C.rados_create_write_op()
	C.rados_write_op_omap_set(
		op,
		(**C.char)(c_keys),
		(**C.char)(c_values),
		(*C.size_t)(c_lengths),
		C.size_t(len(pairs)))
	ret := C.rados_write_op_operate(op, ioctx.ioctx, c_oid, nil, 0)
	C.rados_release_write_op(op)
	return GetRadosError(ret)
}
// OmapListFunc is the type of the function called for each omap key
// visited by ListOmapValues. It receives the key and its value bytes.
type OmapListFunc func(key string, value []byte)
// ListOmapValues iterates on a set of keys and their values from an omap.
// `startAfter`: iterate only on the keys after this specified one
// `filterPrefix`: iterate only on the keys beginning with this prefix
// `maxReturn`: iterate no more than `maxReturn` key/value pairs
// `listFn`: the function called at each iteration
func (ioctx *IOContext) ListOmapValues(oid string, startAfter string, filterPrefix string, maxReturn int64, listFn OmapListFunc) error {
	c_oid := C.CString(oid)
	c_start_after := C.CString(startAfter)
	c_filter_prefix := C.CString(filterPrefix)
	c_max_return := C.uint64_t(maxReturn)
	defer C.free(unsafe.Pointer(c_oid))
	defer C.free(unsafe.Pointer(c_start_after))
	defer C.free(unsafe.Pointer(c_filter_prefix))
	op := C.rados_create_read_op()
	// Release the read op on every return path; the previous version
	// leaked it (and never ended the omap iterator) when the operation
	// failed and returned early.
	defer C.rados_release_read_op(op)
	var c_iter C.rados_omap_iter_t
	var c_prval C.int
	C.rados_read_op_omap_get_vals(
		op,
		c_start_after,
		c_filter_prefix,
		c_max_return,
		&c_iter,
		&c_prval,
	)
	ret := C.rados_read_op_operate(op, ioctx.ioctx, c_oid, 0)
	if int(c_prval) != 0 {
		return RadosError(int(c_prval))
	} else if int(ret) != 0 {
		return GetRadosError(ret)
	}
	// The iterator is only valid after a successful operate; end it before
	// the op is released (deferred calls run LIFO).
	defer C.rados_omap_get_end(c_iter)
	for {
		var c_key *C.char
		var c_val *C.char
		var c_len C.size_t
		ret = C.rados_omap_get_next(c_iter, &c_key, &c_val, &c_len)
		if int(ret) != 0 {
			return GetRadosError(ret)
		}
		// A nil key signals end of iteration.
		if c_key == nil {
			break
		}
		listFn(C.GoString(c_key), C.GoBytes(unsafe.Pointer(c_val), C.int(c_len)))
	}
	return nil
}
// GetOmapValues fetches a set of keys and their values from an omap and
// returns them as a map.
// `startAfter`: retrieve only the keys after this specified one
// `filterPrefix`: retrieve only the keys beginning with this prefix
// `maxReturn`: retrieve no more than `maxReturn` key/value pairs
func (ioctx *IOContext) GetOmapValues(oid string, startAfter string, filterPrefix string, maxReturn int64) (map[string][]byte, error) {
	results := map[string][]byte{}
	collect := func(key string, value []byte) {
		results[key] = value
	}
	err := ioctx.ListOmapValues(oid, startAfter, filterPrefix, maxReturn, collect)
	return results, err
}
// GetAllOmapValues fetches all the keys and their values from an omap and
// returns them as a map, paging through ListOmapValues in batches.
// `startAfter`: retrieve only the keys after this specified one
// `filterPrefix`: retrieve only the keys beginning with this prefix
// `iteratorSize`: internal number of keys to fetch during a read operation
func (ioctx *IOContext) GetAllOmapValues(oid string, startAfter string, filterPrefix string, iteratorSize int64) (map[string][]byte, error) {
	omap := map[string][]byte{}
	omapSize := 0
	for {
		err := ioctx.ListOmapValues(
			oid, startAfter, filterPrefix, iteratorSize,
			func(key string, value []byte) {
				omap[key] = value
				// Advance the cursor so the next batch resumes after the
				// last key seen in this one.
				startAfter = key
			},
		)
		if err != nil {
			return omap, err
		}
		// End of omap: a batch that added no new keys to the map means
		// iteration is complete.
		if len(omap) == omapSize {
			break
		}
		omapSize = len(omap)
	}
	return omap, nil
}
// RmOmapKeys removes the specified `keys` from the omap `oid`.
func (ioctx *IOContext) RmOmapKeys(oid string, keys []string) error {
	c_oid := C.CString(oid)
	defer C.free(unsafe.Pointer(c_oid))
	var c *C.char
	ptrSize := unsafe.Sizeof(c)
	// Manually laid-out C array of key pointers.
	c_keys := C.malloc(C.size_t(len(keys)) * C.size_t(ptrSize))
	defer C.free(unsafe.Pointer(c_keys))
	i := 0
	for _, key := range keys {
		c_key_ptr := (**C.char)(unsafe.Pointer(uintptr(c_keys) + uintptr(i)*ptrSize))
		*c_key_ptr = C.CString(key)
		// Deferring here is safe: the argument is evaluated now, so each
		// C string is freed at function exit.
		defer C.free(unsafe.Pointer(*c_key_ptr))
		i++
	}
	op := C.rados_create_write_op()
	C.rados_write_op_omap_rm_keys(
		op,
		(**C.char)(c_keys),
		C.size_t(len(keys)))
	ret := C.rados_write_op_operate(op, ioctx.ioctx, c_oid, nil, 0)
	C.rados_release_write_op(op)
	return GetRadosError(ret)
}
// CleanOmap clears (removes all keys from) the omap `oid`.
func (ioctx *IOContext) CleanOmap(oid string) error {
	c_oid := C.CString(oid)
	defer C.free(unsafe.Pointer(c_oid))
	op := C.rados_create_write_op()
	C.rados_write_op_omap_clear(op)
	ret := C.rados_write_op_operate(op, ioctx.ioctx, c_oid, nil, 0)
	C.rados_release_write_op(op)
	return GetRadosError(ret)
}
// Iter wraps a librados object-list cursor together with the last entry
// and error observed by Next.
type Iter struct {
	ctx C.rados_list_ctx_t
	err error
	entry string
}
// IterToken marks a position in the object listing; returned by Token and
// consumed by Seek.
type IterToken uint32
// Iter returns an Iterator object that can be used to list the object names
// in the current pool. The caller must call Close on the iterator when done.
func (ioctx *IOContext) Iter() (*Iter, error) {
	iter := Iter{}
	if cerr := C.rados_objects_list_open(ioctx.ioctx, &iter.ctx); cerr < 0 {
		return nil, GetRadosError(cerr)
	}
	return &iter, nil
}
// Token returns a token marking the current position of the iterator. To be
// used in combination with Iter.Seek().
func (iter *Iter) Token() IterToken {
	return IterToken(C.rados_objects_list_get_pg_hash_position(iter.ctx))
}
// Seek repositions the iterator to the position described by token,
// as previously obtained from Token.
func (iter *Iter) Seek(token IterToken) {
	C.rados_objects_list_seek(iter.ctx, C.uint32_t(token))
}
// Next retrieves the next object name in the pool/namespace iterator.
// Upon a successful invocation (return value of true), the Value method should
// be used to obtain the name of the retrieved object name. When the iterator is
// exhausted, Next returns false. The Err method should used to verify whether the
// end of the iterator was reached, or the iterator received an error.
//
// Example:
//	iter := pool.Iter()
//	defer iter.Close()
//	for iter.Next() {
//		fmt.Printf("%v\n", iter.Value())
//	}
//	return iter.Err()
//
func (iter *Iter) Next() bool {
	var c_entry *C.char
	if cerr := C.rados_objects_list_next(iter.ctx, &c_entry, nil); cerr < 0 {
		// Normal end of iteration also lands here (as RadosErrorNotFound),
		// which Err() later reports as nil.
		iter.err = GetRadosError(cerr)
		return false
	}
	iter.entry = C.GoString(c_entry)
	return true
}
// Value returns the current value of the iterator (object name), after a
// successful call to Next. It returns "" once an error has been recorded.
func (iter *Iter) Value() string {
	if iter.err != nil {
		return ""
	}
	return iter.entry
}
// Err checks whether the iterator has encountered an error.
// A RadosErrorNotFound recorded by Next marks the normal end of iteration
// and is reported as nil.
func (iter *Iter) Err() error {
	if iter.err == RadosErrorNotFound {
		return nil
	}
	return iter.err
}
// Close closes the iterator cursor on the server. Be aware that iterators
// are not closed automatically at the end of iteration.
func (iter *Iter) Close() {
	C.rados_objects_list_close(iter.ctx)
}
|
package messenger
import (
"fmt"
"math/rand"
"net"
"net/http"
"net/http/httptest"
"strconv"
"sync"
"testing"
"time"
"github.com/gogo/protobuf/proto"
"github.com/mesos/mesos-go/messenger/testmessage"
"github.com/mesos/mesos-go/upid"
"github.com/stretchr/testify/assert"
"golang.org/x/net/context"
)
var (
	// startPort is the next port handed out by getNewPort; randomized at
	// startup to reduce collisions between test runs.
	startPort = 10000 + rand.Intn(30000)
	// globalWG is shared by the noopHandler-based send/recv benchmarks.
	globalWG = new(sync.WaitGroup)
)
// noopHandler discards the message and only signals receipt on globalWG.
func noopHandler(*upid.UPID, proto.Message) {
	globalWG.Done()
}
// getNewPort returns the next port in a process-wide sequence.
// TODO(review): sequential allocation from a random base is a flake risk;
// tests are better off listening on port 0 and querying the listener for
// the actually-bound port.
func getNewPort() int {
	startPort++
	return startPort
}
// shuffleMessages permutes queue in place via a Fisher-Yates style pass
// driven by the package-level math/rand source.
func shuffleMessages(queue *[]proto.Message) {
	q := *queue
	for i := range q {
		j := rand.Intn(i + 1)
		q[i], q[j] = q[j], q[i]
	}
}
// generateSmallMessages returns n freshly generated small test messages.
func generateSmallMessages(n int) []proto.Message {
	out := make([]proto.Message, 0, n)
	for len(out) < n {
		out = append(out, testmessage.GenerateSmallMessage())
	}
	return out
}
// generateMediumMessages returns n freshly generated medium test messages.
func generateMediumMessages(n int) []proto.Message {
	out := make([]proto.Message, 0, n)
	for len(out) < n {
		out = append(out, testmessage.GenerateMediumMessage())
	}
	return out
}
// generateBigMessages returns n freshly generated big test messages.
func generateBigMessages(n int) []proto.Message {
	out := make([]proto.Message, 0, n)
	for len(out) < n {
		out = append(out, testmessage.GenerateBigMessage())
	}
	return out
}
// generateLargeMessages returns n freshly generated large test messages.
func generateLargeMessages(n int) []proto.Message {
	out := make([]proto.Message, 0, n)
	for len(out) < n {
		out = append(out, testmessage.GenerateLargeMessage())
	}
	return out
}
// generateMixedMessages returns n*4 messages (n of each of the four sizes,
// generated in small/medium/big/large order) and then shuffles them.
func generateMixedMessages(n int) []proto.Message {
	out := make([]proto.Message, 0, n*4)
	for i := 0; i < n; i++ {
		out = append(out,
			testmessage.GenerateSmallMessage(),
			testmessage.GenerateMediumMessage(),
			testmessage.GenerateBigMessage(),
			testmessage.GenerateLargeMessage(),
		)
	}
	shuffleMessages(&out)
	return out
}
// installMessages registers one handler per test message type on m. Each
// handler appends the received message to queue and bumps its slot in
// counts; done is closed once every counter reaches cap(*queue)/4.
// NOTE(review): the handlers mutate queue/counts without synchronization —
// presumably delivery is serialized by the messenger; confirm.
func installMessages(t *testing.T, m Messenger, queue *[]proto.Message, counts *[]int, done chan struct{}) {
	testCounts := func(counts []int, done chan struct{}) {
		for i := range counts {
			if counts[i] != cap(*queue)/4 {
				return
			}
		}
		close(done)
	}
	hander1 := func(from *upid.UPID, pbMsg proto.Message) {
		(*queue) = append(*queue, pbMsg)
		(*counts)[0]++
		testCounts(*counts, done)
	}
	hander2 := func(from *upid.UPID, pbMsg proto.Message) {
		(*queue) = append(*queue, pbMsg)
		(*counts)[1]++
		testCounts(*counts, done)
	}
	hander3 := func(from *upid.UPID, pbMsg proto.Message) {
		(*queue) = append(*queue, pbMsg)
		(*counts)[2]++
		testCounts(*counts, done)
	}
	hander4 := func(from *upid.UPID, pbMsg proto.Message) {
		(*queue) = append(*queue, pbMsg)
		(*counts)[3]++
		testCounts(*counts, done)
	}
	assert.NoError(t, m.Install(hander1, &testmessage.SmallMessage{}))
	assert.NoError(t, m.Install(hander2, &testmessage.MediumMessage{}))
	assert.NoError(t, m.Install(hander3, &testmessage.BigMessage{}))
	assert.NoError(t, m.Install(hander4, &testmessage.LargeMessage{}))
}
func runTestServer(b *testing.B, wg *sync.WaitGroup) *httptest.Server {
mux := http.NewServeMux()
mux.HandleFunc("/testserver/mesos.internal.SmallMessage", func(http.ResponseWriter, *http.Request) {
wg.Done()
})
mux.HandleFunc("/testserver/mesos.internal.MediumMessage", func(http.ResponseWriter, *http.Request) {
wg.Done()
})
mux.HandleFunc("/testserver/mesos.internal.BigMessage", func(http.ResponseWriter, *http.Request) {
wg.Done()
})
mux.HandleFunc("/testserver/mesos.internal.LargeMessage", func(http.ResponseWriter, *http.Request) {
wg.Done()
})
return httptest.NewServer(mux)
}
// TestMessengerFailToInstall verifies that installing a second handler for
// an already-registered message type returns an error.
func TestMessengerFailToInstall(t *testing.T) {
	m := NewHttp(&upid.UPID{ID: "mesos"})
	handler := func(from *upid.UPID, pbMsg proto.Message) {}
	assert.NotNil(t, m)
	assert.NoError(t, m.Install(handler, &testmessage.SmallMessage{}))
	assert.Error(t, m.Install(handler, &testmessage.SmallMessage{}))
}
// TestMessengerFailToStart verifies that a second messenger bound to the
// same host:port fails to start.
func TestMessengerFailToStart(t *testing.T) {
	port := strconv.Itoa(getNewPort())
	m1 := NewHttp(&upid.UPID{ID: "mesos", Host: "localhost", Port: port})
	m2 := NewHttp(&upid.UPID{ID: "mesos", Host: "localhost", Port: port})
	assert.NoError(t, m1.Start())
	assert.Error(t, m2.Start())
}
// TestMessengerFailToSend verifies that sending a message to the
// messenger's own UPID is rejected.
func TestMessengerFailToSend(t *testing.T) {
	upid, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
	assert.NoError(t, err)
	m := NewHttp(upid)
	assert.NoError(t, m.Start())
	assert.Error(t, m.Send(context.TODO(), upid, &testmessage.SmallMessage{}))
}
// TestMessenger sends 4000 mixed messages from m1 to m2 and verifies that
// every handler saw its 1000 messages (within a 10s deadline) and that the
// receive order matches the send order.
func TestMessenger(t *testing.T) {
	messages := generateMixedMessages(1000)
	upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
	assert.NoError(t, err)
	upid2, err := upid.Parse(fmt.Sprintf("mesos2@localhost:%d", getNewPort()))
	assert.NoError(t, err)
	m1 := NewHttp(upid1)
	m2 := NewHttp(upid2)
	done := make(chan struct{})
	counts := make([]int, 4)
	msgQueue := make([]proto.Message, 0, len(messages))
	installMessages(t, m2, &msgQueue, &counts, done)
	assert.NoError(t, m1.Start())
	assert.NoError(t, m2.Start())
	go func() {
		for _, msg := range messages {
			assert.NoError(t, m1.Send(context.TODO(), upid2, msg))
		}
	}()
	select {
	case <-time.After(time.Second * 10):
		t.Fatalf("Timeout")
	case <-done:
	}
	for i := range counts {
		assert.Equal(t, 1000, counts[i])
	}
	assert.Equal(t, messages, msgQueue)
}
// BenchmarkMessengerSendSmallMessage measures one-way send throughput of
// small messages against a stub HTTP server.
func BenchmarkMessengerSendSmallMessage(b *testing.B) {
	messages := generateSmallMessages(1000)
	wg := new(sync.WaitGroup)
	wg.Add(b.N)
	srv := runTestServer(b, wg)
	defer srv.Close()
	upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	upid2, err := upid.Parse(fmt.Sprintf("testserver@%s", srv.Listener.Addr().String()))
	assert.NoError(b, err)
	m1 := NewHttp(upid1)
	assert.NoError(b, m1.Start())
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m1.Send(context.TODO(), upid2, messages[i%1000])
	}
	wg.Wait()
}
// BenchmarkMessengerSendMediumMessage measures one-way send throughput of
// medium messages against a stub HTTP server.
func BenchmarkMessengerSendMediumMessage(b *testing.B) {
	messages := generateMediumMessages(1000)
	wg := new(sync.WaitGroup)
	wg.Add(b.N)
	srv := runTestServer(b, wg)
	defer srv.Close()
	upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	upid2, err := upid.Parse(fmt.Sprintf("testserver@%s", srv.Listener.Addr().String()))
	assert.NoError(b, err)
	m1 := NewHttp(upid1)
	assert.NoError(b, m1.Start())
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m1.Send(context.TODO(), upid2, messages[i%1000])
	}
	wg.Wait()
}
// BenchmarkMessengerSendBigMessage measures one-way send throughput of
// big messages against a stub HTTP server.
func BenchmarkMessengerSendBigMessage(b *testing.B) {
	messages := generateBigMessages(1000)
	wg := new(sync.WaitGroup)
	wg.Add(b.N)
	srv := runTestServer(b, wg)
	defer srv.Close()
	upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	upid2, err := upid.Parse(fmt.Sprintf("testserver@%s", srv.Listener.Addr().String()))
	assert.NoError(b, err)
	m1 := NewHttp(upid1)
	assert.NoError(b, m1.Start())
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m1.Send(context.TODO(), upid2, messages[i%1000])
	}
	wg.Wait()
}
// BenchmarkMessengerSendLargeMessage measures one-way send throughput of
// large messages against a stub HTTP server.
func BenchmarkMessengerSendLargeMessage(b *testing.B) {
	messages := generateLargeMessages(1000)
	wg := new(sync.WaitGroup)
	wg.Add(b.N)
	srv := runTestServer(b, wg)
	defer srv.Close()
	upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	upid2, err := upid.Parse(fmt.Sprintf("testserver@%s", srv.Listener.Addr().String()))
	assert.NoError(b, err)
	m1 := NewHttp(upid1)
	assert.NoError(b, m1.Start())
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m1.Send(context.TODO(), upid2, messages[i%1000])
	}
	wg.Wait()
}
// BenchmarkMessengerSendMixedMessage measures one-way send throughput of
// a shuffled mix of all four message sizes against a stub HTTP server.
func BenchmarkMessengerSendMixedMessage(b *testing.B) {
	messages := generateMixedMessages(1000)
	wg := new(sync.WaitGroup)
	wg.Add(b.N)
	srv := runTestServer(b, wg)
	defer srv.Close()
	upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	upid2, err := upid.Parse(fmt.Sprintf("testserver@%s", srv.Listener.Addr().String()))
	assert.NoError(b, err)
	m1 := NewHttp(upid1)
	assert.NoError(b, m1.Start())
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m1.Send(context.TODO(), upid2, messages[i%1000])
	}
	wg.Wait()
}
// BenchmarkMessengerSendRecvSmallMessage measures send+receive throughput
// of small messages between two live messengers.
func BenchmarkMessengerSendRecvSmallMessage(b *testing.B) {
	globalWG.Add(b.N)
	messages := generateSmallMessages(1000)
	upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	upid2, err := upid.Parse(fmt.Sprintf("mesos2@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	m1 := NewHttp(upid1)
	m2 := NewHttp(upid2)
	assert.NoError(b, m1.Start())
	assert.NoError(b, m2.Start())
	assert.NoError(b, m2.Install(noopHandler, &testmessage.SmallMessage{}))
	time.Sleep(time.Second) // Avoid race on upid.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m1.Send(context.TODO(), upid2, messages[i%1000])
	}
	globalWG.Wait()
}
// BenchmarkMessengerSendRecvMediumMessage measures send+receive throughput
// of medium messages between two live messengers.
func BenchmarkMessengerSendRecvMediumMessage(b *testing.B) {
	globalWG.Add(b.N)
	messages := generateMediumMessages(1000)
	upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	upid2, err := upid.Parse(fmt.Sprintf("mesos2@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	m1 := NewHttp(upid1)
	m2 := NewHttp(upid2)
	assert.NoError(b, m1.Start())
	assert.NoError(b, m2.Start())
	assert.NoError(b, m2.Install(noopHandler, &testmessage.MediumMessage{}))
	time.Sleep(time.Second) // Avoid race on upid.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m1.Send(context.TODO(), upid2, messages[i%1000])
	}
	globalWG.Wait()
}
// BenchmarkMessengerSendRecvBigMessage measures send+receive throughput
// of big messages between two live messengers.
func BenchmarkMessengerSendRecvBigMessage(b *testing.B) {
	globalWG.Add(b.N)
	messages := generateBigMessages(1000)
	upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	upid2, err := upid.Parse(fmt.Sprintf("mesos2@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	m1 := NewHttp(upid1)
	m2 := NewHttp(upid2)
	assert.NoError(b, m1.Start())
	assert.NoError(b, m2.Start())
	assert.NoError(b, m2.Install(noopHandler, &testmessage.BigMessage{}))
	time.Sleep(time.Second) // Avoid race on upid.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m1.Send(context.TODO(), upid2, messages[i%1000])
	}
	globalWG.Wait()
}
// BenchmarkMessengerSendRecvLargeMessage measures send+receive throughput
// of large messages between two live messengers.
func BenchmarkMessengerSendRecvLargeMessage(b *testing.B) {
	globalWG.Add(b.N)
	messages := generateLargeMessages(1000)
	upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	upid2, err := upid.Parse(fmt.Sprintf("mesos2@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	m1 := NewHttp(upid1)
	m2 := NewHttp(upid2)
	assert.NoError(b, m1.Start())
	assert.NoError(b, m2.Start())
	assert.NoError(b, m2.Install(noopHandler, &testmessage.LargeMessage{}))
	time.Sleep(time.Second) // Avoid race on upid.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m1.Send(context.TODO(), upid2, messages[i%1000])
	}
	globalWG.Wait()
}
// BenchmarkMessengerSendRecvMixedMessage measures send+receive throughput
// of a shuffled mix of all four message sizes between two live messengers.
func BenchmarkMessengerSendRecvMixedMessage(b *testing.B) {
	globalWG.Add(b.N)
	messages := generateMixedMessages(1000)
	upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	upid2, err := upid.Parse(fmt.Sprintf("mesos2@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	m1 := NewHttp(upid1)
	m2 := NewHttp(upid2)
	assert.NoError(b, m1.Start())
	assert.NoError(b, m2.Start())
	assert.NoError(b, m2.Install(noopHandler, &testmessage.SmallMessage{}))
	assert.NoError(b, m2.Install(noopHandler, &testmessage.MediumMessage{}))
	assert.NoError(b, m2.Install(noopHandler, &testmessage.BigMessage{}))
	assert.NoError(b, m2.Install(noopHandler, &testmessage.LargeMessage{}))
	time.Sleep(time.Second) // Avoid race on upid.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m1.Send(context.TODO(), upid2, messages[i%1000])
	}
	globalWG.Wait()
}
// TestUPIDBindingAddress table-tests UPIDBindingAddress: a non-zero binding
// IP wins over the hostname, 0.0.0.0/empty fall back to resolving the
// hostname, and unresolvable combinations yield an error (expected "" cases).
func TestUPIDBindingAddress(t *testing.T) {
	tt := []struct {
		hostname string
		binding net.IP
		expected string
	}{
		{"", nil, ""},
		{"", net.IPv4(1, 2, 3, 4), "1.2.3.4"},
		{"", net.IPv4(0, 0, 0, 0), ""},
		{"localhost", nil, "127.0.0.1"},
		{"localhost", net.IPv4(5, 6, 7, 8), "5.6.7.8"},
		{"localhost", net.IPv4(0, 0, 0, 0), "127.0.0.1"},
		{"0.0.0.0", nil, ""},
		{"7.8.9.1", nil, "7.8.9.1"},
		{"7.8.9.1", net.IPv4(0, 0, 0, 0), "7.8.9.1"},
		{"7.8.9.1", net.IPv4(8, 9, 1, 2), "8.9.1.2"},
	}
	for i, tc := range tt {
		actual, err := UPIDBindingAddress(tc.hostname, tc.binding)
		if err != nil && tc.expected != "" {
			t.Fatalf("test case %d failed; expected %q instead of error %v", i+1, tc.expected, err)
		}
		if err == nil && actual != tc.expected {
			t.Fatalf("test case %d failed; expected %q instead of %q", i+1, tc.expected, actual)
		}
		if err != nil {
			t.Logf("test case %d; received expected error %v", i+1, err)
		}
	}
}
Added a TODO noting that getNewPort's sequential port allocation is a test-flake risk; tests should bind port 0 and query the listener for the actual port instead.
package messenger
import (
"fmt"
"math/rand"
"net"
"net/http"
"net/http/httptest"
"strconv"
"sync"
"testing"
"time"
"github.com/gogo/protobuf/proto"
"github.com/mesos/mesos-go/messenger/testmessage"
"github.com/mesos/mesos-go/upid"
"github.com/stretchr/testify/assert"
"golang.org/x/net/context"
)
var (
	// startPort is the next port handed out by getNewPort; randomized at
	// startup to reduce collisions between test runs.
	startPort = 10000 + rand.Intn(30000)
	// globalWG is shared by the noopHandler-based send/recv benchmarks.
	globalWG = new(sync.WaitGroup)
)
// noopHandler discards the message and only signals receipt on globalWG.
func noopHandler(*upid.UPID, proto.Message) {
	globalWG.Done()
}
// TODO(jdef) this is a test flake waiting to happen. unit tests are better off listening on
// a random port (port=0) and querying the listener later (need a way to expose the port, perhaps
// by querying the UPID)
// getNewPort returns the next port in a process-wide, unsynchronized sequence.
func getNewPort() int {
	startPort++
	return startPort
}
// shuffleMessages permutes queue in place via a Fisher-Yates style pass
// driven by the package-level math/rand source.
func shuffleMessages(queue *[]proto.Message) {
	q := *queue
	for i := range q {
		j := rand.Intn(i + 1)
		q[i], q[j] = q[j], q[i]
	}
}
// generateSmallMessages returns n freshly generated small test messages.
func generateSmallMessages(n int) []proto.Message {
	out := make([]proto.Message, 0, n)
	for len(out) < n {
		out = append(out, testmessage.GenerateSmallMessage())
	}
	return out
}
// generateMediumMessages returns n freshly generated medium test messages.
func generateMediumMessages(n int) []proto.Message {
	out := make([]proto.Message, 0, n)
	for len(out) < n {
		out = append(out, testmessage.GenerateMediumMessage())
	}
	return out
}
// generateBigMessages returns n freshly generated big test messages.
func generateBigMessages(n int) []proto.Message {
	out := make([]proto.Message, 0, n)
	for len(out) < n {
		out = append(out, testmessage.GenerateBigMessage())
	}
	return out
}
// generateLargeMessages returns n freshly generated large test messages.
func generateLargeMessages(n int) []proto.Message {
	out := make([]proto.Message, 0, n)
	for len(out) < n {
		out = append(out, testmessage.GenerateLargeMessage())
	}
	return out
}
// generateMixedMessages returns n*4 messages (n of each of the four sizes,
// generated in small/medium/big/large order) and then shuffles them.
func generateMixedMessages(n int) []proto.Message {
	out := make([]proto.Message, 0, n*4)
	for i := 0; i < n; i++ {
		out = append(out,
			testmessage.GenerateSmallMessage(),
			testmessage.GenerateMediumMessage(),
			testmessage.GenerateBigMessage(),
			testmessage.GenerateLargeMessage(),
		)
	}
	shuffleMessages(&out)
	return out
}
// installMessages registers one handler per test message type on m. Each
// handler appends the received message to queue and bumps its slot in
// counts; done is closed once every counter reaches cap(*queue)/4.
// NOTE(review): the handlers mutate queue/counts without synchronization —
// presumably delivery is serialized by the messenger; confirm.
func installMessages(t *testing.T, m Messenger, queue *[]proto.Message, counts *[]int, done chan struct{}) {
	testCounts := func(counts []int, done chan struct{}) {
		for i := range counts {
			if counts[i] != cap(*queue)/4 {
				return
			}
		}
		close(done)
	}
	hander1 := func(from *upid.UPID, pbMsg proto.Message) {
		(*queue) = append(*queue, pbMsg)
		(*counts)[0]++
		testCounts(*counts, done)
	}
	hander2 := func(from *upid.UPID, pbMsg proto.Message) {
		(*queue) = append(*queue, pbMsg)
		(*counts)[1]++
		testCounts(*counts, done)
	}
	hander3 := func(from *upid.UPID, pbMsg proto.Message) {
		(*queue) = append(*queue, pbMsg)
		(*counts)[2]++
		testCounts(*counts, done)
	}
	hander4 := func(from *upid.UPID, pbMsg proto.Message) {
		(*queue) = append(*queue, pbMsg)
		(*counts)[3]++
		testCounts(*counts, done)
	}
	assert.NoError(t, m.Install(hander1, &testmessage.SmallMessage{}))
	assert.NoError(t, m.Install(hander2, &testmessage.MediumMessage{}))
	assert.NoError(t, m.Install(hander3, &testmessage.BigMessage{}))
	assert.NoError(t, m.Install(hander4, &testmessage.LargeMessage{}))
}
func runTestServer(b *testing.B, wg *sync.WaitGroup) *httptest.Server {
mux := http.NewServeMux()
mux.HandleFunc("/testserver/mesos.internal.SmallMessage", func(http.ResponseWriter, *http.Request) {
wg.Done()
})
mux.HandleFunc("/testserver/mesos.internal.MediumMessage", func(http.ResponseWriter, *http.Request) {
wg.Done()
})
mux.HandleFunc("/testserver/mesos.internal.BigMessage", func(http.ResponseWriter, *http.Request) {
wg.Done()
})
mux.HandleFunc("/testserver/mesos.internal.LargeMessage", func(http.ResponseWriter, *http.Request) {
wg.Done()
})
return httptest.NewServer(mux)
}
// TestMessengerFailToInstall verifies that installing a second handler for
// an already-registered message type returns an error.
func TestMessengerFailToInstall(t *testing.T) {
	m := NewHttp(&upid.UPID{ID: "mesos"})
	handler := func(from *upid.UPID, pbMsg proto.Message) {}
	assert.NotNil(t, m)
	assert.NoError(t, m.Install(handler, &testmessage.SmallMessage{}))
	assert.Error(t, m.Install(handler, &testmessage.SmallMessage{}))
}
// TestMessengerFailToStart verifies that a second messenger bound to the
// same host:port fails to start.
func TestMessengerFailToStart(t *testing.T) {
	port := strconv.Itoa(getNewPort())
	m1 := NewHttp(&upid.UPID{ID: "mesos", Host: "localhost", Port: port})
	m2 := NewHttp(&upid.UPID{ID: "mesos", Host: "localhost", Port: port})
	assert.NoError(t, m1.Start())
	assert.Error(t, m2.Start())
}
// TestMessengerFailToSend verifies that sending a message to the
// messenger's own UPID is rejected.
func TestMessengerFailToSend(t *testing.T) {
	upid, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
	assert.NoError(t, err)
	m := NewHttp(upid)
	assert.NoError(t, m.Start())
	assert.Error(t, m.Send(context.TODO(), upid, &testmessage.SmallMessage{}))
}
// TestMessenger sends 4000 mixed messages from m1 to m2 and verifies that
// every handler saw its 1000 messages (within a 10s deadline) and that the
// receive order matches the send order.
func TestMessenger(t *testing.T) {
	messages := generateMixedMessages(1000)
	upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
	assert.NoError(t, err)
	upid2, err := upid.Parse(fmt.Sprintf("mesos2@localhost:%d", getNewPort()))
	assert.NoError(t, err)
	m1 := NewHttp(upid1)
	m2 := NewHttp(upid2)
	done := make(chan struct{})
	counts := make([]int, 4)
	msgQueue := make([]proto.Message, 0, len(messages))
	installMessages(t, m2, &msgQueue, &counts, done)
	assert.NoError(t, m1.Start())
	assert.NoError(t, m2.Start())
	go func() {
		for _, msg := range messages {
			assert.NoError(t, m1.Send(context.TODO(), upid2, msg))
		}
	}()
	select {
	case <-time.After(time.Second * 10):
		t.Fatalf("Timeout")
	case <-done:
	}
	for i := range counts {
		assert.Equal(t, 1000, counts[i])
	}
	assert.Equal(t, messages, msgQueue)
}
// BenchmarkMessengerSendSmallMessage measures one-way send throughput of
// small messages against a stub HTTP server.
func BenchmarkMessengerSendSmallMessage(b *testing.B) {
	messages := generateSmallMessages(1000)
	wg := new(sync.WaitGroup)
	wg.Add(b.N)
	srv := runTestServer(b, wg)
	defer srv.Close()
	upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	upid2, err := upid.Parse(fmt.Sprintf("testserver@%s", srv.Listener.Addr().String()))
	assert.NoError(b, err)
	m1 := NewHttp(upid1)
	assert.NoError(b, m1.Start())
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m1.Send(context.TODO(), upid2, messages[i%1000])
	}
	wg.Wait()
}
// BenchmarkMessengerSendMediumMessage measures one-way send throughput of
// medium messages against a stub HTTP server.
func BenchmarkMessengerSendMediumMessage(b *testing.B) {
	messages := generateMediumMessages(1000)
	wg := new(sync.WaitGroup)
	wg.Add(b.N)
	srv := runTestServer(b, wg)
	defer srv.Close()
	upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	upid2, err := upid.Parse(fmt.Sprintf("testserver@%s", srv.Listener.Addr().String()))
	assert.NoError(b, err)
	m1 := NewHttp(upid1)
	assert.NoError(b, m1.Start())
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m1.Send(context.TODO(), upid2, messages[i%1000])
	}
	wg.Wait()
}
func BenchmarkMessengerSendBigMessage(b *testing.B) {
messages := generateBigMessages(1000)
wg := new(sync.WaitGroup)
wg.Add(b.N)
srv := runTestServer(b, wg)
defer srv.Close()
upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
assert.NoError(b, err)
upid2, err := upid.Parse(fmt.Sprintf("testserver@%s", srv.Listener.Addr().String()))
assert.NoError(b, err)
m1 := NewHttp(upid1)
assert.NoError(b, m1.Start())
b.ResetTimer()
for i := 0; i < b.N; i++ {
m1.Send(context.TODO(), upid2, messages[i%1000])
}
wg.Wait()
}
func BenchmarkMessengerSendLargeMessage(b *testing.B) {
messages := generateLargeMessages(1000)
wg := new(sync.WaitGroup)
wg.Add(b.N)
srv := runTestServer(b, wg)
defer srv.Close()
upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
assert.NoError(b, err)
upid2, err := upid.Parse(fmt.Sprintf("testserver@%s", srv.Listener.Addr().String()))
assert.NoError(b, err)
m1 := NewHttp(upid1)
assert.NoError(b, m1.Start())
b.ResetTimer()
for i := 0; i < b.N; i++ {
m1.Send(context.TODO(), upid2, messages[i%1000])
}
wg.Wait()
}
func BenchmarkMessengerSendMixedMessage(b *testing.B) {
messages := generateMixedMessages(1000)
wg := new(sync.WaitGroup)
wg.Add(b.N)
srv := runTestServer(b, wg)
defer srv.Close()
upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
assert.NoError(b, err)
upid2, err := upid.Parse(fmt.Sprintf("testserver@%s", srv.Listener.Addr().String()))
assert.NoError(b, err)
m1 := NewHttp(upid1)
assert.NoError(b, m1.Start())
b.ResetTimer()
for i := 0; i < b.N; i++ {
m1.Send(context.TODO(), upid2, messages[i%1000])
}
wg.Wait()
}
// BenchmarkMessengerSendRecvSmallMessage measures end-to-end send+receive
// throughput between two live messengers. noopHandler presumably calls
// globalWG.Done for each delivered message -- confirm against its
// definition. Send errors are deliberately ignored inside the timed loop.
func BenchmarkMessengerSendRecvSmallMessage(b *testing.B) {
	globalWG.Add(b.N)
	messages := generateSmallMessages(1000)
	upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	upid2, err := upid.Parse(fmt.Sprintf("mesos2@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	m1 := NewHttp(upid1)
	m2 := NewHttp(upid2)
	assert.NoError(b, m1.Start())
	assert.NoError(b, m2.Start())
	assert.NoError(b, m2.Install(noopHandler, &testmessage.SmallMessage{}))
	time.Sleep(time.Second) // Avoid race on upid.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m1.Send(context.TODO(), upid2, messages[i%1000])
	}
	globalWG.Wait()
}

// BenchmarkMessengerSendRecvMediumMessage is the medium-payload variant of
// BenchmarkMessengerSendRecvSmallMessage.
func BenchmarkMessengerSendRecvMediumMessage(b *testing.B) {
	globalWG.Add(b.N)
	messages := generateMediumMessages(1000)
	upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	upid2, err := upid.Parse(fmt.Sprintf("mesos2@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	m1 := NewHttp(upid1)
	m2 := NewHttp(upid2)
	assert.NoError(b, m1.Start())
	assert.NoError(b, m2.Start())
	assert.NoError(b, m2.Install(noopHandler, &testmessage.MediumMessage{}))
	time.Sleep(time.Second) // Avoid race on upid.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m1.Send(context.TODO(), upid2, messages[i%1000])
	}
	globalWG.Wait()
}

// BenchmarkMessengerSendRecvBigMessage is the big-payload variant of
// BenchmarkMessengerSendRecvSmallMessage.
func BenchmarkMessengerSendRecvBigMessage(b *testing.B) {
	globalWG.Add(b.N)
	messages := generateBigMessages(1000)
	upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	upid2, err := upid.Parse(fmt.Sprintf("mesos2@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	m1 := NewHttp(upid1)
	m2 := NewHttp(upid2)
	assert.NoError(b, m1.Start())
	assert.NoError(b, m2.Start())
	assert.NoError(b, m2.Install(noopHandler, &testmessage.BigMessage{}))
	time.Sleep(time.Second) // Avoid race on upid.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m1.Send(context.TODO(), upid2, messages[i%1000])
	}
	globalWG.Wait()
}

// BenchmarkMessengerSendRecvLargeMessage is the large-payload variant of
// BenchmarkMessengerSendRecvSmallMessage.
func BenchmarkMessengerSendRecvLargeMessage(b *testing.B) {
	globalWG.Add(b.N)
	messages := generateLargeMessages(1000)
	upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	upid2, err := upid.Parse(fmt.Sprintf("mesos2@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	m1 := NewHttp(upid1)
	m2 := NewHttp(upid2)
	assert.NoError(b, m1.Start())
	assert.NoError(b, m2.Start())
	assert.NoError(b, m2.Install(noopHandler, &testmessage.LargeMessage{}))
	time.Sleep(time.Second) // Avoid race on upid.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m1.Send(context.TODO(), upid2, messages[i%1000])
	}
	globalWG.Wait()
}

// BenchmarkMessengerSendRecvMixedMessage is the mixed-payload variant of
// BenchmarkMessengerSendRecvSmallMessage; it installs a handler for each of
// the four message types.
func BenchmarkMessengerSendRecvMixedMessage(b *testing.B) {
	globalWG.Add(b.N)
	messages := generateMixedMessages(1000)
	upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	upid2, err := upid.Parse(fmt.Sprintf("mesos2@localhost:%d", getNewPort()))
	assert.NoError(b, err)
	m1 := NewHttp(upid1)
	m2 := NewHttp(upid2)
	assert.NoError(b, m1.Start())
	assert.NoError(b, m2.Start())
	assert.NoError(b, m2.Install(noopHandler, &testmessage.SmallMessage{}))
	assert.NoError(b, m2.Install(noopHandler, &testmessage.MediumMessage{}))
	assert.NoError(b, m2.Install(noopHandler, &testmessage.BigMessage{}))
	assert.NoError(b, m2.Install(noopHandler, &testmessage.LargeMessage{}))
	time.Sleep(time.Second) // Avoid race on upid.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m1.Send(context.TODO(), upid2, messages[i%1000])
	}
	globalWG.Wait()
}
// TestUPIDBindingAddress checks how a hostname and an explicit binding IP
// combine into the address advertised in a UPID; an empty expectation means
// an error is acceptable.
func TestUPIDBindingAddress(t *testing.T) {
	cases := []struct {
		hostname string
		binding  net.IP
		expected string
	}{
		{"", nil, ""},
		{"", net.IPv4(1, 2, 3, 4), "1.2.3.4"},
		{"", net.IPv4(0, 0, 0, 0), ""},
		{"localhost", nil, "127.0.0.1"},
		{"localhost", net.IPv4(5, 6, 7, 8), "5.6.7.8"},
		{"localhost", net.IPv4(0, 0, 0, 0), "127.0.0.1"},
		{"0.0.0.0", nil, ""},
		{"7.8.9.1", nil, "7.8.9.1"},
		{"7.8.9.1", net.IPv4(0, 0, 0, 0), "7.8.9.1"},
		{"7.8.9.1", net.IPv4(8, 9, 1, 2), "8.9.1.2"},
	}
	for i, c := range cases {
		got, err := UPIDBindingAddress(c.hostname, c.binding)
		switch {
		case err != nil && c.expected != "":
			// An error is only tolerated when no concrete address was expected.
			t.Fatalf("test case %d failed; expected %q instead of error %v", i+1, c.expected, err)
		case err == nil && got != c.expected:
			t.Fatalf("test case %d failed; expected %q instead of %q", i+1, c.expected, got)
		case err != nil:
			t.Logf("test case %d; received expected error %v", i+1, err)
		}
	}
}
|
package main
import "testing"
const aroundD4 = (CFile|DFile|EFile)&(Rank3|Rank4|Rank5) ^ (DFile & Rank4)
// TestSouthAttacks checks attackFill sliding south: edge squares that stop
// immediately, full-file fills from the far rank, and d4 with a blocked
// versus partially open neighborhood.
func TestSouthAttacks(t *testing.T) {
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a1.Board(), ^a1.Board(), 0},
		{h1.Board(), ^h1.Board(), 0},
		{a8.Board(), ^a8.Board(), AFile ^ a8.Board()},
		{h8.Board(), ^h8.Board(), HFile ^ h8.Board()},
		{d4.Board(), 0, d3.Board()},
		{d4.Board(), aroundD4, d3.Board() | d2.Board()},
	} {
		if got := attackFill(v.input, v.empty, south); got != v.want {
			t.Errorf("attackFill(%016x, %016x, south): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}

// TestWestAttacks checks attackFill sliding west (see TestSouthAttacks for
// the case layout).
func TestWestAttacks(t *testing.T) {
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a1.Board(), ^a1.Board(), 0},
		{h1.Board(), ^h1.Board(), Rank1 ^ h1.Board()},
		{a8.Board(), ^a8.Board(), 0},
		{h8.Board(), ^h8.Board(), Rank8 ^ h8.Board()},
		{d4.Board(), 0, c4.Board()},
		{d4.Board(), aroundD4, c4.Board() | b4.Board()},
	} {
		if got := attackFill(v.input, v.empty, west); got != v.want {
			t.Errorf("attackFill(%016x, %016x, west): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}

// TestEastAttacks checks attackFill sliding east.
func TestEastAttacks(t *testing.T) {
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a1.Board(), ^a1.Board(), Rank1 ^ a1.Board()},
		{h1.Board(), ^h1.Board(), 0},
		{a8.Board(), ^a8.Board(), Rank8 ^ a8.Board()},
		{h8.Board(), ^h8.Board(), 0},
		{d4.Board(), 0, e4.Board()},
		{d4.Board(), aroundD4, e4.Board() | f4.Board()},
	} {
		if got := attackFill(v.input, v.empty, east); got != v.want {
			t.Errorf("attackFill(%016x, %016x, east): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}

// TestNorthAttacks checks attackFill sliding north.
func TestNorthAttacks(t *testing.T) {
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a1.Board(), ^a1.Board(), AFile ^ a1.Board()},
		{h1.Board(), ^h1.Board(), HFile ^ h1.Board()},
		{a8.Board(), ^a8.Board(), 0},
		{h8.Board(), ^h8.Board(), 0},
		{d4.Board(), 0, d5.Board()},
		{d4.Board(), aroundD4, d5.Board() | d6.Board()},
	} {
		if got := attackFill(v.input, v.empty, north); got != v.want {
			t.Errorf("attackFill(%016x, %016x, north): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}

// TestSouthwestAttacks checks attackFill sliding southwest; h8 sweeps the
// whole long diagonal.
func TestSouthwestAttacks(t *testing.T) {
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a1.Board(), ^a1.Board(), 0},
		{h1.Board(), ^h1.Board(), 0},
		{a8.Board(), ^a8.Board(), 0},
		{h8.Board(), ^h8.Board(), LongDiagonal ^ h8.Board()},
		{d4.Board(), 0, c3.Board()},
		{d4.Board(), aroundD4, c3.Board() | b2.Board()},
	} {
		if got := attackFill(v.input, v.empty, southwest); got != v.want {
			t.Errorf("attackFill(%016x, %016x, southwest): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}

// TestSoutheastAttacks checks attackFill sliding southeast; a8 sweeps the
// whole long anti-diagonal.
func TestSoutheastAttacks(t *testing.T) {
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a1.Board(), ^a1.Board(), 0},
		{h1.Board(), ^h1.Board(), 0},
		{a8.Board(), ^a8.Board(), LongAntiDiagonal ^ a8.Board()},
		{h8.Board(), ^h8.Board(), 0},
		{d4.Board(), 0, e3.Board()},
		{d4.Board(), aroundD4, e3.Board() | f2.Board()},
	} {
		if got := attackFill(v.input, v.empty, southeast); got != v.want {
			t.Errorf("attackFill(%016x, %016x, southeast): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}

// TestNorthwestAttacks checks attackFill sliding northwest.
func TestNorthwestAttacks(t *testing.T) {
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a1.Board(), ^a1.Board(), 0},
		{h1.Board(), ^h1.Board(), LongAntiDiagonal ^ h1.Board()},
		{a8.Board(), ^a8.Board(), 0},
		{h8.Board(), ^h8.Board(), 0},
		{d4.Board(), 0, c5.Board()},
		{d4.Board(), aroundD4, c5.Board() | b6.Board()},
	} {
		if got := attackFill(v.input, v.empty, northwest); got != v.want {
			t.Errorf("attackFill(%016x, %016x, northwest): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}

// TestNortheastAttacks checks attackFill sliding northeast.
func TestNortheastAttacks(t *testing.T) {
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a1.Board(), ^a1.Board(), LongDiagonal ^ a1.Board()},
		{h1.Board(), ^h1.Board(), 0},
		{a8.Board(), ^a8.Board(), 0},
		{h8.Board(), ^h8.Board(), 0},
		{d4.Board(), 0, e5.Board()},
		{d4.Board(), aroundD4, e5.Board() | f6.Board()},
	} {
		if got := attackFill(v.input, v.empty, northeast); got != v.want {
			t.Errorf("attackFill(%016x, %016x, northeast): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}
// TestWhitePawnAdvances checks single and double pushes for white pawns on
// the a-, e-, and h-files: a rank-2 pawn may advance one or two squares when
// unobstructed, one when the double-push square is occupied, and none when
// directly blocked.
func TestWhitePawnAdvances(t *testing.T) {
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a2.Board(), ^a2.Board(), a3.Board() | a4.Board()},
		{a2.Board(), ^(a2.Board() | a4.Board()), a3.Board()},
		{a2.Board(), ^(a2.Board() | a3.Board()), 0},
		{a5.Board(), ^a5.Board(), a6.Board()},
		{a5.Board(), ^(a5.Board() | a6.Board()), 0},
		{a7.Board(), ^a7.Board(), a8.Board()},
		{a7.Board(), ^(a7.Board() | a8.Board()), 0},
		{e2.Board(), ^e2.Board(), e3.Board() | e4.Board()},
		{e2.Board(), ^(e2.Board() | e4.Board()), e3.Board()},
		{e2.Board(), ^(e2.Board() | e3.Board()), 0},
		{e5.Board(), ^e5.Board(), e6.Board()},
		{e5.Board(), ^(e5.Board() | e6.Board()), 0},
		{e7.Board(), ^e7.Board(), e8.Board()},
		{e7.Board(), ^(e7.Board() | e8.Board()), 0},
		{h2.Board(), ^h2.Board(), h3.Board() | h4.Board()},
		{h2.Board(), ^(h2.Board() | h4.Board()), h3.Board()},
		{h2.Board(), ^(h2.Board() | h3.Board()), 0},
		{h5.Board(), ^h5.Board(), h6.Board()},
		{h5.Board(), ^(h5.Board() | h6.Board()), 0},
		{h7.Board(), ^h7.Board(), h8.Board()},
		{h7.Board(), ^(h7.Board() | h8.Board()), 0},
	} {
		if got := whitePawnAdvances(v.input, v.empty); got != v.want {
			t.Errorf("whitePawnAdvances(%016x, %016x): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}

// TestBlackPawnAdvances mirrors TestWhitePawnAdvances for black pawns
// moving down the board (rank 7 gets the double push).
func TestBlackPawnAdvances(t *testing.T) {
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a7.Board(), ^a7.Board(), a6.Board() | a5.Board()},
		{a7.Board(), ^(a7.Board() | a5.Board()), a6.Board()},
		{a7.Board(), ^(a7.Board() | a6.Board()), 0},
		{a4.Board(), ^a4.Board(), a3.Board()},
		{a4.Board(), ^(a4.Board() | a3.Board()), 0},
		{a2.Board(), ^a2.Board(), a1.Board()},
		{a2.Board(), ^(a2.Board() | a1.Board()), 0},
		{e7.Board(), ^e7.Board(), e6.Board() | e5.Board()},
		{e7.Board(), ^(e7.Board() | e5.Board()), e6.Board()},
		{e7.Board(), ^(e7.Board() | e6.Board()), 0},
		{e4.Board(), ^e4.Board(), e3.Board()},
		{e4.Board(), ^(e4.Board() | e3.Board()), 0},
		{e2.Board(), ^e2.Board(), e1.Board()},
		{e2.Board(), ^(e2.Board() | e1.Board()), 0},
		{h7.Board(), ^h7.Board(), h6.Board() | h5.Board()},
		{h7.Board(), ^(h7.Board() | h5.Board()), h6.Board()},
		{h7.Board(), ^(h7.Board() | h6.Board()), 0},
		{h4.Board(), ^h4.Board(), h3.Board()},
		{h4.Board(), ^(h4.Board() | h3.Board()), 0},
		{h2.Board(), ^h2.Board(), h1.Board()},
		{h2.Board(), ^(h2.Board() | h1.Board()), 0},
	} {
		if got := blackPawnAdvances(v.input, v.empty); got != v.want {
			t.Errorf("blackPawnAdvances(%016x, %016x): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}
// TestWhitePawnAttacks checks white pawn capture targets: edge pawns attack
// one diagonal, center pawns two; the empty mask removes squares with no
// capturable piece.
func TestWhitePawnAttacks(t *testing.T) {
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a2.Board(), 0, b3.Board()},
		{a2.Board(), BFile, 0},
		{a5.Board(), 0, b6.Board()},
		{a5.Board(), BFile, 0},
		{a7.Board(), 0, b8.Board()},
		{a7.Board(), BFile, 0},
		{e2.Board(), 0, d3.Board() | f3.Board()},
		{e2.Board(), DFile, f3.Board()},
		{e2.Board(), FFile, d3.Board()},
		{e2.Board(), DFile | FFile, 0},
		{e5.Board(), 0, d6.Board() | f6.Board()},
		{e5.Board(), DFile, f6.Board()},
		{e5.Board(), FFile, d6.Board()},
		{e5.Board(), DFile | FFile, 0},
		{e7.Board(), 0, d8.Board() | f8.Board()},
		{e7.Board(), DFile, f8.Board()},
		{e7.Board(), FFile, d8.Board()},
		{e7.Board(), DFile | FFile, 0},
		{h2.Board(), 0, g3.Board()},
		{h2.Board(), GFile, 0},
		{h5.Board(), 0, g6.Board()},
		{h5.Board(), GFile, 0},
		{h7.Board(), 0, g8.Board()},
		{h7.Board(), GFile, 0},
	} {
		if got := whitePawnAttacks(v.input, v.empty); got != v.want {
			t.Errorf("whitePawnAttacks(%016x, %016x): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}

// TestBlackPawnAttacks mirrors TestWhitePawnAttacks for black pawns
// capturing toward rank 1.
func TestBlackPawnAttacks(t *testing.T) {
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a7.Board(), 0, b6.Board()},
		{a7.Board(), BFile, 0},
		{a4.Board(), 0, b3.Board()},
		{a4.Board(), BFile, 0},
		{a2.Board(), 0, b1.Board()},
		{a2.Board(), BFile, 0},
		{e7.Board(), 0, d6.Board() | f6.Board()},
		{e7.Board(), DFile, f6.Board()},
		{e7.Board(), FFile, d6.Board()},
		{e7.Board(), DFile | FFile, 0},
		{e4.Board(), 0, d3.Board() | f3.Board()},
		{e4.Board(), DFile, f3.Board()},
		{e4.Board(), FFile, d3.Board()},
		{e4.Board(), DFile | FFile, 0},
		{e2.Board(), 0, d1.Board() | f1.Board()},
		{e2.Board(), DFile, f1.Board()},
		{e2.Board(), FFile, d1.Board()},
		{e2.Board(), DFile | FFile, 0},
		{h7.Board(), 0, g6.Board()},
		{h7.Board(), GFile, 0},
		{h4.Board(), 0, g3.Board()},
		{h4.Board(), GFile, 0},
		{h2.Board(), 0, g1.Board()},
		{h2.Board(), GFile, 0},
	} {
		if got := blackPawnAttacks(v.input, v.empty); got != v.want {
			t.Errorf("blackPawnAttacks(%016x, %016x): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}
// TestBishopAttacks checks diagonal attack generation on an otherwise empty
// board and around d4; the shifted long (anti)diagonals build the e7
// expectation.
func TestBishopAttacks(t *testing.T) {
	var ld, la = LongDiagonal, LongAntiDiagonal
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a1.Board(), ^a1.Board(), LongDiagonal ^ a1.Board()},
		{e7.Board(), ^e7.Board(), (ld<<16 | la<<24) ^ e7.Board()},
		{d4.Board(), 0, c3.Board() | e3.Board() | c5.Board() | e5.Board()},
		{d4.Board(), aroundD4, b2.Board() | f2.Board() | c3.Board() | e3.Board() | c5.Board() | e5.Board() | b6.Board() | f6.Board()},
	} {
		if got := bishopAttacks(v.input, v.empty); got != v.want {
			t.Errorf("bishopAttacks(%016x, %016x): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}

// TestRookAttacks checks orthogonal attack generation for the same squares.
func TestRookAttacks(t *testing.T) {
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a1.Board(), ^a1.Board(), (AFile | Rank1) ^ a1.Board()},
		{e7.Board(), ^e7.Board(), (EFile | Rank7) ^ e7.Board()},
		{d4.Board(), 0, d3.Board() | c4.Board() | e4.Board() | d5.Board()},
		{d4.Board(), aroundD4, d2.Board() | d3.Board() | b4.Board() | c4.Board() | e4.Board() | f4.Board() | d5.Board() | d6.Board()},
	} {
		if got := rookAttacks(v.input, v.empty); got != v.want {
			t.Errorf("rookAttacks(%016x, %016x): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}

// TestQueenAttacks checks that queen moves are the union of rook and bishop
// moves; on a blocked board the queen on d4 attacks exactly its ring.
func TestQueenAttacks(t *testing.T) {
	var ld, la = LongDiagonal, LongAntiDiagonal
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a1.Board(), ^a1.Board(), (AFile | Rank1 | LongDiagonal) ^ a1.Board()},
		{e7.Board(), ^e7.Board(), (EFile | Rank7 | ld<<16 | la<<24) ^ e7.Board()},
		{d4.Board(), 0, aroundD4},
		{d4.Board(), aroundD4, aroundD4 | b2.Board() | d2.Board() | f2.Board() | b4.Board() | f4.Board() | b6.Board() | d6.Board() | f6.Board()},
	} {
		if got := queenAttacks(v.input, v.empty); got != v.want {
			t.Errorf("queenAttacks(%016x, %016x): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}
// TestIsAttacked spot-checks IsAttacked on the initial position for squares
// defended (or not) by each color.
func TestIsAttacked(t *testing.T) {
	for _, v := range []struct {
		pos  Position
		s    Square
		c    Color
		want bool
	}{
		{InitialPosition, a1, White, false},
		{InitialPosition, b1, White, true},
		{InitialPosition, b1, Black, false},
		{InitialPosition, f3, White, true},
		{InitialPosition, f4, White, false},
		{InitialPosition, f6, White, false},
		{InitialPosition, f6, Black, true},
		{InitialPosition, f8, Black, true},
		{InitialPosition, f8, White, false},
		{InitialPosition, h8, Black, false},
	} {
		if got := IsAttacked(v.pos, v.s, v.c); got != v.want {
			t.Errorf("IsAttacked(%v, %v, %v): got %v, want %v", v.pos, v.s, v.c, got, v.want)
		}
	}
}
// TestCanQSCastle verifies queenside castling legality from FEN positions:
// castling rights must be present for the side to move, and intervening
// pieces (or, presumably, attacked transit squares -- confirm in
// canQSCastle) forbid the move.
func TestCanQSCastle(t *testing.T) {
	for _, test := range []struct {
		fen  string
		want bool
	}{
		{"4k3/8/8/8/8/8/8/4K2R w Q - 0 1", true},
		{"4k3/8/8/8/8/8/8/4K2R w - - 0 1", false},
		{"4k3/8/8/8/8/8/8/4K2R b Q - 0 1", false},
		{"4k2r/8/8/8/8/8/8/4K2R w Q - 0 1", true},
		{"1r2k3/8/8/8/8/8/8/4K2R w Q - 0 1", true},
		{"3rk3/8/8/8/8/8/8/4K2R w Q - 0 1", false},
		{"4k3/8/8/8/8/8/8/RN2K3 w Q - 0 1", false},
		{"4k3/8/8/8/8/8/8/R3K1N1 w Q - 0 1", true},
		{"4k2r/8/8/8/8/8/8/4K3 b q - 0 1", true},
		{"4k2r/8/8/8/8/8/8/4K3 b - - 0 1", false},
		{"4k2r/8/8/8/8/8/8/4K3 w q - 0 1", false},
		{"4k2r/8/8/8/8/8/8/4K2R b q - 0 1", true},
		{"4k2r/8/8/8/8/8/8/1R2K3 b q - 0 1", true},
		{"4k2r/8/8/8/8/8/8/3RK3 b q - 0 1", false},
		{"rn2k3/8/8/8/8/8/8/4K3 b q - 0 1", false},
		{"r3k1n1/8/8/8/8/8/8/4K3 b q - 0 1", true},
	} {
		pos, err := ParseFEN(test.fen)
		if err != nil {
			t.Fatal(err)
		}
		if got := canQSCastle(pos); got != test.want {
			t.Errorf("canQSCastle(%v): got %v, want %v", test.fen, got, test.want)
		}
	}
}

// TestCanKSCastle mirrors TestCanQSCastle for kingside castling.
func TestCanKSCastle(t *testing.T) {
	for _, test := range []struct {
		fen  string
		want bool
	}{
		{"4k3/8/8/8/8/8/8/4K2R w K - 0 1", true},
		{"4k3/8/8/8/8/8/8/4K2R w - - 0 1", false},
		{"4k3/8/8/8/8/8/8/4K2R b K - 0 1", false},
		{"4k2r/8/8/8/8/8/8/4K2R w K - 0 1", true},
		{"4k1r1/8/8/8/8/8/8/4K2R w K - 0 1", false},
		{"4kr2/8/8/8/8/8/8/4K2R w K - 0 1", false},
		{"4k3/8/8/8/8/8/8/4K1NR w K - 0 1", false},
		{"4k3/8/8/8/8/8/8/RN2K2R w K - 0 1", true},
		{"4k2r/8/8/8/8/8/8/4K3 b k - 0 1", true},
		{"4k2r/8/8/8/8/8/8/4K3 b - - 0 1", false},
		{"4k2r/8/8/8/8/8/8/4K3 w k - 0 1", false},
		{"4k2r/8/8/8/8/8/8/4K2R b k - 0 1", true},
		{"4k2r/8/8/8/8/8/8/4K1R1 b k - 0 1", false},
		{"4k2r/8/8/8/8/8/8/4KR2 b k - 0 1", false},
		{"4k1nr/8/8/8/8/8/8/4K3 b k - 0 1", false},
		{"rn2k2r/8/8/8/8/8/8/4K3 b k - 0 1", true},
	} {
		pos, err := ParseFEN(test.fen)
		if err != nil {
			t.Fatal(err)
		}
		if got := canKSCastle(pos); got != test.want {
			t.Errorf("canKSCastle(%v): got %v, want %v", test.fen, got, test.want)
		}
	}
}
// TestAlgebraic checks standard algebraic notation rendering of moves:
// pawn pushes and captures, promotion, castling, check (+) and mate (#)
// suffixes, and file/rank/full-square disambiguation between like pieces.
func TestAlgebraic(t *testing.T) {
	for _, test := range []struct {
		fen  string
		move Move
		want string
	}{
		{InitialPositionFEN, Move{From: e2, To: e4, Piece: Pawn}, "e4"},
		{InitialPositionFEN, Move{From: g2, To: g3, Piece: Pawn}, "g3"},
		{InitialPositionFEN, Move{From: g1, To: f3, Piece: Knight}, "Nf3"},
		{"r4k2/1P6/8/3Pp3/8/8/6P1/R3K2R w KQ e6 0 1", Move{From: e1, To: f2, Piece: King}, "Kf2"},
		{"r4k2/1P6/8/3Pp3/8/8/6P1/R3K2R w KQ e6 0 1", Move{From: g2, To: g4, Piece: Pawn}, "g4"},
		{"r4k2/1P6/8/3Pp3/8/8/6P1/R3K2R w KQ e6 0 1", Move{From: d5, To: e6, Piece: Pawn, CapturePiece: Pawn, CaptureSquare: e5}, "dxe6"},
		{"r4k2/1P6/8/3Pp3/8/8/6P1/R3K2R w KQ e6 0 1", Move{From: a1, To: a8, Piece: Rook, CapturePiece: Rook, CaptureSquare: a8}, "Rxa8+"},
		{"r4k2/1P6/8/3Pp3/8/8/6P1/R3K2R w KQ e6 0 1", Move{From: b7, To: b8, Piece: Pawn, PromotePiece: Queen}, "b8Q+"},
		{"r4k2/1P6/8/3Pp3/8/8/6P1/R3K2R w KQ e6 0 1", Move{From: b7, To: a8, Piece: Pawn, CapturePiece: Rook, CaptureSquare: a8, PromotePiece: Queen}, "bxa8Q+"},
		{"r4k2/1P6/8/3Pp3/8/8/6P1/R3K2R w KQ e6 0 1", Move{From: e1, To: c1, Piece: King}, "O-O-O"},
		{"r4k2/1P6/8/3Pp3/8/8/6P1/R3K2R w KQ e6 0 1", Move{From: e1, To: g1, Piece: King}, "O-O+"},
		{"r4k2/1P6/3P2Q1/4p3/8/8/6P1/R3K2R w KQ - 0 1", Move{From: e1, To: g1, Piece: King}, "O-O#"},
		{"7k/8/8/8/8/8/8/R4RK1 w - - 0 1", Move{From: a1, To: d1, Piece: Rook}, "Rad1"},
		{"7k/R7/8/8/8/8/8/R5K1 w - - 0 1", Move{From: a7, To: a6, Piece: Rook}, "R7a6"},
		{"7k/R7/8/8/8/8/8/R5K1 w - - 0 1", Move{From: a7, To: a8, Piece: Rook}, "Ra8+"},
		{"8/B7/8/8/8/6k1/6P1/B5K1 w - - 0 1", Move{From: a7, To: d4, Piece: Bishop}, "B7d4"},
		{"8/B5B1/8/8/8/6k1/6P1/B5K1 w - - 0 1", Move{From: a1, To: d4, Piece: Bishop}, "B1d4"},
		{"8/B5B1/8/8/8/6k1/6P1/B5K1 w - - 0 1", Move{From: g7, To: d4, Piece: Bishop}, "Bgd4"},
		{"8/B5B1/8/8/8/6k1/6P1/B5K1 w - - 0 1", Move{From: a7, To: d4, Piece: Bishop}, "Ba7d4"},
	} {
		pos, err := ParseFEN(test.fen)
		if err != nil {
			t.Fatal(err)
		}
		if got := algebraic(pos, test.move); got != test.want {
			t.Errorf("algebraic(%v, %+v): got %v, want %v", test.fen, test.move, got, test.want)
		}
	}
}
// BenchmarkConstructMove measures field-by-field assignment into a Move.
// NOTE(review): m is a local never read after the loop; the compiler may
// dead-code-eliminate these stores -- consider sinking m into an exported
// package variable.
func BenchmarkConstructMove(b *testing.B) {
	m := Move{}
	for i := 0; i < b.N; i++ {
		m.From = b7
		m.To = a8
		m.Piece = Pawn
		m.CapturePiece = Rook
		m.CaptureSquare = a8
		m.PromotePiece = Queen
	}
}

// BenchmarkReadMove measures field-by-field reads from a Move; the trailing
// blank assignments keep the locals "used" for vet, though a package-level
// sink would more reliably defeat dead-code elimination.
func BenchmarkReadMove(b *testing.B) {
	m := Move{From: b7, To: a8, Piece: Pawn, CapturePiece: Rook, CaptureSquare: a8, PromotePiece: Queen}
	var from, to, captureSquare Square
	var piece, capturePiece, promotePiece Piece
	for i := 0; i < b.N; i++ {
		from = m.From
		to = m.To
		piece = m.Piece
		capturePiece = m.CapturePiece
		captureSquare = m.CaptureSquare
		promotePiece = m.PromotePiece
	}
	_, _, _ = from, to, captureSquare
	_, _, _ = piece, capturePiece, promotePiece
}
// BenchmarkMake benchmarks applying each category of move (quiet, double
// push, en passant, capture, promotion, castling) to a fixed mid-game
// position, one sub-benchmark per category.
func BenchmarkMake(b *testing.B) {
	pos, err := ParseFEN("r4k2/1P6/8/3Pp3/8/8/6P1/R3K2R w KQ e6 0 1")
	if err != nil {
		b.Fatal(err)
	}
	for _, benchmark := range []struct {
		name string
		move Move
	}{
		{"quiet", Move{From: e1, To: f2, Piece: King}},
		{"double", Move{From: g2, To: g4, Piece: Pawn}},
		{"en passant", Move{From: d5, To: e6, Piece: Pawn, CapturePiece: Pawn, CaptureSquare: e5}},
		{"capture", Move{From: a1, To: a8, Piece: Rook, CapturePiece: Rook, CaptureSquare: a8}},
		{"promotion", Move{From: b7, To: b8, Piece: Pawn, PromotePiece: Queen}},
		{"capture promotion", Move{From: b7, To: a8, Piece: Pawn, CapturePiece: Rook, CaptureSquare: a8, PromotePiece: Queen}},
		{"castle queenside", Move{From: e1, To: c1, Piece: King}},
		{"castle kingside", Move{From: e1, To: g1, Piece: King}},
	} {
		b.Run(benchmark.name, func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				_ = Make(pos, benchmark.move)
			}
		})
	}
}

// BenchmarkCandidates benchmarks move generation on the well-known
// "kiwipete"-style perft position.
func BenchmarkCandidates(b *testing.B) {
	pos, err := ParseFEN("r3k2r/p1ppqpb1/bn2pnp1/3PN3/1p2P3/2N2Q1p/PPPBBPPP/R3K2R w KQkq - 0 1")
	if err != nil {
		b.Fatal(err)
	}
	for i := 0; i < b.N; i++ {
		_ = Candidates(pos)
	}
}
Add long algebraic Move String tests.
package main
import "testing"
const aroundD4 = (CFile|DFile|EFile)&(Rank3|Rank4|Rank5) ^ (DFile & Rank4)
// TestSouthAttacks checks attackFill sliding south: edge squares that stop
// immediately, full-file fills from the far rank, and d4 with a blocked
// versus partially open neighborhood.
func TestSouthAttacks(t *testing.T) {
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a1.Board(), ^a1.Board(), 0},
		{h1.Board(), ^h1.Board(), 0},
		{a8.Board(), ^a8.Board(), AFile ^ a8.Board()},
		{h8.Board(), ^h8.Board(), HFile ^ h8.Board()},
		{d4.Board(), 0, d3.Board()},
		{d4.Board(), aroundD4, d3.Board() | d2.Board()},
	} {
		if got := attackFill(v.input, v.empty, south); got != v.want {
			t.Errorf("attackFill(%016x, %016x, south): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}

// TestWestAttacks checks attackFill sliding west (see TestSouthAttacks for
// the case layout).
func TestWestAttacks(t *testing.T) {
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a1.Board(), ^a1.Board(), 0},
		{h1.Board(), ^h1.Board(), Rank1 ^ h1.Board()},
		{a8.Board(), ^a8.Board(), 0},
		{h8.Board(), ^h8.Board(), Rank8 ^ h8.Board()},
		{d4.Board(), 0, c4.Board()},
		{d4.Board(), aroundD4, c4.Board() | b4.Board()},
	} {
		if got := attackFill(v.input, v.empty, west); got != v.want {
			t.Errorf("attackFill(%016x, %016x, west): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}

// TestEastAttacks checks attackFill sliding east.
func TestEastAttacks(t *testing.T) {
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a1.Board(), ^a1.Board(), Rank1 ^ a1.Board()},
		{h1.Board(), ^h1.Board(), 0},
		{a8.Board(), ^a8.Board(), Rank8 ^ a8.Board()},
		{h8.Board(), ^h8.Board(), 0},
		{d4.Board(), 0, e4.Board()},
		{d4.Board(), aroundD4, e4.Board() | f4.Board()},
	} {
		if got := attackFill(v.input, v.empty, east); got != v.want {
			t.Errorf("attackFill(%016x, %016x, east): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}

// TestNorthAttacks checks attackFill sliding north.
func TestNorthAttacks(t *testing.T) {
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a1.Board(), ^a1.Board(), AFile ^ a1.Board()},
		{h1.Board(), ^h1.Board(), HFile ^ h1.Board()},
		{a8.Board(), ^a8.Board(), 0},
		{h8.Board(), ^h8.Board(), 0},
		{d4.Board(), 0, d5.Board()},
		{d4.Board(), aroundD4, d5.Board() | d6.Board()},
	} {
		if got := attackFill(v.input, v.empty, north); got != v.want {
			t.Errorf("attackFill(%016x, %016x, north): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}

// TestSouthwestAttacks checks attackFill sliding southwest; h8 sweeps the
// whole long diagonal.
func TestSouthwestAttacks(t *testing.T) {
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a1.Board(), ^a1.Board(), 0},
		{h1.Board(), ^h1.Board(), 0},
		{a8.Board(), ^a8.Board(), 0},
		{h8.Board(), ^h8.Board(), LongDiagonal ^ h8.Board()},
		{d4.Board(), 0, c3.Board()},
		{d4.Board(), aroundD4, c3.Board() | b2.Board()},
	} {
		if got := attackFill(v.input, v.empty, southwest); got != v.want {
			t.Errorf("attackFill(%016x, %016x, southwest): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}

// TestSoutheastAttacks checks attackFill sliding southeast; a8 sweeps the
// whole long anti-diagonal.
func TestSoutheastAttacks(t *testing.T) {
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a1.Board(), ^a1.Board(), 0},
		{h1.Board(), ^h1.Board(), 0},
		{a8.Board(), ^a8.Board(), LongAntiDiagonal ^ a8.Board()},
		{h8.Board(), ^h8.Board(), 0},
		{d4.Board(), 0, e3.Board()},
		{d4.Board(), aroundD4, e3.Board() | f2.Board()},
	} {
		if got := attackFill(v.input, v.empty, southeast); got != v.want {
			t.Errorf("attackFill(%016x, %016x, southeast): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}

// TestNorthwestAttacks checks attackFill sliding northwest.
func TestNorthwestAttacks(t *testing.T) {
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a1.Board(), ^a1.Board(), 0},
		{h1.Board(), ^h1.Board(), LongAntiDiagonal ^ h1.Board()},
		{a8.Board(), ^a8.Board(), 0},
		{h8.Board(), ^h8.Board(), 0},
		{d4.Board(), 0, c5.Board()},
		{d4.Board(), aroundD4, c5.Board() | b6.Board()},
	} {
		if got := attackFill(v.input, v.empty, northwest); got != v.want {
			t.Errorf("attackFill(%016x, %016x, northwest): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}

// TestNortheastAttacks checks attackFill sliding northeast.
func TestNortheastAttacks(t *testing.T) {
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a1.Board(), ^a1.Board(), LongDiagonal ^ a1.Board()},
		{h1.Board(), ^h1.Board(), 0},
		{a8.Board(), ^a8.Board(), 0},
		{h8.Board(), ^h8.Board(), 0},
		{d4.Board(), 0, e5.Board()},
		{d4.Board(), aroundD4, e5.Board() | f6.Board()},
	} {
		if got := attackFill(v.input, v.empty, northeast); got != v.want {
			t.Errorf("attackFill(%016x, %016x, northeast): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}
// TestWhitePawnAdvances checks single and double pushes for white pawns on
// the a-, e-, and h-files: a rank-2 pawn may advance one or two squares when
// unobstructed, one when the double-push square is occupied, and none when
// directly blocked.
func TestWhitePawnAdvances(t *testing.T) {
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a2.Board(), ^a2.Board(), a3.Board() | a4.Board()},
		{a2.Board(), ^(a2.Board() | a4.Board()), a3.Board()},
		{a2.Board(), ^(a2.Board() | a3.Board()), 0},
		{a5.Board(), ^a5.Board(), a6.Board()},
		{a5.Board(), ^(a5.Board() | a6.Board()), 0},
		{a7.Board(), ^a7.Board(), a8.Board()},
		{a7.Board(), ^(a7.Board() | a8.Board()), 0},
		{e2.Board(), ^e2.Board(), e3.Board() | e4.Board()},
		{e2.Board(), ^(e2.Board() | e4.Board()), e3.Board()},
		{e2.Board(), ^(e2.Board() | e3.Board()), 0},
		{e5.Board(), ^e5.Board(), e6.Board()},
		{e5.Board(), ^(e5.Board() | e6.Board()), 0},
		{e7.Board(), ^e7.Board(), e8.Board()},
		{e7.Board(), ^(e7.Board() | e8.Board()), 0},
		{h2.Board(), ^h2.Board(), h3.Board() | h4.Board()},
		{h2.Board(), ^(h2.Board() | h4.Board()), h3.Board()},
		{h2.Board(), ^(h2.Board() | h3.Board()), 0},
		{h5.Board(), ^h5.Board(), h6.Board()},
		{h5.Board(), ^(h5.Board() | h6.Board()), 0},
		{h7.Board(), ^h7.Board(), h8.Board()},
		{h7.Board(), ^(h7.Board() | h8.Board()), 0},
	} {
		if got := whitePawnAdvances(v.input, v.empty); got != v.want {
			t.Errorf("whitePawnAdvances(%016x, %016x): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}

// TestBlackPawnAdvances mirrors TestWhitePawnAdvances for black pawns
// moving down the board (rank 7 gets the double push).
func TestBlackPawnAdvances(t *testing.T) {
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a7.Board(), ^a7.Board(), a6.Board() | a5.Board()},
		{a7.Board(), ^(a7.Board() | a5.Board()), a6.Board()},
		{a7.Board(), ^(a7.Board() | a6.Board()), 0},
		{a4.Board(), ^a4.Board(), a3.Board()},
		{a4.Board(), ^(a4.Board() | a3.Board()), 0},
		{a2.Board(), ^a2.Board(), a1.Board()},
		{a2.Board(), ^(a2.Board() | a1.Board()), 0},
		{e7.Board(), ^e7.Board(), e6.Board() | e5.Board()},
		{e7.Board(), ^(e7.Board() | e5.Board()), e6.Board()},
		{e7.Board(), ^(e7.Board() | e6.Board()), 0},
		{e4.Board(), ^e4.Board(), e3.Board()},
		{e4.Board(), ^(e4.Board() | e3.Board()), 0},
		{e2.Board(), ^e2.Board(), e1.Board()},
		{e2.Board(), ^(e2.Board() | e1.Board()), 0},
		{h7.Board(), ^h7.Board(), h6.Board() | h5.Board()},
		{h7.Board(), ^(h7.Board() | h5.Board()), h6.Board()},
		{h7.Board(), ^(h7.Board() | h6.Board()), 0},
		{h4.Board(), ^h4.Board(), h3.Board()},
		{h4.Board(), ^(h4.Board() | h3.Board()), 0},
		{h2.Board(), ^h2.Board(), h1.Board()},
		{h2.Board(), ^(h2.Board() | h1.Board()), 0},
	} {
		if got := blackPawnAdvances(v.input, v.empty); got != v.want {
			t.Errorf("blackPawnAdvances(%016x, %016x): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}
// TestWhitePawnAttacks checks white pawn capture targets: edge pawns attack
// one diagonal, center pawns two; the empty mask removes squares with no
// capturable piece.
func TestWhitePawnAttacks(t *testing.T) {
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a2.Board(), 0, b3.Board()},
		{a2.Board(), BFile, 0},
		{a5.Board(), 0, b6.Board()},
		{a5.Board(), BFile, 0},
		{a7.Board(), 0, b8.Board()},
		{a7.Board(), BFile, 0},
		{e2.Board(), 0, d3.Board() | f3.Board()},
		{e2.Board(), DFile, f3.Board()},
		{e2.Board(), FFile, d3.Board()},
		{e2.Board(), DFile | FFile, 0},
		{e5.Board(), 0, d6.Board() | f6.Board()},
		{e5.Board(), DFile, f6.Board()},
		{e5.Board(), FFile, d6.Board()},
		{e5.Board(), DFile | FFile, 0},
		{e7.Board(), 0, d8.Board() | f8.Board()},
		{e7.Board(), DFile, f8.Board()},
		{e7.Board(), FFile, d8.Board()},
		{e7.Board(), DFile | FFile, 0},
		{h2.Board(), 0, g3.Board()},
		{h2.Board(), GFile, 0},
		{h5.Board(), 0, g6.Board()},
		{h5.Board(), GFile, 0},
		{h7.Board(), 0, g8.Board()},
		{h7.Board(), GFile, 0},
	} {
		if got := whitePawnAttacks(v.input, v.empty); got != v.want {
			t.Errorf("whitePawnAttacks(%016x, %016x): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}

// TestBlackPawnAttacks mirrors TestWhitePawnAttacks for black pawns
// capturing toward rank 1.
func TestBlackPawnAttacks(t *testing.T) {
	for _, v := range []struct {
		input Board
		empty Board
		want  Board
	}{
		{a7.Board(), 0, b6.Board()},
		{a7.Board(), BFile, 0},
		{a4.Board(), 0, b3.Board()},
		{a4.Board(), BFile, 0},
		{a2.Board(), 0, b1.Board()},
		{a2.Board(), BFile, 0},
		{e7.Board(), 0, d6.Board() | f6.Board()},
		{e7.Board(), DFile, f6.Board()},
		{e7.Board(), FFile, d6.Board()},
		{e7.Board(), DFile | FFile, 0},
		{e4.Board(), 0, d3.Board() | f3.Board()},
		{e4.Board(), DFile, f3.Board()},
		{e4.Board(), FFile, d3.Board()},
		{e4.Board(), DFile | FFile, 0},
		{e2.Board(), 0, d1.Board() | f1.Board()},
		{e2.Board(), DFile, f1.Board()},
		{e2.Board(), FFile, d1.Board()},
		{e2.Board(), DFile | FFile, 0},
		{h7.Board(), 0, g6.Board()},
		{h7.Board(), GFile, 0},
		{h4.Board(), 0, g3.Board()},
		{h4.Board(), GFile, 0},
		{h2.Board(), 0, g1.Board()},
		{h2.Board(), GFile, 0},
	} {
		if got := blackPawnAttacks(v.input, v.empty); got != v.want {
			t.Errorf("blackPawnAttacks(%016x, %016x): got %016x, want %016x", v.input, v.empty, got, v.want)
		}
	}
}
func TestBishopAttacks(t *testing.T) {
var ld, la = LongDiagonal, LongAntiDiagonal
for _, v := range []struct {
input Board
empty Board
want Board
}{
{a1.Board(), ^a1.Board(), LongDiagonal ^ a1.Board()},
{e7.Board(), ^e7.Board(), (ld<<16 | la<<24) ^ e7.Board()},
{d4.Board(), 0, c3.Board() | e3.Board() | c5.Board() | e5.Board()},
{d4.Board(), aroundD4, b2.Board() | f2.Board() | c3.Board() | e3.Board() | c5.Board() | e5.Board() | b6.Board() | f6.Board()},
} {
if got := bishopAttacks(v.input, v.empty); got != v.want {
t.Errorf("bishopAttacks(%016x, %016x): got %016x, want %016x", v.input, v.empty, got, v.want)
}
}
}
// TestRookAttacks checks rookAttacks on a corner square and a central
// square, both on an empty board and with blocking neighbors.
func TestRookAttacks(t *testing.T) {
	cases := []struct {
		input Board
		empty Board
		want  Board
	}{
		{a1.Board(), ^a1.Board(), (AFile | Rank1) ^ a1.Board()},
		{e7.Board(), ^e7.Board(), (EFile | Rank7) ^ e7.Board()},
		{d4.Board(), 0, d3.Board() | c4.Board() | e4.Board() | d5.Board()},
		{d4.Board(), aroundD4, d2.Board() | d3.Board() | b4.Board() | c4.Board() | e4.Board() | f4.Board() | d5.Board() | d6.Board()},
	}
	for _, tc := range cases {
		got := rookAttacks(tc.input, tc.empty)
		if got != tc.want {
			t.Errorf("rookAttacks(%016x, %016x): got %016x, want %016x", tc.input, tc.empty, got, tc.want)
		}
	}
}
// TestQueenAttacks checks queenAttacks (union of rook and bishop moves)
// on a corner square and a central square.
func TestQueenAttacks(t *testing.T) {
	ld, la := LongDiagonal, LongAntiDiagonal
	cases := []struct {
		input Board
		empty Board
		want  Board
	}{
		{a1.Board(), ^a1.Board(), (AFile | Rank1 | LongDiagonal) ^ a1.Board()},
		{e7.Board(), ^e7.Board(), (EFile | Rank7 | ld<<16 | la<<24) ^ e7.Board()},
		{d4.Board(), 0, aroundD4},
		{d4.Board(), aroundD4, aroundD4 | b2.Board() | d2.Board() | f2.Board() | b4.Board() | f4.Board() | b6.Board() | d6.Board() | f6.Board()},
	}
	for _, tc := range cases {
		got := queenAttacks(tc.input, tc.empty)
		if got != tc.want {
			t.Errorf("queenAttacks(%016x, %016x): got %016x, want %016x", tc.input, tc.empty, got, tc.want)
		}
	}
}
// TestIsAttacked checks attack detection on squares of the initial
// position for both colors.
func TestIsAttacked(t *testing.T) {
	cases := []struct {
		pos  Position
		s    Square
		c    Color
		want bool
	}{
		{InitialPosition, a1, White, false},
		{InitialPosition, b1, White, true},
		{InitialPosition, b1, Black, false},
		{InitialPosition, f3, White, true},
		{InitialPosition, f4, White, false},
		{InitialPosition, f6, White, false},
		{InitialPosition, f6, Black, true},
		{InitialPosition, f8, Black, true},
		{InitialPosition, f8, White, false},
		{InitialPosition, h8, Black, false},
	}
	for _, tc := range cases {
		got := IsAttacked(tc.pos, tc.s, tc.c)
		if got != tc.want {
			t.Errorf("IsAttacked(%v, %v, %v): got %v, want %v", tc.pos, tc.s, tc.c, got, tc.want)
		}
	}
}
// TestCanQSCastle verifies queenside castling legality for a table of FEN
// positions: castling rights, occupied/attacked transit squares, and
// side-to-move are all exercised.
func TestCanQSCastle(t *testing.T) {
	cases := []struct {
		fen  string
		want bool
	}{
		{"4k3/8/8/8/8/8/8/4K2R w Q - 0 1", true},
		{"4k3/8/8/8/8/8/8/4K2R w - - 0 1", false},
		{"4k3/8/8/8/8/8/8/4K2R b Q - 0 1", false},
		{"4k2r/8/8/8/8/8/8/4K2R w Q - 0 1", true},
		{"1r2k3/8/8/8/8/8/8/4K2R w Q - 0 1", true},
		{"3rk3/8/8/8/8/8/8/4K2R w Q - 0 1", false},
		{"4k3/8/8/8/8/8/8/RN2K3 w Q - 0 1", false},
		{"4k3/8/8/8/8/8/8/R3K1N1 w Q - 0 1", true},
		{"4k2r/8/8/8/8/8/8/4K3 b q - 0 1", true},
		{"4k2r/8/8/8/8/8/8/4K3 b - - 0 1", false},
		{"4k2r/8/8/8/8/8/8/4K3 w q - 0 1", false},
		{"4k2r/8/8/8/8/8/8/4K2R b q - 0 1", true},
		{"4k2r/8/8/8/8/8/8/1R2K3 b q - 0 1", true},
		{"4k2r/8/8/8/8/8/8/3RK3 b q - 0 1", false},
		{"rn2k3/8/8/8/8/8/8/4K3 b q - 0 1", false},
		{"r3k1n1/8/8/8/8/8/8/4K3 b q - 0 1", true},
	}
	for _, tc := range cases {
		pos, err := ParseFEN(tc.fen)
		if err != nil {
			t.Fatal(err)
		}
		if got := canQSCastle(pos); got != tc.want {
			t.Errorf("canQSCastle(%v): got %v, want %v", tc.fen, got, tc.want)
		}
	}
}
// TestCanKSCastle verifies kingside castling legality for a table of FEN
// positions, mirroring TestCanQSCastle.
func TestCanKSCastle(t *testing.T) {
	cases := []struct {
		fen  string
		want bool
	}{
		{"4k3/8/8/8/8/8/8/4K2R w K - 0 1", true},
		{"4k3/8/8/8/8/8/8/4K2R w - - 0 1", false},
		{"4k3/8/8/8/8/8/8/4K2R b K - 0 1", false},
		{"4k2r/8/8/8/8/8/8/4K2R w K - 0 1", true},
		{"4k1r1/8/8/8/8/8/8/4K2R w K - 0 1", false},
		{"4kr2/8/8/8/8/8/8/4K2R w K - 0 1", false},
		{"4k3/8/8/8/8/8/8/4K1NR w K - 0 1", false},
		{"4k3/8/8/8/8/8/8/RN2K2R w K - 0 1", true},
		{"4k2r/8/8/8/8/8/8/4K3 b k - 0 1", true},
		{"4k2r/8/8/8/8/8/8/4K3 b - - 0 1", false},
		{"4k2r/8/8/8/8/8/8/4K3 w k - 0 1", false},
		{"4k2r/8/8/8/8/8/8/4K2R b k - 0 1", true},
		{"4k2r/8/8/8/8/8/8/4K1R1 b k - 0 1", false},
		{"4k2r/8/8/8/8/8/8/4KR2 b k - 0 1", false},
		{"4k1nr/8/8/8/8/8/8/4K3 b k - 0 1", false},
		{"rn2k2r/8/8/8/8/8/8/4K3 b k - 0 1", true},
	}
	for _, tc := range cases {
		pos, err := ParseFEN(tc.fen)
		if err != nil {
			t.Fatal(err)
		}
		if got := canKSCastle(pos); got != tc.want {
			t.Errorf("canKSCastle(%v): got %v, want %v", tc.fen, got, tc.want)
		}
	}
}
// algebraicTests drives both TestMoveString and TestAlgebraic: each entry
// pairs a position (FEN) and a move with the expected short algebraic
// ("alg", with check/mate and disambiguation) and long algebraic ("long")
// renderings.
var algebraicTests = []struct {
	fen string
	move Move
	alg string
	long string
}{
	{InitialPositionFEN, Move{From: e2, To: e4, Piece: Pawn}, "e4", "e2-e4"},
	{InitialPositionFEN, Move{From: g2, To: g3, Piece: Pawn}, "g3", "g2-g3"},
	{InitialPositionFEN, Move{From: g1, To: f3, Piece: Knight}, "Nf3", "Ng1-f3"},
	{"r4k2/1P6/8/3Pp3/8/8/6P1/R3K2R w KQ e6 0 1", Move{From: e1, To: f2, Piece: King}, "Kf2", "Ke1-f2"},
	{"r4k2/1P6/8/3Pp3/8/8/6P1/R3K2R w KQ e6 0 1", Move{From: g2, To: g4, Piece: Pawn}, "g4", "g2-g4"},
	{"r4k2/1P6/8/3Pp3/8/8/6P1/R3K2R w KQ e6 0 1", Move{From: d5, To: e6, Piece: Pawn, CapturePiece: Pawn, CaptureSquare: e5}, "dxe6", "d5xe6"},
	{"r4k2/1P6/8/3Pp3/8/8/6P1/R3K2R w KQ e6 0 1", Move{From: a1, To: a8, Piece: Rook, CapturePiece: Rook, CaptureSquare: a8}, "Rxa8+", "Ra1xa8"},
	{"r4k2/1P6/8/3Pp3/8/8/6P1/R3K2R w KQ e6 0 1", Move{From: b7, To: b8, Piece: Pawn, PromotePiece: Queen}, "b8Q+", "b7-b8Q"},
	{"r4k2/1P6/8/3Pp3/8/8/6P1/R3K2R w KQ e6 0 1", Move{From: b7, To: a8, Piece: Pawn, CapturePiece: Rook, CaptureSquare: a8, PromotePiece: Queen}, "bxa8Q+", "b7xa8Q"},
	{"r4k2/1P6/8/3Pp3/8/8/6P1/R3K2R w KQ e6 0 1", Move{From: e1, To: c1, Piece: King}, "O-O-O", "O-O-O"},
	{"r4k2/1P6/8/3Pp3/8/8/6P1/R3K2R w KQ e6 0 1", Move{From: e1, To: g1, Piece: King}, "O-O+", "O-O"},
	{"r4k2/1P6/3P2Q1/4p3/8/8/6P1/R3K2R w KQ - 0 1", Move{From: e1, To: g1, Piece: King}, "O-O#", "O-O"},
	{"7k/8/8/8/8/8/8/R4RK1 w - - 0 1", Move{From: a1, To: d1, Piece: Rook}, "Rad1", "Ra1-d1"},
	{"7k/R7/8/8/8/8/8/R5K1 w - - 0 1", Move{From: a7, To: a6, Piece: Rook}, "R7a6", "Ra7-a6"},
	{"7k/R7/8/8/8/8/8/R5K1 w - - 0 1", Move{From: a7, To: a8, Piece: Rook}, "Ra8+", "Ra7-a8"},
	{"8/B7/8/8/8/6k1/6P1/B5K1 w - - 0 1", Move{From: a7, To: d4, Piece: Bishop}, "B7d4", "Ba7-d4"},
	{"8/B5B1/8/8/8/6k1/6P1/B5K1 w - - 0 1", Move{From: a1, To: d4, Piece: Bishop}, "B1d4", "Ba1-d4"},
	{"8/B5B1/8/8/8/6k1/6P1/B5K1 w - - 0 1", Move{From: g7, To: d4, Piece: Bishop}, "Bgd4", "Bg7-d4"},
	{"8/B5B1/8/8/8/6k1/6P1/B5K1 w - - 0 1", Move{From: a7, To: d4, Piece: Bishop}, "Ba7d4", "Ba7-d4"},
}
// TestMoveString checks Move.String, which renders long algebraic
// notation without check or mate decorations.
func TestMoveString(t *testing.T) {
	// long algebraic notation without check
	for _, tc := range algebraicTests {
		got := tc.move.String()
		if got != tc.long {
			t.Errorf("String(%v): got %v, want %v", tc.move, got, tc.long)
		}
	}
}
// TestAlgebraic checks the short algebraic rendering of each move in
// algebraicTests against its position.
func TestAlgebraic(t *testing.T) {
	for _, tc := range algebraicTests {
		pos, err := ParseFEN(tc.fen)
		if err != nil {
			t.Fatal(err)
		}
		got := algebraic(pos, tc.move)
		if got != tc.alg {
			t.Errorf("algebraic(%v, %+v): got %v, want %v", tc.fen, tc.move, got, tc.alg)
		}
	}
}
// BenchmarkConstructMove measures the cost of populating a Move struct
// field by field.
// NOTE(review): the loop has no observable side effect, so the compiler
// may eliminate it; consider sinking m into a package-level var.
func BenchmarkConstructMove(b *testing.B) {
	m := Move{}
	for i := 0; i < b.N; i++ {
		m.From = b7
		m.To = a8
		m.Piece = Pawn
		m.CapturePiece = Rook
		m.CaptureSquare = a8
		m.PromotePiece = Queen
	}
}
// BenchmarkReadMove measures the cost of reading every field of a Move.
// The trailing blank assignments keep the locals referenced so they are
// not flagged as unused.
func BenchmarkReadMove(b *testing.B) {
	m := Move{From: b7, To: a8, Piece: Pawn, CapturePiece: Rook, CaptureSquare: a8, PromotePiece: Queen}
	var from, to, captureSquare Square
	var piece, capturePiece, promotePiece Piece
	for i := 0; i < b.N; i++ {
		from = m.From
		to = m.To
		piece = m.Piece
		capturePiece = m.CapturePiece
		captureSquare = m.CaptureSquare
		promotePiece = m.PromotePiece
	}
	_, _, _ = from, to, captureSquare
	_, _, _ = piece, capturePiece, promotePiece
}
// BenchmarkMake measures Make for each special move type (quiet move,
// double pawn push, en passant, capture, promotion, capture-promotion and
// both castlings) from one fixed position, as named sub-benchmarks.
func BenchmarkMake(b *testing.B) {
	pos, err := ParseFEN("r4k2/1P6/8/3Pp3/8/8/6P1/R3K2R w KQ e6 0 1")
	if err != nil {
		b.Fatal(err)
	}
	for _, benchmark := range []struct {
		name string
		move Move
	}{
		{"quiet", Move{From: e1, To: f2, Piece: King}},
		{"double", Move{From: g2, To: g4, Piece: Pawn}},
		{"en passant", Move{From: d5, To: e6, Piece: Pawn, CapturePiece: Pawn, CaptureSquare: e5}},
		{"capture", Move{From: a1, To: a8, Piece: Rook, CapturePiece: Rook, CaptureSquare: a8}},
		{"promotion", Move{From: b7, To: b8, Piece: Pawn, PromotePiece: Queen}},
		{"capture promotion", Move{From: b7, To: a8, Piece: Pawn, CapturePiece: Rook, CaptureSquare: a8, PromotePiece: Queen}},
		{"castle queenside", Move{From: e1, To: c1, Piece: King}},
		{"castle kingside", Move{From: e1, To: g1, Piece: King}},
	} {
		b.Run(benchmark.name, func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				_ = Make(pos, benchmark.move)
			}
		})
	}
}
// BenchmarkCandidates measures move generation from a complex middlegame
// position with many pieces and full castling rights on both sides.
func BenchmarkCandidates(b *testing.B) {
	pos, err := ParseFEN("r3k2r/p1ppqpb1/bn2pnp1/3PN3/1p2P3/2N2Q1p/PPPBBPPP/R3K2R w KQkq - 0 1")
	if err != nil {
		b.Fatal(err)
	}
	for i := 0; i < b.N; i++ {
		_ = Candidates(pos)
	}
}
|
package main
import (
"reflect"
"regexp"
"testing"
)
// genMatcherTests maps newMatcher source patterns to the regexp each
// should compile to. Pattern syntax: ',' separates alternation branches,
// '/' separates capture groups, '\' escapes; regexp metacharacters in the
// input are quoted, and '\t'/'\n'-style escapes are unquoted.
var genMatcherTests = []struct {
	src string
	dst *regexp.Regexp
}{
	// one branch
	{`abc`, regexp.MustCompile(`(abc)`)},
	{`abcdef`, regexp.MustCompile(`(abcdef)`)},
	// multiple branches
	{`a,b`, regexp.MustCompile(`(a|b)`)},
	{`a,,b,c`, regexp.MustCompile(`(a||b|c)`)},
	{`a,bc,def`, regexp.MustCompile(`(a|bc|def)`)},
	{`,a`, regexp.MustCompile(`(|a)`)},
	{`a,`, regexp.MustCompile(`(a|)`)},
	{`,a,`, regexp.MustCompile(`(|a|)`)},
	// use escape
	{`a\,b`, regexp.MustCompile(`(a,b)`)},
	{`a\,bc\,def`, regexp.MustCompile(`(a,bc,def)`)},
	// multiple branches with escape
	{`a\,b,c`, regexp.MustCompile(`(a,b|c)`)},
	{`a,bc\,def`, regexp.MustCompile(`(a|bc,def)`)},
	// regexp quote
	{`a+b`, regexp.MustCompile(`(a\+b)`)},
	{`(a|bc)*def`, regexp.MustCompile(`(\(a\|bc\)\*def)`)},
	// unquote special values
	{`a\\bc`, regexp.MustCompile("(a\\\\bc)")},
	{`a\tb\,c`, regexp.MustCompile("(a\tb,c)")},
	{`a\tbc\n\ndef`, regexp.MustCompile("(a\tbc\n\ndef)")},
	// multiple groups
	{`a/b`, regexp.MustCompile("(a)(b)")},
	{`a//b/c`, regexp.MustCompile(`(a)()(b)(c)`)},
	{`a/bc/def`, regexp.MustCompile("(a)(bc)(def)")},
	{`a,b/c`, regexp.MustCompile("(a|b)(c)")},
	{`/a`, regexp.MustCompile(`()(a)`)},
	{`a/`, regexp.MustCompile(`(a)()`)},
	{`/a/`, regexp.MustCompile(`()(a)()`)},
	// multiple groups with escape
	{`a/b\/c`, regexp.MustCompile("(a)(b/c)")},
	{`a/\/bc\//def`, regexp.MustCompile("(a)(/bc/)(def)")},
	{`a\,b,c/d,e\/f`, regexp.MustCompile("(a,b|c)(d|e/f)")},
}
// TestGenMatcher checks that newMatcher (without word boundaries) compiles
// each source pattern of genMatcherTests to the expected regexp.
func TestGenMatcher(t *testing.T) {
	for _, test := range genMatcherTests {
		got, err := newMatcher(test.src, false)
		if err != nil {
			t.Errorf("newMatcher(%q) returns %q, want nil",
				test.src, err)
			continue
		}
		if !reflect.DeepEqual(got, test.dst) {
			t.Errorf("%q: got %q, want %q",
				test.src, got, test.dst)
		}
	}
}
// genMatcherWithBoundaryTests is a subset of genMatcherTests exercised
// with boundary=true: the whole pattern is wrapped in \b ... \b.
var genMatcherWithBoundaryTests = []struct {
	src string
	dst *regexp.Regexp
}{
	{`abc`, regexp.MustCompile(`\b(abc)\b`)},
	{`a,b`, regexp.MustCompile(`\b(a|b)\b`)},
	{`a\,b,c`, regexp.MustCompile(`\b(a,b|c)\b`)},
	{`a/b`, regexp.MustCompile(`\b(a)(b)\b`)},
	{`a/bc/def`, regexp.MustCompile(`\b(a)(bc)(def)\b`)},
	{`a,b/c`, regexp.MustCompile(`\b(a|b)(c)\b`)},
	{`a\,b,c/d,e\/f`, regexp.MustCompile(`\b(a,b|c)(d|e/f)\b`)},
}
// TestGenMatcherWithBoundary checks newMatcher with word boundaries
// enabled against genMatcherWithBoundaryTests.
func TestGenMatcherWithBoundary(t *testing.T) {
	for _, test := range genMatcherWithBoundaryTests {
		got, err := newMatcher(test.src, true)
		if err != nil {
			t.Errorf("newMatcher(%q) returns %q, want nil",
				test.src, err)
			continue
		}
		if !reflect.DeepEqual(got, test.dst) {
			t.Errorf("%q: got %q, want %q",
				test.src, got, test.dst)
		}
	}
}
// genReplacementTests maps newReplacement's from/to pattern pair to the
// expected per-group replacement maps (one map per '/'-separated group,
// keyed by source branch).
//
// The element type of the nested composite literals is elided (gofmt -s),
// which shrinks the table and makes each case fit on one line.
var genReplacementTests = []struct {
	srcFrom string
	srcTo   string
	dst     []map[string]string
}{
	// one branch
	{"abc", "def", []map[string]string{{"abc": "def"}}},
	{"abcdef", "ghijkl", []map[string]string{{"abcdef": "ghijkl"}}},
	// multiple branches
	{"a,b", "b,a", []map[string]string{{"a": "b", "b": "a"}}},
	{"a,,b,c", "d,e,f,g", []map[string]string{{"a": "d", "": "e", "b": "f", "c": "g"}}},
	{",a", "a,", []map[string]string{{"": "a", "a": ""}}},
	{"a,b,c", ",d,", []map[string]string{{"a": "", "b": "d", "c": ""}}},
	// multiple groups
	{"a/b", "c/d", []map[string]string{{"a": "c"}, {"b": "d"}}},
	{"a//b/c", "d/e/f/g", []map[string]string{{"a": "d"}, {"": "e"}, {"b": "f"}, {"c": "g"}}},
	{"a,b/c", "d,e/f", []map[string]string{{"a": "d", "b": "e"}, {"c": "f"}}},
	{"/a", "a/", []map[string]string{{"": "a"}, {"a": ""}}},
	{"/a/", "b/c/d", []map[string]string{{"": "b"}, {"a": "c"}, {"": "d"}}},
}
// TestGenReplacement checks that newReplacement builds the expected
// per-group replacement maps from each from/to pattern pair.
func TestGenReplacement(t *testing.T) {
	for _, test := range genReplacementTests {
		got, err := newReplacement(test.srcFrom, test.srcTo)
		if err != nil {
			t.Errorf("newReplacement(%q, %q) returns %q, want nil",
				test.srcFrom, test.srcTo, err)
			continue
		}
		if !reflect.DeepEqual(got, test.dst) {
			t.Errorf("%q, %q: got %q, want %q",
				test.srcFrom, test.srcTo, got, test.dst)
		}
	}
}
// replaceTests pairs Replacer from/to patterns with a source text and the
// text expected after ReplaceAll. One compact row per case.
var replaceTests = []struct {
	srcFrom string
	srcTo   string
	srcText string
	dst     string
}{
	// one branch
	{"abc", "def", "foo bar", "foo bar"},
	{"abc", "def", "abc def", "def def"},
	{"a", "b", "a b c a b c", "b b c b b c"},
	// multiple branches
	{"abc,def", "def,abc", "abc def", "def abc"},
	{"a,b,c,d", "e,f,g,h", "d c b a", "h g f e"},
	{"a, ", " ,a", "a a a", " a a "},
	// multiple groups
	{"a/b", "c/d", "aa ab ac ad", "aa cd ac ad"},
	{"a//b/c", "d/e/f/g", "abc bca cab", "defg bca cab"},
	{"dog,cat/s", "cat,dog/s", "cats cats dogs dogs cats", "dogs dogs cats cats dogs"},
}
// TestReplace runs Replacer.ReplaceAll over the replaceTests table and
// compares the rewritten text with the expected output.
func TestReplace(t *testing.T) {
	for _, test := range replaceTests {
		r, err := NewReplacer(test.srcFrom, test.srcTo, false)
		if err != nil {
			t.Errorf("NewReplacer(%q, %q) returns %q, want nil",
				test.srcFrom, test.srcTo, err)
			continue
		}
		got := r.ReplaceAll(test.srcText)
		if !reflect.DeepEqual(got, test.dst) {
			t.Errorf("Replacer{%q, %q}: %q: got %q, want %q",
				test.srcFrom, test.srcTo, test.srcText, got, test.dst)
		}
	}
}
Use field names in tests
package main
import (
"reflect"
"regexp"
"testing"
)
// genMatcherTests maps newMatcher source patterns to the regexp each
// should compile to. Pattern syntax: ',' separates alternation branches,
// '/' separates capture groups, '\' escapes; regexp metacharacters in the
// input are quoted, and '\t'/'\n'-style escapes are unquoted.
var genMatcherTests = []struct {
	src string
	dst *regexp.Regexp
}{
	// one branch
	{`abc`, regexp.MustCompile(`(abc)`)},
	{`abcdef`, regexp.MustCompile(`(abcdef)`)},
	// multiple branches
	{`a,b`, regexp.MustCompile(`(a|b)`)},
	{`a,,b,c`, regexp.MustCompile(`(a||b|c)`)},
	{`a,bc,def`, regexp.MustCompile(`(a|bc|def)`)},
	{`,a`, regexp.MustCompile(`(|a)`)},
	{`a,`, regexp.MustCompile(`(a|)`)},
	{`,a,`, regexp.MustCompile(`(|a|)`)},
	// use escape
	{`a\,b`, regexp.MustCompile(`(a,b)`)},
	{`a\,bc\,def`, regexp.MustCompile(`(a,bc,def)`)},
	// multiple branches with escape
	{`a\,b,c`, regexp.MustCompile(`(a,b|c)`)},
	{`a,bc\,def`, regexp.MustCompile(`(a|bc,def)`)},
	// regexp quote
	{`a+b`, regexp.MustCompile(`(a\+b)`)},
	{`(a|bc)*def`, regexp.MustCompile(`(\(a\|bc\)\*def)`)},
	// unquote special values
	{`a\\bc`, regexp.MustCompile("(a\\\\bc)")},
	{`a\tb\,c`, regexp.MustCompile("(a\tb,c)")},
	{`a\tbc\n\ndef`, regexp.MustCompile("(a\tbc\n\ndef)")},
	// multiple groups
	{`a/b`, regexp.MustCompile("(a)(b)")},
	{`a//b/c`, regexp.MustCompile(`(a)()(b)(c)`)},
	{`a/bc/def`, regexp.MustCompile("(a)(bc)(def)")},
	{`a,b/c`, regexp.MustCompile("(a|b)(c)")},
	{`/a`, regexp.MustCompile(`()(a)`)},
	{`a/`, regexp.MustCompile(`(a)()`)},
	{`/a/`, regexp.MustCompile(`()(a)()`)},
	// multiple groups with escape
	{`a/b\/c`, regexp.MustCompile("(a)(b/c)")},
	{`a/\/bc\//def`, regexp.MustCompile("(a)(/bc/)(def)")},
	{`a\,b,c/d,e\/f`, regexp.MustCompile("(a,b|c)(d|e/f)")},
}
// TestGenMatcher checks that newMatcher (without word boundaries) compiles
// each source pattern of genMatcherTests to the expected regexp.
func TestGenMatcher(t *testing.T) {
	for _, test := range genMatcherTests {
		got, err := newMatcher(test.src, false)
		if err != nil {
			t.Errorf("newMatcher(%q) returns %q, want nil",
				test.src, err)
			continue
		}
		if !reflect.DeepEqual(got, test.dst) {
			t.Errorf("%q: got %q, want %q",
				test.src, got, test.dst)
		}
	}
}
// genMatcherWithBoundaryTests is a subset of genMatcherTests exercised
// with boundary=true: the whole pattern is wrapped in \b ... \b.
var genMatcherWithBoundaryTests = []struct {
	src string
	dst *regexp.Regexp
}{
	{`abc`, regexp.MustCompile(`\b(abc)\b`)},
	{`a,b`, regexp.MustCompile(`\b(a|b)\b`)},
	{`a\,b,c`, regexp.MustCompile(`\b(a,b|c)\b`)},
	{`a/b`, regexp.MustCompile(`\b(a)(b)\b`)},
	{`a/bc/def`, regexp.MustCompile(`\b(a)(bc)(def)\b`)},
	{`a,b/c`, regexp.MustCompile(`\b(a|b)(c)\b`)},
	{`a\,b,c/d,e\/f`, regexp.MustCompile(`\b(a,b|c)(d|e/f)\b`)},
}
// TestGenMatcherWithBoundary checks newMatcher with word boundaries
// enabled against genMatcherWithBoundaryTests.
func TestGenMatcherWithBoundary(t *testing.T) {
	for _, test := range genMatcherWithBoundaryTests {
		got, err := newMatcher(test.src, true)
		if err != nil {
			t.Errorf("newMatcher(%q) returns %q, want nil",
				test.src, err)
			continue
		}
		if !reflect.DeepEqual(got, test.dst) {
			t.Errorf("%q: got %q, want %q",
				test.src, got, test.dst)
		}
	}
}
// genReplacementTests maps newReplacement's from/to pattern pair to the
// expected per-group replacement maps (one map per '/'-separated group,
// keyed by source branch).
//
// The element type of the nested composite literals is elided (gofmt -s),
// which shrinks the table and makes each case fit on one line.
var genReplacementTests = []struct {
	from        string
	to          string
	replacement []map[string]string
}{
	// one branch
	{from: "abc", to: "def", replacement: []map[string]string{{"abc": "def"}}},
	{from: "abcdef", to: "ghijkl", replacement: []map[string]string{{"abcdef": "ghijkl"}}},
	// multiple branches
	{from: "a,b", to: "b,a", replacement: []map[string]string{{"a": "b", "b": "a"}}},
	{from: "a,,b,c", to: "d,e,f,g", replacement: []map[string]string{{"a": "d", "": "e", "b": "f", "c": "g"}}},
	{from: ",a", to: "a,", replacement: []map[string]string{{"": "a", "a": ""}}},
	{from: "a,b,c", to: ",d,", replacement: []map[string]string{{"a": "", "b": "d", "c": ""}}},
	// multiple groups
	{from: "a/b", to: "c/d", replacement: []map[string]string{{"a": "c"}, {"b": "d"}}},
	{from: "a//b/c", to: "d/e/f/g", replacement: []map[string]string{{"a": "d"}, {"": "e"}, {"b": "f"}, {"c": "g"}}},
	{from: "a,b/c", to: "d,e/f", replacement: []map[string]string{{"a": "d", "b": "e"}, {"c": "f"}}},
	{from: "/a", to: "a/", replacement: []map[string]string{{"": "a"}, {"a": ""}}},
	{from: "/a/", to: "b/c/d", replacement: []map[string]string{{"": "b"}, {"a": "c"}, {"": "d"}}},
}
// TestGenReplacement checks that newReplacement builds the expected
// per-group replacement maps from each from/to pattern pair.
func TestGenReplacement(t *testing.T) {
	for _, test := range genReplacementTests {
		got, err := newReplacement(test.from, test.to)
		if err != nil {
			t.Errorf("newReplacement(%q, %q) returns %q, want nil",
				test.from, test.to, err)
			continue
		}
		if !reflect.DeepEqual(got, test.replacement) {
			t.Errorf("%q, %q: got %q, want %q",
				test.from, test.to, got, test.replacement)
		}
	}
}
// replaceTests pairs Replacer from/to patterns with a source text and the
// text expected after ReplaceAll. One compact row per case.
var replaceTests = []struct {
	from string
	to   string
	src  string
	dst  string
}{
	// one branch
	{from: "abc", to: "def", src: "foo bar", dst: "foo bar"},
	{from: "abc", to: "def", src: "abc def", dst: "def def"},
	{from: "a", to: "b", src: "a b c a b c", dst: "b b c b b c"},
	// multiple branches
	{from: "abc,def", to: "def,abc", src: "abc def", dst: "def abc"},
	{from: "a,b,c,d", to: "e,f,g,h", src: "d c b a", dst: "h g f e"},
	{from: "a, ", to: " ,a", src: "a a a", dst: " a a "},
	// multiple groups
	{from: "a/b", to: "c/d", src: "aa ab ac ad", dst: "aa cd ac ad"},
	{from: "a//b/c", to: "d/e/f/g", src: "abc bca cab", dst: "defg bca cab"},
	{from: "dog,cat/s", to: "cat,dog/s", src: "cats cats dogs dogs cats", dst: "dogs dogs cats cats dogs"},
}
// TestReplace runs Replacer.ReplaceAll over the replaceTests table and
// compares the rewritten text with the expected output.
func TestReplace(t *testing.T) {
	for _, test := range replaceTests {
		r, err := NewReplacer(test.from, test.to, false)
		if err != nil {
			t.Errorf("NewReplacer(%q, %q) returns %q, want nil",
				test.from, test.to, err)
			continue
		}
		got := r.ReplaceAll(test.src)
		if !reflect.DeepEqual(got, test.dst) {
			t.Errorf("Replacer{%q, %q}: %q: got %q, want %q",
				test.from, test.to, test.src, got, test.dst)
		}
	}
}
|
// Copyright © 2013-2016 Pierre Neidhardt <ambrevar@gmail.com>
// Use of this file is governed by the license that can be found in LICENSE.
// TODO: Add shell auto-completion file.
// TODO: Allow for embedding covers. Have a look at:
// * mp4art (libmp4v2): mp4art --add cover.jpg track.m4a
// * vorbiscomment (vorbis-tools)
// * beets
// * http://superuser.com/questions/169151/embed-album-art-in-ogg-through-command-line-in-linux
// * ffmpeg -i in.mp3 -i in.jpg -map 0 -map 1 -c copy -metadata:s:v title="Album cover" -metadata:s:v comment="Cover (Front)" out.mp3
// TODO: Allow for fetching lyrics?
// TODO: GUI for manual tag editing?
// TODO: Duplicate audio detection? This might be overkill.
// TODO: Discogs support?
package main
import (
"bitbucket.org/ambrevar/demlo/cuesheet"
"bytes"
"crypto/md5"
"encoding/json"
"errors"
"flag"
"fmt"
"github.com/aarzilli/golua/lua"
"github.com/mgutz/ansi"
"github.com/wtolson/go-taglib"
"github.com/yookoala/realpath"
"image"
_ "image/gif"
_ "image/jpeg"
_ "image/png"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"sync"
)
// Application identity and hard size limits (in bytes).
const (
	APPLICATION = "demlo"
	VERSION = "2-rolling"
	COPYRIGHT = "Copyright (C) 2013-2016 Pierre Neidhardt"
	URL = "http://ambrevar.bitbucket.org/demlo"
	// COVER_CHECKSUM_BLOCK limits cover checksums to this amount of bytes for performance gain.
	COVER_CHECKSUM_BLOCK = 8 * 4096
	// 10M seems to be a reasonable max.
	CUESHEET_MAXSIZE = 10 * 1024 * 1024
	INDEX_MAXSIZE = 10 * 1024 * 1024
	SCRIPT_MAXSIZE = 10 * 1024 * 1024
)
// usage is the general help text printed above the flag listing.
var usage = `Batch-transcode files with user-written scripts for dynamic tagging
and encoding.
Folders are processed recursively. Only files with known extensions are processed.
New extensions can be specified from command-line.
All flags that do not require an argument are booleans. Without argument, they
take the true value. To negate them, use the form '-flag=false'.
See ` + URL + ` for more details.
`
// Package-level state: XDG lookup paths, script locations, parsed options
// and caches shared across worker goroutines.
var (
	XDG_CONFIG_HOME = os.Getenv("XDG_CONFIG_HOME")
	XDG_CONFIG_DIRS = os.Getenv("XDG_CONFIG_DIRS")
	XDG_DATA_DIRS = os.Getenv("XDG_DATA_DIRS")
	SYSTEM_SCRIPTROOT string
	USER_SCRIPTROOT string
	CONFIG string
	// File extensions recognized as cover images.
	COVER_EXT_LIST = map[string]bool{"gif": true, "jpeg": true, "jpg": true, "png": true}
	OPTIONS = options{}
	// CACHE holds the output index and the loaded script buffers.
	CACHE = struct {
		index map[string][]outputDesc
		scripts []scriptBuffer
	}{}
	// RE_PRINTABLE matches control characters (Unicode category C).
	RE_PRINTABLE = regexp.MustCompile(`\pC`)
	// VISITED_DST_COVERS records covers already written; guarded by the
	// embedded RWMutex since workers run concurrently.
	VISITED_DST_COVERS = struct {
		v map[dstCoverKey]bool
		sync.RWMutex
	}{v: map[dstCoverKey]bool{}}
)
// dstCoverKey identifies a written cover by destination path and content
// checksum; used as the key of VISITED_DST_COVERS.
type dstCoverKey struct {
	path string
	checksum string
}
// Options used in the config file and/or as CLI flags.
// Precedence: flags > config > defaults.
// Exception: extensions specified in flags are merged with config extensions.
type options struct {
	color bool
	cores int
	debug bool
	extensions stringSetFlag
	getcover bool
	gettags bool
	graphical bool
	index string
	overwrite bool
	postscript string
	prescript string
	process bool
	removesource bool
	scripts scriptSlice
}
// scriptBuffer is a script held in memory: its display name and contents.
type scriptBuffer struct {
	name string
	buf string
}
// scriptSlice accumulates repeated script flags; implements flag.Value.
type scriptSlice []string
// String renders the current (default/config) script list for flag help.
func (s *scriptSlice) String() string {
	// Print the default/config value.
	return fmt.Sprintf("%q", OPTIONS.scripts)
}
// Set appends one script path per flag occurrence.
func (s *scriptSlice) Set(arg string) error {
	*s = append(*s, arg)
	return nil
}
// stringSetFlag is a repeatable flag collecting a set of strings
// (e.g. file extensions); implements flag.Value.
type stringSetFlag map[string]bool
// String renders the set as a sorted, space-separated list prefixed with
// ": " (used in the flag help output).
func (s *stringSetFlag) String() string {
	keylist := []string{}
	for k := range *s {
		keylist = append(keylist, k)
	}
	sort.Strings(keylist)
	return ": " + strings.Join(keylist, " ")
}
// Set records one occurrence of the flag as a set member.
func (s *stringSetFlag) Set(arg string) error {
	(*s)[arg] = true
	return nil
}
// inputCover describes one cover image found embedded in, next to, or
// online for the input file.
type inputCover struct {
	// Supported format: gif, jpeg, png.
	format string
	// Size.
	width int
	height int
	// Cover checksum is partial. This speeds up the process but can yield false duplicates.
	checksum string
}
// outputCover describes where and how a cover should be written.
// NOTE(review): fields are exported, presumably so they are visible to the
// Lua scripts / serialization — confirm before changing.
type outputCover struct {
	Path string
	Format string
	Parameters []string
}
// TODO: Export all fields? Probably not a good idea: if FFprobe output changes,
// it could lead to undesired field overwriting.
// TODO: We cannot create an 'input struct' if we want all entries. However we
// can use a struct to unmarshal easily to known types. So we can use 2
// unmarshals: one to a struct for processing, one to an interface{} to pass to
// Lua.
// inputDesc aggregates everything known about one input file: realpath,
// FFmpeg/FFprobe probe data (exported fields, filled by JSON unmarshal),
// tags, covers and cuesheet-derived track information.
type inputDesc struct {
	path string // Realpath.
	bitrate int // In bytes per second.
	tags map[string]string
	embeddedCovers []inputCover
	externalCovers map[string]inputCover
	onlineCover inputCover
	// Index of the first audio stream.
	audioIndex int
	// FFmpeg data.
	Streams []struct {
		Bit_rate string
		Codec_name string
		Codec_type string
		Duration string
		Height int
		Tags map[string]string
		Width int
	}
	Format struct {
		Bit_rate string
		Duration string
		Format_name string
		Nb_streams int
		Tags map[string]string
	}
	// The following details for multi-track files are not transferred to Lua.
	filetags map[string]string
	cuesheet cuesheet.Cuesheet
	// Name of the matching file in the cuesheet.
	cuesheetFile string
	trackCount int
}
// We could store everything in 'parameters', but having a separate 'path' and
// 'format' allows for foolproofing.
// outputDesc is the per-track output specification (path, encoding
// parameters, tags and covers) produced by the user scripts.
type outputDesc struct {
	Path string
	Format string
	Parameters []string
	Tags map[string]string
	EmbeddedCovers []outputCover
	ExternalCovers map[string]outputCover
	OnlineCover outputCover
}
////////////////////////////////////////////////////////////////////////////////
// The format is:
// [input] | attr | [output]
// prettyPrint displays one attribute as "[input] | attr | [output]",
// wrapping values longer than valueMaxlen over subsequent lines and
// coloring input red / output green when they differ (and the change is
// not one of the "uninteresting" cases tested below).
func prettyPrint(attr, input, output string, attrMaxlen, valueMaxlen int, display *Slogger) {
	colorIn := ""
	colorOut := ""
	// Only colorize genuine changes: skip the default copy parameters and
	// cover lines that end in an empty path ('').
	if OPTIONS.color && input != output &&
		(attr != "parameters" || output != "[-c:a copy]") &&
		((attr != "embedded" && attr != "external") || (len(output) >= 3 && output[len(output)-3:] != " ''")) {
		colorIn = "red"
		colorOut = "green"
	}
	// Replace control characters to avoid mangling the output.
	input = RE_PRINTABLE.ReplaceAllString(input, " / ")
	output = RE_PRINTABLE.ReplaceAllString(output, " / ")
	// Work on runes so wrapping does not split multi-byte characters.
	in := []rune(input)
	out := []rune(output)
	min := func(a, b int) int {
		if a < b {
			return a
		}
		return b
	}
	// Print first line with title.
	display.Output.Printf(
		"%*v["+ansi.Color("%.*s", colorIn)+"] | %-*v | ["+ansi.Color("%.*s", colorOut)+"]\n",
		valueMaxlen-min(valueMaxlen, len(in)), "",
		valueMaxlen, input,
		attrMaxlen, attr,
		valueMaxlen, output)
	// Print the rest that does not fit on first line.
	for i := valueMaxlen; i < len(in) || i < len(out); i += valueMaxlen {
		in_lo := min(i, len(in))
		in_hi := min(i+valueMaxlen, len(in))
		out_lo := min(i, len(out))
		out_hi := min(i+valueMaxlen, len(out))
		in_delim_left, in_delim_right := "[", "]"
		out_delim_left, out_delim_right := "[", "]"
		// Once a side is exhausted, print padding instead of brackets.
		if i >= len(in) {
			in_delim_left, in_delim_right = " ", " "
		}
		if i >= len(out) {
			out_delim_left, out_delim_right = "", ""
		}
		display.Output.Printf(
			"%s"+ansi.Color("%s", colorIn)+"%s%*v | %*v | %s"+ansi.Color("%s", colorOut)+"%s\n",
			in_delim_left,
			string(in[in_lo:in_hi]),
			in_delim_right,
			valueMaxlen-in_hi+in_lo, "",
			attrMaxlen, "",
			out_delim_left,
			string(out[out_lo:out_hi]),
			out_delim_right)
	}
}
// preview prints a side-by-side (input vs. output) summary of one track:
// file path/format/parameters, sorted tags, then covers. Column widths
// adapt to the longest attribute name and the terminal width.
func preview(input inputDesc, output outputDesc, track int, display *Slogger) {
	prepareTrackTags(input, track)
	// Widest attribute name, starting from the longest built-in label.
	attrMaxlen := len("parameters")
	for k := range input.tags {
		if len(k) > attrMaxlen {
			attrMaxlen = len(k)
		}
	}
	for k := range output.Tags {
		if len(k) > attrMaxlen {
			attrMaxlen = len(k)
		}
	}
	maxCols, _, err := TerminalSize(int(os.Stdout.Fd()))
	if err != nil {
		log.Fatal(err)
	}
	// 'valueMaxlen' is the available width for input and output values. We
	// subtract some characters for the ' | ' around the attribute name and the
	// brackets around the values.
	valueMaxlen := (maxCols - attrMaxlen - 10) / 2
	// Sort tags.
	var tagList []string
	for k := range input.tags {
		tagList = append(tagList, k)
	}
	for k := range output.Tags {
		_, ok := input.tags[k]
		if !ok {
			tagList = append(tagList, k)
		}
	}
	sort.Strings(tagList)
	colorTitle := ""
	if OPTIONS.color {
		colorTitle = "white+b"
	}
	display.Output.Println()
	display.Output.Printf("%*v === "+ansi.Color("%-*v", colorTitle)+" ===\n",
		valueMaxlen, "",
		attrMaxlen, "FILE")
	prettyPrint("path", input.path, output.Path, attrMaxlen, valueMaxlen, display)
	prettyPrint("format", input.Format.Format_name, output.Format, attrMaxlen, valueMaxlen, display)
	prettyPrint("parameters", "bitrate="+strconv.Itoa(input.bitrate), fmt.Sprintf("%v", output.Parameters), attrMaxlen, valueMaxlen, display)
	display.Output.Printf("%*v === "+ansi.Color("%-*v", colorTitle)+" ===\n",
		valueMaxlen, "",
		attrMaxlen, "TAGS")
	for _, v := range tagList {
		// "encoder" is a field that is usually out of control, discard it.
		if v != "encoder" {
			prettyPrint(v, input.tags[v], output.Tags[v], attrMaxlen, valueMaxlen, display)
		}
	}
	display.Output.Printf("%*v === "+ansi.Color("%-*v", colorTitle)+" ===\n",
		valueMaxlen, "",
		attrMaxlen, "COVERS")
	for stream, cover := range input.embeddedCovers {
		in := fmt.Sprintf("'stream %v' [%vx%v] <%v>", stream, cover.width, cover.height, cover.format)
		// Default when the scripts produced no matching output cover.
		out := "<> [] ''"
		if stream < len(output.EmbeddedCovers) {
			out = fmt.Sprintf("<%v> %q '%v'", output.EmbeddedCovers[stream].Format, output.EmbeddedCovers[stream].Parameters, output.EmbeddedCovers[stream].Path)
		}
		prettyPrint("embedded", in, out, attrMaxlen, valueMaxlen, display)
	}
	for file, cover := range input.externalCovers {
		in := fmt.Sprintf("'%v' [%vx%v] <%v>", file, cover.width, cover.height, cover.format)
		out := fmt.Sprintf("<%v> %q '%v'", output.ExternalCovers[file].Format, output.ExternalCovers[file].Parameters, output.ExternalCovers[file].Path)
		prettyPrint("external", in, out, attrMaxlen, valueMaxlen, display)
	}
	if input.onlineCover.format != "" {
		cover := input.onlineCover
		in := fmt.Sprintf("[%vx%v] <%v>", cover.width, cover.height, cover.format)
		out := fmt.Sprintf("<%v> %q '%v'", output.OnlineCover.Format, output.OnlineCover.Parameters, output.OnlineCover.Path)
		prettyPrint("online", in, out, attrMaxlen, valueMaxlen, display)
	}
	display.Output.Println()
}
// getEmbeddedCover extracts every embedded cover of the input file with
// FFmpeg and returns their metadata plus the raw image bytes (so they can
// be re-encoded or written later without re-extraction).
func getEmbeddedCover(input inputDesc, display *Slogger) (embeddedCovers []inputCover, embeddedCoversCache [][]byte) {
	// FFmpeg treats embedded covers like video streams.
	for i := 0; i < input.Format.Nb_streams; i++ {
		if input.Streams[i].Codec_name != "image2" &&
			input.Streams[i].Codec_name != "png" &&
			input.Streams[i].Codec_name != "mjpeg" {
			continue
		}
		// Copy the image stream as-is to stdout.
		cmd := exec.Command("ffmpeg", "-nostdin", "-v", "error", "-y", "-i", input.path, "-an", "-sn", "-c:v", "copy", "-f", "image2", "-map", "0:"+strconv.Itoa(i), "-")
		var stderr bytes.Buffer
		cmd.Stderr = &stderr
		cover, err := cmd.Output()
		if err != nil {
			display.Error.Printf(stderr.String())
			continue
		}
		reader := bytes.NewBuffer(cover)
		config, format, err := image.DecodeConfig(reader)
		if err != nil {
			display.Warning.Print(err)
			continue
		}
		// Checksum only the first COVER_CHECKSUM_BLOCK bytes (see constant).
		hi := len(cover)
		if hi > COVER_CHECKSUM_BLOCK {
			hi = COVER_CHECKSUM_BLOCK
		}
		checksum := fmt.Sprintf("%x", md5.Sum(cover[:hi]))
		embeddedCoversCache = append(embeddedCoversCache, cover)
		embeddedCovers = append(embeddedCovers, inputCover{format: format, width: config.Width, height: config.Height, checksum: checksum})
	}
	return embeddedCovers, embeddedCoversCache
}
// getExternalCover scans the folder of the input file for cover images
// (extensions in COVER_EXT_LIST) and returns their metadata keyed by file
// name. Per-file errors are logged as warnings and skipped.
//
// FIX: the original deferred fd.Close() inside the loop, which kept every
// cover file open until the function returned — a descriptor leak on
// folders with many images. Files are now closed as soon as they have
// been read.
func getExternalCover(input inputDesc, display *Slogger) (externalCovers map[string]inputCover, err error) {
	// TODO: Memoize external cover queries.
	fd, err := os.Open(filepath.Dir(input.path))
	if err != nil {
		return nil, err
	}
	names, err := fd.Readdirnames(-1)
	fd.Close()
	if err != nil {
		return nil, err
	}
	externalCovers = make(map[string]inputCover)
	for _, f := range names {
		if !COVER_EXT_LIST[Ext(f)] {
			continue
		}
		fd, err := os.Open(filepath.Join(filepath.Dir(input.path), f))
		if err != nil {
			display.Warning.Print(err)
			continue
		}
		st, err := fd.Stat()
		if err != nil {
			fd.Close()
			display.Warning.Print(err)
			continue
		}
		config, format, err := image.DecodeConfig(fd)
		if err != nil {
			fd.Close()
			display.Warning.Print(err)
			continue
		}
		// Checksum only the first COVER_CHECKSUM_BLOCK bytes (see constant).
		hi := st.Size()
		if hi > COVER_CHECKSUM_BLOCK {
			hi = COVER_CHECKSUM_BLOCK
		}
		buf := [COVER_CHECKSUM_BLOCK]byte{}
		_, err = fd.ReadAt(buf[:hi], 0)
		fd.Close()
		if err != nil && err != io.EOF {
			display.Warning.Print(err)
			continue
		}
		checksum := fmt.Sprintf("%x", md5.Sum(buf[:hi]))
		externalCovers[f] = inputCover{format: format, width: config.Width, height: config.Height, checksum: checksum}
	}
	return externalCovers, nil
}
// prepareTags fills input.filetags from the FFprobe format and stream
// tags (format < stream < cuesheet precedence), then looks for a cuesheet
// — first in the "cuesheet" tag, then as an external .cue file — to set
// input.trackCount and input.cuesheetFile.
//
// BUG FIX: the original tested a never-assigned 'err' variable after
// cuesheet.New, so the external-cuesheet fallback was unreachable; it now
// tests the actual cuesheet.New error.
func prepareTags(input *inputDesc, display *Slogger) {
	input.tags = make(map[string]string)
	input.filetags = make(map[string]string)
	// Precedence: cuesheet > stream tags > format tags.
	for k, v := range input.Format.Tags {
		input.filetags[strings.ToLower(k)] = v
	}
	for k, v := range input.Streams[input.audioIndex].Tags {
		key := strings.ToLower(k)
		_, ok := input.filetags[key]
		if !ok || input.filetags[key] == "" {
			input.filetags[key] = v
		}
	}
	var errCuesheet error
	input.cuesheet, errCuesheet = cuesheet.New(input.filetags["cuesheet"])
	if errCuesheet != nil {
		// If no cuesheet was found in the tags, we check for external ones.
		pathNoext := StripExt(input.path)
		// Instead of checking the extension of files in current folder, we check
		// if a file with the 'cue' extension exists. This is faster, especially
		// for huge folders.
		for _, ext := range []string{"cue", "cuE", "cUe", "cUE", "Cue", "CuE", "CUe", "CUE"} {
			cs := pathNoext + "." + ext
			st, err := os.Stat(cs)
			if err != nil {
				continue
			}
			if st.Size() > CUESHEET_MAXSIZE {
				display.Warning.Printf("Cuesheet size %v > %v bytes, skipping", cs, CUESHEET_MAXSIZE)
				continue
			}
			buf, err := ioutil.ReadFile(cs)
			if err != nil {
				display.Warning.Print(err)
				continue
			}
			input.cuesheet, errCuesheet = cuesheet.New(string(buf))
			break
		}
	}
	// Remove cuesheet from tags to avoid printing it.
	delete(input.filetags, "cuesheet")
	// The number of tracks in current file is usually 1, it can be more if a
	// cuesheet is found.
	input.trackCount = 1
	if errCuesheet == nil {
		// Copy the cuesheet header to the tags. Some entries appear both in the
		// header and in the track details. We map the cuesheet header entries to
		// the respective equivalent for FFmpeg tags.
		for k, v := range input.cuesheet.Header {
			switch k {
			case "PERFORMER", "SONGWRITER":
				input.filetags["album_artist"] = v
			case "TITLE":
				input.filetags["album"] = v
			default:
				input.filetags[strings.ToLower(k)] = v
			}
		}
		// A cuesheet might have several FILE entries, or even none (non-standard).
		// In case of none, tracks are stored at file "" (the empty string) in the
		// Cuesheet structure. Otherwise, we find the most related file.
		base := stringNorm(filepath.Base(input.path))
		max := 0.0
		for f := range input.cuesheet.Files {
			r := stringRel(stringNorm(f), base)
			if r > max {
				max = r
				input.cuesheetFile = f
			}
		}
		input.trackCount = len(input.cuesheet.Files[input.cuesheetFile])
	}
}
// prepareTrackTags sets 'input.tags' for one track: all file-level tags
// first, then (when a cuesheet is present) the track-specific cuesheet tags
// on top, since track tags must have higher priority.
// NOTE(review): 'input' is passed by value, but 'input.tags' is a map, so the
// mutation is visible to the caller anyway.
func prepareTrackTags(input inputDesc, track int) {
// Copy all tags from input.filetags to input.tags.
for k, v := range input.filetags {
input.tags[k] = v
}
if len(input.cuesheet.Files) > 0 {
// If there is a cuesheet, we fetch track tags as required. Note that this
// process differs from the above cuesheet extraction in that it is
// track-related as opposed to album-related. Cuesheets make a distinction
// between the two. Some tags may appear both in an album field and a track
// field. Thus track tags must have higher priority.
// NOTE(review): callers pass both a 0-based index (runAllScripts) and the
// literal 1 (process, single-track case) for 'track' — confirm the
// intended index base; an out-of-range index would panic here.
for k, v := range input.cuesheet.Files[input.cuesheetFile][track].Tags {
input.tags[strings.ToLower(k)] = v
}
}
}
// runAllScripts computes the output description for one track: it seeds the
// output from the preloaded index when available (or from the input tags and
// 'defaultTags' otherwise), runs every cached Lua script in the sandbox 'L',
// then foolproofs the result so that it is always usable downstream.
func runAllScripts(input inputDesc, track int, defaultTags map[string]string, L *lua.State, display *Slogger) (output outputDesc) {
prepareTrackTags(input, track)
if o, ok := CACHE.index[input.path]; ok && len(o) > track {
// A preloaded index entry overrides the defaults for this track.
output = CACHE.index[input.path][track]
// NOTE(review): writing the global OPTIONS.gettags from worker goroutines
// is a data race if several workers run concurrently — confirm and guard.
OPTIONS.gettags = false
} else {
// Default tags.
output.Tags = make(map[string]string)
for k, v := range input.tags {
output.Tags[k] = v
}
for k, v := range defaultTags {
output.Tags[k] = v
}
// Default codec options.
output.Format = input.Format.Format_name
}
// Create a Lua sandbox containing input and output, then run scripts.
makeSandboxOutput(L, output)
for _, script := range CACHE.scripts {
err := runScript(L, script.name, input)
if err != nil {
display.Error.Printf("Script %s: %s", script.name, err)
continue
}
}
// Read back whatever the scripts left in the sandbox's output table.
output = scriptOutput(L)
// Foolproofing.
// -No format: use input.format.
// -No parameters: use "-c:a copy".
// -Empty output basename: use input path.
// -Remove empty tags to avoid storing empty strings in FFmpeg.
if output.Format == "" {
output.Format = input.Format.Format_name
}
if len(output.Parameters) == 0 {
output.Parameters = []string{"-c:a", "copy"}
}
if Basename(output.Path) == "" {
output.Path = input.path
}
var err error
output.Path, err = filepath.Abs(output.Path)
if err != nil {
display.Warning.Print("Cannot get absolute path:", err)
return output
}
for tag, value := range output.Tags {
if value == "" {
delete(output.Tags, tag)
}
}
return output
}
// Create a new destination file 'dst'.
// As a special case, if 'inputPath == dst' and 'removesource == true',
// then modify the file inplace.
// If no third-party program overwrites existing files, this approach cannot
// clobber existing files.
// Returns the path to write to: either 'dst' itself (freshly created with the
// source file's permissions) or a temp file next to 'dst' when 'dst' exists
// and the operation is not inplace.
func makeTrackDst(dst string, inputPath string, removeSource bool) (string, error) {
if _, err := os.Stat(dst); err == nil || !os.IsNotExist(err) {
// 'dst' exists.
// The realpath is required to check if inplace.
// The 'realpath' can only be expanded when the parent folder exists.
dst, err = realpath.Realpath(dst)
if err != nil {
return "", err
}
if inputPath != dst || !removeSource {
// If not inplace, create a temp file.
f, err := TempFile(filepath.Dir(dst), StripExt(filepath.Base(dst))+"_", "."+Ext(dst))
if err != nil {
return "", err
}
dst = f.Name()
f.Close()
}
} else {
// 'dst' does not exist: reserve the name eagerly, with the same
// permissions as the source file.
st, err := os.Stat(inputPath)
if err != nil {
return "", err
}
f, err := os.OpenFile(dst, os.O_CREATE|os.O_EXCL, st.Mode())
if err != nil {
// Either the parent folder is not writable, or a race condition happened:
// file was created between existence check and file creation.
return "", err
}
f.Close()
}
return dst, nil
}
// Create a new destination file 'dst'. See makeTrackDst.
// As a special case, if the checksums match in input and dst, return "", nil.
// A "", nil result therefore means: nothing to do for this cover.
// TODO: Test how memoization scales with VISITED_DST_COVERS.
func makeCoverDst(dst string, inputPath string, checksum string, display *Slogger) (string, error) {
	if st, err := os.Stat(dst); err == nil || !os.IsNotExist(err) {
		// 'dst' exists.
		// Realpath is required for cache key uniqueness.
		dst, err = realpath.Realpath(dst)
		if err != nil {
			return "", err
		}
		// BUGFIX: test and mark in a single critical section. The previous
		// RLock-check followed by a separate Lock-set allowed two goroutines to
		// both observe 'unvisited' and process the same cover twice.
		key := dstCoverKey{path: dst, checksum: checksum}
		VISITED_DST_COVERS.Lock()
		visited := VISITED_DST_COVERS.v[key]
		VISITED_DST_COVERS.v[key] = true
		VISITED_DST_COVERS.Unlock()
		if visited {
			return "", nil
		}
		// Compute checksum of existing cover and early-out if equal.
		fd, err := os.Open(dst)
		if err != nil {
			return "", err
		}
		defer fd.Close()
		// TODO: Cache checksums.
		hi := st.Size()
		if hi > COVER_CHECKSUM_BLOCK {
			hi = COVER_CHECKSUM_BLOCK
		}
		buf := [COVER_CHECKSUM_BLOCK]byte{}
		_, err = fd.ReadAt(buf[:hi], 0)
		if err != nil && err != io.EOF {
			return "", err
		}
		dstChecksum := fmt.Sprintf("%x", md5.Sum(buf[:hi]))
		if checksum == dstChecksum {
			// Identical content already in place.
			return "", nil
		}
		// If not inplace, create a temp file.
		f, err := TempFile(filepath.Dir(dst), StripExt(filepath.Base(dst))+"_", "."+Ext(dst))
		if err != nil {
			return "", err
		}
		dst = f.Name()
		f.Close()
	} else {
		// 'dst' does not exist.
		st, err := os.Stat(inputPath)
		if err != nil {
			return "", err
		}
		fd, err := os.OpenFile(dst, os.O_CREATE|os.O_EXCL, st.Mode())
		if err != nil {
			// Either the parent folder is not writable, or a race condition happened:
			// file was created between existence check and file creation.
			return "", err
		}
		fd.Close()
		// Save to cache.
		dst, err = realpath.Realpath(dst)
		if err != nil {
			return "", err
		}
		VISITED_DST_COVERS.Lock()
		VISITED_DST_COVERS.v[dstCoverKey{path: dst, checksum: checksum}] = true
		VISITED_DST_COVERS.Unlock()
	}
	return dst, nil
}
// transferCovers writes one cover (embedded, external or online) to
// 'cover.Path'. Without conversion parameters the bytes are copied verbatim;
// otherwise FFmpeg converts them to 'cover.Format'. A cover that resolves to
// an identical existing destination (makeCoverDst returns "") is skipped.
func transferCovers(cover outputCover, coverName string, inputPath string, inputSource io.Reader, checksum string, display *Slogger) {
	if cover.Path == "" {
		return
	}
	// Destination resolution is identical for both branches, so do it once.
	var err error
	cover.Path, err = makeCoverDst(cover.Path, inputPath, checksum, display)
	if err != nil {
		display.Error.Print(err)
		return
	}
	if cover.Path == "" {
		// Identical file exists.
		return
	}
	if len(cover.Parameters) == 0 || cover.Format == "" {
		// Raw copy, no conversion.
		fd, err := os.OpenFile(cover.Path, os.O_WRONLY|os.O_TRUNC, 0666)
		if err != nil {
			display.Warning.Println(err)
			return
		}
		// BUGFIX: the descriptor used to leak when io.Copy failed, since the
		// error path returned before Close. 'defer' covers every exit.
		defer fd.Close()
		if _, err = io.Copy(fd, inputSource); err != nil {
			display.Warning.Println(err)
			return
		}
	} else {
		// Convert with FFmpeg, reading the source from stdin.
		cmdArray := []string{"-nostdin", "-v", "error", "-y", "-i", "-", "-an", "-sn"}
		cmdArray = append(cmdArray, cover.Parameters...)
		cmdArray = append(cmdArray, "-f", cover.Format, cover.Path)
		display.Debug.Printf("Cover %v parameters: %q", coverName, cmdArray)
		cmd := exec.Command("ffmpeg", cmdArray...)
		var stderr bytes.Buffer
		cmd.Stderr = &stderr
		cmd.Stdin = inputSource
		_, err := cmd.Output()
		if err != nil {
			// 'stderr' is not a format string: use Print, not Printf.
			display.Warning.Print(stderr.String())
			return
		}
	}
}
// goroutine main function, a.k.a worker.
// 'queue' contains realpaths to files.
// Each worker probes a file with ffprobe, prepares tags and covers, runs the
// Lua scripts per track, previews the result and — with -p — re-encodes,
// copies or renames the file and transfers the covers. It signals 'quit'
// exactly once on return.
func process(queue chan string, quit chan bool) {
	defer func() { quit <- true }()
	display := newSlogger(OPTIONS.debug, OPTIONS.color)
	defer display.Flush()
	// Compile scripts.
	L, err := makeSandbox(CACHE.scripts, display)
	if err != nil {
		display.Error.Print(err)
	}
	defer L.Close()
	for file := range queue {
		display.Flush()
		display.Section.Println(file)
		cmd := exec.Command("ffprobe", "-v", "error", "-print_format", "json", "-show_streams", "-show_format", file)
		var stderr bytes.Buffer
		cmd.Stderr = &stderr
		out, err := cmd.Output()
		if err != nil {
			display.Error.Print("ffprobe: ", stderr.String())
			continue
		}
		var input inputDesc
		err = json.Unmarshal(out, &input)
		if err != nil {
			display.Error.Print(err)
			continue
		}
		input.path = file // realpath
		// Index of the first audio stream.
		input.audioIndex = -1
		for k, v := range input.Streams {
			if v.Codec_type == "audio" {
				input.audioIndex = k
				break
			}
		}
		if input.audioIndex == -1 {
			display.Warning.Print("Non-audio file:", input.path)
			continue
		}
		// Set bitrate.
		// FFmpeg stores bitrate as a string, Demlo needs a number. If
		// 'streams[audioIndex].bit_rate' is empty (e.g. in APE files), look for
		// 'format.bit_rate'. To ease querying bitrate from user scripts, store it
		// in 'input.bitrate'.
		input.bitrate, err = strconv.Atoi(input.Streams[input.audioIndex].Bit_rate)
		if err != nil {
			input.bitrate, err = strconv.Atoi(input.Format.Bit_rate)
			if err != nil {
				display.Warning.Print("Cannot get bitrate from", input.path)
				continue
			}
		}
		// prepareTags should be run before setting the covers.
		prepareTags(&input, display)
		input.externalCovers, err = getExternalCover(input, display)
		if err != nil {
			display.Warning.Print(err)
			continue
		}
		var embeddedCoversCache [][]byte
		var onlineCoverCache []byte
		input.embeddedCovers, embeddedCoversCache = getEmbeddedCover(input, display)
		var defaultTags map[string]string
		// We retrieve tags online only for single-track files. TODO: Add support for multi-track files.
		if input.trackCount == 1 {
			var releaseID ReleaseID
			prepareTrackTags(input, 1)
			if OPTIONS.gettags {
				releaseID, defaultTags, err = getOnlineTags(input, display)
				if err != nil {
					display.Debug.Print("Online tags query error: ", err)
				}
			}
			if OPTIONS.getcover {
				onlineCoverCache, input.onlineCover, err = getOnlineCover(input, releaseID, display)
				if err != nil {
					display.Debug.Print("Online cover query error: ", err)
				}
			}
		}
		var output = make([]outputDesc, input.trackCount)
		for track := 0; track < input.trackCount; track++ {
			output[track] = runAllScripts(input, track, defaultTags, L, display)
		}
		//--------------------------------------------------------------------------------
		// Preview.
		if OPTIONS.graphical {
			for track := 0; track < input.trackCount; track++ {
				preview(input, output[track], track, display)
				// Warn for existence.
				_, err = os.Stat(output[track].Path)
				if err == nil || !os.IsNotExist(err) {
					display.Warning.Println("Destination exists:", output[track].Path)
				}
			}
		} else {
			// Should never fail.
			buf1, _ := json.Marshal(input.path)
			buf2, _ := json.MarshalIndent(output, "", "\t")
			display.Output.Printf("%s: %s,\n", buf1, buf2)
		}
		if !OPTIONS.process {
			continue
		}
		//--------------------------------------------------------------------------------
		// Re-encode / copy / rename.
		for track := 0; track < input.trackCount; track++ {
			err = os.MkdirAll(filepath.Dir(output[track].Path), 0777)
			if err != nil {
				display.Error.Print(err)
				continue
			}
			// Copy embeddedCovers, externalCovers and onlineCover.
			for stream, cover := range output[track].EmbeddedCovers {
				inputSource := bytes.NewBuffer(embeddedCoversCache[stream])
				transferCovers(cover, "embedded "+strconv.Itoa(stream), input.path, inputSource, input.embeddedCovers[stream].checksum, display)
			}
			for file, cover := range output[track].ExternalCovers {
				inputPath := filepath.Join(filepath.Dir(input.path), file)
				inputSource, err := os.Open(inputPath)
				if err != nil {
					continue
				}
				transferCovers(cover, "external '"+file+"'", inputPath, inputSource, input.externalCovers[file].checksum, display)
				inputSource.Close()
			}
			{
				inputSource := bytes.NewBuffer(onlineCoverCache)
				transferCovers(output[track].OnlineCover, "online", input.path, inputSource, input.onlineCover.checksum, display)
			}
			// If encoding changed, use FFmpeg. Otherwise, copy/rename the file to
			// speed up the process. If tags have changed but not the encoding, we use
			// taglib to set them.
			var encodingChanged = false
			var tagsChanged = false
			if input.trackCount > 1 {
				// Split cue-sheet.
				encodingChanged = true
			}
			if input.Format.Format_name != output[track].Format {
				encodingChanged = true
			}
			if len(output[track].Parameters) != 2 ||
				output[track].Parameters[0] != "-c:a" ||
				output[track].Parameters[1] != "copy" {
				encodingChanged = true
			}
			// Test if tags have changed.
			for k, v := range input.tags {
				if k != "encoder" && output[track].Tags[k] != v {
					tagsChanged = true
					break
				}
			}
			if !tagsChanged {
				for k, v := range output[track].Tags {
					if k != "encoder" && input.tags[k] != v {
						tagsChanged = true
						break
					}
				}
			}
			// TODO: Move this to 2/3 separate functions.
			// TODO: Add to condition: `|| output[track].format == "taglib-unsupported-format"`.
			if encodingChanged {
				// Store encoding parameters.
				ffmpegParameters := []string{}
				// Be verbose only when running a single process. Otherwise output
				// would get messy.
				if OPTIONS.cores > 1 {
					ffmpegParameters = append(ffmpegParameters, "-v", "warning")
				} else {
					ffmpegParameters = append(ffmpegParameters, "-v", "error")
				}
				// By default, FFmpeg reads stdin while running. Disable this feature to
				// avoid unexpected problems.
				ffmpegParameters = append(ffmpegParameters, "-nostdin")
				// FFmpeg should always overwrite: if a temp file is created to avoid
				// overwriting, FFmpeg should clobber it.
				ffmpegParameters = append(ffmpegParameters, "-y")
				ffmpegParameters = append(ffmpegParameters, "-i", input.path)
				// Stream codec.
				ffmpegParameters = append(ffmpegParameters, output[track].Parameters...)
				// Get cuesheet splitting parameters.
				if len(input.cuesheet.Files) > 0 {
					d, _ := strconv.ParseFloat(input.Streams[input.audioIndex].Duration, 64)
					start, duration := FFmpegSplitTimes(input.cuesheet, input.cuesheetFile, track, d)
					ffmpegParameters = append(ffmpegParameters, "-ss", start, "-t", duration)
				}
				// If there are no covers, do not copy any video stream to avoid errors.
				if input.Format.Nb_streams < 2 {
					ffmpegParameters = append(ffmpegParameters, "-vn")
				}
				// Remove non-cover streams and extra audio streams.
				// Must add all streams first.
				ffmpegParameters = append(ffmpegParameters, "-map", "0")
				for i := 0; i < input.Format.Nb_streams; i++ {
					if (input.Streams[i].Codec_type == "video" && input.Streams[i].Codec_name != "image2" && input.Streams[i].Codec_name != "png" && input.Streams[i].Codec_name != "mjpeg") ||
						(input.Streams[i].Codec_type == "audio" && i > input.audioIndex) ||
						(input.Streams[i].Codec_type != "audio" && input.Streams[i].Codec_type != "video") {
						ffmpegParameters = append(ffmpegParameters, "-map", "-0:"+strconv.Itoa(i))
					}
				}
				// Remove subtitles if any.
				ffmpegParameters = append(ffmpegParameters, "-sn")
				// '-map_metadata -1' clears all metadata first.
				ffmpegParameters = append(ffmpegParameters, "-map_metadata", "-1")
				for tag, value := range output[track].Tags {
					ffmpegParameters = append(ffmpegParameters, "-metadata", tag+"="+value)
				}
				// Format.
				ffmpegParameters = append(ffmpegParameters, "-f", output[track].Format)
				// Output file.
				// FFmpeg cannot transcode inplace, so we force creating a temp file if
				// necessary.
				var dst string
				dst, err := makeTrackDst(output[track].Path, input.path, false)
				if err != nil {
					display.Error.Print(err)
					continue
				}
				ffmpegParameters = append(ffmpegParameters, dst)
				display.Debug.Printf("Audio %v parameters: %q", track, ffmpegParameters)
				cmd := exec.Command("ffmpeg", ffmpegParameters...)
				var stderr bytes.Buffer
				cmd.Stderr = &stderr
				err = cmd.Run()
				if err != nil {
					// BUGFIX: 'stderr' is not a format string; Printf would mangle
					// any '%' in FFmpeg's output.
					display.Error.Print(stderr.String())
					continue
				}
				if OPTIONS.removesource {
					// TODO: This realpath is already expanded in 'makeTrackDst'. Factor
					// it.
					output[track].Path, err = realpath.Realpath(output[track].Path)
					if err != nil {
						display.Error.Print(err)
						continue
					}
					if input.path == output[track].Path {
						// If inplace, rename.
						err = os.Rename(dst, output[track].Path)
						if err != nil {
							display.Error.Print(err)
						}
					} else {
						err = os.Remove(input.path)
						if err != nil {
							display.Error.Print(err)
						}
					}
				}
			} else {
				var err error
				var dst string
				dst, err = makeTrackDst(output[track].Path, input.path, OPTIONS.removesource)
				if err != nil {
					display.Error.Print(err)
					continue
				}
				if input.path != dst {
					// Copy/rename file if not inplace.
					err = nil
					if OPTIONS.removesource {
						err = os.Rename(input.path, dst)
					}
					if err != nil || !OPTIONS.removesource {
						// If renaming failed, it might be because of a cross-device
						// destination. We try to copy instead.
						err := CopyFile(dst, input.path)
						if err != nil {
							display.Error.Println(err)
							continue
						}
						if OPTIONS.removesource {
							err = os.Remove(input.path)
							if err != nil {
								display.Error.Println(err)
							}
						}
					}
				}
				if tagsChanged {
					// TODO: Can TagLib remove extra tags?
					f, err := taglib.Read(dst)
					if err != nil {
						display.Error.Print(err)
						continue
					}
					// TODO: Arbitrary tag support with taglib?
					if output[track].Tags["album"] != "" {
						f.SetAlbum(output[track].Tags["album"])
					}
					if output[track].Tags["artist"] != "" {
						f.SetArtist(output[track].Tags["artist"])
					}
					if output[track].Tags["comment"] != "" {
						f.SetComment(output[track].Tags["comment"])
					}
					if output[track].Tags["genre"] != "" {
						f.SetGenre(output[track].Tags["genre"])
					}
					if output[track].Tags["title"] != "" {
						f.SetTitle(output[track].Tags["title"])
					}
					if output[track].Tags["track"] != "" {
						t, err := strconv.Atoi(output[track].Tags["track"])
						if err == nil {
							f.SetTrack(t)
						}
					}
					if output[track].Tags["date"] != "" {
						t, err := strconv.Atoi(output[track].Tags["date"])
						if err == nil {
							f.SetYear(t)
						}
					}
					err = f.Save()
					if err != nil {
						display.Error.Print(err)
					}
					// BUGFIX: close now. The previous 'defer f.Close()' inside the
					// queue loop leaked one TagLib handle per processed file for the
					// whole lifetime of the worker.
					f.Close()
				}
			}
		}
	}
}
// Return the first existing match from 'list'.
// A script name is probed in order as: the name itself, the name plus a
// '.lua' suffix, then the same two under the user and system script roots.
func findscript(name string) (path string, st os.FileInfo, err error) {
	withExt := name + ".lua"
	candidates := []string{
		name,
		withExt,
		filepath.Join(USER_SCRIPTROOT, name),
		filepath.Join(USER_SCRIPTROOT, withExt),
		filepath.Join(SYSTEM_SCRIPTROOT, name),
		filepath.Join(SYSTEM_SCRIPTROOT, withExt),
	}
	for _, candidate := range candidates {
		info, statErr := os.Stat(candidate)
		if statErr == nil {
			return candidate, info, nil
		}
	}
	return "", nil, errors.New("Script not found")
}
// Note to packagers: those following lines can be patched to fit the local
// filesystem.
// init applies XDG fallbacks, then resolves the script roots and the config
// file location.
func init() {
	log.SetFlags(0)
	if XDG_CONFIG_HOME == "" {
		XDG_CONFIG_HOME = filepath.Join(os.Getenv("HOME"), ".config")
	}
	if XDG_CONFIG_DIRS == "" {
		XDG_CONFIG_DIRS = "/etc/xdg"
	}
	if XDG_DATA_DIRS == "" {
		XDG_DATA_DIRS = "/usr/local/share/:/usr/share"
	}
	// firstExisting returns the first existing 'subpath' found under any
	// directory of the colon-separated 'pathlist', or "" when none exists.
	firstExisting := func(pathlist, subpath string) string {
		for _, root := range filepath.SplitList(pathlist) {
			if root == "" {
				root = "."
			}
			candidate := filepath.Join(root, subpath)
			if _, statErr := os.Stat(candidate); statErr == nil {
				return candidate
			}
		}
		return ""
	}
	SYSTEM_SCRIPTROOT = firstExisting(XDG_DATA_DIRS, filepath.Join(APPLICATION, "scripts"))
	USER_SCRIPTROOT = firstExisting(XDG_CONFIG_HOME, filepath.Join(APPLICATION, "scripts"))
	CONFIG = os.Getenv("DEMLORC")
	if CONFIG == "" {
		CONFIG = filepath.Join(XDG_CONFIG_HOME, APPLICATION, APPLICATION+"rc")
	}
}
// main wires everything together: load the config (it changes flag defaults),
// parse flags, sanity-check the environment, load the index and the scripts
// into CACHE, then walk the given paths and feed files to a pool of workers.
func main() {
// Load config first since it changes the default flag values.
st, err := os.Stat(CONFIG)
if err == nil && st.Mode().IsRegular() {
fmt.Fprintf(os.Stderr, ":: Load config: %v\n", CONFIG)
OPTIONS = loadConfig(CONFIG)
}
if OPTIONS.extensions == nil {
// Defaults: Init here so that unspecified config options get properly set.
OPTIONS.extensions = stringSetFlag{
"aac": true,
"ape": true,
"flac": true,
"ogg": true,
"m4a": true,
"mp3": true,
"mp4": true,
"mpc": true,
"wav": true,
"wv": true,
}
}
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "Usage: %v [OPTIONS] FILES|FOLDERS\n\n", os.Args[0])
fmt.Fprintln(os.Stderr, usage)
fmt.Fprintln(os.Stderr, "Options:")
flag.PrintDefaults()
}
flag.BoolVar(&OPTIONS.color, "color", OPTIONS.color, "Color output.")
flag.IntVar(&OPTIONS.cores, "cores", OPTIONS.cores, "Run N processes in parallel. If 0, use all online cores.")
flag.BoolVar(&OPTIONS.debug, "debug", false, "Enable debug messages.")
flag.Var(&OPTIONS.extensions, "ext", "Additional extensions to look for when a folder is browsed.")
flag.BoolVar(&OPTIONS.getcover, "c", OPTIONS.getcover, "Fetch cover from the Internet.")
flag.BoolVar(&OPTIONS.gettags, "t", OPTIONS.gettags, "Fetch tags from the Internet.")
flag.BoolVar(&OPTIONS.graphical, "g", OPTIONS.graphical, "Use formatted output.")
flag.StringVar(&OPTIONS.index, "i", OPTIONS.index, `Use index file to set input and output metadata.
The index can be built using the non-formatted preview output.`)
flag.StringVar(&OPTIONS.postscript, "post", OPTIONS.postscript, "Run Lua commands after the other scripts.")
flag.StringVar(&OPTIONS.prescript, "pre", OPTIONS.prescript, "Run Lua commands before the other scripts.")
flag.BoolVar(&OPTIONS.process, "p", OPTIONS.process, "Apply changes: set tags and format, move/copy result to destination file.")
flag.BoolVar(&OPTIONS.removesource, "rmsrc", OPTIONS.removesource, "Remove source file after processing.")
var flagScripts scriptSlice
flag.Var(&flagScripts, "s", `Specify scripts to run in provided order.
This option can be specified several times. If only the basename without extension is given,
and if it is not found in current folder, the corresponding standard script will be used.`)
var flagVersion = flag.Bool("v", false, "Print version and exit.")
flag.Parse()
if *flagVersion {
fmt.Println(APPLICATION, VERSION, COPYRIGHT)
return
}
if flag.Arg(0) == "" {
flag.Usage()
return
}
// Check for essential programs.
_, err = exec.LookPath("ffmpeg")
if err != nil {
log.Fatal(err)
}
_, err = exec.LookPath("ffprobe")
if err != nil {
log.Fatal(err)
}
// Disable formatted output if piped.
// NOTE(review): the Stat errors are ignored; 'st' would be nil and
// 'st.Mode()' would panic if Stat ever failed — confirm this cannot happen.
st, _ = os.Stdout.Stat()
if (st.Mode() & os.ModeCharDevice) == 0 {
OPTIONS.graphical = false
}
st, _ = os.Stderr.Stat()
if (st.Mode() & os.ModeCharDevice) == 0 {
OPTIONS.color = false
}
// Main logger.
display := newSlogger(OPTIONS.debug, OPTIONS.color)
// Load index to cache.
if OPTIONS.index != "" {
st, err := os.Stat(OPTIONS.index)
if err != nil {
display.Warning.Printf("Index not found: [%v]", OPTIONS.index)
} else {
if st.Size() > INDEX_MAXSIZE {
display.Warning.Printf("Index size > %v bytes, skipping: %v", INDEX_MAXSIZE, OPTIONS.index)
} else {
buf, err := ioutil.ReadFile(OPTIONS.index)
if err != nil {
display.Warning.Print("Index is not readable:", err)
} else {
// Enclose JSON list in a valid structure. Since index ends with a
// comma, hence the required dummy entry.
buf = append(append([]byte{'{'}, buf...), []byte(`"": null}`)...)
err = json.Unmarshal(buf, &CACHE.index)
if err != nil {
display.Warning.Printf("Invalid index %v: %v", OPTIONS.index, err)
}
}
}
}
}
// Load scripts to cache.
if OPTIONS.prescript != "" {
CACHE.scripts = append(CACHE.scripts, scriptBuffer{name: "prescript", buf: OPTIONS.prescript})
}
if len(flagScripts) > 0 {
// CLI overrides default/config values.
OPTIONS.scripts = flagScripts
}
for _, s := range OPTIONS.scripts {
path, st, err := findscript(s)
if err != nil {
display.Warning.Printf("%v: %v", err, s)
continue
}
if sz := st.Size(); sz > SCRIPT_MAXSIZE {
display.Warning.Printf("Script size %v > %v bytes, skipping: %v", sz, SCRIPT_MAXSIZE, path)
continue
}
buf, err := ioutil.ReadFile(path)
if err != nil {
display.Warning.Print("Script is not readable: ", err)
continue
}
display.Info.Printf("Load script: %v", path)
CACHE.scripts = append(CACHE.scripts, scriptBuffer{name: path, buf: string(buf)})
}
if OPTIONS.postscript != "" {
CACHE.scripts = append(CACHE.scripts, scriptBuffer{name: "postscript", buf: OPTIONS.postscript})
}
// Limit number of cores to online cores.
if OPTIONS.cores > runtime.NumCPU() || OPTIONS.cores <= 0 {
OPTIONS.cores = runtime.NumCPU()
}
display.Flush()
// If all workers are ready at the same time, they will query 'OPTIONS.cores'
// files from the queue. Add some extra space to the queue in the unlikely
// event the folder walk is slower than the workers.
queue := make(chan string, 2*OPTIONS.cores)
quit := make(chan bool, OPTIONS.cores)
for i := 0; i < OPTIONS.cores; i++ {
go process(queue, quit)
// Wait for all routines.
// Each worker sends one value on 'quit' when it returns; these deferred
// receives make main block until every worker has finished.
defer func() { <-quit }()
}
visited := map[string]bool{}
for _, file := range flag.Args() {
visit := func(path string, info os.FileInfo, err error) error {
if err != nil || !info.Mode().IsRegular() {
return nil
}
if !OPTIONS.extensions[strings.ToLower(Ext(path))] {
return nil
}
rpath, err := realpath.Realpath(path)
if err != nil {
display.Error.Print("Cannot get real path:", err)
display.Flush()
return nil
}
// Deduplicate: several arguments may resolve to the same real file.
if !visited[rpath] {
visited[rpath] = true
queue <- rpath
}
return nil
}
// 'visit' always keeps going, so no error.
_ = filepath.Walk(file, visit)
}
close(queue)
}
demlo.go: Comment types
// Copyright © 2013-2016 Pierre Neidhardt <ambrevar@gmail.com>
// Use of this file is governed by the license that can be found in LICENSE.
// TODO: Add shell auto-completion file.
// TODO: Allow for embedding covers. Have a look at:
// * mp4art (libmp4v2): mp4art --add cover.jpg track.m4a
// * vorbiscomment (vorbis-tools)
// * beets
// * http://superuser.com/questions/169151/embed-album-art-in-ogg-through-command-line-in-linux
// * ffmpeg -i in.mp3 -i in.jpg -map 0 -map 1 -c copy -metadata:s:v title="Album cover" -metadata:s:v comment="Cover (Front)" out.mp3
// TODO: Allow for fetching lyrics?
// TODO: GUI for manual tag editing?
// TODO: Duplicate audio detection? This might be overkill.
// TODO: Discogs support?
package main
import (
"bitbucket.org/ambrevar/demlo/cuesheet"
"bytes"
"crypto/md5"
"encoding/json"
"errors"
"flag"
"fmt"
"github.com/aarzilli/golua/lua"
"github.com/mgutz/ansi"
"github.com/wtolson/go-taglib"
"github.com/yookoala/realpath"
"image"
_ "image/gif"
_ "image/jpeg"
_ "image/png"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"sync"
)
// Application identity and hard limits on the sizes of the files Demlo reads.
const (
APPLICATION = "demlo"
VERSION = "2-rolling"
COPYRIGHT = "Copyright (C) 2013-2016 Pierre Neidhardt"
URL = "http://ambrevar.bitbucket.org/demlo"
// COVER_CHECKSUM_BLOCK limits cover checksums to this amount of bytes for performance gain.
COVER_CHECKSUM_BLOCK = 8 * 4096
// 10M seems to be a reasonable max.
CUESHEET_MAXSIZE = 10 * 1024 * 1024
INDEX_MAXSIZE = 10 * 1024 * 1024
SCRIPT_MAXSIZE = 10 * 1024 * 1024
)
// usage is the help text printed before the option list by flag.Usage.
const usage = `Batch-transcode files with user-written scripts for dynamic tagging
and encoding.
Folders are processed recursively. Only files with known extensions are processed.
New extensions can be specified from command-line.
All flags that do not require an argument are booleans. Without argument, they
take the true value. To negate them, use the form '-flag=false'.
See ` + URL + ` for more details.
`
var (
// XDG base directories; fallbacks are applied in init().
XDG_CONFIG_HOME = os.Getenv("XDG_CONFIG_HOME")
XDG_CONFIG_DIRS = os.Getenv("XDG_CONFIG_DIRS")
XDG_DATA_DIRS = os.Getenv("XDG_DATA_DIRS")
// Script roots and config path, resolved in init().
SYSTEM_SCRIPTROOT string
USER_SCRIPTROOT string
CONFIG string
// Extensions recognized as cover images.
COVER_EXT_LIST = map[string]bool{"gif": true, "jpeg": true, "jpg": true, "png": true}
OPTIONS = options{}
// CACHE holds data loaded once at startup: the optional index and the scripts.
CACHE = struct {
index map[string][]outputDesc
scripts []scriptBuffer
}{}
// RE_PRINTABLE matches control characters (Unicode category C).
RE_PRINTABLE = regexp.MustCompile(`\pC`)
// VISITED_DST_COVERS memoizes cover destinations already handled; guarded
// by the embedded RWMutex since workers access it concurrently.
VISITED_DST_COVERS = struct {
v map[dstCoverKey]bool
sync.RWMutex
}{v: map[dstCoverKey]bool{}}
)
// Identify visited cover files with {path,checksum} as map key.
// Used as the key type of VISITED_DST_COVERS.
type dstCoverKey struct {
path string
checksum string
}
// Options used in the config file and/or as CLI flags.
// Precedence: flags > config > defaults.
// Exception: extensions specified in flags are merged with config extensions.
type options struct {
color bool // Colored terminal output.
cores int // Number of parallel worker goroutines.
debug bool // Enable debug messages.
extensions stringSetFlag // File extensions processed when browsing folders.
getcover bool // Fetch cover from the Internet.
gettags bool // Fetch tags from the Internet.
graphical bool // Formatted (table) preview output.
index string // Path to the index file, if any.
overwrite bool // NOTE(review): not referenced in this chunk — confirm it is used elsewhere.
postscript string // Lua commands run after the other scripts.
prescript string // Lua commands run before the other scripts.
process bool // Apply changes instead of only previewing.
removesource bool // Remove source file after processing.
scripts scriptSlice // Scripts to run, in order.
}
// Load scripts in memory to reduce I/O.
// We need to store the script name as well for logging.
type scriptBuffer struct {
name string // Script path, or "prescript"/"postscript" for CLI snippets.
buf string // Full script source.
}
// Scripts specified from commandline.
// scriptSlice implements flag.Value so it can collect repeated -s options.
type scriptSlice []string

// String shows the default/config value in the flag help output.
func (list *scriptSlice) String() string {
	return fmt.Sprintf("%q", OPTIONS.scripts)
}

// Set appends one more script name; it never fails.
func (list *scriptSlice) Set(arg string) error {
	*list = append(*list, arg)
	return nil
}
// stringSetFlag is a string set usable as a flag.Value: each occurrence of
// the flag adds one member.
type stringSetFlag map[string]bool

// String renders the members sorted, prefixed by ": ".
func (set *stringSetFlag) String() string {
	members := make([]string, 0, len(*set))
	for member := range *set {
		members = append(members, member)
	}
	sort.Strings(members)
	return ": " + strings.Join(members, " ")
}

// Set records 'arg' as a member; it never fails.
func (set *stringSetFlag) Set(arg string) error {
	(*set)[arg] = true
	return nil
}
// inputCover describes one cover image found in or next to the input file.
type inputCover struct {
// Supported format: gif, jpeg, png.
format string
// Size.
width int
height int
// Cover checksum is partial. This speeds up the process but can yield false duplicates.
checksum string
}
// outputCover tells where and how a cover should be written: destination
// path, target format and FFmpeg parameters (empty means a raw byte copy;
// see transferCovers).
type outputCover struct {
Path string
Format string
Parameters []string
}
// TODO: Export all fields? Probably not a good idea: if FFprobe output changes,
// it could lead to undesired field overwriting.
// TODO: We cannot create an 'input struct' if we want all entries. However we
// can use a struct to unmarshal easily to known types. So we can use 2
// unmarshals: one to a struct for processing, one to an interface{} to pass to
// Lua.
// inputDesc gathers everything known about one input file: the FFprobe JSON
// (exported fields, filled by json.Unmarshal) plus derived, unexported data.
type inputDesc struct {
path string // Realpath.
bitrate int // In bytes per second.
tags map[string]string
embeddedCovers []inputCover
externalCovers map[string]inputCover
onlineCover inputCover
// Index of the first audio stream.
audioIndex int
// FFmpeg data.
Streams []struct {
Bit_rate string
Codec_name string
Codec_type string
Duration string
Height int
Tags map[string]string
Width int
}
Format struct {
Bit_rate string
Duration string
Format_name string
Nb_streams int
Tags map[string]string
}
// The following details for multi-track files are not transferred to Lua.
filetags map[string]string
cuesheet cuesheet.Cuesheet
// Name of the matching file in the cuesheet.
cuesheetFile string
trackCount int
}
// We could store everything in 'parameters', but having a separate 'path' and
// 'format' allows for foolproofing.
// Fields are exported so they round-trip through the JSON index and the Lua
// sandbox.
type outputDesc struct {
Path string
Format string
Parameters []string
Tags map[string]string
EmbeddedCovers []outputCover
ExternalCovers map[string]outputCover
OnlineCover outputCover
}
////////////////////////////////////////////////////////////////////////////////
// The format is:
// [input] | attr | [output]

// prettyPrint displays one attribute row of the preview table, wrapping both
// values over as many lines as needed and coloring them when they differ.
func prettyPrint(attr, input, output string, attrMaxlen, valueMaxlen int, display *Slogger) {
	inColor := ""
	outColor := ""
	// Highlight only meaningful changes: skip the default parameters and
	// cover rows whose output ends in an empty quoted name.
	if OPTIONS.color && input != output &&
		(attr != "parameters" || output != "[-c:a copy]") &&
		((attr != "embedded" && attr != "external") || (len(output) >= 3 && output[len(output)-3:] != " ''")) {
		inColor = "red"
		outColor = "green"
	}
	// Replace control characters to avoid mangling the output.
	input = RE_PRINTABLE.ReplaceAllString(input, " / ")
	output = RE_PRINTABLE.ReplaceAllString(output, " / ")
	inRunes := []rune(input)
	outRunes := []rune(output)
	min := func(a, b int) int {
		if a < b {
			return a
		}
		return b
	}
	// The first line carries the attribute title.
	display.Output.Printf(
		"%*v["+ansi.Color("%.*s", inColor)+"] | %-*v | ["+ansi.Color("%.*s", outColor)+"]\n",
		valueMaxlen-min(valueMaxlen, len(inRunes)), "",
		valueMaxlen, input,
		attrMaxlen, attr,
		valueMaxlen, output)
	// Continuation lines for whatever did not fit on the first line.
	for i := valueMaxlen; i < len(inRunes) || i < len(outRunes); i += valueMaxlen {
		inLo := min(i, len(inRunes))
		inHi := min(i+valueMaxlen, len(inRunes))
		outLo := min(i, len(outRunes))
		outHi := min(i+valueMaxlen, len(outRunes))
		inLeft, inRight := "[", "]"
		outLeft, outRight := "[", "]"
		if i >= len(inRunes) {
			inLeft, inRight = " ", " "
		}
		if i >= len(outRunes) {
			outLeft, outRight = "", ""
		}
		display.Output.Printf(
			"%s"+ansi.Color("%s", inColor)+"%s%*v | %*v | %s"+ansi.Color("%s", outColor)+"%s\n",
			inLeft,
			string(inRunes[inLo:inHi]),
			inRight,
			valueMaxlen-inHi+inLo, "",
			attrMaxlen, "",
			outLeft,
			string(outRunes[outLo:outHi]),
			outRight)
	}
}
// preview pretty-prints the planned changes for 'track' of 'input' as a
// three-column table (input value | attribute | output value), sized to the
// current terminal width.
func preview(input inputDesc, output outputDesc, track int, display *Slogger) {
	prepareTrackTags(input, track)
	// Widest attribute name, used to align the middle column.
	attrMaxlen := len("parameters")
	for k := range input.tags {
		if len(k) > attrMaxlen {
			attrMaxlen = len(k)
		}
	}
	for k := range output.Tags {
		if len(k) > attrMaxlen {
			attrMaxlen = len(k)
		}
	}
	maxCols, _, err := TerminalSize(int(os.Stdout.Fd()))
	if err != nil {
		log.Fatal(err)
	}
	// 'valueMaxlen' is the available width for input and output values. We
	// subtract some characters for the ' | ' around the attribute name and the
	// brackets around the values.
	valueMaxlen := (maxCols - attrMaxlen - 10) / 2
	// Sort tags.
	var tagList []string
	for k := range input.tags {
		tagList = append(tagList, k)
	}
	// Also list output-only tags, once.
	for k := range output.Tags {
		_, ok := input.tags[k]
		if !ok {
			tagList = append(tagList, k)
		}
	}
	sort.Strings(tagList)
	colorTitle := ""
	if OPTIONS.color {
		colorTitle = "white+b"
	}
	display.Output.Println()
	display.Output.Printf("%*v === "+ansi.Color("%-*v", colorTitle)+" ===\n",
		valueMaxlen, "",
		attrMaxlen, "FILE")
	prettyPrint("path", input.path, output.Path, attrMaxlen, valueMaxlen, display)
	prettyPrint("format", input.Format.Format_name, output.Format, attrMaxlen, valueMaxlen, display)
	prettyPrint("parameters", "bitrate="+strconv.Itoa(input.bitrate), fmt.Sprintf("%v", output.Parameters), attrMaxlen, valueMaxlen, display)
	display.Output.Printf("%*v === "+ansi.Color("%-*v", colorTitle)+" ===\n",
		valueMaxlen, "",
		attrMaxlen, "TAGS")
	for _, v := range tagList {
		// "encoder" is a field that is usually out of control, discard it.
		if v != "encoder" {
			prettyPrint(v, input.tags[v], output.Tags[v], attrMaxlen, valueMaxlen, display)
		}
	}
	display.Output.Printf("%*v === "+ansi.Color("%-*v", colorTitle)+" ===\n",
		valueMaxlen, "",
		attrMaxlen, "COVERS")
	for stream, cover := range input.embeddedCovers {
		in := fmt.Sprintf("'stream %v' [%vx%v] <%v>", stream, cover.width, cover.height, cover.format)
		// Placeholder when no output cover matches this stream index.
		out := "<> [] ''"
		if stream < len(output.EmbeddedCovers) {
			out = fmt.Sprintf("<%v> %q '%v'", output.EmbeddedCovers[stream].Format, output.EmbeddedCovers[stream].Parameters, output.EmbeddedCovers[stream].Path)
		}
		prettyPrint("embedded", in, out, attrMaxlen, valueMaxlen, display)
	}
	for file, cover := range input.externalCovers {
		in := fmt.Sprintf("'%v' [%vx%v] <%v>", file, cover.width, cover.height, cover.format)
		out := fmt.Sprintf("<%v> %q '%v'", output.ExternalCovers[file].Format, output.ExternalCovers[file].Parameters, output.ExternalCovers[file].Path)
		prettyPrint("external", in, out, attrMaxlen, valueMaxlen, display)
	}
	if input.onlineCover.format != "" {
		cover := input.onlineCover
		in := fmt.Sprintf("[%vx%v] <%v>", cover.width, cover.height, cover.format)
		out := fmt.Sprintf("<%v> %q '%v'", output.OnlineCover.Format, output.OnlineCover.Parameters, output.OnlineCover.Path)
		prettyPrint("online", in, out, attrMaxlen, valueMaxlen, display)
	}
	display.Output.Println()
}
// getEmbeddedCover extracts the embedded covers of 'input' (FFmpeg exposes
// them as image video streams) and returns one inputCover descriptor per
// cover, plus the raw image bytes cached for the later transfer step.
func getEmbeddedCover(input inputDesc, display *Slogger) (embeddedCovers []inputCover, embeddedCoversCache [][]byte) {
	// FFmpeg treats embedded covers like video streams.
	for i := 0; i < input.Format.Nb_streams; i++ {
		if input.Streams[i].Codec_name != "image2" &&
			input.Streams[i].Codec_name != "png" &&
			input.Streams[i].Codec_name != "mjpeg" {
			continue
		}
		cmd := exec.Command("ffmpeg", "-nostdin", "-v", "error", "-y", "-i", input.path, "-an", "-sn", "-c:v", "copy", "-f", "image2", "-map", "0:"+strconv.Itoa(i), "-")
		var stderr bytes.Buffer
		cmd.Stderr = &stderr
		cover, err := cmd.Output()
		if err != nil {
			// Fix: use Print, not Printf — FFmpeg's stderr is not a format
			// string and any '%' in it would be mangled into '%!' verbs.
			display.Error.Print(stderr.String())
			continue
		}
		reader := bytes.NewBuffer(cover)
		config, format, err := image.DecodeConfig(reader)
		if err != nil {
			display.Warning.Print(err)
			continue
		}
		// Only the first COVER_CHECKSUM_BLOCK bytes take part in the checksum.
		hi := len(cover)
		if hi > COVER_CHECKSUM_BLOCK {
			hi = COVER_CHECKSUM_BLOCK
		}
		checksum := fmt.Sprintf("%x", md5.Sum(cover[:hi]))
		embeddedCoversCache = append(embeddedCoversCache, cover)
		embeddedCovers = append(embeddedCovers, inputCover{format: format, width: config.Width, height: config.Height, checksum: checksum})
	}
	return embeddedCovers, embeddedCoversCache
}
// getExternalCover scans the folder of 'input' for image files with a known
// cover extension and returns a descriptor (dimensions, format, checksum of
// the leading block) for each readable one. Unreadable or undecodable files
// are warned about and skipped.
func getExternalCover(input inputDesc, display *Slogger) (externalCovers map[string]inputCover, err error) {
	// TODO: Memoize external cover queries.
	fd, err := os.Open(filepath.Dir(input.path))
	if err != nil {
		return nil, err
	}
	names, err := fd.Readdirnames(-1)
	fd.Close()
	if err != nil {
		return nil, err
	}
	externalCovers = make(map[string]inputCover)
	for _, f := range names {
		if !COVER_EXT_LIST[Ext(f)] {
			continue
		}
		fd, err := os.Open(filepath.Join(filepath.Dir(input.path), f))
		if err != nil {
			display.Warning.Print(err)
			continue
		}
		// Fix: close explicitly on every path instead of 'defer' in a loop —
		// deferred closes would pile up until the function returns, leaking
		// descriptors in large folders.
		st, err := fd.Stat()
		if err != nil {
			display.Warning.Print(err)
			fd.Close()
			continue
		}
		config, format, err := image.DecodeConfig(fd)
		if err != nil {
			display.Warning.Print(err)
			fd.Close()
			continue
		}
		// Only the first COVER_CHECKSUM_BLOCK bytes take part in the checksum.
		hi := st.Size()
		if hi > COVER_CHECKSUM_BLOCK {
			hi = COVER_CHECKSUM_BLOCK
		}
		buf := [COVER_CHECKSUM_BLOCK]byte{}
		_, err = fd.ReadAt(buf[:hi], 0)
		fd.Close()
		if err != nil && err != io.EOF {
			display.Warning.Print(err)
			continue
		}
		checksum := fmt.Sprintf("%x", md5.Sum(buf[:hi]))
		externalCovers[f] = inputCover{format: format, width: config.Width, height: config.Height, checksum: checksum}
	}
	return externalCovers, nil
}
// prepareTags fills 'input.filetags' from the FFprobe format and stream tags
// (with the documented precedence), parses the cuesheet — embedded in the
// tags, or else from an external '.cue' file — and sets 'input.cuesheetFile'
// and 'input.trackCount' accordingly. It also initializes 'input.tags'.
func prepareTags(input *inputDesc, display *Slogger) {
	input.tags = make(map[string]string)
	input.filetags = make(map[string]string)
	// Precedence: cuesheet > stream tags > format tags.
	for k, v := range input.Format.Tags {
		input.filetags[strings.ToLower(k)] = v
	}
	for k, v := range input.Streams[input.audioIndex].Tags {
		key := strings.ToLower(k)
		_, ok := input.filetags[key]
		if !ok || input.filetags[key] == "" {
			input.filetags[key] = v
		}
	}
	var errCuesheet error
	input.cuesheet, errCuesheet = cuesheet.New(input.filetags["cuesheet"])
	// Fix: the original tested an always-nil 'err' here, which made the
	// external-cuesheet fallback unreachable. Test the actual parse error.
	if errCuesheet != nil {
		// If no cuesheet was found in the tags, we check for external ones.
		pathNoext := StripExt(input.path)
		// Instead of checking the extension of files in current folder, we check
		// if a file with the 'cue' extension exists. This is faster, especially
		// for huge folders.
		for _, ext := range []string{"cue", "cuE", "cUe", "cUE", "Cue", "CuE", "CUe", "CUE"} {
			cs := pathNoext + "." + ext
			st, err := os.Stat(cs)
			if err != nil {
				continue
			}
			if st.Size() > CUESHEET_MAXSIZE {
				// Fix: report the actual size; the original printed the path
				// in the size slot of the message.
				display.Warning.Printf("Cuesheet size %v > %v bytes, skipping: %v", st.Size(), CUESHEET_MAXSIZE, cs)
				continue
			}
			buf, err := ioutil.ReadFile(cs)
			if err != nil {
				display.Warning.Print(err)
				continue
			}
			input.cuesheet, errCuesheet = cuesheet.New(string(buf))
			break
		}
	}
	// Remove cuesheet from tags to avoid printing it.
	delete(input.filetags, "cuesheet")
	// The number of tracks in current file is usually 1, it can be more if a
	// cuesheet is found.
	input.trackCount = 1
	if errCuesheet == nil {
		// Copy the cuesheet header to the tags. Some entries appear both in the
		// header and in the track details. We map the cuesheet header entries to
		// the respective equivalent for FFmpeg tags.
		for k, v := range input.cuesheet.Header {
			switch k {
			case "PERFORMER":
				input.filetags["album_artist"] = v
			case "SONGWRITER":
				input.filetags["album_artist"] = v
			case "TITLE":
				input.filetags["album"] = v
			default:
				input.filetags[strings.ToLower(k)] = v
			}
		}
		// A cuesheet might have several FILE entries, or even none (non-standard).
		// In case of none, tracks are stored at file "" (the empty string) in the
		// Cuesheet structure. Otherwise, we find the most related file.
		base := stringNorm(filepath.Base(input.path))
		max := 0.0
		for f := range input.cuesheet.Files {
			r := stringRel(stringNorm(f), base)
			if r > max {
				max = r
				input.cuesheetFile = f
			}
		}
		input.trackCount = len(input.cuesheet.Files[input.cuesheetFile])
	}
}
// prepareTrackTags rebuilds 'input.tags' for the given track: it starts from
// the album/file-level tags and overlays any track-specific cuesheet tags.
// Note that 'input' is passed by value, but 'input.tags' is a map, so the
// caller observes the updates.
func prepareTrackTags(input inputDesc, track int) {
	// Album/file-level tags first.
	for key, value := range input.filetags {
		input.tags[key] = value
	}
	if len(input.cuesheet.Files) == 0 {
		return
	}
	// With a cuesheet, fetch track tags as required. Unlike the album-related
	// cuesheet extraction in prepareTags, this is track-related; cuesheets
	// make that distinction, and a tag may appear in both an album field and
	// a track field, so track tags must take priority.
	trackTags := input.cuesheet.Files[input.cuesheetFile][track].Tags
	for key, value := range trackTags {
		input.tags[strings.ToLower(key)] = value
	}
}
// runAllScripts computes the output description for 'track' of 'input': it
// seeds the output from the cached index entry (if any) or from the input and
// default tags, runs every cached Lua script over it, then applies
// foolproofing defaults to the result.
func runAllScripts(input inputDesc, track int, defaultTags map[string]string, L *lua.State, display *Slogger) (output outputDesc) {
	prepareTrackTags(input, track)
	if o, ok := CACHE.index[input.path]; ok && len(o) > track {
		// An index entry overrides the computed defaults.
		output = CACHE.index[input.path][track]
		// NOTE(review): this writes the global OPTIONS from a worker
		// goroutine — presumably to skip online queries once an index entry
		// is used; confirm this is race-free with the other workers.
		OPTIONS.gettags = false
	} else {
		// Default tags.
		output.Tags = make(map[string]string)
		for k, v := range input.tags {
			output.Tags[k] = v
		}
		// Online defaults override input tags.
		for k, v := range defaultTags {
			output.Tags[k] = v
		}
		// Default codec options.
		output.Format = input.Format.Format_name
	}
	// Create a Lua sandbox containing input and output, then run scripts.
	makeSandboxOutput(L, output)
	for _, script := range CACHE.scripts {
		err := runScript(L, script.name, input)
		if err != nil {
			display.Error.Printf("Script %s: %s", script.name, err)
			continue
		}
	}
	// Read back the (possibly modified) output from the sandbox.
	output = scriptOutput(L)
	// Foolproofing.
	// -No format: use input.format.
	// -No parameters: use "-c:a copy".
	// -Empty output basename: use input path.
	// -Remove empty tags to avoid storing empty strings in FFmpeg.
	if output.Format == "" {
		output.Format = input.Format.Format_name
	}
	if len(output.Parameters) == 0 {
		output.Parameters = []string{"-c:a", "copy"}
	}
	if Basename(output.Path) == "" {
		output.Path = input.path
	}
	var err error
	output.Path, err = filepath.Abs(output.Path)
	if err != nil {
		display.Warning.Print("Cannot get absolute path:", err)
		return output
	}
	for tag, value := range output.Tags {
		if value == "" {
			delete(output.Tags, tag)
		}
	}
	return output
}
// makeTrackDst creates a new destination file for 'dst' and returns the path
// to actually write to.
// As a special case, if 'inputPath == dst' and 'removeSource == true', the
// file is modified inplace.
// If no third-party program overwrites existing files, this approach cannot
// clobber existing files.
func makeTrackDst(dst string, inputPath string, removeSource bool) (string, error) {
	_, statErr := os.Stat(dst)
	if statErr != nil && os.IsNotExist(statErr) {
		// 'dst' does not exist yet: create it exclusively, copying the
		// permissions of the source file.
		st, err := os.Stat(inputPath)
		if err != nil {
			return "", err
		}
		f, err := os.OpenFile(dst, os.O_CREATE|os.O_EXCL, st.Mode())
		if err != nil {
			// Either the parent folder is not writable, or a race condition
			// happened: the file was created between the existence check and
			// the file creation.
			return "", err
		}
		f.Close()
		return dst, nil
	}
	// 'dst' exists. The realpath is required to check whether the operation
	// is inplace; it can only be expanded when the parent folder exists.
	realDst, err := realpath.Realpath(dst)
	if err != nil {
		return "", err
	}
	dst = realDst
	if inputPath != dst || !removeSource {
		// Not inplace: write to a temp file next to the destination.
		f, err := TempFile(filepath.Dir(dst), StripExt(filepath.Base(dst))+"_", "."+Ext(dst))
		if err != nil {
			return "", err
		}
		dst = f.Name()
		f.Close()
	}
	return dst, nil
}
// Create a new destination file 'dst'. See makeTrackDst.
// As a special case, if the checksums match in input and dst, return "", nil.
// A "" return also means the (dst, checksum) pair was already handled in this
// run, as recorded in the VISITED_DST_COVERS cache.
// TODO: Test how memoization scales with VISITED_DST_COVERS.
func makeCoverDst(dst string, inputPath string, checksum string, display *Slogger) (string, error) {
	if st, err := os.Stat(dst); err == nil || !os.IsNotExist(err) {
		// 'dst' exists.
		// Realpath is required for cache key uniqueness.
		dst, err = realpath.Realpath(dst)
		if err != nil {
			return "", err
		}
		// Skip covers already processed in this run.
		// NOTE(review): the check below and the set that follows are not one
		// atomic operation (RLock released before Lock) — two workers could
		// both see 'visited == false' and process the same cover. Confirm
		// whether that duplication is acceptable.
		VISITED_DST_COVERS.RLock()
		visited := VISITED_DST_COVERS.v[dstCoverKey{path: dst, checksum: checksum}]
		VISITED_DST_COVERS.RUnlock()
		if visited {
			return "", nil
		}
		VISITED_DST_COVERS.Lock()
		VISITED_DST_COVERS.v[dstCoverKey{path: dst, checksum: checksum}] = true
		VISITED_DST_COVERS.Unlock()
		// Compute checksum of existing cover and early-out if equal.
		fd, err := os.Open(dst)
		if err != nil {
			return "", err
		}
		defer fd.Close()
		// TODO: Cache checksums.
		// Only the first COVER_CHECKSUM_BLOCK bytes take part in the checksum.
		hi := st.Size()
		if hi > COVER_CHECKSUM_BLOCK {
			hi = COVER_CHECKSUM_BLOCK
		}
		buf := [COVER_CHECKSUM_BLOCK]byte{}
		_, err = (*fd).ReadAt(buf[:hi], 0)
		if err != nil && err != io.EOF {
			return "", err
		}
		dstChecksum := fmt.Sprintf("%x", md5.Sum(buf[:hi]))
		if checksum == dstChecksum {
			return "", nil
		}
		// If not inplace, create a temp file.
		f, err := TempFile(filepath.Dir(dst), StripExt(filepath.Base(dst))+"_", "."+Ext(dst))
		if err != nil {
			return "", err
		}
		dst = f.Name()
		f.Close()
	} else {
		// 'dst' does not exist.
		// Create it exclusively, copying the source file's permissions.
		st, err := os.Stat(inputPath)
		if err != nil {
			return "", err
		}
		fd, err := os.OpenFile(dst, os.O_CREATE|os.O_EXCL, st.Mode())
		if err != nil {
			// Either the parent folder is not writable, or a race condition happened:
			// file was created between existence check and file creation.
			return "", err
		}
		fd.Close()
		// Save to cache.
		dst, err = realpath.Realpath(dst)
		if err != nil {
			return "", err
		}
		VISITED_DST_COVERS.Lock()
		VISITED_DST_COVERS.v[dstCoverKey{path: dst, checksum: checksum}] = true
		VISITED_DST_COVERS.Unlock()
	}
	return dst, nil
}
// transferCovers writes one output cover to 'cover.Path', either by direct
// byte copy (when no conversion parameters/format are requested) or by piping
// 'inputSource' through FFmpeg. 'checksum' identifies the source cover so an
// identical existing destination can be skipped (see makeCoverDst).
func transferCovers(cover outputCover, coverName string, inputPath string, inputSource io.Reader, checksum string, display *Slogger) {
	var err error
	if cover.Path == "" {
		return
	}
	if len(cover.Parameters) == 0 || cover.Format == "" {
		// Direct copy, no transcoding.
		cover.Path, err = makeCoverDst(cover.Path, inputPath, checksum, display)
		if err != nil {
			display.Error.Print(err)
			return
		}
		if cover.Path == "" {
			// Identical file exists.
			return
		}
		fd, err := os.OpenFile(cover.Path, os.O_WRONLY|os.O_TRUNC, 0666)
		if err != nil {
			display.Warning.Println(err)
			return
		}
		if _, err = io.Copy(fd, inputSource); err != nil {
			// Fix: close the file on the error path too; the original
			// returned here without closing, leaking the descriptor.
			fd.Close()
			display.Warning.Println(err)
			return
		}
		// Fix: report a close error instead of discarding it — a failed
		// close can mean the data never hit the disk.
		if err = fd.Close(); err != nil {
			display.Warning.Println(err)
		}
	} else {
		// Transcode through FFmpeg.
		cover.Path, err = makeCoverDst(cover.Path, inputPath, checksum, display)
		if err != nil {
			display.Error.Print(err)
			return
		}
		if cover.Path == "" {
			// Identical file exists.
			return
		}
		cmdArray := []string{"-nostdin", "-v", "error", "-y", "-i", "-", "-an", "-sn"}
		cmdArray = append(cmdArray, cover.Parameters...)
		cmdArray = append(cmdArray, "-f", cover.Format, cover.Path)
		display.Debug.Printf("Cover %v parameters: %q", coverName, cmdArray)
		cmd := exec.Command("ffmpeg", cmdArray...)
		var stderr bytes.Buffer
		cmd.Stderr = &stderr
		cmd.Stdin = inputSource
		_, err := cmd.Output()
		if err != nil {
			// Fix: use Print, not Printf — FFmpeg's stderr is not a format
			// string and stray '%' would be mangled into '%!' verbs.
			display.Warning.Print(stderr.String())
			return
		}
	}
}
// goroutine main function, a.k.a worker.
// 'queue' contains realpaths to files.
// For each file, the worker probes it with ffprobe, prepares tags and covers,
// runs the Lua scripts per track, previews the result, and — when
// OPTIONS.process is set — transcodes or copies/renames the file and
// transfers the covers. It signals completion on 'quit'.
func process(queue chan string, quit chan bool) {
	defer func() { quit <- true }()
	display := newSlogger(OPTIONS.debug, OPTIONS.color)
	defer display.Flush()
	// Compile scripts.
	L, err := makeSandbox(CACHE.scripts, display)
	if err != nil {
		display.Error.Print(err)
	}
	defer L.Close()
	for file := range queue {
		display.Flush()
		display.Section.Println(file)
		// Probe the file; the JSON output unmarshals into inputDesc's
		// exported Streams/Format fields.
		cmd := exec.Command("ffprobe", "-v", "error", "-print_format", "json", "-show_streams", "-show_format", file)
		var stderr bytes.Buffer
		cmd.Stderr = &stderr
		out, err := cmd.Output()
		if err != nil {
			display.Error.Print("ffprobe: ", stderr.String())
			continue
		}
		var input inputDesc
		err = json.Unmarshal(out, &input)
		if err != nil {
			display.Error.Print(err)
			continue
		}
		input.path = file // realpath
		// Index of the first audio stream.
		input.audioIndex = -1
		for k, v := range input.Streams {
			if v.Codec_type == "audio" {
				input.audioIndex = k
				break
			}
		}
		if input.audioIndex == -1 {
			display.Warning.Print("Non-audio file:", input.path)
			continue
		}
		// Set bitrate.
		// FFmpeg stores bitrate as a string, Demlo needs a number. If
		// 'streams[audioIndex].bit_rate' is empty (e.g. in APE files), look for
		// 'format.bit_rate'. To ease querying bitrate from user scripts, store it
		// in 'input.bitrate'.
		input.bitrate, err = strconv.Atoi(input.Streams[input.audioIndex].Bit_rate)
		if err != nil {
			input.bitrate, err = strconv.Atoi(input.Format.Bit_rate)
			if err != nil {
				display.Warning.Print("Cannot get bitrate from", input.path)
				continue
			}
		}
		// prepareTags should be run before setting the covers.
		prepareTags(&input, display)
		input.externalCovers, err = getExternalCover(input, display)
		if err != nil {
			display.Warning.Print(err)
			continue
		}
		var embeddedCoversCache [][]byte
		var onlineCoverCache []byte
		input.embeddedCovers, embeddedCoversCache = getEmbeddedCover(input, display)
		var defaultTags map[string]string
		// We retrieve tags online only for single-track files. TODO: Add support for multi-track files.
		if input.trackCount == 1 {
			var releaseID ReleaseID
			prepareTrackTags(input, 1)
			if OPTIONS.gettags {
				releaseID, defaultTags, err = getOnlineTags(input, display)
				if err != nil {
					display.Debug.Print("Online tags query error: ", err)
				}
			}
			if OPTIONS.getcover {
				onlineCoverCache, input.onlineCover, err = getOnlineCover(input, releaseID, display)
				if err != nil {
					display.Debug.Print("Online cover query error: ", err)
				}
			}
		}
		// One output description per track (one per cuesheet track, or one
		// for the whole file).
		var output = make([]outputDesc, input.trackCount)
		for track := 0; track < input.trackCount; track++ {
			output[track] = runAllScripts(input, track, defaultTags, L, display)
		}
		//--------------------------------------------------------------------------------
		// Preview.
		if OPTIONS.graphical {
			for track := 0; track < input.trackCount; track++ {
				preview(input, output[track], track, display)
				// Warn for existence.
				_, err = os.Stat(output[track].Path)
				if err == nil || !os.IsNotExist(err) {
					display.Warning.Println("Destination exists:", output[track].Path)
				}
			}
		} else {
			// Non-graphical mode prints an index entry ("path": [tracks],).
			// Should never fail.
			buf1, _ := json.Marshal(input.path)
			buf2, _ := json.MarshalIndent(output, "", "\t")
			display.Output.Printf("%s: %s,\n", buf1, buf2)
		}
		if !OPTIONS.process {
			continue
		}
		//--------------------------------------------------------------------------------
		// Re-encode / copy / rename.
		for track := 0; track < input.trackCount; track++ {
			err = os.MkdirAll(filepath.Dir(output[track].Path), 0777)
			if err != nil {
				display.Error.Print(err)
				continue
			}
			// Copy embeddedCovers, externalCovers and onlineCover.
			for stream, cover := range output[track].EmbeddedCovers {
				inputSource := bytes.NewBuffer(embeddedCoversCache[stream])
				transferCovers(cover, "embedded "+strconv.Itoa(stream), input.path, inputSource, input.embeddedCovers[stream].checksum, display)
			}
			for file, cover := range output[track].ExternalCovers {
				inputPath := filepath.Join(filepath.Dir(input.path), file)
				inputSource, err := os.Open(inputPath)
				if err != nil {
					continue
				}
				transferCovers(cover, "external '"+file+"'", inputPath, inputSource, input.externalCovers[file].checksum, display)
				inputSource.Close()
			}
			{
				inputSource := bytes.NewBuffer(onlineCoverCache)
				transferCovers(output[track].OnlineCover, "online", input.path, inputSource, input.onlineCover.checksum, display)
			}
			// If encoding changed, use FFmpeg. Otherwise, copy/rename the file to
			// speed up the process. If tags have changed but not the encoding, we use
			// taglib to set them.
			var encodingChanged = false
			var tagsChanged = false
			if input.trackCount > 1 {
				// Split cue-sheet.
				encodingChanged = true
			}
			if input.Format.Format_name != output[track].Format {
				encodingChanged = true
			}
			if len(output[track].Parameters) != 2 ||
				output[track].Parameters[0] != "-c:a" ||
				output[track].Parameters[1] != "copy" {
				encodingChanged = true
			}
			// Test if tags have changed.
			for k, v := range input.tags {
				if k != "encoder" && output[track].Tags[k] != v {
					tagsChanged = true
					break
				}
			}
			if !tagsChanged {
				for k, v := range output[track].Tags {
					if k != "encoder" && input.tags[k] != v {
						tagsChanged = true
						break
					}
				}
			}
			// TODO: Move this to 2/3 separate functions.
			// TODO: Add to condition: `|| output[track].format == "taglib-unsupported-format"`.
			if encodingChanged {
				// Store encoding parameters.
				ffmpegParameters := []string{}
				// Be verbose only when running a single process. Otherwise output gets
				// would get messy.
				if OPTIONS.cores > 1 {
					ffmpegParameters = append(ffmpegParameters, "-v", "warning")
				} else {
					ffmpegParameters = append(ffmpegParameters, "-v", "error")
				}
				// By default, FFmpeg reads stdin while running. Disable this feature to
				// avoid unexpected problems.
				ffmpegParameters = append(ffmpegParameters, "-nostdin")
				// FFmpeg should always overwrite: if a temp file is created to avoid
				// overwriting, FFmpeg should clobber it.
				ffmpegParameters = append(ffmpegParameters, "-y")
				ffmpegParameters = append(ffmpegParameters, "-i", input.path)
				// Stream codec.
				ffmpegParameters = append(ffmpegParameters, output[track].Parameters...)
				// Get cuesheet splitting parameters.
				if len(input.cuesheet.Files) > 0 {
					d, _ := strconv.ParseFloat(input.Streams[input.audioIndex].Duration, 64)
					start, duration := FFmpegSplitTimes(input.cuesheet, input.cuesheetFile, track, d)
					ffmpegParameters = append(ffmpegParameters, "-ss", start, "-t", duration)
				}
				// If there are no covers, do not copy any video stream to avoid errors.
				if input.Format.Nb_streams < 2 {
					ffmpegParameters = append(ffmpegParameters, "-vn")
				}
				// Remove non-cover streams and extra audio streams.
				// Must add all streams first.
				ffmpegParameters = append(ffmpegParameters, "-map", "0")
				for i := 0; i < input.Format.Nb_streams; i++ {
					if (input.Streams[i].Codec_type == "video" && input.Streams[i].Codec_name != "image2" && input.Streams[i].Codec_name != "png" && input.Streams[i].Codec_name != "mjpeg") ||
						(input.Streams[i].Codec_type == "audio" && i > input.audioIndex) ||
						(input.Streams[i].Codec_type != "audio" && input.Streams[i].Codec_type != "video") {
						ffmpegParameters = append(ffmpegParameters, "-map", "-0:"+strconv.Itoa(i))
					}
				}
				// Remove subtitles if any.
				ffmpegParameters = append(ffmpegParameters, "-sn")
				// '-map_metadata -1' clears all metadata first.
				ffmpegParameters = append(ffmpegParameters, "-map_metadata", "-1")
				for tag, value := range output[track].Tags {
					ffmpegParameters = append(ffmpegParameters, "-metadata", tag+"="+value)
				}
				// Format.
				ffmpegParameters = append(ffmpegParameters, "-f", output[track].Format)
				// Output file.
				// FFmpeg cannot transcode inplace, so we force creating a temp file if
				// necessary.
				var dst string
				dst, err := makeTrackDst(output[track].Path, input.path, false)
				if err != nil {
					display.Error.Print(err)
					continue
				}
				ffmpegParameters = append(ffmpegParameters, dst)
				display.Debug.Printf("Audio %v parameters: %q", track, ffmpegParameters)
				cmd := exec.Command("ffmpeg", ffmpegParameters...)
				var stderr bytes.Buffer
				cmd.Stderr = &stderr
				err = cmd.Run()
				if err != nil {
					// NOTE(review): Printf with a non-constant format string;
					// stray '%' in FFmpeg's stderr would be mangled —
					// consider Print instead.
					display.Error.Printf(stderr.String())
					continue
				}
				if OPTIONS.removesource {
					// TODO: This realpath is already expanded in 'makeTrackDst'. Factor
					// it.
					output[track].Path, err = realpath.Realpath(output[track].Path)
					if err != nil {
						display.Error.Print(err)
						continue
					}
					if input.path == output[track].Path {
						// If inplace, rename.
						err = os.Rename(dst, output[track].Path)
						if err != nil {
							display.Error.Print(err)
						}
					} else {
						err = os.Remove(input.path)
						if err != nil {
							display.Error.Print(err)
						}
					}
				}
			} else {
				// Encoding unchanged: copy/rename, then retag with taglib if
				// needed.
				var err error
				var dst string
				dst, err = makeTrackDst(output[track].Path, input.path, OPTIONS.removesource)
				if err != nil {
					display.Error.Print(err)
					continue
				}
				if input.path != dst {
					// Copy/rename file if not inplace.
					err = nil
					if OPTIONS.removesource {
						err = os.Rename(input.path, dst)
					}
					if err != nil || !OPTIONS.removesource {
						// If renaming failed, it might be because of a cross-device
						// destination. We try to copy instead.
						err := CopyFile(dst, input.path)
						if err != nil {
							display.Error.Println(err)
							continue
						}
						if OPTIONS.removesource {
							err = os.Remove(input.path)
							if err != nil {
								display.Error.Println(err)
							}
						}
					}
				}
				if tagsChanged {
					// TODO: Can TagLib remove extra tags?
					f, err := taglib.Read(dst)
					if err != nil {
						display.Error.Print(err)
						continue
					}
					// NOTE(review): 'defer' inside the queue loop — these
					// closes accumulate until process() returns; consider an
					// explicit close after Save.
					defer f.Close()
					// TODO: Arbitrary tag support with taglib?
					if output[track].Tags["album"] != "" {
						f.SetAlbum(output[track].Tags["album"])
					}
					if output[track].Tags["artist"] != "" {
						f.SetArtist(output[track].Tags["artist"])
					}
					if output[track].Tags["comment"] != "" {
						f.SetComment(output[track].Tags["comment"])
					}
					if output[track].Tags["genre"] != "" {
						f.SetGenre(output[track].Tags["genre"])
					}
					if output[track].Tags["title"] != "" {
						f.SetTitle(output[track].Tags["title"])
					}
					if output[track].Tags["track"] != "" {
						t, err := strconv.Atoi(output[track].Tags["track"])
						if err == nil {
							f.SetTrack(t)
						}
					}
					if output[track].Tags["date"] != "" {
						t, err := strconv.Atoi(output[track].Tags["date"])
						if err == nil {
							f.SetYear(t)
						}
					}
					err = f.Save()
					if err != nil {
						display.Error.Print(err)
					}
				}
			}
		}
	}
}
// findscript returns the first existing match for 'name' among the candidate
// locations (as-is, with a '.lua' extension appended, and under the user and
// system script roots), together with its FileInfo.
func findscript(name string) (path string, st os.FileInfo, err error) {
	nameExt := name + ".lua"
	candidates := []string{
		name,
		nameExt,
		filepath.Join(USER_SCRIPTROOT, name),
		filepath.Join(USER_SCRIPTROOT, nameExt),
		filepath.Join(SYSTEM_SCRIPTROOT, name),
		filepath.Join(SYSTEM_SCRIPTROOT, nameExt),
	}
	for _, candidate := range candidates {
		info, statErr := os.Stat(candidate)
		if statErr == nil {
			return candidate, info, nil
		}
	}
	return "", nil, errors.New("Script not found")
}
// Note to packagers: those following lines can be patched to fit the local
// filesystem.
// init applies the XDG base-directory defaults and resolves the script roots
// and the configuration file location.
func init() {
	log.SetFlags(0)
	// Fall back to the XDG defaults for unset environment values.
	if XDG_CONFIG_HOME == "" {
		XDG_CONFIG_HOME = filepath.Join(os.Getenv("HOME"), ".config")
	}
	if XDG_CONFIG_DIRS == "" {
		XDG_CONFIG_DIRS = "/etc/xdg"
	}
	if XDG_DATA_DIRS == "" {
		XDG_DATA_DIRS = "/usr/local/share/:/usr/share"
	}
	// firstExisting returns the first 'subpath' that exists under any entry
	// of the list-separated 'pathlist', or "" when none does.
	firstExisting := func(pathlist, subpath string) string {
		for _, dir := range filepath.SplitList(pathlist) {
			if dir == "" {
				dir = "."
			}
			candidate := filepath.Join(dir, subpath)
			if _, err := os.Stat(candidate); err == nil {
				return candidate
			}
		}
		return ""
	}
	SYSTEM_SCRIPTROOT = firstExisting(XDG_DATA_DIRS, filepath.Join(APPLICATION, "scripts"))
	USER_SCRIPTROOT = firstExisting(XDG_CONFIG_HOME, filepath.Join(APPLICATION, "scripts"))
	CONFIG = os.Getenv("DEMLORC")
	if CONFIG == "" {
		CONFIG = filepath.Join(XDG_CONFIG_HOME, APPLICATION, APPLICATION+"rc")
	}
}
// main loads the configuration and command-line flags, primes the caches
// (index and scripts), then walks the argument files/folders, feeding each
// realpath once into a queue consumed by OPTIONS.cores worker goroutines.
func main() {
	// Load config first since it changes the default flag values.
	st, err := os.Stat(CONFIG)
	if err == nil && st.Mode().IsRegular() {
		fmt.Fprintf(os.Stderr, ":: Load config: %v\n", CONFIG)
		OPTIONS = loadConfig(CONFIG)
	}
	if OPTIONS.extensions == nil {
		// Defaults: Init here so that unspecified config options get properly set.
		OPTIONS.extensions = stringSetFlag{
			"aac":  true,
			"ape":  true,
			"flac": true,
			"ogg":  true,
			"m4a":  true,
			"mp3":  true,
			"mp4":  true,
			"mpc":  true,
			"wav":  true,
			"wv":   true,
		}
	}
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage: %v [OPTIONS] FILES|FOLDERS\n\n", os.Args[0])
		fmt.Fprintln(os.Stderr, usage)
		fmt.Fprintln(os.Stderr, "Options:")
		flag.PrintDefaults()
	}
	// Flag defaults come from the loaded config (OPTIONS fields).
	flag.BoolVar(&OPTIONS.color, "color", OPTIONS.color, "Color output.")
	flag.IntVar(&OPTIONS.cores, "cores", OPTIONS.cores, "Run N processes in parallel. If 0, use all online cores.")
	flag.BoolVar(&OPTIONS.debug, "debug", false, "Enable debug messages.")
	flag.Var(&OPTIONS.extensions, "ext", "Additional extensions to look for when a folder is browsed.")
	flag.BoolVar(&OPTIONS.getcover, "c", OPTIONS.getcover, "Fetch cover from the Internet.")
	flag.BoolVar(&OPTIONS.gettags, "t", OPTIONS.gettags, "Fetch tags from the Internet.")
	flag.BoolVar(&OPTIONS.graphical, "g", OPTIONS.graphical, "Use formatted output.")
	flag.StringVar(&OPTIONS.index, "i", OPTIONS.index, `Use index file to set input and output metadata.
The index can be built using the non-formatted preview output.`)
	flag.StringVar(&OPTIONS.postscript, "post", OPTIONS.postscript, "Run Lua commands after the other scripts.")
	flag.StringVar(&OPTIONS.prescript, "pre", OPTIONS.prescript, "Run Lua commands before the other scripts.")
	flag.BoolVar(&OPTIONS.process, "p", OPTIONS.process, "Apply changes: set tags and format, move/copy result to destination file.")
	flag.BoolVar(&OPTIONS.removesource, "rmsrc", OPTIONS.removesource, "Remove source file after processing.")
	var flagScripts scriptSlice
	flag.Var(&flagScripts, "s", `Specify scripts to run in provided order.
This option can be specified several times. If only the basename without extension is given,
and if it is not found in current folder, the corresponding standard script will be used.`)
	var flagVersion = flag.Bool("v", false, "Print version and exit.")
	flag.Parse()
	if *flagVersion {
		fmt.Println(APPLICATION, VERSION, COPYRIGHT)
		return
	}
	if flag.Arg(0) == "" {
		flag.Usage()
		return
	}
	// Check for essential programs.
	_, err = exec.LookPath("ffmpeg")
	if err != nil {
		log.Fatal(err)
	}
	_, err = exec.LookPath("ffprobe")
	if err != nil {
		log.Fatal(err)
	}
	// Disable formatted output if piped.
	st, _ = os.Stdout.Stat()
	if (st.Mode() & os.ModeCharDevice) == 0 {
		OPTIONS.graphical = false
	}
	st, _ = os.Stderr.Stat()
	if (st.Mode() & os.ModeCharDevice) == 0 {
		OPTIONS.color = false
	}
	// Main logger.
	display := newSlogger(OPTIONS.debug, OPTIONS.color)
	// Load index to cache.
	if OPTIONS.index != "" {
		st, err := os.Stat(OPTIONS.index)
		if err != nil {
			display.Warning.Printf("Index not found: [%v]", OPTIONS.index)
		} else {
			if st.Size() > INDEX_MAXSIZE {
				display.Warning.Printf("Index size > %v bytes, skipping: %v", INDEX_MAXSIZE, OPTIONS.index)
			} else {
				buf, err := ioutil.ReadFile(OPTIONS.index)
				if err != nil {
					display.Warning.Print("Index is not readable:", err)
				} else {
					// Enclose JSON list in a valid structure. Since index ends with a
					// comma, hence the required dummy entry.
					buf = append(append([]byte{'{'}, buf...), []byte(`"": null}`)...)
					err = json.Unmarshal(buf, &CACHE.index)
					if err != nil {
						display.Warning.Printf("Invalid index %v: %v", OPTIONS.index, err)
					}
				}
			}
		}
	}
	// Load scripts to cache.
	// Order matters: prescript, then the regular scripts, then postscript.
	if OPTIONS.prescript != "" {
		CACHE.scripts = append(CACHE.scripts, scriptBuffer{name: "prescript", buf: OPTIONS.prescript})
	}
	if len(flagScripts) > 0 {
		// CLI overrides default/config values.
		OPTIONS.scripts = flagScripts
	}
	for _, s := range OPTIONS.scripts {
		path, st, err := findscript(s)
		if err != nil {
			display.Warning.Printf("%v: %v", err, s)
			continue
		}
		if sz := st.Size(); sz > SCRIPT_MAXSIZE {
			display.Warning.Printf("Script size %v > %v bytes, skipping: %v", sz, SCRIPT_MAXSIZE, path)
			continue
		}
		buf, err := ioutil.ReadFile(path)
		if err != nil {
			display.Warning.Print("Script is not readable: ", err)
			continue
		}
		display.Info.Printf("Load script: %v", path)
		CACHE.scripts = append(CACHE.scripts, scriptBuffer{name: path, buf: string(buf)})
	}
	if OPTIONS.postscript != "" {
		CACHE.scripts = append(CACHE.scripts, scriptBuffer{name: "postscript", buf: OPTIONS.postscript})
	}
	// Limit number of cores to online cores.
	if OPTIONS.cores > runtime.NumCPU() || OPTIONS.cores <= 0 {
		OPTIONS.cores = runtime.NumCPU()
	}
	display.Flush()
	// If all workers are ready at the same time, they will query 'OPTIONS.cores'
	// files from the queue. Add some extra space to the queue in the unlikely
	// event the folder walk is slower than the workers.
	queue := make(chan string, 2*OPTIONS.cores)
	quit := make(chan bool, OPTIONS.cores)
	for i := 0; i < OPTIONS.cores; i++ {
		go process(queue, quit)
		// Wait for all routines.
		defer func() { <-quit }()
	}
	// Deduplicate inputs by realpath so each file is processed once.
	visited := map[string]bool{}
	for _, file := range flag.Args() {
		visit := func(path string, info os.FileInfo, err error) error {
			if err != nil || !info.Mode().IsRegular() {
				return nil
			}
			if !OPTIONS.extensions[strings.ToLower(Ext(path))] {
				return nil
			}
			rpath, err := realpath.Realpath(path)
			if err != nil {
				display.Error.Print("Cannot get real path:", err)
				display.Flush()
				return nil
			}
			if !visited[rpath] {
				visited[rpath] = true
				queue <- rpath
			}
			return nil
		}
		// 'visit' always keeps going, so no error.
		_ = filepath.Walk(file, visit)
	}
	// Closing the queue lets the workers drain and exit; the deferred
	// receives above then wait for them.
	close(queue)
}
|
package main
import (
"errors"
"flag"
"github.com/crowdmob/goamz/aws"
"github.com/crowdmob/goamz/cloudwatch"
mp "github.com/mackerelio/go-mackerel-plugin"
"log"
"os"
"time"
)
// graphdef holds the static graph definitions. The per-AZ host-count
// graphs ("elb.healthy_host_count", "elb.unhealthy_host_count") are
// added dynamically by GraphDefinition.
var graphdef = map[string]mp.Graphs{
	"elb.latency": {
		Label: "Whole ELB Latency",
		Unit:  "float",
		Metrics: []mp.Metrics{
			{Name: "Latency", Label: "Latency"},
		},
	},
	"elb.http_backend": {
		Label: "Whole ELB HTTP Backend Count",
		Unit:  "integer",
		Metrics: []mp.Metrics{
			{Name: "HTTPCode_Backend_2XX", Label: "2XX", Stacked: true},
			{Name: "HTTPCode_Backend_3XX", Label: "3XX", Stacked: true},
			{Name: "HTTPCode_Backend_4XX", Label: "4XX", Stacked: true},
			{Name: "HTTPCode_Backend_5XX", Label: "5XX", Stacked: true},
		},
	},
}
// ELBPlugin is a mackerel plugin that reports whole-ELB metrics from
// AWS CloudWatch.
type ELBPlugin struct {
	Region          string // AWS region to query, e.g. "us-east-1"
	AccessKeyId     string // AWS credentials; may be empty (resolved by aws.GetAuth)
	SecretAccessKey string
	// AZs is the list of availability zones discovered by Prepare.
	AZs []string
	// CloudWatch is the API client, created by Prepare.
	CloudWatch *cloudwatch.CloudWatch
}
// Prepare authenticates to AWS, creates the CloudWatch client and
// discovers the availability zones behind the ELB by listing the
// per-AZ HealthyHostCount metrics. It must be called before
// FetchMetrics or GraphDefinition.
func (p *ELBPlugin) Prepare() error {
	auth, err := aws.GetAuth(p.AccessKeyId, p.SecretAccessKey, "", time.Now())
	if err != nil {
		return err
	}

	p.CloudWatch, err = cloudwatch.NewCloudWatch(auth, aws.Regions[p.Region].CloudWatchServicepoint)
	if err != nil {
		return err
	}

	ret, err := p.CloudWatch.ListMetrics(&cloudwatch.ListMetricsRequest{
		Namespace: "AWS/ELB",
		Dimensions: []cloudwatch.Dimension{
			cloudwatch.Dimension{
				Name: "AvailabilityZone",
			},
		},
		MetricName: "HealthyHostCount",
	})
	if err != nil {
		return err
	}

	p.AZs = make([]string, 0, len(ret.ListMetricsResult.Metrics))
	for _, met := range ret.ListMetricsResult.Metrics {
		// Keep only metrics carrying exactly one dimension, the
		// AvailabilityZone. The previous `len(met.Dimensions) > 1`
		// guard let a zero-dimension metric through and panicked on
		// the met.Dimensions[0] access below.
		if len(met.Dimensions) != 1 || met.Dimensions[0].Name != "AvailabilityZone" {
			continue
		}
		p.AZs = append(p.AZs, met.Dimensions[0].Value)
	}

	return nil
}
// GetLastPoint fetches the newest datapoint of metricName for the given
// dimension from CloudWatch, requesting the statistic named by statType
// ("Average" or "Sum"). It queries the last two minutes at a 60-second
// period so that at least one datapoint should be available.
func (p ELBPlugin) GetLastPoint(dimension *cloudwatch.Dimension, metricName string, statType string) (float64, error) {
	now := time.Now()

	response, err := p.CloudWatch.GetMetricStatistics(&cloudwatch.GetMetricStatisticsRequest{
		Dimensions: []cloudwatch.Dimension{*dimension},
		StartTime:  now.Add(time.Duration(120) * time.Second * -1), // 2 min (to fetch at least 1 data-point)
		EndTime:    now,
		MetricName: metricName,
		Period:     60,
		Statistics: []string{statType},
		Namespace:  "AWS/ELB",
	})
	if err != nil {
		return 0, err
	}

	datapoints := response.GetMetricStatisticsResult.Datapoints
	if len(datapoints) == 0 {
		return 0, errors.New("fetched no datapoints")
	}

	// Select the value of the datapoint with the newest timestamp;
	// datapoints are not guaranteed to arrive in order.
	latest := time.Unix(0, 0)
	var latestVal float64
	for _, dp := range datapoints {
		if dp.Timestamp.Before(latest) {
			continue
		}
		latest = dp.Timestamp
		switch statType {
		case "Average":
			latestVal = dp.Average
		case "Sum":
			latestVal = dp.Sum
		}
		// NOTE(review): an unrecognized statType silently yields 0
		// with a nil error here.
	}

	return latestVal, nil
}
// FetchMetrics gathers the per-AZ host counts plus the whole-ELB
// latency and backend HTTP status-code counts from CloudWatch.
// Metrics that fail to fetch are silently omitted from the result.
func (p ELBPlugin) FetchMetrics() (map[string]float64, error) {
	metrics := make(map[string]float64)

	// Per-AZ healthy/unhealthy host counts.
	for _, zone := range p.AZs {
		dim := &cloudwatch.Dimension{
			Name:  "AvailabilityZone",
			Value: zone,
		}
		for _, name := range []string{"HealthyHostCount", "UnHealthyHostCount"} {
			if val, err := p.GetLastPoint(dim, name, "Average"); err == nil {
				metrics[name+"_"+zone] = val
			}
		}
	}

	// Whole-ELB metrics are published under the Service=ELB dimension.
	service := &cloudwatch.Dimension{
		Name:  "Service",
		Value: "ELB",
	}
	if val, err := p.GetLastPoint(service, "Latency", "Average"); err == nil {
		metrics["Latency"] = val
	}
	for _, name := range []string{"HTTPCode_Backend_2XX", "HTTPCode_Backend_3XX", "HTTPCode_Backend_4XX", "HTTPCode_Backend_5XX"} {
		if val, err := p.GetLastPoint(service, name, "Sum"); err == nil {
			metrics[name] = val
		}
	}

	return metrics, nil
}
// GraphDefinition returns the graph definitions, extending the static
// graphdef map with one stacked host-count graph per category, with a
// metric line for each availability zone discovered in Prepare.
// NOTE(review): this mutates the package-level graphdef map in place on
// every call rather than returning a copy.
func (p ELBPlugin) GraphDefinition() map[string](mp.Graphs) {
	for _, grp := range [...]string{"elb.healthy_host_count", "elb.unhealthy_host_count"} {
		// Map each graph key to its metric-name prefix and label.
		var name_pre string
		var label string
		switch grp {
		case "elb.healthy_host_count":
			name_pre = "HealthyHostCount_"
			label = "ELB Healthy Host Count"
		case "elb.unhealthy_host_count":
			name_pre = "UnHealthyHostCount_"
			label = "ELB Unhealthy Host Count"
		}
		// One stacked metric line per availability zone.
		var metrics [](mp.Metrics)
		for _, az := range p.AZs {
			metrics = append(metrics, mp.Metrics{Name: name_pre + az, Label: az, Stacked: true})
		}
		graphdef[grp] = mp.Graphs{
			Label:   label,
			Unit:    "integer",
			Metrics: metrics,
		}
	}
	return graphdef
}
// main parses CLI flags, prepares the ELB plugin and hands control to
// the mackerel plugin helper, which prints either the graph definitions
// (when MACKEREL_AGENT_PLUGIN_META is set by the agent) or the current
// metric values.
func main() {
	optRegion := flag.String("region", "", "AWS Region")
	optAccessKeyId := flag.String("access-key-id", "", "AWS Access Key ID")
	optSecretAccessKey := flag.String("secret-access-key", "", "AWS Secret Access Key")
	optTempfile := flag.String("tempfile", "", "Temp file name")
	flag.Parse()

	var elb ELBPlugin

	// Fall back to the region reported by the EC2 instance metadata
	// when -region is not given.
	if *optRegion == "" {
		elb.Region = aws.InstanceRegion()
	} else {
		elb.Region = *optRegion
	}

	elb.AccessKeyId = *optAccessKeyId
	elb.SecretAccessKey = *optSecretAccessKey

	err := elb.Prepare()
	if err != nil {
		log.Fatalln(err)
	}

	helper := mp.NewMackerelPlugin(elb)
	if *optTempfile != "" {
		helper.Tempfile = *optTempfile
	} else {
		helper.Tempfile = "/tmp/mackerel-plugin-elb"
	}

	if os.Getenv("MACKEREL_AGENT_PLUGIN_META") != "" {
		helper.OutputDefinitions()
	} else {
		helper.OutputValues()
	}
}
Use iota-based StatType constants instead of passing raw statistic-name strings.
package main
import (
"errors"
"flag"
"github.com/crowdmob/goamz/aws"
"github.com/crowdmob/goamz/cloudwatch"
mp "github.com/mackerelio/go-mackerel-plugin"
"log"
"os"
"time"
)
// graphdef holds the static graph definitions. The per-AZ host-count
// graphs ("elb.healthy_host_count", "elb.unhealthy_host_count") are
// added dynamically by GraphDefinition.
var graphdef = map[string]mp.Graphs{
	"elb.latency": {
		Label: "Whole ELB Latency",
		Unit:  "float",
		Metrics: []mp.Metrics{
			{Name: "Latency", Label: "Latency"},
		},
	},
	"elb.http_backend": {
		Label: "Whole ELB HTTP Backend Count",
		Unit:  "integer",
		Metrics: []mp.Metrics{
			{Name: "HTTPCode_Backend_2XX", Label: "2XX", Stacked: true},
			{Name: "HTTPCode_Backend_3XX", Label: "3XX", Stacked: true},
			{Name: "HTTPCode_Backend_4XX", Label: "4XX", Stacked: true},
			{Name: "HTTPCode_Backend_5XX", Label: "5XX", Stacked: true},
		},
	},
}
// StatType identifies which CloudWatch statistic to request.
type StatType int

// Supported statistic types.
const (
	Average StatType = iota
	Sum
)

// String returns the CloudWatch statistics name for s, or the empty
// string for an unknown value.
func (s StatType) String() string {
	names := [...]string{
		Average: "Average",
		Sum:     "Sum",
	}
	if s < 0 || int(s) >= len(names) {
		return ""
	}
	return names[s]
}
// ELBPlugin is a mackerel plugin that reports whole-ELB metrics from
// AWS CloudWatch.
type ELBPlugin struct {
	Region          string // AWS region to query, e.g. "us-east-1"
	AccessKeyId     string // AWS credentials; may be empty (resolved by aws.GetAuth)
	SecretAccessKey string
	// AZs is the list of availability zones discovered by Prepare.
	AZs []string
	// CloudWatch is the API client, created by Prepare.
	CloudWatch *cloudwatch.CloudWatch
}
// Prepare authenticates to AWS, creates the CloudWatch client and
// discovers the availability zones behind the ELB by listing the
// per-AZ HealthyHostCount metrics. It must be called before
// FetchMetrics or GraphDefinition.
func (p *ELBPlugin) Prepare() error {
	auth, err := aws.GetAuth(p.AccessKeyId, p.SecretAccessKey, "", time.Now())
	if err != nil {
		return err
	}

	p.CloudWatch, err = cloudwatch.NewCloudWatch(auth, aws.Regions[p.Region].CloudWatchServicepoint)
	if err != nil {
		return err
	}

	ret, err := p.CloudWatch.ListMetrics(&cloudwatch.ListMetricsRequest{
		Namespace: "AWS/ELB",
		Dimensions: []cloudwatch.Dimension{
			cloudwatch.Dimension{
				Name: "AvailabilityZone",
			},
		},
		MetricName: "HealthyHostCount",
	})
	if err != nil {
		return err
	}

	p.AZs = make([]string, 0, len(ret.ListMetricsResult.Metrics))
	for _, met := range ret.ListMetricsResult.Metrics {
		// Keep only metrics carrying exactly one dimension, the
		// AvailabilityZone. The previous `len(met.Dimensions) > 1`
		// guard let a zero-dimension metric through and panicked on
		// the met.Dimensions[0] access below.
		if len(met.Dimensions) != 1 || met.Dimensions[0].Name != "AvailabilityZone" {
			continue
		}
		p.AZs = append(p.AZs, met.Dimensions[0].Value)
	}

	return nil
}
// GetLastPoint fetches the newest datapoint of metricName for the given
// dimension from CloudWatch, requesting the statistic named by statType
// (Average or Sum). It queries the last two minutes at a 60-second
// period so that at least one datapoint should be available.
func (p ELBPlugin) GetLastPoint(dimension *cloudwatch.Dimension, metricName string, statType StatType) (float64, error) {
	now := time.Now()

	response, err := p.CloudWatch.GetMetricStatistics(&cloudwatch.GetMetricStatisticsRequest{
		Dimensions: []cloudwatch.Dimension{*dimension},
		StartTime:  now.Add(time.Duration(120) * time.Second * -1), // 2 min (to fetch at least 1 data-point)
		EndTime:    now,
		MetricName: metricName,
		Period:     60,
		Statistics: []string{statType.String()},
		Namespace:  "AWS/ELB",
	})
	if err != nil {
		return 0, err
	}

	datapoints := response.GetMetricStatisticsResult.Datapoints
	if len(datapoints) == 0 {
		return 0, errors.New("fetched no datapoints")
	}

	// Select the value of the datapoint with the newest timestamp;
	// datapoints are not guaranteed to arrive in order.
	latest := time.Unix(0, 0)
	var latestVal float64
	for _, dp := range datapoints {
		if dp.Timestamp.Before(latest) {
			continue
		}
		latest = dp.Timestamp
		switch statType {
		case Average:
			latestVal = dp.Average
		case Sum:
			latestVal = dp.Sum
		}
		// NOTE(review): an unrecognized statType silently yields 0
		// with a nil error here.
	}

	return latestVal, nil
}
// FetchMetrics gathers the per-AZ host counts plus the whole-ELB
// latency and backend HTTP status-code counts from CloudWatch.
// Metrics that fail to fetch are silently omitted from the result.
func (p ELBPlugin) FetchMetrics() (map[string]float64, error) {
	metrics := make(map[string]float64)

	// Per-AZ healthy/unhealthy host counts.
	for _, zone := range p.AZs {
		dim := &cloudwatch.Dimension{
			Name:  "AvailabilityZone",
			Value: zone,
		}
		for _, name := range []string{"HealthyHostCount", "UnHealthyHostCount"} {
			if val, err := p.GetLastPoint(dim, name, Average); err == nil {
				metrics[name+"_"+zone] = val
			}
		}
	}

	// Whole-ELB metrics are published under the Service=ELB dimension.
	service := &cloudwatch.Dimension{
		Name:  "Service",
		Value: "ELB",
	}
	if val, err := p.GetLastPoint(service, "Latency", Average); err == nil {
		metrics["Latency"] = val
	}
	for _, name := range []string{"HTTPCode_Backend_2XX", "HTTPCode_Backend_3XX", "HTTPCode_Backend_4XX", "HTTPCode_Backend_5XX"} {
		if val, err := p.GetLastPoint(service, name, Sum); err == nil {
			metrics[name] = val
		}
	}

	return metrics, nil
}
// GraphDefinition returns the graph definitions, extending the static
// graphdef map with one stacked host-count graph per category, with a
// metric line for each availability zone discovered in Prepare.
// NOTE(review): this mutates the package-level graphdef map in place on
// every call rather than returning a copy.
func (p ELBPlugin) GraphDefinition() map[string](mp.Graphs) {
	for _, grp := range [...]string{"elb.healthy_host_count", "elb.unhealthy_host_count"} {
		// Map each graph key to its metric-name prefix and label.
		var name_pre string
		var label string
		switch grp {
		case "elb.healthy_host_count":
			name_pre = "HealthyHostCount_"
			label = "ELB Healthy Host Count"
		case "elb.unhealthy_host_count":
			name_pre = "UnHealthyHostCount_"
			label = "ELB Unhealthy Host Count"
		}
		// One stacked metric line per availability zone.
		var metrics [](mp.Metrics)
		for _, az := range p.AZs {
			metrics = append(metrics, mp.Metrics{Name: name_pre + az, Label: az, Stacked: true})
		}
		graphdef[grp] = mp.Graphs{
			Label:   label,
			Unit:    "integer",
			Metrics: metrics,
		}
	}
	return graphdef
}
// main parses CLI flags, prepares the ELB plugin and hands control to
// the mackerel plugin helper, which prints either the graph definitions
// (when MACKEREL_AGENT_PLUGIN_META is set by the agent) or the current
// metric values.
func main() {
	optRegion := flag.String("region", "", "AWS Region")
	optAccessKeyId := flag.String("access-key-id", "", "AWS Access Key ID")
	optSecretAccessKey := flag.String("secret-access-key", "", "AWS Secret Access Key")
	optTempfile := flag.String("tempfile", "", "Temp file name")
	flag.Parse()

	var elb ELBPlugin

	// Fall back to the region reported by the EC2 instance metadata
	// when -region is not given.
	if *optRegion == "" {
		elb.Region = aws.InstanceRegion()
	} else {
		elb.Region = *optRegion
	}

	elb.AccessKeyId = *optAccessKeyId
	elb.SecretAccessKey = *optSecretAccessKey

	err := elb.Prepare()
	if err != nil {
		log.Fatalln(err)
	}

	helper := mp.NewMackerelPlugin(elb)
	if *optTempfile != "" {
		helper.Tempfile = *optTempfile
	} else {
		helper.Tempfile = "/tmp/mackerel-plugin-elb"
	}

	if os.Getenv("MACKEREL_AGENT_PLUGIN_META") != "" {
		helper.OutputDefinitions()
	} else {
		helper.OutputValues()
	}
}
|
package manager
import (
"crypto/x509"
"encoding/pem"
"fmt"
"net"
"os"
"path/filepath"
"sync"
"syscall"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/ca"
"github.com/docker/swarmkit/log"
"github.com/docker/swarmkit/manager/allocator"
"github.com/docker/swarmkit/manager/controlapi"
"github.com/docker/swarmkit/manager/dispatcher"
"github.com/docker/swarmkit/manager/health"
"github.com/docker/swarmkit/manager/keymanager"
"github.com/docker/swarmkit/manager/orchestrator"
"github.com/docker/swarmkit/manager/raftpicker"
"github.com/docker/swarmkit/manager/scheduler"
"github.com/docker/swarmkit/manager/state/raft"
"github.com/docker/swarmkit/manager/state/store"
"github.com/docker/swarmkit/protobuf/ptypes"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
const (
	// defaultTaskHistoryRetentionLimit is the number of tasks to keep.
	// It seeds OrchestrationConfig.TaskHistoryRetentionLimit when the
	// default cluster object is created in Run.
	defaultTaskHistoryRetentionLimit = 5
)
// Config is used to tune the Manager.
type Config struct {
	SecurityConfig *ca.SecurityConfig

	// ExternalCAs is a list of initial CAs to which a manager node
	// will make certificate signing requests for node certificates.
	ExternalCAs []*api.ExternalCA

	// ProtoAddr is a map of listen addresses keyed by protocol
	// (e.g. "tcp", "unix").
	ProtoAddr map[string]string

	// ProtoListener will be used for grpc serving if it's not nil,
	// ProtoAddr fields will be used to create listeners otherwise.
	ProtoListener map[string]net.Listener

	// AdvertiseAddr is the externally-reachable address to advertise
	// to other members. (The previous comment called this a map, but
	// it is a single address string.)
	AdvertiseAddr string

	// JoinRaft is an optional address of a node in an existing raft
	// cluster to join.
	JoinRaft string

	// StateDir is the top-level state directory.
	StateDir string

	// ForceNewCluster defines if we have to force a new cluster
	// because we are recovering from a backup data directory.
	ForceNewCluster bool

	// ElectionTick defines the amount of ticks needed without
	// leader to trigger a new election.
	ElectionTick uint32

	// HeartbeatTick defines the amount of ticks between each
	// heartbeat sent to other members for health-check purposes.
	HeartbeatTick uint32
}
// Manager is the cluster manager for Swarm.
// This is the high-level object holding and initializing all the manager
// subsystems.
type Manager struct {
	config    *Config
	listeners map[string]net.Listener

	caserver   *ca.Server
	Dispatcher *dispatcher.Dispatcher

	// Leader-only subsystems: created in Run when this node becomes
	// raft leader and set back to nil when it becomes a follower.
	replicatedOrchestrator *orchestrator.ReplicatedOrchestrator
	globalOrchestrator     *orchestrator.GlobalOrchestrator
	taskReaper             *orchestrator.TaskReaper
	scheduler              *scheduler.Scheduler
	allocator              *allocator.Allocator
	keyManager             *keymanager.KeyManager

	server      *grpc.Server // TLS-authenticated external API
	localserver *grpc.Server // trusted local (unix socket) API

	RaftNode     *raft.Node
	connSelector *raftpicker.ConnSelector

	// mu guards the leader-only subsystems above and serializes the
	// leadership-change handling in Run with Stop.
	mu sync.Mutex

	// stopped is closed by Stop to signal shutdown to Run.
	stopped chan struct{}
}
type closeOnceListener struct {
once sync.Once
net.Listener
}
func (l *closeOnceListener) Close() error {
var err error
l.once.Do(func() {
err = l.Listener.Close()
})
return err
}
// New creates a Manager which has not started to accept requests yet.
// It resolves the advertised address, creates the state directories,
// binds (or adopts) the configured listeners, and constructs the raft
// node and gRPC servers. Call Run to start serving.
func New(config *Config) (*Manager, error) {
	dispatcherConfig := dispatcher.DefaultConfig()

	if config.ProtoAddr == nil {
		config.ProtoAddr = make(map[string]string)
	}

	// Keep ProtoAddr consistent with a caller-supplied tcp listener.
	if config.ProtoListener != nil && config.ProtoListener["tcp"] != nil {
		config.ProtoAddr["tcp"] = config.ProtoListener["tcp"].Addr().String()
	}

	// If an AdvertiseAddr was specified, we use that as our
	// externally-reachable address.
	tcpAddr := config.AdvertiseAddr

	if tcpAddr == "" {
		// Otherwise, we know we are joining an existing swarm. Use a
		// wildcard address to trigger remote autodetection of our
		// address.
		_, tcpAddrPort, err := net.SplitHostPort(config.ProtoAddr["tcp"])
		if err != nil {
			return nil, fmt.Errorf("missing or invalid listen address %s", config.ProtoAddr["tcp"])
		}

		// Even with an IPv6 listening address, it's okay to use
		// 0.0.0.0 here. Any "unspecified" (wildcard) IP will
		// be substituted with the actual source address.
		tcpAddr = net.JoinHostPort("0.0.0.0", tcpAddrPort)
	}

	err := os.MkdirAll(filepath.Dir(config.ProtoAddr["unix"]), 0700)
	if err != nil {
		return nil, fmt.Errorf("failed to create socket directory: %v", err)
	}

	err = os.MkdirAll(config.StateDir, 0700)
	if err != nil {
		return nil, fmt.Errorf("failed to create state directory: %v", err)
	}

	raftStateDir := filepath.Join(config.StateDir, "raft")
	err = os.MkdirAll(raftStateDir, 0700)
	if err != nil {
		return nil, fmt.Errorf("failed to create raft state directory: %v", err)
	}

	var listeners map[string]net.Listener
	if len(config.ProtoListener) > 0 {
		// Caller supplied ready-made listeners; adopt them as-is.
		listeners = config.ProtoListener
	} else {
		listeners = make(map[string]net.Listener)
		for proto, addr := range config.ProtoAddr {
			l, err := net.Listen(proto, addr)

			// A unix socket may fail to bind if the file already
			// exists. Try replacing the file. Unwrap the error down
			// to the raw errno to detect EADDRINUSE.
			unwrappedErr := err
			if op, ok := unwrappedErr.(*net.OpError); ok {
				unwrappedErr = op.Err
			}
			if sys, ok := unwrappedErr.(*os.SyscallError); ok {
				unwrappedErr = sys.Err
			}
			if proto == "unix" && unwrappedErr == syscall.EADDRINUSE {
				os.Remove(addr)
				l, err = net.Listen(proto, addr)
				if err != nil {
					return nil, err
				}
			} else if err != nil {
				return nil, err
			}
			listeners[proto] = l
		}
	}

	raftCfg := raft.DefaultNodeConfig()

	if config.ElectionTick > 0 {
		raftCfg.ElectionTick = int(config.ElectionTick)
	}
	if config.HeartbeatTick > 0 {
		raftCfg.HeartbeatTick = int(config.HeartbeatTick)
	}

	newNodeOpts := raft.NewNodeOptions{
		ID:              config.SecurityConfig.ClientTLSCreds.NodeID(),
		Addr:            tcpAddr,
		JoinAddr:        config.JoinRaft,
		Config:          raftCfg,
		StateDir:        raftStateDir,
		ForceNewCluster: config.ForceNewCluster,
		TLSCredentials:  config.SecurityConfig.ClientTLSCreds,
	}
	RaftNode := raft.NewNode(context.TODO(), newNodeOpts)

	opts := []grpc.ServerOption{
		grpc.Creds(config.SecurityConfig.ServerTLSCreds)}

	m := &Manager{
		config:      config,
		listeners:   listeners,
		caserver:    ca.NewServer(RaftNode.MemoryStore(), config.SecurityConfig),
		Dispatcher:  dispatcher.New(RaftNode, dispatcherConfig),
		server:      grpc.NewServer(opts...),
		localserver: grpc.NewServer(opts...),
		RaftNode:    RaftNode,
		stopped:     make(chan struct{}),
	}

	return m, nil
}
// Run starts all manager sub-systems and the gRPC server at the configured
// address.
// The call never returns unless an error occurs or `Stop()` is called.
//
// TODO(aluzzardi): /!\ This function is *way* too complex. /!\
// It needs to be split into smaller manageable functions.
func (m *Manager) Run(parent context.Context) error {
	ctx, ctxCancel := context.WithCancel(parent)
	defer ctxCancel()

	// Harakiri: cancel our own context as soon as Stop closes m.stopped.
	go func() {
		select {
		case <-ctx.Done():
		case <-m.stopped:
			ctxCancel()
		}
	}()

	leadershipCh, cancel := m.RaftNode.SubscribeLeadership()
	defer cancel()

	// Leadership watcher: brings the leader-only subsystems up on
	// IsLeader and tears them down on IsFollower.
	go func() {
		for leadershipEvent := range leadershipCh {
			// read out and discard all of the messages when we've stopped
			// don't acquire the mutex yet. if stopped is closed, we don't need
			// this stops this loop from starving Run()'s attempt to Lock
			select {
			case <-m.stopped:
				continue
			default:
				// do nothing, we're not stopped
			}
			// we're not stopping so NOW acquire the mutex
			m.mu.Lock()
			newState := leadershipEvent.(raft.LeadershipState)

			if newState == raft.IsLeader {
				s := m.RaftNode.MemoryStore()

				rootCA := m.config.SecurityConfig.RootCA()
				nodeID := m.config.SecurityConfig.ClientTLSCreds.NodeID()

				raftCfg := raft.DefaultRaftConfig()
				raftCfg.ElectionTick = uint32(m.RaftNode.Config.ElectionTick)
				raftCfg.HeartbeatTick = uint32(m.RaftNode.Config.HeartbeatTick)

				clusterID := m.config.SecurityConfig.ClientTLSCreds.Organization()

				initialCAConfig := ca.DefaultCAConfig()
				initialCAConfig.ExternalCAs = m.config.ExternalCAs

				s.Update(func(tx store.Tx) error {
					// Add a default cluster object to the
					// store. Don't check the error because
					// we expect this to fail unless this
					// is a brand new cluster.
					store.CreateCluster(tx, &api.Cluster{
						ID: clusterID,
						Spec: api.ClusterSpec{
							Annotations: api.Annotations{
								Name: store.DefaultClusterName,
							},
							Orchestration: api.OrchestrationConfig{
								TaskHistoryRetentionLimit: defaultTaskHistoryRetentionLimit,
							},
							Dispatcher: api.DispatcherConfig{
								HeartbeatPeriod: ptypes.DurationProto(dispatcher.DefaultHeartBeatPeriod),
							},
							Raft:     raftCfg,
							CAConfig: initialCAConfig,
						},
						RootCA: api.RootCA{
							CAKey:      rootCA.Key,
							CACert:     rootCA.Cert,
							CACertHash: rootCA.Digest.String(),
							JoinTokens: api.JoinTokens{
								Worker:  ca.GenerateJoinToken(rootCA),
								Manager: ca.GenerateJoinToken(rootCA),
							},
						},
					})
					// Add Node entry for ourself, if one
					// doesn't exist already.
					store.CreateNode(tx, &api.Node{
						ID: nodeID,
						Certificate: api.Certificate{
							CN:   nodeID,
							Role: api.NodeRoleManager,
							Status: api.IssuanceStatus{
								State: api.IssuanceStateIssued,
							},
						},
						Spec: api.NodeSpec{
							Role:       api.NodeRoleManager,
							Membership: api.NodeMembershipAccepted,
						},
					})
					return nil
				})

				// Attempt to rotate the key-encrypting-key of the root CA key-material
				err := m.rotateRootCAKEK(ctx, clusterID)
				if err != nil {
					log.G(ctx).WithError(err).Error("root key-encrypting-key rotation failed")
				}

				m.replicatedOrchestrator = orchestrator.NewReplicatedOrchestrator(s)
				m.globalOrchestrator = orchestrator.NewGlobalOrchestrator(s)
				m.taskReaper = orchestrator.NewTaskReaper(s)
				m.scheduler = scheduler.New(s)
				m.keyManager = keymanager.New(m.RaftNode.MemoryStore(), keymanager.DefaultConfig())

				// TODO(stevvooe): Allocate a context that can be used to
				// shutdown underlying manager processes when leadership is
				// lost.

				m.allocator, err = allocator.New(s)
				if err != nil {
					log.G(ctx).WithError(err).Error("failed to create allocator")
					// TODO(stevvooe): It doesn't seem correct here to fail
					// creating the allocator but then use it anyway.
				}

				go func(keyManager *keymanager.KeyManager) {
					if err := keyManager.Run(ctx); err != nil {
						log.G(ctx).WithError(err).Error("keymanager failed with an error")
					}
				}(m.keyManager)

				go func(d *dispatcher.Dispatcher) {
					if err := d.Run(ctx); err != nil {
						log.G(ctx).WithError(err).Error("Dispatcher exited with an error")
					}
				}(m.Dispatcher)

				go func(server *ca.Server) {
					if err := server.Run(ctx); err != nil {
						log.G(ctx).WithError(err).Error("CA signer exited with an error")
					}
				}(m.caserver)

				// Start all sub-components in separate goroutines.
				// TODO(aluzzardi): This should have some kind of error handling so that
				// any component that goes down would bring the entire manager down.

				if m.allocator != nil {
					go func(allocator *allocator.Allocator) {
						if err := allocator.Run(ctx); err != nil {
							log.G(ctx).WithError(err).Error("allocator exited with an error")
						}
					}(m.allocator)
				}

				go func(scheduler *scheduler.Scheduler) {
					if err := scheduler.Run(ctx); err != nil {
						log.G(ctx).WithError(err).Error("scheduler exited with an error")
					}
				}(m.scheduler)

				go func(taskReaper *orchestrator.TaskReaper) {
					taskReaper.Run()
				}(m.taskReaper)

				go func(orchestrator *orchestrator.ReplicatedOrchestrator) {
					if err := orchestrator.Run(ctx); err != nil {
						log.G(ctx).WithError(err).Error("replicated orchestrator exited with an error")
					}
				}(m.replicatedOrchestrator)

				go func(globalOrchestrator *orchestrator.GlobalOrchestrator) {
					if err := globalOrchestrator.Run(ctx); err != nil {
						log.G(ctx).WithError(err).Error("global orchestrator exited with an error")
					}
				}(m.globalOrchestrator)

			} else if newState == raft.IsFollower {
				// Lost leadership: stop and discard the leader-only
				// subsystems.
				m.Dispatcher.Stop()
				m.caserver.Stop()

				if m.allocator != nil {
					m.allocator.Stop()
					m.allocator = nil
				}

				m.replicatedOrchestrator.Stop()
				m.replicatedOrchestrator = nil

				m.globalOrchestrator.Stop()
				m.globalOrchestrator = nil

				m.taskReaper.Stop()
				m.taskReaper = nil

				m.scheduler.Stop()
				m.scheduler = nil

				m.keyManager.Stop()
				m.keyManager = nil
			}
			m.mu.Unlock()
		}
	}()

	proxyOpts := []grpc.DialOption{
		grpc.WithBackoffMaxDelay(2 * time.Second),
		grpc.WithTransportCredentials(m.config.SecurityConfig.ClientTLSCreds),
	}

	cs := raftpicker.NewConnSelector(m.RaftNode, proxyOpts...)
	m.mu.Lock()
	m.connSelector = cs
	m.mu.Unlock()

	authorize := func(ctx context.Context, roles []string) error {
		// Authorize the remote roles, ensure they can only be forwarded by managers
		_, err := ca.AuthorizeForwardedRoleAndOrg(ctx, roles, []string{ca.ManagerRole}, m.config.SecurityConfig.ClientTLSCreds.Organization())
		return err
	}

	baseControlAPI := controlapi.NewServer(m.RaftNode.MemoryStore(), m.RaftNode, m.config.SecurityConfig.RootCA())
	healthServer := health.NewHealthServer()

	authenticatedControlAPI := api.NewAuthenticatedWrapperControlServer(baseControlAPI, authorize)
	authenticatedDispatcherAPI := api.NewAuthenticatedWrapperDispatcherServer(m.Dispatcher, authorize)
	authenticatedCAAPI := api.NewAuthenticatedWrapperCAServer(m.caserver, authorize)
	authenticatedNodeCAAPI := api.NewAuthenticatedWrapperNodeCAServer(m.caserver, authorize)
	authenticatedRaftAPI := api.NewAuthenticatedWrapperRaftServer(m.RaftNode, authorize)
	authenticatedHealthAPI := api.NewAuthenticatedWrapperHealthServer(healthServer, authorize)
	authenticatedRaftMembershipAPI := api.NewAuthenticatedWrapperRaftMembershipServer(m.RaftNode, authorize)

	proxyDispatcherAPI := api.NewRaftProxyDispatcherServer(authenticatedDispatcherAPI, cs, m.RaftNode, ca.WithMetadataForwardTLSInfo)
	proxyCAAPI := api.NewRaftProxyCAServer(authenticatedCAAPI, cs, m.RaftNode, ca.WithMetadataForwardTLSInfo)
	proxyNodeCAAPI := api.NewRaftProxyNodeCAServer(authenticatedNodeCAAPI, cs, m.RaftNode, ca.WithMetadataForwardTLSInfo)
	proxyRaftMembershipAPI := api.NewRaftProxyRaftMembershipServer(authenticatedRaftMembershipAPI, cs, m.RaftNode, ca.WithMetadataForwardTLSInfo)

	// localProxyControlAPI is a special kind of proxy. It is only wired up
	// to receive requests from a trusted local socket, and these requests
	// don't use TLS, therefore the requests it handles locally should
	// bypass authorization. When it proxies, it sends them as requests from
	// this manager rather than forwarded requests (it has no TLS
	// information to put in the metadata map).
	forwardAsOwnRequest := func(ctx context.Context) (context.Context, error) { return ctx, nil }
	localProxyControlAPI := api.NewRaftProxyControlServer(baseControlAPI, cs, m.RaftNode, forwardAsOwnRequest)

	// Everything registered on m.server should be an authenticated
	// wrapper, or a proxy wrapping an authenticated wrapper!
	api.RegisterCAServer(m.server, proxyCAAPI)
	api.RegisterNodeCAServer(m.server, proxyNodeCAAPI)
	api.RegisterRaftServer(m.server, authenticatedRaftAPI)
	api.RegisterHealthServer(m.server, authenticatedHealthAPI)
	api.RegisterRaftMembershipServer(m.server, proxyRaftMembershipAPI)
	api.RegisterControlServer(m.localserver, localProxyControlAPI)
	api.RegisterControlServer(m.server, authenticatedControlAPI)
	api.RegisterDispatcherServer(m.server, proxyDispatcherAPI)

	errServe := make(chan error, 2)
	for proto, l := range m.listeners {
		go func(proto string, lis net.Listener) {
			ctx := log.WithLogger(ctx, log.G(ctx).WithFields(
				logrus.Fields{
					"proto": lis.Addr().Network(),
					"addr":  lis.Addr().String()}))
			if proto == "unix" {
				log.G(ctx).Info("Listening for local connections")
				// we need to disallow double closes because UnixListener.Close
				// can delete unix-socket file of newer listener. grpc calls
				// Close twice indeed: in Serve and in Stop.
				errServe <- m.localserver.Serve(&closeOnceListener{Listener: lis})
			} else {
				log.G(ctx).Info("Listening for connections")
				errServe <- m.server.Serve(lis)
			}
		}(proto, l)
	}

	// Set the raft server as serving for the health server
	healthServer.SetServingStatus("Raft", api.HealthCheckResponse_SERVING)

	if err := m.RaftNode.JoinAndStart(); err != nil {
		for _, lis := range m.listeners {
			lis.Close()
		}
		return fmt.Errorf("can't initialize raft node: %v", err)
	}

	go func() {
		err := m.RaftNode.Run(ctx)
		if err != nil {
			log.G(ctx).Error(err)
			m.Stop(ctx)
		}
	}()

	if err := raft.WaitForLeader(ctx, m.RaftNode); err != nil {
		m.server.Stop()
		return err
	}

	c, err := raft.WaitForCluster(ctx, m.RaftNode)
	if err != nil {
		m.server.Stop()
		return err
	}
	raftConfig := c.Spec.Raft

	if int(raftConfig.ElectionTick) != m.RaftNode.Config.ElectionTick {
		log.G(ctx).Warningf("election tick value (%ds) is different from the one defined in the cluster config (%vs), the cluster may be unstable", m.RaftNode.Config.ElectionTick, raftConfig.ElectionTick)
	}
	if int(raftConfig.HeartbeatTick) != m.RaftNode.Config.HeartbeatTick {
		log.G(ctx).Warningf("heartbeat tick value (%ds) is different from the one defined in the cluster config (%vs), the cluster may be unstable", m.RaftNode.Config.HeartbeatTick, raftConfig.HeartbeatTick)
	}

	// wait for an error in serving.
	err = <-errServe
	select {
	// check to see if stopped was posted to. if so, we're in the process of
	// stopping, or done and that's why we got the error. if stopping is
	// deliberate, stopped will ALWAYS be closed before the error is triggered,
	// so this path will ALWAYS be taken if the stop was deliberate
	case <-m.stopped:
		// shutdown was requested, do not return an error
		// but first, we wait to acquire a mutex to guarantee that stopping is
		// finished. as long as we acquire the mutex BEFORE we return, we know
		// that stopping is stopped.
		m.mu.Lock()
		m.mu.Unlock()
		return nil
	// otherwise, we'll get something from errServe, which indicates that an
	// error in serving has actually occurred and this isn't a planned shutdown
	default:
		return err
	}
}
// Stop stops the manager. It immediately closes all open connections and
// active RPCs as well as stopping the scheduler. It is safe to call
// concurrently and more than once; subsequent calls return immediately.
func (m *Manager) Stop(ctx context.Context) {
	log.G(ctx).Info("Stopping manager")

	// the mutex stops us from trying to stop while we're already stopping, or
	// from returning before we've finished stopping.
	m.mu.Lock()
	defer m.mu.Unlock()

	select {
	// check to see that we've already stopped
	case <-m.stopped:
		return
	default:
		// do nothing, we're stopping for the first time
	}

	// once we start stopping, send a signal that we're doing so. this tells
	// Run that we've started stopping, when it gets the error from errServe
	// it also prevents the loop from processing any more stuff.
	close(m.stopped)

	m.Dispatcher.Stop()
	m.caserver.Stop()

	// Leader-only subsystems are nil on a follower; guard each one.
	if m.allocator != nil {
		m.allocator.Stop()
	}
	if m.replicatedOrchestrator != nil {
		m.replicatedOrchestrator.Stop()
	}
	if m.globalOrchestrator != nil {
		m.globalOrchestrator.Stop()
	}
	if m.taskReaper != nil {
		m.taskReaper.Stop()
	}
	if m.scheduler != nil {
		m.scheduler.Stop()
	}
	if m.keyManager != nil {
		m.keyManager.Stop()
	}

	if m.connSelector != nil {
		m.connSelector.Stop()
	}
	m.RaftNode.Shutdown()
	// some time after this point, Run will receive an error from one of these
	m.server.Stop()
	m.localserver.Stop()

	log.G(ctx).Info("Manager shut down")
	// mutex is released and Run can return now
}
// rotateRootCAKEK will attempt to rotate the key-encryption-key for root CA key-material in raft.
// If there is no passphrase set in ENV, it returns.
// If there is plain-text root key-material, and a passphrase set, it encrypts it.
// If there is encrypted root key-material and it is using the current passphrase, it returns.
// If there is encrypted root key-material, and it is using the previous passphrase, it
// re-encrypts it with the current passphrase.
func (m *Manager) rotateRootCAKEK(ctx context.Context, clusterID string) error {
	// If we don't have a KEK, we won't ever be rotating anything
	strPassphrase := os.Getenv(ca.PassphraseENVVar)
	if strPassphrase == "" {
		return nil
	}
	strPassphrasePrev := os.Getenv(ca.PassphraseENVVarPrev)
	passphrase := []byte(strPassphrase)
	passphrasePrev := []byte(strPassphrasePrev)

	s := m.RaftNode.MemoryStore()
	var (
		cluster  *api.Cluster
		err      error
		finalKey []byte
	)

	// Retrieve the cluster identified by ClusterID
	s.View(func(readTx store.ReadTx) {
		cluster = store.GetCluster(readTx, clusterID)
	})
	if cluster == nil {
		return fmt.Errorf("cluster not found: %s", clusterID)
	}

	// Try to get the private key from the cluster.
	// len() is 0 for a nil slice too, so the former explicit nil
	// check was redundant.
	privKeyPEM := cluster.RootCA.CAKey
	if len(privKeyPEM) == 0 {
		// We have no PEM root private key in this cluster.
		log.G(ctx).Warnf("cluster %s does not have private key material", clusterID)
		return nil
	}

	// Decode the PEM private key
	keyBlock, _ := pem.Decode(privKeyPEM)
	if keyBlock == nil {
		return fmt.Errorf("invalid PEM-encoded private key inside of cluster %s", clusterID)
	}

	// If this key is not encrypted, then we have to encrypt it
	if !x509.IsEncryptedPEMBlock(keyBlock) {
		finalKey, err = ca.EncryptECPrivateKey(privKeyPEM, strPassphrase)
		if err != nil {
			return err
		}
	} else {
		// This key is already encrypted, let's try to decrypt with the current main passphrase.
		// (passphrase is already a []byte; the former []byte(...) conversion was redundant.)
		_, err = x509.DecryptPEMBlock(keyBlock, passphrase)
		if err == nil {
			// The main key is the correct KEK, nothing to do here
			return nil
		}
		// This key is already encrypted, but failed with current main passphrase.
		// Let's try to decrypt with the previous passphrase
		unencryptedKey, err := x509.DecryptPEMBlock(keyBlock, passphrasePrev)
		if err != nil {
			// We were not able to decrypt either with the main or backup passphrase, error
			return err
		}
		unencryptedKeyBlock := &pem.Block{
			Type:    keyBlock.Type,
			Bytes:   unencryptedKey,
			Headers: keyBlock.Headers,
		}

		// We were able to decrypt the key, but with the previous passphrase. Let's encrypt
		// with the new one and store it in raft
		finalKey, err = ca.EncryptECPrivateKey(pem.EncodeToMemory(unencryptedKeyBlock), strPassphrase)
		if err != nil {
			log.G(ctx).Debugf("failed to rotate the key-encrypting-key for the root key material of cluster %s", clusterID)
			return err
		}
	}

	log.G(ctx).Infof("Re-encrypting the root key material of cluster %s", clusterID)
	// Let's update the key in the cluster object
	return s.Update(func(tx store.Tx) error {
		cluster = store.GetCluster(tx, clusterID)
		if cluster == nil {
			return fmt.Errorf("cluster not found: %s", clusterID)
		}
		cluster.RootCA.CAKey = finalKey
		return store.UpdateCluster(tx, cluster)
	})
}
manager: Fix start/stop race
m.connSelector is set in Run while the manager is starting up. If Stop
is called before this point, m.connSelector might be created after there
is any possibility of stopping it, which would result in a goroutine
leak.
Make Stop wait for the connSelector to be created, to avoid this
situation.
Signed-off-by: Aaron Lehmann <8ecfc6017a87905413dcd7d63696a2a4c351b604@docker.com>
package manager
import (
"crypto/x509"
"encoding/pem"
"fmt"
"net"
"os"
"path/filepath"
"sync"
"syscall"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/ca"
"github.com/docker/swarmkit/log"
"github.com/docker/swarmkit/manager/allocator"
"github.com/docker/swarmkit/manager/controlapi"
"github.com/docker/swarmkit/manager/dispatcher"
"github.com/docker/swarmkit/manager/health"
"github.com/docker/swarmkit/manager/keymanager"
"github.com/docker/swarmkit/manager/orchestrator"
"github.com/docker/swarmkit/manager/raftpicker"
"github.com/docker/swarmkit/manager/scheduler"
"github.com/docker/swarmkit/manager/state/raft"
"github.com/docker/swarmkit/manager/state/store"
"github.com/docker/swarmkit/protobuf/ptypes"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
const (
	// defaultTaskHistoryRetentionLimit is the number of tasks to keep
	// per service instance; older task records are reaped (see TaskReaper).
	defaultTaskHistoryRetentionLimit = 5
)
// Config is used to tune the Manager.
type Config struct {
	// SecurityConfig holds the TLS credentials and root CA material used
	// for both serving and client connections.
	SecurityConfig *ca.SecurityConfig

	// ExternalCAs is a list of initial CAs to which a manager node
	// will make certificate signing requests for node certificates.
	ExternalCAs []*api.ExternalCA

	// ProtoAddr is a map of listen addresses keyed by protocol
	// (e.g. "tcp", "unix").
	ProtoAddr map[string]string
	// ProtoListener will be used for grpc serving if it's not nil,
	// ProtoAddr fields will be used to create listeners otherwise.
	ProtoListener map[string]net.Listener

	// AdvertiseAddr is the externally-reachable address this manager
	// advertises to other members of the cluster. If empty, a wildcard
	// address derived from the TCP listen port is used instead (see New).
	AdvertiseAddr string

	// JoinRaft is an optional address of a node in an existing raft
	// cluster to join.
	JoinRaft string

	// StateDir is the top-level state directory.
	StateDir string

	// ForceNewCluster defines if we have to force a new cluster
	// because we are recovering from a backup data directory.
	ForceNewCluster bool

	// ElectionTick defines the amount of ticks needed without
	// leader to trigger a new election
	ElectionTick uint32

	// HeartbeatTick defines the amount of ticks between each
	// heartbeat sent to other members for health-check purposes
	HeartbeatTick uint32
}
// Manager is the cluster manager for Swarm.
// This is the high-level object holding and initializing all the manager
// subsystems.
type Manager struct {
	config    *Config
	listeners map[string]net.Listener

	caserver               *ca.Server
	Dispatcher             *dispatcher.Dispatcher
	replicatedOrchestrator *orchestrator.ReplicatedOrchestrator
	globalOrchestrator     *orchestrator.GlobalOrchestrator
	taskReaper             *orchestrator.TaskReaper
	scheduler              *scheduler.Scheduler
	allocator              *allocator.Allocator
	keyManager             *keymanager.KeyManager
	server                 *grpc.Server
	localserver            *grpc.Server
	RaftNode               *raft.Node
	connSelector           *raftpicker.ConnSelector

	// mu serializes Stop against the leadership-change handler in Run.
	mu sync.Mutex

	// started is closed by Run once startup has progressed far enough
	// for Stop to proceed safely (in particular, connSelector is set).
	started chan struct{}
	// stopped is closed by Stop to mark a deliberate shutdown; Run and
	// the leadership loop check it to distinguish planned termination.
	stopped chan struct{}
}
type closeOnceListener struct {
once sync.Once
net.Listener
}
func (l *closeOnceListener) Close() error {
var err error
l.once.Do(func() {
err = l.Listener.Close()
})
return err
}
// New creates a Manager which has not started to accept requests yet.
// It prepares listeners (or adopts the caller-supplied ones), creates the
// state directories, and constructs the raft node and gRPC servers, but
// starts nothing; call Run to start serving.
func New(config *Config) (*Manager, error) {
	dispatcherConfig := dispatcher.DefaultConfig()

	if config.ProtoAddr == nil {
		config.ProtoAddr = make(map[string]string)
	}

	// If the caller supplied a ready-made TCP listener, record its bound
	// address so the advertise-address fallback below can use it.
	if config.ProtoListener != nil && config.ProtoListener["tcp"] != nil {
		config.ProtoAddr["tcp"] = config.ProtoListener["tcp"].Addr().String()
	}

	// If an AdvertiseAddr was specified, we use that as our
	// externally-reachable address.
	tcpAddr := config.AdvertiseAddr

	if tcpAddr == "" {
		// Otherwise, we know we are joining an existing swarm. Use a
		// wildcard address to trigger remote autodetection of our
		// address.
		_, tcpAddrPort, err := net.SplitHostPort(config.ProtoAddr["tcp"])
		if err != nil {
			return nil, fmt.Errorf("missing or invalid listen address %s", config.ProtoAddr["tcp"])
		}

		// Even with an IPv6 listening address, it's okay to use
		// 0.0.0.0 here. Any "unspecified" (wildcard) IP will
		// be substituted with the actual source address.
		tcpAddr = net.JoinHostPort("0.0.0.0", tcpAddrPort)
	}

	err := os.MkdirAll(filepath.Dir(config.ProtoAddr["unix"]), 0700)
	if err != nil {
		return nil, fmt.Errorf("failed to create socket directory: %v", err)
	}

	err = os.MkdirAll(config.StateDir, 0700)
	if err != nil {
		return nil, fmt.Errorf("failed to create state directory: %v", err)
	}

	raftStateDir := filepath.Join(config.StateDir, "raft")
	err = os.MkdirAll(raftStateDir, 0700)
	if err != nil {
		return nil, fmt.Errorf("failed to create raft state directory: %v", err)
	}

	var listeners map[string]net.Listener
	if len(config.ProtoListener) > 0 {
		// Adopt the listeners the caller supplied.
		listeners = config.ProtoListener
	} else {
		listeners = make(map[string]net.Listener)

		for proto, addr := range config.ProtoAddr {
			l, err := net.Listen(proto, addr)

			// A unix socket may fail to bind if the file already
			// exists. Try replacing the file. Unwrap the error
			// layers (net.OpError -> os.SyscallError) to reach the
			// raw errno.
			unwrappedErr := err
			if op, ok := unwrappedErr.(*net.OpError); ok {
				unwrappedErr = op.Err
			}
			if sys, ok := unwrappedErr.(*os.SyscallError); ok {
				unwrappedErr = sys.Err
			}
			if proto == "unix" && unwrappedErr == syscall.EADDRINUSE {
				os.Remove(addr)
				l, err = net.Listen(proto, addr)
				if err != nil {
					return nil, err
				}
			} else if err != nil {
				return nil, err
			}
			listeners[proto] = l
		}
	}

	raftCfg := raft.DefaultNodeConfig()

	if config.ElectionTick > 0 {
		raftCfg.ElectionTick = int(config.ElectionTick)
	}
	if config.HeartbeatTick > 0 {
		raftCfg.HeartbeatTick = int(config.HeartbeatTick)
	}

	newNodeOpts := raft.NewNodeOptions{
		ID:              config.SecurityConfig.ClientTLSCreds.NodeID(),
		Addr:            tcpAddr,
		JoinAddr:        config.JoinRaft,
		Config:          raftCfg,
		StateDir:        raftStateDir,
		ForceNewCluster: config.ForceNewCluster,
		TLSCredentials:  config.SecurityConfig.ClientTLSCreds,
	}
	RaftNode := raft.NewNode(context.TODO(), newNodeOpts)

	opts := []grpc.ServerOption{
		grpc.Creds(config.SecurityConfig.ServerTLSCreds)}

	m := &Manager{
		config:      config,
		listeners:   listeners,
		caserver:    ca.NewServer(RaftNode.MemoryStore(), config.SecurityConfig),
		Dispatcher:  dispatcher.New(RaftNode, dispatcherConfig),
		server:      grpc.NewServer(opts...),
		localserver: grpc.NewServer(opts...),
		RaftNode:    RaftNode,
		started:     make(chan struct{}),
		stopped:     make(chan struct{}),
	}

	return m, nil
}
// Run starts all manager sub-systems and the gRPC server at the configured
// address.
// The call never returns unless an error occurs or `Stop()` is called.
//
// TODO(aluzzardi): /!\ This function is *way* too complex. /!\
// It needs to be split into smaller manageable functions.
func (m *Manager) Run(parent context.Context) error {
	ctx, ctxCancel := context.WithCancel(parent)
	defer ctxCancel()

	// Harakiri: cancel our derived context as soon as a stop is requested,
	// which shuts down every goroutine started below with ctx.
	go func() {
		select {
		case <-ctx.Done():
		case <-m.stopped:
			ctxCancel()
		}
	}()

	leadershipCh, cancel := m.RaftNode.SubscribeLeadership()
	defer cancel()

	// Leadership loop: start leader-only subsystems on becoming leader and
	// tear them down on becoming a follower.
	go func() {
		for leadershipEvent := range leadershipCh {
			// read out and discard all of the messages when we've stopped
			// don't acquire the mutex yet. if stopped is closed, we don't need
			// this stops this loop from starving Run()'s attempt to Lock
			select {
			case <-m.stopped:
				continue
			default:
				// do nothing, we're not stopped
			}
			// we're not stopping so NOW acquire the mutex
			m.mu.Lock()
			newState := leadershipEvent.(raft.LeadershipState)
			if newState == raft.IsLeader {
				s := m.RaftNode.MemoryStore()
				rootCA := m.config.SecurityConfig.RootCA()
				nodeID := m.config.SecurityConfig.ClientTLSCreds.NodeID()

				raftCfg := raft.DefaultRaftConfig()
				raftCfg.ElectionTick = uint32(m.RaftNode.Config.ElectionTick)
				raftCfg.HeartbeatTick = uint32(m.RaftNode.Config.HeartbeatTick)

				clusterID := m.config.SecurityConfig.ClientTLSCreds.Organization()

				initialCAConfig := ca.DefaultCAConfig()
				initialCAConfig.ExternalCAs = m.config.ExternalCAs

				s.Update(func(tx store.Tx) error {
					// Add a default cluster object to the
					// store. Don't check the error because
					// we expect this to fail unless this
					// is a brand new cluster.
					store.CreateCluster(tx, &api.Cluster{
						ID: clusterID,
						Spec: api.ClusterSpec{
							Annotations: api.Annotations{
								Name: store.DefaultClusterName,
							},
							Orchestration: api.OrchestrationConfig{
								TaskHistoryRetentionLimit: defaultTaskHistoryRetentionLimit,
							},
							Dispatcher: api.DispatcherConfig{
								HeartbeatPeriod: ptypes.DurationProto(dispatcher.DefaultHeartBeatPeriod),
							},
							Raft:     raftCfg,
							CAConfig: initialCAConfig,
						},
						RootCA: api.RootCA{
							CAKey:      rootCA.Key,
							CACert:     rootCA.Cert,
							CACertHash: rootCA.Digest.String(),
							JoinTokens: api.JoinTokens{
								Worker:  ca.GenerateJoinToken(rootCA),
								Manager: ca.GenerateJoinToken(rootCA),
							},
						},
					})
					// Add Node entry for ourself, if one
					// doesn't exist already.
					store.CreateNode(tx, &api.Node{
						ID: nodeID,
						Certificate: api.Certificate{
							CN:   nodeID,
							Role: api.NodeRoleManager,
							Status: api.IssuanceStatus{
								State: api.IssuanceStateIssued,
							},
						},
						Spec: api.NodeSpec{
							Role:       api.NodeRoleManager,
							Membership: api.NodeMembershipAccepted,
						},
					})
					return nil
				})

				// Attempt to rotate the key-encrypting-key of the root CA key-material
				err := m.rotateRootCAKEK(ctx, clusterID)
				if err != nil {
					log.G(ctx).WithError(err).Error("root key-encrypting-key rotation failed")
				}

				m.replicatedOrchestrator = orchestrator.NewReplicatedOrchestrator(s)
				m.globalOrchestrator = orchestrator.NewGlobalOrchestrator(s)
				m.taskReaper = orchestrator.NewTaskReaper(s)
				m.scheduler = scheduler.New(s)
				m.keyManager = keymanager.New(m.RaftNode.MemoryStore(), keymanager.DefaultConfig())

				// TODO(stevvooe): Allocate a context that can be used to
				// shutdown underlying manager processes when leadership is
				// lost.

				m.allocator, err = allocator.New(s)
				if err != nil {
					log.G(ctx).WithError(err).Error("failed to create allocator")
					// TODO(stevvooe): It doesn't seem correct here to fail
					// creating the allocator but then use it anyway.
				}

				go func(keyManager *keymanager.KeyManager) {
					if err := keyManager.Run(ctx); err != nil {
						log.G(ctx).WithError(err).Error("keymanager failed with an error")
					}
				}(m.keyManager)

				go func(d *dispatcher.Dispatcher) {
					if err := d.Run(ctx); err != nil {
						log.G(ctx).WithError(err).Error("Dispatcher exited with an error")
					}
				}(m.Dispatcher)

				go func(server *ca.Server) {
					if err := server.Run(ctx); err != nil {
						log.G(ctx).WithError(err).Error("CA signer exited with an error")
					}
				}(m.caserver)

				// Start all sub-components in separate goroutines.
				// TODO(aluzzardi): This should have some kind of error handling so that
				// any component that goes down would bring the entire manager down.

				if m.allocator != nil {
					go func(allocator *allocator.Allocator) {
						if err := allocator.Run(ctx); err != nil {
							log.G(ctx).WithError(err).Error("allocator exited with an error")
						}
					}(m.allocator)
				}

				go func(scheduler *scheduler.Scheduler) {
					if err := scheduler.Run(ctx); err != nil {
						log.G(ctx).WithError(err).Error("scheduler exited with an error")
					}
				}(m.scheduler)

				go func(taskReaper *orchestrator.TaskReaper) {
					taskReaper.Run()
				}(m.taskReaper)

				go func(orchestrator *orchestrator.ReplicatedOrchestrator) {
					if err := orchestrator.Run(ctx); err != nil {
						log.G(ctx).WithError(err).Error("replicated orchestrator exited with an error")
					}
				}(m.replicatedOrchestrator)

				go func(globalOrchestrator *orchestrator.GlobalOrchestrator) {
					if err := globalOrchestrator.Run(ctx); err != nil {
						log.G(ctx).WithError(err).Error("global orchestrator exited with an error")
					}
				}(m.globalOrchestrator)

			} else if newState == raft.IsFollower {
				// Leadership lost: stop and release every leader-only
				// subsystem started above.
				m.Dispatcher.Stop()
				m.caserver.Stop()

				if m.allocator != nil {
					m.allocator.Stop()
					m.allocator = nil
				}

				m.replicatedOrchestrator.Stop()
				m.replicatedOrchestrator = nil

				m.globalOrchestrator.Stop()
				m.globalOrchestrator = nil

				m.taskReaper.Stop()
				m.taskReaper = nil

				m.scheduler.Stop()
				m.scheduler = nil

				m.keyManager.Stop()
				m.keyManager = nil
			}
			m.mu.Unlock()
		}
	}()

	proxyOpts := []grpc.DialOption{
		grpc.WithBackoffMaxDelay(2 * time.Second),
		grpc.WithTransportCredentials(m.config.SecurityConfig.ClientTLSCreds),
	}

	cs := raftpicker.NewConnSelector(m.RaftNode, proxyOpts...)
	m.connSelector = cs

	// authorize restricts forwarded requests to ones relayed by managers.
	authorize := func(ctx context.Context, roles []string) error {
		// Authorize the remote roles, ensure they can only be forwarded by managers
		_, err := ca.AuthorizeForwardedRoleAndOrg(ctx, roles, []string{ca.ManagerRole}, m.config.SecurityConfig.ClientTLSCreds.Organization())
		return err
	}

	baseControlAPI := controlapi.NewServer(m.RaftNode.MemoryStore(), m.RaftNode, m.config.SecurityConfig.RootCA())
	healthServer := health.NewHealthServer()

	authenticatedControlAPI := api.NewAuthenticatedWrapperControlServer(baseControlAPI, authorize)
	authenticatedDispatcherAPI := api.NewAuthenticatedWrapperDispatcherServer(m.Dispatcher, authorize)
	authenticatedCAAPI := api.NewAuthenticatedWrapperCAServer(m.caserver, authorize)
	authenticatedNodeCAAPI := api.NewAuthenticatedWrapperNodeCAServer(m.caserver, authorize)
	authenticatedRaftAPI := api.NewAuthenticatedWrapperRaftServer(m.RaftNode, authorize)
	authenticatedHealthAPI := api.NewAuthenticatedWrapperHealthServer(healthServer, authorize)
	authenticatedRaftMembershipAPI := api.NewAuthenticatedWrapperRaftMembershipServer(m.RaftNode, authorize)

	proxyDispatcherAPI := api.NewRaftProxyDispatcherServer(authenticatedDispatcherAPI, cs, m.RaftNode, ca.WithMetadataForwardTLSInfo)
	proxyCAAPI := api.NewRaftProxyCAServer(authenticatedCAAPI, cs, m.RaftNode, ca.WithMetadataForwardTLSInfo)
	proxyNodeCAAPI := api.NewRaftProxyNodeCAServer(authenticatedNodeCAAPI, cs, m.RaftNode, ca.WithMetadataForwardTLSInfo)
	proxyRaftMembershipAPI := api.NewRaftProxyRaftMembershipServer(authenticatedRaftMembershipAPI, cs, m.RaftNode, ca.WithMetadataForwardTLSInfo)

	// localProxyControlAPI is a special kind of proxy. It is only wired up
	// to receive requests from a trusted local socket, and these requests
	// don't use TLS, therefore the requests it handles locally should
	// bypass authorization. When it proxies, it sends them as requests from
	// this manager rather than forwarded requests (it has no TLS
	// information to put in the metadata map).
	forwardAsOwnRequest := func(ctx context.Context) (context.Context, error) { return ctx, nil }
	localProxyControlAPI := api.NewRaftProxyControlServer(baseControlAPI, cs, m.RaftNode, forwardAsOwnRequest)

	// Everything registered on m.server should be an authenticated
	// wrapper, or a proxy wrapping an authenticated wrapper!
	api.RegisterCAServer(m.server, proxyCAAPI)
	api.RegisterNodeCAServer(m.server, proxyNodeCAAPI)
	api.RegisterRaftServer(m.server, authenticatedRaftAPI)
	api.RegisterHealthServer(m.server, authenticatedHealthAPI)
	api.RegisterRaftMembershipServer(m.server, proxyRaftMembershipAPI)
	api.RegisterControlServer(m.localserver, localProxyControlAPI)
	api.RegisterControlServer(m.server, authenticatedControlAPI)
	api.RegisterDispatcherServer(m.server, proxyDispatcherAPI)

	errServe := make(chan error, 2)
	for proto, l := range m.listeners {
		go func(proto string, lis net.Listener) {
			ctx := log.WithLogger(ctx, log.G(ctx).WithFields(
				logrus.Fields{
					"proto": lis.Addr().Network(),
					"addr":  lis.Addr().String()}))
			if proto == "unix" {
				log.G(ctx).Info("Listening for local connections")
				// we need to disallow double closes because UnixListener.Close
				// can delete unix-socket file of newer listener. grpc calls
				// Close twice indeed: in Serve and in Stop.
				errServe <- m.localserver.Serve(&closeOnceListener{Listener: lis})
			} else {
				log.G(ctx).Info("Listening for connections")
				errServe <- m.server.Serve(lis)
			}
		}(proto, l)
	}

	// Set the raft server as serving for the health server
	healthServer.SetServingStatus("Raft", api.HealthCheckResponse_SERVING)

	if err := m.RaftNode.JoinAndStart(); err != nil {
		for _, lis := range m.listeners {
			lis.Close()
		}
		return fmt.Errorf("can't initialize raft node: %v", err)
	}

	// Startup is far enough along for Stop to proceed safely (in
	// particular, m.connSelector has been created above), so unblock it.
	close(m.started)

	go func() {
		err := m.RaftNode.Run(ctx)
		if err != nil {
			log.G(ctx).Error(err)
			m.Stop(ctx)
		}
	}()

	if err := raft.WaitForLeader(ctx, m.RaftNode); err != nil {
		m.server.Stop()
		return err
	}

	c, err := raft.WaitForCluster(ctx, m.RaftNode)
	if err != nil {
		m.server.Stop()
		return err
	}
	raftConfig := c.Spec.Raft

	if int(raftConfig.ElectionTick) != m.RaftNode.Config.ElectionTick {
		log.G(ctx).Warningf("election tick value (%ds) is different from the one defined in the cluster config (%vs), the cluster may be unstable", m.RaftNode.Config.ElectionTick, raftConfig.ElectionTick)
	}
	if int(raftConfig.HeartbeatTick) != m.RaftNode.Config.HeartbeatTick {
		log.G(ctx).Warningf("heartbeat tick value (%ds) is different from the one defined in the cluster config (%vs), the cluster may be unstable", m.RaftNode.Config.HeartbeatTick, raftConfig.HeartbeatTick)
	}

	// wait for an error in serving.
	err = <-errServe
	select {
	// check to see if stopped was posted to. if so, we're in the process of
	// stopping, or done and that's why we got the error. if stopping is
	// deliberate, stopped will ALWAYS be closed before the error is trigger,
	// so this path will ALWAYS be taken if the stop was deliberate
	case <-m.stopped:
		// shutdown was requested, do not return an error
		// but first, we wait to acquire a mutex to guarantee that stopping is
		// finished. as long as we acquire the mutex BEFORE we return, we know
		// that stopping is stopped.
		m.mu.Lock()
		m.mu.Unlock()

		return nil
	// otherwise, we'll get something from errServe, which indicates that an
	// error in serving has actually occurred and this isn't a planned shutdown
	default:
		return err
	}
}
// Stop stops the manager. It immediately closes all open connections and
// active RPCs as well as stopping the scheduler.
func (m *Manager) Stop(ctx context.Context) {
	log.G(ctx).Info("Stopping manager")

	// It's not safe to start shutting down while the manager is still
	// starting up. Run closes m.started once all startup state (notably
	// m.connSelector) exists, so block here until that point.
	<-m.started

	// the mutex stops us from trying to stop while we're already stopping, or
	// from returning before we've finished stopping.
	m.mu.Lock()
	defer m.mu.Unlock()
	select {
	// check to see that we've already stopped
	case <-m.stopped:
		return
	default:
		// do nothing, we're stopping for the first time
	}

	// once we start stopping, send a signal that we're doing so. this tells
	// Run that we've started stopping, when it gets the error from errServe
	// it also prevents the leadership loop from processing any more events.
	close(m.stopped)

	m.Dispatcher.Stop()
	m.caserver.Stop()

	// The orchestration subsystems are only non-nil while this node is the
	// leader (see the leadership loop in Run), hence the nil checks.
	if m.allocator != nil {
		m.allocator.Stop()
	}
	if m.replicatedOrchestrator != nil {
		m.replicatedOrchestrator.Stop()
	}
	if m.globalOrchestrator != nil {
		m.globalOrchestrator.Stop()
	}
	if m.taskReaper != nil {
		m.taskReaper.Stop()
	}
	if m.scheduler != nil {
		m.scheduler.Stop()
	}
	if m.keyManager != nil {
		m.keyManager.Stop()
	}

	if m.connSelector != nil {
		m.connSelector.Stop()
	}
	m.RaftNode.Shutdown()
	// some time after this point, Run will receive an error from one of these
	m.server.Stop()
	m.localserver.Stop()

	log.G(ctx).Info("Manager shut down")
	// mutex is released and Run can return now
}
// rotateRootCAKEK will attempt to rotate the key-encryption-key for root CA key-material in raft.
// If there is no passphrase set in ENV, it returns.
// If there is plain-text root key-material, and a passphrase set, it encrypts it.
// If there is encrypted root key-material and it is using the current passphrase, it returns.
// If there is encrypted root key-material, and it is using the previous passphrase, it
// re-encrypts it with the current passphrase.
func (m *Manager) rotateRootCAKEK(ctx context.Context, clusterID string) error {
	// If we don't have a KEK, we won't ever be rotating anything
	strPassphrase := os.Getenv(ca.PassphraseENVVar)
	if strPassphrase == "" {
		return nil
	}
	strPassphrasePrev := os.Getenv(ca.PassphraseENVVarPrev)
	passphrase := []byte(strPassphrase)
	passphrasePrev := []byte(strPassphrasePrev)

	s := m.RaftNode.MemoryStore()
	var (
		cluster  *api.Cluster
		err      error
		finalKey []byte
	)
	// Retrieve the cluster identified by ClusterID
	s.View(func(readTx store.ReadTx) {
		cluster = store.GetCluster(readTx, clusterID)
	})
	if cluster == nil {
		return fmt.Errorf("cluster not found: %s", clusterID)
	}

	// Try to get the private key from the cluster.
	// len() == 0 covers both a nil and an empty slice.
	privKeyPEM := cluster.RootCA.CAKey
	if len(privKeyPEM) == 0 {
		// We have no PEM root private key in this cluster.
		log.G(ctx).Warnf("cluster %s does not have private key material", clusterID)
		return nil
	}

	// Decode the PEM private key
	keyBlock, _ := pem.Decode(privKeyPEM)
	if keyBlock == nil {
		return fmt.Errorf("invalid PEM-encoded private key inside of cluster %s", clusterID)
	}

	if !x509.IsEncryptedPEMBlock(keyBlock) {
		// The key is plain-text: encrypt it with the current passphrase.
		finalKey, err = ca.EncryptECPrivateKey(privKeyPEM, strPassphrase)
		if err != nil {
			return err
		}
	} else {
		// This key is already encrypted; if the current main passphrase
		// decrypts it, the KEK is up to date and there is nothing to do.
		if _, err = x509.DecryptPEMBlock(keyBlock, passphrase); err == nil {
			return nil
		}

		// The current passphrase failed; try the previous passphrase.
		unencryptedKey, decryptErr := x509.DecryptPEMBlock(keyBlock, passphrasePrev)
		if decryptErr != nil {
			// We were not able to decrypt either with the main or backup passphrase, error
			return decryptErr
		}
		// NOTE(review): the original headers (which include the PEM
		// encryption headers) are copied onto the decrypted block, as the
		// prior implementation did — presumably EncryptECPrivateKey
		// ignores them; confirm before changing.
		unencryptedKeyBlock := &pem.Block{
			Type:    keyBlock.Type,
			Bytes:   unencryptedKey,
			Headers: keyBlock.Headers,
		}

		// We were able to decrypt the key with the previous passphrase.
		// Re-encrypt with the new one so it can be stored back in raft.
		finalKey, err = ca.EncryptECPrivateKey(pem.EncodeToMemory(unencryptedKeyBlock), strPassphrase)
		if err != nil {
			log.G(ctx).Debugf("failed to rotate the key-encrypting-key for the root key material of cluster %s", clusterID)
			return err
		}
	}

	log.G(ctx).Infof("Re-encrypting the root key material of cluster %s", clusterID)
	// Update the key in the cluster object, re-reading the cluster inside
	// the transaction so we don't clobber a concurrent update.
	return s.Update(func(tx store.Tx) error {
		cluster = store.GetCluster(tx, clusterID)
		if cluster == nil {
			return fmt.Errorf("cluster not found: %s", clusterID)
		}
		cluster.RootCA.CAKey = finalKey
		return store.UpdateCluster(tx, cluster)
	})
}
|
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Manager of cAdvisor-monitored containers.
package manager
import (
"flag"
"fmt"
"path"
"regexp"
"strings"
"sync"
"time"
"github.com/docker/libcontainer/cgroups"
"github.com/golang/glog"
"github.com/google/cadvisor/container"
"github.com/google/cadvisor/container/docker"
"github.com/google/cadvisor/container/raw"
"github.com/google/cadvisor/events"
"github.com/google/cadvisor/fs"
info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/info/v2"
"github.com/google/cadvisor/storage/memory"
"github.com/google/cadvisor/utils/cpuload"
"github.com/google/cadvisor/utils/oomparser"
"github.com/google/cadvisor/utils/sysfs"
)
// globalHousekeepingInterval controls how often the manager scans for new
// or removed containers outside of per-container housekeeping.
var globalHousekeepingInterval = flag.Duration("global_housekeeping_interval", 1*time.Minute, "Interval between global housekeepings")

// logCadvisorUsage toggles logging of resource usage of the cAdvisor
// container itself.
var logCadvisorUsage = flag.Bool("log_cadvisor_usage", false, "Whether to log the usage of the cAdvisor container")
// The Manager interface defines operations for starting a manager and getting
// container and machine information.
type Manager interface {
	// Start the manager. Calling other manager methods before Start is
	// not supported.
	Start() error

	// Stops the manager.
	Stop() error

	// Get information about a container.
	GetContainerInfo(containerName string, query *info.ContainerInfoRequest) (*info.ContainerInfo, error)

	// Get information about all subcontainers of the specified container (includes self).
	SubcontainersInfo(containerName string, query *info.ContainerInfoRequest) ([]*info.ContainerInfo, error)

	// Gets all the Docker containers. Return is a map from full container name to ContainerInfo.
	AllDockerContainers(query *info.ContainerInfoRequest) (map[string]info.ContainerInfo, error)

	// Gets information about a specific Docker container. The specified name is within the Docker namespace.
	DockerContainer(dockerName string, query *info.ContainerInfoRequest) (info.ContainerInfo, error)

	// Gets spec for a container.
	GetContainerSpec(containerName string) (v2.ContainerSpec, error)

	// Get derived stats for a container.
	GetContainerDerivedStats(containerName string) (v2.DerivedStats, error)

	// Get information about the machine.
	GetMachineInfo() (*info.MachineInfo, error)

	// Get version information about different components we depend on.
	GetVersionInfo() (*info.VersionInfo, error)

	// Get filesystem information for a given label.
	// Returns information for all global filesystems if label is empty.
	GetFsInfo(label string) ([]v2.FsInfo, error)

	// Get events streamed through passedChannel that fit the request.
	WatchForEvents(request *events.Request, passedChannel chan *events.Event) error

	// Get past events that have been detected and that fit the request.
	GetPastEvents(request *events.Request) (events.EventSlice, error)
}
// New takes a memory storage and returns a new manager.
// It detects the cgroup cAdvisor itself runs in, gathers machine and
// version information, and registers the Docker and raw container
// factories. Factory registration failures are logged but not fatal.
func New(memoryStorage *memory.InMemoryStorage, sysfs sysfs.SysFs) (Manager, error) {
	if memoryStorage == nil {
		return nil, fmt.Errorf("manager requires memory storage")
	}

	// Detect the container we are running on.
	selfContainer, err := cgroups.GetThisCgroupDir("cpu")
	if err != nil {
		return nil, err
	}
	glog.Infof("cAdvisor running in container: %q", selfContainer)

	context := fs.Context{DockerRoot: docker.RootDir()}
	fsInfo, err := fs.NewFsInfo(context)
	if err != nil {
		return nil, err
	}
	newManager := &manager{
		containers:        make(map[namespacedContainerName]*containerData),
		quitChannels:      make([]chan error, 0, 2),
		memoryStorage:     memoryStorage,
		fsInfo:            fsInfo,
		cadvisorContainer: selfContainer,
		startupTime:       time.Now(),
	}

	machineInfo, err := getMachineInfo(sysfs, fsInfo)
	if err != nil {
		return nil, err
	}
	newManager.machineInfo = *machineInfo
	glog.Infof("Machine: %+v", newManager.machineInfo)

	versionInfo, err := getVersionInfo()
	if err != nil {
		return nil, err
	}
	newManager.versionInfo = *versionInfo
	glog.Infof("Version: %+v", newManager.versionInfo)

	newManager.eventHandler = events.NewEventManager()

	// Register Docker container factory.
	err = docker.Register(newManager, fsInfo)
	if err != nil {
		// Non-fatal: Docker may simply be unavailable on this host.
		glog.Errorf("Docker container factory registration failed: %v.", err)
	}

	// Register the raw driver.
	err = raw.Register(newManager, fsInfo)
	if err != nil {
		glog.Errorf("Registration of the raw container factory failed: %v", err)
	}

	return newManager, nil
}
// A namespaced container name, used as the key into manager.containers.
type namespacedContainerName struct {
	// The namespace of the container. Can be empty for the root namespace.
	Namespace string

	// The name of the container in this namespace.
	Name string
}
// manager is the concrete implementation of the Manager interface.
type manager struct {
	// containers tracks every monitored container; guarded by containersLock.
	containers map[namespacedContainerName]*containerData
	// containersLock guards the containers map.
	containersLock sync.RWMutex
	// memoryStorage holds recent per-container stats.
	memoryStorage *memory.InMemoryStorage
	fsInfo        fs.FsInfo
	machineInfo   info.MachineInfo
	versionInfo   info.VersionInfo
	// quitChannels are used to signal and wait for background goroutines
	// during Stop.
	quitChannels []chan error
	// cadvisorContainer is the cgroup path cAdvisor itself runs in.
	cadvisorContainer      string
	dockerContainersRegexp *regexp.Regexp
	loadReader             cpuload.CpuLoadReader
	eventHandler           events.EventManager
	startupTime            time.Time
}
// Start the container manager.
//
// It (optionally) wires up a cpu load reader (currently disabled), starts
// the OOM watcher, creates the root container, recovers existing
// containers, and launches the container watcher and global housekeeping
// goroutines.
func (self *manager) Start() error {
	// TODO(rjnagal): Skip creating cpu load reader while we improve resource usage and accuracy.
	if false {
		// Create cpu load reader.
		cpuLoadReader, err := cpuload.New()
		if err != nil {
			// TODO(rjnagal): Promote to warning once we support cpu load inside namespaces.
			glog.Infof("Could not initialize cpu load reader: %s", err)
		} else {
			err = cpuLoadReader.Start()
			if err != nil {
				// Fixed: glog.Warning does not take a format string, so the
				// error was never interpolated; Warningf is the formatting variant.
				glog.Warningf("Could not start cpu load stat collector: %s", err)
			} else {
				self.loadReader = cpuLoadReader
			}
		}
	}

	// Watch for OOMs.
	err := self.watchForNewOoms()
	if err != nil {
		glog.Errorf("Failed to start OOM watcher, will not get OOM events: %v", err)
	}

	// If there are no factories, don't start any housekeeping and serve the information we do have.
	if !container.HasFactories() {
		return nil
	}

	// Create root and then recover all containers.
	err = self.createContainer("/")
	if err != nil {
		return err
	}
	glog.Infof("Starting recovery of all containers")
	err = self.detectSubcontainers("/")
	if err != nil {
		return err
	}
	glog.Infof("Recovery completed")

	// Watch for new container.
	quitWatcher := make(chan error)
	err = self.watchForNewContainers(quitWatcher)
	if err != nil {
		return err
	}
	self.quitChannels = append(self.quitChannels, quitWatcher)

	// Look for new containers in the main housekeeping thread.
	quitGlobalHousekeeping := make(chan error)
	self.quitChannels = append(self.quitChannels, quitGlobalHousekeeping)
	go self.globalHousekeeping(quitGlobalHousekeeping)

	return nil
}
// Stop stops the manager: it signals every registered background goroutine
// on its quit channel and waits for each to acknowledge with a reply. On
// the first failure, the channels that already quit are dropped from
// quitChannels (the failing channel and any not-yet-signaled ones are kept,
// so Stop can be retried) and the error is returned.
func (self *manager) Stop() error {
	// Stop and wait on all quit channels.
	for i, c := range self.quitChannels {
		// Send the exit signal and wait for the goroutine to reply on the
		// same channel, confirming it has exited.
		c <- nil
		err := <-c
		if err != nil {
			// Remove the channels that quit successfully (indexes < i).
			self.quitChannels = self.quitChannels[i:]
			return err
		}
	}
	// Everything quit cleanly; reset for a potential future Start.
	self.quitChannels = make([]chan error, 0, 2)
	if self.loadReader != nil {
		self.loadReader.Stop()
		self.loadReader = nil
	}
	return nil
}
// globalHousekeeping periodically scans for new subcontainers until a value
// is received on quit; it replies on quit before returning so the caller
// (Stop) can synchronize on its exit.
func (self *manager) globalHousekeeping(quit chan error) {
	// Long housekeeping is either 100ms or half of the housekeeping interval.
	longHousekeeping := 100 * time.Millisecond
	if *globalHousekeepingInterval/2 < longHousekeeping {
		longHousekeeping = *globalHousekeepingInterval / 2
	}

	// Use a stoppable ticker instead of time.Tick: time.Tick has no Stop
	// and would leak its ticker once this goroutine returns on quit.
	ticker := time.NewTicker(*globalHousekeepingInterval)
	defer ticker.Stop()
	for {
		select {
		case t := <-ticker.C:
			start := time.Now()

			// Check for new containers.
			err := self.detectSubcontainers("/")
			if err != nil {
				glog.Errorf("Failed to detect containers: %s", err)
			}

			// Log if housekeeping took too long.
			duration := time.Since(start)
			if duration >= longHousekeeping {
				glog.V(1).Infof("Global Housekeeping(%d) took %s", t.Unix(), duration)
			}
		case <-quit:
			// Quit if asked to do so; acknowledge so Stop can proceed.
			quit <- nil
			glog.Infof("Exiting global housekeeping thread")
			return
		}
	}
}
// getContainerData looks up the tracked container registered under
// containerName in the root namespace, returning an error when no such
// container is known.
func (self *manager) getContainerData(containerName string) (*containerData, error) {
	key := namespacedContainerName{Name: containerName}

	self.containersLock.RLock()
	cont, found := self.containers[key]
	self.containersLock.RUnlock()

	if !found {
		return nil, fmt.Errorf("unknown container %q", containerName)
	}
	return cont, nil
}
// GetContainerSpec returns the v2 spec for the named container, derived
// from its v1 info after limit adjustment.
func (self *manager) GetContainerSpec(containerName string) (v2.ContainerSpec, error) {
	var empty v2.ContainerSpec

	cont, err := self.getContainerData(containerName)
	if err != nil {
		return empty, err
	}
	cinfo, err := cont.GetInfo()
	if err != nil {
		return empty, err
	}
	return self.getV2Spec(cinfo), nil
}
// getV2Spec converts a v1 container info into a v2 ContainerSpec, copying
// the cpu/memory sections only when the corresponding Has* flag is set.
func (self *manager) getV2Spec(cinfo *containerInfo) v2.ContainerSpec {
	v1Spec := self.getAdjustedSpec(cinfo)

	out := v2.ContainerSpec{
		CreationTime: v1Spec.CreationTime,
		HasCpu:       v1Spec.HasCpu,
		HasMemory:    v1Spec.HasMemory,
		Aliases:      cinfo.Aliases,
		Namespace:    cinfo.Namespace,
	}
	if v1Spec.HasCpu {
		out.Cpu.Limit = v1Spec.Cpu.Limit
		out.Cpu.MaxLimit = v1Spec.Cpu.MaxLimit
		out.Cpu.Mask = v1Spec.Cpu.Mask
	}
	if v1Spec.HasMemory {
		out.Memory.Limit = v1Spec.Memory.Limit
		out.Memory.Reservation = v1Spec.Memory.Reservation
		out.Memory.SwapLimit = v1Spec.Memory.SwapLimit
	}
	return out
}
// getAdjustedSpec returns a copy of the container's spec with defaults
// resolved to concrete values.
func (self *manager) getAdjustedSpec(cinfo *containerInfo) info.ContainerSpec {
	adjusted := cinfo.Spec
	// A memory limit of 0 means "no limit"; report the machine's full
	// memory capacity instead.
	if adjusted.HasMemory && adjusted.Memory.Limit == 0 {
		adjusted.Memory.Limit = uint64(self.machineInfo.MemoryCapacity)
	}
	return adjusted
}
// GetContainerInfo returns info and recent stats for the named container
// in the root namespace.
func (self *manager) GetContainerInfo(containerName string, query *info.ContainerInfoRequest) (*info.ContainerInfo, error) {
	data, lookupErr := self.getContainerData(containerName)
	if lookupErr != nil {
		return nil, lookupErr
	}
	return self.containerDataToContainerInfo(data, query)
}
// containerDataToContainerInfo builds a user-facing ContainerInfo for a
// tracked container: its current info plus the recent stats selected by
// the query.
func (self *manager) containerDataToContainerInfo(cont *containerData, query *info.ContainerInfoRequest) (*info.ContainerInfo, error) {
	cinfo, err := cont.GetInfo()
	if err != nil {
		return nil, err
	}
	stats, err := self.memoryStorage.RecentStats(cinfo.Name, query.Start, query.End, query.NumStats)
	if err != nil {
		return nil, err
	}

	// Hand back a copy so callers cannot mutate our internal state.
	result := info.ContainerInfo{
		ContainerReference: cinfo.ContainerReference,
		Subcontainers:      cinfo.Subcontainers,
		Spec:               self.getAdjustedSpec(cinfo),
		Stats:              stats,
	}
	return &result, nil
}
// SubcontainersInfo returns info for the named container and every
// container whose name lies under it in the hierarchy.
func (self *manager) SubcontainersInfo(containerName string, query *info.ContainerInfoRequest) ([]*info.ContainerInfo, error) {
	subtreePrefix := path.Join(containerName, "/")

	var matched []*containerData
	func() {
		self.containersLock.RLock()
		defer self.containersLock.RUnlock()
		matched = make([]*containerData, 0, len(self.containers))
		// Collect the container itself plus everything below it.
		for key := range self.containers {
			cont := self.containers[key]
			name := cont.info.Name
			if name == containerName || strings.HasPrefix(name, subtreePrefix) {
				matched = append(matched, cont)
			}
		}
	}()

	return self.containerDataSliceToContainerInfoSlice(matched, query)
}
// AllDockerContainers returns info for every container in the Docker
// namespace, keyed by the container's canonical name.
func (self *manager) AllDockerContainers(query *info.ContainerInfoRequest) (map[string]info.ContainerInfo, error) {
	var containers map[string]*containerData
	// Snapshot the matching containers inside a closure so the read lock is
	// held only while scanning the map.
	func() {
		self.containersLock.RLock()
		defer self.containersLock.RUnlock()
		containers = make(map[string]*containerData, len(self.containers))
		// Get containers in the Docker namespace.
		for name, cont := range self.containers {
			if name.Namespace == docker.DockerNamespace {
				containers[cont.info.Name] = cont
			}
		}
	}()
	output := make(map[string]info.ContainerInfo, len(containers))
	for name, cont := range containers {
		inf, err := self.containerDataToContainerInfo(cont, query)
		if err != nil {
			return nil, err
		}
		output[name] = *inf
	}
	return output, nil
}
// DockerContainer returns info for a single container looked up by name
// within the Docker namespace.
func (self *manager) DockerContainer(containerName string, query *info.ContainerInfoRequest) (info.ContainerInfo, error) {
	var container *containerData = nil
	// Look up the container under the read lock; the closure scopes the lock.
	func() {
		self.containersLock.RLock()
		defer self.containersLock.RUnlock()
		// Check for the container in the Docker container namespace.
		cont, ok := self.containers[namespacedContainerName{
			Namespace: docker.DockerNamespace,
			Name:      containerName,
		}]
		if ok {
			container = cont
		}
	}()
	if container == nil {
		return info.ContainerInfo{}, fmt.Errorf("unable to find Docker container %q", containerName)
	}
	inf, err := self.containerDataToContainerInfo(container, query)
	if err != nil {
		return info.ContainerInfo{}, err
	}
	return *inf, nil
}
// containerDataSliceToContainerInfoSlice converts each containerData to a
// ContainerInfo, silently skipping containers whose info cannot be fetched so
// one bad container does not fail the whole request.
func (self *manager) containerDataSliceToContainerInfoSlice(containers []*containerData, query *info.ContainerInfoRequest) ([]*info.ContainerInfo, error) {
	if len(containers) == 0 {
		return nil, fmt.Errorf("no containers found")
	}
	// Get the info for each container.
	output := make([]*info.ContainerInfo, 0, len(containers))
	for i := range containers {
		cinfo, err := self.containerDataToContainerInfo(containers[i], query)
		if err != nil {
			// Skip containers with errors, we try to degrade gracefully.
			continue
		}
		output = append(output, cinfo)
	}
	return output, nil
}
// GetContainerDerivedStats returns the derived stats computed by the named
// container's containerData.
func (self *manager) GetContainerDerivedStats(containerName string) (v2.DerivedStats, error) {
	var ok bool
	var cont *containerData
	// Look up the container under the read lock.
	func() {
		self.containersLock.RLock()
		defer self.containersLock.RUnlock()
		cont, ok = self.containers[namespacedContainerName{Name: containerName}]
	}()
	if !ok {
		return v2.DerivedStats{}, fmt.Errorf("unknown container %q", containerName)
	}
	return cont.DerivedStats()
}
// GetFsInfo returns filesystem usage derived from the root container's most
// recent stats. If label is non-empty, only the filesystem whose device backs
// that label is returned.
func (self *manager) GetFsInfo(label string) ([]v2.FsInfo, error) {
	var empty time.Time
	// Get latest data from filesystems hanging off root container.
	stats, err := self.memoryStorage.RecentStats("/", empty, empty, 1)
	if err != nil {
		return nil, err
	}
	dev := ""
	if len(label) != 0 {
		dev, err = self.fsInfo.GetDeviceForLabel(label)
		if err != nil {
			return nil, err
		}
	}
	fsInfo := []v2.FsInfo{}
	// NOTE(review): stats[0] assumes RecentStats always returns at least one
	// sample on success — confirm it cannot return an empty slice with nil error.
	for _, fs := range stats[0].Filesystem {
		if len(label) != 0 && fs.Device != dev {
			continue
		}
		mountpoint, err := self.fsInfo.GetMountpointForDevice(fs.Device)
		if err != nil {
			return nil, err
		}
		labels, err := self.fsInfo.GetLabelsForDevice(fs.Device)
		if err != nil {
			return nil, err
		}
		fi := v2.FsInfo{
			Device:     fs.Device,
			Mountpoint: mountpoint,
			Capacity:   fs.Limit,
			Usage:      fs.Usage,
			Labels:     labels,
		}
		fsInfo = append(fsInfo, fi)
	}
	return fsInfo, nil
}
func (m *manager) GetMachineInfo() (*info.MachineInfo, error) {
	// Returns a pointer to the manager's cached MachineInfo (not a copy, despite
	// the original intent); callers must treat the result as read-only.
	return &m.machineInfo, nil
}
// GetVersionInfo returns the version information cached when the manager was
// constructed; callers must treat the result as read-only.
func (m *manager) GetVersionInfo() (*info.VersionInfo, error) {
	return &m.versionInfo, nil
}
// Create a container.
// createContainer starts tracking containerName: it builds a handler and
// containerData, registers the canonical name and every alias in the
// containers map, emits a creation event for containers created after manager
// startup, and finally starts the container's housekeeping.
func (m *manager) createContainer(containerName string) error {
	handler, err := container.NewContainerHandler(containerName)
	if err != nil {
		return err
	}
	logUsage := *logCadvisorUsage && containerName == m.cadvisorContainer
	cont, err := newContainerData(containerName, m.memoryStorage, handler, m.loadReader, logUsage)
	if err != nil {
		return err
	}
	// Add to the containers map.
	// The closure keeps the write lock scoped to just the map update.
	alreadyExists := func() bool {
		m.containersLock.Lock()
		defer m.containersLock.Unlock()
		namespacedName := namespacedContainerName{
			Name: containerName,
		}
		// Check that the container didn't already exist.
		_, ok := m.containers[namespacedName]
		if ok {
			return true
		}
		// Add the container name and all its aliases. The aliases must be within the namespace of the factory.
		m.containers[namespacedName] = cont
		for _, alias := range cont.info.Aliases {
			m.containers[namespacedContainerName{
				Namespace: cont.info.Namespace,
				Name:      alias,
			}] = cont
		}
		return false
	}()
	if alreadyExists {
		return nil
	}
	glog.Infof("Added container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace)
	contSpecs, err := cont.handler.GetSpec()
	if err != nil {
		return err
	}
	// Only emit a creation event for containers that appeared after the
	// manager itself started (pre-existing containers are "recovered").
	if contSpecs.CreationTime.After(m.startupTime) {
		contRef, err := cont.handler.ContainerReference()
		if err != nil {
			return err
		}
		newEvent := &events.Event{
			ContainerName: contRef.Name,
			EventData:     contSpecs,
			Timestamp:     contSpecs.CreationTime,
			EventType:     events.TypeContainerCreation,
		}
		err = m.eventHandler.AddEvent(newEvent)
		if err != nil {
			return err
		}
	}
	// Start the container's housekeeping.
	cont.Start()
	return nil
}
// destroyContainer stops tracking containerName: it halts the container's
// housekeeping, removes the name and all aliases from the containers map, and
// emits a deletion event. A no-op if the container is not tracked.
func (m *manager) destroyContainer(containerName string) error {
	m.containersLock.Lock()
	defer m.containersLock.Unlock()
	namespacedName := namespacedContainerName{
		Name: containerName,
	}
	cont, ok := m.containers[namespacedName]
	if !ok {
		// Already destroyed, done.
		return nil
	}
	// Tell the container to stop.
	err := cont.Stop()
	if err != nil {
		return err
	}
	// Remove the container from our records (and all its aliases).
	delete(m.containers, namespacedName)
	for _, alias := range cont.info.Aliases {
		delete(m.containers, namespacedContainerName{
			Namespace: cont.info.Namespace,
			Name:      alias,
		})
	}
	glog.Infof("Destroyed container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace)
	contRef, err := cont.handler.ContainerReference()
	if err != nil {
		return err
	}
	newEvent := &events.Event{
		ContainerName: contRef.Name,
		Timestamp:     time.Now(),
		EventType:     events.TypeContainerDeletion,
	}
	err = m.eventHandler.AddEvent(newEvent)
	if err != nil {
		return err
	}
	return nil
}
// Detect all containers that have been added or deleted from the specified container.
// getContainersDiff compares the handler's recursive listing against the
// containers currently tracked and returns the references that are new
// (added) and those that no longer exist (removed).
func (m *manager) getContainersDiff(containerName string) (added []info.ContainerReference, removed []info.ContainerReference, err error) {
	m.containersLock.RLock()
	defer m.containersLock.RUnlock()
	// Get all subcontainers recursively.
	cont, ok := m.containers[namespacedContainerName{
		Name: containerName,
	}]
	if !ok {
		return nil, nil, fmt.Errorf("failed to find container %q while checking for new containers", containerName)
	}
	allContainers, err := cont.handler.ListContainers(container.ListRecursive)
	if err != nil {
		return nil, nil, err
	}
	// The listing does not include the container itself, so add it back.
	allContainers = append(allContainers, info.ContainerReference{Name: containerName})
	// Determine which were added and which were removed.
	allContainersSet := make(map[string]*containerData)
	for name, d := range m.containers {
		// Only add the canonical name.
		if d.info.Name == name.Name {
			allContainersSet[name.Name] = d
		}
	}
	// Added containers
	for _, c := range allContainers {
		delete(allContainersSet, c.Name)
		_, ok := m.containers[namespacedContainerName{
			Name: c.Name,
		}]
		if !ok {
			added = append(added, c)
		}
	}
	// Removed ones are no longer in the container listing.
	for _, d := range allContainersSet {
		removed = append(removed, d.info.ContainerReference)
	}
	return
}
// Detect the existing subcontainers and reflect the setup here.
// detectSubcontainers reconciles tracked containers with reality: new ones are
// created and vanished ones destroyed. Per-container failures are logged and
// skipped rather than aborting the sweep.
func (m *manager) detectSubcontainers(containerName string) error {
	added, removed, err := m.getContainersDiff(containerName)
	if err != nil {
		return err
	}
	// Add the new containers.
	for _, cont := range added {
		err = m.createContainer(cont.Name)
		if err != nil {
			glog.Errorf("Failed to create existing container: %s: %s", cont.Name, err)
		}
	}
	// Remove the old containers.
	for _, cont := range removed {
		err = m.destroyContainer(cont.Name)
		if err != nil {
			glog.Errorf("Failed to destroy existing container: %s: %s", cont.Name, err)
		}
	}
	return nil
}
// Watches for new containers started in the system. Runs forever unless there is a setup error.
// Subcontainer add/delete events from the root container's handler are turned
// into createContainer/destroyContainer calls. Sending on quit stops the
// watcher; the result of stopping is sent back on the same channel.
func (self *manager) watchForNewContainers(quit chan error) error {
	var root *containerData
	var ok bool
	func() {
		self.containersLock.RLock()
		defer self.containersLock.RUnlock()
		root, ok = self.containers[namespacedContainerName{
			Name: "/",
		}]
	}()
	if !ok {
		return fmt.Errorf("root container does not exist when watching for new containers")
	}
	// Register for new subcontainers.
	eventsChannel := make(chan container.SubcontainerEvent, 16)
	err := root.handler.WatchSubcontainers(eventsChannel)
	if err != nil {
		return err
	}
	// There is a race between starting the watch and new container creation so we do a detection before we read new containers.
	err = self.detectSubcontainers("/")
	if err != nil {
		return err
	}
	// Listen to events from the container handler.
	go func() {
		for {
			select {
			case event := <-eventsChannel:
				// Use a goroutine-local error: the enclosing function's err
				// must not be written after watchForNewContainers returns.
				var err error
				switch event.EventType {
				case container.SubcontainerAdd:
					err = self.createContainer(event.Name)
				case container.SubcontainerDelete:
					err = self.destroyContainer(event.Name)
				}
				if err != nil {
					// Warningf (not Warning) so the %v directive is formatted.
					glog.Warningf("Failed to process watch event: %v", err)
				}
			case <-quit:
				// Stop processing events if asked to quit.
				err := root.handler.StopWatchingSubcontainers()
				quit <- err
				if err == nil {
					glog.Infof("Exiting thread watching subcontainers")
					return
				}
			}
		}
	}()
	return nil
}
// watchForNewOoms starts streaming OOM instances via oomparser and forwards
// each one to the event handler as a TypeOom event. The forwarding goroutine
// runs until the oomparser closes outStream.
func (self *manager) watchForNewOoms() error {
	glog.Infof("Started watching for new ooms in manager")
	outStream := make(chan *oomparser.OomInstance, 10)
	oomLog, err := oomparser.New()
	if err != nil {
		return err
	}
	err = oomLog.StreamOoms(outStream)
	if err != nil {
		return err
	}
	go func() {
		for oomInstance := range outStream {
			newEvent := &events.Event{
				ContainerName: oomInstance.ContainerName,
				Timestamp:     oomInstance.TimeOfDeath,
				EventType:     events.TypeOom,
				EventData:     oomInstance,
			}
			glog.V(1).Infof("Created an oom event: %v", newEvent)
			err := self.eventHandler.AddEvent(newEvent)
			if err != nil {
				// Log and keep streaming; one failed event should not stop the watcher.
				glog.Errorf("Failed to add event %v, got error: %v", newEvent, err)
			}
		}
	}()
	return nil
}
// can be called by the api which will take events returned on the channel
// WatchForEvents registers passedChannel with the event handler to receive
// events matching the request.
func (self *manager) WatchForEvents(request *events.Request, passedChannel chan *events.Event) error {
	return self.eventHandler.WatchEvents(passedChannel, request)
}
// can be called by the api which will return all events satisfying the request
// GetPastEvents delegates to the event handler's stored-event query.
func (self *manager) GetPastEvents(request *events.Request) (events.EventSlice, error) {
	return self.eventHandler.GetEvents(request)
}
Return unique containers in Subcontainers().
Before this we'd return an instance for each alias we'd saved.
Fixes #592.
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Manager of cAdvisor-monitored containers.
package manager
import (
"flag"
"fmt"
"path"
"regexp"
"strings"
"sync"
"time"
"github.com/docker/libcontainer/cgroups"
"github.com/golang/glog"
"github.com/google/cadvisor/container"
"github.com/google/cadvisor/container/docker"
"github.com/google/cadvisor/container/raw"
"github.com/google/cadvisor/events"
"github.com/google/cadvisor/fs"
info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/info/v2"
"github.com/google/cadvisor/storage/memory"
"github.com/google/cadvisor/utils/cpuload"
"github.com/google/cadvisor/utils/oomparser"
"github.com/google/cadvisor/utils/sysfs"
)
// Command-line flags controlling the global housekeeping cadence and whether
// cAdvisor logs its own container's usage.
var globalHousekeepingInterval = flag.Duration("global_housekeeping_interval", 1*time.Minute, "Interval between global housekeepings")
var logCadvisorUsage = flag.Bool("log_cadvisor_usage", false, "Whether to log the usage of the cAdvisor container")
// The Manager interface defines operations for starting a manager and getting
// container and machine information. Construct instances with New.
type Manager interface {
	// Start the manager.
	Start() error
	// Stops the manager.
	Stop() error
	// Get information about a container.
	GetContainerInfo(containerName string, query *info.ContainerInfoRequest) (*info.ContainerInfo, error)
	// Get information about all subcontainers of the specified container (includes self).
	SubcontainersInfo(containerName string, query *info.ContainerInfoRequest) ([]*info.ContainerInfo, error)
	// Gets all the Docker containers. Return is a map from full container name to ContainerInfo.
	AllDockerContainers(query *info.ContainerInfoRequest) (map[string]info.ContainerInfo, error)
	// Gets information about a specific Docker container. The specified name is within the Docker namespace.
	DockerContainer(dockerName string, query *info.ContainerInfoRequest) (info.ContainerInfo, error)
	// Gets spec for a container.
	GetContainerSpec(containerName string) (v2.ContainerSpec, error)
	// Get derived stats for a container.
	GetContainerDerivedStats(containerName string) (v2.DerivedStats, error)
	// Get information about the machine.
	GetMachineInfo() (*info.MachineInfo, error)
	// Get version information about different components we depend on.
	GetVersionInfo() (*info.VersionInfo, error)
	// Get filesystem information for a given label.
	// Returns information for all global filesystems if label is empty.
	GetFsInfo(label string) ([]v2.FsInfo, error)
	// Get events streamed through passedChannel that fit the request.
	WatchForEvents(request *events.Request, passedChannel chan *events.Event) error
	// Get past events that have been detected and that fit the request.
	GetPastEvents(request *events.Request) (events.EventSlice, error)
}
// New takes a memory storage and returns a new manager.
// It detects the cgroup cAdvisor runs in, gathers machine and version info,
// and registers the Docker and raw container factories. Factory registration
// failures are logged but do not fail construction.
func New(memoryStorage *memory.InMemoryStorage, sysfs sysfs.SysFs) (Manager, error) {
	if memoryStorage == nil {
		return nil, fmt.Errorf("manager requires memory storage")
	}
	// Detect the container we are running on.
	selfContainer, err := cgroups.GetThisCgroupDir("cpu")
	if err != nil {
		return nil, err
	}
	glog.Infof("cAdvisor running in container: %q", selfContainer)
	context := fs.Context{DockerRoot: docker.RootDir()}
	fsInfo, err := fs.NewFsInfo(context)
	if err != nil {
		return nil, err
	}
	newManager := &manager{
		containers:        make(map[namespacedContainerName]*containerData),
		quitChannels:      make([]chan error, 0, 2),
		memoryStorage:     memoryStorage,
		fsInfo:            fsInfo,
		cadvisorContainer: selfContainer,
		startupTime:       time.Now(),
	}
	machineInfo, err := getMachineInfo(sysfs, fsInfo)
	if err != nil {
		return nil, err
	}
	newManager.machineInfo = *machineInfo
	glog.Infof("Machine: %+v", newManager.machineInfo)
	versionInfo, err := getVersionInfo()
	if err != nil {
		return nil, err
	}
	newManager.versionInfo = *versionInfo
	glog.Infof("Version: %+v", newManager.versionInfo)
	newManager.eventHandler = events.NewEventManager()
	// Register Docker container factory.
	err = docker.Register(newManager, fsInfo)
	if err != nil {
		glog.Errorf("Docker container factory registration failed: %v.", err)
	}
	// Register the raw driver.
	err = raw.Register(newManager, fsInfo)
	if err != nil {
		glog.Errorf("Registration of the raw container factory failed: %v", err)
	}
	return newManager, nil
}
// A namespaced container name.
// Used as the key of manager.containers; aliases are stored under the
// namespace of the factory that produced them.
type namespacedContainerName struct {
	// The namespace of the container. Can be empty for the root namespace.
	Namespace string
	// The name of the container in this namespace.
	Name string
}
// manager implements the Manager interface.
type manager struct {
	// Tracked containers, keyed by canonical name and by every alias.
	// Guarded by containersLock.
	containers     map[namespacedContainerName]*containerData
	containersLock sync.RWMutex
	memoryStorage  *memory.InMemoryStorage
	fsInfo         fs.FsInfo
	// Machine and version info gathered once in New; read-only afterwards.
	machineInfo info.MachineInfo
	versionInfo info.VersionInfo
	// Channels used by Stop() to shut down threads started in Start().
	quitChannels []chan error
	// Cgroup path of the container cAdvisor itself runs in.
	cadvisorContainer string
	// NOTE(review): dockerContainersRegexp appears unused in this file —
	// confirm against the rest of the package before removing.
	dockerContainersRegexp *regexp.Regexp
	loadReader             cpuload.CpuLoadReader
	eventHandler           events.EventManager
	// Time the manager was constructed; used to classify creation events.
	startupTime time.Time
}
// Start the container manager.
//
// Start launches the background machinery: the OOM watcher, creation of the
// root container, recovery of all pre-existing containers, the subcontainer
// watcher, and the global housekeeping thread. Quit channels for started
// threads are retained so Stop() can shut them down.
func (self *manager) Start() error {
	// TODO(rjnagal): Skip creating cpu load reader while we improve resource usage and accuracy.
	if false {
		// Create cpu load reader.
		cpuLoadReader, err := cpuload.New()
		if err != nil {
			// TODO(rjnagal): Promote to warning once we support cpu load inside namespaces.
			glog.Infof("Could not initialize cpu load reader: %s", err)
		} else {
			err = cpuLoadReader.Start()
			if err != nil {
				// Warningf (not Warning): the message carries a %s directive.
				glog.Warningf("Could not start cpu load stat collector: %s", err)
			} else {
				self.loadReader = cpuLoadReader
			}
		}
	}
	// Watch for OOMs. Failure is non-fatal: we just won't get OOM events.
	err := self.watchForNewOoms()
	if err != nil {
		glog.Errorf("Failed to start OOM watcher, will not get OOM events: %v", err)
	}
	// If there are no factories, don't start any housekeeping and serve the information we do have.
	if !container.HasFactories() {
		return nil
	}
	// Create root and then recover all containers.
	err = self.createContainer("/")
	if err != nil {
		return err
	}
	glog.Infof("Starting recovery of all containers")
	err = self.detectSubcontainers("/")
	if err != nil {
		return err
	}
	glog.Infof("Recovery completed")
	// Watch for new container.
	quitWatcher := make(chan error)
	err = self.watchForNewContainers(quitWatcher)
	if err != nil {
		return err
	}
	self.quitChannels = append(self.quitChannels, quitWatcher)
	// Look for new containers in the main housekeeping thread.
	quitGlobalHousekeeping := make(chan error)
	self.quitChannels = append(self.quitChannels, quitGlobalHousekeeping)
	go self.globalHousekeeping(quitGlobalHousekeeping)
	return nil
}
// Stop shuts down the background threads started by Start by signalling each
// quit channel and waiting for the thread's reply. On the first failure the
// remaining (unsignalled) channels are kept for a retry and the error is
// returned.
func (self *manager) Stop() error {
	// Stop and wait on all quit channels.
	for i, c := range self.quitChannels {
		// Send the exit signal and wait on the thread to exit (by closing the channel).
		c <- nil
		err := <-c
		if err != nil {
			// Remove the channels that quit successfully.
			self.quitChannels = self.quitChannels[i:]
			return err
		}
	}
	self.quitChannels = make([]chan error, 0, 2)
	if self.loadReader != nil {
		self.loadReader.Stop()
		self.loadReader = nil
	}
	return nil
}
// globalHousekeeping periodically re-detects subcontainers of root until a
// value arrives on quit; it echoes nil on quit to acknowledge shutdown.
func (self *manager) globalHousekeeping(quit chan error) {
	// Long housekeeping is either 100ms or half of the housekeeping interval.
	longHousekeeping := 100 * time.Millisecond
	if *globalHousekeepingInterval/2 < longHousekeeping {
		longHousekeeping = *globalHousekeepingInterval / 2
	}
	ticker := time.Tick(*globalHousekeepingInterval)
	for {
		select {
		case t := <-ticker:
			start := time.Now()
			// Check for new containers.
			err := self.detectSubcontainers("/")
			if err != nil {
				glog.Errorf("Failed to detect containers: %s", err)
			}
			// Log if housekeeping took too long.
			duration := time.Since(start)
			if duration >= longHousekeeping {
				glog.V(1).Infof("Global Housekeeping(%d) took %s", t.Unix(), duration)
			}
		case <-quit:
			// Quit if asked to do so.
			quit <- nil
			glog.Infof("Exiting global housekeeping thread")
			return
		}
	}
}
// getContainerData returns the tracked containerData for containerName, or an
// error if the container is unknown.
func (self *manager) getContainerData(containerName string) (*containerData, error) {
	key := namespacedContainerName{Name: containerName}
	// Look the container up under the read lock.
	self.containersLock.RLock()
	cont, found := self.containers[key]
	self.containersLock.RUnlock()
	if !found {
		return nil, fmt.Errorf("unknown container %q", containerName)
	}
	return cont, nil
}
// GetContainerSpec returns the v2 spec for the named container, built from
// its current v1 info.
func (self *manager) GetContainerSpec(containerName string) (v2.ContainerSpec, error) {
	var emptySpec v2.ContainerSpec
	cont, err := self.getContainerData(containerName)
	if err != nil {
		return emptySpec, err
	}
	cinfo, err := cont.GetInfo()
	if err != nil {
		return emptySpec, err
	}
	// Convert the v1 info into its v2 spec representation.
	return self.getV2Spec(cinfo), nil
}
// Get V2 container spec from v1 container info.
// Only the CPU/memory fields flagged as present are copied; aliases and
// namespace come from the container info rather than the spec.
func (self *manager) getV2Spec(cinfo *containerInfo) v2.ContainerSpec {
	// Start from the adjusted v1 spec so defaults are already resolved.
	specV1 := self.getAdjustedSpec(cinfo)
	specV2 := v2.ContainerSpec{
		CreationTime: specV1.CreationTime,
		HasCpu:       specV1.HasCpu,
		HasMemory:    specV1.HasMemory,
	}
	if specV1.HasCpu {
		specV2.Cpu.Limit = specV1.Cpu.Limit
		specV2.Cpu.MaxLimit = specV1.Cpu.MaxLimit
		specV2.Cpu.Mask = specV1.Cpu.Mask
	}
	if specV1.HasMemory {
		specV2.Memory.Limit = specV1.Memory.Limit
		specV2.Memory.Reservation = specV1.Memory.Reservation
		specV2.Memory.SwapLimit = specV1.Memory.SwapLimit
	}
	specV2.Aliases = cinfo.Aliases
	specV2.Namespace = cinfo.Namespace
	return specV2
}
// getAdjustedSpec returns a copy of the container's spec with defaults
// resolved to concrete values.
func (self *manager) getAdjustedSpec(cinfo *containerInfo) info.ContainerSpec {
	// Work on a copy so the container's own record stays untouched.
	adjusted := cinfo.Spec
	// A memory limit of 0 means "no limit"; report the machine capacity instead.
	if adjusted.HasMemory && adjusted.Memory.Limit == 0 {
		adjusted.Memory.Limit = uint64(self.machineInfo.MemoryCapacity)
	}
	return adjusted
}
// Get a container by name.
// GetContainerInfo resolves the tracked container and builds the user-facing
// info (spec plus recent stats) for it.
func (self *manager) GetContainerInfo(containerName string, query *info.ContainerInfoRequest) (*info.ContainerInfo, error) {
	cont, err := self.getContainerData(containerName)
	if err != nil {
		return nil, err
	}
	return self.containerDataToContainerInfo(cont, query)
}
// containerDataToContainerInfo builds a ContainerInfo for the caller from the
// container's current info plus recent stats from the in-memory storage,
// filtered by the query's time range and sample count.
func (self *manager) containerDataToContainerInfo(cont *containerData, query *info.ContainerInfoRequest) (*info.ContainerInfo, error) {
	// Get the info from the container.
	cinfo, err := cont.GetInfo()
	if err != nil {
		return nil, err
	}
	stats, err := self.memoryStorage.RecentStats(cinfo.Name, query.Start, query.End, query.NumStats)
	if err != nil {
		return nil, err
	}
	// Make a copy of the info for the user.
	ret := &info.ContainerInfo{
		ContainerReference: cinfo.ContainerReference,
		Subcontainers:      cinfo.Subcontainers,
		Spec:               self.getAdjustedSpec(cinfo),
		Stats:              stats,
	}
	return ret, nil
}
// SubcontainersInfo returns info for the named container and all of its
// subcontainers. Matching containers are collected into a map keyed by
// canonical name so each container appears once even if it has aliases.
func (self *manager) SubcontainersInfo(containerName string, query *info.ContainerInfoRequest) ([]*info.ContainerInfo, error) {
	var containersMap map[string]*containerData
	func() {
		self.containersLock.RLock()
		defer self.containersLock.RUnlock()
		containersMap = make(map[string]*containerData, len(self.containers))
		// Get all the unique subcontainers of the specified container
		matchedName := path.Join(containerName, "/")
		for i := range self.containers {
			name := self.containers[i].info.Name
			if name == containerName || strings.HasPrefix(name, matchedName) {
				containersMap[self.containers[i].info.Name] = self.containers[i]
			}
		}
	}()
	containers := make([]*containerData, 0, len(containersMap))
	for _, cont := range containersMap {
		containers = append(containers, cont)
	}
	return self.containerDataSliceToContainerInfoSlice(containers, query)
}
// AllDockerContainers returns info for every container in the Docker
// namespace, keyed by the container's canonical name.
func (self *manager) AllDockerContainers(query *info.ContainerInfoRequest) (map[string]info.ContainerInfo, error) {
	var containers map[string]*containerData
	// Snapshot the matching containers inside a closure so the read lock is
	// held only while scanning the map.
	func() {
		self.containersLock.RLock()
		defer self.containersLock.RUnlock()
		containers = make(map[string]*containerData, len(self.containers))
		// Get containers in the Docker namespace.
		for name, cont := range self.containers {
			if name.Namespace == docker.DockerNamespace {
				containers[cont.info.Name] = cont
			}
		}
	}()
	output := make(map[string]info.ContainerInfo, len(containers))
	for name, cont := range containers {
		inf, err := self.containerDataToContainerInfo(cont, query)
		if err != nil {
			return nil, err
		}
		output[name] = *inf
	}
	return output, nil
}
// DockerContainer returns info for a single container looked up by name
// within the Docker namespace.
func (self *manager) DockerContainer(containerName string, query *info.ContainerInfoRequest) (info.ContainerInfo, error) {
	var container *containerData = nil
	// Look up the container under the read lock; the closure scopes the lock.
	func() {
		self.containersLock.RLock()
		defer self.containersLock.RUnlock()
		// Check for the container in the Docker container namespace.
		cont, ok := self.containers[namespacedContainerName{
			Namespace: docker.DockerNamespace,
			Name:      containerName,
		}]
		if ok {
			container = cont
		}
	}()
	if container == nil {
		return info.ContainerInfo{}, fmt.Errorf("unable to find Docker container %q", containerName)
	}
	inf, err := self.containerDataToContainerInfo(container, query)
	if err != nil {
		return info.ContainerInfo{}, err
	}
	return *inf, nil
}
// containerDataSliceToContainerInfoSlice converts each containerData to a
// ContainerInfo, dropping any container whose info cannot be fetched so a
// single bad container does not fail the whole request.
func (self *manager) containerDataSliceToContainerInfoSlice(containers []*containerData, query *info.ContainerInfoRequest) ([]*info.ContainerInfo, error) {
	if len(containers) == 0 {
		return nil, fmt.Errorf("no containers found")
	}
	output := make([]*info.ContainerInfo, 0, len(containers))
	for _, cont := range containers {
		cinfo, convErr := self.containerDataToContainerInfo(cont, query)
		if convErr != nil {
			// Degrade gracefully: skip containers with errors.
			continue
		}
		output = append(output, cinfo)
	}
	return output, nil
}
// GetContainerDerivedStats returns the derived stats computed by the named
// container's containerData.
func (self *manager) GetContainerDerivedStats(containerName string) (v2.DerivedStats, error) {
	// Fetch the container under the read lock.
	self.containersLock.RLock()
	cont, found := self.containers[namespacedContainerName{Name: containerName}]
	self.containersLock.RUnlock()
	if !found {
		return v2.DerivedStats{}, fmt.Errorf("unknown container %q", containerName)
	}
	return cont.DerivedStats()
}
// GetFsInfo returns filesystem usage derived from the root container's most
// recent stats. If label is non-empty, only the filesystem whose device backs
// that label is returned.
func (self *manager) GetFsInfo(label string) ([]v2.FsInfo, error) {
	var empty time.Time
	// Get latest data from filesystems hanging off root container.
	stats, err := self.memoryStorage.RecentStats("/", empty, empty, 1)
	if err != nil {
		return nil, err
	}
	// Guard against an empty result so the stats[0] access below cannot panic.
	if len(stats) == 0 {
		return nil, fmt.Errorf("no recent stats found for root container")
	}
	dev := ""
	if len(label) != 0 {
		dev, err = self.fsInfo.GetDeviceForLabel(label)
		if err != nil {
			return nil, err
		}
	}
	fsInfo := []v2.FsInfo{}
	for _, fs := range stats[0].Filesystem {
		// When filtering by label, skip any device other than the labeled one.
		if len(label) != 0 && fs.Device != dev {
			continue
		}
		mountpoint, err := self.fsInfo.GetMountpointForDevice(fs.Device)
		if err != nil {
			return nil, err
		}
		labels, err := self.fsInfo.GetLabelsForDevice(fs.Device)
		if err != nil {
			return nil, err
		}
		fi := v2.FsInfo{
			Device:     fs.Device,
			Mountpoint: mountpoint,
			Capacity:   fs.Limit,
			Usage:      fs.Usage,
			Labels:     labels,
		}
		fsInfo = append(fsInfo, fi)
	}
	return fsInfo, nil
}
// GetMachineInfo returns a copy of the machine information gathered at
// startup. Returning a copy (as the original comment intended) keeps callers
// from mutating the manager's cached MachineInfo through the pointer.
func (m *manager) GetMachineInfo() (*info.MachineInfo, error) {
	// Copy and return the MachineInfo.
	machineInfo := m.machineInfo
	return &machineInfo, nil
}
// GetVersionInfo returns the version information cached when the manager was
// constructed; callers must treat the result as read-only.
func (m *manager) GetVersionInfo() (*info.VersionInfo, error) {
	return &m.versionInfo, nil
}
// Create a container.
// createContainer starts tracking containerName: it builds a handler and
// containerData, registers the canonical name and every alias in the
// containers map, emits a creation event for containers created after manager
// startup, and finally starts the container's housekeeping.
func (m *manager) createContainer(containerName string) error {
	handler, err := container.NewContainerHandler(containerName)
	if err != nil {
		return err
	}
	logUsage := *logCadvisorUsage && containerName == m.cadvisorContainer
	cont, err := newContainerData(containerName, m.memoryStorage, handler, m.loadReader, logUsage)
	if err != nil {
		return err
	}
	// Add to the containers map.
	// The closure keeps the write lock scoped to just the map update.
	alreadyExists := func() bool {
		m.containersLock.Lock()
		defer m.containersLock.Unlock()
		namespacedName := namespacedContainerName{
			Name: containerName,
		}
		// Check that the container didn't already exist.
		_, ok := m.containers[namespacedName]
		if ok {
			return true
		}
		// Add the container name and all its aliases. The aliases must be within the namespace of the factory.
		m.containers[namespacedName] = cont
		for _, alias := range cont.info.Aliases {
			m.containers[namespacedContainerName{
				Namespace: cont.info.Namespace,
				Name:      alias,
			}] = cont
		}
		return false
	}()
	if alreadyExists {
		return nil
	}
	glog.Infof("Added container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace)
	contSpecs, err := cont.handler.GetSpec()
	if err != nil {
		return err
	}
	// Only emit a creation event for containers that appeared after the
	// manager itself started (pre-existing containers are "recovered").
	if contSpecs.CreationTime.After(m.startupTime) {
		contRef, err := cont.handler.ContainerReference()
		if err != nil {
			return err
		}
		newEvent := &events.Event{
			ContainerName: contRef.Name,
			EventData:     contSpecs,
			Timestamp:     contSpecs.CreationTime,
			EventType:     events.TypeContainerCreation,
		}
		err = m.eventHandler.AddEvent(newEvent)
		if err != nil {
			return err
		}
	}
	// Start the container's housekeeping.
	cont.Start()
	return nil
}
// destroyContainer stops tracking containerName: it halts the container's
// housekeeping, removes the name and all aliases from the containers map, and
// emits a deletion event. A no-op if the container is not tracked.
func (m *manager) destroyContainer(containerName string) error {
	m.containersLock.Lock()
	defer m.containersLock.Unlock()
	namespacedName := namespacedContainerName{
		Name: containerName,
	}
	cont, ok := m.containers[namespacedName]
	if !ok {
		// Already destroyed, done.
		return nil
	}
	// Tell the container to stop.
	err := cont.Stop()
	if err != nil {
		return err
	}
	// Remove the container from our records (and all its aliases).
	delete(m.containers, namespacedName)
	for _, alias := range cont.info.Aliases {
		delete(m.containers, namespacedContainerName{
			Namespace: cont.info.Namespace,
			Name:      alias,
		})
	}
	glog.Infof("Destroyed container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace)
	contRef, err := cont.handler.ContainerReference()
	if err != nil {
		return err
	}
	newEvent := &events.Event{
		ContainerName: contRef.Name,
		Timestamp:     time.Now(),
		EventType:     events.TypeContainerDeletion,
	}
	err = m.eventHandler.AddEvent(newEvent)
	if err != nil {
		return err
	}
	return nil
}
// Detect all containers that have been added or deleted from the specified container.
// getContainersDiff compares the handler's recursive listing against the
// containers currently tracked and returns the references that are new
// (added) and those that no longer exist (removed).
func (m *manager) getContainersDiff(containerName string) (added []info.ContainerReference, removed []info.ContainerReference, err error) {
	m.containersLock.RLock()
	defer m.containersLock.RUnlock()
	// Get all subcontainers recursively.
	cont, ok := m.containers[namespacedContainerName{
		Name: containerName,
	}]
	if !ok {
		return nil, nil, fmt.Errorf("failed to find container %q while checking for new containers", containerName)
	}
	allContainers, err := cont.handler.ListContainers(container.ListRecursive)
	if err != nil {
		return nil, nil, err
	}
	// The listing does not include the container itself, so add it back.
	allContainers = append(allContainers, info.ContainerReference{Name: containerName})
	// Determine which were added and which were removed.
	allContainersSet := make(map[string]*containerData)
	for name, d := range m.containers {
		// Only add the canonical name.
		if d.info.Name == name.Name {
			allContainersSet[name.Name] = d
		}
	}
	// Added containers
	for _, c := range allContainers {
		delete(allContainersSet, c.Name)
		_, ok := m.containers[namespacedContainerName{
			Name: c.Name,
		}]
		if !ok {
			added = append(added, c)
		}
	}
	// Removed ones are no longer in the container listing.
	for _, d := range allContainersSet {
		removed = append(removed, d.info.ContainerReference)
	}
	return
}
// Detect the existing subcontainers and reflect the setup here.
// detectSubcontainers reconciles the tracked containers with reality: newly
// discovered containers are created and vanished ones destroyed. Per-container
// failures are logged and skipped rather than aborting the sweep.
func (m *manager) detectSubcontainers(containerName string) error {
	added, removed, err := m.getContainersDiff(containerName)
	if err != nil {
		return err
	}
	// Start tracking newly discovered containers.
	for _, ref := range added {
		if createErr := m.createContainer(ref.Name); createErr != nil {
			glog.Errorf("Failed to create existing container: %s: %s", ref.Name, createErr)
		}
	}
	// Stop tracking containers that have disappeared.
	for _, ref := range removed {
		if destroyErr := m.destroyContainer(ref.Name); destroyErr != nil {
			glog.Errorf("Failed to destroy existing container: %s: %s", ref.Name, destroyErr)
		}
	}
	return nil
}
// Watches for new containers started in the system. Runs forever unless there is a setup error.
// Subcontainer add/delete events from the root container's handler are turned
// into createContainer/destroyContainer calls. Sending on quit stops the
// watcher; the result of stopping is sent back on the same channel.
func (self *manager) watchForNewContainers(quit chan error) error {
	var root *containerData
	var ok bool
	func() {
		self.containersLock.RLock()
		defer self.containersLock.RUnlock()
		root, ok = self.containers[namespacedContainerName{
			Name: "/",
		}]
	}()
	if !ok {
		return fmt.Errorf("root container does not exist when watching for new containers")
	}
	// Register for new subcontainers.
	eventsChannel := make(chan container.SubcontainerEvent, 16)
	err := root.handler.WatchSubcontainers(eventsChannel)
	if err != nil {
		return err
	}
	// There is a race between starting the watch and new container creation so we do a detection before we read new containers.
	err = self.detectSubcontainers("/")
	if err != nil {
		return err
	}
	// Listen to events from the container handler.
	go func() {
		for {
			select {
			case event := <-eventsChannel:
				// Use a goroutine-local error: the enclosing function's err
				// must not be written after watchForNewContainers returns.
				var err error
				switch event.EventType {
				case container.SubcontainerAdd:
					err = self.createContainer(event.Name)
				case container.SubcontainerDelete:
					err = self.destroyContainer(event.Name)
				}
				if err != nil {
					// Warningf (not Warning) so the %v directive is formatted.
					glog.Warningf("Failed to process watch event: %v", err)
				}
			case <-quit:
				// Stop processing events if asked to quit.
				err := root.handler.StopWatchingSubcontainers()
				quit <- err
				if err == nil {
					glog.Infof("Exiting thread watching subcontainers")
					return
				}
			}
		}
	}()
	return nil
}
// watchForNewOoms starts a background watcher that converts kernel OOM
// records streamed by oomparser into manager events. It returns an error
// only if the parser or the stream cannot be set up.
func (self *manager) watchForNewOoms() error {
	glog.Infof("Started watching for new ooms in manager")
	outStream := make(chan *oomparser.OomInstance, 10)
	oomLog, err := oomparser.New()
	if err != nil {
		return err
	}
	if err = oomLog.StreamOoms(outStream); err != nil {
		return err
	}

	go func() {
		// The goroutine exits when oomparser closes outStream.
		for oomInstance := range outStream {
			evt := &events.Event{
				ContainerName: oomInstance.ContainerName,
				Timestamp:     oomInstance.TimeOfDeath,
				EventType:     events.TypeOom,
				EventData:     oomInstance,
			}
			glog.V(1).Infof("Created an oom event: %v", evt)
			if addErr := self.eventHandler.AddEvent(evt); addErr != nil {
				glog.Errorf("Failed to add event %v, got error: %v", evt, addErr)
			}
		}
	}()
	return nil
}
// can be called by the api which will take events returned on the channel
// WatchForEvents registers passedChannel with the event handler so that
// events matching request are delivered to it.
func (self *manager) WatchForEvents(request *events.Request, passedChannel chan *events.Event) error {
	return self.eventHandler.WatchEvents(passedChannel, request)
}
// can be called by the api which will return all events satisfying the request
// GetPastEvents delegates to the event handler's stored-event query.
func (self *manager) GetPastEvents(request *events.Request) (events.EventSlice, error) {
	return self.eventHandler.GetEvents(request)
}
|
// Copyright (c) 2014 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package n1ql
import (
"bytes"
"crypto/tls"
"database/sql/driver"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/url"
"regexp"
"strings"
"sync"
"time"
"unicode"
"github.com/couchbase/go-couchbase"
"github.com/couchbase/godbc"
"github.com/couchbase/query/util"
)
// Common error codes
var (
	ErrNotSupported   = fmt.Errorf("N1QL:Not supported")
	ErrNotImplemented = fmt.Errorf("N1QL: Not implemented")
	ErrUnknownCommand = fmt.Errorf("N1QL: Unknown Command")
	ErrInternalError  = fmt.Errorf("N1QL: Internal Error")
)

// defaults
var (
	N1QL_SERVICE_ENDPOINT = "/query/service"
	N1QL_DEFAULT_HOST     = "127.0.0.1"
	N1QL_DEFAULT_PORT     = 8093
	// NOTE(review): in Go '^' is bitwise XOR, so 2 ^ 10 evaluates to 8 —
	// neither 1024 nor the "1 MB" the trailing comment claims. Confirm
	// the intended value before relying on it.
	N1QL_POOL_SIZE         = 2 ^ 10 // 1 MB
	N1QL_DEFAULT_STATEMENT = "SELECT RAW 1;"
	// LOCALHOST is rewritten to "[::1]" by getQueryApi when the endpoint
	// being contacted is IPv6.
	LOCALHOST = N1QL_DEFAULT_HOST
)

// flags
var (
	// When true, metrics/status/requestID are also returned as rows
	// (see performQuery).
	N1QL_PASSTHROUGH_MODE = false
)

// Rest API query parameters
var QueryParams map[string]string

// Username and password. Used for querying the cluster endpoint,
// which may require authorization.
var username, password string

// Used to decide whether to skip verification of certificates when
// connecting to an ssl port.
var skipVerify = true
var certFile = ""
var keyFile = ""
var rootFile = ""

// Selects the analytics (cbas) service during discovery (SetIsAnalytics).
var isAnalytics = false

// Address-selection mode: "default", "external" or "auto" (SetNetworkType).
var networkCfg = "default"

// init allocates the shared REST query-parameter map used by every request.
func init() {
	QueryParams = make(map[string]string)
}
// SetIsAnalytics selects the analytics (cbas) service instead of n1ql
// during endpoint discovery.
func SetIsAnalytics(val bool) {
	isAnalytics = val
}

// SetNetworkType chooses between "default", "external" and "auto"
// addressing when discovering service endpoints.
func SetNetworkType(networkType string) {
	networkCfg = networkType
}

// SetQueryParams records a REST query parameter sent with every request.
func SetQueryParams(key string, value string) error {
	if key == "" {
		return fmt.Errorf("N1QL: Key not specified")
	}
	QueryParams[key] = value
	return nil
}

// UnsetQueryParams removes a previously set REST query parameter.
func UnsetQueryParams(key string) error {
	if key == "" {
		return fmt.Errorf("N1QL: Key not specified")
	}
	delete(QueryParams, key)
	return nil
}

// SetPassthroughMode toggles returning metrics/status/requestID as rows.
func SetPassthroughMode(val bool) {
	N1QL_PASSTHROUGH_MODE = val
}

// SetUsernamePassword stores the credentials used for cluster and query
// requests.
func SetUsernamePassword(u, p string) {
	username = u
	password = p
}

// hasUsernamePassword reports whether any credential has been configured.
func hasUsernamePassword() bool {
	return username != "" || password != ""
}

// SetSkipVerify controls TLS certificate verification for ssl endpoints.
func SetSkipVerify(skip bool) {
	skipVerify = skip
}

// SetCertFile sets the client certificate used for X.509 connections.
func SetCertFile(cert string) {
	certFile = cert
}

// SetKeyFile sets the client private key used for X.509 connections.
func SetKeyFile(cert string) {
	keyFile = cert
}

// SetRootFile sets the root CA certificate used for X.509 connections.
func SetRootFile(cert string) {
	rootFile = cert
}
// implements driver.Conn interface
type n1qlConn struct {
	clusterAddr string // NOTE(review): not assigned anywhere in this file — confirm its use
	// queryAPIs lists candidate query/analytics endpoints; doClientRequest
	// prunes entries that stop responding.
	queryAPIs []string
	client    *http.Client
	lock      sync.RWMutex // guards queryAPIs
}

// HTTPClient to use for REST and view operations.
var MaxIdleConnsPerHost = 10
var HTTPTransport = &http.Transport{MaxIdleConnsPerHost: MaxIdleConnsPerHost}
var HTTPClient = &http.Client{Transport: HTTPTransport}
// Auto discover N1QL and Analytics services depending on input.
// Builds the endpoint list from the cluster's nodeServices data, honoring
// the configured network mode (default / external / auto). Note: the
// networkType parameter is currently unused — selection reads the
// package-level networkCfg, which callers pass in as this argument.
func discoverN1QLService(name string, ps couchbase.PoolServices, isAnalytics bool, networkType string) ([]string, error) {
	var hostnm string
	var port int
	var ipv6, ok, external bool
	var hostUrl *url.URL

	prefixUrl := "http://"
	serviceType := "n1ql"
	if isAnalytics {
		serviceType = "cbas"
	}

	// Since analytics doesnt have a rest endpoint that lists the cluster nodes
	// we need to populate the list of analytics APIs here itself.
	// We might as well do the same for query. This makes getQueryApi() redundant.
	queryAPIs := []string{}

	// If the network type isn't provided we detect whether to use the default
	// or the alternate (external) address by comparing the input hostname with
	// the hostnames listed under services.
	hostnm = strings.TrimSpace(name)
	if strings.HasPrefix(hostnm, "http://") || strings.HasPrefix(hostnm, "https://") {
		if strings.HasPrefix(hostnm, "https://") {
			prefixUrl = "https://"
			serviceType += "SSL"
		}
		hostUrl, _ = url.Parse(name)
		hostnm = hostUrl.Host
	}

	if networkCfg == "external" {
		external = true
	} else if networkCfg == "auto" && hostUrl != nil {
		// BUG FIX: hostUrl is only populated when name carries an http(s)
		// prefix; without the nil guard a bare host[:port] input panicked here.
		for _, ns := range ps.NodesExt {
			if v, found := ns.AlternateNames["external"]; found {
				if strings.Compare(v.Hostname, hostUrl.Hostname()) == 0 {
					external = true
					break
				}
			}
		}
	}

	for _, ns := range ps.NodesExt {
		if ns.Services == nil {
			continue
		}
		port, ok = ns.Services[serviceType]
		if !external {
			if ns.Hostname != "" {
				hostnm = ns.Hostname
			}
		} else {
			v, found := ns.AlternateNames["external"]
			if !found || v.Hostname == "" {
				continue
			}
			hostnm = v.Hostname
			if v.Ports != nil {
				port, ok = v.Ports[serviceType]
			}
		}
		hostnm, _, ipv6, _ = HostNameandPort(hostnm)
		// We have both a hostname and (if ok) a service port.
		if ok {
			// n1ql or analytics service found on this node.
			if ipv6 {
				queryAPIs = append(queryAPIs, fmt.Sprintf("%s[%s]:%d"+N1QL_SERVICE_ENDPOINT, prefixUrl, hostnm, port))
			} else {
				queryAPIs = append(queryAPIs, fmt.Sprintf("%s%s:%d"+N1QL_SERVICE_ENDPOINT, prefixUrl, hostnm, port))
			}
		}
	}
	return queryAPIs, nil
}
// cbUserAgent is the value reported in the CB-User-Agent header.
var cbUserAgent string = "godbc/" + util.VERSION

// SetCBUserAgentHeader overrides the reported user agent string.
func SetCBUserAgentHeader(v string) {
	cbUserAgent = v
}

// setCBUserAgent stamps the CB-User-Agent header onto an outgoing request.
func setCBUserAgent(request *http.Request) {
	request.Header.Add("CB-User-Agent", cbUserAgent)
}
// getQueryApi queries a node's /admin/clusters/default/nodes endpoint and
// returns the queryEndpoint URL of every node found.
// Side effect: when n1qlEndPoint is IPv6 it rewrites the package-level
// LOCALHOST to "[::1]" so the localhost substitution below matches.
func getQueryApi(n1qlEndPoint string, isHttps bool) ([]string, error) {
	queryAdmin := n1qlEndPoint + "/admin/clusters/default/nodes"
	if isHttps {
		queryAdmin = "https://" + queryAdmin
	} else {
		queryAdmin = "http://" + queryAdmin
	}
	request, _ := http.NewRequest("GET", queryAdmin, nil)
	request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	setCBUserAgent(request)
	if hasUsernamePassword() {
		request.SetBasicAuth(username, password)
	}
	queryAPIs := make([]string, 0)
	hostname, _, ipv6, err := HostNameandPort(n1qlEndPoint)
	if err != nil {
		return nil, fmt.Errorf("N1QL: Failed to parse URL. Error %v", err)
	}
	resp, err := HTTPClient.Do(request)
	if err != nil {
		return nil, fmt.Errorf("%v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		// Only the first 512 bytes of the error payload are surfaced.
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("%s", bod)
	}
	var nodesInfo []interface{}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("N1QL: Failed to read response body from server. Error %v", err)
	}
	if err := json.Unmarshal(body, &nodesInfo); err != nil {
		return nil, fmt.Errorf("N1QL: Failed to parse response. Error %v", err)
	}
	for _, queryNode := range nodesInfo {
		switch queryNode := queryNode.(type) {
		case map[string]interface{}:
			// NOTE(review): this assertion panics if "queryEndpoint" is
			// absent or not a string — confirm the admin API contract.
			queryAPIs = append(queryAPIs, queryNode["queryEndpoint"].(string))
		}
	}
	if ipv6 {
		hostname = "[" + hostname + "]"
		// Package-level mutation: later callers also see the IPv6 form.
		LOCALHOST = "[::1]"
	}
	// if the end-points contain localhost IPv4 or IPv6 then replace them with the actual hostname
	for i, qa := range queryAPIs {
		queryAPIs[i] = strings.Replace(qa, LOCALHOST, hostname, -1)
	}
	if len(queryAPIs) == 0 {
		return nil, fmt.Errorf("Query endpoints not found")
	}
	return queryAPIs, nil
}
// OpenN1QLConnection creates a connection from either a cluster endpoint
// (8091 / couchbase[s] scheme, from which query or analytics nodes are
// auto discovered) or a direct query endpoint (e.g. 8093). The connection
// is validated with a trivial statement before being returned.
func OpenN1QLConnection(name string) (*n1qlConn, error) {
	var queryAPIs []string = nil

	if name == "" {
		return nil, fmt.Errorf(" N1QL: Invalid query service endpoint.")
	}

	if strings.HasPrefix(name, "https") {
		//First check if the input string is a cluster endpoint
		couchbase.SetSkipVerify(skipVerify)
		if skipVerify {
			HTTPTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
		} else {
			if certFile != "" && keyFile != "" {
				couchbase.SetCertFile(certFile)
				couchbase.SetKeyFile(keyFile)
			} else {
				//error need to pass both certfile and keyfile
				return nil, fmt.Errorf("N1QL: Need to pass both certfile and keyfile")
			}
			if rootFile != "" {
				couchbase.SetRootFile(rootFile)
			}
			// For 18093 connections
			cfg, err := couchbase.ClientConfigForX509(certFile, keyFile, rootFile)
			if err != nil {
				return nil, err
			}
			HTTPTransport.TLSClientConfig = cfg
		}
	}

	var client couchbase.Client
	var err error
	var fname *url.URL
	var perr error = nil

	fname, err = url.Parse(name)
	if err != nil {
		return nil, fmt.Errorf(" N1QL: Invalid input url.")
	}

	// MB-41015: only resolve DNS SRV records for the couchbase:// and
	// couchbases:// schemes; plain http(s) inputs must not trigger SRV
	// lookups. When SRV resolution succeeds, the endpoint is rebuilt from
	// the first SRV target and used for the cluster connection below
	// (previously the resolved name was never actually connected to).
	if fname.Scheme == "couchbase" || fname.Scheme == "couchbases" {
		if _, addr, srvErr := net.LookupSRV(fname.Scheme, "tcp", fname.Host); srvErr == nil && len(addr) > 0 {
			portV := fname.Port()
			scheme := "http"
			if fname.Scheme == "couchbases" {
				scheme = "https"
				if portV == "" {
					portV = "18091"
				}
			} else if portV == "" {
				portV = "8091"
			}
			name = scheme + "://" + addr[0].Target + ":" + portV
		}
	}

	// Connect to a couchbase cluster (possibly via the SRV-resolved name).
	if hasUsernamePassword() {
		client, err = couchbase.ConnectWithAuthCreds(name, username, password)
	} else {
		client, err = couchbase.Connect(name)
	}

	if err != nil {
		// Not a cluster endpoint. Treat it as a direct query entry
		// (8093 or 8095 for example) and connect to that instead.
		perr = fmt.Errorf("N1QL: Unable to connect to cluster endpoint %s. Error %v", name, err)
		name = strings.TrimSuffix(name, "/")
		queryAPI := name + N1QL_SERVICE_ENDPOINT
		queryAPIs = make([]string, 1, 1)
		queryAPIs[0] = queryAPI
	} else {
		// Connection was possible - means this is a cluster endpoint.
		// Auto detect the query / analytics nodes via nodeServices.
		ps, err := client.GetPoolServices("default")
		if err != nil {
			return nil, fmt.Errorf("N1QL: Failed to get NodeServices list. Error %v", err)
		}
		queryAPIs, err = discoverN1QLService(name, ps, isAnalytics, networkCfg)
		if err != nil {
			return nil, err
		}
		sType := "N1QL"
		if isAnalytics {
			sType = "Analytics"
		}
		if len(queryAPIs) <= 0 {
			return nil, fmt.Errorf("N1QL: No " + sType + " service found on this cluster")
		}
	}

	// Validate the connection with a trivial statement before handing it out.
	conn := &n1qlConn{client: HTTPClient, queryAPIs: queryAPIs}
	request, err := prepareRequest(N1QL_DEFAULT_STATEMENT, queryAPIs[0], nil)
	if err != nil {
		return nil, err
	}
	resp, err := conn.client.Do(request)
	if err != nil {
		// stripurl masks any credentials embedded in the error text.
		final_error := fmt.Errorf("N1QL: Connection failed %v", stripurl(err.Error())).Error()
		if perr != nil {
			final_error = final_error + "\n " + stripurl(perr.Error())
		}
		return nil, fmt.Errorf("%v", final_error)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("N1QL: Connection failure %s", bod)
	}
	return conn, nil
}
// stripurl masks the password embedded in an http(s) URL found inside an
// error message so credentials are not leaked to logs or callers. If the
// message contains no URL, or the URL carries no userinfo, the message is
// returned unchanged.
func stripurl(inputstring string) string {
	// Detect http* within the string.
	startindex := strings.Index(inputstring, "http")
	if startindex < 0 {
		// BUG FIX: no URL present — the unguarded slice below used to panic.
		return inputstring
	}
	endindex := strings.Index(inputstring[startindex:], " ")
	if endindex < 0 {
		// URL runs to the end of the message.
		endindex = len(inputstring) - startindex
	}
	inputurl := inputstring[startindex : startindex+endindex]
	// Parse into a url and detect password
	urlpart, err := url.Parse(inputurl)
	if err != nil {
		return inputstring
	}
	u := urlpart.User
	if u == nil {
		return inputstring
	}
	uname := u.Username()
	pwd, _ := u.Password()
	// Find how many symbols there are in the password; each one may appear
	// percent-encoded elsewhere in the message and is masked recursively below.
	num := 0
	for _, letter := range fmt.Sprintf("%v", pwd) {
		if (unicode.IsSymbol(letter) || unicode.IsPunct(letter)) && letter != '*' {
			num = num + 1
		}
	}
	// Locate the username so the password that follows it can be replaced.
	startindex = strings.Index(inputstring, uname)
	if startindex < 0 || startindex+len(uname)+1+len(pwd) > len(inputstring) {
		// Defensive: username not found or message too short to slice safely.
		return inputstring
	}
	// Reform the error message, with * as the password.
	inputstring = inputstring[:startindex+len(uname)+1] + "*" + inputstring[startindex+len(uname)+1+len(pwd):]
	// Replace all the special-character encodings, one pass per symbol.
	for num > 0 {
		num = num - 1
		inputstring = stripurl(inputstring)
	}
	return inputstring
}
// do client request with retry
// doClientRequest sends the query (or the pre-built form values when query
// is empty) to a randomly chosen query node, retrying against the
// remaining nodes when one fails. Unresponsive nodes are pruned from the
// connection's endpoint list.
func (conn *n1qlConn) doClientRequest(query string, requestValues *url.Values) (*http.Response, error) {
	// BUG FIX: seed once per call, not once per retry iteration. Reseeding
	// inside the loop with second resolution picked the same "random" node
	// on every retry within the same second.
	rand.Seed(time.Now().Unix())

	ok := false
	for !ok {
		var request *http.Request
		var err error

		// Pick a query API endpoint at random for rudimentary load balancing.
		numNodes := len(conn.queryAPIs)
		selectedNode := rand.Intn(numNodes)
		conn.lock.RLock()
		queryAPI := conn.queryAPIs[selectedNode]
		conn.lock.RUnlock()

		if query != "" {
			request, err = prepareRequest(query, queryAPI, nil)
			if err != nil {
				return nil, err
			}
		} else {
			if requestValues != nil {
				request, _ = http.NewRequest("POST", queryAPI, bytes.NewBufferString(requestValues.Encode()))
			} else {
				request, _ = http.NewRequest("POST", queryAPI, nil)
			}
			request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
			setCBUserAgent(request)
			if hasUsernamePassword() {
				request.SetBasicAuth(username, password)
			}
		}

		resp, err := conn.client.Do(request)
		if err != nil {
			// If this was the last candidate node, give up.
			if numNodes == 1 {
				break
			}
			// Drop the unresponsive node and retry with the remainder.
			conn.lock.Lock()
			conn.queryAPIs = append(conn.queryAPIs[:selectedNode], conn.queryAPIs[selectedNode+1:]...)
			conn.lock.Unlock()
			continue
		}
		return resp, nil
	}

	return nil, fmt.Errorf("N1QL: Query nodes not responding")
}
// serializeErrors flattens the "errors" field of a query response into a
// single human-readable string. Unknown shapes fall back to a generic
// value-and-type rendering.
func serializeErrors(errors interface{}) string {
	var out string
	if list, isList := errors.([]interface{}); isList {
		for _, entry := range list {
			m, isMap := entry.(map[string]interface{})
			if !isMap {
				continue
			}
			code := m["code"]
			msg := m["msg"]
			if code == 0 || msg == "" {
				continue
			}
			if out == "" {
				out = fmt.Sprintf("Code : %v Message : %v", code, msg)
			} else {
				out = fmt.Sprintf("%v Code : %v Message : %v", out, code, msg)
			}
		}
	}
	if out != "" {
		return out
	}
	return fmt.Sprintf(" Error %v %T", errors, errors)
}
// Prepare creates a server-side prepared statement by wrapping the query
// in PREPARE, and records the returned name/plan for later execution.
// '?' placeholders are rewritten to positional parameters first.
func (conn *n1qlConn) Prepare(query string) (*n1qlStmt, error) {
	var argCount int

	query = "PREPARE " + query
	query, argCount = prepareQuery(query)

	resp, err := conn.doClientRequest(query, nil)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != 200 {
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("%s", bod)
	}

	var resultMap map[string]*json.RawMessage
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("N1QL: Failed to read response body from server. Error %v", err)
	}
	if err := json.Unmarshal(body, &resultMap); err != nil {
		return nil, fmt.Errorf("N1QL: Failed to parse response. Error %v", err)
	}
	stmt := &n1qlStmt{conn: conn, argCount: argCount}

	errors, ok := resultMap["errors"]
	if ok && errors != nil {
		var errs []interface{}
		_ = json.Unmarshal(*errors, &errs)
		return nil, fmt.Errorf("N1QL: Error preparing statement %v", serializeErrors(errs))
	}

	for name, results := range resultMap {
		switch name {
		case "results":
			var preparedResults []interface{}
			if err := json.Unmarshal(*results, &preparedResults); err != nil {
				return nil, fmt.Errorf("N1QL: Failed to unmarshal results %v", err)
			}
			if len(preparedResults) == 0 {
				return nil, fmt.Errorf("N1QL: Unknown error, no prepared results returned")
			}
			serialized, _ := json.Marshal(preparedResults[0])
			// BUG FIX: guard the type assertions so a malformed result
			// surfaces as an error instead of panicking the caller.
			entry, isMap := preparedResults[0].(map[string]interface{})
			if !isMap {
				return nil, fmt.Errorf("N1QL: Unexpected prepared result format")
			}
			prepName, isString := entry["name"].(string)
			if !isString {
				return nil, fmt.Errorf("N1QL: Prepared result is missing its name")
			}
			stmt.name = prepName
			stmt.prepared = string(serialized)
		case "signature":
			stmt.signature = string(*results)
		}
	}
	if stmt.prepared == "" {
		return nil, ErrInternalError
	}
	return stmt, nil
}
// Begin is part of the driver.Conn interface; transactions are not
// supported by this driver.
func (conn *n1qlConn) Begin() (driver.Tx, error) {
	return nil, ErrNotSupported
}

// Close is a no-op: the HTTP client is shared package-wide, so there are
// no per-connection resources to release.
func (conn *n1qlConn) Close() error {
	return nil
}
func decodeSignature(signature *json.RawMessage) interface{} {
var sign interface{}
var rows map[string]interface{}
json.Unmarshal(*signature, &sign)
switch s := sign.(type) {
case map[string]interface{}:
return s
case string:
return s
default:
fmt.Printf(" Cannot decode signature. Type of this signature is %T", s)
return map[string]interface{}{"*": "*"}
}
return rows
}
// performQueryRaw issues the request and hands the raw, unparsed response
// body back to the caller, who is responsible for closing it.
func (conn *n1qlConn) performQueryRaw(query string, requestValues *url.Values) (io.ReadCloser, error) {
	resp, err := conn.doClientRequest(query, requestValues)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode == 200 {
		return resp.Body, nil
	}
	// Non-200: still return the body so the caller can inspect the payload.
	return resp.Body, fmt.Errorf("Request failed with error code %d.", resp.StatusCode)
}
func getDecoder(r io.Reader) (*json.Decoder, error) {
if r == nil {
return nil, fmt.Errorf("Failed to decode nil response.")
}
return json.NewDecoder(r), nil
}
// performQuery executes the statement and converts the JSON response into
// rows. Server-reported errors are passed to resultToRows alongside the
// rows, because the Rows interface cannot return both separately.
func (conn *n1qlConn) performQuery(query string, requestValues *url.Values) (godbc.Rows, error) {
	resp, err := conn.doClientRequest(query, requestValues)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != 200 {
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("%s", bod)
	}

	var resultMap map[string]*json.RawMessage
	decoder, err := getDecoder(resp.Body)
	if err != nil {
		return nil, err
	}
	err = decoder.Decode(&resultMap)
	if err != nil {
		return nil, fmt.Errorf(" N1QL: Failed to decode result %v", err)
	}

	var signature interface{}
	var resultRows *json.RawMessage
	var metrics interface{}
	var status interface{}
	var requestId interface{}
	var errs interface{}

	for name, results := range resultMap {
		switch name {
		case "errors":
			_ = json.Unmarshal(*results, &errs)
		case "signature":
			if results != nil {
				signature = decodeSignature(results)
			} else if N1QL_PASSTHROUGH_MODE == true {
				// for certain types of DML queries, the returned signature could be null
				// however in passthrough mode we always return the metrics, status etc as
				// rows therefore we need to ensure that there is a default signature.
				signature = map[string]interface{}{"*": "*"}
			}
		case "results":
			resultRows = results
		case "metrics":
			if N1QL_PASSTHROUGH_MODE == true {
				_ = json.Unmarshal(*results, &metrics)
			}
		case "status":
			if N1QL_PASSTHROUGH_MODE == true {
				_ = json.Unmarshal(*results, &status)
			}
		case "requestID":
			if N1QL_PASSTHROUGH_MODE == true {
				_ = json.Unmarshal(*results, &requestId)
			}
		}
	}

	// BUG FIX: a response without a "results" field (possible for some
	// DML/DDL replies) previously dereferenced a nil RawMessage below.
	if resultRows == nil {
		empty := json.RawMessage("[]")
		resultRows = &empty
	}

	if N1QL_PASSTHROUGH_MODE == true {
		extraVals := map[string]interface{}{"requestID": requestId,
			"status":    status,
			"signature": signature,
		}
		// in passthrough mode last line will always be an error line
		errors := map[string]interface{}{"errors": errs}
		return resultToRows(bytes.NewReader(*resultRows), resp, signature, metrics, errors, extraVals)
	}

	// we return the errors with the rows because we can have scenarios where there are valid
	// results returned along with the error and this interface doesn't allow for both to be
	// returned and hence this workaround.
	return resultToRows(bytes.NewReader(*resultRows), resp, signature, nil, errs, nil)
}
// Executes a query that returns a set of Rows.
// Select statements should use this interface.
func (conn *n1qlConn) Query(query string, args ...interface{}) (godbc.Rows, error) {
	if len(args) != 0 {
		// Rewrite '?' placeholders and splice in the positional args.
		numParams := 0
		query, numParams = prepareQuery(query)
		if numParams != len(args) {
			return nil, fmt.Errorf("Argument count mismatch %d != %d", numParams, len(args))
		}
		query, args = preparePositionalArgs(query, numParams, args)
	}
	return conn.performQuery(query, nil)
}
// QueryRaw runs a query and returns the unparsed response body; the caller
// must close it.
func (conn *n1qlConn) QueryRaw(query string, args ...interface{}) (io.ReadCloser, error) {
	if len(args) != 0 {
		// Rewrite '?' placeholders and splice in the positional args.
		numParams := 0
		query, numParams = prepareQuery(query)
		if numParams != len(args) {
			return nil, fmt.Errorf("Argument count mismatch %d != %d", numParams, len(args))
		}
		query, args = preparePositionalArgs(query, numParams, args)
	}
	return conn.performQueryRaw(query, nil)
}
// performExecRaw runs the statement and returns the raw response body; the
// caller owns (and must close) the stream.
func (conn *n1qlConn) performExecRaw(query string, requestValues *url.Values) (io.ReadCloser, error) {
	resp, err := conn.doClientRequest(query, requestValues)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode == 200 {
		return resp.Body, nil
	}
	// Hand back the body even on failure so the caller can read the payload.
	return resp.Body, fmt.Errorf("Request failed with error code %d.", resp.StatusCode)
}
// performExec runs a row-less statement, extracting the mutation count
// from the "metrics" section and any server-side errors from "errors".
// Both the result and an execution error may be returned together.
func (conn *n1qlConn) performExec(query string, requestValues *url.Values) (godbc.Result, error) {
	resp, err := conn.doClientRequest(query, requestValues)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != 200 {
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("%s", bod)
	}

	var resultMap map[string]*json.RawMessage
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("N1QL: Failed to read response body from server. Error %v", err)
	}
	if err := json.Unmarshal(body, &resultMap); err != nil {
		return nil, fmt.Errorf("N1QL: Failed to parse response. Error %v", err)
	}

	var execErr error
	res := &n1qlResult{}
	for name, results := range resultMap {
		switch name {
		case "metrics":
			var metrics map[string]interface{}
			if err := json.Unmarshal(*results, &metrics); err != nil {
				return nil, fmt.Errorf("N1QL: Failed to unmarshal response. Error %v", err)
			}
			if mc, ok := metrics["mutationCount"]; ok {
				// JSON numbers decode as float64.
				res.affectedRows = int64(mc.(float64))
			}
			// (removed a redundant `break`: Go switch cases never fall through)
		case "errors":
			var errs []interface{}
			// Best effort: a malformed errors field still yields a message below.
			_ = json.Unmarshal(*results, &errs)
			execErr = fmt.Errorf("N1QL: Error executing query %v", serializeErrors(errs))
		}
	}
	return res, execErr
}
// Execer implementation. To be used for queries that do not return any rows
// such as Create Index, Insert, Upsert, Delete etc.
func (conn *n1qlConn) Exec(query string, args ...interface{}) (godbc.Result, error) {
	if len(args) != 0 {
		// Rewrite '?' placeholders and splice in the positional args.
		numParams := 0
		query, numParams = prepareQuery(query)
		if numParams != len(args) {
			return nil, fmt.Errorf("Argument count mismatch %d != %d", numParams, len(args))
		}
		query, args = preparePositionalArgs(query, numParams, args)
	}
	return conn.performExec(query, nil)
}
// ExecRaw executes a row-less statement and returns the unparsed response
// body; the caller must close it.
func (conn *n1qlConn) ExecRaw(query string, args ...interface{}) (io.ReadCloser, error) {
	if len(args) != 0 {
		// Rewrite '?' placeholders and splice in the positional args.
		numParams := 0
		query, numParams = prepareQuery(query)
		if numParams != len(args) {
			return nil, fmt.Errorf("Argument count mismatch %d != %d", numParams, len(args))
		}
		query, args = preparePositionalArgs(query, numParams, args)
	}
	return conn.performExecRaw(query, nil)
}
// prepareQuery rewrites every '?' placeholder as a numbered N1QL
// positional parameter ($1, $2, ...) and reports how many were found.
// Note: '?' characters inside string literals are rewritten too.
func prepareQuery(query string) (string, int) {
	n := 0
	placeholder := regexp.MustCompile(`\?`)
	rewritten := placeholder.ReplaceAllStringFunc(query, func(string) string {
		n++
		return fmt.Sprintf("$%d", n)
	})
	return rewritten, n
}
//
// Replace the positional params ($1..$argCount) in the query and return
// the list of left-over args. Strings are quoted, []byte is passed raw,
// everything else is rendered with %v.
func preparePositionalArgs(query string, argCount int, args []interface{}) (string, []interface{}) {
	// Format each consumed arg the way the N1QL REST API expects.
	formatted := make([]string, 0, argCount)
	newArgs := make([]interface{}, 0)
	for i, arg := range args {
		if i >= argCount {
			// Extra args beyond the placeholder count pass through untouched.
			newArgs = append(newArgs, arg)
			continue
		}
		var a string
		switch arg := arg.(type) {
		case string:
			a = fmt.Sprintf("\"%v\"", arg)
		case []byte:
			a = string(arg)
		default:
			a = fmt.Sprintf("%v", arg)
		}
		formatted = append(formatted, a)
	}
	// BUG FIX: register replacement pairs highest-numbered first.
	// strings.NewReplacer prefers the pattern listed first when several
	// match at the same position, so listing "$1" before "$10" made "$10"
	// expand as the first arg followed by a literal '0'.
	subList := make([]string, 0, 2*len(formatted))
	for i := len(formatted) - 1; i >= 0; i-- {
		subList = append(subList, fmt.Sprintf("$%d", i+1), formatted[i])
	}
	r := strings.NewReplacer(subList...)
	return r.Replace(query), newArgs
}
// prepare a http request for the query
//
// prepareRequest builds the POST request carrying the statement, optional
// positional args, and any globally configured query parameters.
func prepareRequest(query string, queryAPI string, args []interface{}) (*http.Request, error) {
	form := url.Values{}
	form.Set("statement", query)
	if len(args) != 0 {
		if encoded := buildPositionalArgList(args); len(encoded) > 0 {
			form.Set("args", encoded)
		}
	}
	setQueryParams(&form)

	request, err := http.NewRequest("POST", queryAPI, bytes.NewBufferString(form.Encode()))
	if err != nil {
		return nil, err
	}
	request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	setCBUserAgent(request)
	if hasUsernamePassword() {
		request.SetBasicAuth(username, password)
	}
	return request, nil
}
//
// setQueryParams copies the package-wide REST query parameters into v.
func setQueryParams(v *url.Values) {
	for name, val := range QueryParams {
		v.Set(name, val)
	}
}
// Return hostname and port for IPv4 and IPv6.
// Handles both "host:port" and the bracketed IPv6 form "[::1]:port", and
// also reports whether the address is IPv6.
func HostNameandPort(node string) (host, port string, ipv6 bool, err error) {
	// Classify the address family first.
	if ipv6, err = IsIPv6(node); err != nil {
		return "", "", false, err
	}

	var pieces []string
	if ipv6 {
		// Bracketed form, e.g. [::1]:8091.
		pieces = strings.Split(node, "]:")
		host = strings.Replace(pieces[0], "[", "", 1)
	} else {
		// IPv4 / hostname form.
		pieces = strings.Split(node, ":")
		host = pieces[0]
	}
	if len(pieces) == 2 {
		port = pieces[1]
	} else {
		port = ""
	}
	return
}
// IsIPv6 reports whether str (optionally host:port, including the
// bracketed "[::1]:8091" form) carries an IPv6 address. Non-numeric hosts
// (FQDNs) are treated as not-IPv6 rather than as an error.
func IsIPv6(str string) (bool, error) {
	host, _, err := net.SplitHostPort(str)
	if err != nil {
		// No port present; treat the whole string as the host.
		host = str
	}
	if host == "localhost" {
		host = LOCALHOST
	}
	ip := net.ParseIP(host)
	if ip == nil {
		// ParseIP only understands numeric addresses, so this is either an
		// FQDN or garbage; either way it is not an IPv6 literal.
		return false, nil
	}
	if ip.To4() != nil {
		// IPv4
		return false, nil
	}
	if ip.To16() != nil {
		// IPv6
		return true, nil
	}
	return false, fmt.Errorf("\nThis is an incorrect address %v", str)
}
MB-41015: For DNS SRV use only couchbase and couchbases as schemes
Change-Id: I980e05f5daae4a277b62144835c1bb6cf1591678
Reviewed-on: http://review.couchbase.org/c/godbc/+/134941
Reviewed-by: Sitaram Vemulapalli <8d86a491dcdf321bac78bef141a8229a59bf9211@couchbase.com>
Tested-by: Isha Kandaswamy <e3cc845ebc6144fc4d71cf5f07a0ce9db6fdfa91@couchbase.com>
// Copyright (c) 2014 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package n1ql
import (
"bytes"
"crypto/tls"
"database/sql/driver"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/url"
"regexp"
"strings"
"sync"
"time"
"unicode"
"github.com/couchbase/go-couchbase"
"github.com/couchbase/godbc"
"github.com/couchbase/query/util"
)
// Common error codes
var (
	ErrNotSupported   = fmt.Errorf("N1QL:Not supported")
	ErrNotImplemented = fmt.Errorf("N1QL: Not implemented")
	ErrUnknownCommand = fmt.Errorf("N1QL: Unknown Command")
	ErrInternalError  = fmt.Errorf("N1QL: Internal Error")
)

// defaults
var (
	N1QL_SERVICE_ENDPOINT = "/query/service"
	N1QL_DEFAULT_HOST     = "127.0.0.1"
	N1QL_DEFAULT_PORT     = 8093
	// NOTE(review): in Go '^' is bitwise XOR, so 2 ^ 10 evaluates to 8 —
	// neither 1024 nor the "1 MB" the trailing comment claims. Confirm
	// the intended value before relying on it.
	N1QL_POOL_SIZE         = 2 ^ 10 // 1 MB
	N1QL_DEFAULT_STATEMENT = "SELECT RAW 1;"
	// LOCALHOST is rewritten to "[::1]" by getQueryApi when the endpoint
	// being contacted is IPv6.
	LOCALHOST = N1QL_DEFAULT_HOST
)

// flags
var (
	// When true, metrics/status/requestID are also returned as rows.
	N1QL_PASSTHROUGH_MODE = false
)

// Rest API query parameters
var QueryParams map[string]string

// Username and password. Used for querying the cluster endpoint,
// which may require authorization.
var username, password string

// Used to decide whether to skip verification of certificates when
// connecting to an ssl port.
var skipVerify = true
var certFile = ""
var keyFile = ""
var rootFile = ""

// Selects the analytics (cbas) service during discovery (SetIsAnalytics).
var isAnalytics = false

// Address-selection mode: "default", "external" or "auto" (SetNetworkType).
var networkCfg = "default"

// init allocates the shared REST query-parameter map used by every request.
func init() {
	QueryParams = make(map[string]string)
}
// SetIsAnalytics selects the analytics (cbas) service instead of n1ql
// during endpoint discovery.
func SetIsAnalytics(val bool) {
	isAnalytics = val
}

// SetNetworkType chooses between "default", "external" and "auto"
// addressing when discovering service endpoints.
func SetNetworkType(networkType string) {
	networkCfg = networkType
}

// SetQueryParams records a REST query parameter sent with every request.
func SetQueryParams(key string, value string) error {
	if key == "" {
		return fmt.Errorf("N1QL: Key not specified")
	}
	QueryParams[key] = value
	return nil
}

// UnsetQueryParams removes a previously set REST query parameter.
func UnsetQueryParams(key string) error {
	if key == "" {
		return fmt.Errorf("N1QL: Key not specified")
	}
	delete(QueryParams, key)
	return nil
}

// SetPassthroughMode toggles returning metrics/status/requestID as rows.
func SetPassthroughMode(val bool) {
	N1QL_PASSTHROUGH_MODE = val
}

// SetUsernamePassword stores the credentials used for cluster and query
// requests.
func SetUsernamePassword(u, p string) {
	username = u
	password = p
}

// hasUsernamePassword reports whether any credential has been configured.
func hasUsernamePassword() bool {
	return username != "" || password != ""
}

// SetSkipVerify controls TLS certificate verification for ssl endpoints.
func SetSkipVerify(skip bool) {
	skipVerify = skip
}

// SetCertFile sets the client certificate used for X.509 connections.
func SetCertFile(cert string) {
	certFile = cert
}

// SetKeyFile sets the client private key used for X.509 connections.
func SetKeyFile(cert string) {
	keyFile = cert
}

// SetRootFile sets the root CA certificate used for X.509 connections.
func SetRootFile(cert string) {
	rootFile = cert
}
// implements driver.Conn interface
type n1qlConn struct {
	clusterAddr string // NOTE(review): not assigned anywhere in this file — confirm its use
	// queryAPIs lists candidate query/analytics endpoints; doClientRequest
	// prunes entries that stop responding.
	queryAPIs []string
	client    *http.Client
	lock      sync.RWMutex // guards queryAPIs
}

// HTTPClient to use for REST and view operations.
var MaxIdleConnsPerHost = 10
var HTTPTransport = &http.Transport{MaxIdleConnsPerHost: MaxIdleConnsPerHost}
var HTTPClient = &http.Client{Transport: HTTPTransport}
// Auto discover N1QL and Analytics services depending on input.
// Builds the endpoint list from the cluster's nodeServices data, honoring
// the configured network mode (default / external / auto). Note: the
// networkType parameter is currently unused — selection reads the
// package-level networkCfg, which callers pass in as this argument.
func discoverN1QLService(name string, ps couchbase.PoolServices, isAnalytics bool, networkType string) ([]string, error) {
	var hostnm string
	var port int
	var ipv6, ok, external bool
	var hostUrl *url.URL

	prefixUrl := "http://"
	serviceType := "n1ql"
	if isAnalytics {
		serviceType = "cbas"
	}

	// Since analytics doesnt have a rest endpoint that lists the cluster nodes
	// we need to populate the list of analytics APIs here itself.
	// We might as well do the same for query. This makes getQueryApi() redundant.
	queryAPIs := []string{}

	// If the network type isn't provided we detect whether to use the default
	// or the alternate (external) address by comparing the input hostname with
	// the hostnames listed under services.
	hostnm = strings.TrimSpace(name)
	if strings.HasPrefix(hostnm, "http://") || strings.HasPrefix(hostnm, "https://") {
		if strings.HasPrefix(hostnm, "https://") {
			prefixUrl = "https://"
			serviceType += "SSL"
		}
		hostUrl, _ = url.Parse(name)
		hostnm = hostUrl.Host
	}

	if networkCfg == "external" {
		external = true
	} else if networkCfg == "auto" && hostUrl != nil {
		// BUG FIX: hostUrl is only populated when name carries an http(s)
		// prefix; without the nil guard a bare host[:port] input panicked here.
		for _, ns := range ps.NodesExt {
			if v, found := ns.AlternateNames["external"]; found {
				if strings.Compare(v.Hostname, hostUrl.Hostname()) == 0 {
					external = true
					break
				}
			}
		}
	}

	for _, ns := range ps.NodesExt {
		if ns.Services == nil {
			continue
		}
		port, ok = ns.Services[serviceType]
		if !external {
			if ns.Hostname != "" {
				hostnm = ns.Hostname
			}
		} else {
			v, found := ns.AlternateNames["external"]
			if !found || v.Hostname == "" {
				continue
			}
			hostnm = v.Hostname
			if v.Ports != nil {
				port, ok = v.Ports[serviceType]
			}
		}
		hostnm, _, ipv6, _ = HostNameandPort(hostnm)
		// We have both a hostname and (if ok) a service port.
		if ok {
			// n1ql or analytics service found on this node.
			if ipv6 {
				queryAPIs = append(queryAPIs, fmt.Sprintf("%s[%s]:%d"+N1QL_SERVICE_ENDPOINT, prefixUrl, hostnm, port))
			} else {
				queryAPIs = append(queryAPIs, fmt.Sprintf("%s%s:%d"+N1QL_SERVICE_ENDPOINT, prefixUrl, hostnm, port))
			}
		}
	}
	return queryAPIs, nil
}
// cbUserAgent is the value reported in the CB-User-Agent header.
var cbUserAgent string = "godbc/" + util.VERSION

// SetCBUserAgentHeader overrides the reported user agent string.
func SetCBUserAgentHeader(v string) {
	cbUserAgent = v
}

// setCBUserAgent stamps the CB-User-Agent header onto an outgoing request.
func setCBUserAgent(request *http.Request) {
	request.Header.Add("CB-User-Agent", cbUserAgent)
}
// getQueryApi queries a node's /admin/clusters/default/nodes endpoint and
// returns the queryEndpoint URL of every node found.
// Side effect: when n1qlEndPoint is IPv6 it rewrites the package-level
// LOCALHOST to "[::1]" so the localhost substitution below matches.
func getQueryApi(n1qlEndPoint string, isHttps bool) ([]string, error) {
	queryAdmin := n1qlEndPoint + "/admin/clusters/default/nodes"
	if isHttps {
		queryAdmin = "https://" + queryAdmin
	} else {
		queryAdmin = "http://" + queryAdmin
	}
	request, _ := http.NewRequest("GET", queryAdmin, nil)
	request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	setCBUserAgent(request)
	if hasUsernamePassword() {
		request.SetBasicAuth(username, password)
	}
	queryAPIs := make([]string, 0)
	hostname, _, ipv6, err := HostNameandPort(n1qlEndPoint)
	if err != nil {
		return nil, fmt.Errorf("N1QL: Failed to parse URL. Error %v", err)
	}
	resp, err := HTTPClient.Do(request)
	if err != nil {
		return nil, fmt.Errorf("%v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		// Only the first 512 bytes of the error payload are surfaced.
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("%s", bod)
	}
	var nodesInfo []interface{}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("N1QL: Failed to read response body from server. Error %v", err)
	}
	if err := json.Unmarshal(body, &nodesInfo); err != nil {
		return nil, fmt.Errorf("N1QL: Failed to parse response. Error %v", err)
	}
	for _, queryNode := range nodesInfo {
		switch queryNode := queryNode.(type) {
		case map[string]interface{}:
			// NOTE(review): this assertion panics if "queryEndpoint" is
			// absent or not a string — confirm the admin API contract.
			queryAPIs = append(queryAPIs, queryNode["queryEndpoint"].(string))
		}
	}
	if ipv6 {
		hostname = "[" + hostname + "]"
		// Package-level mutation: later callers also see the IPv6 form.
		LOCALHOST = "[::1]"
	}
	// if the end-points contain localhost IPv4 or IPv6 then replace them with the actual hostname
	for i, qa := range queryAPIs {
		queryAPIs[i] = strings.Replace(qa, LOCALHOST, hostname, -1)
	}
	if len(queryAPIs) == 0 {
		return nil, fmt.Errorf("Query endpoints not found")
	}
	return queryAPIs, nil
}
// OpenN1QLConnection establishes a connection for issuing N1QL (or Analytics)
// queries. name may be either a cluster endpoint (e.g. http://host:8091) or a
// direct query endpoint (e.g. http://host:8093): a reachable cluster endpoint
// is probed for its query/analytics nodes, while anything else is treated as
// a direct endpoint and used as-is. The connection is validated by executing
// N1QL_DEFAULT_STATEMENT before it is returned.
func OpenN1QLConnection(name string) (*n1qlConn, error) {
	var queryAPIs []string = nil
	if name == "" {
		return nil, fmt.Errorf(" N1QL: Invalid query service endpoint.")
	}
	if strings.HasPrefix(name, "https") {
		//First check if the input string is a cluster endpoint
		couchbase.SetSkipVerify(skipVerify)
		if skipVerify {
			// Verification disabled: accept any server certificate.
			HTTPTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
		} else {
			// Verified TLS requires a client certificate/key pair.
			if certFile != "" && keyFile != "" {
				couchbase.SetCertFile(certFile)
				couchbase.SetKeyFile(keyFile)
			} else {
				//error need to pass both certfile and keyfile
				return nil, fmt.Errorf("N1QL: Need to pass both certfile and keyfile")
			}
			if rootFile != "" {
				couchbase.SetRootFile(rootFile)
			}
			// For 18093 connections
			cfg, err := couchbase.ClientConfigForX509(certFile, keyFile, rootFile)
			if err != nil {
				return nil, err
			}
			HTTPTransport.TLSClientConfig = cfg
		}
	}
	var client couchbase.Client
	var err error
	var perr error = nil
	// Connect to a couchbase cluster
	if hasUsernamePassword() {
		client, err = couchbase.ConnectWithAuthCreds(name, username, password)
	} else {
		client, err = couchbase.Connect(name)
	}
	if err != nil {
		// Direct query entry (8093 or 8095 for example. So connect to that.)
		// The cluster-connect failure is kept in perr and surfaced later
		// only if the direct endpoint also fails.
		perr = fmt.Errorf("N1QL: Unable to connect to cluster endpoint %s. Error %v", name, err)
		// If not cluster endpoint then check if query endpoint
		name = strings.TrimSuffix(name, "/")
		queryAPI := name + N1QL_SERVICE_ENDPOINT
		queryAPIs = make([]string, 1, 1)
		queryAPIs[0] = queryAPI
	} else {
		// Connection was possible - means this is a cluster endpoint.
		// We need to auto detect the query / analytics nodes.
		// Query by default. Analytics if option is set.
		// Get pools/default/nodeServices
		ps, err := client.GetPoolServices("default")
		if err != nil {
			return nil, fmt.Errorf("N1QL: Failed to get NodeServices list. Error %v", err)
		}
		queryAPIs, err = discoverN1QLService(name, ps, isAnalytics, networkCfg)
		if err != nil {
			return nil, err
		}
		sType := "N1QL"
		if isAnalytics {
			sType = "Analytics"
		}
		if len(queryAPIs) <= 0 {
			return nil, fmt.Errorf("N1QL: No " + sType + " service found on this cluster")
		}
	}
	// Sanity-check the connection by running a trivial statement against the
	// first discovered endpoint.
	conn := &n1qlConn{client: HTTPClient, queryAPIs: queryAPIs}
	request, err := prepareRequest(N1QL_DEFAULT_STATEMENT, queryAPIs[0], nil)
	if err != nil {
		return nil, err
	}
	resp, err := conn.client.Do(request)
	if err != nil {
		// stripurl masks any credentials embedded in the error text; the
		// earlier cluster-connect failure (perr) is appended for context.
		final_error := fmt.Errorf("N1QL: Connection failed %v", stripurl(err.Error())).Error()
		if perr != nil {
			final_error = final_error + "\n " + stripurl(perr.Error())
		}
		return nil, fmt.Errorf("%v", final_error)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("N1QL: Connection failure %s", bod)
	}
	return conn, nil
}
// stripurl masks the password of any URL embedded in inputstring, replacing
// it with "*". Strings without an embedded http(s) URL, or whose URL carries
// no userinfo, are returned unchanged. After masking, the function recurses
// once per special character that appeared in the password so that encoded
// forms of those characters are masked as well.
func stripurl(inputstring string) string {
	// Detect http* within the string.
	startindex := strings.Index(inputstring, "http")
	// BUGFIX: previously a missing "http" (startindex == -1) or a URL at
	// the very end of the string (no trailing space) caused a slice
	// out-of-range panic. Guard both cases.
	if startindex < 0 {
		return inputstring
	}
	inputurl := inputstring[startindex:]
	if endindex := strings.Index(inputurl, " "); endindex >= 0 {
		inputurl = inputurl[:endindex]
	}
	// Parse into a url and detect password
	urlpart, err := url.Parse(inputurl)
	if err != nil {
		return inputstring
	}
	u := urlpart.User
	if u == nil {
		return inputstring
	}
	uname := u.Username()
	pwd, _ := u.Password()
	//Find how many symbols there are in the User string
	num := 0
	for _, letter := range fmt.Sprintf("%v", pwd) {
		if (unicode.IsSymbol(letter) || unicode.IsPunct(letter)) && letter != '*' {
			num = num + 1
		}
	}
	// detect the index on the password
	startindex = strings.Index(inputstring, uname)
	if startindex < 0 {
		return inputstring
	}
	//reform the error message, with * as the password
	inputstring = inputstring[:startindex+len(uname)+1] + "*" + inputstring[startindex+len(uname)+1+len(pwd):]
	//Replace all the special characters encoding
	for num > 0 {
		num = num - 1
		inputstring = stripurl(inputstring)
	}
	return inputstring
}
// doClientRequest executes the statement (or, when query is empty, the raw
// requestValues form) against a randomly chosen query node. On a transport
// failure the failed node is dropped from the rotation and another node is
// tried; the retry loop ends when a response is obtained or no nodes remain.
func (conn *n1qlConn) doClientRequest(query string, requestValues *url.Values) (*http.Response, error) {
	// BUGFIX: the seed was previously re-applied on every retry iteration
	// with second granularity, so all retries within the same second chose
	// the same node. Seed once per call instead.
	rand.Seed(time.Now().Unix())
	for {
		var request *http.Request
		var err error
		// Select a query API under the read lock (the length read was
		// previously outside the lock, racing with node removal below).
		conn.lock.RLock()
		numNodes := len(conn.queryAPIs)
		selectedNode := rand.Intn(numNodes)
		queryAPI := conn.queryAPIs[selectedNode]
		conn.lock.RUnlock()
		if query != "" {
			request, err = prepareRequest(query, queryAPI, nil)
			if err != nil {
				return nil, err
			}
		} else {
			// BUGFIX: the http.NewRequest error was discarded, which
			// could pass a nil request to Do below.
			if requestValues != nil {
				request, err = http.NewRequest("POST", queryAPI, bytes.NewBufferString(requestValues.Encode()))
			} else {
				request, err = http.NewRequest("POST", queryAPI, nil)
			}
			if err != nil {
				return nil, err
			}
			request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
			setCBUserAgent(request)
			if hasUsernamePassword() {
				request.SetBasicAuth(username, password)
			}
		}
		resp, err := conn.client.Do(request)
		if err == nil {
			return resp, nil
		}
		// if this is the last node return with error
		if numNodes == 1 {
			break
		}
		// remove the node that failed from the list of query nodes
		conn.lock.Lock()
		conn.queryAPIs = append(conn.queryAPIs[:selectedNode], conn.queryAPIs[selectedNode+1:]...)
		conn.lock.Unlock()
	}
	return nil, fmt.Errorf("N1QL: Query nodes not responding")
}
// serializeErrors renders the decoded "errors" field of a query response as a
// single human-readable string of "Code : … Message : …" pairs. Anything that
// is not a list of error objects falls through to a generic representation.
func serializeErrors(errors interface{}) string {
	var out string
	if list, isList := errors.([]interface{}); isList {
		for _, entry := range list {
			item, isMap := entry.(map[string]interface{})
			if !isMap {
				continue
			}
			code := item["code"]
			msg := item["msg"]
			if code == 0 || msg == "" {
				continue
			}
			if out == "" {
				out = fmt.Sprintf("Code : %v Message : %v", code, msg)
			} else {
				out = fmt.Sprintf("%v Code : %v Message : %v", out, code, msg)
			}
		}
	}
	if out != "" {
		return out
	}
	// Fall back to echoing the raw value and its dynamic type.
	return fmt.Sprintf(" Error %v %T", errors, errors)
}
// Prepare sends a PREPARE <query> statement to the server and returns a
// statement handle carrying the server-assigned name, the serialized prepared
// plan, and the result signature. Positional '?' placeholders are rewritten
// to $1..$n and their count recorded for later argument checking.
func (conn *n1qlConn) Prepare(query string) (*n1qlStmt, error) {
	var argCount int
	query = "PREPARE " + query
	query, argCount = prepareQuery(query)
	resp, err := conn.doClientRequest(query, nil)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("%s", bod)
	}
	var resultMap map[string]*json.RawMessage
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("N1QL: Failed to read response body from server. Error %v", err)
	}
	if err := json.Unmarshal(body, &resultMap); err != nil {
		return nil, fmt.Errorf("N1QL: Failed to parse response. Error %v", err)
	}
	stmt := &n1qlStmt{conn: conn, argCount: argCount}
	errors, ok := resultMap["errors"]
	if ok && errors != nil {
		var errs []interface{}
		_ = json.Unmarshal(*errors, &errs)
		return nil, fmt.Errorf("N1QL: Error preparing statement %v", serializeErrors(errs))
	}
	for name, results := range resultMap {
		switch name {
		case "results":
			var preparedResults []interface{}
			if err := json.Unmarshal(*results, &preparedResults); err != nil {
				return nil, fmt.Errorf("N1QL: Failed to unmarshal results %v", err)
			}
			if len(preparedResults) == 0 {
				return nil, fmt.Errorf("N1QL: Unknown error, no prepared results returned")
			}
			serialized, _ := json.Marshal(preparedResults[0])
			// BUGFIX: the chained type assertions here previously
			// panicked on an unexpected result shape; fail with an
			// error instead.
			entry, ok := preparedResults[0].(map[string]interface{})
			if !ok {
				return nil, fmt.Errorf("N1QL: Unexpected prepared result format")
			}
			prepName, ok := entry["name"].(string)
			if !ok {
				return nil, fmt.Errorf("N1QL: Prepared result has no name")
			}
			stmt.name = prepName
			stmt.prepared = string(serialized)
		case "signature":
			stmt.signature = string(*results)
		}
	}
	if stmt.prepared == "" {
		return nil, ErrInternalError
	}
	return stmt, nil
}
// Begin is part of the driver interface; transactions are not supported by
// this driver, so it always returns ErrNotSupported.
func (conn *n1qlConn) Begin() (driver.Tx, error) {
	return nil, ErrNotSupported
}
// Close is a no-op: the connection wraps the shared package-level HTTP client
// (see OpenN1QLConnection) and holds no per-connection resources to release.
func (conn *n1qlConn) Close() error {
	return nil
}
func decodeSignature(signature *json.RawMessage) interface{} {
var sign interface{}
var rows map[string]interface{}
json.Unmarshal(*signature, &sign)
switch s := sign.(type) {
case map[string]interface{}:
return s
case string:
return s
default:
fmt.Printf(" Cannot decode signature. Type of this signature is %T", s)
return map[string]interface{}{"*": "*"}
}
return rows
}
// performQueryRaw issues the request and hands the raw, still-open response
// body back to the caller, who is responsible for closing it. On a non-200
// status the body is returned together with an error so the caller can still
// inspect the server's message.
func (conn *n1qlConn) performQueryRaw(query string, requestValues *url.Values) (io.ReadCloser, error) {
	resp, err := conn.doClientRequest(query, requestValues)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode == 200 {
		return resp.Body, nil
	}
	return resp.Body, fmt.Errorf("Request failed with error code %d.", resp.StatusCode)
}
func getDecoder(r io.Reader) (*json.Decoder, error) {
if r == nil {
return nil, fmt.Errorf("Failed to decode nil response.")
}
return json.NewDecoder(r), nil
}
// performQuery executes the statement (or raw form values) and converts the
// JSON response into rows. In passthrough mode the request id, status,
// signature and metrics are surfaced as additional rows; otherwise only the
// results (plus any errors, see the note at the bottom) are returned.
func (conn *n1qlConn) performQuery(query string, requestValues *url.Values) (godbc.Rows, error) {
	resp, err := conn.doClientRequest(query, requestValues)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("%s", bod)
	}
	var resultMap map[string]*json.RawMessage
	decoder, err := getDecoder(resp.Body)
	if err != nil {
		return nil, err
	}
	err = decoder.Decode(&resultMap)
	if err != nil {
		return nil, fmt.Errorf(" N1QL: Failed to decode result %v", err)
	}
	var signature interface{}
	var resultRows *json.RawMessage
	var metrics interface{}
	var status interface{}
	var requestId interface{}
	var errs interface{}
	for name, results := range resultMap {
		switch name {
		case "errors":
			_ = json.Unmarshal(*results, &errs)
		case "signature":
			if results != nil {
				signature = decodeSignature(results)
			} else if N1QL_PASSTHROUGH_MODE == true {
				// for certain types of DML queries, the returned signature could be null
				// however in passthrough mode we always return the metrics, status etc as
				// rows therefore we need to ensure that there is a default signature.
				signature = map[string]interface{}{"*": "*"}
			}
		case "results":
			resultRows = results
		case "metrics":
			if N1QL_PASSTHROUGH_MODE == true {
				_ = json.Unmarshal(*results, &metrics)
			}
		case "status":
			if N1QL_PASSTHROUGH_MODE == true {
				_ = json.Unmarshal(*results, &status)
			}
		case "requestID":
			if N1QL_PASSTHROUGH_MODE == true {
				_ = json.Unmarshal(*results, &requestId)
			}
		}
	}
	// BUGFIX: responses without a "results" field (possible for some DML
	// statements) left resultRows nil and the dereferences below panicked.
	// Substitute an empty JSON array instead.
	if resultRows == nil {
		empty := json.RawMessage("[]")
		resultRows = &empty
	}
	if N1QL_PASSTHROUGH_MODE == true {
		extraVals := map[string]interface{}{"requestID": requestId,
			"status":    status,
			"signature": signature,
		}
		// in passthrough mode last line will always be en error line
		errors := map[string]interface{}{"errors": errs}
		return resultToRows(bytes.NewReader(*resultRows), resp, signature, metrics, errors, extraVals)
	}
	// we return the errors with the rows because we can have scenarios where there are valid
	// results returned along with the error and this interface doesn't allow for both to be
	// returned and hence this workaround.
	return resultToRows(bytes.NewReader(*resultRows), resp, signature, nil, errs, nil)
}
// Query executes a statement that returns a set of rows (Select statements
// should use this interface). Positional '?' placeholders are rewritten to
// $1..$n and substituted from args before the statement is sent.
func (conn *n1qlConn) Query(query string, args ...interface{}) (godbc.Rows, error) {
	if len(args) == 0 {
		return conn.performQuery(query, nil)
	}
	rewritten, argCount := prepareQuery(query)
	if argCount != len(args) {
		return nil, fmt.Errorf("Argument count mismatch %d != %d", argCount, len(args))
	}
	rewritten, _ = preparePositionalArgs(rewritten, argCount, args)
	return conn.performQuery(rewritten, nil)
}
// QueryRaw behaves like Query but returns the raw response body instead of
// decoded rows; the caller must close it.
func (conn *n1qlConn) QueryRaw(query string, args ...interface{}) (io.ReadCloser, error) {
	if len(args) == 0 {
		return conn.performQueryRaw(query, nil)
	}
	rewritten, argCount := prepareQuery(query)
	if argCount != len(args) {
		return nil, fmt.Errorf("Argument count mismatch %d != %d", argCount, len(args))
	}
	rewritten, _ = preparePositionalArgs(rewritten, argCount, args)
	return conn.performQueryRaw(rewritten, nil)
}
// performExecRaw issues the request and returns the raw, still-open response
// body; on a non-200 status the body is returned alongside the error so the
// caller can inspect the server's message. The caller must close the body.
func (conn *n1qlConn) performExecRaw(query string, requestValues *url.Values) (io.ReadCloser, error) {
	resp, err := conn.doClientRequest(query, requestValues)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode == 200 {
		return resp.Body, nil
	}
	return resp.Body, fmt.Errorf("Request failed with error code %d.", resp.StatusCode)
}
// performExec executes a non-row-returning statement and reports the mutation
// count (from the "metrics" field) as the affected-row count. Server-side
// errors are returned alongside the (possibly partial) result.
func (conn *n1qlConn) performExec(query string, requestValues *url.Values) (godbc.Result, error) {
	resp, err := conn.doClientRequest(query, requestValues)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		bod, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 512))
		return nil, fmt.Errorf("%s", bod)
	}
	var resultMap map[string]*json.RawMessage
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("N1QL: Failed to read response body from server. Error %v", err)
	}
	if err := json.Unmarshal(body, &resultMap); err != nil {
		return nil, fmt.Errorf("N1QL: Failed to parse response. Error %v", err)
	}
	var execErr error
	res := &n1qlResult{}
	for name, results := range resultMap {
		switch name {
		case "metrics":
			var metrics map[string]interface{}
			err := json.Unmarshal(*results, &metrics)
			if err != nil {
				return nil, fmt.Errorf("N1QL: Failed to unmarshal response. Error %v", err)
			}
			// BUGFIX: the float64 assertion was unguarded and could
			// panic on an unexpected metrics payload. (Also removed a
			// redundant `break` that only exited the switch case.)
			if mc, ok := metrics["mutationCount"]; ok {
				if count, isNum := mc.(float64); isNum {
					res.affectedRows = int64(count)
				}
			}
		case "errors":
			var errs []interface{}
			_ = json.Unmarshal(*results, &errs)
			execErr = fmt.Errorf("N1QL: Error executing query %v", serializeErrors(errs))
		}
	}
	return res, execErr
}
// Exec is the Execer implementation, for statements that do not return rows
// (Create Index, Insert, Upsert, Delete, ...). Positional '?' placeholders
// are rewritten to $1..$n and substituted from args before sending.
func (conn *n1qlConn) Exec(query string, args ...interface{}) (godbc.Result, error) {
	if len(args) == 0 {
		return conn.performExec(query, nil)
	}
	rewritten, argCount := prepareQuery(query)
	if argCount != len(args) {
		return nil, fmt.Errorf("Argument count mismatch %d != %d", argCount, len(args))
	}
	rewritten, _ = preparePositionalArgs(rewritten, argCount, args)
	return conn.performExec(rewritten, nil)
}
// ExecRaw behaves like Exec but returns the raw response body instead of a
// decoded result; the caller must close it.
func (conn *n1qlConn) ExecRaw(query string, args ...interface{}) (io.ReadCloser, error) {
	if len(args) == 0 {
		return conn.performExecRaw(query, nil)
	}
	rewritten, argCount := prepareQuery(query)
	if argCount != len(args) {
		return nil, fmt.Errorf("Argument count mismatch %d != %d", argCount, len(args))
	}
	rewritten, _ = preparePositionalArgs(rewritten, argCount, args)
	return conn.performExecRaw(rewritten, nil)
}
// prepareQuery rewrites each '?' placeholder in query to a numbered
// positional parameter ($1, $2, ...) and returns the rewritten statement
// together with the number of placeholders found.
//
// PERF: the original compiled a regexp on every call; for the single literal
// '?' pattern a direct byte scan is equivalent and avoids the per-call
// compilation cost on this hot path.
func prepareQuery(query string) (string, int) {
	count := strings.Count(query, "?")
	if count == 0 {
		return query, 0
	}
	var b strings.Builder
	b.Grow(len(query) + count*2)
	n := 0
	for i := 0; i < len(query); i++ {
		if query[i] == '?' {
			n++
			b.WriteString(fmt.Sprintf("$%d", n))
		} else {
			b.WriteByte(query[i])
		}
	}
	return b.String(), count
}
//
// preparePositionalArgs substitutes the positional parameters $1..$argCount
// in query with rendered values from args (strings are double-quoted, byte
// slices inserted verbatim, everything else via %v) and returns the rewritten
// query plus any surplus args that were not consumed.
func preparePositionalArgs(query string, argCount int, args []interface{}) (string, []interface{}) {
	pairs := make([]string, 0)
	leftover := make([]interface{}, 0)
	for i, arg := range args {
		if i >= argCount {
			leftover = append(leftover, arg)
			continue
		}
		var rendered string
		switch v := arg.(type) {
		case string:
			rendered = fmt.Sprintf("\"%v\"", v)
		case []byte:
			rendered = string(v)
		default:
			rendered = fmt.Sprintf("%v", v)
		}
		pairs = append(pairs, fmt.Sprintf("$%d", i+1), rendered)
	}
	return strings.NewReplacer(pairs...).Replace(query), leftover
}
// prepareRequest builds the POST request for one query: the form carries the
// statement, any encoded positional args, and the globally configured query
// parameters; standard headers and credentials are then attached.
func prepareRequest(query string, queryAPI string, args []interface{}) (*http.Request, error) {
	form := url.Values{}
	form.Set("statement", query)
	if len(args) > 0 {
		if encoded := buildPositionalArgList(args); len(encoded) > 0 {
			form.Set("args", encoded)
		}
	}
	setQueryParams(&form)
	request, err := http.NewRequest("POST", queryAPI, bytes.NewBufferString(form.Encode()))
	if err != nil {
		return nil, err
	}
	request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	setCBUserAgent(request)
	if hasUsernamePassword() {
		request.SetBasicAuth(username, password)
	}
	return request, nil
}
//
// setQueryParams copies every globally configured query parameter into v.
func setQueryParams(v *url.Values) {
	for key := range QueryParams {
		v.Set(key, QueryParams[key])
	}
}
// Return hostname and port for IPv4 and IPv6
//
// HostNameandPort splits node into its host and port components, handling
// both the bracketed IPv6 form ("[::1]:8091") and the plain host:port form.
// port is empty when node carries no port. The ipv6 flag reports the address
// family as classified by IsIPv6.
// (Removed a dead `err = nil` store and an unnecessary slice allocation.)
func HostNameandPort(node string) (host, port string, ipv6 bool, err error) {
	// Classify first: the bracketed IPv6 form needs a different split.
	ipv6, err = IsIPv6(node)
	if err != nil {
		return "", "", false, err
	}
	var tokens []string
	if ipv6 {
		// Then the url should be of the form [::1]:8091
		tokens = strings.Split(node, "]:")
		host = strings.Replace(tokens[0], "[", "", 1)
	} else {
		// For IPv4 (or a plain hostname)
		tokens = strings.Split(node, ":")
		host = tokens[0]
	}
	// Exactly two tokens means a port was present; otherwise port stays "".
	if len(tokens) == 2 {
		port = tokens[1]
	}
	return
}
// IsIPv6 reports whether str (optionally in host:port form, including the
// bracketed "[::1]:8091" shape) denotes an IPv6 address. Non-numeric hosts
// (FQDNs) are reported as not-IPv6 without error, since net.ParseIP can only
// classify numeric addresses; "localhost" is mapped to LOCALHOST first.
func IsIPv6(str string) (bool, error) {
	// Strip an optional :port; on failure treat str as a bare host.
	host, _, err := net.SplitHostPort(str)
	if err != nil {
		host = str
	}
	if host == "localhost" {
		host = LOCALHOST
	}
	ip := net.ParseIP(host)
	switch {
	case ip == nil:
		// FQDN or unparseable: not classifiable here; any genuinely bad
		// address will surface later in host:port handling.
		return false, nil
	case ip.To4() != nil:
		// IPv4
		return false, nil
	case ip.To16() != nil:
		// IPv6
		return true, nil
	default:
		return false, fmt.Errorf("\nThis is an incorrect address %v", str)
	}
}
|
package lfs
import (
"bytes"
"io/ioutil"
"testing"
"github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert"
)
// TestWriterWithCallback verifies that CallbackReader invokes its progress
// callback once per Read with a running total of bytes consumed.
func TestWriterWithCallback(t *testing.T) {
	invocations := 0
	readTotals := make([]int64, 0, 2)
	r := &CallbackReader{
		TotalSize: 5,
		Reader:    bytes.NewBufferString("BOOYA"),
		C: func(total int64, read int64, current int) error {
			invocations++
			readTotals = append(readTotals, read)
			assert.Equal(t, 5, int(total))
			return nil
		},
	}
	buf := make([]byte, 3)
	// First read fills the 3-byte buffer ("BOO"); second drains the rest.
	n, err := r.Read(buf)
	assert.Equal(t, nil, err)
	assert.Equal(t, "BOO", string(buf[0:n]))
	n, err = r.Read(buf)
	assert.Equal(t, nil, err)
	assert.Equal(t, "YA", string(buf[0:n]))
	assert.Equal(t, 2, invocations)
	assert.Equal(t, 2, len(readTotals))
	assert.Equal(t, 3, int(readTotals[0]))
	assert.Equal(t, 5, int(readTotals[1]))
}
func TestCopyWithCallback(t *testing.T) {
buf := bytes.NewBufferString("BOOYA")
called := 0
calledWritten := make([]int64, 0, 2)
n, err := CopyWithCallback(ioutil.Discard, buf, 5, func(total int64, written int64, current int) error {
called += 1
calledWritten = append(calledWritten, written)
assert.Equal(t, 5, int(total))
return nil
})
assert.Equal(t, nil, err)
assert.Equal(t, 5, int(n))
assert.Equal(t, 1, called)
assert.Equal(t, 1, len(calledWritten))
assert.Equal(t, 5, int(calledWritten[0]))
}
// TestFilterIncludeExclude exercises FilenamePassesIncludeExcludeFilter for
// the fixed path "test/filename.dat" across include-only, exclude-only and
// combined pattern lists; patterns may be exact paths, directory prefixes or
// globs. The Windows branch repeats the matrix with backslash separators and
// can only run on Windows because filepath behaviour differs per OS.
func TestFilterIncludeExclude(t *testing.T) {
	// Inclusion
	assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", nil, nil))
	assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"test/filename.dat"}, nil))
	assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"blank", "something", "test/filename.dat", "foo"}, nil))
	assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"blank", "something", "foo"}, nil))
	assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"test/notfilename.dat"}, nil))
	assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"test"}, nil))
	assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"test/*"}, nil))
	assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"nottest"}, nil))
	assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"nottest/*"}, nil))
	assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"test/fil*"}, nil))
	assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"test/g*"}, nil))
	assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"tes*/*"}, nil))
	// Exclusion
	assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", nil, []string{"test/filename.dat"}))
	assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", nil, []string{"blank", "something", "test/filename.dat", "foo"}))
	assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", nil, []string{"blank", "something", "foo"}))
	assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", nil, []string{"test/notfilename.dat"}))
	assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", nil, []string{"test"}))
	assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", nil, []string{"test/*"}))
	assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", nil, []string{"nottest"}))
	assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", nil, []string{"nottest/*"}))
	assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", nil, []string{"test/fil*"}))
	assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", nil, []string{"test/g*"}))
	assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", nil, []string{"tes*/*"}))
	// Both
	assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"test/filename.dat"}, []string{"test/notfilename.dat"}))
	assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"test"}, []string{"test/filename.dat"}))
	assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"test/*"}, []string{"test/notfile*"}))
	assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"test/*"}, []string{"test/file*"}))
	assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"another/*", "test/*"}, []string{"test/notfilename.dat", "test/filename.dat"}))
	if IsWindows() {
		// Extra tests because Windows git reports filenames with / separators
		// but we need to allow \ separators in include/exclude too
		// Can only test this ON Windows because of filepath behaviour
		// Inclusion
		assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", nil, nil))
		assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"test\\filename.dat"}, nil))
		assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"blank", "something", "test\\filename.dat", "foo"}, nil))
		assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"blank", "something", "foo"}, nil))
		assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"test\\notfilename.dat"}, nil))
		assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"test"}, nil))
		assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"test\\*"}, nil))
		assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"nottest"}, nil))
		assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"nottest\\*"}, nil))
		assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"test\\fil*"}, nil))
		assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"test\\g*"}, nil))
		assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"tes*\\*"}, nil))
		// Exclusion
		assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", nil, []string{"test\\filename.dat"}))
		assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", nil, []string{"blank", "something", "test\\filename.dat", "foo"}))
		assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", nil, []string{"blank", "something", "foo"}))
		assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", nil, []string{"test\\notfilename.dat"}))
		assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", nil, []string{"test"}))
		assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", nil, []string{"test\\*"}))
		assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", nil, []string{"nottest"}))
		assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", nil, []string{"nottest\\*"}))
		assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", nil, []string{"test\\fil*"}))
		assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", nil, []string{"test\\g*"}))
		assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", nil, []string{"tes*\\*"}))
		// Both
		assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"test\\filename.dat"}, []string{"test\\notfilename.dat"}))
		assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"test"}, []string{"test\\filename.dat"}))
		assert.Equal(t, true, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"test\\*"}, []string{"test\\notfile*"}))
		assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"test\\*"}, []string{"test\\file*"}))
		assert.Equal(t, false, FilenamePassesIncludeExcludeFilter("test/filename.dat", []string{"another\\*", "test\\*"}, []string{"test\\notfilename.dat", "test\\filename.dat"}))
	}
}
ラララララ ラー ララララー
package lfs
import (
"bytes"
"io/ioutil"
"strings"
"testing"
"github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert"
)
// TestWriterWithCallback verifies that CallbackReader invokes its progress
// callback once per Read with a running total of bytes consumed.
func TestWriterWithCallback(t *testing.T) {
	called := 0
	calledRead := make([]int64, 0, 2)
	reader := &CallbackReader{
		TotalSize: 5,
		Reader:    bytes.NewBufferString("BOOYA"),
		C: func(total int64, read int64, current int) error {
			called += 1
			calledRead = append(calledRead, read)
			assert.Equal(t, 5, int(total))
			return nil
		},
	}
	readBuf := make([]byte, 3)
	// First read fills the 3-byte buffer ("BOO"); second drains the rest.
	n, err := reader.Read(readBuf)
	assert.Equal(t, nil, err)
	assert.Equal(t, "BOO", string(readBuf[0:n]))
	n, err = reader.Read(readBuf)
	assert.Equal(t, nil, err)
	assert.Equal(t, "YA", string(readBuf[0:n]))
	assert.Equal(t, 2, called)
	assert.Equal(t, 2, len(calledRead))
	assert.Equal(t, 3, int(calledRead[0]))
	assert.Equal(t, 5, int(calledRead[1]))
}
// TestCopyWithCallback verifies that CopyWithCallback copies all bytes and
// reports progress through the callback.
func TestCopyWithCallback(t *testing.T) {
	buf := bytes.NewBufferString("BOOYA")
	called := 0
	calledWritten := make([]int64, 0, 2)
	n, err := CopyWithCallback(ioutil.Discard, buf, 5, func(total int64, written int64, current int) error {
		called += 1
		calledWritten = append(calledWritten, written)
		assert.Equal(t, 5, int(total))
		return nil
	})
	assert.Equal(t, nil, err)
	assert.Equal(t, 5, int(n))
	assert.Equal(t, 1, called)
	assert.Equal(t, 1, len(calledWritten))
	assert.Equal(t, 5, int(calledWritten[0]))
}
// TestIncludeExcludeCase describes one include/exclude filter scenario: the
// pattern lists and the result expected from
// FilenamePassesIncludeExcludeFilter for the fixed path "test/filename.dat".
type TestIncludeExcludeCase struct {
	expectedResult bool     // expected filter outcome for the fixed path
	includes       []string // include patterns (nil in the null case, which passes)
	excludes       []string // exclude patterns (nil means nothing excluded)
}
// TestFilterIncludeExclude drives FilenamePassesIncludeExcludeFilter through
// a table of include/exclude pattern combinations against the fixed path
// "test/filename.dat". On Windows each case is re-run with backslash
// separators, since git reports '/' but configurations may use '\'.
func TestFilterIncludeExclude(t *testing.T) {
	cases := []TestIncludeExcludeCase{
		// Null case
		TestIncludeExcludeCase{true, nil, nil},
		// Inclusion
		TestIncludeExcludeCase{true, []string{"test/filename.dat"}, nil},
		// BUGFIX: this entry duplicated the previous case; restored the
		// multi-pattern include list from the original assertion suite.
		TestIncludeExcludeCase{true, []string{"blank", "something", "test/filename.dat", "foo"}, nil},
		TestIncludeExcludeCase{false, []string{"blank", "something", "foo"}, nil},
		TestIncludeExcludeCase{false, []string{"test/notfilename.dat"}, nil},
		TestIncludeExcludeCase{true, []string{"test"}, nil},
		TestIncludeExcludeCase{true, []string{"test/*"}, nil},
		TestIncludeExcludeCase{false, []string{"nottest"}, nil},
		TestIncludeExcludeCase{false, []string{"nottest/*"}, nil},
		TestIncludeExcludeCase{true, []string{"test/fil*"}, nil},
		TestIncludeExcludeCase{false, []string{"test/g*"}, nil},
		TestIncludeExcludeCase{true, []string{"tes*/*"}, nil},
		// Exclusion
		TestIncludeExcludeCase{false, nil, []string{"test/filename.dat"}},
		TestIncludeExcludeCase{false, nil, []string{"blank", "something", "test/filename.dat", "foo"}},
		TestIncludeExcludeCase{true, nil, []string{"blank", "something", "foo"}},
		TestIncludeExcludeCase{true, nil, []string{"test/notfilename.dat"}},
		TestIncludeExcludeCase{false, nil, []string{"test"}},
		TestIncludeExcludeCase{false, nil, []string{"test/*"}},
		TestIncludeExcludeCase{true, nil, []string{"nottest"}},
		TestIncludeExcludeCase{true, nil, []string{"nottest/*"}},
		TestIncludeExcludeCase{false, nil, []string{"test/fil*"}},
		TestIncludeExcludeCase{true, nil, []string{"test/g*"}},
		TestIncludeExcludeCase{false, nil, []string{"tes*/*"}},
		// Both
		TestIncludeExcludeCase{true, []string{"test/filename.dat"}, []string{"test/notfilename.dat"}},
		TestIncludeExcludeCase{false, []string{"test"}, []string{"test/filename.dat"}},
		TestIncludeExcludeCase{true, []string{"test/*"}, []string{"test/notfile*"}},
		TestIncludeExcludeCase{false, []string{"test/*"}, []string{"test/file*"}},
		TestIncludeExcludeCase{false, []string{"another/*", "test/*"}, []string{"test/notfilename.dat", "test/filename.dat"}},
	}
	for _, c := range cases {
		assert.Equal(t, c.expectedResult, FilenamePassesIncludeExcludeFilter("test/filename.dat", c.includes, c.excludes), c)
		if IsWindows() {
			// also test with \ path separators, tolerate mixed separators
			for i, inc := range c.includes {
				c.includes[i] = strings.Replace(inc, "/", "\\", -1)
			}
			for i, ex := range c.excludes {
				c.excludes[i] = strings.Replace(ex, "/", "\\", -1)
			}
			assert.Equal(t, c.expectedResult, FilenamePassesIncludeExcludeFilter("test/filename.dat", c.includes, c.excludes), c)
		}
	}
}
|
package freetree
import "testing"
// -----------------------------------------------------------------------------
// TestFreeTree_pair_sorted_input builds a FreeTree from a SimpleTree fed with
// sorted input and checks the flattened layout plus Ascend hits and misses.
func TestFreeTree_pair_sorted_input(t *testing.T) {
	expected := ComparableArray{intTest(1), intTest(3), intTest(2), intTest(5), intTest(6), intTest(4)}
	st := NewSimpleTree()
	cs := ComparableArray{intTest(1), intTest(2), intTest(3), intTest(4), intTest(5), intTest(6)}
	st.InsertArray(cs)
	ft, err := NewFreeTree(st)
	if err != nil {
		t.Error(err)
	}
	flat := ft.Flatten()
	// BUGFIX: indexing flat with expected's indices panicked when the
	// flattened tree came back shorter; assert the lengths first.
	if len(flat) != len(expected) {
		t.Fatalf("expected %d elements, got %d", len(expected), len(flat))
	}
	for i := range expected {
		if flat[i] != expected[i] {
			t.Error("expected != flat")
			t.FailNow()
		}
	}
	if ft.Ascend(intTest(1)).(intTest) != intTest(1) {
		t.Error("unexpected retval")
	}
	if ft.Ascend(intTest(2)).(intTest) != intTest(2) {
		t.Error("unexpected retval")
	}
	if ft.Ascend(intTest(5)).(intTest) != intTest(5) {
		t.Error("unexpected retval")
	}
	if ft.Ascend(intTest(6)).(intTest) != intTest(6) {
		t.Error("unexpected retval")
	}
	if ft.Ascend(intTest(7)) != nil {
		t.Error("unexpected retval")
	}
	if ft.Ascend(intTest(8)) != nil {
		t.Error("unexpected retval")
	}
}
// TestFreeTree_pair_sorted_input_rebalanced is the same scenario as
// TestFreeTree_pair_sorted_input but builds the FreeTree from the rebalanced
// SimpleTree; the expected flattened layout is identical.
func TestFreeTree_pair_sorted_input_rebalanced(t *testing.T) {
	expected := ComparableArray{intTest(1), intTest(3), intTest(2), intTest(5), intTest(6), intTest(4)}
	st := NewSimpleTree()
	cs := ComparableArray{intTest(1), intTest(2), intTest(3), intTest(4), intTest(5), intTest(6)}
	st.InsertArray(cs)
	ft, err := NewFreeTree(st.Rebalance())
	if err != nil {
		t.Error(err)
	}
	flat := ft.Flatten()
	// NOTE(review): ranging over flat silently passes if flat is shorter
	// than expected — consider comparing lengths first.
	for i := range flat {
		if flat[i] != expected[i] {
			t.Error("expected != flat")
			t.FailNow()
		}
	}
	// Ascend must return each present element and nil for absent keys.
	if ft.Ascend(intTest(1)).(intTest) != intTest(1) {
		t.Error("unexpected retval")
	}
	if ft.Ascend(intTest(2)).(intTest) != intTest(2) {
		t.Error("unexpected retval")
	}
	if ft.Ascend(intTest(5)).(intTest) != intTest(5) {
		t.Error("unexpected retval")
	}
	if ft.Ascend(intTest(6)).(intTest) != intTest(6) {
		t.Error("unexpected retval")
	}
	if ft.Ascend(intTest(7)) != nil {
		t.Error("unexpected retval")
	}
	if ft.Ascend(intTest(8)) != nil {
		t.Error("unexpected retval")
	}
}
// TestFreeTree_pair_unsorted_input builds a FreeTree from a SimpleTree fed
// with unsorted input and checks the flattened layout plus Ascend behavior.
func TestFreeTree_pair_unsorted_input(t *testing.T) {
	expected := ComparableArray{intTest(3), intTest(2), intTest(6), intTest(5), intTest(4), intTest(1)}
	st := NewSimpleTree()
	cs := ComparableArray{intTest(5), intTest(4), intTest(6), intTest(1), intTest(3), intTest(2)}
	st.InsertArray(cs)
	ft, err := NewFreeTree(st)
	if err != nil {
		t.Error(err)
	}
	flat := ft.Flatten()
	// BUGFIX: ranging over flat silently passed when flat was shorter than
	// expected (and panicked when longer); assert the lengths first.
	if len(flat) != len(expected) {
		t.Fatalf("expected %d elements, got %d", len(expected), len(flat))
	}
	for i := range flat {
		if flat[i] != expected[i] {
			t.Error("expected != flat")
			t.FailNow()
		}
	}
	if ft.Ascend(intTest(1)).(intTest) != intTest(1) {
		t.Error("unexpected retval")
	}
	if ft.Ascend(intTest(2)).(intTest) != intTest(2) {
		t.Error("unexpected retval")
	}
	if ft.Ascend(intTest(5)).(intTest) != intTest(5) {
		t.Error("unexpected retval")
	}
	if ft.Ascend(intTest(6)).(intTest) != intTest(6) {
		t.Error("unexpected retval")
	}
	if ft.Ascend(intTest(7)) != nil {
		t.Error("unexpected retval")
	}
	if ft.Ascend(intTest(8)) != nil {
		t.Error("unexpected retval")
	}
}
//func TestFreeTree_pair_unsorted_input_rebalanced(t *testing.T) {
//expected := ComparableArray{intTest(1), intTest(3), intTest(2), intTest(5), intTest(6), intTest(4)}
//st := NewFreeTree()
//cs := ComparableArray{intTest(5), intTest(4), intTest(6), intTest(1), intTest(3), intTest(2)}
//st.InsertArray(cs)
//st.Rebalance()
//flat := st.Flatten()
//for i := range flat {
//if flat[i] != expected[i] {
//t.Error("expected != flat")
//t.FailNow()
//}
//}
//if st.Ascend(intTest(1)).(intTest) != intTest(1) {
//t.Error("unexpected retval")
//}
//if st.Ascend(intTest(2)).(intTest) != intTest(2) {
//t.Error("unexpected retval")
//}
//if st.Ascend(intTest(5)).(intTest) != intTest(5) {
//t.Error("unexpected retval")
//}
//if st.Ascend(intTest(6)).(intTest) != intTest(6) {
//t.Error("unexpected retval")
//}
//if st.Ascend(intTest(7)) != nil {
//log.Println(st.Ascend(intTest(7)).(intTest))
//t.Error("unexpected retval")
//}
//if st.Ascend(intTest(8)) != nil {
//log.Println(st.Ascend(intTest(8)).(intTest))
//t.Error("unexpected retval")
//}
//}
freetree: add TestFreeTree_pair_unsorted_input_rebalanced
package freetree
import "testing"
// -----------------------------------------------------------------------------
// TestFreeTree_pair_sorted_input builds a FreeTree from a SimpleTree fed
// with sorted input and checks the flattened layout and Ascend lookups.
func TestFreeTree_pair_sorted_input(t *testing.T) {
	// Expected flattened layout of the resulting tree.
	expected := ComparableArray{intTest(1), intTest(3), intTest(2), intTest(5), intTest(6), intTest(4)}
	st := NewSimpleTree()
	cs := ComparableArray{intTest(1), intTest(2), intTest(3), intTest(4), intTest(5), intTest(6)}
	st.InsertArray(cs)
	ft, err := NewFreeTree(st)
	if err != nil {
		// Fatal, not Error: the tree is unusable if construction failed.
		t.Fatal(err)
	}
	flat := ft.Flatten()
	// Check the length explicitly: indexing flat[i] while ranging over
	// expected would otherwise panic on a short result instead of
	// reporting a clean test failure.
	if len(flat) != len(expected) {
		t.Fatalf("len(flat) = %d, want %d", len(flat), len(expected))
	}
	for i := range expected {
		if flat[i] != expected[i] {
			t.Fatalf("flat[%d] = %v, want %v", i, flat[i], expected[i])
		}
	}
	// Values present in the tree must be found by Ascend.
	for _, v := range []intTest{intTest(1), intTest(2), intTest(5), intTest(6)} {
		got, ok := ft.Ascend(v).(intTest)
		if !ok || got != v {
			t.Errorf("Ascend(%v) = %v, want %v", v, ft.Ascend(v), v)
		}
	}
	// Absent values must yield nil.
	for _, v := range []intTest{intTest(7), intTest(8)} {
		if got := ft.Ascend(v); got != nil {
			t.Errorf("Ascend(%v) = %v, want nil", v, got)
		}
	}
}
// TestFreeTree_pair_sorted_input_rebalanced builds a FreeTree from a
// rebalanced SimpleTree fed with sorted input and checks both the
// flattened layout and Ascend lookups.
func TestFreeTree_pair_sorted_input_rebalanced(t *testing.T) {
	// Expected flattened layout of the rebalanced tree.
	expected := ComparableArray{intTest(1), intTest(3), intTest(2), intTest(5), intTest(6), intTest(4)}
	st := NewSimpleTree()
	cs := ComparableArray{intTest(1), intTest(2), intTest(3), intTest(4), intTest(5), intTest(6)}
	st.InsertArray(cs)
	ft, err := NewFreeTree(st.Rebalance())
	if err != nil {
		// Fatal, not Error: the tree is unusable if construction failed.
		t.Fatal(err)
	}
	flat := ft.Flatten()
	// Guard the length first: ranging over a short (or empty) flat slice
	// would otherwise pass vacuously.
	if len(flat) != len(expected) {
		t.Fatalf("len(flat) = %d, want %d", len(flat), len(expected))
	}
	for i := range expected {
		if flat[i] != expected[i] {
			t.Fatalf("flat[%d] = %v, want %v", i, flat[i], expected[i])
		}
	}
	// Values present in the tree must be found by Ascend.
	for _, v := range []intTest{intTest(1), intTest(2), intTest(5), intTest(6)} {
		got, ok := ft.Ascend(v).(intTest)
		if !ok || got != v {
			t.Errorf("Ascend(%v) = %v, want %v", v, ft.Ascend(v), v)
		}
	}
	// Absent values must yield nil.
	for _, v := range []intTest{intTest(7), intTest(8)} {
		if got := ft.Ascend(v); got != nil {
			t.Errorf("Ascend(%v) = %v, want nil", v, got)
		}
	}
}
// TestFreeTree_pair_unsorted_input builds a FreeTree from an unrebalanced
// SimpleTree fed with unsorted input and checks the flattened layout and
// Ascend lookups.
func TestFreeTree_pair_unsorted_input(t *testing.T) {
	// Expected flattened layout for this particular insertion order.
	expected := ComparableArray{intTest(3), intTest(2), intTest(6), intTest(5), intTest(4), intTest(1)}
	st := NewSimpleTree()
	cs := ComparableArray{intTest(5), intTest(4), intTest(6), intTest(1), intTest(3), intTest(2)}
	st.InsertArray(cs)
	ft, err := NewFreeTree(st)
	if err != nil {
		// Fatal, not Error: the tree is unusable if construction failed.
		t.Fatal(err)
	}
	flat := ft.Flatten()
	// Guard the length first: ranging over a short (or empty) flat slice
	// would otherwise pass vacuously.
	if len(flat) != len(expected) {
		t.Fatalf("len(flat) = %d, want %d", len(flat), len(expected))
	}
	for i := range expected {
		if flat[i] != expected[i] {
			t.Fatalf("flat[%d] = %v, want %v", i, flat[i], expected[i])
		}
	}
	// Values present in the tree must be found by Ascend.
	for _, v := range []intTest{intTest(1), intTest(2), intTest(5), intTest(6)} {
		got, ok := ft.Ascend(v).(intTest)
		if !ok || got != v {
			t.Errorf("Ascend(%v) = %v, want %v", v, ft.Ascend(v), v)
		}
	}
	// Absent values must yield nil.
	for _, v := range []intTest{intTest(7), intTest(8)} {
		if got := ft.Ascend(v); got != nil {
			t.Errorf("Ascend(%v) = %v, want nil", v, got)
		}
	}
}
// TestFreeTree_pair_unsorted_input_rebalanced builds a FreeTree from a
// rebalanced SimpleTree fed with unsorted input and checks both the
// flattened layout and Ascend lookups.
func TestFreeTree_pair_unsorted_input_rebalanced(t *testing.T) {
	// Expected flattened layout of the rebalanced tree.
	expected := ComparableArray{intTest(1), intTest(3), intTest(2), intTest(5), intTest(6), intTest(4)}
	st := NewSimpleTree()
	cs := ComparableArray{intTest(5), intTest(4), intTest(6), intTest(1), intTest(3), intTest(2)}
	st.InsertArray(cs)
	ft, err := NewFreeTree(st.Rebalance())
	if err != nil {
		// Fatal, not Error: the tree is unusable if construction failed.
		t.Fatal(err)
	}
	flat := ft.Flatten()
	// Guard the length first: ranging over a short (or empty) flat slice
	// would otherwise pass vacuously.
	if len(flat) != len(expected) {
		t.Fatalf("len(flat) = %d, want %d", len(flat), len(expected))
	}
	for i := range expected {
		if flat[i] != expected[i] {
			t.Fatalf("flat[%d] = %v, want %v", i, flat[i], expected[i])
		}
	}
	// Values present in the tree must be found by Ascend.
	for _, v := range []intTest{intTest(1), intTest(2), intTest(5), intTest(6)} {
		got, ok := ft.Ascend(v).(intTest)
		if !ok || got != v {
			t.Errorf("Ascend(%v) = %v, want %v", v, ft.Ascend(v), v)
		}
	}
	// Absent values must yield nil.
	for _, v := range []intTest{intTest(7), intTest(8)} {
		if got := ft.Ascend(v); got != nil {
			t.Errorf("Ascend(%v) = %v, want nil", v, got)
		}
	}
}
|
// Copyright (c) 2013-2017 The btcsuite developers
// Copyright (c) 2015-2016 The Decred developers
// Heavily inspired by https://github.com/btcsuite/btcd/blob/master/version.go
// Copyright (C) 2015-2017 The Lightning Network Developers
package build
import (
"bytes"
"fmt"
"strings"
)
// Commit stores the current commit hash of this build, this should be set using
// the -ldflags during compilation.
// NOTE(review): typically injected with `-X <module path>/build.Commit=<hash>`;
// confirm the exact package path against the build scripts.
var Commit string

// semanticAlphabet is the set of characters that are permitted for use in an
// AppPreRelease.
const semanticAlphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-"

// These constants define the application version and follow the semantic
// versioning 2.0.0 spec (http://semver.org/).
const (
	// AppMajor defines the major version of this binary.
	AppMajor uint = 0

	// AppMinor defines the minor version of this binary.
	AppMinor uint = 9

	// AppPatch defines the application patch for this binary.
	AppPatch uint = 0

	// AppPreRelease MUST only contain characters from semanticAlphabet
	// per the semantic versioning spec.
	AppPreRelease = "beta"
)
// Version returns the application version as a properly formed string per the
// semantic versioning 2.0.0 spec (http://semver.org/).
func Version() string {
	// The major.minor.patch triple forms the core of the version string.
	v := fmt.Sprintf("%d.%d.%d", AppMajor, AppMinor, AppPatch)

	// A pre-release tag follows a hyphen, per the spec; the hyphen is
	// added here and must not appear inside the tag itself. The tag is
	// first stripped of characters outside semanticAlphabet, and dropped
	// entirely if nothing valid remains.
	if pre := normalizeVerString(AppPreRelease); pre != "" {
		v += "-" + pre
	}

	// Attach the commit hash injected at build time via -ldflags.
	return fmt.Sprintf("%s commit=%s", v, Commit)
}
// normalizeVerString returns the passed string stripped of all characters
// which are not valid according to the semantic versioning guidelines for
// pre-release version and build metadata strings. In particular they MUST
// only contain characters in semanticAlphabet.
func normalizeVerString(str string) string {
	var buf bytes.Buffer
	for _, c := range str {
		// Skip anything outside the permitted alphabet.
		if !strings.ContainsRune(semanticAlphabet, c) {
			continue
		}
		buf.WriteRune(c)
	}
	return buf.String()
}
build/version: check AppPreRelease semantics in init()
We now perform the validation at program start, in init(), so that the
AppPreRelease constant can safely be used externally without needing to
be normalized first.
// Copyright (c) 2013-2017 The btcsuite developers
// Copyright (c) 2015-2016 The Decred developers
// Heavily inspired by https://github.com/btcsuite/btcd/blob/master/version.go
// Copyright (C) 2015-2017 The Lightning Network Developers
package build
import (
"fmt"
"strings"
)
// Commit stores the current commit hash of this build, this should be set using
// the -ldflags during compilation.
// NOTE(review): typically injected with `-X <module path>/build.Commit=<hash>`;
// confirm the exact package path against the build scripts.
var Commit string

// semanticAlphabet is the set of characters that are permitted for use in an
// AppPreRelease.
const semanticAlphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-"

// These constants define the application version and follow the semantic
// versioning 2.0.0 spec (http://semver.org/).
const (
	// AppMajor defines the major version of this binary.
	AppMajor uint = 0

	// AppMinor defines the minor version of this binary.
	AppMinor uint = 9

	// AppPatch defines the application patch for this binary.
	AppPatch uint = 0

	// AppPreRelease MUST only contain characters from semanticAlphabet
	// per the semantic versioning spec.
	AppPreRelease = "beta"
)
// init validates AppPreRelease once at program start. An invalid value is
// a programmer error in this file's constants, so it panics rather than
// letting a malformed version string propagate.
func init() {
	// Assert that AppPreRelease is valid according to the semantic
	// versioning guidelines for pre-release version and build metadata
	// strings. In particular it MUST only contain characters in
	// semanticAlphabet.
	for _, r := range AppPreRelease {
		if !strings.ContainsRune(semanticAlphabet, r) {
			panic(fmt.Errorf("rune: %v is not in the semantic "+
				"alphabet", r))
		}
	}
}
// Version returns the application version as a properly formed string per the
// semantic versioning 2.0.0 spec (http://semver.org/).
func Version() string {
	// The major.minor.patch triple forms the core of the version string.
	v := fmt.Sprintf("%d.%d.%d", AppMajor, AppMinor, AppPatch)

	// A pre-release tag (validated in init) follows a hyphen, per the
	// spec; the hyphen is added here and must not appear inside the tag.
	if AppPreRelease != "" {
		v += "-" + AppPreRelease
	}

	// Attach the commit hash injected at build time via -ldflags.
	return fmt.Sprintf("%s commit=%s", v, Commit)
}
|
package consul
import (
"fmt"
"log"
"math/rand"
"net"
"os"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/hashicorp/consul/agent/consul/agent"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/testutil/retry"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/go-uuid"
)
// getPort picks a pseudo-random TCP port in the non-privileged range
// [1030, 65430) for test servers.
func getPort() int {
	const (
		lowest = 1030  // stay above well-known/privileged ports
		span   = 64400 // number of candidate ports
	)
	return lowest + int(rand.Int31n(span))
}
// configureTLS points config at the CA certificate and server keypair
// checked into the repository's test fixtures.
func configureTLS(config *Config) {
	config.CAFile = "../../test/ca/root.cer"
	config.CertFile = "../../test/key/ourdomain.cer"
	config.KeyFile = "../../test/key/ourdomain.key"
}
// id is a process-wide counter used to make test node names unique.
var id int64

// uniqueNodeName appends a monotonically increasing suffix to name so
// that servers created by concurrently running tests never collide on
// node name.
func uniqueNodeName(name string) string {
	return fmt.Sprintf("%s-node-%d", name, atomic.AddInt64(&id, 1))
}
// testServerConfig returns a config for a single test server plus the
// temp data directory backing it (the caller removes the dir). Ports are
// randomized and all timing is shortened so test clusters converge fast.
func testServerConfig(t *testing.T) (string, *Config) {
	dir := testutil.TempDir(t, "consul")
	config := DefaultConfig()
	config.NodeName = uniqueNodeName(t.Name())
	config.Bootstrap = true
	config.Datacenter = "dc1"
	config.DataDir = dir
	// bind the rpc server to a random port. config.RPCAdvertise will be
	// set to the listen address unless it was set in the configuration.
	// In that case get the address from srv.Listener.Addr().
	config.RPCAddr = &net.TCPAddr{IP: []byte{127, 0, 0, 1}}
	nodeID, err := uuid.GenerateUUID()
	if err != nil {
		t.Fatal(err)
	}
	config.NodeID = types.NodeID(nodeID)
	// set the memberlist bind port to 0 to bind to a random port.
	// memberlist will update the value of BindPort after bind
	// to the actual value.
	config.SerfLANConfig.MemberlistConfig.BindAddr = "127.0.0.1"
	config.SerfLANConfig.MemberlistConfig.BindPort = 0
	config.SerfLANConfig.MemberlistConfig.SuspicionMult = 2
	config.SerfLANConfig.MemberlistConfig.ProbeTimeout = 50 * time.Millisecond
	config.SerfLANConfig.MemberlistConfig.ProbeInterval = 100 * time.Millisecond
	config.SerfLANConfig.MemberlistConfig.GossipInterval = 100 * time.Millisecond
	config.SerfWANConfig.MemberlistConfig.BindAddr = "127.0.0.1"
	config.SerfWANConfig.MemberlistConfig.BindPort = 0
	config.SerfWANConfig.MemberlistConfig.SuspicionMult = 2
	config.SerfWANConfig.MemberlistConfig.ProbeTimeout = 50 * time.Millisecond
	config.SerfWANConfig.MemberlistConfig.ProbeInterval = 100 * time.Millisecond
	config.SerfWANConfig.MemberlistConfig.GossipInterval = 100 * time.Millisecond
	// Aggressive raft/reconcile/autopilot timing: fine for loopback
	// tests, far too tight for production.
	config.RaftConfig.LeaderLeaseTimeout = 100 * time.Millisecond
	config.RaftConfig.HeartbeatTimeout = 200 * time.Millisecond
	config.RaftConfig.ElectionTimeout = 200 * time.Millisecond
	config.ReconcileInterval = 300 * time.Millisecond
	config.AutopilotConfig.ServerStabilizationTime = 100 * time.Millisecond
	config.ServerHealthInterval = 50 * time.Millisecond
	config.AutopilotInterval = 100 * time.Millisecond
	config.Build = "0.8.0"
	config.CoordinateUpdatePeriod = 100 * time.Millisecond
	return dir, config
}
// testServer starts a bootstrap server in the default datacenter "dc1"
// and returns its temp data dir and the server.
func testServer(t *testing.T) (string, *Server) {
	// A default test server is just a bootstrapped dc1 server; delegate
	// to the parameterized helper rather than repeat the callback.
	return testServerDCBootstrap(t, "dc1", true)
}
// testServerDC starts a bootstrap server in datacenter dc and returns
// its temp data dir and the server.
func testServerDC(t *testing.T, dc string) (string, *Server) {
	// Same as testServerDCBootstrap with bootstrap always enabled.
	return testServerDCBootstrap(t, dc, true)
}
// testServerDCBootstrap starts a server in datacenter dc with bootstrap
// mode set explicitly, returning its temp data dir and the server.
func testServerDCBootstrap(t *testing.T, dc string, bootstrap bool) (string, *Server) {
	return testServerWithConfig(t, func(c *Config) {
		c.Datacenter = dc
		c.Bootstrap = bootstrap
	})
}
// testServerDCExpect starts a non-bootstrap server in datacenter dc that
// waits for `expect` servers before bootstrapping the cluster.
func testServerDCExpect(t *testing.T, dc string, expect int) (string, *Server) {
	return testServerWithConfig(t, func(c *Config) {
		c.Datacenter = dc
		c.Bootstrap = false
		c.BootstrapExpect = expect
	})
}
// testServerWithConfig builds a default test config, lets cb mutate it,
// and starts a server from the result. Fails the test on startup errors.
func testServerWithConfig(t *testing.T, cb func(*Config)) (string, *Server) {
	dir, config := testServerConfig(t)
	if cb != nil {
		cb(config)
	}
	srv, err := newServer(config)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	return dir, srv
}
// newServer starts a Server from c and blocks until its listener is up,
// then writes the actual listen address back into c.RPCAddr so callers
// can dial it.
func newServer(c *Config) (*Server, error) {
	// chain server up notification
	oldNotify := c.NotifyListen
	up := make(chan struct{})
	c.NotifyListen = func() {
		close(up)
		if oldNotify != nil {
			oldNotify()
		}
	}
	// start server
	w := c.LogOutput
	if w == nil {
		w = os.Stderr
	}
	// Prefix each log line with the node name so interleaved output from
	// multiple test servers stays attributable.
	logger := log.New(w, c.NodeName+" - ", log.LstdFlags|log.Lmicroseconds)
	srv, err := NewServerLogger(c, logger)
	if err != nil {
		return nil, err
	}
	// wait until after listen
	<-up
	// get the real address
	//
	// the server already sets the RPCAdvertise address
	// if it wasn't configured since it needs it for
	// some initialization
	//
	// todo(fs): setting RPCAddr should probably be guarded
	// todo(fs): but for now it is a shortcut to avoid fixing
	// todo(fs): tests which depend on that value. They should
	// todo(fs): just get the listener address instead.
	c.RPCAddr = srv.Listener.Addr().(*net.TCPAddr)
	return srv, nil
}
// TestServer_StartStop checks that Shutdown succeeds and is idempotent.
func TestServer_StartStop(t *testing.T) {
	t.Parallel()
	// Start up a server and then stop it.
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	if err := s1.Shutdown(); err != nil {
		t.Fatalf("err: %v", err)
	}
	// Shut down again, which should be idempotent.
	if err := s1.Shutdown(); err != nil {
		t.Fatalf("err: %v", err)
	}
}
// TestServer_JoinLAN verifies that after a LAN join both servers report
// two LAN members.
func TestServer_JoinLAN(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	dir2, s2 := testServer(t)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	// Try to join
	joinLAN(t, s2, s1)
	// Membership propagates via gossip, so poll with retry.
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.LANMembers()), 2; got != want {
			r.Fatalf("got %d s1 LAN members want %d", got, want)
		}
		if got, want := len(s2.LANMembers()), 2; got != want {
			r.Fatalf("got %d s2 LAN members want %d", got, want)
		}
	})
}
// TestServer_JoinWAN verifies WAN membership after a WAN join and that
// each server's router learns both datacenters.
func TestServer_JoinWAN(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	dir2, s2 := testServerDC(t, "dc2")
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	// Try to join
	joinWAN(t, s2, s1)
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.WANMembers()), 2; got != want {
			r.Fatalf("got %d s1 WAN members want %d", got, want)
		}
		if got, want := len(s2.WANMembers()), 2; got != want {
			r.Fatalf("got %d s2 WAN members want %d", got, want)
		}
	})
	// Check the router has both
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.router.GetDatacenters()), 2; got != want {
			r.Fatalf("got %d routes want %d", got, want)
		}
		if got, want := len(s2.router.GetDatacenters()), 2; got != want {
			r.Fatalf("got %d datacenters want %d", got, want)
		}
	})
}
// TestServer_JoinWAN_Flood verifies that a server joined only on the LAN
// is flooded into the WAN membership seen by every datacenter.
func TestServer_JoinWAN_Flood(t *testing.T) {
	t.Parallel()
	// Set up two servers in a WAN.
	dir1, s1 := testServerDCBootstrap(t, "dc1", true)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	dir2, s2 := testServerDCBootstrap(t, "dc2", true)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	joinWAN(t, s2, s1)
	for _, s := range []*Server{s1, s2} {
		retry.Run(t, func(r *retry.R) {
			if got, want := len(s.WANMembers()), 2; got != want {
				r.Fatalf("got %d WAN members want %d", got, want)
			}
		})
	}
	dir3, s3 := testServerDCBootstrap(t, "dc1", false)
	defer os.RemoveAll(dir3)
	defer s3.Shutdown()
	// Do just a LAN join for the new server and make sure it
	// shows up in the WAN.
	joinLAN(t, s3, s1)
	for _, s := range []*Server{s1, s2, s3} {
		retry.Run(t, func(r *retry.R) {
			if got, want := len(s.WANMembers()), 3; got != want {
				r.Fatalf("got %d WAN members for %s want %d", got, s.config.NodeName, want)
			}
		})
	}
}
// TestServer_JoinSeparateLanAndWanAddresses verifies that a server
// advertising different LAN and WAN addresses is seen on the right
// address by WAN peers (s1) and LAN peers (s3) respectively.
func TestServer_JoinSeparateLanAndWanAddresses(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.NodeName = "s2"
		c.Datacenter = "dc2"
		// This wan address will be expected to be seen on s1
		c.SerfWANConfig.MemberlistConfig.AdvertiseAddr = "127.0.0.2"
		// This lan address will be expected to be seen on s3
		c.SerfLANConfig.MemberlistConfig.AdvertiseAddr = "127.0.0.3"
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	dir3, s3 := testServerDC(t, "dc2")
	defer os.RemoveAll(dir3)
	defer s3.Shutdown()
	// Join s2 to s1 on wan
	joinWAN(t, s2, s1)
	// Join s3 to s2 on lan
	joinLAN(t, s3, s2)
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.WANMembers()), 2; got != want {
			r.Fatalf("got %d s1 WAN members want %d", got, want)
		}
		if got, want := len(s2.WANMembers()), 2; got != want {
			r.Fatalf("got %d s2 WAN members want %d", got, want)
		}
		if got, want := len(s2.LANMembers()), 2; got != want {
			r.Fatalf("got %d s2 LAN members want %d", got, want)
		}
		if got, want := len(s3.LANMembers()), 2; got != want {
			r.Fatalf("got %d s3 LAN members want %d", got, want)
		}
	})
	// Check the router has both
	retry.Run(t, func(r *retry.R) {
		if len(s1.router.GetDatacenters()) != 2 {
			r.Fatalf("remote consul missing")
		}
		if len(s2.router.GetDatacenters()) != 2 {
			r.Fatalf("remote consul missing")
		}
		if len(s2.localConsuls) != 2 {
			r.Fatalf("local consul fellow s3 for s2 missing")
		}
	})
	// Get and check the wan address of s2 from s1
	var s2WanAddr string
	for _, member := range s1.WANMembers() {
		if member.Name == "s2.dc2" {
			s2WanAddr = member.Addr.String()
		}
	}
	if s2WanAddr != "127.0.0.2" {
		t.Fatalf("s1 sees s2 on a wrong address: %s, expecting: %s", s2WanAddr, "127.0.0.2")
	}
	// Get and check the lan address of s2 from s3
	var s2LanAddr string
	for _, lanmember := range s3.LANMembers() {
		if lanmember.Name == "s2" {
			s2LanAddr = lanmember.Addr.String()
		}
	}
	if s2LanAddr != "127.0.0.3" {
		t.Fatalf("s3 sees s2 on a wrong address: %s, expecting: %s", s2LanAddr, "127.0.0.3")
	}
}
// TestServer_LeaveLeader verifies that when the current leader leaves a
// two-server cluster, both remaining views converge to a single peer.
func TestServer_LeaveLeader(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	// Second server not in bootstrap mode
	dir2, s2 := testServerDCBootstrap(t, "dc1", false)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	// Try to join
	joinLAN(t, s2, s1)
	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	testrpc.WaitForLeader(t, s2.RPC, "dc1")
	// Issue a leave to the leader
	var err error
	switch {
	case s1.IsLeader():
		err = s1.Leave()
	case s2.IsLeader():
		err = s2.Leave()
	default:
		t.Fatal("no leader")
	}
	if err != nil {
		t.Fatal("leave failed: ", err)
	}
	// Should lose a peer
	retry.Run(t, func(r *retry.R) {
		r.Check(wantPeers(s1, 1))
		r.Check(wantPeers(s2, 1))
	})
}
// TestServer_Leave verifies that when a non-leader leaves a two-server
// cluster, both remaining views converge to a single peer.
func TestServer_Leave(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	// Second server not in bootstrap mode
	dir2, s2 := testServerDCBootstrap(t, "dc1", false)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	// Try to join
	joinLAN(t, s2, s1)
	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	testrpc.WaitForLeader(t, s2.RPC, "dc1")
	// Issue a leave to the non-leader
	var err error
	switch {
	case s1.IsLeader():
		err = s2.Leave()
	case s2.IsLeader():
		err = s1.Leave()
	default:
		t.Fatal("no leader")
	}
	if err != nil {
		t.Fatal("leave failed: ", err)
	}
	// Should lose a peer
	retry.Run(t, func(r *retry.R) {
		r.Check(wantPeers(s1, 1))
		r.Check(wantPeers(s2, 1))
	})
}
// TestServer_RPC smoke-tests the in-process RPC path with Status.Ping.
func TestServer_RPC(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	var out struct{}
	if err := s1.RPC("Status.Ping", struct{}{}, &out); err != nil {
		t.Fatalf("err: %v", err)
	}
}
// TestServer_JoinLAN_TLS verifies that two servers with mutual TLS
// verification enabled can join on the LAN and establish Raft peering.
func TestServer_JoinLAN_TLS(t *testing.T) {
	t.Parallel()
	dir1, conf1 := testServerConfig(t)
	conf1.VerifyIncoming = true
	conf1.VerifyOutgoing = true
	configureTLS(conf1)
	s1, err := newServer(conf1)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	dir2, conf2 := testServerConfig(t)
	conf2.Bootstrap = false
	conf2.VerifyIncoming = true
	conf2.VerifyOutgoing = true
	configureTLS(conf2)
	s2, err := newServer(conf2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	// Try to join
	joinLAN(t, s2, s1)
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.LANMembers()), 2; got != want {
			r.Fatalf("got %d s1 LAN members want %d", got, want)
		}
		if got, want := len(s2.LANMembers()), 2; got != want {
			r.Fatalf("got %d s2 LAN members want %d", got, want)
		}
	})
	// Verify Raft has established a peer
	retry.Run(t, func(r *retry.R) {
		r.Check(wantPeers(s1, 2))
		r.Check(wantPeers(s2, 2))
	})
}
// TestServer_Expect verifies bootstrap-expect: no peers until three
// servers have joined, then bootstrap occurs, and a fourth server joins
// without triggering a new election (raft term is unchanged).
func TestServer_Expect(t *testing.T) {
	t.Parallel()
	// All test servers should be in expect=3 mode, except for the 3rd one,
	// but one with expect=0 can cause a bootstrap to occur from the other
	// servers as currently implemented.
	dir1, s1 := testServerDCExpect(t, "dc1", 3)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	dir2, s2 := testServerDCExpect(t, "dc1", 3)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	dir3, s3 := testServerDCExpect(t, "dc1", 0)
	defer os.RemoveAll(dir3)
	defer s3.Shutdown()
	dir4, s4 := testServerDCExpect(t, "dc1", 3)
	defer os.RemoveAll(dir4)
	defer s4.Shutdown()
	// Join the first two servers.
	joinLAN(t, s2, s1)
	// Should have no peers yet since the bootstrap didn't occur.
	retry.Run(t, func(r *retry.R) {
		r.Check(wantPeers(s1, 0))
		r.Check(wantPeers(s2, 0))
	})
	// Join the third node.
	joinLAN(t, s3, s1)
	// Now we have three servers so we should bootstrap.
	retry.Run(t, func(r *retry.R) {
		r.Check(wantPeers(s1, 3))
		r.Check(wantPeers(s2, 3))
		r.Check(wantPeers(s3, 3))
	})
	// Make sure a leader is elected, grab the current term and then add in
	// the fourth server.
	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	termBefore := s1.raft.Stats()["last_log_term"]
	joinLAN(t, s4, s1)
	// Wait for the new server to see itself added to the cluster.
	retry.Run(t, func(r *retry.R) {
		r.Check(wantPeers(s1, 4))
		r.Check(wantPeers(s2, 4))
		r.Check(wantPeers(s3, 4))
		r.Check(wantPeers(s4, 4))
	})
	// Make sure there's still a leader and that the term didn't change,
	// so we know an election didn't occur.
	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	termAfter := s1.raft.Stats()["last_log_term"]
	if termAfter != termBefore {
		t.Fatalf("looks like an election took place")
	}
}
// TestServer_BadExpect verifies that servers with conflicting
// bootstrap-expect values never bootstrap (peer counts stay at zero).
func TestServer_BadExpect(t *testing.T) {
	t.Parallel()
	// this one is in expect=3 mode
	dir1, s1 := testServerDCExpect(t, "dc1", 3)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	// this one is in expect=2 mode
	dir2, s2 := testServerDCExpect(t, "dc1", 2)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	// and this one is in expect=3 mode
	dir3, s3 := testServerDCExpect(t, "dc1", 3)
	defer os.RemoveAll(dir3)
	defer s3.Shutdown()
	// Try to join
	joinLAN(t, s2, s1)
	// should have no peers yet
	retry.Run(t, func(r *retry.R) {
		r.Check(wantPeers(s1, 0))
		r.Check(wantPeers(s2, 0))
	})
	// join the third node
	joinLAN(t, s3, s1)
	// should still have no peers (because s2 is in expect=2 mode)
	retry.Run(t, func(r *retry.R) {
		r.Check(wantPeers(s1, 0))
		r.Check(wantPeers(s2, 0))
		r.Check(wantPeers(s3, 0))
	})
}
// fakeGlobalResp is a minimal reply-aggregation stub for globalRPC tests;
// it discards every reply it is given.
type fakeGlobalResp struct{}

// Add discards the reply. (The redundant bare `return` was removed.)
func (r *fakeGlobalResp) Add(interface{}) {}

// New returns a fresh empty value for the RPC layer to decode into.
func (r *fakeGlobalResp) New() interface{} {
	return struct{}{}
}
// TestServer_globalRPCErrors verifies that globalRPC surfaces an error
// naming the bad method to the caller.
func TestServer_globalRPCErrors(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerDC(t, "dc1")
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	retry.Run(t, func(r *retry.R) {
		if len(s1.router.GetDatacenters()) != 1 {
			r.Fatal(nil)
		}
	})
	// Check that an error from a remote DC is returned
	err := s1.globalRPC("Bad.Method", nil, &fakeGlobalResp{})
	if err == nil {
		t.Fatalf("should have errored")
	}
	if !strings.Contains(err.Error(), "Bad.Method") {
		// Fixed typo in the failure message ("unexpcted").
		t.Fatalf("unexpected error: %s", err)
	}
}
// TestServer_Encrypted verifies Encrypted() reports false without a
// gossip key and true when LAN/WAN memberlist keys are configured.
func TestServer_Encrypted(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	// 16-byte gossip encryption key.
	key := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.SerfLANConfig.MemberlistConfig.SecretKey = key
		c.SerfWANConfig.MemberlistConfig.SecretKey = key
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	if s1.Encrypted() {
		t.Fatalf("should not be encrypted")
	}
	if !s2.Encrypted() {
		t.Fatalf("should be encrypted")
	}
}
// testVerifyRPC joins s2 to s1 over the LAN, waits for both to see two
// peers, then has s2 ping s1 over the RPC connection pool. Returns the
// ping result/error so callers can assert on TLS behavior.
func testVerifyRPC(s1, s2 *Server, t *testing.T) (bool, error) {
	// Try to join
	addr := fmt.Sprintf("127.0.0.1:%d",
		s1.config.SerfLANConfig.MemberlistConfig.BindPort)
	if _, err := s2.JoinLAN([]string{addr}); err != nil {
		t.Fatalf("err: %v", err)
	}
	// make sure both servers know about each other
	retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s1, 2)) })
	retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s2, 2)) })
	// Have s2 make an RPC call to s1
	// Look up s1 in s2's local server list under the read lock.
	s2.localLock.RLock()
	var leader *agent.Server
	for _, server := range s2.localConsuls {
		if server.Name == s1.config.NodeName {
			leader = server
		}
	}
	s2.localLock.RUnlock()
	if leader == nil {
		t.Fatal("no leader")
	}
	return s2.connPool.Ping(leader.Datacenter, leader.Addr, leader.Version, leader.UseTLS)
}
// TestServer_TLSToNoTLS verifies a TLS-capable server can still RPC to a
// server with no TLS configured (TLS not forced outgoing).
func TestServer_TLSToNoTLS(t *testing.T) {
	t.Parallel()
	// Set up a server with no TLS configured
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	// Add a second server with TLS configured
	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.Bootstrap = false
		c.CAFile = "../../test/client_certs/rootca.crt"
		c.CertFile = "../../test/client_certs/server.crt"
		c.KeyFile = "../../test/client_certs/server.key"
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	success, err := testVerifyRPC(s1, s2, t)
	if err != nil {
		t.Fatal(err)
	}
	if !success {
		t.Fatalf("bad: %v", success)
	}
}
// TestServer_TLSForceOutgoingToNoTLS verifies that a server with
// VerifyOutgoing set fails to RPC to a server that has no TLS at all.
func TestServer_TLSForceOutgoingToNoTLS(t *testing.T) {
	t.Parallel()
	// Set up a server with no TLS configured
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	// Add a second server with TLS and VerifyOutgoing set
	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.Bootstrap = false
		c.CAFile = "../../test/client_certs/rootca.crt"
		c.CertFile = "../../test/client_certs/server.crt"
		c.KeyFile = "../../test/client_certs/server.key"
		c.VerifyOutgoing = true
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	// The ping must fail with a TLS handshake error.
	_, err := testVerifyRPC(s1, s2, t)
	if err == nil || !strings.Contains(err.Error(), "remote error: tls") {
		t.Fatalf("should fail")
	}
}
// TestServer_TLSToFullVerify verifies a TLS-configured server can RPC to
// a server that requires verified incoming and outgoing TLS.
func TestServer_TLSToFullVerify(t *testing.T) {
	t.Parallel()
	// Set up a server with TLS and VerifyIncoming set
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.CAFile = "../../test/client_certs/rootca.crt"
		c.CertFile = "../../test/client_certs/server.crt"
		c.KeyFile = "../../test/client_certs/server.key"
		c.VerifyIncoming = true
		c.VerifyOutgoing = true
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	// Add a second server with TLS configured
	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.Bootstrap = false
		c.CAFile = "../../test/client_certs/rootca.crt"
		c.CertFile = "../../test/client_certs/server.crt"
		c.KeyFile = "../../test/client_certs/server.key"
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	success, err := testVerifyRPC(s1, s2, t)
	if err != nil {
		t.Fatal(err)
	}
	if !success {
		t.Fatalf("bad: %v", success)
	}
}
rpc: fix logging and try quicker timing of TestServer_JoinSeparateLanAndWanAddresses
package consul
import (
"fmt"
"log"
"math/rand"
"net"
"os"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/hashicorp/consul/agent/consul/agent"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/testutil/retry"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/go-uuid"
)
// getPort picks a pseudo-random TCP port in the non-privileged range
// [1030, 65430) for test servers.
func getPort() int {
	const (
		lowest = 1030  // stay above well-known/privileged ports
		span   = 64400 // number of candidate ports
	)
	return lowest + int(rand.Int31n(span))
}
// configureTLS points config at the CA certificate and server keypair
// checked into the repository's test fixtures.
func configureTLS(config *Config) {
	config.CAFile = "../../test/ca/root.cer"
	config.CertFile = "../../test/key/ourdomain.cer"
	config.KeyFile = "../../test/key/ourdomain.key"
}
// id is a process-wide counter used to make test node names unique.
var id int64

// uniqueNodeName appends a monotonically increasing suffix to name so
// that servers created by concurrently running tests never collide on
// node name.
func uniqueNodeName(name string) string {
	return fmt.Sprintf("%s-node-%d", name, atomic.AddInt64(&id, 1))
}
// testServerConfig returns a config for a single test server plus the
// temp data directory backing it (the caller removes the dir). Ports are
// randomized and all timing is shortened so test clusters converge fast.
func testServerConfig(t *testing.T) (string, *Config) {
	dir := testutil.TempDir(t, "consul")
	config := DefaultConfig()
	config.NodeName = uniqueNodeName(t.Name())
	config.Bootstrap = true
	config.Datacenter = "dc1"
	config.DataDir = dir
	// bind the rpc server to a random port. config.RPCAdvertise will be
	// set to the listen address unless it was set in the configuration.
	// In that case get the address from srv.Listener.Addr().
	config.RPCAddr = &net.TCPAddr{IP: []byte{127, 0, 0, 1}}
	nodeID, err := uuid.GenerateUUID()
	if err != nil {
		t.Fatal(err)
	}
	config.NodeID = types.NodeID(nodeID)
	// set the memberlist bind port to 0 to bind to a random port.
	// memberlist will update the value of BindPort after bind
	// to the actual value.
	config.SerfLANConfig.MemberlistConfig.BindAddr = "127.0.0.1"
	config.SerfLANConfig.MemberlistConfig.BindPort = 0
	config.SerfLANConfig.MemberlistConfig.SuspicionMult = 2
	config.SerfLANConfig.MemberlistConfig.ProbeTimeout = 50 * time.Millisecond
	config.SerfLANConfig.MemberlistConfig.ProbeInterval = 100 * time.Millisecond
	config.SerfLANConfig.MemberlistConfig.GossipInterval = 100 * time.Millisecond
	config.SerfWANConfig.MemberlistConfig.BindAddr = "127.0.0.1"
	config.SerfWANConfig.MemberlistConfig.BindPort = 0
	config.SerfWANConfig.MemberlistConfig.SuspicionMult = 2
	config.SerfWANConfig.MemberlistConfig.ProbeTimeout = 50 * time.Millisecond
	config.SerfWANConfig.MemberlistConfig.ProbeInterval = 100 * time.Millisecond
	config.SerfWANConfig.MemberlistConfig.GossipInterval = 100 * time.Millisecond
	// Aggressive raft/reconcile/autopilot timing: fine for loopback
	// tests, far too tight for production.
	config.RaftConfig.LeaderLeaseTimeout = 100 * time.Millisecond
	config.RaftConfig.HeartbeatTimeout = 200 * time.Millisecond
	config.RaftConfig.ElectionTimeout = 200 * time.Millisecond
	config.ReconcileInterval = 300 * time.Millisecond
	config.AutopilotConfig.ServerStabilizationTime = 100 * time.Millisecond
	config.ServerHealthInterval = 50 * time.Millisecond
	config.AutopilotInterval = 100 * time.Millisecond
	config.Build = "0.8.0"
	config.CoordinateUpdatePeriod = 100 * time.Millisecond
	return dir, config
}
// testServer starts a bootstrap server in the default datacenter "dc1"
// and returns its temp data dir and the server.
func testServer(t *testing.T) (string, *Server) {
	// A default test server is just a bootstrapped dc1 server; delegate
	// to the parameterized helper rather than repeat the callback.
	return testServerDCBootstrap(t, "dc1", true)
}
// testServerDC starts a bootstrap server in datacenter dc and returns
// its temp data dir and the server.
func testServerDC(t *testing.T, dc string) (string, *Server) {
	// Same as testServerDCBootstrap with bootstrap always enabled.
	return testServerDCBootstrap(t, dc, true)
}
// testServerDCBootstrap starts a server in datacenter dc with bootstrap
// mode set explicitly, returning its temp data dir and the server.
func testServerDCBootstrap(t *testing.T, dc string, bootstrap bool) (string, *Server) {
	return testServerWithConfig(t, func(c *Config) {
		c.Datacenter = dc
		c.Bootstrap = bootstrap
	})
}
// testServerDCExpect starts a non-bootstrap server in datacenter dc that
// waits for `expect` servers before bootstrapping the cluster.
func testServerDCExpect(t *testing.T, dc string, expect int) (string, *Server) {
	return testServerWithConfig(t, func(c *Config) {
		c.Datacenter = dc
		c.Bootstrap = false
		c.BootstrapExpect = expect
	})
}
// testServerWithConfig builds a default test config, lets cb mutate it,
// and starts a server from the result. Fails the test on startup errors.
func testServerWithConfig(t *testing.T, cb func(*Config)) (string, *Server) {
	dir, config := testServerConfig(t)
	if cb != nil {
		cb(config)
	}
	srv, err := newServer(config)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	return dir, srv
}
// newServer starts a Server from c and blocks until its listener is up,
// then writes the actual listen address back into c.RPCAddr so callers
// can dial it.
func newServer(c *Config) (*Server, error) {
	// chain server up notification
	oldNotify := c.NotifyListen
	up := make(chan struct{})
	c.NotifyListen = func() {
		close(up)
		if oldNotify != nil {
			oldNotify()
		}
	}
	// start server
	w := c.LogOutput
	if w == nil {
		w = os.Stderr
	}
	// Prefix each log line with the node name so interleaved output from
	// multiple test servers stays attributable.
	logger := log.New(w, c.NodeName+" - ", log.LstdFlags|log.Lmicroseconds)
	srv, err := NewServerLogger(c, logger)
	if err != nil {
		return nil, err
	}
	// wait until after listen
	<-up
	// get the real address
	//
	// the server already sets the RPCAdvertise address
	// if it wasn't configured since it needs it for
	// some initialization
	//
	// todo(fs): setting RPCAddr should probably be guarded
	// todo(fs): but for now it is a shortcut to avoid fixing
	// todo(fs): tests which depend on that value. They should
	// todo(fs): just get the listener address instead.
	c.RPCAddr = srv.Listener.Addr().(*net.TCPAddr)
	return srv, nil
}
// TestServer_StartStop verifies a server shuts down cleanly and that
// Shutdown is idempotent when called twice.
func TestServer_StartStop(t *testing.T) {
	t.Parallel()
	// Start up a server and then stop it.
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	if err := s1.Shutdown(); err != nil {
		t.Fatalf("err: %v", err)
	}
	// Shut down again, which should be idempotent.
	if err := s1.Shutdown(); err != nil {
		t.Fatalf("err: %v", err)
	}
}
// TestServer_JoinLAN verifies that two servers joined over the LAN pool
// eventually see each other as LAN members.
func TestServer_JoinLAN(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	dir2, s2 := testServer(t)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	// Try to join
	joinLAN(t, s2, s1)
	// Membership is gossiped asynchronously, so poll until both sides converge.
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.LANMembers()), 2; got != want {
			r.Fatalf("got %d s1 LAN members want %d", got, want)
		}
		if got, want := len(s2.LANMembers()), 2; got != want {
			r.Fatalf("got %d s2 LAN members want %d", got, want)
		}
	})
}
// TestServer_JoinWAN verifies that servers from two datacenters joined
// over the WAN pool see each other as WAN members and register each
// other's datacenter in the RPC router.
func TestServer_JoinWAN(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	dir2, s2 := testServerDC(t, "dc2")
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	// Try to join
	joinWAN(t, s2, s1)
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.WANMembers()), 2; got != want {
			r.Fatalf("got %d s1 WAN members want %d", got, want)
		}
		if got, want := len(s2.WANMembers()), 2; got != want {
			r.Fatalf("got %d s2 WAN members want %d", got, want)
		}
	})
	// Check the router has both
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.router.GetDatacenters()), 2; got != want {
			r.Fatalf("got %d routes want %d", got, want)
		}
		if got, want := len(s2.router.GetDatacenters()), 2; got != want {
			r.Fatalf("got %d datacenters want %d", got, want)
		}
	})
}
// TestServer_JoinWAN_Flood verifies that a server joined only on the LAN
// is "flooded" into the WAN pool: after s3 LAN-joins s1, all three
// servers (including s2 in the other datacenter) see it as a WAN member.
func TestServer_JoinWAN_Flood(t *testing.T) {
	t.Parallel()
	// Set up two servers in a WAN.
	dir1, s1 := testServerDCBootstrap(t, "dc1", true)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	dir2, s2 := testServerDCBootstrap(t, "dc2", true)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	joinWAN(t, s2, s1)
	for _, s := range []*Server{s1, s2} {
		retry.Run(t, func(r *retry.R) {
			if got, want := len(s.WANMembers()), 2; got != want {
				r.Fatalf("got %d WAN members want %d", got, want)
			}
		})
	}
	dir3, s3 := testServerDCBootstrap(t, "dc1", false)
	defer os.RemoveAll(dir3)
	defer s3.Shutdown()
	// Do just a LAN join for the new server and make sure it
	// shows up in the WAN.
	joinLAN(t, s3, s1)
	for _, s := range []*Server{s1, s2, s3} {
		retry.Run(t, func(r *retry.R) {
			if got, want := len(s.WANMembers()), 3; got != want {
				r.Fatalf("got %d WAN members for %s want %d", got, s.config.NodeName, want)
			}
		})
	}
}
// TestServer_JoinSeparateLanAndWanAddresses verifies that a server can
// advertise different addresses on its LAN and WAN serf pools: s2
// advertises 127.0.0.2 on the WAN (seen by s1) and 127.0.0.3 on the LAN
// (seen by its datacenter peer s3).
func TestServer_JoinSeparateLanAndWanAddresses(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.NodeName = t.Name() + "-s1"
		c.Datacenter = "dc1"
		c.Bootstrap = true
		c.SerfFloodInterval = 100 * time.Millisecond
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	s2Name := t.Name() + "-s2"
	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.NodeName = s2Name
		c.Datacenter = "dc2"
		c.Bootstrap = false
		// This wan address will be expected to be seen on s1
		c.SerfWANConfig.MemberlistConfig.AdvertiseAddr = "127.0.0.2"
		// This lan address will be expected to be seen on s3
		c.SerfLANConfig.MemberlistConfig.AdvertiseAddr = "127.0.0.3"
		c.SerfFloodInterval = 100 * time.Millisecond
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	dir3, s3 := testServerWithConfig(t, func(c *Config) {
		c.NodeName = t.Name() + "-s3"
		c.Datacenter = "dc2"
		c.Bootstrap = true
		c.SerfFloodInterval = 100 * time.Millisecond
	})
	defer os.RemoveAll(dir3)
	defer s3.Shutdown()
	// Join s2 to s1 on wan
	joinWAN(t, s2, s1)
	// Join s3 to s2 on lan
	joinLAN(t, s3, s2)
	// Wait for both pools to converge before inspecting addresses.
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.WANMembers()), 2; got != want {
			r.Fatalf("got %d s1 WAN members want %d", got, want)
		}
		if got, want := len(s2.WANMembers()), 2; got != want {
			r.Fatalf("got %d s2 WAN members want %d", got, want)
		}
		if got, want := len(s2.LANMembers()), 2; got != want {
			r.Fatalf("got %d s2 LAN members want %d", got, want)
		}
		if got, want := len(s3.LANMembers()), 2; got != want {
			r.Fatalf("got %d s3 LAN members want %d", got, want)
		}
	})
	// Check the router has both datacenters and that s2 tracks its LAN peer.
	retry.Run(t, func(r *retry.R) {
		if len(s1.router.GetDatacenters()) != 2 {
			r.Fatalf("remote consul missing")
		}
		if len(s2.router.GetDatacenters()) != 2 {
			r.Fatalf("remote consul missing")
		}
		if len(s2.localConsuls) != 2 {
			r.Fatalf("local consul fellow s3 for s2 missing")
		}
	})
	// Get and check the wan address of s2 from s1
	var s2WanAddr string
	for _, member := range s1.WANMembers() {
		if member.Name == s2Name+".dc2" {
			s2WanAddr = member.Addr.String()
		}
	}
	if s2WanAddr != "127.0.0.2" {
		t.Fatalf("s1 sees s2 on a wrong address: %s, expecting: %s", s2WanAddr, "127.0.0.2")
	}
	// Get and check the lan address of s2 from s3
	var s2LanAddr string
	for _, lanmember := range s3.LANMembers() {
		if lanmember.Name == s2Name {
			s2LanAddr = lanmember.Addr.String()
		}
	}
	if s2LanAddr != "127.0.0.3" {
		t.Fatalf("s3 sees s2 on a wrong address: %s, expecting: %s", s2LanAddr, "127.0.0.3")
	}
}
// TestServer_LeaveLeader verifies that when the current leader leaves a
// two-server cluster, both remaining views converge to a single raft peer.
func TestServer_LeaveLeader(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	// Second server not in bootstrap mode
	dir2, s2 := testServerDCBootstrap(t, "dc1", false)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	// Try to join
	joinLAN(t, s2, s1)
	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	testrpc.WaitForLeader(t, s2.RPC, "dc1")
	// Issue a leave to the leader
	var err error
	switch {
	case s1.IsLeader():
		err = s1.Leave()
	case s2.IsLeader():
		err = s2.Leave()
	default:
		t.Fatal("no leader")
	}
	if err != nil {
		t.Fatal("leave failed: ", err)
	}
	// Should lose a peer
	retry.Run(t, func(r *retry.R) {
		r.Check(wantPeers(s1, 1))
		r.Check(wantPeers(s2, 1))
	})
}
// TestServer_Leave verifies that when the non-leader leaves a two-server
// cluster, the raft peer set shrinks to one on both servers.
func TestServer_Leave(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	// Second server not in bootstrap mode
	dir2, s2 := testServerDCBootstrap(t, "dc1", false)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	// Try to join
	joinLAN(t, s2, s1)
	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	testrpc.WaitForLeader(t, s2.RPC, "dc1")
	// Issue a leave to the non-leader
	var err error
	switch {
	case s1.IsLeader():
		err = s2.Leave()
	case s2.IsLeader():
		err = s1.Leave()
	default:
		t.Fatal("no leader")
	}
	if err != nil {
		t.Fatal("leave failed: ", err)
	}
	// Should lose a peer
	retry.Run(t, func(r *retry.R) {
		r.Check(wantPeers(s1, 1))
		r.Check(wantPeers(s2, 1))
	})
}
// TestServer_RPC is a smoke test: a freshly started server should answer
// the Status.Ping RPC without error.
func TestServer_RPC(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	var out struct{}
	if err := s1.RPC("Status.Ping", struct{}{}, &out); err != nil {
		t.Fatalf("err: %v", err)
	}
}
// TestServer_JoinLAN_TLS verifies that two servers with mutual TLS
// verification enabled can join over the LAN and establish raft peering.
func TestServer_JoinLAN_TLS(t *testing.T) {
	t.Parallel()
	dir1, conf1 := testServerConfig(t)
	conf1.VerifyIncoming = true
	conf1.VerifyOutgoing = true
	configureTLS(conf1)
	s1, err := newServer(conf1)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	dir2, conf2 := testServerConfig(t)
	conf2.Bootstrap = false
	conf2.VerifyIncoming = true
	conf2.VerifyOutgoing = true
	configureTLS(conf2)
	s2, err := newServer(conf2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	// Try to join
	joinLAN(t, s2, s1)
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.LANMembers()), 2; got != want {
			r.Fatalf("got %d s1 LAN members want %d", got, want)
		}
		if got, want := len(s2.LANMembers()), 2; got != want {
			r.Fatalf("got %d s2 LAN members want %d", got, want)
		}
	})
	// Verify Raft has established a peer
	retry.Run(t, func(r *retry.R) {
		r.Check(wantPeers(s1, 2))
		r.Check(wantPeers(s2, 2))
	})
}
// TestServer_Expect verifies BootstrapExpect behavior: no bootstrap
// happens until the expected number of servers (3) have joined, and a
// fourth server can join afterwards without triggering a new election
// (checked via the raft term staying constant).
func TestServer_Expect(t *testing.T) {
	t.Parallel()
	// All test servers should be in expect=3 mode, except for the 3rd one,
	// but one with expect=0 can cause a bootstrap to occur from the other
	// servers as currently implemented.
	dir1, s1 := testServerDCExpect(t, "dc1", 3)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	dir2, s2 := testServerDCExpect(t, "dc1", 3)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	dir3, s3 := testServerDCExpect(t, "dc1", 0)
	defer os.RemoveAll(dir3)
	defer s3.Shutdown()
	dir4, s4 := testServerDCExpect(t, "dc1", 3)
	defer os.RemoveAll(dir4)
	defer s4.Shutdown()
	// Join the first two servers.
	joinLAN(t, s2, s1)
	// Should have no peers yet since the bootstrap didn't occur.
	retry.Run(t, func(r *retry.R) {
		r.Check(wantPeers(s1, 0))
		r.Check(wantPeers(s2, 0))
	})
	// Join the third node.
	joinLAN(t, s3, s1)
	// Now we have three servers so we should bootstrap.
	retry.Run(t, func(r *retry.R) {
		r.Check(wantPeers(s1, 3))
		r.Check(wantPeers(s2, 3))
		r.Check(wantPeers(s3, 3))
	})
	// Make sure a leader is elected, grab the current term and then add in
	// the fourth server.
	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	termBefore := s1.raft.Stats()["last_log_term"]
	joinLAN(t, s4, s1)
	// Wait for the new server to see itself added to the cluster.
	retry.Run(t, func(r *retry.R) {
		r.Check(wantPeers(s1, 4))
		r.Check(wantPeers(s2, 4))
		r.Check(wantPeers(s3, 4))
		r.Check(wantPeers(s4, 4))
	})
	// Make sure there's still a leader and that the term didn't change,
	// so we know an election didn't occur.
	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	termAfter := s1.raft.Stats()["last_log_term"]
	if termAfter != termBefore {
		t.Fatalf("looks like an election took place")
	}
}
// TestServer_BadExpect verifies that servers with conflicting
// BootstrapExpect values (3, 2, 3) never bootstrap: the peer count
// stays at zero on all three.
func TestServer_BadExpect(t *testing.T) {
	t.Parallel()
	// this one is in expect=3 mode
	dir1, s1 := testServerDCExpect(t, "dc1", 3)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	// this one is in expect=2 mode
	dir2, s2 := testServerDCExpect(t, "dc1", 2)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	// and this one is in expect=3 mode
	dir3, s3 := testServerDCExpect(t, "dc1", 3)
	defer os.RemoveAll(dir3)
	defer s3.Shutdown()
	// Try to join
	joinLAN(t, s2, s1)
	// should have no peers yet
	retry.Run(t, func(r *retry.R) {
		r.Check(wantPeers(s1, 0))
		r.Check(wantPeers(s2, 0))
	})
	// join the third node
	joinLAN(t, s3, s1)
	// should still have no peers (because s2 is in expect=2 mode)
	retry.Run(t, func(r *retry.R) {
		r.Check(wantPeers(s1, 0))
		r.Check(wantPeers(s2, 0))
		r.Check(wantPeers(s3, 0))
	})
}
// fakeGlobalResp is a minimal stub for the reply accumulator that
// globalRPC fills in: Add discards every reply and New hands back an
// empty placeholder value for decoding into.
type fakeGlobalResp struct{}

// Add discards the reply; these tests only care that globalRPC errors.
func (r *fakeGlobalResp) Add(interface{}) {}

// New returns a fresh empty value for globalRPC to decode into.
func (r *fakeGlobalResp) New() interface{} {
	return struct{}{}
}
// TestServer_globalRPCErrors verifies that globalRPC surfaces an error
// for an unknown method and that the error message names the method.
func TestServer_globalRPCErrors(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerDC(t, "dc1")
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	// Wait until the server has registered its own datacenter.
	retry.Run(t, func(r *retry.R) {
		if len(s1.router.GetDatacenters()) != 1 {
			r.Fatal(nil)
		}
	})
	// Check that an error from a remote DC is returned
	err := s1.globalRPC("Bad.Method", nil, &fakeGlobalResp{})
	if err == nil {
		t.Fatalf("should have errored")
	}
	// Fixed typo in the failure message ("unexpcted" -> "unexpected").
	if !strings.Contains(err.Error(), "Bad.Method") {
		t.Fatalf("unexpected error: %s", err)
	}
}
// TestServer_Encrypted verifies Encrypted() reports false for a server
// without gossip keys and true for one configured with a serf secret key
// on both the LAN and WAN pools.
func TestServer_Encrypted(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	// 16-byte gossip encryption key shared by LAN and WAN memberlists.
	key := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.SerfLANConfig.MemberlistConfig.SecretKey = key
		c.SerfWANConfig.MemberlistConfig.SecretKey = key
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	if s1.Encrypted() {
		t.Fatalf("should not be encrypted")
	}
	if !s2.Encrypted() {
		t.Fatalf("should be encrypted")
	}
}
// testVerifyRPC joins s2 to s1 over the LAN, waits for both to peer,
// then has s2 ping s1 through the connection pool. It returns the ping
// result so callers can assert on TLS negotiation success or failure.
func testVerifyRPC(s1, s2 *Server, t *testing.T) (bool, error) {
	// Try to join
	addr := fmt.Sprintf("127.0.0.1:%d",
		s1.config.SerfLANConfig.MemberlistConfig.BindPort)
	if _, err := s2.JoinLAN([]string{addr}); err != nil {
		t.Fatalf("err: %v", err)
	}
	// make sure both servers know about each other
	retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s1, 2)) })
	retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s2, 2)) })
	// Have s2 make an RPC call to s1
	s2.localLock.RLock()
	var leader *agent.Server
	for _, server := range s2.localConsuls {
		if server.Name == s1.config.NodeName {
			leader = server
		}
	}
	s2.localLock.RUnlock()
	if leader == nil {
		t.Fatal("no leader")
	}
	return s2.connPool.Ping(leader.Datacenter, leader.Addr, leader.Version, leader.UseTLS)
}
// TestServer_TLSToNoTLS verifies that a TLS-capable server (without
// VerifyOutgoing) can still successfully RPC to a plaintext server.
func TestServer_TLSToNoTLS(t *testing.T) {
	t.Parallel()
	// Set up a server with no TLS configured
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	// Add a second server with TLS configured
	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.Bootstrap = false
		c.CAFile = "../../test/client_certs/rootca.crt"
		c.CertFile = "../../test/client_certs/server.crt"
		c.KeyFile = "../../test/client_certs/server.key"
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	success, err := testVerifyRPC(s1, s2, t)
	if err != nil {
		t.Fatal(err)
	}
	if !success {
		t.Fatalf("bad: %v", success)
	}
}
// TestServer_TLSForceOutgoingToNoTLS verifies that a server with
// VerifyOutgoing set cannot RPC to a plaintext server: the ping must
// fail with a TLS handshake error.
func TestServer_TLSForceOutgoingToNoTLS(t *testing.T) {
	t.Parallel()
	// Set up a server with no TLS configured
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	// Add a second server with TLS and VerifyOutgoing set
	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.Bootstrap = false
		c.CAFile = "../../test/client_certs/rootca.crt"
		c.CertFile = "../../test/client_certs/server.crt"
		c.KeyFile = "../../test/client_certs/server.key"
		c.VerifyOutgoing = true
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	_, err := testVerifyRPC(s1, s2, t)
	if err == nil || !strings.Contains(err.Error(), "remote error: tls") {
		t.Fatalf("should fail")
	}
}
// TestServer_TLSToFullVerify verifies that a TLS-configured server can
// RPC to a server enforcing both VerifyIncoming and VerifyOutgoing.
func TestServer_TLSToFullVerify(t *testing.T) {
	t.Parallel()
	// Set up a server with TLS and VerifyIncoming set
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.CAFile = "../../test/client_certs/rootca.crt"
		c.CertFile = "../../test/client_certs/server.crt"
		c.KeyFile = "../../test/client_certs/server.key"
		c.VerifyIncoming = true
		c.VerifyOutgoing = true
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	// Add a second server with TLS configured
	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.Bootstrap = false
		c.CAFile = "../../test/client_certs/rootca.crt"
		c.CertFile = "../../test/client_certs/server.crt"
		c.KeyFile = "../../test/client_certs/server.key"
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	success, err := testVerifyRPC(s1, s2, t)
	if err != nil {
		t.Fatal(err)
	}
	if !success {
		t.Fatalf("bad: %v", success)
	}
}
|
package marshal
import (
"errors"
"strconv"
)
// MarshalledObject is a lazily-decoded view over a Ruby Marshal payload.
type MarshalledObject struct {
	MajorVersion byte // Marshal format major version (first byte of the stream)
	MinorVersion byte // Marshal format minor version (second byte of the stream)
	data         []byte    // raw payload for this object, starting at its type marker
	symbolCache  *[]string // symbols seen so far, shared across nested objects so ';' references resolve
	size         int       // cached byte length of this object; 0 means "not yet computed" for arrays/maps
}

// marshalledObjectType identifies the decoded Ruby type of an object.
type marshalledObjectType byte

// TypeMismatch is returned when a GetAsXxx accessor is called on an
// object of a different type.
var TypeMismatch = errors.New("gorails/marshal: an attempt to implicitly typecast a marshalled object")

// IncompleteData signals a truncated payload.
// NOTE(review): declared but not returned anywhere in this file — confirm
// whether callers rely on it or it is dead.
var IncompleteData = errors.New("gorails/marshal: incomplete data")

const (
	TYPE_UNKNOWN marshalledObjectType = 0
	TYPE_NIL     marshalledObjectType = 1
	TYPE_BOOL    marshalledObjectType = 2
	TYPE_INTEGER marshalledObjectType = 3
	TYPE_FLOAT   marshalledObjectType = 4
	TYPE_STRING  marshalledObjectType = 5
	TYPE_ARRAY   marshalledObjectType = 6
	TYPE_MAP     marshalledObjectType = 7
)
// newMarshalledObject wraps data in a MarshalledObject whose size is the
// full length of the given slice.
func newMarshalledObject(major_version, minor_version byte, data []byte, symbolCache *[]string) *MarshalledObject {
	fullSize := len(data)
	return newMarshalledObjectWithSize(major_version, minor_version, data, fullSize, symbolCache)
}
// newMarshalledObjectWithSize builds a MarshalledObject with an explicit
// precomputed size (0 defers size computation for arrays/maps).
func newMarshalledObjectWithSize(major_version, minor_version byte, data []byte, size int, symbolCache *[]string) *MarshalledObject {
	return &MarshalledObject{
		MajorVersion: major_version,
		MinorVersion: minor_version,
		data:         data,
		symbolCache:  symbolCache,
		size:         size,
	}
}
// CreateMarshalledObject wraps a raw Marshal stream. The first two bytes
// are the format's major/minor version; the payload follows.
// NOTE(review): input shorter than two bytes panics on the index below —
// presumably callers always pass a full stream; confirm.
func CreateMarshalledObject(serialized_data []byte) *MarshalledObject {
	symbols := []string{}
	major, minor := serialized_data[0], serialized_data[1]
	return newMarshalledObject(major, minor, serialized_data[2:], &symbols)
}
// GetType inspects the leading marker byte and reports the Ruby type of
// this object, or TYPE_UNKNOWN for empty data / unrecognized markers.
func (obj *MarshalledObject) GetType() marshalledObjectType {
	if len(obj.data) == 0 {
		return TYPE_UNKNOWN
	}
	switch marker := obj.data[0]; marker {
	case '0':
		return TYPE_NIL
	case 'T', 'F':
		return TYPE_BOOL
	case 'i':
		return TYPE_INTEGER
	case 'f':
		return TYPE_FLOAT
	case ':', ';':
		// Symbols and symbol references are exposed as strings.
		return TYPE_STRING
	case 'I':
		// An instance-variable wrapper counts as a string only when it
		// wraps a raw string ('"').
		if len(obj.data) > 1 && obj.data[1] == '"' {
			return TYPE_STRING
		}
		return TYPE_UNKNOWN
	case '[':
		return TYPE_ARRAY
	case '{':
		return TYPE_MAP
	default:
		return TYPE_UNKNOWN
	}
}
// GetAsBool returns the boolean value, or TypeMismatch if the object is
// not a marshalled boolean.
func (obj *MarshalledObject) GetAsBool() (bool, error) {
	if err := assertType(obj, TYPE_BOOL); err != nil {
		return false, err
	}
	v, _ := parseBool(obj.data)
	return v, nil
}
// GetAsInteger returns the integer value, or TypeMismatch if the object
// is not a marshalled integer. The leading 'i' marker is skipped.
func (obj *MarshalledObject) GetAsInteger() (int64, error) {
	if err := assertType(obj, TYPE_INTEGER); err != nil {
		return 0, err
	}
	n, _ := parseInt(obj.data[1:])
	return n, nil
}
// GetAsFloat returns the float value, or TypeMismatch if the object is
// not a marshalled float. Ruby serializes floats as a decimal string,
// so the text after the 'f' marker is parsed with ParseFloat.
func (obj *MarshalledObject) GetAsFloat() (float64, error) {
	if err := assertType(obj, TYPE_FLOAT); err != nil {
		return 0, err
	}
	raw, _ := parseString(obj.data[1:])
	return strconv.ParseFloat(raw, 64)
}
// GetAsString returns the value of a marshalled string or symbol.
// Three encodings are handled, keyed on the leading marker byte:
//   ':'  inline symbol — parsed and remembered in the symbol cache
//   ';'  symbol reference — an index into previously seen symbols
//   'I'  raw string with trailing encoding metadata
// It fails with TypeMismatch when the object is not a string.
//
// Fix: the previous version declared `var cache []string` in the outer
// scope and then shadowed it with `cache :=` inside the ';' branch
// (flagged by shadow linters); each branch now declares its own locals.
func (obj *MarshalledObject) GetAsString() (value string, err error) {
	err = assertType(obj, TYPE_STRING)
	if err != nil {
		return
	}
	switch obj.data[0] {
	case ':':
		value, _ = parseString(obj.data[1:])
		obj.cacheSymbols(value)
	case ';':
		// NOTE(review): an out-of-range reference index panics here;
		// presumably inputs are well-formed — confirm with callers.
		refIndex, _ := parseInt(obj.data[1:])
		known := *obj.symbolCache
		value = known[refIndex]
	default:
		// 'I'-wrapped string: skip the marker and the '"' that follows.
		var symbols []string
		value, _, symbols = parseStringWithEncoding(obj.data[2:])
		obj.cacheSymbols(symbols...)
	}
	return
}
// GetAsArray decodes a marshalled array into its elements, or returns
// TypeMismatch if the object is not an array. As a side effect it caches
// the array's total byte length in obj.size (getSize relies on this).
func (obj *MarshalledObject) GetAsArray() (value []*MarshalledObject, err error) {
	err = assertType(obj, TYPE_ARRAY)
	if err != nil {
		return
	}
	// Element count follows the '[' marker; offset tracks our cursor into
	// obj.data and starts just past the marker + count.
	array_size, offset := parseInt(obj.data[1:])
	offset += 1
	value = make([]*MarshalledObject, array_size)
	for i := int64(0); i < array_size; i++ {
		// First pass (size 0) measures the element so we can slice it out
		// exactly; measuring also populates the shared symbol cache.
		value_size := newMarshalledObjectWithSize(
			obj.MajorVersion,
			obj.MinorVersion,
			obj.data[offset:],
			0,
			obj.symbolCache,
		).getSize()
		value[i] = newMarshalledObject(
			obj.MajorVersion,
			obj.MinorVersion,
			obj.data[offset:offset+value_size],
			obj.symbolCache,
		)
		offset += value_size
	}
	obj.size = offset
	return
}
// GetAsMap decodes a marshalled hash into a Go map keyed by the string
// form of each key (see toString), or returns TypeMismatch if the object
// is not a map. It caches the total byte length in obj.size.
func (obj *MarshalledObject) GetAsMap() (value map[string]*MarshalledObject, err error) {
	err = assertType(obj, TYPE_MAP)
	if err != nil {
		return
	}
	// Pair count follows the '{' marker; offset is our cursor into obj.data.
	map_size, offset := parseInt(obj.data[1:])
	offset += 1
	value = make(map[string]*MarshalledObject, map_size)
	for i := int64(0); i < map_size; i++ {
		// Decode the key in place; getSize advances the cursor past it.
		k := newMarshalledObject(
			obj.MajorVersion,
			obj.MinorVersion,
			obj.data[offset:],
			obj.symbolCache,
		)
		offset += k.getSize()
		// Measure the value first (size 0 = "compute it") so it can be
		// sliced out exactly.
		value_size := newMarshalledObjectWithSize(
			obj.MajorVersion,
			obj.MinorVersion,
			obj.data[offset:],
			0,
			obj.symbolCache,
		).getSize()
		v := newMarshalledObject(
			obj.MajorVersion,
			obj.MinorVersion,
			obj.data[offset:offset+value_size],
			obj.symbolCache,
		)
		value[k.toString()] = v
		offset += value_size
	}
	obj.size = offset
	return
}
// assertType returns TypeMismatch unless obj decodes to expected_type.
func assertType(obj *MarshalledObject, expected_type marshalledObjectType) error {
	if obj.GetType() == expected_type {
		return nil
	}
	return TypeMismatch
}
// getSize returns the number of bytes this object occupies in the
// stream, including its type marker. For arrays and maps the size is
// computed lazily by fully decoding them (GetAsArray/GetAsMap store the
// result in obj.size). As a side effect, measuring strings/symbols also
// feeds the shared symbol cache so later ';' references resolve.
func (obj *MarshalledObject) getSize() int {
	header_size, data_size := 0, 0
	switch obj.GetType() {
	case TYPE_NIL, TYPE_BOOL:
		// Single marker byte, no payload.
		header_size = 0
		data_size = 1
	case TYPE_INTEGER:
		header_size = 1
		_, data_size = parseInt(obj.data[header_size:])
	case TYPE_STRING, TYPE_FLOAT:
		header_size = 1
		if obj.data[0] == ';' {
			// Symbol reference: payload is just the cache index.
			_, data_size = parseInt(obj.data[header_size:])
		} else {
			var cache []string
			if obj.data[0] == 'I' {
				// 'I"' wrapper: skip both marker bytes, then measure the
				// string plus its encoding metadata.
				header_size += 1
				_, data_size, cache = parseStringWithEncoding(obj.data[header_size:])
				obj.cacheSymbols(cache...)
			} else {
				var symbol string
				symbol, data_size = parseString(obj.data[header_size:])
				obj.cacheSymbols(symbol)
			}
		}
	case TYPE_ARRAY:
		if obj.size == 0 {
			obj.GetAsArray()
		}
		return obj.size
	case TYPE_MAP:
		if obj.size == 0 {
			obj.GetAsMap()
		}
		return obj.size
	}
	return header_size + data_size
}
// cacheSymbols appends the given symbols to the shared symbol cache,
// skipping any symbol already present before this call.
func (obj *MarshalledObject) cacheSymbols(symbols ...string) {
	if len(symbols) == 0 {
		return
	}
	existing := *obj.symbolCache
	seen := make(map[string]struct{}, len(existing))
	for _, s := range existing {
		seen[s] = struct{}{}
	}
	for _, s := range symbols {
		if _, dup := seen[s]; !dup {
			existing = append(existing, s)
		}
	}
	*obj.symbolCache = existing
}
// toString renders a scalar object as text: "<nil>", "true"/"false",
// the decimal integer, the string itself, or the shortest float form.
// Unknown/container types render as the empty string.
func (obj *MarshalledObject) toString() string {
	switch obj.GetType() {
	case TYPE_NIL:
		return "<nil>"
	case TYPE_BOOL:
		b, _ := obj.GetAsBool()
		return strconv.FormatBool(b)
	case TYPE_INTEGER:
		n, _ := obj.GetAsInteger()
		return strconv.FormatInt(n, 10)
	case TYPE_STRING:
		s, _ := obj.GetAsString()
		return s
	case TYPE_FLOAT:
		f, _ := obj.GetAsFloat()
		return strconv.FormatFloat(f, 'f', -1, 64)
	default:
		return ""
	}
}
// parseBool decodes a marshalled boolean: 'T' is true, anything else
// (i.e. 'F') is false. It always consumes exactly one byte.
func parseBool(data []byte) (bool, int) {
	const consumed = 1
	isTrue := data[0] == 'T'
	return isTrue, consumed
}
// parseInt decodes a Ruby-Marshal packed integer and returns the value
// plus the number of bytes consumed. Three encodings are handled by the
// first byte b:
//   - 0x06..0xfa: single-byte value; <=0x7f means b-5, >0x7f means a
//     small negative computed from the byte's complement.
//   - 0x00..0x05: b counts the little-endian payload bytes of a
//     non-negative value (0 payload bytes encodes zero).
//   - 0xfb..0xff: (0xff-b+1) payload bytes of a negative value, stored
//     complemented per byte.
func parseInt(data []byte) (int64, int) {
	if data[0] > 0x05 && data[0] < 0xfb {
		value := int64(data[0])
		if value > 0x7f {
			return -(0xff ^ value + 1) + 5, 1
		} else {
			return value - 5, 1
		}
	} else if data[0] <= 0x05 {
		value := int64(0)
		i := data[0]
		// Accumulate payload bytes most-significant first (they are stored
		// little-endian, so walk the index downward).
		for ; i > 0; i-- {
			value = value<<8 + int64(data[i])
		}
		return value, int(data[0] + 1)
	} else {
		value := int64(0)
		i := 0xff - data[0] + 1
		for ; i > 0; i-- {
			value = value<<8 + (0xff - int64(data[i]))
		}
		return -(value + 1), int(0xff - data[0] + 2)
	}
}
// parseString decodes a length-prefixed string and returns it together
// with the total bytes consumed (length prefix + payload).
func parseString(data []byte) (string, int) {
	length, headerSize := parseInt(data)
	end := headerSize + int(length)
	return string(data[headerSize:end]), end
}
// parseStringWithEncoding decodes a length-prefixed string that may be
// followed by an encoding attribute (as produced by Ruby's 'I' wrapper).
// It returns the string, the total bytes consumed, and any encoding
// symbol that should be added to the symbol cache.
func parseStringWithEncoding(data []byte) (string, int, []string) {
	cache := make([]string, 0)
	value, size := parseString(data)
	// May fail if string is followed by a symbol
	// The attribute name is either an inline symbol (':') or a symbol
	// reference (';'); if neither marker follows there is no attribute.
	if len(data) > size+1 && (data[size+1] == ':' || data[size+1] == ';') {
		if data[size+1] == ';' {
			_, enc_size := parseInt(data[size+2:])
			size += enc_size + 1 // reference to a symbol
		} else {
			enc_symbol, enc_size := parseString(data[size+2:])
			size += enc_size + 1
			cache = append(cache, enc_symbol)
		}
		// The attribute value is either a quoted encoding name ('"') or a
		// bare boolean (the common "E" true/false shortcut).
		if data[size+1] == '"' {
			encoding, enc_name_size := parseString(data[size+2:])
			_ = encoding // encoding name itself is not used by this package
			size += enc_name_size + 1
		} else {
			_, enc_name_size := parseBool(data[size+1:])
			size += enc_name_size
		}
		size += 1
	}
	return value, size, cache
}
Cleanup
package marshal
import (
"errors"
"strconv"
)
// MarshalledObject is a lazily-decoded view over a Ruby Marshal payload.
type MarshalledObject struct {
	MajorVersion byte // Marshal format major version (first stream byte)
	MinorVersion byte // Marshal format minor version (second stream byte)
	data         []byte    // raw payload for this object, starting at its type marker
	symbolCache  *[]string // symbols seen so far, shared so ';' references resolve
	size         int       // cached byte length; 0 means "not computed yet" for arrays/maps
}

// marshalledObjectType identifies the decoded Ruby type of an object.
type marshalledObjectType byte

// TypeMismatch is returned when a GetAsXxx accessor is called on an
// object of a different type.
var TypeMismatch = errors.New("gorails/marshal: an attempt to implicitly typecast a marshalled object")

// IncompleteData signals a truncated payload.
// NOTE(review): declared but never returned in this file — verify usage.
var IncompleteData = errors.New("gorails/marshal: incomplete data")

const (
	TYPE_UNKNOWN marshalledObjectType = 0
	TYPE_NIL     marshalledObjectType = 1
	TYPE_BOOL    marshalledObjectType = 2
	TYPE_INTEGER marshalledObjectType = 3
	TYPE_FLOAT   marshalledObjectType = 4
	TYPE_STRING  marshalledObjectType = 5
	TYPE_ARRAY   marshalledObjectType = 6
	TYPE_MAP     marshalledObjectType = 7
)
// newMarshalledObject wraps data with its size taken as the full slice
// length.
func newMarshalledObject(major_version, minor_version byte, data []byte, symbolCache *[]string) *MarshalledObject {
	return newMarshalledObjectWithSize(
		major_version, minor_version, data, len(data), symbolCache)
}
// newMarshalledObjectWithSize builds a MarshalledObject with an explicit
// size (0 defers size computation for container types).
func newMarshalledObjectWithSize(major_version, minor_version byte, data []byte, size int, symbolCache *[]string) *MarshalledObject {
	obj := MarshalledObject{
		MajorVersion: major_version,
		MinorVersion: minor_version,
		data:         data,
		symbolCache:  symbolCache,
		size:         size,
	}
	return &obj
}
// CreateMarshalledObject wraps a raw Marshal stream; the first two bytes
// are the format version and the payload follows.
// NOTE(review): inputs shorter than two bytes panic below — confirm that
// callers always supply a complete stream.
func CreateMarshalledObject(serialized_data []byte) *MarshalledObject {
	symbols := []string{}
	return newMarshalledObject(
		serialized_data[0], serialized_data[1], serialized_data[2:], &symbols)
}
// GetType reports the Ruby type encoded by the leading marker byte, or
// TYPE_UNKNOWN for empty data / unrecognized markers.
func (obj *MarshalledObject) GetType() marshalledObjectType {
	if len(obj.data) == 0 {
		return TYPE_UNKNOWN
	}
	switch obj.data[0] {
	case '0':
		return TYPE_NIL
	case 'T', 'F':
		return TYPE_BOOL
	case 'i':
		return TYPE_INTEGER
	case 'f':
		return TYPE_FLOAT
	case ':', ';':
		// Symbols and references to them are exposed as strings.
		return TYPE_STRING
	case 'I':
		// Only an 'I' wrapping a raw string ('"') counts as a string.
		if len(obj.data) > 1 && obj.data[1] == '"' {
			return TYPE_STRING
		}
		return TYPE_UNKNOWN
	case '[':
		return TYPE_ARRAY
	case '{':
		return TYPE_MAP
	default:
		return TYPE_UNKNOWN
	}
}
// GetAsBool returns the boolean value, or TypeMismatch for non-booleans.
func (obj *MarshalledObject) GetAsBool() (bool, error) {
	if err := assertType(obj, TYPE_BOOL); err != nil {
		return false, err
	}
	b, _ := parseBool(obj.data)
	return b, nil
}
// GetAsInteger returns the integer value (skipping the 'i' marker), or
// TypeMismatch for non-integers.
func (obj *MarshalledObject) GetAsInteger() (int64, error) {
	if err := assertType(obj, TYPE_INTEGER); err != nil {
		return 0, err
	}
	n, _ := parseInt(obj.data[1:])
	return n, nil
}
// GetAsFloat returns the float value, or TypeMismatch for non-floats.
// Ruby stores floats as decimal text, parsed here with ParseFloat.
func (obj *MarshalledObject) GetAsFloat() (float64, error) {
	if err := assertType(obj, TYPE_FLOAT); err != nil {
		return 0, err
	}
	text, _ := parseString(obj.data[1:])
	return strconv.ParseFloat(text, 64)
}
// GetAsString returns the value of a marshalled string or symbol,
// dispatching on the marker byte: ':' inline symbol (cached), ';'
// reference into the symbol cache, 'I' raw string with encoding
// metadata. Returns TypeMismatch for non-strings.
//
// Fix: removed the variable shadowing where `cache :=` inside the ';'
// branch shadowed the outer `var cache []string` (a shadow-lint defect);
// each branch now declares exactly the locals it needs.
func (obj *MarshalledObject) GetAsString() (value string, err error) {
	err = assertType(obj, TYPE_STRING)
	if err != nil {
		return
	}
	switch obj.data[0] {
	case ':':
		value, _ = parseString(obj.data[1:])
		obj.cacheSymbols(value)
	case ';':
		// NOTE(review): an out-of-range index panics here; presumably the
		// stream is well-formed — confirm with callers.
		refIndex, _ := parseInt(obj.data[1:])
		known := *obj.symbolCache
		value = known[refIndex]
	default:
		// 'I'-wrapped string: skip marker and the following '"'.
		var symbols []string
		value, _, symbols = parseStringWithEncoding(obj.data[2:])
		obj.cacheSymbols(symbols...)
	}
	return
}
// GetAsArray decodes a marshalled array, or returns TypeMismatch for
// non-arrays. It caches the array's total byte length in obj.size.
func (obj *MarshalledObject) GetAsArray() (value []*MarshalledObject, err error) {
	err = assertType(obj, TYPE_ARRAY)
	if err != nil {
		return
	}
	// Element count follows the '[' marker; offset is the cursor into
	// obj.data, starting just past marker + count.
	array_size, offset := parseInt(obj.data[1:])
	offset += 1
	value = make([]*MarshalledObject, array_size)
	for i := int64(0); i < array_size; i++ {
		// Measure the element first (size 0 = "compute") so it can be
		// sliced out exactly; measuring also feeds the symbol cache.
		value_size := newMarshalledObjectWithSize(
			obj.MajorVersion,
			obj.MinorVersion,
			obj.data[offset:],
			0,
			obj.symbolCache,
		).getSize()
		value[i] = newMarshalledObject(
			obj.MajorVersion,
			obj.MinorVersion,
			obj.data[offset:offset+value_size],
			obj.symbolCache,
		)
		offset += value_size
	}
	obj.size = offset
	return
}
// GetAsMap decodes a marshalled hash into a map keyed by each key's
// string rendering (toString), or returns TypeMismatch for non-maps.
// It caches the total byte length in obj.size.
func (obj *MarshalledObject) GetAsMap() (value map[string]*MarshalledObject, err error) {
	err = assertType(obj, TYPE_MAP)
	if err != nil {
		return
	}
	// Pair count follows the '{' marker; offset is the cursor into obj.data.
	map_size, offset := parseInt(obj.data[1:])
	offset += 1
	value = make(map[string]*MarshalledObject, map_size)
	for i := int64(0); i < map_size; i++ {
		// Decode the key, then advance the cursor past it via getSize.
		k := newMarshalledObject(
			obj.MajorVersion,
			obj.MinorVersion,
			obj.data[offset:],
			obj.symbolCache,
		)
		offset += k.getSize()
		// Measure the value (size 0 = "compute") so it can be sliced out.
		value_size := newMarshalledObjectWithSize(
			obj.MajorVersion,
			obj.MinorVersion,
			obj.data[offset:],
			0,
			obj.symbolCache,
		).getSize()
		v := newMarshalledObject(
			obj.MajorVersion,
			obj.MinorVersion,
			obj.data[offset:offset+value_size],
			obj.symbolCache,
		)
		value[k.toString()] = v
		offset += value_size
	}
	obj.size = offset
	return
}
// assertType yields TypeMismatch unless obj decodes to expected_type.
func assertType(obj *MarshalledObject, expected_type marshalledObjectType) error {
	if obj.GetType() != expected_type {
		return TypeMismatch
	}
	return nil
}
// getSize returns the byte length of this object in the stream,
// including its type marker. Array/map sizes are computed lazily by
// fully decoding them (the result is cached in obj.size). Measuring
// strings/symbols also feeds the shared symbol cache.
func (obj *MarshalledObject) getSize() int {
	header_size, data_size := 0, 0
	switch obj.GetType() {
	case TYPE_NIL, TYPE_BOOL:
		// Just the marker byte.
		header_size = 0
		data_size = 1
	case TYPE_INTEGER:
		header_size = 1
		_, data_size = parseInt(obj.data[header_size:])
	case TYPE_STRING, TYPE_FLOAT:
		header_size = 1
		if obj.data[0] == ';' {
			// Symbol reference: payload is only the cache index.
			_, data_size = parseInt(obj.data[header_size:])
		} else {
			var cache []string
			if obj.data[0] == 'I' {
				// Skip the 'I"' wrapper, then measure string + encoding.
				header_size += 1
				_, data_size, cache = parseStringWithEncoding(obj.data[header_size:])
				obj.cacheSymbols(cache...)
			} else {
				var symbol string
				symbol, data_size = parseString(obj.data[header_size:])
				obj.cacheSymbols(symbol)
			}
		}
	case TYPE_ARRAY:
		if obj.size == 0 {
			obj.GetAsArray()
		}
		return obj.size
	case TYPE_MAP:
		if obj.size == 0 {
			obj.GetAsMap()
		}
		return obj.size
	}
	return header_size + data_size
}
// cacheSymbols records the given symbols in the shared symbol cache,
// skipping any that were already present before this call.
func (obj *MarshalledObject) cacheSymbols(symbols ...string) {
	if len(symbols) == 0 {
		return
	}
	current := *obj.symbolCache
	present := make(map[string]struct{}, len(current))
	for _, s := range current {
		present[s] = struct{}{}
	}
	for _, s := range symbols {
		if _, ok := present[s]; !ok {
			current = append(current, s)
		}
	}
	*obj.symbolCache = current
}
// toString renders a scalar object as text ("<nil>", "true"/"false",
// decimal integer, the string itself, or shortest float form); other
// types render as "".
func (obj *MarshalledObject) toString() string {
	switch obj.GetType() {
	case TYPE_NIL:
		return "<nil>"
	case TYPE_BOOL:
		b, _ := obj.GetAsBool()
		return strconv.FormatBool(b)
	case TYPE_INTEGER:
		n, _ := obj.GetAsInteger()
		return strconv.FormatInt(n, 10)
	case TYPE_STRING:
		s, _ := obj.GetAsString()
		return s
	case TYPE_FLOAT:
		f, _ := obj.GetAsFloat()
		return strconv.FormatFloat(f, 'f', -1, 64)
	}
	return ""
}
// parseBool decodes a one-byte marshalled boolean ('T' is true).
func parseBool(data []byte) (bool, int) {
	result := data[0] == 'T'
	return result, 1
}
// parseInt decodes a Ruby-Marshal packed integer, returning the value
// and bytes consumed. First byte b selects the encoding:
//   - 0x06..0xfa: single-byte value (b-5 when <=0x7f, small negative via
//     complement otherwise);
//   - 0x00..0x05: b little-endian payload bytes of a non-negative value;
//   - 0xfb..0xff: (0xff-b+1) payload bytes of a negative value, stored
//     complemented per byte.
func parseInt(data []byte) (int64, int) {
	if data[0] > 0x05 && data[0] < 0xfb {
		value := int64(data[0])
		if value > 0x7f {
			return -(0xff ^ value + 1) + 5, 1
		} else {
			return value - 5, 1
		}
	} else if data[0] <= 0x05 {
		value := int64(0)
		i := data[0]
		// Payload is little-endian, so accumulate from the last byte down.
		for ; i > 0; i-- {
			value = value<<8 + int64(data[i])
		}
		return value, int(data[0] + 1)
	} else {
		value := int64(0)
		i := 0xff - data[0] + 1
		for ; i > 0; i-- {
			value = value<<8 + (0xff - int64(data[i]))
		}
		return -(value + 1), int(0xff - data[0] + 2)
	}
}
// parseString decodes a length-prefixed string, returning the string and
// total bytes consumed (prefix + payload).
func parseString(data []byte) (string, int) {
	length, prefixLen := parseInt(data)
	total := prefixLen + int(length)
	return string(data[prefixLen:total]), total
}
// parseStringWithEncoding decodes a length-prefixed string optionally
// followed by an encoding attribute (Ruby's 'I' wrapper). It returns the
// string, bytes consumed, and any encoding symbol for the symbol cache.
func parseStringWithEncoding(data []byte) (string, int, []string) {
	cache := make([]string, 0)
	value, size := parseString(data)
	// The attribute name is an inline symbol (':') or a reference (';');
	// absent either marker there is no encoding attribute.
	if len(data) > size+1 && (data[size+1] == ':' || data[size+1] == ';') {
		if data[size+1] == ';' {
			_, enc_size := parseInt(data[size+2:])
			size += enc_size + 1
		} else {
			enc_symbol, enc_size := parseString(data[size+2:])
			size += enc_size + 1
			cache = append(cache, enc_symbol)
		}
		// Attribute value: quoted encoding name ('"') or a bare boolean.
		if data[size+1] == '"' {
			encoding, enc_name_size := parseString(data[size+2:])
			_ = encoding // the encoding name itself is unused here
			size += enc_name_size + 1
		} else {
			_, enc_name_size := parseBool(data[size+1:])
			size += enc_name_size
		}
		size += 1
	}
	return value, size, cache
}
|
package computer
import (
"arp147/systems/key"
"arp147/ui/fonts"
"arp147/ui/position"
"arp147/ui/text"
"engo.io/ecs"
"engo.io/engo"
"image/color"
)
// Entity builds the computer's ECS entity, registering printKey as the
// handler for every key the in-game terminal accepts (letters, digits,
// punctuation, whitespace/editing keys and brackets). Modifier, arrow
// and escape keys are intentionally left commented out.
func (c *Computer) Entity() *ecs.Entity {
	c.entity = ecs.NewEntity("RenderSystem", "KeySystem")
	kc := &key.KeyComponent{}
	// Alpha
	for i := engo.A; i <= engo.Z; i++ {
		kc.On(engo.Key(i), c.printKey)
	}
	// Numeric
	for i := engo.Zero; i <= engo.Nine; i++ {
		kc.On(engo.Key(i), c.printKey)
	}
	// Misc
	kc.On(engo.Dash, c.printKey)
	kc.On(engo.Apostrophe, c.printKey)
	kc.On(engo.Semicolon, c.printKey)
	kc.On(engo.Equals, c.printKey)
	kc.On(engo.Comma, c.printKey)
	kc.On(engo.Period, c.printKey)
	kc.On(engo.Slash, c.printKey)
	kc.On(engo.Backslash, c.printKey)
	kc.On(engo.Backspace, c.printKey)
	kc.On(engo.Tab, c.printKey)
	//kc.On(engo.CapsLock, c.printKey)
	kc.On(engo.Space, c.printKey)
	kc.On(engo.Enter, c.printKey)
	/*kc.On(engo.Escape, c.printKey)
	kc.On(engo.ArrowLeft, c.printKey)
	kc.On(engo.ArrowRight, c.printKey)
	kc.On(engo.ArrowDown, c.printKey)
	kc.On(engo.ArrowUp, c.printKey)
	kc.On(engo.ArrowUp, c.printKey)*/
	kc.On(engo.LeftBracket, c.printKey)
	/*kc.On(engo.LeftShift, c.printKey)
	kc.On(engo.LeftControl, c.printKey)
	kc.On(engo.LeftSuper, c.printKey)
	kc.On(engo.LeftAlt, c.printKey)*/
	kc.On(engo.RightBracket, c.printKey)
	/*kc.On(engo.RightShift, c.printKey)
	kc.On(engo.RightControl, c.printKey)
	kc.On(engo.RightSuper, c.printKey)
	kc.On(engo.RightAlt, c.printKey)*/
	// Add components
	c.entity.AddComponent(kc)
	c.entity.AddComponent(&engo.RenderComponent{})
	return c.entity
}
// printKey renders one keypress on the in-game terminal. Enter advances
// the line, Tab expands to four spaces, Backspace removes the last glyph
// on the current line; every other key becomes a text entity positioned
// by line/column.
func (c *Computer) printKey(key engo.Key) {
	// Font size in points; also drives the per-character x/y advance below.
	size := 16
	var xoff, yoff float32
	// Catch special keys
	switch key {
	case engo.Enter:
		// An enter should advance us to the next line
		c.line++
		return
	case engo.Tab:
		// A tab should be translated into four spaces
		for i := 0; i < 4; i++ {
			c.printKey(engo.Space)
		}
		return
	case engo.Backspace:
		// A backspace should delete the last character
		e := len(c.lines[c.line])
		if e > 0 {
			c.lines[c.line][e-1].Remove(c.world)
			c.lines[c.line] = c.lines[c.line][:e-1]
		}
		return
	}
	// Don't add any offset if we're on the very first character
	if len(c.lines) > 0 {
		// Don't add any x offset if we're the first character of the line
		if len(c.lines[c.line]) > 0 {
			// .7 squeezes glyphs horizontally to ~70% of the font size;
			// presumably tuned to the FONT_COMPUTER advance width — confirm.
			xoff = float32(len(c.lines[c.line])*size) * .7
		}
		// Always create the y offset by the size of the font and the line
		yoff = float32(c.line*size) * .9
	}
	// Create our character
	// NOTE(review): string(key) assumes the engo key code maps directly to
	// the printable rune — confirm for non-ASCII layouts.
	char := text.New(text.Text{
		Text:  string(key),
		Size:  float64(size),
		Font:  fonts.FONT_COMPUTER,
		Scale: engo.Point{1, 1},
		Color: text.Color{
			BG: color.Transparent,
			FG: color.White,
		},
		Position: position.Position{
			Point: engo.Point{
				X: 42 + xoff,
				Y: 42 + yoff,
			},
			Position: position.TOP_LEFT,
		},
	})
	// Add our character to the line
	c.lines[c.line] = append(c.lines[c.line], char)
	// Add it to the world
	c.world.AddEntity(char.Entity())
}
Decrease the spacing between letters in the computer
package computer
import (
"arp147/systems/key"
"arp147/ui/fonts"
"arp147/ui/position"
"arp147/ui/text"
"engo.io/ecs"
"engo.io/engo"
"image/color"
)
// Entity builds and returns the computer's ECS entity, wiring every key the
// virtual terminal understands (letters, digits, punctuation, Backspace,
// Tab, Space, Enter and brackets) to printKey, then attaching the key and
// render components.
func (c *Computer) Entity() *ecs.Entity {
	c.entity = ecs.NewEntity("RenderSystem", "KeySystem")
	kc := &key.KeyComponent{}
	// Alphabetic range.
	for k := engo.A; k <= engo.Z; k++ {
		kc.On(engo.Key(k), c.printKey)
	}
	// Numeric range.
	for k := engo.Zero; k <= engo.Nine; k++ {
		kc.On(engo.Key(k), c.printKey)
	}
	// Punctuation and control keys, registered in the same order as before.
	// Modifier, arrow and escape keys are deliberately left unbound.
	for _, k := range []engo.Key{
		engo.Dash, engo.Apostrophe, engo.Semicolon, engo.Equals,
		engo.Comma, engo.Period, engo.Slash, engo.Backslash,
		engo.Backspace, engo.Tab,
		//engo.CapsLock,
		engo.Space, engo.Enter,
		/*engo.Escape, engo.ArrowLeft, engo.ArrowRight,
		engo.ArrowDown, engo.ArrowUp,*/
		engo.LeftBracket,
		/*engo.LeftShift, engo.LeftControl, engo.LeftSuper, engo.LeftAlt,*/
		engo.RightBracket,
		/*engo.RightShift, engo.RightControl, engo.RightSuper, engo.RightAlt,*/
	} {
		kc.On(k, c.printKey)
	}
	// Attach the key handler and an (empty) render component.
	c.entity.AddComponent(kc)
	c.entity.AddComponent(&engo.RenderComponent{})
	return c.entity
}
// printKey renders the glyph for a pressed key onto the computer screen at
// the cursor position tracked by c.line / c.lines. Enter, Tab and Backspace
// are handled specially and never draw a glyph.
//
// NOTE(review): c.lines[c.line] is indexed without a bounds check after
// Enter increments c.line — assumes c.lines is pre-sized for every line;
// confirm against the Computer type's initialization.
func (c *Computer) printKey(key engo.Key) {
	// Font size in points; also the basis for the x/y cursor offsets below.
	size := 16
	var xoff, yoff float32
	// Catch special keys
	switch key {
	case engo.Enter:
		// An enter should advance us to the next line
		c.line++
		return
	case engo.Tab:
		// A tab should be translated into four spaces
		for i := 0; i < 4; i++ {
			c.printKey(engo.Space)
		}
		return
	case engo.Backspace:
		// A backspace should delete the last character
		e := len(c.lines[c.line])
		if e > 0 {
			c.lines[c.line][e-1].Remove(c.world)
			c.lines[c.line] = c.lines[c.line][:e-1]
		}
		return
	}
	// Don't add any offset if we're on the very first character
	if len(c.lines) > 0 {
		// Don't add any x offset if we're the first character of the line
		if len(c.lines[c.line]) > 0 {
			// .6 (down from .7) tightens horizontal letter spacing.
			xoff = float32(len(c.lines[c.line])*size) * .6
		}
		// Always create the y offset by the size of the font and the line
		yoff = float32(c.line*size) * .9
	}
	// Create our character
	char := text.New(text.Text{
		Text:  string(key),
		Size:  float64(size),
		Font:  fonts.FONT_COMPUTER,
		Scale: engo.Point{1, 1},
		Color: text.Color{
			BG: color.Transparent,
			FG: color.White,
		},
		Position: position.Position{
			Point: engo.Point{
				// 42 looks like a fixed screen margin — TODO confirm.
				X: 42 + xoff,
				Y: 42 + yoff,
			},
			Position: position.TOP_LEFT,
		},
	})
	// Add our character to the line
	c.lines[c.line] = append(c.lines[c.line], char)
	// Add it to the world
	c.world.AddEntity(char.Entity())
}
|
package net
import (
"bufio"
"net"
"strconv"
"strings"
"time"
"github.com/heidi-ann/ios/config"
"github.com/heidi-ann/ios/msgs"
"github.com/golang/glog"
)
type peerHandler struct {
id int
peers []config.NetAddress
failures *msgs.FailureNotifier
net *msgs.PeerNet
}
// checkPeer iterates over the peer table and, for every peer that is not
// currently connected, attempts to dial a fresh TCP connection. A successful
// dial is handed to handlePeer as the initiating side; on failure the peer's
// queued outgoing messages are discarded so senders do not block.
func (ph *peerHandler) checkPeer() {
	for peer := range ph.peers {
		if ph.failures.IsConnected(peer) {
			continue
		}
		conn, err := net.Dial("tcp", ph.peers[peer].ToString())
		if err != nil {
			go ph.net.OutgoingUnicast[peer].Discard()
			continue
		}
		go ph.handlePeer(conn, true)
	}
}
// handlePeer handles a peer connection until closed
func (ph *peerHandler) handlePeer(cn net.Conn, init bool) {
addr := cn.RemoteAddr().String()
if init {
glog.Info("Outgoing peer connection to ", addr)
} else {
glog.Info("Incoming peer connection from ", addr)
}
defer cn.Close()
defer glog.Warningf("Connection closed from %s ", addr)
// handle requests
reader := bufio.NewReader(cn)
writer := bufio.NewWriter(cn)
// exchange peer ID's via handshake
_, _ = writer.WriteString(strconv.Itoa(ph.id) + "\n")
_ = writer.Flush()
text, _ := reader.ReadString('\n')
glog.V(1).Info("Received ", text)
peerID, err := strconv.Atoi(strings.Trim(text, "\n"))
if err != nil {
glog.Warning(err)
return
}
// check ID is expected
if peerID < 0 || peerID >= len(ph.peers) || peerID == ph.id {
glog.Warning("Unexpected peer ID ", peerID)
return
}
// check IP address is as expected
// TODO: allow dynamic changes of IP
actualAddr := strings.Split(addr, ":")[0]
if ph.peers[peerID].Address != actualAddr {
glog.Warning("Peer ID ", peerID, " has connected from an unexpected address ", actualAddr,
" expected ", ph.peers[peerID].Address)
return
}
glog.Infof("Ready to handle traffic from peer %d at %s ", peerID, addr)
err = ph.failures.NowConnected(peerID)
if err != nil {
glog.Warning(err)
return
}
closeErr := make(chan error)
go func() {
for {
// read request
glog.V(1).Infof("Ready for next message from %d", peerID)
text, err := reader.ReadBytes(byte('\n'))
if err != nil {
glog.Warning(err)
closeErr <- err
break
}
glog.V(1).Infof("Read from peer %d: ", peerID, string(text))
err = ph.net.Incoming.BytesToProtoMsg(text)
if err != nil (
glog.Warning(err)
)
}
}()
go func() {
for {
// send reply
glog.V(1).Infof("Ready to send message to %d", peerID)
b, err := ph.net.OutgoingUnicast[peerID].ProtoMsgToBytes()
if err != nil {
glog.Fatal("Could not marshal message")
}
glog.V(1).Infof("Sending to %d: %s", peerID, string(b))
_, err = writer.Write(b)
if err != nil {
glog.Warning(err)
// return packet for retry
ph.net.OutgoingUnicast[peerID].BytesToProtoMsg(b)
closeErr <- err
break
}
_, err = writer.Write([]byte("\n"))
if err != nil {
glog.Warning(err)
// return packet for retry
ph.net.OutgoingUnicast[peerID].BytesToProtoMsg(b)
closeErr <- err
break
}
// TODO: BUG need to retry packet
err = writer.Flush()
if err != nil {
glog.Warning(err)
// return packet for retry
ph.net.OutgoingUnicast[peerID].BytesToProtoMsg(b)
closeErr <- err
break
}
glog.V(1).Info("Sent")
}
}()
// block until connection fails
<-closeErr
// tidy up
glog.Warningf("No longer able to handle traffic from peer %d at %s ", peerID, addr)
ph.failures.NowDisconnected(peerID)
}
// SetupPeers asynchronously starts all peer connection handling for the node
// with ID localId: it opens a listener on this node's configured port,
// short-circuits messages addressed to the local node (no network traffic),
// accepts incoming peer connections, and spawns a background loop that
// redials any disconnected peer every 500ms. It returns an error only when
// the listening socket cannot be opened; all other work happens in
// goroutines that run for the lifetime of the process.
// TODO: switch to sync function
func SetupPeers(localId int, addresses []config.NetAddress, peerNet *msgs.PeerNet, fail *msgs.FailureNotifier) error {
	peerHandler := peerHandler{
		id:       localId,
		peers:    addresses,
		failures: fail,
		net:      peerNet,
	}
	// set up peer server
	glog.Info("Starting up peer server on ", addresses[peerHandler.id].Port)
	listeningPort := ":" + strconv.Itoa(addresses[peerHandler.id].Port)
	lnPeers, err := net.Listen("tcp", listeningPort)
	if err != nil {
		glog.Info("Unable to start listen for peers")
		return err
	}
	// handle local peer (without sending network traffic)
	peerHandler.failures.NowConnected(peerHandler.id)
	from := &(peerHandler.net.Incoming)
	go from.Forward(peerHandler.net.OutgoingUnicast[peerHandler.id])
	// handle for incoming peers
	// NOTE(review): glog.Fatal terminates the whole process if Accept fails
	// (e.g. if the listener is ever closed) — confirm this is intended.
	go func() {
		for {
			conn, err := lnPeers.Accept()
			if err != nil {
				glog.Fatal(err)
			}
			go (&peerHandler).handlePeer(conn, false)
		}
	}()
	// regularly check if all peers are connected and retry if not
	go func() {
		for {
			(&peerHandler).checkPeer()
			time.Sleep(500 * time.Millisecond)
		}
	}()
	return nil
}
Fix a syntax error in peers.go
package net
import (
"bufio"
"net"
"strconv"
"strings"
"time"
"github.com/heidi-ann/ios/config"
"github.com/heidi-ann/ios/msgs"
"github.com/golang/glog"
)
type peerHandler struct {
id int
peers []config.NetAddress
failures *msgs.FailureNotifier
net *msgs.PeerNet
}
// checkPeer iterates through the peers and checks whether each one has a
// live connection; it tries to create one if not, and reports failure when
// that is not possible.
// On a successful dial the connection is handed to handlePeer as the
// initiating side; on failure the peer's queued outgoing messages are
// discarded so that senders do not block on a dead peer.
func (ph *peerHandler) checkPeer() {
	for i := range ph.peers {
		if !ph.failures.IsConnected(i) {
			cn, err := net.Dial("tcp", ph.peers[i].ToString())
			if err == nil {
				go ph.handlePeer(cn, true)
			} else {
				go ph.net.OutgoingUnicast[i].Discard()
			}
		}
	}
}
// handlePeer handles a peer connection until closed.
// After a plaintext handshake exchanging integer peer IDs, it verifies the
// claimed ID and remote address against the configuration, then runs two
// pump goroutines: one feeding received lines into ph.net.Incoming and one
// writing messages from ph.net.OutgoingUnicast[peerID] to the socket. The
// function blocks until either pump reports an error, then marks the peer
// as disconnected.
//
// init only selects the log message: true for dialed (outgoing) connections,
// false for accepted (incoming) ones.
func (ph *peerHandler) handlePeer(cn net.Conn, init bool) {
	addr := cn.RemoteAddr().String()
	if init {
		glog.Info("Outgoing peer connection to ", addr)
	} else {
		glog.Info("Incoming peer connection from ", addr)
	}
	defer cn.Close()
	defer glog.Warningf("Connection closed from %s ", addr)
	// handle requests
	reader := bufio.NewReader(cn)
	writer := bufio.NewWriter(cn)
	// exchange peer ID's via handshake; write/read errors are deliberately
	// ignored here because a garbled ID is caught by the Atoi below
	_, _ = writer.WriteString(strconv.Itoa(ph.id) + "\n")
	_ = writer.Flush()
	text, _ := reader.ReadString('\n')
	glog.V(1).Info("Received ", text)
	peerID, err := strconv.Atoi(strings.Trim(text, "\n"))
	if err != nil {
		glog.Warning(err)
		return
	}
	// check ID is expected
	if peerID < 0 || peerID >= len(ph.peers) || peerID == ph.id {
		glog.Warning("Unexpected peer ID ", peerID)
		return
	}
	// check IP address is as expected
	// TODO: allow dynamic changes of IP
	// NOTE(review): splitting on ":" breaks for IPv6 literal addresses —
	// consider net.SplitHostPort.
	actualAddr := strings.Split(addr, ":")[0]
	if ph.peers[peerID].Address != actualAddr {
		glog.Warning("Peer ID ", peerID, " has connected from an unexpected address ", actualAddr,
			" expected ", ph.peers[peerID].Address)
		return
	}
	glog.Infof("Ready to handle traffic from peer %d at %s ", peerID, addr)
	err = ph.failures.NowConnected(peerID)
	if err != nil {
		glog.Warning(err)
		return
	}
	// NOTE(review): closeErr is unbuffered and received from exactly once,
	// so the second pump goroutine to fail blocks forever on its send —
	// goroutine leak; consider make(chan error, 2).
	closeErr := make(chan error)
	go func() {
		for {
			// read request
			glog.V(1).Infof("Ready for next message from %d", peerID)
			text, err := reader.ReadBytes(byte('\n'))
			if err != nil {
				glog.Warning(err)
				closeErr <- err
				break
			}
			// NOTE(review): format string has one verb but two args — the
			// message body is not printed; likely a missing %s.
			glog.V(1).Infof("Read from peer %d: ", peerID, string(text))
			err = ph.net.Incoming.BytesToProtoMsg(text)
			if err != nil {
				glog.Warning(err)
			}
		}
	}()
	go func() {
		for {
			// send reply
			glog.V(1).Infof("Ready to send message to %d", peerID)
			b, err := ph.net.OutgoingUnicast[peerID].ProtoMsgToBytes()
			if err != nil {
				glog.Fatal("Could not marshal message")
			}
			glog.V(1).Infof("Sending to %d: %s", peerID, string(b))
			_, err = writer.Write(b)
			if err != nil {
				glog.Warning(err)
				// return packet for retry
				ph.net.OutgoingUnicast[peerID].BytesToProtoMsg(b)
				closeErr <- err
				break
			}
			_, err = writer.Write([]byte("\n"))
			if err != nil {
				glog.Warning(err)
				// return packet for retry
				ph.net.OutgoingUnicast[peerID].BytesToProtoMsg(b)
				closeErr <- err
				break
			}
			// TODO: BUG need to retry packet
			err = writer.Flush()
			if err != nil {
				glog.Warning(err)
				// return packet for retry
				ph.net.OutgoingUnicast[peerID].BytesToProtoMsg(b)
				closeErr <- err
				break
			}
			glog.V(1).Info("Sent")
		}
	}()
	// block until connection fails
	<-closeErr
	// tidy up
	glog.Warningf("No longer able to handle traffic from peer %d at %s ", peerID, addr)
	ph.failures.NowDisconnected(peerID)
}
// SetupPeers asynchronously starts all peer connection handling for the node
// with ID localId: it opens a listener on this node's configured port,
// short-circuits messages addressed to the local node (no network traffic),
// accepts incoming peer connections, and spawns a background loop that
// redials any disconnected peer every 500ms. It returns an error only when
// the listening socket cannot be opened; all other work happens in
// goroutines that run for the lifetime of the process.
// TODO: switch to sync function
func SetupPeers(localId int, addresses []config.NetAddress, peerNet *msgs.PeerNet, fail *msgs.FailureNotifier) error {
	peerHandler := peerHandler{
		id:       localId,
		peers:    addresses,
		failures: fail,
		net:      peerNet,
	}
	// set up peer server
	glog.Info("Starting up peer server on ", addresses[peerHandler.id].Port)
	listeningPort := ":" + strconv.Itoa(addresses[peerHandler.id].Port)
	lnPeers, err := net.Listen("tcp", listeningPort)
	if err != nil {
		glog.Info("Unable to start listen for peers")
		return err
	}
	// handle local peer (without sending network traffic)
	peerHandler.failures.NowConnected(peerHandler.id)
	from := &(peerHandler.net.Incoming)
	go from.Forward(peerHandler.net.OutgoingUnicast[peerHandler.id])
	// handle for incoming peers
	// NOTE(review): glog.Fatal terminates the whole process if Accept fails
	// (e.g. if the listener is ever closed) — confirm this is intended.
	go func() {
		for {
			conn, err := lnPeers.Accept()
			if err != nil {
				glog.Fatal(err)
			}
			go (&peerHandler).handlePeer(conn, false)
		}
	}()
	// regularly check if all peers are connected and retry if not
	go func() {
		for {
			(&peerHandler).checkPeer()
			time.Sleep(500 * time.Millisecond)
		}
	}()
	return nil
}
|
// Copyright (C) 2014,2015 Nippon Telegraph and Telephone Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bmp
import (
"encoding/binary"
"fmt"
"github.com/osrg/gobgp/packet/bgp"
"math"
"net"
)
type BMPHeader struct {
Version uint8
Length uint32
Type uint8
}
const (
BMP_VERSION = 3
BMP_HEADER_SIZE = 6
BMP_PEER_HEADER_SIZE = 42
)
const (
BMP_DEFAULT_PORT = 11019
)
const (
BMP_PEER_TYPE_GLOBAL uint8 = iota
BMP_PEER_TYPE_L3VPN
)
// DecodeFromBytes parses the fixed 6-byte BMP common header (version,
// total message length, message type) from data. It returns an error on a
// truncated buffer or an unexpected version instead of panicking on
// out-of-range indexing.
func (h *BMPHeader) DecodeFromBytes(data []byte) error {
	if len(data) < BMP_HEADER_SIZE {
		return fmt.Errorf("not all BMP header bytes available")
	}
	h.Version = data[0]
	if data[0] != BMP_VERSION {
		return fmt.Errorf("error version")
	}
	h.Length = binary.BigEndian.Uint32(data[1:5])
	h.Type = data[5]
	return nil
}
// Serialize encodes the common header into a fresh 6-byte buffer:
// version, 4-byte big-endian length, then the message type.
func (h *BMPHeader) Serialize() ([]byte, error) {
	out := make([]byte, BMP_HEADER_SIZE)
	out[0] = h.Version
	binary.BigEndian.PutUint32(out[1:5], h.Length)
	out[5] = h.Type
	return out, nil
}
type BMPPeerHeader struct {
PeerType uint8
IsPostPolicy bool
PeerDistinguisher uint64
PeerAddress net.IP
PeerAS uint32
PeerBGPID net.IP
Timestamp float64
Flags uint8
}
// NewBMPPeerHeader constructs a BMP per-peer header.
// policy sets the L (post-policy) flag; an address that does not parse as
// IPv4 is stored as a 16-byte value and sets the V (IPv6) flag. The BGP ID
// is stored as a 4-byte IPv4 value.
func NewBMPPeerHeader(t uint8, policy bool, dist uint64, address string, as uint32, id string, stamp float64) *BMPPeerHeader {
	h := &BMPPeerHeader{
		PeerType:          t,
		IsPostPolicy:      policy,
		PeerDistinguisher: dist,
		PeerAS:            as,
		PeerBGPID:         net.ParseIP(id).To4(),
		Timestamp:         stamp,
	}
	if policy {
		h.Flags |= (1 << 6) // L flag: post-policy Adj-RIB-In
	}
	// Parse the peer address once instead of three times; an unparseable
	// address yields a nil PeerAddress exactly as before.
	if addr := net.ParseIP(address); addr.To4() != nil {
		h.PeerAddress = addr.To4()
	} else {
		h.PeerAddress = addr.To16()
		h.Flags |= (1 << 7) // V flag: IPv6 peer address
	}
	return h
}
// DecodeFromBytes parses the fixed 42-byte BMP per-peer header. It returns
// an error on a truncated buffer instead of panicking on out-of-range
// indexing (the remaining offsets are all within BMP_PEER_HEADER_SIZE).
func (h *BMPPeerHeader) DecodeFromBytes(data []byte) error {
	if len(data) < BMP_PEER_HEADER_SIZE {
		return fmt.Errorf("not all BMP peer header bytes available")
	}
	h.PeerType = data[0]
	h.Flags = data[1]
	// L flag (bit 6): statistics reflect the post-policy Adj-RIB-In.
	if h.Flags&(1<<6) != 0 {
		h.IsPostPolicy = true
	} else {
		h.IsPostPolicy = false
	}
	h.PeerDistinguisher = binary.BigEndian.Uint64(data[2:10])
	// V flag (bit 7): address field holds IPv6; otherwise IPv4 occupies the
	// last 4 bytes of the 16-byte field.
	if h.Flags&(1<<7) != 0 {
		h.PeerAddress = net.IP(data[10:26]).To16()
	} else {
		h.PeerAddress = net.IP(data[22:26]).To4()
	}
	h.PeerAS = binary.BigEndian.Uint32(data[26:30])
	h.PeerBGPID = data[30:34]
	// Timestamp is seconds + microseconds, recombined into a float.
	timestamp1 := binary.BigEndian.Uint32(data[34:38])
	timestamp2 := binary.BigEndian.Uint32(data[38:42])
	h.Timestamp = float64(timestamp1) + float64(timestamp2)*math.Pow10(-6)
	return nil
}
// Serialize encodes the per-peer header into a fixed 42-byte buffer,
// mirroring the layout read by DecodeFromBytes.
func (h *BMPPeerHeader) Serialize() ([]byte, error) {
	buf := make([]byte, BMP_PEER_HEADER_SIZE)
	buf[0] = h.PeerType
	buf[1] = h.Flags
	binary.BigEndian.PutUint64(buf[2:10], h.PeerDistinguisher)
	// V flag (bit 7) set: IPv6 address fills the 16-byte field; otherwise
	// the IPv4 address goes in its last 4 bytes.
	if h.Flags&(1<<7) != 0 {
		copy(buf[10:26], h.PeerAddress)
	} else {
		copy(buf[22:26], h.PeerAddress.To4())
	}
	binary.BigEndian.PutUint32(buf[26:30], h.PeerAS)
	copy(buf[30:34], h.PeerBGPID)
	// Split the float timestamp into whole seconds and microseconds.
	t1, t2 := math.Modf(h.Timestamp)
	t2 = math.Ceil(t2 * math.Pow10(6))
	binary.BigEndian.PutUint32(buf[34:38], uint32(t1))
	binary.BigEndian.PutUint32(buf[38:42], uint32(t2))
	return buf, nil
}
type BMPRouteMonitoring struct {
BGPUpdate *bgp.BGPMessage
BGPUpdatePayload []byte
}
func NewBMPRouteMonitoring(p BMPPeerHeader, update *bgp.BGPMessage) *BMPMessage {
return &BMPMessage{
Header: BMPHeader{
Version: BMP_VERSION,
Type: BMP_MSG_ROUTE_MONITORING,
},
PeerHeader: p,
Body: &BMPRouteMonitoring{
BGPUpdate: update,
},
}
}
// ParseBody decodes the encapsulated BGP UPDATE carried by a route
// monitoring message; any parse error from the BGP layer is returned as-is.
func (body *BMPRouteMonitoring) ParseBody(msg *BMPMessage, data []byte) error {
	update, err := bgp.ParseBGPMessage(data)
	if err != nil {
		return err
	}
	body.BGPUpdate = update
	return nil
}
// Serialize returns the raw BGP UPDATE bytes, preferring the pre-encoded
// payload when one is cached over re-serializing the parsed message.
func (body *BMPRouteMonitoring) Serialize() ([]byte, error) {
	if payload := body.BGPUpdatePayload; payload != nil {
		return payload, nil
	}
	return body.BGPUpdate.Serialize()
}
const (
BMP_STAT_TYPE_REJECTED = iota
BMP_STAT_TYPE_DUPLICATE_PREFIX
BMP_STAT_TYPE_DUPLICATE_WITHDRAW
BMP_STAT_TYPE_INV_UPDATE_DUE_TO_CLUSTER_LIST_LOOP
BMP_STAT_TYPE_INV_UPDATE_DUE_TO_AS_PATH_LOOP
BMP_STAT_TYPE_INV_UPDATE_DUE_TO_ORIGINATOR_ID
BMP_STAT_TYPE_INV_UPDATE_DUE_TO_AS_CONFED_LOOP
BMP_STAT_TYPE_ADJ_RIB_IN
BMP_STAT_TYPE_LOC_RIB
)
type BMPStatsTLV struct {
Type uint16
Length uint16
Value uint64
}
type BMPStatisticsReport struct {
Count uint32
Stats []BMPStatsTLV
}
const (
BMP_PEER_DOWN_REASON_UNKNOWN = iota
BMP_PEER_DOWN_REASON_LOCAL_BGP_NOTIFICATION
BMP_PEER_DOWN_REASON_LOCAL_NO_NOTIFICATION
BMP_PEER_DOWN_REASON_REMOTE_BGP_NOTIFICATION
BMP_PEER_DOWN_REASON_REMOTE_NO_NOTIFICATION
)
type BMPPeerDownNotification struct {
Reason uint8
BGPNotification *bgp.BGPMessage
Data []byte
}
func NewBMPPeerDownNotification(p BMPPeerHeader, reason uint8, notification *bgp.BGPMessage, data []byte) *BMPMessage {
b := &BMPPeerDownNotification{
Reason: reason,
}
switch reason {
case BMP_PEER_DOWN_REASON_LOCAL_BGP_NOTIFICATION, BMP_PEER_DOWN_REASON_REMOTE_BGP_NOTIFICATION:
b.BGPNotification = notification
default:
b.Data = data
}
return &BMPMessage{
Header: BMPHeader{
Version: BMP_VERSION,
Type: BMP_MSG_PEER_DOWN_NOTIFICATION,
},
PeerHeader: p,
Body: b,
}
}
// ParseBody decodes a peer down notification: a one-byte reason, followed
// either by a BGP NOTIFICATION message (for the two BGP-notification
// reasons) or by opaque reason data. Returns an error on an empty body
// instead of panicking on data[0].
func (body *BMPPeerDownNotification) ParseBody(msg *BMPMessage, data []byte) error {
	if len(data) < 1 {
		return fmt.Errorf("not all BMP peer down notification bytes available")
	}
	body.Reason = data[0]
	data = data[1:]
	if body.Reason == BMP_PEER_DOWN_REASON_LOCAL_BGP_NOTIFICATION || body.Reason == BMP_PEER_DOWN_REASON_REMOTE_BGP_NOTIFICATION {
		notification, err := bgp.ParseBGPMessage(data)
		if err != nil {
			return err
		}
		body.BGPNotification = notification
	} else {
		body.Data = data
	}
	return nil
}
// Serialize encodes the reason byte followed by either the BGP NOTIFICATION
// (for the two BGP-notification reasons) or the opaque reason data; either
// trailer may be absent.
func (body *BMPPeerDownNotification) Serialize() ([]byte, error) {
	buf := make([]byte, 1)
	buf[0] = body.Reason
	switch body.Reason {
	case BMP_PEER_DOWN_REASON_LOCAL_BGP_NOTIFICATION, BMP_PEER_DOWN_REASON_REMOTE_BGP_NOTIFICATION:
		if body.BGPNotification != nil {
			b, err := body.BGPNotification.Serialize()
			// Idiom: no else after a terminating return.
			if err != nil {
				return nil, err
			}
			buf = append(buf, b...)
		}
	default:
		if body.Data != nil {
			buf = append(buf, body.Data...)
		}
	}
	return buf, nil
}
type BMPPeerUpNotification struct {
LocalAddress net.IP
LocalPort uint16
RemotePort uint16
SentOpenMsg *bgp.BGPMessage
ReceivedOpenMsg *bgp.BGPMessage
}
func NewBMPPeerUpNotification(p BMPPeerHeader, lAddr string, lPort, rPort uint16, sent, recv *bgp.BGPMessage) *BMPMessage {
b := &BMPPeerUpNotification{
LocalPort: lPort,
RemotePort: rPort,
SentOpenMsg: sent,
ReceivedOpenMsg: recv,
}
addr := net.ParseIP(lAddr)
if addr.To4() != nil {
b.LocalAddress = addr.To4()
} else {
b.LocalAddress = addr.To16()
}
return &BMPMessage{
Header: BMPHeader{
Version: BMP_VERSION,
Type: BMP_MSG_PEER_UP_NOTIFICATION,
},
PeerHeader: p,
Body: b,
}
}
// ParseBody decodes a peer up notification: a 16-byte local address field
// (IPv4 in its last 4 bytes unless the peer header's V flag is set), local
// and remote ports, then the sent and received BGP OPEN messages back to
// back. Returns errors on truncated input instead of panicking on
// out-of-range slicing.
func (body *BMPPeerUpNotification) ParseBody(msg *BMPMessage, data []byte) error {
	if len(data) < 20 {
		return fmt.Errorf("not all BMP peer up notification bytes available")
	}
	if msg.PeerHeader.Flags&(1<<7) != 0 {
		body.LocalAddress = net.IP(data[:16]).To16()
	} else {
		body.LocalAddress = net.IP(data[12:16]).To4()
	}
	body.LocalPort = binary.BigEndian.Uint16(data[16:18])
	body.RemotePort = binary.BigEndian.Uint16(data[18:20])
	data = data[20:]
	sentopen, err := bgp.ParseBGPMessage(data)
	if err != nil {
		return err
	}
	body.SentOpenMsg = sentopen
	// Guard the advance past the sent OPEN: a lying length field must not
	// panic the slice expression below.
	if int(body.SentOpenMsg.Header.Len) > len(data) {
		return fmt.Errorf("not all BMP peer up notification bytes available")
	}
	data = data[body.SentOpenMsg.Header.Len:]
	body.ReceivedOpenMsg, err = bgp.ParseBGPMessage(data)
	if err != nil {
		return err
	}
	return nil
}
func (body *BMPPeerUpNotification) Serialize() ([]byte, error) {
buf := make([]byte, 20)
if body.LocalAddress.To4() != nil {
copy(buf[12:16], body.LocalAddress.To4())
} else {
copy(buf[:16], body.LocalAddress.To16())
}
binary.BigEndian.PutUint16(buf[16:18], body.LocalPort)
binary.BigEndian.PutUint16(buf[18:20], body.RemotePort)
m, _ := body.SentOpenMsg.Serialize()
buf = append(buf, m...)
m, _ = body.ReceivedOpenMsg.Serialize()
buf = append(buf, m...)
return buf, nil
}
// ParseBody decodes a statistics report: a 4-byte count followed by TLVs.
// 64-bit gauge types (Adj-RIB-In, Loc-RIB) carry 8-byte values; the others
// carry 4-byte counters. Truncated or undersized TLVs end parsing instead
// of panicking.
func (body *BMPStatisticsReport) ParseBody(msg *BMPMessage, data []byte) error {
	if len(data) < 4 {
		return fmt.Errorf("not all BMP statistics report bytes available")
	}
	body.Count = binary.BigEndian.Uint32(data[0:4])
	data = data[4:]
	for len(data) >= 4 {
		s := BMPStatsTLV{}
		s.Type = binary.BigEndian.Uint16(data[0:2])
		s.Length = binary.BigEndian.Uint16(data[2:4])
		data = data[4:]
		if len(data) < int(s.Length) {
			break
		}
		if s.Type == BMP_STAT_TYPE_ADJ_RIB_IN || s.Type == BMP_STAT_TYPE_LOC_RIB {
			if s.Length < 8 {
				break
			}
			s.Value = binary.BigEndian.Uint64(data[:8])
		} else {
			if s.Length < 4 {
				break
			}
			s.Value = uint64(binary.BigEndian.Uint32(data[:4]))
		}
		body.Stats = append(body.Stats, s)
		data = data[s.Length:]
	}
	return nil
}
func (body *BMPStatisticsReport) Serialize() ([]byte, error) {
// TODO
buf := make([]byte, 4)
body.Count = uint32(len(body.Stats))
binary.BigEndian.PutUint32(buf[0:4], body.Count)
return buf, nil
}
type BMPTLV struct {
Type uint16
Length uint16
Value []byte
}
func NewBMPTLV(t uint16, v []byte) *BMPTLV {
return &BMPTLV{
Type: t,
Length: uint16(len(v)),
Value: v,
}
}
// DecodeFromBytes parses one TLV (2-byte type, 2-byte length, value).
// Implements the former TODO: both the 4-byte header and the declared value
// length are validated, so truncated input returns an error instead of
// panicking on the value slice.
func (tlv *BMPTLV) DecodeFromBytes(data []byte) error {
	if len(data) < 4 {
		return fmt.Errorf("not all BMP TLV header bytes available")
	}
	tlv.Type = binary.BigEndian.Uint16(data[0:2])
	tlv.Length = binary.BigEndian.Uint16(data[2:4])
	if len(data) < 4+int(tlv.Length) {
		return fmt.Errorf("not all BMP TLV value bytes available")
	}
	tlv.Value = data[4 : 4+tlv.Length]
	return nil
}
func (tlv *BMPTLV) Serialize() ([]byte, error) {
if tlv.Length == 0 {
tlv.Length = uint16(len(tlv.Value))
}
buf := make([]byte, 4+tlv.Length)
binary.BigEndian.PutUint16(buf[0:2], tlv.Type)
binary.BigEndian.PutUint16(buf[2:4], tlv.Length)
copy(buf[4:], tlv.Value)
return buf, nil
}
func (tlv *BMPTLV) Len() int {
return 4 + int(tlv.Length)
}
type BMPInitiation struct {
Info []BMPTLV
}
func NewBMPInitiation(info []BMPTLV) *BMPMessage {
return &BMPMessage{
Header: BMPHeader{
Version: BMP_VERSION,
Type: BMP_MSG_INITIATION,
},
Body: &BMPInitiation{
Info: info,
},
}
}
func (body *BMPInitiation) ParseBody(msg *BMPMessage, data []byte) error {
for len(data) > 0 {
tlv := BMPTLV{}
tlv.DecodeFromBytes(data)
body.Info = append(body.Info, tlv)
data = data[tlv.Len():]
}
return nil
}
func (body *BMPInitiation) Serialize() ([]byte, error) {
buf := make([]byte, 0)
for _, tlv := range body.Info {
b, err := tlv.Serialize()
if err != nil {
return buf, err
}
buf = append(buf, b...)
}
return buf, nil
}
type BMPTermination struct {
Info []BMPTLV
}
func NewBMPTermination(info []BMPTLV) *BMPMessage {
return &BMPMessage{
Header: BMPHeader{
Version: BMP_VERSION,
Type: BMP_MSG_TERMINATION,
},
Body: &BMPTermination{
Info: info,
},
}
}
func (body *BMPTermination) ParseBody(msg *BMPMessage, data []byte) error {
for len(data) > 0 {
tlv := BMPTLV{}
tlv.DecodeFromBytes(data)
body.Info = append(body.Info, tlv)
data = data[tlv.Len():]
}
return nil
}
func (body *BMPTermination) Serialize() ([]byte, error) {
buf := make([]byte, 0)
for _, tlv := range body.Info {
b, err := tlv.Serialize()
if err != nil {
return buf, err
}
buf = append(buf, b...)
}
return buf, nil
}
type BMPBody interface {
// Sigh, some body messages need a BMPHeader to parse the body
// data so we need to pass BMPHeader (avoid DecodeFromBytes
// function name).
ParseBody(*BMPMessage, []byte) error
Serialize() ([]byte, error)
}
type BMPMessage struct {
Header BMPHeader
PeerHeader BMPPeerHeader
Body BMPBody
}
func (msg *BMPMessage) Serialize() ([]byte, error) {
buf := make([]byte, 0)
if msg.Header.Type != BMP_MSG_INITIATION {
p, err := msg.PeerHeader.Serialize()
if err != nil {
return nil, err
}
buf = append(buf, p...)
}
b, err := msg.Body.Serialize()
if err != nil {
return nil, err
}
buf = append(buf, b...)
if msg.Header.Length == 0 {
msg.Header.Length = uint32(BMP_HEADER_SIZE + len(buf))
}
h, err := msg.Header.Serialize()
if err != nil {
return nil, err
}
return append(h, buf...), nil
}
func (msg *BMPMessage) Len() int {
return int(msg.Header.Length)
}
const (
BMP_MSG_ROUTE_MONITORING = iota
BMP_MSG_STATISTICS_REPORT
BMP_MSG_PEER_DOWN_NOTIFICATION
BMP_MSG_PEER_UP_NOTIFICATION
BMP_MSG_INITIATION
BMP_MSG_TERMINATION
)
// ParseBMPMessage decodes one complete BMP message from data: common
// header, then (except for INITIATION) the per-peer header, then the
// type-specific body. It validates the declared length and message type so
// malformed input returns an error instead of panicking (out-of-range slice
// or nil-Body method call).
func ParseBMPMessage(data []byte) (*BMPMessage, error) {
	msg := &BMPMessage{}
	err := msg.Header.DecodeFromBytes(data)
	if err != nil {
		return nil, err
	}
	// The declared length must cover at least the header and must not
	// exceed the bytes we actually have.
	if msg.Header.Length < BMP_HEADER_SIZE || uint32(len(data)) < msg.Header.Length {
		return nil, fmt.Errorf("not all BMP message bytes available")
	}
	data = data[BMP_HEADER_SIZE:msg.Header.Length]
	switch msg.Header.Type {
	case BMP_MSG_ROUTE_MONITORING:
		msg.Body = &BMPRouteMonitoring{}
	case BMP_MSG_STATISTICS_REPORT:
		msg.Body = &BMPStatisticsReport{}
	case BMP_MSG_PEER_DOWN_NOTIFICATION:
		msg.Body = &BMPPeerDownNotification{}
	case BMP_MSG_PEER_UP_NOTIFICATION:
		msg.Body = &BMPPeerUpNotification{}
	case BMP_MSG_INITIATION:
		msg.Body = &BMPInitiation{}
	case BMP_MSG_TERMINATION:
		msg.Body = &BMPTermination{}
	default:
		// Previously fell through with a nil Body and panicked below.
		return nil, fmt.Errorf("unsupported BMP message type %d", msg.Header.Type)
	}
	if msg.Header.Type != BMP_MSG_INITIATION {
		if len(data) < BMP_PEER_HEADER_SIZE {
			return nil, fmt.Errorf("not all BMP peer header bytes available")
		}
		msg.PeerHeader.DecodeFromBytes(data)
		data = data[BMP_PEER_HEADER_SIZE:]
	}
	err = msg.Body.ParseBody(msg, data)
	if err != nil {
		return nil, err
	}
	return msg, nil
}
// SplitBMP is a bufio.SplitFunc that tokenizes a byte stream into whole BMP
// messages using the length field of the common header. It requests more
// data (0, nil, nil) until a full message is buffered. A header that fails
// to decode (e.g. wrong version) now terminates the scanner with an error
// instead of being silently ignored and trusting a garbage length field.
func SplitBMP(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 || len(data) < BMP_HEADER_SIZE {
		return 0, nil, nil
	}
	msg := &BMPMessage{}
	if err := msg.Header.DecodeFromBytes(data); err != nil {
		return 0, nil, err
	}
	if uint32(len(data)) < msg.Header.Length {
		return 0, nil, nil
	}
	return int(msg.Header.Length), data[0:msg.Header.Length], nil
}
packet: Avoid panic() during decoding BMP messages
// Copyright (C) 2014,2015 Nippon Telegraph and Telephone Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bmp
import (
"encoding/binary"
"fmt"
"github.com/osrg/gobgp/packet/bgp"
"math"
"net"
)
type BMPHeader struct {
Version uint8
Length uint32
Type uint8
}
const (
BMP_VERSION = 3
BMP_HEADER_SIZE = 6
BMP_PEER_HEADER_SIZE = 42
)
const (
BMP_DEFAULT_PORT = 11019
)
const (
BMP_PEER_TYPE_GLOBAL uint8 = iota
BMP_PEER_TYPE_L3VPN
)
// DecodeFromBytes parses the fixed 6-byte BMP common header (version,
// total message length, message type) from data.
//
// NOTE(review): data is indexed without a length check — callers must
// guarantee at least BMP_HEADER_SIZE bytes or this panics on truncated
// input.
func (h *BMPHeader) DecodeFromBytes(data []byte) error {
	h.Version = data[0]
	if data[0] != BMP_VERSION {
		return fmt.Errorf("error version")
	}
	h.Length = binary.BigEndian.Uint32(data[1:5])
	h.Type = data[5]
	return nil
}
func (h *BMPHeader) Serialize() ([]byte, error) {
buf := make([]byte, BMP_HEADER_SIZE)
buf[0] = h.Version
binary.BigEndian.PutUint32(buf[1:], h.Length)
buf[5] = h.Type
return buf, nil
}
type BMPPeerHeader struct {
PeerType uint8
IsPostPolicy bool
PeerDistinguisher uint64
PeerAddress net.IP
PeerAS uint32
PeerBGPID net.IP
Timestamp float64
Flags uint8
}
// NewBMPPeerHeader constructs a BMP per-peer header.
// policy sets the L (post-policy) flag; an address that does not parse as
// IPv4 is stored as a 16-byte value and sets the V (IPv6) flag. The BGP ID
// is stored as a 4-byte IPv4 value.
func NewBMPPeerHeader(t uint8, policy bool, dist uint64, address string, as uint32, id string, stamp float64) *BMPPeerHeader {
	h := &BMPPeerHeader{
		PeerType:          t,
		IsPostPolicy:      policy,
		PeerDistinguisher: dist,
		PeerAS:            as,
		PeerBGPID:         net.ParseIP(id).To4(),
		Timestamp:         stamp,
	}
	if policy == true {
		// L flag: statistics reflect the post-policy Adj-RIB-In.
		h.Flags |= (1 << 6)
	}
	if net.ParseIP(address).To4() != nil {
		h.PeerAddress = net.ParseIP(address).To4()
	} else {
		// Not IPv4: store as IPv6 and set the V flag.
		h.PeerAddress = net.ParseIP(address).To16()
		h.Flags |= (1 << 7)
	}
	return h
}
func (h *BMPPeerHeader) DecodeFromBytes(data []byte) error {
h.PeerType = data[0]
h.Flags = data[1]
if h.Flags&(1<<6) != 0 {
h.IsPostPolicy = true
} else {
h.IsPostPolicy = false
}
h.PeerDistinguisher = binary.BigEndian.Uint64(data[2:10])
if h.Flags&(1<<7) != 0 {
h.PeerAddress = net.IP(data[10:26]).To16()
} else {
h.PeerAddress = net.IP(data[22:26]).To4()
}
h.PeerAS = binary.BigEndian.Uint32(data[26:30])
h.PeerBGPID = data[30:34]
timestamp1 := binary.BigEndian.Uint32(data[34:38])
timestamp2 := binary.BigEndian.Uint32(data[38:42])
h.Timestamp = float64(timestamp1) + float64(timestamp2)*math.Pow10(-6)
return nil
}
func (h *BMPPeerHeader) Serialize() ([]byte, error) {
buf := make([]byte, BMP_PEER_HEADER_SIZE)
buf[0] = h.PeerType
buf[1] = h.Flags
binary.BigEndian.PutUint64(buf[2:10], h.PeerDistinguisher)
if h.Flags&(1<<7) != 0 {
copy(buf[10:26], h.PeerAddress)
} else {
copy(buf[22:26], h.PeerAddress.To4())
}
binary.BigEndian.PutUint32(buf[26:30], h.PeerAS)
copy(buf[30:34], h.PeerBGPID)
t1, t2 := math.Modf(h.Timestamp)
t2 = math.Ceil(t2 * math.Pow10(6))
binary.BigEndian.PutUint32(buf[34:38], uint32(t1))
binary.BigEndian.PutUint32(buf[38:42], uint32(t2))
return buf, nil
}
type BMPRouteMonitoring struct {
BGPUpdate *bgp.BGPMessage
BGPUpdatePayload []byte
}
func NewBMPRouteMonitoring(p BMPPeerHeader, update *bgp.BGPMessage) *BMPMessage {
return &BMPMessage{
Header: BMPHeader{
Version: BMP_VERSION,
Type: BMP_MSG_ROUTE_MONITORING,
},
PeerHeader: p,
Body: &BMPRouteMonitoring{
BGPUpdate: update,
},
}
}
func (body *BMPRouteMonitoring) ParseBody(msg *BMPMessage, data []byte) error {
update, err := bgp.ParseBGPMessage(data)
if err != nil {
return err
}
body.BGPUpdate = update
return nil
}
func (body *BMPRouteMonitoring) Serialize() ([]byte, error) {
if body.BGPUpdatePayload != nil {
return body.BGPUpdatePayload, nil
}
return body.BGPUpdate.Serialize()
}
const (
BMP_STAT_TYPE_REJECTED = iota
BMP_STAT_TYPE_DUPLICATE_PREFIX
BMP_STAT_TYPE_DUPLICATE_WITHDRAW
BMP_STAT_TYPE_INV_UPDATE_DUE_TO_CLUSTER_LIST_LOOP
BMP_STAT_TYPE_INV_UPDATE_DUE_TO_AS_PATH_LOOP
BMP_STAT_TYPE_INV_UPDATE_DUE_TO_ORIGINATOR_ID
BMP_STAT_TYPE_INV_UPDATE_DUE_TO_AS_CONFED_LOOP
BMP_STAT_TYPE_ADJ_RIB_IN
BMP_STAT_TYPE_LOC_RIB
)
type BMPStatsTLV struct {
Type uint16
Length uint16
Value uint64
}
type BMPStatisticsReport struct {
Count uint32
Stats []BMPStatsTLV
}
const (
BMP_PEER_DOWN_REASON_UNKNOWN = iota
BMP_PEER_DOWN_REASON_LOCAL_BGP_NOTIFICATION
BMP_PEER_DOWN_REASON_LOCAL_NO_NOTIFICATION
BMP_PEER_DOWN_REASON_REMOTE_BGP_NOTIFICATION
BMP_PEER_DOWN_REASON_REMOTE_NO_NOTIFICATION
)
type BMPPeerDownNotification struct {
Reason uint8
BGPNotification *bgp.BGPMessage
Data []byte
}
func NewBMPPeerDownNotification(p BMPPeerHeader, reason uint8, notification *bgp.BGPMessage, data []byte) *BMPMessage {
b := &BMPPeerDownNotification{
Reason: reason,
}
switch reason {
case BMP_PEER_DOWN_REASON_LOCAL_BGP_NOTIFICATION, BMP_PEER_DOWN_REASON_REMOTE_BGP_NOTIFICATION:
b.BGPNotification = notification
default:
b.Data = data
}
return &BMPMessage{
Header: BMPHeader{
Version: BMP_VERSION,
Type: BMP_MSG_PEER_DOWN_NOTIFICATION,
},
PeerHeader: p,
Body: b,
}
}
func (body *BMPPeerDownNotification) ParseBody(msg *BMPMessage, data []byte) error {
body.Reason = data[0]
data = data[1:]
if body.Reason == BMP_PEER_DOWN_REASON_LOCAL_BGP_NOTIFICATION || body.Reason == BMP_PEER_DOWN_REASON_REMOTE_BGP_NOTIFICATION {
notification, err := bgp.ParseBGPMessage(data)
if err != nil {
return err
}
body.BGPNotification = notification
} else {
body.Data = data
}
return nil
}
func (body *BMPPeerDownNotification) Serialize() ([]byte, error) {
buf := make([]byte, 1)
buf[0] = body.Reason
switch body.Reason {
case BMP_PEER_DOWN_REASON_LOCAL_BGP_NOTIFICATION, BMP_PEER_DOWN_REASON_REMOTE_BGP_NOTIFICATION:
if body.BGPNotification != nil {
b, err := body.BGPNotification.Serialize()
if err != nil {
return nil, err
} else {
buf = append(buf, b...)
}
}
default:
if body.Data != nil {
buf = append(buf, body.Data...)
}
}
return buf, nil
}
type BMPPeerUpNotification struct {
LocalAddress net.IP
LocalPort uint16
RemotePort uint16
SentOpenMsg *bgp.BGPMessage
ReceivedOpenMsg *bgp.BGPMessage
}
func NewBMPPeerUpNotification(p BMPPeerHeader, lAddr string, lPort, rPort uint16, sent, recv *bgp.BGPMessage) *BMPMessage {
b := &BMPPeerUpNotification{
LocalPort: lPort,
RemotePort: rPort,
SentOpenMsg: sent,
ReceivedOpenMsg: recv,
}
addr := net.ParseIP(lAddr)
if addr.To4() != nil {
b.LocalAddress = addr.To4()
} else {
b.LocalAddress = addr.To16()
}
return &BMPMessage{
Header: BMPHeader{
Version: BMP_VERSION,
Type: BMP_MSG_PEER_UP_NOTIFICATION,
},
PeerHeader: p,
Body: b,
}
}
// ParseBody decodes a peer up notification: a 16-byte local address field
// (IPv4 in its last 4 bytes unless the peer header's V flag is set), local
// and remote ports, then the sent and received BGP OPEN messages back to
// back.
//
// NOTE(review): there are no length checks — truncated input panics on the
// fixed-offset slices or on the advance past the sent OPEN message.
func (body *BMPPeerUpNotification) ParseBody(msg *BMPMessage, data []byte) error {
	if msg.PeerHeader.Flags&(1<<7) != 0 {
		body.LocalAddress = net.IP(data[:16]).To16()
	} else {
		body.LocalAddress = net.IP(data[12:16]).To4()
	}
	body.LocalPort = binary.BigEndian.Uint16(data[16:18])
	body.RemotePort = binary.BigEndian.Uint16(data[18:20])
	data = data[20:]
	sentopen, err := bgp.ParseBGPMessage(data)
	if err != nil {
		return err
	}
	body.SentOpenMsg = sentopen
	// Skip past the sent OPEN using its declared header length.
	data = data[body.SentOpenMsg.Header.Len:]
	body.ReceivedOpenMsg, err = bgp.ParseBGPMessage(data)
	if err != nil {
		return err
	}
	return nil
}
func (body *BMPPeerUpNotification) Serialize() ([]byte, error) {
buf := make([]byte, 20)
if body.LocalAddress.To4() != nil {
copy(buf[12:16], body.LocalAddress.To4())
} else {
copy(buf[:16], body.LocalAddress.To16())
}
binary.BigEndian.PutUint16(buf[16:18], body.LocalPort)
binary.BigEndian.PutUint16(buf[18:20], body.RemotePort)
m, _ := body.SentOpenMsg.Serialize()
buf = append(buf, m...)
m, _ = body.ReceivedOpenMsg.Serialize()
buf = append(buf, m...)
return buf, nil
}
// ParseBody decodes a Statistics Report body: a 4-byte stat count
// followed by stat TLVs. The ADJ_RIB_IN / LOC_RIB route-count stats
// carry 64-bit values; every other stat type carries a 32-bit counter.
// Truncated or undersized TLVs terminate the loop silently rather than
// returning an error.
func (body *BMPStatisticsReport) ParseBody(msg *BMPMessage, data []byte) error {
	body.Count = binary.BigEndian.Uint32(data[0:4])
	data = data[4:]
	for len(data) >= 4 {
		s := BMPStatsTLV{}
		s.Type = binary.BigEndian.Uint16(data[0:2])
		s.Length = binary.BigEndian.Uint16(data[2:4])
		data = data[4:]
		if len(data) < int(s.Length) {
			break // truncated TLV value
		}
		if s.Type == BMP_STAT_TYPE_ADJ_RIB_IN || s.Type == BMP_STAT_TYPE_LOC_RIB {
			if s.Length < 8 {
				break // malformed: these stat types need a 64-bit value
			}
			s.Value = binary.BigEndian.Uint64(data[:8])
		} else {
			if s.Length < 4 {
				break // malformed: counters need at least 32 bits
			}
			s.Value = uint64(binary.BigEndian.Uint32(data[:4]))
		}
		body.Stats = append(body.Stats, s)
		data = data[s.Length:]
	}
	return nil
}
// Serialize encodes the Statistics Report body: the stat count followed
// by one TLV per stat. Value widths mirror ParseBody — 64 bits for the
// ADJ_RIB_IN / LOC_RIB gauges, 32 bits for everything else.
//
// Fix: previously only the count was emitted and the TLVs were left as
// a TODO, so a serialized report could not round-trip through ParseBody.
func (body *BMPStatisticsReport) Serialize() ([]byte, error) {
	body.Count = uint32(len(body.Stats))
	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf[0:4], body.Count)
	for _, s := range body.Stats {
		var vlen uint16 = 4
		if s.Type == BMP_STAT_TYPE_ADJ_RIB_IN || s.Type == BMP_STAT_TYPE_LOC_RIB {
			vlen = 8
		}
		tlv := make([]byte, 4+vlen)
		binary.BigEndian.PutUint16(tlv[0:2], s.Type)
		binary.BigEndian.PutUint16(tlv[2:4], vlen)
		if vlen == 8 {
			binary.BigEndian.PutUint64(tlv[4:], s.Value)
		} else {
			binary.BigEndian.PutUint32(tlv[4:], uint32(s.Value))
		}
		buf = append(buf, tlv...)
	}
	return buf, nil
}
// BMPTLV is a generic BMP type-length-value field, used by the
// Initiation and Termination message bodies.
type BMPTLV struct {
	Type   uint16 // information type code
	Length uint16 // length of Value in bytes
	Value  []byte // raw value bytes
}
// NewBMPTLV returns a TLV of the given type whose Length is derived
// from the supplied value.
func NewBMPTLV(t uint16, v []byte) *BMPTLV {
	tlv := &BMPTLV{Type: t, Value: v}
	tlv.Length = uint16(len(v))
	return tlv
}
// DecodeFromBytes parses one TLV from the front of data.
//
// Fix: resolves the former "check data length" TODO — short input used
// to panic on out-of-range slicing; it now returns a descriptive error.
func (tlv *BMPTLV) DecodeFromBytes(data []byte) error {
	if len(data) < 4 {
		return fmt.Errorf("BMP TLV: %d bytes is too short for a TLV header", len(data))
	}
	tlv.Type = binary.BigEndian.Uint16(data[0:2])
	tlv.Length = binary.BigEndian.Uint16(data[2:4])
	if len(data) < 4+int(tlv.Length) {
		return fmt.Errorf("BMP TLV: value truncated: need %d bytes, have %d", tlv.Length, len(data)-4)
	}
	tlv.Value = data[4 : 4+tlv.Length]
	return nil
}
// Serialize encodes the TLV as type(2) + length(2) + value. A zero
// Length is first recomputed from the value.
func (tlv *BMPTLV) Serialize() ([]byte, error) {
	if tlv.Length == 0 {
		tlv.Length = uint16(len(tlv.Value))
	}
	out := make([]byte, 4+int(tlv.Length))
	binary.BigEndian.PutUint16(out[:2], tlv.Type)
	binary.BigEndian.PutUint16(out[2:4], tlv.Length)
	copy(out[4:], tlv.Value)
	return out, nil
}
// Len reports the encoded size of the TLV: the 4-byte header plus the
// declared value length.
func (tlv *BMPTLV) Len() int {
	return int(tlv.Length) + 4
}
// BMPInitiation is the body of a BMP Initiation message: a sequence of
// information TLVs describing the monitored router.
type BMPInitiation struct {
	Info []BMPTLV // information TLVs, in wire order
}
// NewBMPInitiation wraps the given information TLVs in a complete BMP
// Initiation message.
func NewBMPInitiation(info []BMPTLV) *BMPMessage {
	body := &BMPInitiation{Info: info}
	return &BMPMessage{
		Header: BMPHeader{Version: BMP_VERSION, Type: BMP_MSG_INITIATION},
		Body:   body,
	}
}
// ParseBody decodes the Initiation body as a sequence of TLVs.
//
// Fix: the error returned by DecodeFromBytes was silently discarded and
// a TLV claiming more bytes than remain could slice past the body; both
// now return an error instead.
func (body *BMPInitiation) ParseBody(msg *BMPMessage, data []byte) error {
	for len(data) > 0 {
		tlv := BMPTLV{}
		if err := tlv.DecodeFromBytes(data); err != nil {
			return err
		}
		if tlv.Len() > len(data) {
			return fmt.Errorf("BMP initiation: TLV length %d exceeds remaining body %d", tlv.Len(), len(data))
		}
		body.Info = append(body.Info, tlv)
		data = data[tlv.Len():]
	}
	return nil
}
// Serialize encodes all information TLVs back to back.
func (body *BMPInitiation) Serialize() ([]byte, error) {
	buf := make([]byte, 0)
	for i := range body.Info {
		b, err := body.Info[i].Serialize()
		if err != nil {
			return buf, err
		}
		buf = append(buf, b...)
	}
	return buf, nil
}
// BMPTermination is the body of a BMP Termination message: a sequence
// of information TLVs explaining why the session is ending.
type BMPTermination struct {
	Info []BMPTLV // information TLVs, in wire order
}
// NewBMPTermination wraps the given information TLVs in a complete BMP
// Termination message.
func NewBMPTermination(info []BMPTLV) *BMPMessage {
	body := &BMPTermination{Info: info}
	return &BMPMessage{
		Header: BMPHeader{Version: BMP_VERSION, Type: BMP_MSG_TERMINATION},
		Body:   body,
	}
}
// ParseBody decodes the Termination body as a sequence of TLVs.
//
// Fix: the error returned by DecodeFromBytes was silently discarded and
// a TLV claiming more bytes than remain could slice past the body; both
// now return an error instead.
func (body *BMPTermination) ParseBody(msg *BMPMessage, data []byte) error {
	for len(data) > 0 {
		tlv := BMPTLV{}
		if err := tlv.DecodeFromBytes(data); err != nil {
			return err
		}
		if tlv.Len() > len(data) {
			return fmt.Errorf("BMP termination: TLV length %d exceeds remaining body %d", tlv.Len(), len(data))
		}
		body.Info = append(body.Info, tlv)
		data = data[tlv.Len():]
	}
	return nil
}
// Serialize encodes all information TLVs back to back.
func (body *BMPTermination) Serialize() ([]byte, error) {
	buf := make([]byte, 0)
	for i := range body.Info {
		b, err := body.Info[i].Serialize()
		if err != nil {
			return buf, err
		}
		buf = append(buf, b...)
	}
	return buf, nil
}
// BMPBody is the message-type-specific payload of a BMP message.
type BMPBody interface {
	// Sigh, some body messages need a BMPHeader to parse the body
	// data so we need to pass BMPHeader (avoid DecodeFromBytes
	// function name).
	ParseBody(*BMPMessage, []byte) error
	// Serialize returns the wire encoding of the body (without the
	// common or per-peer headers).
	Serialize() ([]byte, error)
}
// BMPMessage is a complete BMP protocol message: the common header, the
// per-peer header (absent on the wire for Initiation messages), and the
// type-specific body.
type BMPMessage struct {
	Header     BMPHeader     // common header (version, length, type)
	PeerHeader BMPPeerHeader // per-peer header; unused for Initiation
	Body       BMPBody       // type-specific payload
}
// Serialize encodes a full BMP message. The per-peer header (skipped
// for Initiation messages) and the body are encoded first so that
// Header.Length can be derived from the encoded size; a pre-set
// non-zero Header.Length is left untouched.
//
// NOTE(review): Termination messages are also encoded with a per-peer
// header here; later BMP specifications omit it — confirm against the
// draft revision this code targets.
func (msg *BMPMessage) Serialize() ([]byte, error) {
	buf := make([]byte, 0)
	if msg.Header.Type != BMP_MSG_INITIATION {
		p, err := msg.PeerHeader.Serialize()
		if err != nil {
			return nil, err
		}
		buf = append(buf, p...)
	}
	b, err := msg.Body.Serialize()
	if err != nil {
		return nil, err
	}
	buf = append(buf, b...)
	// Derive the total length now that the payload size is known.
	if msg.Header.Length == 0 {
		msg.Header.Length = uint32(BMP_HEADER_SIZE + len(buf))
	}
	h, err := msg.Header.Serialize()
	if err != nil {
		return nil, err
	}
	return append(h, buf...), nil
}
// Len returns the total message length as recorded in the common
// header's Length field.
func (msg *BMPMessage) Len() int {
	length := msg.Header.Length
	return int(length)
}
// BMP message types, as carried in the common header's Type field.
const (
	BMP_MSG_ROUTE_MONITORING = iota
	BMP_MSG_STATISTICS_REPORT
	BMP_MSG_PEER_DOWN_NOTIFICATION
	BMP_MSG_PEER_UP_NOTIFICATION
	BMP_MSG_INITIATION
	BMP_MSG_TERMINATION
)
// ParseBMPMessage parses a single BMP message from data. The deferred
// recover converts bounds panics on truncated input into an error.
//
// Fix: an unrecognized message type used to leave msg.Body nil, so the
// ParseBody call below panicked and was masked by the recover as a
// misleading "not all data bytes are available" error; unknown types
// are now rejected explicitly.
func ParseBMPMessage(data []byte) (msg *BMPMessage, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("not all data bytes are available")
		}
	}()
	msg = &BMPMessage{}
	err = msg.Header.DecodeFromBytes(data)
	if err != nil {
		return nil, err
	}
	data = data[BMP_HEADER_SIZE:msg.Header.Length]
	switch msg.Header.Type {
	case BMP_MSG_ROUTE_MONITORING:
		msg.Body = &BMPRouteMonitoring{}
	case BMP_MSG_STATISTICS_REPORT:
		msg.Body = &BMPStatisticsReport{}
	case BMP_MSG_PEER_DOWN_NOTIFICATION:
		msg.Body = &BMPPeerDownNotification{}
	case BMP_MSG_PEER_UP_NOTIFICATION:
		msg.Body = &BMPPeerUpNotification{}
	case BMP_MSG_INITIATION:
		msg.Body = &BMPInitiation{}
	case BMP_MSG_TERMINATION:
		msg.Body = &BMPTermination{}
	default:
		return nil, fmt.Errorf("unsupported BMP message type: %d", msg.Header.Type)
	}
	// Initiation messages carry no per-peer header on the wire.
	if msg.Header.Type != BMP_MSG_INITIATION {
		msg.PeerHeader.DecodeFromBytes(data)
		data = data[BMP_PEER_HEADER_SIZE:]
	}
	if err := msg.Body.ParseBody(msg, data); err != nil {
		return nil, err
	}
	return msg, nil
}
// SplitBMP is a bufio.SplitFunc that tokenizes a byte stream into whole
// BMP messages using the Length field of the common header. It requests
// more data until a full message is buffered.
//
// Fix: the error from Header.DecodeFromBytes was ignored, so a corrupt
// header could make the scanner loop on garbage; it is now surfaced,
// which terminates the scan with that error.
func SplitBMP(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 || len(data) < BMP_HEADER_SIZE {
		return 0, nil, nil
	}
	msg := &BMPMessage{}
	if err := msg.Header.DecodeFromBytes(data); err != nil {
		return 0, nil, err
	}
	if uint32(len(data)) < msg.Header.Length {
		// Whole message not buffered yet.
		return 0, nil, nil
	}
	return int(msg.Header.Length), data[0:msg.Header.Length], nil
}
|
// Diskv (disk-vee) is a simple, persistent, key-value store.
// It stores all data flatly on the filesystem.
package diskv
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"syscall"
)
// Defaults applied by New when the corresponding Options are zero.
const (
	defaultBasePath             = "diskv" // directory used when Options.BasePath is empty
	defaultFilePerm os.FileMode = 0666    // permission bits for created data files
	defaultPathPerm os.FileMode = 0777    // permission bits for created directories
)
// PathKey is the decomposed form of a key: the directory elements and
// file name where the key's data lives, plus the key it came from.
type PathKey struct {
	Path        []string // directory elements under BasePath
	FileName    string   // name of the data file
	originalKey string   // key that produced this PathKey
}
var (
	// defaultAdvancedTransform stores every key as a flat file named
	// after the key, directly under BasePath.
	defaultAdvancedTransform = func(s string) *PathKey { return &PathKey{Path: []string{}, FileName: s} }
	// defaultInverseTransform recovers the key from the file name alone.
	defaultInverseTransform = func(pathKey *PathKey) string { return pathKey.FileName }
	errCanceled             = errors.New("canceled")
	errEmptyKey             = errors.New("empty key")
	errBadKey               = errors.New("bad key")
	errImportDirectory      = errors.New("can't import a directory")
)
// TransformFunction transforms a key into a slice of strings, with each
// element in the slice representing a directory in the file path where the
// key's entry will eventually be stored.
//
// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"],
// the final location of the data file will be <basedir>/ab/cde/f/abcdef.
type TransformFunction func(s string) []string

// AdvancedTransformFunction transforms a key into:
//
//   - a slice of strings (Path), with each element in the slice
//     representing a directory in the file path where the key's entry
//     will eventually be stored, and
//   - the file name.
//
// For example, if the function transforms "abcdef/file.txt" to
// Path=["ab", "cde", "f"] and FileName="file.txt", the final location
// of the data file will be <basedir>/ab/cde/f/file.txt.
//
// You must provide an InverseTransformFunction if you use this.
type AdvancedTransformFunction func(s string) *PathKey

// InverseTransformFunction takes a Path+FileName and converts it back
// to the key it was derived from.
type InverseTransformFunction func(pathKey *PathKey) string
// Options define a set of properties that dictate Diskv behavior.
// All values are optional.
type Options struct {
BasePath string
Transform TransformFunction
AdvancedTransform AdvancedTransformFunction
InverseTransform InverseTransformFunction
CacheSizeMax uint64 // bytes
PathPerm os.FileMode
FilePerm os.FileMode
// If TempDir is set, it will enable filesystem atomic writes by
// writing temporary files to that location before being moved
// to BasePath.
// Note that TempDir MUST be on the same device/partition as
// BasePath.
TempDir string
Index Index
IndexLess LessFunction
Compression Compression
}
// Diskv implements the Diskv interface. You shouldn't construct Diskv
// structures directly; instead, use the New constructor.
type Diskv struct {
	Options
	mu        sync.RWMutex      // guards cache and cacheSize
	cache     map[string][]byte // read cache, bounded by CacheSizeMax
	cacheSize uint64            // current total size of cached values, in bytes
}
// New returns an initialized Diskv structure, ready to use.
// If the path identified by baseDir already contains data,
// it will be accessible, but not yet cached.
func New(o Options) *Diskv {
	if o.BasePath == "" {
		o.BasePath = defaultBasePath
	}
	if o.AdvancedTransform == nil {
		// Classic mode: derive the advanced transform from the classic
		// one (or fall back to flat storage) and default the inverse.
		if o.Transform == nil {
			o.AdvancedTransform = defaultAdvancedTransform
		} else {
			o.AdvancedTransform = convertToAdvancedTransform(o.Transform)
		}
		if o.InverseTransform == nil {
			o.InverseTransform = defaultInverseTransform
		}
	} else {
		// Advanced mode: the inverse cannot be guessed, so the caller
		// must supply it.
		if o.InverseTransform == nil {
			panic("You must provide an InverseTransform function in advanced mode")
		}
	}
	if o.PathPerm == 0 {
		o.PathPerm = defaultPathPerm
	}
	if o.FilePerm == 0 {
		o.FilePerm = defaultFilePerm
	}
	d := &Diskv{
		Options:   o,
		cache:     map[string][]byte{},
		cacheSize: 0,
	}
	// Seed the index with any keys already present on disk.
	if d.Index != nil && d.IndexLess != nil {
		d.Index.Initialize(d.IndexLess, d.Keys(nil))
	}
	return d
}
// convertToAdvancedTransform adapts a classic TransformFunction to the
// AdvancedTransformFunction shape: the old function supplies the
// directory path and the key itself becomes the file name.
func convertToAdvancedTransform(oldFunc func(s string) []string) AdvancedTransformFunction {
	return func(key string) *PathKey {
		pk := &PathKey{FileName: key}
		pk.Path = oldFunc(key)
		return pk
	}
}
// Write synchronously writes the key-value pair to disk, making it
// immediately available for reads. Write relies on the filesystem to
// perform an eventual sync to physical media. If you need stronger
// guarantees, see WriteStream.
func (d *Diskv) Write(key string, val []byte) error {
	r := bytes.NewReader(val)
	return d.WriteStream(key, r, false)
}
// WriteString writes a string value to disk under the given key.
func (d *Diskv) WriteString(key string, val string) error {
	data := []byte(val)
	return d.Write(key, data)
}
// transform runs the configured AdvancedTransform on key and records
// the original key on the resulting PathKey.
func (d *Diskv) transform(key string) (pathKey *PathKey) {
	pk := d.AdvancedTransform(key)
	pk.originalKey = key
	return pk
}
// WriteStream writes the data represented by the io.Reader to the disk,
// under the provided key. If sync is true, WriteStream performs an
// explicit sync on the file as soon as it's written.
//
// bytes.Buffer provides io.Reader semantics for basic data types.
func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error {
	if key == "" {
		return errEmptyKey
	}
	pathKey := d.transform(key)
	// Reject keys whose transformed path or file name contains a path
	// separator, which could escape the store's directory tree.
	bad := strings.ContainsRune(pathKey.FileName, os.PathSeparator)
	for _, part := range pathKey.Path {
		if strings.ContainsRune(part, os.PathSeparator) {
			bad = true
			break
		}
	}
	if bad {
		return errBadKey
	}
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.writeStreamWithLock(pathKey, r, sync)
}
// createKeyFileWithLock either creates the key file directly, or
// creates a temporary file in TempDir if it is set; in the latter case
// the caller renames the staging file into place for an atomic write.
// Caller must hold the write lock.
func (d *Diskv) createKeyFileWithLock(pathKey *PathKey) (*os.File, error) {
	if d.TempDir != "" {
		if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil {
			return nil, fmt.Errorf("temp mkdir: %s", err)
		}
		f, err := ioutil.TempFile(d.TempDir, "")
		if err != nil {
			return nil, fmt.Errorf("temp file: %s", err)
		}
		// TempFile creates with mode 0600; widen to the configured perm.
		if err := f.Chmod(d.FilePerm); err != nil {
			f.Close()           // error deliberately ignored
			os.Remove(f.Name()) // error deliberately ignored
			return nil, fmt.Errorf("chmod: %s", err)
		}
		return f, nil
	}
	mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists
	f, err := os.OpenFile(d.completeFilename(pathKey), mode, d.FilePerm)
	if err != nil {
		return nil, fmt.Errorf("open file: %s", err)
	}
	return f, nil
}
// writeStreamWithLock streams r into the data file for pathKey. It does
// no input validation checking. On any mid-write failure the partial
// file is removed; in TempDir mode the data is staged and renamed into
// place after a successful close, making the write atomic. Caller must
// hold the write lock.
func (d *Diskv) writeStreamWithLock(pathKey *PathKey, r io.Reader, sync bool) error {
	if err := d.ensurePathWithLock(pathKey); err != nil {
		return fmt.Errorf("ensure path: %s", err)
	}
	f, err := d.createKeyFileWithLock(pathKey)
	if err != nil {
		return fmt.Errorf("create key file: %s", err)
	}
	// Writes go through wc: either the file wrapped so Close is a
	// no-op, or a compression writer around it. f is closed separately.
	wc := io.WriteCloser(&nopWriteCloser{f})
	if d.Compression != nil {
		wc, err = d.Compression.Writer(f)
		if err != nil {
			f.Close()           // error deliberately ignored
			os.Remove(f.Name()) // error deliberately ignored
			return fmt.Errorf("compression writer: %s", err)
		}
	}
	if _, err := io.Copy(wc, r); err != nil {
		f.Close()           // error deliberately ignored
		os.Remove(f.Name()) // error deliberately ignored
		return fmt.Errorf("i/o copy: %s", err)
	}
	// Close the (possibly compressing) writer first so any buffered
	// data is flushed before the file is synced/closed.
	if err := wc.Close(); err != nil {
		f.Close()           // error deliberately ignored
		os.Remove(f.Name()) // error deliberately ignored
		return fmt.Errorf("compression close: %s", err)
	}
	if sync {
		if err := f.Sync(); err != nil {
			f.Close()           // error deliberately ignored
			os.Remove(f.Name()) // error deliberately ignored
			return fmt.Errorf("file sync: %s", err)
		}
	}
	if err := f.Close(); err != nil {
		return fmt.Errorf("file close: %s", err)
	}
	// In TempDir mode f is a staging file; move it into place now.
	fullPath := d.completeFilename(pathKey)
	if f.Name() != fullPath {
		if err := os.Rename(f.Name(), fullPath); err != nil {
			os.Remove(f.Name()) // error deliberately ignored
			return fmt.Errorf("rename: %s", err)
		}
	}
	if d.Index != nil {
		d.Index.Insert(pathKey.originalKey)
	}
	d.bustCacheWithLock(pathKey.originalKey) // cache only on read
	return nil
}
// Import imports the source file into diskv under the destination key. If the
// destination key already exists, it's overwritten. If move is true, the
// source file is removed after a successful import.
func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) {
	if dstKey == "" {
		return errEmptyKey
	}
	if fi, err := os.Stat(srcFilename); err != nil {
		return err
	} else if fi.IsDir() {
		return errImportDirectory
	}
	dstPathKey := d.transform(dstKey)
	d.mu.Lock()
	defer d.mu.Unlock()
	if err := d.ensurePathWithLock(dstPathKey); err != nil {
		return fmt.Errorf("ensure path: %s", err)
	}
	if move {
		// Fast path: rename, which is cheap when src and dst are on
		// the same filesystem.
		if err := syscall.Rename(srcFilename, d.completeFilename(dstPathKey)); err == nil {
			d.bustCacheWithLock(dstPathKey.originalKey)
			return nil
		} else if err != syscall.EXDEV {
			// EXDEV means src and dst live on different devices; only
			// then do we fall through to the copy below. Any other
			// rename failure is fatal.
			return err
		}
	}
	// Slow path: stream-copy the file contents under the key.
	f, err := os.Open(srcFilename)
	if err != nil {
		return err
	}
	defer f.Close()
	err = d.writeStreamWithLock(dstPathKey, f, false)
	if err == nil && move {
		err = os.Remove(srcFilename)
	}
	return err
}
// Read reads the key and returns the value.
// If the key is available in the cache, Read won't touch the disk.
// If the key is not in the cache, Read will have the side-effect of
// lazily caching the value.
func (d *Diskv) Read(key string) ([]byte, error) {
	stream, err := d.ReadStream(key, false)
	if err != nil {
		return []byte{}, err
	}
	defer stream.Close()
	return ioutil.ReadAll(stream)
}
// ReadString reads the key and returns the value as a string.
// In case of error, an empty string is returned.
func (d *Diskv) ReadString(key string) string {
	raw, _ := d.Read(key)
	return string(raw)
}
// ReadStream reads the key and returns the value (data) as an io.ReadCloser.
// If the value is cached from a previous read, and direct is false,
// ReadStream will use the cached value. Otherwise, it will return a handle to
// the file on disk, and cache the data on read.
//
// If direct is true, ReadStream will lazily delete any cached value for the
// key, and return a direct handle to the file on disk.
//
// If compression is enabled, ReadStream taps into the io.Reader stream prior
// to decompression, and caches the compressed data.
func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) {
	pathKey := d.transform(key)
	d.mu.RLock()
	defer d.mu.RUnlock()
	if val, ok := d.cache[key]; ok {
		if !direct {
			// Serve straight from the cached bytes.
			buf := bytes.NewReader(val)
			if d.Compression != nil {
				return d.Compression.Reader(buf)
			}
			return ioutil.NopCloser(buf), nil
		}
		// direct: evict the cached copy. Eviction needs the write lock,
		// which cannot be acquired while holding the read lock, so it
		// happens asynchronously in its own goroutine.
		go func() {
			d.mu.Lock()
			defer d.mu.Unlock()
			d.uncacheWithLock(key, uint64(len(val)))
		}()
	}
	return d.readWithRLock(pathKey)
}
// readWithRLock ignores the cache, and returns an io.ReadCloser
// representing the decompressed data for the given key, streamed from
// the disk. Clients should acquire a read lock on the Diskv and check
// the cache themselves before calling read.
func (d *Diskv) readWithRLock(pathKey *PathKey) (io.ReadCloser, error) {
	filename := d.completeFilename(pathKey)
	fi, err := os.Stat(filename)
	if err != nil {
		return nil, err
	}
	if fi.IsDir() {
		// A directory at the key's path means the key does not exist.
		return nil, os.ErrNotExist
	}
	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	var r io.Reader
	if d.CacheSizeMax > 0 {
		// Tee the bytes into the cache as they are read.
		r = newSiphon(f, d, pathKey.originalKey)
	} else {
		// No cache: just close the file automatically at EOF.
		r = &closingReader{f}
	}
	var rc = io.ReadCloser(ioutil.NopCloser(r))
	if d.Compression != nil {
		rc, err = d.Compression.Reader(r)
		if err != nil {
			return nil, err
		}
	}
	return rc, nil
}
// closingReader provides a Reader that automatically closes the
// embedded ReadCloser when it reaches EOF
type closingReader struct {
rc io.ReadCloser
}
func (cr closingReader) Read(p []byte) (int, error) {
n, err := cr.rc.Read(p)
if err == io.EOF {
if closeErr := cr.rc.Close(); closeErr != nil {
return n, closeErr // close must succeed for Read to succeed
}
}
return n, err
}
// siphon is like a TeeReader: it copies all data read through it to an
// internal buffer, and moves that buffer to the cache at EOF.
type siphon struct {
	f   *os.File      // underlying data file
	d   *Diskv        // store whose cache receives the bytes at EOF
	key string        // cache key for the buffered data
	buf *bytes.Buffer // accumulates everything read so far
}
// newSiphon constructs a siphoning reader that represents the passed
// file. When a successful series of reads ends in an EOF, the siphon
// writes the buffered data to Diskv's cache under the given key.
func newSiphon(f *os.File, d *Diskv, key string) io.Reader {
	s := &siphon{buf: &bytes.Buffer{}}
	s.f = f
	s.d = d
	s.key = key
	return s
}
// Read implements io.Reader for siphon: successful reads are mirrored
// into the buffer, and EOF triggers caching plus closing the file.
func (s *siphon) Read(p []byte) (int, error) {
	n, err := s.f.Read(p)
	switch err {
	case nil:
		return s.buf.Write(p[:n]) // Write must succeed for Read to succeed
	case io.EOF:
		s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail
		if closeErr := s.f.Close(); closeErr != nil {
			return n, closeErr // close must succeed for Read to succeed
		}
	}
	return n, err
}
// Erase synchronously erases the given key from the disk and the cache.
func (d *Diskv) Erase(key string) error {
	pathKey := d.transform(key)
	d.mu.Lock()
	defer d.mu.Unlock()
	d.bustCacheWithLock(key)
	// erase from index
	if d.Index != nil {
		d.Index.Delete(key)
	}
	// erase from disk
	filename := d.completeFilename(pathKey)
	if s, err := os.Stat(filename); err == nil {
		if s.IsDir() {
			// A directory at the key's location means no value was
			// ever stored there.
			return errBadKey
		}
		if err = os.Remove(filename); err != nil {
			return err
		}
	} else {
		// Return err as-is so caller can do os.IsNotExist(err).
		return err
	}
	// Remove parent directories that the erase left empty.
	d.pruneDirsWithLock(key)
	return nil
}
// EraseAll will delete all of the data from the store, both in the cache and on
// the disk. Note that EraseAll doesn't distinguish diskv-related data from non-
// diskv-related data. Care should be taken to always specify a diskv base
// directory that is exclusively for diskv data.
func (d *Diskv) EraseAll() error {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.cacheSize = 0
	d.cache = map[string][]byte{}
	if d.TempDir != "" {
		os.RemoveAll(d.TempDir) // errors ignored
	}
	return os.RemoveAll(d.BasePath)
}
// Has returns true if the given key exists.
func (d *Diskv) Has(key string) bool {
	pathKey := d.transform(key)
	d.mu.Lock()
	defer d.mu.Unlock()
	if _, cached := d.cache[key]; cached {
		return true
	}
	info, err := os.Stat(d.completeFilename(pathKey))
	return err == nil && !info.IsDir()
}
// Keys returns a channel that will yield every key accessible by the
// store, in undefined order. If a cancel channel is provided, closing
// it will terminate and close the keys channel.
func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string {
	const everyPrefix = ""
	return d.KeysPrefix(everyPrefix, cancel)
}
// KeysPrefix returns a channel that will yield every key accessible by
// the store with the given prefix, in undefined order. If a cancel
// channel is provided, closing it will terminate and close the keys
// channel. If the provided prefix is the empty string, all keys will be
// yielded.
func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string {
	root := d.BasePath
	if prefix != "" {
		// Walk only the subtree the prefix's transform maps to.
		root = d.pathFor(d.transform(prefix))
	}
	c := make(chan string)
	go func() {
		defer close(c)
		filepath.Walk(root, d.walker(c, prefix, cancel))
	}()
	return c
}
// walker returns a function which satisfies the filepath.WalkFunc
// interface. It reconstructs the diskv key for every non-directory file
// entry via InverseTransform and sends it down the channel c, honoring
// the prefix filter and the cancel channel.
//
// Fix: removed a stale commented-out variant of the PathKey
// construction that duplicated the live code and cluttered the
// function.
func (d *Diskv) walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc {
	return func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		relPath, _ := filepath.Rel(d.BasePath, path)
		dir, file := filepath.Split(relPath)
		pathSplit := strings.Split(dir, string(filepath.Separator))
		// Drop the empty element left by the trailing separator.
		pathSplit = pathSplit[:len(pathSplit)-1]
		pathKey := &PathKey{
			Path:     pathSplit,
			FileName: file,
		}
		key := d.InverseTransform(pathKey)
		if info.IsDir() || !strings.HasPrefix(key, prefix) {
			return nil // "pass"
		}
		select {
		case c <- key:
		case <-cancel:
			return errCanceled
		}
		return nil
	}
}
// pathFor returns the absolute directory on the filesystem under which
// the data for the given key will be stored.
func (d *Diskv) pathFor(pathKey *PathKey) string {
	elems := append([]string{d.BasePath}, pathKey.Path...)
	return filepath.Join(elems...)
}
// ensurePathWithLock creates every directory needed on the filesystem
// to hold the data file for the given key. Caller must hold the write
// lock.
func (d *Diskv) ensurePathWithLock(pathKey *PathKey) error {
	dir := d.pathFor(pathKey)
	return os.MkdirAll(dir, d.PathPerm)
}
// completeFilename returns the absolute path to the data file for the
// given key.
func (d *Diskv) completeFilename(pathKey *PathKey) string {
	dir := d.pathFor(pathKey)
	return filepath.Join(dir, pathKey.FileName)
}
// cacheWithLock attempts to cache the given key-value pair in the
// store's cache. It can fail if the value is larger than the cache's
// maximum size. Caller must hold the write lock.
func (d *Diskv) cacheWithLock(key string, val []byte) error {
	valueSize := uint64(len(val))
	if err := d.ensureCacheSpaceWithLock(valueSize); err != nil {
		return fmt.Errorf("%s; not caching", err)
	}
	// be very strict about memory guarantees
	if (d.cacheSize + valueSize) > d.CacheSizeMax {
		panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax))
	}
	d.cache[key] = val
	d.cacheSize += valueSize
	return nil
}
// cacheWithoutLock acquires the store's (write) mutex and calls
// cacheWithLock.
func (d *Diskv) cacheWithoutLock(key string, val []byte) error {
	d.mu.Lock()
	err := d.cacheWithLock(key, val)
	d.mu.Unlock()
	return err
}
// bustCacheWithLock evicts key from the cache if present. Caller must
// hold the write lock.
func (d *Diskv) bustCacheWithLock(key string) {
	val, ok := d.cache[key]
	if !ok {
		return
	}
	d.uncacheWithLock(key, uint64(len(val)))
}
// uncacheWithLock removes key from the cache and credits its size back
// to the cache budget. Caller must hold the write lock.
func (d *Diskv) uncacheWithLock(key string, sz uint64) {
	delete(d.cache, key)
	d.cacheSize -= sz
}
// pruneDirsWithLock deletes empty directories in the path walk leading
// to the key k. Typically this function is called after an Erase is
// made. Caller must hold the write lock.
func (d *Diskv) pruneDirsWithLock(key string) error {
	pathlist := d.transform(key).Path
	// Walk from the deepest directory up toward BasePath, removing each
	// level only while it remains empty.
	for i := range pathlist {
		dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...))
		// thanks to Steven Blenkinsop for this snippet
		switch fi, err := os.Stat(dir); true {
		case err != nil:
			return err
		case !fi.IsDir():
			panic(fmt.Sprintf("corrupt dirstate at %s", dir))
		}
		nlinks, err := filepath.Glob(filepath.Join(dir, "*"))
		if err != nil {
			return err
		} else if len(nlinks) > 0 {
			return nil // has subdirs -- do not prune
		}
		if err = os.Remove(dir); err != nil {
			return err
		}
	}
	return nil
}
// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary
// order until the cache has at least valueSize bytes available. Caller
// must hold the write lock.
func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error {
	if valueSize > d.CacheSizeMax {
		return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax)
	}
	safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax }
	// Go map iteration order is random, so eviction order is arbitrary.
	for key, val := range d.cache {
		if safe() {
			break
		}
		d.uncacheWithLock(key, uint64(len(val)))
	}
	if !safe() {
		panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax))
	}
	return nil
}
// nopWriteCloser wraps an io.Writer and provides a no-op Close method to
// satisfy the io.WriteCloser interface.
type nopWriteCloser struct {
io.Writer
}
func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) }
func (wc *nopWriteCloser) Close() error { return nil }
removed comments
// Diskv (disk-vee) is a simple, persistent, key-value store.
// It stores all data flatly on the filesystem.
package diskv
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"syscall"
)
// Defaults applied by New when the corresponding Options are zero.
const (
	defaultBasePath             = "diskv" // directory used when Options.BasePath is empty
	defaultFilePerm os.FileMode = 0666    // permission bits for created data files
	defaultPathPerm os.FileMode = 0777    // permission bits for created directories
)
// PathKey is the decomposed form of a key: the directory elements and
// file name where the key's data lives, plus the key it came from.
type PathKey struct {
	Path        []string // directory elements under BasePath
	FileName    string   // name of the data file
	originalKey string   // key that produced this PathKey
}
var (
	// defaultAdvancedTransform stores every key as a flat file named
	// after the key, directly under BasePath.
	defaultAdvancedTransform = func(s string) *PathKey { return &PathKey{Path: []string{}, FileName: s} }
	// defaultInverseTransform recovers the key from the file name alone.
	defaultInverseTransform = func(pathKey *PathKey) string { return pathKey.FileName }
	errCanceled             = errors.New("canceled")
	errEmptyKey             = errors.New("empty key")
	errBadKey               = errors.New("bad key")
	errImportDirectory      = errors.New("can't import a directory")
)
// TransformFunction transforms a key into a slice of strings, with each
// element in the slice representing a directory in the file path where the
// key's entry will eventually be stored.
//
// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"],
// the final location of the data file will be <basedir>/ab/cde/f/abcdef.
type TransformFunction func(s string) []string

// AdvancedTransformFunction transforms a key into:
//
//   - a slice of strings (Path), with each element in the slice
//     representing a directory in the file path where the key's entry
//     will eventually be stored, and
//   - the file name.
//
// For example, if the function transforms "abcdef/file.txt" to
// Path=["ab", "cde", "f"] and FileName="file.txt", the final location
// of the data file will be <basedir>/ab/cde/f/file.txt.
//
// You must provide an InverseTransformFunction if you use this.
type AdvancedTransformFunction func(s string) *PathKey

// InverseTransformFunction takes a Path+FileName and converts it back
// to the key it was derived from.
type InverseTransformFunction func(pathKey *PathKey) string
// Options define a set of properties that dictate Diskv behavior.
// All values are optional.
type Options struct {
	BasePath          string                    // root directory for all data (default "diskv")
	Transform         TransformFunction         // classic key-to-directory transform
	AdvancedTransform AdvancedTransformFunction // path+filename transform; requires InverseTransform
	InverseTransform  InverseTransformFunction  // converts a PathKey back into a key
	CacheSizeMax      uint64                    // bytes
	PathPerm          os.FileMode               // permission for created directories (default 0777)
	FilePerm          os.FileMode               // permission for created files (default 0666)
	// If TempDir is set, it will enable filesystem atomic writes by
	// writing temporary files to that location before being moved
	// to BasePath.
	// Note that TempDir MUST be on the same device/partition as
	// BasePath.
	TempDir string
	Index       Index        // optional ordered index of keys
	IndexLess   LessFunction // ordering used by Index
	Compression Compression  // optional transparent (de)compression
}
// Diskv implements the Diskv interface. You shouldn't construct Diskv
// structures directly; instead, use the New constructor.
type Diskv struct {
	Options
	mu        sync.RWMutex      // guards cache and cacheSize
	cache     map[string][]byte // read cache, bounded by CacheSizeMax
	cacheSize uint64            // current total size of cached values, in bytes
}
// New returns an initialized Diskv structure, ready to use.
// If the path identified by baseDir already contains data,
// it will be accessible, but not yet cached.
func New(o Options) *Diskv {
	if o.BasePath == "" {
		o.BasePath = defaultBasePath
	}
	if o.AdvancedTransform == nil {
		// Classic mode: derive the advanced transform from the classic
		// one (or fall back to flat storage) and default the inverse.
		if o.Transform == nil {
			o.AdvancedTransform = defaultAdvancedTransform
		} else {
			o.AdvancedTransform = convertToAdvancedTransform(o.Transform)
		}
		if o.InverseTransform == nil {
			o.InverseTransform = defaultInverseTransform
		}
	} else {
		// Advanced mode: the inverse cannot be guessed, so the caller
		// must supply it.
		if o.InverseTransform == nil {
			panic("You must provide an InverseTransform function in advanced mode")
		}
	}
	if o.PathPerm == 0 {
		o.PathPerm = defaultPathPerm
	}
	if o.FilePerm == 0 {
		o.FilePerm = defaultFilePerm
	}
	d := &Diskv{
		Options:   o,
		cache:     map[string][]byte{},
		cacheSize: 0,
	}
	// Seed the index with any keys already present on disk.
	if d.Index != nil && d.IndexLess != nil {
		d.Index.Initialize(d.IndexLess, d.Keys(nil))
	}
	return d
}
// convertToAdvancedTransform adapts a classic TransformFunction to the
// AdvancedTransformFunction shape: the old function supplies the
// directory path and the key itself becomes the file name.
func convertToAdvancedTransform(oldFunc func(s string) []string) AdvancedTransformFunction {
	return func(key string) *PathKey {
		pk := &PathKey{FileName: key}
		pk.Path = oldFunc(key)
		return pk
	}
}
// Write synchronously writes the key-value pair to disk, making it
// immediately available for reads. Write relies on the filesystem to
// perform an eventual sync to physical media. If you need stronger
// guarantees, see WriteStream.
func (d *Diskv) Write(key string, val []byte) error {
	r := bytes.NewReader(val)
	return d.WriteStream(key, r, false)
}
// WriteString writes a string value to disk under the given key.
func (d *Diskv) WriteString(key string, val string) error {
	data := []byte(val)
	return d.Write(key, data)
}
// transform runs the configured AdvancedTransform on key and records
// the original key on the resulting PathKey.
func (d *Diskv) transform(key string) (pathKey *PathKey) {
	pk := d.AdvancedTransform(key)
	pk.originalKey = key
	return pk
}
// WriteStream writes the data represented by the io.Reader to the disk,
// under the provided key. If sync is true, WriteStream performs an
// explicit sync on the file as soon as it's written.
//
// bytes.Buffer provides io.Reader semantics for basic data types.
func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error {
	if key == "" {
		return errEmptyKey
	}
	pathKey := d.transform(key)
	// Reject keys whose transformed path or file name contains a path
	// separator, which could escape the store's directory tree.
	bad := strings.ContainsRune(pathKey.FileName, os.PathSeparator)
	for _, part := range pathKey.Path {
		if strings.ContainsRune(part, os.PathSeparator) {
			bad = true
			break
		}
	}
	if bad {
		return errBadKey
	}
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.writeStreamWithLock(pathKey, r, sync)
}
// createKeyFileWithLock either creates the key file directly, or
// creates a temporary file in TempDir if it is set; in the latter case
// the caller renames the staging file into place for an atomic write.
// Caller must hold the write lock.
func (d *Diskv) createKeyFileWithLock(pathKey *PathKey) (*os.File, error) {
	if d.TempDir != "" {
		if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil {
			return nil, fmt.Errorf("temp mkdir: %s", err)
		}
		f, err := ioutil.TempFile(d.TempDir, "")
		if err != nil {
			return nil, fmt.Errorf("temp file: %s", err)
		}
		// TempFile creates with mode 0600; widen to the configured perm.
		if err := f.Chmod(d.FilePerm); err != nil {
			f.Close()           // error deliberately ignored
			os.Remove(f.Name()) // error deliberately ignored
			return nil, fmt.Errorf("chmod: %s", err)
		}
		return f, nil
	}
	mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists
	f, err := os.OpenFile(d.completeFilename(pathKey), mode, d.FilePerm)
	if err != nil {
		return nil, fmt.Errorf("open file: %s", err)
	}
	return f, nil
}
// writeStreamWithLock streams r into the data file for pathKey. It does
// no input validation checking. On any mid-write failure the partial
// file is removed; in TempDir mode the data is staged and renamed into
// place after a successful close, making the write atomic. Caller must
// hold the write lock.
func (d *Diskv) writeStreamWithLock(pathKey *PathKey, r io.Reader, sync bool) error {
	if err := d.ensurePathWithLock(pathKey); err != nil {
		return fmt.Errorf("ensure path: %s", err)
	}
	f, err := d.createKeyFileWithLock(pathKey)
	if err != nil {
		return fmt.Errorf("create key file: %s", err)
	}
	// Writes go through wc: either the file wrapped so Close is a
	// no-op, or a compression writer around it. f is closed separately.
	wc := io.WriteCloser(&nopWriteCloser{f})
	if d.Compression != nil {
		wc, err = d.Compression.Writer(f)
		if err != nil {
			f.Close()           // error deliberately ignored
			os.Remove(f.Name()) // error deliberately ignored
			return fmt.Errorf("compression writer: %s", err)
		}
	}
	if _, err := io.Copy(wc, r); err != nil {
		f.Close()           // error deliberately ignored
		os.Remove(f.Name()) // error deliberately ignored
		return fmt.Errorf("i/o copy: %s", err)
	}
	// Close the (possibly compressing) writer first so any buffered
	// data is flushed before the file is synced/closed.
	if err := wc.Close(); err != nil {
		f.Close()           // error deliberately ignored
		os.Remove(f.Name()) // error deliberately ignored
		return fmt.Errorf("compression close: %s", err)
	}
	if sync {
		if err := f.Sync(); err != nil {
			f.Close()           // error deliberately ignored
			os.Remove(f.Name()) // error deliberately ignored
			return fmt.Errorf("file sync: %s", err)
		}
	}
	if err := f.Close(); err != nil {
		return fmt.Errorf("file close: %s", err)
	}
	// In TempDir mode f is a staging file; move it into place now.
	fullPath := d.completeFilename(pathKey)
	if f.Name() != fullPath {
		if err := os.Rename(f.Name(), fullPath); err != nil {
			os.Remove(f.Name()) // error deliberately ignored
			return fmt.Errorf("rename: %s", err)
		}
	}
	if d.Index != nil {
		d.Index.Insert(pathKey.originalKey)
	}
	d.bustCacheWithLock(pathKey.originalKey) // cache only on read
	return nil
}
// Import imports the source file into diskv under the destination key. If the
// destination key already exists, it's overwritten. If move is true, the
// source file is removed after a successful import.
func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) {
	if dstKey == "" {
		return errEmptyKey
	}
	if fi, err := os.Stat(srcFilename); err != nil {
		return err
	} else if fi.IsDir() {
		return errImportDirectory
	}
	dstPathKey := d.transform(dstKey)
	d.mu.Lock()
	defer d.mu.Unlock()
	if err := d.ensurePathWithLock(dstPathKey); err != nil {
		return fmt.Errorf("ensure path: %s", err)
	}
	if move {
		// Fast path: rename, which is cheap when src and dst are on
		// the same filesystem.
		if err := syscall.Rename(srcFilename, d.completeFilename(dstPathKey)); err == nil {
			d.bustCacheWithLock(dstPathKey.originalKey)
			return nil
		} else if err != syscall.EXDEV {
			// EXDEV means src and dst live on different devices; only
			// then do we fall through to the copy below. Any other
			// rename failure is fatal.
			return err
		}
	}
	// Slow path: stream-copy the file contents under the key.
	f, err := os.Open(srcFilename)
	if err != nil {
		return err
	}
	defer f.Close()
	err = d.writeStreamWithLock(dstPathKey, f, false)
	if err == nil && move {
		err = os.Remove(srcFilename)
	}
	return err
}
// Read reads the key and returns the value.
// If the key is available in the cache, Read won't touch the disk.
// If the key is not in the cache, Read will have the side-effect of
// lazily caching the value.
func (d *Diskv) Read(key string) ([]byte, error) {
	stream, err := d.ReadStream(key, false)
	if err != nil {
		return []byte{}, err
	}
	data, readErr := ioutil.ReadAll(stream)
	stream.Close() // close error deliberately ignored
	return data, readErr
}
// ReadString reads the key and returns a string value
// In case of error, an empty string is returned
func (d *Diskv) ReadString(key string) string {
	data, _ := d.Read(key) // error deliberately ignored
	return string(data)
}
// ReadStream reads the key and returns the value (data) as an io.ReadCloser.
// If the value is cached from a previous read, and direct is false,
// ReadStream will use the cached value. Otherwise, it will return a handle to
// the file on disk, and cache the data on read.
//
// If direct is true, ReadStream will lazily delete any cached value for the
// key, and return a direct handle to the file on disk.
//
// If compression is enabled, ReadStream taps into the io.Reader stream prior
// to decompression, and caches the compressed data.
func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) {
	pathKey := d.transform(key)
	d.mu.RLock()
	defer d.mu.RUnlock()
	if val, ok := d.cache[key]; ok {
		if !direct {
			// Serve directly from the in-memory copy.
			buf := bytes.NewReader(val)
			if d.Compression != nil {
				// The cache holds compressed bytes; decompress on the way out.
				return d.Compression.Reader(buf)
			}
			return ioutil.NopCloser(buf), nil
		}
		// direct read: evict the cached copy asynchronously — we only hold
		// the read lock here, and eviction needs the write lock.
		go func() {
			d.mu.Lock()
			defer d.mu.Unlock()
			d.uncacheWithLock(key, uint64(len(val)))
		}()
	}
	return d.readWithRLock(pathKey)
}
// read ignores the cache, and returns an io.ReadCloser representing the
// decompressed data for the given key, streamed from the disk. Clients should
// acquire a read lock on the Diskv and check the cache themselves before
// calling read.
func (d *Diskv) readWithRLock(pathKey *PathKey) (io.ReadCloser, error) {
	filename := d.completeFilename(pathKey)

	fi, err := os.Stat(filename)
	if err != nil {
		return nil, err
	}
	if fi.IsDir() {
		// A directory at this path means the key has no stored value.
		return nil, os.ErrNotExist
	}

	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}

	// Wrap the file so it is closed (and optionally cached) at EOF.
	var r io.Reader
	if d.CacheSizeMax > 0 {
		r = newSiphon(f, d, pathKey.originalKey)
	} else {
		r = &closingReader{f}
	}

	var rc = io.ReadCloser(ioutil.NopCloser(r))
	if d.Compression != nil {
		rc, err = d.Compression.Reader(r)
		if err != nil {
			// Bug fix: the opened file handle must be released when the
			// decompression reader cannot be constructed, otherwise the
			// descriptor leaks.
			f.Close() // error deliberately ignored
			return nil, err
		}
	}

	return rc, nil
}
// closingReader provides a Reader that automatically closes the
// embedded ReadCloser when it reaches EOF
type closingReader struct {
	rc io.ReadCloser // underlying stream; closed by Read when it returns io.EOF
}
// Read delegates to the wrapped ReadCloser and, once the stream is
// exhausted, closes it. A failed close takes precedence over io.EOF.
func (cr closingReader) Read(p []byte) (int, error) {
	n, err := cr.rc.Read(p)
	if err != io.EOF {
		return n, err
	}
	// End of stream: release the underlying resource now.
	if closeErr := cr.rc.Close(); closeErr != nil {
		return n, closeErr // close must succeed for Read to succeed
	}
	return n, io.EOF
}
// siphon is like a TeeReader: it copies all data read through it to an
// internal buffer, and moves that buffer to the cache at EOF.
type siphon struct {
	f   *os.File      // source file being streamed
	d   *Diskv        // store whose cache receives the buffered data at EOF
	key string        // cache key under which the data is stored
	buf *bytes.Buffer // accumulates everything read so far
}
// newSiphon constructs a siphoning reader that represents the passed file.
// When a successful series of reads ends in an EOF, the siphon will write
// the buffered data to Diskv's cache under the given key.
func newSiphon(f *os.File, d *Diskv, key string) io.Reader {
	s := &siphon{buf: new(bytes.Buffer)}
	s.f, s.d, s.key = f, d, key
	return s
}
// Read implements the io.Reader interface for siphon.
func (s *siphon) Read(p []byte) (int, error) {
	n, err := s.f.Read(p)

	if err == nil {
		return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed
	}

	if err == io.EOF {
		// The whole file has streamed through: move the accumulated bytes
		// into the store's cache and release the file handle.
		s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail
		if closeErr := s.f.Close(); closeErr != nil {
			return n, closeErr // close must succeed for Read to succeed
		}
		return n, err
	}

	// Any other read error is passed through untouched; the file is left
	// open for the caller's Close.
	return n, err
}
// Erase synchronously erases the given key from the disk and the cache.
func (d *Diskv) Erase(key string) error {
	pathKey := d.transform(key)
	d.mu.Lock()
	defer d.mu.Unlock()
	d.bustCacheWithLock(key)
	// erase from index
	if d.Index != nil {
		d.Index.Delete(key)
	}
	// erase from disk
	filename := d.completeFilename(pathKey)
	if s, err := os.Stat(filename); err == nil {
		if s.IsDir() {
			// A directory here means the name collides with key structure,
			// not a stored value.
			return errBadKey
		}
		if err = os.Remove(filename); err != nil {
			return err
		}
	} else {
		// Return err as-is so caller can do os.IsNotExist(err).
		return err
	}
	// clean up and return
	d.pruneDirsWithLock(key)
	return nil
}
// EraseAll will delete all of the data from the store, both in the cache and on
// the disk. Note that EraseAll doesn't distinguish diskv-related data from non-
// diskv-related data. Care should be taken to always specify a diskv base
// directory that is exclusively for diskv data.
func (d *Diskv) EraseAll() error {
	d.mu.Lock()
	defer d.mu.Unlock()

	// Drop the in-memory cache first.
	d.cacheSize = 0
	d.cache = map[string][]byte{}

	// Then wipe temp storage (best effort) and the whole base path.
	if tmp := d.TempDir; tmp != "" {
		os.RemoveAll(tmp) // errors ignored
	}
	return os.RemoveAll(d.BasePath)
}
// Has returns true if the given key exists.
func (d *Diskv) Has(key string) bool {
	pathKey := d.transform(key)

	d.mu.Lock()
	defer d.mu.Unlock()

	// Cached entries are definitive: present means stored.
	if _, cached := d.cache[key]; cached {
		return true
	}

	// Otherwise consult the filesystem: the key exists only if a regular
	// file (not a directory) sits at its computed location.
	info, err := os.Stat(d.completeFilename(pathKey))
	return err == nil && !info.IsDir()
}
// Keys returns a channel that will yield every key accessible by the store,
// in undefined order. If a cancel channel is provided, closing it will
// terminate and close the keys channel.
func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string {
	// Equivalent to KeysPrefix with the empty prefix: match every key.
	return d.KeysPrefix("", cancel)
}
// KeysPrefix returns a channel that will yield every key accessible by the
// store with the given prefix, in undefined order. If a cancel channel is
// provided, closing it will terminate and close the keys channel. If the
// provided prefix is the empty string, all keys will be yielded.
func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string {
	// An empty prefix walks the whole store; otherwise start from the
	// directory the prefix maps to.
	root := d.BasePath
	if prefix != "" {
		root = d.pathFor(d.transform(prefix))
	}

	out := make(chan string)
	go func() {
		defer close(out)
		// Walk errors (including cancellation) are deliberately discarded;
		// the channel simply closes.
		filepath.Walk(root, d.walker(out, prefix, cancel))
	}()
	return out
}
// walker returns a function which satisfies the filepath.WalkFunc interface.
// It sends every non-directory file entry down the channel c.
func (d *Diskv) walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc {
	return func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// Rebuild a PathKey from the on-disk location so it can be
		// inverse-transformed back into the user-visible key.
		relPath, _ := filepath.Rel(d.BasePath, path)
		dir, file := filepath.Split(relPath)
		pathSplit := strings.Split(dir, string(filepath.Separator))
		// Drop the trailing empty element produced by the final separator.
		pathSplit = pathSplit[:len(pathSplit)-1]
		pathKey := &PathKey{
			Path:     pathSplit,
			FileName: file,
		}
		key := d.InverseTransform(pathKey)
		if info.IsDir() || !strings.HasPrefix(key, prefix) {
			return nil // "pass"
		}
		// Deliver the key unless the caller has cancelled the walk.
		select {
		case c <- key:
		case <-cancel:
			return errCanceled
		}
		return nil
	}
}
// pathFor returns the absolute path for location on the filesystem where the
// data for the given key will be stored.
func (d *Diskv) pathFor(pathKey *PathKey) string {
	elems := append([]string{d.BasePath}, pathKey.Path...)
	return filepath.Join(elems...)
}
// ensurePathWithLock is a helper function that generates all necessary
// directories on the filesystem for the given key.
// Caller must hold the write lock.
func (d *Diskv) ensurePathWithLock(pathKey *PathKey) error {
	return os.MkdirAll(d.pathFor(pathKey), d.PathPerm)
}
// completeFilename returns the absolute path to the file for the given key.
func (d *Diskv) completeFilename(pathKey *PathKey) string {
	dir := d.pathFor(pathKey)
	return filepath.Join(dir, pathKey.FileName)
}
// cacheWithLock attempts to cache the given key-value pair in the store's
// cache. It can fail if the value is larger than the cache's maximum size.
// Caller must hold the write lock.
func (d *Diskv) cacheWithLock(key string, val []byte) error {
	valueSize := uint64(len(val))

	// Evict other entries as needed; fails if val can never fit at all.
	if err := d.ensureCacheSpaceWithLock(valueSize); err != nil {
		return fmt.Errorf("%s; not caching", err)
	}

	// be very strict about memory guarantees
	if (d.cacheSize + valueSize) > d.CacheSizeMax {
		panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax))
	}

	d.cache[key] = val
	d.cacheSize += valueSize
	return nil
}
// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock.
// Used by code paths (e.g. the siphon) that run without the lock held.
func (d *Diskv) cacheWithoutLock(key string, val []byte) error {
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.cacheWithLock(key, val)
}
// bustCacheWithLock evicts key from the cache, adjusting the size
// accounting. No-op when the key is not cached. Caller must hold the
// write lock.
func (d *Diskv) bustCacheWithLock(key string) {
	val, ok := d.cache[key]
	if !ok {
		return
	}
	d.uncacheWithLock(key, uint64(len(val)))
}
// uncacheWithLock removes key from the cache and subtracts sz bytes from the
// size accounting. sz must be the size originally recorded for the entry.
// Caller must hold the write lock.
func (d *Diskv) uncacheWithLock(key string, sz uint64) {
	d.cacheSize -= sz
	delete(d.cache, key)
}
// pruneDirsWithLock deletes empty directories in the path walk leading to the
// key k. Typically this function is called after an Erase is made.
// Caller must hold the write lock.
func (d *Diskv) pruneDirsWithLock(key string) error {
	pathlist := d.transform(key).Path
	// Walk from the deepest directory back up toward BasePath, removing
	// each level only while it remains empty.
	for i := range pathlist {
		dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...))

		// thanks to Steven Blenkinsop for this snippet
		switch fi, err := os.Stat(dir); true {
		case err != nil:
			return err
		case !fi.IsDir():
			panic(fmt.Sprintf("corrupt dirstate at %s", dir))
		}

		// Glob the directory's entries to decide whether it is empty.
		nlinks, err := filepath.Glob(filepath.Join(dir, "*"))
		if err != nil {
			return err
		} else if len(nlinks) > 0 {
			return nil // has subdirs -- do not prune
		}

		if err = os.Remove(dir); err != nil {
			return err
		}
	}

	return nil
}
// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order
// until the cache has at least valueSize bytes available.
// Caller must hold the write lock.
func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error {
	// A value larger than the whole cache can never fit.
	if valueSize > d.CacheSizeMax {
		return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax)
	}

	safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax }

	// Go map iteration order is unspecified, so eviction is effectively
	// arbitrary.
	for key, val := range d.cache {
		if safe() {
			break
		}
		d.uncacheWithLock(key, uint64(len(val)))
	}

	if !safe() {
		panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax))
	}

	return nil
}
// nopWriteCloser wraps an io.Writer and provides a no-op Close method to
// satisfy the io.WriteCloser interface.
type nopWriteCloser struct {
io.Writer
}
func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) }
func (wc *nopWriteCloser) Close() error { return nil }
|
package digitalocean
import (
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"strings"
"testing"
"github.com/nuveo/gofn/iaas"
)
// Shared test doubles: mux routes fake DigitalOcean API endpoints, and
// server is the httptest server that serves them. Both are (re)initialized
// by setup and torn down by teardown.
var (
	mux    *http.ServeMux
	server *httptest.Server
)
// setup starts a fake DigitalOcean API server and points the provider's
// environment variables at it, including fake SSH key material under
// testdata/.
func setup() {
	mux = http.NewServeMux()
	server = httptest.NewServer(mux)
	os.Setenv("DIGITALOCEAN_API_URL", server.URL)
	os.Setenv("DIGITALOCEAN_API_KEY", "api-key")
	os.Setenv("GOFN_SSH_PUBLICKEY_PATH", "testdata/fake_id_rsa.pub")
	os.Setenv("GOFN_SSH_PRIVATEKEY_PATH", "testdata/fake_id_rsa")
}
// teardown stops the fake API server started by setup.
func teardown() {
	server.Close()
}
// defineListSnapshotsEndpoint registers a snapshots listing containing a
// snapshot named "GOFN" — the image name CreateMachine looks up.
func defineListSnapshotsEndpoint() {
	mux.HandleFunc("/v2/snapshots", func(w http.ResponseWriter, r *http.Request) {
		snap := `{"snapshots": [
{
"id": "6372321",
"name": "GOFN",
"regions": [
"nyc1",
"ams1",
"sfo1",
"nyc2",
"ams2",
"sgp1",
"lon1",
"nyc3",
"ams3",
"fra1",
"tor1"
],
"created_at": "2014-09-26T16:40:18Z",
"resource_id": "2713828",
"resource_type": "droplet",
"min_disk_size": 20,
"size_gigabytes": 1.42
}]
}`
		fmt.Fprint(w, snap)
	})
}
// defineBrokenListSnapshotsEndpoint registers a snapshots listing whose JSON
// is deliberately invalid (note the trailing comma after size_gigabytes) and
// whose snapshot name is not "GOFN", so callers are expected to fail.
func defineBrokenListSnapshotsEndpoint() {
	mux.HandleFunc("/v2/snapshots", func(w http.ResponseWriter, r *http.Request) {
		snap := `{"snapshots": [
{
"id": "6372321",
"name": "5.10 x64",
"regions": [
"nyc1",
"ams1",
"sfo1",
"nyc2",
"ams2",
"sgp1",
"lon1",
"nyc3",
"ams3",
"fra1",
"tor1"
],
"created_at": "2014-09-26T16:40:18Z",
"resource_id": "2713828",
"resource_type": "droplet",
"min_disk_size": 20,
"size_gigabytes": 1.42,
}]
}`
		fmt.Fprint(w, snap)
	})
}
// TestAuth table-tests Digitalocean.Auth across API key / API URL
// combinations, checking both whether Auth errors and which base URL the
// client ends up configured with.
func TestAuth(t *testing.T) {
	for _, test := range []struct {
		apiKEY   string
		apiURL   string
		baseURL  string
		errIsNil bool
	}{
		{"", "", "", false},                                                // missing API key -> error
		{"apikey", "", "https://api.digitalocean.com/", true},              // default base URL
		{"apikey", "http://127.0.0.1:3000", "http://127.0.0.1:3000", true}, // explicit URL honored
		{"apikey", "://localhost", "", false},                              // unparsable URL -> error
	} {
		do := &Digitalocean{}
		os.Setenv("DIGITALOCEAN_API_KEY", test.apiKEY)
		os.Setenv("DIGITALOCEAN_API_URL", test.apiURL)
		errBool := do.Auth() == nil
		if errBool != test.errIsNil {
			t.Errorf("%+v Expected %+v but found %+v", test, test.errIsNil, errBool)
		}
		// Only check the base URL when Auth succeeded.
		if errBool && (do.client.BaseURL.String() != test.baseURL) {
			t.Errorf("Expected %q but found %q", test.baseURL, do.client.BaseURL)
		}
	}
}
// TestCreateMachine drives the happy path of CreateMachine against the fake
// API: droplet creation, droplet polling, SSH key listing/creation, and
// snapshot lookup — then checks the fields of the returned machine.
func TestCreateMachine(t *testing.T) {
	setup()
	defer teardown()
	// Droplet creation endpoint; must be hit with POST.
	mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost {
			t.Fatalf("Expected method POST but request method is %s", r.Method)
		}
		droplet := `{"droplet": {
"id": 1,
"locked":false,
"name": "gofn",
"region": {"slug": "nyc3"},
"status": "new",
"image": {"slug": "ubuntu-16-10-x64"},
"networks": {
"v4":[
{
"ip_address": "104.131.186.241",
"type": "public"
}
]
}
}
}`
		w.Header().Set("Content-Type", "application/json; charset=utf8")
		fmt.Fprintln(w, droplet)
	})
	// Polling endpoint for the freshly created droplet (id 1).
	mux.HandleFunc("/v2/droplets/1", func(w http.ResponseWriter, r *http.Request) {
		droplet := `{"droplet": {
"id": 1,
"locked":false,
"name": "gofn",
"region": {"slug": "nyc3"},
"status": "new",
"image": {"slug": "ubuntu-16-10-x64"},
"networks": {
"v4":[
{
"ip_address": "104.131.186.241",
"type": "public"
}
]
}
}
}`
		w.Header().Set("Content-Type", "application/json; charset=utf8")
		fmt.Fprintln(w, droplet)
	})
	// SSH key endpoint: POST registers a key, GET lists the existing one.
	mux.HandleFunc("/v2/account/keys", func(w http.ResponseWriter, r *http.Request) {
		if r.Method == http.MethodPost {
			w.WriteHeader(201)
			key := `{
"ssh_key": {
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "GOFN"
}
}`
			fmt.Fprintln(w, key)
		}
		if r.Method == http.MethodGet {
			w.WriteHeader(200)
			keys := `{
"ssh_keys": [
{
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "GOFN"
}
]
}`
			fmt.Fprintln(w, keys)
		}
	})
	defineListSnapshotsEndpoint()
	do := &Digitalocean{}
	m, err := do.CreateMachine()
	if err != nil {
		// temporary solution because we don't have a real ip to connect
		if !strings.Contains(err.Error(), "ssh: handshake failed") {
			t.Fatalf("Expected run without errors but has %q", err)
		}
	}
	if m.ID != "1" {
		t.Errorf("Expected id = 1 but found %s", m.ID)
	}
	if m.IP != "104.131.186.241" {
		t.Errorf("Expected id = 104.131.186.241 but found %s", m.IP)
	}
	if m.Name != "gofn" {
		t.Errorf("Expected name = \"gofn\" but found %q", m.Name)
	}
	if m.Status != "new" {
		t.Errorf("Expected status = \"new\" but found %q", m.Status)
	}
	if m.SSHKeysID[0] != 512189 {
		t.Errorf("Expected SSHKeysID = 512189 but found %q", m.SSHKeysID[0])
	}
}
// TestCreateMachineWrongAuth verifies that CreateMachine fails fast (nil
// machine, non-nil error) when the API URL is malformed and authentication
// cannot be set up.
func TestCreateMachineWrongAuth(t *testing.T) {
	os.Setenv("DIGITALOCEAN_API_URL", "://localhost")
	do := &Digitalocean{}
	m, err := do.CreateMachine()
	if err == nil || m != nil {
		// Fixed typo in the failure message ("erros" -> "errors").
		t.Errorf("expected errors but run without errors")
	}
}
// TestCreateMachineWrongIP serves a droplet payload with no "networks"
// section, so no public IP can be extracted, and expects CreateMachine to
// error.
func TestCreateMachineWrongIP(t *testing.T) {
	setup()
	defer teardown()
	mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost {
			t.Fatalf("Expected method POST but request method is %s", r.Method)
		}
		droplet := `{"droplet": {
"id": 1,
"name": "gofn",
"region": {"slug": "nyc3"},
"status": "new",
"image": {"slug": "ubuntu-16-10-x64"}
}
}`
		w.Header().Set("Content-Type", "application/json; charset=utf8")
		fmt.Fprintln(w, droplet)
	})
	mux.HandleFunc("/v2/account/keys", func(w http.ResponseWriter, r *http.Request) {
		if r.Method == http.MethodPost {
			w.WriteHeader(201)
			key := `{
"ssh_key": {
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "GOFN"
}
}`
			fmt.Fprintln(w, key)
		}
		if r.Method == http.MethodGet {
			w.WriteHeader(200)
			keys := `{
"ssh_keys": [
{
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "GOFN"
}
]
}`
			fmt.Fprintln(w, keys)
		}
	})
	defineListSnapshotsEndpoint()
	do := &Digitalocean{}
	_, err := do.CreateMachine()
	if err == nil {
		t.Errorf("expected errors but run without errors")
	}
}
// TestCreateMachineRequestError serves a droplet payload that is invalid
// JSON (deliberate trailing comma after the image field), so decoding the
// creation response fails and CreateMachine must return an error.
func TestCreateMachineRequestError(t *testing.T) {
	setup()
	defer teardown()
	mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost {
			t.Fatalf("Expected method POST but request method is %s", r.Method)
		}
		droplet := `{"droplet": {
"id": 1,
"name": "gofn",
"region": {"slug": "nyc3"},
"status": "new",
"image": {"slug": "ubuntu-16-10-x64"},
}
}`
		w.Header().Set("Content-Type", "application/json; charset=utf8")
		fmt.Fprintln(w, droplet)
	})
	mux.HandleFunc("/v2/account/keys", func(w http.ResponseWriter, r *http.Request) {
		if r.Method == http.MethodPost {
			w.WriteHeader(201)
			key := `{
"ssh_key": {
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "GOFN"
}
}`
			fmt.Fprintln(w, key)
		}
		if r.Method == http.MethodGet {
			w.WriteHeader(200)
			keys := `{
"ssh_keys": [
{
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "GOFN"
}
]
}`
			fmt.Fprintln(w, keys)
		}
	})
	defineListSnapshotsEndpoint()
	do := &Digitalocean{}
	_, err := do.CreateMachine()
	if err == nil {
		t.Errorf("expected errors but run without errors")
	}
}
// TestCreateMachineWithNewSSHKey exercises the happy path where the account
// key is named "my_key" rather than the default, covering key registration
// followed by droplet creation; the returned machine's fields are checked.
func TestCreateMachineWithNewSSHKey(t *testing.T) {
	setup()
	defer teardown()
	mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost {
			t.Fatalf("Expected method POST but request method is %s", r.Method)
		}
		droplet := `{"droplet": {
"id": 1,
"name": "gofn",
"region": {"slug": "nyc3"},
"status": "new",
"image": {"slug": "ubuntu-16-10-x64"},
"networks": {
"v4":[
{
"ip_address": "104.131.186.241",
"type": "public"
}
]
}
}
}`
		w.Header().Set("Content-Type", "application/json; charset=utf8")
		fmt.Fprintln(w, droplet)
	})
	// Polling endpoint for the created droplet.
	mux.HandleFunc("/v2/droplets/1", func(w http.ResponseWriter, r *http.Request) {
		droplet := `{"droplet": {
"id": 1,
"locked":false,
"name": "gofn",
"region": {"slug": "nyc3"},
"status": "new",
"image": {"slug": "ubuntu-16-10-x64"},
"networks": {
"v4":[
{
"ip_address": "104.131.186.241",
"type": "public"
}
]
}
}
}`
		w.Header().Set("Content-Type", "application/json; charset=utf8")
		fmt.Fprintln(w, droplet)
	})
	mux.HandleFunc("/v2/account/keys", func(w http.ResponseWriter, r *http.Request) {
		if r.Method == http.MethodPost {
			w.WriteHeader(201)
			key := `{
"ssh_key": {
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "my_key"
}
}`
			fmt.Fprintln(w, key)
		}
		if r.Method == http.MethodGet {
			w.WriteHeader(200)
			keys := `{
"ssh_keys": [
{
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "my_key"
}
]
}`
			fmt.Fprintln(w, keys)
		}
	})
	defineListSnapshotsEndpoint()
	do := &Digitalocean{}
	m, err := do.CreateMachine()
	if err != nil {
		// temporary solution because we don't have a real ip to connect
		if !strings.Contains(err.Error(), "ssh: handshake failed") {
			t.Fatalf("Expected run without errors but has %q", err)
		}
	}
	if m.ID != "1" {
		t.Errorf("Expected id = 1 but found %s", m.ID)
	}
	if m.IP != "104.131.186.241" {
		t.Errorf("Expected id = 104.131.186.241 but found %s", m.IP)
	}
	if m.Name != "gofn" {
		t.Errorf("Expected name = \"gofn\" but found %q", m.Name)
	}
	if m.Status != "new" {
		t.Errorf("Expected status = \"new\" but found %q", m.Status)
	}
	if m.SSHKeysID[0] != 512189 {
		t.Errorf("Expected SSHKeysID = 512189 but found %q", m.SSHKeysID[0])
	}
}
// TestCreateMachineWithWrongSSHKey makes the key-registration (POST)
// response invalid JSON (deliberate trailing comma), so CreateMachine must
// fail while handling the SSH key.
func TestCreateMachineWithWrongSSHKey(t *testing.T) {
	setup()
	defer teardown()
	mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost {
			t.Fatalf("Expected method POST but request method is %s", r.Method)
		}
		droplet := `{"droplet": {
"id": 1,
"name": "gofn",
"region": {"slug": "nyc3"},
"status": "new",
"image": {"slug": "ubuntu-16-10-x64"},
"networks": {
"v4":[
{
"ip_address": "104.131.186.241",
"type": "public"
}
]
}
}
}`
		w.Header().Set("Content-Type", "application/json; charset=utf8")
		fmt.Fprintln(w, droplet)
	})
	mux.HandleFunc("/v2/account/keys", func(w http.ResponseWriter, r *http.Request) {
		if r.Method == http.MethodPost {
			w.WriteHeader(201)
			key := `{
"ssh_key": {
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "my_key",
}
}`
			fmt.Fprintln(w, key)
		}
		if r.Method == http.MethodGet {
			w.WriteHeader(200)
			keys := `{
"ssh_keys": [
{
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "my_key"
}
]
}`
			fmt.Fprintln(w, keys)
		}
	})
	defineListSnapshotsEndpoint()
	do := &Digitalocean{}
	_, err := do.CreateMachine()
	if err == nil {
		t.Fatalf("Expected run with errors but not has %q", err)
	}
}
// TestCreateMachineWithWrongSSHKeyList makes the key-listing (GET) response
// invalid JSON (deliberate trailing comma), so CreateMachine must fail while
// listing SSH keys.
func TestCreateMachineWithWrongSSHKeyList(t *testing.T) {
	setup()
	defer teardown()
	mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost {
			t.Fatalf("Expected method POST but request method is %s", r.Method)
		}
		droplet := `{"droplet": {
"id": 1,
"name": "gofn",
"region": {"slug": "nyc3"},
"status": "new",
"image": {"slug": "ubuntu-16-10-x64"},
"networks": {
"v4":[
{
"ip_address": "104.131.186.241",
"type": "public"
}
]
}
}
}`
		w.Header().Set("Content-Type", "application/json; charset=utf8")
		fmt.Fprintln(w, droplet)
	})
	mux.HandleFunc("/v2/account/keys", func(w http.ResponseWriter, r *http.Request) {
		if r.Method == http.MethodPost {
			w.WriteHeader(201)
			key := `{
"ssh_key": {
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "my_key"
}
}`
			fmt.Fprintln(w, key)
		}
		if r.Method == http.MethodGet {
			w.WriteHeader(200)
			keys := `{
"ssh_keys": [
{
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "my_key",
}
]
}`
			fmt.Fprintln(w, keys)
		}
	})
	defineListSnapshotsEndpoint()
	do := &Digitalocean{}
	_, err := do.CreateMachine()
	if err == nil {
		t.Fatalf("Expected run with errors but not has %q", err)
	}
}
// TestCreateMachineWithoutSSHKey clears the public-key path env var so the
// local key cannot be loaded, and expects CreateMachine to fail.
func TestCreateMachineWithoutSSHKey(t *testing.T) {
	setup()
	defer teardown()
	os.Setenv("GOFN_SSH_PUBLICKEY_PATH", "")
	mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost {
			t.Fatalf("Expected method POST but request method is %s", r.Method)
		}
		droplet := `{"droplet": {
"id": 1,
"name": "gofn",
"region": {"slug": "nyc3"},
"status": "new",
"image": {"slug": "ubuntu-16-10-x64"},
"networks": {
"v4":[
{
"ip_address": "104.131.186.241",
"type": "public"
}
]
}
}
}`
		w.Header().Set("Content-Type", "application/json; charset=utf8")
		fmt.Fprintln(w, droplet)
	})
	mux.HandleFunc("/v2/account/keys", func(w http.ResponseWriter, r *http.Request) {
		if r.Method == http.MethodPost {
			w.WriteHeader(201)
			key := `{
"ssh_key": {
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "my_key",
}
}`
			fmt.Fprintln(w, key)
		}
		if r.Method == http.MethodGet {
			w.WriteHeader(200)
			keys := `{
"ssh_keys": [
{
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "my_key"
}
]
}`
			fmt.Fprintln(w, keys)
		}
	})
	defineListSnapshotsEndpoint()
	do := &Digitalocean{}
	_, err := do.CreateMachine()
	if err == nil {
		t.Fatalf("Expected run with errors but not has %q", err)
	}
}
// TestCreateMachineWithWrongSSHKeyPath points the public-key path env var at
// a nonexistent file, and expects CreateMachine to fail loading it.
func TestCreateMachineWithWrongSSHKeyPath(t *testing.T) {
	setup()
	defer teardown()
	os.Setenv("GOFN_SSH_PUBLICKEY_PATH", "test/bla.pub")
	mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost {
			t.Fatalf("Expected method POST but request method is %s", r.Method)
		}
		droplet := `{"droplet": {
"id": 1,
"name": "gofn",
"region": {"slug": "nyc3"},
"status": "new",
"image": {"slug": "ubuntu-16-10-x64"},
"networks": {
"v4":[
{
"ip_address": "104.131.186.241",
"type": "public"
}
]
}
}
}`
		w.Header().Set("Content-Type", "application/json; charset=utf8")
		fmt.Fprintln(w, droplet)
	})
	mux.HandleFunc("/v2/account/keys", func(w http.ResponseWriter, r *http.Request) {
		if r.Method == http.MethodPost {
			w.WriteHeader(201)
			key := `{
"ssh_key": {
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "my_key",
}
}`
			fmt.Fprintln(w, key)
		}
		if r.Method == http.MethodGet {
			w.WriteHeader(200)
			keys := `{
"ssh_keys": [
{
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "my_key"
}
]
}`
			fmt.Fprintln(w, keys)
		}
	})
	defineListSnapshotsEndpoint()
	do := &Digitalocean{}
	_, err := do.CreateMachine()
	if err == nil {
		t.Fatalf("Expected run with errors but not has %q", err)
	}
}
// TestCreateMachineWrongSnapshotList wires in the broken snapshots endpoint
// (invalid JSON / wrong snapshot name) and expects CreateMachine to fail
// resolving the image.
func TestCreateMachineWrongSnapshotList(t *testing.T) {
	setup()
	defer teardown()
	mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost {
			t.Fatalf("Expected method POST but request method is %s", r.Method)
		}
		droplet := `{"droplet": {
"id": 1,
"name": "gofn",
"region": {"slug": "nyc3"},
"status": "new",
"image": {"slug": "ubuntu-16-10-x64"},
"networks": {
"v4":[
{
"ip_address": "104.131.186.241",
"type": "public"
}
]
}
}
}`
		w.Header().Set("Content-Type", "application/json; charset=utf8")
		fmt.Fprintln(w, droplet)
	})
	mux.HandleFunc("/v2/account/keys", func(w http.ResponseWriter, r *http.Request) {
		if r.Method == http.MethodPost {
			w.WriteHeader(201)
			key := `{
"ssh_key": {
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "GOFN"
}
}`
			fmt.Fprintln(w, key)
		}
		if r.Method == http.MethodGet {
			w.WriteHeader(200)
			keys := `{
"ssh_keys": [
{
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "GOFN"
}
]
}`
			fmt.Fprintln(w, keys)
		}
	})
	defineBrokenListSnapshotsEndpoint()
	do := &Digitalocean{}
	_, err := do.CreateMachine()
	if err == nil {
		t.Fatalf("Expected run with errors but not has")
	}
}
// TestDeleteMachine covers the normal deletion flow: a shutdown action is
// accepted, its status polls as "completed", and the droplet delete returns
// 204. Expects no error.
func TestDeleteMachine(t *testing.T) {
	setup()
	defer teardown()
	// Action endpoint: only the "shutdown" request gets a response here.
	mux.HandleFunc("/v2/droplets/503/actions", func(w http.ResponseWriter, r *http.Request) {
		rBody, err := ioutil.ReadAll(r.Body)
		if err != nil {
			t.Fatalf("Expected parse request body without errors but has %q", err)
		}
		if strings.Contains(string(rBody), "shutdown") {
			w.WriteHeader(201)
			// NOTE: the fixture repeats the "status" key; the JSON decoder
			// keeps the last value ("completed").
			action := `{
"action": {
"id": 36077293,
"status": "in-progress",
"type": "shutdown",
"started_at": "2014-11-04T17:08:03Z",
"completed_at": null,
"resource_id": 503,
"status": "completed",
"resource_type": "droplet",
"region": {"slug": "nyc3"}
}
}`
			fmt.Fprintln(w, action)
			return
		}
	})
	// Droplet delete endpoint.
	mux.HandleFunc("/v2/droplets/503", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(204)
	})
	mux.HandleFunc("/v2/account/keys", func(w http.ResponseWriter, r *http.Request) {
		if r.Method == http.MethodPost {
			w.WriteHeader(201)
			key := `{
"ssh_key": {
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "GOFN"
}
}`
			fmt.Fprintln(w, key)
		}
		if r.Method == http.MethodGet {
			w.WriteHeader(200)
			keys := `{
"ssh_keys": [
{
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "GOFN"
}
]
}`
			fmt.Fprintln(w, keys)
		}
	})
	// Status polling for the shutdown action.
	mux.HandleFunc("/v2/droplets/503/actions/36077293", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(200)
		action := `{
"action": {
"id": 36077293,
"status": "completed",
"type": "shutdown",
"started_at": "2014-11-04T17:08:03Z",
"completed_at": null,
"resource_id": 503,
"status": "completed",
"resource_type": "droplet",
"region": {"slug": "nyc3"}
}
}`
		fmt.Fprintln(w, action)
		return
	})
	do := &Digitalocean{}
	machine := &iaas.Machine{ID: "503"}
	err := do.DeleteMachine(machine)
	if err != nil {
		t.Errorf("Expected run without errors but has %q", err)
	}
}
// TestDeleteMachineWithShutdownError serves an invalid (trailing-comma)
// shutdown response but a valid power_off response, exercising the fallback
// path; deletion should still succeed.
func TestDeleteMachineWithShutdownError(t *testing.T) {
	setup()
	defer teardown()
	mux.HandleFunc("/v2/droplets/503/actions", func(w http.ResponseWriter, r *http.Request) {
		rBody, err := ioutil.ReadAll(r.Body)
		if err != nil {
			t.Fatalf("Expected parse request body without errors but has %q", err)
		}
		// Deliberately broken JSON for shutdown (trailing comma).
		if strings.Contains(string(rBody), "shutdown") {
			w.WriteHeader(201)
			action := `{
"action": {
"id": 36077293,
"status": "in-progress",
"type": "shutdown",
"started_at": "2014-11-04T17:08:03Z",
"completed_at": null,
"resource_id": 503,
"status": "completed",
"resource_type": "droplet",
"region": {"slug": "nyc3"},
}
}`
			fmt.Fprintln(w, action)
			return
		}
		// Valid response for the power_off fallback.
		if strings.Contains(string(rBody), "power_off") {
			w.WriteHeader(201)
			action := `{
"action": {
"id": 36077293,
"status": "in-progress",
"type": "power_off",
"started_at": "2014-11-04T17:08:03Z",
"completed_at": null,
"resource_id": 503,
"status": "completed",
"resource_type": "droplet",
"region": {"slug": "nyc3"}
}
}`
			fmt.Fprintln(w, action)
			return
		}
	})
	mux.HandleFunc("/v2/droplets/503", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(204)
	})
	mux.HandleFunc("/v2/droplets/503/actions/36077293", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(200)
		action := `{
"action": {
"id": 36077293,
"status": "completed",
"type": "shutdown",
"started_at": "2014-11-04T17:08:03Z",
"completed_at": null,
"resource_id": 503,
"status": "completed",
"resource_type": "droplet",
"region": {"slug": "nyc3"}
}
}`
		fmt.Fprintln(w, action)
		return
	})
	do := &Digitalocean{}
	machine := &iaas.Machine{ID: "503"}
	err := do.DeleteMachine(machine)
	if err != nil {
		t.Errorf("Expected run without errors but has %q", err)
	}
}
func TestDeleteMachineWithShutdownErrorAndPowerOff(t *testing.T) {
setup()
defer teardown()
mux.HandleFunc("/v2/droplets/503/actions", func(w http.ResponseWriter, r *http.Request) {
rBody, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Fatalf("Expected parse request body without errors but has %q", err)
}
if strings.Contains(string(rBody), "shutdown") {
w.WriteHeader(201)
action := `{
"action": {
"id": 36077293,
"status": "in-progress",
"type": "shutdown",
"started_at": "2014-11-04T17:08:03Z",
"completed_at": null,
"resource_id": 503,
"resource_type": "droplet",
"region": {"slug": "nyc3"},
}
}`
fmt.Fprintln(w, action)
return
}
if strings.Contains(string(rBody), "power_off") {
w.WriteHeader(201)
action := `{
"action": {
"id": 36077293,
"status": "in-progress",
"type": "power_off",
"started_at": "2014-11-04T17:08:03Z",
"completed_at": null,
"resource_id": 503,
"resource_type": "droplet",
"region": {"slug": "nyc3"},
}
}`
fmt.Fprintln(w, action)
return
}
})
mux.HandleFunc("/v2/droplets/503", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(204)
})
do := &Digitalocean{}
machine := &iaas.Machine{ID: "503"}
err := do.DeleteMachine(machine)
if err == nil {
t.Errorf("expected run errors but not has %q", err)
}
}
// TestDeleteMachineWrongAuth sets an unparseable API URL ("://localhost")
// so DeleteMachine must fail before reaching any endpoint.
func TestDeleteMachineWrongAuth(t *testing.T) {
	os.Setenv("DIGITALOCEAN_API_URL", "://localhost")
	do := &Digitalocean{}
	machine := &iaas.Machine{ID: "503"}
	err := do.DeleteMachine(machine)
	if err == nil {
		t.Errorf("expected errors but run without errors")
	}
}
func TestCreateSnapshot(t *testing.T) {
setup()
defer teardown()
mux.HandleFunc("/v2/droplets/123/actions/36805022", func(w http.ResponseWriter, r *http.Request) {
action := `{
"action": {
"id": 36805022,
"status": "completed",
"type": "snapshot",
"started_at": "2014-11-14T16:34:39Z",
"completed_at": null,
"resource_id": 3164450,
"resource_type": "droplet",
"region": {"slug": "nyc3"}
}
}`
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, action)
})
mux.HandleFunc("/v2/droplets/123/actions", func(w http.ResponseWriter, r *http.Request) {
action := `{
"action": {
"id": 36805022,
"status": "in-progress",
"type": "snapshot",
"started_at": "2014-11-14T16:34:39Z",
"completed_at": null,
"resource_id": 3164450,
"resource_type": "droplet",
"region": {"slug": "nyc3"}
}
}`
w.WriteHeader(http.StatusCreated)
fmt.Fprint(w, action)
})
do := &Digitalocean{}
machine := &iaas.Machine{ID: "123"}
err := do.CreateSnapshot(machine)
if err != nil {
t.Errorf("expected run without errors but has %q", err)
}
}
// TestCreateSnapshotWrongAuth sets an unparseable API URL ("://localhost")
// so CreateSnapshot must fail before reaching any endpoint.
func TestCreateSnapshotWrongAuth(t *testing.T) {
	os.Setenv("DIGITALOCEAN_API_URL", "://localhost")
	do := &Digitalocean{}
	machine := &iaas.Machine{ID: "503"}
	err := do.CreateSnapshot(machine)
	if err == nil {
		t.Errorf("expected errors but run without errors")
	}
}
func TestCreateSnapshotActionError(t *testing.T) {
setup()
defer teardown()
mux.HandleFunc("/v2/droplets/123/actions", func(w http.ResponseWriter, r *http.Request) {
action := `{
"action": {
"id": 36805022,
"status": "in-progress",
"type": "snapshot",
"started_at": "2014-11-14T16:34:39Z",
"completed_at": null,
"resource_id": 3164450,
"resource_type": "droplet",
"region": {"slug": "nyc3"},
}
}`
w.WriteHeader(http.StatusCreated)
fmt.Fprint(w, action)
})
do := &Digitalocean{}
machine := &iaas.Machine{ID: "123"}
err := do.CreateSnapshot(machine)
if err == nil {
t.Errorf("expected run with errors but not has")
}
}
// TestGeneratePrivateSSHKey checks that generatePrivateKey returns a
// non-nil key and no error for a 128-bit key size.
func TestGeneratePrivateSSHKey(t *testing.T) {
	private, err := generatePrivateKey(128)
	if err != nil {
		t.Errorf("expected run without errors but has %q", err)
	}
	if private == nil {
		t.Errorf("expected private not nil but is nil")
	}
}
// TestGeneratePublicSSHKey generates a private key and then checks that
// generatePublicKey derives a public key from it without error.
func TestGeneratePublicSSHKey(t *testing.T) {
	private, err := generatePrivateKey(128)
	if err != nil {
		t.Errorf("expected run without errors but has %q", err)
	}
	if private == nil {
		t.Errorf("expected private not nil but is nil")
	}
	err = generatePublicKey(private)
	if err != nil {
		t.Errorf("expected run without errors but has %q", err)
	}
}
// TestGenerateFNSSHKey checks that the combined key-pair generation
// helper runs without error for a 128-bit key size.
func TestGenerateFNSSHKey(t *testing.T) {
	err := generateFNSSHKey(128)
	if err != nil {
		t.Errorf("expected run without errors but has %q", err)
	}
}
faster tests: shrink the RSA key size used by the key-generation tests (128 -> 32 bits) so they run quickly
package digitalocean
import (
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"strings"
"testing"
"github.com/nuveo/gofn/iaas"
)
var (
	// mux is the request multiplexer the fake DigitalOcean API is built on.
	mux *http.ServeMux
	// server is the httptest server standing in for the real API.
	server *httptest.Server
)
// setup starts a fresh httptest server backed by a new ServeMux and
// points the provider at it through environment variables. Tests that
// call setup must also call teardown to release the server.
func setup() {
	mux = http.NewServeMux()
	server = httptest.NewServer(mux)
	// The provider is configured entirely from the environment: direct it
	// at the local fake API and at fixture SSH keys.
	for key, value := range map[string]string{
		"DIGITALOCEAN_API_URL":     server.URL,
		"DIGITALOCEAN_API_KEY":     "api-key",
		"GOFN_SSH_PUBLICKEY_PATH":  "testdata/fake_id_rsa.pub",
		"GOFN_SSH_PRIVATEKEY_PATH": "testdata/fake_id_rsa",
	} {
		os.Setenv(key, value)
	}
}
// teardown shuts down the httptest server started by setup.
func teardown() {
	server.Close()
}
// defineListSnapshotsEndpoint registers a fake /v2/snapshots endpoint
// returning one well-formed snapshot named "GOFN".
func defineListSnapshotsEndpoint() {
	mux.HandleFunc("/v2/snapshots", func(w http.ResponseWriter, r *http.Request) {
		snap := `{"snapshots": [
{
"id": "6372321",
"name": "GOFN",
"regions": [
"nyc1",
"ams1",
"sfo1",
"nyc2",
"ams2",
"sgp1",
"lon1",
"nyc3",
"ams3",
"fra1",
"tor1"
],
"created_at": "2014-09-26T16:40:18Z",
"resource_id": "2713828",
"resource_type": "droplet",
"min_disk_size": 20,
"size_gigabytes": 1.42
}]
}`
		fmt.Fprint(w, snap)
	})
}
// defineBrokenListSnapshotsEndpoint registers a /v2/snapshots endpoint
// whose payload is deliberately invalid JSON (note the trailing comma
// after "size_gigabytes"), used to exercise snapshot-list error paths.
func defineBrokenListSnapshotsEndpoint() {
	mux.HandleFunc("/v2/snapshots", func(w http.ResponseWriter, r *http.Request) {
		snap := `{"snapshots": [
{
"id": "6372321",
"name": "5.10 x64",
"regions": [
"nyc1",
"ams1",
"sfo1",
"nyc2",
"ams2",
"sgp1",
"lon1",
"nyc3",
"ams3",
"fra1",
"tor1"
],
"created_at": "2014-09-26T16:40:18Z",
"resource_id": "2713828",
"resource_type": "droplet",
"min_disk_size": 20,
"size_gigabytes": 1.42,
}]
}`
		fmt.Fprint(w, snap)
	})
}
// TestAuth table-tests Digitalocean.Auth across combinations of API key
// and API URL, checking whether an error occurs and, on success, which
// base URL the client ends up with.
func TestAuth(t *testing.T) {
	for _, test := range []struct {
		apiKEY   string
		apiURL   string
		baseURL  string
		errIsNil bool
	}{
		{"", "", "", false}, // missing key: must fail
		{"apikey", "", "https://api.digitalocean.com/", true}, // default production URL
		{"apikey", "http://127.0.0.1:3000", "http://127.0.0.1:3000", true}, // explicit override
		{"apikey", "://localhost", "", false}, // unparseable URL: must fail
	} {
		do := &Digitalocean{}
		os.Setenv("DIGITALOCEAN_API_KEY", test.apiKEY)
		os.Setenv("DIGITALOCEAN_API_URL", test.apiURL)
		errBool := do.Auth() == nil
		if errBool != test.errIsNil {
			t.Errorf("%+v Expected %+v but found %+v", test, test.errIsNil, errBool)
		}
		// Only inspect the client when Auth succeeded.
		if errBool && (do.client.BaseURL.String() != test.baseURL) {
			t.Errorf("Expected %q but found %q", test.baseURL, do.client.BaseURL)
		}
	}
}
func TestCreateMachine(t *testing.T) {
setup()
defer teardown()
mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
t.Fatalf("Expected method POST but request method is %s", r.Method)
}
droplet := `{"droplet": {
"id": 1,
"locked":false,
"name": "gofn",
"region": {"slug": "nyc3"},
"status": "new",
"image": {"slug": "ubuntu-16-10-x64"},
"networks": {
"v4":[
{
"ip_address": "104.131.186.241",
"type": "public"
}
]
}
}
}`
w.Header().Set("Content-Type", "application/json; charset=utf8")
fmt.Fprintln(w, droplet)
})
mux.HandleFunc("/v2/droplets/1", func(w http.ResponseWriter, r *http.Request) {
droplet := `{"droplet": {
"id": 1,
"locked":false,
"name": "gofn",
"region": {"slug": "nyc3"},
"status": "new",
"image": {"slug": "ubuntu-16-10-x64"},
"networks": {
"v4":[
{
"ip_address": "104.131.186.241",
"type": "public"
}
]
}
}
}`
w.Header().Set("Content-Type", "application/json; charset=utf8")
fmt.Fprintln(w, droplet)
})
mux.HandleFunc("/v2/account/keys", func(w http.ResponseWriter, r *http.Request) {
if r.Method == http.MethodPost {
w.WriteHeader(201)
key := `{
"ssh_key": {
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "GOFN"
}
}`
fmt.Fprintln(w, key)
}
if r.Method == http.MethodGet {
w.WriteHeader(200)
keys := `{
"ssh_keys": [
{
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "GOFN"
}
]
}`
fmt.Fprintln(w, keys)
}
})
defineListSnapshotsEndpoint()
do := &Digitalocean{}
m, err := do.CreateMachine()
if err != nil {
// temporary solution because we don't have a real ip to connect
if !strings.Contains(err.Error(), "ssh: handshake failed") {
t.Fatalf("Expected run without errors but has %q", err)
}
}
if m.ID != "1" {
t.Errorf("Expected id = 1 but found %s", m.ID)
}
if m.IP != "104.131.186.241" {
t.Errorf("Expected id = 104.131.186.241 but found %s", m.IP)
}
if m.Name != "gofn" {
t.Errorf("Expected name = \"gofn\" but found %q", m.Name)
}
if m.Status != "new" {
t.Errorf("Expected status = \"new\" but found %q", m.Status)
}
if m.SSHKeysID[0] != 512189 {
t.Errorf("Expected SSHKeysID = 512189 but found %q", m.SSHKeysID[0])
}
}
func TestCreateMachineWrongAuth(t *testing.T) {
os.Setenv("DIGITALOCEAN_API_URL", "://localhost")
do := &Digitalocean{}
m, err := do.CreateMachine()
if err == nil || m != nil {
t.Errorf("expected erros but run without errors")
}
}
func TestCreateMachineWrongIP(t *testing.T) {
setup()
defer teardown()
mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
t.Fatalf("Expected method POST but request method is %s", r.Method)
}
droplet := `{"droplet": {
"id": 1,
"name": "gofn",
"region": {"slug": "nyc3"},
"status": "new",
"image": {"slug": "ubuntu-16-10-x64"}
}
}`
w.Header().Set("Content-Type", "application/json; charset=utf8")
fmt.Fprintln(w, droplet)
})
mux.HandleFunc("/v2/account/keys", func(w http.ResponseWriter, r *http.Request) {
if r.Method == http.MethodPost {
w.WriteHeader(201)
key := `{
"ssh_key": {
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "GOFN"
}
}`
fmt.Fprintln(w, key)
}
if r.Method == http.MethodGet {
w.WriteHeader(200)
keys := `{
"ssh_keys": [
{
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "GOFN"
}
]
}`
fmt.Fprintln(w, keys)
}
})
defineListSnapshotsEndpoint()
do := &Digitalocean{}
_, err := do.CreateMachine()
if err == nil {
t.Errorf("expected errors but run without errors")
}
}
func TestCreateMachineRequestError(t *testing.T) {
setup()
defer teardown()
mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
t.Fatalf("Expected method POST but request method is %s", r.Method)
}
droplet := `{"droplet": {
"id": 1,
"name": "gofn",
"region": {"slug": "nyc3"},
"status": "new",
"image": {"slug": "ubuntu-16-10-x64"},
}
}`
w.Header().Set("Content-Type", "application/json; charset=utf8")
fmt.Fprintln(w, droplet)
})
mux.HandleFunc("/v2/account/keys", func(w http.ResponseWriter, r *http.Request) {
if r.Method == http.MethodPost {
w.WriteHeader(201)
key := `{
"ssh_key": {
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "GOFN"
}
}`
fmt.Fprintln(w, key)
}
if r.Method == http.MethodGet {
w.WriteHeader(200)
keys := `{
"ssh_keys": [
{
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "GOFN"
}
]
}`
fmt.Fprintln(w, keys)
}
})
defineListSnapshotsEndpoint()
do := &Digitalocean{}
_, err := do.CreateMachine()
if err == nil {
t.Errorf("expected errors but run without errors")
}
}
func TestCreateMachineWithNewSSHKey(t *testing.T) {
setup()
defer teardown()
mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
t.Fatalf("Expected method POST but request method is %s", r.Method)
}
droplet := `{"droplet": {
"id": 1,
"name": "gofn",
"region": {"slug": "nyc3"},
"status": "new",
"image": {"slug": "ubuntu-16-10-x64"},
"networks": {
"v4":[
{
"ip_address": "104.131.186.241",
"type": "public"
}
]
}
}
}`
w.Header().Set("Content-Type", "application/json; charset=utf8")
fmt.Fprintln(w, droplet)
})
mux.HandleFunc("/v2/droplets/1", func(w http.ResponseWriter, r *http.Request) {
droplet := `{"droplet": {
"id": 1,
"locked":false,
"name": "gofn",
"region": {"slug": "nyc3"},
"status": "new",
"image": {"slug": "ubuntu-16-10-x64"},
"networks": {
"v4":[
{
"ip_address": "104.131.186.241",
"type": "public"
}
]
}
}
}`
w.Header().Set("Content-Type", "application/json; charset=utf8")
fmt.Fprintln(w, droplet)
})
mux.HandleFunc("/v2/account/keys", func(w http.ResponseWriter, r *http.Request) {
if r.Method == http.MethodPost {
w.WriteHeader(201)
key := `{
"ssh_key": {
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "my_key"
}
}`
fmt.Fprintln(w, key)
}
if r.Method == http.MethodGet {
w.WriteHeader(200)
keys := `{
"ssh_keys": [
{
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "my_key"
}
]
}`
fmt.Fprintln(w, keys)
}
})
defineListSnapshotsEndpoint()
do := &Digitalocean{}
m, err := do.CreateMachine()
if err != nil {
// temporary solution because we don't have a real ip to connect
if !strings.Contains(err.Error(), "ssh: handshake failed") {
t.Fatalf("Expected run without errors but has %q", err)
}
}
if m.ID != "1" {
t.Errorf("Expected id = 1 but found %s", m.ID)
}
if m.IP != "104.131.186.241" {
t.Errorf("Expected id = 104.131.186.241 but found %s", m.IP)
}
if m.Name != "gofn" {
t.Errorf("Expected name = \"gofn\" but found %q", m.Name)
}
if m.Status != "new" {
t.Errorf("Expected status = \"new\" but found %q", m.Status)
}
if m.SSHKeysID[0] != 512189 {
t.Errorf("Expected SSHKeysID = 512189 but found %q", m.SSHKeysID[0])
}
}
func TestCreateMachineWithWrongSSHKey(t *testing.T) {
setup()
defer teardown()
mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
t.Fatalf("Expected method POST but request method is %s", r.Method)
}
droplet := `{"droplet": {
"id": 1,
"name": "gofn",
"region": {"slug": "nyc3"},
"status": "new",
"image": {"slug": "ubuntu-16-10-x64"},
"networks": {
"v4":[
{
"ip_address": "104.131.186.241",
"type": "public"
}
]
}
}
}`
w.Header().Set("Content-Type", "application/json; charset=utf8")
fmt.Fprintln(w, droplet)
})
mux.HandleFunc("/v2/account/keys", func(w http.ResponseWriter, r *http.Request) {
if r.Method == http.MethodPost {
w.WriteHeader(201)
key := `{
"ssh_key": {
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "my_key",
}
}`
fmt.Fprintln(w, key)
}
if r.Method == http.MethodGet {
w.WriteHeader(200)
keys := `{
"ssh_keys": [
{
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "my_key"
}
]
}`
fmt.Fprintln(w, keys)
}
})
defineListSnapshotsEndpoint()
do := &Digitalocean{}
_, err := do.CreateMachine()
if err == nil {
t.Fatalf("Expected run with errors but not has %q", err)
}
}
func TestCreateMachineWithWrongSSHKeyList(t *testing.T) {
setup()
defer teardown()
mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
t.Fatalf("Expected method POST but request method is %s", r.Method)
}
droplet := `{"droplet": {
"id": 1,
"name": "gofn",
"region": {"slug": "nyc3"},
"status": "new",
"image": {"slug": "ubuntu-16-10-x64"},
"networks": {
"v4":[
{
"ip_address": "104.131.186.241",
"type": "public"
}
]
}
}
}`
w.Header().Set("Content-Type", "application/json; charset=utf8")
fmt.Fprintln(w, droplet)
})
mux.HandleFunc("/v2/account/keys", func(w http.ResponseWriter, r *http.Request) {
if r.Method == http.MethodPost {
w.WriteHeader(201)
key := `{
"ssh_key": {
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "my_key"
}
}`
fmt.Fprintln(w, key)
}
if r.Method == http.MethodGet {
w.WriteHeader(200)
keys := `{
"ssh_keys": [
{
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "my_key",
}
]
}`
fmt.Fprintln(w, keys)
}
})
defineListSnapshotsEndpoint()
do := &Digitalocean{}
_, err := do.CreateMachine()
if err == nil {
t.Fatalf("Expected run with errors but not has %q", err)
}
}
func TestCreateMachineWithoutSSHKey(t *testing.T) {
setup()
defer teardown()
os.Setenv("GOFN_SSH_PUBLICKEY_PATH", "")
mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
t.Fatalf("Expected method POST but request method is %s", r.Method)
}
droplet := `{"droplet": {
"id": 1,
"name": "gofn",
"region": {"slug": "nyc3"},
"status": "new",
"image": {"slug": "ubuntu-16-10-x64"},
"networks": {
"v4":[
{
"ip_address": "104.131.186.241",
"type": "public"
}
]
}
}
}`
w.Header().Set("Content-Type", "application/json; charset=utf8")
fmt.Fprintln(w, droplet)
})
mux.HandleFunc("/v2/account/keys", func(w http.ResponseWriter, r *http.Request) {
if r.Method == http.MethodPost {
w.WriteHeader(201)
key := `{
"ssh_key": {
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "my_key",
}
}`
fmt.Fprintln(w, key)
}
if r.Method == http.MethodGet {
w.WriteHeader(200)
keys := `{
"ssh_keys": [
{
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "my_key"
}
]
}`
fmt.Fprintln(w, keys)
}
})
defineListSnapshotsEndpoint()
do := &Digitalocean{}
_, err := do.CreateMachine()
if err == nil {
t.Fatalf("Expected run with errors but not has %q", err)
}
}
func TestCreateMachineWithWrongSSHKeyPath(t *testing.T) {
setup()
defer teardown()
os.Setenv("GOFN_SSH_PUBLICKEY_PATH", "test/bla.pub")
mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
t.Fatalf("Expected method POST but request method is %s", r.Method)
}
droplet := `{"droplet": {
"id": 1,
"name": "gofn",
"region": {"slug": "nyc3"},
"status": "new",
"image": {"slug": "ubuntu-16-10-x64"},
"networks": {
"v4":[
{
"ip_address": "104.131.186.241",
"type": "public"
}
]
}
}
}`
w.Header().Set("Content-Type", "application/json; charset=utf8")
fmt.Fprintln(w, droplet)
})
mux.HandleFunc("/v2/account/keys", func(w http.ResponseWriter, r *http.Request) {
if r.Method == http.MethodPost {
w.WriteHeader(201)
key := `{
"ssh_key": {
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "my_key",
}
}`
fmt.Fprintln(w, key)
}
if r.Method == http.MethodGet {
w.WriteHeader(200)
keys := `{
"ssh_keys": [
{
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "my_key"
}
]
}`
fmt.Fprintln(w, keys)
}
})
defineListSnapshotsEndpoint()
do := &Digitalocean{}
_, err := do.CreateMachine()
if err == nil {
t.Fatalf("Expected run with errors but not has %q", err)
}
}
func TestCreateMachineWrongSnapshotList(t *testing.T) {
setup()
defer teardown()
mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
t.Fatalf("Expected method POST but request method is %s", r.Method)
}
droplet := `{"droplet": {
"id": 1,
"name": "gofn",
"region": {"slug": "nyc3"},
"status": "new",
"image": {"slug": "ubuntu-16-10-x64"},
"networks": {
"v4":[
{
"ip_address": "104.131.186.241",
"type": "public"
}
]
}
}
}`
w.Header().Set("Content-Type", "application/json; charset=utf8")
fmt.Fprintln(w, droplet)
})
mux.HandleFunc("/v2/account/keys", func(w http.ResponseWriter, r *http.Request) {
if r.Method == http.MethodPost {
w.WriteHeader(201)
key := `{
"ssh_key": {
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "GOFN"
}
}`
fmt.Fprintln(w, key)
}
if r.Method == http.MethodGet {
w.WriteHeader(200)
keys := `{
"ssh_keys": [
{
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "GOFN"
}
]
}`
fmt.Fprintln(w, keys)
}
})
defineBrokenListSnapshotsEndpoint()
do := &Digitalocean{}
_, err := do.CreateMachine()
if err == nil {
t.Fatalf("Expected run with errors but not has")
}
}
// TestDeleteMachine drives the delete happy path: the "shutdown" action
// is accepted (201), polling it reports "completed", and the droplet
// delete endpoint answers 204.
func TestDeleteMachine(t *testing.T) {
	setup()
	defer teardown()
	// Actions endpoint: only the "shutdown" request is answered.
	mux.HandleFunc("/v2/droplets/503/actions", func(w http.ResponseWriter, r *http.Request) {
		rBody, err := ioutil.ReadAll(r.Body)
		if err != nil {
			t.Fatalf("Expected parse request body without errors but has %q", err)
		}
		if strings.Contains(string(rBody), "shutdown") {
			w.WriteHeader(201)
			// NOTE(review): "status" appears twice in this payload; a JSON
			// decoder keeps the last value ("completed").
			action := `{
"action": {
"id": 36077293,
"status": "in-progress",
"type": "shutdown",
"started_at": "2014-11-04T17:08:03Z",
"completed_at": null,
"resource_id": 503,
"status": "completed",
"resource_type": "droplet",
"region": {"slug": "nyc3"}
}
}`
			fmt.Fprintln(w, action)
			return
		}
	})
	// Droplet delete endpoint: 204 No Content signals success.
	mux.HandleFunc("/v2/droplets/503", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(204)
	})
	// SSH key endpoints: creation and listing both succeed.
	mux.HandleFunc("/v2/account/keys", func(w http.ResponseWriter, r *http.Request) {
		if r.Method == http.MethodPost {
			w.WriteHeader(201)
			key := `{
"ssh_key": {
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "GOFN"
}
}`
			fmt.Fprintln(w, key)
		}
		if r.Method == http.MethodGet {
			w.WriteHeader(200)
			keys := `{
"ssh_keys": [
{
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "GOFN"
}
]
}`
			fmt.Fprintln(w, keys)
		}
	})
	// Poll endpoint: the shutdown action has completed.
	mux.HandleFunc("/v2/droplets/503/actions/36077293", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(200)
		action := `{
"action": {
"id": 36077293,
"status": "completed",
"type": "shutdown",
"started_at": "2014-11-04T17:08:03Z",
"completed_at": null,
"resource_id": 503,
"status": "completed",
"resource_type": "droplet",
"region": {"slug": "nyc3"}
}
}`
		fmt.Fprintln(w, action)
		return
	})
	do := &Digitalocean{}
	machine := &iaas.Machine{ID: "503"}
	err := do.DeleteMachine(machine)
	if err != nil {
		t.Errorf("Expected run without errors but has %q", err)
	}
}
// TestDeleteMachineWithShutdownError serves malformed JSON for the
// "shutdown" action (trailing comma after "region") while the
// "power_off" fallback gets a valid payload; DeleteMachine is still
// expected to succeed via the fallback.
func TestDeleteMachineWithShutdownError(t *testing.T) {
	setup()
	defer teardown()
	mux.HandleFunc("/v2/droplets/503/actions", func(w http.ResponseWriter, r *http.Request) {
		rBody, err := ioutil.ReadAll(r.Body)
		if err != nil {
			t.Fatalf("Expected parse request body without errors but has %q", err)
		}
		if strings.Contains(string(rBody), "shutdown") {
			w.WriteHeader(201)
			// Deliberately invalid JSON: note the trailing comma.
			action := `{
"action": {
"id": 36077293,
"status": "in-progress",
"type": "shutdown",
"started_at": "2014-11-04T17:08:03Z",
"completed_at": null,
"resource_id": 503,
"status": "completed",
"resource_type": "droplet",
"region": {"slug": "nyc3"},
}
}`
			fmt.Fprintln(w, action)
			return
		}
		if strings.Contains(string(rBody), "power_off") {
			w.WriteHeader(201)
			// Valid payload for the power_off fallback.
			action := `{
"action": {
"id": 36077293,
"status": "in-progress",
"type": "power_off",
"started_at": "2014-11-04T17:08:03Z",
"completed_at": null,
"resource_id": 503,
"status": "completed",
"resource_type": "droplet",
"region": {"slug": "nyc3"}
}
}`
			fmt.Fprintln(w, action)
			return
		}
	})
	mux.HandleFunc("/v2/droplets/503", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(204)
	})
	// Poll endpoint: reports the action as completed.
	mux.HandleFunc("/v2/droplets/503/actions/36077293", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(200)
		action := `{
"action": {
"id": 36077293,
"status": "completed",
"type": "shutdown",
"started_at": "2014-11-04T17:08:03Z",
"completed_at": null,
"resource_id": 503,
"status": "completed",
"resource_type": "droplet",
"region": {"slug": "nyc3"}
}
}`
		fmt.Fprintln(w, action)
		return
	})
	do := &Digitalocean{}
	machine := &iaas.Machine{ID: "503"}
	err := do.DeleteMachine(machine)
	if err != nil {
		t.Errorf("Expected run without errors but has %q", err)
	}
}
// TestDeleteMachineWithShutdownErrorAndPowerOff breaks BOTH the
// "shutdown" and "power_off" action replies (trailing commas make the
// JSON invalid), so DeleteMachine must return an error.
func TestDeleteMachineWithShutdownErrorAndPowerOff(t *testing.T) {
	setup()
	defer teardown()
	mux.HandleFunc("/v2/droplets/503/actions", func(w http.ResponseWriter, r *http.Request) {
		rBody, err := ioutil.ReadAll(r.Body)
		if err != nil {
			t.Fatalf("Expected parse request body without errors but has %q", err)
		}
		if strings.Contains(string(rBody), "shutdown") {
			w.WriteHeader(201)
			// Invalid JSON (trailing comma after "region").
			action := `{
"action": {
"id": 36077293,
"status": "in-progress",
"type": "shutdown",
"started_at": "2014-11-04T17:08:03Z",
"completed_at": null,
"resource_id": 503,
"resource_type": "droplet",
"region": {"slug": "nyc3"},
}
}`
			fmt.Fprintln(w, action)
			return
		}
		if strings.Contains(string(rBody), "power_off") {
			w.WriteHeader(201)
			// Invalid JSON here too, so the fallback also fails.
			action := `{
"action": {
"id": 36077293,
"status": "in-progress",
"type": "power_off",
"started_at": "2014-11-04T17:08:03Z",
"completed_at": null,
"resource_id": 503,
"resource_type": "droplet",
"region": {"slug": "nyc3"},
}
}`
			fmt.Fprintln(w, action)
			return
		}
	})
	mux.HandleFunc("/v2/droplets/503", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(204)
	})
	do := &Digitalocean{}
	machine := &iaas.Machine{ID: "503"}
	err := do.DeleteMachine(machine)
	if err == nil {
		t.Errorf("expected run errors but not has %q", err)
	}
}
// TestDeleteMachineWrongAuth sets an unparseable API URL ("://localhost")
// and verifies that DeleteMachine surfaces the failure.
func TestDeleteMachineWrongAuth(t *testing.T) {
	os.Setenv("DIGITALOCEAN_API_URL", "://localhost")
	provider := &Digitalocean{}
	if err := provider.DeleteMachine(&iaas.Machine{ID: "503"}); err == nil {
		t.Errorf("expected errors but run without errors")
	}
}
// TestCreateSnapshot drives the snapshot happy path: the create call
// returns 201 "in-progress" and polling the action id returns 200
// "completed".
func TestCreateSnapshot(t *testing.T) {
	setup()
	defer teardown()
	// Poll endpoint: the snapshot action has completed.
	mux.HandleFunc("/v2/droplets/123/actions/36805022", func(w http.ResponseWriter, r *http.Request) {
		action := `{
"action": {
"id": 36805022,
"status": "completed",
"type": "snapshot",
"started_at": "2014-11-14T16:34:39Z",
"completed_at": null,
"resource_id": 3164450,
"resource_type": "droplet",
"region": {"slug": "nyc3"}
}
}`
		w.WriteHeader(http.StatusOK)
		fmt.Fprint(w, action)
	})
	// Create endpoint: accepts the snapshot request as in-progress.
	mux.HandleFunc("/v2/droplets/123/actions", func(w http.ResponseWriter, r *http.Request) {
		action := `{
"action": {
"id": 36805022,
"status": "in-progress",
"type": "snapshot",
"started_at": "2014-11-14T16:34:39Z",
"completed_at": null,
"resource_id": 3164450,
"resource_type": "droplet",
"region": {"slug": "nyc3"}
}
}`
		w.WriteHeader(http.StatusCreated)
		fmt.Fprint(w, action)
	})
	do := &Digitalocean{}
	machine := &iaas.Machine{ID: "123"}
	err := do.CreateSnapshot(machine)
	if err != nil {
		t.Errorf("expected run without errors but has %q", err)
	}
}
// TestCreateSnapshotWrongAuth sets an unparseable API URL ("://localhost")
// and verifies that CreateSnapshot surfaces the failure.
func TestCreateSnapshotWrongAuth(t *testing.T) {
	os.Setenv("DIGITALOCEAN_API_URL", "://localhost")
	provider := &Digitalocean{}
	if err := provider.CreateSnapshot(&iaas.Machine{ID: "503"}); err == nil {
		t.Errorf("expected errors but run without errors")
	}
}
// TestCreateSnapshotActionError serves a deliberately malformed action
// payload (trailing comma after "region" makes the JSON invalid), so
// CreateSnapshot must fail while decoding the response.
func TestCreateSnapshotActionError(t *testing.T) {
	setup()
	defer teardown()
	mux.HandleFunc("/v2/droplets/123/actions", func(w http.ResponseWriter, r *http.Request) {
		action := `{
"action": {
"id": 36805022,
"status": "in-progress",
"type": "snapshot",
"started_at": "2014-11-14T16:34:39Z",
"completed_at": null,
"resource_id": 3164450,
"resource_type": "droplet",
"region": {"slug": "nyc3"},
}
}`
		w.WriteHeader(http.StatusCreated)
		fmt.Fprint(w, action)
	})
	do := &Digitalocean{}
	machine := &iaas.Machine{ID: "123"}
	err := do.CreateSnapshot(machine)
	if err == nil {
		t.Errorf("expected run with errors but not has")
	}
}
// TestGeneratePrivateSSHKey checks that generatePrivateKey yields a
// non-nil key and no error for a 32-bit key size.
func TestGeneratePrivateSSHKey(t *testing.T) {
	key, err := generatePrivateKey(32)
	if err != nil {
		t.Errorf("expected run without errors but has %q", err)
	}
	if key == nil {
		t.Errorf("expected private not nil but is nil")
	}
}
// TestGeneratePublicSSHKey generates a private key and then checks that
// generatePublicKey derives the public half without error.
func TestGeneratePublicSSHKey(t *testing.T) {
	key, err := generatePrivateKey(32)
	if err != nil {
		t.Errorf("expected run without errors but has %q", err)
	}
	if key == nil {
		t.Errorf("expected private not nil but is nil")
	}
	if err = generatePublicKey(key); err != nil {
		t.Errorf("expected run without errors but has %q", err)
	}
}
// TestGenerateFNSSHKey checks that the combined key-pair generation
// helper runs without error for a 32-bit key size.
func TestGenerateFNSSHKey(t *testing.T) {
	if err := generateFNSSHKey(32); err != nil {
		t.Errorf("expected run without errors but has %q", err)
	}
}
|
// shell sorting
package main

import (
	"fmt"

	"ashumeow/meow_sort"
)

// main generates a random 10-element array and sorts it in place with
// Shell sort (gap sequence n/2, n/4, ..., 1), printing the array before
// and after. Fixes: `func main` was missing its parentheses, `arr` was
// an undefined name (should be `meow`), and the swap indexed with
// `meow[meow-ig_ig]` instead of `meow[xx-ig_ig]`.
func main() {
	meow := meow_sort.RandArray(10)
	fmt.Println("Given array is: ", meow)
	fmt.Println("")
	// Gapped insertion sort with a shrinking gap.
	for ig_ig := len(meow) / 2; ig_ig > 0; ig_ig /= 2 {
		for x := ig_ig; x < len(meow); x++ {
			// Sink meow[x] into its gapped subsequence.
			for xx := x; xx >= ig_ig && meow[xx-ig_ig] > meow[xx]; xx -= ig_ig {
				meow[xx], meow[xx-ig_ig] = meow[xx-ig_ig], meow[xx]
			}
		}
	}
	fmt.Println("Sorted array is: ", meow)
}
update shell sort: add the missing parentheses to func main and fix wrong identifiers (len(arr) -> len(meow); meow[meow-ig_ig] -> meow[xx-ig_ig])
// shell sorting
package main

import (
	"fmt"

	"ashumeow/meow_sort"
)

// main demonstrates Shell sort on a random 10-element array, printing
// it before and after sorting.
func main() {
	data := meow_sort.RandArray(10)
	fmt.Println("Given array is: ", data)
	fmt.Println("")
	n := len(data)
	// Classic Shell sort using the n/2^k gap sequence.
	for gap := n / 2; gap > 0; gap /= 2 {
		for i := gap; i < n; i++ {
			j := i
			for j >= gap && data[j-gap] > data[j] {
				data[j], data[j-gap] = data[j-gap], data[j]
				j -= gap
			}
		}
	}
	fmt.Println("Sorted array is: ", data)
}
package memorystore
import (
"sync"
"time"
"github.com/mattheath/phosphor/domain"
)
func New() *MemoryStore {
s := &MemoryStore{
store: make(map[string]domain.Trace),
}
// run stats worker
go s.statsLoop()
return s
}
// MemoryStore is an in-memory trace store keyed by trace id. The
// embedded RWMutex guards all access to the map; do not copy the value.
type MemoryStore struct {
	sync.RWMutex
	store map[string]domain.Trace
}
// GetTrace returns the trace stored under id. An unknown id yields the
// zero-value domain.Trace and a nil error (no explicit miss signal).
func (s *MemoryStore) GetTrace(id string) (domain.Trace, error) {
	s.RLock()
	defer s.RUnlock()
	return s.store[id], nil
}
// StoreTraceFrame appends frame f to the trace identified by f.TraceId,
// creating the map entry on first use. Fix: the body referenced an
// undefined variable `e`; the parameter is named `f` (the later revision
// of this file confirms the intended name).
func (s *MemoryStore) StoreTraceFrame(f domain.Frame) error {
	s.Lock()
	defer s.Unlock()
	// Load our current trace (zero-value Trace if this is the first frame)
	t := s.store[f.TraceId]
	// Add the new frame to this
	t = append(t, f)
	// Store it back
	s.store[f.TraceId] = t
	return nil
}
// statsLoop prints store statistics every 5 seconds. It never returns
// and the ticker is never stopped.
func (s *MemoryStore) statsLoop() {
	tick := time.NewTicker(5 * time.Second)
	// @todo listen for shutdown, stop ticker and exit cleanly
	for {
		<-tick.C // block until tick
		s.printStats()
	}
}
// printStats logs how many traces are currently held. The count is read
// under the read lock and logging happens after the lock is released.
// NOTE(review): `log` is not among this file's visible imports — confirm
// which logging package provides Infof.
func (s *MemoryStore) printStats() {
	// Get some data while under the mutex
	s.RLock()
	count := len(s.store)
	s.RUnlock()
	// Separate processing and logging outside of mutex
	log.Infof("[Phosphor] Traces stored: %v", count)
}
Fixup: frame/event — StoreTraceFrame referenced an undefined `e`; use the frame parameter `f` instead
package memorystore
import (
"sync"
"time"
"github.com/mattheath/phosphor/domain"
)
func New() *MemoryStore {
s := &MemoryStore{
store: make(map[string]domain.Trace),
}
// run stats worker
go s.statsLoop()
return s
}
// MemoryStore is an in-memory trace store. The embedded RWMutex guards
// the store map, so a MemoryStore must not be copied after first use.
type MemoryStore struct {
	sync.RWMutex
	store map[string]domain.Trace // traces keyed by trace id
}
// GetTrace looks up a trace by id under the read lock. An unknown id
// returns the zero-value domain.Trace together with a nil error.
func (s *MemoryStore) GetTrace(id string) (domain.Trace, error) {
	s.RLock()
	t := s.store[id]
	s.RUnlock()
	return t, nil
}
// StoreTraceFrame appends frame f onto the trace keyed by f.TraceId,
// creating the trace on first use. The whole update happens under the
// write lock. Always returns nil.
func (s *MemoryStore) StoreTraceFrame(f domain.Frame) error {
	s.Lock()
	defer s.Unlock()

	// Read-modify-write the trace this frame belongs to.
	s.store[f.TraceId] = append(s.store[f.TraceId], f)
	return nil
}
// statsLoop periodically reports store statistics, ticking every five
// seconds. It runs for the life of the process; the ticker is never
// stopped.
func (s *MemoryStore) statsLoop() {
	tick := time.NewTicker(5 * time.Second)
	// @todo listen for shutdown, stop ticker and exit cleanly
	for {
		<-tick.C // block until tick
		s.printStats()
	}
}
// printStats logs the number of traces currently stored. The count is
// taken under the read lock; logging happens after the lock is
// released so slow I/O cannot block writers.
// NOTE(review): log is not among the imports visible in this file
// section — presumably a logging package is imported elsewhere;
// confirm.
func (s *MemoryStore) printStats() {
	// Get some data while under the mutex
	s.RLock()
	count := len(s.store)
	s.RUnlock()
	// Separate processing and logging outside of mutex
	log.Infof("[Phosphor] Traces stored: %v", count)
}
|
// go-rst - A reStructuredText parser for Go
// 2014 (c) The go-rst Authors
// MIT Licensed. See LICENSE for details.
package parse
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"github.com/demizer/go-elog"
"github.com/demizer/go-spew/spew"
"os"
"strings"
"testing"
)
// lexTest is one lexer test case parsed from a .dat test-data file.
// The fields mirror the #name, #description, #data, #items, and
// #expect sections of that file.
type lexTest struct {
	name           string // test identifier from the #name section
	description    string // human-readable summary from #description
	data           string // input text fed to the lexer
	items          string // raw text of the #items section
	expect         string // expected JSON output from #expect
	collectedItems []item // items gathered while running the test
}
var (
	// tEOF is the expected end-of-input item shared by tests.
	tEOF = item{ElementType: itemEOF, Position: 0, Value: ""}
)

// spd is a spew dumper configured for tab indentation.
var spd = spew.ConfigState{Indent: "\t"}
// TestAll configures the shared logger used by the tests in this file.
// It performs no assertions itself.
func TestAll(t *testing.T) {
	// log.SetLevel(log.LEVEL_DEBUG)
	log.SetFlags(log.Lansi | log.LnoFileAnsi | log.LnoPrefix)
}
// parseTestData reads lexer test cases from the file at filepath. The
// file is divided into sections introduced by the marker lines #name,
// #description, #data, #items, and #expect; each #name begins a new
// test case. Scanner errors are reported via t; an open failure is
// returned.
func parseTestData(t *testing.T, filepath string) ([]lexTest, error) {
	testData, err := os.Open(filepath)
	if err != nil {
		return nil, err
	}
	// Fix: defer the Close only after the error check; the original
	// deferred Close on a nil *os.File when Open failed.
	defer testData.Close()
	var lexTests []lexTest
	var curTest = new(lexTest)
	var buffer bytes.Buffer
	scanner := bufio.NewScanner(testData)
	for scanner.Scan() {
		switch scanner.Text() {
		case "#name":
			// name starts a new section: append the previous test
			// (if any) to the array and reset.
			if buffer.Len() > 0 {
				curTest.expect = buffer.String()
				lexTests = append(lexTests, *curTest)
			}
			curTest = new(lexTest)
			buffer.Reset()
		case "#description":
			// Each marker stores the buffered text into the field of
			// the *preceding* section.
			curTest.name = strings.TrimRight(buffer.String(), "\n")
			buffer.Reset()
		case "#data":
			curTest.description = strings.TrimRight(buffer.String(), "\n")
			buffer.Reset()
		case "#items":
			curTest.data = strings.TrimRight(buffer.String(), "\n")
			buffer.Reset()
		case "#expect":
			curTest.items = buffer.String()
			buffer.Reset()
		default:
			// Collect the text in between sections, skipping blank
			// lines and '#'-prefixed comment lines.
			trimmed := strings.TrimLeft(scanner.Text(), " ")
			// Fix: test the trimmed length; the original indexed
			// [0] after trimming and panicked on lines containing
			// only spaces.
			if len(trimmed) == 0 || trimmed[0] == '#' {
				continue
			}
			buffer.WriteString(fmt.Sprintln(scanner.Text()))
		}
	}
	if err := scanner.Err(); err != nil {
		t.Error(err)
	}
	// Append the final section to the array.
	if buffer.Len() > 0 {
		curTest.expect = buffer.String()
		lexTests = append(lexTests, *curTest)
	}
	return lexTests, nil
}
// collect gathers the emitted items into a slice.
// It lexes t.data and drains the lexer until an EOF or error item is
// produced; the terminating item is included in the result.
func collect(t *lexTest) (items []item) {
	l := lex(t.name, t.data)
	for {
		item := l.nextItem()
		items = append(items, item)
		if item.ElementType == itemEOF || item.ElementType == itemError {
			break
		}
	}
	return
}
// TestSection runs the section lexing test data, currently exercising
// only the ST-UNEXP-TITLES case. The lexer's items are round-tripped
// through JSON and dumped for manual inspection; JSON failures are
// reported as test errors.
func TestSection(t *testing.T) {
	lexTests, err := parseTestData(t, "../testdata/test_lex_sections.dat")
	if err != nil {
		t.FailNow()
	}
	for _, test := range lexTests {
		if test.name == "ST-UNEXP-TITLES" {
			log.Printf("Test Name: \t%s\n", test.name)
			log.Printf("Description: \t%s\n", test.description)
			items := collect(&test)
			b, err := json.MarshalIndent(items, "", "\t")
			if err != nil {
				t.Errorf("JSON Error: %s, IN: %+v\n", err, test)
			}
			log.Println("Collected items:\n\n", spd.Sdump(items))
			log.Println("items JSON object:\n\n", string(b))
			// Parse the expected output as generic JSON for dumping.
			var i interface{}
			err = json.Unmarshal([]byte(test.expect), &i)
			if err != nil {
				t.Errorf("JSON Error: %s, IN: %+v\n", err, test)
			}
			log.Println("JSON object:\n\n", spd.Sdump(i))
		}
	}
}
lex_test.go: Use custom logger template
// go-rst - A reStructuredText parser for Go
// 2014 (c) The go-rst Authors
// MIT Licensed. See LICENSE for details.
package parse
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"github.com/demizer/go-elog"
"github.com/demizer/go-spew/spew"
"os"
"strings"
"testing"
)
// lexTest is one lexer test case parsed from a .dat test-data file.
// The fields mirror the #name, #description, #data, #items, and
// #expect sections of that file.
type lexTest struct {
	name           string // test identifier from the #name section
	description    string // human-readable summary from #description
	data           string // input text fed to the lexer
	items          string // raw text of the #items section
	expect         string // expected JSON output from #expect
	collectedItems []item // items gathered while running the test
}
var (
	// tEOF is the expected end-of-input item shared by tests.
	tEOF = item{ElementType: itemEOF, Position: 0, Value: ""}
)

// spd is a spew dumper configured for tab indentation.
var spd = spew.ConfigState{Indent: "\t"}
// TestAll configures the shared logger for the tests in this file:
// debug level, a custom output template, and ANSI/function-name/line
// flags. A bad template is reported as a test error; no other
// assertions are made.
func TestAll(t *testing.T) {
	log.SetLevel(log.LEVEL_DEBUG)
	err := log.SetTemplate("{{if .Date}}{{.Date}} {{end}}" +
		"{{if .Prefix}}{{.Prefix}} {{end}}" +
		"{{if .LogLabel}}{{.LogLabel}} {{end}}" +
		"{{if .FileName}}{{.FileName}}: {{end}}" +
		"{{if .FunctionName}}{{.FunctionName}}{{end}}" +
		"{{if .LineNumber}}#{{.LineNumber}}: {{end}}" +
		"{{if .Text}}{{.Text}}{{end}}")
	if err != nil {
		t.Error(err)
	}
	log.SetFlags(log.Lansi | log.LnoPrefix | log.LfunctionName |
		log.LlineNumber)
}
// parseTestData reads lexer test cases from the file at filepath. The
// file is divided into sections introduced by the marker lines #name,
// #description, #data, #items, and #expect; each #name begins a new
// test case. Scanner errors are reported via t; an open failure is
// returned.
func parseTestData(t *testing.T, filepath string) ([]lexTest, error) {
	testData, err := os.Open(filepath)
	if err != nil {
		return nil, err
	}
	// Fix: defer the Close only after the error check; the original
	// deferred Close on a nil *os.File when Open failed.
	defer testData.Close()
	var lexTests []lexTest
	var curTest = new(lexTest)
	var buffer bytes.Buffer
	scanner := bufio.NewScanner(testData)
	for scanner.Scan() {
		switch scanner.Text() {
		case "#name":
			// name starts a new section: append the previous test
			// (if any) to the array and reset.
			if buffer.Len() > 0 {
				curTest.expect = buffer.String()
				lexTests = append(lexTests, *curTest)
			}
			curTest = new(lexTest)
			buffer.Reset()
		case "#description":
			// Each marker stores the buffered text into the field of
			// the *preceding* section.
			curTest.name = strings.TrimRight(buffer.String(), "\n")
			buffer.Reset()
		case "#data":
			curTest.description = strings.TrimRight(buffer.String(), "\n")
			buffer.Reset()
		case "#items":
			curTest.data = strings.TrimRight(buffer.String(), "\n")
			buffer.Reset()
		case "#expect":
			curTest.items = buffer.String()
			buffer.Reset()
		default:
			// Collect the text in between sections, skipping blank
			// lines and '#'-prefixed comment lines.
			trimmed := strings.TrimLeft(scanner.Text(), " ")
			// Fix: test the trimmed length; the original indexed
			// [0] after trimming and panicked on lines containing
			// only spaces.
			if len(trimmed) == 0 || trimmed[0] == '#' {
				continue
			}
			buffer.WriteString(fmt.Sprintln(scanner.Text()))
		}
	}
	if err := scanner.Err(); err != nil {
		t.Error(err)
	}
	// Append the final section to the array.
	if buffer.Len() > 0 {
		curTest.expect = buffer.String()
		lexTests = append(lexTests, *curTest)
	}
	return lexTests, nil
}
// collect runs the lexer over t.data and returns every emitted item,
// including the terminating EOF or error item.
func collect(t *lexTest) []item {
	var items []item
	l := lex(t.name, t.data)
	for {
		it := l.nextItem()
		items = append(items, it)
		if it.ElementType == itemEOF || it.ElementType == itemError {
			return items
		}
	}
}
// TestSection runs the section lexing test data, currently exercising
// only the ST-UNEXP-TITLES case. The lexer's items are round-tripped
// through JSON and dumped for manual inspection; JSON failures are
// reported as test errors.
func TestSection(t *testing.T) {
	lexTests, err := parseTestData(t, "../testdata/test_lex_sections.dat")
	if err != nil {
		t.FailNow()
	}
	for _, test := range lexTests {
		if test.name == "ST-UNEXP-TITLES" {
			log.Printf("Test Name: \t%s\n", test.name)
			log.Printf("Description: \t%s\n", test.description)
			items := collect(&test)
			b, err := json.MarshalIndent(items, "", "\t")
			if err != nil {
				t.Errorf("JSON Error: %s, IN: %+v\n", err, test)
			}
			log.Println("Collected items:\n\n", spd.Sdump(items))
			log.Println("items JSON object:\n\n", string(b))
			// Parse the expected output as generic JSON for dumping.
			var i interface{}
			err = json.Unmarshal([]byte(test.expect), &i)
			if err != nil {
				t.Errorf("JSON Error: %s, IN: %+v\n", err, test)
			}
			log.Println("JSON object:\n\n", spd.Sdump(i))
		}
	}
}
|
package flags
import (
"bytes"
"fmt"
"os"
"sort"
"strings"
"unicode/utf8"
)
// parseState tracks the mutable state of one argument-parsing run:
// the current argument, the remaining arguments, unconsumed return
// arguments, pending positional arguments, the active command, and
// the option/command lookup tables.
type parseState struct {
	arg        string   // argument most recently consumed by pop
	args       []string // arguments not yet consumed
	retargs    []string // arguments returned to the caller
	positional []*Arg   // positional arguments still to be filled
	err        error    // first parse error recorded
	command    *Command // command currently being parsed
	lookup     lookup   // option/command name lookup tables
}
// eof reports whether all arguments have been consumed.
func (p *parseState) eof() bool {
	return len(p.args) == 0
}
// pop consumes and returns the next argument, recording it in p.arg.
// It returns the empty string once the argument list is exhausted.
func (p *parseState) pop() string {
	if p.eof() {
		return ""
	}
	p.arg, p.args = p.args[0], p.args[1:]
	return p.arg
}
// peek returns the next argument without consuming it, or "" at EOF.
func (p *parseState) peek() string {
	if p.eof() {
		return ""
	}
	return p.args[0]
}
// checkRequired verifies that every required option in the active
// command chain was set, and that required positional arguments were
// provided. It records and returns an ErrRequired error describing
// what is missing, or nil when everything required is present.
func (p *parseState) checkRequired(parser *Parser) error {
	c := parser.Command

	// Walk the chain of active (sub)commands collecting unset
	// required options. Idiom fix: a nil slice works with append;
	// no need for make([]*Option, 0).
	var required []*Option

	for c != nil {
		c.eachGroup(func(g *Group) {
			for _, option := range g.options {
				if !option.isSet && option.Required {
					required = append(required, option)
				}
			}
		})

		c = c.Active
	}

	if len(required) == 0 {
		if len(p.positional) > 0 && p.command.ArgsRequired {
			// All options were satisfied; report any still-pending
			// positional arguments (stopping at the first
			// "remaining" catch-all).
			var reqnames []string

			for _, arg := range p.positional {
				if arg.isRemaining() {
					break
				}

				reqnames = append(reqnames, "`"+arg.Name+"`")
			}

			if len(reqnames) == 0 {
				return nil
			}

			var msg string

			if len(reqnames) == 1 {
				msg = fmt.Sprintf("the required argument %s was not provided", reqnames[0])
			} else {
				msg = fmt.Sprintf("the required arguments %s and %s were not provided",
					strings.Join(reqnames[:len(reqnames)-1], ", "), reqnames[len(reqnames)-1])
			}

			p.err = newError(ErrRequired, msg)
			return p.err
		}

		return nil
	}

	// Build a sorted, quoted list of missing flag names for the
	// error message.
	names := make([]string, 0, len(required))

	for _, k := range required {
		names = append(names, "`"+k.String()+"'")
	}

	sort.Strings(names)

	var msg string

	if len(names) == 1 {
		msg = fmt.Sprintf("the required flag %s was not specified", names[0])
	} else {
		msg = fmt.Sprintf("the required flags %s and %s were not specified",
			strings.Join(names[:len(names)-1], ", "), names[len(names)-1])
	}

	p.err = newError(ErrRequired, msg)
	return p.err
}
// estimateCommand builds an error describing an unknown or missing
// (sub)command. When an unrecognized argument is present it suggests
// the closest known command name; otherwise it lists the available
// commands.
func (p *parseState) estimateCommand() error {
	commands := p.command.sortedCommands()
	cmdnames := make([]string, len(commands))

	for i, v := range commands {
		cmdnames[i] = v.Name
	}

	var msg string
	var errtype ErrorType

	if len(p.retargs) != 0 {
		c, l := closestChoice(p.retargs[0], cmdnames)
		msg = fmt.Sprintf("Unknown command `%s'", p.retargs[0])
		errtype = ErrUnknownCommand

		// Suggest the closest candidate only when it is close enough
		// (l relative to the candidate length below 0.5; l is
		// presumably a distance score from closestChoice — confirm).
		if float32(l)/float32(len(c)) < 0.5 {
			msg = fmt.Sprintf("%s, did you mean `%s'?", msg, c)
		} else if len(cmdnames) == 1 {
			msg = fmt.Sprintf("%s. You should use the %s command",
				msg,
				cmdnames[0])
		} else {
			msg = fmt.Sprintf("%s. Please specify one command of: %s or %s",
				msg,
				strings.Join(cmdnames[:len(cmdnames)-1], ", "),
				cmdnames[len(cmdnames)-1])
		}
	} else {
		errtype = ErrCommandRequired

		if len(cmdnames) == 1 {
			msg = fmt.Sprintf("Please specify the %s command", cmdnames[0])
		} else {
			msg = fmt.Sprintf("Please specify one command of: %s or %s",
				strings.Join(cmdnames[:len(cmdnames)-1], ", "),
				cmdnames[len(cmdnames)-1])
		}
	}

	return newError(errtype, msg)
}
// parseOption applies one parsed option. Options that cannot take an
// argument reject an inline one; options that can use, in order: the
// inline argument, the next list argument (when canarg), or their
// declared optional default values. Non-*Error failures (e.g. value
// conversion) are wrapped in an ErrMarshal error.
func (p *Parser) parseOption(s *parseState, name string, option *Option, canarg bool, argument *string) (err error) {
	if !option.canArgument() {
		if argument != nil {
			msg := fmt.Sprintf("bool flag `%s' cannot have an argument", option)
			return newError(ErrNoArgumentForBool, msg)
		}

		err = option.set(nil)
	} else if argument != nil {
		err = option.set(argument)
	} else if canarg && !s.eof() {
		// Consume the next argument from the list as the value.
		arg := s.pop()
		err = option.set(&arg)
	} else if option.OptionalArgument {
		// No argument supplied: fall back to the declared optional
		// default value(s).
		option.empty()

		for _, v := range option.OptionalValue {
			err = option.set(&v)

			if err != nil {
				break
			}
		}
	} else {
		msg := fmt.Sprintf("expected argument for flag `%s'", option)
		err = newError(ErrExpectedArgument, msg)
	}

	if err != nil {
		if _, ok := err.(*Error); !ok {
			// Wrap foreign errors so callers always see a *Error.
			msg := fmt.Sprintf("invalid argument for flag `%s' (expected %s): %s",
				option,
				option.value.Type(),
				err.Error())

			err = newError(ErrMarshal, msg)
		}
	}

	return err
}
// parseLong handles a --name style option. A following list argument
// may be consumed only when the option's argument is mandatory.
// Unknown names yield an ErrUnknownFlag error.
func (p *Parser) parseLong(s *parseState, name string, argument *string) error {
	if option := s.lookup.longNames[name]; option != nil {
		// Only long options that are required can consume an argument
		// from the argument list
		canarg := !option.OptionalArgument

		return p.parseOption(s, name, option, canarg, argument)
	}

	return newError(ErrUnknownFlag, fmt.Sprintf("unknown flag `%s'", name))
}
// splitShortConcatArg splits a short option group like "-ovalue" into
// the first short option name and its concatenated argument. When the
// first rune is not an argument-taking option, or nothing follows it,
// the group is returned unchanged with a nil argument.
func (p *Parser) splitShortConcatArg(s *parseState, optname string) (string, *string) {
	c, n := utf8.DecodeRuneInString(optname)

	if n == len(optname) {
		return optname, nil
	}

	first := string(c)

	if option := s.lookup.shortNames[first]; option != nil && option.canArgument() {
		arg := optname[n:]
		return first, &arg
	}

	return optname, nil
}
// parseShort handles a group of short options ("-abc"), applying each
// in turn. A concatenated argument ("-ovalue") belongs to the first
// option; only the final option of the group may consume the next
// list argument, and only when its argument is mandatory.
func (p *Parser) parseShort(s *parseState, optname string, argument *string) error {
	if argument == nil {
		optname, argument = p.splitShortConcatArg(s, optname)
	}

	for i, c := range optname {
		shortname := string(c)

		if option := s.lookup.shortNames[shortname]; option != nil {
			// Only the last short argument can consume an argument from
			// the arguments list, and only if it's non optional
			canarg := (i+utf8.RuneLen(c) == len(optname)) && !option.OptionalArgument

			if err := p.parseOption(s, shortname, option, canarg, argument); err != nil {
				return err
			}
		} else {
			return newError(ErrUnknownFlag, fmt.Sprintf("unknown flag `%s'", shortname))
		}

		// Only the first option can have a concatted argument, so just
		// clear argument here
		argument = nil
	}

	return nil
}
// addArgs feeds args into the pending positional arguments, converting
// each value; a "remaining" positional stays active and absorbs all
// leftover values. Anything not consumed is appended to retargs.
// Returns the first conversion error, if any.
func (s *parseState) addArgs(args ...string) error {
	for len(s.positional) > 0 && len(args) > 0 {
		arg := s.positional[0]

		if err := convert(args[0], arg.value, arg.tag); err != nil {
			return err
		}

		if !arg.isRemaining() {
			s.positional = s.positional[1:]
		}

		args = args[1:]
	}

	s.retargs = append(s.retargs, args...)
	return nil
}
// parseNonOption handles an argument that is not a flag: it is bound
// to a pending positional argument, activates a matching subcommand,
// or is collected as a return argument. With PassAfterNonOption set,
// the first non-option also flushes all remaining arguments.
func (p *Parser) parseNonOption(s *parseState) error {
	if len(s.positional) > 0 {
		return s.addArgs(s.arg)
	}

	if cmd := s.lookup.commands[s.arg]; cmd != nil {
		s.command.Active = cmd
		cmd.fillParseState(s)
	} else if (p.Options & PassAfterNonOption) != None {
		// If PassAfterNonOption is set then all remaining arguments
		// are considered positional
		if err := s.addArgs(s.arg); err != nil {
			return err
		}

		if err := s.addArgs(s.args...); err != nil {
			return err
		}

		s.args = []string{}
	} else {
		return s.addArgs(s.arg)
	}

	return nil
}
// showBuiltinHelp renders the parser's help text and returns it
// wrapped in an ErrHelp error.
func (p *Parser) showBuiltinHelp() error {
	var b bytes.Buffer

	p.WriteHelp(&b)
	return newError(ErrHelp, b.String())
}
// printError writes err to stderr when the PrintErrors option is
// enabled, and returns err unchanged.
func (p *Parser) printError(err error) error {
	if err != nil && (p.Options&PrintErrors) != None {
		fmt.Fprintln(os.Stderr, err)
	}

	return err
}
// clearIsSet resets the isSet marker of every option in every command
// and group, preparing the parser for a fresh parse.
func (p *Parser) clearIsSet() {
	p.eachCommand(func(c *Command) {
		c.eachGroup(func(g *Group) {
			for _, option := range g.options {
				option.isSet = false
			}
		})
	}, true)
}
make the linter happy
package flags
import (
"bytes"
"fmt"
"os"
"sort"
"strings"
"unicode/utf8"
)
// parseState tracks the mutable state of one argument-parsing run:
// the current argument, the remaining arguments, unconsumed return
// arguments, pending positional arguments, the active command, and
// the option/command lookup tables.
type parseState struct {
	arg        string   // argument most recently consumed by pop
	args       []string // arguments not yet consumed
	retargs    []string // arguments returned to the caller
	positional []*Arg   // positional arguments still to be filled
	err        error    // first parse error recorded
	command    *Command // command currently being parsed
	lookup     lookup   // option/command name lookup tables
}
// eof reports whether all arguments have been consumed.
func (p *parseState) eof() bool {
	return len(p.args) == 0
}
// pop consumes and returns the next argument, recording it in p.arg.
// It returns the empty string once the argument list is exhausted.
func (p *parseState) pop() string {
	if p.eof() {
		return ""
	}
	p.arg, p.args = p.args[0], p.args[1:]
	return p.arg
}
// peek returns the next argument without consuming it, or "" at EOF.
func (p *parseState) peek() string {
	if p.eof() {
		return ""
	}
	return p.args[0]
}
// checkRequired verifies that every required option in the active
// command chain was set, and that required positional arguments were
// provided. It records and returns an ErrRequired error describing
// what is missing, or nil when everything required is present.
func (p *parseState) checkRequired(parser *Parser) error {
	c := parser.Command

	// Walk the chain of active (sub)commands collecting unset
	// required options.
	var required []*Option

	for c != nil {
		c.eachGroup(func(g *Group) {
			for _, option := range g.options {
				if !option.isSet && option.Required {
					required = append(required, option)
				}
			}
		})

		c = c.Active
	}

	if len(required) == 0 {
		if len(p.positional) > 0 && p.command.ArgsRequired {
			// All options were satisfied; report any still-pending
			// positional arguments (stopping at the first
			// "remaining" catch-all).
			var reqnames []string

			for _, arg := range p.positional {
				if arg.isRemaining() {
					break
				}

				reqnames = append(reqnames, "`"+arg.Name+"`")
			}

			if len(reqnames) == 0 {
				return nil
			}

			var msg string

			if len(reqnames) == 1 {
				msg = fmt.Sprintf("the required argument %s was not provided", reqnames[0])
			} else {
				msg = fmt.Sprintf("the required arguments %s and %s were not provided",
					strings.Join(reqnames[:len(reqnames)-1], ", "), reqnames[len(reqnames)-1])
			}

			p.err = newError(ErrRequired, msg)
			return p.err
		}

		return nil
	}

	// Build a sorted, quoted list of missing flag names for the
	// error message.
	names := make([]string, 0, len(required))

	for _, k := range required {
		names = append(names, "`"+k.String()+"'")
	}

	sort.Strings(names)

	var msg string

	if len(names) == 1 {
		msg = fmt.Sprintf("the required flag %s was not specified", names[0])
	} else {
		msg = fmt.Sprintf("the required flags %s and %s were not specified",
			strings.Join(names[:len(names)-1], ", "), names[len(names)-1])
	}

	p.err = newError(ErrRequired, msg)
	return p.err
}
// estimateCommand builds an error describing an unknown or missing
// (sub)command. When an unrecognized argument is present it suggests
// the closest known command name; otherwise it lists the available
// commands.
func (p *parseState) estimateCommand() error {
	commands := p.command.sortedCommands()
	cmdnames := make([]string, len(commands))

	for i, v := range commands {
		cmdnames[i] = v.Name
	}

	var msg string
	var errtype ErrorType

	if len(p.retargs) != 0 {
		c, l := closestChoice(p.retargs[0], cmdnames)
		msg = fmt.Sprintf("Unknown command `%s'", p.retargs[0])
		errtype = ErrUnknownCommand

		// Suggest the closest candidate only when it is close enough
		// (l relative to the candidate length below 0.5; l is
		// presumably a distance score from closestChoice — confirm).
		if float32(l)/float32(len(c)) < 0.5 {
			msg = fmt.Sprintf("%s, did you mean `%s'?", msg, c)
		} else if len(cmdnames) == 1 {
			msg = fmt.Sprintf("%s. You should use the %s command",
				msg,
				cmdnames[0])
		} else {
			msg = fmt.Sprintf("%s. Please specify one command of: %s or %s",
				msg,
				strings.Join(cmdnames[:len(cmdnames)-1], ", "),
				cmdnames[len(cmdnames)-1])
		}
	} else {
		errtype = ErrCommandRequired

		if len(cmdnames) == 1 {
			msg = fmt.Sprintf("Please specify the %s command", cmdnames[0])
		} else {
			msg = fmt.Sprintf("Please specify one command of: %s or %s",
				strings.Join(cmdnames[:len(cmdnames)-1], ", "),
				cmdnames[len(cmdnames)-1])
		}
	}

	return newError(errtype, msg)
}
// parseOption applies one parsed option. Options that cannot take an
// argument reject an inline one; options that can use, in order: the
// inline argument, the next list argument (when canarg), or their
// declared optional default values. Non-*Error failures (e.g. value
// conversion) are wrapped in an ErrMarshal error.
func (p *Parser) parseOption(s *parseState, name string, option *Option, canarg bool, argument *string) (err error) {
	if !option.canArgument() {
		if argument != nil {
			msg := fmt.Sprintf("bool flag `%s' cannot have an argument", option)
			return newError(ErrNoArgumentForBool, msg)
		}

		err = option.set(nil)
	} else if argument != nil {
		err = option.set(argument)
	} else if canarg && !s.eof() {
		// Consume the next argument from the list as the value.
		arg := s.pop()
		err = option.set(&arg)
	} else if option.OptionalArgument {
		// No argument supplied: fall back to the declared optional
		// default value(s).
		option.empty()

		for _, v := range option.OptionalValue {
			err = option.set(&v)

			if err != nil {
				break
			}
		}
	} else {
		msg := fmt.Sprintf("expected argument for flag `%s'", option)
		err = newError(ErrExpectedArgument, msg)
	}

	if err != nil {
		if _, ok := err.(*Error); !ok {
			// Wrap foreign errors so callers always see a *Error.
			msg := fmt.Sprintf("invalid argument for flag `%s' (expected %s): %s",
				option,
				option.value.Type(),
				err.Error())

			err = newError(ErrMarshal, msg)
		}
	}

	return err
}
// parseLong handles a --name style option. A following list argument
// may be consumed only when the option's argument is mandatory.
// Unknown names yield an ErrUnknownFlag error.
func (p *Parser) parseLong(s *parseState, name string, argument *string) error {
	if option := s.lookup.longNames[name]; option != nil {
		// Only long options that are required can consume an argument
		// from the argument list
		canarg := !option.OptionalArgument

		return p.parseOption(s, name, option, canarg, argument)
	}

	return newError(ErrUnknownFlag, fmt.Sprintf("unknown flag `%s'", name))
}
// splitShortConcatArg splits a short option group like "-ovalue" into
// the first short option name and its concatenated argument. When the
// first rune is not an argument-taking option, or nothing follows it,
// the group is returned unchanged with a nil argument.
func (p *Parser) splitShortConcatArg(s *parseState, optname string) (string, *string) {
	c, n := utf8.DecodeRuneInString(optname)

	if n == len(optname) {
		return optname, nil
	}

	first := string(c)

	if option := s.lookup.shortNames[first]; option != nil && option.canArgument() {
		arg := optname[n:]
		return first, &arg
	}

	return optname, nil
}
// parseShort handles a group of short options ("-abc"), applying each
// in turn. A concatenated argument ("-ovalue") belongs to the first
// option; only the final option of the group may consume the next
// list argument, and only when its argument is mandatory.
func (p *Parser) parseShort(s *parseState, optname string, argument *string) error {
	if argument == nil {
		optname, argument = p.splitShortConcatArg(s, optname)
	}

	for i, c := range optname {
		shortname := string(c)

		if option := s.lookup.shortNames[shortname]; option != nil {
			// Only the last short argument can consume an argument from
			// the arguments list, and only if it's non optional
			canarg := (i+utf8.RuneLen(c) == len(optname)) && !option.OptionalArgument

			if err := p.parseOption(s, shortname, option, canarg, argument); err != nil {
				return err
			}
		} else {
			return newError(ErrUnknownFlag, fmt.Sprintf("unknown flag `%s'", shortname))
		}

		// Only the first option can have a concatted argument, so just
		// clear argument here
		argument = nil
	}

	return nil
}
// addArgs feeds args into the pending positional arguments, converting
// each value; a "remaining" positional stays active and absorbs all
// leftover values. Anything not consumed is appended to retargs.
// Returns the first conversion error, if any.
func (p *parseState) addArgs(args ...string) error {
	for len(p.positional) > 0 && len(args) > 0 {
		arg := p.positional[0]

		if err := convert(args[0], arg.value, arg.tag); err != nil {
			return err
		}

		if !arg.isRemaining() {
			p.positional = p.positional[1:]
		}

		args = args[1:]
	}

	p.retargs = append(p.retargs, args...)
	return nil
}
// parseNonOption handles an argument that is not a flag: it is bound
// to a pending positional argument, activates a matching subcommand,
// or is collected as a return argument. With PassAfterNonOption set,
// the first non-option also flushes all remaining arguments.
func (p *Parser) parseNonOption(s *parseState) error {
	if len(s.positional) > 0 {
		return s.addArgs(s.arg)
	}

	if cmd := s.lookup.commands[s.arg]; cmd != nil {
		s.command.Active = cmd
		cmd.fillParseState(s)
	} else if (p.Options & PassAfterNonOption) != None {
		// If PassAfterNonOption is set then all remaining arguments
		// are considered positional
		if err := s.addArgs(s.arg); err != nil {
			return err
		}

		if err := s.addArgs(s.args...); err != nil {
			return err
		}

		s.args = []string{}
	} else {
		return s.addArgs(s.arg)
	}

	return nil
}
// showBuiltinHelp renders the parser's help text and returns it
// wrapped in an ErrHelp error.
func (p *Parser) showBuiltinHelp() error {
	var b bytes.Buffer

	p.WriteHelp(&b)
	return newError(ErrHelp, b.String())
}
// printError writes err to stderr when the PrintErrors option is
// enabled, and returns err unchanged.
func (p *Parser) printError(err error) error {
	if err != nil && (p.Options&PrintErrors) != None {
		fmt.Fprintln(os.Stderr, err)
	}

	return err
}
// clearIsSet resets the isSet marker of every option in every command
// and group, preparing the parser for a fresh parse.
func (p *Parser) clearIsSet() {
	p.eachCommand(func(c *Command) {
		c.eachGroup(func(g *Group) {
			for _, option := range g.options {
				option.isSet = false
			}
		})
	}, true)
}
|
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package fsnotify
import (
"os"
"time"
"testing"
)
// TestFsnotifyEvents watches a test directory and a test file, creates
// writes to and removes the file, and asserts that at least one
// relevant event arrives before the watcher is closed and the event
// channel drained.
// NOTE(review): t.Fatalf inside the error goroutine runs off the test
// goroutine — confirm this is intended (t.Errorf would be safer), and
// eventsReceived is written by the event goroutine and read by the
// test without synchronization.
func TestFsnotifyEvents(t *testing.T) {
	// Create an fsnotify watcher instance and initialize it
	watcher, err := NewWatcher()
	if err != nil {
		t.Fatalf("NewWatcher() failed: %s", err)
	}
	const testDir string = "_test"
	// Add a watch for testDir
	err = watcher.Watch(testDir)
	if err != nil {
		t.Fatalf("Watcher.Watch() failed: %s", err)
	}
	// Receive errors on the error channel on a separate goroutine
	go func() {
		for err := range watcher.Error {
			t.Fatalf("error received: %s", err)
		}
	}()
	const testFile string = "_test/TestFsnotifyEvents.testfile"
	// Receive events on the event channel on a separate goroutine
	eventstream := watcher.Event
	var eventsReceived = 0
	done := make(chan bool)
	go func() {
		for event := range eventstream {
			// Only count relevant events
			if event.Name == testDir || event.Name == testFile {
				eventsReceived++
				t.Logf("event received: %s", event)
			} else {
				t.Logf("unexpected event received: %s", event)
			}
		}
		done <- true
	}()
	// Create a file
	// This should add at least one event to the fsnotify event queue
	var f *os.File
	f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
	if err != nil {
		t.Fatalf("creating test file failed: %s", err)
	}
	f.Sync()
	// Add a watch for testFile
	err = watcher.Watch(testFile)
	if err != nil {
		t.Fatalf("Watcher.Watch() failed: %s", err)
	}
	f.WriteString("data")
	f.Sync()
	f.Close()
	os.Remove(testFile)
	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
	time.Sleep(500e6) // 500 ms
	if eventsReceived == 0 {
		t.Fatal("fsnotify event hasn't been received after 500 ms")
	}
	t.Logf("Received %d events.", eventsReceived)
	// Try closing the fsnotify instance
	t.Log("calling Close()")
	watcher.Close()
	t.Log("waiting for the event channel to become closed...")
	select {
	case <-done:
		t.Log("event channel closed")
	case <-time.After(1e9):
		t.Fatal("event stream was not closed after 1 second")
	}
}
// TestFsnotifyClose verifies that Close is idempotent — a second
// Close call must return promptly — and that Watch fails after Close.
func TestFsnotifyClose(t *testing.T) {
	watcher, _ := NewWatcher()
	watcher.Close()
	// Fix: signal completion over a channel instead of a shared bool;
	// the unsynchronized read of `done` was a data race with the
	// goroutine's write.
	done := make(chan bool)
	go func() {
		watcher.Close()
		done <- true
	}()
	select {
	case <-done:
	case <-time.After(50e6): // 50 ms
		t.Fatal("double Close() test failed: second Close() call didn't return")
	}
	err := watcher.Watch("_test")
	if err == nil {
		t.Fatal("expected error on Watch() after Close(), got nil")
	}
}
Dir only test
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package fsnotify
import (
"os"
"time"
"testing"
)
// TestFsnotifyEvents watches a test directory and a test file, creates
// writes to and removes the file, and asserts that at least one
// relevant event arrives before the watcher is closed and the event
// channel drained.
// NOTE(review): t.Fatalf inside the error goroutine runs off the test
// goroutine — confirm this is intended (t.Errorf would be safer), and
// eventsReceived is written by the event goroutine and read by the
// test without synchronization.
func TestFsnotifyEvents(t *testing.T) {
	// Create an fsnotify watcher instance and initialize it
	watcher, err := NewWatcher()
	if err != nil {
		t.Fatalf("NewWatcher() failed: %s", err)
	}
	const testDir string = "_test"
	// Add a watch for testDir
	err = watcher.Watch(testDir)
	if err != nil {
		t.Fatalf("Watcher.Watch() failed: %s", err)
	}
	// Receive errors on the error channel on a separate goroutine
	go func() {
		for err := range watcher.Error {
			t.Fatalf("error received: %s", err)
		}
	}()
	const testFile string = "_test/TestFsnotifyEvents.testfile"
	// Receive events on the event channel on a separate goroutine
	eventstream := watcher.Event
	var eventsReceived = 0
	done := make(chan bool)
	go func() {
		for event := range eventstream {
			// Only count relevant events
			if event.Name == testDir || event.Name == testFile {
				eventsReceived++
				t.Logf("event received: %s", event)
			} else {
				t.Logf("unexpected event received: %s", event)
			}
		}
		done <- true
	}()
	// Create a file
	// This should add at least one event to the fsnotify event queue
	var f *os.File
	f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
	if err != nil {
		t.Fatalf("creating test file failed: %s", err)
	}
	f.Sync()
	// Add a watch for testFile
	err = watcher.Watch(testFile)
	if err != nil {
		t.Fatalf("Watcher.Watch() failed: %s", err)
	}
	f.WriteString("data")
	f.Sync()
	f.Close()
	os.Remove(testFile)
	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
	time.Sleep(500e6) // 500 ms
	if eventsReceived == 0 {
		t.Fatal("fsnotify event hasn't been received after 500 ms")
	}
	t.Logf("Received %d events.", eventsReceived)
	// Try closing the fsnotify instance
	t.Log("calling Close()")
	watcher.Close()
	t.Log("waiting for the event channel to become closed...")
	select {
	case <-done:
		t.Log("event channel closed")
	case <-time.After(1e9):
		t.Fatal("event stream was not closed after 1 second")
	}
}
// TestFsnotifyDirOnly is the directory-watch-only variant of
// TestFsnotifyEvents: only testDir is watched (no explicit watch on
// the file), and events for a file created, written, and removed
// inside that directory must still be delivered.
// NOTE(review): t.Fatalf inside the error goroutine runs off the test
// goroutine, and eventsReceived is shared without synchronization —
// same caveats as TestFsnotifyEvents.
func TestFsnotifyDirOnly(t *testing.T) {
	// Create an fsnotify watcher instance and initialize it
	watcher, err := NewWatcher()
	if err != nil {
		t.Fatalf("NewWatcher() failed: %s", err)
	}
	const testDir string = "_test"
	// Add a watch for testDir
	err = watcher.Watch(testDir)
	if err != nil {
		t.Fatalf("Watcher.Watch() failed: %s", err)
	}
	// Receive errors on the error channel on a separate goroutine
	go func() {
		for err := range watcher.Error {
			t.Fatalf("error received: %s", err)
		}
	}()
	const testFile string = "_test/TestFsnotifyEvents.testfile"
	// Receive events on the event channel on a separate goroutine
	eventstream := watcher.Event
	var eventsReceived = 0
	done := make(chan bool)
	go func() {
		for event := range eventstream {
			// Only count relevant events
			if event.Name == testDir || event.Name == testFile {
				eventsReceived++
				t.Logf("event received: %s", event)
			} else {
				t.Logf("unexpected event received: %s", event)
			}
		}
		done <- true
	}()
	// Create a file
	// This should add at least one event to the fsnotify event queue
	var f *os.File
	f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
	if err != nil {
		t.Fatalf("creating test file failed: %s", err)
	}
	f.Sync()
	f.WriteString("data")
	f.Sync()
	f.Close()
	os.Remove(testFile)
	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
	time.Sleep(500e6) // 500 ms
	if eventsReceived == 0 {
		t.Fatal("fsnotify event hasn't been received after 500 ms")
	}
	t.Logf("Received %d events.", eventsReceived)
	// Try closing the fsnotify instance
	t.Log("calling Close()")
	watcher.Close()
	t.Log("waiting for the event channel to become closed...")
	select {
	case <-done:
		t.Log("event channel closed")
	case <-time.After(1e9):
		t.Fatal("event stream was not closed after 1 second")
	}
}
// TestFsnotifyClose verifies that Close is idempotent — a second
// Close call must return promptly — and that Watch fails after Close.
func TestFsnotifyClose(t *testing.T) {
	watcher, _ := NewWatcher()
	watcher.Close()
	// Fix: signal completion over a channel instead of a shared bool;
	// the unsynchronized read of `done` was a data race with the
	// goroutine's write.
	done := make(chan bool)
	go func() {
		watcher.Close()
		done <- true
	}()
	select {
	case <-done:
	case <-time.After(50e6): // 50 ms
		t.Fatal("double Close() test failed: second Close() call didn't return")
	}
	err := watcher.Watch("_test")
	if err == nil {
		t.Fatal("expected error on Watch() after Close(), got nil")
	}
}
|
package funcs
import (
"bufio"
"bytes"
"fmt"
"log"
"strings"
"github.com/open-falcon/agent/g"
"github.com/open-falcon/common/model"
"github.com/toolkits/file"
"github.com/toolkits/sys"
)
// UrlMetrics probes each configured report URL and emits one
// url.check.health gauge per URL: 1 when the probe succeeds, 0
// otherwise. Returns nil when no URLs are configured.
func UrlMetrics() (L []*model.MetricValue) {
	reportUrls := g.ReportUrls()
	sz := len(reportUrls)
	if sz == 0 {
		return
	}
	for furl, timeout := range reportUrls {
		tags := fmt.Sprintf("url=%v,timeout=%v", furl, timeout)
		// Probe errors are deliberately ignored; only health matters.
		if ok, _ := probeUrl(furl, timeout); !ok {
			L = append(L, GaugeValue("url.check.health", 0, tags))
			continue
		}
		L = append(L, GaugeValue("url.check.health", 1, tags))
	}
	return
}
// probeUrl checks that furl answers HTTP 200 within timeout seconds,
// using curl to fetch only the status code. It returns true on a 200
// response and false otherwise; on failure the returned error says
// why.
func probeUrl(furl string, timeout string) (bool, error) {
	bs, err := sys.CmdOutBytes("curl", "-I", "-m", timeout, "-o", "/dev/null", "-s", "-w", "%{http_code}", furl)
	if err != nil {
		log.Printf("probe url [%v] failed.the err is: [%v]\n", furl, err)
		return false, err
	}
	reader := bufio.NewReader(bytes.NewBuffer(bs))
	retcode, err := file.ReadLine(reader)
	if err != nil {
		log.Println("read retcode failed.err is:", err)
		return false, err
	}
	code := strings.TrimSpace(string(retcode))
	if code != "200" {
		log.Printf("return code [%v] is not 200.query url is [%v]", code, furl)
		// Fix: this path previously returned a nil error alongside
		// false; report why the probe failed.
		return false, fmt.Errorf("probe url [%v]: unexpected return code [%v]", furl, code)
	}
	return true, nil
}
增加--max-filesize参数，上报结果带上src_ip的tag (add the --max-filesize option; tag reported results with src_ip)
package funcs
import (
"bufio"
"bytes"
"fmt"
"log"
"strings"
"github.com/open-falcon/agent/g"
"github.com/open-falcon/common/model"
"github.com/toolkits/file"
"github.com/toolkits/sys"
)
// UrlMetrics probes each configured report URL and emits one
// url.check.health gauge per URL (1 healthy, 0 not), tagged with the
// url, timeout, and this agent's hostname as src_ip. Returns nil when
// no URLs are configured.
// NOTE(review): src_ip is populated from g.Hostname(), which may not
// be an IP address — confirm consumers expect a hostname here.
func UrlMetrics() (L []*model.MetricValue) {
	reportUrls := g.ReportUrls()
	sz := len(reportUrls)
	if sz == 0 {
		return
	}
	hostname, err := g.Hostname()
	if err != nil {
		// Fall back to a placeholder when the hostname lookup fails.
		hostname = "None"
	}
	for furl, timeout := range reportUrls {
		tags := fmt.Sprintf("url=%v,timeout=%v,src_ip=%v", furl, timeout, hostname)
		// Probe errors are deliberately ignored; only health matters.
		if ok, _ := probeUrl(furl, timeout); !ok {
			L = append(L, GaugeValue("url.check.health", 0, tags))
			continue
		}
		L = append(L, GaugeValue("url.check.health", 1, tags))
	}
	return
}
// probeUrl checks that furl answers HTTP 200 within timeout seconds,
// using curl to fetch only the status code while capping the download
// at 102400 bytes. It returns true on a 200 response and false
// otherwise; on failure the returned error says why.
func probeUrl(furl string, timeout string) (bool, error) {
	// Fix: the size-limit option must be spelled "--max-filesize";
	// the bare "max-filesize" token was passed to curl as an extra
	// URL argument instead of an option.
	bs, err := sys.CmdOutBytes("curl", "--max-filesize", "102400", "-I", "-m", timeout, "-o", "/dev/null", "-s", "-w", "%{http_code}", furl)
	if err != nil {
		log.Printf("probe url [%v] failed.the err is: [%v]\n", furl, err)
		return false, err
	}
	reader := bufio.NewReader(bytes.NewBuffer(bs))
	retcode, err := file.ReadLine(reader)
	if err != nil {
		log.Println("read retcode failed.err is:", err)
		return false, err
	}
	code := strings.TrimSpace(string(retcode))
	if code != "200" {
		log.Printf("return code [%v] is not 200.query url is [%v]", code, furl)
		// Fix: this path previously returned a nil error alongside
		// false; report why the probe failed.
		return false, fmt.Errorf("probe url [%v]: unexpected return code [%v]", furl, code)
	}
	return true, nil
}
|
package handler
import (
"OnlineJudge/base"
"OnlineJudge/db"
"OnlineJudge/irpc"
"OnlineJudge/models"
"OnlineJudge/pbgen/api"
"log"
"time"
)
// Submit handles a submission from an administrator; hidden problems
// are permitted (use_hide=true). Panics are converted to response
// errors by PanicHandler.
func (this *AdminHandler) Submit(response *api.SubmitResponse, req *api.SubmitRequest) {
	defer PanicHandler(response, this.debug)
	Submit_BuildResponse(this.dbu, response, req,
		this.session.GetUserId(), this.session.GetIPAddr(), true, this.debug)
}
// Submit rejects submissions from unauthenticated clients with a
// login-required error.
func (this *BasicHandler) Submit(response *api.SubmitResponse, req *api.SubmitRequest) {
	MakeResponseError(response, this.debug, PBLoginRequired, nil)
}
// Need to be tested
// Depend on MetaProblems, OJInfo,
// Submit handles a submission from a signed-in user; hidden problems
// are rejected (use_hide=false). Panics are converted to response
// errors by PanicHandler.
func (this *UserHandler) Submit(response *api.SubmitResponse, req *api.SubmitRequest) {
	defer PanicHandler(response, this.debug)
	Submit_BuildResponse(this.dbu, response, req,
		this.session.GetUserId(), this.session.GetIPAddr(), false, this.debug)
}
// Submit_BuildResponse validates the submitted problem, inserts a new
// pending submission, and asks the judge daemon over RPC to start
// judging it. Hidden problems are only accepted when use_hide is set
// (admin path). RPC failures mark the submission as a system error
// instead of failing the response.
// NOTE(review): both `defer dbu.MustCommit()` calls run at function
// exit, after the explicit dbu.MustCommit() below — confirm DBUtil
// tolerates committing an already-committed transaction.
func Submit_BuildResponse(
	dbu *db.DBUtil,
	response *api.SubmitResponse,
	req *api.SubmitRequest,
	user_id int64,
	ip_addr string,
	use_hide bool,
	debug bool) {
	// Parse ProblemSid
	pid, err := base.ParseSid(req.GetProblemSid())
	if err != nil {
		MakeResponseError(response, debug, PBBadRequest, err)
		return
	}
	tx := dbu.MustBegin()
	defer dbu.MustCommit()
	mp, err := models.Query_MetaProblem_By_OJName_OJPid(tx, pid.OJName,
		pid.OJPid, []string{"meta_pid", "hide", "is_spj"}, nil)
	PanicOnError(err)
	// A zero MetaPid means the problem does not exist.
	if mp.MetaPid == 0 {
		MakeResponseError(response, debug, PBProblemNotFound, nil)
		return
	}
	// if visible
	if mp.Hide == true && !use_hide {
		MakeResponseError(response, debug, PBProblemNotFound, nil)
		return
	}
	// Add Submission
	subm := models.NewSubmissionModel()
	sub := &models.Submission{
		Status:       "Pending",
		StatusCode:   "wt",
		SubmitTime:   time.Now(),
		Code:         req.GetCode(),
		SubmitIPAddr: ip_addr,
		IsShared:     req.GetIsShared(),
		IsContest:    false,
		IsSpj:        mp.IsSpj,
		MetaPidFK:    mp.MetaPid,
		UserIdFK:     user_id,
		LangIdFK:     req.GetLanguageId(),
	}
	run_id, err := subm.Insert(tx, sub)
	PanicOnError(err)
	// Commit the insert, then open a second transaction for the
	// system-error updates below.
	dbu.MustCommit()
	tx = dbu.MustBegin()
	defer dbu.MustCommit()
	response.RunId = run_id
	// Use RPC to call Daemon to judge the submission
	helper := irpc.NewHelper()
	if err := helper.Connect(); err != nil {
		// Log the error
		log.Println(err)
		if err := subm.SetSystemError(tx, run_id); err != nil {
			PanicOnError(err)
		}
		return
	}
	defer helper.Disconnect()
	helper.NewClient()
	res, err := helper.StartJudging(run_id)
	if err != nil || res.Received != true {
		// Log the error
		log.Println(err)
		if err := subm.SetSystemError(tx, run_id); err != nil {
			PanicOnError(err)
		}
		return
	}
}
fix bugs in submit.go
package handler
import (
"OnlineJudge/base"
"OnlineJudge/db"
"OnlineJudge/irpc"
"OnlineJudge/models"
"OnlineJudge/pbgen/api"
"log"
"time"
)
// Submit handles a submission from an administrator; hidden problems
// are permitted (use_hide=true). Panics are converted to response
// errors by PanicHandler.
func (this *AdminHandler) Submit(response *api.SubmitResponse, req *api.SubmitRequest) {
	defer PanicHandler(response, this.debug)
	Submit_BuildResponse(this.dbu, response, req,
		this.session.GetUserId(), this.session.GetIPAddr(), true, this.debug)
}
//Submit for a BasicHandler (a client that is not logged in) always rejects
//the request with PBLoginRequired: anonymous users may not submit code.
func (this *BasicHandler) Submit(response *api.SubmitResponse, req *api.SubmitRequest) {
	MakeResponseError(response, this.debug, PBLoginRequired, nil)
}
// Need to be tested
// Depend on MetaProblems, OJInfo,
//Submit handles a submission from a regular logged-in user. Hidden problems
//are not submittable for regular users, so use_hide is passed as false.
func (this *UserHandler) Submit(response *api.SubmitResponse, req *api.SubmitRequest) {
	defer PanicHandler(response, this.debug)
	sess := this.session
	Submit_BuildResponse(this.dbu, response, req,
		sess.GetUserId(), sess.GetIPAddr(), false, this.debug)
}
//Submit_BuildResponse validates a submission request, stores the submission
//in "Pending" state, and asks the judge daemon (via RPC) to start judging.
//
//  dbu      -- database helper used to begin/commit transactions
//  user_id  -- id of the submitting user
//  ip_addr  -- client IP address recorded with the submission
//  use_hide -- when true, hidden problems may still be submitted to (admins)
//  debug    -- include debug detail in error responses
func Submit_BuildResponse(
	dbu *db.DBUtil,
	response *api.SubmitResponse,
	req *api.SubmitRequest,
	user_id int64,
	ip_addr string,
	use_hide bool,
	debug bool) {
	// Parse ProblemSid
	pid, err := base.ParseSid(req.GetProblemSid())
	if err != nil {
		MakeResponseError(response, debug, PBBadRequest, err)
		return
	}
	tx := dbu.MustBegin()
	defer dbu.Rollback()
	mp, err := models.Query_MetaProblem_By_OJName_OJPid(tx, pid.OJName,
		pid.OJPid, []string{"meta_pid", "hide", "is_spj"}, nil)
	PanicOnError(err)
	if mp.MetaPid == 0 {
		MakeResponseError(response, debug, PBProblemNotFound, nil)
		return
	}
	// Hidden problems are only reachable when use_hide is set.
	if mp.Hide && !use_hide {
		MakeResponseError(response, debug, PBProblemNotFound, nil)
		return
	}
	// Add the submission in "Pending" state.
	subm := models.NewSubmissionModel()
	sub := &models.Submission{
		Status:       "Pending",
		StatusCode:   "wt",
		SubmitTime:   time.Now(),
		Code:         req.GetCode(),
		SubmitIPAddr: ip_addr,
		IsShared:     req.GetIsShared(),
		IsContest:    false,
		IsSpj:        mp.IsSpj,
		MetaPidFK:    mp.MetaPid,
		UserIdFK:     user_id,
		LangIdFK:     req.GetLanguageId(),
	}
	run_id, err := subm.Insert(tx, sub)
	PanicOnError(err)
	response.RunId = run_id
	// Commit now so the judge daemon can see the new submission row.
	dbu.MustCommit()
	// BUG FIX: the result of MustBegin must be re-assigned to tx; previously
	// it was discarded, so the SetSystemError calls below ran against the
	// already-committed transaction.
	tx = dbu.MustBegin()
	defer dbu.Rollback()
	// Use RPC to call Daemon to judge the submission
	helper := irpc.NewHelper()
	if err := helper.Connect(); err != nil {
		// Log the error
		log.Println(err)
		if err := subm.SetSystemError(tx, run_id); err != nil {
			PanicOnError(err)
		}
		// BUG FIX: commit the status change before returning; otherwise the
		// deferred Rollback would discard the system-error status.
		dbu.MustCommit()
		return
	}
	defer helper.Disconnect()
	helper.NewClient()
	res, err := helper.StartJudging(run_id)
	// Note: short-circuit || guarantees res is only dereferenced when err is nil.
	if err != nil || !res.Received {
		// Judging could not start: mark the submission as a system error and
		// fall through so the status change is committed below.
		log.Println(err)
		if err := subm.SetSystemError(tx, run_id); err != nil {
			PanicOnError(err)
		}
	}
	dbu.MustCommit()
}
|
package boardgame
import (
"github.com/Sirupsen/logrus"
"github.com/jkomoros/boardgame/enum"
"github.com/jkomoros/boardgame/errors"
"sort"
)
//GameDelegate is the place that various parts of the game lifecycle can be
//modified to support this particular game. Typically you embed
//DefaultGameDelegate in your own struct, and only override methods whose
//default behavior is incorrect for your game.
type GameDelegate interface {

	//Name is a string that defines the type of game this is. The name should
	//be unique and compact, and avoid any special characters other than "-"
	//or "_", since they will sometimes be used in a URL path. Good examples
	//are "tictactoe", "blackjack". Once configured, names should never change
	//over the lifetime of the gametype, since it will be persisted in
	//storage. Subclasses should override this.
	Name() string

	//DisplayName is a string that defines the type of game this is in a way
	//appropriate for humans. The name should be unique but human readable. It
	//is purely for human consumption, and may change over time with no
	//adverse effects. Good examples are "Tic Tac Toe", "Blackjack".
	//Subclasses should override this.
	DisplayName() string

	//Description is a string that describes the game type in a descriptive
	//sentence. A reasonable value for "tictactoe" is "A classic game where
	//players compete to get three in a row"
	Description() string

	//ConfigureMoves will be called during creation of a GameManager in
	//NewGameManager. This is the time to install moves onto the manager by
	//returning a list of moves to install. Typically you use moves.Combine
	//and friends to organize your list of moves to install. If the moves you
	//add are illegal for any reason, NewGameManager will fail with an error.
	//By the time this is called, delegate.SetManager will already have been
	//called, so you'll have access to the manager via Manager().
	ConfigureMoves() []MoveConfig

	//ConfigureAgents will be called when creating a new GameManager. Emit the
	//agents you want to install.
	ConfigureAgents() []Agent

	//ConfigureDecks will be called when the GameManager is being booted up.
	//Each entry in the return value will be configured on the ComponentChest
	//that is being created.
	ConfigureDecks() map[string]*Deck

	//ConfigureEnums is called during set up of a new GameManager. Return the
	//set of enums you want to be associated with this GameManager's Chest.
	ConfigureEnums() *enum.Set

	//ConfigureConstants is called during set-up of a new GameManager. Return
	//the map of constants you want to create, which will be configured onto
	//the newly created chest via AddConstant. If any of the AddConstant calls
	//errors, the GameManager will fail to be set up. Constants are primarily
	//useful in two cases: first, when you want to have access to a constant
	//value client-side, and second, when you want to be able to use a
	//constant value in a tag-based struct inflater.
	ConfigureConstants() PropertyCollection

	//GameStateConstructor and PlayerStateConstructor are called to get an
	//instantiation of the concrete game/player structs that your package
	//defines. This is used both to create the initial state, but also to
	//inflate states from the database. These methods should always return the
	//underlying same type of struct when called. This means that if different
	//players have very different roles in a game, there might be many
	//properties that are not in use for any given player. The simple
	//properties (ints, bools, strings) should all be their zero-value.
	//Importantly, all Stacks, Timers, and Enums should be non-nil, because
	//an initialized struct contains information about things like MaxSize,
	//Size, and a reference to the deck they are affiliated with. It is also
	//possible to use tag-based auto-initialization for these fields; see the
	//package doc on Constructors. Since these two methods are always
	//required and always specific to each game type, DefaultGameDelegate does
	//not implement them, as an extra reminder that you must implement them
	//yourself.
	GameStateConstructor() ConfigurableSubState

	//PlayerStateConstructor is similar to GameStateConstructor, but
	//playerIndex is the value that this PlayerState must return when its
	//PlayerIndex() is called.
	PlayerStateConstructor(player PlayerIndex) ConfigurablePlayerState

	//DynamicComponentValuesConstructor returns an empty
	//DynamicComponentValues for the given deck. If nil is returned, then the
	//components in that deck don't have any dynamic component state. This
	//method must always return the same underlying type of struct for the
	//same deck. If the returned object also implements the ComponentValues
	//interface, then SetContainingComponent will be called on the
	//DynamicComponent whenever one is created, with a reference back to the
	//component it's associated with.
	DynamicComponentValuesConstructor(deck *Deck) ConfigurableSubState

	//DistributeComponentToStarterStack is called during set up to establish
	//the Deck/Stack invariant that every component in the chest is placed in
	//precisely one Stack. Game will call this on each component in the Chest
	//in order. This is where the logic goes to make sure each Component goes
	//into its correct starter stack. You must return a non-nil Stack for each
	//call, after which the given Component will be inserted into
	//NextSlotIndex of that stack. If that is not the ordering you desire, you
	//can fix it up in FinishSetUp by using SwapComponents. If any errors are
	//returned, any nil Stacks are returned, or any returned stacks don't have
	//space for another component, NewGame will fail and return an error.
	//State and Component are only provided for reference; do not modify them.
	DistributeComponentToStarterStack(state ImmutableState, c Component) (ImmutableStack, error)

	//BeginSetUp is a chance to modify the initial state object *before* the
	//components are distributed to it. It is also where the variant
	//configuration for your gametype will be passed (it will already have
	//been checked for legality and had all configure defaults set), although
	//you can also retrieve that at any time via game.Variant(). This is a
	//good place to configure state that will be necessary for you to make the
	//right decisions in DistributeComponentToStarterStack, or to transcribe
	//config information you were passed into properties on your gameState as
	//appropriate. If error is non-nil, Game setup will be aborted, with the
	//reasoning including the error message provided.
	BeginSetUp(state State, variant Variant) error

	//FinishSetUp is called during NewGame, *after* components have been
	//distributed to their StarterStack. This is the last chance to modify the
	//state before the game's initial state is considered final. For example,
	//if you have a card game this is where you'd make sure the starter draw
	//stacks are shuffled. If your game has multiple rounds, or if you don't
	//want the game to start with it already set-up (e.g. you want to show
	//animations of starter cards being dealt) then it's probably best to do
	//most of the logic in a SetUp phase. See the README for more. If error is
	//non-nil, Game setup will be aborted, with the reasoning including the
	//error message provided.
	FinishSetUp(state State) error

	//CheckGameFinished should return true if the game is finished, and who
	//the winners are. Called after every move is applied.
	CheckGameFinished(state ImmutableState) (finished bool, winners []PlayerIndex)

	//ProposeFixUpMove is called after a move has been applied. It may return
	//a FixUp move, which will be applied before any other moves are applied.
	//If it returns nil, we may take the next move off of the queue. FixUp
	//moves are useful for things like shuffling a discard deck back into a
	//draw deck, or other moves that are necessary to get the GameState back
	//into reasonable shape.
	ProposeFixUpMove(state ImmutableState) Move

	//DefaultNumPlayers returns the number of users that this game defaults to.
	//For example, for tictactoe, it will be 2. If 0 is provided to
	//manager.NewGame(), we will use this value instead.
	DefaultNumPlayers() int

	//Min/MaxNumPlayers should return the min and max number of players,
	//respectively. The engine doesn't use this directly, instead looking at
	//LegalNumPlayers. Typically your LegalNumPlayers will check the given
	//number of players is between these two extremes.
	MinNumPlayers() int
	MaxNumPlayers() int

	//LegalNumPlayers will be consulted when a new game is created. It should
	//return true if the given number of players is legal, and false
	//otherwise. If this returns false, the NewGame will fail with an error.
	//Game creation will automatically reject a numPlayers that does not
	//result in at least one player existing. Generally this is simply
	//checking to make sure the number of players is between Min and Max
	//(inclusive), although some games could only allow, for example, even
	//numbers of players.
	LegalNumPlayers(numPlayers int) bool

	//Variants returns a VariantConfig, which describes the different
	//categories of configuration values and the legal values they may take
	//on. Ultimately your LegalVariant is the final arbiter of which variants
	//are legal; this method is mainly used so that user interfaces know which
	//variants to show to the user. In general you shouldn't call this, but
	//instead call gameManager.Variants() which will ensure your VariantConfig
	//is initialized and memoize the return result.
	Variants() VariantConfig

	//CurrentPlayerIndex returns the index of the "current" player--a notion
	//that is game specific (and sometimes inapplicable). If CurrentPlayer
	//doesn't make sense (perhaps the game never has a notion of current
	//player, or the type of round that we're in has no current player), this
	//should return ObserverPlayerIndex. The result of this method is used to
	//power state.CurrentPlayer.
	CurrentPlayerIndex(state ImmutableState) PlayerIndex

	//CurrentPhase returns the phase that the game state is currently in.
	//Phase is a formalized convention used in moves.Base to make it easier to
	//write fix-up moves that only apply in certain phases, like SetUp. The
	//return result is primarily used in moves.Base to check whether it is one
	//of the phases in a given Move's LegalPhases. See moves.Base for more
	//information.
	CurrentPhase(state ImmutableState) int

	//PhaseEnum returns the enum for game phases (the return values of
	//CurrentPhase are expected to be valid enums within that enum). If this
	//returns a non-nil enums.TreeEnum, then the state will not be able to be
	//saved if CurrentPhase() returns a value that is not a leaf-node.
	PhaseEnum() enum.Enum

	//SanitizationPolicy is consulted when sanitizing states. It is called for
	//each prop in the state, including the set of groups that this player is
	//a member of. In practice the default behavior of DefaultGameDelegate,
	//which uses struct tags to figure out the policy, is sufficient and you
	//do not need to override this. For more on how sanitization works, see
	//the package doc. The StatePropertyRef passed will always have the Index
	//properties set to -1, signifying that the returned policy applies to all
	//items in the Stack/Board.
	SanitizationPolicy(prop StatePropertyRef, groupMembership map[int]bool) Policy

	//If you have computed properties that you want to be included in your
	//JSON (for example, for use client-side), export them here by creating a
	//dictionary with their values.
	ComputedGlobalProperties(state ImmutableState) PropertyCollection
	ComputedPlayerProperties(player ImmutablePlayerState) PropertyCollection

	//Diagram should return a basic debug rendering of state in multi-line
	//ascii art. Useful for debugging. State.Diagram() will reach out to this
	//method.
	Diagram(s ImmutableState) string

	//SetManager configures which manager this delegate is in use with. A
	//given delegate can only be used by a single manager at a time.
	SetManager(manager *GameManager)

	//Manager returns the Manager that was set on this delegate.
	Manager() *GameManager
}
//PropertyCollection is just an alias for map[string]interface{}
type PropertyCollection map[string]interface{}
//Copy returns a shallow copy of PropertyCollection
func (p PropertyCollection) Copy() PropertyCollection {
result := make(PropertyCollection, len(p))
for key, val := range result {
result[key] = val
}
return result
}
//DefaultGameDelegate is a struct that implements stubs for all of
//GameDelegate's methods. This makes it easy to override just one or two
//methods by creating your own struct that anonymously embeds this one. Name,
//GameStateConstructor, PlayerStateConstructor, and ConfigureMoves are not
//implemented, since those almost certainly must be overridden for your
//particular game.
type DefaultGameDelegate struct {
	//manager is set via SetManager and returned by Manager().
	manager *GameManager
	//moveProgressions is internal bookkeeping. NOTE(review): not referenced
	//in the visible portion of this file -- confirm its use elsewhere.
	moveProgressions map[int][]string
}
//Diagram returns the string "This should be overriden to render a reasonable state here"
func (d *DefaultGameDelegate) Diagram(state ImmutableState) string {
	//Placeholder output; real games override this with an ascii rendering.
	const placeholder = "This should be overriden to render a reasonable state here"
	return placeholder
}
//DisplayName by default just returns the Name() that is returned from the
//delegate in use.
func (d *DefaultGameDelegate) DisplayName() string {
	//Route through the manager's delegate so an embedding struct's Name()
	//override is picked up.
	delegate := d.Manager().Delegate()
	return delegate.Name()
}
//Description defaults to "" if not overridden.
func (d *DefaultGameDelegate) Description() string {
	return ""
}
//Manager returns the manager object that was provided to SetManager.
func (d *DefaultGameDelegate) Manager() *GameManager {
	return d.manager
}
//SetManager keeps a reference to the passed manager, and returns it when
//Manager() is called.
func (d *DefaultGameDelegate) SetManager(manager *GameManager) {
	d.manager = manager
}
//DynamicComponentValuesConstructor returns nil, as not all games have
//DynamicComponentValues. Override this if your game does require
//DynamicComponentValues.
func (d *DefaultGameDelegate) DynamicComponentValuesConstructor(deck *Deck) ConfigurableSubState {
	return nil
}
//The Default ProposeFixUpMove runs through all moves in Moves, in order, and
//returns the first one that returns true from IsFixUp and is legal at the
//current state. In many cases, this behavior should be sufficient and need not
//be overwritten. Be extra sure that your FixUpMoves have a conservative Legal
//function, otherwise you could get a panic from applying too many FixUp
//moves. Will emit debug information about why certain fixup moves didn't apply
//if the Manager's log level is Debug or higher.
func (d *DefaultGameDelegate) ProposeFixUpMove(state ImmutableState) Move {
	//Only pay the cost of building log entries when debug logging is on.
	isDebug := d.Manager().Logger().Level >= logrus.DebugLevel
	var logEntry *logrus.Entry

	if isDebug {
		logEntry = d.Manager().Logger().WithFields(logrus.Fields{
			"game":    state.Game().Id(),
			"version": state.Version(),
		})
		logEntry.Debug("***** ProposeFixUpMove called *****")
	}

	for _, move := range state.Game().Moves() {
		//entry is only non-nil when isDebug; every use below is guarded.
		var entry *logrus.Entry
		if isDebug {
			entry = logEntry.WithField("movetype", move.Info().Name())
		}
		if !move.IsFixUp() {
			//Not a fix up move
			continue
		}
		//Fix-up moves are checked for legality as the admin player.
		if err := move.Legal(state, AdminPlayerIndex); err == nil {
			if isDebug {
				entry.Debug(move.Info().Name() + " : MATCH")
			}
			//Found it!
			return move
		} else {
			if isDebug {
				entry.Debug(move.Info().Name() + " : " + err.Error())
			}
		}
	}
	if isDebug {
		logEntry.Debug("NO MATCH")
	}
	//No moves apply now.
	return nil
}
//CurrentPlayerIndex returns gameState.CurrentPlayer, if that is a PlayerIndex
//property. If not, returns ObserverPlayerIndex.
func (d *DefaultGameDelegate) CurrentPlayerIndex(state ImmutableState) PlayerIndex {
	reader := state.ImmutableGameState().Reader()
	if index, err := reader.PlayerIndexProp("CurrentPlayer"); err == nil {
		return index
	}
	//The game state apparently doesn't store CurrentPlayer there.
	return ObserverPlayerIndex
}
//CurrentPhase by default will return the value of gameState.Phase, if it is
//an enum. If it is not, it will return -1 instead, to make it more clear that
//it's an invalid CurrentPhase (phase 0 is often valid).
func (d *DefaultGameDelegate) CurrentPhase(state ImmutableState) int {
	reader := state.ImmutableGameState().Reader()
	if phaseEnum, err := reader.ImmutableEnumProp("Phase"); err == nil {
		return phaseEnum.Value()
	}
	//No "Phase" enum property on the game state.
	return -1
}
//PhaseEnum defaults to the enum named "Phase" which is the convention for the
//name of the Phase enum. moves.Base will handle cases where that isn't a
//valid enum gracefully.
func (d *DefaultGameDelegate) PhaseEnum() enum.Enum {
	enums := d.Manager().Chest().Enums()
	return enums.Enum("Phase")
}
//DistributeComponentToStarterStack is the default stub; it always errors.
func (d *DefaultGameDelegate) DistributeComponentToStarterStack(state ImmutableState, c Component) (ImmutableStack, error) {
	//The stub returns an error, because if this is called that means there
	//was a component in the deck. And if we didn't store it in a stack, then
	//we are in violation of the invariant.
	return nil, errors.New("DistributeComponentToStarterStack was called, but the component was not stored in a stack")
}
//SanitizationPolicy uses struct tags to identify the right policy to apply
//(see the package doc on SanitizationPolicy for how to configure those tags).
//It sees which policies apply given the provided group membership, and then
//returns the LEAST restrictive policy that applies. This behavior is almost
//always what you want; it is rare to need to override this method.
func (d *DefaultGameDelegate) SanitizationPolicy(prop StatePropertyRef, groupMembership map[int]bool) Policy {
	manager := d.Manager()

	//Pick the validator that corresponds to the state group the property
	//lives in.
	var validator *readerValidator
	switch prop.Group {
	case StateGroupGame:
		validator = manager.gameValidator
	case StateGroupPlayer:
		validator = manager.playerValidator
	case StateGroupDynamicComponentValues:
		validator = manager.dynamicComponentValidator[prop.DeckName]
	}

	if validator == nil {
		return PolicyInvalid
	}

	policyMap := validator.sanitizationPolicy[prop.PropName]

	//Collect every policy configured for a group the viewer belongs to.
	var candidates []int
	for group, isMember := range groupMembership {
		//The membership map should only hold true values, but double-check.
		if !isMember {
			continue
		}
		//Only groups actually present in the policy map contribute.
		if policy, ok := policyMap[group]; ok {
			candidates = append(candidates, int(policy))
		}
	}

	if len(candidates) == 0 {
		return PolicyVisible
	}

	//Least restrictive == numerically smallest after sorting.
	sort.Ints(candidates)
	return Policy(candidates[0])
}
//ComputedGlobalProperties returns nil.
func (d *DefaultGameDelegate) ComputedGlobalProperties(state ImmutableState) PropertyCollection {
	return nil
}
//ComputedPlayerProperties returns nil.
func (d *DefaultGameDelegate) ComputedPlayerProperties(player ImmutablePlayerState) PropertyCollection {
	return nil
}
//BeginSetUp does not do anything and returns nil.
func (d *DefaultGameDelegate) BeginSetUp(state State, variant Variant) error {
	//Don't need to do anything by default
	return nil
}
//FinishSetUp doesn't do anything and returns nil.
func (d *DefaultGameDelegate) FinishSetUp(state State) error {
	//Don't need to do anything by default
	return nil
}
//defaultCheckGameFinishedDelegate can be private because
//DefaultGameDelegate implements the methods by default.
type defaultCheckGameFinishedDelegate interface {
	//GameEndConditionMet reports whether the game is over and ready to score.
	GameEndConditionMet(state ImmutableState) bool
	//PlayerScore returns the score for the given player.
	PlayerScore(pState ImmutablePlayerState) int
}
//PlayerGameScorer is an optional interface that can be implemented by
//PlayerSubStates. If it is implemented, DefaultGameDelegate's default
//PlayerScore() method will return it.
type PlayerGameScorer interface {
	//GameScore returns the overall score for the game for the player at this
	//point in time.
	GameScore() int
}
//CheckGameFinished by default checks delegate.GameEndConditionMet(). If true,
//then it fetches delegate.PlayerScore() for each player and returns all
//players who have the highest score as winners. To use this implementation
//simply implement those methods. This is sufficient for many games, but not
//all, so sometimes needs to be overridden.
func (d *DefaultGameDelegate) CheckGameFinished(state ImmutableState) (finished bool, winners []PlayerIndex) {
	if d.Manager() == nil {
		return false, nil
	}
	//Reach up to the manager's delegate to get the struct that embeds us.
	//Deliberately not comma-ok: panic with a descriptive error if the
	//delegate doesn't implement the required methods.
	checker := d.Manager().Delegate().(defaultCheckGameFinishedDelegate)
	if !checker.GameEndConditionMet(state) {
		return false, nil
	}
	players := state.ImmutablePlayerStates()
	//Game is over; find the max score. NOTE(review): starts at 0, which
	//assumes scores are non-negative -- confirm for games that can produce
	//negative scores.
	maxScore := 0
	for i := range players {
		if score := checker.PlayerScore(players[i]); score > maxScore {
			maxScore = score
		}
	}
	//Everyone who hit the max score is a winner.
	for i := range players {
		if checker.PlayerScore(players[i]) == maxScore {
			winners = append(winners, PlayerIndex(i))
		}
	}
	return true, winners
}
//GameEndConditionMet is used in the default CheckGameFinished implementation.
//It should return true when the game is over and ready for scoring.
//CheckGameFinished uses this by default; if you override CheckGameFinished
//you don't need to override this. The default implementation of this simply
//returns false.
func (d *DefaultGameDelegate) GameEndConditionMet(state ImmutableState) bool {
	return false
}
//PlayerScore is used in the default CheckGameFinished implementation. It
//should return the score for the given player. CheckGameFinished uses this by
//default; if you override CheckGameFinished you don't need to override this.
//The default implementation returns pState.GameScore() (if pState implements
//the PlayerGameScorer interface), or 0 otherwise.
func (d *DefaultGameDelegate) PlayerScore(pState ImmutablePlayerState) int {
	scorer, ok := pState.(PlayerGameScorer)
	if !ok {
		//Player state doesn't expose a score; treat as zero.
		return 0
	}
	return scorer.GameScore()
}
//DefaultNumPlayers returns 2.
func (d *DefaultGameDelegate) DefaultNumPlayers() int {
	return 2
}
//MinNumPlayers returns 1.
func (d *DefaultGameDelegate) MinNumPlayers() int {
	return 1
}
//MaxNumPlayers returns 16.
func (d *DefaultGameDelegate) MaxNumPlayers() int {
	return 16
}
//LegalNumPlayers checks that the number of players is between MinNumPlayers
//and MaxNumPlayers, inclusive. You'd only want to override this if some
//player numbers in that range are not legal, for example a game where only
//even numbers of players may play.
func (d *DefaultGameDelegate) LegalNumPlayers(numPlayers int) bool {
	//Route through the delegate so embedding structs' overrides are used.
	delegate := d.Manager().Delegate()
	if numPlayers < delegate.MinNumPlayers() {
		return false
	}
	return numPlayers <= delegate.MaxNumPlayers()
}
//Variants returns a VariantConfig with no entries.
func (d *DefaultGameDelegate) Variants() VariantConfig {
	return VariantConfig{}
}
//ConfigureAgents by default returns nil. If you want agents in your game,
//override this.
func (d *DefaultGameDelegate) ConfigureAgents() []Agent {
	return nil
}
//ConfigureEnums simply returns nil. In general you want to override this with
//a body of `return Enums`, if you're using `boardgame-util config` to
//generate your enum set.
func (d *DefaultGameDelegate) ConfigureEnums() *enum.Set {
	return nil
}
//ConfigureDecks returns a zero-entry map. You want to override this if you
//have any components in your game (which the vast majority of games do)
func (d *DefaultGameDelegate) ConfigureDecks() map[string]*Deck {
	//Non-nil so callers can iterate and add entries safely.
	return map[string]*Deck{}
}
//ConfigureConstants returns a zero-entry map. If you have any constants you
//want to use client-side or in tag-based struct auto-inflaters, you will want
//to override this.
func (d *DefaultGameDelegate) ConfigureConstants() PropertyCollection {
	return nil
}
DefaultGameDelegate can also score games where the lowest score wins, when delegate.LowScoreWins() returns true.
package boardgame
import (
"github.com/Sirupsen/logrus"
"github.com/jkomoros/boardgame/enum"
"github.com/jkomoros/boardgame/errors"
"sort"
)
//GameDelegate is the place that various parts of the game lifecycle can be
//modified to support this particular game. Typically you embed
//DefaultGameDelegate in your won struct, and only override methods whose
//default behavior is incorrect for your game.
type GameDelegate interface {
//Name is a string that defines the type of game this is. The name should
//be unique and compact, and avoid any special characters other than "-"
//or "_", since they will sometimes be used in a URL path. Good examples
//are "tictactoe", "blackjack". Once configured, names should never change
//over the lifetime of the gametype, since it will be persisted in
//storage. Subclasses should override this.
Name() string
//DisplayName is a string that defines the type of game this is in a way
//appropriate for humans. The name should be unique but human readable. It
//is purely for human consumption, and may change over time with no
//adverse effects. Good examples are "Tic Tac Toe", "Blackjack".
//Subclasses should override this.
DisplayName() string
//Description is a string that describes the game type in a descriptive
//sentence. A reasonable value for "tictactoe" is "A classic game where
//players compete to get three in a row"
Description() string
//ConfigureMoves will be called during creation of a GameManager in
//NewGameManager. This is the time to install moves onto the manager by
//returning a list of moves to install. Typically you use moves.Combine
//and friends to organize your list of moves to install. If the moves you
//add are illegal for any reason, NewGameManager will fail with an error.
//By the time this is called. delegate.SetManager will already have been
//called, so you'll have access to the manager via Manager().
ConfigureMoves() []MoveConfig
//ConfigureAgents will be called when creating a new GameManager. Emit the
//agents you want to install.
ConfigureAgents() []Agent
//ConfigureDecks will be called when the GameManager is being booted up.
//Each entry in the return value will be configured on the ComponentChest
//that is being created.
ConfigureDecks() map[string]*Deck
//ConfigureEnums is called during set up of a new GameManager. Return the
//set of enums you want to be associated with this GameManagaer's Chest.
ConfigureEnums() *enum.Set
//ConfigureConstants is called during set-up of a new GameManager. Return
//the map of constants you want to create, which will be configured onto
//the newly created chest via AddConstant. If any of the AddConstant calls
//errors, the GameManager will fail to be set up. Constants are primarily
//useful in two cases: first, when you want to have access to a constant
//value client-side, and second, when you want to be able to use a
//constant value in a tag-based struct inflater.
ConfigureConstants() PropertyCollection
//GameStateConstructor and PlayerStateConstructor are called to get an
//instantiation of the concrete game/player structs that your package
//defines. This is used both to create the initial state, but also to
//inflate states from the database. These methods should always return the
//underlying same type of struct when called. This means that if different
//players have very different roles in a game, there might be many
//properties that are not in use for any given player. The simple
//properties (ints, bools, strings) should all be their zero-value.
//Importantly, all Stacks, Timers, and Enums should be non- nil, because
//an initialized struct contains information about things like MaxSize,
//Size, and a reference to the deck they are affiliated with. It is also
//possible to use tag-based auto-initalization for these fields; see the
//package doc on Constructors. Since these two methods are always
//required and always specific to each game type, DefaultGameDelegate does
//not implement them, as an extra reminder that you must implement them
//yourself.
GameStateConstructor() ConfigurableSubState
//PlayerStateConstructor is similar to GameStateConstructor, but
//playerIndex is the value that this PlayerState must return when its
//PlayerIndex() is called.
PlayerStateConstructor(player PlayerIndex) ConfigurablePlayerState
//DynamicComponentValuesConstructor returns an empty
//DynamicComponentValues for the given deck. If nil is returned, then the
//components in that deck don't have any dynamic component state. This
//method must always return the same underlying type of struct for the
//same deck. If the returned object also implements the ComponentValues
//interface, then SetContainingComponent will be called on the
//DynamicComponent whenever one is created, with a reference back to the
//component it's associated with.
DynamicComponentValuesConstructor(deck *Deck) ConfigurableSubState
//DistributeComponentToStarterStack is called during set up to establish
//the Deck/Stack invariant that every component in the chest is placed in
//precisely one Stack. Game will call this on each component in the Chest
//in order. This is where the logic goes to make sure each Component goes
//into its correct starter stack. You must return a non-nil Stack for each
//call, after which the given Component will be inserted into
//NextSlotIndex of that stack. If that is not the ordering you desire, you
//can fix it up in FinishSetUp by using SwapComponents. If any errors are
//returned, any nil Stacks are returned, or any returned stacks don't have
//space for another component, NewGame will fail and return an error.
//State and Component are only provided for reference; do not modify them.
DistributeComponentToStarterStack(state ImmutableState, c Component) (ImmutableStack, error)
//BeginSetup is a chance to modify the initial state object *before* the
//components are distributed to it. It is also where the variant
//configuration for your gametype will be passed (it will already have
//been checked for legality and had all configure defaults set), although
//you can also retrieve that at any time via game.Variant(). This is a
//good place to configure state that will be necessary for you to make the
//right decisions in DistributeComponentToStarterStack, or to transcribe
//config information you were passed into properties on your gameState as
//appropriate. If error is non-nil, Game setup will be aborted, with the
//reasoning including the error message provided.
BeginSetUp(state State, variant Variant) error
//FinishSetUp is called during NewGame, *after* components have been
//distributed to their StarterStack. This is the last chance to modify the
//state before the game's initial state is considered final. For example,
//if you have a card game this is where you'd make sure the starter draw
//stacks are shuffled. If your game has multiple rounds, or if you don't
//want the game to start with it already set-up (e.g. you want to show
//animations of starter cards being dealt) then it's probably best to do
//most of the logic in a SetUp phase. See the README for more. If error is
//non-nil, Game setup will be aborted, with the reasoning including the
//error message provided.
FinishSetUp(state State) error
//CheckGameFinished should return true if the game is finished, and who
//the winners are. Called after every move is applied.
CheckGameFinished(state ImmutableState) (finished bool, winners []PlayerIndex)
//ProposeFixUpMove is called after a move has been applied. It may return
//a FixUp move, which will be applied before any other moves are applied.
//If it returns nil, we may take the next move off of the queue. FixUp
//moves are useful for things like shuffling a discard deck back into a
//draw deck, or other moves that are necessary to get the GameState back
//into reasonable shape.
ProposeFixUpMove(state ImmutableState) Move
//DefaultNumPlayers returns the number of users that this game defaults to.
//For example, for tictactoe, it will be 2. If 0 is provided to
//manager.NewGame(), we wil use this value instead.
DefaultNumPlayers() int
//Min/MaxNumPlayers should return the min and max number of players,
//respectively. The engine doesn't use this directly, instead looking at
//LegalNumPlayers. Typically your LegalNumPlayers will check the given
//number of players is between these two extremes.
MinNumPlayers() int
MaxNumPlayers() int
//LegalNumPlayers will be consulted when a new game is created. It should
//return true if the given number of players is legal, and false
//otherwise. If this returns false, the NewGame will fail with an error.
//Game creation will automatically reject a numPlayers that does not
//result in at least one player existing. Generally this is simply
//checking to make sure the number of players is between Min and Max
//(inclusive), although some games could only allow, for example, even
//numbers of players.
LegalNumPlayers(numPlayers int) bool
//Variants returns a VariantConfig, which describes the different
//categories of configuration values and the legal values they may take
//on. Ultimately your LegalVariant is the final arbiter of which variants
//are legal; this method is mainly used so that user interfaces know which
//variants to show to the user. In general you shouldn't call this, but
//instead call gameManager.Variants() which will ensure your VariantConfig
//is initalized and memoize the return result.
Variants() VariantConfig
//CurrentPlayerIndex returns the index of the "current" player--a notion
//that is game specific (and sometimes inapplicable). If CurrentPlayer
//doesn't make sense (perhaps the game never has a notion of current
//player, or the type of round that we're in has no current player), this
//should return ObserverPlayerIndex. The result of this method is used to
//power state.CurrentPlayer.
CurrentPlayerIndex(state ImmutableState) PlayerIndex
//CurrentPhase returns the phase that the game state is currently in.
//Phase is a formalized convention used in moves.Base to make it easier to
//write fix-up moves that only apply in certain phases, like SetUp. The
//return result is primarily used in moves.Base to check whether it is one
//of the phases in a give Move's LegalPhases. See moves.Base for more
//information.
CurrentPhase(state ImmutableState) int
//PhaseEnum returns the enum for game phases (the return values of
//CurrentPhase are expected to be valid enums within that enum). If this
//returns a non-nil enums.TreeEnum, then the state will not be able to be
//saved if CurrentPhase() returns a value that is not a leaf-node.
PhaseEnum() enum.Enum
//SanitizationPolicy is consulted when sanitizing states. It is called for
//each prop in the state, including the set of groups that this player is
//a mamber of. In practice the default behavior of DefaultGameDelegate,
//which uses struct tags to figure out the policy, is sufficient and you
//do not need to override this. For more on how sanitization works, see
//the package doc. The statePropetyRef passed will always have the Index
//properties set to -1, signifying that the returned policy applies to all
//items in the Stack/Board.
SanitizationPolicy(prop StatePropertyRef, groupMembership map[int]bool) Policy
//If you have computed properties that you want to be included in your
//JSON (for example, for use clientside), export them here by creating a
//dictionary with their values.
ComputedGlobalProperties(state ImmutableState) PropertyCollection
ComputedPlayerProperties(player ImmutablePlayerState) PropertyCollection
//Diagram should return a basic debug rendering of state in multi-line
//ascii art. Useful for debugging. State.Diagram() will reach out to this
//method.
Diagram(s ImmutableState) string
//SetManager configures which manager this delegate is in use with. A
//given delegate can only be used by a single manager at a time.
SetManager(manager *GameManager)
//Manager returns the Manager that was set on this delegate.
Manager() *GameManager
}
//PropertyCollection is just an alias for map[string]interface{}
type PropertyCollection map[string]interface{}

//Copy returns a shallow copy of the PropertyCollection: keys and values are
//copied into a new map, but values themselves are not deep-copied.
func (p PropertyCollection) Copy() PropertyCollection {
	result := make(PropertyCollection, len(p))
	//BUGFIX: iterate over the receiver p, not over the freshly-made (empty)
	//result map; the previous loop never copied anything and always returned
	//an empty collection.
	for key, val := range p {
		result[key] = val
	}
	return result
}
//DefaultGameDelegate is a struct that implements stubs for all of
//GameDelegate's methods. This makes it easy to override just one or two
//methods by creating your own struct that anonymously embeds this one. Name,
//GameStateConstructor, PlayerStateConstructor, and ConfigureMoves are not
//implemented, since those almost certainly must be overridden for your
//particular game.
type DefaultGameDelegate struct {
	//manager is the GameManager stored by SetManager and returned by
	//Manager().
	manager *GameManager
	//moveProgressions is not referenced by any method visible in this file;
	//presumably it is populated and consumed elsewhere in the package —
	//confirm before relying on it.
	moveProgressions map[int][]string
}
//Diagram is a stub that returns a fixed placeholder string; real games
//should override it to render their state as ascii art.
func (d *DefaultGameDelegate) Diagram(state ImmutableState) string {
	const placeholder = "This should be overriden to render a reasonable state here"
	return placeholder
}
//DisplayName by default just returns the Name() reported by whichever
//delegate is currently installed on the manager.
func (d *DefaultGameDelegate) DisplayName() string {
	delegate := d.Manager().Delegate()
	return delegate.Name()
}
//Description defaults to the empty string if not overridden.
func (d *DefaultGameDelegate) Description() string {
	const noDescription = ""
	return noDescription
}
//Manager returns whatever manager was previously handed to SetManager (nil
//if SetManager has never been called).
func (d *DefaultGameDelegate) Manager() *GameManager {
	return d.manager
}
//SetManager stores the given manager so that later calls to Manager() can
//return it.
func (d *DefaultGameDelegate) SetManager(manager *GameManager) {
	d.manager = manager
}
//DynamicComponentValuesConstructor returns nil, signaling that by default no
//deck has DynamicComponentValues. Override this if your game requires them.
func (d *DefaultGameDelegate) DynamicComponentValuesConstructor(deck *Deck) ConfigurableSubState {
	return nil
}
//The Default ProposeFixUpMove runs through all moves in Moves, in order, and
//returns the first one that returns true from IsFixUp and is legal at the
//current state. In many cases this behavior should be sufficient and need
//not be overwritten. Be extra sure that your FixUpMoves have a conservative
//Legal function, otherwise you could get a panic from applying too many
//FixUp moves. Will emit debug information about why certain fixup moves
//didn't apply if the Manager's log level is Debug or higher.
func (d *DefaultGameDelegate) ProposeFixUpMove(state ImmutableState) Move {
	debugging := d.Manager().Logger().Level >= logrus.DebugLevel

	var baseEntry *logrus.Entry
	if debugging {
		baseEntry = d.Manager().Logger().WithFields(logrus.Fields{
			"game":    state.Game().Id(),
			"version": state.Version(),
		})
		baseEntry.Debug("***** ProposeFixUpMove called *****")
	}

	for _, candidate := range state.Game().Moves() {
		var moveEntry *logrus.Entry
		if debugging {
			moveEntry = baseEntry.WithField("movetype", candidate.Info().Name())
		}

		//Skip anything that isn't a fix-up move.
		if !candidate.IsFixUp() {
			continue
		}

		err := candidate.Legal(state, AdminPlayerIndex)
		if err != nil {
			//Not legal right now; log why and keep scanning.
			if debugging {
				moveEntry.Debug(candidate.Info().Name() + " : " + err.Error())
			}
			continue
		}

		//First legal fix-up move wins.
		if debugging {
			moveEntry.Debug(candidate.Info().Name() + " : MATCH")
		}
		return candidate
	}

	if debugging {
		baseEntry.Debug("NO MATCH")
	}

	//No fix-up moves apply at this state.
	return nil
}
//CurrentPlayerIndex returns gameState.CurrentPlayer, if that is a PlayerIndex
//property. If not, returns ObserverPlayerIndex.
func (d *DefaultGameDelegate) CurrentPlayerIndex(state ImmutableState) PlayerIndex {
	reader := state.ImmutableGameState().Reader()
	index, err := reader.PlayerIndexProp("CurrentPlayer")
	if err != nil {
		//The game state apparently doesn't keep CurrentPlayer there.
		return ObserverPlayerIndex
	}
	return index
}
//CurrentPhase by default returns the value of gameState.Phase, if it is an
//enum. If it is not, it returns -1 instead, to make it clearer that it's an
//invalid CurrentPhase (phase 0 is often valid).
func (d *DefaultGameDelegate) CurrentPhase(state ImmutableState) int {
	reader := state.ImmutableGameState().Reader()
	phaseEnum, err := reader.ImmutableEnumProp("Phase")
	if err != nil {
		//No "Phase" enum property on the game state.
		return -1
	}
	return phaseEnum.Value()
}
//PhaseEnum defaults to the enum named "Phase", which is the conventional
//name for the phase enum. moves.Base handles the case where that enum does
//not exist gracefully.
func (d *DefaultGameDelegate) PhaseEnum() enum.Enum {
	enums := d.Manager().Chest().Enums()
	return enums.Enum("Phase")
}
//DistributeComponentToStarterStack always returns an error: if this stub is
//ever reached, the chest contains at least one component but the embedding
//delegate never said which stack it belongs in, which would violate the
//invariant that every component lives in exactly one stack.
func (d *DefaultGameDelegate) DistributeComponentToStarterStack(state ImmutableState, c Component) (ImmutableStack, error) {
	//The stub returns an error, because if this is called that means there
	//was a component in the deck. And if we didn't store it in a stack, then
	//we are in violation of the invariant.
	return nil, errors.New("DistributeComponentToStarterStack was called, but the component was not stored in a stack")
}
//SanitizationPolicy uses struct tags to identify the right policy to apply
//(see the package doc on SanitizationPolicy for how to configure those
//tags). It sees which policies apply given the provided group membership,
//and then returns the LEAST restrictive policy that applies. This behavior
//is almost always what you want; it is rare to need to override this method.
func (d *DefaultGameDelegate) SanitizationPolicy(prop StatePropertyRef, groupMembership map[int]bool) Policy {
	mgr := d.Manager()

	//Pick the validator that corresponds to the property's state group.
	var v *readerValidator
	switch prop.Group {
	case StateGroupGame:
		v = mgr.gameValidator
	case StateGroupPlayer:
		v = mgr.playerValidator
	case StateGroupDynamicComponentValues:
		v = mgr.dynamicComponentValidator[prop.DeckName]
	}
	if v == nil {
		return PolicyInvalid
	}

	perGroup := v.sanitizationPolicy[prop.PropName]

	//Collect the policies for every group this player belongs to.
	var candidates []int
	for groupID, member := range groupMembership {
		//The membership map should only contain true values, but double-check.
		if !member {
			continue
		}
		//Only groups that actually have a policy configured participate.
		if policy, found := perGroup[groupID]; found {
			candidates = append(candidates, int(policy))
		}
	}

	if len(candidates) == 0 {
		return PolicyVisible
	}

	//Sorting ascending puts the least restrictive applicable policy first.
	sort.Ints(candidates)
	return Policy(candidates[0])
}
//ComputedGlobalProperties returns nil by default; override it to export
//computed global properties into the game's JSON.
func (d *DefaultGameDelegate) ComputedGlobalProperties(state ImmutableState) PropertyCollection {
	return nil
}
//ComputedPlayerProperties returns nil by default; override it to export
//computed per-player properties into the game's JSON.
func (d *DefaultGameDelegate) ComputedPlayerProperties(player ImmutablePlayerState) PropertyCollection {
	return nil
}
//BeginSetUp is a no-op by default: there is nothing to configure before
//components are distributed unless your game needs it.
func (d *DefaultGameDelegate) BeginSetUp(state State, variant Variant) error {
	return nil
}
//FinishSetUp is a no-op by default: no post-distribution fix-ups (like
//shuffling draw stacks) are performed unless your game overrides this.
func (d *DefaultGameDelegate) FinishSetUp(state State) error {
	return nil
}
//defaultCheckGameFinishedDelegate can be private because DefaultGameDelegate
//implements the methods by default. It is the contract that the default
//CheckGameFinished expects the installed delegate to satisfy (via a type
//assertion).
type defaultCheckGameFinishedDelegate interface {
	//GameEndConditionMet reports whether the game is over and ready to score.
	GameEndConditionMet(state ImmutableState) bool
	//PlayerScore returns the score for the given player state.
	PlayerScore(pState ImmutablePlayerState) int
	//LowScoreWins reports whether lower scores beat higher ones.
	LowScoreWins() bool
}
//PlayerGameScorer is an optional interface that can be implemented by
//PlayerSubStates. If it is implemented, DefaultGameDelegate's default
//PlayerScore() method will return its GameScore() value instead of 0.
type PlayerGameScorer interface {
	//GameScore returns the overall score for the game for the player at this
	//point in time.
	GameScore() int
}
//CheckGameFinished by default checks delegate.GameEndConditionMet(). If true,
//then it fetches delegate.PlayerScore() for each player and returns all
//players who have the highest score as winners. (If delegate.LowScoreWins()
//is true, instead of highest score, it does lowest score.) To use this
//implementation simply implement those methods. This is sufficient for many
//games, but not all, so sometimes needs to be overriden.
func (d *DefaultGameDelegate) CheckGameFinished(state ImmutableState) (finished bool, winners []PlayerIndex) {
	if d.Manager() == nil {
		return false, nil
	}

	//Have to reach up to the manager's delegate to get the thing that embeds
	//us. Don't use the comma-ok pattern because we want to panic with
	//descriptive error if not met.
	checkGameFinished := d.Manager().Delegate().(defaultCheckGameFinishedDelegate)

	if !checkGameFinished.GameEndConditionMet(state) {
		return false, nil
	}

	lowScoreWins := checkGameFinished.LowScoreWins()

	players := state.ImmutablePlayerStates()
	if len(players) == 0 {
		return true, nil
	}

	//Game is over. What's the most extreme (max or min, depending on
	//LowScoreWins) score?
	//
	//BUGFIX: seed with the first player's actual score rather than 0.
	//Seeding with 0 silently produced zero winners whenever all scores were
	//negative (high-score mode) or all positive (low-score mode), because no
	//player's score matched the bogus extreme of 0.
	extremeScore := checkGameFinished.PlayerScore(players[0])

	for _, player := range players[1:] {
		score := checkGameFinished.PlayerScore(player)
		if lowScoreWins {
			if score < extremeScore {
				extremeScore = score
			}
		} else {
			if score > extremeScore {
				extremeScore = score
			}
		}
	}

	//Everyone who matches the extreme score is a winner.
	for i, player := range players {
		if checkGameFinished.PlayerScore(player) == extremeScore {
			winners = append(winners, PlayerIndex(i))
		}
	}

	return true, winners
}
//LowScoreWins is consulted by DefaultGameDelegate's CheckGameFinished. When
//false (the default) higher scores are better; when true lower scores win
//(similar to golf), and all players with the lowest score win.
func (d *DefaultGameDelegate) LowScoreWins() bool {
	return false
}
//GameEndConditionMet is consulted by the default CheckGameFinished
//implementation; it should return true once the game is over and ready for
//scoring. If you override CheckGameFinished you don't need to override this.
//The default simply returns false.
func (d *DefaultGameDelegate) GameEndConditionMet(state ImmutableState) bool {
	return false
}
//PlayerScore is consulted by the default CheckGameFinished implementation;
//it should return the score for the given player. If you override
//CheckGameFinished you don't need to override this. The default returns
//pState.GameScore() when pState implements PlayerGameScorer, or 0 otherwise.
func (d *DefaultGameDelegate) PlayerScore(pState ImmutablePlayerState) int {
	scorer, implements := pState.(PlayerGameScorer)
	if !implements {
		return 0
	}
	return scorer.GameScore()
}
//DefaultNumPlayers returns 2, the player count used when 0 is passed to
//manager.NewGame().
func (d *DefaultGameDelegate) DefaultNumPlayers() int {
	return 2
}
//MinNumPlayers returns 1, the smallest player count the default
//LegalNumPlayers accepts.
func (d *DefaultGameDelegate) MinNumPlayers() int {
	return 1
}
//MaxNumPlayers returns 16, the largest player count the default
//LegalNumPlayers accepts.
func (d *DefaultGameDelegate) MaxNumPlayers() int {
	return 16
}
//LegalNumPlayers checks that the number of players is between MinNumPlayers
//and MaxNumPlayers, inclusive. Override this only if some player counts in
//that range are not legal, for example a game that requires an even number
//of players.
func (d *DefaultGameDelegate) LegalNumPlayers(numPlayers int) bool {
	delegate := d.Manager().Delegate()
	lo := delegate.MinNumPlayers()
	hi := delegate.MaxNumPlayers()
	if numPlayers < lo {
		return false
	}
	return numPlayers <= hi
}
//Variants returns an empty VariantConfig; games with variant configuration
//should override this.
func (d *DefaultGameDelegate) Variants() VariantConfig {
	var cfg VariantConfig = VariantConfig{}
	return cfg
}
//ConfigureAgents returns nil by default; override this if your game has
//agents.
func (d *DefaultGameDelegate) ConfigureAgents() []Agent {
	return nil
}
//ConfigureEnums returns nil by default. If you use `boardgame-util config`
//to generate your enum set, you typically override this with `return Enums`.
func (d *DefaultGameDelegate) ConfigureEnums() *enum.Set {
	return nil
}
//ConfigureDecks returns an empty (but non-nil) map. Override this if your
//game has any components, which the vast majority of games do.
func (d *DefaultGameDelegate) ConfigureDecks() map[string]*Deck {
	return map[string]*Deck{}
}
//ConfigureConstants returns nil by default. Override this if you have
//constants you want to use client-side or in tag-based struct
//auto-inflaters.
func (d *DefaultGameDelegate) ConfigureConstants() PropertyCollection {
	return nil
}
|
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gcsproxy
import (
"errors"
"fmt"
"github.com/googlecloudplatform/gcsfuse/lease"
"github.com/jacobsa/gcloud/gcs"
"golang.org/x/net/context"
)
// Given an object record and content that was originally derived from that
// object's contents (and potentially modified):
//
// * If the content has not been modified, return a nil read proxy and a nil
// new object.
//
// * Otherwise, write out a new generation in the bucket (failing with
// *gcs.PreconditionError if the source generation is no longer current)
// and return a read proxy for that object's contents.
//
// In the second case, the MutableContent is destroyed. Otherwise, including
// when this function fails, it is guaranteed to still be valid.
//
// NOTE(review): the dirty path below is not yet implemented — it always
// returns a "TODO" error. The bucket parameter is currently unused.
func Sync(
	ctx context.Context,
	srcObject *gcs.Object,
	content MutableContent,
	bucket gcs.Bucket) (
	newProxy lease.ReadProxy, newObject *gcs.Object, err error) {
	// Stat the content to learn how much of it has been dirtied.
	sr, err := content.Stat(ctx)
	if err != nil {
		err = fmt.Errorf("Stat: %v", err)
		return
	}
	// Make sure the dirty threshold makes sense: it can never legitimately
	// exceed the source object's size.
	if sr.DirtyThreshold > int64(srcObject.Size) {
		err = fmt.Errorf(
			"Weird DirtyThreshold field: %d vs. %d",
			sr.DirtyThreshold,
			srcObject.Size)
		return
	}
	// If the content hasn't been dirtied, we're done: return nils per the
	// contract above.
	if sr.DirtyThreshold == int64(srcObject.Size) {
		return
	}
	// Otherwise, we need to create a new generation. Not implemented yet.
	err = errors.New("TODO")
	return
}
Call the bucket.
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gcsproxy
import (
"fmt"
"github.com/googlecloudplatform/gcsfuse/lease"
"github.com/jacobsa/gcloud/gcs"
"golang.org/x/net/context"
)
// Given an object record and content that was originally derived from that
// object's contents (and potentially modified):
//
// * If the content has not been modified, return a nil read proxy and a nil
// new object.
//
// * Otherwise, write out a new generation in the bucket (failing with
// *gcs.PreconditionError if the source generation is no longer current)
// and return a read proxy for that object's contents.
//
// In the second case, the MutableContent is destroyed. Otherwise, including
// when this function fails, it is guaranteed to still be valid.
func Sync(
	ctx context.Context,
	srcObject *gcs.Object,
	content MutableContent,
	bucket gcs.Bucket) (rp lease.ReadProxy, o *gcs.Object, err error) {
	// Find out how much of the content has been dirtied.
	sr, statErr := content.Stat(ctx)
	if statErr != nil {
		return nil, nil, fmt.Errorf("Stat: %v", statErr)
	}

	// Sanity check: the dirty threshold can never exceed the source size.
	if sr.DirtyThreshold > int64(srcObject.Size) {
		return nil, nil, fmt.Errorf(
			"Stat returned weird DirtyThreshold field: %d vs. %d",
			sr.DirtyThreshold,
			srcObject.Size)
	}

	// Clean content means there is nothing to sync.
	if sr.DirtyThreshold == int64(srcObject.Size) {
		return nil, nil, nil
	}

	// Dirty: write out a new generation, guarded by a precondition on the
	// source generation so that we fail if it has gone stale.
	o, err = bucket.CreateObject(
		ctx,
		&gcs.CreateObjectRequest{
			Name: srcObject.Name,
			Contents: &mutableContentReader{
				Ctx:     ctx,
				Content: content,
			},
			GenerationPrecondition: &srcObject.Generation,
		})
	if err != nil {
		// Special case: pass precondition errors through untouched so the
		// caller can detect them by type.
		if _, isPrecondition := err.(*gcs.PreconditionError); isPrecondition {
			return rp, o, err
		}
		return rp, o, fmt.Errorf("CreateObject: %v", err)
	}

	return rp, o, nil
}
////////////////////////////////////////////////////////////////////////
// mutableContentReader
////////////////////////////////////////////////////////////////////////
// An io.Reader that wraps a MutableContent object, reading starting from a
// base offset.
type mutableContentReader struct {
	// Ctx is the context forwarded to Content.ReadAt on every Read call.
	Ctx context.Context
	// Content is the underlying source being read.
	Content MutableContent
	// Offset is the position of the next read; Read advances it by the
	// number of bytes returned.
	Offset int64
}
// Read implements io.Reader by delegating to Content.ReadAt at the current
// offset, then advancing the offset by however many bytes were produced.
func (mcr *mutableContentReader) Read(p []byte) (n int, err error) {
	count, readErr := mcr.Content.ReadAt(mcr.Ctx, p, mcr.Offset)
	mcr.Offset += int64(count)
	return count, readErr
}
|
/*
Copyright 2015 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package auth implements certificate signing authority and access control server
// Authority server is composed of several parts:
//
// * Authority server itself that implements signing and acl logic
// * HTTP server wrapper for authority server
// * HTTP client wrapper
//
package auth
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"sync"
"time"
"github.com/gravitational/teleport"
"github.com/gravitational/teleport/lib/backend"
"github.com/gravitational/teleport/lib/defaults"
"github.com/gravitational/teleport/lib/services"
"github.com/gravitational/teleport/lib/services/local"
"github.com/gravitational/teleport/lib/utils"
log "github.com/Sirupsen/logrus"
"github.com/coreos/go-oidc/jose"
"github.com/coreos/go-oidc/oauth2"
"github.com/coreos/go-oidc/oidc"
"github.com/gravitational/trace"
"github.com/jonboulle/clockwork"
"github.com/tstranex/u2f"
)
// Authority implements a minimal key-management facility for generating
// OpenSSH-compatible public/private key pairs and OpenSSH certificates.
type Authority interface {
	// GenerateKeyPair generates a new keypair, optionally protected by the
	// given passphrase.
	GenerateKeyPair(passphrase string) (privKey []byte, pubKey []byte, err error)
	// GetNewKeyPairFromPool returns a new keypair from a pre-generated
	// in-memory pool (cheaper than generating on demand).
	GetNewKeyPairFromPool() (privKey []byte, pubKey []byte, err error)
	// GenerateHostCert takes the private key of the CA, public key of the new host,
	// along with metadata (host ID, node name, cluster name, roles, and ttl) and generates
	// a host certificate.
	GenerateHostCert(certParams services.CertParams) ([]byte, error)
	// GenerateUserCert generates a user certificate; it takes pkey as the
	// signing private key (user certificate authority).
	GenerateUserCert(pkey, key []byte, teleportUsername string, allowedLogins []string, ttl time.Duration, permitAgentForwarding bool) ([]byte, error)
}
// AuthServerOption allows setting options as functional arguments to
// NewAuthServer; each option mutates the AuthServer before it is returned.
type AuthServerOption func(*AuthServer)
// NewAuthServer creates and configures a new AuthServer instance. Any
// service left nil on cfg is replaced with the local backend-based default;
// opts are applied after construction, and a real clock is installed if no
// option supplied one.
func NewAuthServer(cfg *InitConfig, opts ...AuthServerOption) *AuthServer {
	// Fill in backend-based defaults for any service the caller omitted.
	if cfg.Trust == nil {
		cfg.Trust = local.NewCAService(cfg.Backend)
	}
	if cfg.Presence == nil {
		cfg.Presence = local.NewPresenceService(cfg.Backend)
	}
	if cfg.Provisioner == nil {
		cfg.Provisioner = local.NewProvisioningService(cfg.Backend)
	}
	if cfg.Identity == nil {
		cfg.Identity = local.NewIdentityService(cfg.Backend)
	}
	if cfg.Access == nil {
		cfg.Access = local.NewAccessService(cfg.Backend)
	}
	if cfg.ClusterAuthPreferenceService == nil {
		cfg.ClusterAuthPreferenceService = local.NewClusterAuthPreferenceService(cfg.Backend)
	}
	if cfg.UniversalSecondFactorService == nil {
		cfg.UniversalSecondFactorService = local.NewUniversalSecondFactorService(cfg.Backend)
	}

	srv := &AuthServer{
		bk:                            cfg.Backend,
		Authority:                     cfg.Authority,
		Trust:                         cfg.Trust,
		Presence:                      cfg.Presence,
		Provisioner:                   cfg.Provisioner,
		Identity:                      cfg.Identity,
		Access:                        cfg.Access,
		DomainName:                    cfg.DomainName,
		AuthServiceName:               cfg.AuthServiceName,
		StaticTokens:                  cfg.StaticTokens,
		ClusterAuthPreference:         cfg.ClusterAuthPreferenceService,
		UniversalSecondFactorSettings: cfg.UniversalSecondFactorService,
		oidcClients:                   make(map[string]*oidcClient),
		DeveloperMode:                 cfg.DeveloperMode,
	}

	for _, opt := range opts {
		opt(srv)
	}

	// Default to the real wall clock unless an option injected a fake one.
	if srv.clock == nil {
		srv.clock = clockwork.NewRealClock()
	}
	return srv
}
// AuthServer keeps the cluster together. It acts as a certificate authority (CA) for
// a cluster and:
//   - generates the keypair for the node it's running on
//   - invites other SSH nodes to a cluster, by issuing invite tokens
//   - adds other SSH nodes to a cluster, by checking their token and signing their keys
//   - same for users and their sessions
//   - checks public keys to see if they're signed by it (can be trusted or not)
type AuthServer struct {
	// lock guards mutable state; exactly which fields it protects is not
	// visible in this file — presumably oidcClients. Confirm before relying
	// on it.
	lock sync.Mutex
	// oidcClients caches OIDC clients, keyed by string (presumably the
	// connector ID).
	oidcClients map[string]*oidcClient
	// clock is used for all time reads (e.g. login-attempt bookkeeping in
	// withUserLock) so tests can inject a fake clock.
	clock clockwork.Clock
	// bk is the storage backend; Close() closes it.
	bk backend.Backend
	// DeveloperMode should only be used during development as it does several
	// unsafe things like log sensitive information to console as well as
	// not verify certificates.
	DeveloperMode bool
	// Authority provides key and certificate generation (see the Authority
	// interface above).
	Authority
	// DomainName stores the FQDN of the signing CA (its certificate will have this
	// name embedded). It is usually set to the GUID of the host the Auth service runs on
	DomainName string
	// AuthServiceName is a human-readable name of this CA. If several Auth services are running
	// (managing multiple teleport clusters) this field is used to tell them apart in UIs
	// It usually defaults to the hostname of the machine the Auth service runs on.
	AuthServiceName string
	// StaticTokens are pre-defined host provisioning tokens supplied via config file for
	// environments where paranoid security is not needed
	StaticTokens []services.ProvisionToken
	// The embedded services below supply trust, presence, provisioning,
	// identity, access-control, and second-factor storage; their methods are
	// promoted onto AuthServer.
	services.Trust
	services.Presence
	services.Provisioner
	services.Identity
	services.Access
	services.ClusterAuthPreference
	services.UniversalSecondFactorSettings
}
// Close releases the auth server's storage backend, if one was configured.
func (a *AuthServer) Close() error {
	if a.bk == nil {
		return nil
	}
	return trace.Wrap(a.bk.Close())
}
// GetDomainName returns the domain name that identifies this authority
// server, also known as the "cluster name". It never fails.
func (a *AuthServer) GetDomainName() (string, error) {
	name := a.DomainName
	return name, nil
}
// GenerateHostCert uses the private key of the host CA to sign the public
// key of the host (along with metadata like host ID, node name, roles, and
// ttl), producing a host certificate.
func (s *AuthServer) GenerateHostCert(hostPublicKey []byte, hostID, nodeName, clusterName string, roles teleport.Roles, ttl time.Duration) ([]byte, error) {
	// Look up the host CA whose key will sign this certificate.
	hostCA, err := s.Trust.GetCertAuthority(services.CertAuthID{
		Type:       services.HostCA,
		DomainName: s.DomainName,
	}, true)
	if err != nil {
		return nil, trace.BadParameter("failed to load host CA for '%s': %v", s.DomainName, err)
	}

	// Fetch the CA's signing key.
	signingKey, err := hostCA.FirstSigningKey()
	if err != nil {
		return nil, trace.Wrap(err)
	}

	// Create and sign the certificate.
	return s.Authority.GenerateHostCert(services.CertParams{
		PrivateCASigningKey: signingKey,
		PublicHostKey:       hostPublicKey,
		HostID:              hostID,
		NodeName:            nodeName,
		ClusterName:         clusterName,
		Roles:               roles,
		TTL:                 ttl,
	})
}
// GenerateUserCert generates a user certificate, signing the given public
// key with the user CA's first signing key.
func (s *AuthServer) GenerateUserCert(key []byte, username string, allowedLogins []string, ttl time.Duration, canForwardAgents bool) ([]byte, error) {
	userCA, err := s.Trust.GetCertAuthority(services.CertAuthID{
		Type:       services.UserCA,
		DomainName: s.DomainName,
	}, true)
	if err != nil {
		return nil, trace.Wrap(err)
	}

	signingKey, err := userCA.FirstSigningKey()
	if err != nil {
		return nil, trace.Wrap(err)
	}

	return s.Authority.GenerateUserCert(signingKey, key, username, allowedLogins, ttl, canForwardAgents)
}
// withUserLock executes function authenticateFn that performs user
// authentication. If authenticateFn returns a non-nil error, the login
// attempt will be logged as failed. The only exception to this rule is
// ConnectionProblemError: in that case access will be denied, but the login
// attempt will not be recorded — this is done to avoid potential user
// lockouts due to backend failures. If the user exceeds
// defaults.MaxLoginAttempts, the user account will be locked for
// defaults.AccountLockInterval.
func (s *AuthServer) withUserLock(username string, authenticateFn func() error) error {
	user, err := s.Identity.GetUser(username)
	if err != nil {
		return trace.Wrap(err)
	}
	status := user.GetStatus()
	if status.IsLocked && status.LockExpires.After(s.clock.Now().UTC()) {
		// BUGFIX: the format string has two verbs, so both the username and
		// the expiry time must be supplied (previously only one argument
		// was passed, producing a mangled message).
		return trace.AccessDenied("user %v is locked until %v", username, utils.HumanTimeFormat(status.LockExpires))
	}
	fnErr := authenticateFn()
	if fnErr == nil {
		return nil
	}
	// do not lock user in case if DB is flaky or down
	// BUGFIX: inspect the authentication error (fnErr), not the stale err
	// from GetUser above, which is always nil at this point.
	if trace.IsConnectionProblem(fnErr) {
		return trace.Wrap(fnErr)
	}
	// log failed attempt and possibly lock user
	attempt := services.LoginAttempt{Time: s.clock.Now().UTC(), Success: false}
	err = s.AddUserLoginAttempt(username, attempt, defaults.AttemptTTL)
	if err != nil {
		log.Error(trace.DebugReport(err))
		return trace.Wrap(fnErr)
	}
	loginAttempts, err := s.Identity.GetUserLoginAttempts(username)
	if err != nil {
		log.Error(trace.DebugReport(err))
		return trace.Wrap(fnErr)
	}
	if !services.LastFailed(defaults.MaxLoginAttempts, loginAttempts) {
		log.Debugf("%v user has less than %v failed login attempts", username, defaults.MaxLoginAttempts)
		return trace.Wrap(fnErr)
	}
	lockUntil := s.clock.Now().UTC().Add(defaults.AccountLockInterval)
	// BUGFIX: report the new lock deadline (lockUntil), not the previous
	// status.LockExpires, which is zero or stale for a user who is only now
	// being locked.
	message := fmt.Sprintf("%v exceeds %v failed login attempts, locked until %v",
		username, defaults.MaxLoginAttempts, utils.HumanTimeFormat(lockUntil))
	log.Debug(message)
	user.SetLocked(lockUntil, "user has exceeded maximum failed login attempts")
	err = s.Identity.UpsertUser(user)
	if err != nil {
		log.Error(trace.DebugReport(err))
		return trace.Wrap(fnErr)
	}
	return trace.AccessDenied(message)
}
// SignIn checks the user's password under the account-lock policy and, on
// success, creates and returns a new web session for the user.
func (s *AuthServer) SignIn(user string, password []byte) (services.WebSession, error) {
	checkPassword := func() error {
		return s.CheckPasswordWOToken(user, password)
	}
	if err := s.withUserLock(user, checkPassword); err != nil {
		return nil, trace.Wrap(err)
	}
	return s.PreAuthenticatedSignIn(user)
}
// PreAuthenticatedSignIn is for 2-way authentication methods like U2F where the
// password has already been checked before issuing the second factor challenge.
// It creates, persists, and returns (without secrets) a new web session.
func (s *AuthServer) PreAuthenticatedSignIn(user string) (services.WebSession, error) {
	session, err := s.NewWebSession(user)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	if err = s.UpsertWebSession(user, session); err != nil {
		return nil, trace.Wrap(err)
	}
	return session.WithoutSecrets(), nil
}
// U2FSignRequest checks the user's password under the account-lock policy and,
// on success, issues and persists a new U2F sign challenge for the user's
// registered device.
func (s *AuthServer) U2FSignRequest(user string, password []byte) (*u2f.SignRequest, error) {
	usf, err := s.GetUniversalSecondFactor()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	err = s.withUserLock(user, func() error {
		return s.CheckPasswordWOToken(user, password)
	})
	if err != nil {
		return nil, trace.Wrap(err)
	}
	registration, err := s.GetU2FRegistration(user)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	challenge, err := u2f.NewChallenge(usf.GetAppID(), usf.GetFacets())
	if err != nil {
		return nil, trace.Wrap(err)
	}
	if err = s.UpsertU2FSignChallenge(user, challenge); err != nil {
		return nil, trace.Wrap(err)
	}
	return challenge.SignRequest(*registration), nil
}
// CheckU2FSignResponse verifies a U2F sign response against the stored
// registration, counter, and challenge for the user, and persists the
// updated device counter on success.
func (s *AuthServer) CheckU2FSignResponse(user string, response *u2f.SignResponse) error {
	// make sure U2F is actually set up on the backend before anything else
	if _, err := s.GetUniversalSecondFactor(); err != nil {
		return trace.Wrap(err)
	}
	reg, err := s.GetU2FRegistration(user)
	if err != nil {
		return trace.Wrap(err)
	}
	counter, err := s.GetU2FRegistrationCounter(user)
	if err != nil {
		return trace.Wrap(err)
	}
	challenge, err := s.GetU2FSignChallenge(user)
	if err != nil {
		return trace.Wrap(err)
	}
	newCounter, err := reg.Authenticate(*response, *challenge, counter)
	if err != nil {
		return trace.Wrap(err)
	}
	return trace.Wrap(s.UpsertU2FRegistrationCounter(user, newCounter))
}
// ExtendWebSession creates a new web session for a user based on a valid previous sessionID,
// method is used to renew the web session for a user
func (s *AuthServer) ExtendWebSession(user string, prevSessionID string) (services.WebSession, error) {
	prevSession, err := s.GetWebSession(user, prevSessionID)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// consider absolute expiry time that may be set for this session
	// by some external identity service, so we can not renew this session
	// any more without extra logic for renewal with external OIDC provider
	expiresAt := prevSession.GetExpiryTime()
	if !expiresAt.IsZero() && expiresAt.Before(s.clock.Now().UTC()) {
		return nil, trace.NotFound("web session has expired")
	}
	sess, err := s.NewWebSession(user)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// the renewed session inherits the previous session's absolute expiry
	sess.SetExpiryTime(expiresAt)
	// bearer token lives for the shorter of the remaining session TTL and BearerTokenTTL
	bearerTokenTTL := utils.MinTTL(utils.ToTTL(s.clock, expiresAt), BearerTokenTTL)
	sess.SetBearerTokenExpiryTime(s.clock.Now().UTC().Add(bearerTokenTTL))
	if err := s.UpsertWebSession(user, sess); err != nil {
		return nil, trace.Wrap(err)
	}
	sess, err = services.GetWebSessionMarshaler().ExtendWebSession(sess)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return sess, nil
}
// CreateWebSession creates a new web session for a user without any
// checks; it is used by admins.
func (s *AuthServer) CreateWebSession(user string) (services.WebSession, error) {
	session, err := s.NewWebSession(user)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	if err = s.UpsertWebSession(user, session); err != nil {
		return nil, trace.Wrap(err)
	}
	session, err = services.GetWebSessionMarshaler().GenerateWebSession(session)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return session, nil
}
// GenerateToken generates a cryptographically random provisioning token that
// allows a node to join the cluster with any of the given roles, valid for ttl.
func (s *AuthServer) GenerateToken(roles teleport.Roles, ttl time.Duration) (string, error) {
	// reject the request early if any requested role is invalid
	for _, role := range roles {
		if err := role.Check(); err != nil {
			return "", trace.Wrap(err)
		}
	}
	token, err := utils.CryptoRandomHex(TokenLenBytes)
	if err != nil {
		return "", trace.Wrap(err)
	}
	// fix: wrap the error for consistency with the rest of this file
	// (it was previously returned bare, losing the stack trace)
	if err := s.Provisioner.UpsertToken(token, roles, ttl); err != nil {
		return "", trace.Wrap(err)
	}
	return token, nil
}
// GenerateServerKeys generates a new host private key and a certificate
// (signed by the host certificate authority) for a node.
func (s *AuthServer) GenerateServerKeys(hostID string, nodeName string, roles teleport.Roles) (*PackedKeys, error) {
	privateKey, publicKey, err := s.GenerateKeyPair("")
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// the host certificate is issued with an infinite ttl (0)
	cert, err := s.GenerateHostCert(publicKey, hostID, nodeName, s.DomainName, roles, 0)
	if err != nil {
		log.Warningf("[AUTH] Node %q [%v] can not join: certificate generation error: %v", nodeName, hostID, err)
		return nil, trace.Wrap(err)
	}
	return &PackedKeys{Key: privateKey, Cert: cert}, nil
}
// ValidateToken takes a provisioning token value and finds out if it's valid.
// Returns the list of roles this token allows its owner to assume, or an
// error if the token cannot be found.
func (s *AuthServer) ValidateToken(token string) (roles teleport.Roles, e error) {
	// statically configured tokens take precedence
	for _, static := range s.StaticTokens {
		if static.Token == token {
			return static.Roles, nil
		}
	}
	// otherwise fall back to tokens kept in the token storage
	tok, err := s.Provisioner.GetToken(token)
	if err != nil {
		log.Info(err)
		return nil, trace.Errorf("token not recognized")
	}
	return tok.Roles, nil
}
// checkTokenTTL deletes the given token if its TTL is over. Returns 'false'
// if this token cannot be used.
func (s *AuthServer) checkTokenTTL(token string) bool {
	// look at the tokens in the token storage
	tok, err := s.Provisioner.GetToken(token)
	if err != nil {
		// lookup failed — presumably a static token (not in storage) or a
		// flaky backend; do not reject the token here
		log.Warn(err)
		return true
	}
	now := s.clock.Now().UTC()
	if tok.Expires.Before(now) {
		// best-effort cleanup of the expired token; the failure is logged
		// but the token is still rejected
		if err = s.DeleteToken(token); err != nil {
			log.Error(err)
		}
		return false
	}
	return true
}
// RegisterUsingToken adds a new node to the Teleport cluster using previously issued token.
// A node must also request a specific role (and the role must match one of the roles
// the token was generated for).
//
// If a token was generated with a TTL, it gets enforced (can't register new nodes after TTL expires)
// If a token was generated with a TTL=0, it means it's a single-use token and it gets destroyed
// after a successful registration.
func (s *AuthServer) RegisterUsingToken(token, hostID string, nodeName string, role teleport.Role) (*PackedKeys, error) {
	log.Infof("[AUTH] Node %q [%v] trying to join with role: %v", nodeName, hostID, role)
	if hostID == "" {
		return nil, trace.BadParameter("HostID cannot be empty")
	}
	if err := role.Check(); err != nil {
		return nil, trace.Wrap(err)
	}
	// make sure the token is valid
	roles, err := s.ValidateToken(token)
	if err != nil {
		msg := fmt.Sprintf("%q [%v] can not join the cluster with role %s, token error: %v", nodeName, hostID, role, err)
		log.Warnf("[AUTH] %s", msg)
		return nil, trace.AccessDenied(msg)
	}
	// make sure the caller requested a role allowed by the token
	if !roles.Include(role) {
		msg := fmt.Sprintf("%q [%v] can not join the cluster, the token does not allow %q role", nodeName, hostID, role)
		log.Warningf("[AUTH] %s", msg)
		return nil, trace.BadParameter(msg)
	}
	// reject (and clean up) expired tokens
	if !s.checkTokenTTL(token) {
		return nil, trace.AccessDenied("%q [%v] can not join the cluster. Token has expired", nodeName, hostID)
	}
	// generate and return host certificate and keys
	keys, err := s.GenerateServerKeys(hostID, nodeName, teleport.Roles{role})
	if err != nil {
		return nil, trace.Wrap(err)
	}
	log.Infof("[AUTH] Node %q [%v] joined the cluster", nodeName, hostID)
	return keys, nil
}
// RegisterNewAuthServer validates that the token carries the Auth role and,
// if so, consumes (deletes) it. The token is single-use for auth servers.
func (s *AuthServer) RegisterNewAuthServer(token string) error {
	tok, err := s.Provisioner.GetToken(token)
	if err != nil {
		return trace.Wrap(err)
	}
	if !tok.Roles.Include(teleport.RoleAuth) {
		return trace.AccessDenied("role does not match")
	}
	return trace.Wrap(s.DeleteToken(token))
}
// DeleteToken removes a user invite token or a node provisioning token.
// Statically configured tokens cannot be deleted.
func (s *AuthServer) DeleteToken(token string) (err error) {
	// statically configured tokens are immutable
	for _, static := range s.StaticTokens {
		if static.Token == token {
			return trace.BadParameter("token %s is statically configured and cannot be removed", token)
		}
	}
	// try the user invite token first, then the node provisioning token;
	// the last error is returned if neither deletion succeeds
	if err = s.Identity.DeleteSignupToken(token); err == nil {
		return nil
	}
	if err = s.Provisioner.DeleteToken(token); err == nil {
		return nil
	}
	return trace.Wrap(err)
}
// GetTokens returns all tokens (machine provisioning ones and user invitation
// tokens). Machine tokens usually have "node roles" like auth,proxy,node, and
// user invitation tokens have the 'signup' role.
func (s *AuthServer) GetTokens() (tokens []services.ProvisionToken, err error) {
	// node provisioning tokens
	tokens, err = s.Provisioner.GetTokens()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// statically configured tokens
	tokens = append(tokens, s.StaticTokens...)
	// user invitation tokens, converted to the machine token shape
	userTokens, err := s.Identity.GetSignupTokens()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	for _, ut := range userTokens {
		tokens = append(tokens, services.ProvisionToken{
			Token:   ut.Token,
			Expires: ut.Expires,
			Roles:   teleport.Roles{teleport.RoleSignup},
		})
	}
	return tokens, nil
}
// NewWebSession builds (but does not persist) a new web session for userName:
// it generates a session token and a bearer token, takes a fresh keypair from
// the pool, and signs a user certificate with the user CA so the session can be
// used for SSH access. Callers are expected to UpsertWebSession the result.
func (s *AuthServer) NewWebSession(userName string) (services.WebSession, error) {
	token, err := utils.CryptoRandomHex(TokenLenBytes)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	bearerToken, err := utils.CryptoRandomHex(TokenLenBytes)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	priv, pub, err := s.GetNewKeyPairFromPool()
	if err != nil {
		return nil, err
	}
	// the user CA of this cluster signs the session certificate
	ca, err := s.Trust.GetCertAuthority(services.CertAuthID{
		Type:       services.UserCA,
		DomainName: s.DomainName,
	}, true)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	privateKey, err := ca.FirstSigningKey()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	user, err := s.GetUser(userName)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// collect the user's roles to compute TTLs and allowed logins
	var roles services.RoleSet
	for _, roleName := range user.GetRoles() {
		role, err := s.Access.GetRole(roleName)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		roles = append(roles, role)
	}
	sessionTTL := roles.AdjustSessionTTL(defaults.CertDuration)
	bearerTokenTTL := utils.MinTTL(sessionTTL, BearerTokenTTL)
	allowedLogins, err := roles.CheckLogins(sessionTTL)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// cert TTL is set to bearer token TTL as we expect active session to renew
	// the token every BearerTokenTTL period
	cert, err := s.Authority.GenerateUserCert(privateKey, pub, user.GetName(), allowedLogins, bearerTokenTTL, roles.CanForwardAgents())
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return services.NewWebSession(token, services.WebSessionSpecV2{
		User:               user.GetName(),
		Priv:               priv,
		Pub:                cert,
		Expires:            s.clock.Now().UTC().Add(sessionTTL),
		BearerToken:        bearerToken,
		BearerTokenExpires: s.clock.Now().UTC().Add(bearerTokenTTL),
	}), nil
}
// UpsertWebSession persists the web session for the user, keyed by the
// session's own name.
func (s *AuthServer) UpsertWebSession(user string, sess services.WebSession) error {
	return s.Identity.UpsertWebSession(user, sess.GetName(), sess)
}
// GetWebSession returns the web session with the given id for the user,
// including its secrets.
func (s *AuthServer) GetWebSession(userName string, id string) (services.WebSession, error) {
	return s.Identity.GetWebSession(userName, id)
}
// GetWebSessionInfo returns the web session with the given id for the user,
// with secret values stripped.
func (s *AuthServer) GetWebSessionInfo(userName string, id string) (services.WebSession, error) {
	session, err := s.Identity.GetWebSession(userName, id)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return session.WithoutSecrets(), nil
}
// DeleteNamespace deletes a namespace. The default namespace cannot be
// removed, and a namespace with registered nodes is rejected.
func (s *AuthServer) DeleteNamespace(namespace string) error {
	if namespace == defaults.Namespace {
		return trace.AccessDenied("can't delete default namespace")
	}
	nodes, err := s.Presence.GetNodes(namespace)
	if err != nil {
		return trace.Wrap(err)
	}
	if count := len(nodes); count != 0 {
		return trace.BadParameter("can't delete namespace %v that has %v registered nodes", namespace, count)
	}
	return s.Presence.DeleteNamespace(namespace)
}
// DeleteWebSession removes the web session with the given id for the user.
func (s *AuthServer) DeleteWebSession(user string, id string) error {
	return trace.Wrap(s.Identity.DeleteWebSession(user, id))
}
// getOIDCClient returns an OIDC client for the connector, reusing a cached
// client when the connector's client configuration is unchanged and creating
// a fresh one otherwise. Access to the cache is guarded by s.lock.
func (s *AuthServer) getOIDCClient(conn services.OIDCConnector) (*oidc.Client, error) {
	s.lock.Lock()
	defer s.lock.Unlock()
	config := oidc.ClientConfig{
		RedirectURL: conn.GetRedirectURL(),
		Credentials: oidc.ClientCredentials{
			ID:     conn.GetClientID(),
			Secret: conn.GetClientSecret(),
		},
		// open id notifies provider that we are using OIDC scopes
		Scope: utils.Deduplicate(append([]string{"openid", "email"}, conn.GetScope()...)),
	}
	// cache hit: reuse the client only if its config is still current
	clientPack, ok := s.oidcClients[conn.GetName()]
	if ok && oidcConfigsEqual(clientPack.config, config) {
		return clientPack.client, nil
	}
	// config changed (or no cached client): drop the stale entry and rebuild
	delete(s.oidcClients, conn.GetName())
	client, err := oidc.NewClient(config)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	client.SyncProviderConfig(conn.GetIssuerURL())
	s.oidcClients[conn.GetName()] = &oidcClient{client: client, config: config}
	return client, nil
}
// UpsertOIDCConnector creates or updates an OIDC connector in the identity backend.
func (s *AuthServer) UpsertOIDCConnector(connector services.OIDCConnector) error {
	return s.Identity.UpsertOIDCConnector(connector)
}
// DeleteOIDCConnector removes the named OIDC connector from the identity backend.
func (s *AuthServer) DeleteOIDCConnector(connectorName string) error {
	return s.Identity.DeleteOIDCConnector(connectorName)
}
// CreateOIDCAuthRequest prepares an OIDC login: it generates a state token,
// builds the provider redirect URL (with optional ACR values), and persists
// the request so the callback can be validated later.
func (s *AuthServer) CreateOIDCAuthRequest(req services.OIDCAuthRequest) (*services.OIDCAuthRequest, error) {
	connector, err := s.Identity.GetOIDCConnector(req.ConnectorID, true)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	oidcClient, err := s.getOIDCClient(connector)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// the random state token ties the provider callback back to this request
	token, err := utils.CryptoRandomHex(TokenLenBytes)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	req.StateToken = token
	oauthClient, err := oidcClient.OAuthClient()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// online is OIDC online scope, "select_account" forces user to always select account
	req.RedirectURL = oauthClient.AuthCodeURL(req.StateToken, "online", "select_account")
	// if the connector has an Authentication Context Class Reference (ACR) value set,
	// update redirect url and add it as a query value.
	acrValue := connector.GetACR()
	if acrValue != "" {
		u, err := url.Parse(req.RedirectURL)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		q := u.Query()
		q.Set("acr_values", acrValue)
		u.RawQuery = q.Encode()
		req.RedirectURL = u.String()
	}
	log.Debugf("[OIDC] Redirect URL: %v", req.RedirectURL)
	err = s.Identity.CreateOIDCAuthRequest(req, defaults.OIDCAuthRequestTTL)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return &req, nil
}
// OIDCAuthResponse is returned when auth server validated callback parameters
// returned from OIDC provider
type OIDCAuthResponse struct {
	// Username is authenticated teleport username
	Username string `json:"username"`
	// Identity contains validated OIDC identity
	Identity services.OIDCIdentity `json:"identity"`
	// Session is a web session generated by the auth server if requested
	// in OIDCAuthRequest
	Session services.WebSession `json:"session,omitempty"`
	// Cert will be generated by certificate authority
	Cert []byte `json:"cert,omitempty"`
	// Req is original oidc auth request
	Req services.OIDCAuthRequest `json:"req"`
	// HostSigners is a list of signing host public keys
	// trusted by proxy, used in console login
	HostSigners []services.CertAuthority `json:"host_signers"`
}
// buildRoles takes a connector and claims and returns a slice of roles. If the claims
// match a concrete roles in the connector, those roles are returned directly. If the
// claims match a template role in the connector, then that role is first created from
// the template, then returned.
func (a *AuthServer) buildRoles(connector services.OIDCConnector, ident *oidc.Identity, claims jose.Claims) ([]string, error) {
	roles := connector.MapClaims(claims)
	if len(roles) == 0 {
		// no concrete role matched: try to instantiate a role from a template
		role, err := connector.RoleFromTemplate(claims)
		if err != nil {
			log.Warningf("[OIDC] Unable to map claims to roles or role templates for %q", connector.GetName())
			return nil, trace.AccessDenied("unable to map claims to roles or role templates for %q", connector.GetName())
		}
		// figure out ttl for role. expires = now + ttl => ttl = expires - now
		ttl := ident.ExpiresAt.Sub(a.clock.Now())
		// upsert templated role so it expires together with the identity
		err = a.Access.UpsertRole(role, ttl)
		if err != nil {
			log.Warningf("[OIDC] Unable to upsert templated role for connector: %q", connector.GetName())
			return nil, trace.AccessDenied("unable to upsert templated role: %q", connector.GetName())
		}
		roles = []string{role.GetName()}
	}
	return roles, nil
}
// createOIDCUser creates or updates a dynamic (OIDC-sourced) user record for
// the authenticated identity, with roles derived from the connector's claim
// mappings. It refuses to overwrite an existing non-OIDC user.
func (a *AuthServer) createOIDCUser(connector services.OIDCConnector, ident *oidc.Identity, claims jose.Claims) error {
	roles, err := a.buildRoles(connector, ident, claims)
	if err != nil {
		return trace.Wrap(err)
	}
	log.Debugf("[IDENTITY] %v/%v is a dynamic identity, generating user with roles: %v", connector.GetName(), ident.Email, roles)
	// the user expires together with the OIDC identity
	user, err := services.GetUserMarshaler().GenerateUser(&services.UserV2{
		Kind:    services.KindUser,
		Version: services.V2,
		Metadata: services.Metadata{
			Name:      ident.Email,
			Namespace: defaults.Namespace,
		},
		Spec: services.UserSpecV2{
			Roles:          roles,
			Expires:        ident.ExpiresAt,
			OIDCIdentities: []services.OIDCIdentity{{ConnectorID: connector.GetName(), Email: ident.Email}},
			CreatedBy: services.CreatedBy{
				User: services.UserRef{Name: "system"},
				Time: time.Now().UTC(),
				Connector: &services.ConnectorRef{
					Type:     teleport.ConnectorOIDC,
					ID:       connector.GetName(),
					Identity: ident.Email,
				},
			},
		},
	})
	if err != nil {
		return trace.Wrap(err)
	}
	// check if a user exists already; NotFound simply means we will create one
	existingUser, err := a.GetUser(ident.Email)
	if err != nil {
		if !trace.IsNotFound(err) {
			return trace.Wrap(err)
		}
	}
	// check if existing user is a non-oidc user, if so, return an error
	if existingUser != nil {
		connectorRef := existingUser.GetCreatedBy().Connector
		if connectorRef == nil || connectorRef.Type != teleport.ConnectorOIDC || connectorRef.ID != connector.GetName() {
			return trace.AlreadyExists("user %q already exists and is not OIDC user", existingUser.GetName())
		}
	}
	// no non-oidc user exists, create or update the existing oidc user
	err = a.UpsertUser(user)
	if err != nil {
		return trace.Wrap(err)
	}
	return nil
}
// claimsFromIDToken parses and verifies the ID token, then extracts its claims.
func claimsFromIDToken(oidcClient *oidc.Client, idToken string) (jose.Claims, error) {
	jwt, err := jose.ParseJWT(idToken)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	if err = oidcClient.VerifyJWT(jwt); err != nil {
		return nil, trace.Wrap(err)
	}
	log.Debugf("[OIDC] Extracting claims from ID token")
	claims, err := jwt.Claims()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return claims, nil
}
// claimsFromUserInfo finds the UserInfo endpoint from the provider config and then extracts claims from it.
//
// Note: We don't request signed JWT responses for UserInfo, instead we force the provider config and
// the issuer to be HTTPS and leave integrity and confidentiality to TLS. Authenticity is taken care of
// during the token exchange.
func claimsFromUserInfo(oidcClient *oidc.Client, issuerURL string, accessToken string) (jose.Claims, error) {
	if err := isHTTPS(issuerURL); err != nil {
		return nil, trace.Wrap(err)
	}
	oac, err := oidcClient.OAuthClient()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	httpClient := oac.HttpClient()
	// fetch the provider config so we can find the UserInfo endpoint
	pc, err := oidc.FetchProviderConfig(httpClient, issuerURL)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// a provider without a UserInfo endpoint is not an error
	if pc.UserInfoEndpoint == nil {
		return nil, nil
	}
	endpoint := pc.UserInfoEndpoint.String()
	if err = isHTTPS(endpoint); err != nil {
		return nil, trace.Wrap(err)
	}
	log.Debugf("[OIDC] Fetching claims from UserInfo endpoint: %q", endpoint)
	req, err := http.NewRequest("GET", endpoint, nil)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", accessToken))
	resp, err := httpClient.Do(req)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		return nil, trace.AccessDenied("bad status code: %v", resp.StatusCode)
	}
	var claims jose.Claims
	if err = json.NewDecoder(resp.Body).Decode(&claims); err != nil {
		return nil, trace.Wrap(err)
	}
	return claims, nil
}
// mergeClaims merges b into a; keys already present in a take precedence.
func mergeClaims(a jose.Claims, b jose.Claims) (jose.Claims, error) {
	for key, value := range b {
		if _, present := a[key]; !present {
			a[key] = value
		}
	}
	return a, nil
}
// getClaims gets claims from ID token and UserInfo and returns UserInfo claims merged into ID token claims.
func (a *AuthServer) getClaims(oidcClient *oidc.Client, issuerURL string, code string) (jose.Claims, error) {
	var err error
	oac, err := oidcClient.OAuthClient()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// exchange the authorization code for tokens
	t, err := oac.RequestToken(oauth2.GrantTypeAuthCode, code)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	idTokenClaims, err := claimsFromIDToken(oidcClient, t.IDToken)
	if err != nil {
		log.Debugf("[OIDC] Unable to fetch ID token claims: %v", err)
		return nil, trace.Wrap(err)
	}
	log.Debugf("[OIDC] ID Token claims: %v", idTokenClaims)
	userInfoClaims, err := claimsFromUserInfo(oidcClient, issuerURL, t.AccessToken)
	if err != nil {
		log.Debugf("[OIDC] Unable to fetch UserInfo claims: %v", err)
		return nil, trace.Wrap(err)
	}
	// claimsFromUserInfo returns (nil, nil) when the provider has no UserInfo endpoint
	if userInfoClaims == nil {
		log.Warn("[OIDC] Provider doesn't offer UserInfo endpoint. Only token claims will be used.")
		return idTokenClaims, nil
	}
	log.Debugf("[OIDC] UserInfo claims: %v", userInfoClaims)
	// make sure that the subject in the userinfo claim matches the subject in
	// the id token otherwise there is the possibility of a token substitution attack.
	// see section 16.11 of the oidc spec for more details.
	var idsub string
	var uisub string
	var exists bool
	if idsub, exists, err = idTokenClaims.StringClaim("sub"); err != nil || !exists {
		log.Debugf("[OIDC] unable to extract sub from ID token")
		return nil, trace.Wrap(err)
	}
	if uisub, exists, err = userInfoClaims.StringClaim("sub"); err != nil || !exists {
		log.Debugf("[OIDC] unable to extract sub from UserInfo")
		return nil, trace.Wrap(err)
	}
	if idsub != uisub {
		log.Debugf("[OIDC] Claim subjects don't match %q != %q", idsub, uisub)
		return nil, trace.BadParameter("invalid subject in UserInfo")
	}
	// ID token claims win over UserInfo claims on conflict
	claims, err := mergeClaims(idTokenClaims, userInfoClaims)
	if err != nil {
		log.Debugf("[OIDC] Unable to merge claims: %v", err)
		return nil, trace.Wrap(err)
	}
	return claims, nil
}
// validateACRValues validates that we get an appropriate response for acr values. By default
// we expect the same value we send, but this function also handles Identity Provider specific
// forms of validation.
func (a *AuthServer) validateACRValues(acrValue string, identityProvider string, claims jose.Claims) error {
	switch identityProvider {
	case teleport.NetIQ:
		// NetIQ returns the acr claim as an object with a "values" list
		log.Debugf("[OIDC] Validating ACR values with %q rules", identityProvider)
		tokenAcr, ok := claims["acr"]
		if !ok {
			return trace.BadParameter("acr claim does not exist")
		}
		// NOTE(review): claims decoded from JSON typically arrive as
		// map[string]interface{}; confirm NetIQ tokens really assert to
		// map[string][]string here, otherwise this branch always fails.
		tokenAcrMap, ok := tokenAcr.(map[string][]string)
		if !ok {
			return trace.BadParameter("acr unknown type: %T", tokenAcr)
		}
		tokenAcrValues, ok := tokenAcrMap["values"]
		if !ok {
			return trace.BadParameter("acr.values not found in claims")
		}
		// the requested value must appear somewhere in the returned list
		acrValueMatched := false
		for _, v := range tokenAcrValues {
			if acrValue == v {
				acrValueMatched = true
				break
			}
		}
		if !acrValueMatched {
			log.Debugf("[OIDC] No ACR match found for %q in %q", acrValue, tokenAcrValues)
			return trace.BadParameter("acr claim does not match")
		}
	default:
		// default rule: the returned acr string claim must equal what we sent
		log.Debugf("[OIDC] Validating ACR values with default rules")
		claimValue, exists, err := claims.StringClaim("acr")
		if !exists {
			return trace.BadParameter("acr claim does not exist")
		}
		if err != nil {
			return trace.Wrap(err)
		}
		if claimValue != acrValue {
			log.Debugf("[OIDC] No ACR match found %q != %q", acrValue, claimValue)
			return trace.BadParameter("acr claim does not match")
		}
	}
	return nil
}
// ValidateOIDCAuthCallback is called by the proxy to check OIDC query parameters
// returned by OIDC Provider, if everything checks out, auth server
// will respond with OIDCAuthResponse, otherwise it will return error
func (a *AuthServer) ValidateOIDCAuthCallback(q url.Values) (*OIDCAuthResponse, error) {
	// fix: the local was named "error", shadowing the predeclared identifier
	if errParam := q.Get("error"); errParam != "" {
		return nil, trace.OAuth2(oauth2.ErrorInvalidRequest, errParam, q)
	}
	code := q.Get("code")
	if code == "" {
		return nil, trace.OAuth2(
			oauth2.ErrorInvalidRequest, "code query param must be set", q)
	}
	stateToken := q.Get("state")
	if stateToken == "" {
		return nil, trace.OAuth2(
			oauth2.ErrorInvalidRequest, "missing state query param", q)
	}
	// the state token ties the callback to a previously created auth request
	req, err := a.Identity.GetOIDCAuthRequest(stateToken)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	connector, err := a.Identity.GetOIDCConnector(req.ConnectorID, true)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	oidcClient, err := a.getOIDCClient(connector)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// extract claims from both the id token and the userinfo endpoint and merge them
	claims, err := a.getClaims(oidcClient, connector.GetIssuerURL(), code)
	if err != nil {
		return nil, trace.OAuth2(
			oauth2.ErrorUnsupportedResponseType, "unable to construct claims", q)
	}
	log.Debugf("[OIDC] Claims: %v", claims)
	// if we are sending acr values, make sure we also validate them
	acrValue := connector.GetACR()
	if acrValue != "" {
		err := a.validateACRValues(acrValue, connector.GetProvider(), claims)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		log.Debugf("[OIDC] ACR values %q successfully validated", acrValue)
	}
	ident, err := oidc.IdentityFromClaims(claims)
	if err != nil {
		return nil, trace.OAuth2(
			oauth2.ErrorUnsupportedResponseType, "unable to convert claims to identity", q)
	}
	log.Debugf("[IDENTITY] %q expires at: %v", ident.Email, ident.ExpiresAt)
	response := &OIDCAuthResponse{
		Identity: services.OIDCIdentity{ConnectorID: connector.GetName(), Email: ident.Email},
		Req:      *req,
	}
	// dynamically create/update the user when the connector maps claims to roles
	log.Debugf("[OIDC] Applying %v claims to roles mappings", len(connector.GetClaimsToRoles()))
	if len(connector.GetClaimsToRoles()) != 0 {
		if err := a.createOIDCUser(connector, ident, claims); err != nil {
			return nil, trace.Wrap(err)
		}
	}
	if !req.CheckUser {
		return response, nil
	}
	user, err := a.Identity.GetUserByOIDCIdentity(services.OIDCIdentity{
		ConnectorID: req.ConnectorID, Email: ident.Email})
	if err != nil {
		return nil, trace.Wrap(err)
	}
	response.Username = user.GetName()
	var roles services.RoleSet
	roles, err = services.FetchRoles(user.GetRoles(), a.Access)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// session TTL is bounded by the identity expiry and role policy
	sessionTTL := roles.AdjustSessionTTL(utils.ToTTL(a.clock, ident.ExpiresAt))
	bearerTokenTTL := utils.MinTTL(BearerTokenTTL, sessionTTL)
	if req.CreateWebSession {
		sess, err := a.NewWebSession(user.GetName())
		if err != nil {
			return nil, trace.Wrap(err)
		}
		// session will expire based on identity TTL and allowed session TTL
		sess.SetExpiryTime(a.clock.Now().UTC().Add(sessionTTL))
		// bearer token will expire based on the expected session renewal
		sess.SetBearerTokenExpiryTime(a.clock.Now().UTC().Add(bearerTokenTTL))
		if err := a.UpsertWebSession(user.GetName(), sess); err != nil {
			return nil, trace.Wrap(err)
		}
		response.Session = sess
	}
	// console login: sign the client's public key and attach host CA signers
	if len(req.PublicKey) != 0 {
		certTTL := utils.MinTTL(utils.ToTTL(a.clock, ident.ExpiresAt), req.CertTTL)
		allowedLogins, err := roles.CheckLogins(certTTL)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		cert, err := a.GenerateUserCert(req.PublicKey, user.GetName(), allowedLogins, certTTL, roles.CanForwardAgents())
		if err != nil {
			return nil, trace.Wrap(err)
		}
		response.Cert = cert
		authorities, err := a.GetCertAuthorities(services.HostCA, false)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		for _, authority := range authorities {
			response.HostSigners = append(response.HostSigners, authority)
		}
	}
	return response, nil
}
// DeleteRole deletes a role after verifying it is not referenced by any user
// or by any user certificate authority (e.g. one belonging to an external cluster).
func (a *AuthServer) DeleteRole(name string) error {
	// check if this role is used by any user
	users, err := a.Identity.GetUsers()
	if err != nil {
		return trace.Wrap(err)
	}
	for _, u := range users {
		for _, r := range u.GetRoles() {
			if r == name {
				return trace.BadParameter("role %v is used by user %v", name, u.GetName())
			}
		}
	}
	// check if it's used by some external cert authorities, e.g.
	// cert authorities related to external cluster
	cas, err := a.Trust.GetCertAuthorities(services.UserCA, false)
	if err != nil {
		return trace.Wrap(err)
	}
	// fix: loop variable renamed from "a", which shadowed the method receiver
	for _, ca := range cas {
		for _, r := range ca.GetRoles() {
			if r == name {
				return trace.BadParameter("role %v is used by user cert authority %v", name, ca.GetClusterName())
			}
		}
	}
	return a.Access.DeleteRole(name)
}
const (
	// BearerTokenTTL specifies how long a standard bearer token exists before
	// it has to be renewed by the client
	BearerTokenTTL = 10 * time.Minute
	// TokenLenBytes is the length in bytes of the invite token
	TokenLenBytes = 16
)
// oidcClient is an internal structure that pairs a client with the config it
// was built from, so config changes can be detected and the client rebuilt.
type oidcClient struct {
	client *oidc.Client
	config oidc.ClientConfig
}
// oidcConfigsEqual reports whether two OIDC client configs are equal:
// same redirect URL, same credentials, and identical scope lists.
func oidcConfigsEqual(a, b oidc.ClientConfig) bool {
	switch {
	case a.RedirectURL != b.RedirectURL:
		return false
	case a.Credentials.ID != b.Credentials.ID:
		return false
	case a.Credentials.Secret != b.Credentials.Secret:
		return false
	case len(a.Scope) != len(b.Scope):
		return false
	}
	for i, scope := range a.Scope {
		if scope != b.Scope[i] {
			return false
		}
	}
	return true
}
// isHTTPS returns an error unless u parses as a URL with the https scheme.
func isHTTPS(u string) error {
	parsed, err := url.Parse(u)
	if err != nil {
		return trace.Wrap(err)
	}
	if parsed.Scheme == "https" {
		return nil
	}
	return trace.BadParameter("expected scheme https, got %q", parsed.Scheme)
}
Use trace.NotFound when returning an error.
/*
Copyright 2015 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package auth implements certificate signing authority and access control server
// Authority server is composed of several parts:
//
// * Authority server itself that implements signing and acl logic
// * HTTP server wrapper for authority server
// * HTTP client wrapper
//
package auth
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"sync"
"time"
"github.com/gravitational/teleport"
"github.com/gravitational/teleport/lib/backend"
"github.com/gravitational/teleport/lib/defaults"
"github.com/gravitational/teleport/lib/services"
"github.com/gravitational/teleport/lib/services/local"
"github.com/gravitational/teleport/lib/utils"
log "github.com/Sirupsen/logrus"
"github.com/coreos/go-oidc/jose"
"github.com/coreos/go-oidc/oauth2"
"github.com/coreos/go-oidc/oidc"
"github.com/gravitational/trace"
"github.com/jonboulle/clockwork"
"github.com/tstranex/u2f"
)
// Authority implements minimal key-management facility for generating OpenSSH
// compatible public/private key pairs and OpenSSH certificates
type Authority interface {
	// GenerateKeyPair generates a new keypair
	GenerateKeyPair(passphrase string) (privKey []byte, pubKey []byte, err error)
	// GetNewKeyPairFromPool returns a new keypair from a pre-generated in-memory pool
	GetNewKeyPairFromPool() (privKey []byte, pubKey []byte, err error)
	// GenerateHostCert takes the private key of the CA, public key of the new host,
	// along with metadata (host ID, node name, cluster name, roles, and ttl) and generates
	// a host certificate.
	GenerateHostCert(certParams services.CertParams) ([]byte, error)
	// GenerateUserCert generates user certificate, it takes pkey as a signing
	// private key (user certificate authority)
	GenerateUserCert(pkey, key []byte, teleportUsername string, allowedLogins []string, ttl time.Duration, permitAgentForwarding bool) ([]byte, error)
}
// AuthServerOption allows setting options as functional arguments to AuthServer
type AuthServerOption func(*AuthServer)
// NewAuthServer creates and configures a new AuthServer instance.
// Any service left nil in cfg is replaced with its default local
// (backend-backed) implementation; opts may override fields such as the clock.
func NewAuthServer(cfg *InitConfig, opts ...AuthServerOption) *AuthServer {
	if cfg.Trust == nil {
		cfg.Trust = local.NewCAService(cfg.Backend)
	}
	if cfg.Presence == nil {
		cfg.Presence = local.NewPresenceService(cfg.Backend)
	}
	if cfg.Provisioner == nil {
		cfg.Provisioner = local.NewProvisioningService(cfg.Backend)
	}
	if cfg.Identity == nil {
		cfg.Identity = local.NewIdentityService(cfg.Backend)
	}
	if cfg.Access == nil {
		cfg.Access = local.NewAccessService(cfg.Backend)
	}
	if cfg.ClusterAuthPreferenceService == nil {
		cfg.ClusterAuthPreferenceService = local.NewClusterAuthPreferenceService(cfg.Backend)
	}
	if cfg.UniversalSecondFactorService == nil {
		cfg.UniversalSecondFactorService = local.NewUniversalSecondFactorService(cfg.Backend)
	}
	as := AuthServer{
		bk:                            cfg.Backend,
		Authority:                     cfg.Authority,
		Trust:                         cfg.Trust,
		Presence:                      cfg.Presence,
		Provisioner:                   cfg.Provisioner,
		Identity:                      cfg.Identity,
		Access:                        cfg.Access,
		DomainName:                    cfg.DomainName,
		AuthServiceName:               cfg.AuthServiceName,
		StaticTokens:                  cfg.StaticTokens,
		ClusterAuthPreference:         cfg.ClusterAuthPreferenceService,
		UniversalSecondFactorSettings: cfg.UniversalSecondFactorService,
		oidcClients:                   make(map[string]*oidcClient),
		DeveloperMode:                 cfg.DeveloperMode,
	}
	// apply functional options before filling in remaining defaults
	for _, o := range opts {
		o(&as)
	}
	if as.clock == nil {
		as.clock = clockwork.NewRealClock()
	}
	return &as
}
// AuthServer keeps the cluster together. It acts as a certificate authority (CA) for
// a cluster and:
// - generates the keypair for the node it's running on
// - invites other SSH nodes to a cluster, by issuing invite tokens
// - adds other SSH nodes to a cluster, by checking their token and signing their keys
// - same for users and their sessions
// - checks public keys to see if they're signed by it (can be trusted or not)
type AuthServer struct {
// lock guards mutable shared state; currently it protects the oidcClients cache
lock sync.Mutex
// oidcClients caches constructed OIDC clients keyed by connector name
oidcClients map[string]*oidcClient
// clock abstracts time so it can be substituted (via an AuthServerOption)
clock clockwork.Clock
// bk is the storage backend the server (and the services below) persist to
bk backend.Backend
// DeveloperMode should only be used during development as it does several
// unsafe things like log sensitive information to console as well as
// not verify certificates.
DeveloperMode bool
// Authority provides key pair and certificate generation
Authority
// DomainName stores the FQDN of the signing CA (its certificate will have this
// name embedded). It is usually set to the GUID of the host the Auth service runs on
DomainName string
// AuthServiceName is a human-readable name of this CA. If several Auth services are running
// (managing multiple teleport clusters) this field is used to tell them apart in UIs
// It usually defaults to the hostname of the machine the Auth service runs on.
AuthServiceName string
// StaticTokens are pre-defined host provisioning tokens supplied via config file for
// environments where paranoid security is not needed
StaticTokens []services.ProvisionToken
// Embedded services: their methods are promoted directly onto AuthServer.
services.Trust
services.Presence
services.Provisioner
services.Identity
services.Access
services.ClusterAuthPreference
services.UniversalSecondFactorSettings
}
// Close releases the resources held by the auth server, closing the
// storage backend if one was configured.
func (a *AuthServer) Close() error {
	if a.bk == nil {
		return nil
	}
	return trace.Wrap(a.bk.Close())
}
// GetDomainName returns the domain name that identifies this authority server.
// Also known as "cluster name". The error return is always nil.
func (a *AuthServer) GetDomainName() (string, error) {
return a.DomainName, nil
}
// GenerateHostCert uses the private key of the host CA to sign the public key
// of the host (along with metadata like host ID, node name, roles, and TTL)
// and produce a host certificate.
func (s *AuthServer) GenerateHostCert(hostPublicKey []byte, hostID, nodeName, clusterName string, roles teleport.Roles, ttl time.Duration) ([]byte, error) {
	// look up the host CA that will be signing the host's public key
	hostCA, err := s.Trust.GetCertAuthority(services.CertAuthID{
		Type:       services.HostCA,
		DomainName: s.DomainName,
	}, true)
	if err != nil {
		return nil, trace.BadParameter("failed to load host CA for '%s': %v", s.DomainName, err)
	}
	// use the CA's first signing key for the signature
	signingKey, err := hostCA.FirstSigningKey()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// create and sign!
	return s.Authority.GenerateHostCert(services.CertParams{
		PrivateCASigningKey: signingKey,
		PublicHostKey:       hostPublicKey,
		HostID:              hostID,
		NodeName:            nodeName,
		ClusterName:         clusterName,
		Roles:               roles,
		TTL:                 ttl,
	})
}
// GenerateUserCert signs the given public key with this cluster's user CA,
// producing a user certificate valid for the allowed logins and TTL.
func (s *AuthServer) GenerateUserCert(key []byte, username string, allowedLogins []string, ttl time.Duration, canForwardAgents bool) ([]byte, error) {
	userCA, err := s.Trust.GetCertAuthority(services.CertAuthID{
		Type:       services.UserCA,
		DomainName: s.DomainName,
	}, true)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	signingKey, err := userCA.FirstSigningKey()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return s.Authority.GenerateUserCert(signingKey, key, username, allowedLogins, ttl, canForwardAgents)
}
// withUserLock executes authenticateFn, which performs user authentication.
// If authenticateFn returns a non-nil error, the login attempt is recorded as
// failed. The only exception to this rule is ConnectionProblemError: in that
// case access is denied but the attempt is not recorded, to avoid locking
// users out due to backend failures. Once a user accumulates
// defaults.MaxLoginAttempts consecutive failures, the account is locked for
// defaults.AccountLockInterval.
func (s *AuthServer) withUserLock(username string, authenticateFn func() error) error {
	user, err := s.Identity.GetUser(username)
	if err != nil {
		return trace.Wrap(err)
	}
	status := user.GetStatus()
	if status.IsLocked && status.LockExpires.After(s.clock.Now().UTC()) {
		// BUGFIX: the format string has two verbs; the username argument was missing
		return trace.AccessDenied("user %v is locked until %v", username, utils.HumanTimeFormat(status.LockExpires))
	}
	fnErr := authenticateFn()
	if fnErr == nil {
		return nil
	}
	// do not lock user out in case the DB is flaky or down
	// BUGFIX: this must inspect fnErr; `err` is always nil at this point
	if trace.IsConnectionProblem(fnErr) {
		return trace.Wrap(fnErr)
	}
	// log failed attempt and possibly lock user
	attempt := services.LoginAttempt{Time: s.clock.Now().UTC(), Success: false}
	err = s.AddUserLoginAttempt(username, attempt, defaults.AttemptTTL)
	if err != nil {
		log.Error(trace.DebugReport(err))
		return trace.Wrap(fnErr)
	}
	loginAttempts, err := s.Identity.GetUserLoginAttempts(username)
	if err != nil {
		log.Error(trace.DebugReport(err))
		return trace.Wrap(fnErr)
	}
	if !services.LastFailed(defaults.MaxLoginAttempts, loginAttempts) {
		log.Debugf("%v user has less than %v failed login attempts", username, defaults.MaxLoginAttempts)
		return trace.Wrap(fnErr)
	}
	lockUntil := s.clock.Now().UTC().Add(defaults.AccountLockInterval)
	// BUGFIX: report the newly computed lockUntil, not the stale status.LockExpires
	message := fmt.Sprintf("%v exceeds %v failed login attempts, locked until %v",
		username, defaults.MaxLoginAttempts, utils.HumanTimeFormat(lockUntil))
	log.Debug(message)
	user.SetLocked(lockUntil, "user has exceeded maximum failed login attempts")
	err = s.Identity.UpsertUser(user)
	if err != nil {
		log.Error(trace.DebugReport(err))
		return trace.Wrap(fnErr)
	}
	return trace.AccessDenied(message)
}
// SignIn checks the user's password (subject to the failed-login lockout
// policy) and, when it is correct, issues a new secret-free web session.
func (s *AuthServer) SignIn(user string, password []byte) (services.WebSession, error) {
	checkPassword := func() error {
		return s.CheckPasswordWOToken(user, password)
	}
	if err := s.withUserLock(user, checkPassword); err != nil {
		return nil, trace.Wrap(err)
	}
	return s.PreAuthenticatedSignIn(user)
}
// PreAuthenticatedSignIn is for 2-way authentication methods like U2F where
// the password is already checked before issuing the second factor challenge.
// It creates, persists, and returns (without secrets) a new web session.
func (s *AuthServer) PreAuthenticatedSignIn(user string) (services.WebSession, error) {
	session, err := s.NewWebSession(user)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	if err = s.UpsertWebSession(user, session); err != nil {
		return nil, trace.Wrap(err)
	}
	return session.WithoutSecrets(), nil
}
// U2FSignRequest verifies the user's password and, if valid, creates and
// stores a U2F sign challenge for the user's registered device.
func (s *AuthServer) U2FSignRequest(user string, password []byte) (*u2f.SignRequest, error) {
	usf, err := s.GetUniversalSecondFactor()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// verify the password first (failures count towards the login lockout)
	err = s.withUserLock(user, func() error {
		return s.CheckPasswordWOToken(user, password)
	})
	if err != nil {
		return nil, trace.Wrap(err)
	}
	registration, err := s.GetU2FRegistration(user)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	challenge, err := u2f.NewChallenge(usf.GetAppID(), usf.GetFacets())
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// persist the challenge so the sign response can be validated later
	if err = s.UpsertU2FSignChallenge(user, challenge); err != nil {
		return nil, trace.Wrap(err)
	}
	return challenge.SignRequest(*registration), nil
}
// CheckU2FSignResponse validates a U2F sign response against the stored
// challenge and registration, then advances the stored usage counter.
func (s *AuthServer) CheckU2FSignResponse(user string, response *u2f.SignResponse) error {
	// before checking the response, see if U2F is actually set up on the backend
	if _, err := s.GetUniversalSecondFactor(); err != nil {
		return trace.Wrap(err)
	}
	registration, err := s.GetU2FRegistration(user)
	if err != nil {
		return trace.Wrap(err)
	}
	lastCounter, err := s.GetU2FRegistrationCounter(user)
	if err != nil {
		return trace.Wrap(err)
	}
	challenge, err := s.GetU2FSignChallenge(user)
	if err != nil {
		return trace.Wrap(err)
	}
	updatedCounter, err := registration.Authenticate(*response, *challenge, lastCounter)
	if err != nil {
		return trace.Wrap(err)
	}
	// store the new counter so replayed responses are rejected
	return trace.Wrap(s.UpsertU2FRegistrationCounter(user, updatedCounter))
}
// ExtendWebSession issues a fresh web session for a user based on a
// still-valid previous session; used to renew a user's web session.
// The renewed session inherits the absolute expiry of the previous one.
func (s *AuthServer) ExtendWebSession(user string, prevSessionID string) (services.WebSession, error) {
	prior, err := s.GetWebSession(user, prevSessionID)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// respect any absolute expiry time set on this session (e.g. by an
	// external identity service): past that point we cannot renew without
	// extra renewal logic with the external OIDC provider
	absoluteExpiry := prior.GetExpiryTime()
	if !absoluteExpiry.IsZero() && absoluteExpiry.Before(s.clock.Now().UTC()) {
		return nil, trace.NotFound("web session has expired")
	}
	renewed, err := s.NewWebSession(user)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	renewed.SetExpiryTime(absoluteExpiry)
	bearerTTL := utils.MinTTL(utils.ToTTL(s.clock, absoluteExpiry), BearerTokenTTL)
	renewed.SetBearerTokenExpiryTime(s.clock.Now().UTC().Add(bearerTTL))
	if err := s.UpsertWebSession(user, renewed); err != nil {
		return nil, trace.Wrap(err)
	}
	renewed, err = services.GetWebSessionMarshaler().ExtendWebSession(renewed)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return renewed, nil
}
// CreateWebSession creates and persists a new web session for the user
// without any checks; intended for use by admins.
func (s *AuthServer) CreateWebSession(user string) (services.WebSession, error) {
	session, err := s.NewWebSession(user)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	if err := s.UpsertWebSession(user, session); err != nil {
		return nil, trace.Wrap(err)
	}
	session, err = services.GetWebSessionMarshaler().GenerateWebSession(session)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return session, nil
}
// GenerateToken mints a cryptographically random provisioning token that
// allows joining the cluster with the given roles, valid for the given TTL.
func (s *AuthServer) GenerateToken(roles teleport.Roles, ttl time.Duration) (string, error) {
	// reject invalid/unknown roles up front
	for _, role := range roles {
		if err := role.Check(); err != nil {
			return "", trace.Wrap(err)
		}
	}
	token, err := utils.CryptoRandomHex(TokenLenBytes)
	if err != nil {
		return "", trace.Wrap(err)
	}
	if err := s.Provisioner.UpsertToken(token, roles, ttl); err != nil {
		// wrap for a stack trace, consistent with the rest of this file
		// (previously the bare error was returned)
		return "", trace.Wrap(err)
	}
	return token, nil
}
// GenerateServerKeys generates a new private key and a matching host
// certificate (signed by the host certificate authority) for a node.
func (s *AuthServer) GenerateServerKeys(hostID string, nodeName string, roles teleport.Roles) (*PackedKeys, error) {
	privKey, pubKey, err := s.GenerateKeyPair("")
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// sign the host certificate with an infinite TTL
	hostCert, err := s.GenerateHostCert(pubKey, hostID, nodeName, s.DomainName, roles, 0)
	if err != nil {
		log.Warningf("[AUTH] Node %q [%v] can not join: certificate generation error: %v", nodeName, hostID, err)
		return nil, trace.Wrap(err)
	}
	return &PackedKeys{
		Key:  privKey,
		Cert: hostCert,
	}, nil
}
// ValidateToken takes a provisioning token value and finds if it's valid:
// statically configured tokens are consulted first, then the token storage.
// Returns the roles this token allows its owner to assume, or an error if
// the token cannot be found.
func (s *AuthServer) ValidateToken(token string) (roles teleport.Roles, e error) {
	// static tokens from the config file take precedence
	for _, static := range s.StaticTokens {
		if static.Token == token {
			return static.Roles, nil
		}
	}
	// fall back to the tokens kept in the token storage
	stored, err := s.Provisioner.GetToken(token)
	if err != nil {
		log.Info(err)
		return nil, trace.Errorf("token not recognized")
	}
	return stored.Roles, nil
}
// checkTokenTTL checks whether the given token has expired; an expired token
// is deleted from the backend and 'false' is returned (the token cannot be
// used). NOTE: if the token cannot even be looked up, 'true' is returned —
// existence/validity is checked separately by ValidateToken.
func (s *AuthServer) checkTokenTTL(token string) bool {
// look at the tokens in the token storage
tok, err := s.Provisioner.GetToken(token)
if err != nil {
log.Warn(err)
return true
}
now := s.clock.Now().UTC()
if tok.Expires.Before(now) {
// expired: delete it (best effort — a delete failure is only logged)
if err = s.DeleteToken(token); err != nil {
log.Error(err)
}
return false
}
return true
}
// RegisterUsingToken adds a new node to the Teleport cluster using a previously issued token.
// A node must also request a specific role (and the role must match one of the roles
// the token was generated for).
//
// If a token was generated with a TTL, it gets enforced (can't register new nodes after TTL expires)
// If a token was generated with a TTL=0, it means it's a single-use token and it gets destroyed
// after a successful registration.
func (s *AuthServer) RegisterUsingToken(token, hostID string, nodeName string, role teleport.Role) (*PackedKeys, error) {
	log.Infof("[AUTH] Node %q [%v] trying to join with role: %v", nodeName, hostID, role)
	if hostID == "" {
		return nil, trace.BadParameter("HostID cannot be empty")
	}
	if err := role.Check(); err != nil {
		return nil, trace.Wrap(err)
	}
	// make sure the token is valid
	roles, err := s.ValidateToken(token)
	if err != nil {
		msg := fmt.Sprintf("%q [%v] can not join the cluster with role %s, token error: %v", nodeName, hostID, role, err)
		log.Warnf("[AUTH] %s", msg)
		// pass msg through "%s" so a '%' in the node name is not treated as a format verb
		return nil, trace.AccessDenied("%s", msg)
	}
	// make sure the caller requested a role that the token allows
	if !roles.Include(role) {
		msg := fmt.Sprintf("%q [%v] can not join the cluster, the token does not allow %q role", nodeName, hostID, role)
		log.Warningf("[AUTH] %s", msg)
		return nil, trace.BadParameter("%s", msg)
	}
	if !s.checkTokenTTL(token) {
		return nil, trace.AccessDenied("%q [%v] can not join the cluster. Token has expired", nodeName, hostID)
	}
	// generate and return host certificate and keys
	keys, err := s.GenerateServerKeys(hostID, nodeName, teleport.Roles{role})
	if err != nil {
		return nil, trace.Wrap(err)
	}
	log.Infof("[AUTH] Node %q [%v] joined the cluster", nodeName, hostID)
	return keys, nil
}
// RegisterNewAuthServer consumes a single-use provisioning token that must
// carry the Auth role; the token is deleted on success.
func (s *AuthServer) RegisterNewAuthServer(token string) error {
	provisionToken, err := s.Provisioner.GetToken(token)
	if err != nil {
		return trace.Wrap(err)
	}
	if !provisionToken.Roles.Include(teleport.RoleAuth) {
		return trace.AccessDenied("role does not match")
	}
	// single use: destroy the token once accepted
	return trace.Wrap(s.DeleteToken(token))
}
// DeleteToken removes a token, trying the user signup tokens first and the
// node provisioning tokens second. Statically configured tokens cannot be
// deleted and yield a BadParameter error.
func (s *AuthServer) DeleteToken(token string) (err error) {
	// static tokens cannot be removed
	for _, static := range s.StaticTokens {
		if static.Token == token {
			return trace.BadParameter("token %s is statically configured and cannot be removed", token)
		}
	}
	// try it as a user signup token first
	if err = s.Identity.DeleteSignupToken(token); err == nil {
		return nil
	}
	// otherwise try it as a node provisioning token
	if err = s.Provisioner.DeleteToken(token); err == nil {
		return nil
	}
	return trace.Wrap(err)
}
// GetTokens returns all tokens: machine provisioning tokens (with "node
// roles" like auth, proxy, node), statically configured tokens, and user
// invitation tokens (presented with the 'signup' role).
func (s *AuthServer) GetTokens() (tokens []services.ProvisionToken, err error) {
	// node provisioning tokens from storage
	tokens, err = s.Provisioner.GetTokens()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// statically configured tokens
	tokens = append(tokens, s.StaticTokens...)
	// user invitation tokens, converted to machine-token form
	signupTokens, err := s.Identity.GetSignupTokens()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	for _, st := range signupTokens {
		tokens = append(tokens, services.ProvisionToken{
			Token:   st.Token,
			Expires: st.Expires,
			Roles:   teleport.Roles{teleport.RoleSignup},
		})
	}
	return tokens, nil
}
// NewWebSession builds a new web session for the given user: it mints the
// session and bearer tokens, takes a keypair from the pool, and signs a user
// certificate with the user CA. The session is NOT persisted — callers are
// expected to use UpsertWebSession for that.
func (s *AuthServer) NewWebSession(userName string) (services.WebSession, error) {
	sessionID, err := utils.CryptoRandomHex(TokenLenBytes)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	bearer, err := utils.CryptoRandomHex(TokenLenBytes)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	privKey, pubKey, err := s.GetNewKeyPairFromPool()
	if err != nil {
		return nil, err
	}
	userCA, err := s.Trust.GetCertAuthority(services.CertAuthID{
		Type:       services.UserCA,
		DomainName: s.DomainName,
	}, true)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	signingKey, err := userCA.FirstSigningKey()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	user, err := s.GetUser(userName)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// resolve the user's role set to compute TTLs and the allowed OS logins
	var roleSet services.RoleSet
	for _, roleName := range user.GetRoles() {
		role, err := s.Access.GetRole(roleName)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		roleSet = append(roleSet, role)
	}
	sessionTTL := roleSet.AdjustSessionTTL(defaults.CertDuration)
	bearerTTL := utils.MinTTL(sessionTTL, BearerTokenTTL)
	allowedLogins, err := roleSet.CheckLogins(sessionTTL)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// cert TTL is set to the bearer token TTL as we expect an active session
	// to renew the token every BearerTokenTTL period
	cert, err := s.Authority.GenerateUserCert(signingKey, pubKey, user.GetName(), allowedLogins, bearerTTL, roleSet.CanForwardAgents())
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return services.NewWebSession(sessionID, services.WebSessionSpecV2{
		User:               user.GetName(),
		Priv:               privKey,
		Pub:                cert,
		Expires:            s.clock.Now().UTC().Add(sessionTTL),
		BearerToken:        bearer,
		BearerTokenExpires: s.clock.Now().UTC().Add(bearerTTL),
	}), nil
}
// UpsertWebSession creates or updates the web session for the given user,
// keyed by the session's own name.
func (s *AuthServer) UpsertWebSession(user string, sess services.WebSession) error {
return s.Identity.UpsertWebSession(user, sess.GetName(), sess)
}
// GetWebSession returns the web session with the given id for the user,
// including its secrets (see GetWebSessionInfo for the secret-free variant).
func (s *AuthServer) GetWebSession(userName string, id string) (services.WebSession, error) {
return s.Identity.GetWebSession(userName, id)
}
// GetWebSessionInfo returns the web session with the given id for the user,
// with its secrets stripped out.
func (s *AuthServer) GetWebSessionInfo(userName string, id string) (services.WebSession, error) {
	session, err := s.Identity.GetWebSession(userName, id)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return session.WithoutSecrets(), nil
}
// DeleteNamespace removes the given namespace. The default namespace and
// any namespace that still has registered nodes cannot be deleted.
func (s *AuthServer) DeleteNamespace(namespace string) error {
	if namespace == defaults.Namespace {
		return trace.AccessDenied("can't delete default namespace")
	}
	registered, err := s.Presence.GetNodes(namespace)
	if err != nil {
		return trace.Wrap(err)
	}
	if len(registered) != 0 {
		return trace.BadParameter("can't delete namespace %v that has %v registered nodes", namespace, len(registered))
	}
	return s.Presence.DeleteNamespace(namespace)
}
// DeleteWebSession removes the web session with the given id for the user.
func (s *AuthServer) DeleteWebSession(user string, id string) error {
return trace.Wrap(s.Identity.DeleteWebSession(user, id))
}
// getOIDCClient returns an OIDC client for the given connector, reusing a
// cached client when the connector's configuration has not changed since
// the client was built. Safe for concurrent use (guarded by s.lock).
func (s *AuthServer) getOIDCClient(conn services.OIDCConnector) (*oidc.Client, error) {
	s.lock.Lock()
	defer s.lock.Unlock()
	config := oidc.ClientConfig{
		RedirectURL: conn.GetRedirectURL(),
		Credentials: oidc.ClientCredentials{
			ID:     conn.GetClientID(),
			Secret: conn.GetClientSecret(),
		},
		// "openid" notifies the provider that we are using OIDC scopes
		Scope: utils.Deduplicate(append([]string{"openid", "email"}, conn.GetScope()...)),
	}
	// reuse the cached client if its config still matches
	if cached, ok := s.oidcClients[conn.GetName()]; ok && oidcConfigsEqual(cached.config, config) {
		return cached.client, nil
	}
	// config changed (or no client cached yet): rebuild and re-cache
	delete(s.oidcClients, conn.GetName())
	client, err := oidc.NewClient(config)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	client.SyncProviderConfig(conn.GetIssuerURL())
	s.oidcClients[conn.GetName()] = &oidcClient{client: client, config: config}
	return client, nil
}
// UpsertOIDCConnector creates or updates the given OIDC connector.
func (s *AuthServer) UpsertOIDCConnector(connector services.OIDCConnector) error {
return s.Identity.UpsertOIDCConnector(connector)
}
// DeleteOIDCConnector removes the OIDC connector with the given name.
func (s *AuthServer) DeleteOIDCConnector(connectorName string) error {
return s.Identity.DeleteOIDCConnector(connectorName)
}
// CreateOIDCAuthRequest prepares an OIDC auth request: it generates a state
// token, builds the provider redirect URL (optionally adding ACR values as a
// query parameter), and persists the request so the callback can later be
// validated against it.
func (s *AuthServer) CreateOIDCAuthRequest(req services.OIDCAuthRequest) (*services.OIDCAuthRequest, error) {
	connector, err := s.Identity.GetOIDCConnector(req.ConnectorID, true)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	client, err := s.getOIDCClient(connector)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	stateToken, err := utils.CryptoRandomHex(TokenLenBytes)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	req.StateToken = stateToken
	oauthClient, err := client.OAuthClient()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// "online" is the OIDC online scope; "select_account" forces the user to always select an account
	req.RedirectURL = oauthClient.AuthCodeURL(req.StateToken, "online", "select_account")
	// if the connector has an Authentication Context Class Reference (ACR)
	// value set, append it to the redirect URL as a query value
	if acrValue := connector.GetACR(); acrValue != "" {
		parsed, err := url.Parse(req.RedirectURL)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		query := parsed.Query()
		query.Set("acr_values", acrValue)
		parsed.RawQuery = query.Encode()
		req.RedirectURL = parsed.String()
	}
	log.Debugf("[OIDC] Redirect URL: %v", req.RedirectURL)
	if err := s.Identity.CreateOIDCAuthRequest(req, defaults.OIDCAuthRequestTTL); err != nil {
		return nil, trace.Wrap(err)
	}
	return &req, nil
}
// OIDCAuthResponse is returned when the auth server has validated the
// callback parameters returned from the OIDC provider
type OIDCAuthResponse struct {
// Username is the authenticated teleport username
Username string `json:"username"`
// Identity contains the validated OIDC identity
Identity services.OIDCIdentity `json:"identity"`
// Session is a web session generated by the auth server if one was
// requested in the original OIDCAuthRequest
Session services.WebSession `json:"session,omitempty"`
// Cert will be generated by the certificate authority
Cert []byte `json:"cert,omitempty"`
// Req is the original OIDC auth request
Req services.OIDCAuthRequest `json:"req"`
// HostSigners is a list of signing host public keys
// trusted by proxy, used in console login
HostSigners []services.CertAuthority `json:"host_signers"`
}
// buildRoles maps OIDC claims to a list of role names. If the claims match
// concrete roles configured on the connector, those are returned directly.
// Otherwise a role is instantiated from the connector's role template (with
// a TTL tied to the identity expiry), upserted, and returned.
func (a *AuthServer) buildRoles(connector services.OIDCConnector, ident *oidc.Identity, claims jose.Claims) ([]string, error) {
	roles := connector.MapClaims(claims)
	if len(roles) != 0 {
		return roles, nil
	}
	// no direct mapping — fall back to the connector's role template
	role, err := connector.RoleFromTemplate(claims)
	if err != nil {
		log.Warningf("[OIDC] Unable to map claims to roles or role templates for %q", connector.GetName())
		return nil, trace.AccessDenied("unable to map claims to roles or role templates for %q", connector.GetName())
	}
	// the templated role lives as long as the identity: ttl = expires - now
	ttl := ident.ExpiresAt.Sub(a.clock.Now())
	if err := a.Access.UpsertRole(role, ttl); err != nil {
		log.Warningf("[OIDC] Unable to upsert templated role for connector: %q", connector.GetName())
		return nil, trace.AccessDenied("unable to upsert templated role: %q", connector.GetName())
	}
	return []string{role.GetName()}, nil
}
// createOIDCUser creates or updates a dynamic (OIDC-created) user whose roles
// come from the connector's claim mappings. If a user with the same name
// already exists but was NOT created by this OIDC connector, an AlreadyExists
// error is returned instead of overwriting it.
func (a *AuthServer) createOIDCUser(connector services.OIDCConnector, ident *oidc.Identity, claims jose.Claims) error {
	roles, err := a.buildRoles(connector, ident, claims)
	if err != nil {
		return trace.Wrap(err)
	}
	log.Debugf("[IDENTITY] %v/%v is a dynamic identity, generating user with roles: %v", connector.GetName(), ident.Email, roles)
	user, err := services.GetUserMarshaler().GenerateUser(&services.UserV2{
		Kind:    services.KindUser,
		Version: services.V2,
		Metadata: services.Metadata{
			Name:      ident.Email,
			Namespace: defaults.Namespace,
		},
		Spec: services.UserSpecV2{
			Roles:          roles,
			Expires:        ident.ExpiresAt,
			OIDCIdentities: []services.OIDCIdentity{{ConnectorID: connector.GetName(), Email: ident.Email}},
			CreatedBy: services.CreatedBy{
				User: services.UserRef{Name: "system"},
				Time: time.Now().UTC(),
				Connector: &services.ConnectorRef{
					Type:     teleport.ConnectorOIDC,
					ID:       connector.GetName(),
					Identity: ident.Email,
				},
			},
		},
	})
	if err != nil {
		return trace.Wrap(err)
	}
	// see if a user with this name already exists (NotFound is fine)
	existing, err := a.GetUser(ident.Email)
	if err != nil && !trace.IsNotFound(err) {
		return trace.Wrap(err)
	}
	// refuse to overwrite a user that was not created by this OIDC connector
	if existing != nil {
		ref := existing.GetCreatedBy().Connector
		if ref == nil || ref.Type != teleport.ConnectorOIDC || ref.ID != connector.GetName() {
			return trace.AlreadyExists("user %q already exists and is not OIDC user", existing.GetName())
		}
	}
	// no conflicting non-OIDC user: create or update the existing OIDC user
	return trace.Wrap(a.UpsertUser(user))
}
// claimsFromIDToken parses the ID token, verifies its signature via the
// OIDC client, and returns the claims it carries.
func claimsFromIDToken(oidcClient *oidc.Client, idToken string) (jose.Claims, error) {
	parsed, err := jose.ParseJWT(idToken)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	if err := oidcClient.VerifyJWT(parsed); err != nil {
		return nil, trace.Wrap(err)
	}
	log.Debugf("[OIDC] Extracting claims from ID token")
	claims, err := parsed.Claims()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return claims, nil
}
// claimsFromUserInfo finds the UserInfo endpoint from the provider config and
// then extracts claims from it.
//
// Note: We don't request signed JWT responses for UserInfo, instead we force
// the provider config and the issuer to be HTTPS and leave integrity and
// confidentiality to TLS. Authenticity is taken care of during the token
// exchange.
func claimsFromUserInfo(oidcClient *oidc.Client, issuerURL string, accessToken string) (jose.Claims, error) {
	if err := isHTTPS(issuerURL); err != nil {
		return nil, trace.Wrap(err)
	}
	oac, err := oidcClient.OAuthClient()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	hc := oac.HttpClient()
	// fetch the provider config to locate the UserInfo endpoint; if the
	// provider doesn't offer one, return NotFound
	// (reuses hc instead of calling oac.HttpClient() a second time)
	pc, err := oidc.FetchProviderConfig(hc, issuerURL)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	if pc.UserInfoEndpoint == nil {
		return nil, trace.NotFound("UserInfo endpoint not found")
	}
	endpoint := pc.UserInfoEndpoint.String()
	// the endpoint itself must be HTTPS as well (see note above)
	if err := isHTTPS(endpoint); err != nil {
		return nil, trace.Wrap(err)
	}
	log.Debugf("[OIDC] Fetching claims from UserInfo endpoint: %q", endpoint)
	req, err := http.NewRequest("GET", endpoint, nil)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", accessToken))
	resp, err := hc.Do(req)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		return nil, trace.AccessDenied("bad status code: %v", resp.StatusCode)
	}
	var claims jose.Claims
	if err := json.NewDecoder(resp.Body).Decode(&claims); err != nil {
		return nil, trace.Wrap(err)
	}
	return claims, nil
}
// mergeClaims merges b into a: keys already present in a always win.
// The error return is always nil and exists for caller convenience.
func mergeClaims(a jose.Claims, b jose.Claims) (jose.Claims, error) {
	for key, value := range b {
		if _, present := a[key]; !present {
			a[key] = value
		}
	}
	return a, nil
}
// getClaims exchanges the authorization code for tokens and returns the
// UserInfo claims merged into the ID token claims (ID token claims win on
// conflict). It verifies that the "sub" claims from both sources match, to
// prevent token substitution attacks (see section 16.11 of the OIDC spec).
func (a *AuthServer) getClaims(oidcClient *oidc.Client, issuerURL string, code string) (jose.Claims, error) {
	oac, err := oidcClient.OAuthClient()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	t, err := oac.RequestToken(oauth2.GrantTypeAuthCode, code)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	idTokenClaims, err := claimsFromIDToken(oidcClient, t.IDToken)
	if err != nil {
		log.Debugf("[OIDC] Unable to fetch ID token claims: %v", err)
		return nil, trace.Wrap(err)
	}
	log.Debugf("[OIDC] ID Token claims: %v", idTokenClaims)
	userInfoClaims, err := claimsFromUserInfo(oidcClient, issuerURL, t.AccessToken)
	if err != nil {
		if trace.IsNotFound(err) {
			log.Debugf("[OIDC] Provider doesn't offer UserInfo endpoint. Returning token claims: %v", idTokenClaims)
			return idTokenClaims, nil
		}
		log.Debugf("[OIDC] Unable to fetch UserInfo claims: %v", err)
		return nil, trace.Wrap(err)
	}
	log.Debugf("[OIDC] UserInfo claims: %v", userInfoClaims)
	// make sure that the subject in the userinfo claim matches the subject in
	// the id token otherwise there is the possibility of a token substitution attack.
	// see section 16.11 of the oidc spec for more details.
	//
	// BUGFIX: a missing claim (exists == false, err == nil) used to return
	// trace.Wrap(nil), i.e. nil claims with a nil error; now it fails explicitly.
	idsub, exists, err := idTokenClaims.StringClaim("sub")
	if err != nil {
		log.Debugf("[OIDC] unable to extract sub from ID token")
		return nil, trace.Wrap(err)
	}
	if !exists {
		log.Debugf("[OIDC] unable to extract sub from ID token")
		return nil, trace.BadParameter("sub claim not found in ID token")
	}
	uisub, exists, err := userInfoClaims.StringClaim("sub")
	if err != nil {
		log.Debugf("[OIDC] unable to extract sub from UserInfo")
		return nil, trace.Wrap(err)
	}
	if !exists {
		log.Debugf("[OIDC] unable to extract sub from UserInfo")
		return nil, trace.BadParameter("sub claim not found in UserInfo")
	}
	if idsub != uisub {
		log.Debugf("[OIDC] Claim subjects don't match %q != %q", idsub, uisub)
		return nil, trace.BadParameter("invalid subject in UserInfo")
	}
	claims, err := mergeClaims(idTokenClaims, userInfoClaims)
	if err != nil {
		log.Debugf("[OIDC] Unable to merge claims: %v", err)
		return nil, trace.Wrap(err)
	}
	return claims, nil
}
// validateACRValues validates that the acr claim returned by the provider is
// appropriate for the value we requested. By default we expect the same value
// we sent; some identity providers (currently NetIQ) need provider-specific
// handling of the claim format.
func (a *AuthServer) validateACRValues(acrValue string, identityProvider string, claims jose.Claims) error {
	switch identityProvider {
	case teleport.NetIQ:
		// NetIQ encodes acr as a map of value lists rather than a plain string
		log.Debugf("[OIDC] Validating ACR values with %q rules", identityProvider)
		rawACR, ok := claims["acr"]
		if !ok {
			return trace.BadParameter("acr claim does not exist")
		}
		acrMap, ok := rawACR.(map[string][]string)
		if !ok {
			return trace.BadParameter("acr unknown type: %T", rawACR)
		}
		values, ok := acrMap["values"]
		if !ok {
			return trace.BadParameter("acr.values not found in claims")
		}
		matched := false
		for _, candidate := range values {
			if candidate == acrValue {
				matched = true
				break
			}
		}
		if !matched {
			log.Debugf("[OIDC] No ACR match found for %q in %q", acrValue, values)
			return trace.BadParameter("acr claim does not match")
		}
	default:
		log.Debugf("[OIDC] Validating ACR values with default rules")
		claimValue, exists, err := claims.StringClaim("acr")
		if !exists {
			return trace.BadParameter("acr claim does not exist")
		}
		if err != nil {
			return trace.Wrap(err)
		}
		if claimValue != acrValue {
			log.Debugf("[OIDC] No ACR match found %q != %q", acrValue, claimValue)
			return trace.BadParameter("acr claim does not match")
		}
	}
	return nil
}
// ValidateOIDCAuthCallback is called by the proxy to check OIDC query parameters
// returned by OIDC Provider, if everything checks out, auth server
// will respond with OIDCAuthResponse, otherwise it will return error
func (a *AuthServer) ValidateOIDCAuthCallback(q url.Values) (*OIDCAuthResponse, error) {
	// the provider reported an error via the redirect query - surface it as an OAuth2 error
	if error := q.Get("error"); error != "" {
		return nil, trace.OAuth2(oauth2.ErrorInvalidRequest, error, q)
	}
	code := q.Get("code")
	if code == "" {
		return nil, trace.OAuth2(
			oauth2.ErrorInvalidRequest, "code query param must be set", q)
	}
	stateToken := q.Get("state")
	if stateToken == "" {
		return nil, trace.OAuth2(
			oauth2.ErrorInvalidRequest, "missing state query param", q)
	}
	// the state token ties this callback back to the original auth request
	req, err := a.Identity.GetOIDCAuthRequest(stateToken)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	connector, err := a.Identity.GetOIDCConnector(req.ConnectorID, true)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	oidcClient, err := a.getOIDCClient(connector)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// extract claims from both the id token and the userinfo endpoint and merge them
	claims, err := a.getClaims(oidcClient, connector.GetIssuerURL(), code)
	if err != nil {
		return nil, trace.OAuth2(
			oauth2.ErrorUnsupportedResponseType, "unable to construct claims", q)
	}
	log.Debugf("[OIDC] Claims: %v", claims)
	// if we are sending acr values, make sure we also validate them
	acrValue := connector.GetACR()
	if acrValue != "" {
		err := a.validateACRValues(acrValue, connector.GetProvider(), claims)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		log.Debugf("[OIDC] ACR values %q successfully validated", acrValue)
	}
	ident, err := oidc.IdentityFromClaims(claims)
	if err != nil {
		return nil, trace.OAuth2(
			oauth2.ErrorUnsupportedResponseType, "unable to convert claims to identity", q)
	}
	log.Debugf("[IDENTITY] %q expires at: %v", ident.Email, ident.ExpiresAt)
	response := &OIDCAuthResponse{
		Identity: services.OIDCIdentity{ConnectorID: connector.GetName(), Email: ident.Email},
		Req:      *req,
	}
	log.Debugf("[OIDC] Applying %v claims to roles mappings", len(connector.GetClaimsToRoles()))
	// create/update the local user from claims when the connector maps claims to roles
	if len(connector.GetClaimsToRoles()) != 0 {
		if err := a.createOIDCUser(connector, ident, claims); err != nil {
			return nil, trace.Wrap(err)
		}
	}
	if !req.CheckUser {
		return response, nil
	}
	user, err := a.Identity.GetUserByOIDCIdentity(services.OIDCIdentity{
		ConnectorID: req.ConnectorID, Email: ident.Email})
	if err != nil {
		return nil, trace.Wrap(err)
	}
	response.Username = user.GetName()
	var roles services.RoleSet
	roles, err = services.FetchRoles(user.GetRoles(), a.Access)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// session TTL is capped by both role policy and the identity's expiry
	sessionTTL := roles.AdjustSessionTTL(utils.ToTTL(a.clock, ident.ExpiresAt))
	bearerTokenTTL := utils.MinTTL(BearerTokenTTL, sessionTTL)
	if req.CreateWebSession {
		sess, err := a.NewWebSession(user.GetName())
		if err != nil {
			return nil, trace.Wrap(err)
		}
		// session will expire based on identity TTL and allowed session TTL
		sess.SetExpiryTime(a.clock.Now().UTC().Add(sessionTTL))
		// bearer token will expire based on the expected session renewal
		sess.SetBearerTokenExpiryTime(a.clock.Now().UTC().Add(bearerTokenTTL))
		if err := a.UpsertWebSession(user.GetName(), sess); err != nil {
			return nil, trace.Wrap(err)
		}
		response.Session = sess
	}
	if len(req.PublicKey) != 0 {
		// issue a user certificate bound to the shorter of identity TTL and requested TTL
		certTTL := utils.MinTTL(utils.ToTTL(a.clock, ident.ExpiresAt), req.CertTTL)
		allowedLogins, err := roles.CheckLogins(certTTL)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		cert, err := a.GenerateUserCert(req.PublicKey, user.GetName(), allowedLogins, certTTL, roles.CanForwardAgents())
		if err != nil {
			return nil, trace.Wrap(err)
		}
		response.Cert = cert
		// host CAs are returned so the client can verify the cluster's hosts
		authorities, err := a.GetCertAuthorities(services.HostCA, false)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		for _, authority := range authorities {
			response.HostSigners = append(response.HostSigners, authority)
		}
	}
	return response, nil
}
// DeleteRole removes the named role after verifying that no user and no
// user cert authority (e.g. one belonging to an external cluster) still
// references it.
func (a *AuthServer) DeleteRole(name string) error {
	users, err := a.Identity.GetUsers()
	if err != nil {
		return trace.Wrap(err)
	}
	for _, u := range users {
		for _, role := range u.GetRoles() {
			if role == name {
				return trace.BadParameter("role %v is used by user %v", name, u.GetName())
			}
		}
	}
	cas, err := a.Trust.GetCertAuthorities(services.UserCA, false)
	if err != nil {
		return trace.Wrap(err)
	}
	// note: loop variable renamed from `a` to `ca` to avoid shadowing the receiver
	for _, ca := range cas {
		for _, role := range ca.GetRoles() {
			if role == name {
				return trace.BadParameter("role %v is used by user cert authority %v", name, ca.GetClusterName())
			}
		}
	}
	return a.Access.DeleteRole(name)
}
const (
	// BearerTokenTTL specifies standard bearer token to exist before
	// it has to be renewed by the client
	BearerTokenTTL = 10 * time.Minute

	// TokenLenBytes is len in bytes of the invite token
	TokenLenBytes = 16
)
// oidcClient is internal structure that stores client and its config
type oidcClient struct {
	// client is the constructed OIDC client
	client *oidc.Client
	// config is the configuration the client was built with
	// (kept so it can be compared for changes — see oidcConfigsEqual)
	config oidc.ClientConfig
}
// oidcConfigsEqual reports whether two OIDC client configs are
// interchangeable: same redirect URL, same credentials, same scopes
// in the same order.
func oidcConfigsEqual(a, b oidc.ClientConfig) bool {
	switch {
	case a.RedirectURL != b.RedirectURL:
		return false
	case a.Credentials.ID != b.Credentials.ID:
		return false
	case a.Credentials.Secret != b.Credentials.Secret:
		return false
	case len(a.Scope) != len(b.Scope):
		return false
	}
	for i, scope := range a.Scope {
		if b.Scope[i] != scope {
			return false
		}
	}
	return true
}
// isHTTPS checks that the given URL parses and uses the https scheme.
func isHTTPS(u string) error {
	parsed, err := url.Parse(u)
	if err != nil {
		return trace.Wrap(err)
	}
	if parsed.Scheme == "https" {
		return nil
	}
	return trace.BadParameter("expected scheme https, got %q", parsed.Scheme)
}
|
// Package auth implements http request authentication
package auth
import (
"errors"
"github.com/MG-RAST/AWE/lib/auth/basic"
"github.com/MG-RAST/AWE/lib/conf"
e "github.com/MG-RAST/AWE/lib/errors"
"github.com/MG-RAST/AWE/lib/user"
)
// authCache caches successfully authenticated users keyed by their auth header.
var authCache cache

// authMethods holds the registered authentication functions, tried in order.
var authMethods []func(string) (*user.User, error)
// Initialize sets up the in-memory auth cache and registers the
// authentication methods enabled by configuration.
func Initialize() {
	authCache = cache{m: make(map[string]cacheValue)}
	authMethods = []func(string) (*user.User, error){}
	// BUG FIX: the condition was inverted (!=), which registered basic auth
	// only when it was NOT configured — leaving no auth methods when it was.
	if conf.AUTH_TYPE == "basic" {
		authMethods = append(authMethods, basic.Auth)
	}
}
// Authenticate resolves the auth header to a user, consulting the cache
// first and falling back to the registered auth methods. It returns an
// InvalidAuth error when no method accepts the header.
func Authenticate(header string) (u *user.User, err error) {
	if cached := authCache.lookup(header); cached != nil {
		return cached, nil
	}
	for _, method := range authMethods {
		authed, _ := method(header)
		if authed == nil {
			continue
		}
		authCache.add(header, authed)
		return authed, nil
	}
	return nil, errors.New(e.InvalidAuth)
}
Fixed a typo to make the user GET APIs work
// Package auth implements http request authentication
package auth
import (
"errors"
"github.com/MG-RAST/AWE/lib/auth/basic"
"github.com/MG-RAST/AWE/lib/conf"
e "github.com/MG-RAST/AWE/lib/errors"
"github.com/MG-RAST/AWE/lib/user"
)
// authCache caches successfully authenticated users keyed by their auth header.
var authCache cache

// authMethods holds the registered authentication functions, tried in order.
var authMethods []func(string) (*user.User, error)
// Initialize sets up the in-memory auth cache and registers the
// authentication methods enabled by configuration.
func Initialize() {
	authCache = cache{m: make(map[string]cacheValue)}
	authMethods = []func(string) (*user.User, error){}
	// register basic auth only when configured
	if conf.AUTH_TYPE == "basic" {
		authMethods = append(authMethods, basic.Auth)
	}
}
// Authenticate resolves the auth header to a user. Cached results are
// returned immediately; otherwise each registered auth method is tried in
// order and the first success is cached. Returns an InvalidAuth error when
// no method accepts the header.
func Authenticate(header string) (u *user.User, err error) {
	if hit := authCache.lookup(header); hit != nil {
		return hit, nil
	}
	for _, tryAuth := range authMethods {
		if candidate, _ := tryAuth(header); candidate != nil {
			authCache.add(header, candidate)
			return candidate, nil
		}
	}
	return nil, errors.New(e.InvalidAuth)
}
|
/*
Copyright 2016 Christian Grabowski All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pipeline
import (
"log"
git "gopkg.in/libgit2/git2go.v22"
)
// build compiles and tests a single service, optionally verifying it for
// deployment, then reports its index on done (or the first failure on
// errChan).
func build(srv *DepService, index string, done chan string, errChan chan error, shouldDeploy *bool) {
	if err := srv.build.execBuild(); err != nil {
		errChan <- err
		return
	}
	log.Println("Run tests")
	if err := RunTests(srv.build); err != nil {
		errChan <- err
		return
	}
	log.Println("Tests done")
	if *shouldDeploy {
		if err := check(srv.build); err != nil {
			errChan <- err
			return
		}
		srv.build.shouldBuild = false
	}
	done <- index
}
// runServiceBuild builds every service in srvs that changed (or all, when
// testAll is set) concurrently, then recursively builds the children of each
// finished service. It returns the first error encountered, or nil.
func runServiceBuild(srvs map[string]*DepService, testAll, shouldDeploy *bool) error {
	log.Println("building services")
	doneChan := make(chan string)
	errChan := make(chan error)
	buildTotal := 0
	for i := range srvs {
		log.Println("building ", srvs[i].build.conf.Name)
		if srvs[i].build.shouldBuild || *testAll {
			buildTotal++
			go build(srvs[i], i, doneChan, errChan, shouldDeploy)
		}
	}
	total := 0
	if buildTotal > 0 {
		for {
			select {
			case index := <-doneChan:
				total++
				if len(srvs[index].Children) > 0 {
					// BUG FIX: the error from building child services was
					// previously discarded; propagate it to the caller.
					if err := runServiceBuild(srvs[index].Children, testAll, shouldDeploy); err != nil {
						return err
					}
				}
				if total == buildTotal {
					return nil
				}
			case errMsg := <-errChan:
				if errMsg != nil {
					// NOTE(review): returning here leaves any still-running
					// build goroutines blocked on the unbuffered channels —
					// consider buffering doneChan/errChan to buildTotal.
					return errMsg
				}
			}
		}
	}
	return nil
}
// Run runs the build for all changed services, processing each dependency
// tree sequentially: traverse it to mark changed services, then build from
// the root down.
func Run(depTrees []*DepTree, repo *git.Repository, lastBuildCommit, currBuildCommit *string, testAll, shouldDeploy *bool) error {
	log.Println("run")
	for i, tree := range depTrees {
		if err := TraverseTree(tree.CurrNode, repo, lastBuildCommit, currBuildCommit); err != nil {
			return err
		}
		log.Println(i+1, "tree")
		rootMap := map[string]*DepService{"root": tree.CurrNode}
		if err := runServiceBuild(rootMap, testAll, shouldDeploy); err != nil {
			return err
		}
	}
	return nil
}
Each dependency tree now runs concurrently (#35)
/*
Copyright 2016 Christian Grabowski All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pipeline
import (
"log"
git "gopkg.in/libgit2/git2go.v22"
)
// build compiles and tests a single service, optionally verifying it for
// deployment, then reports its index on done (or the first failure on errChan).
func build(srv *DepService, index string, done chan string, errChan chan error, shouldDeploy *bool) {
	err := srv.build.execBuild()
	if err != nil {
		errChan <- err
		return
	}
	log.Println("Run tests")
	err = RunTests(srv.build)
	if err != nil {
		errChan <- err
		return
	}
	log.Println("Tests done")
	// when not deploying, skip the deploy verification step entirely
	if !*shouldDeploy {
		done <- index
		return
	}
	err = check(srv.build)
	if err != nil {
		errChan <- err
		return
	}
	// built and verified; clear the flag so the service is not rebuilt
	srv.build.shouldBuild = false
	done <- index
	return
}
// runServiceBuild builds every service in srvs that changed (or all, when
// testAll is set) concurrently, then recursively builds the children of each
// finished service. It returns the first error encountered, or nil.
func runServiceBuild(srvs map[string]*DepService, testAll, shouldDeploy *bool) error {
	log.Println("building services")
	doneChan := make(chan string)
	errChan := make(chan error)
	buildTotal := 0
	for i := range srvs {
		log.Println("building ", srvs[i].build.conf.Name)
		if srvs[i].build.shouldBuild || *testAll {
			buildTotal++
			go build(srvs[i], i, doneChan, errChan, shouldDeploy)
		}
	}
	total := 0
	if buildTotal > 0 {
		for {
			select {
			case index := <-doneChan:
				total++
				if len(srvs[index].Children) > 0 {
					// BUG FIX: the error from building child services was
					// previously discarded; propagate it to the caller.
					if err := runServiceBuild(srvs[index].Children, testAll, shouldDeploy); err != nil {
						return err
					}
				}
				if total == buildTotal {
					return nil
				}
			case errMsg := <-errChan:
				if errMsg != nil {
					// NOTE(review): returning here leaves any still-running
					// build goroutines blocked on the unbuffered channels —
					// consider buffering doneChan/errChan to buildTotal.
					return errMsg
				}
			}
		}
	}
	return nil
}
// Run runs the build for all changed services, processing each dependency
// tree concurrently. It waits for every tree to finish and returns the
// first error encountered, or nil when all trees succeed.
func Run(depTrees []*DepTree, repo *git.Repository, lastBuildCommit, currBuildCommit *string, testAll, shouldDeploy *bool) error {
	log.Println("run")
	// buffered so every goroutine can report without blocking, even after
	// Run has already collected an error (prevents goroutine leaks)
	errChan := make(chan error, len(depTrees))
	for i := range depTrees {
		currNode := depTrees[i].CurrNode // per-iteration copy for the closure
		go func() {
			travErr := TraverseTree(currNode, repo, lastBuildCommit, currBuildCommit)
			if travErr != nil {
				errChan <- travErr
				// BUG FIX: previously the goroutine fell through and built
				// the tree anyway after reporting a traversal error.
				return
			}
			rootMap := make(map[string]*DepService)
			rootMap["root"] = currNode
			errChan <- runServiceBuild(rootMap, testAll, shouldDeploy)
		}()
	}
	// BUG FIX: wait for every tree instead of returning after the first
	// result, which ignored failures in the remaining trees.
	var firstErr error
	for range depTrees {
		if err := <-errChan; err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}
|
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package orchestrator
import (
"encoding/json"
"net/http"
apiv1 "cuttlefish/liboperator/api/v1"
"cuttlefish/liboperator/operator"
"github.com/gorilla/mux"
)
// SetupInstanceManagement registers the instance-management HTTP routes:
// POST /devices creates devices; GET /operations/{name} polls an operation.
func SetupInstanceManagement(router *mux.Router, im *InstanceManager, om OperationManager) {
	router.HandleFunc("/devices", func(w http.ResponseWriter, r *http.Request) {
		createDevices(w, r, im)
	}).Methods("POST")
	router.HandleFunc("/operations/{name}", func(w http.ResponseWriter, r *http.Request) {
		getOperation(w, r, om)
	}).Methods("GET")
}
// createDevices decodes a CreateCVDRequest from the request body and starts
// CVD creation, replying with the resulting operation as JSON.
func createDevices(w http.ResponseWriter, r *http.Request, im *InstanceManager) {
	var msg apiv1.CreateCVDRequest
	err := json.NewDecoder(r.Body).Decode(&msg)
	if err != nil {
		operator.ReplyJSONErr(w, operator.NewBadRequestError("Malformed JSON in request", err))
		return
	}
	op, err := im.CreateCVD(msg)
	if err != nil {
		operator.ReplyJSONErr(w, err)
		return
	}
	operator.ReplyJSONOK(w, BuildOperation(op))
}
// getOperation looks up the operation named in the URL path and replies
// with its API representation, or with a not-found error.
func getOperation(w http.ResponseWriter, r *http.Request, om OperationManager) {
	name := mux.Vars(r)["name"]
	op, err := om.Get(name)
	if err != nil {
		operator.ReplyJSONErr(w, operator.NewNotFoundError("operation not found", err))
		return
	}
	operator.ReplyJSONOK(w, BuildOperation(op))
}
// BuildOperation converts an internal Operation into its API representation.
// The Result field is only populated for finished operations that failed.
func BuildOperation(op Operation) apiv1.Operation {
	result := apiv1.Operation{Name: op.Name, Done: op.Done}
	if op.Done && op.IsError() {
		result.Result = &apiv1.OperationResult{
			Error: &apiv1.ErrorMsg{op.Result.Error.ErrorMsg},
		}
	}
	return result
}
Use new resource path `/cvds` for instance management. (#178)
- Path `/devices` is already taken for webrtc streaming functionalities.
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package orchestrator
import (
"encoding/json"
"net/http"
apiv1 "cuttlefish/liboperator/api/v1"
"cuttlefish/liboperator/operator"
"github.com/gorilla/mux"
)
// SetupInstanceManagement registers the instance-management HTTP routes:
// POST /cvds creates devices (the /devices path is used by the webrtc
// streaming API); GET /operations/{name} polls an operation.
func SetupInstanceManagement(router *mux.Router, im *InstanceManager, om OperationManager) {
	router.HandleFunc("/cvds", func(w http.ResponseWriter, r *http.Request) {
		createDevices(w, r, im)
	}).Methods("POST")
	router.HandleFunc("/operations/{name}", func(w http.ResponseWriter, r *http.Request) {
		getOperation(w, r, om)
	}).Methods("GET")
}
// createDevices decodes a CreateCVDRequest from the request body and starts
// CVD creation, replying with the resulting operation as JSON.
func createDevices(w http.ResponseWriter, r *http.Request, im *InstanceManager) {
	var msg apiv1.CreateCVDRequest
	if decodeErr := json.NewDecoder(r.Body).Decode(&msg); decodeErr != nil {
		operator.ReplyJSONErr(w, operator.NewBadRequestError("Malformed JSON in request", decodeErr))
		return
	}
	op, err := im.CreateCVD(msg)
	if err != nil {
		operator.ReplyJSONErr(w, err)
		return
	}
	operator.ReplyJSONOK(w, BuildOperation(op))
}
// getOperation replies with the operation named in the URL path, or with a
// not-found error when it does not exist.
func getOperation(w http.ResponseWriter, r *http.Request, om OperationManager) {
	vars := mux.Vars(r)
	name := vars["name"]
	if op, err := om.Get(name); err != nil {
		operator.ReplyJSONErr(w, operator.NewNotFoundError("operation not found", err))
	} else {
		operator.ReplyJSONOK(w, BuildOperation(op))
	}
}
// BuildOperation converts an internal Operation into its API representation.
func BuildOperation(op Operation) apiv1.Operation {
	result := apiv1.Operation{
		Name: op.Name,
		Done: op.Done,
	}
	// a result payload is only attached to finished operations
	if !op.Done {
		return result
	}
	if op.IsError() {
		result.Result = &apiv1.OperationResult{
			Error: &apiv1.ErrorMsg{op.Result.Error.ErrorMsg},
		}
	}
	return result
}
|
// Copyright (c) Alex Ellis 2017. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
package builder
import (
"fmt"
"os"
"strings"
"github.com/openfaas/faas-cli/schema"
"github.com/openfaas/faas-cli/stack"
)
// AdditionalPackageBuildArg holds the special build-arg keyname for use with build-opts.
// Can also be passed as a build arg hence needs to be accessed from commands.
// Its value is a space-separated package list merged by buildFlagSlice.
const AdditionalPackageBuildArg = "ADDITIONAL_PACKAGE"
// BuildImage construct Docker image from function parameters.
// The image name may be suffixed with the Git SHA (tag == "sha") or with
// branch and SHA (tag == "branch"); "dockerfile" handlers build in place,
// other languages are assembled from their template into a build folder.
// shrinkwrap stops after preparing the folder without invoking docker.
func BuildImage(image string, handler string, functionName string, language string, nocache bool, squash bool, shrinkwrap bool, buildArgMap map[string]string, buildOptions []string, tag string) error {
	if stack.IsValidTemplate(language) {
		format := schema.DefaultFormat
		var version string
		if strings.ToLower(tag) == "sha" {
			version = GetGitSHA()
			if len(version) == 0 {
				return fmt.Errorf("cannot tag image with Git SHA as this is not a Git repository")
			}
			format = schema.SHAFormat
		}
		var branch string
		if strings.ToLower(tag) == "branch" {
			branch = GetGitBranch()
			if len(branch) == 0 {
				return fmt.Errorf("cannot tag image with Git branch and SHA as this is not a Git repository")
			}
			version = GetGitSHA()
			if len(version) == 0 {
				return fmt.Errorf("cannot tag image with Git SHA as this is not a Git repository")
			}
			format = schema.BranchAndSHAFormat
		}
		imageName := schema.BuildImageName(format, image, version, branch)
		var tempPath string
		if strings.ToLower(language) == "dockerfile" {
			// a raw Dockerfile builds straight from the handler directory
			tempPath = handler
			if shrinkwrap {
				tempPath = dockerBuildFolder(functionName, handler, language)
				fmt.Printf("%s shrink-wrapped to %s\n", functionName, tempPath)
				return nil
			}
			if err := ensureHandlerPath(handler); err != nil {
				return fmt.Errorf("building %s, %s is an invalid path", imageName, handler)
			}
			fmt.Printf("Building: %s with Dockerfile. Please wait..\n", imageName)
		} else {
			if err := ensureHandlerPath(handler); err != nil {
				return fmt.Errorf("building %s, %s is an invalid path", imageName, handler)
			}
			// language templates are assembled into a temporary build folder
			tempPath = createBuildTemplate(functionName, handler, language)
			fmt.Printf("Building: %s with %s template. Please wait..\n", imageName, language)
			if shrinkwrap {
				fmt.Printf("%s shrink-wrapped to %s\n", functionName, tempPath)
				return nil
			}
		}
		buildOptPackages, buildPackageErr := getBuildOptionPackages(buildOptions, language)
		if buildPackageErr != nil {
			return buildPackageErr
		}
		dockerBuildVal := dockerBuild{
			Image:            imageName,
			NoCache:          nocache,
			Squash:           squash,
			HTTPProxy:        os.Getenv("http_proxy"),
			HTTPSProxy:       os.Getenv("https_proxy"),
			BuildArgMap:      buildArgMap,
			BuildOptPackages: buildOptPackages,
		}
		spaceSafeCmdLine := getDockerBuildCommand(dockerBuildVal)
		ExecCommand(tempPath, spaceSafeCmdLine)
		fmt.Printf("Image: %s built.\n", imageName)
	} else {
		return fmt.Errorf("language template: %s not supported, build a custom Dockerfile", language)
	}
	return nil
}
// getDockerBuildCommand assembles the complete `docker build` argv for the
// given build configuration.
func getDockerBuildCommand(build dockerBuild) []string {
	args := []string{"docker", "build"}
	args = append(args, buildFlagSlice(build.NoCache, build.Squash, build.HTTPProxy, build.HTTPSProxy, build.BuildArgMap, build.BuildOptPackages)...)
	args = append(args, "-t", build.Image, ".")
	return args
}
// dockerBuild groups every input needed to compose a `docker build`
// command line.
type dockerBuild struct {
	Image            string
	Version          string // NOTE(review): not read by getDockerBuildCommand — confirm it is still needed
	NoCache          bool
	Squash           bool
	HTTPProxy        string
	HTTPSProxy       string
	BuildArgMap      map[string]string
	BuildOptPackages []string
}
// createBuildTemplate creates temporary build folder to perform a Docker build with language template:
// ./build/<fn>/ receives the template files and ./build/<fn>/function the user's handler.
func createBuildTemplate(functionName string, handler string, language string) string {
	tempPath := fmt.Sprintf("./build/%s/", functionName)
	fmt.Printf("Clearing temporary build folder: %s\n", tempPath)
	clearErr := os.RemoveAll(tempPath)
	if clearErr != nil {
		// non-fatal: the build proceeds even if the old folder could not be removed
		fmt.Printf("Error clearing temporary build folder %s\n", tempPath)
	}
	fmt.Printf("Preparing %s %s\n", handler+"/", tempPath+"function")
	functionPath := tempPath + "/function"
	mkdirErr := os.MkdirAll(functionPath, 0700)
	if mkdirErr != nil {
		fmt.Printf("Error creating path %s - %s.\n", functionPath, mkdirErr.Error())
	}
	// lay down the language template first
	CopyFiles("./template/"+language, tempPath)
	// Overlay in user-function
	CopyFiles(handler, functionPath)
	return tempPath
}
// dockerBuildFolder prepares ./build/<fn>/ for a plain-Dockerfile build by
// copying the handler contents into it.
func dockerBuildFolder(functionName string, handler string, language string) string {
	tempPath := fmt.Sprintf("./build/%s/", functionName)
	fmt.Printf("Clearing temporary build folder: %s\n", tempPath)
	clearErr := os.RemoveAll(tempPath)
	if clearErr != nil {
		// non-fatal: the build proceeds even if the old folder could not be removed
		fmt.Printf("Error clearing temporary build folder %s\n", tempPath)
	}
	fmt.Printf("Preparing %s %s\n", handler+"/", tempPath)
	// Both Dockerfile and dockerfile are accepted
	if language == "Dockerfile" {
		language = "dockerfile"
	}
	CopyFiles(handler, tempPath)
	return tempPath
}
// buildFlagSlice converts the build settings into docker CLI flags. The
// AdditionalPackageBuildArg build-arg is treated specially: its value is
// merged into buildOptionPackages instead of being passed through directly.
func buildFlagSlice(nocache bool, squash bool, httpProxy string, httpsProxy string, buildArgMap map[string]string, buildOptionPackages []string) []string {
	var flags []string
	if nocache {
		flags = append(flags, "--no-cache")
	}
	if squash {
		flags = append(flags, "--squash")
	}
	if len(httpProxy) > 0 {
		flags = append(flags, "--build-arg", fmt.Sprintf("http_proxy=%s", httpProxy))
	}
	if len(httpsProxy) > 0 {
		flags = append(flags, "--build-arg", fmt.Sprintf("https_proxy=%s", httpsProxy))
	}
	for key, value := range buildArgMap {
		if key == AdditionalPackageBuildArg {
			// folded into the single ADDITIONAL_PACKAGE build-arg below
			buildOptionPackages = append(buildOptionPackages, strings.Split(value, " ")...)
			continue
		}
		flags = append(flags, "--build-arg", fmt.Sprintf("%s=%s", key, value))
	}
	if len(buildOptionPackages) > 0 {
		buildOptionPackages = deDuplicate(buildOptionPackages)
		flags = append(flags, "--build-arg", fmt.Sprintf("%s=%s", AdditionalPackageBuildArg, strings.Join(buildOptionPackages, " ")))
	}
	return flags
}
// ensureHandlerPath verifies that the handler path exists on disk,
// returning the stat error when it does not.
func ensureHandlerPath(handler string) error {
	_, err := os.Stat(handler)
	return err
}
// getBuildOptionPackages resolves the requested build-option names for the
// given language into the union of their package lists, erroring when any
// requested option is not declared in the language's template.yml.
func getBuildOptionPackages(requestedBuildOptions []string, language string) ([]string, error) {
	if len(requestedBuildOptions) == 0 {
		return nil, nil
	}
	availableBuildOptions, err := getBuildOptionsFor(language)
	if err != nil {
		return nil, err
	}
	buildPackages, allFound := getPackages(availableBuildOptions, requestedBuildOptions)
	if !allFound {
		return nil, fmt.Errorf("Error: You're using a build option unavailable for %s. Please check /template/%s/template.yml for supported build options", language, language)
	}
	return buildPackages, nil
}
// getBuildOptionsFor loads the build options declared in
// ./template/<language>/template.yml, returning an empty slice (plus the
// stat/parse error, when any) if the file is missing or unreadable.
func getBuildOptionsFor(language string) ([]stack.BuildOption, error) {
	buildOptions := []stack.BuildOption{}
	pathToTemplateYAML := "./template/" + language + "/template.yml"
	if _, statErr := os.Stat(pathToTemplateYAML); os.IsNotExist(statErr) {
		return buildOptions, statErr
	}
	parsedLangTemplate, err := stack.ParseYAMLForLanguageTemplate(pathToTemplateYAML)
	if err != nil {
		return buildOptions, err
	}
	if parsedLangTemplate != nil {
		buildOptions = parsedLangTemplate.BuildOptions
	}
	return buildOptions, nil
}
// getPackages returns the de-duplicated union of packages for every
// requested build option. The boolean result reports whether every
// requested option was found among the available ones; on the first miss
// the packages collected so far are returned with false.
func getPackages(availableBuildOptions []stack.BuildOption, requestedBuildOptions []string) ([]string, bool) {
	var buildPackages []string
	for _, requestedOption := range requestedBuildOptions {
		requestedOptionAvailable := false
		for _, availableOption := range availableBuildOptions {
			if availableOption.Name == requestedOption {
				buildPackages = append(buildPackages, availableOption.Packages...)
				requestedOptionAvailable = true
				break
			}
		}
		// idiomatic negation instead of comparing against `false`
		if !requestedOptionAvailable {
			return buildPackages, false
		}
	}
	return deDuplicate(buildPackages), true
}
// deDuplicate returns buildOptPackages with duplicates removed, keeping the
// first occurrence of each package and preserving order.
func deDuplicate(buildOptPackages []string) []string {
	seen := make(map[string]bool, len(buildOptPackages))
	unique := []string{}
	for _, pkg := range buildOptPackages {
		if seen[pkg] {
			continue
		}
		seen[pkg] = true
		unique = append(unique, pkg)
	}
	return unique
}
Fixes issue with faas-cli build for same folder
This change skips the `build` folder in the copyDir
function to avoid recursion while copying files.
Fixes: #478
Signed-off-by: Vivek Singh <0309897457bdbaf8410ee5929728bb5df02fed57@yahoo.in>
// Copyright (c) Alex Ellis 2017. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
package builder
import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"

	"github.com/openfaas/faas-cli/schema"
	"github.com/openfaas/faas-cli/stack"
)
// AdditionalPackageBuildArg holds the special build-arg keyname for use with build-opts.
// Can also be passed as a build arg hence needs to be accessed from commands.
// Its value is a space-separated package list merged by buildFlagSlice.
const AdditionalPackageBuildArg = "ADDITIONAL_PACKAGE"
// BuildImage construct Docker image from function parameters.
// The image name may be suffixed with the Git SHA (tag == "sha") or with
// branch and SHA (tag == "branch"); "dockerfile" handlers build in place,
// other languages are assembled from their template into a build folder.
// shrinkwrap stops after preparing the folder without invoking docker.
func BuildImage(image string, handler string, functionName string, language string, nocache bool, squash bool, shrinkwrap bool, buildArgMap map[string]string, buildOptions []string, tag string) error {
	if stack.IsValidTemplate(language) {
		format := schema.DefaultFormat
		var version string
		if strings.ToLower(tag) == "sha" {
			version = GetGitSHA()
			if len(version) == 0 {
				return fmt.Errorf("cannot tag image with Git SHA as this is not a Git repository")
			}
			format = schema.SHAFormat
		}
		var branch string
		if strings.ToLower(tag) == "branch" {
			branch = GetGitBranch()
			if len(branch) == 0 {
				return fmt.Errorf("cannot tag image with Git branch and SHA as this is not a Git repository")
			}
			version = GetGitSHA()
			if len(version) == 0 {
				return fmt.Errorf("cannot tag image with Git SHA as this is not a Git repository")
			}
			format = schema.BranchAndSHAFormat
		}
		imageName := schema.BuildImageName(format, image, version, branch)
		var tempPath string
		if strings.ToLower(language) == "dockerfile" {
			// a raw Dockerfile builds straight from the handler directory
			tempPath = handler
			if shrinkwrap {
				tempPath = dockerBuildFolder(functionName, handler, language)
				fmt.Printf("%s shrink-wrapped to %s\n", functionName, tempPath)
				return nil
			}
			if err := ensureHandlerPath(handler); err != nil {
				return fmt.Errorf("building %s, %s is an invalid path", imageName, handler)
			}
			fmt.Printf("Building: %s with Dockerfile. Please wait..\n", imageName)
		} else {
			if err := ensureHandlerPath(handler); err != nil {
				return fmt.Errorf("building %s, %s is an invalid path", imageName, handler)
			}
			// language templates are assembled into a temporary build folder
			tempPath = createBuildTemplate(functionName, handler, language)
			fmt.Printf("Building: %s with %s template. Please wait..\n", imageName, language)
			if shrinkwrap {
				fmt.Printf("%s shrink-wrapped to %s\n", functionName, tempPath)
				return nil
			}
		}
		buildOptPackages, buildPackageErr := getBuildOptionPackages(buildOptions, language)
		if buildPackageErr != nil {
			return buildPackageErr
		}
		dockerBuildVal := dockerBuild{
			Image:            imageName,
			NoCache:          nocache,
			Squash:           squash,
			HTTPProxy:        os.Getenv("http_proxy"),
			HTTPSProxy:       os.Getenv("https_proxy"),
			BuildArgMap:      buildArgMap,
			BuildOptPackages: buildOptPackages,
		}
		spaceSafeCmdLine := getDockerBuildCommand(dockerBuildVal)
		ExecCommand(tempPath, spaceSafeCmdLine)
		fmt.Printf("Image: %s built.\n", imageName)
	} else {
		return fmt.Errorf("language template: %s not supported, build a custom Dockerfile", language)
	}
	return nil
}
// getDockerBuildCommand composes the final `docker build` argv from the
// build configuration.
func getDockerBuildCommand(build dockerBuild) []string {
	flagSlice := buildFlagSlice(build.NoCache, build.Squash, build.HTTPProxy, build.HTTPSProxy, build.BuildArgMap, build.BuildOptPackages)
	command := []string{"docker", "build"}
	command = append(command, flagSlice...)
	command = append(command, "-t", build.Image, ".")
	return command
}
// dockerBuild groups every input needed to compose a `docker build`
// command line.
type dockerBuild struct {
	Image            string
	Version          string // NOTE(review): not read by getDockerBuildCommand — confirm it is still needed
	NoCache          bool
	Squash           bool
	HTTPProxy        string
	HTTPSProxy       string
	BuildArgMap      map[string]string
	BuildOptPackages []string
}
// createBuildTemplate creates temporary build folder to perform a Docker
// build with a language template: ./build/<fn>/ receives the template files
// and ./build/<fn>/function the user's handler, skipping any "build" folder
// so that build output is never copied into itself.
func createBuildTemplate(functionName string, handler string, language string) string {
	tempPath := fmt.Sprintf("./build/%s/", functionName)
	fmt.Printf("Clearing temporary build folder: %s\n", tempPath)
	clearErr := os.RemoveAll(tempPath)
	if clearErr != nil {
		// non-fatal: the build proceeds even if the old folder could not be removed
		fmt.Printf("Error clearing temporary build folder %s\n", tempPath)
	}
	fmt.Printf("Preparing %s %s\n", handler+"/", tempPath+"function")
	functionPath := tempPath + "/function"
	mkdirErr := os.MkdirAll(functionPath, 0700)
	if mkdirErr != nil {
		fmt.Printf("Error creating path %s - %s.\n", functionPath, mkdirErr.Error())
	}
	CopyFiles("./template/"+language, tempPath)
	// Overlay in user-function, entry by entry, skipping the build folder
	infos, readErr := ioutil.ReadDir(handler)
	if readErr != nil {
		fmt.Printf("Error reading the handler %s - %s.\n", handler, readErr.Error())
	}
	for _, info := range infos {
		switch info.Name() {
		case "build":
			fmt.Println("Skipping \"build\" folder")
			continue
		default:
			// BUG FIX: copy the entry from inside the handler directory;
			// the bare entry name resolved relative to the CWD instead of
			// the handler folder and copied the wrong (or no) files.
			CopyFiles(filepath.Join(handler, info.Name()), functionPath)
		}
	}
	return tempPath
}
// dockerBuildFolder prepares ./build/<fn>/ for a plain-Dockerfile build by
// copying the handler contents into it, skipping any "build" folder so that
// build output is never copied into itself.
func dockerBuildFolder(functionName string, handler string, language string) string {
	tempPath := fmt.Sprintf("./build/%s/", functionName)
	fmt.Printf("Clearing temporary build folder: %s\n", tempPath)
	clearErr := os.RemoveAll(tempPath)
	if clearErr != nil {
		// non-fatal: the build proceeds even if the old folder could not be removed
		fmt.Printf("Error clearing temporary build folder %s\n", tempPath)
	}
	fmt.Printf("Preparing %s %s\n", handler+"/", tempPath)
	// Both Dockerfile and dockerfile are accepted
	if language == "Dockerfile" {
		language = "dockerfile"
	}
	infos, readErr := ioutil.ReadDir(handler)
	if readErr != nil {
		fmt.Printf("Error reading the handler %s - %s.\n", handler, readErr.Error())
	}
	for _, info := range infos {
		switch info.Name() {
		case "build":
			fmt.Println("Skipping \"build\" folder")
			continue
		default:
			// BUG FIX: copy the entry from inside the handler directory;
			// the bare entry name resolved relative to the CWD instead of
			// the handler folder and copied the wrong (or no) files.
			CopyFiles(filepath.Join(handler, info.Name()), tempPath)
		}
	}
	return tempPath
}
// buildFlagSlice converts the build settings into docker CLI flags. The
// AdditionalPackageBuildArg build-arg is treated specially: its value is
// merged into buildOptionPackages instead of being passed through directly.
func buildFlagSlice(nocache bool, squash bool, httpProxy string, httpsProxy string, buildArgMap map[string]string, buildOptionPackages []string) []string {
	var spaceSafeBuildFlags []string
	if nocache {
		spaceSafeBuildFlags = append(spaceSafeBuildFlags, "--no-cache")
	}
	if squash {
		spaceSafeBuildFlags = append(spaceSafeBuildFlags, "--squash")
	}
	if len(httpProxy) > 0 {
		spaceSafeBuildFlags = append(spaceSafeBuildFlags, "--build-arg", fmt.Sprintf("http_proxy=%s", httpProxy))
	}
	if len(httpsProxy) > 0 {
		spaceSafeBuildFlags = append(spaceSafeBuildFlags, "--build-arg", fmt.Sprintf("https_proxy=%s", httpsProxy))
	}
	for k, v := range buildArgMap {
		if k != AdditionalPackageBuildArg {
			spaceSafeBuildFlags = append(spaceSafeBuildFlags, "--build-arg", fmt.Sprintf("%s=%s", k, v))
		} else {
			// folded into the single ADDITIONAL_PACKAGE build-arg below
			buildOptionPackages = append(buildOptionPackages, strings.Split(v, " ")...)
		}
	}
	if len(buildOptionPackages) > 0 {
		buildOptionPackages = deDuplicate(buildOptionPackages)
		spaceSafeBuildFlags = append(spaceSafeBuildFlags, "--build-arg", fmt.Sprintf("%s=%s", AdditionalPackageBuildArg, strings.Join(buildOptionPackages, " ")))
	}
	return spaceSafeBuildFlags
}
// ensureHandlerPath verifies that the handler path exists on disk,
// returning the stat error when it does not.
func ensureHandlerPath(handler string) error {
	_, err := os.Stat(handler)
	return err
}
// getBuildOptionPackages resolves the requested build-option names for the
// given language into the union of their package lists, erroring when any
// requested option is not declared in the language's template.yml.
func getBuildOptionPackages(requestedBuildOptions []string, language string) ([]string, error) {
	var buildPackages []string
	if len(requestedBuildOptions) > 0 {
		var allFound bool
		availableBuildOptions, err := getBuildOptionsFor(language)
		if err != nil {
			return nil, err
		}
		buildPackages, allFound = getPackages(availableBuildOptions, requestedBuildOptions)
		if !allFound {
			// at least one requested option is not declared in template.yml
			err = fmt.Errorf("Error: You're using a build option unavailable for %s. Please check /template/%s/template.yml for supported build options", language, language)
			return nil, err
		}
	}
	return buildPackages, nil
}
// getBuildOptionsFor loads the build options declared in
// ./template/<language>/template.yml, returning an empty slice (plus the
// stat/parse error, when any) if the file is missing or unreadable.
func getBuildOptionsFor(language string) ([]stack.BuildOption, error) {
	var buildOptions = []stack.BuildOption{}
	pathToTemplateYAML := "./template/" + language + "/template.yml"
	if _, err := os.Stat(pathToTemplateYAML); os.IsNotExist(err) {
		return buildOptions, err
	}
	var langTemplate stack.LanguageTemplate
	parsedLangTemplate, err := stack.ParseYAMLForLanguageTemplate(pathToTemplateYAML)
	if err != nil {
		return buildOptions, err
	}
	if parsedLangTemplate != nil {
		langTemplate = *parsedLangTemplate
		buildOptions = langTemplate.BuildOptions
	}
	return buildOptions, nil
}
// getPackages returns the de-duplicated union of packages for every
// requested build option. The boolean result reports whether every
// requested option was found among the available ones; on the first miss
// the packages collected so far are returned with false.
func getPackages(availableBuildOptions []stack.BuildOption, requestedBuildOptions []string) ([]string, bool) {
	var buildPackages []string
	for _, requestedOption := range requestedBuildOptions {
		requestedOptionAvailable := false
		for _, availableOption := range availableBuildOptions {
			if availableOption.Name == requestedOption {
				buildPackages = append(buildPackages, availableOption.Packages...)
				requestedOptionAvailable = true
				break
			}
		}
		// idiomatic negation instead of comparing against `false`
		if !requestedOptionAvailable {
			return buildPackages, false
		}
	}
	return deDuplicate(buildPackages), true
}
// deDuplicate filters buildOptPackages down to unique entries, preserving
// first-occurrence order.
func deDuplicate(buildOptPackages []string) []string {
	result := []string{}
	seen := map[string]bool{}
	for _, name := range buildOptPackages {
		if !seen[name] {
			seen[name] = true
			result = append(result, name)
		}
	}
	return result
}
|
package builder
// nginx: pinned upstream NGINX release and its download location.
const (
	NginxVersion           = "1.13.0"
	NginxDownloadURLPrefix = "https://nginx.org/download"
)

// pcre: pinned PCRE library release.
const (
	PcreVersion           = "8.40"
	PcreDownloadURLPrefix = "http://ftp.csx.cam.ac.uk/pub/software/programming/pcre"
)

// openssl: pinned OpenSSL release.
const (
	OpenSSLVersion           = "1.0.2k"
	OpenSSLDownloadURLPrefix = "https://www.openssl.org/source"
)

// zlib: pinned zlib release.
const (
	ZlibVersion           = "1.2.11"
	ZlibDownloadURLPrefix = "https://zlib.net/fossils"
)

// openResty: pinned OpenResty release.
const (
	OpenRestyVersion           = "1.11.2.3"
	OpenRestyDownloadURLPrefix = "https://openresty.org/download"
)

// tengine: pinned Tengine release.
const (
	TengineVersion           = "2.2.0"
	TengineDownloadURLPrefix = "http://tengine.taobao.org/download"
)

// Component enumeration identifying each downloadable piece; ComponentMax
// is the count of components (one past the last valid value).
const (
	ComponentNginx = iota
	ComponentOpenResty
	ComponentTengine
	ComponentPcre
	ComponentOpenSSL
	ComponentZlib
	ComponentMax
)
Bump the default OpenSSL version to 1.0.2l.
package builder
// nginx: pinned upstream NGINX release and its download location.
const (
	NginxVersion           = "1.13.0"
	NginxDownloadURLPrefix = "https://nginx.org/download"
)

// pcre: pinned PCRE library release.
const (
	PcreVersion           = "8.40"
	PcreDownloadURLPrefix = "http://ftp.csx.cam.ac.uk/pub/software/programming/pcre"
)

// openssl: pinned OpenSSL release.
const (
	OpenSSLVersion           = "1.0.2l"
	OpenSSLDownloadURLPrefix = "https://www.openssl.org/source"
)

// zlib: pinned zlib release.
const (
	ZlibVersion           = "1.2.11"
	ZlibDownloadURLPrefix = "https://zlib.net/fossils"
)

// openResty: pinned OpenResty release.
const (
	OpenRestyVersion           = "1.11.2.3"
	OpenRestyDownloadURLPrefix = "https://openresty.org/download"
)

// tengine: pinned Tengine release.
const (
	TengineVersion           = "2.2.0"
	TengineDownloadURLPrefix = "http://tengine.taobao.org/download"
)

// Component enumeration identifying each downloadable piece; ComponentMax
// is the count of components (one past the last valid value).
const (
	ComponentNginx = iota
	ComponentOpenResty
	ComponentTengine
	ComponentPcre
	ComponentOpenSSL
	ComponentZlib
	ComponentMax
)
|
package buildkite
import (
"fmt"
"github.com/buildkite/agent/buildkite/logger"
"os"
"path/filepath"
"regexp"
"time"
)
// The Job struct uses strings for StartedAt and FinishedAt because
// if they were actual date objects, then when this struct is
// initialized they would have a default value of: 00:00:00.000000000.
// This causes problems for the Buildkite Agent API because it looks for
// the presence of values in these properties to determine if the build
// has finished.
type Job struct {
	// ID is the job's unique identifier, assigned by Buildkite.
	ID string

	// State is the job's lifecycle state as reported by the API
	// (Run checks for "canceled" to trigger cancellation).
	State string

	// Env holds the environment variables sent by Buildkite for this job.
	Env map[string]string

	// Output is the accumulated process output, streamed back to the API.
	Output string `json:"output,omitempty"`

	// HeaderTimes records a timestamp for each header line ("---", "+++",
	// "~~~") seen in the output.
	HeaderTimes []string `json:"header_times,omitempty"`

	// ExitStatus is the process exit status, copied from the Process once
	// it finishes.
	ExitStatus string `json:"exit_status,omitempty"`

	StartedAt string `json:"started_at,omitempty"`

	FinishedAt string `json:"finished_at,omitempty"`

	// If the job is currently being cancelled
	cancelled bool

	// The currently running process of the job
	process *Process
}
// String returns a one-line, human-readable summary of the job,
// implementing fmt.Stringer.
func (b Job) String() string {
	return fmt.Sprintf(
		"Job{ID: %s, State: %s, StartedAt: %s, FinishedAt: %s, Process: %s}",
		b.ID, b.State, b.StartedAt, b.FinishedAt, b.process,
	)
}
// JobNext asks the API for the next available job, decoding the response
// into a fresh Job value.
func (c *Client) JobNext() (*Job, error) {
	job := &Job{}
	err := c.Get(job, "jobs/next")
	return job, err
}
// JobFind fetches the job with the given id from the API.
func (c *Client) JobFind(id string) (*Job, error) {
	job := &Job{}
	err := c.Get(job, "jobs/"+id)
	return job, err
}
// JobAccept tells the API this agent accepts the given job. The same job
// pointer is returned for convenience.
func (c *Client) JobAccept(job *Job) (*Job, error) {
	err := c.Put(job, "jobs/"+job.ID+"/accept", job)
	return job, err
}
// JobUpdate sends the job's current state to the API and returns a fresh
// Job populated with the server's view of it.
func (c *Client) JobUpdate(job *Job) (*Job, error) {
	updatedJob := &Job{}
	err := c.Put(updatedJob, "jobs/"+job.ID, job)
	return updatedJob, err
}
// Kill cancels the job: it marks it as cancelled and kills the running
// process, if any. Calling Kill on an already-cancelled job is a no-op.
// It always returns nil.
func (j *Job) Kill() error {
	// Guard clause instead of the original empty then-branch.
	if j.cancelled {
		return nil
	}

	logger.Info("Cancelling job %s", j.ID)
	j.cancelled = true

	if j.process != nil {
		j.process.Kill()
	} else {
		logger.Error("No process to kill")
	}

	return nil
}
// Run executes the job's bootstrap script via the agent and reports
// progress to Buildkite: it marks the job started, streams output once a
// second while the process runs, records header-line timestamps, and
// finally posts the exit status, retrying until that last update succeeds.
// It always returns nil; failures are reflected in the job's state.
func (j *Job) Run(agent *Agent) error {
	logger.Info("Starting job %s", j.ID)

	// Create a clone of our jobs environment. We'll then set the
	// environment variables provided by the agent, which will override any
	// sent by Buildkite. The variables below should always take
	// precedence.
	env := make(map[string]string)
	for key, value := range j.Env {
		env[key] = value
	}

	// Add agent environment variables
	env["BUILDKITE_AGENT_ENDPOINT"] = agent.Client.URL
	env["BUILDKITE_AGENT_ACCESS_TOKEN"] = agent.Client.AuthorizationToken
	env["BUILDKITE_AGENT_VERSION"] = Version()
	env["BUILDKITE_AGENT_DEBUG"] = fmt.Sprintf("%t", logger.GetLevel() == logger.DEBUG)

	// We know the BUILDKITE_BIN_PATH dir, because it's the path to the
	// currently running file (there is only 1 binary)
	dir, _ := filepath.Abs(filepath.Dir(os.Args[0]))
	env["BUILDKITE_BIN_PATH"] = dir

	// Add misc options
	env["BUILDKITE_BUILD_PATH"] = agent.BuildPath
	env["BUILDKITE_HOOKS_PATH"] = agent.HooksPath
	env["BUILDKITE_AUTO_SSH_FINGERPRINT_VERIFICATION"] = fmt.Sprintf("%t", agent.AutoSSHFingerprintVerification)
	env["BUILDKITE_SCRIPT_EVAL"] = fmt.Sprintf("%t", agent.ScriptEval)

	// Convert the env map into a slice (which is what the script gear
	// needs)
	envSlice := []string{}
	for key, value := range env {
		envSlice = append(envSlice, fmt.Sprintf("%s=%s", key, value))
	}

	// Mark the build as started. A failure here is only logged (the
	// original silently discarded the error in an empty branch): we hope
	// the API starts working during the build, and the final update below
	// is retried until it succeeds.
	j.StartedAt = time.Now().UTC().Format(time.RFC3339)
	if _, err := agent.Client.JobUpdate(j); err != nil {
		logger.Warn("Problem with updating job %s (%s)", j.ID, err)
	}

	// This callback is called when the process starts
	startCallback := func(process *Process) {
		// Start a routine that will grab the output every few seconds and
		// send it back to Buildkite
		go func() {
			for process.Running {
				// Save the output to the job
				j.Output = process.Output()

				// Post the update to the API
				updatedJob, err := agent.Client.JobUpdate(j)
				if err != nil {
					// We don't really care if the job couldn't update at
					// this point. This is just a partial update. We'll just
					// let the job run and hopefully the host will fix
					// itself before we finish.
					logger.Warn("Problem with updating job %s (%s)", j.ID, err)
				} else if updatedJob.State == "canceled" {
					j.Kill()
				}

				// Sleep for 1 second
				time.Sleep(1000 * time.Millisecond)
			}
			logger.Debug("Routine that sends job updates has finished")
		}()
	}

	// The regular expression used to match headers
	headerRegexp, err := regexp.Compile("^(?:---|\\+\\+\\+|~~~)\\s(.+)?$")
	if err != nil {
		logger.Error("Failed to compile header regular expression (%T: %v)", err, err)
	}

	// This callback is called for every line that is output by the
	// process. Guard against headerRegexp being nil — if the compile above
	// ever failed, the original would have panicked here.
	lineCallback := func(process *Process, line string) {
		if headerRegexp != nil && headerRegexp.MatchString(line) {
			j.HeaderTimes = append(j.HeaderTimes, time.Now().UTC().Format(time.RFC3339))
		}
	}

	// Initialize our process to run
	process := InitProcess(agent.BootstrapScript, envSlice, agent.RunInPty, startCallback, lineCallback)

	// Store the process so we can cancel it later.
	j.process = process

	// Start the process. This will block until it finishes.
	err = process.Start()
	if err == nil {
		// Store the final output
		j.Output = process.Output()
	} else {
		j.Output = fmt.Sprintf("%s", err)
	}

	// Mark the build as finished
	j.FinishedAt = time.Now().UTC().Format(time.RFC3339)
	j.ExitStatus = j.process.ExitStatus

	// Keep trying this call until it works. This is the most important one.
	for {
		_, err = agent.Client.JobUpdate(j)
		if err == nil {
			break
		}
		logger.Error("Problem with updating final job information %s (%s)", j.ID, err)

		// Sleep for a while before retrying.
		time.Sleep(5 * time.Second)
	}

	logger.Info("Finished job %s", j.ID)

	return nil
}
Send times to Buildkite with nanosecond precision.
package buildkite
import (
"fmt"
"github.com/buildkite/agent/buildkite/logger"
"os"
"path/filepath"
"regexp"
"time"
)
// The Job struct uses strings for StartedAt and FinishedAt because
// if they were actual date objects, then when this struct is
// initialized they would have a default value of: 00:00:00.000000000.
// This causes problems for the Buildkite Agent API because it looks for
// the presence of values in these properties to determine if the build
// has finished.
type Job struct {
	// ID is the job's unique identifier, assigned by Buildkite.
	ID string

	// State is the job's lifecycle state as reported by the API
	// (Run checks for "canceled" to trigger cancellation).
	State string

	// Env holds the environment variables sent by Buildkite for this job.
	Env map[string]string

	// Output is the accumulated process output, streamed back to the API.
	Output string `json:"output,omitempty"`

	// HeaderTimes records a timestamp for each header line ("---", "+++",
	// "~~~") seen in the output.
	HeaderTimes []string `json:"header_times,omitempty"`

	// ExitStatus is the process exit status, copied from the Process once
	// it finishes.
	ExitStatus string `json:"exit_status,omitempty"`

	StartedAt string `json:"started_at,omitempty"`

	FinishedAt string `json:"finished_at,omitempty"`

	// If the job is currently being cancelled
	cancelled bool

	// The currently running process of the job
	process *Process
}
// String returns a one-line, human-readable summary of the job,
// implementing fmt.Stringer.
func (b Job) String() string {
	return fmt.Sprintf(
		"Job{ID: %s, State: %s, StartedAt: %s, FinishedAt: %s, Process: %s}",
		b.ID, b.State, b.StartedAt, b.FinishedAt, b.process,
	)
}
// JobNext asks the API for the next available job, decoding the response
// into a fresh Job value.
func (c *Client) JobNext() (*Job, error) {
	job := &Job{}
	err := c.Get(job, "jobs/next")
	return job, err
}
// JobFind fetches the job with the given id from the API.
func (c *Client) JobFind(id string) (*Job, error) {
	job := &Job{}
	err := c.Get(job, "jobs/"+id)
	return job, err
}
// JobAccept tells the API this agent accepts the given job. The same job
// pointer is returned for convenience.
func (c *Client) JobAccept(job *Job) (*Job, error) {
	err := c.Put(job, "jobs/"+job.ID+"/accept", job)
	return job, err
}
// JobUpdate sends the job's current state to the API and returns a fresh
// Job populated with the server's view of it.
func (c *Client) JobUpdate(job *Job) (*Job, error) {
	updatedJob := &Job{}
	err := c.Put(updatedJob, "jobs/"+job.ID, job)
	return updatedJob, err
}
// Kill cancels the job: it marks it as cancelled and kills the running
// process, if any. Calling Kill on an already-cancelled job is a no-op.
// It always returns nil.
func (j *Job) Kill() error {
	// Guard clause instead of the original empty then-branch.
	if j.cancelled {
		return nil
	}

	logger.Info("Cancelling job %s", j.ID)
	j.cancelled = true

	if j.process != nil {
		j.process.Kill()
	} else {
		logger.Error("No process to kill")
	}

	return nil
}
// Run executes the job's bootstrap script via the agent and reports
// progress to Buildkite: it marks the job started, streams output once a
// second while the process runs, records header-line timestamps (with
// nanosecond precision), and finally posts the exit status, retrying until
// that last update succeeds. It always returns nil; failures are reflected
// in the job's state.
func (j *Job) Run(agent *Agent) error {
	logger.Info("Starting job %s", j.ID)

	// Create a clone of our jobs environment. We'll then set the
	// environment variables provided by the agent, which will override any
	// sent by Buildkite. The variables below should always take
	// precedence.
	env := make(map[string]string)
	for key, value := range j.Env {
		env[key] = value
	}

	// Add agent environment variables
	env["BUILDKITE_AGENT_ENDPOINT"] = agent.Client.URL
	env["BUILDKITE_AGENT_ACCESS_TOKEN"] = agent.Client.AuthorizationToken
	env["BUILDKITE_AGENT_VERSION"] = Version()
	env["BUILDKITE_AGENT_DEBUG"] = fmt.Sprintf("%t", logger.GetLevel() == logger.DEBUG)

	// We know the BUILDKITE_BIN_PATH dir, because it's the path to the
	// currently running file (there is only 1 binary)
	dir, _ := filepath.Abs(filepath.Dir(os.Args[0]))
	env["BUILDKITE_BIN_PATH"] = dir

	// Add misc options
	env["BUILDKITE_BUILD_PATH"] = agent.BuildPath
	env["BUILDKITE_HOOKS_PATH"] = agent.HooksPath
	env["BUILDKITE_AUTO_SSH_FINGERPRINT_VERIFICATION"] = fmt.Sprintf("%t", agent.AutoSSHFingerprintVerification)
	env["BUILDKITE_SCRIPT_EVAL"] = fmt.Sprintf("%t", agent.ScriptEval)

	// Convert the env map into a slice (which is what the script gear
	// needs)
	envSlice := []string{}
	for key, value := range env {
		envSlice = append(envSlice, fmt.Sprintf("%s=%s", key, value))
	}

	// Mark the build as started. A failure here is only logged (the
	// original silently discarded the error in an empty branch): we hope
	// the API starts working during the build, and the final update below
	// is retried until it succeeds.
	j.StartedAt = time.Now().UTC().Format(time.RFC3339Nano)
	if _, err := agent.Client.JobUpdate(j); err != nil {
		logger.Warn("Problem with updating job %s (%s)", j.ID, err)
	}

	// This callback is called when the process starts
	startCallback := func(process *Process) {
		// Start a routine that will grab the output every few seconds and
		// send it back to Buildkite
		go func() {
			for process.Running {
				// Save the output to the job
				j.Output = process.Output()

				// Post the update to the API
				updatedJob, err := agent.Client.JobUpdate(j)
				if err != nil {
					// We don't really care if the job couldn't update at
					// this point. This is just a partial update. We'll just
					// let the job run and hopefully the host will fix
					// itself before we finish.
					logger.Warn("Problem with updating job %s (%s)", j.ID, err)
				} else if updatedJob.State == "canceled" {
					j.Kill()
				}

				// Sleep for 1 second
				time.Sleep(1000 * time.Millisecond)
			}
			logger.Debug("Routine that sends job updates has finished")
		}()
	}

	// The regular expression used to match headers
	headerRegexp, err := regexp.Compile("^(?:---|\\+\\+\\+|~~~)\\s(.+)?$")
	if err != nil {
		logger.Error("Failed to compile header regular expression (%T: %v)", err, err)
	}

	// This callback is called for every line that is output by the
	// process. Guard against headerRegexp being nil — if the compile above
	// ever failed, the original would have panicked here.
	lineCallback := func(process *Process, line string) {
		if headerRegexp != nil && headerRegexp.MatchString(line) {
			j.HeaderTimes = append(j.HeaderTimes, time.Now().UTC().Format(time.RFC3339Nano))
		}
	}

	// Initialize our process to run
	process := InitProcess(agent.BootstrapScript, envSlice, agent.RunInPty, startCallback, lineCallback)

	// Store the process so we can cancel it later.
	j.process = process

	// Start the process. This will block until it finishes.
	err = process.Start()
	if err == nil {
		// Store the final output
		j.Output = process.Output()
	} else {
		j.Output = fmt.Sprintf("%s", err)
	}

	// Mark the build as finished
	j.FinishedAt = time.Now().UTC().Format(time.RFC3339Nano)
	j.ExitStatus = j.process.ExitStatus

	// Keep trying this call until it works. This is the most important one.
	for {
		_, err = agent.Client.JobUpdate(j)
		if err == nil {
			break
		}
		logger.Error("Problem with updating final job information %s (%s)", j.ID, err)

		// Sleep for a while before retrying.
		time.Sleep(5 * time.Second)
	}

	logger.Info("Finished job %s", j.ID)

	return nil
}
|
package api
import (
"fmt"
"github.com/grafana/grafana/pkg/api/dtos"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/services/dashboards"
"github.com/grafana/grafana/pkg/services/guardian"
"github.com/grafana/grafana/pkg/util"
)
// GetFolders returns the folders visible to the signed-in user, limited by
// the "limit" query parameter.
func GetFolders(c *models.ReqContext) Response {
	svc := dashboards.NewFolderService(c.OrgId, c.SignedInUser)
	folders, err := svc.GetFolders(c.QueryInt64("limit"))
	if err != nil {
		return toFolderError(err)
	}

	hits := make([]dtos.FolderSearchHit, 0, len(folders))
	for _, folder := range folders {
		hits = append(hits, dtos.FolderSearchHit{
			Id:    folder.Id,
			Uid:   folder.Uid,
			Title: folder.Title,
		})
	}
	return JSON(200, hits)
}
// GetFolderByUID looks up a folder by its UID and returns it as a DTO with
// the caller's permissions attached.
func GetFolderByUID(c *models.ReqContext) Response {
	folder, err := dashboards.NewFolderService(c.OrgId, c.SignedInUser).GetFolderByUID(c.Params(":uid"))
	if err != nil {
		return toFolderError(err)
	}

	guard := guardian.New(folder.Id, c.OrgId, c.SignedInUser)
	return JSON(200, toFolderDto(guard, folder))
}
// GetFolderByID looks up a folder by its numeric id and returns it as a
// DTO with the caller's permissions attached.
func GetFolderByID(c *models.ReqContext) Response {
	folder, err := dashboards.NewFolderService(c.OrgId, c.SignedInUser).GetFolderByID(c.ParamsInt64(":id"))
	if err != nil {
		return toFolderError(err)
	}

	guard := guardian.New(folder.Id, c.OrgId, c.SignedInUser)
	return JSON(200, toFolderDto(guard, folder))
}
// CreateFolder creates a new folder from cmd. With the EditorsCanAdmin
// option enabled, the creating user is additionally granted admin rights
// on the new folder; failure to grant them is logged but not fatal.
func (hs *HTTPServer) CreateFolder(c *models.ReqContext, cmd models.CreateFolderCommand) Response {
	svc := dashboards.NewFolderService(c.OrgId, c.SignedInUser)
	if err := svc.CreateFolder(&cmd); err != nil {
		return toFolderError(err)
	}

	if hs.Cfg.EditorsCanAdmin {
		err := dashboards.MakeUserAdmin(hs.Bus, c.OrgId, c.SignedInUser.UserId, cmd.Result.Id, true)
		if err != nil {
			hs.log.Error("Could not make user admin", "folder", cmd.Result.Title, "user", c.SignedInUser.UserId, "error", err)
		}
	}

	guard := guardian.New(cmd.Result.Id, c.OrgId, c.SignedInUser)
	return JSON(200, toFolderDto(guard, cmd.Result))
}
// UpdateFolder updates the folder identified by the ":uid" URL parameter
// and returns the updated folder as a DTO.
func UpdateFolder(c *models.ReqContext, cmd models.UpdateFolderCommand) Response {
	svc := dashboards.NewFolderService(c.OrgId, c.SignedInUser)
	if err := svc.UpdateFolder(c.Params(":uid"), &cmd); err != nil {
		return toFolderError(err)
	}

	guard := guardian.New(cmd.Result.Id, c.OrgId, c.SignedInUser)
	return JSON(200, toFolderDto(guard, cmd.Result))
}
// DeleteFolder deletes the folder identified by the ":uid" URL parameter
// and returns a confirmation message with the folder's title.
func DeleteFolder(c *models.ReqContext) Response {
	folder, err := dashboards.NewFolderService(c.OrgId, c.SignedInUser).DeleteFolder(c.Params(":uid"))
	if err != nil {
		return toFolderError(err)
	}

	return JSON(200, util.DynMap{
		"title":   folder.Title,
		"message": fmt.Sprintf("Folder %s deleted", folder.Title),
	})
}
// toFolderDto converts a folder to its API DTO, resolving the caller's
// permissions through the guardian and the creator/updater login names
// (anonString when the folder has no recorded user id).
func toFolderDto(g guardian.DashboardGuardian, folder *models.Folder) dtos.Folder {
	canEdit, _ := g.CanEdit()
	canSave, _ := g.CanSave()
	canAdmin, _ := g.CanAdmin()

	creator, updater := anonString, anonString
	if folder.CreatedBy > 0 {
		creator = getUserLogin(folder.CreatedBy)
	}
	if folder.UpdatedBy > 0 {
		updater = getUserLogin(folder.UpdatedBy)
	}

	return dtos.Folder{
		Id:        folder.Id,
		Uid:       folder.Uid,
		Title:     folder.Title,
		Url:       folder.Url,
		HasAcl:    folder.HasAcl,
		CanSave:   canSave,
		CanEdit:   canEdit,
		CanAdmin:  canAdmin,
		CreatedBy: creator,
		Created:   folder.Created,
		UpdatedBy: updater,
		Updated:   folder.Updated,
		Version:   folder.Version,
	}
}
// toFolderError maps a folder-service error to an HTTP response: known
// validation errors become 400, access denial 403, missing folders 404,
// version conflicts 412, and anything else a 500.
func toFolderError(err error) Response {
	switch err {
	case models.ErrFolderTitleEmpty,
		models.ErrFolderSameNameExists,
		models.ErrFolderWithSameUIDExists,
		models.ErrDashboardTypeMismatch,
		models.ErrDashboardInvalidUid,
		models.ErrDashboardUidTooLong:
		return Error(400, err.Error(), nil)
	case models.ErrFolderAccessDenied:
		return Error(403, "Access denied", err)
	case models.ErrFolderNotFound:
		return JSON(404, util.DynMap{"status": "not-found", "message": models.ErrFolderNotFound.Error()})
	case models.ErrFolderVersionMismatch:
		return JSON(412, util.DynMap{"status": "version-mismatch", "message": models.ErrFolderVersionMismatch.Error()})
	}
	return Error(500, "Folder API error", err)
}
API: Recognize dashboard errors when saving a folder (#26499)
Signed-off-by: Arve Knudsen <ddb1ca96248d128f0719f3f79aee1e346dace4b9@gmail.com>
package api
import (
"errors"
"fmt"
"github.com/grafana/grafana/pkg/api/dtos"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/services/dashboards"
"github.com/grafana/grafana/pkg/services/guardian"
"github.com/grafana/grafana/pkg/util"
)
// GetFolders returns the folders visible to the signed-in user, limited by
// the "limit" query parameter.
func GetFolders(c *models.ReqContext) Response {
	svc := dashboards.NewFolderService(c.OrgId, c.SignedInUser)
	folders, err := svc.GetFolders(c.QueryInt64("limit"))
	if err != nil {
		return toFolderError(err)
	}

	hits := make([]dtos.FolderSearchHit, 0, len(folders))
	for _, folder := range folders {
		hits = append(hits, dtos.FolderSearchHit{
			Id:    folder.Id,
			Uid:   folder.Uid,
			Title: folder.Title,
		})
	}
	return JSON(200, hits)
}
// GetFolderByUID looks up a folder by its UID and returns it as a DTO with
// the caller's permissions attached.
func GetFolderByUID(c *models.ReqContext) Response {
	folder, err := dashboards.NewFolderService(c.OrgId, c.SignedInUser).GetFolderByUID(c.Params(":uid"))
	if err != nil {
		return toFolderError(err)
	}

	guard := guardian.New(folder.Id, c.OrgId, c.SignedInUser)
	return JSON(200, toFolderDto(guard, folder))
}
// GetFolderByID looks up a folder by its numeric id and returns it as a
// DTO with the caller's permissions attached.
func GetFolderByID(c *models.ReqContext) Response {
	folder, err := dashboards.NewFolderService(c.OrgId, c.SignedInUser).GetFolderByID(c.ParamsInt64(":id"))
	if err != nil {
		return toFolderError(err)
	}

	guard := guardian.New(folder.Id, c.OrgId, c.SignedInUser)
	return JSON(200, toFolderDto(guard, folder))
}
// CreateFolder creates a new folder from cmd. With the EditorsCanAdmin
// option enabled, the creating user is additionally granted admin rights
// on the new folder; failure to grant them is logged but not fatal.
func (hs *HTTPServer) CreateFolder(c *models.ReqContext, cmd models.CreateFolderCommand) Response {
	svc := dashboards.NewFolderService(c.OrgId, c.SignedInUser)
	if err := svc.CreateFolder(&cmd); err != nil {
		return toFolderError(err)
	}

	if hs.Cfg.EditorsCanAdmin {
		err := dashboards.MakeUserAdmin(hs.Bus, c.OrgId, c.SignedInUser.UserId, cmd.Result.Id, true)
		if err != nil {
			hs.log.Error("Could not make user admin", "folder", cmd.Result.Title, "user", c.SignedInUser.UserId, "error", err)
		}
	}

	guard := guardian.New(cmd.Result.Id, c.OrgId, c.SignedInUser)
	return JSON(200, toFolderDto(guard, cmd.Result))
}
// UpdateFolder updates the folder identified by the ":uid" URL parameter
// and returns the updated folder as a DTO.
func UpdateFolder(c *models.ReqContext, cmd models.UpdateFolderCommand) Response {
	svc := dashboards.NewFolderService(c.OrgId, c.SignedInUser)
	if err := svc.UpdateFolder(c.Params(":uid"), &cmd); err != nil {
		return toFolderError(err)
	}

	guard := guardian.New(cmd.Result.Id, c.OrgId, c.SignedInUser)
	return JSON(200, toFolderDto(guard, cmd.Result))
}
// DeleteFolder deletes the folder identified by the ":uid" URL parameter
// and returns a confirmation message with the folder's title.
func DeleteFolder(c *models.ReqContext) Response {
	folder, err := dashboards.NewFolderService(c.OrgId, c.SignedInUser).DeleteFolder(c.Params(":uid"))
	if err != nil {
		return toFolderError(err)
	}

	return JSON(200, util.DynMap{
		"title":   folder.Title,
		"message": fmt.Sprintf("Folder %s deleted", folder.Title),
	})
}
// toFolderDto converts a folder to its API DTO, resolving the caller's
// permissions through the guardian and the creator/updater login names
// (anonString when the folder has no recorded user id).
func toFolderDto(g guardian.DashboardGuardian, folder *models.Folder) dtos.Folder {
	canEdit, _ := g.CanEdit()
	canSave, _ := g.CanSave()
	canAdmin, _ := g.CanAdmin()

	creator, updater := anonString, anonString
	if folder.CreatedBy > 0 {
		creator = getUserLogin(folder.CreatedBy)
	}
	if folder.UpdatedBy > 0 {
		updater = getUserLogin(folder.UpdatedBy)
	}

	return dtos.Folder{
		Id:        folder.Id,
		Uid:       folder.Uid,
		Title:     folder.Title,
		Url:       folder.Url,
		HasAcl:    folder.HasAcl,
		CanSave:   canSave,
		CanEdit:   canEdit,
		CanAdmin:  canAdmin,
		CreatedBy: creator,
		Created:   folder.Created,
		UpdatedBy: updater,
		Updated:   folder.Updated,
		Version:   folder.Version,
	}
}
// toFolderError maps a folder/dashboard service error to an HTTP response.
// Typed dashboard errors carry their own status code; known sentinel
// errors map to 400/403/404/412; anything else becomes a 500.
func toFolderError(err error) Response {
	var dashboardErr models.DashboardErr
	if errors.As(err, &dashboardErr) {
		return Error(dashboardErr.StatusCode, err.Error(), err)
	}

	switch err {
	case models.ErrFolderTitleEmpty,
		models.ErrFolderSameNameExists,
		models.ErrFolderWithSameUIDExists,
		models.ErrDashboardTypeMismatch,
		models.ErrDashboardInvalidUid,
		models.ErrDashboardUidTooLong:
		return Error(400, err.Error(), nil)
	case models.ErrFolderAccessDenied:
		return Error(403, "Access denied", err)
	case models.ErrFolderNotFound:
		return JSON(404, util.DynMap{"status": "not-found", "message": models.ErrFolderNotFound.Error()})
	case models.ErrFolderVersionMismatch:
		return JSON(412, util.DynMap{"status": "version-mismatch", "message": models.ErrFolderVersionMismatch.Error()})
	}
	return Error(500, "Folder API error", err)
}
|
// Copyright (C) 2018 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build cgo
// +build sqlite_unlock_notify
package sqlite3
/*
#cgo CFLAGS: -DSQLITE_ENABLE_UNLOCK_NOTIFY
#include <stdlib.h>
#include <sqlite3-binding.h>
extern void unlock_notify_callback(void *arg, int argc);
*/
import "C"
import (
"fmt"
"math"
"sync"
"unsafe"
)
// unlock_notify_table maps integer handles to the channels of goroutines
// waiting in unlock_notify_wait. Handles (not Go pointers) are what cross
// the cgo boundary; the embedded Mutex guards seqnum and table.
type unlock_notify_table struct {
	sync.Mutex
	seqnum uint // next handle to hand out
	table  map[uint]chan struct{}
}

// unt is the package-global registry of unlock-notify waiters.
var unt unlock_notify_table = unlock_notify_table{table: make(map[uint]chan struct{})}
// add registers c in the table and returns the fresh handle under which
// unlock_notify_callback can later find it.
func (t *unlock_notify_table) add(c chan struct{}) uint {
	t.Lock()
	defer t.Unlock()
	handle := t.seqnum
	t.seqnum++
	t.table[handle] = c
	return handle
}
// remove drops the channel registered under handle h, if any.
func (t *unlock_notify_table) remove(h uint) {
	t.Lock()
	defer t.Unlock()
	delete(t.table, h)
}
// get returns the channel registered under handle h. It panics on an
// unknown handle, since that indicates an internal bookkeeping bug.
func (t *unlock_notify_table) get(h uint) chan struct{} {
	t.Lock()
	defer t.Unlock()
	c, ok := t.table[h]
	if !ok {
		// Fixed typo in the panic message: "unlcok" -> "unlock".
		panic(fmt.Sprintf("Non-existent key for unlock-notify channel: %d", h))
	}
	return c
}
// unlock_notify_callback is invoked by SQLite (via cgo) once the blocking
// connections release their locks. argv is a C array of argc pointers,
// each pointing at the one-element uint array that holds a waiter's
// handle; the corresponding channel is signalled for each.
//
//export unlock_notify_callback
func unlock_notify_callback(argv unsafe.Pointer, argc C.int) {
	for i := 0; i < int(argc); i++ {
		// Reinterpret argv as a maximally-sized Go array of *[1]uint so it
		// can be indexed. The original wrote unsafe.Sizeof(uintptr), which
		// does not compile (uintptr is a type, not a value); use the size
		// of a pointer value instead.
		parg := ((*(*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.uint)(nil))]*[1]uint)(argv))[i])
		arg := *parg
		h := arg[0]
		c := unt.get(h)
		c <- struct{}{}
	}
}
// unlock_notify_wait registers this connection with sqlite3_unlock_notify
// and blocks until unlock_notify_callback signals that the conflicting
// lock was released, returning SQLITE_OK; a registration failure from
// SQLite is returned as-is.
//
//export unlock_notify_wait
func unlock_notify_wait(db *C.sqlite3) C.int {
	// It has to be a bufferred channel to not block in sqlite_unlock_notify
	// as sqlite_unlock_notify could invoke the callback before it returns.
	c := make(chan struct{}, 1)
	defer close(c)

	// Register the channel under a fresh handle; the handle (not a Go
	// pointer) is what is handed to C.
	h := unt.add(c)
	defer unt.remove(h)

	// C-allocated slot for the handle, so the callback argument does not
	// point into Go memory.
	pargv := C.malloc(C.sizeof_uint)
	defer C.free(pargv)

	argv := (*[1]uint)(pargv)
	argv[0] = h
	if rv := C.sqlite3_unlock_notify(db, (*[0]byte)(C.unlock_notify_callback), unsafe.Pointer(pargv)); rv != C.SQLITE_OK {
		return rv
	}

	// Block until unlock_notify_callback signals us.
	<-c
	return C.SQLITE_OK
}
Fix build failure caused by an incorrect expression passed to unsafe.Sizeof
// Copyright (C) 2018 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build cgo
// +build sqlite_unlock_notify
package sqlite3
/*
#cgo CFLAGS: -DSQLITE_ENABLE_UNLOCK_NOTIFY
#include <stdlib.h>
#include <sqlite3-binding.h>
extern void unlock_notify_callback(void *arg, int argc);
*/
import "C"
import (
"fmt"
"math"
"sync"
"unsafe"
)
// unlock_notify_table maps integer handles to the channels of goroutines
// waiting in unlock_notify_wait. Handles (not Go pointers) are what cross
// the cgo boundary; the embedded Mutex guards seqnum and table.
type unlock_notify_table struct {
	sync.Mutex
	seqnum uint // next handle to hand out
	table  map[uint]chan struct{}
}

// unt is the package-global registry of unlock-notify waiters.
var unt unlock_notify_table = unlock_notify_table{table: make(map[uint]chan struct{})}
// add registers c in the table and returns the fresh handle under which
// unlock_notify_callback can later find it.
func (t *unlock_notify_table) add(c chan struct{}) uint {
	t.Lock()
	defer t.Unlock()
	handle := t.seqnum
	t.seqnum++
	t.table[handle] = c
	return handle
}
// remove drops the channel registered under handle h, if any.
func (t *unlock_notify_table) remove(h uint) {
	t.Lock()
	defer t.Unlock()
	delete(t.table, h)
}
// get returns the channel registered under handle h. It panics on an
// unknown handle, since that indicates an internal bookkeeping bug.
func (t *unlock_notify_table) get(h uint) chan struct{} {
	t.Lock()
	defer t.Unlock()
	c, ok := t.table[h]
	if !ok {
		// Fixed typo in the panic message: "unlcok" -> "unlock".
		panic(fmt.Sprintf("Non-existent key for unlock-notify channel: %d", h))
	}
	return c
}
// unlock_notify_callback is invoked by SQLite (via cgo) once the blocking
// connections release their locks. argv is a C array of argc pointers,
// each pointing at the one-element uint array that holds a waiter's
// handle; the corresponding channel is signalled for each.
//
//export unlock_notify_callback
func unlock_notify_callback(argv unsafe.Pointer, argc C.int) {
	for i := 0; i < int(argc); i++ {
		// Reinterpret argv as a maximally-sized Go array of *[1]uint so it
		// can be indexed; the size of a pointer value gives the element
		// stride.
		parg := ((*(*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.uint)(nil))]*[1]uint)(argv))[i])
		arg := *parg
		h := arg[0]
		c := unt.get(h)
		c <- struct{}{}
	}
}
// unlock_notify_wait registers this connection with sqlite3_unlock_notify
// and blocks until unlock_notify_callback signals that the conflicting
// lock was released, returning SQLITE_OK; a registration failure from
// SQLite is returned as-is.
//
//export unlock_notify_wait
func unlock_notify_wait(db *C.sqlite3) C.int {
	// It has to be a bufferred channel to not block in sqlite_unlock_notify
	// as sqlite_unlock_notify could invoke the callback before it returns.
	c := make(chan struct{}, 1)
	defer close(c)

	// Register the channel under a fresh handle; the handle (not a Go
	// pointer) is what is handed to C.
	h := unt.add(c)
	defer unt.remove(h)

	// C-allocated slot for the handle, so the callback argument does not
	// point into Go memory.
	pargv := C.malloc(C.sizeof_uint)
	defer C.free(pargv)

	argv := (*[1]uint)(pargv)
	argv[0] = h
	if rv := C.sqlite3_unlock_notify(db, (*[0]byte)(C.unlock_notify_callback), unsafe.Pointer(pargv)); rv != C.SQLITE_OK {
		return rv
	}

	// Block until unlock_notify_callback signals us.
	<-c
	return C.SQLITE_OK
}
|
// Playbook - http://play.golang.org/p/3wFl4lacjX
package main
import (
"bytes"
"crypto/aes"
"crypto/cipher"
//"crypto/rand"
"errors"
"fmt"
"github.com/gotamer/conv"
//"io"
)
// Pad appends PKCS#7-style padding so the result's length is a multiple of
// aes.BlockSize (an aligned input gains one full block). It also prints
// the input length to stdout — debug output kept from the original.
func Pad(src []byte) []byte {
	fmt.Println(len(src))
	n := aes.BlockSize - len(src)%aes.BlockSize
	return append(src, bytes.Repeat([]byte{byte(n)}, n)...)
}
// Unpad strips PKCS#7-style padding added by Pad. It returns an error when
// the padding is inconsistent, which typically happens when an incorrect
// encryption key was used.
func Unpad(src []byte) ([]byte, error) {
	length := len(src)
	if length == 0 {
		// The original indexed src[length-1] and panicked on empty input;
		// report it as a padding error instead.
		return nil, errors.New("unpad error. This could happen when incorrect encryption key is used")
	}
	unpadding := int(src[length-1])
	if unpadding > length {
		return nil, errors.New("unpad error. This could happen when incorrect encryption key is used")
	}
	return src[:(length - unpadding)], nil
}
// encryptCBC encrypts text with AES-CBC under key and returns the result
// as a string via conv.Bl2s.
//
// SECURITY(review): the IV is hard-coded ("abcdefghijklmnop") instead of
// random per message (the crypto/rand code is commented out), so equal
// plaintexts produce equal ciphertexts. Flagging rather than changing it:
// decryptCBC relies on the same fixed IV.
//
// NOTE(review): the input is never padded, so CryptBlocks will panic
// unless len(text) is a multiple of aes.BlockSize — the Pad helper above
// is not called here. TODO confirm intended.
func encryptCBC(key []byte, text string) (string, error) {
	block, err := aes.NewCipher(key)
	//fmt.Println(block)
	if err != nil {
		return "", err
	}
	msg := []byte(text)
	//fmt.Println(len(msg))
	// The first BlockSize bytes of ciphertext stay zero — the slot
	// originally intended for a random IV.
	ciphertext := make([]byte, aes.BlockSize+len(msg))
	//iv := ciphertext[:aes.BlockSize]
	// if _, err := io.ReadFull(rand.Reader, iv); err != nil {
	// return "", err
	// }
	iv := []byte("abcdefghijklmnop")
	cbc := cipher.NewCBCEncrypter(block, iv)
	cbc.CryptBlocks(ciphertext[aes.BlockSize:], msg)
	fmt.Println(ciphertext)
	finalMsg := conv.Bl2s(ciphertext)
	return finalMsg, nil
}
// decryptCBC decrypts text (as produced by encryptCBC) with AES-CBC.
// The first aes.BlockSize bytes of the input are skipped (the reserved IV
// prefix written by encryptCBC) and no unpadding is performed, matching
// the original behaviour.
//
// SECURITY(review): the IV is hard-coded and must match encryptCBC's; a
// random per-message IV should be used instead.
func decryptCBC(key []byte, text string) (string, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return "", err
	}
	decodedMsg := []byte(text)
	if (len(decodedMsg) % aes.BlockSize) != 0 {
		// Fixed the original message, which had a typo ("multipe") and
		// stated the relationship backwards.
		return "", errors.New("decoded message length must be a multiple of the block size")
	}
	iv := []byte("abcdefghijklmnop")
	msg := decodedMsg[aes.BlockSize:]
	fmt.Println(iv)
	cbc := cipher.NewCBCDecrypter(block, iv)
	cbc.CryptBlocks(msg, msg)
	// The original ended with a dead `if err != nil` check (err is
	// necessarily nil here); removed.
	return string(msg), nil
}
// main demonstrates a CBC round trip with a fixed 16-byte key.
func main() {
	key := []byte("omarleonardozamb")
	encrypted, _ := encryptCBC(key, "HelloWorldHolaMu")
	fmt.Println(encrypted)
	decrypted, _ := decryptCBC(key, encrypted)
	fmt.Println(decrypted) // Hello World
}
Run a test using data generated by an Arduino to corroborate the results
// Playbook - http://play.golang.org/p/3wFl4lacjX
package main
import (
"bytes"
"crypto/aes"
"crypto/cipher"
//"crypto/rand"
"errors"
"fmt"
"github.com/gotamer/conv"
//"io"
)
// Pad appends PKCS#7-style padding so the result's length is a multiple of
// aes.BlockSize (an aligned input gains one full block). It also prints
// the input length to stdout — debug output kept from the original.
func Pad(src []byte) []byte {
	fmt.Println(len(src))
	n := aes.BlockSize - len(src)%aes.BlockSize
	return append(src, bytes.Repeat([]byte{byte(n)}, n)...)
}
// Unpad strips PKCS#7-style padding added by Pad. It returns an error when
// the padding is inconsistent, which typically happens when an incorrect
// encryption key was used.
func Unpad(src []byte) ([]byte, error) {
	length := len(src)
	if length == 0 {
		// The original indexed src[length-1] and panicked on empty input;
		// report it as a padding error instead.
		return nil, errors.New("unpad error. This could happen when incorrect encryption key is used")
	}
	unpadding := int(src[length-1])
	if unpadding > length {
		return nil, errors.New("unpad error. This could happen when incorrect encryption key is used")
	}
	return src[:(length - unpadding)], nil
}
// encryptCBC encrypts text with AES-CBC under key and returns the result
// as a string via conv.Bl2s.
//
// SECURITY(review): the IV is hard-coded ("abcdefghijklmnop") instead of
// random per message (the crypto/rand code is commented out), so equal
// plaintexts produce equal ciphertexts. Flagging rather than changing it:
// decryptCBC relies on the same fixed IV.
//
// NOTE(review): the input is never padded, so CryptBlocks will panic
// unless len(text) is a multiple of aes.BlockSize — the Pad helper above
// is not called here. TODO confirm intended.
func encryptCBC(key []byte, text string) (string, error) {
	block, err := aes.NewCipher(key)
	//fmt.Println(block)
	if err != nil {
		return "", err
	}
	msg := []byte(text)
	//fmt.Println(len(msg))
	// The first BlockSize bytes of ciphertext stay zero — the slot
	// originally intended for a random IV.
	ciphertext := make([]byte, aes.BlockSize+len(msg))
	//iv := ciphertext[:aes.BlockSize]
	// if _, err := io.ReadFull(rand.Reader, iv); err != nil {
	// return "", err
	// }
	iv := []byte("abcdefghijklmnop")
	cbc := cipher.NewCBCEncrypter(block, iv)
	cbc.CryptBlocks(ciphertext[aes.BlockSize:], msg)
	fmt.Println(ciphertext)
	finalMsg := conv.Bl2s(ciphertext)
	return finalMsg, nil
}
// decryptCBC decrypts an AES-CBC ciphertext with key.
//
// NOTE(review): the text parameter is currently IGNORED — decodedMsg is
// hard-coded to a fixed 16-byte ciphertext (per the accompanying commit,
// data generated by an Arduino) to cross-check results. Restore the
// []byte(text) conversion to decrypt real input.
//
// SECURITY(review): the IV is hard-coded; see encryptCBC.
func decryptCBC(key []byte, text string) (string, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return "", err
	}
	// Hard-coded test vector replacing []byte(text).
	decodedMsg := []byte{165, 7, 155, 74, 5, 229, 106, 210, 117, 186, 190, 152, 215, 220, 227, 30}
	fmt.Println((decodedMsg))
	if (len(decodedMsg) % aes.BlockSize) != 0 {
		return "", errors.New("blocksize must be multipe of decoded message length")
	}
	//iv := decodedMsg[:aes.BlockSize]
	iv := []byte("abcdefghijklmnop")
	// Unlike the non-test variant, no IV prefix is skipped here: the
	// vector is a single raw ciphertext block.
	msg := decodedMsg
	fmt.Println(iv)
	cbc := cipher.NewCBCDecrypter(block, iv)
	cbc.CryptBlocks(msg, msg)
	//unpadMsg, err := Unpad(msg)
	unpadMsg := (msg)
	// err is still nil from aes.NewCipher above; this check is dead.
	if err != nil {
		return "", err
	}
	return string(unpadMsg), nil
}
// main demonstrates a CBC round trip with a fixed 16-byte key.
func main() {
	key := []byte("omarleonardozamb")
	encrypted, _ := encryptCBC(key, "HelloWorldHolaMu")
	fmt.Println(encrypted)
	decrypted, _ := decryptCBC(key, encrypted)
	fmt.Println(decrypted) // Hello World
}
|
package bytecode
import (
"errors"
)
// Raw is the raw bytecode, i.e. a list of bytes.
// If a byte has an argument, the next two bytes
// are converted into a single 16-bit integer:
//
// (a << 8) + b
//
// Where a is the first argument byte, and b is the
// second.
type Raw []byte

// Code is the "parsed" bytecode, i.e. a list of
// instructions, with their arguments.
type Code []instruction

// instruction is one decoded opcode.
type instruction struct {
	Code byte   // the opcode byte itself
	Arg  rune   // 16-bit argument; set only when the opcode takes one
	Name string // opcode name, taken from the Instructions table
}

// ErrOutOfBytes is thrown by Read when a byte
// which takes arguments isn't followed by at
// least two more bytes.
var ErrOutOfBytes = errors.New("bytecode: not enough bytes remaining")
// Read takes some raw bytecode and outputs the "parsed" bytecode as a
// Code struct. The only error it can return is ErrOutOfBytes, reported
// when an argument-taking opcode isn't followed by two more bytes.
func Read(raw Raw) (Code, error) {
	var code Code
	for index := 0; index < len(raw); index++ {
		opcode := raw[index]
		data := Instructions[opcode]
		instr := instruction{Code: opcode, Name: data.Name}

		if data.HasArg {
			if index+2 >= len(raw) {
				return code, ErrOutOfBytes
			}
			hi, lo := raw[index+1], raw[index+2]
			instr.Arg = (rune(hi) << 8) + rune(lo)
			index += 2
		}

		code = append(code, instr)
	}
	return code, nil
}
Add a comment
package bytecode
import (
"errors"
)
// Raw is the raw bytecode, i.e. a list of bytes.
// If a byte has an argument, the next two bytes
// are converted into a single 16-bit integer:
//
// (a << 8) + b
//
// Where a is the first argument byte, and b is the
// second.
type Raw []byte

// Code is the "parsed" bytecode, i.e. a list of
// instructions, with their arguments.
type Code []instruction

// instruction is one decoded opcode.
type instruction struct {
	Code byte   // the opcode byte itself
	Arg  rune   // 16-bit argument; set only when the opcode takes one
	Name string // opcode name, taken from the Instructions table
}

// ErrOutOfBytes is thrown by Read when a byte
// which takes arguments isn't followed by at
// least two more bytes.
var ErrOutOfBytes = errors.New("bytecode: not enough bytes remaining")
// Read takes some raw bytecode and outputs the "parsed" bytecode as a
// Code struct.
//
// If there is an error, it is ErrOutOfBytes, signifying there aren't
// enough bytes left after an instruction with arity > 0.
func Read(raw Raw) (Code, error) {
	var code Code
	for index := 0; index < len(raw); index++ {
		opcode := raw[index]
		data := Instructions[opcode]
		instr := instruction{Code: opcode, Name: data.Name}

		if data.HasArg {
			if index+2 >= len(raw) {
				return code, ErrOutOfBytes
			}
			hi, lo := raw[index+1], raw[index+2]
			instr.Arg = (rune(hi) << 8) + rune(lo)
			index += 2
		}

		code = append(code, instr)
	}
	return code, nil
}
|
package entity
import (
"github.com/jinzhu/gorm"
)
// Tag is a descriptive identifier given to ease searchability
type Tag struct {
gorm.Model
Value string `sql:"type:text" gorm:"unique;not null"`
Flags []*Flag `gorm:"many2many:flags_tags"`
}
Fix MySQL integration: a unique index cannot be created on an unbounded text column, so store Tag.Value as varchar(64) with an explicit unique index (idx_tag_value)
package entity
import (
"github.com/jinzhu/gorm"
)
// Tag is a descriptive identifier given to ease searchability
type Tag struct {
	gorm.Model
	// Value is bounded (varchar(64)) so the unique index idx_tag_value
	// can be created on MySQL, which limits index key length.
	Value string  `sql:"type:varchar(64);unique_index:idx_tag_value"`
	Flags []*Flag `gorm:"many2many:flags_tags;"` // join table flags_tags
}
|
/*
Copyright 2013 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fs
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"testing"
"time"
"camlistore.org/pkg/test"
)
var (
	// errmu guards lasterr.
	errmu sync.Mutex
	// lasterr, when non-nil, makes condSkip skip all subsequent tests.
	// NOTE(review): nothing in this chunk assigns lasterr — presumably it
	// is set elsewhere in the package; confirm.
	lasterr error
)
// condSkip skips the test unless the environment can run the FUSE
// tests: it requires darwin with osxfuse installed, and bails out
// early if some other test in the package has already failed.
func condSkip(t *testing.T) {
	errmu.Lock()
	defer errmu.Unlock()
	if lasterr != nil {
		t.Skipf("Skipping test; some other test already failed.")
	}
	if runtime.GOOS != "darwin" {
		t.Skipf("Skipping test on OS %q", runtime.GOOS)
	}
	// Skipf stops the test via runtime.Goexit, so past this point we are
	// guaranteed to be on darwin; the former `if runtime.GOOS == "darwin"`
	// wrapper was always true and has been removed.
	_, err := os.Stat("/Library/Filesystems/osxfusefs.fs/Support/mount_osxfusefs")
	if os.IsNotExist(err) {
		test.DependencyErrorOrSkip(t)
	} else if err != nil {
		t.Fatal(err)
	}
}
// mountEnv describes one live cammount FUSE mount under test.
type mountEnv struct {
	t          *testing.T
	mountPoint string      // temp directory where the FUSE filesystem is mounted
	process    *os.Process // the running cammount process
}
// Stat reads the named stats counter exported by the mounted filesystem
// under .camli_fs_stats and returns its integer value.
// Any read or parse failure fails the test immediately.
func (e *mountEnv) Stat(s *stat) int64 {
	file := filepath.Join(e.mountPoint, ".camli_fs_stats", s.name)
	slurp, err := ioutil.ReadFile(file)
	if err != nil {
		e.t.Fatal(err)
	}
	slurp = bytes.TrimSpace(slurp)
	v, err := strconv.ParseInt(string(slurp), 10, 64)
	if err != nil {
		e.t.Fatalf("unexpected value %q in file %s", slurp, file)
	}
	return v
}
// cammountTest mounts a cammount FUSE filesystem at a fresh temp
// directory, waits for the mount to appear, runs fn against it, and
// tears the mount down afterwards (asking cammount to quit, then
// force-killing and unmounting if it doesn't exit within 5 seconds).
func cammountTest(t *testing.T, fn func(env *mountEnv)) {
	// Mirror the test process's log output into the test log.
	dupLog := io.MultiWriter(os.Stderr, testLog{t})
	log.SetOutput(dupLog)
	defer log.SetOutput(os.Stderr)
	w := test.GetWorld(t)
	mountPoint, err := ioutil.TempDir("", "fs-test-mount")
	if err != nil {
		t.Fatal(err)
	}
	// VERBOSE_FUSE / VERBOSE_FUSE_STDERR control how chatty cammount is
	// and where its stderr goes.
	verbose := "false"
	var stderrDest io.Writer = ioutil.Discard
	if v, _ := strconv.ParseBool(os.Getenv("VERBOSE_FUSE")); v {
		verbose = "true"
		stderrDest = testLog{t}
	}
	if v, _ := strconv.ParseBool(os.Getenv("VERBOSE_FUSE_STDERR")); v {
		stderrDest = io.MultiWriter(stderrDest, os.Stderr)
	}
	mount := w.Cmd("cammount", "--debug="+verbose, mountPoint)
	mount.Stderr = stderrDest
	// Enables the .camli_fs_stats counters read by mountEnv.Stat.
	mount.Env = append(mount.Env, "CAMLI_TRACK_FS_STATS=1")
	stdin, err := mount.StdinPipe()
	if err != nil {
		t.Fatal(err)
	}
	if err := mount.Start(); err != nil {
		t.Fatal(err)
	}
	waitc := make(chan error, 1)
	go func() { waitc <- mount.Wait() }()
	defer func() {
		// Ask cammount to exit cleanly ("q" on stdin); on timeout, kill it
		// and unmount by force.
		log.Printf("Sending quit")
		stdin.Write([]byte("q\n"))
		select {
		case <-time.After(5 * time.Second):
			log.Printf("timeout waiting for cammount to finish")
			mount.Process.Kill()
			Unmount(mountPoint)
		case err := <-waitc:
			log.Printf("cammount exited: %v", err)
		}
		if !test.WaitFor(not(dirToBeFUSE(mountPoint)), 5*time.Second, 1*time.Second) {
			// It didn't unmount. Try again.
			Unmount(mountPoint)
		}
	}()
	if !test.WaitFor(dirToBeFUSE(mountPoint), 5*time.Second, 100*time.Millisecond) {
		t.Fatalf("error waiting for %s to be mounted", mountPoint)
	}
	fn(&mountEnv{
		t:          t,
		mountPoint: mountPoint,
		process:    mount.Process,
	})
}
// TestRoot verifies that the root of a fresh mount lists exactly the
// expected top-level entries.
func TestRoot(t *testing.T) {
	condSkip(t)
	cammountTest(t, func(env *mountEnv) {
		f, err := os.Open(env.mountPoint)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		names, err := f.Readdirnames(-1)
		if err != nil {
			t.Fatal(err)
		}
		// Readdirnames order is unspecified; sort before comparing.
		sort.Strings(names)
		want := []string{"WELCOME.txt", "date", "recent", "roots", "sha1-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", "tag"}
		if !reflect.DeepEqual(names, want) {
			t.Errorf("root directory = %q; want %q", names, want)
		}
	})
}
// testLog is an io.Writer that forwards writes to a test's log.
type testLog struct {
	t *testing.T
}

// Write logs p (trimmed of surrounding whitespace) via t.Log and
// always reports the full length as written.
func (tl testLog) Write(p []byte) (n int, err error) {
	tl.t.Log(strings.TrimSpace(string(p)))
	return len(p), nil
}
// TestMutable exercises the mutable portion of the filesystem under
// roots/: creating a directory and file, appending to the file across
// several opens, verifying the read-only open optimization fires,
// checking the final contents, and deleting the file.
func TestMutable(t *testing.T) {
	condSkip(t)
	cammountTest(t, func(env *mountEnv) {
		rootDir := filepath.Join(env.mountPoint, "roots", "r")
		if err := os.MkdirAll(rootDir, 0755); err != nil {
			t.Fatalf("Failed to make roots/r dir: %v", err)
		}
		fi, err := os.Stat(rootDir)
		if err != nil || !fi.IsDir() {
			t.Fatalf("Stat of roots/r dir = %v, %v; want a directory", fi, err)
		}
		filename := filepath.Join(rootDir, "x")
		f, err := os.Create(filename)
		if err != nil {
			t.Fatalf("Create: %v", err)
		}
		if err := f.Close(); err != nil {
			t.Fatalf("Close: %v", err)
		}
		fi, err = os.Stat(filename)
		if err != nil || !fi.Mode().IsRegular() || fi.Size() != 0 {
			t.Fatalf("Stat of roots/r/x = %v, %v; want a 0-byte regular file", fi, err)
		}
		// Append in several separate opens to exercise the append path.
		for _, str := range []string{"foo, ", "bar\n", "another line.\n"} {
			f, err = os.OpenFile(filename, os.O_WRONLY|os.O_APPEND, 0644)
			if err != nil {
				t.Fatalf("OpenFile: %v", err)
			}
			if _, err := f.Write([]byte(str)); err != nil {
				t.Logf("Error with append: %v", err)
				t.Fatalf("Error appending %q to %s: %v", str, filename, err)
			}
			if err := f.Close(); err != nil {
				t.Fatal(err)
			}
		}
		// The read below should bump the read-only open counter by exactly one.
		ro0 := env.Stat(mutFileOpenRO)
		slurp, err := ioutil.ReadFile(filename)
		if err != nil {
			t.Fatal(err)
		}
		if env.Stat(mutFileOpenRO)-ro0 != 1 {
			t.Error("Read didn't trigger read-only path optimization.")
		}
		const want = "foo, bar\nanother line.\n"
		fi, err = os.Stat(filename)
		// Fixed: the format verbs and arguments were misaligned — err was
		// printed with %d and len(want) with %v (a go vet error).
		if err != nil || !fi.Mode().IsRegular() || fi.Size() != int64(len(want)) {
			t.Errorf("Stat of roots/r/x = %v, %v; want a %d byte regular file", fi, err, len(want))
		}
		if got := string(slurp); got != want {
			t.Fatalf("contents = %q; want %q", got, want)
		}
		// Delete it.
		if err := os.Remove(filename); err != nil {
			t.Fatal(err)
		}
		// Gone?
		if _, err := os.Stat(filename); !os.IsNotExist(err) {
			t.Fatalf("expected file to be gone; got stat err = %v instead", err)
		}
	})
}
// brokenTest skips known-broken tests unless RUN_BROKEN_TESTS=1 is set
// in the environment.
func brokenTest(t *testing.T) {
	if v, _ := strconv.ParseBool(os.Getenv("RUN_BROKEN_TESTS")); !v {
		t.Skipf("Skipping broken tests without RUN_BROKEN_TESTS=1")
	}
}
// TestFinderCopy drives the macOS Finder via AppleScript to copy a
// temp file into the mounted filesystem, then verifies the copy's size
// and contents.
func TestFinderCopy(t *testing.T) {
	if runtime.GOOS != "darwin" {
		t.Skipf("Skipping Darwin-specific test.")
	}
	condSkip(t)
	cammountTest(t, func(env *mountEnv) {
		f, err := ioutil.TempFile("", "finder-copy-file")
		if err != nil {
			t.Fatal(err)
		}
		defer os.Remove(f.Name())
		want := []byte("Some data for Finder to copy.")
		if _, err := f.Write(want); err != nil {
			t.Fatal(err)
		}
		if err := f.Close(); err != nil {
			t.Fatal(err)
		}
		destDir := filepath.Join(env.mountPoint, "roots", "r")
		if err := os.MkdirAll(destDir, 0755); err != nil {
			t.Fatal(err)
		}
		cmd := exec.Command("osascript")
		script := fmt.Sprintf(`
tell application "Finder"
copy file POSIX file %q to folder POSIX file %q
end tell
`, f.Name(), destDir)
		cmd.Stdin = strings.NewReader(script)
		if out, err := cmd.CombinedOutput(); err != nil {
			t.Fatalf("Error running AppleScript: %v, %s", err, out)
		} else {
			t.Logf("AppleScript said: %q", out)
		}
		destFile := filepath.Join(destDir, filepath.Base(f.Name()))
		fi, err := os.Stat(destFile)
		if err != nil {
			// Fixed: this was t.Errorf, which let execution continue and
			// dereference a nil fi on the next line, panicking the test.
			t.Fatalf("Stat = %v, %v", fi, err)
		}
		if fi.Size() != int64(len(want)) {
			t.Errorf("Dest stat size = %d; want %d", fi.Size(), len(want))
		}
		slurp, err := ioutil.ReadFile(destFile)
		if err != nil {
			t.Fatalf("ReadFile: %v", err)
		}
		if !bytes.Equal(slurp, want) {
			t.Errorf("Dest file = %q; want %q", slurp, want)
		}
	})
}
// TestTextEdit drives macOS TextEdit via AppleScript to open, rewrite,
// and save a file on the mounted filesystem, then verifies the new
// contents. Known broken; gated behind RUN_BROKEN_TESTS.
func TestTextEdit(t *testing.T) {
	if runtime.GOOS != "darwin" {
		t.Skipf("Skipping Darwin-specific test.")
	}
	condSkip(t)
	brokenTest(t)
	cammountTest(t, func(env *mountEnv) {
		var (
			testDir  = filepath.Join(env.mountPoint, "roots", "r")
			testFile = filepath.Join(testDir, "some-text-file.txt")
			content1 = []byte("Some text content.")
			content2 = []byte("Some replacement content.")
		)
		if err := os.MkdirAll(testDir, 0755); err != nil {
			t.Fatal(err)
		}
		if err := ioutil.WriteFile(testFile, content1, 0644); err != nil {
			t.Fatal(err)
		}
		cmd := exec.Command("osascript")
		script := fmt.Sprintf(`
tell application "TextEdit"
	activate
	open POSIX file %q
	tell front document
		set paragraph 1 to %q as text
		save
		close
	end tell
end tell
`, testFile, content2)
		cmd.Stdin = strings.NewReader(script)
		if out, err := cmd.CombinedOutput(); err != nil {
			t.Fatalf("Error running AppleScript: %v, %s", err, out)
		} else {
			t.Logf("AppleScript said: %q", out)
		}
		fi, err := os.Stat(testFile)
		if err != nil {
			// Fixed: this was t.Errorf, which let execution continue and
			// dereference a nil fi on the next line, panicking the test.
			t.Fatalf("Stat = %v, %v", fi, err)
		}
		if fi.Size() != int64(len(content2)) {
			t.Errorf("Stat size = %d; want %d", fi.Size(), len(content2))
		}
		slurp, err := ioutil.ReadFile(testFile)
		if err != nil {
			t.Fatalf("ReadFile: %v", err)
		}
		if !bytes.Equal(slurp, content2) {
			t.Errorf("File = %q; want %q", slurp, content2)
		}
	})
}
// not returns a predicate that is the logical negation of cond.
func not(cond func() bool) func() bool {
	return func() bool { return !cond() }
}
// dirToBeFUSE returns a predicate reporting whether dir is currently
// served by a FUSE filesystem, determined by scanning `df` output.
// On platforms other than darwin it always reports false.
func dirToBeFUSE(dir string) func() bool {
	return func() bool {
		out, err := exec.Command("df", dir).CombinedOutput()
		if err != nil {
			return false
		}
		return runtime.GOOS == "darwin" &&
			strings.Contains(string(out), "mount_osxfusefs@")
	}
}
fs: another failing test
Change-Id: If9d3a87aa67415846a7b7a1a20c4cef3cd9c1f66
/*
Copyright 2013 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fs
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"testing"
"time"
"camlistore.org/pkg/test"
)
var (
	// errmu guards lasterr.
	errmu sync.Mutex
	// lasterr, when non-nil, makes condSkip skip all subsequent tests.
	// NOTE(review): nothing in this chunk assigns lasterr — presumably it
	// is set elsewhere in the package; confirm.
	lasterr error
)
// condSkip skips the test unless the environment can run the FUSE
// tests: it requires darwin with osxfuse installed, and bails out
// early if some other test in the package has already failed.
func condSkip(t *testing.T) {
	errmu.Lock()
	defer errmu.Unlock()
	if lasterr != nil {
		t.Skipf("Skipping test; some other test already failed.")
	}
	if runtime.GOOS != "darwin" {
		t.Skipf("Skipping test on OS %q", runtime.GOOS)
	}
	// Skipf stops the test via runtime.Goexit, so past this point we are
	// guaranteed to be on darwin; the former `if runtime.GOOS == "darwin"`
	// wrapper was always true and has been removed.
	_, err := os.Stat("/Library/Filesystems/osxfusefs.fs/Support/mount_osxfusefs")
	if os.IsNotExist(err) {
		test.DependencyErrorOrSkip(t)
	} else if err != nil {
		t.Fatal(err)
	}
}
// mountEnv describes one live cammount FUSE mount under test.
type mountEnv struct {
	t          *testing.T
	mountPoint string      // temp directory where the FUSE filesystem is mounted
	process    *os.Process // the running cammount process
}
// Stat reads the named stats counter exported by the mounted filesystem
// under .camli_fs_stats and returns its integer value.
// Any read or parse failure fails the test immediately.
func (e *mountEnv) Stat(s *stat) int64 {
	file := filepath.Join(e.mountPoint, ".camli_fs_stats", s.name)
	slurp, err := ioutil.ReadFile(file)
	if err != nil {
		e.t.Fatal(err)
	}
	slurp = bytes.TrimSpace(slurp)
	v, err := strconv.ParseInt(string(slurp), 10, 64)
	if err != nil {
		e.t.Fatalf("unexpected value %q in file %s", slurp, file)
	}
	return v
}
// cammountTest mounts a cammount FUSE filesystem at a fresh temp
// directory, waits for the mount to appear, runs fn against it, and
// tears the mount down afterwards (asking cammount to quit, then
// force-killing and unmounting if it doesn't exit within 5 seconds).
func cammountTest(t *testing.T, fn func(env *mountEnv)) {
	// Mirror the test process's log output into the test log.
	dupLog := io.MultiWriter(os.Stderr, testLog{t})
	log.SetOutput(dupLog)
	defer log.SetOutput(os.Stderr)
	w := test.GetWorld(t)
	mountPoint, err := ioutil.TempDir("", "fs-test-mount")
	if err != nil {
		t.Fatal(err)
	}
	// VERBOSE_FUSE / VERBOSE_FUSE_STDERR control how chatty cammount is
	// and where its stderr goes.
	verbose := "false"
	var stderrDest io.Writer = ioutil.Discard
	if v, _ := strconv.ParseBool(os.Getenv("VERBOSE_FUSE")); v {
		verbose = "true"
		stderrDest = testLog{t}
	}
	if v, _ := strconv.ParseBool(os.Getenv("VERBOSE_FUSE_STDERR")); v {
		stderrDest = io.MultiWriter(stderrDest, os.Stderr)
	}
	mount := w.Cmd("cammount", "--debug="+verbose, mountPoint)
	mount.Stderr = stderrDest
	// Enables the .camli_fs_stats counters read by mountEnv.Stat.
	mount.Env = append(mount.Env, "CAMLI_TRACK_FS_STATS=1")
	stdin, err := mount.StdinPipe()
	if err != nil {
		t.Fatal(err)
	}
	if err := mount.Start(); err != nil {
		t.Fatal(err)
	}
	waitc := make(chan error, 1)
	go func() { waitc <- mount.Wait() }()
	defer func() {
		// Ask cammount to exit cleanly ("q" on stdin); on timeout, kill it
		// and unmount by force.
		log.Printf("Sending quit")
		stdin.Write([]byte("q\n"))
		select {
		case <-time.After(5 * time.Second):
			log.Printf("timeout waiting for cammount to finish")
			mount.Process.Kill()
			Unmount(mountPoint)
		case err := <-waitc:
			log.Printf("cammount exited: %v", err)
		}
		if !test.WaitFor(not(dirToBeFUSE(mountPoint)), 5*time.Second, 1*time.Second) {
			// It didn't unmount. Try again.
			Unmount(mountPoint)
		}
	}()
	if !test.WaitFor(dirToBeFUSE(mountPoint), 5*time.Second, 100*time.Millisecond) {
		t.Fatalf("error waiting for %s to be mounted", mountPoint)
	}
	fn(&mountEnv{
		t:          t,
		mountPoint: mountPoint,
		process:    mount.Process,
	})
}
// TestRoot verifies that the root of a fresh mount lists exactly the
// expected top-level entries.
func TestRoot(t *testing.T) {
	condSkip(t)
	cammountTest(t, func(env *mountEnv) {
		f, err := os.Open(env.mountPoint)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		names, err := f.Readdirnames(-1)
		if err != nil {
			t.Fatal(err)
		}
		// Readdirnames order is unspecified; sort before comparing.
		sort.Strings(names)
		want := []string{"WELCOME.txt", "date", "recent", "roots", "sha1-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", "tag"}
		if !reflect.DeepEqual(names, want) {
			t.Errorf("root directory = %q; want %q", names, want)
		}
	})
}
// testLog is an io.Writer that forwards writes to a test's log.
type testLog struct {
	t *testing.T
}

// Write logs p (trimmed of surrounding whitespace) via t.Log and
// always reports the full length as written.
func (tl testLog) Write(p []byte) (n int, err error) {
	tl.t.Log(strings.TrimSpace(string(p)))
	return len(p), nil
}
// TestMutable exercises the mutable portion of the filesystem under
// roots/: creating a directory and file, appending to the file across
// several opens, verifying the read-only open optimization fires,
// checking the final contents, and deleting the file.
func TestMutable(t *testing.T) {
	condSkip(t)
	cammountTest(t, func(env *mountEnv) {
		rootDir := filepath.Join(env.mountPoint, "roots", "r")
		if err := os.MkdirAll(rootDir, 0755); err != nil {
			t.Fatalf("Failed to make roots/r dir: %v", err)
		}
		fi, err := os.Stat(rootDir)
		if err != nil || !fi.IsDir() {
			t.Fatalf("Stat of roots/r dir = %v, %v; want a directory", fi, err)
		}
		filename := filepath.Join(rootDir, "x")
		f, err := os.Create(filename)
		if err != nil {
			t.Fatalf("Create: %v", err)
		}
		if err := f.Close(); err != nil {
			t.Fatalf("Close: %v", err)
		}
		fi, err = os.Stat(filename)
		if err != nil {
			t.Errorf("Stat error: %v", err)
		} else if !fi.Mode().IsRegular() || fi.Size() != 0 {
			t.Errorf("Stat of roots/r/x = %v size %d; want a %d byte regular file", fi.Mode(), fi.Size(), 0)
		}
		// Append in several separate opens to exercise the append path.
		for _, str := range []string{"foo, ", "bar\n", "another line.\n"} {
			f, err = os.OpenFile(filename, os.O_WRONLY|os.O_APPEND, 0644)
			if err != nil {
				t.Fatalf("OpenFile: %v", err)
			}
			if _, err := f.Write([]byte(str)); err != nil {
				t.Logf("Error with append: %v", err)
				t.Fatalf("Error appending %q to %s: %v", str, filename, err)
			}
			if err := f.Close(); err != nil {
				t.Fatal(err)
			}
		}
		// The read below should bump the read-only open counter by exactly one.
		ro0 := env.Stat(mutFileOpenRO)
		slurp, err := ioutil.ReadFile(filename)
		if err != nil {
			t.Fatal(err)
		}
		if env.Stat(mutFileOpenRO)-ro0 != 1 {
			t.Error("Read didn't trigger read-only path optimization.")
		}
		const want = "foo, bar\nanother line.\n"
		fi, err = os.Stat(filename)
		if err != nil {
			t.Errorf("Stat error: %v", err)
		} else if !fi.Mode().IsRegular() || fi.Size() != int64(len(want)) {
			t.Errorf("Stat of roots/r/x = %v size %d; want a %d byte regular file", fi.Mode(), fi.Size(), len(want))
		}
		if got := string(slurp); got != want {
			t.Fatalf("contents = %q; want %q", got, want)
		}
		// Delete it.
		if err := os.Remove(filename); err != nil {
			t.Fatal(err)
		}
		// Gone?
		if _, err := os.Stat(filename); !os.IsNotExist(err) {
			t.Fatalf("expected file to be gone; got stat err = %v instead", err)
		}
	})
}
// TestDifferentWriteTypes exercises a sequence of different write
// styles against one file — truncating writes, positioned WriteAt, and
// appends — verifying the file contents (via shortenString) after each
// step. Gated behind RUN_BROKEN_TESTS.
func TestDifferentWriteTypes(t *testing.T) {
	condSkip(t)
	brokenTest(t)
	cammountTest(t, func(env *mountEnv) {
		rootDir := filepath.Join(env.mountPoint, "roots", "r")
		if err := os.MkdirAll(rootDir, 0755); err != nil {
			t.Fatalf("Failed to make roots/r dir: %v", err)
		}
		fi, err := os.Stat(rootDir)
		if err != nil || !fi.IsDir() {
			t.Fatalf("Stat of roots/r dir = %v, %v; want a directory", fi, err)
		}
		filename := filepath.Join(rootDir, "big")
		writes := []struct {
			name     string
			flag     int
			write    []byte // if non-nil, Write is called
			writeAt  []byte // if non-nil, WriteAt is used
			writePos int64  // writeAt position
			want     string // shortenString of remaining file
		}{
			{
				name:  "write 8k of a",
				flag:  os.O_RDWR | os.O_CREATE | os.O_TRUNC,
				write: bytes.Repeat([]byte("a"), 8<<10),
				want:  "a{8192}",
			},
			{
				name:     "writeAt HI at offset 10",
				flag:     os.O_RDWR,
				writeAt:  []byte("HI"),
				writePos: 10,
				want:     "a{10}HIa{8180}",
			},
			{
				name:  "append single C",
				flag:  os.O_WRONLY | os.O_APPEND,
				write: []byte("C"),
				want:  "a{10}HIa{8180}C",
			},
			{
				name:  "append 8k of b",
				flag:  os.O_WRONLY | os.O_APPEND,
				write: bytes.Repeat([]byte("b"), 8<<10),
				want:  "a{10}HIa{8180}Cb{8192}",
			},
		}
		for _, wr := range writes {
			f, err := os.OpenFile(filename, wr.flag, 0644)
			if err != nil {
				t.Fatalf("%s: OpenFile: %v", wr.name, err)
			}
			if wr.write != nil {
				// Fixed: the format string used %n, which is not a Go verb
				// (go vet error); the byte count needs %d.
				if n, err := f.Write(wr.write); err != nil || n != len(wr.write) {
					t.Fatalf("%s: Write = (%d, %v); want (%d, nil)", wr.name, n, err, len(wr.write))
				}
			}
			if wr.writeAt != nil {
				// Fixed: same invalid %n verb as above.
				if n, err := f.WriteAt(wr.writeAt, wr.writePos); err != nil || n != len(wr.writeAt) {
					t.Fatalf("%s: WriteAt = (%d, %v); want (%d, nil)", wr.name, n, err, len(wr.writeAt))
				}
			}
			if err := f.Close(); err != nil {
				t.Fatalf("%s: Close: %v", wr.name, err)
			}
			slurp, err := ioutil.ReadFile(filename)
			if err != nil {
				t.Fatalf("%s: Slurp: %v", wr.name, err)
			}
			if got := shortenString(string(slurp)); got != wr.want {
				t.Fatalf("%s: afterwards, file = %q; want %q", wr.name, got, wr.want)
			}
		}
		// Delete it.
		if err := os.Remove(filename); err != nil {
			t.Fatal(err)
		}
	})
}
// brokenTest skips known-broken tests unless RUN_BROKEN_TESTS=1 is set
// in the environment.
func brokenTest(t *testing.T) {
	if v, _ := strconv.ParseBool(os.Getenv("RUN_BROKEN_TESTS")); !v {
		t.Skipf("Skipping broken tests without RUN_BROKEN_TESTS=1")
	}
}
// TestFinderCopy drives the macOS Finder via AppleScript to copy a
// temp file into the mounted filesystem, then verifies the copy's size
// and contents.
func TestFinderCopy(t *testing.T) {
	if runtime.GOOS != "darwin" {
		t.Skipf("Skipping Darwin-specific test.")
	}
	condSkip(t)
	cammountTest(t, func(env *mountEnv) {
		f, err := ioutil.TempFile("", "finder-copy-file")
		if err != nil {
			t.Fatal(err)
		}
		defer os.Remove(f.Name())
		want := []byte("Some data for Finder to copy.")
		if _, err := f.Write(want); err != nil {
			t.Fatal(err)
		}
		if err := f.Close(); err != nil {
			t.Fatal(err)
		}
		destDir := filepath.Join(env.mountPoint, "roots", "r")
		if err := os.MkdirAll(destDir, 0755); err != nil {
			t.Fatal(err)
		}
		cmd := exec.Command("osascript")
		script := fmt.Sprintf(`
tell application "Finder"
copy file POSIX file %q to folder POSIX file %q
end tell
`, f.Name(), destDir)
		cmd.Stdin = strings.NewReader(script)
		if out, err := cmd.CombinedOutput(); err != nil {
			t.Fatalf("Error running AppleScript: %v, %s", err, out)
		} else {
			t.Logf("AppleScript said: %q", out)
		}
		destFile := filepath.Join(destDir, filepath.Base(f.Name()))
		fi, err := os.Stat(destFile)
		if err != nil {
			// Fixed: this was t.Errorf, which let execution continue and
			// dereference a nil fi on the next line, panicking the test.
			t.Fatalf("Stat = %v, %v", fi, err)
		}
		if fi.Size() != int64(len(want)) {
			t.Errorf("Dest stat size = %d; want %d", fi.Size(), len(want))
		}
		slurp, err := ioutil.ReadFile(destFile)
		if err != nil {
			t.Fatalf("ReadFile: %v", err)
		}
		if !bytes.Equal(slurp, want) {
			t.Errorf("Dest file = %q; want %q", slurp, want)
		}
	})
}
// TestTextEdit drives macOS TextEdit via AppleScript to open, rewrite,
// and save a file on the mounted filesystem, then verifies the new
// contents. Known broken; gated behind RUN_BROKEN_TESTS.
func TestTextEdit(t *testing.T) {
	if runtime.GOOS != "darwin" {
		t.Skipf("Skipping Darwin-specific test.")
	}
	condSkip(t)
	brokenTest(t)
	cammountTest(t, func(env *mountEnv) {
		var (
			testDir  = filepath.Join(env.mountPoint, "roots", "r")
			testFile = filepath.Join(testDir, "some-text-file.txt")
			content1 = []byte("Some text content.")
			content2 = []byte("Some replacement content.")
		)
		if err := os.MkdirAll(testDir, 0755); err != nil {
			t.Fatal(err)
		}
		if err := ioutil.WriteFile(testFile, content1, 0644); err != nil {
			t.Fatal(err)
		}
		cmd := exec.Command("osascript")
		script := fmt.Sprintf(`
tell application "TextEdit"
	activate
	open POSIX file %q
	tell front document
		set paragraph 1 to %q as text
		save
		close
	end tell
end tell
`, testFile, content2)
		cmd.Stdin = strings.NewReader(script)
		if out, err := cmd.CombinedOutput(); err != nil {
			t.Fatalf("Error running AppleScript: %v, %s", err, out)
		} else {
			t.Logf("AppleScript said: %q", out)
		}
		fi, err := os.Stat(testFile)
		if err != nil {
			// Fixed: this was t.Errorf, which let execution continue and
			// dereference a nil fi on the next line, panicking the test.
			t.Fatalf("Stat = %v, %v", fi, err)
		}
		if fi.Size() != int64(len(content2)) {
			t.Errorf("Stat size = %d; want %d", fi.Size(), len(content2))
		}
		slurp, err := ioutil.ReadFile(testFile)
		if err != nil {
			t.Fatalf("ReadFile: %v", err)
		}
		if !bytes.Equal(slurp, content2) {
			t.Errorf("File = %q; want %q", slurp, content2)
		}
	})
}
// not returns a predicate that is the logical negation of cond.
func not(cond func() bool) func() bool {
	return func() bool { return !cond() }
}
// dirToBeFUSE returns a predicate reporting whether dir is currently
// served by a FUSE filesystem, determined by scanning `df` output.
// On platforms other than darwin it always reports false.
func dirToBeFUSE(dir string) func() bool {
	return func() bool {
		out, err := exec.Command("df", dir).CombinedOutput()
		if err != nil {
			return false
		}
		return runtime.GOOS == "darwin" &&
			strings.Contains(string(out), "mount_osxfusefs@")
	}
}
// shortenString reduces any run of 5 or more identical bytes to "x{17}".
// "hello" => "hello"
// "fooooooooooooooooo" => "fo{17}"
func shortenString(v string) string {
var buf bytes.Buffer
var last byte
var run int
flush := func() {
switch {
case run == 0:
case run < 5:
for i := 0; i < run; i++ {
buf.WriteByte(last)
}
default:
buf.WriteByte(last)
fmt.Fprintf(&buf, "{%d}", run)
}
run = 0
}
for i := 0; i < len(v); i++ {
b := v[i]
if b != last {
flush()
}
last = b
run++
}
flush()
return buf.String()
}
|
// Copyright 2016-2019 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package k8s abstracts all Kubernetes specific behaviour
package k8s
import (
goerrors "errors"
"fmt"
"time"
"github.com/cilium/cilium/api/v1/models"
clientset "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
"github.com/cilium/cilium/pkg/logging/logfields"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
var (
	// ErrNilNode is returned when the Kubernetes API server has returned a nil node
	ErrNilNode = goerrors.New("API server returned nil node")
	// k8sCli is the default client, populated by createDefaultClient.
	k8sCli = &K8sClient{}
	// k8sCiliumCli is the default Cilium client, populated by createDefaultCiliumClient.
	k8sCiliumCli = &K8sCiliumClient{}
)
// createConfig creates a rest.Config for a given endpoint using a
// kubeconfig file. Precedence: with neither an endpoint nor a
// kubeconfig path, the in-cluster configuration is used; an explicit
// kubeconfig path wins over a bare endpoint.
func createConfig(endpoint, kubeCfgPath string) (*rest.Config, error) {
	// If the endpoint and the kubeCfgPath are empty then we can try getting
	// the rest.Config from the InClusterConfig
	if endpoint == "" && kubeCfgPath == "" {
		return rest.InClusterConfig()
	}
	if kubeCfgPath != "" {
		return clientcmd.BuildConfigFromFlags("", kubeCfgPath)
	}
	config := &rest.Config{Host: endpoint}
	err := rest.SetKubernetesDefaults(config)
	return config, err
}
// CreateConfigFromAgentResponse creates a client configuration from a
// models.DaemonConfiguration, using the endpoint and kubeconfig path
// reported by the agent.
func CreateConfigFromAgentResponse(resp *models.DaemonConfiguration) (*rest.Config, error) {
	return createConfig(resp.Status.K8sEndpoint, resp.Status.K8sConfiguration)
}
// CreateConfig creates a client configuration based on the configured API
// server and Kubeconfig path
func CreateConfig() (*rest.Config, error) {
	return createConfig(GetAPIServer(), GetKubeconfigPath())
}
// CreateClient creates a new client to access the Kubernetes API.
// It polls the apiserver every 5 seconds until the connection is
// ready, giving up after one minute; on timeout the last connection
// error is returned alongside the clientset.
func CreateClient(config *rest.Config) (*kubernetes.Clientset, error) {
	cs, err := kubernetes.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	stop := make(chan struct{})
	timeout := time.NewTimer(time.Minute)
	defer timeout.Stop()
	wait.Until(func() {
		// FIXME: Use config.String() when we rebase to latest go-client
		log.WithField("host", config.Host).Info("Establishing connection to apiserver")
		// err is deliberately the outer variable: the final probe result
		// must survive after wait.Until returns.
		err = isConnReady(cs)
		if err == nil {
			close(stop)
			return
		}
		select {
		case <-timeout.C:
			log.WithError(err).WithField(logfields.IPAddr, config.Host).Error("Unable to contact k8s api-server")
			close(stop)
		default:
		}
	}, 5*time.Second, stop)
	if err == nil {
		log.Info("Connected to apiserver")
	}
	return cs, err
}
// isConnReady probes apiserver connectivity by fetching the
// controller-manager component status; it returns the resulting error
// (nil means the connection is ready).
func isConnReady(c *kubernetes.Clientset) error {
	_, err := c.CoreV1().ComponentStatuses().Get("controller-manager", metav1.GetOptions{})
	return err
}
// Client returns the default Kubernetes client.
// It is only usable after createDefaultClient has run.
func Client() *K8sClient {
	return k8sCli
}
// createDefaultClient initializes the package-level default Kubernetes
// client (k8sCli) from the configured API server / kubeconfig path.
func createDefaultClient() error {
	restConfig, err := CreateConfig()
	if err != nil {
		return fmt.Errorf("unable to create k8s client rest configuration: %s", err)
	}
	// Request protobuf as the wire format instead of the JSON default.
	restConfig.ContentConfig.ContentType = `application/vnd.kubernetes.protobuf`
	createdK8sClient, err := CreateClient(restConfig)
	if err != nil {
		return fmt.Errorf("unable to create k8s client: %s", err)
	}
	k8sCli.Interface = createdK8sClient
	return nil
}
// CiliumClient returns the default Cilium Kubernetes client.
// It is only usable after createDefaultCiliumClient has run.
func CiliumClient() *K8sCiliumClient {
	return k8sCiliumCli
}
// createDefaultCiliumClient initializes the package-level default
// Cilium client (k8sCiliumCli) from the configured API server /
// kubeconfig path.
func createDefaultCiliumClient() error {
	restConfig, err := CreateConfig()
	if err != nil {
		return fmt.Errorf("unable to create k8s client rest configuration: %s", err)
	}
	createdCiliumK8sClient, err := clientset.NewForConfig(restConfig)
	if err != nil {
		return fmt.Errorf("unable to create k8s client: %s", err)
	}
	k8sCiliumCli.Interface = createdCiliumK8sClient
	return nil
}
k8s: add useragent (#7791)
* k8s: add useragent
Signed-off-by: Pierre-Yves Aillet <6834d8c46acec70ffb72a0b0b57251ab0f7e1c52@gmail.com>
// Copyright 2016-2019 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package k8s abstracts all Kubernetes specific behaviour
package k8s
import (
goerrors "errors"
"fmt"
"time"
"github.com/cilium/cilium/api/v1/models"
clientset "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/version"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
var (
	// ErrNilNode is returned when the Kubernetes API server has returned a nil node
	ErrNilNode = goerrors.New("API server returned nil node")
	// k8sCli is the default client, populated by createDefaultClient.
	k8sCli = &K8sClient{}
	// k8sCiliumCli is the default Cilium client, populated by createDefaultCiliumClient.
	k8sCiliumCli = &K8sCiliumClient{}
)
// createConfig creates a rest.Config for a given endpoint using a
// kubeconfig file. Precedence: with neither an endpoint nor a
// kubeconfig path, the in-cluster configuration is used; an explicit
// kubeconfig path wins over a bare endpoint. In every case the Cilium
// version is set as the client's user agent.
func createConfig(endpoint, kubeCfgPath string) (*rest.Config, error) {
	userAgent := fmt.Sprintf("Cilium %s", version.Version)
	// Build the base config from the selected source, then apply the
	// user agent once instead of duplicating it per branch.
	var (
		config *rest.Config
		err    error
	)
	switch {
	case endpoint == "" && kubeCfgPath == "":
		config, err = rest.InClusterConfig()
	case kubeCfgPath != "":
		config, err = clientcmd.BuildConfigFromFlags("", kubeCfgPath)
	default:
		config = &rest.Config{Host: endpoint}
		err = rest.SetKubernetesDefaults(config)
	}
	if err != nil {
		// Don't hand back a half-initialized config alongside an error
		// (the endpoint branch used to do exactly that, inconsistently
		// with the other two branches).
		return nil, err
	}
	config.UserAgent = userAgent
	return config, nil
}
// CreateConfigFromAgentResponse creates a client configuration from a
// models.DaemonConfiguration, using the endpoint and kubeconfig path
// reported by the agent.
func CreateConfigFromAgentResponse(resp *models.DaemonConfiguration) (*rest.Config, error) {
	return createConfig(resp.Status.K8sEndpoint, resp.Status.K8sConfiguration)
}
// CreateConfig creates a client configuration based on the configured API
// server and Kubeconfig path
func CreateConfig() (*rest.Config, error) {
	return createConfig(GetAPIServer(), GetKubeconfigPath())
}
// CreateClient creates a new client to access the Kubernetes API.
// It polls the apiserver every 5 seconds until the connection is
// ready, giving up after one minute; on timeout the last connection
// error is returned alongside the clientset.
func CreateClient(config *rest.Config) (*kubernetes.Clientset, error) {
	cs, err := kubernetes.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	stop := make(chan struct{})
	timeout := time.NewTimer(time.Minute)
	defer timeout.Stop()
	wait.Until(func() {
		// FIXME: Use config.String() when we rebase to latest go-client
		log.WithField("host", config.Host).Info("Establishing connection to apiserver")
		// err is deliberately the outer variable: the final probe result
		// must survive after wait.Until returns.
		err = isConnReady(cs)
		if err == nil {
			close(stop)
			return
		}
		select {
		case <-timeout.C:
			log.WithError(err).WithField(logfields.IPAddr, config.Host).Error("Unable to contact k8s api-server")
			close(stop)
		default:
		}
	}, 5*time.Second, stop)
	if err == nil {
		log.Info("Connected to apiserver")
	}
	return cs, err
}
// isConnReady probes apiserver connectivity by fetching the
// controller-manager component status; it returns the resulting error
// (nil means the connection is ready).
func isConnReady(c *kubernetes.Clientset) error {
	_, err := c.CoreV1().ComponentStatuses().Get("controller-manager", metav1.GetOptions{})
	return err
}
// Client returns the default Kubernetes client.
// It is only usable after createDefaultClient has run.
func Client() *K8sClient {
	return k8sCli
}
// createDefaultClient initializes the package-level default Kubernetes
// client (k8sCli) from the configured API server / kubeconfig path.
func createDefaultClient() error {
	restConfig, err := CreateConfig()
	if err != nil {
		return fmt.Errorf("unable to create k8s client rest configuration: %s", err)
	}
	// Request protobuf as the wire format instead of the JSON default.
	restConfig.ContentConfig.ContentType = `application/vnd.kubernetes.protobuf`
	createdK8sClient, err := CreateClient(restConfig)
	if err != nil {
		return fmt.Errorf("unable to create k8s client: %s", err)
	}
	k8sCli.Interface = createdK8sClient
	return nil
}
// CiliumClient returns the default Cilium Kubernetes client.
// It is only usable after createDefaultCiliumClient has run.
func CiliumClient() *K8sCiliumClient {
	return k8sCiliumCli
}
// createDefaultCiliumClient initializes the package-level default
// Cilium client (k8sCiliumCli) from the configured API server /
// kubeconfig path.
func createDefaultCiliumClient() error {
	restConfig, err := CreateConfig()
	if err != nil {
		return fmt.Errorf("unable to create k8s client rest configuration: %s", err)
	}
	createdCiliumK8sClient, err := clientset.NewForConfig(restConfig)
	if err != nil {
		return fmt.Errorf("unable to create k8s client: %s", err)
	}
	k8sCiliumCli.Interface = createdCiliumK8sClient
	return nil
}
|
package self_test
import (
"context"
"crypto/tls"
"fmt"
"io/ioutil"
mrand "math/rand"
"net"
"sort"
"sync"
"sync/atomic"
"time"
"github.com/lucas-clemente/quic-go"
quicproxy "github.com/lucas-clemente/quic-go/integrationtests/tools/proxy"
"github.com/lucas-clemente/quic-go/internal/protocol"
"github.com/lucas-clemente/quic-go/internal/wire"
"github.com/lucas-clemente/quic-go/logging"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
)
// rcvdPacket records a single received packet: its extended header and
// the frames it carried.
type rcvdPacket struct {
	hdr    *logging.ExtendedHeader
	frames []logging.Frame
}
// rcvdPacketTracer is a connection tracer that collects every packet
// received on a connection. Close must be called when tracing is done;
// getRcvdPackets blocks until then.
type rcvdPacketTracer struct {
	connTracer
	closed      chan struct{} // closed by Close to release getRcvdPackets
	rcvdPackets []rcvdPacket
}
// newRcvdPacketTracer returns a ready-to-use rcvdPacketTracer.
func newRcvdPacketTracer() *rcvdPacketTracer {
	return &rcvdPacketTracer{closed: make(chan struct{})}
}
// ReceivedPacket records one received packet (header plus frames).
// NOTE(review): the append is unsynchronized — presumably the tracer is
// only invoked from a single goroutine; confirm against the caller.
func (t *rcvdPacketTracer) ReceivedPacket(hdr *logging.ExtendedHeader, _ logging.ByteCount, frames []logging.Frame) {
	t.rcvdPackets = append(t.rcvdPackets, rcvdPacket{hdr: hdr, frames: frames})
}
// Close signals that no more packets will be recorded, unblocking getRcvdPackets.
func (t *rcvdPacketTracer) Close() { close(t.closed) }

// getRcvdPackets blocks until Close has been called, then returns all
// recorded packets.
func (t *rcvdPacketTracer) getRcvdPackets() []rcvdPacket {
	<-t.closed
	return t.rcvdPackets
}
var _ = Describe("0-RTT", func() {
rtt := scaleDuration(5 * time.Millisecond)
for _, v := range protocol.SupportedVersions {
version := v
Context(fmt.Sprintf("with QUIC version %s", version), func() {
runCountingProxy := func(serverPort int) (*quicproxy.QuicProxy, *uint32) {
var num0RTTPackets uint32 // to be used as an atomic
proxy, err := quicproxy.NewQuicProxy("localhost:0", &quicproxy.Opts{
RemoteAddr: fmt.Sprintf("localhost:%d", serverPort),
DelayPacket: func(_ quicproxy.Direction, data []byte) time.Duration {
hdr, _, _, err := wire.ParsePacket(data, 0)
Expect(err).ToNot(HaveOccurred())
if hdr.Type == protocol.PacketType0RTT {
atomic.AddUint32(&num0RTTPackets, 1)
}
return rtt / 2
},
})
Expect(err).ToNot(HaveOccurred())
return proxy, &num0RTTPackets
}
			// dialAndReceiveSessionTicket runs a first connection so that the client's
			// session cache receives a session ticket, enabling 0-RTT on later dials.
			// It returns the server and client TLS configs to reuse for those dials.
			dialAndReceiveSessionTicket := func(serverConf *quic.Config) (*tls.Config, *tls.Config) {
				tlsConf := getTLSConfig()
				if serverConf == nil {
					serverConf = getQuicConfig(&quic.Config{
						AcceptToken: func(_ net.Addr, _ *quic.Token) bool { return true },
					})
					serverConf.Versions = []protocol.VersionNumber{version}
				}
				ln, err := quic.ListenAddrEarly(
					"localhost:0",
					tlsConf,
					serverConf,
				)
				Expect(err).ToNot(HaveOccurred())
				defer ln.Close()
				proxy, err := quicproxy.NewQuicProxy("localhost:0", &quicproxy.Opts{
					RemoteAddr:  fmt.Sprintf("localhost:%d", ln.Addr().(*net.UDPAddr).Port),
					DelayPacket: func(_ quicproxy.Direction, data []byte) time.Duration { return rtt / 2 },
				})
				Expect(err).ToNot(HaveOccurred())
				defer proxy.Close()
				// dial the first session in order to receive a session ticket
				done := make(chan struct{})
				go func() {
					defer GinkgoRecover()
					defer close(done)
					sess, err := ln.Accept(context.Background())
					Expect(err).ToNot(HaveOccurred())
					<-sess.Context().Done()
				}()
				clientConf := getTLSClientConfig()
				gets := make(chan string, 100)
				puts := make(chan string, 100)
				clientConf.ClientSessionCache = newClientSessionCache(gets, puts)
				sess, err := quic.DialAddr(
					fmt.Sprintf("localhost:%d", proxy.LocalPort()),
					clientConf,
					getQuicConfig(&quic.Config{Versions: []protocol.VersionNumber{version}}),
				)
				Expect(err).ToNot(HaveOccurred())
				Eventually(puts).Should(Receive())
				// received the session ticket. We're done here.
				Expect(sess.CloseWithError(0, "")).To(Succeed())
				Eventually(done).Should(BeClosed())
				return tlsConf, clientConf
			}
			// transfer0RTTData dials an early session through the proxy, sends testdata
			// on a unidirectional stream, and checks on both endpoints whether 0-RTT
			// was actually used (expect0RTT).
			transfer0RTTData := func(
				ln quic.EarlyListener,
				proxyPort int,
				clientConf *tls.Config,
				testdata []byte, // data to transfer
				expect0RTT bool, // do we expect that 0-RTT is actually used
			) {
				// now dial the second session, and use 0-RTT to send some data
				done := make(chan struct{})
				go func() {
					defer GinkgoRecover()
					sess, err := ln.Accept(context.Background())
					Expect(err).ToNot(HaveOccurred())
					str, err := sess.AcceptUniStream(context.Background())
					Expect(err).ToNot(HaveOccurred())
					data, err := ioutil.ReadAll(str)
					Expect(err).ToNot(HaveOccurred())
					Expect(data).To(Equal(testdata))
					Expect(sess.ConnectionState().TLS.Used0RTT).To(Equal(expect0RTT))
					Expect(sess.CloseWithError(0, "")).To(Succeed())
					close(done)
				}()
				sess, err := quic.DialAddrEarly(
					fmt.Sprintf("localhost:%d", proxyPort),
					clientConf,
					getQuicConfig(&quic.Config{Versions: []protocol.VersionNumber{version}}),
				)
				Expect(err).ToNot(HaveOccurred())
				defer sess.CloseWithError(0, "")
				str, err := sess.OpenUniStream()
				Expect(err).ToNot(HaveOccurred())
				_, err = str.Write(testdata)
				Expect(err).ToNot(HaveOccurred())
				Expect(str.Close()).To(Succeed())
				Expect(sess.ConnectionState().TLS.Used0RTT).To(Equal(expect0RTT))
				Eventually(done).Should(BeClosed())
				Eventually(sess.Context().Done()).Should(BeClosed())
			}
			// check0RTTRejected dials an early session and verifies that the 0-RTT data
			// the client sent was rejected: neither endpoint reports Used0RTT, and the
			// server never sees the stream.
			check0RTTRejected := func(
				ln quic.EarlyListener,
				proxyPort int,
				clientConf *tls.Config,
			) {
				sess, err := quic.DialAddrEarly(
					fmt.Sprintf("localhost:%d", proxyPort),
					clientConf,
					getQuicConfig(&quic.Config{Versions: []protocol.VersionNumber{version}}),
				)
				Expect(err).ToNot(HaveOccurred())
				str, err := sess.OpenUniStream()
				Expect(err).ToNot(HaveOccurred())
				_, err = str.Write(make([]byte, 3000))
				Expect(err).ToNot(HaveOccurred())
				Expect(str.Close()).To(Succeed())
				Expect(sess.ConnectionState().TLS.Used0RTT).To(BeFalse())
				// make sure the server doesn't process the data
				ctx, cancel := context.WithTimeout(context.Background(), scaleDuration(50*time.Millisecond))
				defer cancel()
				serverSess, err := ln.Accept(ctx)
				Expect(err).ToNot(HaveOccurred())
				Expect(serverSess.ConnectionState().TLS.Used0RTT).To(BeFalse())
				_, err = serverSess.AcceptUniStream(ctx)
				Expect(err).To(Equal(context.DeadlineExceeded))
				Expect(serverSess.CloseWithError(0, "")).To(Succeed())
				Eventually(sess.Context().Done()).Should(BeClosed())
			}
			// can be used to extract 0-RTT from a rcvdPacketTracer
			// Returns the packet numbers of all recorded 0-RTT packets, in arrival order.
			get0RTTPackets := func(packets []rcvdPacket) []protocol.PacketNumber {
				var zeroRTTPackets []protocol.PacketNumber
				for _, p := range packets {
					if p.hdr.Type == protocol.PacketType0RTT {
						zeroRTTPackets = append(zeroRTTPackets, p.hdr.PacketNumber)
					}
				}
				return zeroRTTPackets
			}
			It("transfers 0-RTT data", func() {
				tlsConf, clientConf := dialAndReceiveSessionTicket(nil)
				tracer := newRcvdPacketTracer()
				ln, err := quic.ListenAddrEarly(
					"localhost:0",
					tlsConf,
					getQuicConfig(&quic.Config{
						Versions:    []protocol.VersionNumber{version},
						AcceptToken: func(_ net.Addr, _ *quic.Token) bool { return true },
						Tracer:      newTracer(func() logging.ConnectionTracer { return tracer }),
					}),
				)
				Expect(err).ToNot(HaveOccurred())
				defer ln.Close()
				proxy, num0RTTPackets := runCountingProxy(ln.Addr().(*net.UDPAddr).Port)
				defer proxy.Close()
				transfer0RTTData(ln, proxy.LocalPort(), clientConf, PRData, true)
				num0RTT := atomic.LoadUint32(num0RTTPackets)
				fmt.Fprintf(GinkgoWriter, "Sent %d 0-RTT packets.", num0RTT)
				Expect(num0RTT).ToNot(BeZero())
				// The lowest received 0-RTT packet number must be 0: without a Retry,
				// the 0-RTT packet number space starts at 0.
				zeroRTTPackets := get0RTTPackets(tracer.getRcvdPackets())
				Expect(len(zeroRTTPackets)).To(BeNumerically(">", 10))
				sort.Slice(zeroRTTPackets, func(i, j int) bool { return zeroRTTPackets[i] < zeroRTTPackets[j] })
				Expect(zeroRTTPackets[0]).To(Equal(protocol.PacketNumber(0)))
			})
			// Test that data intended to be sent with 1-RTT protection is not sent in 0-RTT packets.
			// NOTE(review): the test name reads "waits until a session until the handshake is done" —
			// this looks like a typo for "waits until the handshake is done"; confirm before renaming.
			It("waits until a session until the handshake is done", func() {
				tlsConf, clientConf := dialAndReceiveSessionTicket(nil)
				zeroRTTData := GeneratePRData(2 * 1100) // 2 packets
				oneRTTData := PRData
				tracer := newRcvdPacketTracer()
				ln, err := quic.ListenAddrEarly(
					"localhost:0",
					tlsConf,
					getQuicConfig(&quic.Config{
						Versions:    []protocol.VersionNumber{version},
						AcceptToken: func(_ net.Addr, _ *quic.Token) bool { return true },
						Tracer:      newTracer(func() logging.ConnectionTracer { return tracer }),
					}),
				)
				Expect(err).ToNot(HaveOccurred())
				defer ln.Close()
				// now dial the second session, and use 0-RTT to send some data
				go func() {
					defer GinkgoRecover()
					sess, err := ln.Accept(context.Background())
					Expect(err).ToNot(HaveOccurred())
					str, err := sess.AcceptUniStream(context.Background())
					Expect(err).ToNot(HaveOccurred())
					data, err := ioutil.ReadAll(str)
					Expect(err).ToNot(HaveOccurred())
					Expect(data).To(Equal(zeroRTTData))
					str, err = sess.AcceptUniStream(context.Background())
					Expect(err).ToNot(HaveOccurred())
					data, err = ioutil.ReadAll(str)
					Expect(err).ToNot(HaveOccurred())
					Expect(data).To(Equal(oneRTTData))
					Expect(sess.CloseWithError(0, "")).To(Succeed())
				}()
				proxy, num0RTTPackets := runCountingProxy(ln.Addr().(*net.UDPAddr).Port)
				defer proxy.Close()
				sess, err := quic.DialAddrEarly(
					fmt.Sprintf("localhost:%d", proxy.LocalPort()),
					clientConf,
					getQuicConfig(&quic.Config{Versions: []protocol.VersionNumber{version}}),
				)
				Expect(err).ToNot(HaveOccurred())
				sent0RTT := make(chan struct{})
				go func() {
					defer GinkgoRecover()
					defer close(sent0RTT)
					str, err := sess.OpenUniStream()
					Expect(err).ToNot(HaveOccurred())
					_, err = str.Write(zeroRTTData)
					Expect(err).ToNot(HaveOccurred())
					Expect(str.Close()).To(Succeed())
				}()
				Eventually(sent0RTT).Should(BeClosed())
				// wait for the handshake to complete
				Eventually(sess.HandshakeComplete().Done()).Should(BeClosed())
				// This second stream is written only after handshake completion, so its
				// data must not appear in any 0-RTT packet.
				str, err := sess.OpenUniStream()
				Expect(err).ToNot(HaveOccurred())
				_, err = str.Write(PRData)
				Expect(err).ToNot(HaveOccurred())
				Expect(str.Close()).To(Succeed())
				<-sess.Context().Done()
				num0RTT := atomic.LoadUint32(num0RTTPackets)
				fmt.Fprintf(GinkgoWriter, "Sent %d 0-RTT packets.", num0RTT)
				Expect(num0RTT).To(Or(BeEquivalentTo(2), BeEquivalentTo(3))) // the FIN might be sent in a separate packet
				Expect(get0RTTPackets(tracer.getRcvdPackets())).To(HaveLen(int(num0RTT)))
			})
			It("transfers 0-RTT data, when 0-RTT packets are lost", func() {
				var (
					num0RTTPackets uint32 // to be used as an atomic
					num0RTTDropped uint32
				)
				tlsConf, clientConf := dialAndReceiveSessionTicket(nil)
				tracer := newRcvdPacketTracer()
				ln, err := quic.ListenAddrEarly(
					"localhost:0",
					tlsConf,
					getQuicConfig(&quic.Config{
						Versions: []protocol.VersionNumber{version},
						Tracer:   newTracer(func() logging.ConnectionTracer { return tracer }),
					}),
				)
				Expect(err).ToNot(HaveOccurred())
				defer ln.Close()
				proxy, err := quicproxy.NewQuicProxy("localhost:0", &quicproxy.Opts{
					RemoteAddr: fmt.Sprintf("localhost:%d", ln.Addr().(*net.UDPAddr).Port),
					DelayPacket: func(_ quicproxy.Direction, data []byte) time.Duration {
						hdr, _, _, err := wire.ParsePacket(data, 0)
						Expect(err).ToNot(HaveOccurred())
						if hdr.Type == protocol.PacketType0RTT {
							atomic.AddUint32(&num0RTTPackets, 1)
						}
						return rtt / 2
					},
					DropPacket: func(_ quicproxy.Direction, data []byte) bool {
						hdr, _, _, err := wire.ParsePacket(data, 0)
						Expect(err).ToNot(HaveOccurred())
						if hdr.Type == protocol.PacketType0RTT {
							// drop 25% of the 0-RTT packets
							drop := mrand.Intn(4) == 0
							if drop {
								atomic.AddUint32(&num0RTTDropped, 1)
							}
							return drop
						}
						return false
					},
				})
				Expect(err).ToNot(HaveOccurred())
				defer proxy.Close()
				transfer0RTTData(ln, proxy.LocalPort(), clientConf, PRData, true)
				num0RTT := atomic.LoadUint32(&num0RTTPackets)
				numDropped := atomic.LoadUint32(&num0RTTDropped)
				fmt.Fprintf(GinkgoWriter, "Sent %d 0-RTT packets. Dropped %d of those.", num0RTT, numDropped)
				Expect(numDropped).ToNot(BeZero())
				Expect(num0RTT).ToNot(BeZero())
				Expect(get0RTTPackets(tracer.getRcvdPackets())).ToNot(BeEmpty())
			})
			It("retransmits all 0-RTT data when the server performs a Retry", func() {
				var mutex sync.Mutex
				var firstConnID, secondConnID protocol.ConnectionID
				var firstCounter, secondCounter protocol.ByteCount
				tlsConf, clientConf := dialAndReceiveSessionTicket(nil)
				// countZeroRTTBytes sums the payload lengths (minus the 16-byte AEAD tag)
				// of all 0-RTT packets coalesced into a single datagram.
				countZeroRTTBytes := func(data []byte) (n protocol.ByteCount) {
					for len(data) > 0 {
						hdr, _, rest, err := wire.ParsePacket(data, 0)
						if err != nil {
							return
						}
						data = rest
						if hdr.Type == protocol.PacketType0RTT {
							n += hdr.Length - 16 /* AEAD tag */
						}
					}
					return
				}
				tracer := newRcvdPacketTracer()
				ln, err := quic.ListenAddrEarly(
					"localhost:0",
					tlsConf,
					getQuicConfig(&quic.Config{
						Versions: []protocol.VersionNumber{version},
						Tracer:   newTracer(func() logging.ConnectionTracer { return tracer }),
					}),
				)
				Expect(err).ToNot(HaveOccurred())
				defer ln.Close()
				proxy, err := quicproxy.NewQuicProxy("localhost:0", &quicproxy.Opts{
					RemoteAddr: fmt.Sprintf("localhost:%d", ln.Addr().(*net.UDPAddr).Port),
					DelayPacket: func(dir quicproxy.Direction, data []byte) time.Duration {
						connID, err := wire.ParseConnectionID(data, 0)
						Expect(err).ToNot(HaveOccurred())
						mutex.Lock()
						defer mutex.Unlock()
						// Attribute 0-RTT bytes to at most two connection IDs; a third
						// distinct connection ID carrying 0-RTT data is a test failure.
						if zeroRTTBytes := countZeroRTTBytes(data); zeroRTTBytes > 0 {
							if firstConnID == nil {
								firstConnID = connID
								firstCounter += zeroRTTBytes
							} else if firstConnID != nil && firstConnID.Equal(connID) {
								Expect(secondConnID).To(BeNil())
								firstCounter += zeroRTTBytes
							} else if secondConnID == nil {
								secondConnID = connID
								secondCounter += zeroRTTBytes
							} else if secondConnID != nil && secondConnID.Equal(connID) {
								secondCounter += zeroRTTBytes
							} else {
								Fail("received 3 connection IDs on 0-RTT packets")
							}
						}
						return rtt / 2
					},
				})
				Expect(err).ToNot(HaveOccurred())
				defer proxy.Close()
				transfer0RTTData(ln, proxy.LocalPort(), clientConf, GeneratePRData(5000), true) // ~5 packets
				mutex.Lock()
				defer mutex.Unlock()
				Expect(firstCounter).To(BeNumerically("~", 5000+100 /* framing overhead */, 100)) // the FIN bit might be sent extra
				Expect(secondCounter).To(BeNumerically("~", firstCounter, 20))
				zeroRTTPackets := get0RTTPackets(tracer.getRcvdPackets())
				Expect(len(zeroRTTPackets)).To(BeNumerically(">=", 5))
				Expect(zeroRTTPackets[0]).To(BeNumerically(">=", protocol.PacketNumber(5)))
			})
			It("doesn't reject 0-RTT when the server's transport stream limit increased", func() {
				const maxStreams = 1
				tlsConf, clientConf := dialAndReceiveSessionTicket(getQuicConfig(&quic.Config{
					MaxIncomingUniStreams: maxStreams,
					AcceptToken:           func(_ net.Addr, _ *quic.Token) bool { return true },
				}))
				tracer := newRcvdPacketTracer()
				ln, err := quic.ListenAddrEarly(
					"localhost:0",
					tlsConf,
					getQuicConfig(&quic.Config{
						Versions:              []protocol.VersionNumber{version},
						AcceptToken:           func(_ net.Addr, _ *quic.Token) bool { return true },
						MaxIncomingUniStreams: maxStreams + 1,
						Tracer:                newTracer(func() logging.ConnectionTracer { return tracer }),
					}),
				)
				Expect(err).ToNot(HaveOccurred())
				defer ln.Close()
				proxy, num0RTTPackets := runCountingProxy(ln.Addr().(*net.UDPAddr).Port)
				defer proxy.Close()
				sess, err := quic.DialAddrEarly(
					fmt.Sprintf("localhost:%d", proxy.LocalPort()),
					clientConf,
					getQuicConfig(&quic.Config{Versions: []protocol.VersionNumber{version}}),
				)
				Expect(err).ToNot(HaveOccurred())
				str, err := sess.OpenUniStream()
				Expect(err).ToNot(HaveOccurred())
				_, err = str.Write([]byte("foobar"))
				Expect(err).ToNot(HaveOccurred())
				Expect(str.Close()).To(Succeed())
				// The client remembers the old limit and refuses to open a new stream.
				_, err = sess.OpenUniStream()
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(ContainSubstring("too many open streams"))
				// The blocking variant succeeds within the timeout, i.e. once the
				// server's increased limit takes effect.
				ctx, cancel := context.WithTimeout(context.Background(), time.Second)
				defer cancel()
				_, err = sess.OpenUniStreamSync(ctx)
				Expect(err).ToNot(HaveOccurred())
				Expect(sess.CloseWithError(0, "")).To(Succeed())
				// The client should send 0-RTT packets.
				num0RTT := atomic.LoadUint32(num0RTTPackets)
				fmt.Fprintf(GinkgoWriter, "Sent %d 0-RTT packets.", num0RTT)
				Expect(num0RTT).ToNot(BeZero())
				Expect(get0RTTPackets(tracer.getRcvdPackets())).ToNot(BeEmpty())
			})
			It("rejects 0-RTT when the server's stream limit decreased", func() {
				const maxStreams = 42
				tlsConf, clientConf := dialAndReceiveSessionTicket(getQuicConfig(&quic.Config{
					MaxIncomingStreams: maxStreams,
					AcceptToken:        func(_ net.Addr, _ *quic.Token) bool { return true },
				}))
				tracer := newRcvdPacketTracer()
				// The server now advertises a smaller stream limit than it did when it
				// issued the session ticket, which forces a 0-RTT rejection.
				ln, err := quic.ListenAddrEarly(
					"localhost:0",
					tlsConf,
					getQuicConfig(&quic.Config{
						Versions:           []protocol.VersionNumber{version},
						AcceptToken:        func(_ net.Addr, _ *quic.Token) bool { return true },
						MaxIncomingStreams: maxStreams - 1,
						Tracer:             newTracer(func() logging.ConnectionTracer { return tracer }),
					}),
				)
				Expect(err).ToNot(HaveOccurred())
				defer ln.Close()
				proxy, num0RTTPackets := runCountingProxy(ln.Addr().(*net.UDPAddr).Port)
				defer proxy.Close()
				check0RTTRejected(ln, proxy.LocalPort(), clientConf)
				// The client should send 0-RTT packets, but the server doesn't process them.
				num0RTT := atomic.LoadUint32(num0RTTPackets)
				fmt.Fprintf(GinkgoWriter, "Sent %d 0-RTT packets.", num0RTT)
				Expect(num0RTT).ToNot(BeZero())
				Expect(get0RTTPackets(tracer.getRcvdPackets())).To(BeEmpty())
			})
			It("rejects 0-RTT when the ALPN changed", func() {
				tlsConf, clientConf := dialAndReceiveSessionTicket(nil)
				// now close the listener and dial new connection with a different ALPN
				clientConf.NextProtos = []string{"new-alpn"}
				tlsConf.NextProtos = []string{"new-alpn"}
				tracer := newRcvdPacketTracer()
				ln, err := quic.ListenAddrEarly(
					"localhost:0",
					tlsConf,
					getQuicConfig(&quic.Config{
						Versions:    []protocol.VersionNumber{version},
						AcceptToken: func(_ net.Addr, _ *quic.Token) bool { return true },
						Tracer:      newTracer(func() logging.ConnectionTracer { return tracer }),
					}),
				)
				Expect(err).ToNot(HaveOccurred())
				defer ln.Close()
				proxy, num0RTTPackets := runCountingProxy(ln.Addr().(*net.UDPAddr).Port)
				defer proxy.Close()
				check0RTTRejected(ln, proxy.LocalPort(), clientConf)
				// The client should send 0-RTT packets, but the server doesn't process them.
				num0RTT := atomic.LoadUint32(num0RTTPackets)
				fmt.Fprintf(GinkgoWriter, "Sent %d 0-RTT packets.", num0RTT)
				Expect(num0RTT).ToNot(BeZero())
				Expect(get0RTTPackets(tracer.getRcvdPackets())).To(BeEmpty())
			})
			// Table test: the server's flow control limit increases between the two
			// connections. 0-RTT must not be rejected, but the client initially stays
			// bound by the remembered (smaller) limit of 3 bytes.
			DescribeTable("flow control limits",
				func(addFlowControlLimit func(*quic.Config, uint64)) {
					tracer := newRcvdPacketTracer()
					firstConf := getQuicConfig(&quic.Config{
						AcceptToken: func(_ net.Addr, _ *quic.Token) bool { return true },
						Versions:    []protocol.VersionNumber{version},
					})
					addFlowControlLimit(firstConf, 3)
					tlsConf, clientConf := dialAndReceiveSessionTicket(firstConf)
					secondConf := getQuicConfig(&quic.Config{
						Versions:    []protocol.VersionNumber{version},
						AcceptToken: func(_ net.Addr, _ *quic.Token) bool { return true },
						Tracer:      newTracer(func() logging.ConnectionTracer { return tracer }),
					})
					addFlowControlLimit(secondConf, 100)
					ln, err := quic.ListenAddrEarly(
						"localhost:0",
						tlsConf,
						secondConf,
					)
					Expect(err).ToNot(HaveOccurred())
					defer ln.Close()
					proxy, _ := runCountingProxy(ln.Addr().(*net.UDPAddr).Port)
					defer proxy.Close()
					sess, err := quic.DialAddrEarly(
						fmt.Sprintf("localhost:%d", proxy.LocalPort()),
						clientConf,
						getQuicConfig(&quic.Config{Versions: []protocol.VersionNumber{version}}),
					)
					Expect(err).ToNot(HaveOccurred())
					str, err := sess.OpenUniStream()
					Expect(err).ToNot(HaveOccurred())
					written := make(chan struct{})
					go func() {
						defer GinkgoRecover()
						defer close(written)
						_, err := str.Write([]byte("foobar"))
						Expect(err).ToNot(HaveOccurred())
						Expect(str.Close()).To(Succeed())
					}()
					Eventually(written).Should(BeClosed())
					serverSess, err := ln.Accept(context.Background())
					Expect(err).ToNot(HaveOccurred())
					rstr, err := serverSess.AcceptUniStream(context.Background())
					Expect(err).ToNot(HaveOccurred())
					data, err := ioutil.ReadAll(rstr)
					Expect(err).ToNot(HaveOccurred())
					Expect(data).To(Equal([]byte("foobar")))
					Expect(serverSess.ConnectionState().TLS.Used0RTT).To(BeTrue())
					Expect(serverSess.CloseWithError(0, "")).To(Succeed())
					Eventually(sess.Context().Done()).Should(BeClosed())
					var processedFirst bool
					for _, p := range tracer.getRcvdPackets() {
						for _, f := range p.frames {
							if sf, ok := f.(*logging.StreamFrame); ok {
								if !processedFirst {
									// The first STREAM should have been sent in a 0-RTT packet.
									// Due to the flow control limit, the STREAM frame was limited to the first 3 bytes.
									Expect(p.hdr.Type).To(Equal(protocol.PacketType0RTT))
									Expect(sf.Length).To(BeEquivalentTo(3))
									processedFirst = true
								} else {
									// All other STREAM frames can only be sent after handshake completion.
									Expect(p.hdr.IsLongHeader).To(BeFalse())
									Expect(sf.Offset).ToNot(BeZero())
								}
							}
						}
					}
				},
				Entry("doesn't reject 0-RTT when the server's transport stream flow control limit increased", func(c *quic.Config, limit uint64) { c.InitialStreamReceiveWindow = limit }),
				Entry("doesn't reject 0-RTT when the server's transport connection flow control limit increased", func(c *quic.Config, limit uint64) { c.InitialConnectionReceiveWindow = limit }),
			)
			It("correctly deals with 0-RTT rejections", func() {
				tlsConf, clientConf := dialAndReceiveSessionTicket(nil)
				// now dial new connection with different transport parameters
				tracer := newRcvdPacketTracer()
				ln, err := quic.ListenAddrEarly(
					"localhost:0",
					tlsConf,
					getQuicConfig(&quic.Config{
						Versions:              []protocol.VersionNumber{version},
						MaxIncomingUniStreams: 1,
						Tracer:                newTracer(func() logging.ConnectionTracer { return tracer }),
					}),
				)
				Expect(err).ToNot(HaveOccurred())
				defer ln.Close()
				proxy, num0RTTPackets := runCountingProxy(ln.Addr().(*net.UDPAddr).Port)
				defer proxy.Close()
				done := make(chan struct{})
				go func() {
					defer GinkgoRecover()
					defer close(done)
					sess, err := ln.Accept(context.Background())
					Expect(err).ToNot(HaveOccurred())
					str, err := sess.AcceptUniStream(context.Background())
					Expect(err).ToNot(HaveOccurred())
					data, err := ioutil.ReadAll(str)
					Expect(err).ToNot(HaveOccurred())
					Expect(string(data)).To(Equal("second flight"))
					Expect(sess.CloseWithError(0, "")).To(Succeed())
				}()
				sess, err := quic.DialAddrEarly(
					fmt.Sprintf("localhost:%d", proxy.LocalPort()),
					clientConf,
					getQuicConfig(&quic.Config{Versions: []protocol.VersionNumber{version}}),
				)
				Expect(err).ToNot(HaveOccurred())
				// The client remembers that it was allowed to open 2 uni-directional streams.
				firstStr, err := sess.OpenUniStream()
				Expect(err).ToNot(HaveOccurred())
				written := make(chan struct{}, 2)
				go func() {
					defer GinkgoRecover()
					defer func() { written <- struct{}{} }()
					_, err := firstStr.Write([]byte("first flight"))
					Expect(err).ToNot(HaveOccurred())
				}()
				secondStr, err := sess.OpenUniStream()
				Expect(err).ToNot(HaveOccurred())
				go func() {
					defer GinkgoRecover()
					defer func() { written <- struct{}{} }()
					_, err := secondStr.Write([]byte("first flight"))
					Expect(err).ToNot(HaveOccurred())
				}()
				ctx, cancel := context.WithTimeout(context.Background(), time.Second)
				defer cancel()
				// After the rejection, operations on the early session and its streams
				// fail with Err0RTTRejected.
				_, err = sess.AcceptStream(ctx)
				Expect(err).To(MatchError(quic.Err0RTTRejected))
				Eventually(written).Should(Receive())
				Eventually(written).Should(Receive())
				_, err = firstStr.Write([]byte("foobar"))
				Expect(err).To(MatchError(quic.Err0RTTRejected))
				_, err = sess.OpenUniStream()
				Expect(err).To(MatchError(quic.Err0RTTRejected))
				// The session obtained from NextSession is subject to the server's new,
				// smaller stream limit (1 incoming uni stream).
				newSess := sess.NextSession()
				str, err := newSess.OpenUniStream()
				Expect(err).ToNot(HaveOccurred())
				_, err = newSess.OpenUniStream()
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(ContainSubstring("too many open streams"))
				_, err = str.Write([]byte("second flight"))
				Expect(err).ToNot(HaveOccurred())
				Expect(str.Close()).To(Succeed())
				Eventually(done).Should(BeClosed())
				Eventually(sess.Context().Done()).Should(BeClosed())
				// The client should send 0-RTT packets, but the server doesn't process them.
				num0RTT := atomic.LoadUint32(num0RTTPackets)
				fmt.Fprintf(GinkgoWriter, "Sent %d 0-RTT packets.", num0RTT)
				Expect(num0RTT).ToNot(BeZero())
				Expect(get0RTTPackets(tracer.getRcvdPackets())).To(BeEmpty())
			})
			It("queues 0-RTT packets, if the Initial is delayed", func() {
				tlsConf, clientConf := dialAndReceiveSessionTicket(nil)
				tracer := newRcvdPacketTracer()
				ln, err := quic.ListenAddrEarly(
					"localhost:0",
					tlsConf,
					getQuicConfig(&quic.Config{
						Versions:    []protocol.VersionNumber{version},
						AcceptToken: func(_ net.Addr, _ *quic.Token) bool { return true },
						Tracer:      newTracer(func() logging.ConnectionTracer { return tracer }),
					}),
				)
				Expect(err).ToNot(HaveOccurred())
				defer ln.Close()
				proxy, err := quicproxy.NewQuicProxy("localhost:0", &quicproxy.Opts{
					RemoteAddr: ln.Addr().String(),
					DelayPacket: func(dir quicproxy.Direction, data []byte) time.Duration {
						// Delay the client's Initial by an extra RTT, so that its 0-RTT
						// packets reach the server first and must be queued there.
						// Long header (0x80) with packet type bits (0x30) == 0 identifies
						// an Initial packet.
						if dir == quicproxy.DirectionIncoming && data[0]&0x80 > 0 && data[0]&0x30>>4 == 0 { // Initial packet from client
							return rtt/2 + rtt
						}
						return rtt / 2
					},
				})
				Expect(err).ToNot(HaveOccurred())
				defer proxy.Close()
				transfer0RTTData(ln, proxy.LocalPort(), clientConf, PRData, true)
				// Despite the reordering, the server must process the Initial first.
				Expect(tracer.rcvdPackets[0].hdr.Type).To(Equal(protocol.PacketTypeInitial))
				zeroRTTPackets := get0RTTPackets(tracer.getRcvdPackets())
				Expect(len(zeroRTTPackets)).To(BeNumerically(">", 10))
				Expect(zeroRTTPackets[0]).To(Equal(protocol.PacketNumber(0)))
			})
		})
	}
})
remove superfluous function parameter in the 0-RTT integration tests
package self_test
import (
"context"
"crypto/tls"
"fmt"
"io/ioutil"
mrand "math/rand"
"net"
"sort"
"sync"
"sync/atomic"
"time"
"github.com/lucas-clemente/quic-go"
quicproxy "github.com/lucas-clemente/quic-go/integrationtests/tools/proxy"
"github.com/lucas-clemente/quic-go/internal/protocol"
"github.com/lucas-clemente/quic-go/internal/wire"
"github.com/lucas-clemente/quic-go/logging"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
)
// rcvdPacket is one received packet as recorded by rcvdPacketTracer:
// its extended header and the frames it contained.
type rcvdPacket struct {
	hdr    *logging.ExtendedHeader
	frames []logging.Frame
}
// rcvdPacketTracer records every packet received on a connection.
// It embeds connTracer to satisfy the remaining logging.ConnectionTracer methods.
type rcvdPacketTracer struct {
	connTracer
	closed      chan struct{} // closed by Close; gates access via getRcvdPackets
	rcvdPackets []rcvdPacket  // every packet passed to ReceivedPacket, in arrival order
}
// newRcvdPacketTracer returns a tracer that is ready to record received packets.
func newRcvdPacketTracer() *rcvdPacketTracer {
	return &rcvdPacketTracer{closed: make(chan struct{})}
}
// ReceivedPacket stores the packet's header and frames for later inspection.
func (t *rcvdPacketTracer) ReceivedPacket(hdr *logging.ExtendedHeader, _ logging.ByteCount, frames []logging.Frame) {
	t.rcvdPackets = append(t.rcvdPackets, rcvdPacket{hdr: hdr, frames: frames})
}
// Close signals that no more packets will be recorded, unblocking getRcvdPackets.
func (t *rcvdPacketTracer) Close() { close(t.closed) }
// getRcvdPackets blocks until Close has been called (avoiding a data race with
// ReceivedPacket), then returns all recorded packets.
func (t *rcvdPacketTracer) getRcvdPackets() []rcvdPacket {
	<-t.closed
	return t.rcvdPackets
}
var _ = Describe("0-RTT", func() {
rtt := scaleDuration(5 * time.Millisecond)
for _, v := range protocol.SupportedVersions {
version := v
Context(fmt.Sprintf("with QUIC version %s", version), func() {
runCountingProxy := func(serverPort int) (*quicproxy.QuicProxy, *uint32) {
var num0RTTPackets uint32 // to be used as an atomic
proxy, err := quicproxy.NewQuicProxy("localhost:0", &quicproxy.Opts{
RemoteAddr: fmt.Sprintf("localhost:%d", serverPort),
DelayPacket: func(_ quicproxy.Direction, data []byte) time.Duration {
hdr, _, _, err := wire.ParsePacket(data, 0)
Expect(err).ToNot(HaveOccurred())
if hdr.Type == protocol.PacketType0RTT {
atomic.AddUint32(&num0RTTPackets, 1)
}
return rtt / 2
},
})
Expect(err).ToNot(HaveOccurred())
return proxy, &num0RTTPackets
}
dialAndReceiveSessionTicket := func(serverConf *quic.Config) (*tls.Config, *tls.Config) {
tlsConf := getTLSConfig()
if serverConf == nil {
serverConf = getQuicConfig(&quic.Config{
AcceptToken: func(_ net.Addr, _ *quic.Token) bool { return true },
})
serverConf.Versions = []protocol.VersionNumber{version}
}
ln, err := quic.ListenAddrEarly(
"localhost:0",
tlsConf,
serverConf,
)
Expect(err).ToNot(HaveOccurred())
defer ln.Close()
proxy, err := quicproxy.NewQuicProxy("localhost:0", &quicproxy.Opts{
RemoteAddr: fmt.Sprintf("localhost:%d", ln.Addr().(*net.UDPAddr).Port),
DelayPacket: func(_ quicproxy.Direction, data []byte) time.Duration { return rtt / 2 },
})
Expect(err).ToNot(HaveOccurred())
defer proxy.Close()
// dial the first session in order to receive a session ticket
done := make(chan struct{})
go func() {
defer GinkgoRecover()
defer close(done)
sess, err := ln.Accept(context.Background())
Expect(err).ToNot(HaveOccurred())
<-sess.Context().Done()
}()
clientConf := getTLSClientConfig()
gets := make(chan string, 100)
puts := make(chan string, 100)
clientConf.ClientSessionCache = newClientSessionCache(gets, puts)
sess, err := quic.DialAddr(
fmt.Sprintf("localhost:%d", proxy.LocalPort()),
clientConf,
getQuicConfig(&quic.Config{Versions: []protocol.VersionNumber{version}}),
)
Expect(err).ToNot(HaveOccurred())
Eventually(puts).Should(Receive())
// received the session ticket. We're done here.
Expect(sess.CloseWithError(0, "")).To(Succeed())
Eventually(done).Should(BeClosed())
return tlsConf, clientConf
}
transfer0RTTData := func(
ln quic.EarlyListener,
proxyPort int,
clientConf *tls.Config,
testdata []byte, // data to transfer
) {
// now dial the second session, and use 0-RTT to send some data
done := make(chan struct{})
go func() {
defer GinkgoRecover()
sess, err := ln.Accept(context.Background())
Expect(err).ToNot(HaveOccurred())
str, err := sess.AcceptUniStream(context.Background())
Expect(err).ToNot(HaveOccurred())
data, err := ioutil.ReadAll(str)
Expect(err).ToNot(HaveOccurred())
Expect(data).To(Equal(testdata))
Expect(sess.ConnectionState().TLS.Used0RTT).To(BeTrue())
Expect(sess.CloseWithError(0, "")).To(Succeed())
close(done)
}()
sess, err := quic.DialAddrEarly(
fmt.Sprintf("localhost:%d", proxyPort),
clientConf,
getQuicConfig(&quic.Config{Versions: []protocol.VersionNumber{version}}),
)
Expect(err).ToNot(HaveOccurred())
defer sess.CloseWithError(0, "")
str, err := sess.OpenUniStream()
Expect(err).ToNot(HaveOccurred())
_, err = str.Write(testdata)
Expect(err).ToNot(HaveOccurred())
Expect(str.Close()).To(Succeed())
Expect(sess.ConnectionState().TLS.Used0RTT).To(BeTrue())
Eventually(done).Should(BeClosed())
Eventually(sess.Context().Done()).Should(BeClosed())
}
check0RTTRejected := func(
ln quic.EarlyListener,
proxyPort int,
clientConf *tls.Config,
) {
sess, err := quic.DialAddrEarly(
fmt.Sprintf("localhost:%d", proxyPort),
clientConf,
getQuicConfig(&quic.Config{Versions: []protocol.VersionNumber{version}}),
)
Expect(err).ToNot(HaveOccurred())
str, err := sess.OpenUniStream()
Expect(err).ToNot(HaveOccurred())
_, err = str.Write(make([]byte, 3000))
Expect(err).ToNot(HaveOccurred())
Expect(str.Close()).To(Succeed())
Expect(sess.ConnectionState().TLS.Used0RTT).To(BeFalse())
// make sure the server doesn't process the data
ctx, cancel := context.WithTimeout(context.Background(), scaleDuration(50*time.Millisecond))
defer cancel()
serverSess, err := ln.Accept(ctx)
Expect(err).ToNot(HaveOccurred())
Expect(serverSess.ConnectionState().TLS.Used0RTT).To(BeFalse())
_, err = serverSess.AcceptUniStream(ctx)
Expect(err).To(Equal(context.DeadlineExceeded))
Expect(serverSess.CloseWithError(0, "")).To(Succeed())
Eventually(sess.Context().Done()).Should(BeClosed())
}
// can be used to extract 0-RTT from a rcvdPacketTracer
get0RTTPackets := func(packets []rcvdPacket) []protocol.PacketNumber {
var zeroRTTPackets []protocol.PacketNumber
for _, p := range packets {
if p.hdr.Type == protocol.PacketType0RTT {
zeroRTTPackets = append(zeroRTTPackets, p.hdr.PacketNumber)
}
}
return zeroRTTPackets
}
It("transfers 0-RTT data", func() {
tlsConf, clientConf := dialAndReceiveSessionTicket(nil)
tracer := newRcvdPacketTracer()
ln, err := quic.ListenAddrEarly(
"localhost:0",
tlsConf,
getQuicConfig(&quic.Config{
Versions: []protocol.VersionNumber{version},
AcceptToken: func(_ net.Addr, _ *quic.Token) bool { return true },
Tracer: newTracer(func() logging.ConnectionTracer { return tracer }),
}),
)
Expect(err).ToNot(HaveOccurred())
defer ln.Close()
proxy, num0RTTPackets := runCountingProxy(ln.Addr().(*net.UDPAddr).Port)
defer proxy.Close()
transfer0RTTData(ln, proxy.LocalPort(), clientConf, PRData)
num0RTT := atomic.LoadUint32(num0RTTPackets)
fmt.Fprintf(GinkgoWriter, "Sent %d 0-RTT packets.", num0RTT)
Expect(num0RTT).ToNot(BeZero())
zeroRTTPackets := get0RTTPackets(tracer.getRcvdPackets())
Expect(len(zeroRTTPackets)).To(BeNumerically(">", 10))
sort.Slice(zeroRTTPackets, func(i, j int) bool { return zeroRTTPackets[i] < zeroRTTPackets[j] })
Expect(zeroRTTPackets[0]).To(Equal(protocol.PacketNumber(0)))
})
// Test that data intended to be sent with 1-RTT protection is not sent in 0-RTT packets.
It("waits until a session until the handshake is done", func() {
tlsConf, clientConf := dialAndReceiveSessionTicket(nil)
zeroRTTData := GeneratePRData(2 * 1100) // 2 packets
oneRTTData := PRData
tracer := newRcvdPacketTracer()
ln, err := quic.ListenAddrEarly(
"localhost:0",
tlsConf,
getQuicConfig(&quic.Config{
Versions: []protocol.VersionNumber{version},
AcceptToken: func(_ net.Addr, _ *quic.Token) bool { return true },
Tracer: newTracer(func() logging.ConnectionTracer { return tracer }),
}),
)
Expect(err).ToNot(HaveOccurred())
defer ln.Close()
// now dial the second session, and use 0-RTT to send some data
go func() {
defer GinkgoRecover()
sess, err := ln.Accept(context.Background())
Expect(err).ToNot(HaveOccurred())
str, err := sess.AcceptUniStream(context.Background())
Expect(err).ToNot(HaveOccurred())
data, err := ioutil.ReadAll(str)
Expect(err).ToNot(HaveOccurred())
Expect(data).To(Equal(zeroRTTData))
str, err = sess.AcceptUniStream(context.Background())
Expect(err).ToNot(HaveOccurred())
data, err = ioutil.ReadAll(str)
Expect(err).ToNot(HaveOccurred())
Expect(data).To(Equal(oneRTTData))
Expect(sess.CloseWithError(0, "")).To(Succeed())
}()
proxy, num0RTTPackets := runCountingProxy(ln.Addr().(*net.UDPAddr).Port)
defer proxy.Close()
sess, err := quic.DialAddrEarly(
fmt.Sprintf("localhost:%d", proxy.LocalPort()),
clientConf,
getQuicConfig(&quic.Config{Versions: []protocol.VersionNumber{version}}),
)
Expect(err).ToNot(HaveOccurred())
sent0RTT := make(chan struct{})
go func() {
defer GinkgoRecover()
defer close(sent0RTT)
str, err := sess.OpenUniStream()
Expect(err).ToNot(HaveOccurred())
_, err = str.Write(zeroRTTData)
Expect(err).ToNot(HaveOccurred())
Expect(str.Close()).To(Succeed())
}()
Eventually(sent0RTT).Should(BeClosed())
// wait for the handshake to complete
Eventually(sess.HandshakeComplete().Done()).Should(BeClosed())
str, err := sess.OpenUniStream()
Expect(err).ToNot(HaveOccurred())
_, err = str.Write(PRData)
Expect(err).ToNot(HaveOccurred())
Expect(str.Close()).To(Succeed())
<-sess.Context().Done()
num0RTT := atomic.LoadUint32(num0RTTPackets)
fmt.Fprintf(GinkgoWriter, "Sent %d 0-RTT packets.", num0RTT)
Expect(num0RTT).To(Or(BeEquivalentTo(2), BeEquivalentTo(3))) // the FIN might be sent in a separate packet
Expect(get0RTTPackets(tracer.getRcvdPackets())).To(HaveLen(int(num0RTT)))
})
// Exercises 0-RTT transfer over a lossy path: the proxy drops ~25% of the
// 0-RTT packets, so the dropped data has to be retransmitted, and the
// transfer must still complete.
It("transfers 0-RTT data, when 0-RTT packets are lost", func() {
var (
num0RTTPackets uint32 // to be used as an atomic
num0RTTDropped uint32
)
tlsConf, clientConf := dialAndReceiveSessionTicket(nil)
tracer := newRcvdPacketTracer()
ln, err := quic.ListenAddrEarly(
"localhost:0",
tlsConf,
getQuicConfig(&quic.Config{
Versions: []protocol.VersionNumber{version},
Tracer: newTracer(func() logging.ConnectionTracer { return tracer }),
}),
)
Expect(err).ToNot(HaveOccurred())
defer ln.Close()
proxy, err := quicproxy.NewQuicProxy("localhost:0", &quicproxy.Opts{
RemoteAddr: fmt.Sprintf("localhost:%d", ln.Addr().(*net.UDPAddr).Port),
DelayPacket: func(_ quicproxy.Direction, data []byte) time.Duration {
// count every 0-RTT packet that traverses the proxy
hdr, _, _, err := wire.ParsePacket(data, 0)
Expect(err).ToNot(HaveOccurred())
if hdr.Type == protocol.PacketType0RTT {
atomic.AddUint32(&num0RTTPackets, 1)
}
return rtt / 2
},
DropPacket: func(_ quicproxy.Direction, data []byte) bool {
hdr, _, _, err := wire.ParsePacket(data, 0)
Expect(err).ToNot(HaveOccurred())
if hdr.Type == protocol.PacketType0RTT {
// drop 25% of the 0-RTT packets
drop := mrand.Intn(4) == 0
if drop {
atomic.AddUint32(&num0RTTDropped, 1)
}
return drop
}
return false
},
})
Expect(err).ToNot(HaveOccurred())
defer proxy.Close()
transfer0RTTData(ln, proxy.LocalPort(), clientConf, PRData)
num0RTT := atomic.LoadUint32(&num0RTTPackets)
numDropped := atomic.LoadUint32(&num0RTTDropped)
fmt.Fprintf(GinkgoWriter, "Sent %d 0-RTT packets. Dropped %d of those.", num0RTT, numDropped)
// sanity: the proxy must actually have seen and dropped some 0-RTT packets
Expect(numDropped).ToNot(BeZero())
Expect(num0RTT).ToNot(BeZero())
// the server must have received (i.e. accepted) 0-RTT packets
Expect(get0RTTPackets(tracer.getRcvdPackets())).ToNot(BeEmpty())
})
// After a server Retry the client must retransmit all of its 0-RTT data on
// the connection using the new (post-Retry) connection ID. The test counts
// 0-RTT payload bytes per connection ID and expects both flights to carry
// (approximately) the same amount of data.
It("retransmits all 0-RTT data when the server performs a Retry", func() {
var mutex sync.Mutex
var firstConnID, secondConnID protocol.ConnectionID
var firstCounter, secondCounter protocol.ByteCount
tlsConf, clientConf := dialAndReceiveSessionTicket(nil)
// sums the payload bytes of all 0-RTT packets coalesced into one datagram
countZeroRTTBytes := func(data []byte) (n protocol.ByteCount) {
for len(data) > 0 {
hdr, _, rest, err := wire.ParsePacket(data, 0)
if err != nil {
return
}
data = rest
if hdr.Type == protocol.PacketType0RTT {
n += hdr.Length - 16 /* AEAD tag */
}
}
return
}
tracer := newRcvdPacketTracer()
ln, err := quic.ListenAddrEarly(
"localhost:0",
tlsConf,
getQuicConfig(&quic.Config{
Versions: []protocol.VersionNumber{version},
Tracer: newTracer(func() logging.ConnectionTracer { return tracer }),
}),
)
Expect(err).ToNot(HaveOccurred())
defer ln.Close()
proxy, err := quicproxy.NewQuicProxy("localhost:0", &quicproxy.Opts{
RemoteAddr: fmt.Sprintf("localhost:%d", ln.Addr().(*net.UDPAddr).Port),
DelayPacket: func(dir quicproxy.Direction, data []byte) time.Duration {
connID, err := wire.ParseConnectionID(data, 0)
Expect(err).ToNot(HaveOccurred())
mutex.Lock()
defer mutex.Unlock()
// attribute 0-RTT bytes to the first or second connection ID seen;
// a third connection ID would mean something went wrong
if zeroRTTBytes := countZeroRTTBytes(data); zeroRTTBytes > 0 {
if firstConnID == nil {
firstConnID = connID
firstCounter += zeroRTTBytes
} else if firstConnID != nil && firstConnID.Equal(connID) {
Expect(secondConnID).To(BeNil())
firstCounter += zeroRTTBytes
} else if secondConnID == nil {
secondConnID = connID
secondCounter += zeroRTTBytes
} else if secondConnID != nil && secondConnID.Equal(connID) {
secondCounter += zeroRTTBytes
} else {
Fail("received 3 connection IDs on 0-RTT packets")
}
}
return rtt / 2
},
})
Expect(err).ToNot(HaveOccurred())
defer proxy.Close()
transfer0RTTData(ln, proxy.LocalPort(), clientConf, GeneratePRData(5000)) // ~5 packets
mutex.Lock()
defer mutex.Unlock()
// both flights should carry roughly the full 5000 bytes (+ framing)
Expect(firstCounter).To(BeNumerically("~", 5000+100 /* framing overhead */, 100)) // the FIN bit might be sent extra
Expect(secondCounter).To(BeNumerically("~", firstCounter, 20))
zeroRTTPackets := get0RTTPackets(tracer.getRcvdPackets())
Expect(len(zeroRTTPackets)).To(BeNumerically(">=", 5))
// packet numbers continue after the Retry, so the first received 0-RTT
// packet must not start at 0
Expect(zeroRTTPackets[0]).To(BeNumerically(">=", protocol.PacketNumber(5)))
})
// The server raised its uni-stream limit since the session ticket was
// issued. 0-RTT must still be accepted; the client keeps using the old
// (remembered) limit until the handshake delivers the new one.
It("doesn't reject 0-RTT when the server's transport stream limit increased", func() {
const maxStreams = 1
tlsConf, clientConf := dialAndReceiveSessionTicket(getQuicConfig(&quic.Config{
MaxIncomingUniStreams: maxStreams,
AcceptToken: func(_ net.Addr, _ *quic.Token) bool { return true },
}))
tracer := newRcvdPacketTracer()
ln, err := quic.ListenAddrEarly(
"localhost:0",
tlsConf,
getQuicConfig(&quic.Config{
Versions: []protocol.VersionNumber{version},
AcceptToken: func(_ net.Addr, _ *quic.Token) bool { return true },
MaxIncomingUniStreams: maxStreams + 1,
Tracer: newTracer(func() logging.ConnectionTracer { return tracer }),
}),
)
Expect(err).ToNot(HaveOccurred())
defer ln.Close()
proxy, num0RTTPackets := runCountingProxy(ln.Addr().(*net.UDPAddr).Port)
defer proxy.Close()
sess, err := quic.DialAddrEarly(
fmt.Sprintf("localhost:%d", proxy.LocalPort()),
clientConf,
getQuicConfig(&quic.Config{Versions: []protocol.VersionNumber{version}}),
)
Expect(err).ToNot(HaveOccurred())
str, err := sess.OpenUniStream()
Expect(err).ToNot(HaveOccurred())
_, err = str.Write([]byte("foobar"))
Expect(err).ToNot(HaveOccurred())
Expect(str.Close()).To(Succeed())
// The client remembers the old limit and refuses to open a new stream.
_, err = sess.OpenUniStream()
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("too many open streams"))
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
// once the handshake delivers the increased limit, opening succeeds
_, err = sess.OpenUniStreamSync(ctx)
Expect(err).ToNot(HaveOccurred())
Expect(sess.CloseWithError(0, "")).To(Succeed())
// The client should send 0-RTT packets.
num0RTT := atomic.LoadUint32(num0RTTPackets)
fmt.Fprintf(GinkgoWriter, "Sent %d 0-RTT packets.", num0RTT)
Expect(num0RTT).ToNot(BeZero())
Expect(get0RTTPackets(tracer.getRcvdPackets())).ToNot(BeEmpty())
})
// The server reduced its bidi-stream limit since the ticket was issued.
// Accepting 0-RTT could exceed the new limit, so the server must reject
// the early data (received 0-RTT packets stay unprocessed).
It("rejects 0-RTT when the server's stream limit decreased", func() {
const maxStreams = 42
tlsConf, clientConf := dialAndReceiveSessionTicket(getQuicConfig(&quic.Config{
MaxIncomingStreams: maxStreams,
AcceptToken: func(_ net.Addr, _ *quic.Token) bool { return true },
}))
tracer := newRcvdPacketTracer()
ln, err := quic.ListenAddrEarly(
"localhost:0",
tlsConf,
getQuicConfig(&quic.Config{
Versions: []protocol.VersionNumber{version},
AcceptToken: func(_ net.Addr, _ *quic.Token) bool { return true },
MaxIncomingStreams: maxStreams - 1,
Tracer: newTracer(func() logging.ConnectionTracer { return tracer }),
}),
)
Expect(err).ToNot(HaveOccurred())
defer ln.Close()
proxy, num0RTTPackets := runCountingProxy(ln.Addr().(*net.UDPAddr).Port)
defer proxy.Close()
check0RTTRejected(ln, proxy.LocalPort(), clientConf)
// The client should send 0-RTT packets, but the server doesn't process them.
num0RTT := atomic.LoadUint32(num0RTTPackets)
fmt.Fprintf(GinkgoWriter, "Sent %d 0-RTT packets.", num0RTT)
Expect(num0RTT).ToNot(BeZero())
Expect(get0RTTPackets(tracer.getRcvdPackets())).To(BeEmpty())
})
// Changing the ALPN invalidates the session ticket for 0-RTT purposes:
// the server must reject the early data even though the resumption itself
// may still work.
It("rejects 0-RTT when the ALPN changed", func() {
tlsConf, clientConf := dialAndReceiveSessionTicket(nil)
// now close the listener and dial new connection with a different ALPN
clientConf.NextProtos = []string{"new-alpn"}
tlsConf.NextProtos = []string{"new-alpn"}
tracer := newRcvdPacketTracer()
ln, err := quic.ListenAddrEarly(
"localhost:0",
tlsConf,
getQuicConfig(&quic.Config{
Versions: []protocol.VersionNumber{version},
AcceptToken: func(_ net.Addr, _ *quic.Token) bool { return true },
Tracer: newTracer(func() logging.ConnectionTracer { return tracer }),
}),
)
Expect(err).ToNot(HaveOccurred())
defer ln.Close()
proxy, num0RTTPackets := runCountingProxy(ln.Addr().(*net.UDPAddr).Port)
defer proxy.Close()
check0RTTRejected(ln, proxy.LocalPort(), clientConf)
// The client should send 0-RTT packets, but the server doesn't process them.
num0RTT := atomic.LoadUint32(num0RTTPackets)
fmt.Fprintf(GinkgoWriter, "Sent %d 0-RTT packets.", num0RTT)
Expect(num0RTT).ToNot(BeZero())
Expect(get0RTTPackets(tracer.getRcvdPackets())).To(BeEmpty())
})
// For each flow-control knob (stream window / connection window): the
// first listener advertises a limit of 3 bytes, the second one 100 bytes.
// The first STREAM frame must go out in a 0-RTT packet limited to the old
// (remembered) 3-byte window; the remaining data may only be sent in
// short-header packets after handshake completion.
DescribeTable("flow control limits",
func(addFlowControlLimit func(*quic.Config, uint64)) {
tracer := newRcvdPacketTracer()
firstConf := getQuicConfig(&quic.Config{
AcceptToken: func(_ net.Addr, _ *quic.Token) bool { return true },
Versions: []protocol.VersionNumber{version},
})
addFlowControlLimit(firstConf, 3)
tlsConf, clientConf := dialAndReceiveSessionTicket(firstConf)
secondConf := getQuicConfig(&quic.Config{
Versions: []protocol.VersionNumber{version},
AcceptToken: func(_ net.Addr, _ *quic.Token) bool { return true },
Tracer: newTracer(func() logging.ConnectionTracer { return tracer }),
})
addFlowControlLimit(secondConf, 100)
ln, err := quic.ListenAddrEarly(
"localhost:0",
tlsConf,
secondConf,
)
Expect(err).ToNot(HaveOccurred())
defer ln.Close()
proxy, _ := runCountingProxy(ln.Addr().(*net.UDPAddr).Port)
defer proxy.Close()
sess, err := quic.DialAddrEarly(
fmt.Sprintf("localhost:%d", proxy.LocalPort()),
clientConf,
getQuicConfig(&quic.Config{Versions: []protocol.VersionNumber{version}}),
)
Expect(err).ToNot(HaveOccurred())
str, err := sess.OpenUniStream()
Expect(err).ToNot(HaveOccurred())
written := make(chan struct{})
go func() {
defer GinkgoRecover()
defer close(written)
_, err := str.Write([]byte("foobar"))
Expect(err).ToNot(HaveOccurred())
Expect(str.Close()).To(Succeed())
}()
Eventually(written).Should(BeClosed())
serverSess, err := ln.Accept(context.Background())
Expect(err).ToNot(HaveOccurred())
rstr, err := serverSess.AcceptUniStream(context.Background())
Expect(err).ToNot(HaveOccurred())
data, err := ioutil.ReadAll(rstr)
Expect(err).ToNot(HaveOccurred())
Expect(data).To(Equal([]byte("foobar")))
// the server must have used the early data
Expect(serverSess.ConnectionState().TLS.Used0RTT).To(BeTrue())
Expect(serverSess.CloseWithError(0, "")).To(Succeed())
Eventually(sess.Context().Done()).Should(BeClosed())
var processedFirst bool
for _, p := range tracer.getRcvdPackets() {
for _, f := range p.frames {
if sf, ok := f.(*logging.StreamFrame); ok {
if !processedFirst {
// The first STREAM should have been sent in a 0-RTT packet.
// Due to the flow control limit, the STREAM frame was limit to the first 3 bytes.
Expect(p.hdr.Type).To(Equal(protocol.PacketType0RTT))
Expect(sf.Length).To(BeEquivalentTo(3))
processedFirst = true
} else {
// All other STREAM frames can only be sent after handshake completion.
Expect(p.hdr.IsLongHeader).To(BeFalse())
Expect(sf.Offset).ToNot(BeZero())
}
}
}
}
},
Entry("doesn't reject 0-RTT when the server's transport stream flow control limit increased", func(c *quic.Config, limit uint64) { c.InitialStreamReceiveWindow = limit }),
Entry("doesn't reject 0-RTT when the server's transport connection flow control limit increased", func(c *quic.Config, limit uint64) { c.InitialConnectionReceiveWindow = limit }),
)
// Full 0-RTT rejection flow: the server now only allows 1 incoming uni
// stream, so the two streams opened from remembered ticket state must be
// rejected. Writes on pre-handshake streams fail with Err0RTTRejected and
// the application retries on the session returned by NextSession().
It("correctly deals with 0-RTT rejections", func() {
tlsConf, clientConf := dialAndReceiveSessionTicket(nil)
// now dial new connection with different transport parameters
tracer := newRcvdPacketTracer()
ln, err := quic.ListenAddrEarly(
"localhost:0",
tlsConf,
getQuicConfig(&quic.Config{
Versions: []protocol.VersionNumber{version},
MaxIncomingUniStreams: 1,
Tracer: newTracer(func() logging.ConnectionTracer { return tracer }),
}),
)
Expect(err).ToNot(HaveOccurred())
defer ln.Close()
proxy, num0RTTPackets := runCountingProxy(ln.Addr().(*net.UDPAddr).Port)
defer proxy.Close()
done := make(chan struct{})
go func() {
defer GinkgoRecover()
defer close(done)
// the server only ever sees the post-rejection ("second flight") data
sess, err := ln.Accept(context.Background())
Expect(err).ToNot(HaveOccurred())
str, err := sess.AcceptUniStream(context.Background())
Expect(err).ToNot(HaveOccurred())
data, err := ioutil.ReadAll(str)
Expect(err).ToNot(HaveOccurred())
Expect(string(data)).To(Equal("second flight"))
Expect(sess.CloseWithError(0, "")).To(Succeed())
}()
sess, err := quic.DialAddrEarly(
fmt.Sprintf("localhost:%d", proxy.LocalPort()),
clientConf,
getQuicConfig(&quic.Config{Versions: []protocol.VersionNumber{version}}),
)
Expect(err).ToNot(HaveOccurred())
// The client remembers that it was allowed to open 2 uni-directional streams.
firstStr, err := sess.OpenUniStream()
Expect(err).ToNot(HaveOccurred())
written := make(chan struct{}, 2)
go func() {
defer GinkgoRecover()
defer func() { written <- struct{}{} }()
_, err := firstStr.Write([]byte("first flight"))
Expect(err).ToNot(HaveOccurred())
}()
secondStr, err := sess.OpenUniStream()
Expect(err).ToNot(HaveOccurred())
go func() {
defer GinkgoRecover()
defer func() { written <- struct{}{} }()
_, err := secondStr.Write([]byte("first flight"))
Expect(err).ToNot(HaveOccurred())
}()
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
// AcceptStream surfaces the rejection to the application
_, err = sess.AcceptStream(ctx)
Expect(err).To(MatchError(quic.Err0RTTRejected))
Eventually(written).Should(Receive())
Eventually(written).Should(Receive())
// after the rejection, streams opened before the handshake are unusable
_, err = firstStr.Write([]byte("foobar"))
Expect(err).To(MatchError(quic.Err0RTTRejected))
_, err = sess.OpenUniStream()
Expect(err).To(MatchError(quic.Err0RTTRejected))
// retry on the session that uses the server's actual (1 stream) limit
newSess := sess.NextSession()
str, err := newSess.OpenUniStream()
Expect(err).ToNot(HaveOccurred())
_, err = newSess.OpenUniStream()
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("too many open streams"))
_, err = str.Write([]byte("second flight"))
Expect(err).ToNot(HaveOccurred())
Expect(str.Close()).To(Succeed())
Eventually(done).Should(BeClosed())
Eventually(sess.Context().Done()).Should(BeClosed())
// The client should send 0-RTT packets, but the server doesn't process them.
num0RTT := atomic.LoadUint32(num0RTTPackets)
fmt.Fprintf(GinkgoWriter, "Sent %d 0-RTT packets.", num0RTT)
Expect(num0RTT).ToNot(BeZero())
Expect(get0RTTPackets(tracer.getRcvdPackets())).To(BeEmpty())
})
// Delaying only the client's Initial makes the 0-RTT packets arrive at the
// server first. The server must queue them and process them right after the
// Initial, so the first received packet is the Initial and the 0-RTT packet
// numbers start at 0.
It("queues 0-RTT packets, if the Initial is delayed", func() {
tlsConf, clientConf := dialAndReceiveSessionTicket(nil)
tracer := newRcvdPacketTracer()
ln, err := quic.ListenAddrEarly(
"localhost:0",
tlsConf,
getQuicConfig(&quic.Config{
Versions: []protocol.VersionNumber{version},
AcceptToken: func(_ net.Addr, _ *quic.Token) bool { return true },
Tracer: newTracer(func() logging.ConnectionTracer { return tracer }),
}),
)
Expect(err).ToNot(HaveOccurred())
defer ln.Close()
proxy, err := quicproxy.NewQuicProxy("localhost:0", &quicproxy.Opts{
RemoteAddr: ln.Addr().String(),
DelayPacket: func(dir quicproxy.Direction, data []byte) time.Duration {
// long header (0x80) with packet type bits (0x30) == 0 is an Initial;
// delay only the client's Initial by an extra RTT
if dir == quicproxy.DirectionIncoming && data[0]&0x80 > 0 && data[0]&0x30>>4 == 0 { // Initial packet from client
return rtt/2 + rtt
}
return rtt / 2
},
})
Expect(err).ToNot(HaveOccurred())
defer proxy.Close()
transfer0RTTData(ln, proxy.LocalPort(), clientConf, PRData)
Expect(tracer.rcvdPackets[0].hdr.Type).To(Equal(protocol.PacketTypeInitial))
zeroRTTPackets := get0RTTPackets(tracer.getRcvdPackets())
Expect(len(zeroRTTPackets)).To(BeNumerically(">", 10))
Expect(zeroRTTPackets[0]).To(Equal(protocol.PacketNumber(0)))
})
})
}
})
|
/*
Copyright 2019 The xridge kubestone contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package k8s
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
k8sclient "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/reference"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)
// Access provides client related structs to access kubernetes
type Access struct {
Client client.Client // controller-runtime client used for object CRUD
Clientset *k8sclient.Clientset // raw client-go clientset (used for batch Jobs)
Scheme *runtime.Scheme // scheme used to resolve owner references
EventRecorder record.EventRecorder // records events on owner objects
}
// CreateWithReference creates a kubernetes resource and sets the owner
// reference to the given owner object. It provides basic idempotency by
// ignoring AlreadyExists errors. A successful creation is recorded as an
// event on the owner via the EventRecorder.
func (a *Access) CreateWithReference(ctx context.Context, object, owner metav1.Object) error {
	runtimeObject, ok := object.(runtime.Object)
	if !ok {
		return fmt.Errorf("object (%T) is not a runtime.Object", object)
	}
	runtimeOwner, ok := owner.(runtime.Object)
	if !ok {
		// fix: report the owner's type, not the object's
		return fmt.Errorf("owner (%T) is not a runtime.Object", owner)
	}
	ownerRef, err := reference.GetReference(a.Scheme, runtimeOwner)
	if err != nil {
		// fix: keep the underlying error instead of discarding it
		return fmt.Errorf("unable to get reference to owner: %v", err)
	}
	if err := controllerutil.SetControllerReference(owner, object, a.Scheme); err != nil {
		return err
	}
	err = a.Client.Create(ctx, runtimeObject)
	if IgnoreAlreadyExists(err) != nil {
		return err
	}
	// Only record the event on a real (non-duplicate) creation.
	if !errors.IsAlreadyExists(err) {
		a.EventRecorder.Eventf(ownerRef, corev1.EventTypeNormal, CreateSucceeded,
			"Created %v", object.GetSelfLink())
	}
	return nil
}
// DeleteObject deletes a kubernetes resource, ignoring NotFound errors so
// that it can be called multiple times. A successful deletion is recorded
// as an event on the owner via the EventRecorder.
func (a *Access) DeleteObject(ctx context.Context, object, owner metav1.Object) error {
	runtimeObject, ok := object.(runtime.Object)
	if !ok {
		return fmt.Errorf("object (%T) is not a runtime.Object", object)
	}
	runtimeOwner, ok := owner.(runtime.Object)
	if !ok {
		// fix: report the owner's type, not the object's
		return fmt.Errorf("owner (%T) is not a runtime.Object", owner)
	}
	ownerRef, err := reference.GetReference(a.Scheme, runtimeOwner)
	if err != nil {
		// fix: keep the underlying error instead of discarding it
		return fmt.Errorf("unable to get reference to owner: %v", err)
	}
	// Need to get the object first so that object.GetSelfLink()
	// works during event recording.
	namespacedName := types.NamespacedName{
		Namespace: object.GetNamespace(),
		Name:      object.GetName(),
	}
	err = a.Client.Get(ctx, namespacedName, runtimeObject)
	if IgnoreNotFound(err) != nil {
		return err
	} else if errors.IsNotFound(err) {
		// already gone: nothing to delete, nothing to record
		return nil
	}
	err = a.Client.Delete(ctx, runtimeObject)
	if IgnoreNotFound(err) != nil {
		return err
	}
	if !errors.IsNotFound(err) {
		a.EventRecorder.Eventf(ownerRef, corev1.EventTypeNormal, DeleteSucceeded,
			"Deleted %v", object.GetSelfLink())
	}
	return nil
}
// IsJobFinished returns true if the given job has already succeeded or failed
func (a *Access) IsJobFinished(namespacedName types.NamespacedName) (finished bool, err error) {
// NOTE(review): this Get signature (name, options) is the pre-context
// client-go API — confirm against the vendored client-go version.
job, err := a.Clientset.BatchV1().Jobs(namespacedName.Namespace).Get(
namespacedName.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
// Finished once at least one pod counted in JobStatus succeeded or failed.
finished = job.Status.Succeeded+job.Status.Failed > 0
return finished, nil
}
Use CompletionTime to check if the job is finished
/*
Copyright 2019 The xridge kubestone contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package k8s
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
k8sclient "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/reference"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)
// Access provides client related structs to access kubernetes
type Access struct {
Client client.Client // controller-runtime client used for object CRUD
Clientset *k8sclient.Clientset // raw client-go clientset (used for batch Jobs)
Scheme *runtime.Scheme // scheme used to resolve owner references
EventRecorder record.EventRecorder // records events on owner objects
}
// CreateWithReference creates a kubernetes resource and sets the owner
// reference to the given owner object. It provides basic idempotency by
// ignoring AlreadyExists errors. A successful creation is recorded as an
// event on the owner via the EventRecorder.
func (a *Access) CreateWithReference(ctx context.Context, object, owner metav1.Object) error {
	runtimeObject, ok := object.(runtime.Object)
	if !ok {
		return fmt.Errorf("object (%T) is not a runtime.Object", object)
	}
	runtimeOwner, ok := owner.(runtime.Object)
	if !ok {
		// fix: report the owner's type, not the object's
		return fmt.Errorf("owner (%T) is not a runtime.Object", owner)
	}
	ownerRef, err := reference.GetReference(a.Scheme, runtimeOwner)
	if err != nil {
		// fix: keep the underlying error instead of discarding it
		return fmt.Errorf("unable to get reference to owner: %v", err)
	}
	if err := controllerutil.SetControllerReference(owner, object, a.Scheme); err != nil {
		return err
	}
	err = a.Client.Create(ctx, runtimeObject)
	if IgnoreAlreadyExists(err) != nil {
		return err
	}
	// Only record the event on a real (non-duplicate) creation.
	if !errors.IsAlreadyExists(err) {
		a.EventRecorder.Eventf(ownerRef, corev1.EventTypeNormal, CreateSucceeded,
			"Created %v", object.GetSelfLink())
	}
	return nil
}
// DeleteObject deletes a kubernetes resource, ignoring NotFound errors so
// that it can be called multiple times. A successful deletion is recorded
// as an event on the owner via the EventRecorder.
func (a *Access) DeleteObject(ctx context.Context, object, owner metav1.Object) error {
	runtimeObject, ok := object.(runtime.Object)
	if !ok {
		return fmt.Errorf("object (%T) is not a runtime.Object", object)
	}
	runtimeOwner, ok := owner.(runtime.Object)
	if !ok {
		// fix: report the owner's type, not the object's
		return fmt.Errorf("owner (%T) is not a runtime.Object", owner)
	}
	ownerRef, err := reference.GetReference(a.Scheme, runtimeOwner)
	if err != nil {
		// fix: keep the underlying error instead of discarding it
		return fmt.Errorf("unable to get reference to owner: %v", err)
	}
	// Need to get the object first so that object.GetSelfLink()
	// works during event recording.
	namespacedName := types.NamespacedName{
		Namespace: object.GetNamespace(),
		Name:      object.GetName(),
	}
	err = a.Client.Get(ctx, namespacedName, runtimeObject)
	if IgnoreNotFound(err) != nil {
		return err
	} else if errors.IsNotFound(err) {
		// already gone: nothing to delete, nothing to record
		return nil
	}
	err = a.Client.Delete(ctx, runtimeObject)
	if IgnoreNotFound(err) != nil {
		return err
	}
	if !errors.IsNotFound(err) {
		a.EventRecorder.Eventf(ownerRef, corev1.EventTypeNormal, DeleteSucceeded,
			"Deleted %v", object.GetSelfLink())
	}
	return nil
}
// IsJobFinished returns true if the given job has already succeeded or failed
// (it is treated as finished once Status.CompletionTime is set).
// NOTE(review): per the batch/v1 API, CompletionTime is only set when a job
// completes successfully — confirm that failed jobs are detected elsewhere,
// otherwise this never reports a failed job as finished.
func (a *Access) IsJobFinished(namespacedName types.NamespacedName) (finished bool, err error) {
job, err := a.Clientset.BatchV1().Jobs(namespacedName.Namespace).Get(
namespacedName.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
finished = job.Status.CompletionTime != nil
return finished, nil
}
|
package login
import (
"crypto/tls"
"errors"
"fmt"
"strings"
"github.com/davecgh/go-spew/spew"
"github.com/go-ldap/ldap"
"github.com/grafana/grafana/pkg/bus"
"github.com/grafana/grafana/pkg/log"
m "github.com/grafana/grafana/pkg/models"
)
// ldapAuther authenticates users against a single configured LDAP server.
type ldapAuther struct {
server *LdapServerConf // server connection and group-mapping configuration
conn *ldap.Conn // live connection, established by Dial
requireSecondBind bool // set when the initial bind used service credentials
}
// NewLdapAuthenticator returns an authenticator bound to the given LDAP
// server configuration. No connection is made until Dial is called.
func NewLdapAuthenticator(server *LdapServerConf) *ldapAuther {
	auther := ldapAuther{server: server}
	return &auther
}
// Dial connects to the configured LDAP server, using TLS when UseSSL is set.
// The connection is stored on the receiver for the subsequent bind/search calls.
func (a *ldapAuther) Dial() error {
address := fmt.Sprintf("%s:%d", a.server.Host, a.server.Port)
var err error
if a.server.UseSSL {
tlsCfg := &tls.Config{
// SkipVerifySSL disables certificate verification (insecure; operator opt-in)
InsecureSkipVerify: a.server.SkipVerifySSL,
ServerName: a.server.Host,
}
a.conn, err = ldap.DialTLS("tcp", address, tlsCfg)
} else {
a.conn, err = ldap.Dial("tcp", address)
}
return err
}
// login authenticates the user in query against LDAP: dial, initial bind,
// user search, optional second bind with the user's own password, then sync
// of user details and org roles into Grafana. On success query.User is set.
func (a *ldapAuther) login(query *LoginUserQuery) error {
	if err := a.Dial(); err != nil {
		return err
	}
	defer a.conn.Close()

	// perform initial authentication
	if err := a.initialBind(query.Username, query.Password); err != nil {
		return err
	}

	// find user entry & attributes
	ldapUser, err := a.searchForUser(query.Username)
	if err != nil {
		return err
	}
	if ldapCfg.VerboseLogging {
		log.Info("Ldap User Info: %s", spew.Sdump(ldapUser))
	}

	// check if a second user bind is needed (initial bind used service credentials)
	if a.requireSecondBind {
		if err := a.secondBind(ldapUser, query.Password); err != nil {
			return err
		}
	}

	grafanaUser, err := a.getGrafanaUserFor(ldapUser)
	if err != nil {
		return err
	}
	// sync user details
	if err := a.syncUserInfo(grafanaUser, ldapUser); err != nil {
		return err
	}
	// sync org roles
	if err := a.syncOrgRoles(grafanaUser, ldapUser); err != nil {
		return err
	}
	query.User = grafanaUser
	return nil
}
// getGrafanaUserFor maps an authenticated LDAP user to a Grafana user.
// Access requires membership in at least one mapped LDAP group (or no
// mappings configured at all). A missing Grafana account is created on the fly.
func (a *ldapAuther) getGrafanaUserFor(ldapUser *ldapUserInfo) (*m.User, error) {
	// validate that the user has access:
	// if there are no ldap group mappings access is true,
	// otherwise a single group must match
	access := len(a.server.LdapGroups) == 0
	for _, ldapGroup := range a.server.LdapGroups {
		if ldapUser.isMemberOf(ldapGroup.GroupDN) {
			access = true
			break
		}
	}

	if !access {
		log.Info("Ldap Auth: user %s does not belong in any of the specified ldap groups, ldapUser groups: %v", ldapUser.Username, ldapUser.MemberOf)
		return nil, ErrInvalidCredentials
	}

	// get user from grafana db
	userQuery := m.GetUserByLoginQuery{LoginOrEmail: ldapUser.Username}
	if err := bus.Dispatch(&userQuery); err != nil {
		if err == m.ErrUserNotFound {
			// first login: create the Grafana account
			return a.createGrafanaUser(ldapUser)
		}
		return nil, err
	}
	return userQuery.Result, nil
}
// createGrafanaUser creates a Grafana account from the LDAP user's login,
// email, and full name, and returns the created user.
func (a *ldapAuther) createGrafanaUser(ldapUser *ldapUserInfo) (*m.User, error) {
	fullName := fmt.Sprintf("%s %s", ldapUser.FirstName, ldapUser.LastName)
	cmd := m.CreateUserCommand{
		Login: ldapUser.Username,
		Email: ldapUser.Email,
		Name:  fullName,
	}
	if err := bus.Dispatch(&cmd); err != nil {
		return nil, err
	}
	return &cmd.Result, nil
}
// syncUserInfo updates the Grafana user's email and name from LDAP.
// It is a no-op when both already match.
func (a *ldapAuther) syncUserInfo(user *m.User, ldapUser *ldapUserInfo) error {
	name := fmt.Sprintf("%s %s", ldapUser.FirstName, ldapUser.LastName)
	if user.Email == ldapUser.Email && user.Name == name {
		return nil
	}
	log.Info("Ldap: Syncing user info %s", ldapUser.Username)
	updateCmd := m.UpdateUserCommand{}
	updateCmd.UserId = user.Id
	updateCmd.Login = user.Login
	updateCmd.Email = ldapUser.Email
	// reuse the name computed above instead of formatting it a second time
	updateCmd.Name = name
	return bus.Dispatch(&updateCmd)
}
// syncOrgRoles reconciles the user's Grafana org roles with the LDAP group
// mappings: roles in orgs the user is already a member of are updated or
// removed, then roles are added for every other mapped org the user's LDAP
// groups grant. Nothing is done when no group mappings are configured.
func (a *ldapAuther) syncOrgRoles(user *m.User, ldapUser *ldapUserInfo) error {
	if len(a.server.LdapGroups) == 0 {
		return nil
	}

	orgsQuery := m.GetUserOrgListQuery{UserId: user.Id}
	if err := bus.Dispatch(&orgsQuery); err != nil {
		return err
	}

	handledOrgIds := map[int64]bool{}

	// update or remove org roles
	for _, org := range orgsQuery.Result {
		match := false
		handledOrgIds[org.OrgId] = true
		for _, group := range a.server.LdapGroups {
			if org.OrgId != group.OrgId {
				continue
			}
			if ldapUser.isMemberOf(group.GroupDN) {
				match = true
				if org.Role != group.OrgRole {
					// update role
					cmd := m.UpdateOrgUserCommand{OrgId: org.OrgId, UserId: user.Id, Role: group.OrgRole}
					if err := bus.Dispatch(&cmd); err != nil {
						return err
					}
				}
				// ignore subsequent ldap group mapping matches
				break
			}
		}
		// remove role if no mappings match
		if !match {
			cmd := m.RemoveOrgUserCommand{OrgId: org.OrgId, UserId: user.Id}
			if err := bus.Dispatch(&cmd); err != nil {
				return err
			}
		}
	}

	// add missing org roles; iterate ALL groups (fix #2766: the previous
	// `break` added only one new org role per login)
	for _, group := range a.server.LdapGroups {
		if !ldapUser.isMemberOf(group.GroupDN) {
			continue
		}
		if handledOrgIds[group.OrgId] {
			continue
		}
		// add role
		cmd := m.AddOrgUserCommand{UserId: user.Id, Role: group.OrgRole, OrgId: group.OrgId}
		if err := bus.Dispatch(&cmd); err != nil {
			return err
		}
		// mark this org as handled so it is not processed again
		handledOrgIds[group.OrgId] = true
	}

	return nil
}
// secondBind re-binds as the user entry found via search, verifying the
// user's own password. LDAP result code 49 (invalid credentials) is mapped
// to ErrInvalidCredentials; any other error is returned as-is.
func (a *ldapAuther) secondBind(ldapUser *ldapUserInfo, userPassword string) error {
if err := a.conn.Bind(ldapUser.DN, userPassword); err != nil {
if ldapCfg.VerboseLogging {
log.Info("LDAP second bind failed, %v", err)
}
if ldapErr, ok := err.(*ldap.Error); ok {
if ldapErr.ResultCode == 49 {
return ErrInvalidCredentials
}
}
return err
}
return nil
}
// initialBind performs the first bind on the connection. When a bind
// password is configured (or no BindDN at all), the configured service
// credentials are used instead of the user's password, and a second bind
// with the user's own password is required later (requireSecondBind).
// NOTE(review): when BindDN is empty, userPassword is overwritten with an
// empty BindPassword, i.e. an anonymous bind — confirm this is intended.
func (a *ldapAuther) initialBind(username, userPassword string) error {
if a.server.BindPassword != "" || a.server.BindDN == "" {
userPassword = a.server.BindPassword
a.requireSecondBind = true
}
// BindDN may contain %s as a placeholder for the login username.
bindPath := a.server.BindDN
if strings.Contains(bindPath, "%s") {
bindPath = fmt.Sprintf(a.server.BindDN, username)
}
if err := a.conn.Bind(bindPath, userPassword); err != nil {
if ldapCfg.VerboseLogging {
log.Info("LDAP initial bind failed, %v", err)
}
// result code 49 = invalid credentials
if ldapErr, ok := err.(*ldap.Error); ok {
if ldapErr.ResultCode == 49 {
return ErrInvalidCredentials
}
}
return err
}
return nil
}
// searchForUser looks the user up in each configured search base DN,
// stopping at the first base that yields a result. Exactly one matching
// entry is required; zero matches map to ErrInvalidCredentials.
func (a *ldapAuther) searchForUser(username string) (*ldapUserInfo, error) {
	var searchResult *ldap.SearchResult
	var err error
	for _, searchBase := range a.server.SearchBaseDNs {
		searchReq := ldap.SearchRequest{
			BaseDN:       searchBase,
			Scope:        ldap.ScopeWholeSubtree,
			DerefAliases: ldap.NeverDerefAliases,
			Attributes: []string{
				a.server.Attr.Username,
				a.server.Attr.Surname,
				a.server.Attr.Email,
				a.server.Attr.Name,
				a.server.Attr.MemberOf,
			},
			// %s in the configured search filter is replaced by the login username
			Filter: strings.Replace(a.server.SearchFilter, "%s", username, -1),
		}

		searchResult, err = a.conn.Search(&searchReq)
		if err != nil {
			return nil, err
		}

		if len(searchResult.Entries) > 0 {
			break
		}
	}

	// Guard against an empty SearchBaseDNs list (searchResult stays nil and
	// would panic below) as well as an empty result set.
	if searchResult == nil || len(searchResult.Entries) == 0 {
		return nil, ErrInvalidCredentials
	}

	if len(searchResult.Entries) > 1 {
		return nil, errors.New("Ldap search matched more than one entry, please review your filter setting")
	}

	return &ldapUserInfo{
		DN:        searchResult.Entries[0].DN,
		LastName:  getLdapAttr(a.server.Attr.Surname, searchResult),
		FirstName: getLdapAttr(a.server.Attr.Name, searchResult),
		Username:  getLdapAttr(a.server.Attr.Username, searchResult),
		Email:     getLdapAttr(a.server.Attr.Email, searchResult),
		MemberOf:  getLdapAttrArray(a.server.Attr.MemberOf, searchResult),
	}, nil
}
// getLdapAttr returns the first value of the named attribute on the first
// search result entry, or the empty string when the attribute is missing
// or has no values.
func getLdapAttr(name string, result *ldap.SearchResult) string {
	for _, attr := range result.Entries[0].Attributes {
		if attr.Name != name || len(attr.Values) == 0 {
			continue
		}
		return attr.Values[0]
	}
	return ""
}
// getLdapAttrArray returns all values of the named attribute on the first
// search result entry, or an empty slice when the attribute is missing.
func getLdapAttrArray(name string, result *ldap.SearchResult) []string {
	attrs := result.Entries[0].Attributes
	for i := range attrs {
		if attrs[i].Name == name {
			return attrs[i].Values
		}
	}
	return []string{}
}
// createUserFromLdapInfo is an unused stub; user creation is handled by
// (*ldapAuther).createGrafanaUser. Consider removing it.
func createUserFromLdapInfo() error {
return nil
}
fix(ldap): fixed ldap org roles sync, did only add one new role per login, now all roles are added, fixes #2766
package login
import (
"crypto/tls"
"errors"
"fmt"
"strings"
"github.com/davecgh/go-spew/spew"
"github.com/go-ldap/ldap"
"github.com/grafana/grafana/pkg/bus"
"github.com/grafana/grafana/pkg/log"
m "github.com/grafana/grafana/pkg/models"
)
// ldapAuther authenticates users against a single configured LDAP server.
type ldapAuther struct {
server *LdapServerConf // server connection and group-mapping configuration
conn *ldap.Conn // live connection, established by Dial
requireSecondBind bool // set when the initial bind used service credentials
}
// NewLdapAuthenticator returns an authenticator bound to the given LDAP
// server configuration. No connection is made until Dial is called.
func NewLdapAuthenticator(server *LdapServerConf) *ldapAuther {
return &ldapAuther{server: server}
}
// Dial connects to the configured LDAP server, using TLS when UseSSL is set.
// The connection is stored on the receiver for the subsequent bind/search calls.
func (a *ldapAuther) Dial() error {
address := fmt.Sprintf("%s:%d", a.server.Host, a.server.Port)
var err error
if a.server.UseSSL {
tlsCfg := &tls.Config{
// SkipVerifySSL disables certificate verification (insecure; operator opt-in)
InsecureSkipVerify: a.server.SkipVerifySSL,
ServerName: a.server.Host,
}
a.conn, err = ldap.DialTLS("tcp", address, tlsCfg)
} else {
a.conn, err = ldap.Dial("tcp", address)
}
return err
}
// login authenticates the user in query against LDAP: dial, initial bind,
// user search, optional second bind with the user's own password, then sync
// of user details and org roles into Grafana. On success query.User is set.
func (a *ldapAuther) login(query *LoginUserQuery) error {
	if err := a.Dial(); err != nil {
		return err
	}
	defer a.conn.Close()

	// perform initial authentication
	if err := a.initialBind(query.Username, query.Password); err != nil {
		return err
	}

	// find user entry & attributes
	ldapUser, err := a.searchForUser(query.Username)
	if err != nil {
		return err
	}
	if ldapCfg.VerboseLogging {
		log.Info("Ldap User Info: %s", spew.Sdump(ldapUser))
	}

	// check if a second user bind is needed (initial bind used service credentials)
	if a.requireSecondBind {
		if err := a.secondBind(ldapUser, query.Password); err != nil {
			return err
		}
	}

	grafanaUser, err := a.getGrafanaUserFor(ldapUser)
	if err != nil {
		return err
	}
	// sync user details
	if err := a.syncUserInfo(grafanaUser, ldapUser); err != nil {
		return err
	}
	// sync org roles
	if err := a.syncOrgRoles(grafanaUser, ldapUser); err != nil {
		return err
	}
	query.User = grafanaUser
	return nil
}
// getGrafanaUserFor maps an authenticated LDAP user to a Grafana user.
// Access requires membership in at least one mapped LDAP group (or no
// mappings configured at all). A missing Grafana account is created on the fly.
func (a *ldapAuther) getGrafanaUserFor(ldapUser *ldapUserInfo) (*m.User, error) {
	// validate that the user has access:
	// if there are no ldap group mappings access is true,
	// otherwise a single group must match
	access := len(a.server.LdapGroups) == 0
	for _, ldapGroup := range a.server.LdapGroups {
		if ldapUser.isMemberOf(ldapGroup.GroupDN) {
			access = true
			break
		}
	}

	if !access {
		log.Info("Ldap Auth: user %s does not belong in any of the specified ldap groups, ldapUser groups: %v", ldapUser.Username, ldapUser.MemberOf)
		return nil, ErrInvalidCredentials
	}

	// get user from grafana db
	userQuery := m.GetUserByLoginQuery{LoginOrEmail: ldapUser.Username}
	if err := bus.Dispatch(&userQuery); err != nil {
		if err == m.ErrUserNotFound {
			// first login: create the Grafana account
			return a.createGrafanaUser(ldapUser)
		}
		return nil, err
	}
	return userQuery.Result, nil
}
// createGrafanaUser creates a Grafana account from the LDAP user's login,
// email, and full name, and returns the created user.
func (a *ldapAuther) createGrafanaUser(ldapUser *ldapUserInfo) (*m.User, error) {
	fullName := fmt.Sprintf("%s %s", ldapUser.FirstName, ldapUser.LastName)
	cmd := m.CreateUserCommand{
		Login: ldapUser.Username,
		Email: ldapUser.Email,
		Name:  fullName,
	}
	if err := bus.Dispatch(&cmd); err != nil {
		return nil, err
	}
	return &cmd.Result, nil
}
// syncUserInfo updates the Grafana user's email and name from LDAP.
// It is a no-op when both already match.
func (a *ldapAuther) syncUserInfo(user *m.User, ldapUser *ldapUserInfo) error {
	name := fmt.Sprintf("%s %s", ldapUser.FirstName, ldapUser.LastName)
	if user.Email == ldapUser.Email && user.Name == name {
		return nil
	}
	log.Info("Ldap: Syncing user info %s", ldapUser.Username)
	updateCmd := m.UpdateUserCommand{}
	updateCmd.UserId = user.Id
	updateCmd.Login = user.Login
	updateCmd.Email = ldapUser.Email
	// reuse the name computed above instead of formatting it a second time
	updateCmd.Name = name
	return bus.Dispatch(&updateCmd)
}
// syncOrgRoles reconciles the user's Grafana org memberships with the
// configured ldap group mappings: roles in orgs the user already belongs to
// are updated or removed, and missing org roles are added. A no-op when no
// group mappings are configured.
func (a *ldapAuther) syncOrgRoles(user *m.User, ldapUser *ldapUserInfo) error {
	if len(a.server.LdapGroups) == 0 {
		return nil
	}

	orgsQuery := m.GetUserOrgListQuery{UserId: user.Id}
	if err := bus.Dispatch(&orgsQuery); err != nil {
		return err
	}

	// org ids whose membership has already been reconciled below
	handledOrgIds := map[int64]bool{}

	// update or remove org roles
	for _, org := range orgsQuery.Result {
		match := false
		handledOrgIds[org.OrgId] = true

		for _, group := range a.server.LdapGroups {
			if org.OrgId != group.OrgId {
				continue
			}

			if ldapUser.isMemberOf(group.GroupDN) {
				match = true
				if org.Role != group.OrgRole {
					// update role
					cmd := m.UpdateOrgUserCommand{OrgId: org.OrgId, UserId: user.Id, Role: group.OrgRole}
					if err := bus.Dispatch(&cmd); err != nil {
						return err
					}
				}
				// ignore subsequent ldap group mapping matches
				break
			}
		}

		// remove role if no mappings match
		if !match {
			cmd := m.RemoveOrgUserCommand{OrgId: org.OrgId, UserId: user.Id}
			if err := bus.Dispatch(&cmd); err != nil {
				return err
			}
		}
	}

	// add missing org roles
	for _, group := range a.server.LdapGroups {
		if !ldapUser.isMemberOf(group.GroupDN) {
			continue
		}

		if _, exists := handledOrgIds[group.OrgId]; exists {
			continue
		}

		// add role
		cmd := m.AddOrgUserCommand{UserId: user.Id, Role: group.OrgRole, OrgId: group.OrgId}
		if err := bus.Dispatch(&cmd); err != nil {
			return err
		}

		// mark this group has handled so we do not process it again
		handledOrgIds[group.OrgId] = true
	}

	return nil
}
// secondBind re-binds as the located user's DN with the supplied password to
// verify the credentials. LDAP result code 49 (invalid credentials) is mapped
// to ErrInvalidCredentials; any other bind error is returned as-is.
func (a *ldapAuther) secondBind(ldapUser *ldapUserInfo, userPassword string) error {
	if err := a.conn.Bind(ldapUser.DN, userPassword); err != nil {
		if ldapCfg.VerboseLogging {
			log.Info("LDAP second bind failed, %v", err)
		}

		if ldapErr, ok := err.(*ldap.Error); ok {
			// 49 == LDAP invalidCredentials result code
			if ldapErr.ResultCode == 49 {
				return ErrInvalidCredentials
			}
		}
		return err
	}
	return nil
}
// initialBind performs the first bind against the LDAP server, either as the
// configured service account (BindDN/BindPassword) or directly as the user
// when the BindDN contains a %s placeholder for the username.
func (a *ldapAuther) initialBind(username, userPassword string) error {
	// NOTE(review): when a bind password is configured (or no BindDN is set)
	// the service-account password replaces the user's password for this bind,
	// and the user's own password must be verified later (requireSecondBind).
	if a.server.BindPassword != "" || a.server.BindDN == "" {
		userPassword = a.server.BindPassword
		a.requireSecondBind = true
	}

	bindPath := a.server.BindDN
	if strings.Contains(bindPath, "%s") {
		// substitute the login into the bind-DN template
		bindPath = fmt.Sprintf(a.server.BindDN, username)
	}

	if err := a.conn.Bind(bindPath, userPassword); err != nil {
		if ldapCfg.VerboseLogging {
			log.Info("LDAP initial bind failed, %v", err)
		}

		if ldapErr, ok := err.(*ldap.Error); ok {
			// 49 == LDAP invalidCredentials result code
			if ldapErr.ResultCode == 49 {
				return ErrInvalidCredentials
			}
		}
		return err
	}
	return nil
}
// searchForUser looks the user up in each configured search base until a
// match is found. Exactly one entry must match: zero matches yield
// ErrInvalidCredentials, more than one is a configuration error.
func (a *ldapAuther) searchForUser(username string) (*ldapUserInfo, error) {
	var searchResult *ldap.SearchResult
	var err error

	for _, searchBase := range a.server.SearchBaseDNs {
		searchReq := ldap.SearchRequest{
			BaseDN:       searchBase,
			Scope:        ldap.ScopeWholeSubtree,
			DerefAliases: ldap.NeverDerefAliases,
			Attributes: []string{
				a.server.Attr.Username,
				a.server.Attr.Surname,
				a.server.Attr.Email,
				a.server.Attr.Name,
				a.server.Attr.MemberOf,
			},
			Filter: strings.Replace(a.server.SearchFilter, "%s", username, -1),
		}

		searchResult, err = a.conn.Search(&searchReq)
		if err != nil {
			return nil, err
		}

		if len(searchResult.Entries) > 0 {
			break
		}
	}

	// searchResult stays nil when SearchBaseDNs is empty; treat that the same
	// as "no entry found" instead of dereferencing a nil pointer.
	if searchResult == nil || len(searchResult.Entries) == 0 {
		return nil, ErrInvalidCredentials
	}

	if len(searchResult.Entries) > 1 {
		return nil, errors.New("Ldap search matched more than one entry, please review your filter setting")
	}

	return &ldapUserInfo{
		DN:        searchResult.Entries[0].DN,
		LastName:  getLdapAttr(a.server.Attr.Surname, searchResult),
		FirstName: getLdapAttr(a.server.Attr.Name, searchResult),
		Username:  getLdapAttr(a.server.Attr.Username, searchResult),
		Email:     getLdapAttr(a.server.Attr.Email, searchResult),
		MemberOf:  getLdapAttrArray(a.server.Attr.MemberOf, searchResult),
	}, nil
}
// getLdapAttr returns the first value of the named attribute on the first
// search entry, or "" when the attribute is absent or has no values.
func getLdapAttr(name string, result *ldap.SearchResult) string {
	// guard against a result with no entries instead of panicking on [0]
	if len(result.Entries) == 0 {
		return ""
	}
	for _, attr := range result.Entries[0].Attributes {
		if attr.Name == name {
			if len(attr.Values) > 0 {
				return attr.Values[0]
			}
		}
	}
	return ""
}
// getLdapAttrArray returns all values of the named attribute on the first
// search entry, or an empty slice when the attribute is absent.
func getLdapAttrArray(name string, result *ldap.SearchResult) []string {
	// guard against a result with no entries instead of panicking on [0]
	if len(result.Entries) == 0 {
		return []string{}
	}
	for _, attr := range result.Entries[0].Attributes {
		if attr.Name == name {
			return attr.Values
		}
	}
	return []string{}
}
// createUserFromLdapInfo is an unused stub; user creation is handled by
// (*ldapAuther).createGrafanaUser. NOTE(review): candidate for removal.
func createUserFromLdapInfo() error {
	return nil
}
|
// Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"fmt"
"io"
"math/rand"
"net"
"strconv"
"sync"
"syscall"
"time"
"github.com/eapache/channels"
"github.com/osrg/gobgp/internal/pkg/config"
"github.com/osrg/gobgp/internal/pkg/table"
"github.com/osrg/gobgp/pkg/packet/bgp"
"github.com/osrg/gobgp/pkg/packet/bmp"
log "github.com/sirupsen/logrus"
)
const (
	// minimum interval in seconds between outgoing connect attempts
	minConnectRetryInterval = 5
)

// fsmStateReasonType enumerates why the FSM changed state.
type fsmStateReasonType uint8

const (
	fsmDying fsmStateReasonType = iota
	fsmAdminDown
	fsmReadFailed
	fsmWriteFailed
	fsmNotificationSent
	fsmNotificationRecv
	fsmHoldTimerExpired
	fsmIdleTimerExpired
	fsmRestartTimerExpired
	fsmGracefulRestart
	fsmInvalidMsg
	fsmNewConnection
	fsmOpenMsgReceived
	fsmOpenMsgNegotiated
	fsmHardReset
	fsmDeConfigured
)

// fsmStateReason records the cause of a state transition, optionally with
// the NOTIFICATION message and raw data associated with it.
type fsmStateReason struct {
	Type            fsmStateReasonType
	BGPNotification *bgp.BGPMessage
	Data            []byte
}
// newfsmStateReason builds a state-change reason record.
func newfsmStateReason(typ fsmStateReasonType, notif *bgp.BGPMessage, data []byte) *fsmStateReason {
	reason := fsmStateReason{
		Type:            typ,
		BGPNotification: notif,
		Data:            data,
	}
	return &reason
}
// String renders the state-change reason as a short human-readable label.
func (r fsmStateReason) String() string {
	switch r.Type {
	case fsmNotificationSent, fsmNotificationRecv:
		// these two include the notification's error code in the label
		body := r.BGPNotification.Body.(*bgp.BGPNotification)
		code := bgp.NewNotificationErrorCode(body.ErrorCode, body.ErrorSubcode).String()
		if r.Type == fsmNotificationSent {
			return fmt.Sprintf("notification-sent %s", code)
		}
		return fmt.Sprintf("notification-received %s", code)
	}

	labels := map[fsmStateReasonType]string{
		fsmDying:               "dying",
		fsmAdminDown:           "admin-down",
		fsmReadFailed:          "read-failed",
		fsmWriteFailed:         "write-failed",
		fsmHoldTimerExpired:    "hold-timer-expired",
		fsmIdleTimerExpired:    "idle-hold-timer-expired",
		fsmRestartTimerExpired: "restart-timer-expired",
		fsmGracefulRestart:     "graceful-restart",
		fsmInvalidMsg:          "invalid-msg",
		fsmNewConnection:       "new-connection",
		fsmOpenMsgReceived:     "open-msg-received",
		fsmOpenMsgNegotiated:   "open-msg-negotiated",
		fsmHardReset:           "hard-reset",
	}
	if label, ok := labels[r.Type]; ok {
		return label
	}
	return "unknown"
}
// fsmMsgType distinguishes the kinds of messages the FSM handler emits.
type fsmMsgType int

const (
	_ fsmMsgType = iota
	fsmMsgStateChange
	fsmMsgBGPMessage
	fsmMsgRouteRefresh
)

// fsmMsg is what the FSM handler sends upward about a received message or a
// state change.
type fsmMsg struct {
	MsgType     fsmMsgType
	fsm         *fsm
	MsgSrc      string
	MsgData     interface{}
	StateReason *fsmStateReason
	PathList    []*table.Path
	timestamp   time.Time
	payload     []byte
}

// fsmOutgoingMsg carries paths or a notification to be sent to the peer.
type fsmOutgoingMsg struct {
	Paths        []*table.Path
	Notification *bgp.BGPMessage
	StayIdle     bool
}

const (
	// hold time (seconds) used in OpenSent, and the idle hold time
	holdtimeOpensent = 240
	holdtimeIdle     = 5
)

// adminState represents the administratively configured peer state.
type adminState int

const (
	adminStateUp adminState = iota
	adminStateDown
	adminStatePfxCt
)
// String returns the admin state's identifier-style name.
func (s adminState) String() string {
	names := map[adminState]string{
		adminStateUp:    "adminStateUp",
		adminStateDown:  "adminStateDown",
		adminStatePfxCt: "adminStatePfxCt",
	}
	if name, ok := names[s]; ok {
		return name
	}
	return "Unknown"
}
// adminStateOperation is a request to change the administrative state,
// optionally carrying an operator message.
type adminStateOperation struct {
	State         adminState
	Communication []byte
}

// fsm holds the per-peer BGP finite state machine state. Mutable fields are
// accessed under lock by the methods below.
type fsm struct {
	gConf                *config.Global
	pConf                *config.Neighbor
	lock                 sync.RWMutex
	state                bgp.FSMState
	outgoingCh           *channels.InfiniteChannel
	incomingCh           *channels.InfiniteChannel
	reason               *fsmStateReason
	conn                 net.Conn
	connCh               chan net.Conn
	idleHoldTime         float64
	opensentHoldTime     float64
	adminState           adminState
	adminStateCh         chan adminStateOperation
	h                    *fsmHandler
	rfMap                map[bgp.RouteFamily]bgp.BGPAddPathMode
	capMap               map[bgp.BGPCapabilityCode][]bgp.ParameterCapabilityInterface
	recvOpen             *bgp.BGPMessage
	peerInfo             *table.PeerInfo
	gracefulRestartTimer *time.Timer
	twoByteAsTrans       bool
	marshallingOptions   *bgp.MarshallingOption
	notification         chan *bgp.BGPMessage
}
// bgpMessageStateUpdate increments the per-message-type counters for a sent
// (isIn == false) or received (isIn == true) BGP message.
func (fsm *fsm) bgpMessageStateUpdate(MessageType uint8, isIn bool) {
	fsm.lock.Lock()
	defer fsm.lock.Unlock()
	state := &fsm.pConf.State.Messages
	timer := &fsm.pConf.Timers
	if isIn {
		state.Received.Total++
	} else {
		state.Sent.Total++
	}

	switch MessageType {
	case bgp.BGP_MSG_OPEN:
		if isIn {
			state.Received.Open++
		} else {
			state.Sent.Open++
		}
	case bgp.BGP_MSG_UPDATE:
		if isIn {
			state.Received.Update++
			timer.State.UpdateRecvTime = time.Now().Unix()
		} else {
			state.Sent.Update++
		}
	case bgp.BGP_MSG_NOTIFICATION:
		if isIn {
			state.Received.Notification++
		} else {
			state.Sent.Notification++
		}
	case bgp.BGP_MSG_KEEPALIVE:
		if isIn {
			state.Received.Keepalive++
		} else {
			state.Sent.Keepalive++
		}
	case bgp.BGP_MSG_ROUTE_REFRESH:
		if isIn {
			state.Received.Refresh++
		} else {
			state.Sent.Refresh++
		}
	default:
		// unknown message types are counted as discarded
		if isIn {
			state.Received.Discarded++
		} else {
			state.Sent.Discarded++
		}
	}
}
// bmpStatsUpdate bumps the BMP withdraw statistics counters by increment.
func (fsm *fsm) bmpStatsUpdate(statType uint16, increment int) {
	fsm.lock.Lock()
	defer fsm.lock.Unlock()
	stats := &fsm.pConf.State.Messages.Received
	switch statType {
	// TODO
	// Support other stat types.
	case bmp.BMP_STAT_TYPE_WITHDRAW_UPDATE:
		stats.WithdrawUpdate += uint32(increment)
	case bmp.BMP_STAT_TYPE_WITHDRAW_PREFIX:
		stats.WithdrawPrefix += uint32(increment)
	}
}
// newFSM builds an FSM for the neighbor, starting in the Idle state with the
// admin state taken from the config. The graceful restart timer is created
// stopped and is only armed when needed.
func newFSM(gConf *config.Global, pConf *config.Neighbor) *fsm {
	adminState := adminStateUp
	if pConf.Config.AdminDown {
		adminState = adminStateDown
	}
	pConf.State.SessionState = config.IntToSessionStateMap[int(bgp.BGP_FSM_IDLE)]
	pConf.Timers.State.Downtime = time.Now().Unix()
	fsm := &fsm{
		gConf:                gConf,
		pConf:                pConf,
		state:                bgp.BGP_FSM_IDLE,
		outgoingCh:           channels.NewInfiniteChannel(),
		incomingCh:           channels.NewInfiniteChannel(),
		connCh:               make(chan net.Conn, 1),
		opensentHoldTime:     float64(holdtimeOpensent),
		adminState:           adminState,
		adminStateCh:         make(chan adminStateOperation, 1),
		rfMap:                make(map[bgp.RouteFamily]bgp.BGPAddPathMode),
		capMap:               make(map[bgp.BGPCapabilityCode][]bgp.ParameterCapabilityInterface),
		peerInfo:             table.NewPeerInfo(gConf, pConf),
		gracefulRestartTimer: time.NewTimer(time.Hour),
		notification:         make(chan *bgp.BGPMessage, 1),
	}
	fsm.gracefulRestartTimer.Stop()
	return fsm
}
// StateChange records a transition to nextState, updating uptime/downtime and
// the established counter. On entering Established it also decides whether AS
// numbers must be sent as 2 octets (twoByteAsTrans) because the 4-octet-AS
// capability was not present on both sides.
func (fsm *fsm) StateChange(nextState bgp.FSMState) {
	fsm.lock.Lock()
	defer fsm.lock.Unlock()

	log.WithFields(log.Fields{
		"Topic":  "Peer",
		"Key":    fsm.pConf.State.NeighborAddress,
		"old":    fsm.state.String(),
		"new":    nextState.String(),
		"reason": fsm.reason,
	}).Debug("state changed")
	fsm.state = nextState
	switch nextState {
	case bgp.BGP_FSM_ESTABLISHED:
		fsm.pConf.Timers.State.Uptime = time.Now().Unix()
		fsm.pConf.State.EstablishedCount++
		// reset the state set by the previous session
		fsm.twoByteAsTrans = false
		// peer did not advertise 4-octet-AS support
		if _, y := fsm.capMap[bgp.BGP_CAP_FOUR_OCTET_AS_NUMBER]; !y {
			fsm.twoByteAsTrans = true
			break
		}
		// check whether we advertise it ourselves
		y := func() bool {
			for _, c := range capabilitiesFromConfig(fsm.pConf) {
				switch c.(type) {
				case *bgp.CapFourOctetASNumber:
					return true
				}
			}
			return false
		}()
		if !y {
			fsm.twoByteAsTrans = true
		}
	default:
		fsm.pConf.Timers.State.Downtime = time.Now().Unix()
	}
}
func hostport(addr net.Addr) (string, uint16) {
if addr != nil {
host, port, err := net.SplitHostPort(addr.String())
if err != nil {
return "", 0
}
p, _ := strconv.ParseUint(port, 10, 16)
return host, uint16(p)
}
return "", 0
}
// RemoteHostPort returns the peer-side address and port of the current connection.
func (fsm *fsm) RemoteHostPort() (string, uint16) {
	return hostport(fsm.conn.RemoteAddr())
}
// LocalHostPort returns the local address and port of the current connection.
func (fsm *fsm) LocalHostPort() (string, uint16) {
	return hostport(fsm.conn.LocalAddr())
}
// sendNotificationFromErrorMsg serializes e into a NOTIFICATION, writes it on
// the handler's connection and then closes that connection. It errors only
// when no connection is established; a failed write is not reported (the
// connection is being torn down either way).
func (fsm *fsm) sendNotificationFromErrorMsg(e *bgp.MessageError) (*bgp.BGPMessage, error) {
	fsm.lock.RLock()
	established := fsm.h != nil && fsm.h.conn != nil
	fsm.lock.RUnlock()

	if established {
		m := bgp.NewBGPNotificationMessage(e.TypeCode, e.SubTypeCode, e.Data)
		b, _ := m.Serialize()
		_, err := fsm.h.conn.Write(b)
		if err == nil {
			fsm.bgpMessageStateUpdate(m.Header.Type, false)
			fsm.h.sentNotification = m
		}
		fsm.h.conn.Close()
		log.WithFields(log.Fields{
			"Topic": "Peer",
			"Key":   fsm.pConf.State.NeighborAddress,
			"Data":  e,
		}).Warn("sent notification")
		return m, nil
	}
	return nil, fmt.Errorf("can't send notification to %s since TCP connection is not established", fsm.pConf.State.NeighborAddress)
}
// sendNotification builds a MessageError from the given code/subcode/data and
// sends the corresponding NOTIFICATION to the peer.
func (fsm *fsm) sendNotification(code, subType uint8, data []byte, msg string) (*bgp.BGPMessage, error) {
	return fsm.sendNotificationFromErrorMsg(bgp.NewMessageError(code, subType, data, msg).(*bgp.MessageError))
}
// fsmHandler drives one incarnation of the FSM: it owns the connection, the
// goroutines reading from it, and the channels reporting state changes.
type fsmHandler struct {
	fsm              *fsm
	conn             net.Conn
	msgCh            *channels.InfiniteChannel
	stateReasonCh    chan fsmStateReason
	incoming         *channels.InfiniteChannel
	outgoing         *channels.InfiniteChannel
	holdTimerResetCh chan bool
	sentNotification *bgp.BGPMessage
	ctx              context.Context
	ctxCancel        context.CancelFunc
	wg               *sync.WaitGroup
}
// newFSMHandler wires a handler to fsm and starts its main loop goroutine.
// The loop is stopped via ctxCancel and waited on via wg.
func newFSMHandler(fsm *fsm, outgoing *channels.InfiniteChannel) *fsmHandler {
	ctx, cancel := context.WithCancel(context.Background())
	h := &fsmHandler{
		fsm:              fsm,
		stateReasonCh:    make(chan fsmStateReason, 2),
		incoming:         fsm.incomingCh,
		outgoing:         outgoing,
		holdTimerResetCh: make(chan bool, 2),
		wg:               &sync.WaitGroup{},
		ctx:              ctx,
		ctxCancel:        cancel,
	}
	h.wg.Add(1)
	go h.loop(ctx, h.wg)
	return h
}
// idle runs the FSM in the Idle state: it rejects incoming connections, waits
// for the idle hold timer to move to Active, and honours admin-state changes
// and the graceful restart timer. Returns the next state and the reason.
func (h *fsmHandler) idle(ctx context.Context) (bgp.FSMState, *fsmStateReason) {
	fsm := h.fsm

	fsm.lock.RLock()
	idleHoldTimer := time.NewTimer(time.Second * time.Duration(fsm.idleHoldTime))
	fsm.lock.RUnlock()

	for {
		select {
		case <-ctx.Done():
			return -1, newfsmStateReason(fsmDying, nil, nil)
		case <-fsm.gracefulRestartTimer.C:
			fsm.lock.RLock()
			restarting := fsm.pConf.GracefulRestart.State.PeerRestarting
			fsm.lock.RUnlock()

			if restarting {
				fsm.lock.RLock()
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   fsm.pConf.State.NeighborAddress,
					"State": fsm.state.String(),
				}).Warn("graceful restart timer expired")
				fsm.lock.RUnlock()
				return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmRestartTimerExpired, nil, nil)
			}
		case conn, ok := <-fsm.connCh:
			if !ok {
				break
			}
			// no connections are accepted while idle
			conn.Close()
			fsm.lock.RLock()
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.State.NeighborAddress,
				"State": fsm.state.String(),
			}).Warn("Closed an accepted connection")
			fsm.lock.RUnlock()
		case <-idleHoldTimer.C:
			fsm.lock.RLock()
			adminStateUp := fsm.adminState == adminStateUp
			fsm.lock.RUnlock()

			if adminStateUp {
				fsm.lock.Lock()
				log.WithFields(log.Fields{
					"Topic":    "Peer",
					"Key":      fsm.pConf.State.NeighborAddress,
					"Duration": fsm.idleHoldTime,
				}).Debug("IdleHoldTimer expired")
				// subsequent idle periods use the short idle hold time
				fsm.idleHoldTime = holdtimeIdle
				fsm.lock.Unlock()
				return bgp.BGP_FSM_ACTIVE, newfsmStateReason(fsmIdleTimerExpired, nil, nil)
			} else {
				log.WithFields(log.Fields{"Topic": "Peer"}).Debug("IdleHoldTimer expired, but stay at idle because the admin state is DOWN")
			}
		case stateOp := <-fsm.adminStateCh:
			err := h.changeadminState(stateOp.State)
			if err == nil {
				switch stateOp.State {
				case adminStateDown:
					// stop idle hold timer
					idleHoldTimer.Stop()
				case adminStateUp:
					// restart idle hold timer
					fsm.lock.RLock()
					idleHoldTimer.Reset(time.Second * time.Duration(fsm.idleHoldTime))
					fsm.lock.RUnlock()
				}
			}
		}
	}
}
// connectLoop repeatedly attempts an outgoing TCP connection to the peer with
// a randomized retry interval, until the context is cancelled or a connection
// has been handed over on fsm.connCh.
func (h *fsmHandler) connectLoop(ctx context.Context, wg *sync.WaitGroup) {
	defer wg.Done()
	fsm := h.fsm

	// snapshot all connect parameters under the lock once up front
	retry, addr, port, password, ttl, ttlMin, localAddress, bindInterface := func() (int, string, int, string, uint8, uint8, string, string) {
		fsm.lock.RLock()
		defer fsm.lock.RUnlock()

		tick := int(fsm.pConf.Timers.Config.ConnectRetry)
		if tick < minConnectRetryInterval {
			tick = minConnectRetryInterval
		}

		addr := fsm.pConf.State.NeighborAddress
		port := int(bgp.BGP_PORT)
		if fsm.pConf.Transport.Config.RemotePort != 0 {
			port = int(fsm.pConf.Transport.Config.RemotePort)
		}

		password := fsm.pConf.Config.AuthPassword
		ttl := uint8(0)
		ttlMin := uint8(0)

		if fsm.pConf.TtlSecurity.Config.Enabled {
			ttl = 255
			ttlMin = fsm.pConf.TtlSecurity.Config.TtlMin
		} else if fsm.pConf.Config.PeerAs != 0 && fsm.pConf.Config.PeerType == config.PEER_TYPE_EXTERNAL {
			ttl = 1
			if fsm.pConf.EbgpMultihop.Config.Enabled {
				ttl = fsm.pConf.EbgpMultihop.Config.MultihopTtl
			}
		}
		return tick, addr, port, password, ttl, ttlMin, fsm.pConf.Transport.Config.LocalAddress, fsm.pConf.Transport.Config.BindInterface
	}()

	// first round fires quickly; later rounds use the configured retry interval
	tick := minConnectRetryInterval
	for {
		r := rand.New(rand.NewSource(time.Now().UnixNano()))
		timer := time.NewTimer(time.Duration(r.Intn(tick)+tick) * time.Second)
		select {
		case <-ctx.Done():
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   addr,
			}).Debug("stop connect loop")
			timer.Stop()
			return
		case <-timer.C:
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   addr,
			}).Debug("try to connect")
		}

		laddr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(localAddress, "0"))
		if err != nil {
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   addr,
			}).Warnf("failed to resolve local address: %s", err)
		}

		if err == nil {
			d := net.Dialer{
				LocalAddr: laddr,
				Timeout:   time.Duration(tick-1) * time.Second,
				Control: func(network, address string, c syscall.RawConn) error {
					return dialerControl(network, address, c, ttl, ttlMin, password, bindInterface)
				},
			}

			conn, err := d.DialContext(ctx, "tcp", net.JoinHostPort(addr, strconv.Itoa(port)))
			select {
			case <-ctx.Done():
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   addr,
				}).Debug("stop connect loop")
				return
			default:
			}

			if err == nil {
				select {
				case fsm.connCh <- conn:
					return
				default:
					// nobody is ready to take the connection; drop it
					conn.Close()
					log.WithFields(log.Fields{
						"Topic": "Peer",
						"Key":   addr,
					}).Warn("active conn is closed to avoid being blocked")
				}
			} else {
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   addr,
				}).Debugf("failed to connect: %s", err)
			}
		}
		tick = retry
	}
}
// active runs the FSM in the Active state: unless passive mode is configured
// it dials the peer via connectLoop, then waits for a connection, applies TTL
// socket options and moves to OpenSent.
func (h *fsmHandler) active(ctx context.Context) (bgp.FSMState, *fsmStateReason) {
	c, cancel := context.WithCancel(ctx)

	fsm := h.fsm
	var wg sync.WaitGroup

	fsm.lock.RLock()
	tryConnect := !fsm.pConf.Transport.Config.PassiveMode
	fsm.lock.RUnlock()
	if tryConnect {
		wg.Add(1)
		go h.connectLoop(c, &wg)
	}

	defer func() {
		cancel()
		wg.Wait()
	}()

	for {
		select {
		case <-ctx.Done():
			return -1, newfsmStateReason(fsmDying, nil, nil)
		case conn, ok := <-fsm.connCh:
			if !ok {
				break
			}
			fsm.lock.Lock()
			fsm.conn = conn
			fsm.lock.Unlock()
			ttl := 0
			ttlMin := 0

			fsm.lock.RLock()
			if fsm.pConf.TtlSecurity.Config.Enabled {
				ttl = 255
				ttlMin = int(fsm.pConf.TtlSecurity.Config.TtlMin)
			} else if fsm.pConf.Config.PeerAs != 0 && fsm.pConf.Config.PeerType == config.PEER_TYPE_EXTERNAL {
				if fsm.pConf.EbgpMultihop.Config.Enabled {
					ttl = int(fsm.pConf.EbgpMultihop.Config.MultihopTtl)
				} else if fsm.pConf.Transport.Config.Ttl != 0 {
					ttl = int(fsm.pConf.Transport.Config.Ttl)
				} else {
					ttl = 1
				}
			} else if fsm.pConf.Transport.Config.Ttl != 0 {
				ttl = int(fsm.pConf.Transport.Config.Ttl)
			}
			if ttl != 0 {
				if err := setTCPTTLSockopt(conn.(*net.TCPConn), ttl); err != nil {
					log.WithFields(log.Fields{
						"Topic": "Peer",
						"Key":   fsm.pConf.Config.NeighborAddress,
						"State": fsm.state.String(),
					}).Warnf("cannot set TTL(=%d) for peer: %s", ttl, err)
				}
			}
			if ttlMin != 0 {
				if err := setTCPMinTTLSockopt(conn.(*net.TCPConn), ttlMin); err != nil {
					log.WithFields(log.Fields{
						"Topic": "Peer",
						"Key":   fsm.pConf.Config.NeighborAddress,
						"State": fsm.state.String(),
					}).Warnf("cannot set minimal TTL(=%d) for peer: %s", ttl, err)
				}
			}
			fsm.lock.RUnlock()
			// we don't implement delayed open timer so move to opensent right
			// away.
			return bgp.BGP_FSM_OPENSENT, newfsmStateReason(fsmNewConnection, nil, nil)
		case <-fsm.gracefulRestartTimer.C:
			fsm.lock.RLock()
			restarting := fsm.pConf.GracefulRestart.State.PeerRestarting
			fsm.lock.RUnlock()
			if restarting {
				fsm.lock.RLock()
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   fsm.pConf.State.NeighborAddress,
					"State": fsm.state.String(),
				}).Warn("graceful restart timer expired")
				fsm.lock.RUnlock()
				return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmRestartTimerExpired, nil, nil)
			}
		case err := <-h.stateReasonCh:
			return bgp.BGP_FSM_IDLE, &err
		case stateOp := <-fsm.adminStateCh:
			err := h.changeadminState(stateOp.State)
			if err == nil {
				switch stateOp.State {
				case adminStateDown:
					return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmAdminDown, nil, nil)
				case adminStateUp:
					// going admin-up while already Active must not happen
					log.WithFields(log.Fields{
						"Topic":      "Peer",
						"Key":        fsm.pConf.State.NeighborAddress,
						"State":      fsm.state.String(),
						"adminState": stateOp.State.String(),
					}).Panic("code logic bug")
				}
			}
		}
	}
}
// capAddPathFromConfig builds the ADD-PATH capability from the per-AFI/SAFI
// add-paths configuration, or returns nil when no family has send or receive
// enabled.
func capAddPathFromConfig(pConf *config.Neighbor) bgp.ParameterCapabilityInterface {
	tuples := make([]*bgp.CapAddPathTuple, 0, len(pConf.AfiSafis))
	for _, af := range pConf.AfiSafis {
		mode := bgp.BGPAddPathMode(0)
		if af.AddPaths.State.Receive {
			mode |= bgp.BGP_ADD_PATH_RECEIVE
		}
		if af.AddPaths.State.SendMax > 0 {
			mode |= bgp.BGP_ADD_PATH_SEND
		}
		if mode == 0 {
			continue
		}
		tuples = append(tuples, bgp.NewCapAddPathTuple(af.State.Family, mode))
	}
	if len(tuples) == 0 {
		return nil
	}
	return bgp.NewCapAddPath(tuples)
}
// capabilitiesFromConfig assembles the capabilities advertised in our OPEN
// message: route-refresh, multiprotocol per AFI/SAFI, 4-octet AS, graceful
// restart (plus long-lived GR), extended nexthop and ADD-PATH.
func capabilitiesFromConfig(pConf *config.Neighbor) []bgp.ParameterCapabilityInterface {
	caps := make([]bgp.ParameterCapabilityInterface, 0, 4)
	caps = append(caps, bgp.NewCapRouteRefresh())
	for _, af := range pConf.AfiSafis {
		caps = append(caps, bgp.NewCapMultiProtocol(af.State.Family))
	}
	caps = append(caps, bgp.NewCapFourOctetASNumber(pConf.Config.LocalAs))

	if c := pConf.GracefulRestart.Config; c.Enabled {
		tuples := []*bgp.CapGracefulRestartTuple{}
		ltuples := []*bgp.CapLongLivedGracefulRestartTuple{}

		// RFC 4724 4.1
		// To re-establish the session with its peer, the Restarting Speaker
		// MUST set the "Restart State" bit in the Graceful Restart Capability
		// of the OPEN message.
		restarting := pConf.GracefulRestart.State.LocalRestarting

		if !c.HelperOnly {
			for i, rf := range pConf.AfiSafis {
				if m := rf.MpGracefulRestart.Config; m.Enabled {
					// When restarting, always flag forwaring bit.
					// This can be a lie, depending on how gobgpd is used.
					// For a route-server use-case, since a route-server
					// itself doesn't forward packets, and the dataplane
					// is a l2 switch which continues to work with no
					// relation to bgpd, this behavior is ok.
					// TODO consideration of other use-cases
					tuples = append(tuples, bgp.NewCapGracefulRestartTuple(rf.State.Family, restarting))
					pConf.AfiSafis[i].MpGracefulRestart.State.Advertised = true
				}
				if m := rf.LongLivedGracefulRestart.Config; m.Enabled {
					ltuples = append(ltuples, bgp.NewCapLongLivedGracefulRestartTuple(rf.State.Family, restarting, m.RestartTime))
				}
			}
		}
		restartTime := c.RestartTime
		notification := c.NotificationEnabled
		caps = append(caps, bgp.NewCapGracefulRestart(restarting, notification, restartTime, tuples))
		if c.LongLivedEnabled {
			caps = append(caps, bgp.NewCapLongLivedGracefulRestart(ltuples))
		}
	}

	// Extended Nexthop Capability (Code 5)
	tuples := []*bgp.CapExtendedNexthopTuple{}
	families, _ := config.AfiSafis(pConf.AfiSafis).ToRfList()
	for _, family := range families {
		if family == bgp.RF_IPv6_UC {
			continue
		}
		tuple := bgp.NewCapExtendedNexthopTuple(family, bgp.AFI_IP6)
		tuples = append(tuples, tuple)
	}
	caps = append(caps, bgp.NewCapExtendedNexthop(tuples))

	// ADD-PATH Capability: append the capability computed in the if-condition
	// instead of calling capAddPathFromConfig a second time (the original
	// discarded the first result and re-ran the whole computation).
	if c := capAddPathFromConfig(pConf); c != nil {
		caps = append(caps, c)
	}
	return caps
}
// buildopen constructs our OPEN message with the configured hold time and
// capabilities. AS numbers wider than 16 bits are sent as AS_TRANS in the
// fixed OPEN field, with the real AS carried in the 4-octet-AS capability.
func buildopen(gConf *config.Global, pConf *config.Neighbor) *bgp.BGPMessage {
	caps := capabilitiesFromConfig(pConf)
	opt := bgp.NewOptionParameterCapability(caps)
	holdTime := uint16(pConf.Timers.Config.HoldTime)
	as := pConf.Config.LocalAs
	// a 4-byte AS cannot fit in the 2-byte OPEN field
	if as > (1<<16)-1 {
		as = bgp.AS_TRANS
	}
	return bgp.NewBGPOpenMessage(uint16(as), holdTime, gConf.Config.RouterId,
		[]bgp.OptionParameterInterface{opt})
}
func readAll(conn net.Conn, length int) ([]byte, error) {
buf := make([]byte, length)
_, err := io.ReadFull(conn, buf)
if err != nil {
return nil, err
}
return buf, nil
}
// getPathAttrFromBGPUpdate returns the first path attribute of the given type
// in the UPDATE message, or nil when that attribute is absent.
func getPathAttrFromBGPUpdate(m *bgp.BGPUpdate, typ bgp.BGPAttrType) bgp.PathAttributeInterface {
	for i := range m.PathAttributes {
		if attr := m.PathAttributes[i]; attr.GetType() == typ {
			return attr
		}
	}
	return nil
}
// hasOwnASLoop reports whether ownAS appears more than limit times across all
// segments of the AS path.
func hasOwnASLoop(ownAS uint32, limit int, asPath *bgp.PathAttributeAsPath) bool {
	occurrences := 0
	for _, param := range asPath.Value {
		for _, as := range param.GetAS() {
			if as != ownAS {
				continue
			}
			occurrences++
			if occurrences > limit {
				return true
			}
		}
	}
	return false
}
// extractRouteFamily derives the route family carried by an MP_REACH_NLRI or
// MP_UNREACH_NLRI attribute; any other attribute type yields nil.
func extractRouteFamily(p *bgp.PathAttributeInterface) *bgp.RouteFamily {
	var afi uint16
	var safi uint8
	switch a := (*p).(type) {
	case *bgp.PathAttributeMpReachNLRI:
		afi, safi = a.AFI, a.SAFI
	case *bgp.PathAttributeMpUnreachNLRI:
		afi, safi = a.AFI, a.SAFI
	default:
		return nil
	}
	rf := bgp.AfiSafiToRouteFamily(afi, safi)
	return &rf
}
// afiSafiDisable marks the given route family as disabled in the neighbor
// config and removes its multiprotocol capability from capMap. It returns the
// family's printable name for logging.
func (h *fsmHandler) afiSafiDisable(rf bgp.RouteFamily) string {
	h.fsm.lock.Lock()
	defer h.fsm.lock.Unlock()

	n := bgp.AddressFamilyNameMap[rf]

	for i, a := range h.fsm.pConf.AfiSafis {
		if string(a.Config.AfiSafiName) == n {
			h.fsm.pConf.AfiSafis[i].State.Enabled = false
			break
		}
	}
	// rebuild the multiprotocol capability list without this family
	newList := make([]bgp.ParameterCapabilityInterface, 0)
	for _, c := range h.fsm.capMap[bgp.BGP_CAP_MULTIPROTOCOL] {
		if c.(*bgp.CapMultiProtocol).CapValue == rf {
			continue
		}
		newList = append(newList, c)
	}
	h.fsm.capMap[bgp.BGP_CAP_MULTIPROTOCOL] = newList
	return n
}
// handlingError decides how a parse/validation error on message m is handled.
// With treat-as-withdraw error handling enabled, UPDATE errors may be
// downgraded to attribute-discard, treat-as-withdraw or AFI/SAFI disable
// (as indicated by the MessageError); anything else resets the session.
func (h *fsmHandler) handlingError(m *bgp.BGPMessage, e error, useRevisedError bool) bgp.ErrorHandling {
	handling := bgp.ERROR_HANDLING_NONE
	if m.Header.Type == bgp.BGP_MSG_UPDATE && useRevisedError {
		factor := e.(*bgp.MessageError)
		handling = factor.ErrorHandling
		switch handling {
		case bgp.ERROR_HANDLING_ATTRIBUTE_DISCARD:
			h.fsm.lock.RLock()
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   h.fsm.pConf.State.NeighborAddress,
				"State": h.fsm.state.String(),
				"error": e,
			}).Warn("Some attributes were discarded")
			h.fsm.lock.RUnlock()
		case bgp.ERROR_HANDLING_TREAT_AS_WITHDRAW:
			// rewrite the message body so the announced routes are withdrawn
			m.Body = bgp.TreatAsWithdraw(m.Body.(*bgp.BGPUpdate))
			h.fsm.lock.RLock()
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   h.fsm.pConf.State.NeighborAddress,
				"State": h.fsm.state.String(),
				"error": e,
			}).Warn("the received Update message was treated as withdraw")
			h.fsm.lock.RUnlock()
		case bgp.ERROR_HANDLING_AFISAFI_DISABLE:
			rf := extractRouteFamily(factor.ErrorAttribute)
			if rf == nil {
				h.fsm.lock.RLock()
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   h.fsm.pConf.State.NeighborAddress,
					"State": h.fsm.state.String(),
				}).Warn("Error occurred during AFI/SAFI disabling")
				h.fsm.lock.RUnlock()
			} else {
				n := h.afiSafiDisable(*rf)
				h.fsm.lock.RLock()
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   h.fsm.pConf.State.NeighborAddress,
					"State": h.fsm.state.String(),
					"error": e,
				}).Warnf("Capability %s was disabled", n)
				h.fsm.lock.RUnlock()
			}
		}
	} else {
		handling = bgp.ERROR_HANDLING_SESSION_RESET
	}
	return handling
}
// recvMessageWithError reads and parses one BGP message from the connection.
// Session-fatal conditions are also signalled via stateReasonCh. A nil, nil
// return means the message was fully handled here (keepalive or notification).
func (h *fsmHandler) recvMessageWithError() (*fsmMsg, error) {
	sendToStateReasonCh := func(typ fsmStateReasonType, notif *bgp.BGPMessage) {
		// probably doesn't happen but be cautious
		select {
		case h.stateReasonCh <- *newfsmStateReason(typ, notif, nil):
		default:
		}
	}

	headerBuf, err := readAll(h.conn, bgp.BGP_HEADER_LENGTH)
	if err != nil {
		sendToStateReasonCh(fsmReadFailed, nil)
		return nil, err
	}

	hd := &bgp.BGPHeader{}
	err = hd.DecodeFromBytes(headerBuf)
	if err != nil {
		h.fsm.bgpMessageStateUpdate(0, true)
		h.fsm.lock.RLock()
		log.WithFields(log.Fields{
			"Topic": "Peer",
			"Key":   h.fsm.pConf.State.NeighborAddress,
			"State": h.fsm.state.String(),
			"error": err,
		}).Warn("Session will be reset due to malformed BGP Header")
		fmsg := &fsmMsg{
			fsm:     h.fsm,
			MsgType: fsmMsgBGPMessage,
			MsgSrc:  h.fsm.pConf.State.NeighborAddress,
			MsgData: err,
		}
		h.fsm.lock.RUnlock()
		return fmsg, err
	}

	bodyBuf, err := readAll(h.conn, int(hd.Len)-bgp.BGP_HEADER_LENGTH)
	if err != nil {
		sendToStateReasonCh(fsmReadFailed, nil)
		return nil, err
	}

	now := time.Now()
	handling := bgp.ERROR_HANDLING_NONE

	h.fsm.lock.RLock()
	useRevisedError := h.fsm.pConf.ErrorHandling.Config.TreatAsWithdraw
	options := h.fsm.marshallingOptions
	h.fsm.lock.RUnlock()

	m, err := bgp.ParseBGPBody(hd, bodyBuf, options)
	if err != nil {
		handling = h.handlingError(m, err, useRevisedError)
		h.fsm.bgpMessageStateUpdate(0, true)
	} else {
		h.fsm.bgpMessageStateUpdate(m.Header.Type, true)
		err = bgp.ValidateBGPMessage(m)
	}
	h.fsm.lock.RLock()
	fmsg := &fsmMsg{
		fsm:       h.fsm,
		MsgType:   fsmMsgBGPMessage,
		MsgSrc:    h.fsm.pConf.State.NeighborAddress,
		timestamp: now,
	}
	h.fsm.lock.RUnlock()

	switch handling {
	case bgp.ERROR_HANDLING_AFISAFI_DISABLE:
		fmsg.MsgData = m
		return fmsg, nil
	case bgp.ERROR_HANDLING_SESSION_RESET:
		h.fsm.lock.RLock()
		log.WithFields(log.Fields{
			"Topic": "Peer",
			"Key":   h.fsm.pConf.State.NeighborAddress,
			"State": h.fsm.state.String(),
			"error": err,
		}).Warn("Session will be reset due to malformed BGP message")
		h.fsm.lock.RUnlock()
		fmsg.MsgData = err
		return fmsg, err
	default:
		fmsg.MsgData = m

		h.fsm.lock.RLock()
		establishedState := h.fsm.state == bgp.BGP_FSM_ESTABLISHED
		h.fsm.lock.RUnlock()

		if establishedState {
			switch m.Header.Type {
			case bgp.BGP_MSG_ROUTE_REFRESH:
				fmsg.MsgType = fsmMsgRouteRefresh
			case bgp.BGP_MSG_UPDATE:
				body := m.Body.(*bgp.BGPUpdate)

				isEBGP := h.fsm.pConf.IsEBGPPeer(h.fsm.gConf)
				isConfed := h.fsm.pConf.IsConfederationMember(h.fsm.gConf)

				// keep the raw wire bytes of the update
				fmsg.payload = make([]byte, len(headerBuf)+len(bodyBuf))
				copy(fmsg.payload, headerBuf)
				copy(fmsg.payload[len(headerBuf):], bodyBuf)

				h.fsm.lock.RLock()
				rfMap := h.fsm.rfMap
				h.fsm.lock.RUnlock()
				ok, err := bgp.ValidateUpdateMsg(body, rfMap, isEBGP, isConfed)
				if !ok {
					handling = h.handlingError(m, err, useRevisedError)
				}
				if handling == bgp.ERROR_HANDLING_SESSION_RESET {
					h.fsm.lock.RLock()
					log.WithFields(log.Fields{
						"Topic": "Peer",
						"Key":   h.fsm.pConf.State.NeighborAddress,
						"State": h.fsm.state.String(),
						"error": err,
					}).Warn("Session will be reset due to malformed BGP update message")
					h.fsm.lock.RUnlock()
					fmsg.MsgData = err
					return fmsg, err
				}

				// BMP withdraw statistics
				if routes := len(body.WithdrawnRoutes); routes > 0 {
					h.fsm.bmpStatsUpdate(bmp.BMP_STAT_TYPE_WITHDRAW_UPDATE, 1)
					h.fsm.bmpStatsUpdate(bmp.BMP_STAT_TYPE_WITHDRAW_PREFIX, routes)
				} else if attr := getPathAttrFromBGPUpdate(body, bgp.BGP_ATTR_TYPE_MP_UNREACH_NLRI); attr != nil {
					mpUnreach := attr.(*bgp.PathAttributeMpUnreachNLRI)
					if routes = len(mpUnreach.Value); routes > 0 {
						h.fsm.bmpStatsUpdate(bmp.BMP_STAT_TYPE_WITHDRAW_UPDATE, 1)
						h.fsm.bmpStatsUpdate(bmp.BMP_STAT_TYPE_WITHDRAW_PREFIX, routes)
					}
				}

				table.UpdatePathAttrs4ByteAs(body)
				if err = table.UpdatePathAggregator4ByteAs(body); err != nil {
					fmsg.MsgData = err
					return fmsg, err
				}

				h.fsm.lock.RLock()
				peerInfo := h.fsm.peerInfo
				h.fsm.lock.RUnlock()
				fmsg.PathList = table.ProcessMessage(m, peerInfo, fmsg.timestamp)
				// an UPDATE also resets the hold timer, so fall through
				fallthrough
			case bgp.BGP_MSG_KEEPALIVE:
				// if the length of h.holdTimerResetCh
				// isn't zero, the timer will be reset
				// soon anyway.
				select {
				case h.holdTimerResetCh <- true:
				default:
				}
				if m.Header.Type == bgp.BGP_MSG_KEEPALIVE {
					return nil, nil
				}
			case bgp.BGP_MSG_NOTIFICATION:
				body := m.Body.(*bgp.BGPNotification)
				if body.ErrorCode == bgp.BGP_ERROR_CEASE && (body.ErrorSubcode == bgp.BGP_ERROR_SUB_ADMINISTRATIVE_SHUTDOWN || body.ErrorSubcode == bgp.BGP_ERROR_SUB_ADMINISTRATIVE_RESET) {
					communication, rest := decodeAdministrativeCommunication(body.Data)
					h.fsm.lock.RLock()
					log.WithFields(log.Fields{
						"Topic":               "Peer",
						"Key":                 h.fsm.pConf.State.NeighborAddress,
						"Code":                body.ErrorCode,
						"Subcode":             body.ErrorSubcode,
						"Communicated-Reason": communication,
						"Data":                rest,
					}).Warn("received notification")
					h.fsm.lock.RUnlock()
				} else {
					h.fsm.lock.RLock()
					log.WithFields(log.Fields{
						"Topic":   "Peer",
						"Key":     h.fsm.pConf.State.NeighborAddress,
						"Code":    body.ErrorCode,
						"Subcode": body.ErrorSubcode,
						"Data":    body.Data,
					}).Warn("received notification")
					h.fsm.lock.RUnlock()
				}

				h.fsm.lock.RLock()
				s := h.fsm.pConf.GracefulRestart.State
				hardReset := s.Enabled && s.NotificationEnabled && body.ErrorCode == bgp.BGP_ERROR_CEASE && body.ErrorSubcode == bgp.BGP_ERROR_SUB_HARD_RESET
				h.fsm.lock.RUnlock()
				if hardReset {
					sendToStateReasonCh(fsmHardReset, m)
				} else {
					sendToStateReasonCh(fsmNotificationRecv, m)
				}
				return nil, nil
			}
		}
	}
	return fmsg, nil
}
// recvMessage reads a single message from the peer and forwards it to msgCh,
// then closes msgCh. Read errors are signalled out-of-band via stateReasonCh
// inside recvMessageWithError, so the returned error is always nil.
func (h *fsmHandler) recvMessage(ctx context.Context, wg *sync.WaitGroup) error {
	defer func() {
		h.msgCh.Close()
		wg.Done()
	}()
	fmsg, _ := h.recvMessageWithError()
	if fmsg != nil {
		h.msgCh.In() <- fmsg
	}
	return nil
}
// open2Cap extracts the capability map from a received OPEN message and
// computes the add-path mode negotiated for each route family.
//
// It returns:
//   - capMap: every capability the peer announced, keyed by capability code
//     (all add-path tuples are squashed into a single capability entry);
//   - negotiated: for each locally configured family the peer also announced,
//     the add-path directions both sides agreed on.
func open2Cap(open *bgp.BGPOpen, n *config.Neighbor) (map[bgp.BGPCapabilityCode][]bgp.ParameterCapabilityInterface, map[bgp.RouteFamily]bgp.BGPAddPathMode) {
	capMap := make(map[bgp.BGPCapabilityCode][]bgp.ParameterCapabilityInterface)
	for _, p := range open.OptParams {
		if paramCap, y := p.(*bgp.OptionParameterCapability); y {
			for _, c := range paramCap.Capability {
				m, ok := capMap[c.Code()]
				if !ok {
					m = make([]bgp.ParameterCapabilityInterface, 0, 1)
				}
				capMap[c.Code()] = append(m, c)
			}
		}
	}

	// squash add path cap
	if caps, y := capMap[bgp.BGP_CAP_ADD_PATH]; y {
		items := make([]*bgp.CapAddPathTuple, 0, len(caps))
		for _, c := range caps {
			items = append(items, c.(*bgp.CapAddPath).Tuples...)
		}
		capMap[bgp.BGP_CAP_ADD_PATH] = []bgp.ParameterCapabilityInterface{bgp.NewCapAddPath(items)}
	}

	// remote open message may not include multi-protocol capability
	if _, y := capMap[bgp.BGP_CAP_MULTIPROTOCOL]; !y {
		capMap[bgp.BGP_CAP_MULTIPROTOCOL] = []bgp.ParameterCapabilityInterface{bgp.NewCapMultiProtocol(bgp.RF_IPv4_UC)}
	}

	local := n.CreateRfMap()
	remote := make(map[bgp.RouteFamily]bgp.BGPAddPathMode)
	for _, c := range capMap[bgp.BGP_CAP_MULTIPROTOCOL] {
		family := c.(*bgp.CapMultiProtocol).CapValue
		// Default: no add-path unless an add-path tuple names this family.
		remote[family] = bgp.BGP_ADD_PATH_NONE
		for _, a := range capMap[bgp.BGP_CAP_ADD_PATH] {
			for _, i := range a.(*bgp.CapAddPath).Tuples {
				if i.RouteFamily == family {
					remote[family] = i.Mode
				}
			}
		}
	}
	negotiated := make(map[bgp.RouteFamily]bgp.BGPAddPathMode)
	for family, mode := range local {
		if m, y := remote[family]; y {
			n := bgp.BGP_ADD_PATH_NONE
			// We may send additional paths only if we are configured to send
			// and the peer is willing to receive; symmetrically for receive.
			if mode&bgp.BGP_ADD_PATH_SEND > 0 && m&bgp.BGP_ADD_PATH_RECEIVE > 0 {
				n |= bgp.BGP_ADD_PATH_SEND
			}
			if mode&bgp.BGP_ADD_PATH_RECEIVE > 0 && m&bgp.BGP_ADD_PATH_SEND > 0 {
				n |= bgp.BGP_ADD_PATH_RECEIVE
			}
			negotiated[family] = n
		}
	}
	return capMap, negotiated
}
// opensent implements the OpenSent state: send our OPEN, then wait for the
// peer's OPEN. On a valid OPEN it applies the negotiated parameters
// (AS/hold time/capabilities/graceful restart) to the fsm and moves to
// OpenConfirm after sending a KEEPALIVE; on any error it moves back to Idle.
func (h *fsmHandler) opensent(ctx context.Context) (bgp.FSMState, *fsmStateReason) {
	fsm := h.fsm

	// Send our OPEN message first.
	fsm.lock.RLock()
	m := buildopen(fsm.gConf, fsm.pConf)
	fsm.lock.RUnlock()

	b, _ := m.Serialize()
	fsm.conn.Write(b)
	fsm.bgpMessageStateUpdate(m.Header.Type, false)

	h.msgCh = channels.NewInfiniteChannel()

	fsm.lock.RLock()
	h.conn = fsm.conn
	fsm.lock.RUnlock()

	// One receive goroutine delivers the peer's reply into h.msgCh.
	var wg sync.WaitGroup
	wg.Add(1)
	defer wg.Wait()
	go h.recvMessage(ctx, &wg)

	// RFC 4271 P.60
	// sets its HoldTimer to a large value
	// A HoldTimer value of 4 minutes is suggested as a "large value"
	// for the HoldTimer
	fsm.lock.RLock()
	holdTimer := time.NewTimer(time.Second * time.Duration(fsm.opensentHoldTime))
	fsm.lock.RUnlock()

	for {
		select {
		case <-ctx.Done():
			h.conn.Close()
			return -1, newfsmStateReason(fsmDying, nil, nil)
		case conn, ok := <-fsm.connCh:
			if !ok {
				// channel closed; note this break only leaves the select
				break
			}
			// We already have a connection in this state; refuse extras.
			conn.Close()
			fsm.lock.RLock()
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.State.NeighborAddress,
				"State": fsm.state.String(),
			}).Warn("Closed an accepted connection")
			fsm.lock.RUnlock()
		case <-fsm.gracefulRestartTimer.C:
			fsm.lock.RLock()
			restarting := fsm.pConf.GracefulRestart.State.PeerRestarting
			fsm.lock.RUnlock()
			if restarting {
				fsm.lock.RLock()
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   fsm.pConf.State.NeighborAddress,
					"State": fsm.state.String(),
				}).Warn("graceful restart timer expired")
				fsm.lock.RUnlock()
				h.conn.Close()
				return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmRestartTimerExpired, nil, nil)
			}
		case i, ok := <-h.msgCh.Out():
			if !ok {
				continue
			}
			e := i.(*fsmMsg)
			switch m := e.MsgData.(type) {
			case *bgp.BGPMessage:
				if m.Header.Type == bgp.BGP_MSG_OPEN {
					fsm.lock.Lock()
					fsm.recvOpen = m
					fsm.lock.Unlock()

					body := m.Body.(*bgp.BGPOpen)

					fsm.lock.RLock()
					fsmPeerAS := fsm.pConf.Config.PeerAs
					fsm.lock.RUnlock()
					peerAs, err := bgp.ValidateOpenMsg(body, fsmPeerAS)
					if err != nil {
						m, _ := fsm.sendNotificationFromErrorMsg(err.(*bgp.MessageError))
						return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmInvalidMsg, m, nil)
					}

					// ASN negotiation was skipped
					fsm.lock.RLock()
					asnNegotiationSkipped := fsm.pConf.Config.PeerAs == 0
					fsm.lock.RUnlock()
					if asnNegotiationSkipped {
						// PeerAs == 0 means "accept any AS": derive the peer
						// type from whether the ASes actually match.
						fsm.lock.Lock()
						typ := config.PEER_TYPE_EXTERNAL
						if fsm.peerInfo.LocalAS == peerAs {
							typ = config.PEER_TYPE_INTERNAL
						}
						fsm.pConf.State.PeerType = typ
						log.WithFields(log.Fields{
							"Topic": "Peer",
							"Key":   fsm.pConf.State.NeighborAddress,
							"State": fsm.state.String(),
						}).Infof("skipped asn negotiation: peer-as: %d, peer-type: %s", peerAs, typ)
						fsm.lock.Unlock()
					} else {
						fsm.lock.Lock()
						fsm.pConf.State.PeerType = fsm.pConf.Config.PeerType
						fsm.lock.Unlock()
					}

					// Commit everything learned from the peer's OPEN under a
					// single write-lock section.
					fsm.lock.Lock()
					fsm.pConf.State.PeerAs = peerAs
					fsm.peerInfo.AS = peerAs
					fsm.peerInfo.ID = body.ID
					fsm.capMap, fsm.rfMap = open2Cap(body, fsm.pConf)

					if _, y := fsm.capMap[bgp.BGP_CAP_ADD_PATH]; y {
						fsm.marshallingOptions = &bgp.MarshallingOption{
							AddPath: fsm.rfMap,
						}
					} else {
						fsm.marshallingOptions = nil
					}

					// calculate HoldTime
					// RFC 4271 P.13
					// a BGP speaker MUST calculate the value of the Hold Timer
					// by using the smaller of its configured Hold Time and the Hold Time
					// received in the OPEN message.
					holdTime := float64(body.HoldTime)
					myHoldTime := fsm.pConf.Timers.Config.HoldTime
					if holdTime > myHoldTime {
						fsm.pConf.Timers.State.NegotiatedHoldTime = myHoldTime
					} else {
						fsm.pConf.Timers.State.NegotiatedHoldTime = holdTime
					}

					// If the hold time shrank, derive keepalive as a third of
					// the negotiated value; otherwise keep the configured one.
					keepalive := fsm.pConf.Timers.Config.KeepaliveInterval
					if n := fsm.pConf.Timers.State.NegotiatedHoldTime; n < myHoldTime {
						keepalive = n / 3
					}
					fsm.pConf.Timers.State.KeepaliveInterval = keepalive

					gr, ok := fsm.capMap[bgp.BGP_CAP_GRACEFUL_RESTART]
					if fsm.pConf.GracefulRestart.Config.Enabled && ok {
						state := &fsm.pConf.GracefulRestart.State
						state.Enabled = true
						// If several GR capabilities were announced, honor the
						// last one.
						cap := gr[len(gr)-1].(*bgp.CapGracefulRestart)
						state.PeerRestartTime = uint16(cap.Time)

						for _, t := range cap.Tuples {
							n := bgp.AddressFamilyNameMap[bgp.AfiSafiToRouteFamily(t.AFI, t.SAFI)]
							for i, a := range fsm.pConf.AfiSafis {
								if string(a.Config.AfiSafiName) == n {
									fsm.pConf.AfiSafis[i].MpGracefulRestart.State.Enabled = true
									fsm.pConf.AfiSafis[i].MpGracefulRestart.State.Received = true
									break
								}
							}
						}

						// RFC 4724 4.1
						// To re-establish the session with its peer, the Restarting Speaker
						// MUST set the "Restart State" bit in the Graceful Restart Capability
						// of the OPEN message.
						if fsm.pConf.GracefulRestart.State.PeerRestarting && cap.Flags&0x08 == 0 {
							log.WithFields(log.Fields{
								"Topic": "Peer",
								"Key":   fsm.pConf.State.NeighborAddress,
								"State": fsm.state.String(),
							}).Warn("restart flag is not set")
							// just ignore
						}

						// RFC 4724 3
						// The most significant bit is defined as the Restart State (R)
						// bit, ...(snip)... When set (value 1), this bit
						// indicates that the BGP speaker has restarted, and its peer MUST
						// NOT wait for the End-of-RIB marker from the speaker before
						// advertising routing information to the speaker.
						if fsm.pConf.GracefulRestart.State.LocalRestarting && cap.Flags&0x08 != 0 {
							log.WithFields(log.Fields{
								"Topic": "Peer",
								"Key":   fsm.pConf.State.NeighborAddress,
								"State": fsm.state.String(),
							}).Debug("peer has restarted, skipping wait for EOR")
							for i := range fsm.pConf.AfiSafis {
								fsm.pConf.AfiSafis[i].MpGracefulRestart.State.EndOfRibReceived = true
							}
						}
						if fsm.pConf.GracefulRestart.Config.NotificationEnabled && cap.Flags&0x04 > 0 {
							fsm.pConf.GracefulRestart.State.NotificationEnabled = true
						}
					}
					llgr, ok2 := fsm.capMap[bgp.BGP_CAP_LONG_LIVED_GRACEFUL_RESTART]
					// LLGR is only honored when plain GR was also negotiated.
					if fsm.pConf.GracefulRestart.Config.LongLivedEnabled && ok && ok2 {
						fsm.pConf.GracefulRestart.State.LongLivedEnabled = true
						cap := llgr[len(llgr)-1].(*bgp.CapLongLivedGracefulRestart)
						for _, t := range cap.Tuples {
							n := bgp.AddressFamilyNameMap[bgp.AfiSafiToRouteFamily(t.AFI, t.SAFI)]
							for i, a := range fsm.pConf.AfiSafis {
								if string(a.Config.AfiSafiName) == n {
									fsm.pConf.AfiSafis[i].LongLivedGracefulRestart.State.Enabled = true
									fsm.pConf.AfiSafis[i].LongLivedGracefulRestart.State.Received = true
									fsm.pConf.AfiSafis[i].LongLivedGracefulRestart.State.PeerRestartTime = t.RestartTime
									break
								}
							}
						}
					}
					fsm.lock.Unlock()

					// Acknowledge the OPEN with a KEEPALIVE and advance.
					msg := bgp.NewBGPKeepAliveMessage()
					b, _ := msg.Serialize()
					fsm.conn.Write(b)
					fsm.bgpMessageStateUpdate(msg.Header.Type, false)
					return bgp.BGP_FSM_OPENCONFIRM, newfsmStateReason(fsmOpenMsgReceived, nil, nil)
				} else {
					// send notification?
					h.conn.Close()
					return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmInvalidMsg, nil, nil)
				}
			case *bgp.MessageError:
				msg, _ := fsm.sendNotificationFromErrorMsg(m)
				return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmInvalidMsg, msg, nil)
			default:
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   fsm.pConf.State.NeighborAddress,
					"State": fsm.state.String(),
					"Data":  e.MsgData,
				}).Panic("unknown msg type")
			}
		case err := <-h.stateReasonCh:
			h.conn.Close()
			return bgp.BGP_FSM_IDLE, &err
		case <-holdTimer.C:
			m, _ := fsm.sendNotification(bgp.BGP_ERROR_HOLD_TIMER_EXPIRED, 0, nil, "hold timer expired")
			return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmHoldTimerExpired, m, nil)
		case stateOp := <-fsm.adminStateCh:
			err := h.changeadminState(stateOp.State)
			if err == nil {
				switch stateOp.State {
				case adminStateDown:
					h.conn.Close()
					// NOTE(review): m here is the OPEN message we sent at the
					// top of this function, not a NOTIFICATION; openconfirm
					// passes nil in the same situation — confirm intended.
					return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmAdminDown, m, nil)
				case adminStateUp:
					log.WithFields(log.Fields{
						"Topic":      "Peer",
						"Key":        fsm.pConf.State.NeighborAddress,
						"State":      fsm.state.String(),
						"adminState": stateOp.State.String(),
					}).Panic("code logic bug")
				}
			}
		}
	}
}
// keepaliveTicker returns a ticker firing once per keepalive interval.
// When the negotiated hold time is zero (keepalives disabled), it returns an
// empty ticker whose nil channel never fires; a zero keepalive interval is
// clamped to one second.
func keepaliveTicker(fsm *fsm) *time.Ticker {
	fsm.lock.RLock()
	defer fsm.lock.RUnlock()
	if fsm.pConf.Timers.State.NegotiatedHoldTime == 0 {
		return &time.Ticker{}
	}
	interval := time.Duration(fsm.pConf.Timers.State.KeepaliveInterval) * time.Second
	if interval == 0 {
		interval = time.Second
	}
	return time.NewTicker(interval)
}
// openconfirm implements the OpenConfirm state: keepalives are sent on the
// negotiated interval while we wait for the peer's KEEPALIVE, which confirms
// the session and moves us to Established. Any other message, error, or
// timer expiry returns the FSM to Idle.
func (h *fsmHandler) openconfirm(ctx context.Context) (bgp.FSMState, *fsmStateReason) {
	fsm := h.fsm
	ticker := keepaliveTicker(fsm)
	h.msgCh = channels.NewInfiniteChannel()

	fsm.lock.RLock()
	h.conn = fsm.conn

	var wg sync.WaitGroup
	defer wg.Wait()
	wg.Add(1)
	go h.recvMessage(ctx, &wg)

	var holdTimer *time.Timer
	if fsm.pConf.Timers.State.NegotiatedHoldTime == 0 {
		// Hold time zero: use a zero timer whose nil channel never fires.
		holdTimer = &time.Timer{}
	} else {
		// RFC 4271 P.65
		// sets the HoldTimer according to the negotiated value
		holdTimer = time.NewTimer(time.Second * time.Duration(fsm.pConf.Timers.State.NegotiatedHoldTime))
	}
	fsm.lock.RUnlock()

	for {
		select {
		case <-ctx.Done():
			h.conn.Close()
			return -1, newfsmStateReason(fsmDying, nil, nil)
		case conn, ok := <-fsm.connCh:
			if !ok {
				// channel closed; this break only leaves the select
				break
			}
			// Already connected; refuse the extra accepted connection.
			conn.Close()
			fsm.lock.RLock()
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.State.NeighborAddress,
				"State": fsm.state.String(),
			}).Warn("Closed an accepted connection")
			fsm.lock.RUnlock()
		case <-fsm.gracefulRestartTimer.C:
			fsm.lock.RLock()
			restarting := fsm.pConf.GracefulRestart.State.PeerRestarting
			fsm.lock.RUnlock()
			if restarting {
				fsm.lock.RLock()
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   fsm.pConf.State.NeighborAddress,
					"State": fsm.state.String(),
				}).Warn("graceful restart timer expired")
				fsm.lock.RUnlock()
				h.conn.Close()
				return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmRestartTimerExpired, nil, nil)
			}
		case <-ticker.C:
			m := bgp.NewBGPKeepAliveMessage()
			b, _ := m.Serialize()
			// TODO: check error
			fsm.conn.Write(b)
			fsm.bgpMessageStateUpdate(m.Header.Type, false)
		case i, ok := <-h.msgCh.Out():
			if !ok {
				continue
			}
			e := i.(*fsmMsg)
			switch m := e.MsgData.(type) {
			case *bgp.BGPMessage:
				if m.Header.Type == bgp.BGP_MSG_KEEPALIVE {
					return bgp.BGP_FSM_ESTABLISHED, newfsmStateReason(fsmOpenMsgNegotiated, nil, nil)
				}
				// send notification ?
				h.conn.Close()
				return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmInvalidMsg, nil, nil)
			case *bgp.MessageError:
				msg, _ := fsm.sendNotificationFromErrorMsg(m)
				return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmInvalidMsg, msg, nil)
			default:
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   fsm.pConf.State.NeighborAddress,
					"State": fsm.state.String(),
					"Data":  e.MsgData,
				}).Panic("unknown msg type")
			}
		case err := <-h.stateReasonCh:
			h.conn.Close()
			return bgp.BGP_FSM_IDLE, &err
		case <-holdTimer.C:
			m, _ := fsm.sendNotification(bgp.BGP_ERROR_HOLD_TIMER_EXPIRED, 0, nil, "hold timer expired")
			return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmHoldTimerExpired, m, nil)
		case stateOp := <-fsm.adminStateCh:
			err := h.changeadminState(stateOp.State)
			if err == nil {
				switch stateOp.State {
				case adminStateDown:
					h.conn.Close()
					return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmAdminDown, nil, nil)
				case adminStateUp:
					log.WithFields(log.Fields{
						"Topic":      "Peer",
						"Key":        fsm.pConf.State.NeighborAddress,
						"State":      fsm.state.String(),
						"adminState": stateOp.State.String(),
					}).Panic("code logic bug")
				}
			}
		}
	}
}
// sendMessageloop is the transmit goroutine of the Established state. It
// drains h.outgoing (path advertisements and notifications) and sends
// periodic keepalives, serializing and writing each message to the peer. It
// exits when the context is cancelled, on a write failure, or after sending
// a NOTIFICATION (which terminates the session by definition).
func (h *fsmHandler) sendMessageloop(ctx context.Context, wg *sync.WaitGroup) error {
	defer wg.Done()
	conn := h.conn
	fsm := h.fsm
	ticker := keepaliveTicker(fsm)

	// send serializes and writes a single message, updating counters and
	// logging. A non-nil return means the connection is dead and the loop
	// must stop.
	send := func(m *bgp.BGPMessage) error {
		fsm.lock.RLock()
		// Downgrade 4-byte AS attributes for peers that did not negotiate
		// the four-octet AS capability.
		if fsm.twoByteAsTrans && m.Header.Type == bgp.BGP_MSG_UPDATE {
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.State.NeighborAddress,
				"State": fsm.state.String(),
				"Data":  m,
			}).Debug("update for 2byte AS peer")
			table.UpdatePathAttrs2ByteAs(m.Body.(*bgp.BGPUpdate))
			table.UpdatePathAggregator2ByteAs(m.Body.(*bgp.BGPUpdate))
		}
		b, err := m.Serialize(h.fsm.marshallingOptions)
		fsm.lock.RUnlock()
		if err != nil {
			// Serialization failure is logged and counted but does not kill
			// the loop; the message is simply dropped.
			fsm.lock.RLock()
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.State.NeighborAddress,
				"State": fsm.state.String(),
				"Data":  err,
			}).Warn("failed to serialize")
			fsm.lock.RUnlock()
			fsm.bgpMessageStateUpdate(0, false)
			return nil
		}
		fsm.lock.RLock()
		err = conn.SetWriteDeadline(time.Now().Add(time.Second * time.Duration(fsm.pConf.Timers.State.NegotiatedHoldTime)))
		fsm.lock.RUnlock()
		if err != nil {
			h.stateReasonCh <- *newfsmStateReason(fsmWriteFailed, nil, nil)
			conn.Close()
			return fmt.Errorf("failed to set write deadline")
		}
		_, err = conn.Write(b)
		if err != nil {
			fsm.lock.RLock()
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.State.NeighborAddress,
				"State": fsm.state.String(),
				"Data":  err,
			}).Warn("failed to send")
			fsm.lock.RUnlock()
			h.stateReasonCh <- *newfsmStateReason(fsmWriteFailed, nil, nil)
			conn.Close()
			return fmt.Errorf("closed")
		}
		fsm.bgpMessageStateUpdate(m.Header.Type, false)

		switch m.Header.Type {
		case bgp.BGP_MSG_NOTIFICATION:
			// Administrative shutdown/reset notifications may carry a
			// human-readable communication string (RFC 8203); log it.
			body := m.Body.(*bgp.BGPNotification)
			if body.ErrorCode == bgp.BGP_ERROR_CEASE && (body.ErrorSubcode == bgp.BGP_ERROR_SUB_ADMINISTRATIVE_SHUTDOWN || body.ErrorSubcode == bgp.BGP_ERROR_SUB_ADMINISTRATIVE_RESET) {
				communication, rest := decodeAdministrativeCommunication(body.Data)
				fsm.lock.RLock()
				log.WithFields(log.Fields{
					"Topic":               "Peer",
					"Key":                 fsm.pConf.State.NeighborAddress,
					"State":               fsm.state.String(),
					"Code":                body.ErrorCode,
					"Subcode":             body.ErrorSubcode,
					"Communicated-Reason": communication,
					"Data":                rest,
				}).Warn("sent notification")
				fsm.lock.RUnlock()
			} else {
				fsm.lock.RLock()
				log.WithFields(log.Fields{
					"Topic":   "Peer",
					"Key":     fsm.pConf.State.NeighborAddress,
					"State":   fsm.state.String(),
					"Code":    body.ErrorCode,
					"Subcode": body.ErrorSubcode,
					"Data":    body.Data,
				}).Warn("sent notification")
				fsm.lock.RUnlock()
			}
			// A sent NOTIFICATION ends the session: report it and stop.
			h.stateReasonCh <- *newfsmStateReason(fsmNotificationSent, m, nil)
			conn.Close()
			return fmt.Errorf("closed")
		case bgp.BGP_MSG_UPDATE:
			update := m.Body.(*bgp.BGPUpdate)
			fsm.lock.RLock()
			log.WithFields(log.Fields{
				"Topic":       "Peer",
				"Key":         fsm.pConf.State.NeighborAddress,
				"State":       fsm.state.String(),
				"nlri":        update.NLRI,
				"withdrawals": update.WithdrawnRoutes,
				"attributes":  update.PathAttributes,
			}).Debug("sent update")
			fsm.lock.RUnlock()
		default:
			fsm.lock.RLock()
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.State.NeighborAddress,
				"State": fsm.state.String(),
				"data":  m,
			}).Debug("sent")
			fsm.lock.RUnlock()
		}
		return nil
	}

	for {
		select {
		case <-ctx.Done():
			return nil
		case o := <-h.outgoing.Out():
			switch m := o.(type) {
			case *fsmOutgoingMsg:
				h.fsm.lock.RLock()
				options := h.fsm.marshallingOptions
				h.fsm.lock.RUnlock()
				for _, msg := range table.CreateUpdateMsgFromPaths(m.Paths, options) {
					if err := send(msg); err != nil {
						return nil
					}
				}
				if m.Notification != nil {
					if m.StayIdle {
						// current user is only prefix-limit
						// fix me if this is not the case
						h.changeadminState(adminStatePfxCt)
					}
					if err := send(m.Notification); err != nil {
						return nil
					}
				}
			default:
				// Anything else on the channel (e.g. a fsmStateReason pushed
				// by the receive side) terminates the transmit loop.
				return nil
			}
		case <-ticker.C:
			if err := send(bgp.NewBGPKeepAliveMessage()); err != nil {
				return nil
			}
		}
	}
}
// recvMessageloop is the receive goroutine of the Established state: it keeps
// decoding messages from the peer and forwarding them to h.msgCh until a
// read error occurs.
func (h *fsmHandler) recvMessageloop(ctx context.Context, wg *sync.WaitGroup) error {
	defer wg.Done()
	for {
		msg, err := h.recvMessageWithError()
		// Deliver any decoded message first; even a failed read may have
		// produced an event (e.g. a decode error) the server must see.
		if msg != nil {
			h.msgCh.In() <- msg
		}
		if err != nil {
			return nil
		}
	}
}
// established implements the Established state: it spawns the send/receive
// loops and supervises the session, handling hold-timer expiry, graceful
// restart, admin shutdown, and error reports from either loop. It returns
// the next state (Idle, or -1 when dying) plus the transition reason.
func (h *fsmHandler) established(ctx context.Context) (bgp.FSMState, *fsmStateReason) {
	var wg sync.WaitGroup
	fsm := h.fsm
	fsm.lock.Lock()
	h.conn = fsm.conn
	fsm.lock.Unlock()

	defer wg.Wait()
	wg.Add(2)

	go h.sendMessageloop(ctx, &wg)
	// In Established, received messages go straight to the server's
	// incoming channel rather than a state-local one.
	h.msgCh = h.incoming
	go h.recvMessageloop(ctx, &wg)

	var holdTimer *time.Timer
	// NOTE(review): this NegotiatedHoldTime read happens before the RLock
	// below is taken — confirm whether that is intentional.
	if fsm.pConf.Timers.State.NegotiatedHoldTime == 0 {
		holdTimer = &time.Timer{}
	} else {
		fsm.lock.RLock()
		holdTimer = time.NewTimer(time.Second * time.Duration(fsm.pConf.Timers.State.NegotiatedHoldTime))
		fsm.lock.RUnlock()
	}

	// The session is up again; cancel any pending graceful-restart timer.
	fsm.gracefulRestartTimer.Stop()

	for {
		select {
		case <-ctx.Done():
			// Best-effort: flush a queued NOTIFICATION before closing.
			select {
			case m := <-fsm.notification:
				b, _ := m.Serialize(h.fsm.marshallingOptions)
				h.conn.Write(b)
			default:
				// nothing to do
			}
			h.conn.Close()
			return -1, newfsmStateReason(fsmDying, nil, nil)
		case conn, ok := <-fsm.connCh:
			if !ok {
				// channel closed; this break only leaves the select
				break
			}
			conn.Close()
			fsm.lock.RLock()
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.State.NeighborAddress,
				"State": fsm.state.String(),
			}).Warn("Closed an accepted connection")
			fsm.lock.RUnlock()
		case err := <-h.stateReasonCh:
			h.conn.Close()
			// if recv goroutine hit an error and sent to
			// stateReasonCh, then tx goroutine might take
			// long until it exits because it waits for
			// ctx.Done() or keepalive timer. So let kill
			// it now.
			h.outgoing.In() <- err
			fsm.lock.RLock()
			if s := fsm.pConf.GracefulRestart.State; s.Enabled {
				// Convert qualifying failures into a graceful-restart reason
				// and arm the restart timer so routes are retained.
				if (s.NotificationEnabled && err.Type == fsmNotificationRecv) ||
					(err.Type == fsmNotificationSent &&
						err.BGPNotification.Body.(*bgp.BGPNotification).ErrorCode == bgp.BGP_ERROR_HOLD_TIMER_EXPIRED) ||
					err.Type == fsmReadFailed ||
					err.Type == fsmWriteFailed {
					err = *newfsmStateReason(fsmGracefulRestart, nil, nil)
					log.WithFields(log.Fields{
						"Topic": "Peer",
						"Key":   fsm.pConf.State.NeighborAddress,
						"State": fsm.state.String(),
					}).Info("peer graceful restart")
					fsm.gracefulRestartTimer.Reset(time.Duration(fsm.pConf.GracefulRestart.State.PeerRestartTime) * time.Second)
				}
			}
			fsm.lock.RUnlock()
			return bgp.BGP_FSM_IDLE, &err
		case <-holdTimer.C:
			fsm.lock.RLock()
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.State.NeighborAddress,
				"State": fsm.state.String(),
			}).Warn("hold timer expired")
			fsm.lock.RUnlock()
			m := bgp.NewBGPNotificationMessage(bgp.BGP_ERROR_HOLD_TIMER_EXPIRED, 0, nil)
			h.outgoing.In() <- &fsmOutgoingMsg{Notification: m}
			fsm.lock.RLock()
			s := fsm.pConf.GracefulRestart.State
			fsm.lock.RUnlock()
			// Do not return hold timer expired to server if graceful restart is enabled
			// Let it fallback to read/write error or fsmNotificationSent handled above
			// Reference: https://github.com/osrg/gobgp/issues/2174
			if !s.Enabled {
				return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmHoldTimerExpired, m, nil)
			}
		case <-h.holdTimerResetCh:
			fsm.lock.RLock()
			if fsm.pConf.Timers.State.NegotiatedHoldTime != 0 {
				holdTimer.Reset(time.Second * time.Duration(fsm.pConf.Timers.State.NegotiatedHoldTime))
			}
			fsm.lock.RUnlock()
		case stateOp := <-fsm.adminStateCh:
			err := h.changeadminState(stateOp.State)
			if err == nil {
				switch stateOp.State {
				case adminStateDown:
					// Queue a cease notification; the send loop terminates
					// the session after transmitting it.
					m := bgp.NewBGPNotificationMessage(bgp.BGP_ERROR_CEASE, bgp.BGP_ERROR_SUB_ADMINISTRATIVE_SHUTDOWN, stateOp.Communication)
					h.outgoing.In() <- &fsmOutgoingMsg{Notification: m}
				}
			}
		}
	}
}
// loop runs one FSM iteration: it dispatches to the handler function for the
// current state, waits for it to return the next state and the transition
// reason, then records the reason and reports the state change to the server
// via h.incoming.
//
// Fix: the section that stores fsm.reason (and may rewrite reason.Type /
// reason.BGPNotification) previously ran under the READ lock, racing with
// other readers/writers of fsm.reason; it now takes the write lock.
func (h *fsmHandler) loop(ctx context.Context, wg *sync.WaitGroup) error {
	defer wg.Done()

	fsm := h.fsm
	fsm.lock.RLock()
	oldState := fsm.state
	fsm.lock.RUnlock()

	var reason *fsmStateReason
	nextState := bgp.FSMState(-1)
	fsm.lock.RLock()
	fsmState := fsm.state
	fsm.lock.RUnlock()

	// Each state function blocks until the state is left.
	switch fsmState {
	case bgp.BGP_FSM_IDLE:
		nextState, reason = h.idle(ctx)
	// case bgp.BGP_FSM_CONNECT:
	// 	nextState = h.connect()
	case bgp.BGP_FSM_ACTIVE:
		nextState, reason = h.active(ctx)
	case bgp.BGP_FSM_OPENSENT:
		nextState, reason = h.opensent(ctx)
	case bgp.BGP_FSM_OPENCONFIRM:
		nextState, reason = h.openconfirm(ctx)
	case bgp.BGP_FSM_ESTABLISHED:
		nextState, reason = h.established(ctx)
	}

	// Write lock: we mutate fsm.reason (and possibly the reason pointed to)
	// below, so a read lock is not sufficient here.
	fsm.lock.Lock()
	fsm.reason = reason

	if nextState == bgp.BGP_FSM_ESTABLISHED && oldState == bgp.BGP_FSM_OPENCONFIRM {
		log.WithFields(log.Fields{
			"Topic": "Peer",
			"Key":   fsm.pConf.State.NeighborAddress,
			"State": fsm.state.String(),
		}).Info("Peer Up")
	}

	if oldState == bgp.BGP_FSM_ESTABLISHED {
		// The main goroutine sent the notification due to
		// deconfiguration or something.
		reason := fsm.reason
		if fsm.h.sentNotification != nil {
			reason.Type = fsmNotificationSent
			reason.BGPNotification = fsm.h.sentNotification
		}
		log.WithFields(log.Fields{
			"Topic":  "Peer",
			"Key":    fsm.pConf.State.NeighborAddress,
			"State":  fsm.state.String(),
			"Reason": reason.String(),
		}).Info("Peer Down")
	}
	fsm.lock.Unlock()

	// Report the transition; read lock suffices for the address lookup.
	fsm.lock.RLock()
	h.incoming.In() <- &fsmMsg{
		fsm:         fsm,
		MsgType:     fsmMsgStateChange,
		MsgSrc:      fsm.pConf.State.NeighborAddress,
		MsgData:     nextState,
		StateReason: reason,
	}
	fsm.lock.RUnlock()
	return nil
}
// changeadminState transitions the FSM's administrative state to s, logging
// the change. It returns an error (and changes nothing) when the FSM is
// already in state s.
func (h *fsmHandler) changeadminState(s adminState) error {
	h.fsm.lock.Lock()
	defer h.fsm.lock.Unlock()
	fsm := h.fsm
	if fsm.adminState != s {
		log.WithFields(log.Fields{
			"Topic":      "Peer",
			"Key":        fsm.pConf.State.NeighborAddress,
			"State":      fsm.state.String(),
			"adminState": s.String(),
		}).Debug("admin state changed")

		fsm.adminState = s
		// NOTE(review): AdminDown is toggled rather than derived from s; a
		// transition between adminStateDown and adminStatePfxCt would flip
		// it incorrectly — confirm such transitions cannot occur.
		fsm.pConf.State.AdminDown = !fsm.pConf.State.AdminDown

		switch s {
		case adminStateUp:
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.State.NeighborAddress,
				"State": fsm.state.String(),
			}).Info("Administrative start")
		case adminStateDown:
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.State.NeighborAddress,
				"State": fsm.state.String(),
			}).Info("Administrative shutdown")
		case adminStatePfxCt:
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.State.NeighborAddress,
				"State": fsm.state.String(),
			}).Info("Administrative shutdown(Prefix limit reached)")
		}
	} else {
		log.WithFields(log.Fields{
			"Topic": "Peer",
			"Key":   fsm.pConf.State.NeighborAddress,
			"State": fsm.state.String(),
		}).Warn("cannot change to the same state")
		return fmt.Errorf("cannot change to the same state")
	}
	return nil
}
server: don't send empty ExtendedNexthop capability
Signed-off-by: FUJITA Tomonori <93dac1fe9c4b2a3957982200319981492ad4976e@gmail.com>
// Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"fmt"
"io"
"math/rand"
"net"
"strconv"
"sync"
"syscall"
"time"
"github.com/eapache/channels"
"github.com/osrg/gobgp/internal/pkg/config"
"github.com/osrg/gobgp/internal/pkg/table"
"github.com/osrg/gobgp/pkg/packet/bgp"
"github.com/osrg/gobgp/pkg/packet/bmp"
log "github.com/sirupsen/logrus"
)
const (
	// minConnectRetryInterval is the lower bound, in seconds, for the
	// connect-retry interval used when dialing a peer.
	minConnectRetryInterval = 5
)
// fsmStateReasonType enumerates why the FSM left its previous state.
type fsmStateReasonType uint8

const (
	fsmDying fsmStateReasonType = iota // handler context was cancelled
	fsmAdminDown
	fsmReadFailed
	fsmWriteFailed
	fsmNotificationSent
	fsmNotificationRecv
	fsmHoldTimerExpired
	fsmIdleTimerExpired
	fsmRestartTimerExpired
	fsmGracefulRestart
	fsmInvalidMsg
	fsmNewConnection
	fsmOpenMsgReceived
	fsmOpenMsgNegotiated
	fsmHardReset
	fsmDeConfigured
)
// fsmStateReason records why a state transition happened, optionally carrying
// the NOTIFICATION message involved and any raw data attached to it.
type fsmStateReason struct {
	Type            fsmStateReasonType
	BGPNotification *bgp.BGPMessage // set for notification-sent/received reasons
	Data            []byte
}
// newfsmStateReason builds an fsmStateReason from its parts.
func newfsmStateReason(typ fsmStateReasonType, notif *bgp.BGPMessage, data []byte) *fsmStateReason {
	return &fsmStateReason{
		Type:            typ,
		BGPNotification: notif,
		Data:            data,
	}
}
// String returns a short human-readable description of the state reason.
// Notification reasons additionally include the decoded error code/subcode.
//
// Fix: fsmDeConfigured previously had no case and fell through to "unknown".
func (r fsmStateReason) String() string {
	switch r.Type {
	case fsmDying:
		return "dying"
	case fsmAdminDown:
		return "admin-down"
	case fsmReadFailed:
		return "read-failed"
	case fsmWriteFailed:
		return "write-failed"
	case fsmNotificationSent:
		body := r.BGPNotification.Body.(*bgp.BGPNotification)
		return fmt.Sprintf("notification-sent %s", bgp.NewNotificationErrorCode(body.ErrorCode, body.ErrorSubcode).String())
	case fsmNotificationRecv:
		body := r.BGPNotification.Body.(*bgp.BGPNotification)
		return fmt.Sprintf("notification-received %s", bgp.NewNotificationErrorCode(body.ErrorCode, body.ErrorSubcode).String())
	case fsmHoldTimerExpired:
		return "hold-timer-expired"
	case fsmIdleTimerExpired:
		return "idle-hold-timer-expired"
	case fsmRestartTimerExpired:
		return "restart-timer-expired"
	case fsmGracefulRestart:
		return "graceful-restart"
	case fsmInvalidMsg:
		return "invalid-msg"
	case fsmNewConnection:
		return "new-connection"
	case fsmOpenMsgReceived:
		return "open-msg-received"
	case fsmOpenMsgNegotiated:
		return "open-msg-negotiated"
	case fsmHardReset:
		return "hard-reset"
	case fsmDeConfigured:
		return "de-configured"
	default:
		return "unknown"
	}
}
// fsmMsgType classifies events sent from an fsmHandler to the server.
type fsmMsgType int

const (
	_                 fsmMsgType = iota
	fsmMsgStateChange            // the FSM moved to a new state
	fsmMsgBGPMessage             // a BGP message (or decode error) arrived from the peer
	fsmMsgRouteRefresh           // a route-refresh request arrived from the peer
)
// fsmMsg is the unit of communication from an fsmHandler to the server loop.
type fsmMsg struct {
	MsgType fsmMsgType
	fsm     *fsm
	MsgSrc  string // neighbor address the event concerns
	// MsgData holds a *bgp.BGPMessage or *bgp.MessageError for message
	// events, or the next bgp.FSMState for state-change events.
	MsgData     interface{}
	StateReason *fsmStateReason
	PathList    []*table.Path // paths decoded from a received UPDATE
	timestamp   time.Time
	payload     []byte // raw message bytes (producer not visible in this chunk)
}
// fsmOutgoingMsg is a unit of work for sendMessageloop: paths to advertise
// and/or a NOTIFICATION to send to the peer.
type fsmOutgoingMsg struct {
	Paths        []*table.Path
	Notification *bgp.BGPMessage
	StayIdle     bool // switch to adminStatePfxCt before sending the notification
}
const (
	// holdtimeOpensent is the "large" hold time, in seconds, used while in
	// the OpenSent state (RFC 4271 suggests 4 minutes).
	holdtimeOpensent = 240
	// holdtimeIdle is the idle hold time, in seconds, applied after a reset.
	holdtimeIdle = 5
)
// adminState is the administratively configured state of a neighbor session.
type adminState int

const (
	adminStateUp    adminState = iota // session administratively enabled
	adminStateDown                    // session administratively shut down
	adminStatePfxCt                   // shut down because the prefix limit was hit
)
// String returns the identifier-style name of the admin state, or "Unknown"
// for unrecognized values.
func (s adminState) String() string {
	names := map[adminState]string{
		adminStateUp:    "adminStateUp",
		adminStateDown:  "adminStateDown",
		adminStatePfxCt: "adminStatePfxCt",
	}
	if name, ok := names[s]; ok {
		return name
	}
	return "Unknown"
}
// adminStateOperation requests an admin state change, optionally carrying a
// shutdown communication string to send to the peer in the cease
// NOTIFICATION (see decodeAdministrativeCommunication usage).
type adminStateOperation struct {
	State         adminState
	Communication []byte
}
// fsm holds the per-neighbor BGP finite state machine state. Mutable fields
// are guarded by lock unless noted otherwise.
type fsm struct {
	gConf                *config.Global   // global server configuration
	pConf                *config.Neighbor // per-neighbor configuration and state
	lock                 sync.RWMutex
	state                bgp.FSMState
	outgoingCh           *channels.InfiniteChannel
	incomingCh           *channels.InfiniteChannel
	reason               *fsmStateReason // reason for the last state transition
	conn                 net.Conn        // active TCP connection to the peer
	connCh               chan net.Conn   // newly accepted connections
	idleHoldTime         float64
	opensentHoldTime     float64
	adminState           adminState
	adminStateCh         chan adminStateOperation
	h                    *fsmHandler
	rfMap                map[bgp.RouteFamily]bgp.BGPAddPathMode
	capMap               map[bgp.BGPCapabilityCode][]bgp.ParameterCapabilityInterface
	recvOpen             *bgp.BGPMessage // OPEN message received from the peer
	peerInfo             *table.PeerInfo
	gracefulRestartTimer *time.Timer
	twoByteAsTrans       bool // peer did not negotiate 4-octet AS numbers
	marshallingOptions   *bgp.MarshallingOption
	notification         chan *bgp.BGPMessage // notification to flush on teardown
}
// bgpMessageStateUpdate bumps the sent or received message counters for one
// BGP message of the given type; unrecognized types count as discarded. For
// received UPDATEs it also records the receive timestamp.
func (fsm *fsm) bgpMessageStateUpdate(MessageType uint8, isIn bool) {
	fsm.lock.Lock()
	defer fsm.lock.Unlock()
	state := &fsm.pConf.State.Messages
	timer := &fsm.pConf.Timers
	if isIn {
		state.Received.Total++
		switch MessageType {
		case bgp.BGP_MSG_OPEN:
			state.Received.Open++
		case bgp.BGP_MSG_UPDATE:
			state.Received.Update++
			timer.State.UpdateRecvTime = time.Now().Unix()
		case bgp.BGP_MSG_NOTIFICATION:
			state.Received.Notification++
		case bgp.BGP_MSG_KEEPALIVE:
			state.Received.Keepalive++
		case bgp.BGP_MSG_ROUTE_REFRESH:
			state.Received.Refresh++
		default:
			state.Received.Discarded++
		}
		return
	}
	state.Sent.Total++
	switch MessageType {
	case bgp.BGP_MSG_OPEN:
		state.Sent.Open++
	case bgp.BGP_MSG_UPDATE:
		state.Sent.Update++
	case bgp.BGP_MSG_NOTIFICATION:
		state.Sent.Notification++
	case bgp.BGP_MSG_KEEPALIVE:
		state.Sent.Keepalive++
	case bgp.BGP_MSG_ROUTE_REFRESH:
		state.Sent.Refresh++
	default:
		state.Sent.Discarded++
	}
}
// bmpStatsUpdate adds increment to the BMP statistics counter selected by
// statType; unsupported stat types are silently ignored.
func (fsm *fsm) bmpStatsUpdate(statType uint16, increment int) {
	fsm.lock.Lock()
	defer fsm.lock.Unlock()
	stats := &fsm.pConf.State.Messages.Received
	delta := uint32(increment)
	// TODO
	// Support other stat types.
	switch statType {
	case bmp.BMP_STAT_TYPE_WITHDRAW_UPDATE:
		stats.WithdrawUpdate += delta
	case bmp.BMP_STAT_TYPE_WITHDRAW_PREFIX:
		stats.WithdrawPrefix += delta
	}
}
// newFSM creates a neighbor FSM in the Idle state. The graceful-restart
// timer is created and immediately stopped; it is armed only when a restart
// is detected.
func newFSM(gConf *config.Global, pConf *config.Neighbor) *fsm {
	adminState := adminStateUp
	if pConf.Config.AdminDown {
		adminState = adminStateDown
	}
	pConf.State.SessionState = config.IntToSessionStateMap[int(bgp.BGP_FSM_IDLE)]
	pConf.Timers.State.Downtime = time.Now().Unix()
	fsm := &fsm{
		gConf:                gConf,
		pConf:                pConf,
		state:                bgp.BGP_FSM_IDLE,
		outgoingCh:           channels.NewInfiniteChannel(),
		incomingCh:           channels.NewInfiniteChannel(),
		connCh:               make(chan net.Conn, 1),
		opensentHoldTime:     float64(holdtimeOpensent),
		adminState:           adminState,
		adminStateCh:         make(chan adminStateOperation, 1),
		rfMap:                make(map[bgp.RouteFamily]bgp.BGPAddPathMode),
		capMap:               make(map[bgp.BGPCapabilityCode][]bgp.ParameterCapabilityInterface),
		peerInfo:             table.NewPeerInfo(gConf, pConf),
		gracefulRestartTimer: time.NewTimer(time.Hour),
		notification:         make(chan *bgp.BGPMessage, 1),
	}
	fsm.gracefulRestartTimer.Stop()
	return fsm
}
// StateChange commits a transition to nextState, updating uptime/downtime
// bookkeeping. On entering Established it also decides whether 2-byte AS
// translation is needed for this session (when either side did not announce
// the four-octet AS capability).
func (fsm *fsm) StateChange(nextState bgp.FSMState) {
	fsm.lock.Lock()
	defer fsm.lock.Unlock()

	log.WithFields(log.Fields{
		"Topic":  "Peer",
		"Key":    fsm.pConf.State.NeighborAddress,
		"old":    fsm.state.String(),
		"new":    nextState.String(),
		"reason": fsm.reason,
	}).Debug("state changed")
	fsm.state = nextState
	switch nextState {
	case bgp.BGP_FSM_ESTABLISHED:
		fsm.pConf.Timers.State.Uptime = time.Now().Unix()
		fsm.pConf.State.EstablishedCount++
		// reset the state set by the previous session
		fsm.twoByteAsTrans = false
		if _, y := fsm.capMap[bgp.BGP_CAP_FOUR_OCTET_AS_NUMBER]; !y {
			// Peer did not announce 4-octet AS support.
			fsm.twoByteAsTrans = true
			break
		}
		// Check whether our own configuration announces 4-octet AS support.
		y := func() bool {
			for _, c := range capabilitiesFromConfig(fsm.pConf) {
				switch c.(type) {
				case *bgp.CapFourOctetASNumber:
					return true
				}
			}
			return false
		}()
		if !y {
			fsm.twoByteAsTrans = true
		}
	default:
		fsm.pConf.Timers.State.Downtime = time.Now().Unix()
	}
}
func hostport(addr net.Addr) (string, uint16) {
if addr != nil {
host, port, err := net.SplitHostPort(addr.String())
if err != nil {
return "", 0
}
p, _ := strconv.ParseUint(port, 10, 16)
return host, uint16(p)
}
return "", 0
}
// RemoteHostPort returns the peer-side address and port of the current
// connection ("" and 0 when unavailable).
func (fsm *fsm) RemoteHostPort() (string, uint16) {
	return hostport(fsm.conn.RemoteAddr())
}
// LocalHostPort returns the local address and port of the current
// connection ("" and 0 when unavailable).
func (fsm *fsm) LocalHostPort() (string, uint16) {
	return hostport(fsm.conn.LocalAddr())
}
// sendNotificationFromErrorMsg converts e into a NOTIFICATION, writes it to
// the peer, records it as the handler's sent notification on success, and
// closes the connection. It returns an error when no connection exists.
func (fsm *fsm) sendNotificationFromErrorMsg(e *bgp.MessageError) (*bgp.BGPMessage, error) {
	fsm.lock.RLock()
	established := fsm.h != nil && fsm.h.conn != nil
	fsm.lock.RUnlock()

	// NOTE(review): fsm.h.conn is used after the lock is released; confirm
	// the handler cannot be swapped concurrently at this point.
	if established {
		m := bgp.NewBGPNotificationMessage(e.TypeCode, e.SubTypeCode, e.Data)
		b, _ := m.Serialize()
		_, err := fsm.h.conn.Write(b)
		if err == nil {
			fsm.bgpMessageStateUpdate(m.Header.Type, false)
			fsm.h.sentNotification = m
		}
		fsm.h.conn.Close()
		log.WithFields(log.Fields{
			"Topic": "Peer",
			"Key":   fsm.pConf.State.NeighborAddress,
			"Data":  e,
		}).Warn("sent notification")
		return m, nil
	}
	return nil, fmt.Errorf("can't send notification to %s since TCP connection is not established", fsm.pConf.State.NeighborAddress)
}
// sendNotification builds a NOTIFICATION with the given code/subcode/data and
// sends it to the peer; msg is only used as the MessageError's text.
func (fsm *fsm) sendNotification(code, subType uint8, data []byte, msg string) (*bgp.BGPMessage, error) {
	e := bgp.NewMessageError(code, subType, data, msg)
	return fsm.sendNotificationFromErrorMsg(e.(*bgp.MessageError))
}
// fsmHandler drives one fsm, owning the goroutines that read from and write
// to the peer connection for the current state.
type fsmHandler struct {
	fsm              *fsm
	conn             net.Conn                  // connection used by this state's goroutines
	msgCh            *channels.InfiniteChannel // decoded messages from the receive goroutine
	stateReasonCh    chan fsmStateReason       // read/write failures, sent/received notifications
	incoming         *channels.InfiniteChannel // events delivered to the server
	outgoing         *channels.InfiniteChannel // messages queued for transmission
	holdTimerResetCh chan bool                 // signals that a message arrived, resetting the hold timer
	sentNotification *bgp.BGPMessage           // notification we sent to the peer, if any
	ctx              context.Context
	ctxCancel        context.CancelFunc
	wg               *sync.WaitGroup
}
// newFSMHandler creates a handler for fsm and immediately starts its state
// loop in a new goroutine; ctxCancel stops the loop and wg waits for it.
func newFSMHandler(fsm *fsm, outgoing *channels.InfiniteChannel) *fsmHandler {
	ctx, cancel := context.WithCancel(context.Background())
	h := &fsmHandler{
		fsm:              fsm,
		stateReasonCh:    make(chan fsmStateReason, 2),
		incoming:         fsm.incomingCh,
		outgoing:         outgoing,
		holdTimerResetCh: make(chan bool, 2),
		wg:               &sync.WaitGroup{},
		ctx:              ctx,
		ctxCancel:        cancel,
	}
	h.wg.Add(1)
	go h.loop(ctx, h.wg)
	return h
}
// idle implements the BGP IDLE state. It waits for the IdleHoldTimer to
// expire — transitioning to ACTIVE when the admin state is up — while
// rejecting any incoming connections, and also reacts to graceful-restart
// timer expiry and admin-state changes.
func (h *fsmHandler) idle(ctx context.Context) (bgp.FSMState, *fsmStateReason) {
	fsm := h.fsm
	fsm.lock.RLock()
	idleHoldTimer := time.NewTimer(time.Second * time.Duration(fsm.idleHoldTime))
	fsm.lock.RUnlock()
	for {
		select {
		case <-ctx.Done():
			return -1, newfsmStateReason(fsmDying, nil, nil)
		case <-fsm.gracefulRestartTimer.C:
			fsm.lock.RLock()
			restarting := fsm.pConf.GracefulRestart.State.PeerRestarting
			fsm.lock.RUnlock()
			if restarting {
				fsm.lock.RLock()
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   fsm.pConf.State.NeighborAddress,
					"State": fsm.state.String(),
				}).Warn("graceful restart timer expired")
				fsm.lock.RUnlock()
				return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmRestartTimerExpired, nil, nil)
			}
		case conn, ok := <-fsm.connCh:
			if !ok {
				break
			}
			// Connections are never accepted while idle.
			conn.Close()
			fsm.lock.RLock()
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.State.NeighborAddress,
				"State": fsm.state.String(),
			}).Warn("Closed an accepted connection")
			fsm.lock.RUnlock()
		case <-idleHoldTimer.C:
			fsm.lock.RLock()
			adminStateUp := fsm.adminState == adminStateUp
			fsm.lock.RUnlock()
			if adminStateUp {
				fsm.lock.Lock()
				log.WithFields(log.Fields{
					"Topic":    "Peer",
					"Key":      fsm.pConf.State.NeighborAddress,
					"Duration": fsm.idleHoldTime,
				}).Debug("IdleHoldTimer expired")
				// Subsequent idle periods use the default idle hold time.
				fsm.idleHoldTime = holdtimeIdle
				fsm.lock.Unlock()
				return bgp.BGP_FSM_ACTIVE, newfsmStateReason(fsmIdleTimerExpired, nil, nil)
			} else {
				log.WithFields(log.Fields{"Topic": "Peer"}).Debug("IdleHoldTimer expired, but stay at idle because the admin state is DOWN")
			}
		case stateOp := <-fsm.adminStateCh:
			err := h.changeadminState(stateOp.State)
			if err == nil {
				switch stateOp.State {
				case adminStateDown:
					// stop idle hold timer
					idleHoldTimer.Stop()
				case adminStateUp:
					// restart idle hold timer
					fsm.lock.RLock()
					idleHoldTimer.Reset(time.Second * time.Duration(fsm.idleHoldTime))
					fsm.lock.RUnlock()
				}
			}
		}
	}
}
// connectLoop repeatedly attempts an active (outgoing) TCP connection to
// the peer until the context is canceled or a connection is handed to
// fsm.connCh. The first attempt uses the minimum retry interval; later
// attempts use the configured ConnectRetry interval with random jitter.
func (h *fsmHandler) connectLoop(ctx context.Context, wg *sync.WaitGroup) {
	defer wg.Done()
	fsm := h.fsm
	// Snapshot everything needed from the (lock-protected) configuration.
	retry, addr, port, password, ttl, ttlMin, localAddress, bindInterface := func() (int, string, int, string, uint8, uint8, string, string) {
		fsm.lock.RLock()
		defer fsm.lock.RUnlock()
		tick := int(fsm.pConf.Timers.Config.ConnectRetry)
		if tick < minConnectRetryInterval {
			tick = minConnectRetryInterval
		}
		addr := fsm.pConf.State.NeighborAddress
		port := int(bgp.BGP_PORT)
		if fsm.pConf.Transport.Config.RemotePort != 0 {
			port = int(fsm.pConf.Transport.Config.RemotePort)
		}
		password := fsm.pConf.Config.AuthPassword
		ttl := uint8(0)
		ttlMin := uint8(0)
		if fsm.pConf.TtlSecurity.Config.Enabled {
			ttl = 255
			ttlMin = fsm.pConf.TtlSecurity.Config.TtlMin
		} else if fsm.pConf.Config.PeerAs != 0 && fsm.pConf.Config.PeerType == config.PEER_TYPE_EXTERNAL {
			// eBGP defaults to TTL 1 unless multihop is enabled.
			ttl = 1
			if fsm.pConf.EbgpMultihop.Config.Enabled {
				ttl = fsm.pConf.EbgpMultihop.Config.MultihopTtl
			}
		}
		return tick, addr, port, password, ttl, ttlMin, fsm.pConf.Transport.Config.LocalAddress, fsm.pConf.Transport.Config.BindInterface
	}()
	// FIX: seed the jitter source once instead of allocating and re-seeding
	// a new rand.Rand on every retry iteration.
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	tick := minConnectRetryInterval
	for {
		// Wait tick..2*tick seconds before the next attempt.
		timer := time.NewTimer(time.Duration(r.Intn(tick)+tick) * time.Second)
		select {
		case <-ctx.Done():
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   addr,
			}).Debug("stop connect loop")
			timer.Stop()
			return
		case <-timer.C:
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   addr,
			}).Debug("try to connect")
		}
		laddr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(localAddress, "0"))
		if err != nil {
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   addr,
			}).Warnf("failed to resolve local address: %s", err)
		}
		if err == nil {
			d := net.Dialer{
				LocalAddr: laddr,
				Timeout:   time.Duration(tick-1) * time.Second,
				Control: func(network, address string, c syscall.RawConn) error {
					// Apply TTL / min-TTL / MD5 password / device binding
					// before the connection is initiated.
					return dialerControl(network, address, c, ttl, ttlMin, password, bindInterface)
				},
			}
			conn, err := d.DialContext(ctx, "tcp", net.JoinHostPort(addr, strconv.Itoa(port)))
			select {
			case <-ctx.Done():
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   addr,
				}).Debug("stop connect loop")
				return
			default:
			}
			if err == nil {
				select {
				case fsm.connCh <- conn:
					return
				default:
					// The FSM already has a connection pending; drop ours.
					conn.Close()
					log.WithFields(log.Fields{
						"Topic": "Peer",
						"Key":   addr,
					}).Warn("active conn is closed to avoid being blocked")
				}
			} else {
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   addr,
				}).Debugf("failed to connect: %s", err)
			}
		}
		// After the first (fast) attempt, fall back to the configured retry.
		tick = retry
	}
}
// active implements the BGP ACTIVE state: unless passive mode is
// configured it runs connectLoop in the background, then waits for a TCP
// connection (inbound or outbound), applies the TTL-related socket
// options, and transitions to OPENSENT. Graceful-restart expiry, state
// reasons from other goroutines, and admin-state changes are also handled.
func (h *fsmHandler) active(ctx context.Context) (bgp.FSMState, *fsmStateReason) {
	c, cancel := context.WithCancel(ctx)
	fsm := h.fsm
	var wg sync.WaitGroup
	fsm.lock.RLock()
	tryConnect := !fsm.pConf.Transport.Config.PassiveMode
	fsm.lock.RUnlock()
	if tryConnect {
		wg.Add(1)
		go h.connectLoop(c, &wg)
	}
	defer func() {
		cancel()
		wg.Wait()
	}()
	for {
		select {
		case <-ctx.Done():
			return -1, newfsmStateReason(fsmDying, nil, nil)
		case conn, ok := <-fsm.connCh:
			if !ok {
				break
			}
			fsm.lock.Lock()
			fsm.conn = conn
			fsm.lock.Unlock()
			ttl := 0
			ttlMin := 0
			fsm.lock.RLock()
			if fsm.pConf.TtlSecurity.Config.Enabled {
				ttl = 255
				ttlMin = int(fsm.pConf.TtlSecurity.Config.TtlMin)
			} else if fsm.pConf.Config.PeerAs != 0 && fsm.pConf.Config.PeerType == config.PEER_TYPE_EXTERNAL {
				// eBGP: default TTL of 1 unless multihop or an explicit
				// transport TTL is configured.
				if fsm.pConf.EbgpMultihop.Config.Enabled {
					ttl = int(fsm.pConf.EbgpMultihop.Config.MultihopTtl)
				} else if fsm.pConf.Transport.Config.Ttl != 0 {
					ttl = int(fsm.pConf.Transport.Config.Ttl)
				} else {
					ttl = 1
				}
			} else if fsm.pConf.Transport.Config.Ttl != 0 {
				ttl = int(fsm.pConf.Transport.Config.Ttl)
			}
			if ttl != 0 {
				if err := setTCPTTLSockopt(conn.(*net.TCPConn), ttl); err != nil {
					log.WithFields(log.Fields{
						"Topic": "Peer",
						"Key":   fsm.pConf.Config.NeighborAddress,
						"State": fsm.state.String(),
					}).Warnf("cannot set TTL(=%d) for peer: %s", ttl, err)
				}
			}
			if ttlMin != 0 {
				if err := setTCPMinTTLSockopt(conn.(*net.TCPConn), ttlMin); err != nil {
					// BUGFIX: report the minimum-TTL value (ttlMin) that
					// failed to apply, not the unrelated ttl value.
					log.WithFields(log.Fields{
						"Topic": "Peer",
						"Key":   fsm.pConf.Config.NeighborAddress,
						"State": fsm.state.String(),
					}).Warnf("cannot set minimal TTL(=%d) for peer: %s", ttlMin, err)
				}
			}
			fsm.lock.RUnlock()
			// we don't implement delayed open timer so move to opensent right
			// away.
			return bgp.BGP_FSM_OPENSENT, newfsmStateReason(fsmNewConnection, nil, nil)
		case <-fsm.gracefulRestartTimer.C:
			fsm.lock.RLock()
			restarting := fsm.pConf.GracefulRestart.State.PeerRestarting
			fsm.lock.RUnlock()
			if restarting {
				fsm.lock.RLock()
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   fsm.pConf.State.NeighborAddress,
					"State": fsm.state.String(),
				}).Warn("graceful restart timer expired")
				fsm.lock.RUnlock()
				return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmRestartTimerExpired, nil, nil)
			}
		case err := <-h.stateReasonCh:
			return bgp.BGP_FSM_IDLE, &err
		case stateOp := <-fsm.adminStateCh:
			err := h.changeadminState(stateOp.State)
			if err == nil {
				switch stateOp.State {
				case adminStateDown:
					return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmAdminDown, nil, nil)
				case adminStateUp:
					// Admin-up while already in ACTIVE should be impossible.
					log.WithFields(log.Fields{
						"Topic":      "Peer",
						"Key":        fsm.pConf.State.NeighborAddress,
						"State":      fsm.state.String(),
						"adminState": stateOp.State.String(),
					}).Panic("code logic bug")
				}
			}
		}
	}
}
// capAddPathFromConfig builds the ADD-PATH capability advertised to the
// peer from the per-AFI/SAFI add-paths configuration, or returns nil when
// no family has send or receive enabled.
func capAddPathFromConfig(pConf *config.Neighbor) bgp.ParameterCapabilityInterface {
	tl := make([]*bgp.CapAddPathTuple, 0, len(pConf.AfiSafis))
	for _, af := range pConf.AfiSafis {
		var m bgp.BGPAddPathMode
		if af.AddPaths.State.Receive {
			m |= bgp.BGP_ADD_PATH_RECEIVE
		}
		if af.AddPaths.State.SendMax > 0 {
			m |= bgp.BGP_ADD_PATH_SEND
		}
		if m == 0 {
			continue
		}
		tl = append(tl, bgp.NewCapAddPathTuple(af.State.Family, m))
	}
	if len(tl) == 0 {
		return nil
	}
	return bgp.NewCapAddPath(tl)
}
// capabilitiesFromConfig assembles the capabilities we advertise in our
// OPEN message: route-refresh, multi-protocol (one per configured
// AFI/SAFI), 4-octet AS, graceful restart (optionally long-lived),
// extended nexthop and ADD-PATH, all driven by the neighbor configuration.
func capabilitiesFromConfig(pConf *config.Neighbor) []bgp.ParameterCapabilityInterface {
	caps := make([]bgp.ParameterCapabilityInterface, 0, 4)
	caps = append(caps, bgp.NewCapRouteRefresh())
	for _, af := range pConf.AfiSafis {
		caps = append(caps, bgp.NewCapMultiProtocol(af.State.Family))
	}
	caps = append(caps, bgp.NewCapFourOctetASNumber(pConf.Config.LocalAs))
	if c := pConf.GracefulRestart.Config; c.Enabled {
		tuples := []*bgp.CapGracefulRestartTuple{}
		ltuples := []*bgp.CapLongLivedGracefulRestartTuple{}
		// RFC 4724 4.1
		// To re-establish the session with its peer, the Restarting Speaker
		// MUST set the "Restart State" bit in the Graceful Restart Capability
		// of the OPEN message.
		restarting := pConf.GracefulRestart.State.LocalRestarting
		if !c.HelperOnly {
			for i, rf := range pConf.AfiSafis {
				if m := rf.MpGracefulRestart.Config; m.Enabled {
					// When restarting, always flag forwaring bit.
					// This can be a lie, depending on how gobgpd is used.
					// For a route-server use-case, since a route-server
					// itself doesn't forward packets, and the dataplane
					// is a l2 switch which continues to work with no
					// relation to bgpd, this behavior is ok.
					// TODO consideration of other use-cases
					tuples = append(tuples, bgp.NewCapGracefulRestartTuple(rf.State.Family, restarting))
					pConf.AfiSafis[i].MpGracefulRestart.State.Advertised = true
				}
				if m := rf.LongLivedGracefulRestart.Config; m.Enabled {
					ltuples = append(ltuples, bgp.NewCapLongLivedGracefulRestartTuple(rf.State.Family, restarting, m.RestartTime))
				}
			}
		}
		restartTime := c.RestartTime
		notification := c.NotificationEnabled
		caps = append(caps, bgp.NewCapGracefulRestart(restarting, notification, restartTime, tuples))
		if c.LongLivedEnabled {
			caps = append(caps, bgp.NewCapLongLivedGracefulRestart(ltuples))
		}
	}
	// Extended Nexthop Capability (Code 5)
	tuples := []*bgp.CapExtendedNexthopTuple{}
	families, _ := config.AfiSafis(pConf.AfiSafis).ToRfList()
	for _, family := range families {
		if family == bgp.RF_IPv6_UC {
			continue
		}
		tuple := bgp.NewCapExtendedNexthopTuple(family, bgp.AFI_IP6)
		tuples = append(tuples, tuple)
	}
	if len(tuples) != 0 {
		caps = append(caps, bgp.NewCapExtendedNexthop(tuples))
	}
	// ADD-PATH Capability
	// BUGFIX: reuse the value bound in the if-initializer instead of
	// calling capAddPathFromConfig a second time.
	if c := capAddPathFromConfig(pConf); c != nil {
		caps = append(caps, c)
	}
	return caps
}
// buildopen constructs the OPEN message we send to the peer, carrying our
// capabilities, configured hold time, router ID, and local AS (substituted
// with AS_TRANS when it does not fit in two octets, per RFC 6793).
func buildopen(gConf *config.Global, pConf *config.Neighbor) *bgp.BGPMessage {
	opt := bgp.NewOptionParameterCapability(capabilitiesFromConfig(pConf))
	as := pConf.Config.LocalAs
	if as > (1<<16)-1 {
		as = bgp.AS_TRANS
	}
	holdTime := uint16(pConf.Timers.Config.HoldTime)
	return bgp.NewBGPOpenMessage(uint16(as), holdTime, gConf.Config.RouterId,
		[]bgp.OptionParameterInterface{opt})
}
func readAll(conn net.Conn, length int) ([]byte, error) {
buf := make([]byte, length)
_, err := io.ReadFull(conn, buf)
if err != nil {
return nil, err
}
return buf, nil
}
// getPathAttrFromBGPUpdate returns the first path attribute of the given
// type in the UPDATE message, or nil when the attribute is absent.
func getPathAttrFromBGPUpdate(m *bgp.BGPUpdate, typ bgp.BGPAttrType) bgp.PathAttributeInterface {
	for _, attr := range m.PathAttributes {
		if attr.GetType() != typ {
			continue
		}
		return attr
	}
	return nil
}
// hasOwnASLoop reports whether ownAS occurs more than limit times across
// all segments of the given AS path attribute.
func hasOwnASLoop(ownAS uint32, limit int, asPath *bgp.PathAttributeAsPath) bool {
	count := 0
	for _, param := range asPath.Value {
		for _, asn := range param.GetAS() {
			if asn != ownAS {
				continue
			}
			count++
			if count > limit {
				return true
			}
		}
	}
	return false
}
// extractRouteFamily derives the route family from an MP_REACH_NLRI or
// MP_UNREACH_NLRI attribute; any other attribute type yields nil.
func extractRouteFamily(p *bgp.PathAttributeInterface) *bgp.RouteFamily {
	var afi uint16
	var safi uint8
	switch a := (*p).(type) {
	case *bgp.PathAttributeMpReachNLRI:
		afi, safi = a.AFI, a.SAFI
	case *bgp.PathAttributeMpUnreachNLRI:
		afi, safi = a.AFI, a.SAFI
	default:
		return nil
	}
	rf := bgp.AfiSafiToRouteFamily(afi, safi)
	return &rf
}
// afiSafiDisable marks the given route family as disabled in the neighbor
// configuration and strips the matching multi-protocol capability from the
// negotiated capability map. It returns the family's display name.
func (h *fsmHandler) afiSafiDisable(rf bgp.RouteFamily) string {
	h.fsm.lock.Lock()
	defer h.fsm.lock.Unlock()
	name := bgp.AddressFamilyNameMap[rf]
	for i := range h.fsm.pConf.AfiSafis {
		if string(h.fsm.pConf.AfiSafis[i].Config.AfiSafiName) == name {
			h.fsm.pConf.AfiSafis[i].State.Enabled = false
			break
		}
	}
	kept := make([]bgp.ParameterCapabilityInterface, 0)
	for _, c := range h.fsm.capMap[bgp.BGP_CAP_MULTIPROTOCOL] {
		if c.(*bgp.CapMultiProtocol).CapValue != rf {
			kept = append(kept, c)
		}
	}
	h.fsm.capMap[bgp.BGP_CAP_MULTIPROTOCOL] = kept
	return name
}
// handlingError decides how a malformed message is treated. With revised
// error handling (RFC 7606) enabled and an UPDATE message, the decision
// embedded in the MessageError is honored (attribute discard,
// treat-as-withdraw, or AFI/SAFI disable); otherwise the session is reset.
// For treat-as-withdraw the message body is rewritten in place.
func (h *fsmHandler) handlingError(m *bgp.BGPMessage, e error, useRevisedError bool) bgp.ErrorHandling {
	handling := bgp.ERROR_HANDLING_NONE
	if m.Header.Type == bgp.BGP_MSG_UPDATE && useRevisedError {
		factor := e.(*bgp.MessageError)
		handling = factor.ErrorHandling
		switch handling {
		case bgp.ERROR_HANDLING_ATTRIBUTE_DISCARD:
			h.fsm.lock.RLock()
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   h.fsm.pConf.State.NeighborAddress,
				"State": h.fsm.state.String(),
				"error": e,
			}).Warn("Some attributes were discarded")
			h.fsm.lock.RUnlock()
		case bgp.ERROR_HANDLING_TREAT_AS_WITHDRAW:
			// Rewrite the UPDATE so all announced routes become withdrawals.
			m.Body = bgp.TreatAsWithdraw(m.Body.(*bgp.BGPUpdate))
			h.fsm.lock.RLock()
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   h.fsm.pConf.State.NeighborAddress,
				"State": h.fsm.state.String(),
				"error": e,
			}).Warn("the received Update message was treated as withdraw")
			h.fsm.lock.RUnlock()
		case bgp.ERROR_HANDLING_AFISAFI_DISABLE:
			rf := extractRouteFamily(factor.ErrorAttribute)
			if rf == nil {
				h.fsm.lock.RLock()
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   h.fsm.pConf.State.NeighborAddress,
					"State": h.fsm.state.String(),
				}).Warn("Error occurred during AFI/SAFI disabling")
				h.fsm.lock.RUnlock()
			} else {
				n := h.afiSafiDisable(*rf)
				h.fsm.lock.RLock()
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   h.fsm.pConf.State.NeighborAddress,
					"State": h.fsm.state.String(),
					"error": e,
				}).Warnf("Capability %s was disabled", n)
				h.fsm.lock.RUnlock()
			}
		}
	} else {
		handling = bgp.ERROR_HANDLING_SESSION_RESET
	}
	return handling
}
// recvMessageWithError reads and parses one BGP message from the peer
// connection. It returns an fsmMsg to hand to the caller (or nil for
// messages fully consumed here, such as KEEPALIVE and NOTIFICATION), plus
// a non-nil error when the session should be torn down. Read failures and
// received NOTIFICATIONs are also reported via h.stateReasonCh.
func (h *fsmHandler) recvMessageWithError() (*fsmMsg, error) {
	sendToStateReasonCh := func(typ fsmStateReasonType, notif *bgp.BGPMessage) {
		// probably doesn't happen but be cautious: never block if the
		// channel buffer is already full.
		select {
		case h.stateReasonCh <- *newfsmStateReason(typ, notif, nil):
		default:
		}
	}
	// Read the fixed-size header first; its length field tells us how much
	// body to read.
	headerBuf, err := readAll(h.conn, bgp.BGP_HEADER_LENGTH)
	if err != nil {
		sendToStateReasonCh(fsmReadFailed, nil)
		return nil, err
	}
	hd := &bgp.BGPHeader{}
	err = hd.DecodeFromBytes(headerBuf)
	if err != nil {
		h.fsm.bgpMessageStateUpdate(0, true)
		h.fsm.lock.RLock()
		log.WithFields(log.Fields{
			"Topic": "Peer",
			"Key":   h.fsm.pConf.State.NeighborAddress,
			"State": h.fsm.state.String(),
			"error": err,
		}).Warn("Session will be reset due to malformed BGP Header")
		fmsg := &fsmMsg{
			fsm:     h.fsm,
			MsgType: fsmMsgBGPMessage,
			MsgSrc:  h.fsm.pConf.State.NeighborAddress,
			MsgData: err,
		}
		h.fsm.lock.RUnlock()
		return fmsg, err
	}
	bodyBuf, err := readAll(h.conn, int(hd.Len)-bgp.BGP_HEADER_LENGTH)
	if err != nil {
		sendToStateReasonCh(fsmReadFailed, nil)
		return nil, err
	}
	now := time.Now()
	handling := bgp.ERROR_HANDLING_NONE
	h.fsm.lock.RLock()
	useRevisedError := h.fsm.pConf.ErrorHandling.Config.TreatAsWithdraw
	options := h.fsm.marshallingOptions
	h.fsm.lock.RUnlock()
	m, err := bgp.ParseBGPBody(hd, bodyBuf, options)
	if err != nil {
		// Parse failure: decide between reset / discard / withdraw / disable.
		handling = h.handlingError(m, err, useRevisedError)
		h.fsm.bgpMessageStateUpdate(0, true)
	} else {
		h.fsm.bgpMessageStateUpdate(m.Header.Type, true)
		err = bgp.ValidateBGPMessage(m)
	}
	h.fsm.lock.RLock()
	fmsg := &fsmMsg{
		fsm:       h.fsm,
		MsgType:   fsmMsgBGPMessage,
		MsgSrc:    h.fsm.pConf.State.NeighborAddress,
		timestamp: now,
	}
	h.fsm.lock.RUnlock()
	switch handling {
	case bgp.ERROR_HANDLING_AFISAFI_DISABLE:
		fmsg.MsgData = m
		return fmsg, nil
	case bgp.ERROR_HANDLING_SESSION_RESET:
		h.fsm.lock.RLock()
		log.WithFields(log.Fields{
			"Topic": "Peer",
			"Key":   h.fsm.pConf.State.NeighborAddress,
			"State": h.fsm.state.String(),
			"error": err,
		}).Warn("Session will be reset due to malformed BGP message")
		h.fsm.lock.RUnlock()
		fmsg.MsgData = err
		return fmsg, err
	default:
		fmsg.MsgData = m
		h.fsm.lock.RLock()
		establishedState := h.fsm.state == bgp.BGP_FSM_ESTABLISHED
		h.fsm.lock.RUnlock()
		if establishedState {
			switch m.Header.Type {
			case bgp.BGP_MSG_ROUTE_REFRESH:
				fmsg.MsgType = fsmMsgRouteRefresh
			case bgp.BGP_MSG_UPDATE:
				body := m.Body.(*bgp.BGPUpdate)
				// NOTE(review): pConf/gConf are read here without the lock,
				// unlike most accesses in this function — confirm intended.
				isEBGP := h.fsm.pConf.IsEBGPPeer(h.fsm.gConf)
				isConfed := h.fsm.pConf.IsConfederationMember(h.fsm.gConf)
				// Keep the raw wire bytes for monitoring (BMP/MRT) consumers.
				fmsg.payload = make([]byte, len(headerBuf)+len(bodyBuf))
				copy(fmsg.payload, headerBuf)
				copy(fmsg.payload[len(headerBuf):], bodyBuf)
				h.fsm.lock.RLock()
				rfMap := h.fsm.rfMap
				h.fsm.lock.RUnlock()
				ok, err := bgp.ValidateUpdateMsg(body, rfMap, isEBGP, isConfed)
				if !ok {
					handling = h.handlingError(m, err, useRevisedError)
				}
				if handling == bgp.ERROR_HANDLING_SESSION_RESET {
					h.fsm.lock.RLock()
					log.WithFields(log.Fields{
						"Topic": "Peer",
						"Key":   h.fsm.pConf.State.NeighborAddress,
						"State": h.fsm.state.String(),
						"error": err,
					}).Warn("Session will be reset due to malformed BGP update message")
					h.fsm.lock.RUnlock()
					fmsg.MsgData = err
					return fmsg, err
				}
				// BMP statistics: count withdrawals (classic and MP form).
				if routes := len(body.WithdrawnRoutes); routes > 0 {
					h.fsm.bmpStatsUpdate(bmp.BMP_STAT_TYPE_WITHDRAW_UPDATE, 1)
					h.fsm.bmpStatsUpdate(bmp.BMP_STAT_TYPE_WITHDRAW_PREFIX, routes)
				} else if attr := getPathAttrFromBGPUpdate(body, bgp.BGP_ATTR_TYPE_MP_UNREACH_NLRI); attr != nil {
					mpUnreach := attr.(*bgp.PathAttributeMpUnreachNLRI)
					if routes = len(mpUnreach.Value); routes > 0 {
						h.fsm.bmpStatsUpdate(bmp.BMP_STAT_TYPE_WITHDRAW_UPDATE, 1)
						h.fsm.bmpStatsUpdate(bmp.BMP_STAT_TYPE_WITHDRAW_PREFIX, routes)
					}
				}
				// Normalize AS4_PATH/AS4_AGGREGATOR into the 4-byte forms.
				table.UpdatePathAttrs4ByteAs(body)
				if err = table.UpdatePathAggregator4ByteAs(body); err != nil {
					fmsg.MsgData = err
					return fmsg, err
				}
				h.fsm.lock.RLock()
				peerInfo := h.fsm.peerInfo
				h.fsm.lock.RUnlock()
				fmsg.PathList = table.ProcessMessage(m, peerInfo, fmsg.timestamp)
				fallthrough
			case bgp.BGP_MSG_KEEPALIVE:
				// if the length of h.holdTimerResetCh
				// isn't zero, the timer will be reset
				// soon anyway.
				select {
				case h.holdTimerResetCh <- true:
				default:
				}
				// Bare KEEPALIVEs are consumed here; UPDATEs fall through
				// from above and are returned to the caller.
				if m.Header.Type == bgp.BGP_MSG_KEEPALIVE {
					return nil, nil
				}
			case bgp.BGP_MSG_NOTIFICATION:
				body := m.Body.(*bgp.BGPNotification)
				if body.ErrorCode == bgp.BGP_ERROR_CEASE && (body.ErrorSubcode == bgp.BGP_ERROR_SUB_ADMINISTRATIVE_SHUTDOWN || body.ErrorSubcode == bgp.BGP_ERROR_SUB_ADMINISTRATIVE_RESET) {
					// RFC 8203: shutdown communication may accompany
					// administrative shutdown/reset.
					communication, rest := decodeAdministrativeCommunication(body.Data)
					h.fsm.lock.RLock()
					log.WithFields(log.Fields{
						"Topic":               "Peer",
						"Key":                 h.fsm.pConf.State.NeighborAddress,
						"Code":                body.ErrorCode,
						"Subcode":             body.ErrorSubcode,
						"Communicated-Reason": communication,
						"Data":                rest,
					}).Warn("received notification")
					h.fsm.lock.RUnlock()
				} else {
					h.fsm.lock.RLock()
					log.WithFields(log.Fields{
						"Topic":   "Peer",
						"Key":     h.fsm.pConf.State.NeighborAddress,
						"Code":    body.ErrorCode,
						"Subcode": body.ErrorSubcode,
						"Data":    body.Data,
					}).Warn("received notification")
					h.fsm.lock.RUnlock()
				}
				h.fsm.lock.RLock()
				s := h.fsm.pConf.GracefulRestart.State
				hardReset := s.Enabled && s.NotificationEnabled && body.ErrorCode == bgp.BGP_ERROR_CEASE && body.ErrorSubcode == bgp.BGP_ERROR_SUB_HARD_RESET
				h.fsm.lock.RUnlock()
				if hardReset {
					sendToStateReasonCh(fsmHardReset, m)
				} else {
					sendToStateReasonCh(fsmNotificationRecv, m)
				}
				return nil, nil
			}
		}
	}
	return fmsg, nil
}
// recvMessage reads a single message from the peer and, when one is
// produced, forwards it to h.msgCh. The channel is closed on exit so the
// state loop notices that this receive attempt has finished.
func (h *fsmHandler) recvMessage(ctx context.Context, wg *sync.WaitGroup) error {
	defer func() {
		h.msgCh.Close()
		wg.Done()
	}()
	// The error is surfaced through h.stateReasonCh inside
	// recvMessageWithError, so it is intentionally dropped here.
	if fmsg, _ := h.recvMessageWithError(); fmsg != nil {
		h.msgCh.In() <- fmsg
	}
	return nil
}
// open2Cap digests the capabilities advertised in a received OPEN message.
// It returns the capability map (code -> capabilities) and the per-family
// ADD-PATH mode negotiated between local configuration and the remote
// advertisement.
func open2Cap(open *bgp.BGPOpen, n *config.Neighbor) (map[bgp.BGPCapabilityCode][]bgp.ParameterCapabilityInterface, map[bgp.RouteFamily]bgp.BGPAddPathMode) {
	capMap := make(map[bgp.BGPCapabilityCode][]bgp.ParameterCapabilityInterface)
	for _, p := range open.OptParams {
		param, isCap := p.(*bgp.OptionParameterCapability)
		if !isCap {
			continue
		}
		for _, c := range param.Capability {
			capMap[c.Code()] = append(capMap[c.Code()], c)
		}
	}
	// Collapse multiple ADD-PATH capabilities into a single one carrying
	// all tuples.
	if caps, y := capMap[bgp.BGP_CAP_ADD_PATH]; y {
		all := make([]*bgp.CapAddPathTuple, 0, len(caps))
		for _, c := range caps {
			all = append(all, c.(*bgp.CapAddPath).Tuples...)
		}
		capMap[bgp.BGP_CAP_ADD_PATH] = []bgp.ParameterCapabilityInterface{bgp.NewCapAddPath(all)}
	}
	// An OPEN without any multi-protocol capability implies IPv4 unicast.
	if _, y := capMap[bgp.BGP_CAP_MULTIPROTOCOL]; !y {
		capMap[bgp.BGP_CAP_MULTIPROTOCOL] = []bgp.ParameterCapabilityInterface{bgp.NewCapMultiProtocol(bgp.RF_IPv4_UC)}
	}
	// Determine the remote ADD-PATH mode for every family the peer speaks.
	remote := make(map[bgp.RouteFamily]bgp.BGPAddPathMode)
	for _, c := range capMap[bgp.BGP_CAP_MULTIPROTOCOL] {
		family := c.(*bgp.CapMultiProtocol).CapValue
		remote[family] = bgp.BGP_ADD_PATH_NONE
		for _, a := range capMap[bgp.BGP_CAP_ADD_PATH] {
			for _, tuple := range a.(*bgp.CapAddPath).Tuples {
				if tuple.RouteFamily == family {
					remote[family] = tuple.Mode
				}
			}
		}
	}
	// Negotiate per family: we may send only if the peer will receive,
	// and receive only if the peer will send.
	local := n.CreateRfMap()
	negotiated := make(map[bgp.RouteFamily]bgp.BGPAddPathMode)
	for family, localMode := range local {
		remoteMode, y := remote[family]
		if !y {
			continue
		}
		mode := bgp.BGP_ADD_PATH_NONE
		if localMode&bgp.BGP_ADD_PATH_SEND > 0 && remoteMode&bgp.BGP_ADD_PATH_RECEIVE > 0 {
			mode |= bgp.BGP_ADD_PATH_SEND
		}
		if localMode&bgp.BGP_ADD_PATH_RECEIVE > 0 && remoteMode&bgp.BGP_ADD_PATH_SEND > 0 {
			mode |= bgp.BGP_ADD_PATH_RECEIVE
		}
		negotiated[family] = mode
	}
	return capMap, negotiated
}
// opensent implements the BGP OPENSENT state: it sends our OPEN message,
// starts a receive goroutine, and waits for the peer's OPEN. On a valid
// OPEN it negotiates AS/hold-time/capabilities (including graceful restart
// and ADD-PATH), replies with a KEEPALIVE, and moves to OPENCONFIRM.
func (h *fsmHandler) opensent(ctx context.Context) (bgp.FSMState, *fsmStateReason) {
	fsm := h.fsm
	fsm.lock.RLock()
	m := buildopen(fsm.gConf, fsm.pConf)
	fsm.lock.RUnlock()
	b, _ := m.Serialize()
	fsm.conn.Write(b)
	fsm.bgpMessageStateUpdate(m.Header.Type, false)
	h.msgCh = channels.NewInfiniteChannel()
	fsm.lock.RLock()
	h.conn = fsm.conn
	fsm.lock.RUnlock()
	var wg sync.WaitGroup
	wg.Add(1)
	defer wg.Wait()
	go h.recvMessage(ctx, &wg)
	// RFC 4271 P.60
	// sets its HoldTimer to a large value
	// A HoldTimer value of 4 minutes is suggested as a "large value"
	// for the HoldTimer
	fsm.lock.RLock()
	holdTimer := time.NewTimer(time.Second * time.Duration(fsm.opensentHoldTime))
	fsm.lock.RUnlock()
	for {
		select {
		case <-ctx.Done():
			h.conn.Close()
			return -1, newfsmStateReason(fsmDying, nil, nil)
		case conn, ok := <-fsm.connCh:
			if !ok {
				break
			}
			// We already have a connection in progress; reject new ones.
			conn.Close()
			fsm.lock.RLock()
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.State.NeighborAddress,
				"State": fsm.state.String(),
			}).Warn("Closed an accepted connection")
			fsm.lock.RUnlock()
		case <-fsm.gracefulRestartTimer.C:
			fsm.lock.RLock()
			restarting := fsm.pConf.GracefulRestart.State.PeerRestarting
			fsm.lock.RUnlock()
			if restarting {
				fsm.lock.RLock()
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   fsm.pConf.State.NeighborAddress,
					"State": fsm.state.String(),
				}).Warn("graceful restart timer expired")
				fsm.lock.RUnlock()
				h.conn.Close()
				return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmRestartTimerExpired, nil, nil)
			}
		case i, ok := <-h.msgCh.Out():
			if !ok {
				continue
			}
			e := i.(*fsmMsg)
			switch m := e.MsgData.(type) {
			case *bgp.BGPMessage:
				if m.Header.Type == bgp.BGP_MSG_OPEN {
					fsm.lock.Lock()
					fsm.recvOpen = m
					fsm.lock.Unlock()
					body := m.Body.(*bgp.BGPOpen)
					fsm.lock.RLock()
					fsmPeerAS := fsm.pConf.Config.PeerAs
					fsm.lock.RUnlock()
					peerAs, err := bgp.ValidateOpenMsg(body, fsmPeerAS)
					if err != nil {
						m, _ := fsm.sendNotificationFromErrorMsg(err.(*bgp.MessageError))
						return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmInvalidMsg, m, nil)
					}
					// ASN negotiation was skipped
					fsm.lock.RLock()
					asnNegotiationSkipped := fsm.pConf.Config.PeerAs == 0
					fsm.lock.RUnlock()
					if asnNegotiationSkipped {
						// PeerAs unset: infer internal/external from the
						// received AS number.
						fsm.lock.Lock()
						typ := config.PEER_TYPE_EXTERNAL
						if fsm.peerInfo.LocalAS == peerAs {
							typ = config.PEER_TYPE_INTERNAL
						}
						fsm.pConf.State.PeerType = typ
						log.WithFields(log.Fields{
							"Topic": "Peer",
							"Key":   fsm.pConf.State.NeighborAddress,
							"State": fsm.state.String(),
						}).Infof("skipped asn negotiation: peer-as: %d, peer-type: %s", peerAs, typ)
						fsm.lock.Unlock()
					} else {
						fsm.lock.Lock()
						fsm.pConf.State.PeerType = fsm.pConf.Config.PeerType
						fsm.lock.Unlock()
					}
					fsm.lock.Lock()
					fsm.pConf.State.PeerAs = peerAs
					fsm.peerInfo.AS = peerAs
					fsm.peerInfo.ID = body.ID
					fsm.capMap, fsm.rfMap = open2Cap(body, fsm.pConf)
					// ADD-PATH changes the NLRI wire format, so enable the
					// corresponding marshalling option when negotiated.
					if _, y := fsm.capMap[bgp.BGP_CAP_ADD_PATH]; y {
						fsm.marshallingOptions = &bgp.MarshallingOption{
							AddPath: fsm.rfMap,
						}
					} else {
						fsm.marshallingOptions = nil
					}
					// calculate HoldTime
					// RFC 4271 P.13
					// a BGP speaker MUST calculate the value of the Hold Timer
					// by using the smaller of its configured Hold Time and the Hold Time
					// received in the OPEN message.
					holdTime := float64(body.HoldTime)
					myHoldTime := fsm.pConf.Timers.Config.HoldTime
					if holdTime > myHoldTime {
						fsm.pConf.Timers.State.NegotiatedHoldTime = myHoldTime
					} else {
						fsm.pConf.Timers.State.NegotiatedHoldTime = holdTime
					}
					// Shrink the keepalive interval proportionally when the
					// negotiated hold time is smaller than our configured one.
					keepalive := fsm.pConf.Timers.Config.KeepaliveInterval
					if n := fsm.pConf.Timers.State.NegotiatedHoldTime; n < myHoldTime {
						keepalive = n / 3
					}
					fsm.pConf.Timers.State.KeepaliveInterval = keepalive
					gr, ok := fsm.capMap[bgp.BGP_CAP_GRACEFUL_RESTART]
					if fsm.pConf.GracefulRestart.Config.Enabled && ok {
						state := &fsm.pConf.GracefulRestart.State
						state.Enabled = true
						cap := gr[len(gr)-1].(*bgp.CapGracefulRestart)
						state.PeerRestartTime = uint16(cap.Time)
						for _, t := range cap.Tuples {
							n := bgp.AddressFamilyNameMap[bgp.AfiSafiToRouteFamily(t.AFI, t.SAFI)]
							for i, a := range fsm.pConf.AfiSafis {
								if string(a.Config.AfiSafiName) == n {
									fsm.pConf.AfiSafis[i].MpGracefulRestart.State.Enabled = true
									fsm.pConf.AfiSafis[i].MpGracefulRestart.State.Received = true
									break
								}
							}
						}
						// RFC 4724 4.1
						// To re-establish the session with its peer, the Restarting Speaker
						// MUST set the "Restart State" bit in the Graceful Restart Capability
						// of the OPEN message.
						if fsm.pConf.GracefulRestart.State.PeerRestarting && cap.Flags&0x08 == 0 {
							log.WithFields(log.Fields{
								"Topic": "Peer",
								"Key":   fsm.pConf.State.NeighborAddress,
								"State": fsm.state.String(),
							}).Warn("restart flag is not set")
							// just ignore
						}
						// RFC 4724 3
						// The most significant bit is defined as the Restart State (R)
						// bit, ...(snip)... When set (value 1), this bit
						// indicates that the BGP speaker has restarted, and its peer MUST
						// NOT wait for the End-of-RIB marker from the speaker before
						// advertising routing information to the speaker.
						if fsm.pConf.GracefulRestart.State.LocalRestarting && cap.Flags&0x08 != 0 {
							log.WithFields(log.Fields{
								"Topic": "Peer",
								"Key":   fsm.pConf.State.NeighborAddress,
								"State": fsm.state.String(),
							}).Debug("peer has restarted, skipping wait for EOR")
							for i := range fsm.pConf.AfiSafis {
								fsm.pConf.AfiSafis[i].MpGracefulRestart.State.EndOfRibReceived = true
							}
						}
						if fsm.pConf.GracefulRestart.Config.NotificationEnabled && cap.Flags&0x04 > 0 {
							fsm.pConf.GracefulRestart.State.NotificationEnabled = true
						}
					}
					llgr, ok2 := fsm.capMap[bgp.BGP_CAP_LONG_LIVED_GRACEFUL_RESTART]
					if fsm.pConf.GracefulRestart.Config.LongLivedEnabled && ok && ok2 {
						fsm.pConf.GracefulRestart.State.LongLivedEnabled = true
						cap := llgr[len(llgr)-1].(*bgp.CapLongLivedGracefulRestart)
						for _, t := range cap.Tuples {
							n := bgp.AddressFamilyNameMap[bgp.AfiSafiToRouteFamily(t.AFI, t.SAFI)]
							for i, a := range fsm.pConf.AfiSafis {
								if string(a.Config.AfiSafiName) == n {
									fsm.pConf.AfiSafis[i].LongLivedGracefulRestart.State.Enabled = true
									fsm.pConf.AfiSafis[i].LongLivedGracefulRestart.State.Received = true
									fsm.pConf.AfiSafis[i].LongLivedGracefulRestart.State.PeerRestartTime = t.RestartTime
									break
								}
							}
						}
					}
					fsm.lock.Unlock()
					msg := bgp.NewBGPKeepAliveMessage()
					b, _ := msg.Serialize()
					fsm.conn.Write(b)
					fsm.bgpMessageStateUpdate(msg.Header.Type, false)
					return bgp.BGP_FSM_OPENCONFIRM, newfsmStateReason(fsmOpenMsgReceived, nil, nil)
				} else {
					// send notification?
					h.conn.Close()
					return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmInvalidMsg, nil, nil)
				}
			case *bgp.MessageError:
				msg, _ := fsm.sendNotificationFromErrorMsg(m)
				return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmInvalidMsg, msg, nil)
			default:
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   fsm.pConf.State.NeighborAddress,
					"State": fsm.state.String(),
					"Data":  e.MsgData,
				}).Panic("unknown msg type")
			}
		case err := <-h.stateReasonCh:
			h.conn.Close()
			return bgp.BGP_FSM_IDLE, &err
		case <-holdTimer.C:
			m, _ := fsm.sendNotification(bgp.BGP_ERROR_HOLD_TIMER_EXPIRED, 0, nil, "hold timer expired")
			return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmHoldTimerExpired, m, nil)
		case stateOp := <-fsm.adminStateCh:
			err := h.changeadminState(stateOp.State)
			if err == nil {
				switch stateOp.State {
				case adminStateDown:
					h.conn.Close()
					// NOTE(review): `m` here is the OPEN message built at the
					// top of this function (the inner switch's m is out of
					// scope) — confirm this is the intended reason payload.
					return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmAdminDown, m, nil)
				case adminStateUp:
					log.WithFields(log.Fields{
						"Topic":      "Peer",
						"Key":        fsm.pConf.State.NeighborAddress,
						"State":      fsm.state.String(),
						"adminState": stateOp.State.String(),
					}).Panic("code logic bug")
				}
			}
		}
	}
}
// keepaliveTicker returns a ticker firing at the negotiated keepalive
// interval (at least one second). When no hold time was negotiated it
// returns an empty Ticker whose nil channel never fires, effectively
// disabling keepalives.
func keepaliveTicker(fsm *fsm) *time.Ticker {
	fsm.lock.RLock()
	defer fsm.lock.RUnlock()
	if fsm.pConf.Timers.State.NegotiatedHoldTime == 0 {
		return &time.Ticker{}
	}
	interval := time.Second * time.Duration(fsm.pConf.Timers.State.KeepaliveInterval)
	if interval == 0 {
		interval = time.Second
	}
	return time.NewTicker(interval)
}
// openconfirm implements the BGP OPENCONFIRM state: it sends periodic
// KEEPALIVEs and waits for the peer's KEEPALIVE (transitioning to
// ESTABLISHED) while enforcing the negotiated hold timer and handling
// graceful-restart expiry and admin-state changes.
func (h *fsmHandler) openconfirm(ctx context.Context) (bgp.FSMState, *fsmStateReason) {
	fsm := h.fsm
	ticker := keepaliveTicker(fsm)
	h.msgCh = channels.NewInfiniteChannel()
	// The RLock below covers both the conn copy and the hold-timer setup.
	fsm.lock.RLock()
	h.conn = fsm.conn
	var wg sync.WaitGroup
	defer wg.Wait()
	wg.Add(1)
	go h.recvMessage(ctx, &wg)
	var holdTimer *time.Timer
	if fsm.pConf.Timers.State.NegotiatedHoldTime == 0 {
		// Zero negotiated hold time disables the hold timer; an empty
		// Timer's nil channel never fires.
		holdTimer = &time.Timer{}
	} else {
		// RFC 4271 P.65
		// sets the HoldTimer according to the negotiated value
		holdTimer = time.NewTimer(time.Second * time.Duration(fsm.pConf.Timers.State.NegotiatedHoldTime))
	}
	fsm.lock.RUnlock()
	for {
		select {
		case <-ctx.Done():
			h.conn.Close()
			return -1, newfsmStateReason(fsmDying, nil, nil)
		case conn, ok := <-fsm.connCh:
			if !ok {
				break
			}
			// We already have a connection in progress; reject new ones.
			conn.Close()
			fsm.lock.RLock()
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.State.NeighborAddress,
				"State": fsm.state.String(),
			}).Warn("Closed an accepted connection")
			fsm.lock.RUnlock()
		case <-fsm.gracefulRestartTimer.C:
			fsm.lock.RLock()
			restarting := fsm.pConf.GracefulRestart.State.PeerRestarting
			fsm.lock.RUnlock()
			if restarting {
				fsm.lock.RLock()
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   fsm.pConf.State.NeighborAddress,
					"State": fsm.state.String(),
				}).Warn("graceful restart timer expired")
				fsm.lock.RUnlock()
				h.conn.Close()
				return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmRestartTimerExpired, nil, nil)
			}
		case <-ticker.C:
			m := bgp.NewBGPKeepAliveMessage()
			b, _ := m.Serialize()
			// TODO: check error
			fsm.conn.Write(b)
			fsm.bgpMessageStateUpdate(m.Header.Type, false)
		case i, ok := <-h.msgCh.Out():
			if !ok {
				continue
			}
			e := i.(*fsmMsg)
			switch m := e.MsgData.(type) {
			case *bgp.BGPMessage:
				if m.Header.Type == bgp.BGP_MSG_KEEPALIVE {
					return bgp.BGP_FSM_ESTABLISHED, newfsmStateReason(fsmOpenMsgNegotiated, nil, nil)
				}
				// send notification ?
				h.conn.Close()
				return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmInvalidMsg, nil, nil)
			case *bgp.MessageError:
				msg, _ := fsm.sendNotificationFromErrorMsg(m)
				return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmInvalidMsg, msg, nil)
			default:
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   fsm.pConf.State.NeighborAddress,
					"State": fsm.state.String(),
					"Data":  e.MsgData,
				}).Panic("unknown msg type")
			}
		case err := <-h.stateReasonCh:
			h.conn.Close()
			return bgp.BGP_FSM_IDLE, &err
		case <-holdTimer.C:
			m, _ := fsm.sendNotification(bgp.BGP_ERROR_HOLD_TIMER_EXPIRED, 0, nil, "hold timer expired")
			return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmHoldTimerExpired, m, nil)
		case stateOp := <-fsm.adminStateCh:
			err := h.changeadminState(stateOp.State)
			if err == nil {
				switch stateOp.State {
				case adminStateDown:
					h.conn.Close()
					return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmAdminDown, nil, nil)
				case adminStateUp:
					log.WithFields(log.Fields{
						"Topic":      "Peer",
						"Key":        fsm.pConf.State.NeighborAddress,
						"State":      fsm.state.String(),
						"adminState": stateOp.State.String(),
					}).Panic("code logic bug")
				}
			}
		}
	}
}
// sendMessageloop drains h.outgoing and writes BGP messages to the peer
// connection until the context is cancelled or the connection becomes
// unusable. It also emits periodic KEEPALIVEs driven by keepaliveTicker.
// Runs as a goroutine of the established state; completion is signalled
// via wg.
func (h *fsmHandler) sendMessageloop(ctx context.Context, wg *sync.WaitGroup) error {
	defer wg.Done()
	conn := h.conn
	fsm := h.fsm
	ticker := keepaliveTicker(fsm)
	// send serializes and writes a single message. It returns a non-nil
	// error only when the connection must not be used any further (deadline
	// setup failed, the write failed, or a NOTIFICATION was just sent) —
	// the caller uses that to stop the loop. A serialization failure is
	// logged, counted, and swallowed so the loop keeps running.
	send := func(m *bgp.BGPMessage) error {
		fsm.lock.RLock()
		if fsm.twoByteAsTrans && m.Header.Type == bgp.BGP_MSG_UPDATE {
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.State.NeighborAddress,
				"State": fsm.state.String(),
				"Data":  m,
			}).Debug("update for 2byte AS peer")
			// Peer only negotiated 2-byte AS numbers: rewrite the
			// 4-byte AS path/aggregator attributes before sending.
			table.UpdatePathAttrs2ByteAs(m.Body.(*bgp.BGPUpdate))
			table.UpdatePathAggregator2ByteAs(m.Body.(*bgp.BGPUpdate))
		}
		b, err := m.Serialize(h.fsm.marshallingOptions)
		fsm.lock.RUnlock()
		if err != nil {
			fsm.lock.RLock()
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.State.NeighborAddress,
				"State": fsm.state.String(),
				"Data":  err,
			}).Warn("failed to serialize")
			fsm.lock.RUnlock()
			fsm.bgpMessageStateUpdate(0, false)
			// Not fatal for the connection: drop this message, keep looping.
			return nil
		}
		fsm.lock.RLock()
		err = conn.SetWriteDeadline(time.Now().Add(time.Second * time.Duration(fsm.pConf.Timers.State.NegotiatedHoldTime)))
		fsm.lock.RUnlock()
		if err != nil {
			h.stateReasonCh <- *newfsmStateReason(fsmWriteFailed, nil, nil)
			conn.Close()
			return fmt.Errorf("failed to set write deadline")
		}
		_, err = conn.Write(b)
		if err != nil {
			fsm.lock.RLock()
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.State.NeighborAddress,
				"State": fsm.state.String(),
				"Data":  err,
			}).Warn("failed to send")
			fsm.lock.RUnlock()
			// Tell the FSM the write failed, then stop this loop.
			h.stateReasonCh <- *newfsmStateReason(fsmWriteFailed, nil, nil)
			conn.Close()
			return fmt.Errorf("closed")
		}
		fsm.bgpMessageStateUpdate(m.Header.Type, false)
		switch m.Header.Type {
		case bgp.BGP_MSG_NOTIFICATION:
			body := m.Body.(*bgp.BGPNotification)
			if body.ErrorCode == bgp.BGP_ERROR_CEASE && (body.ErrorSubcode == bgp.BGP_ERROR_SUB_ADMINISTRATIVE_SHUTDOWN || body.ErrorSubcode == bgp.BGP_ERROR_SUB_ADMINISTRATIVE_RESET) {
				// Administrative shutdown/reset may carry an RFC 8203
				// style communication string; log it separately.
				communication, rest := decodeAdministrativeCommunication(body.Data)
				fsm.lock.RLock()
				log.WithFields(log.Fields{
					"Topic":               "Peer",
					"Key":                 fsm.pConf.State.NeighborAddress,
					"State":               fsm.state.String(),
					"Code":                body.ErrorCode,
					"Subcode":             body.ErrorSubcode,
					"Communicated-Reason": communication,
					"Data":                rest,
				}).Warn("sent notification")
				fsm.lock.RUnlock()
			} else {
				fsm.lock.RLock()
				log.WithFields(log.Fields{
					"Topic":   "Peer",
					"Key":     fsm.pConf.State.NeighborAddress,
					"State":   fsm.state.String(),
					"Code":    body.ErrorCode,
					"Subcode": body.ErrorSubcode,
					"Data":    body.Data,
				}).Warn("sent notification")
				fsm.lock.RUnlock()
			}
			// A NOTIFICATION terminates the session: report it and close.
			h.stateReasonCh <- *newfsmStateReason(fsmNotificationSent, m, nil)
			conn.Close()
			return fmt.Errorf("closed")
		case bgp.BGP_MSG_UPDATE:
			update := m.Body.(*bgp.BGPUpdate)
			fsm.lock.RLock()
			log.WithFields(log.Fields{
				"Topic":       "Peer",
				"Key":         fsm.pConf.State.NeighborAddress,
				"State":       fsm.state.String(),
				"nlri":        update.NLRI,
				"withdrawals": update.WithdrawnRoutes,
				"attributes":  update.PathAttributes,
			}).Debug("sent update")
			fsm.lock.RUnlock()
		default:
			fsm.lock.RLock()
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.State.NeighborAddress,
				"State": fsm.state.String(),
				"data":  m,
			}).Debug("sent")
			fsm.lock.RUnlock()
		}
		return nil
	}
	for {
		select {
		case <-ctx.Done():
			return nil
		case o := <-h.outgoing.Out():
			switch m := o.(type) {
			case *fsmOutgoingMsg:
				h.fsm.lock.RLock()
				options := h.fsm.marshallingOptions
				h.fsm.lock.RUnlock()
				// Send all path updates first, then any trailing notification.
				for _, msg := range table.CreateUpdateMsgFromPaths(m.Paths, options) {
					if err := send(msg); err != nil {
						return nil
					}
				}
				if m.Notification != nil {
					if m.StayIdle {
						// current user is only prefix-limit
						// fix me if this is not the case
						h.changeadminState(adminStatePfxCt)
					}
					if err := send(m.Notification); err != nil {
						return nil
					}
				}
			default:
				// Anything else on the queue (e.g. a state reason pushed by
				// the FSM to wake this goroutine) terminates the loop.
				return nil
			}
		case <-ticker.C:
			if err := send(bgp.NewBGPKeepAliveMessage()); err != nil {
				return nil
			}
		}
	}
}
// recvMessageloop receives messages from the peer and forwards them to
// h.msgCh until the receive side reports an error, then exits. Completion
// is signalled via wg.
func (h *fsmHandler) recvMessageloop(ctx context.Context, wg *sync.WaitGroup) error {
	defer wg.Done()
	for {
		msg, rerr := h.recvMessageWithError()
		// Forward whatever was decoded, even when an error follows.
		if msg != nil {
			h.msgCh.In() <- msg
		}
		if rerr == nil {
			continue
		}
		return nil
	}
}
// established runs the ESTABLISHED state of the BGP FSM: it spawns the send
// and receive goroutines and reacts to hold-timer expiry, admin-state
// changes, stray inbound connections, and errors reported by the I/O
// goroutines. It returns the next FSM state together with the reason for
// leaving ESTABLISHED.
func (h *fsmHandler) established(ctx context.Context) (bgp.FSMState, *fsmStateReason) {
	var wg sync.WaitGroup
	fsm := h.fsm
	fsm.lock.Lock()
	h.conn = fsm.conn
	fsm.lock.Unlock()
	defer wg.Wait()
	wg.Add(2)
	go h.sendMessageloop(ctx, &wg)
	h.msgCh = h.incoming
	go h.recvMessageloop(ctx, &wg)
	var holdTimer *time.Timer
	// A zero negotiated hold time disables the hold timer entirely; a bare
	// time.Timer has a nil channel, so its case never fires in the select.
	// NOTE(review): this first NegotiatedHoldTime read is not under
	// fsm.lock, unlike the one in the else branch — confirm intended.
	if fsm.pConf.Timers.State.NegotiatedHoldTime == 0 {
		holdTimer = &time.Timer{}
	} else {
		fsm.lock.RLock()
		holdTimer = time.NewTimer(time.Second * time.Duration(fsm.pConf.Timers.State.NegotiatedHoldTime))
		fsm.lock.RUnlock()
	}
	// We reached ESTABLISHED, so any pending graceful-restart timer is moot.
	fsm.gracefulRestartTimer.Stop()
	for {
		select {
		case <-ctx.Done():
			// Shutting down: flush a pending notification, if any, before
			// closing the connection.
			select {
			case m := <-fsm.notification:
				b, _ := m.Serialize(h.fsm.marshallingOptions)
				h.conn.Write(b)
			default:
				// nothing to do
			}
			h.conn.Close()
			return -1, newfsmStateReason(fsmDying, nil, nil)
		case conn, ok := <-fsm.connCh:
			if !ok {
				break
			}
			// A second inbound connection while established is rejected.
			conn.Close()
			fsm.lock.RLock()
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.State.NeighborAddress,
				"State": fsm.state.String(),
			}).Warn("Closed an accepted connection")
			fsm.lock.RUnlock()
		case err := <-h.stateReasonCh:
			h.conn.Close()
			// if recv goroutine hit an error and sent to
			// stateReasonCh, then tx goroutine might take
			// long until it exits because it waits for
			// ctx.Done() or keepalive timer. So let kill
			// it now.
			h.outgoing.In() <- err
			fsm.lock.RLock()
			// When graceful restart is enabled, certain failures are
			// converted into a graceful-restart reason and the restart
			// timer is armed instead of tearing state down immediately.
			if s := fsm.pConf.GracefulRestart.State; s.Enabled {
				if (s.NotificationEnabled && err.Type == fsmNotificationRecv) ||
					(err.Type == fsmNotificationSent &&
						err.BGPNotification.Body.(*bgp.BGPNotification).ErrorCode == bgp.BGP_ERROR_HOLD_TIMER_EXPIRED) ||
					err.Type == fsmReadFailed ||
					err.Type == fsmWriteFailed {
					err = *newfsmStateReason(fsmGracefulRestart, nil, nil)
					log.WithFields(log.Fields{
						"Topic": "Peer",
						"Key":   fsm.pConf.State.NeighborAddress,
						"State": fsm.state.String(),
					}).Info("peer graceful restart")
					fsm.gracefulRestartTimer.Reset(time.Duration(fsm.pConf.GracefulRestart.State.PeerRestartTime) * time.Second)
				}
			}
			fsm.lock.RUnlock()
			return bgp.BGP_FSM_IDLE, &err
		case <-holdTimer.C:
			fsm.lock.RLock()
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.State.NeighborAddress,
				"State": fsm.state.String(),
			}).Warn("hold timer expired")
			fsm.lock.RUnlock()
			m := bgp.NewBGPNotificationMessage(bgp.BGP_ERROR_HOLD_TIMER_EXPIRED, 0, nil)
			h.outgoing.In() <- &fsmOutgoingMsg{Notification: m}
			fsm.lock.RLock()
			s := fsm.pConf.GracefulRestart.State
			fsm.lock.RUnlock()
			// Do not return hold timer expired to server if graceful restart is enabled
			// Let it fallback to read/write error or fsmNotificationSent handled above
			// Reference: https://github.com/osrg/gobgp/issues/2174
			if !s.Enabled {
				return bgp.BGP_FSM_IDLE, newfsmStateReason(fsmHoldTimerExpired, m, nil)
			}
		case <-h.holdTimerResetCh:
			// Inbound traffic observed: push the hold timer out again.
			fsm.lock.RLock()
			if fsm.pConf.Timers.State.NegotiatedHoldTime != 0 {
				holdTimer.Reset(time.Second * time.Duration(fsm.pConf.Timers.State.NegotiatedHoldTime))
			}
			fsm.lock.RUnlock()
		case stateOp := <-fsm.adminStateCh:
			err := h.changeadminState(stateOp.State)
			if err == nil {
				switch stateOp.State {
				case adminStateDown:
					// Administrative shutdown: queue a CEASE notification;
					// the send loop will deliver it and terminate.
					m := bgp.NewBGPNotificationMessage(bgp.BGP_ERROR_CEASE, bgp.BGP_ERROR_SUB_ADMINISTRATIVE_SHUTDOWN, stateOp.Communication)
					h.outgoing.In() <- &fsmOutgoingMsg{Notification: m}
				}
			}
		}
	}
}
// loop runs one FSM state handler for the current state, records the
// resulting transition reason, logs Peer Up/Down transitions, and publishes
// a state-change message to h.incoming.
func (h *fsmHandler) loop(ctx context.Context, wg *sync.WaitGroup) error {
	defer wg.Done()
	fsm := h.fsm
	fsm.lock.RLock()
	oldState := fsm.state
	fsm.lock.RUnlock()
	var reason *fsmStateReason
	nextState := bgp.FSMState(-1)
	fsm.lock.RLock()
	fsmState := fsm.state
	fsm.lock.RUnlock()
	// Dispatch to the handler for the current state; each handler blocks
	// until the state is left and returns the next state plus the reason.
	switch fsmState {
	case bgp.BGP_FSM_IDLE:
		nextState, reason = h.idle(ctx)
		// case bgp.BGP_FSM_CONNECT:
		// 	nextState = h.connect()
	case bgp.BGP_FSM_ACTIVE:
		nextState, reason = h.active(ctx)
	case bgp.BGP_FSM_OPENSENT:
		nextState, reason = h.opensent(ctx)
	case bgp.BGP_FSM_OPENCONFIRM:
		nextState, reason = h.openconfirm(ctx)
	case bgp.BGP_FSM_ESTABLISHED:
		nextState, reason = h.established(ctx)
	}
	// BUG FIX: this section mutates fsm.reason (and, in the Peer Down
	// branch, the pointed-to reason's Type/BGPNotification) and therefore
	// must hold the write lock; the previous code did these writes under
	// RLock, racing with concurrent readers.
	fsm.lock.Lock()
	fsm.reason = reason
	if nextState == bgp.BGP_FSM_ESTABLISHED && oldState == bgp.BGP_FSM_OPENCONFIRM {
		log.WithFields(log.Fields{
			"Topic": "Peer",
			"Key":   fsm.pConf.State.NeighborAddress,
			"State": fsm.state.String(),
		}).Info("Peer Up")
	}
	if oldState == bgp.BGP_FSM_ESTABLISHED {
		// The main goroutine sent the notification due to
		// deconfiguration or something.
		reason := fsm.reason
		if fsm.h.sentNotification != nil {
			reason.Type = fsmNotificationSent
			reason.BGPNotification = fsm.h.sentNotification
		}
		log.WithFields(log.Fields{
			"Topic":  "Peer",
			"Key":    fsm.pConf.State.NeighborAddress,
			"State":  fsm.state.String(),
			"Reason": reason.String(),
		}).Info("Peer Down")
	}
	fsm.lock.Unlock()
	// Publish the transition; only reads are performed here, so a read
	// lock is sufficient.
	fsm.lock.RLock()
	h.incoming.In() <- &fsmMsg{
		fsm:         fsm,
		MsgType:     fsmMsgStateChange,
		MsgSrc:      fsm.pConf.State.NeighborAddress,
		MsgData:     nextState,
		StateReason: reason,
	}
	fsm.lock.RUnlock()
	return nil
}
// changeadminState transitions the FSM's administrative state to s, flips
// the exported AdminDown flag, and logs the transition. It returns an error
// when s equals the current admin state, in which case nothing is changed.
func (h *fsmHandler) changeadminState(s adminState) error {
	h.fsm.lock.Lock()
	defer h.fsm.lock.Unlock()
	fsm := h.fsm
	if fsm.adminState != s {
		log.WithFields(log.Fields{
			"Topic":      "Peer",
			"Key":        fsm.pConf.State.NeighborAddress,
			"State":      fsm.state.String(),
			"adminState": s.String(),
		}).Debug("admin state changed")
		fsm.adminState = s
		// NOTE(review): this toggles AdminDown rather than deriving it from
		// s. For up<->down transitions the result is the same, but a
		// transition between the two "down" flavors (adminStateDown <->
		// adminStatePfxCt) would also flip the flag — confirm callers never
		// request that transition.
		fsm.pConf.State.AdminDown = !fsm.pConf.State.AdminDown
		switch s {
		case adminStateUp:
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.State.NeighborAddress,
				"State": fsm.state.String(),
			}).Info("Administrative start")
		case adminStateDown:
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.State.NeighborAddress,
				"State": fsm.state.String(),
			}).Info("Administrative shutdown")
		case adminStatePfxCt:
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   fsm.pConf.State.NeighborAddress,
				"State": fsm.state.String(),
			}).Info("Administrative shutdown(Prefix limit reached)")
		}
	} else {
		log.WithFields(log.Fields{
			"Topic": "Peer",
			"Key":   fsm.pConf.State.NeighborAddress,
			"State": fsm.state.String(),
		}).Warn("cannot change to the same state")
		return fmt.Errorf("cannot change to the same state")
	}
	return nil
}
|
package allocation
import (
"fmt"
"net"
"testing"
"time"
"github.com/gortc/turn"
"github.com/pion/stun"
"github.com/stretchr/testify/assert"
"github.com/pion/turn/internal/ipnet"
)
// TestAllocation runs every Allocation subtest, in declaration order, as a
// named subtest.
func TestAllocation(t *testing.T) {
	subtests := []struct {
		name string
		run  func(*testing.T)
	}{
		{"GetPermission", subTestGetPermission},
		{"AddPermission", subTestAddPermission},
		{"RemovePermission", subTestRemovePermission},
		{"AddChannelBind", subTestAddChannelBind},
		{"GetChannelByNumber", subTestGetChannelByNumber},
		{"GetChannelByAddr", subTestGetChannelByAddr},
		{"RemoveChannelBind", subTestRemoveChannelBind},
		{"Close", subTestAllocationClose},
		{"packetHandler", subTestPacketHandler},
	}
	for _, st := range subtests {
		t.Run(st.name, st.run)
	}
}
// subTestGetPermission checks that permissions are keyed by IP: a second
// permission for the same IP is ignored, while another IP gets its own entry.
func subTestGetPermission(t *testing.T) {
	alloc := NewAllocation(nil, nil, nil)

	// Two addresses sharing an IP (different ports) plus one on another IP.
	addrA, _ := net.ResolveUDPAddr("udp", "127.0.0.1:3478")
	addrB, _ := net.ResolveUDPAddr("udp", "127.0.0.1:3479")
	addrC, _ := net.ResolveUDPAddr("udp", "127.0.0.2:3478")

	permA := &Permission{Addr: addrA}
	permB := &Permission{Addr: addrB}
	permC := &Permission{Addr: addrC}
	for _, perm := range []*Permission{permA, permB, permC} {
		alloc.AddPermission(perm)
	}

	assert.Equal(t, permA, alloc.GetPermission(addrA), "Should keep the first one.")
	assert.Equal(t, permA, alloc.GetPermission(addrB), "Second one should be ignored.")
	assert.Equal(t, permC, alloc.GetPermission(addrC), "Permission with another IP should be found")
}
// subTestAddPermission checks that AddPermission links the permission back
// to the allocation and makes it retrievable by address.
func subTestAddPermission(t *testing.T) {
	alloc := NewAllocation(nil, nil, nil)
	peer, _ := net.ResolveUDPAddr("udp", "127.0.0.1:3478")
	perm := &Permission{Addr: peer}
	alloc.AddPermission(perm)
	assert.Equal(t, alloc, perm.allocation, "Permission's allocation should be the adder.")
	assert.Equal(t, perm, alloc.GetPermission(perm.Addr))
}
// subTestRemovePermission checks that a permission can no longer be found
// after RemovePermission.
func subTestRemovePermission(t *testing.T) {
	alloc := NewAllocation(nil, nil, nil)
	peer, _ := net.ResolveUDPAddr("udp", "127.0.0.1:3478")
	perm := &Permission{Addr: peer}
	alloc.AddPermission(perm)
	assert.Equal(t, perm, alloc.GetPermission(perm.Addr), "Got permission is not same as the the added.")

	alloc.RemovePermission(perm.Addr)
	assert.Nil(t, alloc.GetPermission(perm.Addr), "Got permission should be nil after removed.")
}
// subTestAddChannelBind checks that a channel bind is accepted once, and
// that re-binding the same peer to another number or the same number to
// another peer is rejected.
func subTestAddChannelBind(t *testing.T) {
	alloc := NewAllocation(nil, nil, nil)
	peer, _ := net.ResolveUDPAddr("udp", "127.0.0.1:3478")

	first := NewChannelBind(turn.MinChannelNumber, peer, nil)
	assert.Nil(t, alloc.AddChannelBind(first, turn.DefaultLifetime), "should succeed")
	assert.Equal(t, alloc, first.allocation, "allocation should be the caller.")

	// Same peer, different number.
	samePeer := NewChannelBind(turn.MinChannelNumber+1, peer, nil)
	assert.NotNil(t, alloc.AddChannelBind(samePeer, turn.DefaultLifetime), "should failed with conflicted peer address")

	// Same number, different peer.
	otherPeer, _ := net.ResolveUDPAddr("udp", "127.0.0.1:3479")
	sameNumber := NewChannelBind(turn.MinChannelNumber, otherPeer, nil)
	assert.NotNil(t, alloc.AddChannelBind(sameNumber, turn.DefaultLifetime), "should fail with conflicted number.")
}
// subTestGetChannelByNumber checks lookup of a channel bind by its number.
func subTestGetChannelByNumber(t *testing.T) {
	alloc := NewAllocation(nil, nil, nil)
	peer, _ := net.ResolveUDPAddr("udp", "127.0.0.1:3478")
	bind := NewChannelBind(turn.MinChannelNumber, peer, nil)
	_ = alloc.AddChannelBind(bind, turn.DefaultLifetime)

	assert.Equal(t, bind, alloc.GetChannelByNumber(bind.Number))
	assert.Nil(t, alloc.GetChannelByNumber(turn.MinChannelNumber+1), "should be nil for not existed channel.")
}
// subTestGetChannelByAddr checks lookup of a channel bind by peer address.
func subTestGetChannelByAddr(t *testing.T) {
	alloc := NewAllocation(nil, nil, nil)
	peer, _ := net.ResolveUDPAddr("udp", "127.0.0.1:3478")
	bind := NewChannelBind(turn.MinChannelNumber, peer, nil)
	_ = alloc.AddChannelBind(bind, turn.DefaultLifetime)

	assert.Equal(t, bind, alloc.GetChannelByAddr(bind.Peer))

	unbound, _ := net.ResolveUDPAddr("udp", "127.0.0.1:3479")
	assert.Nil(t, alloc.GetChannelByAddr(unbound), "should be nil for not existed channel.")
}
// subTestRemoveChannelBind checks that a removed channel bind is no longer
// reachable by number or by peer address.
func subTestRemoveChannelBind(t *testing.T) {
	alloc := NewAllocation(nil, nil, nil)
	peer, _ := net.ResolveUDPAddr("udp", "127.0.0.1:3478")
	bind := NewChannelBind(turn.MinChannelNumber, peer, nil)
	_ = alloc.AddChannelBind(bind, turn.DefaultLifetime)

	alloc.RemoveChannelBind(bind.Number)
	assert.Nil(t, alloc.GetChannelByNumber(bind.Number))
	assert.Nil(t, alloc.GetChannelByAddr(bind.Peer))
}
// subTestAllocationClose checks that Close releases the relay socket even
// when a lifetime timer, a channel bind, and a permission are attached.
func subTestAllocationClose(t *testing.T) {
	network := "udp"
	l, err := net.ListenPacket(network, "0.0.0.0:0")
	if err != nil {
		// Fail this test instead of panicking, which would abort the
		// whole test binary.
		t.Fatalf("listen failed: %v", err)
	}
	a := NewAllocation(nil, nil, nil)
	a.RelaySocket = l
	// add mock lifetimeTimer
	a.lifetimeTimer = time.AfterFunc(turn.DefaultLifetime, func() {})
	// add channel
	addr, _ := net.ResolveUDPAddr(network, "127.0.0.1:3478")
	c := NewChannelBind(turn.MinChannelNumber, addr, nil)
	_ = a.AddChannelBind(c, turn.DefaultLifetime)
	// add permission
	a.AddPermission(NewPermission(addr, nil))
	err = a.Close()
	assert.Nil(t, err, "should succeed")
	assert.True(t, isClose(a.RelaySocket), "should be closed")
}
// subTestPacketHandler end-to-end tests the relay packet handler: traffic
// from a peer with only a permission arrives at the client wrapped in a
// STUN Data indication, while traffic from a channel-bound peer arrives as
// ChannelData.
func subTestPacketHandler(t *testing.T) {
	network := "udp"
	m := newTestManager()
	// turn server initialization
	turnSocket, err := net.ListenPacket(network, "127.0.0.1:0")
	if err != nil {
		t.Fatalf("listen turn socket failed: %v", err)
	}
	// client listener initialization
	clientListener, err := net.ListenPacket(network, "127.0.0.1:0")
	if err != nil {
		t.Fatalf("listen client socket failed: %v", err)
	}
	dataCh := make(chan []byte)
	// client listener read data
	go func() {
		buffer := make([]byte, rtpMTU)
		for {
			n, _, err2 := clientListener.ReadFrom(buffer)
			if err2 != nil {
				return
			}
			dataCh <- buffer[:n]
		}
	}()
	// COMPILE FIX: CreateAllocation now takes the relay IP as an extra
	// argument between the turn socket and the requested port.
	a, err := m.CreateAllocation(&FiveTuple{
		SrcAddr: clientListener.LocalAddr(),
		DstAddr: turnSocket.LocalAddr(),
	}, turnSocket, net.ParseIP("127.0.0.1"), 0, turn.DefaultLifetime)
	assert.Nil(t, err, "should succeed")
	peerListener1, err := net.ListenPacket(network, "127.0.0.1:0")
	if err != nil {
		t.Fatalf("listen peer1 socket failed: %v", err)
	}
	peerListener2, err := net.ListenPacket(network, "127.0.0.1:0")
	if err != nil {
		t.Fatalf("listen peer2 socket failed: %v", err)
	}
	// add permission with peer1 address
	a.AddPermission(NewPermission(peerListener1.LocalAddr(), m.log))
	// add channel with min channel number and peer2 address
	channelBind := NewChannelBind(turn.MinChannelNumber, peerListener2.LocalAddr(), m.log)
	_ = a.AddChannelBind(channelBind, turn.DefaultLifetime)
	_, port, _ := ipnet.AddrIPPort(a.RelaySocket.LocalAddr())
	relayAddrWithHostStr := fmt.Sprintf("127.0.0.1:%d", port)
	relayAddrWithHost, _ := net.ResolveUDPAddr(network, relayAddrWithHostStr)
	// test for permission and data message
	targetText := "permission"
	_, _ = peerListener1.WriteTo([]byte(targetText), relayAddrWithHost)
	data := <-dataCh
	// resolve stun data message
	assert.True(t, stun.IsMessage(data), "should be stun message")
	var msg stun.Message
	err = stun.Decode(data, &msg)
	assert.Nil(t, err, "decode data to stun message failed")
	var msgData turn.Data
	err = msgData.GetFrom(&msg)
	assert.Nil(t, err, "get data from stun message failed")
	assert.Equal(t, targetText, string(msgData), "get message doesn't equal the target text")
	// test for channel bind and channel data
	targetText2 := "channel bind"
	_, _ = peerListener2.WriteTo([]byte(targetText2), relayAddrWithHost)
	data = <-dataCh
	// resolve channel data
	assert.True(t, turn.IsChannelData(data), "should be channel data")
	channelData := turn.ChannelData{
		Raw: data,
	}
	err = channelData.Decode()
	assert.Nil(t, err, fmt.Sprintf("channel data decode with error: %v", err))
	assert.Equal(t, channelBind.Number, channelData.Number, "get channel data's number is invalid")
	assert.Equal(t, targetText2, string(channelData.Data), "get data doesn't equal the target text.")
	// listeners close
	_ = m.Close()
	_ = clientListener.Close()
	_ = peerListener1.Close()
	_ = peerListener2.Close()
}
Fix test compile error: the CreateAllocation signature changed to take the relay IP as an additional argument, so update the call site accordingly.
package allocation
import (
"fmt"
"net"
"testing"
"time"
"github.com/gortc/turn"
"github.com/pion/stun"
"github.com/stretchr/testify/assert"
"github.com/pion/turn/internal/ipnet"
)
// TestAllocation runs every Allocation subtest, in declaration order, as a
// named subtest.
func TestAllocation(t *testing.T) {
	tt := []struct {
		name string
		f    func(*testing.T)
	}{
		{"GetPermission", subTestGetPermission},
		{"AddPermission", subTestAddPermission},
		{"RemovePermission", subTestRemovePermission},
		{"AddChannelBind", subTestAddChannelBind},
		{"GetChannelByNumber", subTestGetChannelByNumber},
		{"GetChannelByAddr", subTestGetChannelByAddr},
		{"RemoveChannelBind", subTestRemoveChannelBind},
		{"Close", subTestAllocationClose},
		{"packetHandler", subTestPacketHandler},
	}
	for _, tc := range tt {
		// Copy the func value so the closure does not capture the shared
		// loop variable.
		f := tc.f
		t.Run(tc.name, func(t *testing.T) {
			f(t)
		})
	}
}
// subTestGetPermission checks that permissions are keyed by IP: a second
// permission for the same IP is ignored, while another IP gets its own entry.
func subTestGetPermission(t *testing.T) {
	a := NewAllocation(nil, nil, nil)
	// addr and addr2 share an IP (different ports); addr3 uses another IP.
	addr, _ := net.ResolveUDPAddr("udp", "127.0.0.1:3478")
	addr2, _ := net.ResolveUDPAddr("udp", "127.0.0.1:3479")
	addr3, _ := net.ResolveUDPAddr("udp", "127.0.0.2:3478")
	p := &Permission{
		Addr: addr,
	}
	p2 := &Permission{
		Addr: addr2,
	}
	p3 := &Permission{
		Addr: addr3,
	}
	a.AddPermission(p)
	a.AddPermission(p2)
	a.AddPermission(p3)
	foundP1 := a.GetPermission(addr)
	assert.Equal(t, p, foundP1, "Should keep the first one.")
	foundP2 := a.GetPermission(addr2)
	assert.Equal(t, p, foundP2, "Second one should be ignored.")
	foundP3 := a.GetPermission(addr3)
	assert.Equal(t, p3, foundP3, "Permission with another IP should be found")
}
// subTestAddPermission checks that AddPermission links the permission back
// to the allocation and makes it retrievable by address.
func subTestAddPermission(t *testing.T) {
	a := NewAllocation(nil, nil, nil)
	addr, _ := net.ResolveUDPAddr("udp", "127.0.0.1:3478")
	p := &Permission{
		Addr: addr,
	}
	a.AddPermission(p)
	assert.Equal(t, a, p.allocation, "Permission's allocation should be the adder.")
	foundPermission := a.GetPermission(p.Addr)
	assert.Equal(t, p, foundPermission)
}
// subTestRemovePermission checks that a permission can no longer be found
// after RemovePermission.
func subTestRemovePermission(t *testing.T) {
	a := NewAllocation(nil, nil, nil)
	addr, _ := net.ResolveUDPAddr("udp", "127.0.0.1:3478")
	p := &Permission{
		Addr: addr,
	}
	a.AddPermission(p)
	foundPermission := a.GetPermission(p.Addr)
	assert.Equal(t, p, foundPermission, "Got permission is not same as the the added.")
	a.RemovePermission(p.Addr)
	foundPermission = a.GetPermission(p.Addr)
	assert.Nil(t, foundPermission, "Got permission should be nil after removed.")
}
// subTestAddChannelBind checks that a channel bind is accepted once, and
// that re-binding the same peer to another number or the same number to
// another peer is rejected.
func subTestAddChannelBind(t *testing.T) {
	a := NewAllocation(nil, nil, nil)
	addr, _ := net.ResolveUDPAddr("udp", "127.0.0.1:3478")
	c := NewChannelBind(turn.MinChannelNumber, addr, nil)
	err := a.AddChannelBind(c, turn.DefaultLifetime)
	assert.Nil(t, err, "should succeed")
	assert.Equal(t, a, c.allocation, "allocation should be the caller.")
	// Same peer bound to a different channel number must be rejected.
	c2 := NewChannelBind(turn.MinChannelNumber+1, addr, nil)
	err = a.AddChannelBind(c2, turn.DefaultLifetime)
	assert.NotNil(t, err, "should failed with conflicted peer address")
	// Same channel number bound to a different peer must be rejected.
	addr2, _ := net.ResolveUDPAddr("udp", "127.0.0.1:3479")
	c3 := NewChannelBind(turn.MinChannelNumber, addr2, nil)
	err = a.AddChannelBind(c3, turn.DefaultLifetime)
	assert.NotNil(t, err, "should fail with conflicted number.")
}
// subTestGetChannelByNumber checks lookup of a channel bind by its number.
func subTestGetChannelByNumber(t *testing.T) {
	a := NewAllocation(nil, nil, nil)
	addr, _ := net.ResolveUDPAddr("udp", "127.0.0.1:3478")
	c := NewChannelBind(turn.MinChannelNumber, addr, nil)
	_ = a.AddChannelBind(c, turn.DefaultLifetime)
	existChannel := a.GetChannelByNumber(c.Number)
	assert.Equal(t, c, existChannel)
	notExistChannel := a.GetChannelByNumber(turn.MinChannelNumber + 1)
	assert.Nil(t, notExistChannel, "should be nil for not existed channel.")
}
// subTestGetChannelByAddr checks lookup of a channel bind by peer address.
func subTestGetChannelByAddr(t *testing.T) {
	a := NewAllocation(nil, nil, nil)
	addr, _ := net.ResolveUDPAddr("udp", "127.0.0.1:3478")
	c := NewChannelBind(turn.MinChannelNumber, addr, nil)
	_ = a.AddChannelBind(c, turn.DefaultLifetime)
	existChannel := a.GetChannelByAddr(c.Peer)
	assert.Equal(t, c, existChannel)
	addr2, _ := net.ResolveUDPAddr("udp", "127.0.0.1:3479")
	notExistChannel := a.GetChannelByAddr(addr2)
	assert.Nil(t, notExistChannel, "should be nil for not existed channel.")
}
// subTestRemoveChannelBind checks that a removed channel bind is no longer
// reachable by number or by peer address.
func subTestRemoveChannelBind(t *testing.T) {
	a := NewAllocation(nil, nil, nil)
	addr, _ := net.ResolveUDPAddr("udp", "127.0.0.1:3478")
	c := NewChannelBind(turn.MinChannelNumber, addr, nil)
	_ = a.AddChannelBind(c, turn.DefaultLifetime)
	a.RemoveChannelBind(c.Number)
	channelByNumber := a.GetChannelByNumber(c.Number)
	assert.Nil(t, channelByNumber)
	channelByAddr := a.GetChannelByAddr(c.Peer)
	assert.Nil(t, channelByAddr)
}
// subTestAllocationClose checks that Close releases the relay socket even
// when a lifetime timer, a channel bind, and a permission are attached.
func subTestAllocationClose(t *testing.T) {
	network := "udp"
	l, err := net.ListenPacket(network, "0.0.0.0:0")
	if err != nil {
		panic(err)
	}
	a := NewAllocation(nil, nil, nil)
	a.RelaySocket = l
	// add mock lifetimeTimer
	a.lifetimeTimer = time.AfterFunc(turn.DefaultLifetime, func() {})
	// add channel
	addr, _ := net.ResolveUDPAddr(network, "127.0.0.1:3478")
	c := NewChannelBind(turn.MinChannelNumber, addr, nil)
	_ = a.AddChannelBind(c, turn.DefaultLifetime)
	// add permission
	a.AddPermission(NewPermission(addr, nil))
	err = a.Close()
	assert.Nil(t, err, "should succeed")
	assert.True(t, isClose(a.RelaySocket), "should be closed")
}
// subTestPacketHandler end-to-end tests the relay packet handler: traffic
// from a peer with only a permission reaches the client wrapped in a STUN
// Data indication, while traffic from a channel-bound peer arrives as
// ChannelData.
func subTestPacketHandler(t *testing.T) {
	network := "udp"
	m := newTestManager()
	// turn server initialization
	turnSocket, err := net.ListenPacket(network, "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	// client listener initialization
	clientListener, err := net.ListenPacket(network, "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	dataCh := make(chan []byte)
	// client listener read data
	go func() {
		buffer := make([]byte, rtpMTU)
		for {
			n, _, err2 := clientListener.ReadFrom(buffer)
			if err2 != nil {
				return
			}
			dataCh <- buffer[:n]
		}
	}()
	// Allocate a relay for the client's five-tuple on the loopback IP.
	a, err := m.CreateAllocation(&FiveTuple{
		SrcAddr: clientListener.LocalAddr(),
		DstAddr: turnSocket.LocalAddr(),
	}, turnSocket, net.ParseIP("127.0.0.1"), 0, turn.DefaultLifetime)
	assert.Nil(t, err, "should succeed")
	peerListener1, err := net.ListenPacket(network, "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	peerListener2, err := net.ListenPacket(network, "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	// add permission with peer1 address
	a.AddPermission(NewPermission(peerListener1.LocalAddr(), m.log))
	// add channel with min channel number and peer2 address
	channelBind := NewChannelBind(turn.MinChannelNumber, peerListener2.LocalAddr(), m.log)
	_ = a.AddChannelBind(channelBind, turn.DefaultLifetime)
	_, port, _ := ipnet.AddrIPPort(a.RelaySocket.LocalAddr())
	relayAddrWithHostStr := fmt.Sprintf("127.0.0.1:%d", port)
	relayAddrWithHost, _ := net.ResolveUDPAddr(network, relayAddrWithHostStr)
	// test for permission and data message
	targetText := "permission"
	_, _ = peerListener1.WriteTo([]byte(targetText), relayAddrWithHost)
	data := <-dataCh
	// resolve stun data message
	assert.True(t, stun.IsMessage(data), "should be stun message")
	var msg stun.Message
	err = stun.Decode(data, &msg)
	assert.Nil(t, err, "decode data to stun message failed")
	var msgData turn.Data
	err = msgData.GetFrom(&msg)
	assert.Nil(t, err, "get data from stun message failed")
	assert.Equal(t, targetText, string(msgData), "get message doesn't equal the target text")
	// test for channel bind and channel data
	targetText2 := "channel bind"
	_, _ = peerListener2.WriteTo([]byte(targetText2), relayAddrWithHost)
	data = <-dataCh
	// resolve channel data
	assert.True(t, turn.IsChannelData(data), "should be channel data")
	channelData := turn.ChannelData{
		Raw: data,
	}
	err = channelData.Decode()
	assert.Nil(t, err, fmt.Sprintf("channel data decode with error: %v", err))
	assert.Equal(t, channelBind.Number, channelData.Number, "get channel data's number is invalid")
	assert.Equal(t, targetText2, string(channelData.Data), "get data doesn't equal the target text.")
	// listeners close
	_ = m.Close()
	_ = clientListener.Close()
	_ = peerListener1.Close()
	_ = peerListener2.Close()
}
|
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gcsx
import (
"crypto/rand"
"fmt"
"io"
"time"
"github.com/jacobsa/gcloud/gcs"
"golang.org/x/net/context"
)
// Create an objectCreator that accepts a source object and the contents that
// should be "appended" to it, storing temporary objects using the supplied
// prefix.
//
// Note that the Create method will attempt to remove any temporary junk left
// behind, but it may fail to do so. Users should arrange for garbage collection.
//
// Create guarantees to return *gcs.PreconditionError when the source object
// has been clobbered.
// newAppendObjectCreator returns an objectCreator that "appends" new
// contents to a source object by composing over it, staging the new
// contents in temporary objects named with the given prefix.
func newAppendObjectCreator(
	prefix string,
	bucket gcs.Bucket) (oc objectCreator) {
	return &appendObjectCreator{
		prefix: prefix,
		bucket: bucket,
	}
}
////////////////////////////////////////////////////////////////////////
// Implementation
////////////////////////////////////////////////////////////////////////
// appendObjectCreator implements objectCreator by staging the appended
// contents as a temporary object (named with prefix) and composing it onto
// the source object.
type appendObjectCreator struct {
	prefix string     // name prefix for temporary staging objects
	bucket gcs.Bucket // bucket holding both source and temporary objects
}
// chooseName generates a temporary object name: the configured prefix
// followed by 16 hex digits of a cryptographically random 64-bit value.
func (oc *appendObjectCreator) chooseName() (name string, err error) {
	// Generate a good 64-bit random number.
	var buf [8]byte
	if _, err = io.ReadFull(rand.Reader, buf[:]); err != nil {
		err = fmt.Errorf("ReadFull: %v", err)
		return
	}

	// Assemble the bytes little-endian into a single integer.
	var x uint64
	for i, b := range buf {
		x |= uint64(b) << (8 * uint(i))
	}

	// Turn it into a name.
	name = fmt.Sprintf("%s%016x", oc.prefix, x)
	return
}
// Create appends the contents of r to srcObject: it stages r as a
// temporary object, composes srcObject followed by the temporary object
// back over srcObject's name, stamps the given mtime in metadata, and
// finally deletes the temporary object (best effort; a delete failure is
// surfaced only when everything else succeeded).
//
// Returns *gcs.PreconditionError when the source object has been clobbered.
func (oc *appendObjectCreator) Create(
	ctx context.Context,
	srcObject *gcs.Object,
	mtime time.Time,
	r io.Reader) (o *gcs.Object, err error) {
	// Choose a name for a temporary object.
	tmpName, err := oc.chooseName()
	if err != nil {
		err = fmt.Errorf("chooseName: %v", err)
		return
	}
	// Create a temporary object containing the additional contents. The
	// zero generation precondition ensures we never clobber an existing
	// object with the same name.
	var zero int64
	tmp, err := oc.bucket.CreateObject(
		ctx,
		&gcs.CreateObjectRequest{
			Name:                   tmpName,
			GenerationPrecondition: &zero,
			Contents:               r,
		})
	// Don't mangle precondition errors.
	switch typed := err.(type) {
	case nil:
	case *gcs.PreconditionError:
		err = &gcs.PreconditionError{
			Err: fmt.Errorf("CreateObject: %v", typed.Err),
		}
		return
	default:
		err = fmt.Errorf("CreateObject: %v", err)
		return
	}
	// Attempt to delete the temporary object when we're done.
	defer func() {
		deleteErr := oc.bucket.DeleteObject(
			ctx,
			&gcs.DeleteObjectRequest{
				Name: tmp.Name,
			})
		if err == nil && deleteErr != nil {
			err = fmt.Errorf("DeleteObject: %v", deleteErr)
		}
	}()
	// Compose the old contents plus the new over the old.
	//
	// BUG FIX: the meta-generation precondition must be the source object's
	// MetaGeneration, not its Generation — the two counters advance
	// independently, so using Generation here made the compose fail its
	// precondition whenever the counters diverged.
	o, err = oc.bucket.ComposeObjects(
		ctx,
		&gcs.ComposeObjectsRequest{
			DstName:                       srcObject.Name,
			DstGenerationPrecondition:     &srcObject.Generation,
			DstMetaGenerationPrecondition: &srcObject.MetaGeneration,
			Sources: []gcs.ComposeSource{
				{
					Name:       srcObject.Name,
					Generation: srcObject.Generation,
				},
				{
					Name:       tmp.Name,
					Generation: tmp.Generation,
				},
			},
			Metadata: map[string]string{
				MtimeMetadataKey: mtime.Format(time.RFC3339Nano),
			},
		})
	switch typed := err.(type) {
	case nil:
	case *gcs.PreconditionError:
		err = &gcs.PreconditionError{
			Err: fmt.Errorf("ComposeObjects: %v", typed.Err),
		}
		return
	// A not found error means that either the source object was clobbered or the
	// temporary object was. The latter is unlikely, so we signal a precondition
	// error.
	case *gcs.NotFoundError:
		err = &gcs.PreconditionError{
			Err: fmt.Errorf(
				"Synthesized precondition error for ComposeObjects. Original: %v",
				err),
		}
		return
	default:
		err = fmt.Errorf("ComposeObjects: %v", err)
		return
	}
	return
}
Fix ComposeObjects precondition: DstMetaGenerationPrecondition now uses the source object's MetaGeneration instead of its Generation, so appends no longer fail the meta-generation check when the two counters differ.
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gcsx
import (
"crypto/rand"
"fmt"
"io"
"time"
"github.com/jacobsa/gcloud/gcs"
"golang.org/x/net/context"
)
// Create an objectCreator that accepts a source object and the contents that
// should be "appended" to it, storing temporary objects using the supplied
// prefix.
//
// Note that the Create method will attempt to remove any temporary junk left
// behind, but it may fail to do so. Users should arrange for garbage collection.
//
// Create guarantees to return *gcs.PreconditionError when the source object
// has been clobbered.
// newAppendObjectCreator returns an objectCreator that "appends" contents
// to a source object via compose, staging the appended data in temporary
// objects named with the supplied prefix.
func newAppendObjectCreator(
	prefix string,
	bucket gcs.Bucket) (oc objectCreator) {
	creator := &appendObjectCreator{
		prefix: prefix,
		bucket: bucket,
	}
	oc = creator
	return
}
////////////////////////////////////////////////////////////////////////
// Implementation
////////////////////////////////////////////////////////////////////////
// appendObjectCreator implements objectCreator by staging appended contents
// as a temporary object (named with prefix) and composing it onto the
// source object.
type appendObjectCreator struct {
	prefix string     // name prefix for temporary staging objects
	bucket gcs.Bucket // bucket holding both source and temporary objects
}
// chooseName generates a temporary object name: the configured prefix
// followed by 16 hex digits of a cryptographically random 64-bit value.
func (oc *appendObjectCreator) chooseName() (name string, err error) {
	// Generate a good 64-bit random number.
	var buf [8]byte
	_, err = io.ReadFull(rand.Reader, buf[:])
	if err != nil {
		err = fmt.Errorf("ReadFull: %v", err)
		return
	}
	// Assemble the bytes little-endian into one integer.
	x := uint64(buf[0])<<0 |
		uint64(buf[1])<<8 |
		uint64(buf[2])<<16 |
		uint64(buf[3])<<24 |
		uint64(buf[4])<<32 |
		uint64(buf[5])<<40 |
		uint64(buf[6])<<48 |
		uint64(buf[7])<<56
	// Turn it into a name.
	name = fmt.Sprintf("%s%016x", oc.prefix, x)
	return
}
// Create appends the contents of r to srcObject: it stages r as a
// temporary object, composes srcObject followed by the temporary object
// back over srcObject's name (guarded by generation and meta-generation
// preconditions), stamps mtime in metadata, then deletes the temporary
// object. A delete failure is surfaced only when everything else succeeded.
//
// Returns *gcs.PreconditionError when the source object has been clobbered.
func (oc *appendObjectCreator) Create(
	ctx context.Context,
	srcObject *gcs.Object,
	mtime time.Time,
	r io.Reader) (o *gcs.Object, err error) {
	// Choose a name for a temporary object.
	tmpName, err := oc.chooseName()
	if err != nil {
		err = fmt.Errorf("chooseName: %v", err)
		return
	}
	// Create a temporary object containing the additional contents. The
	// zero generation precondition ensures we never clobber an existing
	// object of the same name.
	var zero int64
	tmp, err := oc.bucket.CreateObject(
		ctx,
		&gcs.CreateObjectRequest{
			Name:                   tmpName,
			GenerationPrecondition: &zero,
			Contents:               r,
		})
	// Don't mangle precondition errors.
	switch typed := err.(type) {
	case nil:
	case *gcs.PreconditionError:
		err = &gcs.PreconditionError{
			Err: fmt.Errorf("CreateObject: %v", typed.Err),
		}
		return
	default:
		err = fmt.Errorf("CreateObject: %v", err)
		return
	}
	// Attempt to delete the temporary object when we're done.
	defer func() {
		deleteErr := oc.bucket.DeleteObject(
			ctx,
			&gcs.DeleteObjectRequest{
				Name: tmp.Name,
			})
		if err == nil && deleteErr != nil {
			err = fmt.Errorf("DeleteObject: %v", deleteErr)
		}
	}()
	// Compose the old contents plus the new over the old.
	o, err = oc.bucket.ComposeObjects(
		ctx,
		&gcs.ComposeObjectsRequest{
			DstName:                       srcObject.Name,
			DstGenerationPrecondition:     &srcObject.Generation,
			DstMetaGenerationPrecondition: &srcObject.MetaGeneration,
			Sources: []gcs.ComposeSource{
				gcs.ComposeSource{
					Name:       srcObject.Name,
					Generation: srcObject.Generation,
				},
				gcs.ComposeSource{
					Name:       tmp.Name,
					Generation: tmp.Generation,
				},
			},
			Metadata: map[string]string{
				MtimeMetadataKey: mtime.Format(time.RFC3339Nano),
			},
		})
	switch typed := err.(type) {
	case nil:
	case *gcs.PreconditionError:
		err = &gcs.PreconditionError{
			Err: fmt.Errorf("ComposeObjects: %v", typed.Err),
		}
		return
	// A not found error means that either the source object was clobbered or the
	// temporary object was. The latter is unlikely, so we signal a precondition
	// error.
	case *gcs.NotFoundError:
		err = &gcs.PreconditionError{
			Err: fmt.Errorf(
				"Synthesized precondition error for ComposeObjects. Original: %v",
				err),
		}
		return
	default:
		err = fmt.Errorf("ComposeObjects: %v", err)
		return
	}
	return
}
|
package calendar
import (
"database/sql"
"fmt"
"math"
"strings"
"time"
"github.com/MyHomeworkSpace/api-server/data"
)
// A ViewDay represents a day in a View.
type ViewDay struct {
	// DayString is the day's date formatted as YYYY-MM-DD.
	DayString     string `json:"day"`
	ShiftingIndex int    `json:"shiftingIndex"` // if it's a shifting day, its current index (for example, friday 1/2/3/4)
	// CurrentTerm is nil when the day falls outside the school year.
	CurrentTerm   *Term                      `json:"currentTerm"`
	Announcements []data.PlannerAnnouncement `json:"announcements"`
	Events        []Event                    `json:"events"`
}
// A View represents a view of a user's calendar over a certain period of time.
type View struct {
	// Days holds one entry per day of the period, in chronological order.
	Days []ViewDay `json:"days"`
}
// getOffBlocksStartingBefore returns every off block (announcement `type` 2)
// whose start date precedes the given date string (YYYY-MM-DD), restricted by
// the caller-supplied group condition. The matching end announcement
// (`type` 3 with the same text) and the parsed Start/End times are filled in.
func getOffBlocksStartingBefore(db *sql.DB, before string, groupSQL string) ([]OffBlock, error) {
	// find the starts
	offBlockRows, err := db.Query("SELECT id, date, text, grade FROM announcements WHERE ("+groupSQL+") AND `type` = 2 AND `date` < ?", before)
	if err != nil {
		return nil, err
	}
	defer offBlockRows.Close()
	blocks := []OffBlock{}
	for offBlockRows.Next() {
		block := OffBlock{}
		offBlockRows.Scan(&block.StartID, &block.StartText, &block.Name, &block.Grade)
		blocks = append(blocks, block)
	}
	// find the matching ends. BUGFIX: require the end date to come after the
	// block's start date, so that a break reusing the name of an earlier one
	// matches its own end announcement rather than the first end with that
	// name.
	for i, block := range blocks {
		offBlockEndRows, err := db.Query("SELECT date FROM announcements WHERE ("+groupSQL+") AND `type` = 3 AND `text` = ? AND `date` > ?", block.Name, block.StartText)
		if err != nil {
			return nil, err
		}
		if offBlockEndRows.Next() {
			offBlockEndRows.Scan(&blocks[i].EndText)
		}
		// Close inside the loop: a defer here would keep every result set
		// (and its connection) open until the function returns.
		offBlockEndRows.Close()
	}
	// parse dates
	for i, block := range blocks {
		blocks[i].Start, err = time.Parse("2006-01-02", block.StartText)
		if err != nil {
			return nil, err
		}
		blocks[i].End, err = time.Parse("2006-01-02", block.EndText)
		if err != nil {
			return nil, err
		}
	}
	return blocks, err
}
// GetView retrieves a CalendarView for the given user with the given parameters.
//
// For every day in the requested window it assembles: announcements
// (including synthesized break start/end entries expanded from off blocks),
// the current term, plain events, homework events, schedule events (with
// Thursday-assembly special-casing), and special-assessment "final" events
// when any day in the window is a special assessment day.
func GetView(db *sql.DB, userID int, location *time.Location, announcementsGroupsSQL string, startTime time.Time, endTime time.Time) (View, error) {
	view := View{
		Days: []ViewDay{},
	}
	// get announcements for time period
	announcementRows, err := db.Query("SELECT id, date, text, grade, `type` FROM announcements WHERE date >= ? AND date <= ? AND ("+announcementsGroupsSQL+") AND type < 2", startTime.Format("2006-01-02"), endTime.Format("2006-01-02"))
	if err != nil {
		return View{}, err
	}
	defer announcementRows.Close()
	announcements := []data.PlannerAnnouncement{}
	for announcementRows.Next() {
		resp := data.PlannerAnnouncement{}
		announcementRows.Scan(&resp.ID, &resp.Date, &resp.Text, &resp.Grade, &resp.Type)
		announcements = append(announcements, resp)
	}
	// get all friday information for time period
	fridayRows, err := db.Query("SELECT * FROM fridays WHERE date >= ? AND date <= ?", startTime.Format("2006-01-02"), endTime.Format("2006-01-02"))
	if err != nil {
		return View{}, err
	}
	defer fridayRows.Close()
	fridays := []data.PlannerFriday{}
	for fridayRows.Next() {
		friday := data.PlannerFriday{}
		fridayRows.Scan(&friday.ID, &friday.Date, &friday.Index)
		fridays = append(fridays, friday)
	}
	// get terms for user
	termRows, err := db.Query("SELECT id, termId, name, userId FROM calendar_terms WHERE userId = ? ORDER BY name ASC", userID)
	if err != nil {
		return View{}, err
	}
	defer termRows.Close()
	availableTerms := []Term{}
	for termRows.Next() {
		term := Term{}
		termRows.Scan(&term.ID, &term.TermID, &term.Name, &term.UserID)
		availableTerms = append(availableTerms, term)
	}
	// get off blocks for time period
	offBlocks, err := getOffBlocksStartingBefore(db, endTime.Format("2006-01-02"), announcementsGroupsSQL)
	if err != nil {
		return View{}, err
	}
	// generate list of all off days in time period
	offDays := []string{}
	for _, announcement := range announcements {
		if announcement.Type == AnnouncementType_FullOff {
			offDays = append(offDays, announcement.Date)
		}
	}
	// expand each off block into one off day per calendar day, plus
	// synthesized "Start of ..." / "End of ..." announcements
	for _, offBlock := range offBlocks {
		offDayCount := int(math.Ceil(offBlock.End.Sub(offBlock.Start).Hours() / 24))
		offDay := offBlock.Start
		announcements = append(announcements, data.PlannerAnnouncement{
			ID:    offBlock.StartID,
			Date:  offBlock.StartText,
			Text:  "Start of " + offBlock.Name,
			Grade: offBlock.Grade,
			Type:  AnnouncementType_BreakStart,
		})
		for i := 0; i < offDayCount; i++ {
			// the first day already carries the "Start of" announcement above
			if i != 0 {
				announcements = append(announcements, data.PlannerAnnouncement{
					ID:    offBlock.StartID,
					Date:  offDay.Format("2006-01-02"),
					Text:  offBlock.Name,
					Grade: offBlock.Grade,
					Type:  AnnouncementType_BreakStart,
				})
			}
			offDays = append(offDays, offDay.Format("2006-01-02"))
			offDay = offDay.Add(24 * time.Hour)
		}
		announcements = append(announcements, data.PlannerAnnouncement{
			ID:    offBlock.EndID,
			Date:  offBlock.EndText,
			Text:  "End of " + offBlock.Name,
			Grade: offBlock.Grade,
			Type:  AnnouncementType_BreakEnd,
		})
	}
	// create days in array, set friday indices
	dayCount := int(math.Ceil(endTime.Sub(startTime).Hours() / 24))
	currentDay := startTime
	viewIncludesSpecialAssessmentDay := false
	for i := 0; i < dayCount; i++ {
		view.Days = append(view.Days, ViewDay{
			DayString:     currentDay.Format("2006-01-02"),
			ShiftingIndex: -1,
			CurrentTerm:   nil,
			Announcements: []data.PlannerAnnouncement{},
			Events:        []Event{},
		})
		// NOTE(review): this assumes availableTerms has at least two entries
		// whenever a day falls inside the school year — verify against the
		// data this user is guaranteed to have.
		if currentDay.Add(time.Second).After(Day_SchoolStart) && currentDay.Before(Day_SchoolEnd) {
			if currentDay.After(Day_ExamRelief) {
				// it's the second term
				view.Days[i].CurrentTerm = &availableTerms[1]
			} else {
				// it's the first term
				view.Days[i].CurrentTerm = &availableTerms[0]
			}
		}
		for _, announcement := range announcements {
			if view.Days[i].DayString == announcement.Date {
				view.Days[i].Announcements = append(view.Days[i].Announcements, announcement)
			}
		}
		// fridays are shifting days: look up which schedule index applies
		if currentDay.Weekday() == time.Friday {
			for _, friday := range fridays {
				if view.Days[i].DayString == friday.Date {
					view.Days[i].ShiftingIndex = friday.Index
					break
				}
			}
		}
		for specialAssessmentDay, _ := range SpecialAssessmentDays {
			if view.Days[i].DayString == specialAssessmentDay {
				viewIncludesSpecialAssessmentDay = true
				break
			}
		}
		currentDay = currentDay.AddDate(0, 0, 1)
	}
	// get plain events
	plainEventRows, err := db.Query("SELECT id, name, `start`, `end`, `desc`, userId FROM calendar_events WHERE userId = ? AND (`end` >= ? AND `start` <= ?)", userID, startTime.Unix(), endTime.Unix())
	if err != nil {
		return View{}, err
	}
	defer plainEventRows.Close()
	for plainEventRows.Next() {
		event := Event{
			Type: PlainEvent,
		}
		data := PlainEventData{}
		plainEventRows.Scan(&event.ID, &event.Name, &event.Start, &event.End, &data.Desc, &event.UserID)
		event.Data = data
		// bucket the event into the day its start time falls on; drop events
		// whose start lies outside the generated day range
		eventStartTime := time.Unix(int64(event.Start), 0)
		dayOffset := int(math.Floor(eventStartTime.Sub(startTime).Hours() / 24))
		if dayOffset < 0 || dayOffset > len(view.Days)-1 {
			continue
		}
		view.Days[dayOffset].Events = append(view.Days[dayOffset].Events, event)
	}
	// get homework events
	hwEventRows, err := db.Query("SELECT calendar_hwevents.id, homework.id, homework.name, homework.`due`, homework.`desc`, homework.`complete`, homework.classId, homework.userId, calendar_hwevents.`start`, calendar_hwevents.`end`, calendar_hwevents.userId FROM calendar_hwevents INNER JOIN homework ON calendar_hwevents.homeworkId = homework.id WHERE calendar_hwevents.userId = ? AND (calendar_hwevents.`end` >= ? AND calendar_hwevents.`start` <= ?)", userID, startTime.Unix(), endTime.Unix())
	if err != nil {
		return View{}, err
	}
	defer hwEventRows.Close()
	for hwEventRows.Next() {
		event := Event{
			Type: HomeworkEvent,
		}
		data := HomeworkEventData{}
		hwEventRows.Scan(&event.ID, &data.Homework.ID, &data.Homework.Name, &data.Homework.Due, &data.Homework.Desc, &data.Homework.Complete, &data.Homework.ClassID, &data.Homework.UserID, &event.Start, &event.End, &event.UserID)
		event.Data = data
		event.Name = data.Homework.Name
		eventStartTime := time.Unix(int64(event.Start), 0)
		dayOffset := int(math.Floor(eventStartTime.Sub(startTime).Hours() / 24))
		if dayOffset < 0 || dayOffset > len(view.Days)-1 {
			continue
		}
		view.Days[dayOffset].Events = append(view.Days[dayOffset].Events, event)
	}
	// get schedule events
	for i := 0; i < dayCount; i++ {
		day := view.Days[i]
		// no term means the day is outside the school year: no schedule
		if day.CurrentTerm == nil {
			continue
		}
		dayTime, _ := time.ParseInLocation("2006-01-02", day.DayString, location)
		dayNumber := int(dayTime.Weekday())
		if dayTime.Weekday() == time.Friday {
			// shifting fridays use day numbers 4+index; fridays with no
			// recorded index get no schedule
			if day.ShiftingIndex != -1 {
				dayNumber = 4 + day.ShiftingIndex
			} else {
				continue
			}
		}
		isOff := false
		for _, offDay := range offDays {
			if day.DayString == offDay {
				isOff = true
				break
			}
		}
		if isOff {
			continue
		}
		if dayTime.Weekday() == time.Saturday || dayTime.Weekday() == time.Sunday {
			continue
		}
		rows, err := db.Query("SELECT calendar_periods.id, calendar_classes.termId, calendar_classes.sectionId, calendar_classes.`name`, calendar_classes.ownerId, calendar_classes.ownerName, calendar_periods.dayNumber, calendar_periods.block, calendar_periods.buildingName, calendar_periods.roomNumber, calendar_periods.`start`, calendar_periods.`end`, calendar_periods.userId FROM calendar_periods INNER JOIN calendar_classes ON calendar_periods.classId = calendar_classes.sectionId WHERE calendar_periods.userId = ? AND (calendar_classes.termId = ? OR calendar_classes.termId = -1) AND calendar_periods.dayNumber = ? GROUP BY calendar_periods.id, calendar_classes.termId, calendar_classes.name, calendar_classes.ownerId, calendar_classes.ownerName", userID, day.CurrentTerm.TermID, dayNumber)
		if err != nil {
			return View{}, err
		}
		defer rows.Close()
		for rows.Next() {
			event := Event{
				Type: ScheduleEvent,
			}
			data := ScheduleEventData{}
			rows.Scan(&event.ID, &data.TermID, &data.ClassID, &event.Name, &data.OwnerID, &data.OwnerName, &data.DayNumber, &data.Block, &data.BuildingName, &data.RoomNumber, &event.Start, &event.End, &event.UserID)
			event.Data = data
			// period start/end are offsets within the day; shift them to
			// absolute timestamps for this date
			event.Start += int(dayTime.Unix())
			event.End += int(dayTime.Unix())
			view.Days[i].Events = append(view.Days[i].Events, event)
		}
		if dayTime.Weekday() == time.Thursday {
			// special case: assembly
			for eventIndex, event := range view.Days[i].Events {
				// check for an "HS House" event
				// starting 11:50, ending 12:50
				if strings.HasPrefix(event.Name, "HS House") && event.Start == int(dayTime.Unix())+42600 && event.End == int(dayTime.Unix())+46200 {
					// found it
					// now look up what type of assembly period it is this week
					assemblyType, foundType := AssemblyTypeList[dayTime.Format("2006-01-02")]
					if !foundType || assemblyType == AssemblyType_Assembly {
						// set name to assembly and room to Theater
						view.Days[i].Events[eventIndex].Name = "Assembly"
						data := view.Days[i].Events[eventIndex].Data.(ScheduleEventData)
						data.RoomNumber = "Theater"
						view.Days[i].Events[eventIndex].Data = data
					} else if assemblyType == AssemblyType_LongHouse {
						// set name to long house
						view.Days[i].Events[eventIndex].Name = "Long House"
					} else if assemblyType == AssemblyType_Lab {
						// just remove it
						view.Days[i].Events = append(view.Days[i].Events[:eventIndex], view.Days[i].Events[eventIndex+1:]...)
					}
				}
			}
		}
	}
	if viewIncludesSpecialAssessmentDay {
		// get a list of the user's calendar classes
		sectionIDs := []int{}
		classRows, err := db.Query("SELECT sectionId FROM calendar_classes WHERE userId = ? GROUP BY `sectionId`", userID)
		if err != nil {
			return View{}, err
		}
		defer classRows.Close()
		for classRows.Next() {
			sectionID := -1
			classRows.Scan(&sectionID)
			sectionIDs = append(sectionIDs, sectionID)
		}
		// find the applicable special assessments
		allSpecialAssessments := []*SpecialAssessmentInfo{}
		for _, sectionID := range sectionIDs {
			specialAssessment, foundAssessment := SpecialAssessmentList[sectionID]
			if !foundAssessment {
				// no assessment for this class
				continue
			}
			isDuplicate := false
			for _, alreadyFoundSpecialAssessment := range allSpecialAssessments {
				if specialAssessment == alreadyFoundSpecialAssessment {
					isDuplicate = true
					break
				}
			}
			if isDuplicate {
				continue
			}
			allSpecialAssessments = append(allSpecialAssessments, specialAssessment)
		}
		for i := 0; i < dayCount; i++ {
			day := view.Days[i]
			dayType := SpecialAssessmentType_Unknown
			for specialAssessmentDay, specialAssessmentDayType := range SpecialAssessmentDays {
				if day.DayString == specialAssessmentDay {
					dayType = specialAssessmentDayType
					break
				}
			}
			if dayType == SpecialAssessmentType_Unknown {
				continue
			}
			var assessmentForDay *SpecialAssessmentInfo
			for _, assessment := range allSpecialAssessments {
				if assessment.Subject == dayType {
					assessmentForDay = assessment
					break
				}
			}
			if assessmentForDay == nil {
				continue
			}
			event := Event{
				Type:   ScheduleEvent,
				ID:     -1,
				Name:   fmt.Sprintf("Final - %s", assessmentForDay.ClassName),
				Start:  assessmentForDay.Start,
				End:    assessmentForDay.End,
				UserID: userID,
			}
			finalDay := startTime.Add(time.Duration(i) * 24 * time.Hour)
			// hacky time correction to shift the timezone properly
			startHour := int(math.Floor(float64(event.Start) / 60 / 60))
			startMin := int(math.Floor((float64(event.Start) - (float64(startHour) * 60 * 60)) / 60))
			event.Start = int(time.Date(finalDay.Year(), finalDay.Month(), finalDay.Day(), startHour, startMin, 0, 0, location).Unix())
			endHour := int(math.Floor(float64(event.End) / 60 / 60))
			endMin := int(math.Floor((float64(event.End) - (float64(endHour) * 60 * 60)) / 60))
			event.End = int(time.Date(finalDay.Year(), finalDay.Month(), finalDay.Day(), endHour, endMin, 0, 0, location).Unix())
			data := ScheduleEventData{
				TermID:       -1,
				ClassID:      -1,
				OwnerID:      -1,
				OwnerName:    assessmentForDay.TeacherName,
				DayNumber:    -1,
				Block:        "",
				BuildingName: "",
				RoomNumber:   assessmentForDay.RoomNumber,
			}
			event.Data = data
			view.Days[i].Events = append(view.Days[i].Events, event)
		}
	}
	return view, nil
}
Fix bug where a break with the same name as a previous one would not be displayed correctly.
package calendar
import (
"database/sql"
"fmt"
"math"
"strings"
"time"
"github.com/MyHomeworkSpace/api-server/data"
)
// A ViewDay represents a day in a View.
type ViewDay struct {
	// DayString is the day's date formatted as YYYY-MM-DD.
	DayString     string `json:"day"`
	ShiftingIndex int    `json:"shiftingIndex"` // if it's a shifting day, its current index (for example, friday 1/2/3/4)
	// CurrentTerm is nil when the day falls outside the school year.
	CurrentTerm   *Term                      `json:"currentTerm"`
	Announcements []data.PlannerAnnouncement `json:"announcements"`
	Events        []Event                    `json:"events"`
}
// A View represents a view of a user's calendar over a certain period of time.
type View struct {
	// Days holds one entry per day of the period, in chronological order.
	Days []ViewDay `json:"days"`
}
// getOffBlocksStartingBefore returns every off block (announcement `type` 2)
// whose start date precedes the given date string (YYYY-MM-DD), restricted by
// the caller-supplied group condition. The matching end announcement
// (`type` 3 with the same text and a later date) and the parsed Start/End
// times are filled in.
func getOffBlocksStartingBefore(db *sql.DB, before string, groupSQL string) ([]OffBlock, error) {
	// find the starts
	offBlockRows, err := db.Query("SELECT id, date, text, grade FROM announcements WHERE ("+groupSQL+") AND `type` = 2 AND `date` < ?", before)
	if err != nil {
		return nil, err
	}
	defer offBlockRows.Close()
	blocks := []OffBlock{}
	for offBlockRows.Next() {
		block := OffBlock{}
		offBlockRows.Scan(&block.StartID, &block.StartText, &block.Name, &block.Grade)
		blocks = append(blocks, block)
	}
	// find the matching ends (the date comparison ensures a break that reuses
	// an earlier break's name matches its own end announcement)
	for i, block := range blocks {
		offBlockEndRows, err := db.Query("SELECT date FROM announcements WHERE ("+groupSQL+") AND `type` = 3 AND `text` = ? AND `date` > ?", block.Name, block.StartText)
		if err != nil {
			return nil, err
		}
		if offBlockEndRows.Next() {
			offBlockEndRows.Scan(&blocks[i].EndText)
		}
		// Close inside the loop: the previous defer-per-iteration kept every
		// result set (and its connection) open until the function returned.
		offBlockEndRows.Close()
	}
	// parse dates
	for i, block := range blocks {
		blocks[i].Start, err = time.Parse("2006-01-02", block.StartText)
		if err != nil {
			return nil, err
		}
		blocks[i].End, err = time.Parse("2006-01-02", block.EndText)
		if err != nil {
			return nil, err
		}
	}
	return blocks, err
}
// GetView retrieves a CalendarView for the given user with the given parameters.
//
// For every day in the requested window it assembles: announcements
// (including synthesized break start/end entries expanded from off blocks),
// the current term, plain events, homework events, schedule events (with
// Thursday-assembly special-casing), and special-assessment "final" events
// when any day in the window is a special assessment day.
func GetView(db *sql.DB, userID int, location *time.Location, announcementsGroupsSQL string, startTime time.Time, endTime time.Time) (View, error) {
	view := View{
		Days: []ViewDay{},
	}
	// get announcements for time period
	announcementRows, err := db.Query("SELECT id, date, text, grade, `type` FROM announcements WHERE date >= ? AND date <= ? AND ("+announcementsGroupsSQL+") AND type < 2", startTime.Format("2006-01-02"), endTime.Format("2006-01-02"))
	if err != nil {
		return View{}, err
	}
	defer announcementRows.Close()
	announcements := []data.PlannerAnnouncement{}
	for announcementRows.Next() {
		resp := data.PlannerAnnouncement{}
		announcementRows.Scan(&resp.ID, &resp.Date, &resp.Text, &resp.Grade, &resp.Type)
		announcements = append(announcements, resp)
	}
	// get all friday information for time period
	fridayRows, err := db.Query("SELECT * FROM fridays WHERE date >= ? AND date <= ?", startTime.Format("2006-01-02"), endTime.Format("2006-01-02"))
	if err != nil {
		return View{}, err
	}
	defer fridayRows.Close()
	fridays := []data.PlannerFriday{}
	for fridayRows.Next() {
		friday := data.PlannerFriday{}
		fridayRows.Scan(&friday.ID, &friday.Date, &friday.Index)
		fridays = append(fridays, friday)
	}
	// get terms for user
	termRows, err := db.Query("SELECT id, termId, name, userId FROM calendar_terms WHERE userId = ? ORDER BY name ASC", userID)
	if err != nil {
		return View{}, err
	}
	defer termRows.Close()
	availableTerms := []Term{}
	for termRows.Next() {
		term := Term{}
		termRows.Scan(&term.ID, &term.TermID, &term.Name, &term.UserID)
		availableTerms = append(availableTerms, term)
	}
	// get off blocks for time period
	offBlocks, err := getOffBlocksStartingBefore(db, endTime.Format("2006-01-02"), announcementsGroupsSQL)
	if err != nil {
		return View{}, err
	}
	// generate list of all off days in time period
	offDays := []string{}
	for _, announcement := range announcements {
		if announcement.Type == AnnouncementType_FullOff {
			offDays = append(offDays, announcement.Date)
		}
	}
	// expand each off block into one off day per calendar day, plus
	// synthesized "Start of ..." / "End of ..." announcements
	for _, offBlock := range offBlocks {
		offDayCount := int(math.Ceil(offBlock.End.Sub(offBlock.Start).Hours() / 24))
		offDay := offBlock.Start
		announcements = append(announcements, data.PlannerAnnouncement{
			ID:    offBlock.StartID,
			Date:  offBlock.StartText,
			Text:  "Start of " + offBlock.Name,
			Grade: offBlock.Grade,
			Type:  AnnouncementType_BreakStart,
		})
		for i := 0; i < offDayCount; i++ {
			// the first day already carries the "Start of" announcement above
			if i != 0 {
				announcements = append(announcements, data.PlannerAnnouncement{
					ID:    offBlock.StartID,
					Date:  offDay.Format("2006-01-02"),
					Text:  offBlock.Name,
					Grade: offBlock.Grade,
					Type:  AnnouncementType_BreakStart,
				})
			}
			offDays = append(offDays, offDay.Format("2006-01-02"))
			offDay = offDay.Add(24 * time.Hour)
		}
		announcements = append(announcements, data.PlannerAnnouncement{
			ID:    offBlock.EndID,
			Date:  offBlock.EndText,
			Text:  "End of " + offBlock.Name,
			Grade: offBlock.Grade,
			Type:  AnnouncementType_BreakEnd,
		})
	}
	// create days in array, set friday indices
	dayCount := int(math.Ceil(endTime.Sub(startTime).Hours() / 24))
	currentDay := startTime
	viewIncludesSpecialAssessmentDay := false
	for i := 0; i < dayCount; i++ {
		view.Days = append(view.Days, ViewDay{
			DayString:     currentDay.Format("2006-01-02"),
			ShiftingIndex: -1,
			CurrentTerm:   nil,
			Announcements: []data.PlannerAnnouncement{},
			Events:        []Event{},
		})
		// NOTE(review): this assumes availableTerms has at least two entries
		// whenever a day falls inside the school year — verify against the
		// data this user is guaranteed to have.
		if currentDay.Add(time.Second).After(Day_SchoolStart) && currentDay.Before(Day_SchoolEnd) {
			if currentDay.After(Day_ExamRelief) {
				// it's the second term
				view.Days[i].CurrentTerm = &availableTerms[1]
			} else {
				// it's the first term
				view.Days[i].CurrentTerm = &availableTerms[0]
			}
		}
		for _, announcement := range announcements {
			if view.Days[i].DayString == announcement.Date {
				view.Days[i].Announcements = append(view.Days[i].Announcements, announcement)
			}
		}
		// fridays are shifting days: look up which schedule index applies
		if currentDay.Weekday() == time.Friday {
			for _, friday := range fridays {
				if view.Days[i].DayString == friday.Date {
					view.Days[i].ShiftingIndex = friday.Index
					break
				}
			}
		}
		for specialAssessmentDay, _ := range SpecialAssessmentDays {
			if view.Days[i].DayString == specialAssessmentDay {
				viewIncludesSpecialAssessmentDay = true
				break
			}
		}
		currentDay = currentDay.AddDate(0, 0, 1)
	}
	// get plain events
	plainEventRows, err := db.Query("SELECT id, name, `start`, `end`, `desc`, userId FROM calendar_events WHERE userId = ? AND (`end` >= ? AND `start` <= ?)", userID, startTime.Unix(), endTime.Unix())
	if err != nil {
		return View{}, err
	}
	defer plainEventRows.Close()
	for plainEventRows.Next() {
		event := Event{
			Type: PlainEvent,
		}
		data := PlainEventData{}
		plainEventRows.Scan(&event.ID, &event.Name, &event.Start, &event.End, &data.Desc, &event.UserID)
		event.Data = data
		// bucket the event into the day its start time falls on; drop events
		// whose start lies outside the generated day range
		eventStartTime := time.Unix(int64(event.Start), 0)
		dayOffset := int(math.Floor(eventStartTime.Sub(startTime).Hours() / 24))
		if dayOffset < 0 || dayOffset > len(view.Days)-1 {
			continue
		}
		view.Days[dayOffset].Events = append(view.Days[dayOffset].Events, event)
	}
	// get homework events
	hwEventRows, err := db.Query("SELECT calendar_hwevents.id, homework.id, homework.name, homework.`due`, homework.`desc`, homework.`complete`, homework.classId, homework.userId, calendar_hwevents.`start`, calendar_hwevents.`end`, calendar_hwevents.userId FROM calendar_hwevents INNER JOIN homework ON calendar_hwevents.homeworkId = homework.id WHERE calendar_hwevents.userId = ? AND (calendar_hwevents.`end` >= ? AND calendar_hwevents.`start` <= ?)", userID, startTime.Unix(), endTime.Unix())
	if err != nil {
		return View{}, err
	}
	defer hwEventRows.Close()
	for hwEventRows.Next() {
		event := Event{
			Type: HomeworkEvent,
		}
		data := HomeworkEventData{}
		hwEventRows.Scan(&event.ID, &data.Homework.ID, &data.Homework.Name, &data.Homework.Due, &data.Homework.Desc, &data.Homework.Complete, &data.Homework.ClassID, &data.Homework.UserID, &event.Start, &event.End, &event.UserID)
		event.Data = data
		event.Name = data.Homework.Name
		eventStartTime := time.Unix(int64(event.Start), 0)
		dayOffset := int(math.Floor(eventStartTime.Sub(startTime).Hours() / 24))
		if dayOffset < 0 || dayOffset > len(view.Days)-1 {
			continue
		}
		view.Days[dayOffset].Events = append(view.Days[dayOffset].Events, event)
	}
	// get schedule events
	for i := 0; i < dayCount; i++ {
		day := view.Days[i]
		// no term means the day is outside the school year: no schedule
		if day.CurrentTerm == nil {
			continue
		}
		dayTime, _ := time.ParseInLocation("2006-01-02", day.DayString, location)
		dayNumber := int(dayTime.Weekday())
		if dayTime.Weekday() == time.Friday {
			// shifting fridays use day numbers 4+index; fridays with no
			// recorded index get no schedule
			if day.ShiftingIndex != -1 {
				dayNumber = 4 + day.ShiftingIndex
			} else {
				continue
			}
		}
		isOff := false
		for _, offDay := range offDays {
			if day.DayString == offDay {
				isOff = true
				break
			}
		}
		if isOff {
			continue
		}
		if dayTime.Weekday() == time.Saturday || dayTime.Weekday() == time.Sunday {
			continue
		}
		rows, err := db.Query("SELECT calendar_periods.id, calendar_classes.termId, calendar_classes.sectionId, calendar_classes.`name`, calendar_classes.ownerId, calendar_classes.ownerName, calendar_periods.dayNumber, calendar_periods.block, calendar_periods.buildingName, calendar_periods.roomNumber, calendar_periods.`start`, calendar_periods.`end`, calendar_periods.userId FROM calendar_periods INNER JOIN calendar_classes ON calendar_periods.classId = calendar_classes.sectionId WHERE calendar_periods.userId = ? AND (calendar_classes.termId = ? OR calendar_classes.termId = -1) AND calendar_periods.dayNumber = ? GROUP BY calendar_periods.id, calendar_classes.termId, calendar_classes.name, calendar_classes.ownerId, calendar_classes.ownerName", userID, day.CurrentTerm.TermID, dayNumber)
		if err != nil {
			return View{}, err
		}
		defer rows.Close()
		for rows.Next() {
			event := Event{
				Type: ScheduleEvent,
			}
			data := ScheduleEventData{}
			rows.Scan(&event.ID, &data.TermID, &data.ClassID, &event.Name, &data.OwnerID, &data.OwnerName, &data.DayNumber, &data.Block, &data.BuildingName, &data.RoomNumber, &event.Start, &event.End, &event.UserID)
			event.Data = data
			// period start/end are offsets within the day; shift them to
			// absolute timestamps for this date
			event.Start += int(dayTime.Unix())
			event.End += int(dayTime.Unix())
			view.Days[i].Events = append(view.Days[i].Events, event)
		}
		if dayTime.Weekday() == time.Thursday {
			// special case: assembly
			for eventIndex, event := range view.Days[i].Events {
				// check for an "HS House" event
				// starting 11:50, ending 12:50
				if strings.HasPrefix(event.Name, "HS House") && event.Start == int(dayTime.Unix())+42600 && event.End == int(dayTime.Unix())+46200 {
					// found it
					// now look up what type of assembly period it is this week
					assemblyType, foundType := AssemblyTypeList[dayTime.Format("2006-01-02")]
					if !foundType || assemblyType == AssemblyType_Assembly {
						// set name to assembly and room to Theater
						view.Days[i].Events[eventIndex].Name = "Assembly"
						data := view.Days[i].Events[eventIndex].Data.(ScheduleEventData)
						data.RoomNumber = "Theater"
						view.Days[i].Events[eventIndex].Data = data
					} else if assemblyType == AssemblyType_LongHouse {
						// set name to long house
						view.Days[i].Events[eventIndex].Name = "Long House"
					} else if assemblyType == AssemblyType_Lab {
						// just remove it
						view.Days[i].Events = append(view.Days[i].Events[:eventIndex], view.Days[i].Events[eventIndex+1:]...)
					}
				}
			}
		}
	}
	if viewIncludesSpecialAssessmentDay {
		// get a list of the user's calendar classes
		sectionIDs := []int{}
		classRows, err := db.Query("SELECT sectionId FROM calendar_classes WHERE userId = ? GROUP BY `sectionId`", userID)
		if err != nil {
			return View{}, err
		}
		defer classRows.Close()
		for classRows.Next() {
			sectionID := -1
			classRows.Scan(&sectionID)
			sectionIDs = append(sectionIDs, sectionID)
		}
		// find the applicable special assessments
		allSpecialAssessments := []*SpecialAssessmentInfo{}
		for _, sectionID := range sectionIDs {
			specialAssessment, foundAssessment := SpecialAssessmentList[sectionID]
			if !foundAssessment {
				// no assessment for this class
				continue
			}
			isDuplicate := false
			for _, alreadyFoundSpecialAssessment := range allSpecialAssessments {
				if specialAssessment == alreadyFoundSpecialAssessment {
					isDuplicate = true
					break
				}
			}
			if isDuplicate {
				continue
			}
			allSpecialAssessments = append(allSpecialAssessments, specialAssessment)
		}
		for i := 0; i < dayCount; i++ {
			day := view.Days[i]
			dayType := SpecialAssessmentType_Unknown
			for specialAssessmentDay, specialAssessmentDayType := range SpecialAssessmentDays {
				if day.DayString == specialAssessmentDay {
					dayType = specialAssessmentDayType
					break
				}
			}
			if dayType == SpecialAssessmentType_Unknown {
				continue
			}
			var assessmentForDay *SpecialAssessmentInfo
			for _, assessment := range allSpecialAssessments {
				if assessment.Subject == dayType {
					assessmentForDay = assessment
					break
				}
			}
			if assessmentForDay == nil {
				continue
			}
			event := Event{
				Type:   ScheduleEvent,
				ID:     -1,
				Name:   fmt.Sprintf("Final - %s", assessmentForDay.ClassName),
				Start:  assessmentForDay.Start,
				End:    assessmentForDay.End,
				UserID: userID,
			}
			finalDay := startTime.Add(time.Duration(i) * 24 * time.Hour)
			// hacky time correction to shift the timezone properly
			startHour := int(math.Floor(float64(event.Start) / 60 / 60))
			startMin := int(math.Floor((float64(event.Start) - (float64(startHour) * 60 * 60)) / 60))
			event.Start = int(time.Date(finalDay.Year(), finalDay.Month(), finalDay.Day(), startHour, startMin, 0, 0, location).Unix())
			endHour := int(math.Floor(float64(event.End) / 60 / 60))
			endMin := int(math.Floor((float64(event.End) - (float64(endHour) * 60 * 60)) / 60))
			event.End = int(time.Date(finalDay.Year(), finalDay.Month(), finalDay.Day(), endHour, endMin, 0, 0, location).Unix())
			data := ScheduleEventData{
				TermID:       -1,
				ClassID:      -1,
				OwnerID:      -1,
				OwnerName:    assessmentForDay.TeacherName,
				DayNumber:    -1,
				Block:        "",
				BuildingName: "",
				RoomNumber:   assessmentForDay.RoomNumber,
			}
			event.Data = data
			view.Days[i].Events = append(view.Days[i].Events, event)
		}
	}
	return view, nil
}
|
package main
import (
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"strconv"
"github.com/ferki/poker-player-go/player"
)
// main starts the HTTP server on the port named by the PORT environment
// variable, defaulting to 4711 when it is unset or not a valid integer.
func main() {
	// Go naming: locals use lowerCamelCase, not ALL_CAPS.
	port, err := strconv.Atoi(os.Getenv("PORT"))
	if err != nil {
		port = 4711
	}
	http.HandleFunc("/", handleRequest)
	if err := http.ListenAndServe(fmt.Sprintf(":%d", port), nil); err != nil {
		log.Fatal(err)
	}
}
// handleRequest dispatches an incoming poker-engine callback based on the
// "action" form field: "check" (health probe, empty body), "bet_request"
// (writes the bet amount), "showdown" (notification, empty body), and
// "version".
func handleRequest(w http.ResponseWriter, r *http.Request) {
	if err := r.ParseForm(); err != nil {
		log.Printf("Error parsing form data: %s", err)
		http.Error(w, "Internal Server Error", 500)
		return
	}

	action := r.FormValue("action")
	log.Printf("Request method=%s url=%s action=%s from client=%s\n", r.Method, r.URL, action, r.RemoteAddr)

	switch action {
	case "check":
		fmt.Fprint(w, "")

	case "bet_request":
		state := parseGameState(r.FormValue("game_state"))
		if state == nil {
			http.Error(w, "Internal Server Error", 500)
			return
		}
		fmt.Fprintf(w, "%d", player.BetRequest(state))

	case "showdown":
		state := parseGameState(r.FormValue("game_state"))
		if state == nil {
			http.Error(w, "Internal Server Error", 500)
			return
		}
		player.Showdown(state)
		fmt.Fprint(w, "")

	case "version":
		fmt.Fprint(w, player.Version())

	default:
		http.Error(w, "Invalid action", 400)
	}
}
// parseGameState decodes the JSON-encoded game state. It returns nil (after
// logging the problem) when the payload cannot be parsed.
func parseGameState(stateStr string) *player.GameState {
	state := &player.GameState{}
	if err := json.Unmarshal([]byte(stateStr), state); err != nil {
		log.Printf("Error parsing game state: %s", err)
		return nil
	}
	return state
}
Simplify game state parsing
package main
import (
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"strconv"
"github.com/ferki/poker-player-go/player"
)
// main starts the HTTP server on the port named by the PORT environment
// variable, defaulting to 4711 when it is unset or not a valid integer.
func main() {
	port := 4711
	if p, err := strconv.Atoi(os.Getenv("PORT")); err == nil {
		port = p
	}

	http.HandleFunc("/", handleRequest)

	addr := fmt.Sprintf(":%d", port)
	if err := http.ListenAndServe(addr, nil); err != nil {
		log.Fatal(err)
	}
}
// handleRequest dispatches an incoming poker-engine callback based on the
// "action" form field: "check" (health probe, empty body), "bet_request"
// (writes the bet amount), "showdown" (notification, empty body), and
// "version".
func handleRequest(w http.ResponseWriter, request *http.Request) {
	if err := request.ParseForm(); err != nil {
		log.Printf("Error parsing form data: %s", err)
		http.Error(w, "Internal Server Error", 500)
		return
	}
	action := request.FormValue("action")
	log.Printf("Request method=%s url=%s action=%s from client=%s\n", request.Method, request.URL, action, request.RemoteAddr)
	switch action {
	case "check":
		// health probe: empty 200 response
		fmt.Fprint(w, "")
		return
	case "bet_request":
		// parseGameState logs on failure and returns nil
		gameState := parseGameState(request.FormValue("game_state"))
		if gameState == nil {
			http.Error(w, "Internal Server Error", 500)
			return
		}
		result := player.BetRequest(gameState)
		fmt.Fprintf(w, "%d", result)
		return
	case "showdown":
		gameState := parseGameState(request.FormValue("game_state"))
		if gameState == nil {
			http.Error(w, "Internal Server Error", 500)
			return
		}
		player.Showdown(gameState)
		fmt.Fprint(w, "")
		return
	case "version":
		fmt.Fprint(w, player.Version())
		return
	default:
		http.Error(w, "Invalid action", 400)
	}
}
// parseGameState decodes the JSON-encoded game state. It returns nil (after
// logging the problem) when the payload cannot be parsed.
//
// BUGFIX: the previous version declared `var gameState *player.GameState`
// (nil) and passed that nil pointer to json.Unmarshal, which rejects a nil
// pointer — so parsing failed for every request. Allocate the struct before
// unmarshaling.
func parseGameState(stateStr string) *player.GameState {
	gameState := new(player.GameState)
	if err := json.Unmarshal([]byte(stateStr), gameState); err != nil {
		log.Printf("Error parsing game state: %s", err)
		return nil
	}
	return gameState
}
|
package handlers
import (
"context"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"github.com/gorilla/handlers"
"github.com/danielkrainas/tinkersnest/api/errcode"
"github.com/danielkrainas/tinkersnest/api/v1"
"github.com/danielkrainas/tinkersnest/auth"
"github.com/danielkrainas/tinkersnest/context"
"github.com/danielkrainas/tinkersnest/cqrs"
"github.com/danielkrainas/tinkersnest/cqrs/queries"
)
// authDispatcher builds the HTTP handler for the auth endpoint; only POST is
// accepted.
func authDispatcher(ctx context.Context, r *http.Request) http.Handler {
	handler := &authHandler{Context: ctx}
	return handlers.MethodHandler{
		"POST": withTraceLogging("Authorize", handler.Auth),
	}
}
// authHandler carries the request-scoped context for the auth endpoint.
// The embedded Context is replaced as errors are appended during Auth.
type authHandler struct {
	context.Context
}
// Auth authenticates the JSON-encoded credentials in the request body
// and writes a bearer token to the response on success. Failures are
// logged and accumulated on the handler's context as API error codes.
func (ctx *authHandler) Auth(w http.ResponseWriter, r *http.Request) {
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		acontext.GetLogger(ctx).Error(err)
		ctx.Context = acontext.AppendError(ctx, errcode.ErrorCodeUnknown.WithDetail(err))
		return
	}
	creds := &v1.User{}
	if err = json.Unmarshal(body, creds); err != nil {
		acontext.GetLogger(ctx).Error(err)
		ctx.Context = acontext.AppendError(ctx, errcode.ErrorCodeUnknown.WithDetail(err))
		return
	}
	userData, err := cqrs.DispatchQuery(ctx, &queries.FindUser{Name: creds.Name})
	if err != nil {
		acontext.GetLogger(ctx).Error(err)
		ctx.Context = acontext.AppendError(ctx, errcode.ErrorCodeUnknown.WithDetail(err))
		return
	}
	user, ok := userData.(*v1.User)
	if !ok {
		acontext.GetLogger(ctx).Error("couldn't cast user data")
		ctx.Context = acontext.AppendError(ctx, errcode.ErrorCodeUnknown)
		return
	}
	// Hash the submitted password with the stored user's salt and compare.
	// SECURITY: the previous version logged the plaintext password and
	// both hashes at warn level, leaking credentials into the logs; those
	// statements have been removed.
	creds.HashedPassword = auth.HashPassword(creds.Password, user.Salt)
	if creds.HashedPassword != user.HashedPassword {
		acontext.GetLogger(ctx).Error("invalid username or password")
		ctx.Context = acontext.AppendError(ctx, errcode.ErrorCodeUnknown.WithDetail("invalid username or password"))
		return
	}
	token, err := auth.BearerToken(user)
	if err != nil {
		acontext.GetLogger(ctx).Error(err)
		ctx.Context = acontext.AppendError(ctx, errcode.ErrorCodeUnknown.WithDetail(err))
		return
	}
	if _, err = io.WriteString(w, token); err != nil {
		acontext.GetLogger(ctx).Errorf("error sending auth token: %v", err)
	}
}
remove debug logging
package handlers
import (
"context"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"github.com/gorilla/handlers"
"github.com/danielkrainas/tinkersnest/api/errcode"
"github.com/danielkrainas/tinkersnest/api/v1"
"github.com/danielkrainas/tinkersnest/auth"
"github.com/danielkrainas/tinkersnest/context"
"github.com/danielkrainas/tinkersnest/cqrs"
"github.com/danielkrainas/tinkersnest/cqrs/queries"
)
// authDispatcher builds the handler for the auth endpoint. Only POST is
// accepted, and the Auth method is wrapped with trace logging.
func authDispatcher(ctx context.Context, r *http.Request) http.Handler {
	h := &authHandler{
		Context: ctx,
	}
	return handlers.MethodHandler{
		"POST": withTraceLogging("Authorize", h.Auth),
	}
}
// authHandler carries the request-scoped context for the auth endpoint.
// The embedded Context is replaced as errors are appended during Auth.
type authHandler struct {
	context.Context
}
// Auth authenticates the JSON-encoded credentials in the request body
// and writes a bearer token to the response on success. Failures are
// logged and accumulated on the handler's context as API error codes;
// no explicit HTTP status is written here.
func (ctx *authHandler) Auth(w http.ResponseWriter, r *http.Request) {
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		acontext.GetLogger(ctx).Error(err)
		ctx.Context = acontext.AppendError(ctx, errcode.ErrorCodeUnknown.WithDetail(err))
		return
	}
	creds := &v1.User{}
	if err = json.Unmarshal(body, creds); err != nil {
		acontext.GetLogger(ctx).Error(err)
		ctx.Context = acontext.AppendError(ctx, errcode.ErrorCodeUnknown.WithDetail(err))
		return
	}
	// Look up the stored user record by the submitted name.
	userData, err := cqrs.DispatchQuery(ctx, &queries.FindUser{Name: creds.Name})
	if err != nil {
		acontext.GetLogger(ctx).Error(err)
		ctx.Context = acontext.AppendError(ctx, errcode.ErrorCodeUnknown.WithDetail(err))
		return
	}
	user, ok := userData.(*v1.User)
	if !ok {
		acontext.GetLogger(ctx).Error("couldn't cast user data")
		ctx.Context = acontext.AppendError(ctx, errcode.ErrorCodeUnknown)
		return
	}
	// Hash the submitted password with the stored user's salt and
	// compare against the stored hash.
	creds.HashedPassword = auth.HashPassword(creds.Password, user.Salt)
	if creds.HashedPassword != user.HashedPassword {
		acontext.GetLogger(ctx).Error("invalid username or password")
		ctx.Context = acontext.AppendError(ctx, errcode.ErrorCodeUnknown.WithDetail("invalid username or password"))
		return
	}
	token, err := auth.BearerToken(user)
	if err != nil {
		acontext.GetLogger(ctx).Error(err)
		ctx.Context = acontext.AppendError(ctx, errcode.ErrorCodeUnknown.WithDetail(err))
		return
	}
	if _, err = io.WriteString(w, token); err != nil {
		acontext.GetLogger(ctx).Errorf("error sending auth token: %v", err)
	}
}
|
// Google Image Search functionality
package gis
import (
"fmt"
"github.com/collinvandyck/gesture/core"
"github.com/collinvandyck/gesture/util"
"math/rand"
neturl "net/url"
"strings"
"time"
)
// Create registers the "gis" command with the bot. An optional
// config["default"] image URL is returned when a search fails; without
// it, the search error is reported to the channel.
func Create(bot *core.Gobot, config map[string]interface{}) {
	fallbackUrl, haveFallback := config["default"].(string)
	bot.ListenFor("^gis (.*)", func(msg core.Message, matches []string) core.Response {
		link, err := search(matches[1])
		if err != nil {
			if !haveFallback {
				return bot.Error(err)
			}
			link = fallbackUrl
		}
		msg.Ftfy(link)
		return bot.Stop()
	})
}
// these structs really tie the room together, man

// gisResult is a single image hit from the Google image search API.
type gisResult struct {
	Url string
}

// gisResponse mirrors the JSON envelope of the image search response.
type gisResponse struct {
	ResponseData struct {
		Results *[]gisResult // use a pointer here b/c sometimes the results are null :(
	}
}
// search queries Google image search for the given terms, probes each
// result concurrently to confirm it really is an image, and returns a
// randomly selected confirmed URL.
func search(search string) (string, error) {
	searchUrl := "http://ajax.googleapis.com/ajax/services/search/images?v=1.0&q=" + neturl.QueryEscape(search)
	var gisResponse gisResponse
	if err := util.UnmarshalUrl(searchUrl, &gisResponse); err != nil {
		return "", err
	}
	if gisResponse.ResponseData.Results == nil {
		return "", fmt.Errorf("No results were returned for query %s", search)
	}
	results := *gisResponse.ResponseData.Results
	if len(results) > 0 {
		// start a goroutine to determine image info for each response result
		// we have to use buffered channels so that the senders don't hang on send after the main method exits
		imageUrlCh := make(chan string, len(results))
		errorsCh := make(chan error, len(results))
		for _, resultUrl := range results {
			go getImageInfo(resultUrl.Url, imageUrlCh, errorsCh)
		}
		// until a timeout is met, build a collection of urls
		remainingResults := len(results)
		urls := make([]string, 0, remainingResults)
		timeout := time.After(2 * time.Second)
	SEARCH:
		for remainingResults > 0 {
			select {
			case url := <-imageUrlCh:
				urls = append(urls, url)
				remainingResults--
			case <-errorsCh:
				// Per-result failures are only counted; the previous
				// version collected them into a slice that was never read.
				remainingResults--
			case <-timeout:
				break SEARCH
			}
		}
		if len(urls) == 0 {
			return "", fmt.Errorf("No image could be found for \"%s\"", search)
		}
		return urls[rand.Intn(len(urls))], nil
	}
	return "", fmt.Errorf("No image could be found for \"%s\"", search)
}
// getImageInfo checks the response headers for url; if it points at an
// image, the suffix-normalized URL is sent on ch, otherwise an error is
// sent on failures. Exactly one message is sent per call.
func getImageInfo(url string, ch chan<- string, failures chan<- error) {
	imageUrl, contentType, err := util.ResponseHeaderContentType(url)
	if err != nil || !strings.HasPrefix(contentType, "image/") {
		failures <- fmt.Errorf("Not an image: %s", url)
		return
	}
	fixed, err := ensureSuffix(imageUrl, "."+strings.TrimPrefix(contentType, "image/"))
	if err != nil {
		failures <- err
		return
	}
	ch <- fixed
}
// ensureSuffix ensures a url ends with suffixes like .jpg, .png, etc.
// The .jpg/.jpeg pair is treated as interchangeable. When the suffix is
// missing, it is appended behind a throwaway query parameter so the URL
// still ends with the desired extension.
func ensureSuffix(url, suffix string) (string, error) {
	unescaped, err := neturl.QueryUnescape(url)
	if err != nil {
		return "", err
	}
	lowerUrl := strings.ToLower(unescaped)
	switch lowerSuffix := strings.ToLower(suffix); {
	case lowerSuffix == ".jpeg" && strings.HasSuffix(lowerUrl, ".jpg"):
		return url, nil
	case lowerSuffix == ".jpg" && strings.HasSuffix(lowerUrl, ".jpeg"):
		return url, nil
	case strings.HasSuffix(lowerUrl, lowerSuffix):
		return url, nil
	case strings.Contains(url, "?"):
		return url + "&lol=lol" + suffix, nil
	default:
		return url + "?lol=lol" + suffix, nil
	}
}
Add the ability to exclude certain channels
// Google Image Search functionality
package gis
import (
"fmt"
"github.com/collinvandyck/gesture/core"
"github.com/collinvandyck/gesture/util"
"math/rand"
neturl "net/url"
"strings"
"time"
"log"
)
// Create registers the "gis" command with the bot. Channels listed in
// config["exclude"] are silently skipped; config["default"] optionally
// supplies a fallback image URL used when a search fails.
func Create(bot *core.Gobot, config map[string]interface{}) {
	fallbackUrl, haveFallback := config["default"].(string)
	excluded := getExclusions(config)
	bot.ListenFor("^gis (.*)", func(msg core.Message, matches []string) core.Response {
		log.Printf("searching for %s on channel %s", matches[1], msg.Channel)
		for _, channel := range excluded {
			log.Printf("checking excluded channel: %s", channel)
			if channel == msg.Channel {
				log.Printf("Stopping")
				return bot.Stop()
			}
		}
		link, err := search(matches[1])
		if err != nil {
			if !haveFallback {
				return bot.Error(err)
			}
			link = fallbackUrl
		}
		msg.Ftfy(link)
		return bot.Stop()
	})
}
// getExclusions reads the excluded channel names from config["exclude"].
// A missing or mistyped value yields an empty slice; non-string entries
// within the list are skipped.
func getExclusions(config map[string]interface{}) []string {
	result := make([]string, 0)
	exclude, ok := config["exclude"].([]interface{})
	if !ok {
		return result
	}
	for _, ex := range exclude {
		// Checked assertion: the previous version used ex.(string) and
		// panicked on any non-string config entry.
		if name, ok := ex.(string); ok {
			result = append(result, name)
		}
	}
	return result
}
// these structs really tie the room together, man

// gisResult is a single image hit from the Google image search API.
type gisResult struct {
	Url string
}

// gisResponse mirrors the JSON envelope of the image search response.
type gisResponse struct {
	ResponseData struct {
		Results *[]gisResult // use a pointer here b/c sometimes the results are null :(
	}
}
// search queries Google image search for the given terms, probes each
// result concurrently to confirm it really is an image, and returns a
// randomly selected confirmed URL.
func search(search string) (string, error) {
	searchUrl := "http://ajax.googleapis.com/ajax/services/search/images?v=1.0&q=" + neturl.QueryEscape(search)
	var gisResponse gisResponse
	if err := util.UnmarshalUrl(searchUrl, &gisResponse); err != nil {
		return "", err
	}
	if gisResponse.ResponseData.Results == nil {
		return "", fmt.Errorf("No results were returned for query %s", search)
	}
	results := *gisResponse.ResponseData.Results
	if len(results) > 0 {
		// start a goroutine to determine image info for each response result
		// we have to use buffered channels so that the senders don't hang on send after the main method exits
		imageUrlCh := make(chan string, len(results))
		errorsCh := make(chan error, len(results))
		for _, resultUrl := range results {
			go getImageInfo(resultUrl.Url, imageUrlCh, errorsCh)
		}
		// until a timeout is met, build a collection of urls
		remainingResults := len(results)
		urls := make([]string, 0, remainingResults)
		timeout := time.After(2 * time.Second)
	SEARCH:
		for remainingResults > 0 {
			select {
			case url := <-imageUrlCh:
				urls = append(urls, url)
				remainingResults--
			case <-errorsCh:
				// Per-result failures are only counted; the previous
				// version collected them into a slice that was never read.
				remainingResults--
			case <-timeout:
				break SEARCH
			}
		}
		if len(urls) == 0 {
			return "", fmt.Errorf("No image could be found for \"%s\"", search)
		}
		return urls[rand.Intn(len(urls))], nil
	}
	return "", fmt.Errorf("No image could be found for \"%s\"", search)
}
// getImageInfo checks the response headers for url; if it points at an
// image, the suffix-normalized URL is sent on ch, otherwise an error is
// sent on failures. Exactly one message is sent per call.
func getImageInfo(url string, ch chan<- string, failures chan<- error) {
	imageUrl, contentType, err := util.ResponseHeaderContentType(url)
	if err != nil || !strings.HasPrefix(contentType, "image/") {
		failures <- fmt.Errorf("Not an image: %s", url)
		return
	}
	fixed, err := ensureSuffix(imageUrl, "."+strings.TrimPrefix(contentType, "image/"))
	if err != nil {
		failures <- err
		return
	}
	ch <- fixed
}
// ensureSuffix ensures a url ends with suffixes like .jpg, .png, etc.
// The .jpg/.jpeg pair is treated as interchangeable. When the suffix is
// missing, it is appended behind a throwaway query parameter so the URL
// still ends with the desired extension.
func ensureSuffix(url, suffix string) (string, error) {
	unescaped, err := neturl.QueryUnescape(url)
	if err != nil {
		return "", err
	}
	lowerUrl := strings.ToLower(unescaped)
	switch lowerSuffix := strings.ToLower(suffix); {
	case lowerSuffix == ".jpeg" && strings.HasSuffix(lowerUrl, ".jpg"):
		return url, nil
	case lowerSuffix == ".jpg" && strings.HasSuffix(lowerUrl, ".jpeg"):
		return url, nil
	case strings.HasSuffix(lowerUrl, lowerSuffix):
		return url, nil
	case strings.Contains(url, "?"):
		return url + "&lol=lol" + suffix, nil
	default:
		return url + "?lol=lol" + suffix, nil
	}
}
|
package cluster
import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/flynn/flynn/Godeps/_workspace/src/golang.org/x/crypto/ssh"
"github.com/flynn/flynn/pkg/attempt"
"github.com/flynn/flynn/pkg/random"
)
// NewVMManager returns a VMManager whose tap devices attach to bridge.
func NewVMManager(bridge *Bridge) *VMManager {
	return &VMManager{taps: &TapManager{bridge}}
}

// VMManager creates qemu-based VM instances, allocating one tap device
// per instance.
type VMManager struct {
	taps *TapManager
}

// VMConfig describes how a VM instance is booted.
type VMConfig struct {
	Kernel string              // kernel image path; defaults to "vmlinuz"
	User   int                 // uid the qemu process runs as (via sudo)
	Group  int                 // gid the qemu process runs as (via sudo)
	Memory string              // qemu -m value; empty uses qemu's default
	Cores  int                 // qemu -smp value; 0 uses qemu's default
	Drives map[string]*VMDrive // qemu flag name (e.g. "hda") -> drive
	Args   []string            // extra qemu arguments
	Out    io.Writer           `json:"-"` // VM stdout/stderr; defaults to a per-instance log file
	netFS  string              // temp dir exported to the guest as the "netfs" 9p share
}

// VMDrive describes a single VM disk image.
type VMDrive struct {
	FS   string // path to the filesystem image
	COW  bool   // boot from a qcow2 copy-on-write overlay of FS
	Temp bool   // remove the overlay directory during cleanup
}

// NewInstance allocates a tap device for c and returns an un-started
// Instance with a random 8-character ID and the tap's remote IP.
func (v *VMManager) NewInstance(c *VMConfig) (*Instance, error) {
	var err error
	inst := &Instance{ID: random.String(8), VMConfig: c}
	if c.Kernel == "" {
		c.Kernel = "vmlinuz"
	}
	if c.Out == nil {
		c.Out, err = os.Create("flynn-" + inst.ID + ".log")
		if err != nil {
			return nil, err
		}
	}
	inst.tap, err = v.taps.NewTap(c.User, c.Group)
	if err != nil {
		return nil, err
	}
	inst.IP = inst.tap.RemoteIP.String()
	return inst, nil
}

// Instance is a single qemu VM created by VMManager.
type Instance struct {
	ID string `json:"id"`
	IP string `json:"ip"`
	*VMConfig
	tap       *Tap
	cmd       *exec.Cmd
	tempFiles []string     // paths removed by cleanup
	sshMtx    sync.RWMutex // guards ssh; see dialSSH for the lock upgrade
	ssh       *ssh.Client  // lazily dialed by dialSSH
	initial   bool
}
// writeInterfaceConfig writes the tap's interface configuration for
// eth0 into a fresh world-readable temp dir (registered for cleanup)
// that Start later exports to the guest as the "netfs" 9p share.
func (i *Instance) writeInterfaceConfig() error {
	dir, err := ioutil.TempDir("", "netfs-")
	if err != nil {
		return err
	}
	i.tempFiles = append(i.tempFiles, dir)
	i.netFS = dir
	// 0755 so the qemu process (running as i.User) can read the share.
	if err := os.Chmod(dir, 0755); err != nil {
		os.RemoveAll(dir)
		return err
	}
	f, err := os.Create(filepath.Join(dir, "eth0"))
	if err != nil {
		os.RemoveAll(dir)
		return err
	}
	defer f.Close()
	return i.tap.WriteInterfaceConfig(f)
}

// cleanup removes the registered temp files, tears down the tap device,
// and closes any cached SSH connection.
func (i *Instance) cleanup() {
	for _, f := range i.tempFiles {
		fmt.Printf("removing temp file %s\n", f)
		if err := os.RemoveAll(f); err != nil {
			fmt.Printf("could not remove temp file %s: %s\n", f, err)
		}
	}
	if err := i.tap.Close(); err != nil {
		fmt.Printf("could not close tap device %s: %s\n", i.tap.Name, err)
	}
	i.tempFiles = nil
	// Take the write lock so the SSH client isn't closed while a
	// concurrent Run holds the read lock.
	i.sshMtx.Lock()
	defer i.sshMtx.Unlock()
	if i.ssh != nil {
		i.ssh.Close()
	}
}
// Start assembles the qemu argument list (network, netfs share, memory,
// cores, drives) and launches the VM via sudo under the configured
// user/group, redirecting output to i.Out. On failure, any resources
// created so far are cleaned up.
func (i *Instance) Start() error {
	// The interface config is exported to the guest over the read-only
	// "netfs" 9p share. The previous version ignored this error and then
	// used the empty i.netFS path in the -virtfs argument below.
	if err := i.writeInterfaceConfig(); err != nil {
		return err
	}
	// Randomize the low three MAC octets within the QEMU/KVM OUI.
	macRand := random.Bytes(3)
	macaddr := fmt.Sprintf("52:54:00:%02x:%02x:%02x", macRand[0], macRand[1], macRand[2])
	i.Args = append(i.Args,
		"-enable-kvm",
		"-kernel", i.Kernel,
		"-append", `"root=/dev/sda"`,
		"-net", "nic,macaddr="+macaddr,
		"-net", "tap,ifname="+i.tap.Name+",script=no,downscript=no",
		"-virtfs", "fsdriver=local,path="+i.netFS+",security_model=passthrough,readonly,mount_tag=netfs",
		"-nographic",
	)
	if i.Memory != "" {
		i.Args = append(i.Args, "-m", i.Memory)
	}
	if i.Cores > 0 {
		i.Args = append(i.Args, "-smp", strconv.Itoa(i.Cores))
	}
	var err error
	for n, d := range i.Drives {
		if d.COW {
			fs, err := i.createCOW(d.FS, d.Temp)
			if err != nil {
				i.cleanup()
				return err
			}
			d.FS = fs
		}
		i.Args = append(i.Args, fmt.Sprintf("-%s", n), d.FS)
	}
	i.cmd = exec.Command("sudo", append([]string{"-u", fmt.Sprintf("#%d", i.User), "-g", fmt.Sprintf("#%d", i.Group), "-H", "/usr/bin/qemu-system-x86_64"}, i.Args...)...)
	i.cmd.Stdout = i.Out
	i.cmd.Stderr = i.Out
	if err = i.cmd.Start(); err != nil {
		i.cleanup()
	}
	return err
}
// createCOW creates a qcow2 copy-on-write overlay backed by image and
// returns its path. When temp is true the containing directory is
// registered for removal by cleanup.
func (i *Instance) createCOW(image string, temp bool) (string, error) {
	name := strings.TrimSuffix(filepath.Base(image), filepath.Ext(image))
	dir, err := ioutil.TempDir("", name+"-")
	if err != nil {
		return "", err
	}
	if temp {
		i.tempFiles = append(i.tempFiles, dir)
	}
	// Chown so the qemu process (run via sudo as i.User/i.Group) can
	// open the overlay.
	if err := os.Chown(dir, i.User, i.Group); err != nil {
		return "", err
	}
	path := filepath.Join(dir, "rootfs.img")
	cmd := exec.Command("qemu-img", "create", "-f", "qcow2", "-b", image, path)
	if err = cmd.Run(); err != nil {
		return "", fmt.Errorf("failed to create COW filesystem: %s", err.Error())
	}
	if err := os.Chown(path, i.User, i.Group); err != nil {
		return "", err
	}
	return path, nil
}
// Wait blocks until the VM process exits or the timeout elapses, in
// which case a "timeout" error is returned.
func (i *Instance) Wait(timeout time.Duration) error {
	// Buffered so the goroutine can deliver cmd.Wait's result even after
	// a timeout return; with an unbuffered channel the goroutine would
	// block on send forever and leak.
	done := make(chan error, 1)
	go func() {
		done <- i.cmd.Wait()
	}()
	select {
	case err := <-done:
		return err
	case <-time.After(timeout):
		return errors.New("timeout")
	}
}
// Shutdown attempts a clean "sudo poweroff" over SSH, escalating to
// Kill if the command fails or the VM does not exit within 5 seconds.
func (i *Instance) Shutdown() error {
	if err := i.Run("sudo poweroff", nil); err != nil {
		return i.Kill()
	}
	if err := i.Wait(5 * time.Second); err != nil {
		return i.Kill()
	}
	i.cleanup()
	return nil
}

// Kill sends SIGTERM to the VM process and escalates to SIGKILL if it
// has not exited after 5 seconds. Resources are cleaned up regardless.
func (i *Instance) Kill() error {
	defer i.cleanup()
	if err := i.cmd.Process.Signal(syscall.SIGTERM); err != nil {
		return err
	}
	if err := i.Wait(5 * time.Second); err != nil {
		return i.cmd.Process.Kill()
	}
	return nil
}
// dialSSH lazily establishes the cached SSH connection to the instance.
// The caller must hold the read side of sshMtx; to write i.ssh this
// upgrades to the write lock and double-checks afterwards.
func (i *Instance) dialSSH(stderr io.Writer) error {
	if i.ssh != nil {
		return nil
	}
	// Lock upgrade: drop the caller's read lock, take the write lock.
	// The deferred calls run LIFO on return: release the write lock,
	// then re-acquire the read lock for the caller.
	i.sshMtx.RUnlock()
	i.sshMtx.Lock()
	defer i.sshMtx.RLock()
	defer i.sshMtx.Unlock()
	if i.ssh != nil {
		// Another goroutine connected while we waited for the lock.
		return nil
	}
	var sc *ssh.Client
	err := sshAttempts.Run(func() (err error) {
		if stderr != nil {
			fmt.Fprintf(stderr, "Attempting to ssh to %s:22...\n", i.IP)
		}
		sc, err = ssh.Dial("tcp", i.IP+":22", &ssh.ClientConfig{
			User: "ubuntu",
			Auth: []ssh.AuthMethod{ssh.Password("ubuntu")},
		})
		return
	})
	if sc != nil {
		i.ssh = sc
	}
	return err
}

// sshAttempts retries the SSH dial once per second for up to 5 minutes
// (and at least 5 attempts) while the VM boots.
var sshAttempts = attempt.Strategy{
	Min:   5,
	Total: 5 * time.Minute,
	Delay: time.Second,
}
// Run executes command on the instance over SSH, wiring the optional
// stream set to the session's stdin/stdout/stderr.
func (i *Instance) Run(command string, s *Streams) error {
	if s == nil {
		s = &Streams{}
	}
	// Hold the read lock for the whole session so cleanup cannot close
	// the SSH client out from under us; dialSSH upgrades it if needed.
	i.sshMtx.RLock()
	defer i.sshMtx.RUnlock()
	if err := i.dialSSH(s.Stderr); err != nil {
		return err
	}
	sess, err := i.ssh.NewSession()
	if err != nil {
		return err
	}
	defer sess.Close()
	sess.Stdin = s.Stdin
	sess.Stdout = s.Stdout
	sess.Stderr = s.Stderr
	if err := sess.Run(command); err != nil {
		return fmt.Errorf("failed to run command on %s: %s", i.IP, err)
	}
	return nil
}

// Drive returns the named drive config, or nil if it does not exist.
func (i *Instance) Drive(name string) *VMDrive {
	return i.Drives[name]
}
test: Reduce SSH connect timeout to 30s
The connection is usually established in ~3s, so a 30s timeout seems
reasonable enough.
Signed-off-by: Lewis Marshall <748e1641a368164906d4a0c0e3965345453dcc93@lmars.net>
package cluster
import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/flynn/flynn/Godeps/_workspace/src/golang.org/x/crypto/ssh"
"github.com/flynn/flynn/pkg/attempt"
"github.com/flynn/flynn/pkg/random"
)
// NewVMManager returns a VMManager whose tap devices attach to bridge.
func NewVMManager(bridge *Bridge) *VMManager {
	return &VMManager{taps: &TapManager{bridge}}
}

// VMManager creates qemu-based VM instances, allocating one tap device
// per instance.
type VMManager struct {
	taps *TapManager
}

// VMConfig describes how a VM instance is booted.
type VMConfig struct {
	Kernel string              // kernel image path; defaults to "vmlinuz"
	User   int                 // uid the qemu process runs as (via sudo)
	Group  int                 // gid the qemu process runs as (via sudo)
	Memory string              // qemu -m value; empty uses qemu's default
	Cores  int                 // qemu -smp value; 0 uses qemu's default
	Drives map[string]*VMDrive // qemu flag name (e.g. "hda") -> drive
	Args   []string            // extra qemu arguments
	Out    io.Writer           `json:"-"` // VM stdout/stderr; defaults to a per-instance log file
	netFS  string              // temp dir exported to the guest as the "netfs" 9p share
}

// VMDrive describes a single VM disk image.
type VMDrive struct {
	FS   string // path to the filesystem image
	COW  bool   // boot from a qcow2 copy-on-write overlay of FS
	Temp bool   // remove the overlay directory during cleanup
}

// NewInstance allocates a tap device for c and returns an un-started
// Instance with a random 8-character ID and the tap's remote IP.
func (v *VMManager) NewInstance(c *VMConfig) (*Instance, error) {
	var err error
	inst := &Instance{ID: random.String(8), VMConfig: c}
	if c.Kernel == "" {
		c.Kernel = "vmlinuz"
	}
	if c.Out == nil {
		c.Out, err = os.Create("flynn-" + inst.ID + ".log")
		if err != nil {
			return nil, err
		}
	}
	inst.tap, err = v.taps.NewTap(c.User, c.Group)
	if err != nil {
		return nil, err
	}
	inst.IP = inst.tap.RemoteIP.String()
	return inst, nil
}

// Instance is a single qemu VM created by VMManager.
type Instance struct {
	ID string `json:"id"`
	IP string `json:"ip"`
	*VMConfig
	tap       *Tap
	cmd       *exec.Cmd
	tempFiles []string     // paths removed by cleanup
	sshMtx    sync.RWMutex // guards ssh; see dialSSH for the lock upgrade
	ssh       *ssh.Client  // lazily dialed by dialSSH
	initial   bool
}
// writeInterfaceConfig writes the tap's interface configuration for
// eth0 into a fresh world-readable temp dir (registered for cleanup)
// that Start later exports to the guest as the "netfs" 9p share.
func (i *Instance) writeInterfaceConfig() error {
	dir, err := ioutil.TempDir("", "netfs-")
	if err != nil {
		return err
	}
	i.tempFiles = append(i.tempFiles, dir)
	i.netFS = dir
	// 0755 so the qemu process (running as i.User) can read the share.
	if err := os.Chmod(dir, 0755); err != nil {
		os.RemoveAll(dir)
		return err
	}
	f, err := os.Create(filepath.Join(dir, "eth0"))
	if err != nil {
		os.RemoveAll(dir)
		return err
	}
	defer f.Close()
	return i.tap.WriteInterfaceConfig(f)
}

// cleanup removes the registered temp files, tears down the tap device,
// and closes any cached SSH connection.
func (i *Instance) cleanup() {
	for _, f := range i.tempFiles {
		fmt.Printf("removing temp file %s\n", f)
		if err := os.RemoveAll(f); err != nil {
			fmt.Printf("could not remove temp file %s: %s\n", f, err)
		}
	}
	if err := i.tap.Close(); err != nil {
		fmt.Printf("could not close tap device %s: %s\n", i.tap.Name, err)
	}
	i.tempFiles = nil
	// Take the write lock so the SSH client isn't closed while a
	// concurrent Run holds the read lock.
	i.sshMtx.Lock()
	defer i.sshMtx.Unlock()
	if i.ssh != nil {
		i.ssh.Close()
	}
}
// Start assembles the qemu argument list (network, netfs share, memory,
// cores, drives) and launches the VM via sudo under the configured
// user/group, redirecting output to i.Out. On failure, any resources
// created so far are cleaned up.
func (i *Instance) Start() error {
	// The interface config is exported to the guest over the read-only
	// "netfs" 9p share. The previous version ignored this error and then
	// used the empty i.netFS path in the -virtfs argument below.
	if err := i.writeInterfaceConfig(); err != nil {
		return err
	}
	// Randomize the low three MAC octets within the QEMU/KVM OUI.
	macRand := random.Bytes(3)
	macaddr := fmt.Sprintf("52:54:00:%02x:%02x:%02x", macRand[0], macRand[1], macRand[2])
	i.Args = append(i.Args,
		"-enable-kvm",
		"-kernel", i.Kernel,
		"-append", `"root=/dev/sda"`,
		"-net", "nic,macaddr="+macaddr,
		"-net", "tap,ifname="+i.tap.Name+",script=no,downscript=no",
		"-virtfs", "fsdriver=local,path="+i.netFS+",security_model=passthrough,readonly,mount_tag=netfs",
		"-nographic",
	)
	if i.Memory != "" {
		i.Args = append(i.Args, "-m", i.Memory)
	}
	if i.Cores > 0 {
		i.Args = append(i.Args, "-smp", strconv.Itoa(i.Cores))
	}
	var err error
	for n, d := range i.Drives {
		if d.COW {
			fs, err := i.createCOW(d.FS, d.Temp)
			if err != nil {
				i.cleanup()
				return err
			}
			d.FS = fs
		}
		i.Args = append(i.Args, fmt.Sprintf("-%s", n), d.FS)
	}
	i.cmd = exec.Command("sudo", append([]string{"-u", fmt.Sprintf("#%d", i.User), "-g", fmt.Sprintf("#%d", i.Group), "-H", "/usr/bin/qemu-system-x86_64"}, i.Args...)...)
	i.cmd.Stdout = i.Out
	i.cmd.Stderr = i.Out
	if err = i.cmd.Start(); err != nil {
		i.cleanup()
	}
	return err
}
// createCOW creates a qcow2 copy-on-write overlay backed by image and
// returns its path. When temp is true the containing directory is
// registered for removal by cleanup.
func (i *Instance) createCOW(image string, temp bool) (string, error) {
	name := strings.TrimSuffix(filepath.Base(image), filepath.Ext(image))
	dir, err := ioutil.TempDir("", name+"-")
	if err != nil {
		return "", err
	}
	if temp {
		i.tempFiles = append(i.tempFiles, dir)
	}
	// Chown so the qemu process (run via sudo as i.User/i.Group) can
	// open the overlay.
	if err := os.Chown(dir, i.User, i.Group); err != nil {
		return "", err
	}
	path := filepath.Join(dir, "rootfs.img")
	cmd := exec.Command("qemu-img", "create", "-f", "qcow2", "-b", image, path)
	if err = cmd.Run(); err != nil {
		return "", fmt.Errorf("failed to create COW filesystem: %s", err.Error())
	}
	if err := os.Chown(path, i.User, i.Group); err != nil {
		return "", err
	}
	return path, nil
}
// Wait blocks until the VM process exits or the timeout elapses, in
// which case a "timeout" error is returned.
func (i *Instance) Wait(timeout time.Duration) error {
	// Buffered so the goroutine can deliver cmd.Wait's result even after
	// a timeout return; with an unbuffered channel the goroutine would
	// block on send forever and leak.
	done := make(chan error, 1)
	go func() {
		done <- i.cmd.Wait()
	}()
	select {
	case err := <-done:
		return err
	case <-time.After(timeout):
		return errors.New("timeout")
	}
}
// Shutdown attempts a clean "sudo poweroff" over SSH, escalating to
// Kill if the command fails or the VM does not exit within 5 seconds.
func (i *Instance) Shutdown() error {
	if err := i.Run("sudo poweroff", nil); err != nil {
		return i.Kill()
	}
	if err := i.Wait(5 * time.Second); err != nil {
		return i.Kill()
	}
	i.cleanup()
	return nil
}

// Kill sends SIGTERM to the VM process and escalates to SIGKILL if it
// has not exited after 5 seconds. Resources are cleaned up regardless.
func (i *Instance) Kill() error {
	defer i.cleanup()
	if err := i.cmd.Process.Signal(syscall.SIGTERM); err != nil {
		return err
	}
	if err := i.Wait(5 * time.Second); err != nil {
		return i.cmd.Process.Kill()
	}
	return nil
}
// dialSSH lazily establishes the cached SSH connection to the instance.
// The caller must hold the read side of sshMtx; to write i.ssh this
// upgrades to the write lock and double-checks afterwards.
func (i *Instance) dialSSH(stderr io.Writer) error {
	if i.ssh != nil {
		return nil
	}
	// Lock upgrade: drop the caller's read lock, take the write lock.
	// The deferred calls run LIFO on return: release the write lock,
	// then re-acquire the read lock for the caller.
	i.sshMtx.RUnlock()
	i.sshMtx.Lock()
	defer i.sshMtx.RLock()
	defer i.sshMtx.Unlock()
	if i.ssh != nil {
		// Another goroutine connected while we waited for the lock.
		return nil
	}
	var sc *ssh.Client
	err := sshAttempts.Run(func() (err error) {
		if stderr != nil {
			fmt.Fprintf(stderr, "Attempting to ssh to %s:22...\n", i.IP)
		}
		sc, err = ssh.Dial("tcp", i.IP+":22", &ssh.ClientConfig{
			User: "ubuntu",
			Auth: []ssh.AuthMethod{ssh.Password("ubuntu")},
		})
		return
	})
	if sc != nil {
		i.ssh = sc
	}
	return err
}

// sshAttempts retries the SSH dial once per second for up to 30 seconds
// (and at least 5 attempts); the connection usually succeeds in ~3s.
var sshAttempts = attempt.Strategy{
	Min:   5,
	Total: 30 * time.Second,
	Delay: time.Second,
}
// Run executes command on the instance over SSH, wiring the optional
// stream set to the session's stdin/stdout/stderr.
func (i *Instance) Run(command string, s *Streams) error {
	if s == nil {
		s = &Streams{}
	}
	// Hold the read lock for the whole session so cleanup cannot close
	// the SSH client out from under us; dialSSH upgrades it if needed.
	i.sshMtx.RLock()
	defer i.sshMtx.RUnlock()
	if err := i.dialSSH(s.Stderr); err != nil {
		return err
	}
	sess, err := i.ssh.NewSession()
	if err != nil {
		return err
	}
	defer sess.Close()
	sess.Stdin = s.Stdin
	sess.Stdout = s.Stdout
	sess.Stderr = s.Stderr
	if err := sess.Run(command); err != nil {
		return fmt.Errorf("failed to run command on %s: %s", i.IP, err)
	}
	return nil
}

// Drive returns the named drive config, or nil if it does not exist.
func (i *Instance) Drive(name string) *VMDrive {
	return i.Drives[name]
}
|
/*
Copyright 2020 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package secretsmanager
import (
"bytes"
"testing"
jks "github.com/pavel-v-chernykh/keystore-go"
"software.sslmate.com/src/go-pkcs12"
cmapi "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2"
"github.com/jetstack/cert-manager/pkg/util/pki"
)
// mustGeneratePrivateKey returns a 2048-bit RSA private key, PEM-encoded
// using the given encoding (PKCS1 or PKCS8), failing the test on error.
func mustGeneratePrivateKey(t *testing.T, encoding cmapi.KeyEncoding) []byte {
	pk, err := pki.GenerateRSAPrivateKey(2048)
	if err != nil {
		t.Fatal(err)
	}
	pkBytes, err := pki.EncodePrivateKey(pk, encoding)
	if err != nil {
		t.Fatal(err)
	}
	return pkBytes
}

// mustSelfSignCertificate self-signs a certificate for "example.com"
// with the given PEM-encoded key (generating a fresh PKCS8 key when
// pkBytes is nil) and returns the PEM-encoded certificate.
func mustSelfSignCertificate(t *testing.T, pkBytes []byte) []byte {
	if pkBytes == nil {
		pkBytes = mustGeneratePrivateKey(t, cmapi.PKCS8)
	}
	pk, err := pki.DecodePrivateKeyBytes(pkBytes)
	if err != nil {
		t.Fatal(err)
	}
	x509Crt, err := pki.GenerateTemplate(&cmapi.Certificate{
		Spec: cmapi.CertificateSpec{
			DNSNames: []string{"example.com"},
		},
	})
	if err != nil {
		t.Fatal(err)
	}
	// Sign the template with its own key (self-signed).
	certBytes, _, err := pki.SignCertificate(x509Crt, x509Crt, pk.Public(), pk)
	if err != nil {
		t.Fatal(err)
	}
	return certBytes
}
// TestEncodeJKSKeystore verifies encodeJKSKeystore output for PKCS1 and
// PKCS8 keys, with and without CA data, by decoding the keystore and
// checking which entries are present.
func TestEncodeJKSKeystore(t *testing.T) {
	tests := map[string]struct {
		password               string
		rawKey, certPEM, caPEM []byte
		verify                 func(t *testing.T, out []byte, err error)
	}{
		"encode a JKS bundle for a PKCS1 key and certificate only": {
			password: "password",
			rawKey:   mustGeneratePrivateKey(t, cmapi.PKCS1),
			certPEM:  mustSelfSignCertificate(t, nil),
			verify: func(t *testing.T, out []byte, err error) {
				if err != nil {
					t.Errorf("expected no error but got: %v", err)
					return
				}
				buf := bytes.NewBuffer(out)
				ks, err := jks.Decode(buf, []byte("password"))
				if err != nil {
					t.Errorf("error decoding keystore: %v", err)
					return
				}
				if ks["certificate"] == nil {
					t.Errorf("no certificate data found in keystore")
				}
				if ks["ca"] != nil {
					t.Errorf("unexpected ca data found in keystore")
				}
			},
		},
		"encode a JKS bundle for a PKCS8 key and certificate only": {
			password: "password",
			rawKey:   mustGeneratePrivateKey(t, cmapi.PKCS8),
			certPEM:  mustSelfSignCertificate(t, nil),
			verify: func(t *testing.T, out []byte, err error) {
				if err != nil {
					t.Errorf("expected no error but got: %v", err)
					// return added for consistency with the PKCS1 case:
					// decoding bogus output would only add noise.
					return
				}
				buf := bytes.NewBuffer(out)
				ks, err := jks.Decode(buf, []byte("password"))
				if err != nil {
					t.Errorf("error decoding keystore: %v", err)
					return
				}
				if ks["certificate"] == nil {
					t.Errorf("no certificate data found in keystore")
				}
				if ks["ca"] != nil {
					t.Errorf("unexpected ca data found in keystore")
				}
			},
		},
		"encode a JKS bundle for a key, certificate and ca": {
			password: "password",
			rawKey:   mustGeneratePrivateKey(t, cmapi.PKCS8),
			certPEM:  mustSelfSignCertificate(t, nil),
			caPEM:    mustSelfSignCertificate(t, nil),
			verify: func(t *testing.T, out []byte, err error) {
				if err != nil {
					t.Errorf("expected no error but got: %v", err)
					return
				}
				buf := bytes.NewBuffer(out)
				ks, err := jks.Decode(buf, []byte("password"))
				if err != nil {
					t.Errorf("error decoding keystore: %v", err)
					return
				}
				if ks["certificate"] == nil {
					t.Errorf("no certificate data found in keystore")
				}
				if ks["ca"] == nil {
					t.Errorf("no ca data found in keystore")
				}
			},
		},
	}
	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			out, err := encodeJKSKeystore([]byte(test.password), test.rawKey, test.certPEM, test.caPEM)
			test.verify(t, out, err)
		})
	}
}
// TestEncodePKCS12Keystore verifies encodePKCS12Keystore output for
// PKCS1 and PKCS8 keys, with and without CA data.
func TestEncodePKCS12Keystore(t *testing.T) {
	tests := map[string]struct {
		password               string
		rawKey, certPEM, caPEM []byte
		verify                 func(t *testing.T, out []byte, err error)
	}{
		// Test names previously said "JKS bundle" — a copy/paste from
		// TestEncodeJKSKeystore; this keystore is PKCS12.
		"encode a PKCS12 bundle for a PKCS1 key and certificate only": {
			password: "password",
			rawKey:   mustGeneratePrivateKey(t, cmapi.PKCS1),
			certPEM:  mustSelfSignCertificate(t, nil),
			verify: func(t *testing.T, out []byte, err error) {
				if err != nil {
					t.Errorf("expected no error but got: %v", err)
					return
				}
				pk, cert, err := pkcs12.Decode(out, "password")
				if err != nil {
					t.Errorf("error decoding keystore: %v", err)
					return
				}
				if cert == nil {
					t.Errorf("no certificate data found in keystore")
				}
				if pk == nil {
					// Message fixed: this branch checks the private key,
					// not CA data.
					t.Errorf("no private key data found in keystore")
				}
			},
		},
		"encode a PKCS12 bundle for a PKCS8 key and certificate only": {
			password: "password",
			rawKey:   mustGeneratePrivateKey(t, cmapi.PKCS8),
			certPEM:  mustSelfSignCertificate(t, nil),
			verify: func(t *testing.T, out []byte, err error) {
				if err != nil {
					t.Errorf("expected no error but got: %v", err)
					return
				}
				pk, cert, err := pkcs12.Decode(out, "password")
				if err != nil {
					t.Errorf("error decoding keystore: %v", err)
					return
				}
				if cert == nil {
					t.Errorf("no certificate data found in keystore")
				}
				if pk == nil {
					t.Errorf("no private key data found in keystore")
				}
			},
		},
		"encode a PKCS12 bundle for a key, certificate and ca": {
			password: "password",
			rawKey:   mustGeneratePrivateKey(t, cmapi.PKCS8),
			certPEM:  mustSelfSignCertificate(t, nil),
			caPEM:    mustSelfSignCertificate(t, nil),
			verify: func(t *testing.T, out []byte, err error) {
				if err != nil {
					t.Errorf("expected no error but got: %v", err)
					return
				}
				// The pkcs12 package does not expose a way to decode the CA
				// data that has been written.
				// It will return an error when attempting to decode a file
				// with more than one 'certbag', so we just ensure the error
				// returned is the expected error and don't inspect the keystore
				// contents.
				_, _, err = pkcs12.Decode(out, "password")
				if err == nil || err.Error() != "pkcs12: expected exactly two safe bags in the PFX PDU" {
					t.Errorf("unexpected error string, exp=%q, got=%v", "pkcs12: expected exactly two safe bags in the PFX PDU", err)
					return
				}
			},
		},
	}
	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			out, err := encodePKCS12Keystore(test.password, test.rawKey, test.certPEM, test.caPEM)
			test.verify(t, out, err)
		})
	}
}
Update test for pkcs12 encoded CA data
Signed-off-by: Richard Wall <c2866c0eefadc742fd429365a658dfba4f801ca3@jetstack.io>
/*
Copyright 2020 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package secretsmanager
import (
"bytes"
"testing"
jks "github.com/pavel-v-chernykh/keystore-go"
"software.sslmate.com/src/go-pkcs12"
cmapi "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2"
"github.com/jetstack/cert-manager/pkg/util/pki"
)
// mustGeneratePrivateKey returns a 2048-bit RSA private key, PEM-encoded
// using the given encoding (PKCS1 or PKCS8), failing the test on error.
func mustGeneratePrivateKey(t *testing.T, encoding cmapi.KeyEncoding) []byte {
	pk, err := pki.GenerateRSAPrivateKey(2048)
	if err != nil {
		t.Fatal(err)
	}
	pkBytes, err := pki.EncodePrivateKey(pk, encoding)
	if err != nil {
		t.Fatal(err)
	}
	return pkBytes
}

// mustSelfSignCertificate self-signs a certificate for "example.com"
// with the given PEM-encoded key (generating a fresh PKCS8 key when
// pkBytes is nil) and returns the PEM-encoded certificate.
func mustSelfSignCertificate(t *testing.T, pkBytes []byte) []byte {
	if pkBytes == nil {
		pkBytes = mustGeneratePrivateKey(t, cmapi.PKCS8)
	}
	pk, err := pki.DecodePrivateKeyBytes(pkBytes)
	if err != nil {
		t.Fatal(err)
	}
	x509Crt, err := pki.GenerateTemplate(&cmapi.Certificate{
		Spec: cmapi.CertificateSpec{
			DNSNames: []string{"example.com"},
		},
	})
	if err != nil {
		t.Fatal(err)
	}
	// Sign the template with its own key (self-signed).
	certBytes, _, err := pki.SignCertificate(x509Crt, x509Crt, pk.Public(), pk)
	if err != nil {
		t.Fatal(err)
	}
	return certBytes
}
// TestEncodeJKSKeystore verifies encodeJKSKeystore output for PKCS1 and
// PKCS8 keys, with and without CA data, by decoding the keystore and
// checking which entries are present.
func TestEncodeJKSKeystore(t *testing.T) {
	tests := map[string]struct {
		password               string
		rawKey, certPEM, caPEM []byte
		verify                 func(t *testing.T, out []byte, err error)
	}{
		"encode a JKS bundle for a PKCS1 key and certificate only": {
			password: "password",
			rawKey:   mustGeneratePrivateKey(t, cmapi.PKCS1),
			certPEM:  mustSelfSignCertificate(t, nil),
			verify: func(t *testing.T, out []byte, err error) {
				if err != nil {
					t.Errorf("expected no error but got: %v", err)
					return
				}
				buf := bytes.NewBuffer(out)
				ks, err := jks.Decode(buf, []byte("password"))
				if err != nil {
					t.Errorf("error decoding keystore: %v", err)
					return
				}
				if ks["certificate"] == nil {
					t.Errorf("no certificate data found in keystore")
				}
				if ks["ca"] != nil {
					t.Errorf("unexpected ca data found in keystore")
				}
			},
		},
		"encode a JKS bundle for a PKCS8 key and certificate only": {
			password: "password",
			rawKey:   mustGeneratePrivateKey(t, cmapi.PKCS8),
			certPEM:  mustSelfSignCertificate(t, nil),
			verify: func(t *testing.T, out []byte, err error) {
				if err != nil {
					t.Errorf("expected no error but got: %v", err)
					// return added for consistency with the PKCS1 case:
					// decoding bogus output would only add noise.
					return
				}
				buf := bytes.NewBuffer(out)
				ks, err := jks.Decode(buf, []byte("password"))
				if err != nil {
					t.Errorf("error decoding keystore: %v", err)
					return
				}
				if ks["certificate"] == nil {
					t.Errorf("no certificate data found in keystore")
				}
				if ks["ca"] != nil {
					t.Errorf("unexpected ca data found in keystore")
				}
			},
		},
		"encode a JKS bundle for a key, certificate and ca": {
			password: "password",
			rawKey:   mustGeneratePrivateKey(t, cmapi.PKCS8),
			certPEM:  mustSelfSignCertificate(t, nil),
			caPEM:    mustSelfSignCertificate(t, nil),
			verify: func(t *testing.T, out []byte, err error) {
				if err != nil {
					t.Errorf("expected no error but got: %v", err)
					return
				}
				buf := bytes.NewBuffer(out)
				ks, err := jks.Decode(buf, []byte("password"))
				if err != nil {
					t.Errorf("error decoding keystore: %v", err)
					return
				}
				if ks["certificate"] == nil {
					t.Errorf("no certificate data found in keystore")
				}
				if ks["ca"] == nil {
					t.Errorf("no ca data found in keystore")
				}
			},
		},
	}
	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			out, err := encodeJKSKeystore([]byte(test.password), test.rawKey, test.certPEM, test.caPEM)
			test.verify(t, out, err)
		})
	}
}
// TestEncodePKCS12Keystore exercises encodePKCS12Keystore with PKCS1 and
// PKCS8 private keys, with and without a CA certificate, and verifies that
// the produced keystore decodes back to the expected contents.
func TestEncodePKCS12Keystore(t *testing.T) {
	tests := map[string]struct {
		password               string
		rawKey, certPEM, caPEM []byte
		verify                 func(t *testing.T, out []byte, err error)
	}{
		// Case names previously said "JKS" — a copy-paste slip from
		// TestEncodeJKSKeystore; this test encodes PKCS12 bundles.
		"encode a PKCS12 bundle for a PKCS1 key and certificate only": {
			password: "password",
			rawKey:   mustGeneratePrivateKey(t, cmapi.PKCS1),
			certPEM:  mustSelfSignCertificate(t, nil),
			verify: func(t *testing.T, out []byte, err error) {
				if err != nil {
					t.Errorf("expected no error but got: %v", err)
					// Don't try to decode output that was never produced.
					return
				}
				pk, cert, err := pkcs12.Decode(out, "password")
				if err != nil {
					t.Errorf("error decoding keystore: %v", err)
					return
				}
				if cert == nil {
					t.Errorf("no certificate data found in keystore")
				}
				if pk == nil {
					t.Errorf("no private key data found in keystore")
				}
			},
		},
		"encode a PKCS12 bundle for a PKCS8 key and certificate only": {
			password: "password",
			rawKey:   mustGeneratePrivateKey(t, cmapi.PKCS8),
			certPEM:  mustSelfSignCertificate(t, nil),
			verify: func(t *testing.T, out []byte, err error) {
				if err != nil {
					t.Errorf("expected no error but got: %v", err)
					return
				}
				pk, cert, err := pkcs12.Decode(out, "password")
				if err != nil {
					t.Errorf("error decoding keystore: %v", err)
					return
				}
				if cert == nil {
					t.Errorf("no certificate data found in keystore")
				}
				if pk == nil {
					t.Errorf("no private key data found in keystore")
				}
			},
		},
		"encode a PKCS12 bundle for a key, certificate and ca": {
			password: "password",
			rawKey:   mustGeneratePrivateKey(t, cmapi.PKCS8),
			certPEM:  mustSelfSignCertificate(t, nil),
			caPEM:    mustSelfSignCertificate(t, nil),
			verify: func(t *testing.T, out []byte, err error) {
				if err != nil {
					t.Errorf("expected no error but got: %v", err)
					return
				}
				// DecodeChain additionally returns any CA certificates.
				pk, cert, caCerts, err := pkcs12.DecodeChain(out, "password")
				if err != nil {
					t.Errorf("error decoding keystore: %v", err)
					return
				}
				if cert == nil {
					t.Errorf("no certificate data found in keystore")
				}
				if pk == nil {
					t.Errorf("no private key data found in keystore")
				}
				if caCerts == nil {
					t.Errorf("no ca data found in keystore")
				}
			},
		},
	}
	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			out, err := encodePKCS12Keystore(test.password, test.rawKey, test.certPEM, test.caPEM)
			test.verify(t, out, err)
		})
	}
}
|
// Copyright 2017 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package migrate
import (
"testing"
"github.com/tsuru/config"
"github.com/tsuru/tsuru/app"
"github.com/tsuru/tsuru/db"
"github.com/tsuru/tsuru/router"
check "gopkg.in/check.v1"
"gopkg.in/mgo.v2/bson"
)
// S is the gocheck suite for the migrate package tests; it holds the shared
// database connection opened in SetUpSuite.
type S struct {
	conn *db.Storage
}
// Test hooks the gocheck suites into the standard "go test" runner.
func Test(t *testing.T) { check.TestingT(t) }
// SetUpSuite opens the database connection shared by every test in the suite.
func (s *S) SetUpSuite(c *check.C) {
	var err error
	s.conn, err = db.Conn()
	c.Assert(err, check.IsNil)
}
// TearDownSuite drops the test database and closes the shared connection.
func (s *S) TearDownSuite(c *check.C) {
	s.conn.Apps().Database.DropDatabase()
	s.conn.Close()
}

// Register the suite with gocheck.
var _ = check.Suite(&S{})
// TestMigrateAppPlanRouterToRouter covers the three migration cases: an app
// with a legacy plan.router value keeps it, an app with no router gets the
// configured default ("galeb"), and an app with an explicit router is left
// untouched. App names are prefixed a-/b-/c- so the Sort("name") query below
// returns them in a known order.
func (s *S) TestMigrateAppPlanRouterToRouter(c *check.C) {
	config.Set("routers:galeb:default", true)
	defer config.Unset("routers")
	a := app.App{Name: "a-with-plan-router"}
	err := s.conn.Apps().Insert(a)
	c.Assert(err, check.IsNil)
	// plan.router is a legacy field no longer present on app.App, so it is
	// written directly through the database.
	err = s.conn.Apps().Update(bson.M{"name": "a-with-plan-router"}, bson.M{"$set": bson.M{"plan.router": "planb"}})
	c.Assert(err, check.IsNil)
	a = app.App{Name: "b-without-plan-router"}
	err = s.conn.Apps().Insert(a)
	c.Assert(err, check.IsNil)
	a = app.App{Name: "c-with-router", Router: "hipache"}
	err = s.conn.Apps().Insert(a)
	c.Assert(err, check.IsNil)
	err = MigrateAppPlanRouterToRouter()
	c.Assert(err, check.IsNil)
	var apps []app.App
	err = s.conn.Apps().Find(nil).Sort("name").All(&apps)
	c.Assert(err, check.IsNil)
	// Assert the expected count first so a short result fails the test
	// instead of panicking with an index-out-of-range below.
	c.Assert(apps, check.HasLen, 3)
	c.Assert(apps[0].Router, check.DeepEquals, "planb")
	c.Assert(apps[1].Router, check.DeepEquals, "galeb")
	c.Assert(apps[2].Router, check.DeepEquals, "hipache")
}
// TestMigrateAppPlanRouterToRouterWithoutDefaultRouter asserts the migration
// fails with ErrDefaultRouterNotFound when no default router is configured.
func (s *S) TestMigrateAppPlanRouterToRouterWithoutDefaultRouter(c *check.C) {
	err := MigrateAppPlanRouterToRouter()
	c.Assert(err, check.DeepEquals, router.ErrDefaultRouterNotFound)
}
app/migrate: refactor test
// Copyright 2017 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package migrate
import (
"testing"
"github.com/tsuru/config"
"github.com/tsuru/tsuru/app"
"github.com/tsuru/tsuru/db"
"github.com/tsuru/tsuru/router"
check "gopkg.in/check.v1"
"gopkg.in/mgo.v2/bson"
)
// S is the gocheck suite for the migrate package tests; it holds the shared
// database connection opened in SetUpSuite.
type S struct {
	conn *db.Storage
}
// Test hooks the gocheck suites into the standard "go test" runner.
func Test(t *testing.T) { check.TestingT(t) }
// SetUpSuite opens the database connection shared by every test in the suite.
func (s *S) SetUpSuite(c *check.C) {
	var err error
	s.conn, err = db.Conn()
	c.Assert(err, check.IsNil)
}
// TearDownSuite drops the test database and closes the shared connection.
func (s *S) TearDownSuite(c *check.C) {
	s.conn.Apps().Database.DropDatabase()
	s.conn.Close()
}

// Register the suite with gocheck.
var _ = check.Suite(&S{})
// TestMigrateAppPlanRouterToRouter covers the three migration cases: an app
// with a legacy plan.router value keeps that router, an app with no router
// gets the configured default ("galeb"), and an app with an explicit Router
// field is left untouched.
func (s *S) TestMigrateAppPlanRouterToRouter(c *check.C) {
	config.Set("routers:galeb:default", true)
	defer config.Unset("routers")
	a := &app.App{Name: "with-plan-router"}
	err := s.conn.Apps().Insert(a)
	c.Assert(err, check.IsNil)
	// plan.router is a legacy field no longer present on app.App, so it is
	// written directly through the database.
	err = s.conn.Apps().Update(bson.M{"name": "with-plan-router"}, bson.M{"$set": bson.M{"plan.router": "planb"}})
	c.Assert(err, check.IsNil)
	a = &app.App{Name: "without-plan-router"}
	err = s.conn.Apps().Insert(a)
	c.Assert(err, check.IsNil)
	a = &app.App{Name: "with-router", Router: "hipache"}
	err = s.conn.Apps().Insert(a)
	c.Assert(err, check.IsNil)
	err = MigrateAppPlanRouterToRouter()
	c.Assert(err, check.IsNil)
	// Fetch each app individually rather than relying on query ordering.
	a, err = app.GetByName("with-plan-router")
	c.Assert(err, check.IsNil)
	c.Assert(a.Router, check.Equals, "planb")
	a, err = app.GetByName("without-plan-router")
	c.Assert(err, check.IsNil)
	c.Assert(a.Router, check.Equals, "galeb")
	a, err = app.GetByName("with-router")
	c.Assert(err, check.IsNil)
	c.Assert(a.Router, check.Equals, "hipache")
}
// TestMigrateAppPlanRouterToRouterWithoutDefaultRouter asserts the migration
// fails with ErrDefaultRouterNotFound when no default router is configured.
func (s *S) TestMigrateAppPlanRouterToRouterWithoutDefaultRouter(c *check.C) {
	err := MigrateAppPlanRouterToRouter()
	c.Assert(err, check.DeepEquals, router.ErrDefaultRouterNotFound)
}
|
package watchman
import (
"sync"
"time"
"github.com/winston-ci/winston/builder"
"github.com/winston-ci/winston/config"
"github.com/winston-ci/winston/resources"
)
// Watchman periodically checks resources for new versions and triggers
// builds for the jobs that use them.
type Watchman interface {
	// Watch polls resource via checker every interval until Stop is called.
	// If latestOnly is true only the newest detected version is built;
	// otherwise every detected version is built in order.
	Watch(
		job config.Job,
		resource config.Resource,
		checker resources.Checker,
		latestOnly bool,
		interval time.Duration,
	)
	// Stop ends all active watches and waits for them to finish.
	Stop()
}
// watchman is the default Watchman implementation; it runs one goroutine per
// watched resource.
type watchman struct {
	builder builder.Builder
	// closed by Stop to signal every watch goroutine to exit
	stop chan struct{}
	// tracks running watch goroutines so Stop can wait for them
	watching *sync.WaitGroup
}
// NewWatchman returns a Watchman that triggers builds through the given
// builder.
func NewWatchman(builder builder.Builder) Watchman {
	return &watchman{
		builder:  builder,
		stop:     make(chan struct{}),
		watching: new(sync.WaitGroup),
	}
}
// Watch starts a goroutine that polls resource via checker every interval
// and triggers builds for any detected versions, until Stop is called.
func (watchman *watchman) Watch(
	job config.Job,
	resource config.Resource,
	checker resources.Checker,
	latestOnly bool,
	interval time.Duration,
) {
	watchman.watching.Add(1)
	go func() {
		defer watchman.watching.Done()
		ticker := time.NewTicker(interval)
		// Release the ticker when the watch ends; otherwise it leaks for
		// the life of the process.
		defer ticker.Stop()
		for {
			select {
			case <-watchman.stop:
				return
			case <-ticker.C:
				resources := checker.CheckResource(resource)
				if len(resources) == 0 {
					// break leaves the select, not the for loop.
					break
				}
				if latestOnly {
					// Remember the newest version so the next poll starts
					// from it.
					resource = resources[len(resources)-1]
					watchman.builder.Build(job, resource)
				} else {
					for _, resource = range resources {
						watchman.builder.Build(job, resource)
					}
				}
			}
		}
	}()
}
// Stop signals every watch goroutine to exit and blocks until they have all
// returned. It must be called at most once: closing stop twice panics.
func (watchman *watchman) Stop() {
	close(watchman.stop)
	watchman.watching.Wait()
}
add logging to watchman
package watchman
import (
"log"
"sync"
"time"
"github.com/winston-ci/winston/builder"
"github.com/winston-ci/winston/config"
"github.com/winston-ci/winston/resources"
)
// Watchman periodically checks resources for new versions and triggers
// builds for the jobs that use them.
type Watchman interface {
	// Watch polls resource via checker every interval until Stop is called.
	// If latestOnly is true only the newest detected version is built;
	// otherwise every detected version is built in order.
	Watch(
		job config.Job,
		resource config.Resource,
		checker resources.Checker,
		latestOnly bool,
		interval time.Duration,
	)
	// Stop ends all active watches and waits for them to finish.
	Stop()
}
// watchman is the default Watchman implementation; it runs one goroutine per
// watched resource.
type watchman struct {
	builder builder.Builder
	// closed by Stop to signal every watch goroutine to exit
	stop chan struct{}
	// tracks running watch goroutines so Stop can wait for them
	watching *sync.WaitGroup
}
// NewWatchman returns a Watchman that triggers builds through the given
// builder.
func NewWatchman(builder builder.Builder) Watchman {
	return &watchman{
		builder:  builder,
		stop:     make(chan struct{}),
		watching: new(sync.WaitGroup),
	}
}
// Watch starts a goroutine that polls resource via checker every interval,
// logs what it finds, and triggers builds for any detected versions, until
// Stop is called.
func (watchman *watchman) Watch(
	job config.Job,
	resource config.Resource,
	checker resources.Checker,
	latestOnly bool,
	interval time.Duration,
) {
	watchman.watching.Add(1)
	go func() {
		defer watchman.watching.Done()
		ticker := time.NewTicker(interval)
		// Release the ticker when the watch ends; otherwise it leaks for
		// the life of the process.
		defer ticker.Stop()
		for {
			select {
			case <-watchman.stop:
				return
			case <-ticker.C:
				log.Println("checking for sources for", resource)
				resources := checker.CheckResource(resource)
				if len(resources) == 0 {
					// break leaves the select, not the for loop.
					break
				}
				// The format string has two verbs; the original passed only
				// one argument, printing "%!T(MISSING)".
				log.Printf("found %d sources via %T", len(resources), checker)
				// Remember the newest version so the next poll starts from it.
				resource = resources[len(resources)-1]
				if latestOnly {
					log.Printf("triggering latest via %T: %s\n", checker, resource)
					watchman.builder.Build(job, resource)
				} else {
					for i, resource := range resources {
						log.Printf("triggering %d of %d via %T: %s\n", i+1, len(resources), checker, resource)
						watchman.builder.Build(job, resource)
					}
				}
			}
		}
	}()
}
// Stop signals every watch goroutine to exit and blocks until they have all
// returned. It must be called at most once: closing stop twice panics.
func (watchman *watchman) Stop() {
	close(watchman.stop)
	watchman.watching.Wait()
}
|
// errchk $G $D/$F.go
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This is a test case for issue 804.
package main
// f returns an array by value; its call results are not addressable, which
// the assignments in main below must be rejected for.
func f() [10]int {
	return [10]int{}
}
// Indexing a map yields a non-addressable [10]int value.
var m map[int][10]int

// Each statement below must be rejected by the compiler with a diagnostic
// matching its ERROR comment (verified by errchk).
func main() {
	f()[1] = 2 // ERROR "cannot"
	f()[2:3][0] = 4 // ERROR "cannot"
	var x = "abc"
	x[2] = 3 // ERROR "cannot"
	m[0][5] = 6 // ERROR "cannot"
}
test: Recognize gccgo error messages.
bug278.go:18: error: invalid left hand side of assignment
bug278.go:19: error: array is not addressable
bug278.go:21: error: invalid left hand side of assignment
bug278.go:22: error: invalid left hand side of assignment
R=rsc
CC=golang-dev
http://codereview.appspot.com/2122042
// errchk $G $D/$F.go
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This is a test case for issue 804.
package main
// f returns an array by value; its call results are not addressable, which
// the assignments in main below must be rejected for.
func f() [10]int {
	return [10]int{}
}
// Indexing a map yields a non-addressable [10]int value.
var m map[int][10]int

// Each statement below must be rejected by the compiler; the ERROR patterns
// accept both gc ("cannot") and gccgo ("invalid"/"addressable") wording.
func main() {
	f()[1] = 2 // ERROR "cannot|invalid"
	f()[2:3][0] = 4 // ERROR "cannot|addressable"
	var x = "abc"
	x[2] = 3 // ERROR "cannot|invalid"
	m[0][5] = 6 // ERROR "cannot|invalid"
}
|
package do
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"time"
"github.com/skibish/ddns/misc"
)
// Record describes a single DigitalOcean domain DNS record as represented by
// the v2 API; zero-valued optional fields are omitted when marshalled.
type Record struct {
	ID       uint64 `json:"id"`
	Type     string `json:"type"`
	Name     string `json:"name"`
	Data     string `json:"data"`
	TTL      uint64 `json:"ttl,omitempty"`
	Priority uint64 `json:"priority,omitempty"`
	Port     uint64 `json:"port,omitempty"`
	Weight   uint64 `json:"weight,omitempty"`
	Flags    uint64 `json:"flags,omitempty"`
	Tag      string `json:"tag,omitempty"`
}
// domainRecords mirrors the wrapper object the API returns for record lists.
type domainRecords struct {
	Records []Record `json:"domain_records"`
}
// DomainsService is an interface to interact with DNS records.
type DomainsService interface {
	// List returns all records of the named domain.
	List(context.Context, string) ([]Record, error)
	// Create adds a new record to the named domain.
	Create(context.Context, string, Record) error
	// Update modifies an existing record (matched by ID) on the named domain.
	Update(context.Context, string, Record) error
}
// DigitalOcean holds the HTTP client, credentials and base URL used to talk
// to the DigitalOcean v2 API; it implements DomainsService.
type DigitalOcean struct {
	c     *http.Client
	token string
	// base URL of the v2 API
	url string
	// per-request timeout applied via context in each method
	timeout time.Duration
}
// New returns a DigitalOcean client that authenticates with the given token
// and applies timeout to each request.
func New(token string, timeout time.Duration) *DigitalOcean {
	return &DigitalOcean{
		token:   token,
		c:       &http.Client{},
		url:     "https://api.digitalocean.com/v2",
		timeout: timeout,
	}
}
// List returns all DNS records for the given domain (first API page only).
func (d *DigitalOcean) List(ctx context.Context, domain string) ([]Record, error) {
	req, err := d.prepareRequest(http.MethodGet, fmt.Sprintf("/domains/%s/records", domain), nil)
	if err != nil {
		return nil, fmt.Errorf("failed to prepare a request: %w", err)
	}
	// Bound the request by the client-wide timeout.
	ctx, cancel := context.WithTimeout(ctx, d.timeout)
	defer cancel()
	req = req.WithContext(ctx)
	res, err := d.c.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to do a request: %w", err)
	}
	defer res.Body.Close()
	if !misc.Success(res.StatusCode) {
		return nil, fmt.Errorf("unexpected response with status code %d", res.StatusCode)
	}
	var records domainRecords
	if err := json.NewDecoder(res.Body).Decode(&records); err != nil {
		return nil, fmt.Errorf("failed to decode the response: %w", err)
	}
	return records.Records, nil
}
// Create creates a DNS record on the given domain.
func (d *DigitalOcean) Create(ctx context.Context, domain string, record Record) error {
	body, err := json.Marshal(record)
	if err != nil {
		return fmt.Errorf("failed to marshal the record: %w", err)
	}
	req, err := d.prepareRequest(http.MethodPost, fmt.Sprintf("/domains/%s/records", domain), bytes.NewBuffer(body))
	if err != nil {
		return fmt.Errorf("failed to prepare a request: %w", err)
	}
	// Bound the request by the client-wide timeout.
	ctx, cancel := context.WithTimeout(ctx, d.timeout)
	defer cancel()
	req = req.WithContext(ctx)
	res, err := d.c.Do(req)
	if err != nil {
		return fmt.Errorf("failed to do a request: %w", err)
	}
	defer res.Body.Close()
	if !misc.Success(res.StatusCode) {
		return fmt.Errorf("unexpected response with status code %d", res.StatusCode)
	}
	return nil
}
// Update updates an existing DNS record (matched by record.ID) on the given
// domain.
func (d *DigitalOcean) Update(ctx context.Context, domain string, record Record) error {
	body, err := json.Marshal(record)
	if err != nil {
		// ": %w" matches the wrapping style used by List and Create (the
		// colon was missing here).
		return fmt.Errorf("failed to marshal the record: %w", err)
	}
	req, err := d.prepareRequest(http.MethodPut, fmt.Sprintf("/domains/%s/records/%d", domain, record.ID), bytes.NewBuffer(body))
	if err != nil {
		return fmt.Errorf("failed to prepare a request: %w", err)
	}
	// Bound the request by the client-wide timeout.
	ctx, cancel := context.WithTimeout(ctx, d.timeout)
	defer cancel()
	req = req.WithContext(ctx)
	res, err := d.c.Do(req)
	if err != nil {
		return fmt.Errorf("failed to do a request: %w", err)
	}
	defer res.Body.Close()
	if !misc.Success(res.StatusCode) {
		return fmt.Errorf("unexpected response with status code %d", res.StatusCode)
	}
	return nil
}
// prepareRequest builds an HTTP request against the API base URL with the
// JSON content type and bearer-token authorization headers set.
func (d *DigitalOcean) prepareRequest(method, path string, body io.Reader) (*http.Request, error) {
	request, err := http.NewRequest(method, d.url+path, body)
	if err != nil {
		return nil, err
	}
	headers := request.Header
	headers.Set("Content-Type", "application/json")
	headers.Set("Authorization", fmt.Sprintf("Bearer %s", d.token))
	return request, nil
}
fix(do): return 200 records instead of default 20
In most cases 200 limit should be enough.
It is extremely rare situation when
there will be 200 dns records or more.
And it might be due to a wrong configuration.
If it will become something common, then its time to implement pagination.
Closes: #24
package do
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"time"
log "github.com/sirupsen/logrus"
"github.com/skibish/ddns/misc"
)
// Record describes a single DigitalOcean domain DNS record as represented by
// the v2 API; zero-valued optional fields are omitted when marshalled.
type Record struct {
	ID       uint64 `json:"id"`
	Type     string `json:"type"`
	Name     string `json:"name"`
	Data     string `json:"data"`
	TTL      uint64 `json:"ttl,omitempty"`
	Priority uint64 `json:"priority,omitempty"`
	Port     uint64 `json:"port,omitempty"`
	Weight   uint64 `json:"weight,omitempty"`
	Flags    uint64 `json:"flags,omitempty"`
	Tag      string `json:"tag,omitempty"`
}
// domainRecords mirrors the wrapper object the API returns for record lists.
// Links.Pages.Next is non-empty when more pages exist, which List uses to
// detect truncation at the 200-record limit.
type domainRecords struct {
	Records []Record `json:"domain_records"`
	Links   struct {
		Pages struct {
			Next string `json:"next"`
		} `json:"pages"`
	} `json:"links"`
}
// DomainsService is an interface to interact with DNS records.
type DomainsService interface {
	// List returns all records of the named domain.
	List(context.Context, string) ([]Record, error)
	// Create adds a new record to the named domain.
	Create(context.Context, string, Record) error
	// Update modifies an existing record (matched by ID) on the named domain.
	Update(context.Context, string, Record) error
}
// DigitalOcean holds the HTTP client, credentials and base URL used to talk
// to the DigitalOcean v2 API; it implements DomainsService.
type DigitalOcean struct {
	c     *http.Client
	token string
	// base URL of the v2 API
	url string
	// per-request timeout applied via context in each method
	timeout time.Duration
}
// New returns a DigitalOcean client that authenticates with the given token
// and applies timeout to each request.
func New(token string, timeout time.Duration) *DigitalOcean {
	return &DigitalOcean{
		token:   token,
		c:       &http.Client{},
		url:     "https://api.digitalocean.com/v2",
		timeout: timeout,
	}
}
// List returns DNS records for the given domain. At most 200 records are
// fetched (per_page=200, raised from the API default of 20); pagination is
// deliberately not implemented, and a debug message is logged when more
// pages exist.
func (d *DigitalOcean) List(ctx context.Context, domain string) ([]Record, error) {
	req, err := d.prepareRequest(http.MethodGet, fmt.Sprintf("/domains/%s/records?per_page=200", domain), nil)
	if err != nil {
		return nil, fmt.Errorf("failed to prepare a request: %w", err)
	}
	// Bound the request by the client-wide timeout.
	ctx, cancel := context.WithTimeout(ctx, d.timeout)
	defer cancel()
	req = req.WithContext(ctx)
	res, err := d.c.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to do a request: %w", err)
	}
	defer res.Body.Close()
	if !misc.Success(res.StatusCode) {
		return nil, fmt.Errorf("unexpected response with status code %d", res.StatusCode)
	}
	var records domainRecords
	if err := json.NewDecoder(res.Body).Decode(&records); err != nil {
		return nil, fmt.Errorf("failed to decode the response: %w", err)
	}
	// A non-empty "next" link means the result was truncated at 200 records.
	// NOTE(review): this is logged at debug level, so it is invisible under
	// the default log level — consider warn.
	if records.Links.Pages.Next != "" {
		log.Debugf("there are more than 200 dns record for %s domain, are you sure that's correct? if yes, please raise an issue here: https://github.com/skibish/ddns/issues/new", domain)
	}
	return records.Records, nil
}
// Create creates a DNS record on the given domain.
func (d *DigitalOcean) Create(ctx context.Context, domain string, record Record) error {
	body, err := json.Marshal(record)
	if err != nil {
		return fmt.Errorf("failed to marshal the record: %w", err)
	}
	req, err := d.prepareRequest(http.MethodPost, fmt.Sprintf("/domains/%s/records", domain), bytes.NewBuffer(body))
	if err != nil {
		return fmt.Errorf("failed to prepare a request: %w", err)
	}
	// Bound the request by the client-wide timeout.
	ctx, cancel := context.WithTimeout(ctx, d.timeout)
	defer cancel()
	req = req.WithContext(ctx)
	res, err := d.c.Do(req)
	if err != nil {
		return fmt.Errorf("failed to do a request: %w", err)
	}
	defer res.Body.Close()
	if !misc.Success(res.StatusCode) {
		return fmt.Errorf("unexpected response with status code %d", res.StatusCode)
	}
	return nil
}
// Update updates an existing DNS record (matched by record.ID) on the given
// domain.
func (d *DigitalOcean) Update(ctx context.Context, domain string, record Record) error {
	body, err := json.Marshal(record)
	if err != nil {
		// ": %w" matches the wrapping style used by List and Create (the
		// colon was missing here).
		return fmt.Errorf("failed to marshal the record: %w", err)
	}
	req, err := d.prepareRequest(http.MethodPut, fmt.Sprintf("/domains/%s/records/%d", domain, record.ID), bytes.NewBuffer(body))
	if err != nil {
		return fmt.Errorf("failed to prepare a request: %w", err)
	}
	// Bound the request by the client-wide timeout.
	ctx, cancel := context.WithTimeout(ctx, d.timeout)
	defer cancel()
	req = req.WithContext(ctx)
	res, err := d.c.Do(req)
	if err != nil {
		return fmt.Errorf("failed to do a request: %w", err)
	}
	defer res.Body.Close()
	if !misc.Success(res.StatusCode) {
		return fmt.Errorf("unexpected response with status code %d", res.StatusCode)
	}
	return nil
}
// prepareRequest builds an HTTP request against the API base URL with the
// JSON content type and bearer-token authorization headers set.
func (d *DigitalOcean) prepareRequest(method, path string, body io.Reader) (*http.Request, error) {
	req, err := http.NewRequest(method, d.url+path, body)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", d.token))
	return req, nil
}
|
// Package genapi implements a generic skeleton we can use as the basis for an
// api service. It will set up command line arguments, connections to backend
// databases, handle test modes which might affect those databases, register
// itself with skyapi, and more.
//
// Basic definition
//
// To use first initialize a GenAPI instance somewhere accessible by your entire
// application, and give it an RPC type
//
// package myapi
//
// var GA = genapi.GenAPI{
// Name: "my-api",
// RedisInfo: &genapi.RedisInfo{}
// Services: []interface{}{MyAPI{}},
// }
//
// type MyAPI struct{}
//
// func (_ MyAPI) Foo(r *http.Request, args *struct{}, res *struct{}) error {
// return GA.Cmd("INCR", "MyKey").Err
// }
//
// API Mode
//
// To actually read command-line arguments, set up database connections, listen
// on a random port, register with skyapi, and start handling requests, simply
// call APIMode() from your main method:
//
// package main
//
// func main() {
// myapi.GA.APIMode()
// }
//
// In APIMode the genapi will also listen for SIGTERM signals, and if it
// receives one will unregister with skyapi, and exit once all ongoing requests
// are completed.
//
// Test Mode
//
// When testing your api you can call TestMode from your test's init function,
// and call RPC to get an instance of an http.Handler you can make calls
// against:
//
// package myapi // myapi_test.go
//
// import . "testing"
//
// func init() {
// GA.TestMode()
// }
//
// func TestSomeThing(t *T) {
// h := GA.RPC()
// // test against h
// }
//
// CLI Mode
//
// Finally, there are times when you want a command-line binary which will be
// made alongside the actual api binary, and which will share resources and
// possibly database connections. In this case you can use the CLIMode method
// and then access the GenAPI from your main method as normal:
//
// package main
//
// func main() {
// myapi.GA.CLIMode()
// myapi.GA.Cmd("DECR", "MyKey")
// }
//
package genapi
import (
"crypto/sha1"
"crypto/tls"
"fmt"
"net"
"net/http"
"net/http/pprof"
"os"
"os/signal"
"reflect"
"runtime"
"strconv"
"strings"
"sync"
"syscall"
"time"
"golang.org/x/net/context"
"github.com/gorilla/rpc/v2"
"github.com/levenlabs/gatewayrpc"
"github.com/levenlabs/go-llog"
"github.com/levenlabs/go-srvclient"
"github.com/levenlabs/golib/mgoutil"
"github.com/levenlabs/golib/radixutil"
"github.com/levenlabs/golib/rpcutil"
"github.com/mediocregopher/lever"
"github.com/mediocregopher/okq-go.v2"
"github.com/mediocregopher/radix.v2/pool"
"github.com/mediocregopher/radix.v2/util"
"github.com/mediocregopher/skyapi/client"
"github.com/miekg/dns"
"gopkg.in/mgo.v2"
)
// Version can be set using:
// -ldflags "-X github.com/levenlabs/golib/genapi.Version versionstring"
// on the go build command. When this is done, the --version flag will be
// available on the command-line and will print out whatever version string is
// passed in.
//
// It could also be set manually during runtime, but that would kind of defeat
// the purpose.
//
// Version will be automatically unquoted
var Version string
// MongoInfo contains information needed by the api to interact with a mongo
// backend, and also houses the connection to that backend (which can be
// interacted with through its methods)
type MongoInfo struct {
	// If you want to make mongo optional, set this and if --mongo-addr isn't
	// sent, then WithDB, WithColl will call fn with nil and SessionHelper's
	// session will be nil.
	Optional bool
	// The name of the mongo database this app should use. In TestMode this will
	// always be overwritten to "test_<DBName>"
	DBName string
	// live mgo session; presumably populated during GenAPI init — nil when
	// mongo is Optional and no address was configured (see WithDB/WithColl)
	session *mgo.Session
}
// InitFunc is just a helper for a function that accepts a GenAPI pointer,
// used for the Init hook and AppendInit callbacks.
type InitFunc func(*GenAPI)
// WithDB is similar to mgoutil.SessionHelper's WithDB, see those docs for
// more details. When no mongo connection exists (Optional and unset), fn is
// invoked with a nil database.
func (m *MongoInfo) WithDB(fn func(*mgo.Database)) {
	if m.session == nil {
		fn(nil)
		return
	}
	sh := mgoutil.SessionHelper{
		Session: m.session,
		DB:      m.DBName,
	}
	sh.WithDB(fn)
}
// WithColl is similar to mgoutil.SessionHelper's WithColl, see those docs
// for more details. When no mongo connection exists (Optional and unset), fn
// is invoked with a nil collection.
func (m *MongoInfo) WithColl(collName string, fn func(*mgo.Collection)) {
	if m.session == nil {
		fn(nil)
		return
	}
	sh := mgoutil.SessionHelper{
		Session: m.session,
		DB:      m.DBName,
		Coll:    collName,
	}
	sh.WithColl(fn)
}
// CollSH returns an mgoutil.SessionHelper for a collection of the given
// name. The SessionHelper's Session might be nil if you made mongo Optional.
func (m *MongoInfo) CollSH(collName string) mgoutil.SessionHelper {
	sh := mgoutil.SessionHelper{
		Session: m.session,
		DB:      m.DBName,
	}
	sh.Coll = collName
	return sh
}
// RedisInfo is used to tell the api to interact with a redis backend, and also
// houses the connection to that backend. If the redis backend is a cluster
// instance that whole cluster will be connected to
type RedisInfo struct {
	// If you want to make redis optional, set this and if --redis-addr isn't
	// sent, Cmder will be nil.
	Optional bool
	// Populated by the api once a connection to redis is made, and can be used
	// as such. Do not set manually.
	util.Cmder
}
// OkqInfo is used to tell the api to interact with a set of okq instances.
type OkqInfo struct {
	// If you want to make okq optional, set this and if --okq-addr isn't sent,
	// Client will return nil
	Optional bool
	// Read/Write timeout for redis connection and the NotifyTimeout for Client.
	// Defaults to 30 seconds
	// Do not change after initializing GenAPI
	Timeout time.Duration
	// Populated during init; embedded so okq methods are directly reachable.
	*okq.Client
}
// TLSInfo is used to tell the api to use TLS (e.g. https/ssl) when listening
// for incoming requests
type TLSInfo struct {
	// If set to true then the config options for passing in cert files on the
	// command-line will not be used, and instead the Certs field will be
	// expected to be filled in manually during the Init function
	FillCertsManually bool
	// One or more certificates to use for TLS. Will be filled automatically if
	// FillCertsManually is false
	Certs []tls.Certificate
}
// GenAPI is a type used to handle most of the generic logic we always implement
// when making an RPC API endpoint.
//
// The struct is initialized with whatever parameters are appropriate, and then
// has either APIMode(), TestMode(), or CLIMode() called on it depending on the
// intent. Fields are optional unless otherwise marked in the comment.
type GenAPI struct {
	// Required. Name is the name of the api, as it will be identified on the
	// command-line and in skyapi
	Name string
	// The set of rpc service structs which this API will host. Must have at
	// least one service in APIMode
	Services []interface{}
	// Like Services, but these will not be registered with the underlying
	// gateway library, and therefore will not show up in calls to
	// "RPC.GetMethods"
	HiddenServices []interface{}
	// The mux which the rpc services will be added to. If not set a new one
	// will be created and used. This can be used to provide extra functionality
	// in conjunction with the RPC server, or completely in place of it.
	//
	// It is important that RPCEndpoint does *not* have a handler set in this
	// mux, as GenAPI will be setting it itself.
	Mux *http.ServeMux
	// The http endpoint that the RPC handler for Services and HiddenServices
	// will be attached to. Defaults to "/". If you set this to "_", no rpc
	// listener will be set up and its up to you to add the handler from RPC()
	// to the mux for whatever path you need.
	RPCEndpoint string
	// Additional lever.Param structs which can be included in the lever parsing
	LeverParams []lever.Param
	// If mongo is intended to be used as a backend, this should be filled in
	*MongoInfo
	// If redis is intended to be used, this should be filled in.
	*RedisInfo
	// If okq is intended to be used, this should be filled in.
	*OkqInfo
	// If TLS is intended to be used, this should be filled in. The Certs field
	// of TLSInfo may be filled in during the Init function for convenience, but
	// the struct itself must be initialized before any of the Mode methods are
	// called
	*TLSInfo
	// A function to run just after initializing connections to backing
	// database. Meant for performing any initialization needed by the app.
	// This is called before any AppendInit functions
	Init InitFunc
	// additional init hooks registered through AppendInit, run after Init
	inits []InitFunc
	// May be set if a codec with different parameters is required.
	// If not set an rpcutil.LLCodec with default options will be used.
	Codec rpc.Codec
	// Do not set. This will be automatically filled in with whatever address
	// is being listened on once APIMode is called.
	ListenAddr string
	// Do not set. This will be automatically filled in when any of the run
	// modes are called, and may be used after that point to retrieve parameter
	// values.
	*lever.Lever
	// Do not set. This will be automatically filled in when any of the run
	// modes are called. Indicates which mode the GenAPI is currently in, and
	// may be used after that point to know which run mode GenAPI is in.
	Mode string
	// When initialized, this channel will be closed at the end of the init
	// phase of running. If in APIMode it will be closed just before the call to
	// ListenAndServe. This is useful so you can call APIMode in a separate
	// go-routine and know when it's started listening, if there's other steps
	// you want to take after initialization has been done.
	InitDoneCh chan bool
	// When initialized, this channel will be closed when in APIMode and cleanup
	// has been completed after a kill signal. This is useful if you have other
	// cleanup you want to run after GenAPI is done.
	DoneCh chan bool
	// Optional set of remote APIs (presumably GenAPIs, but that's not actually
	// required) that this one will be calling. The key should be the name of
	// the remote api, and the value should be the default address for it. Each
	// one will have a configuration option added for its address (e.g. if
	// "other-api" is in this list, then "--other-api-addr" will be a config
	// option). Each key can be used as an argument to RemoteAPICaller to obtain
	// a convenient function for communicating with other apis.
	RemoteAPIs map[string]string
	// Optional set of Healthers which should be checked during a /health-check.
	// These will be checked sequentially, and if any return an error that will
	// be logged and the health check will return false. The key in the map is a
	// name for the Healther which can be logged
	Healthers map[string]Healther
	// SRVClient which will be used by GenAPI when resolving requests, and which
	// can also be used by other processes as well. This should only be modified
	// during the init function
	srvclient.SRVClient
	// per-request contexts, keyed by the request; presumably guarded by ctxsL
	ctxs map[*http.Request]context.Context
	ctxsL sync.RWMutex
	// Mutex for accessing Healthers
	healthersL sync.Mutex
	// Signal channel. Included for testing purposes only
	sigCh chan os.Signal
	// set of active listeners for this genapi (APIMode only)
	listeners []*listenerReloader
	// the active httpWaiter for the instance
	hw *httpWaiter
	// receives one send per incoming request (see countHandler)
	countCh chan bool
}
// The different possible Mode values for GenAPI
const (
	// APIMode: serving live rpc traffic (set by the APIMode method).
	APIMode = "api"
	// TestMode: running under go test (set by the TestMode method).
	TestMode = "test"
	// CLIMode: running as a command-line utility (set by the CLIMode method).
	CLIMode = "cli"
)
// APIMode puts the GenAPI into APIMode, wherein it listens for any incoming rpc
// requests and tries to serve them against one of its Services. This method
// will block indefinitely
func (g *GenAPI) APIMode() {
	g.Mode = APIMode
	g.sigCh = make(chan os.Signal, 1)
	g.init()
	g.RPCListen()
	// Once ListenAddr is populated with the final value we can call doSkyAPI
	skyapiStopCh := g.doSkyAPI()
	unhealthyTimeout, _ := g.ParamInt("--unhealthy-timeout")
	if g.InitDoneCh != nil {
		close(g.InitDoneCh)
	}
	// After this point everything is listening and we're just waiting for a
	// kill signal
	llog.Info("waiting for close signal")
	signal.Notify(g.sigCh, syscall.SIGTERM, syscall.SIGINT)
	<-g.sigCh
	llog.Info("signal received, stopping")
	// Replace all Healthers with sneezy so /health-check changes behavior
	// while draining. NOTE(review): sneezy is defined elsewhere in the
	// package — presumably it always reports unhealthy; confirm.
	g.healthersL.Lock()
	g.Healthers = map[string]Healther{
		"sneezy": sneezy{},
	}
	g.healthersL.Unlock()
	if skyapiStopCh != nil {
		llog.Info("stopping skyapi connection")
		close(skyapiStopCh)
		// Wait a bit just in case something gets the skydns record before we
		// kill the skyapi connection, but the connection doesn't come in till
		// after hw.wait() runs
		time.Sleep(500 * time.Millisecond)
	}
	// Appear as unhealthy for a while before hw.wait() runs
	time.Sleep(time.Duration(unhealthyTimeout) * time.Millisecond)
	g.hw.wait() // hw is populated in RPCListen
	// brief pause before signaling completion to callers waiting on DoneCh
	time.Sleep(50 * time.Millisecond)
	if g.DoneCh != nil {
		close(g.DoneCh)
	}
}
// countHandler wraps h so that every incoming request sends one value on
// g.countCh before being served.
func (g *GenAPI) countHandler(h http.Handler) http.Handler {
	fn := func(w http.ResponseWriter, r *http.Request) {
		g.countCh <- true
		h.ServeHTTP(w, r)
	}
	return http.HandlerFunc(fn)
}
// hostnameHandler wraps h so every response carries an X-Hostname header with
// the --hostname parameter's value; h is returned unwrapped when the
// parameter is empty.
func (g *GenAPI) hostnameHandler(h http.Handler) http.Handler {
	hostname, _ := g.ParamStr("--hostname")
	if hostname == "" {
		return h
	}
	fn := func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("X-Hostname", hostname)
		h.ServeHTTP(w, r)
	}
	return http.HandlerFunc(fn)
}
// RPCListen sets up listeners for the GenAPI listen and starts them up. This
// may only be called after TestMode or CLIMode has been called, it is
// automatically done for APIMode.
func (g *GenAPI) RPCListen() {
	// "_" disables the automatic rpc handler registration entirely.
	if g.RPCEndpoint != "_" {
		g.Mux.Handle(g.RPCEndpoint, g.RPC())
	}
	// The net/http/pprof package expects to be under /debug/pprof/, which is
	// why we don't strip the prefix here
	g.Mux.Handle("/debug/pprof/", g.pprofHandler())
	g.Mux.Handle("/health-check", g.healthCheck())
	g.hw = &httpWaiter{
		ch: make(chan struct{}, 1),
	}
	// Wrap the mux inside-out: count, hostname header, per-request context,
	// then the waiter that tracks in-flight requests for shutdown.
	var h http.Handler
	h = g.Mux
	h = g.countHandler(h)
	h = g.hostnameHandler(h)
	h = g.contextHandler(h)
	h = g.hw.handler(h)
	addrs, _ := g.Lever.ParamStrs("--listen-addr")
	for _, addr := range addrs {
		// empty addr might get passed in to disable --listen-addr
		if addr == "" {
			continue
		}
		g.listeners = append(g.listeners, g.serve(h, addr, false))
	}
	if g.TLSInfo != nil {
		addrs, _ := g.Lever.ParamStrs("--tls-listen-addr")
		for _, addr := range addrs {
			if addr == "" {
				continue
			}
			g.listeners = append(g.listeners, g.serve(h, addr, true))
		}
	}
}
// AddHealther adds a healther to Healthers under the specified key, creating
// the map if it was never initialized (Healthers is documented as optional,
// so it may be nil).
func (g *GenAPI) AddHealther(key string, healther Healther) {
	g.healthersL.Lock()
	defer g.healthersL.Unlock()
	if g.Healthers == nil {
		g.Healthers = map[string]Healther{}
	}
	g.Healthers[key] = healther
}
// This starts a go-routine which will do the actual serving of the handler.
// Any failure to create the socket or listener is fatal.
func (g *GenAPI) serve(h http.Handler, addr string, doTLS bool) *listenerReloader {
	kv := llog.KV{"addr": addr, "tls": doTLS}
	llog.Info("creating listen socket", kv)
	ln, err := net.Listen("tcp", addr)
	if err != nil {
		llog.Fatal("failed creating listen socket", kv.Set("err", err))
	}
	// Re-read the bound address so ":0" style addrs resolve to a real port.
	actualAddr := ln.Addr().String()
	kv["addr"] = actualAddr
	// If this is the first address specified set ListenAddr to that, so it will
	// be advertised with skyapi
	if g.ListenAddr == "" {
		g.ListenAddr = actualAddr
	}
	netln := net.Listener(tcpKeepAliveListener{ln.(*net.TCPListener)})
	lr, err := newListenerReloader(netln, g.listenerMaker(doTLS))
	if err != nil {
		llog.Fatal("failed to create listener", kv.Set("err", err))
	}
	go func() {
		llog.Info("starting rpc listening", kv)
		srv := &http.Server{
			Handler: h,
		}
		// NOTE(review): Serve's error return is dropped; a listener failure
		// here ends the goroutine silently.
		srv.Serve(lr)
	}()
	return lr
}
// listenerMaker returns a function which wraps a raw listener with PROXY
// protocol handling and, optionally, TLS. It is used by listenerReloader so
// the wrapping can be re-applied on a hot reload.
func (g *GenAPI) listenerMaker(doTLS bool) func(net.Listener) (net.Listener, error) {
	return func(l net.Listener) (net.Listener, error) {
		var err error
		allowedProxyCIDRsStr, _ := g.ParamStr("--proxy-proto-allowed-cidrs")
		allowedProxyCIDRs := strings.Split(allowedProxyCIDRsStr, ",")
		// note this assigns back into l, so the TLS wrapping below stacks on
		// top of the proxy-proto listener
		if l, err = newProxyListener(l, allowedProxyCIDRs); err != nil {
			return nil, fmt.Errorf("proxy proto listener: %s", err)
		}
		if doTLS {
			tf := &tls.Config{
				Certificates: g.TLSInfo.Certs,
			}
			tf.BuildNameToCertificate()
			l = tls.NewListener(l, tf)
		}
		return l, nil
	}
}
// TestMode puts the GenAPI into TestMode, wherein it is then prepared to be
// used during go tests. No listeners are started; use RPC() to get a handler
// to test against.
func (g *GenAPI) TestMode() {
	g.Mode = TestMode
	g.init()
}
// CLIMode puts the GenAPI into CLIMode, wherein it is then prepared to be used
// by a command-line utility. Config-file parsing is disabled in this mode.
func (g *GenAPI) CLIMode() {
	g.Mode = CLIMode
	g.init()
}
// AppendInit adds a function to be called when GenAPI is initialized. It will
// be called after GenAPI's Init() and after any previous functions that were
// appended. Must be called before any of the run-mode methods.
func (g *GenAPI) AppendInit(f InitFunc) {
	// Mode is only set once a run mode has been entered, at which point the
	// inits have already been consumed
	if g.Mode != "" {
		panic("genapi: AppendInit was called after Init has already been ran")
	}
	g.inits = append(g.inits, f)
}
// RPC returns an http.Handler which will handle the RPC calls made against it
// for the GenAPI's Services
func (g *GenAPI) RPC() http.Handler {
	// TODO make gatewayrpc.Server have an option not to do its logging
	// per-request, so we can do it in here with the proper KVs from the context
	s := gatewayrpc.NewServer()
	s.RegisterCodec(g.Codec, "application/json")
	for _, service := range g.Services {
		if err := s.RegisterService(service, ""); err != nil {
			llog.Fatal("error registering service", llog.KV{
				"service": fmt.Sprintf("%T", service),
				"err":     err,
			})
		}
	}
	// hidden services are served but won't show up in calls to RPC.GetMethods
	for _, service := range g.HiddenServices {
		if err := s.RegisterHiddenService(service, ""); err != nil {
			llog.Fatal("error registering hidden service", llog.KV{
				"service": fmt.Sprintf("%T", service),
				"err":     err,
			})
		}
	}
	return s
}
// pprofHandler returns a handler serving the net/http/pprof endpoints,
// restricted to loopback clients only; everyone else receives a 403.
func (g *GenAPI) pprofHandler() http.Handler {
	mux := http.NewServeMux()
	mux.HandleFunc("/debug/pprof/", pprof.Index)
	// Even though Index handles this, this particular one won't work without
	// setting the BlockProfileRate temporarily. Enable it, sample for 5
	// seconds, serve the profile, then turn it back off.
	mux.HandleFunc("/debug/pprof/block", func(w http.ResponseWriter, r *http.Request) {
		runtime.SetBlockProfileRate(1)
		time.Sleep(5 * time.Second)
		pprof.Index(w, r)
		runtime.SetBlockProfileRate(0)
	})
	mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
	mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
	mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
	mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// profiling data can leak sensitive details, so only loopback clients
		// may reach these endpoints
		ipStr, _, _ := net.SplitHostPort(r.RemoteAddr)
		if ip := net.ParseIP(ipStr); ip == nil || !ip.IsLoopback() {
			http.Error(w, "", 403) // forbidden
			return
		}
		mux.ServeHTTP(w, r)
	})
}
// init performs all mode-independent setup: parameter parsing, logging,
// backend connections (mongo/redis/okq), codec defaults, TLS cert loading,
// the request-counting goroutine, and the app-supplied Init/AppendInit
// functions. Called by each of the run-mode methods after Mode is set.
func (g *GenAPI) init() {
	g.ctxs = map[*http.Request]context.Context{}
	rpcutil.InstallCustomValidators()
	g.SRVClient.EnableCacheLast()
	g.doLever()
	g.SRVClient.Preprocess = g.srvClientPreprocess
	if g.RPCEndpoint == "" {
		g.RPCEndpoint = "/"
	}
	if g.Mux == nil {
		g.Mux = http.NewServeMux()
	}
	if g.Lever.ParamFlag("--version") {
		v := Version
		// Version may or may not have come in quoted from -ldflags; normalize
		// to quoted, then unquote
		if v[0] != '"' {
			v = `"` + v + `"`
		}
		if uv, err := strconv.Unquote(v); err == nil {
			v = uv
		}
		fmt.Println(v)
		// give stdout a moment before exiting
		time.Sleep(100 * time.Millisecond)
		os.Exit(0)
	}
	ll, _ := g.ParamStr("--log-level")
	llog.SetLevelFromString(ll)
	llog.Info("starting GenAPI", llog.KV{"mode": g.Mode, "name": g.Name})
	if g.MongoInfo != nil {
		g.initMongo()
	}
	if g.RedisInfo != nil {
		g.initRedis()
	}
	if g.OkqInfo != nil {
		g.initOkq()
	}
	if g.Codec == nil {
		c := rpcutil.NewLLCodec()
		c.ValidateInput = true
		c.RunInputApplicators = true
		g.Codec = c
	}
	// load TLS certs from the command-line unless the app opted to fill them
	// in manually during Init
	tlsAddrs, _ := g.ParamStrs("--tls-listen-addr")
	if g.TLSInfo != nil && !g.TLSInfo.FillCertsManually && len(tlsAddrs) > 0 {
		certFiles, _ := g.ParamStrs("--tls-cert-file")
		keyFiles, _ := g.ParamStrs("--tls-key-file")
		if len(certFiles) == 0 {
			llog.Fatal("no --tls-cert-file provided")
		}
		if len(certFiles) != len(keyFiles) {
			llog.Fatal("number of --tls-cert-file must match number of --tls-key-file")
		}
		for i := range certFiles {
			kv := llog.KV{"certFile": certFiles[i], "keyFile": keyFiles[i]}
			llog.Info("loading tls cert", kv)
			c, err := tls.LoadX509KeyPair(certFiles[i], keyFiles[i])
			if err != nil {
				llog.Fatal("failed to load tls cert", kv, llog.KV{"err": err})
			}
			g.TLSInfo.Certs = append(g.TLSInfo.Certs, c)
		}
	}
	// receive signals from countHandler and log the request total once a
	// minute
	g.countCh = make(chan bool)
	go func() {
		t := time.Tick(1 * time.Minute)
		var c uint64
		for {
			select {
			case <-g.countCh:
				c++
			case <-t:
				llog.Info("count requests in last minute", llog.KV{"count": c})
				c = 0
			}
		}
	}()
	if g.Init != nil {
		// make sure the struct's Init is always called first
		g.Init(g)
	}
	for _, f := range g.inits {
		f(g)
	}
	// InitDoneCh gets closed at the end of APIMode being called
	if g.Mode != APIMode && g.InitDoneCh != nil {
		close(g.InitDoneCh)
	}
}
// doLever defines all command-line/config parameters this GenAPI supports
// (based on which optional features are configured) and parses them. Fixes a
// few typos in the user-facing help text ("hostanme", "Maybe be specified").
func (g *GenAPI) doLever() {
	o := &lever.Opts{}
	if g.Mode == CLIMode {
		o.DisallowConfigFile = true
	}
	g.Lever = lever.New(g.Name, o)
	g.Lever.Add(lever.Param{
		Name:        "--log-level",
		Description: "Log level to run with. Available levels are: debug, info, warn, error, fatal",
		Default:     "info",
	})
	g.Lever.Add(lever.Param{
		Name:        "--datacenter",
		Description: "What datacenter the service is running in",
		Default:     os.Getenv("DATACENTER"),
	})
	g.Lever.Add(lever.Param{
		Name:        "--hostname",
		Description: "What hostname the service is running on",
		Default:     os.Getenv("HOSTNAME"),
	})
	// The listen-addr parameters can be used outside of APIMode through the
	// RPCListener method
	g.Lever.Add(lever.Param{
		Name:         "--listen-addr",
		Description:  "[address]:port to listen for requests on. If port is zero a port will be chosen randomly",
		DefaultMulti: []string{":0"},
	})
	if g.TLSInfo != nil {
		g.Lever.Add(lever.Param{
			Name:         "--tls-listen-addr",
			Description:  "[address]:port to listen for https requests on. If port is zero a port will be chosen randomly",
			DefaultMulti: []string{},
		})
		if !g.TLSInfo.FillCertsManually {
			g.Lever.Add(lever.Param{
				Name:        "--tls-cert-file",
				Description: "Certificate file to use for TLS. May be specified more than once. Must be specified as many times as --tls-key-file.",
			})
			g.Lever.Add(lever.Param{
				Name:        "--tls-key-file",
				Description: "Key file to use for TLS. May be specified more than once. Must be specified as many times as --tls-cert-file.",
			})
		}
	}
	if g.Mode == APIMode {
		g.Lever.Add(lever.Param{
			Name:        "--skyapi-addr",
			Description: "Hostname of skyapi, to be looked up via a SRV request. Unset means don't register with skyapi",
		})
		g.Lever.Add(lever.Param{
			Name:        "--proxy-proto-allowed-cidrs",
			Description: "Comma separated list of cidrs which are allowed to use the PROXY protocol",
			Default:     "127.0.0.1/32,::1/128,10.0.0.0/8",
		})
		g.Lever.Add(lever.Param{
			Name:        "--unhealthy-timeout",
			Description: "Number of milliseconds to appear unhealthy after a stop signal is received",
		})
	}
	if g.MongoInfo != nil {
		g.Lever.Add(lever.Param{
			Name:        "--mongo-addr",
			Description: "Address of mongo instance to use",
			Default:     "127.0.0.1:27017",
		})
	}
	if g.RedisInfo != nil {
		g.Lever.Add(lever.Param{
			Name:        "--redis-addr",
			Description: "Address of redis instance to use. May be a single member of a cluster",
			Default:     "127.0.0.1:6379",
		})
		g.Lever.Add(lever.Param{
			Name:        "--redis-pool-size",
			Description: "Number of connections to a single redis instance to use. If a cluster is being used, this many connections will be made to each member of the cluster",
			Default:     "10",
		})
	}
	if g.OkqInfo != nil {
		g.Lever.Add(lever.Param{
			Name:        "--okq-addr",
			Description: "Address of okq instance to use",
			Default:     "127.0.0.1:4777",
		})
		g.Lever.Add(lever.Param{
			Name:        "--okq-pool-size",
			Description: "Number of connections to okq to initially make",
			Default:     "10",
		})
	}
	// --version is only available when a Version was baked in at build time
	if Version != "" {
		g.Lever.Add(lever.Param{
			Name:        "--version",
			Aliases:     []string{"-V"},
			Description: "Print out version information for this binary",
			Flag:        true,
		})
	}
	// each remote API gets its own --<name>-addr parameter
	for rapi, raddr := range g.RemoteAPIs {
		g.Lever.Add(lever.Param{
			Name:        "--" + rapi + "-addr",
			Description: "Address or hostname of a " + rapi + " instance",
			Default:     raddr,
		})
	}
	for _, p := range g.LeverParams {
		g.Lever.Add(p)
	}
	g.Lever.Parse()
}
// srvClientPreprocess adjusts SRV answers so targets in this node's own
// datacenter are preferred: any answer whose target is prefixed with the
// datacenter hash gets its priority reduced by one (floored at zero).
func (g *GenAPI) srvClientPreprocess(m *dns.Msg) {
	dc := g.getDCHash()
	if dc == "" {
		return
	}
	prefix := dc + "-"
	for _, ans := range m.Answer {
		srv, ok := ans.(*dns.SRV)
		if !ok || !strings.HasPrefix(srv.Target, prefix) {
			continue
		}
		if srv.Priority < 2 {
			srv.Priority = 0
		} else {
			srv.Priority--
		}
	}
}
// getDCHash returns the first 20 hex characters of the sha1 of the
// --datacenter parameter, or "" when no datacenter is configured. Used as a
// prefix when registering with skyapi and when re-prioritizing SRV answers.
func (g *GenAPI) getDCHash() string {
	dc, _ := g.Lever.ParamStr("--datacenter")
	if dc == "" {
		return ""
	}
	sum := sha1.Sum([]byte(dc))
	hexSum := fmt.Sprintf("%x", sum)
	return hexSum[:20]
}
// doSkyAPI registers this instance with skyapi (when --skyapi-addr is set)
// and keeps the registration alive in a background go-routine, reconnecting
// after errors. It returns a channel which stops the registration when
// closed, or nil when skyapi isn't configured.
func (g *GenAPI) doSkyAPI() chan struct{} {
	skyapiAddr, _ := g.Lever.ParamStr("--skyapi-addr")
	if skyapiAddr == "" {
		return nil
	}
	dc := g.getDCHash()
	kv := llog.KV{
		"skyapiAddr":  skyapiAddr,
		"listenAddr":  g.ListenAddr,
		"serviceName": g.Name,
		"prefix":      dc,
	}
	stopCh := make(chan struct{})
	go func() {
		for {
			llog.Info("connecting to skyapi", kv)
			err := client.ProvideOpts(client.Opts{
				SkyAPIAddr:        skyapiAddr,
				Service:           g.Name,
				ThisAddr:          g.ListenAddr,
				ReconnectAttempts: 0, // do not attempt to reconnect, we'll do that here
				StopCh:            stopCh,
				Prefix:            dc,
			})
			if err != nil {
				// back off briefly before re-registering
				llog.Warn("skyapi error", kv.Set("err", err))
				time.Sleep(1 * time.Second)
			} else {
				// If there wasn't an error but skyapi stopped, it's because the
				// stopCh was closed
				return
			}
		}
	}()
	return stopCh
}
// initMongo connects to the mongo instance named by --mongo-addr and stores
// the session on MongoInfo. In TestMode the database name is prefixed with
// "test_" so tests don't touch live data. If mongo is Optional and no
// address was given, the session is left nil.
func (g *GenAPI) initMongo() {
	if g.Mode == TestMode {
		g.MongoInfo.DBName = "test_" + g.MongoInfo.DBName
	}
	mongoAddr, _ := g.ParamStr("--mongo-addr")
	if mongoAddr == "" && g.MongoInfo.Optional {
		// optional mongo that was never configured; leave session nil
		return
	}
	g.MongoInfo.session = mgoutil.EnsureSession(mongoAddr)
}
// initRedis connects to the redis instance (possibly a cluster member) named
// by --redis-addr and stores the resulting Cmder on RedisInfo. If redis is
// Optional and no address was given, Cmder is left nil.
func (g *GenAPI) initRedis() {
	redisAddr, _ := g.ParamStr("--redis-addr")
	if redisAddr == "" && g.RedisInfo.Optional {
		return
	}
	redisPoolSize, _ := g.ParamInt("--redis-pool-size")
	kv := llog.KV{
		"addr":     redisAddr,
		"poolSize": redisPoolSize,
	}
	llog.Info("connecting to redis", kv)
	var err error
	g.RedisInfo.Cmder, err = radixutil.DialMaybeCluster("tcp", redisAddr, redisPoolSize)
	if err != nil {
		llog.Fatal("error connecting to redis", kv, llog.KV{"err": err})
	}
}
// initOkq connects a redis pool to the okq instance named by --okq-addr and
// stores the resulting client on OkqInfo. If okq is Optional and no address
// was given, Client is left nil. Fixes the fatal log message typo ("error
// connection to okq").
func (g *GenAPI) initOkq() {
	okqAddr, _ := g.ParamStr("--okq-addr")
	if okqAddr == "" && g.OkqInfo.Optional {
		return
	}
	okqPoolSize, _ := g.ParamInt("--okq-pool-size")
	kv := llog.KV{
		"addr":     okqAddr,
		"poolSize": okqPoolSize,
	}
	// default the read/write + notify timeout
	if g.OkqInfo.Timeout == 0 {
		g.OkqInfo.Timeout = 30 * time.Second
	}
	df := radixutil.SRVDialFunc(g.SRVClient, g.OkqInfo.Timeout)
	llog.Info("connecting to okq", kv)
	p, err := pool.NewCustom("tcp", okqAddr, okqPoolSize, df)
	if err != nil {
		llog.Fatal("error connecting to okq", kv, llog.KV{"err": err})
	}
	g.OkqInfo.Client = okq.NewWithOpts(okq.Opts{
		RedisPool:     p,
		NotifyTimeout: g.OkqInfo.Timeout,
	})
}
// contextHandler wraps h so that a context is created for each request and
// stored in g.ctxs for the request's duration; RequestContext retrieves it.
// The entry is removed once the wrapped handler returns.
func (g *GenAPI) contextHandler(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := requestCtx(r)
		// attach request KVs (presumably remote addr etc. — see
		// rpcutil.RequestKV) unless the context already carries some
		if len(ContextKV(ctx)) == 0 {
			ctx = ContextMergeKV(ctx, rpcutil.RequestKV(r))
		}
		// TODO I'll be posting a question in the google group about what
		// exactly we're supposed to be doing here. It's currently very unclear
		//cn, ok := w.(http.CloseNotifier)
		//if !ok {
		//	h.ServeHTTP(w, r)
		//	return
		//}
		//closeCh := make(chan struct{})
		//reqCloseCh := cn.CloseNotify()
		//ctx, cancelFn := context.WithCancel(ctx)
		//go func() {
		//	<-closeCh
		//	<-reqCloseCh
		//	cancelFn()
		//}()
		g.ctxsL.Lock()
		g.ctxs[r] = ctx
		g.ctxsL.Unlock()
		h.ServeHTTP(w, r)
		//close(closeCh)
		g.ctxsL.Lock()
		delete(g.ctxs, r)
		g.ctxsL.Unlock()
	})
}
// RequestContext returns the context which contextHandler stored for the
// given request. If no context was stored (e.g. the request never went
// through the genapi handler chain) a background context is returned.
func (g *GenAPI) RequestContext(r *http.Request) context.Context {
	g.ctxsL.RLock()
	defer g.ctxsL.RUnlock()
	if ctx, ok := g.ctxs[r]; ok {
		return ctx
	}
	return context.Background()
}
// ReloadListeners reloads the listener configurations of all existing
// listeners. This doesn't actually close the listen sockets, just hot reloads
// the configuration. Listeners are processed sequentially; the first error
// encountered is returned and the remaining listeners are skipped.
func (g *GenAPI) ReloadListeners() error {
	var err error
	for _, lr := range g.listeners {
		if err = lr.Reload(); err != nil {
			break
		}
	}
	return err
}
// CallErr is an implementation of error which is returned from the Call method,
// and subsequently by the Call methods on Callers returned by RemoteAPICaller
// and NewCaller. If used, CallErr will not be a pointer
type CallErr struct {
URL string
Method string
Err error
}
func (c CallErr) Error() string {
return fmt.Sprintf("calling %q on %q: %s", c.Method, c.URL, c.Err)
}
// Call makes an rpc call, presumably to another genapi server but really it
// only has to be a JSONRPC2 server. If it is another genapi server, however,
// the given context will be propagated to it, as well as being used here as a
// timeout if deadline is set on it. See rpcutil for more on how the rest of the
// arguments work.
//
// Note that host can be a hostname, and address (host:port), or a url
// (http[s]://host[:port])
func (g *GenAPI) Call(ctx context.Context, res interface{}, host, method string, args interface{}) error {
	// resolve SRV records (if any) and normalize host into a full url
	host = g.SRVClient.MaybeSRVURL(host)
	// presumably this request serves as a template carrying the context
	// headers for rpcutil (see JSONRPC2Opts.BaseRequest) — TODO confirm
	r, err := http.NewRequest("POST", host, nil)
	if err != nil {
		return CallErr{URL: host, Method: method, Err: err}
	}
	ContextApply(r, ctx)
	opts := rpcutil.JSONRPC2Opts{
		BaseRequest: r,
		Context:     ctx,
	}
	if err := rpcutil.JSONRPC2CallOpts(opts, host, res, method, args); err != nil {
		return CallErr{URL: host, Method: method, Err: err}
	}
	return nil
}
// remoteAPIAddr returns the configured address for the named remote API (the
// --<name>-addr parameter). A Fatal is thrown when the parameter is empty,
// since a remote API without an address is unusable.
func (g *GenAPI) remoteAPIAddr(remoteAPI string) string {
	param := "--" + remoteAPI + "-addr"
	addr, _ := g.ParamStr(param)
	if addr == "" {
		llog.Fatal("no address defined", llog.KV{"api": remoteAPI})
	}
	return addr
}
// Caller provides a way of calling RPC methods against a pre-defined remote
// endpoint. The Call method is essentially the same as GenAPI's Call method,
// but doesn't take in a host parameter
type Caller interface {
	// Call performs the RPC call against the bound endpoint, filling res with
	// the response on success
	Call(ctx context.Context, res interface{}, method string, args interface{}) error
}
// caller is the concrete Caller implementation; it binds a single address to
// a GenAPI's Call method
type caller struct {
	g    *GenAPI
	addr string
}

// Call implements the Caller interface by delegating to g.Call with the
// bound address
func (c caller) Call(ctx context.Context, res interface{}, method string, args interface{}) error {
	return c.g.Call(ctx, res, c.addr, method, args)
}
// NewCaller returns an instance of a Caller which will make RPC requests
// against the given address, after doing a SRV request on it before each
// request
func (g *GenAPI) NewCaller(addr string) Caller {
	return caller{g: g, addr: addr}
}
// RemoteAPIAddr returns an address to use for the given remoteAPI (which must
// be defined in RemoteAPIs). The address will have had SRV called on it
// already. A Fatal will be thrown if no address has been provided for the
// remote API
func (g *GenAPI) RemoteAPIAddr(remoteAPI string) string {
	addr := g.remoteAPIAddr(remoteAPI)
	return g.SRVClient.MaybeSRV(addr)
}
// RemoteAPICaller takes in the name of a remote API instance defined in the
// RemoteAPIs field, and returns a Caller which can be used to make RPC calls
// against it. The arguments to the Caller's Call are essentially the same as
// those to the Call method, sans the host argument. A Fatal will be thrown if
// no address has been provided for the remote API
func (g *GenAPI) RemoteAPICaller(remoteAPI string) Caller {
	return caller{g: g, addr: g.remoteAPIAddr(remoteAPI)}
}
// CallerStub provides a convenient way to make stubbed endpoints for testing
type CallerStub func(method string, args interface{}) (interface{}, error)
// Call implements the Call method for the Caller interface. It passed method
// and args to the underlying CallerStub function. The returned interface from
// that function is assigned to res (if the underlying types for them are
// compatible). The passed in context is ignored.
func (cs CallerStub) Call(_ context.Context, res interface{}, method string, args interface{}) error {
csres, err := cs(method, args)
if err != nil {
return err
}
if res == nil {
return nil
}
vres := reflect.ValueOf(res).Elem()
vres.Set(reflect.ValueOf(csres))
return nil
}
// move countCh initialization into RPCListen
// Package genapi implements a generic skeleton we can use as the basis for an
// api service. It will set up command line arguments, connections to backend
// databases, handle test modes which might affect those databases, register
// itself with skyapi, and more.
//
// Basic definition
//
// To use first initialize a GenAPI instance somewhere accessible by your entire
// application, and give it an RPC type
//
// package myapi
//
// var GA = genapi.GenAPI{
// Name: "my-api",
// RedisInfo: &genapi.RedisInfo{}
// Services: []interface{}{MyAPI{}},
// }
//
// type MyAPI struct{}
//
// func (_ MyAPI) Foo(r *http.Request, args *struct{}, res *struct{}) error {
// return GA.Cmd("INCR", "MyKey").Err
// }
//
// API Mode
//
// To actually read command-line arguments, set up database connections, listen
// on a random port, register with skyapi, and start handling requests, simply
// call APIMode() from your main method:
//
// package main
//
// func main() {
// myapi.GA.APIMode()
// }
//
// In APIMode the genapi will also listen for SIGTERM signals, and if it
// receives one will unregister with skyapi, and exit once all ongoing requests
// are completed.
//
// Test Mode
//
// When testing your api you can call TestMode from your test's init function,
// and call RPC to get an instance of an http.Handler you can make calls
// against:
//
// package myapi // myapi_test.go
//
// import . "testing"
//
// func init() {
// GA.TestMode()
// }
//
// func TestSomeThing(t *T) {
// h := GA.RPC()
// // test against h
// }
//
// CLI Mode
//
// Finally, there are times when you want a command-line binary which will be
// made alongside the actual api binary, and which will share resources and
// possibly database connections. In this case you can use the CLIMode method
// and then access the GenAPI from your main method as normal:
//
// package main
//
// func main() {
// myapi.GA.CLIMode()
// myapi.GA.Cmd("DECR", "MyKey")
// }
//
package genapi
import (
"crypto/sha1"
"crypto/tls"
"fmt"
"net"
"net/http"
"net/http/pprof"
"os"
"os/signal"
"reflect"
"runtime"
"strconv"
"strings"
"sync"
"syscall"
"time"
"golang.org/x/net/context"
"github.com/gorilla/rpc/v2"
"github.com/levenlabs/gatewayrpc"
"github.com/levenlabs/go-llog"
"github.com/levenlabs/go-srvclient"
"github.com/levenlabs/golib/mgoutil"
"github.com/levenlabs/golib/radixutil"
"github.com/levenlabs/golib/rpcutil"
"github.com/mediocregopher/lever"
"github.com/mediocregopher/okq-go.v2"
"github.com/mediocregopher/radix.v2/pool"
"github.com/mediocregopher/radix.v2/util"
"github.com/mediocregopher/skyapi/client"
"github.com/miekg/dns"
"gopkg.in/mgo.v2"
)
// Version can be set using:
// 	-ldflags "-X github.com/levenlabs/golib/genapi.Version versionstring"
// on the go build command. When this is done, the --version flag will be
// available on the command-line and will print out whatever version string is
// passed in. (The flag is only registered when Version is non-empty.)
//
// It could also be set manually during runtime, but that would kind of defeat
// the purpose.
//
// Version will be automatically unquoted
var Version string
// MongoInfo contains information needed by the api to interact with a mongo
// backend, and also houses the connection to that backend (which can be
// interacted with through its methods)
type MongoInfo struct {
	// If you want to make mongo optional, set this and if --mongo-addr isn't
	// sent, then WithDB, WithColl will call fn with nil and SessionHelper's
	// session will be nil.
	Optional bool

	// The name of the mongo database this app should use. In TestMode this will
	// always be overwritten to "test_<DBName>"
	DBName string

	// populated by initMongo; nil when mongo is Optional and unconfigured
	session *mgo.Session
}
// InitFunc is just a helper for a function that accepts a GenAPI pointer.
// Used for the Init field and AppendInit.
type InitFunc func(*GenAPI)
// WithDB is similar to mgoutil.SessionHelper's WithDB, see those docs for more
// details. If mongo was made Optional and never connected, fn is called with
// nil.
func (m *MongoInfo) WithDB(fn func(*mgo.Database)) {
	if m.session == nil {
		// optional mongo that was never configured
		fn(nil)
		return
	}
	sh := mgoutil.SessionHelper{
		Session: m.session,
		DB:      m.DBName,
	}
	sh.WithDB(fn)
}
// WithColl is similar to mgoutil.SessionHelper's WithColl, see those docs for
// more details. If mongo was made Optional and never connected, fn is called
// with nil.
func (m *MongoInfo) WithColl(collName string, fn func(*mgo.Collection)) {
	if m.session == nil {
		// optional mongo that was never configured
		fn(nil)
		return
	}
	sh := mgoutil.SessionHelper{
		Session: m.session,
		DB:      m.DBName,
		Coll:    collName,
	}
	sh.WithColl(fn)
}
// CollSH returns an mgoutil.SessionHelper for a collection of the given name
// The SessionHelper's Session might be nil if you made mongo Optional.
func (m *MongoInfo) CollSH(collName string) mgoutil.SessionHelper {
	return mgoutil.SessionHelper{
		Session: m.session,
		DB:      m.DBName,
		Coll:    collName,
	}
}
// RedisInfo is used to tell the api to interact with a redis backend, and also
// houses the connection to that backend. If the redis backend is a cluster
// instance that whole cluster will be connected to
type RedisInfo struct {
	// If you want to make redis optional, set this and if --redis-addr isn't
	// sent, Cmder will be nil.
	Optional bool

	// Populated by the api once a connection to redis is made, and can be used
	// as such. Do not set manually.
	util.Cmder
}
// OkqInfo is used to tell the api to interact with a set of okq instances.
type OkqInfo struct {
	// If you want to make okq optional, set this and if --okq-addr isn't sent,
	// Client will return nil
	Optional bool

	// Read/Write timeout for redis connection and the NotifyTimeout for Client.
	// Defaults to 30 seconds
	// Do not change after initializing GenAPI
	Timeout time.Duration

	// populated by initOkq; nil when okq is Optional and unconfigured
	*okq.Client
}
// TLSInfo is used to tell the api to use TLS (e.g. https/ssl) when listening
// for incoming requests
type TLSInfo struct {
	// If set to true then the config options for passing in cert files on the
	// command-line will not be used, and instead the Certs field will be
	// expected to be filled in manually during the Init function
	FillCertsManually bool

	// One or more certificates to use for TLS. Will be filled automatically if
	// FillCertsManually is false
	Certs []tls.Certificate
}
// GenAPI is a type used to handle most of the generic logic we always implement
// when making an RPC API endpoint.
//
// The struct is initialized with whatever parameters are appropriate, and then
// has either APIMode(), TestMode(), or CLIMode() called on it depending on the
// intent. Fields are optional unless otherwise marked in the comment.
type GenAPI struct {
	// Required. Name is the name of the api, as it will be identified on the
	// command-line and in skyapi
	Name string

	// The set of rpc service structs which this API will host. Must have at
	// least one service in APIMode
	Services []interface{}

	// Like Services, but these will not be registered with the underlying
	// gateway library, and therefore will not show up in calls to
	// "RPC.GetMethods"
	HiddenServices []interface{}

	// The mux which the rpc services will be added to. If not set a new one
	// will be created and used. This can be used to provide extra functionality
	// in conjunction with the RPC server, or completely in place of it.
	//
	// It is important that RPCEndpoint does *not* have a handler set in this
	// mux, as GenAPI will be setting it itself.
	Mux *http.ServeMux

	// The http endpoint that the RPC handler for Services and HiddenServices
	// will be attached to. Defaults to "/". If you set this to "_", no rpc
	// listener will be set up and its up to you to add the handler from RPC()
	// to the mux for whatever path you need.
	RPCEndpoint string

	// Additional lever.Param structs which can be included in the lever parsing
	LeverParams []lever.Param

	// If mongo is intended to be used as a backend, this should be filled in
	*MongoInfo

	// If redis is intended to be used, this should be filled in.
	*RedisInfo

	// If okq is intended to be used, this should be filled in.
	*OkqInfo

	// If TLS is intended to be used, this should be filled in. The Certs field
	// of TLSInfo may be filled in during the Init function for convenience, but
	// the struct itself must be initialized before any of the Mode methods are
	// called
	*TLSInfo

	// A function to run just after initializing connections to backing
	// database. Meant for performing any initialization needed by the app.
	// This is called before any AppendInit functions
	Init InitFunc

	// functions registered via AppendInit, run after Init
	inits []InitFunc

	// May be set if a codec with different parameters is required.
	// If not set an rpcutil.LLCodec with default options will be used.
	Codec rpc.Codec

	// Do not set. This will be automatically filled in with whatever address
	// is being listened on once APIMode is called.
	ListenAddr string

	// Do not set. This will be automatically filled in when any of the run
	// modes are called, and may be used after that point to retrieve parameter
	// values.
	*lever.Lever

	// Do not set. This will be automatically filled in when any of the run
	// modes are called. Indicates which mode the GenAPI is currently in, and
	// may be used after that point to know which run mode GenAPI is in.
	Mode string

	// When initialized, this channel will be closed at the end of the init
	// phase of running. If in APIMode it will be closed just before the call to
	// ListenAndServe. This is useful so you can call APIMode in a separate
	// go-routine and know when it's started listening, if there's other steps
	// you want to take after initialization has been done.
	InitDoneCh chan bool

	// When initialized, this channel will be closed when in APIMode and cleanup
	// has been completed after a kill signal. This is useful if you have other
	// cleanup you want to run after GenAPI is done.
	DoneCh chan bool

	// Optional set of remote APIs (presumably GenAPIs, but that's not actually
	// required) that this one will be calling. The key should be the name of
	// the remote api, and the value should be the default address for it. Each
	// one will have a configuration option added for its address (e.g. if
	// "other-api" is in this list, then "--other-api-addr" will be a config
	// option). Each key can be used as an argument to RemoteAPICaller to obtain
	// a convenient function for communicating with other apis.
	RemoteAPIs map[string]string

	// Optional set of Healthers which should be checked during a /health-check.
	// These will be checked sequentially, and if any return an error that will
	// be logged and the health check will return false. The key in the map is a
	// name for the Healther which can be logged
	Healthers map[string]Healther

	// SRVClient which will be used by GenAPI when resolving requests, and which
	// can also be used by other processes as well. This should only be modified
	// during the init function
	srvclient.SRVClient

	// per-request contexts managed by contextHandler, keyed by request
	ctxs  map[*http.Request]context.Context
	ctxsL sync.RWMutex

	// Mutex for accessing Healthers
	healthersL sync.Mutex

	// Signal channel. Included for testing purposes only
	sigCh chan os.Signal

	// set of active listeners for this genapi (APIMode only)
	listeners []*listenerReloader

	// the active httpWaiter for the instance
	hw *httpWaiter

	// request-count signals from countHandler, drained by a logging goroutine
	countCh chan bool
}
// The different possible Mode values for GenAPI
const (
	// APIMode: full service — listeners, skyapi registration, signal handling
	APIMode = "api"
	// TestMode: init only, for use from go tests
	TestMode = "test"
	// CLIMode: init only with config file disabled, for command-line tools
	CLIMode = "cli"
)
// APIMode puts the GenAPI into APIMode, wherein it listens for any incoming rpc
// requests and tries to serve them against one of its Services. This method
// will block indefinitely
func (g *GenAPI) APIMode() {
	g.Mode = APIMode
	g.sigCh = make(chan os.Signal, 1)
	g.init()
	g.RPCListen()
	// Once ListenAddr is populated with the final value we can call doSkyAPI
	skyapiStopCh := g.doSkyAPI()
	unhealthyTimeout, _ := g.ParamInt("--unhealthy-timeout")
	if g.InitDoneCh != nil {
		close(g.InitDoneCh)
	}
	// After this point everything is listening and we're just waiting for a
	// kill signal
	llog.Info("waiting for close signal")
	signal.Notify(g.sigCh, syscall.SIGTERM, syscall.SIGINT)
	<-g.sigCh
	llog.Info("signal received, stopping")
	// swap in a Healther which always reports unhealthy, so load-balancers
	// start draining traffic away from this instance
	g.healthersL.Lock()
	g.Healthers = map[string]Healther{
		"sneezy": sneezy{},
	}
	g.healthersL.Unlock()
	if skyapiStopCh != nil {
		llog.Info("stopping skyapi connection")
		close(skyapiStopCh)
		// Wait a bit just in case something gets the skydns record before we
		// kill the skyapi connection, but the connection doesn't come in till
		// after hw.wait() runs
		time.Sleep(500 * time.Millisecond)
	}
	// Appear as unhealthy for a while before hw.wait() runs
	time.Sleep(time.Duration(unhealthyTimeout) * time.Millisecond)
	g.hw.wait() // hw is populated in RPCListen
	time.Sleep(50 * time.Millisecond)
	if g.DoneCh != nil {
		close(g.DoneCh)
	}
}
// countHandler wraps h so each request sends one signal on g.countCh (set up
// in RPCListen) before being served; those signals feed the per-minute
// request-count log line.
func (g *GenAPI) countHandler(h http.Handler) http.Handler {
	fn := func(w http.ResponseWriter, r *http.Request) {
		// record the request, then hand off
		g.countCh <- true
		h.ServeHTTP(w, r)
	}
	return http.HandlerFunc(fn)
}
// hostnameHandler wraps h so every response includes an X-Hostname header
// holding the --hostname parameter; when --hostname is unset h is returned
// untouched.
func (g *GenAPI) hostnameHandler(h http.Handler) http.Handler {
	hostname, _ := g.ParamStr("--hostname")
	if hostname == "" {
		return h
	}
	fn := func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("X-Hostname", hostname)
		h.ServeHTTP(w, r)
	}
	return http.HandlerFunc(fn)
}
// RPCListen sets up listeners for the GenAPI listen and starts them up. This
// may only be called after TestMode or CLIMode has been called, it is
// automatically done for APIMode.
func (g *GenAPI) RPCListen() {
	// receive signals from countHandler and log the request total once a
	// minute. NOTE(review): this goroutine (and its ticker) is started on
	// every call and never stops, so RPCListen should only be called once per
	// process — confirm callers obey this.
	g.countCh = make(chan bool)
	go func() {
		t := time.Tick(1 * time.Minute)
		var c uint64
		for {
			select {
			case <-g.countCh:
				c++
			case <-t:
				llog.Info("count requests in last minute", llog.KV{"count": c})
				c = 0
			}
		}
	}()
	// "_" means the app will mount the RPC() handler itself
	if g.RPCEndpoint != "_" {
		g.Mux.Handle(g.RPCEndpoint, g.RPC())
	}
	// The net/http/pprof package expects to be under /debug/pprof/, which is
	// why we don't strip the prefix here
	g.Mux.Handle("/debug/pprof/", g.pprofHandler())
	g.Mux.Handle("/health-check", g.healthCheck())
	// hw tracks in-flight requests so APIMode can drain them on shutdown
	g.hw = &httpWaiter{
		ch: make(chan struct{}, 1),
	}
	// build the middleware chain, innermost (Mux) first
	var h http.Handler
	h = g.Mux
	h = g.countHandler(h)
	h = g.hostnameHandler(h)
	h = g.contextHandler(h)
	h = g.hw.handler(h)
	addrs, _ := g.Lever.ParamStrs("--listen-addr")
	for _, addr := range addrs {
		// empty addr might get passed in to disable --listen-addr
		if addr == "" {
			continue
		}
		g.listeners = append(g.listeners, g.serve(h, addr, false))
	}
	if g.TLSInfo != nil {
		addrs, _ := g.Lever.ParamStrs("--tls-listen-addr")
		for _, addr := range addrs {
			if addr == "" {
				continue
			}
			g.listeners = append(g.listeners, g.serve(h, addr, true))
		}
	}
}
// AddHealther adds a healther to Healthers under the specified key
func (g *GenAPI) AddHealther(key string, healther Healther) {
	// guard the map; health checks may read it concurrently
	g.healthersL.Lock()
	defer g.healthersL.Unlock()
	g.Healthers[key] = healther
}
// This starts a go-routine which will do the actual serving of the handler
func (g *GenAPI) serve(h http.Handler, addr string, doTLS bool) *listenerReloader {
	kv := llog.KV{"addr": addr, "tls": doTLS}
	llog.Info("creating listen socket", kv)
	ln, err := net.Listen("tcp", addr)
	if err != nil {
		llog.Fatal("failed creating listen socket", kv.Set("err", err))
	}
	// addr may have requested port 0 (random port); record what was actually
	// bound
	actualAddr := ln.Addr().String()
	kv["addr"] = actualAddr
	// If this is the first address specified set ListenAddr to that, so it will
	// be advertised with skyapi
	if g.ListenAddr == "" {
		g.ListenAddr = actualAddr
	}
	netln := net.Listener(tcpKeepAliveListener{ln.(*net.TCPListener)})
	lr, err := newListenerReloader(netln, g.listenerMaker(doTLS))
	if err != nil {
		llog.Fatal("failed to create listener", kv.Set("err", err))
	}
	// serve in the background; the returned reloader lets callers hot-reload
	// the listener configuration later
	go func() {
		llog.Info("starting rpc listening", kv)
		srv := &http.Server{
			Handler: h,
		}
		srv.Serve(lr)
	}()
	return lr
}
// listenerMaker returns a function which wraps a raw net.Listener with a
// PROXY-protocol-aware listener (limited to --proxy-proto-allowed-cidrs) and,
// if doTLS is set, a TLS listener using the certificates from TLSInfo. Used
// by the listenerReloader so wrapping can be re-applied on reload.
func (g *GenAPI) listenerMaker(doTLS bool) func(net.Listener) (net.Listener, error) {
	return func(l net.Listener) (net.Listener, error) {
		var err error
		allowedProxyCIDRsStr, _ := g.ParamStr("--proxy-proto-allowed-cidrs")
		allowedProxyCIDRs := strings.Split(allowedProxyCIDRsStr, ",")
		if l, err = newProxyListener(l, allowedProxyCIDRs); err != nil {
			return nil, fmt.Errorf("proxy proto listener: %s", err)
		}
		if doTLS {
			tf := &tls.Config{
				Certificates: g.TLSInfo.Certs,
			}
			tf.BuildNameToCertificate()
			l = tls.NewListener(l, tf)
		}
		return l, nil
	}
}
// TestMode puts the GenAPI into TestMode, wherein it is then prepared to be
// used for during go tests. Must be called before any other use of the
// GenAPI instance.
func (g *GenAPI) TestMode() {
	g.Mode = TestMode
	g.init()
}
// CLIMode puts the GenAPI into CLIMode, wherein it is then prepared to be used
// by a command-line utility. Must be called before any other use of the
// GenAPI instance.
func (g *GenAPI) CLIMode() {
	g.Mode = CLIMode
	g.init()
}
// AppendInit adds a function to be called when GenAPI is initialized. It will
// be called after GenAPI's Init() and after any previous functions that were
// appended. Panics if called after initialization has already happened
// (signalled by Mode being set).
func (g *GenAPI) AppendInit(f InitFunc) {
	if g.Mode != "" {
		panic("genapi: AppendInit was called after Init has already been ran")
	}
	g.inits = append(g.inits, f)
}
// RPC returns an http.Handler which will handle the RPC calls made against it
// for the GenAPI's Services. Registration failures are fatal since they
// indicate a programming error in the service definitions.
func (g *GenAPI) RPC() http.Handler {
	// TODO make gatewayrpc.Server have an option not to do its logging
	// per-request, so we can do it in here with the proper KVs from the context
	s := gatewayrpc.NewServer()
	s.RegisterCodec(g.Codec, "application/json")
	for _, service := range g.Services {
		if err := s.RegisterService(service, ""); err != nil {
			llog.Fatal("error registering service", llog.KV{
				"service": fmt.Sprintf("%T", service),
				"err":     err,
			})
		}
	}
	// Hidden services are registered the same way but via
	// RegisterHiddenService
	for _, service := range g.HiddenServices {
		if err := s.RegisterHiddenService(service, ""); err != nil {
			llog.Fatal("error registering hidden service", llog.KV{
				"service": fmt.Sprintf("%T", service),
				"err":     err,
			})
		}
	}
	return s
}
// pprofHandler returns an http.Handler exposing the net/http/pprof endpoints.
// Requests are only allowed from loopback addresses; anything else receives a
// 403.
func (g *GenAPI) pprofHandler() http.Handler {
	h := http.NewServeMux()
	h.Handle("/debug/pprof/", http.HandlerFunc(pprof.Index))
	// Even though Index handles this, this particular one won't work without
	// setting the BlockProfileRate temporarily.
	h.HandleFunc("/debug/pprof/block", func(w http.ResponseWriter, r *http.Request) {
		runtime.SetBlockProfileRate(1)
		// Reset via defer so profiling is always turned back off, even if
		// serving the profile panics.
		defer runtime.SetBlockProfileRate(0)
		time.Sleep(5 * time.Second)
		pprof.Index(w, r)
	})
	h.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline))
	h.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile))
	h.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol))
	h.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace))
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Restrict access to requests originating from this host
		ipStr, _, _ := net.SplitHostPort(r.RemoteAddr)
		ip := net.ParseIP(ipStr)
		if ip == nil || !ip.IsLoopback() {
			http.Error(w, "", http.StatusForbidden)
			return
		}
		h.ServeHTTP(w, r)
	})
}
// init performs the shared initialization for all modes: parses lever params,
// handles --version, sets up logging, connects backends (mongo/redis/okq),
// loads TLS certs, and finally runs the user-supplied Init and appended init
// functions. The ordering here is significant.
func (g *GenAPI) init() {
	g.ctxs = map[*http.Request]context.Context{}
	rpcutil.InstallCustomValidators()
	g.SRVClient.EnableCacheLast()
	g.doLever()
	g.SRVClient.Preprocess = g.srvClientPreprocess
	if g.RPCEndpoint == "" {
		g.RPCEndpoint = "/"
	}
	if g.Mux == nil {
		g.Mux = http.NewServeMux()
	}
	// --version: print the (possibly quoted) Version string and exit
	if g.Lever.ParamFlag("--version") {
		v := Version
		// Version may or may not already be a quoted string; normalize so
		// Unquote below works either way
		if v[0] != '"' {
			v = `"` + v + `"`
		}
		if uv, err := strconv.Unquote(v); err == nil {
			v = uv
		}
		fmt.Println(v)
		// brief pause so output can flush before exiting
		time.Sleep(100 * time.Millisecond)
		os.Exit(0)
	}
	ll, _ := g.ParamStr("--log-level")
	llog.SetLevelFromString(ll)
	llog.Info("starting GenAPI", llog.KV{"mode": g.Mode, "name": g.Name})
	if g.MongoInfo != nil {
		g.initMongo()
	}
	if g.RedisInfo != nil {
		g.initRedis()
	}
	if g.OkqInfo != nil {
		g.initOkq()
	}
	// Default codec validates and applies input applicators
	if g.Codec == nil {
		c := rpcutil.NewLLCodec()
		c.ValidateInput = true
		c.RunInputApplicators = true
		g.Codec = c
	}
	// Load TLS certs from --tls-cert-file/--tls-key-file unless the caller
	// said they'd fill TLSInfo.Certs manually
	tlsAddrs, _ := g.ParamStrs("--tls-listen-addr")
	if g.TLSInfo != nil && !g.TLSInfo.FillCertsManually && len(tlsAddrs) > 0 {
		certFiles, _ := g.ParamStrs("--tls-cert-file")
		keyFiles, _ := g.ParamStrs("--tls-key-file")
		if len(certFiles) == 0 {
			llog.Fatal("no --tls-cert-file provided")
		}
		if len(certFiles) != len(keyFiles) {
			llog.Fatal("number of --tls-cert-file must match number of --tls-key-file")
		}
		for i := range certFiles {
			kv := llog.KV{"certFile": certFiles[i], "keyFile": keyFiles[i]}
			llog.Info("loading tls cert", kv)
			c, err := tls.LoadX509KeyPair(certFiles[i], keyFiles[i])
			if err != nil {
				llog.Fatal("failed to load tls cert", kv, llog.KV{"err": err})
			}
			g.TLSInfo.Certs = append(g.TLSInfo.Certs, c)
		}
	}
	if g.Init != nil {
		// make sure the struct's Init is always called first
		g.Init(g)
	}
	for _, f := range g.inits {
		f(g)
	}
	// InitDoneCh gets closed at the end of APIMode being called
	if g.Mode != APIMode && g.InitDoneCh != nil {
		close(g.InitDoneCh)
	}
}
// doLever declares every lever parameter this GenAPI instance understands
// (based on mode and which optional infos are set) and parses the
// command-line/config input. Must be called before any ParamStr/ParamStrs
// lookups.
func (g *GenAPI) doLever() {
	o := &lever.Opts{}
	if g.Mode == CLIMode {
		o.DisallowConfigFile = true
	}
	g.Lever = lever.New(g.Name, o)
	g.Lever.Add(lever.Param{
		Name:        "--log-level",
		Description: "Log level to run with. Available levels are: debug, info, warn, error, fatal",
		Default:     "info",
	})
	g.Lever.Add(lever.Param{
		Name:        "--datacenter",
		Description: "What datacenter the service is running in",
		Default:     os.Getenv("DATACENTER"),
	})
	g.Lever.Add(lever.Param{
		Name:        "--hostname",
		Description: "What hostname the service is running on",
		Default:     os.Getenv("HOSTNAME"),
	})
	// The listen-addr parameters can be used outside of APIMode through the
	// RPCListener method
	g.Lever.Add(lever.Param{
		Name:         "--listen-addr",
		Description:  "[address]:port to listen for requests on. If port is zero a port will be chosen randomly",
		DefaultMulti: []string{":0"},
	})
	if g.TLSInfo != nil {
		g.Lever.Add(lever.Param{
			Name:         "--tls-listen-addr",
			Description:  "[address]:port to listen for https requests on. If port is zero a port will be chosen randomly",
			DefaultMulti: []string{},
		})
		if !g.TLSInfo.FillCertsManually {
			g.Lever.Add(lever.Param{
				Name:        "--tls-cert-file",
				Description: "Certificate file to use for TLS. May be specified more than once. Must be specified as many times as --tls-key-file.",
			})
			g.Lever.Add(lever.Param{
				Name:        "--tls-key-file",
				Description: "Key file to use for TLS. May be specified more than once. Must be specified as many times as --tls-cert-file.",
			})
		}
	}
	if g.Mode == APIMode {
		g.Lever.Add(lever.Param{
			Name:        "--skyapi-addr",
			Description: "Hostname of skyapi, to be looked up via a SRV request. Unset means don't register with skyapi",
		})
		g.Lever.Add(lever.Param{
			Name:        "--proxy-proto-allowed-cidrs",
			Description: "Comma separated list of cidrs which are allowed to use the PROXY protocol",
			Default:     "127.0.0.1/32,::1/128,10.0.0.0/8",
		})
		g.Lever.Add(lever.Param{
			Name:        "--unhealthy-timeout",
			Description: "Number of milliseconds to appear unhealthy after a stop signal is received",
		})
	}
	if g.MongoInfo != nil {
		g.Lever.Add(lever.Param{
			Name:        "--mongo-addr",
			Description: "Address of mongo instance to use",
			Default:     "127.0.0.1:27017",
		})
	}
	if g.RedisInfo != nil {
		g.Lever.Add(lever.Param{
			Name:        "--redis-addr",
			Description: "Address of redis instance to use. May be a single member of a cluster",
			Default:     "127.0.0.1:6379",
		})
		g.Lever.Add(lever.Param{
			Name:        "--redis-pool-size",
			Description: "Number of connections to a single redis instance to use. If a cluster is being used, this many connections will be made to each member of the cluster",
			Default:     "10",
		})
	}
	if g.OkqInfo != nil {
		g.Lever.Add(lever.Param{
			Name:        "--okq-addr",
			Description: "Address of okq instance to use",
			Default:     "127.0.0.1:4777",
		})
		g.Lever.Add(lever.Param{
			Name:        "--okq-pool-size",
			Description: "Number of connections to okq to initially make",
			Default:     "10",
		})
	}
	if Version != "" {
		g.Lever.Add(lever.Param{
			Name:        "--version",
			Aliases:     []string{"-V"},
			Description: "Print out version information for this binary",
			Flag:        true,
		})
	}
	// Each declared remote API gets its own --<name>-addr param
	for rapi, raddr := range g.RemoteAPIs {
		g.Lever.Add(lever.Param{
			Name:        "--" + rapi + "-addr",
			Description: "Address or hostname of a " + rapi + " instance",
			Default:     raddr,
		})
	}
	for _, p := range g.LeverParams {
		g.Lever.Add(p)
	}
	g.Lever.Parse()
}
// srvClientPreprocess is installed as the SRVClient's Preprocess hook. For
// each SRV answer whose target carries this process' datacenter-hash prefix
// (see getDCHash) it lowers the record's priority by one (clamped at 0),
// biasing SRV selection toward same-datacenter instances. No-op when no
// datacenter is configured.
func (g *GenAPI) srvClientPreprocess(m *dns.Msg) {
	dc := g.getDCHash()
	if dc == "" {
		return
	}
	for i := range m.Answer {
		ansSRV, ok := m.Answer[i].(*dns.SRV)
		if !ok {
			continue
		}
		if !strings.HasPrefix(ansSRV.Target, dc+"-") {
			continue
		}
		// Priorities 0 and 1 both clamp to 0; anything higher steps down
		if ansSRV.Priority < 2 {
			ansSRV.Priority = 0
		} else {
			ansSRV.Priority--
		}
	}
}
// getDCHash returns the first 20 hex characters of the SHA1 hash of the
// --datacenter param, or the empty string when no datacenter is configured.
func (g *GenAPI) getDCHash() string {
	dc, _ := g.Lever.ParamStr("--datacenter")
	if dc == "" {
		return ""
	}
	sum := sha1.Sum([]byte(dc))
	hexSum := fmt.Sprintf("%x", sum)
	return hexSum[:20]
}
// doSkyAPI registers this instance with skyapi (if --skyapi-addr is set) in a
// background goroutine which reconnects forever on error. It returns a
// channel which, when closed, stops the registration; returns nil when
// skyapi is not configured.
func (g *GenAPI) doSkyAPI() chan struct{} {
	skyapiAddr, _ := g.Lever.ParamStr("--skyapi-addr")
	if skyapiAddr == "" {
		return nil
	}
	dc := g.getDCHash()
	kv := llog.KV{
		"skyapiAddr":  skyapiAddr,
		"listenAddr":  g.ListenAddr,
		"serviceName": g.Name,
		"prefix":      dc,
	}
	stopCh := make(chan struct{})
	go func() {
		for {
			llog.Info("connecting to skyapi", kv)
			err := client.ProvideOpts(client.Opts{
				SkyAPIAddr:        skyapiAddr,
				Service:           g.Name,
				ThisAddr:          g.ListenAddr,
				ReconnectAttempts: 0, // do not attempt to reconnect, we'll do that here
				StopCh:            stopCh,
				Prefix:            dc,
			})
			if err != nil {
				// back off briefly before retrying
				llog.Warn("skyapi error", kv.Set("err", err))
				time.Sleep(1 * time.Second)
			} else {
				// If there wasn't an error but skyapi stopped, it's because the
				// stopCh was closed
				return
			}
		}
	}()
	return stopCh
}
// initMongo connects to mongo using --mongo-addr. In TestMode the database
// name is prefixed with "test_" so tests never touch the real database. If
// no address is given and MongoInfo is marked Optional, mongo is skipped.
func (g *GenAPI) initMongo() {
	if g.Mode == TestMode {
		g.MongoInfo.DBName = "test_" + g.MongoInfo.DBName
	}
	mongoAddr, _ := g.ParamStr("--mongo-addr")
	if mongoAddr == "" && g.MongoInfo.Optional {
		return
	}
	g.MongoInfo.session = mgoutil.EnsureSession(mongoAddr)
}
// initRedis connects to redis (possibly a cluster) using --redis-addr and
// --redis-pool-size, Fatal-ing on failure. If no address is given and
// RedisInfo is marked Optional, redis is skipped.
func (g *GenAPI) initRedis() {
	redisAddr, _ := g.ParamStr("--redis-addr")
	if redisAddr == "" && g.RedisInfo.Optional {
		return
	}
	redisPoolSize, _ := g.ParamInt("--redis-pool-size")
	kv := llog.KV{
		"addr":     redisAddr,
		"poolSize": redisPoolSize,
	}
	llog.Info("connecting to redis", kv)
	var err error
	g.RedisInfo.Cmder, err = radixutil.DialMaybeCluster("tcp", redisAddr, redisPoolSize)
	if err != nil {
		llog.Fatal("error connecting to redis", kv, llog.KV{"err": err})
	}
}
// initOkq connects to okq using --okq-addr and --okq-pool-size, Fatal-ing on
// failure. If no address is given and OkqInfo is marked Optional, okq is
// skipped. A zero OkqInfo.Timeout defaults to 30 seconds.
func (g *GenAPI) initOkq() {
	okqAddr, _ := g.ParamStr("--okq-addr")
	if okqAddr == "" && g.OkqInfo.Optional {
		return
	}
	okqPoolSize, _ := g.ParamInt("--okq-pool-size")
	kv := llog.KV{
		"addr":     okqAddr,
		"poolSize": okqPoolSize,
	}
	if g.OkqInfo.Timeout == 0 {
		g.OkqInfo.Timeout = 30 * time.Second
	}
	// Dial through SRV so okq instances can be discovered via DNS
	df := radixutil.SRVDialFunc(g.SRVClient, g.OkqInfo.Timeout)
	llog.Info("connecting to okq", kv)
	p, err := pool.NewCustom("tcp", okqAddr, okqPoolSize, df)
	if err != nil {
		llog.Fatal("error connecting to okq", kv, llog.KV{"err": err})
	}
	g.OkqInfo.Client = okq.NewWithOpts(okq.Opts{
		RedisPool:     p,
		NotifyTimeout: g.OkqInfo.Timeout,
	})
}
// contextHandler wraps h so that every request has a context registered in
// g.ctxs for its duration (retrievable via RequestContext). The entry is
// removed once the wrapped handler returns.
func (g *GenAPI) contextHandler(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := requestCtx(r)
		// Only attach request KVs if none were propagated from a caller
		if len(ContextKV(ctx)) == 0 {
			ctx = ContextMergeKV(ctx, rpcutil.RequestKV(r))
		}
		// TODO I'll be posting a question in the google group about what
		// exactly we're supposed to be doing here. It's currently very unclear
		//cn, ok := w.(http.CloseNotifier)
		//if !ok {
		//	h.ServeHTTP(w, r)
		//	return
		//}
		//closeCh := make(chan struct{})
		//reqCloseCh := cn.CloseNotify()
		//ctx, cancelFn := context.WithCancel(ctx)
		//go func() {
		//	<-closeCh
		//	<-reqCloseCh
		//	cancelFn()
		//}()
		g.ctxsL.Lock()
		g.ctxs[r] = ctx
		g.ctxsL.Unlock()
		h.ServeHTTP(w, r)
		//close(closeCh)
		g.ctxsL.Lock()
		delete(g.ctxs, r)
		g.ctxsL.Unlock()
	})
}
// RequestContext returns a context for the given request. The context will be
// cancelled if the request is closed, and may possibly have a deadline on it as
// well. If no context is registered for r, context.Background() is returned.
func (g *GenAPI) RequestContext(r *http.Request) context.Context {
	g.ctxsL.RLock()
	ctx, ok := g.ctxs[r]
	g.ctxsL.RUnlock()
	if !ok || ctx == nil {
		return context.Background()
	}
	return ctx
}
// ReloadListeners reloads the listener configurations of all existing
// listeners. This doesn't actually close the listen sockets, just hot reloads
// the configuration. Goes through each listener sequentially and returns the
// first error it encounters.
func (g *GenAPI) ReloadListeners() error {
	for _, lr := range g.listeners {
		if err := lr.Reload(); err != nil {
			return err
		}
	}
	return nil
}
// CallErr is an implementation of error which is returned from the Call method,
// and subsequently by the Call methods on Callers returned by RemoteAPICaller
// and NewCaller. If used, CallErr will not be a pointer
type CallErr struct {
	URL    string // the URL the call was made against
	Method string // the RPC method that was being called
	Err    error  // the underlying error
}

// Error implements the error interface, identifying which method on which
// URL failed and why.
func (c CallErr) Error() string {
	return fmt.Sprintf("calling %q on %q: %s", c.Method, c.URL, c.Err)
}
// Call makes an rpc call, presumably to another genapi server but really it
// only has to be a JSONRPC2 server. If it is another genapi server, however,
// the given context will be propagated to it, as well as being used here as a
// timeout if deadline is set on it. See rpcutil for more on how the rest of the
// arguments work.
//
// Note that host can be a hostname, and address (host:port), or a url
// (http[s]://host[:port])
//
// All failures are wrapped in a CallErr carrying the resolved URL and method.
func (g *GenAPI) Call(ctx context.Context, res interface{}, host, method string, args interface{}) error {
	// Resolve host via SRV if applicable
	host = g.SRVClient.MaybeSRVURL(host)
	r, err := http.NewRequest("POST", host, nil)
	if err != nil {
		return CallErr{URL: host, Method: method, Err: err}
	}
	// Propagate this context's KVs onto the outgoing request
	ContextApply(r, ctx)
	opts := rpcutil.JSONRPC2Opts{
		BaseRequest: r,
		Context:     ctx,
	}
	if err := rpcutil.JSONRPC2CallOpts(opts, host, res, method, args); err != nil {
		return CallErr{URL: host, Method: method, Err: err}
	}
	return nil
}
// remoteAPIAddr returns the configured --<remoteAPI>-addr value, Fatal-ing if
// it is empty. No SRV resolution is done here.
func (g *GenAPI) remoteAPIAddr(remoteAPI string) string {
	addr, _ := g.ParamStr("--" + remoteAPI + "-addr")
	if addr == "" {
		llog.Fatal("no address defined", llog.KV{"api": remoteAPI})
	}
	return addr
}
// Caller provides a way of calling RPC methods against a pre-defined remote
// endpoint. The Call method is essentially the same as GenAPI's Call method,
// but doesn't take in a host parameter
type Caller interface {
	Call(ctx context.Context, res interface{}, method string, args interface{}) error
}

// caller is the default Caller implementation: a GenAPI plus a fixed address.
type caller struct {
	g    *GenAPI
	addr string
}

// Call delegates to the GenAPI's Call using the caller's fixed address.
func (c caller) Call(ctx context.Context, res interface{}, method string, args interface{}) error {
	return c.g.Call(ctx, res, c.addr, method, args)
}
// NewCaller returns an instance of a Caller which will make RPC requests
// against the given address, after doing a SRV request on it before each
// request
func (g *GenAPI) NewCaller(addr string) Caller {
	return caller{g, addr}
}
// RemoteAPIAddr returns an address to use for the given remoteAPI (which must
// be defined in RemoteAPIs). The address will have had SRV called on it
// already. A Fatal will be thrown if no address has been provided for the
// remote API
func (g *GenAPI) RemoteAPIAddr(remoteAPI string) string {
	return g.SRVClient.MaybeSRV(g.remoteAPIAddr(remoteAPI))
}
// RemoteAPICaller takes in the name of a remote API instance defined in the
// RemoteAPIs field, and returns a function which can be used to make RPC calls
// against it. The arguments to the returned function are essentially the same
// as those to the Call method, sans the host argument. A Fatal will be thrown
// if no address has been provided for the remote API
func (g *GenAPI) RemoteAPICaller(remoteAPI string) Caller {
	return caller{g: g, addr: g.remoteAPIAddr(remoteAPI)}
}
// CallerStub provides a convenient way to make stubbed endpoints for testing
type CallerStub func(method string, args interface{}) (interface{}, error)

// Call implements the Call method for the Caller interface. It passes method
// and args to the underlying CallerStub function. The returned interface from
// that function is assigned to res (if the underlying types for them are
// compatible). The passed in context is ignored.
func (cs CallerStub) Call(_ context.Context, res interface{}, method string, args interface{}) error {
	csres, err := cs(method, args)
	if err != nil {
		return err
	}
	// Nothing to assign into, or nothing to assign. The csres nil check is
	// required: reflect.ValueOf(nil) is the zero Value and Set would panic
	// on it.
	if res == nil || csres == nil {
		return nil
	}
	vres := reflect.ValueOf(res).Elem()
	vres.Set(reflect.ValueOf(csres))
	return nil
}
|
package aws
import (
"bytes"
"crypto/sha1"
"encoding/base64"
"encoding/hex"
"fmt"
"log"
"strings"
"time"
"github.com/hashicorp/aws-sdk-go/aws"
"github.com/hashicorp/aws-sdk-go/gen/ec2"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
// resourceAwsInstance returns the Terraform schema and CRUD functions for the
// aws_instance resource.
func resourceAwsInstance() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsInstanceCreate,
		Read:   resourceAwsInstanceRead,
		Update: resourceAwsInstanceUpdate,
		Delete: resourceAwsInstanceDelete,

		SchemaVersion: 1,
		MigrateState:  resourceAwsInstanceMigrateState,

		Schema: map[string]*schema.Schema{
			"ami": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"associate_public_ip_address": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				ForceNew: true,
			},

			"availability_zone": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			"instance_type": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"key_name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Computed: true,
			},

			"subnet_id": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			"private_ip": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Computed: true,
			},

			"source_dest_check": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
			},

			// user_data is stored in state as a sha1 hex digest rather than
			// the raw contents
			"user_data": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				StateFunc: func(v interface{}) string {
					switch v.(type) {
					case string:
						hash := sha1.Sum([]byte(v.(string)))
						return hex.EncodeToString(hash[:])
					default:
						return ""
					}
				},
			},

			"security_groups": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				Computed: true,
				ForceNew: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Set: func(v interface{}) int {
					return hashcode.String(v.(string))
				},
			},

			"public_dns": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"public_ip": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"private_dns": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"ebs_optimized": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
			},

			"iam_instance_profile": &schema.Schema{
				Type:     schema.TypeString,
				ForceNew: true,
				Optional: true,
			},

			"tenancy": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			"tags": tagsSchema(),

			// Deprecated attribute kept only so users get a helpful error
			"block_device": &schema.Schema{
				Type:     schema.TypeMap,
				Optional: true,
				Removed:  "Split out into three sub-types; see Changelog and Docs",
			},

			"ebs_block_device": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				Computed: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"delete_on_termination": &schema.Schema{
							Type:     schema.TypeBool,
							Optional: true,
							Default:  true,
							ForceNew: true,
						},

						"device_name": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
							ForceNew: true,
						},

						"encrypted": &schema.Schema{
							Type:     schema.TypeBool,
							Optional: true,
							Computed: true,
							ForceNew: true,
						},

						"iops": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
							Computed: true,
							ForceNew: true,
						},

						"snapshot_id": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
							ForceNew: true,
						},

						"volume_size": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
							Computed: true,
							ForceNew: true,
						},

						"volume_type": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
							ForceNew: true,
						},
					},
				},
				Set: func(v interface{}) int {
					var buf bytes.Buffer
					m := v.(map[string]interface{})
					buf.WriteString(fmt.Sprintf("%t-", m["delete_on_termination"].(bool)))
					buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string)))
					buf.WriteString(fmt.Sprintf("%t-", m["encrypted"].(bool)))
					// NOTE: Not considering IOPS in hash; when using gp2, IOPS can come
					// back set to something like "33", which throws off the set
					// calculation and generates an unresolvable diff.
					// buf.WriteString(fmt.Sprintf("%d-", m["iops"].(int)))
					buf.WriteString(fmt.Sprintf("%s-", m["snapshot_id"].(string)))
					buf.WriteString(fmt.Sprintf("%d-", m["volume_size"].(int)))
					buf.WriteString(fmt.Sprintf("%s-", m["volume_type"].(string)))
					return hashcode.String(buf.String())
				},
			},

			"ephemeral_block_device": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				Computed: true,
				ForceNew: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"device_name": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},

						"virtual_name": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},
					},
				},
				Set: func(v interface{}) int {
					var buf bytes.Buffer
					m := v.(map[string]interface{})
					buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string)))
					buf.WriteString(fmt.Sprintf("%s-", m["virtual_name"].(string)))
					return hashcode.String(buf.String())
				},
			},

			"root_block_device": &schema.Schema{
				// TODO: This is a set because we don't support singleton
				// sub-resources today. We'll enforce that the set only ever has
				// length zero or one below. When TF gains support for
				// sub-resources this can be converted.
				Type:     schema.TypeSet,
				Optional: true,
				Computed: true,
				Elem: &schema.Resource{
					// "You can only modify the volume size, volume type, and Delete on
					// Termination flag on the block device mapping entry for the root
					// device volume." - bit.ly/ec2bdmap
					Schema: map[string]*schema.Schema{
						"delete_on_termination": &schema.Schema{
							Type:     schema.TypeBool,
							Optional: true,
							Default:  true,
							ForceNew: true,
						},

						"device_name": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
							Default:  "/dev/sda1",
						},

						"iops": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
							Computed: true,
							ForceNew: true,
						},

						"volume_size": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
							Computed: true,
							ForceNew: true,
						},

						"volume_type": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
							ForceNew: true,
						},
					},
				},
				Set: func(v interface{}) int {
					var buf bytes.Buffer
					m := v.(map[string]interface{})
					buf.WriteString(fmt.Sprintf("%t-", m["delete_on_termination"].(bool)))
					buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string)))
					// See the NOTE in "ebs_block_device" for why we skip iops here.
					// buf.WriteString(fmt.Sprintf("%d-", m["iops"].(int)))
					buf.WriteString(fmt.Sprintf("%d-", m["volume_size"].(int)))
					buf.WriteString(fmt.Sprintf("%s-", m["volume_type"].(string)))
					return hashcode.String(buf.String())
				},
			},
		},
	}
}
// resourceAwsInstanceCreate launches an EC2 instance from the resource data,
// waits for it to reach the "running" state, then reads its attributes back
// into state and applies any post-create updates (e.g. tags).
func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error {
	ec2conn := meta.(*AWSClient).ec2conn

	// Figure out user data (sent base64-encoded, as the API requires)
	userData := ""
	if v := d.Get("user_data"); v != nil {
		userData = base64.StdEncoding.EncodeToString([]byte(v.(string)))
	}

	placement := &ec2.Placement{
		AvailabilityZone: aws.String(d.Get("availability_zone").(string)),
	}

	if v := d.Get("tenancy").(string); v != "" {
		placement.Tenancy = aws.String(v)
	}

	iam := &ec2.IAMInstanceProfileSpecification{
		Name: aws.String(d.Get("iam_instance_profile").(string)),
	}

	// Build the creation struct
	runOpts := &ec2.RunInstancesRequest{
		ImageID:            aws.String(d.Get("ami").(string)),
		Placement:          placement,
		InstanceType:       aws.String(d.Get("instance_type").(string)),
		MaxCount:           aws.Integer(1),
		MinCount:           aws.Integer(1),
		UserData:           aws.String(userData),
		EBSOptimized:       aws.Boolean(d.Get("ebs_optimized").(bool)),
		IAMInstanceProfile: iam,
	}

	associatePublicIPAddress := false
	if v := d.Get("associate_public_ip_address"); v != nil {
		associatePublicIPAddress = v.(bool)
	}

	// check for non-default Subnet, and cast it to a String
	subnet, hasSubnet := d.GetOk("subnet_id")
	subnetID := subnet.(string)

	var groups []string
	if v := d.Get("security_groups"); v != nil {
		// Security group names.
		// For a nondefault VPC, you must use security group IDs instead.
		// See http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html
		for _, v := range v.(*schema.Set).List() {
			str := v.(string)
			groups = append(groups, str)
		}
	}

	if hasSubnet && associatePublicIPAddress {
		// If we have a non-default VPC / Subnet specified, we can flag
		// AssociatePublicIpAddress to get a Public IP assigned. By default these are not provided.
		// You cannot specify both SubnetId and the NetworkInterface.0.* parameters though, otherwise
		// you get: Network interfaces and an instance-level subnet ID may not be specified on the same request
		// You also need to attach Security Groups to the NetworkInterface instead of the instance,
		// to avoid: Network interfaces and an instance-level security groups may not be specified on
		// the same request
		ni := ec2.InstanceNetworkInterfaceSpecification{
			AssociatePublicIPAddress: aws.Boolean(associatePublicIPAddress),
			DeviceIndex:              aws.Integer(0),
			SubnetID:                 aws.String(subnetID),
		}

		if v, ok := d.GetOk("private_ip"); ok {
			ni.PrivateIPAddress = aws.String(v.(string))
		}

		if len(groups) > 0 {
			ni.Groups = groups
		}

		runOpts.NetworkInterfaces = []ec2.InstanceNetworkInterfaceSpecification{ni}
	} else {
		if subnetID != "" {
			runOpts.SubnetID = aws.String(subnetID)
		}

		if v, ok := d.GetOk("private_ip"); ok {
			runOpts.PrivateIPAddress = aws.String(v.(string))
		}
		// In a VPC security groups are addressed by ID, otherwise by name
		if runOpts.SubnetID != nil &&
			*runOpts.SubnetID != "" {
			runOpts.SecurityGroupIDs = groups
		} else {
			runOpts.SecurityGroups = groups
		}
	}

	if v, ok := d.GetOk("key_name"); ok {
		runOpts.KeyName = aws.String(v.(string))
	}

	// Collect EBS, ephemeral and root block device mappings
	blockDevices := make([]ec2.BlockDeviceMapping, 0)

	if v, ok := d.GetOk("ebs_block_device"); ok {
		vL := v.(*schema.Set).List()
		for _, v := range vL {
			bd := v.(map[string]interface{})
			ebs := &ec2.EBSBlockDevice{
				DeleteOnTermination: aws.Boolean(bd["delete_on_termination"].(bool)),
			}

			if v, ok := bd["snapshot_id"].(string); ok && v != "" {
				ebs.SnapshotID = aws.String(v)
			}

			if v, ok := bd["volume_size"].(int); ok && v != 0 {
				ebs.VolumeSize = aws.Integer(v)
			}

			if v, ok := bd["volume_type"].(string); ok && v != "" {
				ebs.VolumeType = aws.String(v)
			}

			if v, ok := bd["iops"].(int); ok && v > 0 {
				ebs.IOPS = aws.Integer(v)
			}

			blockDevices = append(blockDevices, ec2.BlockDeviceMapping{
				DeviceName: aws.String(bd["device_name"].(string)),
				EBS:        ebs,
			})
		}
	}

	if v, ok := d.GetOk("ephemeral_block_device"); ok {
		vL := v.(*schema.Set).List()
		for _, v := range vL {
			bd := v.(map[string]interface{})
			blockDevices = append(blockDevices, ec2.BlockDeviceMapping{
				DeviceName:  aws.String(bd["device_name"].(string)),
				VirtualName: aws.String(bd["virtual_name"].(string)),
			})
		}
		// if err := d.Set("ephemeral_block_device", vL); err != nil {
		// 	return err
		// }
	}

	if v, ok := d.GetOk("root_block_device"); ok {
		vL := v.(*schema.Set).List()
		if len(vL) > 1 {
			return fmt.Errorf("Cannot specify more than one root_block_device.")
		}
		for _, v := range vL {
			bd := v.(map[string]interface{})
			ebs := &ec2.EBSBlockDevice{
				DeleteOnTermination: aws.Boolean(bd["delete_on_termination"].(bool)),
			}

			if v, ok := bd["volume_size"].(int); ok && v != 0 {
				ebs.VolumeSize = aws.Integer(v)
			}

			if v, ok := bd["volume_type"].(string); ok && v != "" {
				ebs.VolumeType = aws.String(v)
			}

			if v, ok := bd["iops"].(int); ok && v > 0 {
				ebs.IOPS = aws.Integer(v)
			}

			blockDevices = append(blockDevices, ec2.BlockDeviceMapping{
				DeviceName: aws.String(bd["device_name"].(string)),
				EBS:        ebs,
			})
		}
	}

	if len(blockDevices) > 0 {
		runOpts.BlockDeviceMappings = blockDevices
	}

	// Create the instance
	log.Printf("[DEBUG] Run configuration: %#v", runOpts)
	runResp, err := ec2conn.RunInstances(runOpts)
	if err != nil {
		return fmt.Errorf("Error launching source instance: %s", err)
	}

	instance := &runResp.Instances[0]
	log.Printf("[INFO] Instance ID: %s", *instance.InstanceID)

	// Store the resulting ID so we can look this up later
	d.SetId(*instance.InstanceID)

	// Wait for the instance to become running so we can get some attributes
	// that aren't available until later.
	log.Printf(
		"[DEBUG] Waiting for instance (%s) to become running",
		*instance.InstanceID)

	stateConf := &resource.StateChangeConf{
		Pending:    []string{"pending"},
		Target:     "running",
		Refresh:    InstanceStateRefreshFunc(ec2conn, *instance.InstanceID),
		Timeout:    10 * time.Minute,
		Delay:      10 * time.Second,
		MinTimeout: 3 * time.Second,
	}

	instanceRaw, err := stateConf.WaitForState()
	if err != nil {
		return fmt.Errorf(
			"Error waiting for instance (%s) to become ready: %s",
			*instance.InstanceID, err)
	}

	instance = instanceRaw.(*ec2.Instance)

	// Initialize the connection info
	if instance.PublicIPAddress != nil {
		d.SetConnInfo(map[string]string{
			"type": "ssh",
			"host": *instance.PublicIPAddress,
		})
	}

	// Set our attributes
	if err := resourceAwsInstanceRead(d, meta); err != nil {
		return err
	}

	// Update if we need to
	return resourceAwsInstanceUpdate(d, meta)
}
// resourceAwsInstanceRead refreshes the resource state from the EC2 API. A
// missing or terminated instance clears the ID so Terraform treats it as
// gone.
func resourceAwsInstanceRead(d *schema.ResourceData, meta interface{}) error {
	ec2conn := meta.(*AWSClient).ec2conn

	resp, err := ec2conn.DescribeInstances(&ec2.DescribeInstancesRequest{
		InstanceIDs: []string{d.Id()},
	})
	if err != nil {
		// If the instance was not found, return nil so that we can show
		// that the instance is gone.
		if ec2err, ok := err.(aws.APIError); ok && ec2err.Code == "InvalidInstanceID.NotFound" {
			d.SetId("")
			return nil
		}

		// Some other error, report it
		return err
	}

	// If nothing was found, then return no state
	if len(resp.Reservations) == 0 {
		d.SetId("")
		return nil
	}

	instance := &resp.Reservations[0].Instances[0]

	// If the instance is terminated, then it is gone
	if *instance.State.Name == "terminated" {
		d.SetId("")
		return nil
	}

	d.Set("availability_zone", instance.Placement.AvailabilityZone)
	d.Set("key_name", instance.KeyName)
	d.Set("public_dns", instance.PublicDNSName)
	d.Set("public_ip", instance.PublicIPAddress)
	d.Set("private_dns", instance.PrivateDNSName)
	d.Set("private_ip", instance.PrivateIPAddress)
	// Prefer the subnet of the first network interface when one exists
	if len(instance.NetworkInterfaces) > 0 {
		d.Set("subnet_id", instance.NetworkInterfaces[0].SubnetID)
	} else {
		d.Set("subnet_id", instance.SubnetID)
	}
	d.Set("ebs_optimized", instance.EBSOptimized)
	d.Set("tags", tagsToMap(instance.Tags))
	d.Set("tenancy", instance.Placement.Tenancy)

	// Determine whether we're referring to security groups with
	// IDs or names. We use a heuristic to figure this out. By default,
	// we use IDs if we're in a VPC. However, if we previously had an
	// all-name list of security groups, we use names. Or, if we had any
	// IDs, we use IDs.
	useID := instance.SubnetID != nil && *instance.SubnetID != ""
	if v := d.Get("security_groups"); v != nil {
		match := false
		for _, v := range v.(*schema.Set).List() {
			if strings.HasPrefix(v.(string), "sg-") {
				match = true
				break
			}
		}

		useID = match
	}

	// Build up the security groups
	sgs := make([]string, len(instance.SecurityGroups))
	for i, sg := range instance.SecurityGroups {
		if useID {
			sgs[i] = *sg.GroupID
		} else {
			sgs[i] = *sg.GroupName
		}
	}
	d.Set("security_groups", sgs)

	if err := readBlockDevices(d, instance, ec2conn); err != nil {
		return err
	}

	return nil
}
// resourceAwsInstanceUpdate applies in-place updates: source_dest_check (VPC
// instances only) and tags.
func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
	ec2conn := meta.(*AWSClient).ec2conn

	// SourceDestCheck can only be set on VPC instances
	if d.Get("subnet_id").(string) != "" {
		log.Printf("[INFO] Modifying instance %s", d.Id())
		err := ec2conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeRequest{
			InstanceID: aws.String(d.Id()),
			SourceDestCheck: &ec2.AttributeBooleanValue{
				Value: aws.Boolean(d.Get("source_dest_check").(bool)),
			},
		})
		if err != nil {
			return err
		}
	}

	// TODO(mitchellh): wait for the attributes we modified to
	// persist the change...

	if err := setTags(ec2conn, d); err != nil {
		return err
	}
	d.SetPartial("tags")

	return nil
}
// resourceAwsInstanceDelete terminates the EC2 instance and waits until AWS
// reports it in the "terminated" state before clearing the resource ID.
func resourceAwsInstanceDelete(d *schema.ResourceData, meta interface{}) error {
	ec2conn := meta.(*AWSClient).ec2conn

	log.Printf("[INFO] Terminating instance: %s", d.Id())
	req := &ec2.TerminateInstancesRequest{
		InstanceIDs: []string{d.Id()},
	}
	if _, err := ec2conn.TerminateInstances(req); err != nil {
		return fmt.Errorf("Error terminating instance: %s", err)
	}

	log.Printf(
		"[DEBUG] Waiting for instance (%s) to become terminated",
		d.Id())

	// Poll the instance state until it reaches "terminated" or the
	// 10-minute timeout elapses.
	stateConf := &resource.StateChangeConf{
		Pending:    []string{"pending", "running", "shutting-down", "stopped", "stopping"},
		Target:     "terminated",
		Refresh:    InstanceStateRefreshFunc(ec2conn, d.Id()),
		Timeout:    10 * time.Minute,
		Delay:      10 * time.Second,
		MinTimeout: 3 * time.Second,
	}

	_, err := stateConf.WaitForState()
	if err != nil {
		return fmt.Errorf(
			"Error waiting for instance (%s) to terminate: %s",
			d.Id(), err)
	}

	// Clearing the ID tells Terraform the resource no longer exists.
	d.SetId("")
	return nil
}
// InstanceStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch
// an EC2 instance.
// The returned closure yields the instance object and its current state name.
// When AWS cannot see the instance yet (NotFound error or empty response) it
// returns an empty state with a nil error so the caller keeps polling.
func InstanceStateRefreshFunc(conn *ec2.EC2, instanceID string) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		resp, err := conn.DescribeInstances(&ec2.DescribeInstancesRequest{
			InstanceIDs: []string{instanceID},
		})
		if err != nil {
			if ec2err, ok := err.(aws.APIError); ok && ec2err.Code == "InvalidInstanceID.NotFound" {
				// Set this to nil as if we didn't find anything.
				resp = nil
			} else {
				log.Printf("Error on InstanceStateRefresh: %s", err)
				return nil, "", err
			}
		}

		if resp == nil || len(resp.Reservations) == 0 || len(resp.Reservations[0].Instances) == 0 {
			// Sometimes AWS just has consistency issues and doesn't see
			// our instance yet. Return an empty state.
			return nil, "", nil
		}

		i := &resp.Reservations[0].Instances[0]
		return i, *i.State.Name, nil
	}
}
// readBlockDevices loads the instance's block device information into state:
// "ebs_block_device" unconditionally, and "root_block_device" only when a
// root volume was identified.
func readBlockDevices(d *schema.ResourceData, instance *ec2.Instance, ec2conn *ec2.EC2) error {
	ibds, err := readBlockDevicesFromInstance(instance, ec2conn)
	if err != nil {
		return err
	}

	if err := d.Set("ebs_block_device", ibds["ebs"]); err != nil {
		return err
	}
	if ibds["root"] != nil {
		// root_block_device is modeled as a set with at most one element.
		if err := d.Set("root_block_device", []interface{}{ibds["root"]}); err != nil {
			return err
		}
	}

	return nil
}
// readBlockDevicesFromInstance inspects the instance's EBS block device
// mappings and returns a map with two keys:
//   "root" - a map describing the root volume, or nil if none was matched
//   "ebs"  - a slice of maps describing the non-root EBS volumes
// DescribeVolumes is called because volume size/type/IOPS are not part of
// the instance's own block device mapping data.
func readBlockDevicesFromInstance(instance *ec2.Instance, ec2conn *ec2.EC2) (map[string]interface{}, error) {
	blockDevices := make(map[string]interface{})
	blockDevices["ebs"] = make([]map[string]interface{}, 0)
	blockDevices["root"] = nil

	// Index the instance's EBS-backed mappings by volume ID.
	instanceBlockDevices := make(map[string]ec2.InstanceBlockDeviceMapping)
	for _, bd := range instance.BlockDeviceMappings {
		if bd.EBS != nil {
			instanceBlockDevices[*(bd.EBS.VolumeID)] = bd
		}
	}

	volIDs := make([]string, 0, len(instanceBlockDevices))
	for volID := range instanceBlockDevices {
		volIDs = append(volIDs, volID)
	}

	// Need to call DescribeVolumes to get volume_size and volume_type for each
	// EBS block device
	volResp, err := ec2conn.DescribeVolumes(&ec2.DescribeVolumesRequest{
		VolumeIDs: volIDs,
	})
	if err != nil {
		return nil, err
	}

	for _, vol := range volResp.Volumes {
		instanceBd := instanceBlockDevices[*vol.VolumeID]
		bd := make(map[string]interface{})
		// Copy only fields that are present; the SDK uses nil pointers for
		// unset values.
		if instanceBd.EBS != nil && instanceBd.EBS.DeleteOnTermination != nil {
			bd["delete_on_termination"] = *instanceBd.EBS.DeleteOnTermination
		}
		if instanceBd.DeviceName != nil {
			bd["device_name"] = *instanceBd.DeviceName
		}
		if vol.Size != nil {
			bd["volume_size"] = *vol.Size
		}
		if vol.VolumeType != nil {
			bd["volume_type"] = *vol.VolumeType
		}
		if vol.IOPS != nil {
			bd["iops"] = *vol.IOPS
		}

		if blockDeviceIsRoot(instanceBd, instance) {
			blockDevices["root"] = bd
		} else {
			// encrypted and snapshot_id are only recorded for non-root
			// devices; the root_block_device schema does not carry them.
			if vol.Encrypted != nil {
				bd["encrypted"] = *vol.Encrypted
			}
			if vol.SnapshotID != nil {
				bd["snapshot_id"] = *vol.SnapshotID
			}
			blockDevices["ebs"] = append(blockDevices["ebs"].([]map[string]interface{}), bd)
		}
	}

	return blockDevices, nil
}
// blockDeviceIsRoot reports whether the given block device mapping refers to
// the instance's root device. A missing device name on either side counts
// as "not root".
func blockDeviceIsRoot(bd ec2.InstanceBlockDeviceMapping, instance *ec2.Instance) bool {
	if bd.DeviceName == nil || instance.RootDeviceName == nil {
		return false
	}
	return *bd.DeviceName == *instance.RootDeviceName
}
providers/aws: remove commented code
oopsie!
package aws
import (
"bytes"
"crypto/sha1"
"encoding/base64"
"encoding/hex"
"fmt"
"log"
"strings"
"time"
"github.com/hashicorp/aws-sdk-go/aws"
"github.com/hashicorp/aws-sdk-go/gen/ec2"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
// resourceAwsInstance returns the Terraform resource definition (schema plus
// CRUD callbacks) for the aws_instance resource.
func resourceAwsInstance() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsInstanceCreate,
		Read:   resourceAwsInstanceRead,
		Update: resourceAwsInstanceUpdate,
		Delete: resourceAwsInstanceDelete,

		SchemaVersion: 1,
		MigrateState:  resourceAwsInstanceMigrateState,

		Schema: map[string]*schema.Schema{
			"ami": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"associate_public_ip_address": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				ForceNew: true,
			},

			"availability_zone": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			"instance_type": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"key_name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Computed: true,
			},

			"subnet_id": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			"private_ip": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Computed: true,
			},

			"source_dest_check": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
			},

			"user_data": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				// Only a SHA-1 of the user data is kept in state, which
				// keeps the state file small.
				StateFunc: func(v interface{}) string {
					switch v.(type) {
					case string:
						hash := sha1.Sum([]byte(v.(string)))
						return hex.EncodeToString(hash[:])
					default:
						return ""
					}
				},
			},

			"security_groups": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				Computed: true,
				ForceNew: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Set: func(v interface{}) int {
					return hashcode.String(v.(string))
				},
			},

			"public_dns": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"public_ip": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"private_dns": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"ebs_optimized": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
			},

			"iam_instance_profile": &schema.Schema{
				Type:     schema.TypeString,
				ForceNew: true,
				Optional: true,
			},

			"tenancy": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			"tags": tagsSchema(),

			// Legacy attribute kept only to emit a migration error.
			"block_device": &schema.Schema{
				Type:     schema.TypeMap,
				Optional: true,
				Removed:  "Split out into three sub-types; see Changelog and Docs",
			},

			"ebs_block_device": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				Computed: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"delete_on_termination": &schema.Schema{
							Type:     schema.TypeBool,
							Optional: true,
							Default:  true,
							ForceNew: true,
						},

						"device_name": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
							ForceNew: true,
						},

						"encrypted": &schema.Schema{
							Type:     schema.TypeBool,
							Optional: true,
							Computed: true,
							ForceNew: true,
						},

						"iops": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
							Computed: true,
							ForceNew: true,
						},

						"snapshot_id": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
							ForceNew: true,
						},

						"volume_size": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
							Computed: true,
							ForceNew: true,
						},

						"volume_type": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
							ForceNew: true,
						},
					},
				},
				Set: func(v interface{}) int {
					var buf bytes.Buffer
					m := v.(map[string]interface{})
					buf.WriteString(fmt.Sprintf("%t-", m["delete_on_termination"].(bool)))
					buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string)))
					buf.WriteString(fmt.Sprintf("%t-", m["encrypted"].(bool)))
					// NOTE: Not considering IOPS in hash; when using gp2, IOPS can come
					// back set to something like "33", which throws off the set
					// calculation and generates an unresolvable diff.
					// buf.WriteString(fmt.Sprintf("%d-", m["iops"].(int)))
					buf.WriteString(fmt.Sprintf("%s-", m["snapshot_id"].(string)))
					buf.WriteString(fmt.Sprintf("%d-", m["volume_size"].(int)))
					buf.WriteString(fmt.Sprintf("%s-", m["volume_type"].(string)))
					return hashcode.String(buf.String())
				},
			},

			"ephemeral_block_device": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				Computed: true,
				ForceNew: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"device_name": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},

						"virtual_name": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},
					},
				},
				Set: func(v interface{}) int {
					var buf bytes.Buffer
					m := v.(map[string]interface{})
					buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string)))
					buf.WriteString(fmt.Sprintf("%s-", m["virtual_name"].(string)))
					return hashcode.String(buf.String())
				},
			},

			"root_block_device": &schema.Schema{
				// TODO: This is a set because we don't support singleton
				// sub-resources today. We'll enforce that the set only ever has
				// length zero or one below. When TF gains support for
				// sub-resources this can be converted.
				Type:     schema.TypeSet,
				Optional: true,
				Computed: true,
				Elem: &schema.Resource{
					// "You can only modify the volume size, volume type, and Delete on
					// Termination flag on the block device mapping entry for the root
					// device volume." - bit.ly/ec2bdmap
					Schema: map[string]*schema.Schema{
						"delete_on_termination": &schema.Schema{
							Type:     schema.TypeBool,
							Optional: true,
							Default:  true,
							ForceNew: true,
						},

						"device_name": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
							Default:  "/dev/sda1",
						},

						"iops": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
							Computed: true,
							ForceNew: true,
						},

						"volume_size": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
							Computed: true,
							ForceNew: true,
						},

						"volume_type": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
							ForceNew: true,
						},
					},
				},
				Set: func(v interface{}) int {
					var buf bytes.Buffer
					m := v.(map[string]interface{})
					buf.WriteString(fmt.Sprintf("%t-", m["delete_on_termination"].(bool)))
					buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string)))
					// See the NOTE in "ebs_block_device" for why we skip iops here.
					// buf.WriteString(fmt.Sprintf("%d-", m["iops"].(int)))
					buf.WriteString(fmt.Sprintf("%d-", m["volume_size"].(int)))
					buf.WriteString(fmt.Sprintf("%s-", m["volume_type"].(string)))
					return hashcode.String(buf.String())
				},
			},
		},
	}
}
// resourceAwsInstanceCreate launches an EC2 instance matching the resource
// configuration, waits for it to reach the "running" state, then delegates
// to Read (and Update) to populate state.
// Fixes: removed a redundant `var hasSubnet bool` declaration that was
// immediately reassigned by the following `:=`; added a guard against an
// empty Instances slice before indexing the RunInstances response.
func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error {
	ec2conn := meta.(*AWSClient).ec2conn

	// Figure out user data; it is sent base64-encoded.
	userData := ""
	if v := d.Get("user_data"); v != nil {
		userData = base64.StdEncoding.EncodeToString([]byte(v.(string)))
	}

	placement := &ec2.Placement{
		AvailabilityZone: aws.String(d.Get("availability_zone").(string)),
	}
	if v := d.Get("tenancy").(string); v != "" {
		placement.Tenancy = aws.String(v)
	}

	iam := &ec2.IAMInstanceProfileSpecification{
		Name: aws.String(d.Get("iam_instance_profile").(string)),
	}

	// Build the creation struct
	runOpts := &ec2.RunInstancesRequest{
		ImageID:            aws.String(d.Get("ami").(string)),
		Placement:          placement,
		InstanceType:       aws.String(d.Get("instance_type").(string)),
		MaxCount:           aws.Integer(1),
		MinCount:           aws.Integer(1),
		UserData:           aws.String(userData),
		EBSOptimized:       aws.Boolean(d.Get("ebs_optimized").(bool)),
		IAMInstanceProfile: iam,
	}

	associatePublicIPAddress := false
	if v := d.Get("associate_public_ip_address"); v != nil {
		associatePublicIPAddress = v.(bool)
	}

	// check for non-default Subnet, and cast it to a String
	subnet, hasSubnet := d.GetOk("subnet_id")
	subnetID := subnet.(string)

	// Gather the configured security groups (names or IDs).
	var groups []string
	if v := d.Get("security_groups"); v != nil {
		// Security group names.
		// For a nondefault VPC, you must use security group IDs instead.
		// See http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html
		for _, v := range v.(*schema.Set).List() {
			str := v.(string)
			groups = append(groups, str)
		}
	}

	if hasSubnet && associatePublicIPAddress {
		// If we have a non-default VPC / Subnet specified, we can flag
		// AssociatePublicIpAddress to get a Public IP assigned. By default these are not provided.
		// You cannot specify both SubnetId and the NetworkInterface.0.* parameters though, otherwise
		// you get: Network interfaces and an instance-level subnet ID may not be specified on the same request
		// You also need to attach Security Groups to the NetworkInterface instead of the instance,
		// to avoid: Network interfaces and an instance-level security groups may not be specified on
		// the same request
		ni := ec2.InstanceNetworkInterfaceSpecification{
			AssociatePublicIPAddress: aws.Boolean(associatePublicIPAddress),
			DeviceIndex:              aws.Integer(0),
			SubnetID:                 aws.String(subnetID),
		}

		if v, ok := d.GetOk("private_ip"); ok {
			ni.PrivateIPAddress = aws.String(v.(string))
		}

		if len(groups) > 0 {
			ni.Groups = groups
		}

		runOpts.NetworkInterfaces = []ec2.InstanceNetworkInterfaceSpecification{ni}
	} else {
		if subnetID != "" {
			runOpts.SubnetID = aws.String(subnetID)
		}

		if v, ok := d.GetOk("private_ip"); ok {
			runOpts.PrivateIPAddress = aws.String(v.(string))
		}

		// Inside a VPC security groups are referenced by ID, otherwise by name.
		if runOpts.SubnetID != nil &&
			*runOpts.SubnetID != "" {
			runOpts.SecurityGroupIDs = groups
		} else {
			runOpts.SecurityGroups = groups
		}
	}

	if v, ok := d.GetOk("key_name"); ok {
		runOpts.KeyName = aws.String(v.(string))
	}

	// Collect EBS, ephemeral and root block device mappings.
	blockDevices := make([]ec2.BlockDeviceMapping, 0)

	if v, ok := d.GetOk("ebs_block_device"); ok {
		vL := v.(*schema.Set).List()
		for _, v := range vL {
			bd := v.(map[string]interface{})
			ebs := &ec2.EBSBlockDevice{
				DeleteOnTermination: aws.Boolean(bd["delete_on_termination"].(bool)),
			}

			if v, ok := bd["snapshot_id"].(string); ok && v != "" {
				ebs.SnapshotID = aws.String(v)
			}

			if v, ok := bd["volume_size"].(int); ok && v != 0 {
				ebs.VolumeSize = aws.Integer(v)
			}

			if v, ok := bd["volume_type"].(string); ok && v != "" {
				ebs.VolumeType = aws.String(v)
			}

			if v, ok := bd["iops"].(int); ok && v > 0 {
				ebs.IOPS = aws.Integer(v)
			}

			blockDevices = append(blockDevices, ec2.BlockDeviceMapping{
				DeviceName: aws.String(bd["device_name"].(string)),
				EBS:        ebs,
			})
		}
	}

	if v, ok := d.GetOk("ephemeral_block_device"); ok {
		vL := v.(*schema.Set).List()
		for _, v := range vL {
			bd := v.(map[string]interface{})
			blockDevices = append(blockDevices, ec2.BlockDeviceMapping{
				DeviceName:  aws.String(bd["device_name"].(string)),
				VirtualName: aws.String(bd["virtual_name"].(string)),
			})
		}
	}

	if v, ok := d.GetOk("root_block_device"); ok {
		vL := v.(*schema.Set).List()
		// The schema models this as a set; enforce the singleton here.
		if len(vL) > 1 {
			return fmt.Errorf("Cannot specify more than one root_block_device.")
		}
		for _, v := range vL {
			bd := v.(map[string]interface{})
			ebs := &ec2.EBSBlockDevice{
				DeleteOnTermination: aws.Boolean(bd["delete_on_termination"].(bool)),
			}

			if v, ok := bd["volume_size"].(int); ok && v != 0 {
				ebs.VolumeSize = aws.Integer(v)
			}

			if v, ok := bd["volume_type"].(string); ok && v != "" {
				ebs.VolumeType = aws.String(v)
			}

			if v, ok := bd["iops"].(int); ok && v > 0 {
				ebs.IOPS = aws.Integer(v)
			}

			blockDevices = append(blockDevices, ec2.BlockDeviceMapping{
				DeviceName: aws.String(bd["device_name"].(string)),
				EBS:        ebs,
			})
		}
	}

	if len(blockDevices) > 0 {
		runOpts.BlockDeviceMappings = blockDevices
	}

	// Create the instance
	log.Printf("[DEBUG] Run configuration: %#v", runOpts)
	runResp, err := ec2conn.RunInstances(runOpts)
	if err != nil {
		return fmt.Errorf("Error launching source instance: %s", err)
	}
	if len(runResp.Instances) == 0 {
		// Defensive guard: avoid an index-out-of-range panic on a
		// malformed (but non-error) API response.
		return fmt.Errorf("Error launching source instance: no instances returned in response")
	}

	instance := &runResp.Instances[0]
	log.Printf("[INFO] Instance ID: %s", *instance.InstanceID)

	// Store the resulting ID so we can look this up later
	d.SetId(*instance.InstanceID)

	// Wait for the instance to become running so we can get some attributes
	// that aren't available until later.
	log.Printf(
		"[DEBUG] Waiting for instance (%s) to become running",
		*instance.InstanceID)

	stateConf := &resource.StateChangeConf{
		Pending:    []string{"pending"},
		Target:     "running",
		Refresh:    InstanceStateRefreshFunc(ec2conn, *instance.InstanceID),
		Timeout:    10 * time.Minute,
		Delay:      10 * time.Second,
		MinTimeout: 3 * time.Second,
	}

	instanceRaw, err := stateConf.WaitForState()
	if err != nil {
		return fmt.Errorf(
			"Error waiting for instance (%s) to become ready: %s",
			*instance.InstanceID, err)
	}

	instance = instanceRaw.(*ec2.Instance)

	// Initialize the connection info so provisioners can SSH in.
	if instance.PublicIPAddress != nil {
		d.SetConnInfo(map[string]string{
			"type": "ssh",
			"host": *instance.PublicIPAddress,
		})
	}

	// Set our attributes
	if err := resourceAwsInstanceRead(d, meta); err != nil {
		return err
	}

	// Update if we need to
	return resourceAwsInstanceUpdate(d, meta)
}
// resourceAwsInstanceRead refreshes Terraform state from the EC2 API.
// If the instance no longer exists (or is terminated), the resource ID is
// cleared so Terraform knows it is gone.
// Fix: removed a redundant unconditional d.Set("subnet_id", ...) that was
// immediately overwritten by the network-interface branch below.
func resourceAwsInstanceRead(d *schema.ResourceData, meta interface{}) error {
	ec2conn := meta.(*AWSClient).ec2conn

	resp, err := ec2conn.DescribeInstances(&ec2.DescribeInstancesRequest{
		InstanceIDs: []string{d.Id()},
	})
	if err != nil {
		// If the instance was not found, return nil so that we can show
		// that the instance is gone.
		if ec2err, ok := err.(aws.APIError); ok && ec2err.Code == "InvalidInstanceID.NotFound" {
			d.SetId("")
			return nil
		}

		// Some other error, report it
		return err
	}

	// If nothing was found, then return no state
	if len(resp.Reservations) == 0 {
		d.SetId("")
		return nil
	}

	instance := &resp.Reservations[0].Instances[0]

	// If the instance is terminated, then it is gone
	if *instance.State.Name == "terminated" {
		d.SetId("")
		return nil
	}

	d.Set("availability_zone", instance.Placement.AvailabilityZone)
	d.Set("key_name", instance.KeyName)
	d.Set("public_dns", instance.PublicDNSName)
	d.Set("public_ip", instance.PublicIPAddress)
	d.Set("private_dns", instance.PrivateDNSName)
	d.Set("private_ip", instance.PrivateIPAddress)
	// Prefer the subnet reported on the primary network interface; fall
	// back to the instance-level subnet ID.
	if len(instance.NetworkInterfaces) > 0 {
		d.Set("subnet_id", instance.NetworkInterfaces[0].SubnetID)
	} else {
		d.Set("subnet_id", instance.SubnetID)
	}
	d.Set("ebs_optimized", instance.EBSOptimized)
	d.Set("tags", tagsToMap(instance.Tags))
	d.Set("tenancy", instance.Placement.Tenancy)

	// Determine whether we're referring to security groups with
	// IDs or names. We use a heuristic to figure this out. By default,
	// we use IDs if we're in a VPC. However, if we previously had an
	// all-name list of security groups, we use names. Or, if we had any
	// IDs, we use IDs.
	useID := instance.SubnetID != nil && *instance.SubnetID != ""
	if v := d.Get("security_groups"); v != nil {
		match := false
		for _, v := range v.(*schema.Set).List() {
			if strings.HasPrefix(v.(string), "sg-") {
				match = true
				break
			}
		}

		useID = match
	}

	// Build up the security groups
	sgs := make([]string, len(instance.SecurityGroups))
	for i, sg := range instance.SecurityGroups {
		if useID {
			sgs[i] = *sg.GroupID
		} else {
			sgs[i] = *sg.GroupName
		}
	}
	d.Set("security_groups", sgs)

	if err := readBlockDevices(d, instance, ec2conn); err != nil {
		return err
	}

	return nil
}
// resourceAwsInstanceUpdate applies in-place changes to an existing EC2
// instance: the source_dest_check attribute (VPC instances only) and tags.
func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn

	// SourceDestCheck is a VPC-only attribute, so it is modified only when
	// the instance lives in a subnet.
	if subnetID := d.Get("subnet_id").(string); subnetID != "" {
		log.Printf("[INFO] Modifying instance %s", d.Id())
		req := &ec2.ModifyInstanceAttributeRequest{
			InstanceID: aws.String(d.Id()),
			SourceDestCheck: &ec2.AttributeBooleanValue{
				Value: aws.Boolean(d.Get("source_dest_check").(bool)),
			},
		}
		if err := conn.ModifyInstanceAttribute(req); err != nil {
			return err
		}
	}

	// TODO(mitchellh): wait for the attributes we modified to
	// persist the change...

	if err := setTags(conn, d); err != nil {
		return err
	}
	d.SetPartial("tags")
	return nil
}
// resourceAwsInstanceDelete terminates the EC2 instance and waits until AWS
// reports it in the "terminated" state before clearing the resource ID.
func resourceAwsInstanceDelete(d *schema.ResourceData, meta interface{}) error {
	ec2conn := meta.(*AWSClient).ec2conn

	log.Printf("[INFO] Terminating instance: %s", d.Id())
	req := &ec2.TerminateInstancesRequest{
		InstanceIDs: []string{d.Id()},
	}
	if _, err := ec2conn.TerminateInstances(req); err != nil {
		return fmt.Errorf("Error terminating instance: %s", err)
	}

	log.Printf(
		"[DEBUG] Waiting for instance (%s) to become terminated",
		d.Id())

	// Poll the instance state until it reaches "terminated" or the
	// 10-minute timeout elapses.
	stateConf := &resource.StateChangeConf{
		Pending:    []string{"pending", "running", "shutting-down", "stopped", "stopping"},
		Target:     "terminated",
		Refresh:    InstanceStateRefreshFunc(ec2conn, d.Id()),
		Timeout:    10 * time.Minute,
		Delay:      10 * time.Second,
		MinTimeout: 3 * time.Second,
	}

	_, err := stateConf.WaitForState()
	if err != nil {
		return fmt.Errorf(
			"Error waiting for instance (%s) to terminate: %s",
			d.Id(), err)
	}

	// Clearing the ID tells Terraform the resource no longer exists.
	d.SetId("")
	return nil
}
// InstanceStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch
// an EC2 instance.
// The returned closure yields the instance object and its current state name.
// When AWS cannot see the instance yet (NotFound error or empty response) it
// returns an empty state with a nil error so the caller keeps polling.
func InstanceStateRefreshFunc(conn *ec2.EC2, instanceID string) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		resp, err := conn.DescribeInstances(&ec2.DescribeInstancesRequest{
			InstanceIDs: []string{instanceID},
		})
		if err != nil {
			if ec2err, ok := err.(aws.APIError); ok && ec2err.Code == "InvalidInstanceID.NotFound" {
				// Set this to nil as if we didn't find anything.
				resp = nil
			} else {
				log.Printf("Error on InstanceStateRefresh: %s", err)
				return nil, "", err
			}
		}

		if resp == nil || len(resp.Reservations) == 0 || len(resp.Reservations[0].Instances) == 0 {
			// Sometimes AWS just has consistency issues and doesn't see
			// our instance yet. Return an empty state.
			return nil, "", nil
		}

		i := &resp.Reservations[0].Instances[0]
		return i, *i.State.Name, nil
	}
}
// readBlockDevices loads the instance's block device information into state:
// "ebs_block_device" unconditionally, and "root_block_device" only when a
// root volume was identified.
func readBlockDevices(d *schema.ResourceData, instance *ec2.Instance, ec2conn *ec2.EC2) error {
	ibds, err := readBlockDevicesFromInstance(instance, ec2conn)
	if err != nil {
		return err
	}

	if err := d.Set("ebs_block_device", ibds["ebs"]); err != nil {
		return err
	}
	if ibds["root"] != nil {
		// root_block_device is modeled as a set with at most one element.
		if err := d.Set("root_block_device", []interface{}{ibds["root"]}); err != nil {
			return err
		}
	}

	return nil
}
// readBlockDevicesFromInstance inspects the instance's EBS block device
// mappings and returns a map with two keys:
//   "root" - a map describing the root volume, or nil if none was matched
//   "ebs"  - a slice of maps describing the non-root EBS volumes
// DescribeVolumes is called because volume size/type/IOPS are not part of
// the instance's own block device mapping data.
func readBlockDevicesFromInstance(instance *ec2.Instance, ec2conn *ec2.EC2) (map[string]interface{}, error) {
	blockDevices := make(map[string]interface{})
	blockDevices["ebs"] = make([]map[string]interface{}, 0)
	blockDevices["root"] = nil

	// Index the instance's EBS-backed mappings by volume ID.
	instanceBlockDevices := make(map[string]ec2.InstanceBlockDeviceMapping)
	for _, bd := range instance.BlockDeviceMappings {
		if bd.EBS != nil {
			instanceBlockDevices[*(bd.EBS.VolumeID)] = bd
		}
	}

	volIDs := make([]string, 0, len(instanceBlockDevices))
	for volID := range instanceBlockDevices {
		volIDs = append(volIDs, volID)
	}

	// Need to call DescribeVolumes to get volume_size and volume_type for each
	// EBS block device
	volResp, err := ec2conn.DescribeVolumes(&ec2.DescribeVolumesRequest{
		VolumeIDs: volIDs,
	})
	if err != nil {
		return nil, err
	}

	for _, vol := range volResp.Volumes {
		instanceBd := instanceBlockDevices[*vol.VolumeID]
		bd := make(map[string]interface{})
		// Copy only fields that are present; the SDK uses nil pointers for
		// unset values.
		if instanceBd.EBS != nil && instanceBd.EBS.DeleteOnTermination != nil {
			bd["delete_on_termination"] = *instanceBd.EBS.DeleteOnTermination
		}
		if instanceBd.DeviceName != nil {
			bd["device_name"] = *instanceBd.DeviceName
		}
		if vol.Size != nil {
			bd["volume_size"] = *vol.Size
		}
		if vol.VolumeType != nil {
			bd["volume_type"] = *vol.VolumeType
		}
		if vol.IOPS != nil {
			bd["iops"] = *vol.IOPS
		}

		if blockDeviceIsRoot(instanceBd, instance) {
			blockDevices["root"] = bd
		} else {
			// encrypted and snapshot_id are only recorded for non-root
			// devices; the root_block_device schema does not carry them.
			if vol.Encrypted != nil {
				bd["encrypted"] = *vol.Encrypted
			}
			if vol.SnapshotID != nil {
				bd["snapshot_id"] = *vol.SnapshotID
			}
			blockDevices["ebs"] = append(blockDevices["ebs"].([]map[string]interface{}), bd)
		}
	}

	return blockDevices, nil
}
// blockDeviceIsRoot reports whether the given block device mapping refers to
// the instance's root device. A missing device name on either side counts
// as "not root".
func blockDeviceIsRoot(bd ec2.InstanceBlockDeviceMapping, instance *ec2.Instance) bool {
	if bd.DeviceName == nil || instance.RootDeviceName == nil {
		return false
	}
	return *bd.DeviceName == *instance.RootDeviceName
}
|
package boomer
import (
"errors"
"math"
"strconv"
"strings"
"sync/atomic"
"time"
)
// RateLimiter is used to put limits on task executions.
type RateLimiter interface {
	// Start is used to enable the rate limiter.
	// It can be implemented as a noop if not needed.
	Start()

	// Acquire() is called before executing a task.Fn function.
	// If Acquire() returns false, the task.Fn function will be executed.
	// If Acquire() returns true, the task.Fn function won't be executed this
	// time, but Acquire() will be called again very soon.
	// (The usage example below is authoritative: the task runs when the
	// caller was NOT blocked.)
	// It works like:
	// for {
	//     blocked := rateLimiter.Acquire()
	//     if !blocked {
	//         task.Fn()
	//     }
	// }
	// Acquire() should block the caller until execution is allowed.
	Acquire() bool

	// Stop is used to disable the rate limiter.
	// It can be implemented as a noop if not needed.
	Stop()
}
// A StableRateLimiter uses the token bucket algorithm.
// the bucket is refilled according to the refill period, no burst is allowed.
type StableRateLimiter struct {
	threshold        int64         // bucket capacity for each refill period
	currentThreshold int64         // tokens remaining (decremented atomically)
	refillPeriod     time.Duration // how often the bucket is refilled
	broadcastChannel chan bool     // closed on refill to wake blocked Acquire() calls
	quitChannel      chan bool     // closed by Stop to end the refill goroutine
}

// NewStableRateLimiter returns a StableRateLimiter that permits at most
// threshold executions per refillPeriod.
func NewStableRateLimiter(threshold int64, refillPeriod time.Duration) (rateLimiter *StableRateLimiter) {
	return &StableRateLimiter{
		threshold:        threshold,
		currentThreshold: threshold,
		refillPeriod:     refillPeriod,
		broadcastChannel: make(chan bool),
	}
}

// Start to refill the bucket periodically.
func (limiter *StableRateLimiter) Start() {
	limiter.quitChannel = make(chan bool)
	quit := limiter.quitChannel
	go func() {
		for {
			select {
			case <-quit:
				return
			default:
			}
			// Refill the bucket, let a period elapse, then wake every
			// goroutine blocked in Acquire() by closing the broadcast
			// channel and installing a fresh one for the next period.
			atomic.StoreInt64(&limiter.currentThreshold, limiter.threshold)
			time.Sleep(limiter.refillPeriod)
			close(limiter.broadcastChannel)
			limiter.broadcastChannel = make(chan bool)
		}
	}()
}

// Acquire a token from the bucket, returns true if the bucket is exhausted.
func (limiter *StableRateLimiter) Acquire() (blocked bool) {
	if atomic.AddInt64(&limiter.currentThreshold, -1) >= 0 {
		return false
	}
	// Out of tokens: wait here until the refill goroutine broadcasts.
	<-limiter.broadcastChannel
	return true
}

// Stop the rate limiter.
func (limiter *StableRateLimiter) Stop() {
	close(limiter.quitChannel)
}
// ErrParsingRampUpRate is the error returned if the format of rampUpRate is invalid.
var ErrParsingRampUpRate = errors.New("ratelimiter: invalid format of rampUpRate, try \"1\" or \"1/1s\"")

// A RampUpRateLimiter uses the token bucket algorithm.
// the threshold is updated according to the warm up rate.
// the bucket is refilled according to the refill period, no burst is allowed.
// (The previously unused rampUpChannel field has been removed.)
type RampUpRateLimiter struct {
	maxThreshold     int64         // ceiling that nextThreshold ramps up to
	nextThreshold    int64         // threshold installed into the bucket at the next refill
	currentThreshold int64         // tokens remaining in this period (accessed atomically)
	refillPeriod     time.Duration // how often the bucket is refilled
	rampUpRate       string        // raw rate spec, e.g. "1" or "1/1s"
	rampUpStep       int64         // parsed increment added every rampUpPeroid
	rampUpPeroid     time.Duration // parsed ramp-up period
	broadcastChannel chan bool     // closed at each refill to wake blocked Acquire() calls
	quitChannel      chan bool     // closed by Stop to terminate the background goroutines
}

// NewRampUpRateLimiter returns a RampUpRateLimiter.
// Valid formats of rampUpRate are "1", "1/1s".
func NewRampUpRateLimiter(maxThreshold int64, rampUpRate string, refillPeriod time.Duration) (rateLimiter *RampUpRateLimiter, err error) {
	rateLimiter = &RampUpRateLimiter{
		maxThreshold:     maxThreshold,
		nextThreshold:    0,
		currentThreshold: 0,
		rampUpRate:       rampUpRate,
		refillPeriod:     refillPeriod,
		broadcastChannel: make(chan bool),
	}
	rateLimiter.rampUpStep, rateLimiter.rampUpPeroid, err = rateLimiter.parseRampUpRate(rateLimiter.rampUpRate)
	if err != nil {
		return nil, err
	}
	return rateLimiter, nil
}

// parseRampUpRate parses a rate spec of the form "100" (tokens per second)
// or "100/2s" (tokens per arbitrary duration).
// It returns ErrParsingRampUpRate for anything else.
func (limiter *RampUpRateLimiter) parseRampUpRate(rampUpRate string) (rampUpStep int64, rampUpPeroid time.Duration, err error) {
	if !strings.Contains(rampUpRate, "/") {
		// Bare number: interpreted as tokens per second.
		rampUpStep, err = strconv.ParseInt(rampUpRate, 10, 64)
		if err != nil {
			return 0, 0, ErrParsingRampUpRate
		}
		return rampUpStep, time.Second, nil
	}

	tmp := strings.Split(rampUpRate, "/")
	if len(tmp) != 2 {
		return 0, 0, ErrParsingRampUpRate
	}
	// Assign to the named results with "=" rather than ":=" so we don't
	// shadow them (the original declared new locals here).
	rampUpStep, err = strconv.ParseInt(tmp[0], 10, 64)
	if err != nil {
		return 0, 0, ErrParsingRampUpRate
	}
	rampUpPeroid, err = time.ParseDuration(tmp[1])
	if err != nil {
		return 0, 0, ErrParsingRampUpRate
	}
	return rampUpStep, rampUpPeroid, nil
}

// Start to refill the bucket periodically.
// Two goroutines are launched; both exit when quitChannel is closed:
//   - the bucket updater installs nextThreshold as the current token count
//     every refillPeriod and wakes goroutines blocked in Acquire()
//   - the threshold updater grows nextThreshold by rampUpStep every
//     rampUpPeroid, clamped to maxThreshold
func (limiter *RampUpRateLimiter) Start() {
	limiter.quitChannel = make(chan bool)
	quitChannel := limiter.quitChannel
	// bucket updater
	go func() {
		for {
			select {
			case <-quitChannel:
				return
			default:
				atomic.StoreInt64(&limiter.currentThreshold, limiter.nextThreshold)
				time.Sleep(limiter.refillPeriod)
				// Wake all blocked Acquire() calls, then install a fresh
				// channel for the next period.
				// NOTE(review): this replacement is not synchronized with
				// readers in Acquire(); confirm it is benign with -race.
				close(limiter.broadcastChannel)
				limiter.broadcastChannel = make(chan bool)
			}
		}
	}()
	// threshold updater
	go func() {
		for {
			select {
			case <-quitChannel:
				return
			default:
				nextValue := limiter.nextThreshold + limiter.rampUpStep
				if nextValue < 0 {
					// int64 overflow
					nextValue = int64(math.MaxInt64)
				}
				if nextValue > limiter.maxThreshold {
					nextValue = limiter.maxThreshold
				}
				atomic.StoreInt64(&limiter.nextThreshold, nextValue)
				time.Sleep(limiter.rampUpPeroid)
			}
		}
	}()
}

// Acquire a token from the bucket, returns true if the bucket is exhausted.
func (limiter *RampUpRateLimiter) Acquire() (blocked bool) {
	permit := atomic.AddInt64(&limiter.currentThreshold, -1)
	if permit < 0 {
		blocked = true
		// block until the bucket is refilled
		<-limiter.broadcastChannel
	} else {
		blocked = false
	}
	return blocked
}

// Stop the rate limiter.
func (limiter *RampUpRateLimiter) Stop() {
	// Reset the ramp so a subsequent Start begins from zero again.
	limiter.nextThreshold = 0
	close(limiter.quitChannel)
}
FIX: remove unused variable
package boomer
import (
"errors"
"math"
"strconv"
"strings"
"sync/atomic"
"time"
)
// RateLimiter is used to put limits on task executions.
type RateLimiter interface {
	// Start is used to enable the rate limiter.
	// It can be implemented as a noop if not needed.
	Start()

	// Acquire() is called before executing a task.Fn function.
	// If Acquire() returns false, the task.Fn function will be executed.
	// If Acquire() returns true, the task.Fn function won't be executed this
	// time, but Acquire() will be called again very soon.
	// (The usage example below is authoritative: the task runs when the
	// caller was NOT blocked.)
	// It works like:
	// for {
	//     blocked := rateLimiter.Acquire()
	//     if !blocked {
	//         task.Fn()
	//     }
	// }
	// Acquire() should block the caller until execution is allowed.
	Acquire() bool

	// Stop is used to disable the rate limiter.
	// It can be implemented as a noop if not needed.
	Stop()
}
// A StableRateLimiter uses the token bucket algorithm.
// the bucket is refilled according to the refill period, no burst is allowed.
type StableRateLimiter struct {
	threshold        int64         // bucket capacity for each refill period
	currentThreshold int64         // tokens remaining (decremented atomically)
	refillPeriod     time.Duration // how often the bucket is refilled
	broadcastChannel chan bool     // closed on refill to wake blocked Acquire() calls
	quitChannel      chan bool     // closed by Stop to end the refill goroutine
}

// NewStableRateLimiter returns a StableRateLimiter that permits at most
// threshold executions per refillPeriod.
func NewStableRateLimiter(threshold int64, refillPeriod time.Duration) (rateLimiter *StableRateLimiter) {
	return &StableRateLimiter{
		threshold:        threshold,
		currentThreshold: threshold,
		refillPeriod:     refillPeriod,
		broadcastChannel: make(chan bool),
	}
}

// Start to refill the bucket periodically.
func (limiter *StableRateLimiter) Start() {
	limiter.quitChannel = make(chan bool)
	quit := limiter.quitChannel
	go func() {
		for {
			select {
			case <-quit:
				return
			default:
			}
			// Refill the bucket, let a period elapse, then wake every
			// goroutine blocked in Acquire() by closing the broadcast
			// channel and installing a fresh one for the next period.
			atomic.StoreInt64(&limiter.currentThreshold, limiter.threshold)
			time.Sleep(limiter.refillPeriod)
			close(limiter.broadcastChannel)
			limiter.broadcastChannel = make(chan bool)
		}
	}()
}

// Acquire a token from the bucket, returns true if the bucket is exhausted.
func (limiter *StableRateLimiter) Acquire() (blocked bool) {
	if atomic.AddInt64(&limiter.currentThreshold, -1) >= 0 {
		return false
	}
	// Out of tokens: wait here until the refill goroutine broadcasts.
	<-limiter.broadcastChannel
	return true
}

// Stop the rate limiter.
func (limiter *StableRateLimiter) Stop() {
	close(limiter.quitChannel)
}
// ErrParsingRampUpRate is the error returned if the format of rampUpRate is invalid.
var ErrParsingRampUpRate = errors.New("ratelimiter: invalid format of rampUpRate, try \"1\" or \"1/1s\"")
// A RampUpRateLimiter uses the token bucket algorithm.
// the threshold is updated according to the warm up rate.
// the bucket is refilled according to the refill period, no burst is allowed.
type RampUpRateLimiter struct {
maxThreshold int64
nextThreshold int64
currentThreshold int64
refillPeriod time.Duration
rampUpRate string
rampUpStep int64
rampUpPeroid time.Duration
broadcastChannel chan bool
quitChannel chan bool
}
// NewRampUpRateLimiter returns a RampUpRateLimiter.
// Valid formats of rampUpRate are "1", "1/1s".
func NewRampUpRateLimiter(maxThreshold int64, rampUpRate string, refillPeriod time.Duration) (rateLimiter *RampUpRateLimiter, err error) {
	// nextThreshold and currentThreshold start at their zero values; the
	// ramp-up goroutine raises them after Start.
	limiter := &RampUpRateLimiter{
		maxThreshold:     maxThreshold,
		rampUpRate:       rampUpRate,
		refillPeriod:     refillPeriod,
		broadcastChannel: make(chan bool),
	}
	limiter.rampUpStep, limiter.rampUpPeroid, err = limiter.parseRampUpRate(limiter.rampUpRate)
	if err != nil {
		return nil, err
	}
	return limiter, nil
}
// parseRampUpRate parses a ramp-up specification of the form "N" (meaning N
// per second) or "N/duration" (e.g. "1/1s") into a step and a period.
// It returns ErrParsingRampUpRate for any malformed input.
func (limiter *RampUpRateLimiter) parseRampUpRate(rampUpRate string) (rampUpStep int64, rampUpPeroid time.Duration, err error) {
	if strings.Contains(rampUpRate, "/") {
		tmp := strings.Split(rampUpRate, "/")
		if len(tmp) != 2 {
			return 0, 0, ErrParsingRampUpRate
		}
		// Assign with "=" to the named results: the original used ":="
		// here, shadowing rampUpStep/rampUpPeroid and returning partially
		// populated values alongside the error.
		rampUpStep, err = strconv.ParseInt(tmp[0], 10, 64)
		if err != nil {
			return 0, 0, ErrParsingRampUpRate
		}
		rampUpPeroid, err = time.ParseDuration(tmp[1])
		if err != nil {
			return 0, 0, ErrParsingRampUpRate
		}
		return rampUpStep, rampUpPeroid, nil
	}
	// Bare number: interpret as "N per second".
	rampUpStep, err = strconv.ParseInt(rampUpRate, 10, 64)
	if err != nil {
		return 0, 0, ErrParsingRampUpRate
	}
	return rampUpStep, time.Second, nil
}
// Start to refill the bucket periodically.
// Two goroutines run until Stop closes quitChannel: one applies the current
// ramp-up threshold every refillPeriod and wakes blocked Acquire callers;
// the other ramps nextThreshold up by rampUpStep every rampUpPeroid, capped
// at maxThreshold.
func (limiter *RampUpRateLimiter) Start() {
	limiter.quitChannel = make(chan bool)
	quitChannel := limiter.quitChannel
	// bucket updater
	go func() {
		for {
			select {
			case <-quitChannel:
				return
			default:
				// Read nextThreshold atomically: it is written with
				// atomic.StoreInt64 by the threshold updater below and by
				// Stop, so the original plain read was a data race.
				atomic.StoreInt64(&limiter.currentThreshold, atomic.LoadInt64(&limiter.nextThreshold))
				time.Sleep(limiter.refillPeriod)
				// NOTE(review): broadcastChannel is replaced without
				// synchronization while Acquire may read it — confirm
				// with `go test -race`.
				close(limiter.broadcastChannel)
				limiter.broadcastChannel = make(chan bool)
			}
		}
	}()
	// threshold updater
	go func() {
		for {
			select {
			case <-quitChannel:
				return
			default:
				// Atomic load for the same reason as above.
				nextValue := atomic.LoadInt64(&limiter.nextThreshold) + limiter.rampUpStep
				if nextValue < 0 {
					// int64 overflow
					nextValue = int64(math.MaxInt64)
				}
				if nextValue > limiter.maxThreshold {
					nextValue = limiter.maxThreshold
				}
				atomic.StoreInt64(&limiter.nextThreshold, nextValue)
				time.Sleep(limiter.rampUpPeroid)
			}
		}
	}()
}
// Acquire a token from the bucket, returns true if the bucket is exhausted.
func (limiter *RampUpRateLimiter) Acquire() (blocked bool) {
	if atomic.AddInt64(&limiter.currentThreshold, -1) >= 0 {
		// A token was available; no waiting needed.
		return false
	}
	// Bucket is empty: wait until the refill goroutine broadcasts by
	// closing broadcastChannel.
	<-limiter.broadcastChannel
	return true
}
// Stop the rate limiter.
// Resets the ramp-up state and signals both goroutines started by Start to
// exit.
func (limiter *RampUpRateLimiter) Stop() {
	// Store atomically: the goroutines in Start access nextThreshold with
	// atomic operations, so the original plain write was a data race.
	atomic.StoreInt64(&limiter.nextThreshold, 0)
	close(limiter.quitChannel)
}
|
package main
import (
"os"
"os/exec"
"path/filepath"
"fmt"
"time"
"bytes"
"sync"
"strings"
"github.com/spf13/cobra"
"github.com/fatih/color"
)
// findRepo walks root and sends on c the absolute path of every directory
// containing a child directory named sign (e.g. ".hg"). c is closed when the
// walk finishes so range loops over it terminate.
func findRepo(root string, sign string, c chan string) {
	defer close(c)
	visit := func(path string, info os.FileInfo, err error) error {
		// Guard against walk errors: info is nil when err != nil, and the
		// original dereferenced it unconditionally.
		if err != nil || info == nil || !info.IsDir() {
			return nil
		}
		if info.Name() == sign {
			dir, _ := filepath.Split(path)
			abs_dir, err := filepath.Abs(dir)
			if err != nil {
				color.Red(err.Error())
				return nil
			}
			c <- abs_dir
		}
		return nil
	}
	filepath.Walk(root, visit)
}
// runHgCommand runs "hg <hg_cmd> --repository <path> [args...]" and prints
// the repository path, the command line, and the command's output. Failures
// are reported in red rather than aborting, so remaining repositories still
// get processed.
func runHgCommand(hg_cmd string, path string, args ...string) {
	args = append([]string{hg_cmd, "--repository", path}, args...)
	cmd := exec.Command("hg", args...)
	var out, stderr bytes.Buffer
	cmd.Stdout = &out
	// Capture stderr too: the original discarded it, losing hg's own
	// diagnostics whenever the command failed.
	cmd.Stderr = &stderr
	err := cmd.Run()
	color.Green(path)
	color.Yellow("hg %s", strings.Join(args, " "))
	if err != nil {
		color.Red(err.Error())
		if stderr.Len() > 0 {
			color.Red(stderr.String())
		}
	}
	fmt.Println(out.String())
}
// hgStatus runs "hg status" for the repository at path. branch and
// new_branch are unused; they exist so all hg helpers share one signature.
func hgStatus(path string, wg *sync.WaitGroup, branch string, new_branch bool) {
	defer wg.Done()
	runHgCommand("status", path)
}
// hgPull runs "hg pull" for the repository at path. branch and new_branch
// are unused; they exist so all hg helpers share one signature.
func hgPull(path string, wg *sync.WaitGroup, branch string, new_branch bool) {
	defer wg.Done()
	runHgCommand("pull", path)
}
// hgPush runs "hg push" for the repository at path, passing --new-branch
// when requested so new remote branches may be created.
func hgPush(path string, wg *sync.WaitGroup, branch string, new_branch bool) {
	defer wg.Done()
	if new_branch {
		runHgCommand("push", path, "--new-branch")
		return
	}
	runHgCommand("push", path)
}
// hgUpdate runs "hg update" for the repository at path, targeting the given
// branch/tag via --rev when branch is non-empty.
func hgUpdate(path string, wg *sync.WaitGroup, branch string, new_branch bool) {
	defer wg.Done()
	if branch == "" {
		runHgCommand("update", path)
		return
	}
	runHgCommand("update", path, "--rev", branch)
}
// hgPullUpdate runs "hg pull" followed by "hg update" (optionally to a
// specific branch/tag) for the repository at path.
func hgPullUpdate(path string, wg *sync.WaitGroup, branch string, new_branch bool) {
	defer wg.Done()
	runHgCommand("pull", path)
	if branch == "" {
		runHgCommand("update", path)
		return
	}
	runHgCommand("update", path, "--rev", branch)
}
// runCommand discovers every Mercurial repository under the current
// directory and runs cmdFunc against each one concurrently, then reports how
// many repositories were processed and how long it took.
func runCommand(cmdFunc func(path string, wg *sync.WaitGroup, branch string, new_branch bool), branch string, new_branch bool) {
	start := time.Now()
	var wg sync.WaitGroup
	repos := make(chan string)
	go findRepo(".", ".hg", repos)
	count := 0
	for path := range repos {
		wg.Add(1)
		go cmdFunc(path, &wg, branch, new_branch)
		count++
	}
	wg.Wait()
	color.Cyan("Done %d repos in %s", count, time.Since(start))
}
// main wires the hg helpers into an "eatme" CLI. The bare command runs
// pull+update over every repository; subcommands run a single operation.
func main() {
	var branch string
	var new_branch bool
	var EatMeCmd = &cobra.Command{
		Use:   "eatme",
		Short: "pull + update",
		Run: func(cmd *cobra.Command, args []string) {
			runCommand(hgPullUpdate, branch, new_branch)
		},
	}
	var cmdUpdate = &cobra.Command{
		Use:   "update",
		Short: "only update",
		Long:  ``,
		Run: func(cmd *cobra.Command, args []string) {
			runCommand(hgUpdate, branch, new_branch)
		},
	}
	var cmdPull = &cobra.Command{
		Use:   "pull",
		Short: "only pull",
		Long:  ``,
		Run: func(cmd *cobra.Command, args []string) {
			runCommand(hgPull, branch, new_branch)
		},
	}
	var cmdPush = &cobra.Command{
		Use:   "push",
		Short: "only push",
		Long:  ``,
		Run: func(cmd *cobra.Command, args []string) {
			runCommand(hgPush, branch, new_branch)
		},
	}
	// Wire up hgStatus too: the helper was defined but unreachable from the
	// CLI in the original.
	var cmdStatus = &cobra.Command{
		Use:   "status",
		Short: "only status",
		Long:  ``,
		Run: func(cmd *cobra.Command, args []string) {
			runCommand(hgStatus, branch, new_branch)
		},
	}
	EatMeCmd.PersistentFlags().StringVarP(&branch, "branch", "b", "", "Branch or Tag name")
	cmdPush.Flags().BoolVarP(&new_branch, "new-branch", "n", false, "Create remote new branch")
	EatMeCmd.AddCommand(cmdUpdate)
	EatMeCmd.AddCommand(cmdPull)
	EatMeCmd.AddCommand(cmdPush)
	EatMeCmd.AddCommand(cmdStatus)
	EatMeCmd.Execute()
}
refactor using struct #1
package main
import (
"os"
"os/exec"
"path/filepath"
"fmt"
"time"
"bytes"
"sync"
"strings"
"github.com/spf13/cobra"
"github.com/fatih/color"
)
// HGCommand describes one hg subcommand (plus extra arguments) that can be
// executed against a single repository or every repository under the cwd.
type HGCommand struct {
	hg_cmd string   // hg subcommand, e.g. "pull", "update", "push"
	args   []string // extra arguments appended after "--repository <path>"
}
// findRepo walks root and sends on path_chan the absolute path of every
// directory containing a child directory named sign (e.g. ".hg"). The
// channel is closed when the walk finishes.
func findRepo(root string, sign string, path_chan chan string) {
	defer close(path_chan)
	visit := func(path string, info os.FileInfo, err error) error {
		// Guard against walk errors: info is nil when err != nil, and the
		// original dereferenced it unconditionally.
		if err != nil || info == nil || !info.IsDir() {
			return nil
		}
		if info.Name() == sign {
			dir, _ := filepath.Split(path)
			abs_dir, err := filepath.Abs(dir)
			if err != nil {
				color.Red(err.Error())
				return nil
			}
			path_chan <- abs_dir
		}
		return nil
	}
	filepath.Walk(root, visit)
}
// Run executes the command against the repository at path and prints the
// path, the command line, and the command's output. wg is marked done when
// the command finishes, whether or not it succeeded.
func (cmd *HGCommand) Run(path string, wg *sync.WaitGroup) {
	defer wg.Done()
	args := append([]string{cmd.hg_cmd, "--repository", path}, cmd.args...)
	system_cmd := exec.Command("hg", args...)
	var out, stderr bytes.Buffer
	system_cmd.Stdout = &out
	// Capture stderr too: the original discarded it, losing hg's own
	// diagnostics whenever the command failed.
	system_cmd.Stderr = &stderr
	err := system_cmd.Run()
	color.Green(path)
	color.Yellow("hg %s", strings.Join(args, " "))
	if err != nil {
		color.Red(err.Error())
		if stderr.Len() > 0 {
			color.Red(stderr.String())
		}
	}
	fmt.Println(out.String())
}
// RunForAll executes the command once per Mercurial repository found under
// the current directory, running the repositories concurrently, then reports
// the totals.
func (cmd *HGCommand) RunForAll() {
	start := time.Now()
	var wg sync.WaitGroup
	paths := make(chan string)
	go findRepo(".", ".hg", paths)
	count := 0
	for path := range paths {
		wg.Add(1)
		go cmd.Run(path, &wg)
		count++
	}
	wg.Wait()
	color.Cyan("Done \"hg %s\" for %d repos in %s\n\n", cmd.hg_cmd, count, time.Since(start))
}
// SetBranch appends "--rev <branch>" to the command when branch is non-empty.
func (cmd *HGCommand) SetBranch(branch string) {
	if branch == "" {
		return
	}
	cmd.args = append(cmd.args, "--rev", branch)
}
// SetNewBranch appends "--new-branch" to the command when new_branch is set.
func (cmd *HGCommand) SetNewBranch(new_branch bool) {
	if !new_branch {
		return
	}
	cmd.args = append(cmd.args, "--new-branch")
}
// SetClean appends "--clean" to the command when clean is set.
func (cmd *HGCommand) SetClean(clean bool) {
	if !clean {
		return
	}
	cmd.args = append(cmd.args, "--clean")
}
func main() {
var branch string
var new_branch bool
var clean bool
var EatMeCmd = &cobra.Command{
Use: "eatme",
Short: "pull + update",
Run: func(cmd *cobra.Command, args []string) {
pull_cmd := &HGCommand{hg_cmd: "pull"}
pull_cmd.RunForAll()
update_cmd := &HGCommand{hg_cmd: "update"}
update_cmd.SetClean(clean)
update_cmd.RunForAll()
},
}
var cmdUpdate = &cobra.Command{
Use: "update",
Short: "only update",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
update_cmd := &HGCommand{hg_cmd: "update"}
update_cmd.SetClean(clean)
update_cmd.RunForAll()
},
}
var cmdPull = &cobra.Command{
Use: "pull",
Short: "only pull",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
pull_cmd := &HGCommand{hg_cmd: "pull"}
pull_cmd.RunForAll()
},
}
var cmdPush = &cobra.Command{
Use: "push",
Short: "only push",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
push_cmd := &HGCommand{hg_cmd: "push"}
push_cmd.SetNewBranch(new_branch)
push_cmd.RunForAll()
},
}
EatMeCmd.PersistentFlags().StringVarP(&branch, "branch", "b", "", "Branch or Tag name")
cmdPush.Flags().BoolVarP(&new_branch, "new-branch", "n", false, "Create remote new branch")
cmdPush.Flags().BoolVarP(&clean, "clean", "C", false, "Clean update")
EatMeCmd.AddCommand(cmdUpdate)
EatMeCmd.AddCommand(cmdPull)
EatMeCmd.AddCommand(cmdPush)
EatMeCmd.Execute()
} |
package controllers_test
import (
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/antonve/logger-api/config"
"github.com/antonve/logger-api/controllers"
"github.com/antonve/logger-api/models"
"github.com/antonve/logger-api/models/enums"
"github.com/antonve/logger-api/utils"
"github.com/labstack/echo"
"github.com/labstack/echo/middleware"
"github.com/stretchr/testify/assert"
)
// LoginBody is the decoded JSON response body of the login and refresh
// endpoints exercised below.
type LoginBody struct {
	Token string      `json:"token"` // JWT issued for the session
	User  models.User `json:"user"`  // authenticated user; tests expect Password to be empty
}
// mockJwtToken is a JWT for the shared test user; it is sent as a Bearer
// token in authenticated requests below.
var mockJwtToken string

// mockUser is the user mockJwtToken belongs to.
var mockUser *models.User

// init prepares the test environment once and creates the shared
// "session_test" user fixture.
func init() {
	utils.SetupTesting()
	mockJwtToken, mockUser = utils.SetupTestUser("session_test")
}
// TestCreateUser registers a new user and expects a 201 with a success body.
func TestCreateUser(t *testing.T) {
	// Build the registration request.
	e := echo.New()
	payload := `{"email": "register_test@example.com", "display_name": "logger", "password": "password"}`
	req, err := http.NewRequest(echo.POST, "/api/register", strings.NewReader(payload))
	if !assert.NoError(t, err) {
		return
	}
	req.Header.Set(echo.HeaderContentType, echo.MIMEApplicationJSON)
	recorder := httptest.NewRecorder()
	ctx := e.NewContext(req, recorder)

	// Registration should succeed and report success.
	if assert.NoError(t, controllers.APISessionRegister(ctx)) {
		assert.Equal(t, http.StatusCreated, recorder.Code)
		assert.Equal(t, `{"success": true}`, recorder.Body.String())
	}
}
//func TestCreateInvalidUser(t *testing.T) {
//// Setup registration request
//e := echo.New()
//req, err := http.NewRequest(echo.POST, "/api/register", strings.NewReader(`{"email": "register_test@invalid##", "display_name": "invalid", "password": "password"}`))
//if !assert.NoError(t, err) {
//return
//}
//req.Header.Set(echo.HeaderContentType, echo.MIMEApplicationJSON)
//rec := httptest.NewRecorder()
//c := e.NewContext(req, rec)
//if assert.NoError(t, controllers.APISessionRegister(c)) {
//assert.Equal(t, http.StatusBadRequest, rec.Code)
//assert.NotEqual(t, `{"success": true}`, rec.Body.String())
//}
//}
// TestLoginUser creates a user directly, logs in through the API, and checks
// the returned token and (password-less) user payload.
func TestLoginUser(t *testing.T) {
	// Seed the user the login request will authenticate as.
	user := models.User{Email: "login_test@example.com", DisplayName: "logger_user", Password: "password", Role: enums.RoleAdmin}
	user.HashPassword()
	userCollection := models.UserCollection{}
	userCollection.Add(&user)

	// Build the login request.
	e := echo.New()
	payload := `{"email": "login_test@example.com", "password": "password"}`
	req, err := http.NewRequest(echo.POST, "/api/login", strings.NewReader(payload))
	if !assert.NoError(t, err) {
		return
	}
	req.Header.Set(echo.HeaderContentType, echo.MIMEApplicationJSON)
	recorder := httptest.NewRecorder()
	ctx := e.NewContext(req, recorder)

	if assert.NoError(t, controllers.APISessionLogin(ctx)) {
		// Decode and validate the login response.
		var body LoginBody
		assert.Equal(t, http.StatusOK, recorder.Code)
		err = json.Unmarshal(recorder.Body.Bytes(), &body)
		assert.Nil(t, err)
		assert.NotEmpty(t, body.Token)
		assert.NotNil(t, body.User)

		// The returned user matches the seeded one...
		assert.Equal(t, "login_test@example.com", body.User.Email)
		assert.Equal(t, "logger_user", body.User.DisplayName)
		assert.Equal(t, enums.RoleAdmin, body.User.Role)
		// ...and the password is never sent back to the client.
		assert.Empty(t, body.User.Password)
	}
}
// TestRefreshJWTToken exchanges a valid JWT for a fresh one via the refresh
// endpoint, going through the JWT middleware like a real request would.
func TestRefreshJWTToken(t *testing.T) {
	e := echo.New()
	req, err := http.NewRequest(echo.POST, "/api/refresh", nil)
	if !assert.NoError(t, err) {
		return
	}
	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", mockJwtToken))
	req.Header.Set(echo.HeaderContentType, echo.MIMEApplicationJSON)
	recorder := httptest.NewRecorder()
	ctx := e.NewContext(req, recorder)

	// Wrap the handler in the JWT middleware so the token is validated.
	handler := middleware.JWTWithConfig(config.GetJWTConfig(&models.JwtClaims{}))(controllers.APISessionRefreshJWTToken)
	if assert.NoError(t, handler(ctx)) {
		var body LoginBody
		assert.Equal(t, http.StatusOK, recorder.Code)
		err = json.Unmarshal(recorder.Body.Bytes(), &body)
		assert.Nil(t, err)
		assert.NotEmpty(t, body.Token)
	}
}
Add test for controllers.APISessionCreateRefreshToken
package controllers_test
import (
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/antonve/logger-api/config"
"github.com/antonve/logger-api/controllers"
"github.com/antonve/logger-api/models"
"github.com/antonve/logger-api/models/enums"
"github.com/antonve/logger-api/utils"
"github.com/labstack/echo"
"github.com/labstack/echo/middleware"
"github.com/stretchr/testify/assert"
)
// LoginBody is the decoded JSON response body of the login and refresh
// endpoints exercised below.
type LoginBody struct {
	Token string      `json:"token"` // JWT issued for the session
	User  models.User `json:"user"`  // authenticated user; tests expect Password to be empty
}

// RefreshTokenBody is the decoded JSON response of the refresh-token
// creation endpoint.
type RefreshTokenBody struct {
	RefreshToken string `json:"refresh_token"`
}
// mockJwtToken is a JWT for the shared test user; it is sent as a Bearer
// token in authenticated requests below.
var mockJwtToken string

// mockUser is the user mockJwtToken belongs to.
var mockUser *models.User

// init prepares the test environment once and creates the shared
// "session_test" user fixture.
func init() {
	utils.SetupTesting()
	mockJwtToken, mockUser = utils.SetupTestUser("session_test")
}
// TestCreateUser registers a new user and expects a 201 with a success body.
func TestCreateUser(t *testing.T) {
	// Build the registration request.
	e := echo.New()
	payload := `{"email": "register_test@example.com", "display_name": "logger", "password": "password"}`
	req, err := http.NewRequest(echo.POST, "/api/register", strings.NewReader(payload))
	if !assert.NoError(t, err) {
		return
	}
	req.Header.Set(echo.HeaderContentType, echo.MIMEApplicationJSON)
	recorder := httptest.NewRecorder()
	ctx := e.NewContext(req, recorder)

	// Registration should succeed and report success.
	if assert.NoError(t, controllers.APISessionRegister(ctx)) {
		assert.Equal(t, http.StatusCreated, recorder.Code)
		assert.Equal(t, `{"success": true}`, recorder.Body.String())
	}
}
//func TestCreateInvalidUser(t *testing.T) {
//// Setup registration request
//e := echo.New()
//req, err := http.NewRequest(echo.POST, "/api/register", strings.NewReader(`{"email": "register_test@invalid##", "display_name": "invalid", "password": "password"}`))
//if !assert.NoError(t, err) {
//return
//}
//req.Header.Set(echo.HeaderContentType, echo.MIMEApplicationJSON)
//rec := httptest.NewRecorder()
//c := e.NewContext(req, rec)
//if assert.NoError(t, controllers.APISessionRegister(c)) {
//assert.Equal(t, http.StatusBadRequest, rec.Code)
//assert.NotEqual(t, `{"success": true}`, rec.Body.String())
//}
//}
// TestLoginUser creates a user directly, logs in through the API, and checks
// the returned token and (password-less) user payload.
func TestLoginUser(t *testing.T) {
	// Seed the user the login request will authenticate as.
	user := models.User{Email: "login_test@example.com", DisplayName: "logger_user", Password: "password", Role: enums.RoleAdmin}
	user.HashPassword()
	userCollection := models.UserCollection{}
	userCollection.Add(&user)

	// Build the login request.
	e := echo.New()
	payload := `{"email": "login_test@example.com", "password": "password"}`
	req, err := http.NewRequest(echo.POST, "/api/login", strings.NewReader(payload))
	if !assert.NoError(t, err) {
		return
	}
	req.Header.Set(echo.HeaderContentType, echo.MIMEApplicationJSON)
	recorder := httptest.NewRecorder()
	ctx := e.NewContext(req, recorder)

	if assert.NoError(t, controllers.APISessionLogin(ctx)) {
		// Decode and validate the login response.
		var body LoginBody
		assert.Equal(t, http.StatusOK, recorder.Code)
		err = json.Unmarshal(recorder.Body.Bytes(), &body)
		assert.Nil(t, err)
		assert.NotEmpty(t, body.Token)
		assert.NotNil(t, body.User)

		// The returned user matches the seeded one...
		assert.Equal(t, "login_test@example.com", body.User.Email)
		assert.Equal(t, "logger_user", body.User.DisplayName)
		assert.Equal(t, enums.RoleAdmin, body.User.Role)
		// ...and the password is never sent back to the client.
		assert.Empty(t, body.User.Password)
	}
}
// TestRefreshJWTToken exchanges a valid JWT for a fresh one via the refresh
// endpoint, going through the JWT middleware like a real request would.
func TestRefreshJWTToken(t *testing.T) {
	e := echo.New()
	req, err := http.NewRequest(echo.POST, "/api/session/refresh", nil)
	if !assert.NoError(t, err) {
		return
	}
	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", mockJwtToken))
	req.Header.Set(echo.HeaderContentType, echo.MIMEApplicationJSON)
	recorder := httptest.NewRecorder()
	ctx := e.NewContext(req, recorder)

	// Wrap the handler in the JWT middleware so the token is validated.
	handler := middleware.JWTWithConfig(config.GetJWTConfig(&models.JwtClaims{}))(controllers.APISessionRefreshJWTToken)
	if assert.NoError(t, handler(ctx)) {
		var body LoginBody
		assert.Equal(t, http.StatusOK, recorder.Code)
		err = json.Unmarshal(recorder.Body.Bytes(), &body)
		assert.Nil(t, err)
		assert.NotEmpty(t, body.Token)
		// Might want to check if the new token is usable
	}
}
// TestCreateRefreshToken requests a new refresh token for a device, going
// through the JWT middleware, and checks a non-empty token is returned.
func TestCreateRefreshToken(t *testing.T) {
	e := echo.New()
	payload := `{"device_id": "6db435f352d7ea4a67807a3feb447bf7"}`
	req, err := http.NewRequest(echo.POST, "/api/session/new", strings.NewReader(payload))
	if !assert.NoError(t, err) {
		return
	}
	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", mockJwtToken))
	req.Header.Set(echo.HeaderContentType, echo.MIMEApplicationJSON)
	recorder := httptest.NewRecorder()
	ctx := e.NewContext(req, recorder)

	// Wrap the handler in the JWT middleware so the token is validated.
	handler := middleware.JWTWithConfig(config.GetJWTConfig(&models.JwtClaims{}))(controllers.APISessionCreateRefreshToken)
	if assert.NoError(t, handler(ctx)) {
		var body RefreshTokenBody
		assert.Equal(t, http.StatusOK, recorder.Code)
		err = json.Unmarshal(recorder.Body.Bytes(), &body)
		assert.Nil(t, err)
		assert.NotEmpty(t, body.RefreshToken)
		// Might want to check if the new token is usable
	}
}
|
package storage
import (
"fmt"
"os"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
"github.com/chrislusf/seaweedfs/weed/util"
)
// loadVolumeWithoutIndex opens the volume's data file without loading its
// needle index.
func loadVolumeWithoutIndex(dirname string, collection string, id needle.VolumeId, needleMapKind NeedleMapKind) (v *Volume, err error) {
	v = &Volume{
		dir:           dirname,
		Collection:    collection,
		Id:            id,
		SuperBlock:    super_block.SuperBlock{},
		needleMapKind: needleMapKind,
	}
	err = v.load(false, false, needleMapKind, 0)
	return v, err
}
// load opens the volume's backing store (remote, existing .dat file, or a
// freshly created one), reads or writes the super block, and optionally
// loads the needle index into the configured needle map kind. On failure the
// deferred cleanup releases any handles that were opened.
func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind NeedleMapKind, preallocate int64) (err error) {
	alreadyHasSuperBlock := false

	hasLoadedVolume := false
	defer func() {
		// Release partially-opened resources on any failure path.
		if !hasLoadedVolume {
			if v.nm != nil {
				v.nm.Close()
				v.nm = nil
			}
			if v.DataBackend != nil {
				v.DataBackend.Close()
				v.DataBackend = nil
			}
		}
	}()

	hasVolumeInfoFile := v.maybeLoadVolumeInfo()

	if v.HasRemoteFile() {
		v.noWriteCanDelete = true
		v.noWriteOrDelete = false
		glog.V(0).Infof("loading volume %d from remote %v", v.Id, v.volumeInfo)
		v.LoadRemoteFile()
		alreadyHasSuperBlock = true
	} else if exists, canRead, canWrite, modifiedTime, fileSize := util.CheckFile(v.FileName(".dat")); exists {
		// open dat file
		if !canRead {
			return fmt.Errorf("cannot read Volume Data file %s", v.FileName(".dat"))
		}
		var dataFile *os.File
		if canWrite {
			dataFile, err = os.OpenFile(v.FileName(".dat"), os.O_RDWR|os.O_CREATE, 0644)
		} else {
			glog.V(0).Infof("opening %s in READONLY mode", v.FileName(".dat"))
			dataFile, err = os.Open(v.FileName(".dat"))
			v.noWriteOrDelete = true
		}
		v.lastModifiedTsSeconds = uint64(modifiedTime.Unix())
		if fileSize >= super_block.SuperBlockSize {
			alreadyHasSuperBlock = true
		}
		v.DataBackend = backend.NewDiskFile(dataFile)
	} else {
		if createDatIfMissing {
			v.DataBackend, err = backend.CreateVolumeFile(v.FileName(".dat"), preallocate, v.MemoryMapMaxSizeMb)
		} else {
			return fmt.Errorf("volume data file %s does not exist", v.FileName(".dat"))
		}
	}

	if err != nil {
		if !os.IsPermission(err) {
			return fmt.Errorf("cannot load volume data %s: %v", v.FileName(".dat"), err)
		} else {
			return fmt.Errorf("load data file %s: %v", v.FileName(".dat"), err)
		}
	}

	if alreadyHasSuperBlock {
		err = v.readSuperBlock()
		if err == nil {
			// Propagate the on-disk version into volumeInfo: without this,
			// volumes with a pre-existing .vif file kept a stale version
			// (the fix for seaweedfs issue #2487).
			v.volumeInfo.Version = uint32(v.SuperBlock.Version)
		}
		glog.V(0).Infof("readSuperBlock volume %d version %v", v.Id, v.SuperBlock.Version)
		if v.HasRemoteFile() {
			// maybe temporary network problem
			glog.Errorf("readSuperBlock remote volume %d: %v", v.Id, err)
			err = nil
		}
	} else {
		if !v.SuperBlock.Initialized() {
			return fmt.Errorf("volume %s not initialized", v.FileName(".dat"))
		}
		err = v.maybeWriteSuperBlock()
	}
	if err == nil && alsoLoadIndex {
		// adjust for existing volumes with .idx together with .dat files
		if v.dirIdx != v.dir {
			if util.FileExists(v.DataFileName() + ".idx") {
				v.dirIdx = v.dir
			}
		}
		// check volume idx files
		if err := v.checkIdxFile(); err != nil {
			glog.Fatalf("check volume idx file %s: %v", v.FileName(".idx"), err)
		}
		var indexFile *os.File
		if v.noWriteOrDelete {
			glog.V(0).Infoln("open to read file", v.FileName(".idx"))
			if indexFile, err = os.OpenFile(v.FileName(".idx"), os.O_RDONLY, 0644); err != nil {
				return fmt.Errorf("cannot read Volume Index %s: %v", v.FileName(".idx"), err)
			}
		} else {
			glog.V(1).Infoln("open to write file", v.FileName(".idx"))
			if indexFile, err = os.OpenFile(v.FileName(".idx"), os.O_RDWR|os.O_CREATE, 0644); err != nil {
				return fmt.Errorf("cannot write Volume Index %s: %v", v.FileName(".idx"), err)
			}
		}
		// A failed integrity check downgrades the volume to read-only
		// instead of failing the load.
		if v.lastAppendAtNs, err = CheckAndFixVolumeDataIntegrity(v, indexFile); err != nil {
			v.noWriteOrDelete = true
			glog.V(0).Infof("volumeDataIntegrityChecking failed %v", err)
		}

		if v.noWriteOrDelete || v.noWriteCanDelete {
			if v.nm, err = NewSortedFileNeedleMap(v.IndexFileName(), indexFile); err != nil {
				glog.V(0).Infof("loading sorted db %s error: %v", v.FileName(".sdx"), err)
			}
		} else {
			switch needleMapKind {
			case NeedleMapInMemory:
				glog.V(0).Infoln("loading index", v.FileName(".idx"), "to memory")
				if v.nm, err = LoadCompactNeedleMap(indexFile); err != nil {
					glog.V(0).Infof("loading index %s to memory error: %v", v.FileName(".idx"), err)
				}
			case NeedleMapLevelDb:
				glog.V(0).Infoln("loading leveldb", v.FileName(".ldb"))
				opts := &opt.Options{
					BlockCacheCapacity:            2 * 1024 * 1024, // default value is 8MiB
					WriteBuffer:                   1 * 1024 * 1024, // default value is 4MiB
					CompactionTableSizeMultiplier: 10,              // default value is 1
				}
				if v.nm, err = NewLevelDbNeedleMap(v.FileName(".ldb"), indexFile, opts); err != nil {
					glog.V(0).Infof("loading leveldb %s error: %v", v.FileName(".ldb"), err)
				}
			case NeedleMapLevelDbMedium:
				glog.V(0).Infoln("loading leveldb medium", v.FileName(".ldb"))
				opts := &opt.Options{
					BlockCacheCapacity:            4 * 1024 * 1024, // default value is 8MiB
					WriteBuffer:                   2 * 1024 * 1024, // default value is 4MiB
					CompactionTableSizeMultiplier: 10,              // default value is 1
				}
				if v.nm, err = NewLevelDbNeedleMap(v.FileName(".ldb"), indexFile, opts); err != nil {
					glog.V(0).Infof("loading leveldb %s error: %v", v.FileName(".ldb"), err)
				}
			case NeedleMapLevelDbLarge:
				glog.V(0).Infoln("loading leveldb large", v.FileName(".ldb"))
				opts := &opt.Options{
					BlockCacheCapacity:            8 * 1024 * 1024, // default value is 8MiB
					WriteBuffer:                   4 * 1024 * 1024, // default value is 4MiB
					CompactionTableSizeMultiplier: 10,              // default value is 1
				}
				if v.nm, err = NewLevelDbNeedleMap(v.FileName(".ldb"), indexFile, opts); err != nil {
					glog.V(0).Infof("loading leveldb %s error: %v", v.FileName(".ldb"), err)
				}
			}
		}
	}

	if !hasVolumeInfoFile {
		v.volumeInfo.Version = uint32(v.SuperBlock.Version)
		v.SaveVolumeInfo()
	}

	stats.VolumeServerVolumeCounter.WithLabelValues(v.Collection, "volume").Inc()

	if err == nil {
		hasLoadedVolume = true
	}

	return err
}
volume: fix loading old volume format
fix https://github.com/chrislusf/seaweedfs/issues/2487
package storage
import (
"fmt"
"os"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
"github.com/chrislusf/seaweedfs/weed/util"
)
func loadVolumeWithoutIndex(dirname string, collection string, id needle.VolumeId, needleMapKind NeedleMapKind) (v *Volume, err error) {
v = &Volume{dir: dirname, Collection: collection, Id: id}
v.SuperBlock = super_block.SuperBlock{}
v.needleMapKind = needleMapKind
err = v.load(false, false, needleMapKind, 0)
return
}
// load opens the volume's backing store (remote, existing .dat file, or a
// freshly created one), reads or writes the super block, and optionally
// loads the needle index into the configured needle map kind.
func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind NeedleMapKind, preallocate int64) (err error) {
	alreadyHasSuperBlock := false

	hasLoadedVolume := false
	defer func() {
		// Release partially-opened resources on any failure path so the
		// caller does not leak file handles or a half-initialized needle map.
		if !hasLoadedVolume {
			if v.nm != nil {
				v.nm.Close()
				v.nm = nil
			}
			if v.DataBackend != nil {
				v.DataBackend.Close()
				v.DataBackend = nil
			}
		}
	}()

	hasVolumeInfoFile := v.maybeLoadVolumeInfo()

	if v.HasRemoteFile() {
		v.noWriteCanDelete = true
		v.noWriteOrDelete = false
		glog.V(0).Infof("loading volume %d from remote %v", v.Id, v.volumeInfo)
		v.LoadRemoteFile()
		alreadyHasSuperBlock = true
	} else if exists, canRead, canWrite, modifiedTime, fileSize := util.CheckFile(v.FileName(".dat")); exists {
		// open dat file
		if !canRead {
			return fmt.Errorf("cannot read Volume Data file %s", v.FileName(".dat"))
		}
		var dataFile *os.File
		if canWrite {
			dataFile, err = os.OpenFile(v.FileName(".dat"), os.O_RDWR|os.O_CREATE, 0644)
		} else {
			// No write permission: open read-only and mark the volume so.
			glog.V(0).Infof("opening %s in READONLY mode", v.FileName(".dat"))
			dataFile, err = os.Open(v.FileName(".dat"))
			v.noWriteOrDelete = true
		}
		v.lastModifiedTsSeconds = uint64(modifiedTime.Unix())
		if fileSize >= super_block.SuperBlockSize {
			alreadyHasSuperBlock = true
		}
		v.DataBackend = backend.NewDiskFile(dataFile)
	} else {
		if createDatIfMissing {
			v.DataBackend, err = backend.CreateVolumeFile(v.FileName(".dat"), preallocate, v.MemoryMapMaxSizeMb)
		} else {
			return fmt.Errorf("volume data file %s does not exist", v.FileName(".dat"))
		}
	}

	if err != nil {
		if !os.IsPermission(err) {
			return fmt.Errorf("cannot load volume data %s: %v", v.FileName(".dat"), err)
		} else {
			return fmt.Errorf("load data file %s: %v", v.FileName(".dat"), err)
		}
	}

	if alreadyHasSuperBlock {
		err = v.readSuperBlock()
		if err == nil {
			// Keep volumeInfo's version in sync with the on-disk super block.
			v.volumeInfo.Version = uint32(v.SuperBlock.Version)
		}
		glog.V(0).Infof("readSuperBlock volume %d version %v", v.Id, v.SuperBlock.Version)
		if v.HasRemoteFile() {
			// maybe temporary network problem
			glog.Errorf("readSuperBlock remote volume %d: %v", v.Id, err)
			err = nil
		}
	} else {
		if !v.SuperBlock.Initialized() {
			return fmt.Errorf("volume %s not initialized", v.FileName(".dat"))
		}
		err = v.maybeWriteSuperBlock()
	}
	if err == nil && alsoLoadIndex {
		// adjust for existing volumes with .idx together with .dat files
		if v.dirIdx != v.dir {
			if util.FileExists(v.DataFileName() + ".idx") {
				v.dirIdx = v.dir
			}
		}
		// check volume idx files
		if err := v.checkIdxFile(); err != nil {
			glog.Fatalf("check volume idx file %s: %v", v.FileName(".idx"), err)
		}
		var indexFile *os.File
		if v.noWriteOrDelete {
			glog.V(0).Infoln("open to read file", v.FileName(".idx"))
			if indexFile, err = os.OpenFile(v.FileName(".idx"), os.O_RDONLY, 0644); err != nil {
				return fmt.Errorf("cannot read Volume Index %s: %v", v.FileName(".idx"), err)
			}
		} else {
			glog.V(1).Infoln("open to write file", v.FileName(".idx"))
			if indexFile, err = os.OpenFile(v.FileName(".idx"), os.O_RDWR|os.O_CREATE, 0644); err != nil {
				return fmt.Errorf("cannot write Volume Index %s: %v", v.FileName(".idx"), err)
			}
		}
		// A failed integrity check downgrades the volume to read-only
		// instead of failing the load.
		if v.lastAppendAtNs, err = CheckAndFixVolumeDataIntegrity(v, indexFile); err != nil {
			v.noWriteOrDelete = true
			glog.V(0).Infof("volumeDataIntegrityChecking failed %v", err)
		}

		if v.noWriteOrDelete || v.noWriteCanDelete {
			if v.nm, err = NewSortedFileNeedleMap(v.IndexFileName(), indexFile); err != nil {
				glog.V(0).Infof("loading sorted db %s error: %v", v.FileName(".sdx"), err)
			}
		} else {
			// Writable volume: pick the needle map implementation.
			switch needleMapKind {
			case NeedleMapInMemory:
				glog.V(0).Infoln("loading index", v.FileName(".idx"), "to memory")
				if v.nm, err = LoadCompactNeedleMap(indexFile); err != nil {
					glog.V(0).Infof("loading index %s to memory error: %v", v.FileName(".idx"), err)
				}
			case NeedleMapLevelDb:
				glog.V(0).Infoln("loading leveldb", v.FileName(".ldb"))
				opts := &opt.Options{
					BlockCacheCapacity:            2 * 1024 * 1024, // default value is 8MiB
					WriteBuffer:                   1 * 1024 * 1024, // default value is 4MiB
					CompactionTableSizeMultiplier: 10,              // default value is 1
				}
				if v.nm, err = NewLevelDbNeedleMap(v.FileName(".ldb"), indexFile, opts); err != nil {
					glog.V(0).Infof("loading leveldb %s error: %v", v.FileName(".ldb"), err)
				}
			case NeedleMapLevelDbMedium:
				glog.V(0).Infoln("loading leveldb medium", v.FileName(".ldb"))
				opts := &opt.Options{
					BlockCacheCapacity:            4 * 1024 * 1024, // default value is 8MiB
					WriteBuffer:                   2 * 1024 * 1024, // default value is 4MiB
					CompactionTableSizeMultiplier: 10,              // default value is 1
				}
				if v.nm, err = NewLevelDbNeedleMap(v.FileName(".ldb"), indexFile, opts); err != nil {
					glog.V(0).Infof("loading leveldb %s error: %v", v.FileName(".ldb"), err)
				}
			case NeedleMapLevelDbLarge:
				glog.V(0).Infoln("loading leveldb large", v.FileName(".ldb"))
				opts := &opt.Options{
					BlockCacheCapacity:            8 * 1024 * 1024, // default value is 8MiB
					WriteBuffer:                   4 * 1024 * 1024, // default value is 4MiB
					CompactionTableSizeMultiplier: 10,              // default value is 1
				}
				if v.nm, err = NewLevelDbNeedleMap(v.FileName(".ldb"), indexFile, opts); err != nil {
					glog.V(0).Infof("loading leveldb %s error: %v", v.FileName(".ldb"), err)
				}
			}
		}
	}

	if !hasVolumeInfoFile {
		v.volumeInfo.Version = uint32(v.SuperBlock.Version)
		v.SaveVolumeInfo()
	}

	stats.VolumeServerVolumeCounter.WithLabelValues(v.Collection, "volume").Inc()

	if err == nil {
		hasLoadedVolume = true
	}

	return err
}
|
//
// Copyright 2021, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package gitlab
import (
"fmt"
"net/http"
"time"
)
// ResourceLabelEventsService handles communication with the event related
// methods of the GitLab API.
//
// GitLab API docs: https://docs.gitlab.com/ee/api/resource_label_events.html
type ResourceLabelEventsService struct {
	client *Client // shared API client used to build and execute requests
}
// LabelEvent represents a resource label event.
//
// GitLab API docs:
// https://docs.gitlab.com/ee/api/resource_label_events.html#get-single-issue-label-event
type LabelEvent struct {
	ID           int        `json:"id"`
	Action       string     `json:"action"`
	CreatedAt    *time.Time `json:"created_at"`
	ResourceType string     `json:"resource_type"`
	ResourceID   int        `json:"resource_id"`
	// User is the account associated with the event.
	User struct {
		ID        int    `json:"id"`
		Name      string `json:"name"`
		Username  string `json:"username"`
		State     string `json:"state"`
		AvatarURL string `json:"avatar_url"`
		WebURL    string `json:"web_url"`
	} `json:"user"`
	// Label is the label the event refers to.
	Label struct {
		ID          int    `json:"id"`
		Name        string `json:"name"`
		Color       string `json:"color"`
		TextColor   string `json:"text_color"`
		Description string `json:"description"`
	} `json:"label"`
}
// ListLabelEventsOptions represents the options for all resource label events
// list methods.
//
// GitLab API docs:
// https://docs.gitlab.com/ee/api/resource_label_events.html#list-project-issue-label-events
type ListLabelEventsOptions struct {
	ListOptions // pagination parameters (page, per_page)
}
// ListIssueLabelEvents retrieves resource label events for the
// specified project and issue.
//
// GitLab API docs:
// https://docs.gitlab.com/ee/api/resource_label_events.html#list-project-issue-label-events
func (s *ResourceLabelEventsService) ListIssueLabelEvents(pid interface{}, issue int, opt *ListLabelEventsOptions, options ...RequestOptionFunc) ([]*LabelEvent, *Response, error) {
	project, err := parseID(pid)
	if err != nil {
		return nil, nil, err
	}

	u := fmt.Sprintf("projects/%s/issues/%d/resource_label_events", pathEscape(project), issue)
	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
	if err != nil {
		return nil, nil, err
	}

	var events []*LabelEvent
	resp, err := s.client.Do(req, &events)
	if err != nil {
		return nil, resp, err
	}
	return events, resp, nil
}
// GetIssueLabelEvent gets a single issue-label-event.
//
// GitLab API docs:
// https://docs.gitlab.com/ee/api/resource_label_events.html#get-single-issue-label-event
func (s *ResourceLabelEventsService) GetIssueLabelEvent(pid interface{}, issue int, event int, options ...RequestOptionFunc) (*LabelEvent, *Response, error) {
	project, err := parseID(pid)
	if err != nil {
		return nil, nil, err
	}

	u := fmt.Sprintf("projects/%s/issues/%d/resource_label_events/%d", pathEscape(project), issue, event)
	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
	if err != nil {
		return nil, nil, err
	}

	le := new(LabelEvent)
	resp, err := s.client.Do(req, le)
	if err != nil {
		return nil, resp, err
	}
	return le, resp, nil
}
// ListGroupEpicLabelEvents retrieves resource label events for the specified
// group and epic.
//
// GitLab API docs:
// https://docs.gitlab.com/ee/api/resource_label_events.html#list-group-epic-label-events
func (s *ResourceLabelEventsService) ListGroupEpicLabelEvents(gid interface{}, epic int, opt *ListLabelEventsOptions, options ...RequestOptionFunc) ([]*LabelEvent, *Response, error) {
	group, err := parseID(gid)
	if err != nil {
		return nil, nil, err
	}
	u := fmt.Sprintf("groups/%s/epics/%d/resource_label_events", pathEscape(group), epic)

	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
	if err != nil {
		return nil, nil, err
	}

	var events []*LabelEvent
	resp, err := s.client.Do(req, &events)
	if err != nil {
		return nil, resp, err
	}
	return events, resp, nil
}
// GetGroupEpicLabelEvent gets a single group epic label event.
//
// GitLab API docs:
// https://docs.gitlab.com/ee/api/resource_label_events.html#get-single-epic-label-event
func (s *ResourceLabelEventsService) GetGroupEpicLabelEvent(gid interface{}, epic int, event int, options ...RequestOptionFunc) (*LabelEvent, *Response, error) {
	group, err := parseID(gid)
	if err != nil {
		return nil, nil, err
	}
	u := fmt.Sprintf("groups/%s/epics/%d/resource_label_events/%d", pathEscape(group), epic, event)

	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
	if err != nil {
		return nil, nil, err
	}

	le := new(LabelEvent)
	resp, err := s.client.Do(req, le)
	if err != nil {
		return nil, resp, err
	}
	return le, resp, nil
}
// ListMergeLabelEvents retrieves resource label events for the specified
// project and merge request.
//
// GitLab API docs:
// https://docs.gitlab.com/ee/api/resource_label_events.html#list-project-merge-request-label-events
func (s *ResourceLabelEventsService) ListMergeLabelEvents(pid interface{}, request int, opt *ListLabelEventsOptions, options ...RequestOptionFunc) ([]*LabelEvent, *Response, error) {
	project, err := parseID(pid)
	if err != nil {
		return nil, nil, err
	}
	u := fmt.Sprintf("projects/%s/merge_requests/%d/resource_label_events", pathEscape(project), request)

	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
	if err != nil {
		return nil, nil, err
	}

	var events []*LabelEvent
	resp, err := s.client.Do(req, &events)
	if err != nil {
		return nil, resp, err
	}
	return events, resp, nil
}
// GetMergeRequestLabelEvent gets a single merge request label event.
//
// GitLab API docs:
// https://docs.gitlab.com/ee/api/resource_label_events.html#get-single-merge-request-label-event
func (s *ResourceLabelEventsService) GetMergeRequestLabelEvent(pid interface{}, request int, event int, options ...RequestOptionFunc) (*LabelEvent, *Response, error) {
	project, err := parseID(pid)
	if err != nil {
		return nil, nil, err
	}
	u := fmt.Sprintf("projects/%s/merge_requests/%d/resource_label_events/%d", pathEscape(project), request, event)

	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
	if err != nil {
		return nil, nil, err
	}

	le := new(LabelEvent)
	resp, err := s.client.Do(req, le)
	if err != nil {
		return nil, resp, err
	}
	return le, resp, nil
}
Fix typo in method name: rename ListMergeLabelEvents to ListMergeRequestsLabelEvents.
Fixes #1080
//
// Copyright 2021, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package gitlab
import (
"fmt"
"net/http"
"time"
)
// ResourceLabelEventsService handles communication with the event related
// methods of the GitLab API.
//
// GitLab API docs: https://docs.gitlab.com/ee/api/resource_label_events.html
type ResourceLabelEventsService struct {
	client *Client // API client used to issue the HTTP requests below.
}
// LabelEvent represents a resource label event.
//
// GitLab API docs:
// https://docs.gitlab.com/ee/api/resource_label_events.html#get-single-issue-label-event
type LabelEvent struct {
	ID           int        `json:"id"`
	Action       string     `json:"action"` // action as reported by the API (presumably "add"/"remove" — confirm against docs)
	CreatedAt    *time.Time `json:"created_at"`
	ResourceType string     `json:"resource_type"`
	ResourceID   int        `json:"resource_id"`
	// User is the account associated with the event.
	User struct {
		ID        int    `json:"id"`
		Name      string `json:"name"`
		Username  string `json:"username"`
		State     string `json:"state"`
		AvatarURL string `json:"avatar_url"`
		WebURL    string `json:"web_url"`
	} `json:"user"`
	// Label describes the label involved in the event.
	Label struct {
		ID          int    `json:"id"`
		Name        string `json:"name"`
		Color       string `json:"color"`
		TextColor   string `json:"text_color"`
		Description string `json:"description"`
	} `json:"label"`
}
// ListLabelEventsOptions represents the options for all resource label events
// list methods.
//
// GitLab API docs:
// https://docs.gitlab.com/ee/api/resource_label_events.html#list-project-issue-label-events
type ListLabelEventsOptions struct {
	ListOptions // common list parameters; see ListOptions for details.
}
// ListIssueLabelEvents retrieves resource label events for the
// specified project and issue.
//
// GitLab API docs:
// https://docs.gitlab.com/ee/api/resource_label_events.html#list-project-issue-label-events
func (s *ResourceLabelEventsService) ListIssueLabelEvents(pid interface{}, issue int, opt *ListLabelEventsOptions, options ...RequestOptionFunc) ([]*LabelEvent, *Response, error) {
	project, err := parseID(pid)
	if err != nil {
		return nil, nil, err
	}
	u := fmt.Sprintf("projects/%s/issues/%d/resource_label_events", pathEscape(project), issue)

	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
	if err != nil {
		return nil, nil, err
	}

	var events []*LabelEvent
	resp, err := s.client.Do(req, &events)
	if err != nil {
		return nil, resp, err
	}
	return events, resp, nil
}
// GetIssueLabelEvent gets a single issue-label-event.
//
// GitLab API docs:
// https://docs.gitlab.com/ee/api/resource_label_events.html#get-single-issue-label-event
func (s *ResourceLabelEventsService) GetIssueLabelEvent(pid interface{}, issue int, event int, options ...RequestOptionFunc) (*LabelEvent, *Response, error) {
	project, err := parseID(pid)
	if err != nil {
		return nil, nil, err
	}
	u := fmt.Sprintf("projects/%s/issues/%d/resource_label_events/%d", pathEscape(project), issue, event)

	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
	if err != nil {
		return nil, nil, err
	}

	le := new(LabelEvent)
	resp, err := s.client.Do(req, le)
	if err != nil {
		return nil, resp, err
	}
	return le, resp, nil
}
// ListGroupEpicLabelEvents retrieves resource label events for the specified
// group and epic.
//
// GitLab API docs:
// https://docs.gitlab.com/ee/api/resource_label_events.html#list-group-epic-label-events
func (s *ResourceLabelEventsService) ListGroupEpicLabelEvents(gid interface{}, epic int, opt *ListLabelEventsOptions, options ...RequestOptionFunc) ([]*LabelEvent, *Response, error) {
	group, err := parseID(gid)
	if err != nil {
		return nil, nil, err
	}
	u := fmt.Sprintf("groups/%s/epics/%d/resource_label_events", pathEscape(group), epic)

	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
	if err != nil {
		return nil, nil, err
	}

	var events []*LabelEvent
	resp, err := s.client.Do(req, &events)
	if err != nil {
		return nil, resp, err
	}
	return events, resp, nil
}
// GetGroupEpicLabelEvent gets a single group epic label event.
//
// GitLab API docs:
// https://docs.gitlab.com/ee/api/resource_label_events.html#get-single-epic-label-event
func (s *ResourceLabelEventsService) GetGroupEpicLabelEvent(gid interface{}, epic int, event int, options ...RequestOptionFunc) (*LabelEvent, *Response, error) {
	group, err := parseID(gid)
	if err != nil {
		return nil, nil, err
	}
	u := fmt.Sprintf("groups/%s/epics/%d/resource_label_events/%d", pathEscape(group), epic, event)

	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
	if err != nil {
		return nil, nil, err
	}

	le := new(LabelEvent)
	resp, err := s.client.Do(req, le)
	if err != nil {
		return nil, resp, err
	}
	return le, resp, nil
}
// ListMergeRequestsLabelEvents retrieves resource label events for the specified
// project and merge request.
//
// GitLab API docs:
// https://docs.gitlab.com/ee/api/resource_label_events.html#list-project-merge-request-label-events
func (s *ResourceLabelEventsService) ListMergeRequestsLabelEvents(pid interface{}, request int, opt *ListLabelEventsOptions, options ...RequestOptionFunc) ([]*LabelEvent, *Response, error) {
	project, err := parseID(pid)
	if err != nil {
		return nil, nil, err
	}
	u := fmt.Sprintf("projects/%s/merge_requests/%d/resource_label_events", pathEscape(project), request)

	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
	if err != nil {
		return nil, nil, err
	}

	var events []*LabelEvent
	resp, err := s.client.Do(req, &events)
	if err != nil {
		return nil, resp, err
	}
	return events, resp, nil
}
// GetMergeRequestLabelEvent gets a single merge request label event.
//
// GitLab API docs:
// https://docs.gitlab.com/ee/api/resource_label_events.html#get-single-merge-request-label-event
func (s *ResourceLabelEventsService) GetMergeRequestLabelEvent(pid interface{}, request int, event int, options ...RequestOptionFunc) (*LabelEvent, *Response, error) {
	project, err := parseID(pid)
	if err != nil {
		return nil, nil, err
	}
	u := fmt.Sprintf("projects/%s/merge_requests/%d/resource_label_events/%d", pathEscape(project), request, event)

	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
	if err != nil {
		return nil, nil, err
	}

	le := new(LabelEvent)
	resp, err := s.client.Do(req, le)
	if err != nil {
		return nil, resp, err
	}
	return le, resp, nil
}
|
// This code was originally based on the Digital Ocean provider from
// https://github.com/hashicorp/terraform/tree/master/builtin/providers/digitalocean.
package main
import (
"fmt"
"log"
"strings"
"time"
"github.com/JamesClonk/vultr/lib"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
// resourceVultrServer defines the Terraform schema and CRUD entry points
// for a Vultr virtual machine ("server") resource.
func resourceVultrServer() *schema.Resource {
	return &schema.Resource{
		Create: resourceVultrServerCreate,
		Read:   resourceVultrServerRead,
		Update: resourceVultrServerUpdate,
		Delete: resourceVultrServerDelete,
		Schema: map[string]*schema.Schema{
			// Read-only attributes populated from the Vultr API.
			"status": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"power_status": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"default_password": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},
			// ForceNew: changing region, plan or OS re-creates the server.
			"region_id": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
				ForceNew: true,
			},
			"plan_id": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
				ForceNew: true,
			},
			"os_id": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
				ForceNew: true,
			},
			// if you are using this make sure you set `os_id` to `159` (Custom).
			"ipxe_chain_url": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},
			// if you are using this make sure you set `os_id` to `159` (Custom).
			"iso_id": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				ForceNew: true,
			},
			"user_data": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},
			// List of SSH key IDs to install on the new server.
			"ssh_key_ids": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},
			// Addresses assigned by Vultr after provisioning.
			"ipv4_address": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"ipv4_private_address": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"ipv6": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				ForceNew: true,
			},
			"private_networking": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				ForceNew: true,
			},
			"auto_backups": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
			},
		},
	}
}
// resourceVultrServerCreate provisions a new server from the resource
// configuration and blocks until it reports active/running status.
func resourceVultrServerCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*lib.Client)
	name := d.Get("name").(string)
	regionId := d.Get("region_id").(int)
	planId := d.Get("plan_id").(int)
	osId := d.Get("os_id").(int)
	options := &lib.ServerOptions{
		IPXEChainURL: d.Get("ipxe_chain_url").(string),
		ISO:          d.Get("iso_id").(int),
		UserData:     d.Get("user_data").(string),
	}
	// Optional boolean flags are only applied when present in the config.
	if attr, ok := d.GetOk("ipv6"); ok {
		options.IPV6 = attr.(bool)
	}
	if attr, ok := d.GetOk("private_networking"); ok {
		options.PrivateNetworking = attr.(bool)
	}
	if attr, ok := d.GetOk("auto_backups"); ok {
		options.AutoBackups = attr.(bool)
	}
	// The client API takes SSH key IDs as one comma-separated string.
	sshKeyIdsLen := d.Get("ssh_key_ids.#").(int)
	if sshKeyIdsLen > 0 {
		sshKeyIds := make([]string, 0, sshKeyIdsLen)
		for i := 0; i < sshKeyIdsLen; i++ {
			key := fmt.Sprintf("ssh_key_ids.%d", i)
			sshKeyIds = append(sshKeyIds, d.Get(key).(string))
		}
		options.SSHKey = strings.Join(sshKeyIds, ",")
	}
	log.Printf("[DEBUG] Server create configuration: %#v", options)
	server, err := client.CreateServer(name, regionId, planId, osId, options)
	if err != nil {
		return fmt.Errorf("Error creating server: %s", err)
	}
	d.SetId(server.ID)
	log.Printf("[INFO] Server ID: %s", d.Id())
	// wait for the server to be "ready". we have to wait for status=active and power_status=running.
	_, err = WaitForServerAttribute(d, "active", []string{"pending"}, "status", meta)
	if err != nil {
		return fmt.Errorf(
			"Error waiting for server (%s) to become active: %s", d.Id(), err)
	}
	_, err = WaitForServerAttribute(d, "running", []string{"starting", "stopped"}, "power_status", meta)
	if err != nil {
		return fmt.Errorf(
			"Error waiting for server (%s) to become running: %s", d.Id(), err)
	}
	return resourceVultrServerRead(d, meta)
}
// resourceVultrServerRead refreshes local state from the Vultr API and
// clears the resource ID when the server no longer exists.
func resourceVultrServerRead(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*lib.Client)
	server, err := client.GetServer(d.Id())
	if err != nil {
		// check if the server no longer exists.
		// NOTE(review): relies on the exact error string produced by the
		// Vultr client library — confirm it is stable across versions.
		if err.Error() == "Invalid server." {
			d.SetId("")
			return nil
		}
		return fmt.Errorf("Error retrieving server: %s", err)
	}
	d.Set("name", server.Name)
	d.Set("region_id", server.RegionID)
	d.Set("plan_id", server.PlanID)
	d.Set("status", server.Status)
	d.Set("power_status", server.PowerStatus)
	d.Set("default_password", server.DefaultPassword)
	d.Set("ipv4_address", server.MainIP)
	d.Set("ipv4_private_address", server.InternalIP)
	// Expose SSH connection details for provisioners.
	d.SetConnInfo(map[string]string{
		"type":     "ssh",
		"host":     server.MainIP,
		"password": server.DefaultPassword,
	})
	return nil
}
// resourceVultrServerUpdate applies supported in-place changes (currently
// only renaming) and then re-reads the server state.
func resourceVultrServerUpdate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*lib.Client)

	d.Partial(true)

	if d.HasChange("name") {
		oldName, newName := d.GetChange("name")
		if err := client.RenameServer(d.Id(), newName.(string)); err != nil {
			return fmt.Errorf("Error renaming server (%s): %s", d.Id(), err)
		}
		// Block until the API reflects the new name.
		if _, err := WaitForServerAttribute(d, newName.(string), []string{"", oldName.(string)}, "name", meta); err != nil {
			return fmt.Errorf("Error waiting for rename server (%s) to finish: %s", d.Id(), err)
		}
		d.SetPartial("name")
	}

	return resourceVultrServerRead(d, meta)
}
// resourceVultrServerDelete destroys the server identified by the resource ID.
func resourceVultrServerDelete(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*lib.Client)

	log.Printf("[INFO] Deleting server: %s", d.Id())
	if err := client.DeleteServer(d.Id()); err != nil {
		return fmt.Errorf("Error deleting server: %s", err)
	}
	return nil
}
// WaitForServerAttribute blocks until the given schema attribute of the
// server reaches the target value, tolerating the listed pending states.
func WaitForServerAttribute(d *schema.ResourceData, target string, pending []string, attribute string, meta interface{}) (interface{}, error) {
	log.Printf(
		"[INFO] Waiting for server (%s) to have %s of %s",
		d.Id(), attribute, target)

	conf := &resource.StateChangeConf{
		Pending:    pending,
		Target:     []string{target},
		Refresh:    newServerStateRefreshFunc(d, attribute, meta),
		Timeout:    60 * time.Minute,
		Delay:      10 * time.Second,
		MinTimeout: 3 * time.Second,
	}
	return conf.WaitForState()
}
// TODO This function still needs a little more refactoring to make it
// cleaner and more efficient
// newServerStateRefreshFunc returns a StateRefreshFunc that re-reads the
// server and reports the current value of the given schema attribute.
func newServerStateRefreshFunc(d *schema.ResourceData, attribute string, meta interface{}) resource.StateRefreshFunc {
	client := meta.(*lib.Client)
	return func() (interface{}, string, error) {
		// Refresh the whole resource first so d holds current data.
		err := resourceVultrServerRead(d, meta)
		if err != nil {
			return nil, "", err
		}
		// See if we can access our attribute
		if attr, ok := d.GetOk(attribute); ok {
			// Retrieve the server properties
			server, err := client.GetServer(d.Id())
			if err != nil {
				return nil, "", fmt.Errorf("Error retrieving server: %s", err)
			}
			// NOTE(review): attr.(string) panics for non-string attributes;
			// current callers only wait on string attributes — confirm before reuse.
			return &server, attr.(string), nil
		}
		return nil, "", nil
	}
}
Add a snapshot_id option to the Vultr server resource
// This code was originally based on the Digital Ocean provider from
// https://github.com/hashicorp/terraform/tree/master/builtin/providers/digitalocean.
package main
import (
"fmt"
"log"
"strings"
"time"
"github.com/JamesClonk/vultr/lib"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
// resourceVultrServer defines the Terraform schema and CRUD entry points
// for a Vultr virtual machine ("server") resource.
func resourceVultrServer() *schema.Resource {
	return &schema.Resource{
		Create: resourceVultrServerCreate,
		Read:   resourceVultrServerRead,
		Update: resourceVultrServerUpdate,
		Delete: resourceVultrServerDelete,
		Schema: map[string]*schema.Schema{
			// Read-only attributes populated from the Vultr API.
			"status": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"power_status": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"default_password": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},
			// ForceNew: changing region, plan or OS re-creates the server.
			"region_id": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
				ForceNew: true,
			},
			"plan_id": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
				ForceNew: true,
			},
			"os_id": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
				ForceNew: true,
			},
			// Snapshot to restore the new server from.
			"snapshot_id": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},
			// if you are using this make sure you set `os_id` to `159` (Custom).
			"ipxe_chain_url": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},
			// if you are using this make sure you set `os_id` to `159` (Custom).
			"iso_id": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				ForceNew: true,
			},
			"user_data": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},
			// List of SSH key IDs to install on the new server.
			"ssh_key_ids": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},
			// Addresses assigned by Vultr after provisioning.
			"ipv4_address": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"ipv4_private_address": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"ipv6": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				ForceNew: true,
			},
			"private_networking": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				ForceNew: true,
			},
			"auto_backups": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
			},
		},
	}
}
// resourceVultrServerCreate provisions a new server from the resource
// configuration and blocks until it reports active/running status.
func resourceVultrServerCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*lib.Client)
	name := d.Get("name").(string)
	regionId := d.Get("region_id").(int)
	planId := d.Get("plan_id").(int)
	osId := d.Get("os_id").(int)
	options := &lib.ServerOptions{
		IPXEChainURL: d.Get("ipxe_chain_url").(string),
		ISO:          d.Get("iso_id").(int),
		UserData:     d.Get("user_data").(string),
		Snapshot:     d.Get("snapshot_id").(string),
	}
	// Optional boolean flags are only applied when present in the config.
	if attr, ok := d.GetOk("ipv6"); ok {
		options.IPV6 = attr.(bool)
	}
	if attr, ok := d.GetOk("private_networking"); ok {
		options.PrivateNetworking = attr.(bool)
	}
	if attr, ok := d.GetOk("auto_backups"); ok {
		options.AutoBackups = attr.(bool)
	}
	// The client API takes SSH key IDs as one comma-separated string.
	sshKeyIdsLen := d.Get("ssh_key_ids.#").(int)
	if sshKeyIdsLen > 0 {
		sshKeyIds := make([]string, 0, sshKeyIdsLen)
		for i := 0; i < sshKeyIdsLen; i++ {
			key := fmt.Sprintf("ssh_key_ids.%d", i)
			sshKeyIds = append(sshKeyIds, d.Get(key).(string))
		}
		options.SSHKey = strings.Join(sshKeyIds, ",")
	}
	log.Printf("[DEBUG] Server create configuration: %#v", options)
	server, err := client.CreateServer(name, regionId, planId, osId, options)
	if err != nil {
		return fmt.Errorf("Error creating server: %s", err)
	}
	d.SetId(server.ID)
	log.Printf("[INFO] Server ID: %s", d.Id())
	// wait for the server to be "ready". we have to wait for status=active and power_status=running.
	_, err = WaitForServerAttribute(d, "active", []string{"pending"}, "status", meta)
	if err != nil {
		return fmt.Errorf(
			"Error waiting for server (%s) to become active: %s", d.Id(), err)
	}
	_, err = WaitForServerAttribute(d, "running", []string{"starting", "stopped"}, "power_status", meta)
	if err != nil {
		return fmt.Errorf(
			"Error waiting for server (%s) to become running: %s", d.Id(), err)
	}
	return resourceVultrServerRead(d, meta)
}
// resourceVultrServerRead refreshes local state from the Vultr API and
// clears the resource ID when the server no longer exists.
func resourceVultrServerRead(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*lib.Client)
	server, err := client.GetServer(d.Id())
	if err != nil {
		// check if the server no longer exists.
		// NOTE(review): relies on the exact error string produced by the
		// Vultr client library — confirm it is stable across versions.
		if err.Error() == "Invalid server." {
			d.SetId("")
			return nil
		}
		return fmt.Errorf("Error retrieving server: %s", err)
	}
	d.Set("name", server.Name)
	d.Set("region_id", server.RegionID)
	d.Set("plan_id", server.PlanID)
	d.Set("status", server.Status)
	d.Set("power_status", server.PowerStatus)
	d.Set("default_password", server.DefaultPassword)
	d.Set("ipv4_address", server.MainIP)
	d.Set("ipv4_private_address", server.InternalIP)
	// Expose SSH connection details for provisioners.
	d.SetConnInfo(map[string]string{
		"type":     "ssh",
		"host":     server.MainIP,
		"password": server.DefaultPassword,
	})
	return nil
}
// resourceVultrServerUpdate applies supported in-place changes (currently
// only renaming) and then re-reads the server state.
func resourceVultrServerUpdate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*lib.Client)

	d.Partial(true)

	if d.HasChange("name") {
		oldName, newName := d.GetChange("name")
		if err := client.RenameServer(d.Id(), newName.(string)); err != nil {
			return fmt.Errorf("Error renaming server (%s): %s", d.Id(), err)
		}
		// Block until the API reflects the new name.
		if _, err := WaitForServerAttribute(d, newName.(string), []string{"", oldName.(string)}, "name", meta); err != nil {
			return fmt.Errorf("Error waiting for rename server (%s) to finish: %s", d.Id(), err)
		}
		d.SetPartial("name")
	}

	return resourceVultrServerRead(d, meta)
}
// resourceVultrServerDelete destroys the server identified by the resource ID.
func resourceVultrServerDelete(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*lib.Client)

	log.Printf("[INFO] Deleting server: %s", d.Id())
	if err := client.DeleteServer(d.Id()); err != nil {
		return fmt.Errorf("Error deleting server: %s", err)
	}
	return nil
}
// WaitForServerAttribute blocks until the given schema attribute of the
// server reaches the target value, tolerating the listed pending states.
func WaitForServerAttribute(d *schema.ResourceData, target string, pending []string, attribute string, meta interface{}) (interface{}, error) {
	log.Printf(
		"[INFO] Waiting for server (%s) to have %s of %s",
		d.Id(), attribute, target)

	conf := &resource.StateChangeConf{
		Pending:    pending,
		Target:     []string{target},
		Refresh:    newServerStateRefreshFunc(d, attribute, meta),
		Timeout:    60 * time.Minute,
		Delay:      10 * time.Second,
		MinTimeout: 3 * time.Second,
	}
	return conf.WaitForState()
}
// TODO This function still needs a little more refactoring to make it
// cleaner and more efficient
// newServerStateRefreshFunc returns a StateRefreshFunc that re-reads the
// server and reports the current value of the given schema attribute.
func newServerStateRefreshFunc(d *schema.ResourceData, attribute string, meta interface{}) resource.StateRefreshFunc {
	client := meta.(*lib.Client)
	return func() (interface{}, string, error) {
		// Refresh the whole resource first so d holds current data.
		err := resourceVultrServerRead(d, meta)
		if err != nil {
			return nil, "", err
		}
		// See if we can access our attribute
		if attr, ok := d.GetOk(attribute); ok {
			// Retrieve the server properties
			server, err := client.GetServer(d.Id())
			if err != nil {
				return nil, "", fmt.Errorf("Error retrieving server: %s", err)
			}
			// NOTE(review): attr.(string) panics for non-string attributes;
			// current callers only wait on string attributes — confirm before reuse.
			return &server, attr.(string), nil
		}
		return nil, "", nil
	}
}
|
package node
import (
"compress/gzip"
"encoding/json"
"errors"
"fmt"
"github.com/tywkeene/autobd/manifest"
"github.com/tywkeene/autobd/options"
"github.com/tywkeene/autobd/packing"
"github.com/tywkeene/autobd/version"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"path"
"strings"
"time"
)
// Get performs a GET request against url, advertising gzip encoding and
// the node's User-Agent. On HTTP 200 the caller owns resp.Body and must
// close it; on any other status the body is decoded as a JSON error
// string, closed here, and returned as an error.
func Get(url string) (*http.Response, error) {
	client := &http.Client{}
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Accept-Encoding", "gzip")
	req.Header.Set("User-Agent", "Autobd-node/"+version.Server())
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		// Close the body on the error path — previously it was leaked,
		// which also prevented connection reuse.
		defer resp.Body.Close()
		buffer, _ := DeflateResponse(resp)
		var errMsg string
		// Fall back to the HTTP status line when the server did not send
		// a decodable JSON error string (the old code returned "").
		if err := json.Unmarshal(buffer, &errMsg); err != nil || errMsg == "" {
			errMsg = resp.Status
		}
		return nil, errors.New(errMsg)
	}
	return resp, nil
}
// DeflateResponse decompresses a gzip-encoded HTTP response body and
// returns the raw bytes. The caller remains responsible for closing
// resp.Body.
func DeflateResponse(resp *http.Response) ([]byte, error) {
	reader, err := gzip.NewReader(resp.Body)
	if err != nil {
		return nil, err
	}
	defer reader.Close()
	// Propagate read errors instead of silently returning a short buffer.
	buffer, err := ioutil.ReadAll(reader)
	if err != nil {
		return nil, err
	}
	return buffer, nil
}
// WriteFile decompresses the gzip stream in source and writes the result
// to filename, creating or truncating the file.
func WriteFile(filename string, source io.Reader) error {
	writer, err := os.Create(filename)
	if err != nil {
		log.Println(err)
		return err
	}
	defer writer.Close()
	gr, err := gzip.NewReader(source)
	if err != nil {
		return err
	}
	defer gr.Close()
	// Report copy failures (corrupt stream, short write) to the caller
	// instead of discarding them.
	if _, err := io.Copy(writer, gr); err != nil {
		return err
	}
	return nil
}
// RequestVersion fetches and decodes the version information advertised
// by the given seed server.
func RequestVersion(seed string) (*version.VersionInfo, error) {
	log.Println("Requesting version from", seed)

	resp, err := Get(seed + "/version")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	buffer, err := DeflateResponse(resp)
	if err != nil {
		return nil, err
	}

	var info *version.VersionInfo
	if err := json.Unmarshal(buffer, &info); err != nil {
		return nil, err
	}
	return info, nil
}
// RequestManifest fetches the remote manifest for dir from the seed
// server and decodes it into a name-indexed map.
func RequestManifest(seed string, dir string) (map[string]*manifest.Manifest, error) {
	log.Printf("Requesting manifest for directory %s from %s", dir, seed)

	endpoint := seed + "/" + "v" + version.Major() + "/manifest?dir=" + dir
	resp, err := Get(endpoint)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	buffer, err := DeflateResponse(resp)
	if err != nil {
		return nil, err
	}

	result := make(map[string]*manifest.Manifest)
	if err := json.Unmarshal(buffer, &result); err != nil {
		return nil, err
	}
	return result, nil
}
func RequestSync(seed string, file string) error {
log.Printf("Requesting sync of file '%s' from %s", file, seed)
url := seed + "/" + "v" + version.Major() + "/sync?grab=" + file
resp, err := Get(url)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.Header.Get("Content-Type") == "application/x-tar" {
err := packing.UnpackDir(resp.Body)
if err != nil && err == io.EOF {
return nil
} else {
return err
}
}
//make sure we create the directory tree if it's needed
if tree := path.Dir(file); tree != "" {
err := os.MkdirAll(tree, 0777)
if err != nil {
return err
}
}
err = WriteFile(file, resp.Body)
return err
}
// validateServerVersion returns an error when the remote server's version
// or major API version differs from this node's.
func validateServerVersion(remote *version.VersionInfo) error {
	if version.Server() != remote.ServerVer {
		return fmt.Errorf("Mismatched version with server. Server: %s Local: %s",
			remote.ServerVer, version.Server())
	}
	// The major component of the version doubles as the API version prefix.
	remoteMajor := strings.Split(remote.ServerVer, ".")[0]
	if version.Major() != remoteMajor {
		return fmt.Errorf("Mismatched API version with server. Server: %s Local: %s",
			remoteMajor, version.Major())
	}
	return nil
}
// CompareManifest diffs the remote manifest of server against the local
// one and returns the names present remotely but missing locally.
func CompareManifest(server string) ([]string, error) {
	remoteManifest, err := RequestManifest(server, "/")
	if err != nil {
		return nil, err
	}
	localManifest, err := manifest.GetManifest("/")
	if err != nil {
		return nil, err
	}

	need := make([]string, 0)
	for name := range remoteManifest {
		if _, ok := localManifest[name]; !ok {
			log.Println("Need", name)
			need = append(need, name)
		}
	}
	return need, nil
}
// UpdateLoop validates versions against every configured seed server and
// then periodically syncs any files the local manifest is missing.
// It only returns on a version mismatch or a bad update-interval string.
func UpdateLoop(config options.NodeConf) error {
	log.Printf("Running as a node. Updating every %s with %s\n",
		config.UpdateInterval, config.Seeds)
	// Unreachable seeds are skipped with a warning; an incompatible
	// version aborts the node entirely.
	for _, server := range config.Seeds {
		remoteVer, err := RequestVersion(server)
		if err != nil {
			log.Println(err)
			continue
		}
		if err := validateServerVersion(remoteVer); err != nil {
			return err
		}
	}
	updateInterval, err := time.ParseDuration(config.UpdateInterval)
	if err != nil {
		return err
	}
	// Main sync loop: runs until the process exits; per-server and
	// per-file errors are logged and do not stop the loop.
	for {
		time.Sleep(updateInterval)
		for _, server := range config.Seeds {
			log.Printf("Updating with %s...\n", server)
			need, err := CompareManifest(server)
			if err != nil {
				log.Println(err)
				continue
			}
			if len(need) == 0 {
				log.Println("In sync with", server)
				continue
			}
			for _, filename := range need {
				err := RequestSync(server, filename)
				if err != nil {
					log.Println(err)
					continue
				}
			}
		}
	}
	// Unreachable: the loop above never terminates normally.
	return nil
}
Rewrote CompareManifest() and added compareDirs() to node/node.go
package node
import (
"compress/gzip"
"encoding/json"
"errors"
"fmt"
"github.com/tywkeene/autobd/manifest"
"github.com/tywkeene/autobd/options"
"github.com/tywkeene/autobd/packing"
"github.com/tywkeene/autobd/version"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"path"
"strings"
"time"
)
// Get performs a GET request against url, advertising gzip encoding and
// the node's User-Agent. On HTTP 200 the caller owns resp.Body and must
// close it; on any other status the body is decoded as a JSON error
// string, closed here, and returned as an error.
func Get(url string) (*http.Response, error) {
	client := &http.Client{}
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Accept-Encoding", "gzip")
	req.Header.Set("User-Agent", "Autobd-node/"+version.Server())
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		// Close the body on the error path — previously it was leaked,
		// which also prevented connection reuse.
		defer resp.Body.Close()
		buffer, _ := DeflateResponse(resp)
		var errMsg string
		// Fall back to the HTTP status line when the server did not send
		// a decodable JSON error string (the old code returned "").
		if err := json.Unmarshal(buffer, &errMsg); err != nil || errMsg == "" {
			errMsg = resp.Status
		}
		return nil, errors.New(errMsg)
	}
	return resp, nil
}
// DeflateResponse decompresses a gzip-encoded HTTP response body and
// returns the raw bytes. The caller remains responsible for closing
// resp.Body.
func DeflateResponse(resp *http.Response) ([]byte, error) {
	reader, err := gzip.NewReader(resp.Body)
	if err != nil {
		return nil, err
	}
	defer reader.Close()
	// Propagate read errors instead of silently returning a short buffer.
	buffer, err := ioutil.ReadAll(reader)
	if err != nil {
		return nil, err
	}
	return buffer, nil
}
// WriteFile decompresses the gzip stream in source and writes the result
// to filename, creating or truncating the file.
func WriteFile(filename string, source io.Reader) error {
	writer, err := os.Create(filename)
	if err != nil {
		log.Println(err)
		return err
	}
	defer writer.Close()
	gr, err := gzip.NewReader(source)
	if err != nil {
		return err
	}
	defer gr.Close()
	// Report copy failures (corrupt stream, short write) to the caller
	// instead of discarding them.
	if _, err := io.Copy(writer, gr); err != nil {
		return err
	}
	return nil
}
// RequestVersion fetches and decodes the version information advertised
// by the given seed server.
func RequestVersion(seed string) (*version.VersionInfo, error) {
	log.Println("Requesting version from", seed)

	resp, err := Get(seed + "/version")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	buffer, err := DeflateResponse(resp)
	if err != nil {
		return nil, err
	}

	var info *version.VersionInfo
	if err := json.Unmarshal(buffer, &info); err != nil {
		return nil, err
	}
	return info, nil
}
// RequestManifest fetches the remote manifest for dir from the seed
// server and decodes it into a name-indexed map.
func RequestManifest(seed string, dir string) (map[string]*manifest.Manifest, error) {
	log.Printf("Requesting manifest for directory %s from %s", dir, seed)

	endpoint := seed + "/" + "v" + version.Major() + "/manifest?dir=" + dir
	resp, err := Get(endpoint)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	buffer, err := DeflateResponse(resp)
	if err != nil {
		return nil, err
	}

	result := make(map[string]*manifest.Manifest)
	if err := json.Unmarshal(buffer, &result); err != nil {
		return nil, err
	}
	return result, nil
}
// RequestSync downloads one file (or, for tar responses, a directory
// tree) from the seed server and writes it into the local tree.
func RequestSync(seed string, file string) error {
	log.Printf("Requesting sync of file '%s' from %s", file, seed)
	url := seed + "/" + "v" + version.Major() + "/sync?grab=" + file
	resp, err := Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.Header.Get("Content-Type") == "application/x-tar" {
		// BUG FIX: the previous code returned nil whenever UnpackDir
		// errored, silently swallowing real unpack failures. Only io.EOF
		// (normal end of the tar stream) is treated as success.
		if err := packing.UnpackDir(resp.Body); err != nil && err != io.EOF {
			return err
		}
		return nil
	}
	//make sure we create the directory tree if it's needed
	if tree := path.Dir(file); tree != "" {
		if err := os.MkdirAll(tree, 0777); err != nil {
			return err
		}
	}
	return WriteFile(file, resp.Body)
}
// validateServerVersion returns an error when the remote server's version
// or major API version differs from this node's.
func validateServerVersion(remote *version.VersionInfo) error {
	if version.Server() != remote.ServerVer {
		return fmt.Errorf("Mismatched version with server. Server: %s Local: %s",
			remote.ServerVer, version.Server())
	}
	// The major component of the version doubles as the API version prefix.
	remoteMajor := strings.Split(remote.ServerVer, ".")[0]
	if version.Major() != remoteMajor {
		return fmt.Errorf("Mismatched API version with server. Server: %s Local: %s",
			remoteMajor, version.Major())
	}
	return nil
}
// compareDirs recursively diffs a remote directory manifest against the
// local one and returns the entry names missing locally.
func compareDirs(local map[string]*manifest.Manifest, remote map[string]*manifest.Manifest) []string {
	need := make([]string, 0)
	for name, info := range remote {
		if _, ok := local[name]; !ok {
			// Entry is missing locally; it (and everything under it) is needed.
			need = append(need, name)
			continue
		}
		// Present on both sides: recurse into non-empty subdirectories.
		if info.IsDir && info.Files != nil {
			need = append(need, compareDirs(local[name].Files, info.Files)...)
		}
	}
	return need
}
// CompareManifest fetches the remote manifest from the given server,
// compares it against the local manifest rooted at "/", and returns the
// list of entries the local side is missing.
func CompareManifest(server string) ([]string, error) {
	remoteManifest, err := RequestManifest(server, "/")
	if err != nil {
		return nil, err
	}
	localManifest, err := manifest.GetManifest("/")
	if err != nil {
		return nil, err
	}
	need := make([]string, 0)
	for remoteName, info := range remoteManifest {
		// Idiomatic boolean tests and a single map lookup replace the
		// original's `== true` / `== false` comparisons.
		localInfo, exists := localManifest[remoteName]
		if !exists {
			need = append(need, remoteName)
			continue
		}
		if info.IsDir {
			need = append(need, compareDirs(localInfo.Files, info.Files)...)
		}
	}
	return need, nil
}
// UpdateLoop runs a node's continuous sync cycle: it first validates the
// server version of every configured seed, then forever sleeps for the
// configured interval and pulls any files the local manifest is missing.
// It returns early only on a seed version mismatch or an unparsable
// update interval; the trailing `return nil` is otherwise unreachable.
func UpdateLoop(config options.NodeConf) error {
	log.Printf("Running as a node. Updating every %s with %s\n",
		config.UpdateInterval, config.Seeds)
	// Version handshake: an unreachable seed is only logged, but a
	// version mismatch aborts startup entirely.
	for _, server := range config.Seeds {
		remoteVer, err := RequestVersion(server)
		if err != nil {
			log.Println(err)
			continue
		}
		if err := validateServerVersion(remoteVer); err != nil {
			return err
		}
	}
	updateInterval, err := time.ParseDuration(config.UpdateInterval)
	if err != nil {
		return err
	}
	for {
		time.Sleep(updateInterval)
		for _, server := range config.Seeds {
			log.Printf("Updating with %s...\n", server)
			// Determine which files the local manifest is missing.
			need, err := CompareManifest(server)
			if err != nil {
				log.Println(err)
				continue
			}
			if len(need) == 0 {
				log.Println("In sync with", server)
				continue
			}
			log.Printf("Need %s from %s\n", need, server)
			// Fetch each missing file; individual failures are logged and
			// skipped so one bad file does not stall the whole sync pass.
			for _, filename := range need {
				err := RequestSync(server, filename)
				if err != nil {
					log.Println(err)
					continue
				}
			}
		}
	}
	return nil
}
|
// Copyright (c) 2014 - Max Ekman <max@looplab.se>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package domain
import (
"github.com/looplab/eventhorizon"
"log"
)
// Invitation is a read model object for an invitation.
type Invitation struct {
	// ID is the unique identifier, also used as the Mongo document key.
	ID eventhorizon.UUID `json:"id" bson:"_id"`
	// Name of the invited guest.
	Name string
	// Age of the invited guest.
	Age int
	// Status is one of "pending", "accepted" or "declined" (set by the
	// InvitationProjector below).
	Status string
}
// InvitationProjector is a projector that updates the invitations.
type InvitationProjector struct {
	// repository is the read repository the projected invitations are
	// saved to and loaded from.
	repository eventhorizon.ReadRepository
}
// NewInvitationProjector creates a new InvitationProjector backed by the
// given read repository.
func NewInvitationProjector(repository eventhorizon.ReadRepository) *InvitationProjector {
	return &InvitationProjector{repository: repository}
}
// HandlerType implements the HandlerType method of the EventHandler interface.
func (p *InvitationProjector) HandlerType() eventhorizon.EventHandlerType {
	const handlerName = "InvitationProjector"
	return eventhorizon.EventHandlerType(handlerName)
}
// HandleEvent implements the HandleEvent method of the EventHandler
// interface. It keeps the Invitation read model in sync with invite events.
func (p *InvitationProjector) HandleEvent(event eventhorizon.Event) {
	switch event := event.(type) {
	case *InviteCreated:
		i := &Invitation{
			ID:     event.InvitationID,
			Name:   event.Name,
			Age:    event.Age,
			Status: "pending",
		}
		p.repository.Save(i.ID, i)
	case *InviteAccepted:
		p.setStatus(event.InvitationID, "accepted")
	case *InviteDeclined:
		p.setStatus(event.InvitationID, "declined")
	}
}

// setStatus loads the invitation, updates its status and saves it back.
// FIX: the original ignored the Find error and then did an unguarded type
// assertion, panicking on a missing or mistyped invitation; failures are
// now logged and skipped instead.
func (p *InvitationProjector) setStatus(id eventhorizon.UUID, status string) {
	m, err := p.repository.Find(id)
	if err != nil {
		log.Println("InvitationProjector: could not find invitation:", err)
		return
	}
	i, ok := m.(*Invitation)
	if !ok {
		log.Println("InvitationProjector: unexpected read model type")
		return
	}
	i.Status = status
	p.repository.Save(i.ID, i)
}
// GuestList is a read model object for the guest list.
type GuestList struct {
	// Id is the event's UUID, also used as the Mongo document key.
	Id eventhorizon.UUID `json:"id" bson:"_id"`
	// NumGuests is the total count of responses (accepted + declined).
	NumGuests int
	// NumAccepted counts accepted invitations.
	NumAccepted int
	// NumDeclined counts declined invitations.
	NumDeclined int
}
// GuestListProjector is a projector that updates the guest list.
type GuestListProjector struct {
	// repository is the read repository holding the guest list.
	repository eventhorizon.ReadRepository
	// eventID identifies the single guest list document this projector
	// maintains.
	eventID eventhorizon.UUID
}
// NewGuestListProjector creates a new GuestListProjector maintaining the
// guest list identified by eventID in the given repository.
func NewGuestListProjector(repository eventhorizon.ReadRepository, eventID eventhorizon.UUID) *GuestListProjector {
	return &GuestListProjector{
		repository: repository,
		eventID:    eventID,
	}
}
// HandlerType implements the HandlerType method of the EventHandler interface.
func (p *GuestListProjector) HandlerType() eventhorizon.EventHandlerType {
	const handlerName = "GuestListProjector"
	return eventhorizon.EventHandlerType(handlerName)
}
// HandleEvent implements the HandleEvent method of the EventHandler
// interface. It keeps the aggregate guest list counters up to date.
// NOTE(review): NumGuests is only incremented on accept/decline, never on
// creation — confirm this is intended.
func (p *GuestListProjector) HandleEvent(event eventhorizon.Event) {
	switch event.(type) {
	case *InviteCreated:
		m, _ := p.repository.Find(p.eventID)
		if m == nil {
			// First event: start from a zero-valued guest list.
			m = &GuestList{
				Id: p.eventID,
			}
		}
		g, ok := m.(*GuestList)
		if !ok {
			// FIX: the original asserted unconditionally and would panic
			// on an unexpected read model type.
			log.Println("GuestListProjector: unexpected read model type")
			return
		}
		p.repository.Save(p.eventID, g)
	case *InviteAccepted:
		p.bump(func(g *GuestList) { g.NumAccepted++ })
	case *InviteDeclined:
		p.bump(func(g *GuestList) { g.NumDeclined++ })
	}
}

// bump loads the guest list, applies the given counter update, increments
// the total guest count and saves the result. Missing or mistyped read
// models are logged instead of panicking.
func (p *GuestListProjector) bump(update func(*GuestList)) {
	m, err := p.repository.Find(p.eventID)
	if err != nil {
		log.Println("GuestListProjector: could not find guest list:", err)
		return
	}
	g, ok := m.(*GuestList)
	if !ok {
		log.Println("GuestListProjector: unexpected read model type")
		return
	}
	update(g)
	g.NumGuests++
	p.repository.Save(p.eventID, g)
}
unused import removed
// Copyright (c) 2014 - Max Ekman <max@looplab.se>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package domain
import (
"github.com/looplab/eventhorizon"
)
// Invitation is a read model object for an invitation.
type Invitation struct {
	// ID is the unique identifier, also used as the Mongo document key.
	ID eventhorizon.UUID `json:"id" bson:"_id"`
	// Name of the invited guest.
	Name string
	// Age of the invited guest.
	Age int
	// Status is one of "pending", "accepted" or "declined" (set by the
	// InvitationProjector below).
	Status string
}
// InvitationProjector is a projector that updates the invitations.
type InvitationProjector struct {
	// repository is the read repository the projected invitations are
	// saved to and loaded from.
	repository eventhorizon.ReadRepository
}
// NewInvitationProjector creates a new InvitationProjector backed by the
// given read repository.
func NewInvitationProjector(repository eventhorizon.ReadRepository) *InvitationProjector {
	return &InvitationProjector{repository: repository}
}
// HandlerType implements the HandlerType method of the EventHandler interface.
func (p *InvitationProjector) HandlerType() eventhorizon.EventHandlerType {
	const handlerName = "InvitationProjector"
	return eventhorizon.EventHandlerType(handlerName)
}
// HandleEvent implements the HandleEvent method of the EventHandler
// interface. It keeps the Invitation read model in sync with invite events.
func (p *InvitationProjector) HandleEvent(event eventhorizon.Event) {
	switch event := event.(type) {
	case *InviteCreated:
		i := &Invitation{
			ID:     event.InvitationID,
			Name:   event.Name,
			Age:    event.Age,
			Status: "pending",
		}
		p.repository.Save(i.ID, i)
	case *InviteAccepted:
		m, err := p.repository.Find(event.InvitationID)
		if err != nil {
			// FIX: the original ignored the Find error; the following
			// type assertion would then panic on a missing invitation.
			return
		}
		i, ok := m.(*Invitation)
		if !ok {
			return
		}
		i.Status = "accepted"
		p.repository.Save(i.ID, i)
	case *InviteDeclined:
		m, err := p.repository.Find(event.InvitationID)
		if err != nil {
			return
		}
		i, ok := m.(*Invitation)
		if !ok {
			return
		}
		i.Status = "declined"
		p.repository.Save(i.ID, i)
	}
}
// GuestList is a read model object for the guest list.
type GuestList struct {
	// Id is the event's UUID, also used as the Mongo document key.
	Id eventhorizon.UUID `json:"id" bson:"_id"`
	// NumGuests is the total count of responses (accepted + declined).
	NumGuests int
	// NumAccepted counts accepted invitations.
	NumAccepted int
	// NumDeclined counts declined invitations.
	NumDeclined int
}
// GuestListProjector is a projector that updates the guest list.
type GuestListProjector struct {
	// repository is the read repository holding the guest list.
	repository eventhorizon.ReadRepository
	// eventID identifies the single guest list document this projector
	// maintains.
	eventID eventhorizon.UUID
}
// NewGuestListProjector creates a new GuestListProjector maintaining the
// guest list identified by eventID in the given repository.
func NewGuestListProjector(repository eventhorizon.ReadRepository, eventID eventhorizon.UUID) *GuestListProjector {
	return &GuestListProjector{
		repository: repository,
		eventID:    eventID,
	}
}
// HandlerType implements the HandlerType method of the EventHandler interface.
func (p *GuestListProjector) HandlerType() eventhorizon.EventHandlerType {
	const handlerName = "GuestListProjector"
	return eventhorizon.EventHandlerType(handlerName)
}
// HandleEvent implements the HandleEvent method of the EventHandler
// interface. It keeps the aggregate guest list counters up to date.
// NOTE(review): NumGuests is only incremented on accept/decline, never on
// creation — confirm this is intended.
func (p *GuestListProjector) HandleEvent(event eventhorizon.Event) {
	switch event.(type) {
	case *InviteCreated:
		m, _ := p.repository.Find(p.eventID)
		if m == nil {
			// First event: start from a zero-valued guest list.
			m = &GuestList{
				Id: p.eventID,
			}
		}
		g, ok := m.(*GuestList)
		if !ok {
			// FIX: guard the assertion; the original would panic on an
			// unexpected read model type.
			return
		}
		p.repository.Save(p.eventID, g)
	case *InviteAccepted:
		m, err := p.repository.Find(p.eventID)
		if err != nil {
			// FIX: the original ignored the Find error; the assertion
			// below would then panic on a missing guest list.
			return
		}
		g, ok := m.(*GuestList)
		if !ok {
			return
		}
		g.NumAccepted++
		g.NumGuests++
		p.repository.Save(p.eventID, g)
	case *InviteDeclined:
		m, err := p.repository.Find(p.eventID)
		if err != nil {
			return
		}
		g, ok := m.(*GuestList)
		if !ok {
			return
		}
		g.NumDeclined++
		g.NumGuests++
		p.repository.Save(p.eventID, g)
	}
}
|
package graft
import (
"log"
"testing"
"time"
)
// TestNewNode is a manual smoke test: it brings up a node against two
// hard-coded cluster addresses and then loops forever printing state
// changes.
// NOTE(review): the final for-loop has no exit path, so this test never
// returns and will hit the test binary timeout — not suitable for
// automated runs.
func TestNewNode(t *testing.T) {
	node, err := NewNode([]string{"10.88.147.130:1213", "10.88.147.2:1213"}, "10.88.104.33", "test.log", 1213)
	if err != nil {
		log.Println(err.Error())
		return
	}
	log.Println("test")
	if node.State() == LEADER {
		log.Println("i am leader")
	}
	// Poll once a second: print state-change notifications when they
	// arrive, otherwise print the current state.
	for {
		select {
		case sc := <-node.StateChg:
			log.Println(sc)
		default:
			log.Println(node.State())
			time.Sleep(1 * time.Second)
		}
	}
}
remove unused log
package graft
import (
"log"
"testing"
)
// TestNewNode is a manual smoke test: it brings up a node against two
// hard-coded cluster addresses and then blocks forever printing state
// changes.
// NOTE(review): the select has no exit path, so this test never returns
// and will hit the test binary timeout — not suitable for automated runs.
func TestNewNode(t *testing.T) {
	node, err := NewNode([]string{"10.88.147.130:1213", "10.88.147.2:1213"}, "10.88.104.33", "test.log", 1213)
	if err != nil {
		log.Println(err.Error())
		return
	}
	if node.State() == LEADER {
		log.Println("i am leader")
	}
	// Block on state-change notifications and print each one.
	for {
		select {
		case sc := <-node.StateChg:
			log.Println(sc)
		}
	}
}
|
package encrypted
import (
"encoding/json"
"io/ioutil"
"log"
"os"
"strings"
"github.com/tinzenite/shared"
)
// chaninterface implements the channel callbacks for the encrypted peer.
type chaninterface struct {
	// reference back to encrypted
	enc *Encrypted
}
// createChanInterface returns a new callback interface bound to enc.
func createChanInterface(enc *Encrypted) *chaninterface {
	ci := &chaninterface{enc: enc}
	return ci
}
// ----------------------- Callbacks ------------------------------
/*
OnFriendRequest is called when a friend request is received. Due to the nature
of the encrypted peer, it will NEVER accept friend requests.
*/
func (c *chaninterface) OnFriendRequest(address, message string) {
	// for now only accept connection from myself for testing
	// NOTE(review): address[:8] panics if address is shorter than 8
	// characters — presumably peer addresses have a fixed length; confirm.
	if address[:8] == "ed284a9f" {
		// TODO remove once done debugging / "dev"-ing
		log.Println("OnFriendRequest: Accepting connection from root.")
		c.enc.channel.AcceptConnection(address)
		return
	}
	// TODO usually encrypted should NEVER accept a friend request
	log.Println("OnFriendRequest: Connection request from", address[:8]+", ignoring!")
}
// OnMessage handles an incoming text message. JSON messages are dispatched
// by their Type field: lock messages may arrive even while unlocked, all
// other types require that we are currently locked to the sender. Text
// that is not valid JSON falls through to a set of temporary debug
// commands ("push", "lock", "unlock", "request").
func (c *chaninterface) OnMessage(address, message string) {
	// check if lock message, or request, or send message
	v := &shared.Message{}
	err := json.Unmarshal([]byte(message), v)
	if err == nil {
		// special case for lock messages (can be received if not locked)
		if v.Type == shared.MsgLock {
			msg := &shared.LockMessage{}
			err := json.Unmarshal([]byte(message), msg)
			if err != nil {
				log.Println("OnMessage: failed to parse JSON!", err)
				return
			}
			c.handleLockMessage(address, msg)
			return
		}
		// for all others ensure that we are locked correctly
		if !c.enc.checkLock(address) {
			// if not warn and ignore message
			log.Println("OnMessage: not locked to given address!", address[:8])
			// TODO send notify that they are unlocked back?
			return
		}
		// if correctly locked handle message according to type
		switch msgType := v.Type; msgType {
		case shared.MsgRequest:
			// re-unmarshal into the concrete message type
			msg := &shared.RequestMessage{}
			err := json.Unmarshal([]byte(message), msg)
			if err != nil {
				log.Println("OnMessage: failed to parse JSON!", err)
				return
			}
			c.handleRequestMessage(address, msg)
		case shared.MsgPush:
			msg := &shared.PushMessage{}
			err := json.Unmarshal([]byte(message), msg)
			if err != nil {
				log.Println("OnMessage: failed to parse JSON!", err)
				return
			}
			c.handlePushMessage(address, msg)
		default:
			log.Println("OnMessage: WARNING: Unknown object received:", msgType.String())
		}
		// in any case return as we are done handling them
		return
	}
	// if unmarshal didn't work check for plain commands:
	// TODO these are temporary until it works, remove them later
	switch message {
	case "push":
		log.Println("Sending example push message.")
		pm := shared.CreatePushMessage("ID_HERE", shared.OtObject)
		c.enc.channel.Send(address, pm.JSON())
	case "lock":
		log.Println("Sending example lock message.")
		lm := shared.CreateLockMessage(shared.LoRequest)
		c.enc.channel.Send(address, lm.JSON())
	case "unlock":
		log.Println("Sending example unlock message.")
		lm := shared.CreateLockMessage(shared.LoRelease)
		c.enc.channel.Send(address, lm.JSON())
	case "request":
		log.Println("Sending example request message.")
		rm := shared.CreateRequestMessage(shared.OtObject, "ID_HERE")
		c.enc.channel.Send(address, rm.JSON())
	default:
		log.Println("Received:", message)
		c.enc.channel.Send(address, "Received non JSON message.")
	}
}
/*
OnAllowFile is called when a file is to be received. Name should be the
file identification! Returns whether the transfer is accepted and, if so,
the local path to write the incoming file to.
*/
func (c *chaninterface) OnAllowFile(address, name string) (bool, string) {
	// refuse transfers from peers we are not locked to
	if !c.enc.checkLock(address) {
		log.Println("OnAllowFile: not locked to given address, refusing!")
		return false, ""
	}
	//check against allowed files and allow if ok
	key := c.buildKey(address, name)
	_, exists := c.enc.allowedTransfers[key]
	if !exists {
		log.Println("OnAllowFile: refusing file transfer due to no allowance!")
		return false, ""
	}
	//write to RECEIVINGDIR
	return true, c.enc.RootPath + "/" + shared.RECEIVINGDIR + "/" + key
}
/*
OnFileReceived is called when a file has been successfully received. The
transfer name must have the form "<tag>:<identification>"; the received
data is stored under that identification (the model file is written
straight to disk) and the temporary file is removed afterwards.
*/
func (c *chaninterface) OnFileReceived(address, path, name string) {
	// note: no lock check so that locks don't have to stay on for long file transfers
	// need to read id so that we can write it to the correct location
	parts := strings.Split(name, ":")
	if len(parts) < 2 {
		// FIX: the original indexed [1] unconditionally and would panic
		// on a transfer name without a ':' separator.
		log.Println("OnFileReceived: invalid transfer name:", name)
		return
	}
	identification := parts[1]
	// read data
	data, err := ioutil.ReadFile(path)
	if err != nil {
		log.Println("OnFileReceived: failed to read file:", err)
		return
	}
	// TODO differentiate complete ORGDIR FIXME the IF should be a switch
	// model is not written to storage but to disk directly
	if identification == shared.IDMODEL {
		err = ioutil.WriteFile(c.enc.RootPath+"/"+shared.IDMODEL, data, shared.FILEPERMISSIONMODE)
	} else {
		// write to storage
		err = c.enc.storage.Store(identification, data)
	}
	if err != nil {
		log.Println("OnFileReceived: storing to storage failed:", err)
		return
	}
	// remove temp file
	err = os.Remove(path)
	if err != nil {
		log.Println("OnFileReceived: failed to remove temp file:", err)
		return
	}
}
/*
OnFileCanceled is called when a file has failed to be successfully received.
*/
func (c *chaninterface) OnFileCanceled(address, path string) {
	// note: no lock check so that locks don't have to stay on for long file transfers
	log.Println("OnFileCanceled:", path)
	// remove temp file if exists
	if err := os.Remove(path); err != nil {
		log.Println("OnFileCanceled: failed to remove temp file:", err)
	}
}
/*
OnConnected is called when another peer comes online.
*/
func (c *chaninterface) OnConnected(address string) {
	// only notify log, nothing else to do for us here
	// NOTE(review): assumes address has at least 8 characters.
	log.Println("OnConnected:", address[:8])
}
add laptop to debug access
package encrypted
import (
"encoding/json"
"io/ioutil"
"log"
"os"
"strings"
"github.com/tinzenite/shared"
)
// chaninterface implements the channel callbacks for the encrypted peer.
type chaninterface struct {
	// reference back to encrypted
	enc *Encrypted
}
// createChanInterface returns a new callback interface bound to enc.
func createChanInterface(enc *Encrypted) *chaninterface {
	ci := &chaninterface{enc: enc}
	return ci
}
// ----------------------- Callbacks ------------------------------
/*
OnFriendRequest is called when a friend request is received. Due to the nature
of the encrypted peer, it will NEVER accept friend requests.
*/
func (c *chaninterface) OnFriendRequest(address, message string) {
	// for now only accept connection from myself for testing
	// NOTE(review): address[:8] panics if address is shorter than 8
	// characters — presumably peer addresses have a fixed length; confirm.
	if address[:8] == "ed284a9f" || address[:8] == "866ba1b5" {
		// TODO remove once done debugging / "dev"-ing
		log.Println("OnFriendRequest: Accepting connection from root.")
		c.enc.channel.AcceptConnection(address)
		return
	}
	// TODO usually encrypted should NEVER accept a friend request
	log.Println("OnFriendRequest: Connection request from", address[:8]+", ignoring!")
}
// OnMessage handles an incoming text message. JSON messages are dispatched
// by their Type field: lock messages may arrive even while unlocked, all
// other types require that we are currently locked to the sender. Text
// that is not valid JSON falls through to a set of temporary debug
// commands ("push", "lock", "unlock", "request").
func (c *chaninterface) OnMessage(address, message string) {
	// check if lock message, or request, or send message
	v := &shared.Message{}
	err := json.Unmarshal([]byte(message), v)
	if err == nil {
		// special case for lock messages (can be received if not locked)
		if v.Type == shared.MsgLock {
			msg := &shared.LockMessage{}
			err := json.Unmarshal([]byte(message), msg)
			if err != nil {
				log.Println("OnMessage: failed to parse JSON!", err)
				return
			}
			c.handleLockMessage(address, msg)
			return
		}
		// for all others ensure that we are locked correctly
		if !c.enc.checkLock(address) {
			// if not warn and ignore message
			log.Println("OnMessage: not locked to given address!", address[:8])
			// TODO send notify that they are unlocked back?
			return
		}
		// if correctly locked handle message according to type
		switch msgType := v.Type; msgType {
		case shared.MsgRequest:
			// re-unmarshal into the concrete message type
			msg := &shared.RequestMessage{}
			err := json.Unmarshal([]byte(message), msg)
			if err != nil {
				log.Println("OnMessage: failed to parse JSON!", err)
				return
			}
			c.handleRequestMessage(address, msg)
		case shared.MsgPush:
			msg := &shared.PushMessage{}
			err := json.Unmarshal([]byte(message), msg)
			if err != nil {
				log.Println("OnMessage: failed to parse JSON!", err)
				return
			}
			c.handlePushMessage(address, msg)
		default:
			log.Println("OnMessage: WARNING: Unknown object received:", msgType.String())
		}
		// in any case return as we are done handling them
		return
	}
	// if unmarshal didn't work check for plain commands:
	// TODO these are temporary until it works, remove them later
	switch message {
	case "push":
		log.Println("Sending example push message.")
		pm := shared.CreatePushMessage("ID_HERE", shared.OtObject)
		c.enc.channel.Send(address, pm.JSON())
	case "lock":
		log.Println("Sending example lock message.")
		lm := shared.CreateLockMessage(shared.LoRequest)
		c.enc.channel.Send(address, lm.JSON())
	case "unlock":
		log.Println("Sending example unlock message.")
		lm := shared.CreateLockMessage(shared.LoRelease)
		c.enc.channel.Send(address, lm.JSON())
	case "request":
		log.Println("Sending example request message.")
		rm := shared.CreateRequestMessage(shared.OtObject, "ID_HERE")
		c.enc.channel.Send(address, rm.JSON())
	default:
		log.Println("Received:", message)
		c.enc.channel.Send(address, "Received non JSON message.")
	}
}
/*
OnAllowFile is called when a file is to be received. Name should be the
file identification! Returns whether the transfer is accepted and, if so,
the local path to write the incoming file to.
*/
func (c *chaninterface) OnAllowFile(address, name string) (bool, string) {
	// refuse transfers from peers we are not locked to
	if !c.enc.checkLock(address) {
		log.Println("OnAllowFile: not locked to given address, refusing!")
		return false, ""
	}
	//check against allowed files and allow if ok
	key := c.buildKey(address, name)
	_, exists := c.enc.allowedTransfers[key]
	if !exists {
		log.Println("OnAllowFile: refusing file transfer due to no allowance!")
		return false, ""
	}
	//write to RECEIVINGDIR
	return true, c.enc.RootPath + "/" + shared.RECEIVINGDIR + "/" + key
}
/*
OnFileReceived is called when a file has been successfully received. The
transfer name must have the form "<tag>:<identification>"; the received
data is stored under that identification (the model file is written
straight to disk) and the temporary file is removed afterwards.
*/
func (c *chaninterface) OnFileReceived(address, path, name string) {
	// note: no lock check so that locks don't have to stay on for long file transfers
	// need to read id so that we can write it to the correct location
	parts := strings.Split(name, ":")
	if len(parts) < 2 {
		// FIX: the original indexed [1] unconditionally and would panic
		// on a transfer name without a ':' separator.
		log.Println("OnFileReceived: invalid transfer name:", name)
		return
	}
	identification := parts[1]
	// read data
	data, err := ioutil.ReadFile(path)
	if err != nil {
		log.Println("OnFileReceived: failed to read file:", err)
		return
	}
	// TODO differentiate complete ORGDIR FIXME the IF should be a switch
	// model is not written to storage but to disk directly
	if identification == shared.IDMODEL {
		err = ioutil.WriteFile(c.enc.RootPath+"/"+shared.IDMODEL, data, shared.FILEPERMISSIONMODE)
	} else {
		// write to storage
		err = c.enc.storage.Store(identification, data)
	}
	if err != nil {
		log.Println("OnFileReceived: storing to storage failed:", err)
		return
	}
	// remove temp file
	err = os.Remove(path)
	if err != nil {
		log.Println("OnFileReceived: failed to remove temp file:", err)
		return
	}
}
/*
OnFileCanceled is called when a file has failed to be successfully received.
*/
func (c *chaninterface) OnFileCanceled(address, path string) {
	// note: no lock check so that locks don't have to stay on for long file transfers
	log.Println("OnFileCanceled:", path)
	// remove temp file if exists
	if err := os.Remove(path); err != nil {
		log.Println("OnFileCanceled: failed to remove temp file:", err)
	}
}
/*
OnConnected is called when another peer comes online.
*/
func (c *chaninterface) OnConnected(address string) {
	// only notify log, nothing else to do for us here
	// NOTE(review): assumes address has at least 8 characters.
	log.Println("OnConnected:", address[:8])
}
|
package nomad
import (
"fmt"
"io"
"reflect"
"sync"
"time"
metrics "github.com/armon/go-metrics"
log "github.com/hashicorp/go-hclog"
memdb "github.com/hashicorp/go-memdb"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad/state"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/scheduler"
"github.com/hashicorp/raft"
"github.com/ugorji/go/codec"
)
const (
	// timeTableGranularity is the granularity of index to time tracking
	timeTableGranularity = 5 * time.Minute
	// timeTableLimit is the maximum limit of our tracking
	timeTableLimit = 72 * time.Hour
)
// SnapshotType is prefixed to a record in the FSM snapshot
// so that we can determine the type for restore
type SnapshotType byte
// Snapshot record kinds. NOTE(review): values are derived from iota and
// appear in persisted snapshots, so new entries should only be appended
// — confirm before reordering.
const (
	NodeSnapshot SnapshotType = iota
	JobSnapshot
	IndexSnapshot
	EvalSnapshot
	AllocSnapshot
	TimeTableSnapshot
	PeriodicLaunchSnapshot
	JobSummarySnapshot
	VaultAccessorSnapshot
	JobVersionSnapshot
	DeploymentSnapshot
	ACLPolicySnapshot
	ACLTokenSnapshot
	SchedulerConfigSnapshot
)
// LogApplier is the definition of a function that can apply a Raft log
type LogApplier func(buf []byte, index uint64) interface{}
// LogAppliers is a mapping of the Raft MessageType to the appropriate log
// applier
type LogAppliers map[structs.MessageType]LogApplier
// SnapshotRestorer is the definition of a function that can restore a
// snapshot record from the given decoder. (The original comment was a
// copy-paste of LogApplier's.)
type SnapshotRestorer func(restore *state.StateRestore, dec *codec.Decoder) error
// SnapshotRestorers is a mapping of the SnapshotType to the appropriate
// snapshot restorer.
type SnapshotRestorers map[SnapshotType]SnapshotRestorer
// nomadFSM implements a finite state machine that is used
// along with Raft to provide strong consistency. We implement
// this outside the Server to avoid exposing this outside the package.
type nomadFSM struct {
	// evalBroker, blockedEvals and periodicDispatcher are wired from the
	// FSMConfig and are notified as applied log entries affect them.
	evalBroker         *EvalBroker
	blockedEvals       *BlockedEvals
	periodicDispatcher *PeriodicDispatch
	// logger is the named ("fsm") sub-logger of the configured logger.
	logger log.Logger
	// state is the authoritative state store; Restore() replaces it
	// wholesale with a fresh store.
	state *state.StateStore
	// timetable tracks Raft index to wall-clock time.
	timetable *TimeTable
	// config is the FSM config
	config *FSMConfig
	// enterpriseAppliers holds the set of enterprise only LogAppliers
	enterpriseAppliers LogAppliers
	// enterpriseRestorers holds the set of enterprise only snapshot restorers
	enterpriseRestorers SnapshotRestorers
	// stateLock is only used to protect outside callers to State() from
	// racing with Restore(), which is called by Raft (it puts in a totally
	// new state store). Everything internal here is synchronized by the
	// Raft side, so doesn't need to lock this.
	stateLock sync.RWMutex
}
// nomadSnapshot is used to provide a snapshot of the current
// state in a way that can be accessed concurrently with operations
// that may modify the live state.
type nomadSnapshot struct {
	// snap is the point-in-time state snapshot being persisted.
	snap *state.StateSnapshot
	// timetable is the index-to-time table captured with the snapshot.
	timetable *TimeTable
}
// snapshotHeader is the first entry in our snapshot
type snapshotHeader struct {
}
// FSMConfig is used to configure the FSM
type FSMConfig struct {
	// EvalBroker is the evaluation broker evaluations should be added to
	EvalBroker *EvalBroker
	// Periodic is the periodic job dispatcher that periodic jobs should be
	// added/removed from
	Periodic *PeriodicDispatch
	// BlockedEvals is the blocked eval tracker that blocked evaluations should
	// be added to.
	Blocked *BlockedEvals
	// Logger is the logger used by the FSM
	Logger log.Logger
	// Region is the region of the server embedding the FSM
	Region string
}
// NewFSM is used to construct a new FSM with a blank state.
// (Comment fixed: it previously referred to a non-existent "NewFSMPath".)
func NewFSM(config *FSMConfig) (*nomadFSM, error) {
	// Create a state store
	sconfig := &state.StateStoreConfig{
		Logger: config.Logger,
		Region: config.Region,
	}
	state, err := state.NewStateStore(sconfig)
	if err != nil {
		return nil, err
	}
	fsm := &nomadFSM{
		evalBroker:          config.EvalBroker,
		periodicDispatcher:  config.Periodic,
		blockedEvals:        config.Blocked,
		logger:              config.Logger.Named("fsm"),
		config:              config,
		state:               state,
		timetable:           NewTimeTable(timeTableGranularity, timeTableLimit),
		enterpriseAppliers:  make(map[structs.MessageType]LogApplier, 8),
		enterpriseRestorers: make(map[SnapshotType]SnapshotRestorer, 8),
	}
	// Register all the log applier functions
	fsm.registerLogAppliers()
	// Register all the snapshot restorer functions
	fsm.registerSnapshotRestorers()
	return fsm, nil
}
// Close is used to cleanup resources associated with the FSM
func (n *nomadFSM) Close() error {
	return nil
}
// State is used to return a handle to the current state
func (n *nomadFSM) State() *state.StateStore {
	// stateLock guards outside callers against Restore() swapping in a
	// brand new state store concurrently.
	n.stateLock.RLock()
	defer n.stateLock.RUnlock()
	return n.state
}
// TimeTable returns the time table of transactions
func (n *nomadFSM) TimeTable() *TimeTable {
	return n.timetable
}
// Apply is invoked by Raft once a log entry is committed. It dispatches on
// the message type encoded in the first byte of the log data and applies
// the remaining payload to the state store at the given Raft index.
func (n *nomadFSM) Apply(log *raft.Log) interface{} {
	buf := log.Data
	msgType := structs.MessageType(buf[0])
	// Witness this write
	n.timetable.Witness(log.Index, time.Now().UTC())
	// Check if this message type should be ignored when unknown. This is
	// used so that new commands can be added with developer control if older
	// versions can safely ignore the command, or if they should crash.
	ignoreUnknown := false
	if msgType&structs.IgnoreUnknownTypeFlag == structs.IgnoreUnknownTypeFlag {
		msgType &= ^structs.IgnoreUnknownTypeFlag
		ignoreUnknown = true
	}
	// Dispatch to the handler for this message type; every handler decodes
	// buf[1:] (the payload after the type byte) itself.
	switch msgType {
	case structs.NodeRegisterRequestType:
		return n.applyUpsertNode(buf[1:], log.Index)
	case structs.NodeDeregisterRequestType:
		return n.applyDeregisterNode(buf[1:], log.Index)
	case structs.NodeUpdateStatusRequestType:
		return n.applyStatusUpdate(buf[1:], log.Index)
	case structs.NodeUpdateDrainRequestType:
		return n.applyDrainUpdate(buf[1:], log.Index)
	case structs.JobRegisterRequestType:
		return n.applyUpsertJob(buf[1:], log.Index)
	case structs.JobDeregisterRequestType:
		return n.applyDeregisterJob(buf[1:], log.Index)
	case structs.EvalUpdateRequestType:
		return n.applyUpdateEval(buf[1:], log.Index)
	case structs.EvalDeleteRequestType:
		return n.applyDeleteEval(buf[1:], log.Index)
	case structs.AllocUpdateRequestType:
		return n.applyAllocUpdate(buf[1:], log.Index)
	case structs.AllocClientUpdateRequestType:
		return n.applyAllocClientUpdate(buf[1:], log.Index)
	case structs.ReconcileJobSummariesRequestType:
		return n.applyReconcileSummaries(buf[1:], log.Index)
	case structs.VaultAccessorRegisterRequestType:
		return n.applyUpsertVaultAccessor(buf[1:], log.Index)
	case structs.VaultAccessorDeregisterRequestType:
		return n.applyDeregisterVaultAccessor(buf[1:], log.Index)
	case structs.ApplyPlanResultsRequestType:
		return n.applyPlanResults(buf[1:], log.Index)
	case structs.DeploymentStatusUpdateRequestType:
		return n.applyDeploymentStatusUpdate(buf[1:], log.Index)
	case structs.DeploymentPromoteRequestType:
		return n.applyDeploymentPromotion(buf[1:], log.Index)
	case structs.DeploymentAllocHealthRequestType:
		return n.applyDeploymentAllocHealth(buf[1:], log.Index)
	case structs.DeploymentDeleteRequestType:
		return n.applyDeploymentDelete(buf[1:], log.Index)
	case structs.JobStabilityRequestType:
		return n.applyJobStability(buf[1:], log.Index)
	case structs.ACLPolicyUpsertRequestType:
		return n.applyACLPolicyUpsert(buf[1:], log.Index)
	case structs.ACLPolicyDeleteRequestType:
		return n.applyACLPolicyDelete(buf[1:], log.Index)
	case structs.ACLTokenUpsertRequestType:
		return n.applyACLTokenUpsert(buf[1:], log.Index)
	case structs.ACLTokenDeleteRequestType:
		return n.applyACLTokenDelete(buf[1:], log.Index)
	case structs.ACLTokenBootstrapRequestType:
		return n.applyACLTokenBootstrap(buf[1:], log.Index)
	case structs.AutopilotRequestType:
		return n.applyAutopilotUpdate(buf[1:], log.Index)
	case structs.UpsertNodeEventsType:
		return n.applyUpsertNodeEvent(buf[1:], log.Index)
	case structs.JobBatchDeregisterRequestType:
		return n.applyBatchDeregisterJob(buf[1:], log.Index)
	case structs.AllocUpdateDesiredTransitionRequestType:
		return n.applyAllocUpdateDesiredTransition(buf[1:], log.Index)
	case structs.NodeUpdateEligibilityRequestType:
		return n.applyNodeEligibilityUpdate(buf[1:], log.Index)
	case structs.BatchNodeUpdateDrainRequestType:
		return n.applyBatchDrainUpdate(buf[1:], log.Index)
	case structs.SchedulerConfigRequestType:
		return n.applySchedulerConfigUpdate(buf[1:], log.Index)
	}
	// Check enterprise only message types.
	if applier, ok := n.enterpriseAppliers[msgType]; ok {
		return applier(buf[1:], log.Index)
	}
	// We didn't match anything, either panic or ignore
	if ignoreUnknown {
		n.logger.Warn("ignoring unknown message type, upgrade to newer version", "msg_type", msgType)
		return nil
	}
	panic(fmt.Errorf("failed to apply request: %#v", buf))
}
// applyUpsertNode registers or updates a client node in the state store
// and unblocks evaluations for its node class when it is ready.
func (n *nomadFSM) applyUpsertNode(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "register_node"}, time.Now())
	var req structs.NodeRegisterRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}
	// Handle upgrade paths
	req.Node.Canonicalize()
	if err := n.state.UpsertNode(index, req.Node); err != nil {
		n.logger.Error("UpsertNode failed", "error", err)
		return err
	}
	// Unblock evals for the nodes computed node class if it is in a ready
	// state.
	if req.Node.Status == structs.NodeStatusReady {
		n.blockedEvals.Unblock(req.Node.ComputedClass, index)
	}
	return nil
}
// applyDeregisterNode removes one or more client nodes from the state
// store, accepting both the batched and the legacy single-ID request form.
func (n *nomadFSM) applyDeregisterNode(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "deregister_node"}, time.Now())
	var req structs.NodeDeregisterRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}
	// Messages pre 0.9.3 use a single NodeID
	ids := req.NodeIDs
	if len(ids) == 0 {
		ids = []string{req.NodeID}
	}
	if err := n.state.DeleteNode(index, ids); err != nil {
		n.logger.Error("DeleteNode failed", "error", err)
		return err
	}
	return nil
}
// applyStatusUpdate updates a node's status in the state store and, when
// the node becomes ready, unblocks evaluations for its node class.
func (n *nomadFSM) applyStatusUpdate(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "node_status_update"}, time.Now())
	var req structs.NodeUpdateStatusRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}
	if err := n.state.UpdateNodeStatus(index, req.NodeID, req.Status, req.UpdatedAt, req.NodeEvent); err != nil {
		n.logger.Error("UpdateNodeStatus failed", "error", err)
		return err
	}
	// Unblock evals for the nodes computed node class if it is in a ready
	// state.
	if req.Status == structs.NodeStatusReady {
		ws := memdb.NewWatchSet()
		node, err := n.state.NodeByID(ws, req.NodeID)
		if err != nil {
			n.logger.Error("looking up node failed", "node_id", req.NodeID, "error", err)
			return err
		}
		// FIX: NodeByID can return (nil, nil) for a missing node; the
		// original dereferenced node unconditionally and would panic.
		// (applyNodeEligibilityUpdate in this file already nil-checks.)
		if node != nil {
			n.blockedEvals.Unblock(node.ComputedClass, index)
		}
	}
	return nil
}
// applyDrainUpdate applies a node drain request to the state store,
// translating the deprecated boolean drain flag into a forced drain
// strategy for old Raft log entries.
func (n *nomadFSM) applyDrainUpdate(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "node_drain_update"}, time.Now())
	var req structs.NodeUpdateDrainRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}
	// COMPAT Remove in version 0.10
	// As part of Nomad 0.8 we have deprecated the drain boolean in favor of a
	// drain strategy but we need to handle the upgrade path where the Raft log
	// contains drain updates with just the drain boolean being manipulated.
	if req.Drain && req.DrainStrategy == nil {
		// Mark the drain strategy as a force to imitate the old style drain
		// functionality.
		req.DrainStrategy = &structs.DrainStrategy{
			DrainSpec: structs.DrainSpec{
				Deadline: -1 * time.Second,
			},
		}
	}
	if err := n.state.UpdateNodeDrain(index, req.NodeID, req.DrainStrategy, req.MarkEligible, req.UpdatedAt, req.NodeEvent); err != nil {
		n.logger.Error("UpdateNodeDrain failed", "error", err)
		return err
	}
	return nil
}
// applyBatchDrainUpdate applies a batch of node drain updates to the
// state store in one transaction.
func (n *nomadFSM) applyBatchDrainUpdate(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "batch_node_drain_update"}, time.Now())
	var req structs.BatchNodeUpdateDrainRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}
	err := n.state.BatchUpdateNodeDrain(index, req.UpdatedAt, req.Updates, req.NodeEvents)
	if err != nil {
		n.logger.Error("BatchUpdateNodeDrain failed", "error", err)
		return err
	}
	return nil
}
// applyNodeEligibilityUpdate changes a node's scheduling eligibility and
// unblocks evals for its computed class when it flips from ineligible to
// eligible.
func (n *nomadFSM) applyNodeEligibilityUpdate(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "node_eligibility_update"}, time.Now())
	var req structs.NodeUpdateEligibilityRequest
	if decodeErr := structs.Decode(buf, &req); decodeErr != nil {
		panic(fmt.Errorf("failed to decode request: %v", decodeErr))
	}

	// Capture the node before the update so the prior eligibility can be
	// compared afterwards.
	node, lookupErr := n.state.NodeByID(nil, req.NodeID)
	if lookupErr != nil {
		n.logger.Error("UpdateNodeEligibility failed to lookup node", "node_id", req.NodeID, "error", lookupErr)
		return lookupErr
	}

	if err := n.state.UpdateNodeEligibility(index, req.NodeID, req.Eligibility, req.UpdatedAt, req.NodeEvent); err != nil {
		n.logger.Error("UpdateNodeEligibility failed", "error", err)
		return err
	}

	// A node that just became eligible can take placements again, so
	// unblock evals for its computed node class.
	if node != nil && node.SchedulingEligibility == structs.NodeSchedulingIneligible &&
		req.Eligibility == structs.NodeSchedulingEligible {
		n.blockedEvals.Unblock(node.ComputedClass, index)
	}
	return nil
}
// applyUpsertJob registers or updates a job in the state store, keeps the
// periodic dispatcher in sync, and records periodic launch times that are
// needed to detect missed launches across leader elections.
func (n *nomadFSM) applyUpsertJob(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "register_job"}, time.Now())
	var req structs.JobRegisterRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}

	/* Handle upgrade paths:
	 * - Empty maps and slices should be treated as nil to avoid
	 *   un-intended destructive updates in scheduler since we use
	 *   reflect.DeepEqual. Starting Nomad 0.4.1, job submission sanitizes
	 *   the incoming job.
	 * - Migrate from old style upgrade stanza that used only a stagger.
	 */
	req.Job.Canonicalize()

	if err := n.state.UpsertJob(index, req.Job); err != nil {
		n.logger.Error("UpsertJob failed", "error", err)
		return err
	}

	// We always add the job to the periodic dispatcher because there is the
	// possibility that the periodic spec was removed and then we should stop
	// tracking it.
	if err := n.periodicDispatcher.Add(req.Job); err != nil {
		n.logger.Error("periodicDispatcher.Add failed", "error", err)
		return fmt.Errorf("failed adding job to periodic dispatcher: %v", err)
	}

	// Create a watch set
	ws := memdb.NewWatchSet()

	// If it is an active periodic job, record the time it was inserted. This is
	// necessary for recovering during leader election. It is possible that from
	// the time it is added to when it was suppose to launch, leader election
	// occurs and the job was not launched. In this case, we use the insertion
	// time to determine if a launch was missed.
	if req.Job.IsPeriodicActive() {
		prevLaunch, err := n.state.PeriodicLaunchByID(ws, req.Namespace, req.Job.ID)
		if err != nil {
			n.logger.Error("PeriodicLaunchByID failed", "error", err)
			return err
		}

		// Record the insertion time as a launch. We overload the launch table
		// such that the first entry is the insertion time.
		if prevLaunch == nil {
			launch := &structs.PeriodicLaunch{
				ID:        req.Job.ID,
				Namespace: req.Namespace,
				Launch:    time.Now(),
			}
			if err := n.state.UpsertPeriodicLaunch(index, launch); err != nil {
				n.logger.Error("UpsertPeriodicLaunch failed", "error", err)
				return err
			}
		}
	}

	// Check if the parent job is periodic and mark the launch time.
	parentID := req.Job.ParentID
	if parentID != "" {
		parent, err := n.state.JobByID(ws, req.Namespace, parentID)
		if err != nil {
			n.logger.Error("JobByID lookup for parent failed", "parent_id", parentID, "namespace", req.Namespace, "error", err)
			return err
		} else if parent == nil {
			// The parent has been deregistered.
			return nil
		}

		if parent.IsPeriodic() && !parent.IsParameterized() {
			// This job is a dispatched child of a periodic parent; stamp the
			// parent's launch table with the dispatch launch time.
			t, err := n.periodicDispatcher.LaunchTime(req.Job.ID)
			if err != nil {
				n.logger.Error("LaunchTime failed", "job", req.Job.NamespacedID(), "error", err)
				return err
			}

			launch := &structs.PeriodicLaunch{
				ID:        parentID,
				Namespace: req.Namespace,
				Launch:    t,
			}
			if err := n.state.UpsertPeriodicLaunch(index, launch); err != nil {
				n.logger.Error("UpsertPeriodicLaunch failed", "error", err)
				return err
			}
		}
	}

	return nil
}
// applyDeregisterJob stops or purges a single job inside one write
// transaction so readers never observe a partially deregistered job.
func (n *nomadFSM) applyDeregisterJob(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "deregister_job"}, time.Now())
	var req structs.JobDeregisterRequest
	if decodeErr := structs.Decode(buf, &req); decodeErr != nil {
		panic(fmt.Errorf("failed to decode request: %v", decodeErr))
	}

	return n.state.WithWriteTransaction(func(tx state.Txn) error {
		err := n.handleJobDeregister(index, req.JobID, req.Namespace, req.Purge, tx)
		if err != nil {
			n.logger.Error("deregistering job failed", "error", err)
		}
		return err
	})
}
// applyBatchDeregisterJob deregisters a batch of jobs and upserts their
// evaluations inside a single write transaction, then runs the eval side
// effects outside the transaction.
func (n *nomadFSM) applyBatchDeregisterJob(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "batch_deregister_job"}, time.Now())
	var req structs.JobBatchDeregisterRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}

	// Perform all store updates atomically to ensure a consistent view for store readers.
	// A partial update may increment the snapshot index, allowing eval brokers to process
	// evals for jobs whose deregistering didn't get committed yet.
	err := n.state.WithWriteTransaction(func(tx state.Txn) error {
		for jobNS, options := range req.Jobs {
			if err := n.handleJobDeregister(index, jobNS.ID, jobNS.Namespace, options.Purge, tx); err != nil {
				n.logger.Error("deregistering job failed", "job", jobNS, "error", err)
				return err
			}
		}

		if err := n.state.UpsertEvalsTxn(index, req.Evals, tx); err != nil {
			n.logger.Error("UpsertEvals failed", "error", err)
			return err
		}

		return nil
	})
	if err != nil {
		return err
	}

	// perform the side effects outside the transactions
	n.handleUpsertedEvals(req.Evals)
	return nil
}
// handleJobDeregister is used to deregister a job. When purge is true the job
// (and its periodic launch entry) is deleted outright; otherwise the job is
// re-inserted with Stop set. Runs inside the caller-supplied transaction.
func (n *nomadFSM) handleJobDeregister(index uint64, jobID, namespace string, purge bool, tx state.Txn) error {
	// If it is periodic remove it from the dispatcher
	if err := n.periodicDispatcher.Remove(namespace, jobID); err != nil {
		n.logger.Error("periodicDispatcher.Remove failed", "error", err)
		return err
	}

	if purge {
		if err := n.state.DeleteJobTxn(index, namespace, jobID, tx); err != nil {
			n.logger.Error("DeleteJob failed", "error", err)
			return err
		}

		// We always delete from the periodic launch table because it is possible that
		// the job was updated to be non-periodic, thus checking if it is periodic
		// doesn't ensure we clean it up properly.
		n.state.DeletePeriodicLaunchTxn(index, namespace, jobID, tx)
	} else {
		// Get the current job and mark it as stopped and re-insert it.
		ws := memdb.NewWatchSet()
		current, err := n.state.JobByIDTxn(ws, namespace, jobID, tx)
		if err != nil {
			n.logger.Error("JobByID lookup failed", "error", err)
			return err
		}

		if current == nil {
			return fmt.Errorf("job %q in namespace %q doesn't exist to be deregistered", jobID, namespace)
		}

		// Copy before mutating so the shared state-store object is untouched.
		stopped := current.Copy()
		stopped.Stop = true

		if err := n.state.UpsertJobTxn(index, stopped, tx); err != nil {
			n.logger.Error("UpsertJob failed", "error", err)
			return err
		}
	}

	return nil
}
// applyUpdateEval upserts the evaluations carried in the request and then
// triggers the follow-up broker/blocked-evals handling for each of them.
func (n *nomadFSM) applyUpdateEval(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "update_eval"}, time.Now())
	var req structs.EvalUpdateRequest
	if decodeErr := structs.Decode(buf, &req); decodeErr != nil {
		panic(fmt.Errorf("failed to decode request: %v", decodeErr))
	}
	return n.upsertEvals(index, req.Evals)
}
// upsertEvals writes the evaluations to the state store and then performs
// the per-eval side effects (enqueue, block, or untrack).
func (n *nomadFSM) upsertEvals(index uint64, evals []*structs.Evaluation) error {
	err := n.state.UpsertEvals(index, evals)
	if err != nil {
		n.logger.Error("UpsertEvals failed", "error", err)
		return err
	}

	n.handleUpsertedEvals(evals)
	return nil
}
// handleUpsertedEvals is a helper for taking action after upserting
// evaluations.
func (n *nomadFSM) handleUpsertedEvals(evals []*structs.Evaluation) {
	for _, eval := range evals {
		n.handleUpsertedEval(eval)
	}
}
// handleUpsertedEval is a helper for taking action after upserting an eval:
// it routes the eval to the broker, the blocked-evals tracker, or untracks
// a previously blocked eval on success.
func (n *nomadFSM) handleUpsertedEval(eval *structs.Evaluation) {
	if eval == nil {
		return
	}

	if eval.ShouldEnqueue() {
		// Ready for processing; hand it to the eval broker.
		n.evalBroker.Enqueue(eval)
	} else if eval.ShouldBlock() {
		// Cannot be placed right now; park it with the blocked-evals tracker.
		n.blockedEvals.Block(eval)
	} else if eval.Status == structs.EvalStatusComplete &&
		len(eval.FailedTGAllocs) == 0 {
		// If we have a successful evaluation for a node, untrack any
		// blocked evaluation
		n.blockedEvals.Untrack(eval.JobID, eval.Namespace)
	}
}
// applyDeleteEval removes a set of evaluations and allocations from the
// state store.
func (n *nomadFSM) applyDeleteEval(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "delete_eval"}, time.Now())
	var req structs.EvalDeleteRequest
	if decodeErr := structs.Decode(buf, &req); decodeErr != nil {
		panic(fmt.Errorf("failed to decode request: %v", decodeErr))
	}

	err := n.state.DeleteEval(index, req.Evals, req.Allocs)
	if err != nil {
		n.logger.Error("DeleteEval failed", "error", err)
		return err
	}
	return nil
}
// applyAllocUpdate upserts a set of allocations, re-attaching the shared job
// and recomputing legacy total resources before insertion.
func (n *nomadFSM) applyAllocUpdate(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "alloc_update"}, time.Now())
	var req structs.AllocUpdateRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}

	// Attach the job to all the allocations. It is pulled out in the
	// payload to avoid the redundancy of encoding, but should be denormalized
	// prior to being inserted into MemDB.
	structs.DenormalizeAllocationJobs(req.Job, req.Alloc)

	// COMPAT(0.11): Remove in 0.11
	// Calculate the total resources of allocations. It is pulled out in the
	// payload to avoid encoding something that can be computed, but should be
	// denormalized prior to being inserted into MemDB.
	for _, alloc := range req.Alloc {
		// Skip allocs that already carry computed totals.
		if alloc.Resources != nil {
			continue
		}

		alloc.Resources = new(structs.Resources)
		for _, task := range alloc.TaskResources {
			alloc.Resources.Add(task)
		}

		// Add the shared resources
		alloc.Resources.Add(alloc.SharedResources)
	}

	if err := n.state.UpsertAllocs(index, req.Alloc); err != nil {
		n.logger.Error("UpsertAllocs failed", "error", err)
		return err
	}

	return nil
}
// applyAllocClientUpdate applies client-reported allocation updates, upserts
// any attached evals, and unblocks evals for nodes whose allocations have
// reached a terminal client state.
func (n *nomadFSM) applyAllocClientUpdate(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "alloc_client_update"}, time.Now())
	var req structs.AllocUpdateRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}
	if len(req.Alloc) == 0 {
		return nil
	}

	// Create a watch set
	ws := memdb.NewWatchSet()

	// Updating the allocs with the job id and task group name
	for _, alloc := range req.Alloc {
		if existing, _ := n.state.AllocByID(ws, alloc.ID); existing != nil {
			alloc.JobID = existing.JobID
			alloc.TaskGroup = existing.TaskGroup
		}
	}

	// Update all the client allocations
	if err := n.state.UpdateAllocsFromClient(index, req.Alloc); err != nil {
		n.logger.Error("UpdateAllocFromClient failed", "error", err)
		return err
	}

	// Update any evals
	if len(req.Evals) > 0 {
		if err := n.upsertEvals(index, req.Evals); err != nil {
			n.logger.Error("applyAllocClientUpdate failed to update evaluations", "error", err)
			return err
		}
	}

	// Unblock evals for the nodes computed node class if the client has
	// finished running an allocation.
	for _, alloc := range req.Alloc {
		if alloc.ClientStatus == structs.AllocClientStatusComplete ||
			alloc.ClientStatus == structs.AllocClientStatusFailed {
			nodeID := alloc.NodeID
			node, err := n.state.NodeByID(ws, nodeID)
			// NOTE(review): when node == nil with err == nil this returns a
			// nil error, ending the loop early and skipping the unblock
			// handling for any remaining allocs — confirm this is intended.
			if err != nil || node == nil {
				n.logger.Error("looking up node failed", "node_id", nodeID, "error", err)
				return err

			}

			// Unblock any associated quota
			quota, err := n.allocQuota(alloc.ID)
			if err != nil {
				n.logger.Error("looking up quota associated with alloc failed", "alloc_id", alloc.ID, "error", err)
				return err
			}

			n.blockedEvals.UnblockClassAndQuota(node.ComputedClass, quota, index)
		}
	}

	return nil
}
// applyAllocUpdateDesiredTransition is used to update the desired transitions
// of a set of allocations.
func (n *nomadFSM) applyAllocUpdateDesiredTransition(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "alloc_update_desired_transition"}, time.Now())
	var req structs.AllocUpdateDesiredTransitionRequest
	if decodeErr := structs.Decode(buf, &req); decodeErr != nil {
		panic(fmt.Errorf("failed to decode request: %v", decodeErr))
	}

	err := n.state.UpdateAllocsDesiredTransitions(index, req.Allocs, req.Evals)
	if err != nil {
		n.logger.Error("UpdateAllocsDesiredTransitions failed", "error", err)
		return err
	}

	// The request may carry evals that now need broker handling.
	n.handleUpsertedEvals(req.Evals)
	return nil
}
// applyReconcileSummaries reconciles summaries for all the jobs and then
// recomputes their queued allocation counts.
func (n *nomadFSM) applyReconcileSummaries(buf []byte, index uint64) interface{} {
	// Record apply timing like every other FSM handler for consistent metrics.
	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_reconcile_summaries"}, time.Now())
	if err := n.state.ReconcileJobSummaries(index); err != nil {
		return err
	}

	return n.reconcileQueuedAllocations(index)
}
// applyUpsertNodeEvent tracks the given node events.
func (n *nomadFSM) applyUpsertNodeEvent(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "upsert_node_events"}, time.Now())
	var req structs.EmitNodeEventsRequest
	if decodeErr := structs.Decode(buf, &req); decodeErr != nil {
		panic(fmt.Errorf("failed to decode EmitNodeEventsRequest: %v", decodeErr))
	}

	err := n.state.UpsertNodeEvents(index, req.NodeEvents)
	if err != nil {
		n.logger.Error("failed to add node events", "error", err)
		return err
	}
	return nil
}
// applyUpsertVaultAccessor stores the Vault accessors for a given allocation
// and task
func (n *nomadFSM) applyUpsertVaultAccessor(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "upsert_vault_accessor"}, time.Now())
	var req structs.VaultAccessorsRequest
	if decodeErr := structs.Decode(buf, &req); decodeErr != nil {
		panic(fmt.Errorf("failed to decode request: %v", decodeErr))
	}

	err := n.state.UpsertVaultAccessor(index, req.Accessors)
	if err != nil {
		n.logger.Error("UpsertVaultAccessor failed", "error", err)
		return err
	}
	return nil
}
// applyDeregisterVaultAccessor deregisters a set of Vault accessors
func (n *nomadFSM) applyDeregisterVaultAccessor(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "deregister_vault_accessor"}, time.Now())
	var req structs.VaultAccessorsRequest
	if decodeErr := structs.Decode(buf, &req); decodeErr != nil {
		panic(fmt.Errorf("failed to decode request: %v", decodeErr))
	}

	err := n.state.DeleteVaultAccessors(index, req.Accessors)
	if err != nil {
		n.logger.Error("DeregisterVaultAccessor failed", "error", err)
		return err
	}
	return nil
}
// applyPlanResults applies the results of a plan application to the state
// store and runs the side effects for any preemption evals.
func (n *nomadFSM) applyPlanResults(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_plan_results"}, time.Now())
	var req structs.ApplyPlanResultsRequest
	if decodeErr := structs.Decode(buf, &req); decodeErr != nil {
		panic(fmt.Errorf("failed to decode request: %v", decodeErr))
	}

	err := n.state.UpsertPlanResults(index, &req)
	if err != nil {
		n.logger.Error("ApplyPlan failed", "error", err)
		return err
	}

	// Add evals for jobs that were preempted
	n.handleUpsertedEvals(req.PreemptionEvals)
	return nil
}
// applyDeploymentStatusUpdate is used to update the status of an existing
// deployment
func (n *nomadFSM) applyDeploymentStatusUpdate(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_deployment_status_update"}, time.Now())
	var req structs.DeploymentStatusUpdateRequest
	if decodeErr := structs.Decode(buf, &req); decodeErr != nil {
		panic(fmt.Errorf("failed to decode request: %v", decodeErr))
	}

	err := n.state.UpdateDeploymentStatus(index, &req)
	if err != nil {
		n.logger.Error("UpsertDeploymentStatusUpdate failed", "error", err)
		return err
	}

	// The request may carry a follow-up eval that needs broker handling.
	n.handleUpsertedEval(req.Eval)
	return nil
}
// applyDeploymentPromotion is used to promote canaries in a deployment
func (n *nomadFSM) applyDeploymentPromotion(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_deployment_promotion"}, time.Now())
	var req structs.ApplyDeploymentPromoteRequest
	if decodeErr := structs.Decode(buf, &req); decodeErr != nil {
		panic(fmt.Errorf("failed to decode request: %v", decodeErr))
	}

	err := n.state.UpdateDeploymentPromotion(index, &req)
	if err != nil {
		n.logger.Error("UpsertDeploymentPromotion failed", "error", err)
		return err
	}

	// The request may carry a follow-up eval that needs broker handling.
	n.handleUpsertedEval(req.Eval)
	return nil
}
// applyDeploymentAllocHealth is used to set the health of allocations as part
// of a deployment
func (n *nomadFSM) applyDeploymentAllocHealth(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_deployment_alloc_health"}, time.Now())
	var req structs.ApplyDeploymentAllocHealthRequest
	if decodeErr := structs.Decode(buf, &req); decodeErr != nil {
		panic(fmt.Errorf("failed to decode request: %v", decodeErr))
	}

	err := n.state.UpdateDeploymentAllocHealth(index, &req)
	if err != nil {
		n.logger.Error("UpsertDeploymentAllocHealth failed", "error", err)
		return err
	}

	// The request may carry a follow-up eval that needs broker handling.
	n.handleUpsertedEval(req.Eval)
	return nil
}
// applyDeploymentDelete is used to delete a set of deployments
func (n *nomadFSM) applyDeploymentDelete(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_deployment_delete"}, time.Now())
	var req structs.DeploymentDeleteRequest
	if decodeErr := structs.Decode(buf, &req); decodeErr != nil {
		panic(fmt.Errorf("failed to decode request: %v", decodeErr))
	}

	err := n.state.DeleteDeployment(index, req.Deployments)
	if err != nil {
		n.logger.Error("DeleteDeployment failed", "error", err)
		return err
	}
	return nil
}
// applyJobStability is used to set the stability of a job
func (n *nomadFSM) applyJobStability(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_job_stability"}, time.Now())
	var req structs.JobStabilityRequest
	if decodeErr := structs.Decode(buf, &req); decodeErr != nil {
		panic(fmt.Errorf("failed to decode request: %v", decodeErr))
	}

	err := n.state.UpdateJobStability(index, req.Namespace, req.JobID, req.JobVersion, req.Stable)
	if err != nil {
		n.logger.Error("UpdateJobStability failed", "error", err)
		return err
	}
	return nil
}
// applyACLPolicyUpsert is used to upsert a set of policies
func (n *nomadFSM) applyACLPolicyUpsert(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_acl_policy_upsert"}, time.Now())
	var req structs.ACLPolicyUpsertRequest
	if decodeErr := structs.Decode(buf, &req); decodeErr != nil {
		panic(fmt.Errorf("failed to decode request: %v", decodeErr))
	}

	err := n.state.UpsertACLPolicies(index, req.Policies)
	if err != nil {
		n.logger.Error("UpsertACLPolicies failed", "error", err)
		return err
	}
	return nil
}
// applyACLPolicyDelete is used to delete a set of policies
func (n *nomadFSM) applyACLPolicyDelete(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_acl_policy_delete"}, time.Now())
	var req structs.ACLPolicyDeleteRequest
	if decodeErr := structs.Decode(buf, &req); decodeErr != nil {
		panic(fmt.Errorf("failed to decode request: %v", decodeErr))
	}

	err := n.state.DeleteACLPolicies(index, req.Names)
	if err != nil {
		n.logger.Error("DeleteACLPolicies failed", "error", err)
		return err
	}
	return nil
}
// applyACLTokenUpsert is used to upsert a set of ACL tokens
func (n *nomadFSM) applyACLTokenUpsert(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_acl_token_upsert"}, time.Now())
	var req structs.ACLTokenUpsertRequest
	if decodeErr := structs.Decode(buf, &req); decodeErr != nil {
		panic(fmt.Errorf("failed to decode request: %v", decodeErr))
	}

	err := n.state.UpsertACLTokens(index, req.Tokens)
	if err != nil {
		n.logger.Error("UpsertACLTokens failed", "error", err)
		return err
	}
	return nil
}
// applyACLTokenDelete is used to delete a set of ACL tokens
func (n *nomadFSM) applyACLTokenDelete(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_acl_token_delete"}, time.Now())
	var req structs.ACLTokenDeleteRequest
	if decodeErr := structs.Decode(buf, &req); decodeErr != nil {
		panic(fmt.Errorf("failed to decode request: %v", decodeErr))
	}

	err := n.state.DeleteACLTokens(index, req.AccessorIDs)
	if err != nil {
		n.logger.Error("DeleteACLTokens failed", "error", err)
		return err
	}
	return nil
}
// applyACLTokenBootstrap is used to bootstrap an ACL token
func (n *nomadFSM) applyACLTokenBootstrap(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_acl_token_bootstrap"}, time.Now())
	var req structs.ACLTokenBootstrapRequest
	if decodeErr := structs.Decode(buf, &req); decodeErr != nil {
		panic(fmt.Errorf("failed to decode request: %v", decodeErr))
	}

	err := n.state.BootstrapACLTokens(index, req.ResetIndex, req.Token)
	if err != nil {
		n.logger.Error("BootstrapACLToken failed", "error", err)
		return err
	}
	return nil
}
// applyAutopilotUpdate stores a new autopilot configuration, optionally as a
// check-and-set against the config's modify index.
func (n *nomadFSM) applyAutopilotUpdate(buf []byte, index uint64) interface{} {
	var req structs.AutopilotSetConfigRequest
	if decodeErr := structs.Decode(buf, &req); decodeErr != nil {
		panic(fmt.Errorf("failed to decode request: %v", decodeErr))
	}
	// Metric deliberately registered after decode, matching the original
	// ordering: a decode panic is not timed.
	defer metrics.MeasureSince([]string{"nomad", "fsm", "autopilot"}, time.Now())

	if !req.CAS {
		return n.state.AutopilotSetConfig(index, &req.Config)
	}

	applied, err := n.state.AutopilotCASConfig(index, req.Config.ModifyIndex, &req.Config)
	if err != nil {
		return err
	}
	return applied
}
// applySchedulerConfigUpdate stores a new scheduler configuration, optionally
// as a check-and-set against the config's modify index.
func (n *nomadFSM) applySchedulerConfigUpdate(buf []byte, index uint64) interface{} {
	var req structs.SchedulerSetConfigRequest
	if decodeErr := structs.Decode(buf, &req); decodeErr != nil {
		panic(fmt.Errorf("failed to decode request: %v", decodeErr))
	}
	// Metric deliberately registered after decode, matching the original
	// ordering: a decode panic is not timed.
	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_scheduler_config"}, time.Now())

	if !req.CAS {
		return n.state.SchedulerSetConfig(index, &req.Config)
	}

	applied, err := n.state.SchedulerCASConfig(index, req.Config.ModifyIndex, &req.Config)
	if err != nil {
		return err
	}
	return applied
}
// Snapshot returns a point-in-time snapshot of the FSM state for Raft.
func (n *nomadFSM) Snapshot() (raft.FSMSnapshot, error) {
	snap, err := n.state.Snapshot()
	if err != nil {
		return nil, err
	}

	return &nomadSnapshot{
		snap:      snap,
		timetable: n.timetable,
	}, nil
}
// Restore rebuilds the FSM state from a Raft snapshot stream. It decodes
// type-tagged records into a fresh state store, then atomically swaps it in
// and abandons the old store so blocking queries wake up.
func (n *nomadFSM) Restore(old io.ReadCloser) error {
	defer old.Close()

	// Create a new state store
	config := &state.StateStoreConfig{
		Logger: n.config.Logger,
		Region: n.config.Region,
	}
	newState, err := state.NewStateStore(config)
	if err != nil {
		return err
	}

	// Start the state restore
	restore, err := newState.Restore()
	if err != nil {
		return err
	}
	// Abort is a no-op after a successful Commit below.
	defer restore.Abort()

	// Create a decoder
	dec := codec.NewDecoder(old, structs.MsgpackHandle)

	// Read in the header
	var header snapshotHeader
	if err := dec.Decode(&header); err != nil {
		return err
	}

	// Populate the new state: each record is a one-byte type tag followed by
	// a msgpack payload, until EOF.
	msgType := make([]byte, 1)
	for {
		// Read the message type
		_, err := old.Read(msgType)
		if err == io.EOF {
			break
		} else if err != nil {
			return err
		}

		// Decode
		snapType := SnapshotType(msgType[0])
		switch snapType {
		case TimeTableSnapshot:
			if err := n.timetable.Deserialize(dec); err != nil {
				return fmt.Errorf("time table deserialize failed: %v", err)
			}

		case NodeSnapshot:
			node := new(structs.Node)
			if err := dec.Decode(node); err != nil {
				return err
			}

			// Handle upgrade paths
			node.Canonicalize()

			if err := restore.NodeRestore(node); err != nil {
				return err
			}

		case JobSnapshot:
			job := new(structs.Job)
			if err := dec.Decode(job); err != nil {
				return err
			}

			/* Handle upgrade paths:
			 * - Empty maps and slices should be treated as nil to avoid
			 *   un-intended destructive updates in scheduler since we use
			 *   reflect.DeepEqual. Starting Nomad 0.4.1, job submission sanitizes
			 *   the incoming job.
			 * - Migrate from old style upgrade stanza that used only a stagger.
			 */
			job.Canonicalize()

			if err := restore.JobRestore(job); err != nil {
				return err
			}

		case EvalSnapshot:
			eval := new(structs.Evaluation)
			if err := dec.Decode(eval); err != nil {
				return err
			}

			if err := restore.EvalRestore(eval); err != nil {
				return err
			}

		case AllocSnapshot:
			alloc := new(structs.Allocation)
			if err := dec.Decode(alloc); err != nil {
				return err
			}

			if err := restore.AllocRestore(alloc); err != nil {
				return err
			}

		case IndexSnapshot:
			idx := new(state.IndexEntry)
			if err := dec.Decode(idx); err != nil {
				return err
			}

			if err := restore.IndexRestore(idx); err != nil {
				return err
			}

		case PeriodicLaunchSnapshot:
			launch := new(structs.PeriodicLaunch)
			if err := dec.Decode(launch); err != nil {
				return err
			}

			if err := restore.PeriodicLaunchRestore(launch); err != nil {
				return err
			}

		case JobSummarySnapshot:
			summary := new(structs.JobSummary)
			if err := dec.Decode(summary); err != nil {
				return err
			}

			if err := restore.JobSummaryRestore(summary); err != nil {
				return err
			}

		case VaultAccessorSnapshot:
			accessor := new(structs.VaultAccessor)
			if err := dec.Decode(accessor); err != nil {
				return err
			}

			if err := restore.VaultAccessorRestore(accessor); err != nil {
				return err
			}

		case JobVersionSnapshot:
			version := new(structs.Job)
			if err := dec.Decode(version); err != nil {
				return err
			}

			if err := restore.JobVersionRestore(version); err != nil {
				return err
			}

		case DeploymentSnapshot:
			deployment := new(structs.Deployment)
			if err := dec.Decode(deployment); err != nil {
				return err
			}

			if err := restore.DeploymentRestore(deployment); err != nil {
				return err
			}

		case ACLPolicySnapshot:
			policy := new(structs.ACLPolicy)
			if err := dec.Decode(policy); err != nil {
				return err
			}

			if err := restore.ACLPolicyRestore(policy); err != nil {
				return err
			}

		case ACLTokenSnapshot:
			token := new(structs.ACLToken)
			if err := dec.Decode(token); err != nil {
				return err
			}

			if err := restore.ACLTokenRestore(token); err != nil {
				return err
			}

		case SchedulerConfigSnapshot:
			schedConfig := new(structs.SchedulerConfiguration)
			if err := dec.Decode(schedConfig); err != nil {
				return err
			}

			if err := restore.SchedulerConfigRestore(schedConfig); err != nil {
				return err
			}

		default:
			// Check if this is an enterprise only object being restored
			restorer, ok := n.enterpriseRestorers[snapType]
			if !ok {
				return fmt.Errorf("Unrecognized snapshot type: %v", msgType)
			}

			// Restore the enterprise only object
			if err := restorer(restore, dec); err != nil {
				return err
			}
		}
	}

	restore.Commit()

	// COMPAT Remove in 0.10
	// Clean up active deployments that do not have a job
	if err := n.failLeakedDeployments(newState); err != nil {
		return err
	}

	// External code might be calling State(), so we need to synchronize
	// here to make sure we swap in the new state store atomically.
	n.stateLock.Lock()
	stateOld := n.state
	n.state = newState
	n.stateLock.Unlock()

	// Signal that the old state store has been abandoned. This is required
	// because we don't operate on it any more, we just throw it away, so
	// blocking queries won't see any changes and need to be woken up.
	stateOld.Abandon()

	return nil
}
// failLeakedDeployments is used to fail deployments that do not have a job.
// This state is a broken invariant that should not occur since 0.8.X.
func (n *nomadFSM) failLeakedDeployments(state *state.StateStore) error {
	// Scan for deployments that are referencing a job that no longer exists.
	// This could happen if multiple deployments were created for a given job
	// and thus the older deployment leaks and then the job is removed.
	iter, err := state.Deployments(nil)
	if err != nil {
		return fmt.Errorf("failed to query deployments: %v", err)
	}

	// Reuse the deployment table's current index for the terminal update so
	// the table index is not advanced.
	dindex, err := state.Index("deployment")
	if err != nil {
		return fmt.Errorf("couldn't fetch index of deployments table: %v", err)
	}

	for {
		raw := iter.Next()
		if raw == nil {
			break
		}

		d := raw.(*structs.Deployment)

		// We are only looking for active deployments where the job no longer
		// exists
		if !d.Active() {
			continue
		}

		// Find the job
		job, err := state.JobByID(nil, d.Namespace, d.JobID)
		if err != nil {
			return fmt.Errorf("failed to lookup job %s from deployment %q: %v", d.JobID, d.ID, err)
		}

		// Job exists.
		if job != nil {
			continue
		}

		// Update the deployment to be terminal
		failed := d.Copy()
		failed.Status = structs.DeploymentStatusCancelled
		failed.StatusDescription = structs.DeploymentStatusDescriptionStoppedJob
		if err := state.UpsertDeployment(dindex, failed); err != nil {
			return fmt.Errorf("failed to mark leaked deployment %q as failed: %v", failed.ID, err)
		}
	}

	return nil
}
// reconcileQueuedAllocations re-calculates the queued allocations for every job that we
// created a Job Summary during the snap shot restore. It dry-runs the
// scheduler against a state snapshot with a mock planner and folds the
// resulting would-be placements into each job summary's Queued counts.
func (n *nomadFSM) reconcileQueuedAllocations(index uint64) error {
	// Get all the jobs
	ws := memdb.NewWatchSet()
	iter, err := n.state.Jobs(ws)
	if err != nil {
		return err
	}

	// Scheduling runs against a snapshot so the real state store is never
	// mutated by the dry-run.
	snap, err := n.state.Snapshot()
	if err != nil {
		return fmt.Errorf("unable to create snapshot: %v", err)
	}

	// Invoking the scheduler for every job so that we can populate the number
	// of queued allocations for every job
	for {
		rawJob := iter.Next()
		if rawJob == nil {
			break
		}
		job := rawJob.(*structs.Job)

		// Nothing to do for queued allocations if the job is a parent periodic/parameterized job
		if job.IsParameterized() || job.IsPeriodic() {
			continue
		}
		planner := &scheduler.Harness{
			State: &snap.StateStore,
		}
		// Create an eval and mark it as requiring annotations and insert that as well
		eval := &structs.Evaluation{
			ID:             uuid.Generate(),
			Namespace:      job.Namespace,
			Priority:       job.Priority,
			Type:           job.Type,
			TriggeredBy:    structs.EvalTriggerJobRegister,
			JobID:          job.ID,
			JobModifyIndex: job.JobModifyIndex + 1,
			Status:         structs.EvalStatusPending,
			AnnotatePlan:   true,
		}
		// NOTE(review): the error from this snapshot-only upsert is ignored —
		// presumably acceptable for a dry-run, but confirm.
		snap.UpsertEvals(100, []*structs.Evaluation{eval})
		// Create the scheduler and run it
		sched, err := scheduler.NewScheduler(eval.Type, n.logger, snap, planner)
		if err != nil {
			return err
		}

		if err := sched.Process(eval); err != nil {
			return err
		}

		// Get the job summary from the fsm state store
		originalSummary, err := n.state.JobSummaryByID(ws, job.Namespace, job.ID)
		if err != nil {
			return err
		}
		// Work on a copy so the stored summary is only replaced when changed.
		summary := originalSummary.Copy()

		// Add the allocations scheduler has made to queued since these
		// allocations are never getting placed until the scheduler is invoked
		// with a real planner
		if l := len(planner.Plans); l != 1 {
			return fmt.Errorf("unexpected number of plans during restore %d. Please file an issue including the logs", l)
		}
		for _, allocations := range planner.Plans[0].NodeAllocation {
			for _, allocation := range allocations {
				tgSummary, ok := summary.Summary[allocation.TaskGroup]
				if !ok {
					return fmt.Errorf("task group %q not found while updating queued count", allocation.TaskGroup)
				}
				tgSummary.Queued += 1
				summary.Summary[allocation.TaskGroup] = tgSummary
			}
		}

		// Add the queued allocations attached to the evaluation to the queued
		// counter of the job summary
		if l := len(planner.Evals); l != 1 {
			return fmt.Errorf("unexpected number of evals during restore %d. Please file an issue including the logs", l)
		}
		for tg, queued := range planner.Evals[0].QueuedAllocations {
			tgSummary, ok := summary.Summary[tg]
			if !ok {
				return fmt.Errorf("task group %q not found while updating queued count", tg)
			}

			// We add instead of setting here because we want to take into
			// consideration what the scheduler with a mock planner thinks it
			// placed. Those should be counted as queued as well
			tgSummary.Queued += queued
			summary.Summary[tg] = tgSummary
		}

		if !reflect.DeepEqual(summary, originalSummary) {
			summary.ModifyIndex = index
			if err := n.state.UpsertJobSummary(index, summary); err != nil {
				return err
			}
		}
	}
	return nil
}
// Persist writes the snapshot out to the given sink: a header, the time
// table, and then every state table as type-tagged records. On any failure
// the sink is canceled and the first error is returned.
func (s *nomadSnapshot) Persist(sink raft.SnapshotSink) error {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "persist"}, time.Now())

	encoder := codec.NewEncoder(sink, structs.MsgpackHandle)

	// Write the header first.
	header := snapshotHeader{}
	if err := encoder.Encode(&header); err != nil {
		sink.Cancel()
		return err
	}

	// Write the time table.
	sink.Write([]byte{byte(TimeTableSnapshot)})
	if err := s.timetable.Serialize(encoder); err != nil {
		sink.Cancel()
		return err
	}

	// Write out each table in turn, preserving the original ordering.
	persisters := []func(raft.SnapshotSink, *codec.Encoder) error{
		s.persistIndexes,
		s.persistNodes,
		s.persistJobs,
		s.persistEvals,
		s.persistAllocs,
		s.persistPeriodicLaunches,
		s.persistJobSummaries,
		s.persistVaultAccessors,
		s.persistJobVersions,
		s.persistDeployments,
		s.persistACLPolicies,
		s.persistACLTokens,
		s.persistEnterpriseTables,
		s.persistSchedulerConfig,
	}
	for _, persist := range persisters {
		if err := persist(sink, encoder); err != nil {
			sink.Cancel()
			return err
		}
	}
	return nil
}
// persistIndexes writes every table index entry to the sink, each
// prefixed with an IndexSnapshot type byte.
func (s *nomadSnapshot) persistIndexes(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get all the indexes
	iter, err := s.snap.Indexes()
	if err != nil {
		return err
	}

	for raw := iter.Next(); raw != nil; raw = iter.Next() {
		idx := raw.(*state.IndexEntry)
		// Write out the record; the sink.Write error was previously dropped.
		if _, err := sink.Write([]byte{byte(IndexSnapshot)}); err != nil {
			return err
		}
		if err := encoder.Encode(idx); err != nil {
			return err
		}
	}
	return nil
}
// persistNodes writes every node registration to the sink, each prefixed
// with a NodeSnapshot type byte.
func (s *nomadSnapshot) persistNodes(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get all the nodes
	ws := memdb.NewWatchSet()
	nodes, err := s.snap.Nodes(ws)
	if err != nil {
		return err
	}

	for raw := nodes.Next(); raw != nil; raw = nodes.Next() {
		node := raw.(*structs.Node)
		// Write out a node registration; the sink.Write error was
		// previously dropped.
		if _, err := sink.Write([]byte{byte(NodeSnapshot)}); err != nil {
			return err
		}
		if err := encoder.Encode(node); err != nil {
			return err
		}
	}
	return nil
}
// persistJobs writes every job registration to the sink, each prefixed
// with a JobSnapshot type byte.
func (s *nomadSnapshot) persistJobs(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get all the jobs
	ws := memdb.NewWatchSet()
	jobs, err := s.snap.Jobs(ws)
	if err != nil {
		return err
	}

	for raw := jobs.Next(); raw != nil; raw = jobs.Next() {
		job := raw.(*structs.Job)
		// Write out a job registration; the sink.Write error was
		// previously dropped.
		if _, err := sink.Write([]byte{byte(JobSnapshot)}); err != nil {
			return err
		}
		if err := encoder.Encode(job); err != nil {
			return err
		}
	}
	return nil
}
// persistEvals writes every evaluation to the sink, each prefixed with an
// EvalSnapshot type byte.
func (s *nomadSnapshot) persistEvals(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get all the evaluations
	ws := memdb.NewWatchSet()
	evals, err := s.snap.Evals(ws)
	if err != nil {
		return err
	}

	for raw := evals.Next(); raw != nil; raw = evals.Next() {
		eval := raw.(*structs.Evaluation)
		// Write out the evaluation; the sink.Write error was previously
		// dropped.
		if _, err := sink.Write([]byte{byte(EvalSnapshot)}); err != nil {
			return err
		}
		if err := encoder.Encode(eval); err != nil {
			return err
		}
	}
	return nil
}
// persistAllocs writes every allocation to the sink, each prefixed with
// an AllocSnapshot type byte.
func (s *nomadSnapshot) persistAllocs(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get all the allocations
	ws := memdb.NewWatchSet()
	allocs, err := s.snap.Allocs(ws)
	if err != nil {
		return err
	}

	for raw := allocs.Next(); raw != nil; raw = allocs.Next() {
		alloc := raw.(*structs.Allocation)
		// Write out the allocation (the old comment said "evaluation",
		// a copy/paste error); the sink.Write error was previously dropped.
		if _, err := sink.Write([]byte{byte(AllocSnapshot)}); err != nil {
			return err
		}
		if err := encoder.Encode(alloc); err != nil {
			return err
		}
	}
	return nil
}
// persistPeriodicLaunches writes every periodic launch entry to the sink,
// each prefixed with a PeriodicLaunchSnapshot type byte.
func (s *nomadSnapshot) persistPeriodicLaunches(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get all the periodic launches
	ws := memdb.NewWatchSet()
	launches, err := s.snap.PeriodicLaunches(ws)
	if err != nil {
		return err
	}

	for raw := launches.Next(); raw != nil; raw = launches.Next() {
		launch := raw.(*structs.PeriodicLaunch)
		// Write out the launch record; the sink.Write error was
		// previously dropped.
		if _, err := sink.Write([]byte{byte(PeriodicLaunchSnapshot)}); err != nil {
			return err
		}
		if err := encoder.Encode(launch); err != nil {
			return err
		}
	}
	return nil
}
// persistJobSummaries writes every job summary to the sink, each prefixed
// with a JobSummarySnapshot type byte.
func (s *nomadSnapshot) persistJobSummaries(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	ws := memdb.NewWatchSet()
	summaries, err := s.snap.JobSummaries(ws)
	if err != nil {
		return err
	}

	for raw := summaries.Next(); raw != nil; raw = summaries.Next() {
		jobSummary := raw.(*structs.JobSummary)
		// The sink.Write error was previously dropped.
		if _, err := sink.Write([]byte{byte(JobSummarySnapshot)}); err != nil {
			return err
		}
		if err := encoder.Encode(jobSummary); err != nil {
			return err
		}
	}
	return nil
}
// persistVaultAccessors writes every Vault accessor to the sink, each
// prefixed with a VaultAccessorSnapshot type byte.
func (s *nomadSnapshot) persistVaultAccessors(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	ws := memdb.NewWatchSet()
	accessors, err := s.snap.VaultAccessors(ws)
	if err != nil {
		return err
	}

	for raw := accessors.Next(); raw != nil; raw = accessors.Next() {
		accessor := raw.(*structs.VaultAccessor)
		// The sink.Write error was previously dropped.
		if _, err := sink.Write([]byte{byte(VaultAccessorSnapshot)}); err != nil {
			return err
		}
		if err := encoder.Encode(accessor); err != nil {
			return err
		}
	}
	return nil
}
// persistJobVersions writes every historical job version to the sink,
// each prefixed with a JobVersionSnapshot type byte.
func (s *nomadSnapshot) persistJobVersions(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get all the job versions
	ws := memdb.NewWatchSet()
	versions, err := s.snap.JobVersions(ws)
	if err != nil {
		return err
	}

	for raw := versions.Next(); raw != nil; raw = versions.Next() {
		job := raw.(*structs.Job)
		// Write out the job version; the sink.Write error was previously
		// dropped.
		if _, err := sink.Write([]byte{byte(JobVersionSnapshot)}); err != nil {
			return err
		}
		if err := encoder.Encode(job); err != nil {
			return err
		}
	}
	return nil
}
// persistDeployments writes every deployment to the sink, each prefixed
// with a DeploymentSnapshot type byte.
func (s *nomadSnapshot) persistDeployments(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get all the deployments (the old comment said "jobs", a copy/paste
	// error)
	ws := memdb.NewWatchSet()
	deployments, err := s.snap.Deployments(ws)
	if err != nil {
		return err
	}

	for raw := deployments.Next(); raw != nil; raw = deployments.Next() {
		deployment := raw.(*structs.Deployment)
		// Write out the deployment; the sink.Write error was previously
		// dropped.
		if _, err := sink.Write([]byte{byte(DeploymentSnapshot)}); err != nil {
			return err
		}
		if err := encoder.Encode(deployment); err != nil {
			return err
		}
	}
	return nil
}
// persistACLPolicies writes every ACL policy to the sink, each prefixed
// with an ACLPolicySnapshot type byte.
func (s *nomadSnapshot) persistACLPolicies(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get all the policies
	ws := memdb.NewWatchSet()
	policies, err := s.snap.ACLPolicies(ws)
	if err != nil {
		return err
	}

	for raw := policies.Next(); raw != nil; raw = policies.Next() {
		policy := raw.(*structs.ACLPolicy)
		// Write out a policy registration; the sink.Write error was
		// previously dropped.
		if _, err := sink.Write([]byte{byte(ACLPolicySnapshot)}); err != nil {
			return err
		}
		if err := encoder.Encode(policy); err != nil {
			return err
		}
	}
	return nil
}
// persistACLTokens writes every ACL token to the sink, each prefixed with
// an ACLTokenSnapshot type byte.
func (s *nomadSnapshot) persistACLTokens(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get all the tokens (the old comment said "policies", a copy/paste
	// error)
	ws := memdb.NewWatchSet()
	tokens, err := s.snap.ACLTokens(ws)
	if err != nil {
		return err
	}

	for raw := tokens.Next(); raw != nil; raw = tokens.Next() {
		token := raw.(*structs.ACLToken)
		// Write out a token registration; the sink.Write error was
		// previously dropped.
		if _, err := sink.Write([]byte{byte(ACLTokenSnapshot)}); err != nil {
			return err
		}
		if err := encoder.Encode(token); err != nil {
			return err
		}
	}
	return nil
}
// persistSchedulerConfig writes the cluster scheduler configuration to
// the sink, prefixed with a SchedulerConfigSnapshot type byte.
func (s *nomadSnapshot) persistSchedulerConfig(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get scheduler config
	_, schedConfig, err := s.snap.SchedulerConfig()
	if err != nil {
		return err
	}
	// Write out scheduler config; the sink.Write error was previously
	// dropped.
	if _, err := sink.Write([]byte{byte(SchedulerConfigSnapshot)}); err != nil {
		return err
	}
	if err := encoder.Encode(schedConfig); err != nil {
		return err
	}
	return nil
}
// Release is a no-op, as we just need to GC the pointer
// to the state store snapshot. There is nothing to explicitly
// cleanup. It satisfies the raft.FSMSnapshot interface.
func (s *nomadSnapshot) Release() {}
// fsm variable names for consistency
package nomad
import (
"fmt"
"io"
"reflect"
"sync"
"time"
metrics "github.com/armon/go-metrics"
log "github.com/hashicorp/go-hclog"
memdb "github.com/hashicorp/go-memdb"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad/state"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/scheduler"
"github.com/hashicorp/raft"
"github.com/ugorji/go/codec"
)
const (
	// timeTableGranularity is the granularity of index to time tracking
	// maintained by the FSM's TimeTable.
	timeTableGranularity = 5 * time.Minute

	// timeTableLimit is the maximum duration of index-to-time history kept.
	timeTableLimit = 72 * time.Hour
)
// SnapshotType is prefixed to a record in the FSM snapshot
// so that we can determine the type for restore
type SnapshotType byte

// Snapshot record types. These byte values are written into persisted
// snapshots (see Persist), so existing entries must keep their iota
// positions; add new types only at the end of the list.
const (
	NodeSnapshot SnapshotType = iota
	JobSnapshot
	IndexSnapshot
	EvalSnapshot
	AllocSnapshot
	TimeTableSnapshot
	PeriodicLaunchSnapshot
	JobSummarySnapshot
	VaultAccessorSnapshot
	JobVersionSnapshot
	DeploymentSnapshot
	ACLPolicySnapshot
	ACLTokenSnapshot
	SchedulerConfigSnapshot
)
// LogApplier is the definition of a function that can apply a Raft log
type LogApplier func(buf []byte, index uint64) interface{}

// LogAppliers is a mapping of the Raft MessageType to the appropriate log
// applier
type LogAppliers map[structs.MessageType]LogApplier

// SnapshotRestorer is the definition of a function that can restore a
// snapshot record into the state store.
type SnapshotRestorer func(restore *state.StateRestore, dec *codec.Decoder) error

// SnapshotRestorers is a mapping of the SnapshotType to the appropriate
// snapshot restorer.
type SnapshotRestorers map[SnapshotType]SnapshotRestorer
// nomadFSM implements a finite state machine that is used
// along with Raft to provide strong consistency. We implement
// this outside the Server to avoid exposing this outside the package.
type nomadFSM struct {
	evalBroker         *EvalBroker
	blockedEvals       *BlockedEvals
	periodicDispatcher *PeriodicDispatch
	logger             log.Logger
	state              *state.StateStore
	timetable          *TimeTable

	// config is the FSM config
	config *FSMConfig

	// enterpriseAppliers holds the set of enterprise only LogAppliers
	enterpriseAppliers LogAppliers

	// enterpriseRestorers holds the set of enterprise only snapshot restorers
	enterpriseRestorers SnapshotRestorers

	// stateLock is only used to protect outside callers to State() from
	// racing with Restore(), which is called by Raft (it puts in a totally
	// new state store). Everything internal here is synchronized by the
	// Raft side, so doesn't need to lock this.
	stateLock sync.RWMutex
}
// nomadSnapshot is used to provide a snapshot of the current
// state in a way that can be accessed concurrently with operations
// that may modify the live state.
type nomadSnapshot struct {
	// snap is the point-in-time state store snapshot being persisted.
	snap *state.StateSnapshot
	// timetable is serialized alongside the state (see Persist).
	timetable *TimeTable
}
// snapshotHeader is the first entry in our snapshot. It currently
// carries no fields.
type snapshotHeader struct {
}
// FSMConfig is used to configure the FSM
type FSMConfig struct {
	// EvalBroker is the evaluation broker evaluations should be added to
	EvalBroker *EvalBroker

	// Periodic is the periodic job dispatcher that periodic jobs should be
	// added/removed from
	Periodic *PeriodicDispatch

	// BlockedEvals is the blocked eval tracker that blocked evaluations should
	// be added to.
	Blocked *BlockedEvals

	// Logger is the logger used by the FSM
	Logger log.Logger

	// Region is the region of the server embedding the FSM
	Region string
}
// NewFSM is used to construct a new FSM with a blank state.
// (The comment previously misnamed the function "NewFSMPath".)
func NewFSM(config *FSMConfig) (*nomadFSM, error) {
	// Create a state store
	sconfig := &state.StateStoreConfig{
		Logger: config.Logger,
		Region: config.Region,
	}
	// Named stateStore so the local does not shadow the state package.
	stateStore, err := state.NewStateStore(sconfig)
	if err != nil {
		return nil, err
	}

	fsm := &nomadFSM{
		evalBroker:          config.EvalBroker,
		periodicDispatcher:  config.Periodic,
		blockedEvals:        config.Blocked,
		logger:              config.Logger.Named("fsm"),
		config:              config,
		state:               stateStore,
		timetable:           NewTimeTable(timeTableGranularity, timeTableLimit),
		enterpriseAppliers:  make(map[structs.MessageType]LogApplier, 8),
		enterpriseRestorers: make(map[SnapshotType]SnapshotRestorer, 8),
	}

	// Register all the log applier functions
	fsm.registerLogAppliers()

	// Register all the snapshot restorer functions
	fsm.registerSnapshotRestorers()

	return fsm, nil
}
// Close is used to cleanup resources associated with the FSM.
// It is currently a no-op.
func (n *nomadFSM) Close() error {
	return nil
}
// State returns a handle to the current state store. The read lock
// guards against Restore swapping in a new store concurrently.
func (n *nomadFSM) State() *state.StateStore {
	n.stateLock.RLock()
	current := n.state
	n.stateLock.RUnlock()
	return current
}
// TimeTable returns the time table of transactions, which maps Raft
// indexes to wall-clock times (witnessed in Apply).
func (n *nomadFSM) TimeTable() *TimeTable {
	return n.timetable
}
// Apply is the Raft log application entry point. The first byte of the
// log data is the message type; the rest is the msgpack-encoded request
// handed to the matching apply* method. The return value is surfaced to
// the caller of raft.Apply.
func (n *nomadFSM) Apply(log *raft.Log) interface{} {
	buf := log.Data
	msgType := structs.MessageType(buf[0])

	// Witness this write
	n.timetable.Witness(log.Index, time.Now().UTC())

	// Check if this message type should be ignored when unknown. This is
	// used so that new commands can be added with developer control if older
	// versions can safely ignore the command, or if they should crash.
	ignoreUnknown := false
	if msgType&structs.IgnoreUnknownTypeFlag == structs.IgnoreUnknownTypeFlag {
		msgType &= ^structs.IgnoreUnknownTypeFlag
		ignoreUnknown = true
	}

	switch msgType {
	case structs.NodeRegisterRequestType:
		return n.applyUpsertNode(buf[1:], log.Index)
	case structs.NodeDeregisterRequestType:
		return n.applyDeregisterNode(buf[1:], log.Index)
	case structs.NodeUpdateStatusRequestType:
		return n.applyStatusUpdate(buf[1:], log.Index)
	case structs.NodeUpdateDrainRequestType:
		return n.applyDrainUpdate(buf[1:], log.Index)
	case structs.JobRegisterRequestType:
		return n.applyUpsertJob(buf[1:], log.Index)
	case structs.JobDeregisterRequestType:
		return n.applyDeregisterJob(buf[1:], log.Index)
	case structs.EvalUpdateRequestType:
		return n.applyUpdateEval(buf[1:], log.Index)
	case structs.EvalDeleteRequestType:
		return n.applyDeleteEval(buf[1:], log.Index)
	case structs.AllocUpdateRequestType:
		return n.applyAllocUpdate(buf[1:], log.Index)
	case structs.AllocClientUpdateRequestType:
		return n.applyAllocClientUpdate(buf[1:], log.Index)
	case structs.ReconcileJobSummariesRequestType:
		return n.applyReconcileSummaries(buf[1:], log.Index)
	case structs.VaultAccessorRegisterRequestType:
		return n.applyUpsertVaultAccessor(buf[1:], log.Index)
	case structs.VaultAccessorDeregisterRequestType:
		return n.applyDeregisterVaultAccessor(buf[1:], log.Index)
	case structs.ApplyPlanResultsRequestType:
		return n.applyPlanResults(buf[1:], log.Index)
	case structs.DeploymentStatusUpdateRequestType:
		return n.applyDeploymentStatusUpdate(buf[1:], log.Index)
	case structs.DeploymentPromoteRequestType:
		return n.applyDeploymentPromotion(buf[1:], log.Index)
	case structs.DeploymentAllocHealthRequestType:
		return n.applyDeploymentAllocHealth(buf[1:], log.Index)
	case structs.DeploymentDeleteRequestType:
		return n.applyDeploymentDelete(buf[1:], log.Index)
	case structs.JobStabilityRequestType:
		return n.applyJobStability(buf[1:], log.Index)
	case structs.ACLPolicyUpsertRequestType:
		return n.applyACLPolicyUpsert(buf[1:], log.Index)
	case structs.ACLPolicyDeleteRequestType:
		return n.applyACLPolicyDelete(buf[1:], log.Index)
	case structs.ACLTokenUpsertRequestType:
		return n.applyACLTokenUpsert(buf[1:], log.Index)
	case structs.ACLTokenDeleteRequestType:
		return n.applyACLTokenDelete(buf[1:], log.Index)
	case structs.ACLTokenBootstrapRequestType:
		return n.applyACLTokenBootstrap(buf[1:], log.Index)
	case structs.AutopilotRequestType:
		return n.applyAutopilotUpdate(buf[1:], log.Index)
	case structs.UpsertNodeEventsType:
		return n.applyUpsertNodeEvent(buf[1:], log.Index)
	case structs.JobBatchDeregisterRequestType:
		return n.applyBatchDeregisterJob(buf[1:], log.Index)
	case structs.AllocUpdateDesiredTransitionRequestType:
		return n.applyAllocUpdateDesiredTransition(buf[1:], log.Index)
	case structs.NodeUpdateEligibilityRequestType:
		return n.applyNodeEligibilityUpdate(buf[1:], log.Index)
	case structs.BatchNodeUpdateDrainRequestType:
		return n.applyBatchDrainUpdate(buf[1:], log.Index)
	case structs.SchedulerConfigRequestType:
		return n.applySchedulerConfigUpdate(buf[1:], log.Index)
	}

	// Check enterprise only message types.
	if applier, ok := n.enterpriseAppliers[msgType]; ok {
		return applier(buf[1:], log.Index)
	}

	// We didn't match anything, either panic or ignore
	if ignoreUnknown {
		n.logger.Warn("ignoring unknown message type, upgrade to newer version", "msg_type", msgType)
		return nil
	}

	panic(fmt.Errorf("failed to apply request: %#v", buf))
}
// applyUpsertNode registers or updates a node in the state store and
// unblocks evaluations for its computed class when the node is ready.
func (n *nomadFSM) applyUpsertNode(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "register_node"}, time.Now())
	var request structs.NodeRegisterRequest
	if err := structs.Decode(buf, &request); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}

	// Handle upgrade paths
	node := request.Node
	node.Canonicalize()

	if upsertErr := n.state.UpsertNode(index, node); upsertErr != nil {
		n.logger.Error("UpsertNode failed", "error", upsertErr)
		return upsertErr
	}

	// Unblock evals for the nodes computed node class if it is in a ready
	// state.
	if node.Status == structs.NodeStatusReady {
		n.blockedEvals.Unblock(node.ComputedClass, index)
	}

	return nil
}
// applyDeregisterNode removes one or more nodes from the state store.
func (n *nomadFSM) applyDeregisterNode(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "deregister_node"}, time.Now())
	var request structs.NodeDeregisterRequest
	if err := structs.Decode(buf, &request); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}

	// Messages pre 0.9.3 use a single NodeID
	targets := request.NodeIDs
	if len(targets) == 0 {
		targets = []string{request.NodeID}
	}

	if err := n.state.DeleteNode(index, targets); err != nil {
		n.logger.Error("DeleteNode failed", "error", err)
		return err
	}

	return nil
}
// applyStatusUpdate updates a node's status and, when the node becomes
// ready, unblocks evaluations for its computed class.
func (n *nomadFSM) applyStatusUpdate(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "node_status_update"}, time.Now())
	var req structs.NodeUpdateStatusRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}

	if err := n.state.UpdateNodeStatus(index, req.NodeID, req.Status, req.UpdatedAt, req.NodeEvent); err != nil {
		n.logger.Error("UpdateNodeStatus failed", "error", err)
		return err
	}

	// Unblock evals for the nodes computed node class if it is in a ready
	// state.
	if req.Status == structs.NodeStatusReady {
		ws := memdb.NewWatchSet()
		node, err := n.state.NodeByID(ws, req.NodeID)
		if err != nil {
			n.logger.Error("looking up node failed", "node_id", req.NodeID, "error", err)
			return err
		}
		// NodeByID can return a nil node without an error (see the guard
		// in applyAllocClientUpdate); previously this dereferenced nil.
		// Unblocking is best-effort, so skip it rather than fail.
		if node == nil {
			n.logger.Error("node not found while unblocking evals", "node_id", req.NodeID)
			return nil
		}
		n.blockedEvals.Unblock(node.ComputedClass, index)
	}

	return nil
}
// applyDrainUpdate sets or clears a node's drain strategy.
func (n *nomadFSM) applyDrainUpdate(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "node_drain_update"}, time.Now())
	var request structs.NodeUpdateDrainRequest
	if err := structs.Decode(buf, &request); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}

	// COMPAT Remove in version 0.10
	// As part of Nomad 0.8 we have deprecated the drain boolean in favor of a
	// drain strategy but we need to handle the upgrade path where the Raft log
	// contains drain updates with just the drain boolean being manipulated.
	if request.DrainStrategy == nil && request.Drain {
		// Mark the drain strategy as a force to imitate the old style drain
		// functionality.
		request.DrainStrategy = &structs.DrainStrategy{
			DrainSpec: structs.DrainSpec{
				Deadline: -1 * time.Second,
			},
		}
	}

	err := n.state.UpdateNodeDrain(index, request.NodeID, request.DrainStrategy, request.MarkEligible, request.UpdatedAt, request.NodeEvent)
	if err != nil {
		n.logger.Error("UpdateNodeDrain failed", "error", err)
		return err
	}
	return nil
}
// applyBatchDrainUpdate applies drain updates for a batch of nodes.
func (n *nomadFSM) applyBatchDrainUpdate(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "batch_node_drain_update"}, time.Now())
	var request structs.BatchNodeUpdateDrainRequest
	if err := structs.Decode(buf, &request); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}

	err := n.state.BatchUpdateNodeDrain(index, request.UpdatedAt, request.Updates, request.NodeEvents)
	if err != nil {
		n.logger.Error("BatchUpdateNodeDrain failed", "error", err)
		return err
	}
	return nil
}
// applyNodeEligibilityUpdate changes a node's scheduling eligibility and
// unblocks evaluations when it transitions from ineligible to eligible.
func (n *nomadFSM) applyNodeEligibilityUpdate(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "node_eligibility_update"}, time.Now())
	var request structs.NodeUpdateEligibilityRequest
	if err := structs.Decode(buf, &request); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}

	// Lookup the node before the update so the prior eligibility is known.
	before, err := n.state.NodeByID(nil, request.NodeID)
	if err != nil {
		n.logger.Error("UpdateNodeEligibility failed to lookup node", "node_id", request.NodeID, "error", err)
		return err
	}

	err = n.state.UpdateNodeEligibility(index, request.NodeID, request.Eligibility, request.UpdatedAt, request.NodeEvent)
	if err != nil {
		n.logger.Error("UpdateNodeEligibility failed", "error", err)
		return err
	}

	// Unblock evals for the nodes computed node class if it went from
	// ineligible to eligible.
	if before != nil &&
		before.SchedulingEligibility == structs.NodeSchedulingIneligible &&
		request.Eligibility == structs.NodeSchedulingEligible {
		n.blockedEvals.Unblock(before.ComputedClass, index)
	}

	return nil
}
// applyUpsertJob registers or updates a job, keeps the periodic dispatcher
// in sync, and records launch times for periodic jobs and dispatched
// children of periodic jobs.
func (n *nomadFSM) applyUpsertJob(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "register_job"}, time.Now())
	var req structs.JobRegisterRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}

	/* Handle upgrade paths:
	 * - Empty maps and slices should be treated as nil to avoid
	 *   un-intended destructive updates in scheduler since we use
	 *   reflect.DeepEqual. Starting Nomad 0.4.1, job submission sanitizes
	 *   the incoming job.
	 * - Migrate from old style upgrade stanza that used only a stagger.
	 */
	req.Job.Canonicalize()

	if err := n.state.UpsertJob(index, req.Job); err != nil {
		n.logger.Error("UpsertJob failed", "error", err)
		return err
	}

	// We always add the job to the periodic dispatcher because there is the
	// possibility that the periodic spec was removed and then we should stop
	// tracking it.
	if err := n.periodicDispatcher.Add(req.Job); err != nil {
		n.logger.Error("periodicDispatcher.Add failed", "error", err)
		return fmt.Errorf("failed adding job to periodic dispatcher: %v", err)
	}

	// Create a watch set
	ws := memdb.NewWatchSet()

	// If it is an active periodic job, record the time it was inserted. This is
	// necessary for recovering during leader election. It is possible that from
	// the time it is added to when it was suppose to launch, leader election
	// occurs and the job was not launched. In this case, we use the insertion
	// time to determine if a launch was missed.
	if req.Job.IsPeriodicActive() {
		prevLaunch, err := n.state.PeriodicLaunchByID(ws, req.Namespace, req.Job.ID)
		if err != nil {
			n.logger.Error("PeriodicLaunchByID failed", "error", err)
			return err
		}

		// Record the insertion time as a launch. We overload the launch table
		// such that the first entry is the insertion time.
		if prevLaunch == nil {
			launch := &structs.PeriodicLaunch{
				ID:        req.Job.ID,
				Namespace: req.Namespace,
				Launch:    time.Now(),
			}
			if err := n.state.UpsertPeriodicLaunch(index, launch); err != nil {
				n.logger.Error("UpsertPeriodicLaunch failed", "error", err)
				return err
			}
		}
	}

	// Check if the parent job is periodic and mark the launch time.
	parentID := req.Job.ParentID
	if parentID != "" {
		parent, err := n.state.JobByID(ws, req.Namespace, parentID)
		if err != nil {
			n.logger.Error("JobByID lookup for parent failed", "parent_id", parentID, "namespace", req.Namespace, "error", err)
			return err
		} else if parent == nil {
			// The parent has been deregistered.
			return nil
		}

		if parent.IsPeriodic() && !parent.IsParameterized() {
			t, err := n.periodicDispatcher.LaunchTime(req.Job.ID)
			if err != nil {
				n.logger.Error("LaunchTime failed", "job", req.Job.NamespacedID(), "error", err)
				return err
			}

			launch := &structs.PeriodicLaunch{
				ID:        parentID,
				Namespace: req.Namespace,
				Launch:    t,
			}
			if err := n.state.UpsertPeriodicLaunch(index, launch); err != nil {
				n.logger.Error("UpsertPeriodicLaunch failed", "error", err)
				return err
			}
		}
	}

	return nil
}
// applyDeregisterJob stops or purges a single job inside one write
// transaction.
func (n *nomadFSM) applyDeregisterJob(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "deregister_job"}, time.Now())
	var request structs.JobDeregisterRequest
	if err := structs.Decode(buf, &request); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}

	return n.state.WithWriteTransaction(func(tx state.Txn) error {
		err := n.handleJobDeregister(index, request.JobID, request.Namespace, request.Purge, tx)
		if err != nil {
			n.logger.Error("deregistering job failed", "error", err)
		}
		return err
	})
}
// applyBatchDeregisterJob deregisters a set of jobs and upserts their
// evaluations in a single state-store transaction, then performs the
// eval side effects outside the transaction.
func (n *nomadFSM) applyBatchDeregisterJob(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "batch_deregister_job"}, time.Now())
	var req structs.JobBatchDeregisterRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}

	// Perform all store updates atomically to ensure a consistent view for store readers.
	// A partial update may increment the snapshot index, allowing eval brokers to process
	// evals for jobs whose deregistering didn't get committed yet.
	err := n.state.WithWriteTransaction(func(tx state.Txn) error {
		for jobNS, options := range req.Jobs {
			if err := n.handleJobDeregister(index, jobNS.ID, jobNS.Namespace, options.Purge, tx); err != nil {
				n.logger.Error("deregistering job failed", "job", jobNS, "error", err)
				return err
			}
		}

		if err := n.state.UpsertEvalsTxn(index, req.Evals, tx); err != nil {
			n.logger.Error("UpsertEvals failed", "error", err)
			return err
		}

		return nil
	})
	if err != nil {
		return err
	}

	// perform the side effects outside the transactions
	n.handleUpsertedEvals(req.Evals)
	return nil
}
// handleJobDeregister is used to deregister a job. With purge the job and
// its periodic launch entry are deleted; otherwise the job is marked
// stopped and re-upserted. Must be called inside a write transaction.
func (n *nomadFSM) handleJobDeregister(index uint64, jobID, namespace string, purge bool, tx state.Txn) error {
	// If it is periodic remove it from the dispatcher
	if err := n.periodicDispatcher.Remove(namespace, jobID); err != nil {
		n.logger.Error("periodicDispatcher.Remove failed", "error", err)
		return err
	}

	if purge {
		if err := n.state.DeleteJobTxn(index, namespace, jobID, tx); err != nil {
			n.logger.Error("DeleteJob failed", "error", err)
			return err
		}

		// We always delete from the periodic launch table because it is possible that
		// the job was updated to be non-periodic, thus checking if it is periodic
		// doesn't ensure we clean it up properly. The delete is best-effort:
		// the error is deliberately ignored since a launch entry may not
		// exist for this job.
		n.state.DeletePeriodicLaunchTxn(index, namespace, jobID, tx)
	} else {
		// Get the current job and mark it as stopped and re-insert it.
		ws := memdb.NewWatchSet()
		current, err := n.state.JobByIDTxn(ws, namespace, jobID, tx)
		if err != nil {
			n.logger.Error("JobByID lookup failed", "error", err)
			return err
		}

		if current == nil {
			return fmt.Errorf("job %q in namespace %q doesn't exist to be deregistered", jobID, namespace)
		}

		stopped := current.Copy()
		stopped.Stop = true

		if err := n.state.UpsertJobTxn(index, stopped, tx); err != nil {
			n.logger.Error("UpsertJob failed", "error", err)
			return err
		}
	}

	return nil
}
// applyUpdateEval decodes an eval update request and upserts the evals.
func (n *nomadFSM) applyUpdateEval(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "update_eval"}, time.Now())
	var request structs.EvalUpdateRequest
	if err := structs.Decode(buf, &request); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}
	return n.upsertEvals(index, request.Evals)
}
// upsertEvals writes the evaluations to the state store and then performs
// the broker/blocked-eval side effects.
func (n *nomadFSM) upsertEvals(index uint64, evals []*structs.Evaluation) error {
	err := n.state.UpsertEvals(index, evals)
	if err != nil {
		n.logger.Error("UpsertEvals failed", "error", err)
		return err
	}
	n.handleUpsertedEvals(evals)
	return nil
}
// handleUpsertedEvals is a helper for taking action after upserting
// evaluations. (The comment previously misnamed the function.)
func (n *nomadFSM) handleUpsertedEvals(evals []*structs.Evaluation) {
	for _, eval := range evals {
		n.handleUpsertedEval(eval)
	}
}
// handleUpsertedEval is a helper for taking action after upserting an
// eval: enqueue it, block it, or untrack a previously blocked eval.
// (The comment previously misnamed the function.)
func (n *nomadFSM) handleUpsertedEval(eval *structs.Evaluation) {
	if eval == nil {
		return
	}

	if eval.ShouldEnqueue() {
		n.evalBroker.Enqueue(eval)
	} else if eval.ShouldBlock() {
		n.blockedEvals.Block(eval)
	} else if eval.Status == structs.EvalStatusComplete &&
		len(eval.FailedTGAllocs) == 0 {
		// If we have a successful evaluation for a node, untrack any
		// blocked evaluation
		n.blockedEvals.Untrack(eval.JobID, eval.Namespace)
	}
}
// applyDeleteEval removes evaluations and allocations from the state store.
func (n *nomadFSM) applyDeleteEval(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "delete_eval"}, time.Now())
	var request structs.EvalDeleteRequest
	if err := structs.Decode(buf, &request); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}

	err := n.state.DeleteEval(index, request.Evals, request.Allocs)
	if err != nil {
		n.logger.Error("DeleteEval failed", "error", err)
		return err
	}
	return nil
}
// applyAllocUpdate upserts scheduler-placed allocations, denormalizing
// the shared job and (for old logs) computing total resources first.
func (n *nomadFSM) applyAllocUpdate(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "alloc_update"}, time.Now())
	var req structs.AllocUpdateRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}

	// Attach the job to all the allocations. It is pulled out in the
	// payload to avoid the redundancy of encoding, but should be denormalized
	// prior to being inserted into MemDB.
	structs.DenormalizeAllocationJobs(req.Job, req.Alloc)

	// COMPAT(0.11): Remove in 0.11
	// Calculate the total resources of allocations. It is pulled out in the
	// payload to avoid encoding something that can be computed, but should be
	// denormalized prior to being inserted into MemDB.
	for _, alloc := range req.Alloc {
		if alloc.Resources != nil {
			continue
		}

		alloc.Resources = new(structs.Resources)
		for _, task := range alloc.TaskResources {
			alloc.Resources.Add(task)
		}

		// Add the shared resources
		alloc.Resources.Add(alloc.SharedResources)
	}

	if err := n.state.UpsertAllocs(index, req.Alloc); err != nil {
		n.logger.Error("UpsertAllocs failed", "error", err)
		return err
	}
	return nil
}
// applyAllocClientUpdate applies client-reported allocation status
// changes, upserts any attached evals, and unblocks evaluations for the
// node's class/quota when an allocation terminates.
func (n *nomadFSM) applyAllocClientUpdate(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "alloc_client_update"}, time.Now())
	var req structs.AllocUpdateRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}
	if len(req.Alloc) == 0 {
		return nil
	}

	// Create a watch set
	ws := memdb.NewWatchSet()

	// Updating the allocs with the job id and task group name
	for _, alloc := range req.Alloc {
		if existing, _ := n.state.AllocByID(ws, alloc.ID); existing != nil {
			alloc.JobID = existing.JobID
			alloc.TaskGroup = existing.TaskGroup
		}
	}

	// Update all the client allocations
	if err := n.state.UpdateAllocsFromClient(index, req.Alloc); err != nil {
		n.logger.Error("UpdateAllocFromClient failed", "error", err)
		return err
	}

	// Update any evals
	if len(req.Evals) > 0 {
		if err := n.upsertEvals(index, req.Evals); err != nil {
			n.logger.Error("applyAllocClientUpdate failed to update evaluations", "error", err)
			return err
		}
	}

	// Unblock evals for the nodes computed node class if the client has
	// finished running an allocation.
	for _, alloc := range req.Alloc {
		if alloc.ClientStatus == structs.AllocClientStatusComplete ||
			alloc.ClientStatus == structs.AllocClientStatusFailed {
			nodeID := alloc.NodeID
			node, err := n.state.NodeByID(ws, nodeID)
			if err != nil || node == nil {
				n.logger.Error("looking up node failed", "node_id", nodeID, "error", err)
				return err

			}

			// Unblock any associated quota
			quota, err := n.allocQuota(alloc.ID)
			if err != nil {
				n.logger.Error("looking up quota associated with alloc failed", "alloc_id", alloc.ID, "error", err)
				return err
			}

			n.blockedEvals.UnblockClassAndQuota(node.ComputedClass, quota, index)
		}
	}

	return nil
}
// applyAllocUpdateDesiredTransition is used to update the desired transitions
// of a set of allocations, then handle the side effects of any evals that
// were written alongside.
func (n *nomadFSM) applyAllocUpdateDesiredTransition(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "alloc_update_desired_transition"}, time.Now())
	var request structs.AllocUpdateDesiredTransitionRequest
	if err := structs.Decode(buf, &request); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}

	err := n.state.UpdateAllocsDesiredTransitions(index, request.Allocs, request.Evals)
	if err != nil {
		n.logger.Error("UpdateAllocsDesiredTransitions failed", "error", err)
		return err
	}

	n.handleUpsertedEvals(request.Evals)
	return nil
}
// applyReconcileSummaries reconciles summaries for all the jobs, then
// recomputes queued allocation counts.
func (n *nomadFSM) applyReconcileSummaries(buf []byte, index uint64) interface{} {
	if err := n.state.ReconcileJobSummaries(index); err != nil {
		return err
	}
	return n.reconcileQueuedAllocations(index)
}
// applyUpsertNodeEvent tracks the given node events.
func (n *nomadFSM) applyUpsertNodeEvent(buf []byte, index uint64) interface{} {
defer metrics.MeasureSince([]string{"nomad", "fsm", "upsert_node_events"}, time.Now())
var req structs.EmitNodeEventsRequest
if err := structs.Decode(buf, &req); err != nil {
panic(fmt.Errorf("failed to decode EmitNodeEventsRequest: %v", err))
}
if err := n.state.UpsertNodeEvents(index, req.NodeEvents); err != nil {
n.logger.Error("failed to add node events", "error", err)
return err
}
return nil
}
// applyUpsertVaultAccessor stores the Vault accessors for a given allocation
// and task
func (n *nomadFSM) applyUpsertVaultAccessor(buf []byte, index uint64) interface{} {
defer metrics.MeasureSince([]string{"nomad", "fsm", "upsert_vault_accessor"}, time.Now())
var req structs.VaultAccessorsRequest
if err := structs.Decode(buf, &req); err != nil {
panic(fmt.Errorf("failed to decode request: %v", err))
}
if err := n.state.UpsertVaultAccessor(index, req.Accessors); err != nil {
n.logger.Error("UpsertVaultAccessor failed", "error", err)
return err
}
return nil
}
// applyDeregisterVaultAccessor deregisters a set of Vault accessors
func (n *nomadFSM) applyDeregisterVaultAccessor(buf []byte, index uint64) interface{} {
defer metrics.MeasureSince([]string{"nomad", "fsm", "deregister_vault_accessor"}, time.Now())
var req structs.VaultAccessorsRequest
if err := structs.Decode(buf, &req); err != nil {
panic(fmt.Errorf("failed to decode request: %v", err))
}
if err := n.state.DeleteVaultAccessors(index, req.Accessors); err != nil {
n.logger.Error("DeregisterVaultAccessor failed", "error", err)
return err
}
return nil
}
// applyPlanResults applies the results of a plan application.
func (n *nomadFSM) applyPlanResults(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_plan_results"}, time.Now())
	var req structs.ApplyPlanResultsRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}
	if err := n.state.UpsertPlanResults(index, &req); err != nil {
		n.logger.Error("ApplyPlan failed", "error", err)
		return err
	}

	// Add evals for jobs that were preempted
	n.handleUpsertedEvals(req.PreemptionEvals)
	return nil
}

// applyDeploymentStatusUpdate is used to update the status of an existing
// deployment
func (n *nomadFSM) applyDeploymentStatusUpdate(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_deployment_status_update"}, time.Now())
	var req structs.DeploymentStatusUpdateRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}
	if err := n.state.UpdateDeploymentStatus(index, &req); err != nil {
		n.logger.Error("UpsertDeploymentStatusUpdate failed", "error", err)
		return err
	}

	// Surface the (optional) evaluation created with the status update.
	n.handleUpsertedEval(req.Eval)
	return nil
}

// applyDeploymentPromotion is used to promote canaries in a deployment
func (n *nomadFSM) applyDeploymentPromotion(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_deployment_promotion"}, time.Now())
	var req structs.ApplyDeploymentPromoteRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}
	if err := n.state.UpdateDeploymentPromotion(index, &req); err != nil {
		n.logger.Error("UpsertDeploymentPromotion failed", "error", err)
		return err
	}
	n.handleUpsertedEval(req.Eval)
	return nil
}

// applyDeploymentAllocHealth is used to set the health of allocations as part
// of a deployment
func (n *nomadFSM) applyDeploymentAllocHealth(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_deployment_alloc_health"}, time.Now())
	var req structs.ApplyDeploymentAllocHealthRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}
	if err := n.state.UpdateDeploymentAllocHealth(index, &req); err != nil {
		n.logger.Error("UpsertDeploymentAllocHealth failed", "error", err)
		return err
	}
	n.handleUpsertedEval(req.Eval)
	return nil
}

// applyDeploymentDelete is used to delete a set of deployments
func (n *nomadFSM) applyDeploymentDelete(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_deployment_delete"}, time.Now())
	var req structs.DeploymentDeleteRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}
	if err := n.state.DeleteDeployment(index, req.Deployments); err != nil {
		n.logger.Error("DeleteDeployment failed", "error", err)
		return err
	}
	return nil
}

// applyJobStability is used to set the stability of a job
func (n *nomadFSM) applyJobStability(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_job_stability"}, time.Now())
	var req structs.JobStabilityRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}
	if err := n.state.UpdateJobStability(index, req.Namespace, req.JobID, req.JobVersion, req.Stable); err != nil {
		n.logger.Error("UpdateJobStability failed", "error", err)
		return err
	}
	return nil
}
// applyACLPolicyUpsert is used to upsert a set of policies
func (n *nomadFSM) applyACLPolicyUpsert(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_acl_policy_upsert"}, time.Now())
	var req structs.ACLPolicyUpsertRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}
	if err := n.state.UpsertACLPolicies(index, req.Policies); err != nil {
		n.logger.Error("UpsertACLPolicies failed", "error", err)
		return err
	}
	return nil
}

// applyACLPolicyDelete is used to delete a set of policies
func (n *nomadFSM) applyACLPolicyDelete(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_acl_policy_delete"}, time.Now())
	var req structs.ACLPolicyDeleteRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}
	if err := n.state.DeleteACLPolicies(index, req.Names); err != nil {
		n.logger.Error("DeleteACLPolicies failed", "error", err)
		return err
	}
	return nil
}

// applyACLTokenUpsert is used to upsert a set of ACL tokens
func (n *nomadFSM) applyACLTokenUpsert(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_acl_token_upsert"}, time.Now())
	var req structs.ACLTokenUpsertRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}
	if err := n.state.UpsertACLTokens(index, req.Tokens); err != nil {
		n.logger.Error("UpsertACLTokens failed", "error", err)
		return err
	}
	return nil
}

// applyACLTokenDelete is used to delete a set of ACL tokens by accessor ID
func (n *nomadFSM) applyACLTokenDelete(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_acl_token_delete"}, time.Now())
	var req structs.ACLTokenDeleteRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}
	if err := n.state.DeleteACLTokens(index, req.AccessorIDs); err != nil {
		n.logger.Error("DeleteACLTokens failed", "error", err)
		return err
	}
	return nil
}

// applyACLTokenBootstrap is used to bootstrap an ACL token
func (n *nomadFSM) applyACLTokenBootstrap(buf []byte, index uint64) interface{} {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_acl_token_bootstrap"}, time.Now())
	var req structs.ACLTokenBootstrapRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}
	if err := n.state.BootstrapACLTokens(index, req.ResetIndex, req.Token); err != nil {
		n.logger.Error("BootstrapACLToken failed", "error", err)
		return err
	}
	return nil
}

// applyAutopilotUpdate applies an autopilot configuration change, optionally
// as a check-and-set operation guarded by the config's ModifyIndex.
func (n *nomadFSM) applyAutopilotUpdate(buf []byte, index uint64) interface{} {
	var req structs.AutopilotSetConfigRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}
	defer metrics.MeasureSince([]string{"nomad", "fsm", "autopilot"}, time.Now())

	if req.CAS {
		// CAS: returns whether the compare-and-set actually applied.
		act, err := n.state.AutopilotCASConfig(index, req.Config.ModifyIndex, &req.Config)
		if err != nil {
			return err
		}
		return act
	}
	return n.state.AutopilotSetConfig(index, &req.Config)
}

// applySchedulerConfigUpdate applies a scheduler configuration change,
// optionally as a check-and-set operation guarded by the config's ModifyIndex.
func (n *nomadFSM) applySchedulerConfigUpdate(buf []byte, index uint64) interface{} {
	var req structs.SchedulerSetConfigRequest
	if err := structs.Decode(buf, &req); err != nil {
		panic(fmt.Errorf("failed to decode request: %v", err))
	}
	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_scheduler_config"}, time.Now())

	if req.CAS {
		applied, err := n.state.SchedulerCASConfig(index, req.Config.ModifyIndex, &req.Config)
		if err != nil {
			return err
		}
		return applied
	}
	return n.state.SchedulerSetConfig(index, &req.Config)
}
// Snapshot implements raft.FSM by returning a point-in-time snapshot of the
// state store paired with the FSM's timetable, for later persistence via
// (*nomadSnapshot).Persist.
func (n *nomadFSM) Snapshot() (raft.FSMSnapshot, error) {
	// Create a new snapshot
	snap, err := n.state.Snapshot()
	if err != nil {
		return nil, err
	}

	ns := &nomadSnapshot{
		snap:      snap,
		timetable: n.timetable,
	}
	return ns, nil
}
// Restore implements raft.FSM: it rebuilds the entire state store from a
// snapshot stream produced by (*nomadSnapshot).Persist, then atomically swaps
// the new store in for the old one. The stream format is a msgpack-encoded
// header followed by repeated (1-byte type tag, msgpack payload) records.
func (n *nomadFSM) Restore(old io.ReadCloser) error {
	defer old.Close()

	// Create a new state store
	config := &state.StateStoreConfig{
		Logger: n.config.Logger,
		Region: n.config.Region,
	}
	newState, err := state.NewStateStore(config)
	if err != nil {
		return err
	}

	// Start the state restore; Abort is a no-op after a successful Commit.
	restore, err := newState.Restore()
	if err != nil {
		return err
	}
	defer restore.Abort()

	// Create a decoder
	dec := codec.NewDecoder(old, structs.MsgpackHandle)

	// Read in the header
	var header snapshotHeader
	if err := dec.Decode(&header); err != nil {
		return err
	}

	// Populate the new state
	msgType := make([]byte, 1)
	for {
		// Read the 1-byte record type; EOF terminates the stream cleanly.
		_, err := old.Read(msgType)
		if err == io.EOF {
			break
		} else if err != nil {
			return err
		}

		// Decode the payload according to the record type.
		snapType := SnapshotType(msgType[0])
		switch snapType {
		case TimeTableSnapshot:
			if err := n.timetable.Deserialize(dec); err != nil {
				return fmt.Errorf("time table deserialize failed: %v", err)
			}

		case NodeSnapshot:
			node := new(structs.Node)
			if err := dec.Decode(node); err != nil {
				return err
			}

			// Handle upgrade paths
			node.Canonicalize()

			if err := restore.NodeRestore(node); err != nil {
				return err
			}

		case JobSnapshot:
			job := new(structs.Job)
			if err := dec.Decode(job); err != nil {
				return err
			}

			/* Handle upgrade paths:
			 * - Empty maps and slices should be treated as nil to avoid
			 *   un-intended destructive updates in scheduler since we use
			 *   reflect.DeepEqual. Starting Nomad 0.4.1, job submission sanitizes
			 *   the incoming job.
			 * - Migrate from old style upgrade stanza that used only a stagger.
			 */
			job.Canonicalize()

			if err := restore.JobRestore(job); err != nil {
				return err
			}

		case EvalSnapshot:
			eval := new(structs.Evaluation)
			if err := dec.Decode(eval); err != nil {
				return err
			}

			if err := restore.EvalRestore(eval); err != nil {
				return err
			}

		case AllocSnapshot:
			alloc := new(structs.Allocation)
			if err := dec.Decode(alloc); err != nil {
				return err
			}

			if err := restore.AllocRestore(alloc); err != nil {
				return err
			}

		case IndexSnapshot:
			idx := new(state.IndexEntry)
			if err := dec.Decode(idx); err != nil {
				return err
			}
			if err := restore.IndexRestore(idx); err != nil {
				return err
			}

		case PeriodicLaunchSnapshot:
			launch := new(structs.PeriodicLaunch)
			if err := dec.Decode(launch); err != nil {
				return err
			}
			if err := restore.PeriodicLaunchRestore(launch); err != nil {
				return err
			}

		case JobSummarySnapshot:
			summary := new(structs.JobSummary)
			if err := dec.Decode(summary); err != nil {
				return err
			}
			if err := restore.JobSummaryRestore(summary); err != nil {
				return err
			}

		case VaultAccessorSnapshot:
			accessor := new(structs.VaultAccessor)
			if err := dec.Decode(accessor); err != nil {
				return err
			}
			if err := restore.VaultAccessorRestore(accessor); err != nil {
				return err
			}

		case JobVersionSnapshot:
			version := new(structs.Job)
			if err := dec.Decode(version); err != nil {
				return err
			}
			if err := restore.JobVersionRestore(version); err != nil {
				return err
			}

		case DeploymentSnapshot:
			deployment := new(structs.Deployment)
			if err := dec.Decode(deployment); err != nil {
				return err
			}
			if err := restore.DeploymentRestore(deployment); err != nil {
				return err
			}

		case ACLPolicySnapshot:
			policy := new(structs.ACLPolicy)
			if err := dec.Decode(policy); err != nil {
				return err
			}
			if err := restore.ACLPolicyRestore(policy); err != nil {
				return err
			}

		case ACLTokenSnapshot:
			token := new(structs.ACLToken)
			if err := dec.Decode(token); err != nil {
				return err
			}
			if err := restore.ACLTokenRestore(token); err != nil {
				return err
			}

		case SchedulerConfigSnapshot:
			schedConfig := new(structs.SchedulerConfiguration)
			if err := dec.Decode(schedConfig); err != nil {
				return err
			}
			if err := restore.SchedulerConfigRestore(schedConfig); err != nil {
				return err
			}

		default:
			// Check if this is an enterprise only object being restored
			restorer, ok := n.enterpriseRestorers[snapType]
			if !ok {
				return fmt.Errorf("Unrecognized snapshot type: %v", msgType)
			}

			// Restore the enterprise only object
			if err := restorer(restore, dec); err != nil {
				return err
			}
		}
	}

	// NOTE(review): Commit's return value is discarded here — presumably it
	// cannot fail in this codebase's version; confirm against the state
	// package.
	restore.Commit()

	// COMPAT Remove in 0.10
	// Clean up active deployments that do not have a job
	if err := n.failLeakedDeployments(newState); err != nil {
		return err
	}

	// External code might be calling State(), so we need to synchronize
	// here to make sure we swap in the new state store atomically.
	n.stateLock.Lock()
	stateOld := n.state
	n.state = newState
	n.stateLock.Unlock()

	// Signal that the old state store has been abandoned. This is required
	// because we don't operate on it any more, we just throw it away, so
	// blocking queries won't see any changes and need to be woken up.
	stateOld.Abandon()

	return nil
}
// failLeakedDeployments is used to fail deployments that do not have a job.
// This state is a broken invariant that should not occur since 0.8.X.
func (n *nomadFSM) failLeakedDeployments(state *state.StateStore) error {
	// Scan for deployments that are referencing a job that no longer exists.
	// This could happen if multiple deployments were created for a given job
	// and thus the older deployment leaks and then the job is removed.
	iter, err := state.Deployments(nil)
	if err != nil {
		return fmt.Errorf("failed to query deployments: %v", err)
	}

	// Reuse the deployment table's current index for the terminal writes so
	// the index is not advanced artificially.
	dindex, err := state.Index("deployment")
	if err != nil {
		return fmt.Errorf("couldn't fetch index of deployments table: %v", err)
	}

	for {
		raw := iter.Next()
		if raw == nil {
			break
		}

		d := raw.(*structs.Deployment)

		// We are only looking for active deployments where the job no longer
		// exists
		if !d.Active() {
			continue
		}

		// Find the job
		job, err := state.JobByID(nil, d.Namespace, d.JobID)
		if err != nil {
			return fmt.Errorf("failed to lookup job %s from deployment %q: %v", d.JobID, d.ID, err)
		}

		// Job exists.
		if job != nil {
			continue
		}

		// Update the deployment to be terminal; work on a copy so the
		// iterator's view of the table is not mutated in place.
		failed := d.Copy()
		failed.Status = structs.DeploymentStatusCancelled
		failed.StatusDescription = structs.DeploymentStatusDescriptionStoppedJob
		if err := state.UpsertDeployment(dindex, failed); err != nil {
			return fmt.Errorf("failed to mark leaked deployment %q as failed: %v", failed.ID, err)
		}
	}

	return nil
}
// reconcileQueuedAllocations re-calculates the queued allocations for every job that we
// created a Job Summary during the snap shot restore. It does so by running
// each job through the scheduler against a snapshot with a mock planner and
// folding the resulting queued counts back into the job's summary.
func (n *nomadFSM) reconcileQueuedAllocations(index uint64) error {
	// Get all the jobs
	ws := memdb.NewWatchSet()
	iter, err := n.state.Jobs(ws)
	if err != nil {
		return err
	}

	snap, err := n.state.Snapshot()
	if err != nil {
		return fmt.Errorf("unable to create snapshot: %v", err)
	}

	// Invoking the scheduler for every job so that we can populate the number
	// of queued allocations for every job
	for {
		rawJob := iter.Next()
		if rawJob == nil {
			break
		}
		job := rawJob.(*structs.Job)

		// Nothing to do for queued allocations if the job is a parent periodic/parameterized job
		if job.IsParameterized() || job.IsPeriodic() {
			continue
		}

		// The harness planner records plans/evals instead of applying them, so
		// scheduling here has no side effects on real cluster state.
		planner := &scheduler.Harness{
			State: &snap.StateStore,
		}

		// Create an eval and mark it as requiring annotations and insert that as well
		eval := &structs.Evaluation{
			ID:             uuid.Generate(),
			Namespace:      job.Namespace,
			Priority:       job.Priority,
			Type:           job.Type,
			TriggeredBy:    structs.EvalTriggerJobRegister,
			JobID:          job.ID,
			JobModifyIndex: job.JobModifyIndex + 1,
			Status:         structs.EvalStatusPending,
			AnnotatePlan:   true,
		}

		// NOTE(review): the error from UpsertEvals is discarded, and 100 looks
		// like an arbitrary index for the throwaway snapshot — confirm both
		// are intentional.
		snap.UpsertEvals(100, []*structs.Evaluation{eval})

		// Create the scheduler and run it
		sched, err := scheduler.NewScheduler(eval.Type, n.logger, snap, planner)
		if err != nil {
			return err
		}

		if err := sched.Process(eval); err != nil {
			return err
		}

		// Get the job summary from the fsm state store
		originalSummary, err := n.state.JobSummaryByID(ws, job.Namespace, job.ID)
		if err != nil {
			return err
		}
		summary := originalSummary.Copy()

		// Add the allocations scheduler has made to queued since these
		// allocations are never getting placed until the scheduler is invoked
		// with a real planner
		if l := len(planner.Plans); l != 1 {
			return fmt.Errorf("unexpected number of plans during restore %d. Please file an issue including the logs", l)
		}
		for _, allocations := range planner.Plans[0].NodeAllocation {
			for _, allocation := range allocations {
				tgSummary, ok := summary.Summary[allocation.TaskGroup]
				if !ok {
					return fmt.Errorf("task group %q not found while updating queued count", allocation.TaskGroup)
				}
				tgSummary.Queued += 1
				summary.Summary[allocation.TaskGroup] = tgSummary
			}
		}

		// Add the queued allocations attached to the evaluation to the queued
		// counter of the job summary
		if l := len(planner.Evals); l != 1 {
			return fmt.Errorf("unexpected number of evals during restore %d. Please file an issue including the logs", l)
		}
		for tg, queued := range planner.Evals[0].QueuedAllocations {
			tgSummary, ok := summary.Summary[tg]
			if !ok {
				return fmt.Errorf("task group %q not found while updating queued count", tg)
			}

			// We add instead of setting here because we want to take into
			// consideration what the scheduler with a mock planner thinks it
			// placed. Those should be counted as queued as well
			tgSummary.Queued += queued
			summary.Summary[tg] = tgSummary
		}

		// Only write the summary back if reconciliation actually changed it.
		if !reflect.DeepEqual(summary, originalSummary) {
			summary.ModifyIndex = index
			if err := n.state.UpsertJobSummary(index, summary); err != nil {
				return err
			}
		}
	}
	return nil
}
// Persist implements raft.FSMSnapshot by streaming the snapshot's state to
// the sink: a msgpack header, the time table, then one tagged record per
// object, table by table. Any failure cancels the sink so raft discards the
// partial snapshot.
// NOTE(review): the bare sink.Write calls for the type tags ignore their
// error return; the subsequent encoder.Encode on the same sink would
// presumably surface a broken sink — confirm.
func (s *nomadSnapshot) Persist(sink raft.SnapshotSink) error {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "persist"}, time.Now())
	// Register the nodes
	encoder := codec.NewEncoder(sink, structs.MsgpackHandle)

	// Write the header
	header := snapshotHeader{}
	if err := encoder.Encode(&header); err != nil {
		sink.Cancel()
		return err
	}

	// Write the time table
	sink.Write([]byte{byte(TimeTableSnapshot)})
	if err := s.timetable.Serialize(encoder); err != nil {
		sink.Cancel()
		return err
	}

	// Write all the data out
	if err := s.persistIndexes(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	if err := s.persistNodes(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	if err := s.persistJobs(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	if err := s.persistEvals(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	if err := s.persistAllocs(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	if err := s.persistPeriodicLaunches(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	if err := s.persistJobSummaries(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	if err := s.persistVaultAccessors(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	if err := s.persistJobVersions(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	if err := s.persistDeployments(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	if err := s.persistACLPolicies(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	if err := s.persistACLTokens(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	if err := s.persistEnterpriseTables(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	if err := s.persistSchedulerConfig(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	return nil
}
// persistIndexes writes every raft index table entry to the sink, each
// prefixed with the IndexSnapshot type tag.
func (s *nomadSnapshot) persistIndexes(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get all the indexes
	iter, err := s.snap.Indexes()
	if err != nil {
		return err
	}

	for {
		// Get the next item
		raw := iter.Next()
		if raw == nil {
			break
		}

		// Prepare the request struct
		idx := raw.(*state.IndexEntry)

		// Write out an index entry
		sink.Write([]byte{byte(IndexSnapshot)})
		if err := encoder.Encode(idx); err != nil {
			return err
		}
	}
	return nil
}

// persistNodes writes every node to the sink, prefixed with the NodeSnapshot
// type tag.
func (s *nomadSnapshot) persistNodes(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get all the nodes
	ws := memdb.NewWatchSet()
	nodes, err := s.snap.Nodes(ws)
	if err != nil {
		return err
	}

	for {
		// Get the next item
		raw := nodes.Next()
		if raw == nil {
			break
		}

		// Prepare the request struct
		node := raw.(*structs.Node)

		// Write out a node registration
		sink.Write([]byte{byte(NodeSnapshot)})
		if err := encoder.Encode(node); err != nil {
			return err
		}
	}
	return nil
}

// persistJobs writes every job to the sink, prefixed with the JobSnapshot
// type tag.
func (s *nomadSnapshot) persistJobs(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get all the jobs
	ws := memdb.NewWatchSet()
	jobs, err := s.snap.Jobs(ws)
	if err != nil {
		return err
	}

	for {
		// Get the next item
		raw := jobs.Next()
		if raw == nil {
			break
		}

		// Prepare the request struct
		job := raw.(*structs.Job)

		// Write out a job registration
		sink.Write([]byte{byte(JobSnapshot)})
		if err := encoder.Encode(job); err != nil {
			return err
		}
	}
	return nil
}

// persistEvals writes every evaluation to the sink, prefixed with the
// EvalSnapshot type tag.
func (s *nomadSnapshot) persistEvals(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get all the evaluations
	ws := memdb.NewWatchSet()
	evals, err := s.snap.Evals(ws)
	if err != nil {
		return err
	}

	for {
		// Get the next item
		raw := evals.Next()
		if raw == nil {
			break
		}

		// Prepare the request struct
		eval := raw.(*structs.Evaluation)

		// Write out the evaluation
		sink.Write([]byte{byte(EvalSnapshot)})
		if err := encoder.Encode(eval); err != nil {
			return err
		}
	}
	return nil
}

// persistAllocs writes every allocation to the sink, prefixed with the
// AllocSnapshot type tag.
func (s *nomadSnapshot) persistAllocs(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get all the allocations
	ws := memdb.NewWatchSet()
	allocs, err := s.snap.Allocs(ws)
	if err != nil {
		return err
	}

	for {
		// Get the next item
		raw := allocs.Next()
		if raw == nil {
			break
		}

		// Prepare the request struct
		alloc := raw.(*structs.Allocation)

		// Write out the allocation
		sink.Write([]byte{byte(AllocSnapshot)})
		if err := encoder.Encode(alloc); err != nil {
			return err
		}
	}
	return nil
}

// persistPeriodicLaunches writes every periodic launch record to the sink,
// prefixed with the PeriodicLaunchSnapshot type tag.
func (s *nomadSnapshot) persistPeriodicLaunches(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get all the periodic launches
	ws := memdb.NewWatchSet()
	launches, err := s.snap.PeriodicLaunches(ws)
	if err != nil {
		return err
	}

	for {
		// Get the next item
		raw := launches.Next()
		if raw == nil {
			break
		}

		// Prepare the request struct
		launch := raw.(*structs.PeriodicLaunch)

		// Write out a periodic launch record
		sink.Write([]byte{byte(PeriodicLaunchSnapshot)})
		if err := encoder.Encode(launch); err != nil {
			return err
		}
	}
	return nil
}

// persistJobSummaries writes every job summary to the sink, prefixed with the
// JobSummarySnapshot type tag.
func (s *nomadSnapshot) persistJobSummaries(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {

	ws := memdb.NewWatchSet()
	summaries, err := s.snap.JobSummaries(ws)
	if err != nil {
		return err
	}

	for {
		raw := summaries.Next()
		if raw == nil {
			break
		}

		jobSummary := raw.(*structs.JobSummary)

		sink.Write([]byte{byte(JobSummarySnapshot)})
		if err := encoder.Encode(jobSummary); err != nil {
			return err
		}
	}
	return nil
}

// persistVaultAccessors writes every Vault accessor to the sink, prefixed
// with the VaultAccessorSnapshot type tag.
func (s *nomadSnapshot) persistVaultAccessors(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {

	ws := memdb.NewWatchSet()
	accessors, err := s.snap.VaultAccessors(ws)
	if err != nil {
		return err
	}

	for {
		raw := accessors.Next()
		if raw == nil {
			break
		}

		accessor := raw.(*structs.VaultAccessor)

		sink.Write([]byte{byte(VaultAccessorSnapshot)})
		if err := encoder.Encode(accessor); err != nil {
			return err
		}
	}
	return nil
}

// persistJobVersions writes every historical job version to the sink,
// prefixed with the JobVersionSnapshot type tag.
func (s *nomadSnapshot) persistJobVersions(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get all the job versions
	ws := memdb.NewWatchSet()
	versions, err := s.snap.JobVersions(ws)
	if err != nil {
		return err
	}

	for {
		// Get the next item
		raw := versions.Next()
		if raw == nil {
			break
		}

		// Prepare the request struct
		job := raw.(*structs.Job)

		// Write out a job version record
		sink.Write([]byte{byte(JobVersionSnapshot)})
		if err := encoder.Encode(job); err != nil {
			return err
		}
	}
	return nil
}

// persistDeployments writes every deployment to the sink, prefixed with the
// DeploymentSnapshot type tag.
func (s *nomadSnapshot) persistDeployments(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get all the deployments
	ws := memdb.NewWatchSet()
	deployments, err := s.snap.Deployments(ws)
	if err != nil {
		return err
	}

	for {
		// Get the next item
		raw := deployments.Next()
		if raw == nil {
			break
		}

		// Prepare the request struct
		deployment := raw.(*structs.Deployment)

		// Write out a deployment record
		sink.Write([]byte{byte(DeploymentSnapshot)})
		if err := encoder.Encode(deployment); err != nil {
			return err
		}
	}
	return nil
}

// persistACLPolicies writes every ACL policy to the sink, prefixed with the
// ACLPolicySnapshot type tag.
func (s *nomadSnapshot) persistACLPolicies(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get all the policies
	ws := memdb.NewWatchSet()
	policies, err := s.snap.ACLPolicies(ws)
	if err != nil {
		return err
	}

	for {
		// Get the next item
		raw := policies.Next()
		if raw == nil {
			break
		}

		// Prepare the request struct
		policy := raw.(*structs.ACLPolicy)

		// Write out a policy registration
		sink.Write([]byte{byte(ACLPolicySnapshot)})
		if err := encoder.Encode(policy); err != nil {
			return err
		}
	}
	return nil
}

// persistACLTokens writes every ACL token to the sink, prefixed with the
// ACLTokenSnapshot type tag.
func (s *nomadSnapshot) persistACLTokens(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get all the tokens
	ws := memdb.NewWatchSet()
	tokens, err := s.snap.ACLTokens(ws)
	if err != nil {
		return err
	}

	for {
		// Get the next item
		raw := tokens.Next()
		if raw == nil {
			break
		}

		// Prepare the request struct
		token := raw.(*structs.ACLToken)

		// Write out a token registration
		sink.Write([]byte{byte(ACLTokenSnapshot)})
		if err := encoder.Encode(token); err != nil {
			return err
		}
	}
	return nil
}

// persistSchedulerConfig writes the singleton scheduler configuration to the
// sink, prefixed with the SchedulerConfigSnapshot type tag.
func (s *nomadSnapshot) persistSchedulerConfig(sink raft.SnapshotSink,
	encoder *codec.Encoder) error {
	// Get scheduler config
	_, schedConfig, err := s.snap.SchedulerConfig()
	if err != nil {
		return err
	}

	// Write out scheduler config
	sink.Write([]byte{byte(SchedulerConfigSnapshot)})
	if err := encoder.Encode(schedConfig); err != nil {
		return err
	}
	return nil
}
// Release is a no-op, as we just need to GC the pointer
// to the state store snapshot. There is nothing to explicitly
// cleanup. Required to satisfy the raft.FSMSnapshot interface.
func (s *nomadSnapshot) Release() {}
|
// Package eperf offers an easy solution to analyse the basic performance outline
// of program parts.
package eperf
import (
"log"
"time"
)
// Perftest measures repeated executions of a program part and remembers the
// fastest iteration.
type Perftest struct {
	name   string        // label used in the log output
	cycles int           // number of iterations to time
	round  int           // how many times Run has returned true so far
	min    time.Duration // fastest iteration observed
	last   time.Time     // timestamp taken at the start of the current iteration
}

// New creates a new Perftest with a given name and the number of times
// (cycles > 0) the program part will be executed.
func New(name string, cycles int) *Perftest {
	return &Perftest{name: name, cycles: cycles}
}

// Run runs the given Perftest. Use within a for loop.
// The fastest execution time of all cycles will be printed.
//	perftest := eperf.New("code test", 1000) // Create a new Perftest
//	for perftest.Run() {                     // Use Run() in a loop
//		code()                           // code() will be executed 1000 times.
//	}
//	// The fastest execution time will be logged. Example output:
//	// 2014/07/06 17:26:59 eperf: code test runtime: 133.806us
//
// Bug fix vs. the previous version: the loop body now executes exactly
// `cycles` times (previously cycles+1, with the final iteration's timing
// discarded, and one stray execution even when cycles <= 0), and every
// iteration's duration participates in the minimum.
func (p *Perftest) Run() bool {
	now := time.Now()

	// Record the duration of the iteration that just finished. There is
	// nothing to record on the very first call.
	if p.round > 0 {
		run := now.Sub(p.last)
		if run < p.min || p.round == 1 {
			p.min = run
		}
	}

	// All requested cycles done (or none were requested): report and stop.
	if p.round >= p.cycles {
		if p.round > 0 {
			log.Println("eperf:", p.name, "runtime:", p.min)
		} else {
			log.Println("eperf:", p.name, "no cycles!")
		}
		return false
	}

	p.round++
	p.last = time.Now()
	return true
}
removed unnecessary else
// Package eperf offers an easy solution to analyse the basic performance outline
// of program parts.
package eperf
import (
"log"
"time"
)
// Perftest measures repeated executions of a program part and remembers the
// fastest iteration.
type Perftest struct {
	name   string        // label used in the log output
	cycles int           // number of iterations to time
	round  int           // how many times Run has returned true so far
	min    time.Duration // fastest iteration observed
	last   time.Time     // timestamp taken at the start of the current iteration
}

// New creates a new Perftest with a given name and the number of times
// (cycles > 0) the program part will be executed.
func New(name string, cycles int) *Perftest {
	return &Perftest{name: name, cycles: cycles}
}

// Run runs the given Perftest. Use within a for loop.
// The fastest execution time of all cycles will be printed.
//	perftest := eperf.New("code test", 1000) // Create a new Perftest
//	for perftest.Run() {                     // Use Run() in a loop
//		code()                           // code() will be executed 1000 times.
//	}
//	// The fastest execution time will be logged. Example output:
//	// 2014/07/06 17:26:59 eperf: code test runtime: 133.806us
//
// Bug fix vs. the previous version: the loop body now executes exactly
// `cycles` times (previously cycles+1, with the final iteration's timing
// discarded, and one stray execution even when cycles <= 0), and every
// iteration's duration participates in the minimum.
func (p *Perftest) Run() bool {
	now := time.Now()

	// Record the duration of the iteration that just finished. There is
	// nothing to record on the very first call.
	if p.round > 0 {
		run := now.Sub(p.last)
		if run < p.min || p.round == 1 {
			p.min = run
		}
	}

	// All requested cycles done (or none were requested): report and stop.
	if p.round >= p.cycles {
		if p.round > 0 {
			log.Println("eperf:", p.name, "runtime:", p.min)
		} else {
			log.Println("eperf:", p.name, "no cycles!")
		}
		return false
	}

	p.round++
	p.last = time.Now()
	return true
}
|
// Copyright (c) 2012-2013 Matt Nunogawa @amattn
// This source code is release under the MIT License, http://opensource.org/licenses/MIT
package deeperror
import (
"fmt"
"log"
"net/http"
"runtime"
"strconv"
"strings"
)
// globalErrorLoggingEnabled controls whether New automatically writes each
// freshly created DeepError to the standard logger.
var globalErrorLoggingEnabled bool

func init() {
	// Redundant with the zero value; kept for explicitness.
	globalErrorLoggingEnabled = false
}

const (
	// globalDefaultStatusCode is assigned to every new DeepError unless a
	// parent DeepError or NewHTTPError overrides it.
	globalDefaultStatusCode = http.StatusInternalServerError
)

// DeepError is a rich error carrying an error number, caller location, a
// stack trace, optional debug data, and an optional wrapped source error.
type DeepError struct {
	Num           int64  // application-defined error number
	Filename      string // file of the New call site
	CallingMethod string // function of the New call site
	Line          int    // line of the New call site
	EndUserMsg    string // message safe to show to end users
	DebugMsg      string // optional internal message, shown by Error()
	DebugFields   map[string]interface{} // arbitrary debug data, see AddDebugField
	Err           error // inner or source error
	StatusCode    int   // HTTP status code associated with this error
	StackTrace    string // stack captured at construction time
}
// New is the primary constructor. It creates a *DeepError with the given
// error number, end-user message, and optional parent (source) error,
// capturing the caller's location and a stack trace as a side effect.
func New(num int64, endUserMsg string, parentErr error) *DeepError {
	e := &DeepError{
		Num:         num,
		EndUserMsg:  endUserMsg,
		Err:         parentErr,
		StatusCode:  globalDefaultStatusCode,
		DebugFields: make(map[string]interface{}),
	}

	// Inherit the HTTP status code from a parent DeepError, if present.
	if gerr, ok := parentErr.(*DeepError); ok && gerr != nil {
		e.StatusCode = gerr.StatusCode
	}

	// Record where New was called from (file, line, function name).
	if pc, file, line, ok := runtime.Caller(1); ok {
		e.Line = line
		components := strings.Split(file, "/")
		e.Filename = components[len(components)-1]
		e.CallingMethod = runtime.FuncForPC(pc).Name()
	}

	// Capture up to 4 KiB of the current goroutine's stack.
	const size = 1 << 12
	buf := make([]byte, size)
	e.StackTrace = string(buf[:runtime.Stack(buf, false)])

	if globalErrorLoggingEnabled {
		log.Print(e)
	}

	return e
}
// NewHTTPError is the HTTP variant of New: it creates a DeepError carrying
// the given HTTP status code. An empty endUserMsg is replaced with the
// standard status text for that code.
func NewHTTPError(num int64, endUserMsg string, err error, statusCode int) *DeepError {
	derr := New(num, endUserMsg, err)
	derr.StatusCode = statusCode
	if endUserMsg == "" {
		derr.EndUserMsg = http.StatusText(statusCode)
	}
	return derr
}
// NewTODOError is a convenience constructor: a simple DeepError with the
// given error number whose message is "TODO". Each extra argument is attached
// as a debug field keyed by its position.
func NewTODOError(num int64, printArgs ...interface{}) *DeepError {
	derr := New(num, "TODO", nil)
	for i, arg := range printArgs {
		derr.AddDebugField(strconv.Itoa(i), arg)
	}
	return derr
}
// NewOrNilFromParent is a convenience wrapper: it returns nil when parentErr
// is nil, and otherwise wraps parentErr in a new DeepError.
func NewOrNilFromParent(num int64, endUserMsg string, parentErr error) error {
	if parentErr != nil {
		return New(num, endUserMsg, parentErr)
	}
	// Return a literal nil so callers' err != nil checks behave as expected.
	return nil
}
// Fatal is a convenience method, equivalent to derr := New(...); log.Fatal(derr).
// Note that log.Fatal terminates the process via os.Exit(1), so deferred
// functions do not run.
func Fatal(num int64, endUserMsg string, parentErr error) {
	derr := New(num, endUserMsg, parentErr)
	log.Fatal(derr)
}
// AddDebugField attaches arbitrary debugging data to the DeepError under the
// given key; it is rendered by Error(). NOTE(review): a plain map write, so
// not safe for concurrent use — confirm callers serialize access.
func (derr *DeepError) AddDebugField(key string, value interface{}) {
	derr.DebugFields[key] = value
}
// prependToLines is an internal pretty-printing helper: it prefixes every
// line of para with prefix and returns the rejoined text.
func prependToLines(para, prefix string) string {
	var b strings.Builder
	for i, line := range strings.Split(para, "\n") {
		if i > 0 {
			b.WriteByte('\n')
		}
		b.WriteString(prefix)
		b.WriteString(line)
	}
	return b.String()
}
// StatusCodeIsDefaultValue reports whether the error's status code is still
// the package default (http.StatusInternalServerError). Simplified from an
// if/else returning boolean literals to a direct comparison, matching this
// repository's "removed unnecessary else" convention.
func (derr *DeepError) StatusCodeIsDefaultValue() bool {
	return derr.StatusCode == globalDefaultStatusCode
}
// Error conforms to the go built-in error interface
// (http://golang.org/pkg/builtin/#error). It renders the error number,
// status code, call site, user message, debug fields, stack trace, and the
// full (indented) chain of parent errors.
func (derr *DeepError) Error() string {
	parentError := "nil"

	if derr.Err != nil {
		// Indent the parent's rendering so nesting depth is visible.
		parentError = prependToLines(derr.Err.Error(), "-- ")
	}

	debugFieldStrings := make([]string, 0, len(derr.DebugFields))
	for k, v := range derr.DebugFields {
		str := fmt.Sprintf("\n-- DebugField[%s]: %+v", k, v)
		debugFieldStrings = append(debugFieldStrings, str)
	}

	dbgMsg := ""
	if len(derr.DebugMsg) > 0 {
		dbgMsg = "\n-- DebugMsg: " + derr.DebugMsg
	}

	return fmt.Sprintln(
		"\n\n-- DeepError",
		derr.Num,
		derr.StatusCode,
		derr.Filename,
		derr.CallingMethod,
		"line:", derr.Line,
		"\n-- EndUserMsg: ", derr.EndUserMsg,
		dbgMsg,
		strings.Join(debugFieldStrings, ""),
		"\n-- StackTrace:",
		strings.TrimLeft(prependToLines(derr.StackTrace, "-- "), " "),
		"\n-- ParentError:", parentError,
	)
}

// Unwrap returns the inner (source) error, if any, supporting the Go 1.13
// error-wrapping convention so errors.Is and errors.As can traverse the
// chain.
func (derr *DeepError) Unwrap() error {
	return derr.Err
}
// enable/disable automatic logging of deeperrors upon creation
//
// ErrorLoggingEnabled reports whether New currently logs each DeepError
// via log.Print at creation time.
func ErrorLoggingEnabled() bool {
	return globalErrorLoggingEnabled
}
// anything performed in this anonymous function will not trigger automatic logging of deeperrors upon creation
//
// NoErrorsLoggingAction is the callback type accepted by
// ExecWithoutErrorLogging.
type NoErrorsLoggingAction func()
// you can use this method to temporarily disable automatic logging of deeperrors
//
// ExecWithoutErrorLogging runs action with automatic error logging switched
// off, restoring the previous setting afterwards. The restore is deferred
// so the flag is reset even if action panics (previously a panic would have
// left logging permanently disabled).
func ExecWithoutErrorLogging(action NoErrorsLoggingAction) {
	// NOTE(review): this toggles an unsynchronized package-level flag, so
	// it remains racy under concurrent use — same caveat as the original.
	original := globalErrorLoggingEnabled
	globalErrorLoggingEnabled = false
	defer func() { globalErrorLoggingEnabled = original }()
	action()
}
support for the Unwrap convention introduced in go 1.13
// Copyright (c) 2012-2013 Matt Nunogawa @amattn
// This source code is release under the MIT License, http://opensource.org/licenses/MIT
package deeperror
import (
"fmt"
"log"
"net/http"
"runtime"
"strconv"
"strings"
)
var globalErrorLoggingEnabled bool
func init() {
globalErrorLoggingEnabled = false
}
const (
globalDefaultStatusCode = http.StatusInternalServerError
)
// DeepError is a rich error type: it carries an error number, the caller's
// location, a stack trace, arbitrary debug data, an HTTP status code, and
// an optional wrapped source error.
type DeepError struct {
	Num int64 // numeric error identifier supplied by the caller
	Filename string // file of the New() call site
	CallingMethod string // function name of the New() call site
	Line int // line number of the New() call site
	EndUserMsg string // message suitable for showing to end users
	DebugMsg string // internal-only diagnostic message
	DebugFields map[string]interface{} // arbitrary key/value debug data
	Err error // inner or source error
	StatusCode int // HTTP status; defaults to globalDefaultStatusCode
	StackTrace string // stack captured when the error was created
}
// Primary Constructor. Create a DeepError ptr with the given number, end user message and optional parent error.
//
// New records the immediate caller's file, function, and line plus a full
// stack trace. If parentErr is itself a *DeepError, its StatusCode is
// inherited. When global error logging is enabled, the new error is
// logged immediately via log.Print.
func New(num int64, endUserMsg string, parentErr error) *DeepError {
	e := new(DeepError)
	e.Num = num
	e.EndUserMsg = endUserMsg
	e.Err = parentErr
	e.StatusCode = globalDefaultStatusCode
	e.DebugFields = make(map[string]interface{})
	// Propagate the status code from a wrapped DeepError, if any.
	gerr, ok := parentErr.(*DeepError)
	if ok {
		if gerr != nil {
			e.StatusCode = gerr.StatusCode
		}
	}
	// Caller(1) is the function that invoked New.
	pc, file, line, ok := runtime.Caller(1)
	if ok {
		e.Line = line
		components := strings.Split(file, "/")
		e.Filename = components[(len(components) - 1)]
		f := runtime.FuncForPC(pc)
		e.CallingMethod = f.Name()
	}
	const size = 1 << 12 // 4 KiB buffer for the current goroutine's trace
	buf := make([]byte, size)
	n := runtime.Stack(buf, false)
	e.StackTrace = string(buf[:n])
	if globalErrorLoggingEnabled {
		log.Print(e)
	}
	return e
}
// HTTP variant. Create a DeepError with the given http status code
func NewHTTPError(num int64, endUserMsg string, err error, statusCode int) *DeepError {
derr := New(num, endUserMsg, err)
derr.StatusCode = statusCode
if len(endUserMsg) == 0 {
derr.EndUserMsg = http.StatusText(statusCode)
}
return derr
}
// Convenience method. creates a simple DeepError with the given error number. The error message is set to "TODO"
func NewTODOError(num int64, printArgs ...interface{}) *DeepError {
derr := New(num, "TODO", nil)
for i, printArg := range printArgs {
derr.AddDebugField(strconv.Itoa(i), printArg)
}
return derr
}
// Convenience method. This will return nil if parentErr == nil. Otherwise it will create a DeepError and return that.
func NewOrNilFromParent(num int64, endUserMsg string, parentErr error) error {
if parentErr == nil {
return nil
}
return New(num, endUserMsg, parentErr)
}
// Convenience method. Equivalent to derr:=New(...); log.Fatal(derr)
func Fatal(num int64, endUserMsg string, parentErr error) {
derr := New(num, endUserMsg, parentErr)
log.Fatal(derr)
}
// Add arbitrary debugging data to a given DeepError
func (derr *DeepError) AddDebugField(key string, value interface{}) {
derr.DebugFields[key] = value
}
// Conform to the new Unwrap interface.
// Unwrap() will expose errors further down the error chain.
// This should allow support for Is() and As() in Go 1.13 and later.
// Alternatively, earlier versions of Go can
// import "golang.org/x/xerrors" to get library support
// for Is(), As(), and Unwrap().
// see https://blog.golang.org/go1.13-errors for details
func (derr *DeepError) Unwrap() error {
	return derr.Err
}
// internal usage for formatting/pretty printing
func prependToLines(para, prefix string) string {
lines := strings.Split(para, "\n")
for i, line := range lines {
lines[i] = prefix + line
}
return strings.Join(lines, "\n")
}
// Check if the current status code matches the global default
//
// Returns the comparison directly, replacing the redundant if/else that
// returned literal true/false.
func (derr *DeepError) StatusCodeIsDefaultValue() bool {
	return derr.StatusCode == globalDefaultStatusCode
}
// Conform to the go built-in error interface
// http://golang.org/pkg/builtin/#error
func (derr *DeepError) Error() string {
parentError := "nil"
if derr.Err != nil {
parentError = prependToLines(derr.Err.Error(), "-- ")
}
debugFieldStrings := make([]string, 0, len(derr.DebugFields))
for k, v := range derr.DebugFields {
str := fmt.Sprintf("\n-- DebugField[%s]: %+v", k, v)
debugFieldStrings = append(debugFieldStrings, str)
}
dbgMsg := ""
if len(derr.DebugMsg) > 0 {
dbgMsg = "\n-- DebugMsg: " + derr.DebugMsg
}
return fmt.Sprintln(
"\n\n-- DeepError",
derr.Num,
derr.StatusCode,
derr.Filename,
derr.CallingMethod,
"line:", derr.Line,
"\n-- EndUserMsg: ", derr.EndUserMsg,
dbgMsg,
strings.Join(debugFieldStrings, ""),
"\n-- StackTrace:",
strings.TrimLeft(prependToLines(derr.StackTrace, "-- "), " "),
"\n-- ParentError:", parentError,
)
}
// enable/disable automatic logging of deeperrors upon creation
func ErrorLoggingEnabled() bool {
return globalErrorLoggingEnabled
}
// anything performed in this anonymous function will not trigger automatic logging of deeperrors upon creation
type NoErrorsLoggingAction func()
// you can use this method to temporarily disable automatic logging of deeperrors
//
// ExecWithoutErrorLogging runs action with automatic error logging switched
// off, restoring the previous setting afterwards. The restore is deferred
// so the flag is reset even if action panics (previously a panic would have
// left logging permanently disabled).
func ExecWithoutErrorLogging(action NoErrorsLoggingAction) {
	// NOTE(review): this toggles an unsynchronized package-level flag, so
	// it remains racy under concurrent use — same caveat as the original.
	original := globalErrorLoggingEnabled
	globalErrorLoggingEnabled = false
	defer func() { globalErrorLoggingEnabled = original }()
	action()
}
|
package dropbox
import (
"net/http"
)
// error tag constant values
const (
TooManyRequests = "too_many_requests"
)
// Error response.
type Error struct {
Status string
StatusCode int
Header http.Header
Summary string `json:"error_summary"`
Message string `json:"user_message"` // optionally present
Err interface{} `json:"error"`
}
// Error string.
//
// Returns the API's error_summary field verbatim. NOTE(review): when the
// response carried no summary (e.g. a bare 500), this yields an empty
// string — the revised version of this file later adds a status fallback.
func (e *Error) Error() string {
	return e.Summary
}
// Tag returns the inner tag for the error
//
// The error payload in e.Err can take three shapes:
//   - a plain string: returned as value, with tag empty;
//   - a map with a ".tag" key: that string becomes tag, and if the entry
//     named by tag is itself a map with its own ".tag", that inner string
//     becomes value;
//   - anything else (including nil): both results are empty.
func (e *Error) Tag() (tag, value string) {
	payload, ok := e.Err.(map[string]interface{})
	if !ok {
		// Some endpoints return the error body as a bare string.
		val, ok := e.Err.(string)
		if ok {
			return "", val
		}
		return
	}
	tag, ok = payload[".tag"].(string)
	if !ok {
		return
	}
	data, ok := payload[tag].(map[string]interface{})
	if !ok {
		return
	}
	value, ok = data[".tag"].(string)
	if !ok {
		return
	}
	return
}
add fallback error formatting to handle 500s
package dropbox
import (
"fmt"
"net/http"
)
// error tag constant values
const (
TooManyRequests = "too_many_requests"
)
// Error response.
type Error struct {
Status string
StatusCode int
Header http.Header
Summary string `json:"error_summary"`
Message string `json:"user_message"` // optionally present
Err interface{} `json:"error"`
}
// Error string.
// Falls back to "<code>: <status>" when the response had no error_summary
// (e.g. a bare 500).
func (e *Error) Error() string {
	if s := e.Summary; s != "" {
		return s
	}
	return fmt.Sprintf("%d: %s", e.StatusCode, e.Status)
}
// Tag returns the inner tag for the error
//
// e.Err may be a bare string (returned as value), or a map carrying a
// ".tag" key naming an inner map whose own ".tag" is the value.
func (e *Error) Tag() (tag, value string) {
	switch v := e.Err.(type) {
	case string:
		// Some endpoints return the error body as a bare string.
		return "", v
	case map[string]interface{}:
		outer, ok := v[".tag"].(string)
		if !ok {
			return "", ""
		}
		inner, ok := v[outer].(map[string]interface{})
		if !ok {
			return outer, ""
		}
		innerTag, _ := inner[".tag"].(string)
		return outer, innerTag
	default:
		return "", ""
	}
}
|
package redis
import (
"fmt"
"io"
"net"
"strings"
)
// Nil is the Redis nil reply, e.g. when a key does not exist.
var Nil = errorf("redis: nil")

// TxFailedErr is returned when a Redis transaction failed.
var TxFailedErr = errorf("redis: transaction failed")

// redisError marks errors originating from this client library, as
// opposed to network errors or other external failures.
type redisError struct {
	s string
}

// errorf builds a redisError from a printf-style format and arguments.
func errorf(s string, args ...interface{}) redisError {
	msg := fmt.Sprintf(s, args...)
	return redisError{s: msg}
}

// Error implements the built-in error interface.
func (err redisError) Error() string {
	return err.s
}
// isInternalError reports whether err was produced by this package
// (i.e. is a redisError) rather than by the network.
func isInternalError(err error) bool {
	_, ok := err.(redisError)
	return ok
}
// isNetworkError reports whether err is an end-of-stream condition or
// any net.Error.
func isNetworkError(err error) bool {
	if err == io.EOF {
		return true
	}
	if _, ok := err.(net.Error); ok {
		return true
	}
	return false
}
// isBadConn reports whether the connection that produced err must be
// discarded. Internal (client-generated) errors leave the connection
// usable; network timeouts are tolerated only when allowTimeout is set.
func isBadConn(err error, allowTimeout bool) bool {
	if err == nil {
		return false
	}
	if isInternalError(err) {
		return false
	}
	netErr, isNetErr := err.(net.Error)
	if allowTimeout && isNetErr && netErr.Timeout() {
		return false
	}
	return true
}
// isMovedError inspects an error for Redis Cluster redirection.
// It recognizes "MOVED <slot> <addr>" and "ASK <slot> <addr>" messages
// and extracts the target address (everything after the last space).
// NOTE(review): only redisError values are considered — presumably server
// replies are surfaced as redisError; confirm against the reply parser.
func isMovedError(err error) (moved bool, ask bool, addr string) {
	if _, ok := err.(redisError); !ok {
		return
	}
	s := err.Error()
	if strings.HasPrefix(s, "MOVED ") {
		moved = true
	} else if strings.HasPrefix(s, "ASK ") {
		ask = true
	} else {
		return
	}
	ind := strings.LastIndexByte(s, ' ')
	if ind == -1 {
		// Unreachable in practice: the matched prefix itself contains a space.
		return false, false, ""
	}
	addr = s[ind+1:]
	return
}
// shouldRetry reports whether failed command should be retried.
// Currently only network errors are considered retryable.
func shouldRetry(err error) bool {
	return isNetworkError(err)
}
go 1.4 compatible
package redis
import (
"fmt"
"io"
"net"
"strings"
)
// Redis nil reply, .e.g. when key does not exist.
var Nil = errorf("redis: nil")
// Redis transaction failed.
var TxFailedErr = errorf("redis: transaction failed")
type redisError struct {
s string
}
func errorf(s string, args ...interface{}) redisError {
return redisError{s: fmt.Sprintf(s, args...)}
}
func (err redisError) Error() string {
return err.s
}
func isInternalError(err error) bool {
_, ok := err.(redisError)
return ok
}
func isNetworkError(err error) bool {
if err == io.EOF {
return true
}
_, ok := err.(net.Error)
return ok
}
func isBadConn(err error, allowTimeout bool) bool {
if err == nil {
return false
}
if isInternalError(err) {
return false
}
if allowTimeout {
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
return false
}
}
return true
}
// isMovedError inspects an error for Redis Cluster redirection.
// It recognizes "MOVED <slot> <addr>" and "ASK <slot> <addr>" messages
// and extracts the target address (everything after the last space).
func isMovedError(err error) (moved bool, ask bool, addr string) {
	if _, ok := err.(redisError); !ok {
		return
	}
	s := err.Error()
	if strings.HasPrefix(s, "MOVED ") {
		moved = true
	} else if strings.HasPrefix(s, "ASK ") {
		ask = true
	} else {
		return
	}
	// Uses the local LastIndexByte helper for Go 1.4 compatibility.
	ind := LastIndexByte(s, ' ')
	if ind == -1 {
		// Unreachable in practice: the matched prefix itself contains a space.
		return false, false, ""
	}
	addr = s[ind+1:]
	return
}
// LastIndexByte returns the index of the last occurrence of c in s, or -1
// if c is absent. Local stand-in for strings.LastIndexByte, which is only
// available from Go 1.5 on (this revision targets Go 1.4).
func LastIndexByte(s string, c byte) int {
	last := -1
	for i := 0; i < len(s); i++ {
		if s[i] == c {
			last = i
		}
	}
	return last
}
// shouldRetry reports whether failed command should be retried.
func shouldRetry(err error) bool {
return isNetworkError(err)
}
|
package txbuilder
import (
"bytes"
"fmt"
"time"
"golang.org/x/net/context"
"chain/cos/bc"
"chain/cos/hdkey"
"chain/cos/txscript"
"chain/errors"
)
// ErrBadBuildRequest is returned from Build when the
// arguments are invalid.
var ErrBadBuildRequest = errors.New("bad build request")
// Build builds or adds on to a transaction.
// Initially, inputs are left unconsumed, and destinations unsatisfied.
// Build partners then satisfy and consume inputs and destinations.
// The final party must ensure that the transaction is
// balanced before calling finalize.
//
// ttl is clamped to a minimum of one minute; it bounds how long reserved
// sources are held. When prev is non-nil the freshly built template is
// merged into it via combine. Signature hashes are recomputed on the
// final template before it is returned.
func Build(ctx context.Context, prev *Template, sources []*Source, dests []*Destination, metadata []byte, ttl time.Duration) (*Template, error) {
	if ttl < time.Minute {
		ttl = time.Minute
	}
	tpl, err := build(ctx, sources, dests, metadata, ttl)
	if err != nil {
		return nil, err
	}
	if prev != nil {
		tpl, err = combine(prev, tpl)
		if err != nil {
			return nil, err
		}
	}
	ComputeSigHashes(ctx, tpl)
	return tpl, nil
}
// build reserves the requested sources and assembles an unsigned
// transaction template from them plus the requested destinations.
// Change destinations reported by each reservation are appended to dests
// so they become additional outputs.
func build(ctx context.Context, sources []*Source, dests []*Destination, metadata []byte, ttl time.Duration) (*Template, error) {
	tx := &bc.TxData{
		Version: bc.CurrentTransactionVersion,
		Metadata: metadata,
	}
	var inputs []*Input
	for _, source := range sources {
		reserveResult, err := source.Reserve(ctx, ttl)
		if err != nil {
			return nil, errors.Wrap(err, "reserve")
		}
		for _, item := range reserveResult.Items {
			// Empty signature arrays should be serialized as empty arrays, not null.
			if item.TemplateInput.Sigs == nil {
				item.TemplateInput.Sigs = []*Signature{}
			}
			if item.TemplateInput.SigComponents == nil {
				item.TemplateInput.SigComponents = []*SigScriptComponent{}
			}
			tx.Inputs = append(tx.Inputs, item.TxInput)
			inputs = append(inputs, item.TemplateInput)
		}
		// Change produced by the reservation becomes an extra destination.
		dests = append(dests, reserveResult.Change...)
	}
	for _, dest := range dests {
		output := &bc.TxOutput{
			AssetAmount: bc.AssetAmount{AssetID: dest.AssetID, Amount: dest.Amount},
			Script: dest.PKScript(),
			Metadata: dest.Metadata,
		}
		tx.Outputs = append(tx.Outputs, output)
	}
	appTx := &Template{
		Unsigned: tx,
		BlockChain: "sandbox",
		Inputs: inputs,
	}
	return appTx, nil
}
// combine merges multiple templates into one. All templates must share the
// same BlockChain; inputs and outputs are concatenated in order.
//
// Metadata handling: a template's metadata is only considered when it is
// non-empty; conflicting non-empty metadata between templates is rejected.
// (Previously a later template with empty metadata silently erased metadata
// set by an earlier one.)
func combine(txs ...*Template) (*Template, error) {
	if len(txs) == 0 {
		return nil, errors.New("must pass at least one tx")
	}
	completeWire := &bc.TxData{Version: bc.CurrentTransactionVersion}
	complete := &Template{BlockChain: txs[0].BlockChain, Unsigned: completeWire}
	for _, tx := range txs {
		if tx.BlockChain != complete.BlockChain {
			return nil, errors.New("all txs must be the same BlockChain")
		}
		if len(tx.Unsigned.Metadata) != 0 {
			if len(complete.Unsigned.Metadata) != 0 &&
				!bytes.Equal(tx.Unsigned.Metadata, complete.Unsigned.Metadata) {
				return nil, errors.WithDetail(ErrBadBuildRequest, "transaction metadata does not match previous template's metadata")
			}
			complete.Unsigned.Metadata = tx.Unsigned.Metadata
		}
		complete.Inputs = append(complete.Inputs, tx.Inputs...)
		completeWire.Inputs = append(completeWire.Inputs, tx.Unsigned.Inputs...)
		completeWire.Outputs = append(completeWire.Outputs, tx.Unsigned.Outputs...)
	}
	return complete, nil
}
// ComputeSigHashes populates signature data for every input and sigscript
// component.
// A single SigHashCache is shared across inputs so common portions of the
// signature hash are computed once.
func ComputeSigHashes(ctx context.Context, tpl *Template) {
	hashCache := &bc.SigHashCache{}
	for i, in := range tpl.Inputs {
		aa := in.AssetAmount
		in.SignatureData = tpl.Unsigned.HashForSigCached(i, aa, bc.SigHashAll, hashCache)
		// Every component of an input signs the same data.
		for _, c := range in.SigComponents {
			c.SignatureData = in.SignatureData
		}
	}
}
// AssembleSignatures takes a filled in Template
// and adds the signatures to the template's unsigned transaction,
// creating a fully-signed transaction.
//
// Per input, the sigscript is built by concatenating the input's
// components in order; "signature" components contribute at most
// c.Required non-empty DER signatures.
func AssembleSignatures(txTemplate *Template) (*bc.Tx, error) {
	msg := txTemplate.Unsigned
	for i, input := range txTemplate.Inputs {
		components := input.SigComponents
		// For backwards compatibility, convert old input.Sigs to a signature
		// sigscript component.
		// TODO(jackson): Remove once all the SDKs are using the new format.
		if len(input.Sigs) > 0 || len(input.SigComponents) == 0 {
			sigsReqd, err := getSigsRequired(input.SigScriptSuffix)
			if err != nil {
				return nil, err
			}
			// Replace the existing components. Only SDKs that don't understand
			// signature components will populate input.Sigs.
			components = []*SigScriptComponent{
				{
					Type: "signature",
					Required: sigsReqd,
					SignatureData: input.SignatureData,
					Signatures: input.Sigs,
				},
				{
					Type: "script",
					Script: input.SigScriptSuffix,
				},
			}
		}
		sb := txscript.NewScriptBuilder()
		for _, c := range components {
			switch c.Type {
			case "script":
				sb.ConcatRawScript(c.Script)
			case "data":
				sb.AddData(c.Data)
			case "signature":
				if len(c.Signatures) == 0 {
					break
				}
				// NOTE(review): OP_FALSE presumably compensates for the extra
				// value CHECKMULTISIG pops — confirm against txscript.
				sb.AddOp(txscript.OP_FALSE)
				// Add non-empty signatures until the required count is met.
				added := 0
				for _, sig := range c.Signatures {
					if len(sig.DER) == 0 {
						continue
					}
					sb.AddData(sig.DER)
					added++
					if added == c.Required {
						break
					}
				}
			default:
				return nil, fmt.Errorf("unknown sigscript component `%s`", c.Type)
			}
		}
		script, err := sb.Script()
		if err != nil {
			return nil, errors.Wrap(err)
		}
		msg.Inputs[i].SignatureScript = script
	}
	return bc.NewTx(*msg), nil
}
// InputSigs takes a set of keys
// and creates a matching set of Input Signatures
// for a Template
//
// Each signature records the extended public key and its derivation path;
// the DER bytes are left empty for the signer to fill in later.
func InputSigs(keys []*hdkey.Key) (sigs []*Signature) {
	// Non-nil empty slice, matching the "serialize as [], not null"
	// convention noted in build().
	sigs = []*Signature{}
	for _, k := range keys {
		sigs = append(sigs, &Signature{
			XPub: k.Root.String(),
			DerivationPath: k.Path,
		})
	}
	return sigs
}
// getSigsRequired returns how many signatures the given script demands:
// 1 for ordinary scripts, or the m of an m-of-n multisig script.
func getSigsRequired(script []byte) (sigsReqd int, err error) {
	sigsReqd = 1
	if txscript.IsMultiSig(script) {
		_, sigsReqd, err = txscript.CalcMultiSigStats(script)
		if err != nil {
			return 0, err
		}
	}
	return sigsReqd, nil
}
core/txbuilder: combine should merge metadata only if present
Closes chain/chainprv#958.
Reviewers: da39a3ee5e6b4b0d3255bfef95601890afd80709@kr
package txbuilder
import (
"bytes"
"fmt"
"time"
"golang.org/x/net/context"
"chain/cos/bc"
"chain/cos/hdkey"
"chain/cos/txscript"
"chain/errors"
)
// ErrBadBuildRequest is returned from Build when the
// arguments are invalid.
var ErrBadBuildRequest = errors.New("bad build request")
// Build builds or adds on to a transaction.
// Initially, inputs are left unconsumed, and destinations unsatisfied.
// Build partners then satisfy and consume inputs and destinations.
// The final party must ensure that the transaction is
// balanced before calling finalize.
func Build(ctx context.Context, prev *Template, sources []*Source, dests []*Destination, metadata []byte, ttl time.Duration) (*Template, error) {
if ttl < time.Minute {
ttl = time.Minute
}
tpl, err := build(ctx, sources, dests, metadata, ttl)
if err != nil {
return nil, err
}
if prev != nil {
tpl, err = combine(prev, tpl)
if err != nil {
return nil, err
}
}
ComputeSigHashes(ctx, tpl)
return tpl, nil
}
func build(ctx context.Context, sources []*Source, dests []*Destination, metadata []byte, ttl time.Duration) (*Template, error) {
tx := &bc.TxData{
Version: bc.CurrentTransactionVersion,
Metadata: metadata,
}
var inputs []*Input
for _, source := range sources {
reserveResult, err := source.Reserve(ctx, ttl)
if err != nil {
return nil, errors.Wrap(err, "reserve")
}
for _, item := range reserveResult.Items {
// Empty signature arrays should be serialized as empty arrays, not null.
if item.TemplateInput.Sigs == nil {
item.TemplateInput.Sigs = []*Signature{}
}
if item.TemplateInput.SigComponents == nil {
item.TemplateInput.SigComponents = []*SigScriptComponent{}
}
tx.Inputs = append(tx.Inputs, item.TxInput)
inputs = append(inputs, item.TemplateInput)
}
dests = append(dests, reserveResult.Change...)
}
for _, dest := range dests {
output := &bc.TxOutput{
AssetAmount: bc.AssetAmount{AssetID: dest.AssetID, Amount: dest.Amount},
Script: dest.PKScript(),
Metadata: dest.Metadata,
}
tx.Outputs = append(tx.Outputs, output)
}
appTx := &Template{
Unsigned: tx,
BlockChain: "sandbox",
Inputs: inputs,
}
return appTx, nil
}
func combine(txs ...*Template) (*Template, error) {
if len(txs) == 0 {
return nil, errors.New("must pass at least one tx")
}
completeWire := &bc.TxData{Version: bc.CurrentTransactionVersion}
complete := &Template{BlockChain: txs[0].BlockChain, Unsigned: completeWire}
for _, tx := range txs {
if tx.BlockChain != complete.BlockChain {
return nil, errors.New("all txs must be the same BlockChain")
}
if len(tx.Unsigned.Metadata) != 0 {
if len(complete.Unsigned.Metadata) != 0 &&
!bytes.Equal(tx.Unsigned.Metadata, complete.Unsigned.Metadata) {
return nil, errors.WithDetail(ErrBadBuildRequest, "transaction metadata does not match previous template's metadata")
}
complete.Unsigned.Metadata = tx.Unsigned.Metadata
}
complete.Inputs = append(complete.Inputs, tx.Inputs...)
for _, txin := range tx.Unsigned.Inputs {
completeWire.Inputs = append(completeWire.Inputs, txin)
}
for _, txout := range tx.Unsigned.Outputs {
completeWire.Outputs = append(completeWire.Outputs, txout)
}
}
return complete, nil
}
// ComputeSigHashes populates signature data for every input and sigscript
// component.
func ComputeSigHashes(ctx context.Context, tpl *Template) {
hashCache := &bc.SigHashCache{}
for i, in := range tpl.Inputs {
aa := in.AssetAmount
in.SignatureData = tpl.Unsigned.HashForSigCached(i, aa, bc.SigHashAll, hashCache)
for _, c := range in.SigComponents {
c.SignatureData = in.SignatureData
}
}
}
// AssembleSignatures takes a filled in Template
// and adds the signatures to the template's unsigned transaction,
// creating a fully-signed transaction.
func AssembleSignatures(txTemplate *Template) (*bc.Tx, error) {
msg := txTemplate.Unsigned
for i, input := range txTemplate.Inputs {
components := input.SigComponents
// For backwards compatability, convert old input.Sigs to a signature
// sigscript component.
// TODO(jackson): Remove once all the SDKs are using the new format.
if len(input.Sigs) > 0 || len(input.SigComponents) == 0 {
sigsReqd, err := getSigsRequired(input.SigScriptSuffix)
if err != nil {
return nil, err
}
// Replace the existing components. Only SDKs that don't understand
// signature components will populate input.Sigs.
components = []*SigScriptComponent{
{
Type: "signature",
Required: sigsReqd,
SignatureData: input.SignatureData,
Signatures: input.Sigs,
},
{
Type: "script",
Script: input.SigScriptSuffix,
},
}
}
sb := txscript.NewScriptBuilder()
for _, c := range components {
switch c.Type {
case "script":
sb.ConcatRawScript(c.Script)
case "data":
sb.AddData(c.Data)
case "signature":
if len(c.Signatures) == 0 {
break
}
sb.AddOp(txscript.OP_FALSE)
added := 0
for _, sig := range c.Signatures {
if len(sig.DER) == 0 {
continue
}
sb.AddData(sig.DER)
added++
if added == c.Required {
break
}
}
default:
return nil, fmt.Errorf("unknown sigscript component `%s`", c.Type)
}
}
script, err := sb.Script()
if err != nil {
return nil, errors.Wrap(err)
}
msg.Inputs[i].SignatureScript = script
}
return bc.NewTx(*msg), nil
}
// InputSigs takes a set of keys
// and creates a matching set of Input Signatures
// for a Template
func InputSigs(keys []*hdkey.Key) (sigs []*Signature) {
sigs = []*Signature{}
for _, k := range keys {
sigs = append(sigs, &Signature{
XPub: k.Root.String(),
DerivationPath: k.Path,
})
}
return sigs
}
func getSigsRequired(script []byte) (sigsReqd int, err error) {
sigsReqd = 1
if txscript.IsMultiSig(script) {
_, sigsReqd, err = txscript.CalcMultiSigStats(script)
if err != nil {
return 0, err
}
}
return sigsReqd, nil
}
|
package repeater
import (
m "github.com/advantageous/metricsd/metric"
lg "github.com/advantageous/metricsd/logger"
"github.com/advantageous/metricsd/util"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/cloudwatch"
"time"
)
type AwsCloudMetricRepeater struct {
logger lg.Logger
conn *cloudwatch.CloudWatch
config *m.Config
}
// ProcessMetrics converts the collected metrics into CloudWatch datums and
// ships them with PutMetricData, flushing every 20 metrics (CloudWatch's
// per-request datum limit) and once more at the end for any remainder.
//
// Bug fix: the original reset `data` to an empty slice *before* testing
// len(data) > 0, so every intermediate batch was silently discarded
// instead of sent. The batch is now published first and cleared after.
func (cw AwsCloudMetricRepeater) ProcessMetrics(metrics []m.Metric) error {
	timestamp := aws.Time(time.Now())
	aDatum := func(name string) *cloudwatch.MetricDatum {
		return &cloudwatch.MetricDatum{
			MetricName: aws.String(name),
			Timestamp:  timestamp,
		}
	}
	data := []*cloudwatch.MetricDatum{}
	var err error
	for index, d := range metrics {
		if cw.config.Debug {
			cw.logger.Printf("%s %d %d", d.GetName(), d.GetType(), d.GetValue())
		}
		switch d.GetType() {
		case m.COUNT:
			datum := aDatum(d.GetName())
			datum.Unit = aws.String(cloudwatch.StandardUnitCount)
			datum.Value = aws.Float64(float64(d.GetValue()))
			data = append(data, datum)
		case m.LEVEL:
			datum := aDatum(d.GetName())
			datum.Unit = aws.String(cloudwatch.StandardUnitKilobytes)
			datum.Value = aws.Float64(float64(d.GetValue()))
			data = append(data, datum)
		case m.TIMING:
			datum := aDatum(d.GetName())
			datum.Unit = aws.String(cloudwatch.StandardUnitMilliseconds)
			datum.Value = aws.Float64(float64(d.GetValue()))
			data = append(data, datum)
		}
		if index%20 == 0 && index != 0 && len(data) > 0 {
			request := &cloudwatch.PutMetricDataInput{
				Namespace:  aws.String(cw.config.MetricPrefix),
				MetricData: data,
			}
			_, err = cw.conn.PutMetricData(request)
			if err != nil {
				cw.logger.PrintError("Error writing metrics", err)
				cw.logger.Error("Error writing metrics", err, index)
			} else if cw.config.Debug {
				cw.logger.Info("SENT..........................")
			}
			// Start a fresh batch only after the current one was submitted.
			data = []*cloudwatch.MetricDatum{}
		}
	}
	// Flush any remaining datums.
	if len(data) > 0 {
		request := &cloudwatch.PutMetricDataInput{
			Namespace:  aws.String(cw.config.MetricPrefix),
			MetricData: data,
		}
		_, err = cw.conn.PutMetricData(request)
	}
	return err
}
// NewAwsCloudMetricRepeater wires up a CloudWatch client from the AWS
// session described by config and returns a ready-to-use repeater.
func NewAwsCloudMetricRepeater(config *m.Config) AwsCloudMetricRepeater {
	session := util.NewAWSSession(config)
	logger := lg.NewSimpleLogger("log-repeater")
	return AwsCloudMetricRepeater{logger, cloudwatch.New(session), config}
}
more tweaks
package repeater
import (
m "github.com/advantageous/metricsd/metric"
lg "github.com/advantageous/metricsd/logger"
"github.com/advantageous/metricsd/util"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/cloudwatch"
"time"
"strings"
)
type AwsCloudMetricRepeater struct {
logger lg.Logger
conn *cloudwatch.CloudWatch
config *m.Config
}
// ProcessMetrics converts the collected metrics into CloudWatch datums and
// ships them with PutMetricData, flushing every 20 metrics (CloudWatch's
// per-request datum limit) and once more at the end for any remainder.
//
// Bug fix: the original reset `data` to an empty slice *before* testing
// len(data) > 0, so every intermediate batch was silently discarded
// instead of sent. The batch is now published first and cleared after.
func (cw AwsCloudMetricRepeater) ProcessMetrics(metrics []m.Metric) error {
	timestamp := aws.Time(time.Now())
	aDatum := func(name string) *cloudwatch.MetricDatum {
		return &cloudwatch.MetricDatum{
			MetricName: aws.String(name),
			Timestamp:  timestamp,
		}
	}
	data := []*cloudwatch.MetricDatum{}
	var err error
	for index, d := range metrics {
		if cw.config.Debug {
			cw.logger.Printf("%s %d %d", d.GetName(), d.GetType(), d.GetValue())
		}
		switch d.GetType() {
		case m.COUNT:
			datum := aDatum(d.GetName())
			// NOTE(review): names ending in "Per" get Count while all others
			// get Percent — this mapping looks inverted; confirm the intent.
			if strings.HasSuffix(d.GetName(), "Per") {
				datum.Unit = aws.String(cloudwatch.StandardUnitCount)
			} else {
				datum.Unit = aws.String(cloudwatch.StandardUnitPercent)
			}
			datum.Value = aws.Float64(float64(d.GetValue()))
			data = append(data, datum)
		case m.LEVEL:
			datum := aDatum(d.GetName())
			if strings.HasSuffix(d.GetName(), "Per") {
				datum.Unit = aws.String(cloudwatch.StandardUnitKilobytes)
			} else {
				datum.Unit = aws.String(cloudwatch.StandardUnitPercent)
			}
			datum.Value = aws.Float64(float64(d.GetValue()))
			data = append(data, datum)
		case m.TIMING:
			datum := aDatum(d.GetName())
			datum.Unit = aws.String(cloudwatch.StandardUnitMilliseconds)
			datum.Value = aws.Float64(float64(d.GetValue()))
			data = append(data, datum)
		}
		if index%20 == 0 && index != 0 && len(data) > 0 {
			request := &cloudwatch.PutMetricDataInput{
				Namespace:  aws.String(cw.config.MetricPrefix),
				MetricData: data,
			}
			_, err = cw.conn.PutMetricData(request)
			if err != nil {
				cw.logger.PrintError("Error writing metrics", err)
				cw.logger.Error("Error writing metrics", err, index)
			} else if cw.config.Debug {
				cw.logger.Info("SENT..........................")
			}
			// Start a fresh batch only after the current one was submitted.
			data = []*cloudwatch.MetricDatum{}
		}
	}
	// Flush any remaining datums.
	if len(data) > 0 {
		request := &cloudwatch.PutMetricDataInput{
			Namespace:  aws.String(cw.config.MetricPrefix),
			MetricData: data,
		}
		_, err = cw.conn.PutMetricData(request)
	}
	return err
}
func NewAwsCloudMetricRepeater(config *m.Config) AwsCloudMetricRepeater {
session := util.NewAWSSession(config)
logger := lg.NewSimpleLogger("log-repeater")
return AwsCloudMetricRepeater{logger, cloudwatch.New(session), config}
} |
package repeatgenome
/*
WARNING!!! This program is currently under development and may be buggy or broken.
A barebones (at the moment) Go script for parsing and minimizing RepeatMasker output files alongside FASTA reference genomes.
This script expects there to be a subdirectory of the current directory named after the reference genome used (e.g. "dm3") that contains the following files:
* a RepeatMasker library containing:
- the match library (e.g. "dm3.fa.out")
- the alignment information (e.g. "dm3.fa.align")
* one or more reference genome files in FASTA format with the suffix ".fa"
Premature commenting is the root of all evil, and I have sinned. Please read comments skeptically - they will eventually be audited.
KmerInt.Minimize() logic could be changed now that minimizers are 32 bits
Should a Seq's first field be a *byte to discard the extra two fields?
Should probably make a file solely for type defs.
Reads are currently kept in TextSeq form until the bitter end because, with Go's referenced based slices, there's no compelling reason not to, and because they're easier (and probably faster) to manipulate than Seqs. This may change at some point, though.
If a minimizer is associated with a single repeat type, can we use that heuristically?
Error handling should be updated with a custom ParseError type - panics should be removed, excepting performance-critical sequence manipulation functions
Should consider splitting at hyphenated class names like TcMar-Tc1
For portability's sake, the flags should be used as args to Generate() rather than globals.
The concurrent read-kmer generator could be reintroduced using a select statement.
Should probably restrict activity of chans with directionals
It would make sense to discard kmers associated with ClassNodes greater than a certain size.
Kmer counting should be re-added eventually - it's currently excluded for performance reasons because we aren't using it.
We should test a version that doesn't cache minimizers, as that seems to be a needless bottleneck. It could also be conditional on the number of CPUs available.
Minimizers are currently not written to file in any order. This is for memory efficiency, and can be changed in the future.
I should probably change some variable names, like repeatGenome, to less verbose variants, and use more aliases.
Ranges should be changed to use actual values instead of indexes.
Slice sizes should be specified in the make() call when the size is known.
seqToInt and revCompToInt need minor cleanup and a potential name-change.
All sequences containing Ns are currently ignored.
We should consider taking end minimizers once the code base is more mature.
We should also review how to deal with m <= len(match) < k.
For caching efficiency, we should change the minimizer data structure to a map-indexed 1D slice of Kmers (not *Kmers). (This technique originated in Kraken.)
Int sizes should be reviewed for memory efficiency.
The sole command line argument is the name of the reference genome (e.g. "dm3").
*/
import (
"bytes"
"fmt"
"io/ioutil"
"log"
//"mapset"
"os"
"runtime"
"runtime/pprof"
"sort"
"strconv"
"strings"
"sync"
"unsafe"
)
// Flags collects the run-time switches controlling a RepeatGenome run.
// NOTE(review): meanings below are inferred from the field names and the
// file's header comments — confirm against where each flag is consumed.
type Flags struct {
	Debug bool // enable extra debug behavior
	CPUProfile bool // write a CPU profile (runtime/pprof is imported above)
	MemProfile bool // write a memory profile (see memProfFile on RepeatGenome)
	Minimize bool // perform the minimization pass
	WriteKraken bool // write Kraken-style minimizer output — TODO confirm
	WriteJSON bool // write JSON output
}
// Match.SW_Score - Smith-Waterman score, describing the likeness to the repeat reference sequence
// Match.PercDiv - "% substitutions in matching region compared to the consensus" - RepeatMasker docs
// Match.PercDel - "% of bases opposite a gap in the query sequence (deleted bp)" - RepeatMasker docs
// Match.PercIns - "% of bases opposite a gap in the repeat consensus (inserted bp)" - RepeatMasker docs
// Match.SeqName - the reference genome file this match came from (typically the chromosome)
// Match.SeqStart - the starting index (inclusive) in the reference genome
// Match.SeqEnd - the ending index (exclusive) in the reference genome
// Match.SeqRemains - the number of bases past the end of the match in the relevant reference seqence
// Match.IsRevComp - the match may be for the complement of the reference sequence
// Match.RepeatClass - the repeat's full ancestry, including its repeat class and repeat name (which are listed separately in the RepeatMasker output file)
// Match.RepeatStart- the starting index in the repeat consensus sequence
// Match.RepeatEnd - the ending sequence (exclusive) in the consensus repeat sequence
// Match.RepeatRemains - the number of bases past the end of the match in the consensus repeat sequence
// Match.InsertionID - a numerical ID for the repeat type (starts at 1)
//
// below are not in parsed data file
// Match.RepeatName - simply repeatClass concatenated - used for printing and map indexing
// Match.ClassNode - pointer to corresponding ClassNode in RepeatGenome.ClassTree
// Match.Repeat - pointer to corresponding Repeat struct in RepeatGenome.Repeats
// Match represents one parsed line of a RepeatMasker ".fa.out" file; see the
// field-by-field commentary immediately above this type for full semantics.
type Match struct {
	SW_Score   int32
	PercDiv    float64
	PercDel    float64
	PercIns    float64
	SeqName    string
	SeqStart   uint64
	SeqEnd     uint64
	SeqRemains uint64
	IsRevComp  bool
	RepeatClass []string
	// in weird cases, RepeatStart can be negative, so they must be signed
	RepeatStart   int64
	RepeatEnd     int64
	RepeatRemains int64
	InsertionID   uint64
	// these are generated, not parsed
	RepeatName string
	ClassNode  *ClassNode
	Repeat     *Repeat
}
// RepeatGenome bundles everything derived from one reference genome: its raw
// sequences, parsed RepeatMasker matches, the repeat/class hierarchy, and the
// Kraken-style minimizer-indexed kmer table.
type RepeatGenome struct {
	Name  string
	Flags Flags
	// maps a chromosome name to a map of its sequences
	// as discussed above, though, matches only contain 1D sequence indexes
	chroms map[string](map[string]TextSeq)
	K      uint8
	M      uint8
	Kmers  Kmers
	// stores the offset of each minimizer's first kmer in RepeatGenome.Kmers - indexed by the minimizer's index in SortedMins
	OffsetsToMin []uint64
	// sorted list of every minimizer in use, parallel to OffsetsToMin
	// NOTE(review): the previous comment here ("stores the number of kmers that
	// each minimizer is associated with") described a different field.
	SortedMins MinInts
	Matches    Matches
	ClassTree  ClassTree
	Repeats    Repeats
	RepeatMap  map[string]*Repeat
	// heap-profile output file; open only when Flags.MemProfile is set (see Generate)
	memProfFile *os.File
}
// ClassTree is the repeat-class hierarchy, rooted at an explicitly created
// "root" node; nodes are shared between ClassNodes (by name) and NodesByID (by
// numeric ID).
type ClassTree struct {
	// maps all class names to a pointer to their node struct
	// we must use pointers because of this foible in golang: https://code.google.com/p/go/issues/detail?id=3117
	// if we didn't use pointers, we'd have to completely reassign the struct when adding parents, children, etc.
	ClassNodes map[string](*ClassNode)
	// nodes indexed by ClassNode.ID (root is ID 0)
	NodesByID []*ClassNode
	// a pointer to the the class tree's root, used for recursive descent etc.
	// we explicitly create the root (because RepeatMatcher doesn't)
	Root *ClassNode
}
// MuxKmers is a kmer list guarded by its own mutex, so different minimizers'
// lists can be updated concurrently.
type MuxKmers struct {
	sync.Mutex
	Kmers Kmers
}

// MuxMinToKmers maps each minimizer to its mutex-guarded kmer list; the map
// itself is guarded by the embedded RWMutex.
// NOTE(review): this struct embeds a lock and therefore must be passed by
// pointer, not by value - see krakenUpdateThread, which currently copies it.
type MuxMinToKmers struct {
	sync.RWMutex
	m map[MinInt]*MuxKmers
}
// Used to differentiate sequence representations with one base per byte (type TextSeq) from those with four bases per byte (type Seq).
type TextSeq []byte

// Used to clarify context of integers, and to differentiate full Kmers (which include an LCA ID) from integer-represented kmer sequences.
type KmerInt uint64

// KmerInts is a slice of integer-encoded kmer sequences.
type KmerInts []KmerInt

// MinInt is an integer-encoded minimizer (32 bits, per the package notes).
type MinInt uint32

// MinInts is a slice of minimizers; it is sorted in populateKraken.
type MinInts []MinInt

// can store a kmer where k <= 32
// the value of k is not stored in the struct, but rather in the RepeatGenome, for memory efficiency
// first eight bits are the int representation of the sequence
// the last two are the LCA ID
// NOTE(review): "bits" above presumably means "bytes" - bytes 0-7 hold the
// packed sequence and bytes 8-9 the LCA ID, matching the unsafe casts of
// &kmer[0] and &kmer[8] used elsewhere in this file.
type Kmer [10]byte

// as with the Kmer type, each base is represented by two bits
// any excess bits are the first bits of the first byte (seq is right-justified)
// remember that len(Seq.Bases) is not the actual number of bases, but rather the number of bytes necessary to represent them
type Seq struct {
	Bases []byte
	Len   uint64
}

// Seqs is a slice of packed sequences.
type Seqs []Seq

// repeatLoc records one genomic occurrence of a repeat: the sequence it lies
// on and its start/end indexes.
type repeatLoc struct {
	SeqName  string
	StartInd uint64
	EndInd   uint64
}
// Repeat is one unique repeat type, aggregating every Match that refers to it.
type Repeat struct {
	// assigned in simple incremented order starting from 1
	// they are therefore not compatible across genomes
	// we give root ID = 0
	ID uint64
	// a list containing the repeat's ancestry path, from top down
	// root is implicit, and is therefore excluded from the list
	ClassList []string
	ClassNode *ClassNode
	Name      string
	// every Match with this repeat's name (populated by getRepeats)
	Instances []*Match
	// genomic locations of this repeat
	// NOTE(review): getRepeats currently records only the first match's location
	// here - confirm whether that is intentional
	Locations []repeatLoc
}
// ClassNode is one node of the repeat-class hierarchy (see ClassTree).
type ClassNode struct {
	Name string
	// index into ClassTree.NodesByID; uint16, hence the 65k-node cap in getClassTree
	ID     uint16
	Class  []string
	Parent *ClassNode
	Children []*ClassNode
	IsRoot bool
	// the Repeat whose full name equals this node's name, if any
	Repeat *Repeat
}
// type synonyms, necessary to implement interfaces (e.g. sort) and methods
type Kmers []Kmer
type PKmers []*Kmer
type MinMap map[MinInt]Kmers
type Repeats []Repeat
type Matches []Match

//type Chroms map[string](map[string]TextSeq)

// MinPair pairs a packed kmer with its minimizer.
// NOTE(review): the first field is named KmerInt but typed Kmer (the 10-byte
// packed form, not the integer form) - the name looks stale; confirm intent.
type MinPair struct {
	KmerInt   Kmer
	Minimizer MinInt
}

// ThreadResponse is the unit of work emitted by minimizeThread: a canonical
// kmer, its minimizer, and the class node of the match it came from.
type ThreadResponse struct {
	KmerInt  KmerInt
	MinInt   MinInt
	Relative *ClassNode
}

// MinCache memoizes kmer -> minimizer computations.
// NOTE(review): not referenced in the visible portion of this file.
type MinCache map[KmerInt]MinInt
// parseMatches parses the RepeatMasker output file
// "<genomeName>/<genomeName>.fa.out" and returns its matches. The
// (error, Matches) return order follows this package's convention; on any I/O
// or parse failure a ParseError and a nil slice are returned.
func parseMatches(genomeName string) (error, Matches) {
	// "my_genome_name" -> "my_genome_name/my_genome_name.fa.out"
	filepath := strings.Join([]string{genomeName, "/", genomeName, ".fa.out"}, "")
	err, matchLines := fileLines(filepath)
	if err != nil {
		return ParseError{"repeatgenome.parseMatches()", filepath, err}, nil
	}
	// drop the three header lines
	matchLines = matchLines[3:]

	var matches Matches
	var swScore int64

	for _, matchLine := range matchLines {
		rawVals := strings.Fields(string(matchLine))
		if len(rawVals) != 15 {
			return ParseError{"repeatgenome.parseMatches()",
				filepath,
				fmt.Errorf("supplied match line is not 15 fields long (has %d fields and length %d):\n", len(rawVals), len(matchLine))}, nil
		}
		var match Match
		match.IsRevComp = rawVals[8] == "C"

		// remove enclosing parentheses
		// !!! in the future, checks to ensure that the parentheses exist should be added
		// !!! it would also be sensible to check that rawVals[8] is either "C" or "+"
		rawVals[7] = rawVals[7][1 : len(rawVals[7])-1]
		if match.IsRevComp {
			rawVals[11] = rawVals[11][1 : len(rawVals[11])-1]
		} else {
			rawVals[13] = rawVals[13][1 : len(rawVals[13])-1]
		}

		// everything in this block is just vanilla trimming, converting, and error checking
		swScore, err = strconv.ParseInt(rawVals[0], 10, 32)
		if err != nil {
			return ParseError{"repeatgenome.parseMatches()", filepath, err}, nil
		}
		match.SW_Score = int32(swScore)
		match.PercDiv, err = strconv.ParseFloat(rawVals[1], 64)
		if err != nil {
			return ParseError{"repeatgenome.parseMatches()", filepath, err}, nil
		}
		match.PercDel, err = strconv.ParseFloat(rawVals[2], 64)
		if err != nil {
			return ParseError{"repeatgenome.parseMatches()", filepath, err}, nil
		}
		match.PercIns, err = strconv.ParseFloat(rawVals[3], 64)
		if err != nil {
			return ParseError{"repeatgenome.parseMatches()", filepath, err}, nil
		}
		match.SeqName = strings.TrimSpace(rawVals[4])
		match.SeqStart, err = strconv.ParseUint(rawVals[5], 10, 64)
		if err != nil {
			return ParseError{"repeatgenome.parseMatches()", filepath, err}, nil
		}
		match.SeqEnd, err = strconv.ParseUint(rawVals[6], 10, 64)
		if err != nil {
			return ParseError{"repeatgenome.parseMatches()", filepath, err}, nil
		}
		match.SeqRemains, err = strconv.ParseUint(rawVals[7], 10, 64)
		if err != nil {
			return ParseError{"repeatgenome.parseMatches()", filepath, err}, nil
		}
		// match.IsRevComp, rawVals[8], parsed above
		match.RepeatClass = append(strings.Split(strings.TrimSpace(rawVals[10]), "/"), strings.TrimSpace(rawVals[9]))
		match.RepeatStart, err = strconv.ParseInt(rawVals[11], 10, 64)
		if err != nil {
			return ParseError{"repeatgenome.parseMatches()", filepath, err}, nil
		}
		match.RepeatEnd, err = strconv.ParseInt(rawVals[12], 10, 64)
		if err != nil {
			return ParseError{"repeatgenome.parseMatches()", filepath, err}, nil
		}
		match.RepeatRemains, err = strconv.ParseInt(rawVals[13], 10, 64)
		if err != nil {
			return ParseError{"repeatgenome.parseMatches()", filepath, err}, nil
		}
		match.InsertionID, err = strconv.ParseUint(rawVals[14], 10, 64)
		if err != nil {
			return ParseError{"repeatgenome.parseMatches()", filepath, err}, nil
		}

		// Convert reverse-complement repeat coordinates to positive-strand ones.
		// RepeatMasker lists the repeat fields of "C" matches as "(left) end begin",
		// so the parsed RepeatStart actually holds the remaining-base count and
		// RepeatRemains holds the true start; swap them simultaneously.
		// BUG FIX: the previous sequential assignments overwrote RepeatEnd with the
		// already-updated RepeatStart, collapsing the repeat extent to zero -
		// confirm the intended mapping against the RepeatMasker documentation.
		if match.IsRevComp {
			match.RepeatStart, match.RepeatRemains = match.RepeatRemains, match.RepeatStart
		}

		// decrement match.SeqStart and match.RepeatStart so that they work from a start index of 0 rather than 1
		// that way, we can use them without modification in slices
		match.SeqStart--
		match.RepeatStart--

		// "Other" and "Unknown" classes are hierarchically meaningless and really just mean "root", so we remove them
		if match.RepeatClass[0] == "Other" || match.RepeatClass[0] == "Unknown" {
			match.RepeatClass = match.RepeatClass[1:]
		}

		match.RepeatName = strings.Join(match.RepeatClass, "/")
		matches = append(matches, match)
	}
	return nil, matches
}
// parseGenome reads every FASTA (".fa") file in the directory named genomeName
// and returns a two-level map: chromosome (file) name -> sequence name ->
// lowercased, concatenated sequence. Returns (err, nil) on failure.
func parseGenome(genomeName string) (error, map[string](map[string]TextSeq)) {
	chromFileInfos, err := ioutil.ReadDir(genomeName)
	if err != nil {
		return IOError{"repeatgenome.parseGenome()", err}, nil
	}
	warned := false
	chroms := make(map[string](map[string]TextSeq))
	// used below to store the two keys for RepeatGenome.chroms
	for i := range chromFileInfos {
		// "my_genome_name", "my_chrom_name" -> "my_genome_name/my_chrom_name"
		chromFilename := chromFileInfos[i].Name()
		chromFilepath := strings.Join([]string{genomeName, chromFilename}, "/")
		// process the ref genome files (*.fa), not the repeat ref files (*.fa.out and *.fa.align) or anything else
		if strings.HasSuffix(chromFilepath, ".fa") {
			err, seqLines := fileLines(chromFilepath)
			if err != nil {
				// error should already be IOError
				return err, nil
			}
			// maps each sequence name in this chrom to a slice of its sequence's lines
			// the list is concatenated at the end for efficiency's sake
			seqMap := make(map[string][][]byte)
			numLines := uint64(len(seqLines))
			var seqName string = "" // forward initialization necessary
			var i uint64
			for i = 0; i < numLines; i++ {
				seqLine := bytes.TrimSpace(seqLines[i])
				// BUG FIX: skip blank lines - indexing seqLine[0] below would
				// otherwise panic, and blank lines are common in FASTA files
				if len(seqLine) == 0 {
					continue
				}
				if seqLine[0] == byte('>') {
					seqName = string(bytes.TrimSpace(seqLine[1:]))
					if !warned && seqName != chromFilename[:len(chromFilename)-3] {
						fmt.Println("WARNING: reference genome is two-dimensional, containing sequences not named after their chromosome.")
						fmt.Println("Because RepeatMasker supplied only one-dimensional indexing, this may cause unexpected behavior or program failure.")
						fmt.Println("seqName:", seqName, "\tlen(seqName):", len(seqName))
						fmt.Println("chrom name:", chromFilename[:len(chromFilename)-3], "\tlen(chrom name):", len(chromFilename)-3)
						warned = true
					}
				} else {
					if seqName == "" {
						return ParseError{"repeatgenome.parseGenome", chromFilepath, fmt.Errorf("Empty or missing sequence name")}, nil
					}
					seqMap[seqName] = append(seqMap[seqName], seqLine)
				}
			}
			// finally, we insert this map into the full map
			chromName := chromFilepath[len(genomeName)+1 : len(chromFilepath)-3]
			chroms[chromName] = make(map[string]TextSeq)
			for seqName, seqLines := range seqMap {
				chroms[chromName][seqName] = TextSeq(bytes.ToLower(bytes.Join(seqLines, []byte{})))
			}
		}
	}
	return nil, chroms
}
// Generate builds a RepeatGenome for the named genome: it parses the reference
// sequences and RepeatMasker matches, derives the repeat list and class tree,
// and - depending on rgFlags - computes the minimizer/kmer table, writes the
// class tree as JSON, and runs debug checks. k and m are the kmer and
// minimizer lengths. Returns (err, nil) on failure.
func Generate(genomeName string, k, m uint8, rgFlags Flags) (error, *RepeatGenome) {
	var err error
	// we populate the RepeatGenome mostly with helper functions
	// we should consider whether it makes more sense for them to alter the object directly, than to return their results
	rg := new(RepeatGenome)
	rg.Name = genomeName
	rg.Flags = rgFlags

	if rg.Flags.MemProfile {
		// BUG FIX: was os.Mkdir("profiles", os.ModeDir) - os.ModeDir carries no
		// permission bits, yielding an unusable directory on POSIX systems.
		// The error is deliberately ignored: the directory may already exist.
		os.Mkdir("profiles", 0755)
		rg.memProfFile, err = os.Create("profiles/" + rg.Name + ".memprof")
		if err != nil {
			// BUG FIX: the error context previously named getKrakenSlice()
			return IOError{"repeatgenome.Generate()", err}, nil
		}
		pprof.WriteHeapProfile(rg.memProfFile)
		defer rg.memProfFile.Close()
	}

	err, rg.chroms = parseGenome(genomeName)
	if err != nil {
		return err, nil
	}
	err, rg.Matches = parseMatches(genomeName)
	if err != nil {
		return err, nil
	}
	rg.getRepeats()
	rg.getClassTree()
	rg.K = k
	rg.M = m

	if rg.Flags.Minimize {
		// calling the parallel minimizer and writing the result
		rg.getKrakenSlice()
	}
	if rg.Flags.WriteJSON {
		rg.WriteClassJSON(false, false)
	}
	if rg.Flags.Debug {
		rg.RunDebugTests()
	}
	return nil, rg
}
// RunDebugTests prints an assortment of sanity checks for a populated
// RepeatGenome: chromosome excerpts, class-tree structure, LCA spot-checks,
// and a sample minimizer computation.
func (rg *RepeatGenome) RunDebugTests() {
	fmt.Println()
	for k, v := range rg.chroms {
		for k_, v_ := range v {
			fmt.Printf("chrom: %s\tseq: %s\t%s...%s\n", k, k_, v_[:20], v_[len(v_)-20:])
		}
	}
	fmt.Println()
	fmt.Println("number of chromosomes parsed:", len(rg.chroms))
	fmt.Println()
	fmt.Println("total number of bases in genome:", rg.Size())
	rg.ClassTree.PrintBranches()
	fmt.Println()
	fmt.Println("number of ClassNodes:", len(rg.ClassTree.ClassNodes))
	fmt.Println()
	fmt.Println("rg.ClassTree.getLCA(rg.ClassTree.ClassNodes['DNA/TcMar-Mariner'], rg.ClassTree.ClassNodes['DNA/TcMar-Tc1']).Name:", rg.ClassTree.getLCA(rg.ClassTree.ClassNodes["DNA/TcMar-Mariner"], rg.ClassTree.ClassNodes["DNA/TcMar-Tc1"]).Name)
	fmt.Println("rg.ClassTree.getLCA(rg.ClassTree.ClassNodes['ARTEFACT'], rg.ClassTree.ClassNodes['DNA/TcMar-Tc1']).Name:", rg.ClassTree.getLCA(rg.ClassTree.ClassNodes["ARTEFACT"], rg.ClassTree.ClassNodes["DNA/TcMar-Tc1"]).Name)
	fmt.Println("rg.ClassTree.getLCA(rg.ClassTree.ClassNodes['LINE/LOA'], rg.ClassTree.ClassNodes['root']).Name:", rg.ClassTree.getLCA(rg.ClassTree.ClassNodes["LINE/LOA"], rg.ClassTree.ClassNodes["root"]).Name)
	fmt.Println("rg.ClassTree.getLCA(rg.ClassTree.ClassNodes['Simple_repeat/(T)n'], rg.ClassTree.ClassNodes['Simple_repeat/(T)n']).Name:", rg.ClassTree.getLCA(rg.ClassTree.ClassNodes["Simple_repeat/(T)n"], rg.ClassTree.ClassNodes["Simple_repeat/(T)n"]).Name)
	fmt.Println("rg.ClassTree.getLCA(rg.ClassTree.ClassNodes['LTR/Gypsy/MICROPIA_I-int'], rg.ClassTree.ClassNodes['LTR/Gypsy']).Name:", rg.ClassTree.getLCA(rg.ClassTree.ClassNodes["LTR/Gypsy/MICROPIA_I-int"], rg.ClassTree.ClassNodes["LTR/Gypsy"]).Name)
	fmt.Println("rg.ClassTree.getLCA(rg.ClassTree.ClassNodes['LTR/Gypsy'], rg.ClassTree.ClassNodes['LTR/Gypsy/MICROPIA_I-int']).Name:", rg.ClassTree.getLCA(rg.ClassTree.ClassNodes["LTR/Gypsy"], rg.ClassTree.ClassNodes["LTR/Gypsy/MICROPIA_I-int"]).Name)
	fmt.Println()
	fmt.Println("min(5, 7):", min(5, 7))
	fmt.Println("max64(int64(5), int64(7)):", max64(int64(5), int64(7)))
	fmt.Println()
	testSeq := TextSeq("atgtttgtgtttttcataaagacgaaagatg")
	thisMin := testSeq.kmerInt().Minimize(uint8(len(testSeq)), 15)
	// BUG FIX: the label below previously quoted a different sequence and a
	// stale function name; it now reflects the call actually made
	fmt.Printf("TextSeq(%q).kmerInt().Minimize(%d, 15): \n", testSeq, len(testSeq))
	thisMin.print(15)
	fmt.Println()
	fmt.Printf("Kmer struct size: %d\n", unsafe.Sizeof(Kmer{}))
}
// getRepeats populates rg.Repeats (one entry per unique repeat name, ID'd in
// first-seen order) and rg.RepeatMap, and links every Match to its Repeat.
//
// BUG FIX: the previous version stored &repeat (a loop local) in RepeatMap
// while appending a *copy* of it to rg.Repeats, so Instances accumulated on
// the local struct while the slice entry - the one later annotated by
// getClassTree() - stayed stale, and the two views of a Repeat silently
// diverged. We now grow rg.Repeats to its final length first and only then
// take pointers into it, so map and slice refer to the same structs.
// (rg.Repeats must not be appended to afterwards or those pointers would be
// invalidated; no visible code grows it again.)
func (rg *RepeatGenome) getRepeats() {
	rg.RepeatMap = make(map[string]*Repeat)

	// first pass: create one Repeat per unique name, recording only the first
	// occurrence's location (matching previous behavior - NOTE(review): confirm
	// whether Locations should instead record every instance)
	seen := make(map[string]bool)
	for i := range rg.Matches {
		match := &rg.Matches[i]
		if seen[match.RepeatName] {
			continue
		}
		seen[match.RepeatName] = true
		var repeat Repeat
		repeat.ID = uint64(len(rg.Repeats))
		repeat.ClassList = match.RepeatClass
		repeat.Name = match.RepeatName
		repeat.Locations = append(repeat.Locations, repeatLoc{match.SeqName, match.SeqStart, match.SeqEnd})
		rg.Repeats = append(rg.Repeats, repeat)
	}

	// second pass: rg.Repeats is fully grown, so pointers into it are stable
	for i := range rg.Repeats {
		repeat := &rg.Repeats[i]
		rg.RepeatMap[repeat.Name] = repeat
	}

	// third pass: link matches to their repeats, preserving match order in Instances
	for i := range rg.Matches {
		match := &rg.Matches[i]
		repeat := rg.RepeatMap[match.RepeatName]
		repeat.Instances = append(repeat.Instances, match)
		match.Repeat = repeat
	}
}
// getClassTree populates rg.ClassTree from the already-populated rg.Repeats,
// creating one ClassNode per distinct ancestry prefix (e.g. for
// "DNA/LINE/TiGGER": "DNA", "DNA/LINE", "DNA/LINE/TiGGER") plus an explicit
// root, then back-links every Repeat and Match to its ClassNode.
func (rg *RepeatGenome) getClassTree() {
	// mapping to pointers allows us to make references (i.e. pointers) to values
	tree := &rg.ClassTree
	tree.ClassNodes = make(map[string](*ClassNode))
	// would be prettier if expanded
	tree.Root = &ClassNode{"root", 0, []string{"root"}, nil, nil, true, nil}
	tree.ClassNodes["root"] = tree.Root
	tree.NodesByID = append(tree.NodesByID, tree.Root)

	// CONSISTENCY FIX: iterate by index rather than `range` value - Repeat is a
	// large struct, and the value form copies one per iteration; the loops at
	// the bottom of this function already avoid that for the same reason
	for i := range rg.Repeats {
		repeat := &rg.Repeats[i]
		// process every hierarchy level (e.g. for "DNA/LINE/TiGGER", process "DNA", then "DNA/LINE", then "DNA/LINE/TiGGER")
		for j := 1; j <= len(repeat.ClassList); j++ {
			thisClass := repeat.ClassList[:j]
			thisClassName := strings.Join(thisClass, "/")
			_, keyExists := tree.ClassNodes[thisClassName]
			if !keyExists {
				if len(tree.NodesByID) > 65534 {
					panic("RepeatGenome.getClassTree(): more than 65,536 class nodes - ID is overflowed")
				}
				classNode := new(ClassNode)
				classNode.Name = thisClassName
				classNode.ID = uint16(len(tree.NodesByID))
				classNode.Class = thisClass
				classNode.IsRoot = false
				// link the node to its like-named Repeat, if one exists
				if classRepeat, exists := rg.RepeatMap[thisClassName]; exists {
					classNode.Repeat = classRepeat
				}
				tree.ClassNodes[thisClassName] = classNode
				tree.NodesByID = append(tree.NodesByID, classNode)
				// first case handles primary classes, as root is implicit and not listed in thisClass
				if j == 1 {
					classNode.Parent = tree.Root
				} else {
					classNode.Parent = tree.ClassNodes[strings.Join(thisClass[:len(thisClass)-1], "/")]
				}
				if classNode.Parent.Children == nil {
					classNode.Parent.Children = make([]*ClassNode, 0)
				}
				classNode.Parent.Children = append(classNode.Parent.Children, tree.ClassNodes[thisClassName])
			}
		}
	}

	// MUST NOT USE RANGE - the struct will be copied!
	for i := 0; i < len(rg.Repeats); i++ {
		repeat := &rg.Repeats[i]
		repeat.ClassNode = tree.ClassNodes[repeat.Name]
		if repeat.ClassNode == nil {
			fmt.Println(repeat.Name)
			log.Fatal("getClassTree(): nil Repeat.ClassNode")
		}
	}
	// MUST NOT USE RANGE - the struct will be copied!
	for i := 0; i < len(rg.Matches); i++ {
		match := &rg.Matches[i]
		match.ClassNode = tree.ClassNodes[match.RepeatName]
		if match.ClassNode == nil {
			fmt.Println(match.RepeatName)
			log.Fatal("getClassTree(): nil Match.ClassNode")
		}
	}
}
// getAncestry returns cn's ancestry as a list beginning at (and including) cn
// itself and walking upward; the root is excluded. For the root itself the
// result is an empty (non-nil) slice.
func (cn *ClassNode) getAncestry() []*ClassNode {
	ancestry := []*ClassNode{}
	for node := cn; !node.IsRoot; node = node.Parent {
		ancestry = append(ancestry, node)
	}
	return ancestry
}
// getLCA returns the lowest common ancestor of cnA and cnB, falling back to
// the tree root when their ancestries share no other node. Each ancestor of
// cnB is checked against every ancestor of cnA; nodes compare by pointer
// identity.
func (classTree *ClassTree) getLCA(cnA, cnB *ClassNode) *ClassNode {
	bWalker := cnB
	for bWalker != classTree.Root {
		aWalker := cnA
		for aWalker != classTree.Root {
			if aWalker == bWalker {
				return aWalker
			}
			aWalker = aWalker.Parent
		}
		bWalker = bWalker.Parent
	}
	return classTree.Root
}
// minimizeThread emits one ThreadResponse per valid kmer of every match in
// rg.Matches[matchStart:matchEnd]: the kmer canonicalized to the smaller of
// itself and its reverse complement, its minimizer, and the match's class
// node. Kmers containing 'n' are skipped, as are matches shorter than k.
// The channel is closed when the range is exhausted. Heap allocations are
// deliberately avoided, so reverse complements are never materialized.
// NOTE(review): the chromosome map is indexed twice with match.SeqName,
// assuming each sequence is named after its chromosome file - parseGenome
// warns when that assumption fails.
func (rg *RepeatGenome) minimizeThread(matchStart, matchEnd uint64, c chan ThreadResponse) {
	k := rg.K
	k_ := uint64(k)
	m := rg.M

	for idx := matchStart; idx < matchEnd; idx++ {
		match := &rg.Matches[idx]
		seq := rg.chroms[match.SeqName][match.SeqName]
		// for now, we ignore matches too short to be traditionally minimized
		if match.SeqEnd-match.SeqStart < k_ {
			continue
		}

	KmerLoop:
		for start := match.SeqStart; start <= match.SeqEnd-k_; start++ {
			// skip any kmer containing an 'n', scanning back-to-front so the
			// whole offending window is jumped in one step; the offset is
			// signed, which avoids the unsigned-underflow break hack
			for off := int64(k_) - 1; off >= 0; off-- {
				if seq[start+uint64(off)] == 'n' {
					start += uint64(off)
					continue KmerLoop
				}
			}

			kmerInt := TextSeq(seq[start : start+k_]).kmerInt()
			// make the sequence strand-agnostic
			kmerInt = minKmerInt(kmerInt, kmerInt.revComp(k))
			c <- ThreadResponse{kmerInt, kmerInt.Minimize(k, m), match.ClassNode}
		}
	}
	close(c)
}
// krakenUpdateThread consumes minimizer/kmer responses from respChan and
// merges each kmer into the shared minimizer->kmers table, resolving repeated
// kmers by replacing the stored LCA ID with the LCA of the old class and the
// new relative. Intended to run as one of several concurrent workers started
// by getKrakenSlice().
//
// NOTE(review): this function has several defects that require a coordinated
// fix with its caller (signature change to pointer parameters):
//   - minToKmers (embeds sync.RWMutex) and wg (sync.WaitGroup) are passed BY
//     VALUE, so each worker locks its own mutex copy (no mutual exclusion over
//     the shared map) and calls Done() on a WaitGroup copy, leaving the
//     caller's Wait() hanging. `go vet` flags both (copylocks).
//   - the RUnlock-then-Lock sequence in the else branch is check-then-act:
//     two workers can both miss the key and insert, losing one worker's kmer.
//   - `kmers = append(kmers, kmer)` grows a local slice header; when append
//     reallocates, the new kmer is dropped because muxKmers.Kmers is never
//     reassigned.
//   - the fmt.Println below is leftover per-response debug output.
func (rg *RepeatGenome) krakenUpdateThread(minToKmers MuxMinToKmers, respChan <-chan ThreadResponse, wg sync.WaitGroup) {
	for resp := range respChan {
		kmerInt, minInt, relative := resp.KmerInt, resp.MinInt, resp.Relative
		minToKmers.RLock()
		// NOTE(review): leftover debug print - executed for every response
		fmt.Println(minToKmers.m[minInt])
		if muxKmers, minExists := minToKmers.m[minInt]; minExists {
			minToKmers.RUnlock();
			muxKmers.Lock()
			kmers := muxKmers.Kmers
			kmerExists := false
			for i, kmer := range kmers {
				// the case that we've already processed this exact kmer - we just update the LCA
				if kmerInt == *(*KmerInt)(unsafe.Pointer(&kmer[0])) {
					kmerExists = true
					prev_LCA_ID := *(*uint16)(unsafe.Pointer(&kmer[8]))
					lca := rg.ClassTree.getLCA(rg.ClassTree.NodesByID[prev_LCA_ID], relative)
					/*
						prevLCA := rg.ClassTree.NodesByID[prev_LCA_ID]
						if lca != rg.ClassTree.Root && !(strings.HasPrefix(prevLCA.Name, lca.Name) && strings.HasPrefix(relative.Name, lca.Name)) {
							fmt.Println()
							fmt.Println("prev LCA:", prevLCA.Name)
							fmt.Println("relative:", relative.Name)
							fmt.Println("new LCA:", lca.Name)
							panic("fatal LCA error")
						}
					*/
					*(*uint16)(unsafe.Pointer(&kmer[8])) = lca.ID
					kmers[i] = kmer
					break
				}
			}
			if !kmerExists {
				var kmer Kmer
				*(*KmerInt)(unsafe.Pointer(&kmer[0])) = kmerInt
				*(*uint16)(unsafe.Pointer(&kmer[8])) = relative.ID
				// NOTE(review): appends to the local header only - see defect list above
				kmers = append(kmers, kmer)
			}
			muxKmers.Unlock()
			// ...otherwise we initialize it in the kmerMap
		} else {
			minToKmers.RUnlock();
			var kmer Kmer
			*(*KmerInt)(unsafe.Pointer(&kmer[0])) = kmerInt
			*(*uint16)(unsafe.Pointer(&kmer[8])) = relative.ID
			// we don't need to lock because the update is atomic with the addition
			minToKmers.Lock()
			minToKmers.m[minInt] = &MuxKmers{ sync.Mutex{}, Kmers{kmer} }
			minToKmers.Unlock()
		}
	}
	wg.Done()
}
// getKrakenSlice drives the parallel minimization pipeline: it shards
// rg.Matches across minimizeThread producers, fans their responses into
// krakenUpdateThread consumers, and finally flattens the result via
// populateKraken().
//
// NOTE(review): several work-in-progress defects here, matching those in
// krakenUpdateThread:
//   - wg and minToKmers are handed to the workers BY VALUE (copied locks and a
//     copied WaitGroup); wg.Wait() below can therefore never return once a
//     worker finishes, because Done() acts on a copy.
//   - wg.Add(1) is called after `go ...`; Add should precede goroutine start.
//   - os.Exit(0) below is leftover debug scaffolding - everything after it,
//     including the populateKraken() call and the MemProfile write, is dead
//     code.
//   - the commented-out block contains placeholder syntax
//     (`for response := range {`) and will not compile if revived as-is.
func (rg *RepeatGenome) getKrakenSlice() error {
	// a rudimentary way of deciding how many threads to allow, should eventually be improved
	numCPU := runtime.NumCPU()
	if rg.Flags.Debug {
		fmt.Printf("getKrakenSlice() using %d CPUs\n", ceilDiv(numCPU, 2))
	}
	runtime.GOMAXPROCS(numCPU)
	var mStart, mEnd uint64
	if rg.Flags.Debug {
		numKmers := rg.numKmers()
		fmt.Printf("expecting >= %d million kmers\n", numKmers/1000000)
	}
	// one producer goroutine (and channel) per CPU, each owning a contiguous
	// shard of rg.Matches
	var threadChans [](chan ThreadResponse)
	for i := 0; i < numCPU; i++ {
		mStart = uint64(i * len(rg.Matches) / numCPU)
		mEnd = uint64((i + 1) * len(rg.Matches) / numCPU)
		c := make(chan ThreadResponse, 1000)
		threadChans = append(threadChans, c)
		go rg.minimizeThread(mStart, mEnd, c)
	}
	numUpdateThreads := numCPU / 2
	var wg sync.WaitGroup
	var minToKmers MuxMinToKmers
	minToKmers.m = make(map[MinInt]*MuxKmers)
	masterRespChan := mergeThreadResp(threadChans)
	for i := 0; i < numUpdateThreads; i++ {
		go rg.krakenUpdateThread(minToKmers, masterRespChan, wg)
		wg.Add(1)
	}
	wg.Wait()
	fmt.Println()
	fmt.Println("finished!")
	// NOTE(review): debug exit - all code below is currently unreachable
	os.Exit(0)
	/*
		// below is the atomic section of minimizing
		// this seems to be the rate-limiting section, as 24+ goroutines use only ~9-10 CPU-equivalents
		// it should therefore be optimized first
		var kmersProcessed uint64 = 0
		for response := range {
			if kmersProcessed%5000000 == 0 {
				fmt.Println(comma(kmersProcessed/1000000), "million kmers processed...")
			}
			kmersProcessed++
		}
	*/
	fmt.Println("...all kmers processed")
	fmt.Println()
	if rg.Flags.MemProfile {
		pprof.WriteHeapProfile(rg.memProfFile)
	}
	return rg.populateKraken(minToKmers)
}
// populateKraken flattens the minimizer->kmers map into the RepeatGenome's
// final Kraken-style layout: rg.SortedMins (sorted minimizers), rg.Kmers (each
// minimizer's sorted kmers, concatenated in minimizer order), and
// rg.OffsetsToMin (the offset of each minimizer's first kmer in rg.Kmers).
// Optionally serializes the result and writes a heap profile, per rg.Flags.
func (rg *RepeatGenome) populateKraken(minToKmers MuxMinToKmers) error {
	// count kmers, sort each minimizer's kmer list in place, and collect the minimizers
	var numUniqKmers uint64
	for minInt, muxKmers := range minToKmers.m {
		numUniqKmers += uint64(len(muxKmers.Kmers))
		sort.Sort(muxKmers.Kmers)
		rg.SortedMins = append(rg.SortedMins, minInt)
	}
	sort.Sort(rg.SortedMins)

	fmt.Println(comma(numUniqKmers), "unique kmers generated")
	numUniqMins := uint64(len(rg.SortedMins))
	fmt.Println(comma(numUniqMins), "unique minimizers used")

	// concatenate the per-minimizer kmer lists in sorted-minimizer order,
	// recording where each minimizer's run begins
	rg.OffsetsToMin = make([]uint64, 0, numUniqMins)
	rg.Kmers = make(Kmers, 0, numUniqKmers)
	var offset uint64
	for _, thisMin := range rg.SortedMins {
		kmers := minToKmers.m[thisMin].Kmers
		rg.OffsetsToMin = append(rg.OffsetsToMin, offset)
		offset += uint64(len(kmers))
		rg.Kmers = append(rg.Kmers, kmers...)
	}

	if uint64(len(rg.Kmers)) != numUniqKmers {
		panic(fmt.Errorf("error populating RepeatGenome.Kmers - %d kmers inserted rather than expected %d", len(rg.Kmers), numUniqKmers))
	}

	if rg.Flags.WriteKraken {
		if err := rg.WriteMins(); err != nil {
			return err
		}
	}
	if rg.Flags.MemProfile {
		pprof.WriteHeapProfile(rg.memProfFile)
	}
	return nil
}
// numKmers returns the total kmer count across all matches: each match's
// sequence is split on 'n' characters, and every fragment of length >= k
// contributes len(fragment)-k+1 kmers.
func (rg *RepeatGenome) numKmers() uint64 {
	k := int(rg.K)
	var total uint64
	isN := func(c rune) bool { return c == 'n' }
	// index rather than range over values to avoid copying Match structs
	for i := range rg.Matches {
		match := &rg.Matches[i]
		seq := rg.chroms[match.SeqName][match.SeqName][match.SeqStart:match.SeqEnd]
		for _, frag := range bytes.FieldsFunc([]byte(seq), isN) {
			if len(frag) >= k {
				total += uint64(len(frag) - k + 1)
			}
		}
	}
	return total
}
// getMinIndex binary-searches rg.SortedMins for minInt, returning (true,
// index) on a hit and (false, 0) when the minimizer is absent.
func (rg *RepeatGenome) getMinIndex(minInt MinInt) (bool, uint64) {
	lo, hi := uint64(0), uint64(len(rg.SortedMins))
	for lo < hi {
		mid := lo + (hi-lo)/2
		switch {
		case rg.SortedMins[mid] == minInt:
			return true, mid
		case rg.SortedMins[mid] > minInt:
			hi = mid
		default:
			lo = mid + 1
		}
	}
	return false, 0
}
// getKmer returns a pointer to the stored Kmer whose packed sequence equals
// kmerInt, or nil if it is absent. Lookup is two binary searches: first the
// kmer's minimizer in rg.SortedMins, then the kmer itself within that
// minimizer's contiguous, sorted run of rg.Kmers.
func (rg *RepeatGenome) getKmer(kmerInt KmerInt) *Kmer {
	minimizer := kmerInt.Minimize(rg.K, rg.M)
	minExists, minIndex := rg.getMinIndex(minimizer)
	if !minExists {
		return nil
	}

	// determine the [startInd, endInd) range of rg.Kmers owned by this
	// minimizer; the last minimizer's run extends to the end of rg.Kmers
	startInd := rg.OffsetsToMin[minIndex]
	var endInd uint64
	if minIndex == uint64(len(rg.SortedMins))-1 {
		endInd = uint64(len(rg.Kmers))
	} else {
		endInd = rg.OffsetsToMin[minIndex+1]
	}
	if endInd > uint64(len(rg.Kmers)) {
		panic(fmt.Errorf("getKmer(): out-of-bounds RepeatGenome.Kmers access (len(rg.Kmers) = %d, endInd = %d)", len(rg.Kmers), endInd))
	}

	// binary search within the minimizer's run; a Kmer's first eight bytes are
	// its integer-encoded sequence, read in place via unsafe
	lo, hi := startInd, endInd
	for lo < hi {
		mid := (lo + hi) / 2
		midKmerInt := *(*KmerInt)(unsafe.Pointer(&rg.Kmers[mid][0]))
		switch {
		case midKmerInt == kmerInt:
			return &rg.Kmers[mid]
		case midKmerInt < kmerInt:
			lo = mid + 1
		default:
			hi = mid
		}
	}
	return nil
}
// SeqAndClass pairs a packed sequence with its classified repeat-class node.
// NOTE(review): not referenced in the visible portion of this file.
type SeqAndClass struct {
	Seq   Seq
	Class *ClassNode
}
/*
func (rg *RepeatGenome) kmerSeqFeed(seq TextSeq) chan uint64 {
c := make(chan uint64)
go func() {
numKmers := uint8(len(seq)) - (rg.K - 1)
var i uint8
KmerLoop:
for i = 0; i < numKmers; i++ {
kmerSeq := seq[i : i+rg.K]
// an ugly but necessary n-skipper
for j := rg.K - 1; j >= 0; j-- {
if kmerSeq[j] == byte('n') {
i += j
continue KmerLoop
}
// necessary for j to be unsigned
if j == 0 { break }
}
kmerInt := seqToInt(string(kmerSeq))
c <- minU64(kmerInt, intRevComp(kmerInt, rg.K))
}
}()
return c
}
*/
// ReadResponse is the classification result for a single read: the read itself
// and the class node it was assigned, or nil when no kmer matched
// (see ClassifyReads).
type ReadResponse struct {
	Seq       TextSeq
	ClassNode *ClassNode
}
// ClassifyReads classifies each read by looking up its kmers (canonicalized to
// the smaller of the kmer and its reverse complement) in the genome's kmer
// table; the first hit determines the read's class, and reads with no hit are
// reported with a nil ClassNode. One ReadResponse per read is sent on
// responseChan, which is closed when all reads are processed.
//
// This function assumes that the Seqs in readSeqs do not contain 'n's.
// The output reads of sequencing simulators will generally contain 'n's if the input reference genome does.
// They must therefore be filtered upstream.
func (rg *RepeatGenome) ClassifyReads(readTextSeqs []TextSeq, responseChan chan ReadResponse) {
	// kmerSet and byteBuf are debug-only aids; both stay nil unless Debug is set
	var kmerSet map[KmerInt]bool
	var byteBuf TextSeq
	if rg.Flags.Debug {
		byteBuf = make(TextSeq, rg.K, rg.K)
		kmerSet = make(map[KmerInt]bool, len(rg.Kmers))
		for _, kmer := range rg.Kmers {
			kmerSeq := *(*KmerInt)(unsafe.Pointer(&kmer[0]))
			kmerSet[kmerSeq] = true
		}
	}
ReadLoop:
	for _, read := range readTextSeqs {
		// we use signed int64s in this triple-nested loop because the innermost one counts down and would otherwise overflow
		// this isn't a significant limitation because reads are never big enough to overflow one
		k_ := int64(rg.K)
		numKmers := int64(len(read)) - k_ + 1
		var i int64
	KmerLoop:
		for i = 0; i < numKmers; i++ {
			// skip any kmer containing an 'n', scanning from the end for maximal skipping
			for j := k_ + i - 1; j >= i; j-- {
				if read[j] == byte('n') {
					i += j - i
					continue KmerLoop
				}
			}
			kmerBytes := read[i : i+k_]
			kmerInt := kmerBytes.kmerInt()
			kmerInt = minKmerInt(kmerInt, kmerInt.revComp(rg.K))
			kmer := rg.getKmer(kmerInt)
			if rg.Flags.Debug && kmer == nil && kmerSet[kmerInt] {
				fillKmerBuf(byteBuf, kmerInt)
				panic(fmt.Errorf("RepeatGenome.getKmer() returned nil for %s, but kmer exists\n", byteBuf))
			}
			if kmer != nil {
				// BUG FIX: this fill was previously unconditional, but byteBuf
				// is nil when Debug is unset (and the result was unused);
				// guard it so non-debug runs never touch the nil buffer
				if rg.Flags.Debug {
					fillKmerBuf(byteBuf, kmerInt)
				}
				lcaID := *(*uint16)(unsafe.Pointer(&kmer[8]))
				responseChan <- ReadResponse{read, rg.ClassTree.NodesByID[lcaID]}
				// only use the first matched kmer
				continue ReadLoop
			}
		}
		responseChan <- ReadResponse{read, nil}
	}
	close(responseChan)
}
// GetReadClassChan classifies reads in parallel, sharding them across one
// ClassifyReads goroutine per CPU, and returns a channel delivering one
// ReadResponse per input read. The channel is closed once every classifier
// has finished.
func (rg *RepeatGenome) GetReadClassChan(reads []TextSeq) chan ReadResponse {
	// a rudimentary way of deciding how many threads to allow, should eventually be improved
	numCPU := uint64(runtime.NumCPU())
	if rg.Flags.Debug {
		fmt.Printf("GetReadClassChan() using %d CPUs\n", numCPU)
	}
	runtime.GOMAXPROCS(int(numCPU))
	responseChans := make([]chan ReadResponse, 0, numCPU)

	numReads := uint64(len(reads))
	var i uint64
	for i = 0; i < numCPU; i++ {
		responseChans = append(responseChans, make(chan ReadResponse, 50))
		// BUG FIX: startInd was previously i * (numReads / numCPU), which
		// divides before multiplying and disagrees with endInd's rounding,
		// producing overlapping shards (reads classified twice) whenever
		// numCPU does not evenly divide numReads. Both bounds now use the
		// same multiply-then-divide formula, partitioning reads exactly.
		startInd := (i * numReads) / numCPU
		endInd := ((i + 1) * numReads) / numCPU
		go rg.ClassifyReads(reads[startInd:endInd], responseChans[i])
	}

	// fan-in: forward every per-thread response onto a single master channel,
	// closing it after all workers' channels are drained
	var wg sync.WaitGroup
	wg.Add(len(responseChans))
	master := make(chan ReadResponse)
	for _, respChan := range responseChans {
		go func(respChan chan ReadResponse) {
			for resp := range respChan {
				master <- resp
			}
			wg.Done()
		}(respChan)
	}
	go func() {
		wg.Wait()
		close(master)
	}()
	return master
}
// ProcessReads loads every processed ("*.proc") read file from the
// "<genome>-reads" subdirectory of the working directory and returns a channel
// of classification responses for all reads found. Returns (err, nil) on any
// filesystem error.
func (rg *RepeatGenome) ProcessReads() (error, chan ReadResponse) {
	workingDirName, err := os.Getwd()
	if err != nil {
		return err, nil
	}
	readsDirName := workingDirName + "/" + rg.Name + "-reads"
	currDir, err := os.Open(readsDirName)
	if err != nil {
		return err, nil
	}
	// BUG FIX: the directory handle was previously leaked
	defer currDir.Close()

	fileinfos, err := currDir.Readdir(-1)
	if err != nil {
		return err, nil
	}
	// keep only "*.proc" files; the length check also excludes a bare ".proc"
	processedFiles := []os.FileInfo{}
	for _, fileinfo := range fileinfos {
		name := fileinfo.Name()
		if len(name) > 5 && strings.HasSuffix(name, ".proc") {
			processedFiles = append(processedFiles, fileinfo)
		}
	}

	var reads []TextSeq
	for _, fileinfo := range processedFiles {
		// BUG FIX: the error from fileLines() was previously discarded,
		// silently yielding zero reads for an unreadable file
		err, theseReadsBytes := fileLines(readsDirName + "/" + fileinfo.Name())
		if err != nil {
			return err, nil
		}
		for _, bytesLine := range theseReadsBytes {
			reads = append(reads, TextSeq(bytesLine))
		}
	}
	return nil, rg.GetReadClassChan(reads)
}
// adding working parallel kmer update/addition
package repeatgenome
/*
WARNING!!! This program is currently under development and may be buggy or broken.
A barebones (at the moment) Go script for parsing and minimizing RepeatMasker output files alongside FASTA reference genomes.
This script expects there to be a subdirectory of the current directory named after the reference genome used (e.g. "dm3") that contains the following files:
* a RepeatMasker library containing:
- the match library (e.g. "dm3.fa.out")
- the alignment information (e.g. "dm3.fa.align")
* one or more reference genome files in FASTA format with the suffix ".fa"
Premature commenting is the root of all evil, and I have sinned. Please read comments skeptically - they will eventually be audited.
KmerInt.Minimize() logic could be changed now that minimizers are 32 bits
Should a Seq's first field be a *byte to discard the extra two fields?
Should probably make a file solely for type defs.
Reads are currently kept in TextSeq form until the bitter end because, with Go's referenced based slices, there's no compelling reason not to, and because they're easier (and probably faster) to manipulate than Seqs. This may change at some point, though.
If a minimizer is associated with a single repeat type, can we use that heuristically?
Error handling should be updated with a custom ParseError type - panics should be removed, excepting performance-critical sequence manipulation functions
Should consider splitting at hyphenated class names like TcMar-Tc1
For portability's sake, the flags should be used as args to Generate() rather than globals.
The concurrent read-kmer generator could be reintroduced using a select statement.
Should probably restrict activity of chans with directionals
It would make sense to discard kmers associated with ClassNodes greater than a certain size.
Kmer counting should be re-added eventually - it's currently excluded for performance reasons because we aren't using it.
We should test a version that doesn't cache minimizers, as that seems to be a needless bottleneck. It could also be conditional on the number of CPUs available.
Minimizers are currently not written to file in any order. This is for memory efficiency, and can be changed in the future.
I should probably change some variable names, like repeatGenome, to less verbose variants, and use more aliases.
Ranges should be changed to use actual values instead of indexes.
Slice sizes should be specified in the make() call when the size is known.
seqToInt and revCompToInt need minor cleanup and a potential name-change.
All sequences containing Ns are currently ignored.
We should consider taking end minimizers once the code base is more mature.
We should also review how to deal with m <= len(match) < k.
For caching efficiency, we should change the minimizer data structure to a map-indexed 1D slice of Kmers (not *Kmers). (This technique originated in Kraken.)
Int sizes should be reviewed for memory efficiency.
The sole command line argument is the name of the reference genome (e.g. "dm3").
*/
import (
"bytes"
"fmt"
"io/ioutil"
"log"
//"mapset"
"os"
"runtime"
"runtime/pprof"
"sort"
"strconv"
"strings"
"sync"
"unsafe"
)
// Flags holds the run-configuration switches passed to Generate().
type Flags struct {
	Debug      bool // run sanity checks and print diagnostics
	CPUProfile bool
	MemProfile bool // write heap profiles under profiles/
	Minimize   bool // compute the minimizer/kmer table
	WriteKraken bool // serialize the kraken-style table to disk
	WriteJSON   bool // write the class tree as JSON
}
// Match.SW_Score - Smith-Waterman score, describing the likeness to the repeat reference sequence
// Match.PercDiv - "% substitutions in matching region compared to the consensus" - RepeatMasker docs
// Match.PercDel - "% of bases opposite a gap in the query sequence (deleted bp)" - RepeatMasker docs
// Match.PercIns - "% of bases opposite a gap in the repeat consensus (inserted bp)" - RepeatMasker docs
// Match.SeqName - the reference genome file this match came from (typically the chromosome)
// Match.SeqStart - the starting index (inclusive) in the reference genome
// Match.SeqEnd - the ending index (exclusive) in the reference genome
// Match.SeqRemains - the number of bases past the end of the match in the relevant reference seqence
// Match.IsRevComp - the match may be for the complement of the reference sequence
// Match.RepeatClass - the repeat's full ancestry, including its repeat class and repeat name (which are listed separately in the RepeatMasker output file)
// Match.RepeatStart- the starting index in the repeat consensus sequence
// Match.RepeatEnd - the ending sequence (exclusive) in the consensus repeat sequence
// Match.RepeatRemains - the number of bases past the end of the match in the consensus repeat sequence
// Match.InsertionID - a numerical ID for the repeat type (starts at 1)
//
// below are not in parsed data file
// Match.RepeatName - simply repeatClass concatenated - used for printing and map indexing
// Match.ClassNode - pointer to corresponding ClassNode in RepeatGenome.ClassTree
// Match.Repeat - pointer to corresponding Repeat struct in RepeatGenome.Repeats

// Match represents a single RepeatMasker annotation row parsed from the
// genome's .fa.out file (see parseMatches).
type Match struct {
	SW_Score int32
	PercDiv float64
	PercDel float64
	PercIns float64
	SeqName string
	SeqStart uint64
	SeqEnd uint64
	SeqRemains uint64
	IsRevComp bool
	RepeatClass []string
	// in weird cases, RepeatStart can be negative, so they must be signed
	RepeatStart int64
	RepeatEnd int64
	RepeatRemains int64
	InsertionID uint64
	// these are generated, not parsed
	RepeatName string
	ClassNode *ClassNode
	Repeat *Repeat
}
// RepeatGenome is the top-level structure tying together a reference genome,
// its RepeatMasker annotations, the derived repeat-class tree, and the
// Kraken-style minimizer/kmer index built by getKrakenSlice().
type RepeatGenome struct {
	Name string // the genome's name, also used as its directory and file prefix
	Flags Flags // run-time configuration supplied to Generate()
	// maps a chromosome name to a map of its sequences
	// as discussed above, though, matches only contain 1D sequence indexes
	chroms map[string](map[string]TextSeq)
	K uint8 // kmer length (k <= 32 — see the Kmer type)
	M uint8 // minimizer length (m <= k)
	Kmers Kmers // all unique kmers, grouped by minimizer, sorted within each group
	// stores the offset of each minimizer's first kmer in RepeatGenome.Kmers - indexed by the minimizer's index in SortedMins
	OffsetsToMin []uint64
	// stores the number of kmers that each minimizer is associated with
	SortedMins MinInts
	Matches Matches // parsed RepeatMasker annotation rows
	ClassTree ClassTree // hierarchy of repeat classes, used for LCA computation
	Repeats Repeats // unique repeat types, indexed by their ID
	RepeatMap map[string]*Repeat // repeat name -> Repeat, for lookup by name
	memProfFile *os.File // open only when Flags.MemProfile is set
}
// ClassTree is the hierarchy of repeat classes (e.g. "DNA" -> "DNA/TcMar-Tc1"),
// rooted at an explicitly created "root" node, supporting LCA queries.
type ClassTree struct {
	// maps all class names to a pointer to their node struct
	// we must use pointers because of this foible in golang: https://code.google.com/p/go/issues/detail?id=3117
	// if we didn't use pointers, we'd have to completely reassign the struct when adding parents, children, etc.
	ClassNodes map[string](*ClassNode)
	// indexed by ClassNode.ID, so NodesByID[n.ID] == n
	NodesByID []*ClassNode
	// a pointer to the the class tree's root, used for recursive descent etc.
	// we explicitly create the root (because RepeatMatcher doesn't)
	Root *ClassNode
}

// MuxKmers is a mutex-guarded kmer list; one instance exists per minimizer
// during the concurrent Kraken-index construction (see krakenUpdateThread).
type MuxKmers struct {
	sync.Mutex
	Kmers Kmers
}
// Used to differentiate sequence representations with one base per byte (type TextSeq) from those with four bases per byte (type Seq).
type TextSeq []byte

// Used to clarify context of integers, and to differentiate full Kmers (which include an LCA ID) from integer-represented kmer sequences.
type KmerInt uint64
type KmerInts []KmerInt
// MinInt is an integer-encoded minimizer sequence (m <= 16 fits in 32 bits).
type MinInt uint32
type MinInts []MinInt

// can store a kmer where k <= 32
// the value of k is not stored in the struct, but rather in the RepeatGenome, for memory efficiency
// the first eight bytes are the int representation of the sequence (a KmerInt)
// the last two bytes are the LCA's ClassNode.ID (a uint16)
type Kmer [10]byte

// as with the Kmer type, each base is represented by two bits
// any excess bits are the first bits of the first byte (seq is right-justified)
// remember that len(Seq.Bases) is not the actual number of bases, but rather the number of bytes necessary to represent them
type Seq struct {
	Bases []byte
	Len uint64
}
type Seqs []Seq

// repeatLoc records one location of a repeat instance in the reference genome.
type repeatLoc struct {
	SeqName string
	StartInd uint64 // inclusive
	EndInd uint64 // exclusive
}
// Repeat is a unique repeat type aggregated from one or more Matches
// sharing the same RepeatName (see RepeatGenome.getRepeats).
type Repeat struct {
	// assigned in simple incremented order starting from 1
	// they are therefore not compatible across genomes
	// we give root ID = 0
	// NOTE(review): getRepeats() actually assigns the first repeat ID 0
	// (uint64(len(rg.Repeats)) on an empty slice) — confirm intended numbering
	ID uint64
	// a list containing the repeat's ancestry path, from top down
	// root is implicit, and is therefore excluded from the list
	ClassList []string
	ClassNode *ClassNode // the corresponding node in the class tree
	Name string // the full "/"-joined class path, e.g. "DNA/TcMar-Tc1"
	Instances []*Match // every Match of this repeat type
	Locations []repeatLoc // genomic locations (only the first instance is recorded by getRepeats)
}

// ClassNode is one node of the repeat-class hierarchy (ClassTree).
type ClassNode struct {
	Name string // full "/"-joined class path
	ID uint16 // index into ClassTree.NodesByID; also stored in Kmer bytes 8-9
	Class []string // the class path split on "/"
	Parent *ClassNode // nil only conceptually for root; root's Parent is unset
	Children []*ClassNode
	IsRoot bool
	Repeat *Repeat // set when a Repeat with exactly this name exists
}
// type synonyms, necessary to implement interfaces (e.g. sort) and methods
type Kmers []Kmer
type PKmers []*Kmer
type MinMap map[MinInt]Kmers
type Repeats []Repeat
type Matches []Match

//type Chroms map[string](map[string]TextSeq)

// MinPair associates a full Kmer with its minimizer.
// NOTE(review): the field is named KmerInt but typed Kmer (the 10-byte
// struct, not the integer encoding) — confirm which was intended.
type MinPair struct {
	KmerInt Kmer
	Minimizer MinInt
}

// ThreadResponse is the per-kmer message emitted by minimizeThread:
// the kmer, its minimizer, and the ClassNode of the match it came from.
type ThreadResponse struct {
	KmerInt KmerInt
	MinInt MinInt
	Relative *ClassNode
}

// MinCache maps a kmer to its previously computed minimizer.
type MinCache map[KmerInt]MinInt
// parseMatches reads and parses the RepeatMasker output file
// "<genomeName>/<genomeName>.fa.out", returning one Match per annotation row.
// The three-line header is discarded. On any I/O or conversion failure a
// ParseError is returned and the Matches slice is nil.
func parseMatches(genomeName string) (error, Matches) {
	// "my_genome_name" -> "my_genome_name/my_genome_name.fa.out"
	filepath := strings.Join([]string{genomeName, "/", genomeName, ".fa.out"}, "")
	err, matchLines := fileLines(filepath)
	if err != nil {
		return ParseError{"repeatgenome.parseMatches()", filepath, err}, nil
	}
	// drop header
	matchLines = matchLines[3:]

	var matches Matches
	var sw_Score int64

	for _, matchLine := range matchLines {
		rawVals := strings.Fields(string(matchLine))
		if len(rawVals) != 15 {
			return ParseError{"repeatgenome.parseMatches()",
					filepath,
					fmt.Errorf("supplied match line is not 15 fields long (has %d fields and length %d):\n", len(rawVals), len(matchLine))},
				nil
		}
		var match Match
		match.IsRevComp = rawVals[8] == "C"

		// remove enclosing parentheses
		// !!! in the future, checks to ensure that the parentheses exist should be added
		// !!! it would also be sensible to check that rawVals[8] is either "C" or "+"
		rawVals[7] = rawVals[7][1 : len(rawVals[7])-1]
		if match.IsRevComp {
			rawVals[11] = rawVals[11][1 : len(rawVals[11])-1]
		} else {
			rawVals[13] = rawVals[13][1 : len(rawVals[13])-1]
		}

		// everything in this block is just vanilla trimming, converting, and error checking
		sw_Score, err = strconv.ParseInt(rawVals[0], 10, 32)
		if err != nil {
			return ParseError{"repeatgenome.parseMatches()", filepath, err}, nil
		}
		match.SW_Score = int32(sw_Score)
		match.PercDiv, err = strconv.ParseFloat(rawVals[1], 64)
		if err != nil {
			return ParseError{"repeatgenome.parseMatches()", filepath, err}, nil
		}
		match.PercDel, err = strconv.ParseFloat(rawVals[2], 64)
		if err != nil {
			return ParseError{"repeatgenome.parseMatches()", filepath, err}, nil
		}
		match.PercIns, err = strconv.ParseFloat(rawVals[3], 64)
		if err != nil {
			return ParseError{"repeatgenome.parseMatches()", filepath, err}, nil
		}
		match.SeqName = strings.TrimSpace(rawVals[4])
		match.SeqStart, err = strconv.ParseUint(rawVals[5], 10, 64)
		if err != nil {
			return ParseError{"repeatgenome.parseMatches()", filepath, err}, nil
		}
		match.SeqEnd, err = strconv.ParseUint(rawVals[6], 10, 64)
		if err != nil {
			return ParseError{"repeatgenome.parseMatches()", filepath, err}, nil
		}
		match.SeqRemains, err = strconv.ParseUint(rawVals[7], 10, 64)
		if err != nil {
			return ParseError{"repeatgenome.parseMatches()", filepath, err}, nil
		}
		// match.IsComplement, rawVals[8], moved above
		match.RepeatClass = append(strings.Split(strings.TrimSpace(rawVals[10]), "/"), strings.TrimSpace(rawVals[9]))
		match.RepeatStart, err = strconv.ParseInt(rawVals[11], 10, 64)
		if err != nil {
			return ParseError{"repeatgenome.parseMatches()", filepath, err}, nil
		}
		match.RepeatEnd, err = strconv.ParseInt(rawVals[12], 10, 64)
		if err != nil {
			return ParseError{"repeatgenome.parseMatches()", filepath, err}, nil
		}
		match.RepeatRemains, err = strconv.ParseInt(rawVals[13], 10, 64)
		if err != nil {
			return ParseError{"repeatgenome.parseMatches()", filepath, err}, nil
		}
		match.InsertionID, err = strconv.ParseUint(rawVals[14], 10, 64)
		if err != nil {
			return ParseError{"repeatgenome.parseMatches()", filepath, err}, nil
		}

		// necessary swaps to convert reverse complement repeat indexes to positive-strand indexes
		// BUG FIX: this must be a single parallel assignment so every right-hand
		// value reads the pre-swap fields. The previous sequential version
		// overwrote RepeatStart first, which made the RepeatEnd assignment a
		// self-assignment and the RepeatRemains delta zero.
		// TODO(review): confirm the intended swap against the RepeatMasker
		// C-orientation column convention.
		if match.IsRevComp {
			match.RepeatStart, match.RepeatEnd, match.RepeatRemains =
				match.RepeatRemains,
				match.RepeatStart,
				match.RepeatRemains+(match.RepeatEnd-match.RepeatStart)
		}

		// decrement match.SeqStart and match.RepeatStart so that they work from a start index of 0 rather than 1
		// that way, we can use them without modification in slices
		match.SeqStart--
		match.RepeatStart--

		// "Other" and "Unknown" classes are hierarchically meaningless and really just mean "root", so we remove them
		if match.RepeatClass[0] == "Other" || match.RepeatClass[0] == "Unknown" {
			match.RepeatClass = match.RepeatClass[1:]
		}

		match.RepeatName = strings.Join(match.RepeatClass, "/")

		matches = append(matches, match)
	}
	return nil, matches
}
// parseGenome reads every FASTA (*.fa) file in the directory named genomeName
// and returns a map of chromosome name -> sequence name -> lowercased sequence.
// Repeat-reference files (*.fa.out, *.fa.align) and anything else are skipped.
func parseGenome(genomeName string) (error, map[string](map[string]TextSeq)) {
	chromFileInfos, err := ioutil.ReadDir(genomeName)
	if err != nil {
		return IOError{"repeatgenome.parseGenome()", err}, nil
	}
	warned := false
	chroms := make(map[string](map[string]TextSeq))
	// used below to store the two keys for RepeatGenome.chroms
	for i := range chromFileInfos {
		// "my_genome_name", "my_chrom_name" -> "my_genome_name/my_chrom_name"
		chromFilename := chromFileInfos[i].Name()
		chromFilepath := strings.Join([]string{genomeName, chromFilename}, "/")
		// process the ref genome files (*.fa), not the repeat ref files (*.fa.out and *.fa.align) or anything else
		if strings.HasSuffix(chromFilepath, ".fa") {
			err, seqLines := fileLines(chromFilepath)
			if err != nil {
				// error should already be IOError
				return err, nil
			}
			// maps each sequence name in this chrom to a slice of its sequence's lines
			// the list is concatenated at the end for efficiency's sake
			seqMap := make(map[string][][]byte)
			numLines := uint64(len(seqLines))
			var seqName string = "" // forward initialization necessary
			var i uint64
			for i = 0; i < numLines; i++ {
				seqLine := bytes.TrimSpace(seqLines[i])
				// BUG FIX: blank lines are common in FASTA files; indexing
				// seqLine[0] below would panic on an empty (trimmed) line
				if len(seqLine) == 0 {
					continue
				}
				if seqLine[0] == byte('>') {
					seqName = string(bytes.TrimSpace(seqLine[1:]))
					if !warned && seqName != chromFilename[:len(chromFilename)-3] {
						fmt.Println("WARNING: reference genome is two-dimensional, containing sequences not named after their chromosome.")
						fmt.Println("Because RepeatMasker supplied only one-dimensional indexing, this may cause unexpected behavior or program failure.")
						fmt.Println("seqName:", seqName, "\tlen(seqName):", len(seqName))
						fmt.Println("chrom name:", chromFilename[:len(chromFilename)-3], "\tlen(chrom name):", len(chromFilename)-3)
						warned = true
					}
				} else {
					if seqName == "" {
						return ParseError{"repeatgenome.parseGenome", chromFilepath, fmt.Errorf("Empty or missing sequence name")}, nil
					}
					seqMap[seqName] = append(seqMap[seqName], seqLine)
				}
			}
			// finally, we insert this map into the full map
			chromName := chromFilepath[len(genomeName)+1 : len(chromFilepath)-3]
			chroms[chromName] = make(map[string]TextSeq)
			for seqName, seqLines := range seqMap {
				chroms[chromName][seqName] = TextSeq(bytes.ToLower(bytes.Join(seqLines, []byte{})))
			}
		}
	}
	return nil, chroms
}
// Generate builds a complete RepeatGenome for the named reference genome:
// it parses the genome and its RepeatMasker matches, derives the repeat list
// and class tree, and — depending on rgFlags — builds the Kraken-style
// minimizer index, writes JSON output, and runs the debug tests.
// k and m are the kmer and minimizer lengths respectively.
func Generate(genomeName string, k, m uint8, rgFlags Flags) (error, *RepeatGenome) {
	var err error
	// we populate the RepeatGenome mostly with helper functions
	// we should consider whether it makes more sense for them to alter the object directly, than to return their results
	rg := new(RepeatGenome)
	rg.Name = genomeName
	rg.Flags = rgFlags

	if rg.Flags.MemProfile {
		// the error is deliberately ignored: the directory usually already
		// exists from a previous run
		os.Mkdir("profiles", os.ModeDir)
		rg.memProfFile, err = os.Create("profiles/" + rg.Name + ".memprof")
		if err != nil {
			// BUG FIX: the error context previously misreported this as
			// coming from RepeatGenome.getKrakenSlice()
			return IOError{"repeatgenome.Generate()", err}, nil
		}
		pprof.WriteHeapProfile(rg.memProfFile)
		defer rg.memProfFile.Close()
	}

	err, rg.chroms = parseGenome(genomeName)
	if err != nil {
		return err, nil
	}
	err, rg.Matches = parseMatches(genomeName)
	if err != nil {
		return err, nil
	}
	rg.getRepeats()
	rg.getClassTree()
	rg.K = k
	rg.M = m

	if rg.Flags.Minimize {
		// calling the parallel minimizer and writing the result
		rg.getKrakenSlice()
	}
	if rg.Flags.WriteJSON {
		rg.WriteClassJSON(false, false)
	}
	if rg.Flags.Debug {
		rg.RunDebugTests()
	}
	return nil, rg
}
// RunDebugTests prints a battery of sanity checks to stdout: parsed
// chromosome boundaries, class-tree structure, LCA spot checks, and a
// minimizer computation on a fixed test sequence.
func (rg *RepeatGenome) RunDebugTests() {
	fmt.Println()
	for k, v := range rg.chroms {
		for k_, v_ := range v {
			fmt.Printf("chrom: %s\tseq: %s\t%s...%s\n", k, k_, v_[:20], v_[len(v_)-20:])
		}
	}
	fmt.Println()

	fmt.Println("number of chromosomes parsed:", len(rg.chroms))
	fmt.Println()

	rg.ClassTree.PrintBranches()
	fmt.Println()
	fmt.Println("total number of bases in genome:", rg.Size())
	fmt.Println("number of ClassNodes:", len(rg.ClassTree.ClassNodes))
	fmt.Println()

	// LCA spot checks against known class relationships
	fmt.Println("rg.ClassTree.getLCA(rg.ClassTree.ClassNodes['DNA/TcMar-Mariner'], rg.ClassTree.ClassNodes['DNA/TcMar-Tc1']).Name:", rg.ClassTree.getLCA(rg.ClassTree.ClassNodes["DNA/TcMar-Mariner"], rg.ClassTree.ClassNodes["DNA/TcMar-Tc1"]).Name)
	fmt.Println("rg.ClassTree.getLCA(rg.ClassTree.ClassNodes['ARTEFACT'], rg.ClassTree.ClassNodes['DNA/TcMar-Tc1']).Name:", rg.ClassTree.getLCA(rg.ClassTree.ClassNodes["ARTEFACT"], rg.ClassTree.ClassNodes["DNA/TcMar-Tc1"]).Name)
	fmt.Println("rg.ClassTree.getLCA(rg.ClassTree.ClassNodes['LINE/LOA'], rg.ClassTree.ClassNodes['root']).Name:", rg.ClassTree.getLCA(rg.ClassTree.ClassNodes["LINE/LOA"], rg.ClassTree.ClassNodes["root"]).Name)
	fmt.Println("rg.ClassTree.getLCA(rg.ClassTree.ClassNodes['Simple_repeat/(T)n'], rg.ClassTree.ClassNodes['Simple_repeat/(T)n']).Name:", rg.ClassTree.getLCA(rg.ClassTree.ClassNodes["Simple_repeat/(T)n"], rg.ClassTree.ClassNodes["Simple_repeat/(T)n"]).Name)
	fmt.Println("rg.ClassTree.getLCA(rg.ClassTree.ClassNodes['LTR/Gypsy/MICROPIA_I-int'], rg.ClassTree.ClassNodes['LTR/Gypsy']).Name:", rg.ClassTree.getLCA(rg.ClassTree.ClassNodes["LTR/Gypsy/MICROPIA_I-int"], rg.ClassTree.ClassNodes["LTR/Gypsy"]).Name)
	fmt.Println("rg.ClassTree.getLCA(rg.ClassTree.ClassNodes['LTR/Gypsy'], rg.ClassTree.ClassNodes['LTR/Gypsy/MICROPIA_I-int']).Name:", rg.ClassTree.getLCA(rg.ClassTree.ClassNodes["LTR/Gypsy"], rg.ClassTree.ClassNodes["LTR/Gypsy/MICROPIA_I-int"]).Name)
	fmt.Println()

	fmt.Println("min(5, 7):", min(5, 7))
	fmt.Println("max64(int64(5), int64(7)):", max64(int64(5), int64(7)))
	fmt.Println()

	testSeq := TextSeq("atgtttgtgtttttcataaagacgaaagatg")
	thisMin := testSeq.kmerInt().Minimize(uint8(len(testSeq)), 15)
	// BUG FIX: the printed label was a stale hard-coded sequence that did not
	// match testSeq; print the sequence actually minimized
	fmt.Printf("Minimize(%q, 15): \n", testSeq)
	thisMin.print(15)
	fmt.Println()

	fmt.Printf("Kmer struct size: %d\n", unsafe.Sizeof(Kmer{}))
}
// getRepeats populates rg.Repeats and rg.RepeatMap from rg.Matches: one
// Repeat per unique RepeatName, with every Match of that name collected in
// Repeat.Instances and each Match.Repeat pointing back at its Repeat.
//
// BUG FIX: the previous version stored &repeat of a loop-local variable in
// RepeatMap and Match.Repeat while appending a *copy* to rg.Repeats, so the
// map/match pointers and the slice elements were distinct structs whose
// state silently diverged (e.g. Instances went to one, ClassNode to the
// other). We now build rg.Repeats to its final length first, then take
// pointers into the finished slice, which are stable because the slice no
// longer grows.
func (rg *RepeatGenome) getRepeats() {
	rg.RepeatMap = make(map[string]*Repeat)

	// First pass: create one Repeat per unique name, preserving the original
	// semantics — ID is the index of first appearance, and only the first
	// instance's location is recorded.
	seen := make(map[string]bool)
	for i := range rg.Matches {
		match := &rg.Matches[i]
		if seen[match.RepeatName] {
			continue
		}
		seen[match.RepeatName] = true
		var repeat Repeat
		repeat.ID = uint64(len(rg.Repeats))
		repeat.ClassList = match.RepeatClass
		repeat.Name = match.RepeatName
		repeat.Locations = append(repeat.Locations, repeatLoc{match.SeqName, match.SeqStart, match.SeqEnd})
		rg.Repeats = append(rg.Repeats, repeat)
	}

	// Second pass: rg.Repeats is now at its final length, so pointers into it
	// are the canonical references.
	for i := range rg.Repeats {
		repeat := &rg.Repeats[i]
		rg.RepeatMap[repeat.Name] = repeat
	}

	// Third pass: attach every match to its repeat, in match order (the same
	// order the previous implementation produced).
	// DON'T use the second field of the range - this causes the Match struct to be copied
	for i := range rg.Matches {
		match := &rg.Matches[i]
		repeat := rg.RepeatMap[match.RepeatName]
		repeat.Instances = append(repeat.Instances, match)
		match.Repeat = repeat
	}
}
// getClassTree builds rg.ClassTree from rg.Repeats: one ClassNode per
// hierarchy prefix of every repeat's ClassList, all parented under an
// explicit root node, then links every Repeat and Match to its ClassNode.
// Node IDs are assigned in creation order and double as indexes into
// NodesByID; they must fit in a uint16 (see the overflow panic below).
func (rg *RepeatGenome) getClassTree() {
	// mapping to pointers allows us to make references (i.e. pointers) to values
	tree := &rg.ClassTree
	tree.ClassNodes = make(map[string](*ClassNode))
	// would be prettier if expanded
	tree.Root = &ClassNode{"root", 0, []string{"root"}, nil, nil, true, nil}
	tree.ClassNodes["root"] = tree.Root
	tree.NodesByID = append(tree.NodesByID, tree.Root)
	for _, repeat := range rg.Repeats {
		// process every hierarchy level (e.g. for "DNA/LINE/TiGGER", process "DNA", then "DNA/LINE", then "DNA/LINE/TiGGER")
		for j := 1; j <= len(repeat.ClassList); j++ {
			thisClass := repeat.ClassList[:j]
			thisClassName := strings.Join(thisClass, "/")
			_, keyExists := tree.ClassNodes[thisClassName]
			if !keyExists {
				// IDs index NodesByID and are stored as uint16, so cap at 65534
				if len(tree.NodesByID) > 65534 {
					panic("RepeatGenome.getClassTree(): more than 65,536 class nodes - ID is overflowed")
				}
				classNode := new(ClassNode)
				classNode.Name = thisClassName
				classNode.ID = uint16(len(tree.NodesByID))
				classNode.Class = thisClass
				classNode.IsRoot = false
				// `repeat` here deliberately shadows the outer loop variable:
				// it is the Repeat (if any) named exactly thisClassName
				if repeat, exists := rg.RepeatMap[thisClassName]; exists {
					classNode.Repeat = repeat
				}
				tree.ClassNodes[thisClassName] = classNode
				tree.NodesByID = append(tree.NodesByID, classNode)
				// first case handles primary classes, as root is implicit and not listed in thisClass
				if j == 1 {
					classNode.Parent = tree.Root
				} else {
					// parent is the node for the class path one level up; it
					// was necessarily created in an earlier iteration of j
					classNode.Parent = tree.ClassNodes[strings.Join(thisClass[:len(thisClass)-1], "/")]
				}
				if classNode.Parent.Children == nil {
					classNode.Parent.Children = make([]*ClassNode, 0)
				}
				classNode.Parent.Children = append(classNode.Parent.Children, tree.ClassNodes[thisClassName])
			}
		}
	}
	// MUST NOT USE RANGE - the struct will be copied!
	for i := 0; i < len(rg.Repeats); i++ {
		repeat := &rg.Repeats[i]
		repeat.ClassNode = tree.ClassNodes[repeat.Name]
		if repeat.ClassNode == nil {
			fmt.Println(repeat.Name)
			log.Fatal("getClassTree(): nil Repeat.ClassNode")
		}
	}
	// MUST NOT USE RANGE - the struct will be copied!
	for i := 0; i < len(rg.Matches); i++ {
		match := &rg.Matches[i]
		match.ClassNode = tree.ClassNodes[match.RepeatName]
		if match.ClassNode == nil {
			fmt.Println(match.RepeatName)
			log.Fatal("getClassTree(): nil Match.ClassNode")
		}
	}
}
// getAncestry returns cn's ancestry as a list, beginning at (and including)
// cn itself and walking upward through Parent links; the root is excluded.
func (cn *ClassNode) getAncestry() []*ClassNode {
	ancestry := []*ClassNode{}
	for node := cn; !node.IsRoot; node = node.Parent {
		ancestry = append(ancestry, node)
	}
	return ancestry
}
// getLCA returns the lowest common ancestor of cnA and cnB in the class
// tree. It compares every non-root ancestor of cnB against every non-root
// ancestor of cnA (quadratic, but ancestry chains are short); if no shared
// node is found, the root is by definition the LCA.
func (classTree *ClassTree) getLCA(cnA, cnB *ClassNode) *ClassNode {
	for b := cnB; b != classTree.Root; b = b.Parent {
		for a := cnA; a != classTree.Root; a = a.Parent {
			if a == b {
				return a
			}
		}
	}
	// the only ancestor the two nodes share is the root itself
	return classTree.Root
}
// some of the logic in here is deeply nested or non-obvious for efficiency's sake
// specifically, we made sure not to make any heap allocations, which means reverse complements can never be explicitly evaluated

// minimizeThread computes, for every kmer of every match in
// rg.Matches[matchStart:matchEnd], the strand-agnostic kmer integer, its
// minimizer, and the match's ClassNode, sending each as a ThreadResponse on
// c. Kmers containing 'n' and matches shorter than k are skipped. The
// channel is closed when the range is exhausted.
func (rg *RepeatGenome) minimizeThread(matchStart, matchEnd uint64, c chan ThreadResponse) {
	k := rg.K
	k_ := uint64(k)
	m := rg.M

	for i := matchStart; i < matchEnd; i++ {
		match := &rg.Matches[i]
		// NOTE(review): SeqName is used as both the chromosome and sequence
		// key — this assumes a one-dimensional genome (seq named after its
		// chromosome), as warned about in parseGenome; confirm
		seq := rg.chroms[match.SeqName][match.SeqName]
		matchLen := match.SeqEnd - match.SeqStart
		// for now, we will ignore matches too short to be traditionally minimized
		if matchLen < k_ {
			continue
		}

	KmerLoop:
		for j := match.SeqStart; j <= match.SeqEnd-k_; j++ {
			// we begin by skipping any kmers containing n's
			// we start checking from the end for maximum skipping efficiency
			for x := k_ - 1; x >= 0; x-- {
				if seq[j+x] == byte('n') {
					// jump past the offending 'n' (j++ advances one more)
					j += x
					continue KmerLoop
				}
				// prevents negative overflow - a bit of a hack, but seqs can be big, so we need uint64's capacity
				// (x >= 0 is always true for a uint64, so we break explicitly)
				if x == 0 { break }
			}

			kmerInt := TextSeq(seq[j : j+k_]).kmerInt()
			// make the sequence strand-agnostic
			kmerInt = minKmerInt(kmerInt, kmerInt.revComp(k))
			thisMin := kmerInt.Minimize(k, m)

			/*
			   if match.ClassNode == nil {
			       fmt.Println("current match's repeat:", match.RepeatName)
			       panic("minimizeThread(): match has nil ClassNode")
			   }
			*/

			c <- ThreadResponse{kmerInt, thisMin, match.ClassNode}
		}
	}
	close(c)
}
// UpdateInfo is the unit of work sent to krakenUpdateThread: fold kmerInt,
// observed under the class node `relative`, into the kmer list muxKmers.
type UpdateInfo struct {
	muxKmers *MuxKmers
	kmerInt KmerInt
	relative *ClassNode
}
// krakenUpdateThread consumes UpdateInfo messages from updateChan until it is
// closed. For each message it either updates the LCA of an already-seen kmer
// (to the LCA of its previous class and the new relative) or appends a fresh
// Kmer to the minimizer's list, under that list's mutex.
// Kmer layout (see the Kmer type): bytes 0-7 hold the KmerInt, bytes 8-9 the
// LCA's ClassNode.ID — hence the unsafe.Pointer casts at &kmer[0] and &kmer[8].
// wg.Done() is called once per processed message (the sender does wg.Add(1)).
func (rg *RepeatGenome) krakenUpdateThread(wg *sync.WaitGroup, updateChan chan UpdateInfo) {
	for updateInfo := range updateChan {
		muxKmers, kmerInt, relative := updateInfo.muxKmers, updateInfo.kmerInt, updateInfo.relative
		muxKmers.Lock()
		kmerExists := false
		// linear scan of this minimizer's kmers for an existing entry
		for i, kmer := range muxKmers.Kmers {
			// the case that we've already processed this exact kmer - we just update the LCA
			if kmerInt == *(*KmerInt)(unsafe.Pointer(&kmer[0])) {
				kmerExists = true
				prev_LCA_ID := *(*uint16)(unsafe.Pointer(&kmer[8]))
				lca := rg.ClassTree.getLCA(rg.ClassTree.NodesByID[prev_LCA_ID], relative)
				*(*uint16)(unsafe.Pointer(&kmer[8])) = lca.ID
				// kmer is a copy of the array element, so write it back
				muxKmers.Kmers[i] = kmer
				break
			}
		}
		if !kmerExists {
			// first sighting of this kmer: pack the int and the relative's ID
			var kmer Kmer
			*(*KmerInt)(unsafe.Pointer(&kmer[0])) = kmerInt
			*(*uint16)(unsafe.Pointer(&kmer[8])) = relative.ID
			muxKmers.Kmers = append(muxKmers.Kmers, kmer)
		}
		muxKmers.Unlock()
		wg.Done()
	}
}
// getKrakenSlice builds the Kraken-style minimizer index. It fans out
// minimizeThread goroutines (one per CPU, each handling a contiguous slice of
// rg.Matches), merges their ThreadResponses, and groups kmers by minimizer in
// minToKmers. Updates to an existing minimizer's list are delegated to
// krakenUpdateThread workers; the first kmer of a new minimizer is inserted
// directly here, which is safe because the map is only mutated in this
// goroutine. Finally it hands the grouped kmers to populateKraken.
func (rg *RepeatGenome) getKrakenSlice() error {
	// a rudimentary way of deciding how many threads to allow, should eventually be improved
	numCPU := runtime.NumCPU()
	if rg.Flags.Debug {
		fmt.Printf("getKrakenSlice() using %d CPUs\n", ceilDiv(numCPU, 2))
	}
	runtime.GOMAXPROCS(numCPU)
	var mStart, mEnd uint64
	if rg.Flags.Debug {
		numKmers := rg.numKmers()
		fmt.Printf("expecting >= %d million kmers\n", numKmers/1000000)
	}

	// partition rg.Matches evenly across the minimizer goroutines
	var threadChans [](chan ThreadResponse)
	for i := 0; i < numCPU; i++ {
		mStart = uint64(i * len(rg.Matches) / numCPU)
		mEnd = uint64((i + 1) * len(rg.Matches) / numCPU)
		c := make(chan ThreadResponse, 1000)
		threadChans = append(threadChans, c)
		go rg.minimizeThread(mStart, mEnd, c)
	}

	minToKmers := make(map[MinInt]*MuxKmers)
	numUpdateThreads := numCPU / 2
	updateChan := make(chan UpdateInfo, 1000)
	var wg sync.WaitGroup
	for i := 0; i < numUpdateThreads; i++ {
		go rg.krakenUpdateThread(&wg, updateChan)
	}

	var kmersProcessed uint64 = 0
	// below is the atomic section of minimizing
	// this seems to be the rate-limiting section, as 24+ goroutines use only ~9-10 CPU-equivalents
	// it should therefore be optimized first
	for resp := range mergeThreadResp(threadChans) {
		// progress report every five million kmers (also fires once at zero)
		if kmersProcessed%5000000 == 0 {
			fmt.Println(comma(kmersProcessed/1000000), "million kmers processed...")
		}
		kmersProcessed++
		kmerInt, minInt, relative := resp.KmerInt, resp.MinInt, resp.Relative
		if muxKmers, minExists := minToKmers[minInt]; minExists {
			// wg.Add before send pairs with wg.Done in krakenUpdateThread
			wg.Add(1)
			updateChan <- UpdateInfo{muxKmers, kmerInt, relative}
			// ...otherwise we initialize it in the kmerMap
		} else {
			var kmer Kmer
			*(*KmerInt)(unsafe.Pointer(&kmer[0])) = kmerInt
			*(*uint16)(unsafe.Pointer(&kmer[8])) = relative.ID
			// we don't need to lock because the update is atomic with the addition
			minToKmers[minInt] = &MuxKmers{ sync.Mutex{}, Kmers{kmer} }
		}
	}
	// wait for the update workers to drain their queue before closing
	wg.Wait()
	close(updateChan)
	fmt.Println("...all kmers processed")
	fmt.Println()

	if rg.Flags.MemProfile {
		pprof.WriteHeapProfile(rg.memProfFile)
	}
	return rg.populateKraken(minToKmers)
}
// populateKraken flattens the per-minimizer kmer lists produced by
// getKrakenSlice into the final index: rg.SortedMins (sorted unique
// minimizers), rg.OffsetsToMin (offset of each minimizer's first kmer in
// rg.Kmers) and rg.Kmers (kmers concatenated in minimizer order, sorted
// within each minimizer's group). Optionally writes the index to file.
func (rg *RepeatGenome) populateKraken(minToKmers map[MinInt]*MuxKmers) error {
	var numUniqKmers uint64 = 0
	// sort each minimizer's kmer list and collect the minimizer keys
	for minInt, muxKmers := range minToKmers {
		kmers := muxKmers.Kmers
		numUniqKmers += uint64(len(kmers))
		sort.Sort(kmers)
		rg.SortedMins = append(rg.SortedMins, minInt)
	}
	sort.Sort(rg.SortedMins)
	fmt.Println(comma(numUniqKmers), "unique kmers generated")
	numUniqMins := uint64(len(rg.SortedMins))
	fmt.Println(comma(numUniqMins), "unique minimizers used")

	var currOffset uint64 = 0
	// sizes are known, so preallocate both slices
	rg.OffsetsToMin = make([]uint64, 0, numUniqMins)
	rg.Kmers = make(Kmers, 0, numUniqKmers)
	for _, thisMin := range rg.SortedMins {
		rg.OffsetsToMin = append(rg.OffsetsToMin, currOffset)
		currOffset += uint64(len(minToKmers[thisMin].Kmers))
		// idiom: variadic append replaces the previous element-by-element loop
		rg.Kmers = append(rg.Kmers, minToKmers[thisMin].Kmers...)
		// delete(minMap, thisMin) // useless because it's going to be almost immediately nilled anyway
	}

	// sanity check: every kmer counted above must have been inserted
	if uint64(len(rg.Kmers)) != numUniqKmers {
		panic(fmt.Errorf("error populating RepeatGenome.Kmers - %d kmers inserted rather than expected %d", len(rg.Kmers), numUniqKmers))
	}

	if rg.Flags.WriteKraken {
		err := rg.WriteMins()
		if err != nil {
			return err
		}
	}

	if rg.Flags.MemProfile {
		pprof.WriteHeapProfile(rg.memProfFile)
	}
	return nil
}
// numKmers returns the total number of kmers across all matches, splitting
// each match's sequence on 'n' and counting len(fragment)-k+1 kmers per
// fragment of at least k bases. Used only for the Debug progress estimate.
func (rg *RepeatGenome) numKmers() uint64 {
	k := int(rg.K)
	isN := func(c rune) bool { return c == 'n' }
	var total uint64
	for i := range rg.Matches {
		match := &rg.Matches[i]
		seq := rg.chroms[match.SeqName][match.SeqName][match.SeqStart:match.SeqEnd]
		for _, frag := range bytes.FieldsFunc([]byte(seq), isN) {
			if len(frag) >= k {
				total += uint64(len(frag) - k + 1)
			}
		}
	}
	return total
}
// getMinIndex binary-searches rg.SortedMins for minInt, returning
// (true, index) when present and (false, 0) otherwise.
func (rg *RepeatGenome) getMinIndex(minInt MinInt) (bool, uint64) {
	lo, hi := uint64(0), uint64(len(rg.SortedMins))
	for lo < hi {
		mid := lo + (hi-lo)/2
		switch {
		case rg.SortedMins[mid] == minInt:
			return true, mid
		case minInt < rg.SortedMins[mid]:
			hi = mid
		default:
			lo = mid + 1
		}
	}
	return false, 0
}
// getKmer returns a pointer to the stored Kmer whose integer representation
// is kmerInt, or nil if it is not in the index. It first locates the kmer's
// minimizer in SortedMins, then binary-searches that minimizer's contiguous
// slice of rg.Kmers. The caller is expected to pass a strand-agnostic
// (canonical) kmerInt, as stored by the index builder.
func (rg *RepeatGenome) getKmer(kmerInt KmerInt) *Kmer {
	minimizer := kmerInt.Minimize(rg.K, rg.M)
	minExists, minIndex := rg.getMinIndex(minimizer)
	if !minExists {
		return nil
	}

	// the minimizer's kmers occupy rg.Kmers[startInd:endInd]
	startInd := rg.OffsetsToMin[minIndex]
	var endInd uint64
	if minIndex == uint64(len(rg.SortedMins)) - 1 {
		// last minimizer: its group runs to the end of rg.Kmers
		endInd = uint64(len(rg.Kmers))
	} else {
		endInd = rg.OffsetsToMin[minIndex+1]
	}
	if endInd > uint64(len(rg.Kmers)) {
		panic(fmt.Errorf("getKmer(): out-of-bounds RepeatGenome.Kmers access (len(rg.Kmers) = %d, endInd = %d)", len(rg.Kmers), endInd))
	}
	/*
	   if !sort.IsSorted(rg.Kmers[startInd:endInd]) {
	       panic("minimizer's kmers not sorted")
	   }
	*/

	// simple binary search within the range of RepeatGenome.Kmers that has this kmer's minimizer
	i, j := startInd, endInd
	for i < j {
		x := (j+i)/2
		// bytes 0-7 of a Kmer hold its KmerInt (see the Kmer type)
		thisKmerInt := *(*KmerInt)(unsafe.Pointer(&rg.Kmers[x][0]))
		if thisKmerInt == kmerInt {
			return &rg.Kmers[x]
		} else if thisKmerInt < kmerInt {
			i = x + 1
		} else {
			j = x
		}
	}

	return nil
}
// SeqAndClass pairs a packed sequence with the class node it belongs to.
type SeqAndClass struct {
	Seq Seq
	Class *ClassNode
}
/*
func (rg *RepeatGenome) kmerSeqFeed(seq TextSeq) chan uint64 {
c := make(chan uint64)
go func() {
numKmers := uint8(len(seq)) - (rg.K - 1)
var i uint8
KmerLoop:
for i = 0; i < numKmers; i++ {
kmerSeq := seq[i : i+rg.K]
// an ugly but necessary n-skipper
for j := rg.K - 1; j >= 0; j-- {
if kmerSeq[j] == byte('n') {
i += j
continue KmerLoop
}
// necessary for j to be unsigned
if j == 0 { break }
}
kmerInt := seqToInt(string(kmerSeq))
c <- minU64(kmerInt, intRevComp(kmerInt, rg.K))
}
}()
return c
}
*/
// ReadResponse pairs a classified read with the ClassNode it was assigned
// (nil when none of the read's kmers were found in the index).
type ReadResponse struct {
	Seq TextSeq
	ClassNode *ClassNode
}
// This function assumes that the Seqs in readSeqs do not contain 'n's.
// The output reads of sequencing simulators will generally contain 'n's if the input reference genome does.
// They must therefore be filtered upstream.

// ClassifyReads classifies each read in readTextSeqs by the LCA class of the
// first of its kmers found in the index, sending a ReadResponse per read
// (ClassNode == nil when no kmer matched) and closing responseChan when done.
// In Debug mode it also cross-checks getKmer against a full kmer set.
func (rg *RepeatGenome) ClassifyReads(readTextSeqs []TextSeq, responseChan chan ReadResponse) {
	var kmerSet map[KmerInt]bool
	var byteBuf TextSeq
	if rg.Flags.Debug {
		byteBuf = make(TextSeq, rg.K, rg.K)
		kmerSet = make(map[KmerInt]bool, len(rg.Kmers))
		for _, kmer := range rg.Kmers {
			kmerSeq := *(*KmerInt)(unsafe.Pointer(&kmer[0]))
			kmerSet[kmerSeq] = true
		}
	}
ReadLoop:
	for _, read := range readTextSeqs {
		// we use sign int64s in this triple-nested loop because the innermost one counts down and would otherwise overflow
		// this isn't a significant limitation because reads are never big enough to overflow one
		k_ := int64(rg.K)
		numKmers := int64(len(read)) - k_ + 1
		var i int64
	KmerLoop:
		for i = 0; i < numKmers; i++ {
			// if this is ever revived, all the loop ints must be made signed again
			for j := k_ + i - 1; j >= i ; j-- {
				if read[j] == byte('n') {
					i += j - i
					continue KmerLoop
				}
			}

			kmerBytes := read[i : i+k_]
			kmerInt := kmerBytes.kmerInt()
			// canonicalize: stored kmers are strand-agnostic
			kmerInt = minKmerInt(kmerInt, kmerInt.revComp(rg.K))
			kmer := rg.getKmer(kmerInt)

			if rg.Flags.Debug && kmer == nil && kmerSet[kmerInt] {
				fillKmerBuf(byteBuf, kmerInt)
				panic(fmt.Errorf("RepeatGenome.getKmer() returned nil for %s, but kmer exists\n", byteBuf))
			}

			if kmer != nil {
				// BUG FIX: byteBuf is only allocated in Debug mode, so the
				// previously unconditional fillKmerBuf call operated on a nil
				// buffer outside Debug (and its result was unused here anyway)
				if rg.Flags.Debug {
					fillKmerBuf(byteBuf, kmerInt)
				}
				// bytes 8-9 of the Kmer hold the LCA's ClassNode.ID
				lcaID := *(*uint16)(unsafe.Pointer(&kmer[8]))
				responseChan <- ReadResponse{read, rg.ClassTree.NodesByID[lcaID]}
				// only use the first matched kmer
				continue ReadLoop
			}
		}
		responseChan <- ReadResponse{read, nil}
	}
	close(responseChan)
}
// GetReadClassChan classifies reads concurrently: it partitions the reads
// across one ClassifyReads goroutine per CPU and returns a single channel on
// which all ReadResponses are merged; the channel is closed once every worker
// has finished.
func (rg *RepeatGenome) GetReadClassChan(reads []TextSeq) chan ReadResponse {
	// a rudimentary way of deciding how many threads to allow, should eventually be improved
	numCPU := uint64(runtime.NumCPU())
	if rg.Flags.Debug {
		fmt.Printf("GetReadClassChan() using %d CPUs\n", numCPU)
	}
	runtime.GOMAXPROCS(int(numCPU))
	responseChans := make([]chan ReadResponse, 0, numCPU)

	numReads := uint64(len(reads))
	var i uint64
	for i = 0; i < numCPU; i++ {
		responseChans = append(responseChans, make(chan ReadResponse, 50))
		// BUG FIX: startInd previously used i * (numReads / numCPU) while
		// endInd used ((i+1) * numReads) / numCPU; the inconsistent rounding
		// made adjacent chunks overlap, so some reads were classified twice.
		// Both bounds now multiply before dividing, yielding a clean partition.
		startInd := (i * numReads) / numCPU
		endInd := ((i + 1) * numReads) / numCPU
		go rg.ClassifyReads(reads[startInd : endInd], responseChans[i])
	}

	// fan-in: forward every worker channel into a single master channel,
	// closing it once all workers' channels are drained
	var wg sync.WaitGroup
	wg.Add(len(responseChans))
	master := make(chan ReadResponse)

	for _, respChan := range responseChans {
		go func(respChan chan ReadResponse) {
			for resp := range respChan {
				master <- resp
			}
			wg.Done()
		}(respChan)
	}

	go func() {
		wg.Wait()
		close(master)
	}()

	return master
}
// ProcessReads loads every "*.proc" file from the "<cwd>/<name>-reads"
// directory (one read per line) and returns a channel of classification
// results via GetReadClassChan.
func (rg *RepeatGenome) ProcessReads() (error, chan ReadResponse) {
	workingDirName, err := os.Getwd()
	if err != nil {
		return err, nil
	}
	readsDirName := workingDirName + "/" + rg.Name + "-reads"
	currDir, err := os.Open(readsDirName)
	if err != nil {
		return err, nil
	}
	// release the directory handle once listing is done
	defer currDir.Close()
	fileinfos, err := currDir.Readdir(-1)
	if err != nil {
		return err, nil
	}
	processedFiles := []os.FileInfo{}
	for _, fileinfo := range fileinfos {
		// the length check (preserved from the original) rejects a file named
		// exactly ".proc"
		if len(fileinfo.Name()) > 5 && strings.HasSuffix(fileinfo.Name(), ".proc") {
			processedFiles = append(processedFiles, fileinfo)
		}
	}
	var reads []TextSeq
	for _, fileinfo := range processedFiles {
		// BUG FIX: the fileLines error was previously discarded, silently
		// skipping unreadable read files
		err, theseReadsBytes := fileLines(readsDirName + "/" + fileinfo.Name())
		if err != nil {
			return err, nil
		}
		for _, bytesLine := range theseReadsBytes {
			reads = append(reads, TextSeq(bytesLine))
		}
	}
	return nil, rg.GetReadClassChan(reads)
}
|
package maxminddb
import (
"errors"
"fmt"
"io/ioutil"
"math/big"
"math/rand"
"net"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestReader opens every record-size / IP-version combination of the canned
// test databases via Open, checks its metadata, and runs the per-version
// lookup checks.
func TestReader(t *testing.T) {
	recordSizes := []uint{24, 28, 32}
	ipVersions := []uint{4, 6}
	for _, size := range recordSizes {
		for _, version := range ipVersions {
			path := fmt.Sprintf(testFile("MaxMind-DB-test-ipv%d-%d.mmdb"), version, size)
			reader, err := Open(path)
			require.NoError(t, err, "unexpected error while opening database: %v", err)
			checkMetadata(t, reader, version, size)
			switch version {
			case 4:
				checkIpv4(t, reader)
			default:
				checkIpv6(t, reader)
			}
		}
	}
}
// TestReaderBytes exercises FromBytes across every record-size / IP-version
// combination of the canned test databases.
func TestReaderBytes(t *testing.T) {
	for _, recordSize := range []uint{24, 28, 32} {
		for _, ipVersion := range []uint{4, 6} {
			fileName := fmt.Sprintf(testFile("MaxMind-DB-test-ipv%d-%d.mmdb"), ipVersion, recordSize)
			// BUG FIX: the read error was previously discarded, so a missing
			// test file surfaced as a confusing FromBytes failure
			bytes, err := ioutil.ReadFile(fileName)
			require.NoError(t, err, "unexpected error while reading database file: %v", err)

			reader, err := FromBytes(bytes)
			require.NoError(t, err, "unexpected error while opening bytes: %v", err)

			checkMetadata(t, reader, ipVersion, recordSize)

			if ipVersion == 4 {
				checkIpv4(t, reader)
			} else {
				checkIpv6(t, reader)
			}
		}
	}
}
// TestLookupNetwork is a table-driven test of Reader.LookupNetwork: for each
// (IP, database) pair it verifies the returned containing network CIDR, the
// decoded record, and the found/not-found flag.
func TestLookupNetwork(t *testing.T) {
	// 2^120, the canned "uint128" value in the decoder test database
	bigInt := new(big.Int)
	bigInt.SetString("1329227995784915872903807060280344576", 10)
	// the full record stored for 1.1.1.0/24 in MaxMind-DB-test-decoder.mmdb
	decoderRecord := map[string]interface{}{"array": []interface{}{uint64(1),
		uint64(2),
		uint64(3)},
		"boolean": true,
		"bytes": []uint8{
			0x0,
			0x0,
			0x0,
			0x2a,
		},
		"double": 42.123456,
		"float":  float32(1.1),
		"int32":  -268435456,
		"map": map[string]interface{}{
			"mapX": map[string]interface{}{
				"arrayX": []interface{}{
					uint64(0x7),
					uint64(0x8),
					uint64(0x9)},
				"utf8_stringX": "hello",
			},
		},
		"uint128":     bigInt,
		"uint16":      uint64(0x64),
		"uint32":      uint64(0x10000000),
		"uint64":      uint64(0x1000000000000000),
		"utf8_string": "unicode! ☯ - ♫",
	}

	tests := []struct {
		IP             net.IP
		DBFile         string
		ExpectedCIDR   string
		ExpectedRecord interface{}
		ExpectedOK     bool
	}{
		// XXX - add test of IPv4 lookup in IPv6 database with no IPv4 subtree
		{
			IP:             net.ParseIP("1.1.1.1"),
			DBFile:         "MaxMind-DB-test-ipv6-32.mmdb",
			ExpectedCIDR:   "1.0.0.0/8",
			ExpectedRecord: nil,
			ExpectedOK:     false,
		},
		{
			IP:             net.ParseIP("::1:ffff:ffff"),
			DBFile:         "MaxMind-DB-test-ipv6-24.mmdb",
			ExpectedCIDR:   "::1:ffff:ffff/128",
			ExpectedRecord: map[string]interface{}{"ip": "::1:ffff:ffff"},
			ExpectedOK:     true,
		},
		{
			IP:             net.ParseIP("::2:0:1"),
			DBFile:         "MaxMind-DB-test-ipv6-24.mmdb",
			ExpectedCIDR:   "::2:0:0/122",
			ExpectedRecord: map[string]interface{}{"ip": "::2:0:0"},
			ExpectedOK:     true,
		},
		{
			IP:             net.ParseIP("1.1.1.1"),
			DBFile:         "MaxMind-DB-test-ipv4-24.mmdb",
			ExpectedCIDR:   "1.1.1.1/32",
			ExpectedRecord: map[string]interface{}{"ip": "1.1.1.1"},
			ExpectedOK:     true,
		},
		{
			IP:             net.ParseIP("1.1.1.3"),
			DBFile:         "MaxMind-DB-test-ipv4-24.mmdb",
			ExpectedCIDR:   "1.1.1.2/31",
			ExpectedRecord: map[string]interface{}{"ip": "1.1.1.2"},
			ExpectedOK:     true,
		},
		{
			IP:             net.ParseIP("1.1.1.3"),
			DBFile:         "MaxMind-DB-test-decoder.mmdb",
			ExpectedCIDR:   "1.1.1.0/24",
			ExpectedRecord: decoderRecord,
			ExpectedOK:     true,
		},
		{
			IP:             net.ParseIP("::ffff:1.1.1.128"),
			DBFile:         "MaxMind-DB-test-decoder.mmdb",
			ExpectedCIDR:   "1.1.1.0/24",
			ExpectedRecord: decoderRecord,
			ExpectedOK:     true,
		},
		{
			IP:             net.ParseIP("::1.1.1.128"),
			DBFile:         "MaxMind-DB-test-decoder.mmdb",
			ExpectedCIDR:   "::101:100/120",
			ExpectedRecord: decoderRecord,
			ExpectedOK:     true,
		},
	}
	for _, test := range tests {
		t.Run(fmt.Sprintf("%s - %s", test.DBFile, test.IP), func(t *testing.T) {
			var record interface{}
			reader, err := Open(testFile(test.DBFile))
			require.NoError(t, err)

			network, ok, err := reader.LookupNetwork(test.IP, &record)
			require.NoError(t, err)
			assert.Equal(t, test.ExpectedOK, ok)
			assert.Equal(t, test.ExpectedCIDR, network.String())
			assert.Equal(t, test.ExpectedRecord, record)
		})
	}
}
// TestDecodingToInterface verifies that a record decoded into an empty
// interface yields the generic representation of every supported MaxMind DB
// type (maps, slices, uint64, float32/64, []byte, *big.Int, string).
func TestDecodingToInterface(t *testing.T) {
	reader, err := Open(testFile("MaxMind-DB-test-decoder.mmdb"))
	require.NoError(t, err, "unexpected error while opening database: %v", err)
	var recordInterface interface{}
	err = reader.Lookup(net.ParseIP("::1.1.1.0"), &recordInterface)
	require.NoError(t, err, "unexpected error while doing lookup: %v", err)
	record := recordInterface.(map[string]interface{})
	assert.Equal(t, record["array"], []interface{}{uint64(1), uint64(2), uint64(3)})
	assert.Equal(t, record["boolean"], true)
	assert.Equal(t, record["bytes"], []byte{0x00, 0x00, 0x00, 0x2a})
	assert.Equal(t, record["double"], 42.123456)
	assert.Equal(t, record["float"], float32(1.1))
	assert.Equal(t, record["int32"], -268435456)
	assert.Equal(t, record["map"],
		map[string]interface{}{
			"mapX": map[string]interface{}{
				"arrayX":       []interface{}{uint64(7), uint64(8), uint64(9)},
				"utf8_stringX": "hello",
			}})
	assert.Equal(t, record["uint16"], uint64(100))
	assert.Equal(t, record["uint32"], uint64(268435456))
	assert.Equal(t, record["uint64"], uint64(1152921504606846976))
	assert.Equal(t, record["utf8_string"], "unicode! ☯ - ♫")
	bigInt := new(big.Int)
	bigInt.SetString("1329227995784915872903807060280344576", 10)
	assert.Equal(t, record["uint128"], bigInt)
	// Release the underlying file/mmap handle; the original leaked it.
	assert.NoError(t, reader.Close(), "error on close")
}
// nolint: maligned
// TestType mirrors the record stored in the MaxMind-DB-test-decoder fixture:
// one struct field per supported MaxMind DB data type, matched by the
// `maxminddb` struct tags.
type TestType struct {
	Array      []uint                 `maxminddb:"array"`
	Boolean    bool                   `maxminddb:"boolean"`
	Bytes      []byte                 `maxminddb:"bytes"`
	Double     float64                `maxminddb:"double"`
	Float      float32                `maxminddb:"float"`
	Int32      int32                  `maxminddb:"int32"`
	Map        map[string]interface{} `maxminddb:"map"`
	Uint16     uint16                 `maxminddb:"uint16"`
	Uint32     uint32                 `maxminddb:"uint32"`
	Uint64     uint64                 `maxminddb:"uint64"`
	Uint128    big.Int                `maxminddb:"uint128"`
	Utf8String string                 `maxminddb:"utf8_string"`
}
// TestDecoder checks struct decoding through both entry points: a direct
// Lookup into a TestType, and a two-step LookupOffset followed by Decode.
// Both paths must produce an identical result.
func TestDecoder(t *testing.T) {
	reader, err := Open(testFile("MaxMind-DB-test-decoder.mmdb"))
	require.NoError(t, err)
	// verify asserts every field of the fixture record.
	verify := func(result TestType) {
		assert.Equal(t, result.Array, []uint{uint(1), uint(2), uint(3)})
		assert.Equal(t, result.Boolean, true)
		assert.Equal(t, result.Bytes, []byte{0x00, 0x00, 0x00, 0x2a})
		assert.Equal(t, result.Double, 42.123456)
		assert.Equal(t, result.Float, float32(1.1))
		assert.Equal(t, result.Int32, int32(-268435456))
		assert.Equal(t, result.Map,
			map[string]interface{}{
				"mapX": map[string]interface{}{
					"arrayX":       []interface{}{uint64(7), uint64(8), uint64(9)},
					"utf8_stringX": "hello",
				}})
		assert.Equal(t, result.Uint16, uint16(100))
		assert.Equal(t, result.Uint32, uint32(268435456))
		assert.Equal(t, result.Uint64, uint64(1152921504606846976))
		assert.Equal(t, result.Utf8String, "unicode! ☯ - ♫")
		bigInt := new(big.Int)
		bigInt.SetString("1329227995784915872903807060280344576", 10)
		assert.Equal(t, &result.Uint128, bigInt)
	}
	{
		// Directly lookup and decode.
		var result TestType
		require.NoError(t, reader.Lookup(net.ParseIP("::1.1.1.0"), &result))
		verify(result)
	}
	{
		// Lookup record offset, then Decode.
		var result TestType
		offset, err := reader.LookupOffset(net.ParseIP("::1.1.1.0"))
		require.NoError(t, err)
		assert.NotEqual(t, offset, NotFound)
		assert.NoError(t, reader.Decode(offset, &result))
		verify(result)
	}
	assert.NoError(t, reader.Close())
}
// TestInterface is satisfied by *TestType; used to confirm that decoding
// works through a non-empty interface holding a concrete pointer.
type TestInterface interface {
	method() bool
}

// method returns the decoded Boolean field.
func (t *TestType) method() bool {
	return t.Boolean
}
// TestStructInterface verifies that Lookup can decode into a non-empty
// interface that already holds a concrete *TestType value.
func TestStructInterface(t *testing.T) {
	var result TestInterface = &TestType{}
	reader, err := Open(testFile("MaxMind-DB-test-decoder.mmdb"))
	require.NoError(t, err)
	require.NoError(t, reader.Lookup(net.ParseIP("::1.1.1.0"), &result))
	assert.Equal(t, result.method(), true)
	// Release the reader; the original leaked it.
	assert.NoError(t, reader.Close(), "error on close")
}
// TestNonEmptyNilInterface verifies that decoding into a nil non-empty
// interface fails with a descriptive unmarshal error rather than panicking.
func TestNonEmptyNilInterface(t *testing.T) {
	var result TestInterface
	reader, err := Open(testFile("MaxMind-DB-test-decoder.mmdb"))
	require.NoError(t, err)
	err = reader.Lookup(net.ParseIP("::1.1.1.0"), &result)
	assert.Equal(t, err.Error(), "maxminddb: cannot unmarshal map into type maxminddb.TestInterface")
	// Release the reader; the original leaked it.
	assert.NoError(t, reader.Close(), "error on close")
}
// CityTraits carries a single tagged field used to exercise decoding a
// struct that also has json tags alongside the maxminddb tag.
type CityTraits struct {
	AutonomousSystemNumber uint `json:"autonomous_system_number,omitempty" maxminddb:"autonomous_system_number"`
}

// City nests CityTraits under the "traits" key of a GeoIP2 record.
type City struct {
	Traits CityTraits `maxminddb:"traits"`
}
// TestEmbeddedStructAsInterface verifies that Lookup succeeds when the
// result interface holds a struct value copied out of another struct.
func TestEmbeddedStructAsInterface(t *testing.T) {
	var city City
	var result interface{} = city.Traits
	db, err := Open(testFile("GeoIP2-ISP-Test.mmdb"))
	require.NoError(t, err)
	assert.NoError(t, db.Lookup(net.ParseIP("1.128.0.0"), &result))
	// Release the reader; the original leaked it.
	assert.NoError(t, db.Close(), "error on close")
}
// BoolInterface is satisfied by Bool; used to test decoding into an
// interface-typed struct field that already holds a concrete value.
type BoolInterface interface {
	true() bool
}

// Bool is a named bool that implements BoolInterface.
type Bool bool

// true reports the underlying boolean value.
func (b Bool) true() bool {
	return bool(b)
}

// ValueTypeTestType has a single interface-typed field mapped to the
// fixture's "boolean" key.
type ValueTypeTestType struct {
	Boolean BoolInterface `maxminddb:"boolean"`
}
// TestValueTypeInterface verifies decoding into an interface-typed struct
// field pre-populated with a concrete Bool value.
// The function was previously named TesValueTypeInterface (missing the
// "Test" prefix), so `go test` never discovered or ran it.
func TestValueTypeInterface(t *testing.T) {
	var result ValueTypeTestType
	result.Boolean = Bool(false)
	reader, err := Open(testFile("MaxMind-DB-test-decoder.mmdb"))
	require.NoError(t, err)
	require.NoError(t, reader.Lookup(net.ParseIP("::1.1.1.0"), &result))
	assert.Equal(t, result.Boolean.true(), true)
	// Release the reader; the original leaked it.
	assert.NoError(t, reader.Close(), "error on close")
}
// NestedMapX is embedded (by value) into PointerMap.MapX to test decoding
// into an embedded struct.
type NestedMapX struct {
	UTF8StringX string `maxminddb:"utf8_stringX"`
}

// NestedPointerMapX is embedded by pointer to test decoding through an
// embedded struct pointer.
type NestedPointerMapX struct {
	ArrayX []int `maxminddb:"arrayX"`
}

// PointerMap maps the fixture's "mapX" sub-map onto an anonymous struct
// combining an ignored field, a value-embedded struct, and a
// pointer-embedded struct.
type PointerMap struct {
	MapX struct {
		Ignored string
		NestedMapX
		*NestedPointerMapX
	} `maxminddb:"mapX"`
}
// TestPointerType mirrors the decoder fixture record entirely through
// pointer fields, including one pointer-to-pointer field, to exercise
// automatic pointer allocation during decoding.
type TestPointerType struct {
	Array   *[]uint     `maxminddb:"array"`
	Boolean *bool       `maxminddb:"boolean"`
	Bytes   *[]byte     `maxminddb:"bytes"`
	Double  *float64    `maxminddb:"double"`
	Float   *float32    `maxminddb:"float"`
	Int32   *int32      `maxminddb:"int32"`
	Map     *PointerMap `maxminddb:"map"`
	Uint16  *uint16     `maxminddb:"uint16"`
	Uint32  *uint32     `maxminddb:"uint32"`
	// Test for pointer to pointer
	Uint64     **uint64 `maxminddb:"uint64"`
	Uint128    *big.Int `maxminddb:"uint128"`
	Utf8String *string  `maxminddb:"utf8_string"`
}
// TestComplexStructWithNestingAndPointer decodes the fixture record into
// TestPointerType and checks every pointer-indirected field.
func TestComplexStructWithNestingAndPointer(t *testing.T) {
	reader, err := Open(testFile("MaxMind-DB-test-decoder.mmdb"))
	// Use require here: with assert, a failed Open would leave reader nil
	// and the Lookup below would panic instead of failing cleanly.
	require.NoError(t, err)
	var result TestPointerType
	err = reader.Lookup(net.ParseIP("::1.1.1.0"), &result)
	require.NoError(t, err)
	assert.Equal(t, *result.Array, []uint{uint(1), uint(2), uint(3)})
	assert.Equal(t, *result.Boolean, true)
	assert.Equal(t, *result.Bytes, []byte{0x00, 0x00, 0x00, 0x2a})
	assert.Equal(t, *result.Double, 42.123456)
	assert.Equal(t, *result.Float, float32(1.1))
	assert.Equal(t, *result.Int32, int32(-268435456))
	assert.Equal(t, result.Map.MapX.ArrayX, []int{7, 8, 9})
	assert.Equal(t, result.Map.MapX.UTF8StringX, "hello")
	assert.Equal(t, *result.Uint16, uint16(100))
	assert.Equal(t, *result.Uint32, uint32(268435456))
	assert.Equal(t, **result.Uint64, uint64(1152921504606846976))
	assert.Equal(t, *result.Utf8String, "unicode! ☯ - ♫")
	bigInt := new(big.Int)
	bigInt.SetString("1329227995784915872903807060280344576", 10)
	assert.Equal(t, result.Uint128, bigInt)
	assert.NoError(t, reader.Close())
}
// TestNestedOffsetDecode looks up a record's offset and then decodes
// individual nested fields lazily via uintptr offset fields.
func TestNestedOffsetDecode(t *testing.T) {
	db, err := Open(testFile("GeoIP2-City-Test.mmdb"))
	require.NoError(t, err)
	off, err := db.LookupOffset(net.ParseIP("81.2.69.142"))
	// Check the error before inspecting the offset: on failure the
	// returned offset is meaningless. The original asserted on off first.
	require.NoError(t, err)
	assert.NotEqual(t, off, NotFound)
	var root struct {
		CountryOffset uintptr `maxminddb:"country"`
		Location      struct {
			Latitude float64 `maxminddb:"latitude"`
			// Longitude is directly nested within the parent map.
			LongitudeOffset uintptr `maxminddb:"longitude"`
			// TimeZone is indirected via a pointer.
			TimeZoneOffset uintptr `maxminddb:"time_zone"`
		} `maxminddb:"location"`
	}
	assert.NoError(t, db.Decode(off, &root))
	assert.Equal(t, root.Location.Latitude, 51.5142)
	var longitude float64
	assert.NoError(t, db.Decode(root.Location.LongitudeOffset, &longitude))
	assert.Equal(t, longitude, -0.0931)
	var timeZone string
	assert.NoError(t, db.Decode(root.Location.TimeZoneOffset, &timeZone))
	assert.Equal(t, timeZone, "Europe/London")
	var country struct {
		IsoCode string `maxminddb:"iso_code"`
	}
	assert.NoError(t, db.Decode(root.CountryOffset, &country))
	assert.Equal(t, country.IsoCode, "GB")
	assert.NoError(t, db.Close())
}
// TestDecodingUint16IntoInt verifies that a uint16 database value can be
// decoded into a plain Go int field.
func TestDecodingUint16IntoInt(t *testing.T) {
	reader, err := Open(testFile("MaxMind-DB-test-decoder.mmdb"))
	require.NoError(t, err, "unexpected error while opening database: %v", err)
	var result struct {
		Uint16 int `maxminddb:"uint16"`
	}
	err = reader.Lookup(net.ParseIP("::1.1.1.0"), &result)
	require.NoError(t, err)
	assert.Equal(t, result.Uint16, 100)
	// Release the reader; the original leaked it.
	assert.NoError(t, reader.Close(), "error on close")
}
// TestIpv6inIpv4 verifies that looking up an IPv6 address in an IPv4-only
// database returns a descriptive error and leaves the result untouched.
func TestIpv6inIpv4(t *testing.T) {
	reader, err := Open(testFile("MaxMind-DB-test-ipv4-24.mmdb"))
	require.NoError(t, err, "unexpected error while opening database: %v", err)
	var result TestType
	err = reader.Lookup(net.ParseIP("2001::"), &result)
	// The failed lookup must not have written anything into result.
	var emptyResult TestType
	assert.Equal(t, result, emptyResult)
	expected := errors.New("error looking up '2001::': you attempted to look up an IPv6 address in an IPv4-only database")
	assert.Equal(t, err, expected)
	assert.NoError(t, reader.Close(), "error on close")
}
// TestBrokenDoubleDatabase verifies that a corrupt float64 field in the
// data section surfaces as an invalid-database error on lookup.
func TestBrokenDoubleDatabase(t *testing.T) {
	reader, err := Open(testFile("GeoIP2-City-Test-Broken-Double-Format.mmdb"))
	require.NoError(t, err, "unexpected error while opening database: %v", err)
	var result interface{}
	err = reader.Lookup(net.ParseIP("2001:220::"), &result)
	expected := newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (float 64 size of 2)")
	assert.Equal(t, err, expected)
	assert.NoError(t, reader.Close(), "error on close")
}
// TestInvalidNodeCountDatabase verifies that Open rejects a database whose
// metadata declares an invalid node count.
func TestInvalidNodeCountDatabase(t *testing.T) {
	_, err := Open(testFile("GeoIP2-City-Test-Invalid-Node-Count.mmdb"))
	expected := newInvalidDatabaseError("the MaxMind DB contains invalid metadata")
	assert.Equal(t, err, expected)
}
// TestMissingDatabase verifies that Open on a nonexistent path returns a
// nil reader and an OS "open" error.
// NOTE(review): the regexp's unescaped dots match any character; the
// pattern is looser than intended but still matches the real error.
func TestMissingDatabase(t *testing.T) {
	reader, err := Open("file-does-not-exist.mmdb")
	assert.Nil(t, reader, "received reader when doing lookups on DB that doesn't exist")
	assert.Regexp(t, "open file-does-not-exist.mmdb.*", err)
}
// TestNonDatabase verifies that Open rejects an existing file that is not
// in MaxMind DB format.
func TestNonDatabase(t *testing.T) {
	reader, err := Open("README.md")
	// Failure message previously said "DB that doesn't exist" — a
	// copy-paste from TestMissingDatabase; this test opens a real file.
	assert.Nil(t, reader, "received reader when opening a non-database file")
	assert.Equal(t, err.Error(), "error opening database: invalid MaxMind DB file")
}
// TestDecodingToNonPointer verifies that Lookup rejects a result argument
// that is not a pointer.
func TestDecodingToNonPointer(t *testing.T) {
	reader, _ := Open(testFile("MaxMind-DB-test-decoder.mmdb"))
	var recordInterface interface{}
	// recordInterface is passed by value, not by address — must error.
	err := reader.Lookup(net.ParseIP("::1.1.1.0"), recordInterface)
	assert.Equal(t, err.Error(), "result param must be a pointer")
	assert.NoError(t, reader.Close(), "error on close")
}
// TestNilLookup verifies that Lookup rejects a nil IP address.
func TestNilLookup(t *testing.T) {
	reader, _ := Open(testFile("MaxMind-DB-test-decoder.mmdb"))
	var recordInterface interface{}
	err := reader.Lookup(nil, recordInterface)
	assert.Equal(t, err.Error(), "ipAddress passed to Lookup cannot be nil")
	assert.NoError(t, reader.Close(), "error on close")
}
// TestUsingClosedDatabase verifies that Lookup, LookupOffset, and Decode
// all fail with explicit errors once the reader has been closed.
func TestUsingClosedDatabase(t *testing.T) {
	reader, _ := Open(testFile("MaxMind-DB-test-decoder.mmdb"))
	reader.Close()
	var recordInterface interface{}
	err := reader.Lookup(nil, recordInterface)
	assert.Equal(t, err.Error(), "cannot call Lookup on a closed database")
	_, err = reader.LookupOffset(nil)
	assert.Equal(t, err.Error(), "cannot call LookupOffset on a closed database")
	err = reader.Decode(0, recordInterface)
	assert.Equal(t, err.Error(), "cannot call Decode on a closed database")
}
// checkMetadata asserts the metadata of one of the bundled
// MaxMind-DB-test-ipv{4,6}-{24,28,32} fixtures: format version, type,
// descriptions, languages, record size, and the node count expected for
// the given IP version (164 nodes for v4, 416 for v6).
func checkMetadata(t *testing.T, reader *Reader, ipVersion uint, recordSize uint) {
	metadata := reader.Metadata
	assert.Equal(t, metadata.BinaryFormatMajorVersion, uint(2))
	assert.Equal(t, metadata.BinaryFormatMinorVersion, uint(0))
	assert.IsType(t, uint(0), metadata.BuildEpoch)
	assert.Equal(t, metadata.DatabaseType, "Test")
	assert.Equal(t, metadata.Description,
		map[string]string{
			"en": "Test Database",
			"zh": "Test Database Chinese",
		})
	assert.Equal(t, metadata.IPVersion, ipVersion)
	assert.Equal(t, metadata.Languages, []string{"en", "zh"})
	if ipVersion == 4 {
		assert.Equal(t, metadata.NodeCount, uint(164))
	} else {
		assert.Equal(t, metadata.NodeCount, uint(416))
	}
	assert.Equal(t, metadata.RecordSize, recordSize)
}
// checkIpv4 exercises an IPv4 test fixture: exact-match lookups for the
// power-of-two hosts 1.1.1.1..1.1.1.32, lookups that resolve to the
// containing subnet's record, and lookups outside any subnet (nil result).
func checkIpv4(t *testing.T, reader *Reader) {
	// Addresses present in the database map to themselves.
	for i := uint(0); i < 6; i++ {
		address := fmt.Sprintf("1.1.1.%d", uint(1)<<i)
		ip := net.ParseIP(address)
		var result map[string]string
		err := reader.Lookup(ip, &result)
		assert.NoError(t, err, "unexpected error while doing lookup: %v", err)
		assert.Equal(t, result, map[string]string{"ip": address})
	}
	// Addresses inside a stored subnet map to that subnet's record.
	pairs := map[string]string{
		"1.1.1.3":  "1.1.1.2",
		"1.1.1.5":  "1.1.1.4",
		"1.1.1.7":  "1.1.1.4",
		"1.1.1.9":  "1.1.1.8",
		"1.1.1.15": "1.1.1.8",
		"1.1.1.17": "1.1.1.16",
		"1.1.1.31": "1.1.1.16",
	}
	for keyAddress, valueAddress := range pairs {
		data := map[string]string{"ip": valueAddress}
		ip := net.ParseIP(keyAddress)
		var result map[string]string
		err := reader.Lookup(ip, &result)
		assert.NoError(t, err, "unexpected error while doing lookup: %v", err)
		assert.Equal(t, result, data)
	}
	// Addresses outside every stored subnet yield no record.
	for _, address := range []string{"1.1.1.33", "255.254.253.123"} {
		ip := net.ParseIP(address)
		var result map[string]string
		err := reader.Lookup(ip, &result)
		assert.NoError(t, err, "unexpected error while doing lookup: %v", err)
		assert.Nil(t, result)
	}
}
// checkIpv6 exercises an IPv6 test fixture: exact-match subnet lookups,
// lookups resolving to a containing subnet's record, and lookups outside
// any subnet (nil result).
func checkIpv6(t *testing.T, reader *Reader) {
	// Subnet base addresses map to themselves.
	subnets := []string{"::1:ffff:ffff", "::2:0:0",
		"::2:0:40", "::2:0:50", "::2:0:58"}
	for _, address := range subnets {
		var result map[string]string
		err := reader.Lookup(net.ParseIP(address), &result)
		assert.NoError(t, err, "unexpected error while doing lookup: %v", err)
		assert.Equal(t, result, map[string]string{"ip": address})
	}
	// Addresses inside a stored subnet map to that subnet's record.
	pairs := map[string]string{
		"::2:0:1":  "::2:0:0",
		"::2:0:33": "::2:0:0",
		"::2:0:39": "::2:0:0",
		"::2:0:41": "::2:0:40",
		"::2:0:49": "::2:0:40",
		"::2:0:52": "::2:0:50",
		"::2:0:57": "::2:0:50",
		"::2:0:59": "::2:0:58",
	}
	for keyAddress, valueAddress := range pairs {
		data := map[string]string{"ip": valueAddress}
		var result map[string]string
		err := reader.Lookup(net.ParseIP(keyAddress), &result)
		assert.NoError(t, err, "unexpected error while doing lookup: %v", err)
		assert.Equal(t, result, data)
	}
	// Addresses outside every stored subnet yield no record.
	for _, address := range []string{"1.1.1.33", "255.254.253.123", "89fa::"} {
		var result map[string]string
		err := reader.Lookup(net.ParseIP(address), &result)
		assert.NoError(t, err, "unexpected error while doing lookup: %v", err)
		assert.Nil(t, result)
	}
}
// BenchmarkMaxMindDB measures full-record interface decoding for random
// IPv4 lookups. Requires a GeoLite2-City.mmdb in the working directory
// (not part of the bundled test-data fixtures).
func BenchmarkMaxMindDB(b *testing.B) {
	db, err := Open("GeoLite2-City.mmdb")
	require.NoError(b, err)
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	var result interface{}
	ip := make(net.IP, 4)
	for i := 0; i < b.N; i++ {
		randomIPv4Address(r, ip)
		err = db.Lookup(ip, &result)
		assert.NoError(b, err)
	}
	assert.NoError(b, db.Close(), "error on close")
}
// BenchmarkCountryCode measures lookups that decode only the country ISO
// code into a minimal struct, using a fixed seed for reproducible access
// patterns. Requires a GeoLite2-City.mmdb in the working directory.
func BenchmarkCountryCode(b *testing.B) {
	db, err := Open("GeoLite2-City.mmdb")
	require.NoError(b, err)
	type MinCountry struct {
		Country struct {
			IsoCode string `maxminddb:"iso_code"`
		} `maxminddb:"country"`
	}
	r := rand.New(rand.NewSource(0))
	var result MinCountry
	ip := make(net.IP, 4)
	for i := 0; i < b.N; i++ {
		randomIPv4Address(r, ip)
		err = db.Lookup(ip, &result)
		assert.NoError(b, err)
	}
	assert.NoError(b, db.Close(), "error on close")
}
func randomIPv4Address(r *rand.Rand, ip []byte) {
num := r.Uint32()
ip[0] = byte(num >> 24)
ip[1] = byte(num >> 16)
ip[2] = byte(num >> 8)
ip[3] = byte(num)
}
func testFile(file string) string {
return filepath.Join("test-data/test-data", file)
}
Rename benchmark method to be more accurate
package maxminddb
import (
"errors"
"fmt"
"io/ioutil"
"math/big"
"math/rand"
"net"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestReader opens every combination of record size (24/28/32 bits) and IP
// version (4/6) fixture from disk and checks its metadata and lookups.
func TestReader(t *testing.T) {
	for _, recordSize := range []uint{24, 28, 32} {
		for _, ipVersion := range []uint{4, 6} {
			fileName := fmt.Sprintf(testFile("MaxMind-DB-test-ipv%d-%d.mmdb"), ipVersion, recordSize)
			reader, err := Open(fileName)
			require.NoError(t, err, "unexpected error while opening database: %v", err)
			checkMetadata(t, reader, ipVersion, recordSize)
			if ipVersion == 4 {
				checkIpv4(t, reader)
			} else {
				checkIpv6(t, reader)
			}
		}
	}
}
// TestReaderBytes is the in-memory counterpart of TestReader: each fixture
// is loaded with ioutil.ReadFile and opened via FromBytes.
func TestReaderBytes(t *testing.T) {
	for _, recordSize := range []uint{24, 28, 32} {
		for _, ipVersion := range []uint{4, 6} {
			fileName := fmt.Sprintf(testFile("MaxMind-DB-test-ipv%d-%d.mmdb"), ipVersion, recordSize)
			// The original discarded this error; a missing fixture then
			// surfaced as a confusing FromBytes failure on empty input.
			bytes, err := ioutil.ReadFile(fileName)
			require.NoError(t, err, "unexpected error while reading database file: %v", err)
			reader, err := FromBytes(bytes)
			require.NoError(t, err, "unexpected error while opening bytes: %v", err)
			checkMetadata(t, reader, ipVersion, recordSize)
			if ipVersion == 4 {
				checkIpv4(t, reader)
			} else {
				checkIpv6(t, reader)
			}
		}
	}
}
func TestLookupNetwork(t *testing.T) {
bigInt := new(big.Int)
bigInt.SetString("1329227995784915872903807060280344576", 10)
decoderRecord := map[string]interface{}{"array": []interface{}{uint64(1),
uint64(2),
uint64(3)},
"boolean": true,
"bytes": []uint8{
0x0,
0x0,
0x0,
0x2a,
},
"double": 42.123456,
"float": float32(1.1),
"int32": -268435456,
"map": map[string]interface{}{
"mapX": map[string]interface{}{
"arrayX": []interface{}{
uint64(0x7),
uint64(0x8),
uint64(0x9)},
"utf8_stringX": "hello",
},
},
"uint128": bigInt,
"uint16": uint64(0x64),
"uint32": uint64(0x10000000),
"uint64": uint64(0x1000000000000000),
"utf8_string": "unicode! ☯ - ♫",
}
tests := []struct {
IP net.IP
DBFile string
ExpectedCIDR string
ExpectedRecord interface{}
ExpectedOK bool
}{
// XXX - add test of IPv4 lookup in IPv6 database with no IPv4 subtree
{
IP: net.ParseIP("1.1.1.1"),
DBFile: "MaxMind-DB-test-ipv6-32.mmdb",
ExpectedCIDR: "1.0.0.0/8",
ExpectedRecord: nil,
ExpectedOK: false,
},
{
IP: net.ParseIP("::1:ffff:ffff"),
DBFile: "MaxMind-DB-test-ipv6-24.mmdb",
ExpectedCIDR: "::1:ffff:ffff/128",
ExpectedRecord: map[string]interface{}{"ip": "::1:ffff:ffff"},
ExpectedOK: true,
},
{
IP: net.ParseIP("::2:0:1"),
DBFile: "MaxMind-DB-test-ipv6-24.mmdb",
ExpectedCIDR: "::2:0:0/122",
ExpectedRecord: map[string]interface{}{"ip": "::2:0:0"},
ExpectedOK: true,
},
{
IP: net.ParseIP("1.1.1.1"),
DBFile: "MaxMind-DB-test-ipv4-24.mmdb",
ExpectedCIDR: "1.1.1.1/32",
ExpectedRecord: map[string]interface{}{"ip": "1.1.1.1"},
ExpectedOK: true,
},
{
IP: net.ParseIP("1.1.1.3"),
DBFile: "MaxMind-DB-test-ipv4-24.mmdb",
ExpectedCIDR: "1.1.1.2/31",
ExpectedRecord: map[string]interface{}{"ip": "1.1.1.2"},
ExpectedOK: true,
},
{
IP: net.ParseIP("1.1.1.3"),
DBFile: "MaxMind-DB-test-decoder.mmdb",
ExpectedCIDR: "1.1.1.0/24",
ExpectedRecord: decoderRecord,
ExpectedOK: true,
},
{
IP: net.ParseIP("::ffff:1.1.1.128"),
DBFile: "MaxMind-DB-test-decoder.mmdb",
ExpectedCIDR: "1.1.1.0/24",
ExpectedRecord: decoderRecord,
ExpectedOK: true,
},
{
IP: net.ParseIP("::1.1.1.128"),
DBFile: "MaxMind-DB-test-decoder.mmdb",
ExpectedCIDR: "::101:100/120",
ExpectedRecord: decoderRecord,
ExpectedOK: true,
},
}
for _, test := range tests {
t.Run(fmt.Sprintf("%s - %s", test.DBFile, test.IP), func(t *testing.T) {
var record interface{}
reader, err := Open(testFile(test.DBFile))
require.NoError(t, err)
network, ok, err := reader.LookupNetwork(test.IP, &record)
require.NoError(t, err)
assert.Equal(t, test.ExpectedOK, ok)
assert.Equal(t, test.ExpectedCIDR, network.String())
assert.Equal(t, test.ExpectedRecord, record)
})
}
}
func TestDecodingToInterface(t *testing.T) {
reader, err := Open(testFile("MaxMind-DB-test-decoder.mmdb"))
require.NoError(t, err, "unexpected error while opening database: %v", err)
var recordInterface interface{}
err = reader.Lookup(net.ParseIP("::1.1.1.0"), &recordInterface)
require.NoError(t, err, "unexpected error while doing lookup: %v", err)
record := recordInterface.(map[string]interface{})
assert.Equal(t, record["array"], []interface{}{uint64(1), uint64(2), uint64(3)})
assert.Equal(t, record["boolean"], true)
assert.Equal(t, record["bytes"], []byte{0x00, 0x00, 0x00, 0x2a})
assert.Equal(t, record["double"], 42.123456)
assert.Equal(t, record["float"], float32(1.1))
assert.Equal(t, record["int32"], -268435456)
assert.Equal(t, record["map"],
map[string]interface{}{
"mapX": map[string]interface{}{
"arrayX": []interface{}{uint64(7), uint64(8), uint64(9)},
"utf8_stringX": "hello",
}})
assert.Equal(t, record["uint16"], uint64(100))
assert.Equal(t, record["uint32"], uint64(268435456))
assert.Equal(t, record["uint64"], uint64(1152921504606846976))
assert.Equal(t, record["utf8_string"], "unicode! ☯ - ♫")
bigInt := new(big.Int)
bigInt.SetString("1329227995784915872903807060280344576", 10)
assert.Equal(t, record["uint128"], bigInt)
}
// nolint: maligned
type TestType struct {
Array []uint `maxminddb:"array"`
Boolean bool `maxminddb:"boolean"`
Bytes []byte `maxminddb:"bytes"`
Double float64 `maxminddb:"double"`
Float float32 `maxminddb:"float"`
Int32 int32 `maxminddb:"int32"`
Map map[string]interface{} `maxminddb:"map"`
Uint16 uint16 `maxminddb:"uint16"`
Uint32 uint32 `maxminddb:"uint32"`
Uint64 uint64 `maxminddb:"uint64"`
Uint128 big.Int `maxminddb:"uint128"`
Utf8String string `maxminddb:"utf8_string"`
}
func TestDecoder(t *testing.T) {
reader, err := Open(testFile("MaxMind-DB-test-decoder.mmdb"))
require.NoError(t, err)
verify := func(result TestType) {
assert.Equal(t, result.Array, []uint{uint(1), uint(2), uint(3)})
assert.Equal(t, result.Boolean, true)
assert.Equal(t, result.Bytes, []byte{0x00, 0x00, 0x00, 0x2a})
assert.Equal(t, result.Double, 42.123456)
assert.Equal(t, result.Float, float32(1.1))
assert.Equal(t, result.Int32, int32(-268435456))
assert.Equal(t, result.Map,
map[string]interface{}{
"mapX": map[string]interface{}{
"arrayX": []interface{}{uint64(7), uint64(8), uint64(9)},
"utf8_stringX": "hello",
}})
assert.Equal(t, result.Uint16, uint16(100))
assert.Equal(t, result.Uint32, uint32(268435456))
assert.Equal(t, result.Uint64, uint64(1152921504606846976))
assert.Equal(t, result.Utf8String, "unicode! ☯ - ♫")
bigInt := new(big.Int)
bigInt.SetString("1329227995784915872903807060280344576", 10)
assert.Equal(t, &result.Uint128, bigInt)
}
{
// Directly lookup and decode.
var result TestType
require.NoError(t, reader.Lookup(net.ParseIP("::1.1.1.0"), &result))
verify(result)
}
{
// Lookup record offset, then Decode.
var result TestType
offset, err := reader.LookupOffset(net.ParseIP("::1.1.1.0"))
require.NoError(t, err)
assert.NotEqual(t, offset, NotFound)
assert.NoError(t, reader.Decode(offset, &result))
verify(result)
}
assert.NoError(t, reader.Close())
}
type TestInterface interface {
method() bool
}
func (t *TestType) method() bool {
return t.Boolean
}
func TestStructInterface(t *testing.T) {
var result TestInterface = &TestType{}
reader, err := Open(testFile("MaxMind-DB-test-decoder.mmdb"))
require.NoError(t, err)
require.NoError(t, reader.Lookup(net.ParseIP("::1.1.1.0"), &result))
assert.Equal(t, result.method(), true)
}
func TestNonEmptyNilInterface(t *testing.T) {
var result TestInterface
reader, err := Open(testFile("MaxMind-DB-test-decoder.mmdb"))
require.NoError(t, err)
err = reader.Lookup(net.ParseIP("::1.1.1.0"), &result)
assert.Equal(t, err.Error(), "maxminddb: cannot unmarshal map into type maxminddb.TestInterface")
}
type CityTraits struct {
AutonomousSystemNumber uint `json:"autonomous_system_number,omitempty" maxminddb:"autonomous_system_number"`
}
type City struct {
Traits CityTraits `maxminddb:"traits"`
}
func TestEmbeddedStructAsInterface(t *testing.T) {
var city City
var result interface{} = city.Traits
db, err := Open(testFile("GeoIP2-ISP-Test.mmdb"))
require.NoError(t, err)
assert.NoError(t, db.Lookup(net.ParseIP("1.128.0.0"), &result))
}
type BoolInterface interface {
true() bool
}
type Bool bool
func (b Bool) true() bool {
return bool(b)
}
type ValueTypeTestType struct {
Boolean BoolInterface `maxminddb:"boolean"`
}
// TestValueTypeInterface verifies decoding into an interface-typed struct
// field pre-populated with a concrete Bool value.
// The function was previously named TesValueTypeInterface (missing the
// "Test" prefix), so `go test` never discovered or ran it.
func TestValueTypeInterface(t *testing.T) {
	var result ValueTypeTestType
	result.Boolean = Bool(false)
	reader, err := Open(testFile("MaxMind-DB-test-decoder.mmdb"))
	require.NoError(t, err)
	require.NoError(t, reader.Lookup(net.ParseIP("::1.1.1.0"), &result))
	assert.Equal(t, result.Boolean.true(), true)
	// Release the reader; the original leaked it.
	assert.NoError(t, reader.Close(), "error on close")
}
type NestedMapX struct {
UTF8StringX string `maxminddb:"utf8_stringX"`
}
type NestedPointerMapX struct {
ArrayX []int `maxminddb:"arrayX"`
}
type PointerMap struct {
MapX struct {
Ignored string
NestedMapX
*NestedPointerMapX
} `maxminddb:"mapX"`
}
type TestPointerType struct {
Array *[]uint `maxminddb:"array"`
Boolean *bool `maxminddb:"boolean"`
Bytes *[]byte `maxminddb:"bytes"`
Double *float64 `maxminddb:"double"`
Float *float32 `maxminddb:"float"`
Int32 *int32 `maxminddb:"int32"`
Map *PointerMap `maxminddb:"map"`
Uint16 *uint16 `maxminddb:"uint16"`
Uint32 *uint32 `maxminddb:"uint32"`
// Test for pointer to pointer
Uint64 **uint64 `maxminddb:"uint64"`
Uint128 *big.Int `maxminddb:"uint128"`
Utf8String *string `maxminddb:"utf8_string"`
}
func TestComplexStructWithNestingAndPointer(t *testing.T) {
reader, err := Open(testFile("MaxMind-DB-test-decoder.mmdb"))
assert.NoError(t, err)
var result TestPointerType
err = reader.Lookup(net.ParseIP("::1.1.1.0"), &result)
require.NoError(t, err)
assert.Equal(t, *result.Array, []uint{uint(1), uint(2), uint(3)})
assert.Equal(t, *result.Boolean, true)
assert.Equal(t, *result.Bytes, []byte{0x00, 0x00, 0x00, 0x2a})
assert.Equal(t, *result.Double, 42.123456)
assert.Equal(t, *result.Float, float32(1.1))
assert.Equal(t, *result.Int32, int32(-268435456))
assert.Equal(t, result.Map.MapX.ArrayX, []int{7, 8, 9})
assert.Equal(t, result.Map.MapX.UTF8StringX, "hello")
assert.Equal(t, *result.Uint16, uint16(100))
assert.Equal(t, *result.Uint32, uint32(268435456))
assert.Equal(t, **result.Uint64, uint64(1152921504606846976))
assert.Equal(t, *result.Utf8String, "unicode! ☯ - ♫")
bigInt := new(big.Int)
bigInt.SetString("1329227995784915872903807060280344576", 10)
assert.Equal(t, result.Uint128, bigInt)
assert.NoError(t, reader.Close())
}
// TestNestedOffsetDecode looks up a record's offset and then decodes
// individual nested fields lazily via uintptr offset fields.
func TestNestedOffsetDecode(t *testing.T) {
	db, err := Open(testFile("GeoIP2-City-Test.mmdb"))
	require.NoError(t, err)
	off, err := db.LookupOffset(net.ParseIP("81.2.69.142"))
	// Check the error before inspecting the offset: on failure the
	// returned offset is meaningless. The original asserted on off first.
	require.NoError(t, err)
	assert.NotEqual(t, off, NotFound)
	var root struct {
		CountryOffset uintptr `maxminddb:"country"`
		Location      struct {
			Latitude float64 `maxminddb:"latitude"`
			// Longitude is directly nested within the parent map.
			LongitudeOffset uintptr `maxminddb:"longitude"`
			// TimeZone is indirected via a pointer.
			TimeZoneOffset uintptr `maxminddb:"time_zone"`
		} `maxminddb:"location"`
	}
	assert.NoError(t, db.Decode(off, &root))
	assert.Equal(t, root.Location.Latitude, 51.5142)
	var longitude float64
	assert.NoError(t, db.Decode(root.Location.LongitudeOffset, &longitude))
	assert.Equal(t, longitude, -0.0931)
	var timeZone string
	assert.NoError(t, db.Decode(root.Location.TimeZoneOffset, &timeZone))
	assert.Equal(t, timeZone, "Europe/London")
	var country struct {
		IsoCode string `maxminddb:"iso_code"`
	}
	assert.NoError(t, db.Decode(root.CountryOffset, &country))
	assert.Equal(t, country.IsoCode, "GB")
	assert.NoError(t, db.Close())
}
func TestDecodingUint16IntoInt(t *testing.T) {
reader, err := Open(testFile("MaxMind-DB-test-decoder.mmdb"))
require.NoError(t, err, "unexpected error while opening database: %v", err)
var result struct {
Uint16 int `maxminddb:"uint16"`
}
err = reader.Lookup(net.ParseIP("::1.1.1.0"), &result)
require.NoError(t, err)
assert.Equal(t, result.Uint16, 100)
}
func TestIpv6inIpv4(t *testing.T) {
reader, err := Open(testFile("MaxMind-DB-test-ipv4-24.mmdb"))
require.NoError(t, err, "unexpected error while opening database: %v", err)
var result TestType
err = reader.Lookup(net.ParseIP("2001::"), &result)
var emptyResult TestType
assert.Equal(t, result, emptyResult)
expected := errors.New("error looking up '2001::': you attempted to look up an IPv6 address in an IPv4-only database")
assert.Equal(t, err, expected)
assert.NoError(t, reader.Close(), "error on close")
}
func TestBrokenDoubleDatabase(t *testing.T) {
reader, err := Open(testFile("GeoIP2-City-Test-Broken-Double-Format.mmdb"))
require.NoError(t, err, "unexpected error while opening database: %v", err)
var result interface{}
err = reader.Lookup(net.ParseIP("2001:220::"), &result)
expected := newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (float 64 size of 2)")
assert.Equal(t, err, expected)
assert.NoError(t, reader.Close(), "error on close")
}
func TestInvalidNodeCountDatabase(t *testing.T) {
_, err := Open(testFile("GeoIP2-City-Test-Invalid-Node-Count.mmdb"))
expected := newInvalidDatabaseError("the MaxMind DB contains invalid metadata")
assert.Equal(t, err, expected)
}
func TestMissingDatabase(t *testing.T) {
reader, err := Open("file-does-not-exist.mmdb")
assert.Nil(t, reader, "received reader when doing lookups on DB that doesn't exist")
assert.Regexp(t, "open file-does-not-exist.mmdb.*", err)
}
// TestNonDatabase verifies that Open rejects an existing file that is not
// in MaxMind DB format.
func TestNonDatabase(t *testing.T) {
	reader, err := Open("README.md")
	// Failure message previously said "DB that doesn't exist" — a
	// copy-paste from TestMissingDatabase; this test opens a real file.
	assert.Nil(t, reader, "received reader when opening a non-database file")
	assert.Equal(t, err.Error(), "error opening database: invalid MaxMind DB file")
}
func TestDecodingToNonPointer(t *testing.T) {
reader, _ := Open(testFile("MaxMind-DB-test-decoder.mmdb"))
var recordInterface interface{}
err := reader.Lookup(net.ParseIP("::1.1.1.0"), recordInterface)
assert.Equal(t, err.Error(), "result param must be a pointer")
assert.NoError(t, reader.Close(), "error on close")
}
func TestNilLookup(t *testing.T) {
reader, _ := Open(testFile("MaxMind-DB-test-decoder.mmdb"))
var recordInterface interface{}
err := reader.Lookup(nil, recordInterface)
assert.Equal(t, err.Error(), "ipAddress passed to Lookup cannot be nil")
assert.NoError(t, reader.Close(), "error on close")
}
func TestUsingClosedDatabase(t *testing.T) {
reader, _ := Open(testFile("MaxMind-DB-test-decoder.mmdb"))
reader.Close()
var recordInterface interface{}
err := reader.Lookup(nil, recordInterface)
assert.Equal(t, err.Error(), "cannot call Lookup on a closed database")
_, err = reader.LookupOffset(nil)
assert.Equal(t, err.Error(), "cannot call LookupOffset on a closed database")
err = reader.Decode(0, recordInterface)
assert.Equal(t, err.Error(), "cannot call Decode on a closed database")
}
func checkMetadata(t *testing.T, reader *Reader, ipVersion uint, recordSize uint) {
metadata := reader.Metadata
assert.Equal(t, metadata.BinaryFormatMajorVersion, uint(2))
assert.Equal(t, metadata.BinaryFormatMinorVersion, uint(0))
assert.IsType(t, uint(0), metadata.BuildEpoch)
assert.Equal(t, metadata.DatabaseType, "Test")
assert.Equal(t, metadata.Description,
map[string]string{
"en": "Test Database",
"zh": "Test Database Chinese",
})
assert.Equal(t, metadata.IPVersion, ipVersion)
assert.Equal(t, metadata.Languages, []string{"en", "zh"})
if ipVersion == 4 {
assert.Equal(t, metadata.NodeCount, uint(164))
} else {
assert.Equal(t, metadata.NodeCount, uint(416))
}
assert.Equal(t, metadata.RecordSize, recordSize)
}
func checkIpv4(t *testing.T, reader *Reader) {
for i := uint(0); i < 6; i++ {
address := fmt.Sprintf("1.1.1.%d", uint(1)<<i)
ip := net.ParseIP(address)
var result map[string]string
err := reader.Lookup(ip, &result)
assert.NoError(t, err, "unexpected error while doing lookup: %v", err)
assert.Equal(t, result, map[string]string{"ip": address})
}
pairs := map[string]string{
"1.1.1.3": "1.1.1.2",
"1.1.1.5": "1.1.1.4",
"1.1.1.7": "1.1.1.4",
"1.1.1.9": "1.1.1.8",
"1.1.1.15": "1.1.1.8",
"1.1.1.17": "1.1.1.16",
"1.1.1.31": "1.1.1.16",
}
for keyAddress, valueAddress := range pairs {
data := map[string]string{"ip": valueAddress}
ip := net.ParseIP(keyAddress)
var result map[string]string
err := reader.Lookup(ip, &result)
assert.NoError(t, err, "unexpected error while doing lookup: %v", err)
assert.Equal(t, result, data)
}
for _, address := range []string{"1.1.1.33", "255.254.253.123"} {
ip := net.ParseIP(address)
var result map[string]string
err := reader.Lookup(ip, &result)
assert.NoError(t, err, "unexpected error while doing lookup: %v", err)
assert.Nil(t, result)
}
}
// checkIpv6 verifies IPv6 lookups against the test database: exact records,
// records resolved through their covering network, and addresses with no
// record (including IPv4 addresses absent from the v6 tree).
func checkIpv6(t *testing.T, reader *Reader) {
	exactAddrs := []string{"::1:ffff:ffff", "::2:0:0",
		"::2:0:40", "::2:0:50", "::2:0:58"}
	for _, addr := range exactAddrs {
		var record map[string]string
		lookupErr := reader.Lookup(net.ParseIP(addr), &record)
		assert.NoError(t, lookupErr, "unexpected error while doing lookup: %v", lookupErr)
		assert.Equal(t, record, map[string]string{"ip": addr})
	}
	// These addresses resolve through networks whose record names another IP.
	networkPairs := map[string]string{
		"::2:0:1":  "::2:0:0",
		"::2:0:33": "::2:0:0",
		"::2:0:39": "::2:0:0",
		"::2:0:41": "::2:0:40",
		"::2:0:49": "::2:0:40",
		"::2:0:52": "::2:0:50",
		"::2:0:57": "::2:0:50",
		"::2:0:59": "::2:0:58",
	}
	for lookupAddr, recordAddr := range networkPairs {
		var record map[string]string
		lookupErr := reader.Lookup(net.ParseIP(lookupAddr), &record)
		assert.NoError(t, lookupErr, "unexpected error while doing lookup: %v", lookupErr)
		assert.Equal(t, record, map[string]string{"ip": recordAddr})
	}
	// Addresses with no record at all must yield a nil result.
	for _, missingAddr := range []string{"1.1.1.33", "255.254.253.123", "89fa::"} {
		var record map[string]string
		lookupErr := reader.Lookup(net.ParseIP(missingAddr), &record)
		assert.NoError(t, lookupErr, "unexpected error while doing lookup: %v", lookupErr)
		assert.Nil(t, record)
	}
}
// BenchmarkLookup measures a full map decode into interface{} for random
// IPv4 addresses against the GeoLite2 City database.
func BenchmarkLookup(b *testing.B) {
	db, err := Open("GeoLite2-City.mmdb")
	require.NoError(b, err)
	// Time-seeded source: addresses differ between benchmark runs.
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	var result interface{}
	// Reuse one 4-byte IP buffer so the loop measures Lookup, not allocation.
	ip := make(net.IP, 4)
	for i := 0; i < b.N; i++ {
		randomIPv4Address(r, ip)
		err = db.Lookup(ip, &result)
		assert.NoError(b, err)
	}
	assert.NoError(b, db.Close(), "error on close")
}
// BenchmarkCountryCode measures decoding only the country ISO code (a
// minimal struct) for random IPv4 addresses, for comparison with the full
// decode in BenchmarkLookup.
func BenchmarkCountryCode(b *testing.B) {
	db, err := Open("GeoLite2-City.mmdb")
	require.NoError(b, err)
	// MinCountry extracts only country.iso_code from the record.
	type MinCountry struct {
		Country struct {
			IsoCode string `maxminddb:"iso_code"`
		} `maxminddb:"country"`
	}
	// Fixed seed: the same address sequence on every run.
	r := rand.New(rand.NewSource(0))
	var result MinCountry
	ip := make(net.IP, 4)
	for i := 0; i < b.N; i++ {
		randomIPv4Address(r, ip)
		err = db.Lookup(ip, &result)
		assert.NoError(b, err)
	}
	assert.NoError(b, db.Close(), "error on close")
}
func randomIPv4Address(r *rand.Rand, ip []byte) {
num := r.Uint32()
ip[0] = byte(num >> 24)
ip[1] = byte(num >> 16)
ip[2] = byte(num >> 8)
ip[3] = byte(num)
}
func testFile(file string) string {
return filepath.Join("test-data/test-data", file)
}
|
package checkerlution
import (
"fmt"
"github.com/couchbaselabs/logg"
cbot "github.com/tleyden/checkers-bot"
core "github.com/tleyden/checkers-core"
ng "github.com/tleyden/neurgo"
)
// OperationMode selects how the bot reacts when a game finishes.
type OperationMode int
const (
	// RUNNING_MODE keeps playing games indefinitely.
	RUNNING_MODE = iota
	// TRAINING_MODE quits after one game and records a fitness score.
	TRAINING_MODE
)
// Checkerlution is a checkers-bot "thinker" backed by a neurgo neural net.
// The current* fields stage board data for the sensor closures, and
// latestActuatorOutput receives the net's output after each sync.
type Checkerlution struct {
	ourTeamId            cbot.TeamType
	cortex               *ng.Cortex
	currentGameState     GameStateVector
	currentBoard         core.Board
	currentPossibleMove  ValidMoveCortexInput
	latestActuatorOutput []float64
	mode                 OperationMode
	latestFitnessScore   float64
}
// CheckerlutionFlags bundles the checkers-bot CLI flags with the name of
// the population this bot belongs to.
type CheckerlutionFlags struct {
	CheckersBotFlags cbot.CheckersBotFlags
	PopulationName   string
}
// Start builds a fresh cortex for the given team and launches it.
func (c *Checkerlution) Start(ourTeamId cbot.TeamType) {
	c.ourTeamId = ourTeamId
	c.CreateNeurgoCortex()
	c.cortex.Run()
}
// StartWithCortex adopts an existing cortex (e.g. one evolved elsewhere),
// rewires its sensor/actuator closures to this bot, and launches it.
func (c *Checkerlution) StartWithCortex(cortex *ng.Cortex, ourTeamId cbot.TeamType) {
	c.ourTeamId = ourTeamId
	c.cortex = cortex
	c.setSensorActuatorFunctions(cortex)
	cortex.Run()
}
// Think chooses our next move for the given game state. It returns ok=false
// when our team has no valid moves left.
func (c *Checkerlution) Think(gameState cbot.GameState) (bestMove cbot.ValidMove, ok bool) {
	ok = true
	ourTeam := gameState.Teams[c.ourTeamId]
	allValidMoves := ourTeam.AllValidMoves()
	if len(allValidMoves) > 0 {
		// convert into core.board representation
		board := gameState.Export()
		logg.LogTo("DEBUG", "Before team %v move %v", c.ourTeamId.String(), board.CompactString(true))
		// generate best move (will be a core.move) -- initially, pick random
		move := c.generateBestMove(board)
		// search allValidMoves to find corresponding valid move
		found, bestValidMoveIndex := cbot.CorrespondingValidMoveIndex(move, allValidMoves)
		if !found {
			// The minimax result must map back onto one of the framework's
			// valid moves; anything else is a programming error.
			msg := "Could not find corresponding valid move: %v in %v"
			logg.LogPanic(msg, move, allValidMoves)
		} else {
			bestMove = allValidMoves[bestValidMoveIndex]
		}
		// this is just for debugging purposes
		player := cbot.GetCorePlayer(c.ourTeamId)
		boardPostMove := board.ApplyMove(player, move)
		logg.LogTo("DEBUG", "After team %v move %v", c.ourTeamId.String(), boardPostMove.CompactString(true))
		return
	} else {
		ok = false
	}
	return
}
// GameFinished reports whether the bot should stop after this game.
// In training mode it also records the fitness score for the finished game.
func (c *Checkerlution) GameFinished(gameState cbot.GameState) (shouldQuit bool) {
	if c.mode == TRAINING_MODE {
		c.latestFitnessScore = c.calculateFitness(gameState)
		return true
	}
	return false
}
// Cortex returns the neural net backing this bot.
func (c Checkerlution) Cortex() *ng.Cortex {
	return c.cortex
}
// SetMode switches the bot between RUNNING_MODE and TRAINING_MODE.
func (c *Checkerlution) SetMode(mode OperationMode) {
	c.mode = mode
}
// calculateFitness scores the finished game from our team's perspective:
// +1 for a win, -2 for a loss, 0 for a draw.
func (c Checkerlution) calculateFitness(gameState cbot.GameState) (fitness float64) {
	winner := gameState.WinningTeam
	if winner == c.ourTeamId {
		logg.LogTo("CHECKERLUTION", "calculateFitness based on winning. Turn: %v", gameState.Turn)
		return 1.0
	}
	if winner == c.ourTeamId.Opponent() {
		logg.LogTo("CHECKERLUTION", "calculateFitness based on losing. Turn: %v", gameState.Turn)
		return -2.0
	}
	logg.LogTo("CHECKERLUTION", "calculateFitness based on draw. Turn: %v", gameState.Turn)
	return 0.0
}
// CreateNeurgoCortex builds a minimal net from scratch: the two scalar
// differential sensors wired directly into a single output neuron, which
// feeds one actuator. The hidden-layer construction is currently disabled.
func (c *Checkerlution) CreateNeurgoCortex() {
	uuid := ng.NewUuid()
	cortexUuid := fmt.Sprintf("cortex-%s", uuid)
	nodeId := ng.NewCortexId(cortexUuid)
	c.cortex = &ng.Cortex{
		NodeId: nodeId,
	}
	c.cortex.Init()
	c.CreateSensors()
	outputNeuron := c.CreateOutputNeuron()
	// layer1Neurons := c.CreateHiddenLayer1Neurons(outputNeuron)
	// layer2Neurons := c.CreateHiddenLayer2Neurons(layer1Neurons, outputNeuron)
	// combine all into single slice and add neurons to cortex
	neurons := []*ng.Neuron{}
	// neurons = append(neurons, layer1Neurons...)
	// neurons = append(neurons, layer2Neurons...)
	neurons = append(neurons, outputNeuron)
	c.cortex.SetNeurons(neurons)
	actuator := c.CreateActuator()
	// workaround for error
	// Cannot make outbound connection, dataChan == nil [recovered]
	// (the second Init must run before the final wiring below)
	c.cortex.Init()
	outputNeuron.ConnectOutbound(actuator)
	actuator.ConnectInbound(outputNeuron)
}
// LoadNeurgoCortex deserializes a cortex from a JSON file and rewires its
// sensor/actuator closures to this bot. Panics if the file cannot be read.
func (c *Checkerlution) LoadNeurgoCortex(filename string) {
	cortex, err := ng.NewCortexFromJSONFile(filename)
	if err != nil {
		logg.LogPanic("Error reading cortex from: %v. Err: %v", filename, err)
	}
	c.cortex = cortex
	c.setSensorActuatorFunctions(cortex)
}
// setSensorActuatorFunctions re-attaches the Go closures that feed this
// bot's staged game state into the cortex and capture its output. The
// function pointers are not carried with a serialized/shared cortex, so
// they must be restored here for every adopted cortex.
func (c *Checkerlution) setSensorActuatorFunctions(cortex *ng.Cortex) {
	// Bug fix: the sensors were previously never rewired, so they fed no
	// live data and every game played out identically.
	// sensor := cortex.FindSensor(ng.NewSensorId("SensorGameState", 0))
	// sensor.SensorFunction = c.sensorFuncGameState()
	sensorPiece := cortex.FindSensor(ng.NewSensorId("SensorPieceDifferential", 0))
	sensorPiece.SensorFunction = c.sensorFuncPieceDifferential()
	sensorKings := cortex.FindSensor(ng.NewSensorId("SensorKingsDifferential", 0))
	sensorKings.SensorFunction = c.sensorFuncKingsDifferential()
	actuator := cortex.FindActuator(ng.NewActuatorId("Actuator", 0))
	actuator.ActuatorFunction = c.actuatorFunc()
}
// CreateHiddenLayer1Neurons builds 40 tanh neurons at layer 0.25, each
// fully connected to every sensor with random weights. Currently unused
// (the calls in CreateNeurgoCortex are commented out).
func (c *Checkerlution) CreateHiddenLayer1Neurons(outputNeuron *ng.Neuron) []*ng.Neuron {
	cortex := c.cortex
	neurons := []*ng.Neuron{}
	layerIndex := 0.25
	for i := 0; i < 40; i++ {
		name := fmt.Sprintf("hidden-layer-%f-n-%d", layerIndex, i)
		neuron := &ng.Neuron{
			ActivationFunction: ng.EncodableTanh(),
			NodeId:             ng.NewNeuronId(name, layerIndex),
			Bias:               0.0,
		}
		// Workaround for error:
		// Cannot make outbound connection, dataChan == nil [recovered]
		neuron.Init()
		for _, sensor := range cortex.Sensors {
			sensor.ConnectOutbound(neuron)
			weights := ng.RandomWeights(sensor.VectorLength)
			neuron.ConnectInboundWeighted(sensor, weights)
		}
		neurons = append(neurons, neuron)
	}
	return neurons
}
// CreateHiddenLayer2Neurons builds 10 tanh neurons at layer 0.35, fully
// connected from every layer-1 neuron and into the output neuron, all with
// random weights. Currently unused (see CreateNeurgoCortex).
func (c *Checkerlution) CreateHiddenLayer2Neurons(layer1Neurons []*ng.Neuron, outputNeuron *ng.Neuron) []*ng.Neuron {
	neurons := []*ng.Neuron{}
	layerIndex := 0.35
	for i := 0; i < 10; i++ {
		name := fmt.Sprintf("hidden-layer-%f-n-%d", layerIndex, i)
		neuron := &ng.Neuron{
			ActivationFunction: ng.EncodableTanh(),
			NodeId:             ng.NewNeuronId(name, layerIndex),
			Bias:               0.0,
		}
		// Workaround for error:
		// Cannot make outbound connection, dataChan == nil [recovered]
		neuron.Init()
		for _, layer1Neuron := range layer1Neurons {
			layer1Neuron.ConnectOutbound(neuron)
			weights := ng.RandomWeights(1)
			neuron.ConnectInboundWeighted(layer1Neuron, weights)
		}
		// connect to output neuron
		neuron.ConnectOutbound(outputNeuron)
		weights := ng.RandomWeights(1)
		outputNeuron.ConnectInboundWeighted(neuron, weights)
		neurons = append(neurons, neuron)
	}
	return neurons
}
// CreateOutputNeuron builds the single tanh output neuron (layer 0.45) and
// connects every registered sensor straight into it with random weights.
func (c *Checkerlution) CreateOutputNeuron() *ng.Neuron {
	layerIndex := 0.45
	neuron := &ng.Neuron{
		ActivationFunction: ng.EncodableTanh(),
		NodeId:             ng.NewNeuronId("OutputNeuron", layerIndex),
		Bias:               0.0,
	}
	// Workaround for error:
	// Cannot make outbound connection, dataChan == nil [recovered]
	// The best fix is to just load nn from json
	neuron.Init()
	// connect sensors directly to output neuron
	for _, sensor := range c.cortex.Sensors {
		sensor.ConnectOutbound(neuron)
		weights := ng.RandomWeights(sensor.VectorLength)
		neuron.ConnectInboundWeighted(sensor, weights)
	}
	return neuron
}
// CreateActuator builds the single-output actuator, registers it on the
// cortex, and returns it for wiring.
func (c *Checkerlution) CreateActuator() *ng.Actuator {
	actuator := &ng.Actuator{
		NodeId:           ng.NewActuatorId("Actuator", 0.5),
		VectorLength:     1,
		ActuatorFunction: c.actuatorFunc(),
	}
	c.cortex.SetActuators([]*ng.Actuator{actuator})
	return actuator
}
// actuatorFunc returns a closure that stores the net's output vector on c.
func (c *Checkerlution) actuatorFunc() ng.ActuatorFunction {
	return func(outputVector []float64) {
		c.latestActuatorOutput = outputVector
	}
}
// CreateSensors registers the two scalar sensors (piece differential and
// kings differential) on the cortex. The 32-element full-board sensor is
// currently disabled.
func (c *Checkerlution) CreateSensors() {
	sensorLayer := 0.0
	/*
		sensorGameStateNodeId := ng.NewSensorId("SensorGameState", sensorLayer)
		sensorGameState := &ng.Sensor{
			NodeId:         sensorGameStateNodeId,
			VectorLength:   32,
			SensorFunction: c.sensorFuncGameState(),
		}
	*/
	pieceDifferentialNodeId := ng.NewSensorId("SensorPieceDifferential", sensorLayer)
	sensorPieceDifferential := &ng.Sensor{
		NodeId:         pieceDifferentialNodeId,
		VectorLength:   1,
		SensorFunction: c.sensorFuncPieceDifferential(),
	}
	kingsDifferentialNodeId := ng.NewSensorId("SensorKingsDifferential", sensorLayer)
	sensorKingsDifferential := &ng.Sensor{
		NodeId:         kingsDifferentialNodeId,
		VectorLength:   1,
		SensorFunction: c.sensorFuncKingsDifferential(),
	}
	sensors := []*ng.Sensor{
		// sensorGameState,
		sensorPieceDifferential,
		sensorKingsDifferential,
	}
	c.cortex.SetSensors(sensors)
}
// sensorFuncGameState returns a closure exposing the staged full-board game
// state vector; it panics if no game state has been staged yet.
func (c *Checkerlution) sensorFuncGameState() ng.SensorFunction {
	return func(syncCounter int) []float64 {
		if len(c.currentGameState) == 0 {
			logg.LogPanic("sensor would return invalid gamestate")
		}
		return c.currentGameState
	}
}
// sensorFuncPieceDifferential returns a closure producing the 1-element
// weighted piece-count differential for our player on the staged board.
func (c *Checkerlution) sensorFuncPieceDifferential() ng.SensorFunction {
	return func(syncCounter int) []float64 {
		us := cbot.GetCorePlayer(c.ourTeamId)
		return []float64{c.currentBoard.WeightedScorePiecesOnly(us)}
	}
}
// sensorFuncKingsDifferential returns a closure producing the 1-element
// weighted kings differential for our player on the staged board.
func (c *Checkerlution) sensorFuncKingsDifferential() ng.SensorFunction {
	return func(syncCounter int) []float64 {
		us := cbot.GetCorePlayer(c.ourTeamId)
		return []float64{c.currentBoard.WeightedScoreKingsOnly(us)}
	}
}
// sensorFuncPossibleMove returns a closure exposing the vector encoding of
// the move currently under consideration.
func (c *Checkerlution) sensorFuncPossibleMove() ng.SensorFunction {
	return func(syncCounter int) []float64 {
		move := c.currentPossibleMove
		return move.VectorRepresentation()
	}
}
// Stop is part of the checkers-bot interface; this implementation holds no
// resources to release, so it is a no-op.
func (c Checkerlution) Stop() {
}
// generateBestMove runs a fixed-depth minimax search over the board using
// the neural net as the leaf evaluation function. counter tracks how many
// boards were evaluated, for the debug log only.
func (c *Checkerlution) generateBestMove(board core.Board) core.Move {
	counter := 0
	evalFunc := c.getEvaluationFunction(&counter)
	player := cbot.GetCorePlayer(c.ourTeamId)
	// with depth = 5, not working too well on first move. when
	// there are only a few pieces on the board it seems to work,
	// but with full board .. taking a long time.
	depth := 4 // TODO: crank this up higher
	bestMove, scorePostMove := board.Minimax(player, depth, evalFunc)
	logg.LogTo("DEBUG", "scorePostMove: %v. boards eval'd: %v", scorePostMove, counter)
	return bestMove
}
// getEvaluationFunction returns a minimax board-evaluation closure that
// scores a board by staging it for the sensors, syncing the cortex, and
// reading the actuator output. *counter is incremented per evaluated board.
func (c *Checkerlution) getEvaluationFunction(counter *int) core.EvaluationFunction {
	evalFunc := func(currentPlayer core.Player, board core.Board) float64 {
		*counter += 1
		// convert the board into inputs for the neural net (32 elt vector)
		// taking into account whether this player is "us" or not
		gameStateVector := NewGameStateVector()
		gameStateVector.loadFromBoard(board, currentPlayer)
		// send input to the neural net
		c.currentGameState = gameStateVector
		c.currentBoard = board
		c.cortex.SyncSensors()
		c.cortex.SyncActuators()
		// return output
		return c.latestActuatorOutput[0]
	}
	return evalFunc
}
Fix bug that was causing all games to be identical, ending after 59 moves. Looks like sensors weren't hooked up.
package checkerlution
import (
"fmt"
"github.com/couchbaselabs/logg"
cbot "github.com/tleyden/checkers-bot"
core "github.com/tleyden/checkers-core"
ng "github.com/tleyden/neurgo"
)
// OperationMode selects how the bot reacts when a game finishes.
type OperationMode int
const (
	// RUNNING_MODE keeps playing games indefinitely.
	RUNNING_MODE = iota
	// TRAINING_MODE quits after one game and records a fitness score.
	TRAINING_MODE
)
// Checkerlution is a checkers-bot "thinker" backed by a neurgo neural net.
// The current* fields stage board data for the sensor closures, and
// latestActuatorOutput receives the net's output after each sync.
type Checkerlution struct {
	ourTeamId            cbot.TeamType
	cortex               *ng.Cortex
	currentGameState     GameStateVector
	currentBoard         core.Board
	currentPossibleMove  ValidMoveCortexInput
	latestActuatorOutput []float64
	mode                 OperationMode
	latestFitnessScore   float64
}
// CheckerlutionFlags bundles the checkers-bot CLI flags with the name of
// the population this bot belongs to.
type CheckerlutionFlags struct {
	CheckersBotFlags cbot.CheckersBotFlags
	PopulationName   string
}
// Start builds a fresh cortex for the given team and launches it.
func (c *Checkerlution) Start(ourTeamId cbot.TeamType) {
	c.ourTeamId = ourTeamId
	c.CreateNeurgoCortex()
	c.cortex.Run()
}
// StartWithCortex adopts an existing cortex (e.g. one evolved elsewhere),
// rewires its sensor/actuator closures to this bot, and launches it.
func (c *Checkerlution) StartWithCortex(cortex *ng.Cortex, ourTeamId cbot.TeamType) {
	c.ourTeamId = ourTeamId
	c.cortex = cortex
	c.setSensorActuatorFunctions(cortex)
	cortex.Run()
}
// Think chooses our next move for the given game state. It returns ok=false
// when our team has no valid moves left.
func (c *Checkerlution) Think(gameState cbot.GameState) (bestMove cbot.ValidMove, ok bool) {
	ok = true
	ourTeam := gameState.Teams[c.ourTeamId]
	allValidMoves := ourTeam.AllValidMoves()
	if len(allValidMoves) > 0 {
		// convert into core.board representation
		board := gameState.Export()
		logg.LogTo("DEBUG", "Before team %v move %v", c.ourTeamId.String(), board.CompactString(true))
		// generate best move (will be a core.move) -- initially, pick random
		move := c.generateBestMove(board)
		// search allValidMoves to find corresponding valid move
		found, bestValidMoveIndex := cbot.CorrespondingValidMoveIndex(move, allValidMoves)
		if !found {
			// The minimax result must map back onto one of the framework's
			// valid moves; anything else is a programming error.
			msg := "Could not find corresponding valid move: %v in %v"
			logg.LogPanic(msg, move, allValidMoves)
		} else {
			bestMove = allValidMoves[bestValidMoveIndex]
		}
		// this is just for debugging purposes
		player := cbot.GetCorePlayer(c.ourTeamId)
		boardPostMove := board.ApplyMove(player, move)
		logg.LogTo("DEBUG", "After team %v move %v", c.ourTeamId.String(), boardPostMove.CompactString(true))
		return
	} else {
		ok = false
	}
	return
}
// GameFinished reports whether the bot should stop after this game.
// In training mode it also records the fitness score for the finished game.
func (c *Checkerlution) GameFinished(gameState cbot.GameState) (shouldQuit bool) {
	if c.mode == TRAINING_MODE {
		c.latestFitnessScore = c.calculateFitness(gameState)
		return true
	}
	return false
}
// Cortex returns the neural net backing this bot.
func (c Checkerlution) Cortex() *ng.Cortex {
	return c.cortex
}
// SetMode switches the bot between RUNNING_MODE and TRAINING_MODE.
func (c *Checkerlution) SetMode(mode OperationMode) {
	c.mode = mode
}
// calculateFitness scores the finished game from our team's perspective:
// +1 for a win, -2 for a loss, 0 for a draw.
func (c Checkerlution) calculateFitness(gameState cbot.GameState) (fitness float64) {
	winner := gameState.WinningTeam
	if winner == c.ourTeamId {
		logg.LogTo("CHECKERLUTION", "calculateFitness based on winning. Turn: %v", gameState.Turn)
		return 1.0
	}
	if winner == c.ourTeamId.Opponent() {
		logg.LogTo("CHECKERLUTION", "calculateFitness based on losing. Turn: %v", gameState.Turn)
		return -2.0
	}
	logg.LogTo("CHECKERLUTION", "calculateFitness based on draw. Turn: %v", gameState.Turn)
	return 0.0
}
// CreateNeurgoCortex builds a minimal net from scratch: the two scalar
// differential sensors wired directly into a single output neuron, which
// feeds one actuator. The hidden-layer construction is currently disabled.
func (c *Checkerlution) CreateNeurgoCortex() {
	uuid := ng.NewUuid()
	cortexUuid := fmt.Sprintf("cortex-%s", uuid)
	nodeId := ng.NewCortexId(cortexUuid)
	c.cortex = &ng.Cortex{
		NodeId: nodeId,
	}
	c.cortex.Init()
	c.CreateSensors()
	outputNeuron := c.CreateOutputNeuron()
	// layer1Neurons := c.CreateHiddenLayer1Neurons(outputNeuron)
	// layer2Neurons := c.CreateHiddenLayer2Neurons(layer1Neurons, outputNeuron)
	// combine all into single slice and add neurons to cortex
	neurons := []*ng.Neuron{}
	// neurons = append(neurons, layer1Neurons...)
	// neurons = append(neurons, layer2Neurons...)
	neurons = append(neurons, outputNeuron)
	c.cortex.SetNeurons(neurons)
	actuator := c.CreateActuator()
	// workaround for error
	// Cannot make outbound connection, dataChan == nil [recovered]
	// (the second Init must run before the final wiring below)
	c.cortex.Init()
	outputNeuron.ConnectOutbound(actuator)
	actuator.ConnectInbound(outputNeuron)
}
// LoadNeurgoCortex deserializes a cortex from a JSON file and rewires its
// sensor/actuator closures to this bot. Panics if the file cannot be read.
func (c *Checkerlution) LoadNeurgoCortex(filename string) {
	cortex, err := ng.NewCortexFromJSONFile(filename)
	if err != nil {
		logg.LogPanic("Error reading cortex from: %v. Err: %v", filename, err)
	}
	c.cortex = cortex
	c.setSensorActuatorFunctions(cortex)
}
// setSensorActuatorFunctions re-attaches the Go closures that feed this
// bot's staged game state into the cortex and capture its output. The
// function pointers are not carried with a serialized/shared cortex, so
// they must be restored here for every adopted cortex.
func (c *Checkerlution) setSensorActuatorFunctions(cortex *ng.Cortex) {
	// TODO: accomplish this in less brittle way
	// sensor := cortex.FindSensor(ng.NewSensorId("SensorGameState", 0))
	// sensor.SensorFunction = c.sensorFuncGameState()
	sensorPiece := cortex.FindSensor(ng.NewSensorId("SensorPieceDifferential", 0))
	sensorPiece.SensorFunction = c.sensorFuncPieceDifferential()
	sensorKings := cortex.FindSensor(ng.NewSensorId("SensorKingsDifferential", 0))
	sensorKings.SensorFunction = c.sensorFuncKingsDifferential()
	actuator := cortex.FindActuator(ng.NewActuatorId("Actuator", 0))
	actuator.ActuatorFunction = c.actuatorFunc()
}
// CreateHiddenLayer1Neurons builds 40 tanh neurons at layer 0.25, each
// fully connected to every sensor with random weights. Currently unused
// (the calls in CreateNeurgoCortex are commented out).
func (c *Checkerlution) CreateHiddenLayer1Neurons(outputNeuron *ng.Neuron) []*ng.Neuron {
	cortex := c.cortex
	neurons := []*ng.Neuron{}
	layerIndex := 0.25
	for i := 0; i < 40; i++ {
		name := fmt.Sprintf("hidden-layer-%f-n-%d", layerIndex, i)
		neuron := &ng.Neuron{
			ActivationFunction: ng.EncodableTanh(),
			NodeId:             ng.NewNeuronId(name, layerIndex),
			Bias:               0.0,
		}
		// Workaround for error:
		// Cannot make outbound connection, dataChan == nil [recovered]
		neuron.Init()
		for _, sensor := range cortex.Sensors {
			sensor.ConnectOutbound(neuron)
			weights := ng.RandomWeights(sensor.VectorLength)
			neuron.ConnectInboundWeighted(sensor, weights)
		}
		neurons = append(neurons, neuron)
	}
	return neurons
}
// CreateHiddenLayer2Neurons builds 10 tanh neurons at layer 0.35, fully
// connected from every layer-1 neuron and into the output neuron, all with
// random weights. Currently unused (see CreateNeurgoCortex).
func (c *Checkerlution) CreateHiddenLayer2Neurons(layer1Neurons []*ng.Neuron, outputNeuron *ng.Neuron) []*ng.Neuron {
	neurons := []*ng.Neuron{}
	layerIndex := 0.35
	for i := 0; i < 10; i++ {
		name := fmt.Sprintf("hidden-layer-%f-n-%d", layerIndex, i)
		neuron := &ng.Neuron{
			ActivationFunction: ng.EncodableTanh(),
			NodeId:             ng.NewNeuronId(name, layerIndex),
			Bias:               0.0,
		}
		// Workaround for error:
		// Cannot make outbound connection, dataChan == nil [recovered]
		neuron.Init()
		for _, layer1Neuron := range layer1Neurons {
			layer1Neuron.ConnectOutbound(neuron)
			weights := ng.RandomWeights(1)
			neuron.ConnectInboundWeighted(layer1Neuron, weights)
		}
		// connect to output neuron
		neuron.ConnectOutbound(outputNeuron)
		weights := ng.RandomWeights(1)
		outputNeuron.ConnectInboundWeighted(neuron, weights)
		neurons = append(neurons, neuron)
	}
	return neurons
}
// CreateOutputNeuron builds the single tanh output neuron (layer 0.45) and
// connects every registered sensor straight into it with random weights.
func (c *Checkerlution) CreateOutputNeuron() *ng.Neuron {
	layerIndex := 0.45
	neuron := &ng.Neuron{
		ActivationFunction: ng.EncodableTanh(),
		NodeId:             ng.NewNeuronId("OutputNeuron", layerIndex),
		Bias:               0.0,
	}
	// Workaround for error:
	// Cannot make outbound connection, dataChan == nil [recovered]
	// The best fix is to just load nn from json
	neuron.Init()
	// connect sensors directly to output neuron
	for _, sensor := range c.cortex.Sensors {
		sensor.ConnectOutbound(neuron)
		weights := ng.RandomWeights(sensor.VectorLength)
		neuron.ConnectInboundWeighted(sensor, weights)
	}
	return neuron
}
// CreateActuator builds the single-output actuator, registers it on the
// cortex, and returns it for wiring.
func (c *Checkerlution) CreateActuator() *ng.Actuator {
	actuator := &ng.Actuator{
		NodeId:           ng.NewActuatorId("Actuator", 0.5),
		VectorLength:     1,
		ActuatorFunction: c.actuatorFunc(),
	}
	c.cortex.SetActuators([]*ng.Actuator{actuator})
	return actuator
}
// actuatorFunc returns a closure that stores the net's output vector on c.
func (c *Checkerlution) actuatorFunc() ng.ActuatorFunction {
	return func(outputVector []float64) {
		c.latestActuatorOutput = outputVector
	}
}
// CreateSensors registers the two scalar sensors (piece differential and
// kings differential) on the cortex. The 32-element full-board sensor is
// currently disabled.
func (c *Checkerlution) CreateSensors() {
	sensorLayer := 0.0
	/*
		sensorGameStateNodeId := ng.NewSensorId("SensorGameState", sensorLayer)
		sensorGameState := &ng.Sensor{
			NodeId:         sensorGameStateNodeId,
			VectorLength:   32,
			SensorFunction: c.sensorFuncGameState(),
		}
	*/
	pieceDifferentialNodeId := ng.NewSensorId("SensorPieceDifferential", sensorLayer)
	sensorPieceDifferential := &ng.Sensor{
		NodeId:         pieceDifferentialNodeId,
		VectorLength:   1,
		SensorFunction: c.sensorFuncPieceDifferential(),
	}
	kingsDifferentialNodeId := ng.NewSensorId("SensorKingsDifferential", sensorLayer)
	sensorKingsDifferential := &ng.Sensor{
		NodeId:         kingsDifferentialNodeId,
		VectorLength:   1,
		SensorFunction: c.sensorFuncKingsDifferential(),
	}
	sensors := []*ng.Sensor{
		// sensorGameState,
		sensorPieceDifferential,
		sensorKingsDifferential,
	}
	c.cortex.SetSensors(sensors)
}
// sensorFuncGameState returns a closure exposing the staged full-board game
// state vector; it panics if no game state has been staged yet.
func (c *Checkerlution) sensorFuncGameState() ng.SensorFunction {
	return func(syncCounter int) []float64 {
		if len(c.currentGameState) == 0 {
			logg.LogPanic("sensor would return invalid gamestate")
		}
		return c.currentGameState
	}
}
// sensorFuncPieceDifferential returns a closure producing the 1-element
// weighted piece-count differential for our player on the staged board.
func (c *Checkerlution) sensorFuncPieceDifferential() ng.SensorFunction {
	return func(syncCounter int) []float64 {
		us := cbot.GetCorePlayer(c.ourTeamId)
		return []float64{c.currentBoard.WeightedScorePiecesOnly(us)}
	}
}
// sensorFuncKingsDifferential returns a closure producing the 1-element
// weighted kings differential for our player on the staged board.
func (c *Checkerlution) sensorFuncKingsDifferential() ng.SensorFunction {
	return func(syncCounter int) []float64 {
		us := cbot.GetCorePlayer(c.ourTeamId)
		return []float64{c.currentBoard.WeightedScoreKingsOnly(us)}
	}
}
// sensorFuncPossibleMove returns a closure exposing the vector encoding of
// the move currently under consideration.
func (c *Checkerlution) sensorFuncPossibleMove() ng.SensorFunction {
	return func(syncCounter int) []float64 {
		move := c.currentPossibleMove
		return move.VectorRepresentation()
	}
}
// Stop is part of the checkers-bot interface; this implementation holds no
// resources to release, so it is a no-op.
func (c Checkerlution) Stop() {
}
// generateBestMove runs a fixed-depth minimax search over the board using
// the neural net as the leaf evaluation function. counter tracks how many
// boards were evaluated, for the debug log only.
func (c *Checkerlution) generateBestMove(board core.Board) core.Move {
	counter := 0
	evalFunc := c.getEvaluationFunction(&counter)
	player := cbot.GetCorePlayer(c.ourTeamId)
	// with depth = 5, not working too well on first move. when
	// there are only a few pieces on the board it seems to work,
	// but with full board .. taking a long time.
	depth := 4 // TODO: crank this up higher
	bestMove, scorePostMove := board.Minimax(player, depth, evalFunc)
	logg.LogTo("DEBUG", "scorePostMove: %v. boards eval'd: %v", scorePostMove, counter)
	return bestMove
}
// getEvaluationFunction returns a minimax board-evaluation closure that
// scores a board by staging it for the sensors, syncing the cortex, and
// reading the actuator output. *counter is incremented per evaluated board.
func (c *Checkerlution) getEvaluationFunction(counter *int) core.EvaluationFunction {
	evalFunc := func(currentPlayer core.Player, board core.Board) float64 {
		*counter += 1
		// convert the board into inputs for the neural net (32 elt vector)
		// taking into account whether this player is "us" or not
		gameStateVector := NewGameStateVector()
		gameStateVector.loadFromBoard(board, currentPlayer)
		// send input to the neural net
		c.currentGameState = gameStateVector
		c.currentBoard = board
		c.cortex.SyncSensors()
		c.cortex.SyncActuators()
		// return output
		return c.latestActuatorOutput[0]
	}
	return evalFunc
}
|
package gonatra
import (
"net/http"
"testing"
)
// TestBuildRequest checks that buildRequest retains the raw http.Request
// and extracts the route's named path parameters.
func TestBuildRequest(t *testing.T) {
	route := Route{"/foo/:id/bar/:bar_id", HTTP_GET, nil, nil}
	url := "http://example.com/foo/123/bar/456"
	httpReq, err := http.NewRequest(HTTP_GET, url, nil)
	if err != nil {
		t.Errorf("Something went wrong while creating fake request to %s", url)
	}
	req := buildRequest(httpReq, &route)
	// The wrapped request must retain the underlying *http.Request.
	if req.HttpRequest == nil {
		t.Errorf("expected HttpRequest to be set but got nil")
	}
	// Params must be populated from the route's named segments.
	if req.Params == nil {
		t.Errorf("expected Params to be set but got nil")
	} else {
		idValue, barIdValue := req.Params["id"], req.Params["bar_id"]
		if idValue != "123" {
			t.Errorf(`expected param "id" to be "123" but got %s`, idValue)
		}
		if barIdValue != "456" {
			t.Errorf(`expected param "bar_id" to be "456" but got %s`, barIdValue)
		}
	}
}
// TestGetParams checks that getParams merges named path segments with the
// query string parameters.
func TestGetParams(t *testing.T) {
	url := "http://example.com/users/123/articles/456/comments/789?foo=bar&lolz=katz"
	request, err := http.NewRequest(HTTP_GET, url, nil)
	if err != nil {
		t.Errorf("Something went wrong while creating fake request to %s", url)
	}
	route := Route{"/users/:id/articles/:article_id/comments/:comment_id", HTTP_GET, nil, nil}
	params := getParams(&route, request)
	wanted := map[string]string{
		"id":         "123",
		"article_id": "456",
		"comment_id": "789",
		"foo":        "bar",
		"lolz":       "katz",
	}
	for key, want := range wanted {
		got, present := params[key]
		// Test that the key is present.
		if !present {
			t.Errorf("expected key %s to be present in params map.", key)
			continue
		}
		// Test that the key holds the proper value.
		if want != got {
			t.Errorf("expected key %s to have value %s but got %s", key, want, got)
		}
	}
}
Simplifying tests.
package gonatra
import (
"net/http"
"testing"
)
// TestBuildRequest checks that buildRequest retains the raw http.Request
// and merges path parameters with query string parameters.
func TestBuildRequest(t *testing.T) {
	wanted := map[string]string{
		"id":     "123",
		"bar_id": "456",
		"tepote": "foo",
		"lang":   "en",
	}
	route := Route{"/foo/:id/bar/:bar_id", HTTP_GET, nil, nil}
	url := "http://example.com/foo/123/bar/456?tepote=foo&lang=en"
	httpReq, err := http.NewRequest(HTTP_GET, url, nil)
	if err != nil {
		t.Errorf("Something went wrong while creating fake request to %s", url)
	}
	request := buildRequest(httpReq, &route)
	// The wrapped request must retain the underlying *http.Request.
	if request.HttpRequest == nil {
		t.Errorf("expected HttpRequest to be set but got nil")
	}
	// Params must contain path segments and query values alike.
	if request.Params == nil {
		t.Errorf("expected Params to be set but got nil")
	} else {
		for name, want := range wanted {
			if got := request.Params[name]; got != want {
				t.Errorf(`expected param "%s" to be "%s" but got %s`, name, want, got)
			}
		}
	}
}
// TestGetParams checks that getParams merges named path segments with the
// query string parameters.
func TestGetParams(t *testing.T) {
	url := "http://example.com/users/123/articles/456/comments/789?foo=bar&lolz=katz"
	request, err := http.NewRequest(HTTP_GET, url, nil)
	if err != nil {
		t.Errorf("Something went wrong while creating fake request to %s", url)
	}
	route := Route{"/users/:id/articles/:article_id/comments/:comment_id", HTTP_GET, nil, nil}
	params := getParams(&route, request)
	wanted := map[string]string{
		"id":         "123",
		"article_id": "456",
		"comment_id": "789",
		"foo":        "bar",
		"lolz":       "katz",
	}
	for name, want := range wanted {
		got, present := params[name]
		// Test that the key is present.
		if !present {
			t.Errorf("expected param %s to be present in params map.", name)
			continue
		}
		// Test that the key holds the proper value.
		if want != got {
			t.Errorf("expected param %s to have value %s but got %s", name, want, got)
		}
	}
}
|
// Package that provides chronobiology functions to analyse time series data
package chronobiology
import (
"time"
"math"
"errors"
)
/* BEGIN INTERNAL FUNCTIONS */
// Used to truncate a float64 value
// round returns value rounded to the nearest integer using the floor(v+0.5)
// scheme (note: this rounds halves toward +Inf, e.g. -2.5 -> -2).
func round(value float64) float64 {
	return math.Floor(value + 0.5)
}
// Used to truncate a float64 value to a particular precision
// roundPlus rounds value to the given number of decimal places.
func roundPlus(value float64, places int) float64 {
	factor := math.Pow(10, float64(places))
	return round(value*factor) / factor
}
// Searches for a value in a slice and returns its position
// findPosition returns the index of the first occurrence of value in data,
// or -1 when absent (including for an empty slice).
func findPosition(value int, data []int) int {
	for i, v := range data {
		if v == value {
			return i
		}
	}
	return -1
}
// Finds the max value in a slice and returns its position
// findMaxPosition returns the index of the first maximum element of data,
// or -1 for an empty slice.
func findMaxPosition(data []int) int {
	if len(data) == 0 {
		return -1
	}
	position := 0
	for i, v := range data {
		if v > data[position] {
			position = i
		}
	}
	return position
}
// Calculates the difference between two time.Time in seconds
func secondsTo(date1 time.Time, date2 time.Time) (int) {
if date1.Equal(date2) || date1.After(date2) {
return 0
}
// Get the number of seconds elapsed since 01/01/1970
seconds1 := date1.Unix()
seconds2 := date2.Unix()
// Calculate the difference in seconds
seconds := seconds2-seconds1
// Return the seconds as int instead of int64
return int(seconds)
}
// Compares two float values using a predetermined epsilon
func floatEquals(a, b float64) bool {
var epsilon float64 = 0.000000001
if ((a - b) < epsilon && (b - a) < epsilon) {
return true
}
return false
}
// Function used to decrease the epoch
func decrease(dateTime []time.Time, data []float64, currentEpoch int, newEpoch int) (newDateTime []time.Time, newData[]float64) {
startDateTime := dateTime[0]
// The start time must be the same start time of the current recorded data
startDateTime = startDateTime.Add(-(time.Duration(currentEpoch) * time.Second))
for index1 := 0; index1 < len(dateTime); index1++ {
// To each data "row", split it to X new "rows"
for index2 := 0; index2 < currentEpoch/newEpoch; index2++ {
startDateTime = startDateTime.Add(time.Duration(newEpoch) * time.Second)
newDateTime = append(newDateTime, startDateTime)
newData = append(newData, data[index1])
}
}
return
}
// Function used to increase the epoch
// increase re-samples data recorded at currentEpoch seconds up to newEpoch
// seconds by averaging each group of newEpoch/currentEpoch samples.
func increase(dateTime []time.Time, data []float64, currentEpoch int, newEpoch int) (newDateTime []time.Time, newData []float64) {
	accumulatedSeconds := 0
	accumulatedValue := 0.0
	// Step back one full epoch so the first emitted timestamp lines up
	// with the start of the original recording.
	cursor := dateTime[0].Add(-(time.Duration(currentEpoch) * time.Second))
	for i := range dateTime {
		accumulatedSeconds += currentEpoch
		accumulatedValue += data[i]
		if accumulatedSeconds < newEpoch {
			continue
		}
		cursor = cursor.Add(time.Duration(newEpoch) * time.Second)
		newDateTime = append(newDateTime, cursor)
		samplesPerBin := float64(newEpoch) / float64(currentEpoch)
		newData = append(newData, roundPlus(accumulatedValue/samplesPerBin, 4))
		accumulatedSeconds = 0
		accumulatedValue = 0.0
	}
	return
}
// Function used in the IS analysis to normalize the data to a specific epoch passed as parameter
func normalizeDataIS(dateTime []time.Time, data []float64, minutes int)(temporaryDateTime []time.Time, temporaryData []float64, err error) {
// Check the parameters
if len(dateTime) == 0 || len(data) == 0 {
err = errors.New("Empty")
return
}
if len(dateTime) != len(data) {
err = errors.New("DifferentSize")
return
}
// If the minute is equal to 1, just return the original slices
if minutes == 1 {
temporaryDateTime = dateTime
temporaryData = data
return
}
// Store the first DateTime
currentDateTime := dateTime[0]
// Gets the last valid position according to the minutes passed by parameter
lastValidIndex := -1
for index := len(dateTime); index > 0; index-- {
if index % minutes == 0 {
lastValidIndex = index
break
}
}
count := 0
tempData := 0.0
// "Normalize" the data based on the minutes passed as parameter
for index := 0; index < lastValidIndex; index += minutes {
for tempIndex := index; tempIndex < index+minutes; tempIndex++ {
tempData += data[tempIndex]
count++
}
temporaryDateTime = append(temporaryDateTime, currentDateTime)
temporaryData = append(temporaryData, (tempData/float64(count)))
currentDateTime = currentDateTime.Add(time.Duration(minutes) * time.Minute)
}
return
}
/* END INTERNAL FUNCTIONS */
// Calculates the average of a float64 slice
// average returns the arithmetic mean of data, or 0 for an empty slice.
func average(data []float64) float64 {
	if len(data) == 0 {
		return 0
	}
	sum := 0.0
	for _, v := range data {
		sum += v
	}
	return sum / float64(len(data))
}
// Function that finds the highest activity average of the followed X hours (defined by parameter)
// HigherActivity finds the `hours`-long window with the highest mean
// activity, returning that mean (rounded to 4 decimals) and the window's
// start time. Errors: "InvalidHours" (hours == 0), "Empty",
// "DifferentSize", and "HoursHigher" when the recording is shorter than
// the requested window.
func HigherActivity(hours int, dateTime []time.Time, data []float64) (higherActivity float64, onsetHigherActivity time.Time, err error) {
	// Check the parameters
	if hours == 0 {
		err = errors.New("InvalidHours")
		return
	}
	if len(dateTime) == 0 || len(data) == 0 {
		err = errors.New("Empty")
		return
	}
	if len(dateTime) != len(data) {
		err = errors.New("DifferentSize")
		return
	}
	if dateTime[0].Add(time.Duration(hours) * time.Hour).After( dateTime[len(dateTime)-1] ) {
		err = errors.New("HoursHigher")
		return
	}
	// Slide a window starting at every sample; stop once the window would
	// run past the end of the recording.
	for index := 0; index < len(dateTime); index++ {
		startDateTime := dateTime[index]
		finalDateTime := startDateTime.Add(time.Duration(hours) * time.Hour)
		tempDateTime := startDateTime
		if finalDateTime.After( dateTime[len(dateTime)-1] ) {
			break
		}
		// Average every sample whose timestamp falls inside the window.
		currentActivity := 0.0
		tempIndex := index
		count := 0
		for tempDateTime.Before(finalDateTime) {
			currentActivity += data[tempIndex]
			count += 1
			tempIndex += 1
			if tempIndex >= len(dateTime) {
				break
			}
			tempDateTime = dateTime[tempIndex]
		}
		currentActivity /= float64(count)
		// The floatEquals(higherActivity, 0.0) clause lets a window win while
		// the running best is still 0, so the onset gets initialized even
		// when early averages are 0.
		if currentActivity > higherActivity || floatEquals(higherActivity, 0.0) {
			higherActivity = roundPlus(currentActivity, 4)
			onsetHigherActivity = startDateTime
		}
	}
	return
}
// Function that finds the lowest activity average of the followed X hours (defined by parameter)
// LowerActivity finds the `hours`-long window with the lowest mean
// activity, returning that mean (rounded to 4 decimals) and the window's
// start time. Errors: "InvalidHours" (hours == 0), "Empty",
// "DifferentSize", and "HoursHigher" when the recording is shorter than
// the requested window.
func LowerActivity(hours int, dateTime []time.Time, data []float64) (lowerActivity float64, onsetLowerActivity time.Time, err error) {
	// Check the parameters
	if hours == 0 {
		err = errors.New("InvalidHours")
		return
	}
	if len(dateTime) == 0 || len(data) == 0 {
		err = errors.New("Empty")
		return
	}
	if len(dateTime) != len(data) {
		err = errors.New("DifferentSize")
		return
	}
	if dateTime[0].Add(time.Duration(hours) * time.Hour).After( dateTime[len(dateTime)-1] ) {
		err = errors.New("HoursHigher")
		return
	}
	// firstTime forces the first complete window to seed the running
	// minimum, since lowerActivity starts at 0.
	firstTime := true
	// Slide a window starting at every sample; stop once the window would
	// run past the end of the recording.
	for index := 0; index < len(dateTime); index++ {
		startDateTime := dateTime[index]
		finalDateTime := startDateTime.Add(time.Duration(hours) * time.Hour)
		tempDateTime := startDateTime
		if finalDateTime.After( dateTime[len(dateTime)-1] ) {
			break
		}
		// Average every sample whose timestamp falls inside the window.
		currentActivity := 0.0
		tempIndex := index
		count := 0
		for tempDateTime.Before(finalDateTime) {
			currentActivity += data[tempIndex]
			count += 1
			tempIndex += 1
			if tempIndex >= len(dateTime) {
				break
			}
			tempDateTime = dateTime[tempIndex]
		}
		currentActivity /= float64(count)
		if currentActivity < lowerActivity || firstTime == true {
			lowerActivity = roundPlus(currentActivity, 4)
			onsetLowerActivity = startDateTime
			firstTime = false
		}
	}
	return
}
// M10 is the highest average activity over any 10 consecutive hours.
func M10(dateTime []time.Time, data []float64) (higherActivity float64, onsetHigherActivity time.Time, err error) {
    return HigherActivity(10, dateTime, data)
}
// L5 is the lowest average activity over any 5 consecutive hours.
func L5(dateTime []time.Time, data []float64) (lowerActivity float64, onsetLowerActivity time.Time, err error) {
    return LowerActivity(5, dateTime, data)
}
// Function that calculates the relative amplitude based on the formula (M10-L5)/(M10+L5).
// Both inputs being zero is rejected, since it would make the denominator zero.
func RelativeAmplitude(highestAverage float64, lowestAverage float64) (RA float64, err error) {
    if highestAverage == 0.0 && lowestAverage == 0.0 {
        err = errors.New("NullValues")
        return
    }
    ratio := (highestAverage - lowestAverage) / (highestAverage + lowestAverage)
    RA = roundPlus(ratio, 4)
    return
}
// Function that calculates the intradaily variability (IV) for averaging
// windows of 1..60 minutes. Position 0 of the returned slice holds the
// average of the remaining positions.
func IntradailyVariability(dateTime []time.Time, data []float64) (iv []float64, err error) {
    if len(dateTime) == 0 || len(data) == 0 {
        err = errors.New("Empty")
        return
    }
    if len(dateTime) != len(data) {
        err = errors.New("DifferentSize")
        return
    }
    if secondsTo(dateTime[0], dateTime[len(dateTime)-1]) < (2*60*60) {
        // BUG FIX: the error was previously set without returning, so the
        // function kept computing and returned data alongside the error.
        err = errors.New("LessThan2Hours")
        return
    }
    // The zero position is allocated to store the average value of the iv vector
    iv = append(iv, 0.0)
    for mainIndex := 1; mainIndex <= 60; mainIndex++ {
        // convertErr: do not shadow the named return `err`.
        _, tempData, convertErr := ConvertDataBasedOnEpoch(dateTime, data, (mainIndex * 60))
        if convertErr != nil {
            err = errors.New("ConvertDataBasedOnEpoch error")
            iv = nil
            return iv, err
        }
        if len(tempData) > 0 {
            average := average(tempData)
            // Numerator: n * sum of squared successive differences.
            var numerator float64
            for index := 1; index < len(tempData); index++ {
                tempValue := tempData[index] - tempData[index-1]
                numerator += math.Pow(tempValue, 2)
            }
            numerator = numerator * float64(len(tempData))
            // Denominator: (n-1) * sum of squared deviations from the mean.
            var denominator float64
            for index := 0; index < len(tempData); index++ {
                tempValue := average - tempData[index]
                denominator += math.Pow(tempValue, 2)
            }
            denominator = denominator * (float64(len(tempData)) - 1.0)
            // Guard against 0/0 (e.g. a single converted sample) producing NaN.
            if denominator == 0 {
                iv = append(iv, 0.0)
            } else {
                iv = append(iv, roundPlus((numerator/denominator), 4))
            }
        } else {
            iv = append(iv, 0.0)
        }
    }
    // Calculates the IV average across positions 1..len-1.
    var average float64
    for index := 1; index < len(iv); index++ {
        average += iv[index]
    }
    average = average / float64(len(iv)-1)
    iv[0] = average
    return
}
// Function that finds the epoch of a time series (seconds): the most common
// interval between consecutive timestamps. Returns 0 when no interval exists.
func FindEpoch(dateTime []time.Time) (epoch int) {
    // BUG FIX: a single-element series built an empty histogram, making
    // findMaxPosition return -1 and epochs[-1] panic. At least two samples
    // are required to measure any interval.
    if len(dateTime) < 2 {
        return
    }
    var count []int
    var epochs []int
    // Histogram of the observed intervals: epochs holds distinct values,
    // count holds how often each was seen.
    for index := 1; index < len(dateTime); index++ {
        seconds := secondsTo(dateTime[index-1], dateTime[index])
        position := findPosition(seconds, epochs)
        if position > -1 {
            count[position] += 1
        } else {
            epochs = append(epochs, seconds)
            count = append(count, 1)
        }
    }
    maxPos := findMaxPosition(count)
    epoch = epochs[maxPos]
    return
}
// Convert the data and dateTime slices to the new epoch passed by parameter.
// Depending on divisibility the series is up-sampled (decrease), down-sampled
// (increase), or re-sampled via a 1-second intermediate step.
func ConvertDataBasedOnEpoch(dateTime []time.Time, data []float64, newEpoch int) (newDateTime []time.Time, newData []float64, err error) {
    // Check the parameters
    if len(dateTime) == 0 || len(data) == 0 {
        err = errors.New("Empty")
        return
    }
    if len(dateTime) != len(data) {
        err = errors.New("DifferentSize")
        return
    }
    if newEpoch == 0 {
        err = errors.New("InvalidEpoch")
        return
    }
    currentEpoch := FindEpoch(dateTime)
    // Could not find the epoch
    if currentEpoch == 0 {
        err = errors.New("InvalidEpoch")
        return
    }
    // Same epoch: return the original slices unchanged (not copies).
    if newEpoch == currentEpoch {
        return dateTime, data, nil
    }
    // If the new Epoch is not divisible or multipliable by the currentEpoch
    // It needs to be decreased to 1 second to then increase to the newEpoch
    if (newEpoch > currentEpoch && newEpoch % currentEpoch != 0) ||
        (currentEpoch > newEpoch && currentEpoch % newEpoch != 0) {
        // Decrease to 1 second
        dateTime, data = decrease(dateTime, data, currentEpoch, 1);
        // Increase to the newEpoch
        newDateTime, newData = increase(dateTime, data, 1, newEpoch);
    } else {
        // Increase
        if newEpoch > currentEpoch {
            newDateTime, newData = increase(dateTime, data, currentEpoch, newEpoch);
        // Decrease
        } else {
            newDateTime, newData = decrease(dateTime, data, currentEpoch, newEpoch);
        }
    }
    return
}
// Function created to filter the data based on the startTime and endTime passed as parameter
func FilterDataByDateTime(dateTime []time.Time, data []float64, startTime time.Time, endTime time.Time) (newDateTime []time.Time, newData []float64, err error) {
// Check the parameters
if len(dateTime) == 0 || len(data) == 0 {
err = errors.New("Empty")
return
}
if len(dateTime) != len(data) {
err = errors.New("DifferentSize")
return
}
if endTime.Before(startTime) {
err = errors.New("InvalidTimeRange")
return
}
// Filter the data based on the startTime and endTime
for index := 0; index < len(dateTime); index++ {
if (dateTime[index].After(startTime) || dateTime[index].Equal(startTime)) &&
(dateTime[index].Before(endTime) || dateTime[index].Equal(endTime)) {
newDateTime = append(newDateTime, dateTime[index])
newData = append(newData, data[index])
}
}
return
}
// Function that calculates the interdaily stability (IS) for every bucket
// size 1..60 minutes that divides a day evenly. Position 0 of the returned
// slice holds the average of the valid positions; unused positions are -1.
func InterdailyStability(dateTime []time.Time, data []float64) (is []float64, err error) {
    // Check the parameters
    if len(dateTime) == 0 || len(data) == 0 {
        err = errors.New("Empty")
        return
    }
    if len(dateTime) != len(data) {
        err = errors.New("DifferentSize")
        return
    }
    // IS needs at least two full days of data.
    if secondsTo(dateTime[0], dateTime[len(dateTime)-1]) < (48*60*60) {
        err = errors.New("LessThan2Days")
        return
    }
    currentEpoch := FindEpoch(dateTime)
    // Could not find the epoch
    if currentEpoch == 0 {
        err = errors.New("InvalidEpoch")
        return
    }
    // The analysis works on minute data; convert first when necessary.
    if currentEpoch != 60 {
        newDateTime, newData, convertError := ConvertDataBasedOnEpoch(dateTime, data, 60)
        dateTime = newDateTime
        data = newData
        if convertError != nil {
            err = errors.New("ErrorConvertingData")
            return
        }
    }
    // The zero position is allocated to store the average value of the IS vector
    is = append(is, 0.0)
    for isIndex := 1; isIndex <= 60; isIndex++ {
        // Only bucket sizes that divide a day (1440 minutes) evenly are valid.
        if 1440 % isIndex == 0 {
            // Normalizes data to the new epoch (minutes)
            temporaryDateTime, temporaryData, _ := normalizeDataIS(dateTime, data, isIndex)
            // Calculate the average day
            _, averageDayData, _ := AverageDay(temporaryDateTime, temporaryData)
            // Get the new N (length)
            n := len(temporaryData)
            // Calculate the number of points per day
            p := len(averageDayData)
            //p := 1440 / isIndex
            // Calculate the new average (Xm)
            average := average(temporaryData)
            numerator := 0.0
            denominator := 0.0
            // The "h" value represents the same "h" from the IS calculation formula
            for h := 0; h < p; h++ {
                numerator += math.Pow((averageDayData[h]-average), 2)
            }
            // The "i" value represents the same "i" from the IS calculation formula
            for i := 0; i < n; i++ {
                denominator += math.Pow((temporaryData[i]-average), 2)
            }
            numerator = float64(n) * numerator
            denominator = float64(p) * denominator
            // Prevent NaN
            if denominator == 0 {
                is = append(is, -1.0)
            } else {
                is = append(is, (numerator/denominator))
            }
        } else {
            // Append -1 in the positions that will not be used
            is = append(is, -1.0)
        }
    }
    // Calculates the IS average of all "valid" values (those > -1).
    average := 0.0
    count := 0
    for index := 0; index < len(is); index++ {
        if is[index] > -1.0 {
            average += is[index]
            count++
        }
    }
    is[0] = average/float64(count)
    return
}
// Function that searches for gaps in the time series and fills it with a specific value passed as parameter (usually zero).
// A gap is detected whenever two consecutive timestamps are at least two
// epochs apart; synthetic samples spaced one epoch apart bridge the gap.
func FillGapsInData(dateTime []time.Time, data []float64, value float64) (newDateTime []time.Time, newData []float64, err error) {
    // Check the parameters
    if len(dateTime) == 0 || len(data) == 0 {
        err = errors.New("Empty")
        return
    }
    if len(dateTime) != len(data) {
        err = errors.New("DifferentSize")
        return
    }
    currentEpoch := FindEpoch(dateTime)
    // Could not find the epoch
    if currentEpoch == 0 {
        err = errors.New("InvalidEpoch")
        return
    }
    for index := 0; index < len(dateTime)-1; index++ {
        newDateTime = append(newDateTime, dateTime[index])
        newData = append(newData, data[index])
        // If this condition is true, then this is a gap
        if secondsTo(dateTime[index], dateTime[index+1]) >= (currentEpoch*2) {
            tempDateTime := dateTime[index]
            // Number of synthetic samples needed to bridge the gap.
            count := (secondsTo(dateTime[index], dateTime[index+1]) / currentEpoch) - 1
            for tempIndex := 0; tempIndex < count; tempIndex++ {
                tempDateTime = tempDateTime.Add(time.Duration(currentEpoch) * time.Second)
                newDateTime = append(newDateTime, tempDateTime)
                newData = append(newData, value)
            }
        }
    }
    // The loop stops one element short; append the final sample here.
    newDateTime = append(newDateTime, dateTime[len(dateTime)-1])
    newData = append(newData, data[len(dateTime)-1])
    return
}
// Creates an average day based on the time series: every sample is
// accumulated into its time-of-day slot and each slot is averaged.
// Requires a detectable epoch and at least 48 hours of data.
func AverageDay(dateTime []time.Time, data []float64) (newDateTime []time.Time, newData []float64, err error) {
    // Check the parameters
    if len(dateTime) == 0 || len(data) == 0 {
        err = errors.New("Empty")
        return
    }
    if len(dateTime) != len(data) {
        err = errors.New("DifferentSize")
        return
    }
    currentEpoch := FindEpoch(dateTime)
    // Could not find the epoch
    if currentEpoch == 0 {
        err = errors.New("InvalidEpoch")
        return
    }
    if secondsTo(dateTime[0], dateTime[len(dateTime)-1]) < (48*60*60) {
        err = errors.New("LessThan2Days")
        return
    }
    // Fill gaps with a sentinel so the averaging below can skip missing samples.
    gapValue := -999.999
    dateTime, data, _ = FillGapsInData(dateTime, data, gapValue)
    // 60*1440 = seconds per day; divided by the epoch gives slots per day.
    pointsPerDay := (60*1440) / currentEpoch
    var countPoints []int
    for index := 0; index < pointsPerDay; index++ {
        newData = append(newData, 0.0)
        countPoints = append(countPoints, 0)
    }
    pointIndex := 0
    for index := 0; index < len(data); index++ {
        if pointIndex >= pointsPerDay {
            pointIndex = 0
        }
        if !floatEquals(data[index], gapValue) {
            newData[pointIndex] += data[index]
            countPoints[pointIndex] += 1
        }
        pointIndex++
    }
    tempDateTime := dateTime[0]
    for index := 0; index < len(newData); index++ {
        newDateTime = append(newDateTime, tempDateTime)
        tempDateTime = tempDateTime.Add(time.Duration(currentEpoch) * time.Second)
        // BUG FIX: a slot whose samples were all gap values has count 0 and
        // previously produced NaN (0/0); leave such slots at 0 instead.
        if countPoints[index] > 0 {
            newData[index] = roundPlus((newData[index] / float64(countPoints[index])), 4)
        }
    }
    return
}
Fixed some issues in the normalizeDataIS function.
// Package that provides chronobiology functions to analyse time series data
package chronobiology
import (
"time"
"math"
"errors"
)
/* BEGIN INTERNAL FUNCTIONS */
// round returns value rounded to the nearest integer (ties round up).
func round(value float64) float64 {
    shifted := value + .5
    return math.Floor(shifted)
}
// roundPlus rounds value to the given number of decimal places.
func roundPlus(value float64, places int) float64 {
    scale := math.Pow(10, float64(places))
    return round(value*scale) / scale
}
// findPosition returns the index of the first occurrence of value in data,
// or -1 when it is absent (including for an empty slice).
func findPosition(value int, data []int) int {
    for i, v := range data {
        if v == value {
            return i
        }
    }
    return -1
}
// findMaxPosition returns the index of the first maximum element of data,
// or -1 for an empty slice.
func findMaxPosition(data []int) int {
    if len(data) == 0 {
        return -1
    }
    best := 0
    for i, v := range data {
        if v > data[best] {
            best = i
        }
    }
    return best
}
// Calculates the difference between two time.Time in seconds
func secondsTo(date1 time.Time, date2 time.Time) (int) {
if date1.Equal(date2) || date1.After(date2) {
return 0
}
// Get the number of seconds elapsed since 01/01/1970
seconds1 := date1.Unix()
seconds2 := date2.Unix()
// Calculate the difference in seconds
seconds := seconds2-seconds1
// Return the seconds as int instead of int64
return int(seconds)
}
// floatEquals reports whether a and b differ by less than a fixed epsilon.
func floatEquals(a, b float64) bool {
    const epsilon = 0.000000001
    diff := a - b
    return diff < epsilon && -diff < epsilon
}
// Function used to decrease the epoch
func decrease(dateTime []time.Time, data []float64, currentEpoch int, newEpoch int) (newDateTime []time.Time, newData[]float64) {
startDateTime := dateTime[0]
// The start time must be the same start time of the current recorded data
startDateTime = startDateTime.Add(-(time.Duration(currentEpoch) * time.Second))
for index1 := 0; index1 < len(dateTime); index1++ {
// To each data "row", split it to X new "rows"
for index2 := 0; index2 < currentEpoch/newEpoch; index2++ {
startDateTime = startDateTime.Add(time.Duration(newEpoch) * time.Second)
newDateTime = append(newDateTime, startDateTime)
newData = append(newData, data[index1])
}
}
return
}
// increase merges consecutive samples into averaged longer-epoch samples,
// converting the series from currentEpoch up to newEpoch.
func increase(dateTime []time.Time, data []float64, currentEpoch int, newEpoch int) (newDateTime []time.Time, newData[]float64) {
    accumulatedSeconds := 0
    accumulatedValue := 0.0
    samplesPerBucket := float64(newEpoch) / float64(currentEpoch)
    // Rewind one full epoch so the emitted timestamps line up with the
    // original recording start.
    cursor := dateTime[0].Add(-(time.Duration(currentEpoch) * time.Second))
    for index := 0; index < len(dateTime); index++ {
        accumulatedSeconds += currentEpoch
        accumulatedValue += data[index]
        if accumulatedSeconds < newEpoch {
            continue
        }
        // One full bucket collected: emit its averaged sample.
        cursor = cursor.Add(time.Duration(newEpoch) * time.Second)
        newDateTime = append(newDateTime, cursor)
        newData = append(newData, roundPlus(accumulatedValue/samplesPerBucket, 4))
        accumulatedSeconds = 0
        accumulatedValue = 0.0
    }
    return
}
// Function used in the IS analysis to normalize the data to a specific epoch passed as parameter
func normalizeDataIS(dateTime []time.Time, data []float64, minutes int)(temporaryDateTime []time.Time, temporaryData []float64, err error) {
// Check the parameters
if len(dateTime) == 0 || len(data) == 0 {
err = errors.New("Empty")
return
}
if len(dateTime) != len(data) {
err = errors.New("DifferentSize")
return
}
if minutes <= 0 {
err = errors.New("MinutesInvalid")
return
}
// If the minute is equal to 1, just return the original slices
if minutes == 1 {
temporaryDateTime = dateTime
temporaryData = data
return
}
// Store the first DateTime
currentDateTime := dateTime[0]
// Gets the last valid position according to the minutes passed by parameter
lastValidIndex := -1
for index := len(dateTime); index > 0; index-- {
if index % minutes == 0 {
lastValidIndex = index
break
}
}
// "Normalize" the data based on the minutes passed as parameter
for index := 0; index < lastValidIndex; index += minutes {
tempData := 0.0
count := 0
for tempIndex := index; tempIndex < index+minutes; tempIndex++ {
tempData += data[tempIndex]
count++
}
temporaryDateTime = append(temporaryDateTime, currentDateTime)
temporaryData = append(temporaryData, (tempData/float64(count)))
currentDateTime = currentDateTime.Add(time.Duration(minutes) * time.Minute)
}
return
}
/* END INTERNAL FUNCTIONS */
// average returns the arithmetic mean of data, or 0 for an empty slice.
func average(data []float64) (float64) {
    if len(data) == 0 {
        return 0.0
    }
    sum := 0.0
    for _, v := range data {
        sum += v
    }
    return sum / float64(len(data))
}
// Function that finds the highest activity average of the followed X hours (defined by parameter).
// It slides an hours-long window over the series and returns the largest
// window mean (rounded to 4 places) plus the start time of that window.
func HigherActivity(hours int, dateTime []time.Time, data []float64) (higherActivity float64, onsetHigherActivity time.Time, err error) {
    // Check the parameters
    if hours == 0 {
        err = errors.New("InvalidHours")
        return
    }
    if len(dateTime) == 0 || len(data) == 0 {
        err = errors.New("Empty")
        return
    }
    if len(dateTime) != len(data) {
        err = errors.New("DifferentSize")
        return
    }
    // The series must span at least `hours` hours so one full window fits.
    if dateTime[0].Add(time.Duration(hours) * time.Hour).After( dateTime[len(dateTime)-1] ) {
        err = errors.New("HoursHigher")
        return
    }
    for index := 0; index < len(dateTime); index++ {
        startDateTime := dateTime[index]
        finalDateTime := startDateTime.Add(time.Duration(hours) * time.Hour)
        tempDateTime := startDateTime
        // Stop once the window no longer fits inside the series.
        if finalDateTime.After( dateTime[len(dateTime)-1] ) {
            break
        }
        currentActivity := 0.0
        tempIndex := index
        count := 0
        // Sum every sample whose timestamp lies before the window's end.
        for tempDateTime.Before(finalDateTime) {
            currentActivity += data[tempIndex]
            count += 1
            tempIndex += 1
            if tempIndex >= len(dateTime) {
                break
            }
            tempDateTime = dateTime[tempIndex]
        }
        currentActivity /= float64(count)
        // NOTE(review): while the running maximum is still 0 every window
        // replaces it; for all-zero data the onset ends up at the last
        // window — confirm this is intended.
        if currentActivity > higherActivity || floatEquals(higherActivity, 0.0) {
            higherActivity = roundPlus(currentActivity, 4)
            onsetHigherActivity = startDateTime
        }
    }
    return
}
// Function that finds the lowest activity average of the followed X hours (defined by parameter).
// It slides an hours-long window over the series and returns the smallest
// window mean (rounded to 4 places) plus the start time of that window.
func LowerActivity(hours int, dateTime []time.Time, data []float64) (lowerActivity float64, onsetLowerActivity time.Time, err error) {
    // Check the parameters
    if hours == 0 {
        err = errors.New("InvalidHours")
        return
    }
    if len(dateTime) == 0 || len(data) == 0 {
        err = errors.New("Empty")
        return
    }
    if len(dateTime) != len(data) {
        err = errors.New("DifferentSize")
        return
    }
    // The series must span at least `hours` hours so one full window fits.
    if dateTime[0].Add(time.Duration(hours) * time.Hour).After( dateTime[len(dateTime)-1] ) {
        err = errors.New("HoursHigher")
        return
    }
    // firstTime makes the first window seed the minimum unconditionally.
    firstTime := true
    for index := 0; index < len(dateTime); index++ {
        startDateTime := dateTime[index]
        finalDateTime := startDateTime.Add(time.Duration(hours) * time.Hour)
        tempDateTime := startDateTime
        // Stop once the window no longer fits inside the series.
        if finalDateTime.After( dateTime[len(dateTime)-1] ) {
            break
        }
        currentActivity := 0.0
        tempIndex := index
        count := 0
        // Sum every sample whose timestamp lies before the window's end.
        for tempDateTime.Before(finalDateTime) {
            currentActivity += data[tempIndex]
            count += 1
            tempIndex += 1
            if tempIndex >= len(dateTime) {
                break
            }
            tempDateTime = dateTime[tempIndex]
        }
        currentActivity /= float64(count)
        if currentActivity < lowerActivity || firstTime == true {
            lowerActivity = roundPlus(currentActivity, 4)
            onsetLowerActivity = startDateTime
            firstTime = false
        }
    }
    return
}
// M10 is the highest average activity over any 10 consecutive hours.
func M10(dateTime []time.Time, data []float64) (higherActivity float64, onsetHigherActivity time.Time, err error) {
    return HigherActivity(10, dateTime, data)
}
// L5 is the lowest average activity over any 5 consecutive hours.
func L5(dateTime []time.Time, data []float64) (lowerActivity float64, onsetLowerActivity time.Time, err error) {
    return LowerActivity(5, dateTime, data)
}
// Function that calculates the relative amplitude based on the formula (M10-L5)/(M10+L5).
// Both inputs being zero is rejected, since it would make the denominator zero.
func RelativeAmplitude(highestAverage float64, lowestAverage float64) (RA float64, err error) {
    if highestAverage == 0.0 && lowestAverage == 0.0 {
        err = errors.New("NullValues")
        return
    }
    ratio := (highestAverage - lowestAverage) / (highestAverage + lowestAverage)
    RA = roundPlus(ratio, 4)
    return
}
// Function that calculates the intradaily variability (IV) for averaging
// windows of 1..60 minutes. Position 0 of the returned slice holds the
// average of the remaining positions.
func IntradailyVariability(dateTime []time.Time, data []float64) (iv []float64, err error) {
    if len(dateTime) == 0 || len(data) == 0 {
        err = errors.New("Empty")
        return
    }
    if len(dateTime) != len(data) {
        err = errors.New("DifferentSize")
        return
    }
    if secondsTo(dateTime[0], dateTime[len(dateTime)-1]) < (2*60*60) {
        // BUG FIX: the error was previously set without returning, so the
        // function kept computing and returned data alongside the error.
        err = errors.New("LessThan2Hours")
        return
    }
    // The zero position is allocated to store the average value of the iv vector
    iv = append(iv, 0.0)
    for mainIndex := 1; mainIndex <= 60; mainIndex++ {
        // convertErr: do not shadow the named return `err`.
        _, tempData, convertErr := ConvertDataBasedOnEpoch(dateTime, data, (mainIndex * 60))
        if convertErr != nil {
            err = errors.New("ConvertDataBasedOnEpoch error")
            iv = nil
            return iv, err
        }
        if len(tempData) > 0 {
            average := average(tempData)
            // Numerator: n * sum of squared successive differences.
            var numerator float64
            for index := 1; index < len(tempData); index++ {
                tempValue := tempData[index] - tempData[index-1]
                numerator += math.Pow(tempValue, 2)
            }
            numerator = numerator * float64(len(tempData))
            // Denominator: (n-1) * sum of squared deviations from the mean.
            var denominator float64
            for index := 0; index < len(tempData); index++ {
                tempValue := average - tempData[index]
                denominator += math.Pow(tempValue, 2)
            }
            denominator = denominator * (float64(len(tempData)) - 1.0)
            // Guard against 0/0 (e.g. a single converted sample) producing NaN.
            if denominator == 0 {
                iv = append(iv, 0.0)
            } else {
                iv = append(iv, roundPlus((numerator/denominator), 4))
            }
        } else {
            iv = append(iv, 0.0)
        }
    }
    // Calculates the IV average across positions 1..len-1.
    var average float64
    for index := 1; index < len(iv); index++ {
        average += iv[index]
    }
    average = average / float64(len(iv)-1)
    iv[0] = average
    return
}
// Function that finds the epoch of a time series (seconds): the most common
// interval between consecutive timestamps. Returns 0 when no interval exists.
func FindEpoch(dateTime []time.Time) (epoch int) {
    // BUG FIX: a single-element series built an empty histogram, making
    // findMaxPosition return -1 and epochs[-1] panic. At least two samples
    // are required to measure any interval.
    if len(dateTime) < 2 {
        return
    }
    var count []int
    var epochs []int
    // Histogram of the observed intervals: epochs holds distinct values,
    // count holds how often each was seen.
    for index := 1; index < len(dateTime); index++ {
        seconds := secondsTo(dateTime[index-1], dateTime[index])
        position := findPosition(seconds, epochs)
        if position > -1 {
            count[position] += 1
        } else {
            epochs = append(epochs, seconds)
            count = append(count, 1)
        }
    }
    maxPos := findMaxPosition(count)
    epoch = epochs[maxPos]
    return
}
// Convert the data and dateTime slices to the new epoch passed by parameter.
// Depending on divisibility the series is up-sampled (decrease), down-sampled
// (increase), or re-sampled via a 1-second intermediate step.
func ConvertDataBasedOnEpoch(dateTime []time.Time, data []float64, newEpoch int) (newDateTime []time.Time, newData []float64, err error) {
    // Check the parameters
    if len(dateTime) == 0 || len(data) == 0 {
        err = errors.New("Empty")
        return
    }
    if len(dateTime) != len(data) {
        err = errors.New("DifferentSize")
        return
    }
    if newEpoch == 0 {
        err = errors.New("InvalidEpoch")
        return
    }
    currentEpoch := FindEpoch(dateTime)
    // Could not find the epoch
    if currentEpoch == 0 {
        err = errors.New("InvalidEpoch")
        return
    }
    // Same epoch: return the original slices unchanged (not copies).
    if newEpoch == currentEpoch {
        return dateTime, data, nil
    }
    // If the new Epoch is not divisible or multipliable by the currentEpoch
    // It needs to be decreased to 1 second to then increase to the newEpoch
    if (newEpoch > currentEpoch && newEpoch % currentEpoch != 0) ||
        (currentEpoch > newEpoch && currentEpoch % newEpoch != 0) {
        // Decrease to 1 second
        dateTime, data = decrease(dateTime, data, currentEpoch, 1);
        // Increase to the newEpoch
        newDateTime, newData = increase(dateTime, data, 1, newEpoch);
    } else {
        // Increase
        if newEpoch > currentEpoch {
            newDateTime, newData = increase(dateTime, data, currentEpoch, newEpoch);
        // Decrease
        } else {
            newDateTime, newData = decrease(dateTime, data, currentEpoch, newEpoch);
        }
    }
    return
}
// Function created to filter the data based on the startTime and endTime passed as parameter
func FilterDataByDateTime(dateTime []time.Time, data []float64, startTime time.Time, endTime time.Time) (newDateTime []time.Time, newData []float64, err error) {
// Check the parameters
if len(dateTime) == 0 || len(data) == 0 {
err = errors.New("Empty")
return
}
if len(dateTime) != len(data) {
err = errors.New("DifferentSize")
return
}
if endTime.Before(startTime) {
err = errors.New("InvalidTimeRange")
return
}
// Filter the data based on the startTime and endTime
for index := 0; index < len(dateTime); index++ {
if (dateTime[index].After(startTime) || dateTime[index].Equal(startTime)) &&
(dateTime[index].Before(endTime) || dateTime[index].Equal(endTime)) {
newDateTime = append(newDateTime, dateTime[index])
newData = append(newData, data[index])
}
}
return
}
// Function that calculates the interdaily stability (IS) for every bucket
// size 1..60 minutes that divides a day evenly. Position 0 of the returned
// slice holds the average of the valid positions; unused positions are -1.
func InterdailyStability(dateTime []time.Time, data []float64) (is []float64, err error) {
    // Check the parameters
    if len(dateTime) == 0 || len(data) == 0 {
        err = errors.New("Empty")
        return
    }
    if len(dateTime) != len(data) {
        err = errors.New("DifferentSize")
        return
    }
    // IS needs at least two full days of data.
    if secondsTo(dateTime[0], dateTime[len(dateTime)-1]) < (48*60*60) {
        err = errors.New("LessThan2Days")
        return
    }
    currentEpoch := FindEpoch(dateTime)
    // Could not find the epoch
    if currentEpoch == 0 {
        err = errors.New("InvalidEpoch")
        return
    }
    // The analysis works on minute data; convert first when necessary.
    if currentEpoch != 60 {
        newDateTime, newData, convertError := ConvertDataBasedOnEpoch(dateTime, data, 60)
        dateTime = newDateTime
        data = newData
        if convertError != nil {
            err = errors.New("ErrorConvertingData")
            return
        }
    }
    // The zero position is allocated to store the average value of the IS vector
    is = append(is, 0.0)
    for isIndex := 1; isIndex <= 60; isIndex++ {
        // Only bucket sizes that divide a day (1440 minutes) evenly are valid.
        if 1440 % isIndex == 0 {
            // Normalizes data to the new epoch (minutes)
            temporaryDateTime, temporaryData, _ := normalizeDataIS(dateTime, data, isIndex)
            // Calculate the average day
            _, averageDayData, _ := AverageDay(temporaryDateTime, temporaryData)
            // Get the new N (length)
            n := len(temporaryData)
            // Calculate the number of points per day
            p := len(averageDayData)
            //p := 1440 / isIndex
            // Calculate the new average (Xm)
            average := average(temporaryData)
            numerator := 0.0
            denominator := 0.0
            // The "h" value represents the same "h" from the IS calculation formula
            for h := 0; h < p; h++ {
                numerator += math.Pow((averageDayData[h]-average), 2)
            }
            // The "i" value represents the same "i" from the IS calculation formula
            for i := 0; i < n; i++ {
                denominator += math.Pow((temporaryData[i]-average), 2)
            }
            numerator = float64(n) * numerator
            denominator = float64(p) * denominator
            // Prevent NaN
            if denominator == 0 {
                is = append(is, -1.0)
            } else {
                is = append(is, (numerator/denominator))
            }
        } else {
            // Append -1 in the positions that will not be used
            is = append(is, -1.0)
        }
    }
    // Calculates the IS average of all "valid" values (those > -1).
    average := 0.0
    count := 0
    for index := 0; index < len(is); index++ {
        if is[index] > -1.0 {
            average += is[index]
            count++
        }
    }
    is[0] = average/float64(count)
    return
}
// Function that searches for gaps in the time series and fills it with a specific value passed as parameter (usually zero).
// A gap is detected whenever two consecutive timestamps are at least two
// epochs apart; synthetic samples spaced one epoch apart bridge the gap.
func FillGapsInData(dateTime []time.Time, data []float64, value float64) (newDateTime []time.Time, newData []float64, err error) {
    // Check the parameters
    if len(dateTime) == 0 || len(data) == 0 {
        err = errors.New("Empty")
        return
    }
    if len(dateTime) != len(data) {
        err = errors.New("DifferentSize")
        return
    }
    currentEpoch := FindEpoch(dateTime)
    // Could not find the epoch
    if currentEpoch == 0 {
        err = errors.New("InvalidEpoch")
        return
    }
    for index := 0; index < len(dateTime)-1; index++ {
        newDateTime = append(newDateTime, dateTime[index])
        newData = append(newData, data[index])
        // If this condition is true, then this is a gap
        if secondsTo(dateTime[index], dateTime[index+1]) >= (currentEpoch*2) {
            tempDateTime := dateTime[index]
            // Number of synthetic samples needed to bridge the gap.
            count := (secondsTo(dateTime[index], dateTime[index+1]) / currentEpoch) - 1
            for tempIndex := 0; tempIndex < count; tempIndex++ {
                tempDateTime = tempDateTime.Add(time.Duration(currentEpoch) * time.Second)
                newDateTime = append(newDateTime, tempDateTime)
                newData = append(newData, value)
            }
        }
    }
    // The loop stops one element short; append the final sample here.
    newDateTime = append(newDateTime, dateTime[len(dateTime)-1])
    newData = append(newData, data[len(dateTime)-1])
    return
}
// Creates an average day based on the time series: every sample is
// accumulated into its time-of-day slot and each slot is averaged.
// Requires a detectable epoch and at least 48 hours of data.
func AverageDay(dateTime []time.Time, data []float64) (newDateTime []time.Time, newData []float64, err error) {
    // Check the parameters
    if len(dateTime) == 0 || len(data) == 0 {
        err = errors.New("Empty")
        return
    }
    if len(dateTime) != len(data) {
        err = errors.New("DifferentSize")
        return
    }
    currentEpoch := FindEpoch(dateTime)
    // Could not find the epoch
    if currentEpoch == 0 {
        err = errors.New("InvalidEpoch")
        return
    }
    if secondsTo(dateTime[0], dateTime[len(dateTime)-1]) < (48*60*60) {
        err = errors.New("LessThan2Days")
        return
    }
    // Fill gaps with a sentinel so the averaging below can skip missing samples.
    gapValue := -999.999
    dateTime, data, _ = FillGapsInData(dateTime, data, gapValue)
    // 60*1440 = seconds per day; divided by the epoch gives slots per day.
    pointsPerDay := (60*1440) / currentEpoch
    var countPoints []int
    for index := 0; index < pointsPerDay; index++ {
        newData = append(newData, 0.0)
        countPoints = append(countPoints, 0)
    }
    pointIndex := 0
    for index := 0; index < len(data); index++ {
        if pointIndex >= pointsPerDay {
            pointIndex = 0
        }
        if !floatEquals(data[index], gapValue) {
            newData[pointIndex] += data[index]
            countPoints[pointIndex] += 1
        }
        pointIndex++
    }
    tempDateTime := dateTime[0]
    for index := 0; index < len(newData); index++ {
        newDateTime = append(newDateTime, tempDateTime)
        tempDateTime = tempDateTime.Add(time.Duration(currentEpoch) * time.Second)
        // BUG FIX: a slot whose samples were all gap values has count 0 and
        // previously produced NaN (0/0); leave such slots at 0 instead.
        if countPoints[index] > 0 {
            newData[index] = roundPlus((newData[index] / float64(countPoints[index])), 4)
        }
    }
    return
}
|
package gentle
import (
"errors"
"github.com/afex/hystrix-go/hystrix"
"github.com/benbjohnson/clock"
"sync"
"time"
)
const (
    // Types of resilience, are most often used as part of RegistryKey.
    StreamRateLimited = "sRate"
    StreamRetry = "sRetry"
    StreamBulkhead = "sBulk"
    //StreamSemaphore = "sSem"
    StreamCircuitBreaker = "sCircuit"
    //StreamChannel = "sChan"
    //StreamHandled = "sHan"
    //StreamFallback = "sFb"
)

var (
    // Metric label sets shared by every stream implementation below.
    labelOk = map[string]string{"result": "ok"}
    labelErr = map[string]string{"result": "err"}
)
// Common options for XXXStreamOpts
type streamOpts struct {
    Namespace string
    Name string
    Log Logger
    // MetricGet is observed on every Get() with ok/err labels.
    MetricGet Metric
}

// Common fields for XXXStream; populated from streamOpts by newStreamFields.
type streamFields struct {
    namespace string
    name string
    log Logger
    mxGet Metric
}
// newStreamFields copies the shared options into the embedded stream fields.
func newStreamFields(opts *streamOpts) *streamFields {
    fields := streamFields{
        namespace: opts.Namespace,
        name:      opts.Name,
        log:       opts.Log,
        mxGet:     opts.MetricGet,
    }
    return &fields
}
// Options for NewRateLimitedStream().
type RateLimitedStreamOpts struct {
    streamOpts
    // Limiter paces Get(); see rateLimitedStream.Get.
    Limiter RateLimit
}
// NewRateLimitedStreamOpts returns the default options for a rate-limited
// stream: a namespaced logger and a no-op Get() metric.
func NewRateLimitedStreamOpts(namespace, name string, limiter RateLimit) *RateLimitedStreamOpts {
    logger := Log.New("namespace", namespace,
        "gentle", StreamRateLimited, "name", name)
    return &RateLimitedStreamOpts{
        streamOpts: streamOpts{
            Namespace: namespace,
            Name:      name,
            Log:       logger,
            MetricGet: noopMetric,
        },
        Limiter: limiter,
    }
}
// Rate limiting pattern is used to limit the speed of a series of Get().
type rateLimitedStream struct {
    *streamFields
    limiter RateLimit
    // stream is the upstream being throttled.
    stream Stream
}
// NewRateLimitedStream wraps upstream so that Get() is paced by opts.Limiter.
func NewRateLimitedStream(opts *RateLimitedStreamOpts, upstream Stream) Stream {
    s := &rateLimitedStream{
        streamFields: newStreamFields(&opts.streamOpts),
        limiter:      opts.Limiter,
        stream:       upstream,
    }
    return s
}
// Get() is blocked when the limit is exceeded.
// Note: timespan includes the time spent waiting on the limiter, since
// begin is taken before Wait().
func (r *rateLimitedStream) Get() (Message, error) {
    begin := time.Now()
    r.log.Debug("[Stream] Get() ...")
    // Block until the limiter grants one token.
    r.limiter.Wait(1, 0)
    msg, err := r.stream.Get()
    timespan := time.Since(begin).Seconds()
    if err != nil {
        r.log.Error("[Stream] Get() err", "err", err,
            "timespan", timespan)
        r.mxGet.Observe(timespan, labelErr)
        return nil, err
    }
    r.log.Debug("[Stream] Get() ok", "msgOut", msg.ID(),
        "timespan", timespan)
    r.mxGet.Observe(timespan, labelOk)
    return msg, nil
}
// GetNames identifies this stream within the registry.
func (r *rateLimitedStream) GetNames() *Names {
    names := Names{
        Namespace:  r.namespace,
        Resilience: StreamRateLimited,
        Name:       r.name,
    }
    return &names
}
// Options for NewRetryStream().
type RetryStreamOpts struct {
    streamOpts
    // MetricTryNum observes the number of attempts each Get() took.
    MetricTryNum Metric
    // Clock is injectable for tests.
    Clock Clock
    BackOffFactory BackOffFactory
}
// NewRetryStreamOpts returns the default options for a retry stream: a
// namespaced logger, no-op metrics and the real wall clock.
func NewRetryStreamOpts(namespace, name string, backOffFactory BackOffFactory) *RetryStreamOpts {
    logger := Log.New("namespace", namespace, "gentle",
        StreamRetry, "name", name)
    return &RetryStreamOpts{
        streamOpts: streamOpts{
            Namespace: namespace,
            Name:      name,
            Log:       logger,
            MetricGet: noopMetric,
        },
        MetricTryNum:   noopMetric,
        Clock:          clock.New(),
        BackOffFactory: backOffFactory,
    }
}
// retryStream will, when Get() encounters error, back off for some time
// and then retries.
type retryStream struct {
    *streamFields
    obTryNum Metric
    clock Clock
    backOffFactory BackOffFactory
    stream Stream
}
// NewRetryStream wraps upstream so that failed Get() calls are retried
// according to opts.BackOffFactory.
func NewRetryStream(opts *RetryStreamOpts, upstream Stream) Stream {
    s := &retryStream{
        streamFields:   newStreamFields(&opts.streamOpts),
        obTryNum:       opts.MetricTryNum,
        clock:          opts.Clock,
        backOffFactory: opts.BackOffFactory,
        stream:         upstream,
    }
    return s
}
// Get retries the upstream Get() with back-off until it succeeds or the
// BackOff returns BackOffStop, in which case the last error is returned.
func (r *retryStream) Get() (Message, error) {
    begin := r.clock.Now()
    count := 1
    r.log.Debug("[Stream] Get() ...", "count", count)
    var once sync.Once
    var backOff BackOff
    for {
        msg, err := r.stream.Get()
        if err == nil {
            timespan := r.clock.Now().Sub(begin).Seconds()
            r.log.Debug("[Stream] Get() ok", "msgOut", msg.ID(),
                "timespan", timespan, "count", count)
            r.mxGet.Observe(timespan, labelOk)
            r.obTryNum.Observe(float64(count), labelOk)
            return msg, nil
        }
        // Create the BackOff lazily, on the first failure only.
        once.Do(func() {
            backOff = r.backOffFactory.NewBackOff()
        })
        toWait := backOff.Next()
        // Next() should immediately return but we can't guarantee so
        // timespan is calculated after Next().
        timespan := r.clock.Now().Sub(begin).Seconds()
        if toWait == BackOffStop {
            // BUG FIX: this log line used the prefix "[Streamer]" while every
            // other log line in the package uses "[Stream]".
            r.log.Error("[Stream] Get() err and no more backing off",
                "err", err, "timespan", timespan,
                "count", count)
            r.mxGet.Observe(timespan, labelErr)
            r.obTryNum.Observe(float64(count), labelErr)
            return nil, err
        }
        // timespan in our convention is used to track the overall
        // time of current function. Here we record time
        // passed as "elapsed".
        count++
        r.log.Error("[Stream] Get() err, backing off ...",
            "err", err, "elapsed", timespan, "count", count,
            "wait", toWait)
        r.clock.Sleep(toWait)
    }
}
// GetNames identifies this stream within the registry.
func (r *retryStream) GetNames() *Names {
    names := Names{
        Namespace:  r.namespace,
        Resilience: StreamRetry,
        Name:       r.name,
    }
    return &names
}
// Options for NewBulkheadStream().
type BulkheadStreamOpts struct {
    streamOpts
    // MaxConcurrency caps the number of concurrently running Get() calls.
    MaxConcurrency int
}
// NewBulkheadStreamOpts returns BulkheadStreamOpts with defaults (a
// noop Get metric and a namespaced logger). It panics when
// maxConcurrency is not positive, since such a bulkhead could never
// admit traffic — a programming error rather than a runtime condition.
func NewBulkheadStreamOpts(namespace, name string, maxConcurrency int) *BulkheadStreamOpts {
	if maxConcurrency <= 0 {
		// Message fixed to reference the actual parameter name
		// (was "max_concurrent", which matches no identifier).
		panic(errors.New("maxConcurrency must be greater than 0"))
	}
	return &BulkheadStreamOpts{
		streamOpts: streamOpts{
			Namespace: namespace,
			Name:      name,
			Log: Log.New("namespace", namespace, "gentle",
				StreamBulkhead, "name", name),
			MetricGet: noopMetric,
		},
		MaxConcurrency: maxConcurrency,
	}
}
// Bulkhead pattern is used to limit the number of concurrently hanging Get().
// It uses semaphore isolation, similar to the approach used in hystrix.
// http://stackoverflow.com/questions/30391809/what-is-bulkhead-pattern-used-by-hystrix
type bulkheadStream struct {
	*streamFields
	stream    Stream        // upstream to pull Messages from
	semaphore chan struct{} // counting semaphore; capacity == MaxConcurrency
}
// NewBulkheadStream creates a bulkheadStream that allows at maximum
// opts.MaxConcurrency Get() calls to run concurrently.
func NewBulkheadStream(opts *BulkheadStreamOpts, upstream Stream) Stream {
	s := &bulkheadStream{streamFields: newStreamFields(&opts.streamOpts)}
	s.stream = upstream
	s.semaphore = make(chan struct{}, opts.MaxConcurrency)
	return s
}
// Get() fails fast with ErrMaxConcurrency when the concurrency limit is
// exceeded (non-blocking semaphore acquisition via select/default).
func (r *bulkheadStream) Get() (Message, error) {
	begin := time.Now()
	r.log.Debug("[Stream] Get() ...")
	select {
	case r.semaphore <- struct{}{}:
		// Slot acquired; release it when this Get() returns.
		defer func() {
			<-r.semaphore
		}()
		msg, err := r.stream.Get()
		timespan := time.Since(begin).Seconds()
		if err != nil {
			r.log.Error("[Stream] Get() err", "err", err,
				"timespan", timespan)
			r.mxGet.Observe(timespan, labelErr)
			return nil, err
		}
		r.log.Debug("[Stream] Get() ok", "msgOut", msg.ID(),
			"timespan", timespan)
		r.mxGet.Observe(timespan, labelOk)
		return msg, nil
	default:
		// NOTE(review): the rejection path does not observe mxGet —
		// presumably intentional since no upstream work ran; confirm.
		r.log.Error("[Stream] Get() err", "err", ErrMaxConcurrency)
		return nil, ErrMaxConcurrency
	}
}
// GetNames returns the identifiers of this stream for registry lookup.
func (r *bulkheadStream) GetNames() *Names {
	return &Names{
		Namespace:  r.namespace,
		Resilience: StreamBulkhead,
		Name:       r.name,
	}
}
// GetMaxConcurrency returns the configured concurrency limit
// (the capacity of the semaphore channel).
func (r *bulkheadStream) GetMaxConcurrency() int {
	return cap(r.semaphore)
}
// GetCurrentConcurrency returns the number of Get() calls in flight.
func (r *bulkheadStream) GetCurrentConcurrency() int {
	return len(r.semaphore)
}
/*
type SemaphoreStreamOpts struct {
streamOpts
MaxConcurrency int
}
func NewSemaphoreStreamOpts(namespace, name string, maxConcurrency int) *SemaphoreStreamOpts {
if maxConcurrency <= 0 {
panic(errors.New("max_concurrent must be greater than 0"))
}
return &SemaphoreStreamOpts{
streamOpts: streamOpts{
Namespace: namespace,
Name: name,
Log: Log.New("namespace", namespace, "gentle",
StreamSemaphore, "name", name),
MetricGet: noopMetric,
},
MaxConcurrency: maxConcurrency,
}
}
// It allows at maximum $max_concurrency Get() to run concurrently. Similar
// to Bulkhead, but it blocks when MaxConcurrency is reached.
type semaphoreStream struct {
*streamFields
stream Stream
semaphore chan struct{}
}
func NewSemaphoreStream(opts *SemaphoreStreamOpts, upstream Stream) Stream {
return &semaphoreStream{
streamFields: newStreamFields(&opts.streamOpts),
stream: upstream,
semaphore: make(chan struct{}, opts.MaxConcurrency),
}
}
func (r *semaphoreStream) Get() (Message, error) {
begin := time.Now()
r.log.Debug("[Stream] Get() ...")
r.semaphore <- struct{}{}
defer func() { <-r.semaphore }()
msg, err := r.stream.Get()
timespan := time.Since(begin).Seconds()
if err != nil {
r.log.Error("[Stream] Get() err", "err", err,
"timespan", timespan)
r.mxGet.Observe(timespan, labelErr)
return nil, err
}
r.log.Debug("[Stream] Get() ok", "msgOut", msg.ID(),
"timespan", timespan)
r.mxGet.Observe(timespan, labelOk)
return msg, nil
}
func (r *semaphoreStream) GetNames() *Names {
return &Names{
Namespace: r.namespace,
Resilience: StreamSemaphore,
Name: r.name,
}
}
func (r *semaphoreStream) GetMaxConcurrency() int {
return cap(r.semaphore)
}
func (r *semaphoreStream) GetCurrentConcurrency() int {
return len(r.semaphore)
}
*/
// CircuitBreakerStreamOpts is the options for NewCircuitBreakerStream().
type CircuitBreakerStreamOpts struct {
	streamOpts
	MetricCbErr Metric // observes circuit-breaker error categories
	Circuit     string // name of the hystrix circuit to use
}
// NewCircuitBreakerStreamOpts returns CircuitBreakerStreamOpts with
// defaults: noop metrics and a logger tagged with the circuit name.
func NewCircuitBreakerStreamOpts(namespace, name, circuit string) *CircuitBreakerStreamOpts {
	return &CircuitBreakerStreamOpts{
		streamOpts: streamOpts{
			Namespace: namespace,
			Name:      name,
			Log: Log.New("namespace", namespace,
				"gentle", StreamCircuitBreaker,
				"name", name, "circuit", circuit),
			MetricGet: noopMetric,
		},
		MetricCbErr: noopMetric,
		Circuit:     circuit,
	}
}
// circuitBreakerStream is a Stream equipped with a circuit-breaker.
type circuitBreakerStream struct {
	*streamFields
	mxCbErr Metric // observes circuit-breaker error categories
	circuit string // hystrix circuit name
	stream  Stream // upstream to pull Messages from
}
// In hystrix-go, a circuit-breaker must be given a unique name.
// NewCircuitBreakerStream() creates a circuitBreakerStream with a
// circuit-breaker named $circuit.
func NewCircuitBreakerStream(opts *CircuitBreakerStreamOpts, stream Stream) Stream {
	// NOTE: this check-then-register is not synchronized; two
	// goroutines registering the same circuit concurrently may
	// overwrite each other's settings.
	allCircuits := hystrix.GetCircuitSettings()
	if _, ok := allCircuits[opts.Circuit]; !ok {
		NewDefaultCircuitBreakerConf().RegisterFor(opts.Circuit)
	}
	return &circuitBreakerStream{
		streamFields: newStreamFields(&opts.streamOpts),
		mxCbErr:      opts.MetricCbErr,
		circuit:      opts.Circuit,
		stream:       stream,
	}
}
// Get runs the upstream Get() inside the hystrix circuit. Hystrix's own
// errors are mapped to this package's ErrCb* values so that stacked
// circuitBreakerStreams cannot misread each other's failures.
func (r *circuitBreakerStream) Get() (Message, error) {
	begin := time.Now()
	r.log.Debug("[Stream] Get() ...")
	// Buffered so the command function can finish even when nobody
	// reads the result (e.g. hystrix already reported a timeout).
	result := make(chan interface{}, 1)
	err := hystrix.Do(r.circuit, func() error {
		msg, err := r.stream.Get()
		timespan := time.Since(begin).Seconds()
		if err != nil {
			r.log.Error("[Stream] Do()::Get() err",
				"err", err, "timespan", timespan)
			return err
		}
		r.log.Debug("[Stream] Do()::Get() ok",
			"msgOut", msg.ID(), "timespan", timespan)
		result <- msg
		return nil
	}, nil)
	// NOTE:
	// err can be from Do()::Get() or hystrix errors if criteria are matched.
	// Do()::Get()'s err, being returned or not, contributes to hystrix metrics
	if err != nil {
		// Deferred so the same log/metric fires for every error arm.
		defer func() {
			timespan := time.Since(begin).Seconds()
			r.log.Error("[Stream] Circuit err", "err", err,
				"timespan", timespan)
			r.mxGet.Observe(timespan, labelErr)
		}()
		// To prevent misinterpreting when wrapping one
		// circuitBreakerStream over another. Hystrix errors are
		// replaced so that Get() won't return any hystrix errors.
		switch err {
		case hystrix.ErrCircuitOpen:
			r.mxCbErr.Observe(1,
				map[string]string{"err": "ErrCbOpen"})
			return nil, ErrCbOpen
		case hystrix.ErrMaxConcurrency:
			r.mxCbErr.Observe(1,
				map[string]string{"err": "ErrCbMaxConcurrency"})
			return nil, ErrCbMaxConcurrency
		case hystrix.ErrTimeout:
			r.mxCbErr.Observe(1,
				map[string]string{"err": "ErrCbTimeout"})
			return nil, ErrCbTimeout
		default:
			r.mxCbErr.Observe(1,
				map[string]string{"err": "NonCbErr"})
			return nil, err
		}
	}
	msgOut := (<-result).(Message)
	timespan := time.Since(begin).Seconds()
	r.log.Debug("[Stream] Get() ok", "msgOut", msgOut.ID(),
		"timespan", timespan)
	r.mxGet.Observe(timespan, labelOk)
	return msgOut, nil
}
// GetNames returns the identifiers of this stream for registry lookup.
func (r *circuitBreakerStream) GetNames() *Names {
	return &Names{
		Namespace:  r.namespace,
		Resilience: StreamCircuitBreaker,
		Name:       r.name,
	}
}
// GetCircuitName returns the name of the hystrix circuit in use.
func (r *circuitBreakerStream) GetCircuitName() string {
	return r.circuit
}
/*
type FallbackStreamOpts struct {
streamOpts
FallbackFunc func(error) (Message, error)
}
func NewFallbackStreamOpts(namespace, name string,
fallbackFunc func(error) (Message, error)) *FallbackStreamOpts {
return &FallbackStreamOpts{
streamOpts: streamOpts{
Namespace: namespace,
Name: name,
Log: Log.New("namespace", namespace,
"gentle", StreamFallback, "name", name),
MetricGet: noopMetric,
},
FallbackFunc: fallbackFunc,
}
}
// fallbackStream transforms what Stream.Get() returns.
type fallbackStream struct {
*streamFields
fallbackFunc func(error) (Message, error)
stream Stream
}
func NewFallbackStream(opts *FallbackStreamOpts, upstream Stream) Stream {
return &fallbackStream{
streamFields: newStreamFields(&opts.streamOpts),
fallbackFunc: opts.FallbackFunc,
stream: upstream,
}
}
func (r *fallbackStream) Get() (Message, error) {
begin := time.Now()
msg, err := r.stream.Get()
if err == nil {
timespan := time.Since(begin).Seconds()
r.log.Debug("[Stream] Get() ok, skip fallbackFunc",
"msg", msg.ID(), "timespan", timespan)
r.mxGet.Observe(timespan, labelOk)
return msg, nil
}
r.log.Debug("[Stream] Get() err, fallbackFunc() ...", "err", err)
// fallback to deal with the err
msg, err = r.fallbackFunc(err)
timespan := time.Since(begin).Seconds()
if err != nil {
r.log.Error("[Stream] fallbackFunc() err",
"err", err, "timespan", timespan)
r.mxGet.Observe(timespan, labelErr)
return nil, err
}
r.log.Debug("[Stream] fallbackFunc() ok",
"msg", msg.ID(), "timespan", timespan)
r.mxGet.Observe(timespan, labelOk)
return msg, nil
}
func (r *fallbackStream) GetNames() *Names {
return &Names{
Namespace: r.namespace,
Resilience: StreamFallback,
Name: r.name,
}
}
type ChannelStreamOpts struct {
streamOpts
Channel <-chan interface{}
}
func NewChannelStreamOpts(namespace, name string, channel <-chan interface{}) *ChannelStreamOpts {
return &ChannelStreamOpts{
streamOpts: streamOpts{
Namespace: namespace,
Name: name,
Log: Log.New("namespace", namespace, "gentle",
StreamChannel, "name", name),
MetricGet: noopMetric,
},
Channel: channel,
}
}
// channelStream forms a stream from a channel.
type channelStream struct {
*streamFields
channel <-chan interface{}
}
// Create a channelStream that gets Messages from $channel.
func NewChannelStream(opts *ChannelStreamOpts) Stream {
return &channelStream{
streamFields: newStreamFields(&opts.streamOpts),
channel: opts.Channel,
}
}
func (r *channelStream) Get() (Message, error) {
begin := time.Now()
r.log.Debug("[Stream] Get() ...")
switch v := (<-r.channel).(type) {
case Message:
timespan := time.Since(begin).Seconds()
r.log.Debug("[Stream] Get() ok", "msgOut", v.ID(),
"timespan", timespan)
r.mxGet.Observe(timespan, labelOk)
return v, nil
case error:
timespan := time.Since(begin).Seconds()
r.log.Debug("[Stream] Get() err", "err", v,
"timespan", timespan)
r.mxGet.Observe(timespan, labelErr)
return nil, v
default:
timespan := time.Since(begin).Seconds()
r.log.Error("[Stream] Get() err, invalid type",
"value", v, "timespan", timespan)
return nil, ErrInvalidType
}
}
func (r *channelStream) GetNames() *Names {
return &Names{
Namespace: r.namespace,
Resilience: StreamChannel,
Name: r.name,
}
}
*/
Remove deprecated (commented-out) semaphoreStream, fallbackStream and channelStream code.
package gentle
import (
"errors"
"github.com/afex/hystrix-go/hystrix"
"github.com/benbjohnson/clock"
"sync"
"time"
)
const (
	// Types of resilience; most often used as part of a RegistryKey.
	StreamRateLimited    = "sRate"
	StreamRetry          = "sRetry"
	StreamBulkhead       = "sBulk"
	StreamCircuitBreaker = "sCircuit"
)
// Metric labels distinguishing successful from failed observations.
var (
	labelOk  = map[string]string{"result": "ok"}
	labelErr = map[string]string{"result": "err"}
)
// Common options for XXXStreamOpts.
type streamOpts struct {
	Namespace string
	Name      string
	Log       Logger
	MetricGet Metric // observes the latency of Get()
}
// Common fields for XXXStream, populated from streamOpts.
type streamFields struct {
	namespace string
	name      string
	log       Logger
	mxGet     Metric // observes the latency of Get()
}
// newStreamFields copies the common option fields into a streamFields.
func newStreamFields(opts *streamOpts) *streamFields {
	f := new(streamFields)
	f.namespace = opts.Namespace
	f.name = opts.Name
	f.log = opts.Log
	f.mxGet = opts.MetricGet
	return f
}
// RateLimitedStreamOpts is the options for NewRateLimitedStream().
type RateLimitedStreamOpts struct {
	streamOpts
	Limiter RateLimit // token source that paces Get() calls
}
// NewRateLimitedStreamOpts returns RateLimitedStreamOpts with defaults:
// a noop Get metric and a namespaced logger.
func NewRateLimitedStreamOpts(namespace, name string, limiter RateLimit) *RateLimitedStreamOpts {
	return &RateLimitedStreamOpts{
		streamOpts: streamOpts{
			Namespace: namespace,
			Name:      name,
			Log: Log.New("namespace", namespace,
				"gentle", StreamRateLimited, "name", name),
			MetricGet: noopMetric,
		},
		Limiter: limiter,
	}
}
// Rate limiting pattern is used to limit the speed of a series of Get().
type rateLimitedStream struct {
	*streamFields
	limiter RateLimit // paces calls to stream.Get()
	stream  Stream    // upstream to pull Messages from
}
// NewRateLimitedStream wraps upstream so that the rate of Get() calls
// is capped by opts.Limiter.
func NewRateLimitedStream(opts *RateLimitedStreamOpts, upstream Stream) Stream {
	s := &rateLimitedStream{streamFields: newStreamFields(&opts.streamOpts)}
	s.limiter = opts.Limiter
	s.stream = upstream
	return s
}
// Get() is blocked when the limit is exceeded.
func (r *rateLimitedStream) Get() (Message, error) {
	begin := time.Now()
	r.log.Debug("[Stream] Get() ...")
	// Block until the limiter grants a permit. NOTE(review): the wait
	// time is folded into the reported timespan — confirm intended.
	r.limiter.Wait(1, 0)
	msg, err := r.stream.Get()
	timespan := time.Since(begin).Seconds()
	if err != nil {
		r.log.Error("[Stream] Get() err", "err", err,
			"timespan", timespan)
		r.mxGet.Observe(timespan, labelErr)
		return nil, err
	}
	r.log.Debug("[Stream] Get() ok", "msgOut", msg.ID(),
		"timespan", timespan)
	r.mxGet.Observe(timespan, labelOk)
	return msg, nil
}
// GetNames returns the identifiers of this stream for registry lookup.
func (r *rateLimitedStream) GetNames() *Names {
	return &Names{
		Namespace:  r.namespace,
		Resilience: StreamRateLimited,
		Name:       r.name,
	}
}
// RetryStreamOpts is the options for NewRetryStream().
type RetryStreamOpts struct {
	streamOpts
	MetricTryNum   Metric // observes the number of attempts per Get()
	Clock          Clock  // injectable clock; eases testing
	BackOffFactory BackOffFactory
}
// NewRetryStreamOpts returns RetryStreamOpts with defaults: noop
// metrics and the real wall clock.
func NewRetryStreamOpts(namespace, name string, backOffFactory BackOffFactory) *RetryStreamOpts {
	return &RetryStreamOpts{
		streamOpts: streamOpts{
			Namespace: namespace,
			Name:      name,
			Log: Log.New("namespace", namespace, "gentle",
				StreamRetry, "name", name),
			MetricGet: noopMetric,
		},
		MetricTryNum:   noopMetric,
		Clock:          clock.New(),
		BackOffFactory: backOffFactory,
	}
}
// retryStream will, when Get() encounters error, back off for some time
// and then retries.
type retryStream struct {
	*streamFields
	obTryNum       Metric // observes the number of attempts per Get()
	clock          Clock  // injectable clock; eases testing of back-off timing
	backOffFactory BackOffFactory
	stream         Stream // upstream to pull Messages from
}
// NewRetryStream wraps upstream so that a failed Get() is retried
// according to the back-off policy configured in opts.
func NewRetryStream(opts *RetryStreamOpts, upstream Stream) Stream {
	s := &retryStream{streamFields: newStreamFields(&opts.streamOpts)}
	s.obTryNum = opts.MetricTryNum
	s.clock = opts.Clock
	s.backOffFactory = opts.BackOffFactory
	s.stream = upstream
	return s
}
// Get retries the upstream Get() with back-off until it succeeds or the
// BackOff signals BackOffStop. It observes the overall latency and the
// number of attempts per call.
func (r *retryStream) Get() (Message, error) {
	begin := r.clock.Now()
	count := 1
	r.log.Debug("[Stream] Get() ...", "count", count)
	// The BackOff is created lazily on the first failure so a
	// successful first attempt never pays for its construction.
	var once sync.Once
	var backOff BackOff
	for {
		msg, err := r.stream.Get()
		if err == nil {
			timespan := r.clock.Now().Sub(begin).Seconds()
			r.log.Debug("[Stream] Get() ok", "msgOut", msg.ID(),
				"timespan", timespan, "count", count)
			r.mxGet.Observe(timespan, labelOk)
			r.obTryNum.Observe(float64(count), labelOk)
			return msg, nil
		}
		once.Do(func() {
			backOff = r.backOffFactory.NewBackOff()
		})
		toWait := backOff.Next()
		// Next() should immediately return but we can't guarantee so
		// timespan is calculated after Next().
		timespan := r.clock.Now().Sub(begin).Seconds()
		if toWait == BackOffStop {
			// Log tag fixed: was "[Streamer]", inconsistent with
			// every other log line emitted by this type.
			r.log.Error("[Stream] Get() err and no more backing off",
				"err", err, "timespan", timespan,
				"count", count)
			r.mxGet.Observe(timespan, labelErr)
			r.obTryNum.Observe(float64(count), labelErr)
			return nil, err
		}
		// timespan in our convention is used to track the overall
		// time of current function. Here we record time
		// passed as "elapsed".
		count++
		r.log.Error("[Stream] Get() err, backing off ...",
			"err", err, "elapsed", timespan, "count", count,
			"wait", toWait)
		r.clock.Sleep(toWait)
	}
}
// GetNames returns the identifiers of this stream for registry lookup.
func (r *retryStream) GetNames() *Names {
	return &Names{
		Namespace:  r.namespace,
		Resilience: StreamRetry,
		Name:       r.name,
	}
}
// BulkheadStreamOpts is the options for NewBulkheadStream().
type BulkheadStreamOpts struct {
	streamOpts
	MaxConcurrency int // upper bound of concurrent Get() calls
}
// NewBulkheadStreamOpts returns BulkheadStreamOpts with defaults (a
// noop Get metric and a namespaced logger). It panics when
// maxConcurrency is not positive, since such a bulkhead could never
// admit traffic — a programming error rather than a runtime condition.
func NewBulkheadStreamOpts(namespace, name string, maxConcurrency int) *BulkheadStreamOpts {
	if maxConcurrency <= 0 {
		// Message fixed to reference the actual parameter name
		// (was "max_concurrent", which matches no identifier).
		panic(errors.New("maxConcurrency must be greater than 0"))
	}
	return &BulkheadStreamOpts{
		streamOpts: streamOpts{
			Namespace: namespace,
			Name:      name,
			Log: Log.New("namespace", namespace, "gentle",
				StreamBulkhead, "name", name),
			MetricGet: noopMetric,
		},
		MaxConcurrency: maxConcurrency,
	}
}
// Bulkhead pattern is used to limit the number of concurrently hanging Get().
// It uses semaphore isolation, similar to the approach used in hystrix.
// http://stackoverflow.com/questions/30391809/what-is-bulkhead-pattern-used-by-hystrix
type bulkheadStream struct {
	*streamFields
	stream    Stream        // upstream to pull Messages from
	semaphore chan struct{} // counting semaphore; capacity == MaxConcurrency
}
// NewBulkheadStream creates a bulkheadStream that allows at maximum
// opts.MaxConcurrency Get() calls to run concurrently.
func NewBulkheadStream(opts *BulkheadStreamOpts, upstream Stream) Stream {
	s := &bulkheadStream{streamFields: newStreamFields(&opts.streamOpts)}
	s.stream = upstream
	s.semaphore = make(chan struct{}, opts.MaxConcurrency)
	return s
}
// Get() fails fast with ErrMaxConcurrency when the concurrency limit is
// exceeded (non-blocking semaphore acquisition via select/default).
func (r *bulkheadStream) Get() (Message, error) {
	begin := time.Now()
	r.log.Debug("[Stream] Get() ...")
	select {
	case r.semaphore <- struct{}{}:
		// Slot acquired; release it when this Get() returns.
		defer func() {
			<-r.semaphore
		}()
		msg, err := r.stream.Get()
		timespan := time.Since(begin).Seconds()
		if err != nil {
			r.log.Error("[Stream] Get() err", "err", err,
				"timespan", timespan)
			r.mxGet.Observe(timespan, labelErr)
			return nil, err
		}
		r.log.Debug("[Stream] Get() ok", "msgOut", msg.ID(),
			"timespan", timespan)
		r.mxGet.Observe(timespan, labelOk)
		return msg, nil
	default:
		// NOTE(review): the rejection path does not observe mxGet —
		// presumably intentional since no upstream work ran; confirm.
		r.log.Error("[Stream] Get() err", "err", ErrMaxConcurrency)
		return nil, ErrMaxConcurrency
	}
}
// GetNames returns the identifiers of this stream for registry lookup.
func (r *bulkheadStream) GetNames() *Names {
	return &Names{
		Namespace:  r.namespace,
		Resilience: StreamBulkhead,
		Name:       r.name,
	}
}
// GetMaxConcurrency returns the configured concurrency limit
// (the capacity of the semaphore channel).
func (r *bulkheadStream) GetMaxConcurrency() int {
	return cap(r.semaphore)
}
// GetCurrentConcurrency returns the number of Get() calls in flight.
func (r *bulkheadStream) GetCurrentConcurrency() int {
	return len(r.semaphore)
}
// CircuitBreakerStreamOpts is the options for NewCircuitBreakerStream().
type CircuitBreakerStreamOpts struct {
	streamOpts
	MetricCbErr Metric // observes circuit-breaker error categories
	Circuit     string // name of the hystrix circuit to use
}
// NewCircuitBreakerStreamOpts returns CircuitBreakerStreamOpts with
// defaults: noop metrics and a logger tagged with the circuit name.
func NewCircuitBreakerStreamOpts(namespace, name, circuit string) *CircuitBreakerStreamOpts {
	return &CircuitBreakerStreamOpts{
		streamOpts: streamOpts{
			Namespace: namespace,
			Name:      name,
			Log: Log.New("namespace", namespace,
				"gentle", StreamCircuitBreaker,
				"name", name, "circuit", circuit),
			MetricGet: noopMetric,
		},
		MetricCbErr: noopMetric,
		Circuit:     circuit,
	}
}
// circuitBreakerStream is a Stream equipped with a circuit-breaker.
type circuitBreakerStream struct {
	*streamFields
	mxCbErr Metric // observes circuit-breaker error categories
	circuit string // hystrix circuit name
	stream  Stream // upstream to pull Messages from
}
// In hystrix-go, a circuit-breaker must be given a unique name.
// NewCircuitBreakerStream() creates a circuitBreakerStream with a
// circuit-breaker named $circuit.
func NewCircuitBreakerStream(opts *CircuitBreakerStreamOpts, stream Stream) Stream {
	// NOTE: this check-then-register is not synchronized; two
	// goroutines registering the same circuit concurrently may
	// overwrite each other's settings.
	allCircuits := hystrix.GetCircuitSettings()
	if _, ok := allCircuits[opts.Circuit]; !ok {
		NewDefaultCircuitBreakerConf().RegisterFor(opts.Circuit)
	}
	return &circuitBreakerStream{
		streamFields: newStreamFields(&opts.streamOpts),
		mxCbErr:      opts.MetricCbErr,
		circuit:      opts.Circuit,
		stream:       stream,
	}
}
// Get runs the upstream Get() inside the hystrix circuit. Hystrix's own
// errors are mapped to this package's ErrCb* values so that stacked
// circuitBreakerStreams cannot misread each other's failures.
func (r *circuitBreakerStream) Get() (Message, error) {
	begin := time.Now()
	r.log.Debug("[Stream] Get() ...")
	// Buffered so the command function can finish even when nobody
	// reads the result (e.g. hystrix already reported a timeout).
	result := make(chan interface{}, 1)
	err := hystrix.Do(r.circuit, func() error {
		msg, err := r.stream.Get()
		timespan := time.Since(begin).Seconds()
		if err != nil {
			r.log.Error("[Stream] Do()::Get() err",
				"err", err, "timespan", timespan)
			return err
		}
		r.log.Debug("[Stream] Do()::Get() ok",
			"msgOut", msg.ID(), "timespan", timespan)
		result <- msg
		return nil
	}, nil)
	// NOTE:
	// err can be from Do()::Get() or hystrix errors if criteria are matched.
	// Do()::Get()'s err, being returned or not, contributes to hystrix metrics
	if err != nil {
		// Deferred so the same log/metric fires for every error arm.
		defer func() {
			timespan := time.Since(begin).Seconds()
			r.log.Error("[Stream] Circuit err", "err", err,
				"timespan", timespan)
			r.mxGet.Observe(timespan, labelErr)
		}()
		// To prevent misinterpreting when wrapping one
		// circuitBreakerStream over another. Hystrix errors are
		// replaced so that Get() won't return any hystrix errors.
		switch err {
		case hystrix.ErrCircuitOpen:
			r.mxCbErr.Observe(1,
				map[string]string{"err": "ErrCbOpen"})
			return nil, ErrCbOpen
		case hystrix.ErrMaxConcurrency:
			r.mxCbErr.Observe(1,
				map[string]string{"err": "ErrCbMaxConcurrency"})
			return nil, ErrCbMaxConcurrency
		case hystrix.ErrTimeout:
			r.mxCbErr.Observe(1,
				map[string]string{"err": "ErrCbTimeout"})
			return nil, ErrCbTimeout
		default:
			r.mxCbErr.Observe(1,
				map[string]string{"err": "NonCbErr"})
			return nil, err
		}
	}
	msgOut := (<-result).(Message)
	timespan := time.Since(begin).Seconds()
	r.log.Debug("[Stream] Get() ok", "msgOut", msgOut.ID(),
		"timespan", timespan)
	r.mxGet.Observe(timespan, labelOk)
	return msgOut, nil
}
// GetNames returns the identifiers of this stream for registry lookup.
func (r *circuitBreakerStream) GetNames() *Names {
	return &Names{
		Namespace:  r.namespace,
		Resilience: StreamCircuitBreaker,
		Name:       r.name,
	}
}
// GetCircuitName returns the name of the hystrix circuit in use.
func (r *circuitBreakerStream) GetCircuitName() string {
	return r.circuit
}
|
package main
import (
"log"
"os"
"strconv"
"time"
"github.com/garyburd/redigo/redis"
"github.com/lancetw/hcfd-forecast/db"
"github.com/lancetw/hcfd-forecast/rain"
"github.com/line/line-bot-sdk-go/linebot"
)
// timeZone is the IANA zone used to localize notification timestamps.
const timeZone = "Asia/Taipei"

// bot is the shared LINE Bot client, initialized in main().
var bot *linebot.Client
// main polls rain.GetInfo every 60 seconds and pushes the retrieved
// warning messages to all registered LINE users.
func main() {
	strID := os.Getenv("ChannelID")
	numID, err := strconv.ParseInt(strID, 10, 64)
	if err != nil {
		log.Fatal("Wrong environment setting about ChannelID")
	}
	bot, err = linebot.NewClient(numID, os.Getenv("ChannelSecret"), os.Getenv("MID"))
	if err != nil {
		log.Println("Bot:", bot, " err:", err)
	}
	for {
		c := db.Connect(os.Getenv("REDISTOGO_URL"))
		targets := []string{"新竹市", "新竹縣", "台中市", "高雄市", "台北市"}
		msgs, token := rain.GetInfo(targets[0], targets)
		n, addErr := c.Do("SADD", "token", token)
		if addErr != nil {
			log.Println("SADD to redis error", addErr, n)
		}
		status, getErr := redis.Int(c.Do("SISMEMBER", "token", token))
		if getErr != nil {
			// BUG FIX: the original logged the stale outer `err`
			// (left over from linebot.NewClient) instead of getErr.
			log.Println(getErr)
		}
		if status == 1 {
			log.Println("\n***************************************")
			users, smembersErr := redis.Strings(c.Do("SMEMBERS", "user"))
			if smembersErr != nil {
				log.Println("SMEMBERS redis error", smembersErr)
			} else {
				// NOTE(review): `local` is computed but its value is
				// never consumed below — presumably intended for
				// timestamped logging; confirm before removing.
				local := time.Now()
				location, err := time.LoadLocation(timeZone)
				if err == nil {
					local = local.In(location)
				}
				for _, contentTo := range users {
					for _, msg := range msgs {
						_, err = bot.SendText([]string{contentTo}, msg)
						if err != nil {
							log.Println(err)
						}
					}
				}
			}
		}
		// BUG FIX: was `defer c.Close()`. Deferred calls only run when
		// the function returns, and main never returns from this loop,
		// so every iteration leaked a redis connection. Close the
		// connection explicitly at the end of each iteration instead.
		c.Close()
		time.Sleep(60 * time.Second)
	}
}
Fix typo in notification condition: notify when the token is new (status == 0), not when it is already present (status == 1).
package main
import (
"log"
"os"
"strconv"
"time"
"github.com/garyburd/redigo/redis"
"github.com/lancetw/hcfd-forecast/db"
"github.com/lancetw/hcfd-forecast/rain"
"github.com/line/line-bot-sdk-go/linebot"
)
// timeZone is the IANA zone used to localize notification timestamps.
const timeZone = "Asia/Taipei"

// bot is the shared LINE Bot client, initialized in main().
var bot *linebot.Client
// main polls rain.GetInfo every 60 seconds and pushes the retrieved
// warning messages to all registered LINE users when the token has not
// been seen before (status == 0).
func main() {
	strID := os.Getenv("ChannelID")
	numID, err := strconv.ParseInt(strID, 10, 64)
	if err != nil {
		log.Fatal("Wrong environment setting about ChannelID")
	}
	bot, err = linebot.NewClient(numID, os.Getenv("ChannelSecret"), os.Getenv("MID"))
	if err != nil {
		log.Println("Bot:", bot, " err:", err)
	}
	for {
		c := db.Connect(os.Getenv("REDISTOGO_URL"))
		targets := []string{"新竹市", "新竹縣", "台中市", "高雄市", "台北市"}
		msgs, token := rain.GetInfo(targets[0], targets)
		n, addErr := c.Do("SADD", "token", token)
		if addErr != nil {
			log.Println("SADD to redis error", addErr, n)
		}
		status, getErr := redis.Int(c.Do("SISMEMBER", "token", token))
		if getErr != nil {
			// BUG FIX: the original logged the stale outer `err`
			// (left over from linebot.NewClient) instead of getErr.
			log.Println(getErr)
		}
		// NOTE(review): SISMEMBER runs after SADD of the same token, so
		// status is always 1 here and this branch never fires — the
		// membership check probably belongs before the SADD; confirm.
		if status == 0 {
			log.Println("\n***************************************")
			users, smembersErr := redis.Strings(c.Do("SMEMBERS", "user"))
			if smembersErr != nil {
				log.Println("SMEMBERS redis error", smembersErr)
			} else {
				// NOTE(review): `local` is computed but its value is
				// never consumed below — presumably intended for
				// timestamped logging; confirm before removing.
				local := time.Now()
				location, err := time.LoadLocation(timeZone)
				if err == nil {
					local = local.In(location)
				}
				for _, contentTo := range users {
					for _, msg := range msgs {
						_, err = bot.SendText([]string{contentTo}, msg)
						if err != nil {
							log.Println(err)
						}
					}
				}
			}
		}
		// BUG FIX: was `defer c.Close()`. Deferred calls only run when
		// the function returns, and main never returns from this loop,
		// so every iteration leaked a redis connection. Close the
		// connection explicitly at the end of each iteration instead.
		c.Close()
		time.Sleep(60 * time.Second)
	}
}
|
// Copyright (c) 2017-2018 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3_test
import (
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
"github.com/projectcalico/libcalico-go/lib/backend/model"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"context"
"github.com/projectcalico/libcalico-go/lib/apiconfig"
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
"github.com/projectcalico/libcalico-go/lib/backend"
"github.com/projectcalico/libcalico-go/lib/backend/k8s/conversion"
"github.com/projectcalico/libcalico-go/lib/clientv3"
"github.com/projectcalico/libcalico-go/lib/options"
"github.com/projectcalico/libcalico-go/lib/testutils"
"github.com/projectcalico/libcalico-go/lib/watch"
)
var _ = testutils.E2eDatastoreDescribe("NetworkPolicy tests", testutils.DatastoreAll, func(config apiconfig.CalicoAPIConfig) {
ctx := context.Background()
order1 := 99.999
order2 := 22.222
namespace1 := "namespace-1"
namespace2 := "namespace-2"
name1 := "networkp-1"
name2 := "networkp-2"
spec1 := apiv3.NetworkPolicySpec{
Order: &order1,
Ingress: []apiv3.Rule{testutils.InRule1, testutils.InRule2},
Egress: []apiv3.Rule{testutils.EgressRule1, testutils.EgressRule2},
Selector: "thing == 'value'",
}
spec2 := apiv3.NetworkPolicySpec{
Order: &order2,
Ingress: []apiv3.Rule{testutils.InRule2, testutils.InRule1},
Egress: []apiv3.Rule{testutils.EgressRule2, testutils.EgressRule1},
Selector: "thing2 == 'value2'",
}
// Specs with only ingress or egress rules, without Types set.
ingressSpec1 := spec1
ingressSpec1.Egress = nil
egressSpec2 := spec2
egressSpec2.Ingress = nil
// Specs with ingress and egress rules, with Types set to just ingress or egress.
ingressTypesSpec1 := spec1
ingressTypesSpec1.Types = ingress
egressTypesSpec2 := spec2
egressTypesSpec2.Types = egress
DescribeTable("NetworkPolicy e2e CRUD tests",
func(namespace1, namespace2, name1, name2 string, spec1, spec2 apiv3.NetworkPolicySpec, types1, types2 []apiv3.PolicyType) {
c, err := clientv3.New(config)
Expect(err).NotTo(HaveOccurred())
be, err := backend.NewClient(config)
Expect(err).NotTo(HaveOccurred())
be.Clean()
By("Updating the NetworkPolicy before it is created")
var rv string
if config.Spec.DatastoreType != apiconfig.Kubernetes {
rv = "1234"
} else {
// Resource version for KDD is a combination of both the CRD and K8s NP backed
// resources separated by a slash.
rv = conversion.Converter{}.JoinNetworkPolicyRevisions("1234", "5678")
}
_, outError := c.NetworkPolicies().Update(ctx, &apiv3.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: namespace1, Name: name1, ResourceVersion: rv, CreationTimestamp: metav1.Now(), UID: "test-fail-networkpolicy"},
Spec: spec1,
}, options.SetOptions{})
Expect(outError).To(HaveOccurred())
Expect(outError.Error()).To(ContainSubstring("resource does not exist: NetworkPolicy(" + namespace1 + "/default." + name1 + ") with error:"))
By("Attempting to creating a new NetworkPolicy with name1/spec1 and a non-empty ResourceVersion")
_, outError = c.NetworkPolicies().Create(ctx, &apiv3.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Name: name1, ResourceVersion: rv},
Spec: spec1,
}, options.SetOptions{})
Expect(outError).To(HaveOccurred())
Expect(outError.Error()).To(Equal("error with field Metadata.ResourceVersion = '" + rv + "' (field must not be set for a Create request)"))
By("Creating a new NetworkPolicy with namespace1/name1/spec1")
res1, outError := c.NetworkPolicies().Create(ctx, &apiv3.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: namespace1, Name: name1},
Spec: spec1,
}, options.SetOptions{})
Expect(outError).NotTo(HaveOccurred())
spec1.Types = types1
Expect(res1).To(MatchResource(apiv3.KindNetworkPolicy, namespace1, name1, spec1))
// Track the version of the original data for name1.
rv1_1 := res1.ResourceVersion
By("Attempting to create the same NetworkPolicy with name1 but with spec2")
_, outError = c.NetworkPolicies().Create(ctx, &apiv3.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: namespace1, Name: name1},
Spec: spec2,
}, options.SetOptions{})
Expect(outError).To(HaveOccurred())
Expect(outError.Error()).To(Equal("resource already exists: NetworkPolicy(" + namespace1 + "/default." + name1 + ")"))
By("Getting NetworkPolicy (name1) and comparing the output against spec1")
res, outError := c.NetworkPolicies().Get(ctx, namespace1, name1, options.GetOptions{})
Expect(outError).NotTo(HaveOccurred())
Expect(res).To(MatchResource(apiv3.KindNetworkPolicy, namespace1, name1, spec1))
Expect(res.ResourceVersion).To(Equal(res1.ResourceVersion))
By("Getting NetworkPolicy (name2) before it is created")
_, outError = c.NetworkPolicies().Get(ctx, namespace2, name2, options.GetOptions{})
Expect(outError).To(HaveOccurred())
Expect(outError.Error()).To(ContainSubstring("resource does not exist: NetworkPolicy(" + namespace2 + "/default." + name2 + ") with error:"))
By("Listing all the NetworkPolicies in namespace1, expecting a single result with name1/spec1")
outList, outError := c.NetworkPolicies().List(ctx, options.ListOptions{Namespace: namespace1})
Expect(outError).NotTo(HaveOccurred())
Expect(outList.Items).To(ConsistOf(
testutils.Resource(apiv3.KindNetworkPolicy, namespace1, name1, spec1),
))
By("Creating a new NetworkPolicy with name2/spec2")
res2, outError := c.NetworkPolicies().Create(ctx, &apiv3.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: namespace2, Name: name2},
Spec: spec2,
}, options.SetOptions{})
Expect(outError).NotTo(HaveOccurred())
spec2.Types = types2
Expect(res2).To(MatchResource(apiv3.KindNetworkPolicy, namespace2, name2, spec2))
By("Getting NetworkPolicy (name2) and comparing the output against spec2")
res, outError = c.NetworkPolicies().Get(ctx, namespace2, name2, options.GetOptions{})
Expect(outError).NotTo(HaveOccurred())
Expect(res).To(MatchResource(apiv3.KindNetworkPolicy, namespace2, name2, spec2))
Expect(res.ResourceVersion).To(Equal(res2.ResourceVersion))
By("Listing all the NetworkPolicies using an empty namespace (all-namespaces), expecting a two results with name1/spec1 and name2/spec2")
outList, outError = c.NetworkPolicies().List(ctx, options.ListOptions{})
Expect(outError).NotTo(HaveOccurred())
Expect(outList.Items).To(ConsistOf(
testutils.Resource(apiv3.KindNetworkPolicy, namespace1, name1, spec1),
testutils.Resource(apiv3.KindNetworkPolicy, namespace2, name2, spec2),
))
By("Listing all the NetworkPolicies in namespace2, expecting a one results with name2/spec2")
outList, outError = c.NetworkPolicies().List(ctx, options.ListOptions{Namespace: namespace2})
Expect(outError).NotTo(HaveOccurred())
Expect(outList.Items).To(ConsistOf(
testutils.Resource(apiv3.KindNetworkPolicy, namespace2, name2, spec2),
))
By("Updating NetworkPolicy name1 with spec2")
res1.Spec = spec2
res1, outError = c.NetworkPolicies().Update(ctx, res1, options.SetOptions{})
Expect(outError).NotTo(HaveOccurred())
Expect(res1).To(MatchResource(apiv3.KindNetworkPolicy, namespace1, name1, spec2))
By("Attempting to update the NetworkPolicy without a Creation Timestamp")
res, outError = c.NetworkPolicies().Update(ctx, &apiv3.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: namespace1, Name: name1, ResourceVersion: rv, UID: "test-fail-networkpolicy"},
Spec: spec1,
}, options.SetOptions{})
Expect(outError).To(HaveOccurred())
Expect(res).To(BeNil())
Expect(outError.Error()).To(Equal("error with field Metadata.CreationTimestamp = '0001-01-01 00:00:00 +0000 UTC' (field must be set for an Update request)"))
By("Attempting to update the NetworkPolicy without a UID")
res, outError = c.NetworkPolicies().Update(ctx, &apiv3.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: namespace1, Name: name1, ResourceVersion: rv, CreationTimestamp: metav1.Now()},
Spec: spec1,
}, options.SetOptions{})
Expect(outError).To(HaveOccurred())
Expect(res).To(BeNil())
Expect(outError.Error()).To(Equal("error with field Metadata.UID = '' (field must be set for an Update request)"))
// Track the version of the updated name1 data.
rv1_2 := res1.ResourceVersion
By("Updating NetworkPolicy name1 without specifying a resource version")
res1.Spec = spec1
res1.ObjectMeta.ResourceVersion = ""
_, outError = c.NetworkPolicies().Update(ctx, res1, options.SetOptions{})
Expect(outError).To(HaveOccurred())
Expect(outError.Error()).To(Equal("error with field Metadata.ResourceVersion = '' (field must be set for an Update request)"))
By("Updating NetworkPolicy name1 using the previous resource version")
res1.Spec = spec1
res1.ResourceVersion = rv1_1
_, outError = c.NetworkPolicies().Update(ctx, res1, options.SetOptions{})
Expect(outError).To(HaveOccurred())
Expect(outError.Error()).To(Equal("update conflict: NetworkPolicy(" + namespace1 + "/default." + name1 + ")"))
if config.Spec.DatastoreType != apiconfig.Kubernetes {
By("Getting NetworkPolicy (name1) with the original resource version and comparing the output against spec1")
res, outError = c.NetworkPolicies().Get(ctx, namespace1, name1, options.GetOptions{ResourceVersion: rv1_1})
Expect(outError).NotTo(HaveOccurred())
Expect(res).To(MatchResource(apiv3.KindNetworkPolicy, namespace1, name1, spec1))
Expect(res.ResourceVersion).To(Equal(rv1_1))
}
By("Getting NetworkPolicy (name1) with the updated resource version and comparing the output against spec2")
res, outError = c.NetworkPolicies().Get(ctx, namespace1, name1, options.GetOptions{ResourceVersion: rv1_2})
Expect(outError).NotTo(HaveOccurred())
Expect(res).To(MatchResource(apiv3.KindNetworkPolicy, namespace1, name1, spec2))
Expect(res.ResourceVersion).To(Equal(rv1_2))
if config.Spec.DatastoreType != apiconfig.Kubernetes {
By("Listing NetworkPolicies with the original resource version and checking for a single result with name1/spec1")
outList, outError = c.NetworkPolicies().List(ctx, options.ListOptions{Namespace: namespace1, ResourceVersion: rv1_1})
Expect(outError).NotTo(HaveOccurred())
Expect(outList.Items).To(ConsistOf(
testutils.Resource(apiv3.KindNetworkPolicy, namespace1, name1, spec1),
))
}
By("Listing NetworkPolicies (all namespaces) with the latest resource version and checking for two results with name1/spec2 and name2/spec2")
outList, outError = c.NetworkPolicies().List(ctx, options.ListOptions{})
Expect(outError).NotTo(HaveOccurred())
Expect(outList.Items).To(ConsistOf(
testutils.Resource(apiv3.KindNetworkPolicy, namespace1, name1, spec2),
testutils.Resource(apiv3.KindNetworkPolicy, namespace2, name2, spec2),
))
if config.Spec.DatastoreType != apiconfig.Kubernetes {
By("Deleting NetworkPolicy (name1) with the old resource version")
_, outError = c.NetworkPolicies().Delete(ctx, namespace1, name1, options.DeleteOptions{ResourceVersion: rv1_1})
Expect(outError).To(HaveOccurred())
Expect(outError.Error()).To(Equal("update conflict: NetworkPolicy(" + namespace1 + "/default." + name1 + ")"))
}
By("Deleting NetworkPolicy (name1) with the new resource version")
dres, outError := c.NetworkPolicies().Delete(ctx, namespace1, name1, options.DeleteOptions{ResourceVersion: rv1_2})
Expect(outError).NotTo(HaveOccurred())
Expect(dres).To(MatchResource(apiv3.KindNetworkPolicy, namespace1, name1, spec2))
if config.Spec.DatastoreType != apiconfig.Kubernetes {
By("Updating NetworkPolicy name2 with a 2s TTL and waiting for the entry to be deleted")
_, outError = c.NetworkPolicies().Update(ctx, res2, options.SetOptions{TTL: 2 * time.Second})
Expect(outError).NotTo(HaveOccurred())
time.Sleep(1 * time.Second)
_, outError = c.NetworkPolicies().Get(ctx, namespace2, name2, options.GetOptions{})
Expect(outError).NotTo(HaveOccurred())
time.Sleep(2 * time.Second)
_, outError = c.NetworkPolicies().Get(ctx, namespace2, name2, options.GetOptions{})
Expect(outError).To(HaveOccurred())
Expect(outError.Error()).To(ContainSubstring("resource does not exist: NetworkPolicy(" + namespace2 + "/default." + name2 + ") with error:"))
By("Creating NetworkPolicy name2 with a 2s TTL and waiting for the entry to be deleted")
_, outError = c.NetworkPolicies().Create(ctx, &apiv3.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: namespace2, Name: name2},
Spec: spec2,
}, options.SetOptions{TTL: 2 * time.Second})
Expect(outError).NotTo(HaveOccurred())
time.Sleep(1 * time.Second)
_, outError = c.NetworkPolicies().Get(ctx, namespace2, name2, options.GetOptions{})
Expect(outError).NotTo(HaveOccurred())
time.Sleep(2 * time.Second)
_, outError = c.NetworkPolicies().Get(ctx, namespace2, name2, options.GetOptions{})
Expect(outError).To(HaveOccurred())
Expect(outError.Error()).To(ContainSubstring("resource does not exist: NetworkPolicy(" + namespace2 + "/default." + name2 + ") with error:"))
}
if config.Spec.DatastoreType == apiconfig.Kubernetes {
By("Attempting to deleting NetworkPolicy (name2) again")
dres, outError = c.NetworkPolicies().Delete(ctx, namespace2, name2, options.DeleteOptions{})
Expect(outError).NotTo(HaveOccurred())
Expect(dres).To(MatchResource(apiv3.KindNetworkPolicy, namespace2, name2, spec2))
}
By("Attempting to delete NetworkPolicy (name2) again")
_, outError = c.NetworkPolicies().Delete(ctx, namespace2, name2, options.DeleteOptions{})
Expect(outError).To(HaveOccurred())
Expect(outError.Error()).To(ContainSubstring("resource does not exist: NetworkPolicy(" + namespace2 + "/default." + name2 + ") with error:"))
By("Listing all NetworkPolicies and expecting no items")
outList, outError = c.NetworkPolicies().List(ctx, options.ListOptions{})
Expect(outError).NotTo(HaveOccurred())
Expect(outList.Items).To(HaveLen(0))
By("Getting NetworkPolicy (name2) and expecting an error")
_, outError = c.NetworkPolicies().Get(ctx, namespace2, name2, options.GetOptions{})
Expect(outError).To(HaveOccurred())
Expect(outError.Error()).To(ContainSubstring("resource does not exist: NetworkPolicy(" + namespace2 + "/default." + name2 + ") with error:"))
},
// Pass two fully populated PolicySpecs and expect the series of operations to succeed.
Entry("Two fully populated PolicySpecs",
namespace1, namespace2,
name1, name2,
spec1, spec2,
ingressEgress, ingressEgress,
),
// Check defaulting for policies with ingress rules and egress rules only.
Entry("Ingress-only and egress-only policies",
namespace1, namespace2,
name1, name2,
ingressSpec1, egressSpec2,
ingress, egress,
),
// Check non-defaulting for policies with explicit Types value.
Entry("Policies with explicit ingress and egress Types",
namespace1, namespace2,
name1, name2,
ingressTypesSpec1, egressTypesSpec2,
ingress, egress,
),
)
// NetworkPolicy watch functionality: starts watchers at various resource
// versions (a specific revision, the initial revision, and no revision at
// all) and checks that each observes exactly the expected event stream.
Describe("NetworkPolicy watch functionality", func() {
	It("should handle watch events for different resource versions and event types", func() {
		c, err := clientv3.New(config)
		Expect(err).NotTo(HaveOccurred())
		be, err := backend.NewClient(config)
		Expect(err).NotTo(HaveOccurred())
		be.Clean()

		By("Listing NetworkPolicies with the latest resource version and checking there are no results")
		outList, outError := c.NetworkPolicies().List(ctx, options.ListOptions{})
		Expect(outError).NotTo(HaveOccurred())
		Expect(outList.Items).To(HaveLen(0))
		// rev0 is the datastore revision before any policies exist.
		rev0 := outList.ResourceVersion

		By("Configuring a NetworkPolicy namespace1/name1/spec1 and storing the response")
		outRes1, err := c.NetworkPolicies().Create(
			ctx,
			&apiv3.NetworkPolicy{
				ObjectMeta: metav1.ObjectMeta{Namespace: namespace1, Name: name1},
				Spec:       spec1,
			},
			options.SetOptions{},
		)
		// Bug fix: this error was previously ignored; a failed Create would
		// otherwise surface as a confusing nil-pointer panic on outRes1 below.
		Expect(err).NotTo(HaveOccurred())
		rev1 := outRes1.ResourceVersion

		By("Configuring a NetworkPolicy namespace2/name2/spec2 and storing the response")
		outRes2, err := c.NetworkPolicies().Create(
			ctx,
			&apiv3.NetworkPolicy{
				ObjectMeta: metav1.ObjectMeta{Namespace: namespace2, Name: name2},
				Spec:       spec2,
			},
			options.SetOptions{},
		)
		// Bug fix: this error was previously ignored too.
		Expect(err).NotTo(HaveOccurred())

		By("Starting a watcher from revision rev1 - this should skip the first creation")
		w, err := c.NetworkPolicies().Watch(ctx, options.ListOptions{ResourceVersion: rev1})
		Expect(err).NotTo(HaveOccurred())
		testWatcher1 := testutils.NewTestResourceWatch(config.Spec.DatastoreType, w)
		defer testWatcher1.Stop()

		By("Deleting res1")
		_, err = c.NetworkPolicies().Delete(ctx, namespace1, name1, options.DeleteOptions{})
		Expect(err).NotTo(HaveOccurred())

		By("Checking for two events, create res2 and delete res1")
		testWatcher1.ExpectEvents(apiv3.KindNetworkPolicy, []watch.Event{
			{
				Type:   watch.Added,
				Object: outRes2,
			},
			{
				Type:     watch.Deleted,
				Previous: outRes1,
			},
		})
		testWatcher1.Stop()

		By("Starting a watcher from rev0 - this should get all events")
		w, err = c.NetworkPolicies().Watch(ctx, options.ListOptions{ResourceVersion: rev0})
		Expect(err).NotTo(HaveOccurred())
		testWatcher2 := testutils.NewTestResourceWatch(config.Spec.DatastoreType, w)
		defer testWatcher2.Stop()

		By("Modifying res2")
		outRes3, err := c.NetworkPolicies().Update(
			ctx,
			&apiv3.NetworkPolicy{
				ObjectMeta: outRes2.ObjectMeta,
				Spec:       spec1,
			},
			options.SetOptions{},
		)
		Expect(err).NotTo(HaveOccurred())
		testWatcher2.ExpectEvents(apiv3.KindNetworkPolicy, []watch.Event{
			{
				Type:   watch.Added,
				Object: outRes1,
			},
			{
				Type:   watch.Added,
				Object: outRes2,
			},
			{
				Type:     watch.Deleted,
				Previous: outRes1,
			},
			{
				Type:     watch.Modified,
				Previous: outRes2,
				Object:   outRes3,
			},
		})
		testWatcher2.Stop()

		// Only etcdv3 supports watching a specific instance of a resource.
		if config.Spec.DatastoreType == apiconfig.EtcdV3 {
			By("Starting a watcher from rev0 watching name1 - this should get all events for name1")
			w, err = c.NetworkPolicies().Watch(ctx, options.ListOptions{Namespace: namespace1, Name: name1, ResourceVersion: rev0})
			Expect(err).NotTo(HaveOccurred())
			testWatcher2_1 := testutils.NewTestResourceWatch(config.Spec.DatastoreType, w)
			defer testWatcher2_1.Stop()
			testWatcher2_1.ExpectEvents(apiv3.KindNetworkPolicy, []watch.Event{
				{
					Type:   watch.Added,
					Object: outRes1,
				},
				{
					Type:     watch.Deleted,
					Previous: outRes1,
				},
			})
			testWatcher2_1.Stop()
		}

		By("Starting a watcher not specifying a rev - expect the current snapshot")
		w, err = c.NetworkPolicies().Watch(ctx, options.ListOptions{})
		Expect(err).NotTo(HaveOccurred())
		testWatcher3 := testutils.NewTestResourceWatch(config.Spec.DatastoreType, w)
		defer testWatcher3.Stop()
		testWatcher3.ExpectEvents(apiv3.KindNetworkPolicy, []watch.Event{
			{
				Type:   watch.Added,
				Object: outRes3,
			},
		})
		testWatcher3.Stop()

		By("Starting a watcher at rev0 in namespace1 - expect the events for policy in namespace1")
		w, err = c.NetworkPolicies().Watch(ctx, options.ListOptions{Namespace: namespace1, ResourceVersion: rev0})
		Expect(err).NotTo(HaveOccurred())
		testWatcher4 := testutils.NewTestResourceWatch(config.Spec.DatastoreType, w)
		defer testWatcher4.Stop()
		testWatcher4.ExpectEvents(apiv3.KindNetworkPolicy, []watch.Event{
			{
				Type:   watch.Added,
				Object: outRes1,
			},
			{
				Type:     watch.Deleted,
				Previous: outRes1,
			},
		})
		testWatcher4.Stop()
	})
})
// These tests check that the names we use on the API properly round-trip. In particular,
// k8s and OpenStack policies have special prefixes, which should be preserved. Other
// names get stored with a prefix, for consistency but the API returns them without the
// prefix.
//
// Each entry is (API name, backend storage name).
nameNormalizationTests := []TableEntry{
	// OpenStack names should round-trip, including their prefix.
	Entry("OpenStack policy", "ossg.default.group1", "ossg.default.group1"),
	// As should normal names, which gain the "default." prefix in storage.
	// (Fixed: this entry previously reused the "OpenStack policy" label.)
	Entry("Normal policy", "foo-bar", "default.foo-bar"),
}
// Use the apiconfig constant for consistency with the rest of the file.
if config.Spec.DatastoreType != apiconfig.Kubernetes {
	// Only test writing a knp-prefixed policy if we're not backed by KDD. In KDD,
	// the knp-prefixed policies are derived from k8s data so it doesn't make sense
	// to write them through our API.
	knpName := "knp.default.a-name"
	nameNormalizationTests = append(nameNormalizationTests,
		Entry("KDD policy", knpName, knpName),
	)
}
// name round-tripping tests: create a policy through the API as `name`,
// verify it is stored in the backend under `backendName`, and check that
// every CRUD method converts between the two forms correctly.
DescribeTable("name round-tripping tests",
	func(name, backendName string) {
		c, err := clientv3.New(config)
		Expect(err).NotTo(HaveOccurred())
		be, err := backend.NewClient(config)
		Expect(err).NotTo(HaveOccurred())
		be.Clean()

		By("Attempting to creating a new NetworkPolicy with name: " + name)
		inNp := &apiv3.NetworkPolicy{
			ObjectMeta: metav1.ObjectMeta{Namespace: namespace1, Name: name},
			Spec:       ingressTypesSpec1,
		}
		np, outError := c.NetworkPolicies().Create(ctx, inNp, options.SetOptions{})
		Expect(outError).NotTo(HaveOccurred())
		Expect(inNp.GetName()).To(Equal(name), "Create() shouldn't touch input data")
		Expect(np.GetName()).To(Equal(name), "Create() should return the data as we'd read it")

		By("Reading back the raw data with its normalized name: " + backendName)
		// Make sure that, where the name and the storage name differ, we do the write with
		// the storage name. Then the assertions below verify that all the CRUD methods
		// do the right conversion too.
		kv, err := be.Get(ctx, model.ResourceKey{
			Kind:      apiv3.KindNetworkPolicy,
			Namespace: namespace1,
			Name:      backendName,
		}, "")
		Expect(err).NotTo(HaveOccurred())
		Expect(kv.Value.(*apiv3.NetworkPolicy).Spec).To(Equal(ingressTypesSpec1))

		By("Getting the right policy by name")
		np, err = c.NetworkPolicies().Get(ctx, namespace1, name, options.GetOptions{})
		Expect(err).NotTo(HaveOccurred())
		Expect(np.GetName()).To(Equal(name))
		Expect(np.Spec).To(Equal(ingressTypesSpec1))

		By("Updating the policy")
		np.Spec = egressTypesSpec2
		np, err = c.NetworkPolicies().Update(ctx, np, options.SetOptions{})
		Expect(err).NotTo(HaveOccurred())

		By("Getting the right policy")
		np, err = c.NetworkPolicies().Get(ctx, namespace1, name, options.GetOptions{})
		Expect(err).NotTo(HaveOccurred())
		Expect(np.GetName()).To(Equal(name))
		Expect(np.Spec).To(Equal(egressTypesSpec2))

		By("Listing the policy with correct name (no query options)")
		nps, err := c.NetworkPolicies().List(ctx, options.ListOptions{Namespace: namespace1})
		Expect(err).NotTo(HaveOccurred())
		var names []string
		for _, np := range nps.Items {
			names = append(names, np.GetName())
		}
		Expect(names).To(ContainElement(name))
		// Bug fix: this previously compared `name != name`, which is always
		// false, so the negative assertion never ran. The intent is: when the
		// API name differs from the backend storage name, the storage name
		// must never leak out through the List API.
		if name != backendName {
			Expect(names).NotTo(ContainElement(backendName))
		}

		By("Listing the policy with correct name (list by name)")
		nps, err = c.NetworkPolicies().List(ctx,
			options.ListOptions{Namespace: namespace1, Name: name})
		Expect(err).NotTo(HaveOccurred())
		names = nil
		for _, np := range nps.Items {
			names = append(names, np.GetName())
		}
		Expect(names).To(ConsistOf(name))

		By("Deleting the policy via the name")
		np, err = c.NetworkPolicies().Delete(ctx, namespace1, name, options.DeleteOptions{})
		Expect(err).NotTo(HaveOccurred())
		if np != nil {
			Expect(np.GetName()).To(Equal(name))
		}
	},
	nameNormalizationTests...,
)
})
Unit test that reproduces the watch problem.
Signed-off-by: Spike Curtis <73cc33b96ddcddc98995c569e3a0bca29451c8a8@tigera.io>
// Copyright (c) 2017-2018 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3_test
import (
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
"github.com/projectcalico/libcalico-go/lib/backend/model"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"context"
"github.com/projectcalico/libcalico-go/lib/apiconfig"
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
"github.com/projectcalico/libcalico-go/lib/backend"
"github.com/projectcalico/libcalico-go/lib/backend/k8s/conversion"
"github.com/projectcalico/libcalico-go/lib/clientv3"
"github.com/projectcalico/libcalico-go/lib/options"
"github.com/projectcalico/libcalico-go/lib/testutils"
"github.com/projectcalico/libcalico-go/lib/watch"
)
var _ = testutils.E2eDatastoreDescribe("NetworkPolicy tests", testutils.DatastoreAll, func(config apiconfig.CalicoAPIConfig) {
// Shared fixtures for the tests below: two namespaces, two policy names,
// two fully populated specs, and derived spec variants.
ctx := context.Background()
order1 := 99.999
order2 := 22.222
namespace1 := "namespace-1"
namespace2 := "namespace-2"
name1 := "networkp-1"
name2 := "networkp-2"
// spec1/spec2 carry both ingress and egress rules; Types is left unset here
// (the CRUD table entries below supply the expected Types values).
spec1 := apiv3.NetworkPolicySpec{
	Order:    &order1,
	Ingress:  []apiv3.Rule{testutils.InRule1, testutils.InRule2},
	Egress:   []apiv3.Rule{testutils.EgressRule1, testutils.EgressRule2},
	Selector: "thing == 'value'",
}
spec2 := apiv3.NetworkPolicySpec{
	Order:    &order2,
	Ingress:  []apiv3.Rule{testutils.InRule2, testutils.InRule1},
	Egress:   []apiv3.Rule{testutils.EgressRule2, testutils.EgressRule1},
	Selector: "thing2 == 'value2'",
}
// Specs with only ingress or egress rules, without Types set.
ingressSpec1 := spec1
ingressSpec1.Egress = nil
egressSpec2 := spec2
egressSpec2.Ingress = nil
// Specs with ingress and egress rules, with Types set to just ingress or egress.
ingressTypesSpec1 := spec1
ingressTypesSpec1.Types = ingress
egressTypesSpec2 := spec2
egressTypesSpec2.Types = egress
// NetworkPolicy e2e CRUD tests: each table entry drives the full
// Create/Get/List/Update/Delete lifecycle for a pair of policies
// (namespace1/name1 and namespace2/name2), including validation-failure
// paths, resource-version conflict handling and (non-Kubernetes only)
// TTL-based expiry. types1/types2 are the PolicyTypes expected on each
// spec after the datastore has applied its defaulting.
DescribeTable("NetworkPolicy e2e CRUD tests",
	func(namespace1, namespace2, name1, name2 string, spec1, spec2 apiv3.NetworkPolicySpec, types1, types2 []apiv3.PolicyType) {
		c, err := clientv3.New(config)
		Expect(err).NotTo(HaveOccurred())
		be, err := backend.NewClient(config)
		Expect(err).NotTo(HaveOccurred())
		// Start each entry from an empty datastore.
		be.Clean()
		By("Updating the NetworkPolicy before it is created")
		var rv string
		if config.Spec.DatastoreType != apiconfig.Kubernetes {
			rv = "1234"
		} else {
			// Resource version for KDD is a combination of both the CRD and K8s NP backed
			// resources separated by a slash.
			rv = conversion.Converter{}.JoinNetworkPolicyRevisions("1234", "5678")
		}
		_, outError := c.NetworkPolicies().Update(ctx, &apiv3.NetworkPolicy{
			ObjectMeta: metav1.ObjectMeta{Namespace: namespace1, Name: name1, ResourceVersion: rv, CreationTimestamp: metav1.Now(), UID: "test-fail-networkpolicy"},
			Spec:       spec1,
		}, options.SetOptions{})
		Expect(outError).To(HaveOccurred())
		// Note: the error text shows the stored name, which carries a "default." prefix.
		Expect(outError.Error()).To(ContainSubstring("resource does not exist: NetworkPolicy(" + namespace1 + "/default." + name1 + ") with error:"))
		By("Attempting to creating a new NetworkPolicy with name1/spec1 and a non-empty ResourceVersion")
		_, outError = c.NetworkPolicies().Create(ctx, &apiv3.NetworkPolicy{
			ObjectMeta: metav1.ObjectMeta{Name: name1, ResourceVersion: rv},
			Spec:       spec1,
		}, options.SetOptions{})
		Expect(outError).To(HaveOccurred())
		Expect(outError.Error()).To(Equal("error with field Metadata.ResourceVersion = '" + rv + "' (field must not be set for a Create request)"))
		By("Creating a new NetworkPolicy with namespace1/name1/spec1")
		res1, outError := c.NetworkPolicies().Create(ctx, &apiv3.NetworkPolicy{
			ObjectMeta: metav1.ObjectMeta{Namespace: namespace1, Name: name1},
			Spec:       spec1,
		}, options.SetOptions{})
		Expect(outError).NotTo(HaveOccurred())
		// Fold the expected (defaulted) Types into the spec we compare against.
		spec1.Types = types1
		Expect(res1).To(MatchResource(apiv3.KindNetworkPolicy, namespace1, name1, spec1))
		// Track the version of the original data for name1.
		rv1_1 := res1.ResourceVersion
		By("Attempting to create the same NetworkPolicy with name1 but with spec2")
		_, outError = c.NetworkPolicies().Create(ctx, &apiv3.NetworkPolicy{
			ObjectMeta: metav1.ObjectMeta{Namespace: namespace1, Name: name1},
			Spec:       spec2,
		}, options.SetOptions{})
		Expect(outError).To(HaveOccurred())
		Expect(outError.Error()).To(Equal("resource already exists: NetworkPolicy(" + namespace1 + "/default." + name1 + ")"))
		By("Getting NetworkPolicy (name1) and comparing the output against spec1")
		res, outError := c.NetworkPolicies().Get(ctx, namespace1, name1, options.GetOptions{})
		Expect(outError).NotTo(HaveOccurred())
		Expect(res).To(MatchResource(apiv3.KindNetworkPolicy, namespace1, name1, spec1))
		Expect(res.ResourceVersion).To(Equal(res1.ResourceVersion))
		By("Getting NetworkPolicy (name2) before it is created")
		_, outError = c.NetworkPolicies().Get(ctx, namespace2, name2, options.GetOptions{})
		Expect(outError).To(HaveOccurred())
		Expect(outError.Error()).To(ContainSubstring("resource does not exist: NetworkPolicy(" + namespace2 + "/default." + name2 + ") with error:"))
		By("Listing all the NetworkPolicies in namespace1, expecting a single result with name1/spec1")
		outList, outError := c.NetworkPolicies().List(ctx, options.ListOptions{Namespace: namespace1})
		Expect(outError).NotTo(HaveOccurred())
		Expect(outList.Items).To(ConsistOf(
			testutils.Resource(apiv3.KindNetworkPolicy, namespace1, name1, spec1),
		))
		By("Creating a new NetworkPolicy with name2/spec2")
		res2, outError := c.NetworkPolicies().Create(ctx, &apiv3.NetworkPolicy{
			ObjectMeta: metav1.ObjectMeta{Namespace: namespace2, Name: name2},
			Spec:       spec2,
		}, options.SetOptions{})
		Expect(outError).NotTo(HaveOccurred())
		spec2.Types = types2
		Expect(res2).To(MatchResource(apiv3.KindNetworkPolicy, namespace2, name2, spec2))
		By("Getting NetworkPolicy (name2) and comparing the output against spec2")
		res, outError = c.NetworkPolicies().Get(ctx, namespace2, name2, options.GetOptions{})
		Expect(outError).NotTo(HaveOccurred())
		Expect(res).To(MatchResource(apiv3.KindNetworkPolicy, namespace2, name2, spec2))
		Expect(res.ResourceVersion).To(Equal(res2.ResourceVersion))
		By("Listing all the NetworkPolicies using an empty namespace (all-namespaces), expecting a two results with name1/spec1 and name2/spec2")
		outList, outError = c.NetworkPolicies().List(ctx, options.ListOptions{})
		Expect(outError).NotTo(HaveOccurred())
		Expect(outList.Items).To(ConsistOf(
			testutils.Resource(apiv3.KindNetworkPolicy, namespace1, name1, spec1),
			testutils.Resource(apiv3.KindNetworkPolicy, namespace2, name2, spec2),
		))
		By("Listing all the NetworkPolicies in namespace2, expecting a one results with name2/spec2")
		outList, outError = c.NetworkPolicies().List(ctx, options.ListOptions{Namespace: namespace2})
		Expect(outError).NotTo(HaveOccurred())
		Expect(outList.Items).To(ConsistOf(
			testutils.Resource(apiv3.KindNetworkPolicy, namespace2, name2, spec2),
		))
		By("Updating NetworkPolicy name1 with spec2")
		res1.Spec = spec2
		res1, outError = c.NetworkPolicies().Update(ctx, res1, options.SetOptions{})
		Expect(outError).NotTo(HaveOccurred())
		Expect(res1).To(MatchResource(apiv3.KindNetworkPolicy, namespace1, name1, spec2))
		By("Attempting to update the NetworkPolicy without a Creation Timestamp")
		res, outError = c.NetworkPolicies().Update(ctx, &apiv3.NetworkPolicy{
			ObjectMeta: metav1.ObjectMeta{Namespace: namespace1, Name: name1, ResourceVersion: rv, UID: "test-fail-networkpolicy"},
			Spec:       spec1,
		}, options.SetOptions{})
		Expect(outError).To(HaveOccurred())
		Expect(res).To(BeNil())
		Expect(outError.Error()).To(Equal("error with field Metadata.CreationTimestamp = '0001-01-01 00:00:00 +0000 UTC' (field must be set for an Update request)"))
		By("Attempting to update the NetworkPolicy without a UID")
		res, outError = c.NetworkPolicies().Update(ctx, &apiv3.NetworkPolicy{
			ObjectMeta: metav1.ObjectMeta{Namespace: namespace1, Name: name1, ResourceVersion: rv, CreationTimestamp: metav1.Now()},
			Spec:       spec1,
		}, options.SetOptions{})
		Expect(outError).To(HaveOccurred())
		Expect(res).To(BeNil())
		Expect(outError.Error()).To(Equal("error with field Metadata.UID = '' (field must be set for an Update request)"))
		// Track the version of the updated name1 data.
		rv1_2 := res1.ResourceVersion
		By("Updating NetworkPolicy name1 without specifying a resource version")
		res1.Spec = spec1
		res1.ObjectMeta.ResourceVersion = ""
		_, outError = c.NetworkPolicies().Update(ctx, res1, options.SetOptions{})
		Expect(outError).To(HaveOccurred())
		Expect(outError.Error()).To(Equal("error with field Metadata.ResourceVersion = '' (field must be set for an Update request)"))
		By("Updating NetworkPolicy name1 using the previous resource version")
		res1.Spec = spec1
		res1.ResourceVersion = rv1_1
		_, outError = c.NetworkPolicies().Update(ctx, res1, options.SetOptions{})
		Expect(outError).To(HaveOccurred())
		Expect(outError.Error()).To(Equal("update conflict: NetworkPolicy(" + namespace1 + "/default." + name1 + ")"))
		// Reads at historical resource versions are only exercised for
		// non-Kubernetes datastores.
		if config.Spec.DatastoreType != apiconfig.Kubernetes {
			By("Getting NetworkPolicy (name1) with the original resource version and comparing the output against spec1")
			res, outError = c.NetworkPolicies().Get(ctx, namespace1, name1, options.GetOptions{ResourceVersion: rv1_1})
			Expect(outError).NotTo(HaveOccurred())
			Expect(res).To(MatchResource(apiv3.KindNetworkPolicy, namespace1, name1, spec1))
			Expect(res.ResourceVersion).To(Equal(rv1_1))
		}
		By("Getting NetworkPolicy (name1) with the updated resource version and comparing the output against spec2")
		res, outError = c.NetworkPolicies().Get(ctx, namespace1, name1, options.GetOptions{ResourceVersion: rv1_2})
		Expect(outError).NotTo(HaveOccurred())
		Expect(res).To(MatchResource(apiv3.KindNetworkPolicy, namespace1, name1, spec2))
		Expect(res.ResourceVersion).To(Equal(rv1_2))
		if config.Spec.DatastoreType != apiconfig.Kubernetes {
			By("Listing NetworkPolicies with the original resource version and checking for a single result with name1/spec1")
			outList, outError = c.NetworkPolicies().List(ctx, options.ListOptions{Namespace: namespace1, ResourceVersion: rv1_1})
			Expect(outError).NotTo(HaveOccurred())
			Expect(outList.Items).To(ConsistOf(
				testutils.Resource(apiv3.KindNetworkPolicy, namespace1, name1, spec1),
			))
		}
		By("Listing NetworkPolicies (all namespaces) with the latest resource version and checking for two results with name1/spec2 and name2/spec2")
		outList, outError = c.NetworkPolicies().List(ctx, options.ListOptions{})
		Expect(outError).NotTo(HaveOccurred())
		Expect(outList.Items).To(ConsistOf(
			testutils.Resource(apiv3.KindNetworkPolicy, namespace1, name1, spec2),
			testutils.Resource(apiv3.KindNetworkPolicy, namespace2, name2, spec2),
		))
		if config.Spec.DatastoreType != apiconfig.Kubernetes {
			By("Deleting NetworkPolicy (name1) with the old resource version")
			_, outError = c.NetworkPolicies().Delete(ctx, namespace1, name1, options.DeleteOptions{ResourceVersion: rv1_1})
			Expect(outError).To(HaveOccurred())
			Expect(outError.Error()).To(Equal("update conflict: NetworkPolicy(" + namespace1 + "/default." + name1 + ")"))
		}
		By("Deleting NetworkPolicy (name1) with the new resource version")
		dres, outError := c.NetworkPolicies().Delete(ctx, namespace1, name1, options.DeleteOptions{ResourceVersion: rv1_2})
		Expect(outError).NotTo(HaveOccurred())
		Expect(dres).To(MatchResource(apiv3.KindNetworkPolicy, namespace1, name1, spec2))
		// TTL expiry is only exercised for non-Kubernetes datastores.
		if config.Spec.DatastoreType != apiconfig.Kubernetes {
			By("Updating NetworkPolicy name2 with a 2s TTL and waiting for the entry to be deleted")
			_, outError = c.NetworkPolicies().Update(ctx, res2, options.SetOptions{TTL: 2 * time.Second})
			Expect(outError).NotTo(HaveOccurred())
			// Still present before the TTL fires...
			time.Sleep(1 * time.Second)
			_, outError = c.NetworkPolicies().Get(ctx, namespace2, name2, options.GetOptions{})
			Expect(outError).NotTo(HaveOccurred())
			// ...and gone after it expires.
			time.Sleep(2 * time.Second)
			_, outError = c.NetworkPolicies().Get(ctx, namespace2, name2, options.GetOptions{})
			Expect(outError).To(HaveOccurred())
			Expect(outError.Error()).To(ContainSubstring("resource does not exist: NetworkPolicy(" + namespace2 + "/default." + name2 + ") with error:"))
			By("Creating NetworkPolicy name2 with a 2s TTL and waiting for the entry to be deleted")
			_, outError = c.NetworkPolicies().Create(ctx, &apiv3.NetworkPolicy{
				ObjectMeta: metav1.ObjectMeta{Namespace: namespace2, Name: name2},
				Spec:       spec2,
			}, options.SetOptions{TTL: 2 * time.Second})
			Expect(outError).NotTo(HaveOccurred())
			time.Sleep(1 * time.Second)
			_, outError = c.NetworkPolicies().Get(ctx, namespace2, name2, options.GetOptions{})
			Expect(outError).NotTo(HaveOccurred())
			time.Sleep(2 * time.Second)
			_, outError = c.NetworkPolicies().Get(ctx, namespace2, name2, options.GetOptions{})
			Expect(outError).To(HaveOccurred())
			Expect(outError.Error()).To(ContainSubstring("resource does not exist: NetworkPolicy(" + namespace2 + "/default." + name2 + ") with error:"))
		}
		// For Kubernetes, name2 was not removed by the TTL block above, so it
		// still exists and this delete is expected to succeed.
		if config.Spec.DatastoreType == apiconfig.Kubernetes {
			By("Attempting to deleting NetworkPolicy (name2) again")
			dres, outError = c.NetworkPolicies().Delete(ctx, namespace2, name2, options.DeleteOptions{})
			Expect(outError).NotTo(HaveOccurred())
			Expect(dres).To(MatchResource(apiv3.KindNetworkPolicy, namespace2, name2, spec2))
		}
		By("Attempting to delete NetworkPolicy (name2) again")
		_, outError = c.NetworkPolicies().Delete(ctx, namespace2, name2, options.DeleteOptions{})
		Expect(outError).To(HaveOccurred())
		Expect(outError.Error()).To(ContainSubstring("resource does not exist: NetworkPolicy(" + namespace2 + "/default." + name2 + ") with error:"))
		By("Listing all NetworkPolicies and expecting no items")
		outList, outError = c.NetworkPolicies().List(ctx, options.ListOptions{})
		Expect(outError).NotTo(HaveOccurred())
		Expect(outList.Items).To(HaveLen(0))
		By("Getting NetworkPolicy (name2) and expecting an error")
		_, outError = c.NetworkPolicies().Get(ctx, namespace2, name2, options.GetOptions{})
		Expect(outError).To(HaveOccurred())
		Expect(outError.Error()).To(ContainSubstring("resource does not exist: NetworkPolicy(" + namespace2 + "/default." + name2 + ") with error:"))
	},
	// Pass two fully populated PolicySpecs and expect the series of operations to succeed.
	Entry("Two fully populated PolicySpecs",
		namespace1, namespace2,
		name1, name2,
		spec1, spec2,
		ingressEgress, ingressEgress,
	),
	// Check defaulting for policies with ingress rules and egress rules only.
	Entry("Ingress-only and egress-only policies",
		namespace1, namespace2,
		name1, name2,
		ingressSpec1, egressSpec2,
		ingress, egress,
	),
	// Check non-defaulting for policies with explicit Types value.
	Entry("Policies with explicit ingress and egress Types",
		namespace1, namespace2,
		name1, name2,
		ingressTypesSpec1, egressTypesSpec2,
		ingress, egress,
	),
)
Describe("NetworkPolicy watch functionality", func() {
It("should handle watch events for different resource versions and event types", func() {
c, err := clientv3.New(config)
Expect(err).NotTo(HaveOccurred())
be, err := backend.NewClient(config)
Expect(err).NotTo(HaveOccurred())
be.Clean()
By("Listing NetworkPolicies with the latest resource version and checking for two results with name1/spec2 and name2/spec2")
outList, outError := c.NetworkPolicies().List(ctx, options.ListOptions{})
Expect(outError).NotTo(HaveOccurred())
Expect(outList.Items).To(HaveLen(0))
rev0 := outList.ResourceVersion
By("Configuring a NetworkPolicy namespace1/name1/spec1 and storing the response")
outRes1, err := c.NetworkPolicies().Create(
ctx,
&apiv3.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: namespace1, Name: name1},
Spec: spec1,
},
options.SetOptions{},
)
rev1 := outRes1.ResourceVersion
By("Configuring a NetworkPolicy namespace2/name2/spec2 and storing the response")
outRes2, err := c.NetworkPolicies().Create(
ctx,
&apiv3.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: namespace2, Name: name2},
Spec: spec2,
},
options.SetOptions{},
)
By("Starting a watcher from revision rev1 - this should skip the first creation")
w, err := c.NetworkPolicies().Watch(ctx, options.ListOptions{ResourceVersion: rev1})
Expect(err).NotTo(HaveOccurred())
testWatcher1 := testutils.NewTestResourceWatch(config.Spec.DatastoreType, w)
defer testWatcher1.Stop()
By("Deleting res1")
_, err = c.NetworkPolicies().Delete(ctx, namespace1, name1, options.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
By("Checking for two events, create res2 and delete re1")
testWatcher1.ExpectEvents(apiv3.KindNetworkPolicy, []watch.Event{
{
Type: watch.Added,
Object: outRes2,
},
{
Type: watch.Deleted,
Previous: outRes1,
},
})
testWatcher1.Stop()
By("Starting a watcher from rev0 - this should get all events")
w, err = c.NetworkPolicies().Watch(ctx, options.ListOptions{ResourceVersion: rev0})
Expect(err).NotTo(HaveOccurred())
testWatcher2 := testutils.NewTestResourceWatch(config.Spec.DatastoreType, w)
defer testWatcher2.Stop()
By("Modifying res2")
outRes3, err := c.NetworkPolicies().Update(
ctx,
&apiv3.NetworkPolicy{
ObjectMeta: outRes2.ObjectMeta,
Spec: spec1,
},
options.SetOptions{},
)
Expect(err).NotTo(HaveOccurred())
testWatcher2.ExpectEvents(apiv3.KindNetworkPolicy, []watch.Event{
{
Type: watch.Added,
Object: outRes1,
},
{
Type: watch.Added,
Object: outRes2,
},
{
Type: watch.Deleted,
Previous: outRes1,
},
{
Type: watch.Modified,
Previous: outRes2,
Object: outRes3,
},
})
testWatcher2.Stop()
// Only etcdv3 supports watching a specific instance of a resource.
if config.Spec.DatastoreType == apiconfig.EtcdV3 {
By("Starting a watcher from rev0 watching namespace1/name1 - this should get all events for name1")
w, err = c.NetworkPolicies().Watch(ctx, options.ListOptions{Namespace: namespace1, Name: name1, ResourceVersion: rev0})
Expect(err).NotTo(HaveOccurred())
testWatcher2_1 := testutils.NewTestResourceWatch(config.Spec.DatastoreType, w)
defer testWatcher2_1.Stop()
testWatcher2_1.ExpectEvents(apiv3.KindNetworkPolicy, []watch.Event{
{
Type: watch.Added,
Object: outRes1,
},
{
Type: watch.Deleted,
Previous: outRes1,
},
})
testWatcher2_1.Stop()
By("Starting a watcher from rev0 watching name1 - this should get all events for name1")
w, err = c.NetworkPolicies().Watch(ctx, options.ListOptions{Name: name1, ResourceVersion: rev0})
Expect(err).NotTo(HaveOccurred())
testWatcher2_2 := testutils.NewTestResourceWatch(config.Spec.DatastoreType, w)
defer testWatcher2_2.Stop()
testWatcher2_2.ExpectEvents(apiv3.KindNetworkPolicy, []watch.Event{
{
Type: watch.Added,
Object: outRes1,
},
{
Type: watch.Deleted,
Previous: outRes1,
},
})
testWatcher2_2.Stop()
}
By("Starting a watcher not specifying a rev - expect the current snapshot")
w, err = c.NetworkPolicies().Watch(ctx, options.ListOptions{})
Expect(err).NotTo(HaveOccurred())
testWatcher3 := testutils.NewTestResourceWatch(config.Spec.DatastoreType, w)
defer testWatcher3.Stop()
testWatcher3.ExpectEvents(apiv3.KindNetworkPolicy, []watch.Event{
{
Type: watch.Added,
Object: outRes3,
},
})
testWatcher3.Stop()
By("Starting a watcher at rev0 in namespace1 - expect the events for policy in namespace1")
w, err = c.NetworkPolicies().Watch(ctx, options.ListOptions{Namespace: namespace1, ResourceVersion: rev0})
Expect(err).NotTo(HaveOccurred())
testWatcher4 := testutils.NewTestResourceWatch(config.Spec.DatastoreType, w)
defer testWatcher4.Stop()
testWatcher4.ExpectEvents(apiv3.KindNetworkPolicy, []watch.Event{
{
Type: watch.Added,
Object: outRes1,
},
{
Type: watch.Deleted,
Previous: outRes1,
},
})
testWatcher4.Stop()
})
})
// These tests check that the names we use on the API properly round-trip. In particular,
// k8s and OpenStack policies have special prefixes, which should be preserved. Other
// names get stored with a prefix, for consistency but the API returns them without the
// prefix.
nameNormalizationTests := []TableEntry{
// OpenStack names should round-trip, including their prefix.
Entry("OpenStack policy", "ossg.default.group1", "ossg.default.group1"),
// As should normal names.
Entry("OpenStack policy", "foo-bar", "default.foo-bar"),
}
if config.Spec.DatastoreType != "kubernetes" {
// Only test writing a knp-prefixed policy if we're not backed by KDD. In KDD,
// the knp-prefixed policies are derived from k8s data so it doesn't make sense
// to write them through our API.
knpName := "knp.default.a-name"
nameNormalizationTests = append(nameNormalizationTests,
Entry("KDD policy", knpName, knpName),
)
}
// Each entry supplies the API-visible policy name ("name") and the name it
// is stored under in the datastore ("backendName"); the test verifies every
// CRUD method converts correctly between the two.
DescribeTable("name round-tripping tests",
	func(name, backendName string) {
		c, err := clientv3.New(config)
		Expect(err).NotTo(HaveOccurred())
		be, err := backend.NewClient(config)
		Expect(err).NotTo(HaveOccurred())
		be.Clean()
		By("Attempting to creating a new NetworkPolicy with name: " + name)
		inNp := &apiv3.NetworkPolicy{
			ObjectMeta: metav1.ObjectMeta{Namespace: namespace1, Name: name},
			Spec:       ingressTypesSpec1,
		}
		np, outError := c.NetworkPolicies().Create(ctx, inNp, options.SetOptions{})
		Expect(outError).NotTo(HaveOccurred())
		Expect(inNp.GetName()).To(Equal(name), "Create() shouldn't touch input data")
		Expect(np.GetName()).To(Equal(name), "Create() should return the data as we'd read it")
		By("Reading back the raw data with its normalized name: " + backendName)
		// Make sure that, where the name and the storage name differ, we do the write with
		// the storage name. Then the assertions below verify that all the CRUD methods
		// do the right conversion too.
		kv, err := be.Get(ctx, model.ResourceKey{
			Kind:      apiv3.KindNetworkPolicy,
			Namespace: namespace1,
			Name:      backendName,
		}, "")
		Expect(err).NotTo(HaveOccurred())
		Expect(kv.Value.(*apiv3.NetworkPolicy).Spec).To(Equal(ingressTypesSpec1))
		By("Getting the right policy by name")
		np, err = c.NetworkPolicies().Get(ctx, namespace1, name, options.GetOptions{})
		Expect(err).NotTo(HaveOccurred())
		Expect(np.GetName()).To(Equal(name))
		Expect(np.Spec).To(Equal(ingressTypesSpec1))
		By("Updating the policy")
		np.Spec = egressTypesSpec2
		np, err = c.NetworkPolicies().Update(ctx, np, options.SetOptions{})
		Expect(err).NotTo(HaveOccurred())
		By("Getting the right policy")
		np, err = c.NetworkPolicies().Get(ctx, namespace1, name, options.GetOptions{})
		Expect(err).NotTo(HaveOccurred())
		Expect(np.GetName()).To(Equal(name))
		Expect(np.Spec).To(Equal(egressTypesSpec2))
		By("Listing the policy with correct name (no query options)")
		nps, err := c.NetworkPolicies().List(ctx, options.ListOptions{Namespace: namespace1})
		Expect(err).NotTo(HaveOccurred())
		var names []string
		for _, np := range nps.Items {
			names = append(names, np.GetName())
		}
		Expect(names).To(ContainElement(name))
		// BUG FIX: this previously read "if name != name", which is always
		// false, so the negative assertion never ran. The intent is: when the
		// API name and the storage name differ, the listing must not leak the
		// prefixed storage name.
		if name != backendName {
			Expect(names).NotTo(ContainElement(backendName))
		}
		By("Listing the policy with correct name (list by name)")
		nps, err = c.NetworkPolicies().List(ctx,
			options.ListOptions{Namespace: namespace1, Name: name})
		Expect(err).NotTo(HaveOccurred())
		names = nil
		for _, np := range nps.Items {
			names = append(names, np.GetName())
		}
		Expect(names).To(ConsistOf(name))
		By("Deleting the policy via the name")
		np, err = c.NetworkPolicies().Delete(ctx, namespace1, name, options.DeleteOptions{})
		Expect(err).NotTo(HaveOccurred())
		if np != nil {
			Expect(np.GetName()).To(Equal(name))
		}
	},
	nameNormalizationTests...,
)
})
|
// Copyright 2014-2015 Apptimist, Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"encoding/binary"
"errors"
"fmt"
"io"
"net"
"os"
"runtime"
"time"
"github.com/apptimistco/asn/debug"
)
const (
AsnStr = "asn"
ConnTO = 200 * time.Millisecond
MaxSegSz = 4096
MoreFlag = uint16(1 << 15)
)
const (
opened uint8 = iota
provisional
established
closed
)
var ErrTooLarge = errors.New("exceeds MaxSegSz")
type asn struct {
debug.Debug
name struct {
local, remote string
}
// Version adapts to peer
version Version
// State may be { opened, provisional, established, closed }
state uint8
// Keys to Open/Seal
box *Box
rx struct {
ch chan *PDU
err error
black []byte
red []byte
going bool
}
tx struct {
ch chan pdubox
err error
black []byte
red []byte
going bool
}
conn net.Conn
repos *Repos
acker acker
time struct {
in, out time.Time
}
}
// Pair box and pdu to support reset of box after Ack of Login
type pdubox struct {
pdu *PDU
box *Box
}
func (asn *asn) Init() {
asn.version = Latest
asn.rx.ch = make(chan *PDU, 4)
asn.tx.ch = make(chan pdubox, 4)
asn.rx.going = false
asn.tx.going = false
asn.rx.black = make([]byte, 0, MaxSegSz)
asn.tx.black = make([]byte, 0, MaxSegSz)
asn.rx.red = make([]byte, 0, MaxSegSz)
asn.tx.red = make([]byte, 0, 2+MaxSegSz)
asn.acker.Init()
}
func (asn *asn) Conn() net.Conn { return asn.conn }
func (asn *asn) IsOpened() bool { return asn.state == opened }
func (asn *asn) IsProvisional() bool { return asn.state == provisional }
func (asn *asn) IsEstablished() bool { return asn.state == established }
func (asn *asn) IsClosed() bool { return asn.state == closed }
// gorx receives, decrypts and reassembles segmented PDUs on the asn.Rx.Q
// until error, or EOF; then closes asn.Rx.Q when done.
func (asn *asn) gorx() {
pdu := NewPDUBuf()
defer func() {
r := recover()
pdu.Free()
if r != nil {
asn.rx.err = r.(error)
if asn.rx.err != io.EOF {
asn.Failure(debug.Depth(4), asn.rx.err)
}
}
close(asn.rx.ch)
asn.rx.going = false
}()
for {
l := uint16(0)
if pdu.File != nil && pdu.PB != nil {
panic(os.ErrInvalid)
}
_, err := (NBOReader{asn}).ReadNBO(&l)
if err != nil {
panic(err)
}
n := l & ^MoreFlag
if n > MaxSegSz {
panic(ErrTooLarge)
}
if n == 0 {
panic(os.ErrInvalid)
}
asn.rx.red = asn.rx.red[:0]
_, err = asn.Read(asn.rx.red[:n])
if err != nil {
panic(err)
}
asn.rx.black = asn.rx.black[:0]
b, err := asn.box.Open(asn.rx.black[:], asn.rx.red[:n])
if err != nil {
panic(err)
}
_, err = pdu.Write(b)
if err != nil {
panic(err)
}
if (l & MoreFlag) == 0 {
asn.rx.ch <- pdu
pdu = NewPDUBuf()
} else if pdu.PB != nil {
pdu.File = asn.repos.tmp.New()
pdu.FN = pdu.File.Name()
pdu.File.Write(pdu.PB.Bytes())
pdu.PB.Free()
pdu.PB = nil
}
}
}
// gotx pulls PDU from a channel, segments, and encrypts before sending through
// asn.conn. This stops and closes the connection on error or closed channel.
func (asn *asn) gotx() {
const maxBlack = MaxSegSz - BoxOverhead
defer func() {
r := recover()
if asn.conn != nil {
asn.state = closed
asn.conn.Close()
}
if r != nil {
asn.tx.err = r.(error)
asn.Diag(debug.Depth(4), asn.tx.err)
}
asn.tx.going = false
}()
for {
pb, open := <-asn.tx.ch
if !open {
asn.Diag("quit pdutx")
runtime.Goexit()
}
err := pb.pdu.Open()
if err != nil {
panic(err)
}
for n := pb.pdu.Len(); n > 0; n = pb.pdu.Len() {
if n > maxBlack {
n = maxBlack
}
asn.tx.black = asn.tx.black[:n]
if _, err = pb.pdu.Read(asn.tx.black); err != nil {
panic(err)
}
asn.tx.red = asn.tx.red[:2]
asn.tx.red, err = pb.box.Seal(asn.tx.red, asn.tx.black)
if err != nil {
panic(err)
}
l := uint16(len(asn.tx.red[2:]))
if pb.pdu.Len() > 0 {
l |= MoreFlag
}
binary.BigEndian.PutUint16(asn.tx.red[:2], l)
if _, err = asn.Write(asn.tx.red); err != nil {
panic(err)
}
}
pb.pdu.Free()
pb.pdu = nil
pb.box = nil
}
}
func IsNetTimeout(err error) bool {
e, ok := err.(net.Error)
return ok && e.Timeout()
}
// Read full buffer from asn.conn unless preempted with state == closed.
func (asn *asn) Read(b []byte) (n int, err error) {
for i := 0; n < len(b); n += i {
if asn.IsClosed() {
err = io.EOF
asn.Diag("closed")
return
}
asn.conn.SetReadDeadline(time.Now().Add(ConnTO))
i, err = asn.conn.Read(b[n:])
asn.conn.SetReadDeadline(time.Time{})
if err != nil && !IsNetTimeout(err) {
if asn.IsClosed() {
err = io.EOF
} else {
asn.Diag(err)
}
return
}
}
return
}
func (asn *asn) Reset() {
asn.Diag(debug.Depth(2), "asn reset")
if asn.conn != nil {
if asn.state != closed {
asn.state = closed
asn.conn.Close()
}
asn.conn = nil
}
asn.box = nil
asn.repos = nil
asn.rx.black = asn.rx.black[:0]
asn.tx.black = asn.tx.black[:0]
asn.rx.red = asn.rx.red[:0]
asn.tx.red = asn.tx.red[:0]
asn.name.local = ""
asn.name.remote = ""
asn.Debug.Reset()
asn.acker.Reset()
}
func (asn *asn) Set(v interface{}) error {
switch t := v.(type) {
case *Box:
asn.box = t
case net.Conn:
asn.conn = t
asn.state = opened
go asn.gorx()
go asn.gotx()
case string:
asn.name.remote = t
asn.Debug.Set(fmt.Sprintf("%s(%s)", asn.name.local, t))
case *Repos:
asn.repos = t
case Version:
if asn.version > t {
asn.version = t
}
default:
return os.ErrInvalid
}
return nil
}
// Tx queues pdu for segmentation, encryption and transmission by gotx.
// The PDU is paired with the current box so a box reset (after Ack of
// Login) doesn't affect PDUs already queued.
func (asn *asn) Tx(pdu *PDU) {
	if asn == nil {
		// BUG FIX: calling asn.Diag here would dereference the nil
		// receiver via the embedded debug.Debug and panic; just drop
		// the PDU silently.
		return
	}
	if asn.IsClosed() {
		asn.Diag(debug.Depth(2), "tried to Tx on closed asn")
		return
	}
	asn.tx.ch <- pdubox{pdu: pdu, box: asn.box}
}
// Version steps down to the peer.
func (asn *asn) Version() Version { return asn.version }
// Write full buffer unless preempted by Closed state.
func (asn *asn) Write(b []byte) (n int, err error) {
for i := 0; n < len(b); n += i {
if asn.IsClosed() {
err = io.EOF
asn.Diag("closed")
return
}
asn.conn.SetWriteDeadline(time.Now().Add(ConnTO))
i, err = asn.conn.Write(b[n:])
asn.conn.SetWriteDeadline(time.Time{})
if err != nil && !IsNetTimeout(err) {
asn.Diag(err)
return
}
}
return
}
Consider nil'd connection closed
Signed-off-by: Tom Grennan <96835dd8bfa718bd6447ccc87af89ae1675daeca@apptimist.co>
// Copyright 2014-2015 Apptimist, Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"encoding/binary"
"errors"
"fmt"
"io"
"net"
"os"
"runtime"
"time"
"github.com/apptimistco/asn/debug"
)
const (
AsnStr = "asn"
ConnTO = 200 * time.Millisecond
MaxSegSz = 4096
MoreFlag = uint16(1 << 15)
)
const (
opened uint8 = iota
provisional
established
closed
)
var ErrTooLarge = errors.New("exceeds MaxSegSz")
type asn struct {
debug.Debug
name struct {
local, remote string
}
// Version adapts to peer
version Version
// State may be { opened, provisional, established, closed }
state uint8
// Keys to Open/Seal
box *Box
rx struct {
ch chan *PDU
err error
black []byte
red []byte
going bool
}
tx struct {
ch chan pdubox
err error
black []byte
red []byte
going bool
}
conn net.Conn
repos *Repos
acker acker
time struct {
in, out time.Time
}
}
// Pair box and pdu to support reset of box after Ack of Login
type pdubox struct {
pdu *PDU
box *Box
}
func (asn *asn) Init() {
asn.version = Latest
asn.rx.ch = make(chan *PDU, 4)
asn.tx.ch = make(chan pdubox, 4)
asn.rx.going = false
asn.tx.going = false
asn.rx.black = make([]byte, 0, MaxSegSz)
asn.tx.black = make([]byte, 0, MaxSegSz)
asn.rx.red = make([]byte, 0, MaxSegSz)
asn.tx.red = make([]byte, 0, 2+MaxSegSz)
asn.acker.Init()
}
func (asn *asn) Conn() net.Conn { return asn.conn }
func (asn *asn) IsOpened() bool { return asn.state == opened }
func (asn *asn) IsProvisional() bool { return asn.state == provisional }
func (asn *asn) IsEstablished() bool { return asn.state == established }
// IsClosed reports whether this asn is unusable: either the state machine
// has reached closed, or the connection was never set / has been cleared
// by Reset (nil conn).
func (asn *asn) IsClosed() bool {
	return asn.conn == nil || asn.state == closed
}
// gorx receives, decrypts and reassembles segmented PDUs on the asn.Rx.Q
// until error, or EOF; then closes asn.Rx.Q when done.
func (asn *asn) gorx() {
pdu := NewPDUBuf()
defer func() {
r := recover()
pdu.Free()
if r != nil {
asn.rx.err = r.(error)
if asn.rx.err != io.EOF {
asn.Failure(debug.Depth(4), asn.rx.err)
}
}
close(asn.rx.ch)
asn.rx.going = false
}()
for {
l := uint16(0)
if pdu.File != nil && pdu.PB != nil {
panic(os.ErrInvalid)
}
_, err := (NBOReader{asn}).ReadNBO(&l)
if err != nil {
panic(err)
}
n := l & ^MoreFlag
if n > MaxSegSz {
panic(ErrTooLarge)
}
if n == 0 {
panic(os.ErrInvalid)
}
asn.rx.red = asn.rx.red[:0]
_, err = asn.Read(asn.rx.red[:n])
if err != nil {
panic(err)
}
asn.rx.black = asn.rx.black[:0]
b, err := asn.box.Open(asn.rx.black[:], asn.rx.red[:n])
if err != nil {
panic(err)
}
_, err = pdu.Write(b)
if err != nil {
panic(err)
}
if (l & MoreFlag) == 0 {
asn.rx.ch <- pdu
pdu = NewPDUBuf()
} else if pdu.PB != nil {
pdu.File = asn.repos.tmp.New()
pdu.FN = pdu.File.Name()
pdu.File.Write(pdu.PB.Bytes())
pdu.PB.Free()
pdu.PB = nil
}
}
}
// gotx pulls PDU from a channel, segments, and encrypts before sending through
// asn.conn. This stops and closes the connection on error or closed channel.
func (asn *asn) gotx() {
const maxBlack = MaxSegSz - BoxOverhead
defer func() {
r := recover()
if asn.conn != nil {
asn.state = closed
asn.conn.Close()
}
if r != nil {
asn.tx.err = r.(error)
asn.Diag(debug.Depth(4), asn.tx.err)
}
asn.tx.going = false
}()
for {
pb, open := <-asn.tx.ch
if !open {
asn.Diag("quit pdutx")
runtime.Goexit()
}
err := pb.pdu.Open()
if err != nil {
panic(err)
}
for n := pb.pdu.Len(); n > 0; n = pb.pdu.Len() {
if n > maxBlack {
n = maxBlack
}
asn.tx.black = asn.tx.black[:n]
if _, err = pb.pdu.Read(asn.tx.black); err != nil {
panic(err)
}
asn.tx.red = asn.tx.red[:2]
asn.tx.red, err = pb.box.Seal(asn.tx.red, asn.tx.black)
if err != nil {
panic(err)
}
l := uint16(len(asn.tx.red[2:]))
if pb.pdu.Len() > 0 {
l |= MoreFlag
}
binary.BigEndian.PutUint16(asn.tx.red[:2], l)
if _, err = asn.Write(asn.tx.red); err != nil {
panic(err)
}
}
pb.pdu.Free()
pb.pdu = nil
pb.box = nil
}
}
func IsNetTimeout(err error) bool {
e, ok := err.(net.Error)
return ok && e.Timeout()
}
// Read full buffer from asn.conn unless preempted with state == closed.
func (asn *asn) Read(b []byte) (n int, err error) {
for i := 0; n < len(b); n += i {
if asn.IsClosed() {
err = io.EOF
asn.Diag("closed")
return
}
asn.conn.SetReadDeadline(time.Now().Add(ConnTO))
i, err = asn.conn.Read(b[n:])
asn.conn.SetReadDeadline(time.Time{})
if err != nil && !IsNetTimeout(err) {
if asn.IsClosed() {
err = io.EOF
} else {
asn.Diag(err)
}
return
}
}
return
}
func (asn *asn) Reset() {
asn.Diag(debug.Depth(2), "asn reset")
if asn.conn != nil {
if asn.state != closed {
asn.state = closed
asn.conn.Close()
}
asn.conn = nil
}
asn.box = nil
asn.repos = nil
asn.rx.black = asn.rx.black[:0]
asn.tx.black = asn.tx.black[:0]
asn.rx.red = asn.rx.red[:0]
asn.tx.red = asn.tx.red[:0]
asn.name.local = ""
asn.name.remote = ""
asn.Debug.Reset()
asn.acker.Reset()
}
func (asn *asn) Set(v interface{}) error {
switch t := v.(type) {
case *Box:
asn.box = t
case net.Conn:
asn.conn = t
asn.state = opened
go asn.gorx()
go asn.gotx()
case string:
asn.name.remote = t
asn.Debug.Set(fmt.Sprintf("%s(%s)", asn.name.local, t))
case *Repos:
asn.repos = t
case Version:
if asn.version > t {
asn.version = t
}
default:
return os.ErrInvalid
}
return nil
}
// Tx queues pdu for segmentation, encryption and transmission by gotx.
// The PDU is paired with the current box so a box reset (after Ack of
// Login) doesn't affect PDUs already queued.
func (asn *asn) Tx(pdu *PDU) {
	if asn == nil {
		// BUG FIX: calling asn.Diag here would dereference the nil
		// receiver via the embedded debug.Debug and panic; just drop
		// the PDU silently.
		return
	}
	if asn.IsClosed() {
		asn.Diag(debug.Depth(2), "tried to Tx on closed asn")
		return
	}
	asn.tx.ch <- pdubox{pdu: pdu, box: asn.box}
}
// Version steps down to the peer.
func (asn *asn) Version() Version { return asn.version }
// Write full buffer unless preempted by Closed state.
func (asn *asn) Write(b []byte) (n int, err error) {
for i := 0; n < len(b); n += i {
if asn.IsClosed() {
err = io.EOF
asn.Diag("closed")
return
}
asn.conn.SetWriteDeadline(time.Now().Add(ConnTO))
i, err = asn.conn.Write(b[n:])
asn.conn.SetWriteDeadline(time.Time{})
if err != nil && !IsNetTimeout(err) {
asn.Diag(err)
return
}
}
return
}
|
package crypto
import (
"time"
"github.com/btcsuite/btcd/btcec"
"github.com/denkhaus/bitshares/config"
"github.com/denkhaus/bitshares/types"
"github.com/juju/errors"
)
//TransactionSigner can sign and verify a transaction.
type TransactionSigner struct {
*types.SignedTransaction
}
//NewTransactionSigner creates an New TransactionSigner. Invalid expiration time will be adjusted.
func NewTransactionSigner(tx *types.SignedTransaction) *TransactionSigner {
tm := time.Now().UTC()
if tx.Expiration.IsZero() || tx.Expiration.Before(tm) {
tx.Expiration.Set(30 * time.Second)
}
return &TransactionSigner{
SignedTransaction: tx,
}
}
//Sign signs the underlying transaction
func (tx *TransactionSigner) Sign(privKeys types.PrivateKeys, chain *config.ChainConfig) error {
for _, prv := range privKeys {
ecdsaKey := prv.ToECDSA()
if ecdsaKey.Curve != btcec.S256() {
return types.ErrInvalidPrivateKeyCurve
}
for {
digest, err := tx.Digest(chain)
if err != nil {
return errors.Annotate(err, "Digest")
}
sig, err := prv.SignCompact(digest)
if err != nil {
return errors.Annotate(err, "SignCompact")
}
if !isCanonical(sig) {
//make canonical by adjusting expiration time
tx.AdjustExpiration(time.Second)
} else {
tx.Signatures = append(tx.Signatures, types.Buffer(sig))
break
}
}
}
return nil
}
//Verify verifies the underlying transaction against a given KeyBag
func (tx *TransactionSigner) Verify(keyBag *KeyBag, chain *config.ChainConfig) (bool, error) {
dig, err := tx.Digest(chain)
if err != nil {
return false, errors.Annotate(err, "Digest")
}
pubKeysFound := make([]*types.PublicKey, 0, len(tx.Signatures))
for _, signature := range tx.Signatures {
sig := signature.Bytes()
p, _, err := btcec.RecoverCompact(btcec.S256(), sig, dig)
if err != nil {
return false, errors.Annotate(err, "RecoverCompact")
}
pub, err := types.NewPublicKey(p)
if err != nil {
return false, errors.Annotate(err, "NewPublicKey")
}
pubKeysFound = append(pubKeysFound, pub)
}
for _, pub := range pubKeysFound {
if !keyBag.PublicPresent(pub) {
return false, nil
}
}
return true, nil
}
// isCanonical reports whether a 65-byte compact signature is in canonical
// form. The layout is: header/recovery byte at sig[0], big-endian R at
// sig[1:33], big-endian S at sig[33:65]. Each of R and S must be
// non-negative (high bit of the first byte clear) and minimally encoded
// (a leading zero byte only when the following byte has its high bit set).
//
// BUG FIX: the previous version tested sig[0]/sig[1] and sig[32]/sig[33],
// which is the rule for a 64-byte r||s signature; for the compact format
// produced by SignCompact everything is shifted by the header byte.
func isCanonical(sig []byte) bool {
	// R checks at offsets 1..2.
	t1 := (sig[1] & 0x80) == 0
	t2 := !(sig[1] == 0 && ((sig[2] & 0x80) == 0))
	// S checks at offsets 33..34.
	t3 := (sig[33] & 0x80) == 0
	t4 := !(sig[33] == 0 && ((sig[34] & 0x80) == 0))
	return t1 && t2 && t3 && t4
}
Fix TransactionSigner canonical-signature check: test R and S at their correct offsets within the 65-byte compact signature (header byte, then R, then S).
package crypto
import (
"time"
"github.com/btcsuite/btcd/btcec"
"github.com/denkhaus/bitshares/config"
"github.com/denkhaus/bitshares/types"
"github.com/juju/errors"
)
//TransactionSigner can sign and verify a transaction.
type TransactionSigner struct {
*types.SignedTransaction
}
//NewTransactionSigner creates an New TransactionSigner. Invalid expiration time will be adjusted.
// NewTransactionSigner creates a new TransactionSigner wrapping tx. If the
// transaction's expiration is unset or already in the past it is adjusted
// via Expiration.Set(30 * time.Second) — presumably pushing expiry ~30s
// ahead so the transaction remains signable; confirm types.Time.Set
// semantics against its implementation.
func NewTransactionSigner(tx *types.SignedTransaction) *TransactionSigner {
	tm := time.Now().UTC()
	if tx.Expiration.IsZero() || tx.Expiration.Before(tm) {
		tx.Expiration.Set(30 * time.Second)
	}
	return &TransactionSigner{
		SignedTransaction: tx,
	}
}
//Sign signs the underlying transaction
func (tx *TransactionSigner) Sign(privKeys types.PrivateKeys, chain *config.ChainConfig) error {
for _, prv := range privKeys {
ecdsaKey := prv.ToECDSA()
if ecdsaKey.Curve != btcec.S256() {
return types.ErrInvalidPrivateKeyCurve
}
for {
digest, err := tx.Digest(chain)
if err != nil {
return errors.Annotate(err, "Digest")
}
sig, err := prv.SignCompact(digest)
if err != nil {
return errors.Annotate(err, "SignCompact")
}
if !isCanonical(sig) {
//make canonical by adjusting expiration time
tx.AdjustExpiration(time.Second)
} else {
tx.Signatures = append(tx.Signatures, types.Buffer(sig))
break
}
}
}
return nil
}
//Verify verifies the underlying transaction against a given KeyBag
func (tx *TransactionSigner) Verify(keyBag *KeyBag, chain *config.ChainConfig) (bool, error) {
dig, err := tx.Digest(chain)
if err != nil {
return false, errors.Annotate(err, "Digest")
}
pubKeysFound := make([]*types.PublicKey, 0, len(tx.Signatures))
for _, signature := range tx.Signatures {
sig := signature.Bytes()
p, _, err := btcec.RecoverCompact(btcec.S256(), sig, dig)
if err != nil {
return false, errors.Annotate(err, "RecoverCompact")
}
pub, err := types.NewPublicKey(p)
if err != nil {
return false, errors.Annotate(err, "NewPublicKey")
}
pubKeysFound = append(pubKeysFound, pub)
}
for _, pub := range pubKeysFound {
if !keyBag.PublicPresent(pub) {
return false, nil
}
}
return true, nil
}
// isCanonical reports whether the 65-byte compact signature (header byte,
// then 32-byte R at sig[1:33], then 32-byte S at sig[33:65]) has both R and
// S canonically encoded: non-negative and with no redundant leading zero.
func isCanonical(sig []byte) bool {
	// R: high bit must be clear; a leading zero is only allowed when the
	// next byte has its high bit set (i.e. the zero is required padding).
	if sig[1]&0x80 != 0 || (sig[1] == 0 && sig[2]&0x80 == 0) {
		return false
	}
	// S: same rules at its own offset.
	if sig[33]&0x80 != 0 || (sig[33] == 0 && sig[34]&0x80 == 0) {
		return false
	}
	return true
}
|
package record
import (
"database/sql"
"errors"
"fmt"
_ "github.com/lib/pq"
"github.com/rkbodenner/parallel_universe/game"
)
type GameRecord struct {
Game *game.Game
}
func NewGameRecord(g *game.Game) *GameRecord {
return &GameRecord{g}
}
func (rec *GameRecord) Find(db *sql.DB, id int) error {
var err error
var name string
var minPlayers int
var maxPlayers int
err = db.QueryRow("SELECT name, min_players, max_players FROM games WHERE id = $1", id).Scan(&name, &minPlayers, &maxPlayers)
if nil != err {
return err
}
rec.Game.Id = (uint)(id)
rec.Game.Name = name
rec.Game.MinPlayers = minPlayers
rec.Game.MaxPlayers = maxPlayers
// Eager-load the associated game's setup rules
rules := NewSetupRuleRecordList()
err = rules.FindByGame(db, rec.Game)
if err != nil {
return err
}
fmt.Printf("Loaded %d setup rules\n", len(rules.List()))
rec.Game.SetupRules = rules.List()
return nil
}
func (rec *GameRecord) Create(db *sql.DB) error {
err := db.QueryRow("INSERT INTO games(id, name, min_players, max_players) VALUES(default, $1, $2, $3) RETURNING id",
rec.Game.Name, rec.Game.MinPlayers, rec.Game.MaxPlayers).Scan(&rec.Game.Id)
if nil != err {
return err
}
for _, rule := range rec.Game.SetupRules {
ruleRec := &SetupRuleRecord{Rule: rule, Game: rec.Game}
err = ruleRec.Create(db)
if nil != err {
return err
}
}
return nil
}
type GameRecordList struct {
records []*GameRecord
}
// FindAll loads every game row — and, via Find, each game's setup rules —
// into the receiver's record list, replacing any previous contents.
func (recs *GameRecordList) FindAll(db *sql.DB) error {
	recs.records = make([]*GameRecord, 0)
	ids := make([]int, 0)
	rows, err := db.Query("SELECT id FROM games")
	if nil != err {
		return err
	}
	defer rows.Close()
	// Collect the ids first so the cursor isn't held open while Find()
	// issues further queries below.
	for rows.Next() {
		var id int
		if err := rows.Scan(&id); nil != err {
			return err
		}
		ids = append(ids, id)
	}
	// BUG FIX: rows.Next() returning false can hide an iteration error;
	// per the database/sql contract, rows.Err() must be checked after
	// the loop.
	if err := rows.Err(); nil != err {
		return err
	}
	for _, id := range ids {
		gameRec := &GameRecord{&game.Game{}}
		err := gameRec.Find(db, id)
		if nil != err {
			return errors.New(fmt.Sprintf("Error finding game %d: %s", id, err))
		}
		fmt.Printf("Loaded game %d\n", gameRec.Game.Id)
		recs.records = append(recs.records, gameRec)
	}
	return nil
}
// List returns the game objects held by the loaded records, in record order.
func (recs *GameRecordList) List() []*game.Game {
	games := make([]*game.Game, len(recs.records))
	for i, rec := range recs.records {
		games[i] = rec.Game
	}
	return games
}
Add FindByName to load a game record (and its setup rules) by name instead of id.
package record
import (
"database/sql"
"errors"
"fmt"
_ "github.com/lib/pq"
"github.com/rkbodenner/parallel_universe/game"
)
type GameRecord struct {
Game *game.Game
}
func NewGameRecord(g *game.Game) *GameRecord {
return &GameRecord{g}
}
func NewEmptyGameRecord() *GameRecord {
return &GameRecord{
&game.Game{
SetupRules: make([]*game.SetupRule, 0),
},
}
}
func (rec *GameRecord) Find(db *sql.DB, id int) error {
var err error
var name string
var minPlayers int
var maxPlayers int
err = db.QueryRow("SELECT name, min_players, max_players FROM games WHERE id = $1", id).Scan(&name, &minPlayers, &maxPlayers)
if nil != err {
return err
}
rec.Game.Id = (uint)(id)
rec.Game.Name = name
rec.Game.MinPlayers = minPlayers
rec.Game.MaxPlayers = maxPlayers
// Eager-load the associated game's setup rules
rules := NewSetupRuleRecordList()
err = rules.FindByGame(db, rec.Game)
if err != nil {
return err
}
fmt.Printf("Loaded %d setup rules\n", len(rules.List()))
rec.Game.SetupRules = rules.List()
return nil
}
func (rec *GameRecord) FindByName(db *sql.DB, name string) error {
var err error
var id int
var minPlayers int
var maxPlayers int
err = db.QueryRow("SELECT id, min_players, max_players FROM games WHERE name = $1", name).Scan(&id, &minPlayers, &maxPlayers)
if nil != err {
return err
}
rec.Game.Id = (uint)(id)
rec.Game.Name = name
rec.Game.MinPlayers = minPlayers
rec.Game.MaxPlayers = maxPlayers
// Eager-load the associated game's setup rules
rec.findAssociations(db)
return nil
}
func (rec *GameRecord) findAssociations(db *sql.DB) error {
rules := NewSetupRuleRecordList()
err := rules.FindByGame(db, rec.Game)
if err != nil {
return err
}
fmt.Printf("Loaded %d setup rules\n", len(rules.List()))
rec.Game.SetupRules = rules.List()
return nil
}
func (rec *GameRecord) Create(db *sql.DB) error {
err := db.QueryRow("INSERT INTO games(id, name, min_players, max_players) VALUES(default, $1, $2, $3) RETURNING id",
rec.Game.Name, rec.Game.MinPlayers, rec.Game.MaxPlayers).Scan(&rec.Game.Id)
if nil != err {
return err
}
for _, rule := range rec.Game.SetupRules {
ruleRec := &SetupRuleRecord{Rule: rule, Game: rec.Game}
err = ruleRec.Create(db)
if nil != err {
return err
}
}
return nil
}
type GameRecordList struct {
records []*GameRecord
}
// FindAll loads every game row — and, via Find, each game's setup rules —
// into the receiver's record list, replacing any previous contents.
func (recs *GameRecordList) FindAll(db *sql.DB) error {
	recs.records = make([]*GameRecord, 0)
	ids := make([]int, 0)
	rows, err := db.Query("SELECT id FROM games")
	if nil != err {
		return err
	}
	defer rows.Close()
	// Collect the ids first so the cursor isn't held open while Find()
	// issues further queries below.
	for rows.Next() {
		var id int
		if err := rows.Scan(&id); nil != err {
			return err
		}
		ids = append(ids, id)
	}
	// BUG FIX: rows.Next() returning false can hide an iteration error;
	// per the database/sql contract, rows.Err() must be checked after
	// the loop.
	if err := rows.Err(); nil != err {
		return err
	}
	for _, id := range ids {
		gameRec := &GameRecord{&game.Game{}}
		err := gameRec.Find(db, id)
		if nil != err {
			return errors.New(fmt.Sprintf("Error finding game %d: %s", id, err))
		}
		fmt.Printf("Loaded game %d\n", gameRec.Game.Id)
		recs.records = append(recs.records, gameRec)
	}
	return nil
}
func (recs *GameRecordList) List() []*game.Game {
games := make([]*game.Game, 0)
for _, rec := range recs.records {
games = append(games, rec.Game)
}
return games
}
|
package bzip2
import (
"testing"
)
func TestNewWriter(t *testing.T) {
t.Parallel()
}
func TestNewWriterLevel(t *testing.T) {
t.Run("with DefaultCompression level", func(t *testing.T) {
t.Parallel()
})
t.Run("with level lower than BestSpeed", func(t *testing.T) {
t.Parallel()
})
t.Run("with BestSpeed level", func(t *testing.T) {
t.Parallel()
})
t.Run("with level within BestSpeed and BestCompression", func(t *testing.T) {
t.Parallel()
})
t.Run("with BestCompression level", func(t *testing.T) {
t.Parallel()
})
t.Run("with level greater than BestCompression", func(t *testing.T) {
t.Parallel()
})
}
func TestWriterErr(t *testing.T) {
t.Run("no error has occurred", func(t *testing.T) {
t.Parallel()
})
t.Run("error occurred during Write", func(t *testing.T) {
t.Parallel()
})
t.Run("error occurred during Close", func(t *testing.T) {
t.Parallel()
})
}
func TestWriterReset(t *testing.T) {
t.Parallel()
}
func TestWriterClose(t *testing.T) {
t.Run("writer has errored previously", func(t *testing.T) {
t.Parallel()
})
t.Run("with no data written", func(t *testing.T) {
t.Parallel()
})
t.Run("with an incomplete block", func(t *testing.T) {
t.Parallel()
})
t.Run("with a completed block", func(t *testing.T) {
t.Parallel()
})
}
func TestWriterWrite(t *testing.T) {
t.Run("writer has errored previously", func(t *testing.T) {
t.Parallel()
})
t.Run("with an empty writer less than a block of data is written", func(t *testing.T) {
t.Parallel()
})
t.Run("with some data in the writer enough data is written to fill a block of data", func(t *testing.T) {
t.Parallel()
})
t.Run("with some data in the writer enough data is written to fill a block of data with left over", func(t *testing.T) {
t.Parallel()
})
t.Run("with an empty writer enough data is written to fill a block of data", func(t *testing.T) {
t.Parallel()
})
t.Run("with an empty writer enough data is written to fill a block of data with left over", func(t *testing.T) {
t.Parallel()
})
t.Run("with an empty writer enough data is written to fill multiple blocks of data", func(t *testing.T) {
t.Parallel()
})
}
Add more test cases for writer
package bzip2
import (
"testing"
)
func TestNewWriter(t *testing.T) {
t.Parallel()
}
func TestNewWriterLevel(t *testing.T) {
t.Run("with DefaultCompression level", func(t *testing.T) {
t.Parallel()
})
t.Run("with level lower than BestSpeed", func(t *testing.T) {
t.Parallel()
})
t.Run("with BestSpeed level", func(t *testing.T) {
t.Parallel()
})
t.Run("with level within BestSpeed and BestCompression", func(t *testing.T) {
t.Parallel()
})
t.Run("with BestCompression level", func(t *testing.T) {
t.Parallel()
})
t.Run("with level greater than BestCompression", func(t *testing.T) {
t.Parallel()
})
}
func TestWriterErr(t *testing.T) {
t.Run("no error has occurred", func(t *testing.T) {
t.Parallel()
})
t.Run("writer has been closed", func(t *testing.T) {
t.Parallel()
})
t.Run("error occurred during Write", func(t *testing.T) {
t.Parallel()
})
t.Run("error occurred during Close", func(t *testing.T) {
t.Parallel()
})
}
func TestWriterReset(t *testing.T) {
t.Parallel()
}
func TestWriterClose(t *testing.T) {
t.Run("writer has errored previously", func(t *testing.T) {
t.Parallel()
})
t.Run("writer has been closed", func(t *testing.T) {
t.Parallel()
})
t.Run("with no data written", func(t *testing.T) {
t.Parallel()
})
t.Run("with an incomplete block", func(t *testing.T) {
t.Parallel()
})
t.Run("with a completed block", func(t *testing.T) {
t.Parallel()
})
}
func TestWriterWrite(t *testing.T) {
t.Run("writer has errored previously", func(t *testing.T) {
t.Parallel()
})
t.Run("writer has been closed", func(t *testing.T) {
t.Parallel()
})
t.Run("with an empty writer less than a block of data is written", func(t *testing.T) {
t.Parallel()
})
t.Run("with some data in the writer enough data is written to fill a block of data", func(t *testing.T) {
t.Parallel()
})
t.Run("with some data in the writer enough data is written to fill a block of data with left over", func(t *testing.T) {
t.Parallel()
})
t.Run("with an empty writer enough data is written to fill a block of data", func(t *testing.T) {
t.Parallel()
})
t.Run("with an empty writer enough data is written to fill a block of data with left over", func(t *testing.T) {
t.Parallel()
})
t.Run("with an empty writer enough data is written to fill multiple blocks of data", func(t *testing.T) {
t.Parallel()
})
}
|
package player
import (
"bytes"
"expvar"
"fmt"
"log"
"net"
"os"
"sync"
"chunkymonkey/gamerules"
"chunkymonkey/nbtutil"
"chunkymonkey/physics"
"chunkymonkey/proto"
. "chunkymonkey/types"
"chunkymonkey/window"
"nbt"
)
var (
expVarPlayerConnectionCount *expvar.Int
expVarPlayerDisconnectionCount *expvar.Int
errUnknownItemID os.Error
)
const (
StanceNormal = 1.62
MaxHealth = 20
)
func init() {
expVarPlayerConnectionCount = expvar.NewInt("player-connection-count")
expVarPlayerDisconnectionCount = expvar.NewInt("player-disconnection-count")
errUnknownItemID = os.NewError("Unknown item ID")
}
type Player struct {
EntityId
shardReceiver shardPlayerClient
shardConnecter gamerules.IShardConnecter
conn net.Conn
name string
spawnBlock BlockXyz
position AbsXyz
height AbsCoord
look LookDegrees
chunkSubs chunkSubscriptions
loginComplete bool
health Health
cursor gamerules.Slot // Item being moved by mouse cursor.
inventory window.PlayerInventory
curWindow window.IWindow
nextWindowId WindowId
remoteInv *RemoteInventory
mainQueue chan func(*Player)
txQueue chan []byte
// TODO remove this lock, packet handling shouldn't use a lock, it should use
// a channel instead (ideally).
lock sync.Mutex
onDisconnect chan<- EntityId
}
func NewPlayer(entityId EntityId, shardConnecter gamerules.IShardConnecter, conn net.Conn, name string, spawnBlock BlockXyz, onDisconnect chan<- EntityId) *Player {
player := &Player{
EntityId: entityId,
shardConnecter: shardConnecter,
conn: conn,
name: name,
spawnBlock: spawnBlock,
position: AbsXyz{
X: AbsCoord(spawnBlock.X),
Y: AbsCoord(spawnBlock.Y),
Z: AbsCoord(spawnBlock.Z),
},
height: StanceNormal,
look: LookDegrees{0, 0},
health: MaxHealth,
curWindow: nil,
nextWindowId: WindowIdFreeMin,
mainQueue: make(chan func(*Player), 128),
txQueue: make(chan []byte, 128),
onDisconnect: onDisconnect,
}
player.shardReceiver.Init(player)
player.inventory.Init(player.EntityId, player)
return player
}
// ReadNbt reads the player data from their persistently stored NBT data. It
// must only be called before Player.Start().
func (player *Player) ReadNbt(playerData nbt.ITag) (err os.Error) {
if player.position, err = nbtutil.ReadAbsXyz(playerData, "Pos"); err != nil {
return
}
if player.look, err = nbtutil.ReadLookDegrees(playerData, "Rotation"); err != nil {
return
}
health, err := nbtutil.ReadShort(playerData, "Health")
if err != nil {
return
}
player.health = Health(health)
if err = player.inventory.ReadNbt(playerData.Lookup("Inventory")); err != nil {
return
}
return
}
// getHeldItemTypeId reports the item type ID of the currently held slot.
// Negative IDs (an empty/invalid slot) are clamped to zero.
func (player *Player) getHeldItemTypeId() ItemTypeId {
	slot, _ := player.inventory.HeldItem()
	if id := slot.ItemTypeId; id >= 0 {
		return id
	}
	return 0
}
// Start sends the initial login and spawn-position packets to the client
// and launches the player's three goroutines: packet receive, packet
// transmit, and the main queued-call loop.
func (player *Player) Start() {
	buf := &bytes.Buffer{}
	// TODO pass proper dimension. This is low priority, because we don't yet
	// support multiple dimensions.
	proto.ServerWriteLogin(buf, player.EntityId, 0, DimensionNormal)
	proto.WriteSpawnPosition(buf, &player.spawnBlock)
	player.TransmitPacket(buf.Bytes())
	go player.receiveLoop()
	go player.transmitLoop()
	go player.mainLoop()
}
// Start of packet handling code
// Note: any packet handlers that could change the player state or read a
// changeable state must use player.lock
func (player *Player) PacketKeepAlive() {
}
// PacketChatMessage handles chat input from the client. A message that
// starts with the command prefix is dispatched to the command framework;
// anything else is broadcast as ordinary chat.
func (player *Player) PacketChatMessage(message string) {
	prefix := gamerules.CommandFramework.Prefix()
	// Check the length before slicing: a message shorter than the prefix
	// (e.g. an empty chat line) would otherwise panic with a slice index
	// out of range.
	if len(message) >= len(prefix) && message[:len(prefix)] == prefix {
		gamerules.CommandFramework.Process(message, player)
	} else {
		player.sendChatMessage(fmt.Sprintf("<%s> %s", player.name, message), true)
	}
}
func (player *Player) PacketEntityAction(entityId EntityId, action EntityAction) {
}
func (player *Player) PacketUseEntity(user EntityId, target EntityId, leftClick bool) {
}
func (player *Player) PacketRespawn(dimension DimensionId) {
}
func (player *Player) PacketPlayer(onGround bool) {
}
func (player *Player) PacketPlayerPosition(position *AbsXyz, stance AbsCoord, onGround bool) {
player.lock.Lock()
defer player.lock.Unlock()
if !player.position.IsWithinDistanceOf(position, 10) {
log.Printf("Discarding player position that is too far removed (%.2f, %.2f, %.2f)",
position.X, position.Y, position.Z)
return
}
player.position = *position
player.height = stance - position.Y
player.chunkSubs.Move(position)
// TODO: Should keep track of when players enter/leave their mutual radius
// of "awareness". I.e a client should receive a RemoveEntity packet when
// the player walks out of range, and no longer receive WriteEntityTeleport
// packets for them. The converse should happen when players come in range
// of each other.
}
func (player *Player) PacketPlayerLook(look *LookDegrees, onGround bool) {
player.lock.Lock()
defer player.lock.Unlock()
// TODO input validation
player.look = *look
buf := new(bytes.Buffer)
proto.WriteEntityLook(buf, player.EntityId, look.ToLookBytes())
// TODO update playerData on current chunk
player.chunkSubs.curShard.ReqMulticastPlayers(
player.chunkSubs.curChunkLoc,
player.EntityId,
buf.Bytes(),
)
}
// PacketPlayerBlockHit handles a block-dig packet from the client. A status
// of DigDropItem with zero target and face means "throw the held item";
// any other status is validated for reach distance and relayed to the shard
// that owns the target block.
func (player *Player) PacketPlayerBlockHit(status DigStatus, target *BlockXyz, face Face) {
	player.lock.Lock()
	defer player.lock.Unlock()

	// This packet handles 'throwing' an item as well, with status =
	// DigDropItem (the named constant, not the bare value 4), and the zero
	// values for target and face, so check for that.
	if status == DigDropItem && target.IsZero() && face == 0 {
		blockLoc := player.position.ToBlockXyz()
		shardClient, _, ok := player.chunkSubs.ShardClientForBlockXyz(blockLoc)
		if !ok {
			return
		}

		var itemToThrow gamerules.Slot
		player.inventory.TakeOneHeldItem(&itemToThrow)
		if !itemToThrow.IsEmpty() {
			velocity := physics.VelocityFromLook(player.look, 0.30)
			position := player.position
			position.Y += player.height
			shardClient.ReqDropItem(itemToThrow, position, velocity, TicksPerSecond/2)
		}
		return
	}

	// Validate that the player is actually somewhere near the block.
	targetAbsPos := target.MidPointToAbsXyz()
	if !targetAbsPos.IsWithinDistanceOf(&player.position, MaxInteractDistance) {
		log.Printf("Player/PacketPlayerBlockHit: ignoring player dig at %v (too far away)", target)
		return
	}

	// TODO measure the dig time on the target block and relay to the shard to
	// stop speed hacking (based on block type and tool used - non-trivial).
	shardClient, _, ok := player.chunkSubs.ShardClientForBlockXyz(target)
	if ok {
		held, _ := player.inventory.HeldItem()
		shardClient.ReqHitBlock(held, *target, status, face)
	}
}
func (player *Player) PacketPlayerBlockInteract(itemId ItemTypeId, target *BlockXyz, face Face, amount ItemCount, uses ItemData) {
if face < FaceMinValid || face > FaceMaxValid {
// TODO sometimes FaceNull means something. This case should be covered.
log.Printf("Player/PacketPlayerBlockInteract: invalid face %d", face)
return
}
player.lock.Lock()
defer player.lock.Unlock()
// Validate that the player is actually somewhere near the block.
targetAbsPos := target.MidPointToAbsXyz()
if !targetAbsPos.IsWithinDistanceOf(&player.position, MaxInteractDistance) {
log.Printf("Player/PacketPlayerBlockInteract: ignoring player interact at %v (too far away)", target)
return
}
shardClient, _, ok := player.chunkSubs.ShardClientForBlockXyz(target)
if ok {
held, _ := player.inventory.HeldItem()
shardClient.ReqInteractBlock(held, *target, face)
}
}
func (player *Player) PacketHoldingChange(slotId SlotId) {
player.lock.Lock()
defer player.lock.Unlock()
player.inventory.SetHolding(slotId)
}
func (player *Player) PacketEntityAnimation(entityId EntityId, animation EntityAnimation) {
}
func (player *Player) PacketUnknown0x1b(field1, field2 float32, field3, field4 bool, field5, field6 float32) {
log.Printf(
"PacketUnknown0x1b(field1=%v, field2=%v, field3=%t, field4=%t, field5=%v, field6=%v)",
field1, field2, field3, field4, field5, field6)
}
func (player *Player) PacketUnknown0x3d(field1, field2 int32, field3 int8, field4, field5 int32) {
// TODO Remove this method if it's S->C only.
log.Printf(
"PacketUnknown0x3d(field1=%d, field2=%d, field3=%d, field4=%d, field5=%d)",
field1, field2, field3, field4, field5)
}
func (player *Player) PacketWindowClose(windowId WindowId) {
player.lock.Lock()
defer player.lock.Unlock()
player.closeCurrentWindow(false)
}
func (player *Player) PacketWindowClick(windowId WindowId, slotId SlotId, rightClick bool, txId TxId, shiftClick bool, expectedSlot *proto.WindowSlot) {
player.lock.Lock()
defer player.lock.Unlock()
// Note that the expectedSlot parameter is currently ignored. The item(s)
// involved are worked out from the server-side data.
// TODO use the expectedSlot as a conditions for the click, and base the
// transaction result on that.
// Determine which inventory window is involved.
// TODO support for more windows
var clickedWindow window.IWindow
if windowId == WindowIdInventory {
clickedWindow = &player.inventory
} else if player.curWindow != nil && player.curWindow.WindowId() == windowId {
clickedWindow = player.curWindow
} else {
log.Printf(
"Warning: ignored window click on unknown window ID %d",
windowId)
}
expectedSlotContent := &gamerules.Slot{
ItemTypeId: expectedSlot.ItemTypeId,
Count: expectedSlot.Count,
Data: expectedSlot.Data,
}
// The client tends to send item IDs even when the count is zero.
expectedSlotContent.Normalize()
txState := TxStateRejected
click := gamerules.Click{
SlotId: slotId,
Cursor: player.cursor,
RightClick: rightClick,
ShiftClick: shiftClick,
TxId: txId,
}
click.ExpectedSlot.SetWindowSlot(expectedSlot)
if clickedWindow != nil {
txState = clickedWindow.Click(&click)
}
switch txState {
case TxStateAccepted, TxStateRejected:
// Inform client of operation status.
buf := new(bytes.Buffer)
proto.WriteWindowTransaction(buf, windowId, txId, txState == TxStateAccepted)
player.cursor = click.Cursor
player.cursor.SendUpdate(buf, WindowIdCursor, SlotIdCursor)
player.TransmitPacket(buf.Bytes())
case TxStateDeferred:
// The remote inventory should send the transaction outcome.
}
}
func (player *Player) PacketWindowTransaction(windowId WindowId, txId TxId, accepted bool) {
// TODO investigate when this packet is sent from the client and what it
// means when it does get sent.
log.Printf(
"Got PacketWindowTransaction from player %q: windowId=%d txId=%d accepted=%t",
player.name, windowId, txId, accepted)
}
func (player *Player) PacketSignUpdate(position *BlockXyz, lines [4]string) {
}
func (player *Player) PacketDisconnect(reason string) {
log.Printf("Player %s disconnected reason=%s", player.name, reason)
player.sendChatMessage(fmt.Sprintf("%s has left", player.name), false)
player.onDisconnect <- player.EntityId
player.txQueue <- nil
player.mainQueue <- nil
player.conn.Close()
}
func (player *Player) receiveLoop() {
for {
err := proto.ServerReadPacket(player.conn, player)
if err != nil {
if err != os.EOF {
log.Print("ReceiveLoop failed: ", err.String())
}
return
}
}
}
// End of packet handling code
// transmitLoop writes queued packets to the client connection. A closed
// txQueue or a nil packet acts as a shutdown sentinel (see
// PacketDisconnect); write failures other than EOF are logged before the
// loop exits.
func (player *Player) transmitLoop() {
	for {
		bs, ok := <-player.txQueue
		if !ok || bs == nil {
			return // txQueue closed
		}
		_, err := player.conn.Write(bs)
		if err != nil {
			if err != os.EOF {
				log.Print("TransmitLoop failed: ", err.String())
			}
			return
		}
	}
}
// TransmitPacket queues a raw packet for asynchronous delivery to the
// client. A nil packet is silently ignored.
func (player *Player) TransmitPacket(packet []byte) {
	if packet != nil {
		player.txQueue <- packet
	}
}
func (player *Player) runQueuedCall(f func(*Player)) {
player.lock.Lock()
defer player.lock.Unlock()
f(player)
}
func (player *Player) mainLoop() {
expVarPlayerConnectionCount.Add(1)
defer expVarPlayerDisconnectionCount.Add(1)
player.chunkSubs.Init(player)
defer player.chunkSubs.Close()
player.sendChatMessage(fmt.Sprintf("%s has joined", player.name), false)
for {
f, ok := <-player.mainQueue
if !ok || f == nil {
return
}
player.runQueuedCall(f)
}
}
func (player *Player) reqNotifyChunkLoad() {
// Player seems to fall through block unless elevated very slightly.
player.position.Y += 0.01
if !player.loginComplete {
player.loginComplete = true
// Send player start position etc.
buf := new(bytes.Buffer)
proto.ServerWritePlayerPositionLook(
buf,
&player.position, player.position.Y+player.height,
&player.look, false)
player.inventory.WriteWindowItems(buf)
proto.WriteUpdateHealth(buf, player.health)
player.TransmitPacket(buf.Bytes())
}
}
func (player *Player) reqInventorySubscribed(block *BlockXyz, invTypeId InvTypeId, slots []proto.WindowSlot) {
if player.remoteInv != nil {
player.closeCurrentWindow(true)
}
remoteInv := NewRemoteInventory(block, &player.chunkSubs, slots)
window := player.inventory.NewWindow(invTypeId, player.nextWindowId, remoteInv)
if window == nil {
return
}
player.remoteInv = remoteInv
player.curWindow = window
if player.nextWindowId >= WindowIdFreeMax {
player.nextWindowId = WindowIdFreeMin
} else {
player.nextWindowId++
}
buf := new(bytes.Buffer)
window.WriteWindowOpen(buf)
window.WriteWindowItems(buf)
player.TransmitPacket(buf.Bytes())
}
func (player *Player) reqInventorySlotUpdate(block *BlockXyz, slot *gamerules.Slot, slotId SlotId) {
if player.remoteInv == nil || !player.remoteInv.IsForBlock(block) {
return
}
player.remoteInv.slotUpdate(slot, slotId)
}
func (player *Player) reqInventoryProgressUpdate(block *BlockXyz, prgBarId PrgBarId, value PrgBarValue) {
if player.remoteInv == nil || !player.remoteInv.IsForBlock(block) {
return
}
player.remoteInv.progressUpdate(prgBarId, value)
}
func (player *Player) reqInventoryCursorUpdate(block *BlockXyz, cursor *gamerules.Slot) {
if player.remoteInv == nil || !player.remoteInv.IsForBlock(block) {
return
}
player.cursor = *cursor
buf := new(bytes.Buffer)
player.cursor.SendUpdate(buf, WindowIdCursor, SlotIdCursor)
player.TransmitPacket(buf.Bytes())
}
func (player *Player) reqInventoryTxState(block *BlockXyz, txId TxId, accepted bool) {
if player.remoteInv == nil || !player.remoteInv.IsForBlock(block) || player.curWindow == nil {
return
}
buf := new(bytes.Buffer)
proto.WriteWindowTransaction(buf, player.curWindow.WindowId(), txId, accepted)
player.TransmitPacket(buf.Bytes())
}
func (player *Player) reqInventoryUnsubscribed(block *BlockXyz) {
if player.remoteInv == nil || !player.remoteInv.IsForBlock(block) {
return
}
player.closeCurrentWindow(true)
}
func (player *Player) reqPlaceHeldItem(target *BlockXyz, wasHeld *gamerules.Slot) {
curHeld, _ := player.inventory.HeldItem()
// Currently held item has changed since chunk saw it.
// TODO think about having the slot index passed as well so if that changes,
// we can still track the original item and improve placement success rate.
if !curHeld.IsSameType(wasHeld) {
return
}
shardClient, _, ok := player.chunkSubs.ShardClientForBlockXyz(target)
if ok {
var into gamerules.Slot
player.inventory.TakeOneHeldItem(&into)
shardClient.ReqPlaceItem(*target, into)
}
}
// reqOfferItem is used to receive items picked up from chunks. It is
// synchronous so that the passed item can be looked at by the caller
// afterwards to see if it has been consumed.
func (player *Player) reqOfferItem(fromChunk *ChunkXz, entityId EntityId, item *gamerules.Slot) {
	if player.inventory.CanTakeItem(item) {
		shardClient, ok := player.chunkSubs.ShardClientForChunkXz(fromChunk)
		if ok {
			shardClient.ReqTakeItem(*fromChunk, entityId)
		}
	}
	// Note: the redundant trailing bare return has been removed; control
	// simply falls off the end of this void function.
}
func (player *Player) reqGiveItem(atPosition *AbsXyz, item *gamerules.Slot) {
defer func() {
// Check if item not fully consumed. If it is not, then throw the remains
// back to the chunk.
if item.Count > 0 {
chunkLoc := atPosition.ToChunkXz()
shardClient, ok := player.chunkSubs.ShardClientForChunkXz(&chunkLoc)
if ok {
shardClient.ReqDropItem(*item, *atPosition, AbsVelocity{}, TicksPerSecond)
}
}
}()
player.inventory.PutItem(item)
}
// Enqueue queues a function to run with the player lock within the player's
// mainloop.
func (player *Player) Enqueue(f func(*Player)) {
if f == nil {
return
}
player.mainQueue <- f
}
func (player *Player) sendChatMessage(message string, sendToSelf bool) {
buf := new(bytes.Buffer)
proto.WriteChatMessage(buf, message)
packet := buf.Bytes()
if sendToSelf {
player.TransmitPacket(packet)
}
player.chunkSubs.curShard.ReqMulticastPlayers(
player.chunkSubs.curChunkLoc,
player.EntityId,
packet,
)
}
// closeCurrentWindow closes any open window. It must be called with
// player.lock held.
func (player *Player) closeCurrentWindow(sendClosePacket bool) {
if player.curWindow != nil {
player.curWindow.Finalize(sendClosePacket)
player.curWindow = nil
}
if player.remoteInv != nil {
player.remoteInv.Close()
player.remoteInv = nil
}
player.inventory.Resubscribe()
}
// ICommandHandler implementations
func (player *Player) SendMessageToPlayer(msg string) {
buf := new(bytes.Buffer)
proto.WriteChatMessage(buf, msg)
packet := buf.Bytes()
player.TransmitPacket(packet)
}
func (player *Player) BroadcastMessage(msg string, self bool) {
player.sendChatMessage(msg, self)
}
func (player *Player) GiveItem(id int, quantity int, data int) {
item := gamerules.Slot{
ItemTypeId: ItemTypeId(id),
Count: ItemCount(quantity),
Data: ItemData(data),
}
player.reqGiveItem(&player.position, &item)
}
Use the named constant DigDropItem in place of the magic number 4 in PacketPlayerBlockHit.
package player
import (
"bytes"
"expvar"
"fmt"
"log"
"net"
"os"
"sync"
"chunkymonkey/gamerules"
"chunkymonkey/nbtutil"
"chunkymonkey/physics"
"chunkymonkey/proto"
. "chunkymonkey/types"
"chunkymonkey/window"
"nbt"
)
var (
expVarPlayerConnectionCount *expvar.Int
expVarPlayerDisconnectionCount *expvar.Int
errUnknownItemID os.Error
)
const (
StanceNormal = 1.62
MaxHealth = 20
)
func init() {
expVarPlayerConnectionCount = expvar.NewInt("player-connection-count")
expVarPlayerDisconnectionCount = expvar.NewInt("player-disconnection-count")
errUnknownItemID = os.NewError("Unknown item ID")
}
type Player struct {
EntityId
shardReceiver shardPlayerClient
shardConnecter gamerules.IShardConnecter
conn net.Conn
name string
spawnBlock BlockXyz
position AbsXyz
height AbsCoord
look LookDegrees
chunkSubs chunkSubscriptions
loginComplete bool
health Health
cursor gamerules.Slot // Item being moved by mouse cursor.
inventory window.PlayerInventory
curWindow window.IWindow
nextWindowId WindowId
remoteInv *RemoteInventory
mainQueue chan func(*Player)
txQueue chan []byte
// TODO remove this lock, packet handling shouldn't use a lock, it should use
// a channel instead (ideally).
lock sync.Mutex
onDisconnect chan<- EntityId
}
func NewPlayer(entityId EntityId, shardConnecter gamerules.IShardConnecter, conn net.Conn, name string, spawnBlock BlockXyz, onDisconnect chan<- EntityId) *Player {
player := &Player{
EntityId: entityId,
shardConnecter: shardConnecter,
conn: conn,
name: name,
spawnBlock: spawnBlock,
position: AbsXyz{
X: AbsCoord(spawnBlock.X),
Y: AbsCoord(spawnBlock.Y),
Z: AbsCoord(spawnBlock.Z),
},
height: StanceNormal,
look: LookDegrees{0, 0},
health: MaxHealth,
curWindow: nil,
nextWindowId: WindowIdFreeMin,
mainQueue: make(chan func(*Player), 128),
txQueue: make(chan []byte, 128),
onDisconnect: onDisconnect,
}
player.shardReceiver.Init(player)
player.inventory.Init(player.EntityId, player)
return player
}
// ReadNbt reads the player data from their persistently stored NBT data. It
// must only be called before Player.Start().
func (player *Player) ReadNbt(playerData nbt.ITag) (err os.Error) {
if player.position, err = nbtutil.ReadAbsXyz(playerData, "Pos"); err != nil {
return
}
if player.look, err = nbtutil.ReadLookDegrees(playerData, "Rotation"); err != nil {
return
}
health, err := nbtutil.ReadShort(playerData, "Health")
if err != nil {
return
}
player.health = Health(health)
if err = player.inventory.ReadNbt(playerData.Lookup("Inventory")); err != nil {
return
}
return
}
// getHeldItemTypeId reports the item type ID of the currently held slot.
// Negative IDs (an empty/invalid slot) are clamped to zero.
func (player *Player) getHeldItemTypeId() ItemTypeId {
	slot, _ := player.inventory.HeldItem()
	if id := slot.ItemTypeId; id >= 0 {
		return id
	}
	return 0
}
func (player *Player) Start() {
buf := &bytes.Buffer{}
// TODO pass proper dimension. This is low priority, because we don't yet
// support multiple dimensions.
proto.ServerWriteLogin(buf, player.EntityId, 0, DimensionNormal)
proto.WriteSpawnPosition(buf, &player.spawnBlock)
player.TransmitPacket(buf.Bytes())
go player.receiveLoop()
go player.transmitLoop()
go player.mainLoop()
}
// Start of packet handling code
// Note: any packet handlers that could change the player state or read a
// changeable state must use player.lock
func (player *Player) PacketKeepAlive() {
}
// PacketChatMessage handles chat input from the client. A message that
// starts with the command prefix is dispatched to the command framework;
// anything else is broadcast as ordinary chat.
func (player *Player) PacketChatMessage(message string) {
	prefix := gamerules.CommandFramework.Prefix()
	// Check the length before slicing: a message shorter than the prefix
	// (e.g. an empty chat line) would otherwise panic with a slice index
	// out of range.
	if len(message) >= len(prefix) && message[:len(prefix)] == prefix {
		gamerules.CommandFramework.Process(message, player)
	} else {
		player.sendChatMessage(fmt.Sprintf("<%s> %s", player.name, message), true)
	}
}
func (player *Player) PacketEntityAction(entityId EntityId, action EntityAction) {
}
func (player *Player) PacketUseEntity(user EntityId, target EntityId, leftClick bool) {
}
func (player *Player) PacketRespawn(dimension DimensionId) {
}
func (player *Player) PacketPlayer(onGround bool) {
}
func (player *Player) PacketPlayerPosition(position *AbsXyz, stance AbsCoord, onGround bool) {
player.lock.Lock()
defer player.lock.Unlock()
if !player.position.IsWithinDistanceOf(position, 10) {
log.Printf("Discarding player position that is too far removed (%.2f, %.2f, %.2f)",
position.X, position.Y, position.Z)
return
}
player.position = *position
player.height = stance - position.Y
player.chunkSubs.Move(position)
// TODO: Should keep track of when players enter/leave their mutual radius
// of "awareness". I.e a client should receive a RemoveEntity packet when
// the player walks out of range, and no longer receive WriteEntityTeleport
// packets for them. The converse should happen when players come in range
// of each other.
}
func (player *Player) PacketPlayerLook(look *LookDegrees, onGround bool) {
player.lock.Lock()
defer player.lock.Unlock()
// TODO input validation
player.look = *look
buf := new(bytes.Buffer)
proto.WriteEntityLook(buf, player.EntityId, look.ToLookBytes())
// TODO update playerData on current chunk
player.chunkSubs.curShard.ReqMulticastPlayers(
player.chunkSubs.curChunkLoc,
player.EntityId,
buf.Bytes(),
)
}
func (player *Player) PacketPlayerBlockHit(status DigStatus, target *BlockXyz, face Face) {
player.lock.Lock()
defer player.lock.Unlock()
// This packet handles 'throwing' an item as well, with status = 4, and
// the zero values for target and face, so check for that.
if status == DigDropItem && target.IsZero() && face == 0 {
blockLoc := player.position.ToBlockXyz()
shardClient, _, ok := player.chunkSubs.ShardClientForBlockXyz(blockLoc)
if !ok {
return
}
var itemToThrow gamerules.Slot
player.inventory.TakeOneHeldItem(&itemToThrow)
if !itemToThrow.IsEmpty() {
velocity := physics.VelocityFromLook(player.look, 0.30)
position := player.position
position.Y += player.height
shardClient.ReqDropItem(itemToThrow, position, velocity, TicksPerSecond/2)
}
return
}
// Validate that the player is actually somewhere near the block.
targetAbsPos := target.MidPointToAbsXyz()
if !targetAbsPos.IsWithinDistanceOf(&player.position, MaxInteractDistance) {
log.Printf("Player/PacketPlayerBlockHit: ignoring player dig at %v (too far away)", target)
return
}
// TODO measure the dig time on the target block and relay to the shard to
// stop speed hacking (based on block type and tool used - non-trivial).
shardClient, _, ok := player.chunkSubs.ShardClientForBlockXyz(target)
if ok {
held, _ := player.inventory.HeldItem()
shardClient.ReqHitBlock(held, *target, status, face)
}
}
func (player *Player) PacketPlayerBlockInteract(itemId ItemTypeId, target *BlockXyz, face Face, amount ItemCount, uses ItemData) {
if face < FaceMinValid || face > FaceMaxValid {
// TODO sometimes FaceNull means something. This case should be covered.
log.Printf("Player/PacketPlayerBlockInteract: invalid face %d", face)
return
}
player.lock.Lock()
defer player.lock.Unlock()
// Validate that the player is actually somewhere near the block.
targetAbsPos := target.MidPointToAbsXyz()
if !targetAbsPos.IsWithinDistanceOf(&player.position, MaxInteractDistance) {
log.Printf("Player/PacketPlayerBlockInteract: ignoring player interact at %v (too far away)", target)
return
}
shardClient, _, ok := player.chunkSubs.ShardClientForBlockXyz(target)
if ok {
held, _ := player.inventory.HeldItem()
shardClient.ReqInteractBlock(held, *target, face)
}
}
func (player *Player) PacketHoldingChange(slotId SlotId) {
player.lock.Lock()
defer player.lock.Unlock()
player.inventory.SetHolding(slotId)
}
func (player *Player) PacketEntityAnimation(entityId EntityId, animation EntityAnimation) {
}
func (player *Player) PacketUnknown0x1b(field1, field2 float32, field3, field4 bool, field5, field6 float32) {
log.Printf(
"PacketUnknown0x1b(field1=%v, field2=%v, field3=%t, field4=%t, field5=%v, field6=%v)",
field1, field2, field3, field4, field5, field6)
}
func (player *Player) PacketUnknown0x3d(field1, field2 int32, field3 int8, field4, field5 int32) {
// TODO Remove this method if it's S->C only.
log.Printf(
"PacketUnknown0x3d(field1=%d, field2=%d, field3=%d, field4=%d, field5=%d)",
field1, field2, field3, field4, field5)
}
func (player *Player) PacketWindowClose(windowId WindowId) {
player.lock.Lock()
defer player.lock.Unlock()
player.closeCurrentWindow(false)
}
func (player *Player) PacketWindowClick(windowId WindowId, slotId SlotId, rightClick bool, txId TxId, shiftClick bool, expectedSlot *proto.WindowSlot) {
player.lock.Lock()
defer player.lock.Unlock()
// Note that the expectedSlot parameter is currently ignored. The item(s)
// involved are worked out from the server-side data.
// TODO use the expectedSlot as a conditions for the click, and base the
// transaction result on that.
// Determine which inventory window is involved.
// TODO support for more windows
var clickedWindow window.IWindow
if windowId == WindowIdInventory {
clickedWindow = &player.inventory
} else if player.curWindow != nil && player.curWindow.WindowId() == windowId {
clickedWindow = player.curWindow
} else {
log.Printf(
"Warning: ignored window click on unknown window ID %d",
windowId)
}
expectedSlotContent := &gamerules.Slot{
ItemTypeId: expectedSlot.ItemTypeId,
Count: expectedSlot.Count,
Data: expectedSlot.Data,
}
// The client tends to send item IDs even when the count is zero.
expectedSlotContent.Normalize()
txState := TxStateRejected
click := gamerules.Click{
SlotId: slotId,
Cursor: player.cursor,
RightClick: rightClick,
ShiftClick: shiftClick,
TxId: txId,
}
click.ExpectedSlot.SetWindowSlot(expectedSlot)
if clickedWindow != nil {
txState = clickedWindow.Click(&click)
}
switch txState {
case TxStateAccepted, TxStateRejected:
// Inform client of operation status.
buf := new(bytes.Buffer)
proto.WriteWindowTransaction(buf, windowId, txId, txState == TxStateAccepted)
player.cursor = click.Cursor
player.cursor.SendUpdate(buf, WindowIdCursor, SlotIdCursor)
player.TransmitPacket(buf.Bytes())
case TxStateDeferred:
// The remote inventory should send the transaction outcome.
}
}
func (player *Player) PacketWindowTransaction(windowId WindowId, txId TxId, accepted bool) {
// TODO investigate when this packet is sent from the client and what it
// means when it does get sent.
log.Printf(
"Got PacketWindowTransaction from player %q: windowId=%d txId=%d accepted=%t",
player.name, windowId, txId, accepted)
}
func (player *Player) PacketSignUpdate(position *BlockXyz, lines [4]string) {
}
// PacketDisconnect handles the client's disconnect packet: it broadcasts a
// departure message, notifies the game via onDisconnect, wakes transmitLoop
// and mainLoop with nil sentinels so they exit, and closes the connection
// (which also unblocks receiveLoop).
func (player *Player) PacketDisconnect(reason string) {
	log.Printf("Player %s disconnected reason=%s", player.name, reason)
	player.sendChatMessage(fmt.Sprintf("%s has left", player.name), false)
	player.onDisconnect <- player.EntityId
	player.txQueue <- nil
	player.mainQueue <- nil
	player.conn.Close()
}
// receiveLoop reads and dispatches client packets until the connection
// fails. EOF is treated as a normal disconnect and is not logged.
func (player *Player) receiveLoop() {
	for {
		err := proto.ServerReadPacket(player.conn, player)
		if err != nil {
			if err != os.EOF {
				log.Print("ReceiveLoop failed: ", err.String())
			}
			return
		}
	}
}
// End of packet handling code
func (player *Player) transmitLoop() {
for {
bs, ok := <-player.txQueue
if !ok || bs == nil {
return // txQueue closed
}
_, err := player.conn.Write(bs)
if err != nil {
if err != os.EOF {
log.Print("TransmitLoop failed: ", err.String())
}
return
}
}
}
// TransmitPacket queues a raw packet for asynchronous delivery to the
// client. A nil packet is silently ignored.
func (player *Player) TransmitPacket(packet []byte) {
	if packet != nil {
		player.txQueue <- packet
	}
}
func (player *Player) runQueuedCall(f func(*Player)) {
player.lock.Lock()
defer player.lock.Unlock()
f(player)
}
// mainLoop runs queued functions (see Enqueue) under the player lock until
// a nil sentinel or channel close signals shutdown. It also maintains the
// connection expvar counters and the player's chunk subscriptions for the
// lifetime of the connection.
func (player *Player) mainLoop() {
	expVarPlayerConnectionCount.Add(1)
	defer expVarPlayerDisconnectionCount.Add(1)
	player.chunkSubs.Init(player)
	defer player.chunkSubs.Close()
	player.sendChatMessage(fmt.Sprintf("%s has joined", player.name), false)
	for {
		f, ok := <-player.mainQueue
		if !ok || f == nil {
			return
		}
		player.runQueuedCall(f)
	}
}
func (player *Player) reqNotifyChunkLoad() {
// Player seems to fall through block unless elevated very slightly.
player.position.Y += 0.01
if !player.loginComplete {
player.loginComplete = true
// Send player start position etc.
buf := new(bytes.Buffer)
proto.ServerWritePlayerPositionLook(
buf,
&player.position, player.position.Y+player.height,
&player.look, false)
player.inventory.WriteWindowItems(buf)
proto.WriteUpdateHealth(buf, player.health)
player.TransmitPacket(buf.Bytes())
}
}
func (player *Player) reqInventorySubscribed(block *BlockXyz, invTypeId InvTypeId, slots []proto.WindowSlot) {
if player.remoteInv != nil {
player.closeCurrentWindow(true)
}
remoteInv := NewRemoteInventory(block, &player.chunkSubs, slots)
window := player.inventory.NewWindow(invTypeId, player.nextWindowId, remoteInv)
if window == nil {
return
}
player.remoteInv = remoteInv
player.curWindow = window
if player.nextWindowId >= WindowIdFreeMax {
player.nextWindowId = WindowIdFreeMin
} else {
player.nextWindowId++
}
buf := new(bytes.Buffer)
window.WriteWindowOpen(buf)
window.WriteWindowItems(buf)
player.TransmitPacket(buf.Bytes())
}
func (player *Player) reqInventorySlotUpdate(block *BlockXyz, slot *gamerules.Slot, slotId SlotId) {
if player.remoteInv == nil || !player.remoteInv.IsForBlock(block) {
return
}
player.remoteInv.slotUpdate(slot, slotId)
}
func (player *Player) reqInventoryProgressUpdate(block *BlockXyz, prgBarId PrgBarId, value PrgBarValue) {
if player.remoteInv == nil || !player.remoteInv.IsForBlock(block) {
return
}
player.remoteInv.progressUpdate(prgBarId, value)
}
func (player *Player) reqInventoryCursorUpdate(block *BlockXyz, cursor *gamerules.Slot) {
if player.remoteInv == nil || !player.remoteInv.IsForBlock(block) {
return
}
player.cursor = *cursor
buf := new(bytes.Buffer)
player.cursor.SendUpdate(buf, WindowIdCursor, SlotIdCursor)
player.TransmitPacket(buf.Bytes())
}
func (player *Player) reqInventoryTxState(block *BlockXyz, txId TxId, accepted bool) {
if player.remoteInv == nil || !player.remoteInv.IsForBlock(block) || player.curWindow == nil {
return
}
buf := new(bytes.Buffer)
proto.WriteWindowTransaction(buf, player.curWindow.WindowId(), txId, accepted)
player.TransmitPacket(buf.Bytes())
}
func (player *Player) reqInventoryUnsubscribed(block *BlockXyz) {
if player.remoteInv == nil || !player.remoteInv.IsForBlock(block) {
return
}
player.closeCurrentWindow(true)
}
func (player *Player) reqPlaceHeldItem(target *BlockXyz, wasHeld *gamerules.Slot) {
curHeld, _ := player.inventory.HeldItem()
// Currently held item has changed since chunk saw it.
// TODO think about having the slot index passed as well so if that changes,
// we can still track the original item and improve placement success rate.
if !curHeld.IsSameType(wasHeld) {
return
}
shardClient, _, ok := player.chunkSubs.ShardClientForBlockXyz(target)
if ok {
var into gamerules.Slot
player.inventory.TakeOneHeldItem(&into)
shardClient.ReqPlaceItem(*target, into)
}
}
// reqOfferItem is used to receive items picked up from chunks. It is
// synchronous so that the passed item can be looked at by the caller
// afterwards to see if it has been consumed.
func (player *Player) reqOfferItem(fromChunk *ChunkXz, entityId EntityId, item *gamerules.Slot) {
	if player.inventory.CanTakeItem(item) {
		shardClient, ok := player.chunkSubs.ShardClientForChunkXz(fromChunk)
		if ok {
			shardClient.ReqTakeItem(*fromChunk, entityId)
		}
	}
	// Note: the redundant trailing bare return has been removed; control
	// simply falls off the end of this void function.
}
func (player *Player) reqGiveItem(atPosition *AbsXyz, item *gamerules.Slot) {
defer func() {
// Check if item not fully consumed. If it is not, then throw the remains
// back to the chunk.
if item.Count > 0 {
chunkLoc := atPosition.ToChunkXz()
shardClient, ok := player.chunkSubs.ShardClientForChunkXz(&chunkLoc)
if ok {
shardClient.ReqDropItem(*item, *atPosition, AbsVelocity{}, TicksPerSecond)
}
}
}()
player.inventory.PutItem(item)
}
// Enqueue queues a function to run with the player lock held, inside the
// player's main loop. Nil functions are ignored (nil is the loop's
// shutdown sentinel and must not be sent here).
func (player *Player) Enqueue(f func(*Player)) {
	if f != nil {
		player.mainQueue <- f
	}
}
func (player *Player) sendChatMessage(message string, sendToSelf bool) {
buf := new(bytes.Buffer)
proto.WriteChatMessage(buf, message)
packet := buf.Bytes()
if sendToSelf {
player.TransmitPacket(packet)
}
player.chunkSubs.curShard.ReqMulticastPlayers(
player.chunkSubs.curChunkLoc,
player.EntityId,
packet,
)
}
// closeCurrentWindow closes any open window. It must be called with
// player.lock held.
func (player *Player) closeCurrentWindow(sendClosePacket bool) {
if player.curWindow != nil {
player.curWindow.Finalize(sendClosePacket)
player.curWindow = nil
}
if player.remoteInv != nil {
player.remoteInv.Close()
player.remoteInv = nil
}
player.inventory.Resubscribe()
}
// ICommandHandler implementations
func (player *Player) SendMessageToPlayer(msg string) {
buf := new(bytes.Buffer)
proto.WriteChatMessage(buf, msg)
packet := buf.Bytes()
player.TransmitPacket(packet)
}
func (player *Player) BroadcastMessage(msg string, self bool) {
player.sendChatMessage(msg, self)
}
func (player *Player) GiveItem(id int, quantity int, data int) {
item := gamerules.Slot{
ItemTypeId: ItemTypeId(id),
Count: ItemCount(quantity),
Data: ItemData(data),
}
player.reqGiveItem(&player.position, &item)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.