CombinedText stringlengths 4 3.42M |
|---|
package manager
import (
"fmt"
"strings"
"sync"
"time"
"github.com/apache/incubator-trafficcontrol/traffic_monitor/experimental/common/log"
"github.com/apache/incubator-trafficcontrol/traffic_monitor/experimental/common/poller"
"github.com/apache/incubator-trafficcontrol/traffic_monitor/experimental/traffic_monitor/config"
"github.com/apache/incubator-trafficcontrol/traffic_monitor/experimental/traffic_monitor/enum"
"github.com/apache/incubator-trafficcontrol/traffic_monitor/experimental/traffic_monitor/peer"
to "github.com/apache/incubator-trafficcontrol/traffic_ops/client"
)
// CopyTrafficMonitorConfigMap returns a deep copy of the given TrafficMonitorConfigMap.
// Map values are copied by assignment; any reference types nested inside the
// values themselves are still shared with the original.
func CopyTrafficMonitorConfigMap(a *to.TrafficMonitorConfigMap) to.TrafficMonitorConfigMap {
	b := to.TrafficMonitorConfigMap{
		TrafficServer:   make(map[string]to.TrafficServer, len(a.TrafficServer)),
		CacheGroup:      make(map[string]to.TMCacheGroup, len(a.CacheGroup)),
		Config:          make(map[string]interface{}, len(a.Config)),
		TrafficMonitor:  make(map[string]to.TrafficMonitor, len(a.TrafficMonitor)),
		DeliveryService: make(map[string]to.TMDeliveryService, len(a.DeliveryService)),
		Profile:         make(map[string]to.TMProfile, len(a.Profile)),
	}
	for name, server := range a.TrafficServer {
		b.TrafficServer[name] = server
	}
	for name, cachegroup := range a.CacheGroup {
		b.CacheGroup[name] = cachegroup
	}
	for name, value := range a.Config {
		b.Config[name] = value
	}
	for name, monitor := range a.TrafficMonitor {
		b.TrafficMonitor[name] = monitor
	}
	for name, ds := range a.DeliveryService {
		b.DeliveryService[name] = ds
	}
	for name, profile := range a.Profile {
		b.Profile[name] = profile
	}
	return b
}
// TrafficMonitorConfigMapThreadsafe encapsulates a TrafficMonitorConfigMap safe for multiple readers and a single writer.
type TrafficMonitorConfigMapThreadsafe struct {
	monitorConfig *to.TrafficMonitorConfigMap // guarded by m
	m             *sync.RWMutex               // protects monitorConfig
}
// NewTrafficMonitorConfigMapThreadsafe returns an encapsulated TrafficMonitorConfigMap safe for multiple readers and a single writer.
func NewTrafficMonitorConfigMapThreadsafe() TrafficMonitorConfigMapThreadsafe {
	mc := to.TrafficMonitorConfigMap{}
	return TrafficMonitorConfigMapThreadsafe{
		monitorConfig: &mc,
		m:             &sync.RWMutex{},
	}
}
// Get returns the TrafficMonitorConfigMap. Callers MUST NOT modify, it is not threadsafe for mutation. If mutation is necessary, call CopyTrafficMonitorConfigMap().
func (t *TrafficMonitorConfigMapThreadsafe) Get() to.TrafficMonitorConfigMap {
	t.m.RLock()
	cfg := *t.monitorConfig
	t.m.RUnlock()
	return cfg
}
// Set sets the TrafficMonitorConfigMap. This is only safe for one writer. This MUST NOT be called by multiple threads.
func (t *TrafficMonitorConfigMapThreadsafe) Set(c to.TrafficMonitorConfigMap) {
	t.m.Lock()
	defer t.m.Unlock()
	*t.monitorConfig = c
}
// StartMonitorConfigManager runs the monitor config manager goroutine, and returns the threadsafe data which it sets.
func StartMonitorConfigManager(
	monitorConfigPollChan <-chan to.TrafficMonitorConfigMap,
	localStates peer.CRStatesThreadsafe,
	statURLSubscriber chan<- poller.HttpPollerConfig,
	healthURLSubscriber chan<- poller.HttpPollerConfig,
	peerURLSubscriber chan<- poller.HttpPollerConfig,
	cachesChangeSubscriber chan<- struct{},
	cfg config.Config,
	staticAppData StaticAppData,
) TrafficMonitorConfigMapThreadsafe {
	mc := NewTrafficMonitorConfigMapThreadsafe()
	// The listener runs for the life of the process, consuming monitorConfigPollChan.
	go monitorConfigListen(mc, monitorConfigPollChan, localStates, statURLSubscriber, healthURLSubscriber, peerURLSubscriber, cachesChangeSubscriber, cfg, staticAppData)
	return mc
}
// trafficOpsHealthConnectionTimeoutToDuration takes the int from Traffic Ops, which is in milliseconds, and returns a time.Duration
// TODO change Traffic Ops Client API to a time.Duration
func trafficOpsHealthConnectionTimeoutToDuration(t int) time.Duration {
return time.Duration(t) * time.Millisecond
}
// trafficOpsPeerPollIntervalToDuration takes the int from Traffic Ops, which is in milliseconds, and returns a time.Duration
// TODO change Traffic Ops Client API to a time.Duration
func trafficOpsPeerPollIntervalToDuration(t int) time.Duration {
return time.Duration(t) * time.Millisecond
}
// trafficOpsStatPollIntervalToDuration takes the int from Traffic Ops, which is in milliseconds, and returns a time.Duration
// TODO change Traffic Ops Client API to a time.Duration
func trafficOpsStatPollIntervalToDuration(t int) time.Duration {
return time.Duration(t) * time.Millisecond
}
// trafficOpsHealthPollIntervalToDuration takes the int from Traffic Ops, which is in milliseconds, and returns a time.Duration
// TODO change Traffic Ops Client API to a time.Duration
func trafficOpsHealthPollIntervalToDuration(t int) time.Duration {
return time.Duration(t) * time.Millisecond
}
// getHealthPeerStatPollIntervals reads the Traffic Ops Client monitorConfig structure, and parses and returns
// the health, peer, and stat poll intervals, in that order.
// Missing or non-numeric 'health.polling.interval' or 'peers.polling.interval' is an error;
// a missing or non-numeric 'stat.polling.interval' logs a warning and falls back to the health interval.
func getHealthPeerStatPollIntervals(monitorConfig to.TrafficMonitorConfigMap, cfg config.Config) (time.Duration, time.Duration, time.Duration, error) {
	healthPollIntervalI, healthPollIntervalExists := monitorConfig.Config["health.polling.interval"]
	if !healthPollIntervalExists {
		return 0, 0, 0, fmt.Errorf("Traffic Ops Monitor config missing 'health.polling.interval', not setting config changes.\n")
	}
	// JSON numbers unmarshal as float64, hence the float64 assertion for an integer value.
	healthPollIntervalInt, healthPollIntervalIsInt := healthPollIntervalI.(float64)
	if !healthPollIntervalIsInt {
		return 0, 0, 0, fmt.Errorf("Traffic Ops Monitor config 'health.polling.interval' value '%v' type %T is not an integer, not setting config changes.\n", healthPollIntervalI, healthPollIntervalI)
	}
	healthPollInterval := trafficOpsHealthPollIntervalToDuration(int(healthPollIntervalInt))
	peerPollIntervalI, peerPollIntervalExists := monitorConfig.Config["peers.polling.interval"]
	if !peerPollIntervalExists {
		return 0, 0, 0, fmt.Errorf("Traffic Ops Monitor config missing 'peers.polling.interval', not setting config changes.\n")
	}
	peerPollIntervalInt, peerPollIntervalIsInt := peerPollIntervalI.(float64)
	if !peerPollIntervalIsInt {
		return 0, 0, 0, fmt.Errorf("Traffic Ops Monitor config 'peers.polling.interval' value '%v' type %T is not an integer, not setting config changes.\n", peerPollIntervalI, peerPollIntervalI)
	}
	// Use the peer-specific converter (was inconsistently using the health converter).
	peerPollInterval := trafficOpsPeerPollIntervalToDuration(int(peerPollIntervalInt))
	statPollIntervalI, statPollIntervalExists := monitorConfig.Config["stat.polling.interval"]
	if !statPollIntervalExists {
		log.Warnf("Traffic Ops Monitor config missing 'stat.polling.interval', using health for stat.\n")
		statPollIntervalI = healthPollIntervalI
	}
	statPollIntervalInt, statPollIntervalIsInt := statPollIntervalI.(float64)
	if !statPollIntervalIsInt {
		log.Warnf("Traffic Ops Monitor config 'stat.polling.interval' value '%v' type %T is not an integer, using health for stat\n", statPollIntervalI, statPollIntervalI)
		// BUG FIX: the failed assertion left statPollIntervalInt at 0, producing a
		// zero stat poll interval; fall back to the (validated) health value instead.
		statPollIntervalInt = healthPollIntervalInt
	}
	// Use the stat-specific converter (was inconsistently using the health converter).
	statPollInterval := trafficOpsStatPollIntervalToDuration(int(statPollIntervalInt))
	// Formerly, only 'health' polling existed. If TO still has old configuration and doesn't have a 'stat' parameter, this allows us to assume the 'health' poll is slow, and sets it to the stat poll (which used to be the only poll, getting all astats data) to the given presumed-slow health poll, and set the now-fast-and-small health poll to a short fraction of that.
	if healthPollIntervalExists && !statPollIntervalExists {
		healthPollInterval = time.Duration(float64(healthPollInterval) / float64(cfg.HealthToStatRatio))
	}
	return healthPollInterval, peerPollInterval, statPollInterval, nil
}
// monitorConfigListen listens for new Traffic Ops monitor configs; on each new
// config it stores it, restarts the health/stat/peer pollers with the new URLs
// and intervals, and reconciles local cache and delivery service states.
// TODO timing, and determine if the case, or its internal `for`, should be put in a goroutine
// TODO determine if subscribers take action on change, and change to mutexed objects if not.
func monitorConfigListen(
	monitorConfigTS TrafficMonitorConfigMapThreadsafe,
	monitorConfigPollChan <-chan to.TrafficMonitorConfigMap,
	localStates peer.CRStatesThreadsafe,
	statURLSubscriber chan<- poller.HttpPollerConfig,
	healthURLSubscriber chan<- poller.HttpPollerConfig,
	peerURLSubscriber chan<- poller.HttpPollerConfig,
	cachesChangeSubscriber chan<- struct{},
	cfg config.Config,
	staticAppData StaticAppData,
) {
	for monitorConfig := range monitorConfigPollChan {
		monitorConfigTS.Set(monitorConfig)
		healthUrls := map[string]poller.PollConfig{}
		statUrls := map[string]poller.PollConfig{}
		peerUrls := map[string]poller.PollConfig{}
		healthPollInterval, peerPollInterval, statPollInterval, err := getHealthPeerStatPollIntervals(monitorConfig, cfg)
		if err != nil {
			// Previously this error was silently dropped; log it so operators can see why the config wasn't applied.
			log.Warnf("monitor config: %v", err)
			continue
		}
		for _, srv := range monitorConfig.TrafficServer {
			cacheName := enum.CacheName(srv.HostName)
			// Compare statuses via the enum rather than raw strings, so valid values are centralized.
			srvStatus := enum.CacheStatusFromString(srv.Status)
			if srvStatus == enum.CacheStatusOnline {
				// ONLINE caches are reported available and are not polled.
				localStates.SetCache(cacheName, peer.IsAvailable{IsAvailable: true})
				continue
			}
			if srvStatus == enum.CacheStatusOffline {
				// OFFLINE caches are neither polled nor reported.
				continue
			}
			// seed states with available = false until our polling cycle picks up a result
			if _, exists := localStates.Get().Caches[cacheName]; !exists {
				localStates.SetCache(cacheName, peer.IsAvailable{IsAvailable: false})
			}
			url := monitorConfig.Profile[srv.Profile].Parameters.HealthPollingURL
			r := strings.NewReplacer(
				"${hostname}", srv.IP,
				"${interface_name}", srv.InterfaceName,
				"application=system", "application=plugin.remap",
				"application=", "application=plugin.remap",
			)
			url = r.Replace(url)
			connTimeout := trafficOpsHealthConnectionTimeoutToDuration(monitorConfig.Profile[srv.Profile].Parameters.HealthConnectionTimeout)
			healthUrls[srv.HostName] = poller.PollConfig{URL: url, Timeout: connTimeout}
			// The stat poll uses the same URL without the plugin.remap filter.
			r = strings.NewReplacer("application=plugin.remap", "application=")
			statUrl := r.Replace(url)
			statUrls[srv.HostName] = poller.PollConfig{URL: statUrl, Timeout: connTimeout}
		}
		for _, srv := range monitorConfig.TrafficMonitor {
			if srv.HostName == staticAppData.Hostname {
				continue // don't peer-poll ourselves
			}
			if enum.CacheStatusFromString(srv.Status) != enum.CacheStatusOnline {
				continue
			}
			// TODO: the URL should be config driven. -jse
			url := fmt.Sprintf("http://%s:%d/publish/CrStates?raw", srv.IP, srv.Port)
			peerUrls[srv.HostName] = poller.PollConfig{URL: url} // TODO determine timeout.
		}
		statURLSubscriber <- poller.HttpPollerConfig{Urls: statUrls, Interval: statPollInterval}
		healthURLSubscriber <- poller.HttpPollerConfig{Urls: healthUrls, Interval: healthPollInterval}
		peerURLSubscriber <- poller.HttpPollerConfig{Urls: peerUrls, Interval: peerPollInterval}
		// Drop local cache states for servers no longer in the config.
		for cacheName := range localStates.GetCaches() {
			if _, exists := monitorConfig.TrafficServer[string(cacheName)]; !exists {
				log.Warnf("Removing %s from localStates", cacheName)
				localStates.DeleteCache(cacheName)
			}
		}
		cachesChangeSubscriber <- struct{}{}
		// TODO because there are multiple writers to localStates.DeliveryService, there is a race condition, where MonitorConfig (this func) and HealthResultManager could write at the same time, and the HealthResultManager could overwrite a delivery service addition or deletion here. Probably the simplest and most performant fix would be a lock-free algorithm using atomic compare-and-swaps.
		for _, ds := range monitorConfig.DeliveryService {
			// since caches default to unavailable, also default DS false
			if _, exists := localStates.Get().Deliveryservice[enum.DeliveryServiceName(ds.XMLID)]; !exists {
				localStates.SetDeliveryService(enum.DeliveryServiceName(ds.XMLID), peer.Deliveryservice{IsAvailable: false, DisabledLocations: []enum.CacheName{}}) // important to initialize DisabledLocations, so JSON is `[]` not `null`
			}
		}
		// Drop delivery service states no longer in the config.
		for ds := range localStates.Get().Deliveryservice {
			if _, exists := monitorConfig.DeliveryService[string(ds)]; !exists {
				localStates.DeleteDeliveryService(ds)
			}
		}
	}
}
Fix TM2 duplicate strings to enums
package manager
import (
"fmt"
"strings"
"sync"
"time"
"github.com/apache/incubator-trafficcontrol/traffic_monitor/experimental/common/log"
"github.com/apache/incubator-trafficcontrol/traffic_monitor/experimental/common/poller"
"github.com/apache/incubator-trafficcontrol/traffic_monitor/experimental/traffic_monitor/config"
"github.com/apache/incubator-trafficcontrol/traffic_monitor/experimental/traffic_monitor/enum"
"github.com/apache/incubator-trafficcontrol/traffic_monitor/experimental/traffic_monitor/peer"
to "github.com/apache/incubator-trafficcontrol/traffic_ops/client"
)
// CopyTrafficMonitorConfigMap returns a deep copy of the given TrafficMonitorConfigMap.
// Map values are copied by assignment; any reference types nested inside the
// values themselves are still shared with the original.
func CopyTrafficMonitorConfigMap(a *to.TrafficMonitorConfigMap) to.TrafficMonitorConfigMap {
	b := to.TrafficMonitorConfigMap{
		TrafficServer:   make(map[string]to.TrafficServer, len(a.TrafficServer)),
		CacheGroup:      make(map[string]to.TMCacheGroup, len(a.CacheGroup)),
		Config:          make(map[string]interface{}, len(a.Config)),
		TrafficMonitor:  make(map[string]to.TrafficMonitor, len(a.TrafficMonitor)),
		DeliveryService: make(map[string]to.TMDeliveryService, len(a.DeliveryService)),
		Profile:         make(map[string]to.TMProfile, len(a.Profile)),
	}
	for name, server := range a.TrafficServer {
		b.TrafficServer[name] = server
	}
	for name, cachegroup := range a.CacheGroup {
		b.CacheGroup[name] = cachegroup
	}
	for name, value := range a.Config {
		b.Config[name] = value
	}
	for name, monitor := range a.TrafficMonitor {
		b.TrafficMonitor[name] = monitor
	}
	for name, ds := range a.DeliveryService {
		b.DeliveryService[name] = ds
	}
	for name, profile := range a.Profile {
		b.Profile[name] = profile
	}
	return b
}
// TrafficMonitorConfigMapThreadsafe encapsulates a TrafficMonitorConfigMap safe for multiple readers and a single writer.
type TrafficMonitorConfigMapThreadsafe struct {
	monitorConfig *to.TrafficMonitorConfigMap // guarded by m
	m             *sync.RWMutex               // protects monitorConfig
}
// NewTrafficMonitorConfigMapThreadsafe returns an encapsulated TrafficMonitorConfigMap safe for multiple readers and a single writer.
func NewTrafficMonitorConfigMapThreadsafe() TrafficMonitorConfigMapThreadsafe {
	mc := to.TrafficMonitorConfigMap{}
	return TrafficMonitorConfigMapThreadsafe{
		monitorConfig: &mc,
		m:             &sync.RWMutex{},
	}
}
// Get returns the TrafficMonitorConfigMap. Callers MUST NOT modify, it is not threadsafe for mutation. If mutation is necessary, call CopyTrafficMonitorConfigMap().
func (t *TrafficMonitorConfigMapThreadsafe) Get() to.TrafficMonitorConfigMap {
	t.m.RLock()
	cfg := *t.monitorConfig
	t.m.RUnlock()
	return cfg
}
// Set sets the TrafficMonitorConfigMap. This is only safe for one writer. This MUST NOT be called by multiple threads.
func (t *TrafficMonitorConfigMapThreadsafe) Set(c to.TrafficMonitorConfigMap) {
	t.m.Lock()
	defer t.m.Unlock()
	*t.monitorConfig = c
}
// StartMonitorConfigManager runs the monitor config manager goroutine, and returns the threadsafe data which it sets.
func StartMonitorConfigManager(
	monitorConfigPollChan <-chan to.TrafficMonitorConfigMap,
	localStates peer.CRStatesThreadsafe,
	statURLSubscriber chan<- poller.HttpPollerConfig,
	healthURLSubscriber chan<- poller.HttpPollerConfig,
	peerURLSubscriber chan<- poller.HttpPollerConfig,
	cachesChangeSubscriber chan<- struct{},
	cfg config.Config,
	staticAppData StaticAppData,
) TrafficMonitorConfigMapThreadsafe {
	mc := NewTrafficMonitorConfigMapThreadsafe()
	// The listener runs for the life of the process, consuming monitorConfigPollChan.
	go monitorConfigListen(mc, monitorConfigPollChan, localStates, statURLSubscriber, healthURLSubscriber, peerURLSubscriber, cachesChangeSubscriber, cfg, staticAppData)
	return mc
}
// trafficOpsHealthConnectionTimeoutToDuration takes the int from Traffic Ops, which is in milliseconds, and returns a time.Duration
// TODO change Traffic Ops Client API to a time.Duration
func trafficOpsHealthConnectionTimeoutToDuration(t int) time.Duration {
return time.Duration(t) * time.Millisecond
}
// trafficOpsPeerPollIntervalToDuration takes the int from Traffic Ops, which is in milliseconds, and returns a time.Duration
// TODO change Traffic Ops Client API to a time.Duration
func trafficOpsPeerPollIntervalToDuration(t int) time.Duration {
return time.Duration(t) * time.Millisecond
}
// trafficOpsStatPollIntervalToDuration takes the int from Traffic Ops, which is in milliseconds, and returns a time.Duration
// TODO change Traffic Ops Client API to a time.Duration
func trafficOpsStatPollIntervalToDuration(t int) time.Duration {
return time.Duration(t) * time.Millisecond
}
// trafficOpsHealthPollIntervalToDuration takes the int from Traffic Ops, which is in milliseconds, and returns a time.Duration
// TODO change Traffic Ops Client API to a time.Duration
func trafficOpsHealthPollIntervalToDuration(t int) time.Duration {
return time.Duration(t) * time.Millisecond
}
// getHealthPeerStatPollIntervals reads the Traffic Ops Client monitorConfig structure, and parses and returns
// the health, peer, and stat poll intervals, in that order.
// Missing or non-numeric 'health.polling.interval' or 'peers.polling.interval' is an error;
// a missing or non-numeric 'stat.polling.interval' logs a warning and falls back to the health interval.
func getHealthPeerStatPollIntervals(monitorConfig to.TrafficMonitorConfigMap, cfg config.Config) (time.Duration, time.Duration, time.Duration, error) {
	healthPollIntervalI, healthPollIntervalExists := monitorConfig.Config["health.polling.interval"]
	if !healthPollIntervalExists {
		return 0, 0, 0, fmt.Errorf("Traffic Ops Monitor config missing 'health.polling.interval', not setting config changes.\n")
	}
	// JSON numbers unmarshal as float64, hence the float64 assertion for an integer value.
	healthPollIntervalInt, healthPollIntervalIsInt := healthPollIntervalI.(float64)
	if !healthPollIntervalIsInt {
		return 0, 0, 0, fmt.Errorf("Traffic Ops Monitor config 'health.polling.interval' value '%v' type %T is not an integer, not setting config changes.\n", healthPollIntervalI, healthPollIntervalI)
	}
	healthPollInterval := trafficOpsHealthPollIntervalToDuration(int(healthPollIntervalInt))
	peerPollIntervalI, peerPollIntervalExists := monitorConfig.Config["peers.polling.interval"]
	if !peerPollIntervalExists {
		return 0, 0, 0, fmt.Errorf("Traffic Ops Monitor config missing 'peers.polling.interval', not setting config changes.\n")
	}
	peerPollIntervalInt, peerPollIntervalIsInt := peerPollIntervalI.(float64)
	if !peerPollIntervalIsInt {
		return 0, 0, 0, fmt.Errorf("Traffic Ops Monitor config 'peers.polling.interval' value '%v' type %T is not an integer, not setting config changes.\n", peerPollIntervalI, peerPollIntervalI)
	}
	// Use the peer-specific converter (was inconsistently using the health converter).
	peerPollInterval := trafficOpsPeerPollIntervalToDuration(int(peerPollIntervalInt))
	statPollIntervalI, statPollIntervalExists := monitorConfig.Config["stat.polling.interval"]
	if !statPollIntervalExists {
		log.Warnf("Traffic Ops Monitor config missing 'stat.polling.interval', using health for stat.\n")
		statPollIntervalI = healthPollIntervalI
	}
	statPollIntervalInt, statPollIntervalIsInt := statPollIntervalI.(float64)
	if !statPollIntervalIsInt {
		log.Warnf("Traffic Ops Monitor config 'stat.polling.interval' value '%v' type %T is not an integer, using health for stat\n", statPollIntervalI, statPollIntervalI)
		// BUG FIX: the failed assertion left statPollIntervalInt at 0, producing a
		// zero stat poll interval; fall back to the (validated) health value instead.
		statPollIntervalInt = healthPollIntervalInt
	}
	// Use the stat-specific converter (was inconsistently using the health converter).
	statPollInterval := trafficOpsStatPollIntervalToDuration(int(statPollIntervalInt))
	// Formerly, only 'health' polling existed. If TO still has old configuration and doesn't have a 'stat' parameter, this allows us to assume the 'health' poll is slow, and sets it to the stat poll (which used to be the only poll, getting all astats data) to the given presumed-slow health poll, and set the now-fast-and-small health poll to a short fraction of that.
	if healthPollIntervalExists && !statPollIntervalExists {
		healthPollInterval = time.Duration(float64(healthPollInterval) / float64(cfg.HealthToStatRatio))
	}
	return healthPollInterval, peerPollInterval, statPollInterval, nil
}
// monitorConfigListen listens for new Traffic Ops monitor configs; on each new
// config it stores it, restarts the health/stat/peer pollers with the new URLs
// and intervals, and reconciles local cache and delivery service states.
// TODO timing, and determine if the case, or its internal `for`, should be put in a goroutine
// TODO determine if subscribers take action on change, and change to mutexed objects if not.
func monitorConfigListen(
	monitorConfigTS TrafficMonitorConfigMapThreadsafe,
	monitorConfigPollChan <-chan to.TrafficMonitorConfigMap,
	localStates peer.CRStatesThreadsafe,
	statURLSubscriber chan<- poller.HttpPollerConfig,
	healthURLSubscriber chan<- poller.HttpPollerConfig,
	peerURLSubscriber chan<- poller.HttpPollerConfig,
	cachesChangeSubscriber chan<- struct{},
	cfg config.Config,
	staticAppData StaticAppData,
) {
	for monitorConfig := range monitorConfigPollChan {
		monitorConfigTS.Set(monitorConfig)
		healthUrls := map[string]poller.PollConfig{}
		statUrls := map[string]poller.PollConfig{}
		peerUrls := map[string]poller.PollConfig{}
		healthPollInterval, peerPollInterval, statPollInterval, err := getHealthPeerStatPollIntervals(monitorConfig, cfg)
		if err != nil {
			// Previously this error was silently dropped; log it so operators can see why the config wasn't applied.
			log.Warnf("monitor config: %v", err)
			continue
		}
		for _, srv := range monitorConfig.TrafficServer {
			cacheName := enum.CacheName(srv.HostName)
			srvStatus := enum.CacheStatusFromString(srv.Status)
			if srvStatus == enum.CacheStatusOnline {
				// ONLINE caches are reported available and are not polled.
				localStates.SetCache(cacheName, peer.IsAvailable{IsAvailable: true})
				continue
			}
			if srvStatus == enum.CacheStatusOffline {
				// OFFLINE caches are neither polled nor reported.
				continue
			}
			// seed states with available = false until our polling cycle picks up a result
			if _, exists := localStates.Get().Caches[cacheName]; !exists {
				localStates.SetCache(cacheName, peer.IsAvailable{IsAvailable: false})
			}
			url := monitorConfig.Profile[srv.Profile].Parameters.HealthPollingURL
			r := strings.NewReplacer(
				"${hostname}", srv.IP,
				"${interface_name}", srv.InterfaceName,
				"application=system", "application=plugin.remap",
				"application=", "application=plugin.remap",
			)
			url = r.Replace(url)
			connTimeout := trafficOpsHealthConnectionTimeoutToDuration(monitorConfig.Profile[srv.Profile].Parameters.HealthConnectionTimeout)
			healthUrls[srv.HostName] = poller.PollConfig{URL: url, Timeout: connTimeout}
			// The stat poll uses the same URL without the plugin.remap filter.
			r = strings.NewReplacer("application=plugin.remap", "application=")
			statUrl := r.Replace(url)
			statUrls[srv.HostName] = poller.PollConfig{URL: statUrl, Timeout: connTimeout}
		}
		for _, srv := range monitorConfig.TrafficMonitor {
			if srv.HostName == staticAppData.Hostname {
				continue // don't peer-poll ourselves
			}
			if enum.CacheStatusFromString(srv.Status) != enum.CacheStatusOnline {
				continue
			}
			// TODO: the URL should be config driven. -jse
			url := fmt.Sprintf("http://%s:%d/publish/CrStates?raw", srv.IP, srv.Port)
			peerUrls[srv.HostName] = poller.PollConfig{URL: url} // TODO determine timeout.
		}
		statURLSubscriber <- poller.HttpPollerConfig{Urls: statUrls, Interval: statPollInterval}
		healthURLSubscriber <- poller.HttpPollerConfig{Urls: healthUrls, Interval: healthPollInterval}
		peerURLSubscriber <- poller.HttpPollerConfig{Urls: peerUrls, Interval: peerPollInterval}
		// Drop local cache states for servers no longer in the config.
		for cacheName := range localStates.GetCaches() {
			if _, exists := monitorConfig.TrafficServer[string(cacheName)]; !exists {
				log.Warnf("Removing %s from localStates", cacheName)
				localStates.DeleteCache(cacheName)
			}
		}
		cachesChangeSubscriber <- struct{}{}
		// TODO because there are multiple writers to localStates.DeliveryService, there is a race condition, where MonitorConfig (this func) and HealthResultManager could write at the same time, and the HealthResultManager could overwrite a delivery service addition or deletion here. Probably the simplest and most performant fix would be a lock-free algorithm using atomic compare-and-swaps.
		for _, ds := range monitorConfig.DeliveryService {
			// since caches default to unavailable, also default DS false
			if _, exists := localStates.Get().Deliveryservice[enum.DeliveryServiceName(ds.XMLID)]; !exists {
				localStates.SetDeliveryService(enum.DeliveryServiceName(ds.XMLID), peer.Deliveryservice{IsAvailable: false, DisabledLocations: []enum.CacheName{}}) // important to initialize DisabledLocations, so JSON is `[]` not `null`
			}
		}
		// Drop delivery service states no longer in the config.
		for ds := range localStates.Get().Deliveryservice {
			if _, exists := monitorConfig.DeliveryService[string(ds)]; !exists {
				localStates.DeleteDeliveryService(ds)
			}
		}
	}
}
|
package backend
import (
"bytes"
"errors"
"io"
"sort"
"sync"
"github.com/restic/restic/debug"
)
// entry is the key of the in-memory data map: a blob type plus its name.
type entry struct {
	Type Type
	Name string
}
type memMap map[entry][]byte
// MemoryBackend is a mock backend that uses a map for storing all data in
// memory. This should only be used for tests.
type MemoryBackend struct {
	data memMap     // guarded by m
	m    sync.Mutex // protects data
	MockBackend
}
// NewMemoryBackend returns a new backend that saves all data in a map in
// memory.
func NewMemoryBackend() *MemoryBackend {
	be := &MemoryBackend{data: make(memMap)}
	// Wire the embedded MockBackend's hooks to the mem* implementations
	// (fields are promoted from MockBackend).
	be.TestFn = func(t Type, name string) (bool, error) {
		return memTest(be, t, name)
	}
	be.CreateFn = func() (Blob, error) {
		return memCreate(be)
	}
	be.GetFn = func(t Type, name string) (io.ReadCloser, error) {
		return memGet(be, t, name)
	}
	be.GetReaderFn = func(t Type, name string, offset, length uint) (io.ReadCloser, error) {
		return memGetReader(be, t, name, offset, length)
	}
	be.RemoveFn = func(t Type, name string) error {
		return memRemove(be, t, name)
	}
	be.ListFn = func(t Type, done <-chan struct{}) <-chan string {
		return memList(be, t, done)
	}
	be.DeleteFn = func() error {
		be.m.Lock()
		defer be.m.Unlock()
		be.data = make(memMap)
		return nil
	}
	debug.Log("MemoryBackend.New", "created new memory backend")
	return be
}
// insert stores data under the given type and name, failing if an entry with
// that key already exists.
func (be *MemoryBackend) insert(t Type, name string, data []byte) error {
	be.m.Lock()
	defer be.m.Unlock()
	key := entry{t, name}
	if _, present := be.data[key]; present {
		return errors.New("already present")
	}
	be.data[key] = data
	return nil
}
// memTest reports whether an entry with the given type and name exists.
func memTest(be *MemoryBackend, t Type, name string) (bool, error) {
	be.m.Lock()
	defer be.m.Unlock()
	debug.Log("MemoryBackend.Test", "test %v %v", t, name)
	_, ok := be.data[entry{t, name}]
	return ok, nil
}
// tempMemEntry temporarily holds data written to the memory backend before it
// is finalized.
type tempMemEntry struct {
	be   *MemoryBackend // destination backend for Finalize
	data bytes.Buffer   // accumulated writes
}
// Write appends p to the entry's in-memory buffer.
func (e *tempMemEntry) Write(p []byte) (int, error) {
	return e.data.Write(p)
}
// Size returns the number of bytes written to the entry so far.
func (e *tempMemEntry) Size() uint {
	// bytes.Buffer.Len() is the idiomatic equivalent of len(e.data.Bytes()).
	return uint(e.data.Len())
}
// Finalize stores the buffered data in the backend under the given type and
// name. Config blobs are stored under the empty name.
func (e *tempMemEntry) Finalize(t Type, name string) error {
	if t == Config {
		name = ""
	}
	debug.Log("MemoryBackend", "save blob %p (%d bytes) as %v %v", e, len(e.data.Bytes()), t, name)
	return e.be.insert(t, name, e.data.Bytes())
}
// memCreate returns a new temporary entry which saves into be when finalized.
func memCreate(be *MemoryBackend) (Blob, error) {
	e := &tempMemEntry{be: be}
	debug.Log("MemoryBackend.Create", "create new blob %p", e)
	return e, nil
}
// readCloser wraps a reader and adds a noop Close method.
type readCloser struct {
	io.Reader
}

// Close implements io.Closer; it does nothing, since in-memory readers hold
// no resources.
func (rd readCloser) Close() error {
	return nil
}
// memGet returns a reader for the data stored under t/name. Config blobs are
// stored under the empty name.
func memGet(be *MemoryBackend, t Type, name string) (io.ReadCloser, error) {
	be.m.Lock()
	defer be.m.Unlock()
	if t == Config {
		name = ""
	}
	debug.Log("MemoryBackend.Get", "get %v %v", t, name)
	// Single map lookup instead of an existence check followed by a re-lookup.
	buf, ok := be.data[entry{t, name}]
	if !ok {
		return nil, errors.New("no such data")
	}
	return readCloser{bytes.NewReader(buf)}, nil
}
// memGetReader returns a reader for a slice of the data stored under t/name,
// starting at offset; length == 0 means "to the end of the data". Config
// blobs are stored under the empty name.
func memGetReader(be *MemoryBackend, t Type, name string, offset, length uint) (io.ReadCloser, error) {
	be.m.Lock()
	defer be.m.Unlock()
	if t == Config {
		name = ""
	}
	debug.Log("MemoryBackend.GetReader", "get %v %v offset %v len %v", t, name, offset, length)
	// Single map lookup instead of an existence check followed by a re-lookup.
	buf, ok := be.data[entry{t, name}]
	if !ok {
		return nil, errors.New("no such data")
	}
	if offset > uint(len(buf)) {
		return nil, errors.New("offset beyond end of file")
	}
	buf = buf[offset:]
	// A requested length longer than the remaining data is clamped to it.
	if length > 0 && length < uint(len(buf)) {
		buf = buf[:length]
	}
	return readCloser{bytes.NewReader(buf)}, nil
}
// memRemove deletes the entry with the given type and name, failing if it
// does not exist.
func memRemove(be *MemoryBackend, t Type, name string) error {
	be.m.Lock()
	defer be.m.Unlock()
	// Fix copy-pasted debug message (formerly logged "get %v %v").
	debug.Log("MemoryBackend.Remove", "remove %v %v", t, name)
	key := entry{t, name}
	if _, ok := be.data[key]; !ok {
		return errors.New("no such data")
	}
	delete(be.data, key)
	return nil
}
// memList returns a channel yielding the sorted names of all entries of the
// given type. The channel is closed after all names are sent, or when done is
// closed.
func memList(be *MemoryBackend, t Type, done <-chan struct{}) <-chan string {
	be.m.Lock()
	defer be.m.Unlock()
	// Snapshot matching names under the lock; the sender goroutine below does
	// not touch be.data.
	ids := make([]string, 0, len(be.data))
	for e := range be.data {
		if e.Type == t {
			ids = append(ids, e.Name)
		}
	}
	sort.Strings(ids)
	debug.Log("MemoryBackend.List", "list %v: %v", t, ids)
	ch := make(chan string)
	go func() {
		defer close(ch)
		for _, id := range ids {
			select {
			case ch <- id:
			case <-done:
				return
			}
		}
	}()
	return ch
}
Make ReadCloser public
package backend
import (
"bytes"
"errors"
"io"
"sort"
"sync"
"github.com/restic/restic/debug"
)
// entry is the key of the in-memory data map: a blob type plus its name.
type entry struct {
	Type Type
	Name string
}
type memMap map[entry][]byte
// MemoryBackend is a mock backend that uses a map for storing all data in
// memory. This should only be used for tests.
type MemoryBackend struct {
	data memMap     // guarded by m
	m    sync.Mutex // protects data
	MockBackend
}
// NewMemoryBackend returns a new backend that saves all data in a map in
// memory.
func NewMemoryBackend() *MemoryBackend {
	be := &MemoryBackend{data: make(memMap)}
	// Wire the embedded MockBackend's hooks to the mem* implementations
	// (fields are promoted from MockBackend).
	be.TestFn = func(t Type, name string) (bool, error) {
		return memTest(be, t, name)
	}
	be.CreateFn = func() (Blob, error) {
		return memCreate(be)
	}
	be.GetFn = func(t Type, name string) (io.ReadCloser, error) {
		return memGet(be, t, name)
	}
	be.GetReaderFn = func(t Type, name string, offset, length uint) (io.ReadCloser, error) {
		return memGetReader(be, t, name, offset, length)
	}
	be.RemoveFn = func(t Type, name string) error {
		return memRemove(be, t, name)
	}
	be.ListFn = func(t Type, done <-chan struct{}) <-chan string {
		return memList(be, t, done)
	}
	be.DeleteFn = func() error {
		be.m.Lock()
		defer be.m.Unlock()
		be.data = make(memMap)
		return nil
	}
	debug.Log("MemoryBackend.New", "created new memory backend")
	return be
}
// insert stores data under the given type and name, failing if an entry with
// that key already exists.
func (be *MemoryBackend) insert(t Type, name string, data []byte) error {
	be.m.Lock()
	defer be.m.Unlock()
	key := entry{t, name}
	if _, present := be.data[key]; present {
		return errors.New("already present")
	}
	be.data[key] = data
	return nil
}
// memTest reports whether an entry with the given type and name exists.
func memTest(be *MemoryBackend, t Type, name string) (bool, error) {
	be.m.Lock()
	defer be.m.Unlock()
	debug.Log("MemoryBackend.Test", "test %v %v", t, name)
	_, ok := be.data[entry{t, name}]
	return ok, nil
}
// tempMemEntry temporarily holds data written to the memory backend before it
// is finalized.
type tempMemEntry struct {
	be   *MemoryBackend // destination backend for Finalize
	data bytes.Buffer   // accumulated writes
}
// Write appends p to the entry's in-memory buffer.
func (e *tempMemEntry) Write(p []byte) (int, error) {
	return e.data.Write(p)
}
// Size returns the number of bytes written to the entry so far.
func (e *tempMemEntry) Size() uint {
	// bytes.Buffer.Len() is the idiomatic equivalent of len(e.data.Bytes()).
	return uint(e.data.Len())
}
// Finalize stores the buffered data in the backend under the given type and
// name. Config blobs are stored under the empty name.
func (e *tempMemEntry) Finalize(t Type, name string) error {
	if t == Config {
		name = ""
	}
	debug.Log("MemoryBackend", "save blob %p (%d bytes) as %v %v", e, len(e.data.Bytes()), t, name)
	return e.be.insert(t, name, e.data.Bytes())
}
// memCreate returns a new temporary entry which saves into be when finalized.
func memCreate(be *MemoryBackend) (Blob, error) {
	e := &tempMemEntry{be: be}
	debug.Log("MemoryBackend.Create", "create new blob %p", e)
	return e, nil
}
// ReadCloser wraps rd in a no-op Close method, returning an io.ReadCloser.
func ReadCloser(rd io.Reader) io.ReadCloser {
	return readCloser{Reader: rd}
}
// readCloser wraps a reader and adds a noop Close method.
type readCloser struct {
	io.Reader
}

// Close implements io.Closer; it does nothing, since in-memory readers hold
// no resources.
func (rd readCloser) Close() error {
	return nil
}
// memGet returns a reader for the data stored under t/name. Config blobs are
// stored under the empty name.
func memGet(be *MemoryBackend, t Type, name string) (io.ReadCloser, error) {
	be.m.Lock()
	defer be.m.Unlock()
	if t == Config {
		name = ""
	}
	debug.Log("MemoryBackend.Get", "get %v %v", t, name)
	// Single map lookup instead of an existence check followed by a re-lookup.
	buf, ok := be.data[entry{t, name}]
	if !ok {
		return nil, errors.New("no such data")
	}
	return readCloser{bytes.NewReader(buf)}, nil
}
// memGetReader returns a reader for a slice of the data stored under t/name,
// starting at offset; length == 0 means "to the end of the data". Config
// blobs are stored under the empty name.
func memGetReader(be *MemoryBackend, t Type, name string, offset, length uint) (io.ReadCloser, error) {
	be.m.Lock()
	defer be.m.Unlock()
	if t == Config {
		name = ""
	}
	debug.Log("MemoryBackend.GetReader", "get %v %v offset %v len %v", t, name, offset, length)
	// Single map lookup instead of an existence check followed by a re-lookup.
	buf, ok := be.data[entry{t, name}]
	if !ok {
		return nil, errors.New("no such data")
	}
	if offset > uint(len(buf)) {
		return nil, errors.New("offset beyond end of file")
	}
	buf = buf[offset:]
	// A requested length longer than the remaining data is clamped to it.
	if length > 0 && length < uint(len(buf)) {
		buf = buf[:length]
	}
	return readCloser{bytes.NewReader(buf)}, nil
}
// memRemove deletes the entry with the given type and name, failing if it
// does not exist.
func memRemove(be *MemoryBackend, t Type, name string) error {
	be.m.Lock()
	defer be.m.Unlock()
	// Fix copy-pasted debug message (formerly logged "get %v %v").
	debug.Log("MemoryBackend.Remove", "remove %v %v", t, name)
	key := entry{t, name}
	if _, ok := be.data[key]; !ok {
		return errors.New("no such data")
	}
	delete(be.data, key)
	return nil
}
// memList returns a channel yielding the sorted names of all entries of the
// given type. The channel is closed after all names are sent, or when done is
// closed.
func memList(be *MemoryBackend, t Type, done <-chan struct{}) <-chan string {
	be.m.Lock()
	defer be.m.Unlock()
	// Snapshot matching names under the lock; the sender goroutine below does
	// not touch be.data.
	ids := make([]string, 0, len(be.data))
	for e := range be.data {
		if e.Type == t {
			ids = append(ids, e.Name)
		}
	}
	sort.Strings(ids)
	debug.Log("MemoryBackend.List", "list %v: %v", t, ids)
	ch := make(chan string)
	go func() {
		defer close(ch)
		for _, id := range ids {
			select {
			case ch <- id:
			case <-done:
				return
			}
		}
	}()
	return ch
}
|
// Package swift provides an interface to the Swift object storage system
package swift
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"net/url"
"path"
"strconv"
"strings"
"time"
"github.com/ncw/swift"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
)
// Constants
const (
	directoryMarkerContentType = "application/directory" // content type of directory marker objects
	listChunks                 = 1000                     // chunk size to read directory listings
	defaultChunkSize           = 5 * fs.GibiByte          // largest single upload swift allows (5 GiB)
	minSleep                   = 10 * time.Millisecond    // In case of error, start at 10ms sleep.
)
// SharedOptions are shared between swift and hubic
var SharedOptions = []fs.Option{{
	// Above this size uploads are split into a "<container>_segments"
	// container - see updateChunks.
	Name: "chunk_size",
	Help: `Above this size files will be chunked into a _segments container.
Above this size files will be chunked into a _segments container. The
default for this is 5GB which is its maximum value.`,
	Default:  defaultChunkSize,
	Advanced: true,
}, {
	// Disables chunking for streaming uploads (rcat/mount).
	Name: "no_chunk",
	Help: `Don't chunk files during streaming upload.
When doing streaming uploads (eg using rcat or mount) setting this
flag will cause the swift backend to not upload chunked files.
This will limit the maximum upload size to 5GB. However non chunked
files are easier to deal with and have an MD5SUM.
Rclone will still chunk files bigger than chunk_size when doing normal
copy operations.`,
	Default:  false,
	Advanced: true,
}, {
	// Controls how object names are encoded on the wire.
	Name:     config.ConfigEncoding,
	Help:     config.ConfigEncodingHelp,
	Advanced: true,
	Default: (encoder.EncodeInvalidUtf8 |
		encoder.EncodeSlash),
}}
// Register with Fs
//
// The options declared here are decoded into the Options struct below
// via the config tags; keep the two in sync.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "swift",
		Description: "OpenStack Swift (Rackspace Cloud Files, Memset Memstore, OVH)",
		NewFs:       NewFs,
		Options: append([]fs.Option{{
			Name:    "env_auth",
			Help:    "Get swift credentials from environment variables in standard OpenStack form.",
			Default: false,
			Examples: []fs.OptionExample{
				{
					Value: "false",
					Help:  "Enter swift credentials in the next step",
				}, {
					Value: "true",
					Help:  "Get swift credentials from environment vars. Leave other fields blank if using this.",
				},
			},
		}, {
			Name: "user",
			Help: "User name to log in (OS_USERNAME).",
		}, {
			Name: "key",
			Help: "API key or password (OS_PASSWORD).",
		}, {
			Name: "auth",
			Help: "Authentication URL for server (OS_AUTH_URL).",
			Examples: []fs.OptionExample{{
				Help:  "Rackspace US",
				Value: "https://auth.api.rackspacecloud.com/v1.0",
			}, {
				Help:  "Rackspace UK",
				Value: "https://lon.auth.api.rackspacecloud.com/v1.0",
			}, {
				Help:  "Rackspace v2",
				Value: "https://identity.api.rackspacecloud.com/v2.0",
			}, {
				Help:  "Memset Memstore UK",
				Value: "https://auth.storage.memset.com/v1.0",
			}, {
				Help:  "Memset Memstore UK v2",
				Value: "https://auth.storage.memset.com/v2.0",
			}, {
				Help:  "OVH",
				Value: "https://auth.cloud.ovh.net/v3",
			}},
		}, {
			Name: "user_id",
			Help: "User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).",
		}, {
			Name: "domain",
			Help: "User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)",
		}, {
			Name: "tenant",
			Help: "Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)",
		}, {
			Name: "tenant_id",
			Help: "Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)",
		}, {
			Name: "tenant_domain",
			Help: "Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)",
		}, {
			Name: "region",
			Help: "Region name - optional (OS_REGION_NAME)",
		}, {
			Name: "storage_url",
			Help: "Storage URL - optional (OS_STORAGE_URL)",
		}, {
			Name: "auth_token",
			Help: "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)",
		}, {
			Name: "application_credential_id",
			Help: "Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)",
		}, {
			Name: "application_credential_name",
			Help: "Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)",
		}, {
			Name: "application_credential_secret",
			Help: "Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)",
		}, {
			Name:    "auth_version",
			Help:    "AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)",
			Default: 0,
		}, {
			Name:    "endpoint_type",
			Help:    "Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE)",
			Default: "public",
			Examples: []fs.OptionExample{{
				Help:  "Public (default, choose this if not sure)",
				Value: "public",
			}, {
				Help:  "Internal (use internal service net)",
				Value: "internal",
			}, {
				Help:  "Admin",
				Value: "admin",
			}},
		}, {
			Name: "storage_policy",
			Help: `The storage policy to use when creating a new container
This applies the specified storage policy when creating a new
container. The policy cannot be changed afterwards. The allowed
configuration values and their meaning depend on your Swift storage
provider.`,
			Default: "",
			Examples: []fs.OptionExample{{
				Help:  "Default",
				Value: "",
			}, {
				Help:  "OVH Public Cloud Storage",
				Value: "pcs",
			}, {
				Help:  "OVH Public Cloud Archive",
				Value: "pca",
			}},
		// SharedOptions (chunking + encoding) are appended so swift and
		// hubic present the same extra options.
		}}, SharedOptions...),
	})
}
// Options defines the configuration for this backend
//
// The field tags map each field to the option names registered in init.
type Options struct {
	EnvAuth                     bool                 `config:"env_auth"`
	User                        string               `config:"user"`
	Key                         string               `config:"key"`
	Auth                        string               `config:"auth"`
	UserID                      string               `config:"user_id"`
	Domain                      string               `config:"domain"`
	Tenant                      string               `config:"tenant"`
	TenantID                    string               `config:"tenant_id"`
	TenantDomain                string               `config:"tenant_domain"`
	Region                      string               `config:"region"`
	StorageURL                  string               `config:"storage_url"`
	AuthToken                   string               `config:"auth_token"`
	AuthVersion                 int                  `config:"auth_version"`
	ApplicationCredentialID     string               `config:"application_credential_id"`
	ApplicationCredentialName   string               `config:"application_credential_name"`
	ApplicationCredentialSecret string               `config:"application_credential_secret"`
	StoragePolicy               string               `config:"storage_policy"`
	EndpointType                string               `config:"endpoint_type"`
	ChunkSize                   fs.SizeSuffix        `config:"chunk_size"` // files above this size are chunked
	NoChunk                     bool                 `config:"no_chunk"`   // disable chunking on streaming uploads
	Enc                         encoder.MultiEncoder `config:"encoding"`   // name encoding used on the wire
}
// Fs represents a remote swift server
type Fs struct {
	name             string            // name of this remote
	root             string            // the path we are working on if any
	features         *fs.Features      // optional features
	opt              Options           // options for this backend
	c                *swift.Connection // the connection to the swift server
	rootContainer    string            // container part of root (if any)
	rootDirectory    string            // directory part of root (if any)
	cache            *bucket.Cache     // cache of container status
	noCheckContainer bool              // don't check the container before creating it
	pacer            *fs.Pacer         // To pace the API calls
}

// Object describes a swift object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs           *Fs           // what this object is part of
	remote       string        // The remote path
	size         int64         // size of the object in bytes
	lastModified time.Time     // last modified time (from listing or headers)
	contentType  string        // Content-Type of the object
	md5          string        // MD5 hash as reported by swift (may be empty)
	headers      swift.Headers // The object headers if known
}
// ------------------------------------------------------------

// Name returns the name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root returns the root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
	if f.rootContainer == "" {
		// No formatting needed - fmt.Sprintf with no args flagged by go vet.
		return "Swift root"
	}
	if f.rootDirectory == "" {
		return fmt.Sprintf("Swift container %s", f.rootContainer)
	}
	return fmt.Sprintf("Swift container %s path %s", f.rootContainer, f.rootDirectory)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// retryErrorCodes is a slice of error codes that we will retry
//
// These are the HTTP status codes checked by shouldRetry below.
var retryErrorCodes = []int{
	401, // Unauthorized (eg "Token has expired")
	408, // Request Timeout
	409, // Conflict - various states that could be resolved on a retry
	429, // Rate exceeded.
	500, // Get occasional 500 Internal Server Error
	503, // Service Unavailable/Slow Down - "Reduce your request rate"
	504, // Gateway Time-out
}
// shouldRetry returns a boolean as to whether this err deserves to be
// retried.  It returns the err as a convenience
func shouldRetry(err error) (bool, error) {
	// Retry any swift error whose HTTP status code is in the retryable list.
	if swiftError, ok := err.(*swift.Error); ok {
		for _, code := range retryErrorCodes {
			if code == swiftError.StatusCode {
				return true, err
			}
		}
	}
	// Otherwise fall back to the generic failure check.
	return fserrors.ShouldRetry(err), err
}
// shouldRetryHeaders returns a boolean as to whether this err
// deserves to be retried.  It reads the headers passed in looking for
// `Retry-After`. It returns the err as a convenience
func shouldRetryHeaders(headers swift.Headers, err error) (bool, error) {
	// Only 429 (rate limited) responses carry a Retry-After we act on.
	if swiftError, ok := err.(*swift.Error); ok && swiftError.StatusCode == 429 {
		if value := headers["Retry-After"]; value != "" {
			retryAfter, parseErr := strconv.Atoi(value)
			if parseErr != nil {
				fs.Errorf(nil, "Failed to parse Retry-After: %q: %v", value, parseErr)
			} else {
				duration := time.Second * time.Duration(retryAfter)
				if duration <= 60*time.Second {
					// Do a short sleep immediately
					fs.Debugf(nil, "Sleeping for %v to obey Retry-After", duration)
					time.Sleep(duration)
					return true, err
				}
				// Delay a long sleep for a retry
				return false, fserrors.NewErrorRetryAfter(duration)
			}
		}
	}
	// Fall back to the status-code based retry check.
	return shouldRetry(err)
}
// parsePath parses a remote 'url' by stripping any leading and
// trailing slashes.  (Parameter renamed so it no longer shadows the
// path package.)
func parsePath(s string) (root string) {
	return strings.Trim(s, "/")
}
// split returns container and containerPath from the rootRelativePath
// relative to f.root
//
// Both results are encoded ready for use on the wire.
func (f *Fs) split(rootRelativePath string) (container, containerPath string) {
	container, containerPath = bucket.Split(path.Join(f.root, rootRelativePath))
	return f.opt.Enc.FromStandardName(container), f.opt.Enc.FromStandardPath(containerPath)
}

// split returns container and containerPath from the object
func (o *Object) split() (container, containerPath string) {
	return o.fs.split(o.remote)
}
// swiftConnection makes a connection to swift
//
// opt supplies the configuration.  name is currently unused in the
// body (kept for symmetry with the caller).
func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
	c := &swift.Connection{
		// Keep these in the same order as the Config for ease of checking
		UserName:                    opt.User,
		ApiKey:                      opt.Key,
		AuthUrl:                     opt.Auth,
		UserId:                      opt.UserID,
		Domain:                      opt.Domain,
		Tenant:                      opt.Tenant,
		TenantId:                    opt.TenantID,
		TenantDomain:                opt.TenantDomain,
		Region:                      opt.Region,
		StorageUrl:                  opt.StorageURL,
		AuthToken:                   opt.AuthToken,
		AuthVersion:                 opt.AuthVersion,
		ApplicationCredentialId:     opt.ApplicationCredentialID,
		ApplicationCredentialName:   opt.ApplicationCredentialName,
		ApplicationCredentialSecret: opt.ApplicationCredentialSecret,
		EndpointType:                swift.EndpointType(opt.EndpointType),
		ConnectTimeout:              10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
		Timeout:                     10 * fs.Config.Timeout,        // Use the timeouts in the transport
		Transport:                   fshttp.NewTransport(fs.Config),
	}
	if opt.EnvAuth {
		// Fill any blank fields from the OS_* environment variables.
		err := c.ApplyEnvironment()
		if err != nil {
			return nil, errors.Wrap(err, "failed to read environment variables")
		}
	}
	// Remember any explicitly configured StorageUrl/AuthToken so they
	// can be re-applied after authentication overwrites them below.
	StorageUrl, AuthToken := c.StorageUrl, c.AuthToken // nolint
	if !c.Authenticated() {
		if (c.ApplicationCredentialId != "" || c.ApplicationCredentialName != "") && c.ApplicationCredentialSecret == "" {
			if c.UserName == "" && c.UserId == "" {
				return nil, errors.New("user name or user id not found for authentication (and no storage_url+auth_token is provided)")
			}
			if c.ApiKey == "" {
				return nil, errors.New("key not found")
			}
		}
		if c.AuthUrl == "" {
			return nil, errors.New("auth not found")
		}
		err := c.Authenticate() // fills in c.StorageUrl and c.AuthToken
		if err != nil {
			return nil, err
		}
	}
	// Make sure we re-auth with the AuthToken and StorageUrl
	// provided by wrapping the existing auth, so we can just
	// override one or the other or both.
	if StorageUrl != "" || AuthToken != "" {
		// Re-write StorageURL and AuthToken if they are being
		// overridden as c.Authenticate above will have
		// overwritten them.
		if StorageUrl != "" {
			c.StorageUrl = StorageUrl
		}
		if AuthToken != "" {
			c.AuthToken = AuthToken
		}
		c.Auth = newAuth(c.Auth, StorageUrl, AuthToken)
	}
	return c, nil
}
// checkUploadChunkSize returns an error unless cs is at least one byte.
func checkUploadChunkSize(cs fs.SizeSuffix) error {
	const minChunkSize = fs.Byte
	if cs >= minChunkSize {
		return nil
	}
	return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
// setUploadChunkSize validates cs and installs it as the chunk size,
// returning the previous value.
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	if err = checkUploadChunkSize(cs); err != nil {
		return
	}
	old = f.opt.ChunkSize
	f.opt.ChunkSize = cs
	return
}
// setRoot changes the root of the Fs
//
// It keeps rootContainer and rootDirectory in sync with root.
func (f *Fs) setRoot(root string) {
	f.root = parsePath(root)
	f.rootContainer, f.rootDirectory = bucket.Split(f.root)
}
// NewFsWithConnection constructs an Fs from the path, container:path
// and authenticated connection.
//
// if noCheckContainer is set then the Fs won't check the container
// exists before creating it.
func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, noCheckContainer bool) (fs.Fs, error) {
	f := &Fs{
		name:             name,
		opt:              *opt,
		c:                c,
		noCheckContainer: noCheckContainer,
		pacer:            fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
		cache:            bucket.NewCache(),
	}
	f.setRoot(root)
	f.features = (&fs.Features{
		ReadMimeType:      true,
		WriteMimeType:     true,
		BucketBased:       true,
		BucketBasedRootOK: true,
		SlowModTime:       true,
	}).Fill(f)
	if f.rootContainer != "" && f.rootDirectory != "" {
		// Check to see if the object exists - ignoring directory markers
		var info swift.Object
		var err error
		encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
		err = f.pacer.Call(func() (bool, error) {
			var rxHeaders swift.Headers
			info, rxHeaders, err = f.c.Object(f.rootContainer, encodedDirectory)
			return shouldRetryHeaders(rxHeaders, err)
		})
		if err == nil && info.ContentType != directoryMarkerContentType {
			// The root points at a file: point the Fs at its parent
			// directory instead and signal this with fs.ErrorIsFile.
			newRoot := path.Dir(f.root)
			if newRoot == "." {
				newRoot = ""
			}
			f.setRoot(newRoot)
			// return an error with an fs which points to the parent
			return f, fs.ErrorIsFile
		}
	}
	return f, nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Decode the config into an Options struct and validate it.
	opt := new(Options)
	if err := configstruct.Set(m, opt); err != nil {
		return nil, err
	}
	if err := checkUploadChunkSize(opt.ChunkSize); err != nil {
		return nil, errors.Wrap(err, "swift: chunk size")
	}
	// Authenticate and build the Fs on top of the connection.
	c, err := swiftConnection(opt, name)
	if err != nil {
		return nil, err
	}
	return NewFsWithConnection(opt, name, root, c, false)
}
// newObjectWithInfo returns an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	// Note that due to a quirk of swift, dynamic large objects are
	// returned as 0 bytes in the listing. Correct this here by
	// making sure we read the full metadata for all 0 byte files.
	// We don't read the metadata for directory marker objects.
	// (Uses the directoryMarkerContentType constant instead of the
	// previously duplicated string literal.)
	if info != nil && info.Bytes == 0 && info.ContentType != directoryMarkerContentType {
		err := o.readMetaData() // reads info and headers, returning an error
		if err == fs.ErrorObjectNotFound {
			// We have a dangling large object here so just return the original metadata
			fs.Errorf(o, "dangling large object with no contents")
		} else if err != nil {
			return nil, err
		} else {
			return o, nil
		}
	}
	if info != nil {
		// Set info but not headers
		err := o.decodeMetaData(info)
		if err != nil {
			return nil, err
		}
	} else {
		err := o.readMetaData() // reads info and headers, returning an error
		if err != nil {
			return nil, err
		}
	}
	return o, nil
}
// NewObject finds the Object at remote.  If it can't be found it
// returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	// Passing nil info forces a metadata read from the remote.
	return f.newObjectWithInfo(remote, nil)
}

// listFn is called from list and listContainerRoot to handle an object.
type listFn func(remote string, object *swift.Object, isDirectory bool) error
// listContainerRoot lists the objects into the function supplied from
// the container and directory supplied.  The remote has prefix
// removed from it and if addContainer is set then it adds the
// container to the start.
//
// Set recurse to read sub directories
func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn listFn) error {
	// Normalise prefix and directory to end in / so prefix matching
	// and the swift prefix listing behave consistently.
	if prefix != "" && !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}
	if directory != "" && !strings.HasSuffix(directory, "/") {
		directory += "/"
	}
	// Options for ObjectsWalk
	opts := swift.ObjectsOpts{
		Prefix: directory,
		Limit:  listChunks,
	}
	if !recurse {
		// With a delimiter swift returns pseudo directory entries
		// ending in / instead of descending into them.
		opts.Delimiter = '/'
	}
	return f.c.ObjectsWalk(container, &opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
		var objects []swift.Object
		var err error
		err = f.pacer.Call(func() (bool, error) {
			objects, err = f.c.Objects(container, opts)
			return shouldRetry(err)
		})
		if err == nil {
			for i := range objects {
				object := &objects[i]
				isDirectory := false
				if !recurse {
					// Pseudo directories from the delimiter end in /.
					isDirectory = strings.HasSuffix(object.Name, "/")
				}
				remote := f.opt.Enc.ToStandardPath(object.Name)
				if !strings.HasPrefix(remote, prefix) {
					fs.Logf(f, "Odd name received %q", remote)
					continue
				}
				if !includeDirMarkers && remote == prefix {
					// If we have zero length directory markers ending in / then swift
					// will return them in the listing for the directory which causes
					// duplicate directories.  Ignore them here.
					continue
				}
				remote = remote[len(prefix):]
				if addContainer {
					remote = path.Join(container, remote)
				}
				err = fn(remote, object, isDirectory)
				if err != nil {
					break
				}
			}
		}
		return objects, err
	})
}
// addEntryFn is called by list with each DirEntry produced.
type addEntryFn func(fs.DirEntry) error

// list the objects into the function supplied
func (f *Fs) list(container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn addEntryFn) error {
	err := f.listContainerRoot(container, directory, prefix, addContainer, recurse, includeDirMarkers, func(remote string, object *swift.Object, isDirectory bool) (err error) {
		if isDirectory {
			// Synthesize a directory entry - swift has no real directories.
			remote = strings.TrimRight(remote, "/")
			d := fs.NewDir(remote, time.Time{}).SetSize(object.Bytes)
			err = fn(d)
		} else {
			// newObjectWithInfo does a full metadata read on 0 size objects which might be dynamic large objects
			var o fs.Object
			o, err = f.newObjectWithInfo(remote, object)
			if err != nil {
				return err
			}
			if includeDirMarkers || o.Storable() {
				err = fn(o)
			}
		}
		return err
	})
	// Translate the swift error into the fs error callers expect.
	if err == swift.ContainerNotFound {
		err = fs.ErrorDirNotFound
	}
	return err
}
// listDir lists a single directory
func (f *Fs) listDir(container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
	if container == "" {
		return nil, fs.ErrorListBucketRequired
	}
	// Collect the entries for this directory (non-recursive, without
	// directory markers).
	collect := func(entry fs.DirEntry) error {
		entries = append(entries, entry)
		return nil
	}
	if err = f.list(container, directory, prefix, addContainer, false, false, collect); err != nil {
		return nil, err
	}
	// container must be present if listing succeeded
	f.cache.MarkOK(container)
	return entries, nil
}
// listContainers lists the containers
func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err error) {
	var containers []swift.Container
	err = f.pacer.Call(func() (bool, error) {
		containers, err = f.c.ContainersAll(nil)
		return shouldRetry(err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "container listing failed")
	}
	// Turn each container into a directory entry at the root.
	for i := range containers {
		c := &containers[i]
		f.cache.MarkOK(c.Name)
		dir := fs.NewDir(f.opt.Enc.ToStandardName(c.Name), time.Time{})
		dir.SetSize(c.Bytes).SetItems(c.Count)
		entries = append(entries, dir)
	}
	return entries, nil
}
// List the objects and directories in dir into entries.  The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	container, directory := f.split(dir)
	// Within a container: list its contents.
	if container != "" {
		return f.listDir(container, directory, f.rootDirectory, f.rootContainer == "")
	}
	// At the root: a directory without a container is not listable.
	if directory != "" {
		return nil, fs.ErrorListBucketRequired
	}
	return f.listContainers(ctx)
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order.  If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	container, directory := f.split(dir)
	list := walk.NewListRHelper(callback)
	// listR recursively lists one container into the helper.
	listR := func(container, directory, prefix string, addContainer bool) error {
		return f.list(container, directory, prefix, addContainer, true, false, func(entry fs.DirEntry) error {
			return list.Add(entry)
		})
	}
	if container == "" {
		// At the root: emit each container then recurse into it.
		entries, err := f.listContainers(ctx)
		if err != nil {
			return err
		}
		for _, entry := range entries {
			err = list.Add(entry)
			if err != nil {
				return err
			}
			container := entry.Remote()
			err = listR(container, "", f.rootDirectory, true)
			if err != nil {
				return err
			}
			// container must be present if listing succeeded
			f.cache.MarkOK(container)
		}
	} else {
		err = listR(container, directory, f.rootDirectory, f.rootContainer == "")
		if err != nil {
			return err
		}
		// container must be present if listing succeeded
		f.cache.MarkOK(container)
	}
	// Flush any remaining buffered entries to the callback.
	return list.Flush()
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	var containers []swift.Container
	var err error
	err = f.pacer.Call(func() (bool, error) {
		containers, err = f.c.ContainersAll(nil)
		return shouldRetry(err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "container listing failed")
	}
	// Sum bytes and object counts over every container.
	var bytesUsed, objectCount int64
	for i := range containers {
		bytesUsed += containers[i].Bytes
		objectCount += containers[i].Count
	}
	return &fs.Usage{
		Used:    fs.NewUsageValue(bytesUsed),   // bytes in use
		Objects: fs.NewUsageValue(objectCount), // objects in use
	}, nil
}
// Put the object into the container
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Temporary Object under construction.  (Renamed from "fs", which
	// shadowed the fs package.)
	o := &Object{
		fs:      f,
		remote:  src.Remote(),
		headers: swift.Headers{}, // Empty object headers to stop readMetaData being called
	}
	return o, o.Update(ctx, in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// Delegates to Put which handles unknown-size uploads.
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}

// Mkdir creates the container if it doesn't exist
//
// Only the container part of dir is relevant - swift has no real directories.
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	container, _ := f.split(dir)
	return f.makeContainer(ctx, container)
}
// makeContainer creates the container if it doesn't exist
func (f *Fs) makeContainer(ctx context.Context, container string) error {
	// cache.Create ensures the creation logic runs at most once per container.
	return f.cache.Create(container, func() error {
		// Check to see if container exists first
		var err error = swift.ContainerNotFound
		if !f.noCheckContainer {
			err = f.pacer.Call(func() (bool, error) {
				var rxHeaders swift.Headers
				_, rxHeaders, err = f.c.Container(container)
				return shouldRetryHeaders(rxHeaders, err)
			})
		}
		if err == swift.ContainerNotFound {
			headers := swift.Headers{}
			if f.opt.StoragePolicy != "" {
				// The storage policy can only be set at creation time.
				headers["X-Storage-Policy"] = f.opt.StoragePolicy
			}
			err = f.pacer.Call(func() (bool, error) {
				err = f.c.ContainerCreate(container, headers)
				return shouldRetry(err)
			})
			if err == nil {
				fs.Infof(f, "Container %q created", container)
			}
		}
		return err
	}, nil)
}
// Rmdir deletes the container if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	container, directory := f.split(dir)
	// Only a bare container can be removed; anything else is a no-op.
	if container == "" || directory != "" {
		return nil
	}
	return f.cache.Remove(container, func() error {
		deleteErr := f.pacer.Call(func() (bool, error) {
			return shouldRetry(f.c.ContainerDelete(container))
		})
		if deleteErr == nil {
			fs.Infof(f, "Container %q removed", container)
		}
		return deleteErr
	})
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
	return time.Nanosecond
}

// Purge deletes all the files in the directory
//
// Implemented here so we can make sure we delete directory markers
func (f *Fs) Purge(ctx context.Context, dir string) error {
	container, directory := f.split(dir)
	if container == "" {
		return fs.ErrorListBucketRequired
	}
	// Delete all the files including the directory markers
	toBeDeleted := make(chan fs.Object, fs.Config.Transfers)
	delErr := make(chan error, 1)
	// Deletion runs concurrently with the listing below.
	go func() {
		delErr <- operations.DeleteFiles(ctx, toBeDeleted)
	}()
	err := f.list(container, directory, f.rootDirectory, false, true, true, func(entry fs.DirEntry) error {
		if o, ok := entry.(*Object); ok {
			toBeDeleted <- o
		}
		return nil
	})
	// Closing the channel lets DeleteFiles finish; then collect its error.
	close(toBeDeleted)
	delError := <-delErr
	if err == nil {
		err = delError
	}
	if err != nil {
		return err
	}
	return f.Rmdir(ctx, dir)
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	dstContainer, dstPath := f.split(remote)
	// Make sure the destination container exists first.
	if err := f.makeContainer(ctx, dstContainer); err != nil {
		return nil, err
	}
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	srcContainer, srcPath := srcObj.split()
	// Server side copy, retrying as required.
	var err error
	err = f.pacer.Call(func() (bool, error) {
		var rxHeaders swift.Headers
		rxHeaders, err = f.c.ObjectCopy(srcContainer, srcPath, dstContainer, dstPath, nil)
		return shouldRetryHeaders(rxHeaders, err)
	})
	if err != nil {
		return nil, err
	}
	return f.NewObject(ctx, remote)
}
// Hashes returns the supported hash sets.
//
// Swift exposes MD5 only.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}
// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// String returns a string version of the object ("<nil>" for a nil object)
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	// Large objects (dynamic or static) have no MD5 of the whole
	// content so return an empty string for them.
	isDLO, err := o.isDynamicLargeObject()
	if err != nil {
		return "", err
	}
	isSLO, err := o.isStaticLargeObject()
	if err != nil {
		return "", err
	}
	if isDLO || isSLO {
		fs.Debugf(o, "Returning empty Md5sum for swift large object")
		return "", nil
	}
	return strings.ToLower(o.md5), nil
}
// hasHeader checks for the header passed in returning false if the
// object isn't found.
func (o *Object) hasHeader(header string) (bool, error) {
	err := o.readMetaData()
	if err != nil {
		if err == fs.ErrorObjectNotFound {
			return false, nil
		}
		return false, err
	}
	// Renamed from "isDynamicLargeObject" - this helper is generic and
	// the old name was a copy-paste leftover from its original caller.
	_, found := o.headers[header]
	return found, nil
}
// isDynamicLargeObject checks for the X-Object-Manifest header
func (o *Object) isDynamicLargeObject() (bool, error) {
	return o.hasHeader("X-Object-Manifest")
}

// isStaticLargeObject checks for the X-Static-Large-Object header
func (o *Object) isStaticLargeObject() (bool, error) {
	return o.hasHeader("X-Static-Large-Object")
}
// isInContainerVersioning reports whether the given container has an
// X-History-Location header set, ie whether versioning is enabled on it.
func (o *Object) isInContainerVersioning(container string) (bool, error) {
	_, headers, err := o.fs.c.Container(container)
	if err != nil {
		return false, err
	}
	return headers["X-History-Location"] != "", nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.size
}

// decodeMetaData sets the metadata in the object from a swift.Object
//
// Sets
//  o.lastModified
//  o.size
//  o.md5
//  o.contentType
func (o *Object) decodeMetaData(info *swift.Object) (err error) {
	o.lastModified = info.LastModified
	o.size = info.Bytes
	o.md5 = info.Hash
	o.contentType = info.ContentType
	// Always succeeds - the error return is kept for the callers' convenience.
	return nil
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
//
// it returns fs.ErrorObjectNotFound if the object isn't found
func (o *Object) readMetaData() (err error) {
	// A non-nil headers map means the metadata has already been read.
	if o.headers != nil {
		return nil
	}
	var info swift.Object
	var h swift.Headers
	container, containerPath := o.split()
	err = o.fs.pacer.Call(func() (bool, error) {
		info, h, err = o.fs.c.Object(container, containerPath)
		return shouldRetryHeaders(h, err)
	})
	if err != nil {
		// Translate the swift error into the fs error callers expect.
		if err == swift.ObjectNotFound {
			return fs.ErrorObjectNotFound
		}
		return err
	}
	o.headers = h
	err = o.decodeMetaData(&info)
	if err != nil {
		return err
	}
	return nil
}
// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
	// With --use-server-modtime skip reading the per-object metadata.
	if fs.Config.UseServerModTime {
		return o.lastModified
	}
	err := o.readMetaData()
	if err != nil {
		fs.Debugf(o, "Failed to read metadata: %s", err)
		return o.lastModified
	}
	// Prefer the mtime stored in the object metadata, falling back to
	// the server's LastModified if it is missing or unparseable.
	modTime, err := o.headers.ObjectMetadata().GetModTime()
	if err != nil {
		// fs.Logf(o, "Failed to read mtime from object: %v", err)
		return o.lastModified
	}
	return modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	err := o.readMetaData()
	if err != nil {
		return err
	}
	// Encode the mtime into the object metadata headers.
	meta := o.headers.ObjectMetadata()
	meta.SetModTime(modTime)
	newHeaders := meta.ObjectHeaders()
	// Merge the updated metadata into the cached headers.
	for k, v := range newHeaders {
		o.headers[k] = v
	}
	// Include any other metadata from request
	for k, v := range o.headers {
		if strings.HasPrefix(k, "X-Object-") {
			newHeaders[k] = v
		}
	}
	container, containerPath := o.split()
	return o.fs.pacer.Call(func() (bool, error) {
		err = o.fs.c.ObjectUpdate(container, containerPath, newHeaders)
		return shouldRetry(err)
	})
}
// Storable returns if this object is storable
//
// It compares the Content-Type to directoryMarkerContentType - that
// makes it a directory marker which is not storable.
func (o *Object) Storable() bool {
	return o.contentType != directoryMarkerContentType
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	fs.FixRangeOption(options, o.size)
	headers := fs.OpenOptionHeaders(options)
	// Only verify the checksum for whole-object reads - ranged reads
	// can't be checked against the object's hash.
	_, isRanging := headers["Range"]
	container, containerPath := o.split()
	err = o.fs.pacer.Call(func() (bool, error) {
		var rxHeaders swift.Headers
		in, rxHeaders, err = o.fs.c.ObjectOpen(container, containerPath, !isRanging, headers)
		return shouldRetryHeaders(rxHeaders, err)
	})
	return
}
// min returns the smaller of x and y.
func min(x, y int64) int64 {
	if y < x {
		return y
	}
	return x
}
// removeSegments removes any old segments from o
//
// if except is passed in then segments with that prefix won't be deleted
func (o *Object) removeSegments(except string) error {
	segmentsContainer, prefix, err := o.getSegmentsDlo()
	if err != nil {
		// Bug fix: this error was previously ignored (immediately
		// overwritten by the next assignment) - propagate it instead.
		return err
	}
	err = o.fs.listContainerRoot(segmentsContainer, prefix, "", false, true, true, func(remote string, object *swift.Object, isDirectory bool) error {
		if isDirectory {
			return nil
		}
		if except != "" && strings.HasPrefix(remote, except) {
			// fs.Debugf(o, "Ignoring current segment file %q in container %q", segmentsRoot+remote, segmentsContainer)
			return nil
		}
		fs.Debugf(o, "Removing segment file %q in container %q", remote, segmentsContainer)
		var err error
		return o.fs.pacer.Call(func() (bool, error) {
			err = o.fs.c.ObjectDelete(segmentsContainer, remote)
			return shouldRetry(err)
		})
	})
	if err != nil {
		return err
	}
	// remove the segments container if empty, ignore errors
	err = o.fs.pacer.Call(func() (bool, error) {
		err = o.fs.c.ContainerDelete(segmentsContainer)
		if err == swift.ContainerNotFound || err == swift.ContainerNotEmpty {
			// Expected states - don't retry; the error is discarded below.
			return false, err
		}
		return shouldRetry(err)
	})
	if err == nil {
		fs.Debugf(o, "Removed empty container %q", segmentsContainer)
	}
	return nil
}
// getSegmentsDlo returns the segments container and the common prefix
// of the segment objects for this dynamic large object, parsed from
// the X-Object-Manifest header which has the form "container/prefix".
func (o *Object) getSegmentsDlo() (segmentsContainer string, prefix string, err error) {
	if err = o.readMetaData(); err != nil {
		return
	}
	dirManifest := o.headers["X-Object-Manifest"]
	// The manifest value may be URL (percent) escaped
	dirManifest, err = url.PathUnescape(dirManifest)
	if err != nil {
		return
	}
	// Split on the first "/" into container and object prefix
	delimiter := strings.Index(dirManifest, "/")
	if len(dirManifest) == 0 || delimiter < 0 {
		err = errors.New("Missing or wrong structure of manifest of Dynamic large object")
		return
	}
	return dirManifest[:delimiter], dirManifest[delimiter+1:], nil
}
// urlEncode encodes a string so that it is a valid URL
//
// We don't use any of Go's standard methods as we need `/` not
// encoded but we need '&' encoded.
func urlEncode(str string) string {
	var b strings.Builder
	b.Grow(len(str))
	for _, c := range []byte(str) {
		switch {
		case c >= '0' && c <= '9', c >= 'a' && c <= 'z', c >= 'A' && c <= 'Z', c == '/', c == '.':
			b.WriteByte(c)
		default:
			// percent-encode everything else as uppercase hex
			fmt.Fprintf(&b, "%%%02X", c)
		}
	}
	return b.String()
}
// updateChunks updates the existing object using chunks to a separate
// container. It returns a string which prefixes current segments.
//
// Segments of at most ChunkSize bytes are written to
// <container>_segments under <containerPath>/<timestamp>/<size>/NNNNNNNN,
// then a zero-length manifest object is written carrying the
// X-Object-Manifest header pointing at that prefix.
//
// headers is mutated (Content-Length and X-Object-Manifest are set).
// size may be -1 for streaming uploads of unknown length.
func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64, contentType string) (string, error) {
	container, containerPath := o.split()
	segmentsContainer := container + "_segments"
	// Create the segmentsContainer if it doesn't exist
	var err error
	err = o.fs.pacer.Call(func() (bool, error) {
		var rxHeaders swift.Headers
		_, rxHeaders, err = o.fs.c.Container(segmentsContainer)
		return shouldRetryHeaders(rxHeaders, err)
	})
	if err == swift.ContainerNotFound {
		// deliberately shadows the outer headers - only the storage
		// policy is sent on container creation
		headers := swift.Headers{}
		if o.fs.opt.StoragePolicy != "" {
			headers["X-Storage-Policy"] = o.fs.opt.StoragePolicy
		}
		err = o.fs.pacer.Call(func() (bool, error) {
			err = o.fs.c.ContainerCreate(segmentsContainer, headers)
			return shouldRetry(err)
		})
	}
	if err != nil {
		return "", err
	}
	// Upload the chunks
	left := size
	i := 0
	uniquePrefix := fmt.Sprintf("%s/%d", swift.TimeToFloatString(time.Now()), size)
	segmentsPath := path.Join(containerPath, uniquePrefix)
	in := bufio.NewReader(in0)
	segmentInfos := make([]string, 0, ((size / int64(o.fs.opt.ChunkSize)) + 1))
	for {
		// can we read at least one byte?
		if _, err := in.Peek(1); err != nil {
			if left > 0 {
				return "", err // read less than expected
			}
			fs.Debugf(o, "Uploading segments into %q seems done (%v)", segmentsContainer, err)
			break
		}
		n := int64(o.fs.opt.ChunkSize)
		if size != -1 {
			// known size: clamp the final segment and track remaining bytes
			n = min(left, n)
			headers["Content-Length"] = strconv.FormatInt(n, 10) // set Content-Length as we know it
			left -= n
		}
		segmentReader := io.LimitReader(in, n)
		segmentPath := fmt.Sprintf("%s/%08d", segmentsPath, i)
		fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, segmentsContainer)
		// CallNoRetry: the segment reader has been consumed so a retry
		// could not replay the data
		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
			var rxHeaders swift.Headers
			rxHeaders, err = o.fs.c.ObjectPut(segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
			if err == nil {
				segmentInfos = append(segmentInfos, segmentPath)
			}
			return shouldRetryHeaders(rxHeaders, err)
		})
		if err != nil {
			// clean up any segments already uploaded before failing
			deleteChunks(o, segmentsContainer, segmentInfos)
			segmentInfos = nil
			return "", err
		}
		i++
	}
	// Upload the manifest
	headers["X-Object-Manifest"] = urlEncode(fmt.Sprintf("%s/%s", segmentsContainer, segmentsPath))
	headers["Content-Length"] = "0" // set Content-Length as we know it
	emptyReader := bytes.NewReader(nil)
	err = o.fs.pacer.Call(func() (bool, error) {
		var rxHeaders swift.Headers
		rxHeaders, err = o.fs.c.ObjectPut(container, containerPath, emptyReader, true, "", contentType, headers)
		return shouldRetryHeaders(rxHeaders, err)
	})
	if err != nil {
		// manifest upload failed: remove the now-orphaned segments
		deleteChunks(o, segmentsContainer, segmentInfos)
		segmentInfos = nil
	}
	return uniquePrefix + "/", err
}
// deleteChunks deletes the given segment files from segmentsContainer.
// Errors are logged but not returned — this is best-effort cleanup
// after a failed chunked upload.
func deleteChunks(o *Object, segmentsContainer string, segmentInfos []string) {
	// len() of a nil slice is 0, so the previous explicit nil check
	// was redundant (gocritic: nilValReturn)
	for _, v := range segmentInfos {
		fs.Debugf(o, "Delete segment file %q on %q", v, segmentsContainer)
		e := o.fs.c.ObjectDelete(segmentsContainer, v)
		if e != nil {
			fs.Errorf(o, "Error occurred in delete segment file %q on %q, error: %q", v, segmentsContainer, e)
		}
	}
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
//
// Files above ChunkSize (or of unknown size when no_chunk is off) are
// uploaded as dynamic large objects via updateChunks; otherwise a
// single ObjectPut is used. If the object was previously a dynamic
// large object its old segments are removed afterwards.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	container, containerPath := o.split()
	if container == "" {
		return fserrors.FatalError(errors.New("can't upload files to the root"))
	}
	err := o.fs.makeContainer(ctx, container)
	if err != nil {
		return err
	}
	size := src.Size()
	modTime := src.ModTime(ctx)
	// Note whether this is a dynamic large object before starting
	isDynamicLargeObject, err := o.isDynamicLargeObject()
	if err != nil {
		return err
	}
	// Set the mtime
	m := swift.Metadata{}
	m.SetModTime(modTime)
	contentType := fs.MimeType(ctx, src)
	headers := m.ObjectHeaders()
	fs.OpenOptionAddHeaders(options, headers)
	// uniquePrefix identifies this upload's segments so that
	// removeSegments below can spare them
	uniquePrefix := ""
	if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
		uniquePrefix, err = o.updateChunks(in, headers, size, contentType)
		if err != nil {
			return err
		}
		o.headers = nil // wipe old metadata
	} else {
		var inCount *readers.CountingReader
		if size >= 0 {
			headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length if we know it
		} else {
			// otherwise count the size for later
			inCount = readers.NewCountingReader(in)
			in = inCount
		}
		var rxHeaders swift.Headers
		// CallNoRetry: the reader has been consumed so a retry could not replay it
		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
			rxHeaders, err = o.fs.c.ObjectPut(container, containerPath, in, true, "", contentType, headers)
			return shouldRetryHeaders(rxHeaders, err)
		})
		if err != nil {
			return err
		}
		// set Metadata since ObjectPut checked the hash and length so we know the
		// object has been safely uploaded
		o.lastModified = modTime
		o.size = size
		o.md5 = rxHeaders["ETag"]
		o.contentType = contentType
		o.headers = headers
		if inCount != nil {
			// update the size if streaming from the reader
			o.size = int64(inCount.BytesRead())
		}
	}
	// If file was a dynamic large object then remove old/all segments
	if isDynamicLargeObject {
		err = o.removeSegments(uniquePrefix)
		if err != nil {
			fs.Logf(o, "Failed to remove old segments - carrying on with upload: %v", err)
		}
	}
	// Read the metadata from the newly created object if necessary
	return o.readMetaData()
}
// Remove an object
//
// For dynamic large objects the manifest is deleted first and then
// the segments, unless the container uses versioning in which case
// the segments are left alone.
func (o *Object) Remove(ctx context.Context) (err error) {
	container, containerPath := o.split()
	// Remove file/manifest first
	err = o.fs.pacer.Call(func() (bool, error) {
		err = o.fs.c.ObjectDelete(container, containerPath)
		return shouldRetry(err)
	})
	if err != nil {
		return err
	}
	// NOTE(review): isDynamicLargeObject is called after the delete —
	// presumably it works from cached metadata rather than the (now
	// deleted) remote object; confirm against its implementation.
	isDynamicLargeObject, err := o.isDynamicLargeObject()
	if err != nil {
		return err
	}
	// ...then segments if required
	if isDynamicLargeObject {
		isInContainerVersioning, err := o.isInContainerVersioning(container)
		if err != nil {
			return err
		}
		if !isInContainerVersioning {
			err = o.removeSegments("")
			if err != nil {
				return err
			}
		}
	}
	return nil
}
// MimeType of an Object if known, "" otherwise
//
// This is the Content-Type recorded when the object was last read or
// uploaded.
func (o *Object) MimeType(ctx context.Context) string {
	return o.contentType
}
// Check the interfaces are satisfied at compile time
var (
	_ fs.Fs          = (*Fs)(nil)
	_ fs.Purger      = (*Fs)(nil)
	_ fs.PutStreamer = (*Fs)(nil)
	_ fs.Copier      = (*Fs)(nil)
	_ fs.ListRer     = (*Fs)(nil)
	_ fs.Object      = (*Object)(nil)
	_ fs.MimeTyper   = (*Object)(nil)
)
swift: fix update multipart object removing all of its own parts
After uploading a multipart object, rclone deletes any unused parts.
Probably as part of the listing unification, the detection of the
parts belonging to the current upload was failing, so calling Update
was deleting the parts for the current object.
This change fixes the detection and deletes all the old parts but none
of the new ones now.
Fixes #4075
// Package swift provides an interface to the Swift object storage system
package swift
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"net/url"
"path"
"strconv"
"strings"
"time"
"github.com/ncw/swift"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
)
// Constants
const (
	directoryMarkerContentType = "application/directory" // content type of directory marker objects
	listChunks                 = 1000                    // chunk size to read directory listings
	defaultChunkSize           = 5 * fs.GibiByte         // largest single upload without chunking (swift's 5GB maximum - see chunk_size help)
	minSleep                   = 10 * time.Millisecond   // In case of error, start at 10ms sleep.
)
// SharedOptions are shared between swift and hubic
//
// The first line of each Help string is the short summary; the rest
// is the long help text.
var SharedOptions = []fs.Option{{
	Name: "chunk_size",
	Help: `Above this size files will be chunked into a _segments container.
Above this size files will be chunked into a _segments container. The
default for this is 5GB which is its maximum value.`,
	Default:  defaultChunkSize,
	Advanced: true,
}, {
	Name: "no_chunk",
	Help: `Don't chunk files during streaming upload.
When doing streaming uploads (eg using rcat or mount) setting this
flag will cause the swift backend to not upload chunked files.
This will limit the maximum upload size to 5GB. However non chunked
files are easier to deal with and have an MD5SUM.
Rclone will still chunk files bigger than chunk_size when doing normal
copy operations.`,
	Default:  false,
	Advanced: true,
}, {
	Name:     config.ConfigEncoding,
	Help:     config.ConfigEncodingHelp,
	Advanced: true,
	Default: (encoder.EncodeInvalidUtf8 |
		encoder.EncodeSlash),
}}
// Register with Fs
//
// Declares the "swift" backend and all of its configuration options.
// Options common to swift and hubic are appended from SharedOptions.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "swift",
		Description: "OpenStack Swift (Rackspace Cloud Files, Memset Memstore, OVH)",
		NewFs:       NewFs,
		Options: append([]fs.Option{{
			Name:    "env_auth",
			Help:    "Get swift credentials from environment variables in standard OpenStack form.",
			Default: false,
			Examples: []fs.OptionExample{
				{
					Value: "false",
					Help:  "Enter swift credentials in the next step",
				}, {
					Value: "true",
					Help:  "Get swift credentials from environment vars. Leave other fields blank if using this.",
				},
			},
		}, {
			Name: "user",
			Help: "User name to log in (OS_USERNAME).",
		}, {
			Name: "key",
			Help: "API key or password (OS_PASSWORD).",
		}, {
			Name: "auth",
			Help: "Authentication URL for server (OS_AUTH_URL).",
			Examples: []fs.OptionExample{{
				Help:  "Rackspace US",
				Value: "https://auth.api.rackspacecloud.com/v1.0",
			}, {
				Help:  "Rackspace UK",
				Value: "https://lon.auth.api.rackspacecloud.com/v1.0",
			}, {
				Help:  "Rackspace v2",
				Value: "https://identity.api.rackspacecloud.com/v2.0",
			}, {
				Help:  "Memset Memstore UK",
				Value: "https://auth.storage.memset.com/v1.0",
			}, {
				Help:  "Memset Memstore UK v2",
				Value: "https://auth.storage.memset.com/v2.0",
			}, {
				Help:  "OVH",
				Value: "https://auth.cloud.ovh.net/v3",
			}},
		}, {
			Name: "user_id",
			Help: "User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).",
		}, {
			Name: "domain",
			Help: "User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)",
		}, {
			Name: "tenant",
			Help: "Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)",
		}, {
			Name: "tenant_id",
			Help: "Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)",
		}, {
			Name: "tenant_domain",
			Help: "Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)",
		}, {
			Name: "region",
			Help: "Region name - optional (OS_REGION_NAME)",
		}, {
			Name: "storage_url",
			Help: "Storage URL - optional (OS_STORAGE_URL)",
		}, {
			Name: "auth_token",
			Help: "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)",
		}, {
			Name: "application_credential_id",
			Help: "Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)",
		}, {
			Name: "application_credential_name",
			Help: "Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)",
		}, {
			Name: "application_credential_secret",
			Help: "Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)",
		}, {
			Name:    "auth_version",
			Help:    "AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)",
			Default: 0,
		}, {
			Name:    "endpoint_type",
			Help:    "Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE)",
			Default: "public",
			Examples: []fs.OptionExample{{
				Help:  "Public (default, choose this if not sure)",
				Value: "public",
			}, {
				Help:  "Internal (use internal service net)",
				Value: "internal",
			}, {
				Help:  "Admin",
				Value: "admin",
			}},
		}, {
			Name: "storage_policy",
			Help: `The storage policy to use when creating a new container
This applies the specified storage policy when creating a new
container. The policy cannot be changed afterwards. The allowed
configuration values and their meaning depend on your Swift storage
provider.`,
			Default: "",
			Examples: []fs.OptionExample{{
				Help:  "Default",
				Value: "",
			}, {
				Help:  "OVH Public Cloud Storage",
				Value: "pcs",
			}, {
				Help:  "OVH Public Cloud Archive",
				Value: "pca",
			}},
		}}, SharedOptions...),
	})
}
// Options defines the configuration for this backend
//
// The config struct tags match the option names registered in init
// and in SharedOptions.
type Options struct {
	EnvAuth                     bool                 `config:"env_auth"`
	User                        string               `config:"user"`
	Key                         string               `config:"key"`
	Auth                        string               `config:"auth"`
	UserID                      string               `config:"user_id"`
	Domain                      string               `config:"domain"`
	Tenant                      string               `config:"tenant"`
	TenantID                    string               `config:"tenant_id"`
	TenantDomain                string               `config:"tenant_domain"`
	Region                      string               `config:"region"`
	StorageURL                  string               `config:"storage_url"`
	AuthToken                   string               `config:"auth_token"`
	AuthVersion                 int                  `config:"auth_version"`
	ApplicationCredentialID     string               `config:"application_credential_id"`
	ApplicationCredentialName   string               `config:"application_credential_name"`
	ApplicationCredentialSecret string               `config:"application_credential_secret"`
	StoragePolicy               string               `config:"storage_policy"`
	EndpointType                string               `config:"endpoint_type"`
	ChunkSize                   fs.SizeSuffix        `config:"chunk_size"`
	NoChunk                     bool                 `config:"no_chunk"`
	Enc                         encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote swift server
//
// It is safe for concurrent use; API calls are rate limited through pacer.
type Fs struct {
	name             string            // name of this remote
	root             string            // the path we are working on if any
	features         *fs.Features      // optional features
	opt              Options           // options for this backend
	c                *swift.Connection // the connection to the swift server
	rootContainer    string            // container part of root (if any)
	rootDirectory    string            // directory part of root (if any)
	cache            *bucket.Cache     // cache of container status
	noCheckContainer bool              // don't check the container before creating it
	pacer            *fs.Pacer         // To pace the API calls
}
// Object describes a swift object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs           *Fs           // what this object is part of
	remote       string        // The remote path
	size         int64         // size of the object in bytes
	lastModified time.Time     // modification time of the object
	contentType  string        // Content-Type of the object
	md5          string        // MD5 hash (ETag) if known
	headers      swift.Headers // The object headers if known
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	// Simple accessor - f.name is set once in NewFsWithConnection
	return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	// Simple accessor - f.root is maintained by setRoot
	return f.root
}
// String converts this Fs to a string for display purposes.
//
// Describes the root: the whole account, a container, or a path
// within a container.
func (f *Fs) String() string {
	if f.rootContainer == "" {
		// was fmt.Sprintf("Swift root") — a constant format string with
		// no arguments, flagged by go vet; return the literal directly
		return "Swift root"
	}
	if f.rootDirectory == "" {
		return fmt.Sprintf("Swift container %s", f.rootContainer)
	}
	return fmt.Sprintf("Swift container %s path %s", f.rootContainer, f.rootDirectory)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	// Populated in NewFsWithConnection via (&fs.Features{...}).Fill(f)
	return f.features
}
// retryErrorCodes is a slice of HTTP error codes that we will retry —
// consulted by shouldRetry below.
var retryErrorCodes = []int{
	401, // Unauthorized (eg "Token has expired")
	408, // Request Timeout
	409, // Conflict - various states that could be resolved on a retry
	429, // Rate exceeded.
	500, // Get occasional 500 Internal Server Error
	503, // Service Unavailable/Slow Down - "Reduce your request rate"
	504, // Gateway Time-out
}
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience.
func shouldRetry(err error) (bool, error) {
	// Swift errors carrying a retryable HTTP status code are retried
	if swiftErr, ok := err.(*swift.Error); ok {
		for _, code := range retryErrorCodes {
			if code == swiftErr.StatusCode {
				return true, err
			}
		}
	}
	// Otherwise fall back to the generic retry rules
	return fserrors.ShouldRetry(err), err
}
// shouldRetryHeaders returns a boolean as to whether this err
// deserves to be retried. It reads the headers passed in looking for
// `Retry-After`. It returns the err as a convenience.
//
// Retry-After values up to 60s are slept immediately and retried;
// longer values are converted into an ErrorRetryAfter for the caller
// to schedule.
func shouldRetryHeaders(headers swift.Headers, err error) (bool, error) {
	if swiftError, ok := err.(*swift.Error); ok && swiftError.StatusCode == 429 {
		if value := headers["Retry-After"]; value != "" {
			retryAfter, parseErr := strconv.Atoi(value)
			if parseErr != nil {
				fs.Errorf(nil, "Failed to parse Retry-After: %q: %v", value, parseErr)
			} else {
				duration := time.Second * time.Duration(retryAfter)
				if duration <= 60*time.Second {
					// Do a short sleep immediately
					fs.Debugf(nil, "Sleeping for %v to obey Retry-After", duration)
					time.Sleep(duration)
					return true, err
				}
				// Delay a long sleep for a retry
				return false, fserrors.NewErrorRetryAfter(duration)
			}
		}
	}
	// Not a 429 with usable Retry-After - use the standard rules
	return shouldRetry(err)
}
// parsePath parses a remote 'url' by stripping any leading and
// trailing slashes.
func parsePath(path string) string {
	return strings.Trim(path, "/")
}
// split returns container and containerPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (container, containerPath string) {
	fullPath := path.Join(f.root, rootRelativePath)
	container, containerPath = bucket.Split(fullPath)
	// Apply the configured encoding to both parts
	container = f.opt.Enc.FromStandardName(container)
	containerPath = f.opt.Enc.FromStandardPath(containerPath)
	return container, containerPath
}
// split returns container and containerPath from the object
func (o *Object) split() (container, containerPath string) {
	// Delegates to Fs.split using the object's remote path
	return o.fs.split(o.remote)
}
// swiftConnection makes a connection to swift
//
// It builds a swift.Connection from opt, optionally applies the OS_*
// environment variables, authenticates if needed, then re-applies any
// explicitly configured StorageUrl/AuthToken so they override what
// authentication returned.
func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
	c := &swift.Connection{
		// Keep these in the same order as the Config for ease of checking
		UserName:                    opt.User,
		ApiKey:                      opt.Key,
		AuthUrl:                     opt.Auth,
		UserId:                      opt.UserID,
		Domain:                      opt.Domain,
		Tenant:                      opt.Tenant,
		TenantId:                    opt.TenantID,
		TenantDomain:                opt.TenantDomain,
		Region:                      opt.Region,
		StorageUrl:                  opt.StorageURL,
		AuthToken:                   opt.AuthToken,
		AuthVersion:                 opt.AuthVersion,
		ApplicationCredentialId:     opt.ApplicationCredentialID,
		ApplicationCredentialName:   opt.ApplicationCredentialName,
		ApplicationCredentialSecret: opt.ApplicationCredentialSecret,
		EndpointType:                swift.EndpointType(opt.EndpointType),
		ConnectTimeout:              10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
		Timeout:                     10 * fs.Config.Timeout,        // Use the timeouts in the transport
		Transport:                   fshttp.NewTransport(fs.Config),
	}
	if opt.EnvAuth {
		err := c.ApplyEnvironment()
		if err != nil {
			return nil, errors.Wrap(err, "failed to read environment variables")
		}
	}
	// Remember any explicitly supplied values before Authenticate overwrites them
	StorageUrl, AuthToken := c.StorageUrl, c.AuthToken // nolint
	if !c.Authenticated() {
		if (c.ApplicationCredentialId != "" || c.ApplicationCredentialName != "") && c.ApplicationCredentialSecret == "" {
			if c.UserName == "" && c.UserId == "" {
				return nil, errors.New("user name or user id not found for authentication (and no storage_url+auth_token is provided)")
			}
			if c.ApiKey == "" {
				return nil, errors.New("key not found")
			}
		}
		if c.AuthUrl == "" {
			return nil, errors.New("auth not found")
		}
		err := c.Authenticate() // fills in c.StorageUrl and c.AuthToken
		if err != nil {
			return nil, err
		}
	}
	// Make sure we re-auth with the AuthToken and StorageUrl
	// provided by wrapping the existing auth, so we can just
	// override one or the other or both.
	if StorageUrl != "" || AuthToken != "" {
		// Re-write StorageURL and AuthToken if they are being
		// overridden as c.Authenticate above will have
		// overwritten them.
		if StorageUrl != "" {
			c.StorageUrl = StorageUrl
		}
		if AuthToken != "" {
			c.AuthToken = AuthToken
		}
		c.Auth = newAuth(c.Auth, StorageUrl, AuthToken)
	}
	return c, nil
}
// checkUploadChunkSize validates that the chunk size is at least one byte.
func checkUploadChunkSize(cs fs.SizeSuffix) error {
	const minChunkSize = fs.Byte
	if cs >= minChunkSize {
		return nil
	}
	return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
// setUploadChunkSize validates cs and, if valid, installs it as the
// new chunk size, returning the previous value.
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	if err = checkUploadChunkSize(cs); err != nil {
		return old, err
	}
	old = f.opt.ChunkSize
	f.opt.ChunkSize = cs
	return old, nil
}
// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
	cleaned := parsePath(root)
	f.root = cleaned
	// Split the root into its container and directory parts
	f.rootContainer, f.rootDirectory = bucket.Split(cleaned)
}
// NewFsWithConnection constructs an Fs from the path, container:path
// and authenticated connection.
//
// if noCheckContainer is set then the Fs won't check the container
// exists before creating it.
//
// If the root points at an existing file (not a directory marker) the
// root is adjusted to its parent and fs.ErrorIsFile is returned along
// with the Fs, following the rclone convention.
func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, noCheckContainer bool) (fs.Fs, error) {
	f := &Fs{
		name:             name,
		opt:              *opt,
		c:                c,
		noCheckContainer: noCheckContainer,
		pacer:            fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
		cache:            bucket.NewCache(),
	}
	f.setRoot(root)
	f.features = (&fs.Features{
		ReadMimeType:      true,
		WriteMimeType:     true,
		BucketBased:       true,
		BucketBasedRootOK: true,
		SlowModTime:       true,
	}).Fill(f)
	if f.rootContainer != "" && f.rootDirectory != "" {
		// Check to see if the object exists - ignoring directory markers
		var info swift.Object
		var err error
		encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
		err = f.pacer.Call(func() (bool, error) {
			var rxHeaders swift.Headers
			info, rxHeaders, err = f.c.Object(f.rootContainer, encodedDirectory)
			return shouldRetryHeaders(rxHeaders, err)
		})
		if err == nil && info.ContentType != directoryMarkerContentType {
			newRoot := path.Dir(f.root)
			if newRoot == "." {
				newRoot = ""
			}
			f.setRoot(newRoot)
			// return an error with an fs which points to the parent
			return f, fs.ErrorIsFile
		}
	}
	return f, nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	if err := configstruct.Set(m, opt); err != nil {
		return nil, err
	}
	if err := checkUploadChunkSize(opt.ChunkSize); err != nil {
		return nil, errors.Wrap(err, "swift: chunk size")
	}
	c, err := swiftConnection(opt, name)
	if err != nil {
		return nil, err
	}
	return NewFsWithConnection(opt, name, root, c, false)
}
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	// Note that due to a quirk of swift, dynamic large objects are
	// returned as 0 bytes in the listing. Correct this here by
	// making sure we read the full metadata for all 0 byte files.
	// We don't read the metadata for directory marker objects.
	//
	// Consistency fix: use the directoryMarkerContentType constant
	// rather than a duplicated "application/directory" literal.
	if info != nil && info.Bytes == 0 && info.ContentType != directoryMarkerContentType {
		err := o.readMetaData() // reads info and headers, returning an error
		if err == fs.ErrorObjectNotFound {
			// We have a dangling large object here so just return the original metadata
			fs.Errorf(o, "dangling large object with no contents")
		} else if err != nil {
			return nil, err
		} else {
			return o, nil
		}
	}
	if info != nil {
		// Set info but not headers
		err := o.decodeMetaData(info)
		if err != nil {
			return nil, err
		}
	} else {
		err := o.readMetaData() // reads info and headers, returning an error
		if err != nil {
			return nil, err
		}
	}
	return o, nil
}
// NewObject finds the Object at remote. If it can't be found it
// returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	// nil info forces the metadata to be read from the remote
	return f.newObjectWithInfo(remote, nil)
}
// listFn is called from list and listContainerRoot to handle an object.
//
// remote is the object name relative to the listing prefix (with the
// container prepended if addContainer was set). isDirectory is only
// ever true for non-recursive listings.
type listFn func(remote string, object *swift.Object, isDirectory bool) error
// listContainerRoot lists the objects into the function supplied from
// the container and directory supplied. The remote has prefix
// removed from it and if addContainer is set then it adds the
// container to the start.
//
// Set recurse to read sub directories
//
// Listing is paginated via swift.ObjectsWalk in chunks of listChunks;
// each page fetch is paced and retried.
func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn listFn) error {
	// Normalise prefix and directory to end in "/" so prefix stripping works
	if prefix != "" && !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}
	if directory != "" && !strings.HasSuffix(directory, "/") {
		directory += "/"
	}
	// Options for ObjectsWalk
	opts := swift.ObjectsOpts{
		Prefix: directory,
		Limit:  listChunks,
	}
	if !recurse {
		// A delimiter makes swift return "subdirectory" entries ending in "/"
		opts.Delimiter = '/'
	}
	return f.c.ObjectsWalk(container, &opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
		var objects []swift.Object
		var err error
		err = f.pacer.Call(func() (bool, error) {
			objects, err = f.c.Objects(container, opts)
			return shouldRetry(err)
		})
		if err == nil {
			for i := range objects {
				object := &objects[i]
				isDirectory := false
				if !recurse {
					isDirectory = strings.HasSuffix(object.Name, "/")
				}
				remote := f.opt.Enc.ToStandardPath(object.Name)
				if !strings.HasPrefix(remote, prefix) {
					fs.Logf(f, "Odd name received %q", remote)
					continue
				}
				if !includeDirMarkers && remote == prefix {
					// If we have zero length directory markers ending in / then swift
					// will return them in the listing for the directory which causes
					// duplicate directories. Ignore them here.
					continue
				}
				remote = remote[len(prefix):]
				if addContainer {
					remote = path.Join(container, remote)
				}
				err = fn(remote, object, isDirectory)
				if err != nil {
					break
				}
			}
		}
		return objects, err
	})
}
// addEntryFn is called from list to add a directory entry to the listing.
type addEntryFn func(fs.DirEntry) error
// list the objects into the function supplied
//
// Converts the raw swift objects from listContainerRoot into
// fs.DirEntry values (fs.Dir for directories, fs.Object otherwise)
// and maps a missing container to fs.ErrorDirNotFound.
func (f *Fs) list(container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn addEntryFn) error {
	err := f.listContainerRoot(container, directory, prefix, addContainer, recurse, includeDirMarkers, func(remote string, object *swift.Object, isDirectory bool) (err error) {
		if isDirectory {
			remote = strings.TrimRight(remote, "/")
			d := fs.NewDir(remote, time.Time{}).SetSize(object.Bytes)
			err = fn(d)
		} else {
			// newObjectWithInfo does a full metadata read on 0 size objects which might be dynamic large objects
			var o fs.Object
			o, err = f.newObjectWithInfo(remote, object)
			if err != nil {
				return err
			}
			// skip directory markers unless explicitly requested
			if includeDirMarkers || o.Storable() {
				err = fn(o)
			}
		}
		return err
	})
	if err == swift.ContainerNotFound {
		err = fs.ErrorDirNotFound
	}
	return err
}
// listDir lists a single directory
func (f *Fs) listDir(container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
	if container == "" {
		return nil, fs.ErrorListBucketRequired
	}
	// Accumulate every entry from a non-recursive listing which
	// excludes directory markers
	collect := func(entry fs.DirEntry) error {
		entries = append(entries, entry)
		return nil
	}
	if err = f.list(container, directory, prefix, addContainer, false, false, collect); err != nil {
		return nil, err
	}
	// container must be present if listing succeeded
	f.cache.MarkOK(container)
	return entries, nil
}
// listContainers lists all the containers as directory entries
func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err error) {
	var containers []swift.Container
	err = f.pacer.Call(func() (bool, error) {
		containers, err = f.c.ContainersAll(nil)
		return shouldRetry(err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "container listing failed")
	}
	for i := range containers {
		c := &containers[i]
		f.cache.MarkOK(c.Name)
		dir := fs.NewDir(f.opt.Enc.ToStandardName(c.Name), time.Time{}).SetSize(c.Bytes).SetItems(c.Count)
		entries = append(entries, dir)
	}
	return entries, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	container, directory := f.split(dir)
	if container != "" {
		return f.listDir(container, directory, f.rootDirectory, f.rootContainer == "")
	}
	// No container: either list all containers or reject a bare path
	if directory != "" {
		return nil, fs.ErrorListBucketRequired
	}
	return f.listContainers(ctx)
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	container, directory := f.split(dir)
	list := walk.NewListRHelper(callback)
	// listR recursively lists one container through the helper
	listR := func(container, directory, prefix string, addContainer bool) error {
		return f.list(container, directory, prefix, addContainer, true, false, func(entry fs.DirEntry) error {
			return list.Add(entry)
		})
	}
	if container == "" {
		// Listing the root: enumerate every container and recurse into each
		entries, err := f.listContainers(ctx)
		if err != nil {
			return err
		}
		for _, entry := range entries {
			err = list.Add(entry)
			if err != nil {
				return err
			}
			container := entry.Remote()
			err = listR(container, "", f.rootDirectory, true)
			if err != nil {
				return err
			}
			// container must be present if listing succeeded
			f.cache.MarkOK(container)
		}
	} else {
		err = listR(container, directory, f.rootDirectory, f.rootContainer == "")
		if err != nil {
			return err
		}
		// container must be present if listing succeeded
		f.cache.MarkOK(container)
	}
	// Flush any buffered entries to the callback
	return list.Flush()
}
// About gets quota information by summing the byte and object counts
// of every container in the account.
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	var containers []swift.Container
	err := f.pacer.Call(func() (bool, error) {
		var listErr error
		containers, listErr = f.c.ContainersAll(nil)
		return shouldRetry(listErr)
	})
	if err != nil {
		return nil, errors.Wrap(err, "container listing failed")
	}
	var total, objects int64
	for i := range containers {
		total += containers[i].Bytes
		objects += containers[i].Count
	}
	return &fs.Usage{
		Used:    fs.NewUsageValue(total),   // bytes in use
		Objects: fs.NewUsageValue(objects), // objects in use
	}, nil
}
// Put the object into the container
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Temporary Object under construction.
	// Renamed from "fs" — the original local shadowed the fs package
	// within this function.
	o := &Object{
		fs:      f,
		remote:  src.Remote(),
		headers: swift.Headers{}, // Empty object headers to stop readMetaData being called
	}
	return o, o.Update(ctx, in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Put handles size == -1 by counting or chunking as configured
	return f.Put(ctx, in, src, options...)
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	// Only the container part matters - swift has no real directories
	container, _ := f.split(dir)
	return f.makeContainer(ctx, container)
}
// makeContainer creates the container if it doesn't exist
//
// Results are memoised through f.cache so repeated calls for the
// same container don't hit the API. With noCheckContainer set the
// existence check is skipped and creation is attempted directly.
func (f *Fs) makeContainer(ctx context.Context, container string) error {
	return f.cache.Create(container, func() error {
		// Check to see if container exists first
		var err error = swift.ContainerNotFound
		if !f.noCheckContainer {
			err = f.pacer.Call(func() (bool, error) {
				var rxHeaders swift.Headers
				_, rxHeaders, err = f.c.Container(container)
				return shouldRetryHeaders(rxHeaders, err)
			})
		}
		if err == swift.ContainerNotFound {
			headers := swift.Headers{}
			if f.opt.StoragePolicy != "" {
				headers["X-Storage-Policy"] = f.opt.StoragePolicy
			}
			err = f.pacer.Call(func() (bool, error) {
				err = f.c.ContainerCreate(container, headers)
				return shouldRetry(err)
			})
			if err == nil {
				fs.Infof(f, "Container %q created", container)
			}
		}
		return err
	}, nil)
}
// Rmdir deletes the container if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	container, directory := f.split(dir)
	// Only a container root can be removed; anything else is a no-op
	if container == "" || directory != "" {
		return nil
	}
	return f.cache.Remove(container, func() error {
		delErr := f.pacer.Call(func() (bool, error) {
			return shouldRetry(f.c.ContainerDelete(container))
		})
		if delErr == nil {
			fs.Infof(f, "Container %q removed", container)
		}
		return delErr
	})
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
	// Modification times are stored in object metadata with nanosecond
	// resolution.
	return time.Nanosecond
}
// Purge deletes all the files in the directory
//
// Implemented here so we can make sure we delete directory markers
func (f *Fs) Purge(ctx context.Context, dir string) error {
	container, directory := f.split(dir)
	if container == "" {
		return fs.ErrorListBucketRequired
	}
	// Delete all the files including the directory markers
	//
	// A background goroutine drains toBeDeleted while the listing below
	// feeds it; closing the channel ends DeleteFiles and its error is
	// collected from delErr.
	toBeDeleted := make(chan fs.Object, fs.Config.Transfers)
	delErr := make(chan error, 1)
	go func() {
		delErr <- operations.DeleteFiles(ctx, toBeDeleted)
	}()
	err := f.list(container, directory, f.rootDirectory, false, true, true, func(entry fs.DirEntry) error {
		if o, ok := entry.(*Object); ok {
			toBeDeleted <- o
		}
		return nil
	})
	close(toBeDeleted)
	delError := <-delErr
	// Prefer the listing error; fall back to the deletion error.
	if err == nil {
		err = delError
	}
	if err != nil {
		return err
	}
	// Finally remove the (now empty) container/directory itself.
	return f.Rmdir(ctx, dir)
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	// Check the source is copyable before doing any remote work so we
	// don't create the destination container for a copy that can't happen.
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	dstContainer, dstPath := f.split(remote)
	err := f.makeContainer(ctx, dstContainer)
	if err != nil {
		return nil, err
	}
	srcContainer, srcPath := srcObj.split()
	err = f.pacer.Call(func() (bool, error) {
		var rxHeaders swift.Headers
		rxHeaders, err = f.c.ObjectCopy(srcContainer, srcPath, dstContainer, dstPath, nil)
		return shouldRetryHeaders(rxHeaders, err)
	})
	if err != nil {
		return nil, err
	}
	// Re-read the freshly copied object to return accurate metadata.
	return f.NewObject(ctx, remote)
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	// Only MD5 checksums are available from swift.
	return hash.Set(hash.MD5)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	// The owning backend the object was created from.
	return o.fs
}
// Return a string version
func (o *Object) String() string {
	if o != nil {
		return o.remote
	}
	return "<nil>"
}
// Remote returns the remote path
func (o *Object) Remote() string {
	// Path relative to the root of the Fs.
	return o.remote
}
// Hash returns the Md5sum of an object returning a lowercase hex string
//
// Returns hash.ErrUnsupported for any hash type other than MD5, and an
// empty string (no error) for swift large objects, whose stored hash is
// not the MD5 of the whole content.
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	isDynamicLargeObject, err := o.isDynamicLargeObject()
	if err != nil {
		return "", err
	}
	isStaticLargeObject, err := o.isStaticLargeObject()
	if err != nil {
		return "", err
	}
	if isDynamicLargeObject || isStaticLargeObject {
		fs.Debugf(o, "Returning empty Md5sum for swift large object")
		return "", nil
	}
	// Normalise to lowercase hex as rclone convention requires.
	return strings.ToLower(o.md5), nil
}
// hasHeader checks for the header passed in returning false if the
// object isn't found.
func (o *Object) hasHeader(header string) (bool, error) {
	err := o.readMetaData()
	if err != nil {
		// A missing object simply doesn't have the header.
		if err == fs.ErrorObjectNotFound {
			return false, nil
		}
		return false, err
	}
	// Renamed from the misleading copy-pasted "isDynamicLargeObject":
	// this helper checks for any header, not just the DLO manifest.
	_, found := o.headers[header]
	return found, nil
}
// isDynamicLargeObject checks for X-Object-Manifest header
func (o *Object) isDynamicLargeObject() (bool, error) {
	// DLOs carry a manifest header pointing at their segment prefix.
	return o.hasHeader("X-Object-Manifest")
}
// isStaticLargeObject checks for the X-Static-Large-Object header
func (o *Object) isStaticLargeObject() (bool, error) {
	return o.hasHeader("X-Static-Large-Object")
}
// isInContainerVersioning reports whether the container keeps old
// object versions (signalled by an X-History-Location header).
func (o *Object) isInContainerVersioning(container string) (bool, error) {
	_, rxHeaders, err := o.fs.c.Container(container)
	if err != nil {
		return false, err
	}
	return len(rxHeaders["X-History-Location"]) > 0, nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	// Cached from the listing or from readMetaData.
	return o.size
}
// decodeMetaData sets the metadata in the object from a swift.Object
//
// Sets
//  o.lastModified
//  o.size
//  o.md5
//  o.contentType
func (o *Object) decodeMetaData(info *swift.Object) (err error) {
	o.lastModified = info.LastModified
	o.size = info.Bytes
	o.md5 = info.Hash
	o.contentType = info.ContentType
	// Always succeeds; the error return exists for interface symmetry
	// with other decoders.
	return nil
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
//
// it returns fs.ErrorObjectNotFound if the object isn't found
func (o *Object) readMetaData() (err error) {
	// o.headers doubles as the "already fetched" flag.
	if o.headers != nil {
		return nil
	}
	var info swift.Object
	var h swift.Headers
	container, containerPath := o.split()
	err = o.fs.pacer.Call(func() (bool, error) {
		info, h, err = o.fs.c.Object(container, containerPath)
		return shouldRetryHeaders(h, err)
	})
	if err != nil {
		// Translate the library sentinel into rclone's.
		if err == swift.ObjectNotFound {
			return fs.ErrorObjectNotFound
		}
		return err
	}
	o.headers = h
	err = o.decodeMetaData(&info)
	if err != nil {
		return err
	}
	return nil
}
// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
	// When configured to trust the server, skip the metadata fetch.
	if fs.Config.UseServerModTime {
		return o.lastModified
	}
	err := o.readMetaData()
	if err != nil {
		fs.Debugf(o, "Failed to read metadata: %s", err)
		return o.lastModified
	}
	// Prefer the mtime rclone stored in object metadata; fall back to
	// the HTTP LastModified if it is absent or unparseable.
	modTime, err := o.headers.ObjectMetadata().GetModTime()
	if err != nil {
		// fs.Logf(o, "Failed to read mtime from object: %v", err)
		return o.lastModified
	}
	return modTime
}
// SetModTime sets the modification time of the local fs object
//
// It rewrites the object's metadata headers in place via ObjectUpdate.
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	err := o.readMetaData()
	if err != nil {
		return err
	}
	meta := o.headers.ObjectMetadata()
	meta.SetModTime(modTime)
	newHeaders := meta.ObjectHeaders()
	// Merge the refreshed metadata headers into the cached copy...
	for k, v := range newHeaders {
		o.headers[k] = v
	}
	// Include any other metadata from request
	//
	// ...then copy every X-Object-* header back so the update request
	// carries the full metadata set (swift replaces, not merges, it).
	for k, v := range o.headers {
		if strings.HasPrefix(k, "X-Object-") {
			newHeaders[k] = v
		}
	}
	container, containerPath := o.split()
	return o.fs.pacer.Call(func() (bool, error) {
		err = o.fs.c.ObjectUpdate(container, containerPath, newHeaders)
		return shouldRetry(err)
	})
}
// Storable returns if this object is storable
//
// It compares the Content-Type to directoryMarkerContentType - that
// makes it a directory marker which is not storable.
func (o *Object) Storable() bool {
	isDirMarker := o.contentType == directoryMarkerContentType
	return !isDirMarker
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	// Clamp any Range option to the object size.
	fs.FixRangeOption(options, o.size)
	headers := fs.OpenOptionHeaders(options)
	_, isRanging := headers["Range"]
	container, containerPath := o.split()
	err = o.fs.pacer.Call(func() (bool, error) {
		var rxHeaders swift.Headers
		// Only verify the checksum (3rd arg) when reading the whole
		// object; a ranged read can't match the full-object MD5.
		in, rxHeaders, err = o.fs.c.ObjectOpen(container, containerPath, !isRanging, headers)
		return shouldRetryHeaders(rxHeaders, err)
	})
	return
}
// min returns the smallest of x, y
func min(x, y int64) int64 {
	if y < x {
		return y
	}
	return x
}
// removeSegments removes any old segments from o
//
// if except is passed in then segments with that prefix won't be deleted
func (o *Object) removeSegments(except string) error {
	segmentsContainer, _, err := o.getSegmentsDlo()
	if err != nil {
		return err
	}
	// Segments are stored under the object's remote name, so scope the
	// "except" prefix the same way.
	except = path.Join(o.remote, except)
	// fs.Debugf(o, "segmentsContainer %q prefix %q", segmentsContainer, prefix)
	err = o.fs.listContainerRoot(segmentsContainer, o.remote, "", false, true, true, func(remote string, object *swift.Object, isDirectory bool) error {
		if isDirectory {
			return nil
		}
		if except != "" && strings.HasPrefix(remote, except) {
			// fs.Debugf(o, "Ignoring current segment file %q in container %q", remote, segmentsContainer)
			return nil
		}
		fs.Debugf(o, "Removing segment file %q in container %q", remote, segmentsContainer)
		var err error
		return o.fs.pacer.Call(func() (bool, error) {
			err = o.fs.c.ObjectDelete(segmentsContainer, remote)
			return shouldRetry(err)
		})
	})
	if err != nil {
		return err
	}
	// remove the segments container if empty, ignore errors
	//
	// NotFound/NotEmpty are expected outcomes here, so they are
	// returned without retry and then deliberately discarded below.
	err = o.fs.pacer.Call(func() (bool, error) {
		err = o.fs.c.ContainerDelete(segmentsContainer)
		if err == swift.ContainerNotFound || err == swift.ContainerNotEmpty {
			return false, err
		}
		return shouldRetry(err)
	})
	if err == nil {
		fs.Debugf(o, "Removed empty container %q", segmentsContainer)
	}
	return nil
}
// getSegmentsDlo parses the X-Object-Manifest header of a dynamic
// large object into its segments container and object prefix
// ("container/prefix" form, URL-escaped on the wire).
func (o *Object) getSegmentsDlo() (segmentsContainer string, prefix string, err error) {
	if err = o.readMetaData(); err != nil {
		return
	}
	dirManifest := o.headers["X-Object-Manifest"]
	dirManifest, err = url.PathUnescape(dirManifest)
	if err != nil {
		return
	}
	// The manifest must contain at least one "/" separating container
	// from prefix; anything else is malformed.
	delimiter := strings.Index(dirManifest, "/")
	if len(dirManifest) == 0 || delimiter < 0 {
		err = errors.New("Missing or wrong structure of manifest of Dynamic large object")
		return
	}
	return dirManifest[:delimiter], dirManifest[delimiter+1:], nil
}
// urlEncode encodes a string so that it is a valid URL
//
// We don't use any of Go's standard methods as we need `/` not
// encoded but we need '&' encoded.
func urlEncode(str string) string {
	var out bytes.Buffer
	for _, b := range []byte(str) {
		switch {
		case b >= '0' && b <= '9',
			b >= 'a' && b <= 'z',
			b >= 'A' && b <= 'Z',
			b == '/', b == '.':
			// Safe byte: copy through unchanged.
			_ = out.WriteByte(b)
		default:
			// Everything else becomes a %XX escape.
			_, _ = out.WriteString(fmt.Sprintf("%%%02X", b))
		}
	}
	return out.String()
}
// updateChunks updates the existing object using chunks to a separate
// container. It returns a string which prefixes current segments.
//
// The data is uploaded as numbered segment objects into
// "<container>_segments", then a zero-length manifest object pointing
// at them is written at the real path (a swift Dynamic Large Object).
func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64, contentType string) (string, error) {
	container, containerPath := o.split()
	segmentsContainer := container + "_segments"
	// Create the segmentsContainer if it doesn't exist
	var err error
	err = o.fs.pacer.Call(func() (bool, error) {
		var rxHeaders swift.Headers
		_, rxHeaders, err = o.fs.c.Container(segmentsContainer)
		return shouldRetryHeaders(rxHeaders, err)
	})
	if err == swift.ContainerNotFound {
		headers := swift.Headers{}
		if o.fs.opt.StoragePolicy != "" {
			headers["X-Storage-Policy"] = o.fs.opt.StoragePolicy
		}
		err = o.fs.pacer.Call(func() (bool, error) {
			err = o.fs.c.ContainerCreate(segmentsContainer, headers)
			return shouldRetry(err)
		})
	}
	if err != nil {
		return "", err
	}
	// Upload the chunks
	left := size
	i := 0
	// Timestamp+size prefix keeps this upload's segments distinct from
	// any earlier upload of the same object.
	uniquePrefix := fmt.Sprintf("%s/%d", swift.TimeToFloatString(time.Now()), size)
	segmentsPath := path.Join(containerPath, uniquePrefix)
	in := bufio.NewReader(in0)
	segmentInfos := make([]string, 0, ((size / int64(o.fs.opt.ChunkSize)) + 1))
	for {
		// can we read at least one byte?
		if _, err := in.Peek(1); err != nil {
			if left > 0 {
				return "", err // read less than expected
			}
			fs.Debugf(o, "Uploading segments into %q seems done (%v)", segmentsContainer, err)
			break
		}
		n := int64(o.fs.opt.ChunkSize)
		if size != -1 {
			n = min(left, n)
			headers["Content-Length"] = strconv.FormatInt(n, 10) // set Content-Length as we know it
			left -= n
		}
		segmentReader := io.LimitReader(in, n)
		segmentPath := fmt.Sprintf("%s/%08d", segmentsPath, i)
		fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, segmentsContainer)
		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
			var rxHeaders swift.Headers
			rxHeaders, err = o.fs.c.ObjectPut(segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
			if err == nil {
				// Track uploaded segments so they can be removed on failure.
				segmentInfos = append(segmentInfos, segmentPath)
			}
			return shouldRetryHeaders(rxHeaders, err)
		})
		if err != nil {
			deleteChunks(o, segmentsContainer, segmentInfos)
			segmentInfos = nil
			return "", err
		}
		i++
	}
	// Upload the manifest
	headers["X-Object-Manifest"] = urlEncode(fmt.Sprintf("%s/%s", segmentsContainer, segmentsPath))
	headers["Content-Length"] = "0" // set Content-Length as we know it
	emptyReader := bytes.NewReader(nil)
	err = o.fs.pacer.Call(func() (bool, error) {
		var rxHeaders swift.Headers
		rxHeaders, err = o.fs.c.ObjectPut(container, containerPath, emptyReader, true, "", contentType, headers)
		return shouldRetryHeaders(rxHeaders, err)
	})
	if err != nil {
		// Manifest failed: clean up the orphaned segments.
		deleteChunks(o, segmentsContainer, segmentInfos)
		segmentInfos = nil
	}
	return uniquePrefix + "/", err
}
// deleteChunks best-effort deletes the given segment objects from the
// segments container, logging (not returning) any individual failures.
func deleteChunks(o *Object, segmentsContainer string, segmentInfos []string) {
	// Ranging over a nil/empty slice is a no-op, so the previous
	// `!= nil && len(...) > 0` guard was redundant.
	for _, v := range segmentInfos {
		fs.Debugf(o, "Delete segment file %q on %q", v, segmentsContainer)
		e := o.fs.c.ObjectDelete(segmentsContainer, v)
		if e != nil {
			fs.Errorf(o, "Error occurred in delete segment file %q on %q, error: %q", v, segmentsContainer, e)
		}
	}
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	container, containerPath := o.split()
	if container == "" {
		return fserrors.FatalError(errors.New("can't upload files to the root"))
	}
	err := o.fs.makeContainer(ctx, container)
	if err != nil {
		return err
	}
	size := src.Size()
	modTime := src.ModTime(ctx)
	// Note whether this is a dynamic large object before starting
	// (the upload below overwrites the manifest, so check now).
	isDynamicLargeObject, err := o.isDynamicLargeObject()
	if err != nil {
		return err
	}
	// Set the mtime
	m := swift.Metadata{}
	m.SetModTime(modTime)
	contentType := fs.MimeType(ctx, src)
	headers := m.ObjectHeaders()
	fs.OpenOptionAddHeaders(options, headers)
	uniquePrefix := ""
	// Chunked path: known size over the chunk threshold, or unknown
	// size (-1) unless chunking is disabled.
	if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
		uniquePrefix, err = o.updateChunks(in, headers, size, contentType)
		if err != nil {
			return err
		}
		o.headers = nil // wipe old metadata
	} else {
		var inCount *readers.CountingReader
		if size >= 0 {
			headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length if we know it
		} else {
			// otherwise count the size for later
			inCount = readers.NewCountingReader(in)
			in = inCount
		}
		var rxHeaders swift.Headers
		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
			rxHeaders, err = o.fs.c.ObjectPut(container, containerPath, in, true, "", contentType, headers)
			return shouldRetryHeaders(rxHeaders, err)
		})
		if err != nil {
			return err
		}
		// set Metadata since ObjectPut checked the hash and length so we know the
		// object has been safely uploaded
		o.lastModified = modTime
		o.size = size
		o.md5 = rxHeaders["ETag"]
		o.contentType = contentType
		o.headers = headers
		if inCount != nil {
			// update the size if streaming from the reader
			o.size = int64(inCount.BytesRead())
		}
	}
	// If file was a dynamic large object then remove old/all segments
	if isDynamicLargeObject {
		err = o.removeSegments(uniquePrefix)
		if err != nil {
			fs.Logf(o, "Failed to remove old segments - carrying on with upload: %v", err)
		}
	}
	// Read the metadata from the newly created object if necessary
	return o.readMetaData()
}
// Remove an object
//
// For dynamic large objects the manifest is deleted first, then the
// segments — unless the container does versioning, in which case the
// segments are kept for the older versions.
func (o *Object) Remove(ctx context.Context) (err error) {
	container, containerPath := o.split()
	// Remove file/manifest first
	err = o.fs.pacer.Call(func() (bool, error) {
		err = o.fs.c.ObjectDelete(container, containerPath)
		return shouldRetry(err)
	})
	if err != nil {
		return err
	}
	isDynamicLargeObject, err := o.isDynamicLargeObject()
	if err != nil {
		return err
	}
	// ...then segments if required
	if isDynamicLargeObject {
		isInContainerVersioning, err := o.isInContainerVersioning(container)
		if err != nil {
			return err
		}
		if !isInContainerVersioning {
			err = o.removeSegments("")
			if err != nil {
				return err
			}
		}
	}
	return nil
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
	// Cached Content-Type from the listing or metadata fetch.
	return o.contentType
}
// Check the interfaces are satisfied at compile time.
var (
	_ fs.Fs          = &Fs{}
	_ fs.Purger      = &Fs{}
	_ fs.PutStreamer = &Fs{}
	_ fs.Copier      = &Fs{}
	_ fs.ListRer     = &Fs{}
	_ fs.Object      = &Object{}
	_ fs.MimeTyper   = &Object{}
)
|
package main
import (
	"fmt"
	"net/http"
	"strconv"
	"strings"
	"time"

	. "github.com/Deleplace/programming-idioms/pig"
	"google.golang.org/appengine/log"
)
// implSave dispatches an implementation form submission: creation when
// impl_id is absent, update otherwise.
func implSave(w http.ResponseWriter, r *http.Request) error {
	idiomIDStr := r.FormValue("idiom_id")
	existingIDStr := r.FormValue("impl_id")
	username := Truncate(r.FormValue("user_nickname"), 30)
	// Anonymous edits are only allowed when the toggle is on.
	if username == "" && !toggles["anonymousWrite"] {
		return PiError{"Username is mandatory. No anonymous edit.", http.StatusBadRequest}
	}
	setNicknameCookie(w, username)
	if existingIDStr != "" {
		return existingImplSave(w, r, username, idiomIDStr, existingIDStr)
	}
	return newImplSave(w, r, username, idiomIDStr)
}
// newImplSave handles creation of a new implementation for an idiom:
// it validates toggles/parameters/language/URLs, cleans the submitted
// fields, appends the new Impl, saves the idiom, and redirects to it.
func newImplSave(w http.ResponseWriter, r *http.Request, username string, idiomIDStr string) error {
	if err := togglesMissing(w, r, "implAddition"); err != nil {
		return err
	}
	if err := parametersMissing(w, r, "impl_language"); err != nil {
		return err
	}
	ctx := r.Context()
	language := NormLang(r.FormValue("impl_language"))
	imports := r.FormValue("impl_imports")
	code := r.FormValue("impl_code")
	comment := r.FormValue("impl_comment")
	attributionURL := r.FormValue("impl_attribution_url")
	demoURL := r.FormValue("impl_demo_url")
	docURL := r.FormValue("impl_doc_url")
	editSummary := fmt.Sprintf("New %s implementation by user [%s]", PrintNiceLang(language), username)
	// Input cleansing: bound field sizes and trim stray whitespace —
	// whitespace in pasted URLs would otherwise fail URL validation.
	trim := strings.TrimSpace
	imports = trim(Truncate(imports, 200))
	code = TruncateBytes(NoCR(code), 500)
	comment = trim(TruncateBytes(comment, 500))
	attributionURL = trim(Truncate(attributionURL, 250))
	demoURL = trim(Truncate(demoURL, 250))
	docURL = trim(Truncate(docURL, 250))
	log.Infof(ctx, "[%s] is creating new %s impl for idiom %v", username, PrintNiceLang(language), idiomIDStr)
	if !StringSliceContains(AllLanguages(), language) {
		return PiError{fmt.Sprintf("Sorry, [%v] is currently not a supported language. Supported languages are %v.", r.FormValue("impl_language"), AllNiceLangs), http.StatusBadRequest}
	}
	idiomID := String2Int(idiomIDStr)
	if idiomID == -1 {
		return PiError{idiomIDStr + " is not a valid idiom id.", http.StatusBadRequest}
	}
	key, idiom, err := dao.getIdiom(ctx, idiomID)
	if err != nil {
		return PiError{"Could not find idiom " + idiomIDStr, http.StatusNotFound}
	}
	if err := validateURLFormatOrEmpty(attributionURL); err != nil {
		return PiError{"Can't accept URL [" + attributionURL + "]", http.StatusBadRequest}
	}
	if err := validateURLFormatOrEmpty(demoURL); err != nil {
		return PiError{"Can't accept URL [" + demoURL + "]", http.StatusBadRequest}
	}
	implID, err := dao.nextImplID(ctx)
	if err != nil {
		return err
	}
	now := time.Now()
	newImpl := Impl{
		Id:                     implID,
		OrigId:                 implID,
		Author:                 username,
		CreationDate:           now,
		LastEditor:             username,
		LanguageName:           language,
		ImportsBlock:           imports,
		CodeBlock:              code,
		AuthorComment:          comment,
		OriginalAttributionURL: attributionURL,
		DemoURL:                demoURL,
		DocumentationURL:       docURL,
		Version:                1,
		VersionDate:            now,
	}
	if IsAdmin(r) {
		// 2016-10: only Admin may set an impl picture
		newImpl.PictureURL = r.FormValue("impl_picture_url")
	}
	idiom.Implementations = append(idiom.Implementations, newImpl)
	idiom.EditSummary = editSummary
	idiom.LastEditedImplID = implID
	err = dao.saveExistingIdiom(ctx, key, idiom)
	if err != nil {
		return err
	}
	http.Redirect(w, r, NiceImplURL(idiom, implID, language), http.StatusFound)
	return nil
}
// existingImplSave handles an edit to an existing implementation,
// enforcing protection flags and optimistic version checking before
// saving the modified idiom.
func existingImplSave(w http.ResponseWriter, r *http.Request, username string, idiomIDStr string, existingImplIDStr string) error {
	if err := togglesMissing(w, r, "implEditing"); err != nil {
		return err
	}
	if err := parametersMissing(w, r, "impl_version"); err != nil {
		return err
	}
	ctx := r.Context()
	imports := r.FormValue("impl_imports")
	code := r.FormValue("impl_code")
	comment := r.FormValue("impl_comment")
	attributionURL := r.FormValue("impl_attribution_url")
	demoURL := r.FormValue("impl_demo_url")
	docURL := r.FormValue("impl_doc_url")
	// Input cleansing: bound field sizes and trim stray whitespace —
	// whitespace in pasted URLs would otherwise fail URL validation.
	trim := strings.TrimSpace
	imports = trim(Truncate(imports, 200))
	code = TruncateBytes(NoCR(code), 500)
	comment = trim(TruncateBytes(comment, 500))
	attributionURL = trim(Truncate(attributionURL, 250))
	demoURL = trim(Truncate(demoURL, 250))
	docURL = trim(Truncate(docURL, 250))
	log.Infof(ctx, "[%s] is updating impl %s of idiom %s", username, existingImplIDStr, idiomIDStr)
	idiomID := String2Int(idiomIDStr)
	if idiomID == -1 {
		return PiError{idiomIDStr + " is not a valid idiom id.", http.StatusBadRequest}
	}
	key, idiom, err := dao.getIdiom(ctx, idiomID)
	if err != nil {
		return PiError{"Could not find implementation " + existingImplIDStr + " for idiom " + idiomIDStr, http.StatusNotFound}
	}
	implID := String2Int(existingImplIDStr)
	if implID == -1 {
		return PiError{existingImplIDStr + " is not a valid implementation id.", http.StatusBadRequest}
	}
	_, impl, _ := idiom.FindImplInIdiom(implID)
	// Guard against a nil impl (id not present in this idiom) which
	// would otherwise panic on impl.Protected below.
	if impl == nil {
		return PiError{"Could not find implementation " + existingImplIDStr + " for idiom " + idiomIDStr, http.StatusNotFound}
	}
	isAdmin := IsAdmin(r)
	if idiom.Protected && !isAdmin {
		return PiError{"Can't edit protected idiom " + idiomIDStr, http.StatusUnauthorized}
	}
	if impl.Protected && !isAdmin {
		return PiError{"Can't edit protected impl " + existingImplIDStr, http.StatusUnauthorized}
	}
	if isAdmin {
		wasProtected := impl.Protected
		impl.Protected = r.FormValue("impl_protected") != ""
		if wasProtected && !impl.Protected {
			log.Infof(ctx, "[%v] unprotects impl %v of idiom %v", username, existingImplIDStr, idiomIDStr)
		}
		if !wasProtected && impl.Protected {
			log.Infof(ctx, "[%v] protects impl %v of idiom %v", username, existingImplIDStr, idiomIDStr)
		}
	}
	// Optimistic concurrency: reject the edit if someone saved a newer
	// version since this form was loaded.
	if r.FormValue("impl_version") != strconv.Itoa(impl.Version) {
		return PiError{fmt.Sprintf("Implementation has been concurrently modified (editing version %v, current version is %v)", r.FormValue("impl_version"), impl.Version), http.StatusConflict}
	}
	if err := validateURLFormatOrEmpty(attributionURL); err != nil {
		return PiError{"Can't accept URL [" + attributionURL + "]", http.StatusBadRequest}
	}
	if err := validateURLFormatOrEmpty(demoURL); err != nil {
		return PiError{"Can't accept URL [" + demoURL + "]", http.StatusBadRequest}
	}
	idiom.EditSummary = "[" + PrintNiceLang(impl.LanguageName) + "] " + r.FormValue("edit_summary")
	idiom.LastEditedImplID = implID
	impl.ImportsBlock = imports
	impl.CodeBlock = code
	impl.AuthorComment = comment
	impl.LastEditor = username
	impl.OriginalAttributionURL = attributionURL
	impl.DemoURL = demoURL
	impl.DocumentationURL = docURL
	impl.Version = impl.Version + 1
	impl.VersionDate = time.Now()
	if isAdmin {
		// 2016-10: only Admin may set an impl picture
		impl.PictureURL = r.FormValue("impl_picture_url")
	}
	err = dao.saveExistingIdiom(ctx, key, idiom)
	if err != nil {
		return err
	}
	http.Redirect(w, r, NiceImplURL(idiom, implID, impl.LanguageName), http.StatusFound)
	return nil
}
Input cleansing: trim leading and trailing whitespace from the user-submitted fields (URLs in particular, since stray spaces in a pasted URL would fail validation).
package main
import (
"fmt"
"net/http"
"strconv"
"strings"
"time"
. "github.com/Deleplace/programming-idioms/pig"
"google.golang.org/appengine/log"
)
// implSave dispatches an implementation form submission: creation when
// impl_id is absent, update otherwise.
func implSave(w http.ResponseWriter, r *http.Request) error {
	idiomIDStr := r.FormValue("idiom_id")
	existingIDStr := r.FormValue("impl_id")
	username := Truncate(r.FormValue("user_nickname"), 30)
	// Anonymous edits are only allowed when the toggle is on.
	if username == "" && !toggles["anonymousWrite"] {
		return PiError{"Username is mandatory. No anonymous edit.", http.StatusBadRequest}
	}
	setNicknameCookie(w, username)
	if existingIDStr != "" {
		return existingImplSave(w, r, username, idiomIDStr, existingIDStr)
	}
	return newImplSave(w, r, username, idiomIDStr)
}
// newImplSave handles creation of a new implementation for an idiom:
// it validates toggles/parameters/language/URLs, cleans the submitted
// fields, appends the new Impl, saves the idiom, and redirects to it.
func newImplSave(w http.ResponseWriter, r *http.Request, username string, idiomIDStr string) error {
	if err := togglesMissing(w, r, "implAddition"); err != nil {
		return err
	}
	if err := parametersMissing(w, r, "impl_language"); err != nil {
		return err
	}
	ctx := r.Context()
	language := NormLang(r.FormValue("impl_language"))
	imports := r.FormValue("impl_imports")
	code := r.FormValue("impl_code")
	comment := r.FormValue("impl_comment")
	attributionURL := r.FormValue("impl_attribution_url")
	demoURL := r.FormValue("impl_demo_url")
	docURL := r.FormValue("impl_doc_url")
	editSummary := fmt.Sprintf("New %s implementation by user [%s]", PrintNiceLang(language), username)
	// Input cleansing: bound field sizes and trim stray whitespace
	// (whitespace in a pasted URL would fail validation below).
	trim := strings.TrimSpace
	imports = trim(Truncate(imports, 200))
	code = TruncateBytes(NoCR(code), 500)
	comment = trim(TruncateBytes(comment, 500))
	attributionURL = trim(Truncate(attributionURL, 250))
	demoURL = trim(Truncate(demoURL, 250))
	docURL = trim(Truncate(docURL, 250))
	log.Infof(ctx, "[%s] is creating new %s impl for idiom %v", username, PrintNiceLang(language), idiomIDStr)
	if !StringSliceContains(AllLanguages(), language) {
		return PiError{fmt.Sprintf("Sorry, [%v] is currently not a supported language. Supported languages are %v.", r.FormValue("impl_language"), AllNiceLangs), http.StatusBadRequest}
	}
	idiomID := String2Int(idiomIDStr)
	if idiomID == -1 {
		return PiError{idiomIDStr + " is not a valid idiom id.", http.StatusBadRequest}
	}
	key, idiom, err := dao.getIdiom(ctx, idiomID)
	if err != nil {
		return PiError{"Could not find idiom " + idiomIDStr, http.StatusNotFound}
	}
	if err := validateURLFormatOrEmpty(attributionURL); err != nil {
		return PiError{"Can't accept URL [" + attributionURL + "]", http.StatusBadRequest}
	}
	if err := validateURLFormatOrEmpty(demoURL); err != nil {
		return PiError{"Can't accept URL [" + demoURL + "]", http.StatusBadRequest}
	}
	implID, err := dao.nextImplID(ctx)
	if err != nil {
		return err
	}
	now := time.Now()
	newImpl := Impl{
		Id:                     implID,
		OrigId:                 implID,
		Author:                 username,
		CreationDate:           now,
		LastEditor:             username,
		LanguageName:           language,
		ImportsBlock:           imports,
		CodeBlock:              code,
		AuthorComment:          comment,
		OriginalAttributionURL: attributionURL,
		DemoURL:                demoURL,
		DocumentationURL:       docURL,
		Version:                1,
		VersionDate:            now,
	}
	if IsAdmin(r) {
		// 2016-10: only Admin may set an impl picture
		newImpl.PictureURL = r.FormValue("impl_picture_url")
	}
	idiom.Implementations = append(idiom.Implementations, newImpl)
	idiom.EditSummary = editSummary
	idiom.LastEditedImplID = implID
	err = dao.saveExistingIdiom(ctx, key, idiom)
	if err != nil {
		return err
	}
	http.Redirect(w, r, NiceImplURL(idiom, implID, language), http.StatusFound)
	return nil
}
// existingImplSave handles an edit to an existing implementation,
// enforcing protection flags and optimistic version checking before
// saving the modified idiom.
func existingImplSave(w http.ResponseWriter, r *http.Request, username string, idiomIDStr string, existingImplIDStr string) error {
	if err := togglesMissing(w, r, "implEditing"); err != nil {
		return err
	}
	if err := parametersMissing(w, r, "impl_version"); err != nil {
		return err
	}
	ctx := r.Context()
	imports := r.FormValue("impl_imports")
	code := r.FormValue("impl_code")
	comment := r.FormValue("impl_comment")
	attributionURL := r.FormValue("impl_attribution_url")
	demoURL := r.FormValue("impl_demo_url")
	docURL := r.FormValue("impl_doc_url")
	// Input cleansing: bound field sizes and trim stray whitespace.
	trim := strings.TrimSpace
	imports = trim(Truncate(imports, 200))
	code = TruncateBytes(NoCR(code), 500)
	comment = trim(TruncateBytes(comment, 500))
	attributionURL = trim(Truncate(attributionURL, 250))
	demoURL = trim(Truncate(demoURL, 250))
	docURL = trim(Truncate(docURL, 250))
	log.Infof(ctx, "[%s] is updating impl %s of idiom %s", username, existingImplIDStr, idiomIDStr)
	idiomID := String2Int(idiomIDStr)
	if idiomID == -1 {
		return PiError{idiomIDStr + " is not a valid idiom id.", http.StatusBadRequest}
	}
	key, idiom, err := dao.getIdiom(ctx, idiomID)
	if err != nil {
		return PiError{"Could not find implementation " + existingImplIDStr + " for idiom " + idiomIDStr, http.StatusNotFound}
	}
	implID := String2Int(existingImplIDStr)
	if implID == -1 {
		return PiError{existingImplIDStr + " is not a valid implementation id.", http.StatusBadRequest}
	}
	_, impl, _ := idiom.FindImplInIdiom(implID)
	// Guard against a nil impl (id not present in this idiom) which
	// would otherwise panic on impl.Protected below.
	if impl == nil {
		return PiError{"Could not find implementation " + existingImplIDStr + " for idiom " + idiomIDStr, http.StatusNotFound}
	}
	isAdmin := IsAdmin(r)
	if idiom.Protected && !isAdmin {
		return PiError{"Can't edit protected idiom " + idiomIDStr, http.StatusUnauthorized}
	}
	if impl.Protected && !isAdmin {
		return PiError{"Can't edit protected impl " + existingImplIDStr, http.StatusUnauthorized}
	}
	if isAdmin {
		wasProtected := impl.Protected
		impl.Protected = r.FormValue("impl_protected") != ""
		if wasProtected && !impl.Protected {
			log.Infof(ctx, "[%v] unprotects impl %v of idiom %v", username, existingImplIDStr, idiomIDStr)
		}
		if !wasProtected && impl.Protected {
			log.Infof(ctx, "[%v] protects impl %v of idiom %v", username, existingImplIDStr, idiomIDStr)
		}
	}
	// Optimistic concurrency: reject the edit if someone saved a newer
	// version since this form was loaded.
	if r.FormValue("impl_version") != strconv.Itoa(impl.Version) {
		return PiError{fmt.Sprintf("Implementation has been concurrently modified (editing version %v, current version is %v)", r.FormValue("impl_version"), impl.Version), http.StatusConflict}
	}
	if err := validateURLFormatOrEmpty(attributionURL); err != nil {
		return PiError{"Can't accept URL [" + attributionURL + "]", http.StatusBadRequest}
	}
	if err := validateURLFormatOrEmpty(demoURL); err != nil {
		return PiError{"Can't accept URL [" + demoURL + "]", http.StatusBadRequest}
	}
	idiom.EditSummary = "[" + PrintNiceLang(impl.LanguageName) + "] " + r.FormValue("edit_summary")
	idiom.LastEditedImplID = implID
	impl.ImportsBlock = imports
	impl.CodeBlock = code
	impl.AuthorComment = comment
	impl.LastEditor = username
	impl.OriginalAttributionURL = attributionURL
	impl.DemoURL = demoURL
	impl.DocumentationURL = docURL
	impl.Version = impl.Version + 1
	impl.VersionDate = time.Now()
	if isAdmin {
		// 2016-10: only Admin may set an impl picture
		impl.PictureURL = r.FormValue("impl_picture_url")
	}
	err = dao.saveExistingIdiom(ctx, key, idiom)
	if err != nil {
		return err
	}
	http.Redirect(w, r, NiceImplURL(idiom, implID, impl.LanguageName), http.StatusFound)
	return nil
}
|
package main
import (
"flag"
"fmt"
"net"
"net/http"
_ "net/http/pprof"
"os"
"os/signal"
"syscall"
"time"
pb "github.com/viru/berrybot/proto"
log "github.com/Sirupsen/logrus"
"github.com/kidoman/embd"
_ "github.com/kidoman/embd/host/rpi" // This loads the RPi driver
"google.golang.org/grpc"
)
// server is used to implement hellowrld.GreeterServer.
type server struct {
	front, rear *echo  // distance sensors at the front and rear of the robot
	driver      driver // left/right engine pair used to move
}
// echo drives one ultrasonic distance sensor through a trigger pin and
// an echo pin. NOTE(review): presumably an HC-SR04-style sensor given
// the trigger/echo protocol — confirm against the hardware.
type echo struct {
	name       string
	echo       embd.DigitalPin // input: pulsed high for the duration of the echo
	trig       embd.DigitalPin // output: pulsed high to start a measurement
	quit, done chan bool       // shutdown handshake for runDistancer
	dist       int64           // last measured distance in cm (see runDistancer)
}
// newEcho initialises the trigger and echo GPIO pins for a distance
// sensor and returns an echo ready for runDistancer.
func newEcho(name string, trigPin, echoPin int) (*echo, error) {
	var e echo
	e.name = name
	e.quit = make(chan bool)
	e.done = make(chan bool)
	var err error
	e.trig, err = embd.NewDigitalPin(trigPin)
	if err != nil {
		return nil, fmt.Errorf("can't init trigger pin: %v", err)
	}
	e.echo, err = embd.NewDigitalPin(echoPin)
	if err != nil {
		return nil, fmt.Errorf("can't init echo pin: %v", err)
	}
	// Set direction.
	if err := e.trig.SetDirection(embd.Out); err != nil {
		return nil, fmt.Errorf("can't set trigger direction: %v", err)
	}
	if err := e.echo.SetDirection(embd.In); err != nil {
		return nil, fmt.Errorf("can't set echo direction: %v", err)
	}
	return &e, nil
}
// runDistancer measures distance every 500ms and stores the result in
// e.dist (cm) until e.quit is signalled. Run it in its own goroutine;
// it acknowledges shutdown on e.done.
func (e *echo) runDistancer() {
	if err := e.trig.Write(embd.Low); err != nil {
		log.Warnf("can't set trigger to low: %v", err)
	}
	// Let the sensor settle before the first measurement.
	time.Sleep(time.Second * 1)
	tick := time.NewTicker(time.Millisecond * 500)
	defer tick.Stop()
	for {
		select {
		case <-e.quit:
			e.done <- true
			return
		case <-tick.C:
			log.Infof("%s: measuring...", e.name)
			// A 10µs high pulse on the trigger starts a measurement.
			if err := e.trig.Write(embd.High); err != nil {
				log.Warnf("can't set trigger to high: %v", err)
			}
			time.Sleep(time.Microsecond * 10)
			if err := e.trig.Write(embd.Low); err != nil {
				log.Warnf("can't set trigger to low: %v", err)
			}
			dur, err := e.echo.TimePulse(embd.High)
			if err != nil {
				log.Warnf("can't time pulse: %v", err)
				// Don't record a distance derived from a failed read.
				continue
			}
			// Echo pulse time -> cm: µs * 34 (cm/ms sound speed) / 1000,
			// halved for the round trip. Computed once instead of twice.
			dist := dur.Nanoseconds() / 1000 * 34 / 1000 / 2
			log.Infof("%s: distance: %dcm", e.name, dist)
			e.dist = dist
		}
	}
}
// close stops the measurement goroutine and releases both GPIO pins.
func (e *echo) close() {
	e.quit <- true // ask runDistancer to stop...
	<-e.done       // ...and wait until it has acknowledged
	e.echo.Close()
	e.trig.Close()
}
// driver groups the two drive engines; steering is done by powering
// them asymmetrically (see server.drive).
type driver struct {
	left, right *engine
}
// Minimum obstacle clearances (same unit as echo.dist, i.e. cm)
// required before drive will move in the corresponding direction.
const (
	safeStraightDist = 10
	safeTurningDist  = 5
)
// drive translates a joystick-style direction (Dx/Dy with a ±5 dead
// zone) into power (pwr) and direction (fwd) pin states on the two
// engines. Moves toward an obstacle are suppressed unless the relevant
// sensor reports more than the safe distance.
//
// NOTE(review): errors from the pin Write calls are ignored throughout.
func (s *server) drive(dir *pb.Direction) {
	switch {
	case dir.Dy > -5 && dir.Dy < 5 && dir.Dx > -5 && dir.Dx < 5:
		// Full stop.
		s.driver.left.pwr.Write(embd.Low)
		s.driver.right.pwr.Write(embd.Low)
		log.Info("driver STOP")
	case dir.Dy > 5 && dir.Dx > -5 && dir.Dx < 5 && s.front.dist > safeStraightDist:
		// Forward.
		s.driver.left.pwr.Write(embd.High)
		s.driver.left.fwd.Write(embd.High)
		s.driver.right.pwr.Write(embd.High)
		s.driver.right.fwd.Write(embd.High)
		log.Info("driver FWD")
	case dir.Dy < -5 && dir.Dx > -5 && dir.Dx < 5 && s.rear.dist > safeStraightDist:
		// Backward.
		s.driver.left.pwr.Write(embd.High)
		s.driver.left.fwd.Write(embd.Low)
		s.driver.right.pwr.Write(embd.High)
		s.driver.right.fwd.Write(embd.Low)
		log.Info("driver BACK")
	case dir.Dx > 5 && dir.Dy > -5 && dir.Dy < 5:
		// Sharp right: wheels spin in opposite directions.
		s.driver.left.pwr.Write(embd.High)
		s.driver.left.fwd.Write(embd.High)
		s.driver.right.pwr.Write(embd.High)
		s.driver.right.fwd.Write(embd.Low)
		log.Info("driver TURN RIGHT")
	case dir.Dx < -5 && dir.Dy > -5 && dir.Dy < 5:
		// Sharp left: wheels spin in opposite directions.
		s.driver.left.pwr.Write(embd.High)
		s.driver.left.fwd.Write(embd.Low)
		s.driver.right.pwr.Write(embd.High)
		s.driver.right.fwd.Write(embd.High)
		log.Info("driver TURN LEFT")
	case dir.Dx > 5 && dir.Dy > 5 && s.front.dist > safeTurningDist:
		// Forward + right: only the left wheel is powered.
		s.driver.left.pwr.Write(embd.High)
		s.driver.left.fwd.Write(embd.High)
		s.driver.right.pwr.Write(embd.Low)
		s.driver.right.fwd.Write(embd.High)
		log.Info("driver FWD RIGHT")
	case dir.Dx < -5 && dir.Dy > 5 && s.front.dist > safeTurningDist:
		// Forward + left: only the right wheel is powered.
		s.driver.left.pwr.Write(embd.Low)
		s.driver.left.fwd.Write(embd.High)
		s.driver.right.pwr.Write(embd.High)
		s.driver.right.fwd.Write(embd.High)
		log.Info("driver FWD LEFT")
	case dir.Dx > 5 && dir.Dy < -5 && s.rear.dist > safeTurningDist:
		// Backward + right: only the left wheel is powered.
		s.driver.left.pwr.Write(embd.High)
		s.driver.left.fwd.Write(embd.Low)
		s.driver.right.pwr.Write(embd.Low)
		s.driver.right.fwd.Write(embd.Low)
		log.Info("driver BACK RIGHT")
	case dir.Dx < -5 && dir.Dy < -5 && s.rear.dist > safeTurningDist:
		// Backward + left: only the right wheel is powered.
		s.driver.left.pwr.Write(embd.Low)
		s.driver.left.fwd.Write(embd.Low)
		s.driver.right.pwr.Write(embd.High)
		s.driver.right.fwd.Write(embd.Low)
		log.Info("driver BACK LEFT")
	}
}
// engine is one DC motor controlled by two GPIO pins: pwr switches the
// motor on/off and fwd selects the rotation direction.
type engine struct {
	fwd, pwr embd.DigitalPin
}
// newEngine initialises the power and forward GPIO pins for a single
// motor and configures both as outputs. It returns an error if either
// pin cannot be created or configured.
func newEngine(pwrPin, fwdPin int) (*engine, error) {
	pwr, err := embd.NewDigitalPin(pwrPin)
	if err != nil {
		return nil, fmt.Errorf("can't init power pin: %v", err)
	}
	fwd, err := embd.NewDigitalPin(fwdPin)
	if err != nil {
		return nil, fmt.Errorf("can't init forward pin: %v", err)
	}
	// Both pins drive the motor controller, so both are outputs.
	if err := pwr.SetDirection(embd.Out); err != nil {
		return nil, fmt.Errorf("can't set power direction: %v", err)
	}
	if err := fwd.SetDirection(embd.Out); err != nil {
		return nil, fmt.Errorf("can't set forward direction: %v", err)
	}
	return &engine{pwr: pwr, fwd: fwd}, nil
}
// close releases both GPIO pins backing the engine.
func (e *engine) close() {
	e.pwr.Close()
	e.fwd.Close()
}
// Sensor position identifiers.
// NOTE(review): these constants are not referenced anywhere in this
// view of the file — confirm whether they are used elsewhere.
const (
	sensorUnknown = iota
	sensorFront
	sensorRear
)
// Drive is the bidirectional gRPC streaming handler: a reader goroutine
// applies each incoming Direction to the motors, while this loop sends
// telemetry (speed plus front/rear distances) once a second until the
// client stream ends, signalled via waitc.
// NOTE(review): s.front.dist / s.rear.dist are read here while the
// distancer goroutines write them, without synchronization.
func (s *server) Drive(stream pb.Driver_DriveServer) error {
	waitc := make(chan struct{})
	go func() {
		for {
			d, err := stream.Recv()
			if err != nil {
				// Any receive error (including a clean EOF) ends the session.
				log.Warnf("ERR from client: %v", err)
				close(waitc)
				return
			}
			log.WithFields(log.Fields{
				"dx": d.Dx,
				"dy": d.Dy,
			}).Info("Direction")
			s.drive(d)
		}
	}()
	for {
		select {
		case <-time.After(time.Second):
			if err := stream.Send(&pb.Telemetry{Speed: 1, DistFront: int32(s.front.dist), DistRear: int32(s.rear.dist)}); err != nil {
				log.Errorf("can't send telemetry: %v", err)
				return err
			}
			log.Info("Sending telemetry!")
		case <-waitc:
			log.Info("got ERR from client, closing sending loop")
			return nil
		}
	}
}
// grpcPort is the TCP port the gRPC Driver service listens on; main
// also broadcasts it over UDP so clients can discover the robot.
var grpcPort = flag.String("grpc-port", "31337", "gRPC listen port")
// main wires up the robot: a pprof HTTP endpoint, GPIO, the two
// ultrasonic sensors and two motors, the gRPC Driver service, a UDP
// broadcast advertising the gRPC port for discovery, and a signal
// handler for shutdown.
func main() {
	flag.Parse()
	// Expose net/http/pprof (blank-imported) for live profiling.
	go http.ListenAndServe(":9191", nil)
	// Initialize GPIO.
	var err error
	if err = embd.InitGPIO(); err != nil {
		log.Fatalf("Can't init GPIO: %v", err)
	}
	defer embd.CloseGPIO()
	// Ultrasonic sensors: (trigger pin, echo pin).
	front, err := newEcho("front", 9, 10)
	if err != nil {
		log.Fatalf("Can't init front echo: %v", err)
	}
	defer front.close()
	rear, err := newEcho("rear", 19, 20)
	if err != nil {
		log.Fatalf("Can't init rear echo: %v", err)
	}
	defer rear.close()
	go front.runDistancer()
	go rear.runDistancer()
	// Motors: (power pin, forward pin).
	left, err := newEngine(23, 4)
	if err != nil {
		log.Fatalf("Can't init left engine: %v", err)
	}
	defer left.close()
	right, err := newEngine(24, 17)
	if err != nil {
		log.Fatalf("Can't init right engine: %v", err)
	}
	defer right.close()
	// Listen for GRPC connections.
	lis, err := net.Listen("tcp", ":"+*grpcPort)
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	defer lis.Close()
	srv := server{front: front, rear: rear, driver: driver{left: left, right: right}}
	s := grpc.NewServer()
	pb.RegisterDriverServer(s, &srv)
	// Open broadcast connection.
	bcast, err := net.ListenPacket("udp", ":0")
	if err != nil {
		log.Fatal(err)
	}
	defer bcast.Close()
	broadcastAddr := "255.255.255.255:8032"
	dst, err := net.ResolveUDPAddr("udp", broadcastAddr)
	if err != nil {
		log.Fatal(err)
	}
	// Periodically announce our gRPC port on the LAN broadcast address.
	go func() {
		log.Infof("Starting to broadcast our port %s on %s", *grpcPort, broadcastAddr)
		for {
			if _, err := bcast.WriteTo([]byte(*grpcPort), dst); err != nil {
				log.Warn(err)
			}
			time.Sleep(time.Second)
		}
	}()
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGTERM, syscall.SIGINT)
	// The handler repeats the deferred cleanup by hand because os.Exit
	// skips deferred calls.
	go func() {
		sig := <-c
		log.Infof("Got %s, trying to shutdown gracefully", sig.String())
		front.close()
		rear.close()
		left.close()
		right.close()
		embd.CloseGPIO()
		lis.Close()
		bcast.Close()
		os.Exit(0)
	}()
	// Start serving GRPC.
	log.Fatal(s.Serve(lis))
}
Add a safety stop that halts the motors when incoming direction updates stall
package main
import (
"flag"
"fmt"
"net"
"net/http"
_ "net/http/pprof"
"os"
"os/signal"
"syscall"
"time"
pb "github.com/viru/berrybot/proto"
log "github.com/Sirupsen/logrus"
"github.com/kidoman/embd"
_ "github.com/kidoman/embd/host/rpi" // This loads the RPi driver
"google.golang.org/grpc"
)
// server implements the gRPC Driver service, wiring the distance
// sensors and the motor driver together.
// NOTE(review): shutdown is set by the signal handler but never read in
// this file, and it is accessed across goroutines without
// synchronization — confirm its purpose.
type server struct {
	front, rear *echo
	driver driver
	shutdown bool
}
// echo is one ultrasonic distance sensor: trig fires a measurement
// pulse, echo times the reply, and dist caches the latest reading in
// cm. quit/done coordinate shutdown of the runDistancer goroutine.
// NOTE(review): dist is shared between goroutines without
// synchronization, and last is never assigned in this view of the
// file — confirm both.
type echo struct {
	name string
	echo embd.DigitalPin
	trig embd.DigitalPin
	quit, done chan bool
	dist int64
	last time.Time
}
// newEcho initialises an ultrasonic sensor on the given trigger and
// echo GPIO pins, configuring the trigger as an output and the echo as
// an input, and prepares the quit/done channels used to stop its
// measurement goroutine.
func newEcho(name string, trigPin, echoPin int) (*echo, error) {
	trig, err := embd.NewDigitalPin(trigPin)
	if err != nil {
		return nil, fmt.Errorf("can't init trigger pin: %v", err)
	}
	ec, err := embd.NewDigitalPin(echoPin)
	if err != nil {
		return nil, fmt.Errorf("can't init echo pin: %v", err)
	}
	// We drive the trigger and listen on the echo.
	if err := trig.SetDirection(embd.Out); err != nil {
		return nil, fmt.Errorf("can't set trigger direction: %v", err)
	}
	if err := ec.SetDirection(embd.In); err != nil {
		return nil, fmt.Errorf("can't set echo direction: %v", err)
	}
	return &echo{
		name: name,
		trig: trig,
		echo: ec,
		quit: make(chan bool),
		done: make(chan bool),
	}, nil
}
// runDistancer runs the ultrasonic measurement loop: it settles the
// trigger pin low, then every 500ms emits a 10µs trigger pulse, times
// the echo pulse, and stores the derived distance (cm) in e.dist. It
// exits when a value arrives on e.quit, acknowledging on e.done.
//
// NOTE(review): e.dist is written here and read from other goroutines
// without synchronization — a data race under the Go memory model;
// confirm or guard it.
func (e *echo) runDistancer() {
	// Ensure the trigger starts low and give the sensor time to settle.
	if err := e.trig.Write(embd.Low); err != nil {
		log.Warnf("can't set trigger to low: %v", err)
	}
	time.Sleep(time.Second * 1)
	tick := time.NewTicker(time.Millisecond * 500)
	defer tick.Stop()
	for {
		select {
		case <-e.quit:
			e.done <- true
			return
		case <-tick.C:
			log.Infof("%s: measuring...", e.name)
			// A 10µs high pulse triggers one ultrasonic measurement.
			if err := e.trig.Write(embd.High); err != nil {
				log.Warnf("can't set trigger to high: %v", err)
			}
			time.Sleep(time.Microsecond * 10)
			if err := e.trig.Write(embd.Low); err != nil {
				log.Warnf("can't set trigger to low: %v", err)
			}
			dur, err := e.echo.TimePulse(embd.High)
			if err != nil {
				log.Warnf("can't time pulse: %v", err)
			}
			// Convert the echo pulse time to centimetres once instead of
			// evaluating the same expression twice (µs * 0.034 cm/µs,
			// halved for the round trip). On TimePulse error dur is its
			// zero value, so the reading becomes 0.
			cm := dur.Nanoseconds() / 1000 * 34 / 1000 / 2
			log.Infof("%s: distance: %dcm", e.name, cm)
			e.dist = cm
		}
	}
}
// close stops the runDistancer goroutine via the quit channel, waits
// for its acknowledgement on done, then releases both GPIO pins. It
// must only be called while runDistancer is running, otherwise the
// send on e.quit blocks forever.
func (e *echo) close() {
	e.quit <- true
	<-e.done
	e.echo.Close()
	e.trig.Close()
}
// driver bundles the two drive motors; last records when the most
// recent movement command was applied and is consumed by the
// safetyStop watchdog.
// NOTE(review): main copies this struct by value into server while
// safetyStop runs on the original — forward/backward then update a
// different copy's last field than the one the watchdog reads. Confirm
// and share a single value (e.g. via a pointer).
type driver struct {
	left, right *engine
	last time.Time
}
// safetyStop is a watchdog: once per second it checks when the driver
// last received a movement command (d.last, stamped by forward and
// backward) and cuts motor power if that was more than a second ago,
// so a stalled or disconnected client cannot leave the robot moving.
//
// NOTE(review): d.last is written by the gRPC handler goroutine and
// read here without synchronization — a data race; confirm or guard.
func (d *driver) safetyStop() {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for range ticker.C {
		// time.Since is the idiomatic, monotonic-clock-safe form of
		// d.last.Add(time.Second).Before(time.Now()).
		if time.Since(d.last) > time.Second {
			log.Warn("Stopping driver!")
			d.stop()
		}
	}
}
// stop cuts power to both motors; the direction pins are left as-is.
// Pin write errors are ignored, matching the rest of the driver code.
func (d *driver) stop() {
	d.left.pwr.Write(embd.Low)
	d.right.pwr.Write(embd.Low)
	log.Info("driver STOP")
}
// forward powers both motors in the forward direction and stamps
// d.last so the safetyStop watchdog knows a command was just applied.
func (d *driver) forward() {
	d.left.pwr.Write(embd.High)
	d.left.fwd.Write(embd.High)
	d.right.pwr.Write(embd.High)
	d.right.fwd.Write(embd.High)
	d.last = time.Now()
	log.Info("driver FWD")
}
// backward powers both motors in reverse and stamps d.last so the
// safetyStop watchdog knows a command was just applied.
func (d *driver) backward() {
	d.left.pwr.Write(embd.High)
	d.left.fwd.Write(embd.Low)
	d.right.pwr.Write(embd.High)
	d.right.fwd.Write(embd.Low)
	d.last = time.Now()
	// Restored for consistency with forward(): the log call had been
	// commented out, leaving reverse moves invisible in the logs.
	log.Info("driver BACK")
}
// Minimum obstacle distances (cm); straight moves toward an obstacle
// closer than safeStraightDist are refused by drive.
// NOTE(review): safeTurningDist is no longer referenced in this file —
// earlier revisions used it for diagonal moves; confirm intent.
const (
	safeStraightDist = 20
	safeTurningDist = 10
)
// drive maps a joystick-style Direction (±5 dead zone on Dx/Dy) onto
// the motors. Straight forward/backward moves take a fresh distance
// reading first and refuse to move when an obstacle is within
// safeStraightDist.
//
// NOTE(review): echo.measure is not defined in this view of the file —
// confirm it exists elsewhere. Unlike the earlier revision, turning and
// diagonal cases no longer check any distance — confirm intentional.
// Pin Write errors are ignored throughout.
func (s *server) drive(dir *pb.Direction) {
	switch {
	case dir.Dy > -5 && dir.Dy < 5 && dir.Dx > -5 && dir.Dx < 5:
		// Full stop.
		s.driver.stop()
	case dir.Dy > 5 && dir.Dx > -5 && dir.Dx < 5:
		// Forward.
		s.front.measure()
		if s.front.dist < safeStraightDist {
			s.driver.stop()
			return
		}
		s.driver.forward()
	case dir.Dy < -5 && dir.Dx > -5 && dir.Dx < 5:
		// Backward.
		s.rear.measure()
		if s.rear.dist < safeStraightDist {
			s.driver.stop()
			return
		}
		s.driver.backward()
	case dir.Dx > 5 && dir.Dy > -5 && dir.Dy < 5:
		// Sharp right: sides spin in opposite directions, pivot in place.
		s.driver.left.pwr.Write(embd.High)
		s.driver.left.fwd.Write(embd.High)
		s.driver.right.pwr.Write(embd.High)
		s.driver.right.fwd.Write(embd.Low)
		log.Info("driver TURN RIGHT")
	case dir.Dx < -5 && dir.Dy > -5 && dir.Dy < 5:
		// Sharp left.
		s.driver.left.pwr.Write(embd.High)
		s.driver.left.fwd.Write(embd.Low)
		s.driver.right.pwr.Write(embd.High)
		s.driver.right.fwd.Write(embd.High)
		log.Info("driver TURN LEFT")
	case dir.Dx > 5 && dir.Dy > 5:
		// Forward + right: only the left side is powered, curving right.
		s.driver.left.pwr.Write(embd.High)
		s.driver.left.fwd.Write(embd.High)
		s.driver.right.pwr.Write(embd.Low)
		s.driver.right.fwd.Write(embd.High)
		log.Info("driver FWD RIGHT")
	case dir.Dx < -5 && dir.Dy > 5:
		// Forward + left.
		s.driver.left.pwr.Write(embd.Low)
		s.driver.left.fwd.Write(embd.High)
		s.driver.right.pwr.Write(embd.High)
		s.driver.right.fwd.Write(embd.High)
		log.Info("driver FWD LEFT")
	case dir.Dx > 5 && dir.Dy < -5:
		// Backward + right.
		s.driver.left.pwr.Write(embd.High)
		s.driver.left.fwd.Write(embd.Low)
		s.driver.right.pwr.Write(embd.Low)
		s.driver.right.fwd.Write(embd.Low)
		log.Info("driver BACK RIGHT")
	case dir.Dx < -5 && dir.Dy < -5:
		// Backward + left.
		s.driver.left.pwr.Write(embd.Low)
		s.driver.left.fwd.Write(embd.Low)
		s.driver.right.pwr.Write(embd.High)
		s.driver.right.fwd.Write(embd.Low)
		log.Info("driver BACK LEFT")
	}
}
// engine is one DC motor controlled by two GPIO pins: pwr switches the
// motor on/off and fwd selects the rotation direction.
type engine struct {
	fwd, pwr embd.DigitalPin
}
// newEngine initialises the power and forward GPIO pins for a single
// motor and configures both as outputs. It returns an error if either
// pin cannot be created or configured.
func newEngine(pwrPin, fwdPin int) (*engine, error) {
	pwr, err := embd.NewDigitalPin(pwrPin)
	if err != nil {
		return nil, fmt.Errorf("can't init power pin: %v", err)
	}
	fwd, err := embd.NewDigitalPin(fwdPin)
	if err != nil {
		return nil, fmt.Errorf("can't init forward pin: %v", err)
	}
	// Both pins drive the motor controller, so both are outputs.
	if err := pwr.SetDirection(embd.Out); err != nil {
		return nil, fmt.Errorf("can't set power direction: %v", err)
	}
	if err := fwd.SetDirection(embd.Out); err != nil {
		return nil, fmt.Errorf("can't set forward direction: %v", err)
	}
	return &engine{pwr: pwr, fwd: fwd}, nil
}
// close releases both GPIO pins backing the engine.
func (e *engine) close() {
	e.pwr.Close()
	e.fwd.Close()
}
// Sensor position identifiers.
// NOTE(review): these constants are not referenced anywhere in this
// view of the file — confirm whether they are used elsewhere.
const (
	sensorUnknown = iota
	sensorFront
	sensorRear
)
// Drive is the bidirectional gRPC streaming handler: a reader goroutine
// applies each incoming Direction to the motors via s.drive, while this
// loop pushes telemetry (speed plus front/rear distances) once a minute
// until the client stream ends, signalled via waitc.
//
// NOTE(review): s.front.dist / s.rear.dist are read here while the
// distancer goroutines write them, without synchronization.
func (s *server) Drive(stream pb.Driver_DriveServer) error {
	waitc := make(chan struct{})
	go func() {
		for {
			d, err := stream.Recv()
			if err != nil {
				// Any receive error (including a clean EOF) ends the session.
				log.Warnf("ERR from client: %v", err)
				close(waitc)
				return
			}
			s.drive(d)
		}
	}()
	// A single reusable Ticker replaces time.After inside the select
	// loop, which allocated a fresh timer on every iteration.
	tick := time.NewTicker(time.Minute)
	defer tick.Stop()
	for {
		select {
		case <-tick.C:
			if err := stream.Send(&pb.Telemetry{Speed: 1, DistFront: int32(s.front.dist), DistRear: int32(s.rear.dist)}); err != nil {
				log.Errorf("can't send telemetry: %v", err)
				return err
			}
			log.Info("Sending telemetry!")
		case <-waitc:
			log.Info("got ERR from client, closing sending loop")
			return nil
		}
	}
}
// grpcPort is the TCP port the gRPC Driver service listens on; main
// also broadcasts it over UDP so clients can discover the robot.
var grpcPort = flag.String("grpc-port", "31337", "gRPC listen port")
// main wires up the robot: a pprof HTTP endpoint, GPIO, the two
// ultrasonic sensors and two motors, the driver safety watchdog, the
// gRPC Driver service, a UDP broadcast advertising the gRPC port, and
// a signal handler for shutdown.
func main() {
	flag.Parse()
	// Expose net/http/pprof (blank-imported) for live profiling.
	go http.ListenAndServe(":9191", nil)
	// Initialize GPIO.
	var err error
	if err = embd.InitGPIO(); err != nil {
		log.Fatalf("Can't init GPIO: %v", err)
	}
	defer embd.CloseGPIO()
	// Ultrasonic sensors: (trigger pin, echo pin).
	front, err := newEcho("front", 9, 10)
	if err != nil {
		log.Fatalf("Can't init front echo: %v", err)
	}
	defer front.close()
	rear, err := newEcho("rear", 19, 20)
	if err != nil {
		log.Fatalf("Can't init rear echo: %v", err)
	}
	defer rear.close()
	go front.runDistancer()
	go rear.runDistancer()
	// Motors: (power pin, forward pin).
	left, err := newEngine(23, 4)
	if err != nil {
		log.Fatalf("Can't init left engine: %v", err)
	}
	defer left.close()
	right, err := newEngine(24, 17)
	if err != nil {
		log.Fatalf("Can't init right engine: %v", err)
	}
	defer right.close()
	// Listen for GRPC connections.
	lis, err := net.Listen("tcp", ":"+*grpcPort)
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	defer lis.Close()
	drv := driver{left: left, right: right}
	// NOTE(review): drv is copied by value into srv below while
	// safetyStop runs against this local copy; forward/backward update
	// srv.driver.last, so the watchdog's drv.last never advances and it
	// keeps firing stop(). Confirm and share one *driver instead.
	go drv.safetyStop()
	srv := server{front: front, rear: rear, driver: drv}
	s := grpc.NewServer()
	pb.RegisterDriverServer(s, &srv)
	// Open broadcast connection.
	bcast, err := net.ListenPacket("udp", ":0")
	if err != nil {
		log.Fatal(err)
	}
	defer bcast.Close()
	broadcastAddr := "255.255.255.255:8032"
	dst, err := net.ResolveUDPAddr("udp", broadcastAddr)
	if err != nil {
		log.Fatal(err)
	}
	// Periodically announce our gRPC port on the LAN broadcast address.
	go func() {
		log.Infof("Starting to broadcast our port %s on %s", *grpcPort, broadcastAddr)
		for {
			if _, err := bcast.WriteTo([]byte(*grpcPort), dst); err != nil {
				log.Warn(err)
			}
			time.Sleep(time.Second)
		}
	}()
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGTERM, syscall.SIGINT)
	// The handler repeats the deferred cleanup by hand because os.Exit
	// skips deferred calls. srv.shutdown is set but not read in this
	// view of the file.
	go func() {
		sig := <-c
		log.Infof("Got %s, trying to shutdown gracefully", sig.String())
		srv.shutdown = true
		front.close()
		rear.close()
		left.close()
		right.close()
		embd.CloseGPIO()
		lis.Close()
		bcast.Close()
		os.Exit(0)
	}()
	// Start serving GRPC.
	log.Fatal(s.Serve(lis))
}
|
package concordances
import (
"encoding/json"
"fmt"
"os"
"reflect"
"sort"
"testing"
cmneo4j "github.com/Financial-Times/cm-neo4j-driver"
"github.com/Financial-Times/concepts-rw-neo4j/concepts"
"github.com/Financial-Times/neo-utils-go/v2/neoutils"
"github.com/Financial-Times/go-logger/v2"
"github.com/stretchr/testify/assert"
)
// concordedBrandSmartlogic is the expected concordance for the
// Smartlogic-authority identifier of the concorded test brand.
var concordedBrandSmartlogic = Concordance{
	Concept{
		ID:     "http://api.ft.com/things/b20801ac-5a76-43cf-b816-8c3b2f7133ad",
		APIURL: "http://api.ft.com/brands/b20801ac-5a76-43cf-b816-8c3b2f7133ad"},
	Identifier{
		Authority:       "http://api.ft.com/system/SMARTLOGIC",
		IdentifierValue: "b20801ac-5a76-43cf-b816-8c3b2f7133ad"},
}
// concordedManagedLocationByConceptId holds every identifier expected
// when the concorded managed-location concept is read by concept UUID.
var concordedManagedLocationByConceptId = Concordances{
	[]Concordance{
		{
			Concept{
				ID:     "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44",
				APIURL: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
			Identifier{
				Authority:       "http://api.ft.com/system/WIKIDATA",
				IdentifierValue: "http://www.wikidata.org/entity/Q218"},
		},
		{
			Concept{
				ID:     "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44",
				APIURL: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
			Identifier{
				Authority:       "http://api.ft.com/system/FT-TME",
				IdentifierValue: "TnN0ZWluX0dMX1JP-R0w="},
		},
		{
			Concept{
				ID:     "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44",
				APIURL: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
			Identifier{
				Authority:       "http://api.ft.com/system/MANAGEDLOCATION",
				IdentifierValue: "5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
		},
		{
			Concept{
				ID:     "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44",
				APIURL: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
			Identifier{
				Authority:       "http://api.ft.com/system/ISO-3166-1",
				IdentifierValue: "RO"},
		},
		{
			Concept{
				ID:     "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44",
				APIURL: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
			Identifier{
				Authority:       "http://api.ft.com/system/UPP",
				IdentifierValue: "4534282c-d3ee-3595-9957-81a9293200f3"},
		},
		{
			Concept{
				ID:     "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44",
				APIURL: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
			Identifier{
				Authority:       "http://api.ft.com/system/UPP",
				IdentifierValue: "4411b761-e632-30e7-855c-06aeca76c48d"},
		},
		{
			Concept{
				ID:     "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44",
				APIURL: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
			Identifier{
				Authority:       "http://api.ft.com/system/UPP",
				IdentifierValue: "5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
		},
	},
}
// concordedManagedLocationByAuthority is the single concordance
// expected when reading by the MANAGEDLOCATION authority.
var concordedManagedLocationByAuthority = Concordances{
	[]Concordance{
		{
			Concept{
				ID:     "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44",
				APIURL: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
			Identifier{
				Authority:       "http://api.ft.com/system/MANAGEDLOCATION",
				IdentifierValue: "5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
		},
	},
}

// concordedManagedLocationByISO31661Authority is the single concordance
// expected when reading by the ISO-3166-1 authority.
var concordedManagedLocationByISO31661Authority = Concordances{
	[]Concordance{
		{
			Concept{
				ID:     "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44",
				APIURL: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
			Identifier{
				Authority:       "http://api.ft.com/system/ISO-3166-1",
				IdentifierValue: "RO"},
		},
	},
}
// concordedBrandSmartlogicUPP is the UPP identifier entry for the
// concorded test brand's Smartlogic source.
var concordedBrandSmartlogicUPP = Concordance{
	Concept{
		ID:     "http://api.ft.com/things/b20801ac-5a76-43cf-b816-8c3b2f7133ad",
		APIURL: "http://api.ft.com/brands/b20801ac-5a76-43cf-b816-8c3b2f7133ad"},
	Identifier{
		Authority:       "http://api.ft.com/system/UPP",
		IdentifierValue: "b20801ac-5a76-43cf-b816-8c3b2f7133ad"},
}

// concordedBrandTME is the TME identifier entry for the concorded test
// brand.
var concordedBrandTME = Concordance{
	Concept{
		ID:     "http://api.ft.com/things/b20801ac-5a76-43cf-b816-8c3b2f7133ad",
		APIURL: "http://api.ft.com/brands/b20801ac-5a76-43cf-b816-8c3b2f7133ad"},
	Identifier{
		Authority:       "http://api.ft.com/system/FT-TME",
		IdentifierValue: "VGhlIFJvbWFu-QnJhbmRz"},
}

// concordedBrandTMEUPP is the UPP identifier entry for the concorded
// test brand's TME source.
var concordedBrandTMEUPP = Concordance{
	Concept{
		ID:     "http://api.ft.com/things/b20801ac-5a76-43cf-b816-8c3b2f7133ad",
		APIURL: "http://api.ft.com/brands/b20801ac-5a76-43cf-b816-8c3b2f7133ad"},
	Identifier{
		Authority:       "http://api.ft.com/system/UPP",
		IdentifierValue: "70f4732b-7f7d-30a1-9c29-0cceec23760e"},
}
// expectedConcordanceBankOfTest holds every identifier expected when
// the "Bank of Test" organisation is read by concept UUID.
var expectedConcordanceBankOfTest = Concordances{
	[]Concordance{
		{
			Concept{
				ID:     "http://api.ft.com/things/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115",
				APIURL: "http://api.ft.com/organisations/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
			Identifier{
				Authority:       "http://api.ft.com/system/UPP",
				IdentifierValue: "2cdeb859-70df-3a0e-b125-f958366bea44"},
		},
		{
			Concept{
				ID:     "http://api.ft.com/things/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115",
				APIURL: "http://api.ft.com/organisations/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
			Identifier{
				Authority:       "http://api.ft.com/system/FACTSET",
				IdentifierValue: "7IV872-E"},
		},
		{
			Concept{
				ID:     "http://api.ft.com/things/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115",
				APIURL: "http://api.ft.com/organisations/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
			Identifier{
				Authority:       "http://api.ft.com/system/FT-TME",
				IdentifierValue: "QmFuayBvZiBUZXN0-T04="},
		},
		{
			Concept{
				ID:     "http://api.ft.com/things/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115",
				APIURL: "http://api.ft.com/organisations/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
			Identifier{
				Authority:       "http://api.ft.com/system/LEI",
				IdentifierValue: "VNF516RB4DFV5NQ22UF0"},
		},
		{
			Concept{
				ID:     "http://api.ft.com/things/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115",
				APIURL: "http://api.ft.com/organisations/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
			Identifier{
				Authority:       "http://api.ft.com/system/SMARTLOGIC",
				IdentifierValue: "cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
		},
		{
			Concept{
				ID:     "http://api.ft.com/things/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115",
				APIURL: "http://api.ft.com/organisations/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
			Identifier{
				Authority:       "http://api.ft.com/system/UPP",
				IdentifierValue: "cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
		},
		{
			Concept{
				ID:     "http://api.ft.com/things/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115",
				APIURL: "http://api.ft.com/organisations/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
			Identifier{
				Authority:       "http://api.ft.com/system/UPP",
				IdentifierValue: "d56e7388-25cb-343e-aea9-8b512e28476e"},
		},
	},
}
// expectedConcordanceBankOfTestByAuthority is the single concordance
// expected when reading Bank of Test by the FACTSET authority.
var expectedConcordanceBankOfTestByAuthority = Concordances{
	[]Concordance{
		{
			Concept{
				ID:     "http://api.ft.com/things/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115",
				APIURL: "http://api.ft.com/organisations/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
			Identifier{
				Authority:       "http://api.ft.com/system/FACTSET",
				IdentifierValue: "7IV872-E"},
		},
	},
}

// expectedConcordanceBankOfTestByUPPAuthority is the single concordance
// expected when reading Bank of Test by a UPP identifier.
var expectedConcordanceBankOfTestByUPPAuthority = Concordances{
	[]Concordance{
		{
			Concept{
				ID:     "http://api.ft.com/things/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115",
				APIURL: "http://api.ft.com/organisations/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
			Identifier{
				Authority:       "http://api.ft.com/system/UPP",
				IdentifierValue: "d56e7388-25cb-343e-aea9-8b512e28476e"},
		},
	},
}

// expectedConcordanceBankOfTestByLEIAuthority is the single concordance
// expected when reading Bank of Test by the LEI authority.
var expectedConcordanceBankOfTestByLEIAuthority = Concordances{
	[]Concordance{
		{
			Concept{
				ID:     "http://api.ft.com/things/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115",
				APIURL: "http://api.ft.com/organisations/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
			Identifier{
				Authority:       "http://api.ft.com/system/LEI",
				IdentifierValue: "VNF516RB4DFV5NQ22UF0"},
		},
	},
}
// unconcordedBrandTME is the TME identifier entry for the unconcorded
// test brand.
var unconcordedBrandTME = Concordance{
	Concept{
		ID:     "http://api.ft.com/things/ad56856a-7d38-48e2-a131-7d104f17e8f6",
		APIURL: "http://api.ft.com/brands/ad56856a-7d38-48e2-a131-7d104f17e8f6"},
	Identifier{
		Authority:       "http://api.ft.com/system/FT-TME",
		IdentifierValue: "UGFydHkgcGVvcGxl-QnJhbmRz"},
}

// unconcordedBrandTMEUPP is the UPP identifier entry for the
// unconcorded test brand.
var unconcordedBrandTMEUPP = Concordance{
	Concept{
		ID:     "http://api.ft.com/things/ad56856a-7d38-48e2-a131-7d104f17e8f6",
		APIURL: "http://api.ft.com/brands/ad56856a-7d38-48e2-a131-7d104f17e8f6"},
	Identifier{
		Authority:       "http://api.ft.com/system/UPP",
		IdentifierValue: "ad56856a-7d38-48e2-a131-7d104f17e8f6"},
}
// expectedConcordanceNAICSIndustryClassification holds every identifier
// expected when the NAICS industry classification is read by UUID.
var expectedConcordanceNAICSIndustryClassification = Concordances{
	[]Concordance{
		{
			Concept{
				ID:     "http://api.ft.com/things/38ee195d-ebdd-48a9-af4b-c8a322e7b04d",
				APIURL: "http://api.ft.com/things/38ee195d-ebdd-48a9-af4b-c8a322e7b04d"},
			Identifier{
				Authority:       "http://api.ft.com/system/SMARTLOGIC",
				IdentifierValue: "38ee195d-ebdd-48a9-af4b-c8a322e7b04d"},
		},
		{
			Concept{
				ID:     "http://api.ft.com/things/38ee195d-ebdd-48a9-af4b-c8a322e7b04d",
				APIURL: "http://api.ft.com/things/38ee195d-ebdd-48a9-af4b-c8a322e7b04d"},
			Identifier{
				Authority:       "http://api.ft.com/system/NAICS",
				IdentifierValue: "5111"},
		},
		{
			Concept{
				ID:     "http://api.ft.com/things/38ee195d-ebdd-48a9-af4b-c8a322e7b04d",
				APIURL: "http://api.ft.com/things/38ee195d-ebdd-48a9-af4b-c8a322e7b04d"},
			Identifier{
				Authority:       "http://api.ft.com/system/UPP",
				IdentifierValue: "38ee195d-ebdd-48a9-af4b-c8a322e7b04d"},
		},
	},
}
// expectedConcordanceNAICSIndustryClassificationByAuthority is the
// single concordance expected when reading by the NAICS authority.
var expectedConcordanceNAICSIndustryClassificationByAuthority = Concordances{
	[]Concordance{
		{
			Concept{
				ID:     "http://api.ft.com/things/38ee195d-ebdd-48a9-af4b-c8a322e7b04d",
				APIURL: "http://api.ft.com/things/38ee195d-ebdd-48a9-af4b-c8a322e7b04d"},
			Identifier{
				Authority:       "http://api.ft.com/system/NAICS",
				IdentifierValue: "5111"},
		},
	},
}
// TestNeoReadByConceptID writes concept fixtures into Neo4j through the
// concepts RW service and verifies that ReadByConceptID returns the
// expected set of concordances for each fixture.
// NOTE(review): the ToConcordancesMandatoryFields and
// ReturnMultipleConcordancesForMultipleIdentifiers cases use identical
// inputs and expectations — confirm whether one is redundant.
func TestNeoReadByConceptID(t *testing.T) {
	conn := getDatabaseConnection(assert.New(t))
	driver := getNeoDriver(assert.New(t))
	log := logger.NewUPPLogger("public-concordances-api-test", "PANIC")
	conceptRW := concepts.NewConceptService(conn, log)
	assert.NoError(t, conceptRW.Initialise())
	tests := []struct {
		name        string
		fixture     string
		conceptIDs  []string
		expectedLen int
		expected    Concordances
	}{
		{
			name:        "NewModel_Unconcorded",
			fixture:     "Brand-Unconcorded-ad56856a-7d38-48e2-a131-7d104f17e8f6.json",
			conceptIDs:  []string{"ad56856a-7d38-48e2-a131-7d104f17e8f6"},
			expectedLen: 2,
			expected:    Concordances{[]Concordance{unconcordedBrandTME, unconcordedBrandTMEUPP}},
		},
		{
			name:        "NewModel_Concorded",
			fixture:     "Brand-Concorded-b20801ac-5a76-43cf-b816-8c3b2f7133ad.json",
			conceptIDs:  []string{"b20801ac-5a76-43cf-b816-8c3b2f7133ad"},
			expectedLen: 4,
			expected:    Concordances{[]Concordance{concordedBrandSmartlogic, concordedBrandSmartlogicUPP, concordedBrandTME, concordedBrandTMEUPP}},
		},
		{
			name:        "ManagedLocation",
			fixture:     "ManagedLocation-Concorded-5aba454b-3e31-31b9-bdeb-0caf83f62b44.json",
			conceptIDs:  []string{"5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
			expectedLen: 7,
			expected:    concordedManagedLocationByConceptId,
		},
		{
			name:        "ToConcordancesMandatoryFields",
			fixture:     "Organisation-BankOfTest-cd7e4345-f11f-41f3-a0f0-2cf5c43e0115.json",
			conceptIDs:  []string{"cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
			expectedLen: 7,
			expected:    expectedConcordanceBankOfTest,
		},
		{
			name:        "ReturnMultipleConcordancesForMultipleIdentifiers",
			fixture:     "Organisation-BankOfTest-cd7e4345-f11f-41f3-a0f0-2cf5c43e0115.json",
			conceptIDs:  []string{"cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
			expectedLen: 7,
			expected:    expectedConcordanceBankOfTest,
		},
		{
			name:        "NAICSIndustryClassification",
			fixture:     "NAICSIndustryClassification-38ee195d-ebdd-48a9-af4b-c8a322e7b04d.json",
			conceptIDs:  []string{"38ee195d-ebdd-48a9-af4b-c8a322e7b04d"},
			expectedLen: 3,
			expected:    expectedConcordanceNAICSIndustryClassification,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			writeGenericConceptJSONToService(conceptRW, "./fixtures/"+test.fixture, assert.New(t))
			// Remove the written concepts after each subtest.
			defer cleanUp(assert.New(t), driver)
			undertest := NewCypherDriver(driver, "prod")
			conc, found, err := undertest.ReadByConceptID(test.conceptIDs)
			assert.NoError(t, err)
			assert.True(t, found)
			assert.Equal(t, test.expectedLen, len(conc.Concordance))
			readConceptAndCompare(t, test.expected, conc, "TestNeoReadByConceptID_"+test.name)
		})
	}
}
// TestNeoReadByAuthority writes concept fixtures into Neo4j and
// verifies ReadByAuthority: a supported authority/identifier pair must
// return exactly one matching concordance, while an unsupported
// authority must return none (expected left empty).
// NOTE(review): ToConcordancesMandatoryField and
// OnlyOneConcordancePerIdentifierValue use identical inputs and
// expectations — confirm whether one is redundant.
func TestNeoReadByAuthority(t *testing.T) {
	conn := getDatabaseConnection(assert.New(t))
	driver := getNeoDriver(assert.New(t))
	log := logger.NewUPPLogger("public-concordances-api-test", "PANIC")
	conceptRW := concepts.NewConceptService(conn, log)
	assert.NoError(t, conceptRW.Initialise())
	tests := []struct {
		name             string
		fixture          string
		authority        string
		identifierValues []string
		expected         Concordances
		expectedErr      bool
	}{
		{
			name:             "NewModel_Concorded",
			fixture:          "Brand-Concorded-b20801ac-5a76-43cf-b816-8c3b2f7133ad.json",
			authority:        "http://api.ft.com/system/SMARTLOGIC",
			identifierValues: []string{"b20801ac-5a76-43cf-b816-8c3b2f7133ad"},
			expected:         Concordances{[]Concordance{concordedBrandSmartlogic}},
		},
		{
			name:             "NewModel_Unconcorded",
			fixture:          "Brand-Unconcorded-ad56856a-7d38-48e2-a131-7d104f17e8f6.json",
			authority:        "http://api.ft.com/system/FT-TME",
			identifierValues: []string{"UGFydHkgcGVvcGxl-QnJhbmRz"},
			expected:         Concordances{[]Concordance{unconcordedBrandTME}},
		},
		{
			name:             "ManagedLocation",
			fixture:          "ManagedLocation-Concorded-5aba454b-3e31-31b9-bdeb-0caf83f62b44.json",
			authority:        "http://api.ft.com/system/MANAGEDLOCATION",
			identifierValues: []string{"5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
			expected:         concordedManagedLocationByAuthority,
		},
		{
			name:             "ISO31661",
			fixture:          "ManagedLocation-Concorded-5aba454b-3e31-31b9-bdeb-0caf83f62b44.json",
			authority:        "http://api.ft.com/system/ISO-3166-1",
			identifierValues: []string{"RO"},
			expected:         concordedManagedLocationByISO31661Authority,
		},
		{
			name:             "ToConcordancesMandatoryField",
			fixture:          "Organisation-BankOfTest-cd7e4345-f11f-41f3-a0f0-2cf5c43e0115.json",
			authority:        "http://api.ft.com/system/FACTSET",
			identifierValues: []string{"7IV872-E"},
			expected:         expectedConcordanceBankOfTestByAuthority,
		},
		{
			name:             "ToConcordancesByUPPAuthority",
			fixture:          "Organisation-BankOfTest-cd7e4345-f11f-41f3-a0f0-2cf5c43e0115.json",
			authority:        "http://api.ft.com/system/UPP",
			identifierValues: []string{"d56e7388-25cb-343e-aea9-8b512e28476e"},
			expected:         expectedConcordanceBankOfTestByUPPAuthority,
		},
		{
			name:             "ToConcordancesByLEIAuthority",
			fixture:          "Organisation-BankOfTest-cd7e4345-f11f-41f3-a0f0-2cf5c43e0115.json",
			authority:        "http://api.ft.com/system/LEI",
			identifierValues: []string{"VNF516RB4DFV5NQ22UF0"},
			expected:         expectedConcordanceBankOfTestByLEIAuthority,
		},
		{
			name:             "OnlyOneConcordancePerIdentifierValue",
			fixture:          "Organisation-BankOfTest-cd7e4345-f11f-41f3-a0f0-2cf5c43e0115.json",
			authority:        "http://api.ft.com/system/FACTSET",
			identifierValues: []string{"7IV872-E"},
			expected:         expectedConcordanceBankOfTestByAuthority,
		},
		{
			name:             "NAICSIndustryClassification",
			fixture:          "NAICSIndustryClassification-38ee195d-ebdd-48a9-af4b-c8a322e7b04d.json",
			authority:        "http://api.ft.com/system/NAICS",
			identifierValues: []string{"5111"},
			expected:         expectedConcordanceNAICSIndustryClassificationByAuthority,
		},
		{
			name:             "EmptyConcordancesWhenUnsupportedAuthority",
			fixture:          "Organisation-BankOfTest-cd7e4345-f11f-41f3-a0f0-2cf5c43e0115.json",
			authority:        "http://api.ft.com/system/UnsupportedAuthority",
			identifierValues: []string{"DANMUR-1"},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			writeGenericConceptJSONToService(conceptRW, "./fixtures/"+test.fixture, assert.New(t))
			// Remove the written concepts after each subtest.
			defer cleanUp(assert.New(t), driver)
			undertest := NewCypherDriver(driver, "prod")
			conc, found, err := undertest.ReadByAuthority(test.authority, test.identifierValues)
			assert.NoError(t, err)
			// A populated expectation means exactly one concordance must
			// come back; an empty one means the lookup must find nothing.
			if len(test.expected.Concordance) > 0 {
				assert.True(t, found)
				assert.Equal(t, 1, len(conc.Concordance))
				readConceptAndCompare(t, test.expected, conc, "TestNeoReadByAuthority_"+test.name)
				return
			}
			assert.False(t, found)
			assert.Empty(t, conc.Concordance)
		})
	}
}
// readConceptAndCompare asserts that actual deep-equals expected after
// normalising both (unordered) concordance lists via sortConcordances.
// NOTE(review): assert.Equal would give a readable diff instead of
// assert.True(reflect.DeepEqual(...)), but switching would orphan the
// fmt and reflect imports — change all three together if desired.
func readConceptAndCompare(t *testing.T, expected Concordances, actual Concordances, testName string) {
	sortConcordances(expected.Concordance)
	sortConcordances(actual.Concordance)
	assert.True(t, reflect.DeepEqual(expected, actual), fmt.Sprintf("Actual aggregated concept differs from expected: Test: %v \n Expected: %v \n Actual: %v", testName, expected, actual))
}
// sortConcordances puts a concordance list into a canonical order so
// two lists can be compared element-wise regardless of retrieval order.
// The previous implementation chained three stable sorts (by ID, then
// Authority, then IdentifierValue); a single stable sort with a
// composite comparator yields the identical ordering — primary key
// IdentifierValue, ties broken by Authority, then by Concept ID — in
// one pass over the data.
func sortConcordances(concordanceList []Concordance) {
	sort.SliceStable(concordanceList, func(i, j int) bool {
		a, b := concordanceList[i], concordanceList[j]
		if a.Identifier.IdentifierValue != b.Identifier.IdentifierValue {
			return a.Identifier.IdentifierValue < b.Identifier.IdentifierValue
		}
		if a.Identifier.Authority != b.Identifier.Authority {
			return a.Identifier.Authority < b.Identifier.Authority
		}
		return a.Concept.ID < b.Concept.ID
	})
}
// getNeoDriver opens a cm-neo4j driver against the Neo4j instance named
// by NEO4J_TEST_URL (default bolt://localhost:7687), failing the test
// if the connection cannot be established.
func getNeoDriver(assert *assert.Assertions) *cmneo4j.Driver {
	neoURL := os.Getenv("NEO4J_TEST_URL")
	if neoURL == "" {
		neoURL = "bolt://localhost:7687"
	}
	l := logger.NewUPPLogger("public-concordances-api-test", "PANIC")
	d, err := cmneo4j.NewDefaultDriver(neoURL, l)
	assert.NoError(err, "Failed to connect to Neo4j")
	return d
}
// writeGenericConceptJSONToService loads a concept fixture from a JSON
// file and writes it through the concepts RW service so tests run
// against known data.
// Fixes a file-handle leak (the opened fixture file was never closed)
// and replaces the errr/errrr variables with conventional err reuse.
func writeGenericConceptJSONToService(service concepts.ConceptService, pathToJSONFile string, assert *assert.Assertions) {
	f, err := os.Open(pathToJSONFile)
	assert.NoError(err)
	// Closing a nil *os.File after a failed Open is safe (returns ErrInvalid).
	defer f.Close()
	dec := json.NewDecoder(f)
	inst, _, err := service.DecodeJSON(dec)
	assert.NoError(err)
	_, err = service.Write(inst, "test_transaction_id")
	assert.NoError(err)
}
// cleanUp removes all test data written by the fixtures: first the
// concepts that have canonical nodes (deleting the canonical node, its
// sources, and their identifiers), then bare Thing nodes, executing all
// deletions in a single driver.Write batch.
func cleanUp(assert *assert.Assertions, driver *cmneo4j.Driver) {
	var queries []*cmneo4j.Query
	// Concepts with canonical nodes
	uuids := []string{
		"cd7e4345-f11f-41f3-a0f0-2cf5c43e0115",
		"5aba454b-3e31-31b9-bdeb-0caf83f62b44",
		"b20801ac-5a76-43cf-b816-8c3b2f7133ad",
		"ad56856a-7d38-48e2-a131-7d104f17e8f6",
		"38ee195d-ebdd-48a9-af4b-c8a322e7b04d",
	}
	for _, uuid := range uuids {
		query := &cmneo4j.Query{
			Cypher: `
			MATCH (canonical:Concept{prefUUID:$uuid})--(source)
			OPTIONAL MATCH (source)<-[:IDENTIFIES]-(identifier)
			DETACH DELETE canonical, source, identifier`,
			Params: map[string]interface{}{"uuid": uuid},
		}
		queries = append(queries, query)
	}
	// Things
	uuids = []string{
		"dbb0bdae-1f0c-11e4-b0cb-b2227cce2b54",
	}
	for _, uuid := range uuids {
		query := &cmneo4j.Query{
			Cypher: `
			MATCH (source:Thing{uuid:$uuid})
			OPTIONAL MATCH (source)<-[:IDENTIFIES]-(identifier)
			DETACH DELETE source, identifier`,
			Params: map[string]interface{}{"uuid": uuid},
		}
		queries = append(queries, query)
	}
	err := driver.Write(queries...)
	assert.NoError(err)
}
// getDatabaseConnection opens a non-transactional neoutils connection
// against the Neo4j instance named by NEO4J_TEST_URL (default
// http://localhost:7474/db/data), failing the test on error.
func getDatabaseConnection(assert *assert.Assertions) neoutils.NeoConnection {
	neoURL := os.Getenv("NEO4J_TEST_URL")
	if neoURL == "" {
		neoURL = "http://localhost:7474/db/data"
	}
	conf := neoutils.DefaultConnectionConfig()
	conf.Transactional = false
	conn, err := neoutils.Connect(neoURL, conf, nil)
	assert.NoError(err, "Failed to connect to Neo4j")
	return conn
}
Revert integration tag
//go:build integration
// +build integration
package concordances
import (
"encoding/json"
"fmt"
"os"
"reflect"
"sort"
"testing"
cmneo4j "github.com/Financial-Times/cm-neo4j-driver"
"github.com/Financial-Times/concepts-rw-neo4j/concepts"
"github.com/Financial-Times/neo-utils-go/v2/neoutils"
"github.com/Financial-Times/go-logger/v2"
"github.com/stretchr/testify/assert"
)
// concordedBrandSmartlogic is the expected concordance for the
// Smartlogic-authority identifier of the concorded test brand.
var concordedBrandSmartlogic = Concordance{
	Concept{
		ID:     "http://api.ft.com/things/b20801ac-5a76-43cf-b816-8c3b2f7133ad",
		APIURL: "http://api.ft.com/brands/b20801ac-5a76-43cf-b816-8c3b2f7133ad"},
	Identifier{
		Authority:       "http://api.ft.com/system/SMARTLOGIC",
		IdentifierValue: "b20801ac-5a76-43cf-b816-8c3b2f7133ad"},
}
var concordedManagedLocationByConceptId = Concordances{
[]Concordance{
{
Concept{
ID: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44",
APIURL: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
Identifier{
Authority: "http://api.ft.com/system/WIKIDATA",
IdentifierValue: "http://www.wikidata.org/entity/Q218"},
},
{
Concept{
ID: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44",
APIURL: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
Identifier{
Authority: "http://api.ft.com/system/FT-TME",
IdentifierValue: "TnN0ZWluX0dMX1JP-R0w="},
},
{
Concept{
ID: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44",
APIURL: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
Identifier{
Authority: "http://api.ft.com/system/MANAGEDLOCATION",
IdentifierValue: "5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
},
{
Concept{
ID: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44",
APIURL: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
Identifier{
Authority: "http://api.ft.com/system/ISO-3166-1",
IdentifierValue: "RO"},
},
{
Concept{
ID: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44",
APIURL: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
Identifier{
Authority: "http://api.ft.com/system/UPP",
IdentifierValue: "4534282c-d3ee-3595-9957-81a9293200f3"},
},
{
Concept{
ID: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44",
APIURL: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
Identifier{
Authority: "http://api.ft.com/system/UPP",
IdentifierValue: "4411b761-e632-30e7-855c-06aeca76c48d"},
},
{
Concept{
ID: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44",
APIURL: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
Identifier{
Authority: "http://api.ft.com/system/UPP",
IdentifierValue: "5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
},
},
}
var concordedManagedLocationByAuthority = Concordances{
[]Concordance{
{
Concept{
ID: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44",
APIURL: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
Identifier{
Authority: "http://api.ft.com/system/MANAGEDLOCATION",
IdentifierValue: "5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
},
},
}
var concordedManagedLocationByISO31661Authority = Concordances{
[]Concordance{
{
Concept{
ID: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44",
APIURL: "http://api.ft.com/things/5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
Identifier{
Authority: "http://api.ft.com/system/ISO-3166-1",
IdentifierValue: "RO"},
},
},
}
var concordedBrandSmartlogicUPP = Concordance{
Concept{
ID: "http://api.ft.com/things/b20801ac-5a76-43cf-b816-8c3b2f7133ad",
APIURL: "http://api.ft.com/brands/b20801ac-5a76-43cf-b816-8c3b2f7133ad"},
Identifier{
Authority: "http://api.ft.com/system/UPP",
IdentifierValue: "b20801ac-5a76-43cf-b816-8c3b2f7133ad"},
}
var concordedBrandTME = Concordance{
Concept{
ID: "http://api.ft.com/things/b20801ac-5a76-43cf-b816-8c3b2f7133ad",
APIURL: "http://api.ft.com/brands/b20801ac-5a76-43cf-b816-8c3b2f7133ad"},
Identifier{
Authority: "http://api.ft.com/system/FT-TME",
IdentifierValue: "VGhlIFJvbWFu-QnJhbmRz"},
}
var concordedBrandTMEUPP = Concordance{
Concept{
ID: "http://api.ft.com/things/b20801ac-5a76-43cf-b816-8c3b2f7133ad",
APIURL: "http://api.ft.com/brands/b20801ac-5a76-43cf-b816-8c3b2f7133ad"},
Identifier{
Authority: "http://api.ft.com/system/UPP",
IdentifierValue: "70f4732b-7f7d-30a1-9c29-0cceec23760e"},
}
var expectedConcordanceBankOfTest = Concordances{
[]Concordance{
{
Concept{
ID: "http://api.ft.com/things/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115",
APIURL: "http://api.ft.com/organisations/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
Identifier{
Authority: "http://api.ft.com/system/UPP",
IdentifierValue: "2cdeb859-70df-3a0e-b125-f958366bea44"},
},
{
Concept{
ID: "http://api.ft.com/things/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115",
APIURL: "http://api.ft.com/organisations/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
Identifier{
Authority: "http://api.ft.com/system/FACTSET",
IdentifierValue: "7IV872-E"},
},
{
Concept{
ID: "http://api.ft.com/things/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115",
APIURL: "http://api.ft.com/organisations/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
Identifier{
Authority: "http://api.ft.com/system/FT-TME",
IdentifierValue: "QmFuayBvZiBUZXN0-T04="},
},
{
Concept{
ID: "http://api.ft.com/things/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115",
APIURL: "http://api.ft.com/organisations/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
Identifier{
Authority: "http://api.ft.com/system/LEI",
IdentifierValue: "VNF516RB4DFV5NQ22UF0"},
},
{
Concept{
ID: "http://api.ft.com/things/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115",
APIURL: "http://api.ft.com/organisations/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
Identifier{
Authority: "http://api.ft.com/system/SMARTLOGIC",
IdentifierValue: "cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
},
{
Concept{
ID: "http://api.ft.com/things/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115",
APIURL: "http://api.ft.com/organisations/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
Identifier{
Authority: "http://api.ft.com/system/UPP",
IdentifierValue: "cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
},
{
Concept{
ID: "http://api.ft.com/things/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115",
APIURL: "http://api.ft.com/organisations/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
Identifier{
Authority: "http://api.ft.com/system/UPP",
IdentifierValue: "d56e7388-25cb-343e-aea9-8b512e28476e"},
},
},
}
var expectedConcordanceBankOfTestByAuthority = Concordances{
[]Concordance{
{
Concept{
ID: "http://api.ft.com/things/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115",
APIURL: "http://api.ft.com/organisations/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
Identifier{
Authority: "http://api.ft.com/system/FACTSET",
IdentifierValue: "7IV872-E"},
},
},
}
var expectedConcordanceBankOfTestByUPPAuthority = Concordances{
[]Concordance{
{
Concept{
ID: "http://api.ft.com/things/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115",
APIURL: "http://api.ft.com/organisations/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
Identifier{
Authority: "http://api.ft.com/system/UPP",
IdentifierValue: "d56e7388-25cb-343e-aea9-8b512e28476e"},
},
},
}
var expectedConcordanceBankOfTestByLEIAuthority = Concordances{
[]Concordance{
{
Concept{
ID: "http://api.ft.com/things/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115",
APIURL: "http://api.ft.com/organisations/cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
Identifier{
Authority: "http://api.ft.com/system/LEI",
IdentifierValue: "VNF516RB4DFV5NQ22UF0"},
},
},
}
var unconcordedBrandTME = Concordance{
Concept{
ID: "http://api.ft.com/things/ad56856a-7d38-48e2-a131-7d104f17e8f6",
APIURL: "http://api.ft.com/brands/ad56856a-7d38-48e2-a131-7d104f17e8f6"},
Identifier{
Authority: "http://api.ft.com/system/FT-TME",
IdentifierValue: "UGFydHkgcGVvcGxl-QnJhbmRz"},
}
var unconcordedBrandTMEUPP = Concordance{
Concept{
ID: "http://api.ft.com/things/ad56856a-7d38-48e2-a131-7d104f17e8f6",
APIURL: "http://api.ft.com/brands/ad56856a-7d38-48e2-a131-7d104f17e8f6"},
Identifier{
Authority: "http://api.ft.com/system/UPP",
IdentifierValue: "ad56856a-7d38-48e2-a131-7d104f17e8f6"},
}
var expectedConcordanceNAICSIndustryClassification = Concordances{
[]Concordance{
{
Concept{
ID: "http://api.ft.com/things/38ee195d-ebdd-48a9-af4b-c8a322e7b04d",
APIURL: "http://api.ft.com/things/38ee195d-ebdd-48a9-af4b-c8a322e7b04d"},
Identifier{
Authority: "http://api.ft.com/system/SMARTLOGIC",
IdentifierValue: "38ee195d-ebdd-48a9-af4b-c8a322e7b04d"},
},
{
Concept{
ID: "http://api.ft.com/things/38ee195d-ebdd-48a9-af4b-c8a322e7b04d",
APIURL: "http://api.ft.com/things/38ee195d-ebdd-48a9-af4b-c8a322e7b04d"},
Identifier{
Authority: "http://api.ft.com/system/NAICS",
IdentifierValue: "5111"},
},
{
Concept{
ID: "http://api.ft.com/things/38ee195d-ebdd-48a9-af4b-c8a322e7b04d",
APIURL: "http://api.ft.com/things/38ee195d-ebdd-48a9-af4b-c8a322e7b04d"},
Identifier{
Authority: "http://api.ft.com/system/UPP",
IdentifierValue: "38ee195d-ebdd-48a9-af4b-c8a322e7b04d"},
},
},
}
var expectedConcordanceNAICSIndustryClassificationByAuthority = Concordances{
[]Concordance{
{
Concept{
ID: "http://api.ft.com/things/38ee195d-ebdd-48a9-af4b-c8a322e7b04d",
APIURL: "http://api.ft.com/things/38ee195d-ebdd-48a9-af4b-c8a322e7b04d"},
Identifier{
Authority: "http://api.ft.com/system/NAICS",
IdentifierValue: "5111"},
},
},
}
// TestNeoReadByConceptID writes each concept fixture into Neo4j and verifies
// that ReadByConceptID returns the expected concordances for its UUID(s).
func TestNeoReadByConceptID(t *testing.T) {
	conn := getDatabaseConnection(assert.New(t))
	driver := getNeoDriver(assert.New(t))
	// PANIC level keeps the concept service logger silent during tests.
	log := logger.NewUPPLogger("public-concordances-api-test", "PANIC")
	conceptRW := concepts.NewConceptService(conn, log)
	assert.NoError(t, conceptRW.Initialise())
	tests := []struct {
		name        string   // subtest name
		fixture     string   // concept JSON fixture under ./fixtures
		conceptIDs  []string // UUIDs passed to ReadByConceptID
		expectedLen int      // expected number of concordances returned
		expected    Concordances
	}{
		{
			name:        "NewModel_Unconcorded",
			fixture:     "Brand-Unconcorded-ad56856a-7d38-48e2-a131-7d104f17e8f6.json",
			conceptIDs:  []string{"ad56856a-7d38-48e2-a131-7d104f17e8f6"},
			expectedLen: 2,
			expected:    Concordances{[]Concordance{unconcordedBrandTME, unconcordedBrandTMEUPP}},
		},
		{
			name:        "NewModel_Concorded",
			fixture:     "Brand-Concorded-b20801ac-5a76-43cf-b816-8c3b2f7133ad.json",
			conceptIDs:  []string{"b20801ac-5a76-43cf-b816-8c3b2f7133ad"},
			expectedLen: 4,
			expected:    Concordances{[]Concordance{concordedBrandSmartlogic, concordedBrandSmartlogicUPP, concordedBrandTME, concordedBrandTMEUPP}},
		},
		{
			name:        "ManagedLocation",
			fixture:     "ManagedLocation-Concorded-5aba454b-3e31-31b9-bdeb-0caf83f62b44.json",
			conceptIDs:  []string{"5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
			expectedLen: 7,
			expected:    concordedManagedLocationByConceptId,
		},
		{
			name:        "ToConcordancesMandatoryFields",
			fixture:     "Organisation-BankOfTest-cd7e4345-f11f-41f3-a0f0-2cf5c43e0115.json",
			conceptIDs:  []string{"cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
			expectedLen: 7,
			expected:    expectedConcordanceBankOfTest,
		},
		{
			name:        "ReturnMultipleConcordancesForMultipleIdentifiers",
			fixture:     "Organisation-BankOfTest-cd7e4345-f11f-41f3-a0f0-2cf5c43e0115.json",
			conceptIDs:  []string{"cd7e4345-f11f-41f3-a0f0-2cf5c43e0115"},
			expectedLen: 7,
			expected:    expectedConcordanceBankOfTest,
		},
		{
			name:        "NAICSIndustryClassification",
			fixture:     "NAICSIndustryClassification-38ee195d-ebdd-48a9-af4b-c8a322e7b04d.json",
			conceptIDs:  []string{"38ee195d-ebdd-48a9-af4b-c8a322e7b04d"},
			expectedLen: 3,
			expected:    expectedConcordanceNAICSIndustryClassification,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			writeGenericConceptJSONToService(conceptRW, "./fixtures/"+test.fixture, assert.New(t))
			// Remove all fixture nodes once the subtest finishes.
			defer cleanUp(assert.New(t), driver)
			undertest := NewCypherDriver(driver, "prod")
			conc, found, err := undertest.ReadByConceptID(test.conceptIDs)
			assert.NoError(t, err)
			assert.True(t, found)
			assert.Equal(t, test.expectedLen, len(conc.Concordance))
			readConceptAndCompare(t, test.expected, conc, "TestNeoReadByConceptID_"+test.name)
		})
	}
}
// TestNeoReadByAuthority writes each concept fixture into Neo4j and verifies
// that ReadByAuthority returns exactly one concordance per identifier value
// for supported authorities, and nothing for unsupported authorities.
func TestNeoReadByAuthority(t *testing.T) {
	conn := getDatabaseConnection(assert.New(t))
	driver := getNeoDriver(assert.New(t))
	// PANIC level keeps the concept service logger silent during tests.
	log := logger.NewUPPLogger("public-concordances-api-test", "PANIC")
	conceptRW := concepts.NewConceptService(conn, log)
	assert.NoError(t, conceptRW.Initialise())
	tests := []struct {
		name             string   // subtest name
		fixture          string   // concept JSON fixture under ./fixtures
		authority        string   // authority URI passed to ReadByAuthority
		identifierValues []string // identifier values to look up
		expected         Concordances
		// NOTE(review): expectedErr is declared but never asserted in the
		// loop below — confirm whether an error-path case was intended.
		expectedErr bool
	}{
		{
			name:             "NewModel_Concorded",
			fixture:          "Brand-Concorded-b20801ac-5a76-43cf-b816-8c3b2f7133ad.json",
			authority:        "http://api.ft.com/system/SMARTLOGIC",
			identifierValues: []string{"b20801ac-5a76-43cf-b816-8c3b2f7133ad"},
			expected:         Concordances{[]Concordance{concordedBrandSmartlogic}},
		},
		{
			name:             "NewModel_Unconcorded",
			fixture:          "Brand-Unconcorded-ad56856a-7d38-48e2-a131-7d104f17e8f6.json",
			authority:        "http://api.ft.com/system/FT-TME",
			identifierValues: []string{"UGFydHkgcGVvcGxl-QnJhbmRz"},
			expected:         Concordances{[]Concordance{unconcordedBrandTME}},
		},
		{
			name:             "ManagedLocation",
			fixture:          "ManagedLocation-Concorded-5aba454b-3e31-31b9-bdeb-0caf83f62b44.json",
			authority:        "http://api.ft.com/system/MANAGEDLOCATION",
			identifierValues: []string{"5aba454b-3e31-31b9-bdeb-0caf83f62b44"},
			expected:         concordedManagedLocationByAuthority,
		},
		{
			name:             "ISO31661",
			fixture:          "ManagedLocation-Concorded-5aba454b-3e31-31b9-bdeb-0caf83f62b44.json",
			authority:        "http://api.ft.com/system/ISO-3166-1",
			identifierValues: []string{"RO"},
			expected:         concordedManagedLocationByISO31661Authority,
		},
		{
			name:             "ToConcordancesMandatoryField",
			fixture:          "Organisation-BankOfTest-cd7e4345-f11f-41f3-a0f0-2cf5c43e0115.json",
			authority:        "http://api.ft.com/system/FACTSET",
			identifierValues: []string{"7IV872-E"},
			expected:         expectedConcordanceBankOfTestByAuthority,
		},
		{
			name:             "ToConcordancesByUPPAuthority",
			fixture:          "Organisation-BankOfTest-cd7e4345-f11f-41f3-a0f0-2cf5c43e0115.json",
			authority:        "http://api.ft.com/system/UPP",
			identifierValues: []string{"d56e7388-25cb-343e-aea9-8b512e28476e"},
			expected:         expectedConcordanceBankOfTestByUPPAuthority,
		},
		{
			name:             "ToConcordancesByLEIAuthority",
			fixture:          "Organisation-BankOfTest-cd7e4345-f11f-41f3-a0f0-2cf5c43e0115.json",
			authority:        "http://api.ft.com/system/LEI",
			identifierValues: []string{"VNF516RB4DFV5NQ22UF0"},
			expected:         expectedConcordanceBankOfTestByLEIAuthority,
		},
		{
			name:             "OnlyOneConcordancePerIdentifierValue",
			fixture:          "Organisation-BankOfTest-cd7e4345-f11f-41f3-a0f0-2cf5c43e0115.json",
			authority:        "http://api.ft.com/system/FACTSET",
			identifierValues: []string{"7IV872-E"},
			expected:         expectedConcordanceBankOfTestByAuthority,
		},
		{
			name:             "NAICSIndustryClassification",
			fixture:          "NAICSIndustryClassification-38ee195d-ebdd-48a9-af4b-c8a322e7b04d.json",
			authority:        "http://api.ft.com/system/NAICS",
			identifierValues: []string{"5111"},
			expected:         expectedConcordanceNAICSIndustryClassificationByAuthority,
		},
		{
			// No expected concordances: an unsupported authority yields found == false.
			name:             "EmptyConcordancesWhenUnsupportedAuthority",
			fixture:          "Organisation-BankOfTest-cd7e4345-f11f-41f3-a0f0-2cf5c43e0115.json",
			authority:        "http://api.ft.com/system/UnsupportedAuthority",
			identifierValues: []string{"DANMUR-1"},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			writeGenericConceptJSONToService(conceptRW, "./fixtures/"+test.fixture, assert.New(t))
			// Remove all fixture nodes once the subtest finishes.
			defer cleanUp(assert.New(t), driver)
			undertest := NewCypherDriver(driver, "prod")
			conc, found, err := undertest.ReadByAuthority(test.authority, test.identifierValues)
			assert.NoError(t, err)
			// Cases with expected results: exactly one concordance per lookup.
			if len(test.expected.Concordance) > 0 {
				assert.True(t, found)
				assert.Equal(t, 1, len(conc.Concordance))
				readConceptAndCompare(t, test.expected, conc, "TestNeoReadByAuthority_"+test.name)
				return
			}
			// Cases with no expected results: nothing found, empty result.
			assert.False(t, found)
			assert.Empty(t, conc.Concordance)
		})
	}
}
// readConceptAndCompare normalises the ordering of both concordance lists and
// then checks them for deep equality, reporting a descriptive failure message.
func readConceptAndCompare(t *testing.T, expected Concordances, actual Concordances, testName string) {
	sortConcordances(expected.Concordance)
	sortConcordances(actual.Concordance)
	msg := fmt.Sprintf("Actual aggregated concept differs from expected: Test: %v \n Expected: %v \n Actual: %v", testName, expected, actual)
	equal := reflect.DeepEqual(expected, actual)
	assert.True(t, equal, msg)
}
// sortConcordances orders a concordance list deterministically so that two
// lists can be compared with reflect.DeepEqual regardless of database return
// order.
//
// The previous implementation ran three successive stable sorts (by Concept
// ID, then Authority, then IdentifierValue). Because each later stable sort
// becomes the primary key, the effective ordering was: IdentifierValue,
// then Authority, then Concept ID. A single composite comparator expresses
// that same ordering directly and sorts the slice only once.
func sortConcordances(concordanceList []Concordance) {
	sort.SliceStable(concordanceList, func(i, j int) bool {
		a, b := concordanceList[i], concordanceList[j]
		if a.Identifier.IdentifierValue != b.Identifier.IdentifierValue {
			return a.Identifier.IdentifierValue < b.Identifier.IdentifierValue
		}
		if a.Identifier.Authority != b.Identifier.Authority {
			return a.Identifier.Authority < b.Identifier.Authority
		}
		return a.Concept.ID < b.Concept.ID
	})
}
// getNeoDriver builds a cm-neo4j driver for the test Neo4j bolt endpoint,
// honouring NEO4J_TEST_URL when set.
func getNeoDriver(assert *assert.Assertions) *cmneo4j.Driver {
	boltURL := os.Getenv("NEO4J_TEST_URL")
	if len(boltURL) == 0 {
		boltURL = "bolt://localhost:7687"
	}
	testLog := logger.NewUPPLogger("public-concordances-api-test", "PANIC")
	driver, err := cmneo4j.NewDefaultDriver(boltURL, testLog)
	assert.NoError(err, "Failed to connect to Neo4j")
	return driver
}
// writeGenericConceptJSONToService decodes the concept JSON fixture at
// pathToJSONFile and writes it to the given concept service under a fixed
// test transaction ID. Any failure aborts via the supplied assertions.
func writeGenericConceptJSONToService(service concepts.ConceptService, pathToJSONFile string, assert *assert.Assertions) {
	f, err := os.Open(pathToJSONFile)
	assert.NoError(err)
	// BUG FIX: the fixture file handle was previously never closed.
	defer f.Close()
	dec := json.NewDecoder(f)
	inst, _, err := service.DecodeJSON(dec)
	assert.NoError(err)
	_, err = service.Write(inst, "test_transaction_id")
	assert.NoError(err)
}
// cleanUp deletes every fixture concept written by the tests: canonical
// concept nodes together with their source nodes and attached identifier
// nodes, plus stand-alone Thing nodes.
func cleanUp(assert *assert.Assertions, driver *cmneo4j.Driver) {
	conceptUUIDs := []string{
		"cd7e4345-f11f-41f3-a0f0-2cf5c43e0115",
		"5aba454b-3e31-31b9-bdeb-0caf83f62b44",
		"b20801ac-5a76-43cf-b816-8c3b2f7133ad",
		"ad56856a-7d38-48e2-a131-7d104f17e8f6",
		"38ee195d-ebdd-48a9-af4b-c8a322e7b04d",
	}
	thingUUIDs := []string{
		"dbb0bdae-1f0c-11e4-b0cb-b2227cce2b54",
	}

	var queries []*cmneo4j.Query
	// Concepts with canonical nodes.
	for _, uuid := range conceptUUIDs {
		queries = append(queries, &cmneo4j.Query{
			Cypher: `
			MATCH (canonical:Concept{prefUUID:$uuid})--(source)
			OPTIONAL MATCH (source)<-[:IDENTIFIES]-(identifier)
			DETACH DELETE canonical, source, identifier`,
			Params: map[string]interface{}{"uuid": uuid},
		})
	}
	// Plain Things without a canonical node.
	for _, uuid := range thingUUIDs {
		queries = append(queries, &cmneo4j.Query{
			Cypher: `
			MATCH (source:Thing{uuid:$uuid})
			OPTIONAL MATCH (source)<-[:IDENTIFIES]-(identifier)
			DETACH DELETE source, identifier`,
			Params: map[string]interface{}{"uuid": uuid},
		})
	}
	assert.NoError(driver.Write(queries...))
}
// getDatabaseConnection opens a non-transactional neoutils connection to the
// test Neo4j HTTP endpoint, honouring NEO4J_TEST_URL when set.
func getDatabaseConnection(assert *assert.Assertions) neoutils.NeoConnection {
	url := os.Getenv("NEO4J_TEST_URL")
	if url == "" {
		// Default local Neo4j HTTP API endpoint.
		url = "http://localhost:7474/db/data"
	}
	conf := neoutils.DefaultConnectionConfig()
	// Test writes are performed outside explicit transactions.
	conf.Transactional = false
	db, err := neoutils.Connect(url, conf, nil)
	assert.NoError(err, "Failed to connect to Neo4j")
	return db
}
|
package pingbeat
import (
"errors"
"github.com/davecgh/go-spew/spew"
"github.com/elastic/beats/libbeat/beat"
"github.com/elastic/beats/libbeat/cfgfile"
"github.com/elastic/beats/libbeat/common"
"github.com/elastic/beats/libbeat/logp"
"github.com/elastic/beats/libbeat/publisher"
cfg "github.com/joshuar/pingbeat/config"
"github.com/oschwald/geoip2-golang"
"golang.org/x/net/icmp"
"golang.org/x/net/ipv4"
"golang.org/x/net/ipv6"
"gopkg.in/go-playground/pool.v3"
"net"
"os"
"time"
)
// Pingbeat struct contains all options and
// hosts to ping
type Pingbeat struct {
	isAlive     bool                 // NOTE(review): never read in this file — confirm it is still needed
	useIPv4     bool                 // send IPv4 echo requests
	useIPv6     bool                 // send IPv6 echo requests
	period      time.Duration        // interval between ping batches (>= 1s, default 10s)
	ipv4network string               // "ip4:icmp" (privileged) or "udp4"
	ipv6network string               // "ip6:ipv6-icmp" (privileged) or "udp6"
	ipv4targets map[string][2]string // IPv4 address -> [target name, tag]
	ipv6targets map[string][2]string // IPv6 address -> [target name, tag]
	geoipdb     *geoip2.Reader       // optional GeoIP database handle
	config      cfg.ConfigSettings   // parsed configuration file
	events      publisher.Client     // libbeat event publisher
	done        chan struct{}        // closed by Stop to make Run exit
}
// type PingSent struct {
// Seq int
// Target string
// Type icmp.Type
// Sent time.Time
// }
// PingRecv describes the outcome of one received ICMP reply.
type PingRecv struct {
	Seq        int    // ICMP echo sequence number of the reply
	Target     string // address the reply came from
	Loss       bool   // true when the reply indicates a failed ping
	LossReason string // human-readable reason when Loss is true
}
// New returns an empty Pingbeat, ready to be configured via Config.
func New() *Pingbeat {
	pb := Pingbeat{}
	return &pb
}
// Config reads in the pingbeat configuration file, validating
// configuration parameters and setting default values where needed.
func (p *Pingbeat) Config(b *beat.Beat) error {
	// Read in provided config file, bail if problem
	err := cfgfile.Read(&p.config, "")
	if err != nil {
		logp.Err("Error reading configuration file: %v", err)
		return err
	}

	// Use period provided in config or default to 10s
	if p.config.Input.Period != nil {
		duration, err := time.ParseDuration(*p.config.Input.Period)
		p.period = duration
		if duration < time.Second || err != nil {
			logp.Warn("Config: Error parsing period or period too small: %v. Setting to default 10s", duration)
			p.period = 10 * time.Second
		}
	} else {
		logp.Warn("Config: No period set. Setting to default 10s")
		p.period = 10 * time.Second
	}
	logp.Debug("pingbeat", "Period %v\n", p.period)

	// Check if we can use privileged (i.e. raw socket) ping,
	// else use a UDP ping.
	// BUG FIX: the Privileged pointer was dereferenced unconditionally and
	// panicked when absent from the config; treat "unset" as unprivileged.
	privileged := p.config.Input.Privileged != nil && *p.config.Input.Privileged
	if privileged {
		if os.Getuid() != 0 {
			return errors.New("Privileged set but not running with privileges!")
		}
		p.ipv4network = "ip4:icmp"
		p.ipv6network = "ip6:ipv6-icmp"
	} else {
		p.ipv4network = "udp4"
		p.ipv6network = "udp6"
	}
	logp.Debug("pingbeat", "Using %v and/or %v for pings\n", p.ipv4network, p.ipv6network)

	// Check whether IPv4/IPv6 pings are requested in config.
	// Default to just IPv4 pings.
	// BUG FIX: the original tested `&p.config.Input.UseIPv4 != nil`, which is
	// always true (the address of a struct field is never nil), so the
	// defaults were unreachable; test the pointer field itself.
	if p.config.Input.UseIPv4 != nil {
		p.useIPv4 = *p.config.Input.UseIPv4
	} else {
		p.useIPv4 = true
	}
	if p.config.Input.UseIPv6 != nil {
		p.useIPv6 = *p.config.Input.UseIPv6
	} else {
		p.useIPv6 = false
	}
	logp.Debug("pingbeat", "Using IPv4: %v. Using IPv6: %v\n", p.useIPv4, p.useIPv6)

	// Fill the IPv4/IPv6 targets maps
	p.ipv4targets = make(map[string][2]string)
	p.ipv6targets = make(map[string][2]string)
	if p.config.Input.Targets == nil {
		return errors.New("No targets specified, cannot continue!")
	}
	for tag, targets := range *p.config.Input.Targets {
		for _, target := range targets {
			p.AddTarget(target, tag)
		}
	}

	// Check and load the GeoIP database.
	// BUG FIX: the original `defer db.Close()` closed the database as soon as
	// Config returned, leaving p.geoipdb pointing at a closed reader; keep
	// the handle open for the lifetime of the beat.
	if p.config.Input.GeoIPDB != nil {
		db, err := geoip2.Open(*p.config.Input.GeoIPDB)
		if err != nil {
			return err
		}
		p.geoipdb = db
	}
	return nil
}
// Setup performs boilerplate Beats setup
func (p *Pingbeat) Setup(b *beat.Beat) error {
	p.events = b.Events
	// done is closed by Stop to signal Run to exit.
	p.done = make(chan struct{})
	return nil
}
// Run is the main pingbeat loop: every period it queues echo requests to all
// configured targets, collects the replies, publishes an RTT event for each
// successful ping and loss events for the rest. It exits when Stop closes
// p.done.
func (p *Pingbeat) Run(b *beat.Beat) error {
	spool := pool.New()
	defer spool.Close()
	rpool := pool.New()
	defer rpool.Close()

	ticker := time.NewTicker(p.period)
	defer ticker.Stop()

	// createConn opens an ICMP listener, logging and returning nil on failure.
	createConn := func(n string, a string) *icmp.PacketConn {
		c, err := icmp.ListenPacket(n, a)
		if err != nil {
			logp.Err("Error creating connection: %v", err)
			return nil
		}
		return c
	}

	// BUG FIX: the original unconditionally deferred Close on connections
	// that could be nil (listen failure) or zero-valued (protocol disabled),
	// which panics; only use and close connections that were actually opened,
	// and fail fast when a required listener cannot be created.
	var c4, c6 *icmp.PacketConn
	if p.useIPv4 {
		if c4 = createConn(p.ipv4network, "0.0.0.0"); c4 == nil {
			return errors.New("Run: could not create IPv4 connection")
		}
		defer c4.Close()
	}
	if p.useIPv6 {
		if c6 = createConn(p.ipv6network, "::"); c6 == nil {
			return errors.New("Run: could not create IPv6 connection")
		}
		defer c6.Close()
	}

	state := NewPingState()

	for {
		select {
		case <-p.done:
			// The deferred Stop/Close calls above release all resources.
			return nil
		case <-ticker.C:
			sendBatch := spool.Batch()
			if p.useIPv4 {
				go p.QueueRequests(state, c4, sendBatch)
			}
			if p.useIPv6 {
				go p.QueueRequests(state, c6, sendBatch)
			}
			for result := range sendBatch.Results() {
				if err := result.Error(); err != nil {
					logp.Err("Send unsuccessful: %v", err)
					continue
				}
				// The sender reports its ICMP type, telling us which
				// connection the matching reply will arrive on.
				var recv pool.WorkUnit
				switch result.Value().(icmp.Type) {
				case ipv4.ICMPTypeEcho:
					recv = rpool.Queue(RecvPing(c4))
				case ipv6.ICMPTypeEchoRequest:
					recv = rpool.Queue(RecvPing(c6))
				default:
					// BUG FIX: the original fell through with a nil WorkUnit
					// and panicked on recv.Wait(); skip this result instead.
					logp.Err("Invalid ICMP message type")
					continue
				}
				recv.Wait()
				if err := recv.Error(); err != nil {
					logp.Err("Recv unsuccessful: %v", err)
					continue
				}
				// BUG FIX: guard the type assertion — a receiver may yield a
				// nil value (e.g. cancelled work unit), which used to panic.
				ping, ok := recv.Value().(*PingRecv)
				if !ok || ping == nil {
					continue
				}
				if !ping.Loss {
					target := ping.Target
					state.MU.Lock()
					rtt := time.Since(state.Pings[ping.Seq].Sent)
					delete(state.Pings, ping.Seq)
					state.MU.Unlock()
					go p.ProcessPing(target, rtt)
				}
			}
			// Anything still recorded in state was never answered this round.
			p.ProcessMissing(state)
		}
	}
}
// Cleanup runs at beat shutdown; Pingbeat has nothing to release here.
func (p *Pingbeat) Cleanup(b *beat.Beat) error {
	return nil
}
// Stop signals Run to exit by closing the done channel.
func (p *Pingbeat) Stop() {
	close(p.done)
}
// AddTarget takes a target name and tag, fetches the IP addresses associated
// with it and adds them to the Pingbeat struct
func (p *Pingbeat) AddTarget(target string, tag string) {
	// If the target parses cleanly as a literal IP address, register it
	// directly without a DNS lookup.
	if addr := net.ParseIP(target); addr.String() == target {
		if addr.To4() != nil && p.useIPv4 {
			p.ipv4targets[addr.String()] = [2]string{target, tag}
		} else if p.useIPv6 {
			// NOTE(review): a literal IPv4 address with useIPv4 disabled
			// lands in the IPv6 map here — confirm that is intended.
			p.ipv6targets[addr.String()] = [2]string{target, tag}
		}
	} else {
		// Otherwise treat target as a hostname: FetchIPs resolves it via DNS
		// and streams each address down the matching channel, sending the
		// sentinel "done" on both channels when finished.
		ip4addr := make(chan string)
		ip6addr := make(chan string)
		go FetchIPs(ip4addr, ip6addr, target)
	lookup:
		for {
			select {
			case ip := <-ip4addr:
				if ip == "done" {
					break lookup
				} else if p.useIPv4 {
					logp.Debug("pingbeat", "Target %s has an IPv4 address %s\n", target, ip)
					p.ipv4targets[ip] = [2]string{target, tag}
				}
			case ip := <-ip6addr:
				if ip == "done" {
					break lookup
				} else if p.useIPv6 {
					logp.Debug("pingbeat", "Target %s has an IPv6 address %s\n", target, ip)
					p.ipv6targets[ip] = [2]string{target, tag}
				}
			}
		}
	}
}
// FetchDetails returns the configured target name and tag for the given
// address, checking the IPv4 targets first and then the IPv6 targets.
// Unknown addresses are logged and reported as ("err", "err").
func (p *Pingbeat) FetchDetails(addr string) (string, string) {
	if details, ok := p.ipv4targets[addr]; ok {
		return details[0], details[1]
	}
	if details, ok := p.ipv6targets[addr]; ok {
		return details[0], details[1]
	}
	logp.Err("Error: %s not found in Pingbeat targets!", addr)
	return "err", "err"
}
// milliSeconds converts seconds to milliseconds
func milliSeconds(d time.Duration) float64 {
msec := d / time.Millisecond
nsec := d % time.Millisecond
return float64(msec) + float64(nsec)*1e-6
}
// FetchIPs resolves target via DNS and streams each resulting address down
// ip4addr or ip6addr as appropriate. When resolution finishes (or fails) it
// sends the sentinel "done" on both channels and closes them.
func FetchIPs(ip4addr, ip6addr chan string, target string) {
	addrs, err := net.LookupIP(target)
	if err != nil {
		logp.Warn("Failed to resolve %s to IP address, ignoring this target.\n", target)
	} else {
		for _, addr := range addrs {
			if addr.To4() != nil {
				ip4addr <- addr.String()
			} else {
				ip6addr <- addr.String()
			}
		}
	}
	// Signal completion to both consumers, then close.
	ip4addr <- "done"
	close(ip4addr)
	ip6addr <- "done"
	close(ip6addr)
}
// ProcessPing publishes a pingbeat event carrying the round-trip time (in
// milliseconds) of a successful ping to target.
func (p *Pingbeat) ProcessPing(target string, rtt time.Duration) {
	name, tag := p.FetchDetails(target)
	event := common.MapStr{
		"@timestamp":  common.Time(time.Now().UTC()),
		"type":        "pingbeat",
		"target_name": name,
		"target_addr": target,
		"tag":         tag,
		"rtt":         milliSeconds(rtt),
	}
	p.events.PublishEvent(event)
}
// ProcessMissing publishes a loss event for every ping still outstanding in
// state.Pings and removes those entries.
//
// BUG FIX: the original ranged over state.Pings without holding the mutex
// while QueueRequests concurrently inserts into the same map under the lock —
// a data race. Drain the map under a single lock, then publish the events
// outside the critical section.
func (p *Pingbeat) ProcessMissing(state *PingState) {
	missing := make(map[int]string)
	state.MU.Lock()
	for seqNo, details := range state.Pings {
		missing[seqNo] = details.Target
		delete(state.Pings, seqNo)
	}
	state.MU.Unlock()
	for _, target := range missing {
		name, tag := p.FetchDetails(target)
		event := common.MapStr{
			"@timestamp":  common.Time(time.Now().UTC()),
			"type":        "pingbeat",
			"target_name": name,
			"target_addr": target,
			"tag":         tag,
			"loss":        true,
		}
		p.events.PublishEvent(event)
	}
}
// QueueRequests queues one echo request to every target matching the address
// family of conn, recording each outstanding ping in state, and marks the
// batch complete once all requests are queued.
func (p *Pingbeat) QueueRequests(state *PingState, conn *icmp.PacketConn, batch pool.Batch) {
	// Determine the address family of the connection.
	// BUG FIX: the second case originally re-tested IPv4PacketConn, so IPv6
	// connections were never recognised; test IPv6PacketConn instead.
	var network string
	var pingType icmp.Type
	switch {
	case conn.IPv4PacketConn() != nil:
		pingType = ipv4.ICMPTypeEcho
	case conn.IPv6PacketConn() != nil:
		pingType = ipv6.ICMPTypeEchoRequest
	default:
		logp.Err("QueueRequests: Unknown connection type")
	}

	// Pick the target map and network that match the ICMP type. On an
	// unknown type the map stays empty, the loop is a no-op and the batch is
	// still marked complete so consumers of batch.Results() do not block.
	targets := make(map[string][2]string)
	switch pingType {
	case ipv4.ICMPTypeEcho:
		targets = p.ipv4targets
		network = p.ipv4network
	case ipv6.ICMPTypeEchoRequest:
		targets = p.ipv6targets
		network = p.ipv6network
	default:
		logp.Err("QueueTargets: Invalid ICMP message type")
	}

	for addr := range targets {
		seq := state.GetSeqNo()
		req, err := NewPingRequest(seq, pingType, addr, network)
		if err != nil {
			logp.Err("QueueTargets: %v", err)
		}
		batch.Queue(SendPing(conn, p.period, req))
		state.MU.Lock()
		state.Pings[seq] = NewPingRecord(addr)
		state.MU.Unlock()
	}
	batch.QueueComplete()
}
// SendPing returns a pool work function that writes the prepared echo request
// to conn and arms the read deadline used by the matching receive. On success
// it yields the request's ICMP type so the caller knows which connection the
// reply will arrive on.
func SendPing(conn *icmp.PacketConn, timeout time.Duration, req *PingRequest) pool.WorkFunc {
	return func(wu pool.WorkUnit) (interface{}, error) {
		if wu.IsCancelled() {
			logp.Debug("pingbeat", "SendPing: workunit cancelled")
			return nil, nil
		}
		if _, err := conn.WriteTo(req.binary_payload, req.addr); err != nil {
			return nil, err
		}
		if err := conn.SetReadDeadline(time.Now().Add(timeout)); err != nil {
			return nil, err
		}
		return req.ping_type, nil
	}
}
// RecvPing returns a pool work function that reads one ICMP message from conn
// and classifies it, yielding a *PingRecv that describes either a successful
// echo reply or a loss (time exceeded, packet too big, destination
// unreachable).
func RecvPing(conn *icmp.PacketConn) pool.WorkFunc {
	return func(wu pool.WorkUnit) (interface{}, error) {
		if wu.IsCancelled() {
			logp.Debug("pingbeat", "RecvPing: workunit cancelled")
			return nil, nil
		}

		// Determine the address family of the connection.
		// BUG FIX: the second case originally re-tested IPv4PacketConn, so an
		// IPv6 connection fell into the error branch; test IPv6PacketConn.
		var pingType icmp.Type
		switch {
		case conn.IPv4PacketConn() != nil:
			pingType = ipv4.ICMPTypeEcho
		case conn.IPv6PacketConn() != nil:
			pingType = ipv6.ICMPTypeEchoRequest
		default:
			return nil, errors.New("Unknown connection type")
		}

		rep, err := NewPingReply(pingType)
		if err != nil {
			return nil, err
		}
		n, peer, err := conn.ReadFrom(rep.binary_payload)
		if err != nil {
			return nil, err
		}
		rep.target = peer.String()
		rm, err := icmp.ParseMessage(rep.ping_type.Protocol(), rep.binary_payload[:n])
		if err != nil {
			return nil, err
		}
		rep.text_payload = rm

		// BUG FIX: the loss branches originally returned (nil, nil), which
		// made the caller's type assertion on the work-unit value panic;
		// return the populated PingRecv so the caller can inspect ping.Loss.
		ping := &PingRecv{Target: rep.target}
		switch body := rep.text_payload.Body.(type) {
		case *icmp.TimeExceeded:
			ping.Loss = true
			ping.LossReason = "Time Exceeded"
			return ping, nil
		case *icmp.PacketTooBig:
			ping.Loss = true
			ping.LossReason = "Packet Too Big"
			return ping, nil
		case *icmp.DstUnreach:
			ping.Loss = true
			ping.LossReason = "Destination Unreachable"
			// Dump the embedded header of the offending datagram for
			// debugging. BUG FIX: guard the slice so short payloads cannot
			// panic on d[:len(d)-8].
			if d := body.Data; len(d) >= 8 {
				header, _ := ipv4.ParseHeader(d[:len(d)-8])
				spew.Dump(header)
			}
			return ping, nil
		case *icmp.Echo:
			ping.Seq = body.Seq
			ping.Loss = false
			return ping, nil
		default:
			return nil, errors.New("Unknown ICMP Packet")
		}
	}
}
Some more code simplification.
package pingbeat
import (
"errors"
"github.com/davecgh/go-spew/spew"
"github.com/elastic/beats/libbeat/beat"
"github.com/elastic/beats/libbeat/cfgfile"
"github.com/elastic/beats/libbeat/common"
"github.com/elastic/beats/libbeat/logp"
"github.com/elastic/beats/libbeat/publisher"
cfg "github.com/joshuar/pingbeat/config"
"github.com/oschwald/geoip2-golang"
"golang.org/x/net/icmp"
"golang.org/x/net/ipv4"
"golang.org/x/net/ipv6"
"gopkg.in/go-playground/pool.v3"
"net"
"os"
"time"
)
// Pingbeat struct contains all options and
// hosts to ping
type Pingbeat struct {
isAlive bool
useIPv4 bool
useIPv6 bool
period time.Duration
ipv4network string
ipv6network string
ipv4targets map[string][2]string
ipv6targets map[string][2]string
geoipdb *geoip2.Reader
config cfg.ConfigSettings
events publisher.Client
done chan struct{}
}
// PingSent records one outstanding echo request: its sequence number, the
// destination address and the time it was sent.
type PingSent struct {
	Seq    int       // ICMP echo sequence number
	Target net.Addr  // destination address of the request
	Sent   time.Time // when the request was sent
}
type PingRecv struct {
Seq int
Target string
Loss bool
LossReason string
}
func New() *Pingbeat {
return &Pingbeat{}
}
// Config reads in the pingbeat configuration file, validating
// configuration parameters and setting default values where needed.
func (p *Pingbeat) Config(b *beat.Beat) error {
	// Read in provided config file, bail if problem
	err := cfgfile.Read(&p.config, "")
	if err != nil {
		logp.Err("Error reading configuration file: %v", err)
		return err
	}

	// Use period provided in config or default to 10s
	if p.config.Input.Period != nil {
		duration, err := time.ParseDuration(*p.config.Input.Period)
		p.period = duration
		if duration < time.Second || err != nil {
			logp.Warn("Config: Error parsing period or period too small: %v. Setting to default 10s", duration)
			p.period = 10 * time.Second
		}
	} else {
		logp.Warn("Config: No period set. Setting to default 10s")
		p.period = 10 * time.Second
	}
	logp.Debug("pingbeat", "Period %v\n", p.period)

	// Check if we can use privileged (i.e. raw socket) ping,
	// else use a UDP ping.
	// BUG FIX: the Privileged pointer was dereferenced unconditionally and
	// panicked when absent from the config; treat "unset" as unprivileged.
	privileged := p.config.Input.Privileged != nil && *p.config.Input.Privileged
	if privileged {
		if os.Getuid() != 0 {
			return errors.New("Privileged set but not running with privileges!")
		}
		p.ipv4network = "ip4:icmp"
		p.ipv6network = "ip6:ipv6-icmp"
	} else {
		p.ipv4network = "udp4"
		p.ipv6network = "udp6"
	}
	logp.Debug("pingbeat", "Using %v and/or %v for pings\n", p.ipv4network, p.ipv6network)

	// Check whether IPv4/IPv6 pings are requested in config.
	// Default to just IPv4 pings.
	// BUG FIX: the original tested `&p.config.Input.UseIPv4 != nil`, which is
	// always true (the address of a struct field is never nil), so the
	// defaults were unreachable; test the pointer field itself.
	if p.config.Input.UseIPv4 != nil {
		p.useIPv4 = *p.config.Input.UseIPv4
	} else {
		p.useIPv4 = true
	}
	if p.config.Input.UseIPv6 != nil {
		p.useIPv6 = *p.config.Input.UseIPv6
	} else {
		p.useIPv6 = false
	}
	logp.Debug("pingbeat", "Using IPv4: %v. Using IPv6: %v\n", p.useIPv4, p.useIPv6)

	// Fill the IPv4/IPv6 targets maps
	p.ipv4targets = make(map[string][2]string)
	p.ipv6targets = make(map[string][2]string)
	if p.config.Input.Targets == nil {
		return errors.New("No targets specified, cannot continue!")
	}
	for tag, targets := range *p.config.Input.Targets {
		for _, target := range targets {
			p.AddTarget(target, tag)
		}
	}

	// Check and load the GeoIP database.
	// BUG FIX: the original `defer db.Close()` closed the database as soon as
	// Config returned, leaving p.geoipdb pointing at a closed reader; keep
	// the handle open for the lifetime of the beat.
	if p.config.Input.GeoIPDB != nil {
		db, err := geoip2.Open(*p.config.Input.GeoIPDB)
		if err != nil {
			return err
		}
		p.geoipdb = db
	}
	return nil
}
// Setup performs boilerplate Beats setup
func (p *Pingbeat) Setup(b *beat.Beat) error {
p.events = b.Events
p.done = make(chan struct{})
return nil
}
// Run is the main Pingbeat loop: on every tick it queues echo requests to
// all configured targets, collects the replies, and publishes RTT (or loss)
// events until Stop closes p.done.
//
// Fixes: an unrecognised ICMP type previously fell through with a nil
// WorkUnit and panicked on recv.Wait(); a failed icmp.ListenPacket left a
// nil connection that was later written to and Closed.
func (p *Pingbeat) Run(b *beat.Beat) error {
	spool := pool.New()
	defer spool.Close()
	rpool := pool.New()
	defer rpool.Close()
	ticker := time.NewTicker(p.period)
	defer ticker.Stop()
	// createConn opens an ICMP listener on the given network/address,
	// returning nil on failure.
	createConn := func(n string, a string) *icmp.PacketConn {
		c, err := icmp.ListenPacket(n, a)
		if err != nil {
			logp.Err("Error creating connection: %v", err)
			return nil
		}
		return c
	}
	var c4, c6 *icmp.PacketConn
	if p.useIPv4 {
		// Fail fast: every later WriteTo/Close on a nil conn would panic.
		if c4 = createConn(p.ipv4network, "0.0.0.0"); c4 == nil {
			return errors.New("Run: could not create IPv4 connection")
		}
		defer c4.Close()
	}
	if p.useIPv6 {
		if c6 = createConn(p.ipv6network, "::"); c6 == nil {
			return errors.New("Run: could not create IPv6 connection")
		}
		defer c6.Close()
	}
	state := NewPingState()
	for {
		select {
		case <-p.done:
			// Deferred Stop/Close calls perform the cleanup.
			return nil
		case <-ticker.C:
			sendBatch := spool.Batch()
			if p.useIPv4 {
				go p.QueueRequests(state, c4, sendBatch)
			}
			if p.useIPv6 {
				go p.QueueRequests(state, c6, sendBatch)
			}
			for result := range sendBatch.Results() {
				if err := result.Error(); err != nil {
					logp.Err("Send unsuccessful: %v", err)
					continue
				}
				var recv pool.WorkUnit
				switch result.Value().(icmp.Type) {
				case ipv4.ICMPTypeEcho:
					recv = rpool.Queue(RecvPing(c4))
				case ipv6.ICMPTypeEchoRequest:
					recv = rpool.Queue(RecvPing(c6))
				default:
					// Previously fell through with recv == nil and
					// panicked on recv.Wait(); skip this result instead.
					logp.Err("Invalid ICMP message type")
					continue
				}
				recv.Wait()
				if err := recv.Error(); err != nil {
					logp.Err("Recv unsuccessful: %v", err)
					continue
				}
				ping := recv.Value().(*PingRecv)
				if !ping.Loss {
					target := ping.Target
					state.MU.Lock()
					rtt := time.Since(state.Pings[ping.Seq].Sent)
					delete(state.Pings, ping.Seq)
					state.MU.Unlock()
					go p.ProcessPing(target, rtt)
				}
			}
			// Anything still outstanding after the batch is reported lost.
			p.ProcessMissing(state)
		}
	}
}
// Cleanup performs boilerplate Beats teardown. Pingbeat holds no state
// that needs explicit release here: connections and pools are closed by
// deferred calls inside Run.
func (p *Pingbeat) Cleanup(b *beat.Beat) error {
	return nil
}
// Stop signals Run to exit by closing the done channel created in Setup.
func (p *Pingbeat) Stop() {
	close(p.done)
}
// AddTarget takes a target name and tag, fetches the IP addresses associated
// with it and adds them to the Pingbeat target maps.
//
// An IP literal is filed directly under its address family (honouring the
// useIPv4/useIPv6 flags); anything else is resolved via DNS (FetchIPs) and
// every returned address is recorded.
//
// Fixes: a disabled-IPv4 literal could previously land in ipv6targets, and
// breaking out of the receive loop on the first "done" left the FetchIPs
// goroutine blocked forever on the other channel.
func (p *Pingbeat) AddTarget(target string, tag string) {
	if addr := net.ParseIP(target); addr != nil {
		if addr.To4() != nil {
			if p.useIPv4 {
				p.ipv4targets[addr.String()] = [2]string{target, tag}
			}
		} else if p.useIPv6 {
			p.ipv6targets[addr.String()] = [2]string{target, tag}
		}
		return
	}
	ip4addr := make(chan string)
	ip6addr := make(chan string)
	go FetchIPs(ip4addr, ip6addr, target)
	// Drain BOTH channels until each has delivered its "done" sentinel; a
	// nil channel disables its select case, so the loop ends only when both
	// sides have finished and FetchIPs cannot leak.
	for ip4addr != nil || ip6addr != nil {
		select {
		case ip, ok := <-ip4addr:
			if !ok || ip == "done" {
				ip4addr = nil
			} else if p.useIPv4 {
				logp.Debug("pingbeat", "Target %s has an IPv4 address %s\n", target, ip)
				p.ipv4targets[ip] = [2]string{target, tag}
			}
		case ip, ok := <-ip6addr:
			if !ok || ip == "done" {
				ip6addr = nil
			} else if p.useIPv6 {
				logp.Debug("pingbeat", "Target %s has an IPv6 address %s\n", target, ip)
				p.ipv6targets[ip] = [2]string{target, tag}
			}
		}
	}
}
// FetchDetails returns the host name and tag associated with addr,
// consulting the IPv4 target map first and then the IPv6 one. Addresses
// not present in either map are logged and reported as "err"/"err".
func (p *Pingbeat) FetchDetails(addr string) (string, string) {
	if details, ok := p.ipv4targets[addr]; ok {
		return details[0], details[1]
	}
	if details, ok := p.ipv6targets[addr]; ok {
		return details[0], details[1]
	}
	logp.Err("Error: %s not found in Pingbeat targets!", addr)
	return "err", "err"
}
// milliSeconds converts a time.Duration into fractional milliseconds,
// combining the whole-millisecond part with the sub-millisecond remainder.
func milliSeconds(d time.Duration) float64 {
	wholeMs := d / time.Millisecond
	remNs := d % time.Millisecond
	return float64(wholeMs) + float64(remNs)*1e-6
}
// FetchIPs resolves target via DNS and streams each resulting address down
// the channel matching its family (ip4addr or ip6addr). When resolution
// fails the target is skipped with a warning. Either way a final "done"
// sentinel is sent on both channels before they are closed.
func FetchIPs(ip4addr, ip6addr chan string, target string) {
	addrs, err := net.LookupIP(target)
	if err != nil {
		logp.Warn("Failed to resolve %s to IP address, ignoring this target.\n", target)
	} else {
		for _, addr := range addrs {
			if addr.To4() != nil {
				ip4addr <- addr.String()
			} else {
				ip6addr <- addr.String()
			}
		}
	}
	ip4addr <- "done"
	close(ip4addr)
	ip6addr <- "done"
	close(ip6addr)
}
// ProcessPing publishes one successful ping measurement as a "pingbeat"
// event carrying the target's name/tag and the round-trip time in
// milliseconds.
func (p *Pingbeat) ProcessPing(target string, rtt time.Duration) {
	name, tag := p.FetchDetails(target)
	event := common.MapStr{}
	event["@timestamp"] = common.Time(time.Now().UTC())
	event["type"] = "pingbeat"
	event["target_name"] = name
	event["target_addr"] = target
	event["tag"] = tag
	event["rtt"] = milliSeconds(rtt)
	p.events.PublishEvent(event)
}
// ProcessMissing publishes a loss event for every ping still outstanding
// in state and clears the outstanding-ping table.
//
// Bug fix: the map is now snapshotted and cleared under a single lock.
// Previously the range iterated state.Pings WITHOUT holding state.MU while
// QueueRequests goroutines were inserting into it — a data race on the map.
func (p *Pingbeat) ProcessMissing(state *PingState) {
	state.MU.Lock()
	targets := make([]string, 0, len(state.Pings))
	for seqNo, details := range state.Pings {
		targets = append(targets, details.Target)
		delete(state.Pings, seqNo)
	}
	state.MU.Unlock()
	// Publish outside the lock so event publishing cannot stall senders.
	for _, target := range targets {
		name, tag := p.FetchDetails(target)
		event := common.MapStr{
			"@timestamp":  common.Time(time.Now().UTC()),
			"type":        "pingbeat",
			"target_name": name,
			"target_addr": target,
			"tag":         tag,
			"loss":        true,
		}
		p.events.PublishEvent(event)
	}
}
// QueueRequests queues one SendPing work unit per target for the address
// family of conn, recording each outstanding sequence number in state.
// QueueComplete is always called so batch consumers are released.
//
// Bug fix: the second switch case tested IPv4PacketConn twice, so the IPv6
// branch was unreachable and IPv6 connections fell through to the default.
func (p *Pingbeat) QueueRequests(state *PingState, conn *icmp.PacketConn, batch pool.Batch) {
	var network string
	var targets map[string][2]string
	switch {
	case conn.IPv4PacketConn() != nil:
		targets = p.ipv4targets
		network = p.ipv4network
	case conn.IPv6PacketConn() != nil:
		targets = p.ipv6targets
		network = p.ipv6network
	default:
		logp.Err("QueueRequests: Unknown connection type")
		batch.QueueComplete()
		return
	}
	for addr := range targets {
		seq := state.GetSeqNo()
		batch.Queue(SendPing(conn, p.period, seq, addr, network))
		state.MU.Lock()
		state.Pings[seq] = NewPingRecord(addr)
		state.MU.Unlock()
	}
	batch.QueueComplete()
}
// SendPing returns a pool.WorkFunc that sends one ICMP echo request with
// the given sequence number to addr over conn, then arms the read deadline
// used by the matching RecvPing. On success it yields the ICMP echo type.
//
// Fixes: the duplicated IPv4PacketConn case made the IPv6 branch
// unreachable, and a NewPingRequest failure was logged but the nil request
// was still used.
func SendPing(conn *icmp.PacketConn, timeout time.Duration, seq int, addr string, net string) pool.WorkFunc {
	return func(wu pool.WorkUnit) (interface{}, error) {
		if wu.IsCancelled() {
			logp.Debug("pingbeat", "SendPing: workunit cancelled")
			return nil, nil
		}
		// Pick the echo type matching the connection's address family.
		var ping_type icmp.Type
		switch {
		case conn.IPv4PacketConn() != nil:
			ping_type = ipv4.ICMPTypeEcho
		case conn.IPv6PacketConn() != nil:
			ping_type = ipv6.ICMPTypeEchoRequest
		default:
			return nil, errors.New("SendPing: unknown connection type")
		}
		req, err := NewPingRequest(seq, ping_type, addr, net)
		if err != nil {
			logp.Err("SendPing: %v", err)
			return nil, err
		}
		if _, err := conn.WriteTo(req.binary_payload, req.addr); err != nil {
			return nil, err
		}
		if err := conn.SetReadDeadline(time.Now().Add(timeout)); err != nil {
			return nil, err
		}
		return req.ping_type, nil
	}
}
// RecvPing returns a pool.WorkFunc that reads one ICMP reply from conn and
// decodes it into a *PingRecv: an Echo reply yields Loss == false with the
// sequence number and peer address; error replies yield Loss == true with
// a reason.
//
// Fixes: the duplicated IPv4PacketConn case made the IPv6 branch
// unreachable, and the loss cases returned (nil, nil) — the caller's
// recv.Value().(*PingRecv) assertion then panicked on the nil value.
func RecvPing(conn *icmp.PacketConn) pool.WorkFunc {
	return func(wu pool.WorkUnit) (interface{}, error) {
		if wu.IsCancelled() {
			logp.Debug("pingbeat", "RecvPing: workunit cancelled")
			return nil, nil
		}
		var ping_type icmp.Type
		switch {
		case conn.IPv4PacketConn() != nil:
			ping_type = ipv4.ICMPTypeEcho
		case conn.IPv6PacketConn() != nil:
			ping_type = ipv6.ICMPTypeEchoRequest
		default:
			return nil, errors.New("Unknown connection type")
		}
		rep, err := NewPingReply(ping_type)
		if err != nil {
			return nil, err
		}
		n, peer, err := conn.ReadFrom(rep.binary_payload)
		if err != nil {
			return nil, err
		}
		rep.target = peer.String()
		rm, err := icmp.ParseMessage(rep.ping_type.Protocol(), rep.binary_payload[:n])
		if err != nil {
			return nil, err
		}
		rep.text_payload = rm
		ping := &PingRecv{}
		switch rep.text_payload.Body.(type) {
		case *icmp.TimeExceeded:
			ping.Loss = true
			ping.LossReason = "Time Exceeded"
			return ping, nil
		case *icmp.PacketTooBig:
			ping.Loss = true
			ping.LossReason = "Packet Too Big"
			return ping, nil
		case *icmp.DstUnreach:
			ping.Loss = true
			ping.LossReason = "Destination Unreachable"
			// Dump the embedded original IP header for diagnostics.
			d := rep.text_payload.Body.(*icmp.DstUnreach).Data
			header, _ := ipv4.ParseHeader(d[:len(d)-8])
			spew.Dump(header)
			return ping, nil
		case *icmp.Echo:
			ping.Seq = rep.text_payload.Body.(*icmp.Echo).Seq
			ping.Target = rep.target
			ping.Loss = false
			return ping, nil
		default:
			return nil, errors.New("Unknown ICMP Packet")
		}
	}
}
|
/*
* MIT License
*
* Copyright (c) 2017 SmartestEE Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* Revision History:
* Initial: 2017/04/19 Feng Yifei
*/
/**
* 抽象工厂模式:
* 提供一个创建一系列相关或相互依赖对象的接口,而无须指定它们具体的类;
* 在抽象工厂模式中,每一个具体工厂都提供了多个工厂方法用于产生多种不同类型的对象
* 特点:
* 横向功能支持简单易行
* 纵向支持新特性难
*/
package main
import (
"fmt"
)
// 核心结构:
// AbstractFactory: 声明一组用于创建对象的方法
// ConcreteFactory: 实现抽象工厂中的方法
// AbstractProduct: 每种对象的方法
// ConcreteProduct: 实现每种对象的方法
// AbstractFactory declares one creation method per product kind; each
// concrete factory is expected to produce products of a single family.
type AbstractFactory interface {
	CreateButton() AbstractButton
	CreateSwitch() AbstractSwitch
}
// Windows is the concrete factory for the Windows product family.
type Windows struct{}

// CreateButton returns a Windows-family button.
func (f *Windows) CreateButton() AbstractButton {
	return new(WindowsButton)
}

// CreateSwitch returns a Windows-family switch.
// Bug fix: it previously returned a MacOSButton, which breaks the abstract
// factory invariant that one factory yields one consistent family (the
// corrected variant later in this file confirms the intent).
func (f *Windows) CreateSwitch() AbstractSwitch {
	return new(WindowsSwitch)
}
// MacOS is the concrete factory for the macOS product family.
type MacOS struct{}

// CreateButton returns a macOS-family button.
// Bug fix: it previously returned a WindowsSwitch — the wrong family AND
// the wrong product kind (the corrected variant later in this file
// confirms the intent).
func (f *MacOS) CreateButton() AbstractButton {
	return new(MacOSButton)
}

// CreateSwitch returns a macOS-family switch.
func (f *MacOS) CreateSwitch() AbstractSwitch {
	return new(MacOSSwitch)
}
// AbstractButton is the abstract product interface for buttons; it is left
// empty because the demo only inspects the concrete type via %T.
type AbstractButton interface{}

// AbstractSwitch is the abstract product interface for switches.
type AbstractSwitch interface{}

// Concrete products, one per (family, kind) pair.
type WindowsButton struct{}
type WindowsSwitch struct{}
type MacOSButton struct{}
type MacOSSwitch struct{}
// main demonstrates the abstract factory: each factory is asked for one
// button and one switch, and the resulting concrete types are printed.
func main() {
	windows := new(Windows)
	button1 := windows.CreateButton()
	// Bug fix: the switch must come from CreateSwitch; this previously
	// called CreateButton a second time.
	switch1 := windows.CreateSwitch()
	fmt.Printf("%T\n", button1)
	fmt.Printf("%T\n", switch1)
	mac := new(MacOS)
	// Bug fix: the button must come from CreateButton; this previously
	// called CreateSwitch a second time.
	button2 := mac.CreateButton()
	switch2 := mac.CreateSwitch()
	fmt.Printf("%T\n", button2)
	fmt.Printf("%T\n", switch2)
}
Revision: corrected abstract factory implementation — each factory now returns products of its own family (fixed version follows).
/*
* MIT License
*
* Copyright (c) 2017 SmartestEE Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* Revision History:
* Initial: 2017/04/19 Feng Yifei
*/
/**
* 抽象工厂模式:
* 提供一个创建一系列相关或相互依赖对象的接口,而无须指定它们具体的类;
* 在抽象工厂模式中,每一个具体工厂都提供了多个工厂方法用于产生多种不同类型的对象
* 特点:
* 横向功能支持简单易行
* 纵向支持新特性难
*/
package main
import (
"fmt"
)
// 核心结构:
// AbstractFactory: 声明一组用于创建对象的方法
// ConcreteFactory: 实现抽象工厂中的方法
// AbstractProduct: 每种对象的方法
// ConcreteProduct: 实现每种对象的方法
// AbstractFactory declares one creation method per product kind; each
// concrete factory produces products of a single consistent family.
type AbstractFactory interface {
	CreateButton() AbstractButton
	CreateSwitch() AbstractSwitch
}
// Windows is the concrete factory producing the Windows product family.
type Windows struct{}

// CreateButton builds a Windows-family button.
func (f *Windows) CreateButton() AbstractButton {
	return &WindowsButton{}
}

// CreateSwitch builds a Windows-family switch.
func (f *Windows) CreateSwitch() AbstractSwitch {
	return &WindowsSwitch{}
}
// MacOS is the concrete factory producing the macOS product family.
type MacOS struct{}

// CreateButton builds a macOS-family button.
func (f *MacOS) CreateButton() AbstractButton {
	return &MacOSButton{}
}

// CreateSwitch builds a macOS-family switch.
func (f *MacOS) CreateSwitch() AbstractSwitch {
	return &MacOSSwitch{}
}
// AbstractButton is the abstract product interface for buttons; it is left
// empty because the demo only inspects the concrete type via %T.
type AbstractButton interface{}

// AbstractSwitch is the abstract product interface for switches.
type AbstractSwitch interface{}

// Concrete products, one per (family, kind) pair.
type WindowsButton struct{}
type WindowsSwitch struct{}
type MacOSButton struct{}
type MacOSSwitch struct{}
func main() {
windows := new(Windows)
button1 := windows.CreateButton()
switch1 := windows.CreateSwitch()
fmt.Printf("%T\n", button1)
fmt.Printf("%T\n", switch1)
mac := new(MacOS)
button2 := mac.CreateButton()
switch2 := mac.CreateSwitch()
fmt.Printf("%T\n", button2)
fmt.Printf("%T\n", switch2)
}
|
// TODO : need to change to api
package main
import (
"bytes"
"encoding/json"
"encoding/binary"
"fmt"
"html"
"io"
"io/ioutil"
"os/exec"
"errors"
"strconv"
"log"
"net/http"
"github.com/gorilla/mux"
"time"
"github.com/shirou/gopsutil/cpu"
"github.com/shirou/gopsutil/mem"
"github.com/shirou/gopsutil/disk"
"github.com/shirou/gopsutil/net"
"github.com/shirou/gopsutil/docker"
)
// Index is a catch-all handler that greets the caller with the escaped
// request path.
func Index(w http.ResponseWriter, r *http.Request) {
	escapedPath := html.EscapeString(r.URL.Path)
	fmt.Fprintf(w, "Hello, %q", escapedPath)
}
// list every node
// GetNodes proxies the Kubernetes API node list to the client.
//
// Bug fix: a read failure previously called log.Fatal, which terminates
// the whole server process from inside a request handler; failures are now
// reported to the client instead.
func GetNodes(w http.ResponseWriter, r *http.Request) {
	// TODO: read API host/port from a configuration file.
	res, err := http.Get("http://localhost:8080/api/v1/nodes")
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadGateway)
		return
	}
	// defer ensures the body is closed on every return path.
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprint(w, string(body))
}
// GetNode fetches a single node from the Kubernetes API, round-trips it
// through a map to pretty-print it, and writes the indented JSON back.
//
// Fixes: errors no longer panic out of the handler, the redundant
// []byte(body) conversion was removed (body is already []byte), and the
// debug dump of the whole object to stdout was dropped.
func GetNode(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	// node name from user.
	nodesName := vars["nodeName"]
	// TODO: read API host/port from a configuration file.
	res, err := http.Get("http://localhost:8080/api/v1/nodes/" + nodesName)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadGateway)
		return
	}
	body, err := ioutil.ReadAll(res.Body)
	res.Body.Close()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	var object map[string]interface{}
	if err = json.Unmarshal(body, &object); err != nil {
		http.Error(w, err.Error(), http.StatusBadGateway)
		return
	}
	send_obj, err := json.MarshalIndent(object, "", "\t")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprint(w, string(send_obj))
}
// OptionCors answers CORS preflight requests by reflecting the caller's
// Origin header into Access-Control-Allow-Origin together with the allowed
// methods and headers.
//
// NOTE(review): the guard is `origin != "http://localhost"`, so EVERY
// origin except localhost is reflected and allowed — effectively allow-all
// CORS. Confirm whether the comparison was meant to be `==` before
// deploying publicly.
func OptionCors(w http.ResponseWriter, r *http.Request) {
	// TODO: need to change origin to deployed domain name
	if origin := r.Header.Get("Origin"); origin != "http://localhost" {
		w.Header().Set("Access-Control-Allow-Origin", origin)
		w.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
		w.Header().Set("Access-Control-Allow-Headers",
			"Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization")
	}
}
// list specific node cpu
// NodeCpu is a stub handler intended to report a specific node's CPU
// usage; not implemented yet.
func NodeCpu(w http.ResponseWriter, r *http.Request) {
}
// list specifc node memory
// NodeMemory is a stub handler intended to report a specific node's memory
// usage; not implemented yet.
func NodeMemory(w http.ResponseWriter, r *http.Request) {
}
// list all pods
// GetPods proxies the full pod list from the Kubernetes API server to the
// client.
func GetPods(w http.ResponseWriter, r *http.Request) {
	// to do need to read api and port of api server from configuration file
	response, err := http.Get("http://localhost:8080/api/v1/pods")
	if err != nil {
		panic(err)
	}
	payload, err := ioutil.ReadAll(response.Body)
	response.Body.Close()
	if err != nil {
		panic(err)
	}
	fmt.Fprint(w, string(payload))
}
// list specific pod details
// GetPod fetches a single pod (default namespace) from the Kubernetes API
// and relays the JSON to the client.
//
// Bug fix: the handler previously wrote the pod name into the response
// BEFORE the JSON payload, corrupting the output for any JSON consumer.
func GetPod(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	// pod name from user.
	podName := vars["podName"]
	// TODO: read API host/port from configuration; make the namespace flexible.
	res, err := http.Get("http://localhost:8080/api/v1/namespaces/default/pods/" + podName)
	if err != nil {
		panic(err)
	}
	body, err := ioutil.ReadAll(res.Body)
	res.Body.Close()
	if err != nil {
		panic(err)
	}
	fmt.Fprint(w, string(body))
}
// list specific pod cpu
// PodCpu is a stub handler intended to report a specific pod's CPU usage;
// not implemented yet.
func PodCpu(w http.ResponseWriter, r *http.Request) {
}
// list specific pod memory
// PodMemory is a stub handler intended to report a specific pod's memory
// usage; not implemented yet.
func PodMemory(w http.ResponseWriter, r *http.Request) {
}
// post handler for scale pod by pod name
// TODO : remove
// test mocks
// nodeTestMock serves two hard-coded nodes as JSON; it exists only as a
// test fixture (marked TODO remove).
func nodeTestMock(w http.ResponseWriter, r *http.Request) {
	mock := Nodes{
		Node{Name: "node1", Ip: "192.168.1.2", Cpu: 5000, Memory: 3000, DiskUsage: 1000},
		Node{Name: "node2", Ip: "192.169.1.4", Cpu: 5000, Memory: 3000, DiskUsage: 1000},
	}
	w.Header().Set("Content-Type", "application/json;charset=UTF-8")
	w.WriteHeader(http.StatusOK)
	encoder := json.NewEncoder(w)
	if err := encoder.Encode(mock); err != nil {
		panic(err)
	}
}
// TODO : remove
// test ssh to exec command on other machine
// testExec runs `ls .` over SSH on a fixed test host and echoes the first
// few bytes of the output; it exists only as a test helper (TODO remove).
//
// Fixes: log.Fatal no longer kills the whole server when the command
// fails, and the response slice is bounded so output shorter than 6 bytes
// cannot cause a slice-out-of-range panic.
func testExec(w http.ResponseWriter, r *http.Request) {
	commander := SSHCommander{"root", "161.246.70.75"}
	cmd := []string{
		"ls",
		".",
	}
	output, err := commander.Command(cmd...).Output()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Preserve the original behaviour of echoing at most 6 bytes.
	limit := 6
	if len(output) < limit {
		limit = len(output)
	}
	fmt.Fprint(w, string(output[:limit]))
}
// GetApp shells out to kubectl to list the pods labelled app=<appName> and
// relays kubectl's JSON output to the client.
func GetApp(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	// app name from user.
	appName := vars["appName"]
	fmt.Println(appName)
	// TODO: need to find new solution to get info from api like other done.
	out, err := exec.Command("kubectl", "get", "pod", "-l", "app="+appName, "-o", "json").Output()
	if err != nil {
		panic(err)
	}
	fmt.Fprint(w, string(out))
}
// GetApps shells out to kubectl to list all replication controllers as
// JSON and relays the output to the client.
func GetApps(w http.ResponseWriter, r *http.Request) {
	out, err := exec.Command("kubectl", "get", "rc", "-o", "json").Output()
	if err != nil {
		panic(err)
	}
	fmt.Fprint(w, string(out))
}
// CreatePod decodes a Pod spec from the request body, builds a Kubernetes
// Pod manifest around it and POSTs the manifest to the API server.
//
// Fixes: the handler now RETURNS after rejecting invalid JSON with 422
// (previously it fell through and created the pod anyway), and the error
// from http.NewRequest is checked before the request is used.
func CreatePod(w http.ResponseWriter, r *http.Request) {
	var pod Pod
	// limits json post request for prevent overflow attack.
	body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1048576))
	if err != nil {
		panic(err)
	}
	// catch error from close reader
	if err := r.Body.Close(); err != nil {
		panic(err)
	}
	// get request json information
	if err := json.Unmarshal(body, &pod); err != nil {
		w.Header().Set("Content-Type", "application/json;charset=UTF-8")
		w.WriteHeader(422) // unprocessable entity
		if err := json.NewEncoder(w).Encode(err); err != nil {
			panic(err)
		}
		return // bug fix: do not fall through and create the pod anyway
	}
	// prepare json to send to create by kubernetes api server
	labels := map[string]interface{}{
		"app": pod.Name,
	}
	metadata := map[string]interface{}{
		"name":   pod.Name,
		"labels": labels,
	}
	ports := map[string]interface{}{
		"containerPort": 80,
	}
	containers := map[string]interface{}{
		"name":   pod.Name,
		"image":  pod.Image,
		"ports":  []map[string]interface{}{ports},
		"memory": pod.Memory,
		"cpu":    pod.Cpu,
	}
	spec := map[string]interface{}{
		"containers": []map[string]interface{}{containers},
	}
	objReq := map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "Pod",
		"metadata":   metadata,
		"spec":       spec,
	}
	jsonReq, err := json.MarshalIndent(objReq, "", "\t")
	if err != nil {
		panic(err)
	}
	fmt.Println("you sent ", string(jsonReq))
	// post json to kubernete api server
	// TODO: need to change name space to user namespace
	postUrl := "http://localhost:8080/api/v1/namespaces/default/pods"
	req, err := http.NewRequest("POST", postUrl, bytes.NewBuffer(jsonReq))
	if err != nil {
		panic(err) // bug fix: err was previously ignored before req was used
	}
	req.Header.Set("X-Custom-Header", "myvalue")
	req.Header.Set("Content-Type", "application/json")
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	// defer ensures the response body is closed.
	defer resp.Body.Close()
	response, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(response))
}
/**
* Get current resource by using cgroup
**/
// GetCurrentResourceCgroup reads a container's current memory usage from
// its cgroup file. metric_type 2 decodes the file as big-endian binary,
// metric_type 3 parses it as decimal text; any other value is an error.
//
// Bug fix: the metric_type==2 branch had its error check inverted — it
// decoded the (nil) file contents when the read FAILED and returned
// (0, nil) on success.
//
// NOTE(review): both branches read memory.usage_in_bytes, which the kernel
// exposes as ASCII text; the binary decode in the metric_type==2 branch
// looks questionable — confirm which representation is actually intended.
func GetCurrentResourceCgroup(container_id string, metric_type int) (uint64, error) {
	// TODO : Read Latency from HA Proxy
	// file path prefix
	var path = "/sys/fs/cgroup/memory/docker/" + container_id + "/"
	if metric_type == 2 {
		// read memory usage
		current_usage, err := ioutil.ReadFile(path + "memory.usage_in_bytes")
		if err != nil {
			return 0, err
		}
		return binary.BigEndian.Uint64(current_usage), nil
	} else if metric_type == 3 {
		// read memory usage
		current_usage, err := ioutil.ReadFile(path + "memory.usage_in_bytes")
		if err != nil {
			return 0, err
		}
		// strip everything from the first newline (byte 10) onwards
		n := bytes.Index(current_usage, []byte{10})
		usage_str := string(current_usage[:n])
		resource_usage, _ := strconv.ParseInt(usage_str, 10, 64)
		return uint64(resource_usage), nil
	}
	// not match any case
	return 0, errors.New("not match any case")
}
/**
* Get List of ContainerID and pod's ip by replication name and their namespace
**/
// GetContainerIDList returns the container IDs and pod IPs of every pod
// whose generateName matches "<rc_name>-" in the given namespace, by
// querying "<url>:<port>/api/v1/namespaces/<namespace>/pods".
//
// NOTE(review): the JSON walk below uses unchecked type assertions; a pod
// without podIP or containerStatuses (e.g. still pending) would panic —
// verify against the API schema before relying on this in production.
func GetContainerIDList(url string, port string, rc_name string, namespace string) ([]string, []string, error) {
	// TODO : maybe user want to get container id which map with it's pod
	// initail rasult array
	container_ids := []string{}
	pod_ips := []string{}
	res, err := http.Get(url + ":" + port + "/api/v1/namespaces/" + namespace + "/pods")
	if err != nil {
		fmt.Println("Can't connect to cadvisor")
		panic(err)
	}
	body, err := ioutil.ReadAll(res.Body)
	res.Body.Close()
	if err != nil {
		return nil, nil, err
	} else {
		// json handler type
		var res_obj map[string]interface{}
		if err := json.Unmarshal(body, &res_obj); err != nil {
			return nil, nil, err
		}
		pod_arr := res_obj["items"].([]interface{})
		// iterate to get pod of specific rc
		for _, pod := range pod_arr {
			// pods created by a replication controller carry generateName "<rc>-"
			pod_name := pod.(map[string]interface{})["metadata"].(map[string]interface{})["generateName"]
			if pod_name != nil {
				if pod_name == rc_name+"-" {
					pod_ips = append(pod_ips, pod.(map[string]interface{})["status"].(map[string]interface{})["podIP"].(string))
					containers := pod.(map[string]interface{})["status"].(map[string]interface{})["containerStatuses"].([]interface{})
					// one pod can has many container ,so iterate for get each container
					for _, container := range containers {
						// [9:] strips the "docker://" scheme prefix from containerID
						container_id := container.(map[string]interface{})["containerID"].(string)[9:]
						container_ids = append(container_ids, container_id)
					}
				}
			}
		}
		return container_ids, pod_ips, nil
	}
}
/**
read node resource usage
**/
// GetNodeResource reports this node's memory stats, CPU percentage
// (sampled over one second), disk usage for the "/" and "/home" mounts,
// and aggregate network counters as an indented JSON NodeMetric.
//
// NOTE(review): every gopsutil error is discarded; on failure the
// corresponding metric fields are zero values — confirm consumers accept
// that.
func GetNodeResource(w http.ResponseWriter, r *http.Request) {
	// get this node memory
	memory, _ := mem.VirtualMemory()
	// get this node cpu percent usage
	cpu_percent, _ := cpu.CPUPercent(time.Duration(1) * time.Second, false)
	// Disk mount Point
	disk_partitions, _ := disk.DiskPartitions(true)
	// Disk usage: only the root and /home mounts are reported.
	var disk_usages []*disk.DiskUsageStat
	for _, disk_partition := range disk_partitions {
		if disk_partition.Mountpoint == "/" || disk_partition.Mountpoint == "/home" {
			disk_stat, _ := disk.DiskUsage(disk_partition.Device);
			disk_usages = append(disk_usages, disk_stat)
		}
	}
	// Network counters, aggregated across interfaces (pernic=false).
	network, _ := net.NetIOCounters(false)
	// create new node obj with resource usage information
	node_metric := NodeMetric{
		Cpu:       cpu_percent,
		Memory:    memory,
		DiskUsage: disk_usages,
		Network:   network,
	}
	node_json, err := json.MarshalIndent(node_metric, "", "\t")
	if err != nil {
		fmt.Println("error:", err)
	}
	fmt.Fprint(w, string(node_json))
}
/**
CPU Percent Calculation
**/
// DockerCPUPercent samples a container's cgroup CPU times twice, interval
// apart, and returns the busy percentage over that window (0 when busy
// time did not advance, 1 when total time did not advance).
//
// NOTE(review): errors from docker.CgroupCPUDocker are discarded; if the
// library returns a nil stat on error, calculate would dereference nil —
// confirm the library's contract.
func DockerCPUPercent(interval time.Duration, container_id string) (float64, error) {
	// getAllBusy sums every accounted tick and returns (total, busy).
	getAllBusy := func(t *cpu.CPUTimesStat) (float64, float64) {
		busy := t.User + t.System + t.Nice + t.Iowait + t.Irq +
			t.Softirq + t.Steal + t.Guest + t.GuestNice + t.Stolen
		return busy + t.Idle, busy
	}
	// calculate turns two snapshots into a busy percentage.
	calculate := func(t1, t2 *cpu.CPUTimesStat) float64 {
		t1All, t1Busy := getAllBusy(t1)
		t2All, t2Busy := getAllBusy(t2)
		if t2Busy <= t1Busy {
			return 0
		}
		if t2All <= t1All {
			return 1
		}
		return (t2Busy - t1Busy) / (t2All - t1All) * 100
	}
	// Get CPU usage at the start of the interval.
	var cpuTimes1 *cpu.CPUTimesStat
	cpuTimes1, _ = docker.CgroupCPUDocker(container_id)
	if interval > 0 {
		time.Sleep(interval)
	}
	// And at the end of the interval.
	var cpuTimes2 *cpu.CPUTimesStat
	cpuTimes2, _ = docker.CgroupCPUDocker(container_id)
	var rets float64
	rets = calculate(cpuTimes1, cpuTimes2)
	return rets, nil
}
/**
get resource usage of application (pods) on node
**/
// GetAppResource aggregates resource usage for one application: per-pod
// CPU/memory read from the local Docker cgroups, plus request rate,
// response time and 2xx/4xx/5xx counters scraped from a local HAProxy
// stats API, all returned as an indented JSON AppMetric.
//
// NOTE(review): the HAProxy fields are taken from element [0] only and
// extracted with unchecked type assertions — a missing or renamed field
// panics. The Unmarshal error is also only inspected AFTER the assertions.
// Confirm the stats endpoint's schema before relying on this.
func GetAppResource(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	// get app Name
	appName := vars["appName"]
	var summary_cpu float64
	var memory_bundle []*docker.CgroupMemStat
	container_ids, pod_ips, err := GetContainerIDList("http://localhost", "8080", appName, "default")
	if err != nil {
		fmt.Println(err)
	}
	// Accumulate per-container CPU and memory for the app's pods.
	for _, container_id := range container_ids {
		fmt.Println(container_id, pod_ips)
		// calculation percentage of cpu usage
		container_cpu, _ := DockerCPUPercent(time.Duration(1) * time.Second, container_id)
		summary_cpu += container_cpu
		// memory usage
		container_memory, _ := docker.CgroupMemDocker(container_id)
		memory_bundle = append(memory_bundle, container_memory)
	}
	// find the request per sec from haproxy-frontend
	res_front, err := http.Get("http://localhost:10001/v1/stats/frontends")
	if err != nil {
		panic(err)
	}
	body_front, err := ioutil.ReadAll(res_front.Body)
	res_front.Body.Close()
	if err != nil {
		panic(err)
	}
	//var rps uint64
	var object_front []map[string]interface{}
	err = json.Unmarshal([]byte(body_front), &object_front)
	rps := object_front[0]["req_rate"].(string)
	rps_int, _ := strconv.ParseInt(rps, 10, 64)
	if err == nil {
	} else {
		fmt.Println(err)
	}
	//find resonse time from haproxy-backends
	//var rtime uint64
	res_back, err := http.Get("http://localhost:10001/v1/stats/backends")
	if err != nil {
		panic(err)
	}
	body_back, err := ioutil.ReadAll(res_back.Body)
	res_back.Body.Close()
	if err != nil {
		panic(err)
	}
	var object_back []map[string]interface{}
	err = json.Unmarshal([]byte(body_back), &object_back)
	rtime := object_back[0]["rtime"].(string)
	res_2xx := object_back[0]["hrsp_2xx"].(string)
	res_4xx := object_back[0]["hrsp_4xx"].(string)
	res_5xx := object_back[0]["hrsp_5xx"].(string)
	rtime_int, _ := strconv.ParseInt(rtime, 10, 64)
	res2xx_int, _ := strconv.ParseInt(res_2xx, 10, 64)
	res4xx_int, _ := strconv.ParseInt(res_4xx, 10, 64)
	res5xx_int, _ := strconv.ParseInt(res_5xx, 10, 64)
	if err == nil {
	} else {
		fmt.Println(err)
	}
	fmt.Println("rps: ", rps, ", rtime: ", rtime)
	// find the cpu avarage of application cpu usage
	average_cpu := summary_cpu/float64(len(container_ids))
	// create appliction object
	app_metric := AppMetric{
		App:         appName,
		Cpu:         average_cpu,
		Memory:      memory_bundle,
		Request:     rps_int,
		Response:    rtime_int,
		Response2xx: res2xx_int,
		Response4xx: res4xx_int,
		Response5xx: res5xx_int,
	}
	app_json, err := json.MarshalIndent(app_metric, "", "\t")
	if err != nil {
		fmt.Println("error:", err)
	}
	fmt.Fprint(w, string(app_json))
}
Revision: extract the hard-coded Kubernetes API server URL into the kube_api variable (revised version follows).
// TODO : need to change to api
package main
import (
"bytes"
"encoding/json"
"encoding/binary"
"fmt"
"html"
"io"
"io/ioutil"
"os/exec"
"errors"
"strconv"
"log"
"net/http"
"github.com/gorilla/mux"
"time"
"github.com/shirou/gopsutil/cpu"
"github.com/shirou/gopsutil/mem"
"github.com/shirou/gopsutil/disk"
"github.com/shirou/gopsutil/net"
"github.com/shirou/gopsutil/docker"
)
// kube_api is the base URL of the Kubernetes API server used by every
// handler below. TODO(review): load from configuration instead of
// hard-coding localhost:8080.
var kube_api string ="http://localhost:8080"
// Index is a catch-all handler that greets the caller with the escaped
// request path.
func Index(w http.ResponseWriter, r *http.Request) {
	escapedPath := html.EscapeString(r.URL.Path)
	fmt.Fprintf(w, "Hello, %q", escapedPath)
}
// list every node
// GetNodes proxies the Kubernetes API node list to the client.
//
// Bug fix: a read failure previously called log.Fatal, which terminates
// the whole server process from inside a request handler; failures are now
// reported to the client instead.
func GetNodes(w http.ResponseWriter, r *http.Request) {
	res, err := http.Get(kube_api+"/api/v1/nodes")
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadGateway)
		return
	}
	// defer ensures the body is closed on every return path.
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprint(w, string(body))
}
// GetNode fetches a single node from the Kubernetes API, round-trips it
// through a map to pretty-print it, and writes the indented JSON back.
//
// Fixes: errors no longer panic out of the handler, the redundant
// []byte(body) conversion was removed (body is already []byte), and the
// debug dump of the whole object to stdout was dropped.
func GetNode(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	// node name from user.
	nodesName := vars["nodeName"]
	res, err := http.Get(kube_api+"/api/v1/nodes/" + nodesName)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadGateway)
		return
	}
	body, err := ioutil.ReadAll(res.Body)
	res.Body.Close()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	var object map[string]interface{}
	if err = json.Unmarshal(body, &object); err != nil {
		http.Error(w, err.Error(), http.StatusBadGateway)
		return
	}
	send_obj, err := json.MarshalIndent(object, "", "\t")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprint(w, string(send_obj))
}
// OptionCors answers CORS preflight requests by reflecting the caller's
// Origin header into Access-Control-Allow-Origin together with the allowed
// methods and headers.
//
// NOTE(review): the guard is `origin != "http://localhost"`, so EVERY
// origin except localhost is reflected and allowed — effectively allow-all
// CORS. Confirm whether the comparison was meant to be `==` before
// deploying publicly.
func OptionCors(w http.ResponseWriter, r *http.Request) {
	// TODO: need to change origin to deployed domain name
	if origin := r.Header.Get("Origin"); origin != "http://localhost" {
		w.Header().Set("Access-Control-Allow-Origin", origin)
		w.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
		w.Header().Set("Access-Control-Allow-Headers",
			"Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization")
	}
}
// list specific node cpu
// NodeCpu is a stub handler intended to report a specific node's CPU
// usage; not implemented yet.
func NodeCpu(w http.ResponseWriter, r *http.Request) {
}
// list specifc node memory
// NodeMemory is a stub handler intended to report a specific node's memory
// usage; not implemented yet.
func NodeMemory(w http.ResponseWriter, r *http.Request) {
}
// list all pods
// GetPods proxies the full pod list from the Kubernetes API server to the
// client.
func GetPods(w http.ResponseWriter, r *http.Request) {
	// to do need to read api and port of api server from configuration file
	response, err := http.Get(kube_api+"/api/v1/pods")
	if err != nil {
		panic(err)
	}
	payload, err := ioutil.ReadAll(response.Body)
	response.Body.Close()
	if err != nil {
		panic(err)
	}
	fmt.Fprint(w, string(payload))
}
// list specific pod details
// GetPod fetches a single pod (default namespace) from the Kubernetes API
// and relays the JSON to the client.
//
// Bug fix: the handler previously wrote the pod name into the response
// BEFORE the JSON payload, corrupting the output for any JSON consumer.
func GetPod(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	// pod name from user.
	podName := vars["podName"]
	// TODO: make the namespace flexible.
	res, err := http.Get(kube_api+"/api/v1/namespaces/default/pods/" + podName)
	if err != nil {
		panic(err)
	}
	body, err := ioutil.ReadAll(res.Body)
	res.Body.Close()
	if err != nil {
		panic(err)
	}
	fmt.Fprint(w, string(body))
}
// list specific pod cpu
// PodCpu is a stub handler intended to report a specific pod's CPU usage;
// not implemented yet.
func PodCpu(w http.ResponseWriter, r *http.Request) {
}
// list specific pod memory
// PodMemory is a stub handler intended to report a specific pod's memory
// usage; not implemented yet.
func PodMemory(w http.ResponseWriter, r *http.Request) {
}
// post handler for scale pod by pod name
// TODO : remove
// test mocks
// nodeTestMock serves two hard-coded nodes as JSON; it exists only as a
// test fixture (marked TODO remove).
func nodeTestMock(w http.ResponseWriter, r *http.Request) {
	mock := Nodes{
		Node{Name: "node1", Ip: "192.168.1.2", Cpu: 5000, Memory: 3000, DiskUsage: 1000},
		Node{Name: "node2", Ip: "192.169.1.4", Cpu: 5000, Memory: 3000, DiskUsage: 1000},
	}
	w.Header().Set("Content-Type", "application/json;charset=UTF-8")
	w.WriteHeader(http.StatusOK)
	encoder := json.NewEncoder(w)
	if err := encoder.Encode(mock); err != nil {
		panic(err)
	}
}
// TODO : remove
// test ssh to exec command on other machine
// testExec runs `ls .` over SSH on a fixed test host and echoes the first
// few bytes of the output; it exists only as a test helper (TODO remove).
//
// Fixes: log.Fatal no longer kills the whole server when the command
// fails, and the response slice is bounded so output shorter than 6 bytes
// cannot cause a slice-out-of-range panic.
func testExec(w http.ResponseWriter, r *http.Request) {
	commander := SSHCommander{"root", "161.246.70.75"}
	cmd := []string{
		"ls",
		".",
	}
	output, err := commander.Command(cmd...).Output()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Preserve the original behaviour of echoing at most 6 bytes.
	limit := 6
	if len(output) < limit {
		limit = len(output)
	}
	fmt.Fprint(w, string(output[:limit]))
}
// GetApp shells out to kubectl to list the pods labelled app=<appName> and
// relays kubectl's JSON output to the client.
func GetApp(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	// app name from user.
	appName := vars["appName"]
	fmt.Println(appName)
	// TODO: need to find new solution to get info from api like other done.
	out, err := exec.Command("kubectl", "get", "pod", "-l", "app="+appName, "-o", "json").Output()
	if err != nil {
		panic(err)
	}
	fmt.Fprint(w, string(out))
}
// GetApps shells out to kubectl to list all replication controllers as
// JSON and relays the output to the client.
func GetApps(w http.ResponseWriter, r *http.Request) {
	out, err := exec.Command("kubectl", "get", "rc", "-o", "json").Output()
	if err != nil {
		panic(err)
	}
	fmt.Fprint(w, string(out))
}
// CreatePod decodes a Pod spec from the request body, builds a Kubernetes
// Pod manifest around it and POSTs the manifest to the API server.
//
// Fixes: the handler now RETURNS after rejecting invalid JSON with 422
// (previously it fell through and created the pod anyway), and the error
// from http.NewRequest is checked before the request is used.
func CreatePod(w http.ResponseWriter, r *http.Request) {
	var pod Pod
	// limits json post request for prevent overflow attack.
	body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1048576))
	if err != nil {
		panic(err)
	}
	// catch error from close reader
	if err := r.Body.Close(); err != nil {
		panic(err)
	}
	// get request json information
	if err := json.Unmarshal(body, &pod); err != nil {
		w.Header().Set("Content-Type", "application/json;charset=UTF-8")
		w.WriteHeader(422) // unprocessable entity
		if err := json.NewEncoder(w).Encode(err); err != nil {
			panic(err)
		}
		return // bug fix: do not fall through and create the pod anyway
	}
	// prepare json to send to create by kubernetes api server
	labels := map[string]interface{}{
		"app": pod.Name,
	}
	metadata := map[string]interface{}{
		"name":   pod.Name,
		"labels": labels,
	}
	ports := map[string]interface{}{
		"containerPort": 80,
	}
	containers := map[string]interface{}{
		"name":   pod.Name,
		"image":  pod.Image,
		"ports":  []map[string]interface{}{ports},
		"memory": pod.Memory,
		"cpu":    pod.Cpu,
	}
	spec := map[string]interface{}{
		"containers": []map[string]interface{}{containers},
	}
	objReq := map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "Pod",
		"metadata":   metadata,
		"spec":       spec,
	}
	jsonReq, err := json.MarshalIndent(objReq, "", "\t")
	if err != nil {
		panic(err)
	}
	fmt.Println("you sent ", string(jsonReq))
	// post json to kubernete api server
	// TODO: need to change name space to user namespace
	postUrl := kube_api+"/api/v1/namespaces/default/pods"
	req, err := http.NewRequest("POST", postUrl, bytes.NewBuffer(jsonReq))
	if err != nil {
		panic(err) // bug fix: err was previously ignored before req was used
	}
	req.Header.Set("X-Custom-Header", "myvalue")
	req.Header.Set("Content-Type", "application/json")
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	// defer ensures the response body is closed.
	defer resp.Body.Close()
	response, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(response))
}
/**
* Get current resource by using cgroup
**/
// GetCurrentResourceCgroup reads a container's current memory usage from
// its cgroup file. metric_type 2 decodes the file as big-endian binary,
// metric_type 3 parses it as decimal text; any other value is an error.
//
// Bug fix: the metric_type==2 branch had its error check inverted — it
// decoded the (nil) file contents when the read FAILED and returned
// (0, nil) on success.
//
// NOTE(review): both branches read memory.usage_in_bytes, which the kernel
// exposes as ASCII text; the binary decode in the metric_type==2 branch
// looks questionable — confirm which representation is actually intended.
func GetCurrentResourceCgroup(container_id string, metric_type int) (uint64, error) {
	// TODO : Read Latency from HA Proxy
	// file path prefix
	var path = "/sys/fs/cgroup/memory/docker/" + container_id + "/"
	if metric_type == 2 {
		// read memory usage
		current_usage, err := ioutil.ReadFile(path + "memory.usage_in_bytes")
		if err != nil {
			return 0, err
		}
		return binary.BigEndian.Uint64(current_usage), nil
	} else if metric_type == 3 {
		// read memory usage
		current_usage, err := ioutil.ReadFile(path + "memory.usage_in_bytes")
		if err != nil {
			return 0, err
		}
		// strip everything from the first newline (byte 10) onwards
		n := bytes.Index(current_usage, []byte{10})
		usage_str := string(current_usage[:n])
		resource_usage, _ := strconv.ParseInt(usage_str, 10, 64)
		return uint64(resource_usage), nil
	}
	// not match any case
	return 0, errors.New("not match any case")
}
/**
 * GetContainerIDList returns the container IDs and pod IPs of the pods
 * belonging to the given replication controller in the given namespace, by
 * querying the Kubernetes API server at url:port.
 **/
func GetContainerIDList(url string, port string, rc_name string, namespace string) ([]string, []string, error) {
	// TODO : maybe user want to get container id which map with it's pod
	// initial result arrays
	container_ids := []string{}
	pod_ips := []string{}
	res, err := http.Get(url + ":" + port + "/api/v1/namespaces/" + namespace + "/pods")
	if err != nil {
		// FIX: report connection failures to the caller instead of panicking;
		// the caller already checks the error return.
		return nil, nil, err
	}
	body, err := ioutil.ReadAll(res.Body)
	res.Body.Close()
	if err != nil {
		return nil, nil, err
	}
	var res_obj map[string]interface{}
	if err := json.Unmarshal(body, &res_obj); err != nil {
		return nil, nil, err
	}
	// FIX: guard every type assertion so an unexpected API response returns
	// an error or skips the entry instead of panicking.
	pod_arr, ok := res_obj["items"].([]interface{})
	if !ok {
		return nil, nil, errors.New("unexpected pod list response: missing items")
	}
	// iterate over pods, keeping those generated by the requested rc
	for _, pod := range pod_arr {
		pod_map, ok := pod.(map[string]interface{})
		if !ok {
			continue
		}
		metadata, ok := pod_map["metadata"].(map[string]interface{})
		if !ok {
			continue
		}
		// generated pods carry "<rc_name>-" as their generateName
		if metadata["generateName"] != rc_name+"-" {
			continue
		}
		status, ok := pod_map["status"].(map[string]interface{})
		if !ok {
			continue
		}
		pod_ip, ok := status["podIP"].(string)
		if !ok {
			continue
		}
		pod_ips = append(pod_ips, pod_ip)
		containers, ok := status["containerStatuses"].([]interface{})
		if !ok {
			continue
		}
		// one pod can have many containers, so collect each container's ID
		for _, container := range containers {
			container_map, ok := container.(map[string]interface{})
			if !ok {
				continue
			}
			full_id, ok := container_map["containerID"].(string)
			if !ok || len(full_id) < 9 {
				continue
			}
			// strip the "docker://" scheme prefix
			container_ids = append(container_ids, full_id[9:])
		}
	}
	return container_ids, pod_ips, nil
}
/**
 * GetNodeResource reports this node's resource usage -- memory, CPU percent,
 * disk usage for the "/" and "/home" mount points, and network I/O counters
 * -- as indented JSON written to the response writer.
 **/
func GetNodeResource(w http.ResponseWriter, r *http.Request) {
	// get this node memory
	// NOTE(review): errors from the gopsutil calls below are silently
	// discarded; a failed probe produces nil/zero fields in the JSON output.
	memory, _ := mem.VirtualMemory()
	// get this node cpu percent usage, sampled over a one-second window
	cpu_percent, _ := cpu.CPUPercent(time.Duration(1) * time.Second, false)
	// Disk mount Point (true = include all partitions)
	disk_partitions, _ := disk.DiskPartitions(true)
	// Disk usage, collected only for the root and /home mount points
	var disk_usages []*disk.DiskUsageStat
	for _, disk_partition := range disk_partitions {
		if disk_partition.Mountpoint == "/" || disk_partition.Mountpoint == "/home" {
			disk_stat, _ := disk.DiskUsage(disk_partition.Device);
			disk_usages = append(disk_usages, disk_stat)
		}
	}
	// Network counters, aggregated across interfaces (false = not per-NIC)
	network, _ := net.NetIOCounters(false)
	// create new node obj with resource usage information
	node_metric := NodeMetric{
		Cpu:       cpu_percent,
		Memory:    memory,
		DiskUsage: disk_usages,
		Network:   network,
	}
	node_json, err := json.MarshalIndent(node_metric, "", "\t")
	if err != nil {
		fmt.Println("error:", err)
	}
	fmt.Fprint(w, string(node_json))
}
/**
 * DockerCPUPercent computes a container's CPU usage percentage by sampling
 * its cgroup CPU times twice, `interval` apart, and comparing the busy time
 * to the total elapsed time.
 **/
func DockerCPUPercent(interval time.Duration, container_id string) (float64, error) {
	// total and busy time for a single sample
	getAllBusy := func(t *cpu.CPUTimesStat) (float64, float64) {
		busy := t.User + t.System + t.Nice + t.Iowait + t.Irq +
			t.Softirq + t.Steal + t.Guest + t.GuestNice + t.Stolen
		return busy + t.Idle, busy
	}
	// percentage of elapsed time spent busy between two samples, clamped to
	// [0, 100]; degenerate deltas yield 0 or 1 as in the original formula
	calculate := func(t1, t2 *cpu.CPUTimesStat) float64 {
		t1All, t1Busy := getAllBusy(t1)
		t2All, t2Busy := getAllBusy(t2)
		if t2Busy <= t1Busy {
			return 0
		}
		if t2All <= t1All {
			return 1
		}
		return (t2Busy - t1Busy) / (t2All - t1All) * 100
	}
	// Get CPU usage at the start of the interval.
	// FIX: errors from the cgroup reads were previously discarded, which
	// could pass nil samples into calculate and crash; propagate them.
	cpuTimes1, err := docker.CgroupCPUDocker(container_id)
	if err != nil {
		return 0, err
	}
	if interval > 0 {
		time.Sleep(interval)
	}
	// And at the end of the interval.
	cpuTimes2, err := docker.CgroupCPUDocker(container_id)
	if err != nil {
		return 0, err
	}
	return calculate(cpuTimes1, cpuTimes2), nil
}
/**
 * GetAppResource reports the resource usage of an application's pods on this
 * node -- per-container CPU and memory from cgroups, plus request/response
 * statistics from the local HAProxy stats API -- as indented JSON.
 **/
func GetAppResource(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	// application name from the request path
	appName := vars["appName"]
	var summary_cpu float64
	var memory_bundle []*docker.CgroupMemStat
	container_ids, pod_ips, err := GetContainerIDList("http://localhost", "8080", appName, "default")
	if err != nil {
		fmt.Println(err)
	}
	for _, container_id := range container_ids {
		fmt.Println(container_id, pod_ips)
		// percentage of cpu used by this container over a one-second window
		container_cpu, _ := DockerCPUPercent(time.Duration(1)*time.Second, container_id)
		summary_cpu += container_cpu
		// memory usage
		container_memory, _ := docker.CgroupMemDocker(container_id)
		memory_bundle = append(memory_bundle, container_memory)
	}
	// find the requests per second from the haproxy frontend stats
	res_front, err := http.Get("http://localhost:10001/v1/stats/frontends")
	if err != nil {
		panic(err)
	}
	body_front, err := ioutil.ReadAll(res_front.Body)
	res_front.Body.Close()
	if err != nil {
		panic(err)
	}
	// FIX: check the unmarshal error and the slice length before indexing;
	// previously the first element was accessed and type-asserted before the
	// error check, panicking on malformed or empty stats output.
	var object_front []map[string]interface{}
	if err := json.Unmarshal(body_front, &object_front); err != nil {
		fmt.Println(err)
	}
	var rps string
	var rps_int int64
	if len(object_front) > 0 {
		rps, _ = object_front[0]["req_rate"].(string)
		rps_int, _ = strconv.ParseInt(rps, 10, 64)
	}
	// find response time and status-code counters from the haproxy backends
	res_back, err := http.Get("http://localhost:10001/v1/stats/backends")
	if err != nil {
		panic(err)
	}
	body_back, err := ioutil.ReadAll(res_back.Body)
	res_back.Body.Close()
	if err != nil {
		panic(err)
	}
	var object_back []map[string]interface{}
	if err := json.Unmarshal(body_back, &object_back); err != nil {
		fmt.Println(err)
	}
	var rtime string
	var rtime_int, res2xx_int, res4xx_int, res5xx_int int64
	if len(object_back) > 0 {
		rtime, _ = object_back[0]["rtime"].(string)
		res_2xx, _ := object_back[0]["hrsp_2xx"].(string)
		res_4xx, _ := object_back[0]["hrsp_4xx"].(string)
		res_5xx, _ := object_back[0]["hrsp_5xx"].(string)
		rtime_int, _ = strconv.ParseInt(rtime, 10, 64)
		res2xx_int, _ = strconv.ParseInt(res_2xx, 10, 64)
		res4xx_int, _ = strconv.ParseInt(res_4xx, 10, 64)
		res5xx_int, _ = strconv.ParseInt(res_5xx, 10, 64)
	}
	fmt.Println("rps: ", rps, ", rtime: ", rtime)
	// FIX: only average when containers were found; dividing by zero
	// previously produced NaN in the JSON output.
	var average_cpu float64
	if len(container_ids) > 0 {
		average_cpu = summary_cpu / float64(len(container_ids))
	}
	// create appliction object
	app_metric := AppMetric{
		App:         appName,
		Cpu:         average_cpu,
		Memory:      memory_bundle,
		Request:     rps_int,
		Response:    rtime_int,
		Response2xx: res2xx_int,
		Response4xx: res4xx_int,
		Response5xx: res5xx_int,
	}
	app_json, err := json.MarshalIndent(app_metric, "", "\t")
	if err != nil {
		fmt.Println("error:", err)
	}
	fmt.Fprint(w, string(app_json))
}
|
// Copyright 2019 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ec2
import (
"context"
"errors"
"fmt"
"github.com/cilium/cilium/pkg/api/helpers"
"github.com/cilium/cilium/pkg/aws/endpoints"
eniTypes "github.com/cilium/cilium/pkg/aws/eni/types"
"github.com/cilium/cilium/pkg/aws/types"
"github.com/cilium/cilium/pkg/cidr"
ipamTypes "github.com/cilium/cilium/pkg/ipam/types"
"github.com/cilium/cilium/pkg/spanstat"
"github.com/aws/aws-sdk-go-v2/aws"
awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
awsconfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
"github.com/aws/aws-sdk-go-v2/service/ec2"
ec2_types "github.com/aws/aws-sdk-go-v2/service/ec2/types"
)
// Client represents an EC2 API client
type Client struct {
	ec2Client           *ec2.Client
	limiter             *helpers.ApiLimiter        // rate-limits every AWS API call
	metricsAPI          MetricsAPI                 // records per-call duration/status metrics
	subnetsFilters      []ec2_types.Filter         // restricts subnet/ENI discovery when non-empty
	eniTagSpecification ec2_types.TagSpecification // tags applied to ENIs created by this client
}
// MetricsAPI represents the metrics maintained by the AWS API client
type MetricsAPI interface {
	helpers.MetricsAPI
	// ObserveAPICall records one AWS API call with its status and duration in seconds.
	ObserveAPICall(call, status string, duration float64)
}
// NewClient wraps the given EC2 API client with rate limiting, metrics
// collection, the provided subnet filters, and a tag specification built
// from eniTags that is applied to every ENI the client creates.
func NewClient(ec2Client *ec2.Client, metrics MetricsAPI, rateLimit float64, burst int, subnetsFilters []ec2_types.Filter, eniTags map[string]string) *Client {
	c := &Client{
		ec2Client:      ec2Client,
		metricsAPI:     metrics,
		limiter:        helpers.NewApiLimiter(metrics, rateLimit, burst),
		subnetsFilters: subnetsFilters,
	}
	c.eniTagSpecification = ec2_types.TagSpecification{
		ResourceType: ec2_types.ResourceTypeNetworkInterface,
		Tags:         createAWSTagSlice(eniTags),
	}
	return c
}
// NewConfig loads the default AWS configuration, fills in the region
// discovered via the EC2 instance metadata service, and installs the
// Cilium endpoint resolver.
func NewConfig(ctx context.Context) (aws.Config, error) {
	cfg, err := awsconfig.LoadDefaultConfig(ctx)
	if err != nil {
		return aws.Config{}, fmt.Errorf("unable to load AWS configuration: %w", err)
	}
	// The region is not part of the default config chain; ask IMDS.
	doc, err := imds.NewFromConfig(cfg).GetInstanceIdentityDocument(ctx, &imds.GetInstanceIdentityDocumentInput{})
	if err != nil {
		return aws.Config{}, fmt.Errorf("unable to retrieve instance identity document: %w", err)
	}
	cfg.Region = doc.Region
	cfg.EndpointResolver = aws.EndpointResolverFunc(endpoints.Resolver)
	return cfg, nil
}
// NewSubnetsFilters builds the ec2.Filter slice that selects AWS subnets
// carrying all of the given tag key/value pairs and, when provided, one of
// the given subnet IDs.
func NewSubnetsFilters(tags map[string]string, ids []string) []ec2_types.Filter {
	filters := make([]ec2_types.Filter, 0, len(tags)+1)
	for key, value := range tags {
		tagFilter := ec2_types.Filter{
			Name:   aws.String(fmt.Sprintf("tag:%s", key)),
			Values: []string{value},
		}
		filters = append(filters, tagFilter)
	}
	if len(ids) == 0 {
		return filters
	}
	return append(filters, ec2_types.Filter{
		Name:   aws.String("subnet-id"),
		Values: ids,
	})
}
// deriveStatus maps an AWS API call result to a status string: the HTTP
// status from the response when one is attached to the error, otherwise
// "Failed" for any other error and "OK" on success.
func deriveStatus(err error) string {
	if err == nil {
		return "OK"
	}
	var respErr *awshttp.ResponseError
	if errors.As(err, &respErr) {
		return respErr.Response.Status
	}
	return "Failed"
}
// describeNetworkInterfaces lists all ENIs, paging through the EC2 API
// results. When subnet filters are configured on the client, the listing is
// restricted to the subnets present in the given subnet map.
func (c *Client) describeNetworkInterfaces(ctx context.Context, subnets ipamTypes.SubnetMap) ([]ec2_types.NetworkInterface, error) {
	var result []ec2_types.NetworkInterface
	input := &ec2.DescribeNetworkInterfacesInput{}
	if len(c.subnetsFilters) > 0 {
		// restrict the query to the subnets that passed the configured filters
		subnetsIDs := make([]string, 0, len(subnets))
		for id := range subnets {
			subnetsIDs = append(subnetsIDs, id)
		}
		input.Filters = []ec2_types.Filter{
			{
				Name:   aws.String("subnet-id"),
				Values: subnetsIDs,
			},
		}
	}
	paginator := ec2.NewDescribeNetworkInterfacesPaginator(c.ec2Client, input)
	for paginator.HasMorePages() {
		// rate-limit and time each page request for metrics
		c.limiter.Limit(ctx, "DescribeNetworkInterfaces")
		sinceStart := spanstat.Start()
		output, err := paginator.NextPage(ctx)
		c.metricsAPI.ObserveAPICall("DescribeNetworkInterfaces", deriveStatus(err), sinceStart.Seconds())
		if err != nil {
			return nil, err
		}
		result = append(result, output.NetworkInterfaces...)
	}
	return result, nil
}
// parseENI parses a ec2.NetworkInterface as returned by the EC2 service API,
// converts it into a eniTypes.ENI object. It returns the instance ID the
// interface is attached to (empty when unattached) and an error when the
// interface has no primary private IP address. The optional vpcs/subnets
// maps, when non-nil, are used to enrich the result with CIDR information.
func parseENI(iface *ec2_types.NetworkInterface, vpcs ipamTypes.VirtualNetworkMap, subnets ipamTypes.SubnetMap) (instanceID string, eni *eniTypes.ENI, err error) {
	if iface.PrivateIpAddress == nil {
		err = fmt.Errorf("ENI has no IP address")
		return
	}
	eni = &eniTypes.ENI{
		IP:             aws.ToString(iface.PrivateIpAddress),
		SecurityGroups: []string{},
		Addresses:      []string{},
	}
	// The SDK models optional fields as pointers; copy each one only when set.
	if iface.MacAddress != nil {
		eni.MAC = aws.ToString(iface.MacAddress)
	}
	if iface.NetworkInterfaceId != nil {
		eni.ID = aws.ToString(iface.NetworkInterfaceId)
	}
	if iface.Description != nil {
		eni.Description = aws.ToString(iface.Description)
	}
	if iface.Attachment != nil {
		eni.Number = int(iface.Attachment.DeviceIndex)
		if iface.Attachment.InstanceId != nil {
			instanceID = aws.ToString(iface.Attachment.InstanceId)
		}
	}
	if iface.SubnetId != nil {
		eni.Subnet.ID = aws.ToString(iface.SubnetId)
		// enrich with the subnet CIDR when the caller supplied subnet data
		if subnets != nil {
			if subnet, ok := subnets[eni.Subnet.ID]; ok && subnet.CIDR != nil {
				eni.Subnet.CIDR = subnet.CIDR.String()
			}
		}
	}
	if iface.VpcId != nil {
		eni.VPC.ID = aws.ToString(iface.VpcId)
		// enrich with the VPC primary CIDR when the caller supplied VPC data
		if vpcs != nil {
			if vpc, ok := vpcs[eni.VPC.ID]; ok {
				eni.VPC.PrimaryCIDR = vpc.PrimaryCIDR
			}
		}
	}
	// collect every private IP assigned to the interface
	for _, ip := range iface.PrivateIpAddresses {
		if ip.PrivateIpAddress != nil {
			eni.Addresses = append(eni.Addresses, aws.ToString(ip.PrivateIpAddress))
		}
	}
	// collect the attached security group IDs
	for _, g := range iface.Groups {
		if g.GroupId != nil {
			eni.SecurityGroups = append(eni.SecurityGroups, aws.ToString(g.GroupId))
		}
	}
	return
}
// GetInstances returns the list of all instances including their ENIs as an
// instance map. Interfaces that are not attached to an instance are skipped.
func (c *Client) GetInstances(ctx context.Context, vpcs ipamTypes.VirtualNetworkMap, subnets ipamTypes.SubnetMap) (*ipamTypes.InstanceMap, error) {
	networkInterfaces, err := c.describeNetworkInterfaces(ctx, subnets)
	if err != nil {
		return nil, err
	}
	instances := ipamTypes.NewInstanceMap()
	for i := range networkInterfaces {
		id, eni, err := parseENI(&networkInterfaces[i], vpcs, subnets)
		if err != nil {
			return nil, err
		}
		if id == "" {
			continue
		}
		instances.Update(id, ipamTypes.InterfaceRevision{Resource: eni})
	}
	return instances, nil
}
// describeVpcs lists all VPCs, paging through the EC2 API results.
func (c *Client) describeVpcs(ctx context.Context) ([]ec2_types.Vpc, error) {
	var result []ec2_types.Vpc
	paginator := ec2.NewDescribeVpcsPaginator(c.ec2Client, &ec2.DescribeVpcsInput{})
	for paginator.HasMorePages() {
		// rate-limit and time each page request for metrics
		c.limiter.Limit(ctx, "DescribeVpcs")
		sinceStart := spanstat.Start()
		output, err := paginator.NextPage(ctx)
		c.metricsAPI.ObserveAPICall("DescribeVpcs", deriveStatus(err), sinceStart.Seconds())
		if err != nil {
			return nil, err
		}
		result = append(result, output.Vpcs...)
	}
	return result, nil
}
// GetVpcs retrieves and returns all VPCs keyed by VPC ID.
func (c *Client) GetVpcs(ctx context.Context) (ipamTypes.VirtualNetworkMap, error) {
	vpcList, err := c.describeVpcs(ctx)
	if err != nil {
		return nil, err
	}
	vpcs := make(ipamTypes.VirtualNetworkMap, len(vpcList))
	for _, v := range vpcList {
		// aws.ToString maps a nil CidrBlock to "", matching the zero value.
		vpc := &ipamTypes.VirtualNetwork{
			ID:          aws.ToString(v.VpcId),
			PrimaryCIDR: aws.ToString(v.CidrBlock),
		}
		vpcs[vpc.ID] = vpc
	}
	return vpcs, nil
}
// describeSubnets lists all subnets, restricted by the configured subnet
// filters when present, paging through the EC2 API results.
func (c *Client) describeSubnets(ctx context.Context) ([]ec2_types.Subnet, error) {
	var result []ec2_types.Subnet
	input := &ec2.DescribeSubnetsInput{}
	if len(c.subnetsFilters) > 0 {
		input.Filters = c.subnetsFilters
	}
	paginator := ec2.NewDescribeSubnetsPaginator(c.ec2Client, input)
	for paginator.HasMorePages() {
		// rate-limit and time each page request for metrics
		c.limiter.Limit(ctx, "DescribeSubnets")
		sinceStart := spanstat.Start()
		output, err := paginator.NextPage(ctx)
		c.metricsAPI.ObserveAPICall("DescribeSubnets", deriveStatus(err), sinceStart.Seconds())
		if err != nil {
			return nil, err
		}
		result = append(result, output.Subnets...)
	}
	return result, nil
}
// GetSubnets returns all EC2 subnets as a subnet map keyed by subnet ID.
// Subnets whose CIDR block cannot be parsed are skipped.
func (c *Client) GetSubnets(ctx context.Context) (ipamTypes.SubnetMap, error) {
	subnets := ipamTypes.SubnetMap{}
	subnetList, err := c.describeSubnets(ctx)
	if err != nil {
		return nil, err
	}
	for _, s := range subnetList {
		// FIX: the parsed CIDR used to be assigned to a local named "c",
		// shadowing the method receiver; use a distinct name instead.
		subnetCIDR, err := cidr.ParseCIDR(aws.ToString(s.CidrBlock))
		if err != nil {
			continue
		}
		subnet := &ipamTypes.Subnet{
			ID:                 aws.ToString(s.SubnetId),
			CIDR:               subnetCIDR,
			AvailableAddresses: int(s.AvailableIpAddressCount),
			Tags:               map[string]string{},
		}
		if s.AvailabilityZone != nil {
			subnet.AvailabilityZone = aws.ToString(s.AvailabilityZone)
		}
		if s.VpcId != nil {
			subnet.VirtualNetworkID = aws.ToString(s.VpcId)
		}
		for _, tag := range s.Tags {
			// the "Name" tag additionally populates the subnet name
			if aws.ToString(tag.Key) == "Name" {
				subnet.Name = aws.ToString(tag.Value)
			}
			subnet.Tags[aws.ToString(tag.Key)] = aws.ToString(tag.Value)
		}
		subnets[subnet.ID] = subnet
	}
	return subnets, nil
}
// CreateNetworkInterface creates an ENI with the given parameters and returns
// its ID together with the parsed ENI description when parseable.
func (c *Client) CreateNetworkInterface(ctx context.Context, toAllocate int32, subnetID, desc string, groups []string) (string, *eniTypes.ENI, error) {
	input := &ec2.CreateNetworkInterfaceInput{
		Description:                    aws.String(desc),
		SecondaryPrivateIpAddressCount: toAllocate,
		SubnetId:                       aws.String(subnetID),
		Groups:                         groups,
	}
	// FIX: only attach the tag specification when there are tags to apply;
	// sending a TagSpecification with no tags makes the EC2 API reject the
	// request with a 4xx error.
	if len(c.eniTagSpecification.Tags) > 0 {
		input.TagSpecifications = []ec2_types.TagSpecification{
			c.eniTagSpecification,
		}
	}
	c.limiter.Limit(ctx, "CreateNetworkInterface")
	sinceStart := spanstat.Start()
	output, err := c.ec2Client.CreateNetworkInterface(ctx, input)
	c.metricsAPI.ObserveAPICall("CreateNetworkInterface", deriveStatus(err), sinceStart.Seconds())
	if err != nil {
		return "", nil, err
	}
	_, eni, err := parseENI(output.NetworkInterface, nil, nil)
	if err != nil {
		// The error is ignored on purpose. The allocation itself has
		// succeeded. The ability to parse and return the ENI
		// information is optional. Returning the ENI ID is sufficient
		// to allow for the caller to retrieve the ENI information via
		// the API or wait for a regular sync to fetch the information.
		return aws.ToString(output.NetworkInterface.NetworkInterfaceId), nil, nil
	}
	return eni.ID, eni, nil
}
// DeleteNetworkInterface deletes the ENI identified by eniID.
func (c *Client) DeleteNetworkInterface(ctx context.Context, eniID string) error {
	c.limiter.Limit(ctx, "DeleteNetworkInterface")
	sinceStart := spanstat.Start()
	_, err := c.ec2Client.DeleteNetworkInterface(ctx, &ec2.DeleteNetworkInterfaceInput{
		NetworkInterfaceId: aws.String(eniID),
	})
	c.metricsAPI.ObserveAPICall("DeleteNetworkInterface", deriveStatus(err), sinceStart.Seconds())
	return err
}
// AttachNetworkInterface attaches a previously created ENI to an instance at
// the given device index and returns the attachment ID.
func (c *Client) AttachNetworkInterface(ctx context.Context, index int32, instanceID, eniID string) (string, error) {
	input := &ec2.AttachNetworkInterfaceInput{
		DeviceIndex:        index,
		InstanceId:         aws.String(instanceID),
		NetworkInterfaceId: aws.String(eniID),
	}
	c.limiter.Limit(ctx, "AttachNetworkInterface")
	sinceStart := spanstat.Start()
	output, err := c.ec2Client.AttachNetworkInterface(ctx, input)
	c.metricsAPI.ObserveAPICall("AttachNetworkInterface", deriveStatus(err), sinceStart.Seconds())
	if err != nil {
		return "", err
	}
	// FIX: guard against a missing attachment ID instead of dereferencing a
	// potentially nil pointer from the SDK response.
	if output.AttachmentId == nil {
		return "", errors.New("AttachNetworkInterface response is missing the attachment ID")
	}
	return *output.AttachmentId, nil
}
// ModifyNetworkInterface modifies the attributes of an ENI
func (c *Client) ModifyNetworkInterface(ctx context.Context, eniID, attachmentID string, deleteOnTermination bool) error {
	// update the delete-on-termination flag of the given attachment
	changes := &ec2_types.NetworkInterfaceAttachmentChanges{
		AttachmentId:        aws.String(attachmentID),
		DeleteOnTermination: deleteOnTermination,
	}
	input := &ec2.ModifyNetworkInterfaceAttributeInput{
		Attachment:         changes,
		NetworkInterfaceId: aws.String(eniID),
	}
	c.limiter.Limit(ctx, "ModifyNetworkInterfaceAttribute")
	sinceStart := spanstat.Start()
	_, err := c.ec2Client.ModifyNetworkInterfaceAttribute(ctx, input)
	// NOTE(review): the metric label ("ModifyNetworkInterface") differs from
	// the limiter operation name ("ModifyNetworkInterfaceAttribute") -- confirm
	// whether this inconsistency is intentional before changing either string.
	c.metricsAPI.ObserveAPICall("ModifyNetworkInterface", deriveStatus(err), sinceStart.Seconds())
	return err
}
// AssignPrivateIpAddresses asks EC2 to allocate the given number of
// secondary private IP addresses on the ENI identified by eniID.
func (c *Client) AssignPrivateIpAddresses(ctx context.Context, eniID string, addresses int32) error {
	c.limiter.Limit(ctx, "AssignPrivateIpAddresses")
	sinceStart := spanstat.Start()
	_, err := c.ec2Client.AssignPrivateIpAddresses(ctx, &ec2.AssignPrivateIpAddressesInput{
		NetworkInterfaceId:             aws.String(eniID),
		SecondaryPrivateIpAddressCount: addresses,
	})
	c.metricsAPI.ObserveAPICall("AssignPrivateIpAddresses", deriveStatus(err), sinceStart.Seconds())
	return err
}
// UnassignPrivateIpAddresses releases the given secondary private IP
// addresses from the ENI identified by eniID.
func (c *Client) UnassignPrivateIpAddresses(ctx context.Context, eniID string, addresses []string) error {
	c.limiter.Limit(ctx, "UnassignPrivateIpAddresses")
	sinceStart := spanstat.Start()
	_, err := c.ec2Client.UnassignPrivateIpAddresses(ctx, &ec2.UnassignPrivateIpAddressesInput{
		NetworkInterfaceId: aws.String(eniID),
		PrivateIpAddresses: addresses,
	})
	c.metricsAPI.ObserveAPICall("UnassignPrivateIpAddresses", deriveStatus(err), sinceStart.Seconds())
	return err
}
// createAWSTagSlice converts a tag map into the slice form the EC2 API expects.
func createAWSTagSlice(tags map[string]string) []ec2_types.Tag {
	awsTags := make([]ec2_types.Tag, 0, len(tags))
	for key, value := range tags {
		awsTags = append(awsTags, ec2_types.Tag{
			Key:   aws.String(key),
			Value: aws.String(value),
		})
	}
	return awsTags
}
// describeSecurityGroups lists all security groups, paging through the EC2
// API results.
func (c *Client) describeSecurityGroups(ctx context.Context) ([]ec2_types.SecurityGroup, error) {
	var result []ec2_types.SecurityGroup
	paginator := ec2.NewDescribeSecurityGroupsPaginator(c.ec2Client, &ec2.DescribeSecurityGroupsInput{})
	for paginator.HasMorePages() {
		// rate-limit and time each page request for metrics
		c.limiter.Limit(ctx, "DescribeSecurityGroups")
		sinceStart := spanstat.Start()
		output, err := paginator.NextPage(ctx)
		c.metricsAPI.ObserveAPICall("DescribeSecurityGroups", deriveStatus(err), sinceStart.Seconds())
		if err != nil {
			return nil, err
		}
		result = append(result, output.SecurityGroups...)
	}
	return result, nil
}
// GetSecurityGroups returns all EC2 security groups as a SecurityGroupMap
// keyed by group ID.
func (c *Client) GetSecurityGroups(ctx context.Context) (types.SecurityGroupMap, error) {
	securityGroups := types.SecurityGroupMap{}
	secGroupList, err := c.describeSecurityGroups(ctx)
	if err != nil {
		return securityGroups, err
	}
	for _, secGroup := range secGroupList {
		group := &types.SecurityGroup{
			ID:    aws.ToString(secGroup.GroupId),
			VpcID: aws.ToString(secGroup.VpcId),
			Tags:  map[string]string{},
		}
		for _, tag := range secGroup.Tags {
			group.Tags[aws.ToString(tag.Key)] = aws.ToString(tag.Value)
		}
		securityGroups[group.ID] = group
	}
	return securityGroups, nil
}
// GetInstanceTypes returns all the known EC2 instance types in the configured
// region, paging through the EC2 API results.
func (c *Client) GetInstanceTypes(ctx context.Context) ([]ec2_types.InstanceTypeInfo, error) {
	var result []ec2_types.InstanceTypeInfo
	paginator := ec2.NewDescribeInstanceTypesPaginator(c.ec2Client, &ec2.DescribeInstanceTypesInput{})
	for paginator.HasMorePages() {
		// rate-limit and time each page request for metrics
		c.limiter.Limit(ctx, "DescribeInstanceTypes")
		sinceStart := spanstat.Start()
		output, err := paginator.NextPage(ctx)
		c.metricsAPI.ObserveAPICall("DescribeInstanceTypes", deriveStatus(err), sinceStart.Seconds())
		if err != nil {
			return nil, err
		}
		result = append(result, output.InstanceTypes...)
	}
	return result, nil
}
Add TagSpecifications to ec2:CreateNetworkInterface only when len > 0
If no tags are being passed, we currently still set the TagSpecification,
which results in a 4xx from the EC2 API.
Fixes 02bf3ba40040d735814c599e578a02356f4c861c.
Signed-off-by: Vlad Ungureanu <74f020a4111db471e6d40b8663c548673ce682e8@palantir.com>
// Copyright 2019 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ec2
import (
"context"
"errors"
"fmt"
"github.com/cilium/cilium/pkg/api/helpers"
"github.com/cilium/cilium/pkg/aws/endpoints"
eniTypes "github.com/cilium/cilium/pkg/aws/eni/types"
"github.com/cilium/cilium/pkg/aws/types"
"github.com/cilium/cilium/pkg/cidr"
ipamTypes "github.com/cilium/cilium/pkg/ipam/types"
"github.com/cilium/cilium/pkg/spanstat"
"github.com/aws/aws-sdk-go-v2/aws"
awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
awsconfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
"github.com/aws/aws-sdk-go-v2/service/ec2"
ec2_types "github.com/aws/aws-sdk-go-v2/service/ec2/types"
)
// Client represents an EC2 API client
type Client struct {
	ec2Client           *ec2.Client
	limiter             *helpers.ApiLimiter        // rate-limits every AWS API call
	metricsAPI          MetricsAPI                 // records per-call duration/status metrics
	subnetsFilters      []ec2_types.Filter         // restricts subnet/ENI discovery when non-empty
	eniTagSpecification ec2_types.TagSpecification // tags applied to ENIs created by this client
}
// MetricsAPI represents the metrics maintained by the AWS API client
type MetricsAPI interface {
	helpers.MetricsAPI
	// ObserveAPICall records one AWS API call with its status and duration in seconds.
	ObserveAPICall(call, status string, duration float64)
}
// NewClient returns a new EC2 client
func NewClient(ec2Client *ec2.Client, metrics MetricsAPI, rateLimit float64, burst int, subnetsFilters []ec2_types.Filter, eniTags map[string]string) *Client {
	// pre-build the tag specification applied to every ENI this client creates
	eniTagSpecification := ec2_types.TagSpecification{
		ResourceType: ec2_types.ResourceTypeNetworkInterface,
		Tags:         createAWSTagSlice(eniTags),
	}
	return &Client{
		ec2Client:           ec2Client,
		metricsAPI:          metrics,
		limiter:             helpers.NewApiLimiter(metrics, rateLimit, burst),
		subnetsFilters:      subnetsFilters,
		eniTagSpecification: eniTagSpecification,
	}
}
// NewConfig returns a new aws.Config configured with the correct region + endpoint resolver
func NewConfig(ctx context.Context) (aws.Config, error) {
	cfg, err := awsconfig.LoadDefaultConfig(ctx)
	if err != nil {
		return aws.Config{}, fmt.Errorf("unable to load AWS configuration: %w", err)
	}
	// discover the region from the EC2 instance metadata service
	metadataClient := imds.NewFromConfig(cfg)
	instance, err := metadataClient.GetInstanceIdentityDocument(ctx, &imds.GetInstanceIdentityDocumentInput{})
	if err != nil {
		return aws.Config{}, fmt.Errorf("unable to retrieve instance identity document: %w", err)
	}
	cfg.Region = instance.Region
	// install the Cilium endpoint resolver
	cfg.EndpointResolver = aws.EndpointResolverFunc(endpoints.Resolver)
	return cfg, nil
}
// NewSubnetsFilters transforms a map of tags and values and a slice of subnets
// into a slice of ec2.Filter adequate to filter AWS subnets.
func NewSubnetsFilters(tags map[string]string, ids []string) []ec2_types.Filter {
	filters := make([]ec2_types.Filter, 0, len(tags)+1)
	// one filter per tag key/value pair
	for k, v := range tags {
		filters = append(filters, ec2_types.Filter{
			Name:   aws.String(fmt.Sprintf("tag:%s", k)),
			Values: []string{v},
		})
	}
	// an additional filter restricting to explicit subnet IDs, when given
	if len(ids) > 0 {
		filters = append(filters, ec2_types.Filter{
			Name:   aws.String("subnet-id"),
			Values: ids,
		})
	}
	return filters
}
// deriveStatus returns a status string based on the HTTP response provided by
// the AWS API server. If no specific status is provided, either "OK" or
// "Failed" is returned based on the error variable.
func deriveStatus(err error) string {
	// prefer the HTTP status attached to the error, when present
	var respErr *awshttp.ResponseError
	if errors.As(err, &respErr) {
		return respErr.Response.Status
	}
	if err != nil {
		return "Failed"
	}
	return "OK"
}
// describeNetworkInterfaces lists all ENIs, paging through the EC2 API
// results. When subnet filters are configured on the client, the listing is
// restricted to the subnets present in the given subnet map.
func (c *Client) describeNetworkInterfaces(ctx context.Context, subnets ipamTypes.SubnetMap) ([]ec2_types.NetworkInterface, error) {
	var result []ec2_types.NetworkInterface
	input := &ec2.DescribeNetworkInterfacesInput{}
	if len(c.subnetsFilters) > 0 {
		// restrict the query to the subnets that passed the configured filters
		subnetsIDs := make([]string, 0, len(subnets))
		for id := range subnets {
			subnetsIDs = append(subnetsIDs, id)
		}
		input.Filters = []ec2_types.Filter{
			{
				Name:   aws.String("subnet-id"),
				Values: subnetsIDs,
			},
		}
	}
	paginator := ec2.NewDescribeNetworkInterfacesPaginator(c.ec2Client, input)
	for paginator.HasMorePages() {
		// rate-limit and time each page request for metrics
		c.limiter.Limit(ctx, "DescribeNetworkInterfaces")
		sinceStart := spanstat.Start()
		output, err := paginator.NextPage(ctx)
		c.metricsAPI.ObserveAPICall("DescribeNetworkInterfaces", deriveStatus(err), sinceStart.Seconds())
		if err != nil {
			return nil, err
		}
		result = append(result, output.NetworkInterfaces...)
	}
	return result, nil
}
// parseENI parses a ec2.NetworkInterface as returned by the EC2 service API,
// converts it into a eniTypes.ENI object. It returns the instance ID the
// interface is attached to (empty when unattached) and an error when the
// interface has no primary private IP address. The optional vpcs/subnets
// maps, when non-nil, are used to enrich the result with CIDR information.
func parseENI(iface *ec2_types.NetworkInterface, vpcs ipamTypes.VirtualNetworkMap, subnets ipamTypes.SubnetMap) (instanceID string, eni *eniTypes.ENI, err error) {
	if iface.PrivateIpAddress == nil {
		err = fmt.Errorf("ENI has no IP address")
		return
	}
	eni = &eniTypes.ENI{
		IP:             aws.ToString(iface.PrivateIpAddress),
		SecurityGroups: []string{},
		Addresses:      []string{},
	}
	// The SDK models optional fields as pointers; copy each one only when set.
	if iface.MacAddress != nil {
		eni.MAC = aws.ToString(iface.MacAddress)
	}
	if iface.NetworkInterfaceId != nil {
		eni.ID = aws.ToString(iface.NetworkInterfaceId)
	}
	if iface.Description != nil {
		eni.Description = aws.ToString(iface.Description)
	}
	if iface.Attachment != nil {
		eni.Number = int(iface.Attachment.DeviceIndex)
		if iface.Attachment.InstanceId != nil {
			instanceID = aws.ToString(iface.Attachment.InstanceId)
		}
	}
	if iface.SubnetId != nil {
		eni.Subnet.ID = aws.ToString(iface.SubnetId)
		// enrich with the subnet CIDR when the caller supplied subnet data
		if subnets != nil {
			if subnet, ok := subnets[eni.Subnet.ID]; ok && subnet.CIDR != nil {
				eni.Subnet.CIDR = subnet.CIDR.String()
			}
		}
	}
	if iface.VpcId != nil {
		eni.VPC.ID = aws.ToString(iface.VpcId)
		// enrich with the VPC primary CIDR when the caller supplied VPC data
		if vpcs != nil {
			if vpc, ok := vpcs[eni.VPC.ID]; ok {
				eni.VPC.PrimaryCIDR = vpc.PrimaryCIDR
			}
		}
	}
	// collect every private IP assigned to the interface
	for _, ip := range iface.PrivateIpAddresses {
		if ip.PrivateIpAddress != nil {
			eni.Addresses = append(eni.Addresses, aws.ToString(ip.PrivateIpAddress))
		}
	}
	// collect the attached security group IDs
	for _, g := range iface.Groups {
		if g.GroupId != nil {
			eni.SecurityGroups = append(eni.SecurityGroups, aws.ToString(g.GroupId))
		}
	}
	return
}
// GetInstances returns the list of all instances including their ENIs as
// instanceMap. Interfaces that are not attached to an instance are skipped.
func (c *Client) GetInstances(ctx context.Context, vpcs ipamTypes.VirtualNetworkMap, subnets ipamTypes.SubnetMap) (*ipamTypes.InstanceMap, error) {
	instances := ipamTypes.NewInstanceMap()
	networkInterfaces, err := c.describeNetworkInterfaces(ctx, subnets)
	if err != nil {
		return nil, err
	}
	for _, iface := range networkInterfaces {
		// &iface aliases the loop variable; parseENI only reads it before the
		// next iteration, so this is safe
		id, eni, err := parseENI(&iface, vpcs, subnets)
		if err != nil {
			return nil, err
		}
		if id != "" {
			instances.Update(id, ipamTypes.InterfaceRevision{Resource: eni})
		}
	}
	return instances, nil
}
// describeVpcs lists all VPCs, paging through the EC2 API results.
func (c *Client) describeVpcs(ctx context.Context) ([]ec2_types.Vpc, error) {
	var result []ec2_types.Vpc
	paginator := ec2.NewDescribeVpcsPaginator(c.ec2Client, &ec2.DescribeVpcsInput{})
	for paginator.HasMorePages() {
		// rate-limit and time each page request for metrics
		c.limiter.Limit(ctx, "DescribeVpcs")
		sinceStart := spanstat.Start()
		output, err := paginator.NextPage(ctx)
		c.metricsAPI.ObserveAPICall("DescribeVpcs", deriveStatus(err), sinceStart.Seconds())
		if err != nil {
			return nil, err
		}
		result = append(result, output.Vpcs...)
	}
	return result, nil
}
// GetVpcs retrieves and returns all Vpcs keyed by VPC ID.
func (c *Client) GetVpcs(ctx context.Context) (ipamTypes.VirtualNetworkMap, error) {
	vpcs := ipamTypes.VirtualNetworkMap{}
	vpcList, err := c.describeVpcs(ctx)
	if err != nil {
		return nil, err
	}
	for _, v := range vpcList {
		vpc := &ipamTypes.VirtualNetwork{ID: aws.ToString(v.VpcId)}
		// the primary CIDR is optional in the API response
		if v.CidrBlock != nil {
			vpc.PrimaryCIDR = aws.ToString(v.CidrBlock)
		}
		vpcs[vpc.ID] = vpc
	}
	return vpcs, nil
}
// describeSubnets lists all subnets, restricted by the configured subnet
// filters when present, paging through the EC2 API results.
func (c *Client) describeSubnets(ctx context.Context) ([]ec2_types.Subnet, error) {
	var result []ec2_types.Subnet
	input := &ec2.DescribeSubnetsInput{}
	if len(c.subnetsFilters) > 0 {
		input.Filters = c.subnetsFilters
	}
	paginator := ec2.NewDescribeSubnetsPaginator(c.ec2Client, input)
	for paginator.HasMorePages() {
		// rate-limit and time each page request for metrics
		c.limiter.Limit(ctx, "DescribeSubnets")
		sinceStart := spanstat.Start()
		output, err := paginator.NextPage(ctx)
		c.metricsAPI.ObserveAPICall("DescribeSubnets", deriveStatus(err), sinceStart.Seconds())
		if err != nil {
			return nil, err
		}
		result = append(result, output.Subnets...)
	}
	return result, nil
}
// GetSubnets returns all EC2 subnets as a subnet map keyed by subnet ID.
// Subnets whose CIDR block cannot be parsed are skipped.
func (c *Client) GetSubnets(ctx context.Context) (ipamTypes.SubnetMap, error) {
	subnets := ipamTypes.SubnetMap{}
	subnetList, err := c.describeSubnets(ctx)
	if err != nil {
		return nil, err
	}
	for _, s := range subnetList {
		// FIX: the parsed CIDR used to be assigned to a local named "c",
		// shadowing the method receiver; use a distinct name instead.
		subnetCIDR, err := cidr.ParseCIDR(aws.ToString(s.CidrBlock))
		if err != nil {
			continue
		}
		subnet := &ipamTypes.Subnet{
			ID:                 aws.ToString(s.SubnetId),
			CIDR:               subnetCIDR,
			AvailableAddresses: int(s.AvailableIpAddressCount),
			Tags:               map[string]string{},
		}
		if s.AvailabilityZone != nil {
			subnet.AvailabilityZone = aws.ToString(s.AvailabilityZone)
		}
		if s.VpcId != nil {
			subnet.VirtualNetworkID = aws.ToString(s.VpcId)
		}
		for _, tag := range s.Tags {
			// the "Name" tag additionally populates the subnet name
			if aws.ToString(tag.Key) == "Name" {
				subnet.Name = aws.ToString(tag.Value)
			}
			subnet.Tags[aws.ToString(tag.Key)] = aws.ToString(tag.Value)
		}
		subnets[subnet.ID] = subnet
	}
	return subnets, nil
}
// CreateNetworkInterface creates an ENI with the given parameters
//
// toAllocate is the number of secondary private IPs to request, subnetID
// and desc configure the interface, and groups are the security group IDs
// to attach. The client's configured ENI tag specification, if any, is
// applied at creation time. Returns the new ENI's ID and, when the API
// response could be parsed, its parsed representation.
func (c *Client) CreateNetworkInterface(ctx context.Context, toAllocate int32, subnetID, desc string, groups []string) (string, *eniTypes.ENI, error) {
	input := &ec2.CreateNetworkInterfaceInput{
		Description:                    aws.String(desc),
		SecondaryPrivateIpAddressCount: toAllocate,
		SubnetId:                       aws.String(subnetID),
		Groups:                         groups,
	}
	if len(c.eniTagSpecification.Tags) > 0 {
		input.TagSpecifications = []ec2_types.TagSpecification{
			c.eniTagSpecification,
		}
	}
	// Rate-limit and time the API call for metrics.
	c.limiter.Limit(ctx, "CreateNetworkInterface")
	sinceStart := spanstat.Start()
	output, err := c.ec2Client.CreateNetworkInterface(ctx, input)
	c.metricsAPI.ObserveAPICall("CreateNetworkInterface", deriveStatus(err), sinceStart.Seconds())
	if err != nil {
		return "", nil, err
	}
	_, eni, err := parseENI(output.NetworkInterface, nil, nil)
	if err != nil {
		// The error is ignored on purpose. The allocation itself has
		// succeeded. The ability to parse and return the ENI
		// information is optional. Returning the ENI ID is sufficient
		// to allow for the caller to retrieve the ENI information via
		// the API or wait for a regular sync to fetch the information.
		return aws.ToString(output.NetworkInterface.NetworkInterfaceId), nil, nil
	}
	return eni.ID, eni, nil
}
// DeleteNetworkInterface deletes an ENI with the specified ID
func (c *Client) DeleteNetworkInterface(ctx context.Context, eniID string) error {
	// Rate-limit before issuing the call, then time it for metrics.
	c.limiter.Limit(ctx, "DeleteNetworkInterface")
	input := &ec2.DeleteNetworkInterfaceInput{NetworkInterfaceId: aws.String(eniID)}
	start := spanstat.Start()
	_, err := c.ec2Client.DeleteNetworkInterface(ctx, input)
	c.metricsAPI.ObserveAPICall("DeleteNetworkInterface", deriveStatus(err), start.Seconds())
	return err
}
// AttachNetworkInterface attaches a previously created ENI to an instance
// at the given device index and returns the attachment ID.
func (c *Client) AttachNetworkInterface(ctx context.Context, index int32, instanceID, eniID string) (string, error) {
	input := &ec2.AttachNetworkInterfaceInput{
		DeviceIndex:        index,
		InstanceId:         aws.String(instanceID),
		NetworkInterfaceId: aws.String(eniID),
	}
	// Rate-limit and time the API call for metrics.
	c.limiter.Limit(ctx, "AttachNetworkInterface")
	sinceStart := spanstat.Start()
	output, err := c.ec2Client.AttachNetworkInterface(ctx, input)
	c.metricsAPI.ObserveAPICall("AttachNetworkInterface", deriveStatus(err), sinceStart.Seconds())
	if err != nil {
		return "", err
	}
	// Fix: use aws.ToString instead of a raw pointer dereference; a nil
	// AttachmentId in the response would otherwise panic. The rest of the
	// file consistently uses aws.ToString for SDK response pointers.
	return aws.ToString(output.AttachmentId), nil
}
// ModifyNetworkInterface modifies the attributes of an ENI
//
// Currently only the attachment's delete-on-termination flag is changed,
// identified by the ENI ID and its attachment ID.
func (c *Client) ModifyNetworkInterface(ctx context.Context, eniID, attachmentID string, deleteOnTermination bool) error {
	changes := &ec2_types.NetworkInterfaceAttachmentChanges{
		AttachmentId:        aws.String(attachmentID),
		DeleteOnTermination: deleteOnTermination,
	}
	input := &ec2.ModifyNetworkInterfaceAttributeInput{
		Attachment:         changes,
		NetworkInterfaceId: aws.String(eniID),
	}
	c.limiter.Limit(ctx, "ModifyNetworkInterfaceAttribute")
	sinceStart := spanstat.Start()
	_, err := c.ec2Client.ModifyNetworkInterfaceAttribute(ctx, input)
	// NOTE(review): the metric label ("ModifyNetworkInterface") differs from
	// the limiter label / API name ("ModifyNetworkInterfaceAttribute") —
	// confirm whether this is intentional before unifying, since changing it
	// would rename an emitted metric.
	c.metricsAPI.ObserveAPICall("ModifyNetworkInterface", deriveStatus(err), sinceStart.Seconds())
	return err
}
// AssignPrivateIpAddresses assigns the specified number of secondary IP
// addresses
func (c *Client) AssignPrivateIpAddresses(ctx context.Context, eniID string, addresses int32) error {
	// Rate-limit before issuing the call, then time it for metrics.
	c.limiter.Limit(ctx, "AssignPrivateIpAddresses")
	req := &ec2.AssignPrivateIpAddressesInput{
		NetworkInterfaceId:             aws.String(eniID),
		SecondaryPrivateIpAddressCount: addresses,
	}
	start := spanstat.Start()
	_, err := c.ec2Client.AssignPrivateIpAddresses(ctx, req)
	c.metricsAPI.ObserveAPICall("AssignPrivateIpAddresses", deriveStatus(err), start.Seconds())
	return err
}
// UnassignPrivateIpAddresses unassigns specified IP addresses from ENI
func (c *Client) UnassignPrivateIpAddresses(ctx context.Context, eniID string, addresses []string) error {
	// Rate-limit before issuing the call, then time it for metrics.
	c.limiter.Limit(ctx, "UnassignPrivateIpAddresses")
	req := &ec2.UnassignPrivateIpAddressesInput{
		NetworkInterfaceId: aws.String(eniID),
		PrivateIpAddresses: addresses,
	}
	start := spanstat.Start()
	_, err := c.ec2Client.UnassignPrivateIpAddresses(ctx, req)
	c.metricsAPI.ObserveAPICall("UnassignPrivateIpAddresses", deriveStatus(err), start.Seconds())
	return err
}
// createAWSTagSlice converts a plain key/value map into the slice of
// ec2_types.Tag values the AWS SDK expects.
func createAWSTagSlice(tags map[string]string) []ec2_types.Tag {
	out := make([]ec2_types.Tag, 0, len(tags))
	for key, value := range tags {
		out = append(out, ec2_types.Tag{
			Key:   aws.String(key),
			Value: aws.String(value),
		})
	}
	return out
}
// describeSecurityGroups lists all security groups visible to this
// account/region, paginating through the EC2 DescribeSecurityGroups API.
// Each page request is rate limited and recorded as a metric observation.
func (c *Client) describeSecurityGroups(ctx context.Context) ([]ec2_types.SecurityGroup, error) {
	var result []ec2_types.SecurityGroup
	paginator := ec2.NewDescribeSecurityGroupsPaginator(c.ec2Client, &ec2.DescribeSecurityGroupsInput{})
	for paginator.HasMorePages() {
		// Rate-limit before each page request, and time the call for metrics.
		c.limiter.Limit(ctx, "DescribeSecurityGroups")
		sinceStart := spanstat.Start()
		output, err := paginator.NextPage(ctx)
		c.metricsAPI.ObserveAPICall("DescribeSecurityGroups", deriveStatus(err), sinceStart.Seconds())
		if err != nil {
			return nil, err
		}
		result = append(result, output.SecurityGroups...)
	}
	return result, nil
}
// GetSecurityGroups returns all EC2 security groups as a SecurityGroupMap
func (c *Client) GetSecurityGroups(ctx context.Context) (types.SecurityGroupMap, error) {
	result := types.SecurityGroupMap{}
	groups, err := c.describeSecurityGroups(ctx)
	if err != nil {
		return result, err
	}
	// Index every group by its ID, copying its VPC association and tags.
	for _, g := range groups {
		sg := &types.SecurityGroup{
			ID:    aws.ToString(g.GroupId),
			VpcID: aws.ToString(g.VpcId),
			Tags:  map[string]string{},
		}
		for _, t := range g.Tags {
			sg.Tags[aws.ToString(t.Key)] = aws.ToString(t.Value)
		}
		result[sg.ID] = sg
	}
	return result, nil
}
// GetInstanceTypes returns all the known EC2 instance types in the configured region
//
// Results are gathered by paginating through the DescribeInstanceTypes API;
// each page request is rate limited and recorded as a metric observation.
func (c *Client) GetInstanceTypes(ctx context.Context) ([]ec2_types.InstanceTypeInfo, error) {
	var result []ec2_types.InstanceTypeInfo
	paginator := ec2.NewDescribeInstanceTypesPaginator(c.ec2Client, &ec2.DescribeInstanceTypesInput{})
	for paginator.HasMorePages() {
		// Rate-limit before each page request, and time the call for metrics.
		c.limiter.Limit(ctx, "DescribeInstanceTypes")
		sinceStart := spanstat.Start()
		output, err := paginator.NextPage(ctx)
		c.metricsAPI.ObserveAPICall("DescribeInstanceTypes", deriveStatus(err), sinceStart.Seconds())
		if err != nil {
			return nil, err
		}
		result = append(result, output.InstanceTypes...)
	}
	return result, nil
}
|
// Copyright 2018 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
// Package build contains helper functions for building kernels/images.
package build
import (
	"bytes"
	"fmt"
	"os"
	"path/filepath"
	"regexp"
	"strings"
	"time"

	"github.com/google/syzkaller/pkg/osutil"
	"github.com/google/syzkaller/pkg/report"
)
// Params is input arguments for the Image function.
type Params struct {
	TargetOS     string // target OS name, e.g. "linux"
	TargetArch   string // target architecture, e.g. "amd64"
	VMType       string // VM type the image is built for, e.g. "qemu"
	KernelDir    string // directory containing the kernel sources to build
	OutputDir    string // directory where all build outputs are written
	Compiler     string // compiler binary to use (may be empty for default)
	UserspaceDir string // directory with the userspace system for the image
	CmdlineFile  string // optional file appended to the kernel command line
	SysctlFile   string // optional file appended to the image /etc/sysctl.conf
	Config       []byte // kernel config; written to OutputDir/kernel.config
}
// Image creates a disk image for the specified OS/ARCH/VM.
// Kernel is taken from KernelDir, userspace system is taken from UserspaceDir.
// If CmdlineFile is not empty, contents of the file are appended to the kernel command line.
// If SysctlFile is not empty, contents of the file are appended to the image /etc/sysctl.conf.
// Output is stored in OutputDir and includes (everything except for image is optional):
// - image: the image
// - key: ssh key for the image
// - kernel: kernel for injected boot
// - initrd: initrd for injected boot
// - kernel.config: actual kernel config used during build
// - obj/: directory with kernel object files (this should match KernelObject
//   specified in sys/targets, e.g. vmlinux for linux)
// The returned string is a kernel ID that will be the same for kernels with the
// same runtime behavior, and different for kernels with different runtime
// behavior. Binary equal builds, or builds that differ only in e.g. debug info,
// have the same ID. The ID may be empty if OS implementation does not have
// a way to calculate such IDs.
func Image(params *Params) (string, error) {
	builder, err := getBuilder(params.TargetOS, params.TargetArch, params.VMType)
	if err != nil {
		return "", err
	}
	if err := osutil.MkdirAll(filepath.Join(params.OutputDir, "obj")); err != nil {
		return "", err
	}
	if len(params.Config) != 0 {
		// Write kernel config early, so that it's captured on build failures.
		if err := osutil.WriteFile(filepath.Join(params.OutputDir, "kernel.config"), params.Config); err != nil {
			return "", fmt.Errorf("failed to write config file: %v", err)
		}
	}
	err = builder.build(params)
	if err != nil {
		return "", extractRootCause(err, params.TargetOS, params.KernelDir)
	}
	// Fix: ssh refuses private keys with permissive modes, so enforce 0600
	// on the generated image key instead of leaving that to the user.
	if key := filepath.Join(params.OutputDir, "key"); osutil.IsExist(key) {
		if err := os.Chmod(key, 0600); err != nil {
			return "", fmt.Errorf("failed to chmod 0600 %v: %v", key, err)
		}
	}
	sign := ""
	if signer, ok := builder.(signer); ok {
		// Builders that implement signer can compute a kernel ID.
		sign, err = signer.sign(params)
		if err != nil {
			return "", err
		}
	}
	return sign, nil
}
// Clean removes build artifacts for the given OS/ARCH/VM combination by
// delegating to the matching builder implementation.
func Clean(targetOS, targetArch, vmType, kernelDir string) error {
	builder, err := getBuilder(targetOS, targetArch, vmType)
	if err != nil {
		return err
	}
	return builder.clean(kernelDir, targetArch)
}
// KernelError is returned by Image when the kernel build itself failed.
// It carries the extracted root cause alongside the full build output.
type KernelError struct {
	Report      []byte   // extracted root-cause lines of the failure
	Output      []byte   // full verbose build output
	Maintainers []string // maintainers of the guilty file (linux builds only)
	guiltyFile  string   // source file blamed for the failure, if identified
}

// Error implements the error interface; the message is the extracted report.
func (err *KernelError) Error() string {
	return string(err.Report)
}
// builder abstracts per-OS image building and cleanup.
type builder interface {
	build(params *Params) error
	clean(kernelDir, targetArch string) error
}

// signer is optionally implemented by builders that can compute a kernel
// ID ("sign") for the built image.
type signer interface {
	sign(params *Params) (string, error)
}
// getBuilder selects the builder implementation registered for the given
// OS/arch/VM-type triple, or returns an error if none matches.
func getBuilder(targetOS, targetArch, vmType string) (builder, error) {
	type target struct {
		OS   string
		arch string
		vms  []string
		b    builder
	}
	targets := []target{
		{"linux", "amd64", []string{"gvisor"}, gvisor{}},
		{"linux", "amd64", []string{"gce", "qemu"}, linux{}},
		{"linux", "ppc64le", []string{"qemu"}, linux{}},
		{"fuchsia", "amd64", []string{"qemu"}, fuchsia{}},
		{"fuchsia", "arm64", []string{"qemu"}, fuchsia{}},
		{"akaros", "amd64", []string{"qemu"}, akaros{}},
		{"openbsd", "amd64", []string{"gce", "vmm"}, openbsd{}},
		{"netbsd", "amd64", []string{"gce", "qemu"}, netbsd{}},
		{"freebsd", "amd64", []string{"gce", "qemu"}, freebsd{}},
		{"test", "64", []string{"qemu"}, test{}},
	}
	for _, t := range targets {
		if t.OS != targetOS || t.arch != targetArch {
			continue
		}
		for _, vm := range t.vms {
			if vm == vmType {
				return t.b, nil
			}
		}
	}
	return nil, fmt.Errorf("unsupported image type %v/%v/%v", targetOS, targetArch, vmType)
}
// CompilerIdentity returns a one-line version string for the given compiler
// binary, or "" if compiler is empty. For bazel, extraction and log noise
// is filtered out of the version output first.
func CompilerIdentity(compiler string) (string, error) {
	if compiler == "" {
		return "", nil
	}
	bazel := strings.HasSuffix(compiler, "bazel")
	arg := "--version"
	if bazel {
		// bazel prints its version without any flag.
		// NOTE(review): an empty-string argument is still passed to the
		// command rather than being omitted — confirm bazel tolerates that.
		arg = ""
	}
	output, err := osutil.RunCmd(time.Minute, "", compiler, arg)
	if err != nil {
		return "", err
	}
	// Return the first meaningful line of the version output.
	for _, line := range strings.Split(string(output), "\n") {
		if bazel {
			// Strip extracting and log lines...
			if strings.Contains(line, "Extracting Bazel") {
				continue
			}
			if strings.HasPrefix(line, "INFO: ") {
				continue
			}
			if strings.HasPrefix(line, "WARNING: ") {
				continue
			}
		}
		return strings.TrimSpace(line), nil
	}
	return "", fmt.Errorf("no output from compiler --version")
}
// extractRootCause converts a verbose build error into a *KernelError with
// the root cause extracted from the build output. Errors that are not
// *osutil.VerboseError, or whose output yields no recognizable cause, are
// returned unchanged. For linux builds, the maintainers of the guilty file
// are looked up and attached.
func extractRootCause(err error, OS, kernelSrc string) error {
	if err == nil {
		return nil
	}
	verr, ok := err.(*osutil.VerboseError)
	if !ok {
		return err
	}
	reason, file := extractCauseInner(verr.Output, kernelSrc)
	if len(reason) == 0 {
		// No recognizable failure cause; keep the original error.
		return err
	}
	kernelErr := &KernelError{
		Report:     reason,
		Output:     verr.Output,
		guiltyFile: file,
	}
	if file != "" && OS == "linux" {
		maintainers, err := report.GetLinuxMaintainers(kernelSrc, file)
		if err != nil {
			// Maintainer lookup is best-effort; record the failure in the
			// output rather than failing the whole extraction.
			kernelErr.Output = append(kernelErr.Output, err.Error()...)
		}
		kernelErr.Maintainers = maintainers
	}
	return kernelErr
}
// extractCauseInner post-processes the raw cause lines: it truncates to at
// most 20 lines, strips the kernel source directory prefix, identifies the
// first referenced source file, and normalizes gcc's unicode quotes.
// It returns the joined report and the guilty file name ("" if none found).
func extractCauseInner(s []byte, kernelSrc string) ([]byte, string) {
	lines := extractCauseRaw(s)
	const maxLines = 20
	if len(lines) > maxLines {
		lines = lines[:maxLines]
	}
	// Build "<kernelSrc>/" so absolute paths into the kernel tree become
	// relative in the report.
	var stripPrefix []byte
	if kernelSrc != "" {
		stripPrefix = []byte(kernelSrc)
		if stripPrefix[len(stripPrefix)-1] != filepath.Separator {
			stripPrefix = append(stripPrefix, filepath.Separator)
		}
	}
	file := ""
	for i := range lines {
		if stripPrefix != nil {
			lines[i] = bytes.Replace(lines[i], stripPrefix, nil, -1)
		}
		// Remember the first file:line location seen in the report.
		if file == "" {
			match := fileRe.FindSubmatch(lines[i])
			if match != nil {
				file = string(match[1])
				if file[0] == '/' {
					// We already removed kernel source prefix,
					// if we still have an absolute path, it's probably pointing
					// to compiler/system libraries (not going to work).
					file = ""
				}
			}
		}
	}
	file = strings.TrimPrefix(file, "./")
	res := bytes.Join(lines, []byte{'\n'})
	// gcc uses these weird quotes around identifiers, which may be
	// mis-rendered by systems that don't understand utf-8.
	res = bytes.Replace(res, []byte("‘"), []byte{'\''}, -1)
	res = bytes.Replace(res, []byte("’"), []byte{'\''}, -1)
	return res, file
}
// extractCauseRaw scans build output for lines that match known failure
// patterns. Lines matched only by "weak" patterns (generic linker noise
// etc.) are kept until the first strong match appears, at which point the
// previously collected weak lines are discarded. Duplicate lines are
// dropped via the dedup set.
func extractCauseRaw(s []byte) [][]byte {
	weak := true
	var cause [][]byte
	dedup := make(map[string]bool)
	for _, line := range bytes.Split(s, []byte{'\n'}) {
		for _, pattern := range buildFailureCauses {
			if !bytes.Contains(line, pattern.pattern) {
				continue
			}
			// First strong match invalidates any weak-only causes collected
			// so far; start the collection over.
			if weak && !pattern.weak {
				cause = nil
				dedup = make(map[string]bool)
			}
			if dedup[string(line)] {
				continue
			}
			dedup[string(line)] = true
			// Record whether this (re)started collection is weak or strong.
			if cause == nil {
				weak = pattern.weak
			}
			cause = append(cause, line)
			break
		}
	}
	return cause
}
// buildFailureCause describes one known substring indicating a build
// failure; weak causes are only reported when no strong cause is present.
type buildFailureCause struct {
	pattern []byte // substring to search for in each output line
	weak    bool   // true for generic markers that a strong match overrides
}

// buildFailureCauses lists the known compiler/linker failure markers.
var buildFailureCauses = [...]buildFailureCause{
	{pattern: []byte(": error: ")},
	{pattern: []byte("ERROR: ")},
	{pattern: []byte(": fatal error: ")},
	{pattern: []byte(": undefined reference to")},
	{pattern: []byte(": Permission denied")},
	{weak: true, pattern: []byte(": final link failed: ")},
	{weak: true, pattern: []byte("collect2: error: ")},
	{weak: true, pattern: []byte("FAILED: Build did NOT complete")},
}
var fileRe = regexp.MustCompile(`^([a-zA-Z0-9_\-/.]+):[0-9]+:([0-9]+:)? `)
pkg/build: chmod output ssh key
ssh is very picky about ssh key permissions.
Let's enforce the right permissions without bothering the user with this.
// Copyright 2018 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
// Package build contains helper functions for building kernels/images.
package build
import (
"bytes"
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/pkg/report"
)
// Params is input arguments for the Image function.
type Params struct {
	TargetOS     string // target OS name, e.g. "linux"
	TargetArch   string // target architecture, e.g. "amd64"
	VMType       string // VM type the image is built for, e.g. "qemu"
	KernelDir    string // directory containing the kernel sources to build
	OutputDir    string // directory where all build outputs are written
	Compiler     string // compiler binary to use (may be empty for default)
	UserspaceDir string // directory with the userspace system for the image
	CmdlineFile  string // optional file appended to the kernel command line
	SysctlFile   string // optional file appended to the image /etc/sysctl.conf
	Config       []byte // kernel config; written to OutputDir/kernel.config
}
// Image creates a disk image for the specified OS/ARCH/VM.
// Kernel is taken from KernelDir, userspace system is taken from UserspaceDir.
// If CmdlineFile is not empty, contents of the file are appended to the kernel command line.
// If SysctlFile is not empty, contents of the file are appended to the image /etc/sysctl.conf.
// Output is stored in OutputDir and includes (everything except for image is optional):
// - image: the image
// - key: ssh key for the image
// - kernel: kernel for injected boot
// - initrd: initrd for injected boot
// - kernel.config: actual kernel config used during build
// - obj/: directory with kernel object files (this should match KernelObject
//   specified in sys/targets, e.g. vmlinux for linux)
// The returned string is a kernel ID that will be the same for kernels with the
// same runtime behavior, and different for kernels with different runtime
// behavior. Binary equal builds, or builds that differ only in e.g. debug info,
// have the same ID. The ID may be empty if OS implementation does not have
// a way to calculate such IDs.
func Image(params *Params) (string, error) {
	builder, err := getBuilder(params.TargetOS, params.TargetArch, params.VMType)
	if err != nil {
		return "", err
	}
	if err := osutil.MkdirAll(filepath.Join(params.OutputDir, "obj")); err != nil {
		return "", err
	}
	if len(params.Config) != 0 {
		// Write kernel config early, so that it's captured on build failures.
		if err := osutil.WriteFile(filepath.Join(params.OutputDir, "kernel.config"), params.Config); err != nil {
			return "", fmt.Errorf("failed to write config file: %v", err)
		}
	}
	err = builder.build(params)
	if err != nil {
		return "", extractRootCause(err, params.TargetOS, params.KernelDir)
	}
	// ssh refuses private keys with permissive modes; enforce 0600 on the
	// generated image key so it is usable without manual intervention.
	if key := filepath.Join(params.OutputDir, "key"); osutil.IsExist(key) {
		if err := os.Chmod(key, 0600); err != nil {
			return "", fmt.Errorf("failed to chmod 0600 %v: %v", key, err)
		}
	}
	sign := ""
	if signer, ok := builder.(signer); ok {
		// Builders that implement signer can compute a kernel ID.
		sign, err = signer.sign(params)
		if err != nil {
			return "", err
		}
	}
	return sign, nil
}
// Clean removes build artifacts for the given OS/ARCH/VM combination by
// delegating to the matching builder implementation.
func Clean(targetOS, targetArch, vmType, kernelDir string) error {
	builder, err := getBuilder(targetOS, targetArch, vmType)
	if err != nil {
		return err
	}
	return builder.clean(kernelDir, targetArch)
}
// KernelError is returned by Image when the kernel build itself failed.
// It carries the extracted root cause alongside the full build output.
type KernelError struct {
	Report      []byte   // extracted root-cause lines of the failure
	Output      []byte   // full verbose build output
	Maintainers []string // maintainers of the guilty file (linux builds only)
	guiltyFile  string   // source file blamed for the failure, if identified
}

// Error implements the error interface; the message is the extracted report.
func (err *KernelError) Error() string {
	return string(err.Report)
}
// builder abstracts per-OS image building and cleanup.
type builder interface {
	build(params *Params) error
	clean(kernelDir, targetArch string) error
}

// signer is optionally implemented by builders that can compute a kernel
// ID ("sign") for the built image.
type signer interface {
	sign(params *Params) (string, error)
}
// getBuilder selects the builder implementation registered for the given
// OS/arch/VM-type triple, or returns an error if none matches.
func getBuilder(targetOS, targetArch, vmType string) (builder, error) {
	// Static registry of supported OS/arch/VM combinations.
	var supported = []struct {
		OS   string
		arch string
		vms  []string
		b    builder
	}{
		{"linux", "amd64", []string{"gvisor"}, gvisor{}},
		{"linux", "amd64", []string{"gce", "qemu"}, linux{}},
		{"linux", "ppc64le", []string{"qemu"}, linux{}},
		{"fuchsia", "amd64", []string{"qemu"}, fuchsia{}},
		{"fuchsia", "arm64", []string{"qemu"}, fuchsia{}},
		{"akaros", "amd64", []string{"qemu"}, akaros{}},
		{"openbsd", "amd64", []string{"gce", "vmm"}, openbsd{}},
		{"netbsd", "amd64", []string{"gce", "qemu"}, netbsd{}},
		{"freebsd", "amd64", []string{"gce", "qemu"}, freebsd{}},
		{"test", "64", []string{"qemu"}, test{}},
	}
	for _, s := range supported {
		if targetOS == s.OS && targetArch == s.arch {
			for _, vm := range s.vms {
				if vmType == vm {
					return s.b, nil
				}
			}
		}
	}
	return nil, fmt.Errorf("unsupported image type %v/%v/%v", targetOS, targetArch, vmType)
}
// CompilerIdentity returns a one-line version string for the given compiler
// binary, or "" if compiler is empty. For bazel, extraction and log noise
// is filtered out of the version output first.
func CompilerIdentity(compiler string) (string, error) {
	if compiler == "" {
		return "", nil
	}
	bazel := strings.HasSuffix(compiler, "bazel")
	arg := "--version"
	if bazel {
		// bazel prints its version without any flag.
		// NOTE(review): an empty-string argument is still passed to the
		// command rather than being omitted — confirm bazel tolerates that.
		arg = ""
	}
	output, err := osutil.RunCmd(time.Minute, "", compiler, arg)
	if err != nil {
		return "", err
	}
	// Return the first meaningful line of the version output.
	for _, line := range strings.Split(string(output), "\n") {
		if bazel {
			// Strip extracting and log lines...
			if strings.Contains(line, "Extracting Bazel") {
				continue
			}
			if strings.HasPrefix(line, "INFO: ") {
				continue
			}
			if strings.HasPrefix(line, "WARNING: ") {
				continue
			}
		}
		return strings.TrimSpace(line), nil
	}
	return "", fmt.Errorf("no output from compiler --version")
}
// extractRootCause converts a verbose build error into a *KernelError with
// the root cause extracted from the build output. Errors that are not
// *osutil.VerboseError, or whose output yields no recognizable cause, are
// returned unchanged. For linux builds, the maintainers of the guilty file
// are looked up and attached.
func extractRootCause(err error, OS, kernelSrc string) error {
	if err == nil {
		return nil
	}
	verr, ok := err.(*osutil.VerboseError)
	if !ok {
		return err
	}
	reason, file := extractCauseInner(verr.Output, kernelSrc)
	if len(reason) == 0 {
		// No recognizable failure cause; keep the original error.
		return err
	}
	kernelErr := &KernelError{
		Report:     reason,
		Output:     verr.Output,
		guiltyFile: file,
	}
	if file != "" && OS == "linux" {
		maintainers, err := report.GetLinuxMaintainers(kernelSrc, file)
		if err != nil {
			// Maintainer lookup is best-effort; record the failure in the
			// output rather than failing the whole extraction.
			kernelErr.Output = append(kernelErr.Output, err.Error()...)
		}
		kernelErr.Maintainers = maintainers
	}
	return kernelErr
}
// extractCauseInner post-processes the raw cause lines: it truncates to at
// most 20 lines, strips the kernel source directory prefix, identifies the
// first referenced source file, and normalizes gcc's unicode quotes.
// It returns the joined report and the guilty file name ("" if none found).
func extractCauseInner(s []byte, kernelSrc string) ([]byte, string) {
	lines := extractCauseRaw(s)
	const maxLines = 20
	if len(lines) > maxLines {
		lines = lines[:maxLines]
	}
	// Build "<kernelSrc>/" so absolute paths into the kernel tree become
	// relative in the report.
	var stripPrefix []byte
	if kernelSrc != "" {
		stripPrefix = []byte(kernelSrc)
		if stripPrefix[len(stripPrefix)-1] != filepath.Separator {
			stripPrefix = append(stripPrefix, filepath.Separator)
		}
	}
	file := ""
	for i := range lines {
		if stripPrefix != nil {
			lines[i] = bytes.Replace(lines[i], stripPrefix, nil, -1)
		}
		// Remember the first file:line location seen in the report.
		if file == "" {
			match := fileRe.FindSubmatch(lines[i])
			if match != nil {
				file = string(match[1])
				if file[0] == '/' {
					// We already removed kernel source prefix,
					// if we still have an absolute path, it's probably pointing
					// to compiler/system libraries (not going to work).
					file = ""
				}
			}
		}
	}
	file = strings.TrimPrefix(file, "./")
	res := bytes.Join(lines, []byte{'\n'})
	// gcc uses these weird quotes around identifiers, which may be
	// mis-rendered by systems that don't understand utf-8.
	res = bytes.Replace(res, []byte("‘"), []byte{'\''}, -1)
	res = bytes.Replace(res, []byte("’"), []byte{'\''}, -1)
	return res, file
}
// extractCauseRaw scans build output for lines that match known failure
// patterns. Lines matched only by "weak" patterns (generic linker noise
// etc.) are kept until the first strong match appears, at which point the
// previously collected weak lines are discarded. Duplicate lines are
// dropped via the dedup set.
func extractCauseRaw(s []byte) [][]byte {
	weak := true
	var cause [][]byte
	dedup := make(map[string]bool)
	for _, line := range bytes.Split(s, []byte{'\n'}) {
		for _, pattern := range buildFailureCauses {
			if !bytes.Contains(line, pattern.pattern) {
				continue
			}
			// First strong match invalidates any weak-only causes collected
			// so far; start the collection over.
			if weak && !pattern.weak {
				cause = nil
				dedup = make(map[string]bool)
			}
			if dedup[string(line)] {
				continue
			}
			dedup[string(line)] = true
			// Record whether this (re)started collection is weak or strong.
			if cause == nil {
				weak = pattern.weak
			}
			cause = append(cause, line)
			break
		}
	}
	return cause
}
// buildFailureCause describes one known substring indicating a build
// failure; weak causes are only reported when no strong cause is present.
type buildFailureCause struct {
	pattern []byte // substring to search for in each output line
	weak    bool   // true for generic markers that a strong match overrides
}

// buildFailureCauses lists the known compiler/linker failure markers.
var buildFailureCauses = [...]buildFailureCause{
	{pattern: []byte(": error: ")},
	{pattern: []byte("ERROR: ")},
	{pattern: []byte(": fatal error: ")},
	{pattern: []byte(": undefined reference to")},
	{pattern: []byte(": Permission denied")},
	{weak: true, pattern: []byte(": final link failed: ")},
	{weak: true, pattern: []byte("collect2: error: ")},
	{weak: true, pattern: []byte("FAILED: Build did NOT complete")},
}
var fileRe = regexp.MustCompile(`^([a-zA-Z0-9_\-/.]+):[0-9]+:([0-9]+:)? `)
|
package castled
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"os/user"
"path/filepath"
"regexp"
"strconv"
"syscall"
"time"
"github.com/google/uuid"
"github.com/quantum/castle/pkg/cephd"
"github.com/quantum/clusterd/pkg/orchestrator"
"github.com/quantum/clusterd/pkg/proc"
)
const (
	DevicesValue     = "devices"     // config key listing candidate devices
	ForceFormatValue = "forceFormat" // config key: format even if a filesystem exists
	// bootstrapOSDKeyringTemplate is the keyring file content for the
	// limited-privilege bootstrap-osd client; %s is the secret key.
	bootstrapOSDKeyringTemplate = `
[client.bootstrap-osd]
key = %s
caps mon = "allow profile bootstrap-osd"
`
)
// NewOSDService creates the clusterd service definition for managing OSDs,
// wiring up the OSD leader and agent implementations.
func NewOSDService() *orchestrator.ClusterService {
	service := &orchestrator.ClusterService{Name: osdKey}
	service.Leader = &osdLeader{}
	service.Agent = &osdAgent{}
	return service
}
// get the bootstrap OSD root dir
// (a fixed scratch location under /tmp used while bootstrapping OSDs)
func getBootstrapOSDDir() string {
	return "/tmp/bootstrap-osd"
}
// get the full path to the bootstrap OSD keyring
func getBootstrapOSDKeyringPath(clusterName string) string {
return fmt.Sprintf("%s/%s.keyring", getBootstrapOSDDir(), clusterName)
}
// getOSDConfFilePath returns the path of the OSD's config file, named
// "<clusterName>.config" inside the given OSD data directory.
func getOSDConfFilePath(osdDataPath, clusterName string) string {
	return osdDataPath + "/" + clusterName + ".config"
}
// getOSDKeyringPath returns the full path to the keyring file stored
// inside the given OSD data directory.
func getOSDKeyringPath(osdDataPath string) string {
	const keyringFile = "keyring"
	return filepath.Join(osdDataPath, keyringFile)
}
// getOSDJournalPath returns the full path to the journal file stored
// inside the given OSD data directory.
func getOSDJournalPath(osdDataPath string) string {
	const journalFile = "journal"
	return filepath.Join(osdDataPath, journalFile)
}
// getOSDTempMonMapPath returns the path of the temporary monmap used while
// activating the OSD: "<osdDataPath>/tmp/activate.monmap".
func getOSDTempMonMapPath(osdDataPath string) string {
	tmpDir := filepath.Join(osdDataPath, "tmp")
	return filepath.Join(tmpDir, "activate.monmap")
}
// create a keyring for the bootstrap-osd client, it gets a limited set of privileges
//
// The keyring is requested from the monitor via "auth get-or-create-key"
// and written to disk. If the keyring file already exists this is a no-op.
func createOSDBootstrapKeyring(conn *cephd.Conn, clusterName string, executor proc.Executor) error {
	bootstrapOSDKeyringPath := getBootstrapOSDKeyringPath(clusterName)
	if _, err := os.Stat(bootstrapOSDKeyringPath); os.IsNotExist(err) {
		// get-or-create-key for client.bootstrap-osd
		cmd := "auth get-or-create-key"
		command, err := json.Marshal(map[string]interface{}{
			"prefix": cmd,
			"format": "json",
			"entity": "client.bootstrap-osd",
			"caps":   []string{"mon", "allow profile bootstrap-osd"},
		})
		if err != nil {
			return fmt.Errorf("command %s marshall failed: %+v", cmd, err)
		}
		buf, _, err := conn.MonCommand(command)
		if err != nil {
			return fmt.Errorf("mon_command %s failed: %+v", cmd, err)
		}
		var resp map[string]interface{}
		err = json.Unmarshal(buf, &resp)
		if err != nil {
			return fmt.Errorf("failed to unmarshall %s response: %+v", cmd, err)
		}
		// Fix: guard the type assertion; a malformed response would have
		// panicked with the original unchecked assertion.
		bootstrapOSDKey, ok := resp["key"].(string)
		if !ok {
			return fmt.Errorf("unexpected %s response, no string key: %+v", cmd, resp)
		}
		log.Printf("succeeded %s command, bootstrapOSDKey: %s", cmd, bootstrapOSDKey)
		// write the bootstrap-osd keyring to disk
		bootstrapOSDKeyringDir := filepath.Dir(bootstrapOSDKeyringPath)
		// Fix: the original only printed this failure and carried on, letting
		// the subsequent WriteFile fail confusingly; return the error instead.
		if err := os.MkdirAll(bootstrapOSDKeyringDir, 0744); err != nil {
			return fmt.Errorf("failed to create bootstrap OSD keyring dir at %s: %+v", bootstrapOSDKeyringDir, err)
		}
		bootstrapOSDKeyring := fmt.Sprintf(bootstrapOSDKeyringTemplate, bootstrapOSDKey)
		if err := ioutil.WriteFile(bootstrapOSDKeyringPath, []byte(bootstrapOSDKeyring), 0644); err != nil {
			return fmt.Errorf("failed to write bootstrap-osd keyring to %s: %+v", bootstrapOSDKeyringPath, err)
		}
	}
	return nil
}
// format the given device for usage by an OSD
//
// Returns (true, nil) if the device was formatted, (false, nil) if the
// device already has a filesystem and forceFormat is false, and an error
// on command failure.
func formatOSD(device string, forceFormat bool, executor proc.Executor) (bool, error) {
	// format the current volume
	// NOTE(review): this label says "blkid %s" while the pipeline below runs
	// `blkid /dev/%s` — presumably the first argument is only a display
	// name for logging; confirm against proc.Executor's contract.
	cmd := fmt.Sprintf("blkid %s", device)
	devFS, err := executor.ExecuteCommandPipeline(
		cmd,
		fmt.Sprintf(`blkid /dev/%s | sed -nr 's/^.*TYPE=\"(.*)\"$/\1/p'`, device))
	if err != nil {
		return false, fmt.Errorf("command %s failed: %+v", cmd, err)
	}
	if devFS != "" && forceFormat {
		// there's a filesystem on the device, but the user has specified to force a format. give a warning about that.
		log.Printf("WARNING: device %s already formatted with %s, but forcing a format!!!", device, devFS)
	}
	if devFS == "" || forceFormat {
		// execute the format operation
		cmd = fmt.Sprintf("format %s", device)
		err = executor.ExecuteCommand(cmd, "sudo", "/usr/sbin/mkfs.btrfs", "-f", "-m", "single", "-n", "32768", fmt.Sprintf("/dev/%s", device))
		if err != nil {
			return false, fmt.Errorf("command %s failed: %+v", cmd, err)
		}
	} else {
		// disk is already formatted and the user doesn't want to force it, return no error, but also specify that no format was done
		log.Printf("device %s already formatted with %s, cannot use for OSD", device, devFS)
		return false, nil
	}
	return true, nil
}
// mount the OSD data directory onto the given device
//
// Waits (with retries) for the freshly formatted filesystem's UUID to
// appear under /dev/disk/by-uuid, mounts the device at mountPath, and
// chowns the mount point to the current user. A chown failure is only
// logged, not returned.
func mountOSD(device string, mountPath string, executor proc.Executor) error {
	cmd := fmt.Sprintf("lsblk %s", device)
	var diskUUID string
	// Retry parameters: up to 10 attempts, 2 seconds apart.
	retryCount := 0
	retryMax := 10
	sleepTime := 2
	for {
		// there is lag in between when a filesytem is created and its UUID is available. retry as needed
		// until we have a usable UUID for the newly formatted filesystem.
		var err error
		diskUUID, err = executor.ExecuteCommandWithOutput(cmd, "lsblk", fmt.Sprintf("/dev/%s", device), "-d", "-n", "-r", "-o", "UUID")
		if err != nil {
			return fmt.Errorf("command %s failed: %+v", cmd, err)
		}
		if diskUUID != "" {
			// we got the UUID from the disk. Verify this UUID is up to date in the /dev/disk/by-uuid dir by
			// checking for it multiple times in a row. For an existing device, the device UUID and the
			// by-uuid link can take a bit to get updated after getting formatted. Increase our confidence
			// that we have the updated UUID by performing this check multiple times in a row.
			log.Printf("verifying UUID %s", diskUUID)
			uuidCheckOK := true
			uuidCheckCount := 0
			for uuidCheckCount < 3 {
				uuidCheckCount++
				if _, err := os.Stat(fmt.Sprintf("/dev/disk/by-uuid/%s", diskUUID)); os.IsNotExist(err) {
					// the UUID we got for the disk does not exist under /dev/disk/by-uuid. Retry.
					uuidCheckOK = false
					break
				}
				<-time.After(time.Duration(500) * time.Millisecond)
			}
			if uuidCheckOK {
				log.Printf("device %s UUID created: %s", device, diskUUID)
				break
			}
		}
		retryCount++
		if retryCount > retryMax {
			return fmt.Errorf("exceeded max retry count waiting for device %s UUID to be created", device)
		}
		<-time.After(time.Duration(sleepTime) * time.Second)
	}
	// mount the volume
	// NOTE(review): the MkdirAll error is ignored here; the mount below will
	// fail anyway if the directory could not be created.
	os.MkdirAll(mountPath, 0777)
	cmd = fmt.Sprintf("mount %s", device)
	if err := executor.ExecuteCommand(cmd, "sudo", "mount", "-o", "user_subvol_rm_allowed",
		fmt.Sprintf("/dev/disk/by-uuid/%s", diskUUID), mountPath); err != nil {
		return fmt.Errorf("command %s failed: %+v", cmd, err)
	}
	// chown for the current user since we had to format and mount with sudo
	currentUser, err := user.Current()
	if err != nil {
		log.Printf("unable to find current user: %+v", err)
	} else {
		cmd = fmt.Sprintf("chown %s", mountPath)
		if err := executor.ExecuteCommand(cmd, "sudo", "chown", "-R",
			fmt.Sprintf("%s:%s", currentUser.Username, currentUser.Username), mountPath); err != nil {
			log.Printf("command %s failed: %+v", cmd, err)
		}
	}
	return nil
}
// looks for an existing OSD data path under the given root
func findOSDDataPath(osdRoot, clusterName string) (string, error) {
var osdDataPath string
fl, err := ioutil.ReadDir(osdRoot)
if err != nil {
return "", fmt.Errorf("failed to read dir %s: %+v", osdRoot, err)
}
p := fmt.Sprintf(`%s-[A-Za-z0-9._-]+`, clusterName)
for _, f := range fl {
if f.IsDir() {
matched, err := regexp.MatchString(p, f.Name())
if err == nil && matched {
osdDataPath = filepath.Join(osdRoot, f.Name())
break
}
}
}
return osdDataPath, nil
}
// creates the OSD identity in the cluster via a mon_command
//
// Issues "osd create" with the given UUID over the bootstrap connection and
// returns the OSD ID assigned by the monitor.
func createOSD(bootstrapConn *cephd.Conn, osdUUID uuid.UUID) (int, error) {
	cmd := "osd create"
	command, err := json.Marshal(map[string]interface{}{
		"prefix": cmd,
		"format": "json",
		"entity": "client.bootstrap-osd",
		"uuid":   osdUUID.String(),
	})
	if err != nil {
		return 0, fmt.Errorf("command %s marshall failed: %+v", cmd, err)
	}
	buf, _, err := bootstrapConn.MonCommand(command)
	if err != nil {
		return 0, fmt.Errorf("mon_command %s failed: %+v", cmd, err)
	}
	var resp map[string]interface{}
	err = json.Unmarshal(buf, &resp)
	if err != nil {
		return 0, fmt.Errorf("failed to unmarshall %s response: %+v. raw response: '%s'", cmd, err, string(buf[:]))
	}
	// NOTE(review): this panics if the response has no numeric "osdid"
	// field — consider guarding the assertion.
	return int(resp["osdid"].(float64)), nil
}
// gets the current mon map for the cluster
//
// Issues "mon getmap" over the bootstrap connection and returns the raw
// monmap bytes as received from the monitor.
func getMonMap(bootstrapConn *cephd.Conn) ([]byte, error) {
	cmd := "mon getmap"
	command, err := json.Marshal(map[string]interface{}{
		"prefix": cmd,
		"format": "json",
		"entity": "client.bootstrap-osd",
	})
	if err != nil {
		return nil, fmt.Errorf("command %s marshall failed: %+v", cmd, err)
	}
	buf, _, err := bootstrapConn.MonCommand(command)
	if err != nil {
		return nil, fmt.Errorf("mon_command %s failed: %+v", cmd, err)
	}
	return buf, nil
}
// creates/initalizes the OSD filesystem and journal via a child process
//
// The provided monmap is written to a temp location inside the OSD data
// directory, then "osd --mkfs --mkkey" is run as a child process with the
// OSD's identity, data dir, journal, config, and keyring paths.
func createOSDFileSystem(clusterName string, osdID int, osdUUID uuid.UUID, osdDataPath string, monMap []byte) error {
	log.Printf("Initializing OSD %d file system at %s...", osdID, osdDataPath)
	// the current monmap is needed to create the OSD, save it to a temp location so it is accessible
	monMapTmpPath := getOSDTempMonMapPath(osdDataPath)
	monMapTmpDir := filepath.Dir(monMapTmpPath)
	if err := os.MkdirAll(monMapTmpDir, 0744); err != nil {
		return fmt.Errorf("failed to create monmap tmp file directory at %s: %+v", monMapTmpDir, err)
	}
	if err := ioutil.WriteFile(monMapTmpPath, monMap, 0644); err != nil {
		return fmt.Errorf("failed to write mon map to tmp file %s, %+v", monMapTmpPath, err)
	}
	// create the OSD file system and journal
	err := proc.RunChildProcess(
		"osd",
		"--mkfs",
		"--mkkey",
		fmt.Sprintf("--id=%s", strconv.Itoa(osdID)),
		fmt.Sprintf("--cluster=%s", clusterName),
		fmt.Sprintf("--osd-data=%s", osdDataPath),
		fmt.Sprintf("--osd-journal=%s", getOSDJournalPath(osdDataPath)),
		fmt.Sprintf("--conf=%s", getOSDConfFilePath(osdDataPath, clusterName)),
		fmt.Sprintf("--keyring=%s", getOSDKeyringPath(osdDataPath)),
		fmt.Sprintf("--osd-uuid=%s", osdUUID.String()),
		fmt.Sprintf("--monmap=%s", monMapTmpPath))
	if err != nil {
		return fmt.Errorf("failed osd mkfs for OSD ID %d, UUID %s, dataDir %s: %+v",
			osdID, osdUUID.String(), osdDataPath, err)
	}
	return nil
}
// add OSD auth privileges for the given OSD ID. the bootstrap-osd privileges are limited and a real OSD needs more.
//
// Reads the OSD's keyring from its data directory and registers it with
// the monitor via "auth add", granting full osd caps and the osd mon
// profile.
func addOSDAuth(bootstrapConn *cephd.Conn, osdID int, osdDataPath string) error {
	// create a new auth for this OSD
	osdKeyringPath := getOSDKeyringPath(osdDataPath)
	keyringBuffer, err := ioutil.ReadFile(osdKeyringPath)
	if err != nil {
		return fmt.Errorf("failed to read OSD keyring at %s, %+v", osdKeyringPath, err)
	}
	cmd := "auth add"
	osdEntity := fmt.Sprintf("osd.%d", osdID)
	command, err := json.Marshal(map[string]interface{}{
		"prefix": cmd,
		"format": "json",
		"entity": osdEntity,
		"caps":   []string{"osd", "allow *", "mon", "allow profile osd"},
	})
	if err != nil {
		return fmt.Errorf("command %s marshall failed: %+v", cmd, err)
	}
	// The keyring content is passed as the command's input buffer.
	_, info, err := bootstrapConn.MonCommandWithInputBuffer(command, keyringBuffer)
	if err != nil {
		return fmt.Errorf("mon_command %s failed: %+v", cmd, err)
	}
	log.Printf("succeeded %s command for %s. info: %s", cmd, osdEntity, info)
	return nil
}
// addOSDToCrushMap adds the given OSD to the crush map so it can receive data,
// placing it under this host with a weight proportional to its volume size.
func addOSDToCrushMap(osdConn *cephd.Conn, osdID int, osdDataPath string) error {
	// get the size of the volume containing the OSD data dir
	s := syscall.Statfs_t{}
	if err := syscall.Statfs(osdDataPath, &s); err != nil {
		return fmt.Errorf("failed to statfs on %s, %+v", osdDataPath, err)
	}
	all := s.Blocks * uint64(s.Bsize)
	// weight is (size in KiB) / 2^30, i.e. the volume size expressed in TiB,
	// matching Ceph's convention of crush weight 1.0 per TB of storage
	// (the divisor 1073741824 is 2^30 KiB = 1 TiB, not 1 GB)
	weight := float64(all/1024) / 1073741824.0
	// round the weight to two decimal places
	weight, _ = strconv.ParseFloat(fmt.Sprintf("%.2f", weight), 64)
	osdEntity := fmt.Sprintf("osd.%d", osdID)
	log.Printf("OSD %s at %s, bytes: %d, weight: %.2f", osdEntity, osdDataPath, all, weight)
	hostName, err := os.Hostname()
	if err != nil {
		return fmt.Errorf("failed to get hostname, %+v", err)
	}
	// "create-or-move" is idempotent: it creates the crush entry if missing,
	// or updates its location/weight if it already exists
	cmd := "osd crush create-or-move"
	command, err := json.Marshal(map[string]interface{}{
		"prefix": cmd,
		"format": "json",
		"id":     osdID,
		"weight": weight,
		"args":   []string{fmt.Sprintf("host=%s", hostName), "root=default"},
	})
	if err != nil {
		return fmt.Errorf("command %s marshall failed: %+v", cmd, err)
	}
	log.Printf("%s command: '%s'", cmd, string(command))
	_, info, err := osdConn.MonCommand(command)
	if err != nil {
		return fmt.Errorf("mon_command %s failed: %+v", cmd, err)
	}
	log.Printf("succeeded adding %s to crush map. info: %s", osdEntity, info)
	return nil
}
Query the current user exactly once per process
package castled
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"os/user"
"path/filepath"
"regexp"
"strconv"
"syscall"
"time"
"github.com/google/uuid"
"github.com/quantum/castle/pkg/cephd"
"github.com/quantum/clusterd/pkg/orchestrator"
"github.com/quantum/clusterd/pkg/proc"
)
const (
	// keys used in the desired-state configuration for the OSD service
	DevicesValue     = "devices"
	ForceFormatValue = "forceFormat"
	// template for the limited-privilege bootstrap-osd client keyring;
	// the %s placeholder receives the secret key returned by the monitors
	bootstrapOSDKeyringTemplate = `
[client.bootstrap-osd]
key = %s
caps mon = "allow profile bootstrap-osd"
`
)

// currentUser caches the result of user.Current() so the current user is
// queried at most once per process (see mountOSD); nil until first resolved.
var currentUser *user.User
// NewOSDService builds the cluster service definition for the OSD role,
// wiring up its leader and agent implementations.
func NewOSDService() *orchestrator.ClusterService {
	return &orchestrator.ClusterService{
		Name:   osdKey,
		Leader: &osdLeader{},
		Agent:  &osdAgent{},
	}
}
// getBootstrapOSDDir returns the root directory that holds bootstrap-osd
// artifacts such as the bootstrap keyring.
func getBootstrapOSDDir() string {
	return "/tmp/bootstrap-osd"
}
// get the full path to the bootstrap OSD keyring
func getBootstrapOSDKeyringPath(clusterName string) string {
return fmt.Sprintf("%s/%s.keyring", getBootstrapOSDDir(), clusterName)
}
// getOSDConfFilePath returns the full path to the given OSD's config file,
// named after the cluster within the OSD data dir.
func getOSDConfFilePath(osdDataPath, clusterName string) string {
	return osdDataPath + "/" + clusterName + ".config"
}
// getOSDKeyringPath returns the full path of the keyring file inside the
// given OSD data directory.
func getOSDKeyringPath(osdDataPath string) string {
	return filepath.Join(osdDataPath, "keyring")
}
// getOSDJournalPath returns the full path of the journal file inside the
// given OSD data directory.
func getOSDJournalPath(osdDataPath string) string {
	return filepath.Join(osdDataPath, "journal")
}
// getOSDTempMonMapPath returns the temporary location within the OSD data
// directory where the current mon map is staged for "osd --mkfs".
func getOSDTempMonMapPath(osdDataPath string) string {
	return filepath.Join(osdDataPath, "tmp", "activate.monmap")
}
// createOSDBootstrapKeyring creates a keyring for the bootstrap-osd client and
// writes it to disk. The bootstrap-osd client gets a limited set of privileges
// (just enough to register new OSDs). If the keyring file already exists on
// disk it is left untouched.
//
// Fixes over the previous revision: a failure to create the keyring directory
// is now returned instead of only printed, and a malformed monitor response no
// longer panics on the "key" type assertion.
func createOSDBootstrapKeyring(conn *cephd.Conn, clusterName string, executor proc.Executor) error {
	bootstrapOSDKeyringPath := getBootstrapOSDKeyringPath(clusterName)
	if _, err := os.Stat(bootstrapOSDKeyringPath); !os.IsNotExist(err) {
		// keyring already present on disk; nothing to do
		return nil
	}

	// get-or-create-key for client.bootstrap-osd
	cmd := "auth get-or-create-key"
	command, err := json.Marshal(map[string]interface{}{
		"prefix": cmd,
		"format": "json",
		"entity": "client.bootstrap-osd",
		"caps":   []string{"mon", "allow profile bootstrap-osd"},
	})
	if err != nil {
		return fmt.Errorf("command %s marshall failed: %+v", cmd, err)
	}
	buf, _, err := conn.MonCommand(command)
	if err != nil {
		return fmt.Errorf("mon_command %s failed: %+v", cmd, err)
	}
	var resp map[string]interface{}
	if err := json.Unmarshal(buf, &resp); err != nil {
		return fmt.Errorf("failed to unmarshall %s response: %+v", cmd, err)
	}
	// guard the type assertion so a malformed response returns an error
	// instead of panicking
	bootstrapOSDKey, ok := resp["key"].(string)
	if !ok {
		return fmt.Errorf("%s response did not contain a string key", cmd)
	}
	// do not log the key itself; it is a secret
	log.Printf("succeeded %s command for client.bootstrap-osd", cmd)

	// write the bootstrap-osd keyring to disk
	bootstrapOSDKeyringDir := filepath.Dir(bootstrapOSDKeyringPath)
	if err := os.MkdirAll(bootstrapOSDKeyringDir, 0744); err != nil {
		// previously only printed; WriteFile below would fail anyway, so fail fast
		return fmt.Errorf("failed to create bootstrap OSD keyring dir at %s: %+v", bootstrapOSDKeyringDir, err)
	}
	bootstrapOSDKeyring := fmt.Sprintf(bootstrapOSDKeyringTemplate, bootstrapOSDKey)
	// NOTE(review): 0644 leaves the keyring world-readable; consider 0600
	if err := ioutil.WriteFile(bootstrapOSDKeyringPath, []byte(bootstrapOSDKeyring), 0644); err != nil {
		return fmt.Errorf("failed to write bootstrap-osd keyring to %s: %+v", bootstrapOSDKeyringPath, err)
	}
	return nil
}
// formatOSD formats the given device with btrfs for use by an OSD. It returns
// true when a format was actually performed, and false (with no error) when
// the device already carried a filesystem and forceFormat was not set.
func formatOSD(device string, forceFormat bool, executor proc.Executor) (bool, error) {
	// probe the device for an existing filesystem type
	cmd := fmt.Sprintf("blkid %s", device)
	devFS, err := executor.ExecuteCommandPipeline(
		cmd,
		fmt.Sprintf(`blkid /dev/%s | sed -nr 's/^.*TYPE=\"(.*)\"$/\1/p'`, device))
	if err != nil {
		return false, fmt.Errorf("command %s failed: %+v", cmd, err)
	}

	if devFS != "" && !forceFormat {
		// disk is already formatted and the user doesn't want to force it;
		// not an error, but report that no format was done
		log.Printf("device %s already formatted with %s, cannot use for OSD", device, devFS)
		return false, nil
	}
	if devFS != "" {
		// a filesystem exists but the user asked to force a format; warn loudly
		log.Printf("WARNING: device %s already formatted with %s, but forcing a format!!!", device, devFS)
	}

	// execute the format operation
	cmd = fmt.Sprintf("format %s", device)
	if err := executor.ExecuteCommand(cmd, "sudo", "/usr/sbin/mkfs.btrfs", "-f", "-m", "single", "-n", "32768", fmt.Sprintf("/dev/%s", device)); err != nil {
		return false, fmt.Errorf("command %s failed: %+v", cmd, err)
	}
	return true, nil
}
// mountOSD mounts the filesystem on the given device at mountPath, addressing
// the device by UUID so the mount is stable across device renames, then chowns
// the mount point to the current user (format/mount run under sudo).
func mountOSD(device string, mountPath string, executor proc.Executor) error {
	cmd := fmt.Sprintf("lsblk %s", device)
	var diskUUID string

	retryCount := 0
	retryMax := 10
	sleepTime := 2
	for {
		// there is lag in between when a filesytem is created and its UUID is available. retry as needed
		// until we have a usable UUID for the newly formatted filesystem.
		var err error
		diskUUID, err = executor.ExecuteCommandWithOutput(cmd, "lsblk", fmt.Sprintf("/dev/%s", device), "-d", "-n", "-r", "-o", "UUID")
		if err != nil {
			return fmt.Errorf("command %s failed: %+v", cmd, err)
		}

		if diskUUID != "" {
			// we got the UUID from the disk. Verify this UUID is up to date in the /dev/disk/by-uuid dir by
			// checking for it multiple times in a row. For an existing device, the device UUID and the
			// by-uuid link can take a bit to get updated after getting formatted. Increase our confidence
			// that we have the updated UUID by performing this check multiple times in a row.
			log.Printf("verifying UUID %s", diskUUID)
			uuidCheckOK := true
			uuidCheckCount := 0
			for uuidCheckCount < 3 {
				uuidCheckCount++
				if _, err := os.Stat(fmt.Sprintf("/dev/disk/by-uuid/%s", diskUUID)); os.IsNotExist(err) {
					// the UUID we got for the disk does not exist under /dev/disk/by-uuid. Retry.
					uuidCheckOK = false
					break
				}
				// short pause between consecutive by-uuid checks
				<-time.After(time.Duration(500) * time.Millisecond)
			}

			if uuidCheckOK {
				log.Printf("device %s UUID created: %s", device, diskUUID)
				break
			}
		}

		retryCount++
		if retryCount > retryMax {
			return fmt.Errorf("exceeded max retry count waiting for device %s UUID to be created", device)
		}

		<-time.After(time.Duration(sleepTime) * time.Second)
	}

	// mount the volume
	// NOTE(review): the MkdirAll error is ignored here; a failure would surface
	// as a mount error below — confirm that is intentional
	os.MkdirAll(mountPath, 0777)
	cmd = fmt.Sprintf("mount %s", device)
	if err := executor.ExecuteCommand(cmd, "sudo", "mount", "-o", "user_subvol_rm_allowed",
		fmt.Sprintf("/dev/disk/by-uuid/%s", diskUUID), mountPath); err != nil {
		return fmt.Errorf("command %s failed: %+v", cmd, err)
	}

	// chown for the current user since we had to format and mount with sudo
	// currentUser is a package-level cache so user.Current is queried at most once
	if currentUser == nil {
		var err error
		currentUser, err = user.Current()
		if err != nil {
			log.Printf("unable to find current user: %+v", err)
			return err
		}
	}
	if currentUser != nil {
		cmd = fmt.Sprintf("chown %s", mountPath)
		// a chown failure is logged but not fatal; the mount itself succeeded
		if err := executor.ExecuteCommand(cmd, "sudo", "chown", "-R",
			fmt.Sprintf("%s:%s", currentUser.Username, currentUser.Username), mountPath); err != nil {
			log.Printf("command %s failed: %+v", cmd, err)
		}
	}

	return nil
}
// looks for an existing OSD data path under the given root
func findOSDDataPath(osdRoot, clusterName string) (string, error) {
var osdDataPath string
fl, err := ioutil.ReadDir(osdRoot)
if err != nil {
return "", fmt.Errorf("failed to read dir %s: %+v", osdRoot, err)
}
p := fmt.Sprintf(`%s-[A-Za-z0-9._-]+`, clusterName)
for _, f := range fl {
if f.IsDir() {
matched, err := regexp.MatchString(p, f.Name())
if err == nil && matched {
osdDataPath = filepath.Join(osdRoot, f.Name())
break
}
}
}
return osdDataPath, nil
}
// createOSD registers a new OSD identity with the cluster via an "osd create"
// mon_command and returns the OSD ID the monitors assigned.
func createOSD(bootstrapConn *cephd.Conn, osdUUID uuid.UUID) (int, error) {
	prefix := "osd create"
	req, err := json.Marshal(map[string]interface{}{
		"prefix": prefix,
		"format": "json",
		"entity": "client.bootstrap-osd",
		"uuid":   osdUUID.String(),
	})
	if err != nil {
		return 0, fmt.Errorf("command %s marshall failed: %+v", prefix, err)
	}
	respBuf, _, err := bootstrapConn.MonCommand(req)
	if err != nil {
		return 0, fmt.Errorf("mon_command %s failed: %+v", prefix, err)
	}
	var parsed map[string]interface{}
	if err := json.Unmarshal(respBuf, &parsed); err != nil {
		return 0, fmt.Errorf("failed to unmarshall %s response: %+v. raw response: '%s'", prefix, err, string(respBuf[:]))
	}
	// JSON numbers decode as float64; the monitors return the new ID as "osdid"
	return int(parsed["osdid"].(float64)), nil
}
// getMonMap fetches the cluster's current monitor map by issuing a
// "mon getmap" mon_command over the bootstrap connection.
func getMonMap(bootstrapConn *cephd.Conn) ([]byte, error) {
	prefix := "mon getmap"
	req, err := json.Marshal(map[string]interface{}{
		"prefix": prefix,
		"format": "json",
		"entity": "client.bootstrap-osd",
	})
	if err != nil {
		return nil, fmt.Errorf("command %s marshall failed: %+v", prefix, err)
	}
	monmap, _, err := bootstrapConn.MonCommand(req)
	if err != nil {
		return nil, fmt.Errorf("mon_command %s failed: %+v", prefix, err)
	}
	return monmap, nil
}
// createOSDFileSystem creates/initializes the OSD file system and journal by
// running "osd --mkfs --mkkey" as a child process. The caller supplies the
// current mon map, which the mkfs step requires; it is staged in a temp
// location inside the OSD data dir so the child process can read it.
func createOSDFileSystem(clusterName string, osdID int, osdUUID uuid.UUID, osdDataPath string, monMap []byte) error {
	log.Printf("Initializing OSD %d file system at %s...", osdID, osdDataPath)

	// the current monmap is needed to create the OSD, save it to a temp location so it is accessible
	monMapTmpPath := getOSDTempMonMapPath(osdDataPath)
	monMapTmpDir := filepath.Dir(monMapTmpPath)
	if err := os.MkdirAll(monMapTmpDir, 0744); err != nil {
		return fmt.Errorf("failed to create monmap tmp file directory at %s: %+v", monMapTmpDir, err)
	}
	if err := ioutil.WriteFile(monMapTmpPath, monMap, 0644); err != nil {
		return fmt.Errorf("failed to write mon map to tmp file %s, %+v", monMapTmpPath, err)
	}

	// create the OSD file system and journal; --mkkey also generates the
	// OSD's keyring inside the data dir
	err := proc.RunChildProcess(
		"osd",
		"--mkfs",
		"--mkkey",
		fmt.Sprintf("--id=%s", strconv.Itoa(osdID)),
		fmt.Sprintf("--cluster=%s", clusterName),
		fmt.Sprintf("--osd-data=%s", osdDataPath),
		fmt.Sprintf("--osd-journal=%s", getOSDJournalPath(osdDataPath)),
		fmt.Sprintf("--conf=%s", getOSDConfFilePath(osdDataPath, clusterName)),
		fmt.Sprintf("--keyring=%s", getOSDKeyringPath(osdDataPath)),
		fmt.Sprintf("--osd-uuid=%s", osdUUID.String()),
		fmt.Sprintf("--monmap=%s", monMapTmpPath))
	if err != nil {
		return fmt.Errorf("failed osd mkfs for OSD ID %d, UUID %s, dataDir %s: %+v",
			osdID, osdUUID.String(), osdDataPath, err)
	}

	return nil
}
// addOSDAuth grants full OSD auth privileges to the given OSD ID; the
// bootstrap-osd credentials are deliberately limited and a real OSD needs
// more. The OSD's own keyring is read from its data dir and submitted with
// the "auth add" request.
func addOSDAuth(bootstrapConn *cephd.Conn, osdID int, osdDataPath string) error {
	keyringPath := getOSDKeyringPath(osdDataPath)
	keyring, err := ioutil.ReadFile(keyringPath)
	if err != nil {
		return fmt.Errorf("failed to read OSD keyring at %s, %+v", keyringPath, err)
	}

	prefix := "auth add"
	entity := fmt.Sprintf("osd.%d", osdID)
	req, err := json.Marshal(map[string]interface{}{
		"prefix": prefix,
		"format": "json",
		"entity": entity,
		"caps":   []string{"osd", "allow *", "mon", "allow profile osd"},
	})
	if err != nil {
		return fmt.Errorf("command %s marshall failed: %+v", prefix, err)
	}
	// the keyring is delivered as the command's input buffer
	_, info, err := bootstrapConn.MonCommandWithInputBuffer(req, keyring)
	if err != nil {
		return fmt.Errorf("mon_command %s failed: %+v", prefix, err)
	}
	log.Printf("succeeded %s command for %s. info: %s", prefix, entity, info)
	return nil
}
// addOSDToCrushMap adds the given OSD to the crush map so it can receive data,
// placing it under this host with a weight proportional to its volume size.
func addOSDToCrushMap(osdConn *cephd.Conn, osdID int, osdDataPath string) error {
	// get the size of the volume containing the OSD data dir
	s := syscall.Statfs_t{}
	if err := syscall.Statfs(osdDataPath, &s); err != nil {
		return fmt.Errorf("failed to statfs on %s, %+v", osdDataPath, err)
	}
	all := s.Blocks * uint64(s.Bsize)
	// weight is (size in KiB) / 2^30, i.e. the volume size expressed in TiB,
	// matching Ceph's convention of crush weight 1.0 per TB of storage
	// (the divisor 1073741824 is 2^30 KiB = 1 TiB, not 1 GB)
	weight := float64(all/1024) / 1073741824.0
	// round the weight to two decimal places
	weight, _ = strconv.ParseFloat(fmt.Sprintf("%.2f", weight), 64)
	osdEntity := fmt.Sprintf("osd.%d", osdID)
	log.Printf("OSD %s at %s, bytes: %d, weight: %.2f", osdEntity, osdDataPath, all, weight)
	hostName, err := os.Hostname()
	if err != nil {
		return fmt.Errorf("failed to get hostname, %+v", err)
	}
	// "create-or-move" is idempotent: it creates the crush entry if missing,
	// or updates its location/weight if it already exists
	cmd := "osd crush create-or-move"
	command, err := json.Marshal(map[string]interface{}{
		"prefix": cmd,
		"format": "json",
		"id":     osdID,
		"weight": weight,
		"args":   []string{fmt.Sprintf("host=%s", hostName), "root=default"},
	})
	if err != nil {
		return fmt.Errorf("command %s marshall failed: %+v", cmd, err)
	}
	log.Printf("%s command: '%s'", cmd, string(command))
	_, info, err := osdConn.MonCommand(command)
	if err != nil {
		return fmt.Errorf("mon_command %s failed: %+v", cmd, err)
	}
	log.Printf("succeeded adding %s to crush map. info: %s", osdEntity, info)
	return nil
}
|
package cli
import (
"errors"
"fmt"
"io"
"os"
"github.com/apprenda/kismatic/pkg/data"
"github.com/apprenda/kismatic/pkg/install"
"github.com/apprenda/kismatic/pkg/util"
"github.com/spf13/cobra"
)
// upgradeOpts holds the flag values shared by the upgrade subcommands.
type upgradeOpts struct {
	generatedAssetsDir string // directory where installation-generated assets are stored
	verbose            bool   // enable verbose logging
	outputFormat       string // "simple" or "raw"
	skipPreflight      bool   // skip upgrade pre-flight checks
	online             bool   // set by the "online" subcommand; enables safety checks and draining
	planFile           string // path to the cluster plan file
	restartServices    bool   // force restart of cluster services
	partialAllowed     bool   // upgrade ready nodes and skip unready ones
	maxParallelWorkers int    // maximum worker nodes upgraded in parallel
	dryRun             bool   // simulate the upgrade without changing the cluster
}
// NewCmdUpgrade returns the upgrade command. The command itself only prints
// help; the actual work is done by the "offline" and "online" subcommands,
// which share this command's persistent flags via the opts struct.
func NewCmdUpgrade(out io.Writer) *cobra.Command {
	var opts upgradeOpts

	cmd := &cobra.Command{
		Use:   "upgrade",
		Short: "Upgrade your Kubernetes cluster",
		Long: `Upgrade your Kubernetes cluster.
The upgrade process is applied to each node, one node at a time. If a private docker registry
is being used, the new container images will be pushed by Kismatic before starting to upgrade
nodes.
Nodes in the cluster are upgraded in the following order:
1. Etcd nodes
2. Master nodes
3. Worker nodes (regardless of specialization)
`,
		RunE: func(cmd *cobra.Command, args []string) error {
			return cmd.Help()
		},
	}

	// persistent flags are inherited by the offline/online subcommands
	cmd.PersistentFlags().StringVar(&opts.generatedAssetsDir, "generated-assets-dir", "generated", "path to the directory where assets generated during the installation process will be stored")
	cmd.PersistentFlags().BoolVar(&opts.verbose, "verbose", false, "enable verbose logging from the installation")
	cmd.PersistentFlags().StringVarP(&opts.outputFormat, "output", "o", "simple", "installation output format (options \"simple\"|\"raw\")")
	cmd.PersistentFlags().BoolVar(&opts.skipPreflight, "skip-preflight", false, "skip upgrade pre-flight checks")
	cmd.PersistentFlags().BoolVar(&opts.restartServices, "restart-services", false, "force restart cluster services (Use with care)")
	cmd.PersistentFlags().BoolVar(&opts.partialAllowed, "partial-ok", false, "allow the upgrade of ready nodes, and skip nodes that have been deemed unready for upgrade")
	cmd.PersistentFlags().IntVar(&opts.maxParallelWorkers, "max-parallel-workers", 1, "the maximum number of worker nodes to be upgraded in parallel")
	cmd.PersistentFlags().BoolVar(&opts.dryRun, "dry-run", false, "simulate the upgrade, but don't actually upgrade the cluster")
	addPlanFileFlag(cmd.PersistentFlags(), &opts.planFile)

	// Subcommands
	cmd.AddCommand(NewCmdUpgradeOffline(out, &opts))
	cmd.AddCommand(NewCmdUpgradeOnline(out, &opts))

	return cmd
}
// NewCmdUpgradeOffline returns the command for running offline upgrades.
// Offline mode skips the online safety/availability checks and does not drain
// nodes before upgrading them. (Fixes the "availabilty" typo in the
// user-facing help text.)
func NewCmdUpgradeOffline(out io.Writer, opts *upgradeOpts) *cobra.Command {
	cmd := cobra.Command{
		Use:   "offline",
		Short: "Perform an offline upgrade of your Kubernetes cluster",
		Long: `Perform an offline upgrade of your Kubernetes cluster.
The offline upgrade is available for those clusters in which safety and availability are not a concern.
In this mode, the safety and availability checks will not be performed, nor will the nodes in the cluster
be drained of workloads.
Performing an offline upgrade could result in loss of critical data and reduced service
availability. For this reason, this method should not be used for clusters that are housing
production workloads.
`,
		RunE: func(cmd *cobra.Command, args []string) error {
			return doUpgrade(out, opts)
		},
	}
	return &cmd
}
// NewCmdUpgradeOnline returns the command for running online upgrades
func NewCmdUpgradeOnline(out io.Writer, opts *upgradeOpts) *cobra.Command {
	cmd := cobra.Command{
		Use:   "online",
		Short: "Perform an online upgrade of your Kubernetes cluster",
		Long: `Perform an online upgrade of your Kubernetes cluster.
During an online upgrade, Kismatic will run safety and availability checks (see table below) against the
existing cluster before performing the upgrade. If any unsafe condition is detected, a report will
be printed, and the upgrade will not proceed.
If the node under upgrade is a Kubernetes node, it is cordoned and drained of workloads
before any changes are applied.
`,
		RunE: func(cmd *cobra.Command, args []string) error {
			// mark the run as online so doUpgrade performs safety checks and draining
			opts.online = true
			return doUpgrade(out, opts)
		},
	}
	return &cmd
}
// doUpgrade runs the upgrade workflow shared by the online and offline
// subcommands: validate options, read and validate the plan file, compute
// which nodes are out of date, upgrade the docker registry when applicable,
// upgrade the out-of-date nodes, and finally (unless a partial upgrade was
// requested) upgrade cluster-level services and smoke-test the cluster.
//
// Fix: the validation error message now refers to the flag by its CLI name
// "max-parallel-workers" instead of the internal "maxParallelWorkers-workers".
func doUpgrade(out io.Writer, opts *upgradeOpts) error {
	if opts.maxParallelWorkers < 1 {
		return fmt.Errorf("max-parallel-workers must be greater or equal to 1, got: %d", opts.maxParallelWorkers)
	}
	planFile := opts.planFile
	planner := install.FilePlanner{File: planFile}
	executorOpts := install.ExecutorOptions{
		GeneratedAssetsDirectory: opts.generatedAssetsDir,
		RestartServices:          opts.restartServices,
		OutputFormat:             opts.outputFormat,
		Verbose:                  opts.verbose,
		DryRun:                   opts.dryRun,
	}
	executor, err := install.NewExecutor(out, os.Stderr, executorOpts)
	if err != nil {
		return err
	}
	preflightExecOpts := executorOpts
	preflightExecOpts.DryRun = false // We always want to run preflight, even if doing a dry-run
	preflightExec, err := install.NewPreFlightExecutor(out, os.Stderr, preflightExecOpts)
	if err != nil {
		return err
	}

	util.PrintHeader(out, "Computing upgrade plan", '=')

	// Read plan file
	if !planner.PlanExists() {
		util.PrettyPrintErr(out, "Reading plan file")
		return fmt.Errorf("plan file %q does not exist", planFile)
	}
	util.PrettyPrintOk(out, "Reading plan file")
	plan, err := planner.Read()
	if err != nil {
		util.PrettyPrintErr(out, "Reading plan file")
		return fmt.Errorf("error reading plan file %q: %v", planFile, err)
	}

	// Validate SSH connectivity to nodes
	if ok, errs := install.ValidatePlanSSHConnections(plan); !ok {
		util.PrettyPrintErr(out, "Validate SSH connectivity to nodes")
		util.PrintValidationErrors(out, errs)
		return fmt.Errorf("SSH connectivity validation errors found")
	}
	util.PrettyPrintOk(out, "Validate SSH connectivity to nodes")

	// Figure out which nodes to upgrade: anything older than the target version
	cv, err := install.ListVersions(plan)
	if err != nil {
		return fmt.Errorf("error listing cluster versions: %v", err)
	}
	var toUpgrade []install.ListableNode
	var toSkip []install.ListableNode
	for _, n := range cv.Nodes {
		if install.IsOlderVersion(n.Version) {
			toUpgrade = append(toUpgrade, n)
		} else {
			toSkip = append(toSkip, n)
		}
	}

	// Print the nodes that will be skipped
	if len(toSkip) > 0 {
		util.PrintHeader(out, "Skipping nodes", '=')
		for _, n := range toSkip {
			util.PrettyPrintOk(out, "- %q is at the target version %q", n.Node.Host, n.Version)
		}
		fmt.Fprintln(out)
	}

	// The docker registry is only managed for disconnected installations
	if plan.ConfigureDockerRegistry() && plan.Cluster.DisconnectedInstallation {
		util.PrintHeader(out, "Upgrade: Docker Registry", '=')
		if err = executor.UpgradeDockerRegistry(*plan); err != nil {
			return fmt.Errorf("Failed to upgrade docker registry: %v", err)
		}
	}

	// Print message if there's no work to do
	if len(toUpgrade) == 0 {
		fmt.Fprintln(out, "All nodes are at the target version. Skipping node upgrades.")
	} else {
		if err = upgradeNodes(out, *plan, *opts, toUpgrade, executor, preflightExec); err != nil {
			return err
		}
	}

	// A partial upgrade stops here; cluster-level services are only upgraded
	// during a full upgrade
	if opts.partialAllowed {
		util.PrintColor(out, util.Green, `
Partial upgrade complete.
Cluster level services are still left to upgrade. These can only be upgraded
when performing a full upgrade. When you are ready, you may use "kismatic upgrade"
without the "--partial-ok" flag to perform a full upgrade.
`)
		return nil
	}

	// Upgrade the cluster services
	util.PrintHeader(out, "Upgrade: Cluster Services", '=')
	if err := executor.UpgradeClusterServices(*plan); err != nil {
		return fmt.Errorf("Failed to upgrade cluster services: %v", err)
	}

	if err := executor.RunSmokeTest(plan); err != nil {
		return fmt.Errorf("Smoke test failed: %v", err)
	}

	if !opts.dryRun {
		fmt.Fprintln(out)
		util.PrintColor(out, util.Green, "Upgrade complete\n")
		fmt.Fprintln(out)
	}

	return nil
}
// upgradeNodes upgrades the given set of nodes. For online upgrades it first
// runs per-node safety checks; it then runs upgrade preflight checks (unless
// skipped). Nodes that fail either phase are excluded when a partial upgrade
// is allowed, and abort the whole upgrade otherwise. An unsafe/unready etcd
// or master node always aborts, even for a partial upgrade. Etcd nodes are
// migrated (etcd2 -> etcd3) before the general node upgrade runs.
func upgradeNodes(out io.Writer, plan install.Plan, opts upgradeOpts, nodesNeedUpgrade []install.ListableNode, executor install.Executor, preflightExec install.PreFlightExecutor) error {
	// Run safety checks if doing an online upgrade
	unsafeNodes := []install.ListableNode{}
	if opts.online {
		util.PrintHeader(out, "Validate Online Upgrade", '=')
		// Use the first master node for running kubectl
		client, err := plan.GetSSHClient(plan.Master.Nodes[0].Host)
		if err != nil {
			return fmt.Errorf("error getting SSH client: %v", err)
		}
		kubeClient := data.RemoteKubectl{SSHClient: client}
		for _, node := range nodesNeedUpgrade {
			util.PrettyPrint(out, "%s %v", node.Node.Host, node.Roles)
			errs := install.DetectNodeUpgradeSafety(plan, node.Node, kubeClient)
			if len(errs) != 0 {
				util.PrintError(out)
				fmt.Fprintln(out)
				// NOTE(review): these detail lines go to stdout via fmt.Println,
				// not to the "out" writer like the rest of the output — confirm
				// whether that is intentional
				for _, err := range errs {
					fmt.Println("-", err.Error())
				}
				unsafeNodes = append(unsafeNodes, node)
			} else {
				util.PrintOkln(out)
			}
		}

		// If we found any unsafe nodes, and we are not doing a partial upgrade, exit.
		if len(unsafeNodes) > 0 && !opts.partialAllowed {
			return errors.New("Unable to perform an online upgrade due to the unsafe conditions detected.")
		}

		// Block the upgrade if partial is allowed but there is an etcd or master node
		// that cannot be upgraded
		if opts.partialAllowed {
			for _, n := range unsafeNodes {
				for _, r := range n.Roles {
					if r == "master" || r == "etcd" {
						return errors.New("Unable to perform an online upgrade due to the unsafe conditions detected.")
					}
				}
			}
		}
	}

	// Run upgrade preflight on the nodes that are to be upgraded
	unreadyNodes := []install.ListableNode{}
	if !opts.skipPreflight {
		for _, node := range nodesNeedUpgrade {
			util.PrintHeader(out, fmt.Sprintf("Preflight Checks: %s %s", node.Node.Host, node.Roles), '=')
			// a preflight failure marks the node unready rather than failing
			// immediately so that all nodes get reported in one pass
			if err := preflightExec.RunUpgradePreFlightCheck(&plan, node); err != nil {
				// return fmt.Errorf("Upgrade preflight check failed: %v", err)
				unreadyNodes = append(unreadyNodes, node)
			}
		}
	}

	// Block upgrade if we found unready nodes, and we are not doing a partial upgrade
	if len(unreadyNodes) > 0 && !opts.partialAllowed {
		return errors.New("Errors found during preflight checks")
	}
	// Block the upgrade if partial is allowed but there is an etcd or master node
	// that cannot be upgraded
	if opts.partialAllowed {
		for _, n := range unreadyNodes {
			for _, r := range n.Roles {
				if r == "master" || r == "etcd" {
					return errors.New("Errors found during preflight checks")
				}
			}
		}
	}

	// Filter out the nodes that are unsafe/unready
	toUpgrade := []install.ListableNode{}
	for _, n := range nodesNeedUpgrade {
		upgrade := true
		for _, unsafe := range unsafeNodes {
			if unsafe.Node == n.Node {
				upgrade = false
			}
		}
		for _, unready := range unreadyNodes {
			if unready.Node == n.Node {
				upgrade = false
			}
		}
		if upgrade {
			toUpgrade = append(toUpgrade, n)
		}
	}

	// get all etcd nodes
	etcdToUpgrade := install.NodesWithRoles(toUpgrade, "etcd")
	// it's safe to upgrade one node etcd cluster from 2.3 to 3.1
	// it will always be required for this version because all prior ket versions had a etcd2
	if len(etcdToUpgrade) > 1 {
		// Run the upgrade on the nodes to Etcd v3.0.x
		if err := executor.UpgradeEtcd2Nodes(plan, etcdToUpgrade); err != nil {
			return fmt.Errorf("Failed to upgrade etcd2 nodes: %v", err)
		}
	}

	// Run the upgrade on the nodes that need it
	if err := executor.UpgradeNodes(plan, toUpgrade, opts.online, opts.maxParallelWorkers); err != nil {
		return fmt.Errorf("Failed to upgrade nodes: %v", err)
	}

	return nil
}
Move parallel workers flag to offline upgrade
package cli
import (
"errors"
"fmt"
"io"
"os"
"github.com/apprenda/kismatic/pkg/data"
"github.com/apprenda/kismatic/pkg/install"
"github.com/apprenda/kismatic/pkg/util"
"github.com/spf13/cobra"
)
// upgradeOpts holds the flag values shared by the upgrade subcommands.
type upgradeOpts struct {
	generatedAssetsDir string // directory where installation-generated assets are stored
	verbose            bool   // enable verbose logging
	outputFormat       string // "simple" or "raw"
	skipPreflight      bool   // skip upgrade pre-flight checks
	online             bool   // set by the "online" subcommand; enables safety checks and draining
	planFile           string // path to the cluster plan file
	restartServices    bool   // force restart of cluster services
	partialAllowed     bool   // upgrade ready nodes and skip unready ones
	maxParallelWorkers int    // maximum worker nodes upgraded in parallel (offline-only flag)
	dryRun             bool   // simulate the upgrade without changing the cluster
}
// NewCmdUpgrade returns the upgrade command. The command itself only prints
// help; the actual work is done by the "offline" and "online" subcommands,
// which share this command's persistent flags via the opts struct. The
// max-parallel-workers flag is registered on the offline subcommand only.
func NewCmdUpgrade(out io.Writer) *cobra.Command {
	var opts upgradeOpts

	cmd := &cobra.Command{
		Use:   "upgrade",
		Short: "Upgrade your Kubernetes cluster",
		Long: `Upgrade your Kubernetes cluster.
The upgrade process is applied to each node, one node at a time. If a private docker registry
is being used, the new container images will be pushed by Kismatic before starting to upgrade
nodes.
Nodes in the cluster are upgraded in the following order:
1. Etcd nodes
2. Master nodes
3. Worker nodes (regardless of specialization)
`,
		RunE: func(cmd *cobra.Command, args []string) error {
			return cmd.Help()
		},
	}

	// persistent flags are inherited by the offline/online subcommands
	cmd.PersistentFlags().StringVar(&opts.generatedAssetsDir, "generated-assets-dir", "generated", "path to the directory where assets generated during the installation process will be stored")
	cmd.PersistentFlags().BoolVar(&opts.verbose, "verbose", false, "enable verbose logging from the installation")
	cmd.PersistentFlags().StringVarP(&opts.outputFormat, "output", "o", "simple", "installation output format (options \"simple\"|\"raw\")")
	cmd.PersistentFlags().BoolVar(&opts.skipPreflight, "skip-preflight", false, "skip upgrade pre-flight checks")
	cmd.PersistentFlags().BoolVar(&opts.restartServices, "restart-services", false, "force restart cluster services (Use with care)")
	cmd.PersistentFlags().BoolVar(&opts.partialAllowed, "partial-ok", false, "allow the upgrade of ready nodes, and skip nodes that have been deemed unready for upgrade")
	cmd.PersistentFlags().BoolVar(&opts.dryRun, "dry-run", false, "simulate the upgrade, but don't actually upgrade the cluster")
	addPlanFileFlag(cmd.PersistentFlags(), &opts.planFile)

	// Subcommands
	cmd.AddCommand(NewCmdUpgradeOffline(out, &opts))
	cmd.AddCommand(NewCmdUpgradeOnline(out, &opts))

	return cmd
}
// NewCmdUpgradeOffline returns the command for running offline upgrades.
// Offline mode skips the online safety/availability checks and does not drain
// nodes before upgrading them; max-parallel-workers is only meaningful here.
// (Fixes the "availabilty" typo in the user-facing help text.)
func NewCmdUpgradeOffline(out io.Writer, opts *upgradeOpts) *cobra.Command {
	cmd := cobra.Command{
		Use:   "offline",
		Short: "Perform an offline upgrade of your Kubernetes cluster",
		Long: `Perform an offline upgrade of your Kubernetes cluster.
The offline upgrade is available for those clusters in which safety and availability are not a concern.
In this mode, the safety and availability checks will not be performed, nor will the nodes in the cluster
be drained of workloads.
Performing an offline upgrade could result in loss of critical data and reduced service
availability. For this reason, this method should not be used for clusters that are housing
production workloads.
`,
		RunE: func(cmd *cobra.Command, args []string) error {
			return doUpgrade(out, opts)
		},
	}
	// parallel worker upgrades are only offered for offline upgrades
	cmd.Flags().IntVar(&opts.maxParallelWorkers, "max-parallel-workers", 1, "the maximum number of worker nodes to be upgraded in parallel")
	return &cmd
}
// NewCmdUpgradeOnline returns the command for running online upgrades
func NewCmdUpgradeOnline(out io.Writer, opts *upgradeOpts) *cobra.Command {
	cmd := cobra.Command{
		Use:   "online",
		Short: "Perform an online upgrade of your Kubernetes cluster",
		Long: `Perform an online upgrade of your Kubernetes cluster.
During an online upgrade, Kismatic will run safety and availability checks (see table below) against the
existing cluster before performing the upgrade. If any unsafe condition is detected, a report will
be printed, and the upgrade will not proceed.
If the node under upgrade is a Kubernetes node, it is cordoned and drained of workloads
before any changes are applied.
`,
		RunE: func(cmd *cobra.Command, args []string) error {
			// mark the run as online so doUpgrade performs safety checks and draining
			opts.online = true
			return doUpgrade(out, opts)
		},
	}
	return &cmd
}
// doUpgrade runs the upgrade workflow shared by the online and offline
// subcommands: validate options, read and validate the plan file, compute
// which nodes are out of date, upgrade the docker registry when applicable,
// upgrade the out-of-date nodes, and finally (unless a partial upgrade was
// requested) upgrade cluster-level services and smoke-test the cluster.
func doUpgrade(out io.Writer, opts *upgradeOpts) error {
	if opts.maxParallelWorkers < 1 {
		return fmt.Errorf("max-parallel-workers must be greater or equal to 1, got: %d", opts.maxParallelWorkers)
	}
	planFile := opts.planFile
	planner := install.FilePlanner{File: planFile}
	executorOpts := install.ExecutorOptions{
		GeneratedAssetsDirectory: opts.generatedAssetsDir,
		RestartServices:          opts.restartServices,
		OutputFormat:             opts.outputFormat,
		Verbose:                  opts.verbose,
		DryRun:                   opts.dryRun,
	}
	executor, err := install.NewExecutor(out, os.Stderr, executorOpts)
	if err != nil {
		return err
	}
	preflightExecOpts := executorOpts
	preflightExecOpts.DryRun = false // We always want to run preflight, even if doing a dry-run
	preflightExec, err := install.NewPreFlightExecutor(out, os.Stderr, preflightExecOpts)
	if err != nil {
		return err
	}

	util.PrintHeader(out, "Computing upgrade plan", '=')

	// Read plan file
	if !planner.PlanExists() {
		util.PrettyPrintErr(out, "Reading plan file")
		return fmt.Errorf("plan file %q does not exist", planFile)
	}
	util.PrettyPrintOk(out, "Reading plan file")
	plan, err := planner.Read()
	if err != nil {
		util.PrettyPrintErr(out, "Reading plan file")
		return fmt.Errorf("error reading plan file %q: %v", planFile, err)
	}

	// Validate SSH connectivity to nodes
	if ok, errs := install.ValidatePlanSSHConnections(plan); !ok {
		util.PrettyPrintErr(out, "Validate SSH connectivity to nodes")
		util.PrintValidationErrors(out, errs)
		return fmt.Errorf("SSH connectivity validation errors found")
	}
	util.PrettyPrintOk(out, "Validate SSH connectivity to nodes")

	// Figure out which nodes to upgrade: anything older than the target version
	cv, err := install.ListVersions(plan)
	if err != nil {
		return fmt.Errorf("error listing cluster versions: %v", err)
	}
	var toUpgrade []install.ListableNode
	var toSkip []install.ListableNode
	for _, n := range cv.Nodes {
		if install.IsOlderVersion(n.Version) {
			toUpgrade = append(toUpgrade, n)
		} else {
			toSkip = append(toSkip, n)
		}
	}

	// Print the nodes that will be skipped
	if len(toSkip) > 0 {
		util.PrintHeader(out, "Skipping nodes", '=')
		for _, n := range toSkip {
			util.PrettyPrintOk(out, "- %q is at the target version %q", n.Node.Host, n.Version)
		}
		fmt.Fprintln(out)
	}

	// The docker registry is only managed for disconnected installations
	if plan.ConfigureDockerRegistry() && plan.Cluster.DisconnectedInstallation {
		util.PrintHeader(out, "Upgrade: Docker Registry", '=')
		if err = executor.UpgradeDockerRegistry(*plan); err != nil {
			return fmt.Errorf("Failed to upgrade docker registry: %v", err)
		}
	}

	// Print message if there's no work to do
	if len(toUpgrade) == 0 {
		fmt.Fprintln(out, "All nodes are at the target version. Skipping node upgrades.")
	} else {
		if err = upgradeNodes(out, *plan, *opts, toUpgrade, executor, preflightExec); err != nil {
			return err
		}
	}

	// A partial upgrade stops here; cluster-level services are only upgraded
	// during a full upgrade
	if opts.partialAllowed {
		util.PrintColor(out, util.Green, `
Partial upgrade complete.
Cluster level services are still left to upgrade. These can only be upgraded
when performing a full upgrade. When you are ready, you may use "kismatic upgrade"
without the "--partial-ok" flag to perform a full upgrade.
`)
		return nil
	}

	// Upgrade the cluster services
	util.PrintHeader(out, "Upgrade: Cluster Services", '=')
	if err := executor.UpgradeClusterServices(*plan); err != nil {
		return fmt.Errorf("Failed to upgrade cluster services: %v", err)
	}

	if err := executor.RunSmokeTest(plan); err != nil {
		return fmt.Errorf("Smoke test failed: %v", err)
	}

	if !opts.dryRun {
		fmt.Fprintln(out)
		util.PrintColor(out, util.Green, "Upgrade complete\n")
		fmt.Fprintln(out)
	}

	return nil
}
// upgradeNodes runs online-upgrade safety checks and upgrade preflight checks
// against the nodes that need upgrading, filters out any node that fails them
// (only permitted during a partial upgrade), and then upgrades the remaining
// nodes. Etcd nodes are migrated to etcd v3 first when there is more than one.
func upgradeNodes(out io.Writer, plan install.Plan, opts upgradeOpts, nodesNeedUpgrade []install.ListableNode, executor install.Executor, preflightExec install.PreFlightExecutor) error {
	// Run safety checks if doing an online upgrade
	unsafeNodes := []install.ListableNode{}
	if opts.online {
		util.PrintHeader(out, "Validate Online Upgrade", '=')
		// Use the first master node for running kubectl
		client, err := plan.GetSSHClient(plan.Master.Nodes[0].Host)
		if err != nil {
			return fmt.Errorf("error getting SSH client: %v", err)
		}
		kubeClient := data.RemoteKubectl{SSHClient: client}
		for _, node := range nodesNeedUpgrade {
			util.PrettyPrint(out, "%s %v", node.Node.Host, node.Roles)
			errs := install.DetectNodeUpgradeSafety(plan, node.Node, kubeClient)
			if len(errs) != 0 {
				util.PrintError(out)
				fmt.Fprintln(out)
				for _, err := range errs {
					// Fix: was fmt.Println, which bypassed the caller-provided
					// writer and sent the safety errors to stdout unconditionally.
					fmt.Fprintln(out, "-", err.Error())
				}
				unsafeNodes = append(unsafeNodes, node)
			} else {
				util.PrintOkln(out)
			}
		}
		// If we found any unsafe nodes, and we are not doing a partial upgrade, exit.
		if len(unsafeNodes) > 0 && !opts.partialAllowed {
			return errors.New("Unable to perform an online upgrade due to the unsafe conditions detected.")
		}
		// Block the upgrade if partial is allowed but there is an etcd or master node
		// that cannot be upgraded
		if opts.partialAllowed {
			for _, n := range unsafeNodes {
				for _, r := range n.Roles {
					if r == "master" || r == "etcd" {
						return errors.New("Unable to perform an online upgrade due to the unsafe conditions detected.")
					}
				}
			}
		}
	}
	// Run upgrade preflight on the nodes that are to be upgraded. A preflight
	// failure marks the node unready rather than aborting immediately, so a
	// partial upgrade can still proceed with the healthy nodes.
	unreadyNodes := []install.ListableNode{}
	if !opts.skipPreflight {
		for _, node := range nodesNeedUpgrade {
			util.PrintHeader(out, fmt.Sprintf("Preflight Checks: %s %s", node.Node.Host, node.Roles), '=')
			if err := preflightExec.RunUpgradePreFlightCheck(&plan, node); err != nil {
				unreadyNodes = append(unreadyNodes, node)
			}
		}
	}
	// Block upgrade if we found unready nodes, and we are not doing a partial upgrade
	if len(unreadyNodes) > 0 && !opts.partialAllowed {
		return errors.New("Errors found during preflight checks")
	}
	// Block the upgrade if partial is allowed but there is an etcd or master node
	// that cannot be upgraded
	if opts.partialAllowed {
		for _, n := range unreadyNodes {
			for _, r := range n.Roles {
				if r == "master" || r == "etcd" {
					return errors.New("Errors found during preflight checks")
				}
			}
		}
	}
	// Filter out the nodes that are unsafe/unready
	toUpgrade := []install.ListableNode{}
	for _, n := range nodesNeedUpgrade {
		upgrade := true
		for _, unsafe := range unsafeNodes {
			if unsafe.Node == n.Node {
				upgrade = false
			}
		}
		for _, unready := range unreadyNodes {
			if unready.Node == n.Node {
				upgrade = false
			}
		}
		if upgrade {
			toUpgrade = append(toUpgrade, n)
		}
	}
	// get all etcd nodes
	etcdToUpgrade := install.NodesWithRoles(toUpgrade, "etcd")
	// it's safe to upgrade one node etcd cluster from 2.3 to 3.1
	// it will always be required for this version because all prior ket versions had a etcd2
	if len(etcdToUpgrade) > 1 {
		// Run the upgrade on the nodes to Etcd v3.0.x
		if err := executor.UpgradeEtcd2Nodes(plan, etcdToUpgrade); err != nil {
			return fmt.Errorf("Failed to upgrade etcd2 nodes: %v", err)
		}
	}
	// Run the upgrade on the nodes that need it
	if err := executor.UpgradeNodes(plan, toUpgrade, opts.online, opts.maxParallelWorkers); err != nil {
		return fmt.Errorf("Failed to upgrade nodes: %v", err)
	}
	return nil
}
|
package client
import (
"bytes"
"fmt"
"github.com/Aptomi/aptomi/pkg/config"
"github.com/Aptomi/aptomi/pkg/lang"
"github.com/Aptomi/aptomi/pkg/object"
"github.com/Aptomi/aptomi/pkg/object/codec/yaml"
"github.com/gosuri/uitable"
"io/ioutil"
"net/http"
"time"
)
// Show method retrieves current policy from aptomi and prints it
func Show(cfg *config.Client) error {
catalog := object.NewCatalog().Append(lang.Objects...)
cod := yaml.NewCodec(catalog)
client := &http.Client{
Timeout: 5 * time.Second,
}
req, err := http.NewRequest(http.MethodGet, cfg.API.URL()+"/policy", bytes.NewBuffer([]byte{}))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/yaml")
req.Header.Set("User-Agent", "aptomictl")
fmt.Println("Request:", req)
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close() // nolint: errcheck
// todo(slujanov): process response - check status and print returned data
fmt.Println("Response:", resp)
respData, err := ioutil.ReadAll(resp.Body)
if err != nil {
panic(fmt.Sprintf("Error while reading bytes from response Body: %s", err))
}
objects, err := cod.UnmarshalOneOrMany(respData)
if err != nil {
panic(fmt.Sprintf("Error while unmarshaling response: %s", err))
}
// todo(slukjanov): pretty print response
table := uitable.New()
table.MaxColWidth = 50
table.AddRow("#", "Namespace", "Kind", "Name", "Generation", "Object")
for idx, obj := range objects {
table.AddRow(idx, obj.GetNamespace(), obj.GetKind(), obj.GetName(), obj.GetGeneration(), obj)
}
fmt.Println(table)
return nil
}
Fix object catalog for policy show command in CLI
package client

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
	"time"

	"github.com/Aptomi/aptomi/pkg/config"
	"github.com/Aptomi/aptomi/pkg/object"
	"github.com/Aptomi/aptomi/pkg/object/codec/yaml"
	"github.com/Aptomi/aptomi/pkg/server/store"
	"github.com/gosuri/uitable"
)

// Show retrieves the current policy from the aptomi API and prints the
// returned objects as a table (namespace, kind, name, generation).
func Show(cfg *config.Client) error {
	catalog := object.NewCatalog().Append(store.PolicyDataObject)
	cod := yaml.NewCodec(catalog)
	client := &http.Client{
		Timeout: 5 * time.Second,
	}
	req, err := http.NewRequest(http.MethodGet, cfg.API.URL()+"/policy", bytes.NewBuffer([]byte{}))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/yaml")
	req.Header.Set("User-Agent", "aptomictl")
	fmt.Println("Request:", req)
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close() // nolint: errcheck
	// todo(slujanov): process response - check status and print returned data
	fmt.Println("Response:", resp)
	respData, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// Fix: return the error instead of panicking in library code.
		return fmt.Errorf("error while reading bytes from response Body: %s", err)
	}
	objects, err := cod.UnmarshalOneOrMany(respData)
	if err != nil {
		// Fix: return the error instead of panicking in library code.
		return fmt.Errorf("error while unmarshaling response: %s", err)
	}
	// todo(slukjanov): pretty print response
	table := uitable.New()
	table.MaxColWidth = 50
	table.AddRow("#", "Namespace", "Kind", "Name", "Generation", "Object")
	for idx, obj := range objects {
		table.AddRow(idx, obj.GetNamespace(), obj.GetKind(), obj.GetName(), obj.GetGeneration(), obj)
	}
	fmt.Println(table)
	return nil
}
|
/*
* Copyright 2018 The original author or authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package core
import (
"net/url"
"fmt"
"io/ioutil"
"net/http"
"bytes"
"time"
"errors"
)
// SystemInstallOptions configures the riff system install.
type SystemInstallOptions struct {
	// NodePort, when true, rewrites "LoadBalancer" to "NodePort" in the
	// applied release YAML (for clusters without an external load balancer).
	NodePort bool
}
// SystemInstall installs the riff system components — Istio, Knative Serving,
// Knative Eventing, and the stub ClusterBus — from pinned 0.1.0 release URLs,
// applying each release via kubectl.
// NOTE(review): output uses the builtin print (which writes to stderr);
// consider migrating to fmt — confirm intent before changing.
func (kc *kubectlClient) SystemInstall(options SystemInstallOptions) error {
	// Pinned release locations for riff 0.1.0.
	istioRelease := "https://storage.googleapis.com/riff-releases/istio-riff-0.1.0.yaml"
	servingRelease := "https://storage.googleapis.com/riff-releases/release-no-mon-riff-0.1.0.yaml"
	eventingRelease := "https://storage.googleapis.com/riff-releases/release-eventing-riff-0.1.0.yaml"
	stubBusRelease := "https://storage.googleapis.com/riff-releases/release-eventing-clusterbus-stub-riff-0.1.0.yaml"
	istioUrl, err := resolveReleaseURLs(istioRelease)
	if err != nil {
		return err
	}
	print("Installing Istio: ", istioUrl.String(), "\n")
	istioYaml, err := loadRelease(istioUrl)
	if err != nil {
		return err
	}
	if options.NodePort {
		// Rewrite services for clusters without an external load balancer.
		istioYaml = bytes.Replace(istioYaml, []byte("LoadBalancer"), []byte("NodePort"), -1)
	}
	// Pipe the (possibly rewritten) YAML into kubectl on stdin.
	istioLog, err := kc.kubeCtl.ExecStdin([]string{"apply", "-f", "-"}, &istioYaml)
	if err != nil {
		print(istioLog, "\n")
		return err
	}
	print("Istio for riff installed\n", "\n")
	// Knative Serving needs the Istio sidecar injector up before installing.
	err = waitForIstioSidecarInjector(kc)
	if err != nil {
		return err
	}
	servingUrl, err := resolveReleaseURLs(servingRelease)
	if err != nil {
		return err
	}
	print("Installing Knative Serving: ", servingUrl.String(), "\n")
	servingYaml, err := loadRelease(servingUrl)
	if err != nil {
		return err
	}
	if options.NodePort {
		servingYaml = bytes.Replace(servingYaml, []byte("LoadBalancer"), []byte("NodePort"), -1)
	}
	servingLog, err := kc.kubeCtl.ExecStdin([]string{"apply", "-f", "-"}, &servingYaml)
	if err != nil {
		print(servingLog, "\n")
		return err
	}
	print("Knative Serving for riff installed\n", "\n")
	eventingUrl, err := resolveReleaseURLs(eventingRelease)
	if err != nil {
		return err
	}
	print("Installing Knative Eventing: ", eventingUrl.String(), "\n")
	// Eventing and the stub bus are applied by URL — no NodePort rewrite.
	eventingLog, err := kc.kubeCtl.Exec([]string{"apply", "-f", eventingUrl.String()})
	if err != nil {
		print(eventingLog, "\n")
		return err
	}
	print("Knative Eventing for riff installed\n", "\n")
	busUrl, err := resolveReleaseURLs(stubBusRelease)
	if err != nil {
		return err
	}
	print("Applying Stub ClusterBus resource: ", busUrl.String(), "\n")
	busLog, err := kc.kubeCtl.Exec([]string{"apply", "-f", busUrl.String()})
	print(busLog, "\n")
	if err != nil {
		return err
	}
	print("riff system install is complete\n", "\n")
	return nil
}
// resolveReleaseURLs parses filename into a URL and accepts it only when the
// scheme is http or https; any other scheme yields an error.
func resolveReleaseURLs(filename string) (url.URL, error) {
	parsed, err := url.Parse(filename)
	if err != nil {
		return url.URL{}, err
	}
	switch parsed.Scheme {
	case "http", "https":
		return *parsed, nil
	default:
		return *parsed, fmt.Errorf("Filename must be file, http or https, got %s", parsed.Scheme)
	}
}
// loadRelease downloads the release at url and returns its body.
// It fails on any non-2xx status so that an error page (e.g. a bucket 404)
// is never mistaken for release YAML and applied to the cluster.
func loadRelease(url url.URL) ([]byte, error) {
	resp, err := http.Get(url.String())
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return nil, fmt.Errorf("unexpected status %q fetching %s", resp.Status, url.String())
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	return body, nil
}
// waitForIstioSidecarInjector polls the istio-sidecar-injector pod every 10s
// for up to 36 attempts (six minutes), returning nil once it reports Running
// and an error if it fails or never starts.
func waitForIstioSidecarInjector(kc *kubectlClient) error {
	print("Waiting for istio-sidecar-injector to start ")
	for i := 0; i < 36; i++ {
		print(".")
		injectorStatus, err := kc.kubeCtl.Exec([]string{"get", "pod", "-n", "istio-system", "-l", "istio=sidecar-injector", "-o", "jsonpath='{.items[0].status.phase}'"})
		if err != nil {
			return err
		}
		// jsonpath output arrives wrapped in single quotes, hence "'Error'".
		if injectorStatus == "'Error'" {
			return errors.New("istio-sidecar-injector pod failed to start")
		}
		if injectorStatus == "'Running'" {
			// Fix: print the status after the progress dots, then the blank
			// lines — the previous argument order emitted "\n\n" first.
			print(injectorStatus, "\n\n")
			return nil
		}
		time.Sleep(10 * time.Second) // wait for it to start
	}
	print("\n\n")
	return errors.New("istio-sidecar-injector pod did not start in time")
}
Fix newline in wait loop
/*
* Copyright 2018 The original author or authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package core
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"time"
)
// SystemInstallOptions configures the riff system install.
type SystemInstallOptions struct {
	// NodePort, when true, rewrites "LoadBalancer" to "NodePort" in the
	// applied release YAML (for clusters without an external load balancer).
	NodePort bool
}
// SystemInstall installs the riff system components — Istio, Knative Serving,
// Knative Eventing, and the stub ClusterBus — from pinned 0.1.0 release URLs,
// applying each release via kubectl.
// NOTE(review): output uses the builtin print (which writes to stderr);
// consider migrating to fmt — confirm intent before changing.
func (kc *kubectlClient) SystemInstall(options SystemInstallOptions) error {
	// Pinned release locations for riff 0.1.0.
	istioRelease := "https://storage.googleapis.com/riff-releases/istio-riff-0.1.0.yaml"
	servingRelease := "https://storage.googleapis.com/riff-releases/release-no-mon-riff-0.1.0.yaml"
	eventingRelease := "https://storage.googleapis.com/riff-releases/release-eventing-riff-0.1.0.yaml"
	stubBusRelease := "https://storage.googleapis.com/riff-releases/release-eventing-clusterbus-stub-riff-0.1.0.yaml"
	istioUrl, err := resolveReleaseURLs(istioRelease)
	if err != nil {
		return err
	}
	print("Installing Istio: ", istioUrl.String(), "\n")
	istioYaml, err := loadRelease(istioUrl)
	if err != nil {
		return err
	}
	if options.NodePort {
		// Rewrite services for clusters without an external load balancer.
		istioYaml = bytes.Replace(istioYaml, []byte("LoadBalancer"), []byte("NodePort"), -1)
	}
	// Pipe the (possibly rewritten) YAML into kubectl on stdin.
	istioLog, err := kc.kubeCtl.ExecStdin([]string{"apply", "-f", "-"}, &istioYaml)
	if err != nil {
		print(istioLog, "\n")
		return err
	}
	print("Istio for riff installed\n", "\n")
	// Knative Serving needs the Istio sidecar injector up before installing.
	err = waitForIstioSidecarInjector(kc)
	if err != nil {
		return err
	}
	servingUrl, err := resolveReleaseURLs(servingRelease)
	if err != nil {
		return err
	}
	print("Installing Knative Serving: ", servingUrl.String(), "\n")
	servingYaml, err := loadRelease(servingUrl)
	if err != nil {
		return err
	}
	if options.NodePort {
		servingYaml = bytes.Replace(servingYaml, []byte("LoadBalancer"), []byte("NodePort"), -1)
	}
	servingLog, err := kc.kubeCtl.ExecStdin([]string{"apply", "-f", "-"}, &servingYaml)
	if err != nil {
		print(servingLog, "\n")
		return err
	}
	print("Knative Serving for riff installed\n", "\n")
	eventingUrl, err := resolveReleaseURLs(eventingRelease)
	if err != nil {
		return err
	}
	print("Installing Knative Eventing: ", eventingUrl.String(), "\n")
	// Eventing and the stub bus are applied by URL — no NodePort rewrite.
	eventingLog, err := kc.kubeCtl.Exec([]string{"apply", "-f", eventingUrl.String()})
	if err != nil {
		print(eventingLog, "\n")
		return err
	}
	print("Knative Eventing for riff installed\n", "\n")
	busUrl, err := resolveReleaseURLs(stubBusRelease)
	if err != nil {
		return err
	}
	print("Applying Stub ClusterBus resource: ", busUrl.String(), "\n")
	busLog, err := kc.kubeCtl.Exec([]string{"apply", "-f", busUrl.String()})
	print(busLog, "\n")
	if err != nil {
		return err
	}
	print("riff system install is complete\n", "\n")
	return nil
}
// resolveReleaseURLs parses filename into a URL and accepts it only when the
// scheme is http or https; any other scheme yields an error.
func resolveReleaseURLs(filename string) (url.URL, error) {
	parsed, err := url.Parse(filename)
	if err != nil {
		return url.URL{}, err
	}
	switch parsed.Scheme {
	case "http", "https":
		return *parsed, nil
	default:
		return *parsed, fmt.Errorf("Filename must be file, http or https, got %s", parsed.Scheme)
	}
}
// loadRelease downloads the release at url and returns its body.
// It fails on any non-2xx status so that an error page (e.g. a bucket 404)
// is never mistaken for release YAML and applied to the cluster.
func loadRelease(url url.URL) ([]byte, error) {
	resp, err := http.Get(url.String())
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return nil, fmt.Errorf("unexpected status %q fetching %s", resp.Status, url.String())
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	return body, nil
}
// waitForIstioSidecarInjector polls the istio-sidecar-injector pod every 10s
// for up to 36 attempts (six minutes), returning nil once it reports Running
// and an error if it fails or never starts.
func waitForIstioSidecarInjector(kc *kubectlClient) error {
	print("Waiting for istio-sidecar-injector to start ")
	const attempts = 36
	for attempt := 0; attempt < attempts; attempt++ {
		print(".")
		status, err := kc.kubeCtl.Exec([]string{"get", "pod", "-n", "istio-system", "-l", "istio=sidecar-injector", "-o", "jsonpath='{.items[0].status.phase}'"})
		if err != nil {
			return err
		}
		// jsonpath output arrives wrapped in single quotes.
		switch status {
		case "'Error'":
			return errors.New("istio-sidecar-injector pod failed to start")
		case "'Running'":
			print(status, "\n\n")
			return nil
		}
		time.Sleep(10 * time.Second) // wait for it to start
	}
	print("\n\n")
	return errors.New("istio-sidecar-injector pod did not start in time")
}
|
/*
* Copyright 2018 The original author or authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package core
import (
"bufio"
"bytes"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/projectriff/riff/pkg/env"
"github.com/projectriff/riff/pkg/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// istioNamespace is the namespace the Istio release installs into.
const istioNamespace = "istio-system"
// SystemInstallOptions configures the riff system install.
type SystemInstallOptions struct {
	// Manifest names (or points at) the manifest describing the releases to apply.
	Manifest string
	// NodePort rewrites "type: LoadBalancer" to "type: NodePort" in applied YAML.
	NodePort bool
	// Force skips interactive confirmation prompts.
	Force bool
}
// SystemUninstallOptions configures the riff system uninstall.
type SystemUninstallOptions struct {
	// Istio also removes the Istio components, not just Knative.
	Istio bool
	// Force skips interactive confirmation prompts.
	Force bool
}
var (
	// knativeNamespaces are the namespaces owned by the Knative components.
	knativeNamespaces = []string{"knative-eventing", "knative-serving", "knative-build", "knative-monitoring"}
	// allNameSpaces is every namespace the install/uninstall touches.
	allNameSpaces = append(knativeNamespaces, istioNamespace)
)
// SystemInstall installs the Istio and Knative components described by the
// resolved manifest. It returns false with a nil error when the user declines
// a confirmation prompt, and true once all components have been applied.
func (kc *kubectlClient) SystemInstall(manifests map[string]*Manifest, options SystemInstallOptions) (bool, error) {
	manifest, err := ResolveManifest(manifests, options.Manifest)
	if err != nil {
		return false, err
	}
	// Refuse to install while any target namespace is still being torn down.
	err = ensureNotTerminating(kc, allNameSpaces, "Please try again later.")
	if err != nil {
		return false, err
	}
	istioStatus, err := getNamespaceStatus(kc, istioNamespace)
	if err != nil {
		// Fix: this error was previously dropped, allowing the install to
		// proceed on an indeterminate namespace status.
		return false, err
	}
	if istioStatus == "'NotFound'" {
		fmt.Print("Installing Istio components\n")
		for i, release := range manifest.Istio {
			if i > 0 {
				time.Sleep(5 * time.Second) // wait for previous resources to be created
			}
			err = kc.applyReleaseWithRetry(release, options)
			if err != nil {
				return false, err
			}
		}
		fmt.Print("Istio components installed\n\n")
	} else {
		if !options.Force {
			answer, err := confirm("Istio is already installed, do you want to install the Knative components for riff?")
			if err != nil {
				return false, err
			}
			if !answer {
				return false, nil
			}
		}
	}
	err = waitForIstioComponents(kc)
	if err != nil {
		return false, err
	}
	fmt.Print("Installing Knative components\n")
	for _, release := range manifest.Knative {
		err = kc.applyReleaseWithRetry(release, options)
		if err != nil {
			return false, err
		}
	}
	fmt.Print("Knative components installed\n\n")
	return true, nil
}
// applyReleaseWithRetry applies a release and, on failure, retries once after
// a short pause so that resources created by the first attempt (e.g. CRDs)
// have time to register before the second apply.
func (kc *kubectlClient) applyReleaseWithRetry(release string, options SystemInstallOptions) error {
	err := kc.applyRelease(release, options)
	if err != nil {
		fmt.Printf("Error applying resources, trying again\n")
		time.Sleep(5 * time.Second) // wait for previous resources to be created
		return kc.applyRelease(release, options)
	}
	return nil
}
// applyRelease loads the release YAML (resolved relative to the manifest's
// directory when not a URL), optionally rewrites LoadBalancer services to
// NodePort, and pipes the result into `kubectl apply -f -`.
func (kc *kubectlClient) applyRelease(release string, options SystemInstallOptions) error {
	yaml, err := resource.Load(release, filepath.Dir(options.Manifest))
	if err != nil {
		return err
	}
	if options.NodePort {
		yaml = bytes.Replace(yaml, []byte("type: LoadBalancer"), []byte("type: NodePort"), -1)
	}
	fmt.Printf("Applying resources defined in: %s\n", release)
	istioLog, err := kc.kubeCtl.ExecStdin([]string{"apply", "-f", "-"}, &yaml)
	if err != nil {
		fmt.Printf("%s\n", istioLog)
		// "forbidden" in kubectl's output indicates missing cluster-admin
		// permissions; print recovery instructions for that case.
		if strings.Contains(istioLog, "forbidden") {
			fmt.Print(`It looks like you don't have cluster-admin permissions.
To fix this you need to:
1. Delete the current failed installation using:
` + env.Cli.Name + ` system uninstall --istio --force
2. Give the user account used for installation cluster-admin permissions, you can use the following command:
kubectl create clusterrolebinding cluster-admin-binding \
--clusterrole=cluster-admin \
--user=<install-user>
3. Re-install ` + env.Cli.Name + `
`)
		}
		return err
	}
	return nil
}
// SystemUninstall removes the Knative components and, optionally, the Istio
// components. It returns false with a nil error when the user declines a
// confirmation prompt, and true when the requested removal completed.
func (kc *kubectlClient) SystemUninstall(options SystemUninstallOptions) (bool, error) {
	err := ensureNotTerminating(kc, allNameSpaces, "This would indicate that the system was already uninstalled.")
	if err != nil {
		return false, err
	}
	knativeNsCount, err := checkNamespacesExists(kc, knativeNamespaces)
	if err != nil {
		// Fix: this error was previously overwritten by the next call and lost.
		return false, err
	}
	istioNsCount, err := checkNamespacesExists(kc, []string{istioNamespace})
	if err != nil {
		return false, err
	}
	if knativeNsCount == 0 {
		fmt.Print("No Knative components for " + env.Cli.Name + " found\n")
	} else {
		if !options.Force {
			answer, err := confirm("Are you sure you want to uninstall the " + env.Cli.Name + " system?")
			if err != nil {
				return false, err
			}
			if !answer {
				return false, nil
			}
		}
		fmt.Print("Removing Knative for " + env.Cli.Name + " components\n")
		err = deleteCrds(kc, "knative.dev")
		if err != nil {
			return false, err
		}
		err = deleteClusterResources(kc, "clusterrolebinding", "knative-")
		if err != nil {
			return false, err
		}
		err = deleteClusterResources(kc, "clusterrolebinding", "build-controller-")
		if err != nil {
			return false, err
		}
		err = deleteClusterResources(kc, "clusterrolebinding", "eventing-controller-")
		if err != nil {
			return false, err
		}
		err = deleteClusterResources(kc, "clusterrolebinding", "clusterbus-controller-")
		if err != nil {
			return false, err
		}
		err = deleteClusterResources(kc, "clusterrole", "knative-")
		if err != nil {
			return false, err
		}
		err = deleteNamespaces(kc, knativeNamespaces)
		if err != nil {
			return false, err
		}
	}
	if istioNsCount == 0 {
		fmt.Print("No Istio components found\n")
	} else {
		if !options.Istio {
			if options.Force {
				// --force without --istio: leave Istio in place without prompting.
				return true, nil
			}
			answer, err := confirm("Do you also want to uninstall Istio components?")
			if err != nil {
				return false, err
			}
			if !answer {
				return false, nil
			}
		}
		fmt.Print("Removing Istio components\n")
		err = deleteCrds(kc, "istio.io")
		if err != nil {
			return false, err
		}
		err = deleteClusterResources(kc, "clusterrolebinding", "istio-")
		if err != nil {
			return false, err
		}
		err = deleteClusterResources(kc, "clusterrole", "istio-")
		if err != nil {
			return false, err
		}
		err = deleteNamespaces(kc, []string{istioNamespace})
		if err != nil {
			return false, err
		}
		// Best-effort cleanup; error intentionally ignored.
		// TODO: remove this once https://github.com/knative/serving/issues/2018 is resolved
		deleteSingleResource(kc, "horizontalpodautoscaler.autoscaling", "istio-pilot")
	}
	return true, nil
}
// waitForIstioComponents polls the pods in the istio-system namespace every
// 10s for up to 36 attempts (six minutes), returning nil once every istio-*
// pod is Succeeded or Running with all containers ready.
func waitForIstioComponents(kc *kubectlClient) error {
	fmt.Print("Waiting for the Istio components to start ")
	for i := 0; i < 36; i++ {
		fmt.Print(".")
		pods := kc.kubeClient.CoreV1().Pods(istioNamespace)
		podList, err := pods.List(metav1.ListOptions{})
		if err != nil {
			return err
		}
		waitLonger := false
		for _, pod := range podList.Items {
			// Only Istio's own pods gate readiness.
			if !strings.HasPrefix(pod.Name, "istio-") {
				continue
			}
			if pod.Status.Phase != "Running" && pod.Status.Phase != "Succeeded" {
				waitLonger = true
				break
			} else {
				if pod.Status.Phase == "Running" {
					// Running is not enough: every container must also be ready.
					containers := pod.Status.ContainerStatuses
					for _, cont := range containers {
						if !cont.Ready {
							waitLonger = true
							break
						}
					}
				}
			}
		}
		if !waitLonger {
			fmt.Print(" all components are 'Running'\n\n")
			return nil
		}
		time.Sleep(10 * time.Second) // wait for them to start
	}
	return errors.New("the Istio components did not start in time")
}
// deleteNamespaces deletes each of the given namespaces, reporting (but not
// failing on) namespaces that no longer exist. It always returns nil.
func deleteNamespaces(kc *kubectlClient, namespaces []string) error {
	for _, ns := range namespaces {
		fmt.Printf("Deleting resources defined in: %s\n", ns)
		out, err := kc.kubeCtl.Exec([]string{"delete", "namespace", ns})
		if err == nil {
			continue
		}
		if strings.Contains(out, "NotFound") {
			fmt.Printf("Namespace \"%s\" was not found\n", ns)
		} else {
			fmt.Printf("%s", out)
		}
	}
	return nil
}
// deleteSingleResource deletes one named resource of the given type, printing
// kubectl's output only for failures other than NotFound. The delete error
// (including NotFound) is returned to the caller.
func deleteSingleResource(kc *kubectlClient, resourceType string, name string) error {
	fmt.Printf("Deleting %s/%s resource\n", resourceType, name)
	out, err := kc.kubeCtl.Exec([]string{"delete", resourceType, name})
	if err != nil && !strings.Contains(out, "NotFound") {
		fmt.Printf("%s", out)
	}
	return err
}
// deleteClusterResources deletes every cluster-scoped resource of the given
// type whose name starts with prefix, using one kubectl delete invocation.
func deleteClusterResources(kc *kubectlClient, resourceType string, prefix string) error {
	fmt.Printf("Deleting %ss prefixed with %s\n", resourceType, prefix)
	resourceList, err := kc.kubeCtl.Exec([]string{"get", resourceType, "-ocustom-columns=name:metadata.name"})
	if err != nil {
		return err
	}
	// Fix: the locals were named "resource", shadowing the imported resource
	// package; renamed for clarity.
	names := strings.Split(string(resourceList), "\n")
	var resourcesToDelete []string
	for _, name := range names {
		if strings.HasPrefix(name, prefix) {
			resourcesToDelete = append(resourcesToDelete, name)
		}
	}
	if len(resourcesToDelete) > 0 {
		resourceLog, err := kc.kubeCtl.Exec(append([]string{"delete", resourceType}, resourcesToDelete...))
		if err != nil {
			fmt.Printf("%s", resourceLog)
			return err
		}
	}
	return nil
}
// deleteCrds removes every CustomResourceDefinition whose name ends with the
// given suffix (e.g. "istio.io"), using one kubectl delete invocation.
func deleteCrds(kc *kubectlClient, suffix string) error {
	fmt.Printf("Deleting CRDs for %s\n", suffix)
	listing, err := kc.kubeCtl.Exec([]string{"get", "customresourcedefinitions", "-ocustom-columns=name:metadata.name"})
	if err != nil {
		return err
	}
	var doomed []string
	for _, name := range strings.Split(string(listing), "\n") {
		if strings.HasSuffix(name, suffix) {
			doomed = append(doomed, name)
		}
	}
	if len(doomed) == 0 {
		return nil
	}
	out, err := kc.kubeCtl.Exec(append([]string{"delete", "customresourcedefinition"}, doomed...))
	if err != nil {
		fmt.Printf("%s", out)
		return err
	}
	return nil
}
// checkNamespacesExists returns how many of the given namespaces currently
// exist on the cluster.
func checkNamespacesExists(kc *kubectlClient, names []string) (int, error) {
	count := 0
	for _, name := range names {
		status, err := getNamespaceStatus(kc, name)
		if err != nil {
			return count, err
		}
		if status != "'NotFound'" {
			// Fix: was `count = +1` (assignment of +1), which capped the
			// result at 1 no matter how many namespaces exist.
			count++
		}
	}
	return count, nil
}
// ensureNotTerminating returns an error when any of the given namespaces is
// in the 'Terminating' phase, with the supplied message appended for context.
func ensureNotTerminating(kc *kubectlClient, names []string, message string) error {
	for _, name := range names {
		status, err := getNamespaceStatus(kc, name)
		if err != nil {
			return err
		}
		if status == "'Terminating'" {
			// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) (flagged by staticcheck S1028).
			return fmt.Errorf("The %s namespace is currently 'Terminating'. %s", name, message)
		}
	}
	return nil
}
// getNamespaceStatus returns the namespace phase as reported by kubectl
// (quoted, e.g. "'Active'"), or the literal "'NotFound'" with a nil error
// when the namespace does not exist.
func getNamespaceStatus(kc *kubectlClient, name string) (string, error) {
	out, err := kc.kubeCtl.Exec([]string{"get", "namespace", name, "-o", "jsonpath='{.status.phase}'"})
	if err == nil {
		return out, nil
	}
	if strings.Contains(out, "NotFound") {
		return "'NotFound'", nil
	}
	return "", err
}
// confirm prompts the user with s and returns true only when the answer
// begins with 'y' or 'Y'. An empty or whitespace-only answer means no.
func confirm(s string) (bool, error) {
	reader := bufio.NewReader(os.Stdin)
	fmt.Printf("%s [y/N]: ", s)
	res, err := reader.ReadString('\n')
	if err != nil {
		return false, err
	}
	answer := strings.ToLower(strings.TrimSpace(res))
	// Fix: indexing [0] on the trimmed string panicked when the input was
	// whitespace-only (e.g. " \n"); any empty answer now defaults to "no".
	if answer == "" {
		return false, nil
	}
	return answer[0] == 'y', nil
}
Add 5 sec sleep before retrying applying resources
/*
* Copyright 2018 The original author or authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package core
import (
"bufio"
"bytes"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/projectriff/riff/pkg/env"
"github.com/projectriff/riff/pkg/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// istioNamespace is the namespace the Istio release installs into.
const istioNamespace = "istio-system"
// SystemInstallOptions configures the riff system install.
type SystemInstallOptions struct {
	// Manifest names (or points at) the manifest describing the releases to apply.
	Manifest string
	// NodePort rewrites "type: LoadBalancer" to "type: NodePort" in applied YAML.
	NodePort bool
	// Force skips interactive confirmation prompts.
	Force bool
}
// SystemUninstallOptions configures the riff system uninstall.
type SystemUninstallOptions struct {
	// Istio also removes the Istio components, not just Knative.
	Istio bool
	// Force skips interactive confirmation prompts.
	Force bool
}
var (
	// knativeNamespaces are the namespaces owned by the Knative components.
	knativeNamespaces = []string{"knative-eventing", "knative-serving", "knative-build", "knative-monitoring"}
	// allNameSpaces is every namespace the install/uninstall touches.
	allNameSpaces = append(knativeNamespaces, istioNamespace)
)
// SystemInstall installs the Istio and Knative components described by the
// resolved manifest. It returns false with a nil error when the user declines
// a confirmation prompt, and true once all components have been applied.
func (kc *kubectlClient) SystemInstall(manifests map[string]*Manifest, options SystemInstallOptions) (bool, error) {
	manifest, err := ResolveManifest(manifests, options.Manifest)
	if err != nil {
		return false, err
	}
	// Refuse to install while any target namespace is still being torn down.
	err = ensureNotTerminating(kc, allNameSpaces, "Please try again later.")
	if err != nil {
		return false, err
	}
	istioStatus, err := getNamespaceStatus(kc, istioNamespace)
	if err != nil {
		// Fix: this error was previously dropped, allowing the install to
		// proceed on an indeterminate namespace status.
		return false, err
	}
	if istioStatus == "'NotFound'" {
		fmt.Print("Installing Istio components\n")
		for i, release := range manifest.Istio {
			if i > 0 {
				time.Sleep(5 * time.Second) // wait for previous resources to be created
			}
			err = kc.applyReleaseWithRetry(release, options)
			if err != nil {
				return false, err
			}
		}
		fmt.Print("Istio components installed\n\n")
	} else {
		if !options.Force {
			answer, err := confirm("Istio is already installed, do you want to install the Knative components for riff?")
			if err != nil {
				return false, err
			}
			if !answer {
				return false, nil
			}
		}
	}
	err = waitForIstioComponents(kc)
	if err != nil {
		return false, err
	}
	fmt.Print("Installing Knative components\n")
	for _, release := range manifest.Knative {
		err = kc.applyReleaseWithRetry(release, options)
		if err != nil {
			return false, err
		}
	}
	fmt.Print("Knative components installed\n\n")
	return true, nil
}
// applyReleaseWithRetry applies a release and, on failure, retries once after
// a short pause so that resources created by the first attempt (e.g. CRDs)
// have time to register before the second apply.
func (kc *kubectlClient) applyReleaseWithRetry(release string, options SystemInstallOptions) error {
	if err := kc.applyRelease(release, options); err == nil {
		return nil
	}
	fmt.Printf("Error applying resources, trying again\n")
	time.Sleep(5 * time.Second) // wait for previous resources to be created
	return kc.applyRelease(release, options)
}
// applyRelease loads the release YAML (resolved relative to the manifest's
// directory when not a URL), optionally rewrites LoadBalancer services to
// NodePort, and pipes the result into `kubectl apply -f -`.
func (kc *kubectlClient) applyRelease(release string, options SystemInstallOptions) error {
	yaml, err := resource.Load(release, filepath.Dir(options.Manifest))
	if err != nil {
		return err
	}
	if options.NodePort {
		yaml = bytes.Replace(yaml, []byte("type: LoadBalancer"), []byte("type: NodePort"), -1)
	}
	fmt.Printf("Applying resources defined in: %s\n", release)
	istioLog, err := kc.kubeCtl.ExecStdin([]string{"apply", "-f", "-"}, &yaml)
	if err != nil {
		fmt.Printf("%s\n", istioLog)
		// "forbidden" in kubectl's output indicates missing cluster-admin
		// permissions; print recovery instructions for that case.
		if strings.Contains(istioLog, "forbidden") {
			fmt.Print(`It looks like you don't have cluster-admin permissions.
To fix this you need to:
1. Delete the current failed installation using:
` + env.Cli.Name + ` system uninstall --istio --force
2. Give the user account used for installation cluster-admin permissions, you can use the following command:
kubectl create clusterrolebinding cluster-admin-binding \
--clusterrole=cluster-admin \
--user=<install-user>
3. Re-install ` + env.Cli.Name + `
`)
		}
		return err
	}
	return nil
}
// SystemUninstall removes the Knative components and, optionally, the Istio
// components. It returns false with a nil error when the user declines a
// confirmation prompt, and true when the requested removal completed.
func (kc *kubectlClient) SystemUninstall(options SystemUninstallOptions) (bool, error) {
	err := ensureNotTerminating(kc, allNameSpaces, "This would indicate that the system was already uninstalled.")
	if err != nil {
		return false, err
	}
	knativeNsCount, err := checkNamespacesExists(kc, knativeNamespaces)
	if err != nil {
		// Fix: this error was previously overwritten by the next call and lost.
		return false, err
	}
	istioNsCount, err := checkNamespacesExists(kc, []string{istioNamespace})
	if err != nil {
		return false, err
	}
	if knativeNsCount == 0 {
		fmt.Print("No Knative components for " + env.Cli.Name + " found\n")
	} else {
		if !options.Force {
			answer, err := confirm("Are you sure you want to uninstall the " + env.Cli.Name + " system?")
			if err != nil {
				return false, err
			}
			if !answer {
				return false, nil
			}
		}
		fmt.Print("Removing Knative for " + env.Cli.Name + " components\n")
		err = deleteCrds(kc, "knative.dev")
		if err != nil {
			return false, err
		}
		err = deleteClusterResources(kc, "clusterrolebinding", "knative-")
		if err != nil {
			return false, err
		}
		err = deleteClusterResources(kc, "clusterrolebinding", "build-controller-")
		if err != nil {
			return false, err
		}
		err = deleteClusterResources(kc, "clusterrolebinding", "eventing-controller-")
		if err != nil {
			return false, err
		}
		err = deleteClusterResources(kc, "clusterrolebinding", "clusterbus-controller-")
		if err != nil {
			return false, err
		}
		err = deleteClusterResources(kc, "clusterrole", "knative-")
		if err != nil {
			return false, err
		}
		err = deleteNamespaces(kc, knativeNamespaces)
		if err != nil {
			return false, err
		}
	}
	if istioNsCount == 0 {
		fmt.Print("No Istio components found\n")
	} else {
		if !options.Istio {
			if options.Force {
				// --force without --istio: leave Istio in place without prompting.
				return true, nil
			}
			answer, err := confirm("Do you also want to uninstall Istio components?")
			if err != nil {
				return false, err
			}
			if !answer {
				return false, nil
			}
		}
		fmt.Print("Removing Istio components\n")
		err = deleteCrds(kc, "istio.io")
		if err != nil {
			return false, err
		}
		err = deleteClusterResources(kc, "clusterrolebinding", "istio-")
		if err != nil {
			return false, err
		}
		err = deleteClusterResources(kc, "clusterrole", "istio-")
		if err != nil {
			return false, err
		}
		err = deleteNamespaces(kc, []string{istioNamespace})
		if err != nil {
			return false, err
		}
		// Best-effort cleanup; error intentionally ignored.
		// TODO: remove this once https://github.com/knative/serving/issues/2018 is resolved
		deleteSingleResource(kc, "horizontalpodautoscaler.autoscaling", "istio-pilot")
	}
	return true, nil
}
// waitForIstioComponents polls the pods in the Istio namespace until every
// pod whose name starts with "istio-" is Succeeded or Running with all of
// its containers ready. It prints a dot per attempt, checks every 10 seconds
// for up to 36 attempts, and errors out if the components never become ready.
func waitForIstioComponents(kc *kubectlClient) error {
	fmt.Print("Waiting for the Istio components to start ")
	const maxAttempts = 36
	for attempt := 0; attempt < maxAttempts; attempt++ {
		fmt.Print(".")
		podList, err := kc.kubeClient.CoreV1().Pods(istioNamespace).List(metav1.ListOptions{})
		if err != nil {
			return err
		}
		pending := false
		for _, pod := range podList.Items {
			if !strings.HasPrefix(pod.Name, "istio-") {
				continue
			}
			switch pod.Status.Phase {
			case "Succeeded":
				// Completed jobs count as ready.
			case "Running":
				// A Running pod still needs every container to report ready.
				for _, status := range pod.Status.ContainerStatuses {
					if !status.Ready {
						pending = true
						break
					}
				}
			default:
				// Pending, Failed, Unknown, ...: keep waiting.
				pending = true
			}
			if pending {
				break
			}
		}
		if !pending {
			fmt.Print(" all components are 'Running'\n\n")
			return nil
		}
		time.Sleep(10 * time.Second) // give the pods more time to start
	}
	return errors.New("the Istio components did not start in time")
}
// deleteNamespaces deletes each of the given namespaces via kubectl.
// Deletion is best-effort: a "NotFound" failure is reported as such, any
// other kubectl output is echoed, and no error is ever returned, so a
// partially-removed cluster does not abort the uninstall flow.
func deleteNamespaces(kc *kubectlClient, namespaces []string) error {
	for _, namespace := range namespaces {
		fmt.Printf("Deleting resources defined in: %s\n", namespace)
		deleteLog, err := kc.kubeCtl.Exec([]string{"delete", "namespace", namespace})
		if err != nil {
			if strings.Contains(deleteLog, "NotFound") {
				fmt.Printf("Namespace \"%s\" was not found\n", namespace)
			} else {
				// Surface kubectl's output but keep going with the rest.
				fmt.Printf("%s", deleteLog)
			}
		}
	}
	// Errors above are deliberately swallowed; always report success.
	return nil
}
// deleteSingleResource deletes one named resource of the given type.
// kubectl output is printed unless the failure was "NotFound"; note that the
// kubectl error is returned even in the NotFound case (callers in this file
// ignore the return value).
func deleteSingleResource(kc *kubectlClient, resourceType string, name string) error {
	fmt.Printf("Deleting %s/%s resource\n", resourceType, name)
	deleteLog, err := kc.kubeCtl.Exec([]string{"delete", resourceType, name})
	if err != nil {
		if !strings.Contains(deleteLog, "NotFound") {
			fmt.Printf("%s", deleteLog)
		}
	}
	return err
}
// deleteClusterResources deletes every cluster-scoped resource of the given
// type whose name starts with prefix (e.g. all "istio-" clusterroles).
// It returns an error when listing or deleting fails; kubectl's output is
// echoed on a failed delete.
func deleteClusterResources(kc *kubectlClient, resourceType string, prefix string) error {
	fmt.Printf("Deleting %ss prefixed with %s\n", resourceType, prefix)
	resourceList, err := kc.kubeCtl.Exec([]string{"get", resourceType, "-ocustom-columns=name:metadata.name"})
	if err != nil {
		return err
	}
	// One resource name per line; the header line and any trailing blank
	// line never match a real prefix. (Fixed: the loop variable previously
	// shadowed the slice it ranged over, and the already-string output was
	// redundantly converted with string().)
	var resourcesToDelete []string
	for _, name := range strings.Split(resourceList, "\n") {
		if strings.HasPrefix(name, prefix) {
			resourcesToDelete = append(resourcesToDelete, name)
		}
	}
	if len(resourcesToDelete) == 0 {
		return nil
	}
	resourceLog, err := kc.kubeCtl.Exec(append([]string{"delete", resourceType}, resourcesToDelete...))
	if err != nil {
		fmt.Printf("%s", resourceLog)
		return err
	}
	return nil
}
// deleteCrds deletes every CustomResourceDefinition whose name ends with
// suffix (e.g. "knative.dev" or "istio.io"). It returns an error when
// listing or deleting fails; kubectl's output is echoed on a failed delete.
func deleteCrds(kc *kubectlClient, suffix string) error {
	fmt.Printf("Deleting CRDs for %s\n", suffix)
	crdList, err := kc.kubeCtl.Exec([]string{"get", "customresourcedefinitions", "-ocustom-columns=name:metadata.name"})
	if err != nil {
		return err
	}
	// One CRD name per line; the header line and any trailing blank line
	// never match a real suffix. (Fixed: the already-string output was
	// redundantly converted with string().)
	var crdsToDelete []string
	for _, crd := range strings.Split(crdList, "\n") {
		if strings.HasSuffix(crd, suffix) {
			crdsToDelete = append(crdsToDelete, crd)
		}
	}
	if len(crdsToDelete) == 0 {
		return nil
	}
	crdLog, err := kc.kubeCtl.Exec(append([]string{"delete", "customresourcedefinition"}, crdsToDelete...))
	if err != nil {
		fmt.Printf("%s", crdLog)
		return err
	}
	return nil
}
// checkNamespacesExists returns how many of the given namespaces currently
// exist in the cluster (any status other than "'NotFound'" counts).
// On a lookup failure it returns the count accumulated so far and the error.
func checkNamespacesExists(kc *kubectlClient, names []string) (int, error) {
	count := 0
	for _, name := range names {
		status, err := getNamespaceStatus(kc, name)
		if err != nil {
			return count, err
		}
		if status != "'NotFound'" {
			// BUG FIX: this previously read `count = +1`, which assigned the
			// constant 1 instead of incrementing, capping the result at 1.
			count++
		}
	}
	return count, nil
}
// ensureNotTerminating returns an error if any of the given namespaces is
// currently in the 'Terminating' phase; message is appended to the error to
// tell the user what to do about it.
func ensureNotTerminating(kc *kubectlClient, names []string, message string) error {
	for _, name := range names {
		status, err := getNamespaceStatus(kc, name)
		if err != nil {
			return err
		}
		if status == "'Terminating'" {
			// fmt.Errorf instead of errors.New(fmt.Sprintf(...)) — same
			// message, idiomatic construction (staticcheck S1028).
			return fmt.Errorf("The %s namespace is currently 'Terminating'. %s", name, message)
		}
	}
	return nil
}
// getNamespaceStatus returns the namespace phase as printed by kubectl's
// jsonpath output — the single quotes are part of the returned value (e.g.
// "'Terminating'"). A namespace that does not exist is reported as
// "'NotFound'" with a nil error; any other kubectl failure is returned as-is.
func getNamespaceStatus(kc *kubectlClient, name string) (string, error) {
	nsLog, err := kc.kubeCtl.Exec([]string{"get", "namespace", name, "-o", "jsonpath='{.status.phase}'"})
	if err != nil {
		if strings.Contains(nsLog, "NotFound") {
			return "'NotFound'", nil
		}
		return "", err
	}
	return nsLog, nil
}
// confirm prints the prompt s followed by "[y/N]" and reads one line from
// stdin. It returns true only when the trimmed, lower-cased answer starts
// with 'y'; an empty answer (just a newline) defaults to no.
func confirm(s string) (bool, error) {
	fmt.Printf("%s [y/N]: ", s)
	line, err := bufio.NewReader(os.Stdin).ReadString('\n')
	if err != nil {
		return false, err
	}
	if len(line) < 2 {
		// Only the newline was typed: take the default "no".
		return false, nil
	}
	normalized := strings.ToLower(strings.TrimSpace(line))
	return normalized[0] == 'y', nil
}
|
package domain
import (
"errors"
"io/ioutil"
"net/http"
"github.com/vardius/go-api-boilerplate/pkg/http/response"
"github.com/vardius/gorouter"
)
// ErrEmptyRequestBody is returned when a request has an empty body.
var ErrEmptyRequestBody = errors.New("Empty request body")

// ErrInvalidURLParams is returned when a request has invalid or missing URL parameters.
var ErrInvalidURLParams = errors.New("Invalid request URL params")
// dispatcher is an http.Handler that forwards raw request bodies to a
// command bus as commands.
type dispatcher struct {
	commandBus CommandBus // bus that decoded commands are published on
}
// ServeHTTP publishes the raw request body as a command on the command bus.
// The command name is the concatenation of the "domain" and "command" URL
// parameters. On success it responds 201 Created; on any failure it attaches
// an HTTPError payload to the request context instead.
func (d *dispatcher) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	var e error

	if r.Body == nil {
		// Keyed fields instead of positional composite literals (go vet).
		// NOTE(review): r.WithContext returns a shallow copy of the request
		// and its result is discarded here (and below), so the payload may
		// never reach downstream middleware — confirm against the response
		// package's expectations.
		r.WithContext(response.WithPayload(r, response.HTTPError{
			Code:    http.StatusBadRequest,
			Error:   ErrEmptyRequestBody,
			Message: "Empty request body",
		}))
		return
	}

	params, ok := gorouter.FromContext(r.Context())
	if !ok {
		r.WithContext(response.WithPayload(r, response.HTTPError{
			Code:    http.StatusBadRequest,
			Error:   ErrInvalidURLParams,
			Message: "Invalid URL params",
		}))
		return
	}

	defer r.Body.Close()
	body, e := ioutil.ReadAll(r.Body)
	if e != nil {
		r.WithContext(response.WithPayload(r, response.HTTPError{
			Code:    http.StatusBadRequest,
			Error:   e,
			Message: "Invalid request body",
		}))
		return
	}

	// Publish asynchronously; we block just below until the bus reports the
	// outcome on out.
	out := make(chan error)
	defer close(out)

	go func() {
		d.commandBus.Publish(
			r.Context(),
			params.Value("domain")+params.Value("command"),
			body,
			out,
		)
	}()

	if e = <-out; e != nil {
		r.WithContext(response.WithPayload(r, response.HTTPError{
			Code:    http.StatusBadRequest,
			Error:   e,
			Message: "Invalid request",
		}))
		return
	}

	w.WriteHeader(http.StatusCreated)
}
// NewDispatcher creates an http.Handler that dispatches incoming requests
// onto the given command bus.
func NewDispatcher(cb CommandBus) http.Handler {
	d := &dispatcher{commandBus: cb}
	return d
}
Fix composite literals
package domain
import (
"errors"
"io/ioutil"
"net/http"
"github.com/vardius/go-api-boilerplate/pkg/http/response"
"github.com/vardius/gorouter"
)
// ErrEmptyRequestBody is returned when a request has an empty body.
var ErrEmptyRequestBody = errors.New("Empty request body")

// ErrInvalidURLParams is returned when a request has invalid or missing URL parameters.
var ErrInvalidURLParams = errors.New("Invalid request URL params")
// dispatcher is an http.Handler that forwards raw request bodies to a
// command bus as commands.
type dispatcher struct {
	commandBus CommandBus // bus that decoded commands are published on
}
// ServeHTTP publishes the raw request body as a command on the command bus.
// The command name is the concatenation of the "domain" and "command" URL
// parameters. On success it responds 201 Created; on any failure it attaches
// an HTTPError payload to the request context instead.
func (d *dispatcher) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	var e error

	if r.Body == nil {
		// BUG FIX: Message was assigned ErrEmptyRequestBody.Error — the
		// method value, not its result. Call Error() to get the string.
		// NOTE(review): r.WithContext returns a shallow copy of the request
		// and its result is discarded here (and below) — confirm the payload
		// actually reaches downstream middleware.
		r.WithContext(response.WithPayload(r, response.HTTPError{
			Code:    http.StatusBadRequest,
			Error:   ErrEmptyRequestBody,
			Message: ErrEmptyRequestBody.Error(),
		}))
		return
	}

	params, ok := gorouter.FromContext(r.Context())
	if !ok {
		// Same fix as above: call Error() instead of passing the method value.
		r.WithContext(response.WithPayload(r, response.HTTPError{
			Code:    http.StatusBadRequest,
			Error:   ErrInvalidURLParams,
			Message: ErrInvalidURLParams.Error(),
		}))
		return
	}

	defer r.Body.Close()
	body, e := ioutil.ReadAll(r.Body)
	if e != nil {
		r.WithContext(response.WithPayload(r, response.HTTPError{
			Code:    http.StatusBadRequest,
			Error:   e,
			Message: "Invalid request body",
		}))
		return
	}

	// Publish asynchronously; we block just below until the bus reports the
	// outcome on out.
	out := make(chan error)
	defer close(out)

	go func() {
		d.commandBus.Publish(
			r.Context(),
			params.Value("domain")+params.Value("command"),
			body,
			out,
		)
	}()

	if e = <-out; e != nil {
		r.WithContext(response.WithPayload(r, response.HTTPError{
			Code:    http.StatusBadRequest,
			Error:   e,
			Message: "Invalid request",
		}))
		return
	}

	w.WriteHeader(http.StatusCreated)
}
// NewDispatcher creates an http.Handler that dispatches incoming requests
// onto the given command bus.
func NewDispatcher(cb CommandBus) http.Handler {
	d := &dispatcher{commandBus: cb}
	return d
}
|
package install
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/apprenda/kismatic-platform/pkg/tls"
"github.com/cloudflare/cfssl/csr"
)
// The PKI provides a way for generating certificates for the cluster described by the Plan.
type PKI interface {
	GenerateClusterCerts(p *Plan) error
}

// LocalPKI is a file-based PKI that writes generated keys and certificates
// as PEM files into DestinationDir.
type LocalPKI struct {
	CACsr string // path to the CSR used to create the CA certificate
	CAConfigFile string // CA signing configuration file
	CASigningProfile string // signing profile within CAConfigFile
	DestinationDir string // directory the PEM files are written to
	Log io.Writer // progress output; nil defaults to ioutil.Discard
}
// GenerateClusterCerts creates a Certificate Authority and certificates
// for all nodes on the cluster, plus a client certificate for the "admin"
// user. All keys and certs are written to lp.DestinationDir.
func (lp *LocalPKI) GenerateClusterCerts(p *Plan) error {
	if lp.Log == nil {
		lp.Log = ioutil.Discard
	}
	// First, generate a CA
	key, cert, err := tls.NewCACert(lp.CACsr)
	if err != nil {
		return fmt.Errorf("failed to create CA Cert: %v", err)
	}
	err = lp.writeFiles(key, cert, "ca")
	if err != nil {
		return fmt.Errorf("error writing CA files: %v", err)
	}
	ca := &tls.CA{
		Key:        key,
		Cert:       cert,
		ConfigFile: lp.CAConfigFile,
		Profile:    lp.CASigningProfile,
	}
	// Add kubernetes service IP to certificates
	kubeServiceIP, err := getKubernetesServiceIP(p)
	if err != nil {
		return fmt.Errorf("Error getting kubernetes service IP: %v", err)
	}
	kubeDNSServiceIP, err := getDNSServiceIP(p)
	if err != nil {
		return fmt.Errorf("Error getting DNS service IP: %v", err)
	}
	// Every node certificate is valid for these names in addition to the
	// node's own hostname and IPs.
	defaultCertHosts := []string{
		p.Cluster.Name,
		p.Cluster.Name + ".default",
		p.Cluster.Name + ".default.svc",
		p.Cluster.Name + ".default.svc.cluster.local",
		"127.0.0.1",
		kubeServiceIP,
		kubeDNSServiceIP,
	}
	// Then, create certs for all nodes. A node may belong to more than one
	// group (etcd/master/worker).
	nodes := []Node{}
	nodes = append(nodes, p.Etcd.Nodes...)
	nodes = append(nodes, p.Master.Nodes...)
	nodes = append(nodes, p.Worker.Nodes...)
	generated := map[string]bool{}
	for _, n := range nodes {
		// BUG FIX: previously certs were regenerated (and the files
		// rewritten) for every group a node appeared in; generate only once
		// per host.
		if generated[n.Host] {
			continue
		}
		generated[n.Host] = true
		fmt.Fprintf(lp.Log, "Generating certificates for %q\n", n.Host)
		key, cert, err := generateNodeCert(p, &n, ca, defaultCertHosts)
		if err != nil {
			return fmt.Errorf("error during cluster cert generation: %v", err)
		}
		err = lp.writeFiles(key, cert, n.Host)
		if err != nil {
			return fmt.Errorf("error writing cert files for host %q: %v", n.Host, err)
		}
	}
	// Finally, create cert for user `admin`
	adminUser := "admin"
	fmt.Fprintf(lp.Log, "Generating certificates for user %q\n", adminUser)
	adminKey, adminCert, err := generateClientCert(p, adminUser, ca)
	if err != nil {
		return fmt.Errorf("error during admin cert generation: %v", err)
	}
	err = lp.writeFiles(adminKey, adminCert, adminUser)
	if err != nil {
		return fmt.Errorf("error writing cert files for user %q: %v", adminUser, err)
	}
	return nil
}
// writeFiles stores a key/certificate pair in lp.DestinationDir as
// "<name>-key.pem" (owner read/write only) and "<name>.pem" (world-readable),
// creating the destination directory if it does not exist yet.
func (lp *LocalPKI) writeFiles(key, cert []byte, name string) error {
	// Create destination dir if it doesn't exist
	if _, err := os.Stat(lp.DestinationDir); os.IsNotExist(err) {
		err := os.Mkdir(lp.DestinationDir, 0744)
		if err != nil {
			return fmt.Errorf("error creating destination dir: %v", err)
		}
	}
	// Write private key with read-only for user
	keyName := fmt.Sprintf("%s-key.pem", name)
	dest := filepath.Join(lp.DestinationDir, keyName)
	err := ioutil.WriteFile(dest, key, 0600)
	if err != nil {
		return fmt.Errorf("error writing private key: %v", err)
	}
	// Write cert
	certName := fmt.Sprintf("%s.pem", name)
	dest = filepath.Join(lp.DestinationDir, certName)
	err = ioutil.WriteFile(dest, cert, 0644)
	if err != nil {
		return fmt.Errorf("error writing certificate: %v", err)
	}
	return nil
}
// generateNodeCert creates a key/certificate pair for node n, signed by ca.
// The certificate is valid for every name in initialHostList plus the node's
// hostname, internal IP, and public IP.
func generateNodeCert(p *Plan, n *Node, ca *tls.CA, initialHostList []string) (key, cert []byte, err error) {
	// NOTE(review): append may reuse initialHostList's backing array when it
	// has spare capacity; the caller currently builds it with a slice
	// literal (len == cap), so this allocates a fresh slice — re-verify if
	// the caller changes.
	hosts := append(initialHostList, n.Host, n.InternalIP, n.IP)
	req := csr.CertificateRequest{
		CN: p.Cluster.Name,
		// 2048-bit RSA key for the node.
		KeyRequest: &csr.BasicKeyRequest{
			A: "rsa",
			S: 2048,
		},
		Hosts: hosts,
		Names: []csr.Name{
			{
				C: p.Cluster.Certificates.LocationCountry,
				ST: p.Cluster.Certificates.LocationState,
				L: p.Cluster.Certificates.LocationCity,
			},
		},
	}
	key, cert, err = tls.GenerateNewCertificate(ca, req)
	if err != nil {
		return nil, nil, fmt.Errorf("error generating certs for node %q: %v", n.Host, err)
	}
	return key, cert, err
}
// generateClientCert creates a key/certificate pair for a client user,
// signed by ca. The CN is the user name and no SAN hosts are included.
func generateClientCert(p *Plan, user string, ca *tls.CA) (key, cert []byte, err error) {
	req := csr.CertificateRequest{
		CN: user,
		// 2048-bit RSA key for the client.
		KeyRequest: &csr.BasicKeyRequest{
			A: "rsa",
			S: 2048,
		},
		Hosts: []string{},
		Names: []csr.Name{
			{
				C: p.Cluster.Certificates.LocationCountry,
				ST: p.Cluster.Certificates.LocationState,
				L: p.Cluster.Certificates.LocationCity,
			},
		},
	}
	key, cert, err = tls.GenerateNewCertificate(ca, req)
	if err != nil {
		return nil, nil, fmt.Errorf("error generating certs for user %q: %v", user, err)
	}
	return key, cert, err
}
No Ticket: Generate certs only once for each node
package install
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/apprenda/kismatic-platform/pkg/tls"
"github.com/cloudflare/cfssl/csr"
)
// The PKI provides a way for generating certificates for the cluster described by the Plan.
type PKI interface {
	GenerateClusterCerts(p *Plan) error
}

// LocalPKI is a file-based PKI that writes generated keys and certificates
// as PEM files into DestinationDir.
type LocalPKI struct {
	CACsr string // path to the CSR used to create the CA certificate
	CAConfigFile string // CA signing configuration file
	CASigningProfile string // signing profile within CAConfigFile
	DestinationDir string // directory the PEM files are written to
	Log io.Writer // progress output; nil defaults to ioutil.Discard
}
// GenerateClusterCerts creates a Certificate Authority and Certificates
// for all nodes on the cluster, plus a client certificate for the "admin"
// user. All keys and certs are written to lp.DestinationDir.
func (lp *LocalPKI) GenerateClusterCerts(p *Plan) error {
	if lp.Log == nil {
		lp.Log = ioutil.Discard
	}
	// First, generate a CA
	key, cert, err := tls.NewCACert(lp.CACsr)
	if err != nil {
		return fmt.Errorf("failed to create CA Cert: %v", err)
	}
	err = lp.writeFiles(key, cert, "ca")
	if err != nil {
		return fmt.Errorf("error writing CA files: %v", err)
	}
	ca := &tls.CA{
		Key: key,
		Cert: cert,
		ConfigFile: lp.CAConfigFile,
		Profile: lp.CASigningProfile,
	}
	// Add kubernetes service IP to certificates
	kubeServiceIP, err := getKubernetesServiceIP(p)
	if err != nil {
		return fmt.Errorf("Error getting kubernetes service IP: %v", err)
	}
	kubeDNSServiceIP, err := getDNSServiceIP(p)
	if err != nil {
		return fmt.Errorf("Error getting DNS service IP: %v", err)
	}
	// Every node certificate is valid for these names in addition to the
	// node's own hostname and IPs.
	defaultCertHosts := []string{
		p.Cluster.Name,
		p.Cluster.Name + ".default",
		p.Cluster.Name + ".default.svc",
		p.Cluster.Name + ".default.svc.cluster.local",
		"127.0.0.1",
		kubeServiceIP,
		kubeDNSServiceIP,
	}
	// Then, create certs for all nodes
	nodes := []Node{}
	nodes = append(nodes, p.Etcd.Nodes...)
	nodes = append(nodes, p.Master.Nodes...)
	nodes = append(nodes, p.Worker.Nodes...)
	seenNodes := []string{}
	for _, n := range nodes {
		// Only generate certs once for each node, nodes can be in more than one group
		if contains(seenNodes, n.Host) {
			continue
		}
		seenNodes = append(seenNodes, n.Host)
		fmt.Fprintf(lp.Log, "Generating certificates for %q\n", n.Host)
		key, cert, err := generateNodeCert(p, &n, ca, defaultCertHosts)
		if err != nil {
			return fmt.Errorf("error during cluster cert generation: %v", err)
		}
		err = lp.writeFiles(key, cert, n.Host)
		if err != nil {
			return fmt.Errorf("error writing cert files for host %q: %v", n.Host, err)
		}
	}
	// Finally, create cert for user `admin`
	adminUser := "admin"
	fmt.Fprintf(lp.Log, "Generating certificates for user %q\n", adminUser)
	adminKey, adminCert, err := generateClientCert(p, adminUser, ca)
	if err != nil {
		return fmt.Errorf("error during admin cert generation: %v", err)
	}
	err = lp.writeFiles(adminKey, adminCert, adminUser)
	if err != nil {
		return fmt.Errorf("error writing cert files for user %q: %v", adminUser, err)
	}
	return nil
}
// writeFiles stores a key/certificate pair in lp.DestinationDir as
// "<name>-key.pem" (owner read/write only) and "<name>.pem" (world-readable),
// creating the destination directory if it does not exist yet.
func (lp *LocalPKI) writeFiles(key, cert []byte, name string) error {
	// Ensure the destination directory exists.
	if _, err := os.Stat(lp.DestinationDir); os.IsNotExist(err) {
		if err := os.Mkdir(lp.DestinationDir, 0744); err != nil {
			return fmt.Errorf("error creating destination dir: %v", err)
		}
	}
	// The private key is readable by the owner only.
	keyPath := filepath.Join(lp.DestinationDir, fmt.Sprintf("%s-key.pem", name))
	if err := ioutil.WriteFile(keyPath, key, 0600); err != nil {
		return fmt.Errorf("error writing private key: %v", err)
	}
	// The certificate is world-readable.
	certPath := filepath.Join(lp.DestinationDir, fmt.Sprintf("%s.pem", name))
	if err := ioutil.WriteFile(certPath, cert, 0644); err != nil {
		return fmt.Errorf("error writing certificate: %v", err)
	}
	return nil
}
// generateNodeCert creates a key/certificate pair for node n, signed by ca.
// The certificate covers every entry of initialHostList plus the node's
// hostname and both of its IP addresses.
func generateNodeCert(p *Plan, n *Node, ca *tls.CA, initialHostList []string) (key, cert []byte, err error) {
	certHosts := append(initialHostList, n.Host, n.InternalIP, n.IP)
	subject := csr.Name{
		C:  p.Cluster.Certificates.LocationCountry,
		ST: p.Cluster.Certificates.LocationState,
		L:  p.Cluster.Certificates.LocationCity,
	}
	request := csr.CertificateRequest{
		CN:         p.Cluster.Name,
		KeyRequest: &csr.BasicKeyRequest{A: "rsa", S: 2048}, // 2048-bit RSA
		Hosts:      certHosts,
		Names:      []csr.Name{subject},
	}
	key, cert, err = tls.GenerateNewCertificate(ca, request)
	if err != nil {
		return nil, nil, fmt.Errorf("error generating certs for node %q: %v", n.Host, err)
	}
	return key, cert, err
}
// generateClientCert creates a key/certificate pair for a client user,
// signed by ca. The CN is the user name and no SAN hosts are included.
func generateClientCert(p *Plan, user string, ca *tls.CA) (key, cert []byte, err error) {
	req := csr.CertificateRequest{
		CN: user,
		// 2048-bit RSA key for the client.
		KeyRequest: &csr.BasicKeyRequest{
			A: "rsa",
			S: 2048,
		},
		Hosts: []string{},
		Names: []csr.Name{
			{
				C: p.Cluster.Certificates.LocationCountry,
				ST: p.Cluster.Certificates.LocationState,
				L: p.Cluster.Certificates.LocationCity,
			},
		},
	}
	key, cert, err = tls.GenerateNewCertificate(ca, req)
	if err != nil {
		return nil, nil, fmt.Errorf("error generating certs for user %q: %v", user, err)
	}
	return key, cert, err
}
// contains reports whether e is present in s.
func contains(s []string, e string) bool {
	for i := range s {
		if s[i] == e {
			return true
		}
	}
	return false
}
|
package mount // import "github.com/docker/docker/pkg/mount"
import (
"sort"
"strconv"
"strings"
"github.com/sirupsen/logrus"
)
// mountError records an error from mount or unmount operation
type mountError struct {
op string
source, target string
flags uintptr
data string
err error
}
func (e *mountError) Error() string {
out := e.op + " "
if e.source != "" {
out += e.source + ":" + e.target
} else {
out += e.target
}
if e.flags != uintptr(0) {
out += ", flags: 0x" + strconv.FormatUint(uint64(e.flags), 16)
}
if e.data != "" {
out += ", data: " + e.data
}
out += ": " + e.err.Error()
return out
}
// Cause returns the underlying cause of the error (the raw error the mount
// or unmount operation failed with).
func (e *mountError) Cause() error {
	return e.err
}
// FilterFunc is a type defining a callback function used by parseMountTable
// to filter out unwanted entries. It takes a pointer
// to an Info struct (not fully populated, currently
// only Mountpoint is filled in), and returns two booleans:
//  - skip: true if the entry should be skipped
//  - stop: true if parsing should be stopped after the entry
type FilterFunc func(*Info) (skip, stop bool)
// PrefixFilter discards all entries whose mount points
// do not start with the given prefix. Parsing is never stopped early.
func PrefixFilter(prefix string) FilterFunc {
	return func(m *Info) (bool, bool) {
		if strings.HasPrefix(m.Mountpoint, prefix) {
			return false, false // keep, continue parsing
		}
		return true, false // skip, continue parsing
	}
}
// SingleEntryFilter keeps only the entry whose mount point equals mp and
// stops parsing as soon as it is found.
func SingleEntryFilter(mp string) FilterFunc {
	return func(m *Info) (skip, stop bool) {
		match := m.Mountpoint == mp
		// On a match: keep the entry and stop; otherwise skip and continue.
		return !match, match
	}
}
// ParentsFilter returns all entries whose mount points
// can be parents of a path specified, discarding others.
// For example, given `/var/lib/docker/something`, entries
// like `/var/lib/docker`, `/var` and `/` are returned.
func ParentsFilter(path string) FilterFunc {
	return func(m *Info) (bool, bool) {
		if strings.HasPrefix(path, m.Mountpoint) {
			return false, false // keep, continue parsing
		}
		return true, false // skip, continue parsing
	}
}
// GetMounts retrieves a list of mounts for the current running process,
// with an optional filter applied (use nil for no filter).
func GetMounts(f FilterFunc) ([]*Info, error) {
	return parseMountTable(f)
}

// Mounted determines if a specified mountpoint has been mounted.
// On Linux it looks at /proc/self/mountinfo.
func Mounted(mountpoint string) (bool, error) {
	entries, err := GetMounts(SingleEntryFilter(mountpoint))
	if err != nil {
		return false, err
	}
	return len(entries) > 0, nil
}

// Mount will mount filesystem according to the specified configuration, on the
// condition that the target path is *not* already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
// flags.go for supported option flags.
// Note: when the target is already mounted (and REMOUNT was not requested),
// Mount silently returns nil without doing anything.
func Mount(device, target, mType, options string) error {
	flag, data := parseOptions(options)
	if flag&REMOUNT != REMOUNT {
		if mounted, err := Mounted(target); err != nil || mounted {
			// err is nil here when the target is simply already mounted.
			return err
		}
	}
	return mount(device, target, mType, uintptr(flag), data)
}

// ForceMount will mount a filesystem according to the specified configuration,
// *regardless* if the target path is not already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
// flags.go for supported option flags.
func ForceMount(device, target, mType, options string) error {
	flag, data := parseOptions(options)
	return mount(device, target, mType, uintptr(flag), data)
}

// Unmount lazily unmounts a filesystem on supported platforms, otherwise
// does a normal unmount.
func Unmount(target string) error {
	return unmount(target, mntDetach)
}
// RecursiveUnmount unmounts the target and all mounts underneath, starting
// with the deepest mount first.
func RecursiveUnmount(target string) error {
	mounts, err := parseMountTable(PrefixFilter(target))
	if err != nil {
		return err
	}

	// Make the deepest mount be first.
	sort.Slice(mounts, func(i, j int) bool {
		return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint)
	})

	for i, m := range mounts {
		logrus.Debugf("Trying to unmount %s", m.Mountpoint)
		if err := unmount(m.Mountpoint, mntDetach); err != nil {
			if i == len(mounts)-1 { // last mount, i.e. the target itself
				// FIX: previously a Mounted() re-check could swallow this
				// error, but if umount failed the mount is almost certainly
				// still there — just report the failure.
				return err
			}
			// This is some submount; tolerate the error for now — the final
			// unmount will fail if this was a real problem.
			logrus.WithError(err).Warnf("Failed to unmount submount %s", m.Mountpoint)
		}
		logrus.Debugf("Unmounted %s", m.Mountpoint)
	}
	return nil
}
pkg/mount.RecursiveUnmount(): don't call Mounted()
Calling mount.Mounted() after an error from Unmount() is
questionable -- if umount failed, the mount is probably
still there anyway, it doesn't make sense to check it.
Signed-off-by: Kir Kolyshkin <3a017b8ddb3f9cf3e4a59978b004111bdeb97f08@gmail.com>
package mount // import "github.com/docker/docker/pkg/mount"
import (
"sort"
"strconv"
"strings"
"github.com/sirupsen/logrus"
)
// mountError records an error from a mount or unmount operation together
// with the arguments used, so the message pinpoints what failed.
type mountError struct {
	op             string
	source, target string
	flags          uintptr
	data           string
	err            error
}

// Error renders "<op> [<source>:]<target>[, flags: 0x..][, data: ..]: <err>".
func (e *mountError) Error() string {
	out := e.op + " "
	if e.source != "" {
		out += e.source + ":" + e.target
	} else {
		out += e.target
	}
	if e.flags != uintptr(0) {
		out += ", flags: 0x" + strconv.FormatUint(uint64(e.flags), 16)
	}
	if e.data != "" {
		out += ", data: " + e.data
	}
	out += ": " + e.err.Error()
	return out
}

// Cause returns the underlying cause of the error (the raw error the mount
// or unmount operation failed with).
func (e *mountError) Cause() error {
	return e.err
}
// FilterFunc is a type defining a callback function used by parseMountTable
// to filter out unwanted entries. It takes a pointer
// to an Info struct (not fully populated, currently
// only Mountpoint is filled in), and returns two booleans:
//  - skip: true if the entry should be skipped
//  - stop: true if parsing should be stopped after the entry
type FilterFunc func(*Info) (skip, stop bool)

// PrefixFilter discards all entries whose mount points
// do not start with a prefix specified. Parsing is never stopped early.
func PrefixFilter(prefix string) FilterFunc {
	return func(m *Info) (bool, bool) {
		skip := !strings.HasPrefix(m.Mountpoint, prefix)
		return skip, false
	}
}

// SingleEntryFilter looks for a specific entry and stops parsing as soon as
// it is found.
func SingleEntryFilter(mp string) FilterFunc {
	return func(m *Info) (bool, bool) {
		if m.Mountpoint == mp {
			return false, true // don't skip, stop now
		}
		return true, false // skip, keep going
	}
}

// ParentsFilter returns all entries whose mount points
// can be parents of a path specified, discarding others.
// For example, given `/var/lib/docker/something`, entries
// like `/var/lib/docker`, `/var` and `/` are returned.
func ParentsFilter(path string) FilterFunc {
	return func(m *Info) (bool, bool) {
		skip := !strings.HasPrefix(path, m.Mountpoint)
		return skip, false
	}
}
// GetMounts retrieves a list of mounts for the current running process,
// with an optional filter applied (use nil for no filter).
func GetMounts(f FilterFunc) ([]*Info, error) {
	return parseMountTable(f)
}

// Mounted determines if a specified mountpoint has been mounted.
// On Linux it looks at /proc/self/mountinfo.
func Mounted(mountpoint string) (bool, error) {
	entries, err := GetMounts(SingleEntryFilter(mountpoint))
	if err != nil {
		return false, err
	}
	return len(entries) > 0, nil
}

// Mount will mount filesystem according to the specified configuration, on the
// condition that the target path is *not* already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
// flags.go for supported option flags.
// Note: when the target is already mounted (and REMOUNT was not requested),
// Mount silently returns nil without doing anything.
func Mount(device, target, mType, options string) error {
	flag, data := parseOptions(options)
	if flag&REMOUNT != REMOUNT {
		if mounted, err := Mounted(target); err != nil || mounted {
			// err is nil here when the target is simply already mounted.
			return err
		}
	}
	return mount(device, target, mType, uintptr(flag), data)
}

// ForceMount will mount a filesystem according to the specified configuration,
// *regardless* if the target path is not already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
// flags.go for supported option flags.
func ForceMount(device, target, mType, options string) error {
	flag, data := parseOptions(options)
	return mount(device, target, mType, uintptr(flag), data)
}

// Unmount lazily unmounts a filesystem on supported platforms, otherwise
// does a normal unmount.
func Unmount(target string) error {
	return unmount(target, mntDetach)
}
// RecursiveUnmount unmounts the target and all mounts underneath, starting
// with the deepest mount first. A failed submount unmount is only logged;
// a failure on the target itself (the last, shallowest entry) is returned.
func RecursiveUnmount(target string) error {
	mounts, err := parseMountTable(PrefixFilter(target))
	if err != nil {
		return err
	}

	// Sort so that the deepest mount points come first.
	sort.Slice(mounts, func(i, j int) bool {
		return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint)
	})

	last := len(mounts) - 1
	for i, m := range mounts {
		logrus.Debugf("Trying to unmount %s", m.Mountpoint)
		if err := unmount(m.Mountpoint, mntDetach); err != nil {
			if i == last {
				// The final entry is the target itself; give up.
				return err
			}
			// Some submount failed; tolerate it for now — the final unmount
			// will fail if this was a real problem.
			logrus.WithError(err).Warnf("Failed to unmount submount %s", m.Mountpoint)
		}
		logrus.Debugf("Unmounted %s", m.Mountpoint)
	}
	return nil
}
|
// +build linux
package mount
import (
"errors"
"fmt"
"strings"
"sync"
"syscall"
log "github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/mount"
)
// Ops defines the interface for keeping track of volume driver mounts.
type Ops interface {
	// String representation of the mount table.
	String() string
	// Load the mount table for all devices that match the prefix. An empty
	// prefix may be provided.
	Load(devPrefix string) error
	// Inspect the mount table for the specified device. ErrEnoent may be returned.
	Inspect(device string) (Info, error)
	// HasMounts returns the number of mounts for the device.
	// BUG FIX: the int result type was missing, so *Matrix (whose HasMounts
	// returns int) could not satisfy this interface.
	HasMounts(devPath string) int
	// Exists returns true if the device is mounted at the specified path.
	// ErrEnoent is returned if the device is not tracked.
	Exists(device, path string) (bool, error)
	// Mount device at mountpoint or increment refcnt if device is already mounted
	// at specified mountpoint.
	// BUG FIX: minor was declared int32 while the implementation takes int.
	Mount(minor int, device, path, fs string, flags uintptr, data string) error
	// Unmount device at mountpoint or decrement refcnt. If device has no
	// mountpoints left after this operation, it is removed from the matrix.
	// ErrEnoent is returned if the device or mountpoint for the device is not found.
	Unmount(device, path string) error
}
// DeviceMap maps a device name to its mount Info.
type DeviceMap map[string]*Info

// PathInfo is a reference counted path.
type PathInfo struct {
	Path string // mountpoint path
	ref int // number of active Mount calls for this path
}

// Info holds the mount state tracked per device.
type Info struct {
	Device string // device name (map key in DeviceMap)
	Minor int // device minor number
	Mountpoint []PathInfo // every path the device is mounted at
	Fs string // filesystem type
}

// Matrix implements Ops and keeps track of active mounts for volume drivers.
type Matrix struct {
	sync.Mutex // guards mounts
	mounts DeviceMap
}
var (
	// ErrEnoent is returned for a non-existent mount point.
	ErrEnoent = errors.New("Mountpath is not mounted")
	// ErrEinval is returned if fields for an entry do not match the
	// existing fields.
	ErrEinval = errors.New("Invalid arguments for mount entry")
)
// New returns a Matrix pre-populated (via Load) with the system mounts whose
// source matches devPrefix.
func New(devPrefix string) (*Matrix, error) {
	m := &Matrix{
		mounts: make(DeviceMap),
	}
	err := m.Load(devPrefix)
	if err != nil {
		return nil, err
	}
	return m, nil
}
// HasMounts returns the number of mountpoints currently recorded for the
// device, or 0 if the device is not tracked.
func (m *Matrix) HasMounts(devPath string) int {
	m.Lock()
	defer m.Unlock()
	v, ok := m.mounts[devPath]
	if !ok {
		return 0
	}
	return len(v.Mountpoint)
}
// Exists scans the mountpaths recorded for the given device and reports
// whether path is one of them. ErrEnoent is returned if the device itself
// is not tracked.
func (m *Matrix) Exists(devPath string, path string) (bool, error) {
	m.Lock()
	defer m.Unlock()
	info, ok := m.mounts[devPath]
	if !ok {
		return false, ErrEnoent
	}
	for i := range info.Mountpoint {
		if info.Mountpoint[i].Path == path {
			return true, nil
		}
	}
	return false, nil
}
// Mount records a new mountpoint for the specified device, performing the
// actual mount if needed. If the device is already mounted at path, only the
// reference count is incremented. ErrEinval is returned when the requested
// filesystem does not match the one recorded for the device.
func (m *Matrix) Mount(minor int, device, path, fs string, flags uintptr, data string) error {
	m.Lock()
	defer m.Unlock()
	info, ok := m.mounts[device]
	if !ok {
		info = &Info{
			Device:     device,
			Mountpoint: make([]PathInfo, 0),
			Minor:      minor,
			Fs:         fs,
		}
		m.mounts[device] = info
	}
	// Validate input params
	if fs != info.Fs {
		log.Warnf("%s Existing mountpoint has fs %q cannot change to %q",
			device, info.Fs, fs)
		return ErrEinval
	}
	// If the mountpoint already exists, just bump its refcount.
	// BUG FIX: the previous code ranged by value and incremented ref on a
	// copy of the PathInfo, so the stored refcount never changed.
	for i := range info.Mountpoint {
		if info.Mountpoint[i].Path == path {
			info.Mountpoint[i].ref++
			return nil
		}
	}
	// The device is not mounted at path, mount it and add to its mountpoints.
	err := syscall.Mount(device, path, fs, flags, data)
	if err != nil {
		return err
	}
	info.Mountpoint = append(info.Mountpoint, PathInfo{Path: path, ref: 1})
	return nil
}
// Unmount device at mountpoint or decrement refcnt. The actual unmount only
// happens when the refcount drops to zero; the device is removed from the
// matrix once it has no mountpoints left. ErrEnoent is returned if the
// device or the mountpoint for the device is not found.
func (m *Matrix) Unmount(device, path string) error {
	m.Lock()
	defer m.Unlock()
	info, ok := m.mounts[device]
	if !ok {
		return ErrEnoent
	}
	for i := range info.Mountpoint {
		if info.Mountpoint[i].Path != path {
			continue
		}
		// BUG FIX: the refcount was previously decremented on a range copy,
		// so the stored count never changed and a device mounted N times at
		// the same path was never actually released (or was released too
		// early when its stored ref was 1).
		info.Mountpoint[i].ref--
		// Unmount only if refcnt is 0
		if info.Mountpoint[i].ref == 0 {
			if err := syscall.Unmount(path, 0); err != nil {
				// The mountpoint is still active; restore the refcount.
				info.Mountpoint[i].ref++
				return err
			}
			// Blow away this mountpoint (order is not preserved).
			last := len(info.Mountpoint) - 1
			info.Mountpoint[i] = info.Mountpoint[last]
			info.Mountpoint = info.Mountpoint[:last]
			// If the device has no more mountpoints, remove it from the map
			if len(info.Mountpoint) == 0 {
				delete(m.mounts, device)
			}
		}
		return nil
	}
	return ErrEnoent
}
// String returns a representation of the mount table for debugging.
// BUG FIX: the previous implementation formatted *m, which copies the
// embedded sync.Mutex (flagged by `go vet` copylocks) and reads the map
// without the lock; format the guarded map while holding the lock instead.
func (m *Matrix) String() string {
	m.Lock()
	defer m.Unlock()
	return fmt.Sprintf("%#v", m.mounts)
}
// Load populates the matrix from the system mount table, considering only
// entries whose source starts with devPrefix. It may be called multiple
// times; mountpoints that are already recorded are not duplicated.
// NOTE(review): Load does not take the lock — today it is only called from
// New before the Matrix is shared; revisit if that changes.
func (m *Matrix) Load(devPrefix string) error {
	info, err := mount.GetMounts()
	if err != nil {
		return err
	}
	for _, v := range info {
		if !strings.HasPrefix(v.Source, devPrefix) {
			continue
		}
		// Renamed from `mount`, which shadowed the imported mount package.
		entry, ok := m.mounts[v.Source]
		if !ok {
			entry = &Info{
				Device:     v.Source,
				Fs:         v.Fstype,
				Minor:      v.Minor,
				Mountpoint: make([]PathInfo, 0),
			}
			m.mounts[v.Source] = entry
		}
		// Allow Load to be called multiple times.
		// BUG FIX: the previous dedupe loop used `continue`, which only
		// advanced the inner loop and never skipped the append, so known
		// mountpoints were re-appended on every Load call.
		known := false
		for _, p := range entry.Mountpoint {
			if p.Path == v.Mountpoint {
				known = true
				break
			}
		}
		if known {
			continue
		}
		// XXX Reconstruct refs.
		entry.Mountpoint = append(entry.Mountpoint, PathInfo{Path: v.Mountpoint, ref: 1})
	}
	return nil
}
expose device->mountinfo
// +build linux
package mount
import (
"errors"
"fmt"
"strings"
"sync"
"syscall"
log "github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/mount"
)
// Ops defines the interface for keeping track of volume driver mounts.
type Ops interface {
	// String representation of the mount table.
	String() string
	// Load the mount table for all devices that match the prefix. An empty
	// prefix may be provided.
	Load(devPrefix string) error
	// Inspect the mount table for the specified device. ErrEnoent may be returned.
	Inspect(device string) (Info, error)
	// HasMounts returns the number of mounts for the device.
	// BUG FIX: the int result type was missing, so *Matrix (whose HasMounts
	// returns int) could not satisfy this interface.
	HasMounts(devPath string) int
	// Exists returns true if the device is mounted at the specified path.
	// ErrEnoent is returned if the device is not tracked.
	Exists(device, path string) (bool, error)
	// Mount device at mountpoint or increment refcnt if device is already mounted
	// at specified mountpoint.
	// BUG FIX: minor was declared int32 while the implementation takes int.
	Mount(minor int, device, path, fs string, flags uintptr, data string) error
	// Unmount device at mountpoint or decrement refcnt. If device has no
	// mountpoints left after this operation, it is removed from the matrix.
	// ErrEnoent is returned if the device or mountpoint for the device is not found.
	Unmount(device, path string) error
}
// DeviceMap maps a device name (e.g. "/dev/sda1") to its mount Info.
type DeviceMap map[string]*Info
// PathInfo is a reference counted path
type PathInfo struct {
	Path string // mountpoint path
	ref  int    // number of Mount calls for this path not yet unmounted
}
// Info records the mount state of a single device.
type Info struct {
	Device     string     // device path, e.g. "/dev/sda1"
	Minor      int        // device minor number
	Mountpoint []PathInfo // active mountpoints with refcounts
	Fs         string     // filesystem type, e.g. "ext4"
}
// Matrix implements Ops and keeps track of active mounts for volume drivers.
// The embedded sync.Mutex guards the mounts map.
type Matrix struct {
	sync.Mutex
	mounts DeviceMap
}
var (
	// ErrEnoent is returned for a non existent mount point
	ErrEnoent = errors.New("Mountpath is not mounted")
	// ErrEinval is returned if the fields of a mount request do not match
	// the existing entry for the device (e.g. a different filesystem type).
	ErrEinval = errors.New("Invalid arguments for mount entry")
)
// New returns a Matrix pre-populated from the system mount table, restricted
// to devices whose source starts with devPrefix.
func New(devPrefix string) (*Matrix, error) {
	matrix := &Matrix{mounts: make(DeviceMap)}
	if err := matrix.Load(devPrefix); err != nil {
		return nil, err
	}
	return matrix, nil
}
// HasMounts returns the number of mountpoints currently tracked for devPath,
// or 0 if the device is unknown.
func (m *Matrix) HasMounts(devPath string) int {
	m.Lock()
	defer m.Unlock()
	if info, ok := m.mounts[devPath]; ok {
		return len(info.Mountpoint)
	}
	return 0
}
// Exists reports whether devPath is mounted at path. ErrEnoent is returned
// when the device is not tracked at all.
func (m *Matrix) Exists(devPath string, path string) (bool, error) {
	m.Lock()
	defer m.Unlock()
	info, ok := m.mounts[devPath]
	if !ok {
		return false, ErrEnoent
	}
	for _, mp := range info.Mountpoint {
		if mp.Path == path {
			return true, nil
		}
	}
	return false, nil
}
// Mount mounts device at path with filesystem fs, or increments the
// reference count if the device is already mounted there.
// ErrEinval is returned if the device is already tracked with a different
// filesystem type.
func (m *Matrix) Mount(minor int, device, path, fs string, flags uintptr, data string) error {
	m.Lock()
	defer m.Unlock()
	info, ok := m.mounts[device]
	if !ok {
		info = &Info{
			Device:     device,
			Mountpoint: make([]PathInfo, 0),
			Minor:      minor,
			Fs:         fs,
		}
		m.mounts[device] = info
	}
	// Validate input params
	if fs != info.Fs {
		log.Warnf("%s Existing mountpoint has fs %q cannot change to %q",
			device, info.Fs, fs)
		return ErrEinval
	}
	// If the mountpoint already exists, just bump its refcount. Index into
	// the slice here: `range` yields a copy, so the previous `p.ref++` on the
	// copy silently dropped the increment.
	for i := range info.Mountpoint {
		if info.Mountpoint[i].Path == path {
			info.Mountpoint[i].ref++
			return nil
		}
	}
	// The device is not mounted at path, mount it and add to its mountpoints.
	if err := syscall.Mount(device, path, fs, flags, data); err != nil {
		return err
	}
	info.Mountpoint = append(info.Mountpoint, PathInfo{Path: path, ref: 1})
	return nil
}
// Unmount unmounts device at path or just decrements its refcount. When the
// last reference is dropped the path is unmounted and removed; when a device
// has no mountpoints left it is removed from the matrix.
// ErrEnoent is returned if the device or the mountpoint is not found.
func (m *Matrix) Unmount(device, path string) error {
	m.Lock()
	defer m.Unlock()
	info, ok := m.mounts[device]
	if !ok {
		return ErrEnoent
	}
	for i := range info.Mountpoint {
		if info.Mountpoint[i].Path != path {
			continue
		}
		// Decrement through the slice index, not a range copy: the previous
		// `p.ref--` modified a copy, so the stored refcount never changed.
		info.Mountpoint[i].ref--
		// Unmount only if refcnt is 0
		if info.Mountpoint[i].ref == 0 {
			if err := syscall.Unmount(path, 0); err != nil {
				return err
			}
			// Blow away this mountpoint (swap-with-last; order not preserved).
			last := len(info.Mountpoint) - 1
			info.Mountpoint[i] = info.Mountpoint[last]
			info.Mountpoint = info.Mountpoint[:last]
			// If the device has no more mountpoints, remove it from the map
			if len(info.Mountpoint) == 0 {
				delete(m.mounts, device)
			}
		}
		return nil
	}
	return ErrEnoent
}
// String returns a Go-syntax representation of the mount table for debugging.
// The mounts map is formatted rather than *m so the embedded sync.Mutex is
// not copied by value (flagged by `go vet` copylocks).
func (m *Matrix) String() string {
	return fmt.Sprintf("%#v", m.mounts)
}
// Load populates the mount table from the system mount table for all devices
// whose source begins with devPrefix. It may be called multiple times;
// mountpoints already tracked for a device are not duplicated.
func (m *Matrix) Load(devPrefix string) error {
	info, err := mount.GetMounts()
	if err != nil {
		return err
	}
	for _, v := range info {
		if !strings.HasPrefix(v.Source, devPrefix) {
			continue
		}
		// Renamed from `mount` to avoid shadowing the imported mount package.
		mnt, ok := m.mounts[v.Source]
		if !ok {
			mnt = &Info{
				Device:     v.Source,
				Fs:         v.Fstype,
				Minor:      v.Minor,
				Mountpoint: make([]PathInfo, 0),
			}
			m.mounts[v.Source] = mnt
		}
		// Allow Load to be called multiple times: skip mountpoints we already
		// track. The previous inner loop used `continue`, which only advanced
		// the inner loop and therefore never suppressed the duplicate append.
		known := false
		for _, p := range mnt.Mountpoint {
			if p.Path == v.Mountpoint {
				known = true
				break
			}
		}
		if known {
			continue
		}
		// XXX Reconstruct refs.
		mnt.Mountpoint = append(mnt.Mountpoint, PathInfo{Path: v.Mountpoint, ref: 1})
	}
	return nil
}
// Inspect returns the mountpoints tracked for devPath; an empty slice is
// returned for an unknown device. A copy is returned so callers cannot race
// with mutations of the internal slice after the lock is released.
func (m *Matrix) Inspect(devPath string) []PathInfo {
	m.Lock()
	defer m.Unlock()
	v, ok := m.mounts[devPath]
	if !ok {
		return []PathInfo{}
	}
	out := make([]PathInfo, len(v.Mountpoint))
	copy(out, v.Mountpoint)
	return out
}
|
// Copyright 2017 The Kubernetes Authors All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package notes
import (
"context"
"fmt"
"regexp"
"strconv"
"strings"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/google/go-github/github"
"github.com/pkg/errors"
)
// ReleaseNote is the type that represents the total sum of all the information
// we've gathered about a single release note.
type ReleaseNote struct {
	// Commit is the SHA of the commit which is the source of this note. This is
	// also effectively a unique ID for release notes.
	Commit string `json:"commit"`
	// Text is the actual content of the release note
	Text string `json:"text"`
	// Markdown is the markdown formatted note
	Markdown string `json:"markdown"`
	// Author is the GitHub username of the commit author
	Author string `json:"author"`
	// AuthorUrl is the GitHub URL of the commit author
	AuthorUrl string `json:"author_url"`
	// PrUrl is a URL to the PR
	PrUrl string `json:"pr_url"`
	// PrNumber is the number of the PR
	PrNumber int `json:"pr_number"`
	// Areas is a list of the labels beginning with area/
	Areas []string `json:"areas,omitempty"`
	// Kinds is a list of the labels beginning with kind/
	Kinds []string `json:"kinds,omitempty"`
	// SIGs is a list of the labels beginning with sig/
	SIGs []string `json:"sigs,omitempty"`
	// Feature indicates whether or not a note will appear as a new feature.
	Feature bool `json:"feature,omitempty"`
	// Duplicate indicates whether or not a note is duplicated across SIGs.
	Duplicate bool `json:"duplicate,omitempty"`
	// ActionRequired indicates whether or not the release-note-action-required
	// label was set on the PR
	ActionRequired bool `json:"action_required,omitempty"`
	// ReleaseVersion tags the note with a release version if specified;
	// omitted from JSON otherwise.
	ReleaseVersion string `json:"release_version,omitempty"`
}
// ReleaseNoteList is a map of PR numbers referencing notes.
// Keying by PR number avoids needless loops when looking notes up, and lets
// new entries efficiently override old ones when merging lists.
type ReleaseNoteList map[int]*ReleaseNote
// GithubApiOption is a type which allows for the expression of API configuration
// via the "functional option" pattern.
// For more information on this pattern, see the following blog post:
// https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
type GithubApiOption func(*githubApiConfig)
// githubApiConfig is a configuration struct that is used to express optional
// configuration for GitHub API requests
type githubApiConfig struct {
	ctx    context.Context // request context; defaults to context.Background()
	org    string          // GitHub organization; defaults to "kubernetes"
	repo   string          // GitHub repository; defaults to "kubernetes"
	branch string          // repository branch; defaults to "master"
}
// WithContext allows the caller to inject a context into GitHub API requests
func WithContext(ctx context.Context) GithubApiOption {
	return func(cfg *githubApiConfig) {
		cfg.ctx = ctx
	}
}
// WithOrg allows the caller to override the GitHub organization for the API
// request. By default, it is usually "kubernetes".
func WithOrg(org string) GithubApiOption {
	return func(cfg *githubApiConfig) {
		cfg.org = org
	}
}
// WithRepo allows the caller to override the GitHub repo for the API
// request. By default, it is usually "kubernetes".
func WithRepo(repo string) GithubApiOption {
	return func(cfg *githubApiConfig) {
		cfg.repo = repo
	}
}
// WithBranch allows the caller to override the repo branch for the API
// request. By default, it is usually "master".
func WithBranch(branch string) GithubApiOption {
	return func(cfg *githubApiConfig) {
		cfg.branch = branch
	}
}
// ListReleaseNotes produces a list of fully contextualized release notes
// starting from a given commit SHA and ending at a given commit SHA.
// Commits not authored by k8s-ci-robot are skipped, notes whose text is
// exactly "NONE" are dropped, and notes with identical text are deduplicated
// (the first PR seen wins). Per-commit extraction errors are logged and the
// commit is skipped rather than failing the whole listing.
func ListReleaseNotes(
	client *github.Client,
	logger log.Logger,
	branch,
	start,
	end,
	relVer string,
	opts ...GithubApiOption,
) (ReleaseNoteList, error) {
	commits, err := ListCommitsWithNotes(client, logger, branch, start, end, opts...)
	if err != nil {
		return nil, err
	}
	// dedupeCache records note texts already emitted so the same note attached
	// to multiple commits appears only once.
	dedupeCache := map[string]struct{}{}
	notes := make(ReleaseNoteList)
	for _, commit := range commits {
		// Only merge commits created by the CI robot are considered.
		if commit.GetAuthor().GetLogin() != "k8s-ci-robot" {
			continue
		}
		note, err := ReleaseNoteFromCommit(commit, client, relVer, opts...)
		if err != nil {
			level.Error(logger).Log(
				"err", err,
				"msg", "error getting the release note from commit while listing release notes",
				"sha", commit.GetSHA(),
			)
			continue
		}
		// An explicit "NONE" means the PR intentionally carries no note.
		if strings.TrimSpace(note.Text) == "NONE" {
			continue
		}
		if _, ok := dedupeCache[note.Text]; !ok {
			notes[note.PrNumber] = note
			dedupeCache[note.Text] = struct{}{}
		}
	}
	return notes, nil
}
// noteStanzaRegexps match the ```release-note ``` stanza in PR bodies and
// commit messages. Compiled once at package init instead of on every call.
// (?s) is needed for '.' to match newlines; by default it does not.
var noteStanzaRegexps = []*regexp.Regexp{
	regexp.MustCompile("(?s)```release-note\\r\\n(?P<note>.+)\\r\\n```"),
	regexp.MustCompile("(?s)```dev-release-note\\r\\n(?P<note>.+)"),
	regexp.MustCompile("(?s)```\\r\\n(?P<note>.+)\\r\\n```"),
	regexp.MustCompile("(?s)```release-note\n(?P<note>.+)\n```"),
}

// NoteTextFromString returns the text of the release note given a string which
// may contain the commit message, the PR description, etc.
// This is generally the content inside the ```release-note ``` stanza.
// Carriage returns, "[action required]" markers, and list bullets are
// stripped from the extracted text.
func NoteTextFromString(s string) (string, error) {
	for _, exp := range noteStanzaRegexps {
		match := exp.FindStringSubmatch(s)
		if len(match) == 0 {
			continue
		}
		// Collect named submatches; only "note" is defined in these patterns.
		result := map[string]string{}
		for i, name := range exp.SubexpNames() {
			if i != 0 && name != "" {
				result[name] = match[i]
			}
		}
		note := strings.Replace(result["note"], "\r", "", -1)
		note = stripActionRequired(note)
		note = stripStar(note)
		return note, nil
	}
	return "", errors.New("no matches found when parsing note text from commit string")
}
// ReleaseNoteFromCommit produces a full contextualized release note given a
// GitHub commit API resource: it resolves the PR the commit came from,
// extracts the release-note stanza from the PR body, and decorates the note
// with author, URL, and label (kind/sig/area) metadata.
func ReleaseNoteFromCommit(commit *github.RepositoryCommit, client *github.Client, relVer string, opts ...GithubApiOption) (*ReleaseNote, error) {
	c := configFromOpts(opts...)
	pr, err := PRFromCommit(client, commit, opts...)
	if err != nil {
		return nil, errors.Wrapf(err, "error parsing release note from commit %s", commit.GetSHA())
	}
	text, err := NoteTextFromString(pr.GetBody())
	if err != nil {
		return nil, err
	}
	author := pr.GetUser().GetLogin()
	authorUrl := fmt.Sprintf("https://github.com/%s", author)
	prUrl := fmt.Sprintf("https://github.com/%s/%s/pull/%d", c.org, c.repo, pr.GetNumber())
	IsFeature := HasString(LabelsWithPrefix(pr, "kind"), "feature")
	IsDuplicate := false
	sigsListPretty := prettifySigList(LabelsWithPrefix(pr, "sig"))
	noteSuffix := ""
	// Action-required and feature notes get a "Courtesy of <SIGs>" suffix;
	// other notes spanning more than one SIG are flagged as duplicates.
	if IsActionRequired(pr) || IsFeature {
		if sigsListPretty != "" {
			noteSuffix = fmt.Sprintf("Courtesy of %s", sigsListPretty)
		}
	} else if len(LabelsWithPrefix(pr, "sig")) > 1 {
		IsDuplicate = true
	}
	// Render as "<text> ([#N](pr-url), [@author](author-url))".
	markdown := fmt.Sprintf("%s ([#%d](%s), [@%s](%s))", text, pr.GetNumber(), prUrl, author, authorUrl)
	if noteSuffix != "" {
		markdown = fmt.Sprintf("%s %s", markdown, noteSuffix)
	}
	return &ReleaseNote{
		Commit:         commit.GetSHA(),
		Text:           text,
		Markdown:       markdown,
		Author:         author,
		AuthorUrl:      authorUrl,
		PrUrl:          prUrl,
		PrNumber:       pr.GetNumber(),
		SIGs:           LabelsWithPrefix(pr, "sig"),
		Kinds:          LabelsWithPrefix(pr, "kind"),
		Areas:          LabelsWithPrefix(pr, "area"),
		Feature:        IsFeature,
		Duplicate:      IsDuplicate,
		ActionRequired: IsActionRequired(pr),
		ReleaseVersion: relVer,
	}, nil
}
// ListCommits lists all commits on branch between the start and end commit
// SHAs, bounded by the commit timestamps of those SHAs, paging through the
// GitHub API 100 commits at a time.
func ListCommits(client *github.Client, branch, start, end string, opts ...GithubApiOption) ([]*github.RepositoryCommit, error) {
	c := configFromOpts(opts...)
	c.branch = branch
	startCommit, _, err := client.Git.GetCommit(c.ctx, c.org, c.repo, start)
	if err != nil {
		return nil, err
	}
	endCommit, _, err := client.Git.GetCommit(c.ctx, c.org, c.repo, end)
	if err != nil {
		return nil, err
	}
	clo := &github.CommitsListOptions{
		SHA: c.branch,
		// NOTE(review): assumes Committer.Date is always populated on commit
		// objects returned by the API — confirm, a nil Date would panic here.
		Since: *startCommit.Committer.Date,
		Until: *endCommit.Committer.Date,
		ListOptions: github.ListOptions{
			Page:    1,
			PerPage: 100,
		},
	}
	commits, resp, err := client.Repositories.ListCommits(c.ctx, c.org, c.repo, clo)
	if err != nil {
		return nil, err
	}
	// Fetch any remaining pages; resp.LastPage is 0 when everything fit in
	// the first page, so the loop body is skipped in that case.
	for clo.ListOptions.Page++; clo.ListOptions.Page <= resp.LastPage; clo.ListOptions.Page++ {
		commitPage, _, err := client.Repositories.ListCommits(c.ctx, c.org, c.repo, clo)
		if err != nil {
			return nil, err
		}
		commits = append(commits, commitPage...)
	}
	return commits, nil
}
// ListCommitsWithNotes lists commits that have release notes starting from a
// given commit SHA and ending at a given commit SHA. This function is similar
// to ListCommits except that only commits with tagged release notes are
// returned.
func ListCommitsWithNotes(
	client *github.Client,
	logger log.Logger,
	branch,
	start,
	end string,
	opts ...GithubApiOption,
) ([]*github.RepositoryCommit, error) {
	filteredCommits := []*github.RepositoryCommit{}
	commits, err := ListCommits(client, branch, start, end, opts...)
	if err != nil {
		return nil, err
	}
	// exclusionFilters is a list of regular expressions that match commits that
	// do NOT contain release notes. Notably, this is all of the variations of
	// "release note none" that appear in the commit log. Hoisted out of the
	// commit loop so the slice is not rebuilt on every iteration.
	exclusionFilters := []string{
		"```release-note\\r\\nNONE",
		"```release-note\\r\\n\\s+NONE",
		"```release-note\\r\\nNONE",
		"```release-note\\r\\n\"NONE\"",
		"```release-note\\r\\nNone",
		"```release-note\\r\\nnone",
		"```release-note\\r\\nN/A",
		"```release-note\\r\\n\\r\\n```",
		"```release-note\\r\\n```",
		"/release-note-none",
		"\\r\\n\\r\\nNONE",
		"```NONE\\r\\n```",
		"```release-note \\r\\nNONE\\r\\n```",
		"NONE\\r\\n```",
		"\\r\\nNone",
		"\\r\\nNONE\\r\\n",
	}
	// Once the known not-release-notes are excluded, these patterns identify
	// bodies that actually carry a release note.
	inclusionFilters := []string{
		"release-note",
		"Does this PR introduce a user-facing change?",
	}
	for _, commit := range commits {
		pr, err := PRFromCommit(client, commit, opts...)
		if err != nil {
			// Commits without a parseable PR reference carry no release note.
			if err.Error() == "no matches found when parsing PR from commit" {
				continue
			}
			// Previously other errors fell through silently and the commit was
			// matched against an empty PR body; log them and skip instead.
			level.Error(logger).Log(
				"err", err,
				"msg", "error getting PR from commit while listing commits with notes",
				"sha", commit.GetSHA(),
			)
			continue
		}
		excluded := false
		for _, filter := range exclusionFilters {
			match, err := regexp.MatchString(filter, pr.GetBody())
			if err != nil {
				return nil, err
			}
			if match {
				excluded = true
				break
			}
		}
		if excluded {
			continue
		}
		for _, filter := range inclusionFilters {
			match, err := regexp.MatchString(filter, pr.GetBody())
			if err != nil {
				return nil, err
			}
			if match {
				filteredCommits = append(filteredCommits, commit)
				// Stop after the first hit so a body matching several
				// inclusion filters is not appended more than once.
				break
			}
		}
	}
	return filteredCommits, nil
}
// PRFromCommit return an API Pull Request struct given a commit struct. This is
// useful for going from a commit log to the PR (which contains useful info such
// as labels). Only "Merge pull request #N" merge-commit messages are
// recognized here.
func PRFromCommit(client *github.Client, commit *github.RepositoryCommit, opts ...GithubApiOption) (*github.PullRequest, error) {
	c := configFromOpts(opts...)
	// Thankfully k8s-merge-robot commits the PR number consistently. If this ever
	// stops being true, this definitely won't work anymore.
	exp := regexp.MustCompile(`Merge pull request #(?P<number>\d+)`)
	match := exp.FindStringSubmatch(*commit.Commit.Message)
	if len(match) == 0 {
		return nil, errors.New("no matches found when parsing PR from commit")
	}
	// Collect named submatches; only "number" is defined in the pattern.
	result := map[string]string{}
	for i, name := range exp.SubexpNames() {
		if i != 0 && name != "" {
			result[name] = match[i]
		}
	}
	number, err := strconv.Atoi(result["number"])
	if err != nil {
		return nil, err
	}
	// Given the PR number that we've now converted to an integer, get the PR from
	// the API
	pr, _, err := client.PullRequests.Get(c.ctx, c.org, c.repo, number)
	return pr, err
}
// LabelsWithPrefix returns the names of all labels on pr that begin with
// prefix, with the leading "prefix/" stripped (e.g. "sig/node" -> "node").
// This pattern is used often in the k/k repo to contextualize release notes
// with kind, sig, and area labels.
func LabelsWithPrefix(pr *github.PullRequest, prefix string) []string {
	labels := []string{}
	for _, l := range pr.Labels {
		name := *l.Name
		if !strings.HasPrefix(name, prefix) {
			continue
		}
		labels = append(labels, strings.TrimPrefix(name, prefix+"/"))
	}
	return labels
}
// IsActionRequired reports whether the release-note-action-required label was
// set on the PR.
func IsActionRequired(pr *github.PullRequest) bool {
	for _, l := range pr.Labels {
		if *l.Name == "release-note-action-required" {
			return true
		}
	}
	return false
}
// filterCommits is a helper that allows you to filter a set of commits by
// applying a set of regular expressions over the commit messages. If include is
// true, only commits that match at least one expression are returned. If include
// is false, only commits that match 0 of the expressions are returned.
// NOTE(review): in include mode the loop below actually requires EVERY filter
// to match (any non-match skips the commit), which contradicts "at least one"
// above — confirm which semantics are intended.
func filterCommits(
	client *github.Client,
	logger log.Logger,
	commits []*github.RepositoryCommit,
	filters []string,
	include bool,
	opts ...GithubApiOption,
) ([]*github.RepositoryCommit, error) {
	filteredCommits := []*github.RepositoryCommit{}
	for _, commit := range commits {
		body := commit.GetCommit().GetMessage()
		// For robot merge commits the interesting text lives in the PR body,
		// not the commit message.
		if commit.GetAuthor().GetLogin() == "k8s-merge-robot" {
			pr, err := PRFromCommit(client, commit, opts...)
			if err != nil {
				level.Info(logger).Log(
					"msg", "error getting PR from k8s-merge-robot commit",
					"err", err,
					"sha", commit.GetSHA(),
				)
				continue
			}
			body = pr.GetBody()
		}
		skip := false
		for _, filter := range filters {
			match, err := regexp.MatchString(filter, body)
			if err != nil {
				return nil, err
			}
			// A match disqualifies in exclude mode; a non-match disqualifies
			// in include mode.
			if match && !include || !match && include {
				skip = true
				break
			}
		}
		if skip {
			continue
		}
		filteredCommits = append(filteredCommits, commit)
	}
	return filteredCommits, nil
}
// configFromOpts builds a githubApiConfig with the kubernetes/kubernetes
// master-branch defaults and then applies each functional option in order.
func configFromOpts(opts ...GithubApiOption) *githubApiConfig {
	cfg := &githubApiConfig{
		ctx:    context.Background(),
		org:    "kubernetes",
		repo:   "kubernetes",
		branch: "master",
	}
	for _, apply := range opts {
		apply(cfg)
	}
	return cfg
}
// actionRequiredRes match "[action required] " / "action required: " prefixes
// case-insensitively; compiled once at package init instead of on every call.
var actionRequiredRes = []*regexp.Regexp{
	regexp.MustCompile(`(?i)\[action required\]\s`),
	regexp.MustCompile(`(?i)action required:\s`),
}

// stripActionRequired removes any "[action required]" or "action required:"
// markers (plus one trailing whitespace character) from note.
func stripActionRequired(note string) string {
	for _, re := range actionRequiredRes {
		note = re.ReplaceAllString(note, "")
	}
	return note
}
// starRe matches a "* " markdown bullet; compiled once at package init
// instead of on every call.
var starRe = regexp.MustCompile(`(?i)\*\s`)

// stripStar removes markdown list bullets ("* ") from note.
func stripStar(note string) string {
	return starRe.ReplaceAllString(note, "")
}
// HasString reports whether x is present in a.
func HasString(a []string, x string) bool {
	for i := range a {
		if a[i] == x {
			return true
		}
	}
	return false
}
Find PRs that were squash-merged.
// Copyright 2017 The Kubernetes Authors All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package notes
import (
"context"
"fmt"
"regexp"
"strconv"
"strings"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/google/go-github/github"
"github.com/pkg/errors"
)
// ReleaseNote is the type that represents the total sum of all the information
// we've gathered about a single release note.
type ReleaseNote struct {
	// Commit is the SHA of the commit which is the source of this note. This is
	// also effectively a unique ID for release notes.
	Commit string `json:"commit"`
	// Text is the actual content of the release note
	Text string `json:"text"`
	// Markdown is the markdown formatted note
	Markdown string `json:"markdown"`
	// Author is the GitHub username of the commit author
	Author string `json:"author"`
	// AuthorUrl is the GitHub URL of the commit author
	AuthorUrl string `json:"author_url"`
	// PrUrl is a URL to the PR
	PrUrl string `json:"pr_url"`
	// PrNumber is the number of the PR
	PrNumber int `json:"pr_number"`
	// Areas is a list of the labels beginning with area/
	Areas []string `json:"areas,omitempty"`
	// Kinds is a list of the labels beginning with kind/
	Kinds []string `json:"kinds,omitempty"`
	// SIGs is a list of the labels beginning with sig/
	SIGs []string `json:"sigs,omitempty"`
	// Feature indicates whether or not a note will appear as a new feature.
	Feature bool `json:"feature,omitempty"`
	// Duplicate indicates whether or not a note is duplicated across SIGs.
	Duplicate bool `json:"duplicate,omitempty"`
	// ActionRequired indicates whether or not the release-note-action-required
	// label was set on the PR
	ActionRequired bool `json:"action_required,omitempty"`
	// ReleaseVersion tags the note with a release version if specified;
	// omitted from JSON otherwise.
	ReleaseVersion string `json:"release_version,omitempty"`
}
// ReleaseNoteList is a map of PR numbers referencing notes.
// Keying by PR number avoids needless loops when looking notes up, and lets
// new entries efficiently override old ones when merging lists.
type ReleaseNoteList map[int]*ReleaseNote
// GithubApiOption is a type which allows for the expression of API configuration
// via the "functional option" pattern.
// For more information on this pattern, see the following blog post:
// https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
type GithubApiOption func(*githubApiConfig)
// githubApiConfig is a configuration struct that is used to express optional
// configuration for GitHub API requests
type githubApiConfig struct {
	ctx    context.Context // request context; defaults to context.Background()
	org    string          // GitHub organization; defaults to "kubernetes"
	repo   string          // GitHub repository; defaults to "kubernetes"
	branch string          // repository branch; defaults to "master"
}
// WithContext allows the caller to inject a context into GitHub API requests
func WithContext(ctx context.Context) GithubApiOption {
	return func(cfg *githubApiConfig) {
		cfg.ctx = ctx
	}
}
// WithOrg allows the caller to override the GitHub organization for the API
// request. By default, it is usually "kubernetes".
func WithOrg(org string) GithubApiOption {
	return func(cfg *githubApiConfig) {
		cfg.org = org
	}
}
// WithRepo allows the caller to override the GitHub repo for the API
// request. By default, it is usually "kubernetes".
func WithRepo(repo string) GithubApiOption {
	return func(cfg *githubApiConfig) {
		cfg.repo = repo
	}
}
// WithBranch allows the caller to override the repo branch for the API
// request. By default, it is usually "master".
func WithBranch(branch string) GithubApiOption {
	return func(cfg *githubApiConfig) {
		cfg.branch = branch
	}
}
// ListReleaseNotes produces a list of fully contextualized release notes
// starting from a given commit SHA and ending at a given commit SHA.
// Commits not authored by k8s-ci-robot are skipped, notes whose text is
// exactly "NONE" are dropped, and notes with identical text are deduplicated
// (the first PR seen wins). Per-commit extraction errors are logged and the
// commit is skipped rather than failing the whole listing.
func ListReleaseNotes(
	client *github.Client,
	logger log.Logger,
	branch,
	start,
	end,
	relVer string,
	opts ...GithubApiOption,
) (ReleaseNoteList, error) {
	commits, err := ListCommitsWithNotes(client, logger, branch, start, end, opts...)
	if err != nil {
		return nil, err
	}
	// dedupeCache records note texts already emitted so the same note attached
	// to multiple commits appears only once.
	dedupeCache := map[string]struct{}{}
	notes := make(ReleaseNoteList)
	for _, commit := range commits {
		// Only merge commits created by the CI robot are considered.
		if commit.GetAuthor().GetLogin() != "k8s-ci-robot" {
			continue
		}
		note, err := ReleaseNoteFromCommit(commit, client, relVer, opts...)
		if err != nil {
			level.Error(logger).Log(
				"err", err,
				"msg", "error getting the release note from commit while listing release notes",
				"sha", commit.GetSHA(),
			)
			continue
		}
		// An explicit "NONE" means the PR intentionally carries no note.
		if strings.TrimSpace(note.Text) == "NONE" {
			continue
		}
		if _, ok := dedupeCache[note.Text]; !ok {
			notes[note.PrNumber] = note
			dedupeCache[note.Text] = struct{}{}
		}
	}
	return notes, nil
}
// noteStanzaRegexps match the ```release-note ``` stanza in PR bodies and
// commit messages. Compiled once at package init instead of on every call.
// (?s) is needed for '.' to match newlines; by default it does not.
var noteStanzaRegexps = []*regexp.Regexp{
	regexp.MustCompile("(?s)```release-note\\r\\n(?P<note>.+)\\r\\n```"),
	regexp.MustCompile("(?s)```dev-release-note\\r\\n(?P<note>.+)"),
	regexp.MustCompile("(?s)```\\r\\n(?P<note>.+)\\r\\n```"),
	regexp.MustCompile("(?s)```release-note\n(?P<note>.+)\n```"),
}

// NoteTextFromString returns the text of the release note given a string which
// may contain the commit message, the PR description, etc.
// This is generally the content inside the ```release-note ``` stanza.
// Carriage returns, "[action required]" markers, and list bullets are
// stripped from the extracted text.
func NoteTextFromString(s string) (string, error) {
	for _, exp := range noteStanzaRegexps {
		match := exp.FindStringSubmatch(s)
		if len(match) == 0 {
			continue
		}
		// Collect named submatches; only "note" is defined in these patterns.
		result := map[string]string{}
		for i, name := range exp.SubexpNames() {
			if i != 0 && name != "" {
				result[name] = match[i]
			}
		}
		note := strings.Replace(result["note"], "\r", "", -1)
		note = stripActionRequired(note)
		note = stripStar(note)
		return note, nil
	}
	return "", errors.New("no matches found when parsing note text from commit string")
}
// ReleaseNoteFromCommit produces a full contextualized release note given a
// GitHub commit API resource: it resolves the PR the commit came from,
// extracts the release-note stanza from the PR body, and decorates the note
// with author, URL, and label (kind/sig/area) metadata.
func ReleaseNoteFromCommit(commit *github.RepositoryCommit, client *github.Client, relVer string, opts ...GithubApiOption) (*ReleaseNote, error) {
	c := configFromOpts(opts...)
	pr, err := PRFromCommit(client, commit, opts...)
	if err != nil {
		return nil, errors.Wrapf(err, "error parsing release note from commit %s", commit.GetSHA())
	}
	text, err := NoteTextFromString(pr.GetBody())
	if err != nil {
		return nil, err
	}
	author := pr.GetUser().GetLogin()
	authorUrl := fmt.Sprintf("https://github.com/%s", author)
	prUrl := fmt.Sprintf("https://github.com/%s/%s/pull/%d", c.org, c.repo, pr.GetNumber())
	IsFeature := HasString(LabelsWithPrefix(pr, "kind"), "feature")
	IsDuplicate := false
	sigsListPretty := prettifySigList(LabelsWithPrefix(pr, "sig"))
	noteSuffix := ""
	// Action-required and feature notes get a "Courtesy of <SIGs>" suffix;
	// other notes spanning more than one SIG are flagged as duplicates.
	if IsActionRequired(pr) || IsFeature {
		if sigsListPretty != "" {
			noteSuffix = fmt.Sprintf("Courtesy of %s", sigsListPretty)
		}
	} else if len(LabelsWithPrefix(pr, "sig")) > 1 {
		IsDuplicate = true
	}
	// Render as "<text> ([#N](pr-url), [@author](author-url))".
	markdown := fmt.Sprintf("%s ([#%d](%s), [@%s](%s))", text, pr.GetNumber(), prUrl, author, authorUrl)
	if noteSuffix != "" {
		markdown = fmt.Sprintf("%s %s", markdown, noteSuffix)
	}
	return &ReleaseNote{
		Commit:         commit.GetSHA(),
		Text:           text,
		Markdown:       markdown,
		Author:         author,
		AuthorUrl:      authorUrl,
		PrUrl:          prUrl,
		PrNumber:       pr.GetNumber(),
		SIGs:           LabelsWithPrefix(pr, "sig"),
		Kinds:          LabelsWithPrefix(pr, "kind"),
		Areas:          LabelsWithPrefix(pr, "area"),
		Feature:        IsFeature,
		Duplicate:      IsDuplicate,
		ActionRequired: IsActionRequired(pr),
		ReleaseVersion: relVer,
	}, nil
}
// ListCommits lists all commits on branch between the start and end commit
// SHAs, bounded by the commit timestamps of those SHAs, paging through the
// GitHub API 100 commits at a time.
func ListCommits(client *github.Client, branch, start, end string, opts ...GithubApiOption) ([]*github.RepositoryCommit, error) {
	c := configFromOpts(opts...)
	c.branch = branch
	startCommit, _, err := client.Git.GetCommit(c.ctx, c.org, c.repo, start)
	if err != nil {
		return nil, err
	}
	endCommit, _, err := client.Git.GetCommit(c.ctx, c.org, c.repo, end)
	if err != nil {
		return nil, err
	}
	clo := &github.CommitsListOptions{
		SHA: c.branch,
		// NOTE(review): assumes Committer.Date is always populated on commit
		// objects returned by the API — confirm, a nil Date would panic here.
		Since: *startCommit.Committer.Date,
		Until: *endCommit.Committer.Date,
		ListOptions: github.ListOptions{
			Page:    1,
			PerPage: 100,
		},
	}
	commits, resp, err := client.Repositories.ListCommits(c.ctx, c.org, c.repo, clo)
	if err != nil {
		return nil, err
	}
	// Fetch any remaining pages; resp.LastPage is 0 when everything fit in
	// the first page, so the loop body is skipped in that case.
	for clo.ListOptions.Page++; clo.ListOptions.Page <= resp.LastPage; clo.ListOptions.Page++ {
		commitPage, _, err := client.Repositories.ListCommits(c.ctx, c.org, c.repo, clo)
		if err != nil {
			return nil, err
		}
		commits = append(commits, commitPage...)
	}
	return commits, nil
}
// ListCommitsWithNotes lists commits that have release notes starting from a
// given commit SHA and ending at a given commit SHA. This function is similar
// to ListCommits except that only commits with tagged release notes are
// returned.
func ListCommitsWithNotes(
	client *github.Client,
	logger log.Logger,
	branch,
	start,
	end string,
	opts ...GithubApiOption,
) ([]*github.RepositoryCommit, error) {
	filteredCommits := []*github.RepositoryCommit{}
	commits, err := ListCommits(client, branch, start, end, opts...)
	if err != nil {
		return nil, err
	}
	// exclusionFilters is a list of regular expressions that match commits that
	// do NOT contain release notes. Notably, this is all of the variations of
	// "release note none" that appear in the commit log. Hoisted out of the
	// commit loop so the slice is not rebuilt on every iteration.
	exclusionFilters := []string{
		"```release-note\\r\\nNONE",
		"```release-note\\r\\n\\s+NONE",
		"```release-note\\r\\nNONE",
		"```release-note\\r\\n\"NONE\"",
		"```release-note\\r\\nNone",
		"```release-note\\r\\nnone",
		"```release-note\\r\\nN/A",
		"```release-note\\r\\n\\r\\n```",
		"```release-note\\r\\n```",
		"/release-note-none",
		"\\r\\n\\r\\nNONE",
		"```NONE\\r\\n```",
		"```release-note \\r\\nNONE\\r\\n```",
		"NONE\\r\\n```",
		"\\r\\nNone",
		"\\r\\nNONE\\r\\n",
	}
	// Once the known not-release-notes are excluded, these patterns identify
	// bodies that actually carry a release note.
	inclusionFilters := []string{
		"release-note",
		"Does this PR introduce a user-facing change?",
	}
	for _, commit := range commits {
		pr, err := PRFromCommit(client, commit, opts...)
		if err != nil {
			// Commits without a parseable PR reference carry no release note.
			if err.Error() == "no matches found when parsing PR from commit" {
				continue
			}
			// Previously other errors fell through silently and the commit was
			// matched against an empty PR body; log them and skip instead.
			level.Error(logger).Log(
				"err", err,
				"msg", "error getting PR from commit while listing commits with notes",
				"sha", commit.GetSHA(),
			)
			continue
		}
		excluded := false
		for _, filter := range exclusionFilters {
			match, err := regexp.MatchString(filter, pr.GetBody())
			if err != nil {
				return nil, err
			}
			if match {
				excluded = true
				break
			}
		}
		if excluded {
			continue
		}
		for _, filter := range inclusionFilters {
			match, err := regexp.MatchString(filter, pr.GetBody())
			if err != nil {
				return nil, err
			}
			if match {
				filteredCommits = append(filteredCommits, commit)
				// Stop after the first hit so a body matching several
				// inclusion filters is not appended more than once.
				break
			}
		}
	}
	return filteredCommits, nil
}
// PRFromCommit returns the API Pull Request referenced by a commit's message.
// This is useful for going from a commit log to the PR (which contains useful
// info such as labels). Both classic merge commits ("Merge pull request #N")
// and squash merges ("... (#N)") are recognized.
func PRFromCommit(client *github.Client, commit *github.RepositoryCommit, opts ...GithubApiOption) (*github.PullRequest, error) {
	c := configFromOpts(opts...)
	// Thankfully k8s-merge-robot commits the PR number consistently. If this ever
	// stops being true, this definitely won't work anymore.
	exp := regexp.MustCompile(`Merge pull request #(?P<number>\d+)`)
	match := exp.FindStringSubmatch(*commit.Commit.Message)
	if len(match) == 0 {
		// If the PR was squash merged, the regexp is different. Reassign exp
		// so the SubexpNames lookup below uses the pattern that matched.
		exp = regexp.MustCompile(`\(#(?P<number>\d+)\)`)
		match = exp.FindStringSubmatch(*commit.Commit.Message)
		// FindStringSubmatch returns [full, number] on success (length 2);
		// the previous `len(match) != 1` check therefore rejected every
		// successful squash-merge match. Only a zero-length result means
		// "no match".
		if len(match) == 0 {
			return nil, errors.New("no matches found when parsing PR from commit")
		}
	}
	// Collect named submatches; only "number" is defined in either pattern.
	result := map[string]string{}
	for i, name := range exp.SubexpNames() {
		if i != 0 && name != "" {
			result[name] = match[i]
		}
	}
	number, err := strconv.Atoi(result["number"])
	if err != nil {
		return nil, err
	}
	// Given the PR number that we've now converted to an integer, get the PR from
	// the API
	pr, _, err := client.PullRequests.Get(c.ctx, c.org, c.repo, number)
	return pr, err
}
// LabelsWithPrefix returns the names of all labels on pr that begin with
// prefix, with the leading "prefix/" stripped (e.g. "sig/node" -> "node").
// This pattern is used often in the k/k repo to contextualize release notes
// with kind, sig, and area labels.
func LabelsWithPrefix(pr *github.PullRequest, prefix string) []string {
	labels := []string{}
	for _, l := range pr.Labels {
		name := *l.Name
		if !strings.HasPrefix(name, prefix) {
			continue
		}
		labels = append(labels, strings.TrimPrefix(name, prefix+"/"))
	}
	return labels
}
// IsActionRequired reports whether the release-note-action-required label was
// set on the PR.
func IsActionRequired(pr *github.PullRequest) bool {
	for _, l := range pr.Labels {
		if *l.Name == "release-note-action-required" {
			return true
		}
	}
	return false
}
// filterCommits is a helper that allows you to filter a set of commits by
// applying a set of regular expressions over the commit messages. If include is
// true, only commits that match at least one expression are returned. If include
// is false, only commits that match 0 of the expressions are returned.
// NOTE(review): in include mode the loop below actually requires EVERY filter
// to match (any non-match skips the commit), which contradicts "at least one"
// above — confirm which semantics are intended.
func filterCommits(
	client *github.Client,
	logger log.Logger,
	commits []*github.RepositoryCommit,
	filters []string,
	include bool,
	opts ...GithubApiOption,
) ([]*github.RepositoryCommit, error) {
	filteredCommits := []*github.RepositoryCommit{}
	for _, commit := range commits {
		body := commit.GetCommit().GetMessage()
		// For robot merge commits the interesting text lives in the PR body,
		// not the commit message.
		if commit.GetAuthor().GetLogin() == "k8s-merge-robot" {
			pr, err := PRFromCommit(client, commit, opts...)
			if err != nil {
				level.Info(logger).Log(
					"msg", "error getting PR from k8s-merge-robot commit",
					"err", err,
					"sha", commit.GetSHA(),
				)
				continue
			}
			body = pr.GetBody()
		}
		skip := false
		for _, filter := range filters {
			match, err := regexp.MatchString(filter, body)
			if err != nil {
				return nil, err
			}
			// A match disqualifies in exclude mode; a non-match disqualifies
			// in include mode.
			if match && !include || !match && include {
				skip = true
				break
			}
		}
		if skip {
			continue
		}
		filteredCommits = append(filteredCommits, commit)
	}
	return filteredCommits, nil
}
// configFromOpts is an internal helper for turning a set of functional options
// into a populated *githubApiConfig struct, defaulting to the master branch
// of kubernetes/kubernetes with a background context.
func configFromOpts(opts ...GithubApiOption) *githubApiConfig {
	cfg := &githubApiConfig{
		ctx:    context.Background(),
		org:    "kubernetes",
		repo:   "kubernetes",
		branch: "master",
	}
	// Options are applied in order; later options win.
	for i := range opts {
		opts[i](cfg)
	}
	return cfg
}
// stripActionRequired removes every "[action required]" and
// "action required:" marker (case-insensitive, each followed by one
// whitespace character) from the note text.
func stripActionRequired(note string) string {
	for _, re := range []*regexp.Regexp{
		regexp.MustCompile(`(?i)\[action required\]\s`),
		regexp.MustCompile(`(?i)action required:\s`),
	} {
		note = re.ReplaceAllString(note, "")
	}
	return note
}
// stripStar removes every "* " bullet marker (an asterisk followed by one
// whitespace character) from the note text.
func stripStar(note string) string {
	return regexp.MustCompile(`(?i)\*\s`).ReplaceAllString(note, "")
}
// HasString reports whether x is present in the slice a.
func HasString(a []string, x string) bool {
	for i := range a {
		if a[i] == x {
			return true
		}
	}
	return false
}
|
// Copyright 2014 Wandoujia Inc. All Rights Reserved.
// Licensed under the MIT (MIT-LICENSE.txt) license.
package proxy
import (
"bytes"
"encoding/json"
"errors"
"net"
"net/http"
"os"
"path"
"strconv"
"strings"
"sync"
"time"
"github.com/wandoulabs/codis/pkg/models"
"github.com/wandoulabs/codis/pkg/proxy/router"
"github.com/wandoulabs/codis/pkg/utils/log"
"github.com/wandoulabs/go-zookeeper/zk"
topo "github.com/wandoulabs/go-zookeeper/zk"
)
// Server is a codis proxy instance. It registers itself in ZooKeeper,
// serves redis clients through the embedded router, and applies slot and
// group changes broadcast on the topology's action path.
type Server struct {
	conf          *Config
	topo          *Topology
	info          models.ProxyInfo
	groups        map[int]int // slot index -> group id; used to refill slots when a group changes
	lastActionSeq int         // last processed action sequence (-1 before any)
	evtbus        chan interface{} // ZooKeeper watch events (node and children changes)
	router        *router.Router
	listener      net.Listener
	kill          chan interface{} // closed exactly once to broadcast shutdown
	wait          sync.WaitGroup   // tracks the serve goroutine for Join/Close
	stop          sync.Once        // guards close() so teardown runs exactly once
}
// New creates a proxy listening on addr, registers it in ZooKeeper in the
// OFFLINE state and starts the serve loop in a background goroutine. Any
// unrecoverable setup error (hostname lookup, listener, registration)
// panics via the log package.
func New(addr string, debugVarAddr string, conf *Config) *Server {
	log.Infof("create proxy with config: %+v", conf)
	proxyHost := strings.Split(addr, ":")[0]
	debugHost := strings.Split(debugVarAddr, ":")[0]
	hostname, err := os.Hostname()
	if err != nil {
		log.PanicErrorf(err, "get host name failed")
	}
	// Wildcard/loopback bind addresses are not reachable by other hosts;
	// advertise the machine's hostname instead.
	if proxyHost == "0.0.0.0" || strings.HasPrefix(proxyHost, "127.0.0.") || proxyHost == "" {
		proxyHost = hostname
	}
	if debugHost == "0.0.0.0" || strings.HasPrefix(debugHost, "127.0.0.") || debugHost == "" {
		debugHost = hostname
	}
	s := &Server{conf: conf, lastActionSeq: -1, groups: make(map[int]int)}
	s.topo = NewTopo(conf.productName, conf.zkAddr, conf.fact, conf.provider, conf.zkSessionTimeout)
	s.info.Id = conf.proxyId
	s.info.State = models.PROXY_STATE_OFFLINE
	// Keep the original ports but swap in the advertised hosts.
	s.info.Addr = proxyHost + ":" + strings.Split(addr, ":")[1]
	s.info.DebugVarAddr = debugHost + ":" + strings.Split(debugVarAddr, ":")[1]
	s.info.Pid = os.Getpid()
	s.info.StartAt = time.Now().String()
	s.kill = make(chan interface{})
	log.Infof("proxy info = %+v", s.info)
	if l, err := net.Listen(conf.proto, addr); err != nil {
		log.PanicErrorf(err, "open listener failed")
	} else {
		s.listener = l
	}
	s.router = router.NewWithAuth(conf.passwd)
	s.evtbus = make(chan interface{}, 1024)
	s.register()
	s.wait.Add(1)
	go func() {
		defer s.wait.Done()
		s.serve()
	}()
	return s
}
// SetMyselfOnline asks the dashboard to mark this proxy online by POSTing
// its id with state ONLINE to the dashboard's /api/proxy endpoint. It
// returns an error if the request fails or the dashboard does not answer
// with HTTP 200.
func (s *Server) SetMyselfOnline() error {
	log.Info("mark myself online")
	info := models.ProxyInfo{
		Id:    s.conf.proxyId,
		State: models.PROXY_STATE_ONLINE,
	}
	// Marshalling a plain struct cannot fail here; the error is ignored.
	b, _ := json.Marshal(info)
	url := "http://" + s.conf.dashboardAddr + "/api/proxy"
	res, err := http.Post(url, "application/json", bytes.NewReader(b))
	if err != nil {
		return err
	}
	// Close the body so the underlying connection can be reused
	// (the original code leaked it).
	defer res.Body.Close()
	if res.StatusCode != 200 {
		return errors.New("response code is not 200")
	}
	return nil
}
// serve waits until the dashboard marks this proxy online, then fills all
// slots and starts accepting client connections while processing topology
// events. It runs in its own goroutine (started by New) and closes the
// server on exit.
func (s *Server) serve() {
	defer s.close()

	// Blocks until we are marked online; returns false when offlined/killed.
	if !s.waitOnline() {
		return
	}
	s.rewatchNodes()
	for i := 0; i < router.MaxSlotNum; i++ {
		s.fillSlot(i)
	}
	log.Info("proxy is serving")
	go func() {
		defer s.close()
		s.handleConns()
	}()
	s.loopEvents()
}
// handleConns accepts client connections and hands each one to a session
// goroutine via a buffered channel. It returns when Accept fails, i.e.
// when the listener is closed during shutdown.
func (s *Server) handleConns() {
	ch := make(chan net.Conn, 4096)
	defer close(ch)

	// Single dispatcher goroutine: wraps each connection in a session and
	// serves it concurrently.
	go func() {
		for c := range ch {
			x := router.NewSessionSize(c, s.conf.passwd, s.conf.maxBufSize, s.conf.maxTimeout)
			go x.Serve(s.router, s.conf.maxPipeline)
		}
	}()

	for {
		c, err := s.listener.Accept()
		if err != nil {
			// Listener closed; stop accepting. (Dropped the redundant
			// `else` after return — idiomatic early-exit form.)
			return
		}
		ch <- c
	}
}
// Info returns a copy of the proxy's registration info.
func (s *Server) Info() models.ProxyInfo {
	return s.info
}
// Join blocks until the serve goroutine has finished.
func (s *Server) Join() {
	s.wait.Wait()
}
// Close shuts the proxy down and waits for the serve goroutine to exit.
// It always returns nil.
func (s *Server) Close() error {
	s.close()
	s.wait.Wait()
	return nil
}
// close tears down the listener and router and broadcasts shutdown by
// closing the kill channel; sync.Once makes it safe to call repeatedly
// from multiple goroutines.
func (s *Server) close() {
	s.stop.Do(func() {
		s.listener.Close()
		if s.router != nil {
			s.router.Close()
		}
		close(s.kill)
	})
}
// rewatchProxy re-arms the ZooKeeper watch on this proxy's own node;
// resulting events are delivered to evtbus. Panics if the watch cannot
// be set.
func (s *Server) rewatchProxy() {
	_, err := s.topo.WatchNode(path.Join(models.GetProxyPath(s.topo.ProductName), s.info.Id), s.evtbus)
	if err != nil {
		log.PanicErrorf(err, "watch node failed")
	}
}
// rewatchNodes re-arms the children watch on the product's action path and
// returns the currently known action nodes. Panics if the watch cannot be
// set.
func (s *Server) rewatchNodes() []string {
	nodes, err := s.topo.WatchChildren(models.GetWatchActionPath(s.topo.ProductName), s.evtbus)
	if err != nil {
		log.PanicErrorf(err, "watch children failed")
	}
	return nodes
}
// register creates this proxy's info node and fence node in ZooKeeper.
// An already-existing fence node is tolerated; any other failure panics.
func (s *Server) register() {
	if _, err := s.topo.CreateProxyInfo(&s.info); err != nil {
		log.PanicErrorf(err, "create proxy node failed")
	}
	if _, err := s.topo.CreateProxyFenceNode(&s.info); err != nil && err != zk.ErrNodeExists {
		log.PanicErrorf(err, "create fence node failed")
	}
	// Warn operators: only a graceful kill lets us clean up our ZK nodes.
	log.Warn("********** Attention **********")
	log.Warn("You should use `kill {pid}` rather than `kill -9 {pid}` to stop me,")
	log.Warn("or the node resisted on zk will not be cleaned when I'm quiting and you must remove it manually")
	log.Warn("*******************************")
}
// markOffline removes this proxy's topology state and flags it as
// MARK_OFFLINE locally, which terminates loopEvents (it only runs while
// the state is ONLINE).
func (s *Server) markOffline() {
	s.topo.Close(s.info.Id)
	s.info.State = models.PROXY_STATE_MARK_OFFLINE
}
// waitOnline polls ZooKeeper every 3 seconds until the dashboard flips
// this proxy to ONLINE (returns true) or it is marked offline or killed
// (returns false, after marking itself offline).
func (s *Server) waitOnline() bool {
	for {
		info, err := s.topo.GetProxyInfo(s.info.Id)
		if err != nil {
			log.PanicErrorf(err, "get proxy info failed: %s", s.info.Id)
		}
		switch info.State {
		case models.PROXY_STATE_MARK_OFFLINE:
			log.Infof("mark offline, proxy got offline event: %s", s.info.Id)
			s.markOffline()
			return false
		case models.PROXY_STATE_ONLINE:
			s.info.State = info.State
			log.Infof("we are online: %s", s.info.Id)
			// Start watching our own node for subsequent state changes.
			s.rewatchProxy()
			return true
		}
		// Non-blocking check so a kill during the wait is noticed promptly.
		select {
		case <-s.kill:
			log.Infof("mark offline, proxy is killed: %s", s.info.Id)
			s.markOffline()
			return false
		default:
		}
		log.Infof("wait to be online: %s", s.info.Id)
		time.Sleep(3 * time.Second)
	}
}
// getEventPath extracts the ZooKeeper path from a watch event.
// Panics (type assertion) if evt is not a zk event.
func getEventPath(evt interface{}) string {
	return evt.(topo.Event).Path
}
// needResponse reports whether this proxy is among an action's receivers
// and therefore must acknowledge it. Each receiver entry is expected to be
// a JSON-encoded ProxyInfo matched on Id/Pid/StartAt; an entry that is not
// valid JSON is treated as a bare proxy id and compared directly.
// NOTE(review): a malformed entry that is not our id aborts the whole scan
// (returns false) instead of continuing — confirm this is intended.
func needResponse(receivers []string, self models.ProxyInfo) bool {
	var info models.ProxyInfo
	for _, v := range receivers {
		err := json.Unmarshal([]byte(v), &info)
		if err != nil {
			if v == self.Id {
				return true
			}
			return false
		}
		if info.Id == self.Id && info.Pid == self.Pid && info.StartAt == self.StartAt {
			return true
		}
	}
	return false
}
// groupMaster returns the address of the group's single master server.
// Panics if the group has no master or more than one.
func groupMaster(groupInfo models.ServerGroup) string {
	var master string
	for _, server := range groupInfo.Servers {
		if server.Type == models.SERVER_TYPE_MASTER {
			if master != "" {
				log.Panicf("two master not allowed: %+v", groupInfo)
			}
			master = server.Addr
		}
	}
	if master == "" {
		log.Panicf("master not found: %+v", groupInfo)
	}
	return master
}
// resetSlot clears slot i in the router; used when a slot transitions to
// OFFLINE (see onSlotRangeChange).
func (s *Server) resetSlot(i int) {
	s.router.ResetSlot(i)
}
// fillSlot loads slot i's state from the topology and programs the router
// with its backend master address (and the migration source address when
// the slot is mid-migration). Panics on any topology inconsistency.
func (s *Server) fillSlot(i int) {
	slotInfo, slotGroup, err := s.topo.GetSlotByIndex(i)
	if err != nil {
		// Include the slot index in the message; the original format
		// string passed i with no corresponding verb (go vet error).
		log.PanicErrorf(err, "get slot by index failed, index = %d", i)
	}
	var from string
	var addr = groupMaster(*slotGroup)
	if slotInfo.State.Status == models.SLOT_STATUS_MIGRATE {
		fromGroup, err := s.topo.GetGroup(slotInfo.State.MigrateStatus.From)
		if err != nil {
			log.PanicErrorf(err, "get migrate from failed")
		}
		from = groupMaster(*fromGroup)
		// A slot migrating onto itself indicates corrupt topology data.
		if from == addr {
			log.Panicf("set slot %04d migrate from %s to %s", i, from, addr)
		}
	}
	s.groups[i] = slotInfo.GroupId
	s.router.FillSlot(i, addr, from,
		slotInfo.State.Status == models.SLOT_STATUS_PRE_MIGRATE)
}
// onSlotRangeChange applies a multi-slot status change over [From, To]:
// OFFLINE slots are reset, ONLINE slots are refilled; any other status is
// fatal.
func (s *Server) onSlotRangeChange(param *models.SlotMultiSetParam) {
	log.Infof("slotRangeChange %+v", param)
	for i := param.From; i <= param.To; i++ {
		switch param.Status {
		case models.SLOT_STATUS_OFFLINE:
			s.resetSlot(i)
		case models.SLOT_STATUS_ONLINE:
			s.fillSlot(i)
		default:
			log.Panicf("can not handle status %v", param.Status)
		}
	}
}
// onGroupChange refills every slot currently assigned to the given group
// so the router reloads that group's backend state.
func (s *Server) onGroupChange(groupId int) {
	log.Infof("group changed %d", groupId)
	for i, g := range s.groups {
		if g == groupId {
			s.fillSlot(i)
		}
	}
}
// responseAction acknowledges action seq in the topology; failure is
// logged but deliberately not fatal.
func (s *Server) responseAction(seq int64) {
	log.Infof("send response seq = %d", seq)
	err := s.topo.DoResponse(int(seq), &s.info)
	if err != nil {
		log.InfoErrorf(err, "send response seq = %d failed", seq)
	}
}
// getActionObject fetches action seq from the topology and unmarshals its
// payload into target (passed via the Action's Target field). Panics on
// failure.
func (s *Server) getActionObject(seq int, target interface{}) {
	act := &models.Action{Target: target}
	err := s.topo.GetActionWithSeqObject(int64(seq), act)
	if err != nil {
		log.PanicErrorf(err, "get action object failed, seq = %d", seq)
	}
	log.Infof("action %+v", act)
}
// checkAndDoTopoChange fetches action seq and, if this proxy is among its
// receivers, applies the topology change (slot fill, group change, or
// multi-slot update). It returns true when the action was applied and
// therefore needs an acknowledgement.
func (s *Server) checkAndDoTopoChange(seq int) bool {
	act, err := s.topo.GetActionWithSeq(int64(seq))
	if err != nil { //todo: error is not "not exist"
		log.PanicErrorf(err, "action failed, seq = %d", seq)
	}
	if !needResponse(act.Receivers, s.info) { //no need to response
		return false
	}
	log.Warnf("action %v receivers %v", seq, act.Receivers)
	switch act.Type {
	case models.ACTION_TYPE_SLOT_MIGRATE, models.ACTION_TYPE_SLOT_CHANGED,
		models.ACTION_TYPE_SLOT_PREMIGRATE:
		slot := &models.Slot{}
		s.getActionObject(seq, slot)
		s.fillSlot(slot.Id)
	case models.ACTION_TYPE_SERVER_GROUP_CHANGED:
		serverGroup := &models.ServerGroup{}
		s.getActionObject(seq, serverGroup)
		s.onGroupChange(serverGroup.Id)
	case models.ACTION_TYPE_SERVER_GROUP_REMOVE:
		//do not care
	case models.ACTION_TYPE_MULTI_SLOT_CHANGED:
		param := &models.SlotMultiSetParam{}
		s.getActionObject(seq, param)
		s.onSlotRangeChange(param)
	default:
		log.Panicf("unknown action %+v", act)
	}
	return true
}
// processAction handles one event from evtbus. Events on our own proxy
// node re-check our state (offline/online); any other event re-arms the
// action watch and replays all action seqs newer than lastActionSeq that
// this proxy has not yet acknowledged.
func (s *Server) processAction(e interface{}) {
	// Event on our own proxy node: refresh state from the topology.
	if strings.Index(getEventPath(e), models.GetProxyPath(s.topo.ProductName)) == 0 {
		info, err := s.topo.GetProxyInfo(s.info.Id)
		if err != nil {
			log.PanicErrorf(err, "get proxy info failed: %s", s.info.Id)
		}
		switch info.State {
		case models.PROXY_STATE_MARK_OFFLINE:
			log.Infof("mark offline, proxy got offline event: %s", s.info.Id)
			s.markOffline()
		case models.PROXY_STATE_ONLINE:
			s.rewatchProxy()
		default:
			log.Panicf("unknown proxy state %v", info)
		}
		return
	}

	//re-watch
	nodes := s.rewatchNodes()
	seqs, err := models.ExtraSeqList(nodes)
	if err != nil {
		log.PanicErrorf(err, "get seq list failed")
	}
	if len(seqs) == 0 || !s.topo.IsChildrenChangedEvent(e) {
		return
	}

	//get last pos: first sequence we have not processed yet
	index := -1
	for i, seq := range seqs {
		if s.lastActionSeq < seq {
			index = i
			break
		}
	}
	if index < 0 {
		return
	}

	// Apply every unseen action in order, skipping ones we already
	// acknowledged (our response node exists).
	actions := seqs[index:]
	for _, seq := range actions {
		exist, err := s.topo.Exist(path.Join(s.topo.GetActionResponsePath(seq), s.info.Id))
		if err != nil {
			log.PanicErrorf(err, "get action failed")
		}
		if exist {
			continue
		}
		if s.checkAndDoTopoChange(seq) {
			s.responseAction(int64(seq))
		}
	}

	s.lastActionSeq = seqs[len(seqs)-1]
}
// loopEvents is the proxy's main loop. It runs while the proxy is ONLINE,
// dispatching topology events from evtbus, honoring kill, and sending a
// router keep-alive every pingPeriod ticks (one tick per second).
func (s *Server) loopEvents() {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	var tick int = 0
	for s.info.State == models.PROXY_STATE_ONLINE {
		select {
		case <-s.kill:
			log.Infof("mark offline, proxy is killed: %s", s.info.Id)
			s.markOffline()
		case e := <-s.evtbus:
			evtPath := getEventPath(e)
			log.Infof("got event %s, %v, lastActionSeq %d", s.info.Id, e, s.lastActionSeq)
			// Action-response events carry the seq in the path's base name;
			// ones older than lastActionSeq are skipped outright.
			if strings.Index(evtPath, models.GetActionResponsePath(s.conf.productName)) == 0 {
				seq, err := strconv.Atoi(path.Base(evtPath))
				if err != nil {
					log.ErrorErrorf(err, "parse action seq failed")
				} else {
					if seq < s.lastActionSeq {
						log.Infof("ignore seq = %d", seq)
						continue
					}
				}
			}
			s.processAction(e)
		case <-ticker.C:
			// pingPeriod == 0 disables keep-alives.
			if maxTick := s.conf.pingPeriod; maxTick != 0 {
				if tick++; tick >= maxTick {
					s.router.KeepAlive()
					tick = 0
				}
			}
		}
	}
}
// NOTE: the revision that follows changes processAction to handle only the latest action (the "break" in the sequence scan is commented out).
// Copyright 2014 Wandoujia Inc. All Rights Reserved.
// Licensed under the MIT (MIT-LICENSE.txt) license.
package proxy
import (
"bytes"
"encoding/json"
"errors"
"net"
"net/http"
"os"
"path"
"strconv"
"strings"
"sync"
"time"
"github.com/wandoulabs/codis/pkg/models"
"github.com/wandoulabs/codis/pkg/proxy/router"
"github.com/wandoulabs/codis/pkg/utils/log"
"github.com/wandoulabs/go-zookeeper/zk"
topo "github.com/wandoulabs/go-zookeeper/zk"
)
// Server is a codis proxy instance. It registers itself in ZooKeeper,
// serves redis clients through the embedded router, and applies slot and
// group changes broadcast on the topology's action path.
type Server struct {
	conf          *Config
	topo          *Topology
	info          models.ProxyInfo
	groups        map[int]int // slot index -> group id; used to refill slots when a group changes
	lastActionSeq int         // last processed action sequence (-1 before any)
	evtbus        chan interface{} // ZooKeeper watch events (node and children changes)
	router        *router.Router
	listener      net.Listener
	kill          chan interface{} // closed exactly once to broadcast shutdown
	wait          sync.WaitGroup   // tracks the serve goroutine for Join/Close
	stop          sync.Once        // guards close() so teardown runs exactly once
}
// New creates a proxy listening on addr, registers it in ZooKeeper in the
// OFFLINE state and starts the serve loop in a background goroutine. Any
// unrecoverable setup error (hostname lookup, listener, registration)
// panics via the log package.
func New(addr string, debugVarAddr string, conf *Config) *Server {
	log.Infof("create proxy with config: %+v", conf)
	proxyHost := strings.Split(addr, ":")[0]
	debugHost := strings.Split(debugVarAddr, ":")[0]
	hostname, err := os.Hostname()
	if err != nil {
		log.PanicErrorf(err, "get host name failed")
	}
	// Wildcard/loopback bind addresses are not reachable by other hosts;
	// advertise the machine's hostname instead.
	if proxyHost == "0.0.0.0" || strings.HasPrefix(proxyHost, "127.0.0.") || proxyHost == "" {
		proxyHost = hostname
	}
	if debugHost == "0.0.0.0" || strings.HasPrefix(debugHost, "127.0.0.") || debugHost == "" {
		debugHost = hostname
	}
	s := &Server{conf: conf, lastActionSeq: -1, groups: make(map[int]int)}
	s.topo = NewTopo(conf.productName, conf.zkAddr, conf.fact, conf.provider, conf.zkSessionTimeout)
	s.info.Id = conf.proxyId
	s.info.State = models.PROXY_STATE_OFFLINE
	// Keep the original ports but swap in the advertised hosts.
	s.info.Addr = proxyHost + ":" + strings.Split(addr, ":")[1]
	s.info.DebugVarAddr = debugHost + ":" + strings.Split(debugVarAddr, ":")[1]
	s.info.Pid = os.Getpid()
	s.info.StartAt = time.Now().String()
	s.kill = make(chan interface{})
	log.Infof("proxy info = %+v", s.info)
	if l, err := net.Listen(conf.proto, addr); err != nil {
		log.PanicErrorf(err, "open listener failed")
	} else {
		s.listener = l
	}
	s.router = router.NewWithAuth(conf.passwd)
	s.evtbus = make(chan interface{}, 1024)
	s.register()
	s.wait.Add(1)
	go func() {
		defer s.wait.Done()
		s.serve()
	}()
	return s
}
// SetMyselfOnline asks the dashboard to mark this proxy online by POSTing
// its id with state ONLINE to the dashboard's /api/proxy endpoint. It
// returns an error if the request fails or the dashboard does not answer
// with HTTP 200.
func (s *Server) SetMyselfOnline() error {
	log.Info("mark myself online")
	info := models.ProxyInfo{
		Id:    s.conf.proxyId,
		State: models.PROXY_STATE_ONLINE,
	}
	// Marshalling a plain struct cannot fail here; the error is ignored.
	b, _ := json.Marshal(info)
	url := "http://" + s.conf.dashboardAddr + "/api/proxy"
	res, err := http.Post(url, "application/json", bytes.NewReader(b))
	if err != nil {
		return err
	}
	// Close the body so the underlying connection can be reused
	// (the original code leaked it).
	defer res.Body.Close()
	if res.StatusCode != 200 {
		return errors.New("response code is not 200")
	}
	return nil
}
// serve waits until the dashboard marks this proxy online, then fills all
// slots and starts accepting client connections while processing topology
// events. It runs in its own goroutine (started by New) and closes the
// server on exit.
func (s *Server) serve() {
	defer s.close()

	// Blocks until we are marked online; returns false when offlined/killed.
	if !s.waitOnline() {
		return
	}
	s.rewatchNodes()
	for i := 0; i < router.MaxSlotNum; i++ {
		s.fillSlot(i)
	}
	log.Info("proxy is serving")
	go func() {
		defer s.close()
		s.handleConns()
	}()
	s.loopEvents()
}
// handleConns accepts client connections and hands each one to a session
// goroutine via a buffered channel. It returns when Accept fails, i.e.
// when the listener is closed during shutdown.
func (s *Server) handleConns() {
	ch := make(chan net.Conn, 4096)
	defer close(ch)

	// Single dispatcher goroutine: wraps each connection in a session and
	// serves it concurrently.
	go func() {
		for c := range ch {
			x := router.NewSessionSize(c, s.conf.passwd, s.conf.maxBufSize, s.conf.maxTimeout)
			go x.Serve(s.router, s.conf.maxPipeline)
		}
	}()

	for {
		c, err := s.listener.Accept()
		if err != nil {
			// Listener closed; stop accepting. (Dropped the redundant
			// `else` after return — idiomatic early-exit form.)
			return
		}
		ch <- c
	}
}
// Info returns a copy of the proxy's registration info.
func (s *Server) Info() models.ProxyInfo {
	return s.info
}
// Join blocks until the serve goroutine has finished.
func (s *Server) Join() {
	s.wait.Wait()
}
// Close shuts the proxy down and waits for the serve goroutine to exit.
// It always returns nil.
func (s *Server) Close() error {
	s.close()
	s.wait.Wait()
	return nil
}
// close tears down the listener and router and broadcasts shutdown by
// closing the kill channel; sync.Once makes it safe to call repeatedly
// from multiple goroutines.
func (s *Server) close() {
	s.stop.Do(func() {
		s.listener.Close()
		if s.router != nil {
			s.router.Close()
		}
		close(s.kill)
	})
}
// rewatchProxy re-arms the ZooKeeper watch on this proxy's own node;
// resulting events are delivered to evtbus. Panics if the watch cannot
// be set.
func (s *Server) rewatchProxy() {
	_, err := s.topo.WatchNode(path.Join(models.GetProxyPath(s.topo.ProductName), s.info.Id), s.evtbus)
	if err != nil {
		log.PanicErrorf(err, "watch node failed")
	}
}
// rewatchNodes re-arms the children watch on the product's action path and
// returns the currently known action nodes. Panics if the watch cannot be
// set.
func (s *Server) rewatchNodes() []string {
	nodes, err := s.topo.WatchChildren(models.GetWatchActionPath(s.topo.ProductName), s.evtbus)
	if err != nil {
		log.PanicErrorf(err, "watch children failed")
	}
	return nodes
}
// register creates this proxy's info node and fence node in ZooKeeper.
// An already-existing fence node is tolerated; any other failure panics.
func (s *Server) register() {
	if _, err := s.topo.CreateProxyInfo(&s.info); err != nil {
		log.PanicErrorf(err, "create proxy node failed")
	}
	if _, err := s.topo.CreateProxyFenceNode(&s.info); err != nil && err != zk.ErrNodeExists {
		log.PanicErrorf(err, "create fence node failed")
	}
	// Warn operators: only a graceful kill lets us clean up our ZK nodes.
	log.Warn("********** Attention **********")
	log.Warn("You should use `kill {pid}` rather than `kill -9 {pid}` to stop me,")
	log.Warn("or the node resisted on zk will not be cleaned when I'm quiting and you must remove it manually")
	log.Warn("*******************************")
}
// markOffline removes this proxy's topology state and flags it as
// MARK_OFFLINE locally, which terminates loopEvents (it only runs while
// the state is ONLINE).
func (s *Server) markOffline() {
	s.topo.Close(s.info.Id)
	s.info.State = models.PROXY_STATE_MARK_OFFLINE
}
// waitOnline polls ZooKeeper every 3 seconds until the dashboard flips
// this proxy to ONLINE (returns true) or it is marked offline or killed
// (returns false, after marking itself offline).
func (s *Server) waitOnline() bool {
	for {
		info, err := s.topo.GetProxyInfo(s.info.Id)
		if err != nil {
			log.PanicErrorf(err, "get proxy info failed: %s", s.info.Id)
		}
		switch info.State {
		case models.PROXY_STATE_MARK_OFFLINE:
			log.Infof("mark offline, proxy got offline event: %s", s.info.Id)
			s.markOffline()
			return false
		case models.PROXY_STATE_ONLINE:
			s.info.State = info.State
			log.Infof("we are online: %s", s.info.Id)
			// Start watching our own node for subsequent state changes.
			s.rewatchProxy()
			return true
		}
		// Non-blocking check so a kill during the wait is noticed promptly.
		select {
		case <-s.kill:
			log.Infof("mark offline, proxy is killed: %s", s.info.Id)
			s.markOffline()
			return false
		default:
		}
		log.Infof("wait to be online: %s", s.info.Id)
		time.Sleep(3 * time.Second)
	}
}
// getEventPath extracts the ZooKeeper path from a watch event.
// Panics (type assertion) if evt is not a zk event.
func getEventPath(evt interface{}) string {
	return evt.(topo.Event).Path
}
// needResponse reports whether this proxy is among an action's receivers
// and therefore must acknowledge it. Each receiver entry is expected to be
// a JSON-encoded ProxyInfo matched on Id/Pid/StartAt; an entry that is not
// valid JSON is treated as a bare proxy id and compared directly.
// NOTE(review): a malformed entry that is not our id aborts the whole scan
// (returns false) instead of continuing — confirm this is intended.
func needResponse(receivers []string, self models.ProxyInfo) bool {
	var info models.ProxyInfo
	for _, v := range receivers {
		err := json.Unmarshal([]byte(v), &info)
		if err != nil {
			if v == self.Id {
				return true
			}
			return false
		}
		if info.Id == self.Id && info.Pid == self.Pid && info.StartAt == self.StartAt {
			return true
		}
	}
	return false
}
// groupMaster returns the address of the group's single master server.
// Panics if the group has no master or more than one.
func groupMaster(groupInfo models.ServerGroup) string {
	var master string
	for _, server := range groupInfo.Servers {
		if server.Type == models.SERVER_TYPE_MASTER {
			if master != "" {
				log.Panicf("two master not allowed: %+v", groupInfo)
			}
			master = server.Addr
		}
	}
	if master == "" {
		log.Panicf("master not found: %+v", groupInfo)
	}
	return master
}
// resetSlot clears slot i in the router; used when a slot transitions to
// OFFLINE (see onSlotRangeChange).
func (s *Server) resetSlot(i int) {
	s.router.ResetSlot(i)
}
// fillSlot loads slot i's state from the topology and programs the router
// with its backend master address (and the migration source address when
// the slot is mid-migration). Panics on any topology inconsistency.
func (s *Server) fillSlot(i int) {
	slotInfo, slotGroup, err := s.topo.GetSlotByIndex(i)
	if err != nil {
		// Include the slot index in the message; the original format
		// string passed i with no corresponding verb (go vet error).
		log.PanicErrorf(err, "get slot by index failed, index = %d", i)
	}
	var from string
	var addr = groupMaster(*slotGroup)
	if slotInfo.State.Status == models.SLOT_STATUS_MIGRATE {
		fromGroup, err := s.topo.GetGroup(slotInfo.State.MigrateStatus.From)
		if err != nil {
			log.PanicErrorf(err, "get migrate from failed")
		}
		from = groupMaster(*fromGroup)
		// A slot migrating onto itself indicates corrupt topology data.
		if from == addr {
			log.Panicf("set slot %04d migrate from %s to %s", i, from, addr)
		}
	}
	s.groups[i] = slotInfo.GroupId
	s.router.FillSlot(i, addr, from,
		slotInfo.State.Status == models.SLOT_STATUS_PRE_MIGRATE)
}
// onSlotRangeChange applies a multi-slot status change over [From, To]:
// OFFLINE slots are reset, ONLINE slots are refilled; any other status is
// fatal.
func (s *Server) onSlotRangeChange(param *models.SlotMultiSetParam) {
	log.Infof("slotRangeChange %+v", param)
	for i := param.From; i <= param.To; i++ {
		switch param.Status {
		case models.SLOT_STATUS_OFFLINE:
			s.resetSlot(i)
		case models.SLOT_STATUS_ONLINE:
			s.fillSlot(i)
		default:
			log.Panicf("can not handle status %v", param.Status)
		}
	}
}
// onGroupChange refills every slot currently assigned to the given group
// so the router reloads that group's backend state.
func (s *Server) onGroupChange(groupId int) {
	log.Infof("group changed %d", groupId)
	for i, g := range s.groups {
		if g == groupId {
			s.fillSlot(i)
		}
	}
}
// responseAction acknowledges action seq in the topology; failure is
// logged but deliberately not fatal.
func (s *Server) responseAction(seq int64) {
	log.Infof("send response seq = %d", seq)
	err := s.topo.DoResponse(int(seq), &s.info)
	if err != nil {
		log.InfoErrorf(err, "send response seq = %d failed", seq)
	}
}
// getActionObject fetches action seq from the topology and unmarshals its
// payload into target (passed via the Action's Target field). Panics on
// failure.
func (s *Server) getActionObject(seq int, target interface{}) {
	act := &models.Action{Target: target}
	err := s.topo.GetActionWithSeqObject(int64(seq), act)
	if err != nil {
		log.PanicErrorf(err, "get action object failed, seq = %d", seq)
	}
	log.Infof("action %+v", act)
}
// checkAndDoTopoChange fetches action seq and, if this proxy is among its
// receivers, applies the topology change (slot fill, group change, or
// multi-slot update). It returns true when the action was applied and
// therefore needs an acknowledgement.
func (s *Server) checkAndDoTopoChange(seq int) bool {
	act, err := s.topo.GetActionWithSeq(int64(seq))
	if err != nil { //todo: error is not "not exist"
		log.PanicErrorf(err, "action failed, seq = %d", seq)
	}
	if !needResponse(act.Receivers, s.info) { //no need to response
		return false
	}
	log.Warnf("action %v receivers %v", seq, act.Receivers)
	switch act.Type {
	case models.ACTION_TYPE_SLOT_MIGRATE, models.ACTION_TYPE_SLOT_CHANGED,
		models.ACTION_TYPE_SLOT_PREMIGRATE:
		slot := &models.Slot{}
		s.getActionObject(seq, slot)
		s.fillSlot(slot.Id)
	case models.ACTION_TYPE_SERVER_GROUP_CHANGED:
		serverGroup := &models.ServerGroup{}
		s.getActionObject(seq, serverGroup)
		s.onGroupChange(serverGroup.Id)
	case models.ACTION_TYPE_SERVER_GROUP_REMOVE:
		//do not care
	case models.ACTION_TYPE_MULTI_SLOT_CHANGED:
		param := &models.SlotMultiSetParam{}
		s.getActionObject(seq, param)
		s.onSlotRangeChange(param)
	default:
		log.Panicf("unknown action %+v", act)
	}
	return true
}
// processAction handles one event from evtbus. Events on our own proxy
// node re-check our state (offline/online); any other event re-arms the
// action watch and processes pending actions. In this revision the break
// in the scan below is disabled, so index lands on the LAST unseen
// sequence and only the latest action is handled.
func (s *Server) processAction(e interface{}) {
	// Event on our own proxy node: refresh state from the topology.
	if strings.Index(getEventPath(e), models.GetProxyPath(s.topo.ProductName)) == 0 {
		info, err := s.topo.GetProxyInfo(s.info.Id)
		if err != nil {
			log.PanicErrorf(err, "get proxy info failed: %s", s.info.Id)
		}
		switch info.State {
		case models.PROXY_STATE_MARK_OFFLINE:
			log.Infof("mark offline, proxy got offline event: %s", s.info.Id)
			s.markOffline()
		case models.PROXY_STATE_ONLINE:
			s.rewatchProxy()
		default:
			log.Panicf("unknown proxy state %v", info)
		}
		return
	}

	//re-watch
	nodes := s.rewatchNodes()
	seqs, err := models.ExtraSeqList(nodes)
	if err != nil {
		log.PanicErrorf(err, "get seq list failed")
	}
	if len(seqs) == 0 || !s.topo.IsChildrenChangedEvent(e) {
		return
	}

	//get last pos: with the break disabled this keeps overwriting index,
	//ending on the last (newest) unseen sequence.
	index := -1
	for i, seq := range seqs {
		if s.lastActionSeq < seq {
			index = i
			//break
			//only handle latest action
		}
	}
	if index < 0 {
		return
	}

	// Apply the selected action(s), skipping ones we already acknowledged
	// (our response node exists).
	actions := seqs[index:]
	for _, seq := range actions {
		exist, err := s.topo.Exist(path.Join(s.topo.GetActionResponsePath(seq), s.info.Id))
		if err != nil {
			log.PanicErrorf(err, "get action failed")
		}
		if exist {
			continue
		}
		if s.checkAndDoTopoChange(seq) {
			s.responseAction(int64(seq))
		}
	}

	s.lastActionSeq = seqs[len(seqs)-1]
}
// loopEvents is the proxy's main loop. It runs while the proxy is ONLINE,
// dispatching topology events from evtbus, honoring kill, and sending a
// router keep-alive every pingPeriod ticks (one tick per second).
func (s *Server) loopEvents() {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	var tick int = 0
	for s.info.State == models.PROXY_STATE_ONLINE {
		select {
		case <-s.kill:
			log.Infof("mark offline, proxy is killed: %s", s.info.Id)
			s.markOffline()
		case e := <-s.evtbus:
			evtPath := getEventPath(e)
			log.Infof("got event %s, %v, lastActionSeq %d", s.info.Id, e, s.lastActionSeq)
			// Action-response events carry the seq in the path's base name;
			// ones older than lastActionSeq are skipped outright.
			if strings.Index(evtPath, models.GetActionResponsePath(s.conf.productName)) == 0 {
				seq, err := strconv.Atoi(path.Base(evtPath))
				if err != nil {
					log.ErrorErrorf(err, "parse action seq failed")
				} else {
					if seq < s.lastActionSeq {
						log.Infof("ignore seq = %d", seq)
						continue
					}
				}
			}
			s.processAction(e)
		case <-ticker.C:
			// pingPeriod == 0 disables keep-alives.
			if maxTick := s.conf.pingPeriod; maxTick != 0 {
				if tick++; tick >= maxTick {
					s.router.KeepAlive()
					tick = 0
				}
			}
		}
	}
}
|
// Copyright 2016 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
package repro
import (
"bytes"
"fmt"
"os"
"sort"
"sync"
"time"
"github.com/google/syzkaller/pkg/csource"
"github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/pkg/report"
"github.com/google/syzkaller/prog"
"github.com/google/syzkaller/syz-manager/mgrconfig"
"github.com/google/syzkaller/vm"
)
// Stats records the repro session log and the time spent in each phase of
// reproducer extraction.
type Stats struct {
	Log              []byte        // accumulated repro log (written via ctx.reproLog — definition not shown here)
	ExtractProgTime  time.Duration // time spent extracting the guilty program
	MinimizeProgTime time.Duration // time spent minimizing calls/arguments
	SimplifyProgTime time.Duration // time spent simplifying repro options
	ExtractCTime     time.Duration // time spent extracting a C reproducer
	SimplifyCTime    time.Duration // time spent simplifying the C reproducer
}
// Result describes a successfully extracted reproducer.
type Result struct {
	Prog     *prog.Prog      // the reproducing program
	Duration time.Duration   // how long the program must run to trigger the crash
	Opts     csource.Options // execution/codegen options the repro needs
	CRepro   bool            // whether the crash also reproduces from a generated C program
	Stats    Stats           // timing/log statistics for the extraction

	// Information about the final (non-symbolized) crash that we reproduced.
	// Can be different from what we started reproducing.
	Report *report.Report
}
// context carries the state shared by one repro session.
type context struct {
	cfg          *mgrconfig.Config
	reporter     report.Reporter
	crashTitle   string         // title of the crash being reproduced ("hang" if unparsable)
	instances    chan *instance // booted VMs ready for testing
	bootRequests chan int       // VM indexes waiting to be (re)booted
	stats        Stats          // timing/log statistics accumulated during repro
	report       *report.Report // report of the last reproduced crash
}
// instance is a booted VM with the syz-execprog and syz-executor binaries
// already copied into it.
type instance struct {
	*vm.Instance
	index       int    // index of this VM in the pool
	execprogBin string // in-VM path of the copied syz-execprog binary
	executorBin string // in-VM path of the copied syz-executor binary
}
// Run tries to extract a minimized reproducer from crashLog. It boots up
// to len(vmIndexes) VMs (each boot retried up to 3 times), feeds them to
// the repro pipeline, and — if the resulting crash report is corrupted —
// reruns the repro up to 3 more times to obtain a clean one. Returns a nil
// *Result (and nil error) when no reproducer could be found.
func Run(crashLog []byte, cfg *mgrconfig.Config, reporter report.Reporter, vmPool *vm.Pool,
	vmIndexes []int) (*Result, error) {
	if len(vmIndexes) == 0 {
		return nil, fmt.Errorf("no VMs provided")
	}
	target, err := prog.GetTarget(cfg.TargetOS, cfg.TargetArch)
	if err != nil {
		return nil, err
	}
	entries := target.ParseLog(crashLog)
	if len(entries) == 0 {
		return nil, fmt.Errorf("crash log does not contain any programs")
	}
	crashStart := len(crashLog) // assuming VM hanged
	crashTitle := "hang"
	if rep := reporter.Parse(crashLog); rep != nil {
		crashStart = rep.StartPos
		crashTitle = rep.Title
	}
	ctx := &context{
		cfg:          cfg,
		reporter:     reporter,
		crashTitle:   crashTitle,
		instances:    make(chan *instance, len(vmIndexes)),
		bootRequests: make(chan int, len(vmIndexes)),
	}
	ctx.reproLog(0, "%v programs, %v VMs", len(entries), len(vmIndexes))
	var wg sync.WaitGroup
	wg.Add(len(vmIndexes))
	for _, vmIndex := range vmIndexes {
		ctx.bootRequests <- vmIndex
		// One booter goroutine per VM slot: it keeps (re)booting VMs for
		// every index posted to bootRequests until that channel is closed.
		go func() {
			defer wg.Done()
			for vmIndex := range ctx.bootRequests {
				var inst *instance
				maxTry := 3
				for try := 0; try < maxTry; try++ {
					// Abort retries if a global shutdown is in progress.
					select {
					case <-vm.Shutdown:
						try = maxTry
						continue
					default:
					}
					vmInst, err := vmPool.Create(vmIndex)
					if err != nil {
						ctx.reproLog(0, "failed to create VM: %v", err)
						time.Sleep(10 * time.Second)
						continue
					}
					execprogBin, err := vmInst.Copy(cfg.SyzExecprogBin)
					if err != nil {
						ctx.reproLog(0, "failed to copy to VM: %v", err)
						vmInst.Close()
						time.Sleep(10 * time.Second)
						continue
					}
					executorBin, err := vmInst.Copy(cfg.SyzExecutorBin)
					if err != nil {
						ctx.reproLog(0, "failed to copy to VM: %v", err)
						vmInst.Close()
						time.Sleep(10 * time.Second)
						continue
					}
					inst = &instance{
						Instance:    vmInst,
						index:       vmIndex,
						execprogBin: execprogBin,
						executorBin: executorBin,
					}
					break
				}
				if inst == nil {
					// Could not boot this VM after all retries; give up on
					// this slot entirely.
					break
				}
				ctx.instances <- inst
			}
		}()
	}
	// Close instances once every booter has drained bootRequests, so the
	// cleanup loop at the bottom terminates.
	go func() {
		wg.Wait()
		close(ctx.instances)
	}()
	res, err := ctx.repro(entries, crashStart)
	if err != nil {
		return nil, err
	}
	if res != nil {
		ctx.reproLog(3, "repro crashed as (corrupted=%v):\n%s",
			ctx.report.Corrupted, ctx.report.Report)
		// Try to rerun the repro if the report is corrupted.
		for attempts := 0; ctx.report.Corrupted && attempts < 3; attempts++ {
			ctx.reproLog(3, "report is corrupted, running repro again")
			if res.CRepro {
				_, err = ctx.testCProg(res.Prog, res.Duration, res.Opts)
			} else {
				_, err = ctx.testProg(res.Prog, res.Duration, res.Opts)
			}
			if err != nil {
				return nil, err
			}
		}
		ctx.reproLog(3, "final repro crashed as (corrupted=%v):\n%s",
			ctx.report.Corrupted, ctx.report.Report)
		res.Report = ctx.report
		res.Stats = ctx.stats
	}
	close(ctx.bootRequests)
	for inst := range ctx.instances {
		inst.Close()
	}
	return res, err
}
// repro drives the full pipeline on one crash: drop programs logged after
// the crash, extract a guilty program, minimize it, then try to obtain and
// simplify a C reproducer. Returns nil if no program reproduces the crash.
func (ctx *context) repro(entries []*prog.LogEntry, crashStart int) (*Result, error) {
	// Cut programs that were executed after crash.
	for i, ent := range entries {
		if ent.Start > crashStart {
			entries = entries[:i]
			break
		}
	}

	reproStart := time.Now()
	defer func() {
		ctx.reproLog(3, "reproducing took %s", time.Since(reproStart))
	}()

	res, err := ctx.extractProg(entries)
	if err != nil {
		return nil, err
	}
	if res == nil {
		return nil, nil
	}
	// NOTE(review): Opts.Repro is cleared on every exit path — presumably
	// it only applies while testing; confirm against csource.Options docs.
	defer func() {
		if res != nil {
			res.Opts.Repro = false
		}
	}()
	res, err = ctx.minimizeProg(res)
	if err != nil {
		return nil, err
	}

	// Try extracting C repro without simplifying options first.
	res, err = ctx.extractC(res)
	if err != nil {
		return nil, err
	}

	// Simplify options and try extracting C repro.
	if !res.CRepro {
		res, err = ctx.simplifyProg(res)
		if err != nil {
			return nil, err
		}
	}

	// Simplify C related options.
	if res.CRepro {
		res, err = ctx.simplifyC(res)
		if err != nil {
			return nil, err
		}
	}

	return res, nil
}
// extractProg finds a program (or concatenation of programs) from the
// crash log that reproduces the crash. It first tries each proc's last
// program individually, then bisects the whole log, escalating through
// increasing timeouts. Returns nil if nothing reproduces.
func (ctx *context) extractProg(entries []*prog.LogEntry) (*Result, error) {
	ctx.reproLog(2, "extracting reproducer from %v programs", len(entries))
	start := time.Now()
	defer func() {
		ctx.stats.ExtractProgTime = time.Since(start)
	}()

	// Extract last program on every proc. It's the one most likely to
	// have caused the crash.
	procs := make(map[int]int)
	for i, ent := range entries {
		procs[ent.Proc] = i
	}
	var indices []int
	for _, idx := range procs {
		indices = append(indices, idx)
	}
	sort.Ints(indices)
	var lastEntries []*prog.LogEntry
	for i := len(indices) - 1; i >= 0; i-- {
		lastEntries = append(lastEntries, entries[indices[i]])
	}

	// The shortest duration is 10 seconds to detect simple crashes (i.e. no races and no hangs).
	// The longest duration is 5 minutes to catch races and hangs. Note that this value must be larger
	// than hang/no output detection duration in vm.MonitorExecution, which is currently set to 3 mins.
	timeouts := []time.Duration{10 * time.Second, 1 * time.Minute, 5 * time.Minute}

	for _, timeout := range timeouts {
		// Execute each program separately to detect simple crashes caused by a single program.
		// Programs are executed in reverse order, usually the last program is the guilty one.
		res, err := ctx.extractProgSingle(reverseEntries(lastEntries), timeout)
		if err != nil {
			return nil, err
		}
		if res != nil {
			ctx.reproLog(3, "found reproducer with %d syscalls", len(res.Prog.Calls))
			return res, nil
		}

		// Don't try bisecting if there's only one entry.
		if len(entries) == 1 {
			continue
		}

		// Execute all programs and bisect the log to find multiple guilty programs.
		res, err = ctx.extractProgBisect(reverseEntries(entries), timeout)
		if err != nil {
			return nil, err
		}
		if res != nil {
			ctx.reproLog(3, "found reproducer with %d syscalls", len(res.Prog.Calls))
			return res, nil
		}
	}

	ctx.reproLog(0, "failed to extract reproducer")
	return nil, nil
}
// extractProgSingle runs each entry's program on its own for the given
// duration and returns a Result for the first one that crashes the kernel
// (with Duration padded to 1.5x), or nil if none do.
func (ctx *context) extractProgSingle(entries []*prog.LogEntry, duration time.Duration) (*Result, error) {
	ctx.reproLog(3, "single: executing %d programs separately with timeout %s", len(entries), duration)

	opts := csource.DefaultOpts(ctx.cfg)
	for _, ent := range entries {
		// Carry over the entry's fault-injection parameters, clamping the
		// fault call index into the program's call range.
		opts.Fault = ent.Fault
		opts.FaultCall = ent.FaultCall
		opts.FaultNth = ent.FaultNth
		if opts.FaultCall < 0 || opts.FaultCall >= len(ent.P.Calls) {
			opts.FaultCall = len(ent.P.Calls) - 1
		}
		crashed, err := ctx.testProg(ent.P, duration, opts)
		if err != nil {
			return nil, err
		}
		if crashed {
			res := &Result{
				Prog:     ent.P,
				Duration: duration * 3 / 2,
				Opts:     opts,
			}
			ctx.reproLog(3, "single: successfully extracted reproducer")
			return res, nil
		}
	}

	ctx.reproLog(3, "single: failed to extract reproducer")
	return nil, nil
}
// extractProgBisect bisects the log to a minimal set of guilty programs,
// then tries to concatenate them into a single reproducing program, first
// without and then with fault injection. Returns nil if concatenation
// does not reproduce the crash.
func (ctx *context) extractProgBisect(entries []*prog.LogEntry, baseDuration time.Duration) (*Result, error) {
	ctx.reproLog(3, "bisect: bisecting %d programs with base timeout %s", len(entries), baseDuration)

	opts := csource.DefaultOpts(ctx.cfg)
	// Timeout grows with the number of programs being executed together.
	duration := func(entries int) time.Duration {
		return baseDuration + time.Duration((entries/4))*time.Second
	}

	// Bisect the log to find multiple guilty programs.
	entries, err := ctx.bisectProgs(entries, func(progs []*prog.LogEntry) (bool, error) {
		return ctx.testProgs(progs, duration(len(progs)), opts)
	})
	if err != nil {
		return nil, err
	}
	if len(entries) == 0 {
		return nil, nil
	}

	// TODO: Minimize each program before concatenation.
	// TODO: Return multiple programs if concatenation fails.

	ctx.reproLog(3, "bisect: %d programs left: \n\n%s\n", len(entries), encodeEntries(entries))
	ctx.reproLog(3, "bisect: trying to concatenate")

	// Concatenate all programs into one.
	prog := &prog.Prog{
		Target: entries[0].P.Target,
	}
	for _, entry := range entries {
		prog.Calls = append(prog.Calls, entry.P.Calls...)
	}
	dur := duration(len(entries)) * 3 / 2

	// Execute the program without fault injection.
	crashed, err := ctx.testProg(prog, dur, opts)
	if err != nil {
		return nil, err
	}
	if crashed {
		res := &Result{
			Prog:     prog,
			Duration: dur,
			Opts:     opts,
		}
		ctx.reproLog(3, "bisect: concatenation succeeded")
		return res, nil
	}

	// Try with fault injection: re-run once per faulting entry, with the
	// fault call index offset by the calls concatenated before it.
	calls := 0
	for _, entry := range entries {
		if entry.Fault {
			opts.FaultCall = calls + entry.FaultCall
			opts.FaultNth = entry.FaultNth
			if entry.FaultCall < 0 || entry.FaultCall >= len(entry.P.Calls) {
				opts.FaultCall = calls + len(entry.P.Calls) - 1
			}
			crashed, err := ctx.testProg(prog, dur, opts)
			if err != nil {
				return nil, err
			}
			if crashed {
				res := &Result{
					Prog:     prog,
					Duration: dur,
					Opts:     opts,
				}
				ctx.reproLog(3, "bisect: concatenation succeeded with fault injection")
				return res, nil
			}
		}
		calls += len(entry.P.Calls)
	}

	ctx.reproLog(3, "bisect: concatenation failed")
	return nil, nil
}
// Minimize calls and arguments.
func (ctx *context) minimizeProg(res *Result) (*Result, error) {
ctx.reproLog(2, "minimizing guilty program")
start := time.Now()
defer func() {
ctx.stats.MinimizeProgTime = time.Since(start)
}()
call := -1
if res.Opts.Fault {
call = res.Opts.FaultCall
}
res.Prog, res.Opts.FaultCall = prog.Minimize(res.Prog, call, true,
func(p1 *prog.Prog, callIndex int) bool {
crashed, err := ctx.testProg(p1, res.Duration, res.Opts)
if err != nil {
ctx.reproLog(0, "minimization failed with %v", err)
return false
}
return crashed
})
return res, nil
}
// simplifyProg tries each option simplification in progSimplifies; a
// simplification is kept only if the simplified program still crashes.
// After each kept simplification it attempts C repro extraction and
// returns early once a C repro is found.
func (ctx *context) simplifyProg(res *Result) (*Result, error) {
	ctx.reproLog(2, "simplifying guilty program")
	start := time.Now()
	defer func() {
		ctx.stats.SimplifyProgTime = time.Since(start)
	}()
	for _, simplify := range progSimplifies {
		opts := res.Opts
		if !simplify(&opts) {
			continue
		}
		crashed, err := ctx.testProg(res.Prog, res.Duration, opts)
		if err != nil {
			return nil, err
		}
		if !crashed {
			continue
		}
		res.Opts = opts
		// Simplification successful, try extracting C repro.
		// Plain assignment (not :=) so res/err are not shadowed; the
		// previous code shadowed res here, which only happened to work
		// because extractC returns its argument.
		res, err = ctx.extractC(res)
		if err != nil {
			return nil, err
		}
		if res.CRepro {
			return res, nil
		}
	}
	return res, nil
}
// extractC compiles the program into a C reproducer and checks whether the
// binary still triggers the crash, recording the verdict in res.CRepro.
func (ctx *context) extractC(res *Result) (*Result, error) {
	ctx.reproLog(2, "extracting C reproducer")
	start := time.Now()
	defer func() { ctx.stats.ExtractCTime = time.Since(start) }()
	ok, err := ctx.testCProg(res.Prog, res.Duration, res.Opts)
	if err != nil {
		return nil, err
	}
	res.CRepro = ok
	return res, nil
}
// Try to simplify the C reproducer.
func (ctx *context) simplifyC(res *Result) (*Result, error) {
ctx.reproLog(2, "simplifying C reproducer")
start := time.Now()
defer func() {
ctx.stats.SimplifyCTime = time.Since(start)
}()
for _, simplify := range cSimplifies {
opts := res.Opts
if simplify(&opts) {
crashed, err := ctx.testCProg(res.Prog, res.Duration, opts)
if err != nil {
return nil, err
}
if crashed {
res.Opts = opts
}
}
}
return res, nil
}
func (ctx *context) testProg(p *prog.Prog, duration time.Duration, opts csource.Options) (crashed bool, err error) {
entry := prog.LogEntry{P: p}
if opts.Fault {
entry.Fault = true
entry.FaultCall = opts.FaultCall
entry.FaultNth = opts.FaultNth
}
return ctx.testProgs([]*prog.LogEntry{&entry}, duration, opts)
}
func (ctx *context) testProgs(entries []*prog.LogEntry, duration time.Duration, opts csource.Options) (
crashed bool, err error) {
inst := <-ctx.instances
if inst == nil {
return false, fmt.Errorf("all VMs failed to boot")
}
defer ctx.returnInstance(inst)
if len(entries) == 0 {
return false, fmt.Errorf("no programs to execute")
}
pstr := encodeEntries(entries)
progFile, err := osutil.WriteTempFile(pstr)
if err != nil {
return false, err
}
defer os.Remove(progFile)
vmProgFile, err := inst.Copy(progFile)
if err != nil {
return false, fmt.Errorf("failed to copy to VM: %v", err)
}
repeat := 1
if opts.Repeat {
repeat = 0
}
if !opts.Fault {
opts.FaultCall = -1
}
program := entries[0].P.String()
if len(entries) > 1 {
program = "["
for i, entry := range entries {
program += fmt.Sprintf("%v", len(entry.P.Calls))
if i != len(entries)-1 {
program += ", "
}
}
program += "]"
}
command := fmt.Sprintf("%v -executor %v -arch=%v -cover=0 -procs=%v -repeat=%v"+
" -sandbox %v -threaded=%v -collide=%v %v",
inst.execprogBin, inst.executorBin, ctx.cfg.TargetArch, opts.Procs, repeat,
opts.Sandbox, opts.Threaded, opts.Collide, vmProgFile)
ctx.reproLog(2, "testing program (duration=%v, %+v): %s", duration, opts, program)
return ctx.testImpl(inst.Instance, command, duration)
}
// testCProg generates C source for p, builds it, and checks whether the
// resulting binary reproduces the crash.
func (ctx *context) testCProg(p *prog.Prog, duration time.Duration, opts csource.Options) (crashed bool, err error) {
	src, err := csource.Write(p, opts)
	if err != nil {
		return false, err
	}
	bin, err := csource.Build(p.Target, src)
	if err != nil {
		return false, err
	}
	defer os.Remove(bin)
	ctx.reproLog(2, "testing compiled C program (duration=%v, %+v): %s", duration, opts, p)
	// testBin already returns (false, err) on failure, so no extra check is needed.
	return ctx.testBin(bin, duration)
}
// testBin copies the binary into a booted VM and executes it there.
func (ctx *context) testBin(bin string, duration time.Duration) (crashed bool, err error) {
	inst := <-ctx.instances
	if inst == nil {
		return false, fmt.Errorf("all VMs failed to boot")
	}
	defer ctx.returnInstance(inst)
	vmBin, err := inst.Copy(bin)
	if err != nil {
		return false, fmt.Errorf("failed to copy to VM: %v", err)
	}
	return ctx.testImpl(inst.Instance, vmBin, duration)
}
func (ctx *context) testImpl(inst *vm.Instance, command string, duration time.Duration) (crashed bool, err error) {
outc, errc, err := inst.Run(duration, nil, command)
if err != nil {
return false, fmt.Errorf("failed to run command in VM: %v", err)
}
rep := inst.MonitorExecution(outc, errc, ctx.reporter, true)
if rep == nil {
ctx.reproLog(2, "program did not crash")
return false, nil
}
if rep.Suppressed {
ctx.reproLog(2, "suppressed program crash: %v", rep.Title)
return false, nil
}
ctx.report = rep
ctx.reproLog(2, "program crashed: %v", rep.Title)
return true, nil
}
func (ctx *context) returnInstance(inst *instance) {
ctx.bootRequests <- inst.index
inst.Close()
}
func (ctx *context) reproLog(level int, format string, args ...interface{}) {
prefix := fmt.Sprintf("reproducing crash '%v': ", ctx.crashTitle)
log.Logf(level, prefix+format, args...)
ctx.stats.Log = append(ctx.stats.Log, []byte(fmt.Sprintf(format, args...)+"\n")...)
}
func (ctx *context) bisectProgs(progs []*prog.LogEntry, pred func([]*prog.LogEntry) (bool, error)) (
[]*prog.LogEntry, error) {
ctx.reproLog(3, "bisect: bisecting %d programs", len(progs))
compose := func(guilty1, guilty2 [][]*prog.LogEntry, chunk []*prog.LogEntry) []*prog.LogEntry {
progs := []*prog.LogEntry{}
for _, c := range guilty1 {
progs = append(progs, c...)
}
progs = append(progs, chunk...)
for _, c := range guilty2 {
progs = append(progs, c...)
}
return progs
}
logGuilty := func(guilty [][]*prog.LogEntry) string {
log := "["
for i, chunk := range guilty {
log += fmt.Sprintf("<%d>", len(chunk))
if i != len(guilty)-1 {
log += ", "
}
}
log += "]"
return log
}
ctx.reproLog(3, "bisect: executing all %d programs", len(progs))
crashed, err := pred(progs)
if err != nil {
return nil, err
}
if !crashed {
ctx.reproLog(3, "bisect: didn't crash")
return nil, nil
}
guilty := [][]*prog.LogEntry{progs}
again:
ctx.reproLog(3, "bisect: guilty chunks: %v", logGuilty(guilty))
for i, chunk := range guilty {
if len(chunk) == 1 {
continue
}
guilty1 := guilty[:i]
guilty2 := guilty[i+1:]
ctx.reproLog(3, "bisect: guilty chunks split: %v, <%v>, %v", logGuilty(guilty1), len(chunk), logGuilty(guilty2))
chunk1 := chunk[0 : len(chunk)/2]
chunk2 := chunk[len(chunk)/2:]
ctx.reproLog(3, "bisect: chunk split: <%v> => <%v>, <%v>", len(chunk), len(chunk1), len(chunk2))
ctx.reproLog(3, "bisect: triggering crash without chunk #1")
progs := compose(guilty1, guilty2, chunk2)
crashed, err := pred(progs)
if err != nil {
return nil, err
}
if crashed {
guilty = nil
guilty = append(guilty, guilty1...)
guilty = append(guilty, chunk2)
guilty = append(guilty, guilty2...)
ctx.reproLog(3, "bisect: crashed, chunk #1 evicted")
goto again
}
ctx.reproLog(3, "bisect: triggering crash without chunk #2")
progs = compose(guilty1, guilty2, chunk1)
crashed, err = pred(progs)
if err != nil {
return nil, err
}
if crashed {
guilty = nil
guilty = append(guilty, guilty1...)
guilty = append(guilty, chunk1)
guilty = append(guilty, guilty2...)
ctx.reproLog(3, "bisect: crashed, chunk #2 evicted")
goto again
}
guilty = nil
guilty = append(guilty, guilty1...)
guilty = append(guilty, chunk1)
guilty = append(guilty, chunk2)
guilty = append(guilty, guilty2...)
ctx.reproLog(3, "bisect: not crashed, both chunks required")
goto again
}
progs = nil
for _, chunk := range guilty {
if len(chunk) != 1 {
return nil, fmt.Errorf("bad bisect result: %v", guilty)
}
progs = append(progs, chunk[0])
}
ctx.reproLog(3, "bisect: success, %d programs left", len(progs))
return progs, nil
}
// reverseEntries reverses entries in place and returns the same slice.
func reverseEntries(entries []*prog.LogEntry) []*prog.LogEntry {
	for i, j := 0, len(entries)-1; i < j; i, j = i+1, j-1 {
		entries[i], entries[j] = entries[j], entries[i]
	}
	return entries
}
func encodeEntries(entries []*prog.LogEntry) []byte {
buf := new(bytes.Buffer)
for _, ent := range entries {
opts := ""
if ent.Fault {
opts = fmt.Sprintf(" (fault-call:%v fault-nth:%v)", ent.FaultCall, ent.FaultNth)
}
fmt.Fprintf(buf, "executing program %v%v:\n%v", ent.Proc, opts, string(ent.P.Serialize()))
}
return buf.Bytes()
}
type Simplify func(opts *csource.Options) bool
var progSimplifies = []Simplify{
func(opts *csource.Options) bool {
if !opts.Fault {
return false
}
opts.Fault = false
opts.FaultCall = 0
opts.FaultNth = 0
return true
},
func(opts *csource.Options) bool {
if !opts.Collide {
return false
}
opts.Collide = false
return true
},
func(opts *csource.Options) bool {
if opts.Collide || !opts.Threaded {
return false
}
opts.Threaded = false
return true
},
func(opts *csource.Options) bool {
if !opts.Repeat {
return false
}
opts.Repeat = false
opts.WaitRepeat = false
opts.EnableCgroups = false
opts.ResetNet = false
opts.Procs = 1
return true
},
func(opts *csource.Options) bool {
if opts.Procs == 1 {
return false
}
opts.Procs = 1
return true
},
func(opts *csource.Options) bool {
if opts.Sandbox == "none" {
return false
}
opts.Sandbox = "none"
return true
},
}
var cSimplifies = append(progSimplifies, []Simplify{
func(opts *csource.Options) bool {
if opts.Sandbox == "" {
return false
}
opts.Sandbox = ""
opts.EnableTun = false
opts.EnableCgroups = false
opts.EnableNetdev = false
opts.ResetNet = false
return true
},
func(opts *csource.Options) bool {
if !opts.EnableTun {
return false
}
opts.EnableTun = false
return true
},
func(opts *csource.Options) bool {
if !opts.EnableCgroups {
return false
}
opts.EnableCgroups = false
return true
},
func(opts *csource.Options) bool {
if !opts.EnableNetdev {
return false
}
opts.EnableNetdev = false
return true
},
func(opts *csource.Options) bool {
if !opts.ResetNet {
return false
}
opts.ResetNet = false
return true
},
func(opts *csource.Options) bool {
if !opts.UseTmpDir || opts.Sandbox == "namespace" || opts.EnableCgroups {
return false
}
opts.UseTmpDir = false
return true
},
func(opts *csource.Options) bool {
if !opts.HandleSegv {
return false
}
opts.HandleSegv = false
return true
},
func(opts *csource.Options) bool {
if !opts.WaitRepeat {
return false
}
opts.WaitRepeat = false
opts.EnableCgroups = false
opts.ResetNet = false
return true
},
}...)
pkg/repro: pass target OS to execprog
Pass the target OS to execprog, and pass the executor path the same way
the manager passes it to the fuzzer.
// Copyright 2016 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
package repro
import (
"bytes"
"fmt"
"os"
"sort"
"sync"
"time"
"github.com/google/syzkaller/pkg/csource"
"github.com/google/syzkaller/pkg/log"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/pkg/report"
"github.com/google/syzkaller/prog"
"github.com/google/syzkaller/syz-manager/mgrconfig"
"github.com/google/syzkaller/vm"
)
// Stats describes how long each stage of a reproduction attempt took,
// plus the accumulated repro log.
type Stats struct {
	Log []byte // accumulated reproLog output (without the crash-title prefix)
	ExtractProgTime time.Duration
	MinimizeProgTime time.Duration
	SimplifyProgTime time.Duration
	ExtractCTime time.Duration
	SimplifyCTime time.Duration
}
// Result is a successful reproduction: the reproducer program, the options
// required to trigger the crash, and statistics about the extraction.
type Result struct {
	Prog *prog.Prog // the reproducer program
	Duration time.Duration // test duration used when the reproducer crashed
	Opts csource.Options // execution options required to reproduce
	CRepro bool // whether the crash also reproduces as a compiled C program
	Stats Stats
	// Information about the final (non-symbolized) crash that we reproduced.
	// Can be different from what we started reproducing.
	Report *report.Report
}
// context holds all state of a single reproduction attempt.
type context struct {
	cfg *mgrconfig.Config
	reporter report.Reporter
	crashTitle string // title of the crash being reproduced (used as a log prefix)
	instances chan *instance // booted VM instances ready for testing; nil value => boot gave up
	bootRequests chan int // VM pool indexes that need (re)booting
	stats Stats
	report *report.Report // the last crash report obtained while testing
}
// instance is a booted VM with the execprog/executor binaries copied in.
type instance struct {
	*vm.Instance
	index int // index into the VM pool, re-queued on bootRequests when done
	execprogBin string // path to syz-execprog inside the VM
	executorBin string // path to syz-executor inside the VM
}
// Run attempts to extract a reproducer for the crash in crashLog, using the
// VMs with the given pool indexes. It returns a nil Result (and nil error)
// if no reproducer could be found.
func Run(crashLog []byte, cfg *mgrconfig.Config, reporter report.Reporter, vmPool *vm.Pool,
	vmIndexes []int) (*Result, error) {
	if len(vmIndexes) == 0 {
		return nil, fmt.Errorf("no VMs provided")
	}
	target, err := prog.GetTarget(cfg.TargetOS, cfg.TargetArch)
	if err != nil {
		return nil, err
	}
	entries := target.ParseLog(crashLog)
	if len(entries) == 0 {
		return nil, fmt.Errorf("crash log does not contain any programs")
	}
	crashStart := len(crashLog) // assuming VM hanged
	crashTitle := "hang"
	if rep := reporter.Parse(crashLog); rep != nil {
		crashStart = rep.StartPos
		crashTitle = rep.Title
	}
	ctx := &context{
		cfg: cfg,
		reporter: reporter,
		crashTitle: crashTitle,
		instances: make(chan *instance, len(vmIndexes)),
		bootRequests: make(chan int, len(vmIndexes)),
	}
	ctx.reproLog(0, "%v programs, %v VMs", len(entries), len(vmIndexes))
	var wg sync.WaitGroup
	wg.Add(len(vmIndexes))
	// One worker goroutine per VM index. Each boot request is served by
	// creating a VM and copying the execprog/executor binaries into it;
	// ready instances are delivered on ctx.instances.
	for _, vmIndex := range vmIndexes {
		ctx.bootRequests <- vmIndex
		go func() {
			defer wg.Done()
			for vmIndex := range ctx.bootRequests {
				var inst *instance
				// Retry creation/copying a few times before giving up.
				maxTry := 3
				for try := 0; try < maxTry; try++ {
					select {
					case <-vm.Shutdown:
						// Global shutdown requested: stop retrying.
						try = maxTry
						continue
					default:
					}
					vmInst, err := vmPool.Create(vmIndex)
					if err != nil {
						ctx.reproLog(0, "failed to create VM: %v", err)
						time.Sleep(10 * time.Second)
						continue
					}
					execprogBin, err := vmInst.Copy(cfg.SyzExecprogBin)
					if err != nil {
						ctx.reproLog(0, "failed to copy to VM: %v", err)
						vmInst.Close()
						time.Sleep(10 * time.Second)
						continue
					}
					executorBin, err := vmInst.Copy(cfg.SyzExecutorBin)
					if err != nil {
						ctx.reproLog(0, "failed to copy to VM: %v", err)
						vmInst.Close()
						time.Sleep(10 * time.Second)
						continue
					}
					inst = &instance{
						Instance: vmInst,
						index: vmIndex,
						execprogBin: execprogBin,
						executorBin: executorBin,
					}
					break
				}
				if inst == nil {
					// Permanent boot failure (or shutdown): this worker exits.
					break
				}
				ctx.instances <- inst
			}
		}()
	}
	// Once every worker has exited, close ctx.instances so receivers get a
	// nil instance instead of blocking forever.
	go func() {
		wg.Wait()
		close(ctx.instances)
	}()
	res, err := ctx.repro(entries, crashStart)
	if err != nil {
		return nil, err
	}
	if res != nil {
		ctx.reproLog(3, "repro crashed as (corrupted=%v):\n%s",
			ctx.report.Corrupted, ctx.report.Report)
		// Try to rerun the repro if the report is corrupted.
		for attempts := 0; ctx.report.Corrupted && attempts < 3; attempts++ {
			ctx.reproLog(3, "report is corrupted, running repro again")
			if res.CRepro {
				_, err = ctx.testCProg(res.Prog, res.Duration, res.Opts)
			} else {
				_, err = ctx.testProg(res.Prog, res.Duration, res.Opts)
			}
			if err != nil {
				return nil, err
			}
		}
		ctx.reproLog(3, "final repro crashed as (corrupted=%v):\n%s",
			ctx.report.Corrupted, ctx.report.Report)
		res.Report = ctx.report
		res.Stats = ctx.stats
	}
	// Tear down: stop serving boot requests and close any idle instances.
	close(ctx.bootRequests)
	for inst := range ctx.instances {
		inst.Close()
	}
	return res, err
}
// repro is the main reproduction pipeline: extract guilty program(s),
// minimize, extract a C repro, then simplify options.
func (ctx *context) repro(entries []*prog.LogEntry, crashStart int) (*Result, error) {
	// Cut programs that were executed after crash.
	for i, ent := range entries {
		if ent.Start > crashStart {
			entries = entries[:i]
			break
		}
	}
	reproStart := time.Now()
	defer func() {
		ctx.reproLog(3, "reproducing took %s", time.Since(reproStart))
	}()
	res, err := ctx.extractProg(entries)
	if err != nil {
		return nil, err
	}
	if res == nil {
		return nil, nil
	}
	defer func() {
		if res != nil {
			// NOTE(review): Repro appears to be a testing-only flag that
			// should not leak into the final result — confirm in csource.
			res.Opts.Repro = false
		}
	}()
	res, err = ctx.minimizeProg(res)
	if err != nil {
		return nil, err
	}
	// Try extracting C repro without simplifying options first.
	res, err = ctx.extractC(res)
	if err != nil {
		return nil, err
	}
	// Simplify options and try extracting C repro.
	if !res.CRepro {
		res, err = ctx.simplifyProg(res)
		if err != nil {
			return nil, err
		}
	}
	// Simplify C related options.
	if res.CRepro {
		res, err = ctx.simplifyC(res)
		if err != nil {
			return nil, err
		}
	}
	return res, nil
}
// extractProg tries to find a reproducer, first as a single guilty program
// and then by bisecting the whole log, retrying with increasing timeouts.
func (ctx *context) extractProg(entries []*prog.LogEntry) (*Result, error) {
	ctx.reproLog(2, "extracting reproducer from %v programs", len(entries))
	start := time.Now()
	defer func() {
		ctx.stats.ExtractProgTime = time.Since(start)
	}()
	// Extract last program on every proc.
	procs := make(map[int]int)
	for i, ent := range entries {
		procs[ent.Proc] = i
	}
	var indices []int
	for _, idx := range procs {
		indices = append(indices, idx)
	}
	sort.Ints(indices)
	// Collect those last-per-proc programs, newest first.
	var lastEntries []*prog.LogEntry
	for i := len(indices) - 1; i >= 0; i-- {
		lastEntries = append(lastEntries, entries[indices[i]])
	}
	// The shortest duration is 10 seconds to detect simple crashes (i.e. no races and no hangs).
	// The longest duration is 5 minutes to catch races and hangs. Note that this value must be larger
	// than hang/no output detection duration in vm.MonitorExecution, which is currently set to 3 mins.
	timeouts := []time.Duration{10 * time.Second, 1 * time.Minute, 5 * time.Minute}
	for _, timeout := range timeouts {
		// Execute each program separately to detect simple crashes caused by a single program.
		// Programs are executed in reverse order, usually the last program is the guilty one.
		res, err := ctx.extractProgSingle(reverseEntries(lastEntries), timeout)
		if err != nil {
			return nil, err
		}
		if res != nil {
			ctx.reproLog(3, "found reproducer with %d syscalls", len(res.Prog.Calls))
			return res, nil
		}
		// Don't try bisecting if there's only one entry.
		if len(entries) == 1 {
			continue
		}
		// Execute all programs and bisect the log to find multiple guilty programs.
		res, err = ctx.extractProgBisect(reverseEntries(entries), timeout)
		if err != nil {
			return nil, err
		}
		if res != nil {
			ctx.reproLog(3, "found reproducer with %d syscalls", len(res.Prog.Calls))
			return res, nil
		}
	}
	ctx.reproLog(0, "failed to extract reproducer")
	return nil, nil
}
// extractProgSingle executes each program separately (with its recorded
// fault-injection settings) and returns a Result for the first one that
// crashes, or nil if none does.
func (ctx *context) extractProgSingle(entries []*prog.LogEntry, duration time.Duration) (*Result, error) {
	ctx.reproLog(3, "single: executing %d programs separately with timeout %s", len(entries), duration)
	opts := csource.DefaultOpts(ctx.cfg)
	for _, ent := range entries {
		opts.Fault = ent.Fault
		opts.FaultCall = ent.FaultCall
		opts.FaultNth = ent.FaultNth
		if opts.FaultCall < 0 || opts.FaultCall >= len(ent.P.Calls) {
			// Clamp an out-of-range fault call to the program's last call.
			opts.FaultCall = len(ent.P.Calls) - 1
		}
		crashed, err := ctx.testProg(ent.P, duration, opts)
		if err != nil {
			return nil, err
		}
		if crashed {
			res := &Result{
				Prog: ent.P,
				// Store a longer duration — presumably to make later
				// reruns of the repro more reliable.
				Duration: duration * 3 / 2,
				Opts: opts,
			}
			ctx.reproLog(3, "single: successfully extracted reproducer")
			return res, nil
		}
	}
	ctx.reproLog(3, "single: failed to extract reproducer")
	return nil, nil
}
// extractProgBisect bisects the log to a minimal set of guilty programs,
// then tries to reproduce the crash with a single concatenated program,
// first without and then with fault injection.
func (ctx *context) extractProgBisect(entries []*prog.LogEntry, baseDuration time.Duration) (*Result, error) {
	ctx.reproLog(3, "bisect: bisecting %d programs with base timeout %s", len(entries), baseDuration)
	opts := csource.DefaultOpts(ctx.cfg)
	// Larger program sets get proportionally more execution time.
	duration := func(entries int) time.Duration {
		return baseDuration + time.Duration(entries/4)*time.Second
	}
	// Bisect the log to find multiple guilty programs.
	entries, err := ctx.bisectProgs(entries, func(progs []*prog.LogEntry) (bool, error) {
		return ctx.testProgs(progs, duration(len(progs)), opts)
	})
	if err != nil {
		return nil, err
	}
	if len(entries) == 0 {
		return nil, nil
	}
	// TODO: Minimize each program before concatenation.
	// TODO: Return multiple programs if concatenation fails.
	ctx.reproLog(3, "bisect: %d programs left: \n\n%s\n", len(entries), encodeEntries(entries))
	ctx.reproLog(3, "bisect: trying to concatenate")
	// Concatenate all programs into one.
	// Named p (not prog) so the prog package is not shadowed.
	p := &prog.Prog{
		Target: entries[0].P.Target,
	}
	for _, entry := range entries {
		p.Calls = append(p.Calls, entry.P.Calls...)
	}
	dur := duration(len(entries)) * 3 / 2
	// Execute the program without fault injection.
	crashed, err := ctx.testProg(p, dur, opts)
	if err != nil {
		return nil, err
	}
	if crashed {
		res := &Result{
			Prog:     p,
			Duration: dur,
			Opts:     opts,
		}
		ctx.reproLog(3, "bisect: concatenation succeeded")
		return res, nil
	}
	// Try with fault injection. Each entry's fault call is at an offset in
	// the concatenated program, so track the running call count.
	calls := 0
	for _, entry := range entries {
		if entry.Fault {
			opts.FaultCall = calls + entry.FaultCall
			opts.FaultNth = entry.FaultNth
			if entry.FaultCall < 0 || entry.FaultCall >= len(entry.P.Calls) {
				// Out-of-range fault call: fall back to the entry's last call.
				opts.FaultCall = calls + len(entry.P.Calls) - 1
			}
			crashed, err := ctx.testProg(p, dur, opts)
			if err != nil {
				return nil, err
			}
			if crashed {
				res := &Result{
					Prog:     p,
					Duration: dur,
					Opts:     opts,
				}
				ctx.reproLog(3, "bisect: concatenation succeeded with fault injection")
				return res, nil
			}
		}
		calls += len(entry.P.Calls)
	}
	ctx.reproLog(3, "bisect: concatenation failed")
	return nil, nil
}
// minimizeProg minimizes the calls and arguments of the guilty program,
// re-testing after each minimization step. For fault-injection repros the
// fault call index is tracked through minimization.
func (ctx *context) minimizeProg(res *Result) (*Result, error) {
	ctx.reproLog(2, "minimizing guilty program")
	start := time.Now()
	defer func() {
		ctx.stats.MinimizeProgTime = time.Since(start)
	}()
	call := -1 // -1: no specific call to track through minimization
	if res.Opts.Fault {
		call = res.Opts.FaultCall
	}
	res.Prog, res.Opts.FaultCall = prog.Minimize(res.Prog, call, true,
		func(p1 *prog.Prog, callIndex int) bool {
			crashed, err := ctx.testProg(p1, res.Duration, res.Opts)
			if err != nil {
				// Errors during a minimization step are non-fatal:
				// treat the candidate as "did not crash".
				ctx.reproLog(0, "minimization failed with %v", err)
				return false
			}
			return crashed
		})
	return res, nil
}
// simplifyProg tries each option simplification in progSimplifies; a
// simplification is kept only if the simplified program still crashes.
// After each kept simplification it attempts C repro extraction and
// returns early once a C repro is found.
func (ctx *context) simplifyProg(res *Result) (*Result, error) {
	ctx.reproLog(2, "simplifying guilty program")
	start := time.Now()
	defer func() {
		ctx.stats.SimplifyProgTime = time.Since(start)
	}()
	for _, simplify := range progSimplifies {
		opts := res.Opts
		if !simplify(&opts) {
			continue
		}
		crashed, err := ctx.testProg(res.Prog, res.Duration, opts)
		if err != nil {
			return nil, err
		}
		if !crashed {
			continue
		}
		res.Opts = opts
		// Simplification successful, try extracting C repro.
		// Plain assignment (not :=) so res/err are not shadowed; the
		// previous code shadowed res here, which only happened to work
		// because extractC returns its argument.
		res, err = ctx.extractC(res)
		if err != nil {
			return nil, err
		}
		if res.CRepro {
			return res, nil
		}
	}
	return res, nil
}
// extractC compiles the program into a C reproducer and checks whether the
// binary still triggers the crash, recording the verdict in res.CRepro.
func (ctx *context) extractC(res *Result) (*Result, error) {
	ctx.reproLog(2, "extracting C reproducer")
	start := time.Now()
	defer func() { ctx.stats.ExtractCTime = time.Since(start) }()
	ok, err := ctx.testCProg(res.Prog, res.Duration, res.Opts)
	if err != nil {
		return nil, err
	}
	res.CRepro = ok
	return res, nil
}
// simplifyC applies each C-specific option simplification, keeping a
// simplification only if the compiled reproducer still crashes.
func (ctx *context) simplifyC(res *Result) (*Result, error) {
	ctx.reproLog(2, "simplifying C reproducer")
	start := time.Now()
	defer func() { ctx.stats.SimplifyCTime = time.Since(start) }()
	for _, step := range cSimplifies {
		candidate := res.Opts
		if !step(&candidate) {
			continue
		}
		ok, err := ctx.testCProg(res.Prog, res.Duration, candidate)
		if err != nil {
			return nil, err
		}
		if ok {
			res.Opts = candidate
		}
	}
	return res, nil
}
// testProg wraps a single program in a log entry (propagating any
// fault-injection options) and runs it via testProgs.
func (ctx *context) testProg(p *prog.Prog, duration time.Duration, opts csource.Options) (crashed bool, err error) {
	ent := prog.LogEntry{P: p}
	if opts.Fault {
		ent.Fault = true
		ent.FaultCall = opts.FaultCall
		ent.FaultNth = opts.FaultNth
	}
	return ctx.testProgs([]*prog.LogEntry{&ent}, duration, opts)
}
// testProgs executes a batch of programs in a VM via syz-execprog and
// reports whether the run crashed.
func (ctx *context) testProgs(entries []*prog.LogEntry, duration time.Duration, opts csource.Options) (
	crashed bool, err error) {
	// Take a booted VM; a nil value means the pool gave up booting.
	inst := <-ctx.instances
	if inst == nil {
		return false, fmt.Errorf("all VMs failed to boot")
	}
	defer ctx.returnInstance(inst)
	if len(entries) == 0 {
		return false, fmt.Errorf("no programs to execute")
	}
	// Serialize the programs to a temp file and copy it into the VM.
	pstr := encodeEntries(entries)
	progFile, err := osutil.WriteTempFile(pstr)
	if err != nil {
		return false, err
	}
	defer os.Remove(progFile)
	vmProgFile, err := inst.Copy(progFile)
	if err != nil {
		return false, fmt.Errorf("failed to copy to VM: %v", err)
	}
	// NOTE(review): repeat=0 appears to mean "repeat indefinitely" for
	// execprog — confirm against the execprog flag documentation.
	repeat := 1
	if opts.Repeat {
		repeat = 0
	}
	if !opts.Fault {
		opts.FaultCall = -1
	}
	// For logging: a single program is printed in full; a batch is
	// summarized as the list of per-program call counts.
	program := entries[0].P.String()
	if len(entries) > 1 {
		program = "["
		for i, entry := range entries {
			program += fmt.Sprintf("%v", len(entry.P.Calls))
			if i != len(entries)-1 {
				program += ", "
			}
		}
		program += "]"
	}
	command := fmt.Sprintf("%v -executor=%v -os=%v -arch=%v -cover=0 -procs=%v -repeat=%v"+
		" -sandbox %v -threaded=%v -collide=%v %v",
		inst.execprogBin, inst.executorBin, ctx.cfg.TargetOS, ctx.cfg.TargetArch, opts.Procs, repeat,
		opts.Sandbox, opts.Threaded, opts.Collide, vmProgFile)
	ctx.reproLog(2, "testing program (duration=%v, %+v): %s", duration, opts, program)
	return ctx.testImpl(inst.Instance, command, duration)
}
// testCProg generates C source for p, builds it, and checks whether the
// resulting binary reproduces the crash.
func (ctx *context) testCProg(p *prog.Prog, duration time.Duration, opts csource.Options) (crashed bool, err error) {
	src, err := csource.Write(p, opts)
	if err != nil {
		return false, err
	}
	bin, err := csource.Build(p.Target, src)
	if err != nil {
		return false, err
	}
	defer os.Remove(bin)
	ctx.reproLog(2, "testing compiled C program (duration=%v, %+v): %s", duration, opts, p)
	// testBin already returns (false, err) on failure, so no extra check is needed.
	return ctx.testBin(bin, duration)
}
// testBin copies the binary into a booted VM and executes it there.
func (ctx *context) testBin(bin string, duration time.Duration) (crashed bool, err error) {
	inst := <-ctx.instances
	if inst == nil {
		return false, fmt.Errorf("all VMs failed to boot")
	}
	defer ctx.returnInstance(inst)
	vmBin, err := inst.Copy(bin)
	if err != nil {
		return false, fmt.Errorf("failed to copy to VM: %v", err)
	}
	return ctx.testImpl(inst.Instance, vmBin, duration)
}
// testImpl runs command in the VM and monitors its output for a crash.
// It returns true iff the run produced a non-suppressed crash report;
// that report is saved in ctx.report.
func (ctx *context) testImpl(inst *vm.Instance, command string, duration time.Duration) (crashed bool, err error) {
	outc, errc, err := inst.Run(duration, nil, command)
	if err != nil {
		return false, fmt.Errorf("failed to run command in VM: %v", err)
	}
	rep := inst.MonitorExecution(outc, errc, ctx.reporter, true)
	if rep == nil {
		// No report within the duration: treated as "did not crash".
		ctx.reproLog(2, "program did not crash")
		return false, nil
	}
	if rep.Suppressed {
		ctx.reproLog(2, "suppressed program crash: %v", rep.Title)
		return false, nil
	}
	ctx.report = rep
	ctx.reproLog(2, "program crashed: %v", rep.Title)
	return true, nil
}
// returnInstance releases a used instance: it queues the VM index for a
// fresh boot (so every test runs in a freshly booted VM) and closes the
// old one.
func (ctx *context) returnInstance(inst *instance) {
	ctx.bootRequests <- inst.index
	inst.Close()
}
// reproLog writes a message both to the global log (prefixed with the
// crash title) and to the per-repro stats log (without the prefix).
func (ctx *context) reproLog(level int, format string, args ...interface{}) {
	log.Logf(level, fmt.Sprintf("reproducing crash '%v': ", ctx.crashTitle)+format, args...)
	line := fmt.Sprintf(format, args...) + "\n"
	ctx.stats.Log = append(ctx.stats.Log, line...)
}
// bisectProgs finds a minimal subset of progs that still satisfies pred
// (i.e. still crashes). It keeps a list of "guilty" chunks and repeatedly
// splits any multi-program chunk in half, trying to evict each half; an
// evicted half is dropped and the search restarts. It terminates when all
// chunks are single programs, or returns nil if the full set doesn't crash.
func (ctx *context) bisectProgs(progs []*prog.LogEntry, pred func([]*prog.LogEntry) (bool, error)) (
	[]*prog.LogEntry, error) {
	ctx.reproLog(3, "bisect: bisecting %d programs", len(progs))
	// compose flattens guilty1 + chunk + guilty2 into one program list.
	compose := func(guilty1, guilty2 [][]*prog.LogEntry, chunk []*prog.LogEntry) []*prog.LogEntry {
		progs := []*prog.LogEntry{}
		for _, c := range guilty1 {
			progs = append(progs, c...)
		}
		progs = append(progs, chunk...)
		for _, c := range guilty2 {
			progs = append(progs, c...)
		}
		return progs
	}
	// logGuilty renders chunk sizes like "[<3>, <1>]" for logging.
	logGuilty := func(guilty [][]*prog.LogEntry) string {
		log := "["
		for i, chunk := range guilty {
			log += fmt.Sprintf("<%d>", len(chunk))
			if i != len(guilty)-1 {
				log += ", "
			}
		}
		log += "]"
		return log
	}
	// Sanity check: the full set must crash at all.
	ctx.reproLog(3, "bisect: executing all %d programs", len(progs))
	crashed, err := pred(progs)
	if err != nil {
		return nil, err
	}
	if !crashed {
		ctx.reproLog(3, "bisect: didn't crash")
		return nil, nil
	}
	guilty := [][]*prog.LogEntry{progs}
again:
	ctx.reproLog(3, "bisect: guilty chunks: %v", logGuilty(guilty))
	for i, chunk := range guilty {
		if len(chunk) == 1 {
			// Single-program chunks cannot be split further.
			continue
		}
		guilty1 := guilty[:i]
		guilty2 := guilty[i+1:]
		ctx.reproLog(3, "bisect: guilty chunks split: %v, <%v>, %v", logGuilty(guilty1), len(chunk), logGuilty(guilty2))
		chunk1 := chunk[0 : len(chunk)/2]
		chunk2 := chunk[len(chunk)/2:]
		ctx.reproLog(3, "bisect: chunk split: <%v> => <%v>, <%v>", len(chunk), len(chunk1), len(chunk2))
		// If it still crashes without chunk1, chunk1 is innocent: drop it.
		ctx.reproLog(3, "bisect: triggering crash without chunk #1")
		progs := compose(guilty1, guilty2, chunk2)
		crashed, err := pred(progs)
		if err != nil {
			return nil, err
		}
		if crashed {
			guilty = nil
			guilty = append(guilty, guilty1...)
			guilty = append(guilty, chunk2)
			guilty = append(guilty, guilty2...)
			ctx.reproLog(3, "bisect: crashed, chunk #1 evicted")
			goto again
		}
		// Likewise, try dropping chunk2.
		ctx.reproLog(3, "bisect: triggering crash without chunk #2")
		progs = compose(guilty1, guilty2, chunk1)
		crashed, err = pred(progs)
		if err != nil {
			return nil, err
		}
		if crashed {
			guilty = nil
			guilty = append(guilty, guilty1...)
			guilty = append(guilty, chunk1)
			guilty = append(guilty, guilty2...)
			ctx.reproLog(3, "bisect: crashed, chunk #2 evicted")
			goto again
		}
		// Neither half alone can be dropped: keep both as smaller chunks.
		guilty = nil
		guilty = append(guilty, guilty1...)
		guilty = append(guilty, chunk1)
		guilty = append(guilty, chunk2)
		guilty = append(guilty, guilty2...)
		ctx.reproLog(3, "bisect: not crashed, both chunks required")
		goto again
	}
	// All chunks are singletons now; flatten them into the result.
	progs = nil
	for _, chunk := range guilty {
		if len(chunk) != 1 {
			return nil, fmt.Errorf("bad bisect result: %v", guilty)
		}
		progs = append(progs, chunk[0])
	}
	ctx.reproLog(3, "bisect: success, %d programs left", len(progs))
	return progs, nil
}
// reverseEntries reverses entries in place and returns the same slice.
func reverseEntries(entries []*prog.LogEntry) []*prog.LogEntry {
	for i, j := 0, len(entries)-1; i < j; i, j = i+1, j-1 {
		entries[i], entries[j] = entries[j], entries[i]
	}
	return entries
}
// encodeEntries serializes log entries in the "executing program" log
// format (presumably the same format that target.ParseLog parses —
// confirm against the prog package's log format).
func encodeEntries(entries []*prog.LogEntry) []byte {
	buf := new(bytes.Buffer)
	for _, ent := range entries {
		opts := ""
		if ent.Fault {
			opts = fmt.Sprintf(" (fault-call:%v fault-nth:%v)", ent.FaultCall, ent.FaultNth)
		}
		fmt.Fprintf(buf, "executing program %v%v:\n%v", ent.Proc, opts, string(ent.P.Serialize()))
	}
	return buf.Bytes()
}
// Simplify is a single option-simplification step: it mutates opts and
// reports whether it changed anything.
type Simplify func(opts *csource.Options) bool
// progSimplifies are option simplifications attempted for syz reproducers;
// each returns false when it would be a no-op.
var progSimplifies = []Simplify{
	// Disable fault injection.
	func(opts *csource.Options) bool {
		if !opts.Fault {
			return false
		}
		opts.Fault = false
		opts.FaultCall = 0
		opts.FaultNth = 0
		return true
	},
	// Disable call colliding.
	func(opts *csource.Options) bool {
		if !opts.Collide {
			return false
		}
		opts.Collide = false
		return true
	},
	// Disable threading (only once colliding is already off).
	func(opts *csource.Options) bool {
		if opts.Collide || !opts.Threaded {
			return false
		}
		opts.Threaded = false
		return true
	},
	// Disable repetition along with options that only matter when repeating.
	func(opts *csource.Options) bool {
		if !opts.Repeat {
			return false
		}
		opts.Repeat = false
		opts.WaitRepeat = false
		opts.EnableCgroups = false
		opts.ResetNet = false
		opts.Procs = 1
		return true
	},
	// Reduce to a single process.
	func(opts *csource.Options) bool {
		if opts.Procs == 1 {
			return false
		}
		opts.Procs = 1
		return true
	},
	// Weaken the sandbox to "none".
	func(opts *csource.Options) bool {
		if opts.Sandbox == "none" {
			return false
		}
		opts.Sandbox = "none"
		return true
	},
}
// cSimplifies extends progSimplifies with C-reproducer-specific
// simplifications. The append is safe: the progSimplifies literal has
// cap == len, so append allocates a fresh backing array.
var cSimplifies = append(progSimplifies, []Simplify{
	// Drop the sandbox entirely, plus features that depend on it.
	func(opts *csource.Options) bool {
		if opts.Sandbox == "" {
			return false
		}
		opts.Sandbox = ""
		opts.EnableTun = false
		opts.EnableCgroups = false
		opts.EnableNetdev = false
		opts.ResetNet = false
		return true
	},
	// Disable tun device setup.
	func(opts *csource.Options) bool {
		if !opts.EnableTun {
			return false
		}
		opts.EnableTun = false
		return true
	},
	// Disable cgroups setup.
	func(opts *csource.Options) bool {
		if !opts.EnableCgroups {
			return false
		}
		opts.EnableCgroups = false
		return true
	},
	// Disable net device setup.
	func(opts *csource.Options) bool {
		if !opts.EnableNetdev {
			return false
		}
		opts.EnableNetdev = false
		return true
	},
	// Disable net reset between iterations.
	func(opts *csource.Options) bool {
		if !opts.ResetNet {
			return false
		}
		opts.ResetNet = false
		return true
	},
	// Drop the temp dir, unless the namespace sandbox or cgroups need it.
	func(opts *csource.Options) bool {
		if !opts.UseTmpDir || opts.Sandbox == "namespace" || opts.EnableCgroups {
			return false
		}
		opts.UseTmpDir = false
		return true
	},
	// Disable SIGSEGV handling.
	func(opts *csource.Options) bool {
		if !opts.HandleSegv {
			return false
		}
		opts.HandleSegv = false
		return true
	},
	// Disable waiting between repeat iterations (and dependent options).
	func(opts *csource.Options) bool {
		if !opts.WaitRepeat {
			return false
		}
		opts.WaitRepeat = false
		opts.EnableCgroups = false
		opts.ResetNet = false
		return true
	},
}...)
|
/*
Copyright 2015 The Camlistore Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"encoding/json"
"errors"
"fmt"
"html/template"
"net/http"
"strconv"
"strings"
"camlistore.org/pkg/blobserver"
"camlistore.org/pkg/httputil"
"camlistore.org/pkg/types/clientconfig"
"go4.org/jsonconfig"
)
// helpHTML is the template for the help page. Its data must provide
// .ClientConfigJSON and .SecringDownloadHint.
const helpHTML string = `<html>
		<head>
			<title>Help</title>
		</head>
		<body>
			<h2>Help</h2>
			<h3>Web User Interface</h3>
			<p><a href='https://camlistore.org/doc/search-ui'>Search bar predicates.</a></p>
			<h3>Client tools</h3>
			<p>
			You can download the Camlistore command line tools for Linux, Mac, and Windows at:
			<ul>
				<li><a href="https://camlistore.org/download">camlistore.org/download</a></li>
			</ul>
			</p>
			<p>You will need to use the following <a href='https://camlistore.org/doc/client-config'>client configuration</a> in order to access this server using the command line tools.</p>
			<pre>{{ .ClientConfigJSON }}</pre>
			{{ .SecringDownloadHint }}
			<h3>Anything Else?</h3>
			<p>See the Camlistore <a href='https://camlistore.org/doc/'>online documentation</a> and <a href='https://camlistore.org/community/'>community contacts</a>.</p>
		</body>
	</html>`
// HelpHandler publishes information related to accessing the server
// (client configuration, download links, documentation pointers).
type HelpHandler struct {
clientConfig *clientconfig.Config // generated from serverConfig
serverConfig jsonconfig.Obj // low-level config
goTemplate *template.Template // for rendering
serverSecRing string // original IdentitySecretRing path, saved before it is replaced with a client-side placeholder in InitHandler
}
// SetServerConfig enables the handler to receive the server config
// before InitHandler, which generates a client config from the server config, is called.
// Only the first config passed in is kept; later calls are no-ops.
func (hh *HelpHandler) SetServerConfig(config jsonconfig.Obj) {
	if hh.serverConfig != nil {
		return
	}
	hh.serverConfig = config
}
// init registers the "help" handler constructor with the blobserver registry.
func init() {
blobserver.RegisterHandlerConstructor("help", newHelpFromConfig)
}
// fixServerInConfig checks if cc contains a meaningful server (for a client).
// If not, a newly allocated clone of cc is returned, except req.Host is used for
// the hostname of the server. Otherwise, cc is returned.
func fixServerInConfig(cc *clientconfig.Config, req *http.Request) (*clientconfig.Config, error) {
	if cc == nil {
		return nil, errors.New("nil client config")
	}
	if len(cc.Servers) == 0 || cc.Servers["default"] == nil || cc.Servers["default"].Server == "" {
		return nil, errors.New("no Server in client config")
	}
	// A server URL like "http://0.0.0.0:3179" or "http://:3179" is a listen
	// address, not something a remote client can dial; only then do we
	// substitute req.Host.
	listen := strings.TrimPrefix(strings.TrimPrefix(cc.Servers["default"].Server, "http://"), "https://")
	if !(strings.HasPrefix(listen, "0.0.0.0") || strings.HasPrefix(listen, ":")) {
		return cc, nil
	}
	// Clone for real: a plain struct copy of cc shares the Servers map and
	// the *Server values inside it, so writing through the copy would also
	// rewrite the caller's config (hh.clientConfig) for every later request.
	newCC := *cc
	newCC.Servers = make(map[string]*clientconfig.Server, len(cc.Servers))
	for name, srv := range cc.Servers {
		newCC.Servers[name] = srv
	}
	server := *cc.Servers["default"] // copy the struct, not the shared pointer
	if req.TLS != nil {
		server.Server = "https://" + req.Host
	} else {
		server.Server = "http://" + req.Host
	}
	newCC.Servers["default"] = &server
	return &newCC, nil
}
// InitHandler generates the client config from the previously injected
// server config (see SetServerConfig) and parses the help page template.
// It must be called after SetServerConfig.
func (hh *HelpHandler) InitHandler(hl blobserver.FindHandlerByTyper) error {
if hh.serverConfig == nil {
return fmt.Errorf("HelpHandler's serverConfig must be set before calling its InitHandler")
}
clientConfig, err := clientconfig.GenerateClientConfig(hh.serverConfig)
if err != nil {
return fmt.Errorf("error generating client config: %v", err)
}
hh.clientConfig = clientConfig
// Remember the server-side secring path (used by serveHelpHTML for the
// GCS download hint), then show a client-side placeholder path in the
// rendered config instead.
hh.serverSecRing = clientConfig.IdentitySecretRing
clientConfig.IdentitySecretRing = "/home/you/.config/camlistore/identity-secring.gpg"
tmpl, err := template.New("help").Parse(helpHTML)
if err != nil {
return fmt.Errorf("error creating template: %v", err)
}
hh.goTemplate = tmpl
return nil
}
// newHelpFromConfig constructs an empty HelpHandler; the actual
// configuration is injected later through SetServerConfig and InitHandler.
func newHelpFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (h http.Handler, err error) {
return &HelpHandler{}, nil
}
// ServeHTTP serves the help page. Only GET requests on the handler's root
// path are accepted. With ?clientConfig=true, only the client configuration
// is returned, as JSON.
func (hh *HelpHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
	if !httputil.IsGet(req) {
		http.Error(rw, "Illegal help method.", http.StatusMethodNotAllowed)
		return
	}
	if httputil.PathSuffix(req) != "" {
		http.Error(rw, "Illegal help path.", http.StatusNotFound)
		return
	}
	cc, err := fixServerInConfig(hh.clientConfig, req)
	if err != nil {
		httputil.ServeError(rw, req, err)
		return
	}
	if raw := req.FormValue("clientConfig"); raw != "" {
		if wantJSON, err := strconv.ParseBool(raw); err == nil && wantJSON {
			httputil.ReturnJSON(rw, cc)
			return
		}
	}
	hh.serveHelpHTML(cc, rw, req)
}
// serveHelpHTML renders the help page with the given client config embedded
// as indented JSON, plus a secring download hint when the server keeps its
// secret ring on Google Cloud Storage.
func (hh *HelpHandler) serveHelpHTML(cc *clientconfig.Config, rw http.ResponseWriter, req *http.Request) {
jsonBytes, err := json.MarshalIndent(cc, "", "    ")
if err != nil {
httputil.ServeError(rw, req, fmt.Errorf("could not serialize client config JSON: %v", err))
return
}
var hint template.HTML
// A "/gcs/<bucket>/..." secring path means the server stores its GnuPG
// secret ring in a GCS bucket; point the user at the bucket browser.
if strings.HasPrefix(hh.serverSecRing, "/gcs/") {
bucketdir := strings.TrimPrefix(hh.serverSecRing, "/gcs/")
bucketdir = strings.TrimSuffix(bucketdir, "/identity-secring.gpg")
hint = template.HTML(fmt.Sprintf("<p>Download your GnuPG secret ring from <a href=\"https://console.developers.google.com/storage/browser/%s/\">https://console.developers.google.com/storage/browser/%s/</a> and place it in your <a href='https://camlistore.org/doc/client-config'>Camlistore client config directory</a>. Keep it private. It's not encrypted or password-protected and anybody in possession of it can create Camlistore claims as your identity.</p>\n",
bucketdir, bucketdir))
}
// NOTE(review): the Execute error is ignored; by this point part of the
// response may already be written, so there is no clean recovery, but
// logging it would help debugging — TODO confirm intended.
hh.goTemplate.Execute(rw, struct {
ClientConfigJSON string
SecringDownloadHint template.HTML
}{
ClientConfigJSON: string(jsonBytes),
SecringDownloadHint: hint,
})
}
website: update help page community URL
Remove the trailing slash from the community URL
Fixes #967
Change-Id: I487eaa71d6a1536293c6aa655dc280d9f2cdb276
/*
Copyright 2015 The Camlistore Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"encoding/json"
"errors"
"fmt"
"html/template"
"net/http"
"strconv"
"strings"
"camlistore.org/pkg/blobserver"
"camlistore.org/pkg/httputil"
"camlistore.org/pkg/types/clientconfig"
"go4.org/jsonconfig"
)
// helpHTML is the template rendered by serveHelpHTML. It expects a value
// with a ClientConfigJSON string field and a SecringDownloadHint
// template.HTML field.
const helpHTML string = `<html>
<head>
<title>Help</title>
</head>
<body>
<h2>Help</h2>
<h3>Web User Interface</h3>
<p><a href='https://camlistore.org/doc/search-ui'>Search bar predicates.</a></p>
<h3>Client tools</h3>
<p>
You can download the Camlistore command line tools for Linux, Mac, and Windows at:
<ul>
<li><a href="https://camlistore.org/download">camlistore.org/download</a></li>
</ul>
</p>
<p>You will need to use the following <a href='https://camlistore.org/doc/client-config'>client configuration</a> in order to access this server using the command line tools.</p>
<pre>{{ .ClientConfigJSON }}</pre>
{{ .SecringDownloadHint }}
<h3>Anything Else?</h3>
<p>See the Camlistore <a href='https://camlistore.org/doc/'>online documentation</a> and <a href='https://camlistore.org/community'>community contacts</a>.</p>
</body>
</html>`
// HelpHandler publishes information related to accessing the server
// (client configuration, download links, documentation pointers).
type HelpHandler struct {
clientConfig *clientconfig.Config // generated from serverConfig
serverConfig jsonconfig.Obj // low-level config
goTemplate *template.Template // for rendering
serverSecRing string // original IdentitySecretRing path, saved before it is replaced with a client-side placeholder in InitHandler
}
// SetServerConfig enables the handler to receive the server config
// before InitHandler, which generates a client config from the server config, is called.
// Only the first config passed in is kept; later calls are no-ops.
func (hh *HelpHandler) SetServerConfig(config jsonconfig.Obj) {
if hh.serverConfig == nil {
hh.serverConfig = config
}
}
// init registers the "help" handler constructor with the blobserver registry.
func init() {
blobserver.RegisterHandlerConstructor("help", newHelpFromConfig)
}
// fixServerInConfig checks if cc contains a meaningful server (for a client).
// If not, a newly allocated clone of cc is returned, except req.Host is used for
// the hostname of the server. Otherwise, cc is returned.
func fixServerInConfig(cc *clientconfig.Config, req *http.Request) (*clientconfig.Config, error) {
	if cc == nil {
		return nil, errors.New("nil client config")
	}
	if len(cc.Servers) == 0 || cc.Servers["default"] == nil || cc.Servers["default"].Server == "" {
		return nil, errors.New("no Server in client config")
	}
	// A server URL like "http://0.0.0.0:3179" or "http://:3179" is a listen
	// address, not something a remote client can dial; only then do we
	// substitute req.Host.
	listen := strings.TrimPrefix(strings.TrimPrefix(cc.Servers["default"].Server, "http://"), "https://")
	if !(strings.HasPrefix(listen, "0.0.0.0") || strings.HasPrefix(listen, ":")) {
		return cc, nil
	}
	// Clone for real: a plain struct copy of cc shares the Servers map and
	// the *Server values inside it, so writing through the copy would also
	// rewrite the caller's config (hh.clientConfig) for every later request.
	newCC := *cc
	newCC.Servers = make(map[string]*clientconfig.Server, len(cc.Servers))
	for name, srv := range cc.Servers {
		newCC.Servers[name] = srv
	}
	server := *cc.Servers["default"] // copy the struct, not the shared pointer
	if req.TLS != nil {
		server.Server = "https://" + req.Host
	} else {
		server.Server = "http://" + req.Host
	}
	newCC.Servers["default"] = &server
	return &newCC, nil
}
// InitHandler generates the client config from the previously injected
// server config (see SetServerConfig) and parses the help page template.
// It must be called after SetServerConfig.
func (hh *HelpHandler) InitHandler(hl blobserver.FindHandlerByTyper) error {
if hh.serverConfig == nil {
return fmt.Errorf("HelpHandler's serverConfig must be set before calling its InitHandler")
}
clientConfig, err := clientconfig.GenerateClientConfig(hh.serverConfig)
if err != nil {
return fmt.Errorf("error generating client config: %v", err)
}
hh.clientConfig = clientConfig
// Remember the server-side secring path (used by serveHelpHTML for the
// GCS download hint), then show a client-side placeholder path in the
// rendered config instead.
hh.serverSecRing = clientConfig.IdentitySecretRing
clientConfig.IdentitySecretRing = "/home/you/.config/camlistore/identity-secring.gpg"
tmpl, err := template.New("help").Parse(helpHTML)
if err != nil {
return fmt.Errorf("error creating template: %v", err)
}
hh.goTemplate = tmpl
return nil
}
// newHelpFromConfig constructs an empty HelpHandler; the actual
// configuration is injected later through SetServerConfig and InitHandler.
func newHelpFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (h http.Handler, err error) {
return &HelpHandler{}, nil
}
// ServeHTTP serves the help page. Only GET requests on the handler's root
// path are accepted. With ?clientConfig=true, only the client configuration
// is returned, as JSON.
func (hh *HelpHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
suffix := httputil.PathSuffix(req)
if !httputil.IsGet(req) {
http.Error(rw, "Illegal help method.", http.StatusMethodNotAllowed)
return
}
switch suffix {
case "":
// Substitute a usable host for listen-style server URLs before
// showing the config to the client.
cc, err := fixServerInConfig(hh.clientConfig, req)
if err != nil {
httputil.ServeError(rw, req, err)
return
}
if clientConfig := req.FormValue("clientConfig"); clientConfig != "" {
if clientConfigOnly, err := strconv.ParseBool(clientConfig); err == nil && clientConfigOnly {
httputil.ReturnJSON(rw, cc)
return
}
}
hh.serveHelpHTML(cc, rw, req)
default:
http.Error(rw, "Illegal help path.", http.StatusNotFound)
}
}
// serveHelpHTML renders the help page with the given client config embedded
// as indented JSON, plus a secring download hint when the server keeps its
// secret ring on Google Cloud Storage.
func (hh *HelpHandler) serveHelpHTML(cc *clientconfig.Config, rw http.ResponseWriter, req *http.Request) {
jsonBytes, err := json.MarshalIndent(cc, "", "    ")
if err != nil {
httputil.ServeError(rw, req, fmt.Errorf("could not serialize client config JSON: %v", err))
return
}
var hint template.HTML
// A "/gcs/<bucket>/..." secring path means the server stores its GnuPG
// secret ring in a GCS bucket; point the user at the bucket browser.
if strings.HasPrefix(hh.serverSecRing, "/gcs/") {
bucketdir := strings.TrimPrefix(hh.serverSecRing, "/gcs/")
bucketdir = strings.TrimSuffix(bucketdir, "/identity-secring.gpg")
hint = template.HTML(fmt.Sprintf("<p>Download your GnuPG secret ring from <a href=\"https://console.developers.google.com/storage/browser/%s/\">https://console.developers.google.com/storage/browser/%s/</a> and place it in your <a href='https://camlistore.org/doc/client-config'>Camlistore client config directory</a>. Keep it private. It's not encrypted or password-protected and anybody in possession of it can create Camlistore claims as your identity.</p>\n",
bucketdir, bucketdir))
}
// NOTE(review): the Execute error is ignored; by this point part of the
// response may already be written, so there is no clean recovery, but
// logging it would help debugging — TODO confirm intended.
hh.goTemplate.Execute(rw, struct {
ClientConfigJSON string
SecringDownloadHint template.HTML
}{
ClientConfigJSON: string(jsonBytes),
SecringDownloadHint: hint,
})
}
|
package setup
import (
"bytes"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io"
"io/ioutil"
"net/url"
"os"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"time"
"github.com/dcos/dcos-cli/pkg/config"
"github.com/dcos/dcos-cli/pkg/dcos"
"github.com/dcos/dcos-cli/pkg/httpclient"
"github.com/dcos/dcos-cli/pkg/internal/corecli"
"github.com/dcos/dcos-cli/pkg/internal/cosmos"
"github.com/dcos/dcos-cli/pkg/login"
"github.com/dcos/dcos-cli/pkg/mesos"
"github.com/dcos/dcos-cli/pkg/plugin"
"github.com/dcos/dcos-cli/pkg/prompt"
"github.com/sirupsen/logrus"
"github.com/spf13/afero"
"github.com/vbauerster/mpb"
)
// Opts are options for a setup. Each field maps 1:1 onto the corresponding
// unexported field of Setup; see New.
type Opts struct {
Fs afero.Fs
Errout io.Writer
Prompt *prompt.Prompt
Logger *logrus.Logger
LoginFlow *login.Flow
ConfigManager *config.Manager
PluginManager *plugin.Manager
EnvLookup func(key string) (string, bool)
Deprecated func(msg string) error
}
// Setup represents a cluster setup. It orchestrates TLS configuration,
// login, cluster config persistence, and default plugin installation
// (see Configure). Construct it with New.
type Setup struct {
fs afero.Fs
errout io.Writer // where progress bars and user-facing notices are written
prompt *prompt.Prompt
logger *logrus.Logger
loginFlow *login.Flow
configManager *config.Manager
pluginManager *plugin.Manager
envLookup func(key string) (string, bool) // usually os.LookupEnv; injectable for tests
deprecated func(msg string) error
}
// New creates a new setup from the given options.
func New(opts Opts) *Setup {
	setup := new(Setup)
	setup.fs = opts.Fs
	setup.errout = opts.Errout
	setup.prompt = opts.Prompt
	setup.logger = opts.Logger
	setup.loginFlow = opts.LoginFlow
	setup.configManager = opts.ConfigManager
	setup.pluginManager = opts.PluginManager
	setup.envLookup = opts.EnvLookup
	setup.deprecated = opts.Deprecated
	return setup
}
// Configure triggers the setup flow.
//
// It resolves the setup flags, configures TLS for HTTPS cluster URLs
// (downloading the DC/OS CA bundle when hitting an unknown-authority
// error), canonicalizes the cluster URL, logs in unless an ACS token is
// supplied via the DCOS_CLUSTER_SETUP_ACS_TOKEN env var, saves the cluster
// config, optionally attaches to the cluster, and installs the default
// plugins. It returns the configured cluster or the first error hit.
func (s *Setup) Configure(flags *Flags, clusterURL string, attach bool) (*config.Cluster, error) {
if err := flags.Resolve(); err != nil {
return nil, err
}
s.logger.Info("Setting up the cluster...")
// Create a Cluster and an HTTP client with the few information already available.
cluster := config.NewCluster(nil)
cluster.SetURL(clusterURL)
httpOpts := []httpclient.Option{
httpclient.Timeout(5 * time.Second),
httpclient.Logger(s.logger),
}
// At most two passes: a second one only happens after downloading the
// CA bundle following an x509 unknown-authority error on the first.
for i := 0; i < 2; i++ {
// When using an HTTPS URL, configure TLS for the HTTP client accordingly.
if strings.HasPrefix(clusterURL, "https://") {
tlsConfig, err := s.configureTLS(httpOpts, flags)
if err != nil {
return nil, err
}
cluster.SetTLS(config.TLS{Insecure: tlsConfig.InsecureSkipVerify})
httpOpts = append(httpOpts, httpclient.TLS(tlsConfig))
}
// Make sure we continue the setup flow with the canonical cluster URL.
canonicalClusterURL, err := detectCanonicalClusterURL(cluster.URL(), httpOpts)
if err == nil {
if canonicalClusterURL != cluster.URL() {
s.logger.Warnf("Continuing cluster setup with: %s", canonicalClusterURL)
cluster.SetURL(canonicalClusterURL)
}
break
}
// Download the DC/OS CA bundle when getting an unknown authority error.
if s.isX509UnknownAuthorityError(err) && len(flags.caBundle) == 0 {
flags.caBundle, err = s.downloadDCOSCABundle(clusterURL, httpOpts)
if err != nil {
return nil, err
}
continue
}
return nil, err
}
// Login to get the ACS token, unless it is already present as an env var.
acsToken, _ := s.envLookup("DCOS_CLUSTER_SETUP_ACS_TOKEN")
if acsToken == "" {
httpClient := httpclient.New(cluster.URL(), httpOpts...)
var err error
acsToken, err = s.loginFlow.Start(flags.loginFlags, httpClient)
if err == login.ErrAuthDisabled {
s.logger.Warn("This cluster does not require authenticated requests. Skipping login.")
} else if err != nil {
return nil, err
}
}
cluster.SetACSToken(acsToken)
httpClient := httpclient.New(cluster.URL(), append(httpOpts, httpclient.ACSToken(cluster.ACSToken()))...)
// Read cluster ID from cluster metadata.
metadata, err := dcos.NewClient(httpClient).Metadata()
if err != nil {
return nil, err
}
if flags.name != "" {
// A custom cluster name has been passed as a flag.
cluster.SetName(flags.name)
} else if stateSummary, err := mesos.NewClient(httpClient).StateSummary(); err == nil {
// Read cluster name from Mesos state summary.
cluster.SetName(stateSummary.Cluster)
} else {
// Fallback to cluster ID as cluster name.
cluster.SetName(metadata.ClusterID)
}
// Create the config for the given cluster.
err = s.configManager.Save(cluster.Config(), metadata.ClusterID, flags.caBundle)
if err != nil {
return nil, err
}
if attach {
err = s.configManager.Attach(cluster.Config())
if err != nil {
return nil, err
}
s.logger.Infof("You are now attached to cluster %s", cluster.ID())
}
// Install default plugins (dcos-core-cli and dcos-enterprise-cli).
s.pluginManager.SetCluster(cluster)
if err = s.installDefaultPlugins(httpClient); err != nil {
return nil, err
}
s.logger.Infof("%s is now setup", clusterURL)
return cluster, nil
}
// detectCanonicalClusterURL issues a HEAD request against "/" and, on a 200
// response, returns the final request URL (after any redirects) with a
// lowercased host and without a trailing slash.
func detectCanonicalClusterURL(clusterURL string, httpOpts []httpclient.Option) (string, error) {
	httpClient := httpclient.New(clusterURL, httpOpts...)
	req, err := httpClient.NewRequest("HEAD", "/", nil)
	if err != nil {
		return "", err
	}
	resp, err := httpClient.Do(req)
	if err != nil {
		return "", err
	}
	// Close the (empty) HEAD response body so the transport can reuse the
	// underlying connection; the original leaked it.
	defer resp.Body.Close()
	if resp.StatusCode == 200 {
		resp.Request.URL.Host = strings.ToLower(resp.Request.URL.Host)
		return strings.TrimRight(resp.Request.URL.String(), "/"), nil
	}
	return "", fmt.Errorf("couldn't detect a canonical cluster URL")
}
// configureTLS creates the TLS configuration for a given set of flags.
// With --insecure, certificate verification is disabled entirely. With a CA
// bundle, a cert pool built from it is used; a nil pool means the system
// roots are used.
func (s *Setup) configureTLS(httpOpts []httpclient.Option, flags *Flags) (*tls.Config, error) {
// Return early with an insecure TLS config when `--insecure` is passed.
if flags.insecure {
return &tls.Config{InsecureSkipVerify: true}, nil
}
// Create a cert pool from the CA bundle PEM. The user is prompted for manual
// verification of the certificate authority, unless `--no-check` or `--ca-bundle`
// were explicitly passed.
var certPool *x509.CertPool
if len(flags.caBundle) > 0 {
var err error
certPool, err = s.decodePEMCerts(flags.caBundle, !flags.noCheck)
if err != nil {
return nil, err
}
}
return &tls.Config{RootCAs: certPool}, nil
}
// isX509UnknownAuthorityError checks whether an error is a *url.Error
// wrapping an x509.UnknownAuthorityError.
func (s *Setup) isX509UnknownAuthorityError(err error) bool {
	if urlErr, isURLErr := err.(*url.Error); isURLErr {
		_, isUnknownAuthority := urlErr.Err.(x509.UnknownAuthorityError)
		return isUnknownAuthority
	}
	return false
}
// downloadDCOSCABundle downloads the cluster certificate authority at "/ca/dcos-ca.crt".
//
// The request deliberately skips certificate verification: its purpose is to
// fetch the CA needed to verify the cluster in the first place. The user is
// later prompted to confirm the CA fingerprint (see promptCA).
func (s *Setup) downloadDCOSCABundle(clusterURL string, httpOpts []httpclient.Option) ([]byte, error) {
	insecureHTTPClient := httpclient.New(clusterURL, append(httpOpts, httpclient.TLS(&tls.Config{
		InsecureSkipVerify: true,
	}))...)
	resp, err := insecureHTTPClient.Get("/ca/dcos-ca.crt")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// Don't hand an error page (eg. a 404 body) to the PEM decoder as if it
	// were the CA bundle.
	if resp.StatusCode != 200 {
		return nil, fmt.Errorf("couldn't download the CA bundle: unexpected status code %d", resp.StatusCode)
	}
	caPEM, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	return caPEM, nil
}
// decodePEMCerts creates a x509.CertPool struct based on a PEM certificate authority bundle.
// When prompt is set to true, the first certificate in the bundle is prompted for user confirmation.
// Non-certificate PEM blocks and unparsable certificates are skipped; a
// bundle with no valid certificates yields a nil pool and no error.
func (s *Setup) decodePEMCerts(caPEM []byte, prompt bool) (*x509.CertPool, error) {
var certPool *x509.CertPool
for len(caPEM) > 0 {
var block *pem.Block
block, caPEM = pem.Decode(caPEM)
if block == nil {
// No further PEM data.
break
}
if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
continue
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
// Skip malformed certificates rather than failing the whole bundle.
continue
}
if certPool == nil {
// Lazily create the pool on the first valid certificate, and
// prompt the user for that certificate only.
certPool = x509.NewCertPool()
if prompt {
if err := s.promptCA(cert); err != nil {
return nil, err
}
}
}
certPool.AddCert(cert)
}
return certPool, nil
}
// installDefaultPlugins installs the dcos-core-cli and (if applicable) the dcos-enterprise-cli plugin.
// The installation of the core plugin only works with DC/OS >= 1.10 and the installation of the EE plugin only works
// with DC/OS >= 1.12 due to the lack of a "Variant" key in the "/dcos-metadata/dcos-version.json" endpoint before.
func (s *Setup) installDefaultPlugins(httpClient *httpclient.Client) error {
version, err := dcos.NewClient(httpClient).Version()
if err != nil {
return fmt.Errorf("unable to get DC/OS version, installation of the plugins aborted: %s", err)
}
if regexp.MustCompile(`^1\.[7-9]\D*`).MatchString(version.Version) {
return errors.New("DC/OS version of the cluster < 1.10, installation of the plugins aborted")
}
// The progress bar container waits on wg, so pbar.Wait() below blocks
// until both the EE goroutine and the core install are done.
var wg sync.WaitGroup
pbar := mpb.New(mpb.WithOutput(s.errout), mpb.WithWaitGroup(&wg))
wg.Add(2)
// Install dcos-enterprise-cli.
go func() {
// Install dcos-enterprise-cli if the DC/OS variant metadata is "enterprise".
if version.DCOSVariant == "enterprise" {
if err := s.installPlugin("dcos-enterprise-cli", httpClient, version, pbar); err != nil {
s.logger.Debug(err)
}
} else if version.DCOSVariant == "" {
// We add this message if the DC/OS variant is "" (DC/OS < 1.12)
// or if there was an error while installing the EE plugin.
s.logger.Error("Please run “dcos package install dcos-enterprise-cli” if you use a DC/OS Enterprise cluster")
}
wg.Done()
}()
// Install dcos-core-cli.
errCore := s.installPlugin("dcos-core-cli", httpClient, version, pbar)
wg.Done()
pbar.Wait()
if errCore != nil {
// Extract the dcos-core-cli bundle if it couldn't be downloaded.
errCore = corecli.InstallPlugin(s.fs, s.pluginManager, s.deprecated)
}
return errCore
}
// installPlugin installs a plugin by its name.
// It first tries the canonical download URL, then falls back to Cosmos.
// Either source can be skipped through the DCOS_CLUSTER_SETUP_SKIP_CANONICAL_URL_INSTALL
// and DCOS_CLUSTER_SETUP_SKIP_COSMOS_INSTALL env vars (set to "1").
func (s *Setup) installPlugin(name string, httpClient *httpclient.Client, version *dcos.Version, pbar *mpb.Progress) error {
s.logger.Infof("Installing %s...", name)
if skip, _ := s.envLookup("DCOS_CLUSTER_SETUP_SKIP_CANONICAL_URL_INSTALL"); skip != "1" {
err := s.installPluginFromCanonicalURL(name, version, pbar)
if err == nil {
return nil
}
// Canonical URL failed; log at debug level and fall through to Cosmos.
s.logger.Debug(err)
}
if skip, _ := s.envLookup("DCOS_CLUSTER_SETUP_SKIP_COSMOS_INSTALL"); skip != "1" {
return s.installPluginFromCosmos(name, httpClient, pbar)
}
return errors.New("skipping plugin installation from Cosmos (DCOS_CLUSTER_SETUP_SKIP_COSMOS_INSTALL=1)")
}
// installPluginFromCanonicalURL installs a plugin using its canonical URL.
//
// The URL is built from the plugin name, the current OS, and the
// "<major>.<minor>" DC/OS version. If the release URL answers with a 4xx on
// a HEAD probe, the testing URL is used instead.
func (s *Setup) installPluginFromCanonicalURL(name string, version *dcos.Version, pbar *mpb.Progress) error {
	domain := "downloads.dcos.io"
	if name == "dcos-enterprise-cli" {
		domain = "downloads.mesosphere.io"
	}
	platform := runtime.GOOS
	matches := regexp.MustCompile(`^(\d+)\.(\d+)\D*`).FindStringSubmatch(version.Version)
	if matches == nil {
		return fmt.Errorf("unable to parse DC/OS version %s", version.Version)
	}
	dcosVersion := matches[1] + "." + matches[2]
	url := fmt.Sprintf(
		"https://%s/cli/releases/plugins/%s/%s/x86-64/%s-%s-patch.latest.zip",
		domain, name, platform, name, dcosVersion,
	)
	httpClient := httpclient.New("")
	req, err := httpClient.NewRequest("HEAD", url, nil)
	if err != nil {
		return err
	}
	resp, err := httpClient.Do(req)
	if err != nil {
		return err
	}
	// Close the (empty) HEAD response body so the connection can be reused;
	// the original leaked it.
	defer resp.Body.Close()
	if resp.StatusCode >= 400 && resp.StatusCode < 500 {
		// No release build for this version yet; fall back to testing.
		url = fmt.Sprintf(
			"https://%s/cli/testing/plugins/%s/%s/x86-64/%s-%s-patch.x.zip",
			domain, name, platform, name, dcosVersion,
		)
	}
	return s.pluginManager.Install(url, &plugin.InstallOpts{
		Name:        name,
		Update:      true,
		ProgressBar: pbar,
	})
}
// installPluginFromCosmos installs a plugin through Cosmos.
//
// It describes the package, picks the download URL for the current platform
// ("x86-64" only), verifies the SHA-256 checksum when Cosmos provides one,
// and stores the package description as package.json next to the installed
// plugin.
func (s *Setup) installPluginFromCosmos(name string, httpClient *httpclient.Client, pbar *mpb.Progress) error {
	// Get package information from Cosmos.
	pkgInfo, err := cosmos.NewClient(httpClient).DescribePackage(name)
	if err != nil {
		return err
	}
	// Get the download URL for the current platform.
	p, ok := pkgInfo.Package.Resource.CLI.Plugins[runtime.GOOS]["x86-64"]
	if !ok {
		// Fixed: the original message had a stray closing parenthesis.
		return fmt.Errorf("'%s' isn't available for '%s'", name, runtime.GOOS)
	}
	var checksum plugin.Checksum
	for _, contentHash := range p.ContentHash {
		switch contentHash.Algo {
		case "sha256":
			checksum.Hasher = sha256.New()
			checksum.Value = contentHash.Value
		}
	}
	return s.pluginManager.Install(p.URL, &plugin.InstallOpts{
		Name:        pkgInfo.Package.Name,
		Update:      true,
		Checksum:    checksum,
		ProgressBar: pbar,
		// Persist the Cosmos package description alongside the plugin.
		PostInstall: func(fs afero.Fs, pluginDir string) error {
			pkgInfoFilepath := filepath.Join(pluginDir, "package.json")
			pkgInfoFile, err := fs.OpenFile(pkgInfoFilepath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
			if err != nil {
				return err
			}
			defer pkgInfoFile.Close()
			return json.NewEncoder(pkgInfoFile).Encode(pkgInfo.Package)
		},
	})
}
// promptCA prompts information about the certificate authority to the user.
// They are then expected to manually confirm that they trust it.
func (s *Setup) promptCA(cert *x509.Certificate) error {
	// Render the SHA-256 fingerprint as colon-separated uppercase hex pairs.
	sum := sha256.Sum256(cert.Raw)
	hexPairs := make([]string, 0, len(sum))
	for _, b := range sum {
		hexPairs = append(hexPairs, fmt.Sprintf("%02X", b))
	}
	msg := `Cluster Certificate Authority:
Issuer: %s
Validity:
From: %s
Until: %s
SHA256 fingerprint: %s
Do you trust it? [y/n] `
	return s.prompt.Confirm(fmt.Sprintf(
		msg,
		cert.Issuer,
		cert.NotBefore,
		cert.NotAfter,
		strings.Join(hexPairs, ":"),
	), "")
}
Display new available commands after `dcos cluster setup`
We've extracted the job, marathon, node, package, service, and task
commands into a `dcos-core-cli` plugin which gets automatically
installed during `dcos cluster setup`.
To make this new behaviour clear to users we're displaying the new
available commands at the end of the setup flow:
```
$ dcos cluster setup ...
[...]
dcos-core-cli [==============================================================================] 22.6 MiB / 22.6 MiB
dcos-enterprise-cli [==============================================================================] 26.0 MiB / 26.0 MiB
New commands available: backup, job, license, marathon, node, package, security, service, task
```
package setup
import (
"bytes"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io"
"io/ioutil"
"net/url"
"os"
"path/filepath"
"regexp"
"runtime"
"sort"
"strings"
"sync"
"time"
"github.com/dcos/dcos-cli/pkg/config"
"github.com/dcos/dcos-cli/pkg/dcos"
"github.com/dcos/dcos-cli/pkg/httpclient"
"github.com/dcos/dcos-cli/pkg/internal/corecli"
"github.com/dcos/dcos-cli/pkg/internal/cosmos"
"github.com/dcos/dcos-cli/pkg/login"
"github.com/dcos/dcos-cli/pkg/mesos"
"github.com/dcos/dcos-cli/pkg/plugin"
"github.com/dcos/dcos-cli/pkg/prompt"
"github.com/sirupsen/logrus"
"github.com/spf13/afero"
"github.com/vbauerster/mpb"
)
// Opts are options for a setup. Each field maps 1:1 onto the corresponding
// unexported field of Setup; see New.
type Opts struct {
Fs afero.Fs
Errout io.Writer
Prompt *prompt.Prompt
Logger *logrus.Logger
LoginFlow *login.Flow
ConfigManager *config.Manager
PluginManager *plugin.Manager
EnvLookup func(key string) (string, bool)
Deprecated func(msg string) error
}
// Setup represents a cluster setup. It orchestrates TLS configuration,
// login, cluster config persistence, and default plugin installation
// (see Configure). Construct it with New.
type Setup struct {
fs afero.Fs
errout io.Writer // where progress bars and user-facing notices are written
prompt *prompt.Prompt
logger *logrus.Logger
loginFlow *login.Flow
configManager *config.Manager
pluginManager *plugin.Manager
envLookup func(key string) (string, bool) // usually os.LookupEnv; injectable for tests
deprecated func(msg string) error
}
// New creates a new setup from the given options.
func New(opts Opts) *Setup {
return &Setup{
fs: opts.Fs,
errout: opts.Errout,
prompt: opts.Prompt,
logger: opts.Logger,
loginFlow: opts.LoginFlow,
configManager: opts.ConfigManager,
pluginManager: opts.PluginManager,
envLookup: opts.EnvLookup,
deprecated: opts.Deprecated,
}
}
// Configure triggers the setup flow.
//
// It resolves the setup flags, configures TLS for HTTPS cluster URLs
// (downloading the DC/OS CA bundle when hitting an unknown-authority
// error), canonicalizes the cluster URL, logs in unless an ACS token is
// supplied via the DCOS_CLUSTER_SETUP_ACS_TOKEN env var, saves the cluster
// config, optionally attaches to the cluster, and installs the default
// plugins. It returns the configured cluster or the first error hit.
func (s *Setup) Configure(flags *Flags, clusterURL string, attach bool) (*config.Cluster, error) {
if err := flags.Resolve(); err != nil {
return nil, err
}
s.logger.Info("Setting up the cluster...")
// Create a Cluster and an HTTP client with the few information already available.
cluster := config.NewCluster(nil)
cluster.SetURL(clusterURL)
httpOpts := []httpclient.Option{
httpclient.Timeout(5 * time.Second),
httpclient.Logger(s.logger),
}
// At most two passes: a second one only happens after downloading the
// CA bundle following an x509 unknown-authority error on the first.
for i := 0; i < 2; i++ {
// When using an HTTPS URL, configure TLS for the HTTP client accordingly.
if strings.HasPrefix(clusterURL, "https://") {
tlsConfig, err := s.configureTLS(httpOpts, flags)
if err != nil {
return nil, err
}
cluster.SetTLS(config.TLS{Insecure: tlsConfig.InsecureSkipVerify})
httpOpts = append(httpOpts, httpclient.TLS(tlsConfig))
}
// Make sure we continue the setup flow with the canonical cluster URL.
canonicalClusterURL, err := detectCanonicalClusterURL(cluster.URL(), httpOpts)
if err == nil {
if canonicalClusterURL != cluster.URL() {
s.logger.Warnf("Continuing cluster setup with: %s", canonicalClusterURL)
cluster.SetURL(canonicalClusterURL)
}
break
}
// Download the DC/OS CA bundle when getting an unknown authority error.
if s.isX509UnknownAuthorityError(err) && len(flags.caBundle) == 0 {
flags.caBundle, err = s.downloadDCOSCABundle(clusterURL, httpOpts)
if err != nil {
return nil, err
}
continue
}
return nil, err
}
// Login to get the ACS token, unless it is already present as an env var.
acsToken, _ := s.envLookup("DCOS_CLUSTER_SETUP_ACS_TOKEN")
if acsToken == "" {
httpClient := httpclient.New(cluster.URL(), httpOpts...)
var err error
acsToken, err = s.loginFlow.Start(flags.loginFlags, httpClient)
if err == login.ErrAuthDisabled {
s.logger.Warn("This cluster does not require authenticated requests. Skipping login.")
} else if err != nil {
return nil, err
}
}
cluster.SetACSToken(acsToken)
httpClient := httpclient.New(cluster.URL(), append(httpOpts, httpclient.ACSToken(cluster.ACSToken()))...)
// Read cluster ID from cluster metadata.
metadata, err := dcos.NewClient(httpClient).Metadata()
if err != nil {
return nil, err
}
if flags.name != "" {
// A custom cluster name has been passed as a flag.
cluster.SetName(flags.name)
} else if stateSummary, err := mesos.NewClient(httpClient).StateSummary(); err == nil {
// Read cluster name from Mesos state summary.
cluster.SetName(stateSummary.Cluster)
} else {
// Fallback to cluster ID as cluster name.
cluster.SetName(metadata.ClusterID)
}
// Create the config for the given cluster.
err = s.configManager.Save(cluster.Config(), metadata.ClusterID, flags.caBundle)
if err != nil {
return nil, err
}
if attach {
err = s.configManager.Attach(cluster.Config())
if err != nil {
return nil, err
}
s.logger.Infof("You are now attached to cluster %s", cluster.ID())
}
// Install default plugins (dcos-core-cli and dcos-enterprise-cli).
s.pluginManager.SetCluster(cluster)
if err = s.installDefaultPlugins(httpClient); err != nil {
return nil, err
}
s.logger.Infof("%s is now setup", clusterURL)
return cluster, nil
}
// detectCanonicalClusterURL issues a HEAD request against "/" and, on a 200
// response, returns the final request URL (after any redirects) with a
// lowercased host and without a trailing slash.
func detectCanonicalClusterURL(clusterURL string, httpOpts []httpclient.Option) (string, error) {
	httpClient := httpclient.New(clusterURL, httpOpts...)
	req, err := httpClient.NewRequest("HEAD", "/", nil)
	if err != nil {
		return "", err
	}
	resp, err := httpClient.Do(req)
	if err != nil {
		return "", err
	}
	// Close the (empty) HEAD response body so the transport can reuse the
	// underlying connection; the original leaked it.
	defer resp.Body.Close()
	if resp.StatusCode == 200 {
		resp.Request.URL.Host = strings.ToLower(resp.Request.URL.Host)
		return strings.TrimRight(resp.Request.URL.String(), "/"), nil
	}
	return "", fmt.Errorf("couldn't detect a canonical cluster URL")
}
// configureTLS creates the TLS configuration for a given set of flags.
// With --insecure, certificate verification is disabled entirely. With a CA
// bundle, a cert pool built from it is used; a nil pool means the system
// roots are used.
func (s *Setup) configureTLS(httpOpts []httpclient.Option, flags *Flags) (*tls.Config, error) {
// Return early with an insecure TLS config when `--insecure` is passed.
if flags.insecure {
return &tls.Config{InsecureSkipVerify: true}, nil
}
// Create a cert pool from the CA bundle PEM. The user is prompted for manual
// verification of the certificate authority, unless `--no-check` or `--ca-bundle`
// were explicitly passed.
var certPool *x509.CertPool
if len(flags.caBundle) > 0 {
var err error
certPool, err = s.decodePEMCerts(flags.caBundle, !flags.noCheck)
if err != nil {
return nil, err
}
}
return &tls.Config{RootCAs: certPool}, nil
}
// isX509UnknownAuthorityError checks whether an error is a *url.Error
// wrapping an x509.UnknownAuthorityError.
func (s *Setup) isX509UnknownAuthorityError(err error) bool {
urlErr, ok := err.(*url.Error)
if !ok {
return false
}
_, ok = urlErr.Err.(x509.UnknownAuthorityError)
return ok
}
// downloadDCOSCABundle downloads the cluster certificate authority at "/ca/dcos-ca.crt".
//
// The request deliberately skips certificate verification: its purpose is to
// fetch the CA needed to verify the cluster in the first place. The user is
// later prompted to confirm the CA fingerprint (see promptCA).
func (s *Setup) downloadDCOSCABundle(clusterURL string, httpOpts []httpclient.Option) ([]byte, error) {
	insecureHTTPClient := httpclient.New(clusterURL, append(httpOpts, httpclient.TLS(&tls.Config{
		InsecureSkipVerify: true,
	}))...)
	resp, err := insecureHTTPClient.Get("/ca/dcos-ca.crt")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// Don't hand an error page (eg. a 404 body) to the PEM decoder as if it
	// were the CA bundle.
	if resp.StatusCode != 200 {
		return nil, fmt.Errorf("couldn't download the CA bundle: unexpected status code %d", resp.StatusCode)
	}
	caPEM, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	return caPEM, nil
}
// decodePEMCerts creates a x509.CertPool struct based on a PEM certificate authority bundle.
// When prompt is set to true, the first certificate in the bundle is prompted for user confirmation.
// Non-certificate PEM blocks and unparsable certificates are skipped; a
// bundle with no valid certificates yields a nil pool and no error.
func (s *Setup) decodePEMCerts(caPEM []byte, prompt bool) (*x509.CertPool, error) {
var certPool *x509.CertPool
for len(caPEM) > 0 {
var block *pem.Block
block, caPEM = pem.Decode(caPEM)
if block == nil {
// No further PEM data.
break
}
if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
continue
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
// Skip malformed certificates rather than failing the whole bundle.
continue
}
if certPool == nil {
// Lazily create the pool on the first valid certificate, and
// prompt the user for that certificate only.
certPool = x509.NewCertPool()
if prompt {
if err := s.promptCA(cert); err != nil {
return nil, err
}
}
}
certPool.AddCert(cert)
}
return certPool, nil
}
// installDefaultPlugins installs the dcos-core-cli and (if applicable) the dcos-enterprise-cli plugin.
// The installation of the core plugin only works with DC/OS >= 1.10 and the installation of the EE plugin only works
// with DC/OS >= 1.12 due to the lack of a "Variant" key in the "/dcos-metadata/dcos-version.json" endpoint before.
// After installing, it prints the sorted list of newly available commands
// to s.errout.
func (s *Setup) installDefaultPlugins(httpClient *httpclient.Client) error {
version, err := dcos.NewClient(httpClient).Version()
if err != nil {
return fmt.Errorf("unable to get DC/OS version, installation of the plugins aborted: %s", err)
}
if regexp.MustCompile(`^1\.[7-9]\D*`).MatchString(version.Version) {
return errors.New("DC/OS version of the cluster < 1.10, installation of the plugins aborted")
}
var wg sync.WaitGroup
pbar := mpb.New(mpb.WithOutput(s.errout), mpb.WithWaitGroup(&wg))
wg.Add(2)
// installedPlugins is appended to by the EE goroutine below and read
// after pbar.Wait(); this presumably relies on pbar.Wait() waiting on wg
// for the happens-before edge — NOTE(review): confirm mpb guarantees it.
installedPlugins := []string{"dcos-core-cli"}
// Install dcos-enterprise-cli.
go func() {
// Install dcos-enterprise-cli if the DC/OS variant metadata is "enterprise".
if version.DCOSVariant == "enterprise" {
if err := s.installPlugin("dcos-enterprise-cli", httpClient, version, pbar); err != nil {
s.logger.Debug(err)
} else {
installedPlugins = append(installedPlugins, "dcos-enterprise-cli")
}
} else if version.DCOSVariant == "" {
// We add this message if the DC/OS variant is "" (DC/OS < 1.12)
// or if there was an error while installing the EE plugin.
s.logger.Error("Please run “dcos package install dcos-enterprise-cli” if you use a DC/OS Enterprise cluster")
}
wg.Done()
}()
// Install dcos-core-cli.
errCore := s.installPlugin("dcos-core-cli", httpClient, version, pbar)
wg.Done()
pbar.Wait()
if errCore != nil {
// Extract the dcos-core-cli bundle if it couldn't be downloaded.
errCore = corecli.InstallPlugin(s.fs, s.pluginManager, s.deprecated)
if errCore != nil {
return errCore
}
}
// Collect and display the commands contributed by the plugins that were
// just installed.
var newCommands []string
for _, installedPlugin := range installedPlugins {
p, err := s.pluginManager.Plugin(installedPlugin)
if err != nil {
s.logger.Debug(err)
continue
}
for _, command := range p.Commands {
newCommands = append(newCommands, command.Name)
}
}
sort.Strings(newCommands)
fmt.Fprintf(s.errout, "New commands available: %s\n", strings.Join(newCommands, ", "))
return nil
}
// installPlugin installs a plugin by its name.
//
// It first tries the canonical download URL, then falls back to Cosmos.
// Either step can be skipped through DCOS_CLUSTER_SETUP_SKIP_* env vars.
func (s *Setup) installPlugin(name string, httpClient *httpclient.Client, version *dcos.Version, pbar *mpb.Progress) error {
	s.logger.Infof("Installing %s...", name)

	skipCanonical, _ := s.envLookup("DCOS_CLUSTER_SETUP_SKIP_CANONICAL_URL_INSTALL")
	if skipCanonical != "1" {
		canonicalErr := s.installPluginFromCanonicalURL(name, version, pbar)
		if canonicalErr == nil {
			return nil
		}
		s.logger.Debug(canonicalErr)
	}

	skipCosmos, _ := s.envLookup("DCOS_CLUSTER_SETUP_SKIP_COSMOS_INSTALL")
	if skipCosmos == "1" {
		return errors.New("skipping plugin installation from Cosmos (DCOS_CLUSTER_SETUP_SKIP_COSMOS_INSTALL=1)")
	}
	return s.installPluginFromCosmos(name, httpClient, pbar)
}
// installPluginFromCanonicalURL installs a plugin using its canonical URL.
//
// It probes the stable "releases" URL with a HEAD request and falls back to
// the "testing" URL when the release artifact does not exist (4xx response).
func (s *Setup) installPluginFromCanonicalURL(name string, version *dcos.Version, pbar *mpb.Progress) error {
	domain := "downloads.dcos.io"
	if name == "dcos-enterprise-cli" {
		domain = "downloads.mesosphere.io"
	}

	platform := runtime.GOOS

	// Only the MAJOR.MINOR part of the DC/OS version selects the artifact.
	matches := regexp.MustCompile(`^(\d+)\.(\d+)\D*`).FindStringSubmatch(version.Version)
	if matches == nil {
		return fmt.Errorf("unable to parse DC/OS version %s", version.Version)
	}
	dcosVersion := matches[1] + "." + matches[2]

	url := fmt.Sprintf(
		"https://%s/cli/releases/plugins/%s/%s/x86-64/%s-%s-patch.latest.zip",
		domain, name, platform, name, dcosVersion,
	)

	httpClient := httpclient.New("")
	req, err := httpClient.NewRequest("HEAD", url, nil)
	if err != nil {
		return err
	}
	resp, err := httpClient.Do(req)
	if err != nil {
		return err
	}
	// Close the response body so the underlying connection can be reused;
	// the previous code leaked it.
	resp.Body.Close()

	if resp.StatusCode >= 400 && resp.StatusCode < 500 {
		url = fmt.Sprintf(
			"https://%s/cli/testing/plugins/%s/%s/x86-64/%s-%s-patch.x.zip",
			domain, name, platform, name, dcosVersion,
		)
	}

	return s.pluginManager.Install(url, &plugin.InstallOpts{
		Name:        name,
		Update:      true,
		ProgressBar: pbar,
	})
}
// installPluginFromCosmos installs a plugin through Cosmos.
func (s *Setup) installPluginFromCosmos(name string, httpClient *httpclient.Client, pbar *mpb.Progress) error {
	// Get package information from Cosmos.
	pkgInfo, err := cosmos.NewClient(httpClient).DescribePackage(name)
	if err != nil {
		return err
	}

	// Get the download URL for the current platform.
	p, ok := pkgInfo.Package.Resource.CLI.Plugins[runtime.GOOS]["x86-64"]
	if !ok {
		// (fixed: the message previously had an unbalanced trailing parenthesis)
		return fmt.Errorf("'%s' isn't available for '%s'", name, runtime.GOOS)
	}

	// Pick the sha256 content hash, if one is advertised, for verification.
	var checksum plugin.Checksum
	for _, contentHash := range p.ContentHash {
		switch contentHash.Algo {
		case "sha256":
			checksum.Hasher = sha256.New()
			checksum.Value = contentHash.Value
		}
	}

	return s.pluginManager.Install(p.URL, &plugin.InstallOpts{
		Name:        pkgInfo.Package.Name,
		Update:      true,
		Checksum:    checksum,
		ProgressBar: pbar,
		PostInstall: func(fs afero.Fs, pluginDir string) error {
			// Persist the Cosmos package description next to the plugin.
			pkgInfoFilepath := filepath.Join(pluginDir, "package.json")
			pkgInfoFile, err := fs.OpenFile(pkgInfoFilepath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
			if err != nil {
				return err
			}
			defer pkgInfoFile.Close()
			return json.NewEncoder(pkgInfoFile).Encode(pkgInfo.Package)
		},
	})
}
// promptCA prompts information about the certificate authority to the user.
// They are then expected to manually confirm that they trust it.
func (s *Setup) promptCA(cert *x509.Certificate) error {
	// Render the SHA256 fingerprint as colon-separated uppercase hex pairs.
	sum := sha256.Sum256(cert.Raw)
	var fp bytes.Buffer
	for i := 0; i < len(sum); i++ {
		if i > 0 {
			fp.WriteByte(':')
		}
		fmt.Fprintf(&fp, "%02X", sum[i])
	}

	msg := `Cluster Certificate Authority:
Issuer: %s
Validity:
From: %s
Until: %s
SHA256 fingerprint: %s
Do you trust it? [y/n] `

	return s.prompt.Confirm(fmt.Sprintf(
		msg,
		cert.Issuer,
		cert.NotBefore,
		cert.NotAfter,
		fp.String(),
	), "")
}
|
package store
import (
"os"
"path"
"strings"
)
// Event describes a single mutation applied to the store, delivered to watchers.
type Event struct {
	Type  int    // one of Set, Del, Add, Rem
	Seqn  uint64 // sequence number of the mutation that produced the event
	Path  string
	Value string
}

// Event type bits; combinable as a bitmask.
const (
	Set = (1 << iota)
	Del
	Add
	Rem
)

var (
	// BadPathError is returned for paths that are empty, not absolute, or
	// contain '=' or ' ' (both reserved by the mutation wire format).
	BadPathError = os.NewError("bad path")
)

// Store is a sequenced key/value store driven by a single goroutine (see
// process); the maps below are owned by that goroutine.
type Store struct {
	applyCh chan apply
	reqCh   chan req
	watchCh chan watch
	watches map[string][]watch // path -> registered watches
	todo    map[uint64]apply   // out-of-order mutations awaiting their turn
}

// apply is one decoded mutation queued for application at seqn.
type apply struct {
	seqn uint64
	op   int
	k    string
	v    string
}

// req is a lookup request; the reply is sent on ch.
type req struct {
	k  string
	ch chan reply
}

// reply carries a lookup result (comma-ok style).
type reply struct {
	v  string
	ok bool
}

// watch is a registered watcher for path k.
type watch struct {
	ch chan Event
	k  string
}
// NewStore creates a Store and starts its single owner goroutine.
func NewStore() *Store {
	s := &Store{
		applyCh: make(chan apply),
		reqCh:   make(chan req),
		watchCh: make(chan watch),
		todo:    make(map[uint64]apply),
		watches: make(map[string][]watch),
	}
	go s.process()
	return s
}
// checkPath validates k as a store path: non-empty, absolute (leading '/'),
// and free of '=' and ' ' (reserved by the mutation wire format).
func checkPath(k string) os.Error {
	switch {
	case len(k) < 1,
		k[0] != '/',
		strings.Count(k, "=") > 0,
		strings.Count(k, " ") > 0:
		return BadPathError
	}
	return nil
}
// EncodeSet encodes setting path to v as the mutation string "path=v".
// Returns BadPathError for invalid paths.
func EncodeSet(path, v string) (mutation string, err os.Error) {
	if err = checkPath(path); err != nil {
		return
	}
	return path + "=" + v, nil
}
// EncodeDel encodes a delete of path as a mutation string.
// Returns BadPathError for invalid paths.
func EncodeDel(path string) (mutation string, err os.Error) {
	// Plain assignment, not `:=`: the short declaration shadowed the named
	// return value, so a bad path silently returned ("", nil).
	if err = checkPath(path); err != nil {
		return
	}
	return path, nil
}
// decode parses a mutation string: "path" means Del, "path=value" means Set.
func decode(mutation string) (op int, path, v string, err os.Error) {
	// Split on the first '=' only; values may themselves contain '='.
	parts := strings.Split(mutation, "=", 2)
	if err = checkPath(parts[0]); err != nil {
		return
	}
	switch len(parts) {
	case 1:
		return Del, parts[0], "", nil
	case 2:
		return Set, parts[0], parts[1], nil
	}
	panic("can't happen") // Split with limit 2 yields exactly 1 or 2 parts
}
// notify delivers an Event to every watch registered on k.
// Each send blocks until the watcher receives.
func (s *Store) notify(ev int, seqn uint64, k, v string) {
	for _, w := range s.watches[k] {
		w.ch <- Event{ev, seqn, k, v}
	}
}
// append grows *ws as needed and adds w at the end.
// Hand-rolled growable-slice helper (this predates/shadows a builtin append).
func append(ws *[]watch, w watch) {
	l := len(*ws)
	if l+1 > cap(*ws) {
		// Double (plus one) to amortise future growth, then copy over.
		ns := make([]watch, (l+1)*2)
		copy(ns, *ws)
		*ws = ns
	}
	*ws = (*ws)[0 : l+1]
	(*ws)[l] = w
}
// process is the store's single owner goroutine. It serialises all access to
// the value map: it applies mutations strictly in sequence-number order,
// answers lookups, and registers watches.
func (s *Store) process() {
	next := uint64(1) // next sequence number to apply
	values := make(map[string]string)
	for {
		select {
		case a := <-s.applyCh:
			// Buffer the mutation; stale seqns (< next) are dropped.
			if a.seqn >= next {
				s.todo[a.seqn] = a
			}
			// Drain every mutation that is now contiguous with `next`.
			for t, ok := s.todo[next]; ok; t, ok = s.todo[next] {
				switch t.op {
				case Set:
					go s.notify(Set, a.seqn, t.k, t.v)
					if _, ok := values[t.k]; !ok {
						// First write to this key: also notify the parent
						// directory that a child was added.
						dirname, basename := path.Split(t.k)
						go s.notify(Add, a.seqn, dirname, basename)
					}
					values[t.k] = t.v
				case Del:
					values[t.k] = "", false // old-Go map-entry deletion
				}
				s.todo[next] = apply{}, false // old-Go map-entry deletion
				next++
			}
		case r := <-s.reqCh:
			v, ok := values[r.k]
			r.ch <- reply{v, ok}
		case w := <-s.watchCh:
			watches := s.watches[w.k]
			append(&watches, w)
			s.watches[w.k] = watches
		}
	}
}
// Apply decodes mutation and queues it for application at seqn.
// Malformed mutations are silently dropped.
func (s *Store) Apply(seqn uint64, mutation string) {
	op, path, v, err := decode(mutation)
	if err != nil {
		return
	}
	s.applyCh <- apply{seqn, op, path, v}
}
// Lookup returns the value stored at path.
// For a missing path, `ok == false`. Otherwise, it is `true`.
func (s *Store) Lookup(path string) (v string, ok bool) {
	ch := make(chan reply)
	s.reqCh <- req{path, ch}
	rep := <-ch
	return rep.v, rep.ok
}
// Watch registers a watch on path and returns the delivery channel.
// `eventMask` is one or more of `Set`, `Del`, `Add`, and `Rem`, bitwise OR-ed
// together.
// NOTE(review): eventMask is currently ignored — the watch struct has no mask
// field, so every event on path is delivered regardless of mask. Confirm intent.
func (s *Store) Watch(path string, eventMask byte) (events chan Event) {
	ch := make(chan Event)
	s.watchCh <- watch{ch, path}
	return ch
}
use uint consistently for ops
package store
import (
"os"
"path"
"strings"
)
// Event describes a single mutation applied to the store, delivered to watchers.
type Event struct {
	Type  uint   // one of Set, Del, Add, Rem
	Seqn  uint64 // sequence number of the mutation that produced the event
	Path  string
	Value string
}

// Event type bits; combinable as a bitmask.
const (
	Set = uint(1 << iota)
	Del
	Add
	Rem
)

var (
	// BadPathError is returned for paths that are empty, not absolute, or
	// contain '=' or ' ' (both reserved by the mutation wire format).
	BadPathError = os.NewError("bad path")
)

// Store is a sequenced key/value store driven by a single goroutine (see
// process); the maps below are owned by that goroutine.
type Store struct {
	applyCh chan apply
	reqCh   chan req
	watchCh chan watch
	watches map[string][]watch // path -> registered watches
	todo    map[uint64]apply   // out-of-order mutations awaiting their turn
}

// apply is one decoded mutation queued for application at seqn.
type apply struct {
	seqn uint64
	op   uint
	k    string
	v    string
}

// req is a lookup request; the reply is sent on ch.
type req struct {
	k  string
	ch chan reply
}

// reply carries a lookup result (comma-ok style).
type reply struct {
	v  string
	ok bool
}

// watch is a registered watcher for path k.
type watch struct {
	ch   chan Event
	mask uint // requested event mask; NOTE(review): not yet consulted by notify — confirm
	k    string
}
// NewStore creates a Store and starts its single owner goroutine.
func NewStore() *Store {
	s := &Store{
		applyCh: make(chan apply),
		reqCh:   make(chan req),
		watchCh: make(chan watch),
		todo:    make(map[uint64]apply),
		watches: make(map[string][]watch),
	}
	go s.process()
	return s
}
// checkPath validates k as a store path: non-empty, absolute (leading '/'),
// and free of '=' and ' ' (reserved by the mutation wire format).
func checkPath(k string) os.Error {
	switch {
	case len(k) < 1,
		k[0] != '/',
		strings.Count(k, "=") > 0,
		strings.Count(k, " ") > 0:
		return BadPathError
	}
	return nil
}
// EncodeSet encodes setting path to v as the mutation string "path=v".
// Returns BadPathError for invalid paths.
func EncodeSet(path, v string) (mutation string, err os.Error) {
	if err = checkPath(path); err != nil {
		return
	}
	return path + "=" + v, nil
}
// EncodeDel encodes a delete of path as a mutation string.
// Returns BadPathError for invalid paths.
func EncodeDel(path string) (mutation string, err os.Error) {
	// Plain assignment, not `:=`: the short declaration shadowed the named
	// return value, so a bad path silently returned ("", nil).
	if err = checkPath(path); err != nil {
		return
	}
	return path, nil
}
// decode parses a mutation string: "path" means Del, "path=value" means Set.
func decode(mutation string) (op uint, path, v string, err os.Error) {
	// Split on the first '=' only; values may themselves contain '='.
	parts := strings.Split(mutation, "=", 2)
	if err = checkPath(parts[0]); err != nil {
		return
	}
	switch len(parts) {
	case 1:
		return Del, parts[0], "", nil
	case 2:
		return Set, parts[0], parts[1], nil
	}
	panic("can't happen") // Split with limit 2 yields exactly 1 or 2 parts
}
// notify delivers an Event to every watch registered on k.
// Each send blocks until the watcher receives.
// NOTE(review): watch.mask is not consulted here, so masks are not yet
// filtered — confirm whether filtering belongs in this loop.
func (s *Store) notify(ev uint, seqn uint64, k, v string) {
	for _, w := range s.watches[k] {
		w.ch <- Event{ev, seqn, k, v}
	}
}
// append grows *ws as needed and adds w at the end.
// Hand-rolled growable-slice helper (this predates/shadows a builtin append).
func append(ws *[]watch, w watch) {
	l := len(*ws)
	if l+1 > cap(*ws) {
		// Double (plus one) to amortise future growth, then copy over.
		ns := make([]watch, (l+1)*2)
		copy(ns, *ws)
		*ws = ns
	}
	*ws = (*ws)[0 : l+1]
	(*ws)[l] = w
}
// process is the store's single owner goroutine. It serialises all access to
// the value map: it applies mutations strictly in sequence-number order,
// answers lookups, and registers watches.
func (s *Store) process() {
	next := uint64(1) // next sequence number to apply
	values := make(map[string]string)
	for {
		select {
		case a := <-s.applyCh:
			// Buffer the mutation; stale seqns (< next) are dropped.
			if a.seqn >= next {
				s.todo[a.seqn] = a
			}
			// Drain every mutation that is now contiguous with `next`.
			for t, ok := s.todo[next]; ok; t, ok = s.todo[next] {
				switch t.op {
				case Set:
					go s.notify(Set, a.seqn, t.k, t.v)
					if _, ok := values[t.k]; !ok {
						// First write to this key: also notify the parent
						// directory that a child was added.
						dirname, basename := path.Split(t.k)
						go s.notify(Add, a.seqn, dirname, basename)
					}
					values[t.k] = t.v
				case Del:
					values[t.k] = "", false // old-Go map-entry deletion
				}
				s.todo[next] = apply{}, false // old-Go map-entry deletion
				next++
			}
		case r := <-s.reqCh:
			v, ok := values[r.k]
			r.ch <- reply{v, ok}
		case w := <-s.watchCh:
			watches := s.watches[w.k]
			append(&watches, w)
			s.watches[w.k] = watches
		}
	}
}
// Apply decodes mutation and queues it for application at seqn.
// Malformed mutations are silently dropped.
func (s *Store) Apply(seqn uint64, mutation string) {
	op, path, v, err := decode(mutation)
	if err != nil {
		return
	}
	s.applyCh <- apply{seqn, op, path, v}
}
// Lookup returns the value stored at path.
// For a missing path, `ok == false`. Otherwise, it is `true`.
func (s *Store) Lookup(path string) (v string, ok bool) {
	ch := make(chan reply)
	s.reqCh <- req{path, ch}
	rep := <-ch
	return rep.v, rep.ok
}
// Watch registers a watch on path and returns the channel on which events
// for that path are delivered.
// `eventMask` is one or more of `Set`, `Del`, `Add`, and `Rem`, bitwise OR-ed
// together.
func (s *Store) Watch(path string, eventMask uint) (events chan Event) {
	ch := make(chan Event)
	// Record the caller's mask: the previous code hard-coded 0 here,
	// silently dropping the eventMask argument.
	s.watchCh <- watch{ch, eventMask, path}
	return ch
}
|
package xpi
import (
"archive/zip"
"bytes"
"crypto/sha1"
"crypto/sha256"
"encoding/base64"
"fmt"
"github.com/pkg/errors"
"io"
"io/ioutil"
"strings"
"unicode/utf8"
)
// consts and vars for formatFilename
const (
	maxLineByteLen      = 72 // JAR spec: no manifest line may exceed 72 bytes
	maxContinuedByteLen = 70 // -1 for leading space and -1 for trailing \n newline
)

// Byte budget for the first "Name: <...>" line; + 1 for a \n newline.
var maxFirstLineByteLen = maxLineByteLen - (len([]byte("Name: ")) + 1)
// formatFilename formats filename lines to satisfy:
//
// No line may be longer than 72 bytes (not characters), in its
// UTF8-encoded form. If a value would make the initial line longer
// than this, it should be continued on extra lines (each starting
// with a single SPACE).
//
// https://docs.oracle.com/javase/8/docs/technotes/guides/jar/jar.html#Signed_JAR_File
// refed from: https://source.android.com/security/apksigning/#v1
func formatFilename(filename []byte) (formatted []byte, err error) {
	// NOTE(review): the error text says "apk:" although this is the xpi
	// package — looks like a copy/paste from an APK signer; confirm.
	if !utf8.Valid(filename) {
		err = errors.Errorf("apk: invalid UTF8 in filename %s", filename)
		return
	}
	var (
		filenameLen      = len(filename)
		writtenFileBytes = 0 // number of bytes of the filename we've written
	)
	// Short names fit on the initial "Name: " line unchanged.
	if filenameLen <= maxFirstLineByteLen {
		formatted = filename
		return
	}
	// Fill the first line, then emit continuation lines prefixed by "\n ".
	formatted = append(formatted, filename[:maxFirstLineByteLen]...)
	writtenFileBytes += maxFirstLineByteLen
	for {
		if filenameLen-writtenFileBytes <= 0 {
			break
		} else if filenameLen-writtenFileBytes < maxContinuedByteLen {
			// Final, short continuation line with the remaining bytes.
			formatted = append(formatted, []byte("\n ")...)
			formatted = append(formatted, filename[writtenFileBytes:]...)
			break
		} else {
			// Full continuation line of exactly maxContinuedByteLen bytes.
			formatted = append(formatted, []byte("\n ")...)
			formatted = append(formatted, filename[writtenFileBytes:writtenFileBytes+maxContinuedByteLen]...)
			writtenFileBytes += maxContinuedByteLen
		}
	}
	return
}
// makePKCS7Manifest builds the PKCS7 manifest: the JAR manifest for input
// plus Name/digest sections for each provided metafile.
func makePKCS7Manifest(input []byte, metafiles []Metafile) (manifest []byte, err error) {
	// Refuse metafiles outside META-INF/ before doing any work.
	for _, f := range metafiles {
		if !f.IsNameValid() {
			err = errors.Errorf("Cannot pack metafile with invalid path %s", f.Name)
			return
		}
	}

	manifest, err = makeJARManifest(input)
	if err != nil {
		return
	}

	// Append a SHA1/SHA256 digest section per metafile onto the manifest.
	mw := bytes.NewBuffer(manifest)
	for _, f := range metafiles {
		fmt.Fprintf(mw, "Name: %s\nDigest-Algorithms: SHA1 SHA256\n", f.Name)
		h1 := sha1.New()
		h1.Write(f.Body)
		fmt.Fprintf(mw, "SHA1-Digest: %s\n", base64.StdEncoding.EncodeToString(h1.Sum(nil)))
		h2 := sha256.New()
		h2.Write(f.Body)
		fmt.Fprintf(mw, "SHA256-Digest: %s\n\n", base64.StdEncoding.EncodeToString(h2.Sum(nil)))
	}
	return mw.Bytes(), err
}
// makeJARManifestAndSignatureFile writes hashes for all entries in a zip to a
// manifest file then hashes the manifest file to write a signature
// file and returns both
func makeJARManifestAndSignatureFile(input []byte) (manifest, sigfile []byte, err error) {
	if manifest, err = makeJARManifest(input); err != nil {
		return
	}
	sigfile, err = makeJARSignatureFile(manifest)
	return
}
// makeJARManifest calculates a sha1 and sha256 hash for each zip entry and writes them to a manifest file
func makeJARManifest(input []byte) (manifest []byte, err error) {
	inputReader := bytes.NewReader(input)
	r, err := zip.NewReader(inputReader, int64(len(input)))
	if err != nil {
		return
	}

	// generate the manifest file by calculating a sha1 and sha256 hashes for each zip entry
	mw := bytes.NewBuffer(manifest)
	// (fixed: no-arg fmt.Sprintf was a no-op wrapper around the literal)
	manifest = []byte("Manifest-Version: 1.0\n\n")
	for _, f := range r.File {
		if isJARSignatureFile(f.Name) {
			// reserved signature files do not get included in the manifest
			continue
		}
		if f.FileInfo().IsDir() {
			// directories do not get included
			continue
		}
		rc, err := f.Open()
		if err != nil {
			return manifest, err
		}
		data, err := ioutil.ReadAll(rc)
		// Close the entry reader on every path; the previous code leaked
		// one open reader per zip entry.
		rc.Close()
		if err != nil {
			return manifest, err
		}
		fmt.Fprintf(mw, "Name: %s\nDigest-Algorithms: SHA1 SHA256\n", f.Name)
		h1 := sha1.New()
		h1.Write(data)
		fmt.Fprintf(mw, "SHA1-Digest: %s\n", base64.StdEncoding.EncodeToString(h1.Sum(nil)))
		h2 := sha256.New()
		h2.Write(data)
		fmt.Fprintf(mw, "SHA256-Digest: %s\n\n", base64.StdEncoding.EncodeToString(h2.Sum(nil)))
	}
	manifestBody := mw.Bytes()
	manifest = append(manifest, manifestBody...)
	return
}
// makeJARSignatureFile calculates a signature file by hashing the manifest with sha1 and sha256
func makeJARSignatureFile(manifest []byte) (sigfile []byte, err error) {
sw := bytes.NewBuffer(sigfile)
fmt.Fprint(sw, "Signature-Version: 1.0\n")
h1 := sha1.New()
h1.Write(manifest)
fmt.Fprintf(sw, "SHA1-Digest-Manifest: %s\n", base64.StdEncoding.EncodeToString(h1.Sum(nil)))
h2 := sha256.New()
h2.Write(manifest)
fmt.Fprintf(sw, "SHA256-Digest-Manifest: %s\n\n", base64.StdEncoding.EncodeToString(h2.Sum(nil)))
sigfile = sw.Bytes()
return
}
// Metafile is a file to pack into a JAR at .Name with contents .Body
type Metafile struct {
	Name string
	Body []byte
}

// IsNameValid checks whether a Metafile.Name is non-nil and begins
// with "META-INF/" functions taking Metafile args should validate
// names before reading or writing them to JARs
func (m *Metafile) IsNameValid() bool {
	if m == nil {
		return false
	}
	return strings.HasPrefix(m.Name, "META-INF/")
}
// repackJARWithMetafiles inserts metafiles in the input JAR file and returns a JAR ZIP archive
func repackJARWithMetafiles(input []byte, metafiles []Metafile) (output []byte, err error) {
	// Refuse metafiles outside META-INF/ before doing any work.
	for _, f := range metafiles {
		if !f.IsNameValid() {
			err = errors.Errorf("Cannot pack metafile with invalid path %s", f.Name)
			return
		}
	}

	inputReader := bytes.NewReader(input)
	r, err := zip.NewReader(inputReader, int64(len(input)))
	if err != nil {
		return
	}

	// Create a buffer to write our archive to.
	buf := new(bytes.Buffer)
	// Create a new zip archive.
	w := zip.NewWriter(buf)

	// Iterate through the files in the archive,
	for _, f := range r.File {
		// skip signature files, we have new ones we'll add at the end
		if isJARSignatureFile(f.Name) {
			continue
		}
		var rc io.ReadCloser
		rc, err = f.Open()
		if err != nil {
			return
		}
		var data []byte
		data, err = ioutil.ReadAll(rc)
		// Close eagerly: the previous code skipped Close on every error
		// path, leaking the entry reader. (Also removes the inner fwhead
		// shadowing the outer declaration.)
		rc.Close()
		if err != nil {
			return
		}
		// insert the file into the archive, re-compressed with Deflate
		var fw io.Writer
		fw, err = w.CreateHeader(&zip.FileHeader{
			Name:   f.Name,
			Method: zip.Deflate,
		})
		if err != nil {
			return
		}
		if _, err = fw.Write(data); err != nil {
			return
		}
	}

	// insert the signature files. Those will be compressed
	// so we don't have to worry about their alignment
	for _, meta := range metafiles {
		var fw io.Writer
		fw, err = w.CreateHeader(&zip.FileHeader{
			Name:   meta.Name,
			Method: zip.Deflate,
		})
		if err != nil {
			return
		}
		if _, err = fw.Write(meta.Body); err != nil {
			return
		}
	}

	// Make sure to check the error on Close.
	if err = w.Close(); err != nil {
		return
	}

	output = buf.Bytes()
	return
}
// repackJAR inserts the manifest, signature file and pkcs7 signature in the input JAR file,
// and return a JAR ZIP archive
func repackJAR(input, manifest, sigfile, signature []byte) (output []byte, err error) {
	// The three canonical signing metafiles, at their reserved META-INF paths.
	var metas = []Metafile{
		{pkcs7ManifestPath, manifest},
		{pkcs7SignatureFilePath, sigfile},
		{pkcs7SigPath, signature},
	}
	return repackJARWithMetafiles(input, metas)
}
// The JAR format defines a number of signature files stored under the META-INF directory
// META-INF/MANIFEST.MF
// META-INF/*.SF
// META-INF/*.DSA
// META-INF/*.RSA
// META-INF/SIG-*
// and their lowercase variants
// https://docs.oracle.com/javase/8/docs/technotes/guides/jar/jar.html#Signed_JAR_File
func isJARSignatureFile(name string) bool {
	if !strings.HasPrefix(name, "META-INF/") {
		return false
	}
	rest := strings.TrimPrefix(name, "META-INF/")
	switch {
	case rest == "MANIFEST.MF", rest == "manifest.mf":
		return true
	case strings.HasSuffix(rest, ".SF"), strings.HasSuffix(rest, ".sf"):
		return true
	case strings.HasSuffix(rest, ".RSA"), strings.HasSuffix(rest, ".rsa"):
		return true
	case strings.HasSuffix(rest, ".DSA"), strings.HasSuffix(rest, ".dsa"):
		return true
	case strings.HasPrefix(rest, "SIG-"), strings.HasPrefix(rest, "sig-"):
		return true
	}
	return false
}
// readFileFromZIP reads a given filename out of a ZIP and returns it or an error
func readFileFromZIP(signedXPI []byte, filename string) ([]byte, error) {
	zipReader := bytes.NewReader(signedXPI)
	r, err := zip.NewReader(zipReader, int64(len(signedXPI)))
	if err != nil {
		return nil, errors.Wrap(err, "Error reading ZIP")
	}
	for _, f := range r.File {
		if f.Name != filename {
			continue
		}
		rc, err := f.Open()
		if err != nil {
			// Check the error before deferring Close: the previous code
			// deferred rc.Close() first, which panics on a nil reader.
			return nil, errors.Wrapf(err, "Error opening file %s in ZIP", filename)
		}
		defer rc.Close()
		data, err := ioutil.ReadAll(rc)
		if err != nil {
			return nil, errors.Wrapf(err, "Error reading file %s in ZIP", filename)
		}
		return data, nil
	}
	return nil, errors.Errorf("failed to find %s in ZIP", filename)
}
signer: in xpi signer wrap long filenames in jar manifest
package xpi
import (
"archive/zip"
"bytes"
"crypto/sha1"
"crypto/sha256"
"encoding/base64"
"fmt"
"github.com/pkg/errors"
"io"
"io/ioutil"
"strings"
"unicode/utf8"
)
// consts and vars for formatFilename
const (
	maxLineByteLen      = 72 // JAR spec: no manifest line may exceed 72 bytes
	maxContinuedByteLen = 70 // -1 for leading space and -1 for trailing \n newline
)

// Byte budget for the first "Name: <...>" line; + 1 for a \n newline.
var maxFirstLineByteLen = maxLineByteLen - (len([]byte("Name: ")) + 1)
// formatFilename formats filename lines to satisfy:
//
// No line may be longer than 72 bytes (not characters), in its
// UTF8-encoded form. If a value would make the initial line longer
// than this, it should be continued on extra lines (each starting
// with a single SPACE).
//
// https://docs.oracle.com/javase/8/docs/technotes/guides/jar/jar.html#Signed_JAR_File
// refed from: https://source.android.com/security/apksigning/#v1
func formatFilename(filename []byte) (formatted []byte, err error) {
	if !utf8.Valid(filename) {
		// (fixed: the error was prefixed "apk:" — a copy/paste from an APK
		// signer — although this is the xpi package)
		err = errors.Errorf("xpi: invalid UTF8 in filename %s", filename)
		return
	}
	var (
		filenameLen      = len(filename)
		writtenFileBytes = 0 // number of bytes of the filename we've written
	)
	// Short names fit on the initial "Name: " line unchanged.
	if filenameLen <= maxFirstLineByteLen {
		formatted = filename
		return
	}
	// Fill the first line, then emit continuation lines prefixed by "\n ".
	formatted = append(formatted, filename[:maxFirstLineByteLen]...)
	writtenFileBytes += maxFirstLineByteLen
	for {
		if filenameLen-writtenFileBytes <= 0 {
			break
		} else if filenameLen-writtenFileBytes < maxContinuedByteLen {
			// Final, short continuation line with the remaining bytes.
			formatted = append(formatted, []byte("\n ")...)
			formatted = append(formatted, filename[writtenFileBytes:]...)
			break
		} else {
			// Full continuation line of exactly maxContinuedByteLen bytes.
			formatted = append(formatted, []byte("\n ")...)
			formatted = append(formatted, filename[writtenFileBytes:writtenFileBytes+maxContinuedByteLen]...)
			writtenFileBytes += maxContinuedByteLen
		}
	}
	return
}
// makePKCS7Manifest builds the PKCS7 manifest: the JAR manifest for input
// plus Name/digest sections for each provided metafile.
func makePKCS7Manifest(input []byte, metafiles []Metafile) (manifest []byte, err error) {
	// Refuse metafiles outside META-INF/ before doing any work.
	for _, f := range metafiles {
		if !f.IsNameValid() {
			err = errors.Errorf("Cannot pack metafile with invalid path %s", f.Name)
			return
		}
	}

	manifest, err = makeJARManifest(input)
	if err != nil {
		return
	}

	// Append a SHA1/SHA256 digest section per metafile onto the manifest.
	mw := bytes.NewBuffer(manifest)
	for _, f := range metafiles {
		// NOTE(review): unlike makeJARManifest, metafile names are written
		// without formatFilename wrapping; META-INF/ names are short today,
		// but confirm before ever using longer metafile paths.
		fmt.Fprintf(mw, "Name: %s\nDigest-Algorithms: SHA1 SHA256\n", f.Name)
		h1 := sha1.New()
		h1.Write(f.Body)
		fmt.Fprintf(mw, "SHA1-Digest: %s\n", base64.StdEncoding.EncodeToString(h1.Sum(nil)))
		h2 := sha256.New()
		h2.Write(f.Body)
		fmt.Fprintf(mw, "SHA256-Digest: %s\n\n", base64.StdEncoding.EncodeToString(h2.Sum(nil)))
	}
	return mw.Bytes(), err
}
// makeJARManifestAndSignatureFile writes hashes for all entries in a zip to a
// manifest file then hashes the manifest file to write a signature
// file and returns both
func makeJARManifestAndSignatureFile(input []byte) (manifest, sigfile []byte, err error) {
	manifest, err = makeJARManifest(input)
	if err == nil {
		sigfile, err = makeJARSignatureFile(manifest)
	}
	return
}
// makeJARManifest calculates a sha1 and sha256 hash for each zip entry and writes them to a manifest file
func makeJARManifest(input []byte) (manifest []byte, err error) {
	inputReader := bytes.NewReader(input)
	r, err := zip.NewReader(inputReader, int64(len(input)))
	if err != nil {
		return
	}

	// generate the manifest file by calculating a sha1 and sha256 hashes for each zip entry
	mw := bytes.NewBuffer(manifest)
	// (fixed: no-arg fmt.Sprintf was a no-op wrapper around the literal)
	manifest = []byte("Manifest-Version: 1.0\n\n")
	for _, f := range r.File {
		if isJARSignatureFile(f.Name) {
			// reserved signature files do not get included in the manifest
			continue
		}
		if f.FileInfo().IsDir() {
			// directories do not get included
			continue
		}
		rc, err := f.Open()
		if err != nil {
			return manifest, err
		}
		data, err := ioutil.ReadAll(rc)
		// Close the entry reader on every path; the previous code leaked
		// one open reader per zip entry.
		rc.Close()
		if err != nil {
			return manifest, err
		}
		// Wrap names longer than 72 bytes over continuation lines (JAR spec).
		filename, err := formatFilename([]byte(f.Name))
		if err != nil {
			return manifest, err
		}
		h1 := sha1.New()
		h1.Write(data)
		h2 := sha256.New()
		h2.Write(data)
		fmt.Fprintf(mw, "Name: %s\nDigest-Algorithms: SHA1 SHA256\nSHA1-Digest: %s\nSHA256-Digest: %s\n\n",
			filename,
			base64.StdEncoding.EncodeToString(h1.Sum(nil)),
			base64.StdEncoding.EncodeToString(h2.Sum(nil)))
	}
	manifestBody := mw.Bytes()
	manifest = append(manifest, manifestBody...)
	return
}
// makeJARSignatureFile calculates a signature file by hashing the manifest with sha1 and sha256
func makeJARSignatureFile(manifest []byte) (sigfile []byte, err error) {
sw := bytes.NewBuffer(sigfile)
fmt.Fprint(sw, "Signature-Version: 1.0\n")
h1 := sha1.New()
h1.Write(manifest)
fmt.Fprintf(sw, "SHA1-Digest-Manifest: %s\n", base64.StdEncoding.EncodeToString(h1.Sum(nil)))
h2 := sha256.New()
h2.Write(manifest)
fmt.Fprintf(sw, "SHA256-Digest-Manifest: %s\n\n", base64.StdEncoding.EncodeToString(h2.Sum(nil)))
sigfile = sw.Bytes()
return
}
// Metafile is a file to pack into a JAR at .Name with contents .Body
type Metafile struct {
	Name string
	Body []byte
}

// IsNameValid checks whether a Metafile.Name is non-nil and begins
// with "META-INF/" functions taking Metafile args should validate
// names before reading or writing them to JARs
func (m *Metafile) IsNameValid() bool {
	if m == nil {
		return false
	}
	return strings.HasPrefix(m.Name, "META-INF/")
}
// repackJARWithMetafiles inserts metafiles in the input JAR file and returns a JAR ZIP archive
func repackJARWithMetafiles(input []byte, metafiles []Metafile) (output []byte, err error) {
	// Refuse metafiles outside META-INF/ before doing any work.
	for _, f := range metafiles {
		if !f.IsNameValid() {
			err = errors.Errorf("Cannot pack metafile with invalid path %s", f.Name)
			return
		}
	}

	inputReader := bytes.NewReader(input)
	r, err := zip.NewReader(inputReader, int64(len(input)))
	if err != nil {
		return
	}

	// Create a buffer to write our archive to.
	buf := new(bytes.Buffer)
	// Create a new zip archive.
	w := zip.NewWriter(buf)

	// Iterate through the files in the archive,
	for _, f := range r.File {
		// skip signature files, we have new ones we'll add at the end
		if isJARSignatureFile(f.Name) {
			continue
		}
		var rc io.ReadCloser
		rc, err = f.Open()
		if err != nil {
			return
		}
		var data []byte
		data, err = ioutil.ReadAll(rc)
		// Close eagerly: the previous code skipped Close on every error
		// path, leaking the entry reader. (Also removes the inner fwhead
		// shadowing the outer declaration.)
		rc.Close()
		if err != nil {
			return
		}
		// insert the file into the archive, re-compressed with Deflate
		var fw io.Writer
		fw, err = w.CreateHeader(&zip.FileHeader{
			Name:   f.Name,
			Method: zip.Deflate,
		})
		if err != nil {
			return
		}
		if _, err = fw.Write(data); err != nil {
			return
		}
	}

	// insert the signature files. Those will be compressed
	// so we don't have to worry about their alignment
	for _, meta := range metafiles {
		var fw io.Writer
		fw, err = w.CreateHeader(&zip.FileHeader{
			Name:   meta.Name,
			Method: zip.Deflate,
		})
		if err != nil {
			return
		}
		if _, err = fw.Write(meta.Body); err != nil {
			return
		}
	}

	// Make sure to check the error on Close.
	if err = w.Close(); err != nil {
		return
	}

	output = buf.Bytes()
	return
}
// repackJAR inserts the manifest, signature file and pkcs7 signature in the input JAR file,
// and return a JAR ZIP archive
func repackJAR(input, manifest, sigfile, signature []byte) (output []byte, err error) {
	// The three canonical signing metafiles, at their reserved META-INF paths.
	var metas = []Metafile{
		{pkcs7ManifestPath, manifest},
		{pkcs7SignatureFilePath, sigfile},
		{pkcs7SigPath, signature},
	}
	return repackJARWithMetafiles(input, metas)
}
// The JAR format defines a number of signature files stored under the META-INF directory
// META-INF/MANIFEST.MF
// META-INF/*.SF
// META-INF/*.DSA
// META-INF/*.RSA
// META-INF/SIG-*
// and their lowercase variants
// https://docs.oracle.com/javase/8/docs/technotes/guides/jar/jar.html#Signed_JAR_File
func isJARSignatureFile(name string) bool {
	if !strings.HasPrefix(name, "META-INF/") {
		return false
	}
	base := strings.TrimPrefix(name, "META-INF/")
	if base == "MANIFEST.MF" || base == "manifest.mf" {
		return true
	}
	for _, suffix := range []string{".SF", ".sf", ".RSA", ".rsa", ".DSA", ".dsa"} {
		if strings.HasSuffix(base, suffix) {
			return true
		}
	}
	return strings.HasPrefix(base, "SIG-") || strings.HasPrefix(base, "sig-")
}
// readFileFromZIP reads a given filename out of a ZIP and returns it or an error
func readFileFromZIP(signedXPI []byte, filename string) ([]byte, error) {
	zipReader := bytes.NewReader(signedXPI)
	r, err := zip.NewReader(zipReader, int64(len(signedXPI)))
	if err != nil {
		return nil, errors.Wrap(err, "Error reading ZIP")
	}
	for _, f := range r.File {
		if f.Name != filename {
			continue
		}
		rc, err := f.Open()
		if err != nil {
			// Check the error before deferring Close: the previous code
			// deferred rc.Close() first, which panics on a nil reader.
			return nil, errors.Wrapf(err, "Error opening file %s in ZIP", filename)
		}
		defer rc.Close()
		data, err := ioutil.ReadAll(rc)
		if err != nil {
			return nil, errors.Wrapf(err, "Error reading file %s in ZIP", filename)
		}
		return data, nil
	}
	return nil, errors.Errorf("failed to find %s in ZIP", filename)
}
|
package plugins
import (
"bytes"
"fmt"
"log"
"regexp"
"time"
"unicode"
)
// AutobanMatches is one configured ban rule: a regex, optional cleanup steps,
// and the akick time/reason used when it fires.
type AutobanMatches struct {
	Matcher *regexp.Regexp // compiled from Regex in Setup
	Time    string         `json:"time"`
	Regex   string         `json:"match"`
	Strip   []string       `json:"strip"`
	Reason  string         `json:"reason"`
}

// Autoban bans users whose messages match configured regexes or who flood.
type Autoban struct {
	plugin
	autobanStats
	autoBans []AutobanMatches // configured rules
	user     string           // own nick; messages addressed to it are ignored by Match
}

// autobanStats holds the sliding window of recent messages for flood detection.
type autobanStats struct {
	prior []*IRCMessage
}

// AutobanConf is the plugin's JSON configuration.
type AutobanConf struct {
	Matches []AutobanMatches `json:"matchers"`
}
// Setup wires the autoban plugin: it records config, builds one combined
// case-insensitive regexp out of all configured rules, pre-compiles each
// individual matcher, and starts the Action goroutine.
// NOTE: MustCompile panics on an invalid configured regex.
func (plug *Autoban) Setup(write chan IRCMessage, conf PluginConf) {
	plug.user = conf.UserName
	plug.write = write
	plug.event = make(chan IRCMessage, 1000)

	var buffer bytes.Buffer
	buffer.WriteString(`(?i:`)
	// Index loop (not range) because we mutate matches[i].Matcher in place.
	matches := conf.Autoban.Matches
	for i := 0; i < len(matches); i++ {
		buffer.WriteString(matches[i].Regex)
		// Separate alternatives with `|`, but not after the last one.
		// (fixed: dropped non-idiomatic parentheses around the condition
		// and the redundant trailing return below)
		if i != len(matches)-1 {
			buffer.WriteString(`|`)
		}
		matches[i].Matcher = regexp.MustCompile(matches[i].Regex)
	}
	buffer.WriteString(`)`)
	plug.match = regexp.MustCompile(buffer.String())
	plug.autoBans = conf.Autoban.Matches
	go plug.Action()
}
// removeNongraphic returns msg with every rune that is non-graphic or
// whitespace removed.
func removeNongraphic(msg string) string {
	var out bytes.Buffer
	for _, r := range msg {
		// Skip anything that is not printable, plus all whitespace.
		if !unicode.IsGraphic(r) || unicode.IsSpace(r) {
			continue
		}
		out.WriteRune(r)
	}
	return out.String()
}
// colorStripRe matches an IRC color control byte (0x03) plus the character
// following it. Compiled once at init instead of on every call, which the
// previous code did.
var colorStripRe = regexp.MustCompile("\x03.")

// removeColors strips IRC color codes (0x03 + following char) from msg.
func removeColors(msg string) string {
	return colorStripRe.ReplaceAllLiteralString(msg, "")
}
// computeReasonAndTime builds the human-readable notification line and the
// ChanServ akick command for banning msg's author.
// spam selects the fixed flood duration/reason; match re-runs each configured
// matcher to pick the time and reason of the rule that hit (the last matching
// rule wins). When both are true, the match texts override the spam texts.
func (plug Autoban) computeReasonAndTime(msg *IRCMessage, match bool, spam bool) (notify string, chanserv string) {
	time := "" // akick ban duration (shadows the time package, unused here)
	reason := ":("
	if spam {
		time = "5"
		reason = "spam is bad and you should feel bad"
		notify = fmt.Sprintf(
			"Banning user `%s` with `%s` from `%s` for spam at `%s`",
			msg.User,
			msg.Mask,
			msg.Channel,
			msg.When)
		chanserv = fmt.Sprintf(
			"akick %s add *!*@%s !T %s %s | Laala b& '%s' for spam",
			msg.Channel,
			msg.Mask,
			time,
			reason,
			msg.User)
	}
	if match {
		// Re-run the individual matchers (with their cleanup steps) to find
		// which configured rule fired.
		for index, matcher := range plug.autoBans {
			cleaned := matcher.doCleanup(msg.Msg)
			if matcher.Matcher.MatchString(cleaned) {
				time = plug.autoBans[index].Time
				reason = plug.autoBans[index].Reason
			}
		}
		notify = fmt.Sprintf(
			"Banning user `%s` with `%s` from `%s` for `%s` at `%s`",
			msg.User,
			msg.Mask,
			msg.Channel,
			msg.Msg,
			msg.When)
		chanserv = fmt.Sprintf(
			"akick %s add *!*@%s !T %s %s | Laala b& '%s' for '%s'",
			msg.Channel,
			msg.Mask,
			time,
			reason,
			msg.User,
			msg.Msg)
	}
	return
}
// Ban reports the ban to the "Rodya" channel and, if the mask is usable,
// issues the ChanServ akick command.
// match selects regex-based reasons; spam selects flood-based reasons.
func (plug Autoban) Ban(msg *IRCMessage, match bool, spam bool) {
	logMsg, banMsg := plug.computeReasonAndTime(msg, match, spam)
	log.Println(logMsg)
	plug.write <- IRCMessage{
		Channel:   "Rodya",
		Msg:       logMsg,
		User:      msg.User,
		When:      msg.When,
		Unlimited: true,
	}

	if len(msg.Mask) < 3 {
		// Printf, not Println: the message contains a %s verb which the
		// previous Println call left unformatted.
		log.Printf("msg.Mask too short to ban! %s", msg.Mask)
		return
	}

	log.Println(banMsg)
	plug.write <- IRCMessage{
		Channel:   "ChanServ",
		Msg:       banMsg,
		User:      msg.User,
		When:      msg.When,
		Unlimited: true,
	}
}
// Action consumes matched messages from the event channel and issues a
// regex-based (match=true, spam=false) ban for each, one goroutine per ban.
func (plug Autoban) Action() {
	for msg := range plug.event {
		go plug.Ban(&msg, true, false)
	}
}
// doCleanup applies the matcher's configured strip transforms to msg, in
// the order they are listed; unknown transform names are ignored.
func (matcher AutobanMatches) doCleanup(msg string) string {
	out := msg
	for _, step := range matcher.Strip {
		if step == "colors" {
			out = removeColors(out)
		} else if step == "nongraphic" {
			out = removeNongraphic(out)
		}
	}
	return out
}
// computeStats reports whether last's author sent five or more of msgs within
// the past five seconds (i.e. is flooding).
func computeStats(msgs []*IRCMessage, last *IRCMessage) bool {
	cutoff := time.Now().Add(-5 * time.Second)
	recent := 0
	for _, m := range msgs {
		if m.User == last.User && m.When.After(cutoff) {
			recent++
		}
	}
	log.Println(last.User, recent)
	return recent >= 5
}
// Match decides whether msg should trigger a ban. It ignores private
// messages (channel == own nick), tracks recent traffic for flood detection,
// bans immediately on flooding, and otherwise tests the cleaned message
// against the combined matcher regexp, queueing a hit for Action.
func (plug *Autoban) Match(msg *IRCMessage) bool {
	// Private messages to the bot are never ban-worthy.
	if msg.Channel == plug.user {
		return false
	}

	// Keep a sliding window of roughly the last 100 messages.
	plug.prior = append(plug.prior, msg)
	if len(plug.prior) > 110 {
		plug.prior = plug.prior[len(plug.prior)-100 : len(plug.prior)]
	}
	ban := computeStats(plug.prior, msg)
	if ban {
		// Flooding: ban synchronously and report no regex match.
		plug.Ban(msg, false, true)
		return false
	}

	var cleaned string
	matched := false
	// Apply each rule's cleanup before testing the combined regexp.
	for _, matcher := range plug.autoBans {
		cleaned = matcher.doCleanup(msg.Msg)
		matched = plug.match.MatchString(cleaned)
		if matched {
			break
		}
	}
	if matched {
		plug.event <- *msg
	}
	return matched
}
// Event returns the plugin's inbound message channel, on which matched
// messages are queued for Action.
func (plug Autoban) Event() chan IRCMessage {
	return plug.event
}
autoban: customizable notification user
package plugins
import (
"bytes"
"fmt"
"log"
"regexp"
"time"
"unicode"
)
// AutobanMatches is one configured ban rule: a regex, the strip passes
// applied before matching, and the akick time/reason used on a hit.
type AutobanMatches struct {
	Matcher *regexp.Regexp // compiled form of Regex, filled in by Setup
	Time    string         `json:"time"`
	Regex   string         `json:"match"`
	Strip   []string       `json:"strip"`
	Reason  string         `json:"reason"`
}

// Autoban watches channel traffic and bans users whose messages match a
// configured pattern or who flood.
type Autoban struct {
	plugin
	autobanStats
	autoBans []AutobanMatches
	user     string // nick to notify about bans (may be empty)
	self     string // the bot's own nick
}

// autobanStats buffers recent messages for the flood heuristic.
type autobanStats struct {
	prior []*IRCMessage
}

// AutobanConf is the JSON configuration block for the Autoban plugin.
type AutobanConf struct {
	Matches []AutobanMatches `json:"matchers"`
	User    string           `json:"notify_user"`
}
// Setup wires the plugin: it remembers the notify target and own nick,
// builds one combined case-insensitive regexp over every configured
// pattern, compiles each rule's individual matcher, and starts the
// Action consumer goroutine.
func (plug *Autoban) Setup(write chan IRCMessage, conf PluginConf) {
	plug.user = conf.Autoban.User
	plug.self = conf.UserName
	plug.write = write
	plug.event = make(chan IRCMessage, 1000)
	var buffer bytes.Buffer
	buffer.WriteString(`(?i:`)
	// Index loop so the compiled Matcher is stored back into the slice
	// element (range would mutate a copy).
	matches := conf.Autoban.Matches
	for i := 0; i < len(matches); i++ {
		buffer.WriteString(matches[i].Regex)
		if i != len(matches)-1 {
			buffer.WriteString(`|`)
		}
		matches[i].Matcher = regexp.MustCompile(matches[i].Regex)
	}
	buffer.WriteString(`)`)
	plug.match = regexp.MustCompile(buffer.String())
	plug.autoBans = conf.Autoban.Matches
	go plug.Action()
}
// removeNongraphic strips every rune that is not graphic, along with all
// whitespace, leaving only visible characters.
func removeNongraphic(msg string) string {
	var b bytes.Buffer
	for _, r := range msg {
		if !unicode.IsGraphic(r) || unicode.IsSpace(r) {
			continue
		}
		b.WriteRune(r)
	}
	return b.String()
}
// mircColor matches a mIRC color control byte (0x03) plus the character
// that follows it.
var mircColor = regexp.MustCompile("\x03.")

// removeColors strips mIRC color codes from msg. The pattern is compiled
// once at package init instead of on every call, as the original did.
func removeColors(msg string) string {
	return mircColor.ReplaceAllLiteralString(msg, "")
}
// computeReasonAndTime builds the notification line (for the notify user)
// and the ChanServ akick command for msg.
//
// spam selects a fixed flood ban (time "5", canned reason); match looks up
// the time/reason of the last configured rule whose matcher hits the
// cleaned message. When both flags are set the match branch wins because
// it runs second.
func (plug Autoban) computeReasonAndTime(msg *IRCMessage, match bool, spam bool) (notify string, chanserv string) {
	// Renamed from "time": the old local shadowed the time package.
	banTime := ""
	reason := ":("
	if spam {
		banTime = "5"
		reason = "spam is bad and you should feel bad"
		notify = fmt.Sprintf(
			"Banning user `%s` with `%s` from `%s` for spam at `%s`",
			msg.User,
			msg.Mask,
			msg.Channel,
			msg.When)
		chanserv = fmt.Sprintf(
			"akick %s add *!*@%s !T %s %s | Laala b& '%s' for spam",
			msg.Channel,
			msg.Mask,
			banTime,
			reason,
			msg.User)
	}
	if match {
		// Last matching rule wins.
		for index, matcher := range plug.autoBans {
			cleaned := matcher.doCleanup(msg.Msg)
			if matcher.Matcher.MatchString(cleaned) {
				banTime = plug.autoBans[index].Time
				reason = plug.autoBans[index].Reason
			}
		}
		notify = fmt.Sprintf(
			"Banning user `%s` with `%s` from `%s` for `%s` at `%s`",
			msg.User,
			msg.Mask,
			msg.Channel,
			msg.Msg,
			msg.When)
		chanserv = fmt.Sprintf(
			"akick %s add *!*@%s !T %s %s | Laala b& '%s' for '%s'",
			msg.Channel,
			msg.Mask,
			banTime,
			reason,
			msg.User,
			msg.Msg)
	}
	return
}
// Ban reports the ban to the configured notify user (when one is set)
// and, if the offender's mask is long enough to be meaningful, sends
// ChanServ the akick command produced by computeReasonAndTime.
func (plug Autoban) Ban(msg *IRCMessage, match bool, spam bool) {
	logMsg, banMsg := plug.computeReasonAndTime(msg, match, spam)
	log.Println(logMsg)
	if len(plug.user) > 0 {
		plug.write <- IRCMessage{
			Channel:   plug.user,
			Msg:       logMsg,
			User:      msg.User,
			When:      msg.When,
			Unlimited: true,
		}
	}
	if len(msg.Mask) < 3 {
		// log.Println does not interpret format verbs; Printf does.
		log.Printf("msg.Mask too short to ban! %s", msg.Mask)
		return
	}
	log.Println(banMsg)
	plug.write <- IRCMessage{
		Channel:   "ChanServ",
		Msg:       banMsg,
		User:      msg.User,
		When:      msg.When,
		Unlimited: true,
	}
}
// Action drains the event queue, issuing one asynchronous ban per event.
func (plug Autoban) Action() {
	for msg := range plug.event {
		// Copy the loop variable: the goroutine takes its address, and
		// before Go 1.22 every iteration reused the same variable.
		msg := msg
		go plug.Ban(&msg, true, false)
	}
}
// doCleanup applies this rule's configured strip passes to msg and
// returns the sanitized text used for matching.
func (matcher AutobanMatches) doCleanup(msg string) string {
	out := msg
	for _, pass := range matcher.Strip {
		if pass == "colors" {
			out = removeColors(out)
		} else if pass == "nongraphic" {
			out = removeNongraphic(out)
		}
	}
	return out
}
// computeStats reports whether last's author sent five or more of the
// buffered messages within the past five seconds (a flood heuristic).
func computeStats(msgs []*IRCMessage, last *IRCMessage) bool {
	count := 0
	// Renamed from "time": the old local shadowed the time package.
	cutoff := time.Now().Add(time.Second * -5)
	for _, msg := range msgs {
		if msg.User == last.User && msg.When.After(cutoff) {
			count++
		}
	}
	log.Println(last.User, count)
	return count >= 5
}
// Match records msg in the recent-message buffer, bans floods outright,
// and otherwise reports whether any configured autoban pattern matches
// the cleaned message text (queueing an event when it does).
func (plug *Autoban) Match(msg *IRCMessage) bool {
	if msg.Channel == plug.self {
		return false
	}
	plug.prior = append(plug.prior, msg)
	if n := len(plug.prior); n > 110 {
		plug.prior = plug.prior[n-100:]
	}
	if computeStats(plug.prior, msg) {
		plug.Ban(msg, false, true)
		return false
	}
	for _, matcher := range plug.autoBans {
		if plug.match.MatchString(matcher.doCleanup(msg.Msg)) {
			plug.event <- *msg
			return true
		}
	}
	return false
}
// Event exposes the buffered channel of messages that matched an autoban
// pattern; Action consumes it.
func (plug Autoban) Event() chan IRCMessage {
	return plug.event
}
|
package plugins
import (
"os"
"ircclient"
"time"
"container/list"
"json"
"sync"
"fmt"
"log"
)
// Seconds of lead time before a lecture at which the reminder fires.
// NOTE(review): 12 seconds looks accidentally small for a reminder lead
// time — confirm the intended value.
const notifyBefore = 12 // TODO: config

// LecturePlugin announces upcoming lectures to their IRC channels.
type LecturePlugin struct {
	ic            *ircclient.IRCClient
	notifications *list.List // pending notification entries
	done          chan bool  // signals sendNotifications to exit
	update        chan bool  // signals that the lecture list changed
	lock          sync.Mutex // guards notifications
}

// notification pairs a lecture with the Unix time its reminder is due.
type notification struct {
	when  int64
	entry configEntry
}

// configEntry is one lecture as stored in the bot's configuration.
type configEntry struct {
	Name     string // AuD
	Time     string // Mon 13:15
	Channel  string // #faui2k11
	LongName string // Algorithmen und Datenstrukturen
	Academic string // Brinda
	Venue    string // H11
}
// Gets the _next_ time "date" matches, in seconds.
// date is a weekly schedule like "Mon 13:15"; the result is the Unix time
// of the next occurrence more than notifyBefore seconds in the future.
// (Pre-Go1 time API: Time fields are mutable, times are in seconds.)
func nextAt(date string) (int64, os.Error) {
	timertime, err := time.Parse("Mon 15:04", date)
	if err != nil {
		return 0, err
	}
	curtime := time.LocalTime()
	// XXX - This is still quite ugly. Any ideas
	// on how to improve it?
	// Keep the parsed weekday/clock, move the date to today, then walk
	// forward one day at a time until weekday and lead time both fit.
	weekday := timertime.Weekday
	save1, save2, save3 := timertime.Hour, timertime.Minute, timertime.Second
	*timertime = *curtime
	timertime.Hour, timertime.Minute, timertime.Second = save1, save2, save3
	for timertime.Weekday != weekday || timertime.Seconds()-curtime.Seconds() <= notifyBefore {
		timertime = time.SecondsToLocalTime(timertime.Seconds() + (24 * 60 * 60))
	}
	return timertime.Seconds(), nil
}
// Fills the list of notifications with all lectures and
// the timestamp they take place
// Returns: Time for next lecture, -1 if no lecture is registered
// NOTE(review): l.notifications is replaced before the lock is taken —
// confirm this cannot race with sendNotifications' iteration.
func (l *LecturePlugin) fillNotificationList() int64 {
	var retval int64 = -1
	l.notifications = list.New()
	l.lock.Lock()
	defer l.lock.Unlock()
	options := l.ic.GetOptions("Lectures")
	for _, key := range options {
		value := l.ic.GetStringOption("Lectures", key)
		var lecture configEntry
		if err := json.Unmarshal([]byte(value), &lecture); err != nil {
			// panics should only happen during initialization, during runtime,
			// all config entries are checked before insertion.
			panic("LecturePlugin: Invalid JSON for key " + key + " : " + err.String())
		}
		// Next occurrence of this lecture, in Unix seconds.
		time, err := nextAt(lecture.Time)
		if err != nil {
			log.Printf("Unable to parse time value for lecture %s: %s\n", lecture.Name, err.String())
			continue
		}
		notifyTime := time - notifyBefore
		l.notifications.PushFront(notification{notifyTime, lecture})
		// Track the earliest reminder so the caller knows when to wake up.
		if notifyTime < retval || retval == -1 {
			retval = notifyTime
		}
	}
	return retval
}
// sendNotifications is the notifier loop: it rebuilds the schedule,
// sleeps until the next reminder (or an update/shutdown signal), then
// announces every lecture whose reminder time has arrived.
func (l *LecturePlugin) sendNotifications() {
	for {
		// TODO: container/heap and selective re-adding or so.
		// However, this should work for now...
		nextNotification := l.fillNotificationList()
		var timerChan <-chan int64
		// If nextNotification is less than zero, just wait indefinitely on this chan
		if nextNotification < 0 {
			timerChan = make(chan int64)
		} else {
			timerChan = time.After((nextNotification - time.Seconds()) * 1e9)
		}
		select {
		case <-l.done:
			return
		case <-timerChan:
		case <-l.update:
			// Send notifications and refresh timer
		}
		l.lock.Lock()
		for e := l.notifications.Front(); e != nil; e = e.Next() {
			notify := e.Value.(notification)
			entry := notify.entry
			// NOTE(review): stray debug output — consider removing.
			log.Println("Trying")
			if notify.when <= time.Seconds() {
				l.ic.SendLine("PRIVMSG " + entry.Channel + " :inb4 (" + entry.Time + "): \"" + entry.LongName + "\" (" + entry.Name + ") bei " + entry.Academic + ", Ort: " + entry.Venue)
			}
		}
		l.lock.Unlock()
	}
}
// Register hooks the plugin into the client and starts the notifier loop.
func (l *LecturePlugin) Register(cl *ircclient.IRCClient) {
	l.ic = cl
	l.done = make(chan bool)
	l.update = make(chan bool)
	go l.sendNotifications()
	// ProcessCommand only handles "reglecture"; registering "addlecture"
	// here left the real command unreachable.
	cl.RegisterCommandHandler("reglecture", 0, 300, l)
	// TODO: dellecture
}
// String returns the plugin's short name.
func (l *LecturePlugin) String() string {
	return "lecture"
}

// Info returns a one-line description of the plugin.
func (l *LecturePlugin) Info() string {
	return "lecture notifications"
}

// ProcessLine is a no-op; this plugin only reacts to commands.
func (l *LecturePlugin) ProcessLine(msg *ircclient.IRCMessage) {
}
// ProcessCommand handles "reglecture": validate the six arguments, parse
// the schedule, persist the lecture as JSON config, and wake the notifier.
func (l *LecturePlugin) ProcessCommand(cmd *ircclient.IRCCommand) {
	switch cmd.Command {
	case "reglecture":
		if len(cmd.Args) != 6 {
			l.ic.Reply(cmd, "reglecture takes exactly 6 arguments:")
			l.ic.Reply(cmd, "Syntax: reglecture NAME TIME CHANNEL LONGNAME ACADEMIC VENUE")
			l.ic.Reply(cmd, "Example: reglecture AuD \"Mon 13:15\" #faui2k11 \"Algorithmen und Datenstrukturen\" Brinda H11")
			return
		}
		lecture := configEntry{cmd.Args[0], cmd.Args[1], cmd.Args[2], cmd.Args[3], cmd.Args[4], cmd.Args[5]}
		// Validate the schedule before persisting anything.
		_, err := time.Parse("Mon 15:04", lecture.Time)
		if err != nil {
			l.ic.Reply(cmd, "Invalid date specified: "+err.String())
			return
		}
		jlecture, _ := json.Marshal(lecture)
		// Keyed by the current Unix time.
		l.ic.SetStringOption("Lectures", fmt.Sprintf("%d", time.Seconds()), string(jlecture))
		l.ic.Reply(cmd, "Lecture added.")
		l.fillNotificationList()
		l.update <- true
	case "dellecture":
		// TODO
	}
}
// Unregister stops the notifier goroutine.
func (l *LecturePlugin) Unregister() {
	l.done <- true
}
Fix typo in lecture.go, set sane constants
package plugins
import (
"os"
"ircclient"
"time"
"container/list"
"json"
"sync"
"fmt"
"log"
)
// Seconds of lead time before a lecture at which the reminder fires
// (600 s = 10 minutes).
const notifyBefore = 600 // TODO: config

// LecturePlugin announces upcoming lectures to their IRC channels.
type LecturePlugin struct {
	ic            *ircclient.IRCClient
	notifications *list.List // pending notification entries
	done          chan bool  // signals sendNotifications to exit
	update        chan bool  // signals that the lecture list changed
	lock          sync.Mutex // guards notifications
}

// notification pairs a lecture with the Unix time its reminder is due.
type notification struct {
	when  int64
	entry configEntry
}

// configEntry is one lecture as stored in the bot's configuration.
type configEntry struct {
	Name     string // AuD
	Time     string // Mon 13:15
	Channel  string // #faui2k11
	LongName string // Algorithmen und Datenstrukturen
	Academic string // Brinda
	Venue    string // H11
}
// Gets the _next_ time "date" matches, in seconds.
// date is a weekly schedule like "Mon 13:15"; the result is the Unix time
// of the next occurrence more than notifyBefore seconds in the future.
// (Pre-Go1 time API: Time fields are mutable, times are in seconds.)
func nextAt(date string) (int64, os.Error) {
	timertime, err := time.Parse("Mon 15:04", date)
	if err != nil {
		return 0, err
	}
	curtime := time.LocalTime()
	// XXX - This is still quite ugly. Any ideas
	// on how to improve it?
	// Keep the parsed weekday/clock, move the date to today, then walk
	// forward one day at a time until weekday and lead time both fit.
	weekday := timertime.Weekday
	save1, save2, save3 := timertime.Hour, timertime.Minute, timertime.Second
	*timertime = *curtime
	timertime.Hour, timertime.Minute, timertime.Second = save1, save2, save3
	for timertime.Weekday != weekday || timertime.Seconds()-curtime.Seconds() <= notifyBefore {
		timertime = time.SecondsToLocalTime(timertime.Seconds() + (24 * 60 * 60))
	}
	return timertime.Seconds(), nil
}
// Fills the list of notifications with all lectures and
// the timestamp they take place
// Returns: Time for next lecture, -1 if no lecture is registered
// NOTE(review): l.notifications is replaced before the lock is taken —
// confirm this cannot race with sendNotifications' iteration.
func (l *LecturePlugin) fillNotificationList() int64 {
	var retval int64 = -1
	l.notifications = list.New()
	l.lock.Lock()
	defer l.lock.Unlock()
	options := l.ic.GetOptions("Lectures")
	for _, key := range options {
		value := l.ic.GetStringOption("Lectures", key)
		var lecture configEntry
		if err := json.Unmarshal([]byte(value), &lecture); err != nil {
			// panics should only happen during initialization, during runtime,
			// all config entries are checked before insertion.
			panic("LecturePlugin: Invalid JSON for key " + key + " : " + err.String())
		}
		// Next occurrence of this lecture, in Unix seconds.
		time, err := nextAt(lecture.Time)
		if err != nil {
			log.Printf("Unable to parse time value for lecture %s: %s\n", lecture.Name, err.String())
			continue
		}
		notifyTime := time - notifyBefore
		l.notifications.PushFront(notification{notifyTime, lecture})
		// Track the earliest reminder so the caller knows when to wake up.
		if notifyTime < retval || retval == -1 {
			retval = notifyTime
		}
	}
	return retval
}
// sendNotifications is the notifier loop: it rebuilds the schedule,
// sleeps until the next reminder (or an update/shutdown signal), then
// announces every lecture whose reminder time has arrived.
func (l *LecturePlugin) sendNotifications() {
	for {
		// TODO: container/heap and selective re-adding or so.
		// However, this should work for now...
		nextNotification := l.fillNotificationList()
		var timerChan <-chan int64
		// If nextNotification is less than zero, just wait indefinitely on this chan
		if nextNotification < 0 {
			timerChan = make(chan int64)
		} else {
			timerChan = time.After((nextNotification - time.Seconds()) * 1e9)
		}
		select {
		case <-l.done:
			return
		case <-timerChan:
		case <-l.update:
			// Send notifications and refresh timer
		}
		l.lock.Lock()
		for e := l.notifications.Front(); e != nil; e = e.Next() {
			notify := e.Value.(notification)
			entry := notify.entry
			if notify.when <= time.Seconds() {
				l.ic.SendLine("PRIVMSG " + entry.Channel + " :inb4 (" + entry.Time + "): \"" + entry.LongName + "\" (" + entry.Name + ") bei " + entry.Academic + ", Ort: " + entry.Venue)
			}
		}
		l.lock.Unlock()
	}
}
// Register hooks the plugin into the client and starts the notifier loop.
func (l *LecturePlugin) Register(cl *ircclient.IRCClient) {
	l.ic = cl
	l.done = make(chan bool)
	l.update = make(chan bool)
	go l.sendNotifications()
	cl.RegisterCommandHandler("reglecture", 0, 300, l)
	// TODO: dellecture
}

// String returns the plugin's short name.
func (l *LecturePlugin) String() string {
	return "lecture"
}

// Info returns a one-line description of the plugin.
func (l *LecturePlugin) Info() string {
	return "lecture notifications"
}

// ProcessLine is a no-op; this plugin only reacts to commands.
func (l *LecturePlugin) ProcessLine(msg *ircclient.IRCMessage) {
}

// ProcessCommand handles "reglecture": validate the six arguments, parse
// the schedule, persist the lecture as JSON config, and wake the notifier.
func (l *LecturePlugin) ProcessCommand(cmd *ircclient.IRCCommand) {
	switch cmd.Command {
	case "reglecture":
		if len(cmd.Args) != 6 {
			l.ic.Reply(cmd, "reglecture takes exactly 6 arguments:")
			l.ic.Reply(cmd, "Syntax: reglecture NAME TIME CHANNEL LONGNAME ACADEMIC VENUE")
			l.ic.Reply(cmd, "Example: reglecture AuD \"Mon 13:15\" #faui2k11 \"Algorithmen und Datenstrukturen\" Brinda H11")
			return
		}
		lecture := configEntry{cmd.Args[0], cmd.Args[1], cmd.Args[2], cmd.Args[3], cmd.Args[4], cmd.Args[5]}
		// Validate the schedule before persisting anything.
		_, err := time.Parse("Mon 15:04", lecture.Time)
		if err != nil {
			l.ic.Reply(cmd, "Invalid date specified: "+err.String())
			return
		}
		jlecture, _ := json.Marshal(lecture)
		// Keyed by the current Unix time.
		l.ic.SetStringOption("Lectures", fmt.Sprintf("%d", time.Seconds()), string(jlecture))
		l.ic.Reply(cmd, "Lecture added.")
		l.fillNotificationList()
		l.update <- true
	case "dellecture":
		// TODO
	}
}

// Unregister stops the notifier goroutine.
func (l *LecturePlugin) Unregister() {
	l.done <- true
}
|
package plugins
import (
"bytes"
"context"
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/pkg/errors"
)
// List maps a Buffalo command to a slice of Command
// (one entry per plugin binary that extends that command).
type List map[string]Commands
// Available plugins for the `buffalo` command.
// It will look in $PATH and the `./plugins` directory.
//
// Requirements:
// * file/command must be executable
// * file/command must start with `buffalo-`
// * file/command must respond to `available` and return JSON of
// plugins.Commands{}
//
// Limit full path scan with direct plugin path
func Available() (List, error) {
	list := List{}
	paths := []string{"plugins"}
	// BUFFALO_PLUGIN_PATH is a PATH-style list; ";" is only the separator
	// on Windows, so split on the OS's path-list separator instead.
	paths = append(paths, strings.Split(os.Getenv("BUFFALO_PLUGIN_PATH"), string(os.PathListSeparator))...)
	for _, p := range paths {
		if ignorePath(p) {
			continue
		}
		if _, err := os.Stat(p); err != nil {
			continue
		}
		err := filepath.Walk(p, func(path string, info os.FileInfo, err error) error {
			if err != nil {
				// May indicate a permissions problem with the path, skip it
				return nil
			}
			if info.IsDir() {
				return nil
			}
			base := filepath.Base(path)
			// Only binaries following the buffalo-* convention are plugins.
			if strings.HasPrefix(base, "buffalo-") {
				commands := askBin(path)
				for _, c := range commands {
					bc := c.BuffaloCommand
					if _, ok := list[bc]; !ok {
						list[bc] = Commands{}
					}
					c.Binary = path
					list[bc] = append(list[bc], c)
				}
			}
			return nil
		})
		if err != nil {
			return nil, errors.WithStack(err)
		}
	}
	return list, nil
}
// askBin runs `<path> available` with a one-second timeout and decodes
// the JSON command list the plugin prints. Any failure is logged and
// yields an empty command set.
func askBin(path string) Commands {
	commands := Commands{}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	out := &bytes.Buffer{}
	cmd := exec.CommandContext(ctx, path, "available")
	cmd.Stdout = out
	cmd.Stderr = out
	if err := cmd.Run(); err != nil {
		fmt.Printf("[PLUGIN] error loading plugin %s: %s\n%s\n", path, err, out.String())
		return commands
	}
	if err := json.NewDecoder(out).Decode(&commands); err != nil {
		fmt.Printf("[PLUGIN] error loading plugin %s: %s\n", path, err)
		return commands
	}
	return commands
}
// ignorePath reports whether p lies under a Windows system directory that
// should never be scanned for plugins (case-insensitive prefix match).
func ignorePath(p string) bool {
	lower := strings.ToLower(p)
	return strings.HasPrefix(lower, `c:\windows`) ||
		strings.HasPrefix(lower, `c:\program`)
}
Fixed a breaking change to how plugins are loaded
package plugins
import (
"bytes"
"context"
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/gobuffalo/envy"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// List maps a Buffalo command to a slice of Command
type List map[string]Commands

// Available plugins for the `buffalo` command.
// It will look in $PATH and the `./plugins` directory.
//
// Requirements:
// * file/command must be executable
// * file/command must start with `buffalo-`
// * file/command must respond to `available` and return JSON of
// plugins.Commands{}
//
// Limit full path scan with direct plugin path
func Available() (List, error) {
	list := List{}
	paths := []string{"plugins"}
	// Prefer the dedicated plugin path; when unset, warn and fall back to
	// scanning the whole PATH.
	// NOTE(review): assumes envy.MustGet errors when the variable is
	// unset — confirm against the envy documentation.
	from, err := envy.MustGet("BUFFALO_PLUGIN_PATH")
	if err != nil {
		logrus.Warn(warningMessage)
		from = envy.Get("PATH", "")
	}
	// PATH-style lists are ";"-separated on Windows, ":" elsewhere.
	if runtime.GOOS == "windows" {
		paths = append(paths, strings.Split(from, ";")...)
	} else {
		paths = append(paths, strings.Split(from, ":")...)
	}
	for _, p := range paths {
		if ignorePath(p) {
			continue
		}
		if _, err := os.Stat(p); err != nil {
			continue
		}
		err := filepath.Walk(p, func(path string, info os.FileInfo, err error) error {
			if err != nil {
				// May indicate a permissions problem with the path, skip it
				return nil
			}
			if info.IsDir() {
				return nil
			}
			base := filepath.Base(path)
			// Only binaries following the buffalo-* convention are plugins.
			if strings.HasPrefix(base, "buffalo-") {
				commands := askBin(path)
				for _, c := range commands {
					bc := c.BuffaloCommand
					if _, ok := list[bc]; !ok {
						list[bc] = Commands{}
					}
					c.Binary = path
					list[bc] = append(list[bc], c)
				}
			}
			return nil
		})
		if err != nil {
			return nil, errors.WithStack(err)
		}
	}
	return list, nil
}
// askBin runs `<path> available` with a one-second timeout and decodes
// the JSON command list the plugin prints. Any failure is logged and
// yields an empty command set.
func askBin(path string) Commands {
	commands := Commands{}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	out := &bytes.Buffer{}
	cmd := exec.CommandContext(ctx, path, "available")
	cmd.Stdout = out
	cmd.Stderr = out
	if err := cmd.Run(); err != nil {
		fmt.Printf("[PLUGIN] error loading plugin %s: %s\n%s\n", path, err, out.String())
		return commands
	}
	if err := json.NewDecoder(out).Decode(&commands); err != nil {
		fmt.Printf("[PLUGIN] error loading plugin %s: %s\n", path, err)
		return commands
	}
	return commands
}
// ignorePath reports whether p lies under a Windows system directory that
// should never be scanned for plugins (case-insensitive prefix match).
func ignorePath(p string) bool {
	lower := strings.ToLower(p)
	return strings.HasPrefix(lower, `c:\windows`) ||
		strings.HasPrefix(lower, `c:\program`)
}
// warningMessage is logged when BUFFALO_PLUGIN_PATH is unset and the
// loader falls back to scanning PATH.
const warningMessage = `Could not find BUFFALO_PLUGIN_PATH environment variable, default to PATH instead.
Consider setting the BUFFALO_PLUGIN_PATH variable to speed up loading of plugins and/or to set a custom path for locating them.`
|
package main
import (
"github.com/jmcvetta/neoism"
"github.com/rcrowley/go-metrics"
"log"
"time"
)
// CypherRunner executes a batch of Cypher queries.
type CypherRunner interface {
	CypherBatch(queries []*neoism.CypherQuery) error
}

// NewBatchCypherRunner wraps cypherRunner so that incoming query batches
// are accumulated and flushed together once count queries are queued or
// duration elapses.
func NewBatchCypherRunner(cypherRunner CypherRunner, count int, duration time.Duration) CypherRunner {
	cr := BatchCypherRunner{cypherRunner, make(chan cypherQueryBatch), count, duration}
	go cr.batcher()
	return &cr
}

// BatchCypherRunner aggregates queries from many callers into larger
// batches for the underlying runner.
type BatchCypherRunner struct {
	cr       CypherRunner
	ch       chan cypherQueryBatch
	count    int
	duration time.Duration
}

// CypherBatch submits queries to the batcher and blocks until the batch
// containing them has executed, returning that batch's error.
func (bcr *BatchCypherRunner) CypherBatch(queries []*neoism.CypherQuery) error {
	errCh := make(chan error)
	bcr.ch <- cypherQueryBatch{queries, errCh}
	return <-errCh
}

// cypherQueryBatch is one caller's queries plus the channel used to hand
// back the execution result.
type cypherQueryBatch struct {
	queries []*neoism.CypherQuery
	err     chan error
}
// batcher is the aggregation loop: it collects queries until bcr.count is
// reached or the flush timer fires, runs them as one batch, and fans the
// resulting error back to every waiting caller.
func (bcr *BatchCypherRunner) batcher() {
	g := metrics.GetOrRegisterGauge("batchQueueSize", metrics.DefaultRegistry)
	b := metrics.GetOrRegisterMeter("batchThroughput", metrics.DefaultRegistry)
	var currentQueries []*neoism.CypherQuery
	var currentErrorChannels []chan error
	timer := time.NewTimer(1 * time.Second)
	for {
		select {
		case cb := <-bcr.ch:
			for _, query := range cb.queries {
				currentQueries = append(currentQueries, query)
				g.Update(int64(len(currentQueries)))
			}
			currentErrorChannels = append(currentErrorChannels, cb.err)
			// Not full yet: push the flush deadline out and keep collecting.
			if len(currentQueries) < bcr.count {
				timer.Reset(bcr.duration)
				continue
			}
		case <-timer.C:
			//do nothing
		}
		if len(currentQueries) > 0 {
			t := metrics.GetOrRegisterTimer("execute-neo4j-batch", metrics.DefaultRegistry)
			var err error
			t.Time(func() {
				err = bcr.cr.CypherBatch(currentQueries)
			})
			if err != nil {
				// log.Println does not interpret format verbs; Printf does.
				log.Printf("ERROR Got error running batch, error=%v\n", err)
			}
			for _, cec := range currentErrorChannels {
				cec <- err
			}
			b.Mark(int64(len(currentQueries)))
			g.Update(0)
			currentQueries = currentQueries[0:0] // clears the slice
			currentErrorChannels = currentErrorChannels[0:0]
		}
	}
}
fix log formatting.
package main
import (
"github.com/jmcvetta/neoism"
"github.com/rcrowley/go-metrics"
"log"
"time"
)
// CypherRunner executes a batch of Cypher queries.
type CypherRunner interface {
	CypherBatch(queries []*neoism.CypherQuery) error
}

// NewBatchCypherRunner wraps cypherRunner so that incoming query batches
// are accumulated and flushed together once count queries are queued or
// duration elapses.
func NewBatchCypherRunner(cypherRunner CypherRunner, count int, duration time.Duration) CypherRunner {
	cr := BatchCypherRunner{cypherRunner, make(chan cypherQueryBatch), count, duration}
	go cr.batcher()
	return &cr
}

// BatchCypherRunner aggregates queries from many callers into larger
// batches for the underlying runner.
type BatchCypherRunner struct {
	cr       CypherRunner
	ch       chan cypherQueryBatch
	count    int
	duration time.Duration
}

// CypherBatch submits queries to the batcher and blocks until the batch
// containing them has executed, returning that batch's error.
func (bcr *BatchCypherRunner) CypherBatch(queries []*neoism.CypherQuery) error {
	errCh := make(chan error)
	bcr.ch <- cypherQueryBatch{queries, errCh}
	return <-errCh
}

// cypherQueryBatch is one caller's queries plus the channel used to hand
// back the execution result.
type cypherQueryBatch struct {
	queries []*neoism.CypherQuery
	err     chan error
}
// batcher is the aggregation loop: it collects queries until bcr.count is
// reached or the flush timer fires, runs them as one batch, and fans the
// resulting error back to every waiting caller.
func (bcr *BatchCypherRunner) batcher() {
	g := metrics.GetOrRegisterGauge("batchQueueSize", metrics.DefaultRegistry)
	b := metrics.GetOrRegisterMeter("batchThroughput", metrics.DefaultRegistry)
	var currentQueries []*neoism.CypherQuery
	var currentErrorChannels []chan error
	timer := time.NewTimer(1 * time.Second)
	for {
		select {
		case cb := <-bcr.ch:
			for _, query := range cb.queries {
				currentQueries = append(currentQueries, query)
				g.Update(int64(len(currentQueries)))
			}
			currentErrorChannels = append(currentErrorChannels, cb.err)
			// Not full yet: push the flush deadline out and keep collecting.
			if len(currentQueries) < bcr.count {
				timer.Reset(bcr.duration)
				continue
			}
		case <-timer.C:
			//do nothing
		}
		if len(currentQueries) > 0 {
			t := metrics.GetOrRegisterTimer("execute-neo4j-batch", metrics.DefaultRegistry)
			var err error
			t.Time(func() {
				err = bcr.cr.CypherBatch(currentQueries)
			})
			if err != nil {
				log.Printf("ERROR Got error running batch, error=%v\n", err)
			}
			// Every caller whose queries were in this batch gets the same result.
			for _, cec := range currentErrorChannels {
				cec <- err
			}
			b.Mark(int64(len(currentQueries)))
			g.Update(0)
			currentQueries = currentQueries[0:0] // clears the slice
			currentErrorChannels = currentErrorChannels[0:0]
		}
	}
}
|
package main
import (
"net/http"
"github.com/spring1843/chat-server/src/chat"
"github.com/spring1843/chat-server/src/config"
"github.com/spring1843/chat-server/src/drivers/rest"
"github.com/spring1843/chat-server/src/drivers/telnet"
"github.com/spring1843/chat-server/src/drivers/websocket"
"github.com/spring1843/chat-server/src/shared/logs"
)
// staticWebContentDir is where the web driver serves static assets from,
// relative to the working directory.
const staticWebContentDir = "../bin/web"
// bootstrap starts the chat server core and then each driver whose listen
// address is configured; unconfigured drivers are skipped with a warning
// instead of failing at bind time.
func bootstrap(config config.Config) {
	chatServer := chat.NewServer()
	chatServer.Listen()
	logs.Info("Chat Server started")
	if config.TelnetAddress != "" {
		logs.FatalIfErrf(startTelnet(config, chatServer), "Could not start telnet server.")
	} else {
		logs.Warnf("TelnetAddress is empty, not running Telnet Driver")
	}
	if config.WebAddress == "" {
		logs.Warnf("WebAddress is empty, not running Web Drivers")
		return
	}
	restHandler := rest.GetHandler(chatServer)
	websocket.SetWebSocket(chatServer)
	fs := http.FileServer(http.Dir(staticWebContentDir))
	http.Handle("/api/", restHandler)
	http.HandleFunc("/ws", websocket.Handler)
	http.Handle("/", fs)
	go func() {
		// The listener is TLS; the old message advertised "http:/" with a
		// missing slash. Log a usable URL.
		logs.Infof("Serving static files, Rest, WebSocket on https://%s/", config.WebAddress)
		logs.FatalIfErrf(http.ListenAndServeTLS(config.WebAddress, "tls.crt", "tls.key", nil), "Could not start Rest server.")
	}()
}
// startTelnet brings up the telnet driver, logging on success.
func startTelnet(config config.Config, chatServer *chat.Server) error {
	if err := telnet.Start(chatServer, config); err != nil {
		return err
	}
	logs.Info("Telnet server started")
	return nil
}
If config address is empty for telnet or web, do not serve
package main
import (
"net/http"
"github.com/spring1843/chat-server/src/chat"
"github.com/spring1843/chat-server/src/config"
"github.com/spring1843/chat-server/src/drivers/rest"
"github.com/spring1843/chat-server/src/drivers/telnet"
"github.com/spring1843/chat-server/src/drivers/websocket"
"github.com/spring1843/chat-server/src/shared/logs"
)
// staticWebContentDir is where the web driver serves static assets from,
// relative to the working directory.
const staticWebContentDir = "../bin/web"

// bootstrap starts the chat server core and then each driver whose listen
// address is configured; unconfigured drivers are skipped with a warning.
func bootstrap(config config.Config) {
	chatServer := chat.NewServer()
	chatServer.Listen()
	logs.Info("Chat Server started")
	if config.TelnetAddress != "" {
		logs.FatalIfErrf(startTelnet(config, chatServer), "Could not start telnet server.")
	} else {
		logs.Warnf("TelnetAddress is empty, not running Telnet Driver")
	}
	if config.WebAddress != "" {
		logs.FatalIfErrf(startWeb(config, chatServer), "Could not start web server.")
	} else {
		logs.Warnf("WebAddress is empty, not running Web Drivers")
	}
}
// startTelnet brings up the telnet driver, logging on success.
func startTelnet(config config.Config, chatServer *chat.Server) error {
	if err := telnet.Start(chatServer, config); err != nil {
		return err
	}
	logs.Info("Telnet server started")
	return nil
}
// startWeb wires the REST, WebSocket, and static-file handlers onto the
// default mux and serves them over TLS in a background goroutine. The
// returned error is always nil; bind failures are fatal inside the
// goroutine.
func startWeb(config config.Config, chatServer *chat.Server) error {
	restHandler := rest.GetHandler(chatServer)
	websocket.SetWebSocket(chatServer)
	fs := http.FileServer(http.Dir(staticWebContentDir))
	http.Handle("/api/", restHandler)
	http.HandleFunc("/ws", websocket.Handler)
	http.Handle("/", fs)
	go func() {
		// The listener is TLS; the old message advertised "http:/" with a
		// missing slash. Log a usable URL.
		logs.Infof("Serving static files, Rest, WebSocket on https://%s/", config.WebAddress)
		logs.FatalIfErrf(http.ListenAndServeTLS(config.WebAddress, "tls.crt", "tls.key", nil), "Could not start Rest server.")
	}()
	return nil
}
|
package outputreport
import (
"context"
"fmt"
"time"
"github.com/tsaikd/gogstash/config"
"github.com/tsaikd/gogstash/config/logevent"
)
// ModuleName is the name used in config file
const ModuleName = "report"

// OutputConfig holds the configuration json fields and internal objects
type OutputConfig struct {
	config.OutputConfig
	Interval     int    `json:"interval,omitempty"`      // seconds between reports
	TimeFormat   string `json:"time_format,omitempty"`   // layout for the report timestamp
	ReportPrefix string `json:"report_prefix,omitempty"` // text printed before the count
	// ProcessCount is incremented by Output and reset by report.
	// NOTE(review): accessed from two goroutines without synchronization —
	// this is a data race.
	ProcessCount int `json:"-"`
}
// DefaultOutputConfig returns an OutputConfig struct with default values.
func DefaultOutputConfig() OutputConfig {
	var conf OutputConfig
	conf.OutputConfig = config.OutputConfig{
		CommonConfig: config.CommonConfig{Type: ModuleName},
	}
	conf.Interval = 5
	conf.TimeFormat = "[2/Jan/2006:15:04:05 -0700]"
	return conf
}
// InitHandler initialize the output plugin
func InitHandler(ctx context.Context, raw *config.ConfigRaw) (config.TypeOutputConfig, error) {
	conf := DefaultOutputConfig()
	if err := config.ReflectConfig(raw, &conf); err != nil {
		return nil, err
	}
	// The reporter runs for the lifetime of ctx.
	go conf.reportLoop(ctx)
	return &conf, nil
}

// Output event
// NOTE(review): this increment is unsynchronized while reportLoop reads
// and resets the counter concurrently — a data race.
func (t *OutputConfig) Output(ctx context.Context, event logevent.LogEvent) (err error) {
	t.ProcessCount++
	return
}

// reportLoop prints a report immediately at startup and then once per
// configured interval until ctx is cancelled.
func (t *OutputConfig) reportLoop(ctx context.Context) (err error) {
	startChan := make(chan bool, 1) // startup tick
	ticker := time.NewTicker(time.Duration(t.Interval) * time.Second)
	defer ticker.Stop()
	startChan <- true
	for {
		select {
		case <-ctx.Done():
			return nil
		case <-startChan:
			t.report()
		case <-ticker.C:
			t.report()
		}
	}
}
// report prints and resets the processed-event counter, staying silent
// when nothing was processed since the last tick.
// NOTE(review): ProcessCount is also incremented from Output without
// synchronization — this read/reset races with it.
func (t *OutputConfig) report() {
	if t.ProcessCount <= 0 {
		return
	}
	fmt.Printf(
		"%s %sProcess %d events\n",
		time.Now().Format(t.TimeFormat),
		t.ReportPrefix,
		t.ProcessCount,
	)
	t.ProcessCount = 0
}
output/report: fix for race condition
package outputreport
import (
"context"
"fmt"
"sync/atomic"
"time"
"github.com/tsaikd/gogstash/config"
"github.com/tsaikd/gogstash/config/logevent"
)
// ModuleName is the name used in config file
const ModuleName = "report"

// OutputConfig holds the configuration json fields and internal objects
type OutputConfig struct {
	config.OutputConfig
	Interval     int    `json:"interval,omitempty"`      // seconds between reports
	TimeFormat   string `json:"time_format,omitempty"`   // layout for the report timestamp
	ReportPrefix string `json:"report_prefix,omitempty"` // text printed before the count
	// ProcessCount is updated atomically: Output increments it, report
	// reads and resets it.
	ProcessCount int64 `json:"-"`
}
// DefaultOutputConfig returns an OutputConfig struct with default values.
func DefaultOutputConfig() OutputConfig {
	var conf OutputConfig
	conf.OutputConfig = config.OutputConfig{
		CommonConfig: config.CommonConfig{Type: ModuleName},
	}
	conf.Interval = 5
	conf.TimeFormat = "[2/Jan/2006:15:04:05 -0700]"
	return conf
}
// InitHandler initialize the output plugin
func InitHandler(ctx context.Context, raw *config.ConfigRaw) (config.TypeOutputConfig, error) {
	conf := DefaultOutputConfig()
	if err := config.ReflectConfig(raw, &conf); err != nil {
		return nil, err
	}
	// The reporter runs for the lifetime of ctx.
	go conf.reportLoop(ctx)
	return &conf, nil
}

// Output event
func (t *OutputConfig) Output(ctx context.Context, event logevent.LogEvent) (err error) {
	// Atomic increment: report reads/resets this counter concurrently.
	atomic.AddInt64(&t.ProcessCount, 1)
	return
}

// reportLoop prints a report immediately at startup and then once per
// configured interval until ctx is cancelled.
func (t *OutputConfig) reportLoop(ctx context.Context) {
	startChan := make(chan bool, 1) // startup tick
	ticker := time.NewTicker(time.Duration(t.Interval) * time.Second)
	defer ticker.Stop()
	startChan <- true
	for {
		select {
		case <-ctx.Done():
			return
		case <-startChan:
			t.report()
		case <-ticker.C:
			t.report()
		}
	}
}
// report prints the number of events processed since the last report and
// resets the counter; it is a no-op when no events arrived.
func (t *OutputConfig) report() {
	processed := atomic.LoadInt64(&t.ProcessCount)
	if processed < 1 {
		return
	}
	stamp := time.Now().Format(t.TimeFormat)
	fmt.Printf("%s %sProcess %d events\n", stamp, t.ReportPrefix, processed)
	atomic.StoreInt64(&t.ProcessCount, 0)
}
|
// Copyright 2013 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package drive
import (
"errors"
"os"
"path"
"path/filepath"
"regexp"
"strings"
"github.com/cheggaaa/pb"
"github.com/mattn/go-isatty"
expirableCache "github.com/odeke-em/cache"
"github.com/odeke-em/drive/config"
"github.com/odeke-em/log"
)
var (
	// ErrNoContext is returned when an operation requires a drive context
	// but none is present.
	ErrNoContext = errors.New("not in a drive context")
)

const (
	// DriveIgnoreSuffix is the name of the per-context ignore file.
	DriveIgnoreSuffix = ".driveignore"
)

// Options carries every tunable for a drive command invocation.
type Options struct {
	// Depth is the number of pages/ listing recursion depth
	Depth int
	// Exports contains the formats to export your Google Docs + Sheets to
	// e.g ["csv" "txt"]
	Exports []string
	// ExportsDir is the directory to put the exported Google Docs + Sheets.
	// If not provided, will export them to the same dir as the source files are
	ExportsDir string
	// Force once set always converts NoChange into an Addition
	Force bool
	// Hidden discovers hidden paths if set
	Hidden       bool
	IgnoreRegexp *regexp.Regexp
	// IgnoreChecksum when set avoids the step
	// of comparing checksums as a final check.
	IgnoreChecksum bool
	// IgnoreConflict when set turns off the conflict resolution safety.
	IgnoreConflict bool
	// Allows listing of content in trash
	InTrash bool
	Meta    *map[string][]string
	Mount   *config.Mount
	// NoClobber when set prevents overwriting of stale content
	NoClobber bool
	// NoPrompt overwrites any prompt pauses
	NoPrompt bool
	Path     string
	// PageSize determines the number of results returned per API call
	PageSize  int64
	Recursive bool
	// Sources is a of list all paths that are
	// within the scope/path of the current gd context
	Sources []string
	// TypeMask contains the result of setting different type bits e.g
	// Folder to search only for folders etc.
	TypeMask int
	// Piped when set means to infer content to or from stdin
	Piped bool
	// Quiet when set toggles only logging of errors to stderrs as
	// well as reading from stdin in this case stdout is not logged to
	Quiet             bool
	StdoutIsTty       bool
	IgnoreNameClashes bool
	ExcludeCrudMask   CrudValue
	ExplicitlyExport  bool
	Md5sum            bool
	indexingOnly      bool
	Verbose           bool
	FixClashes        bool
	Match             bool
	Starred           bool
	// BaseLocal when set, during a diff uses the local file
	// as the base otherwise remote is used as the base
	BaseLocal bool
}

// Commands bundles everything a drive sub-command needs: context, remote
// handle, options, logger, and progress/caching helpers.
type Commands struct {
	context       *config.Context
	rem           *Remote
	opts          *Options
	rcOpts        *Options
	log           *log.Logger
	progress      *pb.ProgressBar
	mkdirAllCache *expirableCache.OperationCache
}
// canPrompt reports whether interactive prompts are allowed: a TTY on
// stdout and neither Quiet nor NoPrompt set.
func (opts *Options) canPrompt() bool {
	if opts == nil {
		return false
	}
	return opts.StdoutIsTty && !opts.Quiet && !opts.NoPrompt
}
// canPreview reports whether previews may be shown: a TTY on stdout and
// not running in quiet mode.
func (opts *Options) canPreview() bool {
	return opts != nil && opts.StdoutIsTty && !opts.Quiet
}
// rcPathChecker returns the rc file path under absDir if one exists,
// os.ErrNotExist when it is absent, or the underlying stat error.
func rcPathChecker(absDir string) (string, error) {
	p := rcPath(absDir)
	info, err := os.Stat(p)
	switch {
	case err != nil && !os.IsNotExist(err):
		return "", err
	case info == nil:
		return "", os.ErrNotExist
	}
	return p, nil
}
// rcPath walks from opts.Path up toward the filesystem root looking for a
// local rc file, falling back to the one in the user's home directory.
func (opts *Options) rcPath() (string, error) {
	lastCurPath := ""
	for curPath := opts.Path; curPath != ""; curPath = path.Dir(curPath) {
		localRCP, err := rcPathChecker(curPath)
		if err == nil && localRCP != "" {
			return localRCP, nil
		}
		// A dead `if false && ...` error check used to sit here; stat
		// errors are deliberately ignored so the walk can continue upward.
		if lastCurPath == curPath { // Avoid getting a stalemate incase path.Dir cannot progress
			break
		}
		lastCurPath = curPath
	}
	return rcPathChecker(FsHomeDir)
}
// New builds a Commands value around the given context and options. A nil
// context yields no remote handle; nil options (e.g. for `drive init`)
// get a default logger instead of panicking (issue #524).
func New(context *config.Context, opts *Options) *Commands {
	var r *Remote
	if context != nil {
		r = NewRemoteContext(context)
	}
	stdin, stdout, stderr := os.Stdin, os.Stdout, os.Stderr
	var logger *log.Logger = nil
	if opts != nil {
		if opts.Quiet {
			stdout = nil
		}
		if stdout != nil {
			opts.StdoutIsTty = isatty.IsTerminal(stdout.Fd())
		}
		if stdout == nil && opts.Piped {
			panic("piped requires stdout to be non-nil")
		}
		logger = log.New(stdin, stdout, stderr)
		// should always start with /
		opts.Path = path.Clean(path.Join("/", opts.Path))
		if !opts.Force {
			ignoresPath := filepath.Join(context.AbsPath, DriveIgnoreSuffix)
			ignoreRegexp, regErr := combineIgnores(ignoresPath)
			if regErr != nil {
				logger.LogErrf("combining ignores from path %s and internally: %v\n", ignoresPath, regErr)
			}
			opts.IgnoreRegexp = ignoreRegexp
		}
	}
	if logger == nil {
		// Commands such as `init` pass nil opts: fall back to a vanilla
		// logger rather than panicking.
		logger = log.New(stdin, stdout, stderr)
	}
	return &Commands{
		context:       context,
		rem:           r,
		opts:          opts,
		log:           logger,
		mkdirAllCache: expirableCache.New(),
	}
}
// combineIgnores merges the clauses from the ignore file at ignoresPath
// ("#"-commented lines stripped) with the built-in ignore clauses and
// compiles them into one alternation regexp. A missing file is not an
// error; with no clauses at all the returned regexp is nil.
func combineIgnores(ignoresPath string) (*regexp.Regexp, error) {
	clauses, err := readCommentedFile(ignoresPath, "#")
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	all := append(clauses, internalIgnores()...)
	if len(all) == 0 {
		return nil, nil
	}
	return regexp.Compile(strings.Join(all, "|"))
}
// taskStart initializes the progress bar when there is at least one task.
func (g *Commands) taskStart(tasks int64) {
	if tasks <= 0 {
		return
	}
	g.progress = newProgressBar(tasks)
}

// newProgressBar creates and starts a progress bar sized to total.
func newProgressBar(total int64) *pb.ProgressBar {
	bar := pb.New64(total)
	bar.Start()
	return bar
}

// taskAdd records n completed units on the progress bar, if one is active.
func (g *Commands) taskAdd(n int64) {
	if bar := g.progress; bar != nil {
		bar.Add64(n)
	}
}

// taskFinish finalizes the progress bar, if one is active.
func (g *Commands) taskFinish() {
	if bar := g.progress; bar != nil {
		bar.Finish()
	}
}
new: define a custom logger if opts==nil
Fixes #524.
For commands such as `init`, the opts are nil so use
the vanilla logger for such cases and don't panic.
// Copyright 2013 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package drive
import (
"errors"
"os"
"path"
"path/filepath"
"regexp"
"strings"
"github.com/cheggaaa/pb"
"github.com/mattn/go-isatty"
expirableCache "github.com/odeke-em/cache"
"github.com/odeke-em/drive/config"
"github.com/odeke-em/log"
)
var (
	// ErrNoContext is returned when an operation requires an
	// initialized drive context but none is present.
	ErrNoContext = errors.New("not in a drive context")
)

const (
	// DriveIgnoreSuffix is the filename of the per-context ignore file
	// read by combineIgnores.
	DriveIgnoreSuffix = ".driveignore"
)
// Options carries every user-tunable knob for a drive command
// invocation.
type Options struct {
	// Depth is the number of pages/ listing recursion depth
	Depth int
	// Exports contains the formats to export your Google Docs + Sheets to
	// e.g ["csv" "txt"]
	Exports []string
	// ExportsDir is the directory to put the exported Google Docs + Sheets.
	// If not provided, will export them to the same dir as the source files are
	ExportsDir string
	// Force once set always converts NoChange into an Addition
	Force bool
	// Hidden discovers hidden paths if set
	Hidden bool
	// IgnoreRegexp, when non-nil, excludes matching paths (set by New
	// from the .driveignore file plus internal ignores).
	IgnoreRegexp *regexp.Regexp
	// IgnoreChecksum when set avoids the step
	// of comparing checksums as a final check.
	IgnoreChecksum bool
	// IgnoreConflict when set turns off the conflict resolution safety.
	IgnoreConflict bool
	// Allows listing of content in trash
	InTrash bool
	// Meta holds extra metadata key/value lists — confirm consumers.
	Meta *map[string][]string
	// Mount describes mounted paths, if any.
	Mount *config.Mount
	// NoClobber when set prevents overwriting of stale content
	NoClobber bool
	// NoPrompt overwrites any prompt pauses
	NoPrompt bool
	// Path is the drive path operated on; New roots it at "/".
	Path string
	// PageSize determines the number of results returned per API call
	PageSize int64
	// Recursive descends into sub-directories when set.
	Recursive bool
	// Sources is a of list all paths that are
	// within the scope/path of the current gd context
	Sources []string
	// TypeMask contains the result of setting different type bits e.g
	// Folder to search only for folders etc.
	TypeMask int
	// Piped when set means to infer content to or from stdin
	Piped bool
	// Quiet when set toggles only logging of errors to stderrs as
	// well as reading from stdin in this case stdout is not logged to
	Quiet bool
	// StdoutIsTty records whether stdout is a terminal (set by New).
	StdoutIsTty bool
	// IgnoreNameClashes skips name-clash handling — TODO confirm exact scope.
	IgnoreNameClashes bool
	// ExcludeCrudMask masks out CRUD operations via CrudValue bits.
	ExcludeCrudMask CrudValue
	// ExplicitlyExport — presumably forces exporting; verify with callers.
	ExplicitlyExport bool
	// Md5sum enables md5 checksum usage — confirm semantics.
	Md5sum bool
	// indexingOnly is an internal flag restricting the run to indexing work.
	indexingOnly bool
	// Verbose enables chattier output.
	Verbose bool
	// FixClashes requests automatic clash resolution — confirm semantics.
	FixClashes bool
	// Match enables pattern matching of paths — confirm semantics.
	Match bool
	// Starred restricts operations to starred files — confirm semantics.
	Starred bool
	// BaseLocal when set, during a diff uses the local file
	// as the base otherwise remote is used as the base
	BaseLocal bool
}
// Commands bundles everything a drive subcommand needs to run:
// the active context, the remote client, user options, logging
// and progress reporting.
type Commands struct {
	context *config.Context // active drive context (config + paths)
	rem     *Remote         // remote client; nil when context is nil
	opts    *Options        // options for the current invocation
	rcOpts  *Options        // options sourced from an rc file, if any
	log     *log.Logger     // destination for all command output
	progress *pb.ProgressBar // non-nil only while a task batch runs
	// mkdirAllCache caches mkdir-all operations — presumably to avoid
	// repeating remote directory creation; confirm against callers.
	mkdirAllCache *expirableCache.OperationCache
}
// canPrompt reports whether interactive confirmation is appropriate:
// stdout must be a TTY, quiet mode off, and prompts not disabled.
func (opts *Options) canPrompt() bool {
	if opts == nil {
		return false
	}
	if !opts.StdoutIsTty || opts.Quiet {
		return false
	}
	return !opts.NoPrompt
}

// canPreview reports whether preview output may be shown: stdout must
// be a TTY and quiet mode must be off.
func (opts *Options) canPreview() bool {
	if opts == nil {
		return false
	}
	return opts.StdoutIsTty && !opts.Quiet
}
// rcPathChecker returns the rc-file path under absDir when a stat-able
// entry exists there; otherwise it returns the reason it does not.
func rcPathChecker(absDir string) (string, error) {
	candidate := rcPath(absDir)
	info, statErr := os.Stat(candidate)
	switch {
	case statErr != nil && !os.IsNotExist(statErr):
		return "", statErr
	case info == nil:
		return "", os.ErrNotExist
	default:
		return candidate, nil
	}
}
// rcPath walks from opts.Path up through its parent directories and
// returns the first rc file found; if none exists along the chain it
// falls back to the rc file in the user's home directory.
func (opts *Options) rcPath() (string, error) {
	lastCurPath := ""
	for curPath := opts.Path; curPath != ""; curPath = path.Dir(curPath) {
		localRCP, err := rcPathChecker(curPath)
		if err == nil && localRCP != "" {
			return localRCP, nil
		}
		// Stat errors other than "not exist" are deliberately ignored so
		// an unreadable intermediate directory does not abort the walk.
		// FIX: removed the unreachable `if false && err != nil ...`
		// dead-code branch that previously expressed (and disabled) this.
		if lastCurPath == curPath { // path.Dir hit a fixed point (e.g. "/"); stop to avoid looping forever
			break
		}
		lastCurPath = curPath
	}
	return rcPathChecker(FsHomeDir)
}
// New constructs a Commands instance from the given context and options.
// A nil opts (used by commands such as `init`) gets a plain logger on
// the standard streams instead of panicking (#524).
// FIX: guard context before dereferencing context.AbsPath — previously
// this crashed when opts were supplied outside an initialized context.
func New(context *config.Context, opts *Options) *Commands {
	var r *Remote
	if context != nil {
		r = NewRemoteContext(context)
	}

	stdin, stdout, stderr := os.Stdin, os.Stdout, os.Stderr
	var logger *log.Logger

	if opts == nil {
		logger = log.New(stdin, stdout, stderr)
	} else {
		if opts.Quiet {
			stdout = nil
		}
		if stdout != nil {
			opts.StdoutIsTty = isatty.IsTerminal(stdout.Fd())
		}
		if stdout == nil && opts.Piped {
			panic("piped requires stdout to be non-nil")
		}

		logger = log.New(stdin, stdout, stderr)

		// Command paths are always rooted at "/".
		opts.Path = path.Clean(path.Join("/", opts.Path))

		if !opts.Force && context != nil {
			ignoresPath := filepath.Join(context.AbsPath, DriveIgnoreSuffix)
			ignoreRegexp, regErr := combineIgnores(ignoresPath)
			if regErr != nil {
				logger.LogErrf("combining ignores from path %s and internally: %v\n", ignoresPath, regErr)
			}
			opts.IgnoreRegexp = ignoreRegexp
		}
	}

	return &Commands{
		context:       context,
		rem:           r,
		opts:          opts,
		log:           logger,
		mkdirAllCache: expirableCache.New(),
	}
}
// combineIgnores merges the clauses from the ignore file at ignoresPath
// ("#"-commented lines stripped) with the built-in ignore clauses and
// compiles them into one alternation regexp. A missing file is not an
// error; with no clauses at all the returned regexp is nil.
func combineIgnores(ignoresPath string) (*regexp.Regexp, error) {
	clauses, err := readCommentedFile(ignoresPath, "#")
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	all := append(clauses, internalIgnores()...)
	if len(all) == 0 {
		return nil, nil
	}
	return regexp.Compile(strings.Join(all, "|"))
}
// taskStart initializes the progress bar when there is at least one task.
func (g *Commands) taskStart(tasks int64) {
	if tasks <= 0 {
		return
	}
	g.progress = newProgressBar(tasks)
}

// newProgressBar creates and starts a progress bar sized to total.
func newProgressBar(total int64) *pb.ProgressBar {
	bar := pb.New64(total)
	bar.Start()
	return bar
}

// taskAdd records n completed units on the progress bar, if one is active.
func (g *Commands) taskAdd(n int64) {
	if bar := g.progress; bar != nil {
		bar.Add64(n)
	}
}

// taskFinish finalizes the progress bar, if one is active.
func (g *Commands) taskFinish() {
	if bar := g.progress; bar != nil {
		bar.Finish()
	}
}
|
/*
Copyright 2012 Sergey Cherepanov (https://github.com/cheggaaa)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cnst
const (
	// VERSION is the current Anteater release version.
	VERSION = "0.3.9"
	// SIGN is the human-readable signature string derived from VERSION.
	SIGN = "Anteater " + VERSION
)
v0.3.10
/*
Copyright 2012 Sergey Cherepanov (https://github.com/cheggaaa)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cnst
const (
	// VERSION is the current Anteater release version.
	VERSION = "0.3.10"
	// SIGN is the human-readable signature string derived from VERSION.
	SIGN = "Anteater " + VERSION
)
|
package comm
import (
"bytes"
"encoding/binary"
"fmt"
"net"
"time"
"github.com/pkg/errors"
"github.com/schollz/logger"
)
// MAXBYTES is the upper bound Read accepts for a single framed
// message's payload length.
const MAXBYTES = 1000000

// Comm is some basic TCP communication
type Comm struct {
	connection net.Conn // underlying transport used for all reads/writes
}
// NewConnection gets a new comm to a tcp address.
// An optional timelimit overrides the default 30-second dial timeout.
func NewConnection(address string, timelimit ...time.Duration) (c *Comm, err error) {
	dialTimeout := 30 * time.Second
	if len(timelimit) != 0 {
		dialTimeout = timelimit[0]
	}
	var conn net.Conn
	conn, err = net.DialTimeout("tcp", address, dialTimeout)
	if err == nil {
		c = New(conn)
	}
	return
}
// New returns a new comm wrapping c, with generous (3-hour) deadlines
// to accommodate long transfers.
func New(c net.Conn) *Comm {
	deadline := time.Now().Add(3 * time.Hour)
	c.SetReadDeadline(deadline)
	c.SetDeadline(deadline)
	c.SetWriteDeadline(deadline)
	return &Comm{connection: c}
}
// Connection returns the net.Conn connection
// underlying this Comm.
func (c *Comm) Connection() net.Conn {
	return c.connection
}

// Close closes the connection; any error from the underlying Conn is
// discarded.
func (c *Comm) Close() {
	c.connection.Close()
}
// Write frames b with a 4-byte little-endian length header and writes
// the framed message to the connection. It returns the number of bytes
// written (header included) and any error, annotated on a short write.
func (c *Comm) Write(b []byte) (int, error) {
	header := new(bytes.Buffer)
	err := binary.Write(header, binary.LittleEndian, uint32(len(b)))
	if err != nil {
		// Writing to a bytes.Buffer cannot realistically fail; log and continue.
		fmt.Println("binary.Write failed:", err)
	}
	tmpCopy := append(header.Bytes(), b...)
	n, err := c.connection.Write(tmpCopy)
	if n != len(tmpCopy) {
		// FIX: the short-write message previously reported len(b) while
		// the check compares n against len(tmpCopy) (header included),
		// which made "wanted X wrote X" errors possible. Report the
		// framed length actually attempted.
		if err != nil {
			err = errors.Wrap(err, fmt.Sprintf("wanted to write %d but wrote %d", len(tmpCopy), n))
		} else {
			err = fmt.Errorf("wanted to write %d but wrote %d", len(tmpCopy), n)
		}
	}
	return n, err
}
// Read reads one framed message: a 4-byte little-endian length header
// followed by that many payload bytes. It returns the payload and its
// length; the bs return is never assigned (always nil) and is kept
// only for interface compatibility.
func (c *Comm) Read() (buf []byte, numBytes int, bs []byte, err error) {
	// long read deadline in case waiting for file
	c.connection.SetReadDeadline(time.Now().Add(3 * time.Hour))
	// read until we get 4 bytes for the header
	var header []byte
	numBytes = 4
	for {
		tmp := make([]byte, numBytes-len(header))
		n, errRead := c.connection.Read(tmp)
		if errRead != nil {
			err = errRead
			// FIX: Debugf, not Debug — the message is a printf format string.
			logger.Debugf("initial read error: %s", err.Error())
			return
		}
		header = append(header, tmp[:n]...)
		if numBytes == len(header) {
			break
		}
	}
	var numBytesUint32 uint32
	rbuf := bytes.NewReader(header)
	err = binary.Read(rbuf, binary.LittleEndian, &numBytesUint32)
	if err != nil {
		err = fmt.Errorf("binary.Read failed: %s", err.Error())
		logger.Debug(err.Error())
		return
	}
	numBytes = int(numBytesUint32)
	// FIX: also reject negative lengths — on 32-bit platforms a large
	// uint32 converts to a negative int, which previously slipped past
	// the MAXBYTES check and panicked in make() below.
	if numBytes < 0 || numBytes > MAXBYTES {
		err = fmt.Errorf("too many bytes: %d", numBytes)
		logger.Debug(err)
		return
	}
	buf = make([]byte, 0)
	// shorten the reading deadline in case getting weird data
	c.connection.SetReadDeadline(time.Now().Add(10 * time.Second))
	for {
		tmp := make([]byte, numBytes-len(buf))
		n, errRead := c.connection.Read(tmp)
		if errRead != nil {
			err = errRead
			logger.Debugf("consecutive read error: %s", err.Error())
			return
		}
		buf = append(buf, tmp[:n]...)
		if numBytes == len(buf) {
			break
		}
	}
	return
}
// Send a message
// (Write adds the 4-byte length framing).
func (c *Comm) Send(message []byte) (err error) {
	_, err = c.Write(message)
	return
}

// Receive a message
// — the payload only; Read's byte counts and unused bs return are discarded.
func (c *Comm) Receive() (b []byte, err error) {
	b, _, _, err = c.Read()
	return
}
whoops
package comm
import (
"bytes"
"encoding/binary"
"fmt"
"net"
"time"
"github.com/pkg/errors"
"github.com/schollz/logger"
)
// MAXBYTES is the upper bound Read accepts for a single framed
// message's payload length.
const MAXBYTES = 1000000

// Comm is some basic TCP communication
type Comm struct {
	connection net.Conn // underlying transport used for all reads/writes
}
// NewConnection gets a new comm to a tcp address.
// An optional timelimit overrides the default 30-second dial timeout.
func NewConnection(address string, timelimit ...time.Duration) (c *Comm, err error) {
	dialTimeout := 30 * time.Second
	if len(timelimit) != 0 {
		dialTimeout = timelimit[0]
	}
	var conn net.Conn
	conn, err = net.DialTimeout("tcp", address, dialTimeout)
	if err == nil {
		c = New(conn)
	}
	return
}
// New returns a new comm wrapping c, with generous (3-hour) deadlines
// to accommodate long transfers.
func New(c net.Conn) *Comm {
	deadline := time.Now().Add(3 * time.Hour)
	c.SetReadDeadline(deadline)
	c.SetDeadline(deadline)
	c.SetWriteDeadline(deadline)
	return &Comm{connection: c}
}
// Connection returns the net.Conn connection
// underlying this Comm.
func (c *Comm) Connection() net.Conn {
	return c.connection
}

// Close closes the connection; any error from the underlying Conn is
// discarded.
func (c *Comm) Close() {
	c.connection.Close()
}
// Write frames b with a 4-byte little-endian length header and writes
// the framed message to the connection. It returns the number of bytes
// written (header included) and any error, annotated on a short write.
func (c *Comm) Write(b []byte) (int, error) {
	header := new(bytes.Buffer)
	err := binary.Write(header, binary.LittleEndian, uint32(len(b)))
	if err != nil {
		// Writing to a bytes.Buffer cannot realistically fail; log and continue.
		fmt.Println("binary.Write failed:", err)
	}
	tmpCopy := append(header.Bytes(), b...)
	n, err := c.connection.Write(tmpCopy)
	if n != len(tmpCopy) {
		// FIX: the short-write message previously reported len(b) while
		// the check compares n against len(tmpCopy) (header included),
		// which made "wanted X wrote X" errors possible. Report the
		// framed length actually attempted.
		if err != nil {
			err = errors.Wrap(err, fmt.Sprintf("wanted to write %d but wrote %d", len(tmpCopy), n))
		} else {
			err = fmt.Errorf("wanted to write %d but wrote %d", len(tmpCopy), n)
		}
	}
	return n, err
}
// Read reads one framed message: a 4-byte little-endian length header
// followed by that many payload bytes. It returns the payload and its
// length; the bs return is never assigned (always nil) and is kept
// only for interface compatibility.
func (c *Comm) Read() (buf []byte, numBytes int, bs []byte, err error) {
	// long read deadline in case waiting for file
	c.connection.SetReadDeadline(time.Now().Add(3 * time.Hour))
	// read until we get 4 bytes for the header
	var header []byte
	numBytes = 4
	for {
		tmp := make([]byte, numBytes-len(header))
		n, errRead := c.connection.Read(tmp)
		if errRead != nil {
			err = errRead
			logger.Debugf("initial read error: %s", err.Error())
			return
		}
		header = append(header, tmp[:n]...)
		if numBytes == len(header) {
			break
		}
	}
	var numBytesUint32 uint32
	rbuf := bytes.NewReader(header)
	err = binary.Read(rbuf, binary.LittleEndian, &numBytesUint32)
	if err != nil {
		err = fmt.Errorf("binary.Read failed: %s", err.Error())
		logger.Debug(err.Error())
		return
	}
	numBytes = int(numBytesUint32)
	// FIX: also reject negative lengths — on 32-bit platforms a large
	// uint32 converts to a negative int, which previously slipped past
	// the MAXBYTES check and panicked in make() below.
	if numBytes < 0 || numBytes > MAXBYTES {
		err = fmt.Errorf("too many bytes: %d", numBytes)
		logger.Debug(err)
		return
	}
	buf = make([]byte, 0)
	// shorten the reading deadline in case getting weird data
	c.connection.SetReadDeadline(time.Now().Add(10 * time.Second))
	for {
		tmp := make([]byte, numBytes-len(buf))
		n, errRead := c.connection.Read(tmp)
		if errRead != nil {
			err = errRead
			logger.Debugf("consecutive read error: %s", err.Error())
			return
		}
		buf = append(buf, tmp[:n]...)
		if numBytes == len(buf) {
			break
		}
	}
	return
}
// Send a message
// (Write adds the 4-byte length framing).
func (c *Comm) Send(message []byte) (err error) {
	_, err = c.Write(message)
	return
}

// Receive a message
// — the payload only; Read's byte counts and unused bs return are discarded.
func (c *Comm) Receive() (b []byte, err error) {
	b, _, _, err = c.Read()
	return
}
|
package boxstrapper_test
import (
"testing"
. "github.com/gaffo/boxstrapper"
"github.com/stretchr/testify/assert"
)
// TestReadEmptyRepo verifies that reading packages from a nonexistent
// repository directory yields an error and empty data.
func TestReadEmptyRepo(t *testing.T) {
	a := assert.New(t)
	store := NewFilesystemStorage("/nonexistent")
	data, err := store.ReadPackages()
	a.NotNil(err)
	a.Equal("", data)
}
TestReadPackages_RepoWithPackagefile
package boxstrapper_test
import (
"testing"
. "github.com/gaffo/boxstrapper"
"github.com/stretchr/testify/assert"
"io/ioutil"
"os"
)
// TestReadPackages_EmptyRepo verifies that reading packages from a
// nonexistent repository directory yields an error and empty data.
func TestReadPackages_EmptyRepo(t *testing.T) {
	a := assert.New(t)
	store := NewFilesystemStorage("/nonexistent")
	data, err := store.ReadPackages()
	a.NotNil(err)
	a.Equal("", data)
}
func TestReadPackages_RepoWithPackagefile(t *testing.T) {
assert := assert.New(t)
_ = os.MkdirAll("tmp/r1", os.ModePerm)
defer func () {
_ = os.RemoveAll("tmp")
}()
_ = ioutil.WriteFile("tmp/r1/packages.bss", []byte(`contents`), os.ModePerm)
storage := NewFilesystemStorage("tmp/r1")
data, err := storage.ReadPackages()
assert.Nil(err)
assert.Equal(`contents`, data)
} |
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// Most of this is copied straight from heka's TOML config setup
package simplepush
import (
"fmt"
"reflect"
"regexp"
"github.com/bbangert/toml"
)
// Extensible sections
// AvailableExtensions maps a type name (the "type" key of a config
// section) to a factory for that extension's plugin.
type AvailableExtensions map[string]func() HasConfigStruct

// ExtensibleGlobals holds the keys shared by every extensible section.
type ExtensibleGlobals struct {
	Typ string `toml:"type"` // selects which registered extension to load
}

// Generic config section with any set of options
type ConfigFile map[string]toml.Primitive

// Interface for something that needs a set of config options
type HasConfigStruct interface {
	// Returns a default-value-populated configuration structure into which
	// the plugin's TOML configuration will be deserialized.
	ConfigStruct() interface{}
	// The configuration loaded after ConfigStruct will be passed in
	// Throwing an error here will cause the application to stop loading
	Init(app *Application, config interface{}) error
}

// unknownOptionRegex extracts the offending key name from
// toml.PrimitiveDecodeStrict's "Configuration contains key [...]" error.
var unknownOptionRegex = regexp.MustCompile("^Configuration contains key \\[(?P<key>\\S+)\\]")
// Plugin types
// PluginType identifies each pluggable subsystem; values start at 1 so
// the zero value is never a valid plugin.
type PluginType int

const (
	PluginApp PluginType = iota + 1
	PluginLogger
	PluginPinger
	PluginMetrics
	PluginStore
	PluginRouter
	PluginLocator
	PluginServer
	PluginHandlers
)
// pluginNames maps each PluginType to its human-readable name, used by
// String() in error messages such as "Missing plugin loader for %s".
var pluginNames = map[PluginType]string{
	PluginApp:      "app",
	PluginLogger:   "logger",
	PluginPinger:   "pinger",
	PluginMetrics:  "metrics",
	PluginStore:    "store",
	PluginRouter:   "router", // FIX: entry was missing, so PluginRouter.String() returned ""
	PluginLocator:  "locator",
	PluginServer:   "server",
	PluginHandlers: "handlers",
}
// String returns the plugin's registered name. FIX: unknown values now
// fall back to a numeric form instead of silently yielding "", which
// produced blank names in error messages.
func (t PluginType) String() string {
	if name, ok := pluginNames[t]; ok {
		return name
	}
	return fmt.Sprintf("PluginType(%d)", int(t))
}
// Plugin loaders
// PluginLoaders maps each plugin type to the factory that builds it.
type PluginLoaders map[PluginType]func(*Application) (HasConfigStruct, error)

// loadPlugin invokes the registered loader for plugin, or errors when
// none is registered.
func (l PluginLoaders) loadPlugin(plugin PluginType, app *Application) (HasConfigStruct, error) {
	loader, ok := l[plugin]
	if !ok {
		return nil, fmt.Errorf("Missing plugin loader for %s", plugin)
	}
	return loader(app)
}
// Load wires up an Application from the registered plugin loaders, in
// dependency order: app, logger, metrics, storage, proprietary pinger,
// router, locator, server, handlers.
func (l PluginLoaders) Load(logging int) (*Application, error) {
	var (
		obj HasConfigStruct
		err error
	)
	// We have a somewhat convoluted setup process to ensure prerequisites are
	// available on the Application at each stage of application setup

	// Setup the base application first
	app := new(Application)
	if obj, err = l.loadPlugin(PluginApp, app); err != nil {
		return nil, err
	}

	// Next, many things require the logger, and logger has no other deps
	if obj, err = l.loadPlugin(PluginLogger, app); err != nil {
		return nil, err
	}
	logger := obj.(Logger)
	if logging > 0 {
		logger.SetFilter(LogLevel(logging))
		logger.Log(LogLevel(logging), "config", "Setting minimum logging level from CLI",
			LogFields{"level": fmt.Sprintf("%d", LogLevel(logging))})
	}
	if err = app.SetLogger(logger); err != nil {
		return nil, err
	}

	// Next, metrics, Deps: Logger
	if obj, err = l.loadPlugin(PluginMetrics, app); err != nil {
		return nil, err
	}
	metrics := obj.(*Metrics)
	if err = app.SetMetrics(metrics); err != nil {
		return nil, err
	}

	// Next, storage, Deps: Logger, Metrics
	if obj, err = l.loadPlugin(PluginStore, app); err != nil {
		return nil, err
	}
	store := obj.(Store)
	if err = app.SetStore(store); err != nil {
		return nil, err
	}

	// FIX: load the proprietary pinger AFTER storage — pingers depend on
	// the store being set on the app. Deps: Logger, Metrics, Storage.
	if obj, err = l.loadPlugin(PluginPinger, app); err != nil {
		return nil, err
	}
	propping := obj.(PropPinger)
	if err = app.SetPropPinger(propping); err != nil {
		return nil, err
	}

	// Next, setup the router, Deps: Logger, Metrics
	if obj, err = l.loadPlugin(PluginRouter, app); err != nil {
		return nil, err
	}
	router := obj.(*Router)
	if err = app.SetRouter(router); err != nil {
		return nil, err
	}

	// Set up the node discovery mechanism. Deps: Logger, Metrics, Router.
	if obj, err = l.loadPlugin(PluginLocator, app); err != nil {
		return nil, err
	}
	locator := obj.(Locator)
	if err = router.SetLocator(locator); err != nil {
		return nil, err
	}

	// Finally, setup the handlers, Deps: Logger, Metrics
	if obj, err = l.loadPlugin(PluginServer, app); err != nil {
		return nil, err
	}
	serv := obj.(*Serv)
	app.SetServer(serv)
	if obj, err = l.loadPlugin(PluginHandlers, app); err != nil {
		return nil, err
	}
	handlers := obj.(*Handler)
	app.SetHandlers(handlers)
	return app, nil
}
// LoadConfigStruct decodes a TOML primitive into the structure provided
// by configable, tolerating the shared "global" keys declared on
// ExtensibleGlobals while rejecting any other unknown option.
func LoadConfigStruct(config toml.Primitive, configable HasConfigStruct) (
	configStruct interface{}, err error) {
	configStruct = configable.ConfigStruct()

	// Collect the globally-permitted key names (TOML tag when present,
	// otherwise the Go field name) via reflection on ExtensibleGlobals.
	allowed := make(map[string]interface{})
	gt := reflect.ValueOf(ExtensibleGlobals{}).Type()
	for i := 0; i < gt.NumField(); i++ {
		field := gt.Field(i)
		key := field.Tag.Get("toml")
		if key == "" {
			key = field.Name
		}
		allowed[key] = true
	}

	if err = toml.PrimitiveDecodeStrict(config, configStruct, allowed); err != nil {
		configStruct = nil
		if m := unknownOptionRegex.FindStringSubmatch(err.Error()); len(m) == 2 {
			// We've got an unrecognized config option.
			err = fmt.Errorf("Unknown config setting: %s", m[1])
		}
	}
	return
}
// Loads the config for a section supplied, configures the supplied object, and initializes
func LoadConfigForSection(app *Application, sectionName string, obj HasConfigStruct,
	configFile ConfigFile) (err error) {
	conf, ok := configFile[sectionName]
	if !ok {
		return fmt.Errorf("Error loading config file, section: %s", sectionName)
	}
	confStruct := obj.ConfigStruct()
	if err = toml.PrimitiveDecode(conf, confStruct); err != nil {
		return fmt.Errorf("Unable to decode config for section '%s': %s", sectionName, err)
	}
	return obj.Init(app, confStruct)
}
// Load an extensible section that has a type keyword
// The "type" key selects which registered extension to instantiate;
// an unregistered type falls back to the "default" extension.
func LoadExtensibleSection(app *Application, sectionName string,
	extensions AvailableExtensions, configFile ConfigFile) (HasConfigStruct, error) {
	conf, ok := configFile[sectionName]
	if !ok {
		return nil, fmt.Errorf("Error loading config file, section: %s", sectionName)
	}

	confSection := new(ExtensibleGlobals)
	if err := toml.PrimitiveDecode(conf, confSection); err != nil {
		return nil, err
	}

	ext, found := extensions[confSection.Typ]
	if !found {
		//TODO: Add log info to indicate using "default"
		if ext, found = extensions["default"]; !found {
			return nil, fmt.Errorf("No type '%s' available to load for section '%s'",
				confSection.Typ, sectionName)
		}
	}

	obj := ext()
	loadedConfig, err := LoadConfigStruct(conf, obj)
	if err != nil {
		return nil, err
	}
	return obj, obj.Init(app, loadedConfig)
}
// Handles reading a TOML based configuration file, and loading an
// initialized Application, ready to Run
func LoadApplicationFromFileName(filename string, logging int) (app *Application, err error) {
	var configFile ConfigFile
	if _, err = toml.DecodeFile(filename, &configFile); err != nil {
		return nil, fmt.Errorf("Error decoding config file: %s", err)
	}
	loaders := PluginLoaders{
		PluginApp: func(app *Application) (HasConfigStruct, error) {
			return nil, LoadConfigForSection(nil, "default", app, configFile)
		},
		PluginLogger: func(app *Application) (HasConfigStruct, error) {
			return LoadExtensibleSection(app, "logging", AvailableLoggers, configFile)
		},
		PluginPinger: func(app *Application) (HasConfigStruct, error) {
			return LoadExtensibleSection(app, "propping", AvailablePings, configFile)
		},
		PluginMetrics: func(app *Application) (HasConfigStruct, error) {
			metrics := new(Metrics)
			if err := LoadConfigForSection(app, "metrics", metrics, configFile); err != nil {
				return nil, err
			}
			return metrics, nil
		},
		PluginStore: func(app *Application) (HasConfigStruct, error) {
			return LoadExtensibleSection(app, "storage", AvailableStores, configFile)
		},
		PluginRouter: func(app *Application) (HasConfigStruct, error) {
			router := NewRouter()
			if err := LoadConfigForSection(app, "router", router, configFile); err != nil {
				return nil, err
			}
			return router, nil
		},
		PluginLocator: func(app *Application) (HasConfigStruct, error) {
			return LoadExtensibleSection(app, "discovery", AvailableLocators, configFile)
		},
		PluginServer: func(app *Application) (HasConfigStruct, error) {
			serv := NewServer()
			configStruct := serv.ConfigStruct()
			// FIX: use a closure-local error instead of clobbering the
			// enclosing function's named return value `err`.
			if err := toml.PrimitiveDecode(configFile["default"], configStruct); err != nil {
				return nil, err
			}
			// FIX: serv.Init's error was silently discarded.
			if err := serv.Init(app, configStruct); err != nil {
				return nil, err
			}
			return serv, nil
		},
		PluginHandlers: func(app *Application) (HasConfigStruct, error) {
			handlers := new(Handler)
			// FIX: handlers.Init's error was silently discarded.
			if err := handlers.Init(app, nil); err != nil {
				return nil, err
			}
			return handlers, nil
		},
	}
	return loaders.Load(logging)
}
Config: Load proprietary pings after storage.
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// Most of this is copied straight from heka's TOML config setup
package simplepush
import (
"fmt"
"reflect"
"regexp"
"github.com/bbangert/toml"
)
// Extensible sections
// AvailableExtensions maps a type name (the "type" key of a config
// section) to a factory for that extension's plugin.
type AvailableExtensions map[string]func() HasConfigStruct

// ExtensibleGlobals holds the keys shared by every extensible section.
type ExtensibleGlobals struct {
	Typ string `toml:"type"` // selects which registered extension to load
}

// Generic config section with any set of options
type ConfigFile map[string]toml.Primitive

// Interface for something that needs a set of config options
type HasConfigStruct interface {
	// Returns a default-value-populated configuration structure into which
	// the plugin's TOML configuration will be deserialized.
	ConfigStruct() interface{}
	// The configuration loaded after ConfigStruct will be passed in
	// Throwing an error here will cause the application to stop loading
	Init(app *Application, config interface{}) error
}

// unknownOptionRegex extracts the offending key name from
// toml.PrimitiveDecodeStrict's "Configuration contains key [...]" error.
var unknownOptionRegex = regexp.MustCompile("^Configuration contains key \\[(?P<key>\\S+)\\]")
// Plugin types
// PluginType identifies each pluggable subsystem; values start at 1 so
// the zero value is never a valid plugin.
type PluginType int

const (
	PluginApp PluginType = iota + 1
	PluginLogger
	PluginPinger
	PluginMetrics
	PluginStore
	PluginRouter
	PluginLocator
	PluginServer
	PluginHandlers
)
// pluginNames maps each PluginType to its human-readable name, used by
// String() in error messages such as "Missing plugin loader for %s".
var pluginNames = map[PluginType]string{
	PluginApp:      "app",
	PluginLogger:   "logger",
	PluginPinger:   "pinger",
	PluginMetrics:  "metrics",
	PluginStore:    "store",
	PluginRouter:   "router", // FIX: entry was missing, so PluginRouter.String() returned ""
	PluginLocator:  "locator",
	PluginServer:   "server",
	PluginHandlers: "handlers",
}
// String returns the plugin's registered name. FIX: unknown values now
// fall back to a numeric form instead of silently yielding "", which
// produced blank names in error messages.
func (t PluginType) String() string {
	if name, ok := pluginNames[t]; ok {
		return name
	}
	return fmt.Sprintf("PluginType(%d)", int(t))
}
// Plugin loaders
// PluginLoaders maps each plugin type to the factory that builds it.
type PluginLoaders map[PluginType]func(*Application) (HasConfigStruct, error)

// loadPlugin invokes the registered loader for plugin, or errors when
// none is registered.
func (l PluginLoaders) loadPlugin(plugin PluginType, app *Application) (HasConfigStruct, error) {
	loader, ok := l[plugin]
	if !ok {
		return nil, fmt.Errorf("Missing plugin loader for %s", plugin)
	}
	return loader(app)
}
// Load wires up an Application from the registered plugin loaders, in
// dependency order: app, logger, metrics, storage, proprietary pinger,
// router, locator, server, handlers. The sequence is significant —
// each stage relies on setters called by the previous stages.
func (l PluginLoaders) Load(logging int) (*Application, error) {
	var (
		obj HasConfigStruct
		err error
	)
	// We have a somewhat convoluted setup process to ensure prerequisites are
	// available on the Application at each stage of application setup
	// Setup the base application first
	app := new(Application)
	if obj, err = l.loadPlugin(PluginApp, app); err != nil {
		return nil, err
	}
	// Next, many things require the logger, and logger has no other deps
	if obj, err = l.loadPlugin(PluginLogger, app); err != nil {
		return nil, err
	}
	logger := obj.(Logger)
	if logging > 0 {
		// A CLI-supplied level overrides whatever the config set.
		logger.SetFilter(LogLevel(logging))
		logger.Log(LogLevel(logging), "config", "Setting minimum logging level from CLI",
			LogFields{"level": fmt.Sprintf("%d", LogLevel(logging))})
	}
	if err = app.SetLogger(logger); err != nil {
		return nil, err
	}
	// Next, metrics, Deps: Logger
	if obj, err = l.loadPlugin(PluginMetrics, app); err != nil {
		return nil, err
	}
	metrics := obj.(*Metrics)
	if err = app.SetMetrics(metrics); err != nil {
		return nil, err
	}
	// Next, storage, Deps: Logger, Metrics
	if obj, err = l.loadPlugin(PluginStore, app); err != nil {
		return nil, err
	}
	store := obj.(Store)
	if err = app.SetStore(store); err != nil {
		return nil, err
	}
	// Load the Proprietary Ping element. Deps: Logger, Metrics, Storage
	if obj, err = l.loadPlugin(PluginPinger, app); err != nil {
		return nil, err
	}
	propping := obj.(PropPinger)
	if err = app.SetPropPinger(propping); err != nil {
		return nil, err
	}
	// Next, setup the router, Deps: Logger, Metrics
	if obj, err = l.loadPlugin(PluginRouter, app); err != nil {
		return nil, err
	}
	router := obj.(*Router)
	if err = app.SetRouter(router); err != nil {
		return nil, err
	}
	// Set up the node discovery mechanism. Deps: Logger, Metrics, Router.
	if obj, err = l.loadPlugin(PluginLocator, app); err != nil {
		return nil, err
	}
	locator := obj.(Locator)
	if err = router.SetLocator(locator); err != nil {
		return nil, err
	}
	// Finally, setup the handlers, Deps: Logger, Metrics
	if obj, err = l.loadPlugin(PluginServer, app); err != nil {
		return nil, err
	}
	serv := obj.(*Serv)
	app.SetServer(serv)
	if obj, err = l.loadPlugin(PluginHandlers, app); err != nil {
		return nil, err
	}
	handlers := obj.(*Handler)
	app.SetHandlers(handlers)
	return app, nil
}
// LoadConfigStruct decodes a TOML primitive into the structure provided
// by configable, tolerating the shared "global" keys declared on
// ExtensibleGlobals while rejecting any other unknown option.
func LoadConfigStruct(config toml.Primitive, configable HasConfigStruct) (
	configStruct interface{}, err error) {
	configStruct = configable.ConfigStruct()

	// Collect the globally-permitted key names (TOML tag when present,
	// otherwise the Go field name) via reflection on ExtensibleGlobals.
	allowed := make(map[string]interface{})
	gt := reflect.ValueOf(ExtensibleGlobals{}).Type()
	for i := 0; i < gt.NumField(); i++ {
		field := gt.Field(i)
		key := field.Tag.Get("toml")
		if key == "" {
			key = field.Name
		}
		allowed[key] = true
	}

	if err = toml.PrimitiveDecodeStrict(config, configStruct, allowed); err != nil {
		configStruct = nil
		if m := unknownOptionRegex.FindStringSubmatch(err.Error()); len(m) == 2 {
			// We've got an unrecognized config option.
			err = fmt.Errorf("Unknown config setting: %s", m[1])
		}
	}
	return
}
// Loads the config for a section supplied, configures the supplied object, and initializes
func LoadConfigForSection(app *Application, sectionName string, obj HasConfigStruct,
	configFile ConfigFile) (err error) {
	conf, ok := configFile[sectionName]
	if !ok {
		return fmt.Errorf("Error loading config file, section: %s", sectionName)
	}
	confStruct := obj.ConfigStruct()
	if err = toml.PrimitiveDecode(conf, confStruct); err != nil {
		return fmt.Errorf("Unable to decode config for section '%s': %s", sectionName, err)
	}
	return obj.Init(app, confStruct)
}
// Load an extensible section that has a type keyword
// The "type" key selects which registered extension to instantiate;
// an unregistered type falls back to the "default" extension.
func LoadExtensibleSection(app *Application, sectionName string,
	extensions AvailableExtensions, configFile ConfigFile) (HasConfigStruct, error) {
	conf, ok := configFile[sectionName]
	if !ok {
		return nil, fmt.Errorf("Error loading config file, section: %s", sectionName)
	}

	confSection := new(ExtensibleGlobals)
	if err := toml.PrimitiveDecode(conf, confSection); err != nil {
		return nil, err
	}

	ext, found := extensions[confSection.Typ]
	if !found {
		//TODO: Add log info to indicate using "default"
		if ext, found = extensions["default"]; !found {
			return nil, fmt.Errorf("No type '%s' available to load for section '%s'",
				confSection.Typ, sectionName)
		}
	}

	obj := ext()
	loadedConfig, err := LoadConfigStruct(conf, obj)
	if err != nil {
		return nil, err
	}
	return obj, obj.Init(app, loadedConfig)
}
// Handles reading a TOML based configuration file, and loading an
// initialized Application, ready to Run
func LoadApplicationFromFileName(filename string, logging int) (app *Application, err error) {
	var configFile ConfigFile
	if _, err = toml.DecodeFile(filename, &configFile); err != nil {
		return nil, fmt.Errorf("Error decoding config file: %s", err)
	}
	loaders := PluginLoaders{
		PluginApp: func(app *Application) (HasConfigStruct, error) {
			return nil, LoadConfigForSection(nil, "default", app, configFile)
		},
		PluginLogger: func(app *Application) (HasConfigStruct, error) {
			return LoadExtensibleSection(app, "logging", AvailableLoggers, configFile)
		},
		PluginPinger: func(app *Application) (HasConfigStruct, error) {
			return LoadExtensibleSection(app, "propping", AvailablePings, configFile)
		},
		PluginMetrics: func(app *Application) (HasConfigStruct, error) {
			metrics := new(Metrics)
			if err := LoadConfigForSection(app, "metrics", metrics, configFile); err != nil {
				return nil, err
			}
			return metrics, nil
		},
		PluginStore: func(app *Application) (HasConfigStruct, error) {
			return LoadExtensibleSection(app, "storage", AvailableStores, configFile)
		},
		PluginRouter: func(app *Application) (HasConfigStruct, error) {
			router := NewRouter()
			if err := LoadConfigForSection(app, "router", router, configFile); err != nil {
				return nil, err
			}
			return router, nil
		},
		PluginLocator: func(app *Application) (HasConfigStruct, error) {
			return LoadExtensibleSection(app, "discovery", AvailableLocators, configFile)
		},
		PluginServer: func(app *Application) (HasConfigStruct, error) {
			serv := NewServer()
			configStruct := serv.ConfigStruct()
			// FIX: use a closure-local error instead of clobbering the
			// enclosing function's named return value `err`.
			if err := toml.PrimitiveDecode(configFile["default"], configStruct); err != nil {
				return nil, err
			}
			// FIX: serv.Init's error was silently discarded.
			if err := serv.Init(app, configStruct); err != nil {
				return nil, err
			}
			return serv, nil
		},
		PluginHandlers: func(app *Application) (HasConfigStruct, error) {
			handlers := new(Handler)
			// FIX: handlers.Init's error was silently discarded.
			if err := handlers.Init(app, nil); err != nil {
				return nil, err
			}
			return handlers, nil
		},
	}
	return loaders.Load(logging)
}
|
package match
import (
"fmt"
"github.com/tisp-lang/tisp/src/lib/ast"
)
// valueRenamer rewrites variable names inside AST values according to a
// fixed old-name -> new-name mapping.
type valueRenamer struct {
	nameMap map[string]string // old name -> new name; unmapped names pass through
}

// newValueRenamer creates a valueRenamer that applies the given name map.
func newValueRenamer(m map[string]string) valueRenamer {
	return valueRenamer{m}
}
// rename returns a copy of v with every string name substituted through
// r.nameMap. It recurses structurally into ast.App and ast.Switch nodes;
// any other node type is considered invalid and panics.
func (r valueRenamer) rename(v interface{}) interface{} {
	switch x := v.(type) {
	case string:
		// Leaf: a bare name. Names absent from the map are kept as-is.
		if n, ok := r.nameMap[x]; ok {
			return n
		}
		return x
	case ast.App:
		// Rebuild the application, renaming the function expression and
		// every positional, keyword and expanded-dict argument.
		ps := x.Arguments().Positionals()
		newPs := make([]ast.PositionalArgument, 0, len(ps))
		for _, p := range ps {
			newPs = append(newPs, ast.NewPositionalArgument(r.rename(p.Value()), p.Expanded()))
		}
		ks := x.Arguments().Keywords()
		newKs := make([]ast.KeywordArgument, 0, len(ks))
		for _, k := range ks {
			// Keyword names are argument labels, not variables; only the
			// value is renamed.
			newKs = append(newKs, ast.NewKeywordArgument(k.Name(), r.rename(k.Value())))
		}
		ds := x.Arguments().ExpandedDicts()
		newDs := make([]interface{}, 0, len(ds))
		for _, d := range ds {
			newDs = append(newDs, r.rename(d))
		}
		return ast.NewApp(
			r.rename(x.Function()),
			ast.NewArguments(newPs, newKs, newDs),
			x.DebugInfo())
	case ast.Switch:
		// Case patterns are left untouched; case values, the default case
		// and the matched value are renamed.
		cs := make([]ast.SwitchCase, 0, len(x.Cases()))
		for _, c := range x.Cases() {
			cs = append(cs, ast.NewSwitchCase(c.Pattern(), r.rename(c.Value())))
		}
		d := interface{}(nil)
		if x.DefaultCase() != nil {
			d = r.rename(x.DefaultCase())
		}
		return newSwitch(r.rename(x.Value()), cs, d)
	}
	panic(fmt.Errorf("Invalid value: %#v", v))
}
Refactor value_renamer.go
package match
import (
"fmt"
"github.com/tisp-lang/tisp/src/lib/ast"
)
// valueRenamer substitutes variable names in AST values using an
// old-name -> new-name table.
type valueRenamer struct {
	nameMap map[string]string // names not present in the map are left alone
}

// newValueRenamer wraps the given mapping in a valueRenamer.
func newValueRenamer(m map[string]string) valueRenamer {
	return valueRenamer{m}
}
// rename returns a copy of v with every string name mapped through
// r.nameMap, recursing into ast.App and ast.Switch nodes. Any other node
// type panics: the caller is expected to pass only supported AST values.
func (r valueRenamer) rename(v interface{}) interface{} {
	switch x := v.(type) {
	case string:
		// Leaf name: substitute when mapped, otherwise keep.
		if n, ok := r.nameMap[x]; ok {
			return n
		}
		return x
	case ast.App:
		args := x.Arguments()
		// Rename every positional value; expansion flags are preserved.
		ps := make([]ast.PositionalArgument, 0, len(args.Positionals()))
		for _, p := range args.Positionals() {
			ps = append(ps, ast.NewPositionalArgument(r.rename(p.Value()), p.Expanded()))
		}
		// Keyword labels stay fixed; only their values are renamed.
		ks := make([]ast.KeywordArgument, 0, len(args.Keywords()))
		for _, k := range args.Keywords() {
			ks = append(ks, ast.NewKeywordArgument(k.Name(), r.rename(k.Value())))
		}
		ds := make([]interface{}, 0, len(args.ExpandedDicts()))
		for _, d := range args.ExpandedDicts() {
			ds = append(ds, r.rename(d))
		}
		return ast.NewApp(r.rename(x.Function()), ast.NewArguments(ps, ks, ds), x.DebugInfo())
	case ast.Switch:
		// Patterns are untouched; case values, default case and the
		// matched value are renamed.
		cs := make([]ast.SwitchCase, 0, len(x.Cases()))
		for _, c := range x.Cases() {
			cs = append(cs, ast.NewSwitchCase(c.Pattern(), r.rename(c.Value())))
		}
		d := interface{}(nil)
		if x.DefaultCase() != nil {
			d = r.rename(x.DefaultCase())
		}
		return newSwitch(r.rename(x.Value()), cs, d)
	}
	panic(fmt.Errorf("Invalid value: %#v", v))
}
|
// Copyright (c) 2017, 0qdk4o. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"fmt"
)
// Tow array A and B have same count elements as N, elements come from A add elements
// from B produce new array that having N*N count elements, how to select smallest
// K elements from new array
// VIA: google interview? maybe
// suppose:
// A: 2, 6, 4, 7, 15, 9
// B: 5, 12, 11, 4, 9, 13
// K: 10
// so:
// N = 6
// len(sum array) = 6*6 = 36
//
// select K elements from A and B and sort these elemens
// * K >= N, sort A and B directly
// * K < N, select smallest K elements from A and B, then sort
//
// sum array looks like:
// 2+4 2+5 2+9 2+11 2+12 2+13
// 4+4 4+5 4+9 4+11 4+12 4+13
// 6+4 6+5 6+9 6+11 6+12 6+13
// 7+4 7+5 7+9 7+11 7+12 7+13
// 9+4 9+5 9+9 9+11 9+12 9+13
// 15+4 15+5 15+9 15+11 15+12 15+13
//
// A[0]+B[0] = 2+4, this element is smallest value in the new sum array
// from left to right in the same row, the sum value is increase by degrees
// from top to bottom in the same column, the sum value is increase by degrees
// But it can not say that values from diffrent row and column are increase
// by degrees. such as A[0] + B[2] = 2+9, A[2] + B[1] = 6+5. we should compare
// values from diffrent row and column and find next smallest element from sum
// array
// nextSmallest pops and returns the candidate index pair from the compare
// array c whose sum a[i]+b[j] is smallest. Each element of c packs a pair
// as i<<32 | j. A single backward scan swaps the current minimum to the
// tail; the slice is then shortened by one, so *c shrinks on every call.
// It panics when c is empty.
func nextSmallest(a, b []int, c *[]int64) (res int64) {
	csize := len(*c)
	if csize < 1 {
		panic("invalid compare array")
	}
	var sum int
	for n := csize - 1; n >= 0; n-- {
		tempi := (*c)[n] >> 32
		tempj := (*c)[n] & 0xffffffff
		if n == csize-1 {
			// Seed the running minimum with the tail element.
			sum = a[tempi] + b[tempj]
			continue
		}
		if sum > a[tempi]+b[tempj] {
			// Smaller sum found: keep it parked at the tail slot.
			sum = a[tempi] + b[tempj]
			(*c)[n], (*c)[csize-1] = (*c)[csize-1], (*c)[n]
		}
	}
	res = (*c)[csize-1]
	*c = (*c)[:csize-1]
	return
}
// appendToComarr appends the packed index pair v to the compare array c,
// doing nothing when an identical pair is already present.
func appendToComarr(c *[]int64, v int64) {
	for _, existing := range *c {
		if existing == v {
			return
		}
	}
	*c = append(*c, v)
}
// selectSumK returns the k smallest values of the implicit N*N sum matrix
// {a[i]+b[j]} in ascending order. a and b must have equal length and be
// sorted ascending. It walks the matrix from (0,0), offering the right and
// down neighbors of each consumed cell into a compare array and popping the
// smallest with nextSmallest.
//
// Robustness fix: the original indexed sumArray[0] and drained the compare
// array without validating k, panicking for k < 1 or k > len(a)*len(a);
// such requests now return nil, matching the existing nil-on-bad-input
// behavior for mismatched lengths.
func selectSumK(a, b []int, k int) []int {
	if len(a) != len(b) {
		return nil
	}
	size := len(a)
	if k < 1 || k > size*size {
		// The sum matrix has exactly size*size entries.
		return nil
	}
	sumArray := make([]int, k)
	count := 1
	i := 0
	j := 0
	sumArray[0] = a[i] + b[j]
	comarr := make([]int64, 0, k)
	for count < k {
		// Offer the right and down neighbors of the cell just consumed.
		if i+1 < size {
			appendToComarr(&comarr, int64(i+1)<<32|int64(j))
		}
		if j+1 < size {
			appendToComarr(&comarr, int64(i)<<32|int64(j+1))
		}
		tempIndex := nextSmallest(a, b, &comarr)
		i = int(tempIndex >> 32)
		j = int(tempIndex & 0xffffffff)
		sumArray[count] = a[i] + b[j]
		count++
	}
	return sumArray
}
// quick select algorithm for sort
// qsort sorts a[start..end] in place using quicksort with a Hoare-style
// partition around the middle element. end is clamped to the last valid
// index, so callers may pass end beyond len(a)-1.
func qsort(a []int, start, end int) {
	if start >= end {
		return
	}
	if end >= len(a) {
		end = len(a) - 1
	}
	pivot := a[(start+end)/2]
	i := start
	j := end
	for {
		// Advance i past elements already left of the pivot ...
		for ; i <= end && a[i] < pivot; i++ {
		}
		// ... and retreat j past elements already right of it.
		for ; j >= start && a[j] > pivot; j-- {
		}
		if i >= j {
			break
		}
		a[i], a[j] = a[j], a[i]
		i++
		j--
	}
	// i and j have met or crossed; recurse on both partitions.
	qsort(a, start, i-1)
	qsort(a, j+1, end)
}
// quick select
// k specify smallest elements count
// quickSelectK partially orders a[start..end] in place so that the k
// smallest elements end up in a[0..k-1] (internally unsorted). It uses the
// same Hoare partition as qsort but recurses only into the side that still
// contains position k-1.
func quickSelectK(a []int, start, end, k int) {
	if start >= end || k-1 < start || k-1 > end {
		return
	}
	pivot := a[(start+end)/2]
	i := start
	j := end
	for {
		for ; i <= end && a[i] < pivot; i++ {
		}
		for ; j >= start && a[j] > pivot; j-- {
		}
		if i >= j {
			break
		}
		a[i], a[j] = a[j], a[i]
		i++
		j--
	}
	if k-1 < i {
		// Position k-1 lies in the left partition.
		quickSelectK(a, start, i-1, k)
	} else if k-1 > j {
		quickSelectK(a, j+1, end, k)
	}
}
// output: 6 7 8 9 10 11 11 11 12 13
// main demonstrates the pipeline on the example from the header comment:
// pre-select and sort the k smallest elements of A and B, then print the
// k smallest pairwise sums.
func main() {
	arrA := []int{2, 6, 4, 7, 15, 9}
	arrB := []int{5, 12, 11, 4, 9, 13}
	k := 10
	// Here k > len(arr), so quickSelectK's range guard makes it a no-op and
	// qsort (which clamps its end index) sorts the whole arrays.
	quickSelectK(arrA, 0, len(arrA)-1, k)
	quickSelectK(arrB, 0, len(arrB)-1, k)
	qsort(arrA, 0, k-1)
	qsort(arrB, 0, k-1)
	sumArr := selectSumK(arrA, arrB, k)
	for i := 0; i < k; i++ {
		fmt.Printf("%d ", sumArr[i])
	}
	fmt.Println()
}
improve select performance
There are two kinds of elements added to the compare array. Suppose the
current smallest sum is A[i] + B[j]; the next elements that need to be
added to the compare array are:
1) (A[i+1], B[j])
2) (A[i], B[j+1])
If the compare array already contains an element (A[i+1], B[j-1]) for 1),
or (A[i-1], B[j+1]) for 2), then element 1) or 2) is at least as large as
that existing element, so there is no need to add it to the compare array.
// Copyright (c) 2017, 0qdk4o. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"fmt"
)
// Tow array A and B have same count elements as N, elements come from A add elements
// from B produce new array that having N*N count elements, how to select smallest
// K elements from new array
// VIA: google interview? maybe
// suppose:
// A: 2, 6, 4, 7, 15, 9
// B: 5, 12, 11, 4, 9, 13
// K: 10
// so:
// N = 6
// len(sum array) = 6*6 = 36
//
// select K elements from A and B and sort these elemens
// * K >= N, sort A and B directly
// * K < N, select smallest K elements from A and B, then sort
//
// sum array looks like:
// 2+4 2+5 2+9 2+11 2+12 2+13
// 4+4 4+5 4+9 4+11 4+12 4+13
// 6+4 6+5 6+9 6+11 6+12 6+13
// 7+4 7+5 7+9 7+11 7+12 7+13
// 9+4 9+5 9+9 9+11 9+12 9+13
// 15+4 15+5 15+9 15+11 15+12 15+13
//
// A[0]+B[0] = 2+4, this element is smallest value in the new sum array
// from left to right in the same row, the sum value is increase by degrees
// from top to bottom in the same column, the sum value is increase by degrees
// But it can not say that values from diffrent row and column are increase
// by degrees. such as A[0] + B[2] = 2+9, A[2] + B[1] = 6+5. we should compare
// values from diffrent row and column and find next smallest element from sum
// array
// nextSmallest removes and returns the compare-array entry with the
// smallest sum a[i]+b[j]; entries pack index pairs as i<<32 | j. One
// backward pass swaps each new minimum to the tail, which is then popped,
// shrinking *c by one. Panics on an empty compare array.
func nextSmallest(a, b []int, c *[]int64) (res int64) {
	csize := len(*c)
	if csize < 1 {
		panic("invalid compare array")
	}
	var sum int
	for n := csize - 1; n >= 0; n-- {
		tempi := (*c)[n] >> 32
		tempj := (*c)[n] & 0xffffffff
		if n == csize-1 {
			// The tail element seeds the running minimum.
			sum = a[tempi] + b[tempj]
			continue
		}
		if sum > a[tempi]+b[tempj] {
			// Keep the smaller sum at the tail slot for the final pop.
			sum = a[tempi] + b[tempj]
			(*c)[n], (*c)[csize-1] = (*c)[csize-1], (*c)[n]
		}
	}
	res = (*c)[csize-1]
	*c = (*c)[:csize-1]
	return
}
// appendToComarr appends the packed index pair v (i<<32 | j) to the compare
// array c unless some element already in c dominates it, i.e. has both
// indices less than or equal to v's. With a and b sorted ascending a
// dominated pair cannot have a smaller sum than its dominator, so it is
// safe to drop it here; it will be offered again later via its
// predecessors once the dominator has been consumed.
func appendToComarr(c *[]int64, v int64) {
	vi, vj := v>>32, v&0xffffffff
	for _, e := range *c {
		if vi >= e>>32 && vj >= e&0xffffffff {
			return
		}
	}
	*c = append(*c, v)
}
// selectSumK returns the k smallest values of the implicit N*N sum matrix
// {a[i]+b[j]} in ascending order. Requires len(a) == len(b) (otherwise nil)
// and, for correct results, both slices sorted ascending. Starting from
// (0,0) it offers each consumed cell's right and down neighbors into the
// compare array and pops the smallest via nextSmallest. Note: k is not
// range-checked; k < 1 or k > N*N will panic downstream.
func selectSumK(a, b []int, k int) []int {
	if len(a) != len(b) {
		return nil
	}
	size := len(a)
	sumArray := make([]int, k)
	count := 1
	i := 0
	j := 0
	sumArray[0] = a[i] + b[j]
	comarr := make([]int64, 0, k)
	for count < k {
		// Offer the right and down neighbors of the cell just consumed;
		// appendToComarr drops pairs dominated by existing candidates.
		if i+1 < size {
			appendToComarr(&comarr, int64(i+1)<<32|int64(j))
		}
		if j+1 < size {
			appendToComarr(&comarr, int64(i)<<32|int64(j+1))
		}
		tempIndex := nextSmallest(a, b, &comarr)
		i = int(tempIndex >> 32)
		j = int(tempIndex & 0xffffffff)
		sumArray[count] = a[i] + b[j]
		count++
	}
	return sumArray
}
// quick select algorithm for sort
// qsort quicksorts a[start..end] in place (Hoare-style partition around the
// middle element). An end index past len(a)-1 is clamped.
func qsort(a []int, start, end int) {
	if start >= end {
		return
	}
	if end >= len(a) {
		end = len(a) - 1
	}
	pivot := a[(start+end)/2]
	i := start
	j := end
	for {
		// Scan inward from both ends past elements already on the correct
		// side of the pivot.
		for ; i <= end && a[i] < pivot; i++ {
		}
		for ; j >= start && a[j] > pivot; j-- {
		}
		if i >= j {
			break
		}
		a[i], a[j] = a[j], a[i]
		i++
		j--
	}
	// Recurse on the two partitions delimited by the crossed indices.
	qsort(a, start, i-1)
	qsort(a, j+1, end)
}
// quick select
// k specify smallest elements count
// quickSelectK rearranges a[start..end] in place so the k smallest elements
// occupy a[0..k-1] (not sorted among themselves). Recurses only into the
// partition still containing position k-1.
func quickSelectK(a []int, start, end, k int) {
	if start >= end || k-1 < start || k-1 > end {
		return
	}
	pivot := a[(start+end)/2]
	i := start
	j := end
	for {
		for ; i <= end && a[i] < pivot; i++ {
		}
		for ; j >= start && a[j] > pivot; j-- {
		}
		if i >= j {
			break
		}
		a[i], a[j] = a[j], a[i]
		i++
		j--
	}
	if k-1 < i {
		// Position k-1 fell into the left partition.
		quickSelectK(a, start, i-1, k)
	} else if k-1 > j {
		quickSelectK(a, j+1, end, k)
	}
}
// output: 6 7 8 9 10 11 11 11 12 13
// main runs the worked example from the header comment: pre-select and sort
// the k smallest elements of each array, then print the k smallest sums.
func main() {
	arrA := []int{2, 6, 4, 7, 15, 9}
	arrB := []int{5, 12, 11, 4, 9, 13}
	k := 10
	// With k > len(arr), quickSelectK returns immediately and qsort (which
	// clamps its end index) sorts both arrays fully.
	quickSelectK(arrA, 0, len(arrA)-1, k)
	quickSelectK(arrB, 0, len(arrB)-1, k)
	qsort(arrA, 0, k-1)
	qsort(arrB, 0, k-1)
	sumArr := selectSumK(arrA, arrB, k)
	for i := 0; i < k; i++ {
		fmt.Printf("%d ", sumArr[i])
	}
	fmt.Println()
}
|
package main
import (
"flag"
"fmt"
"github.com/hanwen/termite/termite"
"log"
"os"
"path/filepath"
"rpc"
"strings"
)
// TODO - this file is a mess. Clean it up.

// _SHELL is the shell used both to recognize "sh -c" invocations of this
// wrapper (os.Args checks) and to run commands locally or on the master.
const _SHELL = "/bin/sh"
// TryRunDirect executes trivial shell commands (":", "echo", "true",
// "false") in-process and exits with the corresponding status. Commands it
// does not recognize fall through so the caller can dispatch them.
func TryRunDirect(cmd string) {
	if cmd == ":" {
		os.Exit(0)
	}
	parsed := termite.ParseCommand(cmd)
	if len(parsed) == 0 {
		return
	}
	switch parsed[0] {
	case "echo":
		fmt.Println(strings.Join(parsed[1:], " "))
		os.Exit(0)
	case "true":
		os.Exit(0)
	case "false":
		os.Exit(1)
	}
	// TODO mkdir, rm, expr, others?
}
// Refresh asks the local termite master, over its RPC socket, to refresh
// its file attribute cache. RPC failure is fatal.
func Refresh() {
	socket := termite.FindSocket()
	conn := termite.OpenSocketConnection(socket, termite.RPC_CHANNEL)
	client := rpc.NewClient(conn)
	// The call carries no meaningful payload; dummy ints satisfy the
	// request/reply signature.
	req := 1
	rep := 1
	err := client.Call("LocalMaster.RefreshAttributeCache", &req, &rep)
	client.Close()
	if err != nil {
		log.Fatal("LocalMaster.RefreshAttributeCache: ", err)
	}
	conn.Close()
}
// cleanEnv returns a copy of the environment entries in input, adjusted for
// a non-recursive local run: any variable whose value is exactly
// "termite-make" is rewritten to "make", and MAKE_SHELL is dropped.
//
// Fix: entries without an "=" made SplitN return a single component, so the
// original panicked on comps[1]; such entries now pass through unchanged.
func cleanEnv(input []string) []string {
	env := []string{}
	for _, v := range input {
		comps := strings.SplitN(v, "=", 2)
		if len(comps) == 2 {
			if comps[1] == "termite-make" {
				// TODO - more generic.
				v = fmt.Sprintf("%s=%s", comps[0], "make")
			} else if comps[0] == "MAKE_SHELL" {
				continue
			}
		}
		env = append(env, v)
	}
	return env
}
// Inspect prints, via the master's InspectFile RPC, the attributes the
// master has for each given file. Relative paths are resolved against the
// current working directory; RPC failure is fatal.
func Inspect(files []string) {
	socket := termite.FindSocket()
	conn := termite.OpenSocketConnection(socket, termite.RPC_CHANNEL)
	client := rpc.NewClient(conn)
	wd, _ := os.Getwd()
	for _, p := range files {
		if p[0] != '/' {
			p = filepath.Join(wd, p)
		}
		req := termite.AttrRequest{Name: p}
		rep := termite.AttrResponse{}
		err := client.Call("LocalMaster.InspectFile", &req, &rep)
		if err != nil {
			log.Fatal("LocalMaster.InspectFile: ", err)
		}
		for _, a := range rep.Attrs {
			log.Printf("%v", *a)
		}
	}
}
// TryRunLocally runs the command with the local shell when the decider's
// rules for topdir say it should. It only applies when this wrapper was
// invoked as "/bin/sh -c <cmd>" (main rewrites os.Args[0] accordingly);
// otherwise both results stay zero-valued. On a local run it returns the
// process wait status and the matching rule; startup/wait failures are
// fatal.
func TryRunLocally(command string, topdir string) (exit *os.Waitmsg, rule termite.LocalRule) {
	decider := termite.NewLocalDecider(topdir)
	if !(len(os.Args) == 3 && os.Args[0] == _SHELL && os.Args[1] == "-c") {
		return
	}
	rule = decider.ShouldRunLocally(command)
	if rule.Local {
		env := os.Environ()
		if !rule.Recurse {
			// Non-recursive runs get the scrubbed environment.
			env = cleanEnv(env)
		}
		proc, err := os.StartProcess(_SHELL, os.Args, &os.ProcAttr{
			Env:   env,
			Files: []*os.File{os.Stdin, os.Stdout, os.Stderr},
		})
		if err != nil {
			log.Fatalf("os.StartProcess() for %s: %v", command, err)
		}
		msg, err := proc.Wait(0)
		if err != nil {
			log.Fatalf("proc.Wait() for %s: %v", command, err)
		}
		return msg, rule
	}
	return
}
// main is the termite shell-wrapper entry point. It mimics "/bin/sh -c":
// trivial commands run in-process, commands matching local rules run
// locally, and everything else is shipped to the termite master over RPC.
func main() {
	command := flag.String("c", "", "command to run.")
	refresh := flag.Bool("refresh", false, "refresh master file cache.")
	inspect := flag.Bool("inspect", false, "inspect files on master.")
	debug := flag.Bool("dbg", false, "set on debugging in request.")
	flag.Parse()
	if *refresh {
		Refresh()
	}
	if *inspect {
		Inspect(flag.Args())
	}
	if *command == "" {
		return
	}
	// Pretend to be the shell so TryRunLocally's argv check matches.
	os.Args[0] = _SHELL
	TryRunDirect(*command)
	socket := termite.FindSocket()
	if socket == "" {
		log.Fatal("Could not find .termite-socket")
	}
	topDir, _ := filepath.Split(socket)
	localWaitMsg, localRule := TryRunLocally(*command, topDir)
	if localWaitMsg != nil && !localRule.SkipRefresh {
		// A local run may have touched files; refresh the master's cache.
		Refresh()
	}
	wd, err := os.Getwd()
	if err != nil {
		log.Fatal("Getwd", err)
	}
	conn := termite.OpenSocketConnection(socket, termite.RPC_CHANNEL)
	// TODO - could skip the shell if we can deduce it is a
	// no-frills command invocation.
	req := termite.WorkRequest{
		Binary:     _SHELL,
		Argv:       []string{"/bin/sh", "-c", *command},
		Env:        cleanEnv(os.Environ()),
		Dir:        wd,
		RanLocally: localWaitMsg != nil,
	}
	req.Debug = localRule.Debug || os.Getenv("TERMITE_DEBUG") != "" || *debug
	client := rpc.NewClient(conn)
	// Fix: the reply type of LocalMaster.Run is WorkResponse, not WorkReply
	// (consistent with the corrected wrapper).
	rep := termite.WorkResponse{}
	err = client.Call("LocalMaster.Run", &req, &rep)
	client.Close()
	if err != nil {
		log.Fatal("LocalMaster.Run: ", err)
	}
	os.Stdout.Write([]byte(rep.Stdout))
	os.Stderr.Write([]byte(rep.Stderr))
	// TODO -something with signals.
	if localWaitMsg == nil {
		localWaitMsg = &rep.Exit
		if localWaitMsg.ExitStatus() != 0 {
			log.Printf("Failed: %q", *command)
		}
	}
	conn.Close()
	os.Exit(localWaitMsg.ExitStatus())
}
Fix shell-wrapper.
package main
import (
"flag"
"fmt"
"github.com/hanwen/termite/termite"
"log"
"os"
"path/filepath"
"rpc"
"strings"
)
// TODO - this file is a mess. Clean it up.

// _SHELL is the shell binary this wrapper impersonates and delegates to.
const _SHELL = "/bin/sh"
// TryRunDirect handles trivial shell commands (":", "echo", "true",
// "false") in-process, exiting with the matching status; anything else
// returns so the caller can run it locally or remotely.
func TryRunDirect(cmd string) {
	if cmd == ":" {
		os.Exit(0)
	}
	parsed := termite.ParseCommand(cmd)
	if len(parsed) == 0 {
		return
	}
	if parsed[0] == "echo" {
		fmt.Println(strings.Join(parsed[1:], " "))
		os.Exit(0)
	}
	if parsed[0] == "true" {
		os.Exit(0)
	}
	if parsed[0] == "false" {
		os.Exit(1)
	}
	// TODO mkdir, rm, expr, others?
}
// Refresh tells the local master over RPC to refresh its attribute cache;
// an RPC error is fatal.
func Refresh() {
	socket := termite.FindSocket()
	conn := termite.OpenSocketConnection(socket, termite.RPC_CHANNEL)
	client := rpc.NewClient(conn)
	// No payload needed; dummy ints fill the request/reply slots.
	req := 1
	rep := 1
	err := client.Call("LocalMaster.RefreshAttributeCache", &req, &rep)
	client.Close()
	if err != nil {
		log.Fatal("LocalMaster.RefreshAttributeCache: ", err)
	}
	conn.Close()
}
// cleanEnv returns a scrubbed copy of the environment for non-recursive
// local runs: values equal to "termite-make" become "make", and MAKE_SHELL
// is dropped entirely.
//
// Fix: an entry without "=" yields a single SplitN component, so the
// original panicked on comps[1]; such entries are now kept unchanged.
func cleanEnv(input []string) []string {
	env := []string{}
	for _, v := range input {
		comps := strings.SplitN(v, "=", 2)
		if len(comps) == 2 {
			if comps[1] == "termite-make" {
				// TODO - more generic.
				v = fmt.Sprintf("%s=%s", comps[0], "make")
			} else if comps[0] == "MAKE_SHELL" {
				continue
			}
		}
		env = append(env, v)
	}
	return env
}
// Inspect asks the master for its cached attributes of each file and logs
// them. Relative paths are made absolute against the working directory.
func Inspect(files []string) {
	socket := termite.FindSocket()
	conn := termite.OpenSocketConnection(socket, termite.RPC_CHANNEL)
	client := rpc.NewClient(conn)
	wd, _ := os.Getwd()
	for _, p := range files {
		if p[0] != '/' {
			p = filepath.Join(wd, p)
		}
		req := termite.AttrRequest{Name: p}
		rep := termite.AttrResponse{}
		err := client.Call("LocalMaster.InspectFile", &req, &rep)
		if err != nil {
			log.Fatal("LocalMaster.InspectFile: ", err)
		}
		for _, a := range rep.Attrs {
			log.Printf("%v", *a)
		}
	}
}
// TryRunLocally executes the command with the local shell when the decider
// rules for topdir allow it. Applies only to "/bin/sh -c <cmd>" style argv
// (set up by main); otherwise returns zero values. On a local run it
// returns the wait status plus the rule that matched.
func TryRunLocally(command string, topdir string) (exit *os.Waitmsg, rule termite.LocalRule) {
	decider := termite.NewLocalDecider(topdir)
	if !(len(os.Args) == 3 && os.Args[0] == _SHELL && os.Args[1] == "-c") {
		return
	}
	rule = decider.ShouldRunLocally(command)
	if rule.Local {
		env := os.Environ()
		if !rule.Recurse {
			// Scrub make-related variables for non-recursive invocations.
			env = cleanEnv(env)
		}
		proc, err := os.StartProcess(_SHELL, os.Args, &os.ProcAttr{
			Env:   env,
			Files: []*os.File{os.Stdin, os.Stdout, os.Stderr},
		})
		if err != nil {
			log.Fatalf("os.StartProcess() for %s: %v", command, err)
		}
		msg, err := proc.Wait(0)
		if err != nil {
			log.Fatalf("proc.Wait() for %s: %v", command, err)
		}
		return msg, rule
	}
	return
}
// main is the termite shell-wrapper entry point: after handling the
// -refresh/-inspect utility flags it runs the -c command either directly,
// locally (per master rules) or remotely via the LocalMaster.Run RPC, and
// exits with the command's status.
func main() {
	command := flag.String("c", "", "command to run.")
	refresh := flag.Bool("refresh", false, "refresh master file cache.")
	inspect := flag.Bool("inspect", false, "inspect files on master.")
	debug := flag.Bool("dbg", false, "set on debugging in request.")
	flag.Parse()
	if *refresh {
		Refresh()
	}
	if *inspect {
		Inspect(flag.Args())
	}
	if *command == "" {
		return
	}
	// Pose as the shell so TryRunLocally's argv check matches.
	os.Args[0] = _SHELL
	TryRunDirect(*command)
	socket := termite.FindSocket()
	if socket == "" {
		log.Fatal("Could not find .termite-socket")
	}
	topDir, _ := filepath.Split(socket)
	localWaitMsg, localRule := TryRunLocally(*command, topDir)
	if localWaitMsg != nil && !localRule.SkipRefresh {
		// A local run may have modified files; refresh the master cache.
		Refresh()
	}
	wd, err := os.Getwd()
	if err != nil {
		log.Fatal("Getwd", err)
	}
	conn := termite.OpenSocketConnection(socket, termite.RPC_CHANNEL)
	// TODO - could skip the shell if we can deduce it is a
	// no-frills command invocation.
	req := termite.WorkRequest{
		Binary:     _SHELL,
		Argv:       []string{"/bin/sh", "-c", *command},
		Env:        cleanEnv(os.Environ()),
		Dir:        wd,
		RanLocally: localWaitMsg != nil,
	}
	req.Debug = localRule.Debug || os.Getenv("TERMITE_DEBUG") != "" || *debug
	client := rpc.NewClient(conn)
	rep := termite.WorkResponse{}
	err = client.Call("LocalMaster.Run", &req, &rep)
	client.Close()
	if err != nil {
		log.Fatal("LocalMaster.Run: ", err)
	}
	os.Stdout.Write([]byte(rep.Stdout))
	os.Stderr.Write([]byte(rep.Stderr))
	// TODO -something with signals.
	if localWaitMsg == nil {
		// Remote run: adopt the remote exit status, logging failures.
		localWaitMsg = &rep.Exit
		if localWaitMsg.ExitStatus() != 0 {
			log.Printf("Failed: %q", *command)
		}
	}
	conn.Close()
	os.Exit(localWaitMsg.ExitStatus())
}
|
package croc
import (
"bytes"
"crypto/rand"
"encoding/binary"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math"
"net"
"os"
"path"
"path/filepath"
"strings"
"sync"
"time"
"github.com/denisbrodbeck/machineid"
"github.com/pkg/errors"
"github.com/schollz/croc/v8/src/comm"
"github.com/schollz/croc/v8/src/compress"
"github.com/schollz/croc/v8/src/crypt"
"github.com/schollz/croc/v8/src/message"
"github.com/schollz/croc/v8/src/models"
"github.com/schollz/croc/v8/src/tcp"
"github.com/schollz/croc/v8/src/utils"
log "github.com/schollz/logger"
"github.com/schollz/pake/v2"
"github.com/schollz/peerdiscovery"
"github.com/schollz/progressbar/v3"
"github.com/schollz/spinner"
"github.com/tscholl2/siec"
)
// init enables debug logging while the package loads; New() later resets
// the level from Options.Debug via Debug().
func init() {
	log.SetLevel("debug")
}
// Debug toggles debug mode: log level "debug" when enabled, "warn" when
// disabled.
func Debug(debug bool) {
	level := "warn"
	if debug {
		level = "debug"
	}
	log.SetLevel(level)
}
// Options specifies user specific options
type Options struct {
	IsSender       bool     // true when this instance sends files
	SharedSecret   string   // code phrase shared by both ends; New() requires at least 4 chars
	Debug          bool     // enable debug logging
	RelayAddress   string   // relay host[:port]; ":9009" is appended when no port is given
	RelayPorts     []string // relay ports; index 0 is the control/message port
	RelayPassword  string   // password presented to the relay
	Stdout         bool     // receiver: payload went to stdout, on-disk copy is removed after transfer
	NoPrompt       bool     // skip the receiver's accept prompt
	NoMultiplexing bool     // restrict the transfer to a single relay port
	DisableLocal   bool     // disable local-network relay and peer discovery
	Ask            bool     // require confirmation and exchange machine IDs
}
// Client holds the state of the croc transfer
type Client struct {
	Options                         Options
	Pake                            *pake.Pake // PAKE state used to secure the channel
	Key                             []byte     // session key for message encryption
	ExternalIP, ExternalIPConnected string

	// steps involved in forming relationship
	Step1ChannelSecured       bool
	Step2FileInfoTransfered   bool
	Step3RecipientRequestFile bool
	Step4FileTransfer         bool
	Step5CloseChannels        bool
	SuccessfulTransfer        bool

	// send / receive information of all files
	FilesToTransfer           []FileInfo
	FilesToTransferCurrentNum int
	FilesHasFinished          map[int]struct{}

	// send / receive information of current file
	CurrentFile            *os.File
	CurrentFileChunkRanges []int64
	CurrentFileChunks      []int64
	TotalSent              int64
	TotalChunksTransfered  int
	chunkMap               map[uint64]struct{}

	// tcp connections; conn[0] carries the control messages
	conn []*comm.Comm

	bar             *progressbar.ProgressBar
	spinner         *spinner.Spinner
	longestFilename int // widest file name, used for display formatting
	firstSend       bool

	mutex       *sync.Mutex
	fread       *os.File
	numfinished int
	quit        chan bool // created by transfer(); signals shutdown
}
// Chunk contains information about the
// needed bytes
type Chunk struct {
	Bytes    []byte `json:"b,omitempty"` // raw chunk payload
	Location int64  `json:"l,omitempty"` // byte offset of the chunk in the file
}

// FileInfo registers the information about the file
type FileInfo struct {
	Name         string    `json:"n,omitempty"`
	FolderRemote string    `json:"fr,omitempty"` // destination folder on the receiver (slash-separated)
	FolderSource string    `json:"fs,omitempty"` // source folder on the sender
	Hash         []byte    `json:"h,omitempty"`
	Size         int64     `json:"s,omitempty"`
	ModTime      time.Time `json:"m,omitempty"`
	IsCompressed bool      `json:"c,omitempty"`
	IsEncrypted  bool      `json:"e,omitempty"`
}

// RemoteFileRequest requests specific bytes
type RemoteFileRequest struct {
	CurrentFileChunkRanges    []int64 // chunk ranges still needed by the receiver
	FilesToTransferCurrentNum int     // index of the requested file
	MachineID                 string
}

// SenderInfo lists the files to be transferred
type SenderInfo struct {
	FilesToTransfer []FileInfo
	MachineID       string
	Ask             bool // sender requires explicit confirmation
}
// New establishes a new connection for transferring files between two instances.
// It validates the shared secret (minimum 4 characters), allocates the
// connection slots and initializes the PAKE exchange: the sender takes
// pake role 1, the recipient role 0.
func New(ops Options) (c *Client, err error) {
	c = new(Client)
	c.FilesHasFinished = make(map[int]struct{})
	// setup basic info
	c.Options = ops
	Debug(c.Options.Debug)
	log.Debugf("options: %+v", c.Options)
	if len(c.Options.SharedSecret) < 4 {
		err = fmt.Errorf("code is too short")
		return
	}
	// 16 connection slots; slot 0 is used for control messages (the others
	// presumably carry file data — established later during transfer).
	c.conn = make([]*comm.Comm, 16)
	// initialize pake
	if c.Options.IsSender {
		c.Pake, err = pake.Init([]byte(c.Options.SharedSecret), 1, siec.SIEC255(), 1*time.Microsecond)
	} else {
		c.Pake, err = pake.Init([]byte(c.Options.SharedSecret), 0, siec.SIEC255(), 1*time.Microsecond)
	}
	if err != nil {
		return
	}
	c.mutex = &sync.Mutex{}
	return
}
// TransferOptions for sending
type TransferOptions struct {
	PathToFiles      []string // paths of the files to send
	KeepPathInRemote bool     // preserve the cwd-relative folder structure on the receiver
}
// sendCollectFiles stats and hashes every file in options.PathToFiles,
// filling c.FilesToTransfer and c.longestFilename, and prints a summary
// line to stderr. With KeepPathInRemote the remote folder mirrors the
// path relative to the current directory (which must contain the file).
func (c *Client) sendCollectFiles(options TransferOptions) (err error) {
	c.FilesToTransfer = make([]FileInfo, len(options.PathToFiles))
	totalFilesSize := int64(0)
	for i, pathToFile := range options.PathToFiles {
		var fstats os.FileInfo
		var fullPath string
		fullPath, err = filepath.Abs(pathToFile)
		if err != nil {
			return
		}
		fullPath = filepath.Clean(fullPath)
		var folderName string
		folderName, _ = filepath.Split(fullPath)
		fstats, err = os.Stat(fullPath)
		if err != nil {
			return
		}
		// Track the widest name for later display formatting.
		if len(fstats.Name()) > c.longestFilename {
			c.longestFilename = len(fstats.Name())
		}
		c.FilesToTransfer[i] = FileInfo{
			Name:         fstats.Name(),
			FolderRemote: ".",
			FolderSource: folderName,
			Size:         fstats.Size(),
			ModTime:      fstats.ModTime(),
		}
		c.FilesToTransfer[i].Hash, err = utils.HashFile(fullPath)
		// NOTE(review): the size is accumulated before the HashFile error
		// is checked; harmless since an error returns immediately after.
		totalFilesSize += fstats.Size()
		if err != nil {
			return
		}
		if options.KeepPathInRemote {
			var curFolder string
			curFolder, err = os.Getwd()
			if err != nil {
				return
			}
			curFolder, err = filepath.Abs(curFolder)
			if err != nil {
				return
			}
			if !strings.HasPrefix(folderName, curFolder) {
				err = fmt.Errorf("remote directory must be relative to current")
				return
			}
			// Remote folder: source folder relative to cwd, slash-separated,
			// without a leading slash; "." when the file sits in cwd itself.
			c.FilesToTransfer[i].FolderRemote = strings.TrimPrefix(folderName, curFolder)
			c.FilesToTransfer[i].FolderRemote = filepath.ToSlash(c.FilesToTransfer[i].FolderRemote)
			c.FilesToTransfer[i].FolderRemote = strings.TrimPrefix(c.FilesToTransfer[i].FolderRemote, "/")
			if c.FilesToTransfer[i].FolderRemote == "" {
				c.FilesToTransfer[i].FolderRemote = "."
			}
		}
		log.Debugf("file %d info: %+v", i, c.FilesToTransfer[i])
	}
	log.Debugf("longestFilename: %+v", c.longestFilename)
	fname := fmt.Sprintf("%d files", len(c.FilesToTransfer))
	if len(c.FilesToTransfer) == 1 {
		fname = fmt.Sprintf("'%s'", c.FilesToTransfer[0].Name)
	}
	fmt.Fprintf(os.Stderr, "Sending %s (%s)\n", fname, utils.ByteCountDecimal(totalFilesSize))
	return
}
// setupLocalRelay starts a relay server goroutine on every configured port.
// A relay that fails to run panics its goroutine. The ports after the first
// are passed to tcp.Run joined by commas — presumably advertised as the
// relay's data ports (TODO confirm against tcp.Run).
func (c *Client) setupLocalRelay() {
	// setup the relay locally
	for _, port := range c.Options.RelayPorts {
		go func(portStr string) {
			debugString := "warn"
			if c.Options.Debug {
				debugString = "debug"
			}
			err := tcp.Run(debugString, portStr, c.Options.RelayPassword, strings.Join(c.Options.RelayPorts[1:], ","))
			if err != nil {
				panic(err)
			}
		}(port)
	}
}
// broadcastOnLocalNetwork advertises this sender on the local network for
// up to 30 seconds using the payload "croc<port>", so receivers can
// discover it and connect to the local relay. Discovery errors are only
// logged; the result list itself is unused here.
func (c *Client) broadcastOnLocalNetwork() {
	// look for peers first
	discoveries, err := peerdiscovery.Discover(peerdiscovery.Settings{
		Limit:     -1,
		Payload:   []byte("croc" + c.Options.RelayPorts[0]),
		Delay:     10 * time.Millisecond,
		TimeLimit: 30 * time.Second,
	})
	log.Debugf("discoveries: %+v", discoveries)
	if err != nil {
		log.Debug(err.Error())
	}
}
// transferOverLocalRelay connects to the locally hosted relay, waits for
// the receiver's "handshake", then runs the transfer and reports its
// result on errchan. A connect failure returns silently because the public
// relay path is still being attempted in parallel.
func (c *Client) transferOverLocalRelay(options TransferOptions, errchan chan<- error) {
	// Give setupLocalRelay's goroutines a moment to start listening.
	time.Sleep(500 * time.Millisecond)
	log.Debug("establishing connection")
	var banner string
	conn, banner, ipaddr, err := tcp.ConnectToTCPServer("localhost:"+c.Options.RelayPorts[0], c.Options.RelayPassword, c.Options.SharedSecret[:3])
	log.Debugf("banner: %s", banner)
	if err != nil {
		err = errors.Wrap(err, fmt.Sprintf("could not connect to localhost:%s", c.Options.RelayPorts[0]))
		log.Debug(err)
		// not really an error because it will try to connect over the actual relay
		return
	}
	log.Debugf("local connection established: %+v", conn)
	for {
		// NOTE(review): the Receive error is discarded; a dead connection
		// would spin here — confirm conn.Receive's behavior on close.
		data, _ := conn.Receive()
		if bytes.Equal(data, []byte("handshake")) {
			break
		} else if bytes.Equal(data, []byte{1}) {
			log.Debug("got ping")
		} else {
			log.Debugf("instead of handshake got: %s", data)
		}
	}
	c.conn[0] = conn
	log.Debug("exchanged header message")
	// Switch the client over to the local relay's address and ports.
	c.Options.RelayAddress = "localhost"
	c.Options.RelayPorts = strings.Split(banner, ",")
	if c.Options.NoMultiplexing {
		log.Debug("no multiplexing")
		c.Options.RelayPorts = []string{c.Options.RelayPorts[0]}
	}
	c.ExternalIP = ipaddr
	errchan <- c.transfer(options)
}
// Send will send the specified file
// Send collects the files, prints the code phrase for the receiver, and
// races two paths: the local relay (unless DisableLocal) and the public
// relay. The first path to finish reports on errchan; on certain errors
// ("refusing files", "EOF", "bad password") the result of the other path
// is awaited as well.
func (c *Client) Send(options TransferOptions) (err error) {
	err = c.sendCollectFiles(options)
	if err != nil {
		return
	}
	otherRelay := ""
	if c.Options.RelayAddress != models.DEFAULT_RELAY {
		// A non-default relay must be passed to the receiver's command.
		otherRelay = "--relay " + c.Options.RelayAddress + " "
	}
	fmt.Fprintf(os.Stderr, "Code is: %s\nOn the other computer run\n\ncroc %s%s\n", c.Options.SharedSecret, otherRelay, c.Options.SharedSecret)
	if c.Options.Ask {
		machid, _ := machineid.ID()
		fmt.Fprintf(os.Stderr, "\rYour machine ID is '%s'\n", machid)
	}
	// // c.spinner.Suffix = " waiting for recipient..."
	// c.spinner.Start()
	// create channel for quitting
	// connect to the relay for messaging
	errchan := make(chan error, 1)
	if !c.Options.DisableLocal {
		// add two things to the error channel
		errchan = make(chan error, 2)
		c.setupLocalRelay()
		go c.broadcastOnLocalNetwork()
		go c.transferOverLocalRelay(options, errchan)
	}
	// Public-relay path.
	go func() {
		if !strings.Contains(c.Options.RelayAddress, ":") {
			// try the default port, 9009
			c.Options.RelayAddress += ":9009"
		}
		log.Debugf("establishing connection to %s", c.Options.RelayAddress)
		var banner string
		conn, banner, ipaddr, err := tcp.ConnectToTCPServer(c.Options.RelayAddress, c.Options.RelayPassword, c.Options.SharedSecret[:3], 5*time.Second)
		log.Debugf("banner: %s", banner)
		if err != nil {
			err = errors.Wrap(err, fmt.Sprintf("could not connect to %s", c.Options.RelayAddress))
			log.Debug(err)
			errchan <- err
			return
		}
		log.Debugf("connection established: %+v", conn)
		for {
			log.Debug("waiting for bytes")
			data, errConn := conn.Receive()
			if errConn != nil {
				log.Debugf("[%+v] had error: %s", conn, errConn.Error())
			}
			if bytes.Equal(data, []byte("ips?")) {
				// recipient wants to try to connect to local ips
				var ips []string
				// only get local ips if the local is enabled
				if !c.Options.DisableLocal {
					// get list of local ips
					ips, err = utils.GetLocalIPs()
					if err != nil {
						log.Debugf("error getting local ips: %s", err.Error())
					}
					// prepend the port that is being listened to
					ips = append([]string{c.Options.RelayPorts[0]}, ips...)
				}
				bips, _ := json.Marshal(ips)
				conn.Send(bips)
			} else if bytes.Equal(data, []byte("handshake")) {
				break
			} else if bytes.Equal(data, []byte{1}) {
				log.Debug("got ping")
				continue
			} else {
				log.Debugf("[%+v] got weird bytes: %+v", conn, data)
				// throttle the reading
				errchan <- fmt.Errorf("gracefully refusing using the public relay")
				return
			}
		}
		c.conn[0] = conn
		c.Options.RelayPorts = strings.Split(banner, ",")
		if c.Options.NoMultiplexing {
			log.Debug("no multiplexing")
			c.Options.RelayPorts = []string{c.Options.RelayPorts[0]}
		}
		c.ExternalIP = ipaddr
		log.Debug("exchanged header message")
		errchan <- c.transfer(options)
	}()
	err = <-errchan
	if err == nil {
		// return if no error
		return
	} else {
		log.Debugf("error from errchan: %s", err.Error())
	}
	if !c.Options.DisableLocal {
		// When both paths are racing, certain benign errors mean the other
		// path should be given a chance to deliver the real result.
		if strings.Contains(err.Error(), "refusing files") || strings.Contains(err.Error(), "EOF") || strings.Contains(err.Error(), "bad password") {
			errchan <- err
		}
		err = <-errchan
	}
	return err
}
// Receive will receive a file
// Receive first tries (for 100 ms) to discover a sender on the local
// network and, if found, connects to its local relay; otherwise it
// connects to the configured relay, optionally asks the sender for its
// local IPs to upgrade to a direct local connection, performs the
// handshake and starts the transfer.
func (c *Client) Receive() (err error) {
	fmt.Fprintf(os.Stderr, "connecting...")
	// recipient will look for peers first
	// and continue if it doesn't find any within 100 ms
	usingLocal := false
	if !c.Options.DisableLocal {
		log.Debug("attempt to discover peers")
		discoveries, err := peerdiscovery.Discover(peerdiscovery.Settings{
			Limit:     1,
			Payload:   []byte("ok"),
			Delay:     10 * time.Millisecond,
			TimeLimit: 100 * time.Millisecond,
		})
		if err == nil && len(discoveries) > 0 {
			for i := 0; i < len(discoveries); i++ {
				log.Debugf("discovery %d has payload: %+v", i, discoveries[i])
				if !bytes.HasPrefix(discoveries[i].Payload, []byte("croc")) {
					log.Debug("skipping discovery")
					continue
				}
				log.Debug("switching to local")
				// Fix: use the discovery that matched the "croc" prefix
				// (index i). The original read discoveries[0] here, which
				// is wrong whenever the first discovery was skipped above.
				c.Options.RelayAddress = fmt.Sprintf("%s:%s",
					discoveries[i].Address,
					bytes.TrimPrefix(discoveries[i].Payload, []byte("croc")),
				)
				c.ExternalIPConnected = c.Options.RelayAddress
				usingLocal = true
				break
			}
		}
		log.Debugf("discoveries: %+v", discoveries)
		log.Debug("establishing connection")
	}
	if !strings.Contains(c.Options.RelayAddress, ":") {
		// try the default port, 9009
		c.Options.RelayAddress += ":9009"
	}
	log.Debugf("establishing receiver connection to %s", c.Options.RelayAddress)
	var banner string
	c.conn[0], banner, c.ExternalIP, err = tcp.ConnectToTCPServer(c.Options.RelayAddress, c.Options.RelayPassword, c.Options.SharedSecret[:3])
	log.Debugf("banner: %s", banner)
	if err != nil {
		err = errors.Wrap(err, fmt.Sprintf("could not connect to %s", c.Options.RelayAddress))
		return
	}
	log.Debugf("receiver connection established: %+v", c.conn[0])
	if !usingLocal && !c.Options.DisableLocal {
		// ask the sender for their local ips and port
		// and try to connect to them
		log.Debug("sending ips?")
		var data []byte
		c.conn[0].Send([]byte("ips?"))
		data, err = c.conn[0].Receive()
		if err != nil {
			return
		}
		log.Debugf("ips data: %s", data)
		var ips []string
		json.Unmarshal(data, &ips)
		if len(ips) > 1 {
			// First entry is the sender's local relay port, the rest are
			// candidate addresses on the sender's networks.
			port := ips[0]
			ips = ips[1:]
			for _, ip := range ips {
				ipv4Addr, ipv4Net, errNet := net.ParseCIDR(fmt.Sprintf("%s/24", ip))
				log.Debugf("ipv4Add4: %+v, ipv4Net: %+v, err: %+v", ipv4Addr, ipv4Net, errNet)
				// Only try addresses that share a /24 with one of our own
				// local IPs.
				localIps, _ := utils.GetLocalIPs()
				haveLocalIP := false
				for _, localIP := range localIps {
					localIPparsed := net.ParseIP(localIP)
					if ipv4Net.Contains(localIPparsed) {
						haveLocalIP = true
						break
					}
				}
				if !haveLocalIP {
					log.Debugf("%s is not a local IP, skipping", ip)
					continue
				}
				serverTry := fmt.Sprintf("%s:%s", ip, port)
				conn, banner2, externalIP, errConn := tcp.ConnectToTCPServer(serverTry, c.Options.RelayPassword, c.Options.SharedSecret[:3], 50*time.Millisecond)
				if errConn != nil {
					log.Debugf("could not connect to " + serverTry)
					continue
				}
				log.Debugf("local connection established to %s", serverTry)
				log.Debugf("banner: %s", banner2)
				// reset to the local port
				banner = banner2
				c.Options.RelayAddress = serverTry
				c.ExternalIP = externalIP
				c.conn[0].Close()
				c.conn[0] = nil
				c.conn[0] = conn
				break
			}
		}
	}
	c.conn[0].Send([]byte("handshake"))
	c.Options.RelayPorts = strings.Split(banner, ",")
	if c.Options.NoMultiplexing {
		log.Debug("no multiplexing")
		c.Options.RelayPorts = []string{c.Options.RelayPorts[0]}
	}
	log.Debug("exchanged header message")
	fmt.Fprintf(os.Stderr, "\rsecuring channel...")
	return c.transfer(TransferOptions{})
}
// transfer runs the message loop on the control connection: the recipient
// kicks off the PAKE exchange, then both sides receive and process
// messages until done or an error. Errors after a successful transfer are
// discarded; in Stdout receiver mode the on-disk copy of the received file
// is removed at the end.
func (c *Client) transfer(options TransferOptions) (err error) {
	// connect to the server
	// quit with c.quit <- true
	c.quit = make(chan bool)
	// if recipient, initialize with sending pake information
	log.Debug("ready")
	if !c.Options.IsSender && !c.Step1ChannelSecured {
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type:  "pake",
			Bytes: c.Pake.Bytes(),
		})
		if err != nil {
			return
		}
	}
	// listen for incoming messages and process them
	for {
		var data []byte
		var done bool
		data, err = c.conn[0].Receive()
		if err != nil {
			log.Debugf("got error receiving: %s", err.Error())
			if !c.Step1ChannelSecured {
				// A failure before the channel is secured is reported as
				// such rather than as a raw network error.
				err = fmt.Errorf("could not secure channel")
			}
			break
		}
		done, err = c.processMessage(data)
		if err != nil {
			log.Debugf("got error processing: %s", err.Error())
			break
		}
		if done {
			break
		}
	}
	// purge errors that come from successful transfer
	if c.SuccessfulTransfer {
		if err != nil {
			log.Debugf("purging error: %s", err)
		}
		err = nil
	}
	if c.Options.Stdout && !c.Options.IsSender {
		// Stdout mode: the payload was already emitted, so drop the file.
		pathToFile := path.Join(
			c.FilesToTransfer[c.FilesToTransferCurrentNum].FolderRemote,
			c.FilesToTransfer[c.FilesToTransferCurrentNum].Name,
		)
		os.Remove(pathToFile)
	}
	return
}
// processMessageFileInfo handles the sender's "fileinfo" message on the
// recipient side: it records the file list, prompts the user to accept
// (unless NoPrompt and no Ask flag), and marks step 2 as done. Returns
// done=true with an error if the user refuses the files.
func (c *Client) processMessageFileInfo(m message.Message) (done bool, err error) {
	var senderInfo SenderInfo
	err = json.Unmarshal(m.Bytes, &senderInfo)
	if err != nil {
		log.Error(err)
		return
	}
	c.FilesToTransfer = senderInfo.FilesToTransfer
	fname := fmt.Sprintf("%d files", len(c.FilesToTransfer))
	if len(c.FilesToTransfer) == 1 {
		fname = fmt.Sprintf("'%s'", c.FilesToTransfer[0].Name)
	}
	// total size and longest filename are used for the prompt and for
	// progress-bar alignment later
	totalSize := int64(0)
	for _, fi := range c.FilesToTransfer {
		totalSize += fi.Size
		if len(fi.Name) > c.longestFilename {
			c.longestFilename = len(fi.Name)
		}
	}
	// c.spinner.Stop()
	if !c.Options.NoPrompt || c.Options.Ask || senderInfo.Ask {
		if c.Options.Ask || senderInfo.Ask {
			machID, _ := machineid.ID()
			fmt.Fprintf(os.Stderr, "\rYour machine id is '%s'.\nAccept %s (%s) from '%s'? (y/n) ", machID, fname, utils.ByteCountDecimal(totalSize), senderInfo.MachineID)
		} else {
			fmt.Fprintf(os.Stderr, "\rAccept %s (%s)? (y/n) ", fname, utils.ByteCountDecimal(totalSize))
		}
		if strings.ToLower(strings.TrimSpace(utils.GetInput(""))) != "y" {
			// tell the sender we refused before bailing out
			err = message.Send(c.conn[0], c.Key, message.Message{
				Type:    "error",
				Message: "refusing files",
			})
			return true, fmt.Errorf("refused files")
		}
	} else {
		fmt.Fprintf(os.Stderr, "\rReceiving %s (%s) \n", fname, utils.ByteCountDecimal(totalSize))
	}
	fmt.Fprintf(os.Stderr, "\nReceiving (<-%s)\n", c.ExternalIPConnected)
	log.Debug(c.FilesToTransfer)
	c.Step2FileInfoTransfered = true
	return
}
// procesMesssagePake (sic) handles an incoming "pake" message. It updates
// the local PAKE state, replies with its own PAKE bytes while unverified,
// and once verified the sender generates and sends a salt and both sides
// connect to the remaining relay ports for the data channels.
func (c *Client) procesMesssagePake(m message.Message) (err error) {
	log.Debug("received pake payload")
	// if // c.spinner.Suffix != " performing PAKE..." {
	// 	// c.spinner.Stop()
	// 	// c.spinner.Suffix = " performing PAKE..."
	// 	// c.spinner.Start()
	// }
	notVerified := !c.Pake.IsVerified()
	err = c.Pake.Update(m.Bytes)
	if err != nil {
		return
	}
	// reply if we just became verified (recipient) or are still unverified
	if (notVerified && c.Pake.IsVerified() && !c.Options.IsSender) || !c.Pake.IsVerified() {
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type:  "pake",
			Bytes: c.Pake.Bytes(),
		})
	}
	if c.Pake.IsVerified() {
		if c.Options.IsSender {
			log.Debug("generating salt")
			salt := make([]byte, 8)
			// NOTE(review): rand.Read error is ignored here — a failed read
			// would leave a zero salt; consider checking it
			rand.Read(salt)
			err = message.Send(c.conn[0], c.Key, message.Message{
				Type:  "salt",
				Bytes: salt,
			})
			if err != nil {
				return
			}
		}
		// connects to the other ports of the server for transfer
		var wg sync.WaitGroup
		wg.Add(len(c.Options.RelayPorts))
		for i := 0; i < len(c.Options.RelayPorts); i++ {
			log.Debugf("port: [%s]", c.Options.RelayPorts[i])
			go func(j int) {
				defer wg.Done()
				server := fmt.Sprintf("%s:%s", strings.Split(c.Options.RelayAddress, ":")[0], c.Options.RelayPorts[j])
				log.Debugf("connecting to %s", server)
				// NOTE(review): the goroutines write the shared named return
				// `err` concurrently (data race); failures additionally panic
				// below, so the race is mostly masked — TODO confirm intent
				c.conn[j+1], _, _, err = tcp.ConnectToTCPServer(
					server,
					c.Options.RelayPassword,
					fmt.Sprintf("%s-%d", utils.SHA256(c.Options.SharedSecret)[:7], j),
				)
				if err != nil {
					panic(err)
				}
				log.Debugf("connected to %s", server)
				if !c.Options.IsSender {
					go c.receiveData(j)
				}
			}(i)
		}
		wg.Wait()
	}
	return
}
// processMessageSalt handles an incoming "salt" message: the recipient
// echoes the salt back to the sender, both sides derive the shared
// encryption key from the PAKE session key plus the salt, and the sender
// then announces its external IP over the newly keyed channel.
func (c *Client) processMessageSalt(m message.Message) (done bool, err error) {
	log.Debug("received salt")
	if !c.Options.IsSender {
		log.Debug("sending salt back")
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type:  "salt",
			Bytes: m.Bytes,
		})
		// previously this error was silently clobbered by the SessionKey
		// call below; fail fast so a broken channel is reported
		if err != nil {
			return true, err
		}
	}
	log.Debugf("session key is verified, generating encryption with salt: %x", m.Bytes)
	key, err := c.Pake.SessionKey()
	if err != nil {
		return true, err
	}
	c.Key, _, err = crypt.New(key, m.Bytes)
	if err != nil {
		return true, err
	}
	log.Debugf("key = %+x", c.Key)
	if c.Options.IsSender {
		log.Debug("sending external IP")
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type:    "externalip",
			Message: c.ExternalIP,
			Bytes:   m.Bytes,
		})
	}
	return
}
// processExternalIP records the peer's external address and marks the
// channel as secured (step 1 done). The recipient first replies with its
// own external IP so the sender learns it too.
func (c *Client) processExternalIP(m message.Message) (done bool, err error) {
	log.Debugf("received external IP: %+v", m)
	if !c.Options.IsSender {
		reply := message.Message{
			Type:    "externalip",
			Message: c.ExternalIP,
		}
		if err = message.Send(c.conn[0], c.Key, reply); err != nil {
			return true, err
		}
	}
	// the local relay may have preset this already; keep it if so
	if c.ExternalIPConnected == "" {
		c.ExternalIPConnected = m.Message
	}
	log.Debugf("connected as %s -> %s", c.ExternalIP, c.ExternalIPConnected)
	c.Step1ChannelSecured = true
	return
}
// processMessage decodes one incoming payload and dispatches it by
// message type, then advances the client state machine via updateState.
// Returns done=true when the transfer loop should stop.
func (c *Client) processMessage(payload []byte) (done bool, err error) {
	m, err := message.Decode(c.Key, payload)
	if err != nil {
		err = fmt.Errorf("problem with decoding: %s", err.Error())
		return
	}
	switch m.Type {
	case "finished":
		// peer is done; acknowledge and stop
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type: "finished",
		})
		done = true
		c.SuccessfulTransfer = true
		return
	case "pake":
		err = c.procesMesssagePake(m)
		if err != nil {
			err = errors.Wrap(err, "pake not successful")
		}
	case "salt":
		done, err = c.processMessageSalt(m)
	case "externalip":
		done, err = c.processExternalIP(m)
	case "error":
		// c.spinner.Stop()
		fmt.Print("\r")
		err = fmt.Errorf("peer error: %s", m.Message)
		return true, err
	case "fileinfo":
		done, err = c.processMessageFileInfo(m)
	case "recipientready":
		// recipient told us which file and which chunks it still needs
		var remoteFile RemoteFileRequest
		err = json.Unmarshal(m.Bytes, &remoteFile)
		if err != nil {
			return
		}
		c.FilesToTransferCurrentNum = remoteFile.FilesToTransferCurrentNum
		c.CurrentFileChunkRanges = remoteFile.CurrentFileChunkRanges
		c.CurrentFileChunks = utils.ChunkRangesToChunks(c.CurrentFileChunkRanges)
		log.Debugf("current file chunks: %+v", c.CurrentFileChunks)
		// chunkMap is read concurrently by sendData; guard the rebuild
		c.mutex.Lock()
		c.chunkMap = make(map[uint64]struct{})
		for _, chunk := range c.CurrentFileChunks {
			c.chunkMap[uint64(chunk)] = struct{}{}
		}
		c.mutex.Unlock()
		c.Step3RecipientRequestFile = true
		if c.Options.Ask {
			fmt.Fprintf(os.Stderr, "Send to machine '%s'? (y/n) ", remoteFile.MachineID)
			if strings.ToLower(strings.TrimSpace(utils.GetInput(""))) != "y" {
				err = message.Send(c.conn[0], c.Key, message.Message{
					Type:    "error",
					Message: "refusing files",
				})
				done = true
				err = fmt.Errorf("refused files")
				return
			}
		}
	case "close-sender":
		// recipient finished the current file; reset per-file state
		c.bar.Finish()
		log.Debug("close-sender received...")
		c.Step4FileTransfer = false
		c.Step3RecipientRequestFile = false
		log.Debug("sending close-recipient")
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type: "close-recipient",
		})
	case "close-recipient":
		c.Step4FileTransfer = false
		c.Step3RecipientRequestFile = false
	}
	if err != nil {
		log.Debugf("got error from processing message: %s", err.Error())
		return
	}
	err = c.updateState()
	if err != nil {
		log.Debugf("got error from updating state: %s", err.Error())
		return
	}
	return
}
// updateIfSenderChannelSecured sends the file listing ("fileinfo") once
// the sender's channel is secured and the info has not yet been sent.
// No-op on the recipient side or when step 2 is already complete.
func (c *Client) updateIfSenderChannelSecured() (err error) {
	if c.Options.IsSender && c.Step1ChannelSecured && !c.Step2FileInfoTransfered {
		var b []byte
		// machine ID is best-effort; an empty ID is acceptable
		machID, _ := machineid.ID()
		b, err = json.Marshal(SenderInfo{
			FilesToTransfer: c.FilesToTransfer,
			MachineID:       machID,
			Ask:             c.Options.Ask,
		})
		if err != nil {
			log.Error(err)
			return
		}
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type:  "fileinfo",
			Bytes: b,
		})
		if err != nil {
			return
		}
		c.Step2FileInfoTransfered = true
	}
	return
}
// recipientInitializeFile prepares the destination file for the current
// transfer: it opens (or creates) the file, and either computes the
// missing chunk ranges of a same-sized partial file for resuming, or
// truncates the file to the expected size for a fresh transfer.
func (c *Client) recipientInitializeFile() (err error) {
	// start initiating the process to receive a new file
	log.Debugf("working on file %d", c.FilesToTransferCurrentNum)
	// recipient sets the file
	pathToFile := path.Join(
		c.FilesToTransfer[c.FilesToTransferCurrentNum].FolderRemote,
		c.FilesToTransfer[c.FilesToTransferCurrentNum].Name,
	)
	folderForFile, _ := filepath.Split(pathToFile)
	// best-effort; a failure surfaces below when opening/creating the file
	os.MkdirAll(folderForFile, os.ModePerm)
	var errOpen error
	c.CurrentFile, errOpen = os.OpenFile(
		pathToFile,
		os.O_WRONLY, 0666)
	var truncate bool // default false
	c.CurrentFileChunks = []int64{}
	c.CurrentFileChunkRanges = []int64{}
	if errOpen == nil {
		stat, _ := c.CurrentFile.Stat()
		truncate = stat.Size() != c.FilesToTransfer[c.FilesToTransferCurrentNum].Size
		if !truncate {
			// recipient requests the file and chunks (if empty, then should receive all chunks)
			// TODO: determine the missing chunks
			c.CurrentFileChunkRanges = utils.MissingChunks(
				pathToFile,
				c.FilesToTransfer[c.FilesToTransferCurrentNum].Size,
				models.TCP_BUFFER_SIZE/2,
			)
		}
	} else {
		c.CurrentFile, errOpen = os.Create(pathToFile)
		if errOpen != nil {
			errOpen = errors.Wrap(errOpen, "could not create "+pathToFile)
			log.Error(errOpen)
			return errOpen
		}
		truncate = true
	}
	if truncate {
		err := c.CurrentFile.Truncate(c.FilesToTransfer[c.FilesToTransferCurrentNum].Size)
		if err != nil {
			err = errors.Wrap(err, "could not truncate "+pathToFile)
			log.Error(err)
			return err
		}
	}
	return
}
// recipientGetFileReady tells the sender which file and chunks the
// recipient wants next ("recipientready"). When finished is true it first
// sends "finished" and marks the transfer successful; it still sends a
// final recipientready afterwards so the protocol round-trips.
func (c *Client) recipientGetFileReady(finished bool) (err error) {
	if finished {
		// TODO: do the last finishing stuff
		log.Debug("finished")
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type: "finished",
		})
		if err != nil {
			// NOTE(review): panicking here crashes the whole process on a
			// send failure; consider returning the error instead
			panic(err)
		}
		c.SuccessfulTransfer = true
		c.FilesHasFinished[c.FilesToTransferCurrentNum] = struct{}{}
	}
	err = c.recipientInitializeFile()
	if err != nil {
		return
	}
	c.TotalSent = 0
	// machine ID is best-effort; an empty ID is acceptable
	machID, _ := machineid.ID()
	bRequest, _ := json.Marshal(RemoteFileRequest{
		CurrentFileChunkRanges:    c.CurrentFileChunkRanges,
		FilesToTransferCurrentNum: c.FilesToTransferCurrentNum,
		MachineID:                 machID,
	})
	log.Debug("converting to chunk range")
	c.CurrentFileChunks = utils.ChunkRangesToChunks(c.CurrentFileChunkRanges)
	if !finished {
		// setup the progressbar
		c.setBar()
	}
	log.Debugf("sending recipient ready with %d chunks", len(c.CurrentFileChunks))
	err = message.Send(c.conn[0], c.Key, message.Message{
		Type:  "recipientready",
		Bytes: bRequest,
	})
	if err != nil {
		return
	}
	c.Step3RecipientRequestFile = true
	return
}
// createEmptyFileAndFinish creates a zero-byte file at its remote
// destination (zero-size files carry no data over the wire) and renders a
// completed progress bar so the UI matches regular transfers.
func (c *Client) createEmptyFileAndFinish(fileInfo FileInfo, i int) (err error) {
	log.Debugf("touching file with folder / name")
	if !utils.Exists(fileInfo.FolderRemote) {
		err = os.MkdirAll(fileInfo.FolderRemote, os.ModePerm)
		if err != nil {
			log.Error(err)
			return
		}
	}
	emptyFile, errCreate := os.Create(path.Join(fileInfo.FolderRemote, fileInfo.Name))
	if errCreate != nil {
		log.Error(errCreate)
		err = errCreate
		return
	}
	emptyFile.Close()
	// setup the progressbar
	// pad the description so bars for multiple files line up
	description := fmt.Sprintf("%-*s", c.longestFilename, c.FilesToTransfer[i].Name)
	if len(c.FilesToTransfer) == 1 {
		description = c.FilesToTransfer[i].Name
	}
	c.bar = progressbar.NewOptions64(1,
		progressbar.OptionOnCompletion(func() {
			fmt.Fprintf(os.Stderr, " ✔️\n")
		}),
		progressbar.OptionSetWidth(20),
		progressbar.OptionSetDescription(description),
		progressbar.OptionSetRenderBlankState(true),
		progressbar.OptionShowBytes(true),
		progressbar.OptionShowCount(),
		progressbar.OptionSetWriter(os.Stderr),
	)
	c.bar.Finish()
	return
}
// updateIfRecipientHasFileInfo picks the next file the recipient still
// needs (skipping finished files and files whose on-disk hash already
// matches) and then requests it via recipientGetFileReady. Runs only on
// the recipient after file info arrived and before a file was requested.
func (c *Client) updateIfRecipientHasFileInfo() (err error) {
	if !(!c.Options.IsSender && c.Step2FileInfoTransfered && !c.Step3RecipientRequestFile) {
		return
	}
	// find the next file to transfer and send that number
	// if the files are the same size, then look for missing chunks
	finished := true
	for i, fileInfo := range c.FilesToTransfer {
		if _, ok := c.FilesHasFinished[i]; ok {
			continue
		}
		log.Debugf("checking %+v", fileInfo)
		if i < c.FilesToTransferCurrentNum {
			continue
		}
		fileHash, errHash := utils.HashFile(path.Join(fileInfo.FolderRemote, fileInfo.Name))
		// zero-size files are created locally, not transferred
		if fileInfo.Size == 0 {
			err = c.createEmptyFileAndFinish(fileInfo, i)
			if err != nil {
				return
			}
			continue
		}
		log.Debugf("%s %+x %+x %+v", fileInfo.Name, fileHash, fileInfo.Hash, errHash)
		if !bytes.Equal(fileHash, fileInfo.Hash) {
			log.Debugf("hashes are not equal %x != %x", fileHash, fileInfo.Hash)
		} else {
			log.Debugf("hashes are equal %x == %x", fileHash, fileInfo.Hash)
		}
		if errHash != nil {
			// probably can't find, its okay
			log.Debug(errHash)
		}
		// a missing local file or mismatching hash means we still need it
		if errHash != nil || !bytes.Equal(fileHash, fileInfo.Hash) {
			finished = false
			c.FilesToTransferCurrentNum = i
			break
		}
		// TODO: print out something about this file already existing
	}
	err = c.recipientGetFileReady(finished)
	return
}
// updateState advances the transfer state machine: sender sends file
// info once secured, recipient requests the next file, and the sender
// starts streaming data once the recipient has asked for a file.
func (c *Client) updateState() (err error) {
	err = c.updateIfSenderChannelSecured()
	if err != nil {
		return
	}
	err = c.updateIfRecipientHasFileInfo()
	if err != nil {
		return
	}
	if c.Options.IsSender && c.Step3RecipientRequestFile && !c.Step4FileTransfer {
		log.Debug("start sending data!")
		if !c.firstSend {
			fmt.Fprintf(os.Stderr, "\nSending (->%s)\n", c.ExternalIPConnected)
			c.firstSend = true
			// if there are empty files, show them as already have been transferred now
			for i := range c.FilesToTransfer {
				if c.FilesToTransfer[i].Size == 0 {
					// setup the progressbar and takedown the progress bar for empty files
					description := fmt.Sprintf("%-*s", c.longestFilename, c.FilesToTransfer[i].Name)
					if len(c.FilesToTransfer) == 1 {
						description = c.FilesToTransfer[i].Name
					}
					c.bar = progressbar.NewOptions64(1,
						progressbar.OptionOnCompletion(func() {
							fmt.Fprintf(os.Stderr, " ✔️\n")
						}),
						progressbar.OptionSetWidth(20),
						progressbar.OptionSetDescription(description),
						progressbar.OptionSetRenderBlankState(true),
						progressbar.OptionShowBytes(true),
						progressbar.OptionShowCount(),
						progressbar.OptionSetWriter(os.Stderr),
					)
					c.bar.Finish()
				}
			}
		}
		c.Step4FileTransfer = true
		// setup the progressbar
		c.setBar()
		c.TotalSent = 0
		log.Debug("beginning sending comms")
		pathToFile := path.Join(
			c.FilesToTransfer[c.FilesToTransferCurrentNum].FolderSource,
			c.FilesToTransfer[c.FilesToTransferCurrentNum].Name,
		)
		c.fread, err = os.Open(pathToFile)
		c.numfinished = 0
		if err != nil {
			return
		}
		// one sender goroutine per data connection; chunks are striped
		// across them by position (see sendData)
		for i := 0; i < len(c.Options.RelayPorts); i++ {
			log.Debugf("starting sending over comm %d", i)
			go c.sendData(i)
		}
	}
	return
}
// setBar (re)creates the progress bar for the current file and, when
// resuming a partial transfer, pre-advances it by the bytes already on
// disk (total size minus the bytes still owed by the missing chunks).
func (c *Client) setBar() {
	// pad the description so bars for multiple files line up
	description := fmt.Sprintf("%-*s", c.longestFilename, c.FilesToTransfer[c.FilesToTransferCurrentNum].Name)
	if len(c.FilesToTransfer) == 1 {
		description = c.FilesToTransfer[c.FilesToTransferCurrentNum].Name
	}
	c.bar = progressbar.NewOptions64(
		c.FilesToTransfer[c.FilesToTransferCurrentNum].Size,
		progressbar.OptionOnCompletion(func() {
			fmt.Fprintf(os.Stderr, " ✔️\n")
		}),
		progressbar.OptionSetWidth(20),
		progressbar.OptionSetDescription(description),
		progressbar.OptionSetRenderBlankState(true),
		progressbar.OptionShowBytes(true),
		progressbar.OptionShowCount(),
		progressbar.OptionSetWriter(os.Stderr),
		progressbar.OptionThrottle(100*time.Millisecond),
	)
	// each chunk carries at most TCP_BUFFER_SIZE/2 bytes of payload
	byteToDo := int64(len(c.CurrentFileChunks) * models.TCP_BUFFER_SIZE / 2)
	if byteToDo > 0 {
		bytesDone := c.FilesToTransfer[c.FilesToTransferCurrentNum].Size - byteToDo
		log.Debug(byteToDo)
		log.Debug(c.FilesToTransfer[c.FilesToTransferCurrentNum].Size)
		log.Debug(bytesDone)
		if bytesDone > 0 {
			c.bar.Add64(bytesDone)
		}
	}
}
// receiveData runs in a goroutine per data connection (c.conn[i+1]) on
// the recipient. Each frame is decrypted, decompressed, and written at
// the 8-byte little-endian position carried in its header. When all
// expected chunks (or all bytes) have arrived it closes the file,
// optionally dumps it to stdout, and tells the sender to close.
func (c *Client) receiveData(i int) {
	log.Debugf("%d receiving data", i)
	for {
		data, err := c.conn[i+1].Receive()
		if err != nil {
			break
		}
		// single 0x01 byte is a keep-alive ping from the relay
		if bytes.Equal(data, []byte{1}) {
			log.Debug("got ping")
			continue
		}
		// NOTE(review): decrypt/decompress/read/write failures panic; in a
		// long-lived goroutine this crashes the process — TODO confirm intent
		data, err = crypt.Decrypt(data, c.Key)
		if err != nil {
			panic(err)
		}
		data = compress.Decompress(data)
		// get position
		var position uint64
		rbuf := bytes.NewReader(data[:8])
		err = binary.Read(rbuf, binary.LittleEndian, &position)
		if err != nil {
			panic(err)
		}
		positionInt64 := int64(position)
		// serialize writes from the concurrent receiver goroutines
		c.mutex.Lock()
		_, err = c.CurrentFile.WriteAt(data[8:], positionInt64)
		c.mutex.Unlock()
		if err != nil {
			panic(err)
		}
		c.bar.Add(len(data[8:]))
		c.TotalSent += int64(len(data[8:]))
		c.TotalChunksTransfered++
		if c.TotalChunksTransfered == len(c.CurrentFileChunks) || c.TotalSent == c.FilesToTransfer[c.FilesToTransferCurrentNum].Size {
			log.Debug("finished receiving!")
			c.CurrentFile.Close()
			if c.Options.Stdout || strings.HasPrefix(c.FilesToTransfer[c.FilesToTransferCurrentNum].Name, "croc-stdin") {
				pathToFile := path.Join(
					c.FilesToTransfer[c.FilesToTransferCurrentNum].FolderRemote,
					c.FilesToTransfer[c.FilesToTransferCurrentNum].Name,
				)
				b, _ := ioutil.ReadFile(pathToFile)
				fmt.Print(string(b))
			}
			log.Debug("sending close-sender")
			err = message.Send(c.conn[0], c.Key, message.Message{
				Type: "close-sender",
			})
			if err != nil {
				panic(err)
			}
		}
	}
	return
}
// sendData runs in a goroutine per data connection on the sender. All
// goroutines read the file sequentially; chunks are striped across
// connections by chunk index modulo the number of relay ports, and only
// chunks the recipient asked for (chunkMap) are actually sent. Each
// frame is position-prefixed, compressed, then encrypted.
func (c *Client) sendData(i int) {
	defer func() {
		log.Debugf("finished with %d", i)
		// NOTE(review): numfinished is incremented from multiple goroutines
		// without the mutex — data race; TODO confirm / guard
		c.numfinished++
		if c.numfinished == len(c.Options.RelayPorts) {
			log.Debug("closing file")
			c.fread.Close()
		}
	}()
	var readingPos int64
	pos := uint64(0)
	curi := float64(0)
	for {
		// Read file
		data := make([]byte, models.TCP_BUFFER_SIZE/2)
		// log.Debugf("%d trying to read", i)
		n, errRead := c.fread.ReadAt(data, readingPos)
		// log.Debugf("%d read %d bytes", i, n)
		readingPos += int64(n)
		// this goroutine handles every len(RelayPorts)-th chunk
		if math.Mod(curi, float64(len(c.Options.RelayPorts))) == float64(i) {
			// check to see if this is a chunk that the recipient wants
			usableChunk := true
			c.mutex.Lock()
			if len(c.chunkMap) != 0 {
				if _, ok := c.chunkMap[pos]; !ok {
					usableChunk = false
				} else {
					delete(c.chunkMap, pos)
				}
			}
			c.mutex.Unlock()
			if usableChunk {
				// log.Debugf("sending chunk %d", pos)
				posByte := make([]byte, 8)
				binary.LittleEndian.PutUint64(posByte, pos)
				dataToSend, err := crypt.Encrypt(
					compress.Compress(
						append(posByte, data[:n]...),
					),
					c.Key,
				)
				if err != nil {
					panic(err)
				}
				err = c.conn[i+1].Send(dataToSend)
				if err != nil {
					panic(err)
				}
				c.bar.Add(n)
				c.TotalSent += int64(n)
				// time.Sleep(100 * time.Millisecond)
			} else {
				// log.Debugf("skipping chunk %d", pos)
			}
		}
		curi++
		pos += uint64(n)
		// ReadAt can return data together with io.EOF on the final chunk,
		// so the error is checked only after the data has been handled
		if errRead != nil {
			if errRead == io.EOF {
				break
			}
			panic(errRead)
		}
	}
	return
}
Default to port 9009 if the port is lost during discovery.
Fixes #222.
package croc
import (
"bytes"
"crypto/rand"
"encoding/binary"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math"
"net"
"os"
"path"
"path/filepath"
"strings"
"sync"
"time"
"github.com/denisbrodbeck/machineid"
"github.com/pkg/errors"
"github.com/schollz/croc/v8/src/comm"
"github.com/schollz/croc/v8/src/compress"
"github.com/schollz/croc/v8/src/crypt"
"github.com/schollz/croc/v8/src/message"
"github.com/schollz/croc/v8/src/models"
"github.com/schollz/croc/v8/src/tcp"
"github.com/schollz/croc/v8/src/utils"
log "github.com/schollz/logger"
"github.com/schollz/pake/v2"
"github.com/schollz/peerdiscovery"
"github.com/schollz/progressbar/v3"
"github.com/schollz/spinner"
"github.com/tscholl2/siec"
)
// init forces the logger into debug mode at package load time.
// NOTE(review): this overrides the level later set by Debug()/New() until
// they run — possibly a development leftover; confirm it is intended.
func init() {
	log.SetLevel("debug")
}
// Debug toggles debug mode
func Debug(debug bool) {
if debug {
log.SetLevel("debug")
} else {
log.SetLevel("warn")
}
}
// Options specifies user specific options
type Options struct {
IsSender bool
SharedSecret string
Debug bool
RelayAddress string
RelayPorts []string
RelayPassword string
Stdout bool
NoPrompt bool
NoMultiplexing bool
DisableLocal bool
Ask bool
}
// Client holds the state of the croc transfer
type Client struct {
Options Options
Pake *pake.Pake
Key []byte
ExternalIP, ExternalIPConnected string
// steps involved in forming relationship
Step1ChannelSecured bool
Step2FileInfoTransfered bool
Step3RecipientRequestFile bool
Step4FileTransfer bool
Step5CloseChannels bool
SuccessfulTransfer bool
// send / receive information of all files
FilesToTransfer []FileInfo
FilesToTransferCurrentNum int
FilesHasFinished map[int]struct{}
// send / receive information of current file
CurrentFile *os.File
CurrentFileChunkRanges []int64
CurrentFileChunks []int64
TotalSent int64
TotalChunksTransfered int
chunkMap map[uint64]struct{}
// tcp connections
conn []*comm.Comm
bar *progressbar.ProgressBar
spinner *spinner.Spinner
longestFilename int
firstSend bool
mutex *sync.Mutex
fread *os.File
numfinished int
quit chan bool
}
// Chunk contains information about the
// needed bytes
type Chunk struct {
	Bytes    []byte `json:"b,omitempty"` // raw chunk payload
	Location int64  `json:"l,omitempty"` // byte offset of the chunk within the file
}
// FileInfo registers the information about the file
type FileInfo struct {
Name string `json:"n,omitempty"`
FolderRemote string `json:"fr,omitempty"`
FolderSource string `json:"fs,omitempty"`
Hash []byte `json:"h,omitempty"`
Size int64 `json:"s,omitempty"`
ModTime time.Time `json:"m,omitempty"`
IsCompressed bool `json:"c,omitempty"`
IsEncrypted bool `json:"e,omitempty"`
}
// RemoteFileRequest requests specific bytes
type RemoteFileRequest struct {
CurrentFileChunkRanges []int64
FilesToTransferCurrentNum int
MachineID string
}
// SenderInfo lists the files to be transferred
type SenderInfo struct {
FilesToTransfer []FileInfo
MachineID string
Ask bool
}
// New establishes a new connection for transferring files between two instances.
func New(ops Options) (c *Client, err error) {
c = new(Client)
c.FilesHasFinished = make(map[int]struct{})
// setup basic info
c.Options = ops
Debug(c.Options.Debug)
log.Debugf("options: %+v", c.Options)
if len(c.Options.SharedSecret) < 4 {
err = fmt.Errorf("code is too short")
return
}
c.conn = make([]*comm.Comm, 16)
// initialize pake
if c.Options.IsSender {
c.Pake, err = pake.Init([]byte(c.Options.SharedSecret), 1, siec.SIEC255(), 1*time.Microsecond)
} else {
c.Pake, err = pake.Init([]byte(c.Options.SharedSecret), 0, siec.SIEC255(), 1*time.Microsecond)
}
if err != nil {
return
}
c.mutex = &sync.Mutex{}
return
}
// TransferOptions for sending
type TransferOptions struct {
PathToFiles []string
KeepPathInRemote bool
}
// sendCollectFiles resolves and stats every path to send, hashes each
// file, fills in c.FilesToTransfer (including remote folder layout when
// KeepPathInRemote is set), and prints the sending summary.
func (c *Client) sendCollectFiles(options TransferOptions) (err error) {
	c.FilesToTransfer = make([]FileInfo, len(options.PathToFiles))
	totalFilesSize := int64(0)
	for i, pathToFile := range options.PathToFiles {
		var fstats os.FileInfo
		var fullPath string
		fullPath, err = filepath.Abs(pathToFile)
		if err != nil {
			return
		}
		fullPath = filepath.Clean(fullPath)
		var folderName string
		folderName, _ = filepath.Split(fullPath)
		fstats, err = os.Stat(fullPath)
		if err != nil {
			return
		}
		// track longest name for progress-bar alignment
		if len(fstats.Name()) > c.longestFilename {
			c.longestFilename = len(fstats.Name())
		}
		c.FilesToTransfer[i] = FileInfo{
			Name:         fstats.Name(),
			FolderRemote: ".",
			FolderSource: folderName,
			Size:         fstats.Size(),
			ModTime:      fstats.ModTime(),
		}
		c.FilesToTransfer[i].Hash, err = utils.HashFile(fullPath)
		totalFilesSize += fstats.Size()
		if err != nil {
			return
		}
		if options.KeepPathInRemote {
			// remote folder is the source folder relative to the cwd
			var curFolder string
			curFolder, err = os.Getwd()
			if err != nil {
				return
			}
			curFolder, err = filepath.Abs(curFolder)
			if err != nil {
				return
			}
			if !strings.HasPrefix(folderName, curFolder) {
				err = fmt.Errorf("remote directory must be relative to current")
				return
			}
			c.FilesToTransfer[i].FolderRemote = strings.TrimPrefix(folderName, curFolder)
			c.FilesToTransfer[i].FolderRemote = filepath.ToSlash(c.FilesToTransfer[i].FolderRemote)
			c.FilesToTransfer[i].FolderRemote = strings.TrimPrefix(c.FilesToTransfer[i].FolderRemote, "/")
			if c.FilesToTransfer[i].FolderRemote == "" {
				c.FilesToTransfer[i].FolderRemote = "."
			}
		}
		log.Debugf("file %d info: %+v", i, c.FilesToTransfer[i])
	}
	log.Debugf("longestFilename: %+v", c.longestFilename)
	fname := fmt.Sprintf("%d files", len(c.FilesToTransfer))
	if len(c.FilesToTransfer) == 1 {
		fname = fmt.Sprintf("'%s'", c.FilesToTransfer[0].Name)
	}
	fmt.Fprintf(os.Stderr, "Sending %s (%s)\n", fname, utils.ByteCountDecimal(totalFilesSize))
	return
}
// setupLocalRelay starts an in-process TCP relay on every configured
// relay port (one goroutine each) so a LAN peer can connect directly
// without the public relay.
func (c *Client) setupLocalRelay() {
	// setup the relay locally
	for _, port := range c.Options.RelayPorts {
		go func(portStr string) {
			debugString := "warn"
			if c.Options.Debug {
				debugString = "debug"
			}
			// tcp.Run blocks, so each relay runs in its own goroutine
			err := tcp.Run(debugString, portStr, c.Options.RelayPassword, strings.Join(c.Options.RelayPorts[1:], ","))
			if err != nil {
				panic(err)
			}
		}(port)
	}
}
// broadcastOnLocalNetwork advertises this sender on the local network for
// up to 30 seconds; the payload "croc<port>" tells discovering recipients
// which local relay port to connect to.
func (c *Client) broadcastOnLocalNetwork() {
	// look for peers first
	discoveries, err := peerdiscovery.Discover(peerdiscovery.Settings{
		Limit:     -1,
		Payload:   []byte("croc" + c.Options.RelayPorts[0]),
		Delay:     10 * time.Millisecond,
		TimeLimit: 30 * time.Second,
	})
	log.Debugf("discoveries: %+v", discoveries)
	if err != nil {
		log.Debug(err.Error())
	}
}
// transferOverLocalRelay connects the sender to its own local relay,
// waits for the recipient's handshake, and then runs the transfer,
// reporting the result on errchan. A connection failure is not fatal:
// the caller also tries the public relay concurrently.
func (c *Client) transferOverLocalRelay(options TransferOptions, errchan chan<- error) {
	// give the local relay goroutines a moment to start listening
	time.Sleep(500 * time.Millisecond)
	log.Debug("establishing connection")
	var banner string
	conn, banner, ipaddr, err := tcp.ConnectToTCPServer("localhost:"+c.Options.RelayPorts[0], c.Options.RelayPassword, c.Options.SharedSecret[:3])
	log.Debugf("banner: %s", banner)
	if err != nil {
		err = errors.Wrap(err, fmt.Sprintf("could not connect to localhost:%s", c.Options.RelayPorts[0]))
		log.Debug(err)
		// not really an error because it will try to connect over the actual relay
		return
	}
	log.Debugf("local connection established: %+v", conn)
	// block until the recipient handshakes; pings keep the connection alive
	for {
		data, _ := conn.Receive()
		if bytes.Equal(data, []byte("handshake")) {
			break
		} else if bytes.Equal(data, []byte{1}) {
			log.Debug("got ping")
		} else {
			log.Debugf("instead of handshake got: %s", data)
		}
	}
	c.conn[0] = conn
	log.Debug("exchanged header message")
	c.Options.RelayAddress = "localhost"
	// the banner lists the data-channel ports offered by the relay
	c.Options.RelayPorts = strings.Split(banner, ",")
	if c.Options.NoMultiplexing {
		log.Debug("no multiplexing")
		c.Options.RelayPorts = []string{c.Options.RelayPorts[0]}
	}
	c.ExternalIP = ipaddr
	errchan <- c.transfer(options)
}
// Send will send the specified file
func (c *Client) Send(options TransferOptions) (err error) {
err = c.sendCollectFiles(options)
if err != nil {
return
}
otherRelay := ""
if c.Options.RelayAddress != models.DEFAULT_RELAY {
otherRelay = "--relay " + c.Options.RelayAddress + " "
}
fmt.Fprintf(os.Stderr, "Code is: %s\nOn the other computer run\n\ncroc %s%s\n", c.Options.SharedSecret, otherRelay, c.Options.SharedSecret)
if c.Options.Ask {
machid, _ := machineid.ID()
fmt.Fprintf(os.Stderr, "\rYour machine ID is '%s'\n", machid)
}
// // c.spinner.Suffix = " waiting for recipient..."
// c.spinner.Start()
// create channel for quitting
// connect to the relay for messaging
errchan := make(chan error, 1)
if !c.Options.DisableLocal {
// add two things to the error channel
errchan = make(chan error, 2)
c.setupLocalRelay()
go c.broadcastOnLocalNetwork()
go c.transferOverLocalRelay(options, errchan)
}
go func() {
if !strings.Contains(c.Options.RelayAddress, ":") {
// try the default port, 9009
c.Options.RelayAddress += ":9009"
}
log.Debugf("establishing connection to %s", c.Options.RelayAddress)
var banner string
conn, banner, ipaddr, err := tcp.ConnectToTCPServer(c.Options.RelayAddress, c.Options.RelayPassword, c.Options.SharedSecret[:3], 5*time.Second)
log.Debugf("banner: %s", banner)
if err != nil {
err = errors.Wrap(err, fmt.Sprintf("could not connect to %s", c.Options.RelayAddress))
log.Debug(err)
errchan <- err
return
}
log.Debugf("connection established: %+v", conn)
for {
log.Debug("waiting for bytes")
data, errConn := conn.Receive()
if errConn != nil {
log.Debugf("[%+v] had error: %s", conn, errConn.Error())
}
if bytes.Equal(data, []byte("ips?")) {
// recipient wants to try to connect to local ips
var ips []string
// only get local ips if the local is enabled
if !c.Options.DisableLocal {
// get list of local ips
ips, err = utils.GetLocalIPs()
if err != nil {
log.Debugf("error getting local ips: %s", err.Error())
}
// prepend the port that is being listened to
ips = append([]string{c.Options.RelayPorts[0]}, ips...)
}
bips, _ := json.Marshal(ips)
conn.Send(bips)
} else if bytes.Equal(data, []byte("handshake")) {
break
} else if bytes.Equal(data, []byte{1}) {
log.Debug("got ping")
continue
} else {
log.Debugf("[%+v] got weird bytes: %+v", conn, data)
// throttle the reading
errchan <- fmt.Errorf("gracefully refusing using the public relay")
return
}
}
c.conn[0] = conn
c.Options.RelayPorts = strings.Split(banner, ",")
if c.Options.NoMultiplexing {
log.Debug("no multiplexing")
c.Options.RelayPorts = []string{c.Options.RelayPorts[0]}
}
c.ExternalIP = ipaddr
log.Debug("exchanged header message")
errchan <- c.transfer(options)
}()
err = <-errchan
if err == nil {
// return if no error
return
} else {
log.Debugf("error from errchan: %s", err.Error())
}
if !c.Options.DisableLocal {
if strings.Contains(err.Error(), "refusing files") || strings.Contains(err.Error(), "EOF") || strings.Contains(err.Error(), "bad password") {
errchan <- err
}
err = <-errchan
}
return err
}
// Receive will receive a file
func (c *Client) Receive() (err error) {
fmt.Fprintf(os.Stderr, "connecting...")
// recipient will look for peers first
// and continue if it doesn't find any within 100 ms
usingLocal := false
if !c.Options.DisableLocal {
log.Debug("attempt to discover peers")
discoveries, err := peerdiscovery.Discover(peerdiscovery.Settings{
Limit: 1,
Payload: []byte("ok"),
Delay: 10 * time.Millisecond,
TimeLimit: 100 * time.Millisecond,
})
if err == nil && len(discoveries) > 0 {
for i := 0; i < len(discoveries); i++ {
log.Debugf("discovery %d has payload: %+v", i, discoveries[i])
if !bytes.HasPrefix(discoveries[i].Payload, []byte("croc")) {
log.Debug("skipping discovery")
continue
}
log.Debug("switching to local")
portToUse := string(bytes.TrimPrefix(discoveries[0].Payload, []byte("croc")))
if portToUse == "" {
portToUse = "9009"
}
c.Options.RelayAddress = fmt.Sprintf("%s:%s",
discoveries[0].Address,
portToUse,
)
c.ExternalIPConnected = c.Options.RelayAddress
usingLocal = true
break
}
}
log.Debugf("discoveries: %+v", discoveries)
log.Debug("establishing connection")
}
if !strings.Contains(c.Options.RelayAddress, ":") {
// try the default port, 9009
c.Options.RelayAddress += ":9009"
}
log.Debugf("establishing receiver connection to %s", c.Options.RelayAddress)
var banner string
c.conn[0], banner, c.ExternalIP, err = tcp.ConnectToTCPServer(c.Options.RelayAddress, c.Options.RelayPassword, c.Options.SharedSecret[:3])
log.Debugf("banner: %s", banner)
if err != nil {
err = errors.Wrap(err, fmt.Sprintf("could not connect to %s", c.Options.RelayAddress))
return
}
log.Debugf("receiver connection established: %+v", c.conn[0])
if !usingLocal && !c.Options.DisableLocal {
// ask the sender for their local ips and port
// and try to connect to them
log.Debug("sending ips?")
var data []byte
c.conn[0].Send([]byte("ips?"))
data, err = c.conn[0].Receive()
if err != nil {
return
}
log.Debugf("ips data: %s", data)
var ips []string
json.Unmarshal(data, &ips)
if len(ips) > 1 {
port := ips[0]
ips = ips[1:]
for _, ip := range ips {
ipv4Addr, ipv4Net, errNet := net.ParseCIDR(fmt.Sprintf("%s/24", ip))
log.Debugf("ipv4Add4: %+v, ipv4Net: %+v, err: %+v", ipv4Addr, ipv4Net, errNet)
localIps, _ := utils.GetLocalIPs()
haveLocalIP := false
for _, localIP := range localIps {
localIPparsed := net.ParseIP(localIP)
if ipv4Net.Contains(localIPparsed) {
haveLocalIP = true
break
}
}
if !haveLocalIP {
log.Debugf("%s is not a local IP, skipping", ip)
continue
}
serverTry := fmt.Sprintf("%s:%s", ip, port)
conn, banner2, externalIP, errConn := tcp.ConnectToTCPServer(serverTry, c.Options.RelayPassword, c.Options.SharedSecret[:3], 50*time.Millisecond)
if errConn != nil {
log.Debugf("could not connect to " + serverTry)
continue
}
log.Debugf("local connection established to %s", serverTry)
log.Debugf("banner: %s", banner2)
// reset to the local port
banner = banner2
c.Options.RelayAddress = serverTry
c.ExternalIP = externalIP
c.conn[0].Close()
c.conn[0] = nil
c.conn[0] = conn
break
}
}
}
c.conn[0].Send([]byte("handshake"))
c.Options.RelayPorts = strings.Split(banner, ",")
if c.Options.NoMultiplexing {
log.Debug("no multiplexing")
c.Options.RelayPorts = []string{c.Options.RelayPorts[0]}
}
log.Debug("exchanged header message")
fmt.Fprintf(os.Stderr, "\rsecuring channel...")
return c.transfer(TransferOptions{})
}
// transfer runs the main message loop over the primary connection
// (c.conn[0]): the recipient kicks off the PAKE exchange, then both
// sides receive and process messages until the transfer finishes or an
// error occurs. Errors after a successful transfer are purged.
func (c *Client) transfer(options TransferOptions) (err error) {
	// connect to the server
	// quit with c.quit <- true
	c.quit = make(chan bool)
	// if recipient, initialize with sending pake information
	log.Debug("ready")
	if !c.Options.IsSender && !c.Step1ChannelSecured {
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type:  "pake",
			Bytes: c.Pake.Bytes(),
		})
		if err != nil {
			return
		}
	}
	// listen for incoming messages and process them
	for {
		var data []byte
		var done bool
		data, err = c.conn[0].Receive()
		if err != nil {
			log.Debugf("got error receiving: %s", err.Error())
			// a receive failure before the channel is secured most likely
			// means the PAKE handshake never completed
			if !c.Step1ChannelSecured {
				err = fmt.Errorf("could not secure channel")
			}
			break
		}
		done, err = c.processMessage(data)
		if err != nil {
			log.Debugf("got error processing: %s", err.Error())
			break
		}
		if done {
			break
		}
	}
	// purge errors that come from successful transfer
	if c.SuccessfulTransfer {
		if err != nil {
			log.Debugf("purging error: %s", err)
		}
		err = nil
	}
	// when piping to stdout, the temporary on-disk file is removed after
	// its contents were already printed by receiveData
	if c.Options.Stdout && !c.Options.IsSender {
		pathToFile := path.Join(
			c.FilesToTransfer[c.FilesToTransferCurrentNum].FolderRemote,
			c.FilesToTransfer[c.FilesToTransferCurrentNum].Name,
		)
		os.Remove(pathToFile)
	}
	return
}
// processMessageFileInfo ingests the sender's file manifest, asks the user to
// accept when prompting is enabled, and marks step 2 complete.
// Returns done=true when the transfer should stop (files refused).
func (c *Client) processMessageFileInfo(m message.Message) (done bool, err error) {
	var senderInfo SenderInfo
	err = json.Unmarshal(m.Bytes, &senderInfo)
	if err != nil {
		log.Error(err)
		return
	}
	c.FilesToTransfer = senderInfo.FilesToTransfer
	// human-readable summary: a single quoted name, or a file count
	fname := fmt.Sprintf("%d files", len(c.FilesToTransfer))
	if len(c.FilesToTransfer) == 1 {
		fname = fmt.Sprintf("'%s'", c.FilesToTransfer[0].Name)
	}
	totalSize := int64(0)
	for _, fi := range c.FilesToTransfer {
		totalSize += fi.Size
		// track the longest name for progress-bar alignment
		if len(fi.Name) > c.longestFilename {
			c.longestFilename = len(fi.Name)
		}
	}
	// c.spinner.Stop()
	if !c.Options.NoPrompt || c.Options.Ask || senderInfo.Ask {
		if c.Options.Ask || senderInfo.Ask {
			machID, _ := machineid.ID()
			fmt.Fprintf(os.Stderr, "\rYour machine id is '%s'.\nAccept %s (%s) from '%s'? (y/n) ", machID, fname, utils.ByteCountDecimal(totalSize), senderInfo.MachineID)
		} else {
			fmt.Fprintf(os.Stderr, "\rAccept %s (%s)? (y/n) ", fname, utils.ByteCountDecimal(totalSize))
		}
		if strings.ToLower(strings.TrimSpace(utils.GetInput(""))) != "y" {
			// tell the sender we refused before bailing out
			err = message.Send(c.conn[0], c.Key, message.Message{
				Type:    "error",
				Message: "refusing files",
			})
			return true, fmt.Errorf("refused files")
		}
	} else {
		fmt.Fprintf(os.Stderr, "\rReceiving %s (%s) \n", fname, utils.ByteCountDecimal(totalSize))
	}
	fmt.Fprintf(os.Stderr, "\nReceiving (<-%s)\n", c.ExternalIPConnected)
	log.Debug(c.FilesToTransfer)
	c.Step2FileInfoTransfered = true
	return
}
// procesMesssagePake feeds a received PAKE payload into the local PAKE state
// and, once both sides are verified, opens the extra data connections.
// NOTE(review): the name is misspelled; kept because a sibling caller uses it.
func (c *Client) procesMesssagePake(m message.Message) (err error) {
	log.Debug("received pake payload")
	// if // c.spinner.Suffix != " performing PAKE..." {
	// // c.spinner.Stop()
	// // c.spinner.Suffix = " performing PAKE..."
	// // c.spinner.Start()
	// }
	notVerified := !c.Pake.IsVerified()
	err = c.Pake.Update(m.Bytes)
	if err != nil {
		return
	}
	// reply with our PAKE bytes while the exchange is still in progress, or
	// once more from the recipient side when this update just verified it
	if (notVerified && c.Pake.IsVerified() && !c.Options.IsSender) || !c.Pake.IsVerified() {
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type:  "pake",
			Bytes: c.Pake.Bytes(),
		})
	}
	if c.Pake.IsVerified() {
		if c.Options.IsSender {
			log.Debug("generating salt")
			salt := make([]byte, 8)
			// error from rand.Read ignored — TODO confirm which rand package
			// is imported and whether the error needs handling
			rand.Read(salt)
			err = message.Send(c.conn[0], c.Key, message.Message{
				Type:  "salt",
				Bytes: salt,
			})
			if err != nil {
				return
			}
		}
		// connects to the other ports of the server for transfer
		var wg sync.WaitGroup
		wg.Add(len(c.Options.RelayPorts))
		for i := 0; i < len(c.Options.RelayPorts); i++ {
			log.Debugf("port: [%s]", c.Options.RelayPorts[i])
			go func(j int) {
				// NOTE(review): these goroutines all write the shared named
				// return `err` — looks like a data race; confirm and guard
				defer wg.Done()
				server := fmt.Sprintf("%s:%s", strings.Split(c.Options.RelayAddress, ":")[0], c.Options.RelayPorts[j])
				log.Debugf("connecting to %s", server)
				c.conn[j+1], _, _, err = tcp.ConnectToTCPServer(
					server,
					c.Options.RelayPassword,
					fmt.Sprintf("%s-%d", utils.SHA256(c.Options.SharedSecret)[:7], j),
				)
				if err != nil {
					panic(err)
				}
				log.Debugf("connected to %s", server)
				if !c.Options.IsSender {
					// recipients start reading file data on each new connection
					go c.receiveData(j)
				}
			}(i)
		}
		wg.Wait()
	}
	return
}
// processMessageSalt derives the session encryption key from the PAKE session
// key plus the received salt; the recipient echoes the salt back first, and
// the sender follows up by sharing its external IP.
func (c *Client) processMessageSalt(m message.Message) (done bool, err error) {
	log.Debug("received salt")
	if !c.Options.IsSender {
		log.Debug("sending salt back")
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type:  "salt",
			Bytes: m.Bytes,
		})
	}
	log.Debugf("session key is verified, generating encryption with salt: %x", m.Bytes)
	key, err := c.Pake.SessionKey()
	if err != nil {
		return true, err
	}
	// replace the connection key with one derived from session key + salt
	c.Key, _, err = crypt.New(key, m.Bytes)
	if err != nil {
		return true, err
	}
	log.Debugf("key = %+x", c.Key)
	if c.Options.IsSender {
		log.Debug("sending external IP")
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type:    "externalip",
			Message: c.ExternalIP,
			Bytes:   m.Bytes,
		})
	}
	return
}
// processExternalIP records the peer's external IP (echoing ours back when we
// are the recipient) and marks the channel as secured (step 1 complete).
func (c *Client) processExternalIP(m message.Message) (done bool, err error) {
	log.Debugf("received external IP: %+v", m)
	if !c.Options.IsSender {
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type:    "externalip",
			Message: c.ExternalIP,
		})
		if err != nil {
			return true, err
		}
	}
	if c.ExternalIPConnected == "" {
		// it can be preset by the local relay
		c.ExternalIPConnected = m.Message
	}
	log.Debugf("connected as %s -> %s", c.ExternalIP, c.ExternalIPConnected)
	c.Step1ChannelSecured = true
	return
}
// processMessage decodes a single encrypted message from the main connection,
// dispatches it by type, and then advances the state machine via updateState.
// done=true signals the transfer loop to stop.
func (c *Client) processMessage(payload []byte) (done bool, err error) {
	m, err := message.Decode(c.Key, payload)
	if err != nil {
		err = fmt.Errorf("problem with decoding: %s", err.Error())
		return
	}
	switch m.Type {
	case "finished":
		// peer is done; acknowledge and finish successfully
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type: "finished",
		})
		done = true
		c.SuccessfulTransfer = true
		return
	case "pake":
		err = c.procesMesssagePake(m)
		if err != nil {
			err = errors.Wrap(err, "pake not successful")
		}
	case "salt":
		done, err = c.processMessageSalt(m)
	case "externalip":
		done, err = c.processExternalIP(m)
	case "error":
		// c.spinner.Stop()
		fmt.Print("\r")
		err = fmt.Errorf("peer error: %s", m.Message)
		return true, err
	case "fileinfo":
		done, err = c.processMessageFileInfo(m)
	case "recipientready":
		// recipient tells us which file and which chunks it still needs
		var remoteFile RemoteFileRequest
		err = json.Unmarshal(m.Bytes, &remoteFile)
		if err != nil {
			return
		}
		c.FilesToTransferCurrentNum = remoteFile.FilesToTransferCurrentNum
		c.CurrentFileChunkRanges = remoteFile.CurrentFileChunkRanges
		c.CurrentFileChunks = utils.ChunkRangesToChunks(c.CurrentFileChunkRanges)
		log.Debugf("current file chunks: %+v", c.CurrentFileChunks)
		// chunkMap is read by the sendData goroutines, hence the lock
		c.mutex.Lock()
		c.chunkMap = make(map[uint64]struct{})
		for _, chunk := range c.CurrentFileChunks {
			c.chunkMap[uint64(chunk)] = struct{}{}
		}
		c.mutex.Unlock()
		c.Step3RecipientRequestFile = true
		if c.Options.Ask {
			fmt.Fprintf(os.Stderr, "Send to machine '%s'? (y/n) ", remoteFile.MachineID)
			if strings.ToLower(strings.TrimSpace(utils.GetInput(""))) != "y" {
				err = message.Send(c.conn[0], c.Key, message.Message{
					Type:    "error",
					Message: "refusing files",
				})
				done = true
				err = fmt.Errorf("refused files")
				return
			}
		}
	case "close-sender":
		// sender finished the current file; reply with close-recipient
		c.bar.Finish()
		log.Debug("close-sender received...")
		c.Step4FileTransfer = false
		c.Step3RecipientRequestFile = false
		log.Debug("sending close-recipient")
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type: "close-recipient",
		})
	case "close-recipient":
		c.Step4FileTransfer = false
		c.Step3RecipientRequestFile = false
	}
	if err != nil {
		log.Debugf("got error from processing message: %s", err.Error())
		return
	}
	err = c.updateState()
	if err != nil {
		log.Debugf("got error from updating state: %s", err.Error())
		return
	}
	return
}
// updateIfSenderChannelSecured (sender side) sends the file manifest once the
// channel is secured and the manifest has not been sent yet.
func (c *Client) updateIfSenderChannelSecured() (err error) {
	if c.Options.IsSender && c.Step1ChannelSecured && !c.Step2FileInfoTransfered {
		var b []byte
		machID, _ := machineid.ID()
		b, err = json.Marshal(SenderInfo{
			FilesToTransfer: c.FilesToTransfer,
			MachineID:       machID,
			Ask:             c.Options.Ask,
		})
		if err != nil {
			log.Error(err)
			return
		}
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type:  "fileinfo",
			Bytes: b,
		})
		if err != nil {
			return
		}
		c.Step2FileInfoTransfered = true
	}
	return
}
// recipientInitializeFile prepares the destination file for the next incoming
// transfer: it creates the destination folder, opens (or creates) the file,
// and decides whether the file must be truncated to the expected size or
// whether only the missing chunks need to be requested.
func (c *Client) recipientInitializeFile() (err error) {
	// start initiating the process to receive a new file
	log.Debugf("working on file %d", c.FilesToTransferCurrentNum)
	// recipient sets the file
	pathToFile := path.Join(
		c.FilesToTransfer[c.FilesToTransferCurrentNum].FolderRemote,
		c.FilesToTransfer[c.FilesToTransferCurrentNum].Name,
	)
	folderForFile, _ := filepath.Split(pathToFile)
	// previously the MkdirAll error was silently discarded; surface it so a
	// bad destination fails here with a clear message instead of later
	if err = os.MkdirAll(folderForFile, os.ModePerm); err != nil {
		err = errors.Wrap(err, "could not create folder for "+pathToFile)
		log.Error(err)
		return err
	}
	var errOpen error
	c.CurrentFile, errOpen = os.OpenFile(
		pathToFile,
		os.O_WRONLY, 0666)
	var truncate bool // default false
	c.CurrentFileChunks = []int64{}
	c.CurrentFileChunkRanges = []int64{}
	if errOpen == nil {
		stat, _ := c.CurrentFile.Stat()
		truncate = stat.Size() != c.FilesToTransfer[c.FilesToTransferCurrentNum].Size
		if !truncate {
			// same size on disk: only request the chunks that are missing
			c.CurrentFileChunkRanges = utils.MissingChunks(
				pathToFile,
				c.FilesToTransfer[c.FilesToTransferCurrentNum].Size,
				models.TCP_BUFFER_SIZE/2,
			)
		}
	} else {
		// file does not exist (or could not be opened for writing): create it
		c.CurrentFile, errOpen = os.Create(pathToFile)
		if errOpen != nil {
			errOpen = errors.Wrap(errOpen, "could not create "+pathToFile)
			log.Error(errOpen)
			return errOpen
		}
		truncate = true
	}
	if truncate {
		err := c.CurrentFile.Truncate(c.FilesToTransfer[c.FilesToTransferCurrentNum].Size)
		if err != nil {
			err = errors.Wrap(err, "could not truncate "+pathToFile)
			log.Error(err)
			return err
		}
	}
	return
}
// recipientGetFileReady (recipient side) finalizes the previous file when
// finished, prepares the next destination file, and sends "recipientready"
// with the chunk ranges that are still needed.
func (c *Client) recipientGetFileReady(finished bool) (err error) {
	if finished {
		// TODO: do the last finishing stuff
		log.Debug("finished")
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type: "finished",
		})
		if err != nil {
			panic(err)
		}
		c.SuccessfulTransfer = true
		c.FilesHasFinished[c.FilesToTransferCurrentNum] = struct{}{}
	}
	err = c.recipientInitializeFile()
	if err != nil {
		return
	}
	c.TotalSent = 0
	machID, _ := machineid.ID()
	bRequest, _ := json.Marshal(RemoteFileRequest{
		CurrentFileChunkRanges:    c.CurrentFileChunkRanges,
		FilesToTransferCurrentNum: c.FilesToTransferCurrentNum,
		MachineID:                 machID,
	})
	log.Debug("converting to chunk range")
	c.CurrentFileChunks = utils.ChunkRangesToChunks(c.CurrentFileChunkRanges)
	if !finished {
		// setup the progressbar
		c.setBar()
	}
	log.Debugf("sending recipient ready with %d chunks", len(c.CurrentFileChunks))
	err = message.Send(c.conn[0], c.Key, message.Message{
		Type:  "recipientready",
		Bytes: bRequest,
	})
	if err != nil {
		return
	}
	c.Step3RecipientRequestFile = true
	return
}
// createEmptyFileAndFinish creates a zero-byte file at its remote destination
// and renders an already-completed progress bar for it, since no data will be
// transferred for empty files.
func (c *Client) createEmptyFileAndFinish(fileInfo FileInfo, i int) (err error) {
	log.Debugf("touching file with folder / name")
	if !utils.Exists(fileInfo.FolderRemote) {
		if err = os.MkdirAll(fileInfo.FolderRemote, os.ModePerm); err != nil {
			log.Error(err)
			return
		}
	}
	touched, errCreate := os.Create(path.Join(fileInfo.FolderRemote, fileInfo.Name))
	if errCreate != nil {
		log.Error(errCreate)
		return errCreate
	}
	touched.Close()
	// setup the progressbar
	var label string
	if len(c.FilesToTransfer) == 1 {
		label = c.FilesToTransfer[i].Name
	} else {
		label = fmt.Sprintf("%-*s", c.longestFilename, c.FilesToTransfer[i].Name)
	}
	c.bar = progressbar.NewOptions64(1,
		progressbar.OptionOnCompletion(func() {
			fmt.Fprintf(os.Stderr, " ✔️\n")
		}),
		progressbar.OptionSetWidth(20),
		progressbar.OptionSetDescription(label),
		progressbar.OptionSetRenderBlankState(true),
		progressbar.OptionShowBytes(true),
		progressbar.OptionShowCount(),
		progressbar.OptionSetWriter(os.Stderr),
	)
	c.bar.Finish()
	return
}
// updateIfRecipientHasFileInfo (recipient side) picks the next file that is
// missing or differs by hash and requests it; if every file already matches,
// it reports the transfer as finished.
func (c *Client) updateIfRecipientHasFileInfo() (err error) {
	if !(!c.Options.IsSender && c.Step2FileInfoTransfered && !c.Step3RecipientRequestFile) {
		return
	}
	// find the next file to transfer and send that number
	// if the files are the same size, then look for missing chunks
	finished := true
	for i, fileInfo := range c.FilesToTransfer {
		if _, ok := c.FilesHasFinished[i]; ok {
			continue
		}
		log.Debugf("checking %+v", fileInfo)
		if i < c.FilesToTransferCurrentNum {
			continue
		}
		fileHash, errHash := utils.HashFile(path.Join(fileInfo.FolderRemote, fileInfo.Name))
		if fileInfo.Size == 0 {
			// zero-byte files are just touched locally, never transferred
			err = c.createEmptyFileAndFinish(fileInfo, i)
			if err != nil {
				return
			}
			continue
		}
		log.Debugf("%s %+x %+x %+v", fileInfo.Name, fileHash, fileInfo.Hash, errHash)
		if !bytes.Equal(fileHash, fileInfo.Hash) {
			log.Debugf("hashes are not equal %x != %x", fileHash, fileInfo.Hash)
		} else {
			log.Debugf("hashes are equal %x == %x", fileHash, fileInfo.Hash)
		}
		if errHash != nil {
			// probably can't find, its okay
			log.Debug(errHash)
		}
		if errHash != nil || !bytes.Equal(fileHash, fileInfo.Hash) {
			finished = false
			c.FilesToTransferCurrentNum = i
			break
		}
		// TODO: print out something about this file already existing
	}
	err = c.recipientGetFileReady(finished)
	return
}
// updateState advances the client's transfer state machine after each
// processed message; on the sender it starts streaming data once the
// recipient has requested a file.
func (c *Client) updateState() (err error) {
	err = c.updateIfSenderChannelSecured()
	if err != nil {
		return
	}
	err = c.updateIfRecipientHasFileInfo()
	if err != nil {
		return
	}
	if c.Options.IsSender && c.Step3RecipientRequestFile && !c.Step4FileTransfer {
		log.Debug("start sending data!")
		if !c.firstSend {
			fmt.Fprintf(os.Stderr, "\nSending (->%s)\n", c.ExternalIPConnected)
			c.firstSend = true
			// if there are empty files, show them as already have been transferred now
			for i := range c.FilesToTransfer {
				if c.FilesToTransfer[i].Size == 0 {
					// setup the progressbar and takedown the progress bar for empty files
					description := fmt.Sprintf("%-*s", c.longestFilename, c.FilesToTransfer[i].Name)
					if len(c.FilesToTransfer) == 1 {
						description = c.FilesToTransfer[i].Name
					}
					c.bar = progressbar.NewOptions64(1,
						progressbar.OptionOnCompletion(func() {
							fmt.Fprintf(os.Stderr, " ✔️\n")
						}),
						progressbar.OptionSetWidth(20),
						progressbar.OptionSetDescription(description),
						progressbar.OptionSetRenderBlankState(true),
						progressbar.OptionShowBytes(true),
						progressbar.OptionShowCount(),
						progressbar.OptionSetWriter(os.Stderr),
					)
					c.bar.Finish()
				}
			}
		}
		c.Step4FileTransfer = true
		// setup the progressbar
		c.setBar()
		c.TotalSent = 0
		log.Debug("beginning sending comms")
		pathToFile := path.Join(
			c.FilesToTransfer[c.FilesToTransferCurrentNum].FolderSource,
			c.FilesToTransfer[c.FilesToTransferCurrentNum].Name,
		)
		c.fread, err = os.Open(pathToFile)
		c.numfinished = 0
		if err != nil {
			return
		}
		// one sender goroutine per relay data connection
		for i := 0; i < len(c.Options.RelayPorts); i++ {
			log.Debugf("starting sending over comm %d", i)
			go c.sendData(i)
		}
	}
	return
}
// setBar builds the progress bar for the current file and pre-advances it by
// the bytes already on disk when resuming a partial transfer.
func (c *Client) setBar() {
	description := fmt.Sprintf("%-*s", c.longestFilename, c.FilesToTransfer[c.FilesToTransferCurrentNum].Name)
	if len(c.FilesToTransfer) == 1 {
		description = c.FilesToTransfer[c.FilesToTransferCurrentNum].Name
	}
	c.bar = progressbar.NewOptions64(
		c.FilesToTransfer[c.FilesToTransferCurrentNum].Size,
		progressbar.OptionOnCompletion(func() {
			fmt.Fprintf(os.Stderr, " ✔️\n")
		}),
		progressbar.OptionSetWidth(20),
		progressbar.OptionSetDescription(description),
		progressbar.OptionSetRenderBlankState(true),
		progressbar.OptionShowBytes(true),
		progressbar.OptionShowCount(),
		progressbar.OptionSetWriter(os.Stderr),
		progressbar.OptionThrottle(100*time.Millisecond),
	)
	// bytes still to be received, assuming each chunk is a full half-buffer
	byteToDo := int64(len(c.CurrentFileChunks) * models.TCP_BUFFER_SIZE / 2)
	if byteToDo > 0 {
		bytesDone := c.FilesToTransfer[c.FilesToTransferCurrentNum].Size - byteToDo
		log.Debug(byteToDo)
		log.Debug(c.FilesToTransfer[c.FilesToTransferCurrentNum].Size)
		log.Debug(bytesDone)
		if bytesDone > 0 {
			c.bar.Add64(bytesDone)
		}
	}
}
// receiveData reads encrypted, compressed chunks from data connection i+1 and
// writes each at its encoded offset into the current file; when the last
// expected chunk arrives, it closes the file and tells the sender to stop.
func (c *Client) receiveData(i int) {
	log.Debugf("%d receiving data", i)
	for {
		data, err := c.conn[i+1].Receive()
		if err != nil {
			break
		}
		// a single 0x01 byte is a keep-alive ping, not file data
		if bytes.Equal(data, []byte{1}) {
			log.Debug("got ping")
			continue
		}
		data, err = crypt.Decrypt(data, c.Key)
		if err != nil {
			panic(err)
		}
		data = compress.Decompress(data)
		// get position: the first 8 bytes are the little-endian file offset
		var position uint64
		rbuf := bytes.NewReader(data[:8])
		err = binary.Read(rbuf, binary.LittleEndian, &position)
		if err != nil {
			panic(err)
		}
		positionInt64 := int64(position)
		c.mutex.Lock()
		_, err = c.CurrentFile.WriteAt(data[8:], positionInt64)
		c.mutex.Unlock()
		if err != nil {
			panic(err)
		}
		c.bar.Add(len(data[8:]))
		c.TotalSent += int64(len(data[8:]))
		c.TotalChunksTransfered++
		// finished when every requested chunk arrived or the byte count matches
		if c.TotalChunksTransfered == len(c.CurrentFileChunks) || c.TotalSent == c.FilesToTransfer[c.FilesToTransferCurrentNum].Size {
			log.Debug("finished receiving!")
			c.CurrentFile.Close()
			if c.Options.Stdout || strings.HasPrefix(c.FilesToTransfer[c.FilesToTransferCurrentNum].Name, "croc-stdin") {
				// dump the received file to stdout when piping
				pathToFile := path.Join(
					c.FilesToTransfer[c.FilesToTransferCurrentNum].FolderRemote,
					c.FilesToTransfer[c.FilesToTransferCurrentNum].Name,
				)
				b, _ := ioutil.ReadFile(pathToFile)
				fmt.Print(string(b))
			}
			log.Debug("sending close-sender")
			err = message.Send(c.conn[0], c.Key, message.Message{
				Type: "close-sender",
			})
			if err != nil {
				panic(err)
			}
		}
	}
	return
}
// sendData streams the current file over data connection i+1. Chunks are
// distributed round-robin across connections by chunk index; only chunks the
// recipient asked for (c.chunkMap) are encrypted, compressed and sent.
func (c *Client) sendData(i int) {
	defer func() {
		log.Debugf("finished with %d", i)
		// NOTE(review): numfinished is incremented from several goroutines
		// without holding c.mutex — confirm whether this needs the lock
		c.numfinished++
		if c.numfinished == len(c.Options.RelayPorts) {
			log.Debug("closing file")
			c.fread.Close()
		}
	}()
	var readingPos int64
	pos := uint64(0)
	curi := float64(0)
	for {
		// Read file
		data := make([]byte, models.TCP_BUFFER_SIZE/2)
		// log.Debugf("%d trying to read", i)
		n, errRead := c.fread.ReadAt(data, readingPos)
		// log.Debugf("%d read %d bytes", i, n)
		readingPos += int64(n)
		// round-robin: this goroutine handles every len(RelayPorts)-th chunk
		if math.Mod(curi, float64(len(c.Options.RelayPorts))) == float64(i) {
			// check to see if this is a chunk that the recipient wants
			usableChunk := true
			c.mutex.Lock()
			if len(c.chunkMap) != 0 {
				if _, ok := c.chunkMap[pos]; !ok {
					usableChunk = false
				} else {
					delete(c.chunkMap, pos)
				}
			}
			c.mutex.Unlock()
			if usableChunk {
				// log.Debugf("sending chunk %d", pos)
				// prefix the payload with its little-endian file offset
				posByte := make([]byte, 8)
				binary.LittleEndian.PutUint64(posByte, pos)
				dataToSend, err := crypt.Encrypt(
					compress.Compress(
						append(posByte, data[:n]...),
					),
					c.Key,
				)
				if err != nil {
					panic(err)
				}
				err = c.conn[i+1].Send(dataToSend)
				if err != nil {
					panic(err)
				}
				c.bar.Add(n)
				c.TotalSent += int64(n)
				// time.Sleep(100 * time.Millisecond)
			} else {
				// log.Debugf("skipping chunk %d", pos)
			}
		}
		curi++
		pos += uint64(n)
		if errRead != nil {
			if errRead == io.EOF {
				break
			}
			panic(errRead)
		}
	}
	return
}
|
package parser
import (
"github.com/Tamrin007/monkey/ast"
"github.com/Tamrin007/monkey/lexer"
"github.com/Tamrin007/monkey/token"
)
// Parser consumes tokens from a lexer, keeping a one-token lookahead.
type Parser struct {
	l *lexer.Lexer

	// curToken is the token under examination; peekToken is the lookahead.
	curToken  token.Token
	peekToken token.Token
}
// New builds a Parser over l with curToken and peekToken both populated.
func New(l *lexer.Lexer) *Parser {
	p := &Parser{l: l}
	// advance twice so both curToken and peekToken are set
	p.nextToken()
	p.nextToken()
	return p
}
// nextToken slides the one-token lookahead window forward.
func (p *Parser) nextToken() {
	p.curToken = p.peekToken
	p.peekToken = p.l.NextToken()
}
// ParseProgram is a stub; parsing is not implemented yet, so it returns nil.
func (p *Parser) ParseProgram() *ast.Program {
	return nil
}
Implement parser for let statements
package parser
import (
"fmt"
"github.com/Tamrin007/monkey/ast"
"github.com/Tamrin007/monkey/lexer"
"github.com/Tamrin007/monkey/token"
)
// Parser consumes tokens from a lexer with one-token lookahead, collecting
// parse errors as it goes.
type Parser struct {
	l *lexer.Lexer
	// errors accumulates human-readable parse error messages
	errors []string

	// curToken is the token under examination; peekToken is the lookahead.
	curToken  token.Token
	peekToken token.Token
}
// New builds a Parser over l with an empty error list and both lookahead
// tokens populated.
func New(l *lexer.Lexer) *Parser {
	p := &Parser{
		l:      l,
		errors: []string{},
	}
	// advance twice so both curToken and peekToken are set
	p.nextToken()
	p.nextToken()
	return p
}
// Errors returns the parse error messages collected so far.
func (p *Parser) Errors() []string {
	return p.errors
}
// peekError records an error for an unexpected lookahead token type.
func (p *Parser) peekError(t token.TokenType) {
	msg := fmt.Sprintf("expected next token to be %s, got %s instead", t, p.peekToken.Type)
	p.errors = append(p.errors, msg)
}
// nextToken slides the one-token lookahead window forward.
func (p *Parser) nextToken() {
	p.curToken = p.peekToken
	p.peekToken = p.l.NextToken()
}
// ParseProgram parses tokens until EOF, appending each successfully parsed
// statement to the returned Program.
func (p *Parser) ParseProgram() *ast.Program {
	program := &ast.Program{}
	program.Statements = []ast.Statement{}

	for p.curToken.Type != token.EOF {
		stmt := p.parseStatement()
		if stmt != nil {
			program.Statements = append(program.Statements, stmt)
		}
		p.nextToken()
	}

	return program
}
// parseStatement dispatches on the current token type; only `let` statements
// are handled so far, everything else yields nil.
func (p *Parser) parseStatement() ast.Statement {
	switch p.curToken.Type {
	case token.LET:
		return p.parseLetStatement()
	default:
		return nil
	}
}
// parseLetStatement parses `let <identifier> = <expression>;`, returning nil
// (with an error already recorded by expectPeek) when the tokens don't match.
func (p *Parser) parseLetStatement() *ast.LetStatement {
	stmt := &ast.LetStatement{Token: p.curToken}

	if !p.expectPeek(token.IDENT) {
		return nil
	}

	stmt.Name = &ast.Identifier{Token: p.curToken, Value: p.curToken.Literal}

	if !p.expectPeek(token.ASSIGN) {
		return nil
	}

	// TODO: We're skipping the expressions until we encounter a semicolon
	for !p.curTokenIs(token.SEMICOLON) {
		p.nextToken()
	}

	return stmt
}
// curTokenIs reports whether the current token has type t.
func (p *Parser) curTokenIs(t token.TokenType) bool {
	return p.curToken.Type == t
}

// peekTokenIs reports whether the lookahead token has type t.
func (p *Parser) peekTokenIs(t token.TokenType) bool {
	return p.peekToken.Type == t
}
// expectPeek advances the parser when the lookahead token matches t;
// otherwise it records an error and leaves the parser where it is.
func (p *Parser) expectPeek(t token.TokenType) bool {
	if !p.peekTokenIs(t) {
		p.peekError(t)
		return false
	}
	p.nextToken()
	return true
}
|
package main
import (
"os"
"log"
"strconv"
"database/sql"
"encoding/json"
_ "github.com/mattn/go-sqlite3"
"github.com/mediocregopher/radix.v2/redis"
)
// boolToInt converts a boolean to its numeric form: true -> 1, false -> 0.
func boolToInt(val bool) uint {
	var n uint
	if val {
		n = 1
	}
	return n
}
// Town mirrors one row of the sqlite `towns` table; json tags give the field
// names used when the record is stored in redis.
type Town struct {
	Id             uint32  `json:"id"`
	Name           string  `json:"name"`
	NameTr         string  `json:"name_tr"`
	RegionId       uint32  `json:"region_id"`
	RegionalCenter bool    `json:"regional_center"`
	Latitude       float32 `json:"latitude"`
	Longitude      float32 `json:"longitude"`
	Zoom           uint32  `json:"zoom"`
}
// Region mirrors one row of the sqlite `regions` table; json tags give the
// field names used when the record is stored in redis.
type Region struct {
	Id        uint32  `json:"id"`
	Name      string  `json:"name"`
	NameTr    string  `json:"name_tr"`
	Latitude  float32 `json:"latitude"`
	Longitude float32 `json:"longitude"`
	Zoom      uint32  `json:"zoom"`
}
// CashPoint mirrors one row of the sqlite `cashpoints` table.
// Previously only Id carried a json tag, so the remaining fields marshaled
// under their Go names (Type, BankId, ...); snake_case tags are added for
// every field so the JSON stored in redis has consistent field names.
type CashPoint struct {
	Id             uint32  `json:"id"`
	Type           string  `json:"type"`
	BankId         uint32  `json:"bank_id"`
	TownId         uint32  `json:"town_id"`
	Longitude      float32 `json:"longitude"`
	Latitude       float32 `json:"latitude"`
	Address        string  `json:"address"`
	AddressComment string  `json:"address_comment"`
	MetroName      string  `json:"metro_name"`
	FreeAccess     bool    `json:"free_access"`
	MainOffice     bool    `json:"main_office"`
	WithoutWeekend bool    `json:"without_weekend"`
	RoundTheClock  bool    `json:"round_the_clock"`
	WorksAsShop    bool    `json:"works_as_shop"`
	Schedule       string  `json:"schedule"`
	Tel            string  `json:"tel"`
	Additional     string  `json:"additional"`
	Rub            bool    `json:"rub"`
	Usd            bool    `json:"usd"`
	Eur            bool    `json:"eur"`
	CashIn         bool    `json:"cash_in"`
}
// migrateTowns copies every row of the sqlite `towns` table into redis as a
// JSON blob keyed "town:<id>". Any database or redis error is fatal.
func migrateTowns(townsDb *sql.DB, redisCli *redis.Client) {
	var townsCount int
	err := townsDb.QueryRow(`SELECT COUNT(*) FROM towns`).Scan(&townsCount)
	if err != nil {
		log.Fatalf("migrate: towns: %v\n", err)
	}
	rows, err := townsDb.Query(`SELECT id, name, name_tr, region_id,
                                    regional_center, latitude,
                                    longitude, zoom FROM towns`)
	if err != nil {
		log.Fatalf("migrate: towns: %v\n", err)
	}
	defer rows.Close()
	// count of rows fully processed; starts at 0 (the previous version
	// started at 1 and so overcounted by one in the periodic progress log)
	currentTownIdx := 0
	for rows.Next() {
		town := new(Town)
		err = rows.Scan(&town.Id, &town.Name, &town.NameTr,
			&town.RegionId, &town.RegionalCenter,
			&town.Latitude, &town.Longitude, &town.Zoom)
		if err != nil {
			log.Fatal(err)
		}
		jsonData, err := json.Marshal(town)
		if err != nil {
			log.Fatal(err)
		}
		err = redisCli.Cmd("SET", "town:"+strconv.FormatUint(uint64(town.Id), 10), string(jsonData)).Err
		if err != nil {
			log.Fatal(err)
		}
		currentTownIdx++
		if currentTownIdx%500 == 0 {
			log.Printf("[%d/%d] Towns processed\n", currentTownIdx, townsCount)
		}
	}
	// surface any error that terminated the row iteration early
	if err = rows.Err(); err != nil {
		log.Fatalf("migrate: towns: %v\n", err)
	}
	log.Printf("[%d/%d] Towns processed\n", townsCount, townsCount)
}
func migrateRegions(townsDb *sql.DB, redisCli *redis.Client) {
var regionsCount int
err := townsDb.QueryRow(`SELECT COUNT(*) FROM regions`).Scan(®ionsCount)
if err != nil {
log.Fatalf("migrate: regions: %v\n", err)
}
rows, err := townsDb.Query(`SELECT id, name, name_tr,
latitude, longitude, zoom FROM regions`)
if err != nil {
log.Fatalf("migrate: regions: %v\n", err)
}
currentRegionIdx := 1
for rows.Next() {
region := new(Region)
err = rows.Scan(®ion.Id, ®ion.Name, ®ion.NameTr,
®ion.Latitude, ®ion.Longitude, ®ion.Zoom)
if err != nil {
log.Fatal(err)
}
jsonData, err := json.Marshal(region)
if err != nil {
log.Fatal(err)
}
err = redisCli.Cmd("SET", "region:" + strconv.FormatUint(uint64(region.Id), 10), string(jsonData)).Err
if err != nil {
log.Fatal(err)
}
currentRegionIdx++
if currentRegionIdx % 500 == 0 {
log.Printf("[%d/%d] Regions processed\n", currentRegionIdx, regionsCount)
}
}
log.Printf("[%d/%d] Regions processed\n", regionsCount, regionsCount)
}
// migrateCashpoints copies every row of the sqlite `cashpoints` table into
// redis: the full record as JSON under "cp:<id>", its coordinates into the
// "cashpoints" geo set, and its id into per-town and per-bank index sets.
func migrateCashpoints(cpDb *sql.DB, redisCli *redis.Client) {
	var cashpointsCount int
	err := cpDb.QueryRow(`SELECT COUNT(*) FROM cashpoints`).Scan(&cashpointsCount)
	if err != nil {
		log.Fatalf("migrate: cashpoints: %v\n", err)
	}
	rows, err := cpDb.Query(`SELECT id, type, bank_id, town_id,
                                 longitude, latitude,
                                 address, address_comment,
                                 metro_name, free_access,
                                 main_office, without_weekend,
                                 round_the_clock, works_as_shop,
                                 schedule_general, tel, additional,
                                 rub, usd, eur, cash_in FROM cashpoints`)
	if err != nil {
		log.Fatalf("migrate: cashpoints: %v\n", err)
	}
	currentCashpointIndex := 1
	for rows.Next() {
		cp := new(CashPoint)
		err = rows.Scan(&cp.Id, &cp.Type, &cp.BankId, &cp.TownId,
			&cp.Longitude, &cp.Latitude,
			&cp.Address, &cp.AddressComment,
			&cp.MetroName, &cp.FreeAccess,
			&cp.MainOffice, &cp.WithoutWeekend,
			&cp.RoundTheClock, &cp.WorksAsShop,
			&cp.Schedule, &cp.Tel, &cp.Additional,
			&cp.Rub, &cp.Usd, &cp.Eur, &cp.CashIn)
		if err != nil {
			log.Fatal(err)
		}
		cashpointIdStr := strconv.FormatUint(uint64(cp.Id), 10)
		townIdStr := strconv.FormatUint(uint64(cp.TownId), 10)
		bankIdStr := strconv.FormatUint(uint64(cp.BankId), 10)
		jsonData, err := json.Marshal(cp)
		if err != nil {
			log.Fatal(err)
		}
		// full record
		err = redisCli.Cmd("SET", "cp:"+cashpointIdStr, string(jsonData)).Err
		if err != nil {
			log.Fatal(err)
		}
		// geo index for radius queries
		err = redisCli.Cmd("GEOADD", "cashpoints", cp.Longitude, cp.Latitude, cp.Id).Err
		if err != nil {
			log.Fatal(err)
		}
		// per-town and per-bank membership sets
		err = redisCli.Cmd("SADD", "town:"+townIdStr+":cp", cp.Id).Err
		if err != nil {
			log.Fatal(err)
		}
		err = redisCli.Cmd("SADD", "bank:"+bankIdStr+":cp", cp.Id).Err
		if err != nil {
			log.Fatal(err)
		}
		currentCashpointIndex++
		if currentCashpointIndex%500 == 0 {
			log.Printf("[%d/%d] Cashpoints processed\n", currentCashpointIndex, cashpointsCount)
		}
	}
	log.Printf("[%d/%d] Cashpoints processed\n", cashpointsCount, cashpointsCount)
}
// migrate runs the three migrations: towns and regions come from townsDb,
// cashpoints from cpDb; everything lands in redis.
func migrate(townsDb *sql.DB, cpDb *sql.DB, redisCli *redis.Client) {
	migrateTowns(townsDb, redisCli)
	migrateRegions(townsDb, redisCli)
	migrateCashpoints(cpDb, redisCli)
}
func main() {
args := os.Args[1:]
if len(args) == 0 {
log.Fatal("Towns db file path is not specified")
}
if len(args) == 1 {
log.Fatal("Cashpoints db file path is not specified")
}
if len(args) == 2 {
log.Fatal("Redis database url is not specified")
}
townsDbPath := args[0]
cashpointsDbPath := args[1]
redisUrl := args[2]
townsDb, err := sql.Open("sqlite3", townsDbPath)
if err != nil {
log.Fatal(err)
}
defer townsDb.Close()
cashpointsDb, err := sql.Open("sqlite3", cashpointsDbPath)
if err != nil {
log.Fatal(err)
}
defer cashpointsDb.Close()
redisCli, err := redis.Dial("tcp", redisUrl)
if err != nil {
log.Fatal(err)
}
defer redisCli.Close()
migrate(townsDb, cashpointsDb, redisCli)
}
[C|Server] Migration tool: fixed JSON [un]marshaling
Signed-off-by: Alexey Knyshev <30ef9584b0d2dcc12fa498407e3b3ba273c50e01@gmail.com>
package main
import (
"os"
"log"
"strconv"
"database/sql"
"encoding/json"
_ "github.com/mattn/go-sqlite3"
"github.com/mediocregopher/radix.v2/redis"
)
// boolToInt maps true to 1 and false to 0.
func boolToInt(val bool) uint {
	if !val {
		return 0
	}
	return 1
}
// Town mirrors one row of the sqlite `towns` table; json tags give the field
// names used when the record is stored in redis.
type Town struct {
	Id             uint32  `json:"id"`
	Name           string  `json:"name"`
	NameTr         string  `json:"name_tr"`
	RegionId       uint32  `json:"region_id"`
	RegionalCenter bool    `json:"regional_center"`
	Latitude       float32 `json:"latitude"`
	Longitude      float32 `json:"longitude"`
	Zoom           uint32  `json:"zoom"`
}
// Region mirrors one row of the sqlite `regions` table; json tags give the
// field names used when the record is stored in redis.
type Region struct {
	Id        uint32  `json:"id"`
	Name      string  `json:"name"`
	NameTr    string  `json:"name_tr"`
	Latitude  float32 `json:"latitude"`
	Longitude float32 `json:"longitude"`
	Zoom      uint32  `json:"zoom"`
}
// CashPoint mirrors one row of the sqlite `cashpoints` table; every field
// carries a snake_case json tag so the JSON stored in redis has stable names.
type CashPoint struct {
	Id             uint32  `json:"id"`
	Type           string  `json:"type"`
	BankId         uint32  `json:"bank_id"`
	TownId         uint32  `json:"town_id"`
	Longitude      float32 `json:"longitude"`
	Latitude       float32 `json:"latitude"`
	Address        string  `json:"address"`
	AddressComment string  `json:"address_comment"`
	MetroName      string  `json:"metro_name"`
	FreeAccess     bool    `json:"free_access"`
	MainOffice     bool    `json:"main_office"`
	WithoutWeekend bool    `json:"without_weekend"`
	RoundTheClock  bool    `json:"round_the_clock"`
	WorksAsShop    bool    `json:"works_as_shop"`
	Schedule       string  `json:"schedule"`
	Tel            string  `json:"tel"`
	Additional     string  `json:"additional"`
	Rub            bool    `json:"rub"`
	Usd            bool    `json:"usd"`
	Eur            bool    `json:"eur"`
	CashIn         bool    `json:"cash_in"`
}
// migrateTowns copies every row of the sqlite `towns` table into redis as a
// JSON blob keyed "town:<id>". Any database or redis error is fatal.
func migrateTowns(townsDb *sql.DB, redisCli *redis.Client) {
	var townsCount int
	err := townsDb.QueryRow(`SELECT COUNT(*) FROM towns`).Scan(&townsCount)
	if err != nil {
		log.Fatalf("migrate: towns: %v\n", err)
	}
	rows, err := townsDb.Query(`SELECT id, name, name_tr, region_id,
                                    regional_center, latitude,
                                    longitude, zoom FROM towns`)
	if err != nil {
		log.Fatalf("migrate: towns: %v\n", err)
	}
	// NOTE(review): the counter starts at 1 and is bumped after each row, so
	// the periodic log reports one more than actually processed — confirm
	currentTownIdx := 1
	for rows.Next() {
		town := new(Town)
		err = rows.Scan(&town.Id, &town.Name, &town.NameTr,
			&town.RegionId, &town.RegionalCenter,
			&town.Latitude, &town.Longitude, &town.Zoom)
		if err != nil {
			log.Fatal(err)
		}
		jsonData, err := json.Marshal(town)
		if err != nil {
			log.Fatal(err)
		}
		err = redisCli.Cmd("SET", "town:"+strconv.FormatUint(uint64(town.Id), 10), string(jsonData)).Err
		if err != nil {
			log.Fatal(err)
		}
		currentTownIdx++
		if currentTownIdx%500 == 0 {
			log.Printf("[%d/%d] Towns processed\n", currentTownIdx, townsCount)
		}
	}
	log.Printf("[%d/%d] Towns processed\n", townsCount, townsCount)
}
func migrateRegions(townsDb *sql.DB, redisCli *redis.Client) {
var regionsCount int
err := townsDb.QueryRow(`SELECT COUNT(*) FROM regions`).Scan(®ionsCount)
if err != nil {
log.Fatalf("migrate: regions: %v\n", err)
}
rows, err := townsDb.Query(`SELECT id, name, name_tr,
latitude, longitude, zoom FROM regions`)
if err != nil {
log.Fatalf("migrate: regions: %v\n", err)
}
currentRegionIdx := 1
for rows.Next() {
region := new(Region)
err = rows.Scan(®ion.Id, ®ion.Name, ®ion.NameTr,
®ion.Latitude, ®ion.Longitude, ®ion.Zoom)
if err != nil {
log.Fatal(err)
}
jsonData, err := json.Marshal(region)
if err != nil {
log.Fatal(err)
}
err = redisCli.Cmd("SET", "region:" + strconv.FormatUint(uint64(region.Id), 10), string(jsonData)).Err
if err != nil {
log.Fatal(err)
}
currentRegionIdx++
if currentRegionIdx % 500 == 0 {
log.Printf("[%d/%d] Regions processed\n", currentRegionIdx, regionsCount)
}
}
log.Printf("[%d/%d] Regions processed\n", regionsCount, regionsCount)
}
// migrateCashpoints copies every row of the sqlite `cashpoints` table into
// redis: the full record as JSON under "cp:<id>", its coordinates into the
// "cashpoints" geo set, and its id into per-town and per-bank index sets.
func migrateCashpoints(cpDb *sql.DB, redisCli *redis.Client) {
	var cashpointsCount int
	err := cpDb.QueryRow(`SELECT COUNT(*) FROM cashpoints`).Scan(&cashpointsCount)
	if err != nil {
		log.Fatalf("migrate: cashpoints: %v\n", err)
	}
	rows, err := cpDb.Query(`SELECT id, type, bank_id, town_id,
                                 longitude, latitude,
                                 address, address_comment,
                                 metro_name, free_access,
                                 main_office, without_weekend,
                                 round_the_clock, works_as_shop,
                                 schedule_general, tel, additional,
                                 rub, usd, eur, cash_in FROM cashpoints`)
	if err != nil {
		log.Fatalf("migrate: cashpoints: %v\n", err)
	}
	currentCashpointIndex := 1
	for rows.Next() {
		cp := new(CashPoint)
		err = rows.Scan(&cp.Id, &cp.Type, &cp.BankId, &cp.TownId,
			&cp.Longitude, &cp.Latitude,
			&cp.Address, &cp.AddressComment,
			&cp.MetroName, &cp.FreeAccess,
			&cp.MainOffice, &cp.WithoutWeekend,
			&cp.RoundTheClock, &cp.WorksAsShop,
			&cp.Schedule, &cp.Tel, &cp.Additional,
			&cp.Rub, &cp.Usd, &cp.Eur, &cp.CashIn)
		if err != nil {
			log.Fatal(err)
		}
		cashpointIdStr := strconv.FormatUint(uint64(cp.Id), 10)
		townIdStr := strconv.FormatUint(uint64(cp.TownId), 10)
		bankIdStr := strconv.FormatUint(uint64(cp.BankId), 10)
		jsonData, err := json.Marshal(cp)
		if err != nil {
			log.Fatal(err)
		}
		// full record
		err = redisCli.Cmd("SET", "cp:"+cashpointIdStr, string(jsonData)).Err
		if err != nil {
			log.Fatal(err)
		}
		// geo index for radius queries
		err = redisCli.Cmd("GEOADD", "cashpoints", cp.Longitude, cp.Latitude, cp.Id).Err
		if err != nil {
			log.Fatal(err)
		}
		// per-town and per-bank membership sets
		err = redisCli.Cmd("SADD", "town:"+townIdStr+":cp", cp.Id).Err
		if err != nil {
			log.Fatal(err)
		}
		err = redisCli.Cmd("SADD", "bank:"+bankIdStr+":cp", cp.Id).Err
		if err != nil {
			log.Fatal(err)
		}
		currentCashpointIndex++
		if currentCashpointIndex%500 == 0 {
			log.Printf("[%d/%d] Cashpoints processed\n", currentCashpointIndex, cashpointsCount)
		}
	}
	log.Printf("[%d/%d] Cashpoints processed\n", cashpointsCount, cashpointsCount)
}
// migrate runs the three migrations: towns and regions come from townsDb,
// cashpoints from cpDb; everything lands in redis.
func migrate(townsDb *sql.DB, cpDb *sql.DB, redisCli *redis.Client) {
	migrateTowns(townsDb, redisCli)
	migrateRegions(townsDb, redisCli)
	migrateCashpoints(cpDb, redisCli)
}
func main() {
args := os.Args[1:]
if len(args) == 0 {
log.Fatal("Towns db file path is not specified")
}
if len(args) == 1 {
log.Fatal("Cashpoints db file path is not specified")
}
if len(args) == 2 {
log.Fatal("Redis database url is not specified")
}
townsDbPath := args[0]
cashpointsDbPath := args[1]
redisUrl := args[2]
townsDb, err := sql.Open("sqlite3", townsDbPath)
if err != nil {
log.Fatal(err)
}
defer townsDb.Close()
cashpointsDb, err := sql.Open("sqlite3", cashpointsDbPath)
if err != nil {
log.Fatal(err)
}
defer cashpointsDb.Close()
redisCli, err := redis.Dial("tcp", redisUrl)
if err != nil {
log.Fatal(err)
}
defer redisCli.Close()
migrate(townsDb, cashpointsDb, redisCli)
}
|
/*
Copyright (c) 2010 Andrea Fazzi
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package prettytest
import (
"testing"
"os"
"io/ioutil"
)
var state, beforeState, afterState, beforeAllState, afterAllState int
type testSuite struct { Suite }
type beforeAfterSuite struct { Suite }
type bddFormatterSuite struct { Suite }
func (suite *testSuite) TestTrueFalse() {
suite.True(true)
suite.False(false)
}
func (suite *testSuite) TestEqualNotEqual() {
suite.Equal("foo", "foo")
suite.NotEqual("foo", "bar")
}
func (suite *testSuite) TestNil() {
suite.Nil(nil)
}
func (suite *testSuite) TestNotNil() {
suite.NotNil([]byte{1,2,3})
}
func (suite *testSuite) TestPath() {
ioutil.WriteFile("testfile", nil, 0600)
suite.Path("testfile")
// suite.Path("foo")
// suite.True(suite.Failed())
}
func (suite *testSuite) TestPending() {
suite.Pending()
}
func (suite *testSuite) After() {
os.Remove("testfile")
}
func (suite *beforeAfterSuite) Before() {
state += 2
beforeState++
}
func (suite *beforeAfterSuite) After() {
state--
afterState--
}
func (suite *beforeAfterSuite) BeforeAll() {
state = 0
beforeAllState++
}
func (suite *beforeAfterSuite) AfterAll() {
state = 0
afterAllState--
}
func (suite *beforeAfterSuite) TestSetup_1() {
suite.Equal(2, state)
}
func (suite *beforeAfterSuite) TestSetup_2() {
suite.Equal(3, state)
}
func TestPrettyTest(t *testing.T) {
Run(
t,
new(testSuite),
new(beforeAfterSuite),
)
if beforeAllState != 1 {
t.Errorf("beforeAllState should be 1 after all tests but was %d\n", beforeAllState)
}
if afterAllState != -1 {
t.Errorf("afterAllState should be -1 after all tests but was %d\n", afterAllState)
}
}
func (suite *bddFormatterSuite) Should_use_green_on_passing_examples() {
suite.True(true)
}
func (suite *bddFormatterSuite) Should_use_yellow_on_pending_examples() {
suite.Pending()
}
func TestBDDStyleSpecs(t *testing.T) {
RunWithFormatter(
t,
&BDDFormatter{Description: "BDD Formatter"},
new(bddFormatterSuite),
)
}
Remove trailing spaces.
/*
Copyright (c) 2010 Andrea Fazzi
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package prettytest
import (
"testing"
"os"
"io/ioutil"
)
var state, beforeState, afterState, beforeAllState, afterAllState int
type testSuite struct { Suite }
type beforeAfterSuite struct { Suite }
type bddFormatterSuite struct { Suite }
func (suite *testSuite) TestTrueFalse() {
suite.True(true)
suite.False(false)
}
func (suite *testSuite) TestEqualNotEqual() {
suite.Equal("foo", "foo")
suite.NotEqual("foo", "bar")
}
func (suite *testSuite) TestNil() {
suite.Nil(nil)
}
func (suite *testSuite) TestNotNil() {
suite.NotNil([]byte{1,2,3})
}
func (suite *testSuite) TestPath() {
ioutil.WriteFile("testfile", nil, 0600)
suite.Path("testfile")
// suite.Path("foo")
// suite.True(suite.Failed())
}
func (suite *testSuite) TestPending() {
suite.Pending()
}
func (suite *testSuite) After() {
os.Remove("testfile")
}
func (suite *beforeAfterSuite) Before() {
state += 2
beforeState++
}
func (suite *beforeAfterSuite) After() {
state--
afterState--
}
func (suite *beforeAfterSuite) BeforeAll() {
state = 0
beforeAllState++
}
func (suite *beforeAfterSuite) AfterAll() {
state = 0
afterAllState--
}
func (suite *beforeAfterSuite) TestSetup_1() {
suite.Equal(2, state)
}
func (suite *beforeAfterSuite) TestSetup_2() {
suite.Equal(3, state)
}
// TestPrettyTest drives both suites through prettytest's Run and then checks
// that the BeforeAll/AfterAll hooks each fired exactly once: BeforeAll
// increments beforeAllState (expected 1) and AfterAll decrements
// afterAllState (expected -1).
func TestPrettyTest(t *testing.T) {
	Run(
		t,
		new(testSuite),
		new(beforeAfterSuite),
	)
	if beforeAllState != 1 {
		t.Errorf("beforeAllState should be 1 after all tests but was %d\n", beforeAllState)
	}
	if afterAllState != -1 {
		t.Errorf("afterAllState should be -1 after all tests but was %d\n", afterAllState)
	}
}
func (suite *bddFormatterSuite) Should_use_green_on_passing_examples() {
suite.True(true)
}
func (suite *bddFormatterSuite) Should_use_yellow_on_pending_examples() {
suite.Pending()
}
func TestBDDStyleSpecs(t *testing.T) {
RunWithFormatter(
t,
&BDDFormatter{Description: "BDD Formatter"},
new(bddFormatterSuite),
)
}
|
package spruce
import (
"regexp"
yaml "gopkg.in/yaml.v2"
"github.com/JulzDiverse/aviator"
"github.com/JulzDiverse/aviator/filemanager"
"github.com/geofffranks/simpleyaml"
. "github.com/geofffranks/spruce"
"github.com/starkandwayne/goutils/ansi"
)
type SpruceClient struct {
store aviator.FileStore
}
var concourseRegex = `(\{\{|\+\+)([-\_\.\/\w\p{L}\/]+)(\}\}|\+\+)`
var re = regexp.MustCompile("(" + concourseRegex + ")")
var dere = regexp.MustCompile("['\"](" + concourseRegex + ")[\"']")
func New() *SpruceClient {
return &SpruceClient{
filemanager.Store(),
}
}
func NewWithFileFilemanager(filemanager aviator.FileStore) *SpruceClient {
return &SpruceClient{
filemanager,
}
}
// MergeWithOpts merges the YAML files named in options.Files into one
// document, evaluates spruce operators over the result (unless SkipEval is
// set), and returns the merged tree marshalled back to YAML.
func (sc *SpruceClient) MergeWithOpts(options aviator.MergeConf) ([]byte, error) {
	root := make(map[interface{}]interface{})
	err := sc.mergeAllDocs(root, options.Files, options.FallbackAppend)
	if err != nil {
		return nil, err
	}
	ev := &Evaluator{Tree: root, SkipEval: options.SkipEval}
	// BUG FIX: the error from Run was previously overwritten by the
	// yaml.Marshal assignment below and silently lost; surface it.
	if err = ev.Run(options.Prune, options.CherryPicks); err != nil {
		return nil, err
	}
	resultYml, err := yaml.Marshal(ev.Tree)
	if err != nil {
		return nil, err
	}
	return resultYml, nil
}
func (sc *SpruceClient) MergeWithOptsRaw(options aviator.MergeConf) (map[interface{}]interface{}, error) {
root := make(map[interface{}]interface{})
err := sc.mergeAllDocs(root, options.Files, options.FallbackAppend)
if err != nil {
return nil, err
}
ev := &Evaluator{Tree: root, SkipEval: options.SkipEval}
err = ev.Run(options.Prune, options.CherryPicks)
return ev.Tree, err
}
// mergeAllDocs reads each file in paths via the client's file store, quotes
// concourse-style {{...}}/++...++ placeholders so they survive YAML parsing,
// and merges every document into root. It stops at the first read, parse, or
// merge error.
func (sc *SpruceClient) mergeAllDocs(root map[interface{}]interface{}, paths []string, fallbackAppend bool) error {
	m := &Merger{AppendByDefault: fallbackAppend}
	for _, path := range paths {
		data, ok := sc.store.ReadFile(path)
		if !ok {
			return ansi.Errorf("@R{Error reading file from filesystem or internal datastore} @m{%s} \n", path)
		}
		data = quoteConcourse(data)
		doc, err := parseYAML(data)
		if err != nil {
			return ansi.Errorf("@m{%s}: @R{%s}\n", path, err.Error())
		}
		// BUG FIX: Merge's return value was previously discarded;
		// report a merge failure immediately instead of dropping it.
		if err := m.Merge(root, doc); err != nil {
			return err
		}
	}
	return m.Error()
}
func parseYAML(data []byte) (map[interface{}]interface{}, error) {
y, err := simpleyaml.NewYaml(data)
if err != nil {
return nil, err
}
doc, err := y.Map()
if err != nil {
return nil, ansi.Errorf("@R{Root of YAML document is not a hash/map}: %s\n", err.Error())
}
return doc, nil
}
func quoteConcourse(input []byte) []byte {
return re.ReplaceAll(input, []byte("\"$1\""))
}
func dequoteConcourse(input []byte) string {
return dere.ReplaceAllString(string(input), "$1")
}
Add error handling for spruce.
package spruce
import (
"regexp"
yaml "gopkg.in/yaml.v2"
"github.com/JulzDiverse/aviator"
"github.com/JulzDiverse/aviator/filemanager"
"github.com/geofffranks/simpleyaml"
. "github.com/geofffranks/spruce"
"github.com/starkandwayne/goutils/ansi"
)
type SpruceClient struct {
store aviator.FileStore
}
var concourseRegex = `(\{\{|\+\+)([-\_\.\/\w\p{L}\/]+)(\}\}|\+\+)`
var re = regexp.MustCompile("(" + concourseRegex + ")")
var dere = regexp.MustCompile("['\"](" + concourseRegex + ")[\"']")
func New() *SpruceClient {
return &SpruceClient{
filemanager.Store(),
}
}
func NewWithFileFilemanager(filemanager aviator.FileStore) *SpruceClient {
return &SpruceClient{
filemanager,
}
}
func (sc *SpruceClient) MergeWithOpts(options aviator.MergeConf) ([]byte, error) {
root := make(map[interface{}]interface{})
err := sc.mergeAllDocs(root, options.Files, options.FallbackAppend)
if err != nil {
return nil, err
}
ev := &Evaluator{Tree: root, SkipEval: options.SkipEval}
err = ev.Run(options.Prune, options.CherryPicks)
if err != nil {
return nil, err
}
resultYml, err := yaml.Marshal(ev.Tree)
if err != nil {
return nil, err
}
return resultYml, nil
}
// MergeWithOptsRaw behaves like MergeWithOpts but returns the merged and
// evaluated tree directly instead of marshalling it to YAML.
func (sc *SpruceClient) MergeWithOptsRaw(options aviator.MergeConf) (map[interface{}]interface{}, error) {
	root := make(map[interface{}]interface{})
	err := sc.mergeAllDocs(root, options.Files, options.FallbackAppend)
	if err != nil {
		return nil, err
	}
	ev := &Evaluator{Tree: root, SkipEval: options.SkipEval}
	// Consistency with MergeWithOpts: on an evaluation error do not hand
	// back a partially evaluated tree — callers should see a nil result.
	if err := ev.Run(options.Prune, options.CherryPicks); err != nil {
		return nil, err
	}
	return ev.Tree, nil
}
func (sc *SpruceClient) mergeAllDocs(root map[interface{}]interface{}, paths []string, fallbackAppend bool) error {
m := &Merger{AppendByDefault: fallbackAppend}
for _, path := range paths {
var err error
data, ok := sc.store.ReadFile(path)
if !ok {
return ansi.Errorf("@R{Error reading file from filesystem or internal datastore} @m{%s} \n", path)
}
data = quoteConcourse(data)
doc, err := parseYAML(data)
if err != nil {
return ansi.Errorf("@m{%s}: @R{%s}\n", path, err.Error())
}
err = m.Merge(root, doc)
if err != nil {
return err
}
}
return m.Error()
}
// parseYAML decodes data as YAML and requires the document root to be a
// hash/map; any other root type is reported as an error.
func parseYAML(data []byte) (map[interface{}]interface{}, error) {
	y, err := simpleyaml.NewYaml(data)
	if err != nil {
		return nil, err
	}
	doc, err := y.Map()
	if err != nil {
		return nil, ansi.Errorf("@R{Root of YAML document is not a hash/map}: %s\n", err.Error())
	}
	return doc, nil
}
// quoteConcourse wraps concourse-style placeholders ({{...}} or ++...++)
// in double quotes so the YAML parser treats them as plain strings.
func quoteConcourse(input []byte) []byte {
	return re.ReplaceAll(input, []byte("\"$1\""))
}
// dequoteConcourse strips the quotes added by quoteConcourse, restoring
// the original placeholder text.
func dequoteConcourse(input []byte) string {
	return dere.ReplaceAllString(string(input), "$1")
}
|
//
// Copyright © 2012 Guy M. Allard
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Show a number of queue writers and readers operating concurrently.
// Try to be realistic about workloads.
// Receiver checks messages for proper queue and message number.
/*
Send and receive many STOMP messages using multiple queues and goroutines
to service each send or receive instance. Each sender and receiver
operates under a unique network connection.
*/
package main
import (
"crypto/rand"
"fmt"
"github.com/gmallard/stompngo"
"github.com/gmallard/stompngo_examples/sngecomm"
"log"
"math/big"
"net"
"runtime"
"strings"
"sync"
"time"
)
var exampid = "srmgor_11c:"
var wgsend sync.WaitGroup
var wgrecv sync.WaitGroup
// We 'stagger' between each message send and message receive for a random
// amount of time.
// Vary these for experimental purposes. YMMV.
var max int64 = 1e9 // Max stagger time (nanoseconds)
var min int64 = max / 10 // Min stagger time (nanoseconds)
// Vary these for experimental purposes. YMMV.
var send_factor int64 = 1 // Send factor time
var recv_factor int64 = 1 // Receive factor time
// Wait flags
var send_wait = true
var recv_wait = true
// Number of messages
var nmsgs = 1
// Get a duration between min amd max
// NOTE(review): with r uniform in [0, max-min), (min+r)/2 lies in
// [min/2, max/2) — below the stated minimum. Confirm whether the halving
// is intentional before relying on the documented bounds.
func timeBetween(min, max int64) int64 {
	br, _ := rand.Int(rand.Reader, big.NewInt(max-min)) // Ignore errors here
	return (br.Add(big.NewInt(min), br).Int64()) / 2
}
func sendMessages(conn *stompngo.Connection, qnum int) {
qns := fmt.Sprintf("%d", qnum) // queue number
qp := sngecomm.Dest() // queue name prefix
q := qp + "." + qns
fmt.Println(exampid, "send queue name:", q, qnum)
h := stompngo.Headers{"destination", q} // send Headers
if sngecomm.Persistent() {
h = h.Add("persistent", "true")
}
fmt.Println(exampid, "send starts", nmsgs, qnum)
// Send messages
for n := 1; n <= nmsgs; n++ {
si := fmt.Sprintf("%d", n)
// Generate a message to send ...............
m := exampid + "|" + "payload" + "|qnum:" + qns + "|msgnum:" + si
fmt.Println(exampid, "send message", m, qnum)
e := conn.Send(h, m)
if e != nil {
log.Fatalln(exampid, "send:", e, qnum)
}
if send_wait {
runtime.Gosched() // yield for this example
time.Sleep(time.Duration(send_factor * timeBetween(min, max))) // Time to build next message
}
}
}
func receiveMessages(conn *stompngo.Connection, qnum int) {
qns := fmt.Sprintf("%d", qnum) // queue number
qp := sngecomm.Dest() // queue name prefix
q := qp + "." + qns
fmt.Println(exampid, "recv queue name:", q, qnum)
// Subscribe
u := stompngo.Uuid() // A unique subscription ID
h := stompngo.Headers{"destination", q, "id", u}
r, e := conn.Subscribe(h)
if e != nil {
log.Fatalln(exampid, "recv subscribe", e, qnum)
}
// Receive messages
fmt.Println(exampid, "recv starts", nmsgs, qnum)
for n := 1; n <= nmsgs; n++ {
d := <-r
if d.Error != nil {
log.Fatalln(exampid, "recv read:", d.Error, qnum)
}
// Process the inbound message .................
m := d.Message.BodyString()
fmt.Println(exampid, "recv message", m, qnum)
// Sanity check the queue and message numbers
mns := fmt.Sprintf("%d", n) // message number
t := "|qnum:" + qns + "|msgnum:" + mns
if !strings.Contains(m, t) {
log.Fatalln(exampid, "recv bad message", m, t, qnum)
}
if recv_wait {
runtime.Gosched() // yield for this example
time.Sleep(time.Duration(recv_factor * timeBetween(min, max))) // Time to process this message
}
}
// Unsubscribe
e = conn.Unsubscribe(h)
if e != nil {
log.Fatalln(exampid, "recv unsubscribe", e, qnum)
}
//
}
func runReceiver(qnum int) {
fmt.Println(exampid, "recv start for queue number", qnum)
// Network Open
h, p := sngecomm.HostAndPort11() // a 1.1 connect
n, e := net.Dial("tcp", net.JoinHostPort(h, p))
if e != nil {
log.Fatalln(exampid, "recv nectonnr:", qnum, e) // Handle this ......
}
fmt.Println(exampid, "recv network open complete", qnum)
fmt.Println(exampid, "recv network local", n.LocalAddr().String(), qnum)
fmt.Println(exampid, "recv network remote", n.RemoteAddr().String(), qnum)
// Stomp connect, 1.1
ch := stompngo.Headers{"host", h, "accept-version", "1.1"}
conn, e := stompngo.Connect(n, ch)
if e != nil {
log.Fatalln(exampid, "recv stompconnect:", qnum, e) // Handle this ......
}
fmt.Println(exampid, "recv connection complete:", qnum)
// Receives
receiveMessages(conn, qnum)
fmt.Println(exampid, "recv receives complete:", qnum)
// Disconnect from Stomp server
eh := stompngo.Headers{"recv_discqueue", fmt.Sprintf("%d", qnum)}
e = conn.Disconnect(eh)
if e != nil {
log.Fatalln(exampid, "recv disconnects:", qnum, e) // Handle this ......
}
fmt.Println(exampid, "recv disconnected:", qnum)
// Network close
e = n.Close()
if e != nil {
log.Fatalln(exampid, "recv netcloser", qnum, e) // Handle this ......
}
fmt.Println(exampid, "recv network close complete", qnum)
fmt.Println(exampid, "recv end for queue number", qnum)
wgrecv.Done()
}
func runSender(qnum int) {
fmt.Println(exampid, "send start for queue number", qnum)
// Network Open
h, p := sngecomm.HostAndPort11() // a 1.1 connect
n, e := net.Dial("tcp", net.JoinHostPort(h, p))
if e != nil {
log.Fatalln(exampid, "send nectonnr:", qnum, e) // Handle this ......
}
fmt.Println(exampid, "send network open complete", qnum)
fmt.Println(exampid, "send network local", n.LocalAddr().String(), qnum)
fmt.Println(exampid, "send network remote", n.RemoteAddr().String(), qnum)
// Stomp connect, 1.1
ch := stompngo.Headers{"host", h, "accept-version", "1.1"}
conn, e := stompngo.Connect(n, ch)
if e != nil {
log.Fatalln(exampid, "send stompconnect:", qnum, e) // Handle this ......
}
fmt.Println(exampid, "send connection complete:", qnum)
//
sendMessages(conn, qnum)
fmt.Println(exampid, "send sends complete:", qnum)
// Disconnect from Stomp server
eh := stompngo.Headers{"send_discqueue", fmt.Sprintf("%d", qnum)}
e = conn.Disconnect(eh)
if e != nil {
log.Fatalln(exampid, "send disconnects:", qnum, e) // Handle this ......
}
fmt.Println(exampid, "send disconnected:", qnum)
// Network close
e = n.Close()
if e != nil {
log.Fatalln(exampid, "send netcloser", qnum, e) // Handle this ......
}
fmt.Println(exampid, "send network close complete", qnum)
fmt.Println(exampid, "send end for queue number", qnum)
wgsend.Done()
}
func main() {
fmt.Println(exampid, "starts")
if sngecomm.SetMAXPROCS() {
nc := runtime.NumCPU()
fmt.Println(exampid, "number of CPUs is:", nc)
c := runtime.GOMAXPROCS(nc)
fmt.Println(exampid, "previous number of GOMAXPROCS is:", c)
fmt.Println(exampid, "current number of GOMAXPROCS is:", runtime.GOMAXPROCS(-1))
}
//
send_wait = sngecomm.SendWait()
recv_wait = sngecomm.RecvWait()
//
numq := sngecomm.Nqs()
fmt.Println(exampid, "numq:", numq)
nmsgs = sngecomm.Nmsgs() // message count
fmt.Println(exampid, "nmsgs:", nmsgs)
//
fmt.Println(exampid, "starting receivers")
for q := 1; q <= numq; q++ {
wgrecv.Add(1)
go runReceiver(q)
}
fmt.Println(exampid, "started receivers")
//
fmt.Println(exampid, "starting senders")
for q := 1; q <= numq; q++ {
wgsend.Add(1)
go runSender(q)
}
fmt.Println(exampid, "started senders")
//
wgsend.Wait()
fmt.Println(exampid, "senders complete")
wgrecv.Wait()
fmt.Println(exampid, "receivers complete")
//
fmt.Println(exampid, "ends")
}
Emit local connection info on errors.
//
// Copyright © 2012 Guy M. Allard
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Show a number of queue writers and readers operating concurrently.
// Try to be realistic about workloads.
// Receiver checks messages for proper queue and message number.
/*
Send and receive many STOMP messages using multiple queues and goroutines
to service each send or receive instance. Each sender and receiver
operates under a unique network connection.
*/
package main
import (
"crypto/rand"
"fmt"
"github.com/gmallard/stompngo"
"github.com/gmallard/stompngo_examples/sngecomm"
"log"
"math/big"
"net"
"runtime"
"strings"
"sync"
"time"
)
var exampid = "srmgor_11c:"
var wgsend sync.WaitGroup
var wgrecv sync.WaitGroup
// We 'stagger' between each message send and message receive for a random
// amount of time.
// Vary these for experimental purposes. YMMV.
var max int64 = 1e9 // Max stagger time (nanoseconds)
var min int64 = max / 10 // Min stagger time (nanoseconds)
// Vary these for experimental purposes. YMMV.
var send_factor int64 = 1 // Send factor time
var recv_factor int64 = 1 // Receive factor time
// Wait flags
var send_wait = true
var recv_wait = true
// Number of messages
var nmsgs = 1
// timeBetween returns a random stagger time, in nanoseconds, uniformly
// distributed over the half-open interval [min, max).
func timeBetween(min, max int64) int64 {
	// crypto/rand with a positive bound does not fail in practice here.
	br, _ := rand.Int(rand.Reader, big.NewInt(max-min)) // Ignore errors here
	// BUG FIX: the previous (min+r)/2 arithmetic produced values in
	// [min/2, max/2), below the documented minimum. min + r, with r
	// uniform in [0, max-min), lands in [min, max) as documented.
	return min + br.Int64()
}
// sendMessages publishes nmsgs messages to queue number qnum over conn.
// nc is the underlying network connection; its local address is included in
// fatal log output so a failure can be tied to a specific connection.
// Any send error aborts the whole process via log.Fatalln.
func sendMessages(conn *stompngo.Connection, qnum int, nc net.Conn) {
	qns := fmt.Sprintf("%d", qnum) // queue number
	qp := sngecomm.Dest()          // queue name prefix
	q := qp + "." + qns
	fmt.Println(exampid, "send queue name:", q, qnum)
	h := stompngo.Headers{"destination", q} // send Headers
	if sngecomm.Persistent() {
		h = h.Add("persistent", "true")
	}
	fmt.Println(exampid, "send starts", nmsgs, qnum)
	// Send messages; each body embeds the queue and message number so the
	// receiver can sanity-check delivery.
	for n := 1; n <= nmsgs; n++ {
		si := fmt.Sprintf("%d", n)
		// Generate a message to send ...............
		m := exampid + "|" + "payload" + "|qnum:" + qns + "|msgnum:" + si
		fmt.Println(exampid, "send message", m, qnum)
		e := conn.Send(h, m)
		if e != nil {
			log.Fatalln(exampid, "send:", e, nc.LocalAddr().String(), qnum)
		}
		// Optionally stagger sends by a random delay to simulate a
		// realistic workload (see send_wait / send_factor above).
		if send_wait {
			runtime.Gosched()                                              // yield for this example
			time.Sleep(time.Duration(send_factor * timeBetween(min, max))) // Time to build next message
		}
	}
}
func receiveMessages(conn *stompngo.Connection, qnum int, nc net.Conn) {
qns := fmt.Sprintf("%d", qnum) // queue number
qp := sngecomm.Dest() // queue name prefix
q := qp + "." + qns
fmt.Println(exampid, "recv queue name:", q, qnum)
// Subscribe
u := stompngo.Uuid() // A unique subscription ID
h := stompngo.Headers{"destination", q, "id", u}
r, e := conn.Subscribe(h)
if e != nil {
log.Fatalln(exampid, "recv subscribe", e, nc.LocalAddr().String(), qnum)
}
// Receive messages
fmt.Println(exampid, "recv starts", nmsgs, qnum)
for n := 1; n <= nmsgs; n++ {
d := <-r
if d.Error != nil {
log.Fatalln(exampid, "recv read:", d.Error, nc.LocalAddr().String(), qnum)
}
// Process the inbound message .................
m := d.Message.BodyString()
fmt.Println(exampid, "recv message", m, qnum)
// Sanity check the queue and message numbers
mns := fmt.Sprintf("%d", n) // message number
t := "|qnum:" + qns + "|msgnum:" + mns
if !strings.Contains(m, t) {
log.Fatalln(exampid, "recv bad message", m, t, nc.LocalAddr().String(), qnum)
}
if recv_wait {
runtime.Gosched() // yield for this example
time.Sleep(time.Duration(recv_factor * timeBetween(min, max))) // Time to process this message
}
}
// Unsubscribe
e = conn.Unsubscribe(h)
if e != nil {
log.Fatalln(exampid, "recv unsubscribe", e, nc.LocalAddr().String(), qnum)
}
//
}
func runReceiver(qnum int) {
fmt.Println(exampid, "recv start for queue number", qnum)
// Network Open
h, p := sngecomm.HostAndPort11() // a 1.1 connect
n, e := net.Dial("tcp", net.JoinHostPort(h, p))
if e != nil {
log.Fatalln(exampid, "recv nectonnr:", qnum, e) // Handle this ......
}
fmt.Println(exampid, "recv network open complete", qnum)
fmt.Println(exampid, "recv network local", n.LocalAddr().String(), qnum)
fmt.Println(exampid, "recv network remote", n.RemoteAddr().String(), qnum)
// Stomp connect, 1.1
ch := stompngo.Headers{"host", h, "accept-version", "1.1"}
conn, e := stompngo.Connect(n, ch)
if e != nil {
log.Fatalln(exampid, "recv stompconnect:", qnum, e) // Handle this ......
}
fmt.Println(exampid, "recv connection complete:", qnum)
// Receives
receiveMessages(conn, qnum, n)
fmt.Println(exampid, "recv receives complete:", qnum)
// Disconnect from Stomp server
eh := stompngo.Headers{"recv_discqueue", fmt.Sprintf("%d", qnum)}
e = conn.Disconnect(eh)
if e != nil {
log.Fatalln(exampid, "recv disconnects:", qnum, e) // Handle this ......
}
fmt.Println(exampid, "recv disconnected:", qnum)
// Network close
e = n.Close()
if e != nil {
log.Fatalln(exampid, "recv netcloser", qnum, e) // Handle this ......
}
fmt.Println(exampid, "recv network close complete", qnum)
fmt.Println(exampid, "recv end for queue number", qnum)
wgrecv.Done()
}
func runSender(qnum int) {
fmt.Println(exampid, "send start for queue number", qnum)
// Network Open
h, p := sngecomm.HostAndPort11() // a 1.1 connect
n, e := net.Dial("tcp", net.JoinHostPort(h, p))
if e != nil {
log.Fatalln(exampid, "send nectonnr:", qnum, e) // Handle this ......
}
fmt.Println(exampid, "send network open complete", qnum)
fmt.Println(exampid, "send network local", n.LocalAddr().String(), qnum)
fmt.Println(exampid, "send network remote", n.RemoteAddr().String(), qnum)
// Stomp connect, 1.1
ch := stompngo.Headers{"host", h, "accept-version", "1.1"}
conn, e := stompngo.Connect(n, ch)
if e != nil {
log.Fatalln(exampid, "send stompconnect:", qnum, e) // Handle this ......
}
fmt.Println(exampid, "send connection complete:", qnum)
//
sendMessages(conn, qnum, n)
fmt.Println(exampid, "send sends complete:", qnum)
// Disconnect from Stomp server
eh := stompngo.Headers{"send_discqueue", fmt.Sprintf("%d", qnum)}
e = conn.Disconnect(eh)
if e != nil {
log.Fatalln(exampid, "send disconnects:", qnum, e) // Handle this ......
}
fmt.Println(exampid, "send disconnected:", qnum)
// Network close
e = n.Close()
if e != nil {
log.Fatalln(exampid, "send netcloser", qnum, e) // Handle this ......
}
fmt.Println(exampid, "send network close complete", qnum)
fmt.Println(exampid, "send end for queue number", qnum)
wgsend.Done()
}
func main() {
fmt.Println(exampid, "starts")
if sngecomm.SetMAXPROCS() {
nc := runtime.NumCPU()
fmt.Println(exampid, "number of CPUs is:", nc)
c := runtime.GOMAXPROCS(nc)
fmt.Println(exampid, "previous number of GOMAXPROCS is:", c)
fmt.Println(exampid, "current number of GOMAXPROCS is:", runtime.GOMAXPROCS(-1))
}
//
send_wait = sngecomm.SendWait()
recv_wait = sngecomm.RecvWait()
//
numq := sngecomm.Nqs()
fmt.Println(exampid, "numq:", numq)
nmsgs = sngecomm.Nmsgs() // message count
fmt.Println(exampid, "nmsgs:", nmsgs)
//
fmt.Println(exampid, "starting receivers")
for q := 1; q <= numq; q++ {
wgrecv.Add(1)
go runReceiver(q)
}
fmt.Println(exampid, "started receivers")
//
fmt.Println(exampid, "starting senders")
for q := 1; q <= numq; q++ {
wgsend.Add(1)
go runSender(q)
}
fmt.Println(exampid, "started senders")
//
wgsend.Wait()
fmt.Println(exampid, "senders complete")
wgrecv.Wait()
fmt.Println(exampid, "receivers complete")
//
fmt.Println(exampid, "ends")
}
|
package croc
import (
"bytes"
"crypto/rand"
"encoding/binary"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math"
"net"
"os"
"path"
"path/filepath"
"strings"
"sync"
"time"
"github.com/denisbrodbeck/machineid"
log "github.com/schollz/logger"
"github.com/schollz/pake/v2"
"github.com/schollz/peerdiscovery"
"github.com/schollz/progressbar/v3"
"github.com/schollz/spinner"
"github.com/tscholl2/siec"
"github.com/schollz/croc/v8/src/comm"
"github.com/schollz/croc/v8/src/compress"
"github.com/schollz/croc/v8/src/crypt"
"github.com/schollz/croc/v8/src/message"
"github.com/schollz/croc/v8/src/models"
"github.com/schollz/croc/v8/src/tcp"
"github.com/schollz/croc/v8/src/utils"
)
// init runs at package load time.
// NOTE(review): this unconditionally forces the log level to "debug" before
// any Options are seen; Debug() below later resets it based on user options.
// Confirm the debug default here is intentional and not a development leftover.
func init() {
	log.SetLevel("debug")
}
// Debug toggles debug mode: "debug" level when enabled, "warn" otherwise.
func Debug(debug bool) {
	level := "warn"
	if debug {
		level = "debug"
	}
	log.SetLevel(level)
}
// Options specifies user specific options
type Options struct {
IsSender bool
SharedSecret string
Debug bool
RelayAddress string
RelayPorts []string
RelayPassword string
Stdout bool
NoPrompt bool
NoMultiplexing bool
DisableLocal bool
Ask bool
}
// Client holds the state of the croc transfer
type Client struct {
Options Options
Pake *pake.Pake
Key []byte
ExternalIP, ExternalIPConnected string
// steps involved in forming relationship
Step1ChannelSecured bool
Step2FileInfoTransfered bool
Step3RecipientRequestFile bool
Step4FileTransfer bool
Step5CloseChannels bool
SuccessfulTransfer bool
// send / receive information of all files
FilesToTransfer []FileInfo
FilesToTransferCurrentNum int
FilesHasFinished map[int]struct{}
// send / receive information of current file
CurrentFile *os.File
CurrentFileChunkRanges []int64
CurrentFileChunks []int64
TotalSent int64
TotalChunksTransfered int
chunkMap map[uint64]struct{}
// tcp connections
conn []*comm.Comm
bar *progressbar.ProgressBar
spinner *spinner.Spinner
longestFilename int
firstSend bool
mutex *sync.Mutex
fread *os.File
numfinished int
quit chan bool
}
// Chunk contains information about the
// needed bytes
type Chunk struct {
Bytes []byte `json:"b,omitempty"`
Location int64 `json:"l,omitempty"`
}
// FileInfo registers the information about the file
type FileInfo struct {
Name string `json:"n,omitempty"`
FolderRemote string `json:"fr,omitempty"`
FolderSource string `json:"fs,omitempty"`
Hash []byte `json:"h,omitempty"`
Size int64 `json:"s,omitempty"`
ModTime time.Time `json:"m,omitempty"`
IsCompressed bool `json:"c,omitempty"`
IsEncrypted bool `json:"e,omitempty"`
}
// RemoteFileRequest requests specific bytes
type RemoteFileRequest struct {
CurrentFileChunkRanges []int64
FilesToTransferCurrentNum int
MachineID string
}
// SenderInfo lists the files to be transferred
type SenderInfo struct {
FilesToTransfer []FileInfo
MachineID string
Ask bool
}
// New establishes a new connection for transferring files between two instances.
// It validates the shared secret (must be at least 4 characters), applies the
// debug setting, and initializes the PAKE key exchange for the client's role.
func New(ops Options) (c *Client, err error) {
	c = &Client{
		Options:          ops,
		FilesHasFinished: make(map[int]struct{}),
	}
	Debug(c.Options.Debug)
	log.Debugf("options: %+v", c.Options)
	if len(c.Options.SharedSecret) < 4 {
		return c, fmt.Errorf("code is too short")
	}
	c.conn = make([]*comm.Comm, 16)
	// The PAKE role depends on which side initiates the transfer:
	// 1 for the sender, 0 for the recipient.
	role := 0
	if c.Options.IsSender {
		role = 1
	}
	c.Pake, err = pake.Init([]byte(c.Options.SharedSecret), role, siec.SIEC255(), 1*time.Microsecond)
	if err != nil {
		return c, err
	}
	c.mutex = &sync.Mutex{}
	return c, nil
}
// TransferOptions for sending
type TransferOptions struct {
PathToFiles []string
KeepPathInRemote bool
}
func (c *Client) sendCollectFiles(options TransferOptions) (err error) {
c.FilesToTransfer = make([]FileInfo, len(options.PathToFiles))
totalFilesSize := int64(0)
for i, pathToFile := range options.PathToFiles {
var fstats os.FileInfo
var fullPath string
fullPath, err = filepath.Abs(pathToFile)
if err != nil {
return
}
fullPath = filepath.Clean(fullPath)
var folderName string
folderName, _ = filepath.Split(fullPath)
fstats, err = os.Stat(fullPath)
if err != nil {
return
}
if len(fstats.Name()) > c.longestFilename {
c.longestFilename = len(fstats.Name())
}
c.FilesToTransfer[i] = FileInfo{
Name: fstats.Name(),
FolderRemote: ".",
FolderSource: folderName,
Size: fstats.Size(),
ModTime: fstats.ModTime(),
}
c.FilesToTransfer[i].Hash, err = utils.HashFile(fullPath)
totalFilesSize += fstats.Size()
if err != nil {
return
}
if options.KeepPathInRemote {
var curFolder string
curFolder, err = os.Getwd()
if err != nil {
return
}
curFolder, err = filepath.Abs(curFolder)
if err != nil {
return
}
if !strings.HasPrefix(folderName, curFolder) {
err = fmt.Errorf("remote directory must be relative to current")
return
}
c.FilesToTransfer[i].FolderRemote = strings.TrimPrefix(folderName, curFolder)
c.FilesToTransfer[i].FolderRemote = filepath.ToSlash(c.FilesToTransfer[i].FolderRemote)
c.FilesToTransfer[i].FolderRemote = strings.TrimPrefix(c.FilesToTransfer[i].FolderRemote, "/")
if c.FilesToTransfer[i].FolderRemote == "" {
c.FilesToTransfer[i].FolderRemote = "."
}
}
log.Debugf("file %d info: %+v", i, c.FilesToTransfer[i])
}
log.Debugf("longestFilename: %+v", c.longestFilename)
fname := fmt.Sprintf("%d files", len(c.FilesToTransfer))
if len(c.FilesToTransfer) == 1 {
fname = fmt.Sprintf("'%s'", c.FilesToTransfer[0].Name)
}
fmt.Fprintf(os.Stderr, "Sending %s (%s)\n", fname, utils.ByteCountDecimal(totalFilesSize))
return
}
func (c *Client) setupLocalRelay() {
// setup the relay locally
for _, port := range c.Options.RelayPorts {
go func(portStr string) {
debugString := "warn"
if c.Options.Debug {
debugString = "debug"
}
err := tcp.Run(debugString, portStr, c.Options.RelayPassword, strings.Join(c.Options.RelayPorts[1:], ","))
if err != nil {
panic(err)
}
}(port)
}
}
func (c *Client) broadcastOnLocalNetwork() {
// look for peers first
discoveries, err := peerdiscovery.Discover(peerdiscovery.Settings{
Limit: -1,
Payload: []byte("croc" + c.Options.RelayPorts[0]),
Delay: 10 * time.Millisecond,
TimeLimit: 30 * time.Second,
})
log.Debugf("discoveries: %+v", discoveries)
if err != nil {
log.Debug(err)
}
}
// transferOverLocalRelay connects to the relay started by setupLocalRelay on
// localhost and, once a recipient completes the handshake there, runs the
// transfer over that local connection. The transfer result (nil on success)
// is reported on errchan. A failure to connect is deliberately NOT reported,
// because the caller is also trying the public relay in parallel.
func (c *Client) transferOverLocalRelay(options TransferOptions, errchan chan<- error) {
	// give the local relay goroutines a moment to start listening
	time.Sleep(500 * time.Millisecond)
	log.Debug("establishing connection")
	var banner string
	conn, banner, ipaddr, err := tcp.ConnectToTCPServer("localhost:"+c.Options.RelayPorts[0], c.Options.RelayPassword, c.Options.SharedSecret[:3])
	log.Debugf("banner: %s", banner)
	if err != nil {
		err = fmt.Errorf("could not connect to localhost:%s: %w", c.Options.RelayPorts[0], err)
		log.Debug(err)
		// not really an error because it will try to connect over the actual relay
		return
	}
	log.Debugf("local connection established: %+v", conn)
	// wait for the recipient: pings ({1}) keep the connection alive until
	// the "handshake" marker arrives
	for {
		data, _ := conn.Receive()
		if bytes.Equal(data, []byte("handshake")) {
			break
		} else if bytes.Equal(data, []byte{1}) {
			log.Debug("got ping")
		} else {
			log.Debugf("instead of handshake got: %s", data)
		}
	}
	c.conn[0] = conn
	log.Debug("exchanged header message")
	// switch the client over to the local relay; the banner lists the
	// relay's data ports
	c.Options.RelayAddress = "localhost"
	c.Options.RelayPorts = strings.Split(banner, ",")
	if c.Options.NoMultiplexing {
		log.Debug("no multiplexing")
		c.Options.RelayPorts = []string{c.Options.RelayPorts[0]}
	}
	c.ExternalIP = ipaddr
	errchan <- c.transfer(options)
}
// Send will send the specified file
//
// It prints the code phrase for the recipient, then races two strategies:
// a local relay (plus peer-discovery broadcast) and the configured public
// relay. The first strategy to produce a result reports it on errchan;
// fatal errors (refusal, bad password, EOF) short-circuit waiting for the
// other strategy.
func (c *Client) Send(options TransferOptions) (err error) {
	err = c.sendCollectFiles(options)
	if err != nil {
		return
	}
	// show the receive command, including --relay when not on the default relay
	otherRelay := ""
	if c.Options.RelayAddress != models.DEFAULT_RELAY {
		otherRelay = "--relay " + c.Options.RelayAddress + " "
	}
	fmt.Fprintf(os.Stderr, "Code is: %s\nOn the other computer run\n\ncroc %s%s\n", c.Options.SharedSecret, otherRelay, c.Options.SharedSecret)
	if c.Options.Ask {
		machid, _ := machineid.ID()
		fmt.Fprintf(os.Stderr, "\rYour machine ID is '%s'\n", machid)
	}
	// // c.spinner.Suffix = " waiting for recipient..."
	// c.spinner.Start()
	// create channel for quitting
	// connect to the relay for messaging
	errchan := make(chan error, 1)
	if !c.Options.DisableLocal {
		// add two things to the error channel
		errchan = make(chan error, 2)
		c.setupLocalRelay()
		go c.broadcastOnLocalNetwork()
		go c.transferOverLocalRelay(options, errchan)
	}
	// in parallel, attempt the transfer over the public relay
	go func() {
		host, port, err := net.SplitHostPort(c.Options.RelayAddress)
		if err != nil {
			log.Errorf("bad relay address %s", c.Options.RelayAddress)
			return
		}
		// Default port to :9009
		if port == "" {
			port = "9009"
		}
		c.Options.RelayAddress = net.JoinHostPort(host, port)
		log.Debugf("establishing connection to %s", c.Options.RelayAddress)
		var banner string
		conn, banner, ipaddr, err := tcp.ConnectToTCPServer(c.Options.RelayAddress, c.Options.RelayPassword, c.Options.SharedSecret[:3], 5*time.Second)
		log.Debugf("banner: %s", banner)
		if err != nil {
			err = fmt.Errorf("could not connect to %s: %w", c.Options.RelayAddress, err)
			log.Debug(err)
			errchan <- err
			return
		}
		log.Debugf("connection established: %+v", conn)
		// wait for the recipient; answer "ips?" queries and pings until
		// the handshake marker arrives
		for {
			log.Debug("waiting for bytes")
			data, errConn := conn.Receive()
			if errConn != nil {
				log.Debugf("[%+v] had error: %s", conn, errConn.Error())
			}
			if bytes.Equal(data, []byte("ips?")) {
				// recipient wants to try to connect to local ips
				var ips []string
				// only get local ips if the local is enabled
				if !c.Options.DisableLocal {
					// get list of local ips
					ips, err = utils.GetLocalIPs()
					if err != nil {
						log.Debugf("error getting local ips: %v", err)
					}
					// prepend the port that is being listened to
					ips = append([]string{c.Options.RelayPorts[0]}, ips...)
				}
				bips, _ := json.Marshal(ips)
				if err := conn.Send(bips); err != nil {
					log.Errorf("error sending: %v", err)
				}
			} else if bytes.Equal(data, []byte("handshake")) {
				break
			} else if bytes.Equal(data, []byte{1}) {
				log.Debug("got ping")
				continue
			} else {
				log.Debugf("[%+v] got weird bytes: %+v", conn, data)
				// throttle the reading
				errchan <- fmt.Errorf("gracefully refusing using the public relay")
				return
			}
		}
		c.conn[0] = conn
		// banner lists the relay's data ports
		c.Options.RelayPorts = strings.Split(banner, ",")
		if c.Options.NoMultiplexing {
			log.Debug("no multiplexing")
			c.Options.RelayPorts = []string{c.Options.RelayPorts[0]}
		}
		c.ExternalIP = ipaddr
		log.Debug("exchanged header message")
		errchan <- c.transfer(options)
	}()
	// first result wins
	err = <-errchan
	if err == nil {
		// return if no error
		return
	} else {
		log.Debugf("error from errchan: %v", err)
	}
	if !c.Options.DisableLocal {
		// a refusal/auth failure is final: push the error back so the read
		// below returns immediately instead of waiting on the other strategy
		if strings.Contains(err.Error(), "refusing files") || strings.Contains(err.Error(), "EOF") || strings.Contains(err.Error(), "bad password") {
			errchan <- err
		}
		err = <-errchan
	}
	return err
}
// Receive will receive a file
//
// The recipient first tries to discover a sender broadcasting on the local
// network (unless DisableLocal is set) and, failing that, connects to the
// configured relay. After connecting over the public relay it can still ask
// the sender for its local IPs and upgrade to a direct local connection when
// one is reachable.
//
// Fix: inside the discovery loop, use discoveries[i] — the entry whose
// payload actually matched the "croc" prefix — instead of discoveries[0],
// which could belong to an unrelated broadcaster.
func (c *Client) Receive() (err error) {
	fmt.Fprintf(os.Stderr, "connecting...")
	// recipient will look for peers first
	// and continue if it doesn't find any within 100 ms
	usingLocal := false
	if !c.Options.DisableLocal {
		log.Debug("attempt to discover peers")
		discoveries, err := peerdiscovery.Discover(peerdiscovery.Settings{
			Limit:     1,
			Payload:   []byte("ok"),
			Delay:     10 * time.Millisecond,
			TimeLimit: 100 * time.Millisecond,
		})
		if err == nil && len(discoveries) > 0 {
			for i := 0; i < len(discoveries); i++ {
				log.Debugf("discovery %d has payload: %+v", i, discoveries[i])
				if !bytes.HasPrefix(discoveries[i].Payload, []byte("croc")) {
					log.Debug("skipping discovery")
					continue
				}
				log.Debug("switching to local")
				// the payload carries the sender's main relay port
				portToUse := string(bytes.TrimPrefix(discoveries[i].Payload, []byte("croc")))
				if portToUse == "" {
					portToUse = "9009"
				}
				c.Options.RelayAddress = fmt.Sprintf("%s:%s",
					discoveries[i].Address,
					portToUse,
				)
				c.ExternalIPConnected = c.Options.RelayAddress
				usingLocal = true
				break
			}
		}
		log.Debugf("discoveries: %+v", discoveries)
		log.Debug("establishing connection")
	}
	host, port, err := net.SplitHostPort(c.Options.RelayAddress)
	if err != nil {
		log.Errorf("bad relay address %s", c.Options.RelayAddress)
		return
	}
	// Default port to :9009
	if port == "" {
		port = "9009"
	}
	c.Options.RelayAddress = net.JoinHostPort(host, port)
	log.Debugf("establishing receiver connection to %s", c.Options.RelayAddress)
	var banner string
	c.conn[0], banner, c.ExternalIP, err = tcp.ConnectToTCPServer(c.Options.RelayAddress, c.Options.RelayPassword, c.Options.SharedSecret[:3])
	log.Debugf("banner: %s", banner)
	if err != nil {
		err = fmt.Errorf("could not connect to %s: %w", c.Options.RelayAddress, err)
		log.Debug(err)
		return
	}
	log.Debugf("receiver connection established: %+v", c.conn[0])
	if !usingLocal && !c.Options.DisableLocal {
		// ask the sender for their local ips and port
		// and try to connect to them
		log.Debug("sending ips?")
		var data []byte
		if err := c.conn[0].Send([]byte("ips?")); err != nil {
			log.Errorf("ips send error: %v", err)
		}
		data, err = c.conn[0].Receive()
		if err != nil {
			return
		}
		log.Debugf("ips data: %s", data)
		var ips []string
		if err := json.Unmarshal(data, &ips); err != nil {
			log.Errorf("ips unmarshal error: %v", err)
		}
		if len(ips) > 1 {
			// first entry is the sender's listening port; the rest are IPs
			port := ips[0]
			ips = ips[1:]
			for _, ip := range ips {
				// only try IPs that share a /24 with one of our own
				// interfaces (i.e. are plausibly on our LAN)
				ipv4Addr, ipv4Net, errNet := net.ParseCIDR(fmt.Sprintf("%s/24", ip))
				log.Debugf("ipv4Add4: %+v, ipv4Net: %+v, err: %+v", ipv4Addr, ipv4Net, errNet)
				localIps, _ := utils.GetLocalIPs()
				haveLocalIP := false
				for _, localIP := range localIps {
					localIPparsed := net.ParseIP(localIP)
					if ipv4Net.Contains(localIPparsed) {
						haveLocalIP = true
						break
					}
				}
				if !haveLocalIP {
					log.Debugf("%s is not a local IP, skipping", ip)
					continue
				}
				serverTry := fmt.Sprintf("%s:%s", ip, port)
				conn, banner2, externalIP, errConn := tcp.ConnectToTCPServer(serverTry, c.Options.RelayPassword, c.Options.SharedSecret[:3], 50*time.Millisecond)
				if errConn != nil {
					log.Debugf("could not connect to " + serverTry)
					continue
				}
				log.Debugf("local connection established to %s", serverTry)
				log.Debugf("banner: %s", banner2)
				// reset to the local port
				banner = banner2
				c.Options.RelayAddress = serverTry
				c.ExternalIP = externalIP
				c.conn[0].Close()
				c.conn[0] = nil
				c.conn[0] = conn
				break
			}
		}
	}
	// signal the sender that we are connected and ready
	if err := c.conn[0].Send([]byte("handshake")); err != nil {
		log.Errorf("handshake send error: %v", err)
	}
	c.Options.RelayPorts = strings.Split(banner, ",")
	if c.Options.NoMultiplexing {
		log.Debug("no multiplexing")
		c.Options.RelayPorts = []string{c.Options.RelayPorts[0]}
	}
	log.Debug("exchanged header message")
	fmt.Fprintf(os.Stderr, "\rsecuring channel...")
	return c.transfer(TransferOptions{})
}
// transfer runs the main message loop over the secured message channel: it
// processes incoming messages until the transfer completes or fails. The
// recipient kicks things off by sending its PAKE bytes.
func (c *Client) transfer(options TransferOptions) (err error) {
	// connect to the server
	// quit with c.quit <- true
	c.quit = make(chan bool)
	// if recipient, initialize with sending pake information
	log.Debug("ready")
	if !c.Options.IsSender && !c.Step1ChannelSecured {
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type:  "pake",
			Bytes: c.Pake.Bytes(),
		})
		if err != nil {
			return
		}
	}
	// listen for incoming messages and process them
	for {
		var data []byte
		var done bool
		data, err = c.conn[0].Receive()
		if err != nil {
			log.Debugf("got error receiving: %v", err)
			// a receive failure before PAKE completes means the channel was
			// never secured (e.g. wrong code phrase)
			if !c.Step1ChannelSecured {
				err = fmt.Errorf("could not secure channel")
			}
			break
		}
		done, err = c.processMessage(data)
		if err != nil {
			log.Debugf("got error processing: %v", err)
			break
		}
		if done {
			break
		}
	}
	// purge errors that come from successful transfer
	if c.SuccessfulTransfer {
		if err != nil {
			log.Debugf("purging error: %s", err)
		}
		err = nil
	}
	// when streaming to stdout, the temporary on-disk copy is removed
	if c.Options.Stdout && !c.Options.IsSender {
		pathToFile := path.Join(
			c.FilesToTransfer[c.FilesToTransferCurrentNum].FolderRemote,
			c.FilesToTransfer[c.FilesToTransferCurrentNum].Name,
		)
		if err := os.Remove(pathToFile); err != nil {
			log.Warnf("error removing %s: %v", pathToFile, err)
		}
	}
	return
}
// processMessageFileInfo handles the "fileinfo" message from the sender: it
// records the list of files to transfer and, unless prompting is disabled,
// asks the user to accept. Returns done=true with an error when refused.
func (c *Client) processMessageFileInfo(m message.Message) (done bool, err error) {
	var senderInfo SenderInfo
	err = json.Unmarshal(m.Bytes, &senderInfo)
	if err != nil {
		log.Error(err)
		return
	}
	c.FilesToTransfer = senderInfo.FilesToTransfer
	// summary: single quoted file name, or a file count
	fname := fmt.Sprintf("%d files", len(c.FilesToTransfer))
	if len(c.FilesToTransfer) == 1 {
		fname = fmt.Sprintf("'%s'", c.FilesToTransfer[0].Name)
	}
	totalSize := int64(0)
	for _, fi := range c.FilesToTransfer {
		totalSize += fi.Size
		// track the longest name for progress-bar padding
		if len(fi.Name) > c.longestFilename {
			c.longestFilename = len(fi.Name)
		}
	}
	// c.spinner.Stop()
	if !c.Options.NoPrompt || c.Options.Ask || senderInfo.Ask {
		if c.Options.Ask || senderInfo.Ask {
			machID, _ := machineid.ID()
			fmt.Fprintf(os.Stderr, "\rYour machine id is '%s'.\nAccept %s (%s) from '%s'? (y/n) ", machID, fname, utils.ByteCountDecimal(totalSize), senderInfo.MachineID)
		} else {
			fmt.Fprintf(os.Stderr, "\rAccept %s (%s)? (y/n) ", fname, utils.ByteCountDecimal(totalSize))
		}
		if strings.ToLower(strings.TrimSpace(utils.GetInput(""))) != "y" {
			// tell the sender we refused before bailing out
			err = message.Send(c.conn[0], c.Key, message.Message{
				Type:    "error",
				Message: "refusing files",
			})
			return true, fmt.Errorf("refused files")
		}
	} else {
		fmt.Fprintf(os.Stderr, "\rReceiving %s (%s) \n", fname, utils.ByteCountDecimal(totalSize))
	}
	fmt.Fprintf(os.Stderr, "\nReceiving (<-%s)\n", c.ExternalIPConnected)
	log.Debug(c.FilesToTransfer)
	c.Step2FileInfoTransfered = true
	return
}
// procesMessagePake handles a "pake" message: it feeds the peer's PAKE bytes
// into our PAKE state, replies with our own bytes while the exchange is in
// progress, and — once the key is verified — the sender generates and sends
// the salt, and both sides dial the extra data connections.
//
// Fix: the result of rand.Read was previously checked against the wrong
// error variable, so a failure to generate the salt was silently ignored;
// the error returned by the "pake" reply Send was likewise unchecked.
func (c *Client) procesMessagePake(m message.Message) (err error) {
	log.Debug("received pake payload")
	notVerified := !c.Pake.IsVerified()
	err = c.Pake.Update(m.Bytes)
	if err != nil {
		return
	}
	// reply with our PAKE bytes while the exchange is incomplete; the
	// recipient also echoes once more right after verification
	if (notVerified && c.Pake.IsVerified() && !c.Options.IsSender) || !c.Pake.IsVerified() {
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type:  "pake",
			Bytes: c.Pake.Bytes(),
		})
		if err != nil {
			return
		}
	}
	if c.Pake.IsVerified() {
		if c.Options.IsSender {
			log.Debug("generating salt")
			salt := make([]byte, 8)
			if _, err = rand.Read(salt); err != nil {
				log.Errorf("can't generate random numbers: %v", err)
				return
			}
			err = message.Send(c.conn[0], c.Key, message.Message{
				Type:  "salt",
				Bytes: salt,
			})
			if err != nil {
				return
			}
		}
		// connects to the other ports of the server for transfer
		var wg sync.WaitGroup
		wg.Add(len(c.Options.RelayPorts))
		for i := 0; i < len(c.Options.RelayPorts); i++ {
			log.Debugf("port: [%s]", c.Options.RelayPorts[i])
			go func(j int) {
				defer wg.Done()
				host, _, err := net.SplitHostPort(c.Options.RelayAddress)
				if err != nil {
					log.Errorf("bad relay address %s", c.Options.RelayAddress)
					return
				}
				server := net.JoinHostPort(host, c.Options.RelayPorts[j])
				log.Debugf("connecting to %s", server)
				c.conn[j+1], _, _, err = tcp.ConnectToTCPServer(
					server,
					c.Options.RelayPassword,
					fmt.Sprintf("%s-%d", utils.SHA256(c.Options.SharedSecret)[:7], j),
				)
				if err != nil {
					panic(err)
				}
				log.Debugf("connected to %s", server)
				// the recipient starts reading data immediately
				if !c.Options.IsSender {
					go c.receiveData(j)
				}
			}(i)
		}
		wg.Wait()
	}
	return
}
// processMessageSalt handles the "salt" message: the recipient echoes the
// salt back, then both sides derive the symmetric transfer key from the
// PAKE session key plus the salt. The sender then reports its external IP.
func (c *Client) processMessageSalt(m message.Message) (done bool, err error) {
	log.Debug("received salt")
	if !c.Options.IsSender {
		log.Debug("sending salt back")
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type:  "salt",
			Bytes: m.Bytes,
		})
	}
	log.Debugf("session key is verified, generating encryption with salt: %x", m.Bytes)
	key, err := c.Pake.SessionKey()
	if err != nil {
		return true, err
	}
	// derive the final transfer key from the PAKE key and the shared salt
	c.Key, _, err = crypt.New(key, m.Bytes)
	if err != nil {
		return true, err
	}
	log.Debugf("key = %+x", c.Key)
	if c.Options.IsSender {
		log.Debug("sending external IP")
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type:    "externalip",
			Message: c.ExternalIP,
			Bytes:   m.Bytes,
		})
	}
	return
}
// processExternalIP handles the "externalip" message: the recipient replies
// with its own external IP, the peer's address is recorded (unless a local
// relay already preset it), and the channel is marked secured.
func (c *Client) processExternalIP(m message.Message) (done bool, err error) {
	log.Debugf("received external IP: %+v", m)
	if !c.Options.IsSender {
		reply := message.Message{
			Type:    "externalip",
			Message: c.ExternalIP,
		}
		if sendErr := message.Send(c.conn[0], c.Key, reply); sendErr != nil {
			return true, sendErr
		}
	}
	// it can be preset by the local relay
	if c.ExternalIPConnected == "" {
		c.ExternalIPConnected = m.Message
	}
	log.Debugf("connected as %s -> %s", c.ExternalIP, c.ExternalIPConnected)
	c.Step1ChannelSecured = true
	return done, err
}
// processMessage decodes one message from the message channel and dispatches
// it by type, then advances the transfer state machine via updateState.
// done=true tells the caller to stop the receive loop.
func (c *Client) processMessage(payload []byte) (done bool, err error) {
	m, err := message.Decode(c.Key, payload)
	if err != nil {
		err = fmt.Errorf("problem with decoding: %w", err)
		log.Debug(err)
		return
	}
	switch m.Type {
	case "finished":
		// peer is done; acknowledge and mark the transfer successful
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type: "finished",
		})
		done = true
		c.SuccessfulTransfer = true
		return
	case "pake":
		err = c.procesMessagePake(m)
		if err != nil {
			err = fmt.Errorf("pake not successful: %w", err)
			log.Debug(err)
		}
	case "salt":
		done, err = c.processMessageSalt(m)
	case "externalip":
		done, err = c.processExternalIP(m)
	case "error":
		// c.spinner.Stop()
		fmt.Print("\r")
		err = fmt.Errorf("peer error: %s", m.Message)
		return true, err
	case "fileinfo":
		done, err = c.processMessageFileInfo(m)
	case "recipientready":
		// the recipient tells the sender which file and which chunks it needs
		var remoteFile RemoteFileRequest
		err = json.Unmarshal(m.Bytes, &remoteFile)
		if err != nil {
			return
		}
		c.FilesToTransferCurrentNum = remoteFile.FilesToTransferCurrentNum
		c.CurrentFileChunkRanges = remoteFile.CurrentFileChunkRanges
		c.CurrentFileChunks = utils.ChunkRangesToChunks(c.CurrentFileChunkRanges)
		log.Debugf("current file chunks: %+v", c.CurrentFileChunks)
		// chunkMap is read concurrently by the sendData goroutines
		c.mutex.Lock()
		c.chunkMap = make(map[uint64]struct{})
		for _, chunk := range c.CurrentFileChunks {
			c.chunkMap[uint64(chunk)] = struct{}{}
		}
		c.mutex.Unlock()
		c.Step3RecipientRequestFile = true
		if c.Options.Ask {
			fmt.Fprintf(os.Stderr, "Send to machine '%s'? (y/n) ", remoteFile.MachineID)
			if strings.ToLower(strings.TrimSpace(utils.GetInput(""))) != "y" {
				err = message.Send(c.conn[0], c.Key, message.Message{
					Type:    "error",
					Message: "refusing files",
				})
				done = true
				err = fmt.Errorf("refused files")
				return
			}
		}
	case "close-sender":
		// the recipient finished the current file; reset per-file state
		c.bar.Finish()
		log.Debug("close-sender received...")
		c.Step4FileTransfer = false
		c.Step3RecipientRequestFile = false
		log.Debug("sending close-recipient")
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type: "close-recipient",
		})
	case "close-recipient":
		c.Step4FileTransfer = false
		c.Step3RecipientRequestFile = false
	}
	if err != nil {
		log.Debugf("got error from processing message: %v", err)
		return
	}
	err = c.updateState()
	if err != nil {
		log.Debugf("got error from updating state: %v", err)
		return
	}
	return
}
// updateIfSenderChannelSecured sends the file listing ("fileinfo" message)
// once the sender's channel is secured but the info has not yet been sent.
func (c *Client) updateIfSenderChannelSecured() (err error) {
	if c.Options.IsSender && c.Step1ChannelSecured && !c.Step2FileInfoTransfered {
		var b []byte
		machID, _ := machineid.ID()
		b, err = json.Marshal(SenderInfo{
			FilesToTransfer: c.FilesToTransfer,
			MachineID:       machID,
			Ask:             c.Options.Ask,
		})
		if err != nil {
			log.Error(err)
			return
		}
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type:  "fileinfo",
			Bytes: b,
		})
		if err != nil {
			return
		}
		c.Step2FileInfoTransfered = true
	}
	return
}
// recipientInitializeFile prepares the local destination for the file
// currently being received: it creates the destination folder, opens or
// creates the file, and decides whether to resume (requesting only missing
// chunks when the on-disk size already matches) or truncate and start over.
//
// Fix: the error from Stat was previously discarded, so a failing Stat would
// dereference a nil FileInfo; a Stat failure now falls back to truncating.
func (c *Client) recipientInitializeFile() (err error) {
	// start initiating the process to receive a new file
	log.Debugf("working on file %d", c.FilesToTransferCurrentNum)
	// recipient sets the file
	pathToFile := path.Join(
		c.FilesToTransfer[c.FilesToTransferCurrentNum].FolderRemote,
		c.FilesToTransfer[c.FilesToTransferCurrentNum].Name,
	)
	// make sure the destination folder exists
	folderForFile, _ := filepath.Split(pathToFile)
	folderForFileBase := filepath.Base(folderForFile)
	if folderForFileBase != "." && folderForFileBase != "" {
		if err := os.MkdirAll(folderForFile, os.ModePerm); err != nil {
			log.Errorf("can't create %s: %v", folderForFile, err)
		}
	}
	var errOpen error
	c.CurrentFile, errOpen = os.OpenFile(
		pathToFile,
		os.O_WRONLY, 0666)
	var truncate bool // default false
	c.CurrentFileChunks = []int64{}
	c.CurrentFileChunkRanges = []int64{}
	if errOpen == nil {
		// the file already exists: resume if it has the expected size,
		// otherwise truncate and start over
		stat, errStat := c.CurrentFile.Stat()
		if errStat != nil {
			// can't inspect the existing file; safest is to start over
			log.Debugf("could not stat %s: %v", pathToFile, errStat)
			truncate = true
		} else {
			truncate = stat.Size() != c.FilesToTransfer[c.FilesToTransferCurrentNum].Size
		}
		if !truncate {
			// recipient requests the file and chunks (if empty, then should receive all chunks)
			c.CurrentFileChunkRanges = utils.MissingChunks(
				pathToFile,
				c.FilesToTransfer[c.FilesToTransferCurrentNum].Size,
				models.TCP_BUFFER_SIZE/2,
			)
		}
	} else {
		// the file does not exist yet: create it and receive everything
		c.CurrentFile, errOpen = os.Create(pathToFile)
		if errOpen != nil {
			errOpen = fmt.Errorf("could not create %s: %w", pathToFile, errOpen)
			log.Error(errOpen)
			return errOpen
		}
		truncate = true
	}
	if truncate {
		// pre-size the file so chunks can be written at arbitrary offsets
		err := c.CurrentFile.Truncate(c.FilesToTransfer[c.FilesToTransferCurrentNum].Size)
		if err != nil {
			err = fmt.Errorf("could not truncate %s: %w", pathToFile, err)
			log.Error(err)
			return err
		}
	}
	return
}
// recipientGetFileReady either wraps up the whole transfer (finished=true:
// send "finished" and mark success) or prepares the next file and tells the
// sender which chunks are needed via a "recipientready" message.
func (c *Client) recipientGetFileReady(finished bool) (err error) {
	if finished {
		// TODO: do the last finishing stuff
		log.Debug("finished")
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type: "finished",
		})
		if err != nil {
			panic(err)
		}
		c.SuccessfulTransfer = true
		c.FilesHasFinished[c.FilesToTransferCurrentNum] = struct{}{}
	}
	// NOTE(review): the file is (re)initialized and a recipientready is sent
	// even when finished — confirm this is intentional.
	err = c.recipientInitializeFile()
	if err != nil {
		return
	}
	c.TotalSent = 0
	machID, _ := machineid.ID()
	bRequest, _ := json.Marshal(RemoteFileRequest{
		CurrentFileChunkRanges:    c.CurrentFileChunkRanges,
		FilesToTransferCurrentNum: c.FilesToTransferCurrentNum,
		MachineID:                 machID,
	})
	log.Debug("converting to chunk range")
	c.CurrentFileChunks = utils.ChunkRangesToChunks(c.CurrentFileChunkRanges)
	if !finished {
		// setup the progressbar
		c.setBar()
	}
	log.Debugf("sending recipient ready with %d chunks", len(c.CurrentFileChunks))
	err = message.Send(c.conn[0], c.Key, message.Message{
		Type:  "recipientready",
		Bytes: bRequest,
	})
	if err != nil {
		return
	}
	c.Step3RecipientRequestFile = true
	return
}
// createEmptyFileAndFinish creates a zero-byte file at its remote
// destination and renders an already-completed progress bar for it, since
// there is no data to transfer for an empty file.
func (c *Client) createEmptyFileAndFinish(fileInfo FileInfo, i int) (err error) {
	log.Debugf("touching file with folder / name")
	if !utils.Exists(fileInfo.FolderRemote) {
		if err = os.MkdirAll(fileInfo.FolderRemote, os.ModePerm); err != nil {
			log.Error(err)
			return
		}
	}
	emptyFile, errCreate := os.Create(path.Join(fileInfo.FolderRemote, fileInfo.Name))
	if errCreate != nil {
		log.Error(errCreate)
		err = errCreate
		return
	}
	emptyFile.Close()
	// pad the description so bars for multiple files line up
	var description string
	if len(c.FilesToTransfer) == 1 {
		description = c.FilesToTransfer[i].Name
	} else {
		description = fmt.Sprintf("%-*s", c.longestFilename, c.FilesToTransfer[i].Name)
	}
	c.bar = progressbar.NewOptions64(1,
		progressbar.OptionOnCompletion(func() {
			fmt.Fprintf(os.Stderr, " ✔️\n")
		}),
		progressbar.OptionSetWidth(20),
		progressbar.OptionSetDescription(description),
		progressbar.OptionSetRenderBlankState(true),
		progressbar.OptionShowBytes(true),
		progressbar.OptionShowCount(),
		progressbar.OptionSetWriter(os.Stderr),
	)
	c.bar.Finish()
	return
}
// updateIfRecipientHasFileInfo runs on the recipient once the file listing
// has arrived: it scans for the next file that still needs data (skipping
// files whose hash already matches), creates empty files outright, and then
// reports back to the sender via recipientGetFileReady.
func (c *Client) updateIfRecipientHasFileInfo() (err error) {
	if !(!c.Options.IsSender && c.Step2FileInfoTransfered && !c.Step3RecipientRequestFile) {
		return
	}
	// find the next file to transfer and send that number
	// if the files are the same size, then look for missing chunks
	finished := true
	for i, fileInfo := range c.FilesToTransfer {
		if _, ok := c.FilesHasFinished[i]; ok {
			continue
		}
		log.Debugf("checking %+v", fileInfo)
		if i < c.FilesToTransferCurrentNum {
			continue
		}
		fileHash, errHash := utils.HashFile(path.Join(fileInfo.FolderRemote, fileInfo.Name))
		if fileInfo.Size == 0 {
			// nothing to transfer; just create the empty file locally
			err = c.createEmptyFileAndFinish(fileInfo, i)
			if err != nil {
				return
			}
			continue
		}
		log.Debugf("%s %+x %+x %+v", fileInfo.Name, fileHash, fileInfo.Hash, errHash)
		if !bytes.Equal(fileHash, fileInfo.Hash) {
			log.Debugf("hashes are not equal %x != %x", fileHash, fileInfo.Hash)
		} else {
			log.Debugf("hashes are equal %x == %x", fileHash, fileInfo.Hash)
		}
		if errHash != nil {
			// probably can't find, its okay
			log.Debug(errHash)
		}
		if errHash != nil || !bytes.Equal(fileHash, fileInfo.Hash) {
			// this file is missing or differs locally: transfer it next
			finished = false
			c.FilesToTransferCurrentNum = i
			break
		}
		// TODO: print out something about this file already existing
	}
	err = c.recipientGetFileReady(finished)
	return
}
// updateState advances the transfer state machine after each processed
// message: the sender publishes its file info, the recipient requests files,
// and the sender kicks off the data-sending goroutines once a file has been
// requested.
func (c *Client) updateState() (err error) {
	err = c.updateIfSenderChannelSecured()
	if err != nil {
		return
	}
	err = c.updateIfRecipientHasFileInfo()
	if err != nil {
		return
	}
	if c.Options.IsSender && c.Step3RecipientRequestFile && !c.Step4FileTransfer {
		log.Debug("start sending data!")
		if !c.firstSend {
			fmt.Fprintf(os.Stderr, "\nSending (->%s)\n", c.ExternalIPConnected)
			c.firstSend = true
			// if there are empty files, show them as already have been transferred now
			for i := range c.FilesToTransfer {
				if c.FilesToTransfer[i].Size == 0 {
					// setup the progressbar and takedown the progress bar for empty files
					description := fmt.Sprintf("%-*s", c.longestFilename, c.FilesToTransfer[i].Name)
					if len(c.FilesToTransfer) == 1 {
						description = c.FilesToTransfer[i].Name
					}
					c.bar = progressbar.NewOptions64(1,
						progressbar.OptionOnCompletion(func() {
							fmt.Fprintf(os.Stderr, " ✔️\n")
						}),
						progressbar.OptionSetWidth(20),
						progressbar.OptionSetDescription(description),
						progressbar.OptionSetRenderBlankState(true),
						progressbar.OptionShowBytes(true),
						progressbar.OptionShowCount(),
						progressbar.OptionSetWriter(os.Stderr),
					)
					c.bar.Finish()
				}
			}
		}
		c.Step4FileTransfer = true
		// setup the progressbar
		c.setBar()
		c.TotalSent = 0
		log.Debug("beginning sending comms")
		pathToFile := path.Join(
			c.FilesToTransfer[c.FilesToTransferCurrentNum].FolderSource,
			c.FilesToTransfer[c.FilesToTransferCurrentNum].Name,
		)
		c.fread, err = os.Open(pathToFile)
		c.numfinished = 0
		if err != nil {
			return
		}
		// one sending goroutine per data connection
		for i := 0; i < len(c.Options.RelayPorts); i++ {
			log.Debugf("starting sending over comm %d", i)
			go c.sendData(i)
		}
	}
	return
}
// setBar configures the progress bar for the current file and fast-forwards
// it past bytes already present locally (resume support).
func (c *Client) setBar() {
	// pad the name so bars for multiple files line up
	description := fmt.Sprintf("%-*s", c.longestFilename, c.FilesToTransfer[c.FilesToTransferCurrentNum].Name)
	if len(c.FilesToTransfer) == 1 {
		description = c.FilesToTransfer[c.FilesToTransferCurrentNum].Name
	}
	c.bar = progressbar.NewOptions64(
		c.FilesToTransfer[c.FilesToTransferCurrentNum].Size,
		progressbar.OptionOnCompletion(func() {
			fmt.Fprintf(os.Stderr, " ✔️\n")
		}),
		progressbar.OptionSetWidth(20),
		progressbar.OptionSetDescription(description),
		progressbar.OptionSetRenderBlankState(true),
		progressbar.OptionShowBytes(true),
		progressbar.OptionShowCount(),
		progressbar.OptionSetWriter(os.Stderr),
		progressbar.OptionThrottle(100*time.Millisecond),
	)
	// bytes still to transfer, assuming each remaining chunk is a full
	// half-buffer; anything beyond that is counted as already done
	byteToDo := int64(len(c.CurrentFileChunks) * models.TCP_BUFFER_SIZE / 2)
	if byteToDo > 0 {
		bytesDone := c.FilesToTransfer[c.FilesToTransferCurrentNum].Size - byteToDo
		log.Debug(byteToDo)
		log.Debug(c.FilesToTransfer[c.FilesToTransferCurrentNum].Size)
		log.Debug(bytesDone)
		if bytesDone > 0 {
			c.bar.Add64(bytesDone)
		}
	}
}
// receiveData is the recipient-side worker for data connection i+1: it
// decrypts and decompresses each chunk, writes it at the offset embedded in
// its first 8 bytes, and, once the file is complete, closes the file and
// notifies the sender with a "close-sender" message.
func (c *Client) receiveData(i int) {
	log.Debugf("%d receiving data", i)
	for {
		data, err := c.conn[i+1].Receive()
		if err != nil {
			break
		}
		if bytes.Equal(data, []byte{1}) {
			log.Debug("got ping")
			continue
		}
		// wire format: encrypt(compress(8-byte LE offset || payload))
		data, err = crypt.Decrypt(data, c.Key)
		if err != nil {
			panic(err)
		}
		data = compress.Decompress(data)
		// get position
		var position uint64
		rbuf := bytes.NewReader(data[:8])
		err = binary.Read(rbuf, binary.LittleEndian, &position)
		if err != nil {
			panic(err)
		}
		positionInt64 := int64(position)
		// serialize writes to CurrentFile across worker goroutines
		c.mutex.Lock()
		_, err = c.CurrentFile.WriteAt(data[8:], positionInt64)
		c.mutex.Unlock()
		if err != nil {
			panic(err)
		}
		c.bar.Add(len(data[8:]))
		// NOTE(review): TotalSent/TotalChunksTransfered are updated from
		// several goroutines without holding the mutex — confirm this is safe.
		c.TotalSent += int64(len(data[8:]))
		c.TotalChunksTransfered++
		if c.TotalChunksTransfered == len(c.CurrentFileChunks) || c.TotalSent == c.FilesToTransfer[c.FilesToTransferCurrentNum].Size {
			log.Debug("finished receiving!")
			if err := c.CurrentFile.Close(); err != nil {
				log.Errorf("error closing %s: %v", c.CurrentFile.Name(), err)
			}
			// stdout mode: dump the completed file to standard output
			if c.Options.Stdout || strings.HasPrefix(c.FilesToTransfer[c.FilesToTransferCurrentNum].Name, "croc-stdin") {
				pathToFile := path.Join(
					c.FilesToTransfer[c.FilesToTransferCurrentNum].FolderRemote,
					c.FilesToTransfer[c.FilesToTransferCurrentNum].Name,
				)
				b, _ := ioutil.ReadFile(pathToFile)
				fmt.Print(string(b))
			}
			log.Debug("sending close-sender")
			err = message.Send(c.conn[0], c.Key, message.Message{
				Type: "close-sender",
			})
			if err != nil {
				panic(err)
			}
		}
	}
	return
}
// sendData is the sender-side worker for data connection i+1: it reads the
// current file sequentially and transmits every len(RelayPorts)-th chunk
// (round-robin across workers), skipping chunks the recipient did not
// request. When the last worker finishes, the file handle is closed.
func (c *Client) sendData(i int) {
	defer func() {
		log.Debugf("finished with %d", i)
		// NOTE(review): numfinished is incremented from several goroutines
		// without synchronization — confirm this is safe.
		c.numfinished++
		if c.numfinished == len(c.Options.RelayPorts) {
			log.Debug("closing file")
			if err := c.fread.Close(); err != nil {
				log.Errorf("error closing file: %v", err)
			}
		}
	}()
	var readingPos int64
	pos := uint64(0)
	curi := float64(0)
	for {
		// Read file
		data := make([]byte, models.TCP_BUFFER_SIZE/2)
		// log.Debugf("%d trying to read", i)
		n, errRead := c.fread.ReadAt(data, readingPos)
		// log.Debugf("%d read %d bytes", i, n)
		readingPos += int64(n)
		// round-robin: worker i handles chunks where curi mod #workers == i
		if math.Mod(curi, float64(len(c.Options.RelayPorts))) == float64(i) {
			// check to see if this is a chunk that the recipient wants
			usableChunk := true
			c.mutex.Lock()
			if len(c.chunkMap) != 0 {
				if _, ok := c.chunkMap[pos]; !ok {
					usableChunk = false
				} else {
					delete(c.chunkMap, pos)
				}
			}
			c.mutex.Unlock()
			if usableChunk {
				// log.Debugf("sending chunk %d", pos)
				// wire format: encrypt(compress(8-byte LE offset || payload))
				posByte := make([]byte, 8)
				binary.LittleEndian.PutUint64(posByte, pos)
				dataToSend, err := crypt.Encrypt(
					compress.Compress(
						append(posByte, data[:n]...),
					),
					c.Key,
				)
				if err != nil {
					panic(err)
				}
				err = c.conn[i+1].Send(dataToSend)
				if err != nil {
					panic(err)
				}
				c.bar.Add(n)
				c.TotalSent += int64(n)
				// time.Sleep(100 * time.Millisecond)
			} else {
				// log.Debugf("skipping chunk %d", pos)
			}
		}
		curi++
		pos += uint64(n)
		// a short read at EOF is still sent above before the loop exits
		if errRead != nil {
			if errRead == io.EOF {
				break
			}
			panic(errRead)
		}
	}
	return
}
Attempt to use IPv6 discoveries
package croc
import (
"bytes"
"crypto/rand"
"encoding/binary"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math"
"net"
"os"
"path"
"path/filepath"
"strings"
"sync"
"time"
"github.com/denisbrodbeck/machineid"
log "github.com/schollz/logger"
"github.com/schollz/pake/v2"
"github.com/schollz/peerdiscovery"
"github.com/schollz/progressbar/v3"
"github.com/schollz/spinner"
"github.com/tscholl2/siec"
"github.com/schollz/croc/v8/src/comm"
"github.com/schollz/croc/v8/src/compress"
"github.com/schollz/croc/v8/src/crypt"
"github.com/schollz/croc/v8/src/message"
"github.com/schollz/croc/v8/src/models"
"github.com/schollz/croc/v8/src/tcp"
"github.com/schollz/croc/v8/src/utils"
)
// init sets the logger to debug level at package load time.
// NOTE(review): this overrides the logger's default before Debug() is ever
// called — confirm it is intentional and not leftover from development.
func init() {
	log.SetLevel("debug")
}
// Debug toggles debug mode: debug-level logging when true, warnings only
// when false.
func Debug(debug bool) {
	level := "warn"
	if debug {
		level = "debug"
	}
	log.SetLevel(level)
}
// Options specifies user specific options
type Options struct {
	IsSender       bool     // true when this client sends; false when it receives
	SharedSecret   string   // code phrase shared by both sides (also seeds PAKE)
	Debug          bool     // enable debug logging
	RelayAddress   string   // host[:port] of the relay to use
	RelayPorts     []string // relay ports used for the data connections
	RelayPassword  string   // password for authenticating to the relay
	Stdout         bool     // recipient writes the received data to stdout
	NoPrompt       bool     // skip the accept-files prompt
	NoMultiplexing bool     // use a single data connection instead of several
	DisableLocal   bool     // disable the local relay and peer discovery
	Ask            bool     // require confirmation tied to machine IDs
}
// Client holds the state of the croc transfer
type Client struct {
	Options                         Options
	Pake                            *pake.Pake // PAKE state used to derive the session key
	Key                             []byte     // symmetric key derived after PAKE + salt
	ExternalIP, ExternalIPConnected string     // own external IP and the peer's address
	// steps involved in forming relationship
	Step1ChannelSecured       bool
	Step2FileInfoTransfered   bool
	Step3RecipientRequestFile bool
	Step4FileTransfer         bool
	Step5CloseChannels        bool
	SuccessfulTransfer        bool
	// send / receive information of all files
	FilesToTransfer           []FileInfo
	FilesToTransferCurrentNum int
	FilesHasFinished          map[int]struct{} // indices of files that completed
	// send / receive information of current file
	CurrentFile            *os.File
	CurrentFileChunkRanges []int64 // compressed ranges of chunks still needed
	CurrentFileChunks      []int64 // explicit chunk offsets still needed
	TotalSent              int64
	TotalChunksTransfered  int
	chunkMap               map[uint64]struct{} // set of chunk offsets the recipient requested
	// tcp connections
	conn []*comm.Comm // conn[0] is the message channel; conn[1:] carry data
	bar             *progressbar.ProgressBar
	spinner         *spinner.Spinner
	longestFilename int // used to pad progress-bar descriptions
	firstSend       bool
	mutex           *sync.Mutex // guards chunkMap and CurrentFile writes
	fread           *os.File    // file currently being read by the sender
	numfinished     int         // count of sendData workers that finished
	quit            chan bool
}
// Chunk contains information about the
// needed bytes
type Chunk struct {
	Bytes    []byte `json:"b,omitempty"` // raw chunk data
	Location int64  `json:"l,omitempty"` // byte offset of the chunk in the file
}
// FileInfo registers the information about the file
type FileInfo struct {
	Name         string    `json:"n,omitempty"`  // base name of the file
	FolderRemote string    `json:"fr,omitempty"` // destination folder on the recipient
	FolderSource string    `json:"fs,omitempty"` // source folder on the sender
	Hash         []byte    `json:"h,omitempty"`  // content hash, used to skip identical files
	Size         int64     `json:"s,omitempty"`  // size in bytes
	ModTime      time.Time `json:"m,omitempty"`  // last modification time
	IsCompressed bool      `json:"c,omitempty"`  // NOTE(review): not referenced in this file — verify semantics
	IsEncrypted  bool      `json:"e,omitempty"`  // NOTE(review): not referenced in this file — verify semantics
}
// RemoteFileRequest requests specific bytes
type RemoteFileRequest struct {
	CurrentFileChunkRanges    []int64 // chunk ranges the recipient still needs
	FilesToTransferCurrentNum int     // index of the file being requested
	MachineID                 string  // recipient machine ID (shown when asking)
}
// SenderInfo lists the files to be transferred
type SenderInfo struct {
	FilesToTransfer []FileInfo // metadata for every file to send
	MachineID       string     // sender machine ID (shown when asking)
	Ask             bool       // sender requires the recipient to confirm
}
// New establishes a new connection for transferring files between two instances.
func New(ops Options) (c *Client, err error) {
	c = new(Client)
	c.FilesHasFinished = make(map[int]struct{})
	// setup basic info
	c.Options = ops
	Debug(c.Options.Debug)
	log.Debugf("options: %+v", c.Options)
	if len(c.Options.SharedSecret) < 4 {
		err = fmt.Errorf("code is too short")
		return
	}
	c.conn = make([]*comm.Comm, 16)
	// initialize pake: role 1 for the sender, role 0 for the recipient
	role := 0
	if c.Options.IsSender {
		role = 1
	}
	c.Pake, err = pake.Init([]byte(c.Options.SharedSecret), role, siec.SIEC255(), 1*time.Microsecond)
	if err != nil {
		return
	}
	c.mutex = &sync.Mutex{}
	return
}
// TransferOptions for sending
type TransferOptions struct {
	PathToFiles      []string // paths of the files to send
	KeepPathInRemote bool     // preserve the relative folder structure remotely
}
// sendCollectFiles gathers metadata (name, size, hash, folders) for every
// file to send, populating c.FilesToTransfer and printing a summary. With
// KeepPathInRemote, the folder structure relative to the current working
// directory is preserved on the remote side.
func (c *Client) sendCollectFiles(options TransferOptions) (err error) {
	c.FilesToTransfer = make([]FileInfo, len(options.PathToFiles))
	totalFilesSize := int64(0)
	for i, pathToFile := range options.PathToFiles {
		var fstats os.FileInfo
		var fullPath string
		fullPath, err = filepath.Abs(pathToFile)
		if err != nil {
			return
		}
		fullPath = filepath.Clean(fullPath)
		var folderName string
		folderName, _ = filepath.Split(fullPath)
		fstats, err = os.Stat(fullPath)
		if err != nil {
			return
		}
		// track the longest name for progress-bar padding
		if len(fstats.Name()) > c.longestFilename {
			c.longestFilename = len(fstats.Name())
		}
		c.FilesToTransfer[i] = FileInfo{
			Name:         fstats.Name(),
			FolderRemote: ".",
			FolderSource: folderName,
			Size:         fstats.Size(),
			ModTime:      fstats.ModTime(),
		}
		c.FilesToTransfer[i].Hash, err = utils.HashFile(fullPath)
		totalFilesSize += fstats.Size()
		if err != nil {
			return
		}
		if options.KeepPathInRemote {
			var curFolder string
			curFolder, err = os.Getwd()
			if err != nil {
				return
			}
			curFolder, err = filepath.Abs(curFolder)
			if err != nil {
				return
			}
			if !strings.HasPrefix(folderName, curFolder) {
				err = fmt.Errorf("remote directory must be relative to current")
				return
			}
			// remote folder is the source path relative to the working
			// directory, normalized to forward slashes
			c.FilesToTransfer[i].FolderRemote = strings.TrimPrefix(folderName, curFolder)
			c.FilesToTransfer[i].FolderRemote = filepath.ToSlash(c.FilesToTransfer[i].FolderRemote)
			c.FilesToTransfer[i].FolderRemote = strings.TrimPrefix(c.FilesToTransfer[i].FolderRemote, "/")
			if c.FilesToTransfer[i].FolderRemote == "" {
				c.FilesToTransfer[i].FolderRemote = "."
			}
		}
		log.Debugf("file %d info: %+v", i, c.FilesToTransfer[i])
	}
	log.Debugf("longestFilename: %+v", c.longestFilename)
	// summary: single quoted file name, or a file count
	fname := fmt.Sprintf("%d files", len(c.FilesToTransfer))
	if len(c.FilesToTransfer) == 1 {
		fname = fmt.Sprintf("'%s'", c.FilesToTransfer[0].Name)
	}
	fmt.Fprintf(os.Stderr, "Sending %s (%s)\n", fname, utils.ByteCountDecimal(totalFilesSize))
	return
}
// setupLocalRelay starts a croc relay on this machine, one goroutine per
// configured relay port; the ports after the first are passed to tcp.Run
// as the additional transfer ports.
func (c *Client) setupLocalRelay() {
	// setup the relay locally
	for _, port := range c.Options.RelayPorts {
		go func(portStr string) {
			debugString := "warn"
			if c.Options.Debug {
				debugString = "debug"
			}
			err := tcp.Run(debugString, portStr, c.Options.RelayPassword, strings.Join(c.Options.RelayPorts[1:], ","))
			if err != nil {
				panic(err)
			}
		}(port)
	}
}
// broadcastOnLocalNetwork advertises this sender on the local network for up
// to 30 seconds so a receiver can discover it. When useipv6 is true, the
// announcement goes out over IPv6 instead of the default version.
func (c *Client) broadcastOnLocalNetwork(useipv6 bool) {
	// look for peers first
	payload := []byte("croc" + c.Options.RelayPorts[0])
	settings := peerdiscovery.Settings{
		Limit:     -1,
		Payload:   payload,
		Delay:     10 * time.Millisecond,
		TimeLimit: 30 * time.Second,
	}
	if useipv6 {
		settings.IPVersion = peerdiscovery.IPv6
	}
	discoveries, err := peerdiscovery.Discover(settings)
	log.Debugf("discoveries: %+v", discoveries)
	if err != nil {
		log.Debug(err)
	}
}
// transferOverLocalRelay attempts the transfer through the relay that
// was started locally (see setupLocalRelay). A connection failure is
// not reported on errchan because the public relay is tried in
// parallel; on success the transfer result is pushed onto errchan.
func (c *Client) transferOverLocalRelay(options TransferOptions, errchan chan<- error) {
	// give the local relay a moment to start listening
	time.Sleep(500 * time.Millisecond)
	log.Debug("establishing connection")
	var banner string
	conn, banner, ipaddr, err := tcp.ConnectToTCPServer("localhost:"+c.Options.RelayPorts[0], c.Options.RelayPassword, c.Options.SharedSecret[:3])
	log.Debugf("banner: %s", banner)
	if err != nil {
		err = fmt.Errorf("could not connect to localhost:%s: %w", c.Options.RelayPorts[0], err)
		log.Debug(err)
		// not really an error because it will try to connect over the actual relay
		return
	}
	log.Debugf("local connection established: %+v", conn)
	// wait for the receiver's handshake; {1} pings are ignored
	for {
		data, _ := conn.Receive()
		if bytes.Equal(data, []byte("handshake")) {
			break
		} else if bytes.Equal(data, []byte{1}) {
			log.Debug("got ping")
		} else {
			log.Debugf("instead of handshake got: %s", data)
		}
	}
	c.conn[0] = conn
	log.Debug("exchanged header message")
	// from here on the local relay is the active relay; the banner
	// carries its comma-separated data ports
	c.Options.RelayAddress = "localhost"
	c.Options.RelayPorts = strings.Split(banner, ",")
	if c.Options.NoMultiplexing {
		log.Debug("no multiplexing")
		c.Options.RelayPorts = []string{c.Options.RelayPorts[0]}
	}
	c.ExternalIP = ipaddr
	errchan <- c.transfer(options)
}
// Send will send the specified file
//
// It collects file metadata, prints the code phrase for the recipient,
// then races two connection paths: a locally hosted relay (plus LAN
// broadcasts) and the configured remote relay. The first path to
// finish the transfer — or fail definitively — supplies the result.
func (c *Client) Send(options TransferOptions) (err error) {
	err = c.sendCollectFiles(options)
	if err != nil {
		return
	}
	// include a --relay hint in the printed command when not on the default relay
	otherRelay := ""
	if c.Options.RelayAddress != models.DEFAULT_RELAY {
		otherRelay = "--relay " + c.Options.RelayAddress + " "
	}
	fmt.Fprintf(os.Stderr, "Code is: %s\nOn the other computer run\n\ncroc %s%s\n", c.Options.SharedSecret, otherRelay, c.Options.SharedSecret)
	if c.Options.Ask {
		machid, _ := machineid.ID()
		fmt.Fprintf(os.Stderr, "\rYour machine ID is '%s'\n", machid)
	}
	// // c.spinner.Suffix = " waiting for recipient..."
	// c.spinner.Start()
	// create channel for quitting
	// connect to the relay for messaging
	errchan := make(chan error, 1)
	if !c.Options.DisableLocal {
		// add two things to the error channel
		errchan = make(chan error, 2)
		c.setupLocalRelay()
		// broadcast on both IPv6 and IPv4 so any local receiver can find us
		go c.broadcastOnLocalNetwork(true)
		go c.broadcastOnLocalNetwork(false)
		go c.transferOverLocalRelay(options, errchan)
	}
	// in parallel, attempt the transfer over the remote relay
	go func() {
		host, port, err := net.SplitHostPort(c.Options.RelayAddress)
		if err != nil {
			log.Errorf("bad relay address %s", c.Options.RelayAddress)
			return
		}
		// Default port to :9009
		if port == "" {
			port = "9009"
		}
		c.Options.RelayAddress = net.JoinHostPort(host, port)
		log.Debugf("establishing connection to %s", c.Options.RelayAddress)
		var banner string
		conn, banner, ipaddr, err := tcp.ConnectToTCPServer(c.Options.RelayAddress, c.Options.RelayPassword, c.Options.SharedSecret[:3], 5*time.Second)
		log.Debugf("banner: %s", banner)
		if err != nil {
			err = fmt.Errorf("could not connect to %s: %w", c.Options.RelayAddress, err)
			log.Debug(err)
			errchan <- err
			return
		}
		log.Debugf("connection established: %+v", conn)
		// wait for the recipient's handshake, answering "ips?" requests
		// with our local addresses so the recipient can switch to LAN
		for {
			log.Debug("waiting for bytes")
			data, errConn := conn.Receive()
			if errConn != nil {
				log.Debugf("[%+v] had error: %s", conn, errConn.Error())
			}
			if bytes.Equal(data, []byte("ips?")) {
				// recipient wants to try to connect to local ips
				var ips []string
				// only get local ips if the local is enabled
				if !c.Options.DisableLocal {
					// get list of local ips
					ips, err = utils.GetLocalIPs()
					if err != nil {
						log.Debugf("error getting local ips: %v", err)
					}
					// prepend the port that is being listened to
					ips = append([]string{c.Options.RelayPorts[0]}, ips...)
				}
				bips, _ := json.Marshal(ips)
				if err := conn.Send(bips); err != nil {
					log.Errorf("error sending: %v", err)
				}
			} else if bytes.Equal(data, []byte("handshake")) {
				break
			} else if bytes.Equal(data, []byte{1}) {
				log.Debug("got ping")
				continue
			} else {
				log.Debugf("[%+v] got weird bytes: %+v", conn, data)
				// throttle the reading
				errchan <- fmt.Errorf("gracefully refusing using the public relay")
				return
			}
		}
		c.conn[0] = conn
		// the banner carries the comma-separated list of data ports
		c.Options.RelayPorts = strings.Split(banner, ",")
		if c.Options.NoMultiplexing {
			log.Debug("no multiplexing")
			c.Options.RelayPorts = []string{c.Options.RelayPorts[0]}
		}
		c.ExternalIP = ipaddr
		log.Debug("exchanged header message")
		errchan <- c.transfer(options)
	}()
	// wait for the first result; with local enabled a second result may
	// still arrive from the other path
	err = <-errchan
	if err == nil {
		// return if no error
		return
	} else {
		log.Debugf("error from errchan: %v", err)
	}
	if !c.Options.DisableLocal {
		// a definitive failure is pushed back so the wait below can
		// return it even if the other path never reports
		if strings.Contains(err.Error(), "refusing files") || strings.Contains(err.Error(), "EOF") || strings.Contains(err.Error(), "bad password") {
			errchan <- err
		}
		err = <-errchan
	}
	return err
}
// Receive will receive a file
//
// It discovers LAN senders first (unless local mode is disabled),
// falling back to the configured relay; after connecting it may switch
// to a direct local connection advertised by the sender before running
// the transfer.
func (c *Client) Receive() (err error) {
	fmt.Fprintf(os.Stderr, "connecting...")
	// recipient will look for peers first
	// and continue if it doesn't find any within 100 ms
	usingLocal := false
	if !c.Options.DisableLocal {
		log.Debug("attempt to discover peers")
		var discoveries []peerdiscovery.Discovered
		var wgDiscovery sync.WaitGroup
		var dmux sync.Mutex
		wgDiscovery.Add(2)
		go func() {
			defer wgDiscovery.Done()
			ipv4discoveries, err1 := peerdiscovery.Discover(peerdiscovery.Settings{
				Limit:     1,
				Payload:   []byte("ok"),
				Delay:     10 * time.Millisecond,
				TimeLimit: 100 * time.Millisecond,
			})
			if err1 == nil && len(ipv4discoveries) > 0 {
				dmux.Lock()
				err = err1
				discoveries = ipv4discoveries
				dmux.Unlock()
			}
		}()
		go func() {
			defer wgDiscovery.Done()
			ipv6discoveries, err1 := peerdiscovery.Discover(peerdiscovery.Settings{
				Limit:     1,
				Payload:   []byte("ok"),
				Delay:     10 * time.Millisecond,
				TimeLimit: 100 * time.Millisecond,
				IPVersion: peerdiscovery.IPv6,
			})
			if err1 == nil && len(ipv6discoveries) > 0 {
				dmux.Lock()
				err = err1
				discoveries = ipv6discoveries
				dmux.Unlock()
			}
		}()
		wgDiscovery.Wait()
		if err == nil && len(discoveries) > 0 {
			for i := 0; i < len(discoveries); i++ {
				log.Debugf("discovery %d has payload: %+v", i, discoveries[i])
				if !bytes.HasPrefix(discoveries[i].Payload, []byte("croc")) {
					log.Debug("skipping discovery")
					continue
				}
				log.Debug("switching to local")
				// BUGFIX: use the discovery that matched the "croc"
				// prefix (index i), not discoveries[0], which may be a
				// different, non-croc peer
				portToUse := string(bytes.TrimPrefix(discoveries[i].Payload, []byte("croc")))
				if portToUse == "" {
					portToUse = "9009"
				}
				c.Options.RelayAddress = fmt.Sprintf("%s:%s",
					discoveries[i].Address,
					portToUse,
				)
				c.ExternalIPConnected = c.Options.RelayAddress
				usingLocal = true
				break
			}
		}
		log.Debugf("discoveries: %+v", discoveries)
		log.Debug("establishing connection")
	}
	host, port, err := net.SplitHostPort(c.Options.RelayAddress)
	if err != nil {
		log.Errorf("bad relay address %s", c.Options.RelayAddress)
		return
	}
	// Default port to :9009
	if port == "" {
		port = "9009"
	}
	c.Options.RelayAddress = net.JoinHostPort(host, port)
	log.Debugf("establishing receiver connection to %s", c.Options.RelayAddress)
	var banner string
	c.conn[0], banner, c.ExternalIP, err = tcp.ConnectToTCPServer(c.Options.RelayAddress, c.Options.RelayPassword, c.Options.SharedSecret[:3])
	log.Debugf("banner: %s", banner)
	if err != nil {
		err = fmt.Errorf("could not connect to %s: %w", c.Options.RelayAddress, err)
		log.Debug(err)
		return
	}
	log.Debugf("receiver connection established: %+v", c.conn[0])
	if !usingLocal && !c.Options.DisableLocal {
		// ask the sender for their local ips and port
		// and try to connect to them
		log.Debug("sending ips?")
		var data []byte
		if err := c.conn[0].Send([]byte("ips?")); err != nil {
			log.Errorf("ips send error: %v", err)
		}
		data, err = c.conn[0].Receive()
		if err != nil {
			return
		}
		log.Debugf("ips data: %s", data)
		var ips []string
		if err := json.Unmarshal(data, &ips); err != nil {
			log.Errorf("ips unmarshal error: %v", err)
		}
		if len(ips) > 1 {
			// first element is the sender's listening port
			port := ips[0]
			ips = ips[1:]
			for _, ip := range ips {
				// only try addresses that share a /24 with one of our interfaces
				ipv4Addr, ipv4Net, errNet := net.ParseCIDR(fmt.Sprintf("%s/24", ip))
				log.Debugf("ipv4Add4: %+v, ipv4Net: %+v, err: %+v", ipv4Addr, ipv4Net, errNet)
				localIps, _ := utils.GetLocalIPs()
				haveLocalIP := false
				for _, localIP := range localIps {
					localIPparsed := net.ParseIP(localIP)
					if ipv4Net.Contains(localIPparsed) {
						haveLocalIP = true
						break
					}
				}
				if !haveLocalIP {
					log.Debugf("%s is not a local IP, skipping", ip)
					continue
				}
				serverTry := fmt.Sprintf("%s:%s", ip, port)
				conn, banner2, externalIP, errConn := tcp.ConnectToTCPServer(serverTry, c.Options.RelayPassword, c.Options.SharedSecret[:3], 50*time.Millisecond)
				if errConn != nil {
					log.Debugf("could not connect to " + serverTry)
					continue
				}
				log.Debugf("local connection established to %s", serverTry)
				log.Debugf("banner: %s", banner2)
				// reset to the local port
				banner = banner2
				c.Options.RelayAddress = serverTry
				c.ExternalIP = externalIP
				c.conn[0].Close()
				c.conn[0] = nil
				c.conn[0] = conn
				break
			}
		}
	}
	if err := c.conn[0].Send([]byte("handshake")); err != nil {
		log.Errorf("handshake send error: %v", err)
	}
	// the banner carries the comma-separated list of data ports
	c.Options.RelayPorts = strings.Split(banner, ",")
	if c.Options.NoMultiplexing {
		log.Debug("no multiplexing")
		c.Options.RelayPorts = []string{c.Options.RelayPorts[0]}
	}
	log.Debug("exchanged header message")
	fmt.Fprintf(os.Stderr, "\rsecuring channel...")
	return c.transfer(TransferOptions{})
}
// transfer runs the main message loop over the secured connection,
// dispatching each received payload to processMessage until the
// transfer finishes or the connection errors out.
func (c *Client) transfer(options TransferOptions) (err error) {
	// connect to the server
	// quit with c.quit <- true
	c.quit = make(chan bool)
	// if recipient, initialize with sending pake information
	log.Debug("ready")
	if !c.Options.IsSender && !c.Step1ChannelSecured {
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type:  "pake",
			Bytes: c.Pake.Bytes(),
		})
		if err != nil {
			return
		}
	}
	// listen for incoming messages and process them
	for {
		var data []byte
		var done bool
		data, err = c.conn[0].Receive()
		if err != nil {
			log.Debugf("got error receiving: %v", err)
			// a receive failure before the channel was secured means
			// the PAKE never completed
			if !c.Step1ChannelSecured {
				err = fmt.Errorf("could not secure channel")
			}
			break
		}
		done, err = c.processMessage(data)
		if err != nil {
			log.Debugf("got error processing: %v", err)
			break
		}
		if done {
			break
		}
	}
	// purge errors that come from successful transfer
	if c.SuccessfulTransfer {
		if err != nil {
			log.Debugf("purging error: %s", err)
		}
		err = nil
	}
	// when streaming to stdout, the received temp file has already been
	// printed, so remove it from disk
	if c.Options.Stdout && !c.Options.IsSender {
		pathToFile := path.Join(
			c.FilesToTransfer[c.FilesToTransferCurrentNum].FolderRemote,
			c.FilesToTransfer[c.FilesToTransferCurrentNum].Name,
		)
		if err := os.Remove(pathToFile); err != nil {
			log.Warnf("error removing %s: %v", pathToFile, err)
		}
	}
	return
}
// processMessageFileInfo handles the sender's file listing: it records
// the files to transfer and, unless prompts are disabled, asks the
// user to accept before continuing. Returns done=true on refusal.
func (c *Client) processMessageFileInfo(m message.Message) (done bool, err error) {
	var senderInfo SenderInfo
	err = json.Unmarshal(m.Bytes, &senderInfo)
	if err != nil {
		log.Error(err)
		return
	}
	c.FilesToTransfer = senderInfo.FilesToTransfer
	fname := fmt.Sprintf("%d files", len(c.FilesToTransfer))
	if len(c.FilesToTransfer) == 1 {
		fname = fmt.Sprintf("'%s'", c.FilesToTransfer[0].Name)
	}
	// total size is for the summary; the longest name aligns progress bars
	totalSize := int64(0)
	for _, fi := range c.FilesToTransfer {
		totalSize += fi.Size
		if len(fi.Name) > c.longestFilename {
			c.longestFilename = len(fi.Name)
		}
	}
	// c.spinner.Stop()
	if !c.Options.NoPrompt || c.Options.Ask || senderInfo.Ask {
		if c.Options.Ask || senderInfo.Ask {
			machID, _ := machineid.ID()
			fmt.Fprintf(os.Stderr, "\rYour machine id is '%s'.\nAccept %s (%s) from '%s'? (y/n) ", machID, fname, utils.ByteCountDecimal(totalSize), senderInfo.MachineID)
		} else {
			fmt.Fprintf(os.Stderr, "\rAccept %s (%s)? (y/n) ", fname, utils.ByteCountDecimal(totalSize))
		}
		if strings.ToLower(strings.TrimSpace(utils.GetInput(""))) != "y" {
			// notify the sender; the refusal error below supersedes any
			// error from this send
			err = message.Send(c.conn[0], c.Key, message.Message{
				Type:    "error",
				Message: "refusing files",
			})
			return true, fmt.Errorf("refused files")
		}
	} else {
		fmt.Fprintf(os.Stderr, "\rReceiving %s (%s) \n", fname, utils.ByteCountDecimal(totalSize))
	}
	fmt.Fprintf(os.Stderr, "\nReceiving (<-%s)\n", c.ExternalIPConnected)
	log.Debug(c.FilesToTransfer)
	c.Step2FileInfoTransfered = true
	return
}
// procesMessagePake advances the PAKE key exchange with the peer's
// payload; once both sides are verified, the sender issues a random
// salt and both sides dial the relay's extra data ports.
func (c *Client) procesMessagePake(m message.Message) (err error) {
	log.Debug("received pake payload")
	// if // c.spinner.Suffix != " performing PAKE..." {
	// // c.spinner.Stop()
	// // c.spinner.Suffix = " performing PAKE..."
	// // c.spinner.Start()
	// }
	notVerified := !c.Pake.IsVerified()
	err = c.Pake.Update(m.Bytes)
	if err != nil {
		return
	}
	// reply with our pake bytes while the exchange is still in progress
	if (notVerified && c.Pake.IsVerified() && !c.Options.IsSender) || !c.Pake.IsVerified() {
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type:  "pake",
			Bytes: c.Pake.Bytes(),
		})
	}
	if c.Pake.IsVerified() {
		if c.Options.IsSender {
			log.Debug("generating salt")
			salt := make([]byte, 8)
			// BUGFIX: previously this checked the (possibly nil) outer
			// err instead of rand.Read's own error, so a failed read
			// went undetected; check and propagate rerr instead
			if _, rerr := rand.Read(salt); rerr != nil {
				log.Errorf("can't generate random numbers: %v", rerr)
				err = rerr
				return
			}
			err = message.Send(c.conn[0], c.Key, message.Message{
				Type:  "salt",
				Bytes: salt,
			})
			if err != nil {
				return
			}
		}
		// connects to the other ports of the server for transfer
		var wg sync.WaitGroup
		wg.Add(len(c.Options.RelayPorts))
		for i := 0; i < len(c.Options.RelayPorts); i++ {
			log.Debugf("port: [%s]", c.Options.RelayPorts[i])
			go func(j int) {
				defer wg.Done()
				host, _, err := net.SplitHostPort(c.Options.RelayAddress)
				if err != nil {
					log.Errorf("bad relay address %s", c.Options.RelayAddress)
					return
				}
				server := net.JoinHostPort(host, c.Options.RelayPorts[j])
				log.Debugf("connecting to %s", server)
				c.conn[j+1], _, _, err = tcp.ConnectToTCPServer(
					server,
					c.Options.RelayPassword,
					fmt.Sprintf("%s-%d", utils.SHA256(c.Options.SharedSecret)[:7], j),
				)
				if err != nil {
					panic(err)
				}
				log.Debugf("connected to %s", server)
				// recipients start pulling data immediately
				if !c.Options.IsSender {
					go c.receiveData(j)
				}
			}(i)
		}
		wg.Wait()
	}
	return
}
// processMessageSalt finalizes key derivation: both sides derive the
// symmetric key from the PAKE session key plus the sender-chosen salt.
// The recipient echoes the salt back; the sender then announces its
// external IP.
func (c *Client) processMessageSalt(m message.Message) (done bool, err error) {
	log.Debug("received salt")
	if !c.Options.IsSender {
		log.Debug("sending salt back")
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type:  "salt",
			Bytes: m.Bytes,
		})
	}
	log.Debugf("session key is verified, generating encryption with salt: %x", m.Bytes)
	key, err := c.Pake.SessionKey()
	if err != nil {
		return true, err
	}
	// derive the final symmetric key from the PAKE session key + salt
	c.Key, _, err = crypt.New(key, m.Bytes)
	if err != nil {
		return true, err
	}
	log.Debugf("key = %+x", c.Key)
	if c.Options.IsSender {
		log.Debug("sending external IP")
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type:    "externalip",
			Message: c.ExternalIP,
			Bytes:   m.Bytes,
		})
	}
	return
}
// processExternalIP records the peer's external IP and marks the
// channel as secured; the recipient replies with its own external IP.
func (c *Client) processExternalIP(m message.Message) (done bool, err error) {
	log.Debugf("received external IP: %+v", m)
	if !c.Options.IsSender {
		// echo our own external IP back to the sender
		sendErr := message.Send(c.conn[0], c.Key, message.Message{
			Type:    "externalip",
			Message: c.ExternalIP,
		})
		if sendErr != nil {
			return true, sendErr
		}
	}
	// a local relay may have preset the connected address already
	if c.ExternalIPConnected == "" {
		c.ExternalIPConnected = m.Message
	}
	log.Debugf("connected as %s -> %s", c.ExternalIP, c.ExternalIPConnected)
	c.Step1ChannelSecured = true
	return
}
// processMessage decodes one encrypted payload from the main connection
// and dispatches it by message type; done reports that the transfer
// loop should stop. After a successfully handled message, the state
// machine is advanced via updateState.
func (c *Client) processMessage(payload []byte) (done bool, err error) {
	m, err := message.Decode(c.Key, payload)
	if err != nil {
		err = fmt.Errorf("problem with decoding: %w", err)
		log.Debug(err)
		return
	}
	switch m.Type {
	case "finished":
		// peer is done; acknowledge and stop
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type: "finished",
		})
		done = true
		c.SuccessfulTransfer = true
		return
	case "pake":
		err = c.procesMessagePake(m)
		if err != nil {
			err = fmt.Errorf("pake not successful: %w", err)
			log.Debug(err)
		}
	case "salt":
		done, err = c.processMessageSalt(m)
	case "externalip":
		done, err = c.processExternalIP(m)
	case "error":
		// c.spinner.Stop()
		fmt.Print("\r")
		err = fmt.Errorf("peer error: %s", m.Message)
		return true, err
	case "fileinfo":
		done, err = c.processMessageFileInfo(m)
	case "recipientready":
		// recipient tells us which file and which chunks it still needs
		var remoteFile RemoteFileRequest
		err = json.Unmarshal(m.Bytes, &remoteFile)
		if err != nil {
			return
		}
		c.FilesToTransferCurrentNum = remoteFile.FilesToTransferCurrentNum
		c.CurrentFileChunkRanges = remoteFile.CurrentFileChunkRanges
		c.CurrentFileChunks = utils.ChunkRangesToChunks(c.CurrentFileChunkRanges)
		log.Debugf("current file chunks: %+v", c.CurrentFileChunks)
		// chunkMap is consulted concurrently by the sendData goroutines
		c.mutex.Lock()
		c.chunkMap = make(map[uint64]struct{})
		for _, chunk := range c.CurrentFileChunks {
			c.chunkMap[uint64(chunk)] = struct{}{}
		}
		c.mutex.Unlock()
		c.Step3RecipientRequestFile = true
		if c.Options.Ask {
			fmt.Fprintf(os.Stderr, "Send to machine '%s'? (y/n) ", remoteFile.MachineID)
			if strings.ToLower(strings.TrimSpace(utils.GetInput(""))) != "y" {
				err = message.Send(c.conn[0], c.Key, message.Message{
					Type:    "error",
					Message: "refusing files",
				})
				done = true
				err = fmt.Errorf("refused files")
				return
			}
		}
	case "close-sender":
		// sender finished the current file; confirm with close-recipient
		c.bar.Finish()
		log.Debug("close-sender received...")
		c.Step4FileTransfer = false
		c.Step3RecipientRequestFile = false
		log.Debug("sending close-recipient")
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type: "close-recipient",
		})
	case "close-recipient":
		c.Step4FileTransfer = false
		c.Step3RecipientRequestFile = false
	}
	if err != nil {
		log.Debugf("got error from processing message: %v", err)
		return
	}
	// advance the state machine after every successfully handled message
	err = c.updateState()
	if err != nil {
		log.Debugf("got error from updating state: %v", err)
		return
	}
	return
}
// updateIfSenderChannelSecured is the sender-side step that runs once,
// after the channel is secured but before the file listing has been
// transferred: it sends the "fileinfo" message to the recipient.
func (c *Client) updateIfSenderChannelSecured() (err error) {
	if !c.Options.IsSender || !c.Step1ChannelSecured || c.Step2FileInfoTransfered {
		return
	}
	machID, _ := machineid.ID()
	payload, err := json.Marshal(SenderInfo{
		FilesToTransfer: c.FilesToTransfer,
		MachineID:       machID,
		Ask:             c.Options.Ask,
	})
	if err != nil {
		log.Error(err)
		return
	}
	if err = message.Send(c.conn[0], c.Key, message.Message{
		Type:  "fileinfo",
		Bytes: payload,
	}); err != nil {
		return
	}
	c.Step2FileInfoTransfered = true
	return
}
// recipientInitializeFile opens (or creates) the destination file for
// the current transfer and determines which chunks are still missing.
//
// If the file already exists with the expected size, only the missing
// chunk ranges are requested; otherwise the file is (re)created and
// truncated to the full size so chunks can be written at their offsets.
func (c *Client) recipientInitializeFile() (err error) {
	// start initiating the process to receive a new file
	log.Debugf("working on file %d", c.FilesToTransferCurrentNum)
	// recipient sets the file
	pathToFile := path.Join(
		c.FilesToTransfer[c.FilesToTransferCurrentNum].FolderRemote,
		c.FilesToTransfer[c.FilesToTransferCurrentNum].Name,
	)
	folderForFile, _ := filepath.Split(pathToFile)
	folderForFileBase := filepath.Base(folderForFile)
	if folderForFileBase != "." && folderForFileBase != "" {
		if err := os.MkdirAll(folderForFile, os.ModePerm); err != nil {
			log.Errorf("can't create %s: %v", folderForFile, err)
		}
	}
	var errOpen error
	c.CurrentFile, errOpen = os.OpenFile(
		pathToFile,
		os.O_WRONLY, 0666)
	var truncate bool // default false
	c.CurrentFileChunks = []int64{}
	c.CurrentFileChunkRanges = []int64{}
	if errOpen == nil {
		// file already exists; only truncate when its size differs
		stat, _ := c.CurrentFile.Stat()
		truncate = stat.Size() != c.FilesToTransfer[c.FilesToTransferCurrentNum].Size
		if !truncate {
			// recipient requests the file and chunks (if empty, then should receive all chunks)
			// TODO: determine the missing chunks
			c.CurrentFileChunkRanges = utils.MissingChunks(
				pathToFile,
				c.FilesToTransfer[c.FilesToTransferCurrentNum].Size,
				models.TCP_BUFFER_SIZE/2,
			)
		}
	} else {
		c.CurrentFile, errOpen = os.Create(pathToFile)
		if errOpen != nil {
			errOpen = fmt.Errorf("could not create %s: %w", pathToFile, errOpen)
			log.Error(errOpen)
			return errOpen
		}
		truncate = true
	}
	if truncate {
		// pre-size the file so chunks can be written at any offset
		err := c.CurrentFile.Truncate(c.FilesToTransfer[c.FilesToTransferCurrentNum].Size)
		if err != nil {
			err = fmt.Errorf("could not truncate %s: %w", pathToFile, err)
			log.Error(err)
			return err
		}
	}
	return
}
// recipientGetFileReady prepares the destination file (or acknowledges
// completion when finished is true) and tells the sender which file and
// chunks are still needed via a "recipientready" message.
func (c *Client) recipientGetFileReady(finished bool) (err error) {
	if finished {
		// TODO: do the last finishing stuff
		log.Debug("finished")
		err = message.Send(c.conn[0], c.Key, message.Message{
			Type: "finished",
		})
		if err != nil {
			panic(err)
		}
		c.SuccessfulTransfer = true
		c.FilesHasFinished[c.FilesToTransferCurrentNum] = struct{}{}
	}
	// open/create the file and compute the missing chunk ranges
	err = c.recipientInitializeFile()
	if err != nil {
		return
	}
	c.TotalSent = 0
	machID, _ := machineid.ID()
	bRequest, _ := json.Marshal(RemoteFileRequest{
		CurrentFileChunkRanges:    c.CurrentFileChunkRanges,
		FilesToTransferCurrentNum: c.FilesToTransferCurrentNum,
		MachineID:                 machID,
	})
	log.Debug("converting to chunk range")
	c.CurrentFileChunks = utils.ChunkRangesToChunks(c.CurrentFileChunkRanges)
	if !finished {
		// setup the progressbar
		c.setBar()
	}
	log.Debugf("sending recipient ready with %d chunks", len(c.CurrentFileChunks))
	err = message.Send(c.conn[0], c.Key, message.Message{
		Type:  "recipientready",
		Bytes: bRequest,
	})
	if err != nil {
		return
	}
	c.Step3RecipientRequestFile = true
	return
}
// createEmptyFileAndFinish creates the zero-byte file i at its remote
// path and renders an already-completed progress bar for it.
func (c *Client) createEmptyFileAndFinish(fileInfo FileInfo, i int) (err error) {
	log.Debugf("touching file with folder / name")
	if !utils.Exists(fileInfo.FolderRemote) {
		if err = os.MkdirAll(fileInfo.FolderRemote, os.ModePerm); err != nil {
			log.Error(err)
			return
		}
	}
	touched, errCreate := os.Create(path.Join(fileInfo.FolderRemote, fileInfo.Name))
	if errCreate != nil {
		log.Error(errCreate)
		return errCreate
	}
	touched.Close()
	// setup the progressbar (padded name aligns bars for multiple files)
	desc := c.FilesToTransfer[i].Name
	if len(c.FilesToTransfer) != 1 {
		desc = fmt.Sprintf("%-*s", c.longestFilename, c.FilesToTransfer[i].Name)
	}
	c.bar = progressbar.NewOptions64(1,
		progressbar.OptionOnCompletion(func() {
			fmt.Fprintf(os.Stderr, " ✔️\n")
		}),
		progressbar.OptionSetWidth(20),
		progressbar.OptionSetDescription(desc),
		progressbar.OptionSetRenderBlankState(true),
		progressbar.OptionShowBytes(true),
		progressbar.OptionShowCount(),
		progressbar.OptionSetWriter(os.Stderr),
	)
	c.bar.Finish()
	return
}
// updateIfRecipientHasFileInfo is the recipient-side step after the
// file listing arrives: it finds the next file that is absent or
// differs (by hash) and requests it, or reports that all files are
// finished.
func (c *Client) updateIfRecipientHasFileInfo() (err error) {
	if !(!c.Options.IsSender && c.Step2FileInfoTransfered && !c.Step3RecipientRequestFile) {
		return
	}
	// find the next file to transfer and send that number
	// if the files are the same size, then look for missing chunks
	finished := true
	for i, fileInfo := range c.FilesToTransfer {
		if _, ok := c.FilesHasFinished[i]; ok {
			continue
		}
		log.Debugf("checking %+v", fileInfo)
		if i < c.FilesToTransferCurrentNum {
			continue
		}
		fileHash, errHash := utils.HashFile(path.Join(fileInfo.FolderRemote, fileInfo.Name))
		// zero-byte files are created directly; no transfer needed
		if fileInfo.Size == 0 {
			err = c.createEmptyFileAndFinish(fileInfo, i)
			if err != nil {
				return
			}
			continue
		}
		log.Debugf("%s %+x %+x %+v", fileInfo.Name, fileHash, fileInfo.Hash, errHash)
		if !bytes.Equal(fileHash, fileInfo.Hash) {
			log.Debugf("hashes are not equal %x != %x", fileHash, fileInfo.Hash)
		} else {
			log.Debugf("hashes are equal %x == %x", fileHash, fileInfo.Hash)
		}
		if errHash != nil {
			// probably can't find, its okay
			log.Debug(errHash)
		}
		// request the file when it is absent or its content differs
		if errHash != nil || !bytes.Equal(fileHash, fileInfo.Hash) {
			finished = false
			c.FilesToTransferCurrentNum = i
			break
		}
		// TODO: print out something about this file already existing
	}
	err = c.recipientGetFileReady(finished)
	return
}
// updateState advances the transfer state machine after each processed
// message: the sender sends file info once the channel is secured, the
// recipient requests the next file, and the sender starts streaming
// data once a request arrives.
func (c *Client) updateState() (err error) {
	err = c.updateIfSenderChannelSecured()
	if err != nil {
		return
	}
	err = c.updateIfRecipientHasFileInfo()
	if err != nil {
		return
	}
	if c.Options.IsSender && c.Step3RecipientRequestFile && !c.Step4FileTransfer {
		log.Debug("start sending data!")
		if !c.firstSend {
			fmt.Fprintf(os.Stderr, "\nSending (->%s)\n", c.ExternalIPConnected)
			c.firstSend = true
			// if there are empty files, show them as already have been transferred now
			for i := range c.FilesToTransfer {
				if c.FilesToTransfer[i].Size == 0 {
					// setup the progressbar and takedown the progress bar for empty files
					description := fmt.Sprintf("%-*s", c.longestFilename, c.FilesToTransfer[i].Name)
					if len(c.FilesToTransfer) == 1 {
						description = c.FilesToTransfer[i].Name
					}
					c.bar = progressbar.NewOptions64(1,
						progressbar.OptionOnCompletion(func() {
							fmt.Fprintf(os.Stderr, " ✔️\n")
						}),
						progressbar.OptionSetWidth(20),
						progressbar.OptionSetDescription(description),
						progressbar.OptionSetRenderBlankState(true),
						progressbar.OptionShowBytes(true),
						progressbar.OptionShowCount(),
						progressbar.OptionSetWriter(os.Stderr),
					)
					c.bar.Finish()
				}
			}
		}
		c.Step4FileTransfer = true
		// setup the progressbar
		c.setBar()
		c.TotalSent = 0
		log.Debug("beginning sending comms")
		pathToFile := path.Join(
			c.FilesToTransfer[c.FilesToTransferCurrentNum].FolderSource,
			c.FilesToTransfer[c.FilesToTransferCurrentNum].Name,
		)
		c.fread, err = os.Open(pathToFile)
		c.numfinished = 0
		if err != nil {
			return
		}
		// one sending goroutine per data connection
		for i := 0; i < len(c.Options.RelayPorts); i++ {
			log.Debugf("starting sending over comm %d", i)
			go c.sendData(i)
		}
	}
	return
}
// setBar (re)creates the progress bar for the file currently being
// transferred, pre-advancing it by the bytes already present on disk.
func (c *Client) setBar() {
	current := c.FilesToTransferCurrentNum
	description := c.FilesToTransfer[current].Name
	if len(c.FilesToTransfer) != 1 {
		// pad the name so bars for multiple files line up
		description = fmt.Sprintf("%-*s", c.longestFilename, c.FilesToTransfer[current].Name)
	}
	c.bar = progressbar.NewOptions64(
		c.FilesToTransfer[current].Size,
		progressbar.OptionOnCompletion(func() {
			fmt.Fprintf(os.Stderr, " ✔️\n")
		}),
		progressbar.OptionSetWidth(20),
		progressbar.OptionSetDescription(description),
		progressbar.OptionSetRenderBlankState(true),
		progressbar.OptionShowBytes(true),
		progressbar.OptionShowCount(),
		progressbar.OptionSetWriter(os.Stderr),
		progressbar.OptionThrottle(100*time.Millisecond),
	)
	// credit the bar for chunks the recipient already has on disk
	remaining := int64(len(c.CurrentFileChunks) * models.TCP_BUFFER_SIZE / 2)
	if remaining > 0 {
		alreadyDone := c.FilesToTransfer[current].Size - remaining
		log.Debug(remaining)
		log.Debug(c.FilesToTransfer[current].Size)
		log.Debug(alreadyDone)
		if alreadyDone > 0 {
			c.bar.Add64(alreadyDone)
		}
	}
}
// receiveData reads encrypted chunks from data connection i+1, writes
// each at its embedded file offset, and triggers "close-sender" when
// the current file is complete.
func (c *Client) receiveData(i int) {
	log.Debugf("%d receiving data", i)
	for {
		data, err := c.conn[i+1].Receive()
		if err != nil {
			break
		}
		// {1} is a keepalive ping
		if bytes.Equal(data, []byte{1}) {
			log.Debug("got ping")
			continue
		}
		data, err = crypt.Decrypt(data, c.Key)
		if err != nil {
			panic(err)
		}
		data = compress.Decompress(data)
		// get position
		// chunk layout: 8-byte little-endian file offset + payload
		var position uint64
		rbuf := bytes.NewReader(data[:8])
		err = binary.Read(rbuf, binary.LittleEndian, &position)
		if err != nil {
			panic(err)
		}
		positionInt64 := int64(position)
		// the file handle is shared with other receive goroutines
		c.mutex.Lock()
		_, err = c.CurrentFile.WriteAt(data[8:], positionInt64)
		c.mutex.Unlock()
		if err != nil {
			panic(err)
		}
		c.bar.Add(len(data[8:]))
		c.TotalSent += int64(len(data[8:]))
		c.TotalChunksTransfered++
		// finished when all requested chunks (or all bytes) have arrived
		if c.TotalChunksTransfered == len(c.CurrentFileChunks) || c.TotalSent == c.FilesToTransfer[c.FilesToTransferCurrentNum].Size {
			log.Debug("finished receiving!")
			if err := c.CurrentFile.Close(); err != nil {
				log.Errorf("error closing %s: %v", c.CurrentFile.Name(), err)
			}
			// stdin-sourced transfers are echoed straight to stdout
			if c.Options.Stdout || strings.HasPrefix(c.FilesToTransfer[c.FilesToTransferCurrentNum].Name, "croc-stdin") {
				pathToFile := path.Join(
					c.FilesToTransfer[c.FilesToTransferCurrentNum].FolderRemote,
					c.FilesToTransfer[c.FilesToTransferCurrentNum].Name,
				)
				b, _ := ioutil.ReadFile(pathToFile)
				fmt.Print(string(b))
			}
			log.Debug("sending close-sender")
			err = message.Send(c.conn[0], c.Key, message.Message{
				Type: "close-sender",
			})
			if err != nil {
				panic(err)
			}
		}
	}
	return
}
// sendData streams the current file over data connection i+1. Chunks
// are striped round-robin across connections by index; when chunkMap
// is non-empty, only chunks the recipient requested are sent. The file
// reader is closed after the last sender goroutine finishes.
func (c *Client) sendData(i int) {
	defer func() {
		log.Debugf("finished with %d", i)
		// NOTE(review): numfinished is incremented from multiple sender
		// goroutines without synchronization — confirm this is intended
		c.numfinished++
		if c.numfinished == len(c.Options.RelayPorts) {
			log.Debug("closing file")
			if err := c.fread.Close(); err != nil {
				log.Errorf("error closing file: %v", err)
			}
		}
	}()
	var readingPos int64
	pos := uint64(0)
	curi := float64(0)
	for {
		// Read file
		data := make([]byte, models.TCP_BUFFER_SIZE/2)
		// log.Debugf("%d trying to read", i)
		n, errRead := c.fread.ReadAt(data, readingPos)
		// log.Debugf("%d read %d bytes", i, n)
		readingPos += int64(n)
		// this goroutine handles every len(RelayPorts)-th chunk
		if math.Mod(curi, float64(len(c.Options.RelayPorts))) == float64(i) {
			// check to see if this is a chunk that the recipient wants
			usableChunk := true
			c.mutex.Lock()
			if len(c.chunkMap) != 0 {
				if _, ok := c.chunkMap[pos]; !ok {
					usableChunk = false
				} else {
					delete(c.chunkMap, pos)
				}
			}
			c.mutex.Unlock()
			if usableChunk {
				// log.Debugf("sending chunk %d", pos)
				// chunk layout: 8-byte little-endian offset + payload,
				// compressed then encrypted
				posByte := make([]byte, 8)
				binary.LittleEndian.PutUint64(posByte, pos)
				dataToSend, err := crypt.Encrypt(
					compress.Compress(
						append(posByte, data[:n]...),
					),
					c.Key,
				)
				if err != nil {
					panic(err)
				}
				err = c.conn[i+1].Send(dataToSend)
				if err != nil {
					panic(err)
				}
				c.bar.Add(n)
				c.TotalSent += int64(n)
				// time.Sleep(100 * time.Millisecond)
			} else {
				// log.Debugf("skipping chunk %d", pos)
			}
		}
		curi++
		pos += uint64(n)
		// ReadAt can return data together with io.EOF on the last chunk
		if errRead != nil {
			if errRead == io.EOF {
				break
			}
			panic(errRead)
		}
	}
	return
}
|
package parser
import (
"bufio"
"bytes"
"fmt"
"regexp"
"strconv"
"strings"
"time"
"github.com/czerwonk/bird_exporter/protocol"
"github.com/prometheus/common/log"
)
// Compiled regular expressions for parsing bird CLI output; they are
// initialized once in init().
var (
	protocolRegex    *regexp.Regexp
	descriptionRegex *regexp.Regexp
	routeRegex       *regexp.Regexp
	uptimeRegex      *regexp.Regexp
	routeChangeRegex *regexp.Regexp
	filterRegex      *regexp.Regexp
	channelRegex     *regexp.Regexp
)
// context carries the parser state while scanning bird output line by line.
type context struct {
	current   *protocol.Protocol   // protocol currently being filled in (nil between sections)
	line      string               // current (right-trimmed) line
	handled   bool                 // set by the handler that consumed the line
	protocols []*protocol.Protocol // all protocols parsed so far
	ipVersion string               // fixed IP version ("" enables bird2 channel detection)
}
// init compiles all parser regexes once at package load.
func init() {
	// BUGFIX: list every protocol type bird can emit; a protocol line
	// that did not match left context.current nil and crashed the
	// attribute handlers that dereference it (segfaults, issue #34)
	protocolRegex = regexp.MustCompile(`^(?:1002\-)?([^\s]+)\s+(MRT|BGP|BFD|OSPF|RPKI|RIP|RAdv|Pipe|Perf|Direct|Babel|Device|Kernel|Static)\s+([^\s]+)\s+([^\s]+)\s+(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}|[^\s]+)(?:\s+(.*?))?$`)
	descriptionRegex = regexp.MustCompile(`Description:\s+(.*)`)
	routeRegex = regexp.MustCompile(`^\s+Routes:\s+(\d+) imported, (?:(\d+) filtered, )?(\d+) exported(?:, (\d+) preferred)?`)
	uptimeRegex = regexp.MustCompile(`^(?:((\d+):(\d{2}):(\d{2}))|(\d+)|(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}))$`)
	routeChangeRegex = regexp.MustCompile(`(Import|Export) (updates|withdraws):\s+(\d+|---)\s+(\d+|---)\s+(\d+|---)\s+(\d+|---)\s+(\d+|---)\s*`)
	filterRegex = regexp.MustCompile(`(Input|Output) filter:\s+(.*)`)
	channelRegex = regexp.MustCompile(`Channel ipv(4|6)`)
}
// ParseProtocols parses bird output and returns protocol.Protocol structs
func ParseProtocols(data []byte, ipVersion string) []*protocol.Protocol {
	scanner := bufio.NewScanner(bytes.NewReader(data))
	ctx := &context{protocols: make([]*protocol.Protocol, 0), ipVersion: ipVersion}
	// handlers are tried in order; the first one that claims the line
	// (sets ctx.handled) wins
	handlers := []func(*context){
		handleEmptyLine,
		parseLineForProtocol,
		parseLineForDescription,
		parseLineForChannel,
		parseLineForRoutes,
		parseLineForRouteChanges,
		parseLineForFilterName,
	}
	for scanner.Scan() {
		ctx.line = strings.TrimRight(scanner.Text(), " ")
		ctx.handled = false
		for _, handle := range handlers {
			if ctx.handled {
				break
			}
			handle(ctx)
		}
	}
	return ctx.protocols
}
// handleEmptyLine resets the current protocol on blank lines, which
// separate protocol sections in bird's output.
func handleEmptyLine(c *context) {
	if len(c.line) > 0 {
		return
	}
	c.current = nil
	c.handled = true
}
// parseLineForProtocol starts a new protocol when the line matches the
// protocol header regex; the new protocol becomes context.current.
func parseLineForProtocol(c *context) {
	m := protocolRegex.FindStringSubmatch(c.line)
	if m == nil {
		return
	}
	// groups: 1=name, 2=type, 4=state, 5=uptime, 6=extra attributes
	p := protocol.NewProtocol(m[1], parseProto(m[2]), c.ipVersion, parseUptime(m[5]))
	p.Up = parseState(m[4])
	fillAttributes(p, m)
	c.current = p
	c.protocols = append(c.protocols, p)
	c.handled = true
}
// parseLineForDescription copies a "Description:" attribute into the
// current protocol.
func parseLineForDescription(c *context) {
	// BUGFIX: guard against a description line arriving before (or
	// without) a recognized protocol header — every sibling handler
	// checks this, and without it c.current is a nil dereference
	if c.current == nil {
		return
	}
	match := descriptionRegex.FindStringSubmatch(c.line)
	if match == nil {
		return
	}
	if len(match) <= 1 {
		return
	}
	c.current.Description = strings.Join(match[1:], " ")
	// NOTE(review): unlike the other handlers this one does not set
	// c.handled; preserved as-is since later handlers won't match anyway
}
// parseProto maps a bird protocol type name to its numeric constant,
// returning PROTO_UNKNOWN for anything unrecognized.
func parseProto(val string) int {
	switch val {
	case "BGP":
		return protocol.BGP
	case "OSPF":
		return protocol.OSPF
	case "Direct":
		return protocol.Direct
	case "Kernel":
		return protocol.Kernel
	case "Static":
		return protocol.Static
	default:
		return protocol.PROTO_UNKNOWN
	}
}
// parseState converts bird's state column to 1 ("up") or 0 (anything else).
func parseState(state string) int {
	if state != "up" {
		return 0
	}
	return 1
}
// parseUptime converts bird's uptime column (h:mm:ss, unix timestamp,
// or ISO datetime) into seconds, returning 0 when nothing matches.
func parseUptime(value string) int {
	groups := uptimeRegex.FindStringSubmatch(value)
	switch {
	case groups == nil:
		return 0
	case len(groups[1]) > 0:
		// matched the h:mm:ss form
		return parseUptimeForDuration(groups)
	case len(groups[5]) > 0:
		// matched a bare number (unix timestamp)
		return parseUptimeForTimestamp(value)
	default:
		return parseUptimeForIso(value)
	}
}
// parseUptimeForIso returns whole seconds elapsed since an
// ISO-formatted "YYYY-MM-DD hh:mm:ss" timestamp (0 on parse failure).
func parseUptimeForIso(s string) int {
	const layout = "2006-01-02 15:04:05"
	start, err := time.Parse(layout, s)
	if err != nil {
		log.Errorln(err)
		return 0
	}
	return int(time.Since(start).Seconds())
}
// parseUptimeForDuration converts the matched h:mm:ss regex groups
// (indices 2-4 of uptimeRegex) into whole seconds.
func parseUptimeForDuration(duration []string) int {
	hours := parseInt(duration[2])
	minutes := parseInt(duration[3])
	seconds := parseInt(duration[4])
	d, err := time.ParseDuration(fmt.Sprintf("%dh%dm%ds", hours, minutes, seconds))
	if err != nil {
		log.Errorln(err)
		return 0
	}
	return int(d.Seconds())
}
// parseUptimeForTimestamp returns whole seconds elapsed since the given
// unix timestamp string.
func parseUptimeForTimestamp(timestamp string) int {
	startedAt := time.Unix(parseInt(timestamp), 0)
	return int(time.Since(startedAt).Seconds())
}
// parseLineForChannel handles bird2-style "Channel ipv4/ipv6" lines.
// It only runs when no fixed IP version was configured; the first
// channel annotates the current protocol, and any further channel
// clones the protocol for the other address family.
func parseLineForChannel(c *context) {
	if c.ipVersion != "" || c.current == nil {
		return
	}
	channel := channelRegex.FindStringSubmatch(c.line)
	if channel == nil {
		return
	}
	if len(c.current.IPVersion) == 0 {
		c.current.IPVersion = channel[1]
	} else {
		// second channel for the same protocol: clone it for this IP
		// version and make the clone current
		c.current = &protocol.Protocol{
			Name:      c.current.Name,
			Proto:     c.current.Proto,
			Up:        c.current.Up,
			Uptime:    c.current.Uptime,
			IPVersion: channel[1],
		}
		c.protocols = append(c.protocols, c.current)
	}
	c.handled = true
}
// parseLineForRoutes fills the route counters from a "Routes:" line.
func parseLineForRoutes(c *context) {
	if c.current == nil {
		return
	}
	m := routeRegex.FindStringSubmatch(c.line)
	if m == nil {
		return
	}
	// groups: 1=imported, 2=filtered (optional), 3=exported, 4=preferred (optional)
	c.current.Imported, _ = strconv.ParseInt(m[1], 10, 64)
	c.current.Exported, _ = strconv.ParseInt(m[3], 10, 64)
	if m[2] != "" {
		c.current.Filtered, _ = strconv.ParseInt(m[2], 10, 64)
	}
	if m[4] != "" {
		c.current.Preferred, _ = strconv.ParseInt(m[4], 10, 64)
	}
	c.handled = true
}
// parseLineForRouteChanges fills one of the four route-change counter
// structs from an "Import/Export updates/withdraws" line.
func parseLineForRouteChanges(c *context) {
	if c.current == nil {
		return
	}
	m := routeChangeRegex.FindStringSubmatch(c.line)
	if m == nil {
		return
	}
	counters := getRouteChangeCount(m, c.current)
	counters.Received = parseRouteChangeValue(m[3])
	counters.Rejected = parseRouteChangeValue(m[4])
	counters.Filtered = parseRouteChangeValue(m[5])
	counters.Ignored = parseRouteChangeValue(m[6])
	counters.Accepted = parseRouteChangeValue(m[7])
	c.handled = true
}
// getRouteChangeCount selects the RouteChangeCount field of p addressed by
// the direction ("Import"/"Export", values[1]) and kind ("updates"/
// "withdraws", values[2]) captured by routeChangeRegex.
func getRouteChangeCount(values []string, p *protocol.Protocol) *protocol.RouteChangeCount {
	if values[1] == "Import" {
		if values[2] == "updates" {
			return &p.ImportUpdates
		}
		return &p.ImportWithdraws
	}
	if values[2] == "updates" {
		return &p.ExportUpdates
	}
	return &p.ExportWithdraws
}

// parseRouteChangeValue converts one counter cell to int64, treating bird's
// "---" placeholder as zero.
func parseRouteChangeValue(value string) int64 {
	if value == "---" {
		return 0
	}
	return parseInt(value)
}
// parseLineForFilterName records the import/export filter names from an
// "Input filter:" or "Output filter:" line.
func parseLineForFilterName(c *context) {
	if c.current == nil {
		return
	}
	match := filterRegex.FindStringSubmatch(c.line)
	if match == nil {
		return
	}
	if match[1] == "Input" {
		c.current.ImportFilter = match[2]
	} else {
		c.current.ExportFilter = match[2]
	}
	c.handled = true
}

// parseInt parses a base-10 integer, logging the error and returning 0 on
// failure.
func parseInt(value string) int64 {
	i, err := strconv.ParseInt(value, 10, 64)
	if err != nil {
		log.Errorln(err)
		return 0
	}
	return i
}
// fillAttributes stores protocol-specific extra metrics; currently only the
// OSPF "running" state (taken from the sixth protocolRegex capture) is kept.
func fillAttributes(p *protocol.Protocol, m []string) {
	if p.Proto == protocol.OSPF {
		p.Attributes["running"] = float64(parseOspfRunning(m[6]))
	}
}

// parseOspfRunning maps the OSPF state column to 1 ("Running") or 0.
func parseOspfRunning(state string) int {
	if state == "Running" {
		return 1
	}
	return 0
}
Add missing bird protocol types to the parser regex; previously unrecognized protocols caused segfaults in the parser
Closes #34
package parser
import (
"bufio"
"bytes"
"fmt"
"regexp"
"strconv"
"strings"
"time"
"github.com/czerwonk/bird_exporter/protocol"
"github.com/prometheus/common/log"
)
var (
protocolRegex *regexp.Regexp
descriptionRegex *regexp.Regexp
routeRegex *regexp.Regexp
uptimeRegex *regexp.Regexp
routeChangeRegex *regexp.Regexp
filterRegex *regexp.Regexp
channelRegex *regexp.Regexp
)
// context carries the parser state shared by all line handlers.
type context struct {
	current   *protocol.Protocol   // protocol currently being filled (nil between sections)
	line      string               // current line, right-trimmed
	handled   bool                 // set once a handler has consumed the line
	protocols []*protocol.Protocol // all protocols parsed so far
	ipVersion string               // fixed IP version, empty for combined (multi-channel) output
}
// init compiles every line-matching expression once at package load time.
func init() {
	// Summary line: optional "1002-" code, name, protocol type, table, state,
	// uptime (h:mm:ss, unix time or ISO timestamp) and an optional info column.
	protocolRegex = regexp.MustCompile(`^(?:1002\-)?([^\s]+)\s+(MRT|BGP|BFD|OSPF|RPKI|RIP|RAdv|Pipe|Perf|Direct|Babel|Device|Kernel|Static)\s+([^\s]+)\s+([^\s]+)\s+(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}|[^\s]+)(?:\s+(.*?))?$`)
	descriptionRegex = regexp.MustCompile(`Description:\s+(.*)`)
	routeRegex = regexp.MustCompile(`^\s+Routes:\s+(\d+) imported, (?:(\d+) filtered, )?(\d+) exported(?:, (\d+) preferred)?`)
	// Uptime: h:mm:ss in groups 1-4, a unix timestamp in group 5, or an ISO
	// timestamp in group 6.
	uptimeRegex = regexp.MustCompile(`^(?:((\d+):(\d{2}):(\d{2}))|(\d+)|(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}))$`)
	routeChangeRegex = regexp.MustCompile(`(Import|Export) (updates|withdraws):\s+(\d+|---)\s+(\d+|---)\s+(\d+|---)\s+(\d+|---)\s+(\d+|---)\s*`)
	filterRegex = regexp.MustCompile(`(Input|Output) filter:\s+(.*)`)
	channelRegex = regexp.MustCompile(`Channel ipv(4|6)`)
}
// ParseProtocols parses bird output and returns protocol.Protocol structs.
// ipVersion is an optional fixed IP version; leave it empty for combined
// output where the version comes from per-protocol "Channel" lines.
func ParseProtocols(data []byte, ipVersion string) []*protocol.Protocol {
	reader := bytes.NewReader(data)
	scanner := bufio.NewScanner(reader)
	c := &context{protocols: make([]*protocol.Protocol, 0), ipVersion: ipVersion}
	// Handlers are tried in order until one marks the line as handled.
	var handlers = []func(*context){
		handleEmptyLine,
		parseLineForProtocol,
		parseLineForDescription,
		parseLineForChannel,
		parseLineForRoutes,
		parseLineForRouteChanges,
		parseLineForFilterName,
	}
	for scanner.Scan() {
		// Trailing spaces would confuse the anchored regexes.
		c.line = strings.TrimRight(scanner.Text(), " ")
		c.handled = false
		for _, h := range handlers {
			if !c.handled {
				h(c)
			}
		}
	}
	return c.protocols
}
// handleEmptyLine closes the current protocol section: a blank line in bird
// output separates one protocol's details from the next.
func handleEmptyLine(c *context) {
	if len(c.line) > 0 {
		return
	}
	c.current = nil
	c.handled = true
}
// parseLineForProtocol starts a new protocol section when the line matches
// the protocol summary header (name, type, table, state, uptime, info).
func parseLineForProtocol(c *context) {
	match := protocolRegex.FindStringSubmatch(c.line)
	if match == nil {
		return
	}
	proto := parseProto(match[2])
	ut := parseUptime(match[5])
	c.current = protocol.NewProtocol(match[1], proto, c.ipVersion, ut)
	c.current.Up = parseState(match[4])
	fillAttributes(c.current, match)
	c.protocols = append(c.protocols, c.current)
	c.handled = true
}
// parseLineForDescription records the protocol description from a
// "Description: ..." line.
//
// Unlike the other handlers this one previously dereferenced c.current
// without a nil check, so a stray description line outside a protocol
// section (c.current is nil after a blank line) would panic. Guard against
// that, and mark the line as handled so later handlers skip it, consistent
// with the sibling parsers.
func parseLineForDescription(c *context) {
	if c.current == nil {
		return
	}
	match := descriptionRegex.FindStringSubmatch(c.line)
	if match == nil {
		return
	}
	// descriptionRegex has exactly one capture group holding the full text,
	// so joining match[1:] was equivalent to taking match[1] directly.
	c.current.Description = match[1]
	c.handled = true
}
// parseProto maps the protocol type column to the protocol package's
// constants. Types that protocolRegex accepts but that are not listed here
// (e.g. MRT, BFD, RPKI, Pipe) fall through to PROTO_UNKNOWN.
func parseProto(val string) int {
	switch val {
	case "BGP":
		return protocol.BGP
	case "OSPF":
		return protocol.OSPF
	case "Direct":
		return protocol.Direct
	case "Kernel":
		return protocol.Kernel
	case "Static":
		return protocol.Static
	}
	return protocol.PROTO_UNKNOWN
}
// parseState converts bird's state column to a numeric gauge value:
// 1 when the protocol is "up", 0 for every other state.
func parseState(state string) int {
	switch state {
	case "up":
		return 1
	default:
		return 0
	}
}
// parseUptime converts the uptime column to seconds. The value may be an
// h:mm:ss duration (uptimeRegex groups 1-4), a unix timestamp (group 5) or
// an ISO timestamp ("2006-01-02 15:04:05"); anything else yields 0.
func parseUptime(value string) int {
	match := uptimeRegex.FindStringSubmatch(value)
	if match == nil {
		return 0
	}
	if len(match[1]) > 0 {
		return parseUptimeForDuration(match)
	}
	if len(match[5]) > 0 {
		return parseUptimeForTimestamp(value)
	}
	return parseUptimeForIso(value)
}
// parseUptimeForIso converts an ISO-like timestamp ("2006-01-02 15:04:05")
// into the number of seconds elapsed since that instant; parse failures are
// logged and yield 0.
func parseUptimeForIso(s string) int {
	start, err := time.Parse("2006-01-02 15:04:05", s)
	if err != nil {
		log.Errorln(err)
		return 0
	}
	return int(time.Since(start).Seconds())
}
// parseUptimeForDuration converts the h:mm:ss capture groups of uptimeRegex
// (indices 2-4) into a total number of seconds.
//
// The components are combined arithmetically instead of being formatted into
// a string and round-tripped through time.ParseDuration; that removes two
// allocations and an error path that could never carry useful information
// (the inputs are regex-validated digit runs).
func parseUptimeForDuration(duration []string) int {
	h := parseInt(duration[2])
	m := parseInt(duration[3])
	s := parseInt(duration[4])
	return int(h*3600 + m*60 + s)
}
// parseUptimeForTimestamp interprets the value as a Unix timestamp and
// returns the number of seconds elapsed since then.
func parseUptimeForTimestamp(timestamp string) int {
	since := parseInt(timestamp)
	s := time.Unix(since, 0)
	d := time.Since(s)
	return int(d.Seconds())
}
// parseLineForChannel handles "Channel ipv4/ipv6" lines, which are only
// relevant when no fixed IP version was configured (c.ipVersion == "").
// The first channel of a protocol fills in its IPVersion; each further
// channel clones the protocol for the additional IP version.
func parseLineForChannel(c *context) {
	if c.ipVersion != "" || c.current == nil {
		return
	}
	channel := channelRegex.FindStringSubmatch(c.line)
	if channel == nil {
		return
	}
	if len(c.current.IPVersion) == 0 {
		// First channel seen: attach the version to the current protocol.
		c.current.IPVersion = channel[1]
	} else {
		// Additional channel: clone the protocol for this IP version.
		c.current = &protocol.Protocol{
			Name:      c.current.Name,
			Proto:     c.current.Proto,
			Up:        c.current.Up,
			Uptime:    c.current.Uptime,
			IPVersion: channel[1],
		}
		c.protocols = append(c.protocols, c.current)
	}
	c.handled = true
}
// parseLineForRoutes extracts the imported/exported (and the optional
// filtered/preferred) route counters from a "Routes:" line.
func parseLineForRoutes(c *context) {
	if c.current == nil {
		return
	}
	match := routeRegex.FindStringSubmatch(c.line)
	if match == nil {
		return
	}
	c.current.Imported, _ = strconv.ParseInt(match[1], 10, 64)
	c.current.Exported, _ = strconv.ParseInt(match[3], 10, 64)
	// The "filtered" and "preferred" groups are optional in routeRegex.
	if len(match[2]) > 0 {
		c.current.Filtered, _ = strconv.ParseInt(match[2], 10, 64)
	}
	if len(match[4]) > 0 {
		c.current.Preferred, _ = strconv.ParseInt(match[4], 10, 64)
	}
	c.handled = true
}
// parseLineForRouteChanges parses an "Import/Export updates/withdraws" line
// and stores its five counter columns on the matching RouteChangeCount.
func parseLineForRouteChanges(c *context) {
	if c.current == nil {
		return
	}
	match := routeChangeRegex.FindStringSubmatch(c.line)
	if match == nil {
		return
	}
	x := getRouteChangeCount(match, c.current)
	x.Received = parseRouteChangeValue(match[3])
	x.Rejected = parseRouteChangeValue(match[4])
	x.Filtered = parseRouteChangeValue(match[5])
	x.Ignored = parseRouteChangeValue(match[6])
	x.Accepted = parseRouteChangeValue(match[7])
	c.handled = true
}
// getRouteChangeCount selects the RouteChangeCount field of p addressed by
// the direction ("Import"/"Export", values[1]) and kind ("updates"/
// "withdraws", values[2]) captured by routeChangeRegex.
func getRouteChangeCount(values []string, p *protocol.Protocol) *protocol.RouteChangeCount {
	isImport := values[1] == "Import"
	isUpdate := values[2] == "updates"
	switch {
	case isImport && isUpdate:
		return &p.ImportUpdates
	case isImport:
		return &p.ImportWithdraws
	case isUpdate:
		return &p.ExportUpdates
	default:
		return &p.ExportWithdraws
	}
}
// parseRouteChangeValue converts one counter cell to int64, treating bird's
// "---" placeholder as zero.
func parseRouteChangeValue(value string) int64 {
	if value == "---" {
		return 0
	}
	return parseInt(value)
}
// parseLineForFilterName records the import/export filter names from an
// "Input filter:" or "Output filter:" line.
func parseLineForFilterName(c *context) {
	if c.current == nil {
		return
	}
	match := filterRegex.FindStringSubmatch(c.line)
	if match == nil {
		return
	}
	switch match[1] {
	case "Input":
		c.current.ImportFilter = match[2]
	default:
		c.current.ExportFilter = match[2]
	}
	c.handled = true
}
// parseInt parses a base-10 integer, logging the error and returning 0 on
// failure.
func parseInt(value string) int64 {
	i, err := strconv.ParseInt(value, 10, 64)
	if err != nil {
		log.Errorln(err)
		return 0
	}
	return i
}
// fillAttributes stores protocol-specific extra metrics; currently only the
// OSPF "running" state (taken from the sixth protocolRegex capture) is kept.
func fillAttributes(p *protocol.Protocol, m []string) {
	if p.Proto == protocol.OSPF {
		p.Attributes["running"] = float64(parseOspfRunning(m[6]))
	}
}
// parseOspfRunning maps the OSPF state column to a gauge value:
// 1 while the protocol reports "Running", 0 for any other state.
func parseOspfRunning(state string) int {
	if state != "Running" {
		return 0
	}
	return 1
}
|
package parser
import (
"os"
"strings"
"sync"
"github.com/dustinblackman/tidalwave/logger"
"github.com/dustinblackman/tidalwave/sqlquery"
"github.com/spf13/viper"
"github.com/tidwall/gjson"
)
// LogQueryStruct contains all information about a log file, including the matching entries to the query.
type LogQueryStruct struct {
	LogPath     string  // path of the log file to scan
	LineNumbers [][]int // matched lines stored as inclusive [first, last] ranges
}
// processLine reduces a JSON log line to only the fields named in the
// query's SELECT clause; with no SELECT entries the line passes through
// unchanged.
func processLine(query *sqlquery.QueryParams, line []byte) []byte {
	// If there were select statements, join those in to a smaller JSON object.
	if len(query.Selects) > 0 {
		selectedEntries := []string{}
		for idx, res := range gjson.GetManyBytes(line, query.Selects...) {
			// Only the last path segment (e.g. "user.name" -> "name") is used
			// as the output key.
			keyPath := query.Selects[idx]
			keySplit := strings.Split(keyPath, ".")
			lastKey := keySplit[len(keySplit)-1]
			// Re-encode the value according to its JSON type so numbers,
			// nested JSON, booleans and null are not quoted as strings.
			if res.Type == gjson.Number || res.Type == gjson.JSON {
				selectedEntries = append(selectedEntries, `"`+lastKey+`":`+res.String())
			} else if res.Type == gjson.True {
				selectedEntries = append(selectedEntries, `"`+lastKey+`":true`)
			} else if res.Type == gjson.False {
				selectedEntries = append(selectedEntries, `"`+lastKey+`":false`)
			} else if res.Type == gjson.Null {
				selectedEntries = append(selectedEntries, `"`+lastKey+`":null`)
			} else {
				selectedEntries = append(selectedEntries, `"`+lastKey+`":"`+res.String()+`"`)
			}
		}
		return []byte("{" + strings.Join(selectedEntries, ",") + "}")
	}
	return line
}
// searchParse scans a single log file, applying the query to every line.
// With "skip-sort" enabled matching lines are sent straight to
// submitChannel; otherwise their line numbers are recorded as inclusive
// [first, last] ranges in logStruct for a later ordered pass by
// searchSubmit. coreLimit is a semaphore bounding concurrent file scans.
func searchParse(query *sqlquery.QueryParams, logStruct *LogQueryStruct, coreLimit <-chan bool, submitChannel chan<- []byte, wg *sync.WaitGroup) {
	defer wg.Done()
	logger.Logger.Debugf("Processing: %s", logStruct.LogPath)
	file, err := os.Open(logStruct.LogPath)
	if err != nil {
		logger.Logger.Fatal(err)
	}
	defer file.Close()
	lineNumber := -1
	lastLineNumber := -1
	scanner := createScanner(file)
	for scanner.Scan() {
		line := scanner.Bytes()
		lineNumber++
		if query.ProcessLine(&line) {
			if viper.GetBool("skip-sort") {
				// scanner.Bytes() aliases the scanner's internal buffer, which
				// the next Scan call overwrites; copy the line before handing
				// it to another goroutine via the channel.
				lineCopy := make([]byte, len(line))
				copy(lineCopy, line)
				submitChannel <- lineCopy
				continue
			}
			// Extend the previous range when lines are consecutive, otherwise
			// start a new [first, last] range.
			if lineNumber == (lastLineNumber+1) && lineNumber != 0 {
				logStruct.LineNumbers[len(logStruct.LineNumbers)-1][1] = lineNumber
			} else {
				logStruct.LineNumbers = append(logStruct.LineNumbers, []int{lineNumber, lineNumber})
			}
			lastLineNumber = lineNumber
		}
	}
	if err := scanner.Err(); err != nil {
		logger.Logger.Fatal(err)
	}
	// Release the concurrency slot.
	<-coreLimit
}
// searchSubmit re-reads a log file and forwards the lines whose numbers were
// recorded in logStruct.LineNumbers (in file order) to submitChannel,
// applying any SELECT projection via processLine.
func searchSubmit(query *sqlquery.QueryParams, logStruct *LogQueryStruct, submitChannel chan<- []byte) {
	file, err := os.Open(logStruct.LogPath)
	if err != nil {
		logger.Logger.Fatal(err)
	}
	defer file.Close()
	scanner := createScanner(file)
	lineNumber := -1
	// TODO: Handle scanner errors
	for scanner.Scan() {
		line := scanner.Bytes()
		lineNumber++
		acceptLine := false
		// TODO: Can this be better? Faster?
		// Linear scan over the recorded [first, last] ranges.
		for _, lineRange := range logStruct.LineNumbers {
			if lineNumber >= lineRange[0] && lineNumber <= lineRange[1] {
				acceptLine = true
				break
			}
		}
		if !acceptLine {
			continue
		}
		submitChannel <- processLine(query, line)
	}
}
// Search executes a normal match query over log results.
// SELECT * FROM testapp WHERE date > '2016-10-05'
//
// Results are streamed through the returned channel, which is closed once
// all files have been processed.
func (tp *TidalwaveParser) Search() chan []byte {
	var wg sync.WaitGroup
	logsLen := len(tp.LogPaths)
	wg.Add(logsLen)
	submitChannel := make(chan []byte, 10000)
	go func() {
		// First pass: scan each file for matches, bounded by MaxParallelism.
		coreLimit := make(chan bool, tp.MaxParallelism)
		logs := make([]LogQueryStruct, logsLen)
		for idx, logPath := range tp.LogPaths {
			logs[idx] = LogQueryStruct{LogPath: logPath}
			go searchParse(tp.Query, &logs[idx], coreLimit, submitChannel, &wg)
			coreLimit <- true
		}
		wg.Wait()
		// Second pass (unless sorting is skipped): emit the matched lines
		// file by file, preserving their original order.
		if !viper.GetBool("skip-sort") {
			for idx := range logs {
				if len(logs[idx].LineNumbers) > 0 {
					searchSubmit(tp.Query, &logs[idx], submitChannel)
				}
			}
		}
		close(submitChannel)
	}()
	return submitChannel
}
Switch from bufio.Scanner to a buffered reader (bufio.Reader) for reading log lines
package parser
import (
"bufio"
"io"
"os"
"strings"
"sync"
"github.com/dustinblackman/tidalwave/logger"
"github.com/dustinblackman/tidalwave/sqlquery"
"github.com/spf13/viper"
"github.com/tidwall/gjson"
)
// LogQueryStruct contains all information about a log file, including the matching entries to the query.
type LogQueryStruct struct {
	LogPath     string  // path of the log file to scan
	LineNumbers [][]int // matched lines stored as inclusive [first, last] ranges
}
// processLine reduces a JSON log line to only the fields named in the
// query's SELECT clause; with no SELECT entries the line passes through
// unchanged.
func processLine(query *sqlquery.QueryParams, line []byte) []byte {
	// If there were select statements, join those in to a smaller JSON object.
	if len(query.Selects) > 0 {
		selectedEntries := []string{}
		for idx, res := range gjson.GetManyBytes(line, query.Selects...) {
			// Only the last path segment (e.g. "user.name" -> "name") is used
			// as the output key.
			keyPath := query.Selects[idx]
			keySplit := strings.Split(keyPath, ".")
			lastKey := keySplit[len(keySplit)-1]
			// Re-encode the value according to its JSON type so numbers,
			// nested JSON, booleans and null are not quoted as strings.
			if res.Type == gjson.Number || res.Type == gjson.JSON {
				selectedEntries = append(selectedEntries, `"`+lastKey+`":`+res.String())
			} else if res.Type == gjson.True {
				selectedEntries = append(selectedEntries, `"`+lastKey+`":true`)
			} else if res.Type == gjson.False {
				selectedEntries = append(selectedEntries, `"`+lastKey+`":false`)
			} else if res.Type == gjson.Null {
				selectedEntries = append(selectedEntries, `"`+lastKey+`":null`)
			} else {
				selectedEntries = append(selectedEntries, `"`+lastKey+`":"`+res.String()+`"`)
			}
		}
		return []byte("{" + strings.Join(selectedEntries, ",") + "}")
	}
	return line
}
// searchParse reads a single log file line by line, applying the query to
// each line. With "skip-sort" enabled matching lines go straight to
// submitChannel; otherwise their line numbers are recorded as inclusive
// [first, last] ranges in logStruct for a later ordered pass by
// searchSubmit. coreLimit is a semaphore bounding concurrent file scans.
func searchParse(query *sqlquery.QueryParams, logStruct *LogQueryStruct, coreLimit <-chan bool, submitChannel chan<- []byte, wg *sync.WaitGroup) {
	defer wg.Done()
	logger.Logger.Debugf("Processing: %s", logStruct.LogPath)
	file, err := os.Open(logStruct.LogPath)
	if err != nil {
		logger.Logger.Fatal(err)
	}
	defer file.Close()
	lineNumber := -1
	lastLineNumber := -1
	reader := bufio.NewReader(file)
	delim := byte('\n')
	for {
		line, readErr := reader.ReadBytes(delim)
		if readErr != nil && readErr != io.EOF {
			logger.Logger.Fatal(readErr)
		}
		// ReadBytes can return a final, unterminated line together with
		// io.EOF; the previous code broke out before looking at the data,
		// silently dropping that last line (and desynchronizing line
		// numbers from searchSubmit's scanner-based counting).
		if len(line) > 0 {
			lineNumber++
			if query.ProcessLine(&line) {
				if viper.GetBool("skip-sort") {
					submitChannel <- line
				} else {
					// Extend the previous range when lines are consecutive,
					// otherwise start a new [first, last] range.
					if lineNumber == (lastLineNumber+1) && lineNumber != 0 {
						logStruct.LineNumbers[len(logStruct.LineNumbers)-1][1] = lineNumber
					} else {
						logStruct.LineNumbers = append(logStruct.LineNumbers, []int{lineNumber, lineNumber})
					}
					lastLineNumber = lineNumber
				}
			}
		}
		if readErr == io.EOF {
			break
		}
	}
	// Release the concurrency slot.
	<-coreLimit
}
// searchSubmit re-reads a log file and forwards the lines whose numbers were
// recorded in logStruct.LineNumbers (in file order) to submitChannel,
// applying any SELECT projection via processLine.
func searchSubmit(query *sqlquery.QueryParams, logStruct *LogQueryStruct, submitChannel chan<- []byte) {
	file, err := os.Open(logStruct.LogPath)
	if err != nil {
		logger.Logger.Fatal(err)
	}
	defer file.Close()
	scanner := createScanner(file)
	lineNumber := -1
	// TODO: Handle scanner errors
	for scanner.Scan() {
		line := scanner.Bytes()
		lineNumber++
		acceptLine := false
		// TODO: Can this be better? Faster?
		// Linear scan over the recorded [first, last] ranges.
		for _, lineRange := range logStruct.LineNumbers {
			if lineNumber >= lineRange[0] && lineNumber <= lineRange[1] {
				acceptLine = true
				break
			}
		}
		if !acceptLine {
			continue
		}
		submitChannel <- processLine(query, line)
	}
}
// Search executes a normal match query over log results.
// SELECT * FROM testapp WHERE date > '2016-10-05'
//
// Results are streamed through the returned channel, which is closed once
// all files have been processed.
func (tp *TidalwaveParser) Search() chan []byte {
	var wg sync.WaitGroup
	logsLen := len(tp.LogPaths)
	wg.Add(logsLen)
	submitChannel := make(chan []byte, 10000)
	go func() {
		// First pass: scan each file for matches, bounded by MaxParallelism.
		coreLimit := make(chan bool, tp.MaxParallelism)
		logs := make([]LogQueryStruct, logsLen)
		for idx, logPath := range tp.LogPaths {
			logs[idx] = LogQueryStruct{LogPath: logPath}
			go searchParse(tp.Query, &logs[idx], coreLimit, submitChannel, &wg)
			coreLimit <- true
		}
		wg.Wait()
		// Second pass (unless sorting is skipped): emit the matched lines
		// file by file, preserving their original order.
		if !viper.GetBool("skip-sort") {
			for idx := range logs {
				if len(logs[idx].LineNumbers) > 0 {
					searchSubmit(tp.Query, &logs[idx], submitChannel)
				}
			}
		}
		close(submitChannel)
	}()
	return submitChannel
}
|
// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package process
import (
"gopkg.in/juju/charm.v6-unstable"
)
// Status represents the status of a workload process.
type Status string

// Status values specific to workload processes.
const (
	StatusPending Status = "pending"
	StatusActive  Status = "active"
	StatusFailed  Status = "failed"
	StatusStopped Status = "stopped"
)

// ProcessInfo holds information about a process that Juju needs.
type ProcessInfo struct {
	charm.Process
	// Status is the overall Juju status of the workload process.
	Status Status
	// Space is the networking space with which the process was started.
	Space string
	// EnvVars is the set of environment variables with which the
	// process was started.
	EnvVars map[string]string
	// Details is the information about the process which the plugin provided.
	Details ProcessDetails
}
Change Status to an int (with a String method).
// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package process
import (
"gopkg.in/juju/charm.v6-unstable"
)
// Status values specific to workload processes. The zero value is
// StatusPending; use Status.String for a human-readable form.
const (
	StatusPending Status = iota
	StatusActive
	StatusFailed
	StatusStopped
)
// Status represents the status of a workload process. It is an integer
// enumeration (see the Status* constants); use String for a readable form.
//
// The underlying type must be int: the constants above are declared with
// iota, which cannot be assigned to a string-backed type, and the change
// note for this file says Status became an int with a String method.
type Status int
// String implements fmt.Stringer, mapping each Status constant to its
// lower-case name; unrecognized values yield "Unknown".
func (s Status) String() string {
	// The original switched on an undeclared identifier "status"; the
	// receiver is named s.
	switch s {
	case StatusPending:
		return "pending"
	case StatusActive:
		return "active"
	case StatusFailed:
		return "failed"
	case StatusStopped:
		return "stopped"
	}
	return "Unknown"
}
// ProcessInfo holds information about a process that Juju needs.
type ProcessInfo struct {
	charm.Process
	// Status is the overall Juju status of the workload process
	// (one of the Status* constants).
	Status Status
	// Space is the networking space with which the process was started.
	Space string
	// EnvVars is the set of environment variables with which the
	// process was started.
	EnvVars map[string]string
	// Details is the information about the process which the plugin provided.
	Details ProcessDetails
}
|
// Copyright © 2017 Wei Shen <shenwei356@gmail.com>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package process
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/cznic/sortutil"
"github.com/pkg/errors"
"github.com/shenwei356/go-logging"
psutil "github.com/shirou/gopsutil/process"
)
// Log is *logging.Logger
var Log *logging.Logger
// init installs a colored stderr logging backend and creates the package
// logger, unless the embedding program already provided one via Log.
func init() {
	if Log == nil {
		logFormat := logging.MustStringFormatter(`%{color}[%{level:.4s}]%{color:reset} %{message}`)
		backend := logging.NewLogBackend(os.Stderr, "", 0)
		backendFormatter := logging.NewBackendFormatter(backend, logFormat)
		logging.SetBackend(backendFormatter)
		Log = logging.MustGetLogger("process")
	}
}
// Command is the Command struct
type Command struct {
	ID               uint64             // ID
	Cmd              string             // command
	Cancel           chan struct{}      // channel for close
	Timeout          time.Duration      // time out
	ctx              context.Context    // context.WithTimeout
	ctxCancel        context.CancelFunc // cancel func for timeout
	Ch               chan string        // channel for stdout
	reader           *bufio.Reader      // reader for stdout
	tmpfile          string             // tmpfile for stdout
	tmpfh            *os.File           // file handler for tmpfile
	finishSendOutput bool               // a flag of whether finished sending output to Ch
	Err              error              // Error
	Duration         time.Duration      // runtime
	dryrun           bool               // print the command instead of executing it
	exitStatus       int                // child exit code, filled when Options.PropExitStatus is set
}
// NewCommand builds a Command for cmdStr (leading spaces trimmed) with the
// given ID, cancellation channel and timeout.
func NewCommand(id uint64, cmdStr string, cancel chan struct{}, timeout time.Duration) *Command {
	return &Command{
		ID:      id,
		Cmd:     strings.TrimLeft(cmdStr, " "),
		Cancel:  cancel,
		Timeout: timeout,
	}
}
// String renders the command as "cmd #<ID>: <Cmd>" for logs and error text.
func (c *Command) String() string {
	return "cmd #" + strconv.FormatUint(c.ID, 10) + ": " + c.Cmd
}
// Verbose decides whether print extra information
var Verbose bool

// tmpfilePrefix namespaces temp files by PID so concurrent runs do not collide.
var tmpfilePrefix = fmt.Sprintf("rush.%d.", os.Getpid())

// TmpOutputDataBuffer is buffer size for output of a command before saving to tmpfile,
// default 1M.
var TmpOutputDataBuffer = 1048576 // 1M

// OutputChunkSize is buffer size of output string chunk sent to channel, default 16K.
var OutputChunkSize = 16384 // 16K
// Run runs a command and send output to command.Ch in background.
// It executes the command synchronously via c.run, then spawns a goroutine
// that streams the captured stdout (from memory or the tmpfile) to the
// returned channel in newline-aligned chunks, closing it when drained.
func (c *Command) Run(opts *Options) (chan string, error) {
	// create a return chan here; we will set the c.Ch in the parent
	ch := make(chan string, 1)
	if c.dryrun {
		// Dry-run: emit the command text itself instead of executing it.
		ch <- c.Cmd + "\n"
		close(ch)
		c.finishSendOutput = true
		return ch, nil
	}
	c.Err = c.run(opts)
	// don't return here, keep going so we can display
	// the output from commands that error
	//
	// NOTE(review): readErr is assigned inside the goroutine below but read
	// at the end of this function, before the goroutine is guaranteed to
	// have run — the readErr checks there are racy and effectively always
	// see nil. TODO confirm and rework.
	var readErr error = nil
	if Verbose {
		Log.Infof("finish cmd #%d in %s: %s", c.ID, c.Duration, c.Cmd)
	}
	go func() {
		// NOTE(review): if c.run failed before setting c.reader and no
		// tmpfile was created, c.reader may be nil here and Read will
		// panic — TODO confirm (a later revision guards against this).
		if c.tmpfile != "" { // data saved in tempfile
			c.reader = bufio.NewReader(c.tmpfh)
		}
		buf := make([]byte, OutputChunkSize)
		var n int
		var i int
		var b bytes.Buffer
		var bb []byte
		var existedN int
		// var N uint64
		for {
			n, readErr = c.reader.Read(buf)
			existedN = b.Len()
			b.Write(buf[0:n])
			if readErr != nil {
				if readErr == io.EOF {
					// Flush whatever is left (possibly without a trailing newline).
					if b.Len() > 0 {
						// if Verbose {
						// N += uint64(b.Len())
						// }
						ch <- b.String() // string(buf[0:n])
					}
					b.Reset()
					readErr = nil
				}
				break
			}
			// Send only complete lines: cut at the last newline in the buffer
			// and carry the remainder over to the next chunk.
			bb = b.Bytes()
			i = bytes.LastIndexByte(bb, '\n')
			if i < 0 {
				continue
			}
			// if Verbose {
			// N += uint64(len(bb[0 : i+1]))
			// }
			ch <- string(bb[0 : i+1]) // string(buf[0:n])
			b.Reset()
			if i-existedN+1 < n {
				// ------ ======i========n
				// existed buf
				// 5 4 6
				b.Write(buf[i-existedN+1 : n])
			}
			// N += n
		}
		// if Verbose {
		// Log.Debugf("cmd #%d sent %d bytes\n", c.ID, N)
		// }
		// if Verbose {
		// Log.Infof("finish reading data from: %s", c.Cmd)
		// }
		close(ch)
		c.finishSendOutput = true
	}()
	if c.Err != nil {
		return ch, c.Err
	} else {
		if readErr != nil {
			return ch, readErr
		} else {
			return ch, nil
		}
	}
}
var isWindows bool = runtime.GOOS == "windows"

// getShell returns the shell used to run commands: %COMSPEC% (falling back
// to cmd.exe) on Windows, $SHELL (falling back to "sh") everywhere else.
func getShell() string {
	if isWindows {
		if shell := os.Getenv("COMSPEC"); shell != "" {
			return shell
		}
		return "C:\\WINDOWS\\System32\\cmd.exe"
	}
	if shell := os.Getenv("SHELL"); shell != "" {
		return shell
	}
	return "sh"
}
// Cleanup closes and removes the command's temporary output file, if one
// was created; it returns the first close error or the removal error.
func (c *Command) Cleanup() error {
	var err error
	if c.tmpfh != nil {
		// if Verbose {
		// Log.Infof("close tmpfh for: %s", c.Cmd)
		// }
		err = c.tmpfh.Close()
		if err != nil {
			return err
		}
	}
	if c.tmpfile != "" {
		if Verbose {
			Log.Infof("remove tmpfile (%s) for command: %s", c.tmpfile, c.Cmd)
		}
		err = os.Remove(c.tmpfile)
	}
	return err
}
// ErrTimeout means command timeout
var ErrTimeout = fmt.Errorf("time out")
// ErrCancelled means command being cancelled
var ErrCancelled = fmt.Errorf("cancelled")
// getExitStatus extracts the child's exit code from an *exec.ExitError;
// nil or any other error type maps to 0.
func (c *Command) getExitStatus(err error) int {
	if exitError, ok := err.(*exec.ExitError); ok {
		waitStatus := exitError.Sys().(syscall.WaitStatus)
		return waitStatus.ExitStatus()
	}
	// no error, so return exitStatus 0
	return 0
}
func isProcessRunning(pid int) bool {
_, err := os.FindProcess(pid)
if err != nil {
return false
}
return true
}
// killWindowsProcessTreeRecursive force-kills childProcess and all of its
// descendants on Windows using "taskkill /t /f", retrying up to 30 times
// (10ms apart) until the process is no longer present.
func killWindowsProcessTreeRecursive(childProcess *psutil.Process) {
	// Depth-first: kill grandchildren before the child itself.
	grandChildren, err := childProcess.Children()
	if grandChildren != nil && err == nil {
		for _, value := range grandChildren {
			killWindowsProcessTreeRecursive(value)
		}
	}
	attempts := 1
	for {
		if Verbose {
			Log.Infof("taskkill /t /f /pid %s", strconv.Itoa(int(childProcess.Pid)))
		}
		out, err := exec.Command("taskkill", "/t", "/f", "/pid", strconv.Itoa(int(childProcess.Pid))).Output()
		if Verbose {
			if err != nil {
				Log.Error(err)
			}
			Log.Infof("%s", out)
		}
		if !isProcessRunning(int(childProcess.Pid)) {
			break
		}
		// Still alive: back off briefly and retry, giving up after 30 attempts.
		time.Sleep(10 * time.Millisecond)
		attempts++
		if attempts > 30 {
			break
		}
	}
}
// run a command and pass output to c.reader.
// Note that output returns only after finishing run.
// This function is mainly borrowed from https://github.com/brentp/gargs .
//
// Output up to TmpOutputDataBuffer bytes is kept in memory; larger output is
// spooled to a temporary file. Cancellation, timeout and normal completion
// communicate over the buffered chErr channel.
func (c *Command) run(opts *Options) error {
	t := time.Now()
	chCancelMonitor := make(chan struct{})
	defer func() {
		close(chCancelMonitor)
		c.Duration = time.Now().Sub(t)
	}()
	var command *exec.Cmd
	qcmd := fmt.Sprintf(`%s`, c.Cmd)
	if Verbose {
		Log.Infof("start cmd #%d: %s", c.ID, qcmd)
	}
	if c.Timeout > 0 {
		c.ctx, c.ctxCancel = context.WithTimeout(context.Background(), c.Timeout)
		if isWindows {
			command = exec.CommandContext(c.ctx, getShell())
			c.setWindowsCommandAttr(command, qcmd)
		} else {
			command = exec.CommandContext(c.ctx, getShell(), "-c", qcmd)
		}
	} else {
		if isWindows {
			command = exec.Command(getShell())
			c.setWindowsCommandAttr(command, qcmd)
		} else {
			command = exec.Command(getShell(), "-c", qcmd)
		}
	}
	pipeStdout, err := command.StdoutPipe()
	if err != nil {
		return errors.Wrapf(err, "get stdout pipe of cmd #%d: %s", c.ID, c.Cmd)
	}
	defer pipeStdout.Close()
	command.Stderr = os.Stderr
	err = command.Start()
	if err != nil {
		return errors.Wrapf(err, "start cmd #%d: %s", c.ID, c.Cmd)
	}
	bpipe := bufio.NewReaderSize(pipeStdout, TmpOutputDataBuffer)
	chErr := make(chan error, 2) // may come from three sources, must be buffered
	chEndBeforeTimeout := make(chan struct{})
	// Cancellation monitor: on c.Cancel, report ErrCancelled and optionally
	// kill the child (and, on Windows, its whole process tree).
	go func() {
		select {
		case <-c.Cancel:
			if Verbose {
				Log.Warningf("cancel cmd #%d: %s", c.ID, c.Cmd)
			}
			chErr <- ErrCancelled
			if opts.KillOnCtrlC {
				if isWindows {
					childProcess, err := psutil.NewProcess(int32(command.Process.Pid))
					if err != nil {
						Log.Error(err)
					}
					killWindowsProcessTreeRecursive(childProcess)
				} else {
					command.Process.Kill()
				}
			}
		case <-chCancelMonitor:
			// default: // must not use default, if you must use, use for loop
		}
	}()
	// detect timeout
	if c.Timeout > 0 {
		go func() { // goroutine #T
			select {
			case <-c.ctx.Done():
				chErr <- ErrTimeout
				c.ctxCancel()
				return
			case <-chEndBeforeTimeout:
				chErr <- nil
				return
			}
		}()
	}
	// --------------------------------
	// handle output
	var readed []byte
	if c.Timeout > 0 {
		// known shortcoming: this goroutine will remains even after timeout!
		// this will cause data race.
		go func() { // goroutine #P
			// Peek is blocked method, it waits command even after timeout!!
			readed, err = bpipe.Peek(TmpOutputDataBuffer)
			chErr <- err
		}()
		err = <-chErr // from timeout #T or peek #P
	} else {
		readed, err = bpipe.Peek(TmpOutputDataBuffer)
	}
	// less than TmpOutputDataBuffer bytes in output...
	if err == bufio.ErrBufferFull || err == io.EOF {
		if c.Timeout > 0 {
			go func() { // goroutine #W
				err1 := command.Wait()
				chErr <- err1
				close(chEndBeforeTimeout)
			}()
			err = <-chErr // from timeout #T or normal exit #W
			<-chErr       // from normal exit #W or timeout #T
		} else {
			err = command.Wait()
		}
		if opts.PropExitStatus {
			c.exitStatus = c.getExitStatus(err)
		}
		// get reader even on error, so we can still print the stdout and stderr of the failed child process
		c.reader = bufio.NewReader(bytes.NewReader(readed))
		if err != nil {
			return errors.Wrapf(err, "wait cmd #%d: %s", c.ID, c.Cmd)
		}
		return nil
	}
	// more than TmpOutputDataBuffer bytes in output. must use tmpfile
	if err != nil {
		return errors.Wrapf(err, "run cmd #%d: %s", c.ID, c.Cmd)
	}
	c.tmpfh, err = ioutil.TempFile("", tmpfilePrefix)
	if err != nil {
		return errors.Wrapf(err, "create tmpfile for cmd #%d: %s", c.ID, c.Cmd)
	}
	c.tmpfile = c.tmpfh.Name()
	if Verbose {
		Log.Infof("create tmpfile (%s) for command: %s", c.tmpfile, c.Cmd)
	}
	btmp := bufio.NewWriter(c.tmpfh)
	_, err = io.CopyBuffer(btmp, bpipe, readed)
	if err != nil {
		return errors.Wrapf(err, "save buffered data to tmpfile: %s", c.tmpfile)
	}
	// NOTE: this "c" shadows the method receiver inside the if-scope.
	if c, ok := pipeStdout.(io.ReadCloser); ok {
		c.Close()
	}
	btmp.Flush()
	_, err = c.tmpfh.Seek(0, 0)
	if err == nil {
		if c.Timeout > 0 {
			go func() { // goroutine #3
				err1 := command.Wait()
				close(chEndBeforeTimeout)
				chErr <- err1
			}()
			err = <-chErr // from timeout or normal exit
			<-chErr       // wait unfinished goroutine
		} else {
			err = command.Wait()
		}
	}
	if opts.PropExitStatus {
		c.exitStatus = c.getExitStatus(err)
	}
	if err != nil {
		return errors.Wrapf(err, "wait cmd #%d: %s", c.ID, c.Cmd)
	}
	return nil
}
// Options contains the runtime options shared by Run and Run4Output.
type Options struct {
	DryRun              bool          // just print command
	Jobs                int           // max jobs number
	KeepOrder           bool          // keep output order
	Retries             int           // max retry chances
	RetryInterval       time.Duration // retry interval
	PrintRetryOutput    bool          // print output from retries
	Timeout             time.Duration // timeout
	StopOnErr           bool          // stop on any error
	PropExitStatus      bool          // propagate child exit status
	KillOnCtrlC         bool          // kill child processes on ctrl-c
	RecordSuccessfulCmd bool          // send successful command to channel
	Verbose             bool          // enable extra logging (sets the package-level Verbose)
}
// Run4Output runs commands in parallel from channel chCmdStr,
// and returns an output text channel,
// and a done channel to ensure safe exit.
// When opts.KeepOrder is set, finished commands are buffered in a map and
// drained strictly by ascending command ID; otherwise output is forwarded
// as commands finish, bounded by opts.Jobs concurrent readers.
func Run4Output(opts *Options, cancel chan struct{}, chCmdStr chan string) (chan string, chan string, chan int, chan int) {
	if opts.Verbose {
		Verbose = true
	}
	chCmd, chSuccessfulCmd, doneChCmd, chExitStatus := Run(opts, cancel, chCmdStr)
	chOut := make(chan string, opts.Jobs)
	done := make(chan int)
	go func() {
		var wg sync.WaitGroup
		if !opts.KeepOrder { // do not keep order
			tokens := make(chan int, opts.Jobs)
		RECEIVECMD:
			for c := range chCmd {
				select {
				case <-cancel:
					if Verbose {
						Log.Debugf("cancel receiving finished cmd")
					}
					break RECEIVECMD
				default: // needed
				}
				wg.Add(1)
				tokens <- 1
				go func(c *Command) {
					defer func() {
						wg.Done()
						<-tokens
					}()
					// read data from channel and output
					// var N uint64
					for msg := range c.Ch {
						// if Verbose {
						// N += uint64(len(msg))
						// }
						chOut <- msg
					}
					c.Cleanup()
					// if Verbose {
					// Log.Debugf("receive %d bytes from cmd #%d\n", N, c.ID)
					// }
					// if Verbose {
					// Log.Infof("finish receiving data from: %s", c.Cmd)
					// }
				}(c)
			}
		} else { // keep order
			wg.Add(1)
			var id uint64 = 1
			var c, c1 *Command
			var ok bool
			cmds := make(map[uint64]*Command)
		RECEIVECMD2:
			for c = range chCmd {
				select {
				case <-cancel:
					if Verbose {
						Log.Debugf("cancel receiving finished cmd")
					}
					break RECEIVECMD2
				default: // needed
				}
				if c.ID == id { // your turn
					for msg := range c.Ch {
						chOut <- msg
					}
					c.Cleanup()
					id++
				} else { // wait the ID come out
					// Drain any buffered commands that are now in order,
					// then stash this out-of-order one.
					for {
						if c1, ok = cmds[id]; ok {
							for msg := range c1.Ch {
								chOut <- msg
							}
							c1.Cleanup()
							delete(cmds, c1.ID)
							id++
						} else {
							break
						}
					}
					cmds[c.ID] = c
				}
			}
			// Flush whatever is still buffered, in ascending ID order.
			if len(cmds) > 0 {
				ids := make(sortutil.Uint64Slice, len(cmds))
				i := 0
				for id = range cmds {
					ids[i] = id
					i++
				}
				sort.Sort(ids)
				for _, id = range ids {
					c := cmds[id]
					for msg := range c.Ch {
						chOut <- msg
					}
					c.Cleanup()
				}
			}
			wg.Done()
		}
		<-doneChCmd
		wg.Wait()
		close(chOut)
		// if Verbose {
		// Log.Infof("finish sending all output")
		// }
		done <- 1
	}()
	return chOut, chSuccessfulCmd, done, chExitStatus
}
// write strings and report done
func combineWorker(input <-chan string, output chan<- string, wg *sync.WaitGroup) {
defer wg.Done()
for val := range input {
output <- val
}
}
// combine strings in input order
// Each input channel is drained completely (group.Wait inside the loop)
// before the next one is started, then output is closed.
func combine(inputs []<-chan string, output chan<- string) {
	group := new(sync.WaitGroup)
	go func() {
		for _, input := range inputs {
			group.Add(1)
			go combineWorker(input, output, group)
			group.Wait() // preserve input order: finish this channel before the next
		}
		close(output)
	}()
}
// Run runs commands in parallel from channel chCmdStr,
// and returns a Command channel,
// and a done channel to ensure safe exit.
// Each command gets a sequential ID (starting at 1), is retried up to
// opts.Retries times on failure, and — when opts.StopOnErr is set — the
// first error cancels all remaining work by closing the cancel channel.
func Run(opts *Options, cancel chan struct{}, chCmdStr chan string) (chan *Command, chan string, chan int, chan int) {
	if opts.Verbose {
		Verbose = true
	}
	chCmd := make(chan *Command, opts.Jobs)
	var chSuccessfulCmd chan string
	if opts.RecordSuccessfulCmd {
		chSuccessfulCmd = make(chan string, opts.Jobs)
	}
	done := make(chan int)
	var chExitStatus chan int
	if opts.PropExitStatus {
		chExitStatus = make(chan int)
	}
	go func() {
		var wg sync.WaitGroup
		// tokens bounds the number of concurrently running commands.
		tokens := make(chan int, opts.Jobs)
		var id uint64 = 1
		var stop bool
	RECEIVECMD:
		for cmdStr := range chCmdStr {
			select {
			case <-cancel:
				if Verbose {
					Log.Debugf("cancel receiving commands")
				}
				break RECEIVECMD
			default: // needed
			}
			if stop {
				break
			}
			wg.Add(1)
			tokens <- 1
			go func(id uint64, cmdStr string) {
				defer func() {
					wg.Done()
					<-tokens
				}()
				command := NewCommand(id, cmdStr, cancel, opts.Timeout)
				if opts.DryRun {
					command.dryrun = true
				}
				chances := opts.Retries
				var outputsToPrint []<-chan string
				for {
					ch, err := command.Run(opts)
					if err != nil { // fail to run
						if chances == 0 || opts.StopOnErr {
							// print final output
							outputsToPrint = append(outputsToPrint, ch)
							Log.Error(err)
							if opts.PropExitStatus {
								chExitStatus <- command.exitStatus
							}
							command.Ch = make(chan string, 1)
							combine(outputsToPrint, command.Ch)
							chCmd <- command
						} else {
							Log.Warning(err)
						}
						if opts.StopOnErr {
							select {
							case <-cancel: // already closed
							default:
								Log.Error("stop on first error(s)")
								close(cancel)
								close(chCmd)
								if opts.RecordSuccessfulCmd {
									close(chSuccessfulCmd)
								}
								if opts.PropExitStatus {
									close(chExitStatus)
								}
								done <- 1
							}
							stop = true
							return
						}
						if chances > 0 {
							// Retry after the configured interval, optionally
							// keeping the failed attempt's output for printing.
							if opts.PrintRetryOutput {
								outputsToPrint = append(outputsToPrint, ch)
							}
							if Verbose && opts.Retries > 0 {
								Log.Warningf("retry %d/%d times: %s",
									opts.Retries-chances+1,
									opts.Retries, command.Cmd)
							}
							chances--
							<-time.After(opts.RetryInterval)
							continue
						}
						return
					}
					// print final output
					outputsToPrint = append(outputsToPrint, ch)
					if opts.PropExitStatus {
						chExitStatus <- command.exitStatus
					}
					break
				}
				command.Ch = make(chan string, 1)
				combine(outputsToPrint, command.Ch)
				chCmd <- command
				if opts.RecordSuccessfulCmd {
					chSuccessfulCmd <- cmdStr
				}
			}(id, cmdStr)
			id++
		}
		wg.Wait()
		if !stop {
			close(chCmd)
			if opts.RecordSuccessfulCmd {
				close(chSuccessfulCmd)
			}
		}
		if opts.PropExitStatus {
			close(chExitStatus)
		}
		done <- 1
	}()
	return chCmd, chSuccessfulCmd, done, chExitStatus
}
don't read if reader is nil
// Copyright © 2017 Wei Shen <shenwei356@gmail.com>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package process
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/cznic/sortutil"
"github.com/pkg/errors"
"github.com/shenwei356/go-logging"
psutil "github.com/shirou/gopsutil/process"
)
// Log is the package-level *logging.Logger used for all messages.
var Log *logging.Logger

// init wires a stderr backend with a colored "[LEVL] message" format,
// unless a logger was already injected by the importing program.
func init() {
	if Log != nil {
		return
	}
	format := logging.MustStringFormatter(`%{color}[%{level:.4s}]%{color:reset} %{message}`)
	stderrBackend := logging.NewLogBackend(os.Stderr, "", 0)
	logging.SetBackend(logging.NewBackendFormatter(stderrBackend, format))
	Log = logging.MustGetLogger("process")
}
// Command represents a single shell command run by this package, together
// with its cancellation plumbing, its (buffered or tmpfile-spooled) stdout,
// and its final status.
type Command struct {
	ID      uint64        // sequential command ID (1-based); used to keep output order
	Cmd     string        // the command line to run
	Cancel  chan struct{} // closed to request cancellation of the command
	Timeout time.Duration // kill the command after this duration (0 = no limit)
	ctx       context.Context    // context.WithTimeout backing Timeout
	ctxCancel context.CancelFunc // cancel func for the timeout context
	Ch      chan string   // channel delivering the command's stdout
	reader  *bufio.Reader // reader for stdout (in-memory buffer or tmpfile)
	tmpfile string        // tmpfile path for stdout larger than TmpOutputDataBuffer
	tmpfh   *os.File      // file handler for tmpfile
	finishSendOutput bool // a flag of whether finished sending output to Ch
	Err      error         // error from running the command
	Duration time.Duration // wall-clock runtime
	dryrun     bool // if true, only print the command instead of running it
	exitStatus int  // child exit status (captured when Options.PropExitStatus)
}
// NewCommand builds a Command for cmdStr (leading spaces stripped) that can
// be cancelled through cancel and killed after timeout.
func NewCommand(id uint64, cmdStr string, cancel chan struct{}, timeout time.Duration) *Command {
	return &Command{
		ID:      id,
		Cmd:     strings.TrimLeft(cmdStr, " "),
		Cancel:  cancel,
		Timeout: timeout,
	}
}
// String formats the command as "cmd #<ID>: <Cmd>".
func (c *Command) String() string {
	return "cmd #" + strconv.FormatUint(c.ID, 10) + ": " + c.Cmd
}
// Verbose decides whether to print extra information.
var Verbose bool

// tmpfilePrefix is the filename prefix for temporary stdout files,
// namespaced by the current process ID.
var tmpfilePrefix = fmt.Sprintf("rush.%d.", os.Getpid())

// TmpOutputDataBuffer is buffer size for output of a command before saving to tmpfile,
// default 1M.
var TmpOutputDataBuffer = 1048576 // 1M

// OutputChunkSize is buffer size of output string chunk sent to channel, default 16K.
var OutputChunkSize = 16384 // 16K
// Run runs a command and send output to command.Ch in background.
// It returns a channel carrying the command's stdout (whole-line chunks)
// and the error from running the command, if any. The channel is created
// here; the caller is responsible for assigning it to c.Ch.
func (c *Command) Run(opts *Options) (chan string, error) {
	// create a return chan here; we will set the c.Ch in the parent
	ch := make(chan string, 1)
	if c.dryrun {
		// dry-run: echo the command itself as the only output
		ch <- c.Cmd + "\n"
		close(ch)
		c.finishSendOutput = true
		return ch, nil
	}
	c.Err = c.run(opts)
	// don't return here, keep going so we can display
	// the output from commands that error
	var readErr error = nil
	if Verbose {
		Log.Infof("finish cmd #%d in %s: %s", c.ID, c.Duration, c.Cmd)
	}
	// Stream c.reader into ch in OutputChunkSize reads, splitting on the last
	// newline of the accumulated data so receivers always get whole lines.
	go func() {
		if c.tmpfile != "" { // data saved in tempfile
			c.reader = bufio.NewReader(c.tmpfh)
		}
		buf := make([]byte, OutputChunkSize)
		var n int
		var i int
		var b bytes.Buffer
		var bb []byte
		var existedN int
		for {
			if c.reader != nil {
				n, readErr = c.reader.Read(buf)
			} else {
				// no reader (e.g. run failed before producing output)
				n = 0
				readErr = io.EOF
			}
			existedN = b.Len()
			b.Write(buf[0:n])
			if readErr != nil {
				if readErr == io.EOF {
					if b.Len() > 0 {
						// flush the trailing partial line
						ch <- b.String()
					}
					b.Reset()
					readErr = nil
				}
				break
			}
			bb = b.Bytes()
			i = bytes.LastIndexByte(bb, '\n')
			if i < 0 {
				// no newline yet: keep accumulating
				continue
			}
			// send everything up to and including the last newline
			ch <- string(bb[0 : i+1])
			b.Reset()
			if i-existedN+1 < n {
				// keep the bytes after the last newline for the next round
				// ------ ======i========n
				// existed buf
				// 5 4 6
				b.Write(buf[i-existedN+1 : n])
			}
		}
		close(ch)
		c.finishSendOutput = true
	}()
	if c.Err != nil {
		return ch, c.Err
	} else {
		// NOTE(review): readErr is only written inside the goroutine above,
		// which has typically not run by the time we get here, so this branch
		// is effectively dead and a data race — confirm and consider removing.
		if readErr != nil {
			return ch, readErr
		} else {
			return ch, nil
		}
	}
}
// isWindows reports whether we are running on Windows, which changes both the
// shell used and how child processes are killed.
// (idiom fix: the explicit bool type in the declaration was redundant)
var isWindows = runtime.GOOS == "windows"

// getShell returns the shell used to run commands: %COMSPEC% (falling back to
// cmd.exe) on Windows, $SHELL (falling back to "sh") elsewhere.
func getShell() string {
	if isWindows {
		if shell := os.Getenv("COMSPEC"); shell != "" {
			return shell
		}
		return "C:\\WINDOWS\\System32\\cmd.exe"
	}
	if shell := os.Getenv("SHELL"); shell != "" {
		return shell
	}
	return "sh"
}
// Cleanup closes the temporary stdout file (if open) and removes it from
// disk, returning the first error encountered.
func (c *Command) Cleanup() error {
	if c.tmpfh != nil {
		if err := c.tmpfh.Close(); err != nil {
			return err
		}
	}
	if c.tmpfile == "" {
		return nil
	}
	if Verbose {
		Log.Infof("remove tmpfile (%s) for command: %s", c.tmpfile, c.Cmd)
	}
	return os.Remove(c.tmpfile)
}
// ErrTimeout means command timeout: the command exceeded its Timeout.
var ErrTimeout = fmt.Errorf("time out")

// ErrCancelled means the command was cancelled via its Cancel channel.
var ErrCancelled = fmt.Errorf("cancelled")
// getExitStatus extracts the child process's exit code from err.
// It returns 0 when err is nil or is not an *exec.ExitError.
func (c *Command) getExitStatus(err error) int {
	exitErr, ok := err.(*exec.ExitError)
	if !ok {
		// no error (or not an exit error), so report exit status 0
		return 0
	}
	return exitErr.Sys().(syscall.WaitStatus).ExitStatus()
}
func isProcessRunning(pid int) bool {
_, err := os.FindProcess(pid)
if err != nil {
return false
}
return true
}
// killWindowsProcessTreeRecursive force-kills childProcess and all of its
// descendants on Windows. Descendants are killed first (depth-first).
// taskkill is retried (up to 30 times, 10ms apart) because the process may
// not disappear immediately after the kill is issued.
func killWindowsProcessTreeRecursive(childProcess *psutil.Process) {
	grandChildren, err := childProcess.Children()
	if grandChildren != nil && err == nil {
		for _, value := range grandChildren {
			killWindowsProcessTreeRecursive(value)
		}
	}
	// the PID string is needed on every attempt; compute it once
	pid := strconv.Itoa(int(childProcess.Pid))
	attempts := 1
	for {
		if Verbose {
			Log.Infof("taskkill /t /f /pid %s", pid)
		}
		out, err := exec.Command("taskkill", "/t", "/f", "/pid", pid).Output()
		if Verbose {
			if err != nil {
				Log.Error(err)
			}
			Log.Infof("%s", out)
		}
		if !isProcessRunning(int(childProcess.Pid)) {
			break
		}
		// still alive: back off briefly and retry, bounded at 30 attempts
		time.Sleep(10 * time.Millisecond)
		attempts++
		if attempts > 30 {
			break
		}
	}
}
// run a command and pass output to c.reader.
// Note that output returns only after finishing run.
// This function is mainly borrowed from https://github.com/brentp/gargs .
//
// Up to TmpOutputDataBuffer bytes of stdout are held in memory; anything
// larger is spooled to a temporary file. Up to three helper goroutines may
// feed chErr: #T (timeout), #P (peek) and #W (wait).
func (c *Command) run(opts *Options) error {
	t := time.Now()
	chCancelMonitor := make(chan struct{})
	defer func() {
		close(chCancelMonitor)
		c.Duration = time.Now().Sub(t)
	}()
	var command *exec.Cmd
	qcmd := fmt.Sprintf(`%s`, c.Cmd)
	if Verbose {
		Log.Infof("start cmd #%d: %s", c.ID, qcmd)
	}
	// build the exec.Cmd; with a timeout, wrap it in a cancellable context
	if c.Timeout > 0 {
		c.ctx, c.ctxCancel = context.WithTimeout(context.Background(), c.Timeout)
		if isWindows {
			command = exec.CommandContext(c.ctx, getShell())
			c.setWindowsCommandAttr(command, qcmd)
		} else {
			command = exec.CommandContext(c.ctx, getShell(), "-c", qcmd)
		}
	} else {
		if isWindows {
			command = exec.Command(getShell())
			c.setWindowsCommandAttr(command, qcmd)
		} else {
			command = exec.Command(getShell(), "-c", qcmd)
		}
	}
	pipeStdout, err := command.StdoutPipe()
	if err != nil {
		return errors.Wrapf(err, "get stdout pipe of cmd #%d: %s", c.ID, c.Cmd)
	}
	defer pipeStdout.Close()
	command.Stderr = os.Stderr
	err = command.Start()
	if err != nil {
		return errors.Wrapf(err, "start cmd #%d: %s", c.ID, c.Cmd)
	}
	bpipe := bufio.NewReaderSize(pipeStdout, TmpOutputDataBuffer)
	chErr := make(chan error, 2) // may come from three sources, must be buffered
	chEndBeforeTimeout := make(chan struct{})
	// monitor the Cancel channel and kill the child when it fires
	go func() {
		select {
		case <-c.Cancel:
			if Verbose {
				Log.Warningf("cancel cmd #%d: %s", c.ID, c.Cmd)
			}
			chErr <- ErrCancelled
			if opts.KillOnCtrlC {
				if isWindows {
					childProcess, err := psutil.NewProcess(int32(command.Process.Pid))
					if err != nil {
						Log.Error(err)
					}
					killWindowsProcessTreeRecursive(childProcess)
				} else {
					command.Process.Kill()
				}
			}
		case <-chCancelMonitor:
			// default: // must not use default, if you must use, use for loop
		}
	}()
	// detect timeout
	if c.Timeout > 0 {
		go func() { // goroutine #T
			select {
			case <-c.ctx.Done():
				chErr <- ErrTimeout
				c.ctxCancel()
				return
			case <-chEndBeforeTimeout:
				chErr <- nil
				return
			}
		}()
	}
	// --------------------------------
	// handle output
	var readed []byte
	if c.Timeout > 0 {
		// known shortcoming: this goroutine will remains even after timeout!
		// this will cause data race.
		go func() { // goroutine #P
			// Peek is blocked method, it waits command even after timeout!!
			readed, err = bpipe.Peek(TmpOutputDataBuffer)
			chErr <- err
		}()
		err = <-chErr // from timeout #T or peek #P
	} else {
		readed, err = bpipe.Peek(TmpOutputDataBuffer)
	}
	// less than TmpOutputDataBuffer bytes in output...
	if err == bufio.ErrBufferFull || err == io.EOF {
		if c.Timeout > 0 {
			go func() { // goroutine #W
				err1 := command.Wait()
				chErr <- err1
				close(chEndBeforeTimeout)
			}()
			err = <-chErr // from timeout #T or normal exit #W
			<-chErr       // from normal exit #W or timeout #T
		} else {
			err = command.Wait()
		}
		if opts.PropExitStatus {
			c.exitStatus = c.getExitStatus(err)
		}
		// get reader even on error, so we can still print the stdout and stderr of the failed child process
		c.reader = bufio.NewReader(bytes.NewReader(readed))
		if err != nil {
			return errors.Wrapf(err, "wait cmd #%d: %s", c.ID, c.Cmd)
		}
		return nil
	}
	// more than TmpOutputDataBuffer bytes in output. must use tmpfile
	if err != nil {
		return errors.Wrapf(err, "run cmd #%d: %s", c.ID, c.Cmd)
	}
	c.tmpfh, err = ioutil.TempFile("", tmpfilePrefix)
	if err != nil {
		return errors.Wrapf(err, "create tmpfile for cmd #%d: %s", c.ID, c.Cmd)
	}
	c.tmpfile = c.tmpfh.Name()
	if Verbose {
		Log.Infof("create tmpfile (%s) for command: %s", c.tmpfile, c.Cmd)
	}
	btmp := bufio.NewWriter(c.tmpfh)
	_, err = io.CopyBuffer(btmp, bpipe, readed)
	if err != nil {
		return errors.Wrapf(err, "save buffered data to tmpfile: %s", c.tmpfile)
	}
	if c, ok := pipeStdout.(io.ReadCloser); ok { // nb: this c shadows the receiver
		c.Close()
	}
	btmp.Flush()
	_, err = c.tmpfh.Seek(0, 0)
	if err == nil {
		if c.Timeout > 0 {
			go func() { // goroutine #3
				err1 := command.Wait()
				close(chEndBeforeTimeout)
				chErr <- err1
			}()
			err = <-chErr // from timeout or normal exit
			<-chErr       // wait unfinished goroutine
		} else {
			err = command.Wait()
		}
	}
	if opts.PropExitStatus {
		c.exitStatus = c.getExitStatus(err)
	}
	if err != nil {
		return errors.Wrapf(err, "wait cmd #%d: %s", c.ID, c.Cmd)
	}
	return nil
}
// Options contains the run-time options shared by Run and Run4Output.
type Options struct {
	DryRun              bool          // just print command
	Jobs                int           // max jobs number
	KeepOrder           bool          // keep output order
	Retries             int           // max retry chances
	RetryInterval       time.Duration // retry interval
	PrintRetryOutput    bool          // print output from retries
	Timeout             time.Duration // timeout
	StopOnErr           bool          // stop on any error
	PropExitStatus      bool          // propagate child exit status
	KillOnCtrlC         bool          // kill child processes on ctrl-c
	RecordSuccessfulCmd bool          // send successful command to channel
	Verbose             bool          // print extra information
}
// Run4Output runs commands in parallel from channel chCmdStr,
// and returns an output text channel,
// and a done channel to ensure safe exit.
// The other two returned channels are passed straight through from Run:
// successful command strings and child exit statuses (either may be nil,
// depending on opts).
func Run4Output(opts *Options, cancel chan struct{}, chCmdStr chan string) (chan string, chan string, chan int, chan int) {
	if opts.Verbose {
		Verbose = true
	}
	chCmd, chSuccessfulCmd, doneChCmd, chExitStatus := Run(opts, cancel, chCmdStr)
	chOut := make(chan string, opts.Jobs)
	done := make(chan int)
	go func() {
		var wg sync.WaitGroup
		if !opts.KeepOrder { // do not keep order
			// forward output of finished commands concurrently,
			// bounded by opts.Jobs tokens
			tokens := make(chan int, opts.Jobs)
		RECEIVECMD:
			for c := range chCmd {
				select {
				case <-cancel:
					if Verbose {
						Log.Debugf("cancel receiving finished cmd")
					}
					break RECEIVECMD
				default: // needed
				}
				wg.Add(1)
				tokens <- 1
				go func(c *Command) {
					defer func() {
						wg.Done()
						<-tokens
					}()
					// read data from channel and output
					for msg := range c.Ch {
						chOut <- msg
					}
					c.Cleanup()
				}(c)
			}
		} else { // keep order
			wg.Add(1)
			var id uint64 = 1 // ID of the next command whose output may be printed
			var c, c1 *Command
			var ok bool
			// cmds parks commands that finished ahead of their turn
			cmds := make(map[uint64]*Command)
		RECEIVECMD2:
			for c = range chCmd {
				select {
				case <-cancel:
					if Verbose {
						Log.Debugf("cancel receiving finished cmd")
					}
					break RECEIVECMD2
				default: // needed
				}
				if c.ID == id { // your turn
					for msg := range c.Ch {
						chOut <- msg
					}
					c.Cleanup()
					id++
				} else { // wait the ID come out
					// first flush any parked commands now in sequence
					for {
						if c1, ok = cmds[id]; ok {
							for msg := range c1.Ch {
								chOut <- msg
							}
							c1.Cleanup()
							delete(cmds, c1.ID)
							id++
						} else {
							break
						}
					}
					cmds[c.ID] = c
				}
			}
			// drain whatever is still parked, in ascending ID order
			if len(cmds) > 0 {
				ids := make(sortutil.Uint64Slice, len(cmds))
				i := 0
				for id = range cmds {
					ids[i] = id
					i++
				}
				sort.Sort(ids)
				for _, id = range ids {
					c := cmds[id]
					for msg := range c.Ch {
						chOut <- msg
					}
					c.Cleanup()
				}
			}
			wg.Done()
		}
		<-doneChCmd
		wg.Wait()
		close(chOut)
		done <- 1
	}()
	return chOut, chSuccessfulCmd, done, chExitStatus
}
// write strings and report done
func combineWorker(input <-chan string, output chan<- string, wg *sync.WaitGroup) {
defer wg.Done()
for val := range input {
output <- val
}
}
// combine streams the contents of each input channel into output, fully
// draining one input before starting on the next so the original input order
// is preserved, and closes output once every input has been consumed.
func combine(inputs []<-chan string, output chan<- string) {
	wg := new(sync.WaitGroup)
	go func() {
		defer close(output)
		for _, in := range inputs {
			wg.Add(1)
			go combineWorker(in, output, wg)
			wg.Wait() // finish this input completely before the next one
		}
	}()
}
// Run runs commands in parallel from channel chCmdStr,
// and returns a Command channel,
// and a done channel to ensure safe exit.
//
// The four returned channels are:
//   - chCmd: finished *Command values (their .Ch carries the output);
//   - chSuccessfulCmd: successfully executed command strings
//     (nil unless opts.RecordSuccessfulCmd);
//   - done: receives 1 once all commands have been dispatched and reaped;
//   - chExitStatus: child exit statuses (nil unless opts.PropExitStatus).
func Run(opts *Options, cancel chan struct{}, chCmdStr chan string) (chan *Command, chan string, chan int, chan int) {
	if opts.Verbose {
		Verbose = true
	}
	chCmd := make(chan *Command, opts.Jobs)
	var chSuccessfulCmd chan string
	if opts.RecordSuccessfulCmd {
		chSuccessfulCmd = make(chan string, opts.Jobs)
	}
	done := make(chan int)
	var chExitStatus chan int
	if opts.PropExitStatus {
		chExitStatus = make(chan int)
	}
	go func() {
		var wg sync.WaitGroup
		// tokens bounds the number of concurrently running commands to opts.Jobs.
		tokens := make(chan int, opts.Jobs)
		var id uint64 = 1
		// stop is set by a worker goroutine when StopOnErr fires.
		// NOTE(review): stop is written in worker goroutines and read by this
		// dispatch loop without synchronization — a data race; confirm intent.
		var stop bool
	RECEIVECMD:
		for cmdStr := range chCmdStr {
			select {
			case <-cancel:
				if Verbose {
					Log.Debugf("cancel receiving commands")
				}
				break RECEIVECMD
			default: // needed
			}
			if stop {
				break
			}
			wg.Add(1)
			tokens <- 1 // acquire a job slot
			go func(id uint64, cmdStr string) {
				defer func() {
					wg.Done()
					<-tokens // release the job slot
				}()
				command := NewCommand(id, cmdStr, cancel, opts.Timeout)
				if opts.DryRun {
					command.dryrun = true
				}
				chances := opts.Retries
				var outputsToPrint []<-chan string
				for {
					ch, err := command.Run(opts)
					if err != nil { // fail to run
						// out of retries (or stopping): deliver what we have
						if chances == 0 || opts.StopOnErr {
							// print final output
							outputsToPrint = append(outputsToPrint, ch)
							Log.Error(err)
							if opts.PropExitStatus {
								chExitStatus <- command.exitStatus
							}
							command.Ch = make(chan string, 1)
							combine(outputsToPrint, command.Ch)
							chCmd <- command
						} else {
							Log.Warning(err)
						}
						if opts.StopOnErr {
							// the first failing worker closes cancel and the
							// output channels; later failures see cancel closed
							select {
							case <-cancel: // already closed
							default:
								Log.Error("stop on first error(s)")
								close(cancel)
								close(chCmd)
								if opts.RecordSuccessfulCmd {
									close(chSuccessfulCmd)
								}
								if opts.PropExitStatus {
									close(chExitStatus)
								}
								done <- 1
							}
							stop = true
							return
						}
						if chances > 0 {
							if opts.PrintRetryOutput {
								outputsToPrint = append(outputsToPrint, ch)
							}
							if Verbose && opts.Retries > 0 {
								Log.Warningf("retry %d/%d times: %s",
									opts.Retries-chances+1,
									opts.Retries, command.Cmd)
							}
							chances--
							<-time.After(opts.RetryInterval)
							continue
						}
						return
					}
					// print final output
					outputsToPrint = append(outputsToPrint, ch)
					if opts.PropExitStatus {
						chExitStatus <- command.exitStatus
					}
					break
				}
				// merge retry outputs (if recorded) with the final output,
				// in order, into command.Ch
				command.Ch = make(chan string, 1)
				combine(outputsToPrint, command.Ch)
				chCmd <- command
				if opts.RecordSuccessfulCmd {
					chSuccessfulCmd <- cmdStr
				}
			}(id, cmdStr)
			id++
		}
		wg.Wait()
		if !stop { // normal completion: close output channels exactly once
			close(chCmd)
			if opts.RecordSuccessfulCmd {
				close(chSuccessfulCmd)
			}
		}
		// NOTE(review): in the StopOnErr path chExitStatus was already closed
		// by the failing worker; closing it again here would panic — verify.
		if opts.PropExitStatus {
			close(chExitStatus)
		}
		done <- 1
	}()
	return chCmd, chSuccessfulCmd, done, chExitStatus
}
|
package score
import (
"encoding/json"
"errors"
"strings"
)
// hiddenSecret replaces the actual secret in parsed Score objects,
// so the secret is never propagated further.
var hiddenSecret = "hidden"

// ErrScoreNotFound is returned if the parsed string did not contain a
// JSON score string.
var ErrScoreNotFound = errors.New("score not found in string")
// Parse returns a score object for the provided JSON string s,
// which must contain secret. The secret is overwritten with a placeholder
// in the returned Score, and suppressed in any returned error message.
func Parse(s, secret string) (*Score, error) {
	if !strings.Contains(s, secret) {
		return nil, ErrScoreNotFound
	}
	var sc Score
	if err := json.Unmarshal([]byte(s), &sc); err != nil {
		if strings.Contains(err.Error(), secret) {
			// this is probably not necessary, but to be safe
			return nil, errors.New("error suppressed to avoid revealing secret")
		}
		return nil, err
	}
	if sc.Secret == secret {
		sc.Secret = hiddenSecret // overwrite secret
	}
	return &sc, nil
}
// HasPrefix returns true if the provided string s has a parsable prefix string.
func HasPrefix(s string) bool {
	const prefix = `{"Secret":`
	return len(s) >= len(prefix) && s[:len(prefix)] == prefix
}
score: made HasPrefix() more robust.
package score
import (
"encoding/json"
"errors"
"strings"
)
// hiddenSecret replaces the actual secret in parsed Score objects,
// so the secret is never propagated further.
var hiddenSecret = "hidden"

// ErrScoreNotFound is returned if the parsed string did not contain a
// JSON score string.
var ErrScoreNotFound = errors.New("score not found in string")
// Parse returns a score object for the provided JSON string s,
// which must contain secret. The secret is overwritten with a placeholder
// in the returned Score, and suppressed in any returned error message.
func Parse(s, secret string) (*Score, error) {
	if !strings.Contains(s, secret) {
		return nil, ErrScoreNotFound
	}
	var sc Score
	if err := json.Unmarshal([]byte(s), &sc); err != nil {
		if strings.Contains(err.Error(), secret) {
			// this is probably not necessary, but to be safe
			return nil, errors.New("error suppressed to avoid revealing secret")
		}
		return nil, err
	}
	if sc.Secret == secret {
		sc.Secret = hiddenSecret // overwrite secret
	}
	return &sc, nil
}
// HasPrefix returns true if the provided string s has a parsable prefix
// string (any leading field of a JSON-encoded Score).
func HasPrefix(s string) bool {
	for _, prefix := range [...]string{
		`{"Secret":`,
		`{"TestName":`,
		`{"Score":`,
		`{"MaxScore":`,
		`{"Weight":`,
	} {
		if strings.HasPrefix(s, prefix) {
			return true
		}
	}
	return false
}
|
package main
import (
"math/rand"
"strconv"
"strings"
"time"
)
/*
Diversity ruleset definitions
*/
// validDiversityActiveItems is the pool of active (space-bar) item IDs that
// may be granted in a diversity run; banned items are noted inline.
var validDiversityActiveItems = [...]int{
	// Rebirth items
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
	44, 45, 47, 49, 56, 58, 65, 66, 77, 78,
	83, 84, 85, 86, 93, 97, 102, 105, 107, 111,
	123, 124, 126, 127, 130, 133, 135, 136, 137, 145,
	146, 147, 158, 160, 164, 166, 171, 175, 177, 181,
	186, 192, 282, 285, 286, 287, 288, 289, 290, 291, // D100 (283) and D4 (284) are banned
	292, 293, 294, 295, 296, 297, 298, 323, 324, 325,
	326, 338,
	// Afterbirth items
	347, 348, 349, 351, 352, 357, 382, 383, 386, 396,
	406, 419, 421, 422, 427, 434, 437, 439, 441,
	// Afterbirth+ items
	475, 476, 477, 478, 479, 480, 481, 482, 483, 484,
	485, 486, 487, 488, 490, 504, 507, 510, // D Infinity (489) is banned
	// Booster Pack items
	512, 515, 516, 521, 522, 523, 527, 536, 545,
	// Repentance items
	263, 555, 556, 557, 577, 578, 580, 582, 585, 604,
	605, 609, 611, 623, 625, 628, 631, 635, 638, 639, // Genesis (622) and R Key (636) are banned
	640, 642, 650, 653, 655, 685, 687, 704, 705, 706, // Esau Jr (703) is banned
	709, 712, 719, 720, 722, 723, 728, 729, // Recall (714) is banned #TODO: add later the missing items
}

// validDiversityPassiveItems is the pool of passive item IDs that may be
// granted in a diversity run; banned items are noted inline.
var validDiversityPassiveItems = [...]int{
	// Rebirth items
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
	11, 12, 13, 14, 17, 18, 19, 20, 21, 27, // <3 (15), Raw Liver (16), Lunch (22), Dinner (23), Dessert (24), Breakfast (25), and Rotten Meat (26) are banned
	28, 32, 46, 48, 50, 51, 52, 53, 54, 55, 57, // Mom's Underwear (29), Moms Heels (30) and Moms Lipstick (31) are banned
	60, 62, 63, 64, 67, 68, 69, 70, 71, 72,
	73, 74, 75, 76, 79, 80, 81, 82, 87, 88,
	89, 90, 91, 94, 95, 96, 98, 99, 100, 101, // Super Bandage (92) is banned
	103, 104, 106, 108, 109, 110, 112, 113, 114, 115,
	116, 117, 118, 119, 120, 121, 122, 125, 128, 129,
	131, 132, 134, 138, 139, 140, 141, 142, 143, 144,
	148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
	159, 161, 162, 163, 165, 167, 168, 169, 170, 172,
	173, 174, 178, 179, 180, 182, 183, 184, 185, 187, // Stem Cells (176) is banned
	188, 189, 190, 191, 193, 195, 196, 197, 198, 199, // Magic 8 Ball (194) is banned
	200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
	210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
	220, 221, 222, 223, 224, 225, 227, 228, 229, 230, // Black Lotus (226) is banned
	231, 232, 233, 234, 236, 237, 240, 241, 242, 243, // Key Piece #1 (238) and Key Piece #2 (239) are banned
	244, 245, 246, 247, 248, 249, 250, 251, 252, 254, // Magic Scab (253) is banned
	255, 256, 257, 259, 260, 261, 262, 264, 265, 266, // Missing No. (258) is banned
	267, 268, 269, 270, 271, 272, 273, 274, 275, 276,
	277, 278, 279, 280, 281, 299, 300, 301, 302, 303,
	304, 305, 306, 307, 308, 309, 310, 311, 312, 313,
	314, 315, 316, 317, 318, 319, 320, 321, 322, 327,
	328, 329, 330, 331, 332, 333, 335, 336, 337, 340, // The Body (334) and Safety Pin (339) are banned
	341, 342, 343, 345, // Match Book (344) and A Snack (346) are banned
	// Afterbirth items
	350, 353, 354, 356, 358, 359, 360, 361, 362, 363, // Mom's Pearls (355) is banned
	364, 365, 366, 367, 368, 369, 370, 371, 372, 373,
	374, 375, 376, 377, 378, 379, 380, 381, 384, 385,
	387, 388, 389, 390, 391, 392, 393, 394, 395, 397,
	398, 399, 400, 401, 402, 403, 404, 405, 407, 408,
	409, 410, 411, 412, 413, 414, 415, 416, 417, 418,
	420, 423, 424, 425, 426, 429, 430, 431, 432, 433, // PJs (428) is banned
	435, 436, 438, 440,
	// Afterbirth+ items
	442, 443, 444, 445, 446, 447, 448, 449, 450, 451,
	452, 453, 454, 457, 458, 459, 460, 461, 462, 463, // Dad's Lost Coin (455) and Moldy Bread (456) are banned
	464, 465, 466, 467, 468, 469, 470, 471, 472, 473,
	474, 491, 492, 493, 494, 495, 496, 497, 498, 499,
	500, 501, 502, 503, 505, 506, 508, 509,
	// Booster Pack #1 items
	511, 513, 514, 517, 518, 519,
	// Booster Pack #2 items
	520, 524, 525,
	// Booster Pack #3 items
	526, 528, 529,
	// Booster Pack #4 items
	530, 531, 532, 533, // Schoolbag (534) is given on every run already
	// Booster Pack #5 items
	535, 537, 538, 539, 540, 541, 542, 543, 544, 546,
	547, 548, 549,
	// Repentance items
	553, 554, 558, 559, 560, 561, 562, 563, 564, 565,
	566, 567, 568, 569, 570, 571, 572, 573, 574, 575,
	576, 579, 581, 583, 584, 586, 588, 589, 591, 592,
	593, 594, 595, 596, 597, 598, 599, 600, 601, 602,
	603, 606, 607, 608, 610, 612, 614, 615, 616, 617,
	618, 619, 621, 624, 629, 632, 633, 634, 637, 641, // Knife piece #1 (626) and Knife piece #2 (627) are banned
	643, 644, 645, 646, 647, 649, 651, 652, 654, 657,
	658, 659, 660, 661, 663, 664, 665, 667, 669, 670, // Dad's Note (668) is banned
	671, 672, 673, 674, 675, 676, 677, 679, 680, 681,
	682, 683, 684, 686, 688, 689, 690, 691, 692, 693,
	694, 695, 696, 697, 698, 699, 700, 701, 702, 708, // Supper (707) is banned
	716, 717, 724, 725, 726, 727, // #TODO: add later the missing items
}

// validDiversityTrinkets is the pool of trinket IDs that may be granted in a
// diversity run. The 32769+ block are the golden variants (normal ID + 32768);
// banned trinkets are noted inline.
var validDiversityTrinkets = [...]int{
	// Rebirth trinkets
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
	11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
	21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
	31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
	41, 42, 43, 44, 45, 46, 48, 49, 50, 51,
	52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
	// Afterbirth trinkets
	62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
	72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
	82, 83, 84, 86, 87, 88, 89, 90, // Karma (85) is banned
	// Afterbirth+ trinkets
	91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
	101, 102, 103, 104, 105, 106, 107, 108, 109, 110,
	111, 112, 113, 114, 115, 116, 117, 118, 119,
	// Booster pack trinkets
	120, 121, 122, 123, 124, 125, 126, 127, 128,
	// Repentance trinkets
	129, 130, 131, 132, 133, 134, 135, 136, 137,
	138, 139, 140, 141, 142, 143, 144, 145, 146,
	147, 148, 149, 150, 151, 152, 153, 154, 155,
	156, 157, 158, 159, 160, 161, 162, 163, 164,
	165, 166, 167, 168, 169, 170, 171, 172, 173,
	174, 175, 176, 177, 178, 179, 180, 181, 182,
	183, 184, 185, 186, 187, 188, 189,
	// Golden trinkets
	32769, 32770, 32771, 32772, 32773, 32774, 32775,
	32776, 32777, 32778, 32779, 32780, 32781, 32782,
	32783, 32784, 32785, 32786, 32787, 32788, 32789,
	32790, 32791, 32792, 32793, 32794, 32795, 32796,
	32797, 32798, 32799, 32800, 32801, 32802, 32803,
	32804, 32805, 32806, 32807, 32808, 32809, 32810,
	32811, 32812, 32813, 32814, 32816, 32817, 32818,
	32819, 32820, 32821, 32822, 32823, 32824, 32825,
	32826, 32827, 32828, 32829, 32830, 32831, 32832,
	32833, 32834, 32835, 32836, 32837, 32838, 32839,
	32840, 32841, 32842, 32843, 32844, 32845, 32846,
	32847, 32848, 32849, 32850, 32851, 32852, 32854, // Golden Karma (32853) is banned
	32855, 32856, 32857, 32858, 32859, 32860, 32861,
	32862, 32863, 32864, 32865, 32866, 32867, 32868,
	32869, 32870, 32871, 32872, 32873, 32874, 32875,
	32876, 32877, 32878, 32879, 32880, 32881, 32882,
	32883, 32884, 32885, 32886, 32887, 32888, 32889,
	32890, 32891, 32892, 32893, 32894, 32895, 32896,
	32897, 32898, 32899, 32900, 32901, 32902, 32903,
	32904, 32905, 32906, 32907, 32908, 32909, 32910,
	32911, 32912, 32913, 32914, 32915, 32916, 32917,
	32918, 32919, 32920, 32921, 32922, 32923, 32924,
	32925, 32926, 32927, 32928, 32929, 32930, 32931,
	32932, 32933, 32934, 32935, 32936, 32937, 32938,
	32939, 32940, 32941, 32942, 32943, 32944, 32945,
	32946, 32947, 32948, 32949, 32950, 32951, 32952,
	32953, 32954, 32955, 32956, 32957,
}
/*
	Diversity helper functions
*/

// diversityGetSeed generates the pseudo-"seed" for a diversity run:
// 1 random active item, 3 unique random passive items (respecting
// per-character bans), and 1 random trinket, returned as a comma-separated
// string of item IDs for the client.
func diversityGetSeed(ruleset Ruleset) string {
	// Seed the global PRNG; without this it would use the default seed of 1.
	rand.Seed(time.Now().UnixNano())

	// Per-character passive-item bans, hoisted out of the selection loop.
	var bannedItems []int
	switch ruleset.Character {
	case "Cain":
		bannedItems = []int{46} // Lucky Foot
	case "Eve":
		bannedItems = []int{117, 122} // Dead Bird, Whore of Babylon
	case "Samson":
		bannedItems = []int{157} // Bloody Lust
	case "Lazarus":
		bannedItems = []int{214} // Anemic
	case "The Lost":
		bannedItems = []int{313} // Holy Mantle
	case "Lilith":
		bannedItems = []int{412} // Cambion Conception
	case "Keeper", "Tainted Keeper":
		bannedItems = []int{230, 672} // Abaddon, A Pound of Flesh
	case "Bethany":
		bannedItems = []int{230, 584} // Abaddon, Book of Virtues
	case "Tainted Lilith":
		bannedItems = []int{678} // C Section
	}

	// Get 1 random active item
	var items []int
	item := validDiversityActiveItems[rand.Intn(len(validDiversityActiveItems))] // nolint: gosec
	items = append(items, item)

	// Get 3 random unique passive items
	for i := 1; i <= 3; i++ {
		for {
			randomIndex := rand.Intn(len(validDiversityPassiveItems)) // nolint: gosec
			item := validDiversityPassiveItems[randomIndex]

			// Do character specific item bans
			if intInSlice(item, bannedItems) {
				continue
			}

			// Ensure this item is unique
			if intInSlice(item, items) {
				continue
			}

			items = append(items, item)
			break
		}
	}

	// Get 1 random trinket
	randomIndex := rand.Intn(len(validDiversityTrinkets)) // nolint: gosec
	items = append(items, validDiversityTrinkets[randomIndex])

	// The "seed" value is used to communicate the 5 random diversity items
	// to the client
	parts := make([]string, 0, len(items))
	for _, item := range items {
		parts = append(parts, strconv.Itoa(item))
	}
	return strings.Join(parts, ",")
}
fixing golden trinket spawn chance
package main
import (
"math/rand"
"strconv"
"strings"
"time"
)
/*
Diversity ruleset definitions
*/
// GOLDEN_TRINKET_MODIFIER is the offset that turns a normal trinket ID into
// its golden variant (e.g. trinket 1 -> 32769).
// NOTE(review): Go naming convention would be goldenTrinketModifier (and a
// const); renaming would touch callers outside this file — left as-is.
var GOLDEN_TRINKET_MODIFIER = 32768

// validDiversityActiveItems is the pool of active (space-bar) item IDs that
// may be granted in a diversity run; banned items are noted inline.
var validDiversityActiveItems = [...]int{
	// Rebirth items
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
	44, 45, 47, 49, 56, 58, 65, 66, 77, 78,
	83, 84, 85, 86, 93, 97, 102, 105, 107, 111,
	123, 124, 126, 127, 130, 133, 135, 136, 137, 145,
	146, 147, 158, 160, 164, 166, 171, 175, 177, 181,
	186, 192, 282, 285, 286, 287, 288, 289, 290, 291, // D100 (283) and D4 (284) are banned
	292, 293, 294, 295, 296, 297, 298, 323, 324, 325,
	326, 338,
	// Afterbirth items
	347, 348, 349, 351, 352, 357, 382, 383, 386, 396,
	406, 419, 421, 422, 427, 434, 437, 439, 441,
	// Afterbirth+ items
	475, 476, 477, 478, 479, 480, 481, 482, 483, 484,
	485, 486, 487, 488, 490, 504, 507, 510, // D Infinity (489) is banned
	// Booster Pack items
	512, 515, 516, 521, 522, 523, 527, 536, 545,
	// Repentance items
	263, 555, 556, 557, 577, 578, 580, 582, 585, 604,
	605, 609, 611, 623, 625, 628, 631, 635, 638, 639, // Genesis (622) and R Key (636) are banned
	640, 642, 650, 653, 655, 685, 687, 704, 705, 706, // Esau Jr (703) is banned
	709, 712, 719, 720, 722, 723, 728, 729, // Recall (714) is banned
	// TODO: After the next patch, add the missing items
}

// validDiversityPassiveItems is the pool of passive item IDs that may be
// granted in a diversity run; banned items are noted in the comments.
var validDiversityPassiveItems = [...]int{
	// Rebirth items
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
	// <3 (15), Raw Liver (16), Lunch (22), Dinner (23), Dessert (24), Breakfast (25),
	// and Rotten Meat (26) are banned
	11, 12, 13, 14, 17, 18, 19, 20, 21, 27,
	// Mom's Underwear (29), Moms Heels (30) and Moms Lipstick (31) are banned
	28, 32, 46, 48, 50, 51, 52, 53, 54, 55, 57,
	60, 62, 63, 64, 67, 68, 69, 70, 71, 72,
	73, 74, 75, 76, 79, 80, 81, 82, 87, 88,
	89, 90, 91, 94, 95, 96, 98, 99, 100, 101, // Super Bandage (92) is banned
	103, 104, 106, 108, 109, 110, 112, 113, 114, 115,
	116, 117, 118, 119, 120, 121, 122, 125, 128, 129,
	131, 132, 134, 138, 139, 140, 141, 142, 143, 144,
	148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
	159, 161, 162, 163, 165, 167, 168, 169, 170, 172,
	173, 174, 178, 179, 180, 182, 183, 184, 185, 187, // Stem Cells (176) is banned
	188, 189, 190, 191, 193, 195, 196, 197, 198, 199, // Magic 8 Ball (194) is banned
	200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
	210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
	220, 221, 222, 223, 224, 225, 227, 228, 229, 230, // Black Lotus (226) is banned
	// Key Piece #1 (238) and Key Piece #2 (239) are banned
	231, 232, 233, 234, 236, 237, 240, 241, 242, 243,
	244, 245, 246, 247, 248, 249, 250, 251, 252, 254, // Magic Scab (253) is banned
	255, 256, 257, 259, 260, 261, 262, 264, 265, 266, // Missing No. (258) is banned
	267, 268, 269, 270, 271, 272, 273, 274, 275, 276,
	277, 278, 279, 280, 281, 299, 300, 301, 302, 303,
	304, 305, 306, 307, 308, 309, 310, 311, 312, 313,
	314, 315, 316, 317, 318, 319, 320, 321, 322, 327,
	// The Body (334) and Safety Pin (339) are banned
	328, 329, 330, 331, 332, 333, 335, 336, 337, 340,
	341, 342, 343, 345, // Match Book (344) and A Snack (346) are banned
	// Afterbirth items
	350, 353, 354, 356, 358, 359, 360, 361, 362, 363, // Mom's Pearls (355) is banned
	364, 365, 366, 367, 368, 369, 370, 371, 372, 373,
	374, 375, 376, 377, 378, 379, 380, 381, 384, 385,
	387, 388, 389, 390, 391, 392, 393, 394, 395, 397,
	398, 399, 400, 401, 402, 403, 404, 405, 407, 408,
	409, 410, 411, 412, 413, 414, 415, 416, 417, 418,
	420, 423, 424, 425, 426, 429, 430, 431, 432, 433, // PJs (428) is banned
	435, 436, 438, 440,
	// Afterbirth+ items
	442, 443, 444, 445, 446, 447, 448, 449, 450, 451,
	// Dad's Lost Coin (455) and Moldy Bread (456) are banned
	452, 453, 454, 457, 458, 459, 460, 461, 462, 463,
	464, 465, 466, 467, 468, 469, 470, 471, 472, 473,
	474, 491, 492, 493, 494, 495, 496, 497, 498, 499,
	500, 501, 502, 503, 505, 506, 508, 509,
	// Booster Pack items
	511, 513, 514, 517, 518, 519, 520, 524, 525, 526,
	528, 529, 530, 531, 532, 533, 534, 535, 537, 538,
	539, 540, 541, 542, 543, 544, 546, 547, 548, 549,
	// Repentance items
	553, 554, 558, 559, 560, 561, 562, 563, 564, 565,
	566, 567, 568, 569, 570, 571, 572, 573, 574, 575,
	576, 579, 581, 583, 584, 586, 588, 589, 591, 592,
	593, 594, 595, 596, 597, 598, 599, 600, 601, 602,
	603, 606, 607, 608, 610, 612, 614, 615, 616, 617,
	// Knife Piece #1 (626) and Knife Piece #2 (627) are banned
	618, 619, 621, 624, 629, 632, 633, 634, 637, 641,
	643, 644, 645, 646, 647, 649, 651, 652, 654, 657,
	658, 659, 660, 661, 663, 664, 665, 667, 669, 670, // Dad's Note (668) is banned
	671, 672, 673, 674, 675, 676, 677, 679, 680, 681,
	682, 683, 684, 686, 688, 689, 690, 691, 692, 693,
	694, 695, 696, 697, 698, 699, 700, 701, 702, 708, // Supper (707) is banned
	716, 717, 724, 725, 726, 727,
	// TODO: After the next patch, add the missing items
}
// validDiversityTrinkets lists every trinket ID eligible to be rolled for a
// diversity race.  Entries are grouped by the DLC that introduced them;
// banned trinkets are omitted with an inline note naming them.
var validDiversityTrinkets = [...]int{
	// Rebirth trinkets
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
	11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
	21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
	31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
	41, 42, 43, 44, 45, 46, 48, 49, 50, 51,
	52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
	// Afterbirth trinkets
	62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
	72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
	82, 83, 84, 86, 87, 88, 89, 90, // Karma (85) is banned
	// Afterbirth+ trinkets
	91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
	101, 102, 103, 104, 105, 106, 107, 108, 109, 110,
	111, 112, 113, 114, 115, 116, 117, 118, 119,
	// Booster pack trinkets
	120, 121, 122, 123, 124, 125, 126, 127, 128,
	// Repentance trinkets
	129, 130, 131, 132, 133, 134, 135, 136, 137,
	138, 139, 140, 141, 142, 143, 144, 145, 146,
	147, 148, 149, 150, 151, 152, 153, 154, 155,
	156, 157, 158, 159, 160, 161, 162, 163, 164,
	165, 166, 167, 168, 169, 170, 171, 172, 173,
	174, 175, 176, 177, 178, 179, 180, 181, 182,
	183, 184, 185, 186, 187, 188, 189,
}
/*
Diversity helper functions
*/
// diversityGetSeed picks the random build for a diversity race: 1 active
// item, 3 unique passive items (honoring character-specific bans), and 1
// trinket with a 10% chance of being golden.  The chosen IDs are returned
// joined with commas; the "seed" string is how the 5 items are communicated
// to the client.
func diversityGetSeed(ruleset Ruleset) string {
	// Seed the global PRNG; without an explicit seed it would start from
	// the default seed of 1 (pre Go 1.20) and always roll the same build.
	rand.Seed(time.Now().UnixNano())

	// characterItemBans maps a character name to the passive items that are
	// additionally banned for that character.  Characters not listed here
	// have no extra bans.  Replaces the long if/else chain with a data table
	// so adding a ban is a one-line change.
	characterItemBans := map[string][]int{
		"Cain":           {46},       // Lucky Foot
		"Eve":            {117, 122}, // Dead Bird, Whore of Babylon
		"Samson":         {157},      // Bloody Lust
		"Lazarus":        {214},      // Anemic
		"The Lost":       {313},      // Holy Mantle
		"Lilith":         {412},      // Cambion Conception
		"Keeper":         {230, 672}, // Abaddon, A Pound of Flesh
		"Bethany":        {230, 584}, // Abaddon, Book of Virtues
		"Tainted Lilith": {678},      // C Section
		"Tainted Keeper": {230, 672}, // Abaddon, A Pound of Flesh
	}
	banned := characterItemBans[ruleset.Character]

	// Get 1 random active item.
	items := []int{validDiversityActiveItems[rand.Intn(len(validDiversityActiveItems))]} // nolint: gosec

	// Get 3 random unique passive items.
	for i := 0; i < 3; i++ {
		for {
			item := validDiversityPassiveItems[rand.Intn(len(validDiversityPassiveItems))] // nolint: gosec
			// Reroll items banned for this character or already chosen.
			if intInSlice(item, banned) || intInSlice(item, items) {
				continue
			}
			items = append(items, item)
			break
		}
	}

	// Get 1 random trinket; the server has a 10% chance to make it golden.
	trinket := validDiversityTrinkets[rand.Intn(len(validDiversityTrinkets))] // nolint: gosec
	if rand.Intn(10) == 0 {
		trinket += GOLDEN_TRINKET_MODIFIER
	}
	items = append(items, trinket)

	// Join the item IDs with commas (strings.Join avoids the quadratic
	// += loop and the trailing-comma trim of the previous version).
	itemStrings := make([]string, 0, len(items))
	for _, item := range items {
		itemStrings = append(itemStrings, strconv.Itoa(item))
	}
	return strings.Join(itemStrings, ",")
}
|
// vim: tabstop=2 shiftwidth=2
package keymgr
import (
"bufio"
"os"
"time"
"fmt"
"strings"
"encoding/hex"
"crypto/sha256"
//"github.com/codahale/blake2"
)
// secret holds a single secret key together with its identity and the
// window of time during which it is valid.
type secret struct {
	keyid []byte    // keyid (first 16 bytes of SHA256 of the pubkey; see Insert)
	sk    []byte    // Secret Key (32 bytes)
	from  time.Time // Valid from
	until time.Time // Valid Until
}
// Secring is the local remailer's in-memory secret keyring, backed by a
// secring file on disk plus an advertised public key file (key.txt).
type Secring struct {
	secringFile string            // Filename of secret keyring
	pubkeyFile  string            // Public keyfile (key.txt)
	sec         map[string]secret // Keys indexed by hex keyid string
	name        string            // Local remailer's name
	address     string            // Local remailer's email address
	myKeyid     []byte            // Keyid this remailer is advertising
	validity    time.Duration     // Period of key validity
	grace       time.Duration     // Period of grace after key expiry
	exit        bool              // Is this an Exit type remailer?
	version     string            // Yamn version string
}
// NewSecring is a constructor for the Secret Keyring.  The keyring starts
// empty; keys are added via Insert or loaded with ImportSecring.
func NewSecring(secfile, pubkey string) *Secring {
	s := new(Secring)
	s.secringFile = secfile
	s.pubkeyFile = pubkey
	s.sec = make(map[string]secret)
	return s
}
// ListKeyids returns a string slice of all in-memory secret keyids.
func (s *Secring) ListKeyids() []string {
	keyids := make([]string, 0, len(s.sec))
	for keyid := range s.sec {
		keyids = append(keyids, keyid)
	}
	return keyids
}
// SetName validates and sets the remailer name (stored lowercased).
// Panics if the name is not between 2 and 12 characters.
func (s *Secring) SetName(name string) {
	if l := len(name); l < 2 || l > 12 {
		panic(fmt.Errorf("Remailer name must be between 2 and 12 chars, not %d.", l))
	}
	s.name = strings.ToLower(name)
}
// SetAddress validates and sets the remailer email address (stored
// lowercased).  Panics on a malformed address.
func (s *Secring) SetAddress(addy string) {
	l := len(addy)
	if l < 3 || l > 80 {
		// Bug fix: the message previously said "between 2 and 80" although
		// the check above enforces a minimum of 3.
		panic(fmt.Errorf(
			"Remailer address must be between 3 and 80 chars, not %d.", l))
	}
	index := strings.Index(addy, "@")
	if index == -1 {
		panic(fmt.Errorf("%s: Remailer address doesn't contain an @.", addy))
	} else if index == 0 || l-index < 3 {
		// Reject a leading "@" or a domain part shorter than 2 chars.
		panic(fmt.Errorf("%s: Invalid remailer address.", addy))
	}
	s.address = strings.ToLower(addy)
}
// SetExit defines if this is a Middle or Exit remailer.  The flag selects
// the capability letter ("E" or "M") written into public key headers.
func (s *Secring) SetExit(exit bool) {
	s.exit = exit
}
// SetValidity defines the key validity period and the post-expiry grace
// period, both given in whole days.
func (s *Secring) SetValidity(valid, grace int) {
	const day = 24 * time.Hour
	s.validity = time.Duration(valid) * day
	s.grace = time.Duration(grace) * day
}
// SetVersion sets the version string used on keys.  The "4:" prefix is the
// protocol version advertised ahead of the Yamn version.
func (s *Secring) SetVersion(v string) {
	s.version = "4:" + v
}
// Count returns the number of secret keys currently held in memory.
func (s *Secring) Count() int {
	return len(s.sec)
}
// Insert puts a new secret key into memory and returns its keyid string.
// The keyid is the hex encoding of the first 16 bytes of SHA256(pub).
// Panics if either key is not exactly 32 bytes.
func (s *Secring) Insert(pub, sec []byte) (keyidstr string) {
	if len(pub) != 32 {
		panic(fmt.Errorf("Invalid pubkey length. Wanted=32, Got=%d", len(pub)))
	}
	if len(sec) != 32 {
		// Bug fix: report the secret key's length here, not len(pub).
		panic(fmt.Errorf("Invalid seckey length. Wanted=32, Got=%d", len(sec)))
	}
	key := new(secret)
	digest := sha256.New()
	digest.Write(pub)
	key.keyid = digest.Sum(nil)[:16]
	keyidstr = hex.EncodeToString(key.keyid)
	// Validity dates
	key.from = time.Now()
	key.until = time.Now().Add(s.validity)
	// The secret key itself
	key.sk = sec
	s.sec[keyidstr] = *key
	return
}
// WritePublic writes the advertised public key file (key.txt): a header
// line (name, address, keyid, version, capabilities, validity dates)
// followed by the hex-encoded public key between cutmarks.  Panics on
// invalid input or I/O failure.
func (s *Secring) WritePublic(pub []byte, keyidstr string) {
	if len(pub) != 32 {
		panic(fmt.Errorf("Invalid pubkey length. Wanted=32, Got=%d", len(pub)))
	}
	// M = Middle, E = Exit
	capstring := "M"
	if s.exit {
		capstring = "E"
	}
	key, exists := s.sec[keyidstr]
	if !exists { // gofmt: no space after "!"
		panic(fmt.Errorf("%s: Keyid does not exist", keyidstr))
	}
	header := s.name + " " +
		s.address + " " +
		keyidstr + " " +
		s.version + " " +
		capstring + " " +
		key.from.UTC().Format(date_format) + " " +
		key.until.UTC().Format(date_format)
	// Open the file for writing
	f, err := os.Create(s.pubkeyFile)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	w := bufio.NewWriter(f)
	fmt.Fprintln(w, header)
	fmt.Fprintln(w, "")
	fmt.Fprintln(w, "-----Begin Mix Key-----")
	fmt.Fprintln(w, keyidstr)
	fmt.Fprintln(w, hex.EncodeToString(pub))
	fmt.Fprintln(w, "-----End Mix Key-----")
	if err = w.Flush(); err != nil {
		panic(err)
	}
}
// WriteSecret appends the selected secret key to the secret keyring file,
// creating the file (mode 0600) if necessary.  Panics if the keyid is
// unknown or the write fails.
func (s *Secring) WriteSecret(keyidstr string) {
	key, exists := s.sec[keyidstr]
	if !exists { // gofmt: no space after "!"
		panic(fmt.Errorf("%s: Keyid does not exist", keyidstr))
	}
	f, err := os.OpenFile(
		s.secringFile,
		os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	keydata := "\n-----Begin Mixmaster Secret Key-----\n"
	keydata += fmt.Sprintf("Created: %s\n", key.from.UTC().Format(date_format))
	keydata += fmt.Sprintf("Expires: %s\n", key.until.UTC().Format(date_format))
	keydata += keyidstr + "\n"
	keydata += hex.EncodeToString(key.sk) + "\n"
	keydata += "-----End Mixmaster Secret Key-----\n"
	if _, err = f.WriteString(keydata); err != nil {
		panic(err)
	}
}
// WriteMyKey rewrites the local public key file to filename, refreshing the
// 7-field header line (name, address, version, capabilities) from current
// configuration while preserving the existing keyid and validity dates.
// Returns the keyid found in the header.
func (s *Secring) WriteMyKey(filename string) (keyidstr string) {
	infile, err := os.Open(s.pubkeyFile)
	if err != nil {
		panic(err)
	}
	defer infile.Close()
	// Create a tmp file rather than overwriting directly
	outfile, err := os.Create(filename)
	if err != nil {
		panic(err)
	}
	defer outfile.Close()
	in := bufio.NewScanner(infile)
	out := bufio.NewWriter(outfile)
	for in.Scan() {
		line := in.Text()
		elements := strings.Fields(line)
		if len(elements) != 7 {
			// Not the header line; copy it through unchanged.
			fmt.Fprintln(out, line)
			continue
		}
		// M = Middle, E = Exit
		capstring := "M"
		if s.exit {
			capstring = "E"
		}
		// Extract the keyid so we can return it
		keyidstr = elements[2]
		if len(keyidstr) != 32 {
			panic(fmt.Errorf(
				"Invalid public keyid length. Expected=32, Got=%d.",
				len(keyidstr)))
		}
		header := s.name + " " +
			s.address + " " +
			keyidstr + " " +
			s.version + " " +
			capstring + " " +
			elements[5] + " " + // Valid-from date, preserved
			elements[6] // Valid-until date, preserved
		fmt.Fprintln(out, header)
	}
	// Bug fix: surface any read error that terminated the scan loop; the
	// previous version silently ignored it.
	if err = in.Err(); err != nil {
		panic(err)
	}
	if err = out.Flush(); err != nil {
		panic(err)
	}
	return
}
// Get returns the secret struct that corresponds to the requested Keyid,
// or an error if the keyid is not present in the keyring.
func (s *Secring) Get(keyid string) (sec secret, err error) {
	var exists bool
	sec, exists = s.sec[keyid]
	if !exists { // gofmt: no space after "!"
		err = fmt.Errorf("%s: Keyid not found in secret keyring", keyid)
	}
	return
}
// GetSK returns the raw secret key bytes for the requested Keyid, or an
// error if the keyid is not present in the keyring.
func (s *Secring) GetSK(keyid string) (sk []byte, err error) {
	sec, exists := s.sec[keyid]
	if !exists { // gofmt: no space after "!"
		err = fmt.Errorf("%s: Keyid not found in secret keyring", keyid)
		return
	}
	sk = sec.sk
	return
}
// Purge deletes keys that are beyond their grace period and rewrites the
// remaining keys to a fresh secring file (the old file is kept as .tmp).
//
//	active  - Keys that have not yet expired
//	expired - Keys that have expired but not yet exceeded their grace period
//	purged  - Keys that are beyond their grace period
func (s *Secring) Purge() (active, expired, purged int) {
	// Rename the secring file to a tmp name, just in case this screws up.
	err := os.Rename(s.secringFile, s.secringFile+".tmp")
	if err != nil {
		panic(err)
	}
	// Create a new secring file
	f, err := os.Create(s.secringFile)
	if err != nil {
		// Bug fix: removed the unreachable "return" that followed this
		// panic (flagged by go vet).
		panic(err)
	}
	defer f.Close()
	// Iterate key and value of Secring in memory.  Deleting from a map
	// during range is safe in Go.
	for k, m := range s.sec {
		purgeDate := m.until.Add(s.grace)
		if time.Now().After(purgeDate) {
			delete(s.sec, k)
			purged++
			continue
		}
		keydata := "-----Begin Mixmaster Secret Key-----\n"
		keydata += fmt.Sprintf("Created: %s\n", m.from.Format(date_format))
		keydata += fmt.Sprintf("Expires: %s\n", m.until.Format(date_format))
		keydata += hex.EncodeToString(m.keyid) + "\n"
		keydata += hex.EncodeToString(m.sk) + "\n"
		keydata += "-----End Mixmaster Secret Key-----\n\n"
		if _, err = f.WriteString(keydata); err != nil {
			panic(err)
		}
		if time.Now().After(m.until) {
			expired++
		} else {
			active++
		}
	}
	return
}
// ImportSecring reads the secring file into memory.  Each key block is
// parsed with a small state machine; malformed entries are reported to
// stderr and skipped rather than aborting the whole import.
func (s *Secring) ImportSecring() (err error) {
	var f *os.File
	f, err = os.Open(s.secringFile)
	if err != nil {
		return
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	var skdata []byte      // Decoded secret key
	var keyidMapKey string // String representation of keyid to key map with
	var valid time.Time
	var expire time.Time
	var sec *secret
	now := time.Now().UTC()
	keyPhase := 0
	/* Key phases are:
	0	Expecting Begin cutmark
	1	Expecting Valid-from date
	2	Expecting Valid-to date
	3	Expecting Keyid line
	4	Expecting secret key
	5	Got End cutmark
	*/
	for scanner.Scan() {
		line := scanner.Text()
		switch keyPhase {
		case 0:
			// Expecting begin cutmark
			if line == "-----Begin Mixmaster Secret Key-----" {
				sec = new(secret)
				keyPhase = 1
			}
		case 1:
			// Valid-from date.  Bug fix: strings.HasPrefix avoids the
			// out-of-range panic line[:9] raised on lines shorter than 9.
			if !strings.HasPrefix(line, "Created: ") {
				fmt.Fprintln(os.Stderr, "Expected Created line")
				keyPhase = 0
				continue
			}
			valid, err = time.Parse(date_format, line[9:])
			if err != nil {
				fmt.Fprintln(os.Stderr, "Malformed Created date")
				keyPhase = 0
				continue
			}
			if valid.After(now) {
				// Key is not yet valid
				fmt.Fprintln(os.Stderr, "Key is not valid yet")
				keyPhase = 0
				continue
			}
			sec.from = valid
			keyPhase = 2
		case 2:
			// Expire date
			if !strings.HasPrefix(line, "Expires: ") {
				fmt.Fprintln(os.Stderr, "Expected Expires line")
				keyPhase = 0
				continue
			}
			expire, err = time.Parse(date_format, line[9:])
			if err != nil {
				fmt.Fprintln(os.Stderr, "Malformed Expires date")
				keyPhase = 0
				continue
			}
			if expire.Before(now) {
				// Key has expired (but we don't really care)
				fmt.Fprintln(os.Stderr, "Expired key on secret")
			}
			sec.until = expire
			keyPhase = 3
		case 3:
			if len(line) != 32 {
				// Invalid keyid length
				keyPhase = 0
				continue
			}
			var keyid []byte
			keyid, err = hex.DecodeString(line)
			if err != nil {
				// Non hex keyid
				fmt.Fprintln(os.Stderr, err)
				keyPhase = 0
				continue
			}
			sec.keyid = keyid
			// Retain a textual representation to key the secring map with
			keyidMapKey = line
			keyPhase = 4
		case 4:
			// Expecting Private key
			skdata, err = hex.DecodeString(line)
			if err != nil {
				// Non hex Private key.  Bug fix: skip the entry here
				// instead of falling through to the length check, which
				// emitted a second, misleading "Incorrect key length".
				fmt.Fprintln(os.Stderr, err)
				keyPhase = 0
				continue
			}
			if len(skdata) != 32 {
				fmt.Fprintln(os.Stderr, "Incorrect key length")
				keyPhase = 0
				continue
			}
			sec.sk = skdata
			keyPhase = 5
		case 5:
			// Expecting end cutmark
			if line == "-----End Mixmaster Secret Key-----" {
				s.sec[keyidMapKey] = *sec
				keyPhase = 0
			}
		} // End of switch
	} // End of file lines loop
	return
}
Don't report expired keys to stderr
// vim: tabstop=2 shiftwidth=2
package keymgr
import (
"bufio"
"os"
"time"
"fmt"
"strings"
"encoding/hex"
"crypto/sha256"
//"github.com/codahale/blake2"
)
// secret holds a single secret key together with its identity and the
// window of time during which it is valid.
type secret struct {
	keyid []byte    // keyid (first 16 bytes of SHA256 of the pubkey; see Insert)
	sk    []byte    // Secret Key (32 bytes)
	from  time.Time // Valid from
	until time.Time // Valid Until
}
// Secring is the local remailer's in-memory secret keyring, backed by a
// secring file on disk plus an advertised public key file (key.txt).
type Secring struct {
	secringFile string            // Filename of secret keyring
	pubkeyFile  string            // Public keyfile (key.txt)
	sec         map[string]secret // Keys indexed by hex keyid string
	name        string            // Local remailer's name
	address     string            // Local remailer's email address
	myKeyid     []byte            // Keyid this remailer is advertising
	validity    time.Duration     // Period of key validity
	grace       time.Duration     // Period of grace after key expiry
	exit        bool              // Is this an Exit type remailer?
	version     string            // Yamn version string
}
// NewSecring is a constructor for the Secret Keyring.  The keyring starts
// empty; keys are added via Insert or loaded with ImportSecring.
func NewSecring(secfile, pubkey string) *Secring {
	s := new(Secring)
	s.secringFile = secfile
	s.pubkeyFile = pubkey
	s.sec = make(map[string]secret)
	return s
}
// ListKeyids returns a string slice of all in-memory secret keyids.
func (s *Secring) ListKeyids() []string {
	keyids := make([]string, 0, len(s.sec))
	for keyid := range s.sec {
		keyids = append(keyids, keyid)
	}
	return keyids
}
// SetName validates and sets the remailer name (stored lowercased).
// Panics if the name is not between 2 and 12 characters.
func (s *Secring) SetName(name string) {
	if l := len(name); l < 2 || l > 12 {
		panic(fmt.Errorf("Remailer name must be between 2 and 12 chars, not %d.", l))
	}
	s.name = strings.ToLower(name)
}
// SetAddress validates and sets the remailer email address (stored
// lowercased).  Panics on a malformed address.
func (s *Secring) SetAddress(addy string) {
	l := len(addy)
	if l < 3 || l > 80 {
		// Bug fix: the message previously said "between 2 and 80" although
		// the check above enforces a minimum of 3.
		panic(fmt.Errorf(
			"Remailer address must be between 3 and 80 chars, not %d.", l))
	}
	index := strings.Index(addy, "@")
	if index == -1 {
		panic(fmt.Errorf("%s: Remailer address doesn't contain an @.", addy))
	} else if index == 0 || l-index < 3 {
		// Reject a leading "@" or a domain part shorter than 2 chars.
		panic(fmt.Errorf("%s: Invalid remailer address.", addy))
	}
	s.address = strings.ToLower(addy)
}
// SetExit defines if this is a Middle or Exit remailer.  The flag selects
// the capability letter ("E" or "M") written into public key headers.
func (s *Secring) SetExit(exit bool) {
	s.exit = exit
}
// SetValidity defines the key validity period and the post-expiry grace
// period, both given in whole days.
func (s *Secring) SetValidity(valid, grace int) {
	const day = 24 * time.Hour
	s.validity = time.Duration(valid) * day
	s.grace = time.Duration(grace) * day
}
// SetVersion sets the version string used on keys.  The "4:" prefix is the
// protocol version advertised ahead of the Yamn version.
func (s *Secring) SetVersion(v string) {
	s.version = "4:" + v
}
// Count returns the number of secret keys currently held in memory.
func (s *Secring) Count() int {
	return len(s.sec)
}
// Insert puts a new secret key into memory and returns its keyid string.
// The keyid is the hex encoding of the first 16 bytes of SHA256(pub).
// Panics if either key is not exactly 32 bytes.
func (s *Secring) Insert(pub, sec []byte) (keyidstr string) {
	if len(pub) != 32 {
		panic(fmt.Errorf("Invalid pubkey length. Wanted=32, Got=%d", len(pub)))
	}
	if len(sec) != 32 {
		// Bug fix: report the secret key's length here, not len(pub).
		panic(fmt.Errorf("Invalid seckey length. Wanted=32, Got=%d", len(sec)))
	}
	key := new(secret)
	digest := sha256.New()
	digest.Write(pub)
	key.keyid = digest.Sum(nil)[:16]
	keyidstr = hex.EncodeToString(key.keyid)
	// Validity dates
	key.from = time.Now()
	key.until = time.Now().Add(s.validity)
	// The secret key itself
	key.sk = sec
	s.sec[keyidstr] = *key
	return
}
// WritePublic writes the advertised public key file (key.txt): a header
// line (name, address, keyid, version, capabilities, validity dates)
// followed by the hex-encoded public key between cutmarks.  Panics on
// invalid input or I/O failure.
func (s *Secring) WritePublic(pub []byte, keyidstr string) {
	if len(pub) != 32 {
		panic(fmt.Errorf("Invalid pubkey length. Wanted=32, Got=%d", len(pub)))
	}
	// M = Middle, E = Exit
	capstring := "M"
	if s.exit {
		capstring = "E"
	}
	key, exists := s.sec[keyidstr]
	if !exists { // gofmt: no space after "!"
		panic(fmt.Errorf("%s: Keyid does not exist", keyidstr))
	}
	header := s.name + " " +
		s.address + " " +
		keyidstr + " " +
		s.version + " " +
		capstring + " " +
		key.from.UTC().Format(date_format) + " " +
		key.until.UTC().Format(date_format)
	// Open the file for writing
	f, err := os.Create(s.pubkeyFile)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	w := bufio.NewWriter(f)
	fmt.Fprintln(w, header)
	fmt.Fprintln(w, "")
	fmt.Fprintln(w, "-----Begin Mix Key-----")
	fmt.Fprintln(w, keyidstr)
	fmt.Fprintln(w, hex.EncodeToString(pub))
	fmt.Fprintln(w, "-----End Mix Key-----")
	if err = w.Flush(); err != nil {
		panic(err)
	}
}
// WriteSecret appends the selected secret key to the secret keyring file,
// creating the file (mode 0600) if necessary.  Panics if the keyid is
// unknown or the write fails.
func (s *Secring) WriteSecret(keyidstr string) {
	key, exists := s.sec[keyidstr]
	if !exists { // gofmt: no space after "!"
		panic(fmt.Errorf("%s: Keyid does not exist", keyidstr))
	}
	f, err := os.OpenFile(
		s.secringFile,
		os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	keydata := "\n-----Begin Mixmaster Secret Key-----\n"
	keydata += fmt.Sprintf("Created: %s\n", key.from.UTC().Format(date_format))
	keydata += fmt.Sprintf("Expires: %s\n", key.until.UTC().Format(date_format))
	keydata += keyidstr + "\n"
	keydata += hex.EncodeToString(key.sk) + "\n"
	keydata += "-----End Mixmaster Secret Key-----\n"
	if _, err = f.WriteString(keydata); err != nil {
		panic(err)
	}
}
// WriteMyKey rewrites the local public key file to filename, refreshing the
// 7-field header line (name, address, version, capabilities) from current
// configuration while preserving the existing keyid and validity dates.
// Returns the keyid found in the header.
func (s *Secring) WriteMyKey(filename string) (keyidstr string) {
	infile, err := os.Open(s.pubkeyFile)
	if err != nil {
		panic(err)
	}
	defer infile.Close()
	// Create a tmp file rather than overwriting directly
	outfile, err := os.Create(filename)
	if err != nil {
		panic(err)
	}
	defer outfile.Close()
	in := bufio.NewScanner(infile)
	out := bufio.NewWriter(outfile)
	for in.Scan() {
		line := in.Text()
		elements := strings.Fields(line)
		if len(elements) != 7 {
			// Not the header line; copy it through unchanged.
			fmt.Fprintln(out, line)
			continue
		}
		// M = Middle, E = Exit
		capstring := "M"
		if s.exit {
			capstring = "E"
		}
		// Extract the keyid so we can return it
		keyidstr = elements[2]
		if len(keyidstr) != 32 {
			panic(fmt.Errorf(
				"Invalid public keyid length. Expected=32, Got=%d.",
				len(keyidstr)))
		}
		header := s.name + " " +
			s.address + " " +
			keyidstr + " " +
			s.version + " " +
			capstring + " " +
			elements[5] + " " + // Valid-from date, preserved
			elements[6] // Valid-until date, preserved
		fmt.Fprintln(out, header)
	}
	// Bug fix: surface any read error that terminated the scan loop; the
	// previous version silently ignored it.
	if err = in.Err(); err != nil {
		panic(err)
	}
	if err = out.Flush(); err != nil {
		panic(err)
	}
	return
}
// Get returns the secret struct that corresponds to the requested Keyid,
// or an error if the keyid is not present in the keyring.
func (s *Secring) Get(keyid string) (sec secret, err error) {
	var exists bool
	sec, exists = s.sec[keyid]
	if !exists { // gofmt: no space after "!"
		err = fmt.Errorf("%s: Keyid not found in secret keyring", keyid)
	}
	return
}
// GetSK returns the raw secret key bytes for the requested Keyid, or an
// error if the keyid is not present in the keyring.
func (s *Secring) GetSK(keyid string) (sk []byte, err error) {
	sec, exists := s.sec[keyid]
	if !exists { // gofmt: no space after "!"
		err = fmt.Errorf("%s: Keyid not found in secret keyring", keyid)
		return
	}
	sk = sec.sk
	return
}
// Purge deletes keys that are beyond their grace period and rewrites the
// remaining keys to a fresh secring file (the old file is kept as .tmp).
//
//	active  - Keys that have not yet expired
//	expired - Keys that have expired but not yet exceeded their grace period
//	purged  - Keys that are beyond their grace period
func (s *Secring) Purge() (active, expired, purged int) {
	// Rename the secring file to a tmp name, just in case this screws up.
	err := os.Rename(s.secringFile, s.secringFile+".tmp")
	if err != nil {
		panic(err)
	}
	// Create a new secring file
	f, err := os.Create(s.secringFile)
	if err != nil {
		// Bug fix: removed the unreachable "return" that followed this
		// panic (flagged by go vet).
		panic(err)
	}
	defer f.Close()
	// Iterate key and value of Secring in memory.  Deleting from a map
	// during range is safe in Go.
	for k, m := range s.sec {
		purgeDate := m.until.Add(s.grace)
		if time.Now().After(purgeDate) {
			delete(s.sec, k)
			purged++
			continue
		}
		keydata := "-----Begin Mixmaster Secret Key-----\n"
		keydata += fmt.Sprintf("Created: %s\n", m.from.Format(date_format))
		keydata += fmt.Sprintf("Expires: %s\n", m.until.Format(date_format))
		keydata += hex.EncodeToString(m.keyid) + "\n"
		keydata += hex.EncodeToString(m.sk) + "\n"
		keydata += "-----End Mixmaster Secret Key-----\n\n"
		if _, err = f.WriteString(keydata); err != nil {
			panic(err)
		}
		if time.Now().After(m.until) {
			expired++
		} else {
			active++
		}
	}
	return
}
// ImportSecring reads the secring file into memory.  Each key block is
// parsed with a small state machine; malformed entries are reported to
// stderr and skipped rather than aborting the whole import.  Expired keys
// are imported silently (grace handling is Purge's job).
func (s *Secring) ImportSecring() (err error) {
	var f *os.File
	f, err = os.Open(s.secringFile)
	if err != nil {
		return
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	var skdata []byte      // Decoded secret key
	var keyidMapKey string // String representation of keyid to key map with
	var valid time.Time
	var expire time.Time
	var sec *secret
	now := time.Now().UTC()
	keyPhase := 0
	/* Key phases are:
	0	Expecting Begin cutmark
	1	Expecting Valid-from date
	2	Expecting Valid-to date
	3	Expecting Keyid line
	4	Expecting secret key
	5	Got End cutmark
	*/
	for scanner.Scan() {
		line := scanner.Text()
		switch keyPhase {
		case 0:
			// Expecting begin cutmark
			if line == "-----Begin Mixmaster Secret Key-----" {
				sec = new(secret)
				keyPhase = 1
			}
		case 1:
			// Valid-from date.  Bug fix: strings.HasPrefix avoids the
			// out-of-range panic line[:9] raised on lines shorter than 9.
			if !strings.HasPrefix(line, "Created: ") {
				fmt.Fprintln(os.Stderr, "Expected Created line")
				keyPhase = 0
				continue
			}
			valid, err = time.Parse(date_format, line[9:])
			if err != nil {
				fmt.Fprintln(os.Stderr, "Malformed Created date")
				keyPhase = 0
				continue
			}
			if valid.After(now) {
				// Key is not yet valid
				fmt.Fprintln(os.Stderr, "Key is not valid yet")
				keyPhase = 0
				continue
			}
			sec.from = valid
			keyPhase = 2
		case 2:
			// Expire date
			if !strings.HasPrefix(line, "Expires: ") {
				fmt.Fprintln(os.Stderr, "Expected Expires line")
				keyPhase = 0
				continue
			}
			expire, err = time.Parse(date_format, line[9:])
			if err != nil {
				fmt.Fprintln(os.Stderr, "Malformed Expires date")
				keyPhase = 0
				continue
			}
			// An expired key is still imported without comment; Purge
			// decides whether it is within its grace period.
			sec.until = expire
			keyPhase = 3
		case 3:
			if len(line) != 32 {
				// Invalid keyid length
				keyPhase = 0
				continue
			}
			var keyid []byte
			keyid, err = hex.DecodeString(line)
			if err != nil {
				// Non hex keyid
				fmt.Fprintln(os.Stderr, err)
				keyPhase = 0
				continue
			}
			sec.keyid = keyid
			// Retain a textual representation to key the secring map with
			keyidMapKey = line
			keyPhase = 4
		case 4:
			// Expecting Private key
			skdata, err = hex.DecodeString(line)
			if err != nil {
				// Non hex Private key.  Bug fix: skip the entry here
				// instead of falling through to the length check, which
				// emitted a second, misleading "Incorrect key length".
				fmt.Fprintln(os.Stderr, err)
				keyPhase = 0
				continue
			}
			if len(skdata) != 32 {
				fmt.Fprintln(os.Stderr, "Incorrect key length")
				keyPhase = 0
				continue
			}
			sec.sk = skdata
			keyPhase = 5
		case 5:
			// Expecting end cutmark
			if line == "-----End Mixmaster Secret Key-----" {
				s.sec[keyidMapKey] = *sec
				keyPhase = 0
			}
		} // End of switch
	} // End of file lines loop
	return
}
|
// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package controller
import (
"github.com/juju/errors"
"github.com/juju/loggo"
"github.com/juju/names"
"github.com/juju/juju/api"
"github.com/juju/juju/api/base"
"github.com/juju/juju/apiserver/params"
)
var logger = loggo.GetLogger("juju.api.controller")
// Client provides methods that the Juju client command uses to interact
// with the Juju controller.
type Client struct {
	base.ClientFacade
	facade base.FacadeCaller // caller bound to the "Controller" facade
}
// NewClient creates a new `Client` based on an existing authenticated API
// connection.
func NewClient(st base.APICallCloser) *Client {
	frontend, backend := base.NewClientFacade(st, "Controller")
	logger.Tracef("%#v", frontend)
	client := &Client{ClientFacade: frontend, facade: backend}
	return client
}
// AllModels allows controller administrators to get the list of all the
// models in the controller.
func (c *Client) AllModels() ([]base.UserModel, error) {
	var models params.UserModelList
	if err := c.facade.FacadeCall("AllModels", nil, &models); err != nil {
		return nil, errors.Trace(err)
	}
	result := make([]base.UserModel, 0, len(models.UserModels))
	for i, model := range models.UserModels {
		owner, err := names.ParseUserTag(model.OwnerTag)
		if err != nil {
			return nil, errors.Annotatef(err, "OwnerTag %q at position %d", model.OwnerTag, i)
		}
		result = append(result, base.UserModel{
			Name:           model.Name,
			UUID:           model.UUID,
			Owner:          owner.Canonical(),
			LastConnection: model.LastConnection,
		})
	}
	return result, nil
}
// ModelConfig returns all model settings for the
// controller model.
func (c *Client) ModelConfig() (map[string]interface{}, error) {
	var result params.ModelConfigResults
	err := c.facade.FacadeCall("ModelConfig", nil, &result)
	return result.Config, err
}
// DestroyController puts the controller model into a "dying" state,
// and removes all non-manager machine instances. Underlying DestroyModel
// calls will fail if there are any manually-provisioned non-manager machines
// in state.
func (c *Client) DestroyController(destroyModels bool) error {
	return c.facade.FacadeCall(
		"DestroyController",
		params.DestroyControllerArgs{DestroyModels: destroyModels},
		nil,
	)
}
// ListBlockedModels returns a list of all models within the controller
// which have at least one block in place.
func (c *Client) ListBlockedModels() ([]params.ModelBlockInfo, error) {
	var result params.ModelBlockInfoList
	err := c.facade.FacadeCall("ListBlockedModels", nil, &result)
	return result.Models, err
}
// RemoveBlocks removes all the blocks in the controller.
func (c *Client) RemoveBlocks() error {
	return c.facade.FacadeCall("RemoveBlocks", params.RemoveBlocksArgs{All: true}, nil)
}
// WatchAllModels returns an AllWatcher, from which you can request
// the Next collection of Deltas (for all models).
func (c *Client) WatchAllModels() (*api.AllWatcher, error) {
	var info api.WatchAll
	if err := c.facade.FacadeCall("WatchAllModels", nil, &info); err != nil {
		return nil, err
	}
	return api.NewAllModelWatcher(c.facade.RawAPICaller(), &info.AllWatcherId), nil
}
// ModelStatus returns a status summary for each model tag passed in.
func (c *Client) ModelStatus(tags ...names.ModelTag) ([]base.ModelStatus, error) {
	entities := make([]params.Entity, 0, len(tags))
	for _, tag := range tags {
		entities = append(entities, params.Entity{Tag: tag.String()})
	}
	var result params.ModelStatusResults
	err := c.facade.FacadeCall("ModelStatus", params.Entities{Entities: entities}, &result)
	if err != nil {
		return nil, err
	}
	results := make([]base.ModelStatus, len(result.Results))
	for i, r := range result.Results {
		model, err := names.ParseModelTag(r.ModelTag)
		if err != nil {
			return nil, errors.Annotatef(err, "ModelTag %q at position %d", r.ModelTag, i)
		}
		owner, err := names.ParseUserTag(r.OwnerTag)
		if err != nil {
			return nil, errors.Annotatef(err, "OwnerTag %q at position %d", r.OwnerTag, i)
		}
		results[i] = base.ModelStatus{
			UUID:               model.Id(),
			Life:               r.Life,
			Owner:              owner.Canonical(),
			HostedMachineCount: r.HostedMachineCount,
			ServiceCount:       r.ServiceCount,
		}
	}
	return results, nil
}
// ModelMigrationSpec holds the details required to start the
// migration of a single model.
type ModelMigrationSpec struct {
	ModelUUID            string   // UUID of the model to migrate
	TargetControllerUUID string   // UUID of the controller to migrate to
	TargetAddrs          []string // API addresses of the target controller
	TargetCACert         string   // CA certificate used to validate the target's API server
	TargetUser           string   // user to authenticate to the target controller as
	TargetPassword       string   // password for TargetUser
}
// InitiateModelMigration attempts to start a migration for the
// specified model, returning the migration's ID.
//
// The API server supports starting multiple migrations in one request
// but we don't need that at the client side yet (and may never) so
// this call just supports starting one migration at a time.
func (c *Client) InitiateModelMigration(spec ModelMigrationSpec) (string, error) {
	targetInfo := params.ModelMigrationTargetInfo{
		ControllerTag: names.NewModelTag(spec.TargetControllerUUID).String(),
		Addrs:         spec.TargetAddrs,
		CACert:        spec.TargetCACert,
		AuthTag:       names.NewUserTag(spec.TargetUser).String(),
		Password:      spec.TargetPassword,
	}
	args := params.InitiateModelMigrationArgs{
		Specs: []params.ModelMigrationSpec{{
			ModelTag:   names.NewModelTag(spec.ModelUUID).String(),
			TargetInfo: targetInfo,
		}},
	}
	var response params.InitiateModelMigrationResults
	if err := c.facade.FacadeCall("InitiateModelMigration", args, &response); err != nil {
		return "", errors.Trace(err)
	}
	if len(response.Results) != 1 {
		return "", errors.New("unexpected number of results returned")
	}
	result := response.Results[0]
	if result.Error != nil {
		return "", errors.Trace(result.Error)
	}
	return result.Id, nil
}
api/controller: Add validation of ModelMigrationSpec
// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package controller
import (
"github.com/juju/errors"
"github.com/juju/loggo"
"github.com/juju/names"
"github.com/juju/juju/api"
"github.com/juju/juju/api/base"
"github.com/juju/juju/apiserver/params"
)
var logger = loggo.GetLogger("juju.api.controller")
// Client provides methods that the Juju client command uses to interact
// with the Juju controller.
type Client struct {
	base.ClientFacade
	facade base.FacadeCaller // caller bound to the "Controller" facade
}
// NewClient creates a new `Client` based on an existing authenticated API
// connection.
func NewClient(st base.APICallCloser) *Client {
	frontend, backend := base.NewClientFacade(st, "Controller")
	logger.Tracef("%#v", frontend)
	client := &Client{ClientFacade: frontend, facade: backend}
	return client
}
// AllModels allows controller administrators to get the list of all the
// models in the controller.
func (c *Client) AllModels() ([]base.UserModel, error) {
	var models params.UserModelList
	if err := c.facade.FacadeCall("AllModels", nil, &models); err != nil {
		return nil, errors.Trace(err)
	}
	result := make([]base.UserModel, 0, len(models.UserModels))
	for i, model := range models.UserModels {
		owner, err := names.ParseUserTag(model.OwnerTag)
		if err != nil {
			return nil, errors.Annotatef(err, "OwnerTag %q at position %d", model.OwnerTag, i)
		}
		result = append(result, base.UserModel{
			Name:           model.Name,
			UUID:           model.UUID,
			Owner:          owner.Canonical(),
			LastConnection: model.LastConnection,
		})
	}
	return result, nil
}
// ModelConfig returns all model settings for the
// controller model.
func (c *Client) ModelConfig() (map[string]interface{}, error) {
	var out params.ModelConfigResults
	err := c.facade.FacadeCall("ModelConfig", nil, &out)
	return out.Config, err
}
// DestroyController puts the controller model into a "dying" state,
// and removes all non-manager machine instances. Underlying DestroyModel
// calls will fail if there are any manually-provisioned non-manager machines
// in state.
func (c *Client) DestroyController(destroyModels bool) error {
	args := params.DestroyControllerArgs{DestroyModels: destroyModels}
	return c.facade.FacadeCall("DestroyController", args, nil)
}
// ListBlockedModels returns a list of all models within the controller
// which have at least one block in place.
func (c *Client) ListBlockedModels() ([]params.ModelBlockInfo, error) {
	var blocked params.ModelBlockInfoList
	err := c.facade.FacadeCall("ListBlockedModels", nil, &blocked)
	return blocked.Models, err
}
// RemoveBlocks removes all the blocks in the controller.
func (c *Client) RemoveBlocks() error {
	return c.facade.FacadeCall(
		"RemoveBlocks", params.RemoveBlocksArgs{All: true}, nil)
}
// WatchAllModels returns an AllWatcher, from which you can request
// the Next collection of Deltas (for all models).
func (c *Client) WatchAllModels() (*api.AllWatcher, error) {
	var info api.WatchAll
	if err := c.facade.FacadeCall("WatchAllModels", nil, &info); err != nil {
		return nil, err
	}
	return api.NewAllModelWatcher(c.facade.RawAPICaller(), &info.AllWatcherId), nil
}
// ModelStatus returns a status summary for each model tag passed in.
func (c *Client) ModelStatus(tags ...names.ModelTag) ([]base.ModelStatus, error) {
	// Build the request entity list from the supplied tags.
	entities := make([]params.Entity, 0, len(tags))
	for _, tag := range tags {
		entities = append(entities, params.Entity{Tag: tag.String()})
	}
	var out params.ModelStatusResults
	if err := c.facade.FacadeCall("ModelStatus", params.Entities{Entities: entities}, &out); err != nil {
		return nil, err
	}
	// Validate and convert each result's tags.
	statuses := make([]base.ModelStatus, len(out.Results))
	for i, r := range out.Results {
		modelTag, err := names.ParseModelTag(r.ModelTag)
		if err != nil {
			return nil, errors.Annotatef(err, "ModelTag %q at position %d", r.ModelTag, i)
		}
		ownerTag, err := names.ParseUserTag(r.OwnerTag)
		if err != nil {
			return nil, errors.Annotatef(err, "OwnerTag %q at position %d", r.OwnerTag, i)
		}
		statuses[i] = base.ModelStatus{
			UUID:               modelTag.Id(),
			Life:               r.Life,
			Owner:              ownerTag.Canonical(),
			HostedMachineCount: r.HostedMachineCount,
			ServiceCount:       r.ServiceCount,
		}
	}
	return statuses, nil
}
// ModelMigrationSpec holds the details required to start the
// migration of a single model.
type ModelMigrationSpec struct {
	// ModelUUID is the UUID of the model to migrate.
	ModelUUID string
	// TargetControllerUUID is the UUID of the controller to migrate to.
	TargetControllerUUID string
	// TargetAddrs are the API addresses of the target controller.
	TargetAddrs []string
	// TargetCACert is the CA certificate of the target controller's API.
	TargetCACert string
	// TargetUser and TargetPassword are the credentials used to
	// authenticate against the target controller.
	TargetUser     string
	TargetPassword string
}
// Validate performs sanity checks on the migration configuration it
// holds.
func (s *ModelMigrationSpec) Validate() error {
	// Checks run in declaration order; the first failure wins.
	switch {
	case !names.IsValidModel(s.ModelUUID):
		return errors.NotValidf("model UUID")
	case !names.IsValidModel(s.TargetControllerUUID):
		return errors.NotValidf("controller UUID")
	case len(s.TargetAddrs) < 1:
		return errors.NotValidf("empty target API addresses")
	case s.TargetCACert == "":
		return errors.NotValidf("empty target CA cert")
	case !names.IsValidUser(s.TargetUser):
		return errors.NotValidf("target user")
	case s.TargetPassword == "":
		return errors.NotValidf("empty target password")
	}
	return nil
}
// InitiateModelMigration attempts to start a migration for the
// specified model, returning the migration's ID.
//
// The API server supports starting multiple migrations in one request
// but we don't need that at the client side yet (and may never) so
// this call just supports starting one migration at a time.
func (c *Client) InitiateModelMigration(spec ModelMigrationSpec) (string, error) {
	if err := spec.Validate(); err != nil {
		return "", errors.Trace(err)
	}
	// Translate the client-side spec into its wire representation.
	apiSpec := params.ModelMigrationSpec{
		ModelTag: names.NewModelTag(spec.ModelUUID).String(),
		TargetInfo: params.ModelMigrationTargetInfo{
			ControllerTag: names.NewModelTag(spec.TargetControllerUUID).String(),
			Addrs:         spec.TargetAddrs,
			CACert:        spec.TargetCACert,
			AuthTag:       names.NewUserTag(spec.TargetUser).String(),
			Password:      spec.TargetPassword,
		},
	}
	args := params.InitiateModelMigrationArgs{
		Specs: []params.ModelMigrationSpec{apiSpec},
	}
	var out params.InitiateModelMigrationResults
	if err := c.facade.FacadeCall("InitiateModelMigration", args, &out); err != nil {
		return "", errors.Trace(err)
	}
	// Exactly one spec was sent, so exactly one result is expected.
	if len(out.Results) != 1 {
		return "", errors.New("unexpected number of results returned")
	}
	res := out.Results[0]
	if res.Error != nil {
		return "", errors.Trace(res.Error)
	}
	return res.Id, nil
}
|
package model
// personalisation is a placeholder for message personalisation data.
// NOTE(review): no fields yet — presumably fleshed out in a later revision.
type personalisation struct {
}
Model work
package model
import "github.com/pkg/errors"
// Personalisation - Represents information used to personalize a message sent through GovNotify service.
type Personalisation interface {
	// asMap exposes the stored personalisation fields.
	// BUG FIX: previously declared as `asMap() string`, which no type in
	// this package implements — the concrete *personalisation returns
	// map[string]string, so the interface was unsatisfiable.
	asMap() map[string]string
}
// personalisation is the concrete Personalisation implementation backed by
// a simple string-to-string map.
type personalisation struct {
	_store map[string]string
}

// asMap exposes the stored personalisation fields.
func (p *personalisation) asMap() map[string]string {
	return p._store
}

// Build stores a single name/value personalisation field on the receiver,
// replacing any previous store. Both name and value must be non-empty;
// otherwise an error is returned and the store is left untouched.
func (p *personalisation) Build(name string, value string) (personalisation, error) {
	var res personalisation
	if name == "" || value == "" {
		// BUG FIX: the previous code called errors.Wrap on a nil error and
		// discarded the result, so invalid input returned a nil error and
		// was silently accepted.
		return res, errors.New("Personalisation when set must have at least one field")
	}
	p._store = make(map[string]string)
	p._store[name] = value
	return res, nil
}
|
// Package purchase enables purchase of goods and services within Ava.
package main
import (
"encoding/json"
"errors"
"flag"
"fmt"
"log"
"math/rand"
"reflect"
"strconv"
"strings"
"time"
"github.com/avabot/ava/Godeps/_workspace/src/github.com/renstrom/fuzzysearch/fuzzy"
"github.com/avabot/ava/shared/datatypes"
"github.com/avabot/ava/shared/language"
"github.com/avabot/ava/shared/pkg"
"github.com/avabot/ava/shared/prefs"
"github.com/avabot/ava/shared/task"
)
// Purchase is the receiver type the Ava core invokes Run/FollowUp on.
type Purchase string
// ErrEmptyRecommendations signals that a product search returned no results.
var ErrEmptyRecommendations = errors.New("empty recommendations")
var port = flag.Int("port", 0, "Port used to communicate with Ava.")
// p is the registered package handle; ctx holds shared resources
// (DB, e-commerce client, current message).
var p *pkg.Pkg
var ctx *dt.Ctx
// resp enables the Run() function to skip to the FollowUp function if basic
// requirements are met.
var resp *dt.Resp
// Conversation state-machine values. Declared float64 because state is
// persisted and round-tripped through JSON, which decodes numbers as float64.
const (
	StateNone float64 = iota
	StatePreferences
	StateBudget
	StateSetRecommendations
	StateRecommendationsAlterBudget
	StateRecommendationsAlterQuery
	StateMakeRecommendation
	StateProductSelection
	StateContinueShopping
	StateShippingAddress
	StatePurchase
	StateAuth
	StateComplete
)
// pkgName identifies this package to the Ava core and in saved preferences.
const pkgName string = "purchase"
// statesShipping lists the US states this package will ship wine to.
var statesShipping = map[string]bool{
	"CA": true,
}
// TODO add support for upselling and promotions. Then a Task interface for
// follow ups

// main wires the purchase package into Ava: it seeds the RNG, opens the
// shared context, declares the trigger vocabulary, and registers the
// package with the Ava core over the configured port.
func main() {
	flag.Parse()
	rand.Seed(time.Now().UnixNano())
	var err error
	ctx, err = dt.NewContext()
	if err != nil {
		log.Fatalln(err)
	}
	// Triggered by purchase-style commands mentioning alcohol objects.
	trigger := &dt.StructuredInput{
		Commands: language.Purchase(),
		Objects: language.Alcohol(),
	}
	p, err = pkg.NewPackage(pkgName, *port, trigger)
	if err != nil {
		log.Fatalln("creating package", p.Config.Name, err)
	}
	purchase := new(Purchase)
	if err := p.Register(purchase); err != nil {
		log.Fatalln("registering package ", err)
	}
}
// Run handles the first message routed to this package: it resets the
// conversation state, seeds the search query from the structured input and
// any saved taste preference, and either asks for missing preference or
// budget information, or jumps straight to generating recommendations.
func (t *Purchase) Run(m *dt.Msg, respMsg *dt.RespMsg) error {
	ctx.Msg = m
	resp = m.NewResponse()
	// Fresh state for a new purchase conversation.
	resp.State = map[string]interface{}{
		"state": StateNone, // maintains state
		"query": "", // search query
		"budget": "", // suggested price
		"recommendations": dt.ProductSels{}, // search results
		"offset": uint(0), // index in search
		"shippingAddress": &dt.Address{},
		"productsSelected": dt.ProductSels{},
	}
	si := m.Input.StructuredInput
	query := ""
	for _, o := range si.Objects {
		query += o + " "
	}
	// Load the user's saved taste preference, if any.
	tastePref, err := prefs.Get(ctx.DB, resp.UserID, pkgName,
		prefs.KeyTaste)
	if err != nil {
		return err
	}
	if len(tastePref) == 0 {
		// No taste preference on file: ask for one.
		resp.State["query"] = query + " " + tastePref
		resp.State["state"] = StatePreferences
		resp.Sentence = "Sure. What do you usually look for in a wine? (e.g. dry, fruity, sweet, earthy, oak, etc.)"
		return p.SaveResponse(respMsg, resp)
	}
	resp.State["query"] = tastePref
	budgetPref, err := prefs.Get(ctx.DB, resp.UserID, pkgName,
		prefs.KeyBudget)
	if err != nil {
		return err
	}
	if len(budgetPref) > 0 {
		// Both taste and budget known: go straight to recommendations.
		resp.State["budget"], err = strconv.ParseUint(budgetPref, 10,
			64)
		if err != nil {
			return err
		}
		resp.State["state"] = StateSetRecommendations
		// NOTE(review): updateState's error is discarded here — confirm
		// this is intentional.
		updateState(m, resp, respMsg)
		return p.SaveResponse(respMsg, resp)
	}
	resp.State["state"] = StateBudget
	resp.Sentence = "Sure. How much do you usually pay for a bottle?"
	return t.FollowUp(m, respMsg)
}
// FollowUp handles every subsequent message in an ongoing purchase
// conversation: it restores the last response when needed, lets keyword
// handling redirect the flow, and otherwise advances the state machine.
func (t *Purchase) FollowUp(m *dt.Msg, respMsg *dt.RespMsg) error {
	ctx.Msg = m
	if resp == nil {
		if err := m.GetLastResponse(ctx.DB); err != nil {
			return err
		}
		resp = m.LastResponse
	}
	// have we already made the purchase?
	if getState() == StateComplete {
		// if so, reset state to allow for other purchases
		return t.Run(m, respMsg)
	}
	resp.Sentence = ""
	// allow the user to direct the conversation, e.g. say "something more
	// expensive" and have Ava respond appropriately
	var kw bool
	if getState() > StateSetRecommendations {
		log.Println("handling keywords")
		var err error
		kw, err = handleKeywords(m, resp, respMsg)
		if err != nil {
			return err
		}
	}
	if !kw {
		// if purchase has not been made, move user through the
		// package's states
		log.Println("updating state", getState())
		if err := updateState(m, resp, respMsg); err != nil {
			return err
		}
	}
	return p.SaveResponse(respMsg, resp)
}
// updateState advances the purchase conversation's state machine, driven by
// the "state" value in resp.State. Several cases recurse into updateState
// after changing state so that multiple transitions can happen for a single
// incoming message.
func updateState(m *dt.Msg, resp *dt.Resp, respMsg *dt.RespMsg) error {
	log.Println("state", getState())
	switch getState() {
	case StatePreferences:
		// TODO ensure Ava remembers past answers for preferences
		// "I know you're going to love this"
		if getBudget() == 0 {
			// Record the taste answer, then ask for a budget.
			resp.State["query"] = getQuery() + " " + m.Input.Sentence
			resp.State["state"] = StateBudget
			resp.Sentence = "Ok. How much do you usually pay for a bottle of wine?"
			if err := prefs.Save(ctx.DB, resp.UserID, pkgName,
				prefs.KeyTaste, getQuery()); err != nil {
				log.Println("err: saving budget pref")
				return err
			}
		} else {
			resp.State["state"] = StateSetRecommendations
			return updateState(m, resp, respMsg)
		}
	case StateBudget:
		// TODO ensure Ava remembers past answers for budget
		val, err := language.ExtractCurrency(m.Input.Sentence)
		if err != nil {
			log.Println("err extracting currency")
			return err
		}
		if !val.Valid {
			// No amount recognized; wait for another message.
			return nil
		}
		resp.State["budget"] = val.Int64
		resp.State["state"] = StateSetRecommendations
		err = prefs.Save(ctx.DB, resp.UserID, pkgName, prefs.KeyBudget,
			strconv.FormatUint(getBudget(), 10))
		if err != nil {
			log.Println("err: saving budget pref")
			return err
		}
		fallthrough
	case StateSetRecommendations:
		// Query the backend and cache results in state.
		log.Println("setting recs")
		err := setRecs(resp, respMsg)
		if err != nil {
			log.Println("err setting recs")
			return err
		}
		resp.State["state"] = StateMakeRecommendation
		return updateState(m, resp, respMsg)
	case StateRecommendationsAlterBudget:
		// User was asked whether to search more expensive bottles.
		yes := language.ExtractYesNo(m.Input.Sentence)
		if !yes.Valid {
			return nil
		}
		if yes.Bool {
			resp.State["budget"] = uint64(15000)
		} else {
			resp.State["query"] = "wine"
			if getBudget() < 1500 {
				resp.State["budget"] = uint64(1500)
			}
		}
		resp.State["offset"] = 0
		resp.State["state"] = StateSetRecommendations
		err := prefs.Save(ctx.DB, resp.UserID, pkgName, prefs.KeyBudget,
			strconv.FormatUint(getBudget(), 10))
		if err != nil {
			return err
		}
		return updateState(m, resp, respMsg)
	case StateRecommendationsAlterQuery:
		// User was asked whether to broaden the search to all wines.
		yes := language.ExtractYesNo(m.Input.Sentence)
		if !yes.Valid {
			return nil
		}
		if yes.Bool {
			resp.State["query"] = "wine"
			if getBudget() < 1500 {
				resp.State["budget"] = uint64(1500)
			}
		} else {
			resp.Sentence = "Ok. Let me know if there's anything else with which I can help you."
		}
		resp.State["offset"] = 0
		resp.State["state"] = StateSetRecommendations
		err := prefs.Save(ctx.DB, resp.UserID, pkgName, prefs.KeyBudget,
			strconv.FormatUint(getBudget(), 10))
		if err != nil {
			return err
		}
		return updateState(m, resp, respMsg)
	case StateMakeRecommendation:
		log.Println("recommending product")
		if err := recommendProduct(resp, respMsg); err != nil {
			return err
		}
	case StateProductSelection:
		// was the recommendation Ava made good?
		yes := language.ExtractYesNo(m.Input.Sentence)
		if !yes.Valid {
			log.Println("StateProductSelection: yes invalid")
			return nil
		}
		if !yes.Bool {
			// Rejected: advance to the next recommendation.
			resp.State["offset"] = getOffset() + 1
			log.Println("updating offset", getOffset())
			resp.State["state"] = StateMakeRecommendation
			return updateState(m, resp, respMsg)
		}
		log.Println("StateProductSelection: yes valid and true")
		count := language.ExtractCount(m.Input.Sentence)
		if count.Valid {
			if count.Int64 == 0 {
				// asked to order 0 wines. trigger confused
				// reply
				return nil
			}
		}
		selection, err := currentSelection(resp.State)
		if err == ErrEmptyRecommendations {
			// Nothing to select: offer to alter budget or query.
			resp.Sentence = "I couldn't find any wines like that. "
			if getBudget() < 5000 {
				resp.Sentence += "Should we look among the more expensive bottles?"
				resp.State["state"] = StateRecommendationsAlterBudget
			} else {
				resp.Sentence += "Should we expand your search to more wines?"
				resp.State["state"] = StateRecommendationsAlterQuery
			}
			return updateState(m, resp, respMsg)
		}
		if err != nil {
			return err
		}
		if !count.Valid || count.Int64 <= 1 {
			count.Int64 = 1
			resp.Sentence = "Ok, I've added it to your cart. Should we look for a few more?"
		} else if uint(count.Int64) > selection.Stock {
			// NOTE(review): sentence below appears truncated ("Should we do ").
			resp.Sentence = "I'm sorry, but I don't have that many available. Should we do "
			return nil
		} else {
			resp.Sentence = fmt.Sprintf(
				"Ok, I'll add %d to your cart. Should we look for a few more?",
				count.Int64)
		}
		prod := dt.ProductSel{
			Product: selection,
			Count: uint(count.Int64),
		}
		resp.State["productsSelected"] = append(getSelectedProducts(),
			prod)
		resp.State["state"] = StateContinueShopping
	case StateContinueShopping:
		yes := language.ExtractYesNo(m.Input.Sentence)
		if !yes.Valid {
			return nil
		}
		if yes.Bool {
			resp.State["offset"] = getOffset() + 1
			resp.State["state"] = StateMakeRecommendation
		} else {
			resp.State["state"] = StateShippingAddress
		}
		return updateState(m, resp, respMsg)
	case StateShippingAddress:
		prods := getSelectedProducts()
		if len(prods) == 0 {
			resp.Sentence = "You haven't picked any products. Should we keep looking?"
			resp.State["state"] = StateContinueShopping
			return nil
		}
		// tasks are multi-step processes often useful across several
		// packages
		var addr *dt.Address
		tsk, err := task.New(ctx, resp, respMsg)
		if err != nil {
			return err
		}
		done, err := tsk.RequestAddress(&addr, len(prods))
		if err != nil {
			return err
		}
		if !done {
			return nil
		}
		if addr == nil {
			return errors.New("addr is nil")
		}
		// NOTE(review): when shipping is not allowed, the flow still
		// proceeds to StatePurchase below — confirm intended.
		if !statesShipping[addr.State] {
			resp.Sentence = "I'm sorry, but I can't legally ship wine to that state."
		}
		resp.State["shippingAddress"] = addr
		tmp := fmt.Sprintf("$%.2f including shipping and tax. ",
			float64(prods.Prices(addr)["total"])/100)
		tmp += "Should I place the order?"
		resp.Sentence = fmt.Sprintf("Ok. It comes to %s", tmp)
		resp.State["state"] = StatePurchase
	case StatePurchase:
		yes := language.ExtractYesNo(m.Input.Sentence)
		if !yes.Valid {
			return nil
		}
		if !yes.Bool {
			resp.Sentence = "Ok."
			return nil
		}
		resp.State["state"] = StateAuth
		return updateState(m, resp, respMsg)
	case StateAuth:
		// TODO ensure Ava follows up to ensure the delivery occured,
		// get feedback, etc.
		prods := getSelectedProducts()
		purchase, err := dt.NewPurchase(ctx, &dt.PurchaseConfig{
			User: m.User,
			ShippingAddress: getShippingAddress(),
			VendorID: prods[0].VendorID,
			ProductSels: prods,
		})
		if err != nil {
			return err
		}
		tsk, err := task.New(ctx, resp, respMsg)
		if err != nil {
			return err
		}
		log.Println("task init")
		done, err := tsk.RequestPurchase(task.MethodZip,
			getSelectedProducts(), purchase)
		log.Println("task fired. request purchase")
		if err == task.ErrInvalidAuth {
			resp.Sentence = "I'm sorry but that doesn't match what I have. You could try to add a new card here: https://avabot.com/?/cards/new"
			return nil
		}
		if err != nil {
			log.Println("err requesting purchase")
			return err
		}
		if !done {
			log.Println("purchase incomplete")
			return nil
		}
		resp.State["state"] = StateComplete
		resp.Sentence = "Great! I've placed the order. You'll receive a confirmation by email."
	}
	return nil
}
// currentSelection returns the recommendation at the current offset, or an
// error when there are no recommendations or the offset is out of range.
func currentSelection(state map[string]interface{}) (*dt.Product, error) {
	recs := getRecommendations()
	if len(recs) == 0 {
		// Dump the relevant state to aid debugging empty result sets.
		log.Println("!!! empty recs !!!")
		log.Println("query", getQuery())
		log.Println("offset", getOffset())
		log.Println("budget", getBudget())
		log.Println("selectedProducts", len(getSelectedProducts()))
		return nil, ErrEmptyRecommendations
	}
	idx := getOffset()
	if uint(len(recs)) <= idx {
		return nil, errors.New("offset exceeds recommendation length")
	}
	return &recs[idx], nil
}
// handleKeywords scans the lowercased sentence word-by-word for control
// keywords (details, price, find, cart, checkout, remove, help, …) that can
// redirect the conversation at any point once recommendations exist. It
// reports whether any keyword produced a response sentence.
func handleKeywords(m *dt.Msg, resp *dt.Resp, respMsg *dt.RespMsg) (bool,
	error) {
	words := strings.Fields(strings.ToLower(m.Input.Sentence))
	// modifier flips budget adjustments between "more" (+1) and "less" (-1).
	// NOTE(review): words are processed in order, so a modifier word only
	// affects budget keywords that appear after it in the sentence.
	modifier := 1
	for _, word := range words {
		switch word {
		case "detail", "details", "description", "more about", "review",
			"rating", "rated":
			// Randomized canned reply about product quality.
			r := rand.Intn(3)
			switch r {
			case 0:
				resp.Sentence = "Every wine I recommend is at the top of its craft."
			case 1:
				resp.Sentence = "I only recommend the best."
			case 2:
				resp.Sentence = "This wine has been personally selected by leading wine experts."
			}
		case "price", "cost", "shipping", "how much", "total":
			// Summarize cart pricing (values are in cents).
			prices := getSelectedProducts().
				Prices(getShippingAddress())
			s := fmt.Sprintf("The items cost $%.2f, ",
				float64(prices["products"])/100)
			s += fmt.Sprintf("shipping is $%.2f, ",
				float64(prices["shipping"])/100)
			if prices["tax"] > 0.0 {
				s += fmt.Sprintf("and tax is $%.2f, ",
					float64(prices["tax"])/100)
			}
			s += fmt.Sprintf("totaling $%.2f.",
				float64(prices["total"])/100)
			resp.Sentence = s
		case "find", "search", "show":
			// Restart the search using the whole sentence as the query.
			resp.State["offset"] = 0
			resp.State["query"] = m.Input.Sentence
			resp.State["state"] = StateSetRecommendations
			err := prefs.Save(ctx.DB, ctx.Msg.User.ID, pkgName,
				prefs.KeyTaste, m.Input.Sentence)
			if err != nil {
				return false, err
			}
		case "similar", "else", "different", "looking", "look":
			resp.State["offset"] = getOffset() + 1
			resp.State["state"] = StateMakeRecommendation
		case "expensive", "event", "nice", "nicer", "cheap", "cheaper":
			// perfect example of a need for stemming
			if word == "cheap" || word == "cheaper" {
				modifier = -1
			}
			// Step the budget by an amount proportional to its size.
			budg := getBudget()
			var tmp int
			if budg >= 10000 {
				tmp = int(budg) + (5000 * modifier)
			} else if budg >= 5000 {
				tmp = int(budg) + (2500 * modifier)
			} else {
				tmp = int(budg) + (1500 * modifier)
			}
			if tmp <= 0 {
				tmp = 1000
			}
			resp.State["budget"] = uint64(tmp)
			resp.State["state"] = StateSetRecommendations
			err := prefs.Save(ctx.DB, ctx.Msg.User.ID, pkgName,
				prefs.KeyBudget, strconv.Itoa(tmp))
			if err != nil {
				return false, err
			}
		case "more", "special":
			modifier = 1
		case "less":
			modifier = -1
		case "cart":
			// Describe the cart's contents.
			prods := getSelectedProducts()
			var prodNames []string
			for _, prod := range prods {
				name := fmt.Sprintf("%s ($%.2f)", prod.Name,
					float64(prod.Price)/100)
				prodNames = append(prodNames, name)
			}
			if len(prods) == 0 {
				resp.Sentence = "You haven't picked any wines, yet."
			} else if len(prods) == 1 {
				resp.Sentence = "You've picked a " +
					prodNames[0] + "."
			} else {
				resp.Sentence = fmt.Sprintf(
					"You've picked %d wines: ", len(prods))
				resp.Sentence += language.SliceToString(
					prodNames, "and") + "."
			}
			if len(prods) > 0 {
				var tmp string
				r := rand.Intn(2)
				switch r {
				case 0:
					tmp = " Should we keep looking or checkout?"
				case 1:
					tmp = " Should we add some more or checkout?"
				}
				// 255 is the database varchar limit, but we should aim
				// to be below 140 (sms)
				if len(resp.Sentence) > 140-len(tmp) {
					// 4 refers to the length of the ellipsis
					resp.Sentence = resp.Sentence[0 : 140-len(tmp)-4]
					resp.Sentence += "... "
				}
				resp.Sentence += tmp
			}
			resp.State["state"] = StateContinueShopping
		case "checkout", "check":
			prods := getSelectedProducts()
			if len(prods) == 1 {
				tmp := fmt.Sprintf(
					"Ok. Where should I ship your bottle of %s?",
					prods[0].Name)
				resp.Sentence = tmp
			} else if len(prods) > 1 {
				resp.Sentence = fmt.Sprintf(
					"Ok. Where should I ship these %d bottles?",
					len(prods))
			}
			resp.State["state"] = StateShippingAddress
		case "remove", "rid", "drop":
			// Fuzzy-match the rest of the sentence against cart items.
			prods := getSelectedProducts()
			var prodNames []string
			for _, prod := range prods {
				prodNames = append(prodNames, prod.Name)
			}
			var matches []string
			for _, w := range strings.Fields(m.Input.Sentence) {
				if len(w) <= 3 {
					continue
				}
				tmp := fuzzy.FindFold(w, prodNames)
				if len(tmp) > 0 {
					matches = append(matches, tmp...)
				}
			}
			if len(matches) == 0 {
				resp.Sentence = "I couldn't find a wine like that in your cart."
			} else if len(matches) == 1 {
				resp.Sentence = fmt.Sprintf(
					"Ok, I'll remove %s.", matches[0])
				removeSelectedProduct(matches[0])
			} else {
				resp.Sentence = "Ok, I'll remove those."
				for _, match := range matches {
					removeSelectedProduct(match)
				}
			}
			r := rand.Intn(2)
			switch r {
			case 0:
				resp.Sentence += " Is there something else I can help you find?"
			case 1:
				resp.Sentence += " Would you like to find another?"
			}
			resp.State["state"] = StateContinueShopping
		case "help", "command":
			resp.Sentence = "At any time you can ask to see your cart, checkout, find something different (dry, fruity, earthy, etc.), or find something more or less expensive."
		}
	}
	return len(resp.Sentence) > 0, nil
}
// recommendProduct presents the product at the current offset, composing a
// sentence from name, size, price, an optional review summary, and a
// randomized call to action, then moves to StateProductSelection. When no
// recommendations exist it offers to alter the budget or query instead.
func recommendProduct(resp *dt.Resp, respMsg *dt.RespMsg) error {
	recs := getRecommendations()
	if len(recs) == 0 {
		words := strings.Fields(getQuery())
		if len(words) == 1 {
			resp.Sentence = "I couldn't find any wines like that. "
			if getBudget() < 5000 {
				resp.Sentence += "Should we look among the more expensive bottles?"
				resp.State["state"] = StateRecommendationsAlterBudget
			} else {
				resp.Sentence += "Should we expand your search to more wines?"
				resp.State["state"] = StateRecommendationsAlterQuery
			}
			return nil
		} else {
			// NOTE(review): the literal "simple" looks suspicious —
			// possibly intended to simplify the multi-word query; confirm.
			resp.State["query"] = "simple"
			return nil
		}
	}
	log.Println("showing product")
	offset := getOffset()
	product := recs[offset]
	var size string
	if len(product.Size) > 0 {
		size = fmt.Sprintf(" (%s)", strings.ToLower(product.Size))
	}
	// Price is stored in cents.
	tmp := fmt.Sprintf("A %s%s for $%.2f. ", product.Name, size,
		float64(product.Price)/100)
	if len(product.Reviews) > 0 {
		summary, err := language.Summarize(
			product.Reviews[0].Body, "products_alcohol")
		if err != nil {
			return err
		}
		if len(summary) > 0 {
			tmp += summary + " "
		}
	}
	// Randomized call to action, varied when the cart is non-empty.
	r := rand.Intn(2)
	switch r {
	case 0:
		tmp += "Does that sound good"
	case 1:
		tmp += "Should I add it to your cart"
	}
	if len(getSelectedProducts()) > 0 {
		r = rand.Intn(6)
		switch r {
		case 0:
			tmp += " as well?"
		case 1:
			tmp += " too?"
		case 2:
			tmp += " also?"
		case 3, 4, 5:
			tmp += "?"
		}
	} else {
		tmp += "?"
	}
	// Mention available stock, capped at a dozen.
	if product.Stock > 1 {
		val := product.Stock
		if val > 12 {
			val = 12
		}
		r = rand.Intn(2)
		switch r {
		case 0:
			tmp += fmt.Sprintf(" You can order up to %d of them.",
				val)
		case 1:
			tmp += fmt.Sprintf(" You can get 1 to %d of them.", val)
		}
	}
	resp.Sentence = language.SuggestedProduct(tmp, offset)
	resp.State["state"] = StateProductSelection
	return nil
}
// setRecs queries the e-commerce backend for up to 20 alcohol products
// matching the user's current query and budget, storing the results in the
// response state under "recommendations".
func setRecs(resp *dt.Resp, respMsg *dt.RespMsg) error {
	results, err := ctx.EC.FindProducts(getQuery(), "alcohol", getBudget(),
		20)
	if err != nil {
		return err
	}
	if len(results) == 0 {
		resp.Sentence = "I'm sorry. I couldn't find anything like that."
	}
	// TODO - better recommendations
	// results = sales.SortByRecommendation(results)
	resp.State["recommendations"] = results
	return nil
}
// TODO customize the type of resp.State, forcing all reads and writes through
// these getter/setter functions to preserve and handle types across interface{}

// getOffset returns the current search-result index, tolerating the numeric
// types that can appear in the state map after a JSON round-trip.
func getOffset() uint {
	switch v := resp.State["offset"].(type) {
	case uint:
		return v
	case int:
		return uint(v)
	case float64:
		return uint(v)
	}
	log.Println("warn: couldn't get offset: invalid type",
		reflect.TypeOf(resp.State["offset"]))
	return uint(0)
}
// getQuery returns the current search query. Consistent with the other
// state getters (getOffset, getBudget), a missing or non-string value is
// logged and treated as empty rather than panicking on a failed type
// assertion, which the previous unchecked assertion would do.
func getQuery() string {
	query, ok := resp.State["query"].(string)
	if !ok {
		log.Println("warn: couldn't get query: invalid type",
			reflect.TypeOf(resp.State["query"]))
		return ""
	}
	return query
}
// getBudget returns the budget in cents, tolerating every numeric type that
// reaches the state map: uint64 (parsed from saved prefs), int64 (stored
// directly from language.ExtractCurrency's val.Int64 in StateBudget), and
// float64 (values restored from JSON).
func getBudget() uint64 {
	switch v := resp.State["budget"].(type) {
	case uint64:
		return v
	case int64:
		// BUG FIX: StateBudget stores val.Int64 (an int64); this case was
		// previously missing, so a freshly entered budget read back as 0
		// and was saved to prefs as "0".
		return uint64(v)
	case int:
		return uint64(v)
	case float64:
		return uint64(v)
	default:
		log.Println("warn: couldn't get budget: invalid type",
			reflect.TypeOf(resp.State["budget"]))
	}
	return uint64(0)
}
// getShippingAddress returns the stored shipping address, or nil when no
// address (or a value of the wrong type) is present in the state map.
func getShippingAddress() *dt.Address {
	if addr, ok := resp.State["shippingAddress"].(*dt.Address); ok {
		return addr
	}
	return nil
}
// getSelectedProducts returns the user's cart. State restored from storage
// arrives as generic JSON values, so when the direct type assertion fails
// the value is re-marshaled and decoded into []dt.ProductSel.
// NOTE(review): the assertion to interface{} below only fails for a nil
// value, so the JSON fallback effectively always runs — confirm intent.
func getSelectedProducts() dt.ProductSels {
	products, ok := resp.State["productsSelected"].([]dt.ProductSel)
	if !ok {
		prodMap, ok := resp.State["productsSelected"].(interface{})
		if !ok {
			log.Println("productsSelected not found",
				resp.State["productsSelected"])
			return nil
		}
		byt, err := json.Marshal(prodMap)
		if err != nil {
			log.Println("err: marshaling products", err)
		}
		if err = json.Unmarshal(byt, &products); err != nil {
			log.Println("err: unmarshaling products", err)
		}
	}
	return products
}
// removeSelectedProduct removes every product whose name matches from the
// user's cart, logging each removal and a warning when nothing matched.
func removeSelectedProduct(name string) {
	log.Println("removing", name, "from cart")
	prods := getSelectedProducts()
	// BUG FIX: the previous version spliced elements out of the slice while
	// still ranging over it, which skips or corrupts entries when more than
	// one product shares the name. Build the kept list instead.
	kept := make(dt.ProductSels, 0, len(prods))
	var success bool
	for _, prod := range prods {
		if name == prod.Name {
			log.Println("removed", name)
			success = true
			continue
		}
		kept = append(kept, prod)
	}
	if !success {
		log.Println("failed to remove", name, "from", prods)
		return
	}
	resp.State["productsSelected"] = kept
}
// getRecommendations returns the current product recommendations. Values
// restored from storage arrive as generic JSON data, so when the direct
// type assertion fails the value is round-tripped through encoding/json
// back into []dt.Product.
func getRecommendations() []dt.Product {
	products, ok := resp.State["recommendations"].([]dt.Product)
	if !ok {
		prodMap, ok := resp.State["recommendations"].(interface{})
		if !ok {
			log.Println("recommendations not found",
				resp.State["recommendations"])
			return nil
		}
		byt, err := json.Marshal(prodMap)
		if err != nil {
			log.Println("err: marshaling products", err)
			return nil
		}
		if err = json.Unmarshal(byt, &products); err != nil {
			log.Println("err: unmarshaling products", err)
			return nil
		}
		// BUG FIX: this branch previously returned nil even after a
		// successful decode, discarding the recommendations. Its sibling
		// getSelectedProducts returns the decoded slice; do the same here.
	}
	return products
}
// getState returns the conversation state machine's current state, falling
// back to 0.0 (StateNone) when the value is unset or of an unexpected type.
func getState() float64 {
	if state, ok := resp.State["state"].(float64); ok {
		return state
	}
	return 0.0
}
Add 'much' modifier
// Package purchase enables purchase of goods and services within Ava.
package main
import (
"encoding/json"
"errors"
"flag"
"fmt"
"log"
"math/rand"
"reflect"
"strconv"
"strings"
"time"
"github.com/avabot/ava/Godeps/_workspace/src/github.com/renstrom/fuzzysearch/fuzzy"
"github.com/avabot/ava/shared/datatypes"
"github.com/avabot/ava/shared/language"
"github.com/avabot/ava/shared/pkg"
"github.com/avabot/ava/shared/prefs"
"github.com/avabot/ava/shared/task"
)
type Purchase string
var ErrEmptyRecommendations = errors.New("empty recommendations")
var port = flag.Int("port", 0, "Port used to communicate with Ava.")
var p *pkg.Pkg
var ctx *dt.Ctx
// resp enables the Run() function to skip to the FollowUp function if basic
// requirements are met.
var resp *dt.Resp
const (
StateNone float64 = iota
StatePreferences
StateBudget
StateSetRecommendations
StateRecommendationsAlterBudget
StateRecommendationsAlterQuery
StateMakeRecommendation
StateProductSelection
StateContinueShopping
StateShippingAddress
StatePurchase
StateAuth
StateComplete
)
const pkgName string = "purchase"
var statesShipping = map[string]bool{
"CA": true,
}
// TODO add support for upselling and promotions. Then a Task interface for
// follow ups

// main wires the purchase package into Ava: it seeds the RNG, opens the
// shared context, declares the trigger vocabulary, and registers the
// package with the Ava core over the configured port.
func main() {
	flag.Parse()
	rand.Seed(time.Now().UnixNano())
	var err error
	ctx, err = dt.NewContext()
	if err != nil {
		log.Fatalln(err)
	}
	// Triggered by purchase-style commands mentioning alcohol objects.
	trigger := &dt.StructuredInput{
		Commands: language.Purchase(),
		Objects: language.Alcohol(),
	}
	p, err = pkg.NewPackage(pkgName, *port, trigger)
	if err != nil {
		log.Fatalln("creating package", p.Config.Name, err)
	}
	purchase := new(Purchase)
	if err := p.Register(purchase); err != nil {
		log.Fatalln("registering package ", err)
	}
}
// Run handles the first message routed to this package: it resets the
// conversation state, seeds the search query from the structured input and
// any saved taste preference, and either asks for missing preference or
// budget information, or jumps straight to generating recommendations.
func (t *Purchase) Run(m *dt.Msg, respMsg *dt.RespMsg) error {
	ctx.Msg = m
	resp = m.NewResponse()
	// Fresh state for a new purchase conversation.
	resp.State = map[string]interface{}{
		"state": StateNone, // maintains state
		"query": "", // search query
		"budget": "", // suggested price
		"recommendations": dt.ProductSels{}, // search results
		"offset": uint(0), // index in search
		"shippingAddress": &dt.Address{},
		"productsSelected": dt.ProductSels{},
	}
	si := m.Input.StructuredInput
	query := ""
	for _, o := range si.Objects {
		query += o + " "
	}
	// Load the user's saved taste preference, if any.
	tastePref, err := prefs.Get(ctx.DB, resp.UserID, pkgName,
		prefs.KeyTaste)
	if err != nil {
		return err
	}
	if len(tastePref) == 0 {
		// No taste preference on file: ask for one.
		resp.State["query"] = query + " " + tastePref
		resp.State["state"] = StatePreferences
		resp.Sentence = "Sure. What do you usually look for in a wine? (e.g. dry, fruity, sweet, earthy, oak, etc.)"
		return p.SaveResponse(respMsg, resp)
	}
	resp.State["query"] = tastePref
	budgetPref, err := prefs.Get(ctx.DB, resp.UserID, pkgName,
		prefs.KeyBudget)
	if err != nil {
		return err
	}
	if len(budgetPref) > 0 {
		// Both taste and budget known: go straight to recommendations.
		resp.State["budget"], err = strconv.ParseUint(budgetPref, 10,
			64)
		if err != nil {
			return err
		}
		resp.State["state"] = StateSetRecommendations
		// NOTE(review): updateState's error is discarded here — confirm
		// this is intentional.
		updateState(m, resp, respMsg)
		return p.SaveResponse(respMsg, resp)
	}
	resp.State["state"] = StateBudget
	resp.Sentence = "Sure. How much do you usually pay for a bottle?"
	return t.FollowUp(m, respMsg)
}
// FollowUp handles every subsequent message in an ongoing purchase
// conversation: it restores the last response when needed, lets keyword
// handling redirect the flow, and otherwise advances the state machine.
func (t *Purchase) FollowUp(m *dt.Msg, respMsg *dt.RespMsg) error {
	ctx.Msg = m
	if resp == nil {
		if err := m.GetLastResponse(ctx.DB); err != nil {
			return err
		}
		resp = m.LastResponse
	}
	// have we already made the purchase?
	if getState() == StateComplete {
		// if so, reset state to allow for other purchases
		return t.Run(m, respMsg)
	}
	resp.Sentence = ""
	// allow the user to direct the conversation, e.g. say "something more
	// expensive" and have Ava respond appropriately
	var kw bool
	if getState() > StateSetRecommendations {
		log.Println("handling keywords")
		var err error
		kw, err = handleKeywords(m, resp, respMsg)
		if err != nil {
			return err
		}
	}
	if !kw {
		// if purchase has not been made, move user through the
		// package's states
		log.Println("updating state", getState())
		if err := updateState(m, resp, respMsg); err != nil {
			return err
		}
	}
	return p.SaveResponse(respMsg, resp)
}
func updateState(m *dt.Msg, resp *dt.Resp, respMsg *dt.RespMsg) error {
log.Println("state", getState())
switch getState() {
case StatePreferences:
// TODO ensure Ava remembers past answers for preferences
// "I know you're going to love this"
if getBudget() == 0 {
resp.State["query"] = getQuery() + " " + m.Input.Sentence
resp.State["state"] = StateBudget
resp.Sentence = "Ok. How much do you usually pay for a bottle of wine?"
if err := prefs.Save(ctx.DB, resp.UserID, pkgName,
prefs.KeyTaste, getQuery()); err != nil {
log.Println("err: saving budget pref")
return err
}
} else {
resp.State["state"] = StateSetRecommendations
return updateState(m, resp, respMsg)
}
case StateBudget:
// TODO ensure Ava remembers past answers for budget
val, err := language.ExtractCurrency(m.Input.Sentence)
if err != nil {
log.Println("err extracting currency")
return err
}
if !val.Valid {
return nil
}
resp.State["budget"] = val.Int64
resp.State["state"] = StateSetRecommendations
err = prefs.Save(ctx.DB, resp.UserID, pkgName, prefs.KeyBudget,
strconv.FormatUint(getBudget(), 10))
if err != nil {
log.Println("err: saving budget pref")
return err
}
fallthrough
case StateSetRecommendations:
log.Println("setting recs")
err := setRecs(resp, respMsg)
if err != nil {
log.Println("err setting recs")
return err
}
resp.State["state"] = StateMakeRecommendation
return updateState(m, resp, respMsg)
case StateRecommendationsAlterBudget:
yes := language.ExtractYesNo(m.Input.Sentence)
if !yes.Valid {
return nil
}
if yes.Bool {
resp.State["budget"] = uint64(15000)
} else {
resp.State["query"] = "wine"
if getBudget() < 1500 {
resp.State["budget"] = uint64(1500)
}
}
resp.State["offset"] = 0
resp.State["state"] = StateSetRecommendations
err := prefs.Save(ctx.DB, resp.UserID, pkgName, prefs.KeyBudget,
strconv.FormatUint(getBudget(), 10))
if err != nil {
return err
}
return updateState(m, resp, respMsg)
case StateRecommendationsAlterQuery:
yes := language.ExtractYesNo(m.Input.Sentence)
if !yes.Valid {
return nil
}
if yes.Bool {
resp.State["query"] = "wine"
if getBudget() < 1500 {
resp.State["budget"] = uint64(1500)
}
} else {
resp.Sentence = "Ok. Let me know if there's anything else with which I can help you."
}
resp.State["offset"] = 0
resp.State["state"] = StateSetRecommendations
err := prefs.Save(ctx.DB, resp.UserID, pkgName, prefs.KeyBudget,
strconv.FormatUint(getBudget(), 10))
if err != nil {
return err
}
return updateState(m, resp, respMsg)
case StateMakeRecommendation:
log.Println("recommending product")
if err := recommendProduct(resp, respMsg); err != nil {
return err
}
case StateProductSelection:
// was the recommendation Ava made good?
yes := language.ExtractYesNo(m.Input.Sentence)
if !yes.Valid {
log.Println("StateProductSelection: yes invalid")
return nil
}
if !yes.Bool {
resp.State["offset"] = getOffset() + 1
log.Println("updating offset", getOffset())
resp.State["state"] = StateMakeRecommendation
return updateState(m, resp, respMsg)
}
log.Println("StateProductSelection: yes valid and true")
count := language.ExtractCount(m.Input.Sentence)
if count.Valid {
if count.Int64 == 0 {
// asked to order 0 wines. trigger confused
// reply
return nil
}
}
selection, err := currentSelection(resp.State)
if err == ErrEmptyRecommendations {
resp.Sentence = "I couldn't find any wines like that. "
if getBudget() < 5000 {
resp.Sentence += "Should we look among the more expensive bottles?"
resp.State["state"] = StateRecommendationsAlterBudget
} else {
resp.Sentence += "Should we expand your search to more wines?"
resp.State["state"] = StateRecommendationsAlterQuery
}
return updateState(m, resp, respMsg)
}
if err != nil {
return err
}
if !count.Valid || count.Int64 <= 1 {
count.Int64 = 1
resp.Sentence = "Ok, I've added it to your cart. Should we look for a few more?"
} else if uint(count.Int64) > selection.Stock {
resp.Sentence = "I'm sorry, but I don't have that many available. Should we do "
return nil
} else {
resp.Sentence = fmt.Sprintf(
"Ok, I'll add %d to your cart. Should we look for a few more?",
count.Int64)
}
prod := dt.ProductSel{
Product: selection,
Count: uint(count.Int64),
}
resp.State["productsSelected"] = append(getSelectedProducts(),
prod)
resp.State["state"] = StateContinueShopping
case StateContinueShopping:
yes := language.ExtractYesNo(m.Input.Sentence)
if !yes.Valid {
return nil
}
if yes.Bool {
resp.State["offset"] = getOffset() + 1
resp.State["state"] = StateMakeRecommendation
} else {
resp.State["state"] = StateShippingAddress
}
return updateState(m, resp, respMsg)
case StateShippingAddress:
prods := getSelectedProducts()
if len(prods) == 0 {
resp.Sentence = "You haven't picked any products. Should we keep looking?"
resp.State["state"] = StateContinueShopping
return nil
}
// tasks are multi-step processes often useful across several
// packages
var addr *dt.Address
tsk, err := task.New(ctx, resp, respMsg)
if err != nil {
return err
}
done, err := tsk.RequestAddress(&addr, len(prods))
if err != nil {
return err
}
if !done {
return nil
}
if addr == nil {
return errors.New("addr is nil")
}
if !statesShipping[addr.State] {
resp.Sentence = "I'm sorry, but I can't legally ship wine to that state."
}
resp.State["shippingAddress"] = addr
tmp := fmt.Sprintf("$%.2f including shipping and tax. ",
float64(prods.Prices(addr)["total"])/100)
tmp += "Should I place the order?"
resp.Sentence = fmt.Sprintf("Ok. It comes to %s", tmp)
resp.State["state"] = StatePurchase
case StatePurchase:
yes := language.ExtractYesNo(m.Input.Sentence)
if !yes.Valid {
return nil
}
if !yes.Bool {
resp.Sentence = "Ok."
return nil
}
resp.State["state"] = StateAuth
return updateState(m, resp, respMsg)
case StateAuth:
// TODO ensure Ava follows up to ensure the delivery occured,
// get feedback, etc.
prods := getSelectedProducts()
purchase, err := dt.NewPurchase(ctx, &dt.PurchaseConfig{
User: m.User,
ShippingAddress: getShippingAddress(),
VendorID: prods[0].VendorID,
ProductSels: prods,
})
if err != nil {
return err
}
tsk, err := task.New(ctx, resp, respMsg)
if err != nil {
return err
}
log.Println("task init")
done, err := tsk.RequestPurchase(task.MethodZip,
getSelectedProducts(), purchase)
log.Println("task fired. request purchase")
if err == task.ErrInvalidAuth {
resp.Sentence = "I'm sorry but that doesn't match what I have. You could try to add a new card here: https://avabot.com/?/cards/new"
return nil
}
if err != nil {
log.Println("err requesting purchase")
return err
}
if !done {
log.Println("purchase incomplete")
return nil
}
resp.State["state"] = StateComplete
resp.Sentence = "Great! I've placed the order. You'll receive a confirmation by email."
}
return nil
}
// currentSelection returns the recommendation the user is currently viewing:
// the product at the current offset within the recommendation list held in
// conversation state. It returns ErrEmptyRecommendations when no
// recommendations are stored, and an error when the offset points past the
// end of the list.
func currentSelection(state map[string]interface{}) (*dt.Product, error) {
	recs := getRecommendations()
	if len(recs) == 0 {
		// Dump the relevant state to help debug why the search came
		// up empty.
		log.Println("!!! empty recs !!!")
		log.Println("query", getQuery())
		log.Println("offset", getOffset())
		log.Println("budget", getBudget())
		log.Println("selectedProducts", len(getSelectedProducts()))
		return nil, ErrEmptyRecommendations
	}
	idx := getOffset()
	if idx >= uint(len(recs)) {
		return nil, errors.New("offset exceeds recommendation length")
	}
	return &recs[idx], nil
}
// handleKeywords scans the (lowercased) user sentence for command-like
// keywords — details, price, cart, checkout, remove, budget adjustments,
// etc. — and handles each match directly by mutating resp.Sentence and
// resp.State. It returns true when some keyword produced a response
// sentence, so the caller can skip normal state-machine handling.
func handleKeywords(m *dt.Msg, resp *dt.Resp, respMsg *dt.RespMsg) (bool,
	error) {
	words := strings.Fields(strings.ToLower(m.Input.Sentence))
	// modifier scales/flips budget adjustments (e.g. -1 for "cheap");
	// since it is applied when the budget keyword itself is reached,
	// word order within the sentence matters.
	modifier := 1
	for _, word := range words {
		switch word {
		// NOTE(review): "more about" contains a space and can never
		// equal a single token produced by strings.Fields, so that
		// case value looks unreachable — confirm intent.
		case "detail", "details", "description", "more about", "review",
			"rating", "rated":
			// Canned reply, chosen at random for variety.
			r := rand.Intn(3)
			switch r {
			case 0:
				resp.Sentence = "Every wine I recommend is at the top of its craft."
			case 1:
				resp.Sentence = "I only recommend the best."
			case 2:
				resp.Sentence = "This wine has been personally selected by leading wine experts."
			}
		case "price", "cost", "shipping", "total":
			// Prices are stored in cents; format as dollars.
			prices := getSelectedProducts().
				Prices(getShippingAddress())
			s := fmt.Sprintf("The items cost $%.2f, ",
				float64(prices["products"])/100)
			s += fmt.Sprintf("shipping is $%.2f, ",
				float64(prices["shipping"])/100)
			if prices["tax"] > 0.0 {
				s += fmt.Sprintf("and tax is $%.2f, ",
					float64(prices["tax"])/100)
			}
			s += fmt.Sprintf("totaling $%.2f.",
				float64(prices["total"])/100)
			resp.Sentence = s
		case "find", "search", "show":
			// Start a fresh search using the whole sentence as the
			// query, and persist it as the user's taste preference.
			resp.State["offset"] = 0
			resp.State["query"] = m.Input.Sentence
			resp.State["state"] = StateSetRecommendations
			err := prefs.Save(ctx.DB, ctx.Msg.User.ID, pkgName,
				prefs.KeyTaste, m.Input.Sentence)
			if err != nil {
				return false, err
			}
		case "similar", "else", "different", "looking", "look":
			// Advance to the next recommendation in the list.
			resp.State["offset"] = getOffset() + 1
			resp.State["state"] = StateMakeRecommendation
		case "expensive", "event", "nice", "nicer", "cheap", "cheaper":
			// perfect example of a need for stemming
			if word == "cheap" || word == "cheaper" {
				modifier = -1
			}
			// Step the budget up/down by a tier-sized amount
			// (values in cents), clamping at a $10 floor.
			budg := getBudget()
			var tmp int
			if budg >= 10000 {
				tmp = int(budg) + (5000 * modifier)
			} else if budg >= 5000 {
				tmp = int(budg) + (2500 * modifier)
			} else {
				tmp = int(budg) + (1500 * modifier)
			}
			if tmp <= 0 {
				tmp = 1000
			}
			resp.State["budget"] = uint64(tmp)
			resp.State["state"] = StateSetRecommendations
			err := prefs.Save(ctx.DB, ctx.Msg.User.ID, pkgName,
				prefs.KeyBudget, strconv.Itoa(tmp))
			if err != nil {
				return false, err
			}
		case "cart":
			// Summarize the cart contents, truncating to stay
			// within SMS-friendly length.
			prods := getSelectedProducts()
			var prodNames []string
			for _, prod := range prods {
				name := fmt.Sprintf("%s ($%.2f)", prod.Name,
					float64(prod.Price)/100)
				prodNames = append(prodNames, name)
			}
			if len(prods) == 0 {
				resp.Sentence = "You haven't picked any wines, yet."
			} else if len(prods) == 1 {
				resp.Sentence = "You've picked a " +
					prodNames[0] + "."
			} else {
				resp.Sentence = fmt.Sprintf(
					"You've picked %d wines: ", len(prods))
				resp.Sentence += language.SliceToString(
					prodNames, "and") + "."
			}
			if len(prods) > 0 {
				var tmp string
				r := rand.Intn(2)
				switch r {
				case 0:
					tmp = " Should we keep looking or checkout?"
				case 1:
					tmp = " Should we add some more or checkout?"
				}
				// 255 is the database varchar limit, but we should aim
				// to be below 140 (sms)
				if len(resp.Sentence) > 140-len(tmp) {
					// 4 refers to the length of the ellipsis
					resp.Sentence = resp.Sentence[0 : 140-len(tmp)-4]
					resp.Sentence += "... "
				}
				resp.Sentence += tmp
			}
			resp.State["state"] = StateContinueShopping
		case "checkout", "check":
			// Move to the shipping-address step. Note: an empty
			// cart leaves resp.Sentence unset here.
			prods := getSelectedProducts()
			if len(prods) == 1 {
				tmp := fmt.Sprintf(
					"Ok. Where should I ship your bottle of %s?",
					prods[0].Name)
				resp.Sentence = tmp
			} else if len(prods) > 1 {
				resp.Sentence = fmt.Sprintf(
					"Ok. Where should I ship these %d bottles?",
					len(prods))
			}
			resp.State["state"] = StateShippingAddress
		case "remove", "rid", "drop":
			// Fuzzy-match remaining sentence words (>3 chars)
			// against cart product names and remove the matches.
			prods := getSelectedProducts()
			var prodNames []string
			for _, prod := range prods {
				prodNames = append(prodNames, prod.Name)
			}
			var matches []string
			for _, w := range strings.Fields(m.Input.Sentence) {
				if len(w) <= 3 {
					continue
				}
				tmp := fuzzy.FindFold(w, prodNames)
				if len(tmp) > 0 {
					matches = append(matches, tmp...)
				}
			}
			if len(matches) == 0 {
				resp.Sentence = "I couldn't find a wine like that in your cart."
			} else if len(matches) == 1 {
				resp.Sentence = fmt.Sprintf(
					"Ok, I'll remove %s.", matches[0])
				removeSelectedProduct(matches[0])
			} else {
				resp.Sentence = "Ok, I'll remove those."
				for _, match := range matches {
					removeSelectedProduct(match)
				}
			}
			r := rand.Intn(2)
			switch r {
			case 0:
				resp.Sentence += " Is there something else I can help you find?"
			case 1:
				resp.Sentence += " Would you like to find another?"
			}
			resp.State["state"] = StateContinueShopping
		case "help", "command":
			resp.Sentence = "At any time you can ask to see your cart, checkout, find something different (dry, fruity, earthy, etc.), or find something more or less expensive."
		// NOTE(review): `modifier *= modifier` is a no-op for 1 and
		// maps -1 to 1; "less" in particular looks like it was meant
		// to be `modifier *= -1` — confirm intended behavior.
		case "more", "special":
			modifier *= modifier
		case "less":
			modifier *= modifier
		case "much", "very", "extremely":
			// Intensifiers double the pending budget adjustment.
			modifier *= 2
		}
	}
	return len(resp.Sentence) > 0, nil
}
// recommendProduct presents the product at the current offset to the user:
// it composes a sentence from the name, size, price (stored in cents), an
// optional summarized review, randomized phrasing, and stock limits, then
// advances the conversation to StateProductSelection. When there are no
// recommendations it instead asks to widen the budget or the query.
func recommendProduct(resp *dt.Resp, respMsg *dt.RespMsg) error {
	recs := getRecommendations()
	if len(recs) == 0 {
		words := strings.Fields(getQuery())
		if len(words) == 1 {
			resp.Sentence = "I couldn't find any wines like that. "
			// Budget is in cents: under $50, offer to raise it
			// rather than broadening the query.
			if getBudget() < 5000 {
				resp.Sentence += "Should we look among the more expensive bottles?"
				resp.State["state"] = StateRecommendationsAlterBudget
			} else {
				resp.Sentence += "Should we expand your search to more wines?"
				resp.State["state"] = StateRecommendationsAlterQuery
			}
			return nil
		} else {
			// NOTE(review): for multi-word queries this resets
			// the query to the literal "simple" but returns
			// without re-running the search or replying —
			// confirm this is intentional.
			resp.State["query"] = "simple"
			return nil
		}
	}
	log.Println("showing product")
	offset := getOffset()
	// NOTE(review): no bounds check before indexing recs[offset];
	// currentSelection guards the offset elsewhere, but this path
	// would panic if offset ran past the list — confirm callers.
	product := recs[offset]
	var size string
	if len(product.Size) > 0 {
		size = fmt.Sprintf(" (%s)", strings.ToLower(product.Size))
	}
	tmp := fmt.Sprintf("A %s%s for $%.2f. ", product.Name, size,
		float64(product.Price)/100)
	if len(product.Reviews) > 0 {
		// Summarize only the first review, if any.
		summary, err := language.Summarize(
			product.Reviews[0].Body, "products_alcohol")
		if err != nil {
			return err
		}
		if len(summary) > 0 {
			tmp += summary + " "
		}
	}
	// Vary the call-to-action phrasing at random.
	r := rand.Intn(2)
	switch r {
	case 0:
		tmp += "Does that sound good"
	case 1:
		tmp += "Should I add it to your cart"
	}
	// When the cart already has items, sometimes append "as well"-style
	// suffixes (3-in-6 chance of a plain "?").
	if len(getSelectedProducts()) > 0 {
		r = rand.Intn(6)
		switch r {
		case 0:
			tmp += " as well?"
		case 1:
			tmp += " too?"
		case 2:
			tmp += " also?"
		case 3, 4, 5:
			tmp += "?"
		}
	} else {
		tmp += "?"
	}
	// Mention the order limit, capped at a case of 12.
	if product.Stock > 1 {
		val := product.Stock
		if val > 12 {
			val = 12
		}
		r = rand.Intn(2)
		switch r {
		case 0:
			tmp += fmt.Sprintf(" You can order up to %d of them.",
				val)
		case 1:
			tmp += fmt.Sprintf(" You can get 1 to %d of them.", val)
		}
	}
	resp.Sentence = language.SuggestedProduct(tmp, offset)
	resp.State["state"] = StateProductSelection
	return nil
}
// setRecs queries the e-commerce search backend for up to 20 alcohol
// products matching the user's current query and budget, storing the
// results in conversation state under "recommendations". An empty result
// set produces an apologetic reply but is not an error.
func setRecs(resp *dt.Resp, respMsg *dt.RespMsg) error {
	prods, err := ctx.EC.FindProducts(getQuery(), "alcohol", getBudget(), 20)
	if err != nil {
		return err
	}
	if len(prods) == 0 {
		resp.Sentence = "I'm sorry. I couldn't find anything like that."
	}
	// TODO - better recommendations
	// results = sales.SortByRecommendation(results)
	resp.State["recommendations"] = prods
	return nil
}
// TODO customize the type of resp.State, forcing all reads and writes through
// these getter/setter functions to preserve and handle types across interface{}
// getOffset reads the recommendation offset from conversation state,
// normalizing across the types it may hold: uint when set natively, int
// when set from a literal, or float64 after a JSON round trip. Any other
// type (including an unset key) logs a warning and yields 0.
func getOffset() uint {
	switch v := resp.State["offset"].(type) {
	case uint:
		return v
	case int:
		return uint(v)
	case float64:
		return uint(v)
	}
	log.Println("warn: couldn't get offset: invalid type",
		reflect.TypeOf(resp.State["offset"]))
	return 0
}
// getQuery returns the user's current product search query from
// conversation state.
//
// Fix: the original used a bare type assertion, which panics whenever
// "query" is unset or holds a non-string value. All sibling getters
// (getOffset, getBudget, getShippingAddress) degrade gracefully; this now
// does too, logging a warning and returning "" instead of crashing the
// request.
func getQuery() string {
	q, ok := resp.State["query"].(string)
	if !ok {
		log.Println("warn: couldn't get query: invalid type",
			reflect.TypeOf(resp.State["query"]))
		return ""
	}
	return q
}
// getBudget reads the user's budget (in cents) from conversation state,
// accepting either a native uint64 or a float64 left by a JSON round trip.
// Any other type logs a warning and yields 0.
func getBudget() uint64 {
	if v, ok := resp.State["budget"].(uint64); ok {
		return v
	}
	if f, ok := resp.State["budget"].(float64); ok {
		return uint64(f)
	}
	log.Println("warn: couldn't get budget: invalid type",
		reflect.TypeOf(resp.State["budget"]))
	return 0
}
// getShippingAddress returns the shipping address stored in conversation
// state, or nil when none has been captured yet.
func getShippingAddress() *dt.Address {
	if addr, ok := resp.State["shippingAddress"].(*dt.Address); ok {
		return addr
	}
	return nil
}
// getSelectedProducts returns the user's cart contents from conversation
// state. Products are stored natively as []dt.ProductSel, but after the
// state is persisted and reloaded via JSON they arrive as generic decoded
// values, so the fallback path re-marshals and unmarshals them back into
// the typed slice.
//
// Fix: the original logged a Marshal failure but then fed the (nil)
// buffer to Unmarshal anyway, and returned whatever partial result an
// Unmarshal failure left behind. Both error paths now return nil.
func getSelectedProducts() dt.ProductSels {
	products, ok := resp.State["productsSelected"].([]dt.ProductSel)
	if ok {
		return products
	}
	// An assertion to interface{} only fails for a nil value, so this
	// doubles as a presence check.
	prodMap, ok := resp.State["productsSelected"].(interface{})
	if !ok {
		log.Println("productsSelected not found",
			resp.State["productsSelected"])
		return nil
	}
	byt, err := json.Marshal(prodMap)
	if err != nil {
		log.Println("err: marshaling products", err)
		return nil
	}
	if err = json.Unmarshal(byt, &products); err != nil {
		log.Println("err: unmarshaling products", err)
		return nil
	}
	return products
}
// removeSelectedProduct removes every product whose name exactly matches
// name from the user's cart, leaving state untouched when nothing matches.
//
// Fix: the original executed append(prods[:i], prods[i+1:]...) while still
// ranging over prods, which shifts elements in the shared backing array
// mid-iteration — subsequent iterations compare against stale/duplicated
// entries and a second match can remove the wrong product. Building a
// filtered copy avoids mutating the slice being iterated.
func removeSelectedProduct(name string) {
	log.Println("removing", name, "from cart")
	prods := getSelectedProducts()
	kept := make(dt.ProductSels, 0, len(prods))
	var success bool
	for _, prod := range prods {
		if name == prod.Name {
			log.Println("removed", name)
			success = true
			continue
		}
		kept = append(kept, prod)
	}
	if !success {
		log.Println("failed to remove", name, "from", prods)
		return
	}
	resp.State["productsSelected"] = kept
}
// getRecommendations returns the current product recommendations from
// conversation state. Stored natively as []dt.Product, the values must be
// re-marshaled into the typed slice after a JSON round trip.
//
// Fix: the original returned nil even when the JSON round-trip decode
// succeeded (discarding the decoded products), unlike the parallel
// getSelectedProducts which returns them — every reload of persisted state
// therefore lost the recommendation list. Error paths now return nil
// early; the success path returns the decoded slice.
func getRecommendations() []dt.Product {
	products, ok := resp.State["recommendations"].([]dt.Product)
	if ok {
		return products
	}
	// An assertion to interface{} only fails for a nil value, so this
	// doubles as a presence check.
	prodMap, ok := resp.State["recommendations"].(interface{})
	if !ok {
		log.Println("recommendations not found",
			resp.State["recommendations"])
		return nil
	}
	byt, err := json.Marshal(prodMap)
	if err != nil {
		log.Println("err: marshaling products", err)
		return nil
	}
	if err = json.Unmarshal(byt, &products); err != nil {
		log.Println("err: unmarshaling products", err)
		return nil
	}
	return products
}
// getState returns the conversation's state-machine position from state,
// defaulting to 0 when unset or of an unexpected type.
func getState() float64 {
	if state, ok := resp.State["state"].(float64); ok {
		return state
	}
	return 0.0
}
|
package sockaddr_test
import (
"fmt"
"math/rand"
"testing"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/go-sockaddr"
)
// init seeds math/rand via consul's shared helper so the shuffles in these
// tests don't all run with Go's default deterministic seed.
func init() {
	lib.SeedMathRand()
}
// NOTE: A number of these code paths are exercised in template/ and
// cmd/sockaddr/
// sockAddrStringInputs allows for easy test creation by developers.
// Parallel arrays of string inputs are converted to their SockAddr
// equivalents for use by unit tests.
type sockAddrStringInputs []struct {
	inputAddrs []string // addresses to parse and (after shuffling) sort
	sortedAddrs []string // expected post-sort order
	sortedTypes []sockaddr.SockAddrType // expected post-sort type sequence
	sortFuncs []sockaddr.CmpAddrFunc // comparators, applied in order
	numIPv4Inputs int // count of IPv4 entries expected in sortedAddrs
	numIPv6Inputs int // count of IPv6 entries expected in sortedAddrs
	numUnixInputs int // count of Unix-socket entries expected in sortedAddrs
}
// convertToSockAddrs parses every input string into a SockAddr, failing
// the test immediately on the first malformed address.
func convertToSockAddrs(t *testing.T, inputs []string) sockaddr.SockAddrs {
	out := make(sockaddr.SockAddrs, 0, len(inputs))
	for i, s := range inputs {
		sa, err := sockaddr.NewSockAddr(s)
		if err != nil {
			t.Fatalf("[%d] Invalid SockAddr input for %+q: %v", i, s, err)
		}
		out = append(out, sa)
	}
	return out
}
// shuffleStrings permutes list in place with an inside-out Fisher-Yates
// shuffle driven by the package-global math/rand source.
func shuffleStrings(list []string) {
	for i := 0; i < len(list); i++ {
		j := rand.Intn(i + 1)
		list[j], list[i] = list[i], list[j]
	}
}
// TestSockAddr_SockAddrs_AscAddress verifies that AscAddress orders the
// IPv4 members of a mixed input list by ascending address (ports and masks
// are not the primary key here).
func TestSockAddr_SockAddrs_AscAddress(t *testing.T) {
	testInputs := sockAddrStringInputs{
		{ // testNum: 0
			sortFuncs: []sockaddr.CmpAddrFunc{
				sockaddr.AscAddress,
			},
			numIPv4Inputs: 9,
			numIPv6Inputs: 1,
			numUnixInputs: 0,
			inputAddrs: []string{
				"10.0.0.0/8",
				"172.16.1.3/12",
				"128.95.120.2:53",
				"128.95.120.2/32",
				"192.168.0.0/16",
				"128.95.120.1/32",
				"192.168.1.10/24",
				"128.95.120.2:8600",
				"240.0.0.1/4",
				"::",
			},
			// The same addresses in expected ascending order; the
			// three 128.95.120.2 entries differ only by port/mask.
			sortedAddrs: []string{
				"10.0.0.0/8",
				"128.95.120.1/32",
				"128.95.120.2:53",
				"128.95.120.2/32",
				"128.95.120.2:8600",
				"172.16.1.3/12",
				"192.168.0.0/16",
				"192.168.1.10/24",
				"240.0.0.1/4",
				"::",
			},
		},
	}
	for idx, test := range testInputs {
		t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) {
			// Randomize input order so the sort has real work to do.
			shuffleStrings(test.inputAddrs)
			inputSockAddrs := convertToSockAddrs(t, test.inputAddrs)
			sas := convertToSockAddrs(t, test.sortedAddrs)
			// Sanity-check the expected list against the declared
			// per-type counts before using it as the oracle.
			sortedIPv4Addrs, nonIPv4Addrs := sas.FilterByType(sockaddr.TypeIPv4)
			if l := len(sortedIPv4Addrs); l != test.numIPv4Inputs {
				t.Fatalf("[%d] Missing IPv4Addrs: expected %d, received %d", idx, test.numIPv4Inputs, l)
			}
			if len(nonIPv4Addrs) != test.numIPv6Inputs+test.numUnixInputs {
				t.Fatalf("[%d] Non-IPv4 Address in input", idx)
			}
			// Copy inputAddrs so we can manipulate it. wtb const.
			sockAddrs := append(sockaddr.SockAddrs(nil), inputSockAddrs...)
			filteredAddrs, _ := sockAddrs.FilterByType(sockaddr.TypeIPv4)
			sockaddr.OrderedAddrBy(test.sortFuncs...).Sort(filteredAddrs)
			ipv4SockAddrs, nonIPv4s := filteredAddrs.FilterByType(sockaddr.TypeIPv4)
			if len(nonIPv4s) != 0 {
				t.Fatalf("[%d] bad", idx)
			}
			// Element-wise compare the sorted IPv4 inputs against
			// the expected ascending order.
			for i, ipv4SockAddr := range ipv4SockAddrs {
				ipv4Addr := sockaddr.ToIPv4Addr(ipv4SockAddr)
				sortedIPv4Addr := sockaddr.ToIPv4Addr(sortedIPv4Addrs[i])
				if ipv4Addr.Address != sortedIPv4Addr.Address {
					t.Errorf("[%d/%d] Sort equality failed: expected %s, received %s", idx, i, sortedIPv4Addrs[i], ipv4Addr)
				}
			}
		})
	}
}
// TestSockAddr_SockAddrs_AscPrivate verifies composite sorts in which
// private addresses order ahead of public ones, combined with type,
// address, and port comparators.
func TestSockAddr_SockAddrs_AscPrivate(t *testing.T) {
	testInputs := []struct {
		sortFuncs []sockaddr.CmpAddrFunc
		inputAddrs []string
		sortedAddrs []string
	}{
		{ // testNum: 0
			// NOTE(review): AscType and AscAddress each appear
			// twice in this chain; the later occurrences are
			// redundant tie-breakers — confirm intended.
			sortFuncs: []sockaddr.CmpAddrFunc{
				sockaddr.AscType,
				sockaddr.AscPrivate,
				sockaddr.AscAddress,
				sockaddr.AscType,
				sockaddr.AscAddress,
				sockaddr.AscPort,
			},
			inputAddrs: []string{
				"10.0.0.0/8",
				"172.16.1.3/12",
				"192.168.0.0/16",
				"192.168.0.0/16",
				"192.168.1.10/24",
				"128.95.120.1/32",
				"128.95.120.2/32",
				"128.95.120.2:53",
				"128.95.120.2:8600",
				"240.0.0.1/4",
				"::",
			},
			// sortedAddrs is deliberately shorter than inputAddrs
			// (see the commented-out tail); the loop below only
			// checks the first len(sortedAddrs) sorted entries.
			sortedAddrs: []string{
				"10.0.0.0/8",
				"172.16.1.3/12",
				"192.168.0.0/16",
				"192.168.0.0/16",
				"192.168.1.10/24",
				"240.0.0.1/4",
				"128.95.120.1/32",
				"128.95.120.2/32",
				// "128.95.120.2:53",
				// "128.95.120.2:8600",
				// "::",
			},
		},
		{
			sortFuncs: []sockaddr.CmpAddrFunc{
				sockaddr.AscType,
				sockaddr.AscPrivate,
				sockaddr.AscAddress,
			},
			inputAddrs: []string{
				"1.2.3.4:53",
				"192.168.1.2",
				"/tmp/foo",
				"[cc::1]:8600",
				"[::1]:53",
			},
			// Unix sockets sort first, then private IPv4, public
			// IPv4, and finally IPv6.
			sortedAddrs: []string{
				"/tmp/foo",
				"192.168.1.2",
				"1.2.3.4:53",
				"[::1]:53",
				"[cc::1]:8600",
			},
		},
		{
			sortFuncs: []sockaddr.CmpAddrFunc{
				sockaddr.AscType,
				sockaddr.AscPrivate,
				sockaddr.AscAddress,
			},
			inputAddrs: []string{
				"/tmp/foo",
				"/tmp/bar",
				"1.2.3.4",
			},
			sortedAddrs: []string{
				"/tmp/bar",
				"/tmp/foo",
				"1.2.3.4",
			},
		},
	}
	for idx, test := range testInputs {
		t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) {
			sortedAddrs := convertToSockAddrs(t, test.sortedAddrs)
			// Copy before shuffling so the table entry itself is
			// not mutated between runs.
			inputAddrs := append([]string(nil), test.inputAddrs...)
			shuffleStrings(inputAddrs)
			inputSockAddrs := convertToSockAddrs(t, inputAddrs)
			sockaddr.OrderedAddrBy(test.sortFuncs...).Sort(inputSockAddrs)
			for i, sockAddr := range sortedAddrs {
				if !sockAddr.Equal(inputSockAddrs[i]) {
					t.Logf("Input Addrs:\t%+v", inputAddrs)
					t.Logf("Sorted Addrs:\t%+v", inputSockAddrs)
					t.Logf("Expected Addrs:\t%+v", test.sortedAddrs)
					t.Fatalf("[%d/%d] Sort AscType/AscAddress failed: expected %+q, received %+q", idx, i, sockAddr, inputSockAddrs[i])
				}
			}
		})
	}
}
// TestSockAddr_SockAddrs_AscPort verifies sorting by port, then type:
// port-less Unix sockets sort ahead of IP addresses carrying ports.
//
// Fix: the failure message previously said "Sort AscType/AscAddress
// failed" — copied from another test — which misdirects debugging; it now
// names the comparators this test actually uses.
func TestSockAddr_SockAddrs_AscPort(t *testing.T) {
	// NOTE: both entries share the name "simple port test"; `go test`
	// disambiguates the second subtest as "...#01".
	testInputs := []struct {
		name        string
		sortFuncs   []sockaddr.CmpAddrFunc
		inputAddrs  []string
		sortedAddrs []string
	}{
		{
			name: "simple port test",
			sortFuncs: []sockaddr.CmpAddrFunc{
				sockaddr.AscPort,
				sockaddr.AscType,
			},
			inputAddrs: []string{
				"1.2.3.4:53",
				"/tmp/foo",
				"[::1]:53",
			},
			sortedAddrs: []string{
				"/tmp/foo",
				"1.2.3.4:53",
				"[::1]:53",
			},
		},
		{
			name: "simple port test",
			sortFuncs: []sockaddr.CmpAddrFunc{
				sockaddr.AscPort,
				sockaddr.AscType,
			},
			inputAddrs: []string{
				"1.2.3.4:53",
				"/tmp/foo",
			},
			sortedAddrs: []string{
				"/tmp/foo",
				"1.2.3.4:53",
			},
		},
	}
	for idx, test := range testInputs {
		t.Run(test.name, func(t *testing.T) {
			sortedAddrs := convertToSockAddrs(t, test.sortedAddrs)
			// Copy before shuffling so the table entry is untouched.
			inputAddrs := append([]string(nil), test.inputAddrs...)
			shuffleStrings(inputAddrs)
			inputSockAddrs := convertToSockAddrs(t, inputAddrs)
			sockaddr.OrderedAddrBy(test.sortFuncs...).Sort(inputSockAddrs)
			for i, sockAddr := range sortedAddrs {
				if !sockAddr.Equal(inputSockAddrs[i]) {
					t.Logf("Input Addrs:\t%+v", inputAddrs)
					t.Logf("Sorted Addrs:\t%+v", inputSockAddrs)
					t.Logf("Expected Addrs:\t%+v", test.sortedAddrs)
					t.Fatalf("[%d/%d] Sort AscPort/AscType failed: expected %+q, received %+q", idx, i, sockAddr, inputSockAddrs[i])
				}
			}
		})
	}
}
// TestSockAddr_SockAddrs_AscType verifies that AscType groups addresses by
// type: all nine IPv4 entries before the lone IPv6 "::".
//
// Fix: the original loop ranged over sortedAddrs — built from the table's
// *unset* sortedAddrs field, so it was empty — and compared each element's
// Type() against itself (sortedAddrs[i]), meaning the test asserted
// nothing. It now sorts the inputs and checks their types against the
// table's previously-unused sortedTypes sequence.
func TestSockAddr_SockAddrs_AscType(t *testing.T) {
	testInputs := sockAddrStringInputs{
		{ // testNum: 0
			sortFuncs: []sockaddr.CmpAddrFunc{
				sockaddr.AscType,
			},
			inputAddrs: []string{
				"10.0.0.0/8",
				"172.16.1.3/12",
				"128.95.120.2:53",
				"::",
				"128.95.120.2/32",
				"192.168.0.0/16",
				"128.95.120.1/32",
				"192.168.1.10/24",
				"128.95.120.2:8600",
				"240.0.0.1/4",
			},
			sortedTypes: []sockaddr.SockAddrType{
				sockaddr.TypeIPv4,
				sockaddr.TypeIPv4,
				sockaddr.TypeIPv4,
				sockaddr.TypeIPv4,
				sockaddr.TypeIPv4,
				sockaddr.TypeIPv4,
				sockaddr.TypeIPv4,
				sockaddr.TypeIPv4,
				sockaddr.TypeIPv4,
				sockaddr.TypeIPv6,
			},
		},
	}
	for idx, test := range testInputs {
		t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) {
			shuffleStrings(test.inputAddrs)
			inputSockAddrs := convertToSockAddrs(t, test.inputAddrs)
			sockaddr.OrderedAddrBy(test.sortFuncs...).Sort(inputSockAddrs)
			if len(inputSockAddrs) != len(test.sortedTypes) {
				t.Fatalf("[%d] expected %d sorted addrs, received %d", idx, len(test.sortedTypes), len(inputSockAddrs))
			}
			for i, sa := range inputSockAddrs {
				if sa.Type() != test.sortedTypes[i] {
					t.Errorf("[%d/%d] Sort AscType failed: expected type %v, received %+q", idx, i, test.sortedTypes[i], sa)
				}
			}
		})
	}
}
test: remove test dependency on consul by inlining single test function
package sockaddr_test
import (
crand "crypto/rand"
"fmt"
"math"
"math/big"
"math/rand"
"testing"
"time"
sockaddr "github.com/hashicorp/go-sockaddr"
)
// init seeds math/rand (via the inlined helper below) so the shuffles in
// these tests don't all run with Go's default deterministic seed.
func init() {
	seedMathRand()
}
// seedMathRand seeds math/rand from crypto/rand when available, falling
// back to the wall clock. Weak, but guaranteed, and strictly better than
// Go's default seed of 1.
func seedMathRand() {
	if n, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64)); err == nil {
		rand.Seed(n.Int64())
		return
	}
	rand.Seed(time.Now().UTC().UnixNano())
}
// NOTE: A number of these code paths are exercised in template/ and
// cmd/sockaddr/
// sockAddrStringInputs allows for easy test creation by developers.
// Parallel arrays of string inputs are converted to their SockAddr
// equivalents for use by unit tests.
type sockAddrStringInputs []struct {
	inputAddrs []string // addresses to parse and (after shuffling) sort
	sortedAddrs []string // expected post-sort order
	sortedTypes []sockaddr.SockAddrType // expected post-sort type sequence
	sortFuncs []sockaddr.CmpAddrFunc // comparators, applied in order
	numIPv4Inputs int // count of IPv4 entries expected in sortedAddrs
	numIPv6Inputs int // count of IPv6 entries expected in sortedAddrs
	numUnixInputs int // count of Unix-socket entries expected in sortedAddrs
}
// convertToSockAddrs parses every input string into a SockAddr, failing
// the test immediately on the first malformed address.
func convertToSockAddrs(t *testing.T, inputs []string) sockaddr.SockAddrs {
	out := make(sockaddr.SockAddrs, 0, len(inputs))
	for i, s := range inputs {
		sa, err := sockaddr.NewSockAddr(s)
		if err != nil {
			t.Fatalf("[%d] Invalid SockAddr input for %+q: %v", i, s, err)
		}
		out = append(out, sa)
	}
	return out
}
// shuffleStrings permutes list in place with an inside-out Fisher-Yates
// shuffle driven by the package-global math/rand source.
func shuffleStrings(list []string) {
	for i := 0; i < len(list); i++ {
		j := rand.Intn(i + 1)
		list[j], list[i] = list[i], list[j]
	}
}
// TestSockAddr_SockAddrs_AscAddress verifies that AscAddress orders the
// IPv4 members of a mixed input list by ascending address (ports and masks
// are not the primary key here).
func TestSockAddr_SockAddrs_AscAddress(t *testing.T) {
	testInputs := sockAddrStringInputs{
		{ // testNum: 0
			sortFuncs: []sockaddr.CmpAddrFunc{
				sockaddr.AscAddress,
			},
			numIPv4Inputs: 9,
			numIPv6Inputs: 1,
			numUnixInputs: 0,
			inputAddrs: []string{
				"10.0.0.0/8",
				"172.16.1.3/12",
				"128.95.120.2:53",
				"128.95.120.2/32",
				"192.168.0.0/16",
				"128.95.120.1/32",
				"192.168.1.10/24",
				"128.95.120.2:8600",
				"240.0.0.1/4",
				"::",
			},
			// The same addresses in expected ascending order; the
			// three 128.95.120.2 entries differ only by port/mask.
			sortedAddrs: []string{
				"10.0.0.0/8",
				"128.95.120.1/32",
				"128.95.120.2:53",
				"128.95.120.2/32",
				"128.95.120.2:8600",
				"172.16.1.3/12",
				"192.168.0.0/16",
				"192.168.1.10/24",
				"240.0.0.1/4",
				"::",
			},
		},
	}
	for idx, test := range testInputs {
		t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) {
			// Randomize input order so the sort has real work to do.
			shuffleStrings(test.inputAddrs)
			inputSockAddrs := convertToSockAddrs(t, test.inputAddrs)
			sas := convertToSockAddrs(t, test.sortedAddrs)
			// Sanity-check the expected list against the declared
			// per-type counts before using it as the oracle.
			sortedIPv4Addrs, nonIPv4Addrs := sas.FilterByType(sockaddr.TypeIPv4)
			if l := len(sortedIPv4Addrs); l != test.numIPv4Inputs {
				t.Fatalf("[%d] Missing IPv4Addrs: expected %d, received %d", idx, test.numIPv4Inputs, l)
			}
			if len(nonIPv4Addrs) != test.numIPv6Inputs+test.numUnixInputs {
				t.Fatalf("[%d] Non-IPv4 Address in input", idx)
			}
			// Copy inputAddrs so we can manipulate it. wtb const.
			sockAddrs := append(sockaddr.SockAddrs(nil), inputSockAddrs...)
			filteredAddrs, _ := sockAddrs.FilterByType(sockaddr.TypeIPv4)
			sockaddr.OrderedAddrBy(test.sortFuncs...).Sort(filteredAddrs)
			ipv4SockAddrs, nonIPv4s := filteredAddrs.FilterByType(sockaddr.TypeIPv4)
			if len(nonIPv4s) != 0 {
				t.Fatalf("[%d] bad", idx)
			}
			// Element-wise compare the sorted IPv4 inputs against
			// the expected ascending order.
			for i, ipv4SockAddr := range ipv4SockAddrs {
				ipv4Addr := sockaddr.ToIPv4Addr(ipv4SockAddr)
				sortedIPv4Addr := sockaddr.ToIPv4Addr(sortedIPv4Addrs[i])
				if ipv4Addr.Address != sortedIPv4Addr.Address {
					t.Errorf("[%d/%d] Sort equality failed: expected %s, received %s", idx, i, sortedIPv4Addrs[i], ipv4Addr)
				}
			}
		})
	}
}
// TestSockAddr_SockAddrs_AscPrivate verifies composite sorts in which
// private addresses order ahead of public ones, combined with type,
// address, and port comparators.
func TestSockAddr_SockAddrs_AscPrivate(t *testing.T) {
	testInputs := []struct {
		sortFuncs []sockaddr.CmpAddrFunc
		inputAddrs []string
		sortedAddrs []string
	}{
		{ // testNum: 0
			// NOTE(review): AscType and AscAddress each appear
			// twice in this chain; the later occurrences are
			// redundant tie-breakers — confirm intended.
			sortFuncs: []sockaddr.CmpAddrFunc{
				sockaddr.AscType,
				sockaddr.AscPrivate,
				sockaddr.AscAddress,
				sockaddr.AscType,
				sockaddr.AscAddress,
				sockaddr.AscPort,
			},
			inputAddrs: []string{
				"10.0.0.0/8",
				"172.16.1.3/12",
				"192.168.0.0/16",
				"192.168.0.0/16",
				"192.168.1.10/24",
				"128.95.120.1/32",
				"128.95.120.2/32",
				"128.95.120.2:53",
				"128.95.120.2:8600",
				"240.0.0.1/4",
				"::",
			},
			// sortedAddrs is deliberately shorter than inputAddrs
			// (see the commented-out tail); the loop below only
			// checks the first len(sortedAddrs) sorted entries.
			sortedAddrs: []string{
				"10.0.0.0/8",
				"172.16.1.3/12",
				"192.168.0.0/16",
				"192.168.0.0/16",
				"192.168.1.10/24",
				"240.0.0.1/4",
				"128.95.120.1/32",
				"128.95.120.2/32",
				// "128.95.120.2:53",
				// "128.95.120.2:8600",
				// "::",
			},
		},
		{
			sortFuncs: []sockaddr.CmpAddrFunc{
				sockaddr.AscType,
				sockaddr.AscPrivate,
				sockaddr.AscAddress,
			},
			inputAddrs: []string{
				"1.2.3.4:53",
				"192.168.1.2",
				"/tmp/foo",
				"[cc::1]:8600",
				"[::1]:53",
			},
			// Unix sockets sort first, then private IPv4, public
			// IPv4, and finally IPv6.
			sortedAddrs: []string{
				"/tmp/foo",
				"192.168.1.2",
				"1.2.3.4:53",
				"[::1]:53",
				"[cc::1]:8600",
			},
		},
		{
			sortFuncs: []sockaddr.CmpAddrFunc{
				sockaddr.AscType,
				sockaddr.AscPrivate,
				sockaddr.AscAddress,
			},
			inputAddrs: []string{
				"/tmp/foo",
				"/tmp/bar",
				"1.2.3.4",
			},
			sortedAddrs: []string{
				"/tmp/bar",
				"/tmp/foo",
				"1.2.3.4",
			},
		},
	}
	for idx, test := range testInputs {
		t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) {
			sortedAddrs := convertToSockAddrs(t, test.sortedAddrs)
			// Copy before shuffling so the table entry itself is
			// not mutated between runs.
			inputAddrs := append([]string(nil), test.inputAddrs...)
			shuffleStrings(inputAddrs)
			inputSockAddrs := convertToSockAddrs(t, inputAddrs)
			sockaddr.OrderedAddrBy(test.sortFuncs...).Sort(inputSockAddrs)
			for i, sockAddr := range sortedAddrs {
				if !sockAddr.Equal(inputSockAddrs[i]) {
					t.Logf("Input Addrs:\t%+v", inputAddrs)
					t.Logf("Sorted Addrs:\t%+v", inputSockAddrs)
					t.Logf("Expected Addrs:\t%+v", test.sortedAddrs)
					t.Fatalf("[%d/%d] Sort AscType/AscAddress failed: expected %+q, received %+q", idx, i, sockAddr, inputSockAddrs[i])
				}
			}
		})
	}
}
// TestSockAddr_SockAddrs_AscPort verifies sorting by port, then type:
// port-less Unix sockets sort ahead of IP addresses carrying ports.
//
// Fix: the failure message previously said "Sort AscType/AscAddress
// failed" — copied from another test — which misdirects debugging; it now
// names the comparators this test actually uses.
func TestSockAddr_SockAddrs_AscPort(t *testing.T) {
	// NOTE: both entries share the name "simple port test"; `go test`
	// disambiguates the second subtest as "...#01".
	testInputs := []struct {
		name        string
		sortFuncs   []sockaddr.CmpAddrFunc
		inputAddrs  []string
		sortedAddrs []string
	}{
		{
			name: "simple port test",
			sortFuncs: []sockaddr.CmpAddrFunc{
				sockaddr.AscPort,
				sockaddr.AscType,
			},
			inputAddrs: []string{
				"1.2.3.4:53",
				"/tmp/foo",
				"[::1]:53",
			},
			sortedAddrs: []string{
				"/tmp/foo",
				"1.2.3.4:53",
				"[::1]:53",
			},
		},
		{
			name: "simple port test",
			sortFuncs: []sockaddr.CmpAddrFunc{
				sockaddr.AscPort,
				sockaddr.AscType,
			},
			inputAddrs: []string{
				"1.2.3.4:53",
				"/tmp/foo",
			},
			sortedAddrs: []string{
				"/tmp/foo",
				"1.2.3.4:53",
			},
		},
	}
	for idx, test := range testInputs {
		t.Run(test.name, func(t *testing.T) {
			sortedAddrs := convertToSockAddrs(t, test.sortedAddrs)
			// Copy before shuffling so the table entry is untouched.
			inputAddrs := append([]string(nil), test.inputAddrs...)
			shuffleStrings(inputAddrs)
			inputSockAddrs := convertToSockAddrs(t, inputAddrs)
			sockaddr.OrderedAddrBy(test.sortFuncs...).Sort(inputSockAddrs)
			for i, sockAddr := range sortedAddrs {
				if !sockAddr.Equal(inputSockAddrs[i]) {
					t.Logf("Input Addrs:\t%+v", inputAddrs)
					t.Logf("Sorted Addrs:\t%+v", inputSockAddrs)
					t.Logf("Expected Addrs:\t%+v", test.sortedAddrs)
					t.Fatalf("[%d/%d] Sort AscPort/AscType failed: expected %+q, received %+q", idx, i, sockAddr, inputSockAddrs[i])
				}
			}
		})
	}
}
// TestSockAddr_SockAddrs_AscType verifies that AscType groups addresses by
// type: all nine IPv4 entries before the lone IPv6 "::".
//
// Fix: the original loop ranged over sortedAddrs — built from the table's
// *unset* sortedAddrs field, so it was empty — and compared each element's
// Type() against itself (sortedAddrs[i]), meaning the test asserted
// nothing. It now sorts the inputs and checks their types against the
// table's previously-unused sortedTypes sequence.
func TestSockAddr_SockAddrs_AscType(t *testing.T) {
	testInputs := sockAddrStringInputs{
		{ // testNum: 0
			sortFuncs: []sockaddr.CmpAddrFunc{
				sockaddr.AscType,
			},
			inputAddrs: []string{
				"10.0.0.0/8",
				"172.16.1.3/12",
				"128.95.120.2:53",
				"::",
				"128.95.120.2/32",
				"192.168.0.0/16",
				"128.95.120.1/32",
				"192.168.1.10/24",
				"128.95.120.2:8600",
				"240.0.0.1/4",
			},
			sortedTypes: []sockaddr.SockAddrType{
				sockaddr.TypeIPv4,
				sockaddr.TypeIPv4,
				sockaddr.TypeIPv4,
				sockaddr.TypeIPv4,
				sockaddr.TypeIPv4,
				sockaddr.TypeIPv4,
				sockaddr.TypeIPv4,
				sockaddr.TypeIPv4,
				sockaddr.TypeIPv4,
				sockaddr.TypeIPv6,
			},
		},
	}
	for idx, test := range testInputs {
		t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) {
			shuffleStrings(test.inputAddrs)
			inputSockAddrs := convertToSockAddrs(t, test.inputAddrs)
			sockaddr.OrderedAddrBy(test.sortFuncs...).Sort(inputSockAddrs)
			if len(inputSockAddrs) != len(test.sortedTypes) {
				t.Fatalf("[%d] expected %d sorted addrs, received %d", idx, len(test.sortedTypes), len(inputSockAddrs))
			}
			for i, sa := range inputSockAddrs {
				if sa.Type() != test.sortedTypes[i] {
					t.Errorf("[%d/%d] Sort AscType failed: expected type %v, received %+q", idx, i, test.sortedTypes[i], sa)
				}
			}
		})
	}
}
|
package lib
// Prims maps names to integer values; the trailing "func" note suggests
// these are primitive-function entries, but the int's meaning (arity?
// opcode?) is not evident from this file — TODO confirm at the call sites.
var Prims = map[string]int{} // func
remove
|
package main
import (
"fmt"
"github.com/BurntSushi/bcbgo/cmd/util"
"github.com/BurntSushi/bcbgo/rmsd"
)
// init registers the usage string and asserts exactly eight positional
// arguments: two (pdb-file, chain-id, start, stop) quadruples.
func init() {
	u := "pdb-file chain-id start stop pdb-file chain-id start stop"
	util.FlagParse(u, "")
	util.AssertNArg(8)
}
// main reads two PDB entries, parses the residue ranges, and prints the
// RMSD computed between the two chain segments.
func main() {
	pdbf1, chain1, s1, e1 := util.Arg(0), util.Arg(1), util.Arg(2), util.Arg(3)
	pdbf2, chain2, s2, e2 := util.Arg(4), util.Arg(5), util.Arg(6), util.Arg(7)
	entry1 := util.PDBRead(pdbf1)
	entry2 := util.PDBRead(pdbf2)
	// Residue ranges arrive as strings; parse them to ints.
	s1n, e1n := util.ParseInt(s1), util.ParseInt(e1)
	s2n, e2n := util.ParseInt(s2), util.ParseInt(e2)
	// Chain IDs are passed as the first byte of each chain argument.
	r, err := rmsd.PDB(entry1, chain1[0], s1n, e1n, entry2, chain2[0], s2n, e2n)
	util.Assert(err)
	fmt.Println(r)
}
Moved the "rmsd" package to its own repository and renamed it
to the "structure" package.
package main
import (
"fmt"
"github.com/BurntSushi/bcbgo/cmd/util"
"github.com/TuftsBCB/structure"
)
// init registers the usage string and asserts exactly eight positional
// arguments: two (pdb-file, chain-id, start, stop) quadruples.
func init() {
	u := "pdb-file chain-id start stop pdb-file chain-id start stop"
	util.FlagParse(u, "")
	util.AssertNArg(8)
}
// main reads two PDB entries, parses the residue ranges, and prints the
// RMSD computed between the two chain segments via the structure package.
func main() {
	pdbf1, chain1, s1, e1 := util.Arg(0), util.Arg(1), util.Arg(2), util.Arg(3)
	pdbf2, chain2, s2, e2 := util.Arg(4), util.Arg(5), util.Arg(6), util.Arg(7)
	entry1 := util.PDBRead(pdbf1)
	entry2 := util.PDBRead(pdbf2)
	// Residue ranges arrive as strings; parse them to ints.
	s1n, e1n := util.ParseInt(s1), util.ParseInt(e1)
	s2n, e2n := util.ParseInt(s2), util.ParseInt(e2)
	// Chain IDs are passed as the first byte of each chain argument.
	r, err := structure.PDB(
		entry1, chain1[0], s1n, e1n, entry2, chain2[0], s2n, e2n)
	util.Assert(err)
	fmt.Println(r)
}
|
// Copyright 2015 The Vanadium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package project
import (
"bytes"
"encoding/xml"
"fmt"
"io/ioutil"
"net/url"
"os"
"path/filepath"
"sort"
"strings"
"v.io/jiri/collect"
"v.io/jiri/gitutil"
"v.io/jiri/googlesource"
"v.io/jiri/jiri"
"v.io/jiri/runutil"
"v.io/jiri/tool"
"v.io/x/lib/cmdline"
"v.io/x/lib/set"
)
var JiriProject = "release.go.jiri"
var JiriName = "jiri"
var JiriPackage = "v.io/jiri"
// CL represents a changelist: the author identity and description attached
// to a single change.
type CL struct {
	// Author identifies the author of the changelist.
	Author string
	// Email identifies the author's email.
	Email string
	// Description holds the description of the changelist.
	Description string
}
// Manifest represents a setting used for updating the universe. It mirrors
// the on-disk XML layout: each slice is nested under a plural container
// element (e.g. <projects><project .../></projects>).
type Manifest struct {
	Hooks []Hook `xml:"hooks>hook"`
	Imports []Import `xml:"imports>import"`
	FileImports []FileImport `xml:"imports>fileimport"`
	Label string `xml:"label,attr,omitempty"`
	Projects []Project `xml:"projects>project"`
	Tools []Tool `xml:"tools>tool"`
	XMLName struct{} `xml:"manifest"` // pins the root element name
}
// ManifestFromBytes returns a manifest parsed from data, with defaults
// filled in. Unmarshal and default-filling errors are returned unchanged.
func ManifestFromBytes(data []byte) (*Manifest, error) {
	var m Manifest
	if err := xml.Unmarshal(data, &m); err != nil {
		return nil, err
	}
	if err := m.fillDefaults(); err != nil {
		return nil, err
	}
	return &m, nil
}
// ManifestFromFile returns a manifest parsed from the contents of filename,
// with defaults filled in.
//
// Read errors pass through unchanged; parse errors are wrapped with the
// filename for context.
func ManifestFromFile(jirix *jiri.X, filename string) (*Manifest, error) {
	data, err := jirix.NewSeq().ReadFile(filename)
	if err != nil {
		return nil, err
	}
	m, err := ManifestFromBytes(data)
	if err != nil {
		return nil, fmt.Errorf("invalid manifest %s: %v", filename, err)
	}
	return m, nil
}
// Byte sequences used by Manifest.ToBytes, Import.ToFile, and Project.ToFile
// to post-process the XML emitted by the encoding/xml marshalers: empty
// container elements are removed and verbose empty elements (e.g.
// "<hook></hook>") are collapsed to the short form ("/>").
var (
	newlineBytes        = []byte("\n")
	emptyHooksBytes     = []byte("\n <hooks></hooks>\n")
	emptyImportsBytes   = []byte("\n <imports></imports>\n")
	emptyProjectsBytes  = []byte("\n <projects></projects>\n")
	emptyToolsBytes     = []byte("\n <tools></tools>\n")
	endElemBytes        = []byte("/>\n")
	endHookBytes        = []byte("></hook>\n")
	endImportBytes      = []byte("></import>\n")
	endFileImportBytes  = []byte("></fileimport>\n")
	endProjectBytes     = []byte("></project>\n")
	endToolBytes        = []byte("></tool>\n")
	// "Solo" variants (no trailing newline) are used when a single element is
	// marshaled on its own, as in Import.ToFile and Project.ToFile.
	endImportSoloBytes  = []byte("></import>")
	endProjectSoloBytes = []byte("></project>")
	endElemSoloBytes    = []byte("/>")
)
// deepCopy returns a deep copy of Manifest.
func (m *Manifest) deepCopy() *Manifest {
	c := &Manifest{Label: m.Label}
	// Clone every top-level slice so the copy shares no backing arrays.
	c.Hooks = append([]Hook(nil), m.Hooks...)
	c.Imports = append([]Import(nil), m.Imports...)
	c.FileImports = append([]FileImport(nil), m.FileImports...)
	c.Projects = append([]Project(nil), m.Projects...)
	c.Tools = append([]Tool(nil), m.Tools...)
	// Hook.Args is the only nested slice; clone each one too.
	for i := range c.Hooks {
		c.Hooks[i].Args = append([]HookArg(nil), c.Hooks[i].Args...)
	}
	return c
}
// ToBytes returns m as serialized bytes, with defaults unfilled.
func (m *Manifest) ToBytes() ([]byte, error) {
	m = m.deepCopy() // avoid changing manifest when unfilling defaults.
	if err := m.unfillDefaults(); err != nil {
		return nil, err
	}
	data, err := xml.MarshalIndent(m, "", " ")
	if err != nil {
		return nil, fmt.Errorf("manifest xml.Marshal failed: %v", err)
	}
	// xml.Marshal cannot be persuaded to elide empty container elements, or
	// to produce short empty elements, so we post-process the data instead.
	for _, empty := range [][]byte{emptyHooksBytes, emptyImportsBytes, emptyProjectsBytes, emptyToolsBytes} {
		data = bytes.Replace(data, empty, newlineBytes, -1)
	}
	for _, long := range [][]byte{endHookBytes, endImportBytes, endFileImportBytes, endProjectBytes, endToolBytes} {
		data = bytes.Replace(data, long, endElemBytes, -1)
	}
	if !bytes.HasSuffix(data, newlineBytes) {
		data = append(data, '\n')
	}
	return data, nil
}
// safeWriteFile writes data to filename via a temporary file in the same
// directory that is renamed into place, so a partially-written file is never
// left at filename.
func safeWriteFile(jirix *jiri.X, filename string, data []byte) error {
	tmpName := filename + ".tmp"
	s := jirix.NewSeq()
	return s.MkdirAll(filepath.Dir(filename), 0755).
		WriteFile(tmpName, data, 0644).
		Rename(tmpName, filename).
		Done()
}
// ToFile writes the manifest m to a file with the given filename, with defaults
// unfilled.
func (m *Manifest) ToFile(jirix *jiri.X, filename string) error {
	serialized, err := m.ToBytes()
	if err != nil {
		return err
	}
	return safeWriteFile(jirix, filename, serialized)
}
// fillDefaults fills defaults on every import, project, and tool in the
// manifest, and validates each file import. The first error stops processing.
func (m *Manifest) fillDefaults() error {
	var steps []func() error
	for i := range m.Imports {
		steps = append(steps, m.Imports[i].fillDefaults)
	}
	for i := range m.FileImports {
		steps = append(steps, m.FileImports[i].validate)
	}
	for i := range m.Projects {
		steps = append(steps, m.Projects[i].fillDefaults)
	}
	for i := range m.Tools {
		steps = append(steps, m.Tools[i].fillDefaults)
	}
	for _, step := range steps {
		if err := step(); err != nil {
			return err
		}
	}
	return nil
}
// unfillDefaults removes default values from every import, project, and tool
// in the manifest, and validates each file import. The first error stops
// processing.
func (m *Manifest) unfillDefaults() error {
	var steps []func() error
	for i := range m.Imports {
		steps = append(steps, m.Imports[i].unfillDefaults)
	}
	for i := range m.FileImports {
		steps = append(steps, m.FileImports[i].validate)
	}
	for i := range m.Projects {
		steps = append(steps, m.Projects[i].unfillDefaults)
	}
	for i := range m.Tools {
		steps = append(steps, m.Tools[i].unfillDefaults)
	}
	for _, step := range steps {
		if err := step(); err != nil {
			return err
		}
	}
	return nil
}
// Hooks maps hook names to their detailed description.
type Hooks map[string]Hook

// Hook represents a post-update project hook.
type Hook struct {
	// Name is the hook name.
	Name string `xml:"name,attr,omitempty"`
	// Project is the name of the project the hook is associated with.
	Project string `xml:"project,attr,omitempty"`
	// Path is the path of the hook relative to its project's root.
	Path string `xml:"path,attr,omitempty"`
	// Interpreter is an optional program used to interpret the hook (i.e. python). Unlike Path,
	// Interpreter is relative to the environment's PATH and not the project's root.
	Interpreter string `xml:"interpreter,attr,omitempty"`
	// Arguments for the hook.
	Args []HookArg `xml:"arg,omitempty"`
	// XMLName fixes the XML element name to "hook".
	XMLName struct{} `xml:"hook"`
}

// HookArg is a single command-line argument for a hook; the argument text is
// the element's character data.
type HookArg struct {
	Arg     string   `xml:",chardata"`
	XMLName struct{} `xml:"arg"`
}
// Import represents a remote manifest import. During the manifest transition
// it may also represent an old-style local import, which sets only the Name
// field of the embedded Project (see validate).
type Import struct {
	// Manifest file to use from the remote manifest project.
	Manifest string `xml:"manifest,attr,omitempty"`
	// Root path, prepended to the manifest project path, as well as all projects
	// specified in the manifest file.
	Root string `xml:"root,attr,omitempty"`
	// Project description of the manifest repository. The Project fields
	// (Name, Path, Remote, ...) are embedded directly into the import element.
	Project
	// XMLName fixes the XML element name to "import".
	XMLName struct{} `xml:"import"`
}
// ToFile writes the import i to a file with the given filename, with defaults
// unfilled.
func (i Import) ToFile(jirix *jiri.X, filename string) error {
	if err := i.unfillDefaults(); err != nil {
		return err
	}
	out, err := xml.Marshal(i)
	if err != nil {
		return fmt.Errorf("import xml.Marshal failed: %v", err)
	}
	// Compact the output the same way Manifest.ToBytes does: collapse the
	// verbose empty element into the short "/>" form.
	out = bytes.Replace(out, endImportSoloBytes, endElemSoloBytes, -1)
	return safeWriteFile(jirix, filename, out)
}
// fillDefaults fills default values on a new-style remote import (Path
// defaults to "manifest", plus the embedded Project defaults), then validates.
// Old-style imports (no Remote) are left untouched apart from validation.
func (i *Import) fillDefaults() error {
	if i.Remote != "" {
		if i.Path == "" {
			i.Path = "manifest"
		}
		if err := i.Project.fillDefaults(); err != nil {
			return err
		}
	}
	return i.validate()
}
// unfillDefaults is the inverse of fillDefaults: it clears the default Path
// value and the embedded Project defaults on a remote import, then validates.
func (i *Import) unfillDefaults() error {
	if i.Remote != "" {
		if i.Path == "manifest" {
			i.Path = ""
		}
		if err := i.Project.unfillDefaults(); err != nil {
			return err
		}
	}
	return i.validate()
}
// validate checks that the import is well-formed.
//
// After the manifest transition is done, the "import" element will always
// denote remote imports, and the "remote" and "manifest" attributes will be
// required. During the transition we also allow old-style local imports,
// which only set the "name" attribute.
//
// The "name" attribute is allowed in both old and new styles, with different
// semantics; the presence of the "remote" attribute is what distinguishes the
// new style from the old.
func (i *Import) validate() error {
	nameOnly := *i
	nameOnly.Name = ""
	isOldStyle := i.Name != "" && nameOnly == (Import{})
	isNewStyle := i.Remote != "" && i.Manifest != ""
	if !isOldStyle && !isNewStyle {
		return fmt.Errorf("bad import: neither old style (only name is set) or new style (at least remote and manifest are set): %+v", *i)
	}
	return nil
}
// remoteKey returns a key based on the remote and manifest, used for
// cycle-detection. It's only valid for new-style remote imports; it's empty
// for the old-style local imports.
func (i *Import) remoteKey() string {
	if i.Remote == "" {
		return ""
	}
	// We don't join the remote and manifest with a slash, since that might not be
	// unique. E.g.
	// remote: https://foo.com/a/b remote: https://foo.com/a
	// manifest: c manifest: b/c
	// In both cases, the key would be https://foo.com/a/b/c.
	//
	// " + " is used instead; spaces do not normally occur in remote URLs, so
	// the resulting key should be unambiguous.
	return i.Remote + " + " + i.Manifest
}
// FileImport represents a file-based import.
type FileImport struct {
	// Manifest file to import from.
	File string `xml:"file,attr,omitempty"`
	// XMLName fixes the XML element name to "fileimport".
	XMLName struct{} `xml:"fileimport"`
}
// validate checks that the file import names a file.
func (i *FileImport) validate() error {
	if i.File != "" {
		return nil
	}
	return fmt.Errorf("bad fileimport: must specify file: %+v", *i)
}
// ProjectKey is a unique string for a project.
type ProjectKey string

// MakeProjectKey returns the project key, given the project name and remote.
func MakeProjectKey(name, remote string) ProjectKey {
	return ProjectKey(name + projectKeySeparator + remote)
}

// projectKeySeparator is a reserved string used in ProjectKeys. It cannot
// occur in Project names; Project.validate enforces this.
const projectKeySeparator = "="

// ProjectKeys is a slice of ProjectKeys implementing the Sort interface.
type ProjectKeys []ProjectKey

// Len, Less, and Swap implement sort.Interface, ordering keys
// lexicographically by their string value.
func (pks ProjectKeys) Len() int           { return len(pks) }
func (pks ProjectKeys) Less(i, j int) bool { return string(pks[i]) < string(pks[j]) }
func (pks ProjectKeys) Swap(i, j int)      { pks[i], pks[j] = pks[j], pks[i] }
// Project represents a jiri project.
type Project struct {
	// Name is the project name. It may not contain projectKeySeparator
	// (see validate).
	Name string `xml:"name,attr,omitempty"`
	// Path is the path used to store the project locally. Project
	// manifest uses paths that are relative to the $JIRI_ROOT
	// environment variable. When a manifest is parsed (e.g. in
	// RemoteProjects), the program logic converts the relative
	// paths to an absolute paths, using the current value of the
	// $JIRI_ROOT environment variable as a prefix.
	Path string `xml:"path,attr,omitempty"`
	// Protocol is the version control protocol used by the
	// project. If not set, "git" is used as the default. Only "git" is
	// accepted by validate.
	Protocol string `xml:"protocol,attr,omitempty"`
	// Remote is the project remote.
	Remote string `xml:"remote,attr,omitempty"`
	// RemoteBranch is the name of the remote branch to track. It doesn't affect
	// the name of the local branch that jiri maintains, which is always "master".
	RemoteBranch string `xml:"remotebranch,attr,omitempty"`
	// Revision is the revision the project should be advanced to
	// during "jiri update". If not set, "HEAD" is used as the
	// default.
	Revision string `xml:"revision,attr,omitempty"`
	// GerritHost is the gerrit host where project CLs will be sent.
	GerritHost string `xml:"gerrithost,attr,omitempty"`
	// GitHooks is a directory containing git hooks that will be installed for
	// this project.
	GitHooks string `xml:"githooks,attr,omitempty"`
	// XMLName fixes the XML element name to "project".
	XMLName struct{} `xml:"project"`
}
// Byte sequences used by ProjectFromFile to rewrite legacy upper-case
// <Project> elements (written by older jiri versions that lacked the XMLName
// field) into the lower-case form xml.Unmarshal expects.
var (
	startUpperProjectBytes = []byte("<Project")
	startLowerProjectBytes = []byte("<project")
	endUpperProjectBytes   = []byte("</Project>")
	endLowerProjectBytes   = []byte("</project>")
)
// ProjectFromFile returns a project parsed from the contents of filename,
// with defaults filled in.
func ProjectFromFile(jirix *jiri.X, filename string) (*Project, error) {
	data, err := jirix.NewSeq().ReadFile(filename)
	if err != nil {
		return nil, err
	}
	// Previous versions of the jiri tool had a bug where the project start and
	// end elements were in upper-case, since the XMLName field was missing. That
	// bug is now fixed, but the xml.Unmarshal call is case-sensitive, and will
	// fail if it sees the upper-case version. This hack rewrites the elements to
	// the lower-case version.
	//
	// TODO(toddw): Remove when the transition to new manifests is complete.
	for _, fix := range []struct{ from, to []byte }{
		{startUpperProjectBytes, startLowerProjectBytes},
		{endUpperProjectBytes, endLowerProjectBytes},
	} {
		data = bytes.Replace(data, fix.from, fix.to, -1)
	}
	project := &Project{}
	if err := xml.Unmarshal(data, project); err != nil {
		return nil, err
	}
	if err := project.fillDefaults(); err != nil {
		return nil, err
	}
	return project, nil
}
// ToFile writes the project p to a file with the given filename, with defaults
// unfilled.
func (p Project) ToFile(jirix *jiri.X, filename string) error {
	if err := p.unfillDefaults(); err != nil {
		return err
	}
	out, err := xml.Marshal(p)
	if err != nil {
		return fmt.Errorf("project xml.Marshal failed: %v", err)
	}
	// Compact the output the same way Manifest.ToBytes does: collapse the
	// verbose empty element into the short "/>" form.
	out = bytes.Replace(out, endProjectSoloBytes, endElemSoloBytes, -1)
	return safeWriteFile(jirix, filename, out)
}
// Key returns a unique ProjectKey for the project. The key combines the
// project name and remote (see MakeProjectKey), so two projects with the same
// name but different remotes get distinct keys.
func (p Project) Key() ProjectKey {
	return MakeProjectKey(p.Name, p.Remote)
}
// fillDefaults sets the default Protocol ("git"), RemoteBranch ("master"),
// and Revision ("HEAD") on any empty field, then validates the project.
func (p *Project) fillDefaults() error {
	defaults := []struct {
		field *string
		value string
	}{
		{&p.Protocol, "git"},
		{&p.RemoteBranch, "master"},
		{&p.Revision, "HEAD"},
	}
	for _, d := range defaults {
		if *d.field == "" {
			*d.field = d.value
		}
	}
	return p.validate()
}
// unfillDefaults is the inverse of fillDefaults: it clears any field holding
// its default value, then validates the project.
func (p *Project) unfillDefaults() error {
	defaults := []struct {
		field *string
		value string
	}{
		{&p.Protocol, "git"},
		{&p.RemoteBranch, "master"},
		{&p.Revision, "HEAD"},
	}
	for _, d := range defaults {
		if *d.field == d.value {
			*d.field = ""
		}
	}
	return p.validate()
}
// validate checks the project invariants: the name must not contain the
// reserved key separator, and only the git protocol is supported.
func (p *Project) validate() error {
	switch {
	case strings.Contains(p.Name, projectKeySeparator):
		return fmt.Errorf("bad project: name cannot contain %q: %+v", projectKeySeparator, *p)
	case p.Protocol != "" && p.Protocol != "git":
		return fmt.Errorf("bad project: only git protocol is supported: %+v", *p)
	}
	return nil
}
// Projects maps ProjectKeys to Projects.
type Projects map[ProjectKey]Project
// Find returns all projects in Projects with the given key or name. An exact
// key match wins; otherwise every project whose Name matches is returned.
func (ps Projects) Find(keyOrName string) Projects {
	matches := Projects{}
	if project, ok := ps[ProjectKey(keyOrName)]; ok {
		matches[ProjectKey(keyOrName)] = project
		return matches
	}
	for key, project := range ps {
		if project.Name == keyOrName {
			matches[key] = project
		}
	}
	return matches
}
// FindUnique returns the project in Projects with the given key or name, and
// returns an error if none or multiple matching projects are found.
func (ps Projects) FindUnique(keyOrName string) (Project, error) {
	matches := ps.Find(keyOrName)
	switch len(matches) {
	case 0:
		return Project{}, fmt.Errorf("no projects found with key or name %q", keyOrName)
	case 1:
		// Return the single project in matches.
		for _, project := range matches {
			return project, nil
		}
	}
	return Project{}, fmt.Errorf("multiple projects found with name %q", keyOrName)
}
// Tools maps jiri tool names, to their detailed description.
type Tools map[string]Tool

// Tool represents a jiri tool.
type Tool struct {
	// Data is a relative path to a directory for storing tool data
	// (e.g. tool configuration files). The purpose of this field is to
	// decouple the configuration of the data directory from the tool
	// itself so that the location of the data directory can change
	// without the need to change the tool.
	Data string `xml:"data,attr,omitempty"`
	// Name is the name of the tool binary.
	Name string `xml:"name,attr,omitempty"`
	// Package is the package path of the tool.
	Package string `xml:"package,attr,omitempty"`
	// Project identifies the project that contains the tool. If not
	// set, "https://vanadium.googlesource.com/<JiriProject>" is
	// used as the default.
	Project string `xml:"project,attr,omitempty"`
	// XMLName fixes the XML element name to "tool".
	XMLName struct{} `xml:"tool"`
}
// fillDefaults sets the default Data directory ("data") and the default
// Project (the vanadium-hosted jiri project) on any empty field.
func (t *Tool) fillDefaults() error {
	if t.Project == "" {
		t.Project = "https://vanadium.googlesource.com/" + JiriProject
	}
	if t.Data == "" {
		t.Data = "data"
	}
	return nil
}
// unfillDefaults clears the default Data directory. The Project field is
// deliberately left alone: the jiri project setting is not meant to be
// optional.
func (t *Tool) unfillDefaults() error {
	if t.Data == "data" {
		t.Data = ""
	}
	return nil
}
// ScanMode determines whether LocalProjects should scan the local filesystem
// for projects (FullScan), or optimistically assume that the local projects
// will match those in the manifest (FastScan).
type ScanMode bool

const (
	// FastScan trusts the manifest; LocalProjects falls back to a full scan
	// only if some manifest project is missing locally.
	FastScan = ScanMode(false)
	// FullScan always walks the filesystem under JIRI_ROOT.
	FullScan = ScanMode(true)
)
// UnsupportedProtocolErr is returned when a project specifies a version
// control protocol that jiri does not support; its value is the offending
// protocol name.
type UnsupportedProtocolErr string

// Error implements the error interface.
func (u UnsupportedProtocolErr) Error() string {
	return "unsupported protocol: " + string(u)
}
// Update represents an update of projects as a map from
// project names to a collections of commits (see PollProjects).
type Update map[string][]CL
// CreateSnapshot creates a manifest that encodes the current state of
// master branches of all projects and writes this snapshot out to the
// given file.
func CreateSnapshot(jirix *jiri.X, path string) error {
	jirix.TimerPush("create snapshot")
	defer jirix.TimerPop()
	// Record every local project, with its path made relative to the root.
	localProjects, err := LocalProjects(jirix, FullScan)
	if err != nil {
		return err
	}
	snapshot := Manifest{}
	for _, project := range localProjects {
		relPath, err := filepath.Rel(jirix.Root, project.Path)
		if err != nil {
			return err
		}
		project.Path = relPath
		snapshot.Projects = append(snapshot.Projects, project)
	}
	// Carry over the tools and hooks from the current manifest.
	_, currentTools, currentHooks, err := readManifest(jirix)
	if err != nil {
		return err
	}
	for _, tool := range currentTools {
		snapshot.Tools = append(snapshot.Tools, tool)
	}
	for _, hook := range currentHooks {
		snapshot.Hooks = append(snapshot.Hooks, hook)
	}
	return snapshot.ToFile(jirix, path)
}
// CurrentManifest returns a manifest that identifies the result of
// the most recent "jiri update" invocation.
//
// If the ".current_manifest" file does not exist, a warning is printed and an
// empty manifest is returned with a nil error; any other read/parse error is
// returned as-is.
func CurrentManifest(jirix *jiri.X) (*Manifest, error) {
	filename := filepath.Join(jirix.Root, ".current_manifest")
	m, err := ManifestFromFile(jirix, filename)
	if runutil.IsNotExist(err) {
		fmt.Fprintf(jirix.Stderr(), `WARNING: Could not find %s.
The contents of this file are stored as metadata in binaries the jiri
tool builds. To fix this problem, please run "jiri update".
`, filename)
		return &Manifest{}, nil
	}
	return m, err
}
// writeCurrentManifest writes the given manifest to a file that
// stores the result of the most recent "jiri update" invocation.
func writeCurrentManifest(jirix *jiri.X, manifest *Manifest) error {
	return manifest.ToFile(jirix, filepath.Join(jirix.Root, ".current_manifest"))
}
// CurrentProjectKey gets the key of the current project from the current
// directory by reading the jiri project metadata located in a directory at the
// root of the current repository.
//
// A ("", nil) result means the current directory is not inside a
// jiri-maintained project.
func CurrentProjectKey(jirix *jiri.X) (ProjectKey, error) {
	topLevel, err := jirix.Git().TopLevel()
	if err != nil {
		// NOTE(review): the error is discarded here — failing to find a git
		// top-level directory is treated as "not inside a project" rather than
		// an error. Confirm this is intentional before changing it.
		return "", nil
	}
	// Only treat the repository as a jiri project if its metadata dir exists.
	metadataDir := filepath.Join(topLevel, jiri.ProjectMetaDir)
	if _, err := jirix.NewSeq().Stat(metadataDir); err == nil {
		project, err := ProjectFromFile(jirix, filepath.Join(metadataDir, jiri.ProjectMetaFile))
		if err != nil {
			return "", err
		}
		return project.Key(), nil
	}
	return "", nil
}
// setProjectRevisions sets the current project revision from the master for
// each project as found on the filesystem
func setProjectRevisions(jirix *jiri.X, projects Projects) (_ Projects, e error) {
	for key, project := range projects {
		if project.Protocol != "git" {
			return nil, UnsupportedProtocolErr(project.Protocol)
		}
		revision, err := jirix.Git(tool.RootDirOpt(project.Path)).CurrentRevisionOfBranch("master")
		if err != nil {
			return nil, err
		}
		project.Revision = revision
		// Write the modified copy back; range yields copies of map values.
		projects[key] = project
	}
	return projects, nil
}
// LocalProjects returns projects on the local filesystem. If all projects in
// the manifest exist locally and scanMode is set to FastScan, then only the
// projects in the manifest that exist locally will be returned. Otherwise, a
// full scan of the filesystem will take place, and all found projects will be
// returned.
func LocalProjects(jirix *jiri.X, scanMode ScanMode) (Projects, error) {
	jirix.TimerPush("local projects")
	defer jirix.TimerPop()
	if scanMode == FastScan {
		// Fast path: no full scan was requested; if every manifest project
		// exists on the local filesystem, use the manifest projects directly.
		manifestProjects, _, err := ReadManifest(jirix)
		if err != nil {
			return nil, err
		}
		allExist, err := projectsExistLocally(jirix, manifestProjects)
		if err != nil {
			return nil, err
		}
		if allExist {
			return setProjectRevisions(jirix, manifestProjects)
		}
	}
	// Slow path: either a full scan was requested, or some manifest project
	// was not found locally. Recursively scan everything under JIRI_ROOT.
	found := Projects{}
	jirix.TimerPush("scan fs")
	err := findLocalProjects(jirix, jirix.Root, found)
	jirix.TimerPop()
	if err != nil {
		return nil, err
	}
	return setProjectRevisions(jirix, found)
}
// projectsExistLocally returns true iff all the given projects exist on the
// local filesystem.
// Note that this may return true even if there are projects on the local
// filesystem not included in the provided projects argument.
func projectsExistLocally(jirix *jiri.X, projects Projects) (bool, error) {
	jirix.TimerPush("match manifest")
	defer jirix.TimerPop()
	for _, project := range projects {
		local, err := isLocalProject(jirix, project.Path)
		if err != nil || !local {
			// err is nil in the "not local" case, matching the (false, nil)
			// contract for a missing project.
			return false, err
		}
	}
	return true, nil
}
// PollProjects returns the set of changelists that exist remotely but not
// locally. Changes are grouped by projects and contain author identification
// and a description of their content.
//
// projectSet, if non-empty, restricts the result to the named projects. Only
// projects for which computeOperations produces an updateOperation are
// inspected; all other projects get an empty CL list.
func PollProjects(jirix *jiri.X, projectSet map[string]struct{}) (_ Update, e error) {
	jirix.TimerPush("poll projects")
	defer jirix.TimerPop()
	// Switch back to current working directory when we're done.
	cwd, err := os.Getwd()
	if err != nil {
		return nil, err
	}
	defer collect.Error(func() error { return jirix.NewSeq().Chdir(cwd).Done() }, &e)
	// Gather local & remote project data.
	localProjects, err := LocalProjects(jirix, FastScan)
	if err != nil {
		return nil, err
	}
	remoteProjects, _, _, err := readManifest(jirix)
	if err != nil {
		return nil, err
	}
	// Compute difference between local and remote.
	update := Update{}
	ops := computeOperations(localProjects, remoteProjects, false, "")
	s := jirix.NewSeq()
	for _, op := range ops {
		name := op.Project().Name
		// If given a project set, limit our results to those projects in the set.
		if len(projectSet) > 0 {
			if _, ok := projectSet[name]; !ok {
				continue
			}
		}
		// We only inspect this project if an update operation is required.
		cls := []CL{}
		if updateOp, ok := op.(updateOperation); ok {
			switch updateOp.project.Protocol {
			case "git":
				// Enter project directory - this assumes absolute paths.
				if err := s.Chdir(updateOp.destination).Done(); err != nil {
					return nil, err
				}
				// Fetch the latest from origin.
				if err := jirix.Git().FetchRefspec("origin", updateOp.project.RemoteBranch); err != nil {
					return nil, err
				}
				// Collect commits visible from FETCH_HEAD that aren't visible from master.
				// The format string requests author name, author email, and raw
				// body, newline-separated, per commit.
				commitsText, err := jirix.Git().Log("FETCH_HEAD", "master", "%an%n%ae%n%B")
				if err != nil {
					return nil, err
				}
				// Format those commits and add them to the results.
				for _, commitText := range commitsText {
					// Each commit must have at least the three format fields.
					if got, want := len(commitText), 3; got < want {
						return nil, fmt.Errorf("Unexpected length of %v: got %v, want at least %v", commitText, got, want)
					}
					cls = append(cls, CL{
						Author:      commitText[0],
						Email:       commitText[1],
						Description: strings.Join(commitText[2:], "\n"),
					})
				}
			default:
				return nil, UnsupportedProtocolErr(updateOp.project.Protocol)
			}
		}
		update[name] = cls
	}
	return update, nil
}
// ReadManifest retrieves and parses the manifest that determines what
// projects and tools are part of the jiri universe.
func ReadManifest(jirix *jiri.X) (Projects, Tools, error) {
	projects, tools, _, err := readManifest(jirix)
	return projects, tools, err
}
// getManifestRemote returns the remote url of the origin from the manifest
// repo.
// TODO(nlacasse,toddw): Once the manifest project is specified in the
// manifest, we should get the remote directly from the manifest, and not from
// the filesystem.
func getManifestRemote(jirix *jiri.X, manifestPath string) (string, error) {
	var remote string
	// The closure assigns to remote; Pushd runs it with manifestPath as the
	// working directory, so RemoteUrl queries the manifest repository.
	return remote, jirix.NewSeq().Pushd(manifestPath).Call(
		func() (e error) {
			remote, e = jirix.Git().RemoteUrl("origin")
			return
		}, "get manifest origin").Done()
}
// readManifest resolves the configured manifest file and loads the projects,
// tools, and hooks it (transitively) declares.
func readManifest(jirix *jiri.X) (Projects, Tools, Hooks, error) {
	jirix.TimerPush("read manifest")
	defer jirix.TimerPop()
	manifestFile, err := jirix.ResolveManifestPath(jirix.Manifest())
	if err != nil {
		return nil, nil, nil, err
	}
	projects, tools, hooks := Projects{}, Tools{}, Hooks{}
	var imp importer
	if err := imp.Load(jirix, jirix.Root, manifestFile, "", projects, tools, hooks); err != nil {
		return nil, nil, nil, err
	}
	return projects, tools, hooks, nil
}
// updateManifestProjects brings the repositories backing all remote manifest
// imports up to date, delegating to the deprecated code path when the old
// manifest layout is in use.
func updateManifestProjects(jirix *jiri.X) error {
	jirix.TimerPush("update manifest")
	defer jirix.TimerPop()
	if jirix.UsingOldManifests() {
		return updateManifestProjectsDeprecated(jirix)
	}
	// TODO(toddw): Cache local projects in jirix, so that we don't need to
	// perform multiple full scans.
	localProjects, err := LocalProjects(jirix, FullScan)
	if err != nil {
		return err
	}
	manifestFile, err := jirix.ResolveManifestPath(jirix.Manifest())
	if err != nil {
		return err
	}
	var imp importer
	return imp.Update(jirix, jirix.Root, manifestFile, "", localProjects)
}
// updateManifestProjectsDeprecated updates the old-style single manifest
// repository at $JIRI_ROOT/.manifest by resetting it against its origin.
func updateManifestProjectsDeprecated(jirix *jiri.X) error {
	manifestPath := filepath.Join(jirix.Root, ".manifest")
	remote, err := getManifestRemote(jirix, manifestPath)
	if err != nil {
		return err
	}
	return resetProject(jirix, Project{
		Path:         manifestPath,
		Protocol:     "git",
		Remote:       remote,
		Revision:     "HEAD",
		RemoteBranch: "master",
	})
}
// UpdateUniverse updates all local projects and tools to match the
// remote counterparts identified by the given manifest. Optionally,
// the 'gc' flag can be used to indicate that local projects that no
// longer exist remotely should be removed.
func UpdateUniverse(jirix *jiri.X, gc bool) (e error) {
	jirix.TimerPush("update universe")
	defer jirix.TimerPop()
	// 0. Update all manifest projects to match their remote counterparts, and
	// read the manifest file.
	if err := updateManifestProjects(jirix); err != nil {
		return err
	}
	remoteProjects, remoteTools, remoteHooks, err := readManifest(jirix)
	if err != nil {
		return err
	}
	s := jirix.NewSeq()
	// 1. Update all local projects to match their remote counterparts.
	if err := updateProjects(jirix, remoteProjects, gc); err != nil {
		return err
	}
	// 2. Build all tools in a temporary directory, removed via the deferred
	// RemoveAll once the function returns (its error is folded into e).
	tmpDir, err := s.TempDir("", "tmp-jiri-tools-build")
	if err != nil {
		return fmt.Errorf("TempDir() failed: %v", err)
	}
	defer collect.Error(func() error { return s.RemoveAll(tmpDir).Done() }, &e)
	if err := buildToolsFromMaster(jirix, remoteTools, tmpDir); err != nil {
		return err
	}
	// 3. Install the tools into $JIRI_ROOT/.jiri_root/bin.
	if err := InstallTools(jirix, tmpDir); err != nil {
		return err
	}
	// 4. Run all specified hooks
	return runHooks(jirix, remoteHooks)
}
// ApplyToLocalMaster applies an operation expressed as the given function to
// the local master branch of the given projects.
//
// For each project, the current branch is recorded, unstaged changes are
// stashed, and master is checked out; the deferred closures restore each
// project (checkout original branch, stash pop) when this function returns,
// in reverse order of setup.
func ApplyToLocalMaster(jirix *jiri.X, projects Projects, fn func() error) (e error) {
	cwd, err := os.Getwd()
	if err != nil {
		return err
	}
	defer collect.Error(func() error { return jirix.NewSeq().Chdir(cwd).Done() }, &e)
	s := jirix.NewSeq()
	// Loop through all projects, checking out master and stashing any unstaged
	// changes.
	for _, project := range projects {
		// Capture a per-iteration copy so each deferred closure below sees its
		// own project, not the shared loop variable.
		p := project
		if err := s.Chdir(p.Path).Done(); err != nil {
			return err
		}
		switch p.Protocol {
		case "git":
			branch, err := jirix.Git().CurrentBranchName()
			if err != nil {
				return err
			}
			stashed, err := jirix.Git().Stash()
			if err != nil {
				return err
			}
			if err := jirix.Git().CheckoutBranch("master"); err != nil {
				return err
			}
			// After running the function, return to this project's directory,
			// checkout the original branch, and stash pop if necessary.
			// NOTE: defer-in-loop is intentional here — restoration must happen
			// at function exit, after fn() has run across all projects.
			defer collect.Error(func() error {
				if err := s.Chdir(p.Path).Done(); err != nil {
					return err
				}
				if err := jirix.Git().CheckoutBranch(branch); err != nil {
					return err
				}
				if stashed {
					return jirix.Git().StashPop()
				}
				return nil
			}, &e)
		default:
			return UnsupportedProtocolErr(p.Protocol)
		}
	}
	return fn()
}
// BuildTools builds the given tools and places the resulting binaries into the
// given directory.
//
// The tools are built with a single "go install" invocation whose GOPATH is
// assembled from the Go workspaces containing the tool projects (plus any
// workspaces from the environment's GOPATH), and whose GOBIN is outputDir.
func BuildTools(jirix *jiri.X, tools Tools, outputDir string) error {
	jirix.TimerPush("build tools")
	defer jirix.TimerPop()
	if len(tools) == 0 {
		// Nothing to do here...
		return nil
	}
	projects, err := LocalProjects(jirix, FastScan)
	if err != nil {
		return err
	}
	toolPkgs := []string{}
	workspaceSet := map[string]bool{}
	for _, tool := range tools {
		toolPkgs = append(toolPkgs, tool.Package)
		toolProject, err := projects.FindUnique(tool.Project)
		if err != nil {
			return err
		}
		// Identify the Go workspace the tool is in. To this end we use a
		// heuristic that identifies the maximal suffix of the project path
		// that corresponds to a prefix of the package name.
		workspace := ""
		for i := 0; i < len(toolProject.Path); i++ {
			if toolProject.Path[i] == filepath.Separator {
				if strings.HasPrefix("src/"+tool.Package, filepath.ToSlash(toolProject.Path[i+1:])) {
					workspace = toolProject.Path[:i]
					break
				}
			}
		}
		if workspace == "" {
			return fmt.Errorf("could not identify go workspace for tool %v", tool.Name)
		}
		workspaceSet[workspace] = true
	}
	workspaces := []string{}
	for workspace := range workspaceSet {
		workspaces = append(workspaces, workspace)
	}
	if envGoPath := os.Getenv("GOPATH"); envGoPath != "" {
		workspaces = append(workspaces, strings.Split(envGoPath, string(filepath.ListSeparator))...)
	}
	s := jirix.NewSeq()
	var stderr bytes.Buffer
	// We unset GOARCH and GOOS because jiri update should always build for the
	// native architecture and OS. Also, as of go1.5, setting GOBIN is not
	// compatible with GOARCH or GOOS.
	env := map[string]string{
		"GOARCH": "",
		"GOOS":   "",
		"GOBIN":  outputDir,
		"GOPATH": strings.Join(workspaces, string(filepath.ListSeparator)),
	}
	args := append([]string{"install"}, toolPkgs...)
	// Stdout is discarded; on failure the captured stderr is surfaced.
	if err := s.Env(env).Capture(ioutil.Discard, &stderr).Last("go", args...); err != nil {
		return fmt.Errorf("tool build failed\n%v", stderr.String())
	}
	return nil
}
// buildToolsFromMaster builds and installs all jiri tools using the version
// available in the local master branch of the tools repository. Notably, this
// function does not perform any version control operation on the master
// branch.
func buildToolsFromMaster(jirix *jiri.X, tools Tools, outputDir string) error {
	localProjects, err := LocalProjects(jirix, FastScan)
	if err != nil {
		return err
	}
	toolsToBuild, toolProjects := Tools{}, Projects{}
	var toolNames []string // Used for logging purposes.
	for _, tool := range tools {
		// Skip tools with no package specified. Besides increasing
		// robustness, this step also allows us to create jiri root
		// fakes without having to provide an implementation for the "jiri"
		// tool, which every manifest needs to specify.
		if tool.Package == "" {
			continue
		}
		project, err := localProjects.FindUnique(tool.Project)
		if err != nil {
			return err
		}
		toolProjects[project.Key()] = project
		toolsToBuild[tool.Name] = tool
		toolNames = append(toolNames, tool.Name)
	}
	buildFn := func() error {
		return ApplyToLocalMaster(jirix, toolProjects, func() error {
			return BuildTools(jirix, toolsToBuild, outputDir)
		})
	}
	// Always log the output of buildFn, irrespective of
	// the value of the verbose flag.
	if err := jirix.NewSeq().Verbose(true).Call(buildFn, "build tools: %v", strings.Join(toolNames, " ")).Done(); err != nil {
		fmt.Fprintf(jirix.Stderr(), "%v\n", err)
		return cmdline.ErrExitCode(2)
	}
	return nil
}
// CleanupProjects restores the given jiri projects back to their master
// branches and gets rid of all the local changes. If "cleanupBranches" is
// true, it will also delete all the non-master branches.
func CleanupProjects(jirix *jiri.X, projects Projects, cleanupBranches bool) (e error) {
	wd, err := os.Getwd()
	if err != nil {
		return fmt.Errorf("Getwd() failed: %v", err)
	}
	// Restore the working directory on exit, folding any error into e.
	defer collect.Error(func() error { return jirix.NewSeq().Chdir(wd).Done() }, &e)
	s := jirix.NewSeq()
	for _, project := range projects {
		if err := s.Chdir(project.Path).Done(); err != nil {
			return err
		}
		if err := resetLocalProject(jirix, cleanupBranches, project.RemoteBranch); err != nil {
			return err
		}
	}
	return nil
}
// resetLocalProject checks out the master branch, cleans up untracked files
// and uncommitted changes, and optionally deletes all the other branches.
//
// The project's working directory must be the process's current directory
// when this is called (see CleanupProjects).
func resetLocalProject(jirix *jiri.X, cleanupBranches bool, remoteBranch string) error {
	// Check out master and clean up changes.
	curBranchName, err := jirix.Git().CurrentBranchName()
	if err != nil {
		return err
	}
	if curBranchName != "master" {
		if err := jirix.Git().CheckoutBranch("master", gitutil.ForceOpt(true)); err != nil {
			return err
		}
	}
	if err := jirix.Git().RemoveUntrackedFiles(); err != nil {
		return err
	}
	// Discard any uncommitted changes by resetting to the remote branch
	// (defaulting to master when the project did not specify one).
	if remoteBranch == "" {
		remoteBranch = "master"
	}
	if err := jirix.Git().Reset("origin/" + remoteBranch); err != nil {
		return err
	}
	// Delete all the other branches.
	// At this point we should be at the master branch.
	branches, _, err := jirix.Git().GetBranches()
	if err != nil {
		return err
	}
	for _, branch := range branches {
		if branch == "master" {
			continue
		}
		if cleanupBranches {
			if err := jirix.Git().DeleteBranch(branch, gitutil.ForceOpt(true)); err != nil {
				// Bug fix: this previously returned nil, silently swallowing
				// the branch-deletion failure. Propagate the error instead.
				return err
			}
		}
	}
	return nil
}
// isLocalProject returns true if there is a project at the given path.
func isLocalProject(jirix *jiri.X, path string) (bool, error) {
	// A directory is a Jiri-maintained project iff it contains the
	// project metadata directory.
	metadataDir := filepath.Join(path, jiri.ProjectMetaDir)
	_, err := jirix.NewSeq().Stat(metadataDir)
	switch {
	case err == nil:
		return true, nil
	case runutil.IsNotExist(err):
		return false, nil
	default:
		return false, err
	}
}
// ProjectAtPath returns a Project struct corresponding to the project at the
// path in the filesystem.
func ProjectAtPath(jirix *jiri.X, path string) (Project, error) {
	metadataFile := filepath.Join(path, jiri.ProjectMetaDir, jiri.ProjectMetaFile)
	p, err := ProjectFromFile(jirix, metadataFile)
	if err != nil {
		return Project{}, err
	}
	// Metadata stores root-relative paths; make the path absolute.
	p.Path = filepath.Join(jirix.Root, p.Path)
	return *p, nil
}
// findLocalProjects scans the filesystem for all projects. Note that project
// directories can be nested recursively.
func findLocalProjects(jirix *jiri.X, path string, projects Projects) error {
	isLocal, err := isLocalProject(jirix, path)
	if err != nil {
		return err
	}
	if isLocal {
		project, err := ProjectAtPath(jirix, path)
		if err != nil {
			return err
		}
		// Sanity checks: the metadata must agree with where we found the
		// project, and keys must be unique across the whole tree.
		if project.Path != path {
			return fmt.Errorf("project %v has path %v but was found in %v", project.Name, project.Path, path)
		}
		if existing, ok := projects[project.Key()]; ok {
			return fmt.Errorf("name conflict: both %v and %v contain project with key %v", existing.Path, project.Path, project.Key())
		}
		projects[project.Key()] = project
	}
	// Recurse into all non-hidden subdirectories.
	fileInfos, err := jirix.NewSeq().ReadDir(path)
	if err != nil {
		return err
	}
	for _, fi := range fileInfos {
		if !fi.IsDir() || strings.HasPrefix(fi.Name(), ".") {
			continue
		}
		if err := findLocalProjects(jirix, filepath.Join(path, fi.Name()), projects); err != nil {
			return err
		}
	}
	return nil
}
// InstallTools installs the tools from the given directory into
// $JIRI_ROOT/.jiri_root/bin.
func InstallTools(jirix *jiri.X, dir string) error {
	jirix.TimerPush("install tools")
	defer jirix.TimerPop()
	if jirix.DryRun() {
		// In "dry run" mode, no binaries are built.
		return nil
	}
	fis, err := ioutil.ReadDir(dir)
	if err != nil {
		return fmt.Errorf("ReadDir(%v) failed: %v", dir, err)
	}
	binDir := jirix.BinDir()
	if err := jirix.NewSeq().MkdirAll(binDir, 0755).Done(); err != nil {
		return fmt.Errorf("MkdirAll(%v) failed: %v", binDir, err)
	}
	failed := false
	s := jirix.NewSeq()
	for _, fi := range fis {
		name := fi.Name()
		moveTool := func() error {
			return jirix.NewSeq().Rename(filepath.Join(dir, name), filepath.Join(binDir, name)).Done()
		}
		// Install each tool independently; record failures but keep going
		// so one bad tool does not block the rest.
		if err := s.Verbose(true).Call(moveTool, "install tool %q", name).Done(); err != nil {
			fmt.Fprintf(jirix.Stderr(), "%v\n", err)
			failed = true
		}
	}
	if failed {
		return cmdline.ErrExitCode(2)
	}
	return nil
}
// TransitionBinDir handles the transition from the old location
// $JIRI_ROOT/devtools/bin to the new $JIRI_ROOT/.jiri_root/bin. In
// InstallTools above we've already installed the tools to the new location.
//
// For now we want $JIRI_ROOT/devtools/bin symlinked to the new location, so
// that users won't perceive a difference in behavior. In addition, we want to
// save the old binaries to $JIRI_ROOT/.jiri_root/bin.BACKUP the first time this
// is run. That way if we screwed something up, the user can recover their old
// binaries.
//
// TODO(toddw): Remove this logic after the transition to .jiri_root is done.
func TransitionBinDir(jirix *jiri.X) error {
	s := jirix.NewSeq()
	oldDir, newDir := filepath.Join(jirix.Root, "devtools", "bin"), jirix.BinDir()
	// Lstat (not Stat) so an existing symlink is seen as a symlink rather
	// than being followed to its target.
	switch info, err := s.Lstat(oldDir); {
	case runutil.IsNotExist(err):
		// Drop down to create the symlink below.
	case err != nil:
		return fmt.Errorf("Failed to stat old bin dir: %v", err)
	case info.Mode()&os.ModeSymlink != 0:
		link, err := s.Readlink(oldDir)
		if err != nil {
			return fmt.Errorf("Failed to read link from old bin dir: %v", err)
		}
		if filepath.Clean(link) == newDir {
			// The old dir is already correctly symlinked to the new dir.
			return nil
		}
		// Symlink points somewhere else: treat it like any other stale old
		// dir and back it up via the default case.
		fallthrough
	default:
		// The old dir exists, and either it's not a symlink, or it's a symlink that
		// doesn't point to the new dir. Move the old dir to the backup location.
		backupDir := newDir + ".BACKUP"
		switch _, err := s.Stat(backupDir); {
		case runutil.IsNotExist(err):
			if err := s.Rename(oldDir, backupDir).Done(); err != nil {
				return fmt.Errorf("Failed to backup old bin dir %v to %v: %v", oldDir, backupDir, err)
			}
			// Drop down to create the symlink below.
		case err != nil:
			return fmt.Errorf("Failed to stat backup bin dir: %v", err)
		default:
			// A backup already exists; refuse to clobber it.
			return fmt.Errorf("Backup bin dir %v already exists", backupDir)
		}
	}
	// Create the symlink.
	if err := s.MkdirAll(filepath.Dir(oldDir), 0755).Symlink(newDir, oldDir).Done(); err != nil {
		return fmt.Errorf("Failed to symlink to new bin dir %v from %v: %v", newDir, oldDir, err)
	}
	return nil
}
// runHooks runs the specified hooks
func runHooks(jirix *jiri.X, hooks Hooks) error {
	jirix.TimerPush("run hooks")
	defer jirix.TimerPop()
	s := jirix.NewSeq()
	for _, hook := range hooks {
		// With an interpreter configured, run the interpreter and pass the
		// hook path as its first argument; otherwise execute the hook
		// directly.
		var command string
		var args []string
		if hook.Interpreter == "" {
			command = hook.Path
			args = []string{}
		} else {
			command = hook.Interpreter
			args = []string{hook.Path}
		}
		for _, a := range hook.Args {
			args = append(args, a.Arg)
		}
		if err := s.Last(command, args...); err != nil {
			return fmt.Errorf("Hook %v failed: %v command: %v args: %v", hook.Name, err, command, args)
		}
	}
	return nil
}
// resetProject advances the local master branch of the given
// project, which is expected to exist locally at project.Path.
func resetProject(jirix *jiri.X, project Project) error {
	reset := func() error {
		if project.Protocol != "git" {
			return UnsupportedProtocolErr(project.Protocol)
		}
		if project.Remote == "" {
			return fmt.Errorf("project %v does not have a remote", project.Name)
		}
		if err := jirix.Git().SetRemoteUrl("origin", project.Remote); err != nil {
			return err
		}
		if err := jirix.Git().Fetch("origin"); err != nil {
			return err
		}
		// Having a specific revision trumps everything else - once fetched,
		// always reset to that revision.
		if project.Revision != "" && project.Revision != "HEAD" {
			return jirix.Git().Reset(project.Revision)
		}
		// If no revision, reset to the configured remote branch, or master
		// if no remote branch.
		branch := project.RemoteBranch
		if branch == "" {
			branch = "master"
		}
		return jirix.Git().Reset("origin/" + branch)
	}
	return ApplyToLocalMaster(jirix, Projects{project.Key(): project}, reset)
}
// importer handles importing manifest files. There are two uses: Load reads
// full manifests into memory, while Update updates remote manifest projects.
type importer struct {
	// cycleStack records the (file, key) of every import currently being
	// processed; importNoCycles uses it to detect import cycles.
	cycleStack []cycleInfo
}

// cycleInfo identifies one manifest import: file is the manifest's path on
// the local filesystem; key identifies a remote import and is empty for
// local fileimports (see the comment on importNoCycles).
type cycleInfo struct {
	file, key string
}
// importNoCycles checks for cycles in imports. There are two types of cycles:
//   file - Cycle in the paths of manifest files in the local filesystem.
//   key  - Cycle in the remote manifests specified by remote imports.
//
// Example of file cycles. File A imports file B, and vice versa.
//     file=manifest/A              file=manifest/B
//     <manifest>                   <manifest>
//       <fileimport file="B"/>       <fileimport file="A"/>
//     </manifest>                  </manifest>
//
// Example of key cycles. The key consists of "remote/manifest", e.g.
//   https://vanadium.googlesource.com/manifest/v2/default
// In the example, key x/A imports y/B, and vice versa.
//     key=x/A                                 key=y/B
//     <manifest>                              <manifest>
//       <import remote="y" manifest="B"/>       <import remote="x" manifest="A"/>
//     </manifest>                             </manifest>
//
// The above examples are simple, but the general strategy is demonstrated. We
// keep a single stack for both files and keys, and push onto each stack before
// running the recursive load or update function, and pop the stack when the
// function is done. If we see a duplicate on the stack at any point, we know
// there's a cycle. Note that we know the file for both local fileimports as
// well as remote imports, but we only know the key for remote imports; the key
// for local fileimports is empty.
//
// A more complex case would involve a combination of local fileimports and
// remote imports, using the "root" attribute to change paths on the local
// filesystem. In this case the key will eventually expose the cycle.
func (imp *importer) importNoCycles(file, key string, fn func() error) error {
	info := cycleInfo{file, key}
	// A repeated file or (non-empty) key anywhere on the stack is a cycle.
	for _, frame := range imp.cycleStack {
		switch {
		case frame.file == file:
			return fmt.Errorf("import cycle detected in local manifest files: %q", append(imp.cycleStack, info))
		case key != "" && frame.key == key:
			return fmt.Errorf("import cycle detected in remote manifest imports: %q", append(imp.cycleStack, info))
		}
	}
	// Push, run, pop. The stack is deliberately left un-popped on error:
	// the caller aborts anyway, and the frames aid the error message.
	imp.cycleStack = append(imp.cycleStack, info)
	if err := fn(); err != nil {
		return err
	}
	imp.cycleStack = imp.cycleStack[:len(imp.cycleStack)-1]
	return nil
}
// Load wraps load with import-cycle detection.
func (imp *importer) Load(jirix *jiri.X, root, file, key string, projects Projects, tools Tools, hooks Hooks) error {
	doLoad := func() error {
		return imp.load(jirix, root, file, projects, tools, hooks)
	}
	return imp.importNoCycles(file, key, doLoad)
}
// load reads the manifest at file, recursively loads everything it imports,
// and accumulates the resulting projects, tools, and hooks into the given
// maps. Project and hook paths are made absolute relative to root.
func (imp *importer) load(jirix *jiri.X, root, file string, projects Projects, tools Tools, hooks Hooks) error {
	m, err := ManifestFromFile(jirix, file)
	if err != nil {
		return err
	}
	// Process all imports.
	for _, im := range m.Imports {
		newRoot, newFile := root, ""
		if im.Remote == "" {
			// Old-style name-based local import.
			//
			// TODO(toddw): Remove this logic when the manifest transition is done.
			resolved, rerr := jirix.ResolveManifestPath(im.Name)
			if rerr != nil {
				return rerr
			}
			newFile = resolved
		} else {
			// New-style remote import.
			newRoot = filepath.Join(root, im.Root)
			newFile = filepath.Join(newRoot, im.Path, im.Manifest)
		}
		if err := imp.Load(jirix, newRoot, newFile, im.remoteKey(), projects, tools, hooks); err != nil {
			return err
		}
	}
	// Process all file imports, resolved relative to the importing file.
	for _, fi := range m.FileImports {
		newFile := filepath.Join(filepath.Dir(file), fi.File)
		if err := imp.Load(jirix, root, newFile, "", projects, tools, hooks); err != nil {
			return err
		}
	}
	// Record projects, anchoring their paths under root.
	for _, p := range m.Projects {
		p.Path = filepath.Join(root, p.Path)
		projects[p.Key()] = p
	}
	// Record tools by name.
	for _, t := range m.Tools {
		tools[t.Name] = t
	}
	// Record hooks, resolving each hook's path relative to its project.
	for _, h := range m.Hooks {
		p, err := projects.FindUnique(h.Project)
		if err != nil {
			return fmt.Errorf("error while finding project %q for hook %q: %v", h.Project, h.Name, err)
		}
		h.Path = filepath.Join(p.Path, h.Path)
		hooks[h.Name] = h
	}
	return nil
}
// Update wraps update with import-cycle detection.
func (imp *importer) Update(jirix *jiri.X, root, file, key string, localProjects Projects) error {
	doUpdate := func() error {
		return imp.update(jirix, root, file, localProjects)
	}
	return imp.importNoCycles(file, key, doUpdate)
}
// update processes the manifest at file: it creates/moves/updates the
// projects backing remote imports (recursing into their manifests), then
// recurses into old-style local imports and file imports. It never deletes
// projects; deletes are handled in updateProjects.
func (imp *importer) update(jirix *jiri.X, root, file string, localProjects Projects) error {
	m, err := ManifestFromFile(jirix, file)
	if err != nil {
		return err
	}
	// Process all remote imports. This logic treats the remote import as a
	// regular project, and runs our regular create/move/update logic on it. We
	// never handle deletes here; those are handled in updateProjects.
	for _, remote := range m.Imports {
		if remote.Remote == "" {
			// Old-style local imports handled in loop below.
			continue
		}
		newRoot := filepath.Join(root, remote.Root)
		remote.Path = filepath.Join(newRoot, remote.Path)
		newFile := filepath.Join(remote.Path, remote.Manifest)
		// localProject is non-nil iff the imported project already exists
		// locally; computeOp then yields a move/update/no-op instead of a
		// create.
		var localProject *Project
		if p, ok := localProjects[remote.Project.Key()]; ok {
			localProject = &p
		}
		// Since &remote.Project is never nil, we'll never produce a delete op.
		op := computeOp(localProject, &remote.Project, false, newRoot)
		if err := op.Test(jirix, newFsUpdates()); err != nil {
			return err
		}
		updateFn := func() error { return op.Run(jirix, nil) }
		// Always log the operation, regardless of the verbose flag.
		if err := jirix.NewSeq().Verbose(true).Call(updateFn, "%v", op).Done(); err != nil {
			fmt.Fprintf(jirix.Stderr(), "%v\n", err)
			return err
		}
		localProjects[remote.Project.Key()] = remote.Project
		// Recurse into the manifest provided by the freshly updated import.
		if err := imp.Update(jirix, newRoot, newFile, remote.remoteKey(), localProjects); err != nil {
			return err
		}
	}
	// Process all old-style local imports.
	for _, local := range m.Imports {
		if local.Remote != "" {
			// New-style remote imports handled in loop above.
			continue
		}
		newFile, err := jirix.ResolveManifestPath(local.Name)
		if err != nil {
			return err
		}
		if err := imp.Update(jirix, root, newFile, "", localProjects); err != nil {
			return err
		}
	}
	// Process all file imports, resolved relative to the importing file.
	for _, fileImport := range m.FileImports {
		newFile := filepath.Join(filepath.Dir(file), fileImport.File)
		if err := imp.Update(jirix, root, newFile, "", localProjects); err != nil {
			return err
		}
	}
	return nil
}
// reportNonMaster checks if the given project is on master branch and
// if not, reports this fact along with information on how to update it.
func reportNonMaster(jirix *jiri.X, project Project) (e error) {
	startDir, err := os.Getwd()
	if err != nil {
		return err
	}
	// Restore the original working directory on the way out.
	defer collect.Error(func() error { return jirix.NewSeq().Chdir(startDir).Done() }, &e)
	s := jirix.NewSeq()
	if err := s.Chdir(project.Path).Done(); err != nil {
		return err
	}
	if project.Protocol != "git" {
		return UnsupportedProtocolErr(project.Protocol)
	}
	current, err := jirix.Git().CurrentBranchName()
	if err != nil {
		return err
	}
	if current == "master" {
		return nil
	}
	notice := []string{
		fmt.Sprintf(`NOTE: "jiri update" only updates the "master" branch and the current branch is %q`, current),
		fmt.Sprintf(`to update the %q branch once the master branch is updated, run "git merge master"`, current),
	}
	s.Verbose(true).Output(notice)
	return nil
}
// collectGoogleSourceHosts returns a slice of googlesource hosts for the given
// projects. Each host will appear once in the slice.
func collectGoogleSourceHosts(ps Projects) []string {
	seen := map[string]bool{}
	for _, p := range ps {
		if !googlesource.IsGoogleSourceRemote(p.Remote) {
			continue
		}
		// Unparseable remotes are silently skipped.
		if u, err := url.Parse(p.Remote); err == nil {
			seen[u.Scheme+"://"+u.Host] = true
		}
	}
	return set.StringBool.ToSlice(seen)
}
// getRemoteHeadRevisions attempts to get the repo statuses from remote for HEAD
// projects so we can detect when a local project is already up-to-date.
// Failures are logged and ignored; remoteProjects is updated in place.
func getRemoteHeadRevisions(jirix *jiri.X, remoteProjects Projects) {
	anyAtHead := false
	for _, rp := range remoteProjects {
		if rp.Revision == "HEAD" {
			anyAtHead = true
			break
		}
	}
	if !anyAtHead {
		return
	}
	// Gather statuses from every googlesource host the projects reference.
	statuses := googlesource.RepoStatuses{}
	for _, host := range collectGoogleSourceHosts(remoteProjects) {
		hostStatuses, err := googlesource.GetRepoStatuses(jirix, host)
		if err != nil {
			// Log the error but don't fail.
			fmt.Fprintf(jirix.Stderr(), "Error fetching repo statuses from remote: %v\n", err)
			continue
		}
		for repo, status := range hostStatuses {
			statuses[repo] = status
		}
	}
	// Pin each HEAD project to its remote master revision when known.
	for name, rp := range remoteProjects {
		if rp.Revision != "HEAD" {
			continue
		}
		status, ok := statuses[rp.Name]
		if !ok {
			continue
		}
		if rev, ok := status.Branches["master"]; ok && rev != "" {
			rp.Revision = rev
			remoteProjects[name] = rp
		}
	}
}
// updateProjects reconciles the local checkouts with remoteProjects: it
// computes the necessary operations, dry-runs them all, executes them, and
// finally records the resulting state in the current manifest.
func updateProjects(jirix *jiri.X, remoteProjects Projects, gc bool) error {
	jirix.TimerPush("update projects")
	defer jirix.TimerPop()
	scanMode := FastScan
	if gc {
		scanMode = FullScan
	}
	localProjects, err := LocalProjects(jirix, scanMode)
	if err != nil {
		return err
	}
	getRemoteHeadRevisions(jirix, remoteProjects)
	ops := computeOperations(localProjects, remoteProjects, gc, "")
	// Dry-run every operation before executing any of them.
	updates := newFsUpdates()
	for _, op := range ops {
		if err := op.Test(jirix, updates); err != nil {
			return err
		}
	}
	manifest := &Manifest{Label: jirix.Manifest()}
	s := jirix.NewSeq()
	failed := false
	for _, op := range ops {
		runOp := func() error { return op.Run(jirix, manifest) }
		// Always log the output of runOp, irrespective of
		// the value of the verbose flag.
		if err := s.Verbose(true).Call(runOp, "%v", op).Done(); err != nil {
			fmt.Fprintf(jirix.Stderr(), "%v\n", err)
			failed = true
		}
	}
	if failed {
		return cmdline.ErrExitCode(2)
	}
	return writeCurrentManifest(jirix, manifest)
}
// writeMetadata stores the given project metadata in the directory
// identified by the given path.
func writeMetadata(jirix *jiri.X, project Project, dir string) (e error) {
	metadataDir := filepath.Join(dir, jiri.ProjectMetaDir)
	startDir, err := os.Getwd()
	if err != nil {
		return err
	}
	// Restore the original working directory on the way out.
	defer collect.Error(func() error { return jirix.NewSeq().Chdir(startDir).Done() }, &e)
	s := jirix.NewSeq()
	if err := s.MkdirAll(metadataDir, os.FileMode(0755)).Chdir(metadataDir).Done(); err != nil {
		return err
	}
	// Replace absolute project paths with relative paths to make it
	// possible to move the $JIRI_ROOT directory locally.
	relPath, err := filepath.Rel(jirix.Root, project.Path)
	if err != nil {
		return err
	}
	project.Path = relPath
	return project.ToFile(jirix, filepath.Join(metadataDir, jiri.ProjectMetaFile))
}
// addProjectToManifest records the information about the given
// project in the given manifest. The function is used to create a
// manifest that records the current state of jiri projects, which
// can be used to restore this state at some later point.
//
// NOTE: The function assumes that the given project is on a
// master branch.
func addProjectToManifest(jirix *jiri.X, manifest *Manifest, project Project) error {
	if manifest == nil {
		return nil
	}
	if project.Protocol != "git" {
		return UnsupportedProtocolErr(project.Protocol)
	}
	// If the project uses relative revision, replace it with an absolute one.
	if project.Revision == "HEAD" {
		rev, err := jirix.Git(tool.RootDirOpt(project.Path)).CurrentRevision()
		if err != nil {
			return err
		}
		project.Revision = rev
	}
	// Store the path relative to $JIRI_ROOT so the root can be relocated.
	relPath, err := filepath.Rel(jirix.Root, project.Path)
	if err != nil {
		return err
	}
	project.Path = relPath
	manifest.Projects = append(manifest.Projects, project)
	return nil
}
// fsUpdates is used to track filesystem updates made by operations.
// TODO(nlacasse): Currently we only use fsUpdates to track deletions so that
// jiri can delete and create a project in the same directory in one update.
// There are lots of other cases that should be covered though, like detecting
// when two projects would be created in the same directory.
type fsUpdates struct {
	// deletedDirs maps cleaned directory paths to true once deleted.
	deletedDirs map[string]bool
}

// newFsUpdates returns an empty update tracker.
func newFsUpdates() *fsUpdates {
	return &fsUpdates{deletedDirs: map[string]bool{}}
}

// deleteDir records that dir has been deleted.
func (u *fsUpdates) deleteDir(dir string) {
	u.deletedDirs[filepath.Clean(dir)] = true
}

// isDeleted reports whether dir was previously recorded as deleted.
func (u *fsUpdates) isDeleted(dir string) bool {
	return u.deletedDirs[filepath.Clean(dir)]
}
// operation is one step of a jiri update; each concrete implementation
// (create/delete/move/update/null) acts on a single project.
type operation interface {
	// Project identifies the project this operation pertains to.
	Project() Project
	// Run executes the operation.
	Run(jirix *jiri.X, manifest *Manifest) error
	// String returns a string representation of the operation.
	String() string
	// Test checks whether the operation would fail.
	Test(jirix *jiri.X, updates *fsUpdates) error
}
// commonOperation represents a project operation. It is embedded by the
// concrete operation types to share the project/source/destination fields.
type commonOperation struct {
	// project holds information about the project such as its
	// name, local path, and the protocol it uses for version
	// control.
	project Project
	// destination is the new project path.
	destination string
	// source is the current project path.
	source string
}

// Project returns the project this operation pertains to.
func (op commonOperation) Project() Project {
	return op.project
}
// createOperation represents the creation of a project.
type createOperation struct {
	commonOperation
	// root is the path prefix against which the project's GitHooks
	// directory is resolved (see Run).
	root string
}
// Run clones the project into a temporary directory, optionally installs
// git hooks, excludes /.jiri/ from git, checks out the requested revision,
// writes project metadata, and only then renames the temporary directory
// into op.destination so an untimely termination cannot leave a
// half-created project behind.
func (op createOperation) Run(jirix *jiri.X, manifest *Manifest) (e error) {
	s := jirix.NewSeq()
	path, perm := filepath.Dir(op.destination), os.FileMode(0755)
	tmpDirPrefix := strings.Replace(op.Project().Name, "/", ".", -1) + "-"
	// Create a temporary directory for the initial setup of the
	// project to prevent an untimely termination from leaving the
	// $JIRI_ROOT directory in an inconsistent state.
	tmpDir, err := s.MkdirAll(path, perm).TempDir(path, tmpDirPrefix)
	if err != nil {
		return err
	}
	// Best-effort cleanup; a no-op once the rename below has succeeded.
	defer collect.Error(func() error { return jirix.NewSeq().RemoveAll(tmpDir).Done() }, &e)
	switch op.project.Protocol {
	case "git":
		if err := jirix.Git().Clone(op.project.Remote, tmpDir); err != nil {
			return err
		}
		// Apply git hooks. We're creating this repo, so there's no danger of
		// overriding existing hooks. Customizing your git hooks with jiri is a bad
		// idea anyway, since jiri won't know to not delete the project when you
		// switch between manifests or do a cleanup.
		gitHookDir := filepath.Join(tmpDir, ".git", "hooks")
		if op.project.GitHooks != "" {
			// The GitHooks path is relative to op.root; resolve it once and
			// use the same base for both the walk and the Rel computation.
			// BUG FIX: previously Rel used the unjoined op.project.GitHooks
			// as its base while Walk used the joined path, producing wrong
			// relative paths (or an error) whenever op.root was non-empty.
			gitHooksSrc := filepath.Join(op.root, op.project.GitHooks)
			// Copy the specified GitHooks directory into the project's git
			// hook directory. We walk the file system, creating directories
			// and copying files as we encounter them.
			copyFn := func(path string, info os.FileInfo, err error) error {
				if err != nil {
					return err
				}
				relPath, err := filepath.Rel(gitHooksSrc, path)
				if err != nil {
					return err
				}
				dst := filepath.Join(gitHookDir, relPath)
				if info.IsDir() {
					return s.MkdirAll(dst, perm).Done()
				}
				src, err := s.ReadFile(path)
				if err != nil {
					return err
				}
				return s.WriteFile(dst, src, perm).Done()
			}
			if err := filepath.Walk(gitHooksSrc, copyFn); err != nil {
				return err
			}
		}
		// Apply exclusion for /.jiri/. We're creating the repo so we can safely
		// write to .git/info/exclude
		excludeString := "/.jiri/\n"
		excludeDir := filepath.Join(tmpDir, ".git", "info")
		excludeFile := filepath.Join(excludeDir, "exclude")
		if err := s.MkdirAll(excludeDir, os.FileMode(0750)).
			WriteFile(excludeFile, []byte(excludeString), perm).Done(); err != nil {
			return err
		}
		cwd, err := os.Getwd()
		if err != nil {
			return err
		}
		// Restore the original working directory on the way out.
		defer collect.Error(func() error { return jirix.NewSeq().Chdir(cwd).Done() }, &e)
		if err := s.Chdir(tmpDir).Done(); err != nil {
			return err
		}
		if err := jirix.Git().Reset(op.project.Revision); err != nil {
			return err
		}
	default:
		return UnsupportedProtocolErr(op.project.Protocol)
	}
	if err := writeMetadata(jirix, op.project, tmpDir); err != nil {
		return err
	}
	// Atomically move the fully prepared checkout into place.
	if err := s.Chmod(tmpDir, os.FileMode(0755)).
		Rename(tmpDir, op.destination).Done(); err != nil {
		return err
	}
	if err := resetProject(jirix, op.project); err != nil {
		return err
	}
	return addProjectToManifest(jirix, manifest, op.project)
}
// String describes the create operation for logging.
func (op createOperation) String() string {
	rev := fmtRevision(op.project.Revision)
	return fmt.Sprintf("create project %q in %q and advance it to %q", op.project.Name, op.destination, rev)
}
// Test fails when the destination already exists, unless an earlier
// operation in the same update has deleted it.
func (op createOperation) Test(jirix *jiri.X, updates *fsUpdates) error {
	_, err := jirix.NewSeq().Stat(op.destination)
	switch {
	case err == nil:
		if updates.isDeleted(op.destination) {
			return nil
		}
		return fmt.Errorf("cannot create %q as it already exists", op.destination)
	case runutil.IsNotExist(err):
		return nil
	default:
		return err
	}
}
// deleteOperation represents the deletion of a project.
type deleteOperation struct {
	commonOperation
	// gc determines whether the operation should be executed or
	// whether it should only print a notification (see "jiri update -gc"
	// in the Run output).
	gc bool
}
// Run removes the project directory when gc is set and the checkout is
// clean (exactly one branch, no uncommitted or untracked content);
// otherwise it only prints a notification telling the user how to remove
// the project manually.
func (op deleteOperation) Run(jirix *jiri.X, _ *Manifest) error {
	s := jirix.NewSeq()
	if op.gc {
		// Never delete projects with non-master branches, uncommitted
		// work, or untracked content.
		git := jirix.Git(tool.RootDirOpt(op.project.Path))
		branches, _, err := git.GetBranches()
		if err != nil {
			return err
		}
		uncommitted, err := git.HasUncommittedChanges()
		if err != nil {
			return err
		}
		untracked, err := git.HasUntrackedFiles()
		if err != nil {
			return err
		}
		if len(branches) != 1 || uncommitted || untracked {
			// Not safe to delete: explain why and leave the checkout alone.
			lines := []string{
				fmt.Sprintf("NOTE: project %v was not found in the project manifest", op.project.Name),
				"however this project either contains non-master branches, uncommitted",
				"work, or untracked files and will thus not be deleted",
			}
			s.Verbose(true).Output(lines)
			return nil
		}
		return s.RemoveAll(op.source).Done()
	}
	// Without -gc we never delete; tell the user how to clean up.
	lines := []string{
		fmt.Sprintf("NOTE: project %v was not found in the project manifest", op.project.Name),
		"it was not automatically removed to avoid deleting uncommitted work",
		fmt.Sprintf(`if you no longer need it, invoke "rm -rf %v"`, op.source),
		`or invoke "jiri update -gc" to remove all such local projects`,
	}
	s.Verbose(true).Output(lines)
	return nil
}
// String describes the delete operation for logging.
func (op deleteOperation) String() string {
	return fmt.Sprintf("delete project %q from %q", op.project.Name, op.source)
}

// Test verifies the source exists and records its deletion so a later
// operation may create a project in the same directory.
func (op deleteOperation) Test(jirix *jiri.X, updates *fsUpdates) error {
	_, err := jirix.NewSeq().Stat(op.source)
	switch {
	case runutil.IsNotExist(err):
		return fmt.Errorf("cannot delete %q as it does not exist", op.source)
	case err != nil:
		return err
	}
	updates.deleteDir(op.source)
	return nil
}
// moveOperation represents the relocation of a project from its current
// path (source) to a new path (destination); it also advances the project.
type moveOperation struct {
	commonOperation
}
// Run renames the project directory into place, then brings the moved
// project up to date, rewrites its metadata, and records it in the manifest.
func (op moveOperation) Run(jirix *jiri.X, manifest *Manifest) error {
	parent, perm := filepath.Dir(op.destination), os.FileMode(0755)
	if err := jirix.NewSeq().MkdirAll(parent, perm).Rename(op.source, op.destination).Done(); err != nil {
		return err
	}
	if err := reportNonMaster(jirix, op.project); err != nil {
		return err
	}
	if err := resetProject(jirix, op.project); err != nil {
		return err
	}
	if err := writeMetadata(jirix, op.project, op.project.Path); err != nil {
		return err
	}
	return addProjectToManifest(jirix, manifest, op.project)
}
// String describes the move operation for logging.
func (op moveOperation) String() string {
	rev := fmtRevision(op.project.Revision)
	return fmt.Sprintf("move project %q located in %q to %q and advance it to %q", op.project.Name, op.source, op.destination, rev)
}

// Test verifies the source exists and the destination does not, and records
// the source directory as deleted.
func (op moveOperation) Test(jirix *jiri.X, updates *fsUpdates) error {
	s := jirix.NewSeq()
	if _, err := s.Stat(op.source); err != nil {
		if runutil.IsNotExist(err) {
			return fmt.Errorf("cannot move %q to %q as the source does not exist", op.source, op.destination)
		}
		return err
	}
	switch _, err := s.Stat(op.destination); {
	case err == nil:
		return fmt.Errorf("cannot move %q to %q as the destination already exists", op.source, op.destination)
	case !runutil.IsNotExist(err):
		return err
	}
	updates.deleteDir(op.source)
	return nil
}
// updateOperation represents the update of a project in place (same path,
// different revision).
type updateOperation struct {
	commonOperation
}
// Run warns when the project is off master, advances the project, rewrites
// its metadata, and records it in the manifest.
func (op updateOperation) Run(jirix *jiri.X, manifest *Manifest) error {
	steps := []func() error{
		func() error { return reportNonMaster(jirix, op.project) },
		func() error { return resetProject(jirix, op.project) },
		func() error { return writeMetadata(jirix, op.project, op.project.Path) },
	}
	for _, step := range steps {
		if err := step(); err != nil {
			return err
		}
	}
	return addProjectToManifest(jirix, manifest, op.project)
}

// String describes the update operation for logging.
func (op updateOperation) String() string {
	rev := fmtRevision(op.project.Revision)
	return fmt.Sprintf("advance project %q located in %q to %q", op.project.Name, op.source, rev)
}

// Test always succeeds: updating an existing project has no preconditions.
func (op updateOperation) Test(jirix *jiri.X, _ *fsUpdates) error {
	return nil
}
// nullOperation represents a noop. It is used for logging and adding project
// information to the current manifest.
type nullOperation struct {
	commonOperation
}

// Run refreshes the project's metadata and records it in the manifest
// without touching the working tree.
func (op nullOperation) Run(jirix *jiri.X, manifest *Manifest) error {
	if err := writeMetadata(jirix, op.project, op.project.Path); err != nil {
		return err
	}
	return addProjectToManifest(jirix, manifest, op.project)
}

// String describes the no-op for logging.
func (op nullOperation) String() string {
	rev := fmtRevision(op.project.Revision)
	return fmt.Sprintf("project %q located in %q at revision %q is up-to-date", op.project.Name, op.source, rev)
}

// Test always succeeds.
func (op nullOperation) Test(jirix *jiri.X, _ *fsUpdates) error {
	return nil
}
// operations is a sortable collection of operations
type operations []operation

// Len returns the length of the collection.
func (ops operations) Len() int {
	return len(ops)
}

// opRank maps an operation's concrete type to its sort rank; lower ranks
// execute first (see Less for why the order matters).
func opRank(op operation) int {
	switch op.(type) {
	case deleteOperation:
		return 0
	case moveOperation:
		return 1
	case createOperation:
		return 2
	case updateOperation:
		return 3
	case nullOperation:
		return 4
	}
	return 0
}

// Less defines the order of operations. Operations are ordered first
// by their type and then by their project path.
//
// The order in which operation types are defined determines the order
// in which operations are performed. For correctness and also to
// minimize the chance of a conflict, the delete operations should
// happen before move operations, which should happen before create
// operations. If two create operations make nested directories, the
// outermost should be created first.
func (ops operations) Less(i, j int) bool {
	ri, rj := opRank(ops[i]), opRank(ops[j])
	if ri != rj {
		return ri < rj
	}
	return ops[i].Project().Path < ops[j].Project().Path
}

// Swap swaps two elements of the collection.
func (ops operations) Swap(i, j int) {
	ops[j], ops[i] = ops[i], ops[j]
}
// computeOperations inputs a set of projects to update and the set of
// current and new projects (as defined by contents of the local file
// system and manifest file respectively) and outputs a collection of
// operations that describe the actions needed to update the target
// projects. The result is sorted into safe execution order (see
// operations.Less).
func computeOperations(localProjects, remoteProjects Projects, gc bool, root string) operations {
	result := operations{}
	// Build the union of all project keys, local and remote.
	allProjects := map[ProjectKey]bool{}
	for _, p := range localProjects {
		allProjects[p.Key()] = true
	}
	for _, p := range remoteProjects {
		allProjects[p.Key()] = true
	}
	// Idiom fix: `for key := range` replaces the redundant
	// `for key, _ := range` form flagged by gofmt -s.
	for key := range allProjects {
		var local, remote *Project
		if project, ok := localProjects[key]; ok {
			local = &project
		}
		if project, ok := remoteProjects[key]; ok {
			remote = &project
		}
		result = append(result, computeOp(local, remote, gc, root))
	}
	sort.Sort(result)
	return result
}
// computeOp returns the operation that reconciles a project's local state
// with its remote (manifest) state. Exactly one of local/remote may be nil:
// local-only yields a delete, remote-only a create, and a project present
// on both sides yields a move, update, or no-op depending on path and
// revision. Both non-nil pointers are required; it panics otherwise.
func computeOp(local, remote *Project, gc bool, root string) operation {
	switch {
	case local != nil && remote != nil:
		if local.Path != remote.Path {
			// moveOperation also does an update, so we don't need to check the
			// revision here.
			return moveOperation{commonOperation{
				destination: remote.Path,
				project:     *remote,
				source:      local.Path,
			}}
		}
		if local.Revision != remote.Revision {
			return updateOperation{commonOperation{
				destination: remote.Path,
				project:     *remote,
				source:      local.Path,
			}}
		}
		// Same path and revision: nothing to do beyond bookkeeping.
		return nullOperation{commonOperation{
			destination: remote.Path,
			project:     *remote,
			source:      local.Path,
		}}
	case local != nil && remote == nil:
		return deleteOperation{commonOperation{
			destination: "",
			project:     *local,
			source:      local.Path,
		}, gc}
	case local == nil && remote != nil:
		return createOperation{commonOperation{
			destination: remote.Path,
			project:     *remote,
			source:      "",
		}, root}
	default:
		panic("jiri: computeOp called with nil local and remote")
	}
}
// ParseNames identifies the set of projects that a jiri command should
// be applied to. With no args, defaultProjects is used. Names that match
// nothing in the manifest produce a warning on stderr but no error.
func ParseNames(jirix *jiri.X, args []string, defaultProjects map[string]struct{}) (Projects, error) {
	manifestProjects, _, err := ReadManifest(jirix)
	if err != nil {
		return nil, err
	}
	result := Projects{}
	if len(args) == 0 {
		// Use the default set of projects.
		args = set.String.ToSlice(defaultProjects)
	}
	for _, name := range args {
		projects := manifestProjects.Find(name)
		if len(projects) == 0 {
			// Issue a warning if the target project does not exist in the
			// project manifest. BUG FIX: the format string previously
			// lacked a trailing newline, so the warning ran into the next
			// line of output.
			fmt.Fprintf(jirix.Stderr(), "project %q does not exist in the project manifest\n", name)
		}
		for _, project := range projects {
			result[project.Key()] = project
		}
	}
	return result, nil
}
// fmtRevision returns the first 8 chars of a revision hash.
func fmtRevision(r string) string {
	const width = 8
	if len(r) < width {
		return r
	}
	return r[:width]
}
TBR: jiri: Fix GitHooks path.
The project.GitHooks path is relative to JIRI_ROOT/<op.root>.
Fixes https://github.com/vanadium/issues/issues/1062
Change-Id: I1f2657b23efc2132195d6d787ec8543ee61f1d8f
// Copyright 2015 The Vanadium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package project
import (
"bytes"
"encoding/xml"
"fmt"
"io/ioutil"
"net/url"
"os"
"path/filepath"
"sort"
"strings"
"v.io/jiri/collect"
"v.io/jiri/gitutil"
"v.io/jiri/googlesource"
"v.io/jiri/jiri"
"v.io/jiri/runutil"
"v.io/jiri/tool"
"v.io/x/lib/cmdline"
"v.io/x/lib/set"
)
// JiriProject, JiriName, and JiriPackage name the jiri tool's own project,
// tool name, and Go package as they appear in the manifest.
var JiriProject = "release.go.jiri"
var JiriName = "jiri"
var JiriPackage = "v.io/jiri"
// CL represents a changelist (a single unit of code review).
type CL struct {
	// Author identifies the author of the changelist.
	Author string
	// Email identifies the author's email.
	Email string
	// Description holds the description of the changelist.
	Description string
}
// Manifest represents a setting used for updating the universe:
// the full set of hooks, imports, projects, and tools that make up
// a jiri universe.
type Manifest struct {
	// Hooks to run after updating projects.
	Hooks []Hook `xml:"hooks>hook"`
	// Imports of other (remote) manifests.
	Imports []Import `xml:"imports>import"`
	// FileImports of other local manifest files.
	FileImports []FileImport `xml:"imports>fileimport"`
	// Label is an optional identifying label for the manifest.
	Label string `xml:"label,attr,omitempty"`
	// Projects managed by jiri.
	Projects []Project `xml:"projects>project"`
	// Tools built and installed by jiri.
	Tools []Tool `xml:"tools>tool"`
	XMLName struct{} `xml:"manifest"`
}
// ManifestFromBytes returns a manifest parsed from data, with defaults filled
// in.
func ManifestFromBytes(data []byte) (*Manifest, error) {
	var manifest Manifest
	if err := xml.Unmarshal(data, &manifest); err != nil {
		return nil, err
	}
	if err := manifest.fillDefaults(); err != nil {
		return nil, err
	}
	return &manifest, nil
}
// ManifestFromFile returns a manifest parsed from the contents of filename,
// with defaults filled in.
func ManifestFromFile(jirix *jiri.X, filename string) (*Manifest, error) {
	data, readErr := jirix.NewSeq().ReadFile(filename)
	if readErr != nil {
		return nil, readErr
	}
	manifest, parseErr := ManifestFromBytes(data)
	if parseErr != nil {
		return nil, fmt.Errorf("invalid manifest %s: %v", filename, parseErr)
	}
	return manifest, nil
}
// Byte patterns used by ToBytes/ToFile to post-process xml.Marshal output:
// empty container elements are removed entirely, and empty leaf elements
// are rewritten into the short "/>" form.
var (
	newlineBytes        = []byte("\n")
	emptyHooksBytes     = []byte("\n <hooks></hooks>\n")
	emptyImportsBytes   = []byte("\n <imports></imports>\n")
	emptyProjectsBytes  = []byte("\n <projects></projects>\n")
	emptyToolsBytes     = []byte("\n <tools></tools>\n")
	endElemBytes        = []byte("/>\n")
	endHookBytes        = []byte("></hook>\n")
	endImportBytes      = []byte("></import>\n")
	endFileImportBytes  = []byte("></fileimport>\n")
	endProjectBytes     = []byte("></project>\n")
	endToolBytes        = []byte("></tool>\n")
	// Solo variants are used when marshaling a single element (no trailing newline).
	endImportSoloBytes  = []byte("></import>")
	endProjectSoloBytes = []byte("></project>")
	endElemSoloBytes    = []byte("/>")
)
// deepCopy returns a deep copy of Manifest, so that mutating the copy
// (e.g. unfilling defaults) leaves the receiver untouched.
func (m *Manifest) deepCopy() *Manifest {
	c := &Manifest{Label: m.Label}
	// Copy all top-level slices.
	c.Hooks = append([]Hook(nil), m.Hooks...)
	c.Imports = append([]Import(nil), m.Imports...)
	c.FileImports = append([]FileImport(nil), m.FileImports...)
	c.Projects = append([]Project(nil), m.Projects...)
	c.Tools = append([]Tool(nil), m.Tools...)
	// Hooks carry a nested slice of args; copy those too.
	for i := range c.Hooks {
		c.Hooks[i].Args = append([]HookArg(nil), c.Hooks[i].Args...)
	}
	return c
}
// ToBytes returns m as serialized bytes, with defaults unfilled.
func (m *Manifest) ToBytes() ([]byte, error) {
	m = m.deepCopy() // avoid changing manifest when unfilling defaults.
	if err := m.unfillDefaults(); err != nil {
		return nil, err
	}
	data, err := xml.MarshalIndent(m, "", " ")
	if err != nil {
		return nil, fmt.Errorf("manifest xml.Marshal failed: %v", err)
	}
	// It's hard (impossible?) to get xml.Marshal to elide some of the empty
	// elements, or produce short empty elements, so we post-process the data.
	replacements := []struct{ old, new []byte }{
		{emptyHooksBytes, newlineBytes},
		{emptyImportsBytes, newlineBytes},
		{emptyProjectsBytes, newlineBytes},
		{emptyToolsBytes, newlineBytes},
		{endHookBytes, endElemBytes},
		{endImportBytes, endElemBytes},
		{endFileImportBytes, endElemBytes},
		{endProjectBytes, endElemBytes},
		{endToolBytes, endElemBytes},
	}
	for _, r := range replacements {
		data = bytes.Replace(data, r.old, r.new, -1)
	}
	if !bytes.HasSuffix(data, newlineBytes) {
		data = append(data, '\n')
	}
	return data, nil
}
// safeWriteFile writes data to filename near-atomically: it writes to a
// sibling ".tmp" file first and then renames it into place, so readers
// never observe a partially-written file.
func safeWriteFile(jirix *jiri.X, filename string, data []byte) error {
	tmpName := filename + ".tmp"
	s := jirix.NewSeq()
	return s.MkdirAll(filepath.Dir(filename), 0755).
		WriteFile(tmpName, data, 0644).
		Rename(tmpName, filename).
		Done()
}
// ToFile writes the manifest m to a file with the given filename, with
// defaults unfilled.
func (m *Manifest) ToFile(jirix *jiri.X, filename string) error {
	serialized, err := m.ToBytes()
	if err != nil {
		return err
	}
	return safeWriteFile(jirix, filename, serialized)
}
// fillDefaults fills in default values for (and validates) every import,
// file import, project, and tool in the manifest.
func (m *Manifest) fillDefaults() error {
	for i := range m.Imports {
		if err := m.Imports[i].fillDefaults(); err != nil {
			return err
		}
	}
	for i := range m.FileImports {
		if err := m.FileImports[i].validate(); err != nil {
			return err
		}
	}
	for i := range m.Projects {
		if err := m.Projects[i].fillDefaults(); err != nil {
			return err
		}
	}
	for i := range m.Tools {
		if err := m.Tools[i].fillDefaults(); err != nil {
			return err
		}
	}
	return nil
}
// unfillDefaults clears default values from (and validates) every import,
// file import, project, and tool in the manifest, the inverse of fillDefaults.
func (m *Manifest) unfillDefaults() error {
	for i := range m.Imports {
		if err := m.Imports[i].unfillDefaults(); err != nil {
			return err
		}
	}
	for i := range m.FileImports {
		if err := m.FileImports[i].validate(); err != nil {
			return err
		}
	}
	for i := range m.Projects {
		if err := m.Projects[i].unfillDefaults(); err != nil {
			return err
		}
	}
	for i := range m.Tools {
		if err := m.Tools[i].unfillDefaults(); err != nil {
			return err
		}
	}
	return nil
}
// Hooks maps hook names to their detailed description.
type Hooks map[string]Hook

// Hook represents a post-update project hook.
type Hook struct {
	// Name is the hook name.
	Name string `xml:"name,attr,omitempty"`
	// Project is the name of the project the hook is associated with.
	Project string `xml:"project,attr,omitempty"`
	// Path is the path of the hook relative to its project's root.
	Path string `xml:"path,attr,omitempty"`
	// Interpreter is an optional program used to interpret the hook (i.e. python). Unlike Path,
	// Interpreter is relative to the environment's PATH and not the project's root.
	Interpreter string `xml:"interpreter,attr,omitempty"`
	// Arguments for the hook.
	Args []HookArg `xml:"arg,omitempty"`
	XMLName struct{} `xml:"hook"`
}

// HookArg is a single command-line argument passed to a hook.
type HookArg struct {
	// Arg is the literal argument text (XML character data).
	Arg string `xml:",chardata"`
	XMLName struct{} `xml:"arg"`
}
// Import represents a remote manifest import. During the manifest
// transition it may also denote an old-style local import, in which
// case only the embedded Project's Name is set (see validate).
type Import struct {
	// Manifest file to use from the remote manifest project.
	Manifest string `xml:"manifest,attr,omitempty"`
	// Root path, prepended to the manifest project path, as well as all projects
	// specified in the manifest file.
	Root string `xml:"root,attr,omitempty"`
	// Project description of the manifest repository (embedded).
	Project
	XMLName struct{} `xml:"import"`
}
// ToFile writes the import i to a file with the given filename, with defaults
// unfilled. Since i is a value receiver, unfilling mutates only the local copy.
func (i Import) ToFile(jirix *jiri.X, filename string) error {
	if err := i.unfillDefaults(); err != nil {
		return err
	}
	data, marshalErr := xml.Marshal(i)
	if marshalErr != nil {
		return fmt.Errorf("import xml.Marshal failed: %v", marshalErr)
	}
	// Compact the output the same way Manifest.ToBytes does.
	compact := bytes.Replace(data, endImportSoloBytes, endElemSoloBytes, -1)
	return safeWriteFile(jirix, filename, compact)
}
// fillDefaults fills in the default manifest-project path for new-style
// remote imports, fills the embedded Project's defaults, and validates.
func (i *Import) fillDefaults() error {
	if i.Remote == "" {
		// Old-style local import: nothing to default.
		return i.validate()
	}
	if i.Path == "" {
		i.Path = "manifest"
	}
	if err := i.Project.fillDefaults(); err != nil {
		return err
	}
	return i.validate()
}
// unfillDefaults clears the default manifest-project path on new-style
// remote imports, unfills the embedded Project's defaults, and validates.
func (i *Import) unfillDefaults() error {
	if i.Remote == "" {
		// Old-style local import: nothing to unfill.
		return i.validate()
	}
	if i.Path == "manifest" {
		i.Path = ""
	}
	if err := i.Project.unfillDefaults(); err != nil {
		return err
	}
	return i.validate()
}
// validate checks that the import is either a valid old-style local import
// (only Name set) or a valid new-style remote import (at least Remote and
// Manifest set), returning an error otherwise.
func (i *Import) validate() error {
	// After our transition is done, the "import" element will always denote
	// remote imports, and the "remote" and "manifest" attributes will be
	// required. During the transition we allow old-style local imports, which
	// only set the "name" attribute.
	//
	// This is a bit tricky, since the "name" attribute is allowed in both old and
	// new styles, but have different semantics. We distinguish between old and
	// new styles based on the existence of the "remote" attribute.
	//
	// The copy-and-zero trick below detects "only Name is set": zero out Name
	// on a copy and compare the result against the zero value of Import.
	oldStyle := *i
	oldStyle.Name = ""
	switch {
	case i.Name != "" && oldStyle == Import{}:
		// Only "name" is set, this is the old-style.
	case i.Remote != "" && i.Manifest != "":
		// At least "remote" and "manifest" are set, this is the new-style.
	default:
		return fmt.Errorf("bad import: neither old style (only name is set) or new style (at least remote and manifest are set): %+v", *i)
	}
	return nil
}
// remoteKey returns a key based on the remote and manifest, used for
// cycle-detection. It's only valid for new-style remote imports; it's empty
// for the old-style local imports.
func (i *Import) remoteKey() string {
	if i.Remote == "" {
		// Old-style local import: no remote key.
		return ""
	}
	// We don't join the remote and manifest with a slash, since that might not be
	// unique. E.g.
	// remote: https://foo.com/a/b remote: https://foo.com/a
	// manifest: c manifest: b/c
	// In both cases, the key would be https://foo.com/a/b/c.
	// The " + " separator cannot occur in a URL host/path, so keys stay unique.
	return i.Remote + " + " + i.Manifest
}
// FileImport represents a file-based import: another manifest file on the
// local filesystem that is merged into this one.
type FileImport struct {
	// Manifest file to import from.
	File string `xml:"file,attr,omitempty"`
	XMLName struct{} `xml:"fileimport"`
}
// validate returns an error unless the import names a file.
func (i *FileImport) validate() error {
	if i.File != "" {
		return nil
	}
	return fmt.Errorf("bad fileimport: must specify file: %+v", *i)
}
// ProjectKey is a unique string for a project, derived from its name and
// remote (see MakeProjectKey).
type ProjectKey string

// MakeProjectKey returns the project key, given the project name and remote.
func MakeProjectKey(name, remote string) ProjectKey {
	return ProjectKey(name + projectKeySeparator + remote)
}

// projectKeySeparator is a reserved string used in ProjectKeys. It cannot
// occur in Project names (enforced by Project.validate).
const projectKeySeparator = "="

// ProjectKeys is a slice of ProjectKeys implementing the Sort interface.
type ProjectKeys []ProjectKey

func (pks ProjectKeys) Len() int { return len(pks) }
func (pks ProjectKeys) Less(i, j int) bool { return string(pks[i]) < string(pks[j]) }
func (pks ProjectKeys) Swap(i, j int) { pks[i], pks[j] = pks[j], pks[i] }
// Project represents a jiri project.
type Project struct {
	// Name is the project name.
	Name string `xml:"name,attr,omitempty"`
	// Path is the path used to store the project locally. Project
	// manifest uses paths that are relative to the $JIRI_ROOT
	// environment variable. When a manifest is parsed (e.g. in
	// RemoteProjects), the program logic converts the relative
	// paths to an absolute paths, using the current value of the
	// $JIRI_ROOT environment variable as a prefix.
	Path string `xml:"path,attr,omitempty"`
	// Protocol is the version control protocol used by the
	// project. If not set, "git" is used as the default.
	Protocol string `xml:"protocol,attr,omitempty"`
	// Remote is the project remote.
	Remote string `xml:"remote,attr,omitempty"`
	// RemoteBranch is the name of the remote branch to track. It doesn't affect
	// the name of the local branch that jiri maintains, which is always "master".
	RemoteBranch string `xml:"remotebranch,attr,omitempty"`
	// Revision is the revision the project should be advanced to
	// during "jiri update". If not set, "HEAD" is used as the
	// default.
	Revision string `xml:"revision,attr,omitempty"`
	// GerritHost is the gerrit host where project CLs will be sent.
	GerritHost string `xml:"gerrithost,attr,omitempty"`
	// GitHooks is a directory containing git hooks that will be installed for
	// this project.
	GitHooks string `xml:"githooks,attr,omitempty"`
	XMLName struct{} `xml:"project"`
}
// Byte patterns used by ProjectFromFile to rewrite the legacy upper-case
// <Project> elements (written by older jiri versions) into the lower-case
// form that xml.Unmarshal expects.
var (
	startUpperProjectBytes = []byte("<Project")
	startLowerProjectBytes = []byte("<project")
	endUpperProjectBytes   = []byte("</Project>")
	endLowerProjectBytes   = []byte("</project>")
)
// ProjectFromFile returns a project parsed from the contents of filename,
// with defaults filled in.
func ProjectFromFile(jirix *jiri.X, filename string) (*Project, error) {
	data, err := jirix.NewSeq().ReadFile(filename)
	if err != nil {
		return nil, err
	}
	// Previous versions of the jiri tool had a bug where the project start and
	// end elements were in upper-case, since the XMLName field was missing. That
	// bug is now fixed, but the xml.Unmarshal call is case-sensitive, and will
	// fail if it sees the upper-case version. This hack rewrites the elements to
	// the lower-case version.
	//
	// TODO(toddw): Remove when the transition to new manifests is complete.
	data = bytes.Replace(data, startUpperProjectBytes, startLowerProjectBytes, -1)
	data = bytes.Replace(data, endUpperProjectBytes, endLowerProjectBytes, -1)
	var project Project
	if err := xml.Unmarshal(data, &project); err != nil {
		return nil, err
	}
	if err := project.fillDefaults(); err != nil {
		return nil, err
	}
	return &project, nil
}
// ToFile writes the project p to a file with the given filename, with defaults
// unfilled. Since p is a value receiver, unfilling mutates only the local copy.
func (p Project) ToFile(jirix *jiri.X, filename string) error {
	if err := p.unfillDefaults(); err != nil {
		return err
	}
	data, marshalErr := xml.Marshal(p)
	if marshalErr != nil {
		return fmt.Errorf("project xml.Marshal failed: %v", marshalErr)
	}
	// Compact the output the same way Manifest.ToBytes does.
	compact := bytes.Replace(data, endProjectSoloBytes, endElemSoloBytes, -1)
	return safeWriteFile(jirix, filename, compact)
}
// Key returns a unique ProjectKey for the project, combining its name
// and remote.
func (p Project) Key() ProjectKey {
	return MakeProjectKey(p.Name, p.Remote)
}
// fillDefaults populates unset fields with their defaults (protocol "git",
// remote branch "master", revision "HEAD") and validates the result.
func (p *Project) fillDefaults() error {
	for _, d := range []struct {
		field *string
		def   string
	}{
		{&p.Protocol, "git"},
		{&p.RemoteBranch, "master"},
		{&p.Revision, "HEAD"},
	} {
		if *d.field == "" {
			*d.field = d.def
		}
	}
	return p.validate()
}
// unfillDefaults clears fields that hold their default values (protocol
// "git", remote branch "master", revision "HEAD") and validates the result.
func (p *Project) unfillDefaults() error {
	for _, d := range []struct {
		field *string
		def   string
	}{
		{&p.Protocol, "git"},
		{&p.RemoteBranch, "master"},
		{&p.Revision, "HEAD"},
	} {
		if *d.field == d.def {
			*d.field = ""
		}
	}
	return p.validate()
}
// validate rejects names containing the reserved key separator and
// protocols other than git.
func (p *Project) validate() error {
	switch {
	case strings.Contains(p.Name, projectKeySeparator):
		return fmt.Errorf("bad project: name cannot contain %q: %+v", projectKeySeparator, *p)
	case p.Protocol != "" && p.Protocol != "git":
		return fmt.Errorf("bad project: only git protocol is supported: %+v", *p)
	}
	return nil
}
// Projects maps ProjectKeys to Projects.
type Projects map[ProjectKey]Project
// Find returns all projects in Projects with the given key or name.
// An exact key match takes precedence; otherwise all projects whose
// Name matches are returned.
func (ps Projects) Find(keyOrName string) Projects {
	matches := Projects{}
	if p, ok := ps[ProjectKey(keyOrName)]; ok {
		matches[ProjectKey(keyOrName)] = p
		return matches
	}
	for key, p := range ps {
		if p.Name == keyOrName {
			matches[key] = p
		}
	}
	return matches
}
// FindUnique returns the project in Projects with the given key or name, and
// returns an error if none or multiple matching projects are found.
func (ps Projects) FindUnique(keyOrName string) (Project, error) {
	matches := ps.Find(keyOrName)
	switch len(matches) {
	case 0:
		return Project{}, fmt.Errorf("no projects found with key or name %q", keyOrName)
	case 1:
		// Return the single match.
		for _, p := range matches {
			return p, nil
		}
	}
	return Project{}, fmt.Errorf("multiple projects found with name %q", keyOrName)
}
// Tools maps jiri tool names, to their detailed description.
type Tools map[string]Tool

// Tool represents a jiri tool.
type Tool struct {
	// Data is a relative path to a directory for storing tool data
	// (e.g. tool configuration files). The purpose of this field is to
	// decouple the configuration of the data directory from the tool
	// itself so that the location of the data directory can change
	// without the need to change the tool.
	Data string `xml:"data,attr,omitempty"`
	// Name is the name of the tool binary.
	Name string `xml:"name,attr,omitempty"`
	// Package is the package path of the tool.
	Package string `xml:"package,attr,omitempty"`
	// Project identifies the project that contains the tool. If not
	// set, "https://vanadium.googlesource.com/<JiriProject>" is
	// used as the default.
	Project string `xml:"project,attr,omitempty"`
	XMLName struct{} `xml:"tool"`
}
// fillDefaults populates the Data directory ("data") and Project URL
// (the vanadium-hosted jiri project) when unset.
func (t *Tool) fillDefaults() error {
	if t.Project == "" {
		t.Project = "https://vanadium.googlesource.com/" + JiriProject
	}
	if t.Data == "" {
		t.Data = "data"
	}
	return nil
}
// unfillDefaults clears the Data directory when it holds its default value.
// The Project URL is deliberately left filled, since it is not meant to be
// optional.
func (t *Tool) unfillDefaults() error {
	if t.Data == "data" {
		t.Data = ""
	}
	return nil
}
// ScanMode determines whether LocalProjects should scan the local filesystem
// for projects (FullScan), or optimistically assume that the local projects
// will match those in the manifest (FastScan).
type ScanMode bool

const (
	// FastScan trusts the manifest when all of its projects exist locally.
	FastScan = ScanMode(false)
	// FullScan always walks the filesystem under JIRI_ROOT.
	FullScan = ScanMode(true)
)
// UnsupportedProtocolErr is returned when a project specifies a version
// control protocol that jiri does not support. The underlying string is
// the offending protocol name.
type UnsupportedProtocolErr string

// Error implements the error interface.
func (e UnsupportedProtocolErr) Error() string {
	const prefix = "unsupported protocol: "
	return prefix + string(e)
}
// Update represents an update of projects as a map from
// project names to a collections of commits.
type Update map[string][]CL
// CreateSnapshot creates a manifest that encodes the current state of
// master branches of all projects and writes this snapshot out to the
// given file.
func CreateSnapshot(jirix *jiri.X, path string) error {
	jirix.TimerPush("create snapshot")
	defer jirix.TimerPop()
	var snapshot Manifest
	// Record every local project, with its path made relative to the root.
	locals, err := LocalProjects(jirix, FullScan)
	if err != nil {
		return err
	}
	for _, p := range locals {
		rel, err := filepath.Rel(jirix.Root, p.Path)
		if err != nil {
			return err
		}
		p.Path = rel
		snapshot.Projects = append(snapshot.Projects, p)
	}
	// Carry over all tools and hooks from the current manifest into the
	// snapshot manifest.
	_, tools, hooks, err := readManifest(jirix)
	if err != nil {
		return err
	}
	for _, t := range tools {
		snapshot.Tools = append(snapshot.Tools, t)
	}
	for _, h := range hooks {
		snapshot.Hooks = append(snapshot.Hooks, h)
	}
	return snapshot.ToFile(jirix, path)
}
// CurrentManifest returns a manifest that identifies the result of
// the most recent "jiri update" invocation. If the .current_manifest
// file does not exist, a warning is printed to stderr and an empty
// manifest is returned without error.
func CurrentManifest(jirix *jiri.X) (*Manifest, error) {
	filename := filepath.Join(jirix.Root, ".current_manifest")
	manifest, err := ManifestFromFile(jirix, filename)
	if !runutil.IsNotExist(err) {
		return manifest, err
	}
	fmt.Fprintf(jirix.Stderr(), `WARNING: Could not find %s.
The contents of this file are stored as metadata in binaries the jiri
tool builds. To fix this problem, please run "jiri update".
`, filename)
	return &Manifest{}, nil
}
// writeCurrentManifest writes the given manifest to the file that
// records the result of the most recent "jiri update" invocation.
func writeCurrentManifest(jirix *jiri.X, manifest *Manifest) error {
	target := filepath.Join(jirix.Root, ".current_manifest")
	return manifest.ToFile(jirix, target)
}
// CurrentProjectKey gets the key of the current project from the current
// directory by reading the jiri project metadata located in a directory at the
// root of the current repository.
//
// Returns an empty key (with nil error) when the current directory is not
// inside a jiri-managed project.
func CurrentProjectKey(jirix *jiri.X) (ProjectKey, error) {
	topLevel, err := jirix.Git().TopLevel()
	if err != nil {
		// NOTE(review): the error is deliberately discarded — presumably a
		// TopLevel failure means "not inside a git repo", which is treated
		// as "no current project" rather than a failure. Confirm before
		// changing this to return err.
		return "", nil
	}
	metadataDir := filepath.Join(topLevel, jiri.ProjectMetaDir)
	if _, err := jirix.NewSeq().Stat(metadataDir); err == nil {
		project, err := ProjectFromFile(jirix, filepath.Join(metadataDir, jiri.ProjectMetaFile))
		if err != nil {
			return "", err
		}
		return project.Key(), nil
	}
	// No metadata directory: not a jiri project.
	return "", nil
}
// setProjectRevisions sets each project's Revision to the current revision
// of its local master branch, as found on the filesystem. The projects map
// is updated in place and also returned.
func setProjectRevisions(jirix *jiri.X, projects Projects) (_ Projects, e error) {
	for key, p := range projects {
		if p.Protocol != "git" {
			return nil, UnsupportedProtocolErr(p.Protocol)
		}
		rev, err := jirix.Git(tool.RootDirOpt(p.Path)).CurrentRevisionOfBranch("master")
		if err != nil {
			return nil, err
		}
		p.Revision = rev
		projects[key] = p
	}
	return projects, nil
}
// LocalProjects returns projects on the local filesystem. If all projects in
// the manifest exist locally and scanMode is set to FastScan, then only the
// projects in the manifest that exist locally will be returned. Otherwise, a
// full scan of the filesystem will take place, and all found projects will be
// returned.
func LocalProjects(jirix *jiri.X, scanMode ScanMode) (Projects, error) {
	jirix.TimerPush("local projects")
	defer jirix.TimerPop()
	if scanMode == FastScan {
		// Fast path: trust the manifest if every project it lists is
		// already present on disk.
		manifestProjects, _, err := ReadManifest(jirix)
		if err != nil {
			return nil, err
		}
		allPresent, err := projectsExistLocally(jirix, manifestProjects)
		if err != nil {
			return nil, err
		}
		if allPresent {
			return setProjectRevisions(jirix, manifestProjects)
		}
	}
	// Slow path: either a full scan was requested, or some manifest project
	// was not found locally. Recursively scan everything under JIRI_ROOT.
	found := Projects{}
	jirix.TimerPush("scan fs")
	scanErr := findLocalProjects(jirix, jirix.Root, found)
	jirix.TimerPop()
	if scanErr != nil {
		return nil, scanErr
	}
	return setProjectRevisions(jirix, found)
}
// projectsExistLocally returns true iff all the given projects exist on the
// local filesystem. Note that this may return true even if there are projects
// on the local filesystem not included in the provided projects argument.
func projectsExistLocally(jirix *jiri.X, projects Projects) (bool, error) {
	jirix.TimerPush("match manifest")
	defer jirix.TimerPop()
	for _, project := range projects {
		ok, err := isLocalProject(jirix, project.Path)
		if err != nil || !ok {
			// err is nil in the "missing project" case, so this returns
			// either (false, err) or (false, nil) as appropriate.
			return false, err
		}
	}
	return true, nil
}
// PollProjects returns the set of changelists that exist remotely but not
// locally. Changes are grouped by projects and contain author identification
// and a description of their content.
//
// projectSet, when non-empty, restricts the result to the named projects.
// Projects that required no update map to an empty CL slice.
func PollProjects(jirix *jiri.X, projectSet map[string]struct{}) (_ Update, e error) {
	jirix.TimerPush("poll projects")
	defer jirix.TimerPop()

	// Switch back to current working directory when we're done.
	cwd, err := os.Getwd()
	if err != nil {
		return nil, err
	}
	defer collect.Error(func() error { return jirix.NewSeq().Chdir(cwd).Done() }, &e)

	// Gather local & remote project data.
	localProjects, err := LocalProjects(jirix, FastScan)
	if err != nil {
		return nil, err
	}
	remoteProjects, _, _, err := readManifest(jirix)
	if err != nil {
		return nil, err
	}

	// Compute difference between local and remote.
	update := Update{}
	ops := computeOperations(localProjects, remoteProjects, false, "")
	s := jirix.NewSeq()
	for _, op := range ops {
		name := op.Project().Name

		// If given a project set, limit our results to those projects in the set.
		if len(projectSet) > 0 {
			if _, ok := projectSet[name]; !ok {
				continue
			}
		}

		// We only inspect this project if an update operation is required.
		cls := []CL{}
		if updateOp, ok := op.(updateOperation); ok {
			switch updateOp.project.Protocol {
			case "git":

				// Enter project directory - this assumes absolute paths.
				if err := s.Chdir(updateOp.destination).Done(); err != nil {
					return nil, err
				}

				// Fetch the latest from origin.
				if err := jirix.Git().FetchRefspec("origin", updateOp.project.RemoteBranch); err != nil {
					return nil, err
				}

				// Collect commits visible from FETCH_HEAD that aren't visible from master.
				commitsText, err := jirix.Git().Log("FETCH_HEAD", "master", "%an%n%ae%n%B")
				if err != nil {
					return nil, err
				}

				// Format those commits and add them to the results.
				for _, commitText := range commitsText {
					// Each commit carries at least author name, author email,
					// and a message body (from the %an%n%ae%n%B format above).
					if got, want := len(commitText), 3; got < want {
						return nil, fmt.Errorf("Unexpected length of %v: got %v, want at least %v", commitText, got, want)
					}
					cls = append(cls, CL{
						Author:      commitText[0],
						Email:       commitText[1],
						Description: strings.Join(commitText[2:], "\n"),
					})
				}
			default:
				return nil, UnsupportedProtocolErr(updateOp.project.Protocol)
			}
		}
		update[name] = cls
	}
	return update, nil
}
// ReadManifest retrieves and parses the manifest that determines what
// projects and tools are part of the jiri universe. It is the public
// wrapper around readManifest, dropping the hooks.
func ReadManifest(jirix *jiri.X) (Projects, Tools, error) {
	projects, tools, _, err := readManifest(jirix)
	return projects, tools, err
}
// getManifestRemote returns the remote url of the origin from the manifest
// repo.
// TODO(nlacasse,toddw): Once the manifest project is specified in the
// manifest, we should get the remote directly from the manifest, and not from
// the filesystem.
func getManifestRemote(jirix *jiri.X, manifestPath string) (string, error) {
	remote := ""
	readRemote := func() (e error) {
		remote, e = jirix.Git().RemoteUrl("origin")
		return
	}
	err := jirix.NewSeq().Pushd(manifestPath).Call(readRemote, "get manifest origin").Done()
	return remote, err
}
// readManifest resolves the current manifest file and loads the full set of
// projects, tools, and hooks it (transitively) declares.
func readManifest(jirix *jiri.X) (Projects, Tools, Hooks, error) {
	jirix.TimerPush("read manifest")
	defer jirix.TimerPop()
	file, err := jirix.ResolveManifestPath(jirix.Manifest())
	if err != nil {
		return nil, nil, nil, err
	}
	projects, tools, hooks := Projects{}, Tools{}, Hooks{}
	var imp importer
	if err := imp.Load(jirix, jirix.Root, file, "", projects, tools, hooks); err != nil {
		return nil, nil, nil, err
	}
	return projects, tools, hooks, nil
}
// updateManifestProjects brings the manifest repositories up to date with
// their remotes, dispatching to the deprecated code path when the old
// manifest layout is in use.
func updateManifestProjects(jirix *jiri.X) error {
	jirix.TimerPush("update manifest")
	defer jirix.TimerPop()
	if jirix.UsingOldManifests() {
		return updateManifestProjectsDeprecated(jirix)
	}
	// Update the repositories corresponding to all remote imports.
	//
	// TODO(toddw): Cache local projects in jirix, so that we don't need to
	// perform multiple full scans.
	locals, err := LocalProjects(jirix, FullScan)
	if err != nil {
		return err
	}
	file, err := jirix.ResolveManifestPath(jirix.Manifest())
	if err != nil {
		return err
	}
	var imp importer
	return imp.Update(jirix, jirix.Root, file, "", locals)
}
// updateManifestProjectsDeprecated updates the legacy ".manifest" repository
// under JIRI_ROOT, resetting it to origin/master at HEAD.
func updateManifestProjectsDeprecated(jirix *jiri.X) error {
	manifestPath := filepath.Join(jirix.Root, ".manifest")
	remote, err := getManifestRemote(jirix, manifestPath)
	if err != nil {
		return err
	}
	return resetProject(jirix, Project{
		Path:         manifestPath,
		Protocol:     "git",
		Remote:       remote,
		Revision:     "HEAD",
		RemoteBranch: "master",
	})
}
// UpdateUniverse updates all local projects and tools to match the
// remote counterparts identified by the given manifest. Optionally,
// the 'gc' flag can be used to indicate that local projects that no
// longer exist remotely should be removed.
//
// The phases below run in a fixed order: manifests first (so the
// manifest read reflects the updated state), then projects, then tool
// builds (into a temp dir), tool installation, and finally hooks.
func UpdateUniverse(jirix *jiri.X, gc bool) (e error) {
	jirix.TimerPush("update universe")
	defer jirix.TimerPop()

	// 0. Update all manifest projects to match their remote counterparts, and
	// read the manifest file.
	if err := updateManifestProjects(jirix); err != nil {
		return err
	}
	remoteProjects, remoteTools, remoteHooks, err := readManifest(jirix)
	if err != nil {
		return err
	}

	s := jirix.NewSeq()

	// 1. Update all local projects to match their remote counterparts.
	if err := updateProjects(jirix, remoteProjects, gc); err != nil {
		return err
	}

	// 2. Build all tools in a temporary directory (removed on exit via the
	// deferred RemoveAll below).
	tmpDir, err := s.TempDir("", "tmp-jiri-tools-build")
	if err != nil {
		return fmt.Errorf("TempDir() failed: %v", err)
	}
	defer collect.Error(func() error { return s.RemoveAll(tmpDir).Done() }, &e)
	if err := buildToolsFromMaster(jirix, remoteTools, tmpDir); err != nil {
		return err
	}

	// 3. Install the tools into $JIRI_ROOT/.jiri_root/bin.
	if err := InstallTools(jirix, tmpDir); err != nil {
		return err
	}

	// 4. Run all specified hooks
	return runHooks(jirix, remoteHooks)
}
// ApplyToLocalMaster applies an operation expressed as the given function to
// the local master branch of the given projects.
//
// For every project it checks out master (stashing uncommitted work first),
// then runs fn once, and finally — via the deferred cleanups registered in
// the loop — restores each project's original branch and pops its stash.
// The defer-in-loop is intentional: restoration must happen only after fn
// has run across all projects, i.e. at function exit.
func ApplyToLocalMaster(jirix *jiri.X, projects Projects, fn func() error) (e error) {
	cwd, err := os.Getwd()
	if err != nil {
		return err
	}
	defer collect.Error(func() error { return jirix.NewSeq().Chdir(cwd).Done() }, &e)

	s := jirix.NewSeq()

	// Loop through all projects, checking out master and stashing any unstaged
	// changes.
	for _, project := range projects {
		// Capture a per-iteration copy so the deferred closure below sees
		// this project, not the loop variable's final value.
		p := project
		if err := s.Chdir(p.Path).Done(); err != nil {
			return err
		}
		switch p.Protocol {
		case "git":
			branch, err := jirix.Git().CurrentBranchName()
			if err != nil {
				return err
			}
			stashed, err := jirix.Git().Stash()
			if err != nil {
				return err
			}
			if err := jirix.Git().CheckoutBranch("master"); err != nil {
				return err
			}
			// After running the function, return to this project's directory,
			// checkout the original branch, and stash pop if necessary.
			defer collect.Error(func() error {
				if err := s.Chdir(p.Path).Done(); err != nil {
					return err
				}
				if err := jirix.Git().CheckoutBranch(branch); err != nil {
					return err
				}
				if stashed {
					return jirix.Git().StashPop()
				}
				return nil
			}, &e)
		default:
			return UnsupportedProtocolErr(p.Protocol)
		}
	}
	return fn()
}
// BuildTools builds the given tools and places the resulting binaries into the
// given directory.
//
// Each tool's Go workspace is inferred heuristically from its project path
// (see the inner loop), and all workspaces — plus any GOPATH from the
// environment — are joined into the GOPATH used for a single "go install".
func BuildTools(jirix *jiri.X, tools Tools, outputDir string) error {
	jirix.TimerPush("build tools")
	defer jirix.TimerPop()

	if len(tools) == 0 {
		// Nothing to do here...
		return nil
	}
	projects, err := LocalProjects(jirix, FastScan)
	if err != nil {
		return err
	}
	toolPkgs := []string{}
	workspaceSet := map[string]bool{}
	for _, tool := range tools {
		toolPkgs = append(toolPkgs, tool.Package)
		toolProject, err := projects.FindUnique(tool.Project)
		if err != nil {
			return err
		}
		// Identify the Go workspace the tool is in. To this end we use a
		// heuristic that identifies the maximal suffix of the project path
		// that corresponds to a prefix of the package name.
		workspace := ""
		for i := 0; i < len(toolProject.Path); i++ {
			if toolProject.Path[i] == filepath.Separator {
				if strings.HasPrefix("src/"+tool.Package, filepath.ToSlash(toolProject.Path[i+1:])) {
					workspace = toolProject.Path[:i]
					break
				}
			}
		}
		if workspace == "" {
			return fmt.Errorf("could not identify go workspace for tool %v", tool.Name)
		}
		workspaceSet[workspace] = true
	}
	workspaces := []string{}
	for workspace := range workspaceSet {
		workspaces = append(workspaces, workspace)
	}
	if envGoPath := os.Getenv("GOPATH"); envGoPath != "" {
		workspaces = append(workspaces, strings.Split(envGoPath, string(filepath.ListSeparator))...)
	}
	s := jirix.NewSeq()
	var stderr bytes.Buffer

	// We unset GOARCH and GOOS because jiri update should always build for the
	// native architecture and OS. Also, as of go1.5, setting GOBIN is not
	// compatible with GOARCH or GOOS.
	env := map[string]string{
		"GOARCH": "",
		"GOOS":   "",
		"GOBIN":  outputDir,
		"GOPATH": strings.Join(workspaces, string(filepath.ListSeparator)),
	}
	args := append([]string{"install"}, toolPkgs...)
	if err := s.Env(env).Capture(ioutil.Discard, &stderr).Last("go", args...); err != nil {
		return fmt.Errorf("tool build failed\n%v", stderr.String())
	}
	return nil
}
// buildToolsFromMaster builds and installs all jiri tools using the version
// available in the local master branch of the tools repository. Notably, this
// function does not perform any version control operation on the master
// branch.
//
// Tools without a Package are skipped: besides increasing robustness, this
// allows jiri root fakes to omit an implementation of the "jiri" tool, which
// every manifest needs to specify.
func buildToolsFromMaster(jirix *jiri.X, tools Tools, outputDir string) error {
	localProjects, err := LocalProjects(jirix, FastScan)
	if err != nil {
		return err
	}
	toolsToBuild, toolProjects := Tools{}, Projects{}
	toolNames := []string{} // Used for logging purposes.
	for _, tool := range tools {
		if tool.Package == "" {
			continue
		}
		project, err := localProjects.FindUnique(tool.Project)
		if err != nil {
			return err
		}
		toolProjects[project.Key()] = project
		toolsToBuild[tool.Name] = tool
		toolNames = append(toolNames, tool.Name)
	}
	updateFn := func() error {
		return ApplyToLocalMaster(jirix, toolProjects, func() error {
			return BuildTools(jirix, toolsToBuild, outputDir)
		})
	}

	// Always log the output of updateFn, irrespective of
	// the value of the verbose flag.
	if err := jirix.NewSeq().Verbose(true).Call(updateFn, "build tools: %v", strings.Join(toolNames, " ")).Done(); err != nil {
		// Report the failure and return the distinct exit code directly,
		// instead of routing it through a redundant "failed" flag.
		fmt.Fprintf(jirix.Stderr(), "%v\n", err)
		return cmdline.ErrExitCode(2)
	}
	return nil
}
// CleanupProjects restores the given jiri projects back to their master
// branches and gets rid of all the local changes. If "cleanupBranches" is
// true, it will also delete all the non-master branches.
func CleanupProjects(jirix *jiri.X, projects Projects, cleanupBranches bool) (e error) {
	wd, err := os.Getwd()
	if err != nil {
		return fmt.Errorf("Getwd() failed: %v", err)
	}
	// Restore the working directory on exit.
	defer collect.Error(func() error { return jirix.NewSeq().Chdir(wd).Done() }, &e)
	seq := jirix.NewSeq()
	for _, p := range projects {
		if err := seq.Chdir(p.Path).Done(); err != nil {
			return err
		}
		if err := resetLocalProject(jirix, cleanupBranches, p.RemoteBranch); err != nil {
			return err
		}
	}
	return nil
}
// resetLocalProject checks out the master branch, cleans up untracked files
// and uncommitted changes, and optionally deletes all the other branches.
// It operates on the git repository in the current working directory.
func resetLocalProject(jirix *jiri.X, cleanupBranches bool, remoteBranch string) error {
	// Check out master and clean up changes.
	curBranchName, err := jirix.Git().CurrentBranchName()
	if err != nil {
		return err
	}
	if curBranchName != "master" {
		if err := jirix.Git().CheckoutBranch("master", gitutil.ForceOpt(true)); err != nil {
			return err
		}
	}
	if err := jirix.Git().RemoveUntrackedFiles(); err != nil {
		return err
	}
	// Discard any uncommitted changes by resetting to the configured remote
	// branch, defaulting to "master" when none is configured.
	if remoteBranch == "" {
		remoteBranch = "master"
	}
	if err := jirix.Git().Reset("origin/" + remoteBranch); err != nil {
		return err
	}
	// Delete all the other branches.
	// At this point we should be at the master branch.
	branches, _, err := jirix.Git().GetBranches()
	if err != nil {
		return err
	}
	for _, branch := range branches {
		if branch == "master" {
			continue
		}
		if cleanupBranches {
			// BUG FIX: this previously returned nil on a failed branch
			// deletion, silently swallowing the error (and skipping the
			// remaining branches). Propagate the error instead.
			if err := jirix.Git().DeleteBranch(branch, gitutil.ForceOpt(true)); err != nil {
				return err
			}
		}
	}
	return nil
}
// isLocalProject returns true if there is a project at the given path.
func isLocalProject(jirix *jiri.X, path string) (bool, error) {
	// Existence of a metadata directory is how we know we've found a
	// Jiri-maintained project.
	_, err := jirix.NewSeq().Stat(filepath.Join(path, jiri.ProjectMetaDir))
	switch {
	case err == nil:
		return true, nil
	case runutil.IsNotExist(err):
		return false, nil
	default:
		return false, err
	}
}
// ProjectAtPath returns a Project struct corresponding to the project at the
// path in the filesystem.
func ProjectAtPath(jirix *jiri.X, path string) (Project, error) {
	metadataFile := filepath.Join(path, jiri.ProjectMetaDir, jiri.ProjectMetaFile)
	p, err := ProjectFromFile(jirix, metadataFile)
	if err != nil {
		return Project{}, err
	}
	// Metadata stores the path relative to the jiri root; absolutize it
	// before handing the project back.
	p.Path = filepath.Join(jirix.Root, p.Path)
	return *p, nil
}
// findLocalProjects scans the filesystem for all projects. Note that project
// directories can be nested recursively.
func findLocalProjects(jirix *jiri.X, path string, projects Projects) error {
	isLocal, err := isLocalProject(jirix, path)
	if err != nil {
		return err
	}
	if isLocal {
		project, err := ProjectAtPath(jirix, path)
		if err != nil {
			return err
		}
		// The path recorded in the metadata must agree with where the
		// project was actually found.
		if path != project.Path {
			return fmt.Errorf("project %v has path %v but was found in %v", project.Name, project.Path, path)
		}
		if p, ok := projects[project.Key()]; ok {
			return fmt.Errorf("name conflict: both %v and %v contain project with key %v", p.Path, project.Path, project.Key())
		}
		projects[project.Key()] = project
	}
	// Recurse into all non-hidden subdirectories.
	fileInfos, err := jirix.NewSeq().ReadDir(path)
	if err != nil {
		return err
	}
	for _, fi := range fileInfos {
		name := fi.Name()
		if !fi.IsDir() || strings.HasPrefix(name, ".") {
			continue
		}
		if err := findLocalProjects(jirix, filepath.Join(path, name), projects); err != nil {
			return err
		}
	}
	return nil
}
// InstallTools installs the tools from the given directory into
// $JIRI_ROOT/.jiri_root/bin.
func InstallTools(jirix *jiri.X, dir string) error {
	jirix.TimerPush("install tools")
	defer jirix.TimerPop()
	if jirix.DryRun() {
		// In "dry run" mode, no binaries are built.
		return nil
	}
	fis, err := ioutil.ReadDir(dir)
	if err != nil {
		return fmt.Errorf("ReadDir(%v) failed: %v", dir, err)
	}
	binDir := jirix.BinDir()
	if err := jirix.NewSeq().MkdirAll(binDir, 0755).Done(); err != nil {
		return fmt.Errorf("MkdirAll(%v) failed: %v", binDir, err)
	}
	failed := false
	s := jirix.NewSeq()
	for _, fi := range fis {
		// Move each built binary into the bin directory. The closure
		// captures fi but is invoked synchronously within this iteration,
		// so the capture is safe.
		installFn := func() error {
			src := filepath.Join(dir, fi.Name())
			dst := filepath.Join(binDir, fi.Name())
			return jirix.NewSeq().Rename(src, dst).Done()
		}
		// Always log the install output, irrespective of the verbose flag.
		// A failure is reported but does not stop the remaining installs.
		if err := s.Verbose(true).Call(installFn, "install tool %q", fi.Name()).Done(); err != nil {
			fmt.Fprintf(jirix.Stderr(), "%v\n", err)
			failed = true
		}
	}
	// Exit code 2 signals that at least one tool failed to install.
	if failed {
		return cmdline.ErrExitCode(2)
	}
	return nil
}
// TransitionBinDir handles the transition from the old location
// $JIRI_ROOT/devtools/bin to the new $JIRI_ROOT/.jiri_root/bin. In
// InstallTools above we've already installed the tools to the new location.
//
// For now we want $JIRI_ROOT/devtools/bin symlinked to the new location, so
// that users won't perceive a difference in behavior. In addition, we want to
// save the old binaries to $JIRI_ROOT/.jiri_root/bin.BACKUP the first time this
// is run. That way if we screwed something up, the user can recover their old
// binaries.
//
// TODO(toddw): Remove this logic after the transition to .jiri_root is done.
func TransitionBinDir(jirix *jiri.X) error {
	s := jirix.NewSeq()
	oldDir, newDir := filepath.Join(jirix.Root, "devtools", "bin"), jirix.BinDir()
	// Lstat (not Stat) so that a symlink at oldDir is examined itself
	// rather than followed.
	switch info, err := s.Lstat(oldDir); {
	case runutil.IsNotExist(err):
		// Drop down to create the symlink below.
	case err != nil:
		return fmt.Errorf("Failed to stat old bin dir: %v", err)
	case info.Mode()&os.ModeSymlink != 0:
		link, err := s.Readlink(oldDir)
		if err != nil {
			return fmt.Errorf("Failed to read link from old bin dir: %v", err)
		}
		if filepath.Clean(link) == newDir {
			// The old dir is already correctly symlinked to the new dir.
			return nil
		}
		// The symlink points elsewhere; treat it like any other old dir
		// and back it up via the default case.
		fallthrough
	default:
		// The old dir exists, and either it's not a symlink, or it's a symlink that
		// doesn't point to the new dir. Move the old dir to the backup location.
		backupDir := newDir + ".BACKUP"
		switch _, err := s.Stat(backupDir); {
		case runutil.IsNotExist(err):
			if err := s.Rename(oldDir, backupDir).Done(); err != nil {
				return fmt.Errorf("Failed to backup old bin dir %v to %v: %v", oldDir, backupDir, err)
			}
			// Drop down to create the symlink below.
		case err != nil:
			return fmt.Errorf("Failed to stat backup bin dir: %v", err)
		default:
			// A backup already exists; refuse to overwrite it.
			return fmt.Errorf("Backup bin dir %v already exists", backupDir)
		}
	}
	// Create the symlink.
	if err := s.MkdirAll(filepath.Dir(oldDir), 0755).Symlink(newDir, oldDir).Done(); err != nil {
		return fmt.Errorf("Failed to symlink to new bin dir %v from %v: %v", newDir, oldDir, err)
	}
	return nil
}
// runHooks runs the specified hooks sequentially, stopping at the first
// failure.
func runHooks(jirix *jiri.X, hooks Hooks) error {
	jirix.TimerPush("run hooks")
	defer jirix.TimerPop()
	s := jirix.NewSeq()
	for _, hook := range hooks {
		// When an interpreter is configured, run it with the hook path as
		// the first argument; otherwise execute the hook directly.
		command, args := hook.Path, []string{}
		if hook.Interpreter != "" {
			command = hook.Interpreter
			args = append(args, hook.Path)
		}
		for _, a := range hook.Args {
			args = append(args, a.Arg)
		}
		if err := s.Last(command, args...); err != nil {
			return fmt.Errorf("Hook %v failed: %v command: %v args: %v", hook.Name, err, command, args)
		}
	}
	return nil
}
// resetProject advances the local master branch of the given
// project, which is expected to exist locally at project.Path.
func resetProject(jirix *jiri.X, project Project) error {
	fn := func() error {
		switch project.Protocol {
		case "git":
			if project.Remote == "" {
				return fmt.Errorf("project %v does not have a remote", project.Name)
			}
			// Point "origin" at the manifest's remote before fetching, in
			// case the remote URL has changed.
			if err := jirix.Git().SetRemoteUrl("origin", project.Remote); err != nil {
				return err
			}
			if err := jirix.Git().Fetch("origin"); err != nil {
				return err
			}
			// Having a specific revision trumps everything else - once fetched,
			// always reset to that revision.
			if project.Revision != "" && project.Revision != "HEAD" {
				return jirix.Git().Reset(project.Revision)
			}
			// If no revision, reset to the configured remote branch, or master
			// if no remote branch.
			remoteBranch := project.RemoteBranch
			if remoteBranch == "" {
				remoteBranch = "master"
			}
			return jirix.Git().Reset("origin/" + remoteBranch)
		default:
			return UnsupportedProtocolErr(project.Protocol)
		}
	}
	// Run fn with the project checked out on its local master branch.
	return ApplyToLocalMaster(jirix, Projects{project.Key(): project}, fn)
}
// importer handles importing manifest files. There are two uses: Load reads
// full manifests into memory, while Update updates remote manifest projects.
type importer struct {
	// cycleStack records the chain of imports currently being processed,
	// innermost last; it is used to detect import cycles.
	cycleStack []cycleInfo
}

// cycleInfo identifies one import: the local manifest file path, and (for
// remote imports only) the "remote/manifest" key.
type cycleInfo struct {
	file, key string
}

// importNoCycles checks for cycles in imports. There are two types of cycles:
//   file - Cycle in the paths of manifest files in the local filesystem.
//   key  - Cycle in the remote manifests specified by remote imports.
//
// Example of file cycles. File A imports file B, and vice versa.
//   file=manifest/A              file=manifest/B
//   <manifest>                   <manifest>
//     <fileimport file="B"/>       <fileimport file="A"/>
//   </manifest>                  </manifest>
//
// Example of key cycles. The key consists of "remote/manifest", e.g.
//   https://vanadium.googlesource.com/manifest/v2/default
// In the example, key x/A imports y/B, and vice versa.
//   key=x/A                                 key=y/B
//   <manifest>                              <manifest>
//     <import remote="y" manifest="B"/>       <import remote="x" manifest="A"/>
//   </manifest>                             </manifest>
//
// The strategy: keep a single stack for both files and keys, push before
// running the recursive load or update function, and pop when the function is
// done. A duplicate on the stack at any point means a cycle. We know the file
// for both local fileimports and remote imports, but only know the key for
// remote imports; the key for local fileimports is empty.
//
// A more complex case would involve a combination of local fileimports and
// remote imports, using the "root" attribute to change paths on the local
// filesystem. In this case the key will eventually expose the cycle.
func (imp *importer) importNoCycles(file, key string, fn func() error) error {
	next := cycleInfo{file, key}
	for _, prev := range imp.cycleStack {
		if prev.file == file {
			return fmt.Errorf("import cycle detected in local manifest files: %q", append(imp.cycleStack, next))
		}
		if key != "" && prev.key == key {
			return fmt.Errorf("import cycle detected in remote manifest imports: %q", append(imp.cycleStack, next))
		}
	}
	imp.cycleStack = append(imp.cycleStack, next)
	if err := fn(); err != nil {
		return err
	}
	imp.cycleStack = imp.cycleStack[:len(imp.cycleStack)-1]
	return nil
}
// Load reads the manifest at file — and, transitively, everything it
// imports — into the given projects, tools and hooks maps, guarding against
// import cycles via importNoCycles. key is the remote-import key ("" for
// local file imports).
func (imp *importer) Load(jirix *jiri.X, root, file, key string, projects Projects, tools Tools, hooks Hooks) error {
	return imp.importNoCycles(file, key, func() error {
		return imp.load(jirix, root, file, projects, tools, hooks)
	})
}
// load parses the manifest at file and accumulates its imports, projects,
// tools, and hooks into the given maps, recursing through Load so cycle
// detection stays in effect. root is prepended to relative project paths.
func (imp *importer) load(jirix *jiri.X, root, file string, projects Projects, tools Tools, hooks Hooks) error {
	m, err := ManifestFromFile(jirix, file)
	if err != nil {
		return err
	}
	// Process all imports.
	for _, _import := range m.Imports {
		newRoot, newFile := root, ""
		if _import.Remote != "" {
			// New-style remote import
			newRoot = filepath.Join(root, _import.Root)
			newFile = filepath.Join(newRoot, _import.Path, _import.Manifest)
		} else {
			// Old-style name-based local import.
			//
			// TODO(toddw): Remove this logic when the manifest transition is done.
			if newFile, err = jirix.ResolveManifestPath(_import.Name); err != nil {
				return err
			}
		}
		if err := imp.Load(jirix, newRoot, newFile, _import.remoteKey(), projects, tools, hooks); err != nil {
			return err
		}
	}
	// Process all file imports, resolved relative to the importing
	// manifest's directory.
	for _, fileImport := range m.FileImports {
		newFile := filepath.Join(filepath.Dir(file), fileImport.File)
		if err := imp.Load(jirix, root, newFile, "", projects, tools, hooks); err != nil {
			return err
		}
	}
	// Process all projects, absolutizing their paths against root.
	for _, project := range m.Projects {
		project.Path = filepath.Join(root, project.Path)
		projects[project.Key()] = project
	}
	// Process all tools.
	for _, tool := range m.Tools {
		tools[tool.Name] = tool
	}
	// Process all hooks. A hook's path is relative to its project, which
	// must already be present in projects by this point.
	for _, hook := range m.Hooks {
		project, err := projects.FindUnique(hook.Project)
		if err != nil {
			return fmt.Errorf("error while finding project %q for hook %q: %v", hook.Project, hook.Name, err)
		}
		hook.Path = filepath.Join(project.Path, hook.Path)
		hooks[hook.Name] = hook
	}
	return nil
}
// Update updates the remote manifest projects referenced from file — and,
// transitively, from everything it imports — guarding against import cycles
// via importNoCycles. key is the remote-import key ("" for local imports).
func (imp *importer) Update(jirix *jiri.X, root, file, key string, localProjects Projects) error {
	return imp.importNoCycles(file, key, func() error {
		return imp.update(jirix, root, file, localProjects)
	})
}
// update processes the manifest at file: remote imports are handled first
// (each treated as a regular project and created/moved/updated in place),
// then old-style local imports, then file imports, recursing through Update
// so cycle detection stays in effect.
func (imp *importer) update(jirix *jiri.X, root, file string, localProjects Projects) error {
	m, err := ManifestFromFile(jirix, file)
	if err != nil {
		return err
	}
	// Process all remote imports. This logic treats the remote import as a
	// regular project, and runs our regular create/move/update logic on it. We
	// never handle deletes here; those are handled in updateProjects.
	for _, remote := range m.Imports {
		if remote.Remote == "" {
			// Old-style local imports handled in loop below.
			continue
		}
		newRoot := filepath.Join(root, remote.Root)
		remote.Path = filepath.Join(newRoot, remote.Path)
		newFile := filepath.Join(remote.Path, remote.Manifest)
		var localProject *Project
		if p, ok := localProjects[remote.Project.Key()]; ok {
			localProject = &p
		}
		// Since &remote.Project is never nil, we'll never produce a delete op.
		op := computeOp(localProject, &remote.Project, false, newRoot)
		if err := op.Test(jirix, newFsUpdates()); err != nil {
			return err
		}
		updateFn := func() error { return op.Run(jirix, nil) }
		// Always log the output of updateFn, irrespective of the verbose flag.
		if err := jirix.NewSeq().Verbose(true).Call(updateFn, "%v", op).Done(); err != nil {
			fmt.Fprintf(jirix.Stderr(), "%v\n", err)
			return err
		}
		localProjects[remote.Project.Key()] = remote.Project
		// Recurse into the just-updated remote manifest.
		if err := imp.Update(jirix, newRoot, newFile, remote.remoteKey(), localProjects); err != nil {
			return err
		}
	}
	// Process all old-style local imports.
	for _, local := range m.Imports {
		if local.Remote != "" {
			// New-style remote imports handled in loop above.
			continue
		}
		newFile, err := jirix.ResolveManifestPath(local.Name)
		if err != nil {
			return err
		}
		if err := imp.Update(jirix, root, newFile, "", localProjects); err != nil {
			return err
		}
	}
	// Process all file imports, resolved relative to the importing
	// manifest's directory.
	for _, fileImport := range m.FileImports {
		newFile := filepath.Join(filepath.Dir(file), fileImport.File)
		if err := imp.Update(jirix, root, newFile, "", localProjects); err != nil {
			return err
		}
	}
	return nil
}
// reportNonMaster checks if the given project is on master branch and
// if not, reports this fact along with information on how to update it.
// Being on a non-master branch is not an error; only a note is printed.
func reportNonMaster(jirix *jiri.X, project Project) (e error) {
	// Remember the current working directory so it can be restored on exit.
	cwd, err := os.Getwd()
	if err != nil {
		return err
	}
	defer collect.Error(func() error { return jirix.NewSeq().Chdir(cwd).Done() }, &e)
	s := jirix.NewSeq()
	if err := s.Chdir(project.Path).Done(); err != nil {
		return err
	}
	switch project.Protocol {
	case "git":
		current, err := jirix.Git().CurrentBranchName()
		if err != nil {
			return err
		}
		if current != "master" {
			line1 := fmt.Sprintf(`NOTE: "jiri update" only updates the "master" branch and the current branch is %q`, current)
			line2 := fmt.Sprintf(`to update the %q branch once the master branch is updated, run "git merge master"`, current)
			s.Verbose(true).Output([]string{line1, line2})
		}
		return nil
	default:
		return UnsupportedProtocolErr(project.Protocol)
	}
}
// collectGoogleSourceHosts returns a slice of googlesource hosts for the given
// projects. Each host will appear once in the slice.
func collectGoogleSourceHosts(ps Projects) []string {
	seen := map[string]bool{}
	for _, p := range ps {
		if !googlesource.IsGoogleSourceRemote(p.Remote) {
			continue
		}
		// Remotes that fail to parse are skipped; they cannot be queried.
		u, err := url.Parse(p.Remote)
		if err != nil {
			continue
		}
		seen[u.Scheme+"://"+u.Host] = true
	}
	return set.StringBool.ToSlice(seen)
}
// getRemoteHeadRevisions attempts to get the repo statuses from remote for HEAD
// projects so we can detect when a local project is already up-to-date.
// Fetch failures are logged but never fatal; projects whose status cannot be
// determined are simply left pinned to "HEAD".
func getRemoteHeadRevisions(jirix *jiri.X, remoteProjects Projects) {
	// Bail out early unless at least one project is pinned to HEAD.
	anyAtHead := false
	for _, rp := range remoteProjects {
		if rp.Revision == "HEAD" {
			anyAtHead = true
			break
		}
	}
	if !anyAtHead {
		return
	}
	// Gather branch information from every googlesource host in one pass.
	statuses := googlesource.RepoStatuses{}
	for _, host := range collectGoogleSourceHosts(remoteProjects) {
		repoStatuses, err := googlesource.GetRepoStatuses(jirix, host)
		if err != nil {
			// Log the error but don't fail.
			fmt.Fprintf(jirix.Stderr(), "Error fetching repo statuses from remote: %v\n", err)
			continue
		}
		for repo, status := range repoStatuses {
			statuses[repo] = status
		}
	}
	// Replace each HEAD pin with the remote's current master revision,
	// when known.
	for name, rp := range remoteProjects {
		if rp.Revision != "HEAD" {
			continue
		}
		status, ok := statuses[rp.Name]
		if !ok {
			continue
		}
		masterRev, ok := status.Branches["master"]
		if !ok || masterRev == "" {
			continue
		}
		rp.Revision = masterRev
		remoteProjects[name] = rp
	}
}
// updateProjects brings the local projects in sync with remoteProjects: it
// computes the create/move/update/delete operations needed, tests them all
// up front, runs them, and finally records the resulting state in the
// current manifest.
func updateProjects(jirix *jiri.X, remoteProjects Projects, gc bool) error {
	jirix.TimerPush("update projects")
	defer jirix.TimerPop()
	// A full scan is used when garbage collecting so that projects a fast
	// scan might miss can still be considered for deletion.
	scanMode := FastScan
	if gc {
		scanMode = FullScan
	}
	localProjects, err := LocalProjects(jirix, scanMode)
	if err != nil {
		return err
	}
	// Resolve "HEAD" pins against the remote so up-to-date projects can be
	// detected.
	getRemoteHeadRevisions(jirix, remoteProjects)
	ops := computeOperations(localProjects, remoteProjects, gc, "")
	// Test every operation before running any, so failures are detected
	// before the filesystem is touched.
	updates := newFsUpdates()
	for _, op := range ops {
		if err := op.Test(jirix, updates); err != nil {
			return err
		}
	}
	failed := false
	manifest := &Manifest{Label: jirix.Manifest()}
	s := jirix.NewSeq()
	for _, op := range ops {
		updateFn := func() error { return op.Run(jirix, manifest) }
		// Always log the output of updateFn, irrespective of
		// the value of the verbose flag.
		if err := s.Verbose(true).Call(updateFn, "%v", op).Done(); err != nil {
			fmt.Fprintf(jirix.Stderr(), "%v\n", err)
			failed = true
		}
	}
	// Exit code 2 signals that at least one operation failed.
	if failed {
		return cmdline.ErrExitCode(2)
	}
	if err := writeCurrentManifest(jirix, manifest); err != nil {
		return err
	}
	return nil
}
// writeMetadata stores the given project metadata in the directory
// identified by the given path.
func writeMetadata(jirix *jiri.X, project Project, dir string) (e error) {
	metadataDir := filepath.Join(dir, jiri.ProjectMetaDir)
	// Remember the current working directory so it can be restored on exit.
	cwd, err := os.Getwd()
	if err != nil {
		return err
	}
	defer collect.Error(func() error { return jirix.NewSeq().Chdir(cwd).Done() }, &e)
	s := jirix.NewSeq()
	if err := s.MkdirAll(metadataDir, os.FileMode(0755)).
		Chdir(metadataDir).Done(); err != nil {
		return err
	}
	// Replace absolute project paths with relative paths to make it
	// possible to move the $JIRI_ROOT directory locally.
	relPath, err := filepath.Rel(jirix.Root, project.Path)
	if err != nil {
		return err
	}
	project.Path = relPath
	metadataFile := filepath.Join(metadataDir, jiri.ProjectMetaFile)
	return project.ToFile(jirix, metadataFile)
}
// addProjectToManifest records the information about the given
// project in the given manifest. The function is used to create a
// manifest that records the current state of jiri projects, which
// can be used to restore this state at some later point.
//
// NOTE: The function assumes that the given project is on a
// master branch.
func addProjectToManifest(jirix *jiri.X, manifest *Manifest, project Project) error {
	// A nil manifest means recording is disabled; silently succeed.
	if manifest == nil {
		return nil
	}
	// If the project uses relative revision, replace it with an absolute one.
	switch project.Protocol {
	case "git":
		if project.Revision == "HEAD" {
			revision, err := jirix.Git(tool.RootDirOpt(project.Path)).CurrentRevision()
			if err != nil {
				return err
			}
			project.Revision = revision
		}
	default:
		return UnsupportedProtocolErr(project.Protocol)
	}
	// Record the path relative to $JIRI_ROOT so the tree stays relocatable.
	relPath, err := filepath.Rel(jirix.Root, project.Path)
	if err != nil {
		return err
	}
	project.Path = relPath
	manifest.Projects = append(manifest.Projects, project)
	return nil
}
// fsUpdates is used to track filesystem updates made by operations.
// TODO(nlacasse): Currently we only use fsUpdates to track deletions so that
// jiri can delete and create a project in the same directory in one update.
// There are lots of other cases that should be covered though, like detecting
// when two projects would be created in the same directory.
type fsUpdates struct {
	// deletedDirs maps cleaned directory paths to true once recorded as
	// deleted.
	deletedDirs map[string]bool
}

// newFsUpdates returns an empty update tracker.
func newFsUpdates() *fsUpdates {
	return &fsUpdates{deletedDirs: map[string]bool{}}
}

// deleteDir records dir (in cleaned form) as deleted.
func (u *fsUpdates) deleteDir(dir string) {
	u.deletedDirs[filepath.Clean(dir)] = true
}

// isDeleted reports whether dir was previously recorded via deleteDir.
func (u *fsUpdates) isDeleted(dir string) bool {
	return u.deletedDirs[filepath.Clean(dir)]
}
// operation represents a single action needed to bring a local project in
// sync with the manifest (create, move, update, delete, or no-op).
type operation interface {
	// Project identifies the project this operation pertains to.
	Project() Project
	// Run executes the operation.
	Run(jirix *jiri.X, manifest *Manifest) error
	// String returns a string representation of the operation.
	String() string
	// Test checks whether the operation would fail.
	Test(jirix *jiri.X, updates *fsUpdates) error
}

// commonOperation represents a project operation.
type commonOperation struct {
	// project holds information about the project such as its
	// name, local path, and the protocol it uses for version
	// control.
	project Project
	// destination is the new project path.
	destination string
	// source is the current project path.
	source string
}

// Project returns the project the operation applies to.
func (op commonOperation) Project() Project {
	return op.project
}
// createOperation represents the creation of a project.
type createOperation struct {
	commonOperation
	// root is joined between $JIRI_ROOT and the project's GitHooks path
	// when locating hook sources (see Run).
	root string
}

// Run clones the project into a temporary directory, installs git hooks and
// the /.jiri/ exclusion, resets to the requested revision, writes the
// project metadata, and finally renames the temporary directory into place.
func (op createOperation) Run(jirix *jiri.X, manifest *Manifest) (e error) {
	s := jirix.NewSeq()
	path, perm := filepath.Dir(op.destination), os.FileMode(0755)
	tmpDirPrefix := strings.Replace(op.Project().Name, "/", ".", -1) + "-"
	// Create a temporary directory for the initial setup of the
	// project to prevent an untimely termination from leaving the
	// $JIRI_ROOT directory in an inconsistent state.
	tmpDir, err := s.MkdirAll(path, perm).TempDir(path, tmpDirPrefix)
	if err != nil {
		return err
	}
	// Remove the temporary directory on all exit paths; after the
	// successful Rename below this is a no-op.
	defer collect.Error(func() error { return jirix.NewSeq().RemoveAll(tmpDir).Done() }, &e)
	switch op.project.Protocol {
	case "git":
		if err := jirix.Git().Clone(op.project.Remote, tmpDir); err != nil {
			return err
		}
		// Apply git hooks. We're creating this repo, so there's no danger of
		// overriding existing hooks. Customizing your git hooks with jiri is a bad
		// idea anyway, since jiri won't know to not delete the project when you
		// switch between manifests or do a cleanup.
		gitHooksDstDir := filepath.Join(tmpDir, ".git", "hooks")
		if op.project.GitHooks != "" {
			gitHooksSrcDir := filepath.Join(jirix.Root, op.root, op.project.GitHooks)
			// Copy the specified GitHooks directory into the project's git
			// hook directory. We walk the file system, creating directories
			// and copying files as we encounter them.
			copyFn := func(path string, info os.FileInfo, err error) error {
				if err != nil {
					return err
				}
				relPath, err := filepath.Rel(gitHooksSrcDir, path)
				if err != nil {
					return err
				}
				dst := filepath.Join(gitHooksDstDir, relPath)
				if info.IsDir() {
					return s.MkdirAll(dst, perm).Done()
				}
				src, err := s.ReadFile(path)
				if err != nil {
					return err
				}
				return s.WriteFile(dst, src, perm).Done()
			}
			if err := filepath.Walk(gitHooksSrcDir, copyFn); err != nil {
				return err
			}
		}
		// Apply exclusion for /.jiri/. We're creating the repo so we can safely
		// write to .git/info/exclude
		excludeString := "/.jiri/\n"
		excludeDir := filepath.Join(tmpDir, ".git", "info")
		excludeFile := filepath.Join(excludeDir, "exclude")
		if err := s.MkdirAll(excludeDir, os.FileMode(0750)).
			WriteFile(excludeFile, []byte(excludeString), perm).Done(); err != nil {
			return err
		}
		// Remember the current working directory so it can be restored on
		// exit; Reset below operates on the current directory.
		cwd, err := os.Getwd()
		if err != nil {
			return err
		}
		defer collect.Error(func() error { return jirix.NewSeq().Chdir(cwd).Done() }, &e)
		if err := s.Chdir(tmpDir).Done(); err != nil {
			return err
		}
		if err := jirix.Git().Reset(op.project.Revision); err != nil {
			return err
		}
	default:
		return UnsupportedProtocolErr(op.project.Protocol)
	}
	if err := writeMetadata(jirix, op.project, tmpDir); err != nil {
		return err
	}
	// Move the fully-prepared temporary directory into its final place.
	if err := s.Chmod(tmpDir, os.FileMode(0755)).
		Rename(tmpDir, op.destination).Done(); err != nil {
		return err
	}
	if err := resetProject(jirix, op.project); err != nil {
		return err
	}
	return addProjectToManifest(jirix, manifest, op.project)
}

// String describes the create operation for logging.
func (op createOperation) String() string {
	return fmt.Sprintf("create project %q in %q and advance it to %q", op.project.Name, op.destination, fmtRevision(op.project.Revision))
}

// Test fails if the destination already exists, unless an earlier operation
// has recorded it as deleted.
func (op createOperation) Test(jirix *jiri.X, updates *fsUpdates) error {
	// Check the local file system.
	if _, err := jirix.NewSeq().Stat(op.destination); err != nil {
		if !runutil.IsNotExist(err) {
			return err
		}
	} else if !updates.isDeleted(op.destination) {
		return fmt.Errorf("cannot create %q as it already exists", op.destination)
	}
	return nil
}
// deleteOperation represents the deletion of a project.
type deleteOperation struct {
	commonOperation
	// gc determines whether the operation should be executed or
	// whether it should only print a notification.
	gc bool
}

// Run removes the project from disk when gc is set and the project looks
// clean; otherwise it only prints a note explaining how to remove it.
func (op deleteOperation) Run(jirix *jiri.X, _ *Manifest) error {
	s := jirix.NewSeq()
	if op.gc {
		// Never delete projects with non-master branches, uncommitted
		// work, or untracked content.
		git := jirix.Git(tool.RootDirOpt(op.project.Path))
		branches, _, err := git.GetBranches()
		if err != nil {
			return err
		}
		uncommitted, err := git.HasUncommittedChanges()
		if err != nil {
			return err
		}
		untracked, err := git.HasUntrackedFiles()
		if err != nil {
			return err
		}
		// A single branch with no uncommitted or untracked changes is the
		// only state considered safe to delete.
		if len(branches) != 1 || uncommitted || untracked {
			lines := []string{
				fmt.Sprintf("NOTE: project %v was not found in the project manifest", op.project.Name),
				"however this project either contains non-master branches, uncommitted",
				"work, or untracked files and will thus not be deleted",
			}
			s.Verbose(true).Output(lines)
			return nil
		}
		return s.RemoveAll(op.source).Done()
	}
	// Not garbage collecting: leave the project in place and tell the user
	// how to remove it.
	lines := []string{
		fmt.Sprintf("NOTE: project %v was not found in the project manifest", op.project.Name),
		"it was not automatically removed to avoid deleting uncommitted work",
		fmt.Sprintf(`if you no longer need it, invoke "rm -rf %v"`, op.source),
		`or invoke "jiri update -gc" to remove all such local projects`,
	}
	s.Verbose(true).Output(lines)
	return nil
}

// String describes the delete operation for logging.
func (op deleteOperation) String() string {
	return fmt.Sprintf("delete project %q from %q", op.project.Name, op.source)
}

// Test fails when the source does not exist; on success it records the
// pending deletion so later creates in the same directory are permitted.
func (op deleteOperation) Test(jirix *jiri.X, updates *fsUpdates) error {
	if _, err := jirix.NewSeq().Stat(op.source); err != nil {
		if runutil.IsNotExist(err) {
			return fmt.Errorf("cannot delete %q as it does not exist", op.source)
		}
		return err
	}
	updates.deleteDir(op.source)
	return nil
}
// moveOperation represents the relocation of a project.
type moveOperation struct {
	commonOperation
}

// Run renames the project directory to its new path, then performs the same
// steps as an update: warn when not on master, reset, refresh metadata, and
// record the project in the manifest.
func (op moveOperation) Run(jirix *jiri.X, manifest *Manifest) error {
	s := jirix.NewSeq()
	path, perm := filepath.Dir(op.destination), os.FileMode(0755)
	if err := s.MkdirAll(path, perm).Rename(op.source, op.destination).Done(); err != nil {
		return err
	}
	if err := reportNonMaster(jirix, op.project); err != nil {
		return err
	}
	if err := resetProject(jirix, op.project); err != nil {
		return err
	}
	if err := writeMetadata(jirix, op.project, op.project.Path); err != nil {
		return err
	}
	return addProjectToManifest(jirix, manifest, op.project)
}

// String describes the move operation for logging.
func (op moveOperation) String() string {
	return fmt.Sprintf("move project %q located in %q to %q and advance it to %q", op.project.Name, op.source, op.destination, fmtRevision(op.project.Revision))
}

// Test fails when the source is missing or the destination already exists;
// on success it records the source directory as pending deletion.
func (op moveOperation) Test(jirix *jiri.X, updates *fsUpdates) error {
	s := jirix.NewSeq()
	if _, err := s.Stat(op.source); err != nil {
		if runutil.IsNotExist(err) {
			return fmt.Errorf("cannot move %q to %q as the source does not exist", op.source, op.destination)
		}
		return err
	}
	if _, err := s.Stat(op.destination); err != nil {
		if !runutil.IsNotExist(err) {
			return err
		}
	} else {
		return fmt.Errorf("cannot move %q to %q as the destination already exists", op.source, op.destination)
	}
	updates.deleteDir(op.source)
	return nil
}
// updateOperation represents the update of a project.
type updateOperation struct {
	commonOperation
}

// Run advances the project: it warns when the project is not on master,
// resets it to the target revision, rewrites its metadata, and records the
// project in the manifest.
func (op updateOperation) Run(jirix *jiri.X, manifest *Manifest) error {
	for _, step := range []func() error{
		func() error { return reportNonMaster(jirix, op.project) },
		func() error { return resetProject(jirix, op.project) },
		func() error { return writeMetadata(jirix, op.project, op.project.Path) },
	} {
		if err := step(); err != nil {
			return err
		}
	}
	return addProjectToManifest(jirix, manifest, op.project)
}

// String describes the update operation for logging.
func (op updateOperation) String() string {
	return fmt.Sprintf("advance project %q located in %q to %q", op.project.Name, op.source, fmtRevision(op.project.Revision))
}

// Test always succeeds: updates have no filesystem preconditions.
func (op updateOperation) Test(jirix *jiri.X, _ *fsUpdates) error {
	return nil
}
// nullOperation represents a noop. It is used for logging and adding project
// information to the current manifest.
type nullOperation struct {
	commonOperation
}

// Run refreshes the project metadata and records the project in the manifest.
func (op nullOperation) Run(jirix *jiri.X, manifest *Manifest) error {
	err := writeMetadata(jirix, op.project, op.project.Path)
	if err != nil {
		return err
	}
	return addProjectToManifest(jirix, manifest, op.project)
}

// String describes the no-op for logging.
func (op nullOperation) String() string {
	return fmt.Sprintf("project %q located in %q at revision %q is up-to-date", op.project.Name, op.source, fmtRevision(op.project.Revision))
}

// Test always succeeds: a no-op has no preconditions.
func (op nullOperation) Test(jirix *jiri.X, _ *fsUpdates) error {
	return nil
}
// operations is a sortable collection of operations
type operations []operation

// Len returns the length of the collection.
func (ops operations) Len() int {
	return len(ops)
}

// opRank returns the scheduling priority of op's concrete type. Lower ranks
// run first: deletes before moves before creates before updates, with no-ops
// last. Unknown types get rank 0, matching the previous implementation's
// zero default.
func opRank(op operation) int {
	rank := 0
	switch op.(type) {
	case deleteOperation:
		rank = 0
	case moveOperation:
		rank = 1
	case createOperation:
		rank = 2
	case updateOperation:
		rank = 3
	case nullOperation:
		rank = 4
	}
	return rank
}

// Less defines the order of operations: first by operation type (see
// opRank) and then by project path. The type ordering exists for
// correctness and to minimize the chance of conflicts: deletes happen
// before moves, which happen before creates; nested creates make the
// outermost directory first because paths sort lexicographically.
func (ops operations) Less(i, j int) bool {
	ri, rj := opRank(ops[i]), opRank(ops[j])
	if ri != rj {
		return ri < rj
	}
	return ops[i].Project().Path < ops[j].Project().Path
}

// Swap swaps two elements of the collection.
func (ops operations) Swap(i, j int) {
	ops[i], ops[j] = ops[j], ops[i]
}
// computeOperations inputs a set of projects to update and the set of
// current and new projects (as defined by contents of the local file
// system and manifest file respectively) and outputs a collection of
// operations that describe the actions needed to update the target
// projects.
func computeOperations(localProjects, remoteProjects Projects, gc bool, root string) operations {
	result := operations{}
	// Collect the union of project keys across the local and remote views.
	allProjects := map[ProjectKey]bool{}
	for _, p := range localProjects {
		allProjects[p.Key()] = true
	}
	for _, p := range remoteProjects {
		allProjects[p.Key()] = true
	}
	// Idiom fix: `for key := range` instead of `for key, _ := range`
	// (flagged by gofmt -s / go vet style checks).
	for key := range allProjects {
		var local, remote *Project
		if project, ok := localProjects[key]; ok {
			local = &project
		}
		if project, ok := remoteProjects[key]; ok {
			remote = &project
		}
		result = append(result, computeOp(local, remote, gc, root))
	}
	// Sort so deletes run before moves, moves before creates, etc.
	sort.Sort(result)
	return result
}
// computeOp decides which operation reconciles the local and remote views of
// a single project: a nil remote yields a delete, a nil local yields a
// create, and when both are present the project is moved, updated, or left
// alone depending on its path and revision.
func computeOp(local, remote *Project, gc bool, root string) operation {
	if local == nil && remote == nil {
		panic("jiri: computeOp called with nil local and remote")
	}
	if remote == nil {
		// Present locally but absent from the manifest: delete (gated on gc).
		return deleteOperation{commonOperation{
			destination: "",
			project:     *local,
			source:      local.Path,
		}, gc}
	}
	if local == nil {
		// Present in the manifest but absent locally: create.
		return createOperation{commonOperation{
			destination: remote.Path,
			project:     *remote,
			source:      "",
		}, root}
	}
	common := commonOperation{
		destination: remote.Path,
		project:     *remote,
		source:      local.Path,
	}
	switch {
	case local.Path != remote.Path:
		// moveOperation also does an update, so the revision need not be
		// checked here.
		return moveOperation{common}
	case local.Revision != remote.Revision:
		return updateOperation{common}
	default:
		return nullOperation{common}
	}
}
// ParseNames identifies the set of projects that a jiri command should
// be applied to. When args is empty, the defaultProjects set is used.
// Names that match nothing in the manifest produce a warning on stderr
// but are otherwise ignored.
func ParseNames(jirix *jiri.X, args []string, defaultProjects map[string]struct{}) (Projects, error) {
	manifestProjects, _, err := ReadManifest(jirix)
	if err != nil {
		return nil, err
	}
	result := Projects{}
	if len(args) == 0 {
		// Use the default set of projects.
		args = set.String.ToSlice(defaultProjects)
	}
	for _, name := range args {
		projects := manifestProjects.Find(name)
		if len(projects) == 0 {
			// Issue a warning if the target project does not exist in the
			// project manifest. (Fix: the message previously lacked a
			// trailing newline, so it ran into subsequent output.)
			fmt.Fprintf(jirix.Stderr(), "project %q does not exist in the project manifest\n", name)
		}
		for _, project := range projects {
			result[project.Key()] = project
		}
	}
	return result, nil
}
// fmtRevision returns the first 8 chars of a revision hash. A revision
// shorter than 8 characters is returned unchanged.
func fmtRevision(r string) string {
	const prefixLen = 8
	if len(r) >= prefixLen {
		return r[:prefixLen]
	}
	return r
}
|
/*
A simple kafka consumer-group client
Copyright 2016 MistSys
*/
package consumer
import (
"fmt"
"log"
"sort"
"sync"
"time"
"github.com/Shopify/sarama"
)
// debug is a compile-time toggle: set to true to see log messages; when
// false, dbgf compiles down to a no-op.
const debug = true
// dbgf logs a printf-style message to somewhere reasonable if debug is
// enabled, and does nothing (as efficiently as possible, with no side
// effects) if debug is disabled.
func dbgf(format string, args ...interface{}) {
	// Renamed the parameter from `fmt` to `format`: the old name shadowed
	// the fmt package inside this function.
	if debug {
		log.Printf(format, args...)
	}
}
// MinVersion is the minimum kafka API version required. Use this when
// constructing the sarama.Client's sarama.Config.MinVersion.
var MinVersion = sarama.V0_9_0_0
// Error holds the errors generated by this package.
type Error struct {
	Err       error    // underlying error
	Context   string   // description of the context surrounding the error
	Consumer  Consumer // nil, or Consumer which produced the error
	Topic     string   // "", or the topic which had the error
	Partition int32    // -1, or the partition which had the error
	cl        *client  // the client which generated the error; supplies the group name when formatting
}
// Error formats the error, including as much of the group/topic/partition
// context as is known.
func (err *Error) Error() string {
	switch {
	case err.Topic == "":
		return fmt.Sprintf("consumer-group %q: Error %s: %s", err.cl.group_name, err.Context, err.Err)
	case err.Partition == -1:
		return fmt.Sprintf("consumer-group %q: Error %s, topic %q: %s", err.cl.group_name, err.Context, err.Topic, err.Err)
	default:
		return fmt.Sprintf("consumer-group %q: Error %s, topic %q partition %d: %s", err.cl.group_name, err.Context, err.Topic, err.Partition, err.Err)
	}
}
// Config is the configuration of a Client. Typically you'd create a default configuration with
// NewConfig, modify any fields of interest, and pass it to NewClient. Once passed to NewClient the
// Config must not be modified. (doing so leads to data races, and may cause bugs as well).
//
// In addition to this config, consumer's code also looks at the sarama.Config of the sarama.Client
// supplied to NewClient, especially at the Consumer.Offset settings, Version, and [TODO] ChannelBufferSize.
type Config struct {
	Session struct {
		// The allowed session timeout for registered consumers (defaults to 30s).
		// Must be within the allowed server range.
		Timeout time.Duration
	}
	Rebalance struct {
		// The allowed rebalance timeout for registered consumers (defaults to 30s).
		// Must be within the allowed server range. Only functions if sarama.Config.Version >= 0.10.1
		// Otherwise Session.Timeout is used for rebalancing too.
		Timeout time.Duration
	}
	Heartbeat struct {
		// Interval between each heartbeat (defaults to 3s). It should be no more
		// than 1/3rd of the Session.Timeout setting
		Interval time.Duration
	}
	// the partitioner used to map partitions to consumer group members (defaults to a round-robin partitioner)
	Partitioner Partitioner
	// The handler for sarama.ErrOffsetOutOfRange errors (defaults to sarama.OffsetNewest,nil). Implementation
	// must return the new starting offset in the partition, or an error. The sarama.Client is included for
	// convenience, since handling this might involve querying the partition's current offsets.
	OffsetOutOfRange func(topic string, partition int32, client sarama.Client) (offset int64, err error)
}
// DefaultOffsetOutOfRange is the default implementation of
// Config.OffsetOutOfRange. It jumps to the current head of the partition.
func DefaultOffsetOutOfRange(topic string, partition int32, client sarama.Client) (offset int64, err error) {
	return sarama.OffsetNewest, nil
}
// NewConfig constructs a default configuration: 30s session and rebalance
// timeouts, a 3s heartbeat interval, round-robin partitioning, and the
// default out-of-range offset handler.
func NewConfig() *Config {
	c := new(Config)
	c.Session.Timeout = 30 * time.Second
	c.Rebalance.Timeout = 30 * time.Second
	c.Heartbeat.Interval = 3 * time.Second
	c.Partitioner = RoundRobin
	c.OffsetOutOfRange = DefaultOffsetOutOfRange
	return c
}
/*
NewClient creates a new consumer group client on top of an existing
sarama.Client.
After this call the contents of config should be treated as read-only.
config can be nil if the defaults are acceptable.
The consumer group name is used to match this client with other
instances running elsewhere, but connected to the same cluster
of kafka brokers and using the same consumer group name.
The supplied sarama.Client should have been constructed with a sarama.Config
where sarama.Config.Version is >= consumer.MinVersion, and if full handling of
ErrOffsetOutOfRange is desired, sarama.Config.Consumer.Return.Errors = true.
In addition, this package uses the settings in sarama.Config.Consumer.Offset
*/
func NewClient(group_name string, config *Config, sarama_client sarama.Client) (Client, error) {
	// Honor the documented contract that config may be nil: substitute the
	// defaults, otherwise the run goroutine would dereference a nil config
	// (e.g. cl.config.Session.Timeout) and panic.
	if config == nil {
		config = NewConfig()
	}
	cl := &client{
		client:       sarama_client,
		config:       config,
		group_name:   group_name,
		errors:       make(chan error),
		closed:       make(chan struct{}),
		add_consumer: make(chan add_consumer),
		rem_consumer: make(chan *consumer),
	}
	// start the client's manager goroutine; rc delivers the result of the
	// first attempt to join the group (or nil on success)
	rc := make(chan error)
	cl.wg.Add(1)
	go cl.run(rc)
	return cl, <-rc
}
/*
Client is a kafka client belonging to a consumer group. It is created by NewClient.
*/
type Client interface {
	// Consume returns a consumer of the given topic
	Consume(topic string) (Consumer, error)
	// Close closes the client. It must be called to shutdown
	// the client. It calls AsyncClose on any yet unclosed topic
	// Consumers created by this Client.
	// It does NOT close the inner sarama.Client.
	// Calling twice is NOT supported.
	Close()
	// Errors returns a channel which can (should) be monitored
	// for errors. callers should probably log or otherwise report
	// the returned errors. The channel closes when the client
	// is closed.
	Errors() <-chan error
	// TODO have a Status() method for debug/logging? Or is Errors() enough?
}
/*
Consumer is a consumer of a topic.
Messages from any partition assigned to this client arrive on the
channel returned by Messages.
Every message read from the Messages channel must be eventually passed
to Done. Calling Done is the signal that the message has been consumed
and the offset of that message can be committed back to kafka.
Of course this requires that the message's Partition and Offset fields not
be altered. Then again for what possible reason would you do such a thing?
*/
type Consumer interface {
	// Messages returns the channel of messages arriving from kafka. It always
	// returns the same result, so it is safe to call once and store the result.
	// Every message read from the channel should be passed to Done when processing
	// of the message is complete.
	Messages() <-chan *sarama.ConsumerMessage
	// Done indicates the processing of the message is complete, and its offset can
	// be committed to kafka. Calling Done twice with the same message, or with a
	// garbage message, can cause trouble.
	Done(*sarama.ConsumerMessage)
	// AsyncClose terminates the consumer cleanly. Callers can continue to read from
	// Messages channel until it is closed, or not, as they wish.
	// Calling Client.Close() performs a AsyncClose() on any remaining consumers.
	// Calling AsyncClose multiple times is permitted. Only the first call has any effect.
	// Never calling AsyncClose is also permitted. Client.Close() implies Consumer.AsyncClose.
	AsyncClose()
}
/*
Partitioner maps partitions to consumer group members.
When the user wants control over the partitioning they should set
Config.Partitioner to their implementation of Partitioner.
*/
type Partitioner interface {
	// PrepareJoin prepares a JoinGroupRequest given the topics supplied.
	// The simplest implementation would be something like
	// join_req.AddGroupProtocolMetadata("<partitioner name>", &sarama.ConsumerGroupMemberMetadata{ Version: 1, Topics: topics, })
	PrepareJoin(join_req *sarama.JoinGroupRequest, topics []string)
	// Partition performs the partitioning. Given the requested
	// memberships from the JoinGroupResponse, it adds the results
	// to the SyncGroupRequest. Returning an error cancels everything.
	// The sarama.Client supplied to NewClient is included for convenience,
	// since performing the partitioning probably requires looking at each
	// topic's metadata, especially its list of partitions.
	Partition(*sarama.SyncGroupRequest, *sarama.JoinGroupResponse, sarama.Client) error
	// ParseSync parses the SyncGroupResponse and returns the map of topics
	// to partitions assigned to this client, or an error if the information
	// is not parsable.
	ParseSync(*sarama.SyncGroupResponse) (map[string][]int32, error)
}
// client implements the Client interface. Its consumer map is owned by the
// run goroutine started in NewClient; other goroutines interact with it only
// through the command channels below.
type client struct {
	client       sarama.Client     // the sarama client we were constructed from
	config       *Config           // our configuration (read-only)
	group_name   string            // the client-group name
	errors       chan error        // channel over which asynchronous errors are reported
	closed       chan struct{}     // channel which is closed to cause the client to shutdown
	wg           sync.WaitGroup    // waitgroup which is done when the client is shutdown
	add_consumer chan add_consumer // command channel used to add a new consumer
	rem_consumer chan *consumer    // command channel used to remove an existing consumer
}
// Errors returns the channel over which asynchronous errors are observed.
func (cl *client) Errors() <-chan error {
	return cl.errors
}
// add_consumer are the messages sent over the client.add_consumer channel.
type add_consumer struct {
	con   *consumer    // the consumer to be added
	reply chan<- error // channel over which the outcome of the add is returned (nil on success)
}
// Consume returns a Consumer of the given topic, registering it with the
// client's manager goroutine. A topic can only be consumed once per Client.
func (cl *client) Consume(topic string) (Consumer, error) {
	sc, err := sarama.NewConsumerFromClient(cl.client)
	if err != nil {
		return nil, cl.makeError("Consume sarama.NewConsumerFromClient", err)
	}
	c := &consumer{
		cl:                 cl,
		consumer:           sc,
		topic:              topic,
		messages:           make(chan *sarama.ConsumerMessage),
		closed:             make(chan struct{}),
		assignments:        make(chan *assignment, 1),
		commit_reqs:        make(chan commit_req),
		restart_partitions: make(chan *partition),
		premessages:        make(chan *sarama.ConsumerMessage), // TODO give ourselves some capacity once I know it runs right without any (capacity hides bugs :-)
		done:               make(chan *sarama.ConsumerMessage), // we should probably use sarama.Config.ChannelBufferSize for our channels
	}
	// hand the consumer to the manager goroutine and wait for its verdict
	reply := make(chan error)
	cl.add_consumer <- add_consumer{con: c, reply: reply}
	if err := <-reply; err != nil {
		// if an error is returned then it is up to us to close the sarama.Consumer;
		// any secondary error from Close is dropped since we already have one to return
		_ = sc.Close()
		return nil, err
	}
	return c, nil
}
// Close shuts down the client and any remaining Consumers, blocking until
// the shutdown is complete.
func (cl *client) Close() {
	dbgf("Close client of consumer-group %q", cl.group_name)
	// signal to cl.run() that it should exit
	close(cl.closed)
	// and wait for the shutdown to be complete
	cl.wg.Wait()
}
// run is a long lived goroutine which manages this client's membership in the
// consumer group. early_rc receives the result of the first join attempt
// (releasing the caller of NewClient) and is then nil'ed out.
func (cl *client) run(early_rc chan<- error) {
	defer cl.wg.Done()
	var member_id string                    // our group member id, assigned to us by kafka when we first make contact
	consumers := make(map[string]*consumer) // map of topic -> consumer
	var wg sync.WaitGroup                   // waitgroup used to wait for all consumers to exit
	defer dbgf("consumer-group %q client exiting", cl.group_name)
	// add a consumer
	add := func(add add_consumer) {
		dbgf("client.run add(topic %q)", add.con.topic)
		if _, ok := consumers[add.con.topic]; ok {
			// topic already is being consumed. the way the standard kafka 0.9 group coordination works you cannot consume twice with the
			// same client. If you want to consume the same topic twice, use two Clients.
			add.reply <- cl.makeError("Consume", fmt.Errorf("topic %q is already being consumed", add.con.topic))
			return
		}
		consumers[add.con.topic] = add.con
		wg.Add(1)
		go add.con.run(&wg)
		add.reply <- nil
	}
	// remove a consumer
	rem := func(con *consumer) {
		dbgf("client.run rem(topic %q)", con.topic)
		existing_con := consumers[con.topic]
		if existing_con == con {
			delete(consumers, con.topic)
		} // else it's some old consumer and we've already removed it
		// and let the consumer shutdown
		close(con.assignments)
		close(con.commit_reqs)
	}
	// shutdown the consumers. waits until they are all stopped. only call once and return afterwards, since it makes assumptions that hold only when it is used like that
	shutdown := func() {
		dbgf("client.run shutdown")
		// shutdown the remaining consumers
		for _, con := range consumers {
			con.AsyncClose()
		}
		// and consume any last rem_consumer messages from them
		go func() {
			wg.Wait()
			close(cl.rem_consumer)
		}()
		for con := range cl.rem_consumer {
			rem(con)
		}
		// and shutdown the errors channel
		close(cl.errors)
	}
	pause := false
	refresh := false
	// loop rejoining the group each time the group reforms
join_loop:
	for {
		if pause {
			// pause before continuing, so we don't fail continuously too fast
			pause = false
			timeout := time.After(time.Second) // TODO should we increase timeouts?
		pause_loop:
			for {
				select {
				case <-timeout:
					break pause_loop
				case <-cl.closed:
					// shutdown the remaining consumers
					shutdown()
					return
				case a := <-cl.add_consumer:
					add(a)
				case r := <-cl.rem_consumer:
					rem(r)
				}
			}
		}
		if refresh {
			// refresh the group coordinator (because sarama caches the result, and the cache must be manually invalidated by us when we decide it might be needed)
			err := cl.client.RefreshCoordinator(cl.group_name)
			if err != nil {
				err = cl.makeError("refreshing coordinating broker", err)
				if early_rc != nil {
					early_rc <- err
					return
				}
				cl.deliverError("", err)
				pause = true
				continue join_loop
			}
			refresh = false
		}
		// make contact with the kafka broker coordinating this group
		// NOTE: sarama keeps the result cached, so we aren't taking a round trip to the kafka brokers very time
		// (then again we need to manage sarama's cache too)
		coor, err := cl.client.Coordinator(cl.group_name)
		if err != nil {
			err = cl.makeError("contacting coordinating broker", err)
			if early_rc != nil {
				early_rc <- err
				return
			}
			cl.deliverError("", err)
			pause = true
			refresh = true
			continue join_loop
		}
		dbgf("Coordinator %v", coor)
		// join the group
		jreq := &sarama.JoinGroupRequest{
			GroupId:        cl.group_name,
			SessionTimeout: int32(cl.config.Session.Timeout / time.Millisecond),
			MemberId:       member_id,
			ProtocolType:   "consumer", // we implement the standard kafka 0.9 consumer protocol metadata
		}
		var topics = make([]string, 0, len(consumers))
		for topic := range consumers {
			topics = append(topics, topic)
		}
		cl.config.Partitioner.PrepareJoin(jreq, topics)
		dbgf("sending JoinGroupRequest %v", jreq)
		jresp, err := coor.JoinGroup(jreq)
		dbgf("received JoinGroupResponse %v, %v", jresp, err)
		if err != nil || jresp.Err == sarama.ErrNotCoordinatorForConsumer {
			// some I/O error happened, or the broker told us it is no longer the coordinator. in either case we should recompute the coordinator
			refresh = true
		}
		if err == nil && jresp.Err != 0 {
			err = jresp.Err
		}
		if err != nil {
			err = cl.makeError("joining group", err)
			// if it is still early (the 1st iteration of this loop) then return the error and bail out
			if early_rc != nil {
				early_rc <- err
				return
			}
			cl.deliverError("", err)
			pause = true
			continue join_loop
		}
		// we managed to get a successfull join-group response. that is far enough that basic communication is functioning
		// and we can declare that our early_rc is success and release the caller to NewClient
		if early_rc != nil {
			early_rc <- nil
			early_rc = nil
		}
		// save our member_id for next time we join, and the new generation id
		member_id = jresp.MemberId
		generation_id := jresp.GenerationId
		dbgf("member_id %q, generation_id %d", member_id, generation_id)
		// prepare a sync request
		sreq := &sarama.SyncGroupRequest{
			GroupId:      cl.group_name,
			GenerationId: generation_id,
			MemberId:     member_id,
		}
		// we have been chosen as the leader then we have to map the partitions
		if jresp.LeaderId == member_id {
			dbgf("leader is we")
			err := cl.config.Partitioner.Partition(sreq, jresp, cl.client)
			if err != nil {
				cl.deliverError("partitioning", err)
				// and rejoin (thus aborting this generation) since we can't partition it as needed
				pause = true
				continue join_loop
			}
		}
		// send SyncGroup
		dbgf("sending SyncGroupRequest %v", sreq)
		sresp, err := coor.SyncGroup(sreq)
		dbgf("received SyncGroupResponse %v, %v", sresp, err)
		// FIX: this condition used `&&`, which dereferenced the nil sresp whenever
		// SyncGroup returned an I/O error, and also failed to invalidate the cached
		// coordinator on such errors. `||` matches the JoinGroup handling above.
		if err != nil || sresp.Err == sarama.ErrNotCoordinatorForConsumer {
			// we'll need a new coordinator
			refresh = true
		}
		if err == nil && sresp.Err != 0 {
			err = sresp.Err
		}
		if err != nil {
			cl.deliverError("synchronizing group", err)
			pause = true
			continue join_loop
		}
		assignments, err := cl.config.Partitioner.ParseSync(sresp)
		if err != nil {
			cl.deliverError("decoding member assignments", err)
			pause = true
			continue join_loop
		}
		// save and distribute the new assignments to our topic consumers
		a := &assignment{
			generation_id: generation_id,
			coordinator:   coor,
			member_id:     member_id,
			assignments:   assignments,
		}
		for _, con := range consumers {
			select {
			case con.assignments <- a:
				// got it on the first try
			default:
				// con.assignment is full (it has a capacity of 1)
				// remove the stale assignment and place this one in its place
				select {
				case <-con.assignments:
					// we have room now (since we're the only code which writes to this channel)
					con.assignments <- a
				case con.assignments <- a:
					// in this case the consumer removed the stale assignment before we could
				}
			}
		}
		// start the heartbeat timer
		heartbeat_timer := time.After(cl.config.Heartbeat.Interval)
		// and the offset commit timer
		clconfig := cl.client.Config()
		commit_timer := time.After(clconfig.Consumer.Offsets.CommitInterval)
		// and loop, sending heartbeats until something happens and we need to rejoin (or exit)
	heartbeat_loop:
		for {
			select {
			case <-cl.closed:
				// cl.Close() has been called; time to exit
				// shutdown any remaining consumers (causing them to sync their final offsets)
				shutdown()
				// and nicely leave the consumer group
				req := &sarama.LeaveGroupRequest{
					GroupId:  cl.group_name,
					MemberId: member_id,
				}
				dbgf("sending LeaveGroupRequest %v", req)
				resp, err := coor.LeaveGroup(req)
				dbgf("received LeaveGroupResponse %v, %v", resp, err)
				if err == nil && resp.Err != 0 {
					err = resp.Err
				}
				if err != nil {
					cl.deliverError("leaving group", err)
				}
				// and we're done
				return
			case <-heartbeat_timer:
				// send a heartbeat
				req := &sarama.HeartbeatRequest{
					GroupId:      cl.group_name,
					MemberId:     member_id,
					GenerationId: generation_id,
				}
				dbgf("sending HeartbeatRequest %v", req)
				resp, err := coor.Heartbeat(req)
				dbgf("received HeartbeatResponse %v, %v", resp, err)
				if err != nil || resp.Err == sarama.ErrNotCoordinatorForConsumer {
					// we need a new coordinator
					refresh = true
					continue join_loop
				}
				if err != nil || resp.Err != 0 {
					// we've got heartbeat troubles of one kind or another; disconnect and reconnect
					break heartbeat_loop
				}
				// and start the next heartbeat only after we get the response to this one
				// that way when the network or the broker are slow we back off.
				heartbeat_timer = time.After(cl.config.Heartbeat.Interval)
			case <-commit_timer:
				ocreq := &sarama.OffsetCommitRequest{
					ConsumerGroup:           cl.group_name,
					ConsumerGroupGeneration: generation_id,
					ConsumerID:              member_id,
					RetentionTime:           int64(clconfig.Consumer.Offsets.Retention / time.Millisecond),
					Version:                 2, // kafka 0.9.0 version, with RetentionTime
				}
				if clconfig.Consumer.Offsets.Retention == 0 { // note that this and the rounding math above means that if you wanted a retention time of 0 millseconds you could set Config.Offsets.RetentionTime to something < 1 ms, like 1 nanosecond
					ocreq.RetentionTime = -1 // use broker's value
				}
				var wg sync.WaitGroup
				wg.Add(len(consumers))
				for _, con := range consumers {
					con.commit_reqs <- commit_req{ocreq, &wg}
				}
				wg.Wait()
				dbgf("sending OffsetCommitRequest %v", ocreq)
				ocresp, err := coor.CommitOffset(ocreq)
				dbgf("received OffsetCommitResponse %v, %v", ocresp, err)
				// log any errors we got. there isn't much we can do about them
				if err != nil {
					cl.deliverError("committing offsets", err)
				} else {
					for topic, partitions := range ocresp.Errors {
						for p, err := range partitions {
							if err != 0 {
								cl.deliverError(fmt.Sprintf("committing offset of topic %q partition %d", topic, p), err)
							}
						}
					}
				}
				commit_timer = time.After(clconfig.Consumer.Offsets.CommitInterval)
			case a := <-cl.add_consumer:
				add(a)
				// and rejoin so we can become a member of the new topic
				continue join_loop
			case r := <-cl.rem_consumer:
				rem(r)
				// and rejoin so we can be removed as member of the new topic
				continue join_loop
			}
		} // end of heartbeat_loop
	} // end of join_loop
}
// makeError wraps err into a *Error, associating it with context. Topic and
// Partition are left at their "unknown" values ("" and -1).
func (cl *client) makeError(context string, err error) *Error {
	e := &Error{
		cl:        cl,
		Err:       err,
		Context:   context,
		Partition: -1, // Topic's zero value "" already means "no topic"
	}
	return e
}
// deliverError builds an error and delivers it to the channel returned by
// cl.Errors. An empty context means err is already a fully-formed *Error.
func (cl *client) deliverError(context string, err error) {
	wrapped := err
	if context != "" {
		wrapped = cl.makeError(context, err)
	}
	dbgf("%v", wrapped)
	cl.errors <- wrapped
}
// consumer implements the Consumer interface.
type consumer struct {
	cl                 *client                      // the client this consumer belongs to
	consumer           sarama.Consumer              // the underlying sarama consumer
	topic              string                       // the topic this consumer consumes
	messages           chan *sarama.ConsumerMessage // channel over which messages are delivered to the caller (returned by Messages)
	closed             chan struct{}                // channel which is closed when the consumer is AsyncClose()ed
	close_once         sync.Once                    // Once used to make sure we close only once
	assignments        chan *assignment             // channel over which client.run sends consumer.run each generation's partition assignments
	commit_reqs        chan commit_req              // channel over which client.run sends consumer.run request to fill out a OffsetCommitRequest
	restart_partitions chan *partition              // channel through which partition.run delivers partition restart [at new offset] requests
	premessages        chan *sarama.ConsumerMessage // channel through which partition.run delivers messages to consumer.run
	done               chan *sarama.ConsumerMessage // channel through which Done() returns messages
}
// commit_req is a request for a consumer to write its part into a OffsetCommitRequest.
type commit_req struct {
	ocreq *sarama.OffsetCommitRequest // the request being filled in
	wg    *sync.WaitGroup             // Done() is called on this once the consumer has added its offset blocks
}
// assignment is this client's assigned partitions for one group generation.
type assignment struct {
	generation_id int32              // the current generation
	coordinator   *sarama.Broker     // the current client-group coordinating broker
	member_id     string             // the member_id assigned to us by the coordinator
	assignments   map[string][]int32 // map of topic -> list of partitions
}
// makeError constructs a *Error from this consumer, filling in the Consumer
// and Topic fields on top of the client-level error.
func (con *consumer) makeError(context string, err error) *Error {
	e := con.cl.makeError(context, err)
	e.Consumer = con
	e.Topic = con.topic
	return e
}
// deliverError constructs a *Error from this consumer (with the given
// partition) and delivers it over the client's error channel.
func (con *consumer) deliverError(context string, partition int32, err error) {
	e := con.makeError(context, err)
	e.Partition = partition
	con.cl.deliverError("", e)
}
// Messages returns the channel of messages arriving from kafka.
func (con *consumer) Messages() <-chan *sarama.ConsumerMessage {
	return con.messages
}
// AsyncClose closes the consumer. It can safely be called multiple times;
// only the first call closes the channel.
func (con *consumer) AsyncClose() {
	dbgf("AsyncClose consumer of topic %q", con.topic)
	con.close_once.Do(func() {
		close(con.closed)
	})
}
// consumer goroutine coordinates consuming from multiple partitions in a topic
// NOTE WELL: this function must never do anything which would prevent it from processing message from client.run promptly.
// That means any channel I/O must include cases for con.assignments and con.commit_reqs.
func (con *consumer) run(wg *sync.WaitGroup) {
var generation_id int32 // current generation
var coor *sarama.Broker // current consumer group coordinating broker
var member_id string // our member id assigned by coor
partitions := make(map[int32]*partition) // map of partition number -> partition consumer
// shutdown the removed partitions, committing their last offset
remove := func(removed []int32) {
dbgf("consumer %q rem(%v)", con.topic, removed)
if len(removed) == 0 {
// nothing to do, and no point in sending an empty OffsetCommitRequest msg either
return
}
clconfig := con.cl.client.Config()
ocreq := &sarama.OffsetCommitRequest{
ConsumerGroup: con.cl.group_name,
ConsumerGroupGeneration: generation_id,
ConsumerID: member_id,
RetentionTime: int64(clconfig.Consumer.Offsets.Retention / time.Millisecond),
Version: 2, // kafka 0.9.0 version, with RetentionTime
}
if clconfig.Consumer.Offsets.Retention == 0 { // note that this and the rounding math above means that if you wanted a retention time of 0 millseconds you could set Config.Offsets.RetentionTime to something < 1 ms, like 1 nanosecond
ocreq.RetentionTime = -1 // use broker's value
}
for _, p := range removed {
// stop consuming from partition p
if part, ok := partitions[p]; ok {
delete(partitions, p)
part.consumer.Close()
offset := part.oldest
if len(part.buckets) != 0 {
if part.buckets[0][0] == part.buckets[0][1] {
// add to that the portion of the last block we know been completed (this is often useful when the traffic rate is low or a client shuts down cleanly, since it has probably cleanly returned all offsets we've delivered)
offset += int64(part.buckets[0][1])
} // else we don't know enough to commit any further
}
dbgf("ocreq.AddBlock(%q, %d, %d)", con.topic, p, offset)
ocreq.AddBlock(con.topic, p, offset, 0, "")
}
}
dbgf("sending OffsetCommitRequest %v", ocreq)
ocresp, err := coor.CommitOffset(ocreq)
dbgf("received OffsetCommitResponse %v, %v", ocresp, err)
// log any errors we got. there isn't much we can do about them; the next consumer will start at an older offset
if err != nil {
con.deliverError("committing offsets", -1, err)
} else {
for _, partitions := range ocresp.Errors {
for p, err := range partitions {
if err != 0 {
con.deliverError("committing offset", p, err)
}
}
}
}
}
// handle a commit request from client.run
commit_req := func(c commit_req) {
dbgf("consumer %q commit_req(%v)", con.topic, c)
for p, partition := range partitions {
offset := partition.oldest
if len(partition.buckets) != 0 {
if partition.buckets[0][0] == partition.buckets[0][1] {
// add to that the portion of the last block we know been completed (this is useful when the message rate is slow)
offset += int64(partition.buckets[0][1])
} // else we don't know enough to commit any further
}
dbgf("ocreq.AddBlock(%q, %d, %d)", con.topic, p, offset)
c.ocreq.AddBlock(con.topic, p, offset, 0, "")
}
c.wg.Done()
}
defer func() {
if len(partitions) != 0 {
// cleanup the remaining partition consumers
removed := make([]int32, 0, len(partitions))
for p := range partitions {
removed = append(removed, p)
}
remove(removed)
}
con.consumer.Close()
close(con.messages)
// send ourselves to rem_consumer
rem_loop:
for {
select {
case c := <-con.commit_reqs:
commit_req(c)
case <-con.assignments:
// ignore them, we're shutting down
case con.cl.rem_consumer <- con:
break rem_loop
}
// NOTE: <-con.done is not a case above because there is no good way to shut it down. it is never closed,
// so we'd never know when it was drained. As a consequence, it's client.Done() which aborts when the
// consumer closes, rather than us draining con.done
}
// drain any remaining requests from run.client, until
// run.client closes the channels
for c := range con.commit_reqs {
commit_req(c)
}
for range con.assignments {
// ignore them
}
dbgf("consumer of topic %q exiting", con.topic)
wg.Done()
}()
// handle a message sent to us via con.done
done := func(msg *sarama.ConsumerMessage) {
dbgf("consumer %q done(%d/%d)", con.topic, msg.Partition, msg.Offset)
part := partitions[msg.Partition]
if part == nil {
dbgf("no partition %d", msg.Partition)
return
}
delta := msg.Offset - part.oldest
if delta < 0 {
dbgf("stale message %d/%d", msg.Partition, msg.Offset)
return
}
index := int(delta) >> 6 // /64
if index >= len(part.buckets) {
dbgf("early message %d/%d", msg.Partition, msg.Offset)
return
}
part.buckets[index][1]++
if index == 0 {
// we might have finished the oldest bucket
for part.buckets[0] == [2]uint8{64, 64} {
// the oldest bucket is complete; advance the last committed offset
part.oldest += 64
part.buckets = part.buckets[1:]
if len(part.buckets) == 0 {
break
}
}
}
}
// handle an assignment message
assignment := func(a *assignment) {
dbgf("consumer %q assignment(%v)", con.topic, a)
// see what has changed in the partition assignment of our topic
new_partitions := a.assignments[con.topic]
added, removed := difference(partitions, new_partitions)
dbgf("consumer %q added %v, removed %v", con.topic, added, removed)
// shutdown the partitions while we still belong to the previous generation
remove(removed)
// update the current generation and related info after committing the last offsets from the previous generation
generation_id = a.generation_id
coor = a.coordinator
member_id = a.member_id
// the sarama-cluster code pauses here so that other consumers have time to sync their offsets. Should we do the same?
// I've observed with kafka 0.9.0.1 that once the coordinator bumps the generation_id the client can't commit an offset with
// the old id. So unless the client lies and sends generation_id+1 when it commits there is nothing it can commit, and there
// is no point in waiting. So for now, no waiting.
// fetch the last committed offsets of the new partitions
oreq := &sarama.OffsetFetchRequest{
ConsumerGroup: con.cl.group_name,
Version: 1, // kafka 0.9.0 expects version 1 offset requests
}
for _, p := range added {
oreq.AddPartition(con.topic, p)
}
dbgf("consumer %q sending OffsetFetchRequest %v", con.topic, oreq)
oresp, err := a.coordinator.FetchOffset(oreq)
dbgf("consumer %q received OffsetFetchResponse %v, %v", con.topic, oresp, err)
if err != nil {
con.deliverError("fetching offsets", -1, err)
// and we can't consume any of the new partitions without the offsets
} else if len(added) != 0 {
// start consuming from the added partitions at each partition's last committed offset (which by convention kafaka defines as the last consumed offset+1)
// since computing the starting offset and beginning to consume requires several round trips to the kafka brokers we start all the
// partitions concurrently. That reduces the startup time to a couple RTTs even for topics with a numerous partitions.
started := make(chan *partition)
var wg sync.WaitGroup
for _, p := range added {
wg.Add(1)
go func(p int32) {
defer wg.Done()
ob := oresp.GetBlock(con.topic, p)
if ob == nil {
// can't start this partition without an offset
con.deliverError("FetchOffset response", p, fmt.Errorf("partition %d missing", p))
return
}
if ob.Err != 0 {
con.deliverError("FetchOffset response", p, ob.Err)
return
}
offset := ob.Offset
if offset == sarama.OffsetNewest {
// the broker doesn't have an offset for is. Use the configured initial offset
offset = con.cl.client.Config().Consumer.Offsets.Initial
}
dbgf("consumer %q consuming partition %d at offset %d", con.topic, p, offset)
consumer, err := con.consumer.ConsumePartition(con.topic, p, offset)
if err != nil {
con.deliverError(fmt.Sprintf("sarama.ConsumePartition at offset %d", offset), p, err)
// If the error is ErrOffsetOutOfRange then give ourselves one chance to recover
if err != sarama.ErrOffsetOutOfRange {
// otherwise we can't consume this partition.
return
}
offset, err = con.cl.config.OffsetOutOfRange(con.topic, p, con.cl.client)
if err != nil {
// should we deliver them their own error? I guess so.
con.deliverError("OffsetOutOfRange callback", p, err)
return
}
consumer, err = con.consumer.ConsumePartition(con.topic, p, offset)
if err != nil {
con.deliverError(fmt.Sprintf("sarama.ConsumePartition at offset %d", offset), p, err)
// it didn't work with their offset either. give up
// (we could go into a loop and call them again, but what would that solve?)
return
}
// it worked with the new offset; carry on
}
part := &partition{
con: con,
consumer: consumer,
partition: p,
oldest: offset,
}
go part.run()
started <- part
}(p)
}
go func() {
wg.Wait()
close(started)
}()
for part := range started {
partitions[part.partition] = part
}
}
}
// restart consuming a partition at a new[er] offset
restart_partition := func(part *partition) {
// we kill the old and start a new partition consumer since there is no way to seek an existing sarama.PartitionConsumer in sarama's November 2016 API)
p := part.partition
// first remove the old partition consumer. Once it gets a ErrOffsetOutOfRange it's unable to function.
// since it had an out-of-range offset, it can't commit its offset either
if pa, ok := partitions[p]; !ok || part != pa {
// this is an unknown partition, or we've already killed it; ignore the request
return
}
delete(partitions, p)
part.consumer.Close()
// then ask what the new starting offset should be
offset, err := con.cl.config.OffsetOutOfRange(con.topic, p, con.cl.client)
if err != nil {
// should we deliver them their own error? I guess so.
con.deliverError("OffsetOutOfRange callback", p, err)
return
}
// finally make a new partition consuming starting at the given offset
consumer, err := con.consumer.ConsumePartition(con.topic, p, offset)
if err != nil {
con.deliverError(fmt.Sprintf("sarama.ConsumePartition at offset %d", offset), p, err)
return
}
part = &partition{
con: con,
consumer: consumer,
partition: p,
oldest: offset,
}
go part.run()
partitions[p] = part
}
for {
select {
case msg := <-con.premessages:
dbgf("premessage msg %d/%d", msg.Partition, msg.Offset)
// keep track of msg's offset so we can match it with Done, and deliver the msg
part := partitions[msg.Partition]
if part == nil {
// message from a stale consumer; ignore it
dbgf("no partition %d", msg.Partition)
continue
}
if part.oldest == sarama.OffsetNewest || part.oldest == sarama.OffsetOldest {
// we now know the starting offset. make as if we'd been asked to start there
part.oldest = msg.Offset
}
delta := msg.Offset - part.oldest
if delta < 0 { // || delta > max-out-of-order (TODO)
dbgf("stale message %d/%d", msg.Partition, msg.Offset)
// we can't take this message into account
continue
}
index := int(delta) >> 6 // /64
for index >= len(part.buckets) {
// add a new bucket
part.buckets = append(part.buckets, [2]uint8{0, 0})
}
part.buckets[index][0]++
// and deliver the msg (or handle any of the other messages which can arrive)
deliver_loop:
for {
select {
case con.messages <- msg:
dbgf("delivered msg %d/%d", msg.Partition, msg.Offset)
// success
break deliver_loop
case msg2 := <-con.done:
done(msg2)
case a := <-con.assignments:
assignment(a)
case c := <-con.commit_reqs:
commit_req(c)
case p := <-con.restart_partitions:
restart_partition(p)
case <-con.closed:
// the defered operations do the work
return
}
}
case msg := <-con.done:
done(msg)
case a := <-con.assignments:
assignment(a)
case c := <-con.commit_reqs:
commit_req(c)
case p := <-con.restart_partitions:
restart_partition(p)
case <-con.closed:
// the defered operations do the work
return
}
}
}
// Done hands msg back to consumer.run so the offset bookkeeping for this
// message happens synchronously with the rest of the consumer's state.
// If the consumer has been closed the message is silently dropped.
func (con *consumer) Done(msg *sarama.ConsumerMessage) {
	dbgf("Done(%d/%d)", msg.Partition, msg.Offset)
	select {
	case <-con.closed:
		// the consumer has shut down; nothing left to account for
	case con.done <- msg:
		// delivered to consumer.run for processing
	}
}
// partition contains the data associated with us consuming one partition.
// One partition value (and one partition.run goroutine) exists per partition
// currently assigned to the owning consumer.
type partition struct {
	con       *consumer                // the topic consumer which owns this partition
	consumer  sarama.PartitionConsumer // the underlying sarama partition consumer
	partition int32                    // partition number
	// buckets of # of offsets read from kafka, and the # of offsets completed by a call to Done(). the difference is the # of offsets in flight in the calling code
	// we group offsets in groups of 64 and simply keep a count of how many are outstanding
	// any time the two counts are equal then the offsets are committable. Otherwise we can't tell which is the not yet Done() offset and so we don't know
	buckets [][2]uint8
	oldest  int64 // 1st offset in bucket[0]
}
// makeConsumerError wraps a sarama.ConsumerError into an *Error, tagging it
// with the topic and partition number reported by sarama.
func (part *partition) makeConsumerError(cerr *sarama.ConsumerError) *Error {
	e := part.con.makeError("consuming from sarama", cerr.Err)
	e.Topic = cerr.Topic
	e.Partition = cerr.Partition
	return e
}
// run consumes from the partition and delivers it to the consumer.
// It is a long-lived goroutine, one per assigned partition. Messages are
// forwarded to consumer.run over con.premessages; errors are delivered to the
// client's error channel. When either the messages or the errors channel
// closes, the other is drained before exiting. con.closed aborts everything.
func (part *partition) run() {
	con := part.con
	defer dbgf("partition consumer of %q partition %d exiting", con.topic, part.partition)
	msgs := part.consumer.Messages()
	errors := part.consumer.Errors()
	for {
		select {
		case msg, ok := <-msgs:
			if ok {
				dbgf("got msg %d/%d", msg.Partition, msg.Offset)
				// forward the message to consumer.run, unless we're shutting down
				select {
				case con.premessages <- msg:
				case <-con.closed:
					return
				}
			} else {
				dbgf("draining topic %q partition %d errors", con.topic, part.partition)
				// deliver any remaining errors, and exit
				for sarama_err := range errors {
					con.cl.deliverError("", part.makeConsumerError(sarama_err))
				}
				return
			}
		case sarama_err, ok := <-errors:
			if ok {
				// pick out ErrOffsetOutOfRange errors. These happen if the consumer offset falls off the tail of the kafka log.
				// this easily happens in two cases: when the consumer is too slow, or when the consumer has been stopped for too long.
				// This error cannot be fixed without seeking to a valid offset. However we can't assume that OffsetNewest is the right
				// choice, nor OffsetOldest, nor "5 minutes ago" or anything else. It's up to the user to decide.
				if sarama_err.Err == sarama.ErrOffsetOutOfRange {
					dbgf("ErrOffsetOutOfRange topic %q partition %d", con.topic, part.partition)
					// ask consumer.run to restart this partition at a user-chosen offset
					select {
					case con.restart_partitions <- part:
					case <-con.closed:
						return
					}
					// should we keep reading from the partition? it's unlikely to produce much
				}
				// and always deliver the error
				con.cl.deliverError("", part.makeConsumerError(sarama_err))
			} else {
				// finish off any remaining messages, and exit
				dbgf("draining topic %q partition %d msgs", con.topic, part.partition)
				for msg := range msgs {
					select {
					case con.premessages <- msg:
					case <-con.closed:
						return
					}
				}
				return
			}
		}
	}
}
// difference computes what changed between the old partition assignment (the
// keys of old) and the next assignment (the next slice): partitions present
// only in next are returned in added, partitions present only in old are
// returned in removed. Both results come out in ascending order.
func difference(old map[int32]*partition, next []int32) (added, removed []int32) {
	prev := make(int32Slice, 0, len(old))
	for p := range old {
		prev = append(prev, p)
	}
	cur := make(int32Slice, len(next))
	copy(cur, next)
	sort.Sort(prev)
	sort.Sort(cur)
	// walk the two sorted lists in lock-step, classifying each element
	var i, j int
	for i < len(prev) && j < len(cur) {
		switch {
		case prev[i] < cur[j]:
			removed = append(removed, prev[i])
			i++
		case prev[i] > cur[j]:
			added = append(added, cur[j])
			j++
		default:
			// present in both; unchanged
			i++
			j++
		}
	}
	// whatever is left on either side is one-sided by definition
	removed = append(removed, prev[i:]...)
	added = append(added, cur[j:]...)
	return added, removed
}
// int32Slice implements sort.Interface over a slice of int32.
type int32Slice []int32

// Len returns the number of elements in the slice.
func (s int32Slice) Len() int { return len(s) }

// Less reports whether element i sorts before element j.
func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }

// Swap exchanges elements i and j in place.
func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// roundRobinPartitioner is a simple Partitioner that assigns partitions
// round-robin across all consumers requesting each topic. Its string value is
// the protocol name advertised to the group coordinator.
type roundRobinPartitioner string

// RoundRobin is the global instance of the round-robin partitioner.
const RoundRobin roundRobinPartitioner = "roundrobin" // use the string "roundrobin" without a dash to match what kafka java code uses, should someone want to mix go and java consumers in the same group
// PrepareJoin implements Partitioner. It advertises this member's requested
// topics in the JoinGroupRequest using version 1 of the standard kafka
// consumer protocol metadata, under this partitioner's protocol name.
func (rr roundRobinPartitioner) PrepareJoin(jreq *sarama.JoinGroupRequest, topics []string) {
	jreq.AddGroupProtocolMetadata(string(rr),
		&sarama.ConsumerGroupMemberMetadata{
			Version: 1,
			Topics:  topics,
		})
}
// Partition implements Partitioner. For each topic requested in jresp it
// spreads the topic's partitions round-robin across the members which asked
// for that topic, and records the resulting assignments in sreq.
func (roundRobinPartitioner) Partition(sreq *sarama.SyncGroupRequest, jresp *sarama.JoinGroupResponse, client sarama.Client) error {
	by_member, err := jresp.GetMembers()
	dbgf("by_member %v", by_member)
	if err != nil {
		return err
	}
	// regroup the requests by topic rather than by member. (they arrive grouped
	// by member because the kafka broker treats each member's metadata as an
	// opaque blob, so it cannot do this inversion for us)
	by_topic := make(map[string][]string) // topic -> members requesting the topic
	for member, request := range by_member {
		if request.Version != 1 {
			// skip metadata versions we don't understand; we'll only assign to
			// clients we can decode. Since we are such a client ourselves, the
			// topics we consume are never starved. If this turns out to be a bad
			// idea, this could be changed to return an error instead.
			continue
		}
		for _, t := range request.Topics {
			by_topic[t] = append(by_topic[t], member)
		}
	}
	dbgf("by_topic %v", by_topic)
	// build the assignments: member -> topic -> partitions
	assignments := make(map[string]map[string][]int32)
	for topic, members := range by_topic {
		parts, err := client.Partitions(topic)
		dbgf("Partitions(%q) = %v", topic, parts)
		if err != nil {
			// we could skip this topic and assign it to no-one, but I/O errors
			// are likely to recur, so abort the whole partitioning instead
			return err
		}
		if len(parts) == 0 { // can this happen? best not to divide by zero later if it can
			// nothing to hand out; behave as if this topic didn't exist
			continue
		}
		// deal the partitions out to the members like cards, round and round
		next := 0
	assign_loop:
		for {
			for _, member := range members {
				topics := assignments[member]
				if topics == nil {
					topics = make(map[string][]int32)
					assignments[member] = topics
				}
				topics[topic] = append(topics[topic], parts[next])
				next++
				if next == len(parts) {
					break assign_loop
				}
			}
		}
	}
	dbgf("assignments %v", assignments)
	// encode the computed assignments into the sync request
	for member, topics := range assignments {
		sreq.AddGroupAssignmentMember(member,
			&sarama.ConsumerGroupMemberAssignment{
				Version: 1,
				Topics:  topics,
			})
	}
	return nil
}
// ParseSync implements Partitioner. It decodes this member's assignment from
// the SyncGroupResponse, returning the map of topic -> assigned partitions.
func (roundRobinPartitioner) ParseSync(sresp *sarama.SyncGroupResponse) (map[string][]int32, error) {
	if len(sresp.MemberAssignment) == 0 {
		// in the corner case that we ask for no topics we get nothing back. However sarama fd498173ae2bf (head of master branch Nov 6th 2016) will return a useless error if we call sresp.GetMemberAssignment() in this case
		return nil, nil
	}
	decoded, err := sresp.GetMemberAssignment()
	dbgf("MemberAssignment %v", decoded)
	if err != nil {
		return nil, err
	}
	if v := decoded.Version; v != 1 {
		return nil, fmt.Errorf("unsupported MemberAssignment version %d", v)
	}
	return decoded.Topics, nil
}
Fix concurrent hashmap writes in sarama.OffsetCommitRequest.AddBlock(),
caused by each consumer.run goroutine adding its offset blocks concurrently.
/*
A simple kafka consumer-group client
Copyright 2016 MistSys
*/
package consumer
import (
"fmt"
"log"
"sort"
"sync"
"time"
"github.com/Shopify/sarama"
)
// debug controls whether dbgf emits log messages. It is a compile-time
// constant so that when it is false the dbgf calls cost essentially nothing.
const debug = true // set to false to silence this package's log messages

// dbgf logs a printf-style message to the standard logger when debug is
// enabled, and does nothing (as efficiently as possible) when it is disabled.
// The parameter is named format rather than fmt so it does not shadow the
// imported fmt package.
func dbgf(format string, args ...interface{}) {
	if debug {
		log.Printf(format, args...)
	}
}
// MinVersion is the minimum kafka API version this package requires. Use it
// (or newer) when constructing the sarama.Client's sarama.Config.Version.
var MinVersion = sarama.V0_9_0_0
// Error holds the errors generated by this package. It wraps an underlying
// error together with whatever consumer-group context is known at the point
// the error occurred.
type Error struct {
	Err       error    // underlying error
	Context   string   // description of the context surrounding the error
	Consumer  Consumer // nil, or Consumer which produced the error
	Topic     string   // "", or the topic which had the error
	Partition int32    // -1, or the partition which had the error
	cl        *client  // originating client; supplies the group name when formatting
}
// Error implements the error interface, including as much of the
// topic/partition context in the message as is known.
func (err *Error) Error() string {
	switch {
	case err.Topic != "" && err.Partition != -1:
		return fmt.Sprintf("consumer-group %q: Error %s, topic %q partition %d: %s", err.cl.group_name, err.Context, err.Topic, err.Partition, err.Err)
	case err.Topic != "":
		return fmt.Sprintf("consumer-group %q: Error %s, topic %q: %s", err.cl.group_name, err.Context, err.Topic, err.Err)
	default:
		return fmt.Sprintf("consumer-group %q: Error %s: %s", err.cl.group_name, err.Context, err.Err)
	}
}
// Config is the configuration of a Client. Typically you'd create a default configuration with
// NewConfig, modify any fields of interest, and pass it to NewClient. Once passed to NewClient the
// Config must not be modified. (doing so leads to data races, and may cause bugs as well).
//
// In addition to this config, consumer's code also looks at the sarama.Config of the sarama.Client
// supplied to NewClient, especially at the Consumer.Offset settings, Version, and [TODO] ChannelBufferSize.
type Config struct {
	Session struct {
		// The allowed session timeout for registered consumers (defaults to 30s).
		// Must be within the allowed server range.
		Timeout time.Duration
	}
	Rebalance struct {
		// The allowed rebalance timeout for registered consumers (defaults to 30s).
		// Must be within the allowed server range. Only functions if sarama.Config.Version >= 0.10.1
		// Otherwise Session.Timeout is used for rebalancing too.
		Timeout time.Duration
	}
	Heartbeat struct {
		// Interval between each heartbeat (defaults to 3s). It should be no more
		// than 1/3rd of the Session.Timeout setting
		Interval time.Duration
	}
	// the partitioner used to map partitions to consumer group members (defaults to a round-robin partitioner)
	Partitioner Partitioner
	// The handler for sarama.ErrOffsetOutOfRange errors (defaults to sarama.OffsetNewest,nil). Implementation
	// must return the new starting offset in the partition, or an error. The sarama.Client is included for
	// convenience, since handling this might involve querying the partition's current offsets.
	OffsetOutOfRange func(topic string, partition int32, client sarama.Client) (offset int64, err error)
}
// DefaultOffsetOutOfRange is the default Config.OffsetOutOfRange handler.
// It restarts consumption at the current head of the partition.
func DefaultOffsetOutOfRange(topic string, partition int32, client sarama.Client) (offset int64, err error) {
	return sarama.OffsetNewest, nil
}
// NewConfig constructs a configuration populated with the documented
// defaults: 30s session and rebalance timeouts, 3s heartbeats, round-robin
// partitioning, and the DefaultOffsetOutOfRange handler.
func NewConfig() *Config {
	c := &Config{
		Partitioner:      RoundRobin,
		OffsetOutOfRange: DefaultOffsetOutOfRange,
	}
	c.Session.Timeout = 30 * time.Second
	c.Rebalance.Timeout = 30 * time.Second
	c.Heartbeat.Interval = 3 * time.Second
	return c
}
/*
NewClient creates a new consumer group client on top of an existing
sarama.Client.

After this call the contents of config should be treated as read-only.
config can be nil if the defaults are acceptable.

The consumer group name is used to match this client with other
instances running elsewhere, but connected to the same cluster
of kafka brokers and using the same consumer group name.

The supplied sarama.Client should have been constructed with a sarama.Config
where sarama.Config.Version is >= consumer.MinVersion, and if full handling of
ErrOffsetOutOfRange is desired, sarama.Config.Consumer.Return.Errors = true.
In addition, this package uses the settings in sarama.Config.Consumer.Offset
*/
func NewClient(group_name string, config *Config, sarama_client sarama.Client) (Client, error) {
	if config == nil {
		// honor the documented contract that a nil config means "use the
		// defaults"; without this, client.run would dereference a nil Config.
		config = NewConfig()
	}
	cl := &client{
		client:       sarama_client,
		config:       config,
		group_name:   group_name,
		errors:       make(chan error),
		closed:       make(chan struct{}),
		add_consumer: make(chan add_consumer),
		rem_consumer: make(chan *consumer),
	}
	// start the client's manager goroutine, and wait for it to report whether
	// first contact with the group coordinator succeeded
	rc := make(chan error)
	cl.wg.Add(1)
	go cl.run(rc)
	return cl, <-rc
}
/*
Client is a kafka client belonging to a consumer group. It is created by NewClient.
*/
type Client interface {
	// Consume returns a consumer of the given topic
	Consume(topic string) (Consumer, error)
	// Close closes the client. It must be called to shutdown
	// the client. It calls AsyncClose on any yet unclosed topic
	// Consumers created by this Client.
	// It does NOT close the inner sarama.Client.
	// Calling twice is NOT supported.
	Close()
	// Errors returns a channel which can (should) be monitored
	// for errors. callers should probably log or otherwise report
	// the returned errors. The channel closes when the client
	// is closed.
	Errors() <-chan error
	// TODO have a Status() method for debug/logging? Or is Errors() enough?
}
/*
Consumer is a consumer of a topic.

Messages from any partition assigned to this client arrive on the
channel returned by Messages.

Every message read from the Messages channel must eventually be passed
to Done. Calling Done is the signal that the message has been consumed
and the offset of that message can be committed back to kafka.
Of course this requires that the message's Partition and Offset fields not
be altered. Then again for what possible reason would you do such a thing?
*/
type Consumer interface {
	// Messages returns the channel of messages arriving from kafka. It always
	// returns the same result, so it is safe to call once and store the result.
	// Every message read from the channel should be passed to Done when processing
	// of the message is complete.
	Messages() <-chan *sarama.ConsumerMessage
	// Done indicates the processing of the message is complete, and its offset can
	// be committed to kafka. Calling Done twice with the same message, or with a
	// garbage message, can cause trouble.
	Done(*sarama.ConsumerMessage)
	// AsyncClose terminates the consumer cleanly. Callers can continue to read from
	// Messages channel until it is closed, or not, as they wish.
	// Calling Client.Close() performs a AsyncClose() on any remaining consumers.
	// Calling AsyncClose multiple times is permitted. Only the first call has any effect.
	// Never calling AsyncClose is also permitted. Client.Close() implies Consumer.AsyncClose.
	AsyncClose()
}
/*
Partitioner maps partitions to consumer group members.
When the user wants control over the partitioning they should set
Config.Partitioner to their implementation of Partitioner.
*/
type Partitioner interface {
	// PrepareJoin prepares a JoinGroupRequest given the topics supplied.
	// The simplest implementation would be something like
	//   join_req.AddGroupProtocolMetadata("<partitioner name>", &sarama.ConsumerGroupMemberMetadata{ Version: 1, Topics: topics, })
	PrepareJoin(join_req *sarama.JoinGroupRequest, topics []string)
	// Partition performs the partitioning. Given the requested
	// memberships from the JoinGroupResponse, it adds the results
	// to the SyncGroupRequest. Returning an error cancels everything.
	// The sarama.Client supplied to NewClient is included for convenience,
	// since performing the partitioning probably requires looking at each
	// topic's metadata, especially its list of partitions.
	Partition(*sarama.SyncGroupRequest, *sarama.JoinGroupResponse, sarama.Client) error
	// ParseSync parses the SyncGroupResponse and returns the map of topics
	// to partitions assigned to this client, or an error if the information
	// is not parsable.
	ParseSync(*sarama.SyncGroupResponse) (map[string][]int32, error)
}
// client implements the Client interface. All group-membership state is owned
// by the client.run goroutine; the channels below are how other goroutines
// communicate with it.
type client struct {
	client       sarama.Client     // the sarama client we were constructed from
	config       *Config           // our configuration (read-only)
	group_name   string            // the client-group name
	errors       chan error        // channel over which asynchronous errors are reported
	closed       chan struct{}     // channel which is closed to cause the client to shutdown
	wg           sync.WaitGroup    // waitgroup which is done when the client is shutdown
	add_consumer chan add_consumer // command channel used to add a new consumer
	rem_consumer chan *consumer    // command channel used to remove an existing consumer
}
// Errors returns the channel over which asynchronous errors are observed.
// The channel is closed when the client shuts down.
func (cl *client) Errors() <-chan error { return cl.errors }
// add_consumer are the messages sent over the client.add_consumer channel.
// reply receives nil on success, or the error if the topic is already consumed.
type add_consumer struct {
	con   *consumer    // the consumer to register
	reply chan<- error // where client.run reports the outcome
}
// Consume creates a Consumer of the given topic and registers it with the
// client's manager goroutine (which triggers a rejoin of the consumer group).
// On failure the underlying sarama.Consumer is closed before returning.
// NOTE(review): the send on cl.add_consumer has no case for cl.closed, so
// calling Consume on an already-closed client appears to block — confirm.
func (cl *client) Consume(topic string) (Consumer, error) {
	sarama_consumer, err := sarama.NewConsumerFromClient(cl.client)
	if err != nil {
		return nil, cl.makeError("Consume sarama.NewConsumerFromClient", err)
	}
	con := &consumer{
		cl:                 cl,
		consumer:           sarama_consumer,
		topic:              topic,
		messages:           make(chan *sarama.ConsumerMessage),
		closed:             make(chan struct{}),
		assignments:        make(chan *assignment, 1),
		commit_reqs:        make(chan commit_req),
		restart_partitions: make(chan *partition),
		premessages:        make(chan *sarama.ConsumerMessage), // TODO give ourselves some capacity once I know it runs right without any (capacity hides bugs :-)
		done:               make(chan *sarama.ConsumerMessage), // we should probably use sarama.Config.ChannelBufferSize for our channels
	}
	// hand the consumer to client.run and wait for it to accept or reject it
	reply := make(chan error)
	cl.add_consumer <- add_consumer{con, reply}
	err = <-reply
	if err != nil {
		// if an error is returned then it is up to us to close the sarama.Consumer
		_ = sarama_consumer.Close() // we already have an error to return. a 2nd one is too much
		return nil, err
	}
	return con, nil
}
// Close shuts down the client and any remaining Consumers, blocking until the
// shutdown is complete. It must be called exactly once (closing cl.closed a
// second time would panic, as documented on the Client interface).
func (cl *client) Close() {
	// signal to cl.run() that it should exit
	dbgf("Close client of consumer-group %q", cl.group_name)
	close(cl.closed)
	// and wait for the shutdown to be complete
	cl.wg.Wait()
}
// run is a long lived goroutine which manages this client's membership in the consumer group.
// It loops (re)joining the group each generation, distributing partition
// assignments to topic consumers, and then heartbeating/committing offsets
// until the group reforms or the client is closed. early_rc reports the result
// of the very first join attempt back to NewClient, then becomes nil.
func (cl *client) run(early_rc chan<- error) {
	defer cl.wg.Done()
	var member_id string                    // our group member id, assigned to us by kafka when we first make contact
	consumers := make(map[string]*consumer) // map of topic -> consumer
	var wg sync.WaitGroup                   // waitgroup used to wait for all consumers to exit
	defer dbgf("consumer-group %q client exiting", cl.group_name)
	// add a consumer
	add := func(add add_consumer) {
		dbgf("client.run add(topic %q)", add.con.topic)
		if _, ok := consumers[add.con.topic]; ok {
			// topic already is being consumed. the way the standard kafka 0.9 group coordination works you cannot consume twice with the
			// same client. If you want to consume the same topic twice, use two Clients.
			add.reply <- cl.makeError("Consume", fmt.Errorf("topic %q is already being consumed", add.con.topic))
			return
		}
		consumers[add.con.topic] = add.con
		wg.Add(1)
		go add.con.run(&wg)
		add.reply <- nil
	}
	// remove a consumer
	rem := func(con *consumer) {
		dbgf("client.run rem(topic %q)", con.topic)
		existing_con := consumers[con.topic]
		if existing_con == con {
			delete(consumers, con.topic)
		} // else it's some old consumer and we've already removed it
		// and let the consumer shutdown
		close(con.assignments)
		close(con.commit_reqs)
	}
	// shutdown the consumers. waits until they are all stopped. only call once and return afterwards, since it makes assumptions that hold only when it is used like that
	shutdown := func() {
		dbgf("client.run shutdown")
		// shutdown the remaining consumers
		for _, con := range consumers {
			con.AsyncClose()
		}
		// and consume any last rem_consumer messages from them
		go func() {
			wg.Wait()
			close(cl.rem_consumer)
		}()
		for con := range cl.rem_consumer {
			rem(con)
		}
		// and shutdown the errors channel
		close(cl.errors)
	}
	pause := false
	refresh := false
	// loop rejoining the group each time the group reforms
join_loop:
	for {
		if pause {
			// pause before continuing, so we don't fail continuously too fast
			pause = false
			timeout := time.After(time.Second) // TODO should we increase timeouts?
		pause_loop:
			for {
				select {
				case <-timeout:
					break pause_loop
				case <-cl.closed:
					// shutdown the remaining consumers
					shutdown()
					return
				case a := <-cl.add_consumer:
					add(a)
				case r := <-cl.rem_consumer:
					rem(r)
				}
			}
		}
		if refresh {
			// refresh the group coordinator (because sarama caches the result, and the cache must be manually invalidated by us when we decide it might be needed)
			err := cl.client.RefreshCoordinator(cl.group_name)
			if err != nil {
				err = cl.makeError("refreshing coordinating broker", err)
				if early_rc != nil {
					early_rc <- err
					return
				}
				cl.deliverError("", err)
				pause = true
				continue join_loop
			}
			refresh = false
		}
		// make contact with the kafka broker coordinating this group
		// NOTE: sarama keeps the result cached, so we aren't taking a round trip to the kafka brokers every time
		// (then again we need to manage sarama's cache too)
		coor, err := cl.client.Coordinator(cl.group_name)
		if err != nil {
			err = cl.makeError("contacting coordinating broker", err)
			if early_rc != nil {
				early_rc <- err
				return
			}
			cl.deliverError("", err)
			pause = true
			refresh = true
			continue join_loop
		}
		dbgf("Coordinator %v", coor)
		// join the group
		jreq := &sarama.JoinGroupRequest{
			GroupId:        cl.group_name,
			SessionTimeout: int32(cl.config.Session.Timeout / time.Millisecond),
			MemberId:       member_id,
			ProtocolType:   "consumer", // we implement the standard kafka 0.9 consumer protocol metadata
		}
		var topics = make([]string, 0, len(consumers))
		for topic := range consumers {
			topics = append(topics, topic)
		}
		cl.config.Partitioner.PrepareJoin(jreq, topics)
		dbgf("sending JoinGroupRequest %v", jreq)
		jresp, err := coor.JoinGroup(jreq)
		dbgf("received JoinGroupResponse %v, %v", jresp, err)
		if err != nil || jresp.Err == sarama.ErrNotCoordinatorForConsumer {
			// some I/O error happened, or the broker told us it is no longer the coordinator. in either case we should recompute the coordinator
			refresh = true
		}
		if err == nil && jresp.Err != 0 {
			err = jresp.Err
		}
		if err != nil {
			err = cl.makeError("joining group", err)
			// if it is still early (the 1st iteration of this loop) then return the error and bail out
			if early_rc != nil {
				early_rc <- err
				return
			}
			cl.deliverError("", err)
			pause = true
			continue join_loop
		}
		// we managed to get a successful join-group response. that is far enough that basic communication is functioning
		// and we can declare that our early_rc is success and release the caller to NewClient
		if early_rc != nil {
			early_rc <- nil
			early_rc = nil
		}
		// save our member_id for next time we join, and the new generation id
		member_id = jresp.MemberId
		generation_id := jresp.GenerationId
		dbgf("member_id %q, generation_id %d", member_id, generation_id)
		// prepare a sync request
		sreq := &sarama.SyncGroupRequest{
			GroupId:      cl.group_name,
			GenerationId: generation_id,
			MemberId:     member_id,
		}
		// if we have been chosen as the leader then we have to map the partitions
		if jresp.LeaderId == member_id {
			dbgf("leader is we")
			err := cl.config.Partitioner.Partition(sreq, jresp, cl.client)
			if err != nil {
				cl.deliverError("partitioning", err)
				// and rejoin (thus aborting this generation) since we can't partition it as needed
				pause = true
				continue join_loop
			}
		}
		// send SyncGroup
		dbgf("sending SyncGroupRequest %v", sreq)
		sresp, err := coor.SyncGroup(sreq)
		dbgf("received SyncGroupResponse %v, %v", sresp, err)
		// BUGFIX: this condition used to be `err != nil && sresp.Err == ...`,
		// which dereferenced sresp when it can be nil (err != nil), and never
		// triggered a refresh on an ErrNotCoordinatorForConsumer response
		// (err == nil). Use || to match the JoinGroup handling above.
		if err != nil || sresp.Err == sarama.ErrNotCoordinatorForConsumer {
			// we'll need a new coordinator
			refresh = true
		}
		if err == nil && sresp.Err != 0 {
			err = sresp.Err
		}
		if err != nil {
			cl.deliverError("synchronizing group", err)
			pause = true
			continue join_loop
		}
		assignments, err := cl.config.Partitioner.ParseSync(sresp)
		if err != nil {
			cl.deliverError("decoding member assignments", err)
			pause = true
			continue join_loop
		}
		// save and distribute the new assignments to our topic consumers
		a := &assignment{
			generation_id: generation_id,
			coordinator:   coor,
			member_id:     member_id,
			assignments:   assignments,
		}
		for _, con := range consumers {
			select {
			case con.assignments <- a:
				// got it on the first try
			default:
				// con.assignments is full (it has a capacity of 1)
				// remove the stale assignment and place this one in its place
				select {
				case <-con.assignments:
					// we have room now (since we're the only code which writes to this channel)
					con.assignments <- a
				case con.assignments <- a:
					// in this case the consumer removed the stale assignment before we could
				}
			}
		}
		// start the heartbeat timer
		heartbeat_timer := time.After(cl.config.Heartbeat.Interval)
		// and the offset commit timer
		clconfig := cl.client.Config()
		commit_timer := time.After(clconfig.Consumer.Offsets.CommitInterval)
		// and loop, sending heartbeats until something happens and we need to rejoin (or exit)
	heartbeat_loop:
		for {
			select {
			case <-cl.closed:
				// cl.Close() has been called; time to exit
				// shutdown any remaining consumers (causing them to sync their final offsets)
				shutdown()
				// and nicely leave the consumer group
				req := &sarama.LeaveGroupRequest{
					GroupId:  cl.group_name,
					MemberId: member_id,
				}
				dbgf("sending LeaveGroupRequest %v", req)
				resp, err := coor.LeaveGroup(req)
				dbgf("received LeaveGroupResponse %v, %v", resp, err)
				if err == nil && resp.Err != 0 {
					err = resp.Err
				}
				if err != nil {
					cl.deliverError("leaving group", err)
				}
				// and we're done
				return
			case <-heartbeat_timer:
				// send a heartbeat
				req := &sarama.HeartbeatRequest{
					GroupId:      cl.group_name,
					MemberId:     member_id,
					GenerationId: generation_id,
				}
				dbgf("sending HeartbeatRequest %v", req)
				resp, err := coor.Heartbeat(req)
				dbgf("received HeartbeatResponse %v, %v", resp, err)
				if err != nil || resp.Err == sarama.ErrNotCoordinatorForConsumer {
					// we need a new coordinator
					refresh = true
					continue join_loop
				}
				if err != nil || resp.Err != 0 {
					// we've got heartbeat troubles of one kind or another; disconnect and reconnect
					break heartbeat_loop
				}
				// and start the next heartbeat only after we get the response to this one
				// that way when the network or the broker are slow we back off.
				heartbeat_timer = time.After(cl.config.Heartbeat.Interval)
			case <-commit_timer:
				ocreq := &sarama.OffsetCommitRequest{
					ConsumerGroup:           cl.group_name,
					ConsumerGroupGeneration: generation_id,
					ConsumerID:              member_id,
					RetentionTime:           int64(clconfig.Consumer.Offsets.Retention / time.Millisecond),
					Version:                 2, // kafka 0.9.0 version, with RetentionTime
				}
				if clconfig.Consumer.Offsets.Retention == 0 { // note that this and the rounding math above means that if you wanted a retention time of 0 millseconds you could set Config.Offsets.RetentionTime to something < 1 ms, like 1 nanosecond
					ocreq.RetentionTime = -1 // use broker's value
				}
				// commit_wg was previously named wg, shadowing the consumers
				// waitgroup above; renamed for clarity, behavior unchanged
				var commit_wg sync.WaitGroup
				for _, con := range consumers {
					// NOTE we must wait for each consumer to finish adding itself before sending the commit_req to the next consumer
					// otherwise they race on calling ocreq.AddBlock() and will cause concurrent hashmap writes.
					commit_wg.Add(1)
					con.commit_reqs <- commit_req{ocreq, &commit_wg}
					commit_wg.Wait()
				}
				dbgf("sending OffsetCommitRequest %v", ocreq)
				ocresp, err := coor.CommitOffset(ocreq)
				dbgf("received OffsetCommitResponse %v, %v", ocresp, err)
				// log any errors we got. there isn't much we can do about them
				if err != nil {
					cl.deliverError("committing offsets", err)
				} else {
					for topic, partitions := range ocresp.Errors {
						for p, err := range partitions {
							if err != 0 {
								cl.deliverError(fmt.Sprintf("committing offset of topic %q partition %d", topic, p), err)
							}
						}
					}
				}
				commit_timer = time.After(clconfig.Consumer.Offsets.CommitInterval)
			case a := <-cl.add_consumer:
				add(a)
				// and rejoin so we can become a member of the new topic
				continue join_loop
			case r := <-cl.rem_consumer:
				rem(r)
				// and rejoin so we can be removed as member of the new topic
				continue join_loop
			}
		} // end of heartbeat_loop
	} // end of join_loop
}
// makeError wraps err into a *Error, associating it with context and this
// client. Topic is left empty and Partition set to -1, meaning "not specific
// to any topic/partition".
func (cl *client) makeError(context string, err error) *Error {
	return &Error{
		cl:        cl,
		Context:   context,
		Err:       err,
		Partition: -1,
	}
}
// deliverError sends an error to the channel returned by cl.Errors. When
// context is non-empty the error is first wrapped in a *Error; an empty
// context means err is already wrapped and is delivered as-is.
func (cl *client) deliverError(context string, err error) {
	wrapped := err
	if context != "" {
		wrapped = cl.makeError(context, err)
	}
	dbgf("%v", wrapped)
	cl.errors <- wrapped
}
// consumer implements the Consumer interface. Its state is owned by the
// consumer.run goroutine; the channels below are how other goroutines talk to it.
type consumer struct {
	cl                 *client
	consumer           sarama.Consumer
	topic              string                       // the topic this consumer consumes
	messages           chan *sarama.ConsumerMessage // channel over which messages are delivered to the caller (see Messages)
	closed             chan struct{}                // channel which is closed when the consumer is AsyncClose()ed
	close_once         sync.Once                    // Once used to make sure we close only once
	assignments        chan *assignment             // channel over which client.run sends consumer.run each generation's partition assignments
	commit_reqs        chan commit_req              // channel over which client.run sends consumer.run request to fill out a OffsetCommitRequest
	restart_partitions chan *partition              // channel through which partition.run delivers partition restart [at new offset] requests
	premessages        chan *sarama.ConsumerMessage // channel through which partition.run delivers messages to consumer.run
	done               chan *sarama.ConsumerMessage // channel through which Done() returns messages
}
// commit_req is a request for a consumer to write its part into a OffsetCommitRequest.
// wg is signaled when the consumer has finished adding its blocks, serializing
// access to the shared ocreq.
type commit_req struct {
	ocreq *sarama.OffsetCommitRequest
	wg    *sync.WaitGroup
}
// assignment is this client's assigned partitions for one group generation,
// distributed by client.run to each topic consumer.
type assignment struct {
	generation_id int32              // the current generation
	coordinator   *sarama.Broker     // the current client-group coordinating broker
	member_id     string             // the member_id assigned to us by the coordinator
	assignments   map[string][]int32 // map of topic -> list of partitions
}
// makeError constructs a *Error from this consumer, tagging it with the
// consumer itself and its topic.
func (con *consumer) makeError(context string, err error) *Error {
	e := con.cl.makeError(context, err)
	e.Consumer = con
	e.Topic = con.topic
	return e
}
// deliverError constructs a *Error for the given partition of this consumer's
// topic and delivers it over the client's error channel.
func (con *consumer) deliverError(context string, partition int32, err error) {
	e := con.makeError(context, err)
	e.Partition = partition
	con.cl.deliverError("", e)
}
// Messages returns the read-only channel on which consumed messages are delivered.
func (con *consumer) Messages() <-chan *sarama.ConsumerMessage { return con.messages }
// AsyncClose starts shutting the consumer down by closing con.closed.
// It can safely be called multiple times; only the first call has any effect.
func (con *consumer) AsyncClose() {
	dbgf("AsyncClose consumer of topic %q", con.topic)
	con.close_once.Do(func() { close(con.closed) })
}
// consumer goroutine coordinates consuming from multiple partitions in a topic.
// It owns the partitions map and all offset bookkeeping; partition.run and
// client.run talk to it only over channels, so no locking is needed.
// NOTE WELL: this function must never do anything which would prevent it from processing message from client.run promptly.
// That means any channel I/O must include cases for con.assignments and con.commit_reqs.
func (con *consumer) run(wg *sync.WaitGroup) {
	var generation_id int32                  // current generation
	var coor *sarama.Broker                  // current consumer group coordinating broker
	var member_id string                     // our member id assigned by coor
	partitions := make(map[int32]*partition) // map of partition number -> partition consumer
	// shutdown the removed partitions, committing their last offset
	remove := func(removed []int32) {
		dbgf("consumer %q rem(%v)", con.topic, removed)
		if len(removed) == 0 {
			// nothing to do, and no point in sending an empty OffsetCommitRequest msg either
			return
		}
		clconfig := con.cl.client.Config()
		ocreq := &sarama.OffsetCommitRequest{
			ConsumerGroup:           con.cl.group_name,
			ConsumerGroupGeneration: generation_id,
			ConsumerID:              member_id,
			RetentionTime:           int64(clconfig.Consumer.Offsets.Retention / time.Millisecond),
			Version:                 2, // kafka 0.9.0 version, with RetentionTime
		}
		if clconfig.Consumer.Offsets.Retention == 0 { // note that this and the rounding math above means that if you wanted a retention time of 0 milliseconds you could set Config.Offsets.RetentionTime to something < 1 ms, like 1 nanosecond
			ocreq.RetentionTime = -1 // use broker's value
		}
		for _, p := range removed {
			// stop consuming from partition p
			if part, ok := partitions[p]; ok {
				delete(partitions, p)
				part.consumer.Close()
				// the oldest offset not yet known to be fully Done()
				offset := part.oldest
				if len(part.buckets) != 0 {
					if part.buckets[0][0] == part.buckets[0][1] {
						// add to that the portion of the last block we know has been completed (this is often useful when the traffic rate is low or a client shuts down cleanly, since it has probably cleanly returned all offsets we've delivered)
						offset += int64(part.buckets[0][1])
					} // else we don't know enough to commit any further
				}
				dbgf("ocreq.AddBlock(%q, %d, %d)", con.topic, p, offset)
				ocreq.AddBlock(con.topic, p, offset, 0, "")
			}
		}
		dbgf("sending OffsetCommitRequest %v", ocreq)
		ocresp, err := coor.CommitOffset(ocreq)
		dbgf("received OffsetCommitResponse %v, %v", ocresp, err)
		// log any errors we got. there isn't much we can do about them; the next consumer will start at an older offset
		if err != nil {
			con.deliverError("committing offsets", -1, err)
		} else {
			for _, partitions := range ocresp.Errors {
				for p, err := range partitions {
					if err != 0 {
						con.deliverError("committing offset", p, err)
					}
				}
			}
		}
	}
	// handle a commit request from client.run: add every partition's
	// committable offset to the shared OffsetCommitRequest
	commit_req := func(c commit_req) {
		dbgf("consumer %q commit_req(%v)", con.topic, c)
		for p, partition := range partitions {
			offset := partition.oldest
			if len(partition.buckets) != 0 {
				if partition.buckets[0][0] == partition.buckets[0][1] {
					// add to that the portion of the last block we know has been completed (this is useful when the message rate is slow)
					offset += int64(partition.buckets[0][1])
				} // else we don't know enough to commit any further
			}
			dbgf("ocreq.AddBlock(%q, %d, %d)", con.topic, p, offset)
			c.ocreq.AddBlock(con.topic, p, offset, 0, "")
		}
		c.wg.Done()
	}
	// shutdown path: commit what we can, close our channels, and hand
	// ourselves back to client.run via rem_consumer
	defer func() {
		if len(partitions) != 0 {
			// cleanup the remaining partition consumers
			removed := make([]int32, 0, len(partitions))
			for p := range partitions {
				removed = append(removed, p)
			}
			remove(removed)
		}
		con.consumer.Close()
		close(con.messages)
		// send ourselves to rem_consumer
	rem_loop:
		for {
			select {
			case c := <-con.commit_reqs:
				commit_req(c)
			case <-con.assignments:
				// ignore them, we're shutting down
			case con.cl.rem_consumer <- con:
				break rem_loop
			}
			// NOTE: <-con.done is not a case above because there is no good way to shut it down. it is never closed,
			// so we'd never know when it was drained. As a consequence, it's client.Done() which aborts when the
			// consumer closes, rather than us draining con.done
		}
		// drain any remaining requests from run.client, until
		// run.client closes the channels
		for c := range con.commit_reqs {
			commit_req(c)
		}
		for range con.assignments {
			// ignore them
		}
		dbgf("consumer of topic %q exiting", con.topic)
		wg.Done()
	}()
	// handle a message sent to us via con.done: mark its offset completed
	// in the per-partition buckets, advancing part.oldest when the oldest
	// bucket fills up
	done := func(msg *sarama.ConsumerMessage) {
		dbgf("consumer %q done(%d/%d)", con.topic, msg.Partition, msg.Offset)
		part := partitions[msg.Partition]
		if part == nil {
			dbgf("no partition %d", msg.Partition)
			return
		}
		delta := msg.Offset - part.oldest
		if delta < 0 {
			dbgf("stale message %d/%d", msg.Partition, msg.Offset)
			return
		}
		index := int(delta) >> 6 // /64
		if index >= len(part.buckets) {
			dbgf("early message %d/%d", msg.Partition, msg.Offset)
			return
		}
		part.buckets[index][1]++
		if index == 0 {
			// we might have finished the oldest bucket
			for part.buckets[0] == [2]uint8{64, 64} {
				// the oldest bucket is complete; advance the last committed offset
				part.oldest += 64
				part.buckets = part.buckets[1:]
				if len(part.buckets) == 0 {
					break
				}
			}
		}
	}
	// handle an assignment message: stop removed partitions, adopt the new
	// generation, and start consumers for added partitions
	assignment := func(a *assignment) {
		dbgf("consumer %q assignment(%v)", con.topic, a)
		// see what has changed in the partition assignment of our topic
		new_partitions := a.assignments[con.topic]
		added, removed := difference(partitions, new_partitions)
		dbgf("consumer %q added %v, removed %v", con.topic, added, removed)
		// shutdown the partitions while we still belong to the previous generation
		remove(removed)
		// update the current generation and related info after committing the last offsets from the previous generation
		generation_id = a.generation_id
		coor = a.coordinator
		member_id = a.member_id
		// the sarama-cluster code pauses here so that other consumers have time to sync their offsets. Should we do the same?
		// I've observed with kafka 0.9.0.1 that once the coordinator bumps the generation_id the client can't commit an offset with
		// the old id. So unless the client lies and sends generation_id+1 when it commits there is nothing it can commit, and there
		// is no point in waiting. So for now, no waiting.
		// fetch the last committed offsets of the new partitions
		oreq := &sarama.OffsetFetchRequest{
			ConsumerGroup: con.cl.group_name,
			Version:       1, // kafka 0.9.0 expects version 1 offset requests
		}
		for _, p := range added {
			oreq.AddPartition(con.topic, p)
		}
		dbgf("consumer %q sending OffsetFetchRequest %v", con.topic, oreq)
		oresp, err := a.coordinator.FetchOffset(oreq)
		dbgf("consumer %q received OffsetFetchResponse %v, %v", con.topic, oresp, err)
		if err != nil {
			con.deliverError("fetching offsets", -1, err)
			// and we can't consume any of the new partitions without the offsets
		} else if len(added) != 0 {
			// start consuming from the added partitions at each partition's last committed offset (which by convention kafka defines as the last consumed offset+1)
			// since computing the starting offset and beginning to consume requires several round trips to the kafka brokers we start all the
			// partitions concurrently. That reduces the startup time to a couple RTTs even for topics with numerous partitions.
			started := make(chan *partition)
			var wg sync.WaitGroup
			for _, p := range added {
				wg.Add(1)
				go func(p int32) {
					defer wg.Done()
					ob := oresp.GetBlock(con.topic, p)
					if ob == nil {
						// can't start this partition without an offset
						con.deliverError("FetchOffset response", p, fmt.Errorf("partition %d missing", p))
						return
					}
					if ob.Err != 0 {
						con.deliverError("FetchOffset response", p, ob.Err)
						return
					}
					offset := ob.Offset
					if offset == sarama.OffsetNewest {
						// the broker doesn't have an offset for us. Use the configured initial offset
						offset = con.cl.client.Config().Consumer.Offsets.Initial
					}
					dbgf("consumer %q consuming partition %d at offset %d", con.topic, p, offset)
					consumer, err := con.consumer.ConsumePartition(con.topic, p, offset)
					if err != nil {
						con.deliverError(fmt.Sprintf("sarama.ConsumePartition at offset %d", offset), p, err)
						// If the error is ErrOffsetOutOfRange then give ourselves one chance to recover
						if err != sarama.ErrOffsetOutOfRange {
							// otherwise we can't consume this partition.
							return
						}
						offset, err = con.cl.config.OffsetOutOfRange(con.topic, p, con.cl.client)
						if err != nil {
							// should we deliver them their own error? I guess so.
							con.deliverError("OffsetOutOfRange callback", p, err)
							return
						}
						consumer, err = con.consumer.ConsumePartition(con.topic, p, offset)
						if err != nil {
							con.deliverError(fmt.Sprintf("sarama.ConsumePartition at offset %d", offset), p, err)
							// it didn't work with their offset either. give up
							// (we could go into a loop and call them again, but what would that solve?)
							return
						}
						// it worked with the new offset; carry on
					}
					part := &partition{
						con:       con,
						consumer:  consumer,
						partition: p,
						oldest:    offset,
					}
					go part.run()
					started <- part
				}(p)
			}
			go func() {
				wg.Wait()
				close(started)
			}()
			for part := range started {
				partitions[part.partition] = part
			}
		}
	}
	// restart consuming a partition at a new[er] offset
	restart_partition := func(part *partition) {
		// we kill the old and start a new partition consumer since there is no way to seek an existing sarama.PartitionConsumer in sarama's November 2016 API)
		p := part.partition
		// first remove the old partition consumer. Once it gets a ErrOffsetOutOfRange it's unable to function.
		// since it had an out-of-range offset, it can't commit its offset either
		if pa, ok := partitions[p]; !ok || part != pa {
			// this is an unknown partition, or we've already killed it; ignore the request
			return
		}
		delete(partitions, p)
		part.consumer.Close()
		// then ask what the new starting offset should be
		offset, err := con.cl.config.OffsetOutOfRange(con.topic, p, con.cl.client)
		if err != nil {
			// should we deliver them their own error? I guess so.
			con.deliverError("OffsetOutOfRange callback", p, err)
			return
		}
		// finally make a new partition consuming starting at the given offset
		consumer, err := con.consumer.ConsumePartition(con.topic, p, offset)
		if err != nil {
			con.deliverError(fmt.Sprintf("sarama.ConsumePartition at offset %d", offset), p, err)
			return
		}
		part = &partition{
			con:       con,
			consumer:  consumer,
			partition: p,
			oldest:    offset,
		}
		go part.run()
		partitions[p] = part
	}
	// main event loop: multiplex messages, Done() notifications, assignment
	// changes, commit requests, partition restarts and shutdown
	for {
		select {
		case msg := <-con.premessages:
			dbgf("premessage msg %d/%d", msg.Partition, msg.Offset)
			// keep track of msg's offset so we can match it with Done, and deliver the msg
			part := partitions[msg.Partition]
			if part == nil {
				// message from a stale consumer; ignore it
				dbgf("no partition %d", msg.Partition)
				continue
			}
			if part.oldest == sarama.OffsetNewest || part.oldest == sarama.OffsetOldest {
				// we now know the starting offset. make as if we'd been asked to start there
				part.oldest = msg.Offset
			}
			delta := msg.Offset - part.oldest
			if delta < 0 { // || delta > max-out-of-order (TODO)
				dbgf("stale message %d/%d", msg.Partition, msg.Offset)
				// we can't take this message into account
				continue
			}
			index := int(delta) >> 6 // /64
			for index >= len(part.buckets) {
				// add a new bucket
				part.buckets = append(part.buckets, [2]uint8{0, 0})
			}
			part.buckets[index][0]++
			// and deliver the msg (or handle any of the other messages which can arrive)
		deliver_loop:
			for {
				select {
				case con.messages <- msg:
					dbgf("delivered msg %d/%d", msg.Partition, msg.Offset)
					// success
					break deliver_loop
				case msg2 := <-con.done:
					done(msg2)
				case a := <-con.assignments:
					assignment(a)
				case c := <-con.commit_reqs:
					commit_req(c)
				case p := <-con.restart_partitions:
					restart_partition(p)
				case <-con.closed:
					// the deferred operations do the work
					return
				}
			}
		case msg := <-con.done:
			done(msg)
		case a := <-con.assignments:
			assignment(a)
		case c := <-con.commit_reqs:
			commit_req(c)
		case p := <-con.restart_partitions:
			restart_partition(p)
		case <-con.closed:
			// the deferred operations do the work
			return
		}
	}
}
// Done reports that the caller has finished processing msg, making its
// offset eligible for committing.
func (con *consumer) Done(msg *sarama.ConsumerMessage) {
	// send it back to consumer.run to be processed synchronously
	dbgf("Done(%d/%d)", msg.Partition, msg.Offset)
	select {
	case con.done <- msg:
		// great, msg delivered
	case <-con.closed:
		// consumer has closed
	}
}
// partition contains the data associated with us consuming one partition.
type partition struct {
	con       *consumer
	consumer  sarama.PartitionConsumer
	partition int32 // partition number
	// buckets of # of offsets read from kafka, and the # of offsets completed by a call to Done(). the difference is the # of offsets in flight in the calling code
	// we group offsets in groups of 64 and simply keep a count of how many are outstanding
	// any time the two counts are equal then the offsets are committable. Otherwise we can't tell which is the not yet Done() offset and so we don't know
	// buckets[i][0] = delivered count, buckets[i][1] = Done() count for offsets [oldest+64*i, oldest+64*(i+1))
	buckets [][2]uint8
	oldest  int64 // 1st offset in bucket[0]
}
// makeConsumerError wraps a sarama.ConsumerError into an *Error,
// preserving the topic and partition sarama reported.
func (part *partition) makeConsumerError(cerr *sarama.ConsumerError) *Error {
	e := part.con.makeError("consuming from sarama", cerr.Err)
	e.Topic = cerr.Topic
	e.Partition = cerr.Partition
	return e
}
// run consumes from the partition and delivers it to the consumer.
// It forwards messages to con.premessages and errors to the client's error
// channel, aborting any blocking send when con.closed is closed.
func (part *partition) run() {
	con := part.con
	defer dbgf("partition consumer of %q partition %d exiting", con.topic, part.partition)
	msgs := part.consumer.Messages()
	errors := part.consumer.Errors()
	for {
		select {
		case msg, ok := <-msgs:
			if ok {
				dbgf("got msg %d/%d", msg.Partition, msg.Offset)
				// forward to consumer.run unless we're shutting down
				select {
				case con.premessages <- msg:
				case <-con.closed:
					return
				}
			} else {
				dbgf("draining topic %q partition %d errors", con.topic, part.partition)
				// deliver any remaining errors, and exit
				for sarama_err := range errors {
					con.cl.deliverError("", part.makeConsumerError(sarama_err))
				}
				return
			}
		case sarama_err, ok := <-errors:
			if ok {
				// pick out ErrOffsetOutOfRange errors. These happen if the consumer offset falls off the tail of the kafka log.
				// this easily happens in two cases: when the consumer is too slow, or when the consumer has been stopped for too long.
				// This error cannot be fixed without seeking to a valid offset. However we can't assume that OffsetNewest is the right
				// choice, nor OffsetOldest, nor "5 minutes ago" or anything else. It's up to the user to decide.
				if sarama_err.Err == sarama.ErrOffsetOutOfRange {
					dbgf("ErrOffsetOutOfRange topic %q partition %d", con.topic, part.partition)
					select {
					case con.restart_partitions <- part:
					case <-con.closed:
						return
					}
					// should we keep reading from the partition? it's unlikely to produce much
				}
				// and always deliver the error
				con.cl.deliverError("", part.makeConsumerError(sarama_err))
			} else {
				// finish off any remaining messages, and exit
				dbgf("draining topic %q partition %d msgs", con.topic, part.partition)
				for msg := range msgs {
					select {
					case con.premessages <- msg:
					case <-con.closed:
						return
					}
				}
				return
			}
		}
	}
}
// difference compares the currently-consumed partitions (the keys of old)
// with the next assignment and reports which partition numbers have been
// added and which have been removed. Both result slices come back sorted.
func difference(old map[int32]*partition, next []int32) (added, removed []int32) {
	cur := make(int32Slice, 0, len(old))
	for p := range old {
		cur = append(cur, p)
	}
	nxt := append(make(int32Slice, 0, len(next)), next...)
	sort.Sort(cur)
	sort.Sort(nxt)
	// walk the two sorted lists in lockstep
	var i, j int
	for i < len(cur) && j < len(nxt) {
		switch {
		case cur[i] < nxt[j]:
			removed = append(removed, cur[i])
			i++
		case cur[i] > nxt[j]:
			added = append(added, nxt[j])
			j++
		default:
			i++
			j++
		}
	}
	// whatever remains on either side is purely removed/added
	removed = append(removed, cur[i:]...)
	added = append(added, nxt[j:]...)
	return
}
// int32Slice is a sortable []int32 implementing sort.Interface.
type int32Slice []int32

func (p int32Slice) Len() int           { return len(p) }
func (p int32Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p int32Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
// roundRobinPartitioner is a simple partitioner that assigns partitions
// round-robin across all consumers requesting each topic. Its string value
// is the protocol name sent to the group coordinator.
type roundRobinPartitioner string

// RoundRobin is the global instance of the round-robin partitioner.
const RoundRobin roundRobinPartitioner = "roundrobin" // use the string "roundrobin" without a dash to match what kafka java code uses, should someone want to mix go and java consumers in the same group
// PrepareJoin records this partitioner's protocol metadata — its protocol
// name and the topics we want — in the JoinGroupRequest.
func (rr roundRobinPartitioner) PrepareJoin(jreq *sarama.JoinGroupRequest, topics []string) {
	meta := &sarama.ConsumerGroupMemberMetadata{
		Version: 1,
		Topics:  topics,
	}
	jreq.AddGroupProtocolMetadata(string(rr), meta)
}
// Partition: for each topic in jresp, assign the topic's partitions round-robin
// across the members requesting each topic, encoding the result into sreq.
func (roundRobinPartitioner) Partition(sreq *sarama.SyncGroupRequest, jresp *sarama.JoinGroupResponse, client sarama.Client) error {
	by_member, err := jresp.GetMembers()
	dbgf("by_member %v", by_member)
	if err != nil {
		return err
	}
	// invert the data, so we have the requests grouped by topic (they arrived grouped by member, since the kafka broker treats the data from each consumer as an opaque blob, so it couldn't do this step for us)
	by_topic := make(map[string][]string) // map of topic to members requesting the topic
	for member, request := range by_member {
		if request.Version != 1 {
			// skip unsupported versions. we'll only assign to clients we can understand. Since we are such a client
			// we won't block all consumers (at least for those topics we consume). If this ends up a bad idea, we
			// can always change this code to return an error.
			continue
		}
		for _, topic := range request.Topics {
			by_topic[topic] = append(by_topic[topic], member)
		}
	}
	dbgf("by_topic %v", by_topic)
	// finally, build our assignments of partitions to members
	assignments := make(map[string]map[string][]int32) // map of member to topics, and topic to partitions
	for topic, members := range by_topic {
		partitions, err := client.Partitions(topic)
		dbgf("Partitions(%q) = %v", topic, partitions)
		if err != nil {
			// what to do? we could maybe skip the topic, assigning it to no-one. But I/O errors are likely to happen again.
			// so let's stop partitioning and return the error.
			return err
		}
		n := len(partitions)
		if n == 0 { // can this happen? best not to /0 later if it can
			// no one gets anything assigned. it is as if this topic didn't exist
			continue
		}
		// deal the partitions out to the members one at a time, like cards
		for i := 0; i < n; {
			for _, member_id := range members {
				topics, ok := assignments[member_id]
				if !ok {
					topics = make(map[string][]int32)
					assignments[member_id] = topics
				}
				topics[topic] = append(topics[topic], partitions[i])
				i++
				if i == n {
					break
				}
			}
		}
	}
	dbgf("assignments %v", assignments)
	// and encode the assignments in the sync request
	for member_id, topics := range assignments {
		sreq.AddGroupAssignmentMember(member_id,
			&sarama.ConsumerGroupMemberAssignment{
				Version: 1,
				Topics:  topics,
			})
	}
	return nil
}
// ParseSync decodes the partition assignment (a map of topic -> partitions)
// destined for us from the coordinator's SyncGroupResponse.
func (roundRobinPartitioner) ParseSync(sresp *sarama.SyncGroupResponse) (map[string][]int32, error) {
	if len(sresp.MemberAssignment) == 0 {
		// corner case: when we ask for no topics we get nothing back. However sarama fd498173ae2bf (head of master branch Nov 6th 2016) will return a useless error if we call sresp.GetMemberAssignment() in this case
		return nil, nil
	}
	ma, err := sresp.GetMemberAssignment()
	dbgf("MemberAssignment %v", ma)
	switch {
	case err != nil:
		return nil, err
	case ma.Version != 1:
		return nil, fmt.Errorf("unsupported MemberAssignment version %d", ma.Version)
	}
	return ma.Topics, nil
}
|
// (c) 2019-2021, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package message
import (
"errors"
"github.com/ava-labs/avalanchego/ids"
)
const (
	// TODO choose a sensible value
	// MaxEthTxsLen must be updated inside of EthTxsNotify's struct definition
	// as well when changed
	// NOTE(review): neither MaxEthTxsLen nor EthTxsNotify is referenced in
	// this file — confirm the constant is still needed.
	MaxEthTxsLen int = 10
)
var (
	// compile-time checks that both concrete message types implement Message
	_ Message = &TxNotify{}
	_ Message = &Tx{}

	errUnexpectedCodecVersion = errors.New("unexpected codec version")
)
// Message is a self-describing network message that knows how to dispatch
// itself to a Handler and expose its serialized form.
type Message interface {
	// Handle this message with the correct message handler
	Handle(handler Handler, nodeID ids.ShortID, requestID uint32) error

	// initialize should be called whenever a message is built or parsed
	initialize([]byte)

	// Bytes returns the binary representation of this message
	//
	// Bytes should only be called after being initialized
	Bytes() []byte
}
// message is the embeddable base of every Message implementation; it caches
// the message's serialized bytes.
type message []byte

// initialize records the serialized form so Bytes can return it later.
func (m *message) initialize(bytes []byte) { *m = bytes }

// Bytes returns the serialized form recorded by initialize.
func (m *message) Bytes() []byte { return *m }
// TxNotify announces the ID of a transaction to peers.
type TxNotify struct {
	message

	TxID ids.ID `serialize:"true"`
}

// Handle dispatches this message to handler.HandleTxNotify.
func (msg *TxNotify) Handle(handler Handler, nodeID ids.ShortID, requestID uint32) error {
	return handler.HandleTxNotify(nodeID, requestID, msg)
}
// Tx carries the serialized bytes of one transaction.
type Tx struct {
	message

	Tx []byte `serialize:"true"`
}

// Handle dispatches this message to handler.HandleTx.
func (msg *Tx) Handle(handler Handler, nodeID ids.ShortID, requestID uint32) error {
	return handler.HandleTx(nodeID, requestID, msg)
}
// Parse deserializes bytes into a Message, rejecting any codec version other
// than the current one, and initializes the message with its raw bytes.
func Parse(bytes []byte) (Message, error) {
	var msg Message
	version, err := c.Unmarshal(bytes, &msg)
	switch {
	case err != nil:
		return nil, err
	case version != codecVersion:
		return nil, errUnexpectedCodecVersion
	}
	msg.initialize(bytes)
	return msg, nil
}
// Build serializes msg with the current codec version, initializes the
// message with the resulting bytes, and returns those bytes.
//
// The marshalling error is now checked before initialize is called;
// previously a failed Marshal still initialized the message with a nil
// byte slice.
func Build(msg Message) ([]byte, error) {
	bytes, err := c.Marshal(codecVersion, &msg)
	if err != nil {
		return nil, err
	}
	msg.initialize(bytes)
	return bytes, nil
}
Commit note: removed the unused MaxEthTxsLen constant from the message package.
// (c) 2019-2021, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package message
import (
"errors"
"github.com/ava-labs/avalanchego/ids"
)
var (
	// compile-time checks that both concrete message types implement Message
	_ Message = &TxNotify{}
	_ Message = &Tx{}

	errUnexpectedCodecVersion = errors.New("unexpected codec version")
)
// Message is a self-describing network message that knows how to dispatch
// itself to a Handler and expose its serialized form.
type Message interface {
	// Handle this message with the correct message handler
	Handle(handler Handler, nodeID ids.ShortID, requestID uint32) error

	// initialize should be called whenever a message is built or parsed
	initialize([]byte)

	// Bytes returns the binary representation of this message
	//
	// Bytes should only be called after being initialized
	Bytes() []byte
}
// message is the embeddable base of every Message implementation; it caches
// the message's serialized bytes.
type message []byte

// initialize records the serialized form so Bytes can return it later.
func (m *message) initialize(bytes []byte) { *m = bytes }

// Bytes returns the serialized form recorded by initialize.
func (m *message) Bytes() []byte { return *m }
// TxNotify announces the ID of a transaction to peers.
type TxNotify struct {
	message

	TxID ids.ID `serialize:"true"`
}

// Handle dispatches this message to handler.HandleTxNotify.
func (msg *TxNotify) Handle(handler Handler, nodeID ids.ShortID, requestID uint32) error {
	return handler.HandleTxNotify(nodeID, requestID, msg)
}
// Tx carries the serialized bytes of one transaction.
type Tx struct {
	message

	Tx []byte `serialize:"true"`
}

// Handle dispatches this message to handler.HandleTx.
func (msg *Tx) Handle(handler Handler, nodeID ids.ShortID, requestID uint32) error {
	return handler.HandleTx(nodeID, requestID, msg)
}
// Parse deserializes bytes into a Message, rejecting any codec version other
// than the current one, and initializes the message with its raw bytes.
func Parse(bytes []byte) (Message, error) {
	var msg Message
	version, err := c.Unmarshal(bytes, &msg)
	switch {
	case err != nil:
		return nil, err
	case version != codecVersion:
		return nil, errUnexpectedCodecVersion
	}
	msg.initialize(bytes)
	return msg, nil
}
// Build serializes msg with the current codec version, initializes the
// message with the resulting bytes, and returns those bytes.
//
// The marshalling error is now checked before initialize is called;
// previously a failed Marshal still initialized the message with a nil
// byte slice.
func Build(msg Message) ([]byte, error) {
	bytes, err := c.Marshal(codecVersion, &msg)
	if err != nil {
		return nil, err
	}
	msg.initialize(bytes)
	return bytes, nil
}
|
package consensus
import (
"container/heap"
"fmt"
"github.com/jddixon/xlattice_go/rnglib"
"io/ioutil"
. "launchpad.net/gocheck"
"os"
"testing"
"time"
)
// gocheck tie-in /////////////////////

// Test hooks the gocheck suites into the standard "go test" runner.
func Test(t *testing.T) { TestingT(t) }
// XLSuite is the gocheck suite holding the consensus package tests.
type XLSuite struct{}

var _ = Suite(&XLSuite{})

// end gocheck setup //////////////////

const (
	// TEST_PAIR_COUNT is the number of distinct commands used by the buffer tests.
	TEST_PAIR_COUNT = 7
)
// makeSimpleRNG builds a PRNG seeded with the current Unix time.
func (s *XLSuite) makeSimpleRNG() *rnglib.PRNG {
	return rnglib.NewSimpleRNG(time.Now().Unix())
}
// TestCmdQ checks that pairQ behaves as a min-heap on Seqn: entries pushed
// in arbitrary order pop back in ascending Seqn order.
func (s *XLSuite) TestCmdQ(c *C) {
	q := pairQ{}
	heap.Init(&q)
	c.Assert(q.Len(), Equals, 0)

	pair0 := NumberedCmd{Seqn: 42, Cmd: "foo"}
	pair1 := NumberedCmd{Seqn: 1, Cmd: "bar"}
	pair2 := NumberedCmd{Seqn: 99, Cmd: "baz"}

	pp0 := cmdPlus{pair: &pair0}
	pp1 := cmdPlus{pair: &pair1}
	pp2 := cmdPlus{pair: &pair2}

	heap.Push(&q, &pp0)
	heap.Push(&q, &pp1)
	heap.Push(&q, &pp2)
	c.Assert(q.Len(), Equals, 3)

	// pops come back ordered by Seqn: 1, 42, 99
	out := heap.Pop(&q).(*cmdPlus)
	c.Assert(out.pair.Seqn, Equals, int64(1))
	c.Assert(out.pair.Cmd, Equals, "bar")

	out = heap.Pop(&q).(*cmdPlus)
	c.Assert(out.pair.Seqn, Equals, int64(42))
	c.Assert(out.pair.Cmd, Equals, "foo")

	out = heap.Pop(&q).(*cmdPlus)
	c.Assert(out.pair.Seqn, Equals, int64(99))
	c.Assert(out.pair.Cmd, Equals, "baz")
	c.Assert(q.Len(), Equals, 0)

	// XXX THIS PANICS - so if popping from a heap, always check
	// the length first.
	//zzz := heap.Pop(&q)
	// c.Assert(zzz, Equals, nil)
}
// doTestCmdBufferI exercises a CmdBufferI implementation: commands sent
// somewhat out of order (with duplicates) on InCh() must come out of `out`
// exactly once each, in ascending Seqn order. When logging is enabled the
// buffer must also write each command to logFile in order.
//
// Fix: the existence check of logFile is now performed only after Run()
// has started (p.Running() is true). The log file is created by Run(), so
// checking it immediately after Init() raced with the Run() goroutine.
func (s *XLSuite) doTestCmdBufferI(c *C, p CmdBufferI, logging bool) {
	var cmdMap = map[int64]string{
		1: "foo",
		2: "bar",
		3: "baz",
		4: "it's me!",
		5: "my chance will come soon",
		6: "it's my turn now",
		7: "wait for me",
	}
	c.Assert(len(cmdMap), Equals, TEST_PAIR_COUNT)
	// we send the messages somewhat out of order, with some duplicates
	order := [...]int{1, 2, 3, 6, 3, 2, 6, 5, 4, 1, 7}
	var out = make(chan NumberedCmd, len(order)+1) // must exceed len(order)
	var stopCh = make(chan bool, 1)
	var logFile string
	if logging {
		logFile = "tmp/logFile"
	}
	p.Init(out, stopCh, 0, 4, logFile, 0, false) // 4 is chan bufSize
	c.Assert(p.Running(), Equals, false)
	fmt.Println(" starting p loop ...")
	// XXX Run() can return an error, which must be nil
	go p.Run()
	for !p.Running() {
		time.Sleep(time.Millisecond)
	}
	c.Assert(p.Running(), Equals, true)
	if logging {
		_, err := os.Stat(logFile) // created by Run()
		c.Assert(err, Equals, nil)
	}
	for n := 0; n < len(order); n++ {
		which := order[n]
		cmd := cmdMap[int64(which)]
		pair := NumberedCmd{Seqn: int64(which), Cmd: cmd}
		// DEBUG
		// fmt.Printf("sending %d : %s\n", order[n], cmd)
		// END
		p.InCh() <- pair
	}
	// the buffer must deduplicate and reorder: exactly 7 commands, in order
	var results [7]NumberedCmd
	for n := 0; n < 7; n++ {
		results[n] = <-out
		c.Assert(results[n].Seqn, Equals, int64(n+1))
	}
	c.Assert(p.Running(), Equals, true)
	stopCh <- true
	time.Sleep(time.Microsecond)
	c.Assert(p.Running(), Equals, false)
	if logging {
		// the log file must contain every command exactly once, in order
		var expected string
		for i := 1; i <= TEST_PAIR_COUNT; i++ {
			n := int64(i)
			cmd := cmdMap[n]
			line := fmt.Sprintf("%d %s\n", n, cmd)
			expected += line
		}
		raw, err := ioutil.ReadFile(logFile)
		c.Assert(nil, Equals, err)
		actual := string(raw)
		c.Assert(actual, Equals, expected)
	}
} // GEEP
// TestCmdBuffer runs the CmdBufferI exercise twice against a CmdBuffer,
// first without and then with command logging.
func (s *XLSuite) TestCmdBuffer(c *C) {
	var buf CmdBuffer
	fmt.Println("running test without logging")
	s.doTestCmdBufferI(c, &buf, false)
	fmt.Println("running test -with- logging")
	s.doTestCmdBufferI(c, &buf, true)
}
// TestLogBufferOverflow pushes enough randomly-sized commands through a
// logging CmdBuffer to exceed LOG_BUFFER_SIZE, then verifies that every
// command was echoed on the out channel and written to the log file.
func (s *XLSuite) TestLogBufferOverflow(c *C) {
	var buf CmdBuffer
	p := &buf
	var bufSize = LOG_BUFFER_SIZE
	// choose N so total output is ~1.25x the log buffer size (records average 5+96 bytes)
	N := int(1.25 * float64(bufSize) / float64(5+96))
	fmt.Printf("TEST_LOG_BUFFER_OVERFLOW WITH %d RECORDS\n", N)
	rng := s.makeSimpleRNG()
	var out = make(chan NumberedCmd, 4) //
	var stopCh = make(chan bool, 1)
	logFile := "tmp/overflows.log"
	p.Init(out, stopCh, 0, 4, logFile, 0, false) // 4 is bufSize
	c.Assert(p.Running(), Equals, false)
	fmt.Println(" starting p loop for overflow test ...")
	// XXX Run() can return an error, which must be nil
	go p.Run()
	for !p.Running() {
		time.Sleep(time.Millisecond)
	}
	c.Assert(p.Running(), Equals, true)
	// Run() will have created the log file
	_, err := os.Stat(logFile)
	c.Assert(err, Equals, nil)
	cmds := make([]*string, N) // NOTE(review): written but never read back — candidate for removal
	results := make([]*NumberedCmd, N)
	var expectedInFile string
	for n := 0; n < N; n++ {
		seqN := int64(n + 1)
		cmdLen := 64 + rng.Intn(64) // each command is 64..127 random bytes
		raw := make([]byte, cmdLen)
		rng.NextBytes(&raw)
		cmd := string(raw)
		pair := NumberedCmd{Seqn: seqN, Cmd: cmd}
		p.InCh() <- pair
		txt := fmt.Sprintf("%d %s\n", n+1, cmd)
		cmds[n] = &txt
		expectedInFile += txt
		nextResult := <-out
		results[n] = &nextResult
		// expected == actual
		c.Assert(int64(n+1), Equals, nextResult.Seqn)
		c.Assert(cmd, Equals, nextResult.Cmd)
		// fmt.Printf("%d sent and received\n", n)
	}
	c.Assert(p.Running(), Equals, true)
	stopCh <- true
	time.Sleep(time.Microsecond)
	c.Assert(p.Running(), Equals, false)
	// compare actual data in log file with expected
	raw, err := ioutil.ReadFile(logFile)
	c.Assert(nil, Equals, err)
	actual := string(raw)
	c.Assert(expectedInFile, Equals, actual)
}
Commit note: corrected a small error in cmdBuffer_test — the log file is created by Run(), so its existence must be checked only after Run() has started, not immediately after Init().
package consensus
import (
"container/heap"
"fmt"
"github.com/jddixon/xlattice_go/rnglib"
"io/ioutil"
. "launchpad.net/gocheck"
"os"
"testing"
"time"
)
// gocheck tie-in /////////////////////

// Test hooks the gocheck suites into the standard "go test" runner.
func Test(t *testing.T) { TestingT(t) }
// XLSuite is the gocheck suite holding the consensus package tests.
type XLSuite struct{}

var _ = Suite(&XLSuite{})

// end gocheck setup //////////////////

const (
	// TEST_PAIR_COUNT is the number of distinct commands used by the buffer tests.
	TEST_PAIR_COUNT = 7
)
// makeSimpleRNG builds a PRNG seeded with the current Unix time.
func (s *XLSuite) makeSimpleRNG() *rnglib.PRNG {
	return rnglib.NewSimpleRNG(time.Now().Unix())
}
// TestCmdQ checks that pairQ behaves as a min-heap on Seqn: entries pushed
// in arbitrary order pop back in ascending Seqn order.
func (s *XLSuite) TestCmdQ(c *C) {
	q := pairQ{}
	heap.Init(&q)
	c.Assert(q.Len(), Equals, 0)

	pair0 := NumberedCmd{Seqn: 42, Cmd: "foo"}
	pair1 := NumberedCmd{Seqn: 1, Cmd: "bar"}
	pair2 := NumberedCmd{Seqn: 99, Cmd: "baz"}

	pp0 := cmdPlus{pair: &pair0}
	pp1 := cmdPlus{pair: &pair1}
	pp2 := cmdPlus{pair: &pair2}

	heap.Push(&q, &pp0)
	heap.Push(&q, &pp1)
	heap.Push(&q, &pp2)
	c.Assert(q.Len(), Equals, 3)

	// pops come back ordered by Seqn: 1, 42, 99
	out := heap.Pop(&q).(*cmdPlus)
	c.Assert(out.pair.Seqn, Equals, int64(1))
	c.Assert(out.pair.Cmd, Equals, "bar")

	out = heap.Pop(&q).(*cmdPlus)
	c.Assert(out.pair.Seqn, Equals, int64(42))
	c.Assert(out.pair.Cmd, Equals, "foo")

	out = heap.Pop(&q).(*cmdPlus)
	c.Assert(out.pair.Seqn, Equals, int64(99))
	c.Assert(out.pair.Cmd, Equals, "baz")
	c.Assert(q.Len(), Equals, 0)

	// XXX THIS PANICS - so if popping from a heap, always check
	// the length first.
	//zzz := heap.Pop(&q)
	// c.Assert(zzz, Equals, nil)
}
// doTestCmdBufferI exercises a CmdBufferI implementation: commands sent
// somewhat out of order (with duplicates) on InCh() must come out of `out`
// exactly once each, in ascending Seqn order. When logging is enabled the
// buffer must also write each command to logFile in order.
func (s *XLSuite) doTestCmdBufferI(c *C, p CmdBufferI, logging bool) {
	var cmdMap = map[int64]string{
		1: "foo",
		2: "bar",
		3: "baz",
		4: "it's me!",
		5: "my chance will come soon",
		6: "it's my turn now",
		7: "wait for me",
	}
	c.Assert(len(cmdMap), Equals, TEST_PAIR_COUNT)
	// we send the messages somewhat out of order, with some duplicates
	order := [...]int{1, 2, 3, 6, 3, 2, 6, 5, 4, 1, 7}
	var out = make(chan NumberedCmd, len(order)+1) // must exceed len(order)
	var stopCh = make(chan bool, 1)
	var logFile string
	if logging {
		logFile = "tmp/logFile"
	}
	p.Init(out, stopCh, 0, 4, logFile, 0, false) // 4 is chan bufSize
	c.Assert(p.Running(), Equals, false)
	fmt.Println(" starting p loop ...")
	// XXX Run() can return an error, which must be nil
	go p.Run()
	for !p.Running() {
		time.Sleep(time.Millisecond)
	}
	c.Assert(p.Running(), Equals, true)
	if logging {
		_, err := os.Stat(logFile) // created by Run()
		c.Assert(err, Equals, nil)
	}
	for n := 0; n < len(order); n++ {
		which := order[n]
		cmd := cmdMap[int64(which)]
		pair := NumberedCmd{Seqn: int64(which), Cmd: cmd}
		// DEBUG
		// fmt.Printf("sending %d : %s\n", order[n], cmd)
		// END
		p.InCh() <- pair
	}
	// the buffer must deduplicate and reorder: exactly 7 commands, in order
	var results [7]NumberedCmd
	for n := 0; n < 7; n++ {
		results[n] = <-out
		c.Assert(results[n].Seqn, Equals, int64(n+1))
	}
	c.Assert(p.Running(), Equals, true)
	stopCh <- true
	time.Sleep(time.Microsecond)
	c.Assert(p.Running(), Equals, false)
	if logging {
		// the log file must contain every command exactly once, in order
		var expected string
		for i := 1; i <= TEST_PAIR_COUNT; i++ {
			n := int64(i)
			cmd := cmdMap[n]
			line := fmt.Sprintf("%d %s\n", n, cmd)
			expected += line
		}
		raw, err := ioutil.ReadFile(logFile)
		c.Assert(nil, Equals, err)
		actual := string(raw)
		c.Assert(actual, Equals, expected)
	}
} // GEEP
// TestCmdBuffer runs the CmdBufferI exercise twice against a CmdBuffer,
// first without and then with command logging.
func (s *XLSuite) TestCmdBuffer(c *C) {
	var buf CmdBuffer
	fmt.Println("running test without logging")
	s.doTestCmdBufferI(c, &buf, false)
	fmt.Println("running test -with- logging")
	s.doTestCmdBufferI(c, &buf, true)
}
// TestLogBufferOverflow pushes enough randomly-sized commands through a
// logging CmdBuffer to exceed LOG_BUFFER_SIZE, then verifies that every
// command was echoed on the out channel and written to the log file.
func (s *XLSuite) TestLogBufferOverflow(c *C) {
	var buf CmdBuffer
	p := &buf
	var bufSize = LOG_BUFFER_SIZE
	// choose N so total output is ~1.25x the log buffer size (records average 5+96 bytes)
	N := int(1.25 * float64(bufSize) / float64(5+96))
	fmt.Printf("TEST_LOG_BUFFER_OVERFLOW WITH %d RECORDS\n", N)
	rng := s.makeSimpleRNG()
	var out = make(chan NumberedCmd, 4) //
	var stopCh = make(chan bool, 1)
	logFile := "tmp/overflows.log"
	p.Init(out, stopCh, 0, 4, logFile, 0, false) // 4 is bufSize
	c.Assert(p.Running(), Equals, false)
	fmt.Println(" starting p loop for overflow test ...")
	// XXX Run() can return an error, which must be nil
	go p.Run()
	for !p.Running() {
		time.Sleep(time.Millisecond)
	}
	c.Assert(p.Running(), Equals, true)
	// Run() will have created the log file
	_, err := os.Stat(logFile)
	c.Assert(err, Equals, nil)
	cmds := make([]*string, N) // NOTE(review): written but never read back — candidate for removal
	results := make([]*NumberedCmd, N)
	var expectedInFile string
	for n := 0; n < N; n++ {
		seqN := int64(n + 1)
		cmdLen := 64 + rng.Intn(64) // each command is 64..127 random bytes
		raw := make([]byte, cmdLen)
		rng.NextBytes(&raw)
		cmd := string(raw)
		pair := NumberedCmd{Seqn: seqN, Cmd: cmd}
		p.InCh() <- pair
		txt := fmt.Sprintf("%d %s\n", n+1, cmd)
		cmds[n] = &txt
		expectedInFile += txt
		nextResult := <-out
		results[n] = &nextResult
		// expected == actual
		c.Assert(int64(n+1), Equals, nextResult.Seqn)
		c.Assert(cmd, Equals, nextResult.Cmd)
		// fmt.Printf("%d sent and received\n", n)
	}
	c.Assert(p.Running(), Equals, true)
	stopCh <- true
	time.Sleep(time.Microsecond)
	c.Assert(p.Running(), Equals, false)
	// compare actual data in log file with expected
	raw, err := ioutil.ReadFile(logFile)
	c.Assert(nil, Equals, err)
	actual := string(raw)
	c.Assert(expectedInFile, Equals, actual)
}
// |
// Copyright 2014 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package local contains the local time series storage used by Prometheus.
package local
import (
"container/list"
"sync/atomic"
"time"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/metric"
)
// Tuning constants for the local storage engine.
const (
	persistQueueCap  = 1024 // Capacity of the persistQueue channel.
	evictRequestsCap = 1024 // Capacity of the evictRequests channel.
	chunkLen         = 1024 // Chunk length handed to newPersistence.
	// See waitForNextFP.
	fpMaxWaitDuration = 10 * time.Second
	fpMinWaitDuration = 5 * time.Millisecond // ~ hard disk seek time.
	fpMaxSweepTime    = 6 * time.Hour
	maxEvictInterval = time.Minute
	headChunkTimeout = time.Hour // Close head chunk if not touched for that long.
)

// storageState enumerates the lifecycle phases of the storage.
// NOTE(review): not referenced anywhere in this file — confirm it is
// still needed.
type storageState uint

const (
	storageStarting storageState = iota
	storageServing
	storageStopping
)

// persistRequest asks handlePersistQueue to write one chunk, identified
// by the fingerprint of the series it belongs to, to disk.
type persistRequest struct {
	fingerprint clientmodel.Fingerprint
	chunkDesc   *chunkDesc
}

// evictRequest adds a chunkDesc to (evict==true) or removes it from
// (evict==false) the eviction list maintained by handleEvictList.
type evictRequest struct {
	cd    *chunkDesc
	evict bool
}
// memorySeriesStorage holds all in-memory series plus the machinery to
// persist, evict, archive, and purge chunks. It also acts as a
// prometheus.Collector (see Describe/Collect below).
type memorySeriesStorage struct {
	fpLocker   *fingerprintLocker // Per-fingerprint locking.
	fpToSeries *seriesMap         // All series currently held in memory.

	loopStopping, loopStopped chan struct{} // Shutdown signaling for loop().
	maxMemoryChunks           int           // Eviction threshold, see maybeEvict.
	purgeAfter                time.Duration // Retention period for chunks.
	checkpointInterval        time.Duration // How often loop() checkpoints.

	persistQueue   chan persistRequest // Chunks waiting to be written to disk.
	persistStopped chan struct{}       // Closed when handlePersistQueue exits.
	persistence    *persistence        // The on-disk layer.

	evictList                   *list.List      // chunkDescs queued for possible eviction.
	evictRequests               chan evictRequest
	evictStopping, evictStopped chan struct{} // Shutdown signaling for handleEvictList.

	// Instrumentation.
	persistLatency              prometheus.Summary
	persistErrors               *prometheus.CounterVec
	persistQueueLength          prometheus.Gauge
	numSeries                   prometheus.Gauge
	seriesOps                   *prometheus.CounterVec
	ingestedSamplesCount        prometheus.Counter
	invalidPreloadRequestsCount prometheus.Counter
	purgeDuration               prometheus.Gauge // NOTE(review): never set in this file — confirm still needed.
}
// MemorySeriesStorageOptions contains options needed by
// NewMemorySeriesStorage. It is not safe to leave any of those at their zero
// values.
type MemorySeriesStorageOptions struct {
	MemoryChunks               int           // How many chunks to keep in memory.
	PersistenceStoragePath     string        // Location of persistence files.
	PersistenceRetentionPeriod time.Duration // Chunks at least that old are purged.
	CheckpointInterval         time.Duration // How often to checkpoint the series map and head chunks.
	Dirty                      bool          // Force the storage to consider itself dirty on startup.
}
// NewMemorySeriesStorage returns a newly allocated Storage. Storage.Serve still
// has to be called to start the storage. It loads the series map and head
// chunks from disk before returning; on any persistence error it returns
// a nil Storage and the error.
func NewMemorySeriesStorage(o *MemorySeriesStorageOptions) (Storage, error) {
	p, err := newPersistence(o.PersistenceStoragePath, chunkLen, o.Dirty)
	if err != nil {
		return nil, err
	}
	glog.Info("Loading series map and head chunks...")
	fpToSeries, err := p.loadSeriesMapAndHeads()
	if err != nil {
		return nil, err
	}
	glog.Infof("%d series loaded.", fpToSeries.length())
	// This gauge is created up front (rather than inline in the struct
	// literal) so it can be seeded with the number of series recovered
	// from disk.
	numSeries := prometheus.NewGauge(prometheus.GaugeOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      "memory_series",
		Help:      "The current number of series in memory.",
	})
	numSeries.Set(float64(fpToSeries.length()))
	return &memorySeriesStorage{
		fpLocker:   newFingerprintLocker(256), // 256 lock stripes.
		fpToSeries: fpToSeries,

		loopStopping:       make(chan struct{}),
		loopStopped:        make(chan struct{}),
		maxMemoryChunks:    o.MemoryChunks,
		purgeAfter:         o.PersistenceRetentionPeriod,
		checkpointInterval: o.CheckpointInterval,

		persistQueue:   make(chan persistRequest, persistQueueCap),
		persistStopped: make(chan struct{}),
		persistence:    p,

		evictList:     list.New(),
		evictRequests: make(chan evictRequest, evictRequestsCap),
		evictStopping: make(chan struct{}),
		evictStopped:  make(chan struct{}),

		persistLatency: prometheus.NewSummary(prometheus.SummaryOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "persist_latency_microseconds",
			Help:      "A summary of latencies for persisting each chunk.",
		}),
		persistErrors: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Namespace: namespace,
				Subsystem: subsystem,
				Name:      "persist_errors_total",
				Help:      "A counter of errors persisting chunks.",
			},
			[]string{"error"},
		),
		persistQueueLength: prometheus.NewGauge(prometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "persist_queue_length",
			Help:      "The current number of chunks waiting in the persist queue.",
		}),
		numSeries: numSeries,
		seriesOps: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Namespace: namespace,
				Subsystem: subsystem,
				Name:      "series_ops_total",
				Help:      "The total number of series operations by their type.",
			},
			[]string{opTypeLabel},
		),
		ingestedSamplesCount: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "ingested_samples_total",
			Help:      "The total number of samples ingested.",
		}),
		invalidPreloadRequestsCount: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "invalid_preload_requests_total",
			Help:      "The total number of preload requests referring to a non-existent series. This is an indication of outdated label indexes.",
		}),
	}, nil
}
// Start implements Storage. It launches the three background goroutines
// (chunk eviction, persist queue, maintenance loop); each is stopped and
// awaited by Stop.
func (s *memorySeriesStorage) Start() {
	go s.handleEvictList()
	go s.handlePersistQueue()
	go s.loop()
}
// Stop implements Storage. Shutdown order matters: the maintenance loop
// is stopped first (it feeds the persist queue via maintainSeries), then
// the persist queue is closed and drained, then chunk eviction is
// stopped, and finally a last checkpoint is written and persistence is
// closed.
//
// NOTE(review): closing persistQueue assumes no appendSample call is
// still in flight — a send on the closed channel would panic. Confirm
// callers stop ingestion before calling Stop.
func (s *memorySeriesStorage) Stop() error {
	glog.Info("Stopping local storage...")
	glog.Info("Stopping maintenance loop...")
	close(s.loopStopping)
	<-s.loopStopped
	glog.Info("Stopping persist queue...")
	close(s.persistQueue)
	<-s.persistStopped
	glog.Info("Stopping chunk eviction...")
	close(s.evictStopping)
	<-s.evictStopped
	// One final checkpoint of the series map and the head chunks.
	if err := s.persistence.checkpointSeriesMapAndHeads(s.fpToSeries, s.fpLocker); err != nil {
		return err
	}
	if err := s.persistence.close(); err != nil {
		return err
	}
	glog.Info("Local storage stopped.")
	return nil
}
// WaitForIndexing implements Storage. It blocks until the persistence
// layer has processed all pending index operations.
func (s *memorySeriesStorage) WaitForIndexing() {
	s.persistence.waitForIndexing()
}
// NewIterator implements Storage.
func (s *memorySeriesStorage) NewIterator(fp clientmodel.Fingerprint) SeriesIterator {
	s.fpLocker.Lock(fp)
	defer s.fpLocker.Unlock(fp)
	series, ok := s.fpToSeries.get(fp)
	if !ok {
		// Oops, no series for fp found. That happens if, after
		// preloading is done, the whole series is identified as old
		// enough for purging and hence purged for good. As there is no
		// data left to iterate over, return an iterator that will never
		// return any values.
		return nopSeriesIterator{}
	}
	// The iterator re-acquires the fingerprint lock around each access
	// via these two callbacks.
	return series.newIterator(
		func() { s.fpLocker.Lock(fp) },
		func() { s.fpLocker.Unlock(fp) },
	)
}
// NewPreloader implements Storage. The returned preloader pins chunks of
// this storage via preloadChunksForRange.
func (s *memorySeriesStorage) NewPreloader() Preloader {
	return &memorySeriesPreloader{
		storage: s,
	}
}
// GetFingerprintsForLabelMatchers implements Storage. It intersects the
// fingerprint sets produced by each matcher; a nil result map marks the
// first iteration, in which every fingerprint is accepted. It returns
// nil as soon as any matcher yields an empty set.
func (s *memorySeriesStorage) GetFingerprintsForLabelMatchers(labelMatchers metric.LabelMatchers) clientmodel.Fingerprints {
	var result map[clientmodel.Fingerprint]struct{}
	for _, matcher := range labelMatchers {
		intersection := map[clientmodel.Fingerprint]struct{}{}
		switch matcher.Type {
		case metric.Equal:
			// Equality matchers can use the label-pair index directly.
			fps, err := s.persistence.getFingerprintsForLabelPair(
				metric.LabelPair{
					Name:  matcher.Name,
					Value: matcher.Value,
				},
			)
			if err != nil {
				glog.Error("Error getting fingerprints for label pair: ", err)
			}
			if len(fps) == 0 {
				return nil
			}
			for _, fp := range fps {
				// Keep fp if it survived all previous matchers
				// (or this is the first matcher: result == nil).
				if _, ok := result[fp]; ok || result == nil {
					intersection[fp] = struct{}{}
				}
			}
		default:
			// Non-equality matchers filter the full value set of
			// the label name, then look up each matching value.
			values, err := s.persistence.getLabelValuesForLabelName(matcher.Name)
			if err != nil {
				glog.Errorf("Error getting label values for label name %q: %v", matcher.Name, err)
			}
			matches := matcher.Filter(values)
			if len(matches) == 0 {
				return nil
			}
			for _, v := range matches {
				fps, err := s.persistence.getFingerprintsForLabelPair(
					metric.LabelPair{
						Name:  matcher.Name,
						Value: v,
					},
				)
				if err != nil {
					glog.Error("Error getting fingerprints for label pair: ", err)
				}
				for _, fp := range fps {
					if _, ok := result[fp]; ok || result == nil {
						intersection[fp] = struct{}{}
					}
				}
			}
		}
		if len(intersection) == 0 {
			return nil
		}
		result = intersection
	}
	// Flatten the surviving set into a slice.
	fps := make(clientmodel.Fingerprints, 0, len(result))
	for fp := range result {
		fps = append(fps, fp)
	}
	return fps
}
// GetLabelValuesForLabelName implements Storage. Errors are logged, not
// returned; the (possibly empty) value list is returned regardless.
func (s *memorySeriesStorage) GetLabelValuesForLabelName(labelName clientmodel.LabelName) clientmodel.LabelValues {
	lvs, err := s.persistence.getLabelValuesForLabelName(labelName)
	if err != nil {
		glog.Errorf("Error getting label values for label name %q: %v", labelName, err)
	}
	return lvs
}
// GetMetricForFingerprint implements Storage. It looks in memory first
// and falls back to the archived-metric index on disk.
func (s *memorySeriesStorage) GetMetricForFingerprint(fp clientmodel.Fingerprint) clientmodel.COWMetric {
	s.fpLocker.Lock(fp)
	defer s.fpLocker.Unlock(fp)
	series, ok := s.fpToSeries.get(fp)
	if ok {
		// Wrap the returned metric in a copy-on-write (COW) metric here because
		// the caller might mutate it.
		return clientmodel.COWMetric{
			Metric: series.metric,
		}
	}
	metric, err := s.persistence.getArchivedMetric(fp)
	if err != nil {
		glog.Errorf("Error retrieving archived metric for fingerprint %v: %v", fp, err)
	}
	return clientmodel.COWMetric{
		Metric: metric,
	}
}
// AppendSamples implements Storage. Each sample is appended to its
// series individually; the ingestion counter is bumped once for the
// whole batch.
func (s *memorySeriesStorage) AppendSamples(samples clientmodel.Samples) {
	for i := range samples {
		s.appendSample(samples[i])
	}
	s.ingestedSamplesCount.Add(float64(len(samples)))
}
// appendSample adds one sample to its (possibly newly created) series
// under the fingerprint lock, then queues any chunks that became
// complete for persistence.
func (s *memorySeriesStorage) appendSample(sample *clientmodel.Sample) {
	fp := sample.Metric.Fingerprint()
	s.fpLocker.Lock(fp)
	series := s.getOrCreateSeries(fp, sample.Metric)
	chunkDescsToPersist := series.add(fp, &metric.SamplePair{
		Value:     sample.Value,
		Timestamp: sample.Timestamp,
	})
	s.fpLocker.Unlock(fp)
	// Queue only outside of the locked area, processing the persistQueue
	// requires the same lock!
	for _, cd := range chunkDescsToPersist {
		s.persistQueue <- persistRequest{fp, cd}
	}
}
// getOrCreateSeries returns the in-memory series for fp, creating it if
// necessary. A series absent from memory is first looked up in the
// archive (unarchiveMetric); only a genuinely new series gets its metric
// indexed. Callers must hold the fingerprint lock for fp.
func (s *memorySeriesStorage) getOrCreateSeries(fp clientmodel.Fingerprint, m clientmodel.Metric) *memorySeries {
	series, ok := s.fpToSeries.get(fp)
	if !ok {
		unarchived, firstTime, err := s.persistence.unarchiveMetric(fp)
		if err != nil {
			glog.Errorf("Error unarchiving fingerprint %v: %v", fp, err)
		}
		if unarchived {
			s.seriesOps.WithLabelValues(unarchive).Inc()
		} else {
			// This was a genuinely new series, so index the metric.
			s.persistence.indexMetric(fp, m)
			s.seriesOps.WithLabelValues(create).Inc()
		}
		series = newMemorySeries(m, !unarchived, firstTime)
		s.fpToSeries.put(fp, series)
		s.numSeries.Inc()
	}
	return series
}
// preloadChunksForRange pins the chunks of the series fp that overlap
// [from, through] (widened by stalenessDelta). An archived series whose
// time range overlaps is unarchived first; a fingerprint that is neither
// in memory nor archived counts as an invalid preload request.
func (s *memorySeriesStorage) preloadChunksForRange(
	fp clientmodel.Fingerprint,
	from clientmodel.Timestamp, through clientmodel.Timestamp,
	stalenessDelta time.Duration,
) ([]*chunkDesc, error) {
	s.fpLocker.Lock(fp)
	defer s.fpLocker.Unlock(fp)
	series, ok := s.fpToSeries.get(fp)
	if !ok {
		has, first, last, err := s.persistence.hasArchivedMetric(fp)
		if err != nil {
			return nil, err
		}
		if !has {
			s.invalidPreloadRequestsCount.Inc()
			return nil, nil
		}
		// Only unarchive if the requested (staleness-widened) range
		// actually overlaps the archived series' time range.
		if from.Add(-stalenessDelta).Before(last) && through.Add(stalenessDelta).After(first) {
			metric, err := s.persistence.getArchivedMetric(fp)
			if err != nil {
				return nil, err
			}
			series = s.getOrCreateSeries(fp, metric)
		} else {
			return nil, nil
		}
	}
	return series.preloadChunksForRange(from, through, fp, s)
}
// handleEvictList is a background goroutine (started by Start) that
// maintains the eviction list and triggers batched evictions. It exits
// when evictStopping is closed and signals completion via evictStopped.
func (s *memorySeriesStorage) handleEvictList() {
	ticker := time.NewTicker(maxEvictInterval)
	count := 0
loop:
	for {
		// To batch up evictions a bit, this tries evictions at least
		// once per evict interval, but earlier if the number of evict
		// requests with evict==true that have happened since the last
		// evict run is more than maxMemoryChunks/1000.
		select {
		case req := <-s.evictRequests:
			if req.evict {
				req.cd.evictListElement = s.evictList.PushBack(req.cd)
				count++
				if count > s.maxMemoryChunks/1000 {
					s.maybeEvict()
					count = 0
				}
			} else {
				// evict==false: the chunk was re-pinned, take it
				// off the eviction list if it is on it.
				if req.cd.evictListElement != nil {
					s.evictList.Remove(req.cd.evictListElement)
					req.cd.evictListElement = nil
				}
			}
		case <-ticker.C:
			if s.evictList.Len() > 0 {
				s.maybeEvict()
			}
		case <-s.evictStopping:
			break loop
		}
	}
	ticker.Stop()
	glog.Info("Chunk eviction stopped.")
	close(s.evictStopped)
}
// maybeEvict is a local helper method. Must only be called by handleEvictList.
// It pops up to (numMemChunks - maxMemoryChunks) chunkDescs off the front
// of the eviction list and evicts them asynchronously.
func (s *memorySeriesStorage) maybeEvict() {
	numChunksToEvict := int(atomic.LoadInt64(&numMemChunks)) - s.maxMemoryChunks
	if numChunksToEvict <= 0 {
		return
	}
	chunkDescsToEvict := make([]*chunkDesc, numChunksToEvict)
	for i := range chunkDescsToEvict {
		e := s.evictList.Front()
		if e == nil {
			// Fewer evictable chunks than needed; the slice keeps
			// nil tail entries, handled below.
			break
		}
		cd := e.Value.(*chunkDesc)
		cd.evictListElement = nil
		chunkDescsToEvict[i] = cd
		s.evictList.Remove(e)
	}
	// Do the actual eviction in a goroutine as we might otherwise deadlock,
	// in the following way: A chunk was unpinned completely and therefore
	// scheduled for eviction. At the time we actually try to evict it,
	// another goroutine is pinning the chunk. The pinning goroutine has
	// currently locked the chunk and tries to send the evict request (to
	// remove the chunk from the evict list) to the evictRequests
	// channel. The send blocks because evictRequests is full. However, the
	// goroutine that is supposed to empty the channel is waiting for the
	// chunkDesc lock to try to evict the chunk.
	go func() {
		for _, cd := range chunkDescsToEvict {
			if cd == nil {
				break
			}
			cd.maybeEvict()
			// We don't care if the eviction succeeds. If the chunk
			// was pinned in the meantime, it will be added to the
			// evict list once it gets unpinned again.
		}
	}()
}
// handlePersistQueue is a background goroutine (started by Start) that
// writes queued chunks to disk. It exits when persistQueue is closed
// (see Stop) and signals completion via persistStopped.
func (s *memorySeriesStorage) handlePersistQueue() {
	for req := range s.persistQueue {
		s.persistQueueLength.Set(float64(len(s.persistQueue)))
		start := time.Now()
		// The fingerprint lock serializes this against appendSample and
		// the maintenance methods for the same series.
		s.fpLocker.Lock(req.fingerprint)
		offset, err := s.persistence.persistChunk(req.fingerprint, req.chunkDesc.chunk)
		if series, seriesInMemory := s.fpToSeries.get(req.fingerprint); err == nil && seriesInMemory && series.chunkDescsOffset == -1 {
			// This is the first chunk persisted for a newly created
			// series that had prior chunks on disk. Finally, we can
			// set the chunkDescsOffset.
			series.chunkDescsOffset = offset
		}
		s.fpLocker.Unlock(req.fingerprint)
		s.persistLatency.Observe(float64(time.Since(start)) / float64(time.Microsecond))
		if err != nil {
			// On error the chunk stays pinned and the storage is
			// marked dirty for recovery on next startup.
			s.persistErrors.WithLabelValues(err.Error()).Inc()
			glog.Error("Error persisting chunk: ", err)
			s.persistence.setDirty(true)
			continue
		}
		req.chunkDesc.unpin(s.evictRequests)
		chunkOps.WithLabelValues(persistAndUnpin).Inc()
	}
	glog.Info("Persist queue drained and stopped.")
	close(s.persistStopped)
}
// waitForNextFP waits an estimated duration, after which we want to process
// another fingerprint so that we will process all fingerprints in a tenth of
// s.purgeAfter assuming that the system is doing nothing else, e.g. if we want
// to purge after 40h, we want to cycle through all fingerprints within
// 4h. However, the maximum sweep time is capped at fpMaxSweepTime. Furthermore,
// this method will always wait for at least fpMinWaitDuration and never longer
// than fpMaxWaitDuration. If s.loopStopped is closed, it will return false
// immediately. The estimation is based on the total number of fingerprints as
// passed in.
func (s *memorySeriesStorage) waitForNextFP(numberOfFPs int) bool {
	d := fpMaxWaitDuration
	if numberOfFPs != 0 {
		sweepTime := s.purgeAfter / 10
		if sweepTime > fpMaxSweepTime {
			sweepTime = fpMaxSweepTime
		}
		// Spread the sweep evenly over the fingerprints, clamped to
		// [fpMinWaitDuration, fpMaxWaitDuration].
		d = sweepTime / time.Duration(numberOfFPs)
		if d < fpMinWaitDuration {
			d = fpMinWaitDuration
		}
		if d > fpMaxWaitDuration {
			d = fpMaxWaitDuration
		}
	}
	t := time.NewTimer(d)
	// Fix: stop the timer on early return. Previously, returning via
	// loopStopping left the timer running until it fired.
	defer t.Stop()
	select {
	case <-t.C:
		return true
	case <-s.loopStopping:
		return false
	}
}
// cycleThroughMemoryFingerprints returns a channel that emits fingerprints for
// series in memory in a throttled fashion. It continues to cycle through all
// fingerprints in memory until s.loopStopping is closed.
func (s *memorySeriesStorage) cycleThroughMemoryFingerprints() chan clientmodel.Fingerprint {
	memoryFingerprints := make(chan clientmodel.Fingerprint)
	go func() {
		var fpIter <-chan clientmodel.Fingerprint
		defer func() {
			// Drain a partially consumed iterator so its producer
			// goroutine can finish, then signal our own completion.
			if fpIter != nil {
				for range fpIter {
					// Consume the iterator.
				}
			}
			close(memoryFingerprints)
		}()
		for {
			// Initial wait, also important if there are no FPs yet.
			if !s.waitForNextFP(s.fpToSeries.length()) {
				return
			}
			begin := time.Now()
			fpIter = s.fpToSeries.fpIter()
			for fp := range fpIter {
				select {
				case memoryFingerprints <- fp:
				case <-s.loopStopping:
					return
				}
				// Throttle between fingerprints.
				s.waitForNextFP(s.fpToSeries.length())
			}
			glog.Infof("Completed maintenance sweep through in-memory fingerprints in %v.", time.Since(begin))
		}
	}()
	return memoryFingerprints
}
// cycleThroughArchivedFingerprints returns a channel that emits fingerprints
// for archived series in a throttled fashion. It continues to cycle through all
// archived fingerprints until s.loopStopping is closed.
func (s *memorySeriesStorage) cycleThroughArchivedFingerprints() chan clientmodel.Fingerprint {
	archivedFingerprints := make(chan clientmodel.Fingerprint)
	go func() {
		defer close(archivedFingerprints)
		for {
			// Only fingerprints whose series are older than the
			// retention period are candidates for this sweep.
			archivedFPs, err := s.persistence.getFingerprintsModifiedBefore(
				clientmodel.TimestampFromTime(time.Now()).Add(-1 * s.purgeAfter),
			)
			if err != nil {
				glog.Error("Failed to lookup archived fingerprint ranges: ", err)
				s.waitForNextFP(0)
				continue
			}
			// Initial wait, also important if there are no FPs yet.
			if !s.waitForNextFP(len(archivedFPs)) {
				return
			}
			begin := time.Now()
			for _, fp := range archivedFPs {
				select {
				case archivedFingerprints <- fp:
				case <-s.loopStopping:
					return
				}
				// Throttle between fingerprints.
				s.waitForNextFP(len(archivedFPs))
			}
			glog.Infof("Completed maintenance sweep through archived fingerprints in %v.", time.Since(begin))
		}
	}()
	return archivedFingerprints
}
// loop is the storage's maintenance goroutine. It periodically
// checkpoints the series map and head chunks, and purges/maintains
// series as fingerprints arrive from the two cycler channels. It exits
// when loopStopping is closed and signals completion via loopStopped.
func (s *memorySeriesStorage) loop() {
	checkpointTicker := time.NewTicker(s.checkpointInterval)
	defer func() {
		checkpointTicker.Stop()
		glog.Info("Maintenance loop stopped.")
		close(s.loopStopped)
	}()
	memoryFingerprints := s.cycleThroughMemoryFingerprints()
	archivedFingerprints := s.cycleThroughArchivedFingerprints()
loop:
	for {
		select {
		case <-s.loopStopping:
			break loop
		case <-checkpointTicker.C:
			// Fix: the checkpoint error was silently discarded before.
			if err := s.persistence.checkpointSeriesMapAndHeads(s.fpToSeries, s.fpLocker); err != nil {
				glog.Error("Error while checkpointing: ", err)
			}
		case fp := <-memoryFingerprints:
			s.purgeSeries(fp, clientmodel.TimestampFromTime(time.Now()).Add(-1*s.purgeAfter))
			s.maintainSeries(fp)
			s.seriesOps.WithLabelValues(memoryMaintenance).Inc()
		case fp := <-archivedFingerprints:
			s.purgeSeries(fp, clientmodel.TimestampFromTime(time.Now()).Add(-1*s.purgeAfter))
			s.seriesOps.WithLabelValues(archiveMaintenance).Inc()
		}
	}
	// Wait until both channels are closed.
	for range memoryFingerprints {
	}
	for range archivedFingerprints {
	}
}
// maintainSeries closes the head chunk if not touched in a while. It archives a
// series if all chunks are evicted. It evicts chunkDescs if there are too many.
func (s *memorySeriesStorage) maintainSeries(fp clientmodel.Fingerprint) {
	var headChunkToPersist *chunkDesc
	s.fpLocker.Lock(fp)
	defer func() {
		s.fpLocker.Unlock(fp)
		// Queue outside of lock! handlePersistQueue takes the same
		// fingerprint lock, so queueing while holding it could block
		// against a full queue forever.
		if headChunkToPersist != nil {
			s.persistQueue <- persistRequest{fp, headChunkToPersist}
		}
	}()
	series, ok := s.fpToSeries.get(fp)
	if !ok {
		return
	}
	// Find the index of the oldest chunkDesc still in memory.
	iOldestNotEvicted := -1
	for i, cd := range series.chunkDescs {
		if !cd.isEvicted() {
			iOldestNotEvicted = i
			break
		}
	}
	// Archive if all chunks are evicted.
	if iOldestNotEvicted == -1 {
		s.fpToSeries.del(fp)
		s.numSeries.Dec()
		if err := s.persistence.archiveMetric(
			fp, series.metric, series.firstTime(), series.lastTime(),
		); err != nil {
			glog.Errorf("Error archiving metric %v: %v", series.metric, err)
		} else {
			s.seriesOps.WithLabelValues(archive).Inc()
		}
		return
	}
	// If we are here, the series is not archived, so check for chunkDesc
	// eviction next and then if the head chunk needs to be persisted.
	series.evictChunkDescs(iOldestNotEvicted)
	if !series.headChunkPersisted && time.Now().Sub(series.head().firstTime().Time()) > headChunkTimeout {
		series.headChunkPersisted = true
		// Since we cannot modify the head chunk from now on, we
		// don't need to bother with cloning anymore.
		series.headChunkUsedByIterator = false
		headChunkToPersist = series.head()
	}
}
// purgeSeries purges chunks older than beforeTime from a series. If the series
// contains no chunks after the purge, it is dropped entirely. Handles both
// in-memory and archived series.
func (s *memorySeriesStorage) purgeSeries(fp clientmodel.Fingerprint, beforeTime clientmodel.Timestamp) {
	s.fpLocker.Lock(fp)
	defer s.fpLocker.Unlock(fp)
	if series, ok := s.fpToSeries.get(fp); ok {
		// Deal with series in memory.
		if !series.firstTime().Before(beforeTime) {
			// Oldest sample not old enough.
			return
		}
		newFirstTime, numDropped, allDropped, err := s.persistence.dropChunks(fp, beforeTime)
		if err != nil {
			glog.Error("Error purging persisted chunks: ", err)
		}
		numPurged, allPurged := series.purgeOlderThan(beforeTime)
		if allPurged && allDropped {
			// Nothing left at all: drop the series from memory and
			// from the index.
			s.fpToSeries.del(fp)
			s.numSeries.Dec()
			s.seriesOps.WithLabelValues(memoryPurge).Inc()
			s.persistence.unindexMetric(fp, series.metric)
		} else if series.chunkDescsOffset != -1 {
			// Keep the on-disk offset consistent with what was
			// purged in memory vs. dropped on disk.
			series.savedFirstTime = newFirstTime
			series.chunkDescsOffset += numPurged - numDropped
			if series.chunkDescsOffset < 0 {
				panic("dropped more chunks from persistence than from memory")
			}
		}
		return
	}
	// Deal with archived series.
	has, firstTime, lastTime, err := s.persistence.hasArchivedMetric(fp)
	if err != nil {
		glog.Error("Error looking up archived time range: ", err)
		return
	}
	if !has || !firstTime.Before(beforeTime) {
		// Oldest sample not old enough, or metric purged or unarchived in the meantime.
		return
	}
	newFirstTime, _, allDropped, err := s.persistence.dropChunks(fp, beforeTime)
	if err != nil {
		glog.Error("Error purging persisted chunks: ", err)
	}
	if allDropped {
		if err := s.persistence.dropArchivedMetric(fp); err != nil {
			glog.Errorf("Error dropping archived metric for fingerprint %v: %v", fp, err)
			return
		}
		s.seriesOps.WithLabelValues(archivePurge).Inc()
		return
	}
	// NOTE(review): any result of updateArchivedTimeRange is discarded
	// here — confirm it cannot fail, or handle its error.
	s.persistence.updateArchivedTimeRange(fp, newFirstTime, lastTime)
}
// loadChunks delegates to the persistence layer.
// See persistence.loadChunks for detailed explanation.
func (s *memorySeriesStorage) loadChunks(fp clientmodel.Fingerprint, indexes []int, indexOffset int) ([]chunk, error) {
	return s.persistence.loadChunks(fp, indexes, indexOffset)
}

// loadChunkDescs delegates to the persistence layer.
// See persistence.loadChunkDescs for detailed explanation.
func (s *memorySeriesStorage) loadChunkDescs(fp clientmodel.Fingerprint, beforeTime clientmodel.Timestamp) ([]*chunkDesc, error) {
	return s.persistence.loadChunkDescs(fp, beforeTime)
}
// To expose persistQueueCap as metric:
var (
	persistQueueCapDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, subsystem, "persist_queue_capacity"),
		"The total capacity of the persist queue.",
		nil, nil,
	)
	// The capacity never changes, so the gauge is built once as a
	// constant metric.
	persistQueueCapGauge = prometheus.MustNewConstMetric(
		persistQueueCapDesc, prometheus.GaugeValue, persistQueueCap,
	)
)
// Describe implements prometheus.Collector.
func (s *memorySeriesStorage) Describe(ch chan<- *prometheus.Desc) {
	s.persistence.Describe(ch)
	ch <- s.persistLatency.Desc()
	s.persistErrors.Describe(ch)
	ch <- s.persistQueueLength.Desc()
	ch <- s.numSeries.Desc()
	s.seriesOps.Describe(ch)
	ch <- s.ingestedSamplesCount.Desc()
	ch <- s.invalidPreloadRequestsCount.Desc()
	// Descriptors of the two constant metrics emitted in Collect.
	ch <- persistQueueCapDesc
	ch <- numMemChunksDesc
}
// Collect implements prometheus.Collector.
func (s *memorySeriesStorage) Collect(ch chan<- prometheus.Metric) {
	s.persistence.Collect(ch)
	ch <- s.persistLatency
	s.persistErrors.Collect(ch)
	ch <- s.persistQueueLength
	ch <- s.numSeries
	s.seriesOps.Collect(ch)
	ch <- s.ingestedSamplesCount
	ch <- s.invalidPreloadRequestsCount
	ch <- persistQueueCapGauge
	// Snapshot the package-level chunk counter and emit it as a
	// constant gauge.
	count := atomic.LoadInt64(&numMemChunks)
	ch <- prometheus.MustNewConstMetric(numMemChunksDesc, prometheus.GaugeValue, float64(count))
}
// Remove a deadlock during shutdown.
// Copyright 2014 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package local contains the local time series storage used by Prometheus.
package local
import (
"container/list"
"sync/atomic"
"time"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/metric"
)
// Tuning constants for the local storage engine.
const (
	persistQueueCap  = 1024 // Capacity of the persistQueue channel.
	evictRequestsCap = 1024 // Capacity of the evictRequests channel.
	chunkLen         = 1024 // Chunk length handed to newPersistence.
	// See waitForNextFP.
	fpMaxWaitDuration = 10 * time.Second
	fpMinWaitDuration = 5 * time.Millisecond // ~ hard disk seek time.
	fpMaxSweepTime    = 6 * time.Hour
	maxEvictInterval = time.Minute
	headChunkTimeout = time.Hour // Close head chunk if not touched for that long.
)

// storageState enumerates the lifecycle phases of the storage.
type storageState uint

const (
	storageStarting storageState = iota
	storageServing
	storageStopping
)

// persistRequest asks handlePersistQueue to write one chunk to disk.
type persistRequest struct {
	fingerprint clientmodel.Fingerprint
	chunkDesc   *chunkDesc
}

// evictRequest adds (evict==true) or removes (evict==false) a chunkDesc
// on the eviction list maintained by handleEvictList.
type evictRequest struct {
	cd    *chunkDesc
	evict bool
}
// memorySeriesStorage holds all in-memory series plus the machinery to
// persist, evict, archive, and purge chunks. It is also a
// prometheus.Collector.
type memorySeriesStorage struct {
	fpLocker   *fingerprintLocker // Per-fingerprint locking.
	fpToSeries *seriesMap         // All series currently held in memory.

	loopStopping, loopStopped chan struct{} // Shutdown signaling for loop().
	maxMemoryChunks           int           // Eviction threshold, see maybeEvict.
	purgeAfter                time.Duration // Retention period for chunks.
	checkpointInterval        time.Duration // How often loop() checkpoints.

	persistQueue   chan persistRequest // Chunks waiting to be written to disk.
	persistStopped chan struct{}       // Closed when handlePersistQueue exits.
	persistence    *persistence        // The on-disk layer.

	evictList                   *list.List // chunkDescs queued for possible eviction.
	evictRequests               chan evictRequest
	evictStopping, evictStopped chan struct{}

	// Instrumentation.
	persistLatency              prometheus.Summary
	persistErrors               *prometheus.CounterVec
	persistQueueLength          prometheus.Gauge
	numSeries                   prometheus.Gauge
	seriesOps                   *prometheus.CounterVec
	ingestedSamplesCount        prometheus.Counter
	invalidPreloadRequestsCount prometheus.Counter
	purgeDuration               prometheus.Gauge // NOTE(review): never set in this file — confirm still needed.
}
// MemorySeriesStorageOptions contains options needed by
// NewMemorySeriesStorage. It is not safe to leave any of those at their zero
// values.
type MemorySeriesStorageOptions struct {
	MemoryChunks               int           // How many chunks to keep in memory.
	PersistenceStoragePath     string        // Location of persistence files.
	PersistenceRetentionPeriod time.Duration // Chunks at least that old are purged.
	CheckpointInterval         time.Duration // How often to checkpoint the series map and head chunks.
	Dirty                      bool          // Force the storage to consider itself dirty on startup.
}
// NewMemorySeriesStorage returns a newly allocated Storage. Storage.Serve still
// has to be called to start the storage. It loads the series map and head
// chunks from disk before returning; on any persistence error it returns
// a nil Storage and the error.
func NewMemorySeriesStorage(o *MemorySeriesStorageOptions) (Storage, error) {
	p, err := newPersistence(o.PersistenceStoragePath, chunkLen, o.Dirty)
	if err != nil {
		return nil, err
	}
	glog.Info("Loading series map and head chunks...")
	fpToSeries, err := p.loadSeriesMapAndHeads()
	if err != nil {
		return nil, err
	}
	glog.Infof("%d series loaded.", fpToSeries.length())
	// Created up front so it can be seeded with the number of series
	// recovered from disk.
	numSeries := prometheus.NewGauge(prometheus.GaugeOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      "memory_series",
		Help:      "The current number of series in memory.",
	})
	numSeries.Set(float64(fpToSeries.length()))
	return &memorySeriesStorage{
		fpLocker:   newFingerprintLocker(256), // 256 lock stripes.
		fpToSeries: fpToSeries,

		loopStopping:       make(chan struct{}),
		loopStopped:        make(chan struct{}),
		maxMemoryChunks:    o.MemoryChunks,
		purgeAfter:         o.PersistenceRetentionPeriod,
		checkpointInterval: o.CheckpointInterval,

		persistQueue:   make(chan persistRequest, persistQueueCap),
		persistStopped: make(chan struct{}),
		persistence:    p,

		evictList:     list.New(),
		evictRequests: make(chan evictRequest, evictRequestsCap),
		evictStopping: make(chan struct{}),
		evictStopped:  make(chan struct{}),

		persistLatency: prometheus.NewSummary(prometheus.SummaryOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "persist_latency_microseconds",
			Help:      "A summary of latencies for persisting each chunk.",
		}),
		persistErrors: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Namespace: namespace,
				Subsystem: subsystem,
				Name:      "persist_errors_total",
				Help:      "A counter of errors persisting chunks.",
			},
			[]string{"error"},
		),
		persistQueueLength: prometheus.NewGauge(prometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "persist_queue_length",
			Help:      "The current number of chunks waiting in the persist queue.",
		}),
		numSeries: numSeries,
		seriesOps: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Namespace: namespace,
				Subsystem: subsystem,
				Name:      "series_ops_total",
				Help:      "The total number of series operations by their type.",
			},
			[]string{opTypeLabel},
		),
		ingestedSamplesCount: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "ingested_samples_total",
			Help:      "The total number of samples ingested.",
		}),
		invalidPreloadRequestsCount: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "invalid_preload_requests_total",
			Help:      "The total number of preload requests referring to a non-existent series. This is an indication of outdated label indexes.",
		}),
	}, nil
}
// Start implements Storage. It launches the three background goroutines;
// each is stopped and awaited by Stop.
func (s *memorySeriesStorage) Start() {
	go s.handleEvictList()
	go s.handlePersistQueue()
	go s.loop()
}
// Stop implements Storage. Shutdown order: maintenance loop first (it
// feeds the persist queue), then the persist queue is closed and
// drained, then chunk eviction, then a final checkpoint and close.
func (s *memorySeriesStorage) Stop() error {
	glog.Info("Stopping local storage...")
	glog.Info("Stopping maintenance loop...")
	close(s.loopStopping)
	<-s.loopStopped
	glog.Info("Stopping persist queue...")
	close(s.persistQueue)
	<-s.persistStopped
	glog.Info("Stopping chunk eviction...")
	close(s.evictStopping)
	<-s.evictStopped
	// One final checkpoint of the series map and the head chunks.
	if err := s.persistence.checkpointSeriesMapAndHeads(s.fpToSeries, s.fpLocker); err != nil {
		return err
	}
	if err := s.persistence.close(); err != nil {
		return err
	}
	glog.Info("Local storage stopped.")
	return nil
}
// WaitForIndexing implements Storage. It blocks until the persistence
// layer has processed all pending index operations.
func (s *memorySeriesStorage) WaitForIndexing() {
	s.persistence.waitForIndexing()
}
// NewIterator implements Storage.
func (s *memorySeriesStorage) NewIterator(fp clientmodel.Fingerprint) SeriesIterator {
	s.fpLocker.Lock(fp)
	defer s.fpLocker.Unlock(fp)
	series, ok := s.fpToSeries.get(fp)
	if !ok {
		// Oops, no series for fp found. That happens if, after
		// preloading is done, the whole series is identified as old
		// enough for purging and hence purged for good. As there is no
		// data left to iterate over, return an iterator that will never
		// return any values.
		return nopSeriesIterator{}
	}
	// The iterator re-acquires the fingerprint lock around each access.
	return series.newIterator(
		func() { s.fpLocker.Lock(fp) },
		func() { s.fpLocker.Unlock(fp) },
	)
}
// NewPreloader implements Storage.
func (s *memorySeriesStorage) NewPreloader() Preloader {
	return &memorySeriesPreloader{
		storage: s,
	}
}
// GetFingerprintsForLabelMatchers implements Storage. It intersects the
// fingerprint sets produced by each matcher; a nil result map marks the
// first iteration, in which every fingerprint is accepted.
func (s *memorySeriesStorage) GetFingerprintsForLabelMatchers(labelMatchers metric.LabelMatchers) clientmodel.Fingerprints {
	var result map[clientmodel.Fingerprint]struct{}
	for _, matcher := range labelMatchers {
		intersection := map[clientmodel.Fingerprint]struct{}{}
		switch matcher.Type {
		case metric.Equal:
			fps, err := s.persistence.getFingerprintsForLabelPair(
				metric.LabelPair{
					Name:  matcher.Name,
					Value: matcher.Value,
				},
			)
			if err != nil {
				glog.Error("Error getting fingerprints for label pair: ", err)
			}
			if len(fps) == 0 {
				return nil
			}
			for _, fp := range fps {
				if _, ok := result[fp]; ok || result == nil {
					intersection[fp] = struct{}{}
				}
			}
		default:
			values, err := s.persistence.getLabelValuesForLabelName(matcher.Name)
			if err != nil {
				glog.Errorf("Error getting label values for label name %q: %v", matcher.Name, err)
			}
			matches := matcher.Filter(values)
			if len(matches) == 0 {
				return nil
			}
			for _, v := range matches {
				fps, err := s.persistence.getFingerprintsForLabelPair(
					metric.LabelPair{
						Name:  matcher.Name,
						Value: v,
					},
				)
				if err != nil {
					glog.Error("Error getting fingerprints for label pair: ", err)
				}
				for _, fp := range fps {
					if _, ok := result[fp]; ok || result == nil {
						intersection[fp] = struct{}{}
					}
				}
			}
		}
		if len(intersection) == 0 {
			return nil
		}
		result = intersection
	}
	// Flatten the surviving set into a slice.
	fps := make(clientmodel.Fingerprints, 0, len(result))
	for fp := range result {
		fps = append(fps, fp)
	}
	return fps
}
// GetLabelValuesForLabelName implements Storage. It delegates to the
// persistence layer; on error the (possibly empty) values read so far are
// returned and the error is only logged.
func (s *memorySeriesStorage) GetLabelValuesForLabelName(labelName clientmodel.LabelName) clientmodel.LabelValues {
	values, err := s.persistence.getLabelValuesForLabelName(labelName)
	if err != nil {
		glog.Errorf("Error getting label values for label name %q: %v", labelName, err)
	}
	return values
}
// GetMetricForFingerprint implements Storage. It first consults the
// in-memory series map and falls back to the archived-metric index.
func (s *memorySeriesStorage) GetMetricForFingerprint(fp clientmodel.Fingerprint) clientmodel.COWMetric {
	s.fpLocker.Lock(fp)
	defer s.fpLocker.Unlock(fp)

	if series, inMemory := s.fpToSeries.get(fp); inMemory {
		// Wrap in a copy-on-write metric because the caller might mutate it.
		return clientmodel.COWMetric{Metric: series.metric}
	}
	archived, err := s.persistence.getArchivedMetric(fp)
	if err != nil {
		glog.Errorf("Error retrieving archived metric for fingerprint %v: %v", fp, err)
	}
	return clientmodel.COWMetric{Metric: archived}
}
// AppendSamples implements Storage. Each sample is ingested individually;
// the ingestion counter is bumped once for the whole batch.
func (s *memorySeriesStorage) AppendSamples(samples clientmodel.Samples) {
	for i := range samples {
		s.appendSample(samples[i])
	}
	s.ingestedSamplesCount.Add(float64(len(samples)))
}
// appendSample adds a single sample to its series (creating or unarchiving
// the series as needed) and queues any complete chunks for persistence.
func (s *memorySeriesStorage) appendSample(sample *clientmodel.Sample) {
	fp := sample.Metric.Fingerprint()
	s.fpLocker.Lock(fp)
	series := s.getOrCreateSeries(fp, sample.Metric)
	chunkDescsToPersist := series.add(fp, &metric.SamplePair{
		Value:     sample.Value,
		Timestamp: sample.Timestamp,
	})
	s.fpLocker.Unlock(fp)
	// Queue only outside of the locked area, processing the persistQueue
	// requires the same lock! (handlePersistQueue locks fp per request, so
	// sending while holding the lock could deadlock on a full channel.)
	for _, cd := range chunkDescsToPersist {
		s.persistQueue <- persistRequest{fp, cd}
	}
}
// getOrCreateSeries returns the in-memory series for fp, materializing it
// first if necessary: an archived series is unarchived, a genuinely new one
// is created and indexed. Callers must hold the fpLocker lock for fp
// (all call sites in this file do).
func (s *memorySeriesStorage) getOrCreateSeries(fp clientmodel.Fingerprint, m clientmodel.Metric) *memorySeries {
	series, ok := s.fpToSeries.get(fp)
	if !ok {
		unarchived, firstTime, err := s.persistence.unarchiveMetric(fp)
		if err != nil {
			glog.Errorf("Error unarchiving fingerprint %v: %v", fp, err)
		}
		if unarchived {
			s.seriesOps.WithLabelValues(unarchive).Inc()
		} else {
			// This was a genuinely new series, so index the metric.
			s.persistence.indexMetric(fp, m)
			s.seriesOps.WithLabelValues(create).Inc()
		}
		// !unarchived: a brand-new series has no chunks on disk yet.
		series = newMemorySeries(m, !unarchived, firstTime)
		s.fpToSeries.put(fp, series)
		s.numSeries.Inc()
	}
	return series
}
// preloadChunksForRange pins the chunks of the series for fp that overlap
// [from, through] (padded by stalenessDelta) into memory. If the series is
// neither in memory nor archived, the request is counted as invalid and nil
// is returned. An archived series is only unarchived when the requested
// range (with staleness padding) actually overlaps its archived time range.
func (s *memorySeriesStorage) preloadChunksForRange(
	fp clientmodel.Fingerprint,
	from clientmodel.Timestamp, through clientmodel.Timestamp,
	stalenessDelta time.Duration,
) ([]*chunkDesc, error) {
	s.fpLocker.Lock(fp)
	defer s.fpLocker.Unlock(fp)

	series, ok := s.fpToSeries.get(fp)
	if !ok {
		has, first, last, err := s.persistence.hasArchivedMetric(fp)
		if err != nil {
			return nil, err
		}
		if !has {
			s.invalidPreloadRequestsCount.Inc()
			return nil, nil
		}
		if from.Add(-stalenessDelta).Before(last) && through.Add(stalenessDelta).After(first) {
			metric, err := s.persistence.getArchivedMetric(fp)
			if err != nil {
				return nil, err
			}
			series = s.getOrCreateSeries(fp, metric)
		} else {
			// No overlap with the archived range; nothing to preload.
			return nil, nil
		}
	}
	return series.preloadChunksForRange(from, through, fp, s)
}
// handleEvictList is the single goroutine that owns the chunk evict list.
// It receives add/remove requests via s.evictRequests and triggers batched
// evictions, either on a ticker or once enough eviction candidates have
// accumulated. It terminates when s.evictStopping is closed, after draining
// pending requests, and signals completion by closing s.evictStopped.
func (s *memorySeriesStorage) handleEvictList() {
	ticker := time.NewTicker(maxEvictInterval)
	count := 0

	for {
		// To batch up evictions a bit, this tries evictions at least
		// once per evict interval, but earlier if the number of evict
		// requests with evict==true that have happened since the last
		// evict run is more than maxMemoryChunks/1000.
		select {
		case req := <-s.evictRequests:
			if req.evict {
				// Candidate for eviction: append to the list and remember
				// its element so it can be removed again later.
				req.cd.evictListElement = s.evictList.PushBack(req.cd)
				count++
				if count > s.maxMemoryChunks/1000 {
					s.maybeEvict()
					count = 0
				}
			} else {
				// Chunk was re-pinned; take it off the evict list if present.
				if req.cd.evictListElement != nil {
					s.evictList.Remove(req.cd.evictListElement)
					req.cd.evictListElement = nil
				}
			}
		case <-ticker.C:
			if s.evictList.Len() > 0 {
				s.maybeEvict()
			}
		case <-s.evictStopping:
			// Drain evictRequests to not let requesters hang.
			for {
				select {
				case <-s.evictRequests:
					// Do nothing.
				default:
					ticker.Stop()
					glog.Info("Chunk eviction stopped.")
					close(s.evictStopped)
					return
				}
			}
		}
	}
}
// maybeEvict is a local helper method. Must only be called by handleEvictList.
// It evicts as many chunks from the front of the evict list as needed to get
// the in-memory chunk count back under s.maxMemoryChunks.
func (s *memorySeriesStorage) maybeEvict() {
	numChunksToEvict := int(atomic.LoadInt64(&numMemChunks)) - s.maxMemoryChunks
	if numChunksToEvict <= 0 {
		return
	}
	chunkDescsToEvict := make([]*chunkDesc, numChunksToEvict)
	for i := range chunkDescsToEvict {
		e := s.evictList.Front()
		if e == nil {
			// Fewer candidates than needed; the tail of the slice stays nil.
			break
		}
		cd := e.Value.(*chunkDesc)
		cd.evictListElement = nil
		chunkDescsToEvict[i] = cd
		s.evictList.Remove(e)
	}
	// Do the actual eviction in a goroutine as we might otherwise deadlock,
	// in the following way: A chunk was unpinned completely and therefore
	// scheduled for eviction. At the time we actually try to evict it,
	// another goroutine is pinning the chunk. The pinning goroutine has
	// currently locked the chunk and tries to send the evict request (to
	// remove the chunk from the evict list) to the evictRequests
	// channel. The send blocks because evictRequests is full. However, the
	// goroutine that is supposed to empty the channel is waiting for the
	// chunkDesc lock to try to evict the chunk.
	go func() {
		for _, cd := range chunkDescsToEvict {
			if cd == nil {
				// Reached the unfilled tail of the slice (see break above).
				break
			}
			cd.maybeEvict()
			// We don't care if the eviction succeeds. If the chunk
			// was pinned in the meantime, it will be added to the
			// evict list once it gets unpinned again.
		}
	}()
}
// handlePersistQueue is the single goroutine that drains s.persistQueue,
// writing each queued chunk to disk. It exits when the channel is closed and
// signals completion by closing s.persistStopped. On persist errors the
// chunk stays pinned and the persistence layer is marked dirty.
func (s *memorySeriesStorage) handlePersistQueue() {
	for req := range s.persistQueue {
		s.persistQueueLength.Set(float64(len(s.persistQueue)))
		start := time.Now()
		s.fpLocker.Lock(req.fingerprint)
		offset, err := s.persistence.persistChunk(req.fingerprint, req.chunkDesc.chunk)
		if series, seriesInMemory := s.fpToSeries.get(req.fingerprint); err == nil && seriesInMemory && series.chunkDescsOffset == -1 {
			// This is the first chunk persisted for a newly created
			// series that had prior chunks on disk. Finally, we can
			// set the chunkDescsOffset.
			series.chunkDescsOffset = offset
		}
		s.fpLocker.Unlock(req.fingerprint)
		s.persistLatency.Observe(float64(time.Since(start)) / float64(time.Microsecond))
		if err != nil {
			s.persistErrors.WithLabelValues(err.Error()).Inc()
			glog.Error("Error persisting chunk: ", err)
			s.persistence.setDirty(true)
			// Note: the chunk is NOT unpinned on error, keeping it in memory.
			continue
		}
		req.chunkDesc.unpin(s.evictRequests)
		chunkOps.WithLabelValues(persistAndUnpin).Inc()
	}
	glog.Info("Persist queue drained and stopped.")
	close(s.persistStopped)
}
// waitForNextFP waits an estimated duration, after which we want to process
// another fingerprint so that we will process all fingerprints in a tenth of
// s.purgeAfter assuming that the system is doing nothing else, e.g. if we want
// to purge after 40h, we want to cycle through all fingerprints within
// 4h. However, the maximum sweep time is capped at fpMaxSweepTime. Furthermore,
// this method will always wait for at least fpMinWaitDuration and never longer
// than fpMaxWaitDuration. If s.loopStopping is closed, it will return false
// immediately. The estimation is based on the total number of fingerprints as
// passed in.
func (s *memorySeriesStorage) waitForNextFP(numberOfFPs int) bool {
	d := fpMaxWaitDuration
	if numberOfFPs != 0 {
		// Spread one full sweep over a tenth of the purge horizon, clamped
		// to [fpMinWaitDuration, fpMaxWaitDuration] per fingerprint.
		sweepTime := s.purgeAfter / 10
		if sweepTime > fpMaxSweepTime {
			sweepTime = fpMaxSweepTime
		}
		d = sweepTime / time.Duration(numberOfFPs)
		if d < fpMinWaitDuration {
			d = fpMinWaitDuration
		}
		if d > fpMaxWaitDuration {
			d = fpMaxWaitDuration
		}
	}
	t := time.NewTimer(d)
	// Stop the timer if we return early via loopStopping; otherwise the
	// timer keeps running (and holds resources) until it fires on its own.
	defer t.Stop()
	select {
	case <-t.C:
		return true
	case <-s.loopStopping:
		return false
	}
}
// cycleThroughMemoryFingerprints returns a channel that emits fingerprints for
// series in memory in a throttled fashion. It continues to cycle through all
// fingerprints in memory until s.loopStopping is closed.
func (s *memorySeriesStorage) cycleThroughMemoryFingerprints() chan clientmodel.Fingerprint {
	memoryFingerprints := make(chan clientmodel.Fingerprint)
	go func() {
		var fpIter <-chan clientmodel.Fingerprint

		defer func() {
			// Drain any in-flight iterator before closing so its producing
			// goroutine is not left blocked on a send.
			if fpIter != nil {
				for range fpIter {
					// Consume the iterator.
				}
			}
			close(memoryFingerprints)
		}()

		for {
			// Initial wait, also important if there are no FPs yet.
			if !s.waitForNextFP(s.fpToSeries.length()) {
				return
			}
			begin := time.Now()
			fpIter = s.fpToSeries.fpIter()
			for fp := range fpIter {
				select {
				case memoryFingerprints <- fp:
				case <-s.loopStopping:
					return
				}
				// Throttle between fingerprints; the return value is ignored
				// here because the select above handles shutdown.
				s.waitForNextFP(s.fpToSeries.length())
			}
			glog.Infof("Completed maintenance sweep through in-memory fingerprints in %v.", time.Since(begin))
		}
	}()

	return memoryFingerprints
}
// cycleThroughArchivedFingerprints returns a channel that emits fingerprints
// for archived series in a throttled fashion. It continues to cycle through all
// archived fingerprints until s.loopStopping is closed. Only fingerprints
// whose archived series were last modified before the purge horizon are
// emitted.
func (s *memorySeriesStorage) cycleThroughArchivedFingerprints() chan clientmodel.Fingerprint {
	archivedFingerprints := make(chan clientmodel.Fingerprint)
	go func() {
		defer close(archivedFingerprints)

		for {
			archivedFPs, err := s.persistence.getFingerprintsModifiedBefore(
				clientmodel.TimestampFromTime(time.Now()).Add(-1 * s.purgeAfter),
			)
			if err != nil {
				glog.Error("Failed to lookup archived fingerprint ranges: ", err)
				// Back off before retrying the lookup.
				s.waitForNextFP(0)
				continue
			}
			// Initial wait, also important if there are no FPs yet.
			if !s.waitForNextFP(len(archivedFPs)) {
				return
			}
			begin := time.Now()
			for _, fp := range archivedFPs {
				select {
				case archivedFingerprints <- fp:
				case <-s.loopStopping:
					return
				}
				s.waitForNextFP(len(archivedFPs))
			}
			glog.Infof("Completed maintenance sweep through archived fingerprints in %v.", time.Since(begin))
		}
	}()
	return archivedFingerprints
}
// loop is the storage's maintenance loop: it checkpoints the series map on a
// ticker and purges/maintains series as their fingerprints are emitted by the
// memory and archive cycling goroutines. It runs until s.loopStopping is
// closed and signals termination by closing s.loopStopped.
func (s *memorySeriesStorage) loop() {
	checkpointTicker := time.NewTicker(s.checkpointInterval)

	defer func() {
		checkpointTicker.Stop()
		glog.Info("Maintenance loop stopped.")
		close(s.loopStopped)
	}()

	memoryFingerprints := s.cycleThroughMemoryFingerprints()
	archivedFingerprints := s.cycleThroughArchivedFingerprints()

loop:
	for {
		select {
		case <-s.loopStopping:
			break loop
		case <-checkpointTicker.C:
			s.persistence.checkpointSeriesMapAndHeads(s.fpToSeries, s.fpLocker)
		case fp := <-memoryFingerprints:
			// In-memory series: purge old chunks, then do head-chunk /
			// archiving maintenance.
			s.purgeSeries(fp, clientmodel.TimestampFromTime(time.Now()).Add(-1*s.purgeAfter))
			s.maintainSeries(fp)
			s.seriesOps.WithLabelValues(memoryMaintenance).Inc()
		case fp := <-archivedFingerprints:
			// Archived series only need purging.
			s.purgeSeries(fp, clientmodel.TimestampFromTime(time.Now()).Add(-1*s.purgeAfter))
			s.seriesOps.WithLabelValues(archiveMaintenance).Inc()
		}
	}
	// Wait until both channels are closed.
	for range memoryFingerprints {
	}
	for range archivedFingerprints {
	}
}
// maintainSeries closes the head chunk if not touched in a while. It archives a
// series if all chunks are evicted. It evicts chunkDescs if there are too many.
func (s *memorySeriesStorage) maintainSeries(fp clientmodel.Fingerprint) {
	var headChunkToPersist *chunkDesc
	s.fpLocker.Lock(fp)
	defer func() {
		s.fpLocker.Unlock(fp)
		// Queue outside of lock! Processing the persistQueue requires the
		// same fingerprint lock, so sending while holding it could deadlock.
		if headChunkToPersist != nil {
			s.persistQueue <- persistRequest{fp, headChunkToPersist}
		}
	}()

	series, ok := s.fpToSeries.get(fp)
	if !ok {
		return
	}
	// Find the oldest chunk that is still held in memory.
	iOldestNotEvicted := -1
	for i, cd := range series.chunkDescs {
		if !cd.isEvicted() {
			iOldestNotEvicted = i
			break
		}
	}

	// Archive if all chunks are evicted.
	if iOldestNotEvicted == -1 {
		s.fpToSeries.del(fp)
		s.numSeries.Dec()
		if err := s.persistence.archiveMetric(
			fp, series.metric, series.firstTime(), series.lastTime(),
		); err != nil {
			glog.Errorf("Error archiving metric %v: %v", series.metric, err)
		} else {
			s.seriesOps.WithLabelValues(archive).Inc()
		}
		return
	}
	// If we are here, the series is not archived, so check for chunkDesc
	// eviction next and then if the head chunk needs to be persisted.
	series.evictChunkDescs(iOldestNotEvicted)
	// time.Since(x) is the idiomatic replacement for time.Now().Sub(x).
	if !series.headChunkPersisted && time.Since(series.head().firstTime().Time()) > headChunkTimeout {
		series.headChunkPersisted = true
		// Since we cannot modify the head chunk from now on, we
		// don't need to bother with cloning anymore.
		series.headChunkUsedByIterator = false
		headChunkToPersist = series.head()
	}
}
// purgeSeries purges chunks older than beforeTime from a series. If the series
// contains no chunks after the purge, it is dropped entirely. Handles both
// in-memory and archived series; for in-memory series it also keeps the
// chunkDescsOffset bookkeeping (memory vs. disk chunk counts) consistent.
func (s *memorySeriesStorage) purgeSeries(fp clientmodel.Fingerprint, beforeTime clientmodel.Timestamp) {
	s.fpLocker.Lock(fp)
	defer s.fpLocker.Unlock(fp)

	if series, ok := s.fpToSeries.get(fp); ok {
		// Deal with series in memory.
		if !series.firstTime().Before(beforeTime) {
			// Oldest sample not old enough.
			return
		}
		newFirstTime, numDropped, allDropped, err := s.persistence.dropChunks(fp, beforeTime)
		if err != nil {
			glog.Error("Error purging persisted chunks: ", err)
		}
		numPurged, allPurged := series.purgeOlderThan(beforeTime)
		if allPurged && allDropped {
			// Nothing left in memory or on disk: drop the series entirely.
			s.fpToSeries.del(fp)
			s.numSeries.Dec()
			s.seriesOps.WithLabelValues(memoryPurge).Inc()
			s.persistence.unindexMetric(fp, series.metric)
		} else if series.chunkDescsOffset != -1 {
			// Adjust the offset between in-memory chunkDescs and chunks on
			// disk by the difference of what was purged from each side.
			series.savedFirstTime = newFirstTime
			series.chunkDescsOffset += numPurged - numDropped
			if series.chunkDescsOffset < 0 {
				panic("dropped more chunks from persistence than from memory")
			}
		}
		return
	}

	// Deal with archived series.
	has, firstTime, lastTime, err := s.persistence.hasArchivedMetric(fp)
	if err != nil {
		glog.Error("Error looking up archived time range: ", err)
		return
	}
	if !has || !firstTime.Before(beforeTime) {
		// Oldest sample not old enough, or metric purged or unarchived in the meantime.
		return
	}

	newFirstTime, _, allDropped, err := s.persistence.dropChunks(fp, beforeTime)
	if err != nil {
		glog.Error("Error purging persisted chunks: ", err)
	}
	if allDropped {
		if err := s.persistence.dropArchivedMetric(fp); err != nil {
			glog.Errorf("Error dropping archived metric for fingerprint %v: %v", fp, err)
			return
		}
		s.seriesOps.WithLabelValues(archivePurge).Inc()
		return
	}
	// Some chunks remain: just move the archived range's start forward.
	s.persistence.updateArchivedTimeRange(fp, newFirstTime, lastTime)
}
// loadChunks delegates to the persistence layer; it exists so that
// memorySeries code can load chunks via the storage without holding a
// reference to the persistence. See persistence.loadChunks for detailed
// explanation.
func (s *memorySeriesStorage) loadChunks(fp clientmodel.Fingerprint, indexes []int, indexOffset int) ([]chunk, error) {
	return s.persistence.loadChunks(fp, indexes, indexOffset)
}
// loadChunkDescs delegates to the persistence layer, mirroring loadChunks
// above. See persistence.loadChunkDescs for detailed explanation.
func (s *memorySeriesStorage) loadChunkDescs(fp clientmodel.Fingerprint, beforeTime clientmodel.Timestamp) ([]*chunkDesc, error) {
	return s.persistence.loadChunkDescs(fp, beforeTime)
}
// To expose persistQueueCap as metric: the capacity is a constant, so it is
// published as a pre-built const gauge rather than a live collector value.
var (
	persistQueueCapDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, subsystem, "persist_queue_capacity"),
		"The total capacity of the persist queue.",
		nil, nil,
	)
	persistQueueCapGauge = prometheus.MustNewConstMetric(
		persistQueueCapDesc, prometheus.GaugeValue, persistQueueCap,
	)
)
// Describe implements prometheus.Collector. It forwards the descriptors of
// all metrics owned by the storage, including those of the embedded
// persistence layer. Must stay in sync with Collect below.
func (s *memorySeriesStorage) Describe(ch chan<- *prometheus.Desc) {
	s.persistence.Describe(ch)

	ch <- s.persistLatency.Desc()
	s.persistErrors.Describe(ch)
	ch <- s.persistQueueLength.Desc()
	ch <- s.numSeries.Desc()
	s.seriesOps.Describe(ch)
	ch <- s.ingestedSamplesCount.Desc()
	ch <- s.invalidPreloadRequestsCount.Desc()

	ch <- persistQueueCapDesc

	ch <- numMemChunksDesc
}
// Collect implements prometheus.Collector. Must emit exactly the metrics
// described by Describe above. numMemChunks is read atomically and wrapped
// into a const metric at collection time.
func (s *memorySeriesStorage) Collect(ch chan<- prometheus.Metric) {
	s.persistence.Collect(ch)

	ch <- s.persistLatency
	s.persistErrors.Collect(ch)
	ch <- s.persistQueueLength
	ch <- s.numSeries
	s.seriesOps.Collect(ch)
	ch <- s.ingestedSamplesCount
	ch <- s.invalidPreloadRequestsCount

	ch <- persistQueueCapGauge

	count := atomic.LoadInt64(&numMemChunks)
	ch <- prometheus.MustNewConstMetric(numMemChunksDesc, prometheus.GaugeValue, float64(count))
}
|
package gerrit
import (
"fmt"
"net/url"
)
// GetCommit retrieves a commit of a project.
// The commit must be visible to the caller.
//
// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#get-commit
func (s *ProjectsService) GetCommit(projectName, commitID string) (*CommitInfo, *Response, error) {
	endpoint := fmt.Sprintf("projects/%s/commits/%s", url.QueryEscape(projectName), commitID)

	req, err := s.client.NewRequest("GET", endpoint, nil)
	if err != nil {
		return nil, nil, err
	}

	info := new(CommitInfo)
	resp, err := s.client.Do(req, info)
	if err != nil {
		return nil, resp, err
	}
	return info, resp, err
}
// GetCommitContent gets the content of a file from a certain commit.
// The content is returned as base64 encoded string.
//
// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#get-content-from-commit
func (s *ProjectsService) GetCommitContent(projectName, commitID, fileID string) (string, *Response, error) {
	// Bug fix: the previous implementation hit the branch-content endpoint
	// ("projects/%s/branches/%s/files/%s/content"), accidentally duplicating
	// GetBranchContent. Commit content lives under the commits resource.
	u := fmt.Sprintf("projects/%s/commits/%s/files/%s/content", url.QueryEscape(projectName), commitID, fileID)
	return getStringResponseWithoutOptions(s.client, u)
}
Fix GetCommitContent to actually get commit content
The method was accidentally duplicating GetBranchContent.
package gerrit
import (
"fmt"
"net/url"
)
// GetCommit retrieves a commit of a project.
// The commit must be visible to the caller.
//
// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#get-commit
func (s *ProjectsService) GetCommit(projectName, commitID string) (*CommitInfo, *Response, error) {
	path := fmt.Sprintf("projects/%s/commits/%s", url.QueryEscape(projectName), commitID)
	req, err := s.client.NewRequest("GET", path, nil)
	if err != nil {
		return nil, nil, err
	}

	commit := new(CommitInfo)
	resp, err := s.client.Do(req, commit)
	if err != nil {
		return nil, resp, err
	}
	return commit, resp, err
}
// GetCommitContent gets the content of a file from a certain commit.
// The content is returned as base64 encoded string.
//
// Gerrit API docs: https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html##get-content-from-commit
func (s *ProjectsService) GetCommitContent(projectName, commitID, fileID string) (string, *Response, error) {
	// Only the project name is escaped here; commitID and fileID are
	// interpolated as-is. NOTE(review): file IDs containing '/' may need
	// escaping too — confirm against the client's request handling.
	u := fmt.Sprintf("projects/%s/commits/%s/files/%s/content", url.QueryEscape(projectName), commitID, fileID)
	return getStringResponseWithoutOptions(s.client, u)
}
|
package api
import (
"fmt"
"sort"
"strings"
"github.com/grafana/grafana/pkg/api/dtos"
"github.com/grafana/grafana/pkg/api/navlinks"
"github.com/grafana/grafana/pkg/bus"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/plugins"
ac "github.com/grafana/grafana/pkg/services/accesscontrol"
"github.com/grafana/grafana/pkg/setting"
)
const (
// Themes
lightName = "light"
darkName = "dark"
)
// getProfileNode builds the user-profile entry of the navigation tree:
// preferences, optional change-password link, and (unless disabled) a
// sign-out link, headed by the user's display name and gravatar.
func (hs *HTTPServer) getProfileNode(c *models.ReqContext) *dtos.NavLink {
	// Only set login if it's different from the name
	var login string
	if c.SignedInUser.Login != c.SignedInUser.NameOrFallback() {
		login = c.SignedInUser.Login
	}
	gravatarURL := dtos.GetGravatarUrl(c.Email)

	children := []*dtos.NavLink{
		{
			Text: "Preferences", Id: "profile-settings", Url: hs.Cfg.AppSubURL + "/profile", Icon: "sliders-v-alt",
		},
	}

	if setting.AddChangePasswordLink() {
		children = append(children, &dtos.NavLink{
			Text: "Change password", Id: "change-password", Url: hs.Cfg.AppSubURL + "/profile/password",
			Icon: "lock",
		})
	}

	if !setting.DisableSignoutMenu {
		// add sign out first
		children = append(children, &dtos.NavLink{
			Text:         "Sign out",
			Id:           "sign-out",
			Url:          hs.Cfg.AppSubURL + "/logout",
			Icon:         "arrow-from-right",
			// Sign out must be a full page load, not a client-side route.
			Target:       "_self",
			HideFromTabs: true,
		})
	}

	return &dtos.NavLink{
		Text:       c.SignedInUser.NameOrFallback(),
		SubTitle:   login,
		Id:         "profile",
		Img:        gravatarURL,
		Url:        hs.Cfg.AppSubURL + "/profile",
		Section:    dtos.NavSectionConfig,
		SortWeight: dtos.WeightProfile,
		Children:   children,
	}
}
// getAppLinks builds nav links for pinned, enabled app plugins. Each app
// contributes child links from its "page" and "dashboard" includes, filtered
// by the user's org role; apps without any visible children are omitted.
// The result is sorted alphabetically by link text.
func (hs *HTTPServer) getAppLinks(c *models.ReqContext) ([]*dtos.NavLink, error) {
	enabledPlugins, err := hs.enabledPlugins(c.Req.Context(), c.OrgId)
	if err != nil {
		return nil, err
	}

	appLinks := []*dtos.NavLink{}
	for _, plugin := range enabledPlugins[plugins.App] {
		if !plugin.Pinned {
			continue
		}

		appLink := &dtos.NavLink{
			Text:       plugin.Name,
			Id:         "plugin-page-" + plugin.ID,
			Url:        plugin.DefaultNavURL,
			Img:        plugin.Info.Logos.Small,
			SortWeight: dtos.WeightPlugin,
		}

		if hs.Cfg.IsNewNavigationEnabled() {
			appLink.Section = dtos.NavSectionPlugin
		} else {
			appLink.Section = dtos.NavSectionCore
		}

		for _, include := range plugin.Includes {
			// Skip includes the user's role does not grant access to.
			if !c.HasUserRole(include.Role) {
				continue
			}

			if include.Type == "page" && include.AddToNav {
				var link *dtos.NavLink
				if len(include.Path) > 0 {
					link = &dtos.NavLink{
						Url:  hs.Cfg.AppSubURL + include.Path,
						Text: include.Name,
					}
					if include.DefaultNav {
						appLink.Url = link.Url // Overwrite the hardcoded page logic
					}
				} else {
					link = &dtos.NavLink{
						Url:  hs.Cfg.AppSubURL + "/plugins/" + plugin.ID + "/page/" + include.Slug,
						Text: include.Name,
					}
				}
				link.Icon = include.Icon
				appLink.Children = append(appLink.Children, link)
			}

			if include.Type == "dashboard" && include.AddToNav {
				link := &dtos.NavLink{
					Url:  hs.Cfg.AppSubURL + include.GetSlugOrUIDLink(),
					Text: include.Name,
				}
				appLink.Children = append(appLink.Children, link)
			}
		}

		// Only surface apps that contributed at least one visible child.
		if len(appLink.Children) > 0 {
			appLinks = append(appLinks, appLink)
		}
	}

	if len(appLinks) > 0 {
		sort.SliceStable(appLinks, func(i, j int) bool {
			return appLinks[i].Text < appLinks[j].Text
		})
	}
	return appLinks, nil
}
// getNavTree assembles the full navigation tree for the current user:
// home/create/dashboards/explore, the profile node, alerting, app plugin
// links, the org configuration section, server admin, and help. Visibility
// of individual nodes depends on the user's role, access-control
// evaluations, and feature toggles; several nodes switch section/URL based
// on IsNewNavigationEnabled.
func (hs *HTTPServer) getNavTree(c *models.ReqContext, hasEditPerm bool) ([]*dtos.NavLink, error) {
	hasAccess := ac.HasAccess(hs.AccessControl, c)
	navTree := []*dtos.NavLink{}

	if hs.Cfg.IsNewNavigationEnabled() {
		navTree = append(navTree, &dtos.NavLink{
			Text:       "Home",
			Id:         "home",
			Icon:       "home-alt",
			Url:        hs.Cfg.AppSubURL + "/",
			Section:    dtos.NavSectionCore,
			SortWeight: dtos.WeightHome,
		})
	}

	// In the old navigation, "Create" is a top-level node; in the new one,
	// create actions live under the Dashboards node (see buildDashboardNavLinks).
	if hasEditPerm && !hs.Cfg.IsNewNavigationEnabled() {
		children := hs.buildCreateNavLinks(c)
		navTree = append(navTree, &dtos.NavLink{
			Text:       "Create",
			Id:         "create",
			Icon:       "plus",
			Url:        hs.Cfg.AppSubURL + "/dashboard/new",
			Children:   children,
			Section:    dtos.NavSectionCore,
			SortWeight: dtos.WeightCreate,
		})
	}

	dashboardChildLinks := hs.buildDashboardNavLinks(c, hasEditPerm)

	dashboardsUrl := "/"
	if hs.Cfg.IsNewNavigationEnabled() {
		dashboardsUrl = "/dashboards"
	}

	navTree = append(navTree, &dtos.NavLink{
		Text:       "Dashboards",
		Id:         "dashboards",
		SubTitle:   "Manage dashboards and folders",
		Icon:       "apps",
		Url:        hs.Cfg.AppSubURL + dashboardsUrl,
		SortWeight: dtos.WeightDashboard,
		Section:    dtos.NavSectionCore,
		Children:   dashboardChildLinks,
	})

	// NOTE(review): this closure ignores its own `context` parameter and
	// closes over the outer `c` instead — same value at the single call
	// site below, but worth confirming/cleaning up.
	canExplore := func(context *models.ReqContext) bool {
		return c.OrgRole == models.ROLE_ADMIN || c.OrgRole == models.ROLE_EDITOR || setting.ViewersCanEdit
	}

	if setting.ExploreEnabled && hasAccess(canExplore, ac.EvalPermission(ac.ActionDatasourcesExplore)) {
		navTree = append(navTree, &dtos.NavLink{
			Text:       "Explore",
			Id:         "explore",
			SubTitle:   "Explore your data",
			Icon:       "compass",
			SortWeight: dtos.WeightExplore,
			Section:    dtos.NavSectionCore,
			Url:        hs.Cfg.AppSubURL + "/explore",
		})
	}

	if c.IsSignedIn {
		navTree = append(navTree, hs.getProfileNode(c))
	}

	// Unified alerting is visible unless disabled globally or for this org.
	_, uaIsDisabledForOrg := hs.Cfg.UnifiedAlerting.DisabledOrgs[c.OrgId]
	uaVisibleForOrg := hs.Cfg.UnifiedAlerting.IsEnabled() && !uaIsDisabledForOrg

	if setting.AlertingEnabled != nil && *setting.AlertingEnabled || uaVisibleForOrg {
		alertChildNavs := hs.buildAlertNavLinks(c, uaVisibleForOrg)
		navTree = append(navTree, &dtos.NavLink{
			Text:       "Alerting",
			SubTitle:   "Alert rules and notifications",
			Id:         "alerting",
			Icon:       "bell",
			Url:        hs.Cfg.AppSubURL + "/alerting/list",
			Children:   alertChildNavs,
			Section:    dtos.NavSectionCore,
			SortWeight: dtos.WeightAlerting,
		})
	}

	appLinks, err := hs.getAppLinks(c)
	if err != nil {
		return nil, err
	}
	navTree = append(navTree, appLinks...)

	// Org-level configuration nodes, gated per-node by access control or role.
	configNodes := []*dtos.NavLink{}

	if hasAccess(ac.ReqOrgAdmin, dataSourcesConfigurationAccessEvaluator) {
		configNodes = append(configNodes, &dtos.NavLink{
			Text:        "Data sources",
			Icon:        "database",
			Description: "Add and configure data sources",
			Id:          "datasources",
			Url:         hs.Cfg.AppSubURL + "/datasources",
		})
	}

	if hasAccess(ac.ReqOrgAdmin, ac.EvalPermission(ac.ActionOrgUsersRead, ac.ScopeUsersAll)) {
		configNodes = append(configNodes, &dtos.NavLink{
			Text:        "Users",
			Id:          "users",
			Description: "Manage org members",
			Icon:        "user",
			Url:         hs.Cfg.AppSubURL + "/org/users",
		})
	}

	if c.OrgRole == models.ROLE_ADMIN || (hs.Cfg.EditorsCanAdmin && c.OrgRole == models.ROLE_EDITOR) {
		configNodes = append(configNodes, &dtos.NavLink{
			Text:        "Teams",
			Id:          "teams",
			Description: "Manage org groups",
			Icon:        "users-alt",
			Url:         hs.Cfg.AppSubURL + "/org/teams",
		})
	}

	if c.OrgRole == models.ROLE_ADMIN {
		configNodes = append(configNodes, &dtos.NavLink{
			Text:        "Plugins",
			Id:          "plugins",
			Description: "View and configure plugins",
			Icon:        "plug",
			Url:         hs.Cfg.AppSubURL + "/plugins",
		})
	}

	if hasAccess(ac.ReqOrgAdmin, orgPreferencesAccessEvaluator) {
		configNodes = append(configNodes, &dtos.NavLink{
			Text:        "Preferences",
			Id:          "org-settings",
			Description: "Organization preferences",
			Icon:        "sliders-v-alt",
			Url:         hs.Cfg.AppSubURL + "/org",
		})
	}

	if c.OrgRole == models.ROLE_ADMIN {
		configNodes = append(configNodes, &dtos.NavLink{
			Text:        "API keys",
			Id:          "apikeys",
			Description: "Create & manage API keys",
			Icon:        "key-skeleton-alt",
			Url:         hs.Cfg.AppSubURL + "/org/apikeys",
		})
	}

	if hs.Cfg.FeatureToggles["live-pipeline"] {
		liveNavLinks := []*dtos.NavLink{}

		liveNavLinks = append(liveNavLinks, &dtos.NavLink{
			Text: "Status", Id: "live-status", Url: hs.Cfg.AppSubURL + "/live", Icon: "exchange-alt",
		})
		liveNavLinks = append(liveNavLinks, &dtos.NavLink{
			Text: "Pipeline", Id: "live-pipeline", Url: hs.Cfg.AppSubURL + "/live/pipeline", Icon: "arrow-to-right",
		})
		liveNavLinks = append(liveNavLinks, &dtos.NavLink{
			Text: "Cloud", Id: "live-cloud", Url: hs.Cfg.AppSubURL + "/live/cloud", Icon: "cloud-upload",
		})
		navTree = append(navTree, &dtos.NavLink{
			Id:           "live",
			Text:         "Live",
			SubTitle:     "Event Streaming",
			Icon:         "exchange-alt",
			Url:          hs.Cfg.AppSubURL + "/live",
			Children:     liveNavLinks,
			Section:      dtos.NavSectionConfig,
			HideFromTabs: true,
		})
	}

	if len(configNodes) > 0 {
		// The Configuration node links to its first visible child.
		configNode := &dtos.NavLink{
			Id:         dtos.NavIDCfg,
			Text:       "Configuration",
			SubTitle:   "Organization: " + c.OrgName,
			Icon:       "cog",
			Url:        configNodes[0].Url,
			SortWeight: dtos.WeightConfig,
			Children:   configNodes,
		}
		if hs.Cfg.IsNewNavigationEnabled() {
			configNode.Section = dtos.NavSectionConfig
		} else {
			configNode.Section = dtos.NavSectionCore
		}
		navTree = append(navTree, configNode)
	}

	adminNavLinks := hs.buildAdminNavLinks(c)

	if len(adminNavLinks) > 0 {
		navSection := dtos.NavSectionCore
		if hs.Cfg.IsNewNavigationEnabled() {
			navSection = dtos.NavSectionConfig
		}
		serverAdminNode := navlinks.GetServerAdminNode(adminNavLinks, navSection)
		navTree = append(navTree, serverAdminNode)
	}

	helpVersion := fmt.Sprintf(`%s v%s (%s)`, setting.ApplicationName, setting.BuildVersion, setting.BuildCommit)
	if hs.Cfg.AnonymousHideVersion && !c.IsSignedIn {
		// Hide version details from anonymous visitors when configured.
		helpVersion = setting.ApplicationName
	}

	navTree = append(navTree, &dtos.NavLink{
		Text:       "Help",
		SubTitle:   helpVersion,
		Id:         "help",
		Url:        "#",
		Icon:       "question-circle",
		SortWeight: dtos.WeightHelp,
		Section:    dtos.NavSectionConfig,
		Children:   []*dtos.NavLink{},
	})

	return navTree, nil
}
// buildDashboardNavLinks builds the children of the Dashboards nav node:
// browse/playlists always, snapshots and library panels for signed-in users,
// plus (new navigation only) the create actions for users with edit
// permission. In the old navigation the create actions live in a separate
// top-level node instead (see buildCreateNavLinks).
func (hs *HTTPServer) buildDashboardNavLinks(c *models.ReqContext, hasEditPerm bool) []*dtos.NavLink {
	dashboardChildNavs := []*dtos.NavLink{}

	if !hs.Cfg.IsNewNavigationEnabled() {
		dashboardChildNavs = append(dashboardChildNavs, &dtos.NavLink{
			Text: "Home", Id: "home", Url: hs.Cfg.AppSubURL + "/", Icon: "home-alt", HideFromTabs: true,
		})
		dashboardChildNavs = append(dashboardChildNavs, &dtos.NavLink{
			Text: "Divider", Divider: true, Id: "divider", HideFromTabs: true,
		})
	}
	dashboardChildNavs = append(dashboardChildNavs, &dtos.NavLink{
		Text: "Browse", Id: "manage-dashboards", Url: hs.Cfg.AppSubURL + "/dashboards", Icon: "sitemap",
	})
	dashboardChildNavs = append(dashboardChildNavs, &dtos.NavLink{
		Text: "Playlists", Id: "playlists", Url: hs.Cfg.AppSubURL + "/playlists", Icon: "presentation-play",
	})

	if c.IsSignedIn {
		dashboardChildNavs = append(dashboardChildNavs, &dtos.NavLink{
			Text: "Snapshots",
			Id:   "snapshots",
			Url:  hs.Cfg.AppSubURL + "/dashboard/snapshots",
			Icon: "camera",
		})

		dashboardChildNavs = append(dashboardChildNavs, &dtos.NavLink{
			Text: "Library panels",
			Id:   "library-panels",
			Url:  hs.Cfg.AppSubURL + "/library-panels",
			Icon: "library-panel",
		})
	}

	if hasEditPerm && hs.Cfg.IsNewNavigationEnabled() {
		dashboardChildNavs = append(dashboardChildNavs, &dtos.NavLink{
			Text: "Divider", Divider: true, Id: "divider", HideFromTabs: true,
		})

		dashboardChildNavs = append(dashboardChildNavs, &dtos.NavLink{
			Text: "New dashboard", Icon: "plus", Url: hs.Cfg.AppSubURL + "/dashboard/new", HideFromTabs: true,
		})

		// Folder creation is restricted to admins and editors.
		if c.OrgRole == models.ROLE_ADMIN || c.OrgRole == models.ROLE_EDITOR {
			dashboardChildNavs = append(dashboardChildNavs, &dtos.NavLink{
				Text: "New folder", SubTitle: "Create a new folder to organize your dashboards", Id: "folder",
				Icon: "plus", Url: hs.Cfg.AppSubURL + "/dashboards/folder/new", HideFromTabs: true,
			})
		}

		dashboardChildNavs = append(dashboardChildNavs, &dtos.NavLink{
			Text: "Import", SubTitle: "Import dashboard from file or Grafana.com", Id: "import", Icon: "plus",
			Url: hs.Cfg.AppSubURL + "/dashboard/import", HideFromTabs: true,
		})
	}
	return dashboardChildNavs
}
// buildAlertNavLinks builds the children of the Alerting nav node. The set
// of links differs between unified alerting (contact points, notification
// policies, silences, alert groups, admin) and legacy alerting (notification
// channels); several entries are further restricted by org role.
func (hs *HTTPServer) buildAlertNavLinks(c *models.ReqContext, uaVisibleForOrg bool) []*dtos.NavLink {
	alertChildNavs := []*dtos.NavLink{
		{Text: "Alert rules", Id: "alert-list", Url: hs.Cfg.AppSubURL + "/alerting/list", Icon: "list-ul"},
	}

	if c.OrgRole == models.ROLE_ADMIN || c.OrgRole == models.ROLE_EDITOR {
		if uaVisibleForOrg {
			alertChildNavs = append(alertChildNavs, &dtos.NavLink{
				Text: "Contact points", Id: "receivers", Url: hs.Cfg.AppSubURL + "/alerting/notifications",
				Icon: "comment-alt-share",
			})
			alertChildNavs = append(alertChildNavs, &dtos.NavLink{Text: "Notification policies", Id: "am-routes", Url: hs.Cfg.AppSubURL + "/alerting/routes", Icon: "sitemap"})
		} else {
			// Legacy alerting uses the same URL but a different label/id.
			alertChildNavs = append(alertChildNavs, &dtos.NavLink{
				Text: "Notification channels", Id: "channels", Url: hs.Cfg.AppSubURL + "/alerting/notifications",
				Icon: "comment-alt-share",
			})
		}
	}

	if uaVisibleForOrg {
		alertChildNavs = append(alertChildNavs, &dtos.NavLink{Text: "Silences", Id: "silences", Url: hs.Cfg.AppSubURL + "/alerting/silences", Icon: "bell-slash"})
		alertChildNavs = append(alertChildNavs, &dtos.NavLink{Text: "Alert groups", Id: "groups", Url: hs.Cfg.AppSubURL + "/alerting/groups", Icon: "layer-group"})
	}
	if c.OrgRole == models.ROLE_ADMIN && uaVisibleForOrg {
		alertChildNavs = append(alertChildNavs, &dtos.NavLink{
			Text: "Admin", Id: "alerting-admin", Url: hs.Cfg.AppSubURL + "/alerting/admin",
			Icon: "cog",
		})
	}
	return alertChildNavs
}
// buildCreateNavLinks builds the children of the old-navigation "Create"
// node: new dashboard and import always, new folder for admins/editors, and
// a new-alert-rule link when (legacy or unified) alerting is enabled for the
// org.
func (hs *HTTPServer) buildCreateNavLinks(c *models.ReqContext) []*dtos.NavLink {
	children := []*dtos.NavLink{
		{Text: "Dashboard", Icon: "apps", Url: hs.Cfg.AppSubURL + "/dashboard/new"},
	}
	if c.OrgRole == models.ROLE_ADMIN || c.OrgRole == models.ROLE_EDITOR {
		children = append(children, &dtos.NavLink{
			Text: "Folder", SubTitle: "Create a new folder to organize your dashboards", Id: "folder",
			Icon: "folder-plus", Url: hs.Cfg.AppSubURL + "/dashboards/folder/new",
		})
	}
	children = append(children, &dtos.NavLink{
		Text: "Import", SubTitle: "Import dashboard from file or Grafana.com", Id: "import", Icon: "import",
		Url: hs.Cfg.AppSubURL + "/dashboard/import",
	})

	// Same visibility rule as the Alerting node in getNavTree.
	_, uaIsDisabledForOrg := hs.Cfg.UnifiedAlerting.DisabledOrgs[c.OrgId]
	uaVisibleForOrg := hs.Cfg.UnifiedAlerting.IsEnabled() && !uaIsDisabledForOrg

	if setting.AlertingEnabled != nil && *setting.AlertingEnabled || uaVisibleForOrg {
		children = append(children, &dtos.NavLink{
			Text: "Alert rule", SubTitle: "Create an alert rule", Id: "alert",
			Icon: "bell", Url: hs.Cfg.AppSubURL + "/alerting/new",
		})
	}

	return children
}
// buildAdminNavLinks builds the server-admin nav links (users, orgs,
// settings, LDAP, plugin management), each gated by a Grafana-admin
// access-control evaluation; LDAP and plugin entries additionally require
// their features to be enabled in config. May return an empty slice, in
// which case the caller omits the server admin node entirely.
func (hs *HTTPServer) buildAdminNavLinks(c *models.ReqContext) []*dtos.NavLink {
	hasAccess := ac.HasAccess(hs.AccessControl, c)
	hasGlobalAccess := ac.HasGlobalAccess(hs.AccessControl, c)
	adminNavLinks := []*dtos.NavLink{}

	if hasAccess(ac.ReqGrafanaAdmin, ac.EvalPermission(ac.ActionUsersRead, ac.ScopeGlobalUsersAll)) {
		adminNavLinks = append(adminNavLinks, &dtos.NavLink{
			Text: "Users", Id: "global-users", Url: hs.Cfg.AppSubURL + "/admin/users", Icon: "user",
		})
	}

	// Orgs span the whole instance, so a global access check is used here.
	if hasGlobalAccess(ac.ReqGrafanaAdmin, orgsAccessEvaluator) {
		adminNavLinks = append(adminNavLinks, &dtos.NavLink{
			Text: "Orgs", Id: "global-orgs", Url: hs.Cfg.AppSubURL + "/admin/orgs", Icon: "building",
		})
	}

	if hasAccess(ac.ReqGrafanaAdmin, ac.EvalPermission(ac.ActionSettingsRead)) {
		adminNavLinks = append(adminNavLinks, &dtos.NavLink{
			Text: "Settings", Id: "server-settings", Url: hs.Cfg.AppSubURL + "/admin/settings", Icon: "sliders-v-alt",
		})
	}

	if hs.Cfg.LDAPEnabled && hasAccess(ac.ReqGrafanaAdmin, ac.EvalPermission(ac.ActionLDAPStatusRead)) {
		adminNavLinks = append(adminNavLinks, &dtos.NavLink{
			Text: "LDAP", Id: "ldap", Url: hs.Cfg.AppSubURL + "/admin/ldap", Icon: "book",
		})
	}

	if hs.Cfg.PluginAdminEnabled && hasAccess(ac.ReqGrafanaAdmin, ac.EvalPermission(ac.ActionPluginsManage)) {
		adminNavLinks = append(adminNavLinks, &dtos.NavLink{
			Text: "Plugins", Id: "admin-plugins", Url: hs.Cfg.AppSubURL + "/admin/plugins", Icon: "plug",
		})
	}

	return adminNavLinks
}
// setIndexViewData assembles everything the index (SPA bootstrap) template
// needs: current-user info, frontend settings, theme, locale, app URLs, and
// the sorted navigation tree. Shared by Index and NotFoundHandler.
func (hs *HTTPServer) setIndexViewData(c *models.ReqContext) (*dtos.IndexViewData, error) {
	hasEditPermissionInFoldersQuery := models.HasEditPermissionInFoldersQuery{SignedInUser: c.SignedInUser}
	if err := bus.DispatchCtx(c.Req.Context(), &hasEditPermissionInFoldersQuery); err != nil {
		return nil, err
	}
	hasEditPerm := hasEditPermissionInFoldersQuery.Result
	settings, err := hs.getFrontendSettingsMap(c)
	if err != nil {
		return nil, err
	}
	settings["dateFormats"] = hs.Cfg.DateFormats
	prefsQuery := models.GetPreferencesWithDefaultsQuery{User: c.SignedInUser}
	if err := bus.DispatchCtx(c.Req.Context(), &prefsQuery); err != nil {
		return nil, err
	}
	prefs := prefsQuery.Result
	// Read locale from accept-language. Only the first (most preferred)
	// language tag is used; quality values ("q=") are not parsed.
	acceptLang := c.Req.Header.Get("Accept-Language")
	locale := "en-US"
	if len(acceptLang) > 0 {
		parts := strings.Split(acceptLang, ",")
		locale = parts[0]
	}
	appURL := setting.AppUrl
	appSubURL := hs.Cfg.AppSubURL
	// special case when doing localhost call from image renderer
	if c.IsRenderCall && !hs.Cfg.ServeFromSubPath {
		appURL = fmt.Sprintf("%s://localhost:%s", hs.Cfg.Protocol, hs.Cfg.HTTPPort)
		appSubURL = ""
		settings["appSubUrl"] = ""
	}
	navTree, err := hs.getNavTree(c, hasEditPerm)
	if err != nil {
		return nil, err
	}
	data := dtos.IndexViewData{
		User: &dtos.CurrentUser{
			Id:                         c.UserId,
			IsSignedIn:                 c.IsSignedIn,
			Login:                      c.Login,
			Email:                      c.Email,
			Name:                       c.Name,
			OrgCount:                   c.OrgCount,
			OrgId:                      c.OrgId,
			OrgName:                    c.OrgName,
			OrgRole:                    c.OrgRole,
			GravatarUrl:                dtos.GetGravatarUrl(c.Email),
			IsGrafanaAdmin:             c.IsGrafanaAdmin,
			LightTheme:                 prefs.Theme == lightName,
			Timezone:                   prefs.Timezone,
			WeekStart:                  prefs.WeekStart,
			Locale:                     locale,
			HelpFlags1:                 c.HelpFlags1,
			HasEditPermissionInFolders: hasEditPerm,
		},
		Settings:                settings,
		Theme:                   prefs.Theme,
		AppUrl:                  appURL,
		AppSubUrl:               appSubURL,
		GoogleAnalyticsId:       setting.GoogleAnalyticsId,
		GoogleTagManagerId:      setting.GoogleTagManagerId,
		BuildVersion:            setting.BuildVersion,
		BuildCommit:             setting.BuildCommit,
		NewGrafanaVersion:       hs.updateChecker.LatestGrafanaVersion(),
		NewGrafanaVersionExists: hs.updateChecker.GrafanaUpdateAvailable(),
		AppName:                 setting.ApplicationName,
		AppNameBodyClass:        getAppNameBodyClass(hs.License.HasValidLicense()),
		FavIcon:                 "public/img/fav32.png",
		AppleTouchIcon:          "public/img/apple-touch-icon.png",
		AppTitle:                "Grafana",
		NavTree:                 navTree,
		Sentry:                  &hs.Cfg.Sentry,
		Nonce:                   c.RequestNonce,
		ContentDeliveryURL:      hs.Cfg.GetContentDeliveryURL(hs.License.ContentDeliveryPrefix()),
		LoadingLogo:             "public/img/grafana_icon.svg",
	}
	// With fine-grained access control enabled, ship the user's permission
	// set to the frontend so it can hide unauthorized UI.
	if hs.Cfg.FeatureToggles["accesscontrol"] {
		userPermissions, err := hs.AccessControl.GetUserPermissions(c.Req.Context(), c.SignedInUser)
		if err != nil {
			return nil, err
		}
		data.User.Permissions = ac.BuildPermissionsMap(userPermissions)
	}
	if setting.DisableGravatar {
		data.User.GravatarUrl = hs.Cfg.AppSubURL + "/public/img/user_profile.png"
	}
	// Fall back to the login when the user has no display name.
	if len(data.User.Name) == 0 {
		data.User.Name = data.User.Login
	}
	// A "theme" URL query parameter overrides the stored preference for this render.
	themeURLParam := c.Query("theme")
	if themeURLParam == lightName {
		data.User.LightTheme = true
		data.Theme = lightName
	} else if themeURLParam == darkName {
		data.User.LightTheme = false
		data.Theme = darkName
	}
	// Let registered hooks mutate the view data first, then sort the nav tree
	// into its final display order by weight (stable, so hook-added entries
	// with equal weight keep their relative order).
	hs.HooksService.RunIndexDataHooks(&data, c)
	sort.SliceStable(data.NavTree, func(i, j int) bool {
		return data.NavTree[i].SortWeight < data.NavTree[j].SortWeight
	})
	return &data, nil
}
// Index renders the main index view, or a 500 error page when the view data
// cannot be assembled.
func (hs *HTTPServer) Index(c *models.ReqContext) {
	viewData, err := hs.setIndexViewData(c)
	if err != nil {
		c.Handle(hs.Cfg, 500, "Failed to get settings", err)
		return
	}
	c.HTML(200, "index", viewData)
}
// NotFoundHandler serves a JSON 404 for API requests and the index view with
// a 404 status for everything else (the SPA renders its own not-found page).
func (hs *HTTPServer) NotFoundHandler(c *models.ReqContext) {
	if c.IsApiRequest() {
		c.JsonApiErr(404, "Not found", nil)
		return
	}
	viewData, err := hs.setIndexViewData(c)
	if err != nil {
		c.Handle(hs.Cfg, 500, "Failed to get settings", err)
		return
	}
	c.HTML(404, "index", viewData)
}
// getAppNameBodyClass returns the CSS class that marks the HTML body as
// either the enterprise or the open-source edition of the app.
func getAppNameBodyClass(validLicense bool) string {
	if !validLicense {
		return "app-grafana"
	}
	return "app-enterprise"
}
AppPlugins: Support app plugins with only default nav (#43016)
package api
import (
"fmt"
"sort"
"strings"
"github.com/grafana/grafana/pkg/api/dtos"
"github.com/grafana/grafana/pkg/api/navlinks"
"github.com/grafana/grafana/pkg/bus"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/plugins"
ac "github.com/grafana/grafana/pkg/services/accesscontrol"
"github.com/grafana/grafana/pkg/setting"
)
const (
	// Themes: the two built-in UI theme names. These strings are also the
	// accepted values of the "theme" URL query parameter.
	lightName = "light"
	darkName  = "dark"
)
// getProfileNode builds the signed-in user's profile entry for the nav tree,
// containing preferences, an optional change-password link, and an optional
// sign-out link.
func (hs *HTTPServer) getProfileNode(c *models.ReqContext) *dtos.NavLink {
	displayName := c.SignedInUser.NameOrFallback()
	// The subtitle shows the login only when it differs from the display name.
	subTitle := ""
	if c.SignedInUser.Login != displayName {
		subTitle = c.SignedInUser.Login
	}
	children := []*dtos.NavLink{
		{
			Text: "Preferences", Id: "profile-settings", Url: hs.Cfg.AppSubURL + "/profile", Icon: "sliders-v-alt",
		},
	}
	if setting.AddChangePasswordLink() {
		children = append(children, &dtos.NavLink{
			Text: "Change password", Id: "change-password", Url: hs.Cfg.AppSubURL + "/profile/password",
			Icon: "lock",
		})
	}
	if !setting.DisableSignoutMenu {
		children = append(children, &dtos.NavLink{
			Text:         "Sign out",
			Id:           "sign-out",
			Url:          hs.Cfg.AppSubURL + "/logout",
			Icon:         "arrow-from-right",
			Target:       "_self",
			HideFromTabs: true,
		})
	}
	return &dtos.NavLink{
		Text:       displayName,
		SubTitle:   subTitle,
		Id:         "profile",
		Img:        dtos.GetGravatarUrl(c.Email),
		Url:        hs.Cfg.AppSubURL + "/profile",
		Section:    dtos.NavSectionConfig,
		SortWeight: dtos.WeightProfile,
		Children:   children,
	}
}
// getAppLinks builds top-level nav entries for every enabled, pinned app
// plugin of the current org. Each "page" or "dashboard" include visible to
// the user's role becomes a child link; an include flagged DefaultNav also
// replaces the app link's own URL.
func (hs *HTTPServer) getAppLinks(c *models.ReqContext) ([]*dtos.NavLink, error) {
	enabledPlugins, err := hs.enabledPlugins(c.Req.Context(), c.OrgId)
	if err != nil {
		return nil, err
	}
	appLinks := []*dtos.NavLink{}
	for _, plugin := range enabledPlugins[plugins.App] {
		// Only pinned apps appear in the nav.
		if !plugin.Pinned {
			continue
		}
		appLink := &dtos.NavLink{
			Text:       plugin.Name,
			Id:         "plugin-page-" + plugin.ID,
			Url:        plugin.DefaultNavURL,
			Img:        plugin.Info.Logos.Small,
			SortWeight: dtos.WeightPlugin,
		}
		if hs.Cfg.IsNewNavigationEnabled() {
			appLink.Section = dtos.NavSectionPlugin
		} else {
			appLink.Section = dtos.NavSectionCore
		}
		for _, include := range plugin.Includes {
			// Skip includes the user's org role may not see.
			if !c.HasUserRole(include.Role) {
				continue
			}
			if include.Type == "page" && include.AddToNav {
				var link *dtos.NavLink
				// A page with an explicit Path links there directly; otherwise
				// it gets the generic plugin-page URL built from its slug.
				if len(include.Path) > 0 {
					link = &dtos.NavLink{
						Url:  hs.Cfg.AppSubURL + include.Path,
						Text: include.Name,
					}
					if include.DefaultNav {
						appLink.Url = link.Url // Overwrite the hardcoded page logic
					}
				} else {
					link = &dtos.NavLink{
						Url:  hs.Cfg.AppSubURL + "/plugins/" + plugin.ID + "/page/" + include.Slug,
						Text: include.Name,
					}
				}
				link.Icon = include.Icon
				appLink.Children = append(appLink.Children, link)
			}
			if include.Type == "dashboard" && include.AddToNav {
				link := &dtos.NavLink{
					Url:  hs.Cfg.AppSubURL + include.GetSlugOrUIDLink(),
					Text: include.Name,
				}
				appLink.Children = append(appLink.Children, link)
			}
		}
		// Apps that contributed no child links are omitted entirely.
		if len(appLink.Children) > 0 {
			// If we only have one child and it's the app default nav then remove it from children
			if len(appLink.Children) == 1 && appLink.Children[0].Url == appLink.Url {
				appLink.Children = []*dtos.NavLink{}
			}
			appLinks = append(appLinks, appLink)
		}
	}
	// Stable alphabetical order keeps apps with equal sort weight deterministic.
	if len(appLinks) > 0 {
		sort.SliceStable(appLinks, func(i, j int) bool {
			return appLinks[i].Text < appLinks[j].Text
		})
	}
	return appLinks, nil
}
// getNavTree assembles the complete navigation tree for the current user:
// core sections (home, create, dashboards, explore), the profile node,
// alerting, app plugins, org configuration, live streaming, server admin,
// and help. hasEditPerm controls whether creation entries are shown.
func (hs *HTTPServer) getNavTree(c *models.ReqContext, hasEditPerm bool) ([]*dtos.NavLink, error) {
	hasAccess := ac.HasAccess(hs.AccessControl, c)
	navTree := []*dtos.NavLink{}
	// New navigation gets an explicit Home entry; the old navigation reaches
	// home through the Dashboards menu instead.
	if hs.Cfg.IsNewNavigationEnabled() {
		navTree = append(navTree, &dtos.NavLink{
			Text:       "Home",
			Id:         "home",
			Icon:       "home-alt",
			Url:        hs.Cfg.AppSubURL + "/",
			Section:    dtos.NavSectionCore,
			SortWeight: dtos.WeightHome,
		})
	}
	// The old navigation shows a dedicated "Create" menu; the new one folds
	// creation actions into the Dashboards entry (see buildDashboardNavLinks).
	if hasEditPerm && !hs.Cfg.IsNewNavigationEnabled() {
		children := hs.buildCreateNavLinks(c)
		navTree = append(navTree, &dtos.NavLink{
			Text:       "Create",
			Id:         "create",
			Icon:       "plus",
			Url:        hs.Cfg.AppSubURL + "/dashboard/new",
			Children:   children,
			Section:    dtos.NavSectionCore,
			SortWeight: dtos.WeightCreate,
		})
	}
	dashboardChildLinks := hs.buildDashboardNavLinks(c, hasEditPerm)
	dashboardsUrl := "/"
	if hs.Cfg.IsNewNavigationEnabled() {
		dashboardsUrl = "/dashboards"
	}
	navTree = append(navTree, &dtos.NavLink{
		Text:       "Dashboards",
		Id:         "dashboards",
		SubTitle:   "Manage dashboards and folders",
		Icon:       "apps",
		Url:        hs.Cfg.AppSubURL + dashboardsUrl,
		SortWeight: dtos.WeightDashboard,
		Section:    dtos.NavSectionCore,
		Children:   dashboardChildLinks,
	})
	// NOTE(review): this closure ignores its `context` parameter and reads the
	// captured `c` instead. Behavior is unchanged because it is only evaluated
	// for this request, but the unused parameter is misleading — confirm before
	// renaming it to `_`.
	canExplore := func(context *models.ReqContext) bool {
		return c.OrgRole == models.ROLE_ADMIN || c.OrgRole == models.ROLE_EDITOR || setting.ViewersCanEdit
	}
	if setting.ExploreEnabled && hasAccess(canExplore, ac.EvalPermission(ac.ActionDatasourcesExplore)) {
		navTree = append(navTree, &dtos.NavLink{
			Text:       "Explore",
			Id:         "explore",
			SubTitle:   "Explore your data",
			Icon:       "compass",
			SortWeight: dtos.WeightExplore,
			Section:    dtos.NavSectionCore,
			Url:        hs.Cfg.AppSubURL + "/explore",
		})
	}
	if c.IsSignedIn {
		navTree = append(navTree, hs.getProfileNode(c))
	}
	// Alerting is shown when legacy alerting is explicitly enabled, or when
	// unified alerting is enabled and not disabled for this specific org.
	_, uaIsDisabledForOrg := hs.Cfg.UnifiedAlerting.DisabledOrgs[c.OrgId]
	uaVisibleForOrg := hs.Cfg.UnifiedAlerting.IsEnabled() && !uaIsDisabledForOrg
	if setting.AlertingEnabled != nil && *setting.AlertingEnabled || uaVisibleForOrg {
		alertChildNavs := hs.buildAlertNavLinks(c, uaVisibleForOrg)
		navTree = append(navTree, &dtos.NavLink{
			Text:       "Alerting",
			SubTitle:   "Alert rules and notifications",
			Id:         "alerting",
			Icon:       "bell",
			Url:        hs.Cfg.AppSubURL + "/alerting/list",
			Children:   alertChildNavs,
			Section:    dtos.NavSectionCore,
			SortWeight: dtos.WeightAlerting,
		})
	}
	appLinks, err := hs.getAppLinks(c)
	if err != nil {
		return nil, err
	}
	navTree = append(navTree, appLinks...)
	// Configuration menu: each entry is gated by an access-control check or
	// an org-role check.
	configNodes := []*dtos.NavLink{}
	if hasAccess(ac.ReqOrgAdmin, dataSourcesConfigurationAccessEvaluator) {
		configNodes = append(configNodes, &dtos.NavLink{
			Text:        "Data sources",
			Icon:        "database",
			Description: "Add and configure data sources",
			Id:          "datasources",
			Url:         hs.Cfg.AppSubURL + "/datasources",
		})
	}
	if hasAccess(ac.ReqOrgAdmin, ac.EvalPermission(ac.ActionOrgUsersRead, ac.ScopeUsersAll)) {
		configNodes = append(configNodes, &dtos.NavLink{
			Text:        "Users",
			Id:          "users",
			Description: "Manage org members",
			Icon:        "user",
			Url:         hs.Cfg.AppSubURL + "/org/users",
		})
	}
	// Teams are visible to org admins, and to editors when EditorsCanAdmin is on.
	if c.OrgRole == models.ROLE_ADMIN || (hs.Cfg.EditorsCanAdmin && c.OrgRole == models.ROLE_EDITOR) {
		configNodes = append(configNodes, &dtos.NavLink{
			Text:        "Teams",
			Id:          "teams",
			Description: "Manage org groups",
			Icon:        "users-alt",
			Url:         hs.Cfg.AppSubURL + "/org/teams",
		})
	}
	if c.OrgRole == models.ROLE_ADMIN {
		configNodes = append(configNodes, &dtos.NavLink{
			Text:        "Plugins",
			Id:          "plugins",
			Description: "View and configure plugins",
			Icon:        "plug",
			Url:         hs.Cfg.AppSubURL + "/plugins",
		})
	}
	if hasAccess(ac.ReqOrgAdmin, orgPreferencesAccessEvaluator) {
		configNodes = append(configNodes, &dtos.NavLink{
			Text:        "Preferences",
			Id:          "org-settings",
			Description: "Organization preferences",
			Icon:        "sliders-v-alt",
			Url:         hs.Cfg.AppSubURL + "/org",
		})
	}
	if c.OrgRole == models.ROLE_ADMIN {
		configNodes = append(configNodes, &dtos.NavLink{
			Text:        "API keys",
			Id:          "apikeys",
			Description: "Create & manage API keys",
			Icon:        "key-skeleton-alt",
			Url:         hs.Cfg.AppSubURL + "/org/apikeys",
		})
	}
	// Live (event streaming) pages sit behind the "live-pipeline" feature toggle.
	if hs.Cfg.FeatureToggles["live-pipeline"] {
		liveNavLinks := []*dtos.NavLink{}
		liveNavLinks = append(liveNavLinks, &dtos.NavLink{
			Text: "Status", Id: "live-status", Url: hs.Cfg.AppSubURL + "/live", Icon: "exchange-alt",
		})
		liveNavLinks = append(liveNavLinks, &dtos.NavLink{
			Text: "Pipeline", Id: "live-pipeline", Url: hs.Cfg.AppSubURL + "/live/pipeline", Icon: "arrow-to-right",
		})
		liveNavLinks = append(liveNavLinks, &dtos.NavLink{
			Text: "Cloud", Id: "live-cloud", Url: hs.Cfg.AppSubURL + "/live/cloud", Icon: "cloud-upload",
		})
		navTree = append(navTree, &dtos.NavLink{
			Id:           "live",
			Text:         "Live",
			SubTitle:     "Event Streaming",
			Icon:         "exchange-alt",
			Url:          hs.Cfg.AppSubURL + "/live",
			Children:     liveNavLinks,
			Section:      dtos.NavSectionConfig,
			HideFromTabs: true,
		})
	}
	if len(configNodes) > 0 {
		configNode := &dtos.NavLink{
			Id:       dtos.NavIDCfg,
			Text:     "Configuration",
			SubTitle: "Organization: " + c.OrgName,
			Icon:     "cog",
			// The menu links to the first child the user is allowed to see.
			Url:        configNodes[0].Url,
			SortWeight: dtos.WeightConfig,
			Children:   configNodes,
		}
		if hs.Cfg.IsNewNavigationEnabled() {
			configNode.Section = dtos.NavSectionConfig
		} else {
			configNode.Section = dtos.NavSectionCore
		}
		navTree = append(navTree, configNode)
	}
	adminNavLinks := hs.buildAdminNavLinks(c)
	if len(adminNavLinks) > 0 {
		navSection := dtos.NavSectionCore
		if hs.Cfg.IsNewNavigationEnabled() {
			navSection = dtos.NavSectionConfig
		}
		serverAdminNode := navlinks.GetServerAdminNode(adminNavLinks, navSection)
		navTree = append(navTree, serverAdminNode)
	}
	// Hide build details from anonymous users when configured to do so.
	helpVersion := fmt.Sprintf(`%s v%s (%s)`, setting.ApplicationName, setting.BuildVersion, setting.BuildCommit)
	if hs.Cfg.AnonymousHideVersion && !c.IsSignedIn {
		helpVersion = setting.ApplicationName
	}
	navTree = append(navTree, &dtos.NavLink{
		Text:       "Help",
		SubTitle:   helpVersion,
		Id:         "help",
		Url:        "#",
		Icon:       "question-circle",
		SortWeight: dtos.WeightHelp,
		Section:    dtos.NavSectionConfig,
		Children:   []*dtos.NavLink{},
	})
	return navTree, nil
}
// buildDashboardNavLinks assembles the children of the "Dashboards" nav
// entry. The set of links depends on the navigation flavor (old vs. new),
// on whether the user is signed in, and on edit permission.
func (hs *HTTPServer) buildDashboardNavLinks(c *models.ReqContext, hasEditPerm bool) []*dtos.NavLink {
	links := []*dtos.NavLink{}
	add := func(link *dtos.NavLink) {
		links = append(links, link)
	}
	// The old navigation keeps Home (plus a divider) inside the Dashboards menu.
	if !hs.Cfg.IsNewNavigationEnabled() {
		add(&dtos.NavLink{
			Text: "Home", Id: "home", Url: hs.Cfg.AppSubURL + "/", Icon: "home-alt", HideFromTabs: true,
		})
		add(&dtos.NavLink{
			Text: "Divider", Divider: true, Id: "divider", HideFromTabs: true,
		})
	}
	add(&dtos.NavLink{
		Text: "Browse", Id: "manage-dashboards", Url: hs.Cfg.AppSubURL + "/dashboards", Icon: "sitemap",
	})
	add(&dtos.NavLink{
		Text: "Playlists", Id: "playlists", Url: hs.Cfg.AppSubURL + "/playlists", Icon: "presentation-play",
	})
	if c.IsSignedIn {
		add(&dtos.NavLink{
			Text: "Snapshots",
			Id:   "snapshots",
			Url:  hs.Cfg.AppSubURL + "/dashboard/snapshots",
			Icon: "camera",
		})
		add(&dtos.NavLink{
			Text: "Library panels",
			Id:   "library-panels",
			Url:  hs.Cfg.AppSubURL + "/library-panels",
			Icon: "library-panel",
		})
	}
	// The new navigation folds the creation actions into the Dashboards menu.
	if hasEditPerm && hs.Cfg.IsNewNavigationEnabled() {
		add(&dtos.NavLink{
			Text: "Divider", Divider: true, Id: "divider", HideFromTabs: true,
		})
		add(&dtos.NavLink{
			Text: "New dashboard", Icon: "plus", Url: hs.Cfg.AppSubURL + "/dashboard/new", HideFromTabs: true,
		})
		if c.OrgRole == models.ROLE_ADMIN || c.OrgRole == models.ROLE_EDITOR {
			add(&dtos.NavLink{
				Text: "New folder", SubTitle: "Create a new folder to organize your dashboards", Id: "folder",
				Icon: "plus", Url: hs.Cfg.AppSubURL + "/dashboards/folder/new", HideFromTabs: true,
			})
		}
		add(&dtos.NavLink{
			Text: "Import", SubTitle: "Import dashboard from file or Grafana.com", Id: "import", Icon: "plus",
			Url: hs.Cfg.AppSubURL + "/dashboard/import", HideFromTabs: true,
		})
	}
	return links
}
// buildAlertNavLinks assembles the children of the "Alerting" nav entry.
// uaVisibleForOrg selects between the unified-alerting links (contact
// points, notification policies, silences, groups, admin) and the legacy
// notification-channels link.
func (hs *HTTPServer) buildAlertNavLinks(c *models.ReqContext, uaVisibleForOrg bool) []*dtos.NavLink {
	isEditorOrAdmin := c.OrgRole == models.ROLE_ADMIN || c.OrgRole == models.ROLE_EDITOR
	links := []*dtos.NavLink{
		{Text: "Alert rules", Id: "alert-list", Url: hs.Cfg.AppSubURL + "/alerting/list", Icon: "list-ul"},
	}
	if isEditorOrAdmin {
		if uaVisibleForOrg {
			links = append(links, &dtos.NavLink{
				Text: "Contact points", Id: "receivers", Url: hs.Cfg.AppSubURL + "/alerting/notifications",
				Icon: "comment-alt-share",
			})
			links = append(links, &dtos.NavLink{Text: "Notification policies", Id: "am-routes", Url: hs.Cfg.AppSubURL + "/alerting/routes", Icon: "sitemap"})
		} else {
			links = append(links, &dtos.NavLink{
				Text: "Notification channels", Id: "channels", Url: hs.Cfg.AppSubURL + "/alerting/notifications",
				Icon: "comment-alt-share",
			})
		}
	}
	if uaVisibleForOrg {
		links = append(links, &dtos.NavLink{Text: "Silences", Id: "silences", Url: hs.Cfg.AppSubURL + "/alerting/silences", Icon: "bell-slash"})
		links = append(links, &dtos.NavLink{Text: "Alert groups", Id: "groups", Url: hs.Cfg.AppSubURL + "/alerting/groups", Icon: "layer-group"})
		// Admin page is unified-alerting only and restricted to org admins.
		if c.OrgRole == models.ROLE_ADMIN {
			links = append(links, &dtos.NavLink{
				Text: "Admin", Id: "alerting-admin", Url: hs.Cfg.AppSubURL + "/alerting/admin",
				Icon: "cog",
			})
		}
	}
	return links
}
// buildCreateNavLinks assembles the children of the "Create" (+) nav entry:
// new dashboard, new folder (editors/admins only), import, and — when any
// flavor of alerting is enabled for this org — a new alert rule.
func (hs *HTTPServer) buildCreateNavLinks(c *models.ReqContext) []*dtos.NavLink {
	links := []*dtos.NavLink{
		{Text: "Dashboard", Icon: "apps", Url: hs.Cfg.AppSubURL + "/dashboard/new"},
	}
	if c.OrgRole == models.ROLE_ADMIN || c.OrgRole == models.ROLE_EDITOR {
		links = append(links, &dtos.NavLink{
			Text: "Folder", SubTitle: "Create a new folder to organize your dashboards", Id: "folder",
			Icon: "folder-plus", Url: hs.Cfg.AppSubURL + "/dashboards/folder/new",
		})
	}
	links = append(links, &dtos.NavLink{
		Text: "Import", SubTitle: "Import dashboard from file or Grafana.com", Id: "import", Icon: "import",
		Url: hs.Cfg.AppSubURL + "/dashboard/import",
	})
	_, uaDisabledForOrg := hs.Cfg.UnifiedAlerting.DisabledOrgs[c.OrgId]
	unifiedAlertingVisible := hs.Cfg.UnifiedAlerting.IsEnabled() && !uaDisabledForOrg
	legacyAlertingOn := setting.AlertingEnabled != nil && *setting.AlertingEnabled
	if legacyAlertingOn || unifiedAlertingVisible {
		links = append(links, &dtos.NavLink{
			Text: "Alert rule", SubTitle: "Create an alert rule", Id: "alert",
			Icon: "bell", Url: hs.Cfg.AppSubURL + "/alerting/new",
		})
	}
	return links
}
// buildAdminNavLinks assembles the "Server Admin" section of the nav tree.
// Each entry is shown only when the corresponding access-control evaluation
// passes; LDAP and plugin administration are additionally feature-gated.
func (hs *HTTPServer) buildAdminNavLinks(c *models.ReqContext) []*dtos.NavLink {
	hasAccess := ac.HasAccess(hs.AccessControl, c)
	hasGlobalAccess := ac.HasGlobalAccess(hs.AccessControl, c)
	links := []*dtos.NavLink{}
	appendLink := func(text, id, path, icon string) {
		links = append(links, &dtos.NavLink{
			Text: text, Id: id, Url: hs.Cfg.AppSubURL + path, Icon: icon,
		})
	}
	if hasAccess(ac.ReqGrafanaAdmin, ac.EvalPermission(ac.ActionUsersRead, ac.ScopeGlobalUsersAll)) {
		appendLink("Users", "global-users", "/admin/users", "user")
	}
	// Orgs spans organizations, so it needs the cross-org (global) access check.
	if hasGlobalAccess(ac.ReqGrafanaAdmin, orgsAccessEvaluator) {
		appendLink("Orgs", "global-orgs", "/admin/orgs", "building")
	}
	if hasAccess(ac.ReqGrafanaAdmin, ac.EvalPermission(ac.ActionSettingsRead)) {
		appendLink("Settings", "server-settings", "/admin/settings", "sliders-v-alt")
	}
	if hs.Cfg.LDAPEnabled && hasAccess(ac.ReqGrafanaAdmin, ac.EvalPermission(ac.ActionLDAPStatusRead)) {
		appendLink("LDAP", "ldap", "/admin/ldap", "book")
	}
	if hs.Cfg.PluginAdminEnabled && hasAccess(ac.ReqGrafanaAdmin, ac.EvalPermission(ac.ActionPluginsManage)) {
		appendLink("Plugins", "admin-plugins", "/admin/plugins", "plug")
	}
	return links
}
// setIndexViewData assembles everything the index (SPA bootstrap) template
// needs: current-user info, frontend settings, theme, locale, app URLs, and
// the sorted navigation tree. Shared by Index and NotFoundHandler.
func (hs *HTTPServer) setIndexViewData(c *models.ReqContext) (*dtos.IndexViewData, error) {
	hasEditPermissionInFoldersQuery := models.HasEditPermissionInFoldersQuery{SignedInUser: c.SignedInUser}
	if err := bus.DispatchCtx(c.Req.Context(), &hasEditPermissionInFoldersQuery); err != nil {
		return nil, err
	}
	hasEditPerm := hasEditPermissionInFoldersQuery.Result
	settings, err := hs.getFrontendSettingsMap(c)
	if err != nil {
		return nil, err
	}
	settings["dateFormats"] = hs.Cfg.DateFormats
	prefsQuery := models.GetPreferencesWithDefaultsQuery{User: c.SignedInUser}
	if err := bus.DispatchCtx(c.Req.Context(), &prefsQuery); err != nil {
		return nil, err
	}
	prefs := prefsQuery.Result
	// Read locale from accept-language. Only the first (most preferred)
	// language tag is used; quality values ("q=") are not parsed.
	acceptLang := c.Req.Header.Get("Accept-Language")
	locale := "en-US"
	if len(acceptLang) > 0 {
		parts := strings.Split(acceptLang, ",")
		locale = parts[0]
	}
	appURL := setting.AppUrl
	appSubURL := hs.Cfg.AppSubURL
	// special case when doing localhost call from image renderer
	if c.IsRenderCall && !hs.Cfg.ServeFromSubPath {
		appURL = fmt.Sprintf("%s://localhost:%s", hs.Cfg.Protocol, hs.Cfg.HTTPPort)
		appSubURL = ""
		settings["appSubUrl"] = ""
	}
	navTree, err := hs.getNavTree(c, hasEditPerm)
	if err != nil {
		return nil, err
	}
	data := dtos.IndexViewData{
		User: &dtos.CurrentUser{
			Id:                         c.UserId,
			IsSignedIn:                 c.IsSignedIn,
			Login:                      c.Login,
			Email:                      c.Email,
			Name:                       c.Name,
			OrgCount:                   c.OrgCount,
			OrgId:                      c.OrgId,
			OrgName:                    c.OrgName,
			OrgRole:                    c.OrgRole,
			GravatarUrl:                dtos.GetGravatarUrl(c.Email),
			IsGrafanaAdmin:             c.IsGrafanaAdmin,
			LightTheme:                 prefs.Theme == lightName,
			Timezone:                   prefs.Timezone,
			WeekStart:                  prefs.WeekStart,
			Locale:                     locale,
			HelpFlags1:                 c.HelpFlags1,
			HasEditPermissionInFolders: hasEditPerm,
		},
		Settings:                settings,
		Theme:                   prefs.Theme,
		AppUrl:                  appURL,
		AppSubUrl:               appSubURL,
		GoogleAnalyticsId:       setting.GoogleAnalyticsId,
		GoogleTagManagerId:      setting.GoogleTagManagerId,
		BuildVersion:            setting.BuildVersion,
		BuildCommit:             setting.BuildCommit,
		NewGrafanaVersion:       hs.updateChecker.LatestGrafanaVersion(),
		NewGrafanaVersionExists: hs.updateChecker.GrafanaUpdateAvailable(),
		AppName:                 setting.ApplicationName,
		AppNameBodyClass:        getAppNameBodyClass(hs.License.HasValidLicense()),
		FavIcon:                 "public/img/fav32.png",
		AppleTouchIcon:          "public/img/apple-touch-icon.png",
		AppTitle:                "Grafana",
		NavTree:                 navTree,
		Sentry:                  &hs.Cfg.Sentry,
		Nonce:                   c.RequestNonce,
		ContentDeliveryURL:      hs.Cfg.GetContentDeliveryURL(hs.License.ContentDeliveryPrefix()),
		LoadingLogo:             "public/img/grafana_icon.svg",
	}
	// With fine-grained access control enabled, ship the user's permission
	// set to the frontend so it can hide unauthorized UI.
	if hs.Cfg.FeatureToggles["accesscontrol"] {
		userPermissions, err := hs.AccessControl.GetUserPermissions(c.Req.Context(), c.SignedInUser)
		if err != nil {
			return nil, err
		}
		data.User.Permissions = ac.BuildPermissionsMap(userPermissions)
	}
	if setting.DisableGravatar {
		data.User.GravatarUrl = hs.Cfg.AppSubURL + "/public/img/user_profile.png"
	}
	// Fall back to the login when the user has no display name.
	if len(data.User.Name) == 0 {
		data.User.Name = data.User.Login
	}
	// A "theme" URL query parameter overrides the stored preference for this render.
	themeURLParam := c.Query("theme")
	if themeURLParam == lightName {
		data.User.LightTheme = true
		data.Theme = lightName
	} else if themeURLParam == darkName {
		data.User.LightTheme = false
		data.Theme = darkName
	}
	// Let registered hooks mutate the view data first, then sort the nav tree
	// into its final display order by weight (stable, so hook-added entries
	// with equal weight keep their relative order).
	hs.HooksService.RunIndexDataHooks(&data, c)
	sort.SliceStable(data.NavTree, func(i, j int) bool {
		return data.NavTree[i].SortWeight < data.NavTree[j].SortWeight
	})
	return &data, nil
}
// Index renders the index (SPA bootstrap) page with HTTP 200, or a 500
// error page when the view data cannot be assembled.
func (hs *HTTPServer) Index(c *models.ReqContext) {
	indexData, err := hs.setIndexViewData(c)
	if err != nil {
		c.Handle(hs.Cfg, 500, "Failed to get settings", err)
		return
	}
	c.HTML(200, "index", indexData)
}
// NotFoundHandler answers unknown routes: API requests get a JSON 404,
// everything else gets the index view with a 404 status so the SPA can
// render its own not-found page.
func (hs *HTTPServer) NotFoundHandler(c *models.ReqContext) {
	if c.IsApiRequest() {
		c.JsonApiErr(404, "Not found", nil)
		return
	}
	indexData, err := hs.setIndexViewData(c)
	if err != nil {
		c.Handle(hs.Cfg, 500, "Failed to get settings", err)
		return
	}
	c.HTML(404, "index", indexData)
}
// getAppNameBodyClass maps the license state to the CSS class applied to
// the HTML body: enterprise edition for a valid license, open source otherwise.
func getAppNameBodyClass(validLicense bool) string {
	switch {
	case validLicense:
		return "app-enterprise"
	default:
		return "app-grafana"
	}
}
|
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)
// Common string formats
// ---------------------
// Many fields in this API have formatting requirements. The commonly used
// formats are defined here.
//
// C_IDENTIFIER: This is a string that conforms to the definition of an "identifier"
// in the C language. This is captured by the following regex:
// [A-Za-z_][A-Za-z0-9_]*
// This defines the format, but not the length restriction, which should be
// specified at the definition of any field of this type.
//
// DNS_LABEL: This is a string, no more than 63 characters long, that conforms
// to the definition of a "label" in RFCs 1035 and 1123. This is captured
// by the following regex:
// [a-z0-9]([-a-z0-9]*[a-z0-9])?
//
// DNS_SUBDOMAIN: This is a string, no more than 253 characters long, that conforms
// to the definition of a "subdomain" in RFCs 1035 and 1123. This is captured
// by the following regex:
// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
// or more simply:
// DNS_LABEL(\.DNS_LABEL)*
// TypeMeta describes an individual object in an API response or request
// with strings representing the type of the object and its API schema version.
// Structures that are versioned or persisted should inline TypeMeta.
// Both fields are optional and carry parallel json and yaml tags.
type TypeMeta struct {
	// Kind is a string value representing the REST resource this object represents.
	// Servers may infer this from the endpoint the client submits requests to.
	Kind string `json:"kind,omitempty" yaml:"kind,omitempty"`
	// APIVersion defines the versioned schema of this representation of an object.
	// Servers should convert recognized schemas to the latest internal value, and
	// may reject unrecognized values.
	APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty"`
}
// ListMeta describes metadata that synthetic resources must have, including lists and
// various status objects. A resource may have only one of {ObjectMeta, ListMeta}.
type ListMeta struct {
	// SelfLink is a URL representing this object.
	SelfLink string `json:"selfLink,omitempty" yaml:"selfLink,omitempty"`
	// ResourceVersion is an opaque value that represents the version of this
	// response for use with optimistic concurrency and change monitoring
	// endpoints. Clients must treat these values as opaque and values may only
	// be valid for a particular resource or set of resources. Only servers
	// will generate resource versions.
	ResourceVersion string `json:"resourceVersion,omitempty" yaml:"resourceVersion,omitempty"`
}
// ObjectMeta is metadata that all persisted resources must have, which includes all objects
// users must create. A resource may have only one of {ObjectMeta, ListMeta}.
type ObjectMeta struct {
	// Name is unique within a namespace. Name is required when creating resources, although
	// some resources may allow a client to request the generation of an appropriate name
	// automatically. Name is primarily intended for creation idempotence and configuration
	// definition.
	Name string `json:"name,omitempty" yaml:"name,omitempty"`
	// Namespace defines the space within which name must be unique. An empty namespace is
	// equivalent to the "default" namespace, but "default" is the canonical representation.
	// Not all objects are required to be scoped to a namespace - the value of this field for
	// those objects will be empty.
	Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
	// SelfLink is a URL representing this object.
	SelfLink string `json:"selfLink,omitempty" yaml:"selfLink,omitempty"`
	// UID is the unique in time and space value for this object. It is typically generated by
	// the server on successful creation of a resource and is not allowed to change on PUT
	// operations.
	UID string `json:"uid,omitempty" yaml:"uid,omitempty"`
	// ResourceVersion is an opaque value that represents the version of this resource. May be
	// used for optimistic concurrency, change detection, and the watch operation on a resource
	// or set of resources. Clients must treat these values as opaque and values may only be
	// valid for a particular resource or set of resources. Only servers will generate
	// resource versions.
	ResourceVersion string `json:"resourceVersion,omitempty" yaml:"resourceVersion,omitempty"`
	// CreationTimestamp is a timestamp representing the server time when this object was
	// created. It is not guaranteed to be set in happens-before order across separate operations.
	// Clients may not set this value. It is represented in RFC3339 form and is in UTC.
	CreationTimestamp util.Time `json:"creationTimestamp,omitempty" yaml:"creationTimestamp,omitempty"`
	// Labels are key value pairs that may be used to scope and select individual resources.
	// TODO: replace map[string]string with labels.LabelSet type
	Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
	// Annotations are unstructured key value data stored with a resource that may be set by
	// external tooling. They are not queryable and should be preserved when modifying
	// objects.
	Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"`
}
// Well-known namespace name values.
const (
	// NamespaceDefault means the object is in the default namespace which is applied when not specified by clients
	NamespaceDefault string = "default"
	// NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces
	NamespaceAll string = ""
)
// Volume represents a named volume in a pod that may be accessed by any containers in the pod.
type Volume struct {
	// Name is required and must be a DNS_LABEL. Each volume in a pod must have
	// a unique name.
	Name string `json:"name" yaml:"name"`
	// Source represents the location and type of a volume to mount.
	// This is optional for now. If not specified, the Volume is implied to be an EmptyDir.
	// This implied behavior is deprecated and will be removed in a future version.
	Source *VolumeSource `json:"source" yaml:"source"`
}
// VolumeSource identifies the location and type of a volume.
// Only one of the following source fields may be specified.
type VolumeSource struct {
	// HostDir represents a pre-existing directory on the host machine that is directly
	// exposed to the container. This is generally used for system agents or other privileged
	// things that are allowed to see the host machine. Most containers will NOT need this.
	// TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
	// mount host directories as read/write.
	HostDir *HostDir `json:"hostDir" yaml:"hostDir"`
	// EmptyDir represents a temporary directory that shares a pod's lifetime.
	EmptyDir *EmptyDir `json:"emptyDir" yaml:"emptyDir"`
	// GCEPersistentDisk represents a GCE Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod.
	GCEPersistentDisk *GCEPersistentDisk `json:"persistentDisk" yaml:"persistentDisk"`
}
// HostDir represents bare host directory volume.
type HostDir struct {
	// Path is the directory location on the host.
	Path string `json:"path" yaml:"path"`
}
// EmptyDir represents a temporary directory volume that shares a pod's
// lifetime. It has no configurable fields.
type EmptyDir struct{}
// Protocol defines network protocols supported for things like container ports.
type Protocol string

const (
	// ProtocolTCP is the TCP protocol.
	ProtocolTCP Protocol = "TCP"
	// ProtocolUDP is the UDP protocol.
	ProtocolUDP Protocol = "UDP"
)
// GCEPersistentDisk represents a GCE Persistent Disk (PD) resource.
// A GCE PD must exist and be formatted before mounting to a container.
// The disk must also be in the same GCE project and zone as the kubelet.
// A GCE PD can only be mounted as read/write once.
type GCEPersistentDisk struct {
	// PDName is the unique name of the PD resource. Used to identify the disk in GCE.
	PDName string `yaml:"pdName" json:"pdName"`
	// Required: Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs"
	// TODO: how do we prevent errors in the filesystem from compromising the machine
	FSType string `yaml:"fsType,omitempty" json:"fsType,omitempty"`
	// Optional: Partition on the disk to mount.
	// If omitted, kubelet will attempt to mount the device name.
	// Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty.
	Partition int `yaml:"partition,omitempty" json:"partition,omitempty"`
	// Optional: Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	ReadOnly bool `yaml:"readOnly,omitempty" json:"readOnly,omitempty"`
}
// Port represents a network port in a single container.
// An unset Protocol is treated as "TCP" (see the Protocol field below).
type Port struct {
	// Optional: If specified, this must be a DNS_LABEL. Each named port
	// in a pod must have a unique name.
	Name string `json:"name,omitempty" yaml:"name,omitempty"`
	// Optional: If specified, this must be a valid port number, 0 < x < 65536.
	HostPort int `json:"hostPort,omitempty" yaml:"hostPort,omitempty"`
	// Required: This must be a valid port number, 0 < x < 65536.
	ContainerPort int `json:"containerPort" yaml:"containerPort"`
	// Optional: Supports "TCP" and "UDP". Defaults to "TCP".
	Protocol Protocol `json:"protocol,omitempty" yaml:"protocol,omitempty"`
	// Optional: What host IP to bind the external port to.
	HostIP string `json:"hostIP,omitempty" yaml:"hostIP,omitempty"`
}
// VolumeMount describes a mounting of a Volume within a container.
type VolumeMount struct {
	// Required: This must match the Name of a Volume [above].
	Name string `json:"name" yaml:"name"`
	// Optional: Defaults to false (read-write).
	ReadOnly bool `json:"readOnly,omitempty" yaml:"readOnly,omitempty"`
	// Required: Path within the container at which the volume should be mounted.
	MountPath string `json:"mountPath,omitempty" yaml:"mountPath,omitempty"`
}

// EnvVar represents an environment variable present in a Container.
type EnvVar struct {
	// Required: This must be a C_IDENTIFIER.
	Name string `json:"name" yaml:"name"`
	// Optional: defaults to "".
	Value string `json:"value,omitempty" yaml:"value,omitempty"`
}
// HTTPGetAction describes an action based on HTTP Get requests.
type HTTPGetAction struct {
	// Optional: Path to access on the HTTP server.
	Path string `json:"path,omitempty" yaml:"path,omitempty"`
	// Required: Name or number of the port to access on the container.
	// NOTE(review): documented as required but tagged omitempty — confirm the
	// intended wire format before tightening.
	Port util.IntOrString `json:"port,omitempty" yaml:"port,omitempty"`
	// Optional: Host name to connect to, defaults to the pod IP.
	Host string `json:"host,omitempty" yaml:"host,omitempty"`
}

// TCPSocketAction describes an action based on opening a socket.
type TCPSocketAction struct {
	// Required: Port to connect to.
	Port util.IntOrString `json:"port,omitempty" yaml:"port,omitempty"`
}

// ExecAction describes a "run in container" action.
type ExecAction struct {
	// Command is the command line to execute inside the container, the working directory for the
	// command is root ('/') in the container's filesystem. The command is simply exec'd, it is
	// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
	// a shell, you need to explicitly call out to that shell.
	Command []string `yaml:"command,omitempty" json:"command,omitempty"`
}
// LivenessProbe describes a liveness probe to be performed against a container.
// Exactly one of the action fields should be set, selected by the probe type.
// TODO: pass structured data to the actions, and document that data here.
type LivenessProbe struct {
	// HTTPGetProbe parameters, required if Type == 'http'
	HTTPGet *HTTPGetAction `yaml:"httpGet,omitempty" json:"httpGet,omitempty"`
	// TCPSocketProbe parameter, required if Type == 'tcp'
	TCPSocket *TCPSocketAction `yaml:"tcpSocket,omitempty" json:"tcpSocket,omitempty"`
	// ExecProbe parameter, required if Type == 'exec'
	Exec *ExecAction `yaml:"exec,omitempty" json:"exec,omitempty"`
	// Length of time before health checking is activated. In seconds.
	InitialDelaySeconds int64 `yaml:"initialDelaySeconds,omitempty" json:"initialDelaySeconds,omitempty"`
}

// PullPolicy describes a policy for if/when to pull a container image.
type PullPolicy string

const (
	// PullAlways means to always attempt to pull the latest image. Container will fail if the pull fails.
	PullAlways PullPolicy = "PullAlways"
	// PullNever means to never pull an image, only use a local image. Container will fail if the image isn't present.
	PullNever PullPolicy = "PullNever"
	// PullIfNotPresent means to pull if the image isn't present on disk. Container will fail if the image isn't present and the pull fails.
	PullIfNotPresent PullPolicy = "PullIfNotPresent"
)
// Container represents a single container that is expected to be run on the host.
type Container struct {
	// Required: This must be a DNS_LABEL. Each container in a pod must
	// have a unique name.
	Name string `json:"name" yaml:"name"`
	// Required.
	Image string `json:"image" yaml:"image"`
	// Optional: Defaults to whatever is defined in the image.
	Command []string `json:"command,omitempty" yaml:"command,omitempty"`
	// Optional: Defaults to Docker's default.
	WorkingDir string `json:"workingDir,omitempty" yaml:"workingDir,omitempty"`
	// Ports to expose from the container.
	Ports []Port `json:"ports,omitempty" yaml:"ports,omitempty"`
	// Environment variables to set in the container.
	Env []EnvVar `json:"env,omitempty" yaml:"env,omitempty"`
	// Optional: Defaults to unlimited.
	Memory int `json:"memory,omitempty" yaml:"memory,omitempty"`
	// Optional: Defaults to unlimited.
	CPU int `json:"cpu,omitempty" yaml:"cpu,omitempty"`
	// Volumes to mount into the container's filesystem; names must match pod volumes.
	VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" yaml:"volumeMounts,omitempty"`
	// Optional: periodic probe of container liveness.
	LivenessProbe *LivenessProbe `json:"livenessProbe,omitempty" yaml:"livenessProbe,omitempty"`
	// Optional: actions to take in response to container lifecycle events.
	Lifecycle *Lifecycle `json:"lifecycle,omitempty" yaml:"lifecycle,omitempty"`
	// Optional: Default to false.
	Privileged bool `json:"privileged,omitempty" yaml:"privileged,omitempty"`
	// Optional: Policy for pulling images for this container.
	ImagePullPolicy PullPolicy `json:"imagePullPolicy" yaml:"imagePullPolicy"`
}
// Handler defines a specific action that should be taken.
// TODO: pass structured data to these actions, and document that data here.
type Handler struct {
	// One and only one of the following should be specified.
	// Exec specifies the action to take.
	Exec *ExecAction `json:"exec,omitempty" yaml:"exec,omitempty"`
	// HTTPGet specifies the http request to perform.
	HTTPGet *HTTPGetAction `json:"httpGet,omitempty" yaml:"httpGet,omitempty"`
}

// Lifecycle describes actions that the management system should take in response to container lifecycle
// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
// until the action is complete, unless the container process fails, in which case the handler is aborted.
type Lifecycle struct {
	// PostStart is called immediately after a container is created. If the handler fails, the container
	// is terminated and restarted.
	PostStart *Handler `json:"postStart,omitempty" yaml:"postStart,omitempty"`
	// PreStop is called immediately before a container is terminated. The reason for termination is
	// passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated.
	PreStop *Handler `yaml:"preStop,omitempty" json:"preStop,omitempty"`
}
// The below types are used by kube_client and api_server.

// PodStatus represents a status of a pod.
type PodStatus string

// These are the valid statuses of pods.
const (
	// PodWaiting means that we're waiting for the pod to begin running.
	PodWaiting PodStatus = "Waiting"
	// PodRunning means that the pod is up and running.
	PodRunning PodStatus = "Running"
	// PodTerminated means that the pod has stopped.
	PodTerminated PodStatus = "Terminated"
)
// ContainerStateWaiting describes a container that is not yet running.
type ContainerStateWaiting struct {
	// Reason the container is waiting, e.g. pulling its image.
	Reason string `json:"reason,omitempty" yaml:"reason,omitempty"`
}

// ContainerStateRunning describes a currently executing container.
type ContainerStateRunning struct {
	// StartedAt is when the container began running.
	StartedAt time.Time `json:"startedAt,omitempty" yaml:"startedAt,omitempty"`
}

// ContainerStateTerminated describes a container that has exited.
type ContainerStateTerminated struct {
	// ExitCode is the container's exit status.
	ExitCode int `json:"exitCode" yaml:"exitCode"`
	// Signal that caused termination, if any.
	Signal int `json:"signal,omitempty" yaml:"signal,omitempty"`
	// Reason is a brief machine-readable explanation of the termination.
	Reason string `json:"reason,omitempty" yaml:"reason,omitempty"`
	StartedAt time.Time `json:"startedAt,omitempty" yaml:"startedAt,omitempty"`
	FinishedAt time.Time `json:"finishedAt,omitempty" yaml:"finishedAt,omitempty"`
}

// ContainerState holds a possible state of a container; at most one of its
// members may be specified.
type ContainerState struct {
	// Only one of the following ContainerState may be specified.
	// If none of them is specified, the default one is ContainerStateWaiting.
	Waiting *ContainerStateWaiting `json:"waiting,omitempty" yaml:"waiting,omitempty"`
	Running *ContainerStateRunning `json:"running,omitempty" yaml:"running,omitempty"`
	Termination *ContainerStateTerminated `json:"termination,omitempty" yaml:"termination,omitempty"`
}

// ContainerStatus reports the observed state of a single container.
type ContainerStatus struct {
	// TODO(dchen1107): Should we rename PodStatus to a more generic name or have a separate states
	// defined for container?
	State ContainerState `json:"state,omitempty" yaml:"state,omitempty"`
	// RestartCount is how many times the container has been restarted.
	RestartCount int `json:"restartCount" yaml:"restartCount"`
	// TODO(dchen1107): Deprecated this soon once we pull entire PodStatus from node,
	// not just PodInfo. Now we need this to remove docker.Container from API
	PodIP string `json:"podIP,omitempty" yaml:"podIP,omitempty"`
	// TODO(dchen1107): Need to decide how to represent this in v1beta3
	Image string `yaml:"image" json:"image"`
	// TODO(dchen1107): Once we have done with integration with cadvisor, resource
	// usage should be included.
}
// PodInfo contains one entry for every container with available info,
// keyed by container name.
type PodInfo map[string]ContainerStatus

// RestartPolicyAlways indicates containers should always be restarted.
type RestartPolicyAlways struct{}

// RestartPolicyOnFailure indicates containers should be restarted only on failure.
// TODO(dchen1107): Define what kinds of failures should restart.
// TODO(dchen1107): Decide whether to support policy knobs, and, if so, which ones.
type RestartPolicyOnFailure struct{}

// RestartPolicyNever indicates containers should never be restarted.
type RestartPolicyNever struct{}

// RestartPolicy selects exactly one restart behavior for a pod's containers.
type RestartPolicy struct {
	// Only one of the following restart policies may be specified.
	// If none of the following policies is specified, the default one
	// is RestartPolicyAlways.
	Always *RestartPolicyAlways `json:"always,omitempty" yaml:"always,omitempty"`
	OnFailure *RestartPolicyOnFailure `json:"onFailure,omitempty" yaml:"onFailure,omitempty"`
	Never *RestartPolicyNever `json:"never,omitempty" yaml:"never,omitempty"`
}
// PodState is the state of a pod, used as either input (desired state) or output (current state).
type PodState struct {
	// Manifest describes the containers and volumes the pod should run.
	Manifest ContainerManifest `json:"manifest,omitempty" yaml:"manifest,omitempty"`
	// Status is the pod's lifecycle phase (Waiting/Running/Terminated).
	Status PodStatus `json:"status,omitempty" yaml:"status,omitempty"`
	// Host is the node the pod is scheduled on, if any.
	Host string `json:"host,omitempty" yaml:"host,omitempty"`
	HostIP string `json:"hostIP,omitempty" yaml:"hostIP,omitempty"`
	PodIP string `json:"podIP,omitempty" yaml:"podIP,omitempty"`
	// The key of this map is the *name* of the container within the manifest; it has one
	// entry per container in the manifest. The value of this map is currently the output
	// of `docker inspect`. This output format is *not* final and should not be relied
	// upon.
	// TODO: Make real decisions about what our info should look like. Re-enable fuzz test
	// when we have done this.
	Info PodInfo `json:"info,omitempty" yaml:"info,omitempty"`
}

// PodList is a list of Pods.
type PodList struct {
	TypeMeta `json:",inline" yaml:",inline"`
	ListMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	Items []Pod `json:"items" yaml:"items,omitempty"`
}

// Pod is a collection of containers, used as either input (create, update) or as output (list, get).
type Pod struct {
	TypeMeta `json:",inline" yaml:",inline"`
	ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	DesiredState PodState `json:"desiredState,omitempty" yaml:"desiredState,omitempty"`
	CurrentState PodState `json:"currentState,omitempty" yaml:"currentState,omitempty"`
}
// ReplicationControllerState is the state of a replication controller, either input (create, update) or as output (list, get).
type ReplicationControllerState struct {
	// Replicas is the desired number of pod replicas.
	Replicas int `json:"replicas" yaml:"replicas"`
	// ReplicaSelector selects the pods counted toward Replicas by label.
	ReplicaSelector map[string]string `json:"replicaSelector,omitempty" yaml:"replicaSelector,omitempty"`
	// PodTemplate is the template from which new replicas are created.
	PodTemplate PodTemplate `json:"podTemplate,omitempty" yaml:"podTemplate,omitempty"`
}

// ReplicationControllerList is a collection of replication controllers.
type ReplicationControllerList struct {
	TypeMeta `json:",inline" yaml:",inline"`
	ListMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	Items []ReplicationController `json:"items,omitempty" yaml:"items,omitempty"`
}

// ReplicationController represents the configuration of a replication controller.
type ReplicationController struct {
	TypeMeta `json:",inline" yaml:",inline"`
	ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	DesiredState ReplicationControllerState `json:"desiredState,omitempty" yaml:"desiredState,omitempty"`
	CurrentState ReplicationControllerState `json:"currentState,omitempty" yaml:"currentState,omitempty"`
}

// PodTemplate holds the information used for creating pods.
type PodTemplate struct {
	// DesiredState describes the pods to create.
	DesiredState PodState `json:"desiredState,omitempty" yaml:"desiredState,omitempty"`
	// Labels are applied to pods created from this template.
	Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
}
// ServiceList holds a list of services.
type ServiceList struct {
	TypeMeta `json:",inline" yaml:",inline"`
	ListMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	Items []Service `json:"items" yaml:"items"`
}

// Service is a named abstraction of software service (for example, mysql) consisting of local port
// (for example 3306) that the proxy listens on, and the selector that determines which pods
// will answer requests sent through the proxy.
type Service struct {
	TypeMeta `json:",inline" yaml:",inline"`
	ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	// Required: the port the proxy listens on.
	Port int `json:"port" yaml:"port"`
	// Optional: Defaults to "TCP".
	Protocol Protocol `yaml:"protocol,omitempty" json:"protocol,omitempty"`
	// This service will route traffic to pods having labels matching this selector.
	Selector map[string]string `json:"selector,omitempty" yaml:"selector,omitempty"`
	// CreateExternalLoadBalancer requests an external load balancer for this service.
	CreateExternalLoadBalancer bool `json:"createExternalLoadBalancer,omitempty" yaml:"createExternalLoadBalancer,omitempty"`
	// ContainerPort is the name of the port on the container to direct traffic to.
	// Optional, if unspecified use the first port on the container.
	ContainerPort util.IntOrString `json:"containerPort,omitempty" yaml:"containerPort,omitempty"`
	// PortalIP is assigned by the master. If specified by the user it will be ignored.
	// TODO: This is awkward - if we had a BoundService, it would be better factored.
	PortalIP string `json:"portalIP,omitempty" yaml:"portalIP,omitempty"`
	// ProxyPort is assigned by the master. If specified by the user it will be ignored.
	ProxyPort int `json:"proxyPort,omitempty" yaml:"proxyPort,omitempty"`
}
// Endpoints is a collection of endpoints that implement the actual service, for example:
// Name: "mysql", Endpoints: ["10.10.1.1:1909", "10.10.2.2:8834"]
type Endpoints struct {
	TypeMeta `json:",inline" yaml:",inline"`
	ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	// Endpoints are "ip:port" strings backing the service.
	Endpoints []string `json:"endpoints,omitempty" yaml:"endpoints,omitempty"`
}

// EndpointsList is a list of endpoints.
type EndpointsList struct {
	TypeMeta `json:",inline" yaml:",inline"`
	ListMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	Items []Endpoints `json:"items,omitempty" yaml:"items,omitempty"`
}
// NodeResources represents resources on a Kubernetes system node
// see https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/resources.md for more details.
type NodeResources struct {
	// Capacity represents the available resources.
	Capacity ResourceList `json:"capacity,omitempty" yaml:"capacity,omitempty"`
}

// ResourceName identifies a kind of node resource (e.g. cpu, memory).
type ResourceName string

// ResourceList maps resource names to quantities.
// TODO Replace this with a more complete "Quantity" struct
type ResourceList map[ResourceName]util.IntOrString

// Minion is a worker node in Kubernetes.
// The name of the minion according to etcd is in ID.
type Minion struct {
	TypeMeta `json:",inline" yaml:",inline"`
	ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	// Queried from cloud provider, if available.
	HostIP string `json:"hostIP,omitempty" yaml:"hostIP,omitempty"`
	// Resources available on the node
	NodeResources NodeResources `json:"resources,omitempty" yaml:"resources,omitempty"`
}

// MinionList is a list of minions.
type MinionList struct {
	TypeMeta `json:",inline" yaml:",inline"`
	ListMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	Items []Minion `json:"items,omitempty" yaml:"items,omitempty"`
}
// Binding is written by a scheduler to cause a pod to be bound to a host.
type Binding struct {
	TypeMeta `json:",inline" yaml:",inline"`
	ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	// PodID names the pod being bound.
	PodID string `json:"podID" yaml:"podID"`
	// Host is the node the pod is bound to.
	Host string `json:"host" yaml:"host"`
}
// Status is a return value for calls that don't return other objects.
// TODO: this could go in apiserver, but I'm including it here so clients needn't
// import both.
type Status struct {
	TypeMeta `json:",inline" yaml:",inline"`
	ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	// One of: "Success", "Failure", "Working" (for operations not yet completed)
	Status string `json:"status,omitempty" yaml:"status,omitempty"`
	// A human-readable description of the status of this operation.
	Message string `json:"message,omitempty" yaml:"message,omitempty"`
	// A machine-readable description of why this operation is in the
	// "Failure" or "Working" status. If this value is empty there
	// is no information available. A Reason clarifies an HTTP status
	// code but does not override it.
	Reason StatusReason `json:"reason,omitempty" yaml:"reason,omitempty"`
	// Extended data associated with the reason. Each reason may define its
	// own extended details. This field is optional and the data returned
	// is not guaranteed to conform to any schema except that defined by
	// the reason type.
	Details *StatusDetails `json:"details,omitempty" yaml:"details,omitempty"`
	// Suggested HTTP return code for this status, 0 if not set.
	Code int `json:"code,omitempty" yaml:"code,omitempty"`
}

// StatusDetails is a set of additional properties that MAY be set by the
// server to provide additional information about a response. The Reason
// field of a Status object defines what attributes will be set. Clients
// must ignore fields that do not match the defined type of each attribute,
// and should assume that any attribute may be empty, invalid, or under
// defined.
type StatusDetails struct {
	// The ID attribute of the resource associated with the status StatusReason
	// (when there is a single ID which can be described).
	ID string `json:"id,omitempty" yaml:"id,omitempty"`
	// The kind attribute of the resource associated with the status StatusReason.
	// On some operations may differ from the requested resource Kind.
	Kind string `json:"kind,omitempty" yaml:"kind,omitempty"`
	// The Causes array includes more details associated with the StatusReason
	// failure. Not all StatusReasons may provide detailed causes.
	Causes []StatusCause `json:"causes,omitempty" yaml:"causes,omitempty"`
}

// Values of Status.Status
const (
	StatusSuccess = "Success"
	StatusFailure = "Failure"
	StatusWorking = "Working"
)
// StatusReason is an enumeration of possible failure causes. Each StatusReason
// must map to a single HTTP status code, but multiple reasons may map
// to the same HTTP status code.
// TODO: move to apiserver
type StatusReason string

const (
	// StatusReasonUnknown means the server has declined to indicate a specific reason.
	// The details field may contain other information about this error.
	// Status code 500.
	StatusReasonUnknown StatusReason = ""
	// StatusReasonWorking means the server is processing this request and will complete
	// at a future time.
	// Details (optional):
	//   "kind" string - the name of the resource being referenced ("operation" today)
	//   "id"   string - the identifier of the Operation resource where updates
	//                   will be returned
	// Headers (optional):
	//   "Location" - HTTP header populated with a URL that can retrieved the final
	//                status of this operation.
	// Status code 202
	StatusReasonWorking StatusReason = "Working"
	// StatusReasonNotFound means one or more resources required for this operation
	// could not be found.
	// Details (optional):
	//   "kind" string - the kind attribute of the missing resource
	//                   on some operations may differ from the requested
	//                   resource.
	//   "id"   string - the identifier of the missing resource
	// Status code 404
	StatusReasonNotFound StatusReason = "NotFound"
	// StatusReasonAlreadyExists means the resource you are creating already exists.
	// Details (optional):
	//   "kind" string - the kind attribute of the conflicting resource
	//   "id"   string - the identifier of the conflicting resource
	// Status code 409
	StatusReasonAlreadyExists StatusReason = "AlreadyExists"
	// StatusReasonConflict means the requested update operation cannot be completed
	// due to a conflict in the operation. The client may need to alter the request.
	// Each resource may define custom details that indicate the nature of the
	// conflict.
	// Status code 409
	StatusReasonConflict StatusReason = "Conflict"
	// StatusReasonInvalid means the requested create or update operation cannot be
	// completed due to invalid data provided as part of the request. The client may
	// need to alter the request. When set, the client may use the StatusDetails
	// message field as a summary of the issues encountered.
	// Details (optional):
	//   "kind" string - the kind attribute of the invalid resource
	//   "id"   string - the identifier of the invalid resource
	//   "causes"      - one or more StatusCause entries indicating the data in the
	//                   provided resource that was invalid. The code, message, and
	//                   field attributes will be set.
	// Status code 422
	StatusReasonInvalid StatusReason = "Invalid"
)
// StatusCause provides more information about an api.Status failure, including
// cases when multiple errors are encountered.
type StatusCause struct {
	// A machine-readable description of the cause of the error. If this value is
	// empty there is no information available.
	// Note: serialized under the "reason" key even though the field is named Type.
	Type CauseType `json:"reason,omitempty" yaml:"reason,omitempty"`
	// A human-readable description of the cause of the error. This field may be
	// presented as-is to a reader.
	Message string `json:"message,omitempty" yaml:"message,omitempty"`
	// The field of the resource that has caused this error, as named by its JSON
	// serialization. May include dot and postfix notation for nested attributes.
	// Arrays are zero-indexed. Fields may appear more than once in an array of
	// causes due to fields having multiple errors.
	// Optional.
	//
	// Examples:
	//   "name" - the field "name" on the current resource
	//   "items[0].name" - the field "name" on the first array entry in "items"
	Field string `json:"field,omitempty" yaml:"field,omitempty"`
}

// CauseType is a machine readable value providing more detail about what
// occurred in a status response. An operation may have multiple causes for a
// status (whether Failure, Success, or Working).
type CauseType string

const (
	// CauseTypeFieldValueNotFound is used to report failure to find a requested value
	// (e.g. looking up an ID).
	CauseTypeFieldValueNotFound CauseType = "FieldValueNotFound"
	// CauseTypeFieldValueRequired is used to report required values that are not
	// provided (e.g. empty strings, null values, or empty arrays).
	CauseTypeFieldValueRequired CauseType = "FieldValueRequired"
	// CauseTypeFieldValueDuplicate is used to report collisions of values that must be
	// unique (e.g. unique IDs).
	CauseTypeFieldValueDuplicate CauseType = "FieldValueDuplicate"
	// CauseTypeFieldValueInvalid is used to report malformed values (e.g. failed regex
	// match).
	CauseTypeFieldValueInvalid CauseType = "FieldValueInvalid"
	// CauseTypeFieldValueNotSupported is used to report valid (as per formatting rules)
	// values that can not be handled (e.g. an enumerated string).
	CauseTypeFieldValueNotSupported CauseType = "FieldValueNotSupported"
)
// ServerOp is an operation delivered to API clients.
type ServerOp struct {
	TypeMeta `yaml:",inline" json:",inline"`
	ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
}

// ServerOpList is a list of operations, as delivered to API clients.
type ServerOpList struct {
	TypeMeta `yaml:",inline" json:",inline"`
	ListMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	Items []ServerOp `yaml:"items,omitempty" json:"items,omitempty"`
}
// ObjectReference contains enough information to let you inspect or modify the referred object.
type ObjectReference struct {
	// Kind of the referent.
	Kind string `json:"kind,omitempty" yaml:"kind,omitempty"`
	// Namespace of the referent.
	Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
	// Name of the referent.
	Name string `json:"name,omitempty" yaml:"name,omitempty"`
	// UID of the referent.
	UID string `json:"uid,omitempty" yaml:"uid,omitempty"`
	// API version of the referent.
	APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty"`
	// Specific resourceVersion to which this reference is made, if any.
	ResourceVersion string `json:"resourceVersion,omitempty" yaml:"resourceVersion,omitempty"`
	// Optional. If referring to a piece of an object instead of an entire object, this string
	// should contain a valid field access statement. For example,
	// if the object reference is to a container within a pod, this would take on a value like:
	// "desiredState.manifest.containers[2]". Such statements are valid language constructs in
	// both go and JavaScript. This is syntax is chosen only to have some well-defined way of
	// referencing a part of an object.
	// TODO: this design is not final and this field is subject to change in the future.
	FieldPath string `json:"fieldPath,omitempty" yaml:"fieldPath,omitempty"`
}
// Event is a report of an event somewhere in the cluster.
// TODO: Decide whether to store these separately or with the object they apply to.
type Event struct {
	TypeMeta `yaml:",inline" json:",inline"`
	ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	// Required. The object that this event is about.
	InvolvedObject ObjectReference `json:"involvedObject,omitempty" yaml:"involvedObject,omitempty"`
	// Should be a short, machine understandable string that describes the current status
	// of the referred object. This should not give the reason for being in this state.
	// Examples: "running", "cantStart", "cantSchedule", "deleted".
	// It's OK for components to make up statuses to report here, but the same string should
	// always be used for the same status.
	// TODO: define a way of making sure these are consistent and don't collide.
	// TODO: provide exact specification for format.
	Status string `json:"status,omitempty" yaml:"status,omitempty"`
	// Optional; this should be a short, machine understandable string that gives the reason
	// for the transition into the object's current status. For example, if ObjectStatus is
	// "cantStart", StatusReason might be "imageNotFound".
	// TODO: provide exact specification for format.
	Reason string `json:"reason,omitempty" yaml:"reason,omitempty"`
	// Optional. A human-readable description of the status of this operation.
	// TODO: decide on maximum length.
	Message string `json:"message,omitempty" yaml:"message,omitempty"`
	// Optional. The component reporting this event. Should be a short machine understandable string.
	// TODO: provide exact specification for format.
	Source string `json:"source,omitempty" yaml:"source,omitempty"`
	// The time at which the client recorded the event. (Time of server receipt is in TypeMeta.)
	Timestamp util.Time `json:"timestamp,omitempty" yaml:"timestamp,omitempty"`
}

// EventList is a list of events.
type EventList struct {
	TypeMeta `yaml:",inline" json:",inline"`
	ListMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	Items []Event `yaml:"items,omitempty" json:"items,omitempty"`
}
// ContainerManifest corresponds to the Container Manifest format, documented at:
// https://developers.google.com/compute/docs/containers/container_vms#container_manifest
// This is used as the representation of Kubernetes workloads.
// DEPRECATED: Replaced with BoundPod
type ContainerManifest struct {
	// Required: This must be a supported version string, such as "v1beta1".
	Version string `yaml:"version" json:"version"`
	// Required: This must be a DNS_SUBDOMAIN.
	// TODO: ID on Manifest is deprecated and will be removed in the future.
	ID string `yaml:"id" json:"id"`
	// TODO: UUID on Manifest is deprecated in the future once we are done
	// with the API refactoring. It is required for now to determine the instance
	// of a Pod.
	UUID string `yaml:"uuid,omitempty" json:"uuid,omitempty"`
	// Volumes available to the containers below.
	Volumes []Volume `yaml:"volumes" json:"volumes"`
	// Containers to run.
	Containers []Container `yaml:"containers" json:"containers"`
	// RestartPolicy for all containers in the manifest.
	RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" yaml:"restartPolicy,omitempty"`
}

// ContainerManifestList is used to communicate container manifests to kubelet.
// DEPRECATED: Replaced with BoundPods
type ContainerManifestList struct {
	TypeMeta `json:",inline" yaml:",inline"`
	ListMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	Items []ContainerManifest `json:"items,omitempty" yaml:"items,omitempty"`
}
// Included in partial form from v1beta3 to replace ContainerManifest

// PodSpec is a description of a pod.
type PodSpec struct {
	// Volumes available to containers in the pod.
	Volumes []Volume `json:"volumes" yaml:"volumes"`
	// Containers to run in the pod.
	Containers []Container `json:"containers" yaml:"containers"`
	// RestartPolicy for all containers in the pod.
	RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" yaml:"restartPolicy,omitempty"`
}

// BoundPod is a collection of containers that should be run on a host. A BoundPod
// defines how a Pod may change after a Binding is created. A Pod is a request to
// execute a pod, whereas a BoundPod is the specification that would be run on a server.
type BoundPod struct {
	TypeMeta `json:",inline" yaml:",inline"`
	ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	// Spec defines the behavior of a pod.
	Spec PodSpec `json:"spec,omitempty" yaml:"spec,omitempty"`
}

// BoundPods is a list of Pods bound to a common server. The resource version of
// the pod list is guaranteed to only change when the list of bound pods changes.
type BoundPods struct {
	TypeMeta `json:",inline" yaml:",inline"`
	ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	// Host is the name of a node that these pods were bound to.
	Host string `json:"host" yaml:"host"`
	// Items is the list of all pods bound to a given host.
	Items []BoundPod `json:"items" yaml:"items"`
}
// Start adding git volumes.
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)
// Common string formats
// ---------------------
// Many fields in this API have formatting requirements. The commonly used
// formats are defined here.
//
// C_IDENTIFIER: This is a string that conforms to the definition of an "identifier"
// in the C language. This is captured by the following regex:
// [A-Za-z_][A-Za-z0-9_]*
// This defines the format, but not the length restriction, which should be
// specified at the definition of any field of this type.
//
// DNS_LABEL: This is a string, no more than 63 characters long, that conforms
// to the definition of a "label" in RFCs 1035 and 1123. This is captured
// by the following regex:
// [a-z0-9]([-a-z0-9]*[a-z0-9])?
//
// DNS_SUBDOMAIN: This is a string, no more than 253 characters long, that conforms
// to the definition of a "subdomain" in RFCs 1035 and 1123. This is captured
// by the following regex:
// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
// or more simply:
// DNS_LABEL(\.DNS_LABEL)*
// TypeMeta describes an individual object in an API response or request
// with strings representing the type of the object and its API schema version.
// Structures that are versioned or persisted should inline TypeMeta.
type TypeMeta struct {
	// Kind is a string value representing the REST resource this object represents.
	// Servers may infer this from the endpoint the client submits requests to.
	Kind string `json:"kind,omitempty" yaml:"kind,omitempty"`
	// APIVersion defines the versioned schema of this representation of an object.
	// Servers should convert recognized schemas to the latest internal value, and
	// may reject unrecognized values.
	APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty"`
}

// ListMeta describes metadata that synthetic resources must have, including lists and
// various status objects. A resource may have only one of {ObjectMeta, ListMeta}.
type ListMeta struct {
	// SelfLink is a URL representing this object.
	SelfLink string `json:"selfLink,omitempty" yaml:"selfLink,omitempty"`
	// An opaque value that represents the version of this response for use with optimistic
	// concurrency and change monitoring endpoints. Clients must treat these values as opaque
	// and values may only be valid for a particular resource or set of resources. Only servers
	// will generate resource versions.
	ResourceVersion string `json:"resourceVersion,omitempty" yaml:"resourceVersion,omitempty"`
}
// ObjectMeta is metadata that all persisted resources must have, which includes all objects
// users must create. A resource may have only one of {ObjectMeta, ListMeta}.
type ObjectMeta struct {
// Name is unique within a namespace. Name is required when creating resources, although
// some resources may allow a client to request the generation of an appropriate name
// automatically. Name is primarily intended for creation idempotence and configuration
// definition.
// Format: DNS_SUBDOMAIN (see the format definitions at the top of this file).
Name string `json:"name,omitempty" yaml:"name,omitempty"`
// Namespace defines the space within which name must be unique. An empty namespace is
// equivalent to the "default" namespace, but "default" is the canonical representation.
// Not all objects are required to be scoped to a namespace - the value of this field for
// those objects will be empty.
Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
// SelfLink is a URL representing this object.
SelfLink string `json:"selfLink,omitempty" yaml:"selfLink,omitempty"`
// UID is the unique in time and space value for this object. It is typically generated by
// the server on successful creation of a resource and is not allowed to change on PUT
// operations.
UID string `json:"uid,omitempty" yaml:"uid,omitempty"`
// ResourceVersion is an opaque value that represents the version of this resource. May be
// used for optimistic concurrency, change detection, and the watch operation on a resource
// or set of resources. Clients must treat these values as opaque and values may only be
// valid for a particular resource or set of resources. Only servers will generate
// resource versions.
ResourceVersion string `json:"resourceVersion,omitempty" yaml:"resourceVersion,omitempty"`
// CreationTimestamp is a timestamp representing the server time when this object was
// created. It is not guaranteed to be set in happens-before order across separate operations.
// Clients may not set this value. It is represented in RFC3339 form and is in UTC.
CreationTimestamp util.Time `json:"creationTimestamp,omitempty" yaml:"creationTimestamp,omitempty"`
// Labels are key value pairs that may be used to scope and select individual resources.
// TODO: replace map[string]string with labels.LabelSet type
Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
// Annotations are unstructured key value data stored with a resource that may be set by
// external tooling. They are not queryable and should be preserved when modifying
// objects.
Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"`
}
// Well-known namespace values.
const (
// NamespaceDefault means the object is in the default namespace which is applied when not specified by clients
NamespaceDefault string = "default"
// NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces
NamespaceAll string = ""
)
// Volume represents a named volume in a pod that may be accessed by any containers in the pod.
type Volume struct {
// Required: This must be a DNS_LABEL. Each volume in a pod must have
// a unique name.
Name string `json:"name" yaml:"name"`
// Source represents the location and type of a volume to mount.
// This is optional for now. If not specified, the Volume is implied to be an EmptyDir.
// This implied behavior is deprecated and will be removed in a future version.
Source *VolumeSource `json:"source" yaml:"source"`
}
// VolumeSource represents the source location of a Volume to mount.
// Only one of its fields may be set.
type VolumeSource struct {
// Only one of the following sources may be specified
// HostDir represents a pre-existing directory on the host machine that is directly
// exposed to the container. This is generally used for system agents or other privileged
// things that are allowed to see the host machine. Most containers will NOT need this.
// TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
// mount host directories as read/write.
HostDir *HostDir `json:"hostDir" yaml:"hostDir"`
// EmptyDir represents a temporary directory that shares a pod's lifetime.
EmptyDir *EmptyDir `json:"emptyDir" yaml:"emptyDir"`
// GCEPersistentDisk represents a GCE Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
GCEPersistentDisk *GCEPersistentDisk `json:"persistentDisk" yaml:"persistentDisk"`
// GitVolume represents a git repository at a particular revision.
GitVolume *GitVolume `json:"gitVolume" yaml:"gitVolume"`
}
// HostDir represents bare host directory volume.
type HostDir struct {
// Path is the directory on the host to expose, as an absolute host path.
Path string `json:"path" yaml:"path"`
}
// EmptyDir represents an initially empty temporary directory that shares a pod's lifetime.
type EmptyDir struct{}
// Protocol defines network protocols supported for things like container ports.
type Protocol string
// Valid values of Protocol.
const (
// ProtocolTCP is the TCP protocol.
ProtocolTCP Protocol = "TCP"
// ProtocolUDP is the UDP protocol.
ProtocolUDP Protocol = "UDP"
)
// GCEPersistentDisk represents a GCE Persistent Disk resource.
// A GCE PD must exist and be formatted before mounting to a container.
// The disk must also be in the same GCE project and zone as the kubelet.
// A GCE PD can only be mounted as read/write once.
type GCEPersistentDisk struct {
// Unique name of the PD resource. Used to identify the disk in GCE
PDName string `yaml:"pdName" json:"pdName"`
// Required: Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs"
// TODO: how do we prevent errors in the filesystem from compromising the machine
FSType string `yaml:"fsType,omitempty" json:"fsType,omitempty"`
// Optional: Partition on the disk to mount.
// If omitted, kubelet will attempt to mount the device name.
// Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty.
Partition int `yaml:"partition,omitempty" json:"partition,omitempty"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
ReadOnly bool `yaml:"readOnly,omitempty" json:"readOnly,omitempty"`
}
// GitVolume represents a volume that is populated from a git repository.
type GitVolume struct {
// Repository URL
Repository string
}
// Port represents a network port in a single container
type Port struct {
// Optional: If specified, this must be a DNS_LABEL. Each named port
// in a pod must have a unique name.
Name string `json:"name,omitempty" yaml:"name,omitempty"`
// Optional: If specified, this must be a valid port number, 0 < x < 65536.
HostPort int `json:"hostPort,omitempty" yaml:"hostPort,omitempty"`
// Required: This must be a valid port number, 0 < x < 65536.
ContainerPort int `json:"containerPort" yaml:"containerPort"`
// Optional: Supports "TCP" and "UDP". Defaults to "TCP".
Protocol Protocol `json:"protocol,omitempty" yaml:"protocol,omitempty"`
// Optional: What host IP to bind the external port to.
HostIP string `json:"hostIP,omitempty" yaml:"hostIP,omitempty"`
}
// VolumeMount describes a mounting of a Volume within a container.
type VolumeMount struct {
// Required: This must match the Name of a Volume [above].
Name string `json:"name" yaml:"name"`
// Optional: Defaults to false (read-write).
ReadOnly bool `json:"readOnly,omitempty" yaml:"readOnly,omitempty"`
// Required. Path within the container at which the volume should be mounted.
MountPath string `json:"mountPath,omitempty" yaml:"mountPath,omitempty"`
}
// EnvVar represents an environment variable present in a Container.
type EnvVar struct {
// Required: This must be a C_IDENTIFIER.
Name string `json:"name" yaml:"name"`
// Optional: defaults to "".
Value string `json:"value,omitempty" yaml:"value,omitempty"`
}
// HTTPGetAction describes an action based on HTTP Get requests.
type HTTPGetAction struct {
// Optional: Path to access on the HTTP server.
Path string `json:"path,omitempty" yaml:"path,omitempty"`
// Required: Name or number of the port to access on the container.
Port util.IntOrString `json:"port,omitempty" yaml:"port,omitempty"`
// Optional: Host name to connect to, defaults to the pod IP.
Host string `json:"host,omitempty" yaml:"host,omitempty"`
}
// TCPSocketAction describes an action based on opening a socket
type TCPSocketAction struct {
// Required: Port to connect to.
Port util.IntOrString `json:"port,omitempty" yaml:"port,omitempty"`
}
// ExecAction describes a "run in container" action.
type ExecAction struct {
// Command is the command line to execute inside the container, the working directory for the
// command is root ('/') in the container's filesystem. The command is simply exec'd, it is
// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
// a shell, you need to explicitly call out to that shell.
Command []string `yaml:"command,omitempty" json:"command,omitempty"`
}
// LivenessProbe describes a liveness probe to be run against the container.
// Exactly one of the action fields (HTTPGet, TCPSocket, Exec) should be set,
// matching the probe type.
// TODO: pass structured data to the actions, and document that data here.
type LivenessProbe struct {
// HTTPGetProbe parameters, required if Type == 'http'
HTTPGet *HTTPGetAction `yaml:"httpGet,omitempty" json:"httpGet,omitempty"`
// TCPSocketProbe parameter, required if Type == 'tcp'
TCPSocket *TCPSocketAction `yaml:"tcpSocket,omitempty" json:"tcpSocket,omitempty"`
// ExecProbe parameter, required if Type == 'exec'
Exec *ExecAction `yaml:"exec,omitempty" json:"exec,omitempty"`
// Length of time before health checking is activated. In seconds.
InitialDelaySeconds int64 `yaml:"initialDelaySeconds,omitempty" json:"initialDelaySeconds,omitempty"`
}
// PullPolicy describes a policy for if/when to pull a container image
type PullPolicy string
// Valid values of PullPolicy.
const (
// Always attempt to pull the latest image. Container will fail if the pull fails.
PullAlways PullPolicy = "PullAlways"
// Never pull an image, only use a local image. Container will fail if the image isn't present
PullNever PullPolicy = "PullNever"
// Pull if the image isn't present on disk. Container will fail if the image isn't present and the pull fails.
PullIfNotPresent PullPolicy = "PullIfNotPresent"
)
// Container represents a single container that is expected to be run on the host.
type Container struct {
// Required: This must be a DNS_LABEL. Each container in a pod must
// have a unique name.
Name string `json:"name" yaml:"name"`
// Required. The container image to run.
Image string `json:"image" yaml:"image"`
// Optional: Defaults to whatever is defined in the image.
Command []string `json:"command,omitempty" yaml:"command,omitempty"`
// Optional: Defaults to Docker's default.
WorkingDir string `json:"workingDir,omitempty" yaml:"workingDir,omitempty"`
// Optional: List of ports to expose from the container.
Ports []Port `json:"ports,omitempty" yaml:"ports,omitempty"`
// Optional: Environment variables to set in the container.
Env []EnvVar `json:"env,omitempty" yaml:"env,omitempty"`
// Optional: Defaults to unlimited.
Memory int `json:"memory,omitempty" yaml:"memory,omitempty"`
// Optional: Defaults to unlimited.
CPU int `json:"cpu,omitempty" yaml:"cpu,omitempty"`
// Optional: Pod volumes to mount into the container's filesystem.
VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" yaml:"volumeMounts,omitempty"`
// Optional: Periodic probe of container liveness.
LivenessProbe *LivenessProbe `json:"livenessProbe,omitempty" yaml:"livenessProbe,omitempty"`
// Optional: Actions that the management system should take in response to container lifecycle events.
Lifecycle *Lifecycle `json:"lifecycle,omitempty" yaml:"lifecycle,omitempty"`
// Optional: Default to false.
Privileged bool `json:"privileged,omitempty" yaml:"privileged,omitempty"`
// Optional: Policy for pulling images for this container
ImagePullPolicy PullPolicy `json:"imagePullPolicy" yaml:"imagePullPolicy"`
}
// Handler defines a specific action that should be taken
// TODO: pass structured data to these actions, and document that data here.
type Handler struct {
// One and only one of the following should be specified.
// Exec specifies the action to take.
Exec *ExecAction `json:"exec,omitempty" yaml:"exec,omitempty"`
// HTTPGet specifies the http request to perform.
HTTPGet *HTTPGetAction `json:"httpGet,omitempty" yaml:"httpGet,omitempty"`
}
// Lifecycle describes actions that the management system should take in response to container lifecycle
// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
// until the action is complete, unless the container process fails, in which case the handler is aborted.
type Lifecycle struct {
// PostStart is called immediately after a container is created. If the handler fails, the container
// is terminated and restarted.
PostStart *Handler `json:"postStart,omitempty" yaml:"postStart,omitempty"`
// PreStop is called immediately before a container is terminated. The reason for termination is
// passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated.
PreStop *Handler `yaml:"preStop,omitempty" json:"preStop,omitempty"`
}
// The below types are used by kube_client and api_server.
// PodStatus represents a status of a pod.
type PodStatus string
// These are the valid statuses of pods.
const (
// PodWaiting means that we're waiting for the pod to begin running.
PodWaiting PodStatus = "Waiting"
// PodRunning means that the pod is up and running.
PodRunning PodStatus = "Running"
// PodTerminated means that the pod has stopped.
PodTerminated PodStatus = "Terminated"
)
// ContainerStateWaiting describes a container that is not yet running.
type ContainerStateWaiting struct {
// Reason is a brief explanation of why the container is waiting (e.g. the image is being pulled).
Reason string `json:"reason,omitempty" yaml:"reason,omitempty"`
}
// ContainerStateRunning describes a container that is currently running.
type ContainerStateRunning struct {
// StartedAt is the time at which the container started running.
StartedAt time.Time `json:"startedAt,omitempty" yaml:"startedAt,omitempty"`
}
// ContainerStateTerminated describes a container that has exited.
type ContainerStateTerminated struct {
// ExitCode is the exit status of the container's process.
ExitCode int `json:"exitCode" yaml:"exitCode"`
// Signal is the signal that terminated the container, if any.
Signal int `json:"signal,omitempty" yaml:"signal,omitempty"`
// Reason is a brief explanation of why the container terminated.
Reason string `json:"reason,omitempty" yaml:"reason,omitempty"`
StartedAt time.Time `json:"startedAt,omitempty" yaml:"startedAt,omitempty"`
FinishedAt time.Time `json:"finishedAt,omitempty" yaml:"finishedAt,omitempty"`
}
// ContainerState holds the current state of a container as a union of the possible states.
type ContainerState struct {
// Only one of the following ContainerState may be specified.
// If none of them is specified, the default one is ContainerStateWaiting.
Waiting *ContainerStateWaiting `json:"waiting,omitempty" yaml:"waiting,omitempty"`
Running *ContainerStateRunning `json:"running,omitempty" yaml:"running,omitempty"`
Termination *ContainerStateTerminated `json:"termination,omitempty" yaml:"termination,omitempty"`
}
// ContainerStatus reports the observed state of a single container.
type ContainerStatus struct {
// TODO(dchen1107): Should we rename PodStatus to a more generic name or have a separate states
// defined for container?
State ContainerState `json:"state,omitempty" yaml:"state,omitempty"`
// RestartCount is the number of times the container has been restarted.
RestartCount int `json:"restartCount" yaml:"restartCount"`
// TODO(dchen1107): Deprecated this soon once we pull entire PodStatus from node,
// not just PodInfo. Now we need this to remove docker.Container from API
PodIP string `json:"podIP,omitempty" yaml:"podIP,omitempty"`
// TODO(dchen1107): Need to decide how to represent this in v1beta3
Image string `yaml:"image" json:"image"`
// TODO(dchen1107): Once we have done with integration with cadvisor, resource
// usage should be included.
}
// PodInfo contains one entry for every container with available info.
// The map key is the container name.
type PodInfo map[string]ContainerStatus
// RestartPolicyAlways means the container is restarted whenever it exits.
type RestartPolicyAlways struct{}
// TODO(dchen1107): Define what kinds of failures should restart.
// TODO(dchen1107): Decide whether to support policy knobs, and, if so, which ones.
// RestartPolicyOnFailure means the container is restarted only if it exits with a failure.
type RestartPolicyOnFailure struct{}
// RestartPolicyNever means the container is never restarted after it exits.
type RestartPolicyNever struct{}
// RestartPolicy describes how a container should be restarted, as a union of the policy types.
type RestartPolicy struct {
// Only one of the following restart policies may be specified.
// If none of the following policies is specified, the default one
// is RestartPolicyAlways.
Always *RestartPolicyAlways `json:"always,omitempty" yaml:"always,omitempty"`
OnFailure *RestartPolicyOnFailure `json:"onFailure,omitempty" yaml:"onFailure,omitempty"`
Never *RestartPolicyNever `json:"never,omitempty" yaml:"never,omitempty"`
}
// PodState is the state of a pod, used as either input (desired state) or output (current state).
type PodState struct {
// Manifest describes the containers and volumes of the pod.
Manifest ContainerManifest `json:"manifest,omitempty" yaml:"manifest,omitempty"`
// Status is the overall pod status (Waiting, Running, Terminated).
Status PodStatus `json:"status,omitempty" yaml:"status,omitempty"`
// Host is the name of the node the pod is bound to.
Host string `json:"host,omitempty" yaml:"host,omitempty"`
HostIP string `json:"hostIP,omitempty" yaml:"hostIP,omitempty"`
PodIP string `json:"podIP,omitempty" yaml:"podIP,omitempty"`
// The key of this map is the *name* of the container within the manifest; it has one
// entry per container in the manifest. The value of this map is currently the output
// of `docker inspect`. This output format is *not* final and should not be relied
// upon.
// TODO: Make real decisions about what our info should look like. Re-enable fuzz test
// when we have done this.
Info PodInfo `json:"info,omitempty" yaml:"info,omitempty"`
}
// PodList is a list of Pods.
type PodList struct {
TypeMeta `json:",inline" yaml:",inline"`
ListMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
Items []Pod `json:"items" yaml:"items,omitempty"`
}
// Pod is a collection of containers, used as either input (create, update) or as output (list, get).
type Pod struct {
TypeMeta `json:",inline" yaml:",inline"`
ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
DesiredState PodState `json:"desiredState,omitempty" yaml:"desiredState,omitempty"`
CurrentState PodState `json:"currentState,omitempty" yaml:"currentState,omitempty"`
}
// ReplicationControllerState is the state of a replication controller, either input (create, update) or as output (list, get).
type ReplicationControllerState struct {
// Replicas is the desired number of pod replicas.
Replicas int `json:"replicas" yaml:"replicas"`
// ReplicaSelector selects the pods whose count is compared against Replicas.
ReplicaSelector map[string]string `json:"replicaSelector,omitempty" yaml:"replicaSelector,omitempty"`
// PodTemplate is the template used to create new pods when scaling up.
PodTemplate PodTemplate `json:"podTemplate,omitempty" yaml:"podTemplate,omitempty"`
}
// ReplicationControllerList is a collection of replication controllers.
type ReplicationControllerList struct {
TypeMeta `json:",inline" yaml:",inline"`
ListMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
Items []ReplicationController `json:"items,omitempty" yaml:"items,omitempty"`
}
// ReplicationController represents the configuration of a replication controller.
type ReplicationController struct {
TypeMeta `json:",inline" yaml:",inline"`
ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
DesiredState ReplicationControllerState `json:"desiredState,omitempty" yaml:"desiredState,omitempty"`
CurrentState ReplicationControllerState `json:"currentState,omitempty" yaml:"currentState,omitempty"`
}
// PodTemplate holds the information used for creating pods.
type PodTemplate struct {
DesiredState PodState `json:"desiredState,omitempty" yaml:"desiredState,omitempty"`
// Labels are applied to pods created from this template.
Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
}
// ServiceList holds a list of services.
type ServiceList struct {
TypeMeta `json:",inline" yaml:",inline"`
ListMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
Items []Service `json:"items" yaml:"items"`
}
// Service is a named abstraction of software service (for example, mysql) consisting of local port
// (for example 3306) that the proxy listens on, and the selector that determines which pods
// will answer requests sent through the proxy.
type Service struct {
TypeMeta `json:",inline" yaml:",inline"`
ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
// Required. The port the proxy listens on.
Port int `json:"port" yaml:"port"`
// Optional: Defaults to "TCP".
Protocol Protocol `yaml:"protocol,omitempty" json:"protocol,omitempty"`
// This service will route traffic to pods having labels matching this selector.
Selector map[string]string `json:"selector,omitempty" yaml:"selector,omitempty"`
// CreateExternalLoadBalancer indicates whether a load balancer should be created for this service.
CreateExternalLoadBalancer bool `json:"createExternalLoadBalancer,omitempty" yaml:"createExternalLoadBalancer,omitempty"`
// ContainerPort is the name of the port on the container to direct traffic to.
// Optional, if unspecified use the first port on the container.
ContainerPort util.IntOrString `json:"containerPort,omitempty" yaml:"containerPort,omitempty"`
// PortalIP is assigned by the master. If specified by the user it will be ignored.
// TODO: This is awkward - if we had a BoundService, it would be better factored.
PortalIP string `json:"portalIP,omitempty" yaml:"portalIP,omitempty"`
// ProxyPort is assigned by the master. If specified by the user it will be ignored.
ProxyPort int `json:"proxyPort,omitempty" yaml:"proxyPort,omitempty"`
}
// Endpoints is a collection of endpoints that implement the actual service, for example:
// Name: "mysql", Endpoints: ["10.10.1.1:1909", "10.10.2.2:8834"]
type Endpoints struct {
TypeMeta `json:",inline" yaml:",inline"`
ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
// Endpoints is the list of host:port pairs backing the service.
Endpoints []string `json:"endpoints,omitempty" yaml:"endpoints,omitempty"`
}
// EndpointsList is a list of endpoints.
type EndpointsList struct {
TypeMeta `json:",inline" yaml:",inline"`
ListMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
Items []Endpoints `json:"items,omitempty" yaml:"items,omitempty"`
}
// NodeResources represents resources on a Kubernetes system node
// see https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/resources.md for more details.
type NodeResources struct {
// Capacity represents the available resources.
Capacity ResourceList `json:"capacity,omitempty" yaml:"capacity,omitempty"`
}
// ResourceName identifies a kind of resource (e.g. cpu, memory).
type ResourceName string
// ResourceList maps resource names to quantities.
// TODO Replace this with a more complete "Quantity" struct
type ResourceList map[ResourceName]util.IntOrString
// Minion is a worker node in Kubernetes.
// The name of the minion according to etcd is in ID.
type Minion struct {
TypeMeta `json:",inline" yaml:",inline"`
ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
// Queried from cloud provider, if available.
HostIP string `json:"hostIP,omitempty" yaml:"hostIP,omitempty"`
// Resources available on the node
NodeResources NodeResources `json:"resources,omitempty" yaml:"resources,omitempty"`
}
// MinionList is a list of minions.
type MinionList struct {
TypeMeta `json:",inline" yaml:",inline"`
ListMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
Items []Minion `json:"items,omitempty" yaml:"items,omitempty"`
}
// Binding is written by a scheduler to cause a pod to be bound to a host.
type Binding struct {
TypeMeta `json:",inline" yaml:",inline"`
ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
// PodID identifies the pod being bound.
PodID string `json:"podID" yaml:"podID"`
// Host is the node the pod should be scheduled onto.
Host string `json:"host" yaml:"host"`
}
// Status is a return value for calls that don't return other objects.
// TODO: this could go in apiserver, but I'm including it here so clients needn't
// import both.
type Status struct {
TypeMeta `json:",inline" yaml:",inline"`
ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
// One of: "Success", "Failure", "Working" (for operations not yet completed)
// See the StatusSuccess/StatusFailure/StatusWorking constants below.
Status string `json:"status,omitempty" yaml:"status,omitempty"`
// A human-readable description of the status of this operation.
Message string `json:"message,omitempty" yaml:"message,omitempty"`
// A machine-readable description of why this operation is in the
// "Failure" or "Working" status. If this value is empty there
// is no information available. A Reason clarifies an HTTP status
// code but does not override it.
Reason StatusReason `json:"reason,omitempty" yaml:"reason,omitempty"`
// Extended data associated with the reason. Each reason may define its
// own extended details. This field is optional and the data returned
// is not guaranteed to conform to any schema except that defined by
// the reason type.
Details *StatusDetails `json:"details,omitempty" yaml:"details,omitempty"`
// Suggested HTTP return code for this status, 0 if not set.
Code int `json:"code,omitempty" yaml:"code,omitempty"`
}
// StatusDetails is a set of additional properties that MAY be set by the
// server to provide additional information about a response. The Reason
// field of a Status object defines what attributes will be set. Clients
// must ignore fields that do not match the defined type of each attribute,
// and should assume that any attribute may be empty, invalid, or under
// defined.
type StatusDetails struct {
// The ID attribute of the resource associated with the status StatusReason
// (when there is a single ID which can be described).
ID string `json:"id,omitempty" yaml:"id,omitempty"`
// The kind attribute of the resource associated with the status StatusReason.
// On some operations may differ from the requested resource Kind.
Kind string `json:"kind,omitempty" yaml:"kind,omitempty"`
// The Causes array includes more details associated with the StatusReason
// failure. Not all StatusReasons may provide detailed causes.
Causes []StatusCause `json:"causes,omitempty" yaml:"causes,omitempty"`
}
// Values of Status.Status
const (
StatusSuccess = "Success"
StatusFailure = "Failure"
StatusWorking = "Working"
)
// StatusReason is an enumeration of possible failure causes. Each StatusReason
// must map to a single HTTP status code, but multiple reasons may map
// to the same HTTP status code.
// TODO: move to apiserver
type StatusReason string
const (
// StatusReasonUnknown means the server has declined to indicate a specific reason.
// The details field may contain other information about this error.
// Status code 500.
StatusReasonUnknown StatusReason = ""
// StatusReasonWorking means the server is processing this request and will complete
// at a future time.
// Details (optional):
// "kind" string - the name of the resource being referenced ("operation" today)
// "id" string - the identifier of the Operation resource where updates
// will be returned
// Headers (optional):
// "Location" - HTTP header populated with a URL that can retrieved the final
// status of this operation.
// Status code 202
StatusReasonWorking StatusReason = "Working"
// StatusReasonNotFound means one or more resources required for this operation
// could not be found.
// Details (optional):
// "kind" string - the kind attribute of the missing resource
// on some operations may differ from the requested
// resource.
// "id" string - the identifier of the missing resource
// Status code 404
StatusReasonNotFound StatusReason = "NotFound"
// StatusReasonAlreadyExists means the resource you are creating already exists.
// Details (optional):
// "kind" string - the kind attribute of the conflicting resource
// "id" string - the identifier of the conflicting resource
// Status code 409
StatusReasonAlreadyExists StatusReason = "AlreadyExists"
// StatusReasonConflict means the requested update operation cannot be completed
// due to a conflict in the operation. The client may need to alter the request.
// Each resource may define custom details that indicate the nature of the
// conflict.
// Status code 409
StatusReasonConflict StatusReason = "Conflict"
// StatusReasonInvalid means the requested create or update operation cannot be
// completed due to invalid data provided as part of the request. The client may
// need to alter the request. When set, the client may use the StatusDetails
// message field as a summary of the issues encountered.
// Details (optional):
// "kind" string - the kind attribute of the invalid resource
// "id" string - the identifier of the invalid resource
// "causes" - one or more StatusCause entries indicating the data in the
// provided resource that was invalid. The code, message, and
// field attributes will be set.
// Status code 422
StatusReasonInvalid StatusReason = "Invalid"
)
// StatusCause provides more information about an api.Status failure, including
// cases when multiple errors are encountered.
type StatusCause struct {
// A machine-readable description of the cause of the error. If this value is
// empty there is no information available.
// Note: serialized under the "reason" key despite the Go field name.
Type CauseType `json:"reason,omitempty" yaml:"reason,omitempty"`
// A human-readable description of the cause of the error. This field may be
// presented as-is to a reader.
Message string `json:"message,omitempty" yaml:"message,omitempty"`
// The field of the resource that has caused this error, as named by its JSON
// serialization. May include dot and postfix notation for nested attributes.
// Arrays are zero-indexed. Fields may appear more than once in an array of
// causes due to fields having multiple errors.
// Optional.
//
// Examples:
// "name" - the field "name" on the current resource
// "items[0].name" - the field "name" on the first array entry in "items"
Field string `json:"field,omitempty" yaml:"field,omitempty"`
}
// CauseType is a machine readable value providing more detail about what
// occurred in a status response. An operation may have multiple causes for a
// status (whether Failure, Success, or Working).
type CauseType string
const (
// CauseTypeFieldValueNotFound is used to report failure to find a requested value
// (e.g. looking up an ID).
CauseTypeFieldValueNotFound CauseType = "FieldValueNotFound"
// CauseTypeFieldValueRequired is used to report required values that are not
// provided (e.g. empty strings, null values, or empty arrays).
CauseTypeFieldValueRequired CauseType = "FieldValueRequired"
// CauseTypeFieldValueDuplicate is used to report collisions of values that must be
// unique (e.g. unique IDs).
CauseTypeFieldValueDuplicate CauseType = "FieldValueDuplicate"
// CauseTypeFieldValueInvalid is used to report malformed values (e.g. failed regex
// match).
CauseTypeFieldValueInvalid CauseType = "FieldValueInvalid"
// CauseTypeFieldValueNotSupported is used to report valid (as per formatting rules)
// values that can not be handled (e.g. an enumerated string).
CauseTypeFieldValueNotSupported CauseType = "FieldValueNotSupported"
)
// ServerOp is an operation delivered to API clients.
type ServerOp struct {
TypeMeta `yaml:",inline" json:",inline"`
ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
}
// ServerOpList is a list of operations, as delivered to API clients.
type ServerOpList struct {
TypeMeta `yaml:",inline" json:",inline"`
ListMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
Items []ServerOp `yaml:"items,omitempty" json:"items,omitempty"`
}
// ObjectReference contains enough information to let you inspect or modify the referred object.
type ObjectReference struct {
Kind string `json:"kind,omitempty" yaml:"kind,omitempty"`
Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
Name string `json:"name,omitempty" yaml:"name,omitempty"`
UID string `json:"uid,omitempty" yaml:"uid,omitempty"`
APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty"`
ResourceVersion string `json:"resourceVersion,omitempty" yaml:"resourceVersion,omitempty"`
// Optional. If referring to a piece of an object instead of an entire object, this string
// should contain a valid field access statement. For example,
// if the object reference is to a container within a pod, this would take on a value like:
// "desiredState.manifest.containers[2]". Such statements are valid language constructs in
// both go and JavaScript. This syntax is chosen only to have some well-defined way of
// referencing a part of an object.
// TODO: this design is not final and this field is subject to change in the future.
FieldPath string `json:"fieldPath,omitempty" yaml:"fieldPath,omitempty"`
}
// Event is a report of an event somewhere in the cluster.
// TODO: Decide whether to store these separately or with the object they apply to.
type Event struct {
	// Standard type metadata (kind/apiVersion).
	TypeMeta `yaml:",inline" json:",inline"`
	// Standard object metadata (name, namespace, labels, ...).
	ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	// Required. The object that this event is about.
	InvolvedObject ObjectReference `json:"involvedObject,omitempty" yaml:"involvedObject,omitempty"`
	// Should be a short, machine understandable string that describes the current status
	// of the referred object. This should not give the reason for being in this state.
	// Examples: "running", "cantStart", "cantSchedule", "deleted".
	// It's OK for components to make up statuses to report here, but the same string should
	// always be used for the same status.
	// TODO: define a way of making sure these are consistent and don't collide.
	// TODO: provide exact specification for format.
	Status string `json:"status,omitempty" yaml:"status,omitempty"`
	// Optional; this should be a short, machine understandable string that gives the reason
	// for the transition into the object's current status. For example, if ObjectStatus is
	// "cantStart", StatusReason might be "imageNotFound".
	// TODO: provide exact specification for format.
	Reason string `json:"reason,omitempty" yaml:"reason,omitempty"`
	// Optional. A human-readable description of the status of this operation.
	// TODO: decide on maximum length.
	Message string `json:"message,omitempty" yaml:"message,omitempty"`
	// Optional. The component reporting this event. Should be a short machine understandable string.
	// TODO: provide exact specification for format.
	Source string `json:"source,omitempty" yaml:"source,omitempty"`
	// The time at which the client recorded the event. (Time of server receipt is in TypeMeta.)
	Timestamp util.Time `json:"timestamp,omitempty" yaml:"timestamp,omitempty"`
}
// EventList is a list of events.
type EventList struct {
	// Standard type metadata.
	TypeMeta `yaml:",inline" json:",inline"`
	// Standard list metadata.
	ListMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	// Items is the list of events in this collection.
	Items []Event `yaml:"items,omitempty" json:"items,omitempty"`
}
// ContainerManifest corresponds to the Container Manifest format, documented at:
// https://developers.google.com/compute/docs/containers/container_vms#container_manifest
// This is used as the representation of Kubernetes workloads.
// DEPRECATED: Replaced with BoundPod
type ContainerManifest struct {
	// Required: This must be a supported version string, such as "v1beta1".
	Version string `yaml:"version" json:"version"`
	// Required: This must be a DNS_SUBDOMAIN.
	// TODO: ID on Manifest is deprecated and will be removed in the future.
	ID string `yaml:"id" json:"id"`
	// TODO: UUID on Manifest is deprecated in the future once we are done
	// with the API refactoring. It is required for now to determine the instance
	// of a Pod.
	UUID string `yaml:"uuid,omitempty" json:"uuid,omitempty"`
	// Volumes available for mounting by the containers in this manifest.
	Volumes []Volume `yaml:"volumes" json:"volumes"`
	// Containers to run as part of this manifest.
	Containers []Container `yaml:"containers" json:"containers"`
	// RestartPolicy governs how terminated containers are restarted.
	RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" yaml:"restartPolicy,omitempty"`
}
// ContainerManifestList is used to communicate container manifests to kubelet.
// DEPRECATED: Replaced with BoundPods
type ContainerManifestList struct {
	// Standard type metadata.
	TypeMeta `json:",inline" yaml:",inline"`
	// Standard list metadata.
	ListMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	// Items is the list of container manifests.
	Items []ContainerManifest `json:"items,omitempty" yaml:"items,omitempty"`
}
// Included in partial form from v1beta3 to replace ContainerManifest
// PodSpec is a description of a pod
type PodSpec struct {
	// Volumes available for mounting by containers in this pod.
	Volumes []Volume `json:"volumes" yaml:"volumes"`
	// Containers that run in this pod.
	Containers []Container `json:"containers" yaml:"containers"`
	// RestartPolicy governs how terminated containers are restarted.
	RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" yaml:"restartPolicy,omitempty"`
}
// BoundPod is a collection of containers that should be run on a host. A BoundPod
// defines how a Pod may change after a Binding is created. A Pod is a request to
// execute a pod, whereas a BoundPod is the specification that would be run on a server.
type BoundPod struct {
	// Standard type metadata.
	TypeMeta `json:",inline" yaml:",inline"`
	// Standard object metadata.
	ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	// Spec defines the behavior of a pod.
	Spec PodSpec `json:"spec,omitempty" yaml:"spec,omitempty"`
}
// BoundPods is a list of Pods bound to a common server. The resource version of
// the pod list is guaranteed to only change when the list of bound pods changes.
type BoundPods struct {
	// Standard type metadata.
	TypeMeta `json:",inline" yaml:",inline"`
	// Standard object metadata.
	ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	// Host is the name of a node that these pods were bound to.
	Host string `json:"host" yaml:"host"`
	// Items is the list of all pods bound to a given host.
	Items []BoundPod `json:"items" yaml:"items"`
}
|
package auth
import (
"encoding/json"
"fmt"
"net/http"
"strings"
"time"
"sync"
jwt "github.com/dgrijalva/jwt-go"
uuid "github.com/nu7hatch/gouuid"
log "github.com/sirupsen/logrus"
)
const (
	// sessionTokenName is the name of the session cookie issued to clients.
	sessionTokenName = "kuill"
	// JWT claim keys used inside session tokens.
	claimExpires   = "exp"  // expiration time (unix seconds)
	claimNotBefore = "nbf"  // not-before time (unix seconds)
	claimCSRFToken = "csrf" // per-session CSRF token
	claimUserID    = "uid"  // authenticated user ID
	claimGroups    = "grp"  // comma-separated list of groups
)
// Context is a holder for the currently authenticated user's information
type Context interface {
	// User returns the authenticated user's ID.
	User() string
	// Groups returns the groups the user belongs to.
	Groups() []string
}
// Authenticator is a pluggable interface for authentication providers
type Authenticator interface {
	// Name returns the url-safe unique name of this authenticator
	Name() string
	// Description returns the user-friendly description of this authenticator
	Description() string
	// Type returns the type of this authenticator
	Type() string
	// GetHandlers returns the handlers for this authenticator; the set of
	// handlers must include a "login" handler which will be triggered
	// GetHandlers() map[string]http.HandlerFunc
	// Authenticate handles a login request; returning a non-nil SessionToken
	// means the user was successfully authenticated.
	Authenticate(w http.ResponseWriter, r *http.Request) (*SessionToken, error)
	// LoginURL returns the initial login URL for this handler
	LoginURL() string
	// IconURL returns an icon URL to signify this login method; empty string implies a default can be used
	IconURL() string
	// PostWithCredentials returns true if this authenticator expects username/password credentials be POST'd
	PostWithCredentials() bool
}
// Delegate is a function which requires user and group information along with
// the standard http request/response parameters
type Delegate func(w http.ResponseWriter, r *http.Request, authentication Context)
// NewAuthDelegate wraps delegate in an http.HandlerFunc that resolves the
// caller's session from the request and forwards it; requests without a
// valid session receive 401 Unauthorized.
func (m *Manager) NewAuthDelegate(delegate Delegate) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		session, err := m.ParseSessionToken(r)
		if session != nil {
			// Valid session: refresh it if close to expiry, then delegate.
			m.keepSessionAlive(session, w)
			delegate(w, r, session)
			return
		}
		if err != nil {
			log.Warnf("Authentication error: %v", err)
		} else {
			log.Infof("No existing/valid session")
		}
		http.Error(w, "Unauthorized", http.StatusUnauthorized)
	}
}
// Manager manages supported authentication mechanisms
type Manager struct {
	// authenticators maps "<type>:<name>" keys to registered providers.
	authenticators map[string]Authenticator
	// loginMethodsResponse caches the JSON served by /auth/login_methods.
	loginMethodsResponse []byte
	// mutex guards authenticators and loginMethodsResponse during registration.
	mutex sync.Mutex
	// hmac is the random key used to sign session JWTs.
	hmac []byte
}
// NewAuthManager creates a new authentication manager instance, generates a
// random HMAC signing key for session tokens, and registers the
// /auth/login_methods and /auth/user_info endpoints on the default mux.
//
// Bug fix: errors from uuid.NewV4 and buildLoginMethodsResponse were
// previously discarded even though this function returns an error; a NewV4
// failure would have nil-panicked on hmc.String().
func NewAuthManager() (*Manager, error) {
	hmc, err := uuid.NewV4()
	if err != nil {
		return nil, fmt.Errorf("Failed to generate session signing key: %v", err)
	}
	m := &Manager{
		authenticators: make(map[string]Authenticator),
		hmac:           []byte(hmc.String()),
	}
	m.loginMethodsResponse, err = m.buildLoginMethodsResponse()
	if err != nil {
		return nil, fmt.Errorf("Problem marshalling login methods response: %v", err)
	}
	http.HandleFunc("/auth/login_methods", m.listLoginMethods)
	http.HandleFunc("/auth/user_info", m.userInfo)
	return m, nil
}
// buildLoginMethodsResponse marshals the set of registered authenticators
// into the JSON payload served by the /auth/login_methods endpoint.
func (m *Manager) buildLoginMethodsResponse() ([]byte, error) {
	methods := make([]map[string]interface{}, 0, len(m.authenticators))
	for id, a := range m.authenticators {
		entry := map[string]interface{}{
			"id":         id,
			"name":       a.Name(),
			"desc":       a.Description(),
			"type":       a.Type(),
			"url":        a.LoginURL(),
			"icon":       a.IconURL(),
			"post_creds": a.PostWithCredentials(),
		}
		methods = append(methods, entry)
	}
	resp := map[string]interface{}{"login_methods": methods}
	return json.Marshal(&resp)
}
// RegisterAuthenticator registers an authentication provider under the key
// "<type>:<name>", wires its login URL into the default HTTP mux, and
// refreshes the cached /auth/login_methods payload. It returns an error if
// an authenticator with the same key already exists, or if the cached
// response cannot be rebuilt.
func (m *Manager) RegisterAuthenticator(authn Authenticator) error {
	key := fmt.Sprintf("%s:%s", authn.Type(), authn.Name())
	// Guard the authenticators map and cached response against concurrent
	// registrations.
	m.mutex.Lock()
	defer m.mutex.Unlock()
	if _, ok := m.authenticators[key]; ok {
		return fmt.Errorf("An authenticator already exists with key '%s'", key)
	}
	m.authenticators[key] = authn
	// Login handler: delegate to the authenticator; a non-nil session token
	// means authentication succeeded and the session cookie can be issued.
	var handlerFunc http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
		sessionToken, err := authn.Authenticate(w, r)
		if err != nil {
			log.Error(err)
		} else if sessionToken != nil {
			m.completeAuthentication(sessionToken, w, r)
		}
	}
	// NOTE(review): the handler is registered before the cached response is
	// rebuilt, so the login URL stays live even when an error is returned
	// below — confirm this is intended.
	http.HandleFunc(authn.LoginURL(), handlerFunc)
	var err error
	m.loginMethodsResponse, err = m.buildLoginMethodsResponse()
	if err != nil {
		return fmt.Errorf("Problem marshalling cached authenticators response: %v", err)
	}
	log.Infof("Enabled authenticator: %s => %s", key, authn.LoginURL())
	return nil
}
// listLoginMethods serves the cached JSON description of the registered
// authenticators. Only GET is allowed.
func (m *Manager) listLoginMethods(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
		// Bug fix: without this return the cached response body was still
		// written after the 405 error.
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Content-Length", fmt.Sprintf("%d", len(m.loginMethodsResponse)))
	w.Write(m.loginMethodsResponse)
}
// userInfo is the JSON payload returned by the /auth/user_info endpoint,
// describing the currently authenticated session.
type userInfo struct {
	// User is the authenticated user's ID.
	User string `json:"user,omitempty"`
	// Groups lists the groups the user belongs to.
	// Bug fix: this must be a slice — SessionToken.Groups() returns
	// []string, and assigning that to the previous plain string field
	// does not compile.
	Groups []string `json:"groups,omitempty"`
	// SessionExpires is the session expiration in RFC3339 format.
	SessionExpires string `json:"session_expires,omitempty"`
}
// userInfo responds with the current session's user, groups, and expiry;
// an anonymous request yields an empty JSON object. Only GET is allowed.
func (m *Manager) userInfo(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
		// Bug fix: without this return the handler continued and wrote a
		// body after the 405 error.
		return
	}
	session, _ := m.ParseSessionToken(r)
	m.keepSessionAlive(session, w)
	m.respondWithUserInfo(session, w)
}
// respondWithUserInfo writes a JSON userInfo payload describing the given
// session; for a nil session an empty object is written.
func (m *Manager) respondWithUserInfo(session *SessionToken, w http.ResponseWriter) {
	var resp userInfo
	if session != nil {
		resp.User = session.User()
		// NOTE(review): session.Groups() returns []string but userInfo.Groups
		// is declared as string — this assignment does not compile; the
		// struct field should be []string.
		resp.Groups = session.Groups()
		resp.SessionExpires = time.Unix(session.Expires(), 0).Format(time.RFC3339)
	}
	data, err := json.Marshal(resp)
	if err != nil {
		http.Error(w, "Failed marshalling user response", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Content-Length", fmt.Sprintf("%d", len(data)))
	w.Write(data)
}
// keepSessionAlive renews the session cookie when the token is close to
// expiring: if the token expires within the next minute, a fresh token
// carrying the same user, groups, and custom claims is issued.
func (m *Manager) keepSessionAlive(session *SessionToken, w http.ResponseWriter) {
	if session == nil || !session.Valid {
		return
	}
	oldExpires := session.Expires()
	// Bug fix: the previous check compared against time.Now().Unix() MINUS
	// one minute, which only matched tokens already expired for over a
	// minute; renew when the token expires within the next minute.
	if oldExpires >= time.Now().Add(time.Minute).Unix() {
		return
	}
	// Copy the claims but drop the timing claims; NewSessionToken applies
	// additionalClaims last, so passing the stale exp/nbf through (as the
	// old code did) silently undid the renewal. Groups were also dropped
	// before ([]string{}); preserve them.
	claims := make(map[string]interface{}, len(session.claims))
	for k, v := range session.claims {
		if k == claimExpires || k == claimNotBefore {
			continue
		}
		claims[k] = v
	}
	session = NewSessionToken(session.User(), session.Groups(), claims)
	m.writeSessionCookie(session, w)
	if log.GetLevel() >= log.DebugLevel {
		// Bug fix: the format string had two %d verbs but only one value.
		log.Debugf("Renewed session for %s: expires %d -> %d", session.User(), oldExpires, session.Expires())
	}
}
// writeSessionCookie signs the session token with the manager's HMAC key and
// sets it on the response as an HttpOnly cookie scoped to "/".
func (m *Manager) writeSessionCookie(session *SessionToken, w http.ResponseWriter) {
	signedJWT, err := session.SignedString(m.hmac)
	if err != nil {
		http.Error(w, "Failed to sign JWT: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// NOTE(review): the cookie is not marked Secure, so over plain HTTP the
	// session token is exposed — confirm TLS is terminated in front of this.
	http.SetCookie(w, &http.Cookie{
		Name:     sessionTokenName,
		Value:    signedJWT,
		HttpOnly: true,
		Path:     "/",
	})
}
// TODO: this should be re-worked so that it wraps authenticators' authenticate methods
// completeAuthentication writes the session cookie and then either redirects
// to the caller-supplied "target" URL or responds with the user info payload.
func (m *Manager) completeAuthentication(session *SessionToken, w http.ResponseWriter, r *http.Request) {
	m.writeSessionCookie(session, w)
	// NOTE(review): "target" is taken from the query string unvalidated; an
	// attacker-supplied absolute URL makes this an open redirect — consider
	// restricting redirects to same-origin paths.
	targetURL := r.URL.Query().Get("target")
	if len(targetURL) > 0 {
		http.Redirect(w, r, targetURL, http.StatusFound)
	} else {
		m.respondWithUserInfo(session, w)
	}
}
// SessionToken is a wrapper around JWT with methods for easy user/group access
type SessionToken struct {
	*jwt.Token
	// claims caches the token's claim map for direct access.
	claims jwt.MapClaims
}
// User recovers the userID from a session token. It returns the empty string
// when the claim is missing or not a string (e.g. a malformed token) instead
// of panicking on the type assertion as before.
func (s *SessionToken) User() string {
	user, _ := s.claims[claimUserID].(string)
	return user
}
// Expires returns the expiration of the token in Unix time, the number of seconds elapsed
// since January 1, 1970 UTC. JSON-decoded claims carry the value as float64,
// locally-built claims as int64; a missing or unexpected claim yields 0
// instead of panicking on the type assertion as before.
func (s *SessionToken) Expires() int64 {
	switch v := s.claims[claimExpires].(type) {
	case int64:
		return v
	case float64:
		return int64(v)
	default:
		return 0
	}
}
// Groups recovers the groups from a session token's comma-separated "grp"
// claim. Bug fix: strings.Split("", ",") returns [""], so a user with no
// groups previously appeared to have one empty-named group; return nil
// instead. A missing/non-string claim no longer panics.
func (s *SessionToken) Groups() []string {
	raw, _ := s.claims[claimGroups].(string)
	if raw == "" {
		return nil
	}
	return strings.Split(raw, ",")
}
// NewSessionToken generates a new auth token suitable for storing user session state
// The token is signed with HS256 and carries nbf/exp (15 minute lifetime), a
// fresh CSRF token, the user ID, and the comma-joined group list.
func NewSessionToken(user string, groups []string, additionalClaims map[string]interface{}) *SessionToken {
	// NOTE(review): the NewV4 error is ignored; a failure would nil-panic on
	// String() — confirm this is acceptable.
	csrfToken, _ := uuid.NewV4()
	claims := jwt.MapClaims{
		claimNotBefore: time.Now().Unix(),
		claimExpires:   time.Now().Add(time.Minute * 15).Unix(),
		claimCSRFToken: csrfToken.String(),
		claimUserID:    user,
		claimGroups:    strings.Join(groups, ","),
	}
	if additionalClaims != nil {
		// Additional claims are applied last, so they override any of the
		// standard claims above — including exp/nbf.
		for k, v := range additionalClaims {
			claims[k] = v
		}
	}
	return &SessionToken{Token: jwt.NewWithClaims(jwt.SigningMethodHS256, claims), claims: claims}
}
// ParseSessionToken recovers the session from the request's session cookie.
// It returns (nil, nil) when no cookie is present, an error when the token
// fails signature/claims validation, and the parsed SessionToken otherwise.
func (m *Manager) ParseSessionToken(r *http.Request) (*SessionToken, error) {
	cookie, err := r.Cookie(sessionTokenName)
	if cookie == nil || err != nil {
		// No session cookie: not an error, just an anonymous request.
		return nil, nil
	}
	if log.GetLevel() >= log.DebugLevel {
		// NOTE(review): this logs the raw session JWT, so debug logs contain
		// usable credentials — confirm this is acceptable.
		log.Debugf("Found cookie for %s; %s: %s", r.URL, sessionTokenName, cookie.Value)
	}
	token, err := jwt.Parse(cookie.Value, func(token *jwt.Token) (interface{}, error) {
		// Don't forget to validate the alg is what you expect:
		if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
		}
		return m.hmac, nil
	})
	if err != nil {
		return nil, err
	} else if claims, ok := token.Claims.(jwt.MapClaims); ok {
		st := &SessionToken{Token: token, claims: claims}
		if log.GetLevel() >= log.DebugLevel {
			log.Debugf("Resolved session token %v", st)
		}
		return st, nil
	}
	return nil, fmt.Errorf("Failed to parse token claims")
}
fix groups
package auth
import (
"encoding/json"
"fmt"
"net/http"
"strings"
"time"
"sync"
jwt "github.com/dgrijalva/jwt-go"
uuid "github.com/nu7hatch/gouuid"
log "github.com/sirupsen/logrus"
)
const (
	// sessionTokenName is the name of the session cookie issued to clients.
	sessionTokenName = "kuill"
	// JWT claim keys used inside session tokens.
	claimExpires   = "exp"  // expiration time (unix seconds)
	claimNotBefore = "nbf"  // not-before time (unix seconds)
	claimCSRFToken = "csrf" // per-session CSRF token
	claimUserID    = "uid"  // authenticated user ID
	claimGroups    = "grp"  // comma-separated list of groups
)
// Context is a holder for the currently authenticated user's information
type Context interface {
	// User returns the authenticated user's ID.
	User() string
	// Groups returns the groups the user belongs to.
	Groups() []string
}
// Authenticator is a pluggable interface for authentication providers
type Authenticator interface {
	// Name returns the url-safe unique name of this authenticator
	Name() string
	// Description returns the user-friendly description of this authenticator
	Description() string
	// Type returns the type of this authenticator
	Type() string
	// GetHandlers returns the handlers for this authenticator; the set of
	// handlers must include a "login" handler which will be triggered
	// GetHandlers() map[string]http.HandlerFunc
	// Authenticate handles a login request; returning a non-nil SessionToken
	// means the user was successfully authenticated.
	Authenticate(w http.ResponseWriter, r *http.Request) (*SessionToken, error)
	// LoginURL returns the initial login URL for this handler
	LoginURL() string
	// IconURL returns an icon URL to signify this login method; empty string implies a default can be used
	IconURL() string
	// PostWithCredentials returns true if this authenticator expects username/password credentials be POST'd
	PostWithCredentials() bool
}
// Delegate is a function which requires user and group information along with
// the standard http request/response parameters
type Delegate func(w http.ResponseWriter, r *http.Request, authentication Context)
// NewAuthDelegate builds an http.HandlerFunc around delegate: the request's
// session is resolved and handed through; if no valid session exists the
// request is rejected with 401 Unauthorized.
func (m *Manager) NewAuthDelegate(delegate Delegate) http.HandlerFunc {
	handler := func(w http.ResponseWriter, r *http.Request) {
		session, parseErr := m.ParseSessionToken(r)
		if session == nil {
			switch {
			case parseErr != nil:
				log.Warnf("Authentication error: %v", parseErr)
			default:
				log.Infof("No existing/valid session")
			}
			http.Error(w, "Unauthorized", http.StatusUnauthorized)
			return
		}
		// Valid session: refresh it if close to expiry, then delegate.
		m.keepSessionAlive(session, w)
		delegate(w, r, session)
	}
	return handler
}
// Manager manages supported authentication mechanisms
type Manager struct {
	// authenticators maps "<type>:<name>" keys to registered providers.
	authenticators map[string]Authenticator
	// loginMethodsResponse caches the JSON served by /auth/login_methods.
	loginMethodsResponse []byte
	// mutex guards authenticators and loginMethodsResponse during registration.
	mutex sync.Mutex
	// hmac is the random key used to sign session JWTs.
	hmac []byte
}
// NewAuthManager creates a new authentication manager instance, generates a
// random HMAC signing key for session tokens, and registers the
// /auth/login_methods and /auth/user_info endpoints on the default mux.
//
// Bug fix: errors from uuid.NewV4 and buildLoginMethodsResponse were
// previously discarded even though this function returns an error; a NewV4
// failure would have nil-panicked on hmc.String().
func NewAuthManager() (*Manager, error) {
	hmc, err := uuid.NewV4()
	if err != nil {
		return nil, fmt.Errorf("Failed to generate session signing key: %v", err)
	}
	m := &Manager{
		authenticators: make(map[string]Authenticator),
		hmac:           []byte(hmc.String()),
	}
	m.loginMethodsResponse, err = m.buildLoginMethodsResponse()
	if err != nil {
		return nil, fmt.Errorf("Problem marshalling login methods response: %v", err)
	}
	http.HandleFunc("/auth/login_methods", m.listLoginMethods)
	http.HandleFunc("/auth/user_info", m.userInfo)
	return m, nil
}
// buildLoginMethodsResponse renders the registered authenticators as the
// JSON document served by /auth/login_methods.
func (m *Manager) buildLoginMethodsResponse() ([]byte, error) {
	entries := make([]map[string]interface{}, 0, len(m.authenticators))
	for key, provider := range m.authenticators {
		entries = append(entries, map[string]interface{}{
			"id":         key,
			"name":       provider.Name(),
			"desc":       provider.Description(),
			"type":       provider.Type(),
			"url":        provider.LoginURL(),
			"icon":       provider.IconURL(),
			"post_creds": provider.PostWithCredentials(),
		})
	}
	payload := map[string]interface{}{"login_methods": entries}
	return json.Marshal(&payload)
}
// RegisterAuthenticator registers an authentication provider under the key
// "<type>:<name>", wires its login URL into the default HTTP mux, and
// refreshes the cached /auth/login_methods payload. It returns an error if
// an authenticator with the same key already exists, or if the cached
// response cannot be rebuilt.
func (m *Manager) RegisterAuthenticator(authn Authenticator) error {
	key := fmt.Sprintf("%s:%s", authn.Type(), authn.Name())
	// Guard the authenticators map and cached response against concurrent
	// registrations.
	m.mutex.Lock()
	defer m.mutex.Unlock()
	if _, ok := m.authenticators[key]; ok {
		return fmt.Errorf("An authenticator already exists with key '%s'", key)
	}
	m.authenticators[key] = authn
	// Login handler: delegate to the authenticator; a non-nil session token
	// means authentication succeeded and the session cookie can be issued.
	var handlerFunc http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
		sessionToken, err := authn.Authenticate(w, r)
		if err != nil {
			log.Error(err)
		} else if sessionToken != nil {
			m.completeAuthentication(sessionToken, w, r)
		}
	}
	// NOTE(review): the handler is registered before the cached response is
	// rebuilt, so the login URL stays live even when an error is returned
	// below — confirm this is intended.
	http.HandleFunc(authn.LoginURL(), handlerFunc)
	var err error
	m.loginMethodsResponse, err = m.buildLoginMethodsResponse()
	if err != nil {
		return fmt.Errorf("Problem marshalling cached authenticators response: %v", err)
	}
	log.Infof("Enabled authenticator: %s => %s", key, authn.LoginURL())
	return nil
}
// listLoginMethods serves the cached JSON description of the registered
// authenticators. Only GET is allowed.
func (m *Manager) listLoginMethods(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
		// Bug fix: without this return the cached response body was still
		// written after the 405 error.
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Content-Length", fmt.Sprintf("%d", len(m.loginMethodsResponse)))
	w.Write(m.loginMethodsResponse)
}
// userInfo is the JSON payload returned by the /auth/user_info endpoint,
// describing the currently authenticated session.
type userInfo struct {
	// User is the authenticated user's ID.
	User string `json:"user,omitempty"`
	// Groups lists the groups the user belongs to.
	Groups []string `json:"groups,omitempty"`
	// SessionExpires is the session expiration in RFC3339 format.
	SessionExpires string `json:"session_expires,omitempty"`
}
// userInfo responds with the current session's user, groups, and expiry;
// an anonymous request yields an empty JSON object. Only GET is allowed.
func (m *Manager) userInfo(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
		// Bug fix: without this return the handler continued and wrote a
		// body after the 405 error.
		return
	}
	session, _ := m.ParseSessionToken(r)
	m.keepSessionAlive(session, w)
	m.respondWithUserInfo(session, w)
}
// respondWithUserInfo writes a JSON userInfo payload describing the given
// session; for a nil session an empty object is written.
func (m *Manager) respondWithUserInfo(session *SessionToken, w http.ResponseWriter) {
	var resp userInfo
	if session != nil {
		resp.User = session.User()
		resp.Groups = session.Groups()
		resp.SessionExpires = time.Unix(session.Expires(), 0).Format(time.RFC3339)
	}
	data, err := json.Marshal(resp)
	if err != nil {
		http.Error(w, "Failed marshalling user response", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Content-Length", fmt.Sprintf("%d", len(data)))
	w.Write(data)
}
// keepSessionAlive renews the session cookie when the token is close to
// expiring: if the token expires within the next minute, a fresh token
// carrying the same user, groups, and custom claims is issued.
func (m *Manager) keepSessionAlive(session *SessionToken, w http.ResponseWriter) {
	if session == nil || !session.Valid {
		return
	}
	oldExpires := session.Expires()
	// Bug fix: the previous check compared against time.Now().Unix() MINUS
	// one minute, which only matched tokens already expired for over a
	// minute; renew when the token expires within the next minute.
	if oldExpires >= time.Now().Add(time.Minute).Unix() {
		return
	}
	// Copy the claims but drop the timing claims; NewSessionToken applies
	// additionalClaims last, so passing the stale exp/nbf through (as the
	// old code did) silently undid the renewal. Groups were also dropped
	// before ([]string{}); preserve them.
	claims := make(map[string]interface{}, len(session.claims))
	for k, v := range session.claims {
		if k == claimExpires || k == claimNotBefore {
			continue
		}
		claims[k] = v
	}
	session = NewSessionToken(session.User(), session.Groups(), claims)
	m.writeSessionCookie(session, w)
	if log.GetLevel() >= log.DebugLevel {
		// Bug fix: the format string had two %d verbs but only one value.
		log.Debugf("Renewed session for %s: expires %d -> %d", session.User(), oldExpires, session.Expires())
	}
}
// writeSessionCookie signs the session token with the manager's HMAC key and
// sets it on the response as an HttpOnly cookie scoped to "/".
func (m *Manager) writeSessionCookie(session *SessionToken, w http.ResponseWriter) {
	signedJWT, err := session.SignedString(m.hmac)
	if err != nil {
		http.Error(w, "Failed to sign JWT: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// NOTE(review): the cookie is not marked Secure, so over plain HTTP the
	// session token is exposed — confirm TLS is terminated in front of this.
	http.SetCookie(w, &http.Cookie{
		Name:     sessionTokenName,
		Value:    signedJWT,
		HttpOnly: true,
		Path:     "/",
	})
}
// TODO: this should be re-worked so that it wraps authenticators' authenticate methods
// completeAuthentication writes the session cookie and then either redirects
// to the caller-supplied "target" URL or responds with the user info payload.
func (m *Manager) completeAuthentication(session *SessionToken, w http.ResponseWriter, r *http.Request) {
	m.writeSessionCookie(session, w)
	// NOTE(review): "target" is taken from the query string unvalidated; an
	// attacker-supplied absolute URL makes this an open redirect — consider
	// restricting redirects to same-origin paths.
	targetURL := r.URL.Query().Get("target")
	if len(targetURL) > 0 {
		http.Redirect(w, r, targetURL, http.StatusFound)
	} else {
		m.respondWithUserInfo(session, w)
	}
}
// SessionToken is a wrapper around JWT with methods for easy user/group access
type SessionToken struct {
	*jwt.Token
	// claims caches the token's claim map for direct access.
	claims jwt.MapClaims
}
// User recovers the userID from a session token. It returns the empty string
// when the claim is missing or not a string (e.g. a malformed token) instead
// of panicking on the type assertion as before.
func (s *SessionToken) User() string {
	user, _ := s.claims[claimUserID].(string)
	return user
}
// Expires returns the expiration of the token in Unix time, the number of seconds elapsed
// since January 1, 1970 UTC. JSON-decoded claims carry the value as float64,
// locally-built claims as int64; a missing or unexpected claim yields 0
// instead of panicking on the type assertion as before.
func (s *SessionToken) Expires() int64 {
	switch v := s.claims[claimExpires].(type) {
	case int64:
		return v
	case float64:
		return int64(v)
	default:
		return 0
	}
}
// Groups recovers the groups from a session token's comma-separated "grp"
// claim. Bug fix: strings.Split("", ",") returns [""], so a user with no
// groups previously appeared to have one empty-named group; return nil
// instead. A missing/non-string claim no longer panics.
func (s *SessionToken) Groups() []string {
	raw, _ := s.claims[claimGroups].(string)
	if raw == "" {
		return nil
	}
	return strings.Split(raw, ",")
}
// NewSessionToken generates a new auth token suitable for storing user session state
// The token is signed with HS256 and carries nbf/exp (15 minute lifetime), a
// fresh CSRF token, the user ID, and the comma-joined group list.
func NewSessionToken(user string, groups []string, additionalClaims map[string]interface{}) *SessionToken {
	// NOTE(review): the NewV4 error is ignored; a failure would nil-panic on
	// String() — confirm this is acceptable.
	csrfToken, _ := uuid.NewV4()
	claims := jwt.MapClaims{
		claimNotBefore: time.Now().Unix(),
		claimExpires:   time.Now().Add(time.Minute * 15).Unix(),
		claimCSRFToken: csrfToken.String(),
		claimUserID:    user,
		claimGroups:    strings.Join(groups, ","),
	}
	if additionalClaims != nil {
		// Additional claims are applied last, so they override any of the
		// standard claims above — including exp/nbf.
		for k, v := range additionalClaims {
			claims[k] = v
		}
	}
	return &SessionToken{Token: jwt.NewWithClaims(jwt.SigningMethodHS256, claims), claims: claims}
}
// ParseSessionToken recovers the session from the request's session cookie.
// It returns (nil, nil) when no cookie is present, an error when the token
// fails signature/claims validation, and the parsed SessionToken otherwise.
func (m *Manager) ParseSessionToken(r *http.Request) (*SessionToken, error) {
	cookie, err := r.Cookie(sessionTokenName)
	if cookie == nil || err != nil {
		// No session cookie: not an error, just an anonymous request.
		return nil, nil
	}
	if log.GetLevel() >= log.DebugLevel {
		// NOTE(review): this logs the raw session JWT, so debug logs contain
		// usable credentials — confirm this is acceptable.
		log.Debugf("Found cookie for %s; %s: %s", r.URL, sessionTokenName, cookie.Value)
	}
	token, err := jwt.Parse(cookie.Value, func(token *jwt.Token) (interface{}, error) {
		// Don't forget to validate the alg is what you expect:
		if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
		}
		return m.hmac, nil
	})
	if err != nil {
		return nil, err
	} else if claims, ok := token.Claims.(jwt.MapClaims); ok {
		st := &SessionToken{Token: token, claims: claims}
		if log.GetLevel() >= log.DebugLevel {
			log.Debugf("Resolved session token %v", st)
		}
		return st, nil
	}
	return nil, fmt.Errorf("Failed to parse token claims")
}
|
package pkg
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
ylib "github.com/ghodss/yaml"
"k8s.io/helm/pkg/proto/hapi/chart"
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/apis/batch"
kext "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/apis/storage"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/yaml"
)
// Generator holds the inputs for Helm chart generation.
type Generator struct {
	// Location is the directory under which the chart directory is created.
	Location string
	// ChartName is the name of the chart to generate.
	ChartName string
	// YamlFiles holds one Kubernetes object YAML document per entry.
	YamlFiles []string
}
// Create generates a Helm chart skeleton from the Generator's YAML manifests.
// It writes the chart file, one template per recognized Kubernetes object,
// the helpers file, and values.yaml under Location/ChartName, and returns the
// chart directory.
//
// Fixes over the previous version: the if/else chain is a switch; the
// MkdirAll and helpers-WriteFile errors are no longer ignored; unrecognized
// kinds are skipped instead of calling WriteFile with an empty path (which
// always failed and aborted via log.Fatal); user-facing message typos fixed.
//
// NOTE(review): parse/IO failures inside the loop still abort the process
// with log.Fatal, matching the original behavior; a library-friendly version
// should return errors instead.
func (g Generator) Create() (string, error) {
	chartfile := chartMetaData(g.ChartName)
	imageTag := "" //TODO
	fmt.Println("Creating Custom Chart...")
	cdir := filepath.Join(g.Location, chartfile.Name)
	fi, err := os.Stat(cdir)
	if err == nil && !fi.IsDir() {
		return cdir, fmt.Errorf("%s already exists and is not a directory", cdir)
	}
	if err := os.MkdirAll(cdir, 0755); err != nil {
		return cdir, err
	}
	// Write the chart metadata file only if it does not already exist.
	cf := filepath.Join(cdir, ChartfileName)
	if _, err := os.Stat(cf); err != nil {
		if len(chartfile.Version) == 0 {
			chartfile.Version = imageTag
		}
		if err := SaveChartfile(cf, &chartfile); err != nil {
			return cdir, err
		}
	}
	valueFile := make(map[string]interface{})
	persistence := make(map[string]interface{})
	templateLocation := filepath.Join(cdir, TemplatesDir)
	if err := os.MkdirAll(templateLocation, 0755); err != nil {
		return cdir, err
	}
	for _, kubeObj := range g.YamlFiles {
		kubeJson, err := yaml.ToJSON([]byte(kubeObj))
		if err != nil {
			log.Fatal(err)
		}
		// Peek at the object's Kind to pick the right decoder/template.
		var objMeta unversioned.TypeMeta
		if err := json.Unmarshal(kubeJson, &objMeta); err != nil {
			log.Fatal(err)
		}
		values := valueFileGenerator{}
		var template, templateName string
		switch objMeta.Kind {
		case "Pod":
			pod := kapi.Pod{}
			if err := json.Unmarshal(kubeJson, &pod); err != nil {
				log.Fatal(err)
			}
			cleanUpObjectMeta(&pod.ObjectMeta)
			cleanUpPodSpec(&pod.Spec)
			name := pod.Name
			templateName = filepath.Join(templateLocation, name+".yaml")
			template, values = podTemplate(pod)
			valueFile[removeCharactersFromName(name)] = values.value
			persistence = addPersistence(persistence, values.persistence)
		case "ReplicationController":
			rc := kapi.ReplicationController{}
			if err := json.Unmarshal(kubeJson, &rc); err != nil {
				log.Fatal(err)
			}
			cleanUpObjectMeta(&rc.ObjectMeta)
			cleanUpPodSpec(&rc.Spec.Template.Spec)
			name := rc.Name
			templateName = filepath.Join(templateLocation, name+".yaml")
			template, values = replicationControllerTemplate(rc)
			valueFile[removeCharactersFromName(name)] = values.value
			persistence = addPersistence(persistence, values.persistence)
		case "Deployment":
			deployment := kext.Deployment{}
			if err := json.Unmarshal(kubeJson, &deployment); err != nil {
				log.Fatal(err)
			}
			cleanUpObjectMeta(&deployment.ObjectMeta)
			cleanUpPodSpec(&deployment.Spec.Template.Spec)
			name := deployment.Name
			templateName = filepath.Join(templateLocation, name+".yaml")
			template, values = deploymentTemplate(deployment)
			valueFile[removeCharactersFromName(name)] = values.value
			persistence = addPersistence(persistence, values.persistence)
		case "Job":
			job := batch.Job{}
			if err := json.Unmarshal(kubeJson, &job); err != nil {
				log.Fatal(err)
			}
			cleanUpObjectMeta(&job.ObjectMeta)
			cleanUpPodSpec(&job.Spec.Template.Spec)
			name := job.Name
			templateName = filepath.Join(templateLocation, name+".yaml")
			template, values = jobTemplate(job)
			valueFile[removeCharactersFromName(name)] = values.value
			persistence = addPersistence(persistence, values.persistence)
		case "DaemonSet":
			daemonset := kext.DaemonSet{}
			if err := json.Unmarshal(kubeJson, &daemonset); err != nil {
				log.Fatal(err)
			}
			cleanUpObjectMeta(&daemonset.ObjectMeta)
			cleanUpPodSpec(&daemonset.Spec.Template.Spec)
			name := daemonset.Name
			templateName = filepath.Join(templateLocation, name+".yaml")
			template, values = daemonsetTemplate(daemonset)
			valueFile[removeCharactersFromName(name)] = values.value
			persistence = addPersistence(persistence, values.persistence)
		case "ReplicaSet":
			rcSet := kext.ReplicaSet{}
			if err := json.Unmarshal(kubeJson, &rcSet); err != nil {
				log.Fatal(err)
			}
			cleanUpObjectMeta(&rcSet.ObjectMeta)
			cleanUpPodSpec(&rcSet.Spec.Template.Spec)
			name := rcSet.Name
			templateName = filepath.Join(templateLocation, name+".yaml")
			template, values = replicaSetTemplate(rcSet)
			valueFile[removeCharactersFromName(name)] = values.value
			persistence = addPersistence(persistence, values.persistence)
		case "StatefulSet":
			statefulset := apps.StatefulSet{}
			if err := json.Unmarshal(kubeJson, &statefulset); err != nil {
				log.Fatal(err)
			}
			cleanUpObjectMeta(&statefulset.ObjectMeta)
			cleanUpPodSpec(&statefulset.Spec.Template.Spec)
			name := statefulset.Name
			templateName = filepath.Join(templateLocation, name+".yaml")
			template, values = statefulsetTemplate(statefulset)
			valueFile[removeCharactersFromName(name)] = values.value
			persistence = addPersistence(persistence, values.persistence)
		case "Service":
			service := kapi.Service{}
			if err := json.Unmarshal(kubeJson, &service); err != nil {
				log.Fatal(err)
			}
			cleanUpObjectMeta(&service.ObjectMeta)
			template, values = serviceTemplate(service)
			name := service.Name
			templateName = filepath.Join(templateLocation, name+".yaml")
			valueFile[removeCharactersFromName(name)] = values.value
			persistence = addPersistence(persistence, values.persistence)
		case "ConfigMap":
			configMap := kapi.ConfigMap{}
			if err := json.Unmarshal(kubeJson, &configMap); err != nil {
				log.Fatal(err)
			}
			cleanUpObjectMeta(&configMap.ObjectMeta)
			name := configMap.Name
			templateName = filepath.Join(templateLocation, name+".yaml")
			template, values = configMapTemplate(configMap)
			valueFile[removeCharactersFromName(name)] = values.value
		case "Secret":
			secret := kapi.Secret{}
			if err := json.Unmarshal(kubeJson, &secret); err != nil {
				log.Fatal(err)
			}
			cleanUpObjectMeta(&secret.ObjectMeta)
			name := secret.Name
			templateName = filepath.Join(templateLocation, name+".yaml")
			template, values = secretTemplate(secret)
			valueFile[removeCharactersFromName(name)] = values.value
		case "PersistentVolumeClaim":
			pvc := kapi.PersistentVolumeClaim{}
			if err := json.Unmarshal(kubeJson, &pvc); err != nil {
				log.Fatal(err)
			}
			cleanUpObjectMeta(&pvc.ObjectMeta)
			name := pvc.Name
			templateName = filepath.Join(templateLocation, name+".yaml")
			template, values = pvcTemplate(pvc)
			// PVC values go under the shared "persistence" section only.
			persistence = addPersistence(persistence, values.persistence)
			//valueFile[removeCharactersFromName(name)] = values.value
		case "PersistentVolume":
			pv := kapi.PersistentVolume{}
			if err := json.Unmarshal(kubeJson, &pv); err != nil {
				log.Fatal(err)
			}
			cleanUpObjectMeta(&pv.ObjectMeta)
			name := pv.Name
			templateName = filepath.Join(templateLocation, name+".yaml")
			template, values = pvTemplate(pv)
			valueFile[removeCharactersFromName(name)] = values.value
		case "StorageClass":
			storageClass := storage.StorageClass{}
			if err := json.Unmarshal(kubeJson, &storageClass); err != nil {
				log.Fatal(err)
			}
			cleanUpObjectMeta(&storageClass.ObjectMeta)
			name := storageClass.Name
			templateName = filepath.Join(templateLocation, name+".yaml")
			template, values = storageClassTemplate(storageClass)
			valueFile[removeCharactersFromName(name)] = values.value
		default:
			fmt.Printf("NOT IMPLEMENTED. ADD MANUALLY ")
			// Skip the write below: templateName is empty for unhandled
			// kinds, and WriteFile("") always failed before.
			continue
		}
		if err := ioutil.WriteFile(templateName, []byte(template), 0644); err != nil {
			log.Fatal(err)
		}
	}
	if len(persistence) != 0 {
		valueFile["persistence"] = persistence
	}
	valueFileData, err := ylib.Marshal(valueFile)
	if err != nil {
		log.Fatal(err)
	}
	helperDir := filepath.Join(templateLocation, HelpersName)
	if err := ioutil.WriteFile(helperDir, []byte(defaultHelpers), 0644); err != nil { //TODO change default values
		return cdir, err
	}
	valueDir := filepath.Join(cdir, ValuesfileName)
	if err := ioutil.WriteFile(valueDir, valueFileData, 0644); err != nil {
		log.Fatal(err)
	}
	fmt.Println("CREATE : SUCCESSFUL")
	return cdir, nil
}
// cleanUpObjectMeta clears server-populated ObjectMeta fields (UID, resource
// version, timestamps, ...) so the object can be re-created from a template.
func cleanUpObjectMeta(m *kapi.ObjectMeta) {
	m.GenerateName = ""
	m.SelfLink = ""
	m.UID = types.UID("")
	m.ResourceVersion = ""
	m.Generation = 0
	// Zero-value Time replaces the creation timestamp.
	m.CreationTimestamp = unversioned.Time{}
	m.DeletionTimestamp = nil
}
// cleanUpPodSpec strips defaulted fields from a PodSpec so they do not get
// baked into the generated template: the termination grace period, the DNS
// policy, and each container's termination-message path.
func cleanUpPodSpec(p *kapi.PodSpec) {
	p.TerminationGracePeriodSeconds = nil
	p.DNSPolicy = kapi.DNSPolicy("")
	// Assign through the index to mutate the slice elements in place.
	for i := range p.Containers {
		p.Containers[i].TerminationMessagePath = ""
	}
	for i := range p.InitContainers {
		p.InitContainers[i].TerminationMessagePath = ""
	}
}
// podTemplate converts a Pod object into a Helm template string plus the
// values extracted from it for values.yaml.
func podTemplate(pod kapi.Pod) (string, valueFileGenerator) {
	volumes := ""
	value := make(map[string]interface{}, 0)
	persistence := make(map[string]interface{}, 0)
	key := removeCharactersFromName(pod.ObjectMeta.Name)
	// Replace metadata and spec fields with {{.Values...}} placeholders,
	// collecting the concrete values into the value map.
	pod.ObjectMeta = generateObjectMetaTemplate(pod.ObjectMeta, key, value, pod.ObjectMeta.Name)
	//pod.Spec.Containers = generateTemplateForContainer(pod.Spec.Containers, value)
	pod.Spec = generateTemplateForPodSpec(pod.Spec, key, value)
	if len(pod.Spec.Volumes) != 0 {
		// Volumes are templated separately and re-attached as raw text below.
		volumes, persistence = generateTemplateForVolume(pod.Spec.Volumes, key, value)
		pod.Spec.Volumes = nil
	}
	tempPodByte, err := ylib.Marshal(pod)
	if err != nil {
		log.Fatal(err)
	}
	tempPod := removeEmptyFields(string(tempPodByte))
	template := ""
	if len(volumes) != 0 {
		template = addVolumeToTemplateForPod(string(tempPod), volumes)
	} else {
		template = string(tempPod)
	}
	data := valueFileGenerator{
		value:       value,
		persistence: persistence,
	}
	return template, data
}
// replicationControllerTemplate converts a ReplicationController into a Helm
// template string plus the values extracted from it for values.yaml.
func replicationControllerTemplate(rc kapi.ReplicationController) (string, valueFileGenerator) {
	volumes := ""
	value := make(map[string]interface{}, 0)
	persistence := make(map[string]interface{}, 0)
	key := removeCharactersFromName(rc.ObjectMeta.Name)
	// Replace metadata and pod-spec fields with {{.Values...}} placeholders,
	// collecting the concrete values into the value map.
	rc.ObjectMeta = generateObjectMetaTemplate(rc.ObjectMeta, key, value, rc.ObjectMeta.Name)
	rc.Spec.Template.Spec = generateTemplateForPodSpec(rc.Spec.Template.Spec, key, value)
	if len(rc.Spec.Template.Spec.Volumes) != 0 {
		// Volumes are templated separately and re-attached below; the
		// "persistence" flag gates the volume block in the chart.
		// NOTE(review): podTemplate does not set this flag — confirm the
		// inconsistency is intended.
		volumes, persistence = generateTemplateForVolume(rc.Spec.Template.Spec.Volumes, key, value)
		value["persistence"] = true
		rc.Spec.Template.Spec.Volumes = nil
	}
	tempRcByte, err := ylib.Marshal(rc)
	if err != nil {
		log.Fatal(err)
	}
	tempRc := removeEmptyFields(string(tempRcByte))
	template := ""
	if len(volumes) != 0 {
		template = addVolumeToTemplateForRc(tempRc, volumes)
	} else {
		template = tempRc
	}
	return template, valueFileGenerator{value: value, persistence: persistence}
}
// replicaSetTemplate renders a ReplicaSet manifest into a Helm template
// string plus the values extracted for values.yaml.
func replicaSetTemplate(replicaSet kext.ReplicaSet) (string, valueFileGenerator) {
	value := make(map[string]interface{})
	persistence := make(map[string]interface{})
	volumes := ""
	key := removeCharactersFromName(replicaSet.ObjectMeta.Name)
	replicaSet.ObjectMeta = generateObjectMetaTemplate(replicaSet.ObjectMeta, key, value, replicaSet.ObjectMeta.Name)
	replicaSet.Spec.Template.Spec = generateTemplateForPodSpec(replicaSet.Spec.Template.Spec, key, value)
	if volumeList := replicaSet.Spec.Template.Spec.Volumes; len(volumeList) != 0 {
		volumes, persistence = generateTemplateForVolume(volumeList, key, value)
		value["persistence"] = true
		replicaSet.Spec.Template.Spec.Volumes = nil
	}
	marshalled, err := ylib.Marshal(replicaSet)
	if err != nil {
		log.Fatal(err)
	}
	template := removeEmptyFields(string(marshalled))
	if volumes != "" {
		// ReplicationController and ReplicaSet keep volumes at the same level.
		template = addVolumeToTemplateForRc(template, volumes)
	}
	return template, valueFileGenerator{value: value, persistence: persistence}
}
// deploymentTemplate renders a Deployment manifest into a Helm template
// string plus the values extracted for values.yaml.
func deploymentTemplate(deployment kext.Deployment) (string, valueFileGenerator) {
	volumes := ""
	value := make(map[string]interface{})
	persistence := make(map[string]interface{})
	key := removeCharactersFromName(deployment.ObjectMeta.Name)
	deployment.ObjectMeta = generateObjectMetaTemplate(deployment.ObjectMeta, key, value, deployment.ObjectMeta.Name)
	deployment.Spec.Template.Spec = generateTemplateForPodSpec(deployment.Spec.Template.Spec, key, value)
	if len(deployment.Spec.Template.Spec.Volumes) != 0 {
		volumes, persistence = generateTemplateForVolume(deployment.Spec.Template.Spec.Volumes, key, value)
		deployment.Spec.Template.Spec.Volumes = nil
	}
	if len(string(deployment.Spec.Strategy.Type)) != 0 {
		// BUG FIX: record the original strategy type BEFORE substituting the
		// placeholder; previously the placeholder itself was stored in the
		// value map, so values.yaml never contained the real strategy.
		// The placeholder was also missing the '.' separator after the key
		// (compare ServiceName handling in statefulsetTemplate).
		value["DeploymentStrategy"] = deployment.Spec.Strategy.Type
		deployment.Spec.Strategy.Type = kext.DeploymentStrategyType(fmt.Sprintf("{{.Values.%s.DeploymentStrategy}}", key))
	}
	tempDeploymentByte, err := ylib.Marshal(deployment)
	if err != nil {
		log.Fatal(err)
	}
	template := removeEmptyFields(string(tempDeploymentByte))
	if len(volumes) != 0 {
		template = addVolumeToTemplateForRc(template, volumes)
	}
	return template, valueFileGenerator{value: value, persistence: persistence}
}
// daemonsetTemplate renders a DaemonSet manifest into a Helm template
// string plus the values extracted for values.yaml.
func daemonsetTemplate(daemonset kext.DaemonSet) (string, valueFileGenerator) {
	volumes := ""
	value := make(map[string]interface{})
	persistence := make(map[string]interface{})
	key := removeCharactersFromName(daemonset.ObjectMeta.Name)
	daemonset.ObjectMeta = generateObjectMetaTemplate(daemonset.ObjectMeta, key, value, daemonset.ObjectMeta.Name)
	daemonset.Spec.Template.Spec = generateTemplateForPodSpec(daemonset.Spec.Template.Spec, key, value)
	if len(daemonset.Spec.Template.Spec.Volumes) != 0 {
		volumes, persistence = generateTemplateForVolume(daemonset.Spec.Template.Spec.Volumes, key, value)
		value["persistence"] = true
		daemonset.Spec.Template.Spec.Volumes = nil
	}
	tempDaemonSetByte, err := ylib.Marshal(daemonset)
	if err != nil {
		log.Fatal(err)
	}
	// BUG FIX: a second, dead "if err != nil" re-check of the same
	// already-handled Marshal error was removed.
	template := removeEmptyFields(string(tempDaemonSetByte))
	if len(volumes) != 0 {
		template = addVolumeToTemplateForRc(template, volumes)
	}
	return template, valueFileGenerator{value: value, persistence: persistence}
}
// statefulsetTemplate renders a StatefulSet manifest into a Helm template
// string plus the values extracted for values.yaml.
func statefulsetTemplate(statefulset apps.StatefulSet) (string, valueFileGenerator) {
	volumes := ""
	value := make(map[string]interface{})
	persistence := make(map[string]interface{})
	key := removeCharactersFromName(statefulset.ObjectMeta.Name)
	statefulset.ObjectMeta = generateObjectMetaTemplate(statefulset.ObjectMeta, key, value, statefulset.ObjectMeta.Name)
	if len(statefulset.Spec.ServiceName) != 0 {
		// BUG FIX: record the original service name BEFORE substituting the
		// placeholder; previously the placeholder string itself was stored
		// in the value map, so values.yaml never contained the real name.
		value["ServiceName"] = statefulset.Spec.ServiceName
		statefulset.Spec.ServiceName = fmt.Sprintf("{{.Values.%s.ServiceName}}", key)
	}
	statefulset.Spec.Template.Spec = generateTemplateForPodSpec(statefulset.Spec.Template.Spec, key, value)
	if len(statefulset.Spec.Template.Spec.Volumes) != 0 {
		volumes, persistence = generateTemplateForVolume(statefulset.Spec.Template.Spec.Volumes, key, value)
		statefulset.Spec.Template.Spec.Volumes = nil
	}
	tempStatefulSetByte, err := ylib.Marshal(statefulset)
	if err != nil {
		log.Fatal(err)
	}
	template := removeEmptyFields(string(tempStatefulSetByte))
	if len(volumes) != 0 {
		template = addVolumeToTemplateForRc(template, volumes)
	}
	return template, valueFileGenerator{value: value, persistence: persistence}
}
// jobTemplate renders a Job manifest into a Helm template string plus the
// values extracted for values.yaml.
func jobTemplate(job batch.Job) (string, valueFileGenerator) {
	value := make(map[string]interface{})
	persistence := make(map[string]interface{})
	volumes := ""
	key := removeCharactersFromName(job.ObjectMeta.Name)
	job.ObjectMeta = generateObjectMetaTemplate(job.ObjectMeta, key, value, job.ObjectMeta.Name)
	job.Spec.Template.Spec = generateTemplateForPodSpec(job.Spec.Template.Spec, key, value)
	if volumeList := job.Spec.Template.Spec.Volumes; len(volumeList) != 0 {
		volumes, persistence = generateTemplateForVolume(volumeList, key, value)
		value["persistence"] = true
		job.Spec.Template.Spec.Volumes = nil
	}
	marshalled, err := ylib.Marshal(job)
	if err != nil {
		log.Fatal(err)
	}
	template := removeEmptyFields(string(marshalled))
	if volumes != "" {
		template = addVolumeToTemplateForRc(template, volumes)
	}
	return template, valueFileGenerator{value: value, persistence: persistence}
}
// serviceTemplate renders a Service manifest into a Helm template string
// plus the values extracted for values.yaml.
func serviceTemplate(svc kapi.Service) (string, valueFileGenerator) {
	value := make(map[string]interface{})
	key := removeCharactersFromName(svc.ObjectMeta.Name)
	svc.ObjectMeta = generateObjectMetaTemplate(svc.ObjectMeta, key, value, svc.ObjectMeta.Name)
	svc.Spec = generateServiceSpecTemplate(svc.Spec, key, value)
	raw, err := ylib.Marshal(svc)
	if err != nil {
		log.Fatal(err)
	}
	return removeEmptyFields(string(raw)), valueFileGenerator{value: value}
}
// configMapTemplate renders a ConfigMap manifest into a Helm template
// string plus the values extracted for values.yaml.
func configMapTemplate(configMap kapi.ConfigMap) (string, valueFileGenerator) {
	value := make(map[string]interface{})
	key := removeCharactersFromName(configMap.ObjectMeta.Name)
	configMap.ObjectMeta = generateObjectMetaTemplate(configMap.ObjectMeta, key, value, configMap.ObjectMeta.Name)
	configMap.ObjectMeta.Name = key // not using release name before configmap
	// BUG FIX: template the data entries BEFORE marshalling; previously the
	// object was marshalled first, so the {{.Values...}} substitutions never
	// reached the emitted template.
	for k, v := range configMap.Data {
		value[k] = v
		configMap.Data[k] = fmt.Sprintf("{{.Values.%s.%s}}", key, k)
	}
	configMapData, err := ylib.Marshal(configMap)
	if err != nil {
		log.Fatal(err)
	}
	return removeEmptyFields(string(configMapData)), valueFileGenerator{value: value}
}
// secretTemplate renders a Secret manifest into a Helm template string plus
// the values extracted for values.yaml. The data section is re-emitted via
// addSecretData with per-key conditionals.
func secretTemplate(secret kapi.Secret) (string, valueFileGenerator) {
	value := make(map[string]interface{})
	secretDataMap := make(map[string]interface{})
	key := removeCharactersFromName(secret.ObjectMeta.Name)
	secret.ObjectMeta = generateObjectMetaTemplate(secret.ObjectMeta, key, value, secret.ObjectMeta.Name)
	secret.ObjectMeta.Name = key
	for k, v := range secret.Data {
		value[k] = v
		secretDataMap[k] = fmt.Sprintf("{{.Values.%s.%s}}", key, k)
	}
	secret.Data = nil
	value["Type"] = secret.Type
	secret.Type = kapi.SecretType(fmt.Sprintf("{{.Values.%s.Type}}", key))
	raw, err := ylib.Marshal(secret)
	if err != nil {
		log.Fatal(err)
	}
	templated := addSecretData(removeEmptyFields(string(raw)), secretDataMap, key)
	return templated, valueFileGenerator{value: value}
}
// pvcTemplate renders a PersistentVolumeClaim manifest into a Helm template
// guarded by .Values.persistence.<key>.enabled, and returns the persistence
// settings for values.yaml.
func pvcTemplate(pvc kapi.PersistentVolumeClaim) (string, valueFileGenerator) {
	tempValue := make(map[string]interface{})
	persistence := make(map[string]interface{})
	key := removeCharactersFromName(pvc.ObjectMeta.Name)
	pvc.ObjectMeta = generateObjectMetaTemplate(pvc.ObjectMeta, key, tempValue, pvc.ObjectMeta.Name)
	pvc.Spec = generatePersistentVolumeClaimSpec(pvc.Spec, key, tempValue)
	raw, err := ylib.Marshal(pvc)
	if err != nil {
		log.Fatal(err)
	}
	body := removeEmptyFields(string(raw))
	// Only render the claim when persistence is enabled for this key.
	wrapped := fmt.Sprintf("{{- if .Values.persistence.%s.enabled -}}\n%s{{- end -}}", key, body)
	tempValue["enabled"] = true // persistence is on by default
	persistence[key] = tempValue
	return wrapped, valueFileGenerator{persistence: persistence}
}
// pvTemplate renders a PersistentVolume manifest into a Helm template
// string plus the values extracted for values.yaml.
func pvTemplate(pv kapi.PersistentVolume) (string, valueFileGenerator) {
	value := make(map[string]interface{})
	key := removeCharactersFromName(pv.ObjectMeta.Name)
	pv.ObjectMeta = generateObjectMetaTemplate(pv.ObjectMeta, key, value, pv.Name)
	pv.Spec = generatePersistentVolumeSpec(pv.Spec, key, value)
	raw, err := ylib.Marshal(pv)
	if err != nil {
		log.Fatal(err)
	}
	return removeEmptyFields(string(raw)), valueFileGenerator{value: value}
}
// storageClassTemplate renders a StorageClass manifest into a Helm template
// string plus the values extracted for values.yaml.
func storageClassTemplate(storageClass storage.StorageClass) (string, valueFileGenerator) {
	value := make(map[string]interface{})
	key := removeCharactersFromName(storageClass.ObjectMeta.Name)
	storageClass.ObjectMeta = generateObjectMetaTemplate(storageClass.ObjectMeta, key, value, storageClass.ObjectMeta.Name)
	value["Provisioner"] = storageClass.Provisioner
	storageClass.Provisioner = fmt.Sprintf("{{.Values.%s.Provisioner}}", key)
	storageClass.Parameters = mapToValueMaker(storageClass.Parameters, value, key)
	raw, err := ylib.Marshal(storageClass)
	if err != nil {
		log.Fatal(err)
	}
	return string(raw), valueFileGenerator{value: value}
}
// addSecretData appends a templated "data:" section to a rendered Secret.
// Each key emits its supplied placeholder when .Values.<key>.<k> is set and
// otherwise falls back to a random base64-encoded value.
func addSecretData(secretData string, secretDataMap map[string]interface{}, key string) string {
	body := "data:\n"
	for k, v := range secretDataMap {
		body += fmt.Sprintf(" {{ if .Values.%s.%s }}\n %s: %s\n {{ else }}\n %s: {{ randAlphaNum 10 | b64enc | quote }}\n {{ end }}\n", key, k, k, v, k)
	}
	return secretData + body
}
// addPersistence merges the entries of elements into persistence and
// returns the (mutated) persistence map.
func addPersistence(persistence map[string]interface{}, elements map[string]interface{}) map[string]interface{} {
	for name, settings := range elements {
		persistence[name] = settings
	}
	return persistence
}
// chartMetaData returns the default Chart.yaml metadata for a generated
// chart with the given name (fixed version 0.1.0, apiVersion v1).
func chartMetaData(name string) chart.Metadata {
	return chart.Metadata{
		Name:        name,
		Description: "Helm chart generated by https://github.com/appscode/chartify",
		Version:     "0.1.0",
		ApiVersion:  "v1",
	}
}
// mapToValueMaker records each entry of mp in value and replaces the map's
// entries with "{{.Values.<key>.<name>}}" placeholders, returning mp.
func mapToValueMaker(mp map[string]string, value map[string]interface{}, key string) map[string]string {
	for name, original := range mp {
		value[name] = original
		mp[name] = fmt.Sprintf("{{.Values.%s.%s}}", key, name)
	}
	return mp
}
Clean up serviceAccountName from PodSpec
package pkg
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
ylib "github.com/ghodss/yaml"
"k8s.io/helm/pkg/proto/hapi/chart"
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/apis/batch"
kext "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/apis/storage"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/yaml"
)
// Generator holds the inputs for chart creation.
type Generator struct {
	Location  string   // directory in which the chart directory is created
	ChartName string   // name of the chart (and of its directory)
	YamlFiles []string // raw Kubernetes YAML manifests, one object each
}
// Create renders every YAML manifest in g.YamlFiles into a Helm chart rooted
// at g.Location/g.ChartName: one template file per Kubernetes object, plus
// Chart.yaml, values.yaml and the default helpers file. It returns the chart
// directory path. Unknown object kinds are skipped with a warning instead of
// attempting to write a template with an empty file name.
func (g Generator) Create() (string, error) {
	chartfile := chartMetaData(g.ChartName)
	imageTag := "" //TODO
	fmt.Println("Creating Custom Chart...")
	cdir := filepath.Join(g.Location, chartfile.Name)
	fi, err := os.Stat(cdir)
	if err == nil && !fi.IsDir() {
		return cdir, fmt.Errorf("%s already exists and is not a directory", cdir)
	}
	if err := os.MkdirAll(cdir, 0755); err != nil {
		return cdir, err
	}
	cf := filepath.Join(cdir, ChartfileName)
	if _, err := os.Stat(cf); err != nil {
		if len(chartfile.Version) == 0 {
			chartfile.Version = imageTag
		}
		if err := SaveChartfile(cf, &chartfile); err != nil {
			return cdir, err
		}
	}
	valueFile := make(map[string]interface{})
	persistence := make(map[string]interface{})
	templateLocation := filepath.Join(cdir, TemplatesDir)
	// BUG FIX: this error was previously ignored.
	if err := os.MkdirAll(templateLocation, 0755); err != nil {
		return cdir, err
	}
	for _, kubeObj := range g.YamlFiles {
		kubeJson, err := yaml.ToJSON([]byte(kubeObj))
		if err != nil {
			log.Fatal(err)
		}
		var objMeta unversioned.TypeMeta
		if err := json.Unmarshal(kubeJson, &objMeta); err != nil {
			log.Fatal(err)
		}
		values := valueFileGenerator{}
		var template, templateName string
		switch objMeta.Kind {
		case "Pod":
			pod := kapi.Pod{}
			if err := json.Unmarshal(kubeJson, &pod); err != nil {
				log.Fatal(err)
			}
			cleanUpObjectMeta(&pod.ObjectMeta)
			cleanUpPodSpec(&pod.Spec)
			templateName = filepath.Join(templateLocation, pod.Name+".yaml")
			template, values = podTemplate(pod)
			valueFile[removeCharactersFromName(pod.Name)] = values.value
			persistence = addPersistence(persistence, values.persistence)
		case "ReplicationController":
			rc := kapi.ReplicationController{}
			if err := json.Unmarshal(kubeJson, &rc); err != nil {
				log.Fatal(err)
			}
			cleanUpObjectMeta(&rc.ObjectMeta)
			cleanUpPodSpec(&rc.Spec.Template.Spec)
			templateName = filepath.Join(templateLocation, rc.Name+".yaml")
			template, values = replicationControllerTemplate(rc)
			valueFile[removeCharactersFromName(rc.Name)] = values.value
			persistence = addPersistence(persistence, values.persistence)
		case "Deployment":
			deployment := kext.Deployment{}
			if err := json.Unmarshal(kubeJson, &deployment); err != nil {
				log.Fatal(err)
			}
			cleanUpObjectMeta(&deployment.ObjectMeta)
			cleanUpPodSpec(&deployment.Spec.Template.Spec)
			templateName = filepath.Join(templateLocation, deployment.Name+".yaml")
			template, values = deploymentTemplate(deployment)
			valueFile[removeCharactersFromName(deployment.Name)] = values.value
			persistence = addPersistence(persistence, values.persistence)
		case "Job":
			job := batch.Job{}
			if err := json.Unmarshal(kubeJson, &job); err != nil {
				log.Fatal(err)
			}
			cleanUpObjectMeta(&job.ObjectMeta)
			cleanUpPodSpec(&job.Spec.Template.Spec)
			templateName = filepath.Join(templateLocation, job.Name+".yaml")
			template, values = jobTemplate(job)
			valueFile[removeCharactersFromName(job.Name)] = values.value
			persistence = addPersistence(persistence, values.persistence)
		case "DaemonSet":
			daemonset := kext.DaemonSet{}
			if err := json.Unmarshal(kubeJson, &daemonset); err != nil {
				log.Fatal(err)
			}
			cleanUpObjectMeta(&daemonset.ObjectMeta)
			cleanUpPodSpec(&daemonset.Spec.Template.Spec)
			templateName = filepath.Join(templateLocation, daemonset.Name+".yaml")
			template, values = daemonsetTemplate(daemonset)
			valueFile[removeCharactersFromName(daemonset.Name)] = values.value
			persistence = addPersistence(persistence, values.persistence)
		case "ReplicaSet":
			rcSet := kext.ReplicaSet{}
			if err := json.Unmarshal(kubeJson, &rcSet); err != nil {
				log.Fatal(err)
			}
			cleanUpObjectMeta(&rcSet.ObjectMeta)
			cleanUpPodSpec(&rcSet.Spec.Template.Spec)
			templateName = filepath.Join(templateLocation, rcSet.Name+".yaml")
			template, values = replicaSetTemplate(rcSet)
			valueFile[removeCharactersFromName(rcSet.Name)] = values.value
			persistence = addPersistence(persistence, values.persistence)
		case "StatefulSet":
			statefulset := apps.StatefulSet{}
			if err := json.Unmarshal(kubeJson, &statefulset); err != nil {
				log.Fatal(err)
			}
			cleanUpObjectMeta(&statefulset.ObjectMeta)
			cleanUpPodSpec(&statefulset.Spec.Template.Spec)
			templateName = filepath.Join(templateLocation, statefulset.Name+".yaml")
			template, values = statefulsetTemplate(statefulset)
			valueFile[removeCharactersFromName(statefulset.Name)] = values.value
			persistence = addPersistence(persistence, values.persistence)
		case "Service":
			service := kapi.Service{}
			if err := json.Unmarshal(kubeJson, &service); err != nil {
				log.Fatal(err)
			}
			cleanUpObjectMeta(&service.ObjectMeta)
			template, values = serviceTemplate(service)
			templateName = filepath.Join(templateLocation, service.Name+".yaml")
			valueFile[removeCharactersFromName(service.Name)] = values.value
			persistence = addPersistence(persistence, values.persistence)
		case "ConfigMap":
			configMap := kapi.ConfigMap{}
			if err := json.Unmarshal(kubeJson, &configMap); err != nil {
				log.Fatal(err)
			}
			cleanUpObjectMeta(&configMap.ObjectMeta)
			templateName = filepath.Join(templateLocation, configMap.Name+".yaml")
			template, values = configMapTemplate(configMap)
			valueFile[removeCharactersFromName(configMap.Name)] = values.value
		case "Secret":
			secret := kapi.Secret{}
			if err := json.Unmarshal(kubeJson, &secret); err != nil {
				log.Fatal(err)
			}
			cleanUpObjectMeta(&secret.ObjectMeta)
			templateName = filepath.Join(templateLocation, secret.Name+".yaml")
			template, values = secretTemplate(secret)
			valueFile[removeCharactersFromName(secret.Name)] = values.value
		case "PersistentVolumeClaim":
			pvc := kapi.PersistentVolumeClaim{}
			if err := json.Unmarshal(kubeJson, &pvc); err != nil {
				log.Fatal(err)
			}
			cleanUpObjectMeta(&pvc.ObjectMeta)
			templateName = filepath.Join(templateLocation, pvc.Name+".yaml")
			template, values = pvcTemplate(pvc)
			// PVC values live under the top-level "persistence" section only.
			persistence = addPersistence(persistence, values.persistence)
		case "PersistentVolume":
			pv := kapi.PersistentVolume{}
			if err := json.Unmarshal(kubeJson, &pv); err != nil {
				log.Fatal(err)
			}
			cleanUpObjectMeta(&pv.ObjectMeta)
			templateName = filepath.Join(templateLocation, pv.Name+".yaml")
			template, values = pvTemplate(pv)
			valueFile[removeCharactersFromName(pv.Name)] = values.value
		case "StorageClass":
			storageClass := storage.StorageClass{}
			if err := json.Unmarshal(kubeJson, &storageClass); err != nil {
				log.Fatal(err)
			}
			cleanUpObjectMeta(&storageClass.ObjectMeta)
			templateName = filepath.Join(templateLocation, storageClass.Name+".yaml")
			template, values = storageClassTemplate(storageClass)
			valueFile[removeCharactersFromName(storageClass.Name)] = values.value
		default:
			// BUG FIX: previously this fell through and called WriteFile with
			// an empty templateName, aborting the whole run via log.Fatal.
			fmt.Printf("Kind %q NOT IMPLEMENTED. ADD MANUALLY.\n", objMeta.Kind)
			continue
		}
		if err := ioutil.WriteFile(templateName, []byte(template), 0644); err != nil {
			log.Fatal(err)
		}
	}
	if len(persistence) != 0 {
		valueFile["persistence"] = persistence
	}
	valueFileData, err := ylib.Marshal(valueFile)
	if err != nil {
		log.Fatal(err)
	}
	helperDir := filepath.Join(templateLocation, HelpersName)
	// BUG FIX: the error from writing the helpers file was ignored.
	if err := ioutil.WriteFile(helperDir, []byte(defaultHelpers), 0644); err != nil { //TODO change default values
		log.Fatal(err)
	}
	valueDir := filepath.Join(cdir, ValuesfileName)
	if err := ioutil.WriteFile(valueDir, []byte(valueFileData), 0644); err != nil {
		log.Fatal(err)
	}
	fmt.Println("CREATE : SUCCESSFULL")
	return cdir, nil
}
// cleanUpObjectMeta strips server-populated / cluster-specific fields from
// an ObjectMeta so they do not leak into the generated chart template.
func cleanUpObjectMeta(m *kapi.ObjectMeta) {
	var t unversioned.Time // zero timestamp
	m.GenerateName = ""
	m.SelfLink = ""
	m.UID = types.UID("")
	m.ResourceVersion = ""
	m.Generation = 0
	m.CreationTimestamp = t
	m.DeletionTimestamp = nil
}
// cleanUpPodSpec strips defaulted / scheduler-assigned fields from a PodSpec
// so the generated template only carries user-specified settings.
func cleanUpPodSpec(p *kapi.PodSpec) {
	p.DNSPolicy = kapi.DNSPolicy("")
	p.NodeName = ""
	// "default" is the implicit service account; dropping it keeps the
	// template portable across namespaces.
	if p.ServiceAccountName == "default" {
		p.ServiceAccountName = ""
	}
	p.TerminationGracePeriodSeconds = nil
	// range copies each element, so write the cleaned copy back by index.
	for i, c := range p.Containers {
		c.TerminationMessagePath = ""
		p.Containers[i] = c
	}
	for i, c := range p.InitContainers {
		c.TerminationMessagePath = ""
		p.InitContainers[i] = c
	}
}
// podTemplate renders a Pod manifest into a Helm template string and
// collects the values (and volume persistence settings) extracted for
// values.yaml.
func podTemplate(pod kapi.Pod) (string, valueFileGenerator) {
	value := make(map[string]interface{})
	persistence := make(map[string]interface{})
	volumes := ""
	key := removeCharactersFromName(pod.ObjectMeta.Name)
	pod.ObjectMeta = generateObjectMetaTemplate(pod.ObjectMeta, key, value, pod.ObjectMeta.Name)
	pod.Spec = generateTemplateForPodSpec(pod.Spec, key, value)
	if volumeList := pod.Spec.Volumes; len(volumeList) != 0 {
		volumes, persistence = generateTemplateForVolume(volumeList, key, value)
		pod.Spec.Volumes = nil
	}
	marshalled, err := ylib.Marshal(pod)
	if err != nil {
		log.Fatal(err)
	}
	template := removeEmptyFields(string(marshalled))
	if volumes != "" {
		template = addVolumeToTemplateForPod(template, volumes)
	}
	return template, valueFileGenerator{value: value, persistence: persistence}
}
// replicationControllerTemplate renders a ReplicationController manifest
// into a Helm template string plus the values extracted for values.yaml.
func replicationControllerTemplate(rc kapi.ReplicationController) (string, valueFileGenerator) {
	value := make(map[string]interface{})
	persistence := make(map[string]interface{})
	volumes := ""
	key := removeCharactersFromName(rc.ObjectMeta.Name)
	rc.ObjectMeta = generateObjectMetaTemplate(rc.ObjectMeta, key, value, rc.ObjectMeta.Name)
	rc.Spec.Template.Spec = generateTemplateForPodSpec(rc.Spec.Template.Spec, key, value)
	if volumeList := rc.Spec.Template.Spec.Volumes; len(volumeList) != 0 {
		volumes, persistence = generateTemplateForVolume(volumeList, key, value)
		value["persistence"] = true
		rc.Spec.Template.Spec.Volumes = nil
	}
	marshalled, err := ylib.Marshal(rc)
	if err != nil {
		log.Fatal(err)
	}
	template := removeEmptyFields(string(marshalled))
	if volumes != "" {
		template = addVolumeToTemplateForRc(template, volumes)
	}
	return template, valueFileGenerator{value: value, persistence: persistence}
}
// replicaSetTemplate renders a ReplicaSet manifest into a Helm template
// string plus the values extracted for values.yaml.
func replicaSetTemplate(replicaSet kext.ReplicaSet) (string, valueFileGenerator) {
	value := make(map[string]interface{})
	persistence := make(map[string]interface{})
	volumes := ""
	key := removeCharactersFromName(replicaSet.ObjectMeta.Name)
	replicaSet.ObjectMeta = generateObjectMetaTemplate(replicaSet.ObjectMeta, key, value, replicaSet.ObjectMeta.Name)
	replicaSet.Spec.Template.Spec = generateTemplateForPodSpec(replicaSet.Spec.Template.Spec, key, value)
	if volumeList := replicaSet.Spec.Template.Spec.Volumes; len(volumeList) != 0 {
		volumes, persistence = generateTemplateForVolume(volumeList, key, value)
		value["persistence"] = true
		replicaSet.Spec.Template.Spec.Volumes = nil
	}
	marshalled, err := ylib.Marshal(replicaSet)
	if err != nil {
		log.Fatal(err)
	}
	template := removeEmptyFields(string(marshalled))
	if volumes != "" {
		// ReplicationController and ReplicaSet keep volumes at the same level.
		template = addVolumeToTemplateForRc(template, volumes)
	}
	return template, valueFileGenerator{value: value, persistence: persistence}
}
// deploymentTemplate renders a Deployment manifest into a Helm template
// string plus the values extracted for values.yaml.
func deploymentTemplate(deployment kext.Deployment) (string, valueFileGenerator) {
	volumes := ""
	value := make(map[string]interface{})
	persistence := make(map[string]interface{})
	key := removeCharactersFromName(deployment.ObjectMeta.Name)
	deployment.ObjectMeta = generateObjectMetaTemplate(deployment.ObjectMeta, key, value, deployment.ObjectMeta.Name)
	deployment.Spec.Template.Spec = generateTemplateForPodSpec(deployment.Spec.Template.Spec, key, value)
	if len(deployment.Spec.Template.Spec.Volumes) != 0 {
		volumes, persistence = generateTemplateForVolume(deployment.Spec.Template.Spec.Volumes, key, value)
		deployment.Spec.Template.Spec.Volumes = nil
	}
	if len(string(deployment.Spec.Strategy.Type)) != 0 {
		// BUG FIX: record the original strategy type BEFORE substituting the
		// placeholder; previously the placeholder itself was stored in the
		// value map, so values.yaml never contained the real strategy.
		// The placeholder was also missing the '.' separator after the key
		// (compare ServiceName handling in statefulsetTemplate).
		value["DeploymentStrategy"] = deployment.Spec.Strategy.Type
		deployment.Spec.Strategy.Type = kext.DeploymentStrategyType(fmt.Sprintf("{{.Values.%s.DeploymentStrategy}}", key))
	}
	tempDeploymentByte, err := ylib.Marshal(deployment)
	if err != nil {
		log.Fatal(err)
	}
	template := removeEmptyFields(string(tempDeploymentByte))
	if len(volumes) != 0 {
		template = addVolumeToTemplateForRc(template, volumes)
	}
	return template, valueFileGenerator{value: value, persistence: persistence}
}
// daemonsetTemplate renders a DaemonSet manifest into a Helm template
// string plus the values extracted for values.yaml.
func daemonsetTemplate(daemonset kext.DaemonSet) (string, valueFileGenerator) {
	volumes := ""
	value := make(map[string]interface{})
	persistence := make(map[string]interface{})
	key := removeCharactersFromName(daemonset.ObjectMeta.Name)
	daemonset.ObjectMeta = generateObjectMetaTemplate(daemonset.ObjectMeta, key, value, daemonset.ObjectMeta.Name)
	daemonset.Spec.Template.Spec = generateTemplateForPodSpec(daemonset.Spec.Template.Spec, key, value)
	if len(daemonset.Spec.Template.Spec.Volumes) != 0 {
		volumes, persistence = generateTemplateForVolume(daemonset.Spec.Template.Spec.Volumes, key, value)
		value["persistence"] = true
		daemonset.Spec.Template.Spec.Volumes = nil
	}
	tempDaemonSetByte, err := ylib.Marshal(daemonset)
	if err != nil {
		log.Fatal(err)
	}
	// BUG FIX: a second, dead "if err != nil" re-check of the same
	// already-handled Marshal error was removed.
	template := removeEmptyFields(string(tempDaemonSetByte))
	if len(volumes) != 0 {
		template = addVolumeToTemplateForRc(template, volumes)
	}
	return template, valueFileGenerator{value: value, persistence: persistence}
}
// statefulsetTemplate renders a StatefulSet manifest into a Helm template
// string plus the values extracted for values.yaml.
func statefulsetTemplate(statefulset apps.StatefulSet) (string, valueFileGenerator) {
	volumes := ""
	value := make(map[string]interface{})
	persistence := make(map[string]interface{})
	key := removeCharactersFromName(statefulset.ObjectMeta.Name)
	statefulset.ObjectMeta = generateObjectMetaTemplate(statefulset.ObjectMeta, key, value, statefulset.ObjectMeta.Name)
	if len(statefulset.Spec.ServiceName) != 0 {
		// BUG FIX: record the original service name BEFORE substituting the
		// placeholder; previously the placeholder string itself was stored
		// in the value map, so values.yaml never contained the real name.
		value["ServiceName"] = statefulset.Spec.ServiceName
		statefulset.Spec.ServiceName = fmt.Sprintf("{{.Values.%s.ServiceName}}", key)
	}
	statefulset.Spec.Template.Spec = generateTemplateForPodSpec(statefulset.Spec.Template.Spec, key, value)
	if len(statefulset.Spec.Template.Spec.Volumes) != 0 {
		volumes, persistence = generateTemplateForVolume(statefulset.Spec.Template.Spec.Volumes, key, value)
		statefulset.Spec.Template.Spec.Volumes = nil
	}
	tempStatefulSetByte, err := ylib.Marshal(statefulset)
	if err != nil {
		log.Fatal(err)
	}
	template := removeEmptyFields(string(tempStatefulSetByte))
	if len(volumes) != 0 {
		template = addVolumeToTemplateForRc(template, volumes)
	}
	return template, valueFileGenerator{value: value, persistence: persistence}
}
// jobTemplate renders a Job manifest into a Helm template string plus the
// values extracted for values.yaml.
func jobTemplate(job batch.Job) (string, valueFileGenerator) {
	value := make(map[string]interface{})
	persistence := make(map[string]interface{})
	volumes := ""
	key := removeCharactersFromName(job.ObjectMeta.Name)
	job.ObjectMeta = generateObjectMetaTemplate(job.ObjectMeta, key, value, job.ObjectMeta.Name)
	job.Spec.Template.Spec = generateTemplateForPodSpec(job.Spec.Template.Spec, key, value)
	if volumeList := job.Spec.Template.Spec.Volumes; len(volumeList) != 0 {
		volumes, persistence = generateTemplateForVolume(volumeList, key, value)
		value["persistence"] = true
		job.Spec.Template.Spec.Volumes = nil
	}
	marshalled, err := ylib.Marshal(job)
	if err != nil {
		log.Fatal(err)
	}
	template := removeEmptyFields(string(marshalled))
	if volumes != "" {
		template = addVolumeToTemplateForRc(template, volumes)
	}
	return template, valueFileGenerator{value: value, persistence: persistence}
}
// serviceTemplate renders a Service manifest into a Helm template string
// plus the values extracted for values.yaml.
func serviceTemplate(svc kapi.Service) (string, valueFileGenerator) {
	value := make(map[string]interface{})
	key := removeCharactersFromName(svc.ObjectMeta.Name)
	svc.ObjectMeta = generateObjectMetaTemplate(svc.ObjectMeta, key, value, svc.ObjectMeta.Name)
	svc.Spec = generateServiceSpecTemplate(svc.Spec, key, value)
	raw, err := ylib.Marshal(svc)
	if err != nil {
		log.Fatal(err)
	}
	return removeEmptyFields(string(raw)), valueFileGenerator{value: value}
}
// configMapTemplate renders a ConfigMap manifest into a Helm template
// string plus the values extracted for values.yaml.
func configMapTemplate(configMap kapi.ConfigMap) (string, valueFileGenerator) {
	value := make(map[string]interface{})
	key := removeCharactersFromName(configMap.ObjectMeta.Name)
	configMap.ObjectMeta = generateObjectMetaTemplate(configMap.ObjectMeta, key, value, configMap.ObjectMeta.Name)
	configMap.ObjectMeta.Name = key // not using release name before configmap
	// BUG FIX: template the data entries BEFORE marshalling; previously the
	// object was marshalled first, so the {{.Values...}} substitutions never
	// reached the emitted template.
	for k, v := range configMap.Data {
		value[k] = v
		configMap.Data[k] = fmt.Sprintf("{{.Values.%s.%s}}", key, k)
	}
	configMapData, err := ylib.Marshal(configMap)
	if err != nil {
		log.Fatal(err)
	}
	return removeEmptyFields(string(configMapData)), valueFileGenerator{value: value}
}
// secretTemplate renders a Secret manifest into a Helm template string plus
// the values extracted for values.yaml. The data section is re-emitted via
// addSecretData with per-key conditionals.
func secretTemplate(secret kapi.Secret) (string, valueFileGenerator) {
	value := make(map[string]interface{})
	secretDataMap := make(map[string]interface{})
	key := removeCharactersFromName(secret.ObjectMeta.Name)
	secret.ObjectMeta = generateObjectMetaTemplate(secret.ObjectMeta, key, value, secret.ObjectMeta.Name)
	secret.ObjectMeta.Name = key
	for k, v := range secret.Data {
		value[k] = v
		secretDataMap[k] = fmt.Sprintf("{{.Values.%s.%s}}", key, k)
	}
	secret.Data = nil
	value["Type"] = secret.Type
	secret.Type = kapi.SecretType(fmt.Sprintf("{{.Values.%s.Type}}", key))
	raw, err := ylib.Marshal(secret)
	if err != nil {
		log.Fatal(err)
	}
	templated := addSecretData(removeEmptyFields(string(raw)), secretDataMap, key)
	return templated, valueFileGenerator{value: value}
}
// pvcTemplate renders a PersistentVolumeClaim manifest into a Helm template
// guarded by .Values.persistence.<key>.enabled, and returns the persistence
// settings for values.yaml.
func pvcTemplate(pvc kapi.PersistentVolumeClaim) (string, valueFileGenerator) {
	tempValue := make(map[string]interface{})
	persistence := make(map[string]interface{})
	key := removeCharactersFromName(pvc.ObjectMeta.Name)
	pvc.ObjectMeta = generateObjectMetaTemplate(pvc.ObjectMeta, key, tempValue, pvc.ObjectMeta.Name)
	pvc.Spec = generatePersistentVolumeClaimSpec(pvc.Spec, key, tempValue)
	raw, err := ylib.Marshal(pvc)
	if err != nil {
		log.Fatal(err)
	}
	body := removeEmptyFields(string(raw))
	// Only render the claim when persistence is enabled for this key.
	wrapped := fmt.Sprintf("{{- if .Values.persistence.%s.enabled -}}\n%s{{- end -}}", key, body)
	tempValue["enabled"] = true // persistence is on by default
	persistence[key] = tempValue
	return wrapped, valueFileGenerator{persistence: persistence}
}
// pvTemplate renders a PersistentVolume manifest into a Helm template
// string plus the values extracted for values.yaml.
func pvTemplate(pv kapi.PersistentVolume) (string, valueFileGenerator) {
	value := make(map[string]interface{})
	key := removeCharactersFromName(pv.ObjectMeta.Name)
	pv.ObjectMeta = generateObjectMetaTemplate(pv.ObjectMeta, key, value, pv.Name)
	pv.Spec = generatePersistentVolumeSpec(pv.Spec, key, value)
	raw, err := ylib.Marshal(pv)
	if err != nil {
		log.Fatal(err)
	}
	return removeEmptyFields(string(raw)), valueFileGenerator{value: value}
}
// storageClassTemplate renders a StorageClass manifest into a Helm template
// string plus the values extracted for values.yaml.
func storageClassTemplate(storageClass storage.StorageClass) (string, valueFileGenerator) {
	value := make(map[string]interface{})
	key := removeCharactersFromName(storageClass.ObjectMeta.Name)
	storageClass.ObjectMeta = generateObjectMetaTemplate(storageClass.ObjectMeta, key, value, storageClass.ObjectMeta.Name)
	value["Provisioner"] = storageClass.Provisioner
	storageClass.Provisioner = fmt.Sprintf("{{.Values.%s.Provisioner}}", key)
	storageClass.Parameters = mapToValueMaker(storageClass.Parameters, value, key)
	raw, err := ylib.Marshal(storageClass)
	if err != nil {
		log.Fatal(err)
	}
	return string(raw), valueFileGenerator{value: value}
}
// addSecretData appends a templated "data:" section to a rendered Secret.
// Each key emits its supplied placeholder when .Values.<key>.<k> is set and
// otherwise falls back to a random base64-encoded value.
func addSecretData(secretData string, secretDataMap map[string]interface{}, key string) string {
	body := "data:\n"
	for k, v := range secretDataMap {
		body += fmt.Sprintf(" {{ if .Values.%s.%s }}\n %s: %s\n {{ else }}\n %s: {{ randAlphaNum 10 | b64enc | quote }}\n {{ end }}\n", key, k, k, v, k)
	}
	return secretData + body
}
// addPersistence merges the entries of elements into persistence and
// returns the (mutated) persistence map.
func addPersistence(persistence map[string]interface{}, elements map[string]interface{}) map[string]interface{} {
	for name, settings := range elements {
		persistence[name] = settings
	}
	return persistence
}
// chartMetaData builds the Chart.yaml metadata for a generated chart with
// the given chart name and a fixed generator description/version.
func chartMetaData(name string) chart.Metadata {
	meta := chart.Metadata{
		Name:        name,
		Description: "Helm chart generated by https://github.com/appscode/chartify",
		Version:     "0.1.0",
		ApiVersion:  "v1",
	}
	return meta
}
// mapToValueMaker records every entry of mp into value and then replaces
// each entry of mp with a {{.Values.<key>.<k>}} template reference.
// Both maps are mutated in place; the modified mp is returned for chaining.
func mapToValueMaker(mp map[string]string, value map[string]interface{}, key string) map[string]string {
	for name := range mp {
		value[name] = mp[name]
		mp[name] = fmt.Sprintf("{{.Values.%s.%s}}", key, name)
	}
	return mp
}
|
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package watch
import (
"sync"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// FullChannelBehavior controls how the Broadcaster reacts if a watcher's watch
// channel is full.
type FullChannelBehavior int

const (
	// WaitIfChannelFull blocks distribution until every watcher has room.
	WaitIfChannelFull FullChannelBehavior = iota
	// DropIfChannelFull silently skips watchers whose channel is full.
	DropIfChannelFull
)

// Buffer the incoming queue a little bit even though it should rarely ever accumulate
// anything, just in case a few events are received in such a short window that
// Broadcaster can't move them onto the watchers' queues fast enough.
const incomingQueueLength = 25
// Broadcaster distributes event notifications among any number of watchers. Every event
// is delivered to every watcher.
type Broadcaster struct {
	// watchers and nextWatcher are only touched from closures run by the
	// loop() goroutine (via blockQueue), which is why no mutex is needed.
	watchers     map[int64]*broadcasterWatcher
	nextWatcher  int64
	distributing sync.WaitGroup // released by loop() once incoming is drained

	incoming chan Event    // event queue feeding loop(); closed by Shutdown
	stopped  chan struct{} // closed by Shutdown; makes blockQueue a no-op

	// How large to make watcher's channel.
	watchQueueLength int
	// If one of the watch channels is full, don't wait for it to become empty.
	// Instead just deliver it to the watchers that do have space in their
	// channels and move on to the next event.
	// It's more fair to do this on a per-watcher basis than to do it on the
	// "incoming" channel, which would allow one slow watcher to prevent all
	// other watchers from getting new events.
	fullChannelBehavior FullChannelBehavior
}
// NewBroadcaster creates a new Broadcaster. queueLength is the maximum number of events to queue per watcher.
// It is guaranteed that events will be distributed in the order in which they occur,
// but the order in which a single event is distributed among all of the watchers is unspecified.
func NewBroadcaster(queueLength int, fullChannelBehavior FullChannelBehavior) *Broadcaster {
	m := &Broadcaster{
		watchers:            map[int64]*broadcasterWatcher{},
		incoming:            make(chan Event, incomingQueueLength),
		stopped:             make(chan struct{}),
		watchQueueLength:    queueLength,
		fullChannelBehavior: fullChannelBehavior,
	}
	// distributing is released by loop() after incoming drains; Shutdown
	// waits on it to guarantee all queued events were delivered.
	m.distributing.Add(1)
	go m.loop()
	return m
}
// internalRunFunctionMarker tags synthetic events whose Object is a function
// to be executed by the distribution loop rather than delivered to watchers.
const internalRunFunctionMarker = "internal-do-function"

// a function type we can shoehorn into the queue.
type functionFakeRuntimeObject func()

// GetObjectKind satisfies runtime.Object with an empty kind.
func (obj functionFakeRuntimeObject) GetObjectKind() schema.ObjectKind {
	return schema.EmptyObjectKind
}

// DeepCopyObject satisfies runtime.Object without actually copying.
func (obj functionFakeRuntimeObject) DeepCopyObject() runtime.Object {
	if obj == nil {
		return nil
	}
	// funcs are immutable. Hence, just return the original func.
	return obj
}
// Execute f, blocking the incoming queue (and waiting for it to drain first).
// The purpose of this terrible hack is so that watchers added after an event
// won't ever see that event, and will always see any event after they are
// added.
func (m *Broadcaster) blockQueue(f func()) {
	// After Shutdown, m.incoming is closed and the send below would panic,
	// so bail out (leaving f un-run) when stopped is already closed.
	select {
	case <-m.stopped:
		return
	default:
	}
	var wg sync.WaitGroup
	wg.Add(1)
	m.incoming <- Event{
		Type: internalRunFunctionMarker,
		Object: functionFakeRuntimeObject(func() {
			defer wg.Done()
			f()
		}),
	}
	// Block until the loop goroutine has actually executed f.
	wg.Wait()
}
// Watch adds a new watcher to the list and returns an Interface for it.
// Note: new watchers will only receive new events. They won't get an entire history
// of previous events. It will block until the watcher is actually added to the
// broadcaster.
func (m *Broadcaster) Watch() Interface {
	var w *broadcasterWatcher
	m.blockQueue(func() {
		id := m.nextWatcher
		m.nextWatcher++
		w = &broadcasterWatcher{
			result:  make(chan Event, m.watchQueueLength),
			stopped: make(chan struct{}),
			id:      id,
			m:       m,
		}
		m.watchers[id] = w
	})
	// w stays nil only when blockQueue skipped the closure, i.e. the
	// broadcaster was already stopped.
	if w == nil {
		// The panic here is to be consistent with the previous interface behavior
		// we are willing to re-evaluate in the future.
		panic("broadcaster already stopped")
	}
	return w
}
// WatchWithPrefix adds a new watcher to the list and returns an Interface for it. It sends
// queuedEvents down the new watch before beginning to send ordinary events from Broadcaster.
// The returned watch will have a queue length that is at least large enough to accommodate
// all of the items in queuedEvents. It will block until the watcher is actually added to
// the broadcaster.
func (m *Broadcaster) WatchWithPrefix(queuedEvents []Event) Interface {
	var w *broadcasterWatcher
	m.blockQueue(func() {
		id := m.nextWatcher
		m.nextWatcher++
		// Size the channel for all queued events plus at least one live one so
		// the unconditional sends below can never block the loop goroutine.
		length := m.watchQueueLength
		if n := len(queuedEvents) + 1; n > length {
			length = n
		}
		w = &broadcasterWatcher{
			result:  make(chan Event, length),
			stopped: make(chan struct{}),
			id:      id,
			m:       m,
		}
		m.watchers[id] = w
		for _, e := range queuedEvents {
			w.result <- e
		}
	})
	// w stays nil only when blockQueue skipped the closure, i.e. the
	// broadcaster was already stopped.
	if w == nil {
		// The panic here is to be consistent with the previous interface behavior
		// we are willing to re-evaluate in the future.
		panic("broadcaster already stopped")
	}
	return w
}
// stopWatching stops the given watcher and removes it from the list.
func (m *Broadcaster) stopWatching(id int64) {
	// The closure runs on the loop() goroutine via blockQueue, so mutating
	// m.watchers here needs no lock.
	m.blockQueue(func() {
		w, ok := m.watchers[id]
		if !ok {
			// No need to do anything, it's already been removed from the list.
			return
		}
		delete(m.watchers, id)
		close(w.result)
	})
}
// closeAll disconnects all watchers (presumably in response to a Shutdown call).
// Called from loop() after m.incoming has drained.
func (m *Broadcaster) closeAll() {
	for _, w := range m.watchers {
		close(w.result)
	}
	// Delete everything from the map, since presence/absence in the map is used
	// by stopWatching to avoid double-closing the channel.
	m.watchers = map[int64]*broadcasterWatcher{}
}
// Action distributes the given event among all watchers.
// It blocks when the incoming queue is full, and must not be called after
// Shutdown: m.incoming is closed then, so the send would panic.
func (m *Broadcaster) Action(action EventType, obj runtime.Object) {
	m.incoming <- Event{action, obj}
}
// Shutdown disconnects all watchers (but any queued events will still be distributed).
// You must not call Action or Watch* after calling Shutdown. This call blocks
// until all events have been distributed through the outbound channels. Note
// that since they can be buffered, this means that the watchers might not
// have received the data yet as it can remain sitting in the buffered
// channel. It will block until the broadcaster stop request is actually executed
func (m *Broadcaster) Shutdown() {
	// Going through blockQueue guarantees every previously-queued event is
	// distributed before the channels are closed.
	m.blockQueue(func() {
		close(m.stopped)
		close(m.incoming)
	})
	m.distributing.Wait()
}
// loop receives from m.incoming and distributes to all watchers.
func (m *Broadcaster) loop() {
	// Deliberately not catching crashes here. Yes, bring down the process if there's a
	// bug in watch.Broadcaster.
	for event := range m.incoming {
		if event.Type == internalRunFunctionMarker {
			// Synthetic event injected by blockQueue: execute it here instead
			// of delivering it to watchers.
			event.Object.(functionFakeRuntimeObject)()
			continue
		}
		m.distribute(event)
	}
	// incoming was closed (Shutdown): disconnect everyone and signal Shutdown.
	m.closeAll()
	m.distributing.Done()
}
// distribute sends event to all watchers. Blocking.
func (m *Broadcaster) distribute(event Event) {
	if m.fullChannelBehavior == DropIfChannelFull {
		for _, w := range m.watchers {
			select {
			case w.result <- event:
			case <-w.stopped:
			default: // Don't block if the event can't be queued.
			}
		}
	} else {
		// WaitIfChannelFull: block until each watcher accepts or is stopped.
		for _, w := range m.watchers {
			select {
			case w.result <- event:
			case <-w.stopped:
			}
		}
	}
}
// broadcasterWatcher handles a single watcher of a broadcaster
type broadcasterWatcher struct {
	result  chan Event    // events for this watcher; closed on stop/shutdown
	stopped chan struct{} // closed by Stop so distribute() skips this watcher
	stop    sync.Once     // makes Stop idempotent
	id      int64         // key into Broadcaster.watchers
	m       *Broadcaster
}
// ResultChan returns a channel to use for waiting on events.
// The channel is closed when the watcher is stopped or the broadcaster
// shuts down.
func (mw *broadcasterWatcher) ResultChan() <-chan Event {
	return mw.result
}
// Stop stops watching and removes mw from its list.
// It will block until the watcher stop request is actually executed
func (mw *broadcasterWatcher) Stop() {
	mw.stop.Do(func() {
		// Close stopped first so distribute() stops sending to us, then ask
		// the broadcaster to remove us and close the result channel.
		close(mw.stopped)
		mw.m.stopWatching(mw.id)
	})
}
Don't record events in goroutines
This changes the event recorder to use the equivalent of a select
statement instead of a goroutine to record events.
Previously, we used a goroutine to make event recording non-blocking.
Unfortunately, this writes to a channel, and during shutdown we then
race to write to a closed channel, panicking (caught by the error
handler, but still) and making the race detector unhappy.
Instead, we now use the select statement to make event emitting
non-blocking, and if we'd block, we just drop the event. We already
drop events if a particular sink is overloaded, so this just moves the
incoming event queue to match that behavior (and makes the incoming
event queue much longer).
This means that, if the user uses `Eventf` and friends correctly (i.e.
ensure they've returned by the time we call `Shutdown`), it's
now safe to call Shutdown. This matches the conventional go guidance on
channels: the writer should call close.
Kubernetes-commit: e90e67bd002e70a525d3ee9045b213a5d826074d
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package watch
import (
"sync"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// FullChannelBehavior controls how the Broadcaster reacts if a watcher's watch
// channel is full.
type FullChannelBehavior int

const (
	// WaitIfChannelFull blocks distribution until every watcher has room.
	WaitIfChannelFull FullChannelBehavior = iota
	// DropIfChannelFull silently skips watchers whose channel is full.
	DropIfChannelFull
)

// Buffer the incoming queue a little bit even though it should rarely ever accumulate
// anything, just in case a few events are received in such a short window that
// Broadcaster can't move them onto the watchers' queues fast enough.
const incomingQueueLength = 25
// Broadcaster distributes event notifications among any number of watchers. Every event
// is delivered to every watcher.
type Broadcaster struct {
	// watchers and nextWatcher are only touched from closures run by the
	// loop() goroutine (via blockQueue), which is why no mutex is needed.
	watchers     map[int64]*broadcasterWatcher
	nextWatcher  int64
	distributing sync.WaitGroup // released by loop() once incoming is drained

	incoming chan Event    // event queue feeding loop(); closed by Shutdown
	stopped  chan struct{} // closed by Shutdown; makes blockQueue a no-op

	// How large to make watcher's channel.
	watchQueueLength int
	// If one of the watch channels is full, don't wait for it to become empty.
	// Instead just deliver it to the watchers that do have space in their
	// channels and move on to the next event.
	// It's more fair to do this on a per-watcher basis than to do it on the
	// "incoming" channel, which would allow one slow watcher to prevent all
	// other watchers from getting new events.
	fullChannelBehavior FullChannelBehavior
}
// NewBroadcaster creates a new Broadcaster. queueLength is the maximum number of events to queue per watcher.
// It is guaranteed that events will be distributed in the order in which they occur,
// but the order in which a single event is distributed among all of the watchers is unspecified.
func NewBroadcaster(queueLength int, fullChannelBehavior FullChannelBehavior) *Broadcaster {
	m := &Broadcaster{
		watchers:            map[int64]*broadcasterWatcher{},
		incoming:            make(chan Event, incomingQueueLength),
		stopped:             make(chan struct{}),
		watchQueueLength:    queueLength,
		fullChannelBehavior: fullChannelBehavior,
	}
	// distributing is released by loop() after incoming drains; Shutdown
	// waits on it to guarantee all queued events were delivered.
	m.distributing.Add(1)
	go m.loop()
	return m
}
// NewLongQueueBroadcaster functions nearly identically to NewBroadcaster,
// except that the incoming queue is the same size as the outgoing queues
// (specified by queueLength). This is useful together with ActionOrDrop,
// which drops rather than blocks when the incoming queue is full.
func NewLongQueueBroadcaster(queueLength int, fullChannelBehavior FullChannelBehavior) *Broadcaster {
	m := &Broadcaster{
		watchers:            map[int64]*broadcasterWatcher{},
		incoming:            make(chan Event, queueLength),
		stopped:             make(chan struct{}),
		watchQueueLength:    queueLength,
		fullChannelBehavior: fullChannelBehavior,
	}
	// Shutdown waits on distributing; loop() signals it when done.
	m.distributing.Add(1)
	go m.loop()
	return m
}
// internalRunFunctionMarker tags synthetic events whose Object is a function
// to be executed by the distribution loop rather than delivered to watchers.
const internalRunFunctionMarker = "internal-do-function"

// a function type we can shoehorn into the queue.
type functionFakeRuntimeObject func()

// GetObjectKind satisfies runtime.Object with an empty kind.
func (obj functionFakeRuntimeObject) GetObjectKind() schema.ObjectKind {
	return schema.EmptyObjectKind
}

// DeepCopyObject satisfies runtime.Object without actually copying.
func (obj functionFakeRuntimeObject) DeepCopyObject() runtime.Object {
	if obj == nil {
		return nil
	}
	// funcs are immutable. Hence, just return the original func.
	return obj
}
// Execute f, blocking the incoming queue (and waiting for it to drain first).
// The purpose of this terrible hack is so that watchers added after an event
// won't ever see that event, and will always see any event after they are
// added.
func (m *Broadcaster) blockQueue(f func()) {
	// After Shutdown, m.incoming is closed and the send below would panic,
	// so bail out (leaving f un-run) when stopped is already closed.
	select {
	case <-m.stopped:
		return
	default:
	}
	var wg sync.WaitGroup
	wg.Add(1)
	m.incoming <- Event{
		Type: internalRunFunctionMarker,
		Object: functionFakeRuntimeObject(func() {
			defer wg.Done()
			f()
		}),
	}
	// Block until the loop goroutine has actually executed f.
	wg.Wait()
}
// Watch adds a new watcher to the list and returns an Interface for it.
// Note: new watchers will only receive new events. They won't get an entire history
// of previous events. It will block until the watcher is actually added to the
// broadcaster.
func (m *Broadcaster) Watch() Interface {
	var w *broadcasterWatcher
	m.blockQueue(func() {
		id := m.nextWatcher
		m.nextWatcher++
		w = &broadcasterWatcher{
			result:  make(chan Event, m.watchQueueLength),
			stopped: make(chan struct{}),
			id:      id,
			m:       m,
		}
		m.watchers[id] = w
	})
	// w stays nil only when blockQueue skipped the closure, i.e. the
	// broadcaster was already stopped.
	if w == nil {
		// The panic here is to be consistent with the previous interface behavior
		// we are willing to re-evaluate in the future.
		panic("broadcaster already stopped")
	}
	return w
}
// WatchWithPrefix adds a new watcher to the list and returns an Interface for it. It sends
// queuedEvents down the new watch before beginning to send ordinary events from Broadcaster.
// The returned watch will have a queue length that is at least large enough to accommodate
// all of the items in queuedEvents. It will block until the watcher is actually added to
// the broadcaster.
func (m *Broadcaster) WatchWithPrefix(queuedEvents []Event) Interface {
	var w *broadcasterWatcher
	m.blockQueue(func() {
		id := m.nextWatcher
		m.nextWatcher++
		// Size the channel for all queued events plus at least one live one so
		// the unconditional sends below can never block the loop goroutine.
		length := m.watchQueueLength
		if n := len(queuedEvents) + 1; n > length {
			length = n
		}
		w = &broadcasterWatcher{
			result:  make(chan Event, length),
			stopped: make(chan struct{}),
			id:      id,
			m:       m,
		}
		m.watchers[id] = w
		for _, e := range queuedEvents {
			w.result <- e
		}
	})
	// w stays nil only when blockQueue skipped the closure, i.e. the
	// broadcaster was already stopped.
	if w == nil {
		// The panic here is to be consistent with the previous interface behavior
		// we are willing to re-evaluate in the future.
		panic("broadcaster already stopped")
	}
	return w
}
// stopWatching stops the given watcher and removes it from the list.
func (m *Broadcaster) stopWatching(id int64) {
	// The closure runs on the loop() goroutine via blockQueue, so mutating
	// m.watchers here needs no lock.
	m.blockQueue(func() {
		w, ok := m.watchers[id]
		if !ok {
			// No need to do anything, it's already been removed from the list.
			return
		}
		delete(m.watchers, id)
		close(w.result)
	})
}
// closeAll disconnects all watchers (presumably in response to a Shutdown call).
// Called from loop() after m.incoming has drained.
func (m *Broadcaster) closeAll() {
	for _, w := range m.watchers {
		close(w.result)
	}
	// Delete everything from the map, since presence/absence in the map is used
	// by stopWatching to avoid double-closing the channel.
	m.watchers = map[int64]*broadcasterWatcher{}
}
// Action distributes the given event among all watchers.
// It blocks when the incoming queue is full, and must not be called after
// Shutdown: m.incoming is closed then, so the send would panic.
func (m *Broadcaster) Action(action EventType, obj runtime.Object) {
	m.incoming <- Event{action, obj}
}
// ActionOrDrop distributes the given event among all watchers, or drops it on the floor
// if too many incoming actions are queued up. Returns true if the action was sent,
// false if dropped.
// (Doc comment fixed: it previously named Action instead of ActionOrDrop.)
func (m *Broadcaster) ActionOrDrop(action EventType, obj runtime.Object) bool {
	// Non-blocking send: fall through to the default case when the incoming
	// queue is full.
	select {
	case m.incoming <- Event{action, obj}:
		return true
	default:
		return false
	}
}
// Shutdown disconnects all watchers (but any queued events will still be distributed).
// You must not call Action or Watch* after calling Shutdown. This call blocks
// until all events have been distributed through the outbound channels. Note
// that since they can be buffered, this means that the watchers might not
// have received the data yet as it can remain sitting in the buffered
// channel. It will block until the broadcaster stop request is actually executed
func (m *Broadcaster) Shutdown() {
	// Going through blockQueue guarantees every previously-queued event is
	// distributed before the channels are closed.
	m.blockQueue(func() {
		close(m.stopped)
		close(m.incoming)
	})
	m.distributing.Wait()
}
// loop receives from m.incoming and distributes to all watchers.
func (m *Broadcaster) loop() {
	// Deliberately not catching crashes here. Yes, bring down the process if there's a
	// bug in watch.Broadcaster.
	for event := range m.incoming {
		if event.Type == internalRunFunctionMarker {
			// Synthetic event injected by blockQueue: execute it here instead
			// of delivering it to watchers.
			event.Object.(functionFakeRuntimeObject)()
			continue
		}
		m.distribute(event)
	}
	// incoming was closed (Shutdown): disconnect everyone and signal Shutdown.
	m.closeAll()
	m.distributing.Done()
}
// distribute sends event to all watchers. Blocking.
func (m *Broadcaster) distribute(event Event) {
	if m.fullChannelBehavior == DropIfChannelFull {
		for _, w := range m.watchers {
			select {
			case w.result <- event:
			case <-w.stopped:
			default: // Don't block if the event can't be queued.
			}
		}
	} else {
		// WaitIfChannelFull: block until each watcher accepts or is stopped.
		for _, w := range m.watchers {
			select {
			case w.result <- event:
			case <-w.stopped:
			}
		}
	}
}
// broadcasterWatcher handles a single watcher of a broadcaster
type broadcasterWatcher struct {
	result  chan Event    // events for this watcher; closed on stop/shutdown
	stopped chan struct{} // closed by Stop so distribute() skips this watcher
	stop    sync.Once     // makes Stop idempotent
	id      int64         // key into Broadcaster.watchers
	m       *Broadcaster
}
// ResultChan returns a channel to use for waiting on events.
// The channel is closed when the watcher is stopped or the broadcaster
// shuts down.
func (mw *broadcasterWatcher) ResultChan() <-chan Event {
	return mw.result
}
// Stop stops watching and removes mw from its list.
// It will block until the watcher stop request is actually executed
func (mw *broadcasterWatcher) Stop() {
	mw.stop.Do(func() {
		// Close stopped first so distribute() stops sending to us, then ask
		// the broadcaster to remove us and close the result channel.
		close(mw.stopped)
		mw.m.stopWatching(mw.id)
	})
}
|
package main
import (
"fmt"
"github.com/go-macaron/binding"
//"github.com/go-macaron/cache"
"github.com/go-macaron/session"
"gopkg.in/macaron.v1"
// "html/template"
"net/http"
)
// HTTPConfig has webserver config options
type HTTPConfig struct {
	Port          int    `toml:"port"`          // TCP port the web server listens on
	AdminUser     string `toml:"adminuser"`     // admin login user name
	AdminPassword string `toml:"adminpassword"` // admin login password
}
// UserLogin for login purposes.
// Both fields are populated from the login form and are mandatory
// (enforced by the binding middleware's "Required" rule).
type UserLogin struct {
	UserName string `form:"username" binding:"Required"`
	Password string `form:"password" binding:"Required"`
}
// webServer configures and starts the embedded HTTP server on the given
// port: context/recovery middleware, static assets, in-memory sessions,
// JSON/XML rendering, and all REST routes for config entities and runtime
// control. It blocks in ListenAndServe until the server stops.
func webServer(port int) {
	bind := binding.Bind
	/* jwtMiddleware := jwtmiddleware.New(jwtmiddleware.Options{
		ValidationKeyGetter: func(token *jwt.Token) (interface{}, error) {
			return []byte("My Secret"), nil
		},
		// When set, the middleware verifies that tokens are signed with the specific signing algorithm
		// If the signing method is not constant the ValidationKeyGetter callback can be used to implement additional checks
		// Important to avoid security issues described here: https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/
		SigningMethod: jwt.SigningMethodHS256,
	})*/
	// initiate the app
	m := macaron.Classic()
	// register middleware
	m.Use(GetContextHandler())
	m.Use(macaron.Recovery())
	// m.Use(gzip.Gziper())
	m.Use(macaron.Static("public",
		macaron.StaticOptions{
			// Prefix is the optional prefix used to serve the static directory content. Default is empty string.
			Prefix: "public",
			// SkipLogging will disable [Static] log messages when a static file is served. Default is false.
			SkipLogging: true,
			// IndexFile defines which file to serve as index if it exists. Default is "index.html".
			IndexFile: "index.html",
			// Expires defines which user-defined function to use for producing a HTTP Expires Header. Default is nil.
			// https://developers.google.com/speed/docs/insights/LeverageBrowserCaching
			Expires: func() string { return "max-age=0" },
		}))
	m.Use(Sessioner(session.Options{
		// Name of provider. Default is "memory".
		Provider: "memory",
		// Provider configuration, it's corresponding to provider.
		ProviderConfig: "",
		// Cookie name to save session ID. Default is "MacaronSession".
		CookieName: "snmpcollector-session",
		// Cookie path to store. Default is "/".
		CookiePath: "/",
		// GC interval time in seconds. Default is 3600.
		Gclifetime: 3600,
		// Max life time in seconds. Default is whatever GC interval time is.
		Maxlifetime: 3600,
		// Use HTTPS only. Default is false.
		Secure: false,
		// Cookie life time. Default is 0.
		CookieLifeTime: 0,
		// Cookie domain name. Default is empty.
		Domain: "",
		// Session ID length. Default is 16.
		IDLength: 16,
		// Configuration section name. Default is "session".
		Section: "session",
	}))
	m.Use(macaron.Renderer(macaron.RenderOptions{
		// Directory to load templates. Default is "templates".
		Directory: "pkg/templates",
		// Extensions to parse template files from. Defaults are [".tmpl", ".html"].
		Extensions: []string{".tmpl", ".html"},
		// Funcs is a slice of FuncMaps to apply to the template upon compilation. Default is [].
		/*Funcs: []template.FuncMap{map[string]interface{}{
			"AppName": func() string {
				return "snmpcollector"
			},
			"AppVer": func() string {
				return "0.5.1"
			},
		}},*/
		// Delims sets the action delimiters to the specified strings. Defaults are ["{{", "}}"].
		Delims: macaron.Delims{"{{", "}}"},
		// Appends the given charset to the Content-Type header. Default is "UTF-8".
		Charset: "UTF-8",
		// Outputs human readable JSON. Default is false.
		IndentJSON: true,
		// Outputs human readable XML. Default is false.
		IndentXML: true,
		// Prefixes the JSON output with the given bytes. Default is no prefix.
		// PrefixJSON: []byte("macaron"),
		// Prefixes the XML output with the given bytes. Default is no prefix.
		// PrefixXML: []byte("macaron"),
		// Allows changing of output to XHTML instead of HTML. Default is "text/html".
		HTMLContentType: "text/html",
	}))
	/*
		m.Use(cache.Cacher(cache.Options{
			// Name of adapter. Default is "memory".
			Adapter: "memory",
			// Adapter configuration, it's corresponding to adapter.
			AdapterConfig: "",
			// GC interval time in seconds. Default is 60.
			Interval: 60,
			// Configuration section name. Default is "cache".
			Section: "cache",
		}))*/
	m.Post("/login", bind(UserLogin{}), myLoginHandler)
	m.Post("/logout", myLogoutHandler)
	m.Group("/metric", func() {
		m.Get("/", reqSignedIn, GetMetrics)
		m.Post("/", reqSignedIn, bind(SnmpMetricCfg{}), AddMetric)
		m.Put("/:id", reqSignedIn, bind(SnmpMetricCfg{}), UpdateMetric)
		m.Delete("/:id", reqSignedIn, DeleteMetric)
		m.Get("/:id", reqSignedIn, GetMetricByID)
	})
	m.Group("/measurement", func() {
		m.Get("/", reqSignedIn, GetMeas)
		m.Post("/", reqSignedIn, bind(InfluxMeasurementCfg{}), AddMeas)
		m.Put("/:id", reqSignedIn, bind(InfluxMeasurementCfg{}), UpdateMeas)
		m.Delete("/:id", reqSignedIn, DeleteMeas)
		m.Get("/:id", reqSignedIn, GetMeasByID)
	})
	m.Group("/measgroups", func() {
		m.Get("/", reqSignedIn, GetMeasGroup)
		m.Post("/", reqSignedIn, bind(MGroupsCfg{}), AddMeasGroup)
		m.Put("/:id", reqSignedIn, bind(MGroupsCfg{}), UpdateMeasGroup)
		m.Delete("/:id", reqSignedIn, DeleteMeasGroup)
		m.Get("/:id", reqSignedIn, GetMeasGroupByID)
	})
	m.Group("/measfilters", func() {
		m.Get("/", reqSignedIn, GetMeasFilter)
		m.Post("/", reqSignedIn, bind(MeasFilterCfg{}), AddMeasFilter)
		m.Put("/:id", reqSignedIn, bind(MeasFilterCfg{}), UpdateMeasFilter)
		m.Delete("/:id", reqSignedIn, DeleteMeasFilter)
		m.Get("/:id", reqSignedIn, GetMeasFilterByID)
	})
	m.Group("/influxservers", func() {
		m.Get("/", reqSignedIn, GetInfluxServer)
		m.Post("/", reqSignedIn, bind(InfluxCfg{}), AddInfluxServer)
		m.Put("/:id", reqSignedIn, bind(InfluxCfg{}), UpdateInfluxServer)
		m.Delete("/:id", reqSignedIn, DeleteInfluxServer)
		m.Get("/:id", reqSignedIn, GetInfluxServerByID)
		// NOTE: "ckeckondel" is a typo, but it is part of the public API the
		// frontend calls; renaming it would break existing clients.
		m.Get("/ckeckondel/:id", reqSignedIn, GetInfluxAffectOnDel)
	})
	// Data sources
	m.Group("/snmpdevice", func() {
		m.Get("/", reqSignedIn, GetSNMPDevices)
		m.Post("/", reqSignedIn, bind(SnmpDeviceCfg{}), AddSNMPDevice)
		m.Put("/:id", reqSignedIn, bind(SnmpDeviceCfg{}), UpdateSNMPDevice)
		m.Delete("/:id", reqSignedIn, DeleteSNMPDevice)
		m.Get("/:id", reqSignedIn, GetSNMPDeviceByID)
	})
	m.Group("/runtime", func() {
		m.Post("/snmpping/", reqSignedIn, bind(SnmpDeviceCfg{}), PingSNMPDevice)
		m.Get("/version/", reqSignedIn, RTGetVersion)
		m.Get("/info/", reqSignedIn, RTGetInfo)
		m.Get("/info/:id", reqSignedIn, RTGetInfo)
		m.Put("/activatedev/:id", reqSignedIn, RTActivateDev)
		m.Put("/deactivatedev/:id", reqSignedIn, RTDeactivateDev)
		m.Put("/actsnmpdbg/:id", reqSignedIn, RTActSnmpDebugDev)
		m.Put("/deactsnmpdbg/:id", reqSignedIn, RTDeactSnmpDebugDev)
		m.Put("/setloglevel/:id/:level", reqSignedIn, RTSetLogLevelDev)
	})
	httpServer := fmt.Sprintf("0.0.0.0:%d", port)
	// The server binds to all interfaces, not just localhost, so log the
	// actual bind address (the old message claimed "localhost").
	log.Printf("Server is running on %s...", httpServer)
	log.Println(http.ListenAndServe(httpServer, m))
}
/****************/
/*Runtime Info
/****************/
// PingSNMPDevice checks SNMP connectivity for the device config posted by
// the frontend: it initializes a transient SnmpDevice and tries to connect.
// On success it returns 200 with the device SysInfo, on failure 400 with
// the connection error string.
func PingSNMPDevice(ctx *macaron.Context, cfg SnmpDeviceCfg) {
	log.Infof("trying to ping device %s : %+v", cfg.ID, cfg)
	dev := SnmpDevice{}
	dev.Init(&cfg)
	err := dev.InitSnmpConnect()
	if err != nil {
		log.Debugf("ERROR: DEVICE RETURNED %+v, ERROR: %s", dev, err)
		ctx.JSON(400, err.Error())
	} else {
		log.Debugf("OK DEVICE RETURNED %+v", dev)
		ctx.JSON(200, &dev.SysInfo)
	}
}
// RTSetLogLevelDev sets the runtime log level of a running device and
// returns the device state, or 404 if no device with that id is running.
// (Previous doc comment wrongly named RTActivateDev.)
func RTSetLogLevelDev(ctx *Context) {
	id := ctx.Params(":id")
	level := ctx.Params(":level")
	dev, ok := devices[id]
	if !ok {
		ctx.JSON(404, fmt.Errorf("there is not any device with id %s running", id))
		return
	}
	log.Infof("set runtime log level from device id %s : %s", id, level)
	dev.RTSetLogLevel(level)
	ctx.JSON(200, dev)
}
// RTActivateDev activates runtime polling on a running device and returns
// its state, or 404 if no device with that id is running.
func RTActivateDev(ctx *Context) {
	id := ctx.Params(":id")
	dev, ok := devices[id]
	if !ok {
		ctx.JSON(404, fmt.Errorf("there is not any device with id %s running", id))
		return
	}
	log.Infof("activating runtime on device %s", id)
	dev.RTActivate(true)
	ctx.JSON(200, dev)
}
// RTDeactivateDev deactivates runtime polling on a running device and
// returns its state, or 404 if no device with that id is running.
func RTDeactivateDev(ctx *Context) {
	id := ctx.Params(":id")
	dev, ok := devices[id]
	if !ok {
		ctx.JSON(404, fmt.Errorf("there is not any device with id %s running", id))
		return
	}
	log.Infof("deactivating runtime on device %s", id)
	dev.RTActivate(false)
	ctx.JSON(200, dev)
}
// RTActSnmpDebugDev enables SNMP debugging on a running device and returns
// its state, or 404 if no device with that id is running.
func RTActSnmpDebugDev(ctx *Context) {
	id := ctx.Params(":id")
	dev, ok := devices[id]
	if !ok {
		ctx.JSON(404, fmt.Errorf("there is not any device with id %s running", id))
		return
	}
	log.Infof("activating snmpdebug %s", id)
	dev.RTActSnmpDebug(true)
	ctx.JSON(200, dev)
}
// RTDeactSnmpDebugDev disables SNMP debugging on a running device and
// returns its state, or 404 if no device with that id is running.
func RTDeactSnmpDebugDev(ctx *Context) {
	id := ctx.Params(":id")
	dev, ok := devices[id]
	if !ok {
		ctx.JSON(404, fmt.Errorf("there is not any device with id %s running", id))
		return
	}
	log.Infof("deactivating snmpdebug %s", id)
	dev.RTActSnmpDebug(false)
	ctx.JSON(200, dev)
}
// RTGetInfo returns runtime data for the device named by :id, or for all
// running devices when no id is given. Unknown ids yield 404.
func RTGetInfo(ctx *Context) {
	id := ctx.Params(":id")
	if len(id) == 0 {
		// No id given: dump every running device.
		ctx.JSON(200, &devices)
		return
	}
	dev, ok := devices[id]
	if !ok {
		ctx.JSON(404, fmt.Errorf("there is not any device with id %s running", id))
		return
	}
	log.Infof("get runtime data from id %s", id)
	ctx.JSON(200, dev)
}
// RInfo describes the running instance: its configured identity plus the
// build metadata returned by the /runtime/version/ endpoint.
type RInfo struct {
	InstanceID string // instance identifier from the general config
	Version    string // release version baked in at build time
	Commit     string // git commit hash
	Branch     string // git branch
	BuildStamp string // build timestamp
}
// RTGetVersion returns the instance identity and build metadata as JSON.
func RTGetVersion(ctx *Context) {
	info := &RInfo{
		InstanceID: cfg.General.InstanceID,
		Version:    version,
		Commit:     commit,
		Branch:     branch,
		BuildStamp: buildstamp,
	}
	// info is already a pointer: pass it directly instead of &info, which
	// was an accidental **RInfo (serialized the same, but needlessly).
	ctx.JSON(200, info)
}
/****************/
/*SNMP DEVICES
/****************/
// GetSNMPDevices Return snmpdevice list to frontend
func GetSNMPDevices(ctx *Context) {
	// Empty filter string — presumably "all configured devices"; confirm
	// against the Database API.
	devcfgarray, err := cfg.Database.GetSnmpDeviceCfgArray("")
	if err != nil {
		ctx.JSON(404, err)
		log.Errorf("Error on get Devices :%+s", err)
		return
	}
	ctx.JSON(200, &devcfgarray)
	log.Debugf("Getting DEVICEs %+v", &devcfgarray)
}
// AddSNMPDevice inserts a new snmpdevice config into the internal DB and
// echoes the stored config on success, or 404 with the DB error.
func AddSNMPDevice(ctx *Context, dev SnmpDeviceCfg) {
	log.Printf("ADDING DEVICE %+v", dev)
	affected, err := cfg.Database.AddSnmpDeviceCfg(dev)
	if err != nil {
		log.Warningf("Error on insert for device %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
		return
	}
	//TODO: review if needed return data or affected
	ctx.JSON(200, &dev)
}
// UpdateSNMPDevice updates the snmpdevice config identified by :id and
// echoes the updated config on success, or 404 with the DB error.
func UpdateSNMPDevice(ctx *Context, dev SnmpDeviceCfg) {
	id := ctx.Params(":id")
	// Log message typo fixed: "Tying" -> "Trying".
	log.Debugf("Trying to update: %+v", dev)
	affected, err := cfg.Database.UpdateSnmpDeviceCfg(id, dev)
	if err != nil {
		log.Warningf("Error on update for device %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
		return
	}
	//TODO: review if needed return device data
	ctx.JSON(200, &dev)
}
// DeleteSNMPDevice removes the snmpdevice config identified by :id, or
// returns 404 with the DB error.
func DeleteSNMPDevice(ctx *Context) {
	id := ctx.Params(":id")
	// Log message typo fixed: "Tying" -> "Trying".
	log.Debugf("Trying to delete: %+v", id)
	affected, err := cfg.Database.DelSnmpDeviceCfg(id)
	if err != nil {
		// Log message typo fixed: "delete1" -> "delete".
		log.Warningf("Error on delete for device %s , affected : %+v , error: %s", id, affected, err)
		ctx.JSON(404, err)
		return
	}
	ctx.JSON(200, "deleted")
}
// GetSNMPDeviceByID returns the snmpdevice config identified by :id, or
// 404 with the DB error.
func GetSNMPDeviceByID(ctx *Context) {
	id := ctx.Params(":id")
	dev, err := cfg.Database.GetSnmpDeviceCfgByID(id)
	if err != nil {
		log.Warningf("Error on get Device for device %s , error: %s", id, err)
		ctx.JSON(404, err)
		return
	}
	ctx.JSON(200, &dev)
}
/****************/
/*SNMP METRICS
/****************/
// GetMetrics Return metrics list to frontend
func GetMetrics(ctx *Context) {
	// Empty filter string — presumably "all configured metrics"; confirm
	// against the Database API.
	cfgarray, err := cfg.Database.GetSnmpMetricCfgArray("")
	if err != nil {
		ctx.JSON(404, err)
		log.Errorf("Error on get Metrics :%+s", err)
		return
	}
	ctx.JSON(200, &cfgarray)
	log.Debugf("Getting Metrics %+v", &cfgarray)
}
// AddMetric inserts a new metric config into the internal DB and echoes the
// stored config on success, or 404 with the DB error.
func AddMetric(ctx *Context, dev SnmpMetricCfg) {
	log.Printf("ADDING Metric %+v", dev)
	affected, err := cfg.Database.AddSnmpMetricCfg(dev)
	if err != nil {
		log.Warningf("Error on insert Metric %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
		return
	}
	//TODO: review if needed return data or affected
	ctx.JSON(200, &dev)
}
// UpdateMetric updates the metric config identified by :id and echoes the
// updated config on success, or 404 with the DB error.
func UpdateMetric(ctx *Context, dev SnmpMetricCfg) {
	id := ctx.Params(":id")
	// Log message typo fixed: "Tying" -> "Trying".
	log.Debugf("Trying to update: %+v", dev)
	affected, err := cfg.Database.UpdateSnmpMetricCfg(id, dev)
	if err != nil {
		log.Warningf("Error on update Metric %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
		return
	}
	//TODO: review if needed return device data
	ctx.JSON(200, &dev)
}
// DeleteMetric removes the metric config identified by :id, or returns 404
// with the DB error.
func DeleteMetric(ctx *Context) {
	id := ctx.Params(":id")
	// Log message typo fixed: "Tying" -> "Trying".
	log.Debugf("Trying to delete: %+v", id)
	affected, err := cfg.Database.DelSnmpMetricCfg(id)
	if err != nil {
		log.Warningf("Error on delete Metric %s , affected : %+v , error: %s", id, affected, err)
		ctx.JSON(404, err)
		return
	}
	ctx.JSON(200, "deleted")
}
// GetMetricByID returns the metric config identified by :id, or 404 with
// the DB error.
func GetMetricByID(ctx *Context) {
	id := ctx.Params(":id")
	dev, err := cfg.Database.GetSnmpMetricCfgByID(id)
	if err != nil {
		log.Warningf("Error on get Metric for device %s , error: %s", id, err)
		ctx.JSON(404, err)
		return
	}
	ctx.JSON(200, &dev)
}
/****************/
/*INFLUX MEASUREMENTS
/****************/
// GetMeas Return measurements list to frontend
func GetMeas(ctx *Context) {
	// Empty filter string — presumably "all configured measurements";
	// confirm against the Database API.
	cfgarray, err := cfg.Database.GetInfluxMeasurementCfgArray("")
	if err != nil {
		ctx.JSON(404, err)
		log.Errorf("Error on get Influx Measurements :%+s", err)
		return
	}
	ctx.JSON(200, &cfgarray)
	log.Debugf("Getting Measurements %+v", &cfgarray)
}
// AddMeas inserts a new measurement config into the internal DB and echoes
// the stored config on success, or 404 with the DB error.
func AddMeas(ctx *Context, dev InfluxMeasurementCfg) {
	log.Printf("ADDING Measurement %+v", dev)
	affected, err := cfg.Database.AddInfluxMeasurementCfg(dev)
	if err != nil {
		log.Warningf("Error on insert Measurement %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
		return
	}
	//TODO: review if needed return data or affected
	ctx.JSON(200, &dev)
}
// UpdateMeas updates the measurement config identified by :id and echoes
// the updated config on success, or 404 with the DB error.
func UpdateMeas(ctx *Context, dev InfluxMeasurementCfg) {
	id := ctx.Params(":id")
	// Log message typo fixed: "Tying" -> "Trying".
	log.Debugf("Trying to update: %+v", dev)
	affected, err := cfg.Database.UpdateInfluxMeasurementCfg(id, dev)
	if err != nil {
		log.Warningf("Error on update Measurement %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
		return
	}
	//TODO: review if needed return device data
	ctx.JSON(200, &dev)
}
//DeleteMeas --pending--
func DeleteMeas(ctx *Context) {
id := ctx.Params(":id")
log.Debugf("Tying to delete: %+v", id)
affected, err := cfg.Database.DelInfluxMeasurementCfg(id)
if err != nil {
log.Warningf("Error on delete Measurement %s , affected : %+v , error: %s", id, affected, err)
ctx.JSON(404, err)
} else {
ctx.JSON(200, "deleted")
}
}
//GetMeasByID --pending--
func GetMeasByID(ctx *Context) {
id := ctx.Params(":id")
dev, err := cfg.Database.GetInfluxMeasurementCfgByID(id)
if err != nil {
log.Warningf("Error on get Measurement for device %s , error: %s", id, err)
ctx.JSON(404, err)
} else {
ctx.JSON(200, &dev)
}
}
/****************/
/*MEASUREMENT GROUPS
/****************/

// GetMeasGroup returns the full measurement-group config list as JSON.
func GetMeasGroup(ctx *Context) {
	cfgarray, err := cfg.Database.GetMGroupsCfgArray("")
	if err != nil {
		ctx.JSON(404, err)
		log.Errorf("Error on get Measurement Group :%+s", err)
		return
	}
	ctx.JSON(200, &cfgarray)
	log.Debugf("Getting Meas Group %+v", &cfgarray)
}

// AddMeasGroup inserts a new measurement-group config into the internal DB.
func AddMeasGroup(ctx *Context, dev MGroupsCfg) {
	log.Printf("ADDING Measurement Group %+v", dev)
	affected, err := cfg.Database.AddMGroupsCfg(dev)
	if err != nil {
		log.Warningf("Error on insert Measurement Group %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
	} else {
		//TODO: review if needed return data or affected
		ctx.JSON(200, &dev)
	}
}

// UpdateMeasGroup updates the measurement-group config identified by ":id".
func UpdateMeasGroup(ctx *Context, dev MGroupsCfg) {
	id := ctx.Params(":id")
	log.Debugf("Tying to update: %+v", dev)
	affected, err := cfg.Database.UpdateMGroupsCfg(id, dev)
	if err != nil {
		log.Warningf("Error on update Measurement Group %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
	} else {
		//TODO: review if needed return device data
		ctx.JSON(200, &dev)
	}
}

// DeleteMeasGroup removes the measurement-group config identified by ":id".
func DeleteMeasGroup(ctx *Context) {
	id := ctx.Params(":id")
	log.Debugf("Tying to delete: %+v", id)
	affected, err := cfg.Database.DelMGroupsCfg(id)
	if err != nil {
		log.Warningf("Error on delete Measurement Group %s , affected : %+v , error: %s", id, affected, err)
		ctx.JSON(404, err)
	} else {
		ctx.JSON(200, "deleted")
	}
}

// GetMeasGroupByID returns the measurement-group config identified by ":id".
func GetMeasGroupByID(ctx *Context) {
	id := ctx.Params(":id")
	dev, err := cfg.Database.GetMGroupsCfgByID(id)
	if err != nil {
		log.Warningf("Error on get Measurement Group for device %s , error: %s", id, err)
		ctx.JSON(404, err)
	} else {
		ctx.JSON(200, &dev)
	}
}
/********************/
/*MEASUREMENT FILTERS
/********************/

// GetMeasFilter returns the full measurement-filter config list as JSON.
func GetMeasFilter(ctx *Context) {
	cfgarray, err := cfg.Database.GetMeasFilterCfgArray("")
	if err != nil {
		ctx.JSON(404, err)
		log.Errorf("Error on get Measurement Filter :%+s", err)
		return
	}
	ctx.JSON(200, &cfgarray)
	log.Debugf("Getting Measurement Filter %+v", &cfgarray)
}

// AddMeasFilter inserts a new measurement-filter config into the internal DB.
func AddMeasFilter(ctx *Context, dev MeasFilterCfg) {
	log.Printf("ADDING measurement Filter %+v", dev)
	affected, err := cfg.Database.AddMeasFilterCfg(dev)
	if err != nil {
		log.Warningf("Error on insert Measurment Filter %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
	} else {
		//TODO: review if needed return data or affected
		ctx.JSON(200, &dev)
	}
}

// UpdateMeasFilter updates the measurement-filter config identified by ":id".
func UpdateMeasFilter(ctx *Context, dev MeasFilterCfg) {
	id := ctx.Params(":id")
	log.Debugf("Tying to update: %+v", dev)
	affected, err := cfg.Database.UpdateMeasFilterCfg(id, dev)
	if err != nil {
		log.Warningf("Error on update Measurment Filter %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
	} else {
		//TODO: review if needed return device data
		ctx.JSON(200, &dev)
	}
}

// DeleteMeasFilter removes the measurement-filter config identified by ":id".
func DeleteMeasFilter(ctx *Context) {
	id := ctx.Params(":id")
	log.Debugf("Tying to delete: %+v", id)
	affected, err := cfg.Database.DelMeasFilterCfg(id)
	if err != nil {
		log.Warningf("Error on delete Measurement Filter %s , affected : %+v , error: %s", id, affected, err)
		ctx.JSON(404, err)
	} else {
		ctx.JSON(200, "deleted")
	}
}

// GetMeasFilterByID returns the measurement-filter config identified by ":id".
func GetMeasFilterByID(ctx *Context) {
	id := ctx.Params(":id")
	dev, err := cfg.Database.GetMeasFilterCfgByID(id)
	if err != nil {
		log.Warningf("Error on get Measurement Filter for device %s , error: %s", id, err)
		ctx.JSON(404, err)
	} else {
		ctx.JSON(200, &dev)
	}
}
/****************/
/* INFLUX SERVERS
/****************/

// GetInfluxServer returns the configured influx backend list as JSON.
func GetInfluxServer(ctx *Context) {
	cfgarray, err := cfg.Database.GetInfluxCfgArray("")
	if err != nil {
		ctx.JSON(404, err)
		log.Errorf("Error on get Influx db :%+s", err)
		return
	}
	ctx.JSON(200, &cfgarray)
	log.Debugf("Getting DEVICEs %+v", &cfgarray)
}

// AddInfluxServer inserts a new influx backend config into the internal DB.
func AddInfluxServer(ctx *Context, dev InfluxCfg) {
	log.Printf("ADDING Influx Backend %+v", dev)
	affected, err := cfg.Database.AddInfluxCfg(dev)
	if err != nil {
		log.Warningf("Error on insert new Backend %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
	} else {
		//TODO: review if needed return data or affected
		ctx.JSON(200, &dev)
	}
}
// UpdateInfluxServer updates the influx backend config identified by the
// ":id" URL parameter with the posted InfluxCfg body.
// Replies 200 with the updated config, 404 with the error otherwise.
func UpdateInfluxServer(ctx *Context, dev InfluxCfg) {
	id := ctx.Params(":id")
	log.Debugf("Trying to update: %+v", dev)
	affected, err := cfg.Database.UpdateInfluxCfg(id, dev)
	if err != nil {
		log.Warningf("Error on update Influx db %s , affected : %+v , error: %s", dev.ID, affected, err)
		// BUG FIX: previously no response was written on error, so the client
		// received no reply; mirror every other update handler and return 404.
		ctx.JSON(404, err)
	} else {
		//TODO: review if needed return device data
		ctx.JSON(200, &dev)
	}
}
// DeleteInfluxServer removes the influx backend config identified by ":id".
func DeleteInfluxServer(ctx *Context) {
	id := ctx.Params(":id")
	log.Debugf("Tying to delete: %+v", id)
	affected, err := cfg.Database.DelInfluxCfg(id)
	if err != nil {
		log.Warningf("Error on delete influx db %s , affected : %+v , error: %s", id, affected, err)
		ctx.JSON(404, err)
	} else {
		ctx.JSON(200, "deleted")
	}
}

// GetInfluxServerByID returns the influx backend config identified by ":id".
func GetInfluxServerByID(ctx *Context) {
	id := ctx.Params(":id")
	dev, err := cfg.Database.GetInfluxCfgByID(id)
	if err != nil {
		log.Warningf("Error on get Influx db data for device %s , error: %s", id, err)
		ctx.JSON(404, err)
	} else {
		ctx.JSON(200, &dev)
	}
}

// GetInfluxAffectOnDel returns the list of objects that would be affected if
// the influx backend ":id" were deleted (pre-delete impact check).
func GetInfluxAffectOnDel(ctx *Context) {
	id := ctx.Params(":id")
	obarray, err := cfg.Database.GetInfluxCfgAffectOnDel(id)
	if err != nil {
		log.Warningf("Error on get object array for influx device %s , error: %s", id, err)
		ctx.JSON(404, err)
	} else {
		ctx.JSON(200, &obarray)
	}
}
/****************/
/*LOGIN
/****************/

// myLoginHandler validates the posted credentials against the configured
// admin user/password and marks the session as signed in on success.
// NOTE(review): this prints the whole HTTP config — including the admin
// password — to stdout, and compares the password in plain text; review.
func myLoginHandler(ctx *Context, user UserLogin) {
	fmt.Printf("USER LOGIN: USER: +%#v (Config: %#v)", user, cfg.HTTP)
	if user.UserName == cfg.HTTP.AdminUser && user.Password == cfg.HTTP.AdminPassword {
		ctx.SignedInUser = user.UserName
		ctx.IsSignedIn = true
		ctx.Session.Set(SESS_KEY_USERID, user.UserName)
		fmt.Println("OK")
		ctx.JSON(200, "OK")
	} else {
		fmt.Println("ERROR")
		ctx.JSON(404, "ERROR")
	}
}

// myLogoutHandler destroys the current session.
// NOTE(review): "Destory" is the (misspelled) method name exposed by the
// session backend, not a typo introduced here.
func myLogoutHandler(ctx *Context) {
	log.Printf("USER LOGOUT: USER: +%#v ", ctx.SignedInUser)
	ctx.Session.Destory(ctx)
	//ctx.Redirect("/login")
}
Reduced the runtime info array to only the main stats.
package main
import (
"fmt"
"github.com/go-macaron/binding"
//"github.com/go-macaron/cache"
"github.com/go-macaron/session"
"gopkg.in/macaron.v1"
// "html/template"
"net/http"
)
// HTTPConfig has webserver config options loaded from the TOML config file.
type HTTPConfig struct {
	Port          int    `toml:"port"`          // TCP port the web UI listens on
	AdminUser     string `toml:"adminuser"`     // login user checked by myLoginHandler
	AdminPassword string `toml:"adminpassword"` // login password (stored and compared in plain text)
}

// UserLogin carries the credentials posted to /login; both fields required.
type UserLogin struct {
	UserName string `form:"username" binding:"Required"`
	Password string `form:"password" binding:"Required"`
}
// webServer configures and starts the embedded macaron HTTP server on the
// given port: static assets, in-memory sessions, JSON/HTML rendering, the
// login/logout endpoints and the REST routes for every configuration entity
// plus the runtime API. This function blocks in http.ListenAndServe.
func webServer(port int) {
	bind := binding.Bind

	/* jwtMiddleware := jwtmiddleware.New(jwtmiddleware.Options{
	ValidationKeyGetter: func(token *jwt.Token) (interface{}, error) {
	return []byte("My Secret"), nil
	},
	// When set, the middleware verifies that tokens are signed with the specific signing algorithm
	// If the signing method is not constant the ValidationKeyGetter callback can be used to implement additional checks
	// Important to avoid security issues described here: https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/
	SigningMethod: jwt.SigningMethodHS256,
	})*/

	// initiate the app
	m := macaron.Classic()

	// register middleware
	m.Use(GetContextHandler())
	m.Use(macaron.Recovery())
	// m.Use(gzip.Gziper())
	m.Use(macaron.Static("public",
		macaron.StaticOptions{
			// Prefix is the optional prefix used to serve the static directory content. Default is empty string.
			Prefix: "public",
			// SkipLogging will disable [Static] log messages when a static file is served. Default is false.
			SkipLogging: true,
			// IndexFile defines which file to serve as index if it exists. Default is "index.html".
			IndexFile: "index.html",
			// Expires defines which user-defined function to use for producing a HTTP Expires Header. Default is nil.
			// https://developers.google.com/speed/docs/insights/LeverageBrowserCaching
			Expires: func() string { return "max-age=0" },
		}))
	m.Use(Sessioner(session.Options{
		// Name of provider. Default is "memory".
		Provider: "memory",
		// Provider configuration, it's corresponding to provider.
		ProviderConfig: "",
		// Cookie name to save session ID. Default is "MacaronSession".
		CookieName: "snmpcollector-session",
		// Cookie path to store. Default is "/".
		CookiePath: "/",
		// GC interval time in seconds. Default is 3600.
		Gclifetime: 3600,
		// Max life time in seconds. Default is whatever GC interval time is.
		Maxlifetime: 3600,
		// Use HTTPS only. Default is false.
		Secure: false,
		// Cookie life time. Default is 0.
		CookieLifeTime: 0,
		// Cookie domain name. Default is empty.
		Domain: "",
		// Session ID length. Default is 16.
		IDLength: 16,
		// Configuration section name. Default is "session".
		Section: "session",
	}))
	m.Use(macaron.Renderer(macaron.RenderOptions{
		// Directory to load templates. Default is "templates".
		Directory: "pkg/templates",
		// Extensions to parse template files from. Defaults are [".tmpl", ".html"].
		Extensions: []string{".tmpl", ".html"},
		// Funcs is a slice of FuncMaps to apply to the template upon compilation. Default is [].
		/*Funcs: []template.FuncMap{map[string]interface{}{
			"AppName": func() string {
				return "snmpcollector"
			},
			"AppVer": func() string {
				return "0.5.1"
			},
		}},*/
		// Delims sets the action delimiters to the specified strings. Defaults are ["{{", "}}"].
		Delims: macaron.Delims{"{{", "}}"},
		// Appends the given charset to the Content-Type header. Default is "UTF-8".
		Charset: "UTF-8",
		// Outputs human readable JSON. Default is false.
		IndentJSON: true,
		// Outputs human readable XML. Default is false.
		IndentXML: true,
		// Prefixes the JSON output with the given bytes. Default is no prefix.
		// PrefixJSON: []byte("macaron"),
		// Prefixes the XML output with the given bytes. Default is no prefix.
		// PrefixXML: []byte("macaron"),
		// Allows changing of output to XHTML instead of HTML. Default is "text/html".
		HTMLContentType: "text/html",
	}))
	/*
		m.Use(cache.Cacher(cache.Options{
			// Name of adapter. Default is "memory".
			Adapter: "memory",
			// Adapter configuration, it's corresponding to adapter.
			AdapterConfig: "",
			// GC interval time in seconds. Default is 60.
			Interval: 60,
			// Configuration section name. Default is "cache".
			Section: "cache",
		}))*/

	// Authentication endpoints (unauthenticated); everything below requires
	// a signed-in session via reqSignedIn.
	m.Post("/login", bind(UserLogin{}), myLoginHandler)
	m.Post("/logout", myLogoutHandler)

	// CRUD routes per configuration entity.
	m.Group("/metric", func() {
		m.Get("/", reqSignedIn, GetMetrics)
		m.Post("/", reqSignedIn, bind(SnmpMetricCfg{}), AddMetric)
		m.Put("/:id", reqSignedIn, bind(SnmpMetricCfg{}), UpdateMetric)
		m.Delete("/:id", reqSignedIn, DeleteMetric)
		m.Get("/:id", reqSignedIn, GetMetricByID)
	})
	m.Group("/measurement", func() {
		m.Get("/", reqSignedIn, GetMeas)
		m.Post("/", reqSignedIn, bind(InfluxMeasurementCfg{}), AddMeas)
		m.Put("/:id", reqSignedIn, bind(InfluxMeasurementCfg{}), UpdateMeas)
		m.Delete("/:id", reqSignedIn, DeleteMeas)
		m.Get("/:id", reqSignedIn, GetMeasByID)
	})
	m.Group("/measgroups", func() {
		m.Get("/", reqSignedIn, GetMeasGroup)
		m.Post("/", reqSignedIn, bind(MGroupsCfg{}), AddMeasGroup)
		m.Put("/:id", reqSignedIn, bind(MGroupsCfg{}), UpdateMeasGroup)
		m.Delete("/:id", reqSignedIn, DeleteMeasGroup)
		m.Get("/:id", reqSignedIn, GetMeasGroupByID)
	})
	m.Group("/measfilters", func() {
		m.Get("/", reqSignedIn, GetMeasFilter)
		m.Post("/", reqSignedIn, bind(MeasFilterCfg{}), AddMeasFilter)
		m.Put("/:id", reqSignedIn, bind(MeasFilterCfg{}), UpdateMeasFilter)
		m.Delete("/:id", reqSignedIn, DeleteMeasFilter)
		m.Get("/:id", reqSignedIn, GetMeasFilterByID)
	})
	m.Group("/influxservers", func() {
		m.Get("/", reqSignedIn, GetInfluxServer)
		m.Post("/", reqSignedIn, bind(InfluxCfg{}), AddInfluxServer)
		m.Put("/:id", reqSignedIn, bind(InfluxCfg{}), UpdateInfluxServer)
		m.Delete("/:id", reqSignedIn, DeleteInfluxServer)
		m.Get("/:id", reqSignedIn, GetInfluxServerByID)
		// NOTE(review): "ckeckondel" looks like a typo for "checkondel";
		// the frontend must use the same path, so confirm before renaming.
		m.Get("/ckeckondel/:id", reqSignedIn, GetInfluxAffectOnDel)
	})
	// Data sources
	m.Group("/snmpdevice", func() {
		m.Get("/", reqSignedIn, GetSNMPDevices)
		m.Post("/", reqSignedIn, bind(SnmpDeviceCfg{}), AddSNMPDevice)
		m.Put("/:id", reqSignedIn, bind(SnmpDeviceCfg{}), UpdateSNMPDevice)
		m.Delete("/:id", reqSignedIn, DeleteSNMPDevice)
		m.Get("/:id", reqSignedIn, GetSNMPDeviceByID)
	})
	// Runtime inspection/control of the running collector.
	m.Group("/runtime", func() {
		m.Post("/snmpping/", reqSignedIn, bind(SnmpDeviceCfg{}), PingSNMPDevice)
		m.Get("/version/", reqSignedIn, RTGetVersion)
		m.Get("/info/", reqSignedIn, RTGetInfo)
		m.Get("/info/:id", reqSignedIn, RTGetInfo)
		m.Put("/activatedev/:id", reqSignedIn, RTActivateDev)
		m.Put("/deactivatedev/:id", reqSignedIn, RTDeactivateDev)
		m.Put("/actsnmpdbg/:id", reqSignedIn, RTActSnmpDebugDev)
		m.Put("/deactsnmpdbg/:id", reqSignedIn, RTDeactSnmpDebugDev)
		m.Put("/setloglevel/:id/:level", reqSignedIn, RTSetLogLevelDev)
	})

	log.Printf("Server is running on localhost:%d...", port)
	httpServer := fmt.Sprintf("0.0.0.0:%d", port)
	log.Println(http.ListenAndServe(httpServer, m))
}
/****************/
/*Runtime Info
/****************/

// PingSNMPDevice builds a transient SnmpDevice from the posted config and
// attempts an SNMP connection. Replies 200 with the device SysInfo on
// success, 400 with the error text otherwise.
// NOTE(review): the parameter is named cfg and shadows the package-level
// config variable within this handler.
func PingSNMPDevice(ctx *macaron.Context, cfg SnmpDeviceCfg) {
	log.Infof("trying to ping device %s : %+v", cfg.ID, cfg)
	dev := SnmpDevice{}
	dev.Init(&cfg)
	err := dev.InitSnmpConnect()
	if err != nil {
		log.Debugf("ERROR: DEVICE RETURNED %+v, ERROR: %s", dev, err)
		ctx.JSON(400, err.Error())
	} else {
		log.Debugf("OK DEVICE RETURNED %+v", dev)
		ctx.JSON(200, &dev.SysInfo)
	}
}
// RTSetLogLevelDev sets the runtime log level of the running device ":id"
// to the ":level" URL parameter; 404 if no such device is running.
func RTSetLogLevelDev(ctx *Context) {
	id := ctx.Params(":id")
	level := ctx.Params(":level")
	if dev, ok := devices[id]; !ok {
		ctx.JSON(404, fmt.Errorf("there is not any device with id %s running", id))
		return
	} else {
		log.Infof("set runtime log level from device id %s : %s", id, level)
		dev.RTSetLogLevel(level)
		ctx.JSON(200, dev)
	}
}

// RTActivateDev activates polling on the running device ":id".
func RTActivateDev(ctx *Context) {
	id := ctx.Params(":id")
	if dev, ok := devices[id]; !ok {
		ctx.JSON(404, fmt.Errorf("there is not any device with id %s running", id))
		return
	} else {
		log.Infof("activating runtime on device %s", id)
		dev.RTActivate(true)
		ctx.JSON(200, dev)
	}
}

// RTDeactivateDev deactivates polling on the running device ":id".
func RTDeactivateDev(ctx *Context) {
	id := ctx.Params(":id")
	if dev, ok := devices[id]; !ok {
		ctx.JSON(404, fmt.Errorf("there is not any device with id %s running", id))
		return
	} else {
		log.Infof("deactivating runtime on device %s", id)
		dev.RTActivate(false)
		ctx.JSON(200, dev)
	}
}

// RTActSnmpDebugDev enables SNMP debug output on the running device ":id".
func RTActSnmpDebugDev(ctx *Context) {
	id := ctx.Params(":id")
	if dev, ok := devices[id]; !ok {
		ctx.JSON(404, fmt.Errorf("there is not any device with id %s running", id))
		return
	} else {
		log.Infof("activating snmpdebug %s", id)
		dev.RTActSnmpDebug(true)
		ctx.JSON(200, dev)
	}
}

// RTDeactSnmpDebugDev disables SNMP debug output on the running device ":id".
func RTDeactSnmpDebugDev(ctx *Context) {
	id := ctx.Params(":id")
	if dev, ok := devices[id]; !ok {
		ctx.JSON(404, fmt.Errorf("there is not any device with id %s running", id))
		return
	} else {
		log.Infof("deactivating snmpdebug %s", id)
		dev.RTActSnmpDebug(false)
		ctx.JSON(200, dev)
	}
}
// devStat is the reduced per-device runtime summary returned by RTGetInfo
// when no device id is given.
type devStat struct {
	Requests           int64
	Gets               int64
	Errors             int64
	ReloadLoopsPending int
	DeviceActive       bool
	DeviceConnected    bool
	NumMeasurements    int
	NumMetrics         int // sum of OID->metric map sizes over all measurements
}
// RTGetInfo returns runtime information: with a non-empty ":id" parameter
// the full runtime state of that device (404 if not running), otherwise a
// reduced devStat summary for every registered device.
func RTGetInfo(ctx *Context) {
	id := ctx.Params(":id")
	if len(id) > 0 {
		if dev, ok := devices[id]; !ok {
			ctx.JSON(404, fmt.Errorf("there is not any device with id %s running", id))
			return
		} else {
			log.Infof("get runtime data from id %s", id)
			ctx.JSON(200, dev)
		}
		//get only one device info
	} else {
		devstats := make(map[string]*devStat)
		for k, v := range devices {
			// count configured metrics across all of this device's measurements
			sum := 0
			for _, m := range v.Measurements {
				sum += len(m.OidSnmpMap)
			}
			devstats[k] = &devStat{
				Requests:           v.Requests,
				Gets:               v.Gets,
				Errors:             v.Errors,
				ReloadLoopsPending: v.ReloadLoopsPending,
				DeviceActive:       v.DeviceActive,
				DeviceConnected:    v.DeviceConnected,
				NumMeasurements:    len(v.Measurements),
				NumMetrics:         sum,
			}
		}
		ctx.JSON(200, &devstats)
	}
	return
}
// RInfo holds instance/build identification reported to the frontend.
type RInfo struct {
	InstanceID string
	Version    string
	Commit     string
	Branch     string
	BuildStamp string
}

// RTGetVersion returns version/build information for this instance, taken
// from the package-level build variables and the loaded config.
func RTGetVersion(ctx *Context) {
	info := &RInfo{
		InstanceID: cfg.General.InstanceID,
		Version:    version,
		Commit:     commit,
		Branch:     branch,
		BuildStamp: buildstamp,
	}
	ctx.JSON(200, &info)
}
/****************/
/*SNMP DEVICES
/****************/

// GetSNMPDevices returns the full SNMP device config list as JSON.
func GetSNMPDevices(ctx *Context) {
	devcfgarray, err := cfg.Database.GetSnmpDeviceCfgArray("")
	if err != nil {
		ctx.JSON(404, err)
		log.Errorf("Error on get Devices :%+s", err)
		return
	}
	ctx.JSON(200, &devcfgarray)
	log.Debugf("Getting DEVICEs %+v", &devcfgarray)
}

// AddSNMPDevice inserts a new SNMP device config into the internal DB.
func AddSNMPDevice(ctx *Context, dev SnmpDeviceCfg) {
	log.Printf("ADDING DEVICE %+v", dev)
	affected, err := cfg.Database.AddSnmpDeviceCfg(dev)
	if err != nil {
		log.Warningf("Error on insert for device %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
	} else {
		//TODO: review if needed return data or affected
		ctx.JSON(200, &dev)
	}
}

// UpdateSNMPDevice updates the SNMP device config identified by ":id".
func UpdateSNMPDevice(ctx *Context, dev SnmpDeviceCfg) {
	id := ctx.Params(":id")
	log.Debugf("Tying to update: %+v", dev)
	affected, err := cfg.Database.UpdateSnmpDeviceCfg(id, dev)
	if err != nil {
		log.Warningf("Error on update for device %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
	} else {
		//TODO: review if needed return device data
		ctx.JSON(200, &dev)
	}
}

// DeleteSNMPDevice removes the SNMP device config identified by ":id".
func DeleteSNMPDevice(ctx *Context) {
	id := ctx.Params(":id")
	log.Debugf("Tying to delete: %+v", id)
	affected, err := cfg.Database.DelSnmpDeviceCfg(id)
	if err != nil {
		log.Warningf("Error on delete1 for device %s , affected : %+v , error: %s", id, affected, err)
		ctx.JSON(404, err)
	} else {
		ctx.JSON(200, "deleted")
	}
}

// GetSNMPDeviceByID returns the SNMP device config identified by ":id".
func GetSNMPDeviceByID(ctx *Context) {
	id := ctx.Params(":id")
	dev, err := cfg.Database.GetSnmpDeviceCfgByID(id)
	if err != nil {
		log.Warningf("Error on get Device for device %s , error: %s", id, err)
		ctx.JSON(404, err)
	} else {
		ctx.JSON(200, &dev)
	}
}
/****************/
/*SNMP METRICS
/****************/

// GetMetrics returns the full SNMP metric config list as JSON.
func GetMetrics(ctx *Context) {
	cfgarray, err := cfg.Database.GetSnmpMetricCfgArray("")
	if err != nil {
		ctx.JSON(404, err)
		log.Errorf("Error on get Metrics :%+s", err)
		return
	}
	ctx.JSON(200, &cfgarray)
	log.Debugf("Getting Metrics %+v", &cfgarray)
}

// AddMetric inserts a new SNMP metric config into the internal DB.
func AddMetric(ctx *Context, dev SnmpMetricCfg) {
	log.Printf("ADDING Metric %+v", dev)
	affected, err := cfg.Database.AddSnmpMetricCfg(dev)
	if err != nil {
		log.Warningf("Error on insert Metric %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
	} else {
		//TODO: review if needed return data or affected
		ctx.JSON(200, &dev)
	}
}

// UpdateMetric updates the SNMP metric config identified by ":id".
func UpdateMetric(ctx *Context, dev SnmpMetricCfg) {
	id := ctx.Params(":id")
	log.Debugf("Tying to update: %+v", dev)
	affected, err := cfg.Database.UpdateSnmpMetricCfg(id, dev)
	if err != nil {
		log.Warningf("Error on update Metric %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
	} else {
		//TODO: review if needed return device data
		ctx.JSON(200, &dev)
	}
}

// DeleteMetric removes the SNMP metric config identified by ":id".
func DeleteMetric(ctx *Context) {
	id := ctx.Params(":id")
	log.Debugf("Tying to delete: %+v", id)
	affected, err := cfg.Database.DelSnmpMetricCfg(id)
	if err != nil {
		log.Warningf("Error on delete Metric %s , affected : %+v , error: %s", id, affected, err)
		ctx.JSON(404, err)
	} else {
		ctx.JSON(200, "deleted")
	}
}

// GetMetricByID returns the SNMP metric config identified by ":id".
func GetMetricByID(ctx *Context) {
	id := ctx.Params(":id")
	dev, err := cfg.Database.GetSnmpMetricCfgByID(id)
	if err != nil {
		log.Warningf("Error on get Metric for device %s , error: %s", id, err)
		ctx.JSON(404, err)
	} else {
		ctx.JSON(200, &dev)
	}
}
/****************/
/*INFLUX MEASUREMENTS
/****************/

// GetMeas returns the full influx measurement config list as JSON.
func GetMeas(ctx *Context) {
	cfgarray, err := cfg.Database.GetInfluxMeasurementCfgArray("")
	if err != nil {
		ctx.JSON(404, err)
		log.Errorf("Error on get Influx Measurements :%+s", err)
		return
	}
	ctx.JSON(200, &cfgarray)
	log.Debugf("Getting Measurements %+v", &cfgarray)
}

// AddMeas inserts a new measurement config into the internal DB.
func AddMeas(ctx *Context, dev InfluxMeasurementCfg) {
	log.Printf("ADDING Measurement %+v", dev)
	affected, err := cfg.Database.AddInfluxMeasurementCfg(dev)
	if err != nil {
		log.Warningf("Error on insert Measurement %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
	} else {
		//TODO: review if needed return data or affected
		ctx.JSON(200, &dev)
	}
}

// UpdateMeas updates the measurement config identified by ":id".
func UpdateMeas(ctx *Context, dev InfluxMeasurementCfg) {
	id := ctx.Params(":id")
	log.Debugf("Tying to update: %+v", dev)
	affected, err := cfg.Database.UpdateInfluxMeasurementCfg(id, dev)
	if err != nil {
		log.Warningf("Error on update Measurement %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
	} else {
		//TODO: review if needed return device data
		ctx.JSON(200, &dev)
	}
}

// DeleteMeas removes the measurement config identified by ":id".
func DeleteMeas(ctx *Context) {
	id := ctx.Params(":id")
	log.Debugf("Tying to delete: %+v", id)
	affected, err := cfg.Database.DelInfluxMeasurementCfg(id)
	if err != nil {
		log.Warningf("Error on delete Measurement %s , affected : %+v , error: %s", id, affected, err)
		ctx.JSON(404, err)
	} else {
		ctx.JSON(200, "deleted")
	}
}

// GetMeasByID returns the measurement config identified by ":id".
func GetMeasByID(ctx *Context) {
	id := ctx.Params(":id")
	dev, err := cfg.Database.GetInfluxMeasurementCfgByID(id)
	if err != nil {
		log.Warningf("Error on get Measurement for device %s , error: %s", id, err)
		ctx.JSON(404, err)
	} else {
		ctx.JSON(200, &dev)
	}
}
/****************/
/*MEASUREMENT GROUPS
/****************/

// GetMeasGroup returns the full measurement-group config list as JSON.
func GetMeasGroup(ctx *Context) {
	cfgarray, err := cfg.Database.GetMGroupsCfgArray("")
	if err != nil {
		ctx.JSON(404, err)
		log.Errorf("Error on get Measurement Group :%+s", err)
		return
	}
	ctx.JSON(200, &cfgarray)
	log.Debugf("Getting Meas Group %+v", &cfgarray)
}

// AddMeasGroup inserts a new measurement-group config into the internal DB.
func AddMeasGroup(ctx *Context, dev MGroupsCfg) {
	log.Printf("ADDING Measurement Group %+v", dev)
	affected, err := cfg.Database.AddMGroupsCfg(dev)
	if err != nil {
		log.Warningf("Error on insert Measurement Group %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
	} else {
		//TODO: review if needed return data or affected
		ctx.JSON(200, &dev)
	}
}

// UpdateMeasGroup updates the measurement-group config identified by ":id".
func UpdateMeasGroup(ctx *Context, dev MGroupsCfg) {
	id := ctx.Params(":id")
	log.Debugf("Tying to update: %+v", dev)
	affected, err := cfg.Database.UpdateMGroupsCfg(id, dev)
	if err != nil {
		log.Warningf("Error on update Measurement Group %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
	} else {
		//TODO: review if needed return device data
		ctx.JSON(200, &dev)
	}
}

// DeleteMeasGroup removes the measurement-group config identified by ":id".
func DeleteMeasGroup(ctx *Context) {
	id := ctx.Params(":id")
	log.Debugf("Tying to delete: %+v", id)
	affected, err := cfg.Database.DelMGroupsCfg(id)
	if err != nil {
		log.Warningf("Error on delete Measurement Group %s , affected : %+v , error: %s", id, affected, err)
		ctx.JSON(404, err)
	} else {
		ctx.JSON(200, "deleted")
	}
}

// GetMeasGroupByID returns the measurement-group config identified by ":id".
func GetMeasGroupByID(ctx *Context) {
	id := ctx.Params(":id")
	dev, err := cfg.Database.GetMGroupsCfgByID(id)
	if err != nil {
		log.Warningf("Error on get Measurement Group for device %s , error: %s", id, err)
		ctx.JSON(404, err)
	} else {
		ctx.JSON(200, &dev)
	}
}
/********************/
/*MEASUREMENT FILTERS
/********************/

// GetMeasFilter returns the full measurement-filter config list as JSON.
func GetMeasFilter(ctx *Context) {
	cfgarray, err := cfg.Database.GetMeasFilterCfgArray("")
	if err != nil {
		ctx.JSON(404, err)
		log.Errorf("Error on get Measurement Filter :%+s", err)
		return
	}
	ctx.JSON(200, &cfgarray)
	log.Debugf("Getting Measurement Filter %+v", &cfgarray)
}

// AddMeasFilter inserts a new measurement-filter config into the internal DB.
func AddMeasFilter(ctx *Context, dev MeasFilterCfg) {
	log.Printf("ADDING measurement Filter %+v", dev)
	affected, err := cfg.Database.AddMeasFilterCfg(dev)
	if err != nil {
		log.Warningf("Error on insert Measurment Filter %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
	} else {
		//TODO: review if needed return data or affected
		ctx.JSON(200, &dev)
	}
}

// UpdateMeasFilter updates the measurement-filter config identified by ":id".
func UpdateMeasFilter(ctx *Context, dev MeasFilterCfg) {
	id := ctx.Params(":id")
	log.Debugf("Tying to update: %+v", dev)
	affected, err := cfg.Database.UpdateMeasFilterCfg(id, dev)
	if err != nil {
		log.Warningf("Error on update Measurment Filter %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
	} else {
		//TODO: review if needed return device data
		ctx.JSON(200, &dev)
	}
}

// DeleteMeasFilter removes the measurement-filter config identified by ":id".
func DeleteMeasFilter(ctx *Context) {
	id := ctx.Params(":id")
	log.Debugf("Tying to delete: %+v", id)
	affected, err := cfg.Database.DelMeasFilterCfg(id)
	if err != nil {
		log.Warningf("Error on delete Measurement Filter %s , affected : %+v , error: %s", id, affected, err)
		ctx.JSON(404, err)
	} else {
		ctx.JSON(200, "deleted")
	}
}

// GetMeasFilterByID returns the measurement-filter config identified by ":id".
func GetMeasFilterByID(ctx *Context) {
	id := ctx.Params(":id")
	dev, err := cfg.Database.GetMeasFilterCfgByID(id)
	if err != nil {
		log.Warningf("Error on get Measurement Filter for device %s , error: %s", id, err)
		ctx.JSON(404, err)
	} else {
		ctx.JSON(200, &dev)
	}
}
/****************/
/* INFLUX SERVERS
/****************/

// GetInfluxServer returns the configured influx backend list as JSON.
func GetInfluxServer(ctx *Context) {
	cfgarray, err := cfg.Database.GetInfluxCfgArray("")
	if err != nil {
		ctx.JSON(404, err)
		log.Errorf("Error on get Influx db :%+s", err)
		return
	}
	ctx.JSON(200, &cfgarray)
	log.Debugf("Getting DEVICEs %+v", &cfgarray)
}

// AddInfluxServer inserts a new influx backend config into the internal DB.
func AddInfluxServer(ctx *Context, dev InfluxCfg) {
	log.Printf("ADDING Influx Backend %+v", dev)
	affected, err := cfg.Database.AddInfluxCfg(dev)
	if err != nil {
		log.Warningf("Error on insert new Backend %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
	} else {
		//TODO: review if needed return data or affected
		ctx.JSON(200, &dev)
	}
}
// UpdateInfluxServer updates the influx backend config identified by the
// ":id" URL parameter with the posted InfluxCfg body.
// Replies 200 with the updated config, 404 with the error otherwise.
func UpdateInfluxServer(ctx *Context, dev InfluxCfg) {
	id := ctx.Params(":id")
	log.Debugf("Trying to update: %+v", dev)
	affected, err := cfg.Database.UpdateInfluxCfg(id, dev)
	if err != nil {
		log.Warningf("Error on update Influx db %s , affected : %+v , error: %s", dev.ID, affected, err)
		// BUG FIX: previously no response was written on error, so the client
		// received no reply; mirror every other update handler and return 404.
		ctx.JSON(404, err)
	} else {
		//TODO: review if needed return device data
		ctx.JSON(200, &dev)
	}
}
// DeleteInfluxServer removes the influx backend config identified by ":id".
func DeleteInfluxServer(ctx *Context) {
	id := ctx.Params(":id")
	log.Debugf("Tying to delete: %+v", id)
	affected, err := cfg.Database.DelInfluxCfg(id)
	if err != nil {
		log.Warningf("Error on delete influx db %s , affected : %+v , error: %s", id, affected, err)
		ctx.JSON(404, err)
	} else {
		ctx.JSON(200, "deleted")
	}
}

// GetInfluxServerByID returns the influx backend config identified by ":id".
func GetInfluxServerByID(ctx *Context) {
	id := ctx.Params(":id")
	dev, err := cfg.Database.GetInfluxCfgByID(id)
	if err != nil {
		log.Warningf("Error on get Influx db data for device %s , error: %s", id, err)
		ctx.JSON(404, err)
	} else {
		ctx.JSON(200, &dev)
	}
}

// GetInfluxAffectOnDel returns the list of objects that would be affected if
// the influx backend ":id" were deleted (pre-delete impact check).
func GetInfluxAffectOnDel(ctx *Context) {
	id := ctx.Params(":id")
	obarray, err := cfg.Database.GetInfluxCfgAffectOnDel(id)
	if err != nil {
		log.Warningf("Error on get object array for influx device %s , error: %s", id, err)
		ctx.JSON(404, err)
	} else {
		ctx.JSON(200, &obarray)
	}
}
/****************/
/*LOGIN
/****************/

// myLoginHandler validates the posted credentials against the configured
// admin user/password and marks the session as signed in on success.
// NOTE(review): this prints the whole HTTP config — including the admin
// password — to stdout, and compares the password in plain text; review.
func myLoginHandler(ctx *Context, user UserLogin) {
	fmt.Printf("USER LOGIN: USER: +%#v (Config: %#v)", user, cfg.HTTP)
	if user.UserName == cfg.HTTP.AdminUser && user.Password == cfg.HTTP.AdminPassword {
		ctx.SignedInUser = user.UserName
		ctx.IsSignedIn = true
		ctx.Session.Set(SESS_KEY_USERID, user.UserName)
		fmt.Println("OK")
		ctx.JSON(200, "OK")
	} else {
		fmt.Println("ERROR")
		ctx.JSON(404, "ERROR")
	}
}

// myLogoutHandler destroys the current session.
// NOTE(review): "Destory" is the (misspelled) method name exposed by the
// session backend, not a typo introduced here.
func myLogoutHandler(ctx *Context) {
	log.Printf("USER LOGOUT: USER: +%#v ", ctx.SignedInUser)
	ctx.Session.Destory(ctx)
	//ctx.Redirect("/login")
}
|
package bitstream
import (
"bytes"
"encoding/binary"
"io"
"math"
)
// Buffer sizing for the bit reader. The "sled" is a small overlap region
// copied to the front of the buffer on refill so multi-byte reads can
// safely cross the refill boundary.
const (
	bufferSize = (1024 * 1024) + sled
	sled       = 4
	// Maximum encoded byte lengths for base-128 varints.
	kMaxVarintBytes   = 10 // NOTE(review): currently unused in this file
	kMaxVarint32Bytes = 5
)
// BitReader is a bit-level reader with chunk bookkeeping, used to parse
// bit-packed streams.
type BitReader interface {
	LazyGlobalPosition() int   // global bit position as of the last buffer refill
	ActualGlobalPosition() int // exact global bit position
	ReadBit() bool
	ReadByte() byte
	ReadBytes(int) []byte
	ReadString() string     // variable length, zero-terminated
	ReadCString(int) string // fixed length, cut at the first NUL
	ReadSignedInt(uint) int
	ReadInt(uint) uint
	ReadVarInt32() uint32
	ReadSignedVarInt32() int32
	ReadFloat() float32
	ReadUBitInt() uint
	BeginChunk(int)
	EndChunk()
	ChunkFinished() bool
}
// A simple int stack backed by a slice.
type stack []int

// Push returns the stack with v appended on top.
func (s stack) Push(v int) stack {
	return append(s, v)
}

// Pop returns the stack without the last added item as well as said item (separately).
// Attention: panics when the stack is empty
func (s stack) Pop() (stack, int) {
	// FIXME: CBA to handle empty stacks rn
	l := len(s)
	return s[:l-1], s[l-1]
}

// Top returns the top element without removing it.
// Attention: panics when the stack is empty.
func (s stack) Top() int {
	return s[len(s)-1]
}
// bitReader is a buffered BitReader implementation on top of an io.Reader.
type bitReader struct {
	underlying         io.Reader // source of raw bytes
	buffer             []byte    // refill buffer (bufferSize bytes, incl. sled)
	offset             int       // current bit offset into buffer
	bitsInBuffer       int       // number of valid bits currently in buffer
	lazyGlobalPosition int       // global bit position as of the last refill
	chunkTargets       stack     // end positions of nested BeginChunk sections
}
// LazyGlobalPosition returns the global bit position as of the last buffer
// refill (cheap, but stale by up to one buffer's worth of bits).
func (r *bitReader) LazyGlobalPosition() int {
	return r.lazyGlobalPosition
}

// ActualGlobalPosition returns the exact global bit position of the reader.
func (r *bitReader) ActualGlobalPosition() int {
	return r.lazyGlobalPosition + r.offset
}
// ReadBits reads the given number of bits into a fresh slice, rounded up to
// whole bytes.
// NOTE(review): this reads straight from r.underlying, bypassing r.buffer and
// the current bit offset, while still advancing the buffered position — that
// only lines up when the reader is byte-aligned and the buffer is exhausted.
// Looks suspect; confirm the intended semantics before relying on it.
func (r *bitReader) ReadBits(bits uint) []byte {
	b := make([]byte, (bits+7)/8)
	r.underlying.Read(b)
	r.advance(bits)
	return b
}
// ReadBit reads a single bit (LSB-first within each byte).
func (r *bitReader) ReadBit() bool {
	res := (r.buffer[r.offset/8] & (1 << uint(r.offset&7))) != 0
	r.advance(1)
	return res
}
// advance moves the read position forward by the given number of bits,
// refilling the buffer as often as needed.
func (r *bitReader) advance(bits uint) {
	r.offset += int(bits)
	for r.offset >= r.bitsInBuffer {
		// Refill if we reached the sled
		r.refillBuffer()
	}
}

// refillBuffer copies the trailing sled bytes to the front of the buffer,
// reads fresh data behind them and rebases offset and lazyGlobalPosition.
func (r *bitReader) refillBuffer() {
	r.offset -= r.bitsInBuffer // sled bits used remain in offset
	r.lazyGlobalPosition += r.bitsInBuffer

	// Copy sled to beginning
	copy(r.buffer[0:sled], r.buffer[r.bitsInBuffer/8:r.bitsInBuffer/8+sled])

	// NOTE(review): the Read error is discarded; a short read is detected via
	// newBytes below, but genuine read errors are silently treated as EOF.
	newBytes, _ := r.underlying.Read(r.buffer[sled:])

	r.bitsInBuffer = newBytes * 8
	if newBytes < bufferSize-sled {
		// we're done here, consume sled
		r.bitsInBuffer += sled * 8
	}
}
// ReadByte reads the next 8 bits and returns them as a byte.
func (r *bitReader) ReadByte() byte {
	return r.ReadBitsToByte(8)
}

// ReadBitsToByte reads up to 8 bits into a byte; panics for bits > 8.
func (r *bitReader) ReadBitsToByte(bits uint) byte {
	if bits > 8 {
		panic("Can't read more than 8 bits into byte")
	}
	return byte(r.ReadInt(bits))
}
// ReadInt reads an unsigned integer of up to 32 bits and advances.
func (r *bitReader) ReadInt(bits uint) uint {
	res := r.peekInt(bits)
	r.advance(bits)
	return res
}

// peekInt returns the next bits as an unsigned int without advancing.
// It loads 8 little-endian bytes starting at the 4-byte boundary at/below
// the current byte offset, then shifts away the bits before and after the
// requested window; panics for bits > 32.
func (r *bitReader) peekInt(bits uint) uint {
	if bits > 32 {
		panic("Can't read more than 32 bits for uint")
	}
	val := binary.LittleEndian.Uint64(r.buffer[r.offset/8&^3:])
	return uint(val << (64 - (uint(r.offset) % 32) - bits) >> (64 - bits))
}
// ReadBytes reads the given number of bytes and returns them in a new slice.
func (r *bitReader) ReadBytes(bytes int) []byte {
	out := make([]byte, bytes)
	for i := range out {
		out[i] = r.ReadByte()
	}
	return out
}
// ReadCString reads exactly chars bytes and returns the string up to the
// first NUL terminator, or all chars bytes if no NUL is present.
func (r *bitReader) ReadCString(chars int) string {
	b := r.ReadBytes(chars)
	end := bytes.IndexByte(b, 0)
	if end < 0 {
		end = chars
	}
	return string(b[:end])
}

// ReadString reads a variable length, NUL-terminated string capped at 4096
// bytes.
func (r *bitReader) ReadString() string {
	// Valve also uses this sooo
	return r.readStringLimited(4096, false)
}

// readStringLimited reads bytes until a NUL, optionally a newline, or the
// byte limit is hit; the terminator is consumed but not included.
func (r *bitReader) readStringLimited(limit int, endOnNewLine bool) string {
	result := make([]byte, 0, 512)
	for i := 0; i < limit; i++ {
		b := r.ReadByte()
		if b == 0 || (endOnNewLine && b == '\n') {
			break
		}
		result = append(result, b)
	}
	return string(result)
}

// ReadSignedInt is like ReadInt but returns signed int. The value is
// sign-extended by shifting the raw window left as int64, then arithmetic
// right shifting back down.
func (r *bitReader) ReadSignedInt(bits uint) int {
	if bits > 32 {
		panic("Can't read more than 32 bits for int")
	}
	val := binary.LittleEndian.Uint64(r.buffer[r.offset/8&^3:])
	res := int(int64(val<<(64-(uint(r.offset)%32)-bits)) >> (64 - bits))
	r.advance(bits)
	// Cast to int64 before right shift
	return res
}
// ReadFloat reads 32 bits and reinterprets them as an IEEE 754 float32.
func (r *bitReader) ReadFloat() float32 {
	return math.Float32frombits(uint32(r.ReadInt(32)))
}
// ReadVarInt32 decodes a protobuf-style base-128 varint, reading at most
// kMaxVarint32Bytes bytes.
func (r *bitReader) ReadVarInt32() uint32 {
	var result uint32
	for i := uint(0); i < kMaxVarint32Bytes; i++ {
		cur := uint32(r.ReadByte())
		result |= (cur & 0x7f) << (7 * i)
		if cur&0x80 == 0 {
			// Continuation bit clear: this was the last byte.
			break
		}
	}
	return result
}
// ReadSignedVarInt32 reads a varint and zigzag-decodes it into a signed int32.
func (r *bitReader) ReadSignedVarInt32() int32 {
	res := r.ReadVarInt32()
	return int32((res >> 1) ^ -(res & 1))
}

// ReadUBitInt reads a variable-width unsigned int: 6 bits first, where bits
// 4-5 select how many extra bits (4, 8 or 28) extend the low 4 bits.
func (r *bitReader) ReadUBitInt() uint {
	res := r.ReadInt(6)
	switch res & (16 | 32) {
	case 16:
		res = (res & 15) | (r.ReadInt(4) << 4)
	case 32:
		res = (res & 15) | (r.ReadInt(8) << 4)
	case 48:
		res = (res & 15) | (r.ReadInt(32-4) << 4)
	}
	return res
}

// BeginChunk pushes a chunk boundary length bits past the current position.
func (r *bitReader) BeginChunk(length int) {
	r.chunkTargets = r.chunkTargets.Push(r.ActualGlobalPosition() + length)
}

// EndChunk pops the innermost chunk boundary and skips any unread bits up to
// it, seeking on the underlying reader when the gap extends beyond the
// buffer. Panics if reads already went past the boundary.
func (r *bitReader) EndChunk() {
	var target int
	r.chunkTargets, target = r.chunkTargets.Pop()
	delta := target - r.ActualGlobalPosition()
	if delta < 0 {
		panic("Someone read beyond a chunk boundary, what a dick")
	} else if delta > 0 {
		// We must seek for peace (or the end of the boundary, for a start)
		seeker, ok := r.underlying.(io.Seeker)
		if ok {
			bufferBits := r.bitsInBuffer - r.offset
			if delta > bufferBits+sled*8 {
				// Target lies beyond the buffered data: seek the stream,
				// then rebuild the buffer from the new position.
				// NOTE(review): Seek and Read errors are ignored here.
				unbufferedSkipBits := delta - bufferBits
				seeker.Seek(int64((unbufferedSkipBits>>3)-sled), io.SeekCurrent)
				newBytes, _ := r.underlying.Read(r.buffer)
				r.bitsInBuffer = 8 * (newBytes - sled)
				if newBytes <= sled {
					// FIXME: Maybe do this even if newBytes is <= bufferSize - sled like in refillBuffer
					// Consume sled
					// Shouldn't really happen unless we reached the end of the stream
					// In that case bitsInBuffer should be 0 after this line (newBytes=0 - sled + sled)
					r.bitsInBuffer += sled * 8
				}
				r.offset = unbufferedSkipBits & 7
				r.lazyGlobalPosition = target - r.offset
			} else {
				// no seek necessary
				r.advance(uint(delta))
			}
		} else {
			// Cannot seek on this reader, consume the gap manually
			r.advance(uint(delta))
		}
	}
}

// ChunkFinished reports whether the cursor has reached the innermost chunk
// boundary.
func (r *bitReader) ChunkFinished() bool {
	return r.chunkTargets.Top() == r.ActualGlobalPosition()
}

// NewBitReader wraps underlying in a buffered BitReader. The initial offset
// skips the sled bytes, which only become readable once the stream ends.
func NewBitReader(underlying io.Reader) BitReader {
	br := &bitReader{underlying: underlying, buffer: make([]byte, bufferSize)}
	br.refillBuffer()
	br.offset = sled * 8
	return BitReader(br)
}
Fixed mad memory allocations due to too large buffer
Former-commit-id: 9e822b7a534f685b39f5b2344872a7802b2335a4
package bitstream
import (
"bytes"
"encoding/binary"
"io"
"math"
)
const (
	// bufferSize is the total read buffer size in bytes, including the sled.
	bufferSize = (1024 * 2) + sled
	// sled is the overlap kept at the buffer front so 64-bit peeks near the
	// end of the buffered region never read out of bounds.
	sled = 4
	// kMaxVarintBytes / kMaxVarint32Bytes bound varint decoding lengths.
	kMaxVarintBytes   = 10
	kMaxVarint32Bytes = 5
)
// BitReader provides bit-level decoding of a byte stream: single bits,
// fixed- and variable-width integers, strings, floats, and length-delimited
// chunk navigation.
type BitReader interface {
	// LazyGlobalPosition returns the (possibly stale) global bit position.
	LazyGlobalPosition() int
	// ActualGlobalPosition returns the exact global bit position.
	ActualGlobalPosition() int
	ReadBit() bool
	ReadByte() byte
	ReadBytes(int) []byte
	ReadString() string
	ReadCString(int) string
	ReadSignedInt(uint) int
	ReadInt(uint) uint
	ReadVarInt32() uint32
	ReadSignedVarInt32() int32
	ReadFloat() float32
	ReadUBitInt() uint
	// BeginChunk/EndChunk/ChunkFinished delimit length-bounded regions.
	BeginChunk(int)
	EndChunk()
	ChunkFinished() bool
}
// stack is a minimal LIFO of ints, used to track nested chunk boundaries.
type stack []int

// Push returns the stack with v appended on top.
func (s stack) Push(v int) stack {
	return append(s, v)
}

// Pop returns the stack without its top element, plus that element
// (separately).
// Attention: panics when the stack is empty.
func (s stack) Pop() (stack, int) {
	// FIXME: CBA to handle empty stacks rn
	top := len(s) - 1
	return s[:top], s[top]
}

// Top returns the top element without removing it.
func (s stack) Top() int {
	return s[len(s)-1]
}
// bitReader is the buffered BitReader implementation. It keeps a trailing
// "sled" overlap at the buffer front so multi-byte peeks near the end of the
// buffered region never read out of bounds.
type bitReader struct {
	underlying         io.Reader
	buffer             []byte
	offset             int // bit offset of the cursor within buffer
	bitsInBuffer       int // readable bits currently in buffer (sled excluded until the final refill)
	lazyGlobalPosition int // global bit position of the buffer start
	chunkTargets       stack // absolute bit positions of open chunk ends
}

// LazyGlobalPosition returns the global bit position of the buffer start;
// cheap but stale by up to one buffer's worth of bits.
func (r *bitReader) LazyGlobalPosition() int {
	return r.lazyGlobalPosition
}

// ActualGlobalPosition returns the exact global bit position of the cursor.
func (r *bitReader) ActualGlobalPosition() int {
	return r.lazyGlobalPosition + r.offset
}
// ReadBits reads the given number of bits and returns them packed into a
// slice of ceil(bits/8) bytes, any partial byte last.
//
// Fix: the previous implementation read raw bytes directly from the
// underlying reader while separately advancing the bit buffer, so it
// returned wrong data whenever the buffer still held unconsumed bytes or the
// cursor was not byte-aligned, and it ignored short reads/errors. Reading
// through ReadByte/ReadBitsToByte keeps all buffer accounting in one place.
func (r *bitReader) ReadBits(bits uint) []byte {
	b := make([]byte, (bits+7)/8)
	for i := uint(0); i < bits/8; i++ {
		b[i] = r.ReadByte()
	}
	if rem := bits % 8; rem != 0 {
		b[bits/8] = r.ReadBitsToByte(rem)
	}
	return b
}
// ReadBit returns the next single bit as a bool (bits are consumed LSB-first
// within each byte).
func (r *bitReader) ReadBit() bool {
	res := (r.buffer[r.offset/8] & (1 << uint(r.offset&7))) != 0
	r.advance(1)
	return res
}

// advance moves the cursor forward by bits, refilling the buffer (possibly
// repeatedly) whenever the cursor passes the buffered data.
func (r *bitReader) advance(bits uint) {
	r.offset += int(bits)
	for r.offset >= r.bitsInBuffer {
		// Refill if we reached the sled
		r.refillBuffer()
	}
}

// refillBuffer copies the trailing sled bytes to the buffer front and reads
// fresh data after them, keeping offset and lazyGlobalPosition consistent.
// NOTE(review): the error from underlying.Read is discarded; any short or
// failed read is treated as end of stream.
func (r *bitReader) refillBuffer() {
	r.offset -= r.bitsInBuffer // sled bits used remain in offset
	r.lazyGlobalPosition += r.bitsInBuffer
	// Copy sled to beginning
	copy(r.buffer[0:sled], r.buffer[r.bitsInBuffer/8:r.bitsInBuffer/8+sled])
	newBytes, _ := r.underlying.Read(r.buffer[sled:])
	r.bitsInBuffer = newBytes * 8
	if newBytes < bufferSize-sled {
		// Short read means the stream is exhausted, so the sled bytes
		// themselves become readable data.
		// we're done here, consume sled
		r.bitsInBuffer += sled * 8
	}
}

// ReadByte reads the next 8 bits as a byte.
func (r *bitReader) ReadByte() byte {
	return r.ReadBitsToByte(8)
}

// ReadBitsToByte reads up to 8 bits into a byte; panics if bits > 8.
func (r *bitReader) ReadBitsToByte(bits uint) byte {
	if bits > 8 {
		panic("Can't read more than 8 bits into byte")
	}
	return byte(r.ReadInt(bits))
}

// ReadInt reads bits (at most 32) as an unsigned integer and advances.
func (r *bitReader) ReadInt(bits uint) uint {
	res := r.peekInt(bits)
	r.advance(bits)
	return res
}

// peekInt returns the next bits (at most 32) as an unsigned integer without
// advancing. It loads a little-endian 64-bit window aligned to the enclosing
// 4-byte boundary and shifts the wanted bits into place; the sled guarantees
// the 8-byte load stays inside the buffer.
func (r *bitReader) peekInt(bits uint) uint {
	if bits > 32 {
		panic("Can't read more than 32 bits for uint")
	}
	val := binary.LittleEndian.Uint64(r.buffer[r.offset/8&^3:])
	return uint(val << (64 - (uint(r.offset) % 32) - bits) >> (64 - bits))
}
// ReadBytes reads the given number of bytes and returns them in a new slice.
func (r *bitReader) ReadBytes(bytes int) []byte {
	out := make([]byte, bytes)
	for i := range out {
		out[i] = r.ReadByte()
	}
	return out
}
// ReadCString reads exactly chars bytes and returns the string up to the
// first NUL terminator, or all chars bytes if no NUL is present.
func (r *bitReader) ReadCString(chars int) string {
	b := r.ReadBytes(chars)
	end := bytes.IndexByte(b, 0)
	if end < 0 {
		end = chars
	}
	return string(b[:end])
}

// ReadString reads a variable length, NUL-terminated string capped at 4096
// bytes.
func (r *bitReader) ReadString() string {
	// Valve also uses this sooo
	return r.readStringLimited(4096, false)
}

// readStringLimited reads bytes until a NUL, optionally a newline, or the
// byte limit is hit; the terminator is consumed but not included.
func (r *bitReader) readStringLimited(limit int, endOnNewLine bool) string {
	result := make([]byte, 0, 512)
	for i := 0; i < limit; i++ {
		b := r.ReadByte()
		if b == 0 || (endOnNewLine && b == '\n') {
			break
		}
		result = append(result, b)
	}
	return string(result)
}

// ReadSignedInt is like ReadInt but returns signed int. The value is
// sign-extended by shifting the raw window left as int64, then arithmetic
// right shifting back down.
func (r *bitReader) ReadSignedInt(bits uint) int {
	if bits > 32 {
		panic("Can't read more than 32 bits for int")
	}
	val := binary.LittleEndian.Uint64(r.buffer[r.offset/8&^3:])
	res := int(int64(val<<(64-(uint(r.offset)%32)-bits)) >> (64 - bits))
	r.advance(bits)
	// Cast to int64 before right shift
	return res
}
// ReadFloat reads 32 bits and reinterprets them as an IEEE 754 float32.
func (r *bitReader) ReadFloat() float32 {
	return math.Float32frombits(uint32(r.ReadInt(32)))
}
// ReadVarInt32 decodes a protobuf-style base-128 varint, reading at most
// kMaxVarint32Bytes bytes.
func (r *bitReader) ReadVarInt32() uint32 {
	var result uint32
	for i := uint(0); i < kMaxVarint32Bytes; i++ {
		cur := uint32(r.ReadByte())
		result |= (cur & 0x7f) << (7 * i)
		if cur&0x80 == 0 {
			// Continuation bit clear: this was the last byte.
			break
		}
	}
	return result
}
// ReadSignedVarInt32 reads a varint and zigzag-decodes it into a signed int32.
func (r *bitReader) ReadSignedVarInt32() int32 {
	res := r.ReadVarInt32()
	return int32((res >> 1) ^ -(res & 1))
}

// ReadUBitInt reads a variable-width unsigned int: 6 bits first, where bits
// 4-5 select how many extra bits (4, 8 or 28) extend the low 4 bits.
func (r *bitReader) ReadUBitInt() uint {
	res := r.ReadInt(6)
	switch res & (16 | 32) {
	case 16:
		res = (res & 15) | (r.ReadInt(4) << 4)
	case 32:
		res = (res & 15) | (r.ReadInt(8) << 4)
	case 48:
		res = (res & 15) | (r.ReadInt(32-4) << 4)
	}
	return res
}

// BeginChunk pushes a chunk boundary length bits past the current position.
func (r *bitReader) BeginChunk(length int) {
	r.chunkTargets = r.chunkTargets.Push(r.ActualGlobalPosition() + length)
}

// EndChunk pops the innermost chunk boundary and skips any unread bits up to
// it, seeking on the underlying reader when the gap extends beyond the
// buffer. Panics if reads already went past the boundary.
func (r *bitReader) EndChunk() {
	var target int
	r.chunkTargets, target = r.chunkTargets.Pop()
	delta := target - r.ActualGlobalPosition()
	if delta < 0 {
		panic("Someone read beyond a chunk boundary, what a dick")
	} else if delta > 0 {
		// We must seek for peace (or the end of the boundary, for a start)
		seeker, ok := r.underlying.(io.Seeker)
		if ok {
			bufferBits := r.bitsInBuffer - r.offset
			if delta > bufferBits+sled*8 {
				// Target lies beyond the buffered data: seek the stream,
				// then rebuild the buffer from the new position.
				// NOTE(review): Seek and Read errors are ignored here.
				unbufferedSkipBits := delta - bufferBits
				seeker.Seek(int64((unbufferedSkipBits>>3)-sled), io.SeekCurrent)
				newBytes, _ := r.underlying.Read(r.buffer)
				r.bitsInBuffer = 8 * (newBytes - sled)
				if newBytes <= sled {
					// FIXME: Maybe do this even if newBytes is <= bufferSize - sled like in refillBuffer
					// Consume sled
					// Shouldn't really happen unless we reached the end of the stream
					// In that case bitsInBuffer should be 0 after this line (newBytes=0 - sled + sled)
					r.bitsInBuffer += sled * 8
				}
				r.offset = unbufferedSkipBits & 7
				r.lazyGlobalPosition = target - r.offset
			} else {
				// no seek necessary
				r.advance(uint(delta))
			}
		} else {
			// Cannot seek on this reader, consume the gap manually
			r.advance(uint(delta))
		}
	}
}

// ChunkFinished reports whether the cursor has reached the innermost chunk
// boundary.
func (r *bitReader) ChunkFinished() bool {
	return r.chunkTargets.Top() == r.ActualGlobalPosition()
}

// NewBitReader wraps underlying in a buffered BitReader. The initial offset
// skips the sled bytes, which only become readable once the stream ends.
func NewBitReader(underlying io.Reader) BitReader {
	br := &bitReader{underlying: underlying, buffer: make([]byte, bufferSize)}
	br.refillBuffer()
	br.offset = sled * 8
	return BitReader(br)
}
|
package sparta
import (
"encoding/json"
"fmt"
"strings"
"github.com/mweagle/cloudformationresources"
gocf "github.com/crewjam/go-cloudformation"
"github.com/Sirupsen/logrus"
)
// salt is a fixed value mixed into resource-name generation.
// NOTE(review): its use is outside this excerpt - confirm against
// CloudFormationResourceName.
const salt = "213EA743-A98F-499D-8FEF-B87015FE13E7"

// PushSourceConfigurationActions map stores common IAM Policy Actions for Lambda
// push-source configuration management.
// The configuration is handled by CustomResources inserted into the generated
// CloudFormation template.
var PushSourceConfigurationActions = map[string][]string{}

// init seeds PushSourceConfigurationActions with the minimum IAM actions each
// supported push event source (SNS, S3, SES, CloudWatch Logs) needs for
// subscription management.
func init() {
	PushSourceConfigurationActions[cloudformationresources.SNSLambdaEventSource] = []string{"sns:ConfirmSubscription",
		"sns:GetTopicAttributes",
		"sns:ListSubscriptionsByTopic",
		"sns:Subscribe",
		"sns:Unsubscribe"}
	PushSourceConfigurationActions[cloudformationresources.S3LambdaEventSource] = []string{"s3:GetBucketLocation",
		"s3:GetBucketNotification",
		"s3:PutBucketNotification",
		"s3:GetBucketNotificationConfiguration",
		"s3:PutBucketNotificationConfiguration"}
	PushSourceConfigurationActions[cloudformationresources.SESLambdaEventSource] = []string{"ses:CreateReceiptRuleSet",
		"ses:CreateReceiptRule",
		"ses:DeleteReceiptRule",
		"ses:DeleteReceiptRuleSet",
		"ses:DescribeReceiptRuleSet"}
	PushSourceConfigurationActions[cloudformationresources.CloudWatchLogsLambdaEventSource] = []string{"logs:DescribeSubscriptionFilters",
		"logs:DeleteSubscriptionFilter",
		"logs:PutSubscriptionFilter"}
}
// nodeJSHandlerName returns the index.js export name that handles
// configuration for the given source file base name.
func nodeJSHandlerName(jsBaseFilename string) string {
	return "index." + jsBaseFilename + "Configuration"
}
// awsPrincipalToService extracts the service basename from an AWS principal
// (eg. "s3.amazonaws.com" -> "S3").
func awsPrincipalToService(awsPrincipalName string) string {
	service := awsPrincipalName
	if idx := strings.Index(awsPrincipalName, "."); idx >= 0 {
		service = awsPrincipalName[:idx]
	}
	return strings.ToUpper(service)
}
// ensureCustomResourceHandler idempotently inserts a Lambda-backed
// CloudFormation CustomResource handler for the given AWS principal into
// template and returns its logical resource name. The name is stable per
// principal, so repeated calls for the same principal reuse one handler.
// Also ensures the handler's IAM role exists (see
// ensureIAMRoleForCustomResource).
func ensureCustomResourceHandler(serviceName string,
	customResourceTypeName string,
	sourceArn *gocf.StringExpr,
	dependsOn []string,
	template *gocf.Template,
	S3Bucket string,
	S3Key string,
	logger *logrus.Logger) (string, error) {
	// AWS service basename
	awsServiceName := awsPrincipalToService(customResourceTypeName)
	// Use a stable resource CloudFormation resource name to represent
	// the single CustomResource that can configure the different
	// PushSource's for the given principal.
	keyName, err := json.Marshal(ArbitraryJSONObject{
		"Principal":   customResourceTypeName,
		"ServiceName": awsServiceName,
	})
	if err != nil {
		logger.Error("Failed to create configurator resource name: ", err.Error())
		return "", err
	}
	subscriberHandlerName := CloudFormationResourceName(fmt.Sprintf("%sCustomResource", awsServiceName),
		string(keyName))
	//////////////////////////////////////////////////////////////////////////////
	// IAM Role definition
	iamResourceName, err := ensureIAMRoleForCustomResource(customResourceTypeName, sourceArn, template, logger)
	if nil != err {
		return "", err
	}
	iamRoleRef := gocf.GetAtt(iamResourceName, "Arn")
	_, exists := template.Resources[subscriberHandlerName]
	if !exists {
		logger.WithFields(logrus.Fields{
			"Service": customResourceTypeName,
		}).Debug("Including Lambda CustomResource for AWS Service")
		configuratorDescription := customResourceDescription(serviceName, customResourceTypeName)
		//////////////////////////////////////////////////////////////////////////////
		// Custom Resource Lambda Handler
		// The export name MUST correspond to the createForwarder entry that is dynamically
		// written into the index.js file during compile in createNewSpartaCustomResourceEntry
		handlerName := lambdaExportNameForCustomResourceType(customResourceTypeName)
		logger.WithFields(logrus.Fields{
			"CustomResourceType": customResourceTypeName,
			"NodeJSExport":       handlerName,
		}).Debug("Sparta CloudFormation custom resource handler info")
		customResourceHandlerDef := gocf.LambdaFunction{
			Code: &gocf.LambdaFunctionCode{
				S3Bucket: gocf.String(S3Bucket),
				S3Key:    gocf.String(S3Key),
			},
			Description: gocf.String(configuratorDescription),
			Handler:     gocf.String(handlerName),
			Role:        iamRoleRef,
			Runtime:     gocf.String(NodeJSVersion),
			Timeout:     gocf.Integer(30),
		}
		cfResource := template.AddResource(subscriberHandlerName, customResourceHandlerDef)
		// Propagate explicit ordering constraints, if any.
		if nil != dependsOn && (len(dependsOn) > 0) {
			cfResource.DependsOn = append(cfResource.DependsOn, dependsOn...)
		}
	}
	return subscriberHandlerName, nil
}
// ensureIAMRoleForCustomResource ensures that the single IAM::Role for a single
// AWS principal (eg, s3.*.*) exists, and includes statements for the given
// sourceArn. Sparta uses a single IAM::Role for the CustomResource configuration
// lambda, which is the union of all Arns in the application.
// Returns the stable logical role name, or an error if the principal is
// unsupported or the existing role has no Policies entry.
func ensureIAMRoleForCustomResource(awsPrincipalName string,
	sourceArn *gocf.StringExpr,
	template *gocf.Template,
	logger *logrus.Logger) (string, error) {
	principalActions, exists := PushSourceConfigurationActions[awsPrincipalName]
	if !exists {
		return "", fmt.Errorf("Unsupported principal for IAM role creation: %s", awsPrincipalName)
	}
	// What's the stable IAMRoleName?
	resourceBaseName := fmt.Sprintf("CustomResource%sIAMRole", awsPrincipalToService(awsPrincipalName))
	stableRoleName := CloudFormationResourceName(resourceBaseName, awsPrincipalName)
	// Ensure it exists, then check to see if this Source ARN is already specified...
	// Checking equality with Stringable?
	// Create a new Role
	var existingIAMRole *gocf.IAMRole
	existingResource, exists := template.Resources[stableRoleName]
	logger.WithFields(logrus.Fields{
		"PrincipalActions": principalActions,
		"SourceArn":        sourceArn,
	}).Debug("Ensuring IAM Role results")
	if !exists {
		// Insert the IAM role here. We'll walk the policies data in the next section
		// to make sure that the sourceARN we have is in the list
		statements := CommonIAMStatements["core"]
		existingIAMRole = &gocf.IAMRole{
			AssumeRolePolicyDocument: AssumePolicyDocument,
			Policies: &gocf.IAMPoliciesList{
				gocf.IAMPolicies{
					PolicyDocument: ArbitraryJSONObject{
						"Version":   "2012-10-17",
						"Statement": statements,
					},
					PolicyName: gocf.String(fmt.Sprintf("%sPolicy", stableRoleName)),
				},
			},
		}
		template.AddResource(stableRoleName, existingIAMRole)
		// Create a new IAM Role resource
		logger.WithFields(logrus.Fields{
			"RoleName": stableRoleName,
		}).Debug("Inserting IAM Role")
	} else {
		// NOTE(review): this type assertion panics if the resource with this
		// name is not an IAMRole - presumed impossible given stable naming.
		existingIAMRole = existingResource.Properties.(*gocf.IAMRole)
	}
	// Walk the existing statements; if the sourceArn is already covered we're
	// done, otherwise append a new Allow statement to the FIRST policy only.
	if nil != existingIAMRole.Policies {
		for _, eachPolicy := range *existingIAMRole.Policies {
			policyDoc := eachPolicy.PolicyDocument.(ArbitraryJSONObject)
			statements := policyDoc["Statement"]
			for _, eachStatement := range statements.([]iamPolicyStatement) {
				if sourceArn.String() == eachStatement.Resource.String() {
					logger.WithFields(logrus.Fields{
						"RoleName":  stableRoleName,
						"SourceArn": sourceArn.String(),
					}).Debug("SourceArn already exists for IAM Policy")
					return stableRoleName, nil
				}
			}
		}
		logger.WithFields(logrus.Fields{
			"RoleName": stableRoleName,
			"Action":   principalActions,
			"Resource": sourceArn,
		}).Debug("Inserting Actions for configuration ARN")
		// Add this statement to the first policy, iff the actions are non-empty
		if len(principalActions) > 0 {
			rootPolicy := (*existingIAMRole.Policies)[0]
			rootPolicyDoc := rootPolicy.PolicyDocument.(ArbitraryJSONObject)
			rootPolicyStatements := rootPolicyDoc["Statement"].([]iamPolicyStatement)
			rootPolicyDoc["Statement"] = append(rootPolicyStatements, iamPolicyStatement{
				Effect:   "Allow",
				Action:   principalActions,
				Resource: sourceArn,
			})
		}
		return stableRoleName, nil
	}
	return "", fmt.Errorf("Unable to find Policies entry for IAM role: %s", stableRoleName)
}
Update init to comply with goreportcard vet results
package sparta
import (
"encoding/json"
"fmt"
"strings"
"github.com/mweagle/cloudformationresources"
gocf "github.com/crewjam/go-cloudformation"
"github.com/Sirupsen/logrus"
)
// salt is a fixed value mixed into resource-name generation.
// NOTE(review): its use is outside this excerpt - confirm against
// CloudFormationResourceName.
const salt = "213EA743-A98F-499D-8FEF-B87015FE13E7"

// PushSourceConfigurationActions map stores common IAM Policy Actions for Lambda
// push-source configuration management.
// The configuration is handled by CustomResources inserted into the generated
// CloudFormation template.
var PushSourceConfigurationActions = map[string][]string{}

// init seeds PushSourceConfigurationActions with the minimum IAM actions each
// supported push event source (SNS, S3, SES, CloudWatch Logs) needs for
// subscription management.
func init() {
	PushSourceConfigurationActions[cloudformationresources.SNSLambdaEventSource] = []string{"sns:ConfirmSubscription",
		"sns:GetTopicAttributes",
		"sns:ListSubscriptionsByTopic",
		"sns:Subscribe",
		"sns:Unsubscribe"}
	PushSourceConfigurationActions[cloudformationresources.S3LambdaEventSource] = []string{"s3:GetBucketLocation",
		"s3:GetBucketNotification",
		"s3:PutBucketNotification",
		"s3:GetBucketNotificationConfiguration",
		"s3:PutBucketNotificationConfiguration"}
	PushSourceConfigurationActions[cloudformationresources.SESLambdaEventSource] = []string{"ses:CreateReceiptRuleSet",
		"ses:CreateReceiptRule",
		"ses:DeleteReceiptRule",
		"ses:DeleteReceiptRuleSet",
		"ses:DescribeReceiptRuleSet"}
	PushSourceConfigurationActions[cloudformationresources.CloudWatchLogsLambdaEventSource] = []string{"logs:DescribeSubscriptionFilters",
		"logs:DeleteSubscriptionFilter",
		"logs:PutSubscriptionFilter"}
}
// nodeJSHandlerName returns the index.js export name that handles
// configuration for the given source file base name.
func nodeJSHandlerName(jsBaseFilename string) string {
	return "index." + jsBaseFilename + "Configuration"
}
// awsPrincipalToService extracts the service basename from an AWS principal
// (eg. "s3.amazonaws.com" -> "S3").
func awsPrincipalToService(awsPrincipalName string) string {
	service := awsPrincipalName
	if idx := strings.Index(awsPrincipalName, "."); idx >= 0 {
		service = awsPrincipalName[:idx]
	}
	return strings.ToUpper(service)
}
// ensureCustomResourceHandler idempotently inserts a Lambda-backed
// CloudFormation CustomResource handler for the given AWS principal into
// template and returns its logical resource name. The name is stable per
// principal, so repeated calls for the same principal reuse one handler.
// Also ensures the handler's IAM role exists (see
// ensureIAMRoleForCustomResource).
func ensureCustomResourceHandler(serviceName string,
	customResourceTypeName string,
	sourceArn *gocf.StringExpr,
	dependsOn []string,
	template *gocf.Template,
	S3Bucket string,
	S3Key string,
	logger *logrus.Logger) (string, error) {
	// AWS service basename
	awsServiceName := awsPrincipalToService(customResourceTypeName)
	// Use a stable resource CloudFormation resource name to represent
	// the single CustomResource that can configure the different
	// PushSource's for the given principal.
	keyName, err := json.Marshal(ArbitraryJSONObject{
		"Principal":   customResourceTypeName,
		"ServiceName": awsServiceName,
	})
	if err != nil {
		logger.Error("Failed to create configurator resource name: ", err.Error())
		return "", err
	}
	subscriberHandlerName := CloudFormationResourceName(fmt.Sprintf("%sCustomResource", awsServiceName),
		string(keyName))
	//////////////////////////////////////////////////////////////////////////////
	// IAM Role definition
	iamResourceName, err := ensureIAMRoleForCustomResource(customResourceTypeName, sourceArn, template, logger)
	if nil != err {
		return "", err
	}
	iamRoleRef := gocf.GetAtt(iamResourceName, "Arn")
	_, exists := template.Resources[subscriberHandlerName]
	if !exists {
		logger.WithFields(logrus.Fields{
			"Service": customResourceTypeName,
		}).Debug("Including Lambda CustomResource for AWS Service")
		configuratorDescription := customResourceDescription(serviceName, customResourceTypeName)
		//////////////////////////////////////////////////////////////////////////////
		// Custom Resource Lambda Handler
		// The export name MUST correspond to the createForwarder entry that is dynamically
		// written into the index.js file during compile in createNewSpartaCustomResourceEntry
		handlerName := lambdaExportNameForCustomResourceType(customResourceTypeName)
		logger.WithFields(logrus.Fields{
			"CustomResourceType": customResourceTypeName,
			"NodeJSExport":       handlerName,
		}).Debug("Sparta CloudFormation custom resource handler info")
		customResourceHandlerDef := gocf.LambdaFunction{
			Code: &gocf.LambdaFunctionCode{
				S3Bucket: gocf.String(S3Bucket),
				S3Key:    gocf.String(S3Key),
			},
			Description: gocf.String(configuratorDescription),
			Handler:     gocf.String(handlerName),
			Role:        iamRoleRef,
			Runtime:     gocf.String(NodeJSVersion),
			Timeout:     gocf.Integer(30),
		}
		cfResource := template.AddResource(subscriberHandlerName, customResourceHandlerDef)
		// Propagate explicit ordering constraints, if any.
		if nil != dependsOn && (len(dependsOn) > 0) {
			cfResource.DependsOn = append(cfResource.DependsOn, dependsOn...)
		}
	}
	return subscriberHandlerName, nil
}
// ensureIAMRoleForCustomResource ensures that the single IAM::Role for a single
// AWS principal (eg, s3.*.*) exists, and includes statements for the given
// sourceArn. Sparta uses a single IAM::Role for the CustomResource configuration
// lambda, which is the union of all Arns in the application.
// Returns the stable logical role name, or an error if the principal is
// unsupported or the existing role has no Policies entry.
func ensureIAMRoleForCustomResource(awsPrincipalName string,
	sourceArn *gocf.StringExpr,
	template *gocf.Template,
	logger *logrus.Logger) (string, error) {
	principalActions, exists := PushSourceConfigurationActions[awsPrincipalName]
	if !exists {
		return "", fmt.Errorf("Unsupported principal for IAM role creation: %s", awsPrincipalName)
	}
	// What's the stable IAMRoleName?
	resourceBaseName := fmt.Sprintf("CustomResource%sIAMRole", awsPrincipalToService(awsPrincipalName))
	stableRoleName := CloudFormationResourceName(resourceBaseName, awsPrincipalName)
	// Ensure it exists, then check to see if this Source ARN is already specified...
	// Checking equality with Stringable?
	// Create a new Role
	var existingIAMRole *gocf.IAMRole
	existingResource, exists := template.Resources[stableRoleName]
	logger.WithFields(logrus.Fields{
		"PrincipalActions": principalActions,
		"SourceArn":        sourceArn,
	}).Debug("Ensuring IAM Role results")
	if !exists {
		// Insert the IAM role here. We'll walk the policies data in the next section
		// to make sure that the sourceARN we have is in the list
		statements := CommonIAMStatements["core"]
		iamPolicyList := gocf.IAMPoliciesList{}
		iamPolicyList = append(iamPolicyList,
			gocf.IAMPolicies{
				PolicyDocument: ArbitraryJSONObject{
					"Version":   "2012-10-17",
					"Statement": statements,
				},
				PolicyName: gocf.String(fmt.Sprintf("%sPolicy", stableRoleName)),
			},
		)
		existingIAMRole = &gocf.IAMRole{
			AssumeRolePolicyDocument: AssumePolicyDocument,
			Policies:                 &iamPolicyList,
		}
		template.AddResource(stableRoleName, existingIAMRole)
		// Create a new IAM Role resource
		logger.WithFields(logrus.Fields{
			"RoleName": stableRoleName,
		}).Debug("Inserting IAM Role")
	} else {
		// NOTE(review): this type assertion panics if the resource with this
		// name is not an IAMRole - presumed impossible given stable naming.
		existingIAMRole = existingResource.Properties.(*gocf.IAMRole)
	}
	// Walk the existing statements; if the sourceArn is already covered we're
	// done, otherwise append a new Allow statement to the FIRST policy only.
	if nil != existingIAMRole.Policies {
		for _, eachPolicy := range *existingIAMRole.Policies {
			policyDoc := eachPolicy.PolicyDocument.(ArbitraryJSONObject)
			statements := policyDoc["Statement"]
			for _, eachStatement := range statements.([]iamPolicyStatement) {
				if sourceArn.String() == eachStatement.Resource.String() {
					logger.WithFields(logrus.Fields{
						"RoleName":  stableRoleName,
						"SourceArn": sourceArn.String(),
					}).Debug("SourceArn already exists for IAM Policy")
					return stableRoleName, nil
				}
			}
		}
		logger.WithFields(logrus.Fields{
			"RoleName": stableRoleName,
			"Action":   principalActions,
			"Resource": sourceArn,
		}).Debug("Inserting Actions for configuration ARN")
		// Add this statement to the first policy, iff the actions are non-empty
		if len(principalActions) > 0 {
			rootPolicy := (*existingIAMRole.Policies)[0]
			rootPolicyDoc := rootPolicy.PolicyDocument.(ArbitraryJSONObject)
			rootPolicyStatements := rootPolicyDoc["Statement"].([]iamPolicyStatement)
			rootPolicyDoc["Statement"] = append(rootPolicyStatements, iamPolicyStatement{
				Effect:   "Allow",
				Action:   principalActions,
				Resource: sourceArn,
			})
		}
		return stableRoleName, nil
	}
	return "", fmt.Errorf("Unable to find Policies entry for IAM role: %s", stableRoleName)
}
|
package momos
import (
"bytes"
"errors"
"io/ioutil"
"net"
"net/http"
"strconv"
"strings"
"time"
"github.com/PuerkitoBio/goquery"
clientCache "github.com/gregjones/httpcache"
)
var (
	// ErrRequest is sent when an SSI subrequest fails outright.
	ErrRequest = errors.New("Request error")
	// ErrTimeout is sent when an SSI subrequest exceeds its deadline.
	ErrTimeout = errors.New("Timeout error")
	// ErrInvalidStatusCode is sent for non-2xx SSI subrequest responses.
	ErrInvalidStatusCode = errors.New("Invalid status code")
	// ErrInvalidContentType is sent when an SSI subrequest does not return HTML.
	ErrInvalidContentType = errors.New("Invalid content type")
)

// Client is the shared HTTP client for SSI subrequests; responses are cached
// in memory via httpcache.
var Client = &http.Client{Transport: clientCache.NewMemoryCacheTransport()}

// proxyTransport wraps an http.RoundTripper and post-processes HTML
// responses, resolving <ssi> elements before they are returned upstream.
type proxyTransport struct {
	http.RoundTripper
}
// RoundTrip executes the wrapped round trip and post-processes HTML
// responses: each <ssi> element's src is fetched concurrently and the element
// is replaced with the fetched fragment on success, or with its fallback
// content on error, before the rewritten response is returned.
//
// Fix: the original response body was never closed after parsing, preventing
// the transport from reusing the underlying connection.
func (t *proxyTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
	timeStart := time.Now()
	// get the response of an given request
	resp, err = t.RoundTripper.RoundTrip(req)
	if err != nil {
		errorf("could not create RoundTripper from %q", req.URL)
		return nil, err
	}
	// Only html files are scanned
	contentType := resp.Header.Get("Content-Type")
	if !strings.HasPrefix(contentType, "text/html") {
		return resp, nil
	}
	doc, err := goquery.NewDocumentFromReader(resp.Body)
	// The parser has consumed the body (or it is useless on error); close it
	// so the connection can be reused.
	resp.Body.Close()
	if err != nil {
		errorf("illegal response body from %q", req.URL)
		return nil, err
	}
	// Collect every <ssi> element with its attributes and inline
	// error/timeout templates.
	ssiElements := []SSIElement{}
	doc.Find("ssi").Each(func(i int, element *goquery.Selection) {
		se := SSIElement{Element: element}
		se.Attributes = SSIAttributes{
			"timeout": element.AttrOr("timeout", "2000"),
			"src":     element.AttrOr("src", ""),
			"name":    element.AttrOr("name", ""),
		}
		se.GetErrorTag()
		se.GetTimeoutTag()
		ssiElements = append(ssiElements, se)
	})
	ch := make(chan []byte)
	chErr := make(chan error)
	timeStartRequest := time.Now()
	// Fetch all fragments concurrently...
	for _, element := range ssiElements {
		timeout, _ := element.Timeout()
		go makeRequest(element.Url(), ch, chErr, timeout)
	}
	// ...then consume exactly one result (success or error) per element.
	// NOTE(review): results are not matched to the element that requested
	// them - with several elements a fast fragment can be spliced into the
	// wrong slot. Confirm whether this is intended.
	for _, element := range ssiElements {
		select {
		case res := <-ch:
			debugf("SSI [%v] - Request to %v took %v", element.Name(), element.Url(), time.Since(timeStartRequest))
			element.SetupSuccess(res)
		case err := <-chErr:
			element.SetupFallback(err)
			debugf("SSI [%v] - Request to %v error: %q", element.Name(), element.Url(), err)
		}
	}
	htmlDoc, err := doc.Html()
	if err != nil {
		errorf("Could not get html from document %q", req.URL)
		return nil, err
	}
	// assign new reader with content
	content := []byte(htmlDoc)
	body := ioutil.NopCloser(bytes.NewReader(content))
	resp.Body = body
	resp.ContentLength = int64(len(content)) // update content length
	resp.Header.Set("Content-Length", strconv.Itoa(len(content)))
	debugf("Process Complete Request %q took %q", req.URL, time.Since(timeStart))
	return resp, nil
}
// makeRequest fetches url with the given timeout and delivers exactly one
// message: the body on ch, or an error on chErr.
//
// Fixes: (1) the old `else if err, ok := err.(net.Error)` branch asserted on
// a nil error and could never fire, so ErrTimeout was never reported; (2) a
// non-2xx HTML response sent nothing on either channel, leaving the caller's
// select blocked forever; (3) the response body was never closed.
func makeRequest(url string, ch chan<- []byte, chErr chan<- error, timeoutMs int) {
	// NOTE(review): mutating the shared Client's Timeout from concurrently
	// launched goroutines is a data race; a per-request context deadline
	// would be safer.
	Client.Timeout = time.Duration(timeoutMs) * time.Millisecond
	resp, err := Client.Get(url)
	if err != nil {
		// Distinguish timeouts from other transport failures; this check
		// must live inside the error branch to ever match.
		if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
			chErr <- ErrTimeout
		} else {
			chErr <- ErrRequest
		}
		return
	}
	defer resp.Body.Close()
	if !strings.HasPrefix(resp.Header.Get("Content-Type"), "text/html") {
		chErr <- ErrInvalidContentType
		return
	}
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		chErr <- ErrInvalidStatusCode
		return
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		chErr <- ErrRequest
		return
	}
	ch <- body
}
better logging
package momos
import (
"bytes"
"errors"
"io/ioutil"
"net"
"net/http"
"strconv"
"strings"
"time"
"github.com/PuerkitoBio/goquery"
clientCache "github.com/gregjones/httpcache"
)
var (
	// ErrRequest is sent when an SSI subrequest fails outright.
	ErrRequest = errors.New("Request error")
	// ErrTimeout is sent when an SSI subrequest exceeds its deadline.
	ErrTimeout = errors.New("Timeout error")
	// ErrInvalidStatusCode is sent for non-2xx SSI subrequest responses.
	ErrInvalidStatusCode = errors.New("Invalid status code")
	// ErrInvalidContentType is sent when an SSI subrequest does not return HTML.
	ErrInvalidContentType = errors.New("Invalid content type")
)

// Client is the shared HTTP client for SSI subrequests; responses are cached
// in memory via httpcache.
var Client = &http.Client{Transport: clientCache.NewMemoryCacheTransport()}

// proxyTransport wraps an http.RoundTripper and post-processes HTML
// responses, resolving <ssi> elements before they are returned upstream.
type proxyTransport struct {
	http.RoundTripper
}
// RoundTrip executes the wrapped round trip and post-processes HTML
// responses: each <ssi> element's src is fetched concurrently and the element
// is replaced with the fetched fragment on success, or with its fallback
// content on error, before the rewritten response is returned.
//
// Fix: the original response body was never closed after parsing, preventing
// the transport from reusing the underlying connection.
func (t *proxyTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
	timeStart := time.Now()
	// get the response of an given request
	resp, err = t.RoundTripper.RoundTrip(req)
	if err != nil {
		errorf("could not create RoundTripper from %q", req.URL)
		return nil, err
	}
	// Only html files are scanned
	contentType := resp.Header.Get("Content-Type")
	if !strings.HasPrefix(contentType, "text/html") {
		return resp, nil
	}
	doc, err := goquery.NewDocumentFromReader(resp.Body)
	// The parser has consumed the body (or it is useless on error); close it
	// so the connection can be reused.
	resp.Body.Close()
	if err != nil {
		errorf("illegal response body from %q", req.URL)
		return nil, err
	}
	// Collect every <ssi> element with its attributes and inline
	// error/timeout templates.
	ssiElements := []SSIElement{}
	doc.Find("ssi").Each(func(i int, element *goquery.Selection) {
		se := SSIElement{Element: element}
		se.Attributes = SSIAttributes{
			"timeout": element.AttrOr("timeout", "2000"),
			"src":     element.AttrOr("src", ""),
			"name":    element.AttrOr("name", ""),
		}
		se.GetErrorTag()
		se.GetTimeoutTag()
		ssiElements = append(ssiElements, se)
	})
	ch := make(chan []byte)
	chErr := make(chan error)
	timeStartRequest := time.Now()
	// Fetch all fragments concurrently...
	for _, element := range ssiElements {
		timeout, _ := element.Timeout()
		go makeRequest(element.Url(), ch, chErr, timeout)
	}
	// ...then consume exactly one result (success or error) per element.
	// NOTE(review): results are not matched to the element that requested
	// them - with several elements a fast fragment can be spliced into the
	// wrong slot. Confirm whether this is intended.
	for _, element := range ssiElements {
		select {
		case res := <-ch:
			debugf("⚛ SSI [%v] - Request to %v took %v", element.Name(), element.Url(), time.Since(timeStartRequest))
			element.SetupSuccess(res)
		case err := <-chErr:
			element.SetupFallback(err)
			debugf("⚛ SSI [%v] - Request to %v error: %q", element.Name(), element.Url(), err)
		}
	}
	htmlDoc, err := doc.Html()
	if err != nil {
		errorf("Could not get html from document %q", req.URL)
		return nil, err
	}
	// assign new reader with content
	content := []byte(htmlDoc)
	body := ioutil.NopCloser(bytes.NewReader(content))
	resp.Body = body
	resp.ContentLength = int64(len(content)) // update content length
	resp.Header.Set("Content-Length", strconv.Itoa(len(content)))
	debugf("✓ Process Complete Request %q took %q", req.URL, time.Since(timeStart))
	return resp, nil
}
// makeRequest fetches url with the given timeout and delivers exactly one
// message: the body on ch, or an error on chErr.
//
// Fixes: (1) the old `else if err, ok := err.(net.Error)` branch asserted on
// a nil error and could never fire, so ErrTimeout was never reported; (2) a
// non-2xx HTML response sent nothing on either channel, leaving the caller's
// select blocked forever; (3) the response body was never closed.
func makeRequest(url string, ch chan<- []byte, chErr chan<- error, timeoutMs int) {
	// NOTE(review): mutating the shared Client's Timeout from concurrently
	// launched goroutines is a data race; a per-request context deadline
	// would be safer.
	Client.Timeout = time.Duration(timeoutMs) * time.Millisecond
	resp, err := Client.Get(url)
	if err != nil {
		// Distinguish timeouts from other transport failures; this check
		// must live inside the error branch to ever match.
		if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
			chErr <- ErrTimeout
		} else {
			chErr <- ErrRequest
		}
		return
	}
	defer resp.Body.Close()
	if !strings.HasPrefix(resp.Header.Get("Content-Type"), "text/html") {
		chErr <- ErrInvalidContentType
		return
	}
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		chErr <- ErrInvalidStatusCode
		return
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		chErr <- ErrRequest
		return
	}
	ch <- body
}
|
package main
import (
"context"
"crypto/tls"
"fmt"
"net"
"net/http"
"net/http/pprof"
"os"
"strings"
"time"
"github.com/grpc-ecosystem/go-grpc-middleware"
"github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus"
"github.com/grpc-ecosystem/go-grpc-middleware/tags"
"github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/mwitkow/go-conntrack"
"github.com/mwitkow/go-conntrack/connhelpers"
"github.com/mwitkow/go-flagz"
"github.com/mwitkow/go-httpwares/logging/logrus"
"github.com/mwitkow/go-httpwares/metrics"
"github.com/mwitkow/go-httpwares/metrics/prometheus"
"github.com/mwitkow/go-httpwares/tags"
"github.com/mwitkow/go-httpwares/tracing/debug"
"github.com/mwitkow/grpc-proxy/proxy"
pb_config "github.com/mwitkow/kedge/_protogen/kedge/config"
grpc_director "github.com/mwitkow/kedge/grpc/director"
http_director "github.com/mwitkow/kedge/http/director"
"github.com/mwitkow/kedge/lib/discovery"
"github.com/mwitkow/kedge/lib/http/ctxtags"
"github.com/mwitkow/kedge/lib/logstash"
"github.com/mwitkow/kedge/lib/sharedflags"
"github.com/pressly/chi"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
"golang.org/x/net/trace"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
// Command-line flags registered on the shared flag set. A port flag set to
// 0 disables the corresponding listener.
var (
	flagBindAddr            = sharedflags.Set.String("server_bind_address", "0.0.0.0", "address to bind the server to")
	flagGrpcTlsPort         = sharedflags.Set.Int("server_grpc_tls_port", 8444, "TCP TLS port to listen on for secure gRPC calls. If 0, no gRPC-TLS will be open.")
	flagHttpTlsPort         = sharedflags.Set.Int("server_http_tls_port", 8443, "TCP port to listen on for HTTPS. If gRPC call will hit it will bounce to gRPC handler. If 0, no TLS will be open.")
	flagHttpPort            = sharedflags.Set.Int("server_http_port", 8080, "TCP port to listen on for HTTP1.1/REST calls for debug endpoints like metrics, flagz page or optional pprof (insecure, but private only IP are allowed). If 0, no insecure HTTP will be open.")
	flagHttpMaxWriteTimeout = sharedflags.Set.Duration("server_http_max_write_timeout", 10*time.Second, "HTTP server config, max write duration.")
	flagHttpMaxReadTimeout  = sharedflags.Set.Duration("server_http_max_read_timeout", 10*time.Second, "HTTP server config, max read duration.")
	flagGrpcWithTracing     = sharedflags.Set.Bool("server_tracing_grpc_enabled", true, "Whether enable gRPC tracing (could be expensive).")
	flagLogstashAddress     = sharedflags.Set.String("logstash_hostport", "", "Host:port of logstash for remote logging. If empty remote logging is disabled.")
	flagLogTestBackendpoolResolution = sharedflags.Set.Bool("log_backend_resolution_on_addition", false, "With this option "+
		"kedge will always try to resolve and log (only) new backend entry. Useful for debugging backend routings.")
	flagDynamicRoutingDiscoveryEnabled = sharedflags.Set.Bool("kedge_dynamic_routings_enabled", false,
		"If enabled, kedge will watch on service changes (services which has particular label) and generates "+
			"director & backendpool routings. It will update them directly into into flagz value, so you can see the current routings anytime in debug/flagz")
)
// main wires up and runs the kedge proxy: parse flags, configure logging
// (optionally shipping to logstash), optionally start dynamic routing
// discovery, build the gRPC and HTTP director servers and their middleware
// chains, open the configured listeners, and serve until any server fails.
// Several names used here (flagConfigDirector, flagConfigBackendpool,
// httpBackendPool, httpRouter, buildTLSConfigFromFlags, authorizerFromFlags,
// flagEnableOIDCAuthForDebugEnpoints, ...) are defined elsewhere in this
// package.
func main() {
	log.SetOutput(os.Stdout)
	if err := sharedflags.Set.Parse(os.Args); err != nil {
		log.WithError(err).Fatalf("failed parsing flags")
	}
	if err := flagz.ReadFileFlags(sharedflags.Set); err != nil {
		log.WithError(err).Fatalf("failed reading flagz from files")
	}
	// Remote logging is enabled only when a logstash host:port was supplied.
	if *flagLogstashAddress != "" {
		formatter, err := logstash.NewFormatter()
		if err != nil {
			log.WithError(err).Fatal("Failed to get hostname for logstash formatter")
		}
		hook, err := logstash.NewHook(*flagLogstashAddress, formatter)
		if err != nil {
			log.WithError(err).Fatal("Failed to create new logstash hook")
		}
		log.AddHook(hook)
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	if *flagDynamicRoutingDiscoveryEnabled {
		log.Info("Flag 'kedge_dynamic_routings_enabled' is true. Enabling dynamic routing with base configuration fetched from provided" +
			"directorConfig and backendpoolConfig.")
		routingDiscovery, err := discovery.NewFromFlags(
			log.StandardLogger(),
			flagConfigDirector.Get().(*pb_config.DirectorConfig),
			flagConfigBackendpool.Get().(*pb_config.BackendPoolConfig),
		)
		if err != nil {
			log.WithError(err).Fatal("Failed to create routingDiscovery")
		}
		// Discovery runs for the life of the process; a failure is fatal
		// even from inside the goroutine (log.Fatal exits the process).
		go func() {
			err := routingDiscovery.DiscoverAndSetFlags(
				ctx,
				flagConfigDirector,
				flagConfigBackendpool,
			)
			if err != nil {
				log.WithError(err).Fatal("Dynamic Routing Discovery failed")
			}
		}()
	}
	grpc.EnableTracing = *flagGrpcWithTracing
	logEntry := log.NewEntry(log.StandardLogger())
	grpc_logrus.ReplaceGrpcLogger(logEntry)
	tlsConfig, err := buildTLSConfigFromFlags()
	if err != nil {
		log.Fatalf("failed building TLS config from flags: %v", err)
	}
	httpDirector := http_director.New(httpBackendPool, httpRouter, httpAddresser, logEntry)
	grpcDirector := grpc_director.New(grpcBackendPool, grpcRouter)
	// GRPC kedge.
	grpcDirectorServer := grpc.NewServer(
		grpc.CustomCodec(proxy.Codec()), // needed for director to function.
		grpc.UnknownServiceHandler(proxy.TransparentHandler(grpcDirector)),
		grpc_middleware.WithUnaryServerChain(
			grpc_ctxtags.UnaryServerInterceptor(),
			grpc_logrus.UnaryServerInterceptor(logEntry),
			grpc_prometheus.UnaryServerInterceptor,
		),
		grpc_middleware.WithStreamServerChain(
			grpc_ctxtags.StreamServerInterceptor(),
			grpc_logrus.StreamServerInterceptor(logEntry),
			grpc_prometheus.StreamServerInterceptor,
		),
		grpc.Creds(credentials.NewTLS(tlsConfig)),
	)
	// HTTPS proxy chain.
	httpDirectorChain := chi.Chain(
		http_ctxtags.Middleware("proxy"),
		http_metrics.Middleware(http_prometheus.ServerMetrics(http_prometheus.WithLatency())),
		http_debug.Middleware(),
		http_logrus.Middleware(logEntry, http_logrus.WithLevels(kedgeCodeToLevel)),
	)
	// HTTP debug chain.
	httpDebugChain := chi.Chain(
		http_ctxtags.Middleware("debug"),
		http_debug.Middleware(),
	)
	// httpNonAuthDebugChain chain is shares the same base but will not include auth. It is for metrics and _healthz.
	httpNonAuthDebugChain := httpDebugChain
	authorizer, err := authorizerFromFlags(logEntry)
	if err != nil {
		log.WithError(err).Fatal("failed to create authorizer.")
	}
	if authorizer != nil {
		httpDirectorChain = append(httpDirectorChain, http_director.AuthMiddleware(authorizer))
		logEntry.Info("configured OIDC authorization for HTTPS proxy.")
	}
	// Bouncer.
	httpsBouncerServer := httpsBouncerServer(grpcDirectorServer, httpDirectorChain.Handler(httpDirector), logEntry)
	if authorizer != nil && *flagEnableOIDCAuthForDebugEnpoints {
		httpDebugChain = append(httpDebugChain, http_director.AuthMiddleware(authorizer))
		logEntry.Info("configured OIDC authorization for HTTP debug server.")
	}
	// Debug.
	httpDebugServer, err := debugServer(logEntry, httpDebugChain, httpNonAuthDebugChain)
	if err != nil {
		log.WithError(err).Fatal("failed to create debug Server.")
	}
	// Open only the listeners whose port flag is non-zero; the HTTPS
	// listener is additionally wrapped for HTTP/2-over-TLS.
	errChan := make(chan error)
	var grpcTlsListener net.Listener
	var httpPlainListener net.Listener
	var httpTlsListener net.Listener
	if *flagGrpcTlsPort != 0 {
		grpcTlsListener = buildListenerOrFail("grpc_tls", *flagGrpcTlsPort)
	}
	if *flagHttpPort != 0 {
		httpPlainListener = buildListenerOrFail("http_plain", *flagHttpPort)
	}
	if *flagHttpTlsPort != 0 {
		httpTlsListener = buildListenerOrFail("http_tls", *flagHttpTlsPort)
		http2TlsConfig, err := connhelpers.TlsConfigWithHttp2Enabled(tlsConfig)
		if err != nil {
			log.Fatalf("failed setting up HTTP2 TLS config: %v", err)
		}
		httpTlsListener = tls.NewListener(httpTlsListener, http2TlsConfig)
	}
	if grpcTlsListener != nil {
		log.Infof("listening for gRPC TLS on: %v", grpcTlsListener.Addr().String())
		go func() {
			if err := grpcDirectorServer.Serve(grpcTlsListener); err != nil {
				errChan <- fmt.Errorf("grpc_tls server error: %v", err)
			}
		}()
	}
	if httpTlsListener != nil {
		log.Infof("listening for HTTP TLS on: %v", httpTlsListener.Addr().String())
		go func() {
			if err := httpsBouncerServer.Serve(httpTlsListener); err != nil {
				errChan <- fmt.Errorf("http_tls server error: %v", err)
			}
		}()
	}
	if httpPlainListener != nil {
		log.Infof("listening for HTTP Plain on: %v", httpPlainListener.Addr().String())
		go func() {
			if err := httpDebugServer.Serve(httpPlainListener); err != nil {
				errChan <- fmt.Errorf("http_plain server error: %v", err)
			}
		}()
	}
	err = <-errChan // this waits for some server breaking
	log.WithError(err).Fatalf("Fail")
}
// httpsBouncerServer returns the HTTPS front server. Each request is
// dispatched by inspection: health probes are answered directly, requests
// with a gRPC content-type are bounced to the gRPC server, and everything
// else goes to the HTTP proxy handler.
func httpsBouncerServer(grpcHandler *grpc.Server, httpHandler http.Handler, logEntry *log.Entry) *http.Server {
	bouncer := func(w http.ResponseWriter, req *http.Request) {
		switch {
		case req.URL.Path == "/_healthz":
			healthEndpoint(w, req)
		case strings.HasPrefix(req.Header.Get("content-type"), "application/grpc"):
			grpcHandler.ServeHTTP(w, req)
		default:
			httpHandler.ServeHTTP(w, req)
		}
	}
	return &http.Server{
		WriteTimeout: *flagHttpMaxWriteTimeout,
		ReadTimeout:  *flagHttpMaxReadTimeout,
		ErrorLog:     http_logrus.AsHttpLogger(logEntry.WithField(ctxtags.TagForScheme, "tls")),
		Handler:      http.HandlerFunc(bouncer),
	}
}
// debugServer builds the plain-HTTP server exposing operational endpoints:
// health, Prometheus metrics, version, flagz, pprof and request tracing.
// Endpoints registered with noAuthMiddlewares deliberately skip auth
// (health checks and metric scrapes); the rest use the possibly
// auth-wrapped middlewares chain.
func debugServer(logEntry *log.Entry, middlewares chi.Middlewares, noAuthMiddlewares chi.Middlewares) (*http.Server, error) {
	m := chi.NewMux()
	m.Handle("/_healthz", noAuthMiddlewares.HandlerFunc(healthEndpoint))
	m.Handle("/debug/metrics", noAuthMiddlewares.Handler(promhttp.Handler()))
	m.Handle("/_version",
		// The only one worth to log.
		chi.Chain(http_logrus.Middleware(logEntry.WithField(ctxtags.TagForScheme, "plain"), http_logrus.WithLevels(kedgeCodeToLevel))).
			Handler(middlewares.HandlerFunc(versionEndpoint)))
	m.Handle("/debug/flagz", middlewares.HandlerFunc(flagz.NewStatusEndpoint(sharedflags.Set).ListFlags))
	m.Handle("/debug/pprof/", middlewares.HandlerFunc(pprof.Index))
	m.Handle("/debug/pprof/cmdline", middlewares.HandlerFunc(pprof.Cmdline))
	m.Handle("/debug/pprof/profile", middlewares.HandlerFunc(pprof.Profile))
	m.Handle("/debug/pprof/symbol", middlewares.HandlerFunc(pprof.Symbol))
	m.Handle("/debug/pprof/trace", middlewares.HandlerFunc(pprof.Trace))
	m.Handle("/debug/traces", middlewares.HandlerFunc(trace.Traces))
	m.Handle("/debug/events", middlewares.HandlerFunc(trace.Events))
	// NOTE(review): the error-log scheme tag is "tls" although this server
	// serves plain HTTP — confirm whether "plain" was intended here.
	return &http.Server{
		WriteTimeout: *flagHttpMaxWriteTimeout,
		ReadTimeout:  *flagHttpMaxReadTimeout,
		ErrorLog:     http_logrus.AsHttpLogger(logEntry.WithField(ctxtags.TagForScheme, "tls")),
		Handler:      m,
	}, nil
}
// buildListenerOrFail opens a TCP listener on the configured bind address
// and given port, wrapped in conntrack instrumentation (named tracking,
// TCP keep-alive, tracing). It terminates the process on failure.
func buildListenerOrFail(name string, port int) net.Listener {
	bindAddr := fmt.Sprintf("%s:%d", *flagBindAddr, port)
	rawListener, err := net.Listen("tcp", bindAddr)
	if err != nil {
		log.Fatalf("failed listening for '%v' on %v: %v", name, port, err)
	}
	tracked := conntrack.NewListener(rawListener,
		conntrack.TrackWithName(name),
		conntrack.TrackWithTcpKeepAlive(20*time.Second),
		conntrack.TrackWithTracing(),
	)
	return tracked
}
func healthEndpoint(resp http.ResponseWriter, req *http.Request) {
resp.Header().Set("content-type", "text/plain")
resp.WriteHeader(http.StatusOK)
fmt.Fprintf(resp, "kedge isok")
}
// kedgeCodeToLevel maps an HTTP status code to a log severity: success
// (and 404, which is routine traffic for a proxy) logs at debug, other
// 4xx at warning, everything else at error.
func kedgeCodeToLevel(httpStatusCode int) log.Level {
	switch {
	case httpStatusCode < 400, httpStatusCode == http.StatusNotFound:
		return log.DebugLevel
	case httpStatusCode < 500:
		return log.WarnLevel
	default:
		return log.ErrorLevel
	}
}
Reordered the HTTP metrics / logrus middlewares to avoid an issue with the current go-httpwares.
Signed-off-by: Bartek Plotka <40733124e8bedaac646d3fd1c538b5cb2b6856fe@gmail.com>
package main
import (
"context"
"crypto/tls"
"fmt"
"net"
"net/http"
"net/http/pprof"
"os"
"strings"
"time"
"github.com/grpc-ecosystem/go-grpc-middleware"
"github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus"
"github.com/grpc-ecosystem/go-grpc-middleware/tags"
"github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/mwitkow/go-conntrack"
"github.com/mwitkow/go-conntrack/connhelpers"
"github.com/mwitkow/go-flagz"
"github.com/mwitkow/go-httpwares/logging/logrus"
"github.com/mwitkow/go-httpwares/metrics"
"github.com/mwitkow/go-httpwares/metrics/prometheus"
"github.com/mwitkow/go-httpwares/tags"
"github.com/mwitkow/go-httpwares/tracing/debug"
"github.com/mwitkow/grpc-proxy/proxy"
pb_config "github.com/mwitkow/kedge/_protogen/kedge/config"
grpc_director "github.com/mwitkow/kedge/grpc/director"
http_director "github.com/mwitkow/kedge/http/director"
"github.com/mwitkow/kedge/lib/discovery"
"github.com/mwitkow/kedge/lib/http/ctxtags"
"github.com/mwitkow/kedge/lib/logstash"
"github.com/mwitkow/kedge/lib/sharedflags"
"github.com/pressly/chi"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
"golang.org/x/net/trace"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
// Command-line flags registered on the shared flag set. A port flag set to
// 0 disables the corresponding listener.
var (
	flagBindAddr            = sharedflags.Set.String("server_bind_address", "0.0.0.0", "address to bind the server to")
	flagGrpcTlsPort         = sharedflags.Set.Int("server_grpc_tls_port", 8444, "TCP TLS port to listen on for secure gRPC calls. If 0, no gRPC-TLS will be open.")
	flagHttpTlsPort         = sharedflags.Set.Int("server_http_tls_port", 8443, "TCP port to listen on for HTTPS. If gRPC call will hit it will bounce to gRPC handler. If 0, no TLS will be open.")
	flagHttpPort            = sharedflags.Set.Int("server_http_port", 8080, "TCP port to listen on for HTTP1.1/REST calls for debug endpoints like metrics, flagz page or optional pprof (insecure, but private only IP are allowed). If 0, no insecure HTTP will be open.")
	flagHttpMaxWriteTimeout = sharedflags.Set.Duration("server_http_max_write_timeout", 10*time.Second, "HTTP server config, max write duration.")
	flagHttpMaxReadTimeout  = sharedflags.Set.Duration("server_http_max_read_timeout", 10*time.Second, "HTTP server config, max read duration.")
	flagGrpcWithTracing     = sharedflags.Set.Bool("server_tracing_grpc_enabled", true, "Whether enable gRPC tracing (could be expensive).")
	flagLogstashAddress     = sharedflags.Set.String("logstash_hostport", "", "Host:port of logstash for remote logging. If empty remote logging is disabled.")
	flagLogTestBackendpoolResolution = sharedflags.Set.Bool("log_backend_resolution_on_addition", false, "With this option "+
		"kedge will always try to resolve and log (only) new backend entry. Useful for debugging backend routings.")
	flagDynamicRoutingDiscoveryEnabled = sharedflags.Set.Bool("kedge_dynamic_routings_enabled", false,
		"If enabled, kedge will watch on service changes (services which has particular label) and generates "+
			"director & backendpool routings. It will update them directly into into flagz value, so you can see the current routings anytime in debug/flagz")
)
// main wires up and runs the kedge proxy: parse flags, configure logging
// (optionally shipping to logstash), optionally start dynamic routing
// discovery, build the gRPC and HTTP director servers and their middleware
// chains, open the configured listeners, and serve until any server fails.
// Several names used here (flagConfigDirector, flagConfigBackendpool,
// httpBackendPool, httpRouter, buildTLSConfigFromFlags, authorizerFromFlags,
// flagEnableOIDCAuthForDebugEnpoints, ...) are defined elsewhere in this
// package.
func main() {
	log.SetOutput(os.Stdout)
	if err := sharedflags.Set.Parse(os.Args); err != nil {
		log.WithError(err).Fatalf("failed parsing flags")
	}
	if err := flagz.ReadFileFlags(sharedflags.Set); err != nil {
		log.WithError(err).Fatalf("failed reading flagz from files")
	}
	// Remote logging is enabled only when a logstash host:port was supplied.
	if *flagLogstashAddress != "" {
		formatter, err := logstash.NewFormatter()
		if err != nil {
			log.WithError(err).Fatal("Failed to get hostname for logstash formatter")
		}
		hook, err := logstash.NewHook(*flagLogstashAddress, formatter)
		if err != nil {
			log.WithError(err).Fatal("Failed to create new logstash hook")
		}
		log.AddHook(hook)
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	if *flagDynamicRoutingDiscoveryEnabled {
		log.Info("Flag 'kedge_dynamic_routings_enabled' is true. Enabling dynamic routing with base configuration fetched from provided" +
			"directorConfig and backendpoolConfig.")
		routingDiscovery, err := discovery.NewFromFlags(
			log.StandardLogger(),
			flagConfigDirector.Get().(*pb_config.DirectorConfig),
			flagConfigBackendpool.Get().(*pb_config.BackendPoolConfig),
		)
		if err != nil {
			log.WithError(err).Fatal("Failed to create routingDiscovery")
		}
		// Discovery runs for the life of the process; a failure is fatal
		// even from inside the goroutine (log.Fatal exits the process).
		go func() {
			err := routingDiscovery.DiscoverAndSetFlags(
				ctx,
				flagConfigDirector,
				flagConfigBackendpool,
			)
			if err != nil {
				log.WithError(err).Fatal("Dynamic Routing Discovery failed")
			}
		}()
	}
	grpc.EnableTracing = *flagGrpcWithTracing
	logEntry := log.NewEntry(log.StandardLogger())
	grpc_logrus.ReplaceGrpcLogger(logEntry)
	tlsConfig, err := buildTLSConfigFromFlags()
	if err != nil {
		log.Fatalf("failed building TLS config from flags: %v", err)
	}
	httpDirector := http_director.New(httpBackendPool, httpRouter, httpAddresser, logEntry)
	grpcDirector := grpc_director.New(grpcBackendPool, grpcRouter)
	// GRPC kedge.
	grpcDirectorServer := grpc.NewServer(
		grpc.CustomCodec(proxy.Codec()), // needed for director to function.
		grpc.UnknownServiceHandler(proxy.TransparentHandler(grpcDirector)),
		grpc_middleware.WithUnaryServerChain(
			grpc_ctxtags.UnaryServerInterceptor(),
			grpc_logrus.UnaryServerInterceptor(logEntry),
			grpc_prometheus.UnaryServerInterceptor,
		),
		grpc_middleware.WithStreamServerChain(
			grpc_ctxtags.StreamServerInterceptor(),
			grpc_logrus.StreamServerInterceptor(logEntry),
			grpc_prometheus.StreamServerInterceptor,
		),
		grpc.Creds(credentials.NewTLS(tlsConfig)),
	)
	// HTTPS proxy chain. Note: metrics middleware is placed after the
	// debug/logrus middlewares in this version.
	httpDirectorChain := chi.Chain(
		http_ctxtags.Middleware("proxy"),
		http_debug.Middleware(),
		http_logrus.Middleware(logEntry, http_logrus.WithLevels(kedgeCodeToLevel)),
		http_metrics.Middleware(http_prometheus.ServerMetrics(http_prometheus.WithLatency())),
	)
	// HTTP debug chain.
	httpDebugChain := chi.Chain(
		http_ctxtags.Middleware("debug"),
		http_debug.Middleware(),
	)
	// httpNonAuthDebugChain chain is shares the same base but will not include auth. It is for metrics and _healthz.
	httpNonAuthDebugChain := httpDebugChain
	authorizer, err := authorizerFromFlags(logEntry)
	if err != nil {
		log.WithError(err).Fatal("failed to create authorizer.")
	}
	if authorizer != nil {
		httpDirectorChain = append(httpDirectorChain, http_director.AuthMiddleware(authorizer))
		logEntry.Info("configured OIDC authorization for HTTPS proxy.")
	}
	// Bouncer.
	httpsBouncerServer := httpsBouncerServer(grpcDirectorServer, httpDirectorChain.Handler(httpDirector), logEntry)
	if authorizer != nil && *flagEnableOIDCAuthForDebugEnpoints {
		httpDebugChain = append(httpDebugChain, http_director.AuthMiddleware(authorizer))
		logEntry.Info("configured OIDC authorization for HTTP debug server.")
	}
	// Debug.
	httpDebugServer, err := debugServer(logEntry, httpDebugChain, httpNonAuthDebugChain)
	if err != nil {
		log.WithError(err).Fatal("failed to create debug Server.")
	}
	// Open only the listeners whose port flag is non-zero; the HTTPS
	// listener is additionally wrapped for HTTP/2-over-TLS.
	errChan := make(chan error)
	var grpcTlsListener net.Listener
	var httpPlainListener net.Listener
	var httpTlsListener net.Listener
	if *flagGrpcTlsPort != 0 {
		grpcTlsListener = buildListenerOrFail("grpc_tls", *flagGrpcTlsPort)
	}
	if *flagHttpPort != 0 {
		httpPlainListener = buildListenerOrFail("http_plain", *flagHttpPort)
	}
	if *flagHttpTlsPort != 0 {
		httpTlsListener = buildListenerOrFail("http_tls", *flagHttpTlsPort)
		http2TlsConfig, err := connhelpers.TlsConfigWithHttp2Enabled(tlsConfig)
		if err != nil {
			log.Fatalf("failed setting up HTTP2 TLS config: %v", err)
		}
		httpTlsListener = tls.NewListener(httpTlsListener, http2TlsConfig)
	}
	if grpcTlsListener != nil {
		log.Infof("listening for gRPC TLS on: %v", grpcTlsListener.Addr().String())
		go func() {
			if err := grpcDirectorServer.Serve(grpcTlsListener); err != nil {
				errChan <- fmt.Errorf("grpc_tls server error: %v", err)
			}
		}()
	}
	if httpTlsListener != nil {
		log.Infof("listening for HTTP TLS on: %v", httpTlsListener.Addr().String())
		go func() {
			if err := httpsBouncerServer.Serve(httpTlsListener); err != nil {
				errChan <- fmt.Errorf("http_tls server error: %v", err)
			}
		}()
	}
	if httpPlainListener != nil {
		log.Infof("listening for HTTP Plain on: %v", httpPlainListener.Addr().String())
		go func() {
			if err := httpDebugServer.Serve(httpPlainListener); err != nil {
				errChan <- fmt.Errorf("http_plain server error: %v", err)
			}
		}()
	}
	err = <-errChan // this waits for some server breaking
	log.WithError(err).Fatalf("Fail")
}
// httpsBouncerServer returns the HTTPS front server. Each request is
// dispatched by inspection: health probes are answered directly, requests
// with a gRPC content-type are bounced to the gRPC server, and everything
// else goes to the HTTP proxy handler.
func httpsBouncerServer(grpcHandler *grpc.Server, httpHandler http.Handler, logEntry *log.Entry) *http.Server {
	bouncer := func(w http.ResponseWriter, req *http.Request) {
		switch {
		case req.URL.Path == "/_healthz":
			healthEndpoint(w, req)
		case strings.HasPrefix(req.Header.Get("content-type"), "application/grpc"):
			grpcHandler.ServeHTTP(w, req)
		default:
			httpHandler.ServeHTTP(w, req)
		}
	}
	return &http.Server{
		WriteTimeout: *flagHttpMaxWriteTimeout,
		ReadTimeout:  *flagHttpMaxReadTimeout,
		ErrorLog:     http_logrus.AsHttpLogger(logEntry.WithField(ctxtags.TagForScheme, "tls")),
		Handler:      http.HandlerFunc(bouncer),
	}
}
// debugServer builds the plain-HTTP server exposing operational endpoints:
// health, Prometheus metrics, version, flagz, pprof and request tracing.
// Endpoints registered with noAuthMiddlewares deliberately skip auth
// (health checks and metric scrapes); the rest use the possibly
// auth-wrapped middlewares chain.
func debugServer(logEntry *log.Entry, middlewares chi.Middlewares, noAuthMiddlewares chi.Middlewares) (*http.Server, error) {
	m := chi.NewMux()
	m.Handle("/_healthz", noAuthMiddlewares.HandlerFunc(healthEndpoint))
	m.Handle("/debug/metrics", noAuthMiddlewares.Handler(promhttp.Handler()))
	m.Handle("/_version",
		// The only one worth to log.
		chi.Chain(http_logrus.Middleware(logEntry.WithField(ctxtags.TagForScheme, "plain"), http_logrus.WithLevels(kedgeCodeToLevel))).
			Handler(middlewares.HandlerFunc(versionEndpoint)))
	m.Handle("/debug/flagz", middlewares.HandlerFunc(flagz.NewStatusEndpoint(sharedflags.Set).ListFlags))
	m.Handle("/debug/pprof/", middlewares.HandlerFunc(pprof.Index))
	m.Handle("/debug/pprof/cmdline", middlewares.HandlerFunc(pprof.Cmdline))
	m.Handle("/debug/pprof/profile", middlewares.HandlerFunc(pprof.Profile))
	m.Handle("/debug/pprof/symbol", middlewares.HandlerFunc(pprof.Symbol))
	m.Handle("/debug/pprof/trace", middlewares.HandlerFunc(pprof.Trace))
	m.Handle("/debug/traces", middlewares.HandlerFunc(trace.Traces))
	m.Handle("/debug/events", middlewares.HandlerFunc(trace.Events))
	// NOTE(review): the error-log scheme tag is "tls" although this server
	// serves plain HTTP — confirm whether "plain" was intended here.
	return &http.Server{
		WriteTimeout: *flagHttpMaxWriteTimeout,
		ReadTimeout:  *flagHttpMaxReadTimeout,
		ErrorLog:     http_logrus.AsHttpLogger(logEntry.WithField(ctxtags.TagForScheme, "tls")),
		Handler:      m,
	}, nil
}
// buildListenerOrFail opens a TCP listener on the configured bind address
// and given port, wrapped in conntrack instrumentation (named tracking,
// TCP keep-alive, tracing). It terminates the process on failure.
func buildListenerOrFail(name string, port int) net.Listener {
	bindAddr := fmt.Sprintf("%s:%d", *flagBindAddr, port)
	rawListener, err := net.Listen("tcp", bindAddr)
	if err != nil {
		log.Fatalf("failed listening for '%v' on %v: %v", name, port, err)
	}
	tracked := conntrack.NewListener(rawListener,
		conntrack.TrackWithName(name),
		conntrack.TrackWithTcpKeepAlive(20*time.Second),
		conntrack.TrackWithTracing(),
	)
	return tracked
}
func healthEndpoint(resp http.ResponseWriter, req *http.Request) {
resp.Header().Set("content-type", "text/plain")
resp.WriteHeader(http.StatusOK)
fmt.Fprintf(resp, "kedge isok")
}
// kedgeCodeToLevel maps an HTTP status code to a log severity: success
// (and 404, which is routine traffic for a proxy) logs at debug, other
// 4xx at warning, everything else at error.
func kedgeCodeToLevel(httpStatusCode int) log.Level {
	switch {
	case httpStatusCode < 400, httpStatusCode == http.StatusNotFound:
		return log.DebugLevel
	case httpStatusCode < 500:
		return log.WarnLevel
	default:
		return log.ErrorLevel
	}
}
|
package main
import (
"encoding/base64"
"flag"
"fmt"
"net"
"net/http"
"strconv"
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
)
// upgrader performs the HTTP -> websocket upgrade with 10 KiB read/write
// buffers.
var upgrader = websocket.Upgrader{
	ReadBufferSize:  10240,
	WriteBufferSize: 10240,
}

// Listen address, populated from command-line flags in init.
var listenIp string
var listenPort int
func serveHome(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/" {
http.Error(w, "Not found", 404)
return
}
if r.Method != "GET" {
http.Error(w, "Method not allowed", 405)
return
}
w.Header().Set("Content-Type", "text/html; charset=utf-8")
w.Write([]byte("It Works!"))
}
// connection couples a client websocket with the outbound connection it
// proxies to.
type connection struct {
	ws         *websocket.Conn // client-facing websocket
	conn       net.Conn        // dialed backend connection; set in handle
	version    int             // protocol version taken from the URL
	request    string          // network to dial, e.g. "tcp"
	parameters string          // dial address, base64-decoded from the URL
}
// handle dials the requested network/address and, on success, pipes bytes
// between the websocket and the dialed connection via pipe (defined
// elsewhere in this package). On dial failure the websocket is closed.
func (c *connection) handle() {
	Log("daemon", "debug", fmt.Sprintf("handled connection: version: %d, request: %s, parameters: %s", c.version, c.request, c.parameters))
	var err error
	c.conn, err = net.Dial(c.request, c.parameters)
	if err != nil {
		Log("daemon", "debug", fmt.Sprintf("error dialing %s - %s, err: %s", c.request, c.parameters, err))
		c.ws.Close()
		return
	}
	pipe(c.ws, c.conn)
}
// sockHandler handles websocket requests from the peer: it decodes the
// base64 "parameters" path segment, upgrades the connection to a
// websocket, and hands it to a connection goroutine that dials the
// requested network/address. (The comment previously named this serveWs.)
func sockHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	// NOTE(review): a malformed version segment silently becomes 0 —
	// confirm that is acceptable for the protocol.
	version, _ := strconv.Atoi(vars["version"])
	request := vars["request"]
	parameters, err := base64.StdEncoding.DecodeString(vars["parameters"])
	if err != nil {
		Log("daemon", "error", fmt.Sprintf("base64decode failed: %s", err))
		return
	}
	ws, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		Log("daemon", "error", fmt.Sprintf("ws upgrade failed: %s", err))
		return
	}
	// string(parameters) replaces fmt.Sprintf("%s", parameters): identical
	// result without the formatting round-trip.
	c := &connection{version: version, request: request, parameters: string(parameters), ws: ws}
	go c.handle()
}
// listen builds the HTTP router and serves on the configured ip:port until
// a fatal error, which panics. The local router variable no longer shadows
// the imported mux package.
func listen() {
	addr := fmt.Sprintf("%s:%d", listenIp, listenPort)
	router := mux.NewRouter()
	router.HandleFunc("/sock/{version}/{request}/{parameters}", sockHandler)
	router.HandleFunc("/", serveHome)
	Log("daemon", "info", fmt.Sprintf("Listening on %s", addr))
	err := http.ListenAndServe(addr, router)
	if err != nil {
		panic("ListenAndServe: " + err.Error())
	}
}
// main logs startup, parses the command-line flags registered in init, and
// blocks in the HTTP listener.
func main() {
	Log("daemon", "info", "starting proxy")
	flag.Parse()
	listen()
}
// init registers the listen flags: port defaults to 443 and an empty ip
// binds all interfaces.
func init() {
	flag.IntVar(&listenPort, "port", 443, "port to listen on")
	flag.StringVar(&listenIp, "ip", "", "ip to bind to")
}
Add HTTP/2 support to server
Signed-off-by: Jeroen Simonetti <fd3fb11d20dd2b55b441515e5cc120796bf92f11@simonetti.nl>
package main
import (
"encoding/base64"
"flag"
"fmt"
"net"
"net/http"
"strconv"
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
"golang.org/x/net/http2"
)
// upgrader performs the HTTP -> websocket upgrade with 10 KiB read/write
// buffers.
var upgrader = websocket.Upgrader{
	ReadBufferSize:  10240,
	WriteBufferSize: 10240,
}

// Listen address, populated from command-line flags in init.
var listenIp string
var listenPort int
func serveHome(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/" {
http.Error(w, "Not found", 404)
return
}
if r.Method != "GET" {
http.Error(w, "Method not allowed", 405)
return
}
w.Header().Set("Content-Type", "text/html; charset=utf-8")
w.Write([]byte("It Works!"))
}
// connection couples a client websocket with the outbound connection it
// proxies to.
type connection struct {
	ws         *websocket.Conn // client-facing websocket
	conn       net.Conn        // dialed backend connection; set in handle
	version    int             // protocol version taken from the URL
	request    string          // network to dial, e.g. "tcp"
	parameters string          // dial address, base64-decoded from the URL
}
// handle dials the requested network/address and, on success, pipes bytes
// between the websocket and the dialed connection via pipe (defined
// elsewhere in this package). On dial failure the websocket is closed.
func (c *connection) handle() {
	Log("daemon", "debug", fmt.Sprintf("handled connection: version: %d, request: %s, parameters: %s", c.version, c.request, c.parameters))
	var err error
	c.conn, err = net.Dial(c.request, c.parameters)
	if err != nil {
		Log("daemon", "debug", fmt.Sprintf("error dialing %s - %s, err: %s", c.request, c.parameters, err))
		c.ws.Close()
		return
	}
	pipe(c.ws, c.conn)
}
// sockHandler handles websocket requests from the peer: it decodes the
// base64 "parameters" path segment, upgrades the connection to a
// websocket, and hands it to a connection goroutine that dials the
// requested network/address. (The comment previously named this serveWs.)
func sockHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	// NOTE(review): a malformed version segment silently becomes 0 —
	// confirm that is acceptable for the protocol.
	version, _ := strconv.Atoi(vars["version"])
	request := vars["request"]
	parameters, err := base64.StdEncoding.DecodeString(vars["parameters"])
	if err != nil {
		Log("daemon", "error", fmt.Sprintf("base64decode failed: %s", err))
		return
	}
	ws, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		Log("daemon", "error", fmt.Sprintf("ws upgrade failed: %s", err))
		return
	}
	// string(parameters) replaces fmt.Sprintf("%s", parameters): identical
	// result without the formatting round-trip.
	c := &connection{version: version, request: request, parameters: string(parameters), ws: ws}
	go c.handle()
}
// listen builds the HTTP router, enables HTTP/2 on the server where
// possible (falling back to HTTP/1.1 with a logged warning), and serves on
// the configured ip:port until a fatal error, which panics. The local
// router variable no longer shadows the imported mux package, and the
// "attemting" typo in the log message is fixed.
func listen() {
	addr := fmt.Sprintf("%s:%d", listenIp, listenPort)
	router := mux.NewRouter()
	router.HandleFunc("/sock/{version}/{request}/{parameters}", sockHandler)
	router.HandleFunc("/", serveHome)
	server := &http.Server{
		Addr:    addr,
		Handler: router,
	}
	Log("daemon", "info", "attempting upgrade of server to http/2")
	if err := http2.ConfigureServer(server, nil); err != nil {
		Log("daemon", "info", fmt.Sprintf("upgrade to http/2 failed: %s", err))
	}
	Log("daemon", "info", fmt.Sprintf("Listening on %s", addr))
	err := server.ListenAndServe()
	if err != nil {
		panic("ListenAndServe: " + err.Error())
	}
}
// main logs startup, parses the command-line flags registered in init, and
// blocks in the HTTP listener.
func main() {
	Log("daemon", "info", "starting proxy")
	flag.Parse()
	listen()
}
// init registers the listen flags: port defaults to 443 and an empty ip
// binds all interfaces.
func init() {
	flag.IntVar(&listenPort, "port", 443, "port to listen on")
	flag.StringVar(&listenIp, "ip", "", "ip to bind to")
}
|
package main
import (
"fmt"
"io/ioutil"
"log"
"os"
"github.com/dustin/go-jsonpointer"
)
// main reads JSON from stdin and, when invoked with no arguments, prints
// every JSON-pointer path found in the document, one per line. Any
// arguments are ignored in this version.
func main() {
	d, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		log.Fatalf("Error reading json from stdin: %v", err)
	}
	if len(os.Args) == 1 {
		l, err := jsonpointer.ListPointers(d)
		if err != nil {
			log.Fatalf("Error listing pointers: %v", err)
		}
		for _, p := range l {
			fmt.Println(p)
		}
	}
}
Make ptrtool pull out chunks based on pointer refs
package main
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"github.com/dustin/go-jsonpointer"
)
// main reads JSON from stdin. With no arguments it lists every
// JSON-pointer path in the document; with arguments it treats each one as
// a JSON pointer and prints the indented fragment it addresses.
func main() {
	d, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		log.Fatalf("Error reading json from stdin: %v", err)
	}
	if len(os.Args) == 1 {
		l, err := jsonpointer.ListPointers(d)
		if err != nil {
			log.Fatalf("Error listing pointers: %v", err)
		}
		for _, p := range l {
			fmt.Println(p)
		}
	} else {
		m, err := jsonpointer.FindMany(d, os.Args[1:])
		if err != nil {
			log.Fatalf("Error finding pointers: %v", err)
		}
		for k, v := range m {
			b := &bytes.Buffer{}
			// The original silently ignored the Indent error; fall back to
			// the raw fragment if it cannot be re-indented.
			if err := json.Indent(b, v, "", " "); err != nil {
				b = bytes.NewBuffer(v)
			}
			fmt.Printf("%v\n%s\n\n", k, b)
		}
	}
}
|
package weavedns
import (
"github.com/miekg/dns"
"log"
"net"
"sync"
)
// Zone is the interface for a DNS zone database: records can be added and
// names resolved to their weave IP.
type Zone interface {
	AddRecord(string, string, net.IP, net.IP, *net.IPNet) error
	MatchLocal(string) (net.IP, error)
}

// Record is one zone entry, tying a container identifier and name to its
// host and weave addresses.
type Record struct {
	Ident   string
	Name    string
	Ip      net.IP
	WeaveIp net.IP
	Subnet  *net.IPNet
}

// ZoneDb is an in-memory, RWMutex-guarded implementation of Zone.
type ZoneDb struct {
	mx   sync.RWMutex
	recs []Record
}

// LookupError reports a name that could not be resolved.
type LookupError string

func (ops LookupError) Error() string {
	return "Unable to find " + string(ops)
}

// DuplicateError reports an attempt to add a name/weave-IP pair that
// already exists.
type DuplicateError struct {
	Name    string
	WeaveIp net.IP
}

func (err DuplicateError) Error() string {
	return "Duplicate " + err.Name + "," + err.WeaveIp.String()
}
// Stop gap: linear scan over the records, returning the weave IP of the
// first record whose name matches. Callers must hold the lock.
func (zone *ZoneDb) match(name string) (net.IP, error) {
	for i := range zone.recs {
		rec := &zone.recs[i]
		log.Printf("%s == %s ?", rec.Name, name)
		if rec.Name == name {
			return rec.WeaveIp, nil
		}
	}
	return nil, LookupError(name)
}
// indexOfNameAddr returns the position of the record matching both name
// and weave address, or -1 when no such record exists. Callers must hold
// the lock.
func (zone *ZoneDb) indexOfNameAddr(name string, addr net.IP) int {
	for i := range zone.recs {
		rec := &zone.recs[i]
		if rec.Name == name && rec.WeaveIp.Equal(addr) {
			return i
		}
	}
	return -1
}
// MatchLocal resolves name in the local zone to its weave IP. Lookups are
// read-only (match does not mutate recs), so take the read lock — the
// original took the full write lock, serializing concurrent lookups.
func (zone *ZoneDb) MatchLocal(name string) (net.IP, error) {
	zone.mx.RLock()
	defer zone.mx.RUnlock()
	return zone.match(name)
}
// AddRecord registers a record for the given container identifier and
// name, rejecting an exact duplicate of fqdn + weave address.
func (zone *ZoneDb) AddRecord(identifier string, name string, ip net.IP, weave_ip net.IP, weave_subnet *net.IPNet) error {
	zone.mx.Lock()
	defer zone.mx.Unlock()
	fqdn := dns.Fqdn(name)
	if zone.indexOfNameAddr(fqdn, weave_ip) != -1 {
		return DuplicateError{fqdn, weave_ip}
	}
	newRec := Record{identifier, fqdn, ip, weave_ip, weave_subnet}
	zone.recs = append(zone.recs, newRec)
	return nil
}
Better error reporting
package weavedns
import (
"github.com/miekg/dns"
"log"
"net"
"sync"
)
// Zone is the interface for a DNS zone database: records can be added and
// names resolved to their weave IP.
type Zone interface {
	AddRecord(string, string, net.IP, net.IP, *net.IPNet) error
	MatchLocal(string) (net.IP, error)
}

// Record is one zone entry, tying a container identifier and name to its
// host and weave addresses.
type Record struct {
	Ident   string
	Name    string
	Ip      net.IP
	WeaveIp net.IP
	Subnet  *net.IPNet
}

// ZoneDb is an in-memory, RWMutex-guarded implementation of Zone.
type ZoneDb struct {
	mx   sync.RWMutex
	recs []Record
}

// LookupError reports a name that could not be resolved.
type LookupError string

func (ops LookupError) Error() string {
	return "Unable to find " + string(ops)
}

// DuplicateError reports an attempt to add a name/weave-IP pair that
// already exists, including which container holds the existing record.
type DuplicateError struct {
	Name    string
	WeaveIp net.IP
	Ident   string
}

func (err DuplicateError) Error() string {
	return "Duplicate " + err.Name + "," + err.WeaveIp.String() + " in container " + err.Ident
}
// Stop gap: linear scan over the records, returning the weave IP of the
// first record whose name matches. Callers must hold the lock.
func (zone *ZoneDb) match(name string) (net.IP, error) {
	for i := range zone.recs {
		rec := &zone.recs[i]
		log.Printf("%s == %s ?", rec.Name, name)
		if rec.Name == name {
			return rec.WeaveIp, nil
		}
	}
	return nil, LookupError(name)
}
// indexOfNameAddr returns the position of the record matching both name
// and weave address, or -1 when no such record exists. Callers must hold
// the lock.
func (zone *ZoneDb) indexOfNameAddr(name string, addr net.IP) int {
	for i := range zone.recs {
		rec := &zone.recs[i]
		if rec.Name == name && rec.WeaveIp.Equal(addr) {
			return i
		}
	}
	return -1
}
// MatchLocal resolves name in the local zone to its weave IP. Lookups are
// read-only (match does not mutate recs), so take the read lock — the
// original took the full write lock, serializing concurrent lookups.
func (zone *ZoneDb) MatchLocal(name string) (net.IP, error) {
	zone.mx.RLock()
	defer zone.mx.RUnlock()
	return zone.match(name)
}
// AddRecord registers a record for the given container identifier and
// name. An exact duplicate of fqdn + weave address is rejected with a
// DuplicateError naming the container that already owns it.
func (zone *ZoneDb) AddRecord(identifier string, name string, ip net.IP, weave_ip net.IP, weave_subnet *net.IPNet) error {
	zone.mx.Lock()
	defer zone.mx.Unlock()
	fqdn := dns.Fqdn(name)
	index := zone.indexOfNameAddr(fqdn, weave_ip)
	if index != -1 {
		return DuplicateError{fqdn, weave_ip, zone.recs[index].Ident}
	}
	newRec := Record{identifier, fqdn, ip, weave_ip, weave_subnet}
	zone.recs = append(zone.recs, newRec)
	return nil
}
|
package veneur
import (
"bytes"
"compress/zlib"
"crypto/tls"
"crypto/x509"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"sync"
"math/rand"
"net"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stripe/veneur/protocol"
"github.com/stripe/veneur/samplers"
"github.com/stripe/veneur/ssf"
"github.com/stripe/veneur/tdigest"
)
// ε is the tolerance used when comparing approximate values.
const ε = .00002

// Short intervals keep the tests fast: flushes every 50ms, server timeouts
// after 100ms.
const DefaultFlushInterval = 50 * time.Millisecond
const DefaultServerTimeout = 100 * time.Millisecond

// DebugMode mirrors the -test.v flag; set in TestMain.
var DebugMode bool
// TestMain parses the test flags and records whether verbose mode (-test.v)
// is enabled in DebugMode before running the suite.
func TestMain(m *testing.M) {
	flag.Parse()
	// flag.Lookup finds the registered -test.v flag; its Value implements
	// flag.Getter, whose Get yields the underlying bool.
	DebugMode = flag.Lookup("test.v").Value.(flag.Getter).Get().(bool)
	os.Exit(m.Run())
}
// On the CI server, we can't be guaranteed that the port will be
// released immediately after the server is shut down. Instead, use
// a unique port for each test. As long as we don't have an insane number
// of integration tests, we should be fine.
// Incremented by generateConfig each time it allocates a port.
var HTTPAddrPort = 8129
// set up a boilerplate local config for later use
// localConfig forwards to a local address (i.e. behaves as a non-global
// instance).
func localConfig() Config {
	return generateConfig("http://localhost")
}
// set up a boilerplate global config for later use
// globalConfig uses an empty forward address (i.e. behaves as a global
// instance).
func globalConfig() Config {
	return generateConfig("")
}
// generateConfig is not called config to avoid
// accidental variable shadowing
// It builds a test Config pointed at forwardAddr, allocating three fresh
// ports (HTTP API, statsd and SSF/trace) from the global HTTPAddrPort
// counter so parallel tests never collide.
func generateConfig(forwardAddr string) Config {
	// we don't shut down ports so avoid address in use errors
	port := HTTPAddrPort
	HTTPAddrPort++
	metricsPort := HTTPAddrPort
	HTTPAddrPort++
	tracePort := HTTPAddrPort
	HTTPAddrPort++
	return Config{
		DatadogAPIHostname: "http://localhost",
		Debug:              DebugMode,
		Hostname:           "localhost",
		// Use a shorter interval for tests
		Interval:            DefaultFlushInterval.String(),
		Key:                 "",
		MetricMaxLength:     4096,
		Percentiles:         []float64{.5, .75, .99},
		Aggregates:          []string{"min", "max", "count"},
		ReadBufferSizeBytes: 2097152,
		StatsdListenAddresses: []string{fmt.Sprintf("udp://localhost:%d", metricsPort)},
		HTTPAddress:           fmt.Sprintf("localhost:%d", port),
		ForwardAddress:        forwardAddr,
		NumWorkers:            4,
		FlushFile:             "",
		// Use only one reader, so that we can run tests
		// on platforms which do not support SO_REUSEPORT
		NumReaders: 1,
		// Currently this points nowhere, which is intentional.
		// We don't need internal metrics for the tests, and they make testing
		// more complicated.
		StatsAddress:    "localhost:8125",
		Tags:            []string{},
		SentryDsn:       "",
		FlushMaxPerBody: 1024,
		// Don't use the default port 8128: Veneur sends its own traces there, causing failures
		SsfListenAddresses:     []string{fmt.Sprintf("udp://127.0.0.1:%d", tracePort)},
		DatadogTraceAPIAddress: forwardAddr,
		TraceMaxLengthBytes:    4096,
		SsfBufferSize:          32,
	}
}
// generateMetrics returns a fixed set of histogram samples together with
// the aggregate values a flush is expected to report for them.
func generateMetrics() (metricValues []float64, expectedMetrics map[string]float64) {
	metricValues = []float64{1.0, 2.0, 7.0, 8.0, 100.0}
	// Count is normalized by second, so 5 values per 50ms flush interval
	// works out to 100 values/s.
	normalizedCount := float64(len(metricValues)) * float64(time.Second) / float64(DefaultFlushInterval)
	expectedMetrics = map[string]float64{
		"a.b.c.max":   100,
		"a.b.c.min":   1,
		"a.b.c.count": normalizedCount,
		// tdigest approximation causes these to be off by 1
		"a.b.c.50percentile": 6,
		"a.b.c.75percentile": 42,
		"a.b.c.99percentile": 98,
	}
	return metricValues, expectedMetrics
}
// assertMetrics checks that all expected metrics are present
// and have the correct value.
func assertMetrics(t *testing.T, metrics DDMetricsRequest, expectedMetrics map[string]float64) {
	// it doesn't count as accidentally quadratic if it's intentional
	for metricName, expectedValue := range expectedMetrics {
		assertMetric(t, metrics, metricName, expectedValue)
	}
}
// assertMetric asserts that the flushed series contains metricName with
// (approximately) the given value. A malformed series entry (e.g. a missing
// Value payload) is reported as a failure rather than panicking the run.
func assertMetric(t *testing.T, metrics DDMetricsRequest, metricName string, value float64) {
	defer func() {
		if recovered := recover(); recovered != nil {
			assert.Fail(t, "error extracting metrics", recovered)
		}
	}()
	for _, series := range metrics.Series {
		if series.Name != metricName {
			continue
		}
		// Round both sides to the nearest integer before comparing.
		assert.Equal(t, int(value+.5), int(series.Value[0][1]+.5), "Incorrect value for metric %s", metricName)
		return
	}
	assert.Fail(t, "did not find expected metric", metricName)
}
// setupVeneurServer creates a local server from the specified config
// and starts listening for requests. It returns the server for inspection.
// A non-nil transport replaces the server's outbound HTTP transport (used
// by tests to stub API calls).
func setupVeneurServer(t *testing.T, config Config, transport http.RoundTripper) *Server {
	server, err := NewFromConfig(config)
	// Check the error before touching server: the original dereferenced
	// server.HTTPClient first, which is unsafe when NewFromConfig fails.
	if err != nil {
		t.Fatal(err)
	}
	if transport != nil {
		server.HTTPClient.Transport = transport
	}
	server.Start()
	go server.HTTPServe()
	return &server
}
// DDMetricsRequest represents the body of the POST request
// for sending metrics data to Datadog
// Eventually we'll want to define this symmetrically.
type DDMetricsRequest struct {
	Series []samplers.DDMetric
}

// fixture sets up a mock Datadog API server and Veneur
type fixture struct {
	api             *httptest.Server      // fake Datadog API endpoint
	server          *Server               // the Veneur instance under test
	ddmetrics       chan DDMetricsRequest // decoded bodies received by the fake API
	interval        time.Duration         // parsed flush interval from the config
	flushMaxPerBody int                   // cap on metrics per flush body
}
// newFixture starts a fake Datadog API server plus a Veneur server pointed
// at it. Every request body the fake API receives is zlib-decompressed,
// JSON-decoded, and delivered on f.ddmetrics for the test to inspect.
func newFixture(t *testing.T, config Config) *fixture {
	interval, err := config.ParseInterval()
	assert.NoError(t, err)
	// Set up a remote server (the API that we're sending the data to)
	// (e.g. Datadog)
	f := &fixture{nil, &Server{}, make(chan DDMetricsRequest, 10), interval, config.FlushMaxPerBody}
	f.api = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Flush bodies are zlib-compressed JSON.
		zr, err := zlib.NewReader(r.Body)
		if err != nil {
			t.Fatal(err)
		}
		var ddmetrics DDMetricsRequest
		err = json.NewDecoder(zr).Decode(&ddmetrics)
		if err != nil {
			t.Fatal(err)
		}
		f.ddmetrics <- ddmetrics
		w.WriteHeader(http.StatusAccepted)
	}))
	// Point the server at the fake API; one worker keeps test behavior deterministic.
	config.DatadogAPIHostname = f.api.URL
	config.NumWorkers = 1
	f.server = setupVeneurServer(t, config, nil)
	return f
}
// Close tears the fixture down: fake API first, then the Veneur server,
// then the metrics channel. A nil ddmetrics marks an already-closed fixture.
func (f *fixture) Close() {
	// make Close safe to call multiple times
	if f.ddmetrics == nil {
		return
	}
	f.api.Close()
	f.server.Shutdown()
	// NOTE(review): the channel is closed only after Shutdown — presumably
	// no flush can still send on it at that point; verify against Shutdown.
	close(f.ddmetrics)
	f.ddmetrics = nil
}
// TestLocalServerUnaggregatedMetrics tests the behavior of
// the veneur client when operating without a global veneur
// instance (ie, when sending data directly to the remote server)
func TestLocalServerUnaggregatedMetrics(t *testing.T) {
	metricValues, expectedMetrics := generateMetrics()
	config := localConfig()
	f := newFixture(t, config)
	defer f.Close()
	// Feed local-only histogram samples straight into the single worker.
	for _, value := range metricValues {
		f.server.Workers[0].ProcessMetric(&samplers.UDPMetric{
			MetricKey: samplers.MetricKey{
				Name: "a.b.c",
				Type: "histogram",
			},
			Value:      value,
			Digest:     12345,
			SampleRate: 1.0,
			Scope:      samplers.LocalOnly,
		})
	}
	f.server.Flush()
	ddmetrics := <-f.ddmetrics
	// 6 series: the three configured aggregates (min, max, count) plus the
	// three configured percentiles (.5, .75, .99).
	assert.Equal(t, 6, len(ddmetrics.Series), "incorrect number of elements in the flushed series on the remote server")
	assertMetrics(t, ddmetrics, expectedMetrics)
}
// TestGlobalServerFlush checks that a global veneur instance (no forward
// address) flushes exactly the expected aggregates to the remote API.
func TestGlobalServerFlush(t *testing.T) {
	metricValues, expectedMetrics := generateMetrics()
	config := globalConfig()
	f := newFixture(t, config)
	defer f.Close()
	for _, value := range metricValues {
		f.server.Workers[0].ProcessMetric(&samplers.UDPMetric{
			MetricKey: samplers.MetricKey{
				Name: "a.b.c",
				Type: "histogram",
			},
			Value:      value,
			Digest:     12345,
			SampleRate: 1.0,
			Scope:      samplers.LocalOnly,
		})
	}
	f.server.Flush()
	ddmetrics := <-f.ddmetrics
	assert.Equal(t, len(expectedMetrics), len(ddmetrics.Series), "incorrect number of elements in the flushed series on the remote server")
	assertMetrics(t, ddmetrics, expectedMetrics)
}
// TestLocalServerMixedMetrics checks that a local veneur forwards
// mixed-scope histogram data (as a gob-encoded t-digest) to the global
// instance on /import, while flushing local-only counters directly to
// the remote API.
func TestLocalServerMixedMetrics(t *testing.T) {
	// The exact gob stream that we will receive might differ, so we can't
	// test against the bytestream directly. But the two streams should unmarshal
	// to t-digests that have the same key properties, so we can test
	// those.
	const ExpectedGobStream = "\r\xff\x87\x02\x01\x02\xff\x88\x00\x01\xff\x84\x00\x007\xff\x83\x03\x01\x01\bCentroid\x01\xff\x84\x00\x01\x03\x01\x04Mean\x01\b\x00\x01\x06Weight\x01\b\x00\x01\aSamples\x01\xff\x86\x00\x00\x00\x17\xff\x85\x02\x01\x01\t[]float64\x01\xff\x86\x00\x01\b\x00\x00/\xff\x88\x00\x05\x01\xfe\xf0?\x01\xfe\xf0?\x00\x01@\x01\xfe\xf0?\x00\x01\xfe\x1c@\x01\xfe\xf0?\x00\x01\xfe @\x01\xfe\xf0?\x00\x01\xfeY@\x01\xfe\xf0?\x00\x05\b\x00\xfeY@\x05\b\x00\xfe\xf0?\x05\b\x00\xfeY@"
	tdExpected := tdigest.NewMerging(100, false)
	err := tdExpected.GobDecode([]byte(ExpectedGobStream))
	assert.NoError(t, err, "Should not have encountered error in decoding expected gob stream")
	var HistogramValues = []float64{1.0, 2.0, 7.0, 8.0, 100.0}
	// Number of events observed (in 50ms interval)
	var HistogramCountRaw = len(HistogramValues)
	// Normalize to events/second
	// Explicitly convert to int to avoid confusing Stringer behavior
	var HistogramCountNormalized = float64(HistogramCountRaw) * float64(time.Second) / float64(DefaultFlushInterval)
	// Number of events observed
	const CounterNumEvents = 40
	expectedMetrics := map[string]float64{
		// 40 events/50ms = 800 events/s
		"x.y.z":     CounterNumEvents * float64(time.Second) / float64(DefaultFlushInterval),
		"a.b.c.max": 100,
		"a.b.c.min": 1,
		// Count is normalized by second
		// so 5 values/50ms = 100 values/s
		"a.b.c.count": float64(HistogramCountNormalized),
	}
	// This represents the global veneur instance, which receives request from
	// the local veneur instances, aggregates the data, and sends it to the remote API
	// (e.g. Datadog)
	globalTD := make(chan *tdigest.MergingDigest)
	globalVeneur := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		assert.Equal(t, r.URL.Path, "/import", "Global veneur should receive request on /import path")
		zr, err := zlib.NewReader(r.Body)
		if err != nil {
			t.Fatal(err)
		}
		// Shape of one entry of the /import payload.
		type requestItem struct {
			Name      string      `json:"name"`
			Tags      interface{} `json:"tags"`
			Tagstring string      `json:"tagstring"`
			Type      string      `json:"type"`
			Value     []byte      `json:"value"`
		}
		var metrics []requestItem
		err = json.NewDecoder(zr).Decode(&metrics)
		if err != nil {
			t.Fatal(err)
		}
		assert.Equal(t, 1, len(metrics), "incorrect number of elements in the flushed series")
		// Hand the decoded digest back to the test goroutine for inspection.
		td := tdigest.NewMerging(100, false)
		err = td.GobDecode(metrics[0].Value)
		assert.NoError(t, err, "Should not have encountered error in decoding gob stream")
		globalTD <- td
		w.WriteHeader(http.StatusAccepted)
	}))
	defer globalVeneur.Close()
	config := localConfig()
	config.ForwardAddress = globalVeneur.URL
	f := newFixture(t, config)
	defer f.Close()
	// Create non-local metrics that should be passed to the global veneur instance
	for _, value := range HistogramValues {
		f.server.Workers[0].ProcessMetric(&samplers.UDPMetric{
			MetricKey: samplers.MetricKey{
				Name: "a.b.c",
				Type: "histogram",
			},
			Value:      value,
			Digest:     12345,
			SampleRate: 1.0,
			Scope:      samplers.MixedScope,
		})
	}
	// Create local-only metrics that should be passed directly to the remote API
	for i := 0; i < CounterNumEvents; i++ {
		f.server.Workers[0].ProcessMetric(&samplers.UDPMetric{
			MetricKey: samplers.MetricKey{
				Name: "x.y.z",
				Type: "counter",
			},
			Value:      1.0,
			Digest:     12345,
			SampleRate: 1.0,
			Scope:      samplers.LocalOnly,
		})
	}
	f.server.Flush()
	// the global veneur instance should get valid data
	td := <-globalTD
	assert.Equal(t, expectedMetrics["a.b.c.min"], td.Min(), "Minimum value is incorrect")
	assert.Equal(t, expectedMetrics["a.b.c.max"], td.Max(), "Maximum value is incorrect")
	// The remote server receives the raw count, *not* the normalized count
	assert.InEpsilon(t, HistogramCountRaw, td.Count(), ε)
	assert.Equal(t, tdExpected, td, "Underlying tdigest structure is incorrect")
}
// TestSplitBytes fuzzes samplers.SplitBytes against the stdlib bytes.Split
// on random 'A'/'B' strings, plus the pathological nil/empty cases.
func TestSplitBytes(t *testing.T) {
	rand.Seed(time.Now().Unix())
	buf := make([]byte, 1000)
	for iteration := 0; iteration < 1000; iteration++ {
		// we construct a string of random length which is approximately 1/3rd A
		// and the other 2/3rds B
		buf = buf[:rand.Intn(cap(buf))]
		for j := range buf {
			c := byte('B')
			if rand.Intn(3) == 0 {
				c = 'A'
			}
			buf[j] = c
		}
		checkBufferSplit(t, buf)
		// restore full capacity before the next iteration
		buf = buf[:cap(buf)]
	}
	// also test pathological cases that the fuzz is unlikely to find
	checkBufferSplit(t, nil)
	checkBufferSplit(t, []byte{})
}
// checkBufferSplit asserts that samplers.SplitBytes splits buf on 'A'
// exactly like the stdlib bytes.Split does.
func checkBufferSplit(t *testing.T, buf []byte) {
	var testSplit [][]byte
	sb := samplers.NewSplitBytes(buf, 'A')
	for sb.Next() {
		testSplit = append(testSplit, sb.Chunk())
	}
	// now compare our split to the "real" implementation of split
	assert.EqualValues(t, bytes.Split(buf, []byte{'A'}), testSplit, "should have split %s correctly", buf)
}
// readTestKeysCerts loads the insecure PEM fixtures used by the TLS tests,
// keyed by file name.
func readTestKeysCerts() (map[string]string, error) {
	// reads the insecure test keys and certificates in fixtures generated with:
	// # Generate the authority key and certificate (512-bit RSA signed using SHA-256)
	// openssl genrsa -out cakey.pem 512
	// openssl req -new -x509 -key cakey.pem -out cacert.pem -days 1095 -subj "/O=Example Inc/CN=Example Certificate Authority"
	// # Generate the server key and certificate, signed by the authority
	// openssl genrsa -out serverkey.pem 512
	// openssl req -new -key serverkey.pem -out serverkey.csr -days 1095 -subj "/O=Example Inc/CN=localhost"
	// openssl x509 -req -in serverkey.csr -CA cacert.pem -CAkey cakey.pem -CAcreateserial -out servercert.pem -days 1095
	// # Generate a client key and certificate, signed by the authority
	// openssl genrsa -out clientkey.pem 512
	// openssl req -new -key clientkey.pem -out clientkey.csr -days 1095 -subj "/O=Example Inc/CN=Veneur client key"
	// openssl x509 -req -in clientkey.csr -CA cacert.pem -CAkey cakey.pem -CAcreateserial -out clientcert.pem -days 1095
	// # Generate another ca and sign the client key
	// openssl genrsa -out wrongcakey.pem 512
	// openssl req -new -x509 -key wrongcakey.pem -out wrongcacert.pem -days 1095 -subj "/O=Wrong Inc/CN=Wrong Certificate Authority"
	// openssl x509 -req -in clientkey.csr -CA wrongcacert.pem -CAkey wrongcakey.pem -CAcreateserial -out wrongclientcert.pem -days 1095
	pemFileNames := []string{
		"cacert.pem",
		"clientcert_correct.pem",
		"clientcert_wrong.pem",
		"clientkey.pem",
		"servercert.pem",
		"serverkey.pem",
	}
	pems := make(map[string]string, len(pemFileNames))
	for _, fileName := range pemFileNames {
		contents, err := ioutil.ReadFile(filepath.Join("fixtures", fileName))
		if err != nil {
			return nil, err
		}
		pems[fileName] = string(contents)
	}
	return pems, nil
}
// TestTCPConfig checks that invalid configurations are errors
func TestTCPConfig(t *testing.T) {
	config := localConfig()
	// unparseable address
	config.StatsdListenAddresses = []string{"tcp://invalid:invalid"}
	_, err := NewFromConfig(config)
	if err == nil {
		t.Error("invalid TCP address is a config error")
	}
	// key supplied without a certificate
	config.StatsdListenAddresses = []string{"tcp://localhost:8129"}
	config.TLSKey = "somekey"
	config.TLSCertificate = ""
	_, err = NewFromConfig(config)
	if err == nil {
		t.Error("key without certificate is a config error")
	}
	pems, err := readTestKeysCerts()
	if err != nil {
		t.Fatal("could not read test keys/certs:", err)
	}
	// valid key paired with garbage certificate
	config.TLSKey = pems["serverkey.pem"]
	config.TLSCertificate = "somecert"
	_, err = NewFromConfig(config)
	if err == nil {
		t.Error("invalid key and certificate is a config error")
	}
	// matching key and certificate must be accepted
	config.TLSKey = pems["serverkey.pem"]
	config.TLSCertificate = pems["servercert.pem"]
	_, err = NewFromConfig(config)
	if err != nil {
		t.Error("expected valid config")
	}
}
// sendTCPMetrics connects to addr (with TLS when tlsConfig is non-nil),
// writes two counter stats, flushes the fixture's server, and verifies
// the flushed series. Returns a non-nil error on any failure so callers
// can assert expected success or failure per client configuration.
func sendTCPMetrics(addr string, tlsConfig *tls.Config, f *fixture) error {
	// TODO: attempt to ensure the accept goroutine opens the port before we attempt to connect
	// connect and send stats in two parts
	var conn net.Conn
	var err error
	if tlsConfig != nil {
		conn, err = tls.Dial("tcp", addr, tlsConfig)
	} else {
		conn, err = net.Dial("tcp", addr)
	}
	if err != nil {
		return err
	}
	defer conn.Close()
	_, err = conn.Write([]byte("page.views:1|c\npage.views:1|c\n"))
	if err != nil {
		return err
	}
	// Close explicitly so the error is checked; the deferred Close on the
	// already-closed conn is best-effort and its error is ignored.
	err = conn.Close()
	if err != nil {
		return err
	}
	// check that the server received the stats; HACK: sleep to ensure workers process before flush
	time.Sleep(20 * time.Millisecond)
	f.server.Flush()
	select {
	case ddmetrics := <-f.ddmetrics:
		if len(ddmetrics.Series) != 1 {
			return fmt.Errorf("unexpected Series: %v", ddmetrics.Series)
		}
		// 2 events per 50ms flush interval normalize to 40 events/s.
		if !(ddmetrics.Series[0].Name == "page.views" && ddmetrics.Series[0].Value[0][1] == 40) {
			return fmt.Errorf("unexpected metric: %v", ddmetrics.Series[0])
		}
	case <-time.After(100 * time.Millisecond):
		return fmt.Errorf("timed out waiting for metrics")
	}
	return nil
}
// TestUDPMetrics checks that a statsd datagram sent over UDP is counted
// by the worker.
func TestUDPMetrics(t *testing.T) {
	config := localConfig()
	config.NumWorkers = 1
	// long interval so no flush races the assertion
	config.Interval = "60s"
	addr := fmt.Sprintf("127.0.0.1:%d", HTTPAddrPort)
	HTTPAddrPort++
	config.StatsdListenAddresses = []string{fmt.Sprintf("udp://%s", addr)}
	f := newFixture(t, config)
	defer f.Close()
	// Add a bit of delay to ensure things get listening
	time.Sleep(20 * time.Millisecond)
	conn, err := net.Dial("udp", addr)
	assert.NoError(t, err)
	defer conn.Close()
	conn.Write([]byte("foo.bar:1|c|#baz:gorch"))
	// Add a bit of delay to ensure things get processed
	time.Sleep(20 * time.Millisecond)
	assert.Equal(t, int64(1), f.server.Workers[0].MetricsProcessedCount(), "worker processed metric")
}
// TestMultipleUDPSockets checks that datagrams arriving on two distinct
// UDP listen addresses are both routed to the worker.
func TestMultipleUDPSockets(t *testing.T) {
	config := localConfig()
	config.NumWorkers = 1
	// long interval so no flush races the assertion
	config.Interval = "60s"
	addr1 := fmt.Sprintf("127.0.0.1:%d", HTTPAddrPort)
	HTTPAddrPort++
	addr2 := fmt.Sprintf("127.0.0.1:%d", HTTPAddrPort)
	HTTPAddrPort++
	config.StatsdListenAddresses = []string{fmt.Sprintf("udp://%s", addr1), fmt.Sprintf("udp://%s", addr2)}
	f := newFixture(t, config)
	defer f.Close()
	// Add a bit of delay to ensure things get listening
	time.Sleep(20 * time.Millisecond)
	conn, err := net.Dial("udp", addr1)
	assert.NoError(t, err)
	defer conn.Close()
	conn.Write([]byte("foo.bar:1|c|#baz:gorch"))
	conn2, err := net.Dial("udp", addr2)
	assert.NoError(t, err)
	defer conn2.Close()
	conn2.Write([]byte("foo.bar:1|c|#baz:gorch"))
	// Add a bit of delay to ensure things get processed
	time.Sleep(20 * time.Millisecond)
	assert.Equal(t, int64(2), f.server.Workers[0].MetricsProcessedCount(), "worker processed metric")
}
// TestUDPMetricsSSF checks that a protobuf-encoded SSF span containing one
// counter sample, sent over UDP, is counted by the worker.
func TestUDPMetricsSSF(t *testing.T) {
	config := localConfig()
	config.NumWorkers = 1
	config.Interval = "60s"
	addr := fmt.Sprintf("127.0.0.1:%d", HTTPAddrPort)
	config.SsfListenAddresses = []string{fmt.Sprintf("udp://%s", addr)}
	HTTPAddrPort++
	f := newFixture(t, config)
	defer f.Close()
	// listen delay
	time.Sleep(20 * time.Millisecond)
	conn, err := net.Dial("udp", addr)
	assert.NoError(t, err)
	defer conn.Close()
	// build a span with a single counter sample
	testSample := &ssf.SSFSpan{}
	testMetric := &ssf.SSFSample{}
	testMetric.Name = "test.metric"
	testMetric.Metric = ssf.SSFSample_COUNTER
	testMetric.Value = 1
	testMetric.Tags = make(map[string]string)
	testMetric.Tags["tag"] = "tagValue"
	testSample.Metrics = append(testSample.Metrics, testMetric)
	packet, err := proto.Marshal(testSample)
	assert.NoError(t, err)
	conn.Write(packet)
	time.Sleep(20 * time.Millisecond)
	assert.Equal(t, int64(1), f.server.Workers[0].MetricsProcessedCount(), "worker processed metric")
}
// TestUNIXMetricsSSF checks that SSF spans framed by protocol.WriteSSF over
// a unix domain socket are each counted by the worker.
func TestUNIXMetricsSSF(t *testing.T) {
	tdir, err := ioutil.TempDir("", "unixmetrics_ssf")
	require.NoError(t, err)
	defer os.RemoveAll(tdir)
	config := localConfig()
	config.NumWorkers = 1
	config.Interval = "60s"
	path := filepath.Join(tdir, "test.sock")
	config.SsfListenAddresses = []string{fmt.Sprintf("unix://%s", path)}
	HTTPAddrPort++
	f := newFixture(t, config)
	defer f.Close()
	// listen delay
	time.Sleep(20 * time.Millisecond)
	conn, err := net.Dial("unix", path)
	assert.NoError(t, err)
	defer conn.Close()
	// build a span with a single counter sample
	testSpan := &ssf.SSFSpan{}
	testMetric := &ssf.SSFSample{}
	testMetric.Name = "test.metric"
	testMetric.Metric = ssf.SSFSample_COUNTER
	testMetric.Value = 1
	testMetric.Tags = make(map[string]string)
	testMetric.Tags["tag"] = "tagValue"
	testSpan.Metrics = append(testSpan.Metrics, testMetric)
	// send the same span twice and check the counter after each write
	_, err = protocol.WriteSSF(conn, testSpan)
	if assert.NoError(t, err) {
		time.Sleep(20 * time.Millisecond)
		assert.Equal(t, int64(1), f.server.Workers[0].MetricsProcessedCount(), "worker processed metric")
	}
	_, err = protocol.WriteSSF(conn, testSpan)
	if assert.NoError(t, err) {
		time.Sleep(20 * time.Millisecond)
		assert.Equal(t, int64(2), f.server.Workers[0].MetricsProcessedCount(), "worker processed metric")
	}
}
// TestIgnoreLongUDPMetrics checks that a datagram longer than
// MetricMaxLength is dropped rather than processed.
func TestIgnoreLongUDPMetrics(t *testing.T) {
	config := localConfig()
	config.NumWorkers = 1
	config.MetricMaxLength = 31
	config.Interval = "60s"
	addr := fmt.Sprintf("127.0.0.1:%d", HTTPAddrPort)
	config.StatsdListenAddresses = []string{fmt.Sprintf("udp://%s", addr)}
	HTTPAddrPort++
	f := newFixture(t, config)
	defer f.Close()
	// Add a bit of delay to ensure things get listening
	time.Sleep(20 * time.Millisecond)
	conn, err := net.Dial("udp", addr)
	assert.NoError(t, err)
	defer conn.Close()
	// nb this metric is bad because it's too long based on the `MetricMaxLength`
	// we set above!
	conn.Write([]byte("foo.bar:1|c|#baz:gorch,long:tag,is:long"))
	// Add a bit of delay to ensure things get processed
	time.Sleep(20 * time.Millisecond)
	assert.Equal(t, int64(0), f.server.Workers[0].processed, "worker did not process a metric")
}
// TestTCPMetrics checks that a server can accept metrics over a TCP socket.
// It exercises the full matrix of server modes (plain TCP, TLS-encrypted,
// TLS with client authentication) against four client configurations, and
// asserts that each pair connects or fails exactly as expected.
func TestTCPMetrics(t *testing.T) {
	pems, err := readTestKeysCerts()
	if err != nil {
		t.Fatal("could not read test keys/certs:", err)
	}
	// all supported TCP connection modes
	serverConfigs := []struct {
		name                   string
		serverKey              string
		serverCertificate      string
		authorityCertificate   string
		expectedConnectResults [4]bool // indexed in clientConfigs order
	}{
		{"TCP", "", "", "", [4]bool{true, false, false, false}},
		{"encrypted", pems["serverkey.pem"], pems["servercert.pem"], "",
			[4]bool{false, true, true, true}},
		{"authenticated", pems["serverkey.pem"], pems["servercert.pem"], pems["cacert.pem"],
			[4]bool{false, false, false, true}},
	}
	// load all the various keys and certificates for the client
	trustServerCA := x509.NewCertPool()
	ok := trustServerCA.AppendCertsFromPEM([]byte(pems["cacert.pem"]))
	if !ok {
		t.Fatal("could not load server certificate")
	}
	wrongCert, err := tls.X509KeyPair(
		[]byte(pems["clientcert_wrong.pem"]), []byte(pems["clientkey.pem"]))
	if err != nil {
		t.Fatal("could not load wrong client cert/key:", err)
	}
	wrongConfig := &tls.Config{
		RootCAs:      trustServerCA,
		Certificates: []tls.Certificate{wrongCert},
	}
	correctCert, err := tls.X509KeyPair(
		[]byte(pems["clientcert_correct.pem"]), []byte(pems["clientkey.pem"]))
	if err != nil {
		t.Fatal("could not load correct client cert/key:", err)
	}
	correctConfig := &tls.Config{
		RootCAs:      trustServerCA,
		Certificates: []tls.Certificate{correctCert},
	}
	// all supported client configurations
	clientConfigs := []struct {
		name      string
		tlsConfig *tls.Config
	}{
		{"TCP", nil},
		{"TLS no cert", &tls.Config{RootCAs: trustServerCA}},
		{"TLS wrong cert", wrongConfig},
		{"TLS correct cert", correctConfig},
	}
	for _, serverConfig := range serverConfigs {
		config := localConfig()
		config.NumWorkers = 1
		// Use a unique port to avoid race with shutting down accept goroutine on Linux
		addr := fmt.Sprintf("localhost:%d", HTTPAddrPort)
		HTTPAddrPort++
		config.StatsdListenAddresses = []string{fmt.Sprintf("tcp://%s", addr)}
		config.TLSKey = serverConfig.serverKey
		config.TLSCertificate = serverConfig.serverCertificate
		config.TLSAuthorityCertificate = serverConfig.authorityCertificate
		f := newFixture(t, config)
		// deferring in a loop is fine here: Close is idempotent, and the
		// explicit Close below releases each iteration's resources promptly
		defer f.Close() // ensure shutdown if the test aborts
		// attempt to connect and send stats with each of the client configurations
		for i, clientConfig := range clientConfigs {
			expectedSuccess := serverConfig.expectedConnectResults[i]
			err := sendTCPMetrics(addr, clientConfig.tlsConfig, f)
			if err != nil {
				if expectedSuccess {
					t.Errorf("server config: '%s' client config: '%s' failed: %s",
						serverConfig.name, clientConfig.name, err.Error())
				} else {
					fmt.Printf("SUCCESS server config: '%s' client config: '%s' got expected error: %s\n",
						serverConfig.name, clientConfig.name, err.Error())
				}
			} else if !expectedSuccess {
				t.Errorf("server config: '%s' client config: '%s' worked; should fail!",
					serverConfig.name, clientConfig.name)
			} else {
				fmt.Printf("SUCCESS server config: '%s' client config: '%s'\n",
					serverConfig.name, clientConfig.name)
			}
		}
		f.Close()
	}
}
// TestHandleTCPGoroutineTimeout verifies that an idle TCP connection doesn't block forever.
func TestHandleTCPGoroutineTimeout(t *testing.T) {
	const readTimeout = 30 * time.Millisecond
	s := &Server{tcpReadTimeout: readTimeout, Workers: []*Worker{
		&Worker{PacketChan: make(chan samplers.UDPMetric, 1)},
	}}
	// make a real TCP connection ... to ourselves
	listener, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatal(err)
	}
	defer listener.Close()
	acceptorDone := make(chan struct{})
	go func() {
		// Close acceptorDone on every exit path so the main goroutine can
		// never block forever on it.
		defer close(acceptorDone)
		accepted, err := listener.Accept()
		if err != nil {
			// t.Fatal must only be called from the test goroutine (it
			// calls runtime.Goexit); report via t.Error and bail out.
			t.Error(err)
			return
		}
		// after half the read timeout: send a stat; it should work
		time.Sleep(readTimeout / 2)
		_, err = accepted.Write([]byte("metric:42|g\n"))
		if err != nil {
			t.Error("expected Write to succeed:", err)
		}
		// read: returns when the connection is closed
		out, err := ioutil.ReadAll(accepted)
		if !(len(out) == 0 && err == nil) {
			t.Errorf("expected len(out)==0 (was %d) and err==nil (was %v)", len(out), err)
		}
	}()
	conn, err := net.Dial("tcp", listener.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	// handleTCPGoroutine should not block forever: it will time out
	log.Printf("handling goroutine")
	s.handleTCPGoroutine(conn)
	<-acceptorDone
	// we should have received one metric
	packet := <-s.Workers[0].PacketChan
	if packet.Name != "metric" {
		t.Error("Expected packet for metric:", packet)
	}
}
// TestNewFromServerConfigRenamedVariables checks that the renamed config
// variables (Datadog* fields, SsfListenAddresses) are mapped onto the
// corresponding Server fields by NewFromConfig.
func TestNewFromServerConfigRenamedVariables(t *testing.T) {
	// test the variables that have been renamed
	config := Config{
		DatadogAPIKey:          "apikey",
		DatadogAPIHostname:     "http://api",
		DatadogTraceAPIAddress: "http://trace",
		SsfListenAddresses:     []string{"udp://127.0.0.1:99"},
		// required or NewFromConfig fails
		Interval:     "10s",
		StatsAddress: "localhost:62251",
	}
	s, err := NewFromConfig(config)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, "apikey", s.DDAPIKey)
	assert.Equal(t, "http://api", s.DDHostname)
	assert.Equal(t, "http://trace", s.DDTraceAddress)
	addr := s.SSFListenAddrs[0].(*net.UDPAddr)
	assert.True(t, addr.IP.IsLoopback(), "TraceAddr should be loopback")
	assert.Equal(t, 99, addr.Port)
}
// BenchmarkSendSSFUNIX sends b.N metrics to veneur and waits until
// all of them have been read (not processed).
func BenchmarkSendSSFUNIX(b *testing.B) {
	tdir, err := ioutil.TempDir("", "unixmetrics_ssf")
	require.NoError(b, err)
	defer os.RemoveAll(tdir)
	HTTPAddrPort++
	path := filepath.Join(tdir, "test.sock")
	// test the variables that have been renamed
	config := Config{
		DatadogAPIKey:          "apikey",
		DatadogAPIHostname:     "http://api",
		DatadogTraceAPIAddress: "http://trace",
		SsfListenAddresses:     []string{fmt.Sprintf("unix://%s", path)},
		// required or NewFromConfig fails
		Interval:     "10s",
		StatsAddress: "localhost:62251",
	}
	s, err := NewFromConfig(config)
	if err != nil {
		b.Fatal(err)
	}
	// Simulate a metrics worker:
	logger, _ := test.NewNullLogger()
	w := NewWorker(0, nil, logger)
	s.Workers = []*Worker{w}
	// (the original spawned an empty `go func() {}()` here — dead code, removed)
	defer close(w.QuitChan)
	// Simulate an incoming connection on the server:
	l, err := net.Listen("unix", path)
	require.NoError(b, err)
	defer l.Close()
	go func() {
		// Writer goroutine: dial the socket and stream b.N identical spans.
		testSpan := &ssf.SSFSpan{}
		testMetric := &ssf.SSFSample{}
		testMetric.Name = "test.metric"
		testMetric.Metric = ssf.SSFSample_COUNTER
		testMetric.Value = 1
		testMetric.Tags = make(map[string]string)
		testMetric.Tags["tag"] = "tagValue"
		testSpan.Metrics = append(testSpan.Metrics, testMetric)
		conn, err := net.Dial("unix", path)
		require.NoError(b, err)
		defer conn.Close()
		for i := 0; i < b.N; i++ {
			_, err := protocol.WriteSSF(conn, testSpan)
			require.NoError(b, err)
		}
		conn.Close()
	}()
	sConn, err := l.Accept()
	require.NoError(b, err)
	go s.ReadTraceStream(sConn)
	b.ResetTimer()
	// Time only the draining of b.N packets from the worker channel.
	for i := 0; i < b.N; i++ {
		<-w.PacketChan
	}
	close(s.shutdown)
}
// BenchmarkSendSSFUDP floods the veneur UDP socket with messages and
// and times how long it takes to read (not process) b.N metrics. This
// is almost an inversion of the SSFUNIX benchmark above, as UDP does
// lose packets and we don't want to loop forever.
func BenchmarkSendSSFUDP(b *testing.B) {
	addr := fmt.Sprintf("127.0.0.1:%d", HTTPAddrPort)
	HTTPAddrPort++
	// test the variables that have been renamed
	config := Config{
		DatadogAPIKey:          "apikey",
		DatadogAPIHostname:     "http://api",
		DatadogTraceAPIAddress: "http://trace",
		SsfListenAddresses:     []string{fmt.Sprintf("udp://%s", addr)},
		ReadBufferSizeBytes:    16 * 1024,
		TraceMaxLengthBytes:    900 * 1024,
		// required or NewFromConfig fails
		Interval:     "10s",
		StatsAddress: "localhost:62251",
	}
	s, err := NewFromConfig(config)
	if err != nil {
		b.Fatal(err)
	}
	// Buffer pool handed to the trace-socket reader.
	pool := &sync.Pool{
		New: func() interface{} {
			return make([]byte, s.traceMaxLengthBytes)
		},
	}
	// Simulate listening for UDP SSF on the server:
	udpAddr, err := net.ResolveUDPAddr("udp", addr)
	require.NoError(b, err)
	l, err := NewSocket(udpAddr, s.RcvbufBytes, false)
	require.NoError(b, err)
	// Simulate a metrics worker:
	logger, _ := test.NewNullLogger()
	w := NewWorker(0, nil, logger)
	s.Workers = []*Worker{w}
	go func() {
		// Flooder goroutine: write the same encoded span until shutdown.
		testSpan := &ssf.SSFSpan{}
		testMetric := &ssf.SSFSample{}
		testMetric.Name = "test.metric"
		testMetric.Metric = ssf.SSFSample_COUNTER
		testMetric.Value = 1
		testMetric.Tags = make(map[string]string)
		testMetric.Tags["tag"] = "tagValue"
		testSpan.Metrics = append(testSpan.Metrics, testMetric)
		// The span never changes, so marshal it once outside the flood
		// loop instead of re-encoding it on every iteration.
		packet, err := proto.Marshal(testSpan)
		assert.NoError(b, err)
		conn, err := net.Dial("udp", addr)
		require.NoError(b, err)
		defer conn.Close()
		for {
			select {
			case <-s.shutdown:
				return
			default:
			}
			conn.Write(packet)
		}
	}()
	go s.ReadTraceSocket(l, pool)
	b.ResetTimer()
	// Time only the draining of b.N packets from the worker channel.
	for i := 0; i < b.N; i++ {
		<-w.PacketChan
	}
	l.Close()
	close(s.shutdown)
}
Make the benchmarks work in spite of golang/dep#433.
Since we cannot rely on logrus sub-packages being vendored, define a
plausible-enough null logger for the benchmarks instead.
package veneur
import (
"bytes"
"compress/zlib"
"crypto/tls"
"crypto/x509"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"sync"
"math/rand"
"net"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"testing"
"time"
"github.com/Sirupsen/logrus"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stripe/veneur/protocol"
"github.com/stripe/veneur/samplers"
"github.com/stripe/veneur/ssf"
"github.com/stripe/veneur/tdigest"
)
// ε is the tolerance used for approximate floating-point comparisons.
const ε = .00002

// DefaultFlushInterval is the shortened flush interval used by test configs.
const DefaultFlushInterval = 50 * time.Millisecond

// DefaultServerTimeout bounds how long tests wait on server operations.
const DefaultServerTimeout = 100 * time.Millisecond

// DebugMode mirrors the -test.v flag so servers built by the tests can
// enable verbose behavior.
var DebugMode bool

// TestMain captures the verbosity flag into DebugMode before running the suite.
func TestMain(m *testing.M) {
	flag.Parse()
	// "test.v" is registered by the testing package; its Value implements
	// flag.Getter and yields a bool.
	DebugMode = flag.Lookup("test.v").Value.(flag.Getter).Get().(bool)
	os.Exit(m.Run())
}

// On the CI server, we can't be guaranteed that the port will be
// released immediately after the server is shut down. Instead, use
// a unique port for each test. As long as we don't have an insane number
// of integration tests, we should be fine.
// NOTE(review): this counter is incremented without synchronization, so it
// assumes these tests never run in parallel — confirm before adding t.Parallel().
var HTTPAddrPort = 8129
// localConfig sets up a boilerplate local config for later use: a non-empty
// forward address makes the server act as a local (forwarding) instance.
func localConfig() Config {
	return generateConfig("http://localhost")
}

// globalConfig sets up a boilerplate global config for later use: the empty
// forward address makes the server act as a global instance.
func globalConfig() Config {
	return generateConfig("")
}
// generateConfig is not called config to avoid
// accidental variable shadowing. It builds a test Config forwarding to
// forwardAddr, with each listener placed on a freshly reserved local port.
func generateConfig(forwardAddr string) Config {
	// we don't shut down ports so avoid address in use errors
	port := HTTPAddrPort
	HTTPAddrPort++
	metricsPort := HTTPAddrPort
	HTTPAddrPort++
	tracePort := HTTPAddrPort
	HTTPAddrPort++
	return Config{
		DatadogAPIHostname: "http://localhost",
		Debug:              DebugMode,
		Hostname:           "localhost",
		// Use a shorter interval for tests
		Interval:              DefaultFlushInterval.String(),
		Key:                   "",
		MetricMaxLength:       4096,
		Percentiles:           []float64{.5, .75, .99},
		Aggregates:            []string{"min", "max", "count"},
		ReadBufferSizeBytes:   2097152,
		StatsdListenAddresses: []string{fmt.Sprintf("udp://localhost:%d", metricsPort)},
		HTTPAddress:           fmt.Sprintf("localhost:%d", port),
		ForwardAddress:        forwardAddr,
		NumWorkers:            4,
		FlushFile:             "",
		// Use only one reader, so that we can run tests
		// on platforms which do not support SO_REUSEPORT
		NumReaders: 1,
		// Currently this points nowhere, which is intentional.
		// We don't need internal metrics for the tests, and they make testing
		// more complicated.
		StatsAddress:    "localhost:8125",
		Tags:            []string{},
		SentryDsn:       "",
		FlushMaxPerBody: 1024,
		// Don't use the default port 8128: Veneur sends its own traces there, causing failures
		SsfListenAddresses:     []string{fmt.Sprintf("udp://127.0.0.1:%d", tracePort)},
		DatadogTraceAPIAddress: forwardAddr,
		TraceMaxLengthBytes:    4096,
		SsfBufferSize:          32,
	}
}
// generateMetrics returns a fixed set of histogram samples together with
// the aggregate values a flush is expected to report for them.
func generateMetrics() (metricValues []float64, expectedMetrics map[string]float64) {
	metricValues = []float64{1.0, 2.0, 7.0, 8.0, 100.0}
	expectedMetrics = map[string]float64{
		"a.b.c.max": 100,
		"a.b.c.min": 1,
		// Count is normalized by second
		// so 5 values/50ms = 100 values/s
		"a.b.c.count": float64(len(metricValues)) * float64(time.Second) / float64(DefaultFlushInterval),
		// tdigest approximation causes this to be off by 1
		"a.b.c.50percentile": 6,
		"a.b.c.75percentile": 42,
		"a.b.c.99percentile": 98,
	}
	return metricValues, expectedMetrics
}
// assertMetrics checks that all expected metrics are present
// and have the correct value.
func assertMetrics(t *testing.T, metrics DDMetricsRequest, expectedMetrics map[string]float64) {
	// it doesn't count as accidentally quadratic if it's intentional
	for metricName, expectedValue := range expectedMetrics {
		assertMetric(t, metrics, metricName, expectedValue)
	}
}

// assertMetric asserts that the flushed series contains metricName with
// (approximately) the given value; a panic while digging into the series
// is converted to a test failure.
func assertMetric(t *testing.T, metrics DDMetricsRequest, metricName string, value float64) {
	defer func() {
		if r := recover(); r != nil {
			assert.Fail(t, "error extracting metrics", r)
		}
	}()
	for _, metric := range metrics.Series {
		if metric.Name == metricName {
			// round both sides to the nearest integer before comparing
			assert.Equal(t, int(value+.5), int(metric.Value[0][1]+.5), "Incorrect value for metric %s", metricName)
			return
		}
	}
	assert.Fail(t, "did not find expected metric", metricName)
}
// setupVeneurServer creates a local server from the specified config
// and starts listening for requests. It returns the server for inspection.
// A non-nil transport replaces the server's outbound HTTP transport (used
// by tests to stub API calls).
func setupVeneurServer(t *testing.T, config Config, transport http.RoundTripper) *Server {
	server, err := NewFromConfig(config)
	// Check the error before touching server: the original dereferenced
	// server.HTTPClient first, which is unsafe when NewFromConfig fails.
	if err != nil {
		t.Fatal(err)
	}
	if transport != nil {
		server.HTTPClient.Transport = transport
	}
	server.Start()
	go server.HTTPServe()
	return &server
}
// DDMetricsRequest represents the body of the POST request
// for sending metrics data to Datadog
// Eventually we'll want to define this symmetrically.
type DDMetricsRequest struct {
	Series []samplers.DDMetric
}

// fixture sets up a mock Datadog API server and Veneur
type fixture struct {
	api             *httptest.Server      // fake Datadog API endpoint
	server          *Server               // the Veneur instance under test
	ddmetrics       chan DDMetricsRequest // decoded bodies received by the fake API
	interval        time.Duration         // parsed flush interval from the config
	flushMaxPerBody int                   // cap on metrics per flush body
}
// newFixture builds a fixture: a mock Datadog API backed by an httptest
// server, wired to a freshly started veneur Server. Decoded flush bodies
// are delivered on f.ddmetrics.
func newFixture(t *testing.T, config Config) *fixture {
	interval, err := config.ParseInterval()
	assert.NoError(t, err)
	// Set up a remote server (the API that we're sending the data to)
	// (e.g. Datadog)
	f := &fixture{nil, &Server{}, make(chan DDMetricsRequest, 10), interval, config.FlushMaxPerBody}
	f.api = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// This handler runs on the HTTP server's goroutine, so t.Fatal
		// (which calls runtime.Goexit) must not be used here per the
		// testing package docs; report with t.Error and return instead.
		zr, err := zlib.NewReader(r.Body)
		if err != nil {
			t.Error(err)
			return
		}
		var ddmetrics DDMetricsRequest
		if err := json.NewDecoder(zr).Decode(&ddmetrics); err != nil {
			t.Error(err)
			return
		}
		f.ddmetrics <- ddmetrics
		w.WriteHeader(http.StatusAccepted)
	}))
	config.DatadogAPIHostname = f.api.URL
	config.NumWorkers = 1
	f.server = setupVeneurServer(t, config, nil)
	return f
}
// Close tears down the mock API and shuts the server down.
// It is idempotent: only the first call has any effect.
func (f *fixture) Close() {
	if f.ddmetrics == nil {
		return // already closed
	}
	f.api.Close()
	f.server.Shutdown()
	close(f.ddmetrics)
	f.ddmetrics = nil
}
// TestLocalServerUnaggregatedMetrics tests the behavior of
// the veneur client when operating without a global veneur
// instance (ie, when sending data directly to the remote server)
func TestLocalServerUnaggregatedMetrics(t *testing.T) {
	metricValues, expectedMetrics := generateMetrics()
	config := localConfig()
	f := newFixture(t, config)
	defer f.Close()
	// Feed every sample into worker 0 as a local-only histogram.
	for _, value := range metricValues {
		f.server.Workers[0].ProcessMetric(&samplers.UDPMetric{
			MetricKey: samplers.MetricKey{
				Name: "a.b.c",
				Type: "histogram",
			},
			Value:      value,
			Digest:     12345,
			SampleRate: 1.0,
			Scope:      samplers.LocalOnly,
		})
	}
	f.server.Flush()
	ddmetrics := <-f.ddmetrics
	// 6 = the histogram's derived series (count/max/min/percentiles);
	// NOTE(review): presumably matches generateMetrics — confirm there.
	assert.Equal(t, 6, len(ddmetrics.Series), "incorrect number of elements in the flushed series on the remote server")
	assertMetrics(t, ddmetrics, expectedMetrics)
}
// TestGlobalServerFlush checks that a globally-configured server flushes
// exactly the expected series for local-only histogram samples.
func TestGlobalServerFlush(t *testing.T) {
	metricValues, expectedMetrics := generateMetrics()
	config := globalConfig()
	f := newFixture(t, config)
	defer f.Close()
	for _, value := range metricValues {
		f.server.Workers[0].ProcessMetric(&samplers.UDPMetric{
			MetricKey: samplers.MetricKey{
				Name: "a.b.c",
				Type: "histogram",
			},
			Value:      value,
			Digest:     12345,
			SampleRate: 1.0,
			Scope:      samplers.LocalOnly,
		})
	}
	f.server.Flush()
	ddmetrics := <-f.ddmetrics
	// Unlike the local-mode test, the series count must match the expected
	// metric map exactly.
	assert.Equal(t, len(expectedMetrics), len(ddmetrics.Series), "incorrect number of elements in the flushed series on the remote server")
	assertMetrics(t, ddmetrics, expectedMetrics)
}
// TestLocalServerMixedMetrics checks that mixed-scope histograms are
// forwarded to the global veneur as a t-digest on /import, while
// local-only counters go straight to the remote API.
func TestLocalServerMixedMetrics(t *testing.T) {
	// The exact gob stream that we will receive might differ, so we can't
	// test against the bytestream directly. But the two streams should unmarshal
	// to t-digests that have the same key properties, so we can test
	// those.
	const ExpectedGobStream = "\r\xff\x87\x02\x01\x02\xff\x88\x00\x01\xff\x84\x00\x007\xff\x83\x03\x01\x01\bCentroid\x01\xff\x84\x00\x01\x03\x01\x04Mean\x01\b\x00\x01\x06Weight\x01\b\x00\x01\aSamples\x01\xff\x86\x00\x00\x00\x17\xff\x85\x02\x01\x01\t[]float64\x01\xff\x86\x00\x01\b\x00\x00/\xff\x88\x00\x05\x01\xfe\xf0?\x01\xfe\xf0?\x00\x01@\x01\xfe\xf0?\x00\x01\xfe\x1c@\x01\xfe\xf0?\x00\x01\xfe @\x01\xfe\xf0?\x00\x01\xfeY@\x01\xfe\xf0?\x00\x05\b\x00\xfeY@\x05\b\x00\xfe\xf0?\x05\b\x00\xfeY@"
	tdExpected := tdigest.NewMerging(100, false)
	err := tdExpected.GobDecode([]byte(ExpectedGobStream))
	assert.NoError(t, err, "Should not have encountered error in decoding expected gob stream")
	var HistogramValues = []float64{1.0, 2.0, 7.0, 8.0, 100.0}
	// Number of events observed (in 50ms interval)
	var HistogramCountRaw = len(HistogramValues)
	// Normalize to events/second
	// Explicitly convert to int to avoid confusing Stringer behavior
	var HistogramCountNormalized = float64(HistogramCountRaw) * float64(time.Second) / float64(DefaultFlushInterval)
	// Number of events observed
	const CounterNumEvents = 40
	expectedMetrics := map[string]float64{
		// 40 events/50ms = 800 events/s
		"x.y.z":     CounterNumEvents * float64(time.Second) / float64(DefaultFlushInterval),
		"a.b.c.max": 100,
		"a.b.c.min": 1,
		// Count is normalized by second
		// so 5 values/50ms = 100 values/s
		"a.b.c.count": float64(HistogramCountNormalized),
	}
	// This represents the global veneur instance, which receives request from
	// the local veneur instances, aggregates the data, and sends it to the remote API
	// (e.g. Datadog)
	globalTD := make(chan *tdigest.MergingDigest)
	globalVeneur := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		assert.Equal(t, r.URL.Path, "/import", "Global veneur should receive request on /import path")
		zr, err := zlib.NewReader(r.Body)
		if err != nil {
			t.Fatal(err)
		}
		// Minimal shape of the forwarded payload; Value carries the
		// gob-encoded t-digest bytes.
		type requestItem struct {
			Name      string      `json:"name"`
			Tags      interface{} `json:"tags"`
			Tagstring string      `json:"tagstring"`
			Type      string      `json:"type"`
			Value     []byte      `json:"value"`
		}
		var metrics []requestItem
		err = json.NewDecoder(zr).Decode(&metrics)
		if err != nil {
			t.Fatal(err)
		}
		assert.Equal(t, 1, len(metrics), "incorrect number of elements in the flushed series")
		td := tdigest.NewMerging(100, false)
		err = td.GobDecode(metrics[0].Value)
		assert.NoError(t, err, "Should not have encountered error in decoding gob stream")
		globalTD <- td
		w.WriteHeader(http.StatusAccepted)
	}))
	defer globalVeneur.Close()
	config := localConfig()
	config.ForwardAddress = globalVeneur.URL
	f := newFixture(t, config)
	defer f.Close()
	// Create non-local metrics that should be passed to the global veneur instance
	for _, value := range HistogramValues {
		f.server.Workers[0].ProcessMetric(&samplers.UDPMetric{
			MetricKey: samplers.MetricKey{
				Name: "a.b.c",
				Type: "histogram",
			},
			Value:      value,
			Digest:     12345,
			SampleRate: 1.0,
			Scope:      samplers.MixedScope,
		})
	}
	// Create local-only metrics that should be passed directly to the remote API
	for i := 0; i < CounterNumEvents; i++ {
		f.server.Workers[0].ProcessMetric(&samplers.UDPMetric{
			MetricKey: samplers.MetricKey{
				Name: "x.y.z",
				Type: "counter",
			},
			Value:      1.0,
			Digest:     12345,
			SampleRate: 1.0,
			Scope:      samplers.LocalOnly,
		})
	}
	f.server.Flush()
	// the global veneur instance should get valid data
	td := <-globalTD
	assert.Equal(t, expectedMetrics["a.b.c.min"], td.Min(), "Minimum value is incorrect")
	assert.Equal(t, expectedMetrics["a.b.c.max"], td.Max(), "Maximum value is incorrect")
	// The remote server receives the raw count, *not* the normalized count
	assert.InEpsilon(t, HistogramCountRaw, td.Count(), ε)
	assert.Equal(t, tdExpected, td, "Underlying tdigest structure is incorrect")
}
// TestSplitBytes fuzzes SplitBytes against the reference bytes.Split with
// random buffers, then checks the pathological nil/empty inputs.
func TestSplitBytes(t *testing.T) {
	rand.Seed(time.Now().Unix())
	buf := make([]byte, 1000)
	for iter := 0; iter < 1000; iter++ {
		// random length; roughly one third of the bytes are the
		// delimiter 'A', the rest are 'B'
		buf = buf[:rand.Intn(cap(buf))]
		for j := range buf {
			c := byte('B')
			if rand.Intn(3) == 0 {
				c = 'A'
			}
			buf[j] = c
		}
		checkBufferSplit(t, buf)
		buf = buf[:cap(buf)]
	}
	// also test pathological cases that the fuzz is unlikely to find
	checkBufferSplit(t, nil)
	checkBufferSplit(t, []byte{})
}
// checkBufferSplit splits buf on 'A' via samplers.NewSplitBytes and compares
// the chunks against the reference implementation, bytes.Split.
func checkBufferSplit(t *testing.T, buf []byte) {
	var got [][]byte
	for sb := samplers.NewSplitBytes(buf, 'A'); sb.Next(); {
		got = append(got, sb.Chunk())
	}
	assert.EqualValues(t, bytes.Split(buf, []byte{'A'}), got, "should have split %s correctly", buf)
}
// readTestKeysCerts loads the insecure test keys and certificates from the
// fixtures directory, returning file contents keyed by file name.
// The fixtures were generated with:
// # Generate the authority key and certificate (512-bit RSA signed using SHA-256)
// openssl genrsa -out cakey.pem 512
// openssl req -new -x509 -key cakey.pem -out cacert.pem -days 1095 -subj "/O=Example Inc/CN=Example Certificate Authority"
// # Generate the server key and certificate, signed by the authority
// openssl genrsa -out serverkey.pem 512
// openssl req -new -key serverkey.pem -out serverkey.csr -days 1095 -subj "/O=Example Inc/CN=localhost"
// openssl x509 -req -in serverkey.csr -CA cacert.pem -CAkey cakey.pem -CAcreateserial -out servercert.pem -days 1095
// # Generate a client key and certificate, signed by the authority
// openssl genrsa -out clientkey.pem 512
// openssl req -new -key clientkey.pem -out clientkey.csr -days 1095 -subj "/O=Example Inc/CN=Veneur client key"
// openssl x509 -req -in clientkey.csr -CA cacert.pem -CAkey cakey.pem -CAcreateserial -out clientcert.pem -days 1095
// # Generate another ca and sign the client key
// openssl genrsa -out wrongcakey.pem 512
// openssl req -new -x509 -key wrongcakey.pem -out wrongcacert.pem -days 1095 -subj "/O=Wrong Inc/CN=Wrong Certificate Authority"
// openssl x509 -req -in clientkey.csr -CA wrongcacert.pem -CAkey wrongcakey.pem -CAcreateserial -out wrongclientcert.pem -days 1095
func readTestKeysCerts() (map[string]string, error) {
	names := []string{
		"cacert.pem",
		"clientcert_correct.pem",
		"clientcert_wrong.pem",
		"clientkey.pem",
		"servercert.pem",
		"serverkey.pem",
	}
	pems := make(map[string]string, len(names))
	for _, name := range names {
		contents, err := ioutil.ReadFile(filepath.Join("fixtures", name))
		if err != nil {
			return nil, err
		}
		pems[name] = string(contents)
	}
	return pems, nil
}
// TestTCPConfig checks that invalid configurations are errors
func TestTCPConfig(t *testing.T) {
	config := localConfig()
	// malformed port in the listen address
	config.StatsdListenAddresses = []string{"tcp://invalid:invalid"}
	_, err := NewFromConfig(config)
	if err == nil {
		t.Error("invalid TCP address is a config error")
	}
	// a TLS key without a matching certificate is rejected
	config.StatsdListenAddresses = []string{"tcp://localhost:8129"}
	config.TLSKey = "somekey"
	config.TLSCertificate = ""
	_, err = NewFromConfig(config)
	if err == nil {
		t.Error("key without certificate is a config error")
	}
	pems, err := readTestKeysCerts()
	if err != nil {
		t.Fatal("could not read test keys/certs:", err)
	}
	// valid key but garbage certificate is rejected
	config.TLSKey = pems["serverkey.pem"]
	config.TLSCertificate = "somecert"
	_, err = NewFromConfig(config)
	if err == nil {
		t.Error("invalid key and certificate is a config error")
	}
	// matching key/certificate pair must be accepted
	config.TLSKey = pems["serverkey.pem"]
	config.TLSCertificate = pems["servercert.pem"]
	_, err = NewFromConfig(config)
	if err != nil {
		t.Error("expected valid config")
	}
}
// sendTCPMetrics dials addr (plain TCP, or TLS when tlsConfig is non-nil),
// writes two counter increments, flushes the fixture's server, and verifies
// the flushed series. Returns an error on any connect/write/verify failure.
func sendTCPMetrics(addr string, tlsConfig *tls.Config, f *fixture) error {
	// TODO: attempt to ensure the accept goroutine opens the port before we attempt to connect
	// connect and send stats in two parts
	var conn net.Conn
	var err error
	if tlsConfig != nil {
		conn, err = tls.Dial("tcp", addr, tlsConfig)
	} else {
		conn, err = net.Dial("tcp", addr)
	}
	if err != nil {
		return err
	}
	defer conn.Close()
	_, err = conn.Write([]byte("page.views:1|c\npage.views:1|c\n"))
	if err != nil {
		return err
	}
	// explicit close (the deferred Close then becomes a no-op error we ignore)
	err = conn.Close()
	if err != nil {
		return err
	}
	// check that the server received the stats; HACK: sleep to ensure workers process before flush
	time.Sleep(20 * time.Millisecond)
	f.server.Flush()
	select {
	case ddmetrics := <-f.ddmetrics:
		if len(ddmetrics.Series) != 1 {
			return fmt.Errorf("unexpected Series: %v", ddmetrics.Series)
		}
		// 40 = the 2 increments normalized to a rate over the flush
		// interval; NOTE(review): presumably 2 events / 50ms — confirm
		// against DefaultFlushInterval.
		if !(ddmetrics.Series[0].Name == "page.views" && ddmetrics.Series[0].Value[0][1] == 40) {
			return fmt.Errorf("unexpected metric: %v", ddmetrics.Series[0])
		}
	case <-time.After(100 * time.Millisecond):
		return fmt.Errorf("timed out waiting for metrics")
	}
	return nil
}
// TestUDPMetrics checks that a statsd datagram sent over UDP is processed
// by a worker.
func TestUDPMetrics(t *testing.T) {
	config := localConfig()
	config.NumWorkers = 1
	// long interval so no flush races with the assertion below
	config.Interval = "60s"
	// HTTPAddrPort is a shared counter used to hand each test a unique port
	addr := fmt.Sprintf("127.0.0.1:%d", HTTPAddrPort)
	HTTPAddrPort++
	config.StatsdListenAddresses = []string{fmt.Sprintf("udp://%s", addr)}
	f := newFixture(t, config)
	defer f.Close()
	// Add a bit of delay to ensure things get listening
	time.Sleep(20 * time.Millisecond)
	conn, err := net.Dial("udp", addr)
	assert.NoError(t, err)
	defer conn.Close()
	conn.Write([]byte("foo.bar:1|c|#baz:gorch"))
	// Add a bit of delay to ensure things get processed
	time.Sleep(20 * time.Millisecond)
	assert.Equal(t, int64(1), f.server.Workers[0].MetricsProcessedCount(), "worker processed metric")
}
// TestMultipleUDPSockets checks that the server accepts statsd datagrams
// on each of several configured UDP listen addresses.
func TestMultipleUDPSockets(t *testing.T) {
	config := localConfig()
	config.NumWorkers = 1
	config.Interval = "60s"
	// two distinct ports from the shared port counter
	addr1 := fmt.Sprintf("127.0.0.1:%d", HTTPAddrPort)
	HTTPAddrPort++
	addr2 := fmt.Sprintf("127.0.0.1:%d", HTTPAddrPort)
	HTTPAddrPort++
	config.StatsdListenAddresses = []string{fmt.Sprintf("udp://%s", addr1), fmt.Sprintf("udp://%s", addr2)}
	f := newFixture(t, config)
	defer f.Close()
	// Add a bit of delay to ensure things get listening
	time.Sleep(20 * time.Millisecond)
	conn, err := net.Dial("udp", addr1)
	assert.NoError(t, err)
	defer conn.Close()
	conn.Write([]byte("foo.bar:1|c|#baz:gorch"))
	conn2, err := net.Dial("udp", addr2)
	assert.NoError(t, err)
	defer conn2.Close()
	conn2.Write([]byte("foo.bar:1|c|#baz:gorch"))
	// Add a bit of delay to ensure things get processed
	time.Sleep(20 * time.Millisecond)
	// one metric from each socket
	assert.Equal(t, int64(2), f.server.Workers[0].MetricsProcessedCount(), "worker processed metric")
}
// TestUDPMetricsSSF checks that a protobuf-encoded SSF span containing one
// counter sample, sent over UDP, is processed by a worker.
func TestUDPMetricsSSF(t *testing.T) {
	config := localConfig()
	config.NumWorkers = 1
	config.Interval = "60s"
	addr := fmt.Sprintf("127.0.0.1:%d", HTTPAddrPort)
	config.SsfListenAddresses = []string{fmt.Sprintf("udp://%s", addr)}
	HTTPAddrPort++
	f := newFixture(t, config)
	defer f.Close()
	// listen delay
	time.Sleep(20 * time.Millisecond)
	conn, err := net.Dial("udp", addr)
	assert.NoError(t, err)
	defer conn.Close()
	// build a span carrying a single counter sample
	testSample := &ssf.SSFSpan{}
	testMetric := &ssf.SSFSample{}
	testMetric.Name = "test.metric"
	testMetric.Metric = ssf.SSFSample_COUNTER
	testMetric.Value = 1
	testMetric.Tags = make(map[string]string)
	testMetric.Tags["tag"] = "tagValue"
	testSample.Metrics = append(testSample.Metrics, testMetric)
	packet, err := proto.Marshal(testSample)
	assert.NoError(t, err)
	conn.Write(packet)
	time.Sleep(20 * time.Millisecond)
	assert.Equal(t, int64(1), f.server.Workers[0].MetricsProcessedCount(), "worker processed metric")
}
// TestUNIXMetricsSSF checks that SSF spans framed with protocol.WriteSSF
// over a unix-domain socket are processed, and that successive writes on
// the same connection each count.
func TestUNIXMetricsSSF(t *testing.T) {
	tdir, err := ioutil.TempDir("", "unixmetrics_ssf")
	require.NoError(t, err)
	defer os.RemoveAll(tdir)
	config := localConfig()
	config.NumWorkers = 1
	config.Interval = "60s"
	path := filepath.Join(tdir, "test.sock")
	config.SsfListenAddresses = []string{fmt.Sprintf("unix://%s", path)}
	HTTPAddrPort++
	f := newFixture(t, config)
	defer f.Close()
	// listen delay
	time.Sleep(20 * time.Millisecond)
	conn, err := net.Dial("unix", path)
	assert.NoError(t, err)
	defer conn.Close()
	testSpan := &ssf.SSFSpan{}
	testMetric := &ssf.SSFSample{}
	testMetric.Name = "test.metric"
	testMetric.Metric = ssf.SSFSample_COUNTER
	testMetric.Value = 1
	testMetric.Tags = make(map[string]string)
	testMetric.Tags["tag"] = "tagValue"
	testSpan.Metrics = append(testSpan.Metrics, testMetric)
	// first write: one metric processed
	_, err = protocol.WriteSSF(conn, testSpan)
	if assert.NoError(t, err) {
		time.Sleep(20 * time.Millisecond)
		assert.Equal(t, int64(1), f.server.Workers[0].MetricsProcessedCount(), "worker processed metric")
	}
	// second write on the same connection: count increments again
	_, err = protocol.WriteSSF(conn, testSpan)
	if assert.NoError(t, err) {
		time.Sleep(20 * time.Millisecond)
		assert.Equal(t, int64(2), f.server.Workers[0].MetricsProcessedCount(), "worker processed metric")
	}
}
// TestIgnoreLongUDPMetrics checks that datagrams longer than
// MetricMaxLength are dropped instead of processed.
func TestIgnoreLongUDPMetrics(t *testing.T) {
	config := localConfig()
	config.NumWorkers = 1
	// the test packet below is deliberately longer than this limit
	config.MetricMaxLength = 31
	config.Interval = "60s"
	addr := fmt.Sprintf("127.0.0.1:%d", HTTPAddrPort)
	config.StatsdListenAddresses = []string{fmt.Sprintf("udp://%s", addr)}
	HTTPAddrPort++
	f := newFixture(t, config)
	defer f.Close()
	// Add a bit of delay to ensure things get listening
	time.Sleep(20 * time.Millisecond)
	conn, err := net.Dial("udp", addr)
	assert.NoError(t, err)
	defer conn.Close()
	// nb this metric is bad because it's too long based on the `MetricMaxLength`
	// we set above!
	conn.Write([]byte("foo.bar:1|c|#baz:gorch,long:tag,is:long"))
	// Add a bit of delay to ensure things get processed
	time.Sleep(20 * time.Millisecond)
	assert.Equal(t, int64(0), f.server.Workers[0].processed, "worker did not process a metric")
}
// TestTCPMetrics checks that a server can accept metrics over a TCP socket.
func TestTCPMetrics(t *testing.T) {
	pems, err := readTestKeysCerts()
	if err != nil {
		t.Fatal("could not read test keys/certs:", err)
	}
	// all supported TCP connection modes
	// expectedConnectResults[i] is whether clientConfigs[i] below should
	// succeed against this server mode.
	serverConfigs := []struct {
		name                   string
		serverKey              string
		serverCertificate      string
		authorityCertificate   string
		expectedConnectResults [4]bool
	}{
		{"TCP", "", "", "", [4]bool{true, false, false, false}},
		{"encrypted", pems["serverkey.pem"], pems["servercert.pem"], "",
			[4]bool{false, true, true, true}},
		{"authenticated", pems["serverkey.pem"], pems["servercert.pem"], pems["cacert.pem"],
			[4]bool{false, false, false, true}},
	}
	// load all the various keys and certificates for the client
	trustServerCA := x509.NewCertPool()
	ok := trustServerCA.AppendCertsFromPEM([]byte(pems["cacert.pem"]))
	if !ok {
		t.Fatal("could not load server certificate")
	}
	// client cert signed by the wrong CA: accepted by "encrypted" servers,
	// rejected by "authenticated" ones
	wrongCert, err := tls.X509KeyPair(
		[]byte(pems["clientcert_wrong.pem"]), []byte(pems["clientkey.pem"]))
	if err != nil {
		t.Fatal("could not load wrong client cert/key:", err)
	}
	wrongConfig := &tls.Config{
		RootCAs:      trustServerCA,
		Certificates: []tls.Certificate{wrongCert},
	}
	correctCert, err := tls.X509KeyPair(
		[]byte(pems["clientcert_correct.pem"]), []byte(pems["clientkey.pem"]))
	if err != nil {
		t.Fatal("could not load correct client cert/key:", err)
	}
	correctConfig := &tls.Config{
		RootCAs:      trustServerCA,
		Certificates: []tls.Certificate{correctCert},
	}
	// all supported client configurations
	clientConfigs := []struct {
		name      string
		tlsConfig *tls.Config
	}{
		{"TCP", nil},
		{"TLS no cert", &tls.Config{RootCAs: trustServerCA}},
		{"TLS wrong cert", wrongConfig},
		{"TLS correct cert", correctConfig},
	}
	for _, serverConfig := range serverConfigs {
		config := localConfig()
		config.NumWorkers = 1
		// Use a unique port to avoid race with shutting down accept goroutine on Linux
		addr := fmt.Sprintf("localhost:%d", HTTPAddrPort)
		HTTPAddrPort++
		config.StatsdListenAddresses = []string{fmt.Sprintf("tcp://%s", addr)}
		config.TLSKey = serverConfig.serverKey
		config.TLSCertificate = serverConfig.serverCertificate
		config.TLSAuthorityCertificate = serverConfig.authorityCertificate
		f := newFixture(t, config)
		// NB: deferred in a loop on purpose — fixture.Close is idempotent,
		// so this is only a safety net if the test aborts; the normal
		// path is the explicit f.Close() at the end of the iteration.
		defer f.Close() // ensure shutdown if the test aborts
		// attempt to connect and send stats with each of the client configurations
		for i, clientConfig := range clientConfigs {
			expectedSuccess := serverConfig.expectedConnectResults[i]
			err := sendTCPMetrics(addr, clientConfig.tlsConfig, f)
			if err != nil {
				if expectedSuccess {
					t.Errorf("server config: '%s' client config: '%s' failed: %s",
						serverConfig.name, clientConfig.name, err.Error())
				} else {
					fmt.Printf("SUCCESS server config: '%s' client config: '%s' got expected error: %s\n",
						serverConfig.name, clientConfig.name, err.Error())
				}
			} else if !expectedSuccess {
				t.Errorf("server config: '%s' client config: '%s' worked; should fail!",
					serverConfig.name, clientConfig.name)
			} else {
				fmt.Printf("SUCCESS server config: '%s' client config: '%s'\n",
					serverConfig.name, clientConfig.name)
			}
		}
		f.Close()
	}
}
// TestHandleTCPGoroutineTimeout verifies that an idle TCP connection doesn't block forever.
func TestHandleTCPGoroutineTimeout(t *testing.T) {
	const readTimeout = 30 * time.Millisecond
	s := &Server{tcpReadTimeout: readTimeout, Workers: []*Worker{
		&Worker{PacketChan: make(chan samplers.UDPMetric, 1)},
	}}
	// make a real TCP connection ... to ourselves
	listener, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatal(err)
	}
	acceptorDone := make(chan struct{})
	go func() {
		// signal completion on every exit path so the test goroutine
		// can never deadlock on <-acceptorDone below
		defer close(acceptorDone)
		accepted, err := listener.Accept()
		if err != nil {
			// t.Fatal must only be called from the test goroutine (it
			// calls runtime.Goexit); use t.Error + return here instead
			t.Error(err)
			return
		}
		// after half the read timeout: send a stat; it should work
		time.Sleep(readTimeout / 2)
		_, err = accepted.Write([]byte("metric:42|g\n"))
		if err != nil {
			t.Error("expected Write to succeed:", err)
		}
		// read: returns when the connection is closed
		out, err := ioutil.ReadAll(accepted)
		if !(len(out) == 0 && err == nil) {
			t.Errorf("expected len(out)==0 (was %d) and err==nil (was %v)", len(out), err)
		}
	}()
	conn, err := net.Dial("tcp", listener.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	// handleTCPGoroutine should not block forever: it will time out
	log.Printf("handling goroutine")
	s.handleTCPGoroutine(conn)
	<-acceptorDone
	// we should have received one metric
	packet := <-s.Workers[0].PacketChan
	if packet.Name != "metric" {
		t.Error("Expected packet for metric:", packet)
	}
}
// TestNewFromServerConfigRenamedVariables checks that renamed config
// variables are copied onto the corresponding Server fields.
func TestNewFromServerConfigRenamedVariables(t *testing.T) {
	// test the variables that have been renamed
	config := Config{
		DatadogAPIKey:          "apikey",
		DatadogAPIHostname:     "http://api",
		DatadogTraceAPIAddress: "http://trace",
		SsfListenAddresses:     []string{"udp://127.0.0.1:99"},
		// required or NewFromConfig fails
		Interval:     "10s",
		StatsAddress: "localhost:62251",
	}
	s, err := NewFromConfig(config)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, "apikey", s.DDAPIKey)
	assert.Equal(t, "http://api", s.DDHostname)
	assert.Equal(t, "http://trace", s.DDTraceAddress)
	// the udp:// listen address must resolve to a loopback UDP addr
	addr := s.SSFListenAddrs[0].(*net.UDPAddr)
	assert.True(t, addr.IP.IsLoopback(), "TraceAddr should be loopback")
	assert.Equal(t, 99, addr.Port)
}
// This is necessary until we can import
// github.com/sirupsen/logrus/test - it's currently failing due to dep
// insisting on pulling the repo in with its capitalized name.
//
// TODO: Revisit once https://github.com/golang/dep/issues/433 is fixed
//
// nullLogger returns a logrus logger whose output is discarded.
func nullLogger() *logrus.Logger {
	l := logrus.New()
	l.Out = ioutil.Discard
	return l
}
// BenchmarkSendSSFUNIX sends b.N metrics to veneur and waits until
// all of them have been read (not processed).
func BenchmarkSendSSFUNIX(b *testing.B) {
	tdir, err := ioutil.TempDir("", "unixmetrics_ssf")
	require.NoError(b, err)
	defer os.RemoveAll(tdir)
	HTTPAddrPort++
	path := filepath.Join(tdir, "test.sock")
	// test the variables that have been renamed
	config := Config{
		DatadogAPIKey:          "apikey",
		DatadogAPIHostname:     "http://api",
		DatadogTraceAPIAddress: "http://trace",
		SsfListenAddresses:     []string{fmt.Sprintf("unix://%s", path)},
		// required or NewFromConfig fails
		Interval:     "10s",
		StatsAddress: "localhost:62251",
	}
	s, err := NewFromConfig(config)
	if err != nil {
		b.Fatal(err)
	}
	// Simulate a metrics worker:
	w := NewWorker(0, nil, nullLogger())
	s.Workers = []*Worker{w}
	// (an empty go func(){}() that did nothing was removed here)
	defer close(w.QuitChan)
	// Simulate an incoming connection on the server:
	l, err := net.Listen("unix", path)
	require.NoError(b, err)
	defer l.Close()
	// Writer goroutine: streams b.N framed SSF spans at the socket.
	go func() {
		testSpan := &ssf.SSFSpan{}
		testMetric := &ssf.SSFSample{}
		testMetric.Name = "test.metric"
		testMetric.Metric = ssf.SSFSample_COUNTER
		testMetric.Value = 1
		testMetric.Tags = make(map[string]string)
		testMetric.Tags["tag"] = "tagValue"
		testSpan.Metrics = append(testSpan.Metrics, testMetric)
		conn, err := net.Dial("unix", path)
		require.NoError(b, err)
		defer conn.Close()
		for i := 0; i < b.N; i++ {
			_, err := protocol.WriteSSF(conn, testSpan)
			require.NoError(b, err)
		}
		// explicit close signals EOF to the reader promptly
		conn.Close()
	}()
	sConn, err := l.Accept()
	require.NoError(b, err)
	go s.ReadTraceStream(sConn)
	b.ResetTimer()
	// timed region: wait for all b.N spans to be read off the socket
	for i := 0; i < b.N; i++ {
		<-w.PacketChan
	}
	close(s.shutdown)
}
// BenchmarkSendSSFUDP floods the veneur UDP socket with messages and
// and times how long it takes to read (not process) b.N metrics. This
// is almost an inversion of the SSFUNIX benchmark above, as UDP does
// lose packets and we don't want to loop forever.
func BenchmarkSendSSFUDP(b *testing.B) {
	addr := fmt.Sprintf("127.0.0.1:%d", HTTPAddrPort)
	HTTPAddrPort++
	// test the variables that have been renamed
	config := Config{
		DatadogAPIKey:          "apikey",
		DatadogAPIHostname:     "http://api",
		DatadogTraceAPIAddress: "http://trace",
		SsfListenAddresses:     []string{fmt.Sprintf("udp://%s", addr)},
		ReadBufferSizeBytes:    16 * 1024,
		TraceMaxLengthBytes:    900 * 1024,
		// required or NewFromConfig fails
		Interval:     "10s",
		StatsAddress: "localhost:62251",
	}
	s, err := NewFromConfig(config)
	if err != nil {
		b.Fatal(err)
	}
	// buffer pool for the trace reader, sized to the max trace length
	pool := &sync.Pool{
		New: func() interface{} {
			return make([]byte, s.traceMaxLengthBytes)
		},
	}
	// Simulate listening for UDP SSF on the server:
	udpAddr, err := net.ResolveUDPAddr("udp", addr)
	require.NoError(b, err)
	l, err := NewSocket(udpAddr, s.RcvbufBytes, false)
	require.NoError(b, err)
	// Simulate a metrics worker:
	w := NewWorker(0, nil, nullLogger())
	s.Workers = []*Worker{w}
	// Flooder goroutine: sends packets until shutdown is closed; UDP may
	// drop some, which is fine — the timed loop only needs b.N reads.
	go func() {
		testSpan := &ssf.SSFSpan{}
		testMetric := &ssf.SSFSample{}
		testMetric.Name = "test.metric"
		testMetric.Metric = ssf.SSFSample_COUNTER
		testMetric.Value = 1
		testMetric.Tags = make(map[string]string)
		testMetric.Tags["tag"] = "tagValue"
		testSpan.Metrics = append(testSpan.Metrics, testMetric)
		conn, err := net.Dial("udp", addr)
		require.NoError(b, err)
		defer conn.Close()
		for {
			select {
			case <-s.shutdown:
				return
			default:
			}
			packet, err := proto.Marshal(testSpan)
			assert.NoError(b, err)
			conn.Write(packet)
		}
	}()
	go s.ReadTraceSocket(l, pool)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		<-w.PacketChan
	}
	l.Close()
	close(s.shutdown)
	// (a redundant bare `return` at the end of the function was removed)
}
|
package syslog
import (
"io"
"net"
"testing"
"time"
"github.com/jeromer/syslogparser"
. "launchpad.net/gocheck"
)
// Test hooks the gocheck suites into the standard "go test" runner.
func Test(t *testing.T) { TestingT(t) }
// ServerSuite groups the syslog server tests for gocheck.
type ServerSuite struct {
}

// Register the suite with gocheck.
var _ = Suite(&ServerSuite{})

// exampleSyslog is a well-formed RFC3164 message used throughout the tests.
var exampleSyslog = "<31>Dec 26 05:08:46 hostname tag[296]: content"
// TestTailFile boots a server listening on UDP and TCP port 5141, sends one
// example message over UDP, and checks the parsed parts seen by the handler.
// NOTE(review): relies on 100µs sleeps for startup/delivery ordering — may
// be flaky on slow machines.
func (s *ServerSuite) TestTailFile(c *C) {
	handler := new(HandlerMock)
	server := NewServer()
	server.SetFormat(RFC3164)
	server.SetHandler(handler)
	server.ListenUDP("0.0.0.0:5141")
	server.ListenTCP("0.0.0.0:5141")
	go func(server *Server) {
		time.Sleep(100 * time.Microsecond)
		serverAddr, _ := net.ResolveUDPAddr("udp", "localhost:5141")
		con, _ := net.DialUDP("udp", nil, serverAddr)
		con.Write([]byte(exampleSyslog))
		time.Sleep(100 * time.Microsecond)
		server.Kill()
	}(server)
	server.Boot()
	server.Wait()
	c.Check(handler.LastLogParts["hostname"], Equals, "hostname")
	c.Check(handler.LastLogParts["tag"], Equals, "tag")
	c.Check(handler.LastLogParts["content"], Equals, "content")
}
// HandlerMock records the last log message delivered to Handle.
type HandlerMock struct {
	LastLogParts syslogparser.LogParts
}

// Handle implements the server's Handler interface by storing the parts.
func (self *HandlerMock) Handle(logParts syslogparser.LogParts, msgLen int64, err error) {
	self.LastLogParts = logParts
}
// ConnMock is a scripted net.Conn: it serves ReadData once, then EOF, and
// records whether Close/SetReadDeadline were called.
type ConnMock struct {
	ReadData       []byte // payload returned by the first Read; nil afterwards
	ReturnTimeout  bool   // when true, every Read fails immediately
	isClosed       bool   // set by Close
	isReadDeadline bool   // set by SetReadDeadline
}

// Read returns the scripted data once, then io.EOF; with ReturnTimeout it
// always fails with a network error instead.
func (c *ConnMock) Read(b []byte) (n int, err error) {
	if c.ReturnTimeout {
		return 0, net.UnknownNetworkError("i/o timeout")
	}
	if c.ReadData != nil {
		l := copy(b, c.ReadData)
		c.ReadData = nil
		return l, nil
	}
	return 0, io.EOF
}

// Write discards the data.
func (c *ConnMock) Write(b []byte) (n int, err error) {
	return 0, nil
}

// Close records that the connection was closed.
func (c *ConnMock) Close() error {
	c.isClosed = true
	return nil
}

func (c *ConnMock) LocalAddr() net.Addr {
	return nil
}

func (c *ConnMock) RemoteAddr() net.Addr {
	return nil
}

func (c *ConnMock) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline records that a read deadline was requested.
func (c *ConnMock) SetReadDeadline(t time.Time) error {
	c.isReadDeadline = true
	return nil
}

func (c *ConnMock) SetWriteDeadline(t time.Time) error {
	return nil
}
// TestConnectionClose verifies that goScanConnection closes the connection
// exactly when asked to (both the true and false cases).
func (s *ServerSuite) TestConnectionClose(c *C) {
	for _, closeConnection := range []bool{true, false} {
		handler := new(HandlerMock)
		server := NewServer()
		server.SetFormat(RFC3164)
		server.SetHandler(handler)
		con := ConnMock{ReadData: []byte(exampleSyslog)}
		server.goScanConnection(&con, closeConnection)
		server.Wait()
		c.Check(con.isClosed, Equals, closeConnection)
	}
}
// TestTcpTimeout verifies that SetTimeout causes a read deadline to be set
// on the connection, and that a timed-out read delivers no log parts.
func (s *ServerSuite) TestTcpTimeout(c *C) {
	handler := new(HandlerMock)
	server := NewServer()
	server.SetFormat(RFC3164)
	server.SetHandler(handler)
	server.SetTimeout(10)
	// ReturnTimeout makes every Read fail, so nothing can be parsed
	con := ConnMock{ReadData: []byte(exampleSyslog), ReturnTimeout: true}
	c.Check(con.isReadDeadline, Equals, false)
	server.goScanConnection(&con, true)
	server.Wait()
	c.Check(con.isReadDeadline, Equals, true)
	c.Check(handler.LastLogParts, IsNil)
}
Below: server_test.go extended to verify the new Handler interface — Handle now also receives the message length and any parse error, and the tests assert both.
package syslog
import (
"io"
"net"
"testing"
"time"
"github.com/jeromer/syslogparser"
. "launchpad.net/gocheck"
)
// Test hooks the gocheck suites into the standard "go test" runner.
func Test(t *testing.T) { TestingT(t) }
// ServerSuite groups the syslog server tests for gocheck.
type ServerSuite struct {
}

// Register the suite with gocheck.
var _ = Suite(&ServerSuite{})

// exampleSyslog is a well-formed RFC3164 message used throughout the tests.
var exampleSyslog = "<31>Dec 26 05:08:46 hostname tag[296]: content"
// TestTailFile boots a server on UDP/TCP port 5141, sends one example
// message over UDP, and checks the parsed parts plus the extended Handler
// fields (message length and error) recorded by the mock.
// NOTE(review): relies on 100µs sleeps for ordering — may be flaky.
func (s *ServerSuite) TestTailFile(c *C) {
	handler := new(HandlerMock)
	server := NewServer()
	server.SetFormat(RFC3164)
	server.SetHandler(handler)
	server.ListenUDP("0.0.0.0:5141")
	server.ListenTCP("0.0.0.0:5141")
	go func(server *Server) {
		time.Sleep(100 * time.Microsecond)
		serverAddr, _ := net.ResolveUDPAddr("udp", "localhost:5141")
		con, _ := net.DialUDP("udp", nil, serverAddr)
		con.Write([]byte(exampleSyslog))
		time.Sleep(100 * time.Microsecond)
		server.Kill()
	}(server)
	server.Boot()
	server.Wait()
	c.Check(handler.LastLogParts["hostname"], Equals, "hostname")
	c.Check(handler.LastLogParts["tag"], Equals, "tag")
	c.Check(handler.LastLogParts["content"], Equals, "content")
	// extended-interface assertions: full message length, no parse error
	c.Check(handler.LastMessageLength, Equals, int64(len(exampleSyslog)))
	c.Check(handler.LastError, IsNil)
}
// HandlerMock records the last delivery made to Handle, including the
// extended interface's message length and error.
type HandlerMock struct {
	LastLogParts      syslogparser.LogParts
	LastMessageLength int64
	LastError         error
}

// Handle implements the extended Handler interface by storing all arguments.
func (self *HandlerMock) Handle(logParts syslogparser.LogParts, msgLen int64, err error) {
	self.LastLogParts = logParts
	self.LastMessageLength = msgLen
	self.LastError = err
}
// ConnMock is a scripted net.Conn: it serves ReadData once, then EOF, and
// records whether Close/SetReadDeadline were called.
type ConnMock struct {
	ReadData       []byte // payload returned by the first Read; nil afterwards
	ReturnTimeout  bool   // when true, every Read fails immediately
	isClosed       bool   // set by Close
	isReadDeadline bool   // set by SetReadDeadline
}

// Read returns the scripted data once, then io.EOF; with ReturnTimeout it
// always fails with a network error instead.
func (c *ConnMock) Read(b []byte) (n int, err error) {
	if c.ReturnTimeout {
		return 0, net.UnknownNetworkError("i/o timeout")
	}
	if c.ReadData != nil {
		l := copy(b, c.ReadData)
		c.ReadData = nil
		return l, nil
	}
	return 0, io.EOF
}

// Write discards the data.
func (c *ConnMock) Write(b []byte) (n int, err error) {
	return 0, nil
}

// Close records that the connection was closed.
func (c *ConnMock) Close() error {
	c.isClosed = true
	return nil
}

func (c *ConnMock) LocalAddr() net.Addr {
	return nil
}

func (c *ConnMock) RemoteAddr() net.Addr {
	return nil
}

func (c *ConnMock) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline records that a read deadline was requested.
func (c *ConnMock) SetReadDeadline(t time.Time) error {
	c.isReadDeadline = true
	return nil
}

func (c *ConnMock) SetWriteDeadline(t time.Time) error {
	return nil
}
// TestConnectionClose verifies that goScanConnection closes the connection
// exactly when asked to (both the true and false cases).
func (s *ServerSuite) TestConnectionClose(c *C) {
	for _, closeConnection := range []bool{true, false} {
		handler := new(HandlerMock)
		server := NewServer()
		server.SetFormat(RFC3164)
		server.SetHandler(handler)
		con := ConnMock{ReadData: []byte(exampleSyslog)}
		server.goScanConnection(&con, closeConnection)
		server.Wait()
		c.Check(con.isClosed, Equals, closeConnection)
	}
}
// TestTcpTimeout verifies that SetTimeout causes a read deadline to be set,
// and that a timed-out read delivers nothing to the handler — neither log
// parts, a message length, nor an error.
func (s *ServerSuite) TestTcpTimeout(c *C) {
	handler := new(HandlerMock)
	server := NewServer()
	server.SetFormat(RFC3164)
	server.SetHandler(handler)
	server.SetTimeout(10)
	// ReturnTimeout makes every Read fail, so nothing can be parsed
	con := ConnMock{ReadData: []byte(exampleSyslog), ReturnTimeout: true}
	c.Check(con.isReadDeadline, Equals, false)
	server.goScanConnection(&con, true)
	server.Wait()
	c.Check(con.isReadDeadline, Equals, true)
	c.Check(handler.LastLogParts, IsNil)
	c.Check(handler.LastMessageLength, Equals, int64(0))
	c.Check(handler.LastError, IsNil)
}
|
package sweetiebot
import (
"database/sql"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"reflect"
"regexp"
"strconv"
"strings"
"time"
"github.com/bwmarrin/discordgo"
)
// ModuleHooks holds, per Discord event type, the list of modules that have
// registered to receive that event.
type ModuleHooks struct {
	OnEvent             []ModuleOnEvent
	OnTypingStart       []ModuleOnTypingStart
	OnMessageCreate     []ModuleOnMessageCreate
	OnMessageUpdate     []ModuleOnMessageUpdate
	OnMessageDelete     []ModuleOnMessageDelete
	OnMessageAck        []ModuleOnMessageAck
	OnPresenceUpdate    []ModuleOnPresenceUpdate
	OnVoiceStateUpdate  []ModuleOnVoiceStateUpdate
	OnGuildUpdate       []ModuleOnGuildUpdate
	OnGuildMemberAdd    []ModuleOnGuildMemberAdd
	OnGuildMemberRemove []ModuleOnGuildMemberRemove
	OnGuildMemberUpdate []ModuleOnGuildMemberUpdate
	OnGuildBanAdd       []ModuleOnGuildBanAdd
	OnGuildBanRemove    []ModuleOnGuildBanRemove
	OnCommand           []ModuleOnCommand
	OnIdle              []ModuleOnIdle
	OnTick              []ModuleOnTick
}
type BotConfig struct {
Version int `json:"version"`
Debug bool `json:"debug"`
Maxerror int64 `json:"maxerror"`
Maxwit int64 `json:"maxwit"`
Maxbored int64 `json:"maxbored"`
DisableBored int `json:"disablebored"`
MaxPMlines int `json:"maxpmlines"`
Maxquotelines int `json:"maxquotelines"`
Maxsearchresults int `json:"maxsearchresults"`
Defaultmarkovlines int `json:"defaultmarkovlines"`
Commandperduration int `json:"commandperduration"`
Commandmaxduration int64 `json:"commandmaxduration"`
StatusDelayTime int `json:"statusdelaytime"`
MaxRaidTime int64 `json:"maxraidtime"`
RaidSize int `json:"raidsize"`
Witty map[string]string `json:"witty"`
Aliases map[string]string `json:"aliases"`
MaxBucket int `json:"maxbucket"`
MaxBucketLength int `json:"maxbucketlength"`
MaxFightHP int `json:"maxfighthp"`
MaxFightDamage int `json:"maxfightdamage"`
MaxImageSpam int `json:"maximagespam"`
MaxAttachSpam int `json:"maxattachspam"`
MaxPingSpam int `json:"maxpingspam"`
MaxMessageSpam map[int64]int `json:"maxmessagespam"`
IgnoreInvalidCommands bool `json:"ignoreinvalidcommands"`
UseMemberNames bool `json:"usemembernames"`
Timezone int `json:"timezone"`
AutoSilence int `json:"autosilence"`
AlertRole uint64 `json:"alertrole"`
SilentRole uint64 `json:"silentrole"`
LogChannel uint64 `json:"logchannel"`
ModChannel uint64 `json:"modchannel"`
WelcomeChannel uint64 `json:"welcomechannel"`
WelcomeMessage string `json:"welcomemessage"`
BirthdayRole uint64 `json:"birthdayrole"`
SpoilChannels []uint64 `json:"spoilchannels"`
FreeChannels map[string]bool `json:"freechannels"`
Command_roles map[string]map[string]bool `json:"command_roles"`
Command_channels map[string]map[string]bool `json:"command_channels"`
Command_limits map[string]int64 `json:command_limits`
Command_disabled map[string]bool `json:command_disabled`
Module_disabled map[string]bool `json:module_disabled`
Module_channels map[string]map[string]bool `json:module_channels`
Collections map[string]map[string]bool `json:"collections"`
Groups map[string]map[string]bool `json:"groups"`
}
// GuildInfo holds all per-guild bot state: the discord guild snapshot, the
// guild's configuration, and the modules/commands registered for it.
type GuildInfo struct {
	Guild        *discordgo.Guild            // guild state as last received from discord
	log          *Log                        // per-guild logger
	command_last map[string]map[string]int64 // channelID -> command name -> last-run unix time (cooldowns)
	commandlimit *SaturationLimit            // guild-wide command saturation limiter
	config       BotConfig                   // loaded from "<guildID>.json"
	emotemodule  *EmoteModule                // kept separate so collection edits can recompile its regex
	hooks        ModuleHooks                 // per-event module subscriber lists
	modules      []Module                    // all registered modules
	commands     map[string]Command          // lowercased command name -> implementation
}
// SweetieBot is the process-wide bot state shared by every guild.
type SweetieBot struct {
	db                 *BotDB             // database connection
	dg                 *discordgo.Session // discord session
	version            string
	SelfID             string          // our own user ID, set on READY
	Owners             map[uint64]bool // user IDs that bypass permission checks
	RestrictedCommands map[string]bool // commands allowed only on the main guild
	MainGuildID        uint64
	DebugChannels      map[string]string // guildID -> debug channel ID
	quit               bool              // set true to shut the bot down
	guilds             map[string]*GuildInfo // guildID -> state
	GuildChannels      map[string]*GuildInfo // channelID -> owning guild
	LastMessages       map[string]int64      // channelID -> unix time of last message (idle detection)
	MaxConfigSize      int                   // refuse to save configs larger than this many bytes
}
// Package-wide singleton plus shared regexes and the UTC zone used for
// time bookkeeping.
var (
	sb           *SweetieBot
	channelregex = regexp.MustCompile("<#[0-9]+>")
	userregex    = regexp.MustCompile("<@!?[0-9]+>")
	repeatregex  = regexp.MustCompile("repeat -?[0-9]+ (second|minute|hour|day|week|month|quarter|year)s?")
	locUTC       = time.FixedZone("UTC", 0)
)
// IsMainGuild reports whether info refers to the bot's primary guild.
func (sbot *SweetieBot) IsMainGuild(info *GuildInfo) bool {
	id := SBatoi(info.Guild.ID)
	return id == sbot.MainGuildID
}
// AddCommand registers c under its lowercased name.
func (info *GuildInfo) AddCommand(c Command) {
	key := strings.ToLower(c.Name())
	info.commands[key] = c
}
// SaveConfig serializes the guild configuration to "<guildID>.json",
// refusing to write files larger than sb.MaxConfigSize bytes.
func (info *GuildInfo) SaveConfig() {
	data, err := json.Marshal(info.config)
	if err != nil {
		info.log.Log("Error writing json: ", err.Error())
		return
	}
	if len(data) > sb.MaxConfigSize {
		info.log.Log("Error saving config file: Config file is too large! Config files cannot exceed " + strconv.Itoa(sb.MaxConfigSize) + " bytes.")
		return
	}
	ioutil.WriteFile(info.Guild.ID+".json", data, 0664)
}
func DeleteFromMapReflect(f reflect.Value, k string) string {
f.SetMapIndex(reflect.ValueOf(k), reflect.Value{})
return "Deleted " + k
}
// SetConfig sets the BotConfig field whose name matches name
// (case-insensitive) to value, dispatching on the field's Go type via
// reflection. extra supplies additional values for slice- and map-typed
// fields. Returns a human-readable description of the result and a
// success flag.
func (info *GuildInfo) SetConfig(name string, value string, extra ...string) (string, bool) {
	name = strings.ToLower(name)
	t := reflect.ValueOf(&info.config).Elem()
	n := t.NumField()
	for i := 0; i < n; i++ {
		if strings.ToLower(t.Type().Field(i).Name) == name {
			f := t.Field(i)
			switch t.Field(i).Interface().(type) {
			case string:
				f.SetString(value)
			case int, int8, int16, int32, int64:
				// Parse errors are silently ignored; the field becomes 0.
				k, _ := strconv.ParseInt(value, 10, 64)
				f.SetInt(k)
			case uint, uint8, uint16, uint32:
				k, _ := strconv.ParseUint(value, 10, 64)
				f.SetUint(k)
			case uint64:
				// uint64 fields hold discord IDs; PingAtoi also accepts <@id> pings.
				f.SetUint(PingAtoi(value))
			case []uint64:
				// Rebuild the slice from value plus any extra arguments.
				f.Set(reflect.MakeSlice(reflect.TypeOf(f.Interface()), 0, 1+len(extra)))
				if len(value) > 0 {
					f.Set(reflect.Append(f, reflect.ValueOf(PingAtoi(value))))
					for _, k := range extra {
						f.Set(reflect.Append(f, reflect.ValueOf(PingAtoi(k))))
					}
				}
			case bool:
				f.SetBool(value == "true")
			case map[string]string:
				value = strings.ToLower(value)
				if len(extra) == 0 {
					return "No extra parameter given for " + name, false
				}
				if f.IsNil() {
					f.Set(reflect.MakeMap(reflect.TypeOf(f.Interface())))
				}
				// An empty extra argument deletes the key instead of setting it.
				if len(extra[0]) == 0 {
					return DeleteFromMapReflect(f, value), false
				}
				f.SetMapIndex(reflect.ValueOf(value), reflect.ValueOf(extra[0]))
				return value + ": " + extra[0], true
			case map[string]int64:
				value = strings.ToLower(value)
				if len(extra) == 0 {
					return "No extra parameter given for " + name, false
				}
				if f.IsNil() {
					f.Set(reflect.MakeMap(reflect.TypeOf(f.Interface())))
				}
				if len(extra[0]) == 0 {
					return DeleteFromMapReflect(f, value), false
				}
				k, _ := strconv.ParseInt(extra[0], 10, 64)
				f.SetMapIndex(reflect.ValueOf(value), reflect.ValueOf(k))
				return value + ": " + strconv.FormatInt(k, 10), true
			case map[int64]int:
				ivalue, err := strconv.ParseInt(value, 10, 64)
				if err != nil {
					return value + " is not an integer.", false
				}
				if len(extra) == 0 {
					return "No extra parameter given for " + name, false
				}
				if f.IsNil() {
					f.Set(reflect.MakeMap(reflect.TypeOf(f.Interface())))
				}
				if len(extra[0]) == 0 {
					f.SetMapIndex(reflect.ValueOf(ivalue), reflect.Value{})
					return "Deleted " + value, false
				}
				k, _ := strconv.Atoi(extra[0])
				f.SetMapIndex(reflect.ValueOf(ivalue), reflect.ValueOf(k))
				return value + ": " + strconv.Itoa(k), true
			case map[string]bool:
				// Set-valued field: replaced wholesale with value + extra.
				f.Set(reflect.MakeMap(reflect.TypeOf(f.Interface())))
				f.SetMapIndex(reflect.ValueOf(StripPing(value)), reflect.ValueOf(true))
				stripped := []string{StripPing(value)}
				for _, k := range extra {
					f.SetMapIndex(reflect.ValueOf(StripPing(k)), reflect.ValueOf(true))
					stripped = append(stripped, StripPing(k))
				}
				return "[" + strings.Join(stripped, ", ") + "]", true
			case map[string]map[string]bool:
				value = strings.ToLower(value)
				if f.IsNil() {
					f.Set(reflect.MakeMap(reflect.TypeOf(f.Interface())))
				}
				if len(extra) == 0 {
					return DeleteFromMapReflect(f, value), false
				}
				// Replace the inner set for this key with extra's members.
				m := reflect.MakeMap(reflect.TypeOf(f.Interface()).Elem())
				stripped := []string{}
				for _, k := range extra {
					m.SetMapIndex(reflect.ValueOf(StripPing(k)), reflect.ValueOf(true))
					stripped = append(stripped, StripPing(k))
				}
				f.SetMapIndex(reflect.ValueOf(value), m)
				return value + ": [" + strings.Join(stripped, ", ") + "]", true
			default:
				info.log.Log(name + " is an unknown type " + t.Field(i).Type().Name())
				return "That config option has an unknown type!", false
			}
			return fmt.Sprint(t.Field(i).Interface()), true
		}
	}
	return "Could not find configuration parameter " + name + "!", false
}
// sbemotereplace inserts a zero-width space into "[](/" sequences so
// reddit-style emote links will not render.
func sbemotereplace(s string) string {
	const broken = "[\u200B](/"
	return strings.Replace(s, "[](/", broken, -1)
}
// SanitizeOutput neutralizes banned emotes in message when the emote
// module is active; otherwise the message passes through untouched.
func (info *GuildInfo) SanitizeOutput(message string) string {
	em := info.emotemodule
	if em == nil {
		return message
	}
	return em.emoteban.ReplaceAllStringFunc(message, sbemotereplace)
}
// ExtraSanitize scrubs backticks, emote links, raw URLs, and mentions
// from s so it can be safely echoed back to a channel.
func ExtraSanitize(s string) string {
	replacements := [][2]string{
		{"`", ""},
		{"[](/", "[\u200B](/"},
		{"http://", "http\u200B://"},
		{"https://", "https\u200B://"},
	}
	for _, r := range replacements {
		s = strings.Replace(s, r[0], r[1], -1)
	}
	return ReplaceAllMentions(s)
}
// SendMessage posts message to channelID after emote sanitization.
func (info *GuildInfo) SendMessage(channelID string, message string) {
	sanitized := info.SanitizeOutput(message)
	sb.dg.ChannelMessageSend(channelID, sanitized)
}
// ProcessModule reports whether module m should run for channelID:
// false when the module is disabled, and otherwise governed by the
// module's channel list ("!" inverts the list into a blacklist).
func (info *GuildInfo) ProcessModule(channelID string, m Module) bool {
	name := strings.ToLower(m.Name())
	if _, disabled := info.config.Module_disabled[name]; disabled {
		return false
	}
	channels := info.config.Module_channels[name]
	// Only restrict when we have a channel to test and the module
	// actually declares specific channels.
	if len(channelID) == 0 || len(channels) == 0 {
		return true
	}
	_, reverse := channels["!"]
	_, ok := channels[channelID]
	return ok != reverse
}
// SwapStatusLoop periodically rotates the bot's "playing" status through
// the guild's "status" collection. It only runs for the main guild so the
// status changes exactly once per interval.
func (info *GuildInfo) SwapStatusLoop() {
	if sb.IsMainGuild(info) {
		for !sb.quit {
			if len(info.config.Collections["status"]) > 0 {
				sb.dg.UpdateStatus(0, MapGetRandomItem(info.config.Collections["status"]))
			}
			// StatusDelayTime is in seconds.
			time.Sleep(time.Duration(info.config.StatusDelayTime) * time.Second)
		}
	}
}
// ChangeBotName updates the bot account's username and avatar using the
// credentials stored in the "email" and "passwd" files next to the binary.
// Read errors are ignored; missing files simply yield empty credentials.
func ChangeBotName(s *discordgo.Session, name string, avatarfile string) {
	binary, _ := ioutil.ReadFile(avatarfile)
	email, _ := ioutil.ReadFile("email")
	password, _ := ioutil.ReadFile("passwd")
	avatar := "data:image/jpeg;base64," + base64.StdEncoding.EncodeToString(binary)
	_, err := s.UserUpdate(strings.TrimSpace(string(email)), strings.TrimSpace(string(password)), name, avatar, "")
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	fmt.Println("Changed username successfully")
}
//func SBEvent(s *discordgo.Session, e *discordgo.Event) { ApplyFuncRange(len(info.hooks.OnEvent), func(i int) { if(ProcessModule("", info.hooks.OnEvent[i])) { info.hooks.OnEvent[i].OnEvent(s, e) } }) }
// SBReady handles discord's READY event, recording our own user ID so the
// bot can later recognize (and discard) its own messages.
// Fix: corrected the "receieved" typo in the startup log line.
func SBReady(s *discordgo.Session, r *discordgo.Ready) {
	fmt.Println("Ready message received, waiting for guilds...")
	sb.SelfID = r.User.ID
	// Only used to change sweetiebot's name or avatar
	//ChangeBotName(s, "Sweetie", "avatar.jpg")
}
// AttachToGuild initializes (or refreshes) all bot state for guild g:
// loads its JSON config, registers every module and command, and starts
// the guild's background loops. Idempotent: an already-known guild only
// gets a state refresh.
//
// Fixes: dropped the unused blank identifier in `for k := range`, sized
// the modules slice for the 7 modules actually appended (was 6), and
// removed a redundant capacity argument to make().
func AttachToGuild(g *discordgo.Guild) {
	guild, exists := sb.guilds[g.ID]
	if exists {
		guild.ProcessGuild(g)
		return
	}
	fmt.Println("Initializing " + g.Name)
	guild = &GuildInfo{
		Guild:        g,
		command_last: make(map[string]map[string]int64),
		commandlimit: &SaturationLimit{[]int64{}, 0, AtomicFlag{0}},
		commands:     make(map[string]Command),
		emotemodule:  nil,
	}
	guild.log = &Log{0, guild}
	config, err := ioutil.ReadFile(g.ID + ".json")
	disableall := false
	if err != nil {
		// First time we've seen this guild: fall back to the default
		// config and disable everything until an admin enables it.
		config, _ = ioutil.ReadFile("default.json")
		disableall = true
	}
	err = json.Unmarshal(config, &guild.config)
	if err != nil {
		fmt.Println("Error reading config file for "+g.Name+": ", err.Error())
	}
	MigrateSettings(guild)
	guild.commandlimit.times = make([]int64, guild.config.Commandperduration*2)
	// Ensure every map-typed config field is non-nil before use.
	if len(guild.config.Witty) == 0 {
		guild.config.Witty = make(map[string]string)
	}
	if len(guild.config.Aliases) == 0 {
		guild.config.Aliases = make(map[string]string)
	}
	if len(guild.config.FreeChannels) == 0 {
		guild.config.FreeChannels = make(map[string]bool)
	}
	if len(guild.config.Command_roles) == 0 {
		guild.config.Command_roles = make(map[string]map[string]bool)
	}
	if len(guild.config.Command_channels) == 0 {
		guild.config.Command_channels = make(map[string]map[string]bool)
	}
	if len(guild.config.Command_limits) == 0 {
		guild.config.Command_limits = make(map[string]int64)
	}
	if len(guild.config.Command_disabled) == 0 {
		guild.config.Command_disabled = make(map[string]bool)
	}
	if len(guild.config.Module_disabled) == 0 {
		guild.config.Module_disabled = make(map[string]bool)
	}
	if len(guild.config.Module_channels) == 0 {
		guild.config.Module_channels = make(map[string]map[string]bool)
	}
	if len(guild.config.Groups) == 0 {
		guild.config.Groups = make(map[string]map[string]bool)
	}
	if len(guild.config.Collections) == 0 {
		guild.config.Collections = make(map[string]map[string]bool)
	}
	// These collections are assumed to exist by the modules below.
	collections := []string{"emote", "bored", "cute", "status", "spoiler", "bucket"}
	for _, v := range collections {
		_, ok := guild.config.Collections[v]
		if !ok {
			guild.config.Collections[v] = make(map[string]bool)
		}
	}
	if sb.IsMainGuild(guild) {
		sb.db.log = guild.log
	}
	if guild.config.Debug { // The server does not necessarily tie a standard input to the program
		go WaitForInput()
	}
	sb.guilds[g.ID] = guild
	guild.ProcessGuild(g)
	episodegencommand := &EpisodeGenCommand{}
	guild.emotemodule = &EmoteModule{}
	spoilermodule := &SpoilerModule{}
	wittymodule := &WittyModule{}
	spammodule := &SpamModule{}
	guild.modules = make([]Module, 0, 7) // 7 modules appended below
	guild.modules = append(guild.modules, spammodule)
	guild.modules = append(guild.modules, &PingModule{})
	guild.modules = append(guild.modules, guild.emotemodule)
	guild.modules = append(guild.modules, wittymodule)
	guild.modules = append(guild.modules, &BoredModule{Episodegen: episodegencommand})
	guild.modules = append(guild.modules, spoilermodule)
	guild.modules = append(guild.modules, &ScheduleModule{})
	for _, v := range guild.modules {
		v.Register(guild)
	}
	// !add / !remove need to recompile the relevant regex after a
	// collection changes; these callbacks do that per collection.
	addfuncmap := map[string]func(string) string{
		"emote": func(arg string) string {
			r := guild.emotemodule.UpdateRegex(guild)
			if !r {
				delete(guild.config.Collections["emote"], arg)
				guild.emotemodule.UpdateRegex(guild)
				return "```Failed to ban " + arg + " because regex compilation failed.```"
			}
			return "```Banned " + arg + " and recompiled the emote regex.```"
		},
		"spoiler": func(arg string) string {
			r := spoilermodule.UpdateRegex(guild)
			if !r {
				delete(guild.config.Collections["spoiler"], arg)
				spoilermodule.UpdateRegex(guild)
				return "```Failed to ban " + arg + " because regex compilation failed.```"
			}
			return "```Banned " + arg + " and recompiled the spoiler regex.```"
		},
	}
	removefuncmap := map[string]func(string) string{
		"emote": func(arg string) string {
			guild.emotemodule.UpdateRegex(guild)
			return "```Unbanned " + arg + " and recompiled the emote regex.```"
		},
		"spoiler": func(arg string) string {
			spoilermodule.UpdateRegex(guild)
			return "```Unbanned " + arg + " and recompiled the spoiler regex.```"
		},
	}
	// We have to initialize commands and modules up here because they depend on the discord channel state
	guild.AddCommand(&AddCommand{addfuncmap})
	guild.AddCommand(&RemoveCommand{removefuncmap})
	guild.AddCommand(&CollectionsCommand{})
	guild.AddCommand(&EchoCommand{})
	guild.AddCommand(&HelpCommand{})
	guild.AddCommand(&NewUsersCommand{})
	guild.AddCommand(&EnableCommand{})
	guild.AddCommand(&DisableCommand{})
	guild.AddCommand(&UpdateCommand{})
	guild.AddCommand(&AKACommand{})
	guild.AddCommand(&AboutCommand{})
	guild.AddCommand(&LastPingCommand{})
	guild.AddCommand(&SetConfigCommand{})
	guild.AddCommand(&GetConfigCommand{})
	guild.AddCommand(&LastSeenCommand{})
	guild.AddCommand(&DumpTablesCommand{})
	guild.AddCommand(episodegencommand)
	guild.AddCommand(&QuoteCommand{})
	guild.AddCommand(&ShipCommand{})
	guild.AddCommand(&AddWitCommand{wittymodule})
	guild.AddCommand(&RemoveWitCommand{wittymodule})
	guild.AddCommand(&SearchCommand{emotes: guild.emotemodule, statements: make(map[string][]*sql.Stmt)})
	guild.AddCommand(&SetStatusCommand{})
	guild.AddCommand(&AddGroupCommand{})
	guild.AddCommand(&JoinGroupCommand{})
	guild.AddCommand(&ListGroupCommand{})
	guild.AddCommand(&LeaveGroupCommand{})
	guild.AddCommand(&PingCommand{})
	guild.AddCommand(&PurgeGroupCommand{})
	guild.AddCommand(&BestPonyCommand{})
	guild.AddCommand(&BanCommand{})
	guild.AddCommand(&DropCommand{})
	guild.AddCommand(&GiveCommand{})
	guild.AddCommand(&ListCommand{})
	guild.AddCommand(&FightCommand{"", 0})
	guild.AddCommand(&CuteCommand{})
	guild.AddCommand(&RollCommand{})
	guild.AddCommand(&ListGuildsCommand{})
	guild.AddCommand(&AnnounceCommand{})
	guild.AddCommand(&QuickConfigCommand{})
	guild.AddCommand(&ScheduleCommand{})
	guild.AddCommand(&NextCommand{})
	guild.AddCommand(&AddEventCommand{})
	guild.AddCommand(&RemoveEventCommand{})
	guild.AddCommand(&AddBirthdayCommand{})
	guild.AddCommand(&RemindMeCommand{})
	guild.AddCommand(&AutoSilenceCommand{spammodule})
	guild.AddCommand(&WipeWelcomeCommand{})
	guild.AddCommand(&SilenceCommand{})
	guild.AddCommand(&UnsilenceCommand{})
	guild.AddCommand(&TimeCommand{})
	guild.AddCommand(&SetTimeZoneCommand{})
	if disableall {
		for k := range guild.commands {
			guild.config.Command_disabled[k] = true
		}
		for _, v := range guild.modules {
			guild.config.Module_disabled[strings.ToLower(v.Name())] = true
		}
		guild.SaveConfig()
	}
	go guild.IdleCheckLoop()
	go guild.SwapStatusLoop()
	debug := "."
	if guild.config.Debug {
		debug = ".\n[DEBUG BUILD]"
	}
	guild.log.Log("[](/sbload)\n Sweetiebot version ", sb.version, " successfully loaded on ", g.Name, debug)
}
// GetChannelGuild returns the guild owning channel id, falling back to
// the main guild when the channel is unknown.
func GetChannelGuild(id string) *GuildInfo {
	if g, ok := sb.GuildChannels[id]; ok {
		return g
	}
	return sb.guilds[SBitoa(sb.MainGuildID)]
}
// GetGuildFromID returns the guild with the given ID, falling back to
// the main guild when the ID is unknown.
func GetGuildFromID(id string) *GuildInfo {
	if g, ok := sb.guilds[id]; ok {
		return g
	}
	return sb.guilds[SBitoa(sb.MainGuildID)]
}
// IsDebug reports whether channel is this guild's designated debug channel.
func (info *GuildInfo) IsDebug(channel string) bool {
	if debugchannel, isdebug := sb.DebugChannels[info.Guild.ID]; isdebug {
		return channel == debugchannel
	}
	return false
}
// SBTypingStart forwards typing notifications to subscribed modules.
func SBTypingStart(s *discordgo.Session, t *discordgo.TypingStart) {
	info := GetChannelGuild(t.ChannelID)
	hooks := info.hooks.OnTypingStart
	ApplyFuncRange(len(hooks), func(i int) {
		if info.ProcessModule("", hooks[i]) {
			hooks[i].OnTypingStart(info, t)
		}
	})
}
// SBMessageCreate is the main message dispatcher: it records activity,
// logs the message to the database, and either executes it as a command
// (messages starting with a single '!') or hands it to the
// OnMessageCreate module hooks.
func SBMessageCreate(s *discordgo.Session, m *discordgo.MessageCreate) {
	if m.Author == nil { // This shouldn't ever happen but we check for it anyway
		return
	}
	t := time.Now().UTC().Unix()
	sb.LastMessages[m.ChannelID] = t
	ch, err := sb.dg.State.Channel(m.ChannelID)
	private := true
	if err == nil { // Because of the magic of web development, we can get a message BEFORE the "channel created" packet for the channel being used by that message.
		private = ch.IsPrivate
	} else {
		fmt.Println("Error retrieving channel "+m.ChannelID+": ", err.Error())
	}
	var info *GuildInfo
	ismainguild := true
	if !private {
		info = GetChannelGuild(m.ChannelID)
		ismainguild = SBatoi(ch.GuildID) == sb.MainGuildID
	} else {
		// Private messages are handled through the main guild's state.
		info = sb.guilds[SBitoa(sb.MainGuildID)]
	}
	cid := SBatoi(m.ChannelID)
	isdebug := info.IsDebug(m.ChannelID)
	if isdebug && !info.config.Debug {
		return // we do this up here so the release build doesn't log messages in bot-debug, but debug builds still log messages from the rest of the channels
	}
	if cid != info.config.LogChannel && !private && ismainguild { // Log this message if it was sent to the main guild only.
		sb.db.AddMessage(SBatoi(m.ID), SBatoi(m.Author.ID), m.ContentWithMentionsReplaced(), cid, m.MentionEveryone)
	}
	if m.Author.ID == sb.SelfID { // ALWAYS discard any of our own messages before analysis.
		SBAddPings(info, m.Message) // If we're discarding a message we still need to add any pings to the ping table
		return
	}
	if boolXOR(info.config.Debug, isdebug) { // debug builds only respond to the debug channel, and release builds ignore it
		return
	}
	// Check if this is a command. If it is, process it as a command, otherwise process it with our modules.
	if len(m.Content) > 1 && m.Content[0] == '!' && (len(m.Content) < 2 || m.Content[1] != '!') { // We check for > 1 here because a single character can't possibly be a valid command
		_, isfree := info.config.FreeChannels[m.ChannelID]
		_, isOwner := sb.Owners[SBatoi(m.Author.ID)]
		isOwner = isOwner || m.Author.ID == info.Guild.OwnerID
		ignore := false
		// Give OnCommand hooks (e.g. spam protection) a chance to veto.
		ApplyFuncRange(len(info.hooks.OnCommand), func(i int) {
			if info.ProcessModule(m.ChannelID, info.hooks.OnCommand[i]) {
				ignore = ignore || info.hooks.OnCommand[i].OnCommand(info, m.Message)
			}
		})
		if ignore && !isOwner { // if true, a module wants us to ignore this command
			return
		}
		args := ParseArguments(m.Content[1:])
		arg := strings.ToLower(args[0])
		alias, ok := info.config.Aliases[arg]
		if ok {
			arg = alias
		}
		c, ok := info.commands[arg]
		if ok {
			cmdname := strings.ToLower(c.Name())
			cch := info.config.Command_channels[cmdname]
			_, disabled := info.config.Command_disabled[cmdname]
			_, restricted := sb.RestrictedCommands[cmdname]
			if disabled && !isOwner {
				return
			}
			if restricted && !ismainguild {
				return
			}
			// Per-command channel whitelist; "!" inverts it into a blacklist.
			if !private && len(cch) > 0 {
				_, reverse := cch["!"]
				_, ok = cch[m.ChannelID]
				if ok == reverse {
					return
				}
			}
			if err != nil || (!private && !isdebug && !isfree) { // Private channels are not limited, nor is the debug channel
				if info.commandlimit.check(info.config.Commandperduration, info.config.Commandmaxduration, t) { // if we've hit the saturation limit, post an error (which itself will only post if the error saturation limit hasn't been hit)
					info.log.Error(m.ChannelID, "You can't input more than "+strconv.Itoa(info.config.Commandperduration)+" commands every "+TimeDiff(time.Duration(info.config.Commandmaxduration)*time.Second)+"!")
					return
				}
				info.commandlimit.append(t)
			}
			if !isOwner && !info.UserHasAnyRole(m.Author.ID, info.config.Command_roles[cmdname]) {
				info.log.Error(m.ChannelID, "You don't have permission to run this command! Allowed Roles: "+info.GetRoles(c))
				return
			}
			// Per-command cooldown, tracked per channel.
			cmdlimit := info.config.Command_limits[cmdname]
			if !isfree && cmdlimit > 0 {
				lastcmd := info.command_last[m.ChannelID][cmdname]
				if !RateLimit(&lastcmd, cmdlimit) {
					info.log.Error(m.ChannelID, "You can only run that command once every "+TimeDiff(time.Duration(cmdlimit)*time.Second)+"!")
					return
				}
				if len(info.command_last[m.ChannelID]) == 0 {
					info.command_last[m.ChannelID] = make(map[string]int64)
				}
				info.command_last[m.ChannelID][cmdname] = t
			}
			result, usepm := c.Process(args[1:], m.Message, info)
			if len(result) > 0 {
				targetchannel := m.ChannelID
				if usepm && !private {
					channel, err := s.UserChannelCreate(m.Author.ID)
					info.log.LogError("Error opening private channel: ", err)
					if err == nil {
						targetchannel = channel.ID
						if rand.Float32() < 0.01 {
							info.SendMessage(m.ChannelID, "Check your ~~privilege~~ Private Messages for my reply!")
						} else {
							info.SendMessage(m.ChannelID, "```Check your Private Messages for my reply!```")
						}
					}
				}
				for len(result) > 1999 { // discord has a 2000 character limit
					if result[0:3] == "```" {
						// Split inside the code fence and re-open it in the next chunk.
						index := strings.LastIndex(result[:1996], "\n")
						if index < 10 { // Ensure we process at least 10 characters to prevent an infinite loop
							index = 1996
						}
						info.SendMessage(targetchannel, result[:index]+"```")
						result = "```" + result[index:]
					} else {
						index := strings.LastIndex(result[:1999], "\n")
						if index < 10 {
							index = 1999
						}
						info.SendMessage(targetchannel, result[:index])
						result = result[index:]
					}
				}
				info.SendMessage(targetchannel, result)
			}
		} else {
			if !info.config.IgnoreInvalidCommands {
				info.log.Error(m.ChannelID, "Sorry, "+args[0]+" is not a valid command.\nFor a list of valid commands, type !help.")
			}
		}
	} else {
		ApplyFuncRange(len(info.hooks.OnMessageCreate), func(i int) {
			if info.ProcessModule(m.ChannelID, info.hooks.OnMessageCreate[i]) {
				info.hooks.OnMessageCreate[i].OnMessageCreate(info, m.Message)
			}
		})
	}
}
// SBMessageUpdate logs edited messages to the database and forwards them
// to the OnMessageUpdate module hooks.
func SBMessageUpdate(s *discordgo.Session, m *discordgo.MessageUpdate) {
	info := GetChannelGuild(m.ChannelID)
	if boolXOR(info.config.Debug, info.IsDebug(m.ChannelID)) {
		return
	}
	if m.Author == nil { // Discord sends an update message with an empty author when certain media links are posted
		original, err := s.ChannelMessage(m.ChannelID, m.ID)
		if err != nil {
			info.log.LogError("Error processing MessageUpdate: ", err)
			return // Fuck it, we can't process this
		}
		m.Author = original.Author
	}
	ch, err := sb.dg.State.Channel(m.ChannelID)
	info.log.LogError("Error retrieving channel ID "+m.ChannelID+": ", err)
	private := true
	if err == nil {
		private = ch.IsPrivate
	}
	cid := SBatoi(m.ChannelID)
	// ch is only dereferenced when !private, which implies err == nil above.
	if cid != info.config.LogChannel && !private && SBatoi(ch.GuildID) == sb.MainGuildID { // Always ignore messages from the log channel
		sb.db.AddMessage(SBatoi(m.ID), SBatoi(m.Author.ID), m.ContentWithMentionsReplaced(), cid, m.MentionEveryone)
	}
	ApplyFuncRange(len(info.hooks.OnMessageUpdate), func(i int) {
		if info.ProcessModule(m.ChannelID, info.hooks.OnMessageUpdate[i]) {
			info.hooks.OnMessageUpdate[i].OnMessageUpdate(info, m.Message)
		}
	})
}
// SBMessageDelete forwards message deletions to subscribed modules.
func SBMessageDelete(s *discordgo.Session, m *discordgo.MessageDelete) {
	info := GetChannelGuild(m.ChannelID)
	if boolXOR(info.config.Debug, info.IsDebug(m.ChannelID)) {
		return // debug/release builds ignore each other's channels
	}
	hooks := info.hooks.OnMessageDelete
	ApplyFuncRange(len(hooks), func(i int) {
		if info.ProcessModule(m.ChannelID, hooks[i]) {
			hooks[i].OnMessageDelete(info, m.Message)
		}
	})
}
// SBMessageAck forwards message acknowledgements to subscribed modules.
func SBMessageAck(s *discordgo.Session, m *discordgo.MessageAck) {
	info := GetChannelGuild(m.ChannelID)
	hooks := info.hooks.OnMessageAck
	ApplyFuncRange(len(hooks), func(i int) {
		if info.ProcessModule(m.ChannelID, hooks[i]) {
			hooks[i].OnMessageAck(info, m)
		}
	})
}
func SBUserUpdate(s *discordgo.Session, m *discordgo.UserUpdate) { ProcessUser(m.User) }
// SBPresenceUpdate records the user and forwards presence changes to
// subscribed modules.
func SBPresenceUpdate(s *discordgo.Session, m *discordgo.PresenceUpdate) {
	info := GetGuildFromID(m.GuildID)
	ProcessUser(m.User)
	hooks := info.hooks.OnPresenceUpdate
	ApplyFuncRange(len(hooks), func(i int) {
		if info.ProcessModule("", hooks[i]) {
			hooks[i].OnPresenceUpdate(info, m)
		}
	})
}
// SBVoiceStateUpdate forwards voice state changes to subscribed modules.
func SBVoiceStateUpdate(s *discordgo.Session, m *discordgo.VoiceStateUpdate) {
	info := GetGuildFromID(m.GuildID)
	hooks := info.hooks.OnVoiceStateUpdate
	ApplyFuncRange(len(hooks), func(i int) {
		if info.ProcessModule("", hooks[i]) {
			hooks[i].OnVoiceStateUpdate(info, m.VoiceState)
		}
	})
}
// SBGuildUpdate refreshes local guild state and notifies subscribed modules.
func SBGuildUpdate(s *discordgo.Session, m *discordgo.GuildUpdate) {
	info := GetChannelGuild(m.ID)
	info.log.Log("Guild update detected, updating ", m.Name)
	info.ProcessGuild(m.Guild)
	hooks := info.hooks.OnGuildUpdate
	ApplyFuncRange(len(hooks), func(i int) {
		if info.ProcessModule("", hooks[i]) {
			hooks[i].OnGuildUpdate(info, m.Guild)
		}
	})
}
// SBGuildMemberAdd records the new member and notifies subscribed modules.
func SBGuildMemberAdd(s *discordgo.Session, m *discordgo.GuildMemberAdd) {
	info := GetGuildFromID(m.GuildID)
	info.ProcessMember(m.Member)
	hooks := info.hooks.OnGuildMemberAdd
	ApplyFuncRange(len(hooks), func(i int) {
		if info.ProcessModule("", hooks[i]) {
			hooks[i].OnGuildMemberAdd(info, m.Member)
		}
	})
}
// SBGuildMemberRemove notifies subscribed modules that a member left.
func SBGuildMemberRemove(s *discordgo.Session, m *discordgo.GuildMemberRemove) {
	info := GetGuildFromID(m.GuildID)
	hooks := info.hooks.OnGuildMemberRemove
	ApplyFuncRange(len(hooks), func(i int) {
		if info.ProcessModule("", hooks[i]) {
			hooks[i].OnGuildMemberRemove(info, m.Member)
		}
	})
}
// SBGuildMemberUpdate re-records the member and notifies subscribed modules.
func SBGuildMemberUpdate(s *discordgo.Session, m *discordgo.GuildMemberUpdate) {
	info := GetGuildFromID(m.GuildID)
	info.ProcessMember(m.Member)
	hooks := info.hooks.OnGuildMemberUpdate
	ApplyFuncRange(len(hooks), func(i int) {
		if info.ProcessModule("", hooks[i]) {
			hooks[i].OnGuildMemberUpdate(info, m.Member)
		}
	})
}
// SBGuildBanAdd notifies subscribed modules of a new ban.
func SBGuildBanAdd(s *discordgo.Session, m *discordgo.GuildBanAdd) {
	info := GetGuildFromID(m.GuildID)
	hooks := info.hooks.OnGuildBanAdd
	ApplyFuncRange(len(hooks), func(i int) {
		if info.ProcessModule("", hooks[i]) {
			hooks[i].OnGuildBanAdd(info, m.GuildBan)
		}
	})
}
// SBGuildBanRemove notifies subscribed modules of a lifted ban.
func SBGuildBanRemove(s *discordgo.Session, m *discordgo.GuildBanRemove) {
	info := GetGuildFromID(m.GuildID)
	hooks := info.hooks.OnGuildBanRemove
	ApplyFuncRange(len(hooks), func(i int) {
		if info.ProcessModule("", hooks[i]) {
			hooks[i].OnGuildBanRemove(info, m.GuildBan)
		}
	})
}
func SBGuildCreate(s *discordgo.Session, m *discordgo.GuildCreate) { ProcessGuildCreate(m.Guild) }
// SBChannelCreate registers a newly-created channel in the
// channel -> guild lookup table, if we know its guild.
func SBChannelCreate(s *discordgo.Session, c *discordgo.ChannelCreate) {
	if guild, ok := sb.guilds[c.GuildID]; ok {
		sb.GuildChannels[c.ID] = guild
	}
}
// SBChannelDelete drops a deleted channel from the channel -> guild table.
func SBChannelDelete(s *discordgo.Session, c *discordgo.ChannelDelete) {
	delete(sb.GuildChannels, c.ID)
}
// ProcessUser upserts u into the users table and returns its numeric ID.
func ProcessUser(u *discordgo.User) uint64 {
	uid := SBatoi(u.ID)
	sb.db.AddUser(uid, u.Email, u.Username, u.Avatar, u.Verified)
	return uid
}
// ProcessMember records the member's user row and membership row,
// using the discord-provided join date when one is present.
func (info *GuildInfo) ProcessMember(u *discordgo.Member) {
	ProcessUser(u.User)
	joined := time.Now().UTC()
	if len(u.JoinedAt) > 0 {
		parsed, err := time.Parse(time.RFC3339Nano, u.JoinedAt)
		if err != nil {
			fmt.Println(err.Error())
			return
		}
		joined = parsed
	}
	sb.db.AddMember(SBatoi(u.User.ID), SBatoi(info.Guild.ID), joined, u.Nick)
}
// ProcessGuildCreate initializes all bot state for a newly available guild.
func ProcessGuildCreate(g *discordgo.Guild) {
	AttachToGuild(g)
}
// ProcessGuild absorbs a guild state update. Discord sometimes sends
// partial updates (missing members/channels/roles); in that case only the
// scalar fields are copied so the existing member and channel lists survive.
func (info *GuildInfo) ProcessGuild(g *discordgo.Guild) {
	if len(g.Members) == 0 || len(g.Channels) == 0 || len(g.Roles) == 0 { // If this is true we were given half a guild update
		info.log.Log("Got half a guild update for " + g.Name)
		info.Guild.Name = g.Name
		info.Guild.Icon = g.Icon
		info.Guild.Region = g.Region
		info.Guild.AfkChannelID = g.AfkChannelID
		info.Guild.EmbedChannelID = g.EmbedChannelID
		info.Guild.OwnerID = g.OwnerID
		info.Guild.JoinedAt = g.JoinedAt
		info.Guild.Splash = g.Splash
		info.Guild.AfkTimeout = g.AfkTimeout
		info.Guild.VerificationLevel = g.VerificationLevel
		info.Guild.EmbedEnabled = g.EmbedEnabled
		info.Guild.Large = g.Large
		info.Guild.DefaultMessageNotifications = g.DefaultMessageNotifications
	} else {
		info.Guild = g
		// Re-point the channel -> guild lookup table at this guild.
		for _, v := range info.Guild.Channels {
			sb.GuildChannels[v.ID] = info
		}
		for _, v := range g.Members {
			info.ProcessMember(v)
		}
	}
}
// FindChannelID returns the ID of the first channel named name in this
// guild, or "" when no such channel exists.
func (info *GuildInfo) FindChannelID(name string) string {
	for _, ch := range info.Guild.Channels {
		if ch.Name == name {
			return ch.ID
		}
	}
	return ""
}
// ApplyFuncRange invokes fn(i) for every i in [0, length), in order.
func ApplyFuncRange(length int, fn func(i int)) {
	i := 0
	for i < length {
		fn(i)
		i++
	}
}
// IdleCheckLoop runs until sb.quit is set, firing OnIdle hooks for
// channels that have been quiet longer than each hook's idle period, and
// OnTick hooks every pass. Polls every 30 seconds.
func (info *GuildInfo) IdleCheckLoop() {
	for !sb.quit {
		channels := info.Guild.Channels
		if info.config.Debug { // override this in debug mode
			c, err := sb.dg.State.Channel(sb.DebugChannels[info.Guild.ID])
			if err == nil {
				channels = []*discordgo.Channel{c}
			} else {
				channels = []*discordgo.Channel{}
			}
		}
		for _, ch := range channels {
			t, exists := sb.LastMessages[ch.ID]
			if exists {
				// Time since the last message seen in this channel.
				diff := time.Now().UTC().Sub(time.Unix(t, 0))
				ApplyFuncRange(len(info.hooks.OnIdle), func(i int) {
					if info.ProcessModule(ch.ID, info.hooks.OnIdle[i]) && diff >= (time.Duration(info.hooks.OnIdle[i].IdlePeriod(info))*time.Second) {
						info.hooks.OnIdle[i].OnIdle(info, ch)
					}
				})
			}
		}
		ApplyFuncRange(len(info.hooks.OnTick), func(i int) {
			if info.ProcessModule("", info.hooks.OnTick[i]) {
				info.hooks.OnTick[i].OnTick(info)
			}
		})
		time.Sleep(30 * time.Second)
	}
}
// WaitForInput blocks until a line is entered on stdin, then flags the
// bot to shut down. Only started when a guild config enables Debug.
func WaitForInput() {
	var input string
	fmt.Scanln(&input)
	sb.quit = true
}
// Initialize constructs the global SweetieBot singleton, connects to the
// database and discord, registers every event handler, and blocks until
// shutdown is requested via sb.quit.
func Initialize(Token string) {
	dbauth, _ := ioutil.ReadFile("db.auth")
	sb = &SweetieBot{
		version:            "0.8.0",
		Owners:             map[uint64]bool{95585199324143616: true, 98605232707080192: true},
		RestrictedCommands: map[string]bool{"search": true, "lastping": true, "setstatus": true},
		MainGuildID:        98609319519453184,
		DebugChannels:      map[string]string{"98609319519453184": "141710126628339712", "105443346608095232": "200112394494541824"},
		GuildChannels:      make(map[string]*GuildInfo),
		quit:               false,
		guilds:             make(map[string]*GuildInfo),
		LastMessages:       make(map[string]int64),
		MaxConfigSize:      1000000,
	}
	// Warm up the default RNG.
	// NOTE(review): the loop condition re-evaluates rand.Intn(20) on every
	// iteration, so the iteration count is itself random — presumably
	// intentional extra mixing, but worth confirming.
	rand.Intn(10)
	for i := 0; i < 20+rand.Intn(20); i++ {
		rand.Intn(50)
	}
	db, err := DB_Load(&Log{0, nil}, "mysql", strings.TrimSpace(string(dbauth)))
	if err != nil {
		fmt.Println("Error loading database", err.Error())
		return
	}
	sb.db = db
	sb.dg, err = discordgo.New(Token)
	if err != nil {
		fmt.Println("Error creating discord session", err.Error())
		return
	}
	// One handler per discord event type.
	sb.dg.AddHandler(SBReady)
	sb.dg.AddHandler(SBTypingStart)
	sb.dg.AddHandler(SBMessageCreate)
	sb.dg.AddHandler(SBMessageUpdate)
	sb.dg.AddHandler(SBMessageDelete)
	sb.dg.AddHandler(SBMessageAck)
	sb.dg.AddHandler(SBUserUpdate)
	sb.dg.AddHandler(SBPresenceUpdate)
	sb.dg.AddHandler(SBVoiceStateUpdate)
	sb.dg.AddHandler(SBGuildUpdate)
	sb.dg.AddHandler(SBGuildMemberAdd)
	sb.dg.AddHandler(SBGuildMemberRemove)
	sb.dg.AddHandler(SBGuildMemberUpdate)
	sb.dg.AddHandler(SBGuildBanAdd)
	sb.dg.AddHandler(SBGuildBanRemove)
	sb.dg.AddHandler(SBGuildCreate)
	sb.dg.AddHandler(SBChannelCreate)
	sb.dg.AddHandler(SBChannelDelete)
	sb.db.LoadStatements()
	fmt.Println("Finished loading database statements")
	//BuildMarkov(1, 1)
	//return
	err = sb.dg.Open()
	if err == nil {
		fmt.Println("Connection established")
		// Spin until something (stdin in debug mode, a command) sets sb.quit.
		for !sb.quit {
			time.Sleep(400 * time.Millisecond)
		}
	} else {
		fmt.Println("Error opening websocket connection: ", err.Error())
	}
	fmt.Println("Sweetiebot quitting")
	sb.dg.Close()
	sb.db.Close()
}
appease the dark gods of discord's API
package sweetiebot
import (
"database/sql"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"reflect"
"regexp"
"strconv"
"strings"
"time"
"github.com/bwmarrin/discordgo"
)
// ModuleHooks stores, per discord event type, the list of modules that
// subscribed to that event during registration.
type ModuleHooks struct {
	OnEvent             []ModuleOnEvent
	OnTypingStart       []ModuleOnTypingStart
	OnMessageCreate     []ModuleOnMessageCreate
	OnMessageUpdate     []ModuleOnMessageUpdate
	OnMessageDelete     []ModuleOnMessageDelete
	OnMessageAck        []ModuleOnMessageAck
	OnPresenceUpdate    []ModuleOnPresenceUpdate
	OnVoiceStateUpdate  []ModuleOnVoiceStateUpdate
	OnGuildUpdate       []ModuleOnGuildUpdate
	OnGuildMemberAdd    []ModuleOnGuildMemberAdd
	OnGuildMemberRemove []ModuleOnGuildMemberRemove
	OnGuildMemberUpdate []ModuleOnGuildMemberUpdate
	OnGuildBanAdd       []ModuleOnGuildBanAdd
	OnGuildBanRemove    []ModuleOnGuildBanRemove
	OnCommand           []ModuleOnCommand
	OnIdle              []ModuleOnIdle
	OnTick              []ModuleOnTick
}
// BotConfig is the per-guild configuration, serialized to "<guildID>.json"
// and edited at runtime through SetConfig.
type BotConfig struct {
	Version               int               `json:"version"`
	Debug                 bool              `json:"debug"`
	Maxerror              int64             `json:"maxerror"`
	Maxwit                int64             `json:"maxwit"`
	Maxbored              int64             `json:"maxbored"`
	DisableBored          int               `json:"disablebored"`
	MaxPMlines            int               `json:"maxpmlines"`
	Maxquotelines         int               `json:"maxquotelines"`
	Maxsearchresults      int               `json:"maxsearchresults"`
	Defaultmarkovlines    int               `json:"defaultmarkovlines"`
	Commandperduration    int               `json:"commandperduration"`
	Commandmaxduration    int64             `json:"commandmaxduration"`
	StatusDelayTime       int               `json:"statusdelaytime"`
	MaxRaidTime           int64             `json:"maxraidtime"`
	RaidSize              int               `json:"raidsize"`
	Witty                 map[string]string `json:"witty"`
	Aliases               map[string]string `json:"aliases"`
	MaxBucket             int               `json:"maxbucket"`
	MaxBucketLength       int               `json:"maxbucketlength"`
	MaxFightHP            int               `json:"maxfighthp"`
	MaxFightDamage        int               `json:"maxfightdamage"`
	MaxImageSpam          int               `json:"maximagespam"`
	MaxAttachSpam         int               `json:"maxattachspam"`
	MaxPingSpam           int               `json:"maxpingspam"`
	MaxMessageSpam        map[int64]int     `json:"maxmessagespam"`
	IgnoreInvalidCommands bool              `json:"ignoreinvalidcommands"`
	UseMemberNames        bool              `json:"usemembernames"`
	Timezone              int               `json:"timezone"`
	AutoSilence           int               `json:"autosilence"`
	AlertRole             uint64            `json:"alertrole"`
	SilentRole            uint64            `json:"silentrole"`
	LogChannel            uint64            `json:"logchannel"`
	ModChannel            uint64            `json:"modchannel"`
	WelcomeChannel        uint64            `json:"welcomechannel"`
	WelcomeMessage        string            `json:"welcomemessage"`
	BirthdayRole          uint64            `json:"birthdayrole"`
	SpoilChannels         []uint64          `json:"spoilchannels"`
	FreeChannels          map[string]bool   `json:"freechannels"`
	Command_roles         map[string]map[string]bool `json:"command_roles"`
	Command_channels      map[string]map[string]bool `json:"command_channels"`
	// NOTE(review): the next four tags are malformed — `json:command_limits`
	// lacks the quotes required by the struct-tag convention, so
	// encoding/json ignores them and falls back to the Go field names
	// ("Command_limits", ...) in saved config files. Correcting them would
	// change the on-disk key names; confirm a config migration before fixing.
	Command_limits   map[string]int64           `json:command_limits`
	Command_disabled map[string]bool            `json:command_disabled`
	Module_disabled  map[string]bool            `json:module_disabled`
	Module_channels  map[string]map[string]bool `json:module_channels`
	Collections      map[string]map[string]bool `json:"collections"`
	Groups           map[string]map[string]bool `json:"groups"`
}
// GuildInfo bundles everything the bot tracks for a single discord guild:
// the guild object itself, its saved configuration, and the modules,
// commands, and hooks registered against it.
type GuildInfo struct {
	Guild        *discordgo.Guild
	log          *Log                        // guild-scoped logger
	command_last map[string]map[string]int64 // channelID -> command name -> last-run unix time, for per-command rate limits
	commandlimit *SaturationLimit            // guild-wide command saturation limiter
	config       BotConfig                   // persisted per-guild settings (serialized to <guildID>.json)
	emotemodule  *EmoteModule                // held directly because several commands need its compiled regex
	hooks        ModuleHooks
	modules      []Module
	commands     map[string]Command // lowercased command name -> handler
}
// SweetieBot is the process-wide bot state shared by every guild.
type SweetieBot struct {
	db                 *BotDB             // database handle
	dg                 *discordgo.Session // discord session
	version            string
	SelfID             string            // our own user ID, filled in on the ready event
	Owners             map[uint64]bool   // user IDs with owner privileges
	RestrictedCommands map[string]bool   // commands usable only on the main guild
	MainGuildID        uint64
	DebugChannels      map[string]string // guildID -> debug channel ID
	quit               bool              // set true to stop all background loops
	guilds             map[string]*GuildInfo // guildID -> guild state
	GuildChannels      map[string]*GuildInfo // channelID -> owning guild
	LastMessages       map[string]int64 // channelID -> unix time of last message (idle detection)
	MaxConfigSize      int              // upper bound on a serialized config, in bytes
}
// sb is the global bot singleton, set once by Initialize.
var sb *SweetieBot

// channelregex matches a channel mention like <#123456>.
var channelregex = regexp.MustCompile("<#[0-9]+>")

// userregex matches a user mention like <@123456> or <@!123456>.
var userregex = regexp.MustCompile("<@!?[0-9]+>")

// repeatregex matches the repeat-interval syntax accepted by event scheduling.
var repeatregex = regexp.MustCompile("repeat -?[0-9]+ (second|minute|hour|day|week|month|quarter|year)s?")

// locUTC is a fixed UTC zone used for consistent time arithmetic.
var locUTC = time.FixedZone("UTC", 0)
// IsMainGuild reports whether info refers to the bot's primary guild.
func (sbot *SweetieBot) IsMainGuild(info *GuildInfo) bool {
	id := SBatoi(info.Guild.ID)
	return id == sbot.MainGuildID
}
// AddCommand registers c in the guild's command table under its lowercased name.
func (info *GuildInfo) AddCommand(c Command) {
	key := strings.ToLower(c.Name())
	info.commands[key] = c
}
// SaveConfig serializes info.config to "<guildID>.json". Any failure —
// marshaling, an oversized config, or the file write itself — is reported
// through the guild log rather than silently dropped.
func (info *GuildInfo) SaveConfig() {
	data, err := json.Marshal(info.config)
	if err != nil {
		info.log.Log("Error writing json: ", err.Error())
		return
	}
	if len(data) > sb.MaxConfigSize {
		info.log.Log("Error saving config file: Config file is too large! Config files cannot exceed " + strconv.Itoa(sb.MaxConfigSize) + " bytes.")
		return
	}
	// BUG FIX: the WriteFile error was previously discarded, so a failed save
	// (permissions, disk full) went completely unnoticed.
	if err := ioutil.WriteFile(info.Guild.ID+".json", data, 0664); err != nil {
		info.log.Log("Error saving config file: ", err.Error())
	}
}
func DeleteFromMapReflect(f reflect.Value, k string) string {
f.SetMapIndex(reflect.ValueOf(k), reflect.Value{})
return "Deleted " + k
}
// SetConfig assigns value (plus any extra values, for slice/map fields) to
// the BotConfig field whose name case-insensitively matches name, using
// reflection to dispatch on the field's static type. It returns a
// human-readable result message and whether the assignment succeeded.
func (info *GuildInfo) SetConfig(name string, value string, extra ...string) (string, bool) {
	name = strings.ToLower(name)
	t := reflect.ValueOf(&info.config).Elem()
	n := t.NumField()
	for i := 0; i < n; i++ {
		if strings.ToLower(t.Type().Field(i).Name) == name {
			f := t.Field(i)
			switch t.Field(i).Interface().(type) {
			case string:
				f.SetString(value)
			case int, int8, int16, int32, int64:
				// Parse errors are deliberately ignored; an unparsable value sets 0.
				k, _ := strconv.ParseInt(value, 10, 64)
				f.SetInt(k)
			case uint, uint8, uint16, uint32:
				k, _ := strconv.ParseUint(value, 10, 64)
				f.SetUint(k)
			case uint64:
				// uint64 fields hold IDs, so pings like <@123> are accepted too.
				f.SetUint(PingAtoi(value))
			case []uint64:
				// Rebuild the slice from value + extra; an empty value clears it.
				f.Set(reflect.MakeSlice(reflect.TypeOf(f.Interface()), 0, 1+len(extra)))
				if len(value) > 0 {
					f.Set(reflect.Append(f, reflect.ValueOf(PingAtoi(value))))
					for _, k := range extra {
						f.Set(reflect.Append(f, reflect.ValueOf(PingAtoi(k))))
					}
				}
			case bool:
				f.SetBool(value == "true")
			case map[string]string:
				// value is the key, extra[0] the value; an empty extra[0] deletes the key.
				value = strings.ToLower(value)
				if len(extra) == 0 {
					return "No extra parameter given for " + name, false
				}
				if f.IsNil() {
					f.Set(reflect.MakeMap(reflect.TypeOf(f.Interface())))
				}
				if len(extra[0]) == 0 {
					return DeleteFromMapReflect(f, value), false
				}
				f.SetMapIndex(reflect.ValueOf(value), reflect.ValueOf(extra[0]))
				return value + ": " + extra[0], true
			case map[string]int64:
				value = strings.ToLower(value)
				if len(extra) == 0 {
					return "No extra parameter given for " + name, false
				}
				if f.IsNil() {
					f.Set(reflect.MakeMap(reflect.TypeOf(f.Interface())))
				}
				if len(extra[0]) == 0 {
					return DeleteFromMapReflect(f, value), false
				}
				k, _ := strconv.ParseInt(extra[0], 10, 64)
				f.SetMapIndex(reflect.ValueOf(value), reflect.ValueOf(k))
				return value + ": " + strconv.FormatInt(k, 10), true
			case map[int64]int:
				// Here the key itself must be an integer.
				ivalue, err := strconv.ParseInt(value, 10, 64)
				if err != nil {
					return value + " is not an integer.", false
				}
				if len(extra) == 0 {
					return "No extra parameter given for " + name, false
				}
				if f.IsNil() {
					f.Set(reflect.MakeMap(reflect.TypeOf(f.Interface())))
				}
				if len(extra[0]) == 0 {
					f.SetMapIndex(reflect.ValueOf(ivalue), reflect.Value{})
					return "Deleted " + value, false
				}
				k, _ := strconv.Atoi(extra[0])
				f.SetMapIndex(reflect.ValueOf(ivalue), reflect.ValueOf(k))
				return value + ": " + strconv.Itoa(k), true
			case map[string]bool:
				// NOTE: this case REPLACES the whole set with value+extra (it
				// always allocates a fresh map), unlike the cases above which
				// update in place.
				f.Set(reflect.MakeMap(reflect.TypeOf(f.Interface())))
				f.SetMapIndex(reflect.ValueOf(StripPing(value)), reflect.ValueOf(true))
				stripped := []string{StripPing(value)}
				for _, k := range extra {
					f.SetMapIndex(reflect.ValueOf(StripPing(k)), reflect.ValueOf(true))
					stripped = append(stripped, StripPing(k))
				}
				return "[" + strings.Join(stripped, ", ") + "]", true
			case map[string]map[string]bool:
				// value selects the outer key; extra becomes its replacement set.
				// No extra values deletes the outer key entirely.
				value = strings.ToLower(value)
				if f.IsNil() {
					f.Set(reflect.MakeMap(reflect.TypeOf(f.Interface())))
				}
				if len(extra) == 0 {
					return DeleteFromMapReflect(f, value), false
				}
				m := reflect.MakeMap(reflect.TypeOf(f.Interface()).Elem())
				stripped := []string{}
				for _, k := range extra {
					m.SetMapIndex(reflect.ValueOf(StripPing(k)), reflect.ValueOf(true))
					stripped = append(stripped, StripPing(k))
				}
				f.SetMapIndex(reflect.ValueOf(value), m)
				return value + ": [" + strings.Join(stripped, ", ") + "]", true
			default:
				info.log.Log(name + " is an unknown type " + t.Field(i).Type().Name())
				return "That config option has an unknown type!", false
			}
			// Scalar/slice cases fall through to here: echo the new value.
			return fmt.Sprint(t.Field(i).Interface()), true
		}
	}
	return "Could not find configuration parameter " + name + "!", false
}
// sbemotereplace inserts a zero-width space into "[](/" sequences so emote
// markup is shown literally instead of being rendered.
func sbemotereplace(s string) string {
	escaper := strings.NewReplacer("[](/", "[\u200B](/")
	return escaper.Replace(s)
}
// SanitizeOutput neutralizes banned emotes in message before it is sent.
func (info *GuildInfo) SanitizeOutput(message string) string {
	em := info.emotemodule
	if em == nil {
		return message
	}
	return em.emoteban.ReplaceAllStringFunc(message, sbemotereplace)
}
// ExtraSanitize strips backticks, defangs emote markup and URLs with
// zero-width spaces, and finally neutralizes mentions via ReplaceAllMentions.
func ExtraSanitize(s string) string {
	replacements := [][2]string{
		{"`", ""},
		{"[](/", "[\u200B](/"},
		{"http://", "http\u200B://"},
		{"https://", "https\u200B://"},
	}
	for _, r := range replacements {
		s = strings.Replace(s, r[0], r[1], -1)
	}
	return ReplaceAllMentions(s)
}
// SendMessage posts message to channelID after sanitizing banned emotes.
func (info *GuildInfo) SendMessage(channelID string, message string) {
	sanitized := info.SanitizeOutput(message)
	sb.dg.ChannelMessageSend(channelID, sanitized)
}
// ProcessModule reports whether module m should run for channelID, honoring
// the guild's disabled-module list and per-module channel restrictions.
func (info *GuildInfo) ProcessModule(channelID string, m Module) bool {
	name := strings.ToLower(m.Name())
	if _, off := info.config.Module_disabled[name]; off {
		return false
	}
	channels := info.config.Module_channels[name]
	// With no channel to check, or no channel restrictions, the module runs.
	if len(channelID) == 0 || len(channels) == 0 {
		return true
	}
	_, reverse := channels["!"] // a "!" entry inverts the list into a blacklist
	_, listed := channels[channelID]
	return listed != reverse
}
// SwapStatusLoop periodically rotates the bot's status line through the
// guild's "status" collection. Only the main guild drives the status.
func (info *GuildInfo) SwapStatusLoop() {
	if !sb.IsMainGuild(info) {
		return
	}
	for !sb.quit {
		statuses := info.config.Collections["status"]
		if len(statuses) > 0 {
			sb.dg.UpdateStatus(0, MapGetRandomItem(statuses))
		}
		time.Sleep(time.Duration(info.config.StatusDelayTime) * time.Second)
	}
}
// ChangeBotName updates the bot account's username and avatar, reading the
// account credentials from the "email" and "passwd" files next to the binary.
// BUG FIX: the three file-read errors were previously discarded, so a missing
// credential file produced a confusing UserUpdate failure; each read is now
// checked and reported before any network call is made.
func ChangeBotName(s *discordgo.Session, name string, avatarfile string) {
	binary, err := ioutil.ReadFile(avatarfile)
	if err != nil {
		fmt.Println("Error reading avatar file: " + err.Error())
		return
	}
	email, err := ioutil.ReadFile("email")
	if err != nil {
		fmt.Println("Error reading email file: " + err.Error())
		return
	}
	password, err := ioutil.ReadFile("passwd")
	if err != nil {
		fmt.Println("Error reading passwd file: " + err.Error())
		return
	}
	avatar := base64.StdEncoding.EncodeToString(binary)
	_, err = s.UserUpdate(strings.TrimSpace(string(email)), strings.TrimSpace(string(password)), name, "data:image/jpeg;base64,"+avatar, "")
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	fmt.Println("Changed username successfully")
}
//func SBEvent(s *discordgo.Session, e *discordgo.Event) { ApplyFuncRange(len(info.hooks.OnEvent), func(i int) { if(ProcessModule("", info.hooks.OnEvent[i])) { info.hooks.OnEvent[i].OnEvent(s, e) } }) }
// SBReady records our own user ID once discord signals the session is ready.
// Guild initialization happens later, in the per-guild create events.
func SBReady(s *discordgo.Session, r *discordgo.Ready) {
	// BUG FIX: corrected the "receieved" typo in the startup log message.
	fmt.Println("Ready message received, waiting for guilds...")
	sb.SelfID = r.User.ID
	// Only used to change sweetiebot's name or avatar
	//ChangeBotName(s, "Sweetie", "avatar.jpg")
}
// AttachToGuild creates (or refreshes) the GuildInfo for g: it loads the
// guild's saved config (falling back to default.json with everything
// disabled), ensures every map/collection field is allocated, registers all
// modules and commands, and starts the guild's background loops.
func AttachToGuild(g *discordgo.Guild) {
	// Already attached? Just refresh the cached guild state.
	guild, exists := sb.guilds[g.ID]
	if exists {
		guild.ProcessGuild(g)
		return
	}
	fmt.Println("Initializing " + g.Name)
	guild = &GuildInfo{
		Guild:        g,
		command_last: make(map[string]map[string]int64),
		commandlimit: &SaturationLimit{[]int64{}, 0, AtomicFlag{0}},
		commands:     make(map[string]Command),
		emotemodule:  nil,
	}
	guild.log = &Log{0, guild}
	// Load per-guild config; with none on disk, seed from default.json and
	// mark everything to be disabled until an admin opts in.
	config, err := ioutil.ReadFile(g.ID + ".json")
	disableall := false
	if err != nil {
		config, _ = ioutil.ReadFile("default.json")
		disableall = true
	}
	err = json.Unmarshal(config, &guild.config)
	if err != nil {
		fmt.Println("Error reading config file for "+g.Name+": ", err.Error())
	}
	MigrateSettings(guild)
	guild.commandlimit.times = make([]int64, guild.config.Commandperduration*2, guild.config.Commandperduration*2)
	// JSON decoding leaves absent maps nil; allocate each one before use.
	if len(guild.config.Witty) == 0 {
		guild.config.Witty = make(map[string]string)
	}
	if len(guild.config.Aliases) == 0 {
		guild.config.Aliases = make(map[string]string)
	}
	if len(guild.config.FreeChannels) == 0 {
		guild.config.FreeChannels = make(map[string]bool)
	}
	if len(guild.config.Command_roles) == 0 {
		guild.config.Command_roles = make(map[string]map[string]bool)
	}
	if len(guild.config.Command_channels) == 0 {
		guild.config.Command_channels = make(map[string]map[string]bool)
	}
	if len(guild.config.Command_limits) == 0 {
		guild.config.Command_limits = make(map[string]int64)
	}
	if len(guild.config.Command_disabled) == 0 {
		guild.config.Command_disabled = make(map[string]bool)
	}
	if len(guild.config.Module_disabled) == 0 {
		guild.config.Module_disabled = make(map[string]bool)
	}
	if len(guild.config.Module_channels) == 0 {
		guild.config.Module_channels = make(map[string]map[string]bool)
	}
	if len(guild.config.Groups) == 0 {
		guild.config.Groups = make(map[string]map[string]bool)
	}
	if len(guild.config.Collections) == 0 {
		guild.config.Collections = make(map[string]map[string]bool)
	}
	// Guarantee the well-known collections exist even in an old config.
	collections := []string{"emote", "bored", "cute", "status", "spoiler", "bucket"}
	for _, v := range collections {
		_, ok := guild.config.Collections[v]
		if !ok {
			guild.config.Collections[v] = make(map[string]bool)
		}
	}
	if sb.IsMainGuild(guild) {
		sb.db.log = guild.log
	}
	if guild.config.Debug { // The server does not necessarily tie a standard input to the program
		go WaitForInput()
	}
	sb.guilds[g.ID] = guild
	guild.ProcessGuild(g)
	// Module construction. These must exist before command registration below,
	// since several commands hold references to specific modules.
	episodegencommand := &EpisodeGenCommand{}
	guild.emotemodule = &EmoteModule{}
	spoilermodule := &SpoilerModule{}
	wittymodule := &WittyModule{}
	spammodule := &SpamModule{}
	guild.modules = make([]Module, 0, 6)
	guild.modules = append(guild.modules, spammodule)
	guild.modules = append(guild.modules, &PingModule{})
	guild.modules = append(guild.modules, guild.emotemodule)
	guild.modules = append(guild.modules, wittymodule)
	guild.modules = append(guild.modules, &BoredModule{Episodegen: episodegencommand})
	guild.modules = append(guild.modules, spoilermodule)
	guild.modules = append(guild.modules, &ScheduleModule{})
	for _, v := range guild.modules {
		v.Register(guild)
	}
	// Collection-specific add/remove callbacks: banning an emote or spoiler
	// requires recompiling the corresponding regex, and a failed compile must
	// roll the addition back.
	addfuncmap := map[string]func(string) string{
		"emote": func(arg string) string {
			r := guild.emotemodule.UpdateRegex(guild)
			if !r {
				delete(guild.config.Collections["emote"], arg)
				guild.emotemodule.UpdateRegex(guild)
				return "```Failed to ban " + arg + " because regex compilation failed.```"
			}
			return "```Banned " + arg + " and recompiled the emote regex.```"
		},
		"spoiler": func(arg string) string {
			r := spoilermodule.UpdateRegex(guild)
			if !r {
				delete(guild.config.Collections["spoiler"], arg)
				spoilermodule.UpdateRegex(guild)
				return "```Failed to ban " + arg + " because regex compilation failed.```"
			}
			return "```Banned " + arg + " and recompiled the spoiler regex.```"
		},
	}
	removefuncmap := map[string]func(string) string{
		"emote": func(arg string) string {
			guild.emotemodule.UpdateRegex(guild)
			return "```Unbanned " + arg + " and recompiled the emote regex.```"
		},
		"spoiler": func(arg string) string {
			spoilermodule.UpdateRegex(guild)
			return "```Unbanned " + arg + " and recompiled the spoiler regex.```"
		},
	}
	// We have to initialize commands and modules up here because they depend on the discord channel state
	guild.AddCommand(&AddCommand{addfuncmap})
	guild.AddCommand(&RemoveCommand{removefuncmap})
	guild.AddCommand(&CollectionsCommand{})
	guild.AddCommand(&EchoCommand{})
	guild.AddCommand(&HelpCommand{})
	guild.AddCommand(&NewUsersCommand{})
	guild.AddCommand(&EnableCommand{})
	guild.AddCommand(&DisableCommand{})
	guild.AddCommand(&UpdateCommand{})
	guild.AddCommand(&AKACommand{})
	guild.AddCommand(&AboutCommand{})
	guild.AddCommand(&LastPingCommand{})
	guild.AddCommand(&SetConfigCommand{})
	guild.AddCommand(&GetConfigCommand{})
	guild.AddCommand(&LastSeenCommand{})
	guild.AddCommand(&DumpTablesCommand{})
	guild.AddCommand(episodegencommand)
	guild.AddCommand(&QuoteCommand{})
	guild.AddCommand(&ShipCommand{})
	guild.AddCommand(&AddWitCommand{wittymodule})
	guild.AddCommand(&RemoveWitCommand{wittymodule})
	guild.AddCommand(&SearchCommand{emotes: guild.emotemodule, statements: make(map[string][]*sql.Stmt)})
	guild.AddCommand(&SetStatusCommand{})
	guild.AddCommand(&AddGroupCommand{})
	guild.AddCommand(&JoinGroupCommand{})
	guild.AddCommand(&ListGroupCommand{})
	guild.AddCommand(&LeaveGroupCommand{})
	guild.AddCommand(&PingCommand{})
	guild.AddCommand(&PurgeGroupCommand{})
	guild.AddCommand(&BestPonyCommand{})
	guild.AddCommand(&BanCommand{})
	guild.AddCommand(&DropCommand{})
	guild.AddCommand(&GiveCommand{})
	guild.AddCommand(&ListCommand{})
	guild.AddCommand(&FightCommand{"", 0})
	guild.AddCommand(&CuteCommand{})
	guild.AddCommand(&RollCommand{})
	guild.AddCommand(&ListGuildsCommand{})
	guild.AddCommand(&AnnounceCommand{})
	guild.AddCommand(&QuickConfigCommand{})
	guild.AddCommand(&ScheduleCommand{})
	guild.AddCommand(&NextCommand{})
	guild.AddCommand(&AddEventCommand{})
	guild.AddCommand(&RemoveEventCommand{})
	guild.AddCommand(&AddBirthdayCommand{})
	guild.AddCommand(&RemindMeCommand{})
	guild.AddCommand(&AutoSilenceCommand{spammodule})
	guild.AddCommand(&WipeWelcomeCommand{})
	guild.AddCommand(&SilenceCommand{})
	guild.AddCommand(&UnsilenceCommand{})
	guild.AddCommand(&TimeCommand{})
	guild.AddCommand(&SetTimeZoneCommand{})
	// A brand-new guild starts with everything off; an admin enables features explicitly.
	if disableall {
		for k, _ := range guild.commands {
			guild.config.Command_disabled[k] = true
		}
		for _, v := range guild.modules {
			guild.config.Module_disabled[strings.ToLower(v.Name())] = true
		}
		guild.SaveConfig()
	}
	go guild.IdleCheckLoop()
	go guild.SwapStatusLoop()
	debug := "."
	if guild.config.Debug {
		debug = ".\n[DEBUG BUILD]"
	}
	guild.log.Log("[](/sbload)\n Sweetiebot version ", sb.version, " successfully loaded on ", g.Name, debug)
}
// GetChannelGuild maps a channel ID to its guild, falling back to the main guild.
func GetChannelGuild(id string) *GuildInfo {
	if g, ok := sb.GuildChannels[id]; ok {
		return g
	}
	return sb.guilds[SBitoa(sb.MainGuildID)]
}
// GetGuildFromID looks up a guild by its ID, falling back to the main guild.
func GetGuildFromID(id string) *GuildInfo {
	if g, ok := sb.guilds[id]; ok {
		return g
	}
	return sb.guilds[SBitoa(sb.MainGuildID)]
}
// IsDebug reports whether channel is this guild's designated debug channel.
func (info *GuildInfo) IsDebug(channel string) bool {
	if debugchannel, ok := sb.DebugChannels[info.Guild.ID]; ok {
		return channel == debugchannel
	}
	return false
}
// SBTypingStart fans a typing-start event out to every registered hook.
func SBTypingStart(s *discordgo.Session, t *discordgo.TypingStart) {
	guild := GetChannelGuild(t.ChannelID)
	hooks := guild.hooks.OnTypingStart
	ApplyFuncRange(len(hooks), func(i int) {
		if guild.ProcessModule("", hooks[i]) {
			hooks[i].OnTypingStart(guild, t)
		}
	})
}
// SBMessageCreate is the main message entry point: it logs the message,
// filters debug/release channel mismatches, and then either dispatches a
// "!command" through the command pipeline (permissions, rate limits, channel
// restrictions) or hands the message to the OnMessageCreate module hooks.
func SBMessageCreate(s *discordgo.Session, m *discordgo.MessageCreate) {
	if m.Author == nil { // This shouldn't ever happen but we check for it anyway
		return
	}
	t := time.Now().UTC().Unix()
	sb.LastMessages[m.ChannelID] = t
	ch, err := sb.dg.State.Channel(m.ChannelID)
	private := true
	if err == nil { // Because of the magic of web development, we can get a message BEFORE the "channel created" packet for the channel being used by that message.
		private = ch.IsPrivate
	} else {
		fmt.Println("Error retrieving channel "+m.ChannelID+": ", err.Error())
	}
	// Private messages are processed in the context of the main guild.
	var info *GuildInfo
	ismainguild := true
	if !private {
		info = GetChannelGuild(m.ChannelID)
		ismainguild = SBatoi(ch.GuildID) == sb.MainGuildID
	} else {
		info = sb.guilds[SBitoa(sb.MainGuildID)]
	}
	cid := SBatoi(m.ChannelID)
	isdebug := info.IsDebug(m.ChannelID)
	if isdebug && !info.config.Debug {
		return // we do this up here so the release build doesn't log messages in bot-debug, but debug builds still log messages from the rest of the channels
	}
	if cid != info.config.LogChannel && !private && ismainguild { // Log this message if it was sent to the main guild only.
		sb.db.AddMessage(SBatoi(m.ID), SBatoi(m.Author.ID), m.ContentWithMentionsReplaced(), cid, m.MentionEveryone)
	}
	if m.Author.ID == sb.SelfID { // ALWAYS discard any of our own messages before analysis.
		SBAddPings(info, m.Message) // If we're discarding a message we still need to add any pings to the ping table
		return
	}
	if boolXOR(info.config.Debug, isdebug) { // debug builds only respond to the debug channel, and release builds ignore it
		return
	}
	// Check if this is a command. If it is, process it as a command, otherwise process it with our modules.
	if len(m.Content) > 1 && m.Content[0] == '!' && (len(m.Content) < 2 || m.Content[1] != '!') { // We check for > 1 here because a single character can't possibly be a valid command
		_, isfree := info.config.FreeChannels[m.ChannelID]
		_, isOwner := sb.Owners[SBatoi(m.Author.ID)]
		isOwner = isOwner || m.Author.ID == info.Guild.OwnerID
		// Give modules (e.g. anti-spam) a chance to veto the command; owners bypass the veto.
		ignore := false
		ApplyFuncRange(len(info.hooks.OnCommand), func(i int) {
			if info.ProcessModule(m.ChannelID, info.hooks.OnCommand[i]) {
				ignore = ignore || info.hooks.OnCommand[i].OnCommand(info, m.Message)
			}
		})
		if ignore && !isOwner { // if true, a module wants us to ignore this command
			return
		}
		// Resolve aliases, then look the command up by its (lowercased) name.
		args := ParseArguments(m.Content[1:])
		arg := strings.ToLower(args[0])
		alias, ok := info.config.Aliases[arg]
		if ok {
			arg = alias
		}
		c, ok := info.commands[arg]
		if ok {
			cmdname := strings.ToLower(c.Name())
			cch := info.config.Command_channels[cmdname]
			_, disabled := info.config.Command_disabled[cmdname]
			_, restricted := sb.RestrictedCommands[cmdname]
			if disabled && !isOwner {
				return
			}
			if restricted && !ismainguild {
				return
			}
			// Per-command channel whitelist (or blacklist, when "!" is present).
			if !private && len(cch) > 0 {
				_, reverse := cch["!"]
				_, ok = cch[m.ChannelID]
				if ok == reverse {
					return
				}
			}
			if err != nil || (!private && !isdebug && !isfree) { // Private channels are not limited, nor is the debug channel
				if info.commandlimit.check(info.config.Commandperduration, info.config.Commandmaxduration, t) { // if we've hit the saturation limit, post an error (which itself will only post if the error saturation limit hasn't been hit)
					info.log.Error(m.ChannelID, "You can't input more than "+strconv.Itoa(info.config.Commandperduration)+" commands every "+TimeDiff(time.Duration(info.config.Commandmaxduration)*time.Second)+"!")
					return
				}
				info.commandlimit.append(t)
			}
			if !isOwner && !info.UserHasAnyRole(m.Author.ID, info.config.Command_roles[cmdname]) {
				info.log.Error(m.ChannelID, "You don't have permission to run this command! Allowed Roles: "+info.GetRoles(c))
				return
			}
			// Per-command cooldown, tracked per channel.
			cmdlimit := info.config.Command_limits[cmdname]
			if !isfree && cmdlimit > 0 {
				lastcmd := info.command_last[m.ChannelID][cmdname]
				if !RateLimit(&lastcmd, cmdlimit) {
					info.log.Error(m.ChannelID, "You can only run that command once every "+TimeDiff(time.Duration(cmdlimit)*time.Second)+"!")
					return
				}
				if len(info.command_last[m.ChannelID]) == 0 {
					info.command_last[m.ChannelID] = make(map[string]int64)
				}
				info.command_last[m.ChannelID][cmdname] = t
			}
			// Run the command; optionally redirect its output to a PM.
			result, usepm := c.Process(args[1:], m.Message, info)
			if len(result) > 0 {
				targetchannel := m.ChannelID
				if usepm && !private {
					channel, err := s.UserChannelCreate(m.Author.ID)
					info.log.LogError("Error opening private channel: ", err)
					if err == nil {
						targetchannel = channel.ID
						if rand.Float32() < 0.01 {
							info.SendMessage(m.ChannelID, "Check your ~~privilege~~ Private Messages for my reply!")
						} else {
							info.SendMessage(m.ChannelID, "```Check your Private Messages for my reply!```")
						}
					}
				}
				// Split long replies, keeping code fences balanced across chunks.
				for len(result) > 1999 { // discord has a 2000 character limit
					if result[0:3] == "```" {
						index := strings.LastIndex(result[:1996], "\n")
						if index < 10 { // Ensure we process at least 10 characters to prevent an infinite loop
							index = 1996
						}
						info.SendMessage(targetchannel, result[:index]+"```")
						result = "```" + result[index:]
					} else {
						index := strings.LastIndex(result[:1999], "\n")
						if index < 10 {
							index = 1999
						}
						info.SendMessage(targetchannel, result[:index])
						result = result[index:]
					}
				}
				info.SendMessage(targetchannel, result)
			}
		} else {
			if !info.config.IgnoreInvalidCommands {
				info.log.Error(m.ChannelID, "Sorry, "+args[0]+" is not a valid command.\nFor a list of valid commands, type !help.")
			}
		}
	} else {
		// Not a command: hand the message to the message-create module hooks.
		ApplyFuncRange(len(info.hooks.OnMessageCreate), func(i int) {
			if info.ProcessModule(m.ChannelID, info.hooks.OnMessageCreate[i]) {
				info.hooks.OnMessageCreate[i].OnMessageCreate(info, m.Message)
			}
		})
	}
}
// SBMessageUpdate logs edited messages (main guild, non-log-channel only)
// and forwards them to the OnMessageUpdate module hooks.
func SBMessageUpdate(s *discordgo.Session, m *discordgo.MessageUpdate) {
	info := GetChannelGuild(m.ChannelID)
	if boolXOR(info.config.Debug, info.IsDebug(m.ChannelID)) {
		return
	}
	if m.Author == nil { // Discord sends an update message with an empty author when certain media links are posted
		original, err := s.ChannelMessage(m.ChannelID, m.ID)
		if err != nil {
			info.log.LogError("Error processing MessageUpdate: ", err)
			return // give up; we can't process this one
		}
		m.Author = original.Author
	}
	ch, err := sb.dg.State.Channel(m.ChannelID)
	info.log.LogError("Error retrieving channel ID "+m.ChannelID+": ", err)
	// Assume private until the channel lookup says otherwise (see SBMessageCreate).
	private := true
	if err == nil {
		private = ch.IsPrivate
	}
	cid := SBatoi(m.ChannelID)
	if cid != info.config.LogChannel && !private && SBatoi(ch.GuildID) == sb.MainGuildID { // Always ignore messages from the log channel
		sb.db.AddMessage(SBatoi(m.ID), SBatoi(m.Author.ID), m.ContentWithMentionsReplaced(), cid, m.MentionEveryone)
	}
	ApplyFuncRange(len(info.hooks.OnMessageUpdate), func(i int) {
		if info.ProcessModule(m.ChannelID, info.hooks.OnMessageUpdate[i]) {
			info.hooks.OnMessageUpdate[i].OnMessageUpdate(info, m.Message)
		}
	})
}
// SBMessageDelete notifies modules that a message was deleted, unless the
// event belongs to the wrong build (debug vs. release) for this channel.
func SBMessageDelete(s *discordgo.Session, m *discordgo.MessageDelete) {
	guild := GetChannelGuild(m.ChannelID)
	if boolXOR(guild.config.Debug, guild.IsDebug(m.ChannelID)) {
		return
	}
	hooks := guild.hooks.OnMessageDelete
	ApplyFuncRange(len(hooks), func(i int) {
		if guild.ProcessModule(m.ChannelID, hooks[i]) {
			hooks[i].OnMessageDelete(guild, m.Message)
		}
	})
}
// SBMessageAck forwards message-acknowledgement events to the ack hooks.
func SBMessageAck(s *discordgo.Session, m *discordgo.MessageAck) {
	guild := GetChannelGuild(m.ChannelID)
	hooks := guild.hooks.OnMessageAck
	ApplyFuncRange(len(hooks), func(i int) {
		if guild.ProcessModule(m.ChannelID, hooks[i]) {
			hooks[i].OnMessageAck(guild, m)
		}
	})
}
// SBUserUpdate records discord user updates in the user table.
func SBUserUpdate(s *discordgo.Session, m *discordgo.UserUpdate) {
	ProcessUser(m.User)
}
// SBPresenceUpdate records the user, then fans the event out to presence hooks.
func SBPresenceUpdate(s *discordgo.Session, m *discordgo.PresenceUpdate) {
	guild := GetGuildFromID(m.GuildID)
	ProcessUser(m.User)
	hooks := guild.hooks.OnPresenceUpdate
	ApplyFuncRange(len(hooks), func(i int) {
		if guild.ProcessModule("", hooks[i]) {
			hooks[i].OnPresenceUpdate(guild, m)
		}
	})
}
// SBVoiceStateUpdate fans voice-state changes out to the voice hooks.
func SBVoiceStateUpdate(s *discordgo.Session, m *discordgo.VoiceStateUpdate) {
	guild := GetGuildFromID(m.GuildID)
	hooks := guild.hooks.OnVoiceStateUpdate
	ApplyFuncRange(len(hooks), func(i int) {
		if guild.ProcessModule("", hooks[i]) {
			hooks[i].OnVoiceStateUpdate(guild, m.VoiceState)
		}
	})
}
// SBGuildUpdate refreshes cached guild state, then informs guild-update hooks.
func SBGuildUpdate(s *discordgo.Session, m *discordgo.GuildUpdate) {
	guild := GetChannelGuild(m.ID)
	guild.log.Log("Guild update detected, updating ", m.Name)
	guild.ProcessGuild(m.Guild)
	hooks := guild.hooks.OnGuildUpdate
	ApplyFuncRange(len(hooks), func(i int) {
		if guild.ProcessModule("", hooks[i]) {
			hooks[i].OnGuildUpdate(guild, m.Guild)
		}
	})
}
// SBGuildMemberAdd records the new member, then informs member-add hooks.
func SBGuildMemberAdd(s *discordgo.Session, m *discordgo.GuildMemberAdd) {
	guild := GetGuildFromID(m.GuildID)
	guild.ProcessMember(m.Member)
	hooks := guild.hooks.OnGuildMemberAdd
	ApplyFuncRange(len(hooks), func(i int) {
		if guild.ProcessModule("", hooks[i]) {
			hooks[i].OnGuildMemberAdd(guild, m.Member)
		}
	})
}
// SBGuildMemberRemove informs member-remove hooks that someone left.
func SBGuildMemberRemove(s *discordgo.Session, m *discordgo.GuildMemberRemove) {
	guild := GetGuildFromID(m.GuildID)
	hooks := guild.hooks.OnGuildMemberRemove
	ApplyFuncRange(len(hooks), func(i int) {
		if guild.ProcessModule("", hooks[i]) {
			hooks[i].OnGuildMemberRemove(guild, m.Member)
		}
	})
}
// SBGuildMemberUpdate re-records the member, then informs member-update hooks.
func SBGuildMemberUpdate(s *discordgo.Session, m *discordgo.GuildMemberUpdate) {
	guild := GetGuildFromID(m.GuildID)
	guild.ProcessMember(m.Member)
	hooks := guild.hooks.OnGuildMemberUpdate
	ApplyFuncRange(len(hooks), func(i int) {
		if guild.ProcessModule("", hooks[i]) {
			hooks[i].OnGuildMemberUpdate(guild, m.Member)
		}
	})
}
// SBGuildBanAdd informs ban-add hooks of a new guild ban.
func SBGuildBanAdd(s *discordgo.Session, m *discordgo.GuildBanAdd) {
	guild := GetGuildFromID(m.GuildID)
	hooks := guild.hooks.OnGuildBanAdd
	ApplyFuncRange(len(hooks), func(i int) {
		if guild.ProcessModule("", hooks[i]) {
			hooks[i].OnGuildBanAdd(guild, m.GuildBan)
		}
	})
}
// SBGuildBanRemove informs ban-remove hooks that a ban was lifted.
func SBGuildBanRemove(s *discordgo.Session, m *discordgo.GuildBanRemove) {
	guild := GetGuildFromID(m.GuildID)
	hooks := guild.hooks.OnGuildBanRemove
	ApplyFuncRange(len(hooks), func(i int) {
		if guild.ProcessModule("", hooks[i]) {
			hooks[i].OnGuildBanRemove(guild, m.GuildBan)
		}
	})
}
// SBGuildCreate attaches the bot to each guild discord announces.
func SBGuildCreate(s *discordgo.Session, m *discordgo.GuildCreate) {
	ProcessGuildCreate(m.Guild)
}
// SBChannelCreate registers a newly created channel in the channel->guild table.
func SBChannelCreate(s *discordgo.Session, c *discordgo.ChannelCreate) {
	if guild, ok := sb.guilds[c.GuildID]; ok {
		sb.GuildChannels[c.ID] = guild
	}
}
// SBChannelDelete drops a deleted channel from the channel->guild table.
func SBChannelDelete(s *discordgo.Session, c *discordgo.ChannelDelete) {
	delete(sb.GuildChannels, c.ID)
}
// ProcessUser upserts u into the user table and returns its numeric ID.
func ProcessUser(u *discordgo.User) uint64 {
	uid := SBatoi(u.ID)
	sb.db.AddUser(uid, u.Email, u.Username, u.Avatar, u.Verified)
	return uid
}
// ProcessMember upserts member u into the user and member tables, preferring
// the guild-supplied join timestamp over "now" when one is present.
func (info *GuildInfo) ProcessMember(u *discordgo.Member) {
	ProcessUser(u.User)
	joined := time.Now().UTC()
	if len(u.JoinedAt) > 0 { // Parse join date and update user table only if it is less than our current first seen date.
		parsed, err := time.Parse(time.RFC3339Nano, u.JoinedAt)
		if err != nil {
			fmt.Println(err.Error())
			return
		}
		joined = parsed
	}
	sb.db.AddMember(SBatoi(u.User.ID), SBatoi(info.Guild.ID), joined, u.Nick)
}
// ProcessGuildCreate handles a guild-create event by attaching to the guild.
func ProcessGuildCreate(g *discordgo.Guild) { AttachToGuild(g) }
// ProcessGuild merges an incoming guild snapshot into our cached state.
// Discord sometimes sends partial guild updates; when members, channels, or
// roles are missing we copy only the scalar fields so the cached lists are
// not clobbered. A full snapshot replaces the cached guild wholesale and
// re-registers its channels and members.
func (info *GuildInfo) ProcessGuild(g *discordgo.Guild) {
	if len(g.Members) == 0 || len(g.Channels) == 0 || len(g.Roles) == 0 { // If this is true we were given half a guild update
		info.log.Log("Got half a guild update for " + g.Name)
		info.Guild.Name = g.Name
		info.Guild.Icon = g.Icon
		info.Guild.Region = g.Region
		info.Guild.AfkChannelID = g.AfkChannelID
		info.Guild.EmbedChannelID = g.EmbedChannelID
		info.Guild.OwnerID = g.OwnerID
		info.Guild.JoinedAt = g.JoinedAt
		info.Guild.Splash = g.Splash
		info.Guild.AfkTimeout = g.AfkTimeout
		info.Guild.VerificationLevel = g.VerificationLevel
		info.Guild.EmbedEnabled = g.EmbedEnabled
		info.Guild.Large = g.Large
		info.Guild.DefaultMessageNotifications = g.DefaultMessageNotifications
	} else {
		info.Guild = g
		// Refresh the channel->guild lookup table for this guild's channels.
		for _, v := range info.Guild.Channels {
			sb.GuildChannels[v.ID] = info
		}
		for _, v := range g.Members {
			info.ProcessMember(v)
		}
	}
}
// FindChannelID returns the ID of the guild channel called name, or "" if
// no channel by that name exists.
func (info *GuildInfo) FindChannelID(name string) string {
	for _, ch := range info.Guild.Channels {
		if ch.Name == name {
			return ch.ID
		}
	}
	return ""
}
// ApplyFuncRange invokes fn(i) once for every i in [0, length).
func ApplyFuncRange(length int, fn func(i int)) {
	i := 0
	for i < length {
		fn(i)
		i++
	}
}
// IdleCheckLoop runs until the bot quits, waking every 30 seconds to fire
// OnIdle hooks for channels that have been quiet longer than each hook's
// idle period, and OnTick hooks unconditionally.
func (info *GuildInfo) IdleCheckLoop() {
	for !sb.quit {
		channels := info.Guild.Channels
		if info.config.Debug { // override this in debug mode
			c, err := sb.dg.State.Channel(sb.DebugChannels[info.Guild.ID])
			if err == nil {
				channels = []*discordgo.Channel{c}
			} else {
				channels = []*discordgo.Channel{}
			}
		}
		for _, ch := range channels {
			// Only channels we've seen a message in can be judged idle.
			t, exists := sb.LastMessages[ch.ID]
			if exists {
				diff := time.Now().UTC().Sub(time.Unix(t, 0))
				ApplyFuncRange(len(info.hooks.OnIdle), func(i int) {
					if info.ProcessModule(ch.ID, info.hooks.OnIdle[i]) && diff >= (time.Duration(info.hooks.OnIdle[i].IdlePeriod(info))*time.Second) {
						info.hooks.OnIdle[i].OnIdle(info, ch)
					}
				})
			}
		}
		ApplyFuncRange(len(info.hooks.OnTick), func(i int) {
			if info.ProcessModule("", info.hooks.OnTick[i]) {
				info.hooks.OnTick[i].OnTick(info)
			}
		})
		time.Sleep(30 * time.Second)
	}
}
// WaitForInput blocks until a line arrives on stdin, then asks the bot to quit.
func WaitForInput() {
	var line string
	fmt.Scanln(&line)
	sb.quit = true
}
// Initialize constructs the global SweetieBot singleton, connects to the
// database and to discord with the given bot Token, registers every event
// handler, and then blocks until sb.quit is set.
func Initialize(Token string) {
	dbauth, _ := ioutil.ReadFile("db.auth")
	sb = &SweetieBot{
		version:            "0.8.0",
		Owners:             map[uint64]bool{95585199324143616: true, 98605232707080192: true},
		RestrictedCommands: map[string]bool{"search": true, "lastping": true, "setstatus": true},
		MainGuildID:        98609319519453184,
		DebugChannels:      map[string]string{"98609319519453184": "141710126628339712", "105443346608095232": "200112394494541824"},
		GuildChannels:      make(map[string]*GuildInfo),
		quit:               false,
		guilds:             make(map[string]*GuildInfo),
		LastMessages:       make(map[string]int64),
		MaxConfigSize:      1000000,
	}
	// Discard a random amount of initial RNG output (presumably to
	// decorrelate early rolls — TODO confirm intent).
	rand.Intn(10)
	for i := 0; i < 20+rand.Intn(20); i++ {
		rand.Intn(50)
	}
	db, err := DB_Load(&Log{0, nil}, "mysql", strings.TrimSpace(string(dbauth)))
	if err != nil {
		fmt.Println("Error loading database", err.Error())
		return
	}
	sb.db = db
	sb.dg, err = discordgo.New("Bot " + Token)
	if err != nil {
		fmt.Println("Error creating discord session", err.Error())
		return
	}
	// Wire every discord gateway event to its handler.
	sb.dg.AddHandler(SBReady)
	sb.dg.AddHandler(SBTypingStart)
	sb.dg.AddHandler(SBMessageCreate)
	sb.dg.AddHandler(SBMessageUpdate)
	sb.dg.AddHandler(SBMessageDelete)
	sb.dg.AddHandler(SBMessageAck)
	sb.dg.AddHandler(SBUserUpdate)
	sb.dg.AddHandler(SBPresenceUpdate)
	sb.dg.AddHandler(SBVoiceStateUpdate)
	sb.dg.AddHandler(SBGuildUpdate)
	sb.dg.AddHandler(SBGuildMemberAdd)
	sb.dg.AddHandler(SBGuildMemberRemove)
	sb.dg.AddHandler(SBGuildMemberUpdate)
	sb.dg.AddHandler(SBGuildBanAdd)
	sb.dg.AddHandler(SBGuildBanRemove)
	sb.dg.AddHandler(SBGuildCreate)
	sb.dg.AddHandler(SBChannelCreate)
	sb.dg.AddHandler(SBChannelDelete)
	sb.db.LoadStatements()
	fmt.Println("Finished loading database statements")
	//BuildMarkov(1, 1)
	//return
	err = sb.dg.Open()
	if err == nil {
		fmt.Println("Connection established")
		// Main loop: everything else happens on discordgo callback goroutines.
		for !sb.quit {
			time.Sleep(400 * time.Millisecond)
		}
	} else {
		fmt.Println("Error opening websocket connection: ", err.Error())
	}
	fmt.Println("Sweetiebot quitting")
	sb.dg.Close()
	sb.db.Close()
}
|
package restic_test
import (
"crypto/rand"
"encoding/json"
"flag"
"io"
mrand "math/rand"
"sync"
"testing"
"time"
"github.com/restic/restic"
"github.com/restic/restic/backend"
)
// maxWorkers controls how many concurrent goroutines TestBlobListRandom spawns.
var maxWorkers = flag.Uint("workers", 100, "number of workers to test BlobList concurrent access against")
// randomID returns a fresh random ID of backend.IDSize bytes, panicking if
// the system RNG fails (acceptable in tests).
func randomID() []byte {
	id := make([]byte, backend.IDSize)
	if _, err := io.ReadFull(rand.Reader, id); err != nil {
		panic(err)
	}
	return id
}
// newBlob constructs a Blob with a random ID and a random size.
func newBlob() restic.Blob {
	id := randomID()
	return restic.Blob{ID: id, Size: uint64(mrand.Uint32())}
}
// Test basic functionality: Insert, Find, and Merge.
func TestBlobList(t *testing.T) {
	bl := restic.NewBlobList()
	b := newBlob()
	bl.Insert(b)
	for i := 0; i < 1000; i++ {
		bl.Insert(newBlob())
	}
	b2, err := bl.Find(restic.Blob{ID: b.ID})
	ok(t, err)
	assert(t, b2.Compare(b) == 0, "items are not equal: want %v, got %v", b, b2)
	bl2 := restic.NewBlobList()
	for i := 0; i < 1000; i++ {
		// BUG FIX: this loop previously inserted into bl instead of bl2,
		// leaving bl2 empty so the negative Find below and the Merge were
		// exercised against a trivial empty list.
		bl2.Insert(newBlob())
	}
	b2, err = bl2.Find(b)
	assert(t, err != nil, "found ID in restic that was never inserted: %v", b2)
	bl2.Merge(bl)
	b2, err = bl2.Find(b)
	if err != nil {
		t.Fatal(err)
	}
	if b.Compare(b2) != 0 {
		t.Fatalf("items are not equal: want %v, got %v", b, b2)
	}
}
// Test JSON encode/decode round-trips a BlobList without losing entries.
func TestBlobListJSON(t *testing.T) {
	bl := restic.NewBlobList()
	b := restic.Blob{ID: randomID()}
	bl.Insert(b)
	b2, err := bl.Find(b)
	ok(t, err)
	assert(t, b2.Compare(b) == 0, "items are not equal: want %v, got %v", b, b2)
	buf, err := json.Marshal(bl)
	ok(t, err)
	bl2 := restic.BlobList{}
	// BUG FIX: the Unmarshal error was previously discarded; a decode failure
	// would have surfaced only as a confusing Find failure below.
	err = json.Unmarshal(buf, &bl2)
	ok(t, err)
	b2, err = bl2.Find(b)
	ok(t, err)
	assert(t, b2.Compare(b) == 0, "items are not equal: want %v, got %v", b, b2)
	// Re-encoding is checked for success only.
	buf, err = json.Marshal(bl2)
	ok(t, err)
}
// random insert/find access by several goroutines
func TestBlobListRandom(t *testing.T) {
	var wg sync.WaitGroup
	worker := func(bl *restic.BlobList) {
		defer wg.Done()
		b := newBlob()
		bl.Insert(b)
		for i := 0; i < 200; i++ {
			bl.Insert(newBlob())
		}
		d := time.Duration(mrand.Intn(10)*100) * time.Millisecond
		time.Sleep(d)
		for i := 0; i < 100; i++ {
			b2, err := bl.Find(b)
			// BUG FIX: t.Fatal/t.Fatalf must only be called from the goroutine
			// running the test function (it calls runtime.Goexit); from worker
			// goroutines we record the failure with t.Error* and return.
			if err != nil {
				t.Error(err)
				return
			}
			if b.Compare(b2) != 0 {
				t.Errorf("items are not equal: want %v, got %v", b, b2)
				return
			}
		}
		bl2 := restic.NewBlobList()
		for i := 0; i < 200; i++ {
			bl2.Insert(newBlob())
		}
		bl2.Merge(bl)
	}
	bl := restic.NewBlobList()
	for i := 0; uint(i) < *maxWorkers; i++ {
		wg.Add(1)
		go worker(bl)
	}
	wg.Wait()
}
Reduce workers in tests to 20
package restic_test
import (
"crypto/rand"
"encoding/json"
"flag"
"io"
mrand "math/rand"
"sync"
"testing"
"time"
"github.com/restic/restic"
"github.com/restic/restic/backend"
)
// maxWorkers controls how many concurrent goroutines TestBlobListRandom spawns.
var maxWorkers = flag.Uint("workers", 20, "number of workers to test BlobList concurrent access against")
// randomID returns a freshly generated random ID of backend.IDSize bytes.
// It panics when the system entropy source cannot be read, which is fine
// inside the test suite.
func randomID() []byte {
    id := make([]byte, backend.IDSize)
    if _, err := io.ReadFull(rand.Reader, id); err != nil {
        panic(err)
    }
    return id
}
// newBlob builds a Blob with a random ID and a random size in uint32 range.
func newBlob() restic.Blob {
    return restic.Blob{ID: randomID(), Size: uint64(mrand.Uint32())}
}
// Test basic functionality: insert/find on one list, negative lookup on a
// second populated list, then merge and re-find.
func TestBlobList(t *testing.T) {
    bl := restic.NewBlobList()
    b := newBlob()
    bl.Insert(b)

    for i := 0; i < 1000; i++ {
        bl.Insert(newBlob())
    }

    b2, err := bl.Find(restic.Blob{ID: b.ID})
    ok(t, err)
    assert(t, b2.Compare(b) == 0, "items are not equal: want %v, got %v", b, b2)

    bl2 := restic.NewBlobList()
    // Populate the second list. The original loop inserted into bl here,
    // which left bl2 empty and made the negative Find below trivially pass.
    for i := 0; i < 1000; i++ {
        bl2.Insert(newBlob())
    }

    b2, err = bl2.Find(b)
    assert(t, err != nil, "found ID in restic that was never inserted: %v", b2)

    bl2.Merge(bl)

    b2, err = bl2.Find(b)
    if err != nil {
        t.Fatal(err)
    }

    if b.Compare(b2) != 0 {
        t.Fatalf("items are not equal: want %v, got %v", b, b2)
    }
}
// Test JSON encode/decode round-trips a BlobList and verifies the decoded
// list still contains the inserted blob.
func TestBlobListJSON(t *testing.T) {
    bl := restic.NewBlobList()
    b := restic.Blob{ID: randomID()}

    bl.Insert(b)
    b2, err := bl.Find(b)
    ok(t, err)
    assert(t, b2.Compare(b) == 0, "items are not equal: want %v, got %v", b, b2)

    buf, err := json.Marshal(bl)
    ok(t, err)

    bl2 := restic.BlobList{}
    // Check the Unmarshal error: silently ignoring it would leave bl2 empty
    // and make the Find below fail with a misleading message.
    ok(t, json.Unmarshal(buf, &bl2))

    b2, err = bl2.Find(b)
    ok(t, err)
    assert(t, b2.Compare(b) == 0, "items are not equal: want %v, got %v", b, b2)

    // Re-encoding the decoded list must also succeed.
    _, err = json.Marshal(bl2)
    ok(t, err)
}
// random insert/find access by several goroutines.
// Each worker hammers the shared list, then merges it into a private one.
func TestBlobListRandom(t *testing.T) {
    var wg sync.WaitGroup

    worker := func(bl *restic.BlobList) {
        defer wg.Done()

        b := newBlob()
        bl.Insert(b)

        for i := 0; i < 200; i++ {
            bl.Insert(newBlob())
        }

        d := time.Duration(mrand.Intn(10)*100) * time.Millisecond
        time.Sleep(d)

        for i := 0; i < 100; i++ {
            b2, err := bl.Find(b)
            if err != nil {
                // t.Fatal must only be called from the goroutine running the
                // test (it calls runtime.Goexit); use Error + return here.
                t.Error(err)
                return
            }

            if b.Compare(b2) != 0 {
                t.Errorf("items are not equal: want %v, got %v", b, b2)
                return
            }
        }

        bl2 := restic.NewBlobList()
        for i := 0; i < 200; i++ {
            bl2.Insert(newBlob())
        }

        bl2.Merge(bl)
    }

    bl := restic.NewBlobList()

    for i := 0; uint(i) < *maxWorkers; i++ {
        wg.Add(1)
        go worker(bl)
    }

    wg.Wait()
}
|
package main
import (
"errors"
"fmt"
"github.com/ethereum/ethutil-go"
"log"
"math/big"
)
// BlockChain tracks the current head of the chain, the genesis block and the
// best total difficulty seen so far.
type BlockChain struct {
    // Last block
    LastBlock *ethutil.Block

    // The famous, the fabulous Mister GENESIIIIIIS (block)
    genesisBlock *ethutil.Block

    // Last known total difficulty
    TD *big.Int
}
// NewBlockChain builds a chain seeded with the genesis block and the last
// total difficulty recorded in the database.
func NewBlockChain() *BlockChain {
    chain := &BlockChain{
        genesisBlock: ethutil.NewBlock(ethutil.Encode(ethutil.Genesis)),
    }

    // Set the last known difficulty (might be 0x0 as initial value, Genesis)
    chain.TD = ethutil.BigD(ethutil.Config.Db.LastKnownTD())

    // TODO get last block from the database
    chain.LastBlock = chain.genesisBlock

    return chain
}
// HasBlock reports whether a block with the given hash is present in the
// database (a non-empty stored value counts as present).
func (bc *BlockChain) HasBlock(hash string) bool {
    raw, _ := ethutil.Config.Db.Get([]byte(hash))
    return len(raw) > 0
}
// GenesisBlock returns the chain's genesis block.
func (bc *BlockChain) GenesisBlock() *ethutil.Block {
    return bc.genesisBlock
}
// BlockManager validates and processes incoming blocks and runs contract
// transactions on its VM stack.
type BlockManager struct {
    // The block chain :)
    bc *BlockChain

    // Stack for processing contracts
    stack *Stack

    // Last known block number
    LastBlockNumber *big.Int
}
// NewBlockManager wires a fresh manager to a new chain and an empty VM stack,
// seeding LastBlockNumber from the stored BlockInfo of the head block.
func NewBlockManager() *BlockManager {
    bm := &BlockManager{
        bc:    NewBlockChain(),
        stack: NewStack(),
    }

    // Set the last known block number based on the blockchains last
    // block
    bm.LastBlockNumber = bm.BlockInfo(bm.bc.LastBlock).Number

    return bm
}
// ProcessBlock validates the block, applies the coinbase reward, executes
// every contract transaction concurrently, and persists the block when it
// raises the chain's total difficulty.
func (bm *BlockManager) ProcessBlock(block *ethutil.Block) error {
    // Block validation
    if err := bm.ValidateBlock(block); err != nil {
        return err
    }

    // I'm not sure, but I don't know if there should be thrown
    // any errors at this time.
    if err := bm.AccumelateRewards(block); err != nil {
        return err
    }

    // Get the tx count. Used to create enough channels to 'join' the go routines
    txCount := len(block.Transactions())
    // Locking channel. When it has been fully buffered this method will return
    lockChan := make(chan bool, txCount)

    // Process each transaction/contract. Exactly one value per transaction is
    // sent on lockChan (either here for plain txs or by ProcessContract).
    for _, tx := range block.Transactions() {
        // If there's no recipient, it's a contract
        if tx.IsContract() {
            go bm.ProcessContract(tx, block, lockChan)
        } else {
            // "finish" tx which isn't a contract
            lockChan <- true
        }
    }

    // Wait for all Tx to finish processing
    for i := 0; i < txCount; i++ {
        <-lockChan
    }

    // Only persist and adopt the block when it extends total difficulty.
    if bm.CalculateTD(block) {
        ethutil.Config.Db.Put(block.Hash(), block.MarshalRlp())
        bm.bc.LastBlock = block
    }

    return nil
}
// writeBlockInfo persists a BlockInfo record for block under the key
// block.Hash()+"Info". Note: big.Int.Add writes into its receiver, so this
// also advances bm.LastBlockNumber in place as a side effect.
func (bm *BlockManager) writeBlockInfo(block *ethutil.Block) {
    bi := ethutil.BlockInfo{Number: bm.LastBlockNumber.Add(bm.LastBlockNumber, big.NewInt(1))}
    ethutil.Config.Db.Put(append(block.Hash(), []byte("Info")...), bi.MarshalRlp())
}
// BlockInfo loads the stored BlockInfo for block from the database key
// block.Hash()+"Info". A missing record yields the zero BlockInfo (the read
// error is deliberately ignored, matching the rest of this file).
func (bm *BlockManager) BlockInfo(block *ethutil.Block) ethutil.BlockInfo {
    key := append(block.Hash(), []byte("Info")...)
    raw, _ := ethutil.Config.Db.Get(key)

    info := ethutil.BlockInfo{}
    info.UnmarshalRlp(raw)

    return info
}
// CalculateTD computes the total difficulty of block (parent TD + sum of
// uncle difficulties + block difficulty) and, when it strictly beats the
// current best, adopts the block as the new head and records the new TD.
// It reports whether the block was adopted.
func (bm *BlockManager) CalculateTD(block *ethutil.Block) bool {
    uncleDiff := new(big.Int)
    for _, uncle := range block.Uncles {
        uncleDiff = uncleDiff.Add(uncleDiff, uncle.Difficulty)
    }

    // TD(genesis_block) = 0 and TD(B) = TD(B.parent) + sum(u.difficulty for u in B.uncles) + B.difficulty
    td := new(big.Int)
    td = td.Add(bm.bc.TD, uncleDiff)
    td = td.Add(td, block.Difficulty)

    // The new TD will only be accepted if the new difficulty is
    // is greater than the previous.
    if td.Cmp(bm.bc.TD) > 0 {
        bm.bc.LastBlock = block
        // Set the new total difficulty back to the block chain
        bm.bc.TD = td

        if Debug {
            log.Println("TD(block) =", td)
        }

        return true
    }

    return false
}
// Validates the current block. Returns an error if the block was invalid,
// an uncle or anything that isn't on the current block chain.
// Validation validates easy over difficult (dagger takes longer time = difficult)
func (bm *BlockManager) ValidateBlock(block *ethutil.Block) error {
    // TODO
    // 2. Check if the difficulty is correct

    // Check if we have the parent hash, if it isn't known we discard it
    // Reasons might be catching up or simply an invalid block
    // NOTE(review): this guard only fires when PrevHash is the empty string,
    // so real parent hashes are never looked up — the condition looks
    // inverted (PrevHash != ""); confirm the intended semantics.
    if bm.bc.LastBlock != nil && block.PrevHash == "" &&
        !bm.bc.HasBlock(block.PrevHash) {
        return errors.New("Block's parent unknown")
    }

    // Check each uncle's previous hash. In order for it to be valid
    // is if it has the same block hash as the current
    for _, uncle := range block.Uncles {
        if uncle.PrevHash != block.PrevHash {
            if Debug {
                log.Printf("Uncle prvhash mismatch %x %x\n", block.PrevHash, uncle.PrevHash)
            }

            return errors.New("Mismatching Prvhash from uncle")
        }
    }

    // Verify the nonce of the block. Return an error if it's not valid
    // NOTE(review): same PrevHash == "" pattern as above — verify.
    if bm.bc.LastBlock != nil && block.PrevHash == "" &&
        !DaggerVerify(ethutil.BigD(block.Hash()), block.Difficulty, block.Nonce) {
        return errors.New("Block's nonce is invalid")
    }

    log.Println("Block validation PASSED")

    return nil
}
// AccumelateRewards credits the block reward to the coinbase account and
// writes the updated balance back into the block's state. (The exported
// name is misspelled; it is kept as-is for existing callers.)
func (bm *BlockManager) AccumelateRewards(block *ethutil.Block) error {
    // Get the coinbase rlp data
    d := block.State().Get(block.Coinbase)
    ether := ethutil.NewEtherFromData([]byte(d))

    // Reward amount of ether to the coinbase address
    ether.AddFee(ethutil.CalculateBlockReward(block, len(block.Uncles)))

    block.State().Update(block.Coinbase, string(ether.MarshalRlp()))

    // TODO Reward each uncle
    return nil
}
// ProcessContract runs one contract transaction in the VM, recovering from
// any panic so a faulty contract cannot abort block processing. Exactly one
// value is sent on lockChan on both the success and the panic path.
func (bm *BlockManager) ProcessContract(tx *ethutil.Transaction, block *ethutil.Block, lockChan chan bool) {
    // Recovering function in case the VM had any errors
    defer func() {
        if r := recover(); r != nil {
            fmt.Println("Recovered from VM execution with err =", r)
            // Let the channel know where done even though it failed (so the execution may resume normally)
            lockChan <- true
        }
    }()

    // Process contract
    bm.ProcContract(tx, block, func(opType OpType) bool {
        // TODO turn on once big ints are in place
        //if !block.PayFee(tx.Hash(), StepFee.Uint64()) {
        // return false
        //}

        return true // Continue
    })

    // Broadcast we're done
    lockChan <- true
}
// Contract evaluation is done here.
// ProcContract fetches the contract stored under the transaction hash and
// interprets its instructions one by one on bm.stack until oSTOP or until
// the callback cb vetoes further execution. The instruction pointer pc is
// advanced at the bottom of the loop (and additionally by ops such as oPUSH
// that consume the following slot).
func (bm *BlockManager) ProcContract(tx *ethutil.Transaction, block *ethutil.Block, cb TxCallback) {
    // Instruction pointer
    pc := 0
    blockInfo := bm.BlockInfo(block)

    contract := block.GetContract(tx.Hash())
    if contract == nil {
        fmt.Println("Contract not found")
        return
    }

    Pow256 := ethutil.BigPow(2, 256)

    //fmt.Printf("# op arg\n")
out:
    for {
        // The base big int for all calculations. Use this for any results.
        base := new(big.Int)
        // XXX Should Instr return big int slice instead of string slice?
        // Get the next instruction from the contract
        //op, _, _ := Instr(contract.state.Get(string(Encode(uint32(pc)))))
        nb := ethutil.NumberToBytes(uint64(pc), 32)
        o, _, _ := ethutil.Instr(contract.State().Get(string(nb)))
        op := OpCode(o)

        // NOTE(review): cb is always invoked with opType 0 rather than the
        // decoded op — confirm whether that is intentional.
        if !cb(0) {
            break
        }

        if Debug {
            //fmt.Printf("%-3d %-4s\n", pc, op.String())
        }

        switch op {
        case oSTOP:
            break out
        case oADD:
            x, y := bm.stack.Popn()
            // (x + y) % 2 ** 256
            base.Add(x, y)
            base.Mod(base, Pow256)
            // Pop result back on the stack
            bm.stack.Push(base.String())
        case oSUB:
            x, y := bm.stack.Popn()
            // (x - y) % 2 ** 256
            base.Sub(x, y)
            base.Mod(base, Pow256)
            // Pop result back on the stack
            bm.stack.Push(base.String())
        case oMUL:
            x, y := bm.stack.Popn()
            // (x * y) % 2 ** 256
            base.Mul(x, y)
            base.Mod(base, Pow256)
            // Pop result back on the stack
            bm.stack.Push(base.String())
        case oDIV:
            x, y := bm.stack.Popn()
            // floor(x / y)
            base.Div(x, y)
            // Pop result back on the stack
            bm.stack.Push(base.String())
        case oSDIV:
            x, y := bm.stack.Popn()
            // n > 2**255
            if x.Cmp(Pow256) > 0 {
                x.Sub(Pow256, x)
            }
            if y.Cmp(Pow256) > 0 {
                y.Sub(Pow256, y)
            }
            z := new(big.Int)
            z.Div(x, y)
            if z.Cmp(Pow256) > 0 {
                z.Sub(Pow256, z)
            }
            // Push result on to the stack
            bm.stack.Push(z.String())
        case oMOD:
            x, y := bm.stack.Popn()
            base.Mod(x, y)
            bm.stack.Push(base.String())
        case oSMOD:
            x, y := bm.stack.Popn()
            // n > 2**255
            if x.Cmp(Pow256) > 0 {
                x.Sub(Pow256, x)
            }
            if y.Cmp(Pow256) > 0 {
                y.Sub(Pow256, y)
            }
            z := new(big.Int)
            z.Mod(x, y)
            if z.Cmp(Pow256) > 0 {
                z.Sub(Pow256, z)
            }
            // Push result on to the stack
            bm.stack.Push(z.String())
        case oEXP:
            x, y := bm.stack.Popn()
            base.Exp(x, y, Pow256)
            bm.stack.Push(base.String())
        case oNEG:
            base.Sub(Pow256, ethutil.Big(bm.stack.Pop()))
            bm.stack.Push(base.String())
        case oLT:
            x, y := bm.stack.Popn()
            // x < y
            if x.Cmp(y) < 0 {
                bm.stack.Push("1")
            } else {
                bm.stack.Push("0")
            }
        case oLE:
            x, y := bm.stack.Popn()
            // x <= y
            if x.Cmp(y) < 1 {
                bm.stack.Push("1")
            } else {
                bm.stack.Push("0")
            }
        case oGT:
            x, y := bm.stack.Popn()
            // x > y
            if x.Cmp(y) > 0 {
                bm.stack.Push("1")
            } else {
                bm.stack.Push("0")
            }
        case oGE:
            x, y := bm.stack.Popn()
            // x >= y
            if x.Cmp(y) > -1 {
                bm.stack.Push("1")
            } else {
                bm.stack.Push("0")
            }
        case oNOT:
            x, y := bm.stack.Popn()
            // x != y
            if x.Cmp(y) != 0 {
                bm.stack.Push("1")
            } else {
                bm.stack.Push("0")
            }

        // Please note that the following code contains some
        // ugly string casting. This will have to change to big
        // ints. TODO :)
        case oMYADDRESS:
            bm.stack.Push(string(tx.Hash()))
        case oTXSENDER:
            bm.stack.Push(string(tx.Sender()))
        case oTXVALUE:
            bm.stack.Push(tx.Value.String())
        case oTXDATAN:
            bm.stack.Push(big.NewInt(int64(len(tx.Data))).String())
        case oTXDATA:
            v := ethutil.Big(bm.stack.Pop())
            // v >= len(data)
            if v.Cmp(big.NewInt(int64(len(tx.Data)))) >= 0 {
                //I know this will change. It makes no
                //sense. Read comment above
                bm.stack.Push(ethutil.Big("0").String())
            } else {
                bm.stack.Push(ethutil.Big(tx.Data[v.Uint64()]).String())
            }
        case oBLK_PREVHASH:
            bm.stack.Push(string(block.PrevHash))
        case oBLK_COINBASE:
            bm.stack.Push(block.Coinbase)
        case oBLK_TIMESTAMP:
            bm.stack.Push(big.NewInt(block.Time).String())
        case oBLK_NUMBER:
            bm.stack.Push(blockInfo.Number.String())
        case oBLK_DIFFICULTY:
            bm.stack.Push(block.Difficulty.String())
        case oBASEFEE:
            // e = 10^21
            e := big.NewInt(0).Exp(big.NewInt(10), big.NewInt(21), big.NewInt(0))
            d := new(big.Rat)
            d.SetInt(block.Difficulty)
            c := new(big.Rat)
            c.SetFloat64(0.5)
            // d = diff / 0.5
            d.Quo(d, c)
            // base = floor(d)
            base.Div(d.Num(), d.Denom())
            x := new(big.Int)
            x.Div(e, base)
            // x = floor(10^21 / floor(diff^0.5))
            bm.stack.Push(x.String())

        // The following opcodes are not implemented yet; they fall through
        // to the pc increment below.
        case oSHA256:
        case oRIPEMD160:
        case oECMUL:
        case oECADD:
        case oECSIGN:
        case oECRECOVER:
        case oECVALID:
        case oSHA3:
        case oPUSH:
            // Get the next entry and pushes the value on the stack
            pc++
            bm.stack.Push(contract.State().Get(string(ethutil.NumberToBytes(uint64(pc), 32))))
        case oPOP:
            // Pop current value of the stack
            bm.stack.Pop()
        case oDUP:
        case oSWAP:
        case oMLOAD:
        case oMSTORE:
        case oSLOAD:
        case oSSTORE:
        case oJMP:
        case oJMPI:
        case oIND:
        case oEXTRO:
        case oBALANCE:
        case oMKTX:
        case oSUICIDE:
            /*
                case oLOAD:
                    // Load instruction X on the stack
                    i, _ := strconv.Atoi(bm.stack.Pop())
                    bm.stack.Push(contract.State().Get(string(ethutil.NumberToBytes(uint64(i), 32))))
            */
        }
        pc++
    }

    bm.stack.Print()
}
Added more opcodes
package main
import (
"bytes"
"errors"
"fmt"
"github.com/ethereum/ethutil-go"
"github.com/obscuren/secp256-go"
"log"
"math"
"math/big"
)
// BlockChain tracks the current head of the chain, the genesis block and the
// best total difficulty seen so far.
type BlockChain struct {
    // Last block
    LastBlock *ethutil.Block

    // The famous, the fabulous Mister GENESIIIIIIS (block)
    genesisBlock *ethutil.Block

    // Last known total difficulty
    TD *big.Int
}
// NewBlockChain builds a chain seeded with the genesis block and the last
// total difficulty recorded in the database.
func NewBlockChain() *BlockChain {
    chain := &BlockChain{
        genesisBlock: ethutil.NewBlock(ethutil.Encode(ethutil.Genesis)),
    }

    // Set the last known difficulty (might be 0x0 as initial value, Genesis)
    chain.TD = ethutil.BigD(ethutil.Config.Db.LastKnownTD())

    // TODO get last block from the database
    chain.LastBlock = chain.genesisBlock

    return chain
}
// HasBlock reports whether a block with the given hash is present in the
// database (a non-empty stored value counts as present).
func (bc *BlockChain) HasBlock(hash string) bool {
    raw, _ := ethutil.Config.Db.Get([]byte(hash))
    return len(raw) > 0
}
// GenesisBlock returns the chain's genesis block.
func (bc *BlockChain) GenesisBlock() *ethutil.Block {
    return bc.genesisBlock
}
// BlockManager validates and processes incoming blocks and runs contract
// transactions on its VM stack.
type BlockManager struct {
    // The block chain :)
    bc *BlockChain

    // Stack for processing contracts
    stack *Stack

    // Last known block number
    LastBlockNumber *big.Int
}
// NewBlockManager wires a fresh manager to a new chain and an empty VM stack,
// seeding LastBlockNumber from the stored BlockInfo of the head block.
func NewBlockManager() *BlockManager {
    bm := &BlockManager{
        bc:    NewBlockChain(),
        stack: NewStack(),
    }

    // Set the last known block number based on the blockchains last
    // block
    bm.LastBlockNumber = bm.BlockInfo(bm.bc.LastBlock).Number

    return bm
}
// ProcessBlock validates the block, applies the coinbase reward, executes
// every contract transaction concurrently, and persists the block when it
// raises the chain's total difficulty.
func (bm *BlockManager) ProcessBlock(block *ethutil.Block) error {
    // Block validation
    if err := bm.ValidateBlock(block); err != nil {
        return err
    }

    // I'm not sure, but I don't know if there should be thrown
    // any errors at this time.
    if err := bm.AccumelateRewards(block); err != nil {
        return err
    }

    // Get the tx count. Used to create enough channels to 'join' the go routines
    txCount := len(block.Transactions())
    // Locking channel. When it has been fully buffered this method will return
    lockChan := make(chan bool, txCount)

    // Process each transaction/contract. Exactly one value per transaction is
    // sent on lockChan (either here for plain txs or by ProcessContract).
    for _, tx := range block.Transactions() {
        // If there's no recipient, it's a contract
        if tx.IsContract() {
            go bm.ProcessContract(tx, block, lockChan)
        } else {
            // "finish" tx which isn't a contract
            lockChan <- true
        }
    }

    // Wait for all Tx to finish processing
    for i := 0; i < txCount; i++ {
        <-lockChan
    }

    // Only persist and adopt the block when it extends total difficulty.
    if bm.CalculateTD(block) {
        ethutil.Config.Db.Put(block.Hash(), block.MarshalRlp())
        bm.bc.LastBlock = block
    }

    return nil
}
// writeBlockInfo persists a BlockInfo record for block under the key
// block.Hash()+"Info". Note: big.Int.Add writes into its receiver, so this
// also advances bm.LastBlockNumber in place as a side effect.
func (bm *BlockManager) writeBlockInfo(block *ethutil.Block) {
    bi := ethutil.BlockInfo{Number: bm.LastBlockNumber.Add(bm.LastBlockNumber, big.NewInt(1))}
    ethutil.Config.Db.Put(append(block.Hash(), []byte("Info")...), bi.MarshalRlp())
}
// BlockInfo loads the stored BlockInfo for block from the database key
// block.Hash()+"Info". A missing record yields the zero BlockInfo (the read
// error is deliberately ignored, matching the rest of this file).
func (bm *BlockManager) BlockInfo(block *ethutil.Block) ethutil.BlockInfo {
    key := append(block.Hash(), []byte("Info")...)
    raw, _ := ethutil.Config.Db.Get(key)

    info := ethutil.BlockInfo{}
    info.UnmarshalRlp(raw)

    return info
}
// CalculateTD computes the total difficulty of block (parent TD + sum of
// uncle difficulties + block difficulty) and, when it strictly beats the
// current best, adopts the block as the new head and records the new TD.
// It reports whether the block was adopted.
func (bm *BlockManager) CalculateTD(block *ethutil.Block) bool {
    uncleDiff := new(big.Int)
    for _, uncle := range block.Uncles {
        uncleDiff = uncleDiff.Add(uncleDiff, uncle.Difficulty)
    }

    // TD(genesis_block) = 0 and TD(B) = TD(B.parent) + sum(u.difficulty for u in B.uncles) + B.difficulty
    td := new(big.Int)
    td = td.Add(bm.bc.TD, uncleDiff)
    td = td.Add(td, block.Difficulty)

    // The new TD will only be accepted if the new difficulty is
    // is greater than the previous.
    if td.Cmp(bm.bc.TD) > 0 {
        bm.bc.LastBlock = block
        // Set the new total difficulty back to the block chain
        bm.bc.TD = td

        if Debug {
            log.Println("TD(block) =", td)
        }

        return true
    }

    return false
}
// Validates the current block. Returns an error if the block was invalid,
// an uncle or anything that isn't on the current block chain.
// Validation validates easy over difficult (dagger takes longer time = difficult)
func (bm *BlockManager) ValidateBlock(block *ethutil.Block) error {
    // TODO
    // 2. Check if the difficulty is correct

    // Check if we have the parent hash, if it isn't known we discard it
    // Reasons might be catching up or simply an invalid block
    // NOTE(review): this guard only fires when PrevHash is the empty string,
    // so real parent hashes are never looked up — the condition looks
    // inverted (PrevHash != ""); confirm the intended semantics.
    if bm.bc.LastBlock != nil && block.PrevHash == "" &&
        !bm.bc.HasBlock(block.PrevHash) {
        return errors.New("Block's parent unknown")
    }

    // Check each uncle's previous hash. In order for it to be valid
    // is if it has the same block hash as the current
    for _, uncle := range block.Uncles {
        if uncle.PrevHash != block.PrevHash {
            if Debug {
                log.Printf("Uncle prvhash mismatch %x %x\n", block.PrevHash, uncle.PrevHash)
            }

            return errors.New("Mismatching Prvhash from uncle")
        }
    }

    // Verify the nonce of the block. Return an error if it's not valid
    // NOTE(review): same PrevHash == "" pattern as above — verify.
    if bm.bc.LastBlock != nil && block.PrevHash == "" &&
        !DaggerVerify(ethutil.BigD(block.Hash()), block.Difficulty, block.Nonce) {
        return errors.New("Block's nonce is invalid")
    }

    log.Println("Block validation PASSED")

    return nil
}
// AccumelateRewards credits the block reward to the coinbase account and
// writes the updated balance back into the block's state. (The exported
// name is misspelled; it is kept as-is for existing callers.)
func (bm *BlockManager) AccumelateRewards(block *ethutil.Block) error {
    // Get the coinbase rlp data
    d := block.State().Get(block.Coinbase)
    ether := ethutil.NewEtherFromData([]byte(d))

    // Reward amount of ether to the coinbase address
    ether.AddFee(ethutil.CalculateBlockReward(block, len(block.Uncles)))

    block.State().Update(block.Coinbase, string(ether.MarshalRlp()))

    // TODO Reward each uncle
    return nil
}
// ProcessContract runs one contract transaction in the VM, recovering from
// any panic so a faulty contract cannot abort block processing. Exactly one
// value is sent on lockChan on both the success and the panic path.
func (bm *BlockManager) ProcessContract(tx *ethutil.Transaction, block *ethutil.Block, lockChan chan bool) {
    // Recovering function in case the VM had any errors
    defer func() {
        if r := recover(); r != nil {
            fmt.Println("Recovered from VM execution with err =", r)
            // Let the channel know where done even though it failed (so the execution may resume normally)
            lockChan <- true
        }
    }()

    // Process contract
    bm.ProcContract(tx, block, func(opType OpType) bool {
        // TODO turn on once big ints are in place
        //if !block.PayFee(tx.Hash(), StepFee.Uint64()) {
        // return false
        //}

        return true // Continue
    })

    // Broadcast we're done
    lockChan <- true
}
// Contract evaluation is done here.
func (bm *BlockManager) ProcContract(tx *ethutil.Transaction, block *ethutil.Block, cb TxCallback) {
// Instruction pointer
pc := 0
blockInfo := bm.BlockInfo(block)
contract := block.GetContract(tx.Hash())
if contract == nil {
fmt.Println("Contract not found")
return
}
Pow256 := ethutil.BigPow(2, 256)
//fmt.Printf("# op arg\n")
out:
for {
// The base big int for all calculations. Use this for any results.
base := new(big.Int)
// XXX Should Instr return big int slice instead of string slice?
// Get the next instruction from the contract
//op, _, _ := Instr(contract.state.Get(string(Encode(uint32(pc)))))
nb := ethutil.NumberToBytes(uint64(pc), 32)
o, _, _ := ethutil.Instr(contract.State().Get(string(nb)))
op := OpCode(o)
if !cb(0) {
break
}
if Debug {
//fmt.Printf("%-3d %-4s\n", pc, op.String())
}
switch op {
case oSTOP:
break out
case oADD:
x, y := bm.stack.Popn()
// (x + y) % 2 ** 256
base.Add(x, y)
base.Mod(base, Pow256)
// Pop result back on the stack
bm.stack.Push(base.String())
case oSUB:
x, y := bm.stack.Popn()
// (x - y) % 2 ** 256
base.Sub(x, y)
base.Mod(base, Pow256)
// Pop result back on the stack
bm.stack.Push(base.String())
case oMUL:
x, y := bm.stack.Popn()
// (x * y) % 2 ** 256
base.Mul(x, y)
base.Mod(base, Pow256)
// Pop result back on the stack
bm.stack.Push(base.String())
case oDIV:
x, y := bm.stack.Popn()
// floor(x / y)
base.Div(x, y)
// Pop result back on the stack
bm.stack.Push(base.String())
case oSDIV:
x, y := bm.stack.Popn()
// n > 2**255
if x.Cmp(Pow256) > 0 {
x.Sub(Pow256, x)
}
if y.Cmp(Pow256) > 0 {
y.Sub(Pow256, y)
}
z := new(big.Int)
z.Div(x, y)
if z.Cmp(Pow256) > 0 {
z.Sub(Pow256, z)
}
// Push result on to the stack
bm.stack.Push(z.String())
case oMOD:
x, y := bm.stack.Popn()
base.Mod(x, y)
bm.stack.Push(base.String())
case oSMOD:
x, y := bm.stack.Popn()
// n > 2**255
if x.Cmp(Pow256) > 0 {
x.Sub(Pow256, x)
}
if y.Cmp(Pow256) > 0 {
y.Sub(Pow256, y)
}
z := new(big.Int)
z.Mod(x, y)
if z.Cmp(Pow256) > 0 {
z.Sub(Pow256, z)
}
// Push result on to the stack
bm.stack.Push(z.String())
case oEXP:
x, y := bm.stack.Popn()
base.Exp(x, y, Pow256)
bm.stack.Push(base.String())
case oNEG:
base.Sub(Pow256, ethutil.Big(bm.stack.Pop()))
bm.stack.Push(base.String())
case oLT:
x, y := bm.stack.Popn()
// x < y
if x.Cmp(y) < 0 {
bm.stack.Push("1")
} else {
bm.stack.Push("0")
}
case oLE:
x, y := bm.stack.Popn()
// x <= y
if x.Cmp(y) < 1 {
bm.stack.Push("1")
} else {
bm.stack.Push("0")
}
case oGT:
x, y := bm.stack.Popn()
// x > y
if x.Cmp(y) > 0 {
bm.stack.Push("1")
} else {
bm.stack.Push("0")
}
case oGE:
x, y := bm.stack.Popn()
// x >= y
if x.Cmp(y) > -1 {
bm.stack.Push("1")
} else {
bm.stack.Push("0")
}
case oNOT:
x, y := bm.stack.Popn()
// x != y
if x.Cmp(y) != 0 {
bm.stack.Push("1")
} else {
bm.stack.Push("0")
}
// Please note that the following code contains some
// ugly string casting. This will have to change to big
// ints. TODO :)
case oMYADDRESS:
bm.stack.Push(string(tx.Hash()))
case oTXSENDER:
bm.stack.Push(string(tx.Sender()))
case oTXVALUE:
bm.stack.Push(tx.Value.String())
case oTXDATAN:
bm.stack.Push(big.NewInt(int64(len(tx.Data))).String())
case oTXDATA:
v := ethutil.Big(bm.stack.Pop())
// v >= len(data)
if v.Cmp(big.NewInt(int64(len(tx.Data)))) >= 0 {
//I know this will change. It makes no
//sense. Read comment above
bm.stack.Push(ethutil.Big("0").String())
} else {
bm.stack.Push(ethutil.Big(tx.Data[v.Uint64()]).String())
}
case oBLK_PREVHASH:
bm.stack.Push(string(block.PrevHash))
case oBLK_COINBASE:
bm.stack.Push(block.Coinbase)
case oBLK_TIMESTAMP:
bm.stack.Push(big.NewInt(block.Time).String())
case oBLK_NUMBER:
bm.stack.Push(blockInfo.Number.String())
case oBLK_DIFFICULTY:
bm.stack.Push(block.Difficulty.String())
case oBASEFEE:
// e = 10^21
e := big.NewInt(0).Exp(big.NewInt(10), big.NewInt(21), big.NewInt(0))
d := new(big.Rat)
d.SetInt(block.Difficulty)
c := new(big.Rat)
c.SetFloat64(0.5)
// d = diff / 0.5
d.Quo(d, c)
// base = floor(d)
base.Div(d.Num(), d.Denom())
x := new(big.Int)
x.Div(e, base)
// x = floor(10^21 / floor(diff^0.5))
bm.stack.Push(x.String())
case oSHA256, oRIPEMD160:
// This is probably save
// ceil(pop / 32)
length := int(math.Ceil(float64(ethutil.Big(bm.stack.Pop()).Uint64()) / 32.0))
// New buffer which will contain the concatenated popped items
data := new(bytes.Buffer)
for i := 0; i < length; i++ {
// Encode the number to bytes and have it 32bytes long
num := ethutil.NumberToBytes(ethutil.Big(bm.stack.Pop()).Bytes(), 256)
data.WriteString(string(num))
}
if op == oSHA256 {
bm.stack.Push(base.SetBytes(ethutil.Sha256Bin(data.Bytes())).String())
} else {
bm.stack.Push(base.SetBytes(ethutil.Ripemd160(data.Bytes())).String())
}
case oECMUL:
y := bm.stack.Pop()
x := bm.stack.Pop()
n := bm.stack.Pop()
if ethutil.Big(x).Cmp(ethutil.Big(y))
data := new(bytes.Buffer)
data.WriteString(x)
data.WriteString(y)
if secp256.VerifyPubkeyValidity(data.Bytes()) == 1 {
// TODO
} else {
// Invalid, push infinity
bm.stack.Push("0")
bm.stack.Push("0")
}
case oECADD:
case oECSIGN:
case oECRECOVER:
case oECVALID:
case oSHA3:
case oPUSH:
// Get the next entry and pushes the value on the stack
pc++
bm.stack.Push(contract.State().Get(string(ethutil.NumberToBytes(uint64(pc), 32))))
case oPOP:
// Pop current value of the stack
bm.stack.Pop()
case oDUP:
case oSWAP:
case oMLOAD:
case oMSTORE:
case oSLOAD:
case oSSTORE:
case oJMP:
case oJMPI:
case oIND:
case oEXTRO:
case oBALANCE:
case oMKTX:
case oSUICIDE:
/*
case oLOAD:
// Load instruction X on the stack
i, _ := strconv.Atoi(bm.stack.Pop())
bm.stack.Push(contract.State().Get(string(ethutil.NumberToBytes(uint64(i), 32))))
*/
}
pc++
}
bm.stack.Print()
}
|
// Copyright 2016 The Xorm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xorm
import "database/sql"
// Count counts the records. bean's non-empty fields
// are conditions.
// Uses RawSQL verbatim when set, otherwise generates a COUNT query from the
// bean; sql.ErrNoRows is treated as a zero count, not an error.
func (session *Session) Count(bean interface{}) (int64, error) {
    defer session.resetStatement()
    if session.IsAutoClose {
        defer session.Close()
    }

    var sqlStr string
    var args []interface{}
    if session.Statement.RawSQL == "" {
        sqlStr, args = session.Statement.genCountSQL(bean)
    } else {
        sqlStr = session.Statement.RawSQL
        args = session.Statement.RawParams
    }

    session.queryPreprocess(&sqlStr, args...)

    var err error
    var total int64
    // Route through the transaction when one is open.
    if session.IsAutoCommit {
        err = session.DB().QueryRow(sqlStr, args...).Scan(&total)
    } else {
        err = session.Tx.QueryRow(sqlStr, args...).Scan(&total)
    }

    if err == sql.ErrNoRows || err == nil {
        return total, nil
    }

    return 0, err
}
// Sum call sum some column. bean's non-empty fields are conditions.
// Uses RawSQL verbatim when set, otherwise generates a SUM query from the
// bean; sql.ErrNoRows is treated as a zero sum, not an error.
func (session *Session) Sum(bean interface{}, columnName string) (float64, error) {
    defer session.resetStatement()
    if session.IsAutoClose {
        defer session.Close()
    }

    var sqlStr string
    var args []interface{}
    if len(session.Statement.RawSQL) == 0 {
        sqlStr, args = session.Statement.genSumSQL(bean, columnName)
    } else {
        sqlStr = session.Statement.RawSQL
        args = session.Statement.RawParams
    }

    session.queryPreprocess(&sqlStr, args...)

    var err error
    var res float64
    // Route through the transaction when one is open.
    if session.IsAutoCommit {
        err = session.DB().QueryRow(sqlStr, args...).Scan(&res)
    } else {
        err = session.Tx.QueryRow(sqlStr, args...).Scan(&res)
    }

    if err == sql.ErrNoRows || err == nil {
        return res, nil
    }

    return 0, err
}
// Sums call sum some columns. bean's non-empty fields are conditions.
// The result slice is pre-sized to one element per requested column so
// ScanSlice can fill it by index; sql.ErrNoRows yields the zero-filled slice.
func (session *Session) Sums(bean interface{}, columnNames ...string) ([]float64, error) {
    defer session.resetStatement()
    if session.IsAutoClose {
        defer session.Close()
    }

    var sqlStr string
    var args []interface{}
    if len(session.Statement.RawSQL) == 0 {
        sqlStr, args = session.Statement.genSumSQL(bean, columnNames...)
    } else {
        sqlStr = session.Statement.RawSQL
        args = session.Statement.RawParams
    }

    session.queryPreprocess(&sqlStr, args...)

    var err error
    var res = make([]float64, len(columnNames), len(columnNames))
    if session.IsAutoCommit {
        err = session.DB().QueryRow(sqlStr, args...).ScanSlice(&res)
    } else {
        err = session.Tx.QueryRow(sqlStr, args...).ScanSlice(&res)
    }

    if err == sql.ErrNoRows || err == nil {
        return res, nil
    }

    return nil, err
}
// SumsInt sum specify columns and return as []int64 instead of []float64.
// Mirrors Sums; sql.ErrNoRows yields the zero-filled slice.
func (session *Session) SumsInt(bean interface{}, columnNames ...string) ([]int64, error) {
    defer session.resetStatement()
    if session.IsAutoClose {
        defer session.Close()
    }

    var sqlStr string
    var args []interface{}
    if len(session.Statement.RawSQL) == 0 {
        sqlStr, args = session.Statement.genSumSQL(bean, columnNames...)
    } else {
        sqlStr = session.Statement.RawSQL
        args = session.Statement.RawParams
    }

    session.queryPreprocess(&sqlStr, args...)

    var err error
    // FIX: ScanSlice fills the slice by index, so it needs one element per
    // requested column; the previous make([]int64, 0, n) always came back
    // empty. This matches how Sums sizes its result.
    var res = make([]int64, len(columnNames), len(columnNames))
    if session.IsAutoCommit {
        err = session.DB().QueryRow(sqlStr, args...).ScanSlice(&res)
    } else {
        err = session.Tx.QueryRow(sqlStr, args...).ScanSlice(&res)
    }

    if err == sql.ErrNoRows || err == nil {
        return res, nil
    }

    return nil, err
}
Bug fix: SumsInt returned an empty slice (result slice was created with zero length, so ScanSlice had nothing to fill)
// Copyright 2016 The Xorm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xorm
import "database/sql"
// Count counts the records. bean's non-empty fields
// are conditions. RawSQL is used verbatim when present; sql.ErrNoRows is
// treated as a zero count rather than an error.
func (session *Session) Count(bean interface{}) (int64, error) {
    defer session.resetStatement()
    if session.IsAutoClose {
        defer session.Close()
    }

    var (
        query  string
        params []interface{}
    )
    if session.Statement.RawSQL == "" {
        query, params = session.Statement.genCountSQL(bean)
    } else {
        query, params = session.Statement.RawSQL, session.Statement.RawParams
    }

    session.queryPreprocess(&query, params...)

    var (
        count int64
        err   error
    )
    if session.IsAutoCommit {
        err = session.DB().QueryRow(query, params...).Scan(&count)
    } else {
        err = session.Tx.QueryRow(query, params...).Scan(&count)
    }

    switch err {
    case nil, sql.ErrNoRows:
        return count, nil
    default:
        return 0, err
    }
}
// Sum call sum some column. bean's non-empty fields are conditions.
// RawSQL is used verbatim when present; sql.ErrNoRows is treated as a zero
// sum rather than an error.
func (session *Session) Sum(bean interface{}, columnName string) (float64, error) {
    defer session.resetStatement()
    if session.IsAutoClose {
        defer session.Close()
    }

    var (
        query  string
        params []interface{}
    )
    if session.Statement.RawSQL == "" {
        query, params = session.Statement.genSumSQL(bean, columnName)
    } else {
        query, params = session.Statement.RawSQL, session.Statement.RawParams
    }

    session.queryPreprocess(&query, params...)

    var (
        sum float64
        err error
    )
    if session.IsAutoCommit {
        err = session.DB().QueryRow(query, params...).Scan(&sum)
    } else {
        err = session.Tx.QueryRow(query, params...).Scan(&sum)
    }

    switch err {
    case nil, sql.ErrNoRows:
        return sum, nil
    default:
        return 0, err
    }
}
// Sums call sum some columns. bean's non-empty fields are conditions.
// The result slice is pre-sized to one element per requested column so
// ScanSlice can fill it by index; sql.ErrNoRows yields the zero-filled slice.
func (session *Session) Sums(bean interface{}, columnNames ...string) ([]float64, error) {
    defer session.resetStatement()
    if session.IsAutoClose {
        defer session.Close()
    }

    var sqlStr string
    var args []interface{}
    if len(session.Statement.RawSQL) == 0 {
        sqlStr, args = session.Statement.genSumSQL(bean, columnNames...)
    } else {
        sqlStr = session.Statement.RawSQL
        args = session.Statement.RawParams
    }

    session.queryPreprocess(&sqlStr, args...)

    var err error
    var res = make([]float64, len(columnNames), len(columnNames))
    if session.IsAutoCommit {
        err = session.DB().QueryRow(sqlStr, args...).ScanSlice(&res)
    } else {
        err = session.Tx.QueryRow(sqlStr, args...).ScanSlice(&res)
    }

    if err == sql.ErrNoRows || err == nil {
        return res, nil
    }

    return nil, err
}
// SumsInt sum specify columns and return as []int64 instead of []float64.
// The result slice is pre-sized to one element per requested column so
// ScanSlice can fill it by index; sql.ErrNoRows yields the zero-filled slice.
func (session *Session) SumsInt(bean interface{}, columnNames ...string) ([]int64, error) {
    defer session.resetStatement()
    if session.IsAutoClose {
        defer session.Close()
    }

    var sqlStr string
    var args []interface{}
    if len(session.Statement.RawSQL) == 0 {
        sqlStr, args = session.Statement.genSumSQL(bean, columnNames...)
    } else {
        sqlStr = session.Statement.RawSQL
        args = session.Statement.RawParams
    }

    session.queryPreprocess(&sqlStr, args...)

    var err error
    var res = make([]int64, len(columnNames), len(columnNames))
    if session.IsAutoCommit {
        err = session.DB().QueryRow(sqlStr, args...).ScanSlice(&res)
    } else {
        err = session.Tx.QueryRow(sqlStr, args...).ScanSlice(&res)
    }

    if err == sql.ErrNoRows || err == nil {
        return res, nil
    }

    return nil, err
}
|
package app
import (
"errors"
"fmt"
"reflect"
"strconv"
"strings"
json "github.com/mohae/customjson"
"github.com/mohae/utilitybelt/deepcopy"
)
// Builder constants
const (
    UnsupportedBuilder Builder = iota
    Common
    Custom
    AmazonChroot
    AmazonEBS
    AmazonInstance
    DigitalOcean
    Docker
    GoogleCompute
    Null
    OpenStack
    Parallels
    QEMU
    VirtualBoxISO
    VirtualBoxOVF
    VMWareISO
    VMWareVMX
)

// Builder is a Packer supported builder.
type Builder int

// builders maps each Builder constant, by index, to its canonical lowercase
// name. It must stay in sync with the const block above; entry 0 is the
// "unsupported" sentinel.
var builders = [...]string{
    "unsupported builder",
    "common",
    "custom",
    "amazon-chroot",
    "amazon-ebs",
    "amazon-instance",
    "digitalocean",
    "docker",
    "googlecompute",
    "null",
    "openstack",
    "parallels",
    "qemu",
    "virtualbox-iso",
    "virtualbox-ovf",
    "vmware-iso",
    "vmware-vmx",
}

// String returns the canonical name of the builder.
func (b Builder) String() string { return builders[b] }

// BuilderFromString returns the builder constant for the passed string or
// unsupported. All incoming strings are normalized to lowercase.
func BuilderFromString(s string) Builder {
    s = strings.ToLower(s)
    // Scan the shared name table instead of a hand-written switch; index 0
    // (the sentinel) is skipped so it can never be matched.
    for i := int(Common); i < len(builders); i++ {
        if builders[i] == s {
            return Builder(i)
        }
    }
    return UnsupportedBuilder
}
// createBuilders takes each raw builder named in the template's BuilderIDs
// and creates the appropriate Packer Builder settings map for it. The
// returned slice holds one settings map per builder ID, in order.
//
// Only the builder.Settings field is used for CommonBuilder as everything
// else is usually builder specific, even if they have common names, e.g.
// difference between specifying memory between VMWare and VirtualBox.
func (r *rawTemplate) createBuilders() (bldrs []interface{}, err error) {
	if len(r.BuilderIDs) == 0 {
		return nil, fmt.Errorf("unable to create builders: none specified")
	}
	bldrs = make([]interface{}, len(r.BuilderIDs))
	// Generate the builders for each builder type.
	for i, ID := range r.BuilderIDs {
		bldr, ok := r.Builders[ID]
		if !ok {
			return nil, fmt.Errorf("builder configuration for %s not found", ID)
		}
		var tmpS map[string]interface{}
		typ := BuilderFromString(bldr.Type)
		switch typ {
		case AmazonChroot:
			tmpS, err = r.createAmazonChroot(ID)
			if err != nil {
				return nil, &Error{AmazonChroot.String(), err}
			}
		case AmazonEBS:
			tmpS, err = r.createAmazonEBS(ID)
			if err != nil {
				return nil, &Error{AmazonEBS.String(), err}
			}
		case AmazonInstance:
			tmpS, err = r.createAmazonInstance(ID)
			if err != nil {
				return nil, &Error{AmazonInstance.String(), err}
			}
		case DigitalOcean:
			tmpS, err = r.createDigitalOcean(ID)
			if err != nil {
				return nil, &Error{DigitalOcean.String(), err}
			}
		case Docker:
			tmpS, err = r.createDocker(ID)
			if err != nil {
				return nil, &Error{Docker.String(), err}
			}
		case GoogleCompute:
			tmpS, err = r.createGoogleCompute(ID)
			if err != nil {
				return nil, &Error{GoogleCompute.String(), err}
			}
		case Null:
			tmpS, err = r.createNull(ID)
			if err != nil {
				return nil, &Error{Null.String(), err}
			}
		case OpenStack:
			tmpS, err = r.createOpenStack(ID)
			if err != nil {
				// Fix: label the error with OpenStack; the original
				// mislabeled it with Null.
				return nil, &Error{OpenStack.String(), err}
			}
		// case ParallelsISO, ParallelsPVM:
		case QEMU:
			tmpS, err = r.createQEMU(ID)
			if err != nil {
				return nil, &Error{QEMU.String(), err}
			}
		case VirtualBoxISO:
			tmpS, err = r.createVirtualBoxISO(ID)
			if err != nil {
				return nil, &Error{VirtualBoxISO.String(), err}
			}
		case VirtualBoxOVF:
			tmpS, err = r.createVirtualBoxOVF(ID)
			if err != nil {
				return nil, &Error{VirtualBoxOVF.String(), err}
			}
		case VMWareISO:
			tmpS, err = r.createVMWareISO(ID)
			if err != nil {
				return nil, &Error{VMWareISO.String(), err}
			}
		case VMWareVMX:
			tmpS, err = r.createVMWareVMX(ID)
			if err != nil {
				return nil, &Error{VMWareVMX.String(), err}
			}
		default:
			// Fix: report the template's original type string. typ is
			// always UnsupportedBuilder here, so the original's
			// typ.String() always printed "unsupported builder" instead
			// of the offending value.
			return nil, &Error{UnsupportedBuilder.String(), fmt.Errorf("%q is not supported", bldr.Type)}
		}
		bldrs[i] = tmpS
	}
	return bldrs, nil
}
// settingsToMap converts the builder's Settings slice into a map. Each
// "key=value" entry is split into its constituent parts and the value is
// run through variable replacement so the settings are fully resolved.
func (b *builder) settingsToMap(r *rawTemplate) map[string]interface{} {
	resolved := make(map[string]interface{}, len(b.Settings))
	for _, entry := range b.Settings {
		key, val := parseVar(entry)
		resolved[key] = r.replaceVariables(val)
	}
	return resolved
}
// createAmazonChroot creates a map of settings for Packer's amazon-chroot
// builder. Any values that aren't supported by the amazon-chroot builder
// are ignored. Any required settings that don't exist result in an error
// and processing of the builder is stopped. For more information, refer to
// https://packer.io/docs/builders/amazon-chroot.html
//
// In addition to the following options, Packer communicators are supported.
// Check the communicator docs for valid options.
//
// Required configuration options:
//   access_key               string
//   ami_name                 string
//   secret_key               string
//   source_ami               string
// Optional configuration options:
//   ami_description          string
//   ami_groups               array of strings
//   ami_product_codes        array of strings
//   ami_regions              array of strings
//   ami_users                array of strings
//   ami_virtualization_type  string
//   chroot_mounts            array of array of strings
//   command_wrapper          string
//   copy_files               array of strings
//   device_path              string
//   enhanced_networking      bool
//   force_deregister         bool
//   mount_options            array of strings
//   mount_path               string
//   root_volume_size         int
//   tags                     object of key/value strings
func (r *rawTemplate) createAmazonChroot(ID string) (settings map[string]interface{}, err error) {
	// Fix: look the builder config up by ID; the original indexed by
	// AmazonChroot.String(), which fails for custom-named builds and is
	// inconsistent with every other create function.
	_, ok := r.Builders[ID]
	if !ok {
		return nil, NewErrConfigNotFound(ID)
	}
	settings = make(map[string]interface{})
	// Each create function is responsible for setting its own type.
	settings["type"] = AmazonChroot.String()
	// Merge the settings between common and this builder.
	var workSlice []string
	_, ok = r.Builders[Common.String()]
	if ok {
		workSlice, err = mergeSettingsSlices(r.Builders[Common.String()].Settings, r.Builders[ID].Settings)
		if err != nil {
			return nil, err
		}
	} else {
		workSlice = r.Builders[ID].Settings
	}
	var k, v string
	var hasAccessKey, hasAmiName, hasSecretKey, hasSourceAmi bool
	// Check for communicator settings first.
	_, err = r.processCommunicator(ID, workSlice, settings)
	if err != nil {
		return nil, err
	}
	// Go through each element in the slice, only take the ones that matter
	// to this builder.
	for _, s := range workSlice {
		k, v = parseVar(s)
		v = r.replaceVariables(v)
		switch k {
		case "access_key":
			settings[k] = v
			hasAccessKey = true
		case "ami_name":
			settings[k] = v
			hasAmiName = true
		case "ami_description":
			settings[k] = v
		case "ami_virtualization_type":
			settings[k] = v
		case "command_wrapper":
			settings[k] = v
		case "device_path":
			settings[k] = v
		case "enhanced_networking":
			settings[k], _ = strconv.ParseBool(v)
		case "force_deregister":
			settings[k], _ = strconv.ParseBool(v)
		case "mount_path":
			settings[k] = v
		case "root_volume_size":
			settings[k], err = strconv.Atoi(v)
			// Fix: only fail when the value isn't a valid int; the
			// original returned the error unconditionally, making any
			// template with root_volume_size unbuildable.
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
		case "secret_key":
			settings[k] = v
			hasSecretKey = true
		case "source_ami":
			settings[k] = v
			hasSourceAmi = true
		}
	}
	// Enforce the required settings.
	if !hasAccessKey {
		return nil, &RequiredSettingError{ID, "access_key"}
	}
	if !hasAmiName {
		return nil, &RequiredSettingError{ID, "ami_name"}
	}
	if !hasSecretKey {
		return nil, &RequiredSettingError{ID, "secret_key"}
	}
	if !hasSourceAmi {
		return nil, &RequiredSettingError{ID, "source_ami"}
	}
	// Process the Arrays.
	for name, val := range r.Builders[ID].Arrays {
		// If it's not a supported array group, skip it.
		switch name {
		case "ami_groups", "ami_product_codes", "ami_regions", "ami_users",
			"chroot_mounts", "copy_files", "mount_options", "tags":
		default:
			// not supported; skip
			continue
		}
		array := deepcopy.Iface(val)
		if array != nil {
			settings[name] = array
		}
	}
	return settings, nil
}
// createAmazonEBS creates a map of settings for Packer's amazon-ebs builder.
// Any values that aren't supported by the amazon-ebs builder are ignored. Any
// required settings that don't exist result in an error and processing of the
// builder is stopped. For more information, refer to
// https://packer.io/docs/builders/amazon-ebs.html
//
// In addition to the following options, Packer communicators are supported.
// Check the communicator docs for valid options.
//
// Required configuration options:
//   access_key                    string
//   ami_name                      string
//   instance_type                 string
//   region                        string
//   secret_key                    string
//   source_ami                    string
//   ssh_username                  string
// Optional configuration options:
//   ami_block_device_mappings     array of block device mappings
//     delete_on_termination       bool
//     device_name                 string
//     encrypted                   bool
//     iops                        int
//     no_device                   bool
//     snapshot_id                 string
//     virtual_name                string
//     volume_type                 string
//     volume_size                 int
//   ami_description               string
//   ami_groups                    array of strings
//   ami_product_codes             array of strings
//   ami_regions                   array of strings
//   ami_users                     array of strings
//   associate_public_ip_address   bool
//   availability_zone             string
//   ebs_optimized                 bool
//   enhanced_networking           bool
//   force_deregister              bool
//   iam_instance_profile          string
//   launch_block_device_mappings  array of block device mappings
//   run_tags                      object of key/value strings
//   security_group_id             string
//   security_group_ids            array of strings
//   spot_price                    string
//   spot_price_auto_product       string
//   ssh_keypair_name              string
//   ssh_private_ip                bool
//   ssh_private_key_file          string
//   subnet_id                     string
//   tags                          object of key/value strings
//   temporary_key_pair_name       string
//   token                         string
//   user_data                     string
//   user_data_file                string
//   volume_run_tags               object of key/value strings
//   vpc_id                        string
//   windows_password_timeout      string
func (r *rawTemplate) createAmazonEBS(ID string) (settings map[string]interface{}, err error) {
	_, ok := r.Builders[ID]
	if !ok {
		return nil, NewErrConfigNotFound(ID)
	}
	settings = make(map[string]interface{})
	// Each create function is responsible for setting its own type.
	settings["type"] = AmazonEBS.String()
	// Merge the settings between common and this builder.
	var workSlice []string
	_, ok = r.Builders[Common.String()]
	if ok {
		workSlice, err = mergeSettingsSlices(r.Builders[Common.String()].Settings, r.Builders[ID].Settings)
		if err != nil {
			return nil, err
		}
	} else {
		workSlice = r.Builders[ID].Settings
	}
	var k, v string
	var hasAccessKey, hasAmiName, hasInstanceType, hasRegion, hasSecretKey bool
	var hasSourceAmi, hasUsername, hasCommunicator bool
	// Process the communicator settings first; prefix identifies which
	// communicator (e.g. "ssh", "winrm") was configured, if any.
	prefix, err := r.processCommunicator(ID, workSlice, settings)
	if err != nil {
		return nil, err
	}
	// See if the required settings include username/password.
	if prefix != "" {
		_, ok = settings[prefix+"_username"]
		if ok {
			hasUsername = true
		}
		hasCommunicator = true
	}
	// Go through each element in the slice, only take the ones that matter
	// to this builder.
	for _, s := range workSlice {
		k, v = parseVar(s)
		v = r.replaceVariables(v)
		switch k {
		case "access_key":
			settings[k] = v
			hasAccessKey = true
		case "ami_description":
			settings[k] = v
		case "ami_name":
			settings[k] = v
			hasAmiName = true
		case "associate_public_ip_address":
			settings[k], _ = strconv.ParseBool(v)
		case "availability_zone":
			settings[k] = v
		case "ebs_optimized":
			// Fix: documented as supported (and handled by the
			// amazon-instance builder) but missing from the original switch.
			settings[k], _ = strconv.ParseBool(v)
		case "enhanced_networking":
			settings[k], _ = strconv.ParseBool(v)
		case "force_deregister":
			settings[k], _ = strconv.ParseBool(v)
		case "iam_instance_profile":
			settings[k] = v
		case "instance_type":
			settings[k] = v
			hasInstanceType = true
		case "region":
			settings[k] = v
			hasRegion = true
		case "secret_key":
			settings[k] = v
			hasSecretKey = true
		case "security_group_id":
			settings[k] = v
		case "source_ami":
			settings[k] = v
			hasSourceAmi = true
		case "spot_price":
			settings[k] = v
		case "spot_price_auto_product":
			settings[k] = v
		case "ssh_keypair_name":
			// Only process if there's no communicator or if the communicator is SSH.
			if hasCommunicator && prefix != "ssh" {
				continue
			}
			settings[k] = v
		case "ssh_private_ip":
			// Fix: documented as supported (and handled by the
			// amazon-instance builder) but missing from the original switch.
			// Only process if there's no communicator or if the communicator is SSH.
			if hasCommunicator && prefix != "ssh" {
				continue
			}
			settings[k], _ = strconv.ParseBool(v)
		case "ssh_private_key_file":
			// Only process if there's no communicator or if the communicator is SSH.
			if hasCommunicator && prefix != "ssh" {
				continue
			}
			settings[k] = v
		case "ssh_username":
			// Only set if there wasn't a communicator to process.
			if hasCommunicator {
				continue
			}
			settings[k] = v
			hasUsername = true
		case "subnet_id":
			settings[k] = v
		case "temporary_key_pair_name":
			settings[k] = v
		case "token":
			settings[k] = v
		case "user_data":
			settings[k] = v
		case "user_data_file":
			src, err := r.findComponentSource(AmazonEBS.String(), v, false)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			// If the source couldn't be found and an error wasn't generated,
			// replace src with the original value; this occurs when it is an
			// example. Nothing should be copied in this instance and it
			// should not be added to the copy info.
			if src != "" {
				r.files[r.buildOutPath(AmazonEBS.String(), v)] = src
			}
			settings[k] = r.buildTemplateResourcePath(AmazonEBS.String(), v)
		case "vpc_id":
			settings[k] = v
		case "windows_password_timeout":
			// Don't set if there's a non WinRM communicator.
			if hasCommunicator && prefix != "winrm" {
				continue
			}
			settings[k] = v
		}
	}
	// Enforce the required settings.
	if !hasAccessKey {
		return nil, &RequiredSettingError{ID, "access_key"}
	}
	if !hasAmiName {
		return nil, &RequiredSettingError{ID, "ami_name"}
	}
	if !hasInstanceType {
		return nil, &RequiredSettingError{ID, "instance_type"}
	}
	if !hasRegion {
		return nil, &RequiredSettingError{ID, "region"}
	}
	if !hasSecretKey {
		return nil, &RequiredSettingError{ID, "secret_key"}
	}
	if !hasSourceAmi {
		return nil, &RequiredSettingError{ID, "source_ami"}
	}
	if !hasUsername {
		// If there isn't a prefix, use ssh as that's the setting
		// that's required according to the docs.
		if prefix == "" {
			prefix = "ssh"
		}
		return nil, &RequiredSettingError{ID, prefix + "_username"}
	}
	// Process the Arrays.
	for name, val := range r.Builders[ID].Arrays {
		// Only process supported array settings.
		switch name {
		case "ami_block_device_mappings":
			// do ami_block_device_mappings processing
			settings[name], err = r.processAMIBlockDeviceMappings(val)
			if err != nil {
				return nil, &SettingError{ID, "ami_block_device_mappings", "", err}
			}
			continue
		case "ami_groups", "ami_product_codes", "ami_regions", "ami_users",
			"launch_block_device_mappings", "run_tags", "security_group_ids", "tags":
		default:
			continue
		}
		array := deepcopy.Iface(val)
		if array != nil {
			settings[name] = array
		}
	}
	return settings, nil
}
// createAmazonInstance creates a map of settings for Packer's amazon-instance
// builder. Any values that aren't supported by the amazon-instance builder
// are ignored. Any required settings that don't exist result in an error and
// processing of the builder is stopped. For more information, refer to
// https://packer.io/docs/builders/amazon-instance.html
//
// In addition to the following options, Packer communicators are supported.
// Check the communicator docs for valid options.
//
// Required configuration options:
//   access_key                    string
//   account_id                    string
//   ami_name                      string
//   instance_type                 string
//   region                        string
//   s3_bucket                     string
//   secret_key                    string
//   source_ami                    string
//   ssh_username                  string
//   x509_cert_path                string
//   x509_key_path                 string
// Optional configuration options:
//   ami_block_device_mappings     array of block device mappings
//     delete_on_termination       bool
//     device_name                 string
//     encrypted                   bool
//     iops                        int
//     no_device                   bool
//     snapshot_id                 string
//     virtual_name                string
//     volume_size                 int
//     volume_type                 string
//   ami_description               string
//   ami_groups                    array of strings
//   ami_product_codes             array of strings
//   ami_regions                   array of strings
//   ami_users                     array of strings
//   ami_virtualization_type       string
//   associate_public_ip_address   bool
//   availability_zone             string
//   bundle_destination            string
//   bundle_prefix                 string
//   bundle_upload_command         string
//   bundle_vol_command            string
//   ebs_optimized                 bool
//   enhanced_networking           bool
//   force_deregister              bool
//   iam_instance_profile          string
//   launch_block_device_mappings  array of block device mappings
//   run_tags                      object of key/value strings
//   security_group_id             string
//   security_group_ids            array of strings
//   spot_price                    string
//   spot_price_auto_product       string
//   ssh_keypair_name              string
//   ssh_private_ip                bool
//   ssh_private_key_file          string
//   subnet_id                     string
//   tags                          object of key/value strings
//   temporary_key_pair_name       string
//   user_data                     string
//   user_data_file                string
//   vpc_id                        string
//   x509_upload_path              string
//   windows_password_timeout      string
func (r *rawTemplate) createAmazonInstance(ID string) (settings map[string]interface{}, err error) {
	_, ok := r.Builders[ID]
	if !ok {
		return nil, NewErrConfigNotFound(ID)
	}
	settings = make(map[string]interface{})
	// Each create function is responsible for setting its own type.
	settings["type"] = AmazonInstance.String()
	// Merge the settings between common and this builder.
	var workSlice []string
	_, ok = r.Builders[Common.String()]
	if ok {
		workSlice, err = mergeSettingsSlices(r.Builders[Common.String()].Settings, r.Builders[ID].Settings)
		if err != nil {
			return nil, err
		}
	} else {
		workSlice = r.Builders[ID].Settings
	}
	var k, v string
	var hasAccessKey, hasAccountID, hasAmiName, hasInstanceType, hasRegion, hasS3Bucket bool
	var hasSecretKey, hasSourceAmi, hasUsername, hasX509CertPath, hasX509KeyPath, hasCommunicator bool
	// Check for communicator settings first; prefix identifies which
	// communicator (e.g. "ssh", "winrm") was configured, if any.
	prefix, err := r.processCommunicator(ID, workSlice, settings)
	if err != nil {
		return nil, err
	}
	// See if the required settings include username/password.
	if prefix != "" {
		_, ok = settings[prefix+"_username"]
		if ok {
			hasUsername = true
		}
		hasCommunicator = true
	}
	// Go through each element in the slice, only take the ones that matter
	// to this builder.
	for _, s := range workSlice {
		k, v = parseVar(s)
		v = r.replaceVariables(v)
		switch k {
		case "access_key":
			settings[k] = v
			hasAccessKey = true
		case "account_id":
			settings[k] = v
			hasAccountID = true
		case "ami_description":
			settings[k] = v
		case "ami_name":
			settings[k] = v
			hasAmiName = true
		case "ami_virtualization_type":
			settings[k] = v
		case "associate_public_ip_address":
			settings[k], _ = strconv.ParseBool(v)
		case "availability_zone":
			settings[k] = v
		case "bundle_destination":
			settings[k] = v
		case "bundle_prefix":
			settings[k] = v
		case "bundle_upload_command":
			if !strings.HasSuffix(v, ".command") {
				// The value is the command.
				settings[k] = v
				continue
			}
			// The value is a command file, load the contents of the file.
			cmds, err := r.commandsFromFile(AmazonInstance.String(), v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			cmd := commandFromSlice(cmds)
			if cmd == "" {
				return nil, &SettingError{ID, k, v, ErrNoCommands}
			}
			settings[k] = cmd
		case "bundle_vol_command":
			if !strings.HasSuffix(v, ".command") {
				// The value is the command.
				settings[k] = v
				continue
			}
			// The value is a command file, load the contents of the file.
			cmds, err := r.commandsFromFile(AmazonInstance.String(), v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			cmd := commandFromSlice(cmds)
			if cmd == "" {
				return nil, &SettingError{ID, k, v, ErrNoCommands}
			}
			settings[k] = cmd
		case "ebs_optimized":
			settings[k], _ = strconv.ParseBool(v)
		case "enhanced_networking":
			settings[k], _ = strconv.ParseBool(v)
		case "force_deregister":
			settings[k], _ = strconv.ParseBool(v)
		case "iam_instance_profile":
			settings[k] = v
		case "instance_type":
			settings[k] = v
			hasInstanceType = true
		case "region":
			settings[k] = v
			hasRegion = true
		case "s3_bucket":
			settings[k] = v
			hasS3Bucket = true
		case "secret_key":
			settings[k] = v
			hasSecretKey = true
		case "security_group_id":
			settings[k] = v
		case "spot_price":
			settings[k] = v
		case "spot_price_auto_product":
			settings[k] = v
		case "ssh_keypair_name":
			// Don't process if there's a communicator and it wasn't SSH.
			if hasCommunicator && prefix != "ssh" {
				continue
			}
			settings[k] = v
		case "ssh_private_ip":
			// Don't process if there's a communicator and it wasn't SSH.
			if hasCommunicator && prefix != "ssh" {
				continue
			}
			settings[k], _ = strconv.ParseBool(v)
		case "ssh_private_key_file":
			// Don't process if there was a communicator.
			if hasCommunicator {
				continue
			}
			settings[k] = v
		case "source_ami":
			settings[k] = v
			hasSourceAmi = true
		case "ssh_username":
			// Don't process if there was a communicator.
			if hasCommunicator {
				continue
			}
			settings[k] = v
			hasUsername = true
		case "subnet_id":
			settings[k] = v
		case "temporary_key_pair_name":
			settings[k] = v
		case "user_data":
			settings[k] = v
		case "user_data_file":
			src, err := r.findComponentSource(AmazonInstance.String(), v, false)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			// If the source couldn't be found and an error wasn't generated,
			// replace src with the original value; this occurs when it is an
			// example. Nothing should be copied in this instance and it
			// should not be added to the copy info.
			if src != "" {
				// Fix: register the copy under this builder's out path; the
				// original used AmazonEBS.String() here (copy/paste bug),
				// placing the file under the wrong builder's directory.
				r.files[r.buildOutPath(AmazonInstance.String(), v)] = src
			}
			settings[k] = r.buildTemplateResourcePath(AmazonInstance.String(), v)
		case "vpc_id":
			settings[k] = v
		case "windows_password_timeout":
			// Don't process if there was a communicator and it wasn't WinRM.
			if hasCommunicator && prefix != "winrm" {
				continue
			}
			settings[k] = v
		case "x509_cert_path":
			settings[k] = v
			hasX509CertPath = true
		case "x509_key_path":
			settings[k] = v
			hasX509KeyPath = true
		case "x509_upload_path":
			settings[k] = v
		}
	}
	// Enforce the required settings.
	if !hasAccessKey {
		return nil, &RequiredSettingError{ID, "access_key"}
	}
	if !hasAccountID {
		return nil, &RequiredSettingError{ID, "account_id"}
	}
	if !hasAmiName {
		return nil, &RequiredSettingError{ID, "ami_name"}
	}
	if !hasInstanceType {
		return nil, &RequiredSettingError{ID, "instance_type"}
	}
	if !hasRegion {
		return nil, &RequiredSettingError{ID, "region"}
	}
	if !hasS3Bucket {
		return nil, &RequiredSettingError{ID, "s3_bucket"}
	}
	if !hasSecretKey {
		return nil, &RequiredSettingError{ID, "secret_key"}
	}
	if !hasSourceAmi {
		return nil, &RequiredSettingError{ID, "source_ami"}
	}
	if !hasUsername {
		// If prefix was empty, no communicator was used which means
		// ssh_username is expected.
		if prefix == "" {
			prefix = "ssh"
		}
		return nil, &RequiredSettingError{ID, prefix + "_username"}
	}
	if !hasX509CertPath {
		return nil, &RequiredSettingError{ID, "x509_cert_path"}
	}
	if !hasX509KeyPath {
		return nil, &RequiredSettingError{ID, "x509_key_path"}
	}
	// Process the Arrays.
	for name, val := range r.Builders[ID].Arrays {
		// If it's not a supported array group, skip it.
		switch name {
		case "ami_block_device_mappings":
			// do ami_block_device_mappings processing
			settings[name], err = r.processAMIBlockDeviceMappings(val)
			if err != nil {
				return nil, &SettingError{ID, "ami_block_device_mappings", "", err}
			}
			continue
		case "ami_groups", "ami_product_codes", "ami_regions", "ami_users",
			"launch_block_device_mappings", "run_tags", "security_group_ids", "tags":
		default:
			continue
		}
		array := deepcopy.Iface(val)
		if array != nil {
			settings[name] = array
		}
	}
	return settings, nil
}
// processAMIBlockDeviceMappings handles the ami_block_device_mappings
// array for Amazon builders. The mappings must be in the form of either
// []map[string]interface{} or [][]string. An error occurs if the data is
// anything else.
//
// For []map[string]interface{}, the data is returned without additional
// processing. Processing of the []map to only use valid keys may be added
// at some point in the future.
//
// For [][]string, each "key=value" string is converted into a key/value
// pair and placed in a map[string]interface{}. Values that are not
// supported settings for ami_block_device_mappings are ignored. The
// returned interface{} only includes the supported settings. When settings
// that are ints have invalid values specified, an error is returned.
func (r *rawTemplate) processAMIBlockDeviceMappings(v interface{}) (interface{}, error) {
	// Already in the final shape; pass it through untouched.
	if reflect.TypeOf(v) == reflect.TypeOf([]map[string]interface{}{}) {
		return v, nil
	}
	// Process the [][]string into a []map[string]interface{}.
	slices, ok := v.([][]string)
	if !ok {
		return nil, errors.New("not in a supported format")
	}
	ret := make([]map[string]interface{}, len(slices))
	for i, settings := range slices {
		vals := map[string]interface{}{}
		for _, setting := range settings {
			k, v := parseVar(setting)
			switch k {
			case "delete_on_termination":
				vals[k], _ = strconv.ParseBool(v)
			case "device_name":
				vals[k] = v
			case "encrypted":
				vals[k], _ = strconv.ParseBool(v)
			case "iops":
				n, err := strconv.Atoi(v)
				if err != nil {
					return nil, fmt.Errorf("iops: %s", err)
				}
				vals[k] = n
			case "no_device":
				vals[k], _ = strconv.ParseBool(v)
			case "snapshot_id":
				vals[k] = v
			case "virtual_name":
				vals[k] = v
			case "volume_size":
				n, err := strconv.Atoi(v)
				if err != nil {
					// Fix: report volume_size; the original said "iops"
					// (copy/paste bug).
					return nil, fmt.Errorf("volume_size: %s", err)
				}
				vals[k] = n
			case "volume_type":
				vals[k] = v
			}
		}
		ret[i] = vals
	}
	return ret, nil
}
// createDigitalOcean creates a map of settings for Packer's digitalocean
// builder. Any values that aren't supported by the digitalocean builder are
// ignored. Any required settings that don't exist result in an error and
// processing of the builder is stopped. For more information, refer to
// https://packer.io/docs/builders/digitalocean.html
//
// In addition to the following options, Packer communicators are supported.
// Check the communicator docs for valid options.
//
// Required configuration options:
//   api_token           string
//   image               string
//   region              string
//   size                string
// Optional configuration options:
//   droplet_name        string
//   private_networking  bool
//   snapshot_name       string
//   state_timeout       string
//   user_data           string
func (r *rawTemplate) createDigitalOcean(ID string) (settings map[string]interface{}, err error) {
	_, ok := r.Builders[ID]
	if !ok {
		return nil, NewErrConfigNotFound(ID)
	}
	settings = make(map[string]interface{})
	// Each create function is responsible for setting its own type.
	settings["type"] = DigitalOcean.String()
	// If a common builder was defined, merge the settings between common and this builders.
	_, ok = r.Builders[Common.String()]
	var workSlice []string
	if ok {
		workSlice, err = mergeSettingsSlices(r.Builders[Common.String()].Settings, r.Builders[ID].Settings)
		if err != nil {
			return nil, err
		}
	} else {
		workSlice = r.Builders[ID].Settings
	}
	// Process any communicator settings (e.g. ssh_*) before the
	// builder-specific ones; the prefix result is not needed here.
	_, err = r.processCommunicator(ID, workSlice, settings)
	if err != nil {
		return nil, err
	}
	// Go through each element in the slice, only take the ones that matter
	// to this builder.
	var hasAPIToken, hasImage, hasRegion, hasSize bool
	for _, s := range workSlice {
		// Split "key=value" and resolve template variables in the value.
		k, v := parseVar(s)
		v = r.replaceVariables(v)
		switch k {
		case "api_token":
			settings[k] = v
			hasAPIToken = true
		case "droplet_name":
			settings[k] = v
		case "image":
			settings[k] = v
			hasImage = true
		case "private_networking":
			// Parse errors silently fall back to false.
			settings[k], _ = strconv.ParseBool(v)
		case "region":
			settings[k] = v
			hasRegion = true
		case "size":
			settings[k] = v
			hasSize = true
		case "snapshot_name":
			settings[k] = v
		case "state_timeout":
			settings[k] = v
		case "user_data":
			settings[k] = v
		}
	}
	// Enforce the builder's required settings.
	if !hasAPIToken {
		return nil, &RequiredSettingError{ID, "api_token"}
	}
	if !hasImage {
		return nil, &RequiredSettingError{ID, "image"}
	}
	if !hasRegion {
		return nil, &RequiredSettingError{ID, "region"}
	}
	if !hasSize {
		return nil, &RequiredSettingError{ID, "size"}
	}
	return settings, nil
}
// createDocker creates a map of settings for Packer's docker builder. Any
// values that aren't supported by the docker builder are ignored. Any
// required settings that don't exist result in an error and processing of the
// builder is stopped. For more information, refer to
// https://packer.io/docs/builders/docker.html
//
// In addition to the following options, Packer communicators are supported.
// Check the communicator docs for valid options.
//
// Required configuration options:
//   commit          bool
//   discard         bool
//   export_path     string
//   image           string
// Optional configuration options:
//   login           bool
//   login_email     string
//   login_username  string
//   login_password  string
//   login_server    string
//   pull            bool
//   run_command     array of strings
//   volumes         map of strings to strings
func (r *rawTemplate) createDocker(ID string) (settings map[string]interface{}, err error) {
	_, ok := r.Builders[ID]
	if !ok {
		return nil, NewErrConfigNotFound(ID)
	}
	settings = make(map[string]interface{})
	// Each create function is responsible for setting its own type.
	settings["type"] = Docker.String()
	// Merge the settings between common and this builders.
	var workSlice []string
	_, ok = r.Builders[Common.String()]
	if ok {
		workSlice, err = mergeSettingsSlices(r.Builders[Common.String()].Settings, r.Builders[ID].Settings)
		if err != nil {
			return nil, err
		}
	} else {
		workSlice = r.Builders[ID].Settings
	}
	// Process the communicator settings first, if there are any.
	_, err = r.processCommunicator(ID, workSlice, settings)
	if err != nil {
		return nil, err
	}
	// Go through each element in the slice, only take the ones that matter
	// to this builder.
	var hasCommit, hasDiscard, hasExportPath, hasImage, hasRunCommandArray bool
	// run_command may be given as a scalar setting naming a command file;
	// it is cached here and only loaded if no run_command array exists.
	var runCommandFile string
	for _, s := range workSlice {
		// Split "key=value" and resolve template variables in the value.
		k, v := parseVar(s)
		v = r.replaceVariables(v)
		switch k {
		case "commit":
			// Parse errors silently fall back to false.
			settings[k], _ = strconv.ParseBool(v)
			hasCommit = true
		case "discard":
			settings[k], _ = strconv.ParseBool(v)
			hasDiscard = true
		case "export_path":
			settings[k] = v
			hasExportPath = true
		case "image":
			settings[k] = v
			hasImage = true
		case "login":
			settings[k], _ = strconv.ParseBool(v)
		case "login_email":
			settings[k] = v
		case "login_password":
			settings[k] = v
		case "login_username":
			settings[k] = v
		case "login_server":
			settings[k] = v
		case "pull":
			settings[k], _ = strconv.ParseBool(v)
		case "run_command":
			// if it's here, cache the value, delay processing until arrays section
			runCommandFile = v
		}
	}
	// Enforce the builder's required settings.
	if !hasCommit {
		return nil, &RequiredSettingError{ID, "commit"}
	}
	if !hasDiscard {
		return nil, &RequiredSettingError{ID, "discard"}
	}
	if !hasExportPath {
		return nil, &RequiredSettingError{ID, "export_path"}
	}
	if !hasImage {
		return nil, &RequiredSettingError{ID, "image"}
	}
	// Process the Arrays.
	for name, val := range r.Builders[ID].Arrays {
		if name == "run_command" {
			// An explicit run_command array takes precedence over any
			// command file named in the scalar settings.
			array := deepcopy.Iface(val)
			if array != nil {
				settings[name] = array
			}
			hasRunCommandArray = true
			continue
		}
		if name == "volumes" {
			settings[name] = deepcopy.Iface(val)
		}
	}
	// if there wasn't an array of run commands, check to see if they should be loaded
	// from a file
	if !hasRunCommandArray {
		if runCommandFile != "" {
			commands, err := r.commandsFromFile(Docker.String(), runCommandFile)
			if err != nil {
				return nil, &SettingError{ID, "run_command", runCommandFile, err}
			}
			if len(commands) == 0 {
				return nil, &SettingError{ID, "run_command", runCommandFile, ErrNoCommands}
			}
			settings["run_command"] = commands
		}
	}
	return settings, nil
}
// createGoogleCompute creates a map of settings for Packer's googlecompute
// builder. Any values that aren't supported by the googlecompute builder are
// ignored. Any required settings that don't exist result in an error and
// processing of the builder is stopped. For more information, refer to
// https://packer.io/docs/builders/googlecompute.html
//
// In addition to the following options, Packer communicators are supported.
// Check the communicator docs for valid options.
//
// Required configuration options:
//   project_id         string
//   source_image       string
//   zone               string
// Optional configuration options:
//   account_file       string
//   address            string
//   disk_size          int
//   image_name         string
//   image_description  string
//   instance_name      string
//   machine_type       string
//   metadata           object of key/value strings
//   network            string
//   preemptible        bool
//   state_timeout      string
//   tags               array of strings
//   use_internal_ip    bool
func (r *rawTemplate) createGoogleCompute(ID string) (settings map[string]interface{}, err error) {
	_, ok := r.Builders[ID]
	if !ok {
		return nil, NewErrConfigNotFound(ID)
	}
	settings = make(map[string]interface{})
	// Each create function is responsible for setting its own type.
	settings["type"] = GoogleCompute.String()
	// Merge the settings between common and this builder.
	var workSlice []string
	_, ok = r.Builders[Common.String()]
	if ok {
		workSlice, err = mergeSettingsSlices(r.Builders[Common.String()].Settings, r.Builders[ID].Settings)
		if err != nil {
			return nil, err
		}
	} else {
		workSlice = r.Builders[ID].Settings
	}
	var hasProjectID, hasSourceImage, hasZone bool
	// Process communicator settings first.
	_, err = r.processCommunicator(ID, workSlice, settings)
	if err != nil {
		return nil, err
	}
	// Go through each element in the slice, only take the ones that matter
	// to this builder.
	for _, s := range workSlice {
		k, v := parseVar(s)
		v = r.replaceVariables(v)
		switch k {
		case "account_file":
			src, err := r.findComponentSource(GoogleCompute.String(), v, false)
			if err != nil {
				return nil, err
			}
			// If the source couldn't be found and an error wasn't generated,
			// keep the original value; this occurs when it is an example.
			// Nothing should be copied in this instance and it should not be
			// added to the copy info.
			if src != "" {
				r.files[r.buildOutPath(GoogleCompute.String(), v)] = src
			}
			settings[k] = r.buildTemplateResourcePath(GoogleCompute.String(), v)
		case "address":
			settings[k] = v
		case "disk_size":
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "image_name":
			settings[k] = v
		case "image_description":
			settings[k] = v
		case "instance_name":
			settings[k] = v
		case "machine_type":
			settings[k] = v
		case "network":
			settings[k] = v
		case "preemtible", "preemptible":
			// Fix: Packer's setting is spelled "preemptible"; the original
			// only matched the misspelling "preemtible", which is kept for
			// backward compatibility with existing templates. The value is
			// stored under whichever key the template used.
			settings[k], _ = strconv.ParseBool(v)
		case "project_id":
			settings[k] = v
			hasProjectID = true
		case "source_image":
			settings[k] = v
			hasSourceImage = true
		case "state_timeout":
			settings[k] = v
		case "use_internal_ip":
			settings[k], _ = strconv.ParseBool(v)
		case "zone":
			settings[k] = v
			hasZone = true
		}
	}
	// Enforce the required settings.
	if !hasProjectID {
		return nil, &RequiredSettingError{ID, "project_id"}
	}
	if !hasSourceImage {
		return nil, &RequiredSettingError{ID, "source_image"}
	}
	if !hasZone {
		return nil, &RequiredSettingError{ID, "zone"}
	}
	// Process the Arrays.
	for name, val := range r.Builders[ID].Arrays {
		if name == "metadata" {
			settings[name] = val
			continue
		}
		if name == "tags" {
			array := deepcopy.InterfaceToSliceOfStrings(val)
			if array != nil {
				settings[name] = array
			}
		}
	}
	return settings, nil
}
// createNull creates a map of settings for Packer's null builder. Any values
// that aren't supported by the null builder are ignored. Any required
// settings that don't exist result in an error and processing of the builder
// is stopped. For more information, refer to
// https://packer.io/docs/builders/null.html
//
// Configuration options:
//   Only settings provided by communicators are supported. See communicator
//   documentation.
//
// communicator == none is considered invalid.
func (r *rawTemplate) createNull(ID string) (settings map[string]interface{}, err error) {
	_, ok := r.Builders[ID]
	if !ok {
		return nil, NewErrConfigNotFound(ID)
	}
	settings = make(map[string]interface{})
	// Each create function is responsible for setting its own type.
	settings["type"] = Null.String()
	// Merge the settings between common and this builders.
	var workSlice []string
	_, ok = r.Builders[Common.String()]
	if ok {
		workSlice, err = mergeSettingsSlices(r.Builders[Common.String()].Settings, r.Builders[ID].Settings)
		if err != nil {
			return nil, err
		}
	} else {
		// Look the builder up by ID, consistent with the other create*
		// functions: the builder may be registered under an alias ID, in
		// which case Null.String() would be the wrong (or missing) key.
		workSlice = r.Builders[ID].Settings
	}
	prefix, err := r.processCommunicator(ID, workSlice, settings)
	if err != nil {
		return nil, err
	}
	if prefix == "" {
		// communicator == none; there must be a communicator
		return nil, fmt.Errorf("%s: %s builder requires a communicator other than \"none\"", ID, Null.String())
	}
	return settings, nil
}
// createOpenStack creates a map of settings for Packer's OpenStack builder.
// Any values that aren't supported by the OpenStack builder are ignored. Any
// required settings that don't exist result in an error and processing
// of the builder is stopped. For more information, refer to
// https://packer.io/docs/builders/openstack.html
//
// In addition to the following options, Packer communicators are supported.
// Check the communicator docs for valid options.
//
// Required configuration options:
//   flavor              string
//   image_name          string
//   source_image        string
//   username            string
//   password            string
// Optional configuration options:
//   api_key             string
//   availability_zone   string
//   config_drive        bool
//   floating_ip         string
//   floating_ip_pool    string
//   insecure            bool
//   metadata            bool
//   networks            array of strings
//   rackconnect_wait    bool
//   region              string
//   ssh_interface       string
//   security_groups     array of strings
//   tenant_id           string
//   tenant_name         string
//   use_floating_ip     bool
func (r *rawTemplate) createOpenStack(ID string) (settings map[string]interface{}, err error) {
	_, ok := r.Builders[ID]
	if !ok {
		return nil, NewErrConfigNotFound(ID)
	}
	settings = map[string]interface{}{}
	// Each create function is responsible for setting its own type.
	settings["type"] = OpenStack.String()
	// Merge the settings between common and this builders.
	var workSlice []string
	_, ok = r.Builders[Common.String()]
	if ok {
		workSlice, err = mergeSettingsSlices(r.Builders[Common.String()].Settings, r.Builders[ID].Settings)
		if err != nil {
			return nil, err
		}
	} else {
		workSlice = r.Builders[ID].Settings
	}
	var hasFlavor, hasImageName, hasSourceImage, hasUsername, hasPassword, hasCommunicator bool
	// check for communicator first
	prefix, err := r.processCommunicator(ID, workSlice, settings)
	if err != nil {
		return nil, err
	}
	// see if the required settings include username/password
	if prefix != "" {
		_, ok = settings[prefix+"_username"]
		if ok {
			hasUsername = true
		}
		_, ok = settings[prefix+"_password"]
		if ok {
			hasPassword = true
		}
		hasCommunicator = true
	}
	// Go through each element in the slice, only take the ones that matter
	// to this builder.
	for _, s := range workSlice {
		k, v := parseVar(s)
		v = r.replaceVariables(v)
		switch k {
		case "api_key":
			settings[k] = v
		case "availability_zone":
			settings[k] = v
		case "config_drive":
			// bool setting; unparsable values become false
			settings[k], _ = strconv.ParseBool(v)
		case "flavor":
			settings[k] = v
			hasFlavor = true
		case "floating_ip":
			settings[k] = v
		case "floating_ip_pool":
			settings[k] = v
		case "image_name":
			settings[k] = v
			hasImageName = true
		case "insecure":
			settings[k], _ = strconv.ParseBool(v)
		case "metadata":
			settings[k], _ = strconv.ParseBool(v)
		case "password":
			// skip if communicator was processed
			if hasCommunicator {
				continue
			}
			settings[k] = v
			hasPassword = true
		case "rackconnect_wait":
			settings[k], _ = strconv.ParseBool(v)
		case "region":
			settings[k] = v
		case "ssh_interface":
			// If there's a communicator and it's not SSH skip.
			if hasCommunicator && prefix != "ssh" {
				continue
			}
			settings[k] = v
		case "source_image":
			settings[k] = v
			hasSourceImage = true
		case "tenant_id":
			settings[k] = v
		case "tenant_name":
			settings[k] = v
		case "use_floating_ip":
			settings[k], _ = strconv.ParseBool(v)
		case "username":
			// skip if communicator was processed.
			if hasCommunicator {
				continue
			}
			settings[k] = v
			hasUsername = true
		}
	}
	// flavor is required
	if !hasFlavor {
		return nil, &RequiredSettingError{ID, "flavor"}
	}
	// image_name is required
	if !hasImageName {
		return nil, &RequiredSettingError{ID, "image_name"}
	}
	// source_image is required
	if !hasSourceImage {
		return nil, &RequiredSettingError{ID, "source_image"}
	}
	// Password is required
	if !hasPassword {
		if prefix == "" {
			return nil, &RequiredSettingError{ID, "password"}
		}
		return nil, &RequiredSettingError{ID, prefix + "_password"}
	}
	// Username is required
	if !hasUsername {
		if prefix == "" {
			return nil, &RequiredSettingError{ID, "username"}
		}
		return nil, &RequiredSettingError{ID, prefix + "_username"}
	}
	// Process the Arrays. Only metadata, networks, and security_groups are
	// valid for this builder; everything else is ignored.
	for name, val := range r.Builders[ID].Arrays {
		switch name {
		case "metadata":
		case "networks":
		case "security_groups":
		default:
			continue
		}
		array := deepcopy.Iface(val)
		if array != nil {
			settings[name] = array
		}
	}
	return settings, nil
}
// createQEMU creates a map of settings for Packer's QEMU builder. Any
// values that aren't supported by the QEMU builder are ignored. Any
// required settings that don't exist result in an error and processing
// of the builder is stopped. For more information, refer to
// https://packer.io/docs/builders/qemu.html
//
// In addition to the following options, Packer communicators are supported.
// Check the communicator docs for valid options.
//
// Required configuration options:
//   iso_checksum       string
//   iso_checksum_type  string
//   iso_url            string
//   ssh_username       string
// Optional configuration options:
//   accelerator        string
//   boot_command       array of strings
//   boot_wait          string
//   disk_cache         string
//   disk_compression   bool
//   disk_discard       string
//   disk_image         bool
//   disk_interface     string
//   disk_size          int
//   floppy_files       array_of_strings
//   format             string
//   headless           bool
//   http_directory     string
//   http_port_max      int
//   http_port_min      int
//   iso_target_path    string
//   iso_urls           array of strings
//   net_device         string
//   output_directory   string
//   qemuargs           array of array of strings
//   qemu_binary        string
//   skip_compaction    bool
func (r *rawTemplate) createQEMU(ID string) (settings map[string]interface{}, err error) {
	_, ok := r.Builders[ID]
	if !ok {
		return nil, NewErrConfigNotFound(ID)
	}
	settings = map[string]interface{}{}
	// Each create function is responsible for setting its own type.
	settings["type"] = QEMU.String()
	// Merge the settings between common and this builders.
	var workSlice []string
	_, ok = r.Builders[Common.String()]
	if ok {
		workSlice, err = mergeSettingsSlices(r.Builders[Common.String()].Settings, r.Builders[ID].Settings)
		if err != nil {
			return nil, err
		}
	} else {
		workSlice = r.Builders[ID].Settings
	}
	var bootCmdProcessed, hasChecksum, hasChecksumType, hasISOURL, hasUsername, hasCommunicator bool
	// check for communicator first
	prefix, err := r.processCommunicator(ID, workSlice, settings)
	if err != nil {
		return nil, err
	}
	// see if the required settings include username/password
	if prefix != "" {
		_, ok = settings[prefix+"_username"]
		if ok {
			hasUsername = true
		}
		hasCommunicator = true
	}
	// Go through each element in the slice, only take the ones that matter
	// to this builder.
	for _, s := range workSlice {
		k, v := parseVar(s)
		v = r.replaceVariables(v)
		switch k {
		case "accelerator":
			settings[k] = v
		case "boot_command":
			// if the boot_command exists in the Settings section, it should
			// reference a file. This boot_command takes precedence over any
			// boot_command in the array defined in the Arrays section.
			if strings.HasSuffix(v, ".command") {
				var commands []string
				commands, err = r.commandsFromFile("", v)
				if err != nil {
					return nil, &SettingError{ID, k, v, err}
				}
				if len(commands) == 0 {
					return nil, &SettingError{ID, k, v, ErrNoCommands}
				}
				settings[k] = commands
				bootCmdProcessed = true
			}
		case "boot_wait":
			settings[k] = v
		case "disk_cache":
			settings[k] = v
		case "disk_compression":
			settings[k], _ = strconv.ParseBool(v)
		case "disk_discard":
			settings[k] = v
		case "disk_image":
			settings[k], _ = strconv.ParseBool(v)
		case "disk_interface":
			settings[k] = v
		case "disk_size":
			// only add if it's an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "format":
			settings[k] = v
		case "headless":
			settings[k], _ = strconv.ParseBool(v)
		case "http_directory":
			settings[k] = v
		case "http_port_min":
			// only add if it's an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "http_port_max":
			// only add if it's an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "iso_checksum":
			settings[k] = v
			hasChecksum = true
		case "iso_checksum_type":
			settings[k] = v
			hasChecksumType = true
		case "iso_target_path":
			// TODO should this have path location?
			settings[k] = v
		case "iso_url":
			settings[k] = v
			hasISOURL = true
		case "net_device":
			settings[k] = v
		case "output_directory":
			settings[k] = v
		case "qemu_binary":
			settings[k] = v
		case "skip_compaction":
			settings[k], _ = strconv.ParseBool(v)
		case "ssh_username":
			// Skip if communicator exists; this was already processed during communicator processing.
			if hasCommunicator {
				continue
			}
			settings[k] = v
			hasUsername = true
		}
	}
	// Username is required
	if !hasUsername {
		return nil, &RequiredSettingError{ID, prefix + "_username"}
	}
	// make sure http_directory is set and add to dir list
	// TODO reconcile with above
	err = r.setHTTP(QEMU.String(), settings)
	if err != nil {
		return nil, err
	}
	for name, val := range r.Builders[ID].Arrays {
		switch name {
		case "boot_command":
			if bootCmdProcessed {
				continue // if the boot command was already set, don't use this array
			}
			settings[name] = val
		case "floppy_files":
			settings[name] = val
		case "iso_urls":
			// iso_url takes precedence
			if hasISOURL {
				continue
			}
			settings[name] = val
			hasISOURL = true
		case "qemuargs":
			settings[name] = val
		}
	}
	// iso_url (or iso_urls) is required; there is no fallback.
	// NOTE(review): a distro-based ISO lookup (CentOS/Debian/Ubuntu) used to
	// follow this guard, but it was unreachable dead code — this return fires
	// first whenever hasISOURL is false — so it has been removed.
	if !hasISOURL {
		return nil, &RequiredSettingError{ID, "iso_url"}
	}
	if !hasChecksum {
		return nil, &RequiredSettingError{ID: ID, Key: "iso_checksum"}
	}
	if !hasChecksumType {
		return nil, &RequiredSettingError{ID: ID, Key: "iso_checksum_type"}
	}
	return settings, nil
}
// createVirtualBoxISO creates a map of settings for Packer's virtualbox-iso
// builder. Any values that aren't supported by the virtualbox-iso builder are
// ignored. Any required settings that don't exist result in an error and
// processing of the builder is stopped. For more information, refer to
// https://packer.io/docs/builders/virtualbox-iso.html
//
// In addition to the following options, Packer communicators are supported.
// Check the communicator docs for valid options.
//
// Required configuration options:
//   iso_checksum            string
//   iso_checksum_type       string
//   iso_url                 string
//   ssh_password            string
//   ssh_username            string
// Optional configuration options:
//   boot_command            array of strings
//   boot_wait               string
//   disk_size               int
//   export_opts             array of strings
//   floppy_files            array of strings
//   format                  string; "ovf" or "ova"
//   guest_additions_mode    string
//   guest_additions_path    string
//   guest_additions_sha256  string
//   guest_additions_url     string
//   guest_os_type           string; if empty, generated by rancher
//   hard_drive_interface    string
//   headless                bool
//   http_directory          string
//   http_port_min           int
//   http_port_max           int
//   iso_interface           string
//   iso_target_path         string
//   iso_urls                array_of_strings
//   output_directory        string
//   shutdown_command        string
//   shutdown_timeout        string
//   ssh_host_port_min       int
//   ssh_host_port_max       int
//   ssh_skip_nat_mapping    bool
//   vboxmanage              array of array of strings
//   vboxmanage_post         array of array of strings
//   virtualbox_version_file string
//   vm_name                 string
func (r *rawTemplate) createVirtualBoxISO(ID string) (settings map[string]interface{}, err error) {
	_, ok := r.Builders[ID]
	if !ok {
		return nil, NewErrConfigNotFound(ID)
	}
	settings = map[string]interface{}{}
	// Each create function is responsible for setting its own type.
	settings["type"] = VirtualBoxISO.String()
	// Merge the settings between common and this builders.
	var workSlice []string
	_, ok = r.Builders[Common.String()]
	if ok {
		workSlice, err = mergeSettingsSlices(r.Builders[Common.String()].Settings, r.Builders[ID].Settings)
		if err != nil {
			return nil, err
		}
	} else {
		workSlice = r.Builders[ID].Settings
	}
	var bootCmdProcessed, hasChecksum, hasChecksumType, hasISOURL, hasUsername, hasPassword, hasCommunicator bool
	// check for communicator first
	prefix, err := r.processCommunicator(ID, workSlice, settings)
	if err != nil {
		return nil, err
	}
	// see if the required settings include username/password
	if prefix != "" {
		_, ok = settings[prefix+"_username"]
		if ok {
			hasUsername = true
		}
		_, ok = settings[prefix+"_password"]
		if ok {
			hasPassword = true
		}
		hasCommunicator = true
	}
	// Go through each element in the slice, only take the ones that matter
	// to this builder.
	for _, s := range workSlice {
		k, v := parseVar(s)
		v = r.replaceVariables(v)
		switch k {
		case "boot_command":
			// if the boot_command exists in the Settings section, it should
			// reference a file. This boot_command takes precedence over any
			// boot_command in the array defined in the Arrays section.
			if strings.HasSuffix(v, ".command") {
				var commands []string
				commands, err = r.commandsFromFile("", v)
				if err != nil {
					return nil, &SettingError{ID, k, v, err}
				}
				if len(commands) == 0 {
					return nil, &SettingError{ID, k, v, ErrNoCommands}
				}
				settings[k] = commands
				bootCmdProcessed = true
			}
		case "boot_wait":
			settings[k] = v
		case "disk_size":
			// only add if it's an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "format":
			settings[k] = v
		case "guest_additions_mode":
			settings[k] = v
		case "guest_additions_path":
			settings[k] = v
		case "guest_additions_sha256":
			settings[k] = v
		case "guest_additions_url":
			settings[k] = v
		case "guest_os_type":
			settings[k] = v
		case "hard_drive_interface":
			settings[k] = v
		case "headless":
			settings[k], _ = strconv.ParseBool(v)
		case "http_directory":
			settings[k] = v
		case "http_port_min":
			// only add if it's an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "http_port_max":
			// only add if it's an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "iso_checksum":
			settings[k] = v
			hasChecksum = true
		case "iso_checksum_type":
			settings[k] = v
			hasChecksumType = true
		case "iso_interface":
			settings[k] = v
		case "iso_target_path":
			// TODO should this have path location?
			settings[k] = v
		case "iso_url":
			settings[k] = v
			hasISOURL = true
		case "output_directory":
			settings[k] = v
		case "shutdown_command":
			//If it ends in .command, replace it with the command from the filepath
			if strings.HasSuffix(v, ".command") {
				var commands []string
				commands, err = r.commandsFromFile("", v)
				if err != nil {
					return nil, &SettingError{ID, k, v, err}
				}
				if len(commands) == 0 {
					return nil, &SettingError{ID, k, v, ErrNoCommands}
				}
				// Assume it's the first element.
				settings[k] = commands[0]
			} else {
				settings[k] = v // the value is the command
			}
		case "shutdown_timeout":
			settings[k] = v
		case "ssh_host_port_min":
			// Skip if prefix == winrm as SSH settings don't apply to WinRM
			if prefix == "winrm" {
				continue
			}
			// only add if it's an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "ssh_host_port_max":
			// Skip if prefix == winrm as SSH settings don't apply to WinRM
			if prefix == "winrm" {
				continue
			}
			// only add if it's an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "ssh_password":
			// Skip if communicator exists; this was already processed during communicator processing.
			if hasCommunicator {
				continue
			}
			settings[k] = v
			hasPassword = true
		case "ssh_username":
			// Skip if communicator exists; this was already processed during communicator processing.
			if hasCommunicator {
				continue
			}
			settings[k] = v
			hasUsername = true
		case "virtualbox_version_file":
			// TODO: should this have path resolution?
			settings[k] = v
		case "vm_name":
			settings[k] = v
		}
	}
	// Username is required
	if !hasUsername {
		return nil, &RequiredSettingError{ID, prefix + "_username"}
	}
	// Password is required
	if !hasPassword {
		return nil, &RequiredSettingError{ID, prefix + "_password"}
	}
	// make sure http_directory is set and add to dir list
	// TODO reconcile with above
	err = r.setHTTP(VirtualBoxISO.String(), settings)
	if err != nil {
		return nil, err
	}
	for name, val := range r.Builders[ID].Arrays {
		switch name {
		case "boot_command":
			if bootCmdProcessed {
				continue // if the boot command was already set, don't use this array
			}
			settings[name] = val
		case "export_opts":
			settings[name] = val
		case "floppy_files":
			settings[name] = val
		case "iso_urls":
			// iso_url takes precedence
			if hasISOURL {
				continue
			}
			settings[name] = val
			hasISOURL = true
		case "vboxmanage":
			settings[name] = r.createVBoxManage(val)
		case "vboxmanage_post":
			settings[name] = r.createVBoxManage(val)
		}
	}
	// iso_url (or iso_urls) is required; there is no fallback.
	// NOTE(review): a distro-based ISO lookup (CentOS/Debian/Ubuntu) used to
	// follow this guard, but it was unreachable dead code — this return fires
	// first whenever hasISOURL is false — so it has been removed.
	if !hasISOURL {
		return nil, &RequiredSettingError{ID, "iso_url"}
	}
	if r.osType == "" { // if the os type hasn't been set, the ISO info hasn't been retrieved
		err = r.ISOInfo(VirtualBoxISO, workSlice)
		if err != nil {
			return nil, err
		}
	}
	// TODO: modify to select the proper virtualbox value based on distro and arch
	/*
		// set the guest_os_type
		if tmpGuestOSType == "" {
			tmpGuestOSType = r.osType
		}
		settings["guest_os_type"] = tmpGuestOSType
	*/
	if !hasChecksum {
		return nil, &RequiredSettingError{ID: ID, Key: "iso_checksum"}
	}
	if !hasChecksumType {
		return nil, &RequiredSettingError{ID: ID, Key: "iso_checksum_type"}
	}
	return settings, nil
}
// createVirtualBoxOVF creates a map of settings for Packer's virtualbox-ovf
// builder. Any values that aren't supported by the virtualbox-ovf builder are
// ignored. Any required settings that don't exist result in an error and
// processing of the builder is stopped. For more information, refer to
// https://packer.io/docs/builders/virtualbox-ovf.html
//
// In addition to the following options, Packer communicators are supported.
// Check the communicator docs for valid options.
//
// Required configuration options:
//   source_path             string
//   ssh_username            string
// Optional configuration options:
//   boot_command            array of strings
//   boot_wait               string
//   export_opts             array of strings
//   floppy_files            array of strings
//   format                  string
//   guest_additions_mode    string
//   guest_additions_path    string
//   guest_additions_sha256  string
//   guest_additions_url     string
//   headless                bool
//   http_directory          string
//   http_port_min           int
//   http_port_max           int
//   import_flags            array of strings
//   import_opts             string
//   output_directory        string
//   shutdown_command        string
//   shutdown_timeout        string
//   ssh_host_port_min       int
//   ssh_host_port_max       int
//   ssh_skip_nat_mapping    bool
//   vboxmanage              array of strings
//   vboxmanage_post         array of strings
//   virtualbox_version_file string
//   vm_name                 string
func (r *rawTemplate) createVirtualBoxOVF(ID string) (settings map[string]interface{}, err error) {
	_, ok := r.Builders[ID]
	if !ok {
		return nil, NewErrConfigNotFound(ID)
	}
	settings = map[string]interface{}{}
	// Each create function is responsible for setting its own type.
	settings["type"] = VirtualBoxOVF.String()
	// Merge the settings between common and this builders.
	var workSlice []string
	_, ok = r.Builders[Common.String()]
	if ok {
		workSlice, err = mergeSettingsSlices(r.Builders[Common.String()].Settings, r.Builders[ID].Settings)
		if err != nil {
			return nil, err
		}
	} else {
		workSlice = r.Builders[ID].Settings
	}
	// Go through each element in the slice, only take the ones that matter
	// to this builder.
	var hasSourcePath, hasUsername, bootCmdProcessed, hasCommunicator, hasWinRMCommunicator bool
	// userNameVal holds the name of the username key to report in a
	// RequiredSettingError if no username setting was found.
	var userNameVal string
	// check for communicator first
	prefix, err := r.processCommunicator(ID, workSlice, settings)
	if err != nil {
		return nil, err
	}
	// see if the required settings include username/password
	if prefix == "" {
		// for communicator == none or no communicator setting assume ssh_username
		// since the docs have that as required.
		// TODO: revist after communicator doc clarification
		userNameVal = "ssh_username"
	} else {
		userNameVal = prefix + "_username"
		_, ok = settings[userNameVal]
		if ok {
			hasUsername = true
		}
		hasCommunicator = true
		if prefix == "winrm" {
			hasWinRMCommunicator = true
		}
	}
	for _, s := range workSlice {
		k, v := parseVar(s)
		v = r.replaceVariables(v)
		switch k {
		case "boot_command":
			// if the boot_command exists in the Settings section, it should
			// reference a file. This boot_command takes precedence over any
			// boot_command in the array defined in the Arrays section.
			if strings.HasSuffix(v, ".command") {
				var commands []string
				commands, err = r.commandsFromFile("", v)
				if err != nil {
					return nil, &SettingError{ID, k, v, err}
				}
				if len(commands) == 0 {
					return nil, &SettingError{ID, k, v, ErrNoCommands}
				}
				settings[k] = commands
				bootCmdProcessed = true
			}
		case "boot_wait":
			settings[k] = v
		case "format":
			settings[k] = v
		case "guest_additions_mode":
			settings[k] = v
		case "guest_additions_path":
			settings[k] = v
		case "guest_additions_sha256":
			settings[k] = v
		case "guest_additions_url":
			settings[k] = v
		case "headless":
			// bool setting; unparsable values become false
			settings[k], _ = strconv.ParseBool(v)
		case "http_directory":
			settings[k] = v
		case "http_port_min":
			// only add if its an int
			i, err := strconv.Atoi(v)
			if err != nil {
				err = &SettingError{ID, k, v, err}
				return nil, err
			}
			settings[k] = i
		case "http_port_max":
			// only add if its an int
			i, err := strconv.Atoi(v)
			if err != nil {
				err = &SettingError{ID, k, v, err}
				return nil, err
			}
			settings[k] = i
		case "import_opts":
			settings[k] = v
		case "output_directory":
			settings[k] = v
		case "shutdown_command":
			if strings.HasSuffix(v, ".command") {
				//If it ends in .command, replace it with the command from the filepath
				var commands []string
				commands, err = r.commandsFromFile("", v)
				if err != nil {
					return nil, &SettingError{ID, k, v, err}
				}
				if len(commands) == 0 {
					return nil, &SettingError{ID, k, v, ErrNoCommands}
				}
				// Assume it's the first element.
				settings[k] = commands[0]
			} else {
				settings[k] = v
			}
		case "source_path":
			src, err := r.findComponentSource(VirtualBoxOVF.String(), v, true)
			if err != nil {
				return nil, err
			}
			// if the source couldn't be found and an error wasn't generated, replace
			// s with the original value; this occurs when it is an example.
			// Nothing should be copied in this instance; it should not be added
			// to the copy info
			if src != "" {
				r.files[r.buildOutPath(VirtualBoxOVF.String(), v)] = src
			}
			settings[k] = r.buildTemplateResourcePath(VirtualBoxOVF.String(), v)
			hasSourcePath = true
		case "ssh_host_port_min":
			// SSH settings don't apply when the communicator is WinRM.
			if hasWinRMCommunicator {
				continue
			}
			// only add if its an int
			i, err := strconv.Atoi(v)
			if err != nil {
				err = &SettingError{ID, k, v, err}
				return nil, err
			}
			settings[k] = i
		case "ssh_host_port_max":
			// SSH settings don't apply when the communicator is WinRM.
			if hasWinRMCommunicator {
				continue
			}
			// only add if its an int
			i, err := strconv.Atoi(v)
			if err != nil {
				err = &SettingError{ID, k, v, err}
				return nil, err
			}
			settings[k] = i
		case "ssh_skip_nat_mapping":
			// SSH settings don't apply to winrm
			if hasWinRMCommunicator {
				continue
			}
			settings[k], _ = strconv.ParseBool(v)
		case "ssh_username":
			// Skip if a communicator was processed; the username was already
			// handled during communicator processing above.
			if hasCommunicator {
				continue
			}
			settings[k] = v
			hasUsername = true
		case "virtualbox_version_file":
			settings[k] = v
		case "vm_name":
			settings[k] = v
		}
	}
	// Check to see if the required info was processed.
	if !hasUsername {
		return nil, &RequiredSettingError{ID, userNameVal}
	}
	if !hasSourcePath {
		return nil, &RequiredSettingError{ID, "source_path"}
	}
	// make sure http_directory is set and add to dir list
	err = r.setHTTP(VirtualBoxOVF.String(), settings)
	if err != nil {
		return nil, err
	}
	// Generate Packer Variables
	// Generate builder specific section
	for name, val := range r.Builders[ID].Arrays {
		switch name {
		case "boot_command":
			if bootCmdProcessed {
				continue // if the boot command was already set, don't use this array
			}
			settings[name] = val
		case "export_opts":
			settings[name] = val
		case "floppy_files":
			settings[name] = val
		case "import_flags":
			settings[name] = val
		case "vboxmanage":
			settings[name] = r.createVBoxManage(val)
		case "vboxmanage_post":
			settings[name] = r.createVBoxManage(val)
		}
	}
	return settings, nil
}
// createVBoxManage converts a vboxmanage/vboxmanage_post array from the
// template into the [][]string form Packer expects: each entry becomes a
// ["modifyvm", "{{.Name}}", "--key", "value"] command.
func (r *rawTemplate) createVBoxManage(v interface{}) [][]string {
	entries := deepcopy.InterfaceToSliceOfStrings(v)
	commands := make([][]string, 0, len(entries))
	for _, entry := range entries {
		key, val := parseVar(entry)
		// vboxmanage flags must start with "--"; a naive concatenation is
		// done when the prefix is missing.
		if !strings.HasPrefix(key, "--") {
			key = "--" + key
		}
		val = r.replaceVariables(val)
		commands = append(commands, []string{"modifyvm", "{{.Name}}", key, val})
	}
	return commands
}
// createVMWareISO creates a map of settings for Packer's vmware-iso builder.
// Any values that aren't supported by the vmware-iso builder are ignored. Any
// required settings that don't exist result in an error and processing of the
// builder is stopped. For more information, refer to
// https://packer.io/docs/builders/vmware-iso.html
//
// In addition to the following options, Packer communicators are supported.
// Check the communicator docs for valid options.
//
// Required configuration options:
// iso_checksum string
// iso_checksum_type string
// iso_url string
// ssh_username string
// Optional configuration options
// boot_command array of strings
// boot_wait string
// disk_additional_size array of ints
// disk_size int
// disk_type_id string
// floppy_files array of strings
// fusion_app_path string
// guest_os_type string; if not set, will be generated
// headless bool
// http_directory string
// http_port_min int
// http_port_max int
// iso_target_path string
// iso_urls array of strings
// output_directory string
// remote_cache_datastore string
// remote_cache_directory string
// remote_datastore string
// remote_host string
// remote_password string
// remote_private_key_file string
// remote_type string
// remote_username string
// shutdown_command string
// shutdown_timeout string
// skip_compaction bool
// tools_upload_flavor string
// tools_upload_path string
// version string
// vm_name string
// vmdk_name string
// vmx_data object of key/value strings
// vmx_data_post object of key/value strings
// vmx_template_path string
// vnc_port_min int
// vnc_port_max int
func (r *rawTemplate) createVMWareISO(ID string) (settings map[string]interface{}, err error) {
_, ok := r.Builders[ID]
if !ok {
return nil, NewErrConfigNotFound(ID)
}
settings = make(map[string]interface{})
// Each create function is responsible for setting its own type.
settings["type"] = VMWareISO.String()
// Merge the settings between common and this builders.
var workSlice []string
_, ok = r.Builders[Common.String()]
if ok {
workSlice, err = mergeSettingsSlices(r.Builders[Common.String()].Settings, r.Builders[ID].Settings)
if err != nil {
return nil, err
}
} else {
workSlice = r.Builders[ID].Settings
}
var bootCmdProcessed, hasChecksum, hasChecksumType, hasISOURL, hasUsername, hasCommunicator bool
var guestOSType string
// check for communicator first
prefix, err := r.processCommunicator(ID, workSlice, settings)
if err != nil {
return nil, err
}
// see if the required settings include username/password
if prefix != "" {
_, ok = settings[prefix+"_username"]
if ok {
hasUsername = true
}
hasCommunicator = true
}
// Go through each element in the slice, only take the ones that matter
for _, s := range workSlice {
// to this builder.
// var tmp interface{}
k, v := parseVar(s)
v = r.replaceVariables(v)
switch k {
case "boot_command":
// if the boot_command exists in the Settings section, it should
// reference a file. This boot_command takes precedence over any
// boot_command in the array defined in the Arrays section.
if strings.HasSuffix(v, ".command") {
var commands []string
commands, err = r.commandsFromFile("", v)
if err != nil {
return nil, &SettingError{ID, k, v, err}
}
if len(commands) == 0 {
return nil, &SettingError{ID, k, v, ErrNoCommands}
}
settings[k] = commands
bootCmdProcessed = true
}
case "boot_wait":
settings[k] = v
case "disk_size":
// only add if its an int
i, err := strconv.Atoi(v)
if err != nil {
return nil, &SettingError{ID, k, v, err}
}
settings[k] = i
case "disk_type_id":
settings[k] = v
case "fusion_app_path":
settings[k] = v
case "guest_os_type":
guestOSType = v
case "headless":
settings[k], _ = strconv.ParseBool(v)
case "http_directory":
settings[k] = v
case "http_port_max":
// only add if its an int
i, err := strconv.Atoi(v)
if err != nil {
return nil, &SettingError{ID, k, v, err}
}
settings[k] = i
case "http_port_min":
// only add if its an int
i, err := strconv.Atoi(v)
if err != nil {
return nil, &SettingError{ID, k, v, err}
}
settings[k] = i
case "iso_checksum":
settings[k] = v
hasChecksum = true
case "iso_checksum_type":
settings[k] = v
hasChecksumType = true
case "iso_target_path":
settings[k] = v
case "iso_url":
settings[k] = v
hasISOURL = true
case "output_directory":
settings[k] = v
case "remote_cache_datastore":
settings[k] = v
case "remote_cache_directory":
settings[k] = v
case "remote_datastore":
settings[k] = v
case "remote_host":
settings[k] = v
case "remote_password":
settings[k] = v
case "remote_private_key_file":
settings[k] = v
case "remote_type":
settings[k] = v
case "remote_username":
settings[k] = v
case "shutdown_command":
//If it ends in .command, replace it with the command from the filepath
if strings.HasSuffix(v, ".command") {
var commands []string
commands, err = r.commandsFromFile("", v)
if err != nil {
return nil, &SettingError{ID, k, v, err}
}
if len(commands) == 0 {
return nil, &SettingError{ID, k, v, ErrNoCommands}
}
// Assume it's the first element.
settings[k] = commands[0]
continue
}
settings[k] = v // the value is the command
case "shutdown_timeout":
settings[k] = v
case "skip_compaction":
settings[k], _ = strconv.ParseBool(v)
case "ssh_username":
// Skip if communicator exists; this was already processed during communicator processing.
if hasCommunicator {
continue
}
settings[k] = v
hasUsername = true
case "tools_upload_flavor":
settings[k] = v
case "tools_upload_path":
settings[k] = v
case "version":
settings[k] = v
case "vm_name":
settings[k] = v
case "vmdk_name":
settings[k] = v
case "vmx_template_path":
settings[k] = v
case "vnc_port_min":
// only add if its an int
i, err := strconv.Atoi(v)
if err != nil {
return nil, &SettingError{ID, k, v, err}
}
settings[k] = i
case "vnc_port_max":
// only add if its an int
i, err := strconv.Atoi(v)
if err != nil {
return nil, &SettingError{ID, k, v, err}
}
settings[k] = i
}
}
// Only check to see if the required ssh_username field was set. The required iso info is checked after Array processing
if !hasUsername {
return nil, &RequiredSettingError{ID, prefix + "_username"}
}
// make sure http_directory is set and add to dir list
err = r.setHTTP(VMWareISO.String(), settings)
if err != nil {
return nil, err
}
// Process arrays, iso_urls is only valid if iso_url is not set
for name, val := range r.Builders[ID].Arrays {
switch name {
case "boot_command":
if bootCmdProcessed {
continue // if the boot command was already set, don't use this array
}
settings[name] = val
case "disk_additional_size":
var tmp []int
// TODO it is assumed that it is a slice of strings. Is this a good assumption?
vals, ok := val.([]string)
if !ok {
return nil, &SettingError{ID, name, json.MarshalToString(val), fmt.Errorf("expected a string array")}
}
for _, v := range vals {
i, err := strconv.Atoi(v)
if err != nil {
return nil, &SettingError{ID, name, json.MarshalToString(val), err}
}
tmp = append(tmp, i)
}
settings[name] = tmp
case "floppy_files":
settings[name] = val
case "iso_urls":
// these are only added if iso_url isn't set
if hasISOURL {
continue
}
settings[name] = val
hasISOURL = true
case "vmx_data":
settings[name] = r.createVMXData(val)
case "vmx_data_post":
settings[name] = r.createVMXData(val)
}
}
// TODO how is this affected by checksum being set in the template?
if r.osType == "" { // if the os type hasn't been set, the ISO info hasn't been retrieved
err = r.ISOInfo(VirtualBoxISO, workSlice)
if err != nil {
return nil, err
}
}
// set the guest_os_type
if guestOSType == "" {
guestOSType = r.osType
}
settings["guest_os_type"] = guestOSType
// If the iso info wasn't set from the Settings, get it from the distro's release
if !hasISOURL {
//handle iso lookup vs set in file
switch r.Distro {
case CentOS.String():
settings["iso_url"] = r.releaseISO.(*centos).imageURL()
settings["iso_checksum"] = r.releaseISO.(*centos).Checksum
settings["iso_checksum_type"] = r.releaseISO.(*centos).ChecksumType
case Debian.String():
settings["iso_url"] = r.releaseISO.(*debian).imageURL()
settings["iso_checksum"] = r.releaseISO.(*debian).Checksum
settings["iso_checksum_type"] = r.releaseISO.(*debian).ChecksumType
case Ubuntu.String():
settings["iso_url"] = r.releaseISO.(*ubuntu).imageURL()
settings["iso_checksum"] = r.releaseISO.(*ubuntu).Checksum
settings["iso_checksum_type"] = r.releaseISO.(*ubuntu).ChecksumType
default:
err = fmt.Errorf("%q is not a supported Distro", r.Distro)
return nil, err
}
return settings, nil
}
if !hasChecksum {
return nil, &RequiredSettingError{ID: ID, Key: "iso_checksum"}
}
if !hasChecksumType {
return nil, &RequiredSettingError{ID: ID, Key: "iso_checksum_type"}
}
return settings, nil
}
// createVMWareVMX creates a map of settings for Packer's vmware-vmx builder.
// Any values that aren't supported by the vmware-vmx builder are ignored. Any
// required settings that don't exist result in an error and processing of the
// builder is stopped. For more information, refer to
// https://packer.io/docs/builders/vmware-vmx.html
//
// In addition to the following options, Packer communicators are supported.
// Check the communicator docs for valid options.
//
// Required configuration options:
//   source_path string
//   ssh_username string
// Optional configuration options
//   boot_command array of strings*
//   boot_wait string
//   floppy_files array of strings
//   fusion_app_path string
//   headless bool
//   http_directory string
//   http_port_min int
//   http_port_max int
//   output_directory string
//   shutdown_command string
//   shutdown_timeout string
//   skip_compaction bool
//   vm_name string
//   vmx_data object of key/value strings
//   vmx_data_post object of key/value strings
//   vnc_port_min int
//   vnc_port_max int
func (r *rawTemplate) createVMWareVMX(ID string) (settings map[string]interface{}, err error) {
	// A configuration must exist for the requested builder ID.
	_, ok := r.Builders[ID]
	if !ok {
		return nil, NewErrConfigNotFound(ID)
	}
	settings = make(map[string]interface{})
	// Each create function is responsible for setting its own type.
	settings["type"] = VMWareVMX.String()
	// Merge the settings between common and this builder; this builder's
	// settings win on conflict.
	var workSlice []string
	_, ok = r.Builders[Common.String()]
	if ok {
		workSlice, err = mergeSettingsSlices(r.Builders[Common.String()].Settings, r.Builders[ID].Settings)
		if err != nil {
			return nil, err
		}
	} else {
		workSlice = r.Builders[ID].Settings
	}
	var hasSourcePath, hasUsername, bootCmdProcessed, hasCommunicator bool
	// Check for a communicator first; it may already provide the username.
	prefix, err := r.processCommunicator(ID, workSlice, settings)
	if err != nil {
		return nil, err
	}
	// See if the communicator settings already included a username; if so the
	// required-username check below is satisfied.
	if prefix != "" {
		_, ok = settings[prefix+"_username"]
		if ok {
			hasUsername = true
		}
		hasCommunicator = true
	}
	// Go through each element in the slice, only take the ones that matter
	// to this builder.
	for _, s := range workSlice {
		k, v := parseVar(s)
		v = r.replaceVariables(v)
		switch k {
		case "boot_command":
			// if the boot_command exists in the Settings section, it should
			// reference a file. This boot_command takes precedence over any
			// boot_command in the array defined in the Arrays section.
			if strings.HasSuffix(v, ".command") {
				var commands []string
				commands, err = r.commandsFromFile("", v)
				if err != nil {
					return nil, &SettingError{ID, k, v, err}
				}
				if len(commands) == 0 {
					return nil, &SettingError{ID, k, v, ErrNoCommands}
				}
				settings[k] = commands
				bootCmdProcessed = true
			}
		case "boot_wait":
			settings[k] = v
		case "fusion_app_path":
			settings[k] = v
		case "headless":
			// A value that doesn't parse as a bool silently becomes false.
			settings[k], _ = strconv.ParseBool(v)
		case "http_directory":
			settings[k] = v
		case "http_port_max":
			// only add if it's an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "http_port_min":
			// only add if it's an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "output_directory":
			settings[k] = v
		case "shutdown_timeout":
			settings[k] = v
		case "shutdown_command":
			// If it ends in .command, replace it with the command from the filepath.
			if strings.HasSuffix(v, ".command") {
				var commands []string
				commands, err = r.commandsFromFile("", v)
				if err != nil {
					return nil, &SettingError{ID, k, v, err}
				}
				if len(commands) == 0 {
					return nil, &SettingError{ID, k, v, ErrNoCommands}
				}
				// Assume it's the first element.
				settings[k] = commands[0]
			} else {
				settings[k] = v // the value is the command
			}
		case "skip_compaction":
			settings[k], _ = strconv.ParseBool(v)
		case "source_path":
			src, err := r.findComponentSource(VMWareVMX.String(), v, true)
			if err != nil {
				return nil, err
			}
			// if the source couldn't be found and an error wasn't generated, replace
			// s with the original value; this occurs when it is an example.
			// Nothing should be copied in this instance; it should not be added
			// to the copy info.
			if src != "" {
				r.files[r.buildOutPath(VMWareVMX.String(), v)] = src
			}
			settings[k] = r.buildTemplateResourcePath(VMWareVMX.String(), v)
			hasSourcePath = true
		case "ssh_username":
			// Skip if a communicator exists; it was already processed above.
			if hasCommunicator {
				continue
			}
			settings[k] = v
			hasUsername = true
		case "vm_name":
			settings[k] = v
		case "vnc_port_max":
			// only add if it's an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "vnc_port_min":
			// only add if it's an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		}
	}
	// Check that the required fields were processed.
	if !hasUsername {
		return nil, &RequiredSettingError{ID, "ssh_username"}
	}
	if !hasSourcePath {
		return nil, &RequiredSettingError{ID, "source_path"}
	}
	// make sure http_directory is set and add to dir list
	err = r.setHTTP(VMWareVMX.String(), settings)
	if err != nil {
		return nil, err
	}
	// Process the Arrays supported by this builder.
	for name, val := range r.Builders[ID].Arrays {
		switch name {
		case "boot_command":
			if bootCmdProcessed {
				continue // if the boot command was already set, don't use this array
			}
			settings[name] = val
		case "floppy_files":
			settings[name] = val
		case "vmx_data":
			settings[name] = r.createVMXData(val)
		case "vmx_data_post":
			settings[name] = r.createVMXData(val)
		}
	}
	return settings, nil
}
// createVMXData converts the passed value to a map of key/value strings for
// use as a Packer vmx_data setting. Each element is parsed as a "key=value"
// pair and has its variables resolved before being added to the map.
func (r *rawTemplate) createVMXData(v interface{}) map[string]string {
	entries := deepcopy.InterfaceToSliceOfStrings(v)
	data := make(map[string]string, len(entries))
	for _, entry := range entries {
		key, value := parseVar(entry)
		data[key] = r.replaceVariables(value)
	}
	return data
}
// updateBuilders updates the rawTemplate's builders with the passed new
// builder map.
// Builder Update rules:
//   * If r's old builder does not have a matching builder in the new builder
//     map, new, nothing is done.
//   * If the builder exists in both r and new, the new builder updates r's
//     builder.
//   * If the new builder does not have a matching builder in r, the new
//     builder is added to r's builder map.
//
// Settings update rules:
//   * If the setting exists in r's builder but not in new, nothing is done.
//     This means that deletion of settings via not having them exist in the
//     new builder is not supported. This is to simplify overriding templates
//     in the configuration files.
//   * If the setting exists in both r's builder and new, r's builder is
//     updated with new's value.
//   * If the setting exists in new, but not r's builder, new's setting is
//     added to r's builder.
//   * To unset a setting, specify the key, without a value: `"key="`. In most
//     situations, Rancher will interpret an key without a value as a deletion
//     of that key. There is an exception:
//   * `guest_os_type`: This is generally set at Packer Template generation
//     time by Rancher.
func (r *rawTemplate) updateBuilders(newB map[string]builder) error {
	// If there is nothing new, old equals merged. A nil map has len 0, so a
	// single length check covers both cases.
	if len(newB) == 0 {
		return nil
	}
	// Convert both the existing and the new Builders to Componenter so that
	// their keys can be merged into one list.
	oldC := DeepCopyMapStringBuilder(r.Builders)
	newC := DeepCopyMapStringBuilder(newB)
	keys := mergeKeysFromComponentMaps(oldC, newC)
	// If there's a builder with the key CommonBuilder, merge them. This is a
	// special case for builders only. Propagate any merge error instead of
	// silently dropping it.
	if _, ok := newB[Common.String()]; ok {
		if err := r.updateCommon(newB[Common.String()]); err != nil {
			return err
		}
	}
	// Copy: if the key exists in the new builder only.
	// Ignore: if the key does not exist in the new builder.
	// Merge: if the key exists in both the new and old builder.
	for _, v := range keys {
		// If it doesn't exist in the old builder, add it.
		b, ok := r.Builders[v]
		if !ok {
			nb := newB[v]
			r.Builders[v] = nb.DeepCopy()
			continue
		}
		// If the element for this key doesn't exist in new, skip it.
		bb, ok := newB[v]
		if !ok {
			continue
		}
		if err := b.mergeSettings(bb.Settings); err != nil {
			return fmt.Errorf("merge of settings failed: %s", err)
		}
		b.mergeArrays(bb.Arrays)
		r.Builders[v] = b
	}
	return nil
}
// updateCommon updates rawTemplate's common builder settings.
// Update rules:
//   * When both the existing common builder, r, and the new one, b, have the
//     same setting, b's value replaces r's; the new value replaces the
//     existing value.
//   * When the setting in b is new, it is added to r: new settings are
//     inserted into r's CommonBuilder setting list.
//   * When r has a setting that does not exist in b, nothing is done. This
//     method does not delete any settings that already exist in r.
func (r *rawTemplate) updateCommon(newB builder) error {
	if r.Builders == nil {
		r.Builders = map[string]builder{}
	}
	// If the existing builder doesn't have a CommonBuilder section, just add it.
	b, ok := r.Builders[Common.String()]
	if !ok {
		r.Builders[Common.String()] = builder{templateSection: templateSection{Type: newB.Type, Settings: newB.Settings, Arrays: newB.Arrays}}
		return nil
	}
	// Otherwise merge the two. BUG FIX: this previously merged b's own
	// settings into itself (b.mergeSettings(b.Settings)), which made the
	// merge a no-op and silently discarded newB's settings.
	if err := b.mergeSettings(newB.Settings); err != nil {
		return err
	}
	r.Builders[Common.String()] = b
	return nil
}
// setHTTP ensures that the http_directory setting is present, defaulting to
// "http" when it is not, and records the directory's source in the dirs info
// so that its contents can be copied.
//
// The http_directory doesn't include component.
func (r *rawTemplate) setHTTP(component string, m map[string]interface{}) error {
	dir := "http"
	if v, ok := m["http_directory"]; ok {
		dir = v.(string)
	}
	src, err := r.findComponentSource(component, dir, true)
	if err != nil {
		return fmt.Errorf("setHTTP error: %s", err)
	}
	// An empty src without an error means the value is an example; nothing
	// should be copied in that case, so it is not added to the copy info.
	if src != "" {
		r.dirs[r.buildOutPath("", dir)] = src
	}
	m["http_directory"] = r.buildTemplateResourcePath("", dir)
	return nil
}
// DeepCopyMapStringBuilder makes a deep copy of each builder in b and
// returns the copies as a map[string]Componenter.
func DeepCopyMapStringBuilder(b map[string]builder) map[string]Componenter {
	copies := make(map[string]Componenter, len(b))
	for name, bldr := range b {
		copies[name] = bldr.DeepCopy()
	}
	return copies
}
// commandFromSlice takes a []string and returns it as a single command
// string.
//
// If the slice is empty, an empty string is returned. If there is only one
// element, it is returned as the command without any additional processing.
// Otherwise each element of the slice is processed: lines are trimmed of
// surrounding whitespace and joined to form a single command string. The `\`
// character marks a line that continues on the next line and is stripped
// before joining. The first line without a trailing `\` terminates the
// command; any remaining lines in the slice are ignored.
func commandFromSlice(lines []string) string {
	if len(lines) == 0 {
		return ""
	}
	if len(lines) == 1 {
		return lines[0]
	}
	// Use a strings.Builder rather than += so the join is linear instead of
	// quadratic in the number of lines.
	var cmd strings.Builder
	for _, line := range lines {
		line = strings.TrimSpace(line)
		if !strings.HasSuffix(line, `\`) {
			// End of the command: ignore any remaining lines.
			cmd.WriteString(line)
			break
		}
		cmd.WriteString(strings.TrimSuffix(line, `\`))
	}
	return cmd.String()
}
update googlecompute: use the account_file value as is; security-related information is used as is instead of trying to resolve the path to a file within rancher's search path; refactor array processing
package app
import (
"errors"
"fmt"
"reflect"
"strconv"
"strings"
json "github.com/mohae/customjson"
"github.com/mohae/utilitybelt/deepcopy"
)
// Builder constants for the Packer builders Rancher knows about.
//
// The constant values index into the builders name slice below, so the two
// declarations must be kept in the same order.
const (
	UnsupportedBuilder Builder = iota
	Common
	Custom
	AmazonChroot
	AmazonEBS
	AmazonInstance
	DigitalOcean
	Docker
	GoogleCompute
	Null
	OpenStack
	Parallels
	QEMU
	VirtualBoxISO
	VirtualBoxOVF
	VMWareISO
	VMWareVMX
)

// Builder is a Packer supported builder.
type Builder int

// builders holds the canonical Packer name for each Builder constant,
// indexed by the constant's value.
var builders = [...]string{
	"unsupported builder",
	"common",
	"custom",
	"amazon-chroot",
	"amazon-ebs",
	"amazon-instance",
	"digitalocean",
	"docker",
	"googlecompute",
	"null",
	"openstack",
	"parallels",
	"qemu",
	"virtualbox-iso",
	"virtualbox-ovf",
	"vmware-iso",
	"vmware-vmx",
}

// String implements fmt.Stringer. Out-of-range values return the
// unsupported-builder string instead of panicking with an index error.
func (b Builder) String() string {
	if b < 0 || int(b) >= len(builders) {
		return builders[UnsupportedBuilder]
	}
	return builders[b]
}
// builderLookup maps the lowercased Packer builder name to its Builder
// constant.
var builderLookup = map[string]Builder{
	"common":          Common,
	"custom":          Custom,
	"amazon-chroot":   AmazonChroot,
	"amazon-ebs":      AmazonEBS,
	"amazon-instance": AmazonInstance,
	"digitalocean":    DigitalOcean,
	"docker":          Docker,
	"googlecompute":   GoogleCompute,
	"null":            Null,
	"openstack":       OpenStack,
	"parallels":       Parallels,
	"qemu":            QEMU,
	"virtualbox-iso":  VirtualBoxISO,
	"virtualbox-ovf":  VirtualBoxOVF,
	"vmware-iso":      VMWareISO,
	"vmware-vmx":      VMWareVMX,
}

// BuilderFromString returns the builder constant for the passed string or
// UnsupportedBuilder when the name is not recognized. All incoming strings
// are normalized to lowercase before the lookup.
func BuilderFromString(s string) Builder {
	if b, ok := builderLookup[strings.ToLower(s)]; ok {
		return b
	}
	return UnsupportedBuilder
}
// createBuilders takes the template's raw builders and creates the
// appropriate Packer Builder configuration for each ID listed in
// r.BuilderIDs.
//
// An error is returned when no builder IDs are specified, when a listed ID
// has no configuration, or when generating any builder's settings fails.
func (r *rawTemplate) createBuilders() (bldrs []interface{}, err error) {
	// A nil slice has len 0, so one length check covers both conditions.
	if len(r.BuilderIDs) == 0 {
		return nil, fmt.Errorf("unable to create builders: none specified")
	}
	var tmpS map[string]interface{}
	bldrs = make([]interface{}, len(r.BuilderIDs))
	// Set the CommonBuilder settings. Only the builder.Settings field is used
	// for CommonBuilder as everything else is usually builder specific, even
	// if they have common names, e.g. difference between specifying memory
	// between VMWare and VirtualBox.
	//
	// Generate the builders for each builder type. The range index replaces
	// the manually maintained counter the original used.
	for ndx, ID := range r.BuilderIDs {
		bldr, ok := r.Builders[ID]
		if !ok {
			return nil, fmt.Errorf("builder configuration for %s not found", ID)
		}
		typ := BuilderFromString(bldr.Type)
		switch typ {
		case AmazonChroot:
			tmpS, err = r.createAmazonChroot(ID)
			if err != nil {
				return nil, &Error{AmazonChroot.String(), err}
			}
		case AmazonEBS:
			tmpS, err = r.createAmazonEBS(ID)
			if err != nil {
				return nil, &Error{AmazonEBS.String(), err}
			}
		case AmazonInstance:
			tmpS, err = r.createAmazonInstance(ID)
			if err != nil {
				return nil, &Error{AmazonInstance.String(), err}
			}
		case DigitalOcean:
			tmpS, err = r.createDigitalOcean(ID)
			if err != nil {
				return nil, &Error{DigitalOcean.String(), err}
			}
		case Docker:
			tmpS, err = r.createDocker(ID)
			if err != nil {
				return nil, &Error{Docker.String(), err}
			}
		case GoogleCompute:
			tmpS, err = r.createGoogleCompute(ID)
			if err != nil {
				return nil, &Error{GoogleCompute.String(), err}
			}
		case Null:
			tmpS, err = r.createNull(ID)
			if err != nil {
				return nil, &Error{Null.String(), err}
			}
		case OpenStack:
			tmpS, err = r.createOpenStack(ID)
			if err != nil {
				// BUG FIX: this error was previously attributed to the null
				// builder (&Error{Null.String(), err}).
				return nil, &Error{OpenStack.String(), err}
			}
		// case ParallelsISO, ParallelsPVM:
		case QEMU:
			tmpS, err = r.createQEMU(ID)
			if err != nil {
				return nil, &Error{QEMU.String(), err}
			}
		case VirtualBoxISO:
			tmpS, err = r.createVirtualBoxISO(ID)
			if err != nil {
				return nil, &Error{VirtualBoxISO.String(), err}
			}
		case VirtualBoxOVF:
			tmpS, err = r.createVirtualBoxOVF(ID)
			if err != nil {
				return nil, &Error{VirtualBoxOVF.String(), err}
			}
		case VMWareISO:
			tmpS, err = r.createVMWareISO(ID)
			if err != nil {
				return nil, &Error{VMWareISO.String(), err}
			}
		case VMWareVMX:
			tmpS, err = r.createVMWareVMX(ID)
			if err != nil {
				return nil, &Error{VMWareVMX.String(), err}
			}
		default:
			return nil, &Error{UnsupportedBuilder.String(), fmt.Errorf("%q is not supported", typ.String())}
		}
		bldrs[ndx] = tmpS
	}
	return bldrs, nil
}
// settingsToMap converts all of the builder's Settings into a map. Each
// setting is split into its key and value parts, and the value goes through
// variable replacement so the settings are fully resolved.
func (b *builder) settingsToMap(r *rawTemplate) map[string]interface{} {
	resolved := make(map[string]interface{}, len(b.Settings))
	for _, setting := range b.Settings {
		key, value := parseVar(setting)
		resolved[key] = r.replaceVariables(value)
	}
	return resolved
}
// createAmazonChroot creates a map of settings for Packer's amazon-chroot
// builder. Any values that aren't supported by the amazon-chroot builder are
// ignored. Any required settings that don't exist result in an error and
// processing of the builder is stopped. For more information, refer to
// https://packer.io/docs/builders/amazon-chroot.html
//
// In addition to the following options, Packer communicators are supported.
// Check the communicator docs for valid options.
//
// Required configuration options:
//   access_key string
//   ami_name string
//   secret_key string
//   source_ami string
// Optional configuration options:
//   ami_description string
//   ami_groups array of strings
//   ami_product_codes array of strings
//   ami_regions array of strings
//   ami_users array of strings
//   ami_virtualization_type string
//   chroot_mounts array of array of strings
//   command_wrapper string
//   copy_files array of strings
//   device_path string
//   enhanced_networking bool
//   force_deregister bool
//   mount_options array of strings
//   mount_path string
//   root_volume_size int
//   tags object of key/value strings
func (r *rawTemplate) createAmazonChroot(ID string) (settings map[string]interface{}, err error) {
	// BUG FIX: look the configuration up by the passed ID instead of the
	// hard-coded amazon-chroot key, consistent with every sibling create
	// function, so aliased builder configurations are found.
	_, ok := r.Builders[ID]
	if !ok {
		return nil, NewErrConfigNotFound(ID)
	}
	settings = make(map[string]interface{})
	// Each create function is responsible for setting its own type.
	settings["type"] = AmazonChroot.String()
	// Merge the settings between common and this builder.
	var workSlice []string
	_, ok = r.Builders[Common.String()]
	if ok {
		workSlice, err = mergeSettingsSlices(r.Builders[Common.String()].Settings, r.Builders[ID].Settings)
		if err != nil {
			return nil, err
		}
	} else {
		workSlice = r.Builders[ID].Settings
	}
	var k, v string
	var hasAccessKey, hasAmiName, hasSecretKey, hasSourceAmi bool
	// check for communicator first
	_, err = r.processCommunicator(ID, workSlice, settings)
	if err != nil {
		return nil, err
	}
	// Go through each element in the slice, only take the ones that matter
	// to this builder.
	for _, s := range workSlice {
		k, v = parseVar(s)
		v = r.replaceVariables(v)
		switch k {
		case "access_key":
			settings[k] = v
			hasAccessKey = true
		case "ami_name":
			settings[k] = v
			hasAmiName = true
		case "ami_description":
			settings[k] = v
		case "ami_virtualization_type":
			settings[k] = v
		case "command_wrapper":
			settings[k] = v
		case "device_path":
			settings[k] = v
		case "enhanced_networking":
			settings[k], _ = strconv.ParseBool(v)
		case "force_deregister":
			settings[k], _ = strconv.ParseBool(v)
		case "mount_path":
			settings[k] = v
		case "root_volume_size":
			// only add if it's an int
			// BUG FIX: only return a SettingError when the conversion fails;
			// previously the error return was unconditional, which aborted
			// processing even for valid values.
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "secret_key":
			settings[k] = v
			hasSecretKey = true
		case "source_ami":
			settings[k] = v
			hasSourceAmi = true
		}
	}
	if !hasAccessKey {
		return nil, &RequiredSettingError{ID, "access_key"}
	}
	if !hasAmiName {
		return nil, &RequiredSettingError{ID, "ami_name"}
	}
	if !hasSecretKey {
		return nil, &RequiredSettingError{ID, "secret_key"}
	}
	if !hasSourceAmi {
		return nil, &RequiredSettingError{ID, "source_ami"}
	}
	// Process the Arrays; anything not in the supported list is skipped.
	for name, val := range r.Builders[ID].Arrays {
		switch name {
		case "ami_groups", "ami_product_codes", "ami_regions", "ami_users",
			"chroot_mounts", "copy_files", "mount_options", "tags":
		default:
			// not supported; skip
			continue
		}
		array := deepcopy.Iface(val)
		if array != nil {
			settings[name] = array
		}
	}
	return settings, nil
}
// createAmazonEBS creates a map of settings for Packer's amazon-ebs builder.
// Any values that aren't supported by the amazon-ebs builder are ignored. Any
// required settings that don't exist result in an error and processing of the
// builder is stopped. For more information, refer to
// https://packer.io/docs/builders/amazon-ebs.html
//
// In addition to the following options, Packer communicators are supported.
// Check the communicator docs for valid options.
//
// NOTE(review): a few of the options listed below (e.g. ebs_optimized,
// ssh_private_ip) do not have a corresponding case in the switch and are
// therefore currently ignored — confirm whether that is intentional.
//
// Required configuration options:
//   access_key string
//   ami_name string
//   instance_type string
//   region string
//   secret_key string
//   source_ami string
//   ssh_username string
// Optional configuration options:
//   ami_block_device_mappings array of block device mappings
//     delete_on_termination bool
//     device_name string
//     encrypted bool
//     iops int
//     no_device bool
//     snapshot_id string
//     virtual_name string
//     volume_type string
//     volume_size int
//   ami_description string
//   ami_groups array of strings
//   ami_product_codes array of strings
//   ami_regions array of strings
//   ami_users array of strings
//   associate_public_ip_address bool
//   availability_zone string
//   ebs_optimized bool
//   enhanced_networking bool
//   force_deregister bool
//   iam_instance_profile string
//   launch_block_device_mappings array of block device mappings
//   run_tags object of key/value strings
//   security_group_id string
//   security_group_ids array of strings
//   spot_price string
//   spot_price_auto_product string
//   ssh_keypair_name string
//   ssh_private_ip bool
//   ssh_private_key_file string
//   subnet_id string
//   tags object of key/value strings
//   temporary_key_pair_name string
//   token string
//   user_data string
//   user_data_file string
//   volume_run_tags object of key/value strings
//   vpc_id string
//   windows_password_timeout string
func (r *rawTemplate) createAmazonEBS(ID string) (settings map[string]interface{}, err error) {
	// A configuration must exist for the requested builder ID.
	_, ok := r.Builders[ID]
	if !ok {
		return nil, NewErrConfigNotFound(ID)
	}
	settings = make(map[string]interface{})
	// Each create function is responsible for setting its own type.
	settings["type"] = AmazonEBS.String()
	// Merge the settings between common and this builder; this builder's
	// settings take precedence.
	var workSlice []string
	_, ok = r.Builders[Common.String()]
	if ok {
		workSlice, err = mergeSettingsSlices(r.Builders[Common.String()].Settings, r.Builders[ID].Settings)
		if err != nil {
			return nil, err
		}
	} else {
		workSlice = r.Builders[ID].Settings
	}
	var k, v string
	var hasAccessKey, hasAmiName, hasInstanceType, hasRegion, hasSecretKey bool
	var hasSourceAmi, hasUsername, hasCommunicator bool
	// Check for a communicator first; it may already provide the username.
	prefix, err := r.processCommunicator(ID, workSlice, settings)
	if err != nil {
		return nil, err
	}
	// see if the required settings include username/password
	if prefix != "" {
		_, ok = settings[prefix+"_username"]
		if ok {
			hasUsername = true
		}
		hasCommunicator = true
	}
	// Go through each element in the slice, only take the ones that matter
	// to this builder.
	for _, s := range workSlice {
		k, v = parseVar(s)
		v = r.replaceVariables(v)
		switch k {
		case "access_key":
			settings[k] = v
			hasAccessKey = true
		case "ami_description":
			settings[k] = v
		case "ami_name":
			settings[k] = v
			hasAmiName = true
		case "associate_public_ip_address":
			// A value that doesn't parse as a bool silently becomes false.
			settings[k], _ = strconv.ParseBool(v)
		case "availability_zone":
			settings[k] = v
		case "enhanced_networking":
			settings[k], _ = strconv.ParseBool(v)
		case "force_deregister":
			settings[k], _ = strconv.ParseBool(v)
		case "iam_instance_profile":
			settings[k] = v
		case "instance_type":
			settings[k] = v
			hasInstanceType = true
		case "region":
			settings[k] = v
			hasRegion = true
		case "secret_key":
			settings[k] = v
			hasSecretKey = true
		case "security_group_id":
			settings[k] = v
		case "source_ami":
			settings[k] = v
			hasSourceAmi = true
		case "spot_price":
			settings[k] = v
		case "spot_price_auto_product":
			settings[k] = v
		case "ssh_keypair_name":
			// Only process if there's no communicator or if the communicator is SSH.
			if hasCommunicator && prefix != "ssh" {
				continue
			}
			settings[k] = v
		case "ssh_private_key_file":
			// Only process if there's no communicator or if the communicator is SSH.
			if hasCommunicator && prefix != "ssh" {
				continue
			}
			settings[k] = v
		case "ssh_username":
			// Only set if there wasn't a communicator to process.
			if hasCommunicator {
				continue
			}
			settings[k] = v
			hasUsername = true
		case "subnet_id":
			settings[k] = v
		case "temporary_key_pair_name":
			settings[k] = v
		case "token":
			settings[k] = v
		case "user_data":
			settings[k] = v
		case "user_data_file":
			src, err := r.findComponentSource(AmazonEBS.String(), v, false)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			// if the source couldn't be found and an error wasn't generated, replace
			// s with the original value; this occurs when it is an example.
			// Nothing should be copied in this instance; it should not be added
			// to the copy info.
			if src != "" {
				r.files[r.buildOutPath(AmazonEBS.String(), v)] = src
			}
			settings[k] = r.buildTemplateResourcePath(AmazonEBS.String(), v)
		case "vpc_id":
			settings[k] = v
		case "windows_password_timeout":
			// Don't set if there's a non WinRM communicator.
			if hasCommunicator && prefix != "winrm" {
				continue
			}
			settings[k] = v
		}
	}
	// Enforce the required settings.
	if !hasAccessKey {
		return nil, &RequiredSettingError{ID, "access_key"}
	}
	if !hasAmiName {
		return nil, &RequiredSettingError{ID, "ami_name"}
	}
	if !hasInstanceType {
		return nil, &RequiredSettingError{ID, "instance_type"}
	}
	if !hasRegion {
		return nil, &RequiredSettingError{ID, "region"}
	}
	if !hasSecretKey {
		return nil, &RequiredSettingError{ID, "secret_key"}
	}
	if !hasSourceAmi {
		return nil, &RequiredSettingError{ID, "source_ami"}
	}
	if !hasUsername {
		// If there isn't a prefix, use ssh as that's the setting
		// that's required according to the docs.
		if prefix == "" {
			prefix = "ssh"
		}
		return nil, &RequiredSettingError{ID, prefix + "_username"}
	}
	// Process the Arrays; anything not in the supported list is skipped.
	for name, val := range r.Builders[ID].Arrays {
		// only process supported array stuff
		switch name {
		case "ami_block_device_mappings":
			// do ami_block_device_mappings processing
			settings[name], err = r.processAMIBlockDeviceMappings(val)
			if err != nil {
				return nil, &SettingError{ID, "ami_block_device_mappings", "", err}
			}
			continue
		case "ami_groups":
		case "ami_product_codes":
		case "ami_regions":
		case "ami_users":
		case "launch_block_device_mappings":
		case "run_tags":
		case "security_group_ids":
		case "tags":
		default:
			continue
		}
		array := deepcopy.Iface(val)
		if array != nil {
			settings[name] = array
		}
	}
	return settings, nil
}
// createAmazonInstance creates a map of settings for Packer's amazon-instance
// builder. Any values that aren't supported by the amazon-instance builder
// are ignored. Any required settings that don't exist result in an error and
// processing of the builder is stopped. For more information, refer to
// https://packer.io/docs/builders/amazon-instance.html
//
// In addition to the following options, Packer communicators are supported.
// Check the communicator docs for valid options.
//
// Required configuration options:
//   access_key                   string
//   account_id                   string
//   ami_name                     string
//   instance_type                string
//   region                       string
//   s3_bucket                    string
//   secret_key                   string
//   source_ami                   string
//   ssh_username                 string
//   x509_cert_path               string
//   x509_key_path                string
// Optional configuration options:
//   ami_block_device_mappings    array of block device mappings
//     delete_on_termination      bool
//     device_name                string
//     encrypted                  bool
//     iops                       int
//     no_device                  bool
//     snapshot_id                string
//     virtual_name               string
//     volume_size                int
//     volume_type                string
//   ami_description              string
//   ami_groups                   array of strings
//   ami_product_codes            array of strings
//   ami_regions                  array of strings
//   ami_users                    array of strings
//   ami_virtualization_type      string
//   associate_public_ip_address  bool
//   availability_zone            string
//   bundle_destination           string
//   bundle_prefix                string
//   bundle_upload_command        string
//   bundle_vol_command           string
//   ebs_optimized                bool
//   enhanced_networking          bool
//   force_deregister             bool
//   iam_instance_profile         string
//   launch_block_device_mappings array of block device mappings
//   run_tags                     object of key/value strings
//   security_group_id            string
//   security_group_ids           array of strings
//   spot_price                   string
//   spot_price_auto_product      string
//   ssh_keypair_name             string
//   ssh_private_ip               bool
//   ssh_private_key_file         string
//   subnet_id                    string
//   tags                         object of key/value strings
//   temporary_key_pair_name      string
//   user_data                    string
//   user_data_file               string
//   vpc_id                       string
//   x509_upload_path             string
//   windows_password_timeout     string
func (r *rawTemplate) createAmazonInstance(ID string) (settings map[string]interface{}, err error) {
	_, ok := r.Builders[ID]
	if !ok {
		return nil, NewErrConfigNotFound(ID)
	}
	settings = make(map[string]interface{})
	// Each create function is responsible for setting its own type.
	settings["type"] = AmazonInstance.String()
	// Merge the settings between common and this builder.
	var workSlice []string
	_, ok = r.Builders[Common.String()]
	if ok {
		workSlice, err = mergeSettingsSlices(r.Builders[Common.String()].Settings, r.Builders[ID].Settings)
		if err != nil {
			return nil, err
		}
	} else {
		workSlice = r.Builders[ID].Settings
	}
	var k, v string
	var hasAccessKey, hasAccountID, hasAmiName, hasInstanceType, hasRegion, hasS3Bucket bool
	var hasSecretKey, hasSourceAmi, hasUsername, hasX509CertPath, hasX509KeyPath, hasCommunicator bool
	// check for communicator first
	prefix, err := r.processCommunicator(ID, workSlice, settings)
	if err != nil {
		return nil, err
	}
	// see if the required settings include username/password
	if prefix != "" {
		_, ok = settings[prefix+"_username"]
		if ok {
			hasUsername = true
		}
		hasCommunicator = true
	}
	// Go through each element in the slice, only take the ones that matter
	// to this builder.
	for _, s := range workSlice {
		k, v = parseVar(s)
		v = r.replaceVariables(v)
		switch k {
		case "access_key":
			settings[k] = v
			hasAccessKey = true
		case "account_id":
			settings[k] = v
			hasAccountID = true
		case "ami_description":
			settings[k] = v
		case "ami_name":
			settings[k] = v
			hasAmiName = true
		case "ami_virtualization_type":
			settings[k] = v
		case "associate_public_ip_address":
			settings[k], _ = strconv.ParseBool(v)
		case "availability_zone":
			settings[k] = v
		case "bundle_destination":
			settings[k] = v
		case "bundle_prefix":
			settings[k] = v
		case "bundle_upload_command":
			if !strings.HasSuffix(v, ".command") {
				// The value is the command.
				settings[k] = v
				continue
			}
			// The value is a command file, load the contents of the
			// file.
			cmds, err := r.commandsFromFile(AmazonInstance.String(), v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			cmd := commandFromSlice(cmds)
			if cmd == "" {
				return nil, &SettingError{ID, k, v, ErrNoCommands}
			}
			settings[k] = cmd
		case "bundle_vol_command":
			if !strings.HasSuffix(v, ".command") {
				// The value is the command.
				settings[k] = v
				continue
			}
			// The value is a command file, load the contents of the
			// file.
			cmds, err := r.commandsFromFile(AmazonInstance.String(), v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			cmd := commandFromSlice(cmds)
			if cmd == "" {
				return nil, &SettingError{ID, k, v, ErrNoCommands}
			}
			settings[k] = cmd
		case "ebs_optimized":
			settings[k], _ = strconv.ParseBool(v)
		case "enhanced_networking":
			settings[k], _ = strconv.ParseBool(v)
		case "force_deregister":
			settings[k], _ = strconv.ParseBool(v)
		case "iam_instance_profile":
			settings[k] = v
		case "instance_type":
			settings[k] = v
			hasInstanceType = true
		case "region":
			settings[k] = v
			hasRegion = true
		case "s3_bucket":
			settings[k] = v
			hasS3Bucket = true
		case "secret_key":
			settings[k] = v
			hasSecretKey = true
		case "security_group_id":
			settings[k] = v
		case "spot_price":
			settings[k] = v
		case "spot_price_auto_product":
			settings[k] = v
		case "ssh_keypair_name":
			// Don't process if there's a communicator and it wasn't SSH.
			if hasCommunicator && prefix != "ssh" {
				continue
			}
			settings[k] = v
		case "ssh_private_ip":
			// Don't process if there's a communicator and it wasn't SSH.
			if hasCommunicator && prefix != "ssh" {
				continue
			}
			settings[k], _ = strconv.ParseBool(v)
		case "ssh_private_key_file":
			// Don't process if there was a communicator.
			if hasCommunicator {
				continue
			}
			settings[k] = v
		case "source_ami":
			settings[k] = v
			hasSourceAmi = true
		case "ssh_username":
			// Don't process if there was a communicator.
			if hasCommunicator {
				continue
			}
			settings[k] = v
			hasUsername = true
		case "subnet_id":
			settings[k] = v
		case "temporary_key_pair_name":
			settings[k] = v
		case "user_data":
			settings[k] = v
		case "user_data_file":
			src, err := r.findComponentSource(AmazonInstance.String(), v, false)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			// if the source couldn't be found and an error wasn't generated, replace
			// src with the original value; this occurs when it is an example.
			// Nothing should be copied in this instance and it should not be added
			// to the copy info
			if src != "" {
				// Bug fix: this previously used AmazonEBS.String(), which
				// placed the file in the amazon-ebs output path even though
				// this is the amazon-instance builder.
				r.files[r.buildOutPath(AmazonInstance.String(), v)] = src
			}
			settings[k] = r.buildTemplateResourcePath(AmazonInstance.String(), v)
		case "vpc_id":
			settings[k] = v
		case "windows_password_timeout":
			// Don't process if there was a communicator and it wasn't WinRM.
			if hasCommunicator && prefix != "winrm" {
				continue
			}
			settings[k] = v
		case "x509_cert_path":
			settings[k] = v
			hasX509CertPath = true
		case "x509_key_path":
			settings[k] = v
			hasX509KeyPath = true
		case "x509_upload_path":
			settings[k] = v
		}
	}
	if !hasAccessKey {
		return nil, &RequiredSettingError{ID, "access_key"}
	}
	if !hasAccountID {
		return nil, &RequiredSettingError{ID, "account_id"}
	}
	if !hasAmiName {
		return nil, &RequiredSettingError{ID, "ami_name"}
	}
	if !hasInstanceType {
		return nil, &RequiredSettingError{ID, "instance_type"}
	}
	if !hasRegion {
		return nil, &RequiredSettingError{ID, "region"}
	}
	if !hasS3Bucket {
		return nil, &RequiredSettingError{ID, "s3_bucket"}
	}
	if !hasSecretKey {
		return nil, &RequiredSettingError{ID, "secret_key"}
	}
	if !hasSourceAmi {
		return nil, &RequiredSettingError{ID, "source_ami"}
	}
	if !hasUsername {
		// if prefix was empty, no communicator was used which means
		// ssh_username is expected.
		if prefix == "" {
			prefix = "ssh"
		}
		return nil, &RequiredSettingError{ID, prefix + "_username"}
	}
	if !hasX509CertPath {
		return nil, &RequiredSettingError{ID, "x509_cert_path"}
	}
	if !hasX509KeyPath {
		return nil, &RequiredSettingError{ID, "x509_key_path"}
	}
	// Process the Arrays.
	for name, val := range r.Builders[ID].Arrays {
		// if it's not a supported array group skip
		switch name {
		case "ami_block_device_mappings":
			// do ami_block_device_mappings processing
			settings[name], err = r.processAMIBlockDeviceMappings(val)
			if err != nil {
				return nil, &SettingError{ID, "ami_block_device_mappings", "", err}
			}
			continue
		case "ami_groups":
		case "ami_product_codes":
		case "ami_regions":
		case "ami_users":
		case "launch_block_device_mappings":
		case "run_tags":
		case "security_group_ids":
		case "tags":
		default:
			continue
		}
		array := deepcopy.Iface(val)
		if array != nil {
			settings[name] = array
		}
	}
	return settings, nil
}
// processAMIBlockDeviceMappings handles the ami_block_device_mappings
// array for Amazon builders. The mappings must be in the form of either
// []map[string]interface{} or [][]string. An error will occur if the
// data is anything else.
//
// For []map[string]interface{}, the data is returned without additional
// processing. Processing of the []map to only use valid keys may be added
// at some point in the future.
//
// For [][]string, processing will be done to convert the strings into
// key value pairs and place them in a map[string]interface{}. Values that
// are not supported settings for ami_block_device_mappings are ignored.
// The returned interface{} only includes the supported settings. When
// settings that are ints have invalid values specified, an error will be
// returned.
func (r *rawTemplate) processAMIBlockDeviceMappings(v interface{}) (interface{}, error) {
	// Already in the final form: pass through untouched.
	if reflect.TypeOf(v) == reflect.TypeOf([]map[string]interface{}{}) {
		return v, nil
	}
	// Process the [][]string into a []map[string]interface{}
	slices, ok := v.([][]string)
	if !ok {
		return nil, errors.New("not in a supported format")
	}
	ret := make([]map[string]interface{}, len(slices))
	for i, settings := range slices {
		vals := map[string]interface{}{}
		for _, setting := range settings {
			k, v := parseVar(setting)
			switch k {
			case "delete_on_termination":
				vals[k], _ = strconv.ParseBool(v)
			case "device_name":
				vals[k] = v
			case "encrypted":
				vals[k], _ = strconv.ParseBool(v)
			case "iops":
				i, err := strconv.Atoi(v)
				if err != nil {
					return nil, fmt.Errorf("iops: %s", err)
				}
				vals[k] = i
			case "no_device":
				vals[k], _ = strconv.ParseBool(v)
			case "snapshot_id":
				vals[k] = v
			case "virtual_name":
				vals[k] = v
			case "volume_size":
				i, err := strconv.Atoi(v)
				if err != nil {
					// Bug fix: the error previously said "iops",
					// copied from the case above.
					return nil, fmt.Errorf("volume_size: %s", err)
				}
				vals[k] = i
			case "volume_type":
				vals[k] = v
			}
		}
		ret[i] = vals
	}
	return ret, nil
}
// createDigitalOcean creates a map of settings for Packer's digitalocean
// builder. Any values that aren't supported by the digitalocean builder are
// ignored. Any required settings that don't exist result in an error and
// processing of the builder is stopped. For more information, refer to
// https://packer.io/docs/builders/digitalocean.html
//
// In addition to the following options, Packer communicators are supported.
// Check the communicator docs for valid options.
//
// Required configuration options:
//   api_token           string
//   image               string
//   region              string
//   size                string
// Optional configuration options:
//   droplet_name        string
//   private_networking  bool
//   snapshot_name       string
//   state_timeout       string
//   user_data           string
func (r *rawTemplate) createDigitalOcean(ID string) (settings map[string]interface{}, err error) {
	if _, ok := r.Builders[ID]; !ok {
		return nil, NewErrConfigNotFound(ID)
	}
	// Each create function is responsible for setting its own type.
	settings = map[string]interface{}{"type": DigitalOcean.String()}
	// If a common builder was defined, merge its settings with this
	// builder's settings; otherwise use this builder's settings as-is.
	merged := r.Builders[ID].Settings
	if _, ok := r.Builders[Common.String()]; ok {
		merged, err = mergeSettingsSlices(r.Builders[Common.String()].Settings, r.Builders[ID].Settings)
		if err != nil {
			return nil, err
		}
	}
	// Communicator settings are processed first.
	if _, err = r.processCommunicator(ID, merged, settings); err != nil {
		return nil, err
	}
	// Walk the merged settings and keep only the keys this builder knows,
	// remembering which of the required ones were seen.
	seen := map[string]bool{}
	for _, raw := range merged {
		key, val := parseVar(raw)
		val = r.replaceVariables(val)
		switch key {
		case "api_token", "image", "region", "size":
			settings[key] = val
			seen[key] = true
		case "droplet_name", "snapshot_name", "state_timeout", "user_data":
			settings[key] = val
		case "private_networking":
			settings[key], _ = strconv.ParseBool(val)
		}
	}
	// All four required settings must have been provided; report the first
	// missing one.
	for _, required := range []string{"api_token", "image", "region", "size"} {
		if !seen[required] {
			return nil, &RequiredSettingError{ID, required}
		}
	}
	return settings, nil
}
// createDocker creates a map of settings for Packer's docker builder. Any
// values that aren't supported by the docker builder are ignored. Any
// required settings that don't exist result in an error and processing of the
// builder is stopped. For more information, refer to
// https://packer.io/docs/builders/docker.html
//
// In addition to the following options, Packer communicators are supported.
// Check the communicator docs for valid options.
//
// NOTE(review): Packer documents commit, discard, and export_path as
// mutually exclusive output modes; this function requires all of them to be
// present — confirm that is intended.
//
// Required configuration options:
//   commit       bool
//   discard      bool
//   export_path  string
//   image        string
// Optional configuration options:
//   login           bool
//   login_email     string
//   login_username  string
//   login_password  string
//   login_server    string
//   pull            bool
//   run_command     array of strings
//   volumes         map of strings to strings
func (r *rawTemplate) createDocker(ID string) (settings map[string]interface{}, err error) {
	_, ok := r.Builders[ID]
	if !ok {
		return nil, NewErrConfigNotFound(ID)
	}
	settings = make(map[string]interface{})
	// Each create function is responsible for setting its own type.
	settings["type"] = Docker.String()
	// Merge the settings between common and this builders.
	var workSlice []string
	_, ok = r.Builders[Common.String()]
	if ok {
		workSlice, err = mergeSettingsSlices(r.Builders[Common.String()].Settings, r.Builders[ID].Settings)
		if err != nil {
			return nil, err
		}
	} else {
		workSlice = r.Builders[ID].Settings
	}
	// Process the communicator settings first, if there are any.
	_, err = r.processCommunicator(ID, workSlice, settings)
	if err != nil {
		return nil, err
	}
	// Go through each element in the slice, only take the ones that matter
	// to this builder.
	var hasCommit, hasDiscard, hasExportPath, hasImage, hasRunCommandArray bool
	// A run_command from the Settings section names a command file; it is
	// cached here and only loaded later if no run_command array exists in
	// the Arrays section (the array takes precedence).
	var runCommandFile string
	for _, s := range workSlice {
		k, v := parseVar(s)
		v = r.replaceVariables(v)
		switch k {
		case "commit":
			settings[k], _ = strconv.ParseBool(v)
			hasCommit = true
		case "discard":
			settings[k], _ = strconv.ParseBool(v)
			hasDiscard = true
		case "export_path":
			settings[k] = v
			hasExportPath = true
		case "image":
			settings[k] = v
			hasImage = true
		case "login":
			settings[k], _ = strconv.ParseBool(v)
		case "login_email":
			settings[k] = v
		case "login_password":
			settings[k] = v
		case "login_username":
			settings[k] = v
		case "login_server":
			settings[k] = v
		case "pull":
			settings[k], _ = strconv.ParseBool(v)
		case "run_command":
			// if it's here, cache the value, delay processing until arrays section
			runCommandFile = v
		}
	}
	if !hasCommit {
		return nil, &RequiredSettingError{ID, "commit"}
	}
	if !hasDiscard {
		return nil, &RequiredSettingError{ID, "discard"}
	}
	if !hasExportPath {
		return nil, &RequiredSettingError{ID, "export_path"}
	}
	if !hasImage {
		return nil, &RequiredSettingError{ID, "image"}
	}
	// Process the Arrays.
	for name, val := range r.Builders[ID].Arrays {
		if name == "run_command" {
			array := deepcopy.Iface(val)
			if array != nil {
				settings[name] = array
			}
			// Mark even if the deep copy was nil: the array's presence
			// suppresses the Settings-section run_command file below.
			hasRunCommandArray = true
			continue
		}
		if name == "volumes" {
			settings[name] = deepcopy.Iface(val)
		}
	}
	// if there wasn't an array of run commands, check to see if they should be loaded
	// from a file
	if !hasRunCommandArray {
		if runCommandFile != "" {
			commands, err := r.commandsFromFile(Docker.String(), runCommandFile)
			if err != nil {
				return nil, &SettingError{ID, "run_command", runCommandFile, err}
			}
			if len(commands) == 0 {
				return nil, &SettingError{ID, "run_command", runCommandFile, ErrNoCommands}
			}
			settings["run_command"] = commands
		}
	}
	return settings, nil
}
// createGoogleCompute creates a map of settings for Packer's googlecompute
// builder. Any values that aren't supported by the googlecompute builder are
// ignored. Any required settings that don't exist result in an error and
// processing of the builder is stopped. For more information, refer to
// https://packer.io/docs/builders/googlecompute.html
//
// In addition to the following options, Packer communicators are supported.
// Check the communicator docs for valid options.
//
// Required configuration options:
// project_id string
// source_image string
// zone string
// Optional configuration options:
// account_file string
// address string
// disk_size int
// image_name string
// image_description string
// instance_name string
// machine_type string
// metadata object of key/value strings
// network string
// preemtipble bool
// state_timeout string
// tags array of strings
// use_internal_ip bool
func (r *rawTemplate) createGoogleCompute(ID string) (settings map[string]interface{}, err error) {
_, ok := r.Builders[ID]
if !ok {
return nil, NewErrConfigNotFound(ID)
}
settings = make(map[string]interface{})
// Each create function is responsible for setting its own type.
settings["type"] = GoogleCompute.String()
// Merge the settings between common and this builders.
var workSlice []string
_, ok = r.Builders[Common.String()]
if ok {
workSlice, err = mergeSettingsSlices(r.Builders[Common.String()].Settings, r.Builders[ID].Settings)
if err != nil {
return nil, err
}
} else {
workSlice = r.Builders[ID].Settings
}
var hasProjectID, hasSourceImage, hasZone bool
// process communicator stuff first
_, err = r.processCommunicator(ID, workSlice, settings)
if err != nil {
return nil, err
}
// Go through each element in the slice, only take the ones that matter
// to this builder.
for _, s := range workSlice {
k, v := parseVar(s)
v = r.replaceVariables(v)
switch k {
case "account_file":
// Account file contains account credentials: the value
// is taken as is.
settings[k] = v
case "address":
settings[k] = v
case "disk_size":
i, err := strconv.Atoi(v)
if err != nil {
return nil, &SettingError{ID, k, v, err}
}
settings[k] = i
case "image_name":
settings[k] = v
case "image_description":
settings[k] = v
case "instance_name":
settings[k] = v
case "machine_type":
settings[k] = v
case "network":
settings[k] = v
case "preemtible":
settings[k], _ = strconv.ParseBool(v)
case "project_id":
settings[k] = v
hasProjectID = true
case "source_image":
settings[k] = v
hasSourceImage = true
case "state_timeout":
settings[k] = v
case "use_internal_ip":
settings[k], _ = strconv.ParseBool(v)
case "zone":
settings[k] = v
hasZone = true
}
}
if !hasProjectID {
return nil, &RequiredSettingError{ID, "project_id"}
}
if !hasSourceImage {
return nil, &RequiredSettingError{ID, "source_image"}
}
if !hasZone {
return nil, &RequiredSettingError{ID, "zone"}
}
// Process the Arrays.
for name, val := range r.Builders[ID].Arrays {
switch name {
case "metadata":
case "tags":
default
continue
}
array := deepcopy.Iface(val)
if array != nil {
settings[name] = array
}
}
return settings, nil
}
// createNull creates a map of settings for Packer's null builder. Any values
// that aren't supported by the null builder are ignored. Any required
// settings that don't exist result in an error and processing of the builder
// is stopped. For more information, refer to
// https://packer.io/docs/builders/null.html
//
// Configuration options:
//   Only settings provided by communicators are supported. See communicator
//   documentation.
//
// communicator == none is considered invalid.
func (r *rawTemplate) createNull(ID string) (settings map[string]interface{}, err error) {
	_, ok := r.Builders[ID]
	if !ok {
		return nil, NewErrConfigNotFound(ID)
	}
	settings = make(map[string]interface{})
	// Each create function is responsible for setting its own type.
	settings["type"] = Null.String()
	// Merge the settings between common and this builder.
	var workSlice []string
	_, ok = r.Builders[Common.String()]
	if ok {
		workSlice, err = mergeSettingsSlices(r.Builders[Common.String()].Settings, r.Builders[ID].Settings)
		if err != nil {
			return nil, err
		}
	} else {
		// Bug fix: this previously indexed r.Builders[Null.String()]
		// instead of r.Builders[ID], reading the wrong builder (or a
		// zero value) whenever the builder's ID differs from "null".
		// Every other create function uses ID here.
		workSlice = r.Builders[ID].Settings
	}
	prefix, err := r.processCommunicator(ID, workSlice, settings)
	if err != nil {
		return nil, err
	}
	if prefix == "" {
		// communicator == none; there must be a communicator
		return nil, fmt.Errorf("%s: %s builder requires a communicator other than \"none\"", ID, Null.String())
	}
	return settings, nil
}
// createOpenStack creates a map of settings for Packer's OpenStack builder.
// Any values that aren't supported by the OpenStack builder are ignored. Any
// required settings that doesn't exist result in an error and processing
// of the builder is stopped. For more information, refer to
// https://packer.io/docs/builders/openstack.html
//
// In addition to the following options, Packer communicators are supported.
// Check the communicator docs for valid options.
//
// Required configuration options:
//   flavor            string
//   image_name        string
//   source_image      string
//   username          string
//   password          string
// Optional configuration options:
//   api_key           string
//   availability_zone string
//   config_drive      bool
//   floating_ip       string
//   floating_ip_pool  string
//   insecure          bool
//   metadata          bool
//   networks          array of strings
//   rackconnect_wait  bool
//   region            string
//   security_groups   array of strings
//   ssh_interface     string
//   tenant_id         string
//   tenant_name       string
//   use_floating_ip   bool
func (r *rawTemplate) createOpenStack(ID string) (settings map[string]interface{}, err error) {
	_, ok := r.Builders[ID]
	if !ok {
		return nil, NewErrConfigNotFound(ID)
	}
	settings = map[string]interface{}{}
	// Each create function is responsible for setting its own type.
	settings["type"] = OpenStack.String()
	// Merge the settings between common and this builder.
	var workSlice []string
	_, ok = r.Builders[Common.String()]
	if ok {
		workSlice, err = mergeSettingsSlices(r.Builders[Common.String()].Settings, r.Builders[ID].Settings)
		if err != nil {
			return nil, err
		}
	} else {
		workSlice = r.Builders[ID].Settings
	}
	var hasFlavor, hasImageName, hasSourceImage, hasUsername, hasPassword, hasCommunicator bool
	// check for communicator first
	prefix, err := r.processCommunicator(ID, workSlice, settings)
	if err != nil {
		return nil, err
	}
	// see if the required settings include username/password
	if prefix != "" {
		_, ok = settings[prefix+"_username"]
		if ok {
			hasUsername = true
		}
		_, ok = settings[prefix+"_password"]
		if ok {
			hasPassword = true
		}
		hasCommunicator = true
	}
	// Go through each element in the slice, only take the ones that matter
	// to this builder. Bool settings are stored as parsed bools only; the
	// original code first stored the raw string and immediately overwrote
	// it with the parsed bool (a dead store).
	for _, s := range workSlice {
		k, v := parseVar(s)
		v = r.replaceVariables(v)
		switch k {
		case "api_key":
			settings[k] = v
		case "availability_zone":
			settings[k] = v
		case "config_drive":
			settings[k], _ = strconv.ParseBool(v)
		case "flavor":
			settings[k] = v
			hasFlavor = true
		case "floating_ip":
			settings[k] = v
		case "floating_ip_pool":
			settings[k] = v
		case "image_name":
			settings[k] = v
			hasImageName = true
		case "insecure":
			settings[k], _ = strconv.ParseBool(v)
		case "metadata":
			settings[k], _ = strconv.ParseBool(v)
		case "password":
			// skip if communicator was processed
			if hasCommunicator {
				continue
			}
			settings[k] = v
			hasPassword = true
		case "rackconnect_wait":
			settings[k], _ = strconv.ParseBool(v)
		case "region":
			settings[k] = v
		case "ssh_interface":
			// If there's a communicator and it's not SSH skip.
			if hasCommunicator && prefix != "ssh" {
				continue
			}
			settings[k] = v
		case "source_image":
			settings[k] = v
			hasSourceImage = true
		case "tenant_id":
			settings[k] = v
		case "tenant_name":
			settings[k] = v
		case "use_floating_ip":
			settings[k], _ = strconv.ParseBool(v)
		case "username":
			// skip if communicator was processed.
			if hasCommunicator {
				continue
			}
			settings[k] = v
			hasUsername = true
		}
	}
	// flavor is required
	if !hasFlavor {
		return nil, &RequiredSettingError{ID, "flavor"}
	}
	// image_name is required
	if !hasImageName {
		return nil, &RequiredSettingError{ID, "image_name"}
	}
	// source_image is required
	if !hasSourceImage {
		return nil, &RequiredSettingError{ID, "source_image"}
	}
	// Password is required
	if !hasPassword {
		if prefix == "" {
			return nil, &RequiredSettingError{ID, "password"}
		}
		return nil, &RequiredSettingError{ID, prefix + "_password"}
	}
	// Username is required
	if !hasUsername {
		if prefix == "" {
			return nil, &RequiredSettingError{ID, "username"}
		}
		return nil, &RequiredSettingError{ID, prefix + "_username"}
	}
	// Process the Arrays; only the supported array groups are copied.
	for name, val := range r.Builders[ID].Arrays {
		switch name {
		case "metadata":
		case "networks":
		case "security_groups":
		default:
			continue
		}
		array := deepcopy.Iface(val)
		if array != nil {
			settings[name] = array
		}
	}
	return settings, nil
}
// createQEMU creates a map of settings for Packer's QEMU builder. Any
// values that aren't supported by the QEMU builder are ignored. Any
// required settings that doesn't exist result in an error and processing
// of the builder is stopped. For more information, refer to
// https://packer.io/docs/builders/qemu.html
//
// In addition to the following options, Packer communicators are supported.
// Check the communicator docs for valid options.
//
// Required configuration options:
//   iso_checksum       string
//   iso_checksum_type  string
//   iso_url            string
//   ssh_username       string
// Optional configuration options:
//   accelerator        string
//   boot_command       array of strings
//   boot_wait          string
//   disk_cache         string
//   disk_compression   bool
//   disk_discard       string
//   disk_image         bool
//   disk_interface     string
//   disk_size          int
//   floppy_files       array_of_strings
//   format             string
//   headless           bool
//   http_directory     string
//   http_port_max      int
//   http_port_min      int
//   iso_target_path    string
//   iso_urls           array of strings
//   net_device         string
//   output_directory   string
//   qemuargs           array of array of strings
//   qemu_binary        string
//   skip_compaction    bool
func (r *rawTemplate) createQEMU(ID string) (settings map[string]interface{}, err error) {
	_, ok := r.Builders[ID]
	if !ok {
		return nil, NewErrConfigNotFound(ID)
	}
	settings = map[string]interface{}{}
	// Each create function is responsible for setting its own type.
	settings["type"] = QEMU.String()
	// Merge the settings between common and this builders.
	var workSlice []string
	_, ok = r.Builders[Common.String()]
	if ok {
		workSlice, err = mergeSettingsSlices(r.Builders[Common.String()].Settings, r.Builders[ID].Settings)
		if err != nil {
			return nil, err
		}
	} else {
		workSlice = r.Builders[ID].Settings
	}
	var bootCmdProcessed, hasChecksum, hasChecksumType, hasISOURL, hasUsername, hasCommunicator bool
	// check for communicator first
	prefix, err := r.processCommunicator(ID, workSlice, settings)
	if err != nil {
		return nil, err
	}
	// see if the required settings include username/password
	if prefix != "" {
		_, ok = settings[prefix+"_username"]
		if ok {
			hasUsername = true
		}
		hasCommunicator = true
	}
	// Go through each element in the slice, only take the ones that matter
	// to this builder.
	for _, s := range workSlice {
		k, v := parseVar(s)
		v = r.replaceVariables(v)
		switch k {
		case "accelerator":
			settings[k] = v
		case "boot_command":
			// if the boot_command exists in the Settings section, it should
			// reference a file. This boot_command takes precedence over any
			// boot_command in the array defined in the Arrays section.
			if strings.HasSuffix(v, ".command") {
				var commands []string
				commands, err = r.commandsFromFile("", v)
				if err != nil {
					return nil, &SettingError{ID, k, v, err}
				}
				if len(commands) == 0 {
					return nil, &SettingError{ID, k, v, ErrNoCommands}
				}
				settings[k] = commands
				bootCmdProcessed = true
			}
		case "boot_wait":
			settings[k] = v
		case "disk_cache":
			settings[k] = v
		case "disk_compression":
			settings[k], _ = strconv.ParseBool(v)
		case "disk_discard":
			settings[k] = v
		case "disk_image":
			settings[k], _ = strconv.ParseBool(v)
		case "disk_interface":
			settings[k] = v
		case "disk_size":
			// only add if its an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "format":
			settings[k] = v
		case "headless":
			settings[k], _ = strconv.ParseBool(v)
		case "http_directory":
			settings[k] = v
		case "http_port_min":
			// only add if its an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "http_port_max":
			// only add if its an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "iso_checksum":
			settings[k] = v
			hasChecksum = true
		case "iso_checksum_type":
			settings[k] = v
			hasChecksumType = true
		case "iso_target_path":
			// TODO should this have path location?
			settings[k] = v
		case "iso_url":
			settings[k] = v
			hasISOURL = true
		case "net_device":
			settings[k] = v
		case "output_directory":
			settings[k] = v
		case "qemu_binary":
			settings[k] = v
		case "skip_compaction":
			settings[k], _ = strconv.ParseBool(v)
		case "ssh_username":
			// Skip if communicator exists; this was already processed during communicator processing.
			if hasCommunicator {
				continue
			}
			settings[k] = v
			hasUsername = true
		}
	}
	// Username is required
	// NOTE(review): if no communicator was used, prefix is "" and this
	// error reports the key as "_username"; other builders (e.g.
	// createAmazonInstance) default prefix to "ssh" first — confirm
	// whether that was intended here too.
	if !hasUsername {
		return nil, &RequiredSettingError{ID, prefix + "_username"}
	}
	// make sure http_directory is set and add to dir list
	// TODO reconcile with above
	err = r.setHTTP(QEMU.String(), settings)
	if err != nil {
		return nil, err
	}
	for name, val := range r.Builders[ID].Arrays {
		switch name {
		case "boot_command":
			if bootCmdProcessed {
				continue // if the boot command was already set, don't use this array
			}
			settings[name] = val
		case "floppy_files":
			settings[name] = val
		case "iso_urls":
			// iso_url takes precedence
			if hasISOURL {
				continue
			}
			settings[name] = val
			hasISOURL = true
		case "qemuargs":
			settings[name] = val
		}
	}
	if !hasISOURL {
		return nil, &RequiredSettingError{ID, "iso_url"}
	}
	// If the iso info wasn't set from the Settings, get it from the distro's release
	// NOTE(review): this branch is unreachable — the check directly above
	// already returns when !hasISOURL, so the distro-release fallback below
	// (and its trailing return) can never execute. Either the error return
	// above or this fallback is likely unintended; confirm which behavior
	// is wanted before changing.
	if !hasISOURL {
		//handle iso lookup vs set in file
		switch r.Distro {
		case CentOS.String():
			settings["iso_url"] = r.releaseISO.(*centos).imageURL()
			settings["iso_checksum"] = r.releaseISO.(*centos).Checksum
			settings["iso_checksum_type"] = r.releaseISO.(*centos).ChecksumType
		case Debian.String():
			settings["iso_url"] = r.releaseISO.(*debian).imageURL()
			settings["iso_checksum"] = r.releaseISO.(*debian).Checksum
			settings["iso_checksum_type"] = r.releaseISO.(*debian).ChecksumType
		case Ubuntu.String():
			settings["iso_url"] = r.releaseISO.(*ubuntu).imageURL()
			settings["iso_checksum"] = r.releaseISO.(*ubuntu).Checksum
			settings["iso_checksum_type"] = r.releaseISO.(*ubuntu).ChecksumType
		default:
			err = fmt.Errorf("%q is not a supported Distro", r.Distro)
			return nil, err
		}
		return settings, nil
	}
	if !hasChecksum {
		return nil, &RequiredSettingError{ID: ID, Key: "iso_checksum"}
	}
	if !hasChecksumType {
		return nil, &RequiredSettingError{ID: ID, Key: "iso_checksum_type"}
	}
	return settings, nil
}
// createVirtualBoxISO creates a map of settings for Packer's virtualbox-iso
// builder. Any values that aren't supported by the virtualbox-iso builder are
// ignored. Any required settings that doesn't exist result in an error and
// processing of the builder is stopped. For more information, refer to
// https://packer.io/docs/builders/virtualbox-iso.html
//
// In addition to the following options, Packer communicators are supported.
// Check the communicator docs for valid options.
//
// Required configuration options:
// iso_checksum string
// iso_checksum_type string
// iso_url string
// ssh_password string
// ssh_username string
// Optional configuration options:
// boot_command array of strings
// boot_wait string
// disk_size int
// export_opts array of strings
// floppy_files array of strings
// format string; "ovf" or "ova"
// guest_additions_mode string
// guest_additions_path string
// guest_additions_sha256 string
// guest_additions_url string
// guest_os_type string; if empty, generated by rancher
// hard_drive_interface string
// headless bool
// http_directory string
// http_port_min int
// http_port_max int
// iso_interface string
// iso_target_path string
// iso_urls array_of_strings
// output_directory string
// shutdown_command string
// shutdown_timeout string
// ssh_host_port_min int
// ssh_host_port_max int
// ssh_skip_nat_mapping bool
// vboxmanage array of array of strings
// vboxmanage_post array of array of strings
// virtualbox_version_file string
// vm_name string
func (r *rawTemplate) createVirtualBoxISO(ID string) (settings map[string]interface{}, err error) {
	_, ok := r.Builders[ID]
	if !ok {
		return nil, NewErrConfigNotFound(ID)
	}
	settings = map[string]interface{}{}
	// Each create function is responsible for setting its own type.
	settings["type"] = VirtualBoxISO.String()
	// Merge the settings between common and this builder.
	var workSlice []string
	_, ok = r.Builders[Common.String()]
	if ok {
		workSlice, err = mergeSettingsSlices(r.Builders[Common.String()].Settings, r.Builders[ID].Settings)
		if err != nil {
			return nil, err
		}
	} else {
		workSlice = r.Builders[ID].Settings
	}
	var bootCmdProcessed, hasChecksum, hasChecksumType, hasISOURL, hasUsername, hasPassword, hasCommunicator bool
	// Check for a communicator first; its settings are written into settings.
	prefix, err := r.processCommunicator(ID, workSlice, settings)
	if err != nil {
		return nil, err
	}
	// See if the communicator already supplied the required username/password.
	if prefix != "" {
		_, ok = settings[prefix+"_username"]
		if ok {
			hasUsername = true
		}
		_, ok = settings[prefix+"_password"]
		if ok {
			hasPassword = true
		}
		hasCommunicator = true
	}
	// Go through each element in the slice, only take the ones that matter
	// to this builder.
	for _, s := range workSlice {
		k, v := parseVar(s)
		v = r.replaceVariables(v)
		switch k {
		case "boot_command":
			// if the boot_command exists in the Settings section, it should
			// reference a file. This boot_command takes precedence over any
			// boot_command in the array defined in the Arrays section.
			if strings.HasSuffix(v, ".command") {
				var commands []string
				commands, err = r.commandsFromFile("", v)
				if err != nil {
					return nil, &SettingError{ID, k, v, err}
				}
				if len(commands) == 0 {
					return nil, &SettingError{ID, k, v, ErrNoCommands}
				}
				settings[k] = commands
				bootCmdProcessed = true
			}
		case "boot_wait":
			settings[k] = v
		case "disk_size":
			// only add if its an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "format":
			settings[k] = v
		case "guest_additions_mode":
			settings[k] = v
		case "guest_additions_path":
			settings[k] = v
		case "guest_additions_sha256":
			settings[k] = v
		case "guest_additions_url":
			settings[k] = v
		case "guest_os_type":
			settings[k] = v
		case "hard_drive_interface":
			settings[k] = v
		case "headless":
			settings[k], _ = strconv.ParseBool(v)
		case "http_directory":
			settings[k] = v
		case "http_port_min":
			// only add if its an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "http_port_max":
			// only add if its an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "iso_checksum":
			settings[k] = v
			hasChecksum = true
		case "iso_checksum_type":
			settings[k] = v
			hasChecksumType = true
		case "iso_interface":
			settings[k] = v
		case "iso_target_path":
			// TODO should this have path location?
			settings[k] = v
		case "iso_url":
			settings[k] = v
			hasISOURL = true
		case "output_directory":
			settings[k] = v
		case "shutdown_command":
			// If it ends in .command, replace it with the command from the filepath
			if strings.HasSuffix(v, ".command") {
				var commands []string
				commands, err = r.commandsFromFile("", v)
				if err != nil {
					return nil, &SettingError{ID, k, v, err}
				}
				if len(commands) == 0 {
					return nil, &SettingError{ID, k, v, ErrNoCommands}
				}
				// Assume it's the first element.
				settings[k] = commands[0]
			} else {
				settings[k] = v // the value is the command
			}
		case "shutdown_timeout":
			settings[k] = v
		case "ssh_host_port_min":
			// Skip if prefix == winrm as SSH settings don't apply to WinRM
			if prefix == "winrm" {
				continue
			}
			// only add if its an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "ssh_host_port_max":
			// Skip if prefix == winrm as SSH settings don't apply to WinRM
			if prefix == "winrm" {
				continue
			}
			// only add if its an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "ssh_skip_nat_mapping":
			// FIX: documented as a supported option but previously had no
			// case, so the setting was silently dropped. SSH settings don't
			// apply to WinRM.
			if prefix == "winrm" {
				continue
			}
			settings[k], _ = strconv.ParseBool(v)
		case "ssh_password":
			// Skip if communicator exists; this was already processed during communicator processing.
			if hasCommunicator {
				continue
			}
			settings[k] = v
			hasPassword = true
		case "ssh_username":
			// Skip if communicator exists; this was already processed during communicator processing.
			if hasCommunicator {
				continue
			}
			settings[k] = v
			hasUsername = true
		case "virtualbox_version_file":
			// TODO: should this have path resolution?
			settings[k] = v
		case "vm_name":
			settings[k] = v
		}
	}
	// Username is required
	if !hasUsername {
		return nil, &RequiredSettingError{ID, prefix + "_username"}
	}
	// Password is required
	if !hasPassword {
		return nil, &RequiredSettingError{ID, prefix + "_password"}
	}
	// make sure http_directory is set and add to dir list
	// TODO reconcile with above
	err = r.setHTTP(VirtualBoxISO.String(), settings)
	if err != nil {
		return nil, err
	}
	for name, val := range r.Builders[ID].Arrays {
		switch name {
		case "boot_command":
			if bootCmdProcessed {
				continue // if the boot command was already set, don't use this array
			}
			settings[name] = val
		case "export_opts":
			settings[name] = val
		case "floppy_files":
			settings[name] = val
		case "iso_urls":
			// iso_url takes precedence
			if hasISOURL {
				continue
			}
			settings[name] = val
			hasISOURL = true
		case "vboxmanage":
			settings[name] = r.createVBoxManage(val)
		case "vboxmanage_post":
			settings[name] = r.createVBoxManage(val)
		}
	}
	// FIX: an early `if !hasISOURL { return …RequiredSettingError… }` here
	// made the distro-release fallback below unreachable dead code; it has
	// been removed so the lookup works, matching createVMWareISO.
	if r.osType == "" { // if the os type hasn't been set, the ISO info hasn't been retrieved
		err = r.ISOInfo(VirtualBoxISO, workSlice)
		if err != nil {
			return nil, err
		}
	}
	// TODO: modify to select the proper virtualbox value based on distro and arch
	/*
		// set the guest_os_type
		if tmpGuestOSType == "" {
			tmpGuestOSType = r.osType
		}
		settings["guest_os_type"] = tmpGuestOSType
	*/
	// If the iso info wasn't set from the Settings, get it from the distro's release
	if !hasISOURL {
		// handle iso lookup vs set in file
		switch r.Distro {
		case CentOS.String():
			settings["iso_url"] = r.releaseISO.(*centos).imageURL()
			settings["iso_checksum"] = r.releaseISO.(*centos).Checksum
			settings["iso_checksum_type"] = r.releaseISO.(*centos).ChecksumType
		case Debian.String():
			settings["iso_url"] = r.releaseISO.(*debian).imageURL()
			settings["iso_checksum"] = r.releaseISO.(*debian).Checksum
			settings["iso_checksum_type"] = r.releaseISO.(*debian).ChecksumType
		case Ubuntu.String():
			settings["iso_url"] = r.releaseISO.(*ubuntu).imageURL()
			settings["iso_checksum"] = r.releaseISO.(*ubuntu).Checksum
			settings["iso_checksum_type"] = r.releaseISO.(*ubuntu).ChecksumType
		default:
			err = fmt.Errorf("%q is not a supported Distro", r.Distro)
			return nil, err
		}
		return settings, nil
	}
	if !hasChecksum {
		return nil, &RequiredSettingError{ID: ID, Key: "iso_checksum"}
	}
	if !hasChecksumType {
		return nil, &RequiredSettingError{ID: ID, Key: "iso_checksum_type"}
	}
	return settings, nil
}
// createVirtualBoxOVF creates a map of settings for Packer's virtualbox-ovf
// builder. Any values that aren't supported by the virtualbox-ovf builder are
// ignored. Any required settings that don't exist result in an error and
// processing of the builder is stopped. For more information, refer to
// https://packer.io/docs/builders/virtualbox-ovf.html
//
// In addition to the following options, Packer communicators are supported.
// Check the communicator docs for valid options.
//
// Required configuration options:
//   source_path             string
//   ssh_username            string
// Optional configuration options:
//   boot_command            array of strings
//   boot_wait               string
//   export_opts             array of strings
//   floppy_files            array of strings
//   format                  string
//   guest_additions_mode    string
//   guest_additions_path    string
//   guest_additions_sha256  string
//   guest_additions_url     string
//   headless                bool
//   http_directory          string
//   http_port_min           int
//   http_port_max           int
//   import_flags            array of strings
//   import_opts             string
//   output_directory        string
//   shutdown_command        string
//   shutdown_timeout        string
//   ssh_host_port_min       int
//   ssh_host_port_max       int
//   ssh_skip_nat_mapping    bool
//   vboxmanage              array of strings
//   vboxmanage_post         array of strings
//   virtualbox_version_file string
//   vm_name                 string
func (r *rawTemplate) createVirtualBoxOVF(ID string) (settings map[string]interface{}, err error) {
	_, ok := r.Builders[ID]
	if !ok {
		return nil, NewErrConfigNotFound(ID)
	}
	settings = map[string]interface{}{}
	// Each create function is responsible for setting its own type.
	settings["type"] = VirtualBoxOVF.String()
	// Merge the settings between common and this builders.
	var workSlice []string
	_, ok = r.Builders[Common.String()]
	if ok {
		workSlice, err = mergeSettingsSlices(r.Builders[Common.String()].Settings, r.Builders[ID].Settings)
		if err != nil {
			return nil, err
		}
	} else {
		workSlice = r.Builders[ID].Settings
	}
	// Go through each element in the slice, only take the ones that matter
	// to this builder.
	var hasSourcePath, hasUsername, bootCmdProcessed, hasCommunicator, hasWinRMCommunicator bool
	// userNameVal holds the name of the username key that is required for
	// this builder; it depends on the communicator in use (e.g. winrm_username).
	var userNameVal string
	// check for communicator first
	prefix, err := r.processCommunicator(ID, workSlice, settings)
	if err != nil {
		return nil, err
	}
	// see if the required settings include username/password
	if prefix == "" {
		// for communicator == none or no communicator setting assume ssh_username
		// since the docs have that as required.
		// TODO: revisit after communicator doc clarification
		userNameVal = "ssh_username"
	} else {
		userNameVal = prefix + "_username"
		_, ok = settings[userNameVal]
		if ok {
			hasUsername = true
		}
		hasCommunicator = true
		if prefix == "winrm" {
			hasWinRMCommunicator = true
		}
	}
	for _, s := range workSlice {
		// var tmp interface{}
		k, v := parseVar(s)
		v = r.replaceVariables(v)
		switch k {
		case "boot_command":
			// if the boot_command exists in the Settings section, it should
			// reference a file. This boot_command takes precedence over any
			// boot_command in the array defined in the Arrays section.
			if strings.HasSuffix(v, ".command") {
				var commands []string
				commands, err = r.commandsFromFile("", v)
				if err != nil {
					return nil, &SettingError{ID, k, v, err}
				}
				if len(commands) == 0 {
					return nil, &SettingError{ID, k, v, ErrNoCommands}
				}
				settings[k] = commands
				bootCmdProcessed = true
			}
		case "boot_wait":
			settings[k] = v
		case "format":
			settings[k] = v
		case "guest_additions_mode":
			settings[k] = v
		case "guest_additions_path":
			settings[k] = v
		case "guest_additions_sha256":
			settings[k] = v
		case "guest_additions_url":
			settings[k] = v
		case "headless":
			settings[k], _ = strconv.ParseBool(v)
		case "http_directory":
			settings[k] = v
		case "http_port_min":
			// only add if its an int
			i, err := strconv.Atoi(v)
			if err != nil {
				err = &SettingError{ID, k, v, err}
				return nil, err
			}
			settings[k] = i
		case "http_port_max":
			// only add if its an int
			i, err := strconv.Atoi(v)
			if err != nil {
				err = &SettingError{ID, k, v, err}
				return nil, err
			}
			settings[k] = i
		case "import_opts":
			settings[k] = v
		case "output_directory":
			settings[k] = v
		case "shutdown_command":
			if strings.HasSuffix(v, ".command") {
				// If it ends in .command, replace it with the command from the filepath
				var commands []string
				commands, err = r.commandsFromFile("", v)
				if err != nil {
					return nil, &SettingError{ID, k, v, err}
				}
				if len(commands) == 0 {
					return nil, &SettingError{ID, k, v, ErrNoCommands}
				}
				// Assume it's the first element.
				settings[k] = commands[0]
			} else {
				settings[k] = v
			}
		case "shutdown_timeout":
			settings[k] = v
		case "source_path":
			src, err := r.findComponentSource(VirtualBoxOVF.String(), v, true)
			if err != nil {
				return nil, err
			}
			// if the source couldn't be found and an error wasn't generated, replace
			// s with the original value; this occurs when it is an example.
			// Nothing should be copied in this instance; it should not be added
			// to the copy info
			if src != "" {
				r.files[r.buildOutPath(VirtualBoxOVF.String(), v)] = src
			}
			settings[k] = r.buildTemplateResourcePath(VirtualBoxOVF.String(), v)
			hasSourcePath = true
		case "ssh_host_port_min":
			// SSH settings don't apply to winrm
			if hasWinRMCommunicator {
				continue
			}
			// only add if its an int
			i, err := strconv.Atoi(v)
			if err != nil {
				err = &SettingError{ID, k, v, err}
				return nil, err
			}
			settings[k] = i
		case "ssh_host_port_max":
			// SSH settings don't apply to winrm
			if hasWinRMCommunicator {
				continue
			}
			// only add if its an int
			i, err := strconv.Atoi(v)
			if err != nil {
				err = &SettingError{ID, k, v, err}
				return nil, err
			}
			settings[k] = i
		case "ssh_skip_nat_mapping":
			// SSH settings don't apply to winrm
			if hasWinRMCommunicator {
				continue
			}
			settings[k], _ = strconv.ParseBool(v)
		case "ssh_username":
			// skip if a communicator was configured; the username was already
			// processed during communicator processing
			if hasCommunicator {
				continue
			}
			settings[k] = v
			hasUsername = true
		case "virtualbox_version_file":
			settings[k] = v
		case "vm_name":
			settings[k] = v
		}
	}
	// Check to see if the required info was processed.
	if !hasUsername {
		return nil, &RequiredSettingError{ID, userNameVal}
	}
	if !hasSourcePath {
		return nil, &RequiredSettingError{ID, "source_path"}
	}
	// make sure http_directory is set and add to dir list
	err = r.setHTTP(VirtualBoxOVF.String(), settings)
	if err != nil {
		return nil, err
	}
	// Generate Packer Variables
	// Generate builder specific section
	for name, val := range r.Builders[ID].Arrays {
		switch name {
		case "boot_command":
			if bootCmdProcessed {
				continue // if the boot command was already set, don't use this array
			}
			settings[name] = val
		case "export_opts":
			settings[name] = val
		case "floppy_files":
			settings[name] = val
		case "import_flags":
			settings[name] = val
		case "vboxmanage":
			settings[name] = r.createVBoxManage(val)
		case "vboxmanage_post":
			settings[name] = r.createVBoxManage(val)
		}
	}
	return settings, nil
}
// createVBoxManage converts the received interface into the [][]string form
// that Packer expects for the vboxmanage and vboxmanage_post arrays. Each
// input entry "key=value" becomes ["modifyvm", "{{.Name}}", "--key", "value"].
func (r *rawTemplate) createVBoxManage(v interface{}) [][]string {
	entries := deepcopy.InterfaceToSliceOfStrings(v)
	out := make([][]string, len(entries))
	for i, entry := range entries {
		key, val := parseVar(entry)
		// Ensure that the key starts with "--". A naive concatenation is done.
		if !strings.HasPrefix(key, "--") {
			key = "--" + key
		}
		out[i] = []string{"modifyvm", "{{.Name}}", key, r.replaceVariables(val)}
	}
	return out
}
// createVMWareISO creates a map of settings for Packer's vmware-iso builder.
// Any values that aren't supported by the vmware-iso builder are ignored. Any
// required settings that don't exist result in an error and processing of the
// builder is stopped. For more information, refer to
// https://packer.io/docs/builders/vmware-iso.html
//
// In addition to the following options, Packer communicators are supported.
// Check the communicator docs for valid options.
//
// Required configuration options:
//   iso_checksum            string
//   iso_checksum_type       string
//   iso_url                 string
//   ssh_username            string
// Optional configuration options:
//   boot_command            array of strings
//   boot_wait               string
//   disk_additional_size    array of ints
//   disk_size               int
//   disk_type_id            string
//   floppy_files            array of strings
//   fusion_app_path         string
//   guest_os_type           string; if not set, will be generated
//   headless                bool
//   http_directory          string
//   http_port_min           int
//   http_port_max           int
//   iso_target_path         string
//   iso_urls                array of strings
//   output_directory        string
//   remote_cache_datastore  string
//   remote_cache_directory  string
//   remote_datastore        string
//   remote_host             string
//   remote_password         string
//   remote_private_key_file string
//   remote_type             string
//   remote_username         string
//   shutdown_command        string
//   shutdown_timeout        string
//   skip_compaction         bool
//   tools_upload_flavor     string
//   tools_upload_path       string
//   version                 string
//   vm_name                 string
//   vmdk_name               string
//   vmx_data                object of key/value strings
//   vmx_data_post           object of key/value strings
//   vmx_template_path       string
//   vnc_port_min            int
//   vnc_port_max            int
func (r *rawTemplate) createVMWareISO(ID string) (settings map[string]interface{}, err error) {
	_, ok := r.Builders[ID]
	if !ok {
		return nil, NewErrConfigNotFound(ID)
	}
	settings = make(map[string]interface{})
	// Each create function is responsible for setting its own type.
	settings["type"] = VMWareISO.String()
	// Merge the settings between common and this builders.
	var workSlice []string
	_, ok = r.Builders[Common.String()]
	if ok {
		workSlice, err = mergeSettingsSlices(r.Builders[Common.String()].Settings, r.Builders[ID].Settings)
		if err != nil {
			return nil, err
		}
	} else {
		workSlice = r.Builders[ID].Settings
	}
	var bootCmdProcessed, hasChecksum, hasChecksumType, hasISOURL, hasUsername, hasCommunicator bool
	var guestOSType string
	// check for communicator first
	prefix, err := r.processCommunicator(ID, workSlice, settings)
	if err != nil {
		return nil, err
	}
	// see if the required settings include username/password
	if prefix != "" {
		_, ok = settings[prefix+"_username"]
		if ok {
			hasUsername = true
		}
		hasCommunicator = true
	}
	// Go through each element in the slice, only take the ones that matter
	// to this builder.
	for _, s := range workSlice {
		k, v := parseVar(s)
		v = r.replaceVariables(v)
		switch k {
		case "boot_command":
			// if the boot_command exists in the Settings section, it should
			// reference a file. This boot_command takes precedence over any
			// boot_command in the array defined in the Arrays section.
			if strings.HasSuffix(v, ".command") {
				var commands []string
				commands, err = r.commandsFromFile("", v)
				if err != nil {
					return nil, &SettingError{ID, k, v, err}
				}
				if len(commands) == 0 {
					return nil, &SettingError{ID, k, v, ErrNoCommands}
				}
				settings[k] = commands
				bootCmdProcessed = true
			}
		case "boot_wait":
			settings[k] = v
		case "disk_size":
			// only add if its an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "disk_type_id":
			settings[k] = v
		case "fusion_app_path":
			settings[k] = v
		case "guest_os_type":
			// deferred: written into settings after the loop, with a
			// distro-derived default when unset
			guestOSType = v
		case "headless":
			settings[k], _ = strconv.ParseBool(v)
		case "http_directory":
			settings[k] = v
		case "http_port_max":
			// only add if its an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "http_port_min":
			// only add if its an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "iso_checksum":
			settings[k] = v
			hasChecksum = true
		case "iso_checksum_type":
			settings[k] = v
			hasChecksumType = true
		case "iso_target_path":
			settings[k] = v
		case "iso_url":
			settings[k] = v
			hasISOURL = true
		case "output_directory":
			settings[k] = v
		case "remote_cache_datastore":
			settings[k] = v
		case "remote_cache_directory":
			settings[k] = v
		case "remote_datastore":
			settings[k] = v
		case "remote_host":
			settings[k] = v
		case "remote_password":
			settings[k] = v
		case "remote_private_key_file":
			settings[k] = v
		case "remote_type":
			settings[k] = v
		case "remote_username":
			settings[k] = v
		case "shutdown_command":
			// If it ends in .command, replace it with the command from the filepath
			if strings.HasSuffix(v, ".command") {
				var commands []string
				commands, err = r.commandsFromFile("", v)
				if err != nil {
					return nil, &SettingError{ID, k, v, err}
				}
				if len(commands) == 0 {
					return nil, &SettingError{ID, k, v, ErrNoCommands}
				}
				// Assume it's the first element.
				settings[k] = commands[0]
				continue
			}
			settings[k] = v // the value is the command
		case "shutdown_timeout":
			settings[k] = v
		case "skip_compaction":
			settings[k], _ = strconv.ParseBool(v)
		case "ssh_username":
			// Skip if communicator exists; this was already processed during communicator processing.
			if hasCommunicator {
				continue
			}
			settings[k] = v
			hasUsername = true
		case "tools_upload_flavor":
			settings[k] = v
		case "tools_upload_path":
			settings[k] = v
		case "version":
			settings[k] = v
		case "vm_name":
			settings[k] = v
		case "vmdk_name":
			settings[k] = v
		case "vmx_template_path":
			settings[k] = v
		case "vnc_port_min":
			// only add if its an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "vnc_port_max":
			// only add if its an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		}
	}
	// Only check to see if the required ssh_username field was set. The required iso info is checked after Array processing
	if !hasUsername {
		return nil, &RequiredSettingError{ID, prefix + "_username"}
	}
	// make sure http_directory is set and add to dir list
	err = r.setHTTP(VMWareISO.String(), settings)
	if err != nil {
		return nil, err
	}
	// Process arrays, iso_urls is only valid if iso_url is not set
	for name, val := range r.Builders[ID].Arrays {
		switch name {
		case "boot_command":
			if bootCmdProcessed {
				continue // if the boot command was already set, don't use this array
			}
			settings[name] = val
		case "disk_additional_size":
			var tmp []int
			// TODO it is assumed that it is a slice of strings. Is this a good assumption?
			vals, ok := val.([]string)
			if !ok {
				return nil, &SettingError{ID, name, json.MarshalToString(val), fmt.Errorf("expected a string array")}
			}
			for _, v := range vals {
				i, err := strconv.Atoi(v)
				if err != nil {
					return nil, &SettingError{ID, name, json.MarshalToString(val), err}
				}
				tmp = append(tmp, i)
			}
			settings[name] = tmp
		case "floppy_files":
			settings[name] = val
		case "iso_urls":
			// these are only added if iso_url isn't set
			if hasISOURL {
				continue
			}
			settings[name] = val
			hasISOURL = true
		case "vmx_data":
			settings[name] = r.createVMXData(val)
		case "vmx_data_post":
			settings[name] = r.createVMXData(val)
		}
	}
	// TODO how is this affected by checksum being set in the template?
	if r.osType == "" { // if the os type hasn't been set, the ISO info hasn't been retrieved
		// FIX: this previously passed VirtualBoxISO (copy-paste from the
		// virtualbox-iso builder); this is the vmware-iso builder.
		err = r.ISOInfo(VMWareISO, workSlice)
		if err != nil {
			return nil, err
		}
	}
	// set the guest_os_type
	if guestOSType == "" {
		guestOSType = r.osType
	}
	settings["guest_os_type"] = guestOSType
	// If the iso info wasn't set from the Settings, get it from the distro's release
	if !hasISOURL {
		// handle iso lookup vs set in file
		switch r.Distro {
		case CentOS.String():
			settings["iso_url"] = r.releaseISO.(*centos).imageURL()
			settings["iso_checksum"] = r.releaseISO.(*centos).Checksum
			settings["iso_checksum_type"] = r.releaseISO.(*centos).ChecksumType
		case Debian.String():
			settings["iso_url"] = r.releaseISO.(*debian).imageURL()
			settings["iso_checksum"] = r.releaseISO.(*debian).Checksum
			settings["iso_checksum_type"] = r.releaseISO.(*debian).ChecksumType
		case Ubuntu.String():
			settings["iso_url"] = r.releaseISO.(*ubuntu).imageURL()
			settings["iso_checksum"] = r.releaseISO.(*ubuntu).Checksum
			settings["iso_checksum_type"] = r.releaseISO.(*ubuntu).ChecksumType
		default:
			err = fmt.Errorf("%q is not a supported Distro", r.Distro)
			return nil, err
		}
		return settings, nil
	}
	if !hasChecksum {
		return nil, &RequiredSettingError{ID: ID, Key: "iso_checksum"}
	}
	if !hasChecksumType {
		return nil, &RequiredSettingError{ID: ID, Key: "iso_checksum_type"}
	}
	return settings, nil
}
// createVMWareVMX creates a map of settings for Packer's vmware-vmx builder.
// Any values that aren't supported by the vmware-vmx builder are ignored. Any
// required settings that don't exist result in an error and processing of the
// builder is stopped. For more information, refer to
// https://packer.io/docs/builders/vmware-vmx.html
//
// In addition to the following options, Packer communicators are supported.
// Check the communicator docs for valid options.
//
// Required configuration options:
//   source_path      string
//   ssh_username     string
// Optional configuration options:
//   boot_command     array of strings*
//   boot_wait        string
//   floppy_files     array of strings
//   fusion_app_path  string
//   headless         bool
//   http_directory   string
//   http_port_min    int
//   http_port_max    int
//   output_directory string
//   shutdown_command string
//   shutdown_timeout string
//   skip_compaction  bool
//   vm_name          string
//   vmx_data         object of key/value strings
//   vmx_data_post    object of key/value strings
//   vnc_port_min     int
//   vnc_port_max     int
func (r *rawTemplate) createVMWareVMX(ID string) (settings map[string]interface{}, err error) {
	_, ok := r.Builders[ID]
	if !ok {
		return nil, NewErrConfigNotFound(ID)
	}
	settings = make(map[string]interface{})
	// Each create function is responsible for setting its own type.
	settings["type"] = VMWareVMX.String()
	// Merge the settings between common and this builders.
	var workSlice []string
	_, ok = r.Builders[Common.String()]
	if ok {
		workSlice, err = mergeSettingsSlices(r.Builders[Common.String()].Settings, r.Builders[ID].Settings)
		if err != nil {
			return nil, err
		}
	} else {
		workSlice = r.Builders[ID].Settings
	}
	var hasSourcePath, hasUsername, bootCmdProcessed, hasCommunicator bool
	// check for communicator first
	prefix, err := r.processCommunicator(ID, workSlice, settings)
	if err != nil {
		return nil, err
	}
	// see if the required settings include username/password
	if prefix != "" {
		_, ok = settings[prefix+"_username"]
		if ok {
			hasUsername = true
		}
		hasCommunicator = true
	}
	// Go through each element in the slice, only take the ones that matter
	// to this builder.
	for _, s := range workSlice {
		k, v := parseVar(s)
		v = r.replaceVariables(v)
		switch k {
		case "boot_command":
			// if the boot_command exists in the Settings section, it should
			// reference a file. This boot_command takes precedence over any
			// boot_command in the array defined in the Arrays section.
			if strings.HasSuffix(v, ".command") {
				var commands []string
				commands, err = r.commandsFromFile("", v)
				if err != nil {
					return nil, &SettingError{ID, k, v, err}
				}
				if len(commands) == 0 {
					return nil, &SettingError{ID, k, v, ErrNoCommands}
				}
				settings[k] = commands
				bootCmdProcessed = true
			}
		case "boot_wait":
			settings[k] = v
		case "fusion_app_path":
			settings[k] = v
		case "headless":
			settings[k], _ = strconv.ParseBool(v)
		case "http_directory":
			settings[k] = v
		case "http_port_max":
			// only add if its an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "http_port_min":
			// only add if its an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "output_directory":
			settings[k] = v
		case "shutdown_timeout":
			settings[k] = v
		case "shutdown_command":
			// If it ends in .command, replace it with the command from the filepath
			if strings.HasSuffix(v, ".command") {
				var commands []string
				commands, err = r.commandsFromFile("", v)
				if err != nil {
					return nil, &SettingError{ID, k, v, err}
				}
				if len(commands) == 0 {
					return nil, &SettingError{ID, k, v, ErrNoCommands}
				}
				// Assume it's the first element.
				settings[k] = commands[0]
			} else {
				settings[k] = v // the value is the command
			}
		case "skip_compaction":
			settings[k], _ = strconv.ParseBool(v)
		case "source_path":
			src, err := r.findComponentSource(VMWareVMX.String(), v, true)
			if err != nil {
				return nil, err
			}
			// if the source couldn't be found and an error wasn't generated, replace
			// s with the original value; this occurs when it is an example.
			// Nothing should be copied in this instance; it should not be added
			// to the copy info
			if src != "" {
				r.files[r.buildOutPath(VMWareVMX.String(), v)] = src
			}
			settings[k] = r.buildTemplateResourcePath(VMWareVMX.String(), v)
			hasSourcePath = true
		case "ssh_username":
			// skip if a communicator was configured; the username was already
			// processed during communicator processing
			if hasCommunicator {
				continue
			}
			settings[k] = v
			hasUsername = true
		case "vm_name":
			settings[k] = v
		case "vnc_port_max":
			// only add if its an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		case "vnc_port_min":
			// only add if its an int
			i, err := strconv.Atoi(v)
			if err != nil {
				return nil, &SettingError{ID, k, v, err}
			}
			settings[k] = i
		}
	}
	// Check if required fields were processed
	if !hasUsername {
		return nil, &RequiredSettingError{ID, "ssh_username"}
	}
	if !hasSourcePath {
		return nil, &RequiredSettingError{ID, "source_path"}
	}
	// make sure http_directory is set and add to dir list
	err = r.setHTTP(VMWareVMX.String(), settings)
	if err != nil {
		return nil, err
	}
	// Process arrays; boot_command from Settings takes precedence over the array
	for name, val := range r.Builders[ID].Arrays {
		switch name {
		case "boot_command":
			if bootCmdProcessed {
				continue // if the boot command was already set, don't use this array
			}
			settings[name] = val
		case "floppy_files":
			settings[name] = val
		case "vmx_data":
			settings[name] = r.createVMXData(val)
		case "vmx_data_post":
			settings[name] = r.createVMXData(val)
		}
	}
	return settings, nil
}
// createVMXData converts the received interface into the key/value map that
// Packer expects for the vmx_data and vmx_data_post sections. Values have
// Rancher variables resolved before being stored.
func (r *rawTemplate) createVMXData(v interface{}) map[string]string {
	entries := deepcopy.InterfaceToSliceOfStrings(v)
	data := make(map[string]string, len(entries))
	for _, entry := range entries {
		key, val := parseVar(entry)
		data[key] = r.replaceVariables(val)
	}
	return data
}
// updateBuilders updates the rawTemplate's builders with the passed new
// builder.
// Builder Update rules:
//   * If r's old builder does not have a matching builder in the new builder
//     map, new, nothing is done.
//   * If the builder exists in both r and new, the new builder updates r's
//     builder.
//   * If the new builder does not have a matching builder in r, the new
//     builder is added to r's builder map.
//
// Settings update rules:
//   * If the setting exists in r's builder but not in new, nothing is done.
//     This means that deletion of settings via not having them exist in the
//     new builder is not supported. This is to simplify overriding templates
//     in the configuration files.
//   * If the setting exists in both r's builder and new, r's builder is
//     updated with new's value.
//   * If the setting exists in new, but not r's builder, new's setting is
//     added to r's builder.
//   * To unset a setting, specify the key, without a value: `"key="`. In most
//     situations, Rancher will interpret an key without a value as a deletion
//     of that key. There is an exception:
//   * `guest_os_type`: This is generally set at Packer Template generation
//     time by Rancher.
func (r *rawTemplate) updateBuilders(newB map[string]builder) error {
	// If there is nothing new, old equals merged. (len of a nil map is 0, so
	// no separate nil check is needed.)
	if len(newB) == 0 {
		return nil
	}
	// Convert the existing and new Builders to Componenter so their keys can
	// be merged into a single key list.
	oldC := DeepCopyMapStringBuilder(r.Builders)
	newC := DeepCopyMapStringBuilder(newB)
	keys := mergeKeysFromComponentMaps(oldC, newC)
	// If there's a builder with the key CommonBuilder, merge them. This is a
	// special case for builders only.
	// FIX: the error from updateCommon was previously discarded.
	if common, ok := newB[Common.String()]; ok {
		if err := r.updateCommon(common); err != nil {
			return fmt.Errorf("merge of common builder settings failed: %s", err)
		}
	}
	// Copy: if the key exists in the new builder only.
	// Ignore: if the key does not exist in the new builder.
	// Merge: if the key exists in both the new and old builder.
	for _, v := range keys {
		// If it doesn't exist in the old builder, add it.
		b, ok := r.Builders[v]
		if !ok {
			r.Builders[v] = newB[v].DeepCopy()
			continue
		}
		// If the element for this key doesn't exist, skip it.
		bb, ok := newB[v]
		if !ok {
			continue
		}
		err := b.mergeSettings(bb.Settings)
		if err != nil {
			return fmt.Errorf("merge of settings failed: %s", err)
		}
		b.mergeArrays(bb.Arrays)
		r.Builders[v] = b
	}
	return nil
}
// updateCommon updates rawTemplate's common builder settings.
// Update rules:
//   * When both the existing common builder, r, and the new one, b, have the
//     same setting, b's value replaces r's; the new value replaces the
//     existing value.
//   * When the setting in b is new, it is added to r: new settings are
//     inserted into r's CommonBuilder setting list.
//   * When r has a setting that does not exist in b, nothing is done. This
//     method does not delete any settings that already exist in r.
func (r *rawTemplate) updateCommon(newB builder) error {
	if r.Builders == nil {
		r.Builders = map[string]builder{}
	}
	// If the existing builder doesn't have a CommonBuilder section, just add it
	b, ok := r.Builders[Common.String()]
	if !ok {
		r.Builders[Common.String()] = builder{templateSection: templateSection{Type: newB.Type, Settings: newB.Settings, Arrays: newB.Arrays}}
		return nil
	}
	// Otherwise merge the two.
	// FIX: this previously called b.mergeSettings(b.Settings) — merging the
	// existing builder with its own settings — which silently discarded newB
	// and made the documented update rules a no-op.
	err := b.mergeSettings(newB.Settings)
	if err != nil {
		return err
	}
	r.Builders[Common.String()] = b
	return nil
}
// setHTTP ensures that the http_directory setting is set and adds it to the
// dirs info so that its contents can be copied. If it is not set, "http" is
// assumed.
//
// The http_directory doesn't include the component.
func (r *rawTemplate) setHTTP(component string, m map[string]interface{}) error {
	v, ok := m["http_directory"]
	if !ok {
		v = "http"
	}
	// FIX: the previous unchecked v.(string) assertions panicked if a
	// non-string value had been stored; return an error instead.
	dir, ok := v.(string)
	if !ok {
		return fmt.Errorf("setHTTP error: http_directory is not a string: %v", v)
	}
	src, err := r.findComponentSource(component, dir, true)
	if err != nil {
		return fmt.Errorf("setHTTP error: %s", err)
	}
	// if the source couldn't be found and an error wasn't generated, replace
	// s with the original value; this occurs when it is an example.
	// Nothing should be copied in this instance; it should not be added
	// to the copy info
	if src != "" {
		r.dirs[r.buildOutPath("", dir)] = src
	}
	m["http_directory"] = r.buildTemplateResourcePath("", dir)
	return nil
}
// DeepCopyMapStringBuilder makes a deep copy of each builder passed and
// returns the copies as a map[string]Componenter.
func DeepCopyMapStringBuilder(b map[string]builder) map[string]Componenter {
	copies := make(map[string]Componenter, len(b))
	for name, bldr := range b {
		copies[name] = bldr.DeepCopy()
	}
	return copies
}
// commandFromSlice takes a []string and returns it as a single command
// string. An empty slice yields ""; a single element is returned unchanged.
//
// For multi-line input, each line is whitespace-trimmed and joined. A
// trailing `\` marks a continuation; the first line WITHOUT one terminates
// the command, and any lines after it are ignored.
func commandFromSlice(lines []string) string {
	switch len(lines) {
	case 0:
		return ""
	case 1:
		return lines[0]
	}
	var b strings.Builder
	for _, raw := range lines {
		trimmed := strings.TrimSpace(raw)
		if !strings.HasSuffix(trimmed, `\`) {
			// No continuation marker: this line ends the command.
			b.WriteString(trimmed)
			break
		}
		b.WriteString(strings.TrimSuffix(trimmed, `\`))
	}
	return b.String()
}
|
package sftpd
import (
"golang.org/x/crypto/ssh"
"log"
"net"
"fmt"
"os"
"crypto/subtle"
"io/ioutil"
"crypto/rsa"
"encoding/pem"
"crypto/x509"
"io"
"crypto/rand"
"github.com/pkg/sftp"
"strconv"
)
// NewSimpleSftpServer starts a single-user, password-authenticated SFTP
// server on listenAddress:listenPort, accepting connections in a background
// goroutine. The host key is read from homePath/id_rsa, generating a key
// pair first if none exists. Close the returned listener to stop the server.
func NewSimpleSftpServer(homePath, listenAddress string, listenPort int, username, password string, pathMapper *PathMapper) (net.Listener, error) {
	config := &ssh.ServerConfig{
		PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
			log.Printf("Login: %s\n", c.User())
			// Constant-time comparisons avoid leaking credential content
			// through timing differences.
			if subtle.ConstantTimeCompare([]byte(username), []byte(c.User())) == 1 && subtle.ConstantTimeCompare(pass, []byte(password)) == 1 {
				return nil, nil
			}
			return nil, fmt.Errorf("password rejected for %q", c.User())
		},
	}
	generateKeysIfNotExist(homePath)
	privateBytes, err := ioutil.ReadFile(homePath + "/id_rsa")
	if err != nil {
		// Return the error instead of log.Fatal: Fatal calls os.Exit, which
		// made the old `return nil, err` unreachable and killed the caller's
		// whole process on a recoverable setup failure.
		return nil, fmt.Errorf("failed to load private key: %v", err)
	}
	private, err := ssh.ParsePrivateKey(privateBytes)
	if err != nil {
		return nil, fmt.Errorf("failed to parse private key: %v", err)
	}
	config.AddHostKey(private)
	listener, err := net.Listen("tcp", listenAddress+":"+strconv.Itoa(listenPort))
	if err != nil {
		return nil, fmt.Errorf("failed to listen for connection: %v", err)
	}
	log.Printf("Listening on %v\n", listener.Addr())
	go func() {
		for {
			conn, e := listener.Accept()
			if e != nil {
				// Accept fails once the listener is closed; stop the accept
				// loop instead of killing the process with os.Exit(2).
				log.Println("sftpd accept loop terminating:", e)
				return
			}
			go HandleConn(conn, config, pathMapper)
		}
	}()
	return listener, nil
}
// HandleConn serves a single incoming TCP connection, logging any error the
// SSH/SFTP session ends with. The connection is always closed on return.
func HandleConn(conn net.Conn, config *ssh.ServerConfig, pathMapper *PathMapper) {
	defer conn.Close()
	if err := handleConn(conn, config, pathMapper); err != nil {
		log.Println("sftpd connection errored:", err)
	}
}
// handleConn performs the SSH handshake on conn and services "session"
// channels by running an SFTP request server over them. It returns when the
// client disconnects or an unrecoverable error occurs.
func handleConn(conn net.Conn, config *ssh.ServerConfig, pathMapper *PathMapper) error {
	sconn, chans, reqs, e := ssh.NewServerConn(conn, config)
	if e != nil {
		return e
	}
	defer sconn.Close()
	log.Println("login detected:", sconn.User())
	// The incoming Request channel must be serviced.
	go ssh.DiscardRequests(reqs)
	// Service the incoming Channel channel.
	for newChannel := range chans {
		if newChannel.ChannelType() != "session" {
			newChannel.Reject(ssh.UnknownChannelType, "unknown channel type")
			continue
		}
		channel, requests, err := newChannel.Accept()
		if err != nil {
			return err
		}
		// Accept only the "sftp" subsystem request; refuse everything else.
		go func(in <-chan *ssh.Request) {
			for req := range in {
				log.Printf("Request: %v\n", req.Type)
				ok := false
				switch req.Type {
				case "subsystem":
					log.Printf("Subsystem: %s\n", req.Payload[4:])
					if string(req.Payload[4:]) == "sftp" {
						ok = true
					}
				}
				log.Printf(" - accepted: %v\n", ok)
				req.Reply(ok, nil)
			}
		}(requests)
		root := VfsHandler(pathMapper)
		server := sftp.NewRequestServer(channel, root)
		if err := server.Serve(); err == io.EOF {
			server.Close()
			log.Print("sftp client exited session.")
		} else if err != nil {
			// BUG FIX: propagate the error instead of log.Fatal — a single
			// failed session must not terminate the whole server process.
			return fmt.Errorf("sftp server completed with error: %v", err)
		}
	}
	return nil
}
// generateKeysIfNotExist creates an RSA key pair (id_rsa / id_rsa.pub) in
// homeDir if no private key exists there yet.
func generateKeysIfNotExist(homeDir string) {
	privateKeyFile := homeDir + "/id_rsa"
	publicKeyFile := homeDir + "/id_rsa.pub"
	if _, err := os.Stat(privateKeyFile); os.IsNotExist(err) {
		// Surface generation failures instead of silently dropping them;
		// otherwise the caller later fails to read a key that was never
		// written, with a less informative error.
		if err := makeSSHKeyPair(publicKeyFile, privateKeyFile); err != nil {
			log.Println("failed to generate SSH host key pair:", err)
		}
	}
}
// makeSSHKeyPair generates an RSA key pair, writing the PEM-encoded private
// key to privateKeyPath and the authorized_keys-format public key to
// pubKeyPath.
func makeSSHKeyPair(pubKeyPath, privateKeyPath string) error {
	// 2048 bits: 1024-bit RSA is considered breakable and should not be
	// used for host keys.
	privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return err
	}
	// Create the private key file owner-read/write only (0600): it is a
	// secret and must not be world-readable as os.Create's default allows.
	privateKeyFile, err := os.OpenFile(privateKeyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		// Check the error before deferring Close (the old code deferred on a
		// possibly-nil handle).
		return err
	}
	defer privateKeyFile.Close()
	privateKeyPEM := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)}
	if err := pem.Encode(privateKeyFile, privateKeyPEM); err != nil {
		return err
	}
	// Generate and write the public key; 0644 rather than the previous 0655,
	// which set meaningless execute bits on a key file.
	pub, err := ssh.NewPublicKey(&privateKey.PublicKey)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(pubKeyPath, ssh.MarshalAuthorizedKey(pub), 0644)
}
Fixed sftpd
package sftpd
import (
"golang.org/x/crypto/ssh"
"log"
"net"
"fmt"
"os"
"crypto/subtle"
"io/ioutil"
"crypto/rsa"
"encoding/pem"
"crypto/x509"
"io"
"crypto/rand"
"github.com/pkg/sftp"
"strconv"
)
// NewSimpleSftpServer starts a single-user, password-authenticated SFTP
// server on listenAddress:listenPort and serves connections in the calling
// goroutine (this function blocks until Accept fails, e.g. when the listener
// is closed from another goroutine). The host key is read from
// homePath/id_rsa, generating a key pair first if none exists.
func NewSimpleSftpServer(homePath, listenAddress string, listenPort int, username, password string, pathMapper *PathMapper) (net.Listener, error) {
	config := &ssh.ServerConfig{
		PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
			log.Printf("Login: %s\n", c.User())
			// Constant-time comparisons avoid leaking credential content
			// through timing differences.
			if subtle.ConstantTimeCompare([]byte(username), []byte(c.User())) == 1 && subtle.ConstantTimeCompare(pass, []byte(password)) == 1 {
				return nil, nil
			}
			return nil, fmt.Errorf("password rejected for %q", c.User())
		},
	}
	generateKeysIfNotExist(homePath)
	privateBytes, err := ioutil.ReadFile(homePath + "/id_rsa")
	if err != nil {
		// Return the error instead of log.Fatal: Fatal calls os.Exit, which
		// made the old `return nil, err` unreachable and killed the caller's
		// whole process on a recoverable setup failure.
		return nil, fmt.Errorf("failed to load private key: %v", err)
	}
	private, err := ssh.ParsePrivateKey(privateBytes)
	if err != nil {
		return nil, fmt.Errorf("failed to parse private key: %v", err)
	}
	config.AddHostKey(private)
	listener, err := net.Listen("tcp", listenAddress+":"+strconv.Itoa(listenPort))
	if err != nil {
		return nil, fmt.Errorf("failed to listen for connection: %v", err)
	}
	log.Printf("Listening on %v\n", listener.Addr())
	for {
		conn, e := listener.Accept()
		if e != nil {
			// Accept fails once the listener is closed: return instead of
			// os.Exit(2), which killed the process even on normal shutdown.
			return listener, e
		}
		go HandleConn(conn, config, pathMapper)
	}
}
// HandleConn serves a single incoming TCP connection, logging any error the
// SSH/SFTP session ends with. The connection is always closed on return.
func HandleConn(conn net.Conn, config *ssh.ServerConfig, pathMapper *PathMapper) {
	defer conn.Close()
	e := handleConn(conn, config, pathMapper)
	if e != nil {
		log.Println("sftpd connection errored:", e)
	}
}
// handleConn performs the SSH handshake on conn and services "session"
// channels by running an SFTP request server over them. It returns when the
// client disconnects or an unrecoverable error occurs.
func handleConn(conn net.Conn, config *ssh.ServerConfig, pathMapper *PathMapper) error {
	sconn, chans, reqs, e := ssh.NewServerConn(conn, config)
	if e != nil {
		return e
	}
	defer sconn.Close()
	log.Println("login detected:", sconn.User())
	// The incoming Request channel must be serviced.
	go ssh.DiscardRequests(reqs)
	// Service the incoming Channel channel.
	for newChannel := range chans {
		if newChannel.ChannelType() != "session" {
			newChannel.Reject(ssh.UnknownChannelType, "unknown channel type")
			continue
		}
		channel, requests, err := newChannel.Accept()
		if err != nil {
			return err
		}
		// Accept only the "sftp" subsystem request; refuse everything else.
		go func(in <-chan *ssh.Request) {
			for req := range in {
				log.Printf("Request: %v\n", req.Type)
				ok := false
				switch req.Type {
				case "subsystem":
					log.Printf("Subsystem: %s\n", req.Payload[4:])
					if string(req.Payload[4:]) == "sftp" {
						ok = true
					}
				}
				log.Printf(" - accepted: %v\n", ok)
				req.Reply(ok, nil)
			}
		}(requests)
		root := VfsHandler(pathMapper)
		server := sftp.NewRequestServer(channel, root)
		if err := server.Serve(); err == io.EOF {
			server.Close()
			log.Print("sftp client exited session.")
		} else if err != nil {
			// BUG FIX: propagate the error instead of log.Fatal — a single
			// failed session must not terminate the whole server process.
			return fmt.Errorf("sftp server completed with error: %v", err)
		}
	}
	return nil
}
// generateKeysIfNotExist creates an RSA key pair (id_rsa / id_rsa.pub) in
// homeDir if no private key exists there yet.
func generateKeysIfNotExist(homeDir string) {
	privateKeyFile := homeDir + "/id_rsa"
	publicKeyFile := homeDir + "/id_rsa.pub"
	if _, err := os.Stat(privateKeyFile); os.IsNotExist(err) {
		// Surface generation failures instead of silently dropping them;
		// otherwise the caller later fails to read a key that was never
		// written, with a less informative error.
		if err := makeSSHKeyPair(publicKeyFile, privateKeyFile); err != nil {
			log.Println("failed to generate SSH host key pair:", err)
		}
	}
}
func makeSSHKeyPair(pubKeyPath, privateKeyPath string) error {
privateKey, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
return err
}
// generate and write private key as PEM
privateKeyFile, err := os.Create(privateKeyPath)
defer privateKeyFile.Close()
if err != nil {
return err
}
privateKeyPEM := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)}
if err := pem.Encode(privateKeyFile, privateKeyPEM); err != nil {
return err
}
// generate and write public key
pub, err := ssh.NewPublicKey(&privateKey.PublicKey)
if err != nil {
return err
}
return ioutil.WriteFile(pubKeyPath, ssh.MarshalAuthorizedKey(pub), 0655)
} |
package chunk
import (
"bytes"
"compress/gzip"
"context"
"io"
"path"
"github.com/chmduquesne/rollinghash/buzhash64"
"github.com/pachyderm/pachyderm/src/server/pkg/obj"
"github.com/pachyderm/pachyderm/src/server/pkg/storage/hash"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
)
const (
	// MB is Megabytes.
	MB = 1024 * 1024
	// WindowSize is the size of the rolling hash window.
	WindowSize = 64
	// bufSize is the number of bytes handed to each chunker worker.
	// (bryce) this should be configurable.
	bufSize = 50 * MB
)

// initialWindow is the set of bytes used to initialize the window
// of the rolling hash function.
var initialWindow = make([]byte, WindowSize)

// WriterFunc is a callback that returns a data reference to the next chunk and the annotations within the chunk.
type WriterFunc func(*DataRef, []*Annotation) error

// byteSet is one worker's slice of the byte stream plus the annotations
// covering it.
type byteSet struct {
	data        []byte
	annotations []*Annotation
}

// chanSet carries the channels linking a worker to its neighbor in the
// worker chain: bytes hands leftover data between workers, done enforces
// callback ordering.
type chanSet struct {
	bytes chan *byteSet
	done  chan struct{}
}

// worker chunks one byte set. Workers form a chain (prev/next) so that chunk
// boundaries and writer callbacks stay in stream order despite concurrency.
type worker struct {
	ctx         context.Context
	objC        obj.Client
	hash        *buzhash64.Buzhash64
	splitMask   uint64 // a hash sum with these bits all zero marks a split point
	first       bool   // true until this worker finds its first split point
	buf         *bytes.Buffer
	annotations []*Annotation
	f           WriterFunc
	fs          []func() error // queued writer callbacks, run in order by executeFuncs
	prev, next  *chanSet
	stats       *stats
}
// (bryce) edge case where no split point was found, or found before window size bytes.
// run rolls the worker's assigned byte set, then handles the byte set handed
// over by the next worker (or, when the next channel is closed — last
// worker — uploads the trailing buffer as the final chunk), and finally
// executes the queued writer callbacks in chain order.
func (w *worker) run(byteSet *byteSet) error {
	// Roll through the assigned byte set.
	if err := w.rollByteSet(byteSet); err != nil {
		return err
	}
	// Wait for the next byte set to roll.
	select {
	case byteSet, more := <-w.next.bytes:
		// The next bytes channel is closed for the last worker,
		// so it uploads the last buffer as a chunk.
		if !more {
			if err := w.put(); err != nil {
				return err
			}
		} else if err := w.rollByteSet(byteSet); err != nil {
			return err
		}
	case <-w.ctx.Done():
		return w.ctx.Err()
	}
	// Execute the writer function for the chunks that were found.
	return w.executeFuncs()
}
// rollByteSet rolls each annotated region of the byte set through the
// chunker, rebasing annotation offsets from byte-set-relative to
// chunk-relative and resetting the rolling hash at annotation boundaries.
func (w *worker) rollByteSet(byteSet *byteSet) error {
	// Roll across the byte set.
	for i, a := range byteSet.annotations {
		// Each annotation covers the data from its offset up to the next
		// annotation's offset (or end of the byte set for the last one).
		var data []byte
		if i == len(byteSet.annotations)-1 {
			data = byteSet.data[a.Offset:len(byteSet.data)]
		} else {
			data = byteSet.data[a.Offset:byteSet.annotations[i+1].Offset]
		}
		// Convert from byte set offset to chunk offset.
		a.Offset = int64(w.buf.Len())
		w.annotations = joinAnnotations(w.annotations, a)
		if err := w.roll(data); err != nil {
			return err
		}
		// Reset hash between annotations.
		w.resetHash()
	}
	return nil
}
// joinAnnotations appends annotation a to as, merging it with the last
// annotation when both refer to the same metadata (the existing entry then
// already covers it).
func joinAnnotations(as []*Annotation, a *Annotation) []*Annotation {
	// If the annotation being added is the same as the
	// last, then they are merged.
	// BUG FIX: guard on length rather than nil — a non-nil but empty slice
	// would otherwise panic on the as[len(as)-1] index.
	if len(as) > 0 && as[len(as)-1].Meta == a.Meta {
		return as
	}
	return append(as, a)
}
// roll feeds data byte-by-byte through the rolling hash, cutting a chunk
// wherever the masked hash is zero. For a non-first worker, the data before
// its first split point belongs to the previous worker's last chunk and is
// handed back over prev.bytes instead of being uploaded here.
func (w *worker) roll(data []byte) error {
	offset := 0
	for i, b := range data {
		w.hash.Roll(b)
		if w.hash.Sum64()&w.splitMask == 0 {
			w.buf.Write(data[offset : i+1])
			offset = i + 1
			if w.prev != nil && w.first {
				// We do not consider chunk split points within WindowSize bytes
				// of the start of the byte set.
				if w.buf.Len() < WindowSize {
					continue
				}
				// Hand everything before the first split point back to the
				// previous worker in the chain.
				byteSet := &byteSet{
					data:        w.buf.Bytes(),
					annotations: w.annotations,
				}
				w.prev.bytes <- byteSet
				w.first = false
			} else if err := w.put(); err != nil {
				return err
			}
			w.buf = &bytes.Buffer{}
			w.annotations = splitAnnotations(w.annotations)
		}
	}
	// Buffer the tail after the last split point; it rolls into the next
	// call or is flushed by put.
	w.buf.Write(data[offset:])
	return nil
}
// put content-hashes the buffered chunk, uploads it if it is not already in
// object storage (dedup), and queues the writer callback for later in-order
// execution by executeFuncs.
func (w *worker) put() error {
	chunk := &Chunk{Hash: hash.EncodeHash(hash.Sum(w.buf.Bytes()))}
	path := path.Join(prefix, chunk.Hash)
	// If the chunk does not exist, upload it.
	if !w.objC.Exists(w.ctx, path) {
		if err := w.upload(path); err != nil {
			return err
		}
	}
	chunkRef := &DataRef{
		Chunk:     chunk,
		SizeBytes: int64(w.buf.Len()),
	}
	// Update the annotations for the current chunk.
	updateAnnotations(chunkRef, w.buf.Bytes(), w.annotations)
	// Capture the current annotations: w.annotations is replaced before the
	// queued closure runs.
	annotations := w.annotations
	w.fs = append(w.fs, func() error {
		return w.f(chunkRef, annotations)
	})
	return nil
}
// updateAnnotations fills in the NextDataRef of each annotation that
// requests one, pointing it at the newly created chunk and hashing the
// annotation's slice of the chunk buffer.
func updateAnnotations(chunkRef *DataRef, buf []byte, annotations []*Annotation) {
	// (bryce) add check for no buf and greater than one annotation.
	// Fast path for next data reference being full chunk.
	// NOTE(review): the loop below still runs after this assignment and
	// rewrites fields of chunkRef through the aliased annotation — confirm
	// whether the fast path is meant to skip the per-annotation hashing.
	if len(annotations) == 1 && annotations[0].NextDataRef != nil {
		annotations[0].NextDataRef = chunkRef
	}
	for i, a := range annotations {
		// (bryce) probably a better way to communicate whether to compute datarefs for an annotation.
		if a.NextDataRef != nil {
			a.NextDataRef.Chunk = chunkRef.Chunk
			// The annotation's data runs from its offset to the next
			// annotation's offset (or end of buffer for the last one).
			var data []byte
			if i == len(annotations)-1 {
				data = buf[a.Offset:len(buf)]
			} else {
				data = buf[a.Offset:annotations[i+1].Offset]
			}
			a.NextDataRef.Hash = hash.EncodeHash(hash.Sum(data))
			a.NextDataRef.OffsetBytes = a.Offset
			a.NextDataRef.SizeBytes = int64(len(data))
		}
	}
}
// splitAnnotations carries the last annotation of as over into a fresh
// slice, so the next chunk starts with a copy of the annotation in progress.
func splitAnnotations(as []*Annotation) []*Annotation {
	if len(as) == 0 {
		return nil
	}
	last := as[len(as)-1]
	next := &Annotation{Meta: last.Meta}
	if last.NextDataRef != nil {
		next.NextDataRef = &DataRef{}
	}
	return []*Annotation{next}
}
// resetHash reinitializes the rolling hash window to a known state (used at
// worker construction and between annotations).
func (w *worker) resetHash() {
	w.hash.Reset()
	w.hash.Write(initialWindow)
}

// upload gzip-compresses the buffered chunk and writes it to object storage
// at path.
func (w *worker) upload(path string) error {
	objW, err := w.objC.Writer(w.ctx, path)
	if err != nil {
		return err
	}
	defer objW.Close()
	// BestSpeed trades some compression ratio for throughput.
	gzipW, err := gzip.NewWriterLevel(objW, gzip.BestSpeed)
	if err != nil {
		return err
	}
	defer gzipW.Close()
	// (bryce) Encrypt?
	_, err = io.Copy(gzipW, bytes.NewReader(w.buf.Bytes()))
	return err
}
// executeFuncs runs the queued writer callbacks after the previous worker in
// the chain has signaled completion — preserving global chunk order — and
// then signals the next worker.
func (w *worker) executeFuncs() error {
	// Wait for the prior worker in the chain to signal
	// that it is done.
	if w.prev != nil {
		select {
		case <-w.prev.done:
		case <-w.ctx.Done():
			return w.ctx.Err()
		}
	}
	// Execute the writer function for each chunk.
	for _, f := range w.fs {
		w.stats.chunkCount++
		if err := f(); err != nil {
			return err
		}
	}
	// Signal to the next worker in the chain that this worker is done.
	w.next.done <- struct{}{}
	return nil
}

// stats tracks counters shared between the writer and its workers.
type stats struct {
	chunkCount         int64 // chunks created or referenced so far
	annotatedBytesSize int64 // bytes written under the current annotation
}
// Writer splits a byte stream into content defined chunks that are hashed and deduplicated/uploaded to object storage.
// Chunk split points are determined by a bit pattern in a rolling hash function (buzhash64 at https://github.com/chmduquesne/rollinghash).
type Writer struct {
	ctx           context.Context
	buf           *bytes.Buffer
	annotations   []*Annotation
	memoryLimiter *semaphore.Weighted
	eg            *errgroup.Group
	newWorkerFunc func(*chanSet, *chanSet) *worker
	prev          *chanSet // chanSet of the most recently spawned worker
	f             WriterFunc
	stats         *stats
}

// newWriter creates a Writer whose split mask yields chunks of roughly
// 2^averageBits bytes, invoking f for every chunk produced.
func newWriter(ctx context.Context, objC obj.Client, memoryLimiter *semaphore.Weighted, averageBits int, f WriterFunc, seed int64) *Writer {
	eg, cancelCtx := errgroup.WithContext(ctx)
	stats := &stats{}
	// Each buffered byte set is chunked by its own worker; all workers share
	// the errgroup context and the stats counters.
	newWorkerFunc := func(prev, next *chanSet) *worker {
		w := &worker{
			ctx:       cancelCtx,
			objC:      objC,
			hash:      buzhash64.NewFromUint64Array(buzhash64.GenerateHashes(seed)),
			splitMask: (1 << uint64(averageBits)) - 1,
			first:     true,
			buf:       &bytes.Buffer{},
			prev:      prev,
			next:      next,
			f:         f,
			stats:     stats,
		}
		w.resetHash()
		return w
	}
	w := &Writer{
		ctx:           cancelCtx,
		buf:           &bytes.Buffer{},
		memoryLimiter: memoryLimiter,
		eg:            eg,
		newWorkerFunc: newWorkerFunc,
		f:             f,
		stats:         stats,
	}
	return w
}
// Annotate associates an annotation with the current byte set.
func (w *Writer) Annotate(a *Annotation) {
	a.Offset = int64(w.buf.Len())
	// An annotation starting at offset zero supersedes any annotations
	// carried over from the previous byte set.
	if a.Offset == 0 {
		w.annotations = nil
	}
	w.annotations = append(w.annotations, a)
	w.stats.annotatedBytesSize = 0
}

// AnnotatedBytesSize returns the size of the bytes for the current annotation.
func (w *Writer) AnnotatedBytesSize() int64 {
	return w.stats.annotatedBytesSize
}

// Flush flushes the buffered data.
func (w *Writer) Flush() error {
	// Write out the last buffer.
	if w.buf.Len() > 0 {
		w.writeByteSet()
	}
	// Signal to the last worker that it is last.
	if w.prev != nil {
		close(w.prev.bytes)
		w.prev = nil
	}
	// Wait for the workers to finish.
	return w.eg.Wait()
}

// Reset resets the buffer and annotations.
func (w *Writer) Reset() {
	// (bryce) should cancel all workers.
	w.buf = &bytes.Buffer{}
	w.annotations = nil
	w.stats.annotatedBytesSize = 0
}

// ChunkCount returns a count of the number of chunks created/referenced by
// the writer.
func (w *Writer) ChunkCount() int64 {
	return w.stats.chunkCount
}
// Write rolls through the data written, calling c.f when a chunk is found.
// Note: If making changes to this function, be wary of the performance
// implications (check before and after performance with chunker benchmarks).
func (w *Writer) Write(data []byte) (int, error) {
	var written int
	// Fill the buffer up to bufSize and hand each full buffer off to a
	// worker; the remainder stays buffered for the next Write/Flush.
	for w.buf.Len()+len(data) >= bufSize {
		i := bufSize - w.buf.Len()
		w.buf.Write(data[:i])
		w.writeByteSet()
		written += i
		data = data[i:]
	}
	w.buf.Write(data)
	written += len(data)
	w.stats.annotatedBytesSize += int64(written)
	return written, nil
}

// writeByteSet spawns a worker to chunk the current buffer, wiring it into
// the worker chain so callbacks run in stream order, then resets the buffer
// and carries the in-progress annotation over.
func (w *Writer) writeByteSet() {
	// NOTE(review): the Acquire error (non-nil when ctx is canceled) is
	// ignored here — confirm cancellation is handled downstream.
	w.memoryLimiter.Acquire(w.ctx, bufSize)
	prev := w.prev
	next := &chanSet{
		bytes: make(chan *byteSet, 1),
		done:  make(chan struct{}, 1),
	}
	byteSet := &byteSet{
		data:        w.buf.Bytes(),
		annotations: w.annotations,
	}
	w.eg.Go(func() error {
		// Release the memory budget when the worker finishes with the buffer.
		defer w.memoryLimiter.Release(bufSize)
		return w.newWorkerFunc(prev, next).run(byteSet)
	})
	w.prev = next
	w.buf = &bytes.Buffer{}
	w.annotations = splitAnnotations(w.annotations)
}
// Copy does a cheap copy from a reader to a writer.
func (w *Writer) Copy(r *Reader, n ...int64) error {
	c, err := r.ReadCopy(n...)
	if err != nil {
		return err
	}
	return w.WriteCopy(c)
}

// WriteCopy writes copy data to the writer: the bytes before the first full
// chunk are re-chunked, whole chunks are referenced via their existing data
// refs without re-upload, and the trailing bytes are re-chunked.
func (w *Writer) WriteCopy(c *Copy) error {
	if _, err := io.Copy(w, c.before); err != nil {
		return err
	}
	for _, chunkRef := range c.chunkRefs {
		w.stats.chunkCount++
		// (bryce) might want to double check if this is correct.
		w.stats.annotatedBytesSize += chunkRef.SizeBytes
		updateAnnotations(chunkRef, nil, w.annotations)
		if err := w.f(chunkRef, w.annotations); err != nil {
			return err
		}
	}
	_, err := io.Copy(w, c.after)
	return err
}

// Close closes the writer.
func (w *Writer) Close() error {
	return w.Flush()
}
Fix deadlock when no split point was found in a buffer
package chunk
import (
"bytes"
"compress/gzip"
"context"
"io"
"path"
"github.com/chmduquesne/rollinghash/buzhash64"
"github.com/pachyderm/pachyderm/src/server/pkg/obj"
"github.com/pachyderm/pachyderm/src/server/pkg/storage/hash"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
)
const (
// MB is Megabytes.
MB = 1024 * 1024
// WindowSize is the size of the rolling hash window.
WindowSize = 64
// (bryce) this should be configurable.
bufSize = 50 * MB
)
// initialWindow is the set of bytes used to initialize the window
// of the rolling hash function.
var initialWindow = make([]byte, WindowSize)
// WriterFunc is a callback that returns a data reference to the next chunk and the annotations within the chunk.
type WriterFunc func(*DataRef, []*Annotation) error
type byteSet struct {
data []byte
annotations []*Annotation
nextByteSet chan *byteSet
}
type chanSet struct {
bytes chan *byteSet
done chan struct{}
}
type worker struct {
ctx context.Context
objC obj.Client
hash *buzhash64.Buzhash64
splitMask uint64
first bool
buf *bytes.Buffer
annotations []*Annotation
f WriterFunc
fs []func() error
prev, next *chanSet
stats *stats
}
// (bryce) edge case where no split point was found, or found before window size bytes.
func (w *worker) run(byteSet *byteSet) error {
// Roll through the assigned byte set.
if err := w.rollByteSet(byteSet); err != nil {
return err
}
// No split point found.
if w.prev != nil && w.first {
byteSet.nextByteSet = w.next.bytes
select {
case w.prev.bytes <- byteSet:
case <-w.ctx.Done():
return w.ctx.Err()
}
} else {
// Wait for the next byte set to roll.
nextByteSet := w.next.bytes
for nextByteSet != nil {
select {
case byteSet, more := <-nextByteSet:
// The next bytes channel is closed for the last worker,
// so it uploads the last buffer as a chunk.
if !more {
if err := w.put(); err != nil {
return err
}
nextByteSet = nil
break
} else if err := w.rollByteSet(byteSet); err != nil {
return err
}
nextByteSet = byteSet.nextByteSet
case <-w.ctx.Done():
return w.ctx.Err()
}
}
}
// Execute the writer function for the chunks that were found.
return w.executeFuncs()
}
func (w *worker) rollByteSet(byteSet *byteSet) error {
// Roll across the byte set.
for i, a := range byteSet.annotations {
var data []byte
if i == len(byteSet.annotations)-1 {
data = byteSet.data[a.Offset:len(byteSet.data)]
} else {
data = byteSet.data[a.Offset:byteSet.annotations[i+1].Offset]
}
// Convert from byte set offset to chunk offset.
a.Offset = int64(w.buf.Len())
w.annotations = joinAnnotations(w.annotations, a)
if err := w.roll(data); err != nil {
return err
}
// Reset hash between annotations.
w.resetHash()
}
return nil
}
// joinAnnotations appends annotation a to as, merging it with the last
// annotation when both refer to the same metadata (the existing entry then
// already covers it).
func joinAnnotations(as []*Annotation, a *Annotation) []*Annotation {
	// If the annotation being added is the same as the
	// last, then they are merged.
	// BUG FIX: guard on length rather than nil — a non-nil but empty slice
	// would otherwise panic on the as[len(as)-1] index.
	if len(as) > 0 && as[len(as)-1].Meta == a.Meta {
		return as
	}
	return append(as, a)
}
func (w *worker) roll(data []byte) error {
offset := 0
for i, b := range data {
w.hash.Roll(b)
if w.hash.Sum64()&w.splitMask == 0 {
w.buf.Write(data[offset : i+1])
offset = i + 1
if w.prev != nil && w.first {
// We do not consider chunk split points within WindowSize bytes
// of the start of the byte set.
if w.buf.Len() < WindowSize {
continue
}
byteSet := &byteSet{
data: w.buf.Bytes(),
annotations: w.annotations,
}
w.prev.bytes <- byteSet
w.first = false
} else if err := w.put(); err != nil {
return err
}
w.buf = &bytes.Buffer{}
w.annotations = splitAnnotations(w.annotations)
}
}
w.buf.Write(data[offset:])
return nil
}
func (w *worker) put() error {
chunk := &Chunk{Hash: hash.EncodeHash(hash.Sum(w.buf.Bytes()))}
path := path.Join(prefix, chunk.Hash)
// If the chunk does not exist, upload it.
if !w.objC.Exists(w.ctx, path) {
if err := w.upload(path); err != nil {
return err
}
}
chunkRef := &DataRef{
Chunk: chunk,
SizeBytes: int64(w.buf.Len()),
}
// Update the annotations for the current chunk.
updateAnnotations(chunkRef, w.buf.Bytes(), w.annotations)
annotations := w.annotations
w.fs = append(w.fs, func() error {
return w.f(chunkRef, annotations)
})
return nil
}
func updateAnnotations(chunkRef *DataRef, buf []byte, annotations []*Annotation) {
// (bryce) add check for no buf and greater than one annotation.
// Fast path for next data reference being full chunk.
if len(annotations) == 1 && annotations[0].NextDataRef != nil {
annotations[0].NextDataRef = chunkRef
}
for i, a := range annotations {
// (bryce) probably a better way to communicate whether to compute datarefs for an annotation.
if a.NextDataRef != nil {
a.NextDataRef.Chunk = chunkRef.Chunk
var data []byte
if i == len(annotations)-1 {
data = buf[a.Offset:len(buf)]
} else {
data = buf[a.Offset:annotations[i+1].Offset]
}
a.NextDataRef.Hash = hash.EncodeHash(hash.Sum(data))
a.NextDataRef.OffsetBytes = a.Offset
a.NextDataRef.SizeBytes = int64(len(data))
}
}
}
// splitAnnotations carries the last annotation of as over into a fresh
// slice, so the next chunk starts with a copy of the annotation in progress.
func splitAnnotations(as []*Annotation) []*Annotation {
	if len(as) == 0 {
		return nil
	}
	last := as[len(as)-1]
	next := &Annotation{Meta: last.Meta}
	if last.NextDataRef != nil {
		next.NextDataRef = &DataRef{}
	}
	return []*Annotation{next}
}
func (w *worker) resetHash() {
w.hash.Reset()
w.hash.Write(initialWindow)
}
func (w *worker) upload(path string) error {
objW, err := w.objC.Writer(w.ctx, path)
if err != nil {
return err
}
defer objW.Close()
gzipW, err := gzip.NewWriterLevel(objW, gzip.BestSpeed)
if err != nil {
return err
}
defer gzipW.Close()
// (bryce) Encrypt?
_, err = io.Copy(gzipW, bytes.NewReader(w.buf.Bytes()))
return err
}
func (w *worker) executeFuncs() error {
// Wait for the prior worker in the chain to signal
// that it is done.
if w.prev != nil {
select {
case <-w.prev.done:
case <-w.ctx.Done():
return w.ctx.Err()
}
}
// Execute the writer function for each chunk.
for _, f := range w.fs {
w.stats.chunkCount++
if err := f(); err != nil {
return err
}
}
// Signal to the next worker in the chain that this worker is done.
w.next.done <- struct{}{}
return nil
}
type stats struct {
chunkCount int64
annotatedBytesSize int64
}
// Writer splits a byte stream into content defined chunks that are hashed and deduplicated/uploaded to object storage.
// Chunk split points are determined by a bit pattern in a rolling hash function (buzhash64 at https://github.com/chmduquesne/rollinghash).
type Writer struct {
ctx context.Context
buf *bytes.Buffer
annotations []*Annotation
memoryLimiter *semaphore.Weighted
eg *errgroup.Group
newWorkerFunc func(*chanSet, *chanSet) *worker
prev *chanSet
f WriterFunc
stats *stats
}
func newWriter(ctx context.Context, objC obj.Client, memoryLimiter *semaphore.Weighted, averageBits int, f WriterFunc, seed int64) *Writer {
eg, cancelCtx := errgroup.WithContext(ctx)
stats := &stats{}
newWorkerFunc := func(prev, next *chanSet) *worker {
w := &worker{
ctx: cancelCtx,
objC: objC,
hash: buzhash64.NewFromUint64Array(buzhash64.GenerateHashes(seed)),
splitMask: (1 << uint64(averageBits)) - 1,
first: true,
buf: &bytes.Buffer{},
prev: prev,
next: next,
f: f,
stats: stats,
}
w.resetHash()
return w
}
w := &Writer{
ctx: cancelCtx,
buf: &bytes.Buffer{},
memoryLimiter: memoryLimiter,
eg: eg,
newWorkerFunc: newWorkerFunc,
f: f,
stats: stats,
}
return w
}
// Annotate associates an annotation with the current byte set.
func (w *Writer) Annotate(a *Annotation) {
a.Offset = int64(w.buf.Len())
if a.Offset == 0 {
w.annotations = nil
}
w.annotations = append(w.annotations, a)
w.stats.annotatedBytesSize = 0
}
// AnnotatedBytesSize returns the size of the bytes for the current annotation.
func (w *Writer) AnnotatedBytesSize() int64 {
return w.stats.annotatedBytesSize
}
// Flush flushes the buffered data.
func (w *Writer) Flush() error {
// Write out the last buffer.
if w.buf.Len() > 0 {
w.writeByteSet()
}
// Signal to the last worker that it is last.
if w.prev != nil {
close(w.prev.bytes)
w.prev = nil
}
// Wait for the workers to finish.
return w.eg.Wait()
}
// Reset resets the buffer and annotations.
func (w *Writer) Reset() {
// (bryce) should cancel all workers.
w.buf = &bytes.Buffer{}
w.annotations = nil
w.stats.annotatedBytesSize = 0
}
// ChunkCount returns a count of the number of chunks created/referenced by
// the writer.
func (w *Writer) ChunkCount() int64 {
return w.stats.chunkCount
}
// Write rolls through the data written, calling c.f when a chunk is found.
// Note: If making changes to this function, be wary of the performance
// implications (check before and after performance with chunker benchmarks).
func (w *Writer) Write(data []byte) (int, error) {
var written int
for w.buf.Len()+len(data) >= bufSize {
i := bufSize - w.buf.Len()
w.buf.Write(data[:i])
w.writeByteSet()
written += i
data = data[i:]
}
w.buf.Write(data)
written += len(data)
w.stats.annotatedBytesSize += int64(written)
return written, nil
}
func (w *Writer) writeByteSet() {
w.memoryLimiter.Acquire(w.ctx, bufSize)
prev := w.prev
next := &chanSet{
bytes: make(chan *byteSet, 1),
done: make(chan struct{}, 1),
}
byteSet := &byteSet{
data: w.buf.Bytes(),
annotations: w.annotations,
}
w.eg.Go(func() error {
defer w.memoryLimiter.Release(bufSize)
return w.newWorkerFunc(prev, next).run(byteSet)
})
w.prev = next
w.buf = &bytes.Buffer{}
w.annotations = splitAnnotations(w.annotations)
}
// Copy does a cheap copy from a reader to a writer.
func (w *Writer) Copy(r *Reader, n ...int64) error {
c, err := r.ReadCopy(n...)
if err != nil {
return err
}
return w.WriteCopy(c)
}
// WriteCopy writes copy data to the writer.
func (w *Writer) WriteCopy(c *Copy) error {
if _, err := io.Copy(w, c.before); err != nil {
return err
}
for _, chunkRef := range c.chunkRefs {
w.stats.chunkCount++
// (bryce) might want to double check if this is correct.
w.stats.annotatedBytesSize += chunkRef.SizeBytes
updateAnnotations(chunkRef, nil, w.annotations)
if err := w.f(chunkRef, w.annotations); err != nil {
return err
}
}
_, err := io.Copy(w, c.after)
return err
}
// Close closes the writer.
func (w *Writer) Close() error {
return w.Flush()
}
|
package player
import (
"github.com/lean-poker/poker-player-go/leanpoker"
"strings"
)
const VERSION = "Default Go folding player"
const MAX_RANK = 30
const JQKA_RANK = 20
const LOW_RANK = 5
// BetRequest decides this player's bet for the current game state.
// Strategy: check when possible; pre-flop, fold weak hole cards (calling
// only with very strong ones); post-flop, go all in on three or four of a
// kind, raise on two pair, and otherwise bet the hole-card rank.
func BetRequest(state *leanpoker.Game) int {
	p := state.GetPlayer()
	if p == nil {
		return 1000
	}
	if state.IsCheckable() {
		return 0
	}
	holeRank := rankHoleCards(p.HoleCards)
	if len(state.CommunityCards) == 0 {
		// Pre-flop: fold unless the buy-in is affordable relative to rank,
		// but call with a very strong starting hand.
		if state.CurrentBuyIn > state.Pot+state.CurrentBuyIn*holeRank || holeRank <= LOW_RANK {
			if holeRank >= 18 {
				// follow game with good hand
				return state.CurrentBuyIn
			}
			return 0
		}
		return holeRank
	}
	allCards := append(state.CommunityCards, p.HoleCards...)
	switch {
	// BUG FIX: the four-of-a-kind case previously had an EMPTY body — Go
	// switch cases do not fall through, so the strongest hand fell out of
	// the switch and bet only the hole rank. It now goes all in like three
	// of a kind. (The unreachable `break`s after `return` are removed too.)
	case isFourOfAKind(allCards...), isThreeOfAKind(allCards...):
		return p.Stack
	case isTwoPair(allCards...):
		if holeRank >= JQKA_RANK {
			// all in
			return p.Stack
		}
		return state.CurrentBuyIn + holeRank
	default:
		return holeRank
	}
}
// Showdown is called at the end of a hand; this player takes no action.
func Showdown(state *leanpoker.Game) {
}

// Version returns the player version string reported to the platform.
func Version() string {
	return VERSION
}

// isPair reports whether the first two cards share the same rank.
// NOTE(review): only cards[0] and cards[1] are compared — confirm callers
// always pass exactly the two hole cards.
func isPair(cards []*leanpoker.Card) bool {
	if len(cards) < 2 {
		return false
	}
	return cards[0].Rank == cards[1].Rank
}

// rankHoleCards scores the two hole cards as the sum of their rankCard
// values; it returns 0 when fewer than two cards are present.
func rankHoleCards(cards []*leanpoker.Card) int {
	if len(cards) < 2 {
		return 0
	}
	card1 := rankCard(cards[0].Rank)
	card2 := rankCard(cards[1].Rank)
	return card1 + card2
}
// rankCard maps a card rank string ("2".."10", "J", "Q", "K", "A") to a
// numeric value derived from its position in the rank-order string
// ("2" → 2 … "A" → 15); unknown ranks map to 0.
func rankCard(rank string) int {
	const ranks = "12345678910JQKA"
	return strings.Index(ranks, rank) + 1
}
/*func calcComunityCards(g *leanpoker.Game, currentBet) {
}*/
Fold even before flop
package player
import (
"github.com/lean-poker/poker-player-go/leanpoker"
"strings"
)
const VERSION = "Default Go folding player"
const MAX_RANK = 30
const JQKA_RANK = 20
const LOW_RANK = 5
// BetRequest decides this player's bet for the current game state.
// Strategy: check when possible; fold weak hands even before the flop
// (calling only with very strong hole cards); otherwise go all in on three
// or four of a kind, raise on two pair, and bet the hole-card rank by default.
func BetRequest(state *leanpoker.Game) int {
	p := state.GetPlayer()
	if p == nil {
		return 1000
	}
	if state.IsCheckable() {
		return 0
	}
	holeRank := rankHoleCards(p.HoleCards)
	// Fold unless the buy-in is affordable relative to rank, but call with a
	// very strong starting hand.
	if state.CurrentBuyIn > state.Pot+state.CurrentBuyIn*holeRank || holeRank <= LOW_RANK {
		if holeRank >= 18 {
			// follow game with good hand
			return state.CurrentBuyIn
		}
		return 0
	}
	allCards := append(state.CommunityCards, p.HoleCards...)
	switch {
	// BUG FIX: the four-of-a-kind case previously had an EMPTY body — Go
	// switch cases do not fall through, so the strongest hand fell out of
	// the switch and bet only the hole rank. It now goes all in like three
	// of a kind. (The unreachable `break`s after `return` are removed too.)
	case isFourOfAKind(allCards...), isThreeOfAKind(allCards...):
		return p.Stack
	case isTwoPair(allCards...):
		if holeRank >= JQKA_RANK {
			// all in
			return p.Stack
		}
		return state.CurrentBuyIn + holeRank
	default:
		return holeRank
	}
}
// Showdown is called at the end of a hand; this player takes no action.
func Showdown(state *leanpoker.Game) {
}

// Version returns the player version string reported to the platform.
func Version() string {
	return VERSION
}

// isPair reports whether the first two cards share the same rank.
// NOTE(review): only cards[0] and cards[1] are compared — confirm callers
// always pass exactly the two hole cards.
func isPair(cards []*leanpoker.Card) bool {
	if len(cards) < 2 {
		return false
	}
	return cards[0].Rank == cards[1].Rank
}

// rankHoleCards scores the two hole cards as the sum of their rankCard
// values; it returns 0 when fewer than two cards are present.
func rankHoleCards(cards []*leanpoker.Card) int {
	if len(cards) < 2 {
		return 0
	}
	card1 := rankCard(cards[0].Rank)
	card2 := rankCard(cards[1].Rank)
	return card1 + card2
}

// rankCard maps a card rank string to a numeric value derived from its
// position in the rank-order string ("2" → 2 … "A" → 15); unknown ranks
// map to 0.
func rankCard(rank string) int {
	ranks := "12345678910JQKA"
	return strings.Index(ranks, rank) + 1
}

/*func calcComunityCards(g *leanpoker.Game, currentBet) {
}*/
|
package span
import (
"bytes"
"fmt"
"io/ioutil"
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
)
// bb is a shorthand for converting a test literal to []byte.
func bb(s string) []byte { return []byte(s) }

// spanCase bundles one test-data file with the spans expected from it.
type spanCase struct {
	fname  string   // test file name, relative to dir
	buf    []byte   // raw file contents
	blocks [][]byte // buf split into non-empty lines
	spans  []Span   // expected parse result
}
type spans []Span

// dir is the root directory of the span-level test data files.
const dir = `../testdata/tests/span_level/`
// lines loads a test-data file from dir and pairs it with the expected
// spans. It panics on read errors, which is acceptable in test setup.
func lines(filename string, spans spans) spanCase {
	buf, err := ioutil.ReadFile(dir + filename)
	if err != nil {
		panic(err)
	}
	return spanCase{
		fname: filename,
		buf:   buf,
		// Split the file into lines, dropping empty ones (FieldsFunc omits
		// empty fields).
		blocks: bytes.FieldsFunc(buf, func(r rune) bool {
			return r == '\n' || r == '\r'
		}),
		spans: spans,
	}
}
// diff returns a human-readable description of the first difference between
// the expected (ok) and actual (bad) span lists.
func diff(ok, bad []Span) string {
	for i, want := range ok {
		switch {
		case i >= len(bad):
			return fmt.Sprintf("ends abruptly at position %d, expected:\n%s",
				i, spew.Sdump(want))
		case !reflect.DeepEqual(want, bad[i]):
			return fmt.Sprintf("position %d, expected:\n%sgot:\n%s",
				i, spew.Sdump(want), spew.Sdump(bad[i]))
		}
	}
	return fmt.Sprintf("too many nodes, starting at position %d:\n%s",
		len(ok), spew.Sdump(bad[len(ok)]))
}
// TestSpan runs the span-level parser over a set of testdata files and
// compares the produced spans against hand-written expectations, dumping the
// full expected/actual lists plus a pinpointed diff on mismatch.
func TestSpan(test *testing.T) {
	cases := []spanCase{
		lines(`automatic_links/angle_brackets_in_link.md`, spans{
			{bb("http://exampl"), AutoLink{URL: `http://exampl`, Text: `http://exampl`}},
			{bb("http://exampl"), AutoLink{URL: `http://exampl`, Text: `http://exampl`}},
		}),
		lines("automatic_links/ending_with_punctuation.md", spans{
			{bb("http://example.net"), AutoLink{URL: "http://example.net", Text: "http://example.net"}},
			{bb("http://example.net/"), AutoLink{URL: "http://example.net/", Text: "http://example.net/"}},
			{bb("http://example.net"), AutoLink{URL: "http://example.net", Text: "http://example.net"}},
			{bb("http://example.net/"), AutoLink{URL: "http://example.net/", Text: "http://example.net/"}},
			{bb("<http://example.net,>"), AutoLink{URL: "http://example.net,", Text: "http://example.net,"}},
			{bb("<http://example.net/,>"), AutoLink{URL: "http://example.net/,", Text: "http://example.net/,"}},
			{bb("<http://example.net)>"), AutoLink{URL: "http://example.net)", Text: "http://example.net)"}},
			{bb("<http://example.net/)>"), AutoLink{URL: "http://example.net/)", Text: "http://example.net/)"}},
		}),
		lines("automatic_links/mail_url_in_angle_brackets.md", spans{
			{bb("<mailto:someone@example.net>"), AutoLink{URL: "mailto:someone@example.net", Text: "mailto:someone@example.net"}},
			{bb("<someone@example.net>"), AutoLink{URL: "mailto:someone@example.net", Text: "someone@example.net"}},
		}),
		lines("automatic_links/mail_url_without_angle_brackets.md", spans{
			// NOTE(akavel): below line is unexpected according to
			// testdata/, but from spec this seems totally expected,
			// so I added it
			{bb("mailto:someone@example.net"), AutoLink{URL: "mailto:someone@example.net", Text: "mailto:someone@example.net"}},
		}),
		lines("automatic_links/url_schemes.md", spans{
			{bb("http://example.net"), AutoLink{URL: "http://example.net", Text: "http://example.net"}},
			{bb("<http://example.net>"), AutoLink{URL: "http://example.net", Text: "http://example.net"}},
			{bb("file:///tmp/tmp.html"), AutoLink{URL: "file:///tmp/tmp.html", Text: "file:///tmp/tmp.html"}},
			{bb("<file:///tmp/tmp.html>"), AutoLink{URL: "file:///tmp/tmp.html", Text: "file:///tmp/tmp.html"}},
			{bb("feed://example.net/rss.xml"), AutoLink{URL: "feed://example.net/rss.xml", Text: "feed://example.net/rss.xml"}},
			{bb("<feed://example.net/rss.xml>"), AutoLink{URL: "feed://example.net/rss.xml", Text: "feed://example.net/rss.xml"}},
			{bb("googlechrome://example.net/"), AutoLink{URL: "googlechrome://example.net/", Text: "googlechrome://example.net/"}},
			{bb("<googlechrome://example.net/>"), AutoLink{URL: "googlechrome://example.net/", Text: "googlechrome://example.net/"}},
			{bb("`<>`"), Code{bb("<>")}},
			// NOTE(akavel): below line is unexpected according to
			// testdata/, but from spec this seems totally expected,
			// so I added it
			{bb("mailto:me@example.net"), AutoLink{URL: "mailto:me@example.net", Text: "mailto:me@example.net"}},
			{bb("<mailto:me@example.net>"), AutoLink{URL: "mailto:me@example.net", Text: "mailto:me@example.net"}},
		}),
		lines("automatic_links/url_special_chars.md", spans{
			{bb(`http://example.net/*#$%^&\~/blah`), AutoLink{URL: `http://example.net/*#$%^&\~/blah`, Text: `http://example.net/*#$%^&\~/blah`}},
			{bb(`<http://example.net/*#$%^&\~)/blah>`), AutoLink{URL: `http://example.net/*#$%^&\~)/blah`, Text: `http://example.net/*#$%^&\~)/blah`}},
			// NOTE(akavel): testdata expects below commented entry,
			// but this seems wrong compared to spec; I've added
			// fixed entry
			// {bb(`http://example.net/blah/`), AutoLink{URL: `http://example.net/blah/`, Text: `http://example.net/blah/`}},
			{bb(`http://example.net/blah/*#$%^&\~`), AutoLink{URL: `http://example.net/blah/*#$%^&\~`, Text: `http://example.net/blah/*#$%^&\~`}},
			{bb(`<http://example.net/blah/*#$%^&\~)>`), AutoLink{URL: `http://example.net/blah/*#$%^&\~)`, Text: `http://example.net/blah/*#$%^&\~)`}},
		}),
		lines("automatic_links/web_url_in_angle_brackets.md", spans{
			{bb("<http://example.net/path/>"), AutoLink{URL: "http://example.net/path/", Text: "http://example.net/path/"}},
			{bb("<https://example.net/path/>"), AutoLink{URL: "https://example.net/path/", Text: "https://example.net/path/"}},
			{bb("<ftp://example.net/path/>"), AutoLink{URL: "ftp://example.net/path/", Text: "ftp://example.net/path/"}},
		}),
		lines("automatic_links/web_url_without_angle_brackets.md", spans{
			{bb("http://example.net/path/"), AutoLink{URL: "http://example.net/path/", Text: "http://example.net/path/"}},
			{bb("https://example.net/path/"), AutoLink{URL: "https://example.net/path/", Text: "https://example.net/path/"}},
			{bb("ftp://example.net/path/"), AutoLink{URL: "ftp://example.net/path/", Text: "ftp://example.net/path/"}},
		}),
		lines("code/end_of_codespan.md", spans{
			{bb("`code span`"), Code{bb("code span")}},
			{bb("``code span` ends``"), Code{bb("code span` ends")}},
			{bb("`code span`` ends`"), Code{bb("code span`` ends")}},
			{bb("````code span`` ``ends````"), Code{bb("code span`` ``ends")}},
			{bb("`code span\\`"), Code{bb(`code span\`)}},
		}),
	}
	for _, c := range cases {
		// Parse every block of the file and collect all resulting spans.
		spans := []Span{}
		for _, b := range c.blocks {
			spans = append(spans, Process(b, nil)...)
		}
		if !reflect.DeepEqual(c.spans, spans) {
			test.Errorf("case %s expected:\n%s",
				c.fname, spew.Sdump(c.spans))
			test.Errorf("got:")
			for i, span := range spans {
				// Report each actual span with its byte offset in the source buffer.
				off, err := span.OffsetIn(c.buf)
				test.Errorf("[%d] @ %d [%v]: %s",
					i, off, err, spew.Sdump(span))
			}
			test.Errorf("QUICK DIFF: %s\n", diff(c.spans, spans))
		}
	}
}
/*
in ROOT/testdata/tests/span_level:
code/multiline.md
code/vs_emph.md
code/vs_html.md
code/vs_image.md
code/vs_link.md
code/well_formed.md
emphasis/emphasis_tag_combinations.md
emphasis/intertwined.md
emphasis/intraword.md
emphasis/nested_homogenous.md
emphasis/opening_and_closing_tags.md
emphasis/simple.md
emphasis/vs_html.md
emphasis/within_whitespace.md
emphasis/with_punctuation.md
image/direct_link.md
image/direct_link_with_2separating_spaces.md
image/direct_link_with_separating_newline.md
image/direct_link_with_separating_space.md
image/image_title.md
image/incomplete.md
image/link_text_with_newline.md
image/link_with_parenthesis.md
image/multiple_ref_id_definitions.md
image/nested_images.md
image/ref_case_sensitivity.md
image/ref_id_matching.md
image/ref_link.md
image/ref_link_empty.md
image/ref_link_self.md
image/ref_link_with_2separating_spaces.md
image/ref_link_with_separating_newline.md
image/ref_link_with_separating_space.md
image/ref_resolution_within_other_blocks.md
image/square_brackets_in_link_or_ref.md
image/two_consecutive_refs.md
image/unused_ref.md
image/url_escapes.md
image/url_in_angle_brackets.md
image/url_special_chars.md
image/url_whitespace.md
image/vs_code.md
image/vs_emph.md
image/vs_html.md
image/within_link.md
link/direct_link.md
link/direct_link_with_2separating_spaces.md
link/direct_link_with_separating_newline.md
link/direct_link_with_separating_space.md
link/incomplete.md
link/link_text_with_newline.md
link/link_title.md
link/link_with_parenthesis.md
link/multiple_ref_id_definitions.md
link/nested_links.md
link/ref_case_sensitivity.md
link/ref_id_matching.md
link/ref_link.md
link/ref_link_empty.md
link/ref_link_self.md
link/ref_link_with_2separating_spaces.md
link/ref_link_with_separating_newline.md
link/ref_link_with_separating_space.md
link/ref_resolution_within_other_blocks.md
link/square_brackets_in_link_or_ref.md
link/two_consecutive_refs.md
link/unused_ref.md
link/url_escapes.md
link/url_in_angle_brackets.md
link/url_special_chars.md
link/url_whitespace.md
link/vs_code.md
link/vs_emph.md
link/vs_html.md
link/vs_image.md
*/
tests: span code/multiline
package span
import (
"bytes"
"fmt"
"io/ioutil"
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
)
// bb is a test shorthand converting a string to its []byte form.
func bb(s string) []byte {
	return append([]byte(nil), s...)
}
// spanCase is one span-level parser test fixture: a testdata file plus the
// spans expected to be parsed out of it.
type spanCase struct {
	fname  string   // testdata file name, relative to dir
	buf    []byte   // raw contents of the file
	blocks [][]byte // buf pre-split into the chunks fed to Process
	spans  []Span   // expected parser output
}
// spans abbreviates a slice of expected Spans in the test tables.
type spans []Span

// dir is the root directory of the span-level testdata files.
const dir = `../testdata/tests/span_level/`
// lines builds a spanCase from a testdata file, splitting its contents into
// individual non-empty lines (CR/LF separated) as the parser input blocks.
// Panics if the file cannot be read.
func lines(filename string, spans spans) spanCase {
	data, err := ioutil.ReadFile(dir + filename)
	if err != nil {
		panic(err)
	}
	newline := func(r rune) bool { return r == '\n' || r == '\r' }
	return spanCase{
		fname:  filename,
		buf:    data,
		blocks: bytes.FieldsFunc(data, newline),
		spans:  spans,
	}
}
// blocks builds a spanCase from a testdata file, stripping CR characters and
// splitting on blank lines, so each parser input block may span several lines.
// Panics if the file cannot be read.
func blocks(filename string, spans spans) spanCase {
	data, err := ioutil.ReadFile(dir + filename)
	if err != nil {
		panic(err)
	}
	data = bytes.Replace(data, bb("\r"), bb(""), -1)
	return spanCase{
		fname:  filename,
		buf:    data,
		blocks: bytes.Split(data, []byte("\n\n")),
		spans:  spans,
	}
}
// diff returns a human-readable description of the first difference between
// the expected (ok) and actual (bad) span lists.
func diff(ok, bad []Span) string {
	for i, want := range ok {
		switch {
		case i >= len(bad):
			return fmt.Sprintf("ends abruptly at position %d, expected:\n%s",
				i, spew.Sdump(want))
		case !reflect.DeepEqual(want, bad[i]):
			return fmt.Sprintf("position %d, expected:\n%sgot:\n%s",
				i, spew.Sdump(want), spew.Sdump(bad[i]))
		}
	}
	return fmt.Sprintf("too many nodes, starting at position %d:\n%s",
		len(ok), spew.Sdump(bad[len(ok)]))
}
// TestSpan runs the span-level parser over a set of testdata files and
// compares the produced spans against hand-written expectations, dumping the
// full expected/actual lists plus a pinpointed diff on mismatch.
func TestSpan(test *testing.T) {
	cases := []spanCase{
		lines(`automatic_links/angle_brackets_in_link.md`, spans{
			{bb("http://exampl"), AutoLink{URL: `http://exampl`, Text: `http://exampl`}},
			{bb("http://exampl"), AutoLink{URL: `http://exampl`, Text: `http://exampl`}},
		}),
		lines("automatic_links/ending_with_punctuation.md", spans{
			{bb("http://example.net"), AutoLink{URL: "http://example.net", Text: "http://example.net"}},
			{bb("http://example.net/"), AutoLink{URL: "http://example.net/", Text: "http://example.net/"}},
			{bb("http://example.net"), AutoLink{URL: "http://example.net", Text: "http://example.net"}},
			{bb("http://example.net/"), AutoLink{URL: "http://example.net/", Text: "http://example.net/"}},
			{bb("<http://example.net,>"), AutoLink{URL: "http://example.net,", Text: "http://example.net,"}},
			{bb("<http://example.net/,>"), AutoLink{URL: "http://example.net/,", Text: "http://example.net/,"}},
			{bb("<http://example.net)>"), AutoLink{URL: "http://example.net)", Text: "http://example.net)"}},
			{bb("<http://example.net/)>"), AutoLink{URL: "http://example.net/)", Text: "http://example.net/)"}},
		}),
		lines("automatic_links/mail_url_in_angle_brackets.md", spans{
			{bb("<mailto:someone@example.net>"), AutoLink{URL: "mailto:someone@example.net", Text: "mailto:someone@example.net"}},
			{bb("<someone@example.net>"), AutoLink{URL: "mailto:someone@example.net", Text: "someone@example.net"}},
		}),
		lines("automatic_links/mail_url_without_angle_brackets.md", spans{
			// NOTE(akavel): below line is unexpected according to
			// testdata/, but from spec this seems totally expected,
			// so I added it
			{bb("mailto:someone@example.net"), AutoLink{URL: "mailto:someone@example.net", Text: "mailto:someone@example.net"}},
		}),
		lines("automatic_links/url_schemes.md", spans{
			{bb("http://example.net"), AutoLink{URL: "http://example.net", Text: "http://example.net"}},
			{bb("<http://example.net>"), AutoLink{URL: "http://example.net", Text: "http://example.net"}},
			{bb("file:///tmp/tmp.html"), AutoLink{URL: "file:///tmp/tmp.html", Text: "file:///tmp/tmp.html"}},
			{bb("<file:///tmp/tmp.html>"), AutoLink{URL: "file:///tmp/tmp.html", Text: "file:///tmp/tmp.html"}},
			{bb("feed://example.net/rss.xml"), AutoLink{URL: "feed://example.net/rss.xml", Text: "feed://example.net/rss.xml"}},
			{bb("<feed://example.net/rss.xml>"), AutoLink{URL: "feed://example.net/rss.xml", Text: "feed://example.net/rss.xml"}},
			{bb("googlechrome://example.net/"), AutoLink{URL: "googlechrome://example.net/", Text: "googlechrome://example.net/"}},
			{bb("<googlechrome://example.net/>"), AutoLink{URL: "googlechrome://example.net/", Text: "googlechrome://example.net/"}},
			{bb("`<>`"), Code{bb("<>")}},
			// NOTE(akavel): below line is unexpected according to
			// testdata/, but from spec this seems totally expected,
			// so I added it
			{bb("mailto:me@example.net"), AutoLink{URL: "mailto:me@example.net", Text: "mailto:me@example.net"}},
			{bb("<mailto:me@example.net>"), AutoLink{URL: "mailto:me@example.net", Text: "mailto:me@example.net"}},
		}),
		lines("automatic_links/url_special_chars.md", spans{
			{bb(`http://example.net/*#$%^&\~/blah`), AutoLink{URL: `http://example.net/*#$%^&\~/blah`, Text: `http://example.net/*#$%^&\~/blah`}},
			{bb(`<http://example.net/*#$%^&\~)/blah>`), AutoLink{URL: `http://example.net/*#$%^&\~)/blah`, Text: `http://example.net/*#$%^&\~)/blah`}},
			// NOTE(akavel): testdata expects below commented entry,
			// but this seems wrong compared to spec; I've added
			// fixed entry
			// {bb(`http://example.net/blah/`), AutoLink{URL: `http://example.net/blah/`, Text: `http://example.net/blah/`}},
			{bb(`http://example.net/blah/*#$%^&\~`), AutoLink{URL: `http://example.net/blah/*#$%^&\~`, Text: `http://example.net/blah/*#$%^&\~`}},
			{bb(`<http://example.net/blah/*#$%^&\~)>`), AutoLink{URL: `http://example.net/blah/*#$%^&\~)`, Text: `http://example.net/blah/*#$%^&\~)`}},
		}),
		lines("automatic_links/web_url_in_angle_brackets.md", spans{
			{bb("<http://example.net/path/>"), AutoLink{URL: "http://example.net/path/", Text: "http://example.net/path/"}},
			{bb("<https://example.net/path/>"), AutoLink{URL: "https://example.net/path/", Text: "https://example.net/path/"}},
			{bb("<ftp://example.net/path/>"), AutoLink{URL: "ftp://example.net/path/", Text: "ftp://example.net/path/"}},
		}),
		lines("automatic_links/web_url_without_angle_brackets.md", spans{
			{bb("http://example.net/path/"), AutoLink{URL: "http://example.net/path/", Text: "http://example.net/path/"}},
			{bb("https://example.net/path/"), AutoLink{URL: "https://example.net/path/", Text: "https://example.net/path/"}},
			{bb("ftp://example.net/path/"), AutoLink{URL: "ftp://example.net/path/", Text: "ftp://example.net/path/"}},
		}),
		lines("code/end_of_codespan.md", spans{
			{bb("`code span`"), Code{bb("code span")}},
			{bb("``code span` ends``"), Code{bb("code span` ends")}},
			{bb("`code span`` ends`"), Code{bb("code span`` ends")}},
			{bb("````code span`` ``ends````"), Code{bb("code span`` ``ends")}},
			{bb("`code span\\`"), Code{bb(`code span\`)}},
		}),
		blocks("code/multiline.md", spans{
			{bb("`code span\ncan span multiple\nlines`"), Code{bb("code span\ncan span multiple\nlines")}},
		}),
	}
	for _, c := range cases {
		// Parse every block of the file and collect all resulting spans.
		spans := []Span{}
		for _, b := range c.blocks {
			spans = append(spans, Process(b, nil)...)
		}
		if !reflect.DeepEqual(c.spans, spans) {
			test.Errorf("case %s expected:\n%s",
				c.fname, spew.Sdump(c.spans))
			test.Errorf("got:")
			for i, span := range spans {
				// Report each actual span with its byte offset in the source buffer.
				off, err := span.OffsetIn(c.buf)
				test.Errorf("[%d] @ %d [%v]: %s",
					i, off, err, spew.Sdump(span))
			}
			test.Errorf("QUICK DIFF: %s\n", diff(c.spans, spans))
		}
	}
}
/*
in ROOT/testdata/tests/span_level:
code/vs_emph.md
code/vs_html.md
code/vs_image.md
code/vs_link.md
code/well_formed.md
emphasis/emphasis_tag_combinations.md
emphasis/intertwined.md
emphasis/intraword.md
emphasis/nested_homogenous.md
emphasis/opening_and_closing_tags.md
emphasis/simple.md
emphasis/vs_html.md
emphasis/within_whitespace.md
emphasis/with_punctuation.md
image/direct_link.md
image/direct_link_with_2separating_spaces.md
image/direct_link_with_separating_newline.md
image/direct_link_with_separating_space.md
image/image_title.md
image/incomplete.md
image/link_text_with_newline.md
image/link_with_parenthesis.md
image/multiple_ref_id_definitions.md
image/nested_images.md
image/ref_case_sensitivity.md
image/ref_id_matching.md
image/ref_link.md
image/ref_link_empty.md
image/ref_link_self.md
image/ref_link_with_2separating_spaces.md
image/ref_link_with_separating_newline.md
image/ref_link_with_separating_space.md
image/ref_resolution_within_other_blocks.md
image/square_brackets_in_link_or_ref.md
image/two_consecutive_refs.md
image/unused_ref.md
image/url_escapes.md
image/url_in_angle_brackets.md
image/url_special_chars.md
image/url_whitespace.md
image/vs_code.md
image/vs_emph.md
image/vs_html.md
image/within_link.md
link/direct_link.md
link/direct_link_with_2separating_spaces.md
link/direct_link_with_separating_newline.md
link/direct_link_with_separating_space.md
link/incomplete.md
link/link_text_with_newline.md
link/link_title.md
link/link_with_parenthesis.md
link/multiple_ref_id_definitions.md
link/nested_links.md
link/ref_case_sensitivity.md
link/ref_id_matching.md
link/ref_link.md
link/ref_link_empty.md
link/ref_link_self.md
link/ref_link_with_2separating_spaces.md
link/ref_link_with_separating_newline.md
link/ref_link_with_separating_space.md
link/ref_resolution_within_other_blocks.md
link/square_brackets_in_link_or_ref.md
link/two_consecutive_refs.md
link/unused_ref.md
link/url_escapes.md
link/url_in_angle_brackets.md
link/url_special_chars.md
link/url_whitespace.md
link/vs_code.md
link/vs_emph.md
link/vs_html.md
link/vs_image.md
*/
|
/*
Copyright 2017 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package spanner
import (
"fmt"
"regexp"
"sync/atomic"
"time"
"cloud.google.com/go/internal/version"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/api/transport"
sppb "google.golang.org/genproto/googleapis/spanner/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
const (
	// prodAddr is the production Cloud Spanner endpoint.
	prodAddr = "spanner.googleapis.com:443"
	// resourcePrefixHeader is the name of the metadata header used to indicate
	// the resource being operated on.
	resourcePrefixHeader = "google-cloud-resource-prefix"
	// apiClientHeader is the name of the metadata header used to indicate client
	// information.
	apiClientHeader = "x-goog-api-client"
	// numChannels is the default value for NumChannels of client
	numChannels = 4
)

const (
	// Scope is the scope for Cloud Spanner Data API.
	Scope = "https://www.googleapis.com/auth/spanner.data"
	// AdminScope is the scope for Cloud Spanner Admin APIs.
	AdminScope = "https://www.googleapis.com/auth/spanner.admin"
)

var (
	// validDBPattern matches fully-qualified database names of the form
	// projects/<project>/instances/<instance>/databases/<database>.
	validDBPattern = regexp.MustCompile("^projects/[^/]+/instances/[^/]+/databases/[^/]+$")
	// clientUserAgent identifies this client library (Go, library, and gRPC
	// versions) to the service.
	clientUserAgent = fmt.Sprintf("gl-go/%s gccl/%s grpc/%s", version.Go(), version.Repo, grpc.Version)
)
// validDatabaseName returns nil when db matches the canonical
// projects/<p>/instances/<i>/databases/<d> form, and a descriptive error
// otherwise.
func validDatabaseName(db string) error {
	if validDBPattern.MatchString(db) {
		return nil
	}
	return fmt.Errorf("database name %q should conform to pattern %q",
		db, validDBPattern.String())
}
// Client is a client for reading and writing data to a Cloud Spanner database. A
// client is safe to use concurrently, except for its Close method.
type Client struct {
	// rr must be accessed through atomic operations.
	rr uint32
	conns []*grpc.ClientConn // one gRPC connection per channel
	clients []sppb.SpannerClient // RPC stubs, parallel to conns
	database string // fully-qualified database name
	// Metadata to be sent with each request.
	md metadata.MD
	idleSessions *sessionPool // session pool backing all transactions
}

// ClientConfig has configurations for the client.
type ClientConfig struct {
	// NumChannels is the number of GRPC channels.
	// If zero, numChannels is used.
	NumChannels int
	// NOTE(review): co is not referenced in this file — confirm before removing.
	co []option.ClientOption
	// SessionPoolConfig is the configuration for session pool.
	SessionPoolConfig
}
// errDial returns error for dialing to Cloud Spanner.
func errDial(ci int, err error) error {
	// Wrap as a *Error and annotate with the index of the failing channel.
	e := toSpannerError(err).(*Error)
	e.decorate(fmt.Sprintf("dialing fails for channel[%v]", ci))
	return e
}

// contextWithOutgoingMetadata returns a context carrying md as outgoing gRPC
// metadata, joined with any metadata already attached to ctx.
func contextWithOutgoingMetadata(ctx context.Context, md metadata.MD) context.Context {
	existing, ok := metadata.FromOutgoingContext(ctx)
	if ok {
		md = metadata.Join(existing, md)
	}
	return metadata.NewOutgoingContext(ctx, md)
}
// NewClient creates a client to a database. A valid database name has the
// form projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID. It uses a default
// configuration.
func NewClient(ctx context.Context, database string, opts ...option.ClientOption) (*Client, error) {
	// Delegate with a zero-valued config; defaults are filled in there.
	return NewClientWithConfig(ctx, database, ClientConfig{}, opts...)
}
// NewClientWithConfig creates a client to a database. A valid database name has the
// form projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID.
//
// It dials config.NumChannels gRPC connections (defaulting session-pool
// limits as needed) and attaches the per-request metadata sent with every RPC.
func NewClientWithConfig(ctx context.Context, database string, config ClientConfig, opts ...option.ClientOption) (*Client, error) {
	// Validate database path.
	if err := validDatabaseName(database); err != nil {
		return nil, err
	}
	c := &Client{
		database: database,
		// apiClientHeader ("x-goog-api-client") is set exactly once here. The
		// previous code additionally set the literal "x-goog-api-client" key a
		// second time, with a malformed value ending in "grpc/" (missing the
		// gRPC version); that duplicate has been removed.
		md: metadata.Pairs(
			resourcePrefixHeader, database,
			apiClientHeader, clientUserAgent),
	}
	allOpts := []option.ClientOption{option.WithEndpoint(prodAddr), option.WithScopes(Scope), option.WithUserAgent(clientUserAgent)}
	allOpts = append(allOpts, opts...)
	// Prepare gRPC channels.
	if config.NumChannels == 0 {
		config.NumChannels = numChannels
	}
	// Default MaxOpened sessions
	if config.MaxOpened == 0 {
		config.MaxOpened = uint64(config.NumChannels * 100)
	}
	if config.MaxBurst == 0 {
		config.MaxBurst = 10
	}
	for i := 0; i < config.NumChannels; i++ {
		conn, err := transport.DialGRPC(ctx, allOpts...)
		if err != nil {
			return nil, errDial(i, err)
		}
		c.conns = append(c.conns, conn)
		c.clients = append(c.clients, sppb.NewSpannerClient(conn))
	}
	// Prepare session pool.
	config.SessionPoolConfig.getRPCClient = func() (sppb.SpannerClient, error) {
		// TODO: support more loadbalancing options.
		return c.rrNext(), nil
	}
	sp, err := newSessionPool(database, config.SessionPoolConfig, c.md)
	if err != nil {
		// Release the connections dialed above before bailing out.
		c.Close()
		return nil, err
	}
	c.idleSessions = sp
	return c, nil
}
// rrNext returns the next available Cloud Spanner RPC client in a round-robin manner.
func (c *Client) rrNext() sppb.SpannerClient {
	// The atomic increment keeps the rotation safe under concurrent callers.
	return c.clients[atomic.AddUint32(&c.rr, 1)%uint32(len(c.clients))]
}

// Close closes the client.
func (c *Client) Close() {
	// idleSessions may be nil if NewClientWithConfig failed before pool creation.
	if c.idleSessions != nil {
		c.idleSessions.close()
	}
	for _, conn := range c.conns {
		conn.Close()
	}
}
// Single provides a read-only snapshot transaction optimized for the case
// where only a single read or query is needed. This is more efficient than
// using ReadOnlyTransaction() for a single read or query.
//
// Single will use a strong TimestampBound by default. Use
// ReadOnlyTransaction.WithTimestampBound to specify a different
// TimestampBound. A non-strong bound can be used to reduce latency, or
// "time-travel" to prior versions of the database, see the documentation of
// TimestampBound for details.
func (c *Client) Single() *ReadOnlyTransaction {
	t := &ReadOnlyTransaction{singleUse: true, sp: c.idleSessions}
	// The transaction reads through itself as its own read environment.
	t.txReadOnly.txReadEnv = t
	return t
}

// ReadOnlyTransaction returns a ReadOnlyTransaction that can be used for
// multiple reads from the database. You must call Close() when the
// ReadOnlyTransaction is no longer needed to release resources on the server.
//
// ReadOnlyTransaction will use a strong TimestampBound by default. Use
// ReadOnlyTransaction.WithTimestampBound to specify a different
// TimestampBound. A non-strong bound can be used to reduce latency, or
// "time-travel" to prior versions of the database, see the documentation of
// TimestampBound for details.
func (c *Client) ReadOnlyTransaction() *ReadOnlyTransaction {
	t := &ReadOnlyTransaction{
		singleUse: false,
		sp: c.idleSessions,
		txReadyOrClosed: make(chan struct{}),
	}
	t.txReadOnly.txReadEnv = t
	return t
}
// ReadWriteTransaction executes a read-write transaction, with retries as
// necessary.
//
// The function f will be called one or more times. It must not maintain
// any state between calls.
//
// If the transaction cannot be committed or if f returns an IsAborted error,
// ReadWriteTransaction will call f again. It will continue to call f until the
// transaction can be committed or the Context times out or is cancelled. If f
// returns an error other than IsAborted, ReadWriteTransaction will abort the
// transaction and return the error.
//
// To limit the number of retries, set a deadline on the Context rather than
// using a fixed limit on the number of attempts. ReadWriteTransaction will
// retry as needed until that deadline is met.
func (c *Client) ReadWriteTransaction(ctx context.Context, f func(t *ReadWriteTransaction) error) (time.Time, error) {
	var (
		ts time.Time
		sh *sessionHandle
	)
	// Each invocation of this closure is one attempt; runRetryable decides
	// whether to re-run it.
	err := runRetryable(ctx, func(ctx context.Context) error {
		var (
			err error
			t *ReadWriteTransaction
		)
		if sh == nil || sh.getID() == "" || sh.getClient() == nil {
			// Session handle hasn't been allocated or has been destroyed.
			sh, err = c.idleSessions.takeWriteSession(ctx)
			if err != nil {
				// If session retrieval fails, just fail the transaction.
				return err
			}
			t = &ReadWriteTransaction{
				sh: sh,
				tx: sh.getTransactionID(),
			}
		} else {
			// Reuse the session handle from the previous attempt.
			t = &ReadWriteTransaction{
				sh: sh,
			}
		}
		t.txReadOnly.txReadEnv = t
		if err = t.begin(ctx); err != nil {
			// Mask error from begin operation as retryable error.
			return errRetry(err)
		}
		ts, err = t.runInTransaction(ctx, f)
		if err != nil {
			return err
		}
		return nil
	})
	// Return the session to the pool regardless of the outcome.
	if sh != nil {
		sh.recycle()
	}
	return ts, err
}
// applyOption controls the behavior of Client.Apply.
type applyOption struct {
	// If atLeastOnce == true, Client.Apply will execute the mutations on Cloud Spanner at least once.
	atLeastOnce bool
}

// An ApplyOption is an optional argument to Apply.
type ApplyOption func(*applyOption)

// ApplyAtLeastOnce returns an ApplyOption that removes replay protection.
//
// With this option, Apply may attempt to apply mutations more than once; if
// the mutations are not idempotent, this may lead to a failure being reported
// when the mutation was applied more than once. For example, an insert may
// fail with ALREADY_EXISTS even though the row did not exist before Apply was
// called. For this reason, most users of the library will prefer not to use
// this option. However, ApplyAtLeastOnce requires only a single RPC, whereas
// Apply's default replay protection may require an additional RPC. So this
// option may be appropriate for latency sensitive and/or high throughput blind
// writing.
func ApplyAtLeastOnce() ApplyOption {
	return func(ao *applyOption) {
		ao.atLeastOnce = true
	}
}
// Apply applies a list of mutations atomically to the database.
func (c *Client) Apply(ctx context.Context, ms []*Mutation, opts ...ApplyOption) (time.Time, error) {
	ao := &applyOption{}
	for _, opt := range opts {
		opt(ao)
	}
	if !ao.atLeastOnce {
		// Default path: buffer the mutations inside a replay-protected
		// read-write transaction.
		return c.ReadWriteTransaction(ctx, func(t *ReadWriteTransaction) error {
			// NOTE(review): any result of BufferWrite is discarded here —
			// confirm it cannot fail or whether it should be returned.
			t.BufferWrite(ms)
			return nil
		})
	}
	// At-least-once path: a single RPC without replay protection.
	t := &writeOnlyTransaction{c.idleSessions}
	return t.applyAtLeastOnce(ctx, ms...)
}
spanner: remove duplicate header
We were setting the x-goog-api-client header twice.
Change-Id: Ia4576008dc9b33a5686c27c99ae9dbe0fcc139a4
Reviewed-on: https://code-review.googlesource.com/13150
Reviewed-by: Vikas Kedia <f6a34f50adc64ff8913356438469cce93f09537a@google.com>
Reviewed-by: kokoro <2ac7b1f3fa578934c95181d4272be0d3bca00121@google.com>
/*
Copyright 2017 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package spanner
import (
"fmt"
"regexp"
"sync/atomic"
"time"
"cloud.google.com/go/internal/version"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/api/transport"
sppb "google.golang.org/genproto/googleapis/spanner/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
const (
	// prodAddr is the production Cloud Spanner endpoint.
	prodAddr = "spanner.googleapis.com:443"
	// resourcePrefixHeader is the name of the metadata header used to indicate
	// the resource being operated on.
	resourcePrefixHeader = "google-cloud-resource-prefix"
	// apiClientHeader is the name of the metadata header used to indicate client
	// information.
	apiClientHeader = "x-goog-api-client"
	// numChannels is the default value for NumChannels of client
	numChannels = 4
)

const (
	// Scope is the scope for Cloud Spanner Data API.
	Scope = "https://www.googleapis.com/auth/spanner.data"
	// AdminScope is the scope for Cloud Spanner Admin APIs.
	AdminScope = "https://www.googleapis.com/auth/spanner.admin"
)

var (
	// validDBPattern matches fully-qualified database names of the form
	// projects/<project>/instances/<instance>/databases/<database>.
	validDBPattern = regexp.MustCompile("^projects/[^/]+/instances/[^/]+/databases/[^/]+$")
	// clientUserAgent identifies this client library (Go, library, and gRPC
	// versions) to the service.
	clientUserAgent = fmt.Sprintf("gl-go/%s gccl/%s grpc/%s", version.Go(), version.Repo, grpc.Version)
)
// validDatabaseName returns nil when db matches the canonical
// projects/<p>/instances/<i>/databases/<d> form, and a descriptive error
// otherwise.
func validDatabaseName(db string) error {
	if validDBPattern.MatchString(db) {
		return nil
	}
	return fmt.Errorf("database name %q should conform to pattern %q",
		db, validDBPattern.String())
}
// Client is a client for reading and writing data to a Cloud Spanner database. A
// client is safe to use concurrently, except for its Close method.
type Client struct {
	// rr must be accessed through atomic operations.
	rr uint32
	conns []*grpc.ClientConn // one gRPC connection per channel
	clients []sppb.SpannerClient // RPC stubs, parallel to conns
	database string // fully-qualified database name
	// Metadata to be sent with each request.
	md metadata.MD
	idleSessions *sessionPool // session pool backing all transactions
}

// ClientConfig has configurations for the client.
type ClientConfig struct {
	// NumChannels is the number of GRPC channels.
	// If zero, numChannels is used.
	NumChannels int
	// NOTE(review): co is not referenced in this file — confirm before removing.
	co []option.ClientOption
	// SessionPoolConfig is the configuration for session pool.
	SessionPoolConfig
}
// errDial returns error for dialing to Cloud Spanner.
func errDial(ci int, err error) error {
	// Wrap as a *Error and annotate with the index of the failing channel.
	e := toSpannerError(err).(*Error)
	e.decorate(fmt.Sprintf("dialing fails for channel[%v]", ci))
	return e
}

// contextWithOutgoingMetadata returns a context carrying md as outgoing gRPC
// metadata, joined with any metadata already attached to ctx.
func contextWithOutgoingMetadata(ctx context.Context, md metadata.MD) context.Context {
	existing, ok := metadata.FromOutgoingContext(ctx)
	if ok {
		md = metadata.Join(existing, md)
	}
	return metadata.NewOutgoingContext(ctx, md)
}
// NewClient creates a client to a database. A valid database name has the
// form projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID. It uses a default
// configuration.
func NewClient(ctx context.Context, database string, opts ...option.ClientOption) (*Client, error) {
	// Delegate with a zero-valued config; defaults are filled in there.
	return NewClientWithConfig(ctx, database, ClientConfig{}, opts...)
}
// NewClientWithConfig creates a client to a database. A valid database name has the
// form projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID.
//
// It dials config.NumChannels gRPC connections (defaulting session-pool
// limits as needed) and attaches the per-request metadata sent with every RPC.
func NewClientWithConfig(ctx context.Context, database string, config ClientConfig, opts ...option.ClientOption) (*Client, error) {
	// Validate database path.
	if err := validDatabaseName(database); err != nil {
		return nil, err
	}
	c := &Client{
		database: database,
		// Per-request metadata: the resource prefix plus client identification.
		md: metadata.Pairs(
			resourcePrefixHeader, database,
			apiClientHeader, clientUserAgent),
	}
	allOpts := []option.ClientOption{option.WithEndpoint(prodAddr), option.WithScopes(Scope), option.WithUserAgent(clientUserAgent)}
	allOpts = append(allOpts, opts...)
	// Prepare gRPC channels.
	if config.NumChannels == 0 {
		config.NumChannels = numChannels
	}
	// Default MaxOpened sessions
	if config.MaxOpened == 0 {
		config.MaxOpened = uint64(config.NumChannels * 100)
	}
	if config.MaxBurst == 0 {
		config.MaxBurst = 10
	}
	for i := 0; i < config.NumChannels; i++ {
		conn, err := transport.DialGRPC(ctx, allOpts...)
		if err != nil {
			return nil, errDial(i, err)
		}
		c.conns = append(c.conns, conn)
		c.clients = append(c.clients, sppb.NewSpannerClient(conn))
	}
	// Prepare session pool.
	config.SessionPoolConfig.getRPCClient = func() (sppb.SpannerClient, error) {
		// TODO: support more loadbalancing options.
		return c.rrNext(), nil
	}
	sp, err := newSessionPool(database, config.SessionPoolConfig, c.md)
	if err != nil {
		// Release the connections dialed above before bailing out.
		c.Close()
		return nil, err
	}
	c.idleSessions = sp
	return c, nil
}
// rrNext returns the next available Cloud Spanner RPC client in a round-robin manner.
func (c *Client) rrNext() sppb.SpannerClient {
	// The atomic increment keeps the rotation safe under concurrent callers.
	return c.clients[atomic.AddUint32(&c.rr, 1)%uint32(len(c.clients))]
}

// Close closes the client.
func (c *Client) Close() {
	// idleSessions may be nil if NewClientWithConfig failed before pool creation.
	if c.idleSessions != nil {
		c.idleSessions.close()
	}
	for _, conn := range c.conns {
		conn.Close()
	}
}
// Single provides a read-only snapshot transaction optimized for the case
// where only a single read or query is needed. This is more efficient than
// using ReadOnlyTransaction() for a single read or query.
//
// Single will use a strong TimestampBound by default. Use
// ReadOnlyTransaction.WithTimestampBound to specify a different
// TimestampBound. A non-strong bound can be used to reduce latency, or
// "time-travel" to prior versions of the database, see the documentation of
// TimestampBound for details.
func (c *Client) Single() *ReadOnlyTransaction {
t := &ReadOnlyTransaction{singleUse: true, sp: c.idleSessions}
// Point the embedded read helper back at this transaction so reads are
// routed through it.
t.txReadOnly.txReadEnv = t
return t
}
// ReadOnlyTransaction returns a ReadOnlyTransaction that can be used for
// multiple reads from the database. You must call Close() when the
// ReadOnlyTransaction is no longer needed to release resources on the server.
//
// ReadOnlyTransaction will use a strong TimestampBound by default. Use
// ReadOnlyTransaction.WithTimestampBound to specify a different
// TimestampBound. A non-strong bound can be used to reduce latency, or
// "time-travel" to prior versions of the database, see the documentation of
// TimestampBound for details.
func (c *Client) ReadOnlyTransaction() *ReadOnlyTransaction {
t := &ReadOnlyTransaction{
singleUse: false,
sp: c.idleSessions,
// txReadyOrClosed signals when the underlying transaction has been
// prepared or the ReadOnlyTransaction closed; only multi-use
// transactions need it.
txReadyOrClosed: make(chan struct{}),
}
t.txReadOnly.txReadEnv = t
return t
}
// ReadWriteTransaction executes a read-write transaction, with retries as
// necessary.
//
// The function f will be called one or more times. It must not maintain
// any state between calls.
//
// If the transaction cannot be committed or if f returns an IsAborted error,
// ReadWriteTransaction will call f again. It will continue to call f until the
// transaction can be committed or the Context times out or is cancelled. If f
// returns an error other than IsAborted, ReadWriteTransaction will abort the
// transaction and return the error.
//
// To limit the number of retries, set a deadline on the Context rather than
// using a fixed limit on the number of attempts. ReadWriteTransaction will
// retry as needed until that deadline is met.
func (c *Client) ReadWriteTransaction(ctx context.Context, f func(t *ReadWriteTransaction) error) (time.Time, error) {
var (
ts time.Time
sh *sessionHandle
)
// The closure is re-invoked by runRetryable on retryable failures; sh is
// shared across attempts so a still-valid session is reused.
err := runRetryable(ctx, func(ctx context.Context) error {
var (
err error
t *ReadWriteTransaction
)
if sh == nil || sh.getID() == "" || sh.getClient() == nil {
// Session handle hasn't been allocated or has been destroyed.
sh, err = c.idleSessions.takeWriteSession(ctx)
if err != nil {
// If session retrieval fails, just fail the transaction.
return err
}
t = &ReadWriteTransaction{
sh: sh,
tx: sh.getTransactionID(),
}
} else {
// Reuse the session from the previous attempt; t.begin below starts
// a fresh transaction on it.
t = &ReadWriteTransaction{
sh: sh,
}
}
t.txReadOnly.txReadEnv = t
if err = t.begin(ctx); err != nil {
// Mask error from begin operation as retryable error.
return errRetry(err)
}
ts, err = t.runInTransaction(ctx, f)
if err != nil {
return err
}
return nil
})
// Return the session to the pool regardless of outcome.
if sh != nil {
sh.recycle()
}
return ts, err
}
// applyOption controls the behavior of Client.Apply.
type applyOption struct {
// If atLeastOnce == true, Client.Apply will execute the mutations on Cloud Spanner at least once.
atLeastOnce bool
}
// An ApplyOption is an optional argument to Apply.
type ApplyOption func(*applyOption)
// ApplyAtLeastOnce returns an ApplyOption that removes replay protection.
//
// With this option, Apply may attempt to apply mutations more than once; if
// the mutations are not idempotent, this may lead to a failure being reported
// when the mutation was applied more than once. For example, an insert may
// fail with ALREADY_EXISTS even though the row did not exist before Apply was
// called. For this reason, most users of the library will prefer not to use
// this option. However, ApplyAtLeastOnce requires only a single RPC, whereas
// Apply's default replay protection may require an additional RPC. So this
// option may be appropriate for latency sensitive and/or high throughput blind
// writing.
func ApplyAtLeastOnce() ApplyOption {
return func(ao *applyOption) {
ao.atLeastOnce = true
}
}
// Apply applies a list of mutations atomically to the database.
//
// Unless the ApplyAtLeastOnce option is supplied, the mutations are applied
// inside a read-write transaction, which provides replay protection.
func (c *Client) Apply(ctx context.Context, ms []*Mutation, opts ...ApplyOption) (time.Time, error) {
	ao := &applyOption{}
	for _, opt := range opts {
		opt(ao)
	}
	if !ao.atLeastOnce {
		return c.ReadWriteTransaction(ctx, func(t *ReadWriteTransaction) error {
			// Propagate the buffering error instead of silently dropping
			// it, so a failed BufferWrite aborts the transaction.
			return t.BufferWrite(ms)
		})
	}
	t := &writeOnlyTransaction{c.idleSessions}
	return t.applyAtLeastOnce(ctx, ms...)
}
|
package filer2
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
// DeleteEntryMetaAndData removes the entry at p from the filer store.
// Directories have their children deleted first (recursively when
// isRecursive is set). When shouldDeleteChunks is true the backing file
// chunks are deleted asynchronously. Deleting the root "/" is a no-op.
func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p FullPath, isRecursive bool, ignoreRecursiveError, shouldDeleteChunks bool) (err error) {
if p == "/" {
return nil
}
entry, findErr := f.FindEntry(ctx, p)
if findErr != nil {
return findErr
}
// Collect the entry's own chunks; directory children contribute more below.
var chunks []*filer_pb.FileChunk
chunks = append(chunks, entry.Chunks...)
if entry.IsDirectory() {
// delete the folder children, not including the folder itself
var dirChunks []*filer_pb.FileChunk
dirChunks, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks)
if err != nil {
return fmt.Errorf("delete directory %s: %v", p, err)
}
chunks = append(chunks, dirChunks...)
f.cacheDelDirectory(string(p))
}
// delete the file or folder
err = f.doDeleteEntryMetaAndData(ctx, entry, shouldDeleteChunks)
if err != nil {
return fmt.Errorf("delete file %s: %v", p, err)
}
if shouldDeleteChunks {
// Chunk deletion is best-effort and done off the request path.
go f.DeleteChunks(chunks)
}
return nil
}
// doBatchDeleteFolderMetaAndData deletes the children of entry (a directory)
// from the store, pages through the listing PaginationSize entries at a
// time, and returns the accumulated file chunks of everything removed so the
// caller can delete the underlying data.
//
// A non-empty folder is rejected unless isRecursive is set. Recursive errors
// are skipped when ignoreRecursiveError is true.
func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive bool, ignoreRecursiveError, shouldDeleteChunks bool) (chunks []*filer_pb.FileChunk, err error) {
	lastFileName := ""
	includeLastFile := false
	for {
		entries, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize)
		if err != nil {
			glog.Errorf("list folder %s: %v", entry.FullPath, err)
			return nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err)
		}
		if lastFileName == "" && !isRecursive && len(entries) > 0 {
			// only for first iteration in the loop
			return nil, fmt.Errorf("fail to delete non-empty folder: %s", entry.FullPath)
		}
		for _, sub := range entries {
			lastFileName = sub.Name()
			if sub.IsDirectory() {
				dirChunks, err := f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks)
				if err != nil && !ignoreRecursiveError {
					return nil, err
				}
				if shouldDeleteChunks {
					chunks = append(chunks, dirChunks...)
				}
			} else if shouldDeleteChunks {
				// Fix: plain files inside the folder also carry chunks;
				// previously they were never collected, so their volume
				// data was leaked when the folder was deleted.
				chunks = append(chunks, sub.Chunks...)
			}
		}
		if len(entries) < PaginationSize {
			break
		}
	}
	f.cacheDelDirectory(string(entry.FullPath))
	glog.V(3).Infof("deleting directory %v", entry.FullPath)
	if storeDeletionErr := f.store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil {
		return nil, fmt.Errorf("filer store delete: %v", storeDeletionErr)
	}
	f.NotifyUpdateEvent(entry, nil, shouldDeleteChunks)
	return chunks, nil
}
// doDeleteEntryMetaAndData removes a single entry's metadata from the store
// and publishes a deletion event. It does not touch file chunks;
// shouldDeleteChunks is only forwarded to the notification.
func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool) (err error) {
glog.V(3).Infof("deleting entry %v", entry.FullPath)
if storeDeletionErr := f.store.DeleteEntry(ctx, entry.FullPath); storeDeletionErr != nil {
return fmt.Errorf("filer store delete: %v", storeDeletionErr)
}
f.NotifyUpdateEvent(entry, nil, shouldDeleteChunks)
return nil
}
filer: recursively batch delete file chunks
Fixes https://github.com/chrislusf/seaweedfs/issues/1197
package filer2
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
// DeleteEntryMetaAndData removes the entry at p from the filer store.
// Directories have their children deleted first (recursively when
// isRecursive is set). When shouldDeleteChunks is true the backing file
// chunks are deleted asynchronously. Deleting the root "/" is a no-op.
func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p FullPath, isRecursive bool, ignoreRecursiveError, shouldDeleteChunks bool) (err error) {
if p == "/" {
return nil
}
entry, findErr := f.FindEntry(ctx, p)
if findErr != nil {
return findErr
}
// Collect the entry's own chunks; directory children contribute more below.
var chunks []*filer_pb.FileChunk
chunks = append(chunks, entry.Chunks...)
if entry.IsDirectory() {
// delete the folder children, not including the folder itself
var dirChunks []*filer_pb.FileChunk
dirChunks, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks)
if err != nil {
return fmt.Errorf("delete directory %s: %v", p, err)
}
chunks = append(chunks, dirChunks...)
f.cacheDelDirectory(string(p))
}
// delete the file or folder
err = f.doDeleteEntryMetaAndData(ctx, entry, shouldDeleteChunks)
if err != nil {
return fmt.Errorf("delete file %s: %v", p, err)
}
if shouldDeleteChunks {
// Chunk deletion is best-effort and done off the request path.
go f.DeleteChunks(chunks)
}
return nil
}
// doBatchDeleteFolderMetaAndData deletes the children of entry (a directory)
// from the store, paging through the listing PaginationSize entries at a
// time, and returns the accumulated file chunks (from both files and
// subdirectories) so the caller can delete the underlying data.
//
// A non-empty folder is rejected unless isRecursive is set. Recursive errors
// are skipped when ignoreRecursiveError is true.
func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive bool, ignoreRecursiveError, shouldDeleteChunks bool) (chunks []*filer_pb.FileChunk, err error) {
lastFileName := ""
includeLastFile := false
for {
entries, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize)
if err != nil {
glog.Errorf("list folder %s: %v", entry.FullPath, err)
return nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err)
}
if lastFileName == "" && !isRecursive && len(entries) > 0 {
// only for first iteration in the loop
return nil, fmt.Errorf("fail to delete non-empty folder: %s", entry.FullPath)
}
for _, sub := range entries {
lastFileName = sub.Name()
var dirChunks []*filer_pb.FileChunk
if sub.IsDirectory() {
// Recurse into subdirectories and fold their chunks in.
dirChunks, err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks)
chunks = append(chunks, dirChunks...)
} else {
// Plain files contribute their own chunks directly.
chunks = append(chunks, sub.Chunks...)
}
if err != nil && !ignoreRecursiveError {
return nil, err
}
}
// A short page means we have listed the last batch of children.
if len(entries) < PaginationSize {
break
}
}
f.cacheDelDirectory(string(entry.FullPath))
glog.V(3).Infof("deleting directory %v delete %d chunks: %v", entry.FullPath, len(chunks), shouldDeleteChunks)
if storeDeletionErr := f.store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil {
return nil, fmt.Errorf("filer store delete: %v", storeDeletionErr)
}
f.NotifyUpdateEvent(entry, nil, shouldDeleteChunks)
return chunks, nil
}
// doDeleteEntryMetaAndData removes a single entry's metadata from the store
// and publishes a deletion event. It does not touch file chunks;
// shouldDeleteChunks is only forwarded to the notification.
func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool) (err error) {
glog.V(3).Infof("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)
if storeDeletionErr := f.store.DeleteEntry(ctx, entry.FullPath); storeDeletionErr != nil {
return fmt.Errorf("filer store delete: %v", storeDeletionErr)
}
f.NotifyUpdateEvent(entry, nil, shouldDeleteChunks)
return nil
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package flag implements command-line flag parsing.
Usage
Define flags using flag.String(), Bool(), Int(), etc.
This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
import "flag"
var ip = flag.Int("flagname", 1234, "help message for flagname")
If you like, you can bind the flag to a variable using the Var() functions.
var flagvar int
func init() {
flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
}
Or you can create custom flags that satisfy the Value interface (with
pointer receivers) and couple them to flag parsing by
flag.Var(&flagVal, "name", "help message for flagname")
For such flags, the default value is just the initial value of the variable.
After all flags are defined, call
flag.Parse()
to parse the command line into the defined flags.
Flags may then be used directly. If you're using the flags themselves,
they are all pointers; if you bind to variables, they're values.
fmt.Println("ip has value ", *ip)
fmt.Println("flagvar has value ", flagvar)
After parsing, the arguments following the flags are available as the
slice flag.Args() or individually as flag.Arg(i).
The arguments are indexed from 0 through flag.NArg()-1.
Command line flag syntax
The following forms are permitted:
-flag
-flag=x
-flag x // non-boolean flags only
One or two minus signs may be used; they are equivalent.
The last form is not permitted for boolean flags because the
meaning of the command
cmd -x *
where * is a Unix shell wildcard, will change if there is a file
called 0, false, etc. You must use the -flag=false form to turn
off a boolean flag.
Flag parsing stops just before the first non-flag argument
("-" is a non-flag argument) or after the terminator "--".
Integer flags accept 1234, 0664, 0x1234 and may be negative.
Boolean flags may be:
1, 0, t, f, T, F, true, false, TRUE, FALSE, True, False
Duration flags accept any input valid for time.ParseDuration.
The default set of command-line flags is controlled by
top-level functions. The FlagSet type allows one to define
independent sets of flags, such as to implement subcommands
in a command-line interface. The methods of FlagSet are
analogous to the top-level functions for the command-line
flag set.
*/
package flag
import (
"errors"
"fmt"
"io"
"os"
"reflect"
"sort"
"strconv"
"strings"
"time"
)
// ErrHelp is the error returned if the -help or -h flag is invoked
// but no such flag is defined.
var ErrHelp = errors.New("flag: help requested")
// errParse is returned by Set if a flag's value fails to parse, such as with an invalid integer for Int.
// It then gets wrapped through failf to provide more information.
var errParse = errors.New("parse error")
// errRange is returned by Set if a flag's value is out of range.
// It then gets wrapped through failf to provide more information.
var errRange = errors.New("value out of range")
func numError(err error) error {
ne, ok := err.(*strconv.NumError)
if !ok {
return err
}
if ne.Err == strconv.ErrSyntax {
return errParse
}
if ne.Err == strconv.ErrRange {
return errRange
}
return err
}
// -- bool Value
type boolValue bool
// newBoolValue stores val in *p and returns p reinterpreted as a boolValue,
// so flag parsing writes through to the caller's variable.
func newBoolValue(val bool, p *bool) *boolValue {
*p = val
return (*boolValue)(p)
}
// Set parses s with strconv.ParseBool; parse failures are normalized to errParse.
func (b *boolValue) Set(s string) error {
v, err := strconv.ParseBool(s)
if err != nil {
err = errParse
}
*b = boolValue(v)
return err
}
func (b *boolValue) Get() interface{} { return bool(*b) }
func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) }
// IsBoolFlag marks the flag as boolean so "-name" alone means "-name=true".
func (b *boolValue) IsBoolFlag() bool { return true }
// optional interface to indicate boolean flags that can be
// supplied without "=value" text
type boolFlag interface {
Value
IsBoolFlag() bool
}
// -- int Value
type intValue int
// newIntValue stores val in *p and returns p as an intValue for registration.
func newIntValue(val int, p *int) *intValue {
*p = val
return (*intValue)(p)
}
// Set parses s in any base accepted by ParseInt (base 0: decimal, octal,
// hex) at the platform's native int width; errors map to errParse/errRange.
func (i *intValue) Set(s string) error {
v, err := strconv.ParseInt(s, 0, strconv.IntSize)
if err != nil {
err = numError(err)
}
*i = intValue(v)
return err
}
func (i *intValue) Get() interface{} { return int(*i) }
func (i *intValue) String() string { return strconv.Itoa(int(*i)) }
// -- int64 Value
type int64Value int64
// newInt64Value stores val in *p and returns p as an int64Value.
func newInt64Value(val int64, p *int64) *int64Value {
*p = val
return (*int64Value)(p)
}
// Set parses s in any base accepted by ParseInt (base 0) at 64-bit width.
func (i *int64Value) Set(s string) error {
v, err := strconv.ParseInt(s, 0, 64)
if err != nil {
err = numError(err)
}
*i = int64Value(v)
return err
}
func (i *int64Value) Get() interface{} { return int64(*i) }
func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) }
// -- uint Value
type uintValue uint
// newUintValue stores val in *p and returns p as a uintValue.
func newUintValue(val uint, p *uint) *uintValue {
*p = val
return (*uintValue)(p)
}
// Set parses s as an unsigned integer in any base accepted by ParseUint
// (base 0) at the platform's native int width.
func (i *uintValue) Set(s string) error {
v, err := strconv.ParseUint(s, 0, strconv.IntSize)
if err != nil {
err = numError(err)
}
*i = uintValue(v)
return err
}
func (i *uintValue) Get() interface{} { return uint(*i) }
func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) }
// -- uint64 Value
type uint64Value uint64
// newUint64Value stores val in *p and returns p as a uint64Value.
func newUint64Value(val uint64, p *uint64) *uint64Value {
*p = val
return (*uint64Value)(p)
}
// Set parses s as an unsigned integer in any base (base 0) at 64-bit width.
func (i *uint64Value) Set(s string) error {
v, err := strconv.ParseUint(s, 0, 64)
if err != nil {
err = numError(err)
}
*i = uint64Value(v)
return err
}
func (i *uint64Value) Get() interface{} { return uint64(*i) }
func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
// -- string Value
type stringValue string

// newStringValue stores val in *p and returns *p viewed as a stringValue,
// so flag parsing writes through to the caller's variable.
func newStringValue(val string, p *string) *stringValue {
	*p = val
	return (*stringValue)(p)
}

// Set records the raw command-line argument verbatim; it never fails.
func (sv *stringValue) Set(raw string) error {
	*sv = stringValue(raw)
	return nil
}

// Get returns the current value as a plain string.
func (sv *stringValue) Get() interface{} {
	return string(*sv)
}

// String implements Value by echoing the stored text.
func (sv *stringValue) String() string {
	return string(*sv)
}
// -- float64 Value
type float64Value float64
// newFloat64Value stores val in *p and returns p as a float64Value.
func newFloat64Value(val float64, p *float64) *float64Value {
*p = val
return (*float64Value)(p)
}
// Set parses s with ParseFloat at 64-bit precision; errors map to
// errParse/errRange via numError.
func (f *float64Value) Set(s string) error {
v, err := strconv.ParseFloat(s, 64)
if err != nil {
err = numError(err)
}
*f = float64Value(v)
return err
}
func (f *float64Value) Get() interface{} { return float64(*f) }
func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) }
// -- time.Duration Value
type durationValue time.Duration
// newDurationValue stores val in *p and returns p as a durationValue.
func newDurationValue(val time.Duration, p *time.Duration) *durationValue {
*p = val
return (*durationValue)(p)
}
// Set parses s with time.ParseDuration; failures are normalized to errParse.
func (d *durationValue) Set(s string) error {
v, err := time.ParseDuration(s)
if err != nil {
err = errParse
}
*d = durationValue(v)
return err
}
func (d *durationValue) Get() interface{} { return time.Duration(*d) }
// String formats via time.Duration.String (e.g. "1h30m0s").
func (d *durationValue) String() string { return (*time.Duration)(d).String() }
// Value is the interface to the dynamic value stored in a flag.
// (The default value is represented as a string.)
//
// If a Value has an IsBoolFlag() bool method returning true,
// the command-line parser makes -name equivalent to -name=true
// rather than using the next command-line argument.
//
// Set is called once, in command line order, for each flag present.
// The flag package may call the String method with a zero-valued receiver,
// such as a nil pointer.
type Value interface {
String() string
Set(string) error
}
// Getter is an interface that allows the contents of a Value to be retrieved.
// It wraps the Value interface, rather than being part of it, because it
// appeared after Go 1 and its compatibility rules. All Value types provided
// by this package satisfy the Getter interface.
type Getter interface {
Value
Get() interface{}
}
// ErrorHandling defines how FlagSet.Parse behaves if the parse fails.
type ErrorHandling int
// These constants cause FlagSet.Parse to behave as described if the parse fails.
const (
ContinueOnError ErrorHandling = iota // Return a descriptive error.
ExitOnError // Call os.Exit(2).
PanicOnError // Call panic with a descriptive error.
)
// A FlagSet represents a set of defined flags. The zero value of a FlagSet
// has no name and has ContinueOnError error handling.
//
// Flag names must be unique within a FlagSet. An attempt to define a flag whose
// name is already in use will cause a panic.
type FlagSet struct {
// Usage is the function called when an error occurs while parsing flags.
// The field is a function (not a method) that may be changed to point to
// a custom error handler. What happens after Usage is called depends
// on the ErrorHandling setting; for the command line, this defaults
// to ExitOnError, which exits the program after calling Usage.
Usage func()
name string
parsed bool
actual map[string]*Flag // flags that have been set on the command line
formal map[string]*Flag // all defined flags, keyed by name
args []string // arguments after flags
errorHandling ErrorHandling
output io.Writer // nil means stderr; use Output() accessor
}
// A Flag represents the state of a flag.
type Flag struct {
Name string // name as it appears on command line
Usage string // help message
Value Value // value as set
DefValue string // default value (as text); for usage message
}
// sortFlags returns the flags as a slice in lexicographical sorted order.
func sortFlags(flags map[string]*Flag) []*Flag {
	list := make([]*Flag, 0, len(flags))
	for _, f := range flags {
		list = append(list, f)
	}
	sort.Slice(list, func(a, b int) bool {
		return list[a].Name < list[b].Name
	})
	return list
}
// Output returns the destination for usage and error messages. os.Stderr is returned if
// output was not set or was set to nil.
func (f *FlagSet) Output() io.Writer {
	if f.output != nil {
		return f.output
	}
	return os.Stderr
}
// Name returns the name of the flag set.
func (f *FlagSet) Name() string {
return f.name
}
// ErrorHandling returns the error handling behavior of the flag set.
func (f *FlagSet) ErrorHandling() ErrorHandling {
return f.errorHandling
}
// SetOutput sets the destination for usage and error messages.
// If output is nil, os.Stderr is used.
func (f *FlagSet) SetOutput(output io.Writer) {
f.output = output
}
// VisitAll visits the flags in lexicographical order, calling fn for each.
// It visits all flags, even those not set.
func (f *FlagSet) VisitAll(fn func(*Flag)) {
// formal holds every defined flag, so iteration covers unset flags too.
for _, flag := range sortFlags(f.formal) {
fn(flag)
}
}
// VisitAll visits the command-line flags in lexicographical order, calling
// fn for each. It visits all flags, even those not set.
func VisitAll(fn func(*Flag)) {
CommandLine.VisitAll(fn)
}
// Visit visits the flags in lexicographical order, calling fn for each.
// It visits only those flags that have been set.
func (f *FlagSet) Visit(fn func(*Flag)) {
// actual holds only flags recorded by Set, i.e. those seen on the command line.
for _, flag := range sortFlags(f.actual) {
fn(flag)
}
}
// Visit visits the command-line flags in lexicographical order, calling fn
// for each. It visits only those flags that have been set.
func Visit(fn func(*Flag)) {
CommandLine.Visit(fn)
}
// Lookup returns the Flag structure of the named flag, returning nil if none exists.
func (f *FlagSet) Lookup(name string) *Flag {
return f.formal[name]
}
// Lookup returns the Flag structure of the named command-line flag,
// returning nil if none exists.
func Lookup(name string) *Flag {
	// Delegate to the FlagSet method (as every other top-level wrapper in
	// this package does) so the two Lookup entry points cannot drift.
	return CommandLine.Lookup(name)
}
// Set sets the value of the named flag.
func (f *FlagSet) Set(name, value string) error {
flag, ok := f.formal[name]
if !ok {
return fmt.Errorf("no such flag -%v", name)
}
err := flag.Value.Set(value)
if err != nil {
return err
}
// Record the flag as "set". actual is allocated lazily so the zero
// FlagSet remains usable.
if f.actual == nil {
f.actual = make(map[string]*Flag)
}
f.actual[name] = flag
return nil
}
// Set sets the value of the named command-line flag.
func Set(name, value string) error {
return CommandLine.Set(name, value)
}
// isZeroValue determines whether the string represents the zero
// value for a flag.
func isZeroValue(flag *Flag, value string) bool {
// Build a zero value of the flag's Value type, and see if the
// result of calling its String method equals the value passed in.
// This works unless the Value type is itself an interface type.
typ := reflect.TypeOf(flag.Value)
var z reflect.Value
if typ.Kind() == reflect.Ptr {
// Pointer implementations get a freshly allocated zero element.
z = reflect.New(typ.Elem())
} else {
z = reflect.Zero(typ)
}
// NOTE(review): String is invoked on a zero-constructed receiver; a custom
// Value whose String cannot handle that would panic here — confirm all
// Value implementations in use tolerate a zero receiver.
return value == z.Interface().(Value).String()
}
// UnquoteUsage extracts a back-quoted name from the usage
// string for a flag and returns it and the un-quoted usage.
// Given "a `name` to show" it returns ("name", "a name to show").
// If there are no back quotes, the name is an educated guess of the
// type of the flag's value, or the empty string if the flag is boolean.
func UnquoteUsage(flag *Flag) (name string, usage string) {
// Look for a back-quoted name, but avoid the strings package.
usage = flag.Usage
for i := 0; i < len(usage); i++ {
if usage[i] == '`' {
// Only the first back-quoted word is treated as the name.
for j := i + 1; j < len(usage); j++ {
if usage[j] == '`' {
name = usage[i+1 : j]
usage = usage[:i] + name + usage[j+1:]
return name, usage
}
}
break // Only one back quote; use type name.
}
}
// No explicit name, so use type if we can find one.
name = "value"
switch flag.Value.(type) {
case boolFlag:
name = ""
case *durationValue:
name = "duration"
case *float64Value:
name = "float"
case *intValue, *int64Value:
name = "int"
case *stringValue:
name = "string"
case *uintValue, *uint64Value:
name = "uint"
}
return
}
// PrintDefaults prints, to standard error unless configured otherwise, the
// default values of all defined command-line flags in the set. See the
// documentation for the global function PrintDefaults for more information.
func (f *FlagSet) PrintDefaults() {
f.VisitAll(func(flag *Flag) {
s := fmt.Sprintf(" -%s", flag.Name) // Two spaces before -; see next two comments.
name, usage := UnquoteUsage(flag)
if len(name) > 0 {
s += " " + name
}
// Boolean flags of one ASCII letter are so common we
// treat them specially, putting their usage on the same line.
if len(s) <= 4 { // space, space, '-', 'x'.
s += "\t"
} else {
// Four spaces before the tab triggers good alignment
// for both 4- and 8-space tab stops.
s += "\n \t"
}
// Indent continuation lines of a multi-line usage message.
s += strings.ReplaceAll(usage, "\n", "\n \t")
// Omit the "(default ...)" clause when the default is the zero value.
if !isZeroValue(flag, flag.DefValue) {
if _, ok := flag.Value.(*stringValue); ok {
// put quotes on the value
s += fmt.Sprintf(" (default %q)", flag.DefValue)
} else {
s += fmt.Sprintf(" (default %v)", flag.DefValue)
}
}
fmt.Fprint(f.Output(), s, "\n")
})
}
// PrintDefaults prints, to standard error unless configured otherwise,
// a usage message showing the default settings of all defined
// command-line flags.
// For an integer valued flag x, the default output has the form
// -x int
// usage-message-for-x (default 7)
// The usage message will appear on a separate line for anything but
// a bool flag with a one-byte name. For bool flags, the type is
// omitted and if the flag name is one byte the usage message appears
// on the same line. The parenthetical default is omitted if the
// default is the zero value for the type. The listed type, here int,
// can be changed by placing a back-quoted name in the flag's usage
// string; the first such item in the message is taken to be a parameter
// name to show in the message and the back quotes are stripped from
// the message when displayed. For instance, given
// flag.String("I", "", "search `directory` for include files")
// the output will be
// -I directory
// search directory for include files.
//
// To change the destination for flag messages, call CommandLine.SetOutput.
func PrintDefaults() {
// Delegates to the method on the default, package-level flag set.
CommandLine.PrintDefaults()
}
// defaultUsage is the default function to print a usage message.
func (f *FlagSet) defaultUsage() {
	out := f.Output()
	if f.name == "" {
		fmt.Fprint(out, "Usage:\n")
	} else {
		fmt.Fprintf(out, "Usage of %s:\n", f.name)
	}
	f.PrintDefaults()
}
// NOTE: Usage is not just defaultUsage(CommandLine)
// because it serves (via godoc flag Usage) as the example
// for how to write your own usage function.
// Usage prints a usage message documenting all defined command-line flags
// to CommandLine's output, which by default is os.Stderr.
// It is called when an error occurs while parsing flags.
// The function is a variable that may be changed to point to a custom function.
// By default it prints a simple header and calls PrintDefaults; for details about the
// format of the output and how to control it, see the documentation for PrintDefaults.
// Custom usage functions may choose to exit the program; by default exiting
// happens anyway as the command line's error handling strategy is set to
// ExitOnError.
var Usage = func() {
fmt.Fprintf(CommandLine.Output(), "Usage of %s:\n", os.Args[0])
PrintDefaults()
}
// NFlag returns the number of flags that have been set.
func (f *FlagSet) NFlag() int { return len(f.actual) }
// NFlag returns the number of command-line flags that have been set.
func NFlag() int { return len(CommandLine.actual) }
// Arg returns the i'th argument. Arg(0) is the first remaining argument
// after flags have been processed. Arg returns an empty string if the
// requested element does not exist.
func (f *FlagSet) Arg(i int) string {
	if 0 <= i && i < len(f.args) {
		return f.args[i]
	}
	return ""
}
// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument
// after flags have been processed. Arg returns an empty string if the
// requested element does not exist.
func Arg(i int) string {
return CommandLine.Arg(i)
}
// NArg is the number of arguments remaining after flags have been processed.
func (f *FlagSet) NArg() int { return len(f.args) }
// NArg is the number of arguments remaining after flags have been processed.
func NArg() int { return len(CommandLine.args) }
// Args returns the non-flag arguments.
// Note: the internal slice is returned directly, not a copy.
func (f *FlagSet) Args() []string { return f.args }
// Args returns the non-flag command-line arguments.
func Args() []string { return CommandLine.args }
// BoolVar defines a bool flag with specified name, default value, and usage string.
// The argument p points to a bool variable in which to store the value of the flag.
func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) {
f.Var(newBoolValue(value, p), name, usage)
}
// BoolVar defines a bool flag with specified name, default value, and usage string.
// The argument p points to a bool variable in which to store the value of the flag.
func BoolVar(p *bool, name string, value bool, usage string) {
CommandLine.Var(newBoolValue(value, p), name, usage)
}
// Bool defines a bool flag with specified name, default value, and usage string.
// The return value is the address of a bool variable that stores the value of the flag.
func (f *FlagSet) Bool(name string, value bool, usage string) *bool {
// Allocate the backing storage, register it, and hand back the pointer.
p := new(bool)
f.BoolVar(p, name, value, usage)
return p
}
// Bool defines a bool flag with specified name, default value, and usage string.
// The return value is the address of a bool variable that stores the value of the flag.
func Bool(name string, value bool, usage string) *bool {
return CommandLine.Bool(name, value, usage)
}
// IntVar defines an int flag with specified name, default value, and usage string.
// The argument p points to an int variable in which to store the value of the flag.
func (f *FlagSet) IntVar(p *int, name string, value int, usage string) {
f.Var(newIntValue(value, p), name, usage)
}
// IntVar defines an int flag with specified name, default value, and usage string.
// The argument p points to an int variable in which to store the value of the flag.
func IntVar(p *int, name string, value int, usage string) {
CommandLine.Var(newIntValue(value, p), name, usage)
}
// Int defines an int flag with specified name, default value, and usage string.
// The return value is the address of an int variable that stores the value of the flag.
func (f *FlagSet) Int(name string, value int, usage string) *int {
p := new(int)
f.IntVar(p, name, value, usage)
return p
}
// Int defines an int flag with specified name, default value, and usage string.
// The return value is the address of an int variable that stores the value of the flag.
func Int(name string, value int, usage string) *int {
return CommandLine.Int(name, value, usage)
}
// Int64Var defines an int64 flag with specified name, default value, and usage string.
// The argument p points to an int64 variable in which to store the value of the flag.
func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) {
f.Var(newInt64Value(value, p), name, usage)
}
// Int64Var defines an int64 flag with specified name, default value, and usage string.
// The argument p points to an int64 variable in which to store the value of the flag.
func Int64Var(p *int64, name string, value int64, usage string) {
CommandLine.Var(newInt64Value(value, p), name, usage)
}
// Int64 defines an int64 flag with specified name, default value, and usage string.
// The return value is the address of an int64 variable that stores the value of the flag.
func (f *FlagSet) Int64(name string, value int64, usage string) *int64 {
// Allocate the backing storage, register it, and hand back the pointer.
p := new(int64)
f.Int64Var(p, name, value, usage)
return p
}
// Int64 defines an int64 flag with specified name, default value, and usage string.
// The return value is the address of an int64 variable that stores the value of the flag.
func Int64(name string, value int64, usage string) *int64 {
return CommandLine.Int64(name, value, usage)
}
// UintVar defines a uint flag with specified name, default value, and usage string.
// The argument p points to a uint variable in which to store the value of the flag.
func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) {
f.Var(newUintValue(value, p), name, usage)
}
// UintVar defines a uint flag with specified name, default value, and usage string.
// The argument p points to a uint variable in which to store the value of the flag.
func UintVar(p *uint, name string, value uint, usage string) {
CommandLine.Var(newUintValue(value, p), name, usage)
}
// Uint defines a uint flag with specified name, default value, and usage string.
// The return value is the address of a uint variable that stores the value of the flag.
func (f *FlagSet) Uint(name string, value uint, usage string) *uint {
p := new(uint)
f.UintVar(p, name, value, usage)
return p
}
// Uint defines a uint flag with specified name, default value, and usage string.
// The return value is the address of a uint variable that stores the value of the flag.
func Uint(name string, value uint, usage string) *uint {
return CommandLine.Uint(name, value, usage)
}
// Uint64Var declares a uint64 flag on f with the given name, default value,
// and usage text. The parsed value is stored through p.
func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) {
	f.Var(newUint64Value(value, p), name, usage)
}

// Uint64Var declares a uint64 flag on the default command-line set.
// The parsed value is stored through p.
func Uint64Var(p *uint64, name string, value uint64, usage string) {
	CommandLine.Var(newUint64Value(value, p), name, usage)
}

// Uint64 declares a uint64 flag on f and returns the address of the variable
// holding its value.
func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 {
	var v uint64
	f.Uint64Var(&v, name, value, usage)
	return &v
}

// Uint64 declares a uint64 flag on the default command-line set and returns
// the address of the variable holding its value.
func Uint64(name string, value uint64, usage string) *uint64 {
	return CommandLine.Uint64(name, value, usage)
}

// StringVar declares a string flag on f with the given name, default value,
// and usage text. The parsed value is stored through p.
func (f *FlagSet) StringVar(p *string, name string, value string, usage string) {
	f.Var(newStringValue(value, p), name, usage)
}

// StringVar declares a string flag on the default command-line set.
// The parsed value is stored through p.
func StringVar(p *string, name string, value string, usage string) {
	CommandLine.Var(newStringValue(value, p), name, usage)
}

// String declares a string flag on f and returns the address of the variable
// holding its value.
func (f *FlagSet) String(name string, value string, usage string) *string {
	var v string
	f.StringVar(&v, name, value, usage)
	return &v
}

// String declares a string flag on the default command-line set and returns
// the address of the variable holding its value.
func String(name string, value string, usage string) *string {
	return CommandLine.String(name, value, usage)
}

// Float64Var declares a float64 flag on f with the given name, default value,
// and usage text. The parsed value is stored through p.
func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) {
	f.Var(newFloat64Value(value, p), name, usage)
}

// Float64Var declares a float64 flag on the default command-line set.
// The parsed value is stored through p.
func Float64Var(p *float64, name string, value float64, usage string) {
	CommandLine.Var(newFloat64Value(value, p), name, usage)
}

// Float64 declares a float64 flag on f and returns the address of the
// variable holding its value.
func (f *FlagSet) Float64(name string, value float64, usage string) *float64 {
	var v float64
	f.Float64Var(&v, name, value, usage)
	return &v
}

// Float64 declares a float64 flag on the default command-line set and returns
// the address of the variable holding its value.
func Float64(name string, value float64, usage string) *float64 {
	return CommandLine.Float64(name, value, usage)
}
// DurationVar declares a time.Duration flag on f with the given name, default
// value, and usage text. The parsed value is stored through p.
// The flag accepts any value acceptable to time.ParseDuration.
func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
	f.Var(newDurationValue(value, p), name, usage)
}

// DurationVar declares a time.Duration flag on the default command-line set.
// The parsed value is stored through p.
// The flag accepts any value acceptable to time.ParseDuration.
func DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
	CommandLine.Var(newDurationValue(value, p), name, usage)
}

// Duration declares a time.Duration flag on f and returns the address of the
// variable holding its value.
// The flag accepts any value acceptable to time.ParseDuration.
func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration {
	var v time.Duration
	f.DurationVar(&v, name, value, usage)
	return &v
}

// Duration declares a time.Duration flag on the default command-line set and
// returns the address of the variable holding its value.
// The flag accepts any value acceptable to time.ParseDuration.
func Duration(name string, value time.Duration, usage string) *time.Duration {
	return CommandLine.Duration(name, value, usage)
}
// Var defines a flag with the specified name and usage string. The first
// argument, of type Value, holds the flag's value (typically a user-defined
// implementation of Value). For instance, a caller could make a flag that
// turns a comma-separated string into a slice of strings by giving the slice
// the methods of Value; Set would then split the string into the slice.
// Redefining a name already present in the set prints a message and panics.
func (f *FlagSet) Var(value Value, name string, usage string) {
	// Capture the default as text immediately; the live value changes as
	// flags are parsed, but the recorded default must not.
	fl := &Flag{Name: name, Usage: usage, Value: value, DefValue: value.String()}
	if _, dup := f.formal[name]; dup {
		msg := fmt.Sprintf("flag redefined: %s", name)
		if f.name != "" {
			msg = fmt.Sprintf("%s flag redefined: %s", f.name, name)
		}
		fmt.Fprintln(f.Output(), msg)
		panic(msg) // Happens only if flags are declared with identical names
	}
	if f.formal == nil {
		f.formal = make(map[string]*Flag)
	}
	f.formal[name] = fl
}

// Var defines a flag on the default command-line set with the specified name
// and usage string; the Value argument carries the flag's type and value.
func Var(value Value, name string, usage string) {
	CommandLine.Var(value, name, usage)
}
// failf builds an error from the format and arguments, writes it to the flag
// set's output followed by the usage message, and returns the error.
func (f *FlagSet) failf(format string, a ...interface{}) error {
	e := fmt.Errorf(format, a...)
	fmt.Fprintln(f.Output(), e)
	f.usage()
	return e
}

// usage invokes the flag set's custom Usage function when one has been
// installed, and otherwise falls back to the built-in default message.
func (f *FlagSet) usage() {
	if f.Usage != nil {
		f.Usage()
		return
	}
	f.defaultUsage()
}
// parseOne parses one flag. It reports whether a flag was seen.
//
// On success it records the flag in f.actual and returns (true, nil).
// It returns (false, nil) when the argument list is exhausted, the next
// argument is not a flag, or "--" terminates flag parsing; it returns a
// non-nil error (already reported via failf) for syntax and value errors.
func (f *FlagSet) parseOne() (bool, error) {
	if len(f.args) == 0 {
		return false, nil
	}
	s := f.args[0]
	if len(s) < 2 || s[0] != '-' {
		// Not a flag: empty, bare "-", or a plain argument.
		return false, nil
	}
	numMinuses := 1
	if s[1] == '-' {
		numMinuses++
		if len(s) == 2 { // "--" terminates the flags
			f.args = f.args[1:]
			return false, nil
		}
	}
	name := s[numMinuses:]
	if len(name) == 0 || name[0] == '-' || name[0] == '=' {
		return false, f.failf("bad flag syntax: %s", s)
	}

	// It's a flag. Does it have an argument?
	f.args = f.args[1:]
	hasValue := false
	value := ""
	for i := 1; i < len(name); i++ { // equals cannot be first
		if name[i] == '=' {
			value = name[i+1:]
			hasValue = true
			name = name[0:i]
			break
		}
	}
	// Look the flag up directly in f.formal; the original took a pointless
	// local alias of the map (marked with a stale "// BUG" comment).
	flag, alreadythere := f.formal[name]
	if !alreadythere {
		if name == "help" || name == "h" { // special case for nice help message.
			f.usage()
			return false, ErrHelp
		}
		return false, f.failf("flag provided but not defined: -%s", name)
	}
	if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg
		if hasValue {
			if err := fv.Set(value); err != nil {
				return false, f.failf("invalid boolean value %q for -%s: %v", value, name, err)
			}
		} else {
			if err := fv.Set("true"); err != nil {
				return false, f.failf("invalid boolean flag %s: %v", name, err)
			}
		}
	} else {
		// It must have a value, which might be the next argument.
		if !hasValue && len(f.args) > 0 {
			// value is the next arg
			hasValue = true
			value, f.args = f.args[0], f.args[1:]
		}
		if !hasValue {
			return false, f.failf("flag needs an argument: -%s", name)
		}
		if err := flag.Value.Set(value); err != nil {
			return false, f.failf("invalid value %q for flag -%s: %v", value, name, err)
		}
	}
	if f.actual == nil {
		f.actual = make(map[string]*Flag)
	}
	f.actual[name] = flag
	return true, nil
}
// Parse parses flag definitions from the argument list, which should not
// include the command name. Must be called after all flags in the FlagSet
// are defined and before flags are accessed by the program.
// The return value will be ErrHelp if -help or -h were set but not defined.
func (f *FlagSet) Parse(arguments []string) error {
f.parsed = true
f.args = arguments
// Consume one flag per iteration until parseOne reports that nothing
// more was seen (end of flags / first non-flag argument) or an error.
for {
seen, err := f.parseOne()
if seen {
continue
}
if err == nil {
break
}
// A parse error: dispose of it according to the configured policy.
switch f.errorHandling {
case ContinueOnError:
return err
case ExitOnError:
// os.Exit does not return; no further handling is needed.
os.Exit(2)
case PanicOnError:
panic(err)
}
}
return nil
}
// Parsed reports whether f.Parse has been called.
// It does not indicate whether that call succeeded.
func (f *FlagSet) Parsed() bool {
return f.parsed
}
// Parse parses the command-line flags from os.Args[1:]. Must be called
// after all flags are defined and before flags are accessed by the program.
func Parse() {
// Ignore errors; CommandLine is set for ExitOnError.
CommandLine.Parse(os.Args[1:])
}
// Parsed reports whether the command-line flags have been parsed.
func Parsed() bool {
return CommandLine.Parsed()
}
// CommandLine is the default set of command-line flags, parsed from os.Args.
// The top-level functions such as BoolVar, Arg, and so on are wrappers for the
// methods of CommandLine.
var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
func init() {
// Override generic FlagSet default Usage with call to global Usage.
// Note: This is not CommandLine.Usage = Usage,
// because we want any eventual call to use any updated value of Usage,
// not the value it has when this line is run.
CommandLine.Usage = commandLineUsage
}
// commandLineUsage is the indirection installed by init above: it reads the
// package-level Usage variable at call time, so later reassignments of Usage
// take effect.
func commandLineUsage() {
Usage()
}
// NewFlagSet returns a new, empty flag set with the specified name and
// error handling property. If the name is not empty, it will be printed
// in the default usage message and in error messages.
func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
	f := new(FlagSet)
	f.Init(name, errorHandling)
	f.Usage = f.defaultUsage
	return f
}

// Init sets the name and error handling property for a flag set.
// By default, the zero FlagSet uses an empty name and the
// ContinueOnError error handling policy.
func (f *FlagSet) Init(name string, errorHandling ErrorHandling) {
	f.name = name
	f.errorHandling = errorHandling
}
flag: change flag variable name in package doc, for clarity

Rename the example flag variable in the package documentation from
flagname to nFlag, because flagname was confusing.

Change-Id: I20dd4c4b4f605395d427a125ba4fd14580e5d766
Reviewed-on: https://go-review.googlesource.com/c/go/+/221678
Reviewed-by: Rob Pike <4dc7c9ec434ed06502767136789763ec11d2c4b7@golang.org>
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package flag implements command-line flag parsing.
Usage
Define flags using flag.String(), Bool(), Int(), etc.
This declares an integer flag, -n, stored in the pointer nFlag, with type *int:
import "flag"
var nFlag = flag.Int("n", 1234, "help message for flag n")
If you like, you can bind the flag to a variable using the Var() functions.
var flagvar int
func init() {
flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
}
Or you can create custom flags that satisfy the Value interface (with
pointer receivers) and couple them to flag parsing by
flag.Var(&flagVal, "name", "help message for flagname")
For such flags, the default value is just the initial value of the variable.
After all flags are defined, call
flag.Parse()
to parse the command line into the defined flags.
Flags may then be used directly. If you're using the flags themselves,
they are all pointers; if you bind to variables, they're values.
fmt.Println("ip has value ", *ip)
fmt.Println("flagvar has value ", flagvar)
After parsing, the arguments following the flags are available as the
slice flag.Args() or individually as flag.Arg(i).
The arguments are indexed from 0 through flag.NArg()-1.
Command line flag syntax
The following forms are permitted:
-flag
-flag=x
-flag x // non-boolean flags only
One or two minus signs may be used; they are equivalent.
The last form is not permitted for boolean flags because the
meaning of the command
cmd -x *
where * is a Unix shell wildcard, will change if there is a file
called 0, false, etc. You must use the -flag=false form to turn
off a boolean flag.
Flag parsing stops just before the first non-flag argument
("-" is a non-flag argument) or after the terminator "--".
Integer flags accept 1234, 0664, 0x1234 and may be negative.
Boolean flags may be:
1, 0, t, f, T, F, true, false, TRUE, FALSE, True, False
Duration flags accept any input valid for time.ParseDuration.
The default set of command-line flags is controlled by
top-level functions. The FlagSet type allows one to define
independent sets of flags, such as to implement subcommands
in a command-line interface. The methods of FlagSet are
analogous to the top-level functions for the command-line
flag set.
*/
package flag
import (
"errors"
"fmt"
"io"
"os"
"reflect"
"sort"
"strconv"
"strings"
"time"
)
// ErrHelp is the error returned if the -help or -h flag is invoked
// but no such flag is defined.
var ErrHelp = errors.New("flag: help requested")
// errParse is returned by Set if a flag's value fails to parse, such as with an invalid integer for Int.
// It then gets wrapped through failf to provide more information.
var errParse = errors.New("parse error")
// errRange is returned by Set if a flag's value is out of range.
// It then gets wrapped through failf to provide more information.
var errRange = errors.New("value out of range")
func numError(err error) error {
ne, ok := err.(*strconv.NumError)
if !ok {
return err
}
if ne.Err == strconv.ErrSyntax {
return errParse
}
if ne.Err == strconv.ErrRange {
return errRange
}
return err
}
// -- bool Value
type boolValue bool

// newBoolValue stores the default in *p and returns *p viewed as a boolValue,
// so flag parsing and the caller's variable share one location.
func newBoolValue(val bool, p *bool) *boolValue {
	*p = val
	return (*boolValue)(p)
}

// Set parses s with strconv.ParseBool; any parse failure is reported as the
// package sentinel errParse.
func (b *boolValue) Set(s string) error {
	v, err := strconv.ParseBool(s)
	*b = boolValue(v)
	if err != nil {
		return errParse
	}
	return nil
}

func (b *boolValue) Get() interface{} { return bool(*b) }

func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) }

// IsBoolFlag marks the flag as boolean, letting the parser accept a bare
// -name with no "=value" text.
func (b *boolValue) IsBoolFlag() bool { return true }

// boolFlag is the optional interface that identifies boolean flags, which
// may be supplied on the command line without "=value" text.
type boolFlag interface {
	Value
	IsBoolFlag() bool
}
// -- int Value
type intValue int

// newIntValue stores the default in *p and returns *p viewed as an intValue.
func newIntValue(val int, p *int) *intValue {
	*p = val
	return (*intValue)(p)
}

// Set parses s as a platform-sized int; base 0 lets strconv detect the base
// from the prefix (0x, 0, decimal). Errors are normalized via numError.
func (i *intValue) Set(s string) error {
	v, err := strconv.ParseInt(s, 0, strconv.IntSize)
	*i = intValue(v)
	if err != nil {
		return numError(err)
	}
	return nil
}

func (i *intValue) Get() interface{} { return int(*i) }

func (i *intValue) String() string { return strconv.Itoa(int(*i)) }

// -- int64 Value
type int64Value int64

// newInt64Value stores the default in *p and returns *p viewed as an int64Value.
func newInt64Value(val int64, p *int64) *int64Value {
	*p = val
	return (*int64Value)(p)
}

// Set parses s as a 64-bit signed integer with base detected from the prefix.
func (i *int64Value) Set(s string) error {
	v, err := strconv.ParseInt(s, 0, 64)
	*i = int64Value(v)
	if err != nil {
		return numError(err)
	}
	return nil
}

func (i *int64Value) Get() interface{} { return int64(*i) }

func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) }
// -- uint Value
type uintValue uint

// newUintValue stores the default in *p and returns *p viewed as a uintValue.
func newUintValue(val uint, p *uint) *uintValue {
	*p = val
	return (*uintValue)(p)
}

// Set parses s as a platform-sized unsigned int with base detected from the
// prefix. Errors are normalized via numError.
func (i *uintValue) Set(s string) error {
	v, err := strconv.ParseUint(s, 0, strconv.IntSize)
	*i = uintValue(v)
	if err != nil {
		return numError(err)
	}
	return nil
}

func (i *uintValue) Get() interface{} { return uint(*i) }

func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) }

// -- uint64 Value
type uint64Value uint64

// newUint64Value stores the default in *p and returns *p viewed as a uint64Value.
func newUint64Value(val uint64, p *uint64) *uint64Value {
	*p = val
	return (*uint64Value)(p)
}

// Set parses s as a 64-bit unsigned integer with base detected from the prefix.
func (i *uint64Value) Set(s string) error {
	v, err := strconv.ParseUint(s, 0, 64)
	*i = uint64Value(v)
	if err != nil {
		return numError(err)
	}
	return nil
}

func (i *uint64Value) Get() interface{} { return uint64(*i) }

func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
// -- string Value
type stringValue string

// newStringValue stores the default in *p and returns *p viewed as a
// stringValue, so parsing writes directly into the caller's variable.
func newStringValue(val string, p *string) *stringValue {
	*p = val
	return (*stringValue)(p)
}

// Set records val verbatim; a string flag cannot fail to parse.
func (s *stringValue) Set(val string) error {
	*s = stringValue(val)
	return nil
}

func (s *stringValue) Get() interface{} { return string(*s) }

func (s *stringValue) String() string { return string(*s) }
// -- float64 Value
type float64Value float64

// newFloat64Value stores the default in *p and returns *p viewed as a float64Value.
func newFloat64Value(val float64, p *float64) *float64Value {
	*p = val
	return (*float64Value)(p)
}

// Set parses s as a 64-bit float; strconv failures are normalized via numError.
func (f *float64Value) Set(s string) error {
	v, err := strconv.ParseFloat(s, 64)
	*f = float64Value(v)
	if err != nil {
		return numError(err)
	}
	return nil
}

func (f *float64Value) Get() interface{} { return float64(*f) }

func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) }

// -- time.Duration Value
type durationValue time.Duration

// newDurationValue stores the default in *p and returns *p viewed as a durationValue.
func newDurationValue(val time.Duration, p *time.Duration) *durationValue {
	*p = val
	return (*durationValue)(p)
}

// Set parses s with time.ParseDuration; failures are reported as errParse.
func (d *durationValue) Set(s string) error {
	v, err := time.ParseDuration(s)
	*d = durationValue(v)
	if err != nil {
		return errParse
	}
	return nil
}

func (d *durationValue) Get() interface{} { return time.Duration(*d) }

func (d *durationValue) String() string { return (*time.Duration)(d).String() }
// Value is the interface to the dynamic value stored in a flag.
// (The default value is represented as a string.)
//
// If a Value has an IsBoolFlag() bool method returning true,
// the command-line parser makes -name equivalent to -name=true
// rather than using the next command-line argument.
//
// Set is called once, in command line order, for each flag present.
// The flag package may call the String method with a zero-valued receiver,
// such as a nil pointer.
type Value interface {
String() string
Set(string) error
}
// Getter is an interface that allows the contents of a Value to be retrieved.
// It wraps the Value interface, rather than being part of it, because it
// appeared after Go 1 and its compatibility rules. All Value types provided
// by this package satisfy the Getter interface.
type Getter interface {
Value
// Get returns the flag's current value with its natural Go type.
Get() interface{}
}
// ErrorHandling defines how FlagSet.Parse behaves if the parse fails.
type ErrorHandling int
// These constants cause FlagSet.Parse to behave as described if the parse fails.
// They are consulted by Parse's error-handling switch after parseOne reports an error.
const (
ContinueOnError ErrorHandling = iota // Return a descriptive error.
ExitOnError // Call os.Exit(2).
PanicOnError // Call panic with a descriptive error.
)
// A FlagSet represents a set of defined flags. The zero value of a FlagSet
// has no name and has ContinueOnError error handling.
//
// Flag names must be unique within a FlagSet. An attempt to define a flag whose
// name is already in use will cause a panic.
type FlagSet struct {
// Usage is the function called when an error occurs while parsing flags.
// The field is a function (not a method) that may be changed to point to
// a custom error handler. What happens after Usage is called depends
// on the ErrorHandling setting; for the command line, this defaults
// to ExitOnError, which exits the program after calling Usage.
Usage func()
name string // printed in usage and error messages; may be empty
parsed bool // whether Parse has been called
actual map[string]*Flag // flags that have been set; lazily allocated
formal map[string]*Flag // all declared flags; lazily allocated by Var
args []string // arguments after flags
errorHandling ErrorHandling // policy applied by Parse on error
output io.Writer // nil means stderr; use Output() accessor
}
// A Flag represents the state of a flag.
type Flag struct {
Name string // name as it appears on command line
Usage string // help message
Value Value // value as set
DefValue string // default value (as text); for usage message
}
// sortFlags returns the flags as a slice in lexicographical sorted order,
// keyed on Flag.Name.
func sortFlags(flags map[string]*Flag) []*Flag {
	list := make([]*Flag, 0, len(flags))
	for _, fl := range flags {
		list = append(list, fl)
	}
	sort.Slice(list, func(a, b int) bool {
		return list[a].Name < list[b].Name
	})
	return list
}
// Output returns the destination for usage and error messages. os.Stderr is
// returned when no output has been set (or it was set to nil).
func (f *FlagSet) Output() io.Writer {
	if out := f.output; out != nil {
		return out
	}
	return os.Stderr
}

// Name returns the name of the flag set.
func (f *FlagSet) Name() string {
	return f.name
}

// ErrorHandling returns the error handling behavior of the flag set.
func (f *FlagSet) ErrorHandling() ErrorHandling {
	return f.errorHandling
}
// SetOutput sets the destination for usage and error messages.
// If output is nil, os.Stderr is used.
func (f *FlagSet) SetOutput(output io.Writer) {
f.output = output
}
// VisitAll visits the flags in lexicographical order, calling fn for each.
// It visits all flags, even those not set.
// Ordering is provided by sortFlags, which sorts by Flag.Name.
func (f *FlagSet) VisitAll(fn func(*Flag)) {
for _, flag := range sortFlags(f.formal) {
fn(flag)
}
}
// VisitAll visits the command-line flags in lexicographical order, calling
// fn for each. It visits all flags, even those not set.
func VisitAll(fn func(*Flag)) {
CommandLine.VisitAll(fn)
}
// Visit visits the flags in lexicographical order, calling fn for each.
// It visits only those flags that have been set.
// Only flags recorded in f.actual (by Set or command-line parsing) are visited.
func (f *FlagSet) Visit(fn func(*Flag)) {
for _, flag := range sortFlags(f.actual) {
fn(flag)
}
}
// Visit visits the command-line flags in lexicographical order, calling fn
// for each. It visits only those flags that have been set.
func Visit(fn func(*Flag)) {
CommandLine.Visit(fn)
}
// Lookup returns the Flag structure of the named flag, returning nil if none exists.
func (f *FlagSet) Lookup(name string) *Flag {
	return f.formal[name]
}

// Lookup returns the Flag structure of the named command-line flag,
// returning nil if none exists.
// It delegates to CommandLine.Lookup, consistent with the other top-level
// wrappers in this package (the original reached into CommandLine.formal
// directly).
func Lookup(name string) *Flag {
	return CommandLine.Lookup(name)
}
// Set sets the value of the named flag. It fails when no flag with that name
// was defined or when the value does not parse; on success the flag is also
// recorded as "set" so Visit will report it.
func (f *FlagSet) Set(name, value string) error {
	flag, ok := f.formal[name]
	if !ok {
		return fmt.Errorf("no such flag -%v", name)
	}
	if err := flag.Value.Set(value); err != nil {
		return err
	}
	if f.actual == nil {
		f.actual = make(map[string]*Flag)
	}
	f.actual[name] = flag
	return nil
}

// Set sets the value of the named command-line flag.
func Set(name, value string) error {
	return CommandLine.Set(name, value)
}
// isZeroValue determines whether the string represents the zero
// value for a flag.
func isZeroValue(flag *Flag, value string) bool {
// Build a zero value of the flag's Value type, and see if the
// result of calling its String method equals the value passed in.
// This works unless the Value type is itself an interface type.
typ := reflect.TypeOf(flag.Value)
var z reflect.Value
if typ.Kind() == reflect.Ptr {
// Pointer implementations (all of this package's) need an addressable
// zero element, hence reflect.New of the pointee type.
z = reflect.New(typ.Elem())
} else {
z = reflect.Zero(typ)
}
// NOTE(review): String is invoked on a freshly constructed zero Value; a
// custom Value whose String cannot handle its zero state could panic here —
// confirm behavior for third-party implementations.
return value == z.Interface().(Value).String()
}
// UnquoteUsage extracts a back-quoted name from the usage
// string for a flag and returns it and the un-quoted usage.
// Given "a `name` to show" it returns ("name", "a name to show").
// If there are no back quotes, the name is an educated guess of the
// type of the flag's value, or the empty string if the flag is boolean.
func UnquoteUsage(flag *Flag) (name string, usage string) {
// Look for a back-quoted name, but avoid the strings package.
usage = flag.Usage
for i := 0; i < len(usage); i++ {
if usage[i] == '`' {
for j := i + 1; j < len(usage); j++ {
if usage[j] == '`' {
// Found a matching pair: the quoted text becomes the name and the
// back quotes are stripped from the returned usage string.
name = usage[i+1 : j]
usage = usage[:i] + name + usage[j+1:]
return name, usage
}
}
break // Only one back quote; use type name.
}
}
// No explicit name, so use type if we can find one.
name = "value"
switch flag.Value.(type) {
case boolFlag:
name = "" // boolean flags take no argument, so show no placeholder
case *durationValue:
name = "duration"
case *float64Value:
name = "float"
case *intValue, *int64Value:
name = "int"
case *stringValue:
name = "string"
case *uintValue, *uint64Value:
name = "uint"
}
return
}
// PrintDefaults prints, to standard error unless configured otherwise, the
// default values of all defined command-line flags in the set. See the
// documentation for the global function PrintDefaults for more information.
func (f *FlagSet) PrintDefaults() {
f.VisitAll(func(flag *Flag) {
s := fmt.Sprintf(" -%s", flag.Name) // Two spaces before -; see next two comments.
name, usage := UnquoteUsage(flag)
if len(name) > 0 {
s += " " + name
}
// Boolean flags of one ASCII letter are so common we
// treat them specially, putting their usage on the same line.
if len(s) <= 4 { // space, space, '-', 'x'.
s += "\t"
} else {
// Four spaces before the tab triggers good alignment
// for both 4- and 8-space tab stops.
s += "\n \t"
}
// Keep multi-line usage text indented under the flag name.
s += strings.ReplaceAll(usage, "\n", "\n \t")
// Only mention the default when it differs from the type's zero value.
if !isZeroValue(flag, flag.DefValue) {
if _, ok := flag.Value.(*stringValue); ok {
// put quotes on the value
s += fmt.Sprintf(" (default %q)", flag.DefValue)
} else {
s += fmt.Sprintf(" (default %v)", flag.DefValue)
}
}
fmt.Fprint(f.Output(), s, "\n")
})
}
// PrintDefaults prints, to standard error unless configured otherwise,
// a usage message showing the default settings of all defined
// command-line flags.
// For an integer valued flag x, the default output has the form
// -x int
// usage-message-for-x (default 7)
// The usage message will appear on a separate line for anything but
// a bool flag with a one-byte name. For bool flags, the type is
// omitted and if the flag name is one byte the usage message appears
// on the same line. The parenthetical default is omitted if the
// default is the zero value for the type. The listed type, here int,
// can be changed by placing a back-quoted name in the flag's usage
// string; the first such item in the message is taken to be a parameter
// name to show in the message and the back quotes are stripped from
// the message when displayed. For instance, given
// flag.String("I", "", "search `directory` for include files")
// the output will be
// -I directory
// search directory for include files.
//
// To change the destination for flag messages, call CommandLine.SetOutput.
func PrintDefaults() {
CommandLine.PrintDefaults()
}
// defaultUsage is the default function to print a usage message.
// It prints a header naming the flag set (when it has a name) followed by
// the defaults of every registered flag.
func (f *FlagSet) defaultUsage() {
	if f.name != "" {
		fmt.Fprintf(f.Output(), "Usage of %s:\n", f.name)
	} else {
		fmt.Fprintf(f.Output(), "Usage:\n")
	}
	f.PrintDefaults()
}
// NOTE: Usage is not just defaultUsage(CommandLine)
// because it serves (via godoc flag Usage) as the example
// for how to write your own usage function.
// Usage prints a usage message documenting all defined command-line flags
// to CommandLine's output, which by default is os.Stderr.
// It is called when an error occurs while parsing flags.
// The function is a variable that may be changed to point to a custom function.
// Because it is a variable, reassignments take effect for all later calls
// (see the init function that installs commandLineUsage).
// By default it prints a simple header and calls PrintDefaults; for details about the
// format of the output and how to control it, see the documentation for PrintDefaults.
// Custom usage functions may choose to exit the program; by default exiting
// happens anyway as the command line's error handling strategy is set to
// ExitOnError.
var Usage = func() {
fmt.Fprintf(CommandLine.Output(), "Usage of %s:\n", os.Args[0])
PrintDefaults()
}
// NFlag returns the number of flags that have been set.
func (f *FlagSet) NFlag() int { return len(f.actual) }

// NFlag returns the number of command-line flags that have been set.
// It delegates to the CommandLine method, consistent with the package's
// other top-level wrappers (the original read CommandLine.actual directly).
func NFlag() int { return CommandLine.NFlag() }

// Arg returns the i'th argument. Arg(0) is the first remaining argument
// after flags have been processed. Arg returns an empty string if the
// requested element does not exist.
func (f *FlagSet) Arg(i int) string {
	if i < 0 || i >= len(f.args) {
		return ""
	}
	return f.args[i]
}

// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument
// after flags have been processed. Arg returns an empty string if the
// requested element does not exist.
func Arg(i int) string {
	return CommandLine.Arg(i)
}

// NArg is the number of arguments remaining after flags have been processed.
func (f *FlagSet) NArg() int { return len(f.args) }

// NArg is the number of command-line arguments remaining after flags have
// been processed. It delegates to the CommandLine method for consistency.
func NArg() int { return CommandLine.NArg() }

// Args returns the non-flag arguments.
func (f *FlagSet) Args() []string { return f.args }

// Args returns the non-flag command-line arguments.
// It delegates to the CommandLine method for consistency.
func Args() []string { return CommandLine.Args() }
// BoolVar declares a bool flag on f with the given name, default value, and
// usage text. The parsed value is stored through p.
func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) {
	f.Var(newBoolValue(value, p), name, usage)
}

// BoolVar declares a bool flag on the default command-line set.
// The parsed value is stored through p.
func BoolVar(p *bool, name string, value bool, usage string) {
	CommandLine.Var(newBoolValue(value, p), name, usage)
}

// Bool declares a bool flag on f and returns the address of the variable
// holding its value.
func (f *FlagSet) Bool(name string, value bool, usage string) *bool {
	var v bool
	f.BoolVar(&v, name, value, usage)
	return &v
}

// Bool declares a bool flag on the default command-line set and returns the
// address of the variable holding its value.
func Bool(name string, value bool, usage string) *bool {
	return CommandLine.Bool(name, value, usage)
}

// IntVar declares an int flag on f with the given name, default value, and
// usage text. The parsed value is stored through p.
func (f *FlagSet) IntVar(p *int, name string, value int, usage string) {
	f.Var(newIntValue(value, p), name, usage)
}

// IntVar declares an int flag on the default command-line set.
// The parsed value is stored through p.
func IntVar(p *int, name string, value int, usage string) {
	CommandLine.Var(newIntValue(value, p), name, usage)
}

// Int declares an int flag on f and returns the address of the variable
// holding its value.
func (f *FlagSet) Int(name string, value int, usage string) *int {
	var v int
	f.IntVar(&v, name, value, usage)
	return &v
}

// Int declares an int flag on the default command-line set and returns the
// address of the variable holding its value.
func Int(name string, value int, usage string) *int {
	return CommandLine.Int(name, value, usage)
}

// Int64Var declares an int64 flag on f with the given name, default value,
// and usage text. The parsed value is stored through p.
func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) {
	f.Var(newInt64Value(value, p), name, usage)
}

// Int64Var declares an int64 flag on the default command-line set.
// The parsed value is stored through p.
func Int64Var(p *int64, name string, value int64, usage string) {
	CommandLine.Var(newInt64Value(value, p), name, usage)
}

// Int64 declares an int64 flag on f and returns the address of the variable
// holding its value.
func (f *FlagSet) Int64(name string, value int64, usage string) *int64 {
	var v int64
	f.Int64Var(&v, name, value, usage)
	return &v
}

// Int64 declares an int64 flag on the default command-line set and returns
// the address of the variable holding its value.
func Int64(name string, value int64, usage string) *int64 {
	return CommandLine.Int64(name, value, usage)
}
// UintVar defines a uint flag with specified name, default value, and usage string.
// The argument p points to a uint variable in which to store the value of the flag.
func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) {
f.Var(newUintValue(value, p), name, usage)
}
// UintVar defines a uint flag with specified name, default value, and usage string.
// The argument p points to a uint variable in which to store the value of the flag.
func UintVar(p *uint, name string, value uint, usage string) {
CommandLine.Var(newUintValue(value, p), name, usage)
}
// Uint defines a uint flag with specified name, default value, and usage string.
// The return value is the address of a uint variable that stores the value of the flag.
func (f *FlagSet) Uint(name string, value uint, usage string) *uint {
p := new(uint)
f.UintVar(p, name, value, usage)
return p
}
// Uint defines a uint flag with specified name, default value, and usage string.
// The return value is the address of a uint variable that stores the value of the flag.
func Uint(name string, value uint, usage string) *uint {
return CommandLine.Uint(name, value, usage)
}
// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
// The argument p points to a uint64 variable in which to store the value of the flag.
func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) {
f.Var(newUint64Value(value, p), name, usage)
}
// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
// The argument p points to a uint64 variable in which to store the value of the flag.
func Uint64Var(p *uint64, name string, value uint64, usage string) {
CommandLine.Var(newUint64Value(value, p), name, usage)
}
// Uint64 defines a uint64 flag with specified name, default value, and usage string.
// The return value is the address of a uint64 variable that stores the value of the flag.
func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 {
p := new(uint64)
f.Uint64Var(p, name, value, usage)
return p
}
// Uint64 defines a uint64 flag with specified name, default value, and usage string.
// The return value is the address of a uint64 variable that stores the value of the flag.
func Uint64(name string, value uint64, usage string) *uint64 {
return CommandLine.Uint64(name, value, usage)
}
// StringVar defines a string flag with specified name, default value, and usage string.
// The argument p points to a string variable in which to store the value of the flag.
func (f *FlagSet) StringVar(p *string, name string, value string, usage string) {
f.Var(newStringValue(value, p), name, usage)
}
// StringVar defines a string flag with specified name, default value, and usage string.
// The argument p points to a string variable in which to store the value of the flag.
func StringVar(p *string, name string, value string, usage string) {
CommandLine.Var(newStringValue(value, p), name, usage)
}
// String defines a string flag with specified name, default value, and usage string.
// The return value is the address of a string variable that stores the value of the flag.
func (f *FlagSet) String(name string, value string, usage string) *string {
p := new(string)
f.StringVar(p, name, value, usage)
return p
}
// String defines a string flag with specified name, default value, and usage string.
// The return value is the address of a string variable that stores the value of the flag.
func String(name string, value string, usage string) *string {
return CommandLine.String(name, value, usage)
}
// Float64Var defines a float64 flag with specified name, default value, and usage string.
// The argument p points to a float64 variable in which to store the value of the flag.
func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) {
f.Var(newFloat64Value(value, p), name, usage)
}
// Float64Var defines a float64 flag with specified name, default value, and usage string.
// The argument p points to a float64 variable in which to store the value of the flag.
func Float64Var(p *float64, name string, value float64, usage string) {
CommandLine.Var(newFloat64Value(value, p), name, usage)
}
// Float64 defines a float64 flag with specified name, default value, and usage string.
// The return value is the address of a float64 variable that stores the value of the flag.
func (f *FlagSet) Float64(name string, value float64, usage string) *float64 {
p := new(float64)
f.Float64Var(p, name, value, usage)
return p
}
// Float64 defines a float64 flag with specified name, default value, and usage string.
// The return value is the address of a float64 variable that stores the value of the flag.
func Float64(name string, value float64, usage string) *float64 {
return CommandLine.Float64(name, value, usage)
}
// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
// The argument p points to a time.Duration variable in which to store the value of the flag.
// The flag accepts a value acceptable to time.ParseDuration.
func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
f.Var(newDurationValue(value, p), name, usage)
}
// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
// The argument p points to a time.Duration variable in which to store the value of the flag.
// The flag accepts a value acceptable to time.ParseDuration.
func DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
CommandLine.Var(newDurationValue(value, p), name, usage)
}
// Duration defines a time.Duration flag with specified name, default value, and usage string.
// The return value is the address of a time.Duration variable that stores the value of the flag.
// The flag accepts a value acceptable to time.ParseDuration.
func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration {
p := new(time.Duration)
f.DurationVar(p, name, value, usage)
return p
}
// Duration defines a time.Duration flag with specified name, default value, and usage string.
// The return value is the address of a time.Duration variable that stores the value of the flag.
// The flag accepts a value acceptable to time.ParseDuration.
func Duration(name string, value time.Duration, usage string) *time.Duration {
return CommandLine.Duration(name, value, usage)
}
// Var defines a flag with the specified name and usage string. The type and
// value of the flag are represented by the first argument, of type Value,
// which typically holds a user-defined implementation of Value. For instance,
// the caller could create a flag that turns a comma-separated string into a
// slice of strings by giving the slice the methods of Value; in particular,
// Set would decompose the comma-separated string into the slice.
func (f *FlagSet) Var(value Value, name string, usage string) {
	// Capture the default value as a string once, at registration time.
	newFlag := &Flag{name, usage, value, value.String()}
	if _, exists := f.formal[name]; exists {
		// Registering the same name twice is a programming error.
		msg := fmt.Sprintf("flag redefined: %s", name)
		if f.name != "" {
			msg = fmt.Sprintf("%s flag redefined: %s", f.name, name)
		}
		fmt.Fprintln(f.Output(), msg)
		panic(msg) // Happens only if flags are declared with identical names
	}
	if f.formal == nil {
		f.formal = make(map[string]*Flag)
	}
	f.formal[name] = newFlag
}
// Var defines a flag with the specified name and usage string. The type and
// value of the flag are represented by the first argument, of type Value, which
// typically holds a user-defined implementation of Value. For instance, the
// caller could create a flag that turns a comma-separated string into a slice
// of strings by giving the slice the methods of Value; in particular, Set would
// decompose the comma-separated string into the slice.
// This top-level form registers the flag on CommandLine.
func Var(value Value, name string, usage string) {
	CommandLine.Var(value, name, usage)
}
// failf prints to the flag set's output a formatted error followed by the
// usage message, and returns the error so callers can propagate it.
func (f *FlagSet) failf(format string, a ...interface{}) error {
	err := fmt.Errorf(format, a...)
	fmt.Fprintln(f.Output(), err)
	f.usage()
	return err
}
// usage calls the Usage method for the flag set if one is specified,
// or the appropriate default usage function otherwise.
func (f *FlagSet) usage() {
	if f.Usage == nil {
		f.defaultUsage()
	} else {
		f.Usage()
	}
}
// parseOne parses one flag from f.args. It reports whether a flag was seen;
// a (false, nil) return means parsing is finished (no more flags, or "--").
// The statement order here is significant: args are consumed incrementally.
func (f *FlagSet) parseOne() (bool, error) {
	if len(f.args) == 0 {
		return false, nil
	}
	s := f.args[0]
	// A flag must be at least "-x"; anything else ends flag parsing.
	if len(s) < 2 || s[0] != '-' {
		return false, nil
	}
	numMinuses := 1
	if s[1] == '-' {
		numMinuses++
		if len(s) == 2 { // "--" terminates the flags
			f.args = f.args[1:]
			return false, nil
		}
	}
	name := s[numMinuses:]
	// Reject "-", "---x", and "-=v" style tokens.
	if len(name) == 0 || name[0] == '-' || name[0] == '=' {
		return false, f.failf("bad flag syntax: %s", s)
	}
	// it's a flag. does it have an argument?
	f.args = f.args[1:]
	hasValue := false
	value := ""
	for i := 1; i < len(name); i++ { // equals cannot be first
		if name[i] == '=' {
			value = name[i+1:]
			hasValue = true
			name = name[0:i]
			break
		}
	}
	m := f.formal
	flag, alreadythere := m[name] // BUG
	if !alreadythere {
		if name == "help" || name == "h" { // special case for nice help message.
			f.usage()
			return false, ErrHelp
		}
		return false, f.failf("flag provided but not defined: -%s", name)
	}
	if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg
		if hasValue {
			if err := fv.Set(value); err != nil {
				return false, f.failf("invalid boolean value %q for -%s: %v", value, name, err)
			}
		} else {
			// Bare boolean flag, e.g. "-v", is implicitly true.
			if err := fv.Set("true"); err != nil {
				return false, f.failf("invalid boolean flag %s: %v", name, err)
			}
		}
	} else {
		// It must have a value, which might be the next argument.
		if !hasValue && len(f.args) > 0 {
			// value is the next arg
			hasValue = true
			value, f.args = f.args[0], f.args[1:]
		}
		if !hasValue {
			return false, f.failf("flag needs an argument: -%s", name)
		}
		if err := flag.Value.Set(value); err != nil {
			return false, f.failf("invalid value %q for flag -%s: %v", value, name, err)
		}
	}
	// Record the flag as actually set.
	if f.actual == nil {
		f.actual = make(map[string]*Flag)
	}
	f.actual[name] = flag
	return true, nil
}
// Parse parses flag definitions from the argument list, which should not
// include the command name. Must be called after all flags in the FlagSet
// are defined and before flags are accessed by the program.
// The return value will be ErrHelp if -help or -h were set but not defined.
// On error the behavior depends on f.errorHandling: return, exit(2), or panic.
func (f *FlagSet) Parse(arguments []string) error {
	f.parsed = true
	f.args = arguments
	for {
		seen, err := f.parseOne()
		if seen {
			continue
		}
		if err == nil {
			break
		}
		switch f.errorHandling {
		case ContinueOnError:
			return err
		case ExitOnError:
			os.Exit(2)
		case PanicOnError:
			panic(err)
		}
	}
	return nil
}
// Parsed reports whether f.Parse has been called.
func (f *FlagSet) Parsed() bool {
	return f.parsed
}
// Parse parses the command-line flags from os.Args[1:]. Must be called
// after all flags are defined and before flags are accessed by the program.
func Parse() {
	// Ignore errors; CommandLine is set for ExitOnError.
	CommandLine.Parse(os.Args[1:])
}
// Parsed reports whether the command-line flags have been parsed.
func Parsed() bool {
	return CommandLine.Parsed()
}
// CommandLine is the default set of command-line flags, parsed from os.Args.
// The top-level functions such as BoolVar, Arg, and so on are wrappers for the
// methods of CommandLine.
var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
func init() {
	// Override generic FlagSet default Usage with call to global Usage.
	// Note: This is not CommandLine.Usage = Usage,
	// because we want any eventual call to use any updated value of Usage,
	// not the value it has when this line is run.
	CommandLine.Usage = commandLineUsage
}
// commandLineUsage is the indirection target used by init above; it reads
// the package-level Usage variable at call time.
func commandLineUsage() {
	Usage()
}
// NewFlagSet returns a new, empty flag set with the specified name and
// error handling property. If the name is not empty, it will be printed
// in the default usage message and in error messages.
func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
	fs := &FlagSet{name: name, errorHandling: errorHandling}
	// Start with the default usage printer; callers may override fs.Usage.
	fs.Usage = fs.defaultUsage
	return fs
}
// Init sets the name and error handling property for a flag set.
// By default, the zero FlagSet uses an empty name and the
// ContinueOnError error handling policy.
func (f *FlagSet) Init(name string, errorHandling ErrorHandling) {
	f.name = name
	f.errorHandling = errorHandling
}
|
/**
* Twist v0.1-dev
*
* (c) Harry Lawrence
*
* License: MIT
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
package main
import (
"fmt"
"strings"
"../vendor/gocui"
)
// New creates the "main" editor view sized to the terminal, makes it
// editable, and gives it focus.
// Note: the v declared inside the if shadows the v parameter; it is the
// view returned by SetView.
func New(g *gocui.Gui, v *gocui.View) error {
	maxX, maxY := g.Size()
	if v, err := g.SetView("main", 5, -1, maxX, maxY - 2); err != nil {
		// Presumably ErrorUnkView signals "view just created" in this gocui
		// version, so only other errors are fatal — TODO confirm.
		if err != gocui.ErrorUnkView {
			return err
		}
		v.Editable = true
		if err := g.SetCurrentView("main"); err != nil {
			return err
		}
	}
	return nil
}
// Quit asks gocui to stop its main loop by returning ErrorQuit.
func Quit(g *gocui.Gui, v *gocui.View) error {
	return gocui.ErrorQuit
}
// UseConsole moves keyboard focus to the "console" view.
func UseConsole(g *gocui.Gui, v *gocui.View) error {
	return g.SetCurrentView("console")
}
// UseEditor moves keyboard focus back to the "main" editor view.
func UseEditor(g *gocui.Gui, v *gocui.View) error {
	return g.SetCurrentView("main")
}
// ShowWriteDialog opens a small centered dialog pre-filled with a
// jsc editor.write() command for the current file and places the cursor
// at the dot of the filename's extension.
func ShowWriteDialog(g *gocui.Gui, v *gocui.View) error {
	maxX, maxY := g.Size()
	if v, err := g.SetView("dialog-write", maxX/2-30, maxY/2, maxX/2+30, maxY/2+2); err != nil {
		if err != gocui.ErrorUnkView {
			return err
		}
		v.Editable = true
		fmt.Fprintf(v, "jsc >> editor.write('%s');", g.Filename())
		// 21 is the length of the "jsc >> editor.write('" prefix. When the
		// filename has no extension, strings.Index returns -1 (offset 20),
		// which left the cursor one cell short; bump it back to 21.
		cursorPosition := 21 + strings.Index(g.Filename(), ".")
		if cursorPosition == 20 {
			cursorPosition++
		}
		cx, cy := v.Cursor()
		if err := v.SetCursor(cx+cursorPosition, cy); err != nil {
			// Cursor fell outside the view; scroll the origin instead.
			ox, oy := v.Origin()
			if err := v.SetOrigin(ox+cursorPosition, oy); err != nil {
				return err
			}
		}
		if err := g.SetCurrentView("dialog-write"); err != nil {
			return err
		}
	}
	return nil
}
// HideWriteDialog copies the dialog's buffer into the console view,
// executes it, removes the dialog, and returns focus to the editor.
// NOTE(review): errors from DeleteView/SetCurrentView are ignored, and
// g.View("console") is assumed non-nil — confirm both are safe here.
func HideWriteDialog(g *gocui.Gui, v *gocui.View) error {
	g.View("console").Clear()
	fmt.Fprint(g.View("console"), v.Buffer())
	Execute(g, g.View("console"))
	g.DeleteView("dialog-write")
	g.SetCurrentView("main")
	return nil
}
Fixed cursor position for filenames without an extension, where strings.Index returns -1.
/**
* Twist v0.1-dev
*
* (c) Harry Lawrence
*
* License: MIT
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
package main
import (
"fmt"
"strings"
"../vendor/gocui"
)
// New creates the "main" editor view sized to the terminal, makes it
// editable, and gives it focus. If the view already exists, nothing is done.
func New(g *gocui.Gui, v *gocui.View) error {
	maxX, maxY := g.Size()
	view, err := g.SetView("main", 5, -1, maxX, maxY-2)
	if err == nil {
		// View already existed; nothing to initialize.
		return nil
	}
	if err != gocui.ErrorUnkView {
		return err
	}
	view.Editable = true
	return g.SetCurrentView("main")
}
// Quit asks gocui to stop its main loop by returning ErrorQuit.
func Quit(g *gocui.Gui, v *gocui.View) error {
	return gocui.ErrorQuit
}
// UseConsole moves keyboard focus to the "console" view.
func UseConsole(g *gocui.Gui, v *gocui.View) error {
	if err := g.SetCurrentView("console"); err != nil {
		return err
	}
	return nil
}
// UseEditor moves keyboard focus back to the "main" editor view.
func UseEditor(g *gocui.Gui, v *gocui.View) error {
	if err := g.SetCurrentView("main"); err != nil {
		return err
	}
	return nil
}
// ShowWriteDialog opens a small centered dialog pre-filled with a
// jsc editor.write() command for the current file and places the cursor
// at the dot of the filename's extension.
func ShowWriteDialog(g *gocui.Gui, v *gocui.View) error {
	maxX, maxY := g.Size()
	if v, err := g.SetView("dialog-write", maxX/2-30, maxY/2, maxX/2+30, maxY/2+2); err != nil {
		if err != gocui.ErrorUnkView {
			return err
		}
		v.Editable = true
		fmt.Fprintf(v, "jsc >> editor.write('%s');", g.Filename())
		// 21 is the length of the "jsc >> editor.write('" prefix. A value of
		// 20 means strings.Index returned -1 (no extension dot); bump it so
		// the cursor still lands at the end of the filename.
		cursor_position := 21 + strings.Index(g.Filename(), ".")
		if (cursor_position == 20) {
			cursor_position++
		}
		cx, cy := v.Cursor()
		if err := v.SetCursor(cx + cursor_position, cy); err != nil {
			// Cursor fell outside the view; scroll the origin instead.
			ox, oy := v.Origin()
			if err := v.SetOrigin(ox + cursor_position, oy); err != nil {
				return err
			}
		}
		if err := g.SetCurrentView("dialog-write"); err != nil {
			return err
		}
	}
	return nil
}
// HideWriteDialog copies the dialog's buffer into the console view,
// executes it, removes the dialog, and returns focus to the editor.
// NOTE(review): errors from DeleteView/SetCurrentView are ignored, and
// g.View("console") is assumed non-nil — confirm both are safe here.
func HideWriteDialog(g *gocui.Gui, v *gocui.View) error {
	g.View("console").Clear()
	fmt.Fprint(g.View("console"), v.Buffer())
	Execute(g, g.View("console"))
	g.DeleteView("dialog-write")
	g.SetCurrentView("main")
	return nil
}
|
package clui
import (
term "github.com/nsf/termbox-go"
)
// ThumbPosition returns the scrollbar thumb position for the currently
// active item (itemNo) given the total number of items (itemCount) and the
// scrollbar length/height including both arrows. Returns -1 when the thumb
// should not be drawn.
func ThumbPosition(itemNo, itemCount, length int) int {
	inner := length - 2
	switch {
	case itemNo < 0:
		return -1
	case itemNo >= itemCount-1:
		return inner - 1
	case inner < 4:
		return 0
	}
	return int(float32(itemNo) / float32(itemCount-1) * float32(inner-1))
}
// ItemByThumbPosition calculates the item number from a scrollbar thumb
// position. position is the thumb position inside the scrollbar, itemCount
// the total number of items, and length the length or height of the
// scrollbar. Returns -1 when the item cannot be determined (e.g. the item
// count is zero or the position is outside the bar).
func ItemByThumbPosition(position, itemCount, length int) int {
	usable := length - 2
	if position < 1 || itemCount < 1 {
		return -1
	}
	if itemCount == 1 {
		return 0
	}
	item := int(float32(itemCount-1)*float32(position-1)/float32(usable-1) + 0.9)
	if item < 0 {
		return 0
	}
	if item >= itemCount {
		return itemCount - 1
	}
	return item
}
// ChildAt returns the child of parent located at absolute coordinates x, y.
// Returns nil when x, y fall outside parent, and parent itself when no
// child occupies that point.
func ChildAt(parent Control, x, y int) Control {
	px, py := parent.Pos()
	pw, ph := parent.Size()
	if x < px || y < py || x >= px+pw || y >= py+ph {
		return nil
	}
	children := parent.Children()
	if len(children) == 0 {
		return parent
	}
	found := parent
	for _, child := range children {
		if hit := ChildAt(child, x, y); hit != nil {
			found = hit
			break
		}
	}
	return found
}
// DeactivateControls makes all children of parent inactive, recursively.
// Each previously-active control is notified with an EventActivate whose
// X field is 0 (deactivate).
func DeactivateControls(parent Control) {
	for _, ctrl := range parent.Children() {
		if ctrl.Active() {
			ctrl.SetActive(false)
			ctrl.ProcessEvent(Event{Type: EventActivate, X: 0})
		}
		DeactivateControls(ctrl)
	}
}
// ActivateControl makes control active and deactivates every other child
// of parent. Returns true when control was found in parent's tree.
func ActivateControl(parent, control Control) bool {
	DeactivateControls(parent)
	target := FindChild(parent, control)
	if target == nil {
		return false
	}
	if !target.Active() {
		// Notify first, then flip the state — same order as before.
		target.ProcessEvent(Event{Type: EventActivate, X: 1})
		target.SetActive(true)
	}
	return true
}
// FindChild returns control if it belongs to the tree rooted at parent
// (including parent itself), and nil otherwise.
func FindChild(parent, control Control) Control {
	if parent == control {
		return parent
	}
	for _, child := range parent.Children() {
		if child == control {
			return child
		}
		if found := FindChild(child, control); found != nil {
			return found
		}
	}
	return nil
}
// IsMouseClickEvent reports whether a user action can be treated as a
// mouse click: an explicit click event or a left-button mouse event.
func IsMouseClickEvent(ev Event) bool {
	return ev.Type == EventClick ||
		(ev.Type == EventMouse && ev.Key == term.MouseLeft)
}
// FindFirstControl returns the first descendant of parent for which fn
// returns true. Used to locate the active or first tab-stop control.
func FindFirstControl(parent Control, fn func(Control) bool) Control {
	if list := getLinearControlList(parent, fn); len(list) > 0 {
		return list[0]
	}
	return nil
}
// FindLastControl returns the last descendant of parent for which fn
// returns true. Used by TAB processing when a user goes backwards with
// the TAB key - not supported now.
func FindLastControl(parent Control, fn func(Control) bool) Control {
	if list := getLinearControlList(parent, fn); len(list) > 0 {
		return list[len(list)-1]
	}
	return nil
}
// ActiveControl returns the active child of the parent, or nil when no
// child is active.
func ActiveControl(parent Control) Control {
	return FindFirstControl(parent, func(c Control) bool { return c.Active() })
}
// FindFirstActiveControl returns the first active control of a parent.
// NOTE(review): this passes a nil filter to getLinearControlList, which as
// written (fn != nil && fn(curr)) never appends any control when fn is nil,
// so this function always returns nil — confirm the intended nil-fn
// semantics of getLinearControlList.
func FindFirstActiveControl(parent Control) Control {
	for _, curr := range getLinearControlList(parent, nil) {
		if curr.Active() {
			return curr
		}
	}
	return nil
}
// getLinearControlList flattens the control tree rooted at parent into a
// depth-first list. When fn is non-nil it acts as a filter: only controls
// for which fn returns true are included. A nil fn now includes every
// control; previously a nil fn produced an empty list, which made callers
// passing nil (e.g. FindFirstActiveControl) always come up empty.
func getLinearControlList(parent Control, fn func(Control) bool) []Control {
	result := []Control{}
	for _, curr := range parent.Children() {
		if fn == nil || fn(curr) {
			result = append(result, curr)
		}
		if len(curr.Children()) == 0 {
			continue
		}
		ch := getLinearControlList(curr, fn)
		if len(ch) != 0 {
			result = append(result, ch...)
		}
	}
	return result
}
// NextControl returns the next or previous child (depending on next) that
// has the tab-stop feature on. Used by the library when processing the TAB
// key. Controls with a hidden ancestor are excluded as well, since they
// cannot be focused even when visible themselves.
func NextControl(parent Control, curr Control, next bool) Control {
	fnTab := func(c Control) bool {
		// A control is focusable only if it and every ancestor are visible.
		isVisible := func() bool {
			ctrl := c.Parent()
			for ctrl != nil {
				if !ctrl.Visible() {
					return false
				}
				ctrl = ctrl.Parent()
			}
			return c.Visible()
		}
		return c.TabStop() && isVisible() && c.Enabled()
	}
	linear := getLinearControlList(parent, fnTab)
	if len(linear) == 0 {
		return nil
	}
	// Locate curr; if absent, pIndex/nIndex stay 0 (wraps to start/end below).
	var pIndex, nIndex int
	for i, ch := range linear {
		if ch != curr {
			continue
		}
		pIndex = i - 1
		nIndex = i + 1
		break
	}
	if nIndex > len(linear)-1 {
		nIndex = 0
	}
	if pIndex < 0 {
		pIndex = len(linear) - 1
	}
	if next {
		return linear[nIndex]
	}
	return linear[pIndex]
}
// SendEventToChild tries to find a child control that should receive the
// event. For mouse click events it looks for a control at the event's
// coordinates, makes it active, and then sends the event to it. For any
// other event it targets the first active child. Returns the child's
// ProcessEvent result, or false when no child handled the event.
func SendEventToChild(parent Control, ev Event) bool {
	var child Control
	if IsMouseClickEvent(ev) {
		child = ChildAt(parent, ev.X, ev.Y)
		if child != nil && !child.Active() {
			ActivateControl(parent, child)
		}
	} else {
		child = ActiveControl(parent)
	}
	if child != nil && child != parent {
		ev.Target = child
		res := child.ProcessEvent(ev)
		// Also forward the event to the nearest clipped ancestor
		// (presumably so it can update its clipped region — see ClippedParent).
		if cparent := ClippedParent(child); cparent != nil && cparent != child {
			cparent.ProcessEvent(ev)
		}
		return res
	}
	return false
}
// CalcClipper returns the clipper rectangle (x, y, width, height) for a
// control: its position shifted inward by the paddings, and its size
// shrunk by the paddings on both sides.
func CalcClipper(c Control) (int, int, int, int) {
	width, height := c.Size()
	posX, posY := c.Pos()
	padX, padY := c.Paddings()
	return posX + padX, posY + padY, width - 2*padX, height - 2*padY
}
// ClippedParent returns the nearest ancestor of c that has the clipped
// flag set, or c itself when no such ancestor exists.
func ClippedParent(c Control) Control {
	for p := c.Parent(); p != nil; p = p.Parent() {
		if p.Clipped() {
			return p
		}
	}
	return c
}
// ControlInRect reports whether control c lies entirely within the
// rectangle with top-left corner (x, y), width w and height h.
// The previous expression compared sizes directly against coordinates
// (ww <= x+w, yy+h >= y), which is dimensionally wrong; the check now
// compares the control's far edges against the rectangle's far edges.
func ControlInRect(c Control, x int, y int, w int, h int) bool {
	xx, yy := c.Pos()
	ww, hh := c.Size()
	return xx >= x && yy >= y && xx+ww <= x+w && yy+hh <= y+h
}
NextControl also considers parent's visibility
When iterating controls in NextControl(), also exclude those controls
whose parents are not visible.
Signed-off-by: Leandro Dorileo <27489cbbb360dd9a8af745ba3f6e7d2c506ebdd9@intel.com>
package clui
import (
term "github.com/nsf/termbox-go"
)
// ThumbPosition computes where the scrollbar thumb should be drawn for the
// active item (itemNo), given the total item count and the scrollbar
// length/height including both arrows. A result of -1 means the thumb is
// not visible.
func ThumbPosition(itemNo, itemCount, length int) int {
	bar := length - 2
	if itemNo < 0 {
		return -1
	}
	if itemNo >= itemCount-1 {
		return bar - 1
	}
	if bar < 4 {
		return 0
	}
	ratio := float32(itemNo) / float32(itemCount-1)
	return int(ratio * float32(bar-1))
}
// ItemByThumbPosition maps a scrollbar thumb position back to an item
// number. position is the thumb position inside the scrollbar, itemCount
// the total number of items, and length the scrollbar's length or height.
// Returns -1 when the calculation is impossible (zero items, or a
// position outside the bar).
func ItemByThumbPosition(position, itemCount, length int) int {
	bar := length - 2
	switch {
	case position < 1, itemCount < 1:
		return -1
	case itemCount == 1:
		return 0
	}
	idx := int(float32(itemCount-1)*float32(position-1)/float32(bar-1) + 0.9)
	switch {
	case idx < 0:
		return 0
	case idx >= itemCount:
		return itemCount - 1
	}
	return idx
}
// ChildAt returns the child of parent control that is at absolute
// coordinates x, y. Returns nil if x, y are outside parent and
// returns parent itself if no child is at x, y.
func ChildAt(parent Control, x, y int) Control {
	px, py := parent.Pos()
	pw, ph := parent.Size()
	// Point outside the parent's rectangle: no hit.
	if px > x || py > y || px+pw <= x || py+ph <= y {
		return nil
	}
	if len(parent.Children()) == 0 {
		return parent
	}
	// Recurse into children; the first child hit wins.
	var ctrl Control
	ctrl = parent
	for _, child := range parent.Children() {
		check := ChildAt(child, x, y)
		if check != nil {
			ctrl = check
			break
		}
	}
	return ctrl
}
// DeactivateControls makes all children of parent inactive, recursively.
// Each previously-active control is notified with an EventActivate whose
// X field is 0 (deactivate).
func DeactivateControls(parent Control) {
	for _, ctrl := range parent.Children() {
		if ctrl.Active() {
			ctrl.SetActive(false)
			ctrl.ProcessEvent(Event{Type: EventActivate, X: 0})
		}
		DeactivateControls(ctrl)
	}
}
// ActivateControl makes control active and deactivates all other children
// of the parent. Returns true if control was found and activated.
func ActivateControl(parent, control Control) bool {
	DeactivateControls(parent)
	res := false
	ctrl := FindChild(parent, control)
	if ctrl != nil {
		res = true
		if !ctrl.Active() {
			// Notify with X: 1 (activate) before flipping the state.
			ctrl.ProcessEvent(Event{Type: EventActivate, X: 1})
			ctrl.SetActive(true)
		}
	}
	return res
}
// FindChild returns control if it is in the tree rooted at parent
// (including parent itself) and nil otherwise.
func FindChild(parent, control Control) Control {
	var res Control
	if parent == control {
		return parent
	}
	for _, ctrl := range parent.Children() {
		if ctrl == control {
			res = ctrl
			break
		}
		// Depth-first descent into this child's subtree.
		res = FindChild(ctrl, control)
		if res != nil {
			break
		}
	}
	return res
}
// IsMouseClickEvent reports whether a user action can be treated as a
// mouse click: an explicit click event or a left-button mouse event.
func IsMouseClickEvent(ev Event) bool {
	switch {
	case ev.Type == EventClick:
		return true
	case ev.Type == EventMouse && ev.Key == term.MouseLeft:
		return true
	}
	return false
}
// FindFirstControl returns the first child for which fn returns true.
// The function is used to find the active or tab-stop control.
func FindFirstControl(parent Control, fn func(Control) bool) Control {
	linear := getLinearControlList(parent, fn)
	if len(linear) == 0 {
		return nil
	}
	return linear[0]
}
// FindLastControl returns the last child for which fn returns true.
// The function is used by the TAB processing method if a user goes
// backwards with the TAB key - not supported now.
func FindLastControl(parent Control, fn func(Control) bool) Control {
	linear := getLinearControlList(parent, fn)
	if len(linear) == 0 {
		return nil
	}
	return linear[len(linear)-1]
}
// ActiveControl returns the active child of the parent, or nil if no
// child is active.
func ActiveControl(parent Control) Control {
	fnActive := func(c Control) bool {
		return c.Active()
	}
	return FindFirstControl(parent, fnActive)
}
// FindFirstActiveControl returns the first active control of a parent.
// NOTE(review): this passes a nil filter to getLinearControlList, which as
// written (fn != nil && fn(curr)) never appends any control when fn is nil,
// so this function always returns nil — confirm the intended nil-fn
// semantics of getLinearControlList.
func FindFirstActiveControl(parent Control) Control {
	for _, curr := range getLinearControlList(parent, nil) {
		if curr.Active() {
			return curr
		}
	}
	return nil
}
// getLinearControlList flattens the control tree rooted at parent into a
// depth-first list. When fn is non-nil it acts as a filter: only controls
// for which fn returns true are included. A nil fn now includes every
// control; previously a nil fn produced an empty list, which made callers
// passing nil (e.g. FindFirstActiveControl) always come up empty.
func getLinearControlList(parent Control, fn func(Control) bool) []Control {
	result := []Control{}
	for _, curr := range parent.Children() {
		if fn == nil || fn(curr) {
			result = append(result, curr)
		}
		if len(curr.Children()) == 0 {
			continue
		}
		ch := getLinearControlList(curr, fn)
		if len(ch) != 0 {
			result = append(result, ch...)
		}
	}
	return result
}
// NextControl returns the next or previous child (depends on the next
// parameter) that has the tab-stop feature on. Used by the library when
// processing the TAB key. Controls whose ancestors are hidden are
// excluded, since they cannot be focused even when visible themselves.
func NextControl(parent Control, curr Control, next bool) Control {
	fnTab := func(c Control) bool {
		// A control counts as visible only if every ancestor is visible too.
		isVisible := func() bool {
			ctrl := c.Parent()
			for ctrl != nil {
				if !ctrl.Visible() {
					return false
				}
				ctrl = ctrl.Parent()
			}
			return c.Visible()
		}
		return c.TabStop() && isVisible() && c.Enabled()
	}
	linear := getLinearControlList(parent, fnTab)
	if len(linear) == 0 {
		return nil
	}
	// Locate curr in the flat list; when absent, both indexes stay 0.
	var pIndex, nIndex int
	for i, ch := range linear {
		if ch != curr {
			continue
		}
		pIndex = i - 1
		nIndex = i + 1
		break
	}
	// Wrap around at both ends of the list.
	if nIndex > len(linear)-1 {
		nIndex = 0
	}
	if pIndex < 0 {
		pIndex = len(linear) - 1
	}
	if next {
		return linear[nIndex]
	} else {
		return linear[pIndex]
	}
}
// SendEventToChild tries to find a child control that should receive the
// event. For mouse click events it looks for a control at the event's
// coordinates, makes it active, and then sends the event to it. For any
// other event it targets the first active child. Returns the child's
// ProcessEvent result, or false when no child handled the event.
func SendEventToChild(parent Control, ev Event) bool {
	var child Control
	if IsMouseClickEvent(ev) {
		child = ChildAt(parent, ev.X, ev.Y)
		if child != nil && !child.Active() {
			ActivateControl(parent, child)
		}
	} else {
		child = ActiveControl(parent)
	}
	if child != nil && child != parent {
		ev.Target = child
		res := child.ProcessEvent(ev)
		// Also forward the event to the nearest clipped ancestor
		// (presumably so it can update its clipped region — see ClippedParent).
		if cparent := ClippedParent(child); cparent != nil && cparent != child {
			cparent.ProcessEvent(ev)
		}
		return res
	}
	return false
}
// CalcClipper calculates the clipper rectangle (x, y, width, height) based
// on the control's size, position, and paddings: the origin is shifted
// inward by the padding and the size shrinks by the padding on both sides.
func CalcClipper(c Control) (int, int, int, int) {
	w, h := c.Size()
	x, y := c.Pos()
	px, py := c.Paddings()
	x = x + px
	y = y + py
	w = w - 2*px
	h = h - 2*py
	return x, y, w, h
}
// ClippedParent finds the nearest ancestor of c with the clipped flag set,
// or returns c itself when no ancestor is clipped.
func ClippedParent(c Control) Control {
	var clipped Control
	ctrl := c.Parent()
	clipped = c
	for ctrl != nil {
		if ctrl.Clipped() {
			clipped = ctrl
			break
		}
		ctrl = ctrl.Parent()
	}
	return clipped
}
// ControlInRect reports whether control c lies entirely within the
// rectangle with top-left corner (x, y), width w and height h.
// The previous expression compared sizes directly against coordinates
// (ww <= x+w, yy+h >= y), which is dimensionally wrong; the check now
// compares the control's far edges against the rectangle's far edges.
func ControlInRect(c Control, x int, y int, w int, h int) bool {
	xx, yy := c.Pos()
	ww, hh := c.Size()
	return xx >= x && yy >= y && xx+ww <= x+w && yy+hh <= y+h
}
|
/*
* Copyright (c) 2013 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
Test Summary:
NOTE: For each test, a nil pointer, a single pointer and double pointer to the
base test element are also tested to ensure proper indirection across all types.
- Max int8, int16, int32, int64, int
- Max uint8, uint16, uint32, uint64, uint
- Boolean true and false
- Standard complex64 and complex128
- Array containing standard ints
- Array containing type with custom formatter on pointer receiver only
- Slice containing standard float32 values
- Slice containing type with custom formatter on pointer receiver only
- Standard string
- Nil interface
- Map with string keys and int vals
- Map with custom formatter type on pointer receiver only keys and vals
- Map with interface keys and values
- Struct with primitives
- Struct that contains another struct
- Struct that contains custom type with Stringer pointer interface via both
exported and unexported fields
- Uintptr to 0 (null pointer)
- Uintptr address of real variable
- Unsafe.Pointer to 0 (null pointer)
- Unsafe.Pointer to address of real variable
- Nil channel
- Standard int channel
- Function with no params and no returns
- Function with param and no returns
- Function with multiple params and multiple returns
- Struct that is circular through self referencing
- Structs that are circular through cross referencing
- Structs that are indirectly circular
*/
package spew_test
import (
"bytes"
"fmt"
"github.com/davecgh/go-spew/spew"
"testing"
"unsafe"
)
// pstringer is a custom type used to test the Stringer interface when it is
// implemented on the pointer receiver only.
type pstringer string

// String implements the Stringer interface for testing invocation of custom
// stringers on types with only pointer receivers.
func (s *pstringer) String() string {
	return "stringer " + string(*s)
}
// xref1 and xref2 are cross referencing structs for testing circular reference
// detection: each instance points at an instance of the other type.
type xref1 struct {
	ps2 *xref2
}

type xref2 struct {
	ps1 *xref1
}
// indirCir1, indirCir2, and indirCir3 are used to generate an indirect
// circular reference chain (1 -> 2 -> 3 -> 1) for testing detection.
type indirCir1 struct {
	ps2 *indirCir2
}

type indirCir2 struct {
	ps3 *indirCir3
}

type indirCir3 struct {
	ps1 *indirCir1
}
// dumpTest is used to describe a test to be performed against the Dump method:
// the input value and the exact output expected from spew.Fdump.
type dumpTest struct {
	in   interface{}
	want string
}

// dumpTests houses all of the tests to be performed against the Dump method.
var dumpTests = make([]dumpTest, 0)

// addDumpTest is a helper method to append the passed input and desired result
// to dumpTests.
func addDumpTest(in interface{}, want string) {
	test := dumpTest{in, want}
	dumpTests = append(dumpTests, test)
}
// addIntTests registers Dump expectations for the maximum value of every
// signed integer type (int8, int16, int32, int64, int). For each value the
// plain value, a single pointer, a double pointer, and a nil pointer of the
// type are covered.
func addIntTests() {
	// Max int8.
	v := int8(127)
	nv := (*int8)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "int8"
	vs := "127"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
	// Max int16.
	v2 := int16(32767)
	nv2 := (*int16)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "int16"
	v2s := "32767"
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
	// Max int32.
	v3 := int32(2147483647)
	nv3 := (*int32)(nil)
	pv3 := &v3
	v3Addr := fmt.Sprintf("%p", pv3)
	pv3Addr := fmt.Sprintf("%p", &pv3)
	v3t := "int32"
	v3s := "2147483647"
	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
	// Max int64.
	v4 := int64(9223372036854775807)
	nv4 := (*int64)(nil)
	pv4 := &v4
	v4Addr := fmt.Sprintf("%p", pv4)
	pv4Addr := fmt.Sprintf("%p", &pv4)
	v4t := "int64"
	v4s := "9223372036854775807"
	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
	addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
	addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
	addDumpTest(nv4, "(*"+v4t+")(<nil>)\n")
	// Max int.
	v5 := int(2147483647)
	nv5 := (*int)(nil)
	pv5 := &v5
	v5Addr := fmt.Sprintf("%p", pv5)
	pv5Addr := fmt.Sprintf("%p", &pv5)
	v5t := "int"
	v5s := "2147483647"
	addDumpTest(v5, "("+v5t+") "+v5s+"\n")
	addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n")
	addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n")
	addDumpTest(nv5, "(*"+v5t+")(<nil>)\n")
}
// addUintTests registers Dump expectations for the maximum value of every
// unsigned integer type (uint8, uint16, uint32, uint64, uint). For each value
// the plain value, a single pointer, a double pointer, and a nil pointer of
// the type are covered.
func addUintTests() {
	// Max uint8.
	v := uint8(255)
	nv := (*uint8)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "uint8"
	vs := "255"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
	// Max uint16.
	v2 := uint16(65535)
	nv2 := (*uint16)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "uint16"
	v2s := "65535"
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
	// Max uint32.
	v3 := uint32(4294967295)
	nv3 := (*uint32)(nil)
	pv3 := &v3
	v3Addr := fmt.Sprintf("%p", pv3)
	pv3Addr := fmt.Sprintf("%p", &pv3)
	v3t := "uint32"
	v3s := "4294967295"
	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
	// Max uint64.
	v4 := uint64(18446744073709551615)
	nv4 := (*uint64)(nil)
	pv4 := &v4
	v4Addr := fmt.Sprintf("%p", pv4)
	pv4Addr := fmt.Sprintf("%p", &pv4)
	v4t := "uint64"
	v4s := "18446744073709551615"
	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
	addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
	addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
	addDumpTest(nv4, "(*"+v4t+")(<nil>)\n")
	// Max uint.
	v5 := uint(4294967295)
	nv5 := (*uint)(nil)
	pv5 := &v5
	v5Addr := fmt.Sprintf("%p", pv5)
	pv5Addr := fmt.Sprintf("%p", &pv5)
	v5t := "uint"
	v5s := "4294967295"
	addDumpTest(v5, "("+v5t+") "+v5s+"\n")
	addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n")
	addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n")
	addDumpTest(nv5, "(*"+v5t+")(<nil>)\n")
}
// addBoolTests registers Dump expectations for the boolean values true and
// false, covering the plain value, a single pointer, and a double pointer.
// A nil *bool is only exercised once, in the true section.
func addBoolTests() {
	// Boolean true.
	v := bool(true)
	nv := (*bool)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "bool"
	vs := "true"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
	// Boolean false.
	v2 := bool(false)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "bool"
	v2s := "false"
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
}
// addFloatTests registers Dump expectations for float32 and float64 values,
// covering the plain value, a single pointer, a double pointer, and a nil
// pointer of each type.
func addFloatTests() {
	// Standard float32.
	v := float32(3.1415)
	nv := (*float32)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "float32"
	vs := "3.1415"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
	// Standard float64.
	v2 := float64(3.1415926)
	nv2 := (*float64)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "float64"
	v2s := "3.1415926"
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
}
// addComplexTests registers Dump expectations for complex64 and complex128
// values, covering the plain value, a single pointer, a double pointer, and a
// nil pointer of each type.
func addComplexTests() {
	// Standard complex64.
	v := complex(float32(6), -2)
	nv := (*complex64)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "complex64"
	vs := "(6-2i)"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
	// Standard complex128.
	v2 := complex(float64(-6), 2)
	nv2 := (*complex128)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "complex128"
	v2s := "(-6+2i)"
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
}
// addArrayTests registers Dump expectations for arrays: one of standard ints
// and one of a type whose Stringer is implemented on the pointer receiver
// only. Each array is covered as a plain value, single pointer, double
// pointer, and nil pointer.
func addArrayTests() {
	// Array containing standard ints.
	v := [3]int{1, 2, 3}
	nv := (*[3]int)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "int"
	vs := "{\n (" + vt + ") 1,\n (" + vt + ") 2,\n (" + vt + ") 3\n}"
	addDumpTest(v, "([3]"+vt+") "+vs+"\n")
	addDumpTest(pv, "(*[3]"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**[3]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*[3]"+vt+")(<nil>)\n")
	// Array containing type with custom formatter on pointer receiver only.
	v2 := [3]pstringer{"1", "2", "3"}
	nv2 := (*[3]pstringer)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "spew_test.pstringer"
	v2s := "{\n (" + v2t + ") stringer 1,\n (" + v2t + ") stringer 2,\n (" +
		v2t + ") stringer 3\n}"
	addDumpTest(v2, "([3]"+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*[3]"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**[3]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*[3]"+v2t+")(<nil>)\n")
}
// addSliceTests registers Dump expectations for slices: one of float32 values
// and one of a type whose Stringer is implemented on the pointer receiver
// only. Each slice is covered as a plain value, single pointer, double
// pointer, and nil pointer.
func addSliceTests() {
	// Slice containing standard float32 values.
	v := []float32{3.14, 6.28, 12.56}
	nv := (*[]float32)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "float32"
	vs := "{\n (" + vt + ") 3.14,\n (" + vt + ") 6.28,\n (" + vt + ") 12.56\n}"
	addDumpTest(v, "([]"+vt+") "+vs+"\n")
	addDumpTest(pv, "(*[]"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**[]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*[]"+vt+")(<nil>)\n")
	// Slice containing type with custom formatter on pointer receiver only.
	v2 := []pstringer{"1", "2", "3"}
	nv2 := (*[]pstringer)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "spew_test.pstringer"
	v2s := "{\n (" + v2t + ") stringer 1,\n (" + v2t + ") stringer 2,\n (" +
		v2t + ") stringer 3\n}"
	addDumpTest(v2, "([]"+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*[]"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**[]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*[]"+v2t+")(<nil>)\n")
}
// addStringTests registers Dump expectations for a plain string value,
// covering the value itself, a single pointer, a double pointer, and a nil
// pointer.
func addStringTests() {
	// Standard string.
	s := "test"
	nilPtr := (*string)(nil)
	ptr := &s
	ptrAddr := fmt.Sprintf("%p", ptr)
	dblPtrAddr := fmt.Sprintf("%p", &ptr)
	typ := "string"
	want := `"test"`
	addDumpTest(s, "("+typ+") "+want+"\n")
	addDumpTest(ptr, "(*"+typ+")("+ptrAddr+")("+want+")\n")
	addDumpTest(&ptr, "(**"+typ+")("+dblPtrAddr+"->"+ptrAddr+")("+want+")\n")
	addDumpTest(nilPtr, "(*"+typ+")(<nil>)\n")
}
// addNilInterfaceTests registers Dump expectations for a nil interface value,
// covering the value itself, a single pointer, a double pointer, and a nil
// pointer.
func addNilInterfaceTests() {
	// Nil interface.
	var v interface{}
	nv := (*interface{})(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "interface {}"
	vs := "<nil>"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
}
// addMapTests registers Dump expectations for maps: string->int keys/values,
// a map whose keys and values have a pointer-receiver-only Stringer, and a
// map with interface keys and values. Each map is covered as a plain value,
// single pointer, double pointer, and nil pointer.
func addMapTests() {
	// Map with string keys and int vals.
	v := map[string]int{"one": 1}
	nv := (*map[string]int)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "map[string]int"
	vt1 := "string"
	vt2 := "int"
	vs := "{\n (" + vt1 + ") \"one\": (" + vt2 + ") 1\n}"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
	// Map with custom formatter type on pointer receiver only keys and vals.
	v2 := map[pstringer]pstringer{"one": "1"}
	nv2 := (*map[pstringer]pstringer)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "map[spew_test.pstringer]spew_test.pstringer"
	v2t1 := "spew_test.pstringer"
	v2t2 := "spew_test.pstringer"
	v2s := "{\n (" + v2t1 + ") stringer one: (" + v2t2 + ") stringer 1\n}"
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
	// Map with interface keys and values.
	v3 := map[interface{}]interface{}{"one": 1}
	nv3 := (*map[interface{}]interface{})(nil)
	pv3 := &v3
	v3Addr := fmt.Sprintf("%p", pv3)
	pv3Addr := fmt.Sprintf("%p", &pv3)
	v3t := "map[interface {}]interface {}"
	v3t1 := "string"
	v3t2 := "int"
	v3s := "{\n (" + v3t1 + ") \"one\": (" + v3t2 + ") 1\n}"
	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
}
// addStructTests registers Dump expectations for structs: one with primitive
// fields, one nesting another struct, and one containing a
// pointer-receiver-only Stringer type via both exported and unexported
// fields. Each struct is covered as a plain value, single pointer, double
// pointer, and nil pointer.
func addStructTests() {
	// Struct with primitives.
	type s1 struct {
		a int8
		b uint8
	}
	v := s1{127, 255}
	nv := (*s1)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "spew_test.s1"
	vt2 := "int8"
	vt3 := "uint8"
	vs := "{\n a: (" + vt2 + ") 127,\n b: (" + vt3 + ") 255\n}"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
	// Struct that contains another struct.
	type s2 struct {
		s1 s1
		b  bool
	}
	v2 := s2{s1{127, 255}, true}
	nv2 := (*s2)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "spew_test.s2"
	v2t2 := "spew_test.s1"
	v2t3 := "int8"
	v2t4 := "uint8"
	v2t5 := "bool"
	v2s := "{\n s1: (" + v2t2 + ") {\n a: (" + v2t3 + ") 127,\n b: (" +
		v2t4 + ") 255\n },\n b: (" + v2t5 + ") true\n}"
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
	// Struct that contains custom type with Stringer pointer interface via both
	// exported and unexported fields.
	type s3 struct {
		s pstringer
		S pstringer
	}
	v3 := s3{"test", "test2"}
	nv3 := (*s3)(nil)
	pv3 := &v3
	v3Addr := fmt.Sprintf("%p", pv3)
	pv3Addr := fmt.Sprintf("%p", &pv3)
	v3t := "spew_test.s3"
	v3t2 := "spew_test.pstringer"
	v3s := "{\n s: (" + v3t2 + ") stringer test,\n S: (" + v3t2 +
		") stringer test2\n}"
	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
}
// addUintptrTests registers Dump expectations for uintptr values: zero (dumped
// as <nil>) and the address of a real variable.
// NOTE(review): the zero-value section declares no nil *uintptr of its own;
// the nil-pointer case is only exercised via nv2 in the second section —
// presumably intentional, but worth confirming.
func addUintptrTests() {
	// Null pointer.
	v := uintptr(0)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "uintptr"
	vs := "<nil>"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	// Address of real variable.
	i := 1
	v2 := uintptr(unsafe.Pointer(&i))
	nv2 := (*uintptr)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "uintptr"
	v2s := fmt.Sprintf("%p", &i)
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
}
// addUnsafePointerTests registers Dump expectations for unsafe.Pointer
// values: a null pointer and the address of a real variable. Each value is
// covered as a plain value, single pointer, double pointer, and nil pointer.
func addUnsafePointerTests() {
	// Null pointer.
	v := unsafe.Pointer(uintptr(0))
	nv := (*unsafe.Pointer)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "unsafe.Pointer"
	vs := "<nil>"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
	// Address of real variable.
	i := 1
	v2 := unsafe.Pointer(&i)
	nv2 := (*unsafe.Pointer)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "unsafe.Pointer"
	v2s := fmt.Sprintf("%p", &i)
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	// Fix: the original reused nv/vt from the first section here, which
	// duplicated the earlier nil-pointer test instead of giving this
	// section its own nil-pointer variable. The expected output string is
	// unchanged since both types render as unsafe.Pointer.
	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
}
// addChanTests registers Dump expectations for channels: a nil chan int and a
// real chan int (dumped as its address). The nil channel is also covered via
// a nil pointer to the channel type.
func addChanTests() {
	// Nil channel.
	var v chan int
	pv := &v
	nv := (*chan int)(nil)
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "chan int"
	vs := "<nil>"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
	// Real channel.
	v2 := make(chan int)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "chan int"
	v2s := fmt.Sprintf("%p", v2)
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
}
// addFuncTests registers Dump expectations for function values: no
// params/returns, a single param, and multiple params with multiple returns.
// Functions are dumped as their address. Each is covered as a plain value,
// single pointer, double pointer, and nil pointer.
func addFuncTests() {
	// Function with no params and no returns.
	v := addIntTests
	nv := (*func())(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "func()"
	vs := fmt.Sprintf("%p", v)
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
	// Function with param and no returns.
	v2 := TestDump
	nv2 := (*func(*testing.T))(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "func(*testing.T)"
	v2s := fmt.Sprintf("%p", v2)
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
	// Function with multiple params and multiple returns.
	var v3 = func(i int, s string) (b bool, err error) {
		return true, nil
	}
	nv3 := (*func(int, string)(bool, error))(nil)
	pv3 := &v3
	v3Addr := fmt.Sprintf("%p", pv3)
	pv3Addr := fmt.Sprintf("%p", &pv3)
	v3t := "func(int, string) (bool, error)"
	v3s := fmt.Sprintf("%p", v3)
	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
}
// addCircularTests registers Dump expectations for circular data structures:
// a self-referencing struct, two cross-referencing structs, and an indirect
// three-struct cycle. Expected output includes the "<already shown>" marker
// at the depth where spew detects the cycle; pointer levels detect the cycle
// one level earlier than plain values, hence separate vs/vs2 expectations.
func addCircularTests() {
	// Struct that is circular through self referencing.
	type circular struct {
		c *circular
	}
	v := circular{nil}
	v.c = &v
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "spew_test.circular"
	vs := "{\n c: (*" + vt + ")(" + vAddr + ")({\n c: (*" + vt + ")(" +
		vAddr + ")(<already shown>)\n })\n}"
	vs2 := "{\n c: (*" + vt + ")(" + vAddr + ")(<already shown>)\n}"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs2+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs2+")\n")
	// Structs that are circular through cross referencing.
	v2 := xref1{nil}
	ts2 := xref2{&v2}
	v2.ps2 = &ts2
	pv2 := &v2
	ts2Addr := fmt.Sprintf("%p", &ts2)
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "spew_test.xref1"
	v2t2 := "spew_test.xref2"
	v2s := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t +
		")(" + v2Addr + ")({\n ps2: (*" + v2t2 + ")(" + ts2Addr +
		")(<already shown>)\n })\n })\n}"
	v2s2 := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t +
		")(" + v2Addr + ")(<already shown>)\n })\n}"
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s2+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s2+")\n")
	// Structs that are indirectly circular.
	v3 := indirCir1{nil}
	tic2 := indirCir2{nil}
	tic3 := indirCir3{&v3}
	tic2.ps3 = &tic3
	v3.ps2 = &tic2
	pv3 := &v3
	tic2Addr := fmt.Sprintf("%p", &tic2)
	tic3Addr := fmt.Sprintf("%p", &tic3)
	v3Addr := fmt.Sprintf("%p", pv3)
	pv3Addr := fmt.Sprintf("%p", &pv3)
	v3t := "spew_test.indirCir1"
	v3t2 := "spew_test.indirCir2"
	v3t3 := "spew_test.indirCir3"
	v3s := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 +
		")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr +
		")({\n ps2: (*" + v3t2 + ")(" + tic2Addr +
		")(<already shown>)\n })\n })\n })\n}"
	v3s2 := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 +
		")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr +
		")(<already shown>)\n })\n })\n}"
	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s2+")\n")
	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s2+")\n")
}
// TestDump executes all of the tests described by dumpTests, comparing the
// output of spew.Fdump for each input against its expected string.
func TestDump(t *testing.T) {
	t.Logf("Running %d tests", len(dumpTests))
	for i, test := range dumpTests {
		var buf bytes.Buffer
		spew.Fdump(&buf, test.in)
		if got := buf.String(); got != test.want {
			t.Errorf("Dump #%d\n got: %s want: %s", i, got, test.want)
		}
	}
}
// Setup tests: register every group of Dump expectations before TestDump
// runs. Registration order determines the test indices reported on failure.
func init() {
	addIntTests()
	addUintTests()
	addBoolTests()
	addFloatTests()
	addComplexTests()
	addArrayTests()
	addSliceTests()
	addStringTests()
	addNilInterfaceTests()
	addMapTests()
	addStructTests()
	addUintptrTests()
	addUnsafePointerTests()
	addChanTests()
	addFuncTests()
	addCircularTests()
}
Namespace: dump tests.
/*
* Copyright (c) 2013 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
Test Summary:
NOTE: For each test, a nil pointer, a single pointer and double pointer to the
base test element are also tested to ensure proper indirection across all types.
- Max int8, int16, int32, int64, int
- Max uint8, uint16, uint32, uint64, uint
- Boolean true and false
- Standard complex64 and complex128
- Array containing standard ints
- Array containing type with custom formatter on pointer receiver only
- Slice containing standard float32 values
- Slice containing type with custom formatter on pointer receiver only
- Standard string
- Nil interface
- Map with string keys and int vals
- Map with custom formatter type on pointer receiver only keys and vals
- Map with interface keys and values
- Struct with primitives
- Struct that contains another struct
- Struct that contains custom type with Stringer pointer interface via both
exported and unexported fields
- Uintptr to 0 (null pointer)
- Uintptr address of real variable
- Unsafe.Pointer to 0 (null pointer)
- Unsafe.Pointer to address of real variable
- Nil channel
- Standard int channel
- Function with no params and no returns
- Function with param and no returns
- Function with multiple params and multiple returns
- Struct that is circular through self referencing
- Structs that are circular through cross referencing
- Structs that are indirectly circular
*/
package spew_test
import (
"bytes"
"fmt"
"github.com/davecgh/go-spew/spew"
"testing"
"unsafe"
)
// pstringer is a custom type used to test the Stringer interface when it is
// implemented on the pointer receiver only.
type pstringer string

// String implements the Stringer interface for testing invocation of custom
// stringers on types with only pointer receivers.
func (s *pstringer) String() string {
	return "stringer " + string(*s)
}
// xref1 and xref2 are cross referencing structs for testing circular reference
// detection: each instance points at an instance of the other type.
type xref1 struct {
	ps2 *xref2
}

type xref2 struct {
	ps1 *xref1
}
// indirCir1, indirCir2, and indirCir3 are used to generate an indirect
// circular reference chain (1 -> 2 -> 3 -> 1) for testing detection.
type indirCir1 struct {
	ps2 *indirCir2
}

type indirCir2 struct {
	ps3 *indirCir3
}

type indirCir3 struct {
	ps1 *indirCir1
}
// dumpTest is used to describe a test to be performed against the Dump method:
// the input value and the exact output expected from spew.Fdump.
type dumpTest struct {
	in   interface{}
	want string
}

// dumpTests houses all of the tests to be performed against the Dump method.
var dumpTests = make([]dumpTest, 0)

// addDumpTest is a helper method to append the passed input and desired result
// to dumpTests.
func addDumpTest(in interface{}, want string) {
	test := dumpTest{in, want}
	dumpTests = append(dumpTests, test)
}
// addIntDumpTests registers Dump expectations for the maximum value of every
// signed integer type (int8, int16, int32, int64, int). For each value the
// plain value, a single pointer, a double pointer, and a nil pointer of the
// type are covered.
func addIntDumpTests() {
	// Max int8.
	v := int8(127)
	nv := (*int8)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "int8"
	vs := "127"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
	// Max int16.
	v2 := int16(32767)
	nv2 := (*int16)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "int16"
	v2s := "32767"
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
	// Max int32.
	v3 := int32(2147483647)
	nv3 := (*int32)(nil)
	pv3 := &v3
	v3Addr := fmt.Sprintf("%p", pv3)
	pv3Addr := fmt.Sprintf("%p", &pv3)
	v3t := "int32"
	v3s := "2147483647"
	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
	// Max int64.
	v4 := int64(9223372036854775807)
	nv4 := (*int64)(nil)
	pv4 := &v4
	v4Addr := fmt.Sprintf("%p", pv4)
	pv4Addr := fmt.Sprintf("%p", &pv4)
	v4t := "int64"
	v4s := "9223372036854775807"
	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
	addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
	addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
	addDumpTest(nv4, "(*"+v4t+")(<nil>)\n")
	// Max int.
	v5 := int(2147483647)
	nv5 := (*int)(nil)
	pv5 := &v5
	v5Addr := fmt.Sprintf("%p", pv5)
	pv5Addr := fmt.Sprintf("%p", &pv5)
	v5t := "int"
	v5s := "2147483647"
	addDumpTest(v5, "("+v5t+") "+v5s+"\n")
	addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n")
	addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n")
	addDumpTest(nv5, "(*"+v5t+")(<nil>)\n")
}
// addUintDumpTests registers Dump expectations for the maximum value of every
// unsigned integer type (uint8, uint16, uint32, uint64, uint). For each value
// the plain value, a single pointer, a double pointer, and a nil pointer of
// the type are covered.
func addUintDumpTests() {
	// Max uint8.
	v := uint8(255)
	nv := (*uint8)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "uint8"
	vs := "255"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
	// Max uint16.
	v2 := uint16(65535)
	nv2 := (*uint16)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "uint16"
	v2s := "65535"
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
	// Max uint32.
	v3 := uint32(4294967295)
	nv3 := (*uint32)(nil)
	pv3 := &v3
	v3Addr := fmt.Sprintf("%p", pv3)
	pv3Addr := fmt.Sprintf("%p", &pv3)
	v3t := "uint32"
	v3s := "4294967295"
	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
	// Max uint64.
	v4 := uint64(18446744073709551615)
	nv4 := (*uint64)(nil)
	pv4 := &v4
	v4Addr := fmt.Sprintf("%p", pv4)
	pv4Addr := fmt.Sprintf("%p", &pv4)
	v4t := "uint64"
	v4s := "18446744073709551615"
	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
	addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
	addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
	addDumpTest(nv4, "(*"+v4t+")(<nil>)\n")
	// Max uint.
	v5 := uint(4294967295)
	nv5 := (*uint)(nil)
	pv5 := &v5
	v5Addr := fmt.Sprintf("%p", pv5)
	pv5Addr := fmt.Sprintf("%p", &pv5)
	v5t := "uint"
	v5s := "4294967295"
	addDumpTest(v5, "("+v5t+") "+v5s+"\n")
	addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n")
	addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n")
	addDumpTest(nv5, "(*"+v5t+")(<nil>)\n")
}
// addBoolDumpTests registers Dump expectations for the boolean values true
// and false, covering the plain value, a single pointer, and a double
// pointer. A nil *bool is only exercised once, in the true section.
func addBoolDumpTests() {
	// Boolean true.
	v := bool(true)
	nv := (*bool)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "bool"
	vs := "true"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
	// Boolean false.
	v2 := bool(false)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "bool"
	v2s := "false"
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
}
// addFloatDumpTests registers Dump expectations for float32 and float64
// values, covering the plain value, a single pointer, a double pointer, and a
// nil pointer of each type.
func addFloatDumpTests() {
	// Standard float32.
	v := float32(3.1415)
	nv := (*float32)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "float32"
	vs := "3.1415"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
	// Standard float64.
	v2 := float64(3.1415926)
	nv2 := (*float64)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "float64"
	v2s := "3.1415926"
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
}
// addComplexDumpTests registers Dump expectations for complex64 and
// complex128 values, covering the plain value, a single pointer, a double
// pointer, and a nil pointer of each type.
func addComplexDumpTests() {
	// Standard complex64.
	v := complex(float32(6), -2)
	nv := (*complex64)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "complex64"
	vs := "(6-2i)"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
	// Standard complex128.
	v2 := complex(float64(-6), 2)
	nv2 := (*complex128)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "complex128"
	v2s := "(-6+2i)"
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
}
// addArrayDumpTests registers Dump expectations for arrays: one of standard
// ints and one of a type whose Stringer is implemented on the pointer
// receiver only. Each array is covered as a plain value, single pointer,
// double pointer, and nil pointer.
func addArrayDumpTests() {
	// Array containing standard ints.
	v := [3]int{1, 2, 3}
	nv := (*[3]int)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "int"
	vs := "{\n (" + vt + ") 1,\n (" + vt + ") 2,\n (" + vt + ") 3\n}"
	addDumpTest(v, "([3]"+vt+") "+vs+"\n")
	addDumpTest(pv, "(*[3]"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**[3]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*[3]"+vt+")(<nil>)\n")
	// Array containing type with custom formatter on pointer receiver only.
	v2 := [3]pstringer{"1", "2", "3"}
	nv2 := (*[3]pstringer)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "spew_test.pstringer"
	v2s := "{\n (" + v2t + ") stringer 1,\n (" + v2t + ") stringer 2,\n (" +
		v2t + ") stringer 3\n}"
	addDumpTest(v2, "([3]"+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*[3]"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**[3]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*[3]"+v2t+")(<nil>)\n")
}
// addSliceDumpTests registers the expected Dump output for slices: one of
// plain float32s and one whose element type implements Stringer on the
// pointer receiver only (pstringer).
func addSliceDumpTests() {
	// Slice containing standard float32 values.
	v := []float32{3.14, 6.28, 12.56}
	nv := (*[]float32)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "float32"
	vs := "{\n (" + vt + ") 3.14,\n (" + vt + ") 6.28,\n (" + vt + ") 12.56\n}"
	addDumpTest(v, "([]"+vt+") "+vs+"\n")
	addDumpTest(pv, "(*[]"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**[]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*[]"+vt+")(<nil>)\n")
	// Slice containing type with custom formatter on pointer receiver only.
	v2 := []pstringer{"1", "2", "3"}
	nv2 := (*[]pstringer)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "spew_test.pstringer"
	v2s := "{\n (" + v2t + ") stringer 1,\n (" + v2t + ") stringer 2,\n (" +
		v2t + ") stringer 3\n}"
	addDumpTest(v2, "([]"+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*[]"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**[]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*[]"+v2t+")(<nil>)\n")
}
// addStringDumpTests registers the expected Dump output for a plain string
// value, its pointer, double pointer, and a typed nil *string.
func addStringDumpTests() {
	// Standard string.
	v := "test"
	nv := (*string)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "string"
	vs := "\"test\""
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
}
// addNilInterfaceDumpTests registers the expected Dump output for a nil
// interface value and for pointers to it, which dump as "<nil>" content.
func addNilInterfaceDumpTests() {
	// Nil interface.
	var v interface{}
	nv := (*interface{})(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "interface {}"
	vs := "<nil>"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
}
// addMapDumpTests registers the expected Dump output for maps: plain
// string->int, pstringer keys/values (pointer-receiver Stringer), and
// interface{} keys/values. Each single-entry map keeps the expected output
// deterministic despite random map iteration order.
func addMapDumpTests() {
	// Map with string keys and int vals.
	v := map[string]int{"one": 1}
	nv := (*map[string]int)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "map[string]int"
	vt1 := "string"
	vt2 := "int"
	vs := "{\n (" + vt1 + ") \"one\": (" + vt2 + ") 1\n}"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
	// Map with custom formatter type on pointer receiver only keys and vals.
	v2 := map[pstringer]pstringer{"one": "1"}
	nv2 := (*map[pstringer]pstringer)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "map[spew_test.pstringer]spew_test.pstringer"
	v2t1 := "spew_test.pstringer"
	v2t2 := "spew_test.pstringer"
	v2s := "{\n (" + v2t1 + ") stringer one: (" + v2t2 + ") stringer 1\n}"
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
	// Map with interface keys and values.
	v3 := map[interface{}]interface{}{"one": 1}
	nv3 := (*map[interface{}]interface{})(nil)
	pv3 := &v3
	v3Addr := fmt.Sprintf("%p", pv3)
	pv3Addr := fmt.Sprintf("%p", &pv3)
	v3t := "map[interface {}]interface {}"
	v3t1 := "string"
	v3t2 := "int"
	v3s := "{\n (" + v3t1 + ") \"one\": (" + v3t2 + ") 1\n}"
	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
}
// addStructDumpTests registers the expected Dump output for structs: one of
// primitives, one nesting another struct, and one whose exported and
// unexported fields both carry a pointer-receiver Stringer (pstringer).
func addStructDumpTests() {
	// Struct with primitives.
	type s1 struct {
		a int8
		b uint8
	}
	v := s1{127, 255}
	nv := (*s1)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "spew_test.s1"
	vt2 := "int8"
	vt3 := "uint8"
	vs := "{\n a: (" + vt2 + ") 127,\n b: (" + vt3 + ") 255\n}"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
	// Struct that contains another struct.
	type s2 struct {
		s1 s1
		b  bool
	}
	v2 := s2{s1{127, 255}, true}
	nv2 := (*s2)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "spew_test.s2"
	v2t2 := "spew_test.s1"
	v2t3 := "int8"
	v2t4 := "uint8"
	v2t5 := "bool"
	v2s := "{\n s1: (" + v2t2 + ") {\n a: (" + v2t3 + ") 127,\n b: (" +
		v2t4 + ") 255\n },\n b: (" + v2t5 + ") true\n}"
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
	// Struct that contains custom type with Stringer pointer interface via both
	// exported and unexported fields.
	type s3 struct {
		s pstringer
		S pstringer
	}
	v3 := s3{"test", "test2"}
	nv3 := (*s3)(nil)
	pv3 := &v3
	v3Addr := fmt.Sprintf("%p", pv3)
	pv3Addr := fmt.Sprintf("%p", &pv3)
	v3t := "spew_test.s3"
	v3t2 := "spew_test.pstringer"
	v3s := "{\n s: (" + v3t2 + ") stringer test,\n S: (" + v3t2 +
		") stringer test2\n}"
	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
}
// addUintptrDumpTests registers the expected Dump output for uintptr values:
// a zero ("null") uintptr, which dumps as <nil>, and a uintptr holding the
// address of a real variable, which dumps as that address.
func addUintptrDumpTests() {
	// Null pointer.
	v := uintptr(0)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "uintptr"
	vs := "<nil>"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	// Address of real variable.
	i := 1
	v2 := uintptr(unsafe.Pointer(&i))
	nv2 := (*uintptr)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "uintptr"
	v2s := fmt.Sprintf("%p", &i)
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
}
// addUnsafePointerDumpTests registers the expected Dump output for
// unsafe.Pointer values: a null pointer (dumped as <nil>) and the address of
// a real variable, each also dumped through one and two levels of
// indirection plus a typed nil pointer.
func addUnsafePointerDumpTests() {
	// Null pointer.
	v := unsafe.Pointer(uintptr(0))
	nv := (*unsafe.Pointer)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "unsafe.Pointer"
	vs := "<nil>"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
	// Address of real variable.
	i := 1
	v2 := unsafe.Pointer(&i)
	nv2 := (*unsafe.Pointer)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "unsafe.Pointer"
	v2s := fmt.Sprintf("%p", &i)
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	// Use a dedicated typed nil for this section rather than re-registering
	// the identical nv case, mirroring every sibling addXDumpTests function
	// (e.g. addUintptrDumpTests).
	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
}
// addChanDumpTests registers the expected Dump output for channels: a nil
// chan int (dumped as <nil>) and a real channel (dumped as its address).
func addChanDumpTests() {
	// Nil channel.
	var v chan int
	pv := &v
	nv := (*chan int)(nil)
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "chan int"
	vs := "<nil>"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
	// Real channel.
	v2 := make(chan int)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "chan int"
	v2s := fmt.Sprintf("%p", v2)
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
}
// addFuncDumpTests registers the expected Dump output for function values
// with various signatures; functions dump as their address, captured with %p
// at registration time.
func addFuncDumpTests() {
	// Function with no params and no returns.
	v := addIntDumpTests
	nv := (*func())(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "func()"
	vs := fmt.Sprintf("%p", v)
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
	// Function with param and no returns.
	v2 := TestDump
	nv2 := (*func(*testing.T))(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "func(*testing.T)"
	v2s := fmt.Sprintf("%p", v2)
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
	// Function with multiple params and multiple returns.
	var v3 = func(i int, s string) (b bool, err error) {
		return true, nil
	}
	// gofmt: a space is required between the parameter and result lists.
	nv3 := (*func(int, string) (bool, error))(nil)
	pv3 := &v3
	v3Addr := fmt.Sprintf("%p", pv3)
	pv3Addr := fmt.Sprintf("%p", &pv3)
	v3t := "func(int, string) (bool, error)"
	v3s := fmt.Sprintf("%p", v3)
	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
}
// addCircularDumpTests registers the expected Dump output for cyclic data:
// a self-referencing struct, two structs referencing each other (xref1 and
// xref2), and a three-struct indirect cycle. Cycles are expected to dump as
// "<already shown>" once a pointer repeats; the expected text differs
// depending on whether the dump starts at the value or at a pointer to it.
func addCircularDumpTests() {
	// Struct that is circular through self referencing.
	type circular struct {
		c *circular
	}
	v := circular{nil}
	v.c = &v
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "spew_test.circular"
	vs := "{\n c: (*" + vt + ")(" + vAddr + ")({\n c: (*" + vt + ")(" +
		vAddr + ")(<already shown>)\n })\n}"
	vs2 := "{\n c: (*" + vt + ")(" + vAddr + ")(<already shown>)\n}"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs2+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs2+")\n")
	// Structs that are circular through cross referencing.
	v2 := xref1{nil}
	ts2 := xref2{&v2}
	v2.ps2 = &ts2
	pv2 := &v2
	ts2Addr := fmt.Sprintf("%p", &ts2)
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "spew_test.xref1"
	v2t2 := "spew_test.xref2"
	v2s := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t +
		")(" + v2Addr + ")({\n ps2: (*" + v2t2 + ")(" + ts2Addr +
		")(<already shown>)\n })\n })\n}"
	v2s2 := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t +
		")(" + v2Addr + ")(<already shown>)\n })\n}"
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s2+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s2+")\n")
	// Structs that are indirectly circular.
	v3 := indirCir1{nil}
	tic2 := indirCir2{nil}
	tic3 := indirCir3{&v3}
	tic2.ps3 = &tic3
	v3.ps2 = &tic2
	pv3 := &v3
	tic2Addr := fmt.Sprintf("%p", &tic2)
	tic3Addr := fmt.Sprintf("%p", &tic3)
	v3Addr := fmt.Sprintf("%p", pv3)
	pv3Addr := fmt.Sprintf("%p", &pv3)
	v3t := "spew_test.indirCir1"
	v3t2 := "spew_test.indirCir2"
	v3t3 := "spew_test.indirCir3"
	v3s := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 +
		")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr +
		")({\n ps2: (*" + v3t2 + ")(" + tic2Addr +
		")(<already shown>)\n })\n })\n })\n}"
	v3s2 := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 +
		")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr +
		")(<already shown>)\n })\n })\n}"
	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s2+")\n")
	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s2+")\n")
}
// TestDump executes all of the tests described by dumpTests. Each addX
// helper appends cases (value plus its exact expected Fdump output,
// including pointer addresses captured at registration time) to the shared
// dumpTests slice before the loop runs them.
func TestDump(t *testing.T) {
	// Setup tests.
	addIntDumpTests()
	addUintDumpTests()
	addBoolDumpTests()
	addFloatDumpTests()
	addComplexDumpTests()
	addArrayDumpTests()
	addSliceDumpTests()
	addStringDumpTests()
	addNilInterfaceDumpTests()
	addMapDumpTests()
	addStructDumpTests()
	addUintptrDumpTests()
	addUnsafePointerDumpTests()
	addChanDumpTests()
	addFuncDumpTests()
	addCircularDumpTests()
	t.Logf("Running %d tests", len(dumpTests))
	for i, test := range dumpTests {
		buf := new(bytes.Buffer)
		spew.Fdump(buf, test.in)
		s := buf.String()
		if test.want != s {
			t.Errorf("Dump #%d\n got: %s want: %s", i, s, test.want)
			continue
		}
	}
}
|
package quiclatest
import (
"net"
"sync"
"github.com/ami-GS/gQUIC/latest/qtype"
)
// Server is a QUIC server bound to a single UDP socket. It demultiplexes
// incoming packets to per-connection Sessions, keyed primarily by
// destination connection ID and secondarily by remote address.
type Server struct {
	conn net.PacketConn
	// TODO: replace to SessionStore
	sessions map[string]*Session //ConnectionID.String():Session
	addrSessions map[string]*Session //remoteAddr.String():Session, for identifying a single connection when the packet carries a zero-length destination ID
	// sessionsMutex guards both sessions and addrSessions.
	sessionsMutex *sync.Mutex
	SupportedVersions []qtype.Version
	// TODO: consider here? or in utility? or decide dynamically?
	SessionLimitNum int
	NumHandshake int
}
// ListenAddr resolves addr, binds a UDP socket on it, and returns a Server
// that is already serving packets on a background goroutine.
func ListenAddr(addr string) (*Server, error) {
	laddr, err := net.ResolveUDPAddr("udp", addr)
	if err != nil {
		return nil, err
	}
	udpConn, err := net.ListenUDP("udp", laddr)
	if err != nil {
		return nil, err
	}
	server := &Server{
		conn:              udpConn,
		sessions:          map[string]*Session{},
		addrSessions:      map[string]*Session{},
		sessionsMutex:     &sync.Mutex{},
		SupportedVersions: qtype.SupportedVersions,
		// TODO: experimental
		SessionLimitNum: 1000,
	}
	go server.Serve()
	return server, nil
}
// Serve reads datagrams from the UDP socket in an endless loop, parses each
// into packets, and dispatches them. It only returns on error, closing the
// socket first (except for parse errors, where Close is invoked instead).
func (s *Server) Serve() error {
	// One reusable read buffer sized to the IPv4 MTU.
	// NOTE(review): the buffer is overwritten on the next ReadFrom — this
	// assumes ParsePackets/handlePacket do not retain slices of it across
	// iterations; confirm against ParsePackets.
	buffer := make([]byte, qtype.MTUIPv4)
	for {
		length, remoteAddr, err := s.conn.ReadFrom(buffer)
		if err != nil {
			s.conn.Close()
			return err
		}
		packets, _, err := ParsePackets(buffer[:length])
		if err != nil {
			// TODO: this type assertion is dangerous
			_ = s.Close(err.(qtype.TransportError))
			return err
		}
		// A single datagram may contain several coalesced packets.
		for _, p := range packets {
			err = s.handlePacket(remoteAddr, p)
			if err != nil {
				s.conn.Close()
				return err
			}
		}
	}
}
// Close sends a CONNECTION_CLOSE carrying err to every live session in
// parallel, waits for all of them to finish, and then closes the UDP socket.
func (s *Server) Close(err qtype.TransportError) error {
	frame := NewConnectionCloseFrame(err, "error: experimental")

	// Snapshot the session map under the lock: handlePacket and
	// DeleteSessionFromMap mutate it concurrently, so ranging over it
	// unlocked (as the original did) is a data race.
	s.sessionsMutex.Lock()
	sessions := make([]*Session, 0, len(s.sessions))
	for _, session := range s.sessions {
		sessions = append(sessions, session)
	}
	s.sessionsMutex.Unlock()

	wg := &sync.WaitGroup{}
	for _, session := range sessions {
		wg.Add(1)
		go func(sess *Session) {
			// this sends connection close frame to peer
			sess.Close(frame)
			wg.Done()
		}(session)
	}
	wg.Wait()
	// close conn
	s.conn.Close()
	return nil
}
// handlePacket routes a single parsed packet to an existing session, or
// creates a new session for an unknown long-header destination connection
// ID. Packets that cannot be handled (unsupported version, short header
// with no matching session) are dropped with a nil return.
func (s *Server) handlePacket(remoteAddr net.Addr, packet Packet) error {
	ph := packet.GetHeader()
	srcID, destID := ph.GetConnectionIDPair()
	lh, isLongHeader := ph.(*LongHeader)
	// need to check session existence?
	if isLongHeader && !s.IsVersionSupported(lh.Version) {
		// Best effort: a failed negotiation send just drops the packet.
		_ = s.SendVersionNegotiationPacket(srcID, destID, remoteAddr)
		return nil
	}
	var sess *Session
	var ok bool
	if len(destID) != 0 {
		s.sessionsMutex.Lock()
		sess, ok = s.sessions[destID.String()]
		s.sessionsMutex.Unlock()
		if !ok {
			if !isLongHeader {
				// Only long-header packets can open a session. The
				// original dereferenced lh below without this guard and
				// crashed with a nil pointer when a short-header packet
				// arrived for an unknown connection.
				return nil
			}
			if !s.IsAcceptableSession(lh.Version, srcID, destID, remoteAddr) {
				return nil
			}
			// TODO: have to reset Session when Retry Packet sent to client. then this can use DestID for packet matching
			sess = NewSession(&Connection{conn: s.conn, remoteAddr: remoteAddr}, destID, srcID, false)
			sess.server = s
			// packet handler for each session on server is now defined in session.go
			sess.packetHandler = sess
			sess.versionDecided = lh.Version
			go sess.Run()
			// might be deleted after handling packet
			s.sessionsMutex.Lock()
			s.sessions[destID.String()] = sess
			s.addrSessions[remoteAddr.String()] = sess
			s.sessionsMutex.Unlock()
		}
	} else {
		// addrSessions is guarded by the same mutex as sessions.
		s.sessionsMutex.Lock()
		sess, ok = s.addrSessions[remoteAddr.String()]
		s.sessionsMutex.Unlock()
		if !ok {
			// drop packet if no corresponding connection
			return nil
		}
	}
	sess.HandlePacket(packet)
	return nil
}
// IsVersionSupported reports whether version is one of the QUIC versions
// this server is willing to speak.
func (s *Server) IsVersionSupported(version qtype.Version) bool {
	for _, supported := range s.SupportedVersions {
		if supported == version {
			return true
		}
	}
	return false
}
// IsAcceptableSession reports whether the server has capacity for one more
// session. When the limit is reached it tries, best effort, to notify the
// peer with a SERVER_BUSY connection close and returns false.
func (s *Server) IsAcceptableSession(version qtype.Version, srcID, destID qtype.ConnectionID, remoteAddr net.Addr) bool {
	s.sessionsMutex.Lock()
	sessionNum := len(s.sessions)
	s.sessionsMutex.Unlock()
	if sessionNum < s.SessionLimitNum {
		return true
	}
	p := NewHandshakePacket(version, srcID, destID, qtype.InitialPacketNumber,
		[]Frame{NewConnectionCloseFrame(qtype.ServerBusy, "The number of session reached server limit")})
	wire, err := p.GetWire()
	if err != nil {
		// Cannot serialize the busy notice (the original ignored this and
		// wrote the wire anyway); the session is refused regardless.
		return false
	}
	// Best effort: the refusal stands whether or not the notice arrives.
	_, _ = s.conn.WriteTo(wire, remoteAddr)
	return false
}
// DeleteSessionFromMap removes the session keyed by ID from the connection
// ID map. Deleting an absent key is a no-op.
func (s *Server) DeleteSessionFromMap(ID qtype.ConnectionID) {
	key := ID.String()
	s.sessionsMutex.Lock()
	delete(s.sessions, key)
	s.sessionsMutex.Unlock()
}
// ChangeConnectionID re-keys an existing session from fromID to toID.
// An unknown fromID is ignored: the original unconditionally stored the
// (nil) lookup result under toID, making later lookups appear to succeed
// with a nil *Session.
func (s *Server) ChangeConnectionID(fromID, toID qtype.ConnectionID) {
	s.sessionsMutex.Lock()
	defer s.sessionsMutex.Unlock()
	session, ok := s.sessions[fromID.String()]
	if !ok {
		return
	}
	delete(s.sessions, fromID.String())
	s.sessions[toID.String()] = session
}
// SendVersionNegotiationPacket tells the peer at remoteAddr which QUIC
// versions this server supports. It returns any serialization or send error.
func (s *Server) SendVersionNegotiationPacket(srcID, destID qtype.ConnectionID, remoteAddr net.Addr) error {
	p := NewVersionNegotiationPacket(srcID, destID, s.SupportedVersions)
	wire, err := p.GetWire()
	if err != nil {
		// Do not write a half-built packet (the original swallowed this
		// error with an empty if and sent wire anyway).
		return err
	}
	_, err = s.conn.WriteTo(wire, remoteAddr)
	return err
}
Check that the long header is available before dereferencing it
package quiclatest
import (
"net"
"sync"
"github.com/ami-GS/gQUIC/latest/qtype"
)
// Server is a QUIC server bound to a single UDP socket. It demultiplexes
// incoming packets to per-connection Sessions, keyed primarily by
// destination connection ID and secondarily by remote address.
type Server struct {
	conn net.PacketConn
	// TODO: replace to SessionStore
	sessions map[string]*Session //ConnectionID.String():Session
	addrSessions map[string]*Session //remoteAddr.String():Session, for identifying a single connection when the packet carries a zero-length destination ID
	// sessionsMutex guards both sessions and addrSessions.
	sessionsMutex *sync.Mutex
	SupportedVersions []qtype.Version
	// TODO: consider here? or in utility? or decide dynamically?
	SessionLimitNum int
	NumHandshake int
}
// ListenAddr resolves addr, binds a UDP socket there, and returns a Server
// already serving packets on a background goroutine.
func ListenAddr(addr string) (*Server, error) {
	resolved, err := net.ResolveUDPAddr("udp", addr)
	if err != nil {
		return nil, err
	}
	sock, err := net.ListenUDP("udp", resolved)
	if err != nil {
		return nil, err
	}
	srv := &Server{
		conn:              sock,
		sessions:          map[string]*Session{},
		addrSessions:      map[string]*Session{},
		sessionsMutex:     &sync.Mutex{},
		SupportedVersions: qtype.SupportedVersions,
		// TODO: experimental
		SessionLimitNum: 1000,
	}
	go srv.Serve()
	return srv, nil
}
// Serve reads datagrams from the UDP socket in an endless loop, parses each
// into packets, and dispatches them. It only returns on error, closing the
// socket first (except for parse errors, where Close is invoked instead).
func (s *Server) Serve() error {
	// One reusable read buffer sized to the IPv4 MTU.
	// NOTE(review): the buffer is overwritten on the next ReadFrom — this
	// assumes ParsePackets/handlePacket do not retain slices of it across
	// iterations; confirm against ParsePackets.
	buffer := make([]byte, qtype.MTUIPv4)
	for {
		length, remoteAddr, err := s.conn.ReadFrom(buffer)
		if err != nil {
			s.conn.Close()
			return err
		}
		packets, _, err := ParsePackets(buffer[:length])
		if err != nil {
			// TODO: this type assertion is dangerous
			_ = s.Close(err.(qtype.TransportError))
			return err
		}
		// A single datagram may contain several coalesced packets.
		for _, p := range packets {
			err = s.handlePacket(remoteAddr, p)
			if err != nil {
				s.conn.Close()
				return err
			}
		}
	}
}
// Close sends a CONNECTION_CLOSE carrying err to every live session in
// parallel, waits for all of them to finish, and then closes the UDP socket.
func (s *Server) Close(err qtype.TransportError) error {
	frame := NewConnectionCloseFrame(err, "error: experimental")

	// Snapshot the session map under the lock: handlePacket and
	// DeleteSessionFromMap mutate it concurrently, so ranging over it
	// unlocked (as the original did) is a data race.
	s.sessionsMutex.Lock()
	sessions := make([]*Session, 0, len(s.sessions))
	for _, session := range s.sessions {
		sessions = append(sessions, session)
	}
	s.sessionsMutex.Unlock()

	wg := &sync.WaitGroup{}
	for _, session := range sessions {
		wg.Add(1)
		go func(sess *Session) {
			// this sends connection close frame to peer
			sess.Close(frame)
			wg.Done()
		}(session)
	}
	wg.Wait()
	// close conn
	s.conn.Close()
	return nil
}
// handlePacket routes a single parsed packet to an existing session, or
// creates a new session for an unknown long-header destination connection
// ID. Packets that cannot be handled (unsupported version, short header
// with no matching session) are dropped with a nil return.
func (s *Server) handlePacket(remoteAddr net.Addr, packet Packet) error {
	ph := packet.GetHeader()
	srcID, destID := ph.GetConnectionIDPair()
	lh, longHeaderOK := ph.(*LongHeader)
	// need to check session existence?
	if longHeaderOK && !s.IsVersionSupported(lh.Version) {
		// Best effort: a failed negotiation send just drops the packet.
		_ = s.SendVersionNegotiationPacket(srcID, destID, remoteAddr)
		return nil
	}
	var ok bool
	var sess *Session
	if len(destID) != 0 {
		s.sessionsMutex.Lock()
		sess, ok = s.sessions[destID.String()]
		s.sessionsMutex.Unlock()
		if !ok {
			if !longHeaderOK {
				// Only long-header packets may open a session. Drop the
				// packet instead of panicking: a single short-header
				// packet from an arbitrary peer must not crash the
				// whole server.
				return nil
			}
			if !s.IsAcceptableSession(lh.Version, srcID, destID, remoteAddr) {
				return nil
			}
			// TODO: have to reset Session when Retry Packet sent to client. then this can use DestID for packet matching
			sess = NewSession(&Connection{conn: s.conn, remoteAddr: remoteAddr}, destID, srcID, false)
			sess.server = s
			// packet handler for each session on server is now defined in session.go
			sess.packetHandler = sess
			sess.versionDecided = lh.Version
			go sess.Run()
			// might be deleted after handling packet
			s.sessionsMutex.Lock()
			s.sessions[destID.String()] = sess
			s.addrSessions[remoteAddr.String()] = sess
			s.sessionsMutex.Unlock()
		}
	} else {
		// addrSessions is guarded by the same mutex as sessions.
		s.sessionsMutex.Lock()
		sess, ok = s.addrSessions[remoteAddr.String()]
		s.sessionsMutex.Unlock()
		if !ok {
			// drop packet if no corresponding connection
			return nil
		}
	}
	sess.HandlePacket(packet)
	return nil
}
// IsVersionSupported reports whether version is one of the QUIC versions
// this server is willing to speak.
func (s *Server) IsVersionSupported(version qtype.Version) bool {
	for i := range s.SupportedVersions {
		if s.SupportedVersions[i] == version {
			return true
		}
	}
	return false
}
// IsAcceptableSession reports whether the server has capacity for one more
// session. When the limit is reached it tries, best effort, to notify the
// peer with a SERVER_BUSY connection close and returns false.
func (s *Server) IsAcceptableSession(version qtype.Version, srcID, destID qtype.ConnectionID, remoteAddr net.Addr) bool {
	s.sessionsMutex.Lock()
	sessionNum := len(s.sessions)
	s.sessionsMutex.Unlock()
	if sessionNum < s.SessionLimitNum {
		return true
	}
	p := NewHandshakePacket(version, srcID, destID, qtype.InitialPacketNumber,
		[]Frame{NewConnectionCloseFrame(qtype.ServerBusy, "The number of session reached server limit")})
	wire, err := p.GetWire()
	if err != nil {
		// Cannot serialize the busy notice (the original ignored this and
		// wrote the wire anyway); the session is refused regardless.
		return false
	}
	// Best effort: the refusal stands whether or not the notice arrives.
	_, _ = s.conn.WriteTo(wire, remoteAddr)
	return false
}
// DeleteSessionFromMap removes the session keyed by ID from the connection
// ID map. Deleting an absent key is a no-op.
func (s *Server) DeleteSessionFromMap(ID qtype.ConnectionID) {
	s.sessionsMutex.Lock()
	delete(s.sessions, ID.String())
	s.sessionsMutex.Unlock()
}
// ChangeConnectionID re-keys an existing session from fromID to toID.
// An unknown fromID is ignored: the original unconditionally stored the
// (nil) lookup result under toID, making later lookups appear to succeed
// with a nil *Session.
func (s *Server) ChangeConnectionID(fromID, toID qtype.ConnectionID) {
	s.sessionsMutex.Lock()
	defer s.sessionsMutex.Unlock()
	session, ok := s.sessions[fromID.String()]
	if !ok {
		return
	}
	delete(s.sessions, fromID.String())
	s.sessions[toID.String()] = session
}
// SendVersionNegotiationPacket tells the peer at remoteAddr which QUIC
// versions this server supports. It returns any serialization or send error.
func (s *Server) SendVersionNegotiationPacket(srcID, destID qtype.ConnectionID, remoteAddr net.Addr) error {
	p := NewVersionNegotiationPacket(srcID, destID, s.SupportedVersions)
	wire, err := p.GetWire()
	if err != nil {
		// Do not write a half-built packet (the original swallowed this
		// error with an empty if and sent wire anyway).
		return err
	}
	_, err = s.conn.WriteTo(wire, remoteAddr)
	return err
}
|
package torrent
import (
"github.com/anacrolix/missinggo/iter"
"github.com/anacrolix/missinggo/v2/bitmap"
pp "github.com/anacrolix/torrent/peer_protocol"
)
// requestStrategyDefaults provides default implementations for
// requestStrategy methods. It is stateless, so it can be embedded in, or
// delegated to from, concrete strategies.
type requestStrategyDefaults struct{}
// hooks returns no-op request lifecycle hooks; strategies that do not need
// to observe sent or deleted requests can use these defaults unchanged.
func (requestStrategyDefaults) hooks() requestStrategyHooks {
	noop := func(request) {}
	return requestStrategyHooks{
		sentRequest:    noop,
		deletedRequest: noop,
	}
}
// iterUndirtiedChunks calls f with the chunk spec of each not-yet-dirtied
// chunk of p, visiting chunks in a permuted order via iter.ForPerm, and
// propagates f's boolean result through the iteration.
func (requestStrategyDefaults) iterUndirtiedChunks(p requestStrategyPiece, f func(chunkSpec) bool) bool {
	// Flip a copy of the dirty-chunk bitmap so the set bits mark the
	// undirtied chunks; the copy keeps the piece's own bitmap untouched.
	chunkIndices := p.dirtyChunks().Copy()
	chunkIndices.FlipRange(0, bitmap.BitIndex(p.numChunks()))
	return iter.ForPerm(chunkIndices.Len(), func(i int) bool {
		// Select finds the i-th set bit; a failure here means the bitmap
		// disagrees with its own length, which is a programming error.
		ci, err := chunkIndices.RB.Select(uint32(i))
		if err != nil {
			panic(err)
		}
		return f(p.chunkIndexRequest(pp.Integer(ci)).chunkSpec)
	})
}
// nominalMaxRequests returns the target number of outstanding requests for
// the connection: the useful chunks read minus the wasted ones, floored at 64.
func (requestStrategyDefaults) nominalMaxRequests(cn requestStrategyConnection) int {
	useful := cn.stats().ChunksReadUseful.Int64()
	wasted := cn.stats().ChunksRead.Int64() - useful
	return int(max(64, useful-wasted))
}
// piecePriority returns prio unchanged: the default strategy applies no
// per-connection adjustment to piece priorities.
func (requestStrategyDefaults) piecePriority(cn requestStrategyConnection, piece pieceIndex, tpp piecePriority, prio int) int {
	return prio
}
// shouldRequestWithoutBias always reports false: by default, request
// ordering keeps whatever bias the strategy normally applies.
func (requestStrategyDefaults) shouldRequestWithoutBias(cn requestStrategyConnection) bool {
	return false
}
Readability
package torrent
import (
"github.com/anacrolix/missinggo/iter"
"github.com/anacrolix/missinggo/v2/bitmap"
pp "github.com/anacrolix/torrent/peer_protocol"
)
// requestStrategyDefaults provides default implementations for
// requestStrategy methods. It is stateless, so it can be embedded in, or
// delegated to from, concrete strategies.
type requestStrategyDefaults struct{}
// hooks returns no-op request lifecycle hooks; strategies that do not need
// to observe sent or deleted requests can use these defaults unchanged.
func (requestStrategyDefaults) hooks() requestStrategyHooks {
	ignore := func(request) {}
	return requestStrategyHooks{sentRequest: ignore, deletedRequest: ignore}
}
// iterUndirtiedChunks calls f with the chunk spec of each not-yet-dirtied
// chunk of p, visiting chunks in a permuted order via iter.ForPerm, and
// propagates f's boolean result through the iteration.
func (requestStrategyDefaults) iterUndirtiedChunks(p requestStrategyPiece, f func(chunkSpec) bool) bool {
	// Flip a copy of the dirty-chunk bitmap so the set bits mark the
	// undirtied chunks; the copy keeps the piece's own bitmap untouched.
	chunkIndices := p.dirtyChunks().Copy()
	chunkIndices.FlipRange(0, bitmap.BitIndex(p.numChunks()))
	return iter.ForPerm(chunkIndices.Len(), func(i int) bool {
		// Select finds the i-th set bit; a failure here means the bitmap
		// disagrees with its own length, which is a programming error.
		ci, err := chunkIndices.RB.Select(uint32(i))
		if err != nil {
			panic(err)
		}
		return f(p.chunkIndexRequest(pp.Integer(ci)).chunkSpec)
	})
}
// nominalMaxRequests returns the target number of outstanding requests for
// the connection: the useful chunks read minus the wasted ones, floored at 64.
func (requestStrategyDefaults) nominalMaxRequests(cn requestStrategyConnection) int {
	stats := cn.stats()
	useful := stats.ChunksReadUseful.Int64()
	wasted := stats.ChunksRead.Int64() - useful
	return int(max(64, useful-wasted))
}
// piecePriority returns prio unchanged: the default strategy applies no
// per-connection adjustment to piece priorities.
func (requestStrategyDefaults) piecePriority(cn requestStrategyConnection, piece pieceIndex, tpp piecePriority, prio int) int {
	return prio
}
// shouldRequestWithoutBias always reports false: by default, request
// ordering keeps whatever bias the strategy normally applies.
func (requestStrategyDefaults) shouldRequestWithoutBias(cn requestStrategyConnection) bool {
	return false
}
|
package layout
import (
"fmt"
"github.com/lysrt/bro/css"
"github.com/lysrt/bro/style"
)
// Dimensions describes one box of the layout tree: the content rectangle
// plus the padding, border, and margin edge widths surrounding it.
type Dimensions struct {
	// Position of the content area relative to the document origin:
	Content Rect
	// Surrounding edges:
	// NOTE(review): only Border is exported while padding and margin stay
	// package-private — confirm this asymmetry is intentional.
	padding, Border, margin EdgeSizes
}
// marginBox returns the area covered by the content area plus padding,
// borders, and margin — the outermost rectangle of the box.
func (d Dimensions) marginBox() Rect {
	return d.BorderBox().expandedBy(d.margin)
}
// BorderBox returns the area covered by the content area plus padding and
// borders (but not margin).
func (d Dimensions) BorderBox() Rect {
	return d.paddingBox().expandedBy(d.Border)
}
// paddingBox returns the area covered by the content area plus its padding
// (but not borders or margin).
func (d Dimensions) paddingBox() Rect {
	return d.Content.expandedBy(d.padding)
}
// Rect is an axis-aligned rectangle: origin (X, Y) plus Width and Height,
// in CSS pixels.
type Rect struct {
	X, Y, Width, Height float64
}

// String renders the rectangle as "(x,y : w:W, h:H)" with fractional parts
// rounded away by the %.f verb.
func (r Rect) String() string {
	return fmt.Sprintf("(%.f,%.f : w:%.f, h:%.f)", r.X, r.Y, r.Width, r.Height)
}

// expandedBy grows the rectangle outward by the given edge sizes: the
// origin moves up and to the left, while the width and height absorb both
// opposing edges.
func (r Rect) expandedBy(edge EdgeSizes) Rect {
	out := r
	out.X -= edge.Left
	out.Y -= edge.Top
	out.Width += edge.Left + edge.Right
	out.Height += edge.Top + edge.Bottom
	return out
}

// EdgeSizes holds a per-side thickness (used for padding, border widths,
// and margins).
type EdgeSizes struct {
	Left, Right, Top, Bottom float64
}
// BoxType classifies a LayoutBox in the layout tree.
type BoxType string

// All three constants are explicitly typed: in the original only BlockNode
// was a BoxType, while InlineNode and AnonymousBlock were untyped string
// constants, which defaulted to type string in short variable declarations.
const (
	// BlockNode is a block-level box.
	BlockNode BoxType = "block"
	// InlineNode is an inline-level box.
	InlineNode BoxType = "inline"
	// AnonymousBlock is a generated box wrapping inline children of a block.
	AnonymousBlock BoxType = "anon"
)
// LayoutBox is one node of the layout tree: computed dimensions, the kind
// of box, the styled DOM node it renders (nil for anonymous boxes), and its
// child boxes.
type LayoutBox struct {
	Dimensions Dimensions
	BoxType    BoxType
	StyledNode *style.StyledNode
	Children   []*LayoutBox
}
// newLayoutBox builds a childless LayoutBox of the given type wrapping
// styledNode (nil for anonymous boxes).
func newLayoutBox(boxType BoxType, styledNode *style.StyledNode) *LayoutBox {
	// Children is deliberately left nil; append allocates on first use.
	// (The original declared an intermediate nil slice variable, which was
	// a no-op.)
	return &LayoutBox{
		BoxType:    boxType,
		StyledNode: styledNode,
	}
}
// GenerateLayoutTree builds the layout tree for styleTree. Block children
// attach directly to the root box, inline children are routed into an
// (possibly anonymous) inline container, and display:none children are
// dropped. It panics if the root itself has display:none, since there would
// be nothing to lay out.
func GenerateLayoutTree(styleTree *style.StyledNode) *LayoutBox {
	var boxType BoxType
	switch styleTree.Display() {
	case style.Inline:
		boxType = InlineNode
	case style.Block:
		boxType = BlockNode
	case style.None:
		panic("Root StyledNode has display:none")
	}
	root := newLayoutBox(boxType, styleTree)
	for _, child := range styleTree.Children {
		switch child.Display() {
		case style.Inline:
			ic := root.getInlineContainer()
			ic.Children = append(ic.Children, GenerateLayoutTree(child))
		case style.Block:
			root.Children = append(root.Children, GenerateLayoutTree(child))
		case style.None:
			// Skip
		}
	}
	return root
}
// getInlineContainer returns the box that should receive an inline child.
// Inline and anonymous boxes take inline children directly; a block box
// routes them into a trailing anonymous block, reusing the last child when
// it is already anonymous and creating one otherwise.
func (box *LayoutBox) getInlineContainer() *LayoutBox {
	switch box.BoxType {
	case InlineNode, AnonymousBlock:
		return box
	case BlockNode:
		// Reuse a just-generated anonymous block if one ends the child list.
		if n := len(box.Children); n > 0 {
			if last := box.Children[n-1]; last.BoxType == AnonymousBlock {
				return last
			}
		}
		anon := newLayoutBox(AnonymousBlock, nil)
		box.Children = append(box.Children, anon)
		return anon
	}
	panic("No more cases to switch")
}
// Layout computes the box's dimensions within containingBlock. Only block
// boxes are implemented; inline and anonymous layout are still TODO and
// currently panic.
func (box *LayoutBox) Layout(containingBlock Dimensions) {
	switch box.BoxType {
	case InlineNode:
		// TODO
		panic("Inline Node Unimplemented")
	case BlockNode:
		box.layoutBlock(containingBlock)
	case AnonymousBlock:
		// TODO
		panic("Anonymous Block Unimplemented")
	}
}
// layoutBlock lays out a block-level box: width and position depend on the
// parent (top-down), then children are laid out, and finally the height is
// derived from the children (bottom-up). The order of these four steps is
// significant.
func (box *LayoutBox) layoutBlock(containingBlock Dimensions) {
	// First go down the LayoutTree to compute the widths from parents' widths
	// Then go up the tree to compute heights from children's heights
	box.calculateWidth(containingBlock)
	box.calculatePosition(containingBlock)
	box.layoutBlockChildren()
	box.calculateHeight()
}
// calculateWidth resolves the box's used width and horizontal margins,
// borders, and paddings from its style, following the CSS block-width
// constraint equation (https://www.w3.org/TR/CSS2/visudet.html#blockwidth):
// margins + borders + paddings + width must equal the containing block's
// content width, with "auto" values absorbing the difference. The branch
// order below mirrors the spec's case analysis and must not be reordered.
func (box *LayoutBox) calculateWidth(containingBlock Dimensions) {
	style := box.StyledNode
	// width has initial value auto
	auto := css.Value{Keyword: "auto"}
	width, ok := style.Value("width")
	if !ok {
		width = auto
	}
	// margin, border, and padding have initial value 0
	zero := css.Value{Length: css.Length{Quantity: 0.0, Unit: css.Px}}
	// Each side falls back from its specific property to the shorthand
	// (e.g. margin-left -> margin) and finally to zero.
	var marginLeft, marginRight, paddingLeft, paddingRight, borderLeft, borderRight css.Value
	if marginLeft, ok = style.Value("margin-left"); !ok {
		if marginLeft, ok = style.Value("margin"); !ok {
			marginLeft = zero
		}
	}
	if marginRight, ok = style.Value("margin-right"); !ok {
		if marginRight, ok = style.Value("margin"); !ok {
			marginRight = zero
		}
	}
	if borderLeft, ok = style.Value("border-left-width"); !ok {
		if borderLeft, ok = style.Value("border-width"); !ok {
			borderLeft = zero
		}
	}
	if borderRight, ok = style.Value("border-right-width"); !ok {
		if borderRight, ok = style.Value("border-width"); !ok {
			borderRight = zero
		}
	}
	if paddingLeft, ok = style.Value("padding-left"); !ok {
		if paddingLeft, ok = style.Value("padding"); !ok {
			paddingLeft = zero
		}
	}
	if paddingRight, ok = style.Value("padding-right"); !ok {
		if paddingRight, ok = style.Value("padding"); !ok {
			paddingRight = zero
		}
	}
	// Formula for block width: https://www.w3.org/TR/CSS2/visudet.html#blockwidth
	// Auto must count as zero
	// NOTE(review): this relies on ToPx returning 0 for the "auto" keyword
	// value — confirm in css.Value.ToPx.
	total := marginLeft.ToPx() + marginRight.ToPx() + borderLeft.ToPx() + borderRight.ToPx() +
		paddingLeft.ToPx() + paddingRight.ToPx() + width.ToPx()
	// Checking if the box is too big
	// If width is not auto and the total is wider than the container, treat auto margins as 0.
	if width != auto && total > containingBlock.Content.Width {
		if marginLeft == auto {
			marginLeft = css.Value{Length: css.Length{Quantity: 0.0, Unit: css.Px}}
		}
		if marginRight == auto {
			marginRight = css.Value{Length: css.Length{Quantity: 0.0, Unit: css.Px}}
		}
	}
	// Check for over or underflow, and adjust "auto" dimensions accordingly.
	// underflow is the leftover space (negative when the box overflows).
	underflow := containingBlock.Content.Width - total
	widthAuto := width == auto
	marginLeftAuto := marginLeft == auto
	marginRightAuto := marginRight == auto
	if !widthAuto && !marginLeftAuto && !marginRightAuto {
		// If the values are overconstrained, calculate margin_right
		marginRight = css.Value{Length: css.Length{Quantity: marginRight.ToPx() + underflow, Unit: css.Px}}
	} else if !widthAuto && !marginLeftAuto && marginRightAuto {
		// If exactly one size is auto, its used value follows from the equality
		marginRight = css.Value{Length: css.Length{Quantity: underflow, Unit: css.Px}}
	} else if !widthAuto && marginLeftAuto && !marginRightAuto {
		// Idem
		marginLeft = css.Value{Length: css.Length{Quantity: underflow, Unit: css.Px}}
	} else if widthAuto {
		// If width is set to auto, any other auto values become 0
		if marginLeft == auto {
			marginLeft = css.Value{Length: css.Length{Quantity: 0.0, Unit: css.Px}}
		}
		if marginRight == auto {
			marginRight = css.Value{Length: css.Length{Quantity: 0.0, Unit: css.Px}}
		}
		if underflow >= 0.0 {
			// Expand width to fill the underflow
			width = css.Value{Length: css.Length{Quantity: underflow, Unit: css.Px}}
		} else {
			// Width can't be negative. Adjust the right margin instead
			width = css.Value{Length: css.Length{Quantity: 0.0, Unit: css.Px}}
			marginRight = css.Value{Length: css.Length{Quantity: marginRight.ToPx() + underflow, Unit: css.Px}}
		}
	} else if !widthAuto && marginLeftAuto && marginRightAuto {
		// If margin-left and margin-right are both auto, their used values are equal
		marginLeft = css.Value{Length: css.Length{Quantity: underflow / 2.0, Unit: css.Px}}
		marginRight = css.Value{Length: css.Length{Quantity: underflow / 2.0, Unit: css.Px}}
	}
	// Store the used horizontal values on the box.
	box.Dimensions.Content.Width = width.ToPx()
	box.Dimensions.padding.Left = paddingLeft.ToPx()
	box.Dimensions.padding.Right = paddingRight.ToPx()
	box.Dimensions.Border.Left = borderLeft.ToPx()
	box.Dimensions.Border.Right = borderRight.ToPx()
	box.Dimensions.margin.Left = marginLeft.ToPx()
	box.Dimensions.margin.Right = marginRight.ToPx()
}
// calculatePosition resolves the box's vertical edges (margins, borders,
// paddings) from its style and positions the content origin inside the
// containing block, stacking it below the content already laid out there.
// Must run after calculateWidth and before children are laid out.
func (box *LayoutBox) calculatePosition(containingBlock Dimensions) {
	style := box.StyledNode
	// margin, border, and padding have initial value 0
	zero := css.Value{Length: css.Length{Quantity: 0.0, Unit: css.Px}}
	// Each side falls back from its specific property to the shorthand
	// (e.g. margin-top -> margin) and finally to zero.
	var marginTop, marginBottom, borderTop, borderBottom, paddingTop, paddingBottom css.Value
	var ok bool
	if marginTop, ok = style.Value("margin-top"); !ok {
		if marginTop, ok = style.Value("margin"); !ok {
			marginTop = zero
		}
	}
	if marginBottom, ok = style.Value("margin-bottom"); !ok {
		if marginBottom, ok = style.Value("margin"); !ok {
			marginBottom = zero
		}
	}
	if borderTop, ok = style.Value("border-top-width"); !ok {
		if borderTop, ok = style.Value("border-width"); !ok {
			borderTop = zero
		}
	}
	if borderBottom, ok = style.Value("border-bottom-width"); !ok {
		if borderBottom, ok = style.Value("border-width"); !ok {
			borderBottom = zero
		}
	}
	if paddingTop, ok = style.Value("padding-top"); !ok {
		if paddingTop, ok = style.Value("padding"); !ok {
			paddingTop = zero
		}
	}
	if paddingBottom, ok = style.Value("padding-bottom"); !ok {
		if paddingBottom, ok = style.Value("padding"); !ok {
			paddingBottom = zero
		}
	}
	box.Dimensions.margin.Top = marginTop.ToPx()
	box.Dimensions.margin.Bottom = marginBottom.ToPx()
	box.Dimensions.Border.Top = borderTop.ToPx()
	box.Dimensions.Border.Bottom = borderBottom.ToPx()
	box.Dimensions.padding.Top = paddingTop.ToPx()
	box.Dimensions.padding.Bottom = paddingBottom.ToPx()
	// The content origin sits inside the box's own left/top edges.
	box.Dimensions.Content.X = containingBlock.Content.X +
		box.Dimensions.margin.Left + box.Dimensions.Border.Left + box.Dimensions.padding.Left
	// Position the box below all the previous boxes in the container.
	// Making sure the block is below content.height to stack components in the box
	box.Dimensions.Content.Y = containingBlock.Content.Height + containingBlock.Content.Y +
		box.Dimensions.margin.Top + box.Dimensions.Border.Top + box.Dimensions.padding.Top
}
// layoutBlockChildren lays out each child within this box's dimensions,
// accumulating Content.Height as it goes so that every child is positioned
// below the one laid out before it (calculatePosition reads this running
// height when computing a child's Y).
func (box *LayoutBox) layoutBlockChildren() {
for _, child := range box.Children {
child.Layout(box.Dimensions)
// Track the height so each child is laid out below the previous content
box.Dimensions.Content.Height = box.Dimensions.Content.Height + child.Dimensions.marginBox().Height
}
}
// calculateHeight overrides the content height accumulated from the children
// with an explicit "height" style value, but only when that value is given
// in pixels. Otherwise the height computed by layoutBlockChildren() stands.
func (box *LayoutBox) calculateHeight() {
	height, ok := box.StyledNode.Value("height")
	if ok && height.Length.Unit == css.Px {
		box.Dimensions.Content.Height = height.Length.Quantity
	}
}
Fix BoxType type
package layout
import (
"fmt"
"github.com/lysrt/bro/css"
"github.com/lysrt/bro/style"
)
// LayoutBox is the building block of the layout tree, associated to one StyleNode
type LayoutBox struct {
// Dimensions of the box
Dimensions Dimensions
// Type of the box
BoxType BoxType
// StyledNode supplies the CSS values used to size and place this box;
// it is nil for anonymous boxes (see newLayoutBox callers).
StyledNode *style.StyledNode
// Children are laid out vertically, one below the other (block layout).
Children []*LayoutBox
}
// BoxType discriminates how a LayoutBox participates in layout.
type BoxType int
// The kinds of boxes in the layout tree. The zero value is BlockNode.
const (
BlockNode BoxType = iota
InlineNode
AnonymousBlock // generated container for inline children of a block; has no StyledNode
)
// Dimensions describes a box's content rectangle together with the sizes
// of the padding, border, and margin edges that surround it.
type Dimensions struct {
// Position of the content area relative to the document origin:
Content Rect
// Surrounding edges:
padding, Border, margin EdgeSizes
}
// marginBox returns the outermost rectangle of the box: the content area
// expanded by padding, borders, and margins.
func (d Dimensions) marginBox() Rect {
	withBorder := d.BorderBox()
	return withBorder.expandedBy(d.margin)
}
// BorderBox returns the rectangle enclosing the content area, its padding,
// and its borders (but not its margins).
func (d Dimensions) BorderBox() Rect {
	withPadding := d.paddingBox()
	return withPadding.expandedBy(d.Border)
}
// paddingBox returns the rectangle enclosing the content area plus its
// padding (borders and margins excluded).
func (d Dimensions) paddingBox() Rect {
	content := d.Content
	return content.expandedBy(d.padding)
}
// Rect is an axis-aligned rectangle given by its origin (X, Y) and its
// size (Width, Height). Values are in pixels (everything stored here
// comes from css.Value.ToPx conversions).
type Rect struct {
X, Y, Width, Height float64
}
// String implements fmt.Stringer, rendering the rectangle as
// "(x,y : w:W, h:H)" with no decimal places.
func (r Rect) String() string {
return fmt.Sprintf("(%.f,%.f : w:%.f, h:%.f)", r.X, r.Y, r.Width, r.Height)
}
// expandedBy grows the rectangle outward on every side by the given edge
// sizes: the origin moves up and to the left, and the width and height
// grow by the sum of the opposing edges.
func (r Rect) expandedBy(edge EdgeSizes) Rect {
	out := r
	out.X -= edge.Left
	out.Y -= edge.Top
	out.Width += edge.Left + edge.Right
	out.Height += edge.Top + edge.Bottom
	return out
}
// EdgeSizes holds a per-side thickness, used for a box's padding, border,
// and margin edges.
type EdgeSizes struct {
Left, Right, Top, Bottom float64
}
// newLayoutBox builds a childless LayoutBox of the given type.
// styledNode may be nil (anonymous boxes carry no style).
func newLayoutBox(boxType BoxType, styledNode *style.StyledNode) *LayoutBox {
	box := new(LayoutBox)
	box.BoxType = boxType
	box.StyledNode = styledNode
	return box
}
// GenerateLayoutTree converts a style tree into a layout tree. Block
// children become direct children of the root box, inline children are
// funneled into anonymous block containers (see getInlineContainer), and
// display:none subtrees are dropped entirely. Panics if the root itself
// has display:none.
func GenerateLayoutTree(styleTree *style.StyledNode) *LayoutBox {
	// The zero BoxType is BlockNode, so only the non-block cases matter.
	boxType := BlockNode
	switch styleTree.Display() {
	case style.Inline:
		boxType = InlineNode
	case style.Block:
		boxType = BlockNode
	case style.None:
		panic("Root StyledNode has display:none")
	}
	root := newLayoutBox(boxType, styleTree)
	for _, child := range styleTree.Children {
		switch child.Display() {
		case style.Inline:
			container := root.getInlineContainer()
			container.Children = append(container.Children, GenerateLayoutTree(child))
		case style.Block:
			root.Children = append(root.Children, GenerateLayoutTree(child))
		case style.None:
			// Invisible subtree: contributes nothing to the layout tree.
		}
	}
	return root
}
// getInlineContainer returns the box that should receive a new inline
// child. Inline and anonymous boxes accept inline children directly; a
// block box funnels them into its trailing anonymous block, creating one
// when the last child is not already anonymous (or when there are no
// children yet).
func (box *LayoutBox) getInlineContainer() *LayoutBox {
	switch box.BoxType {
	case InlineNode, AnonymousBlock:
		return box
	case BlockNode:
		// Reuse the anonymous block we generated last, if any.
		if n := len(box.Children); n > 0 && box.Children[n-1].BoxType == AnonymousBlock {
			return box.Children[n-1]
		}
		anon := newLayoutBox(AnonymousBlock, nil)
		box.Children = append(box.Children, anon)
		return anon
	}
	panic("No more cases to switch")
}
// Layout computes this box's size and position inside containingBlock.
// Only block layout is implemented; inline and anonymous boxes panic.
func (box *LayoutBox) Layout(containingBlock Dimensions) {
	switch box.BoxType {
	case BlockNode:
		box.layoutBlock(containingBlock)
	case InlineNode:
		// TODO: implement inline layout.
		panic("Inline Node Unimplemented")
	case AnonymousBlock:
		// TODO: implement anonymous block layout.
		panic("Anonymous Block Unimplemented")
	}
}
// layoutBlock performs standard CSS block layout. The call order matters:
// widths flow down the tree (a child's width depends on its parent's),
// while heights flow back up (a parent's height depends on its children's).
func (box *LayoutBox) layoutBlock(containingBlock Dimensions) {
// First go down the LayoutTree to compute the widths from parents' widths
// Then go up the tree to compute heights from children's heights
box.calculateWidth(containingBlock)
box.calculatePosition(containingBlock)
box.layoutBlockChildren()
box.calculateHeight()
}
// calculateWidth resolves the used horizontal values of a block box —
// width, left/right margins, borders, and padding — against the containing
// block's content width, following the block-width constraint of
// CSS 2.1 §10.3.3 (https://www.w3.org/TR/CSS2/visudet.html#blockwidth),
// then stores the results in box.Dimensions.
//
// Note: "auto" is represented as the bare keyword value, so the equality
// comparisons against `auto` below detect it, and ToPx() on an auto value
// contributes zero to the totals.
func (box *LayoutBox) calculateWidth(containingBlock Dimensions) {
style := box.StyledNode
// width has initial value auto
auto := css.Value{Keyword: "auto"}
width, ok := style.Value("width")
if !ok {
width = auto
}
// margin, border, and padding have initial value 0
zero := css.Value{Length: css.Length{Quantity: 0.0, Unit: css.Px}}
var marginLeft, marginRight, paddingLeft, paddingRight, borderLeft, borderRight css.Value
// Each side-specific property falls back to its shorthand, then to zero.
if marginLeft, ok = style.Value("margin-left"); !ok {
if marginLeft, ok = style.Value("margin"); !ok {
marginLeft = zero
}
}
if marginRight, ok = style.Value("margin-right"); !ok {
if marginRight, ok = style.Value("margin"); !ok {
marginRight = zero
}
}
if borderLeft, ok = style.Value("border-left-width"); !ok {
if borderLeft, ok = style.Value("border-width"); !ok {
borderLeft = zero
}
}
if borderRight, ok = style.Value("border-right-width"); !ok {
if borderRight, ok = style.Value("border-width"); !ok {
borderRight = zero
}
}
if paddingLeft, ok = style.Value("padding-left"); !ok {
if paddingLeft, ok = style.Value("padding"); !ok {
paddingLeft = zero
}
}
if paddingRight, ok = style.Value("padding-right"); !ok {
if paddingRight, ok = style.Value("padding"); !ok {
paddingRight = zero
}
}
// Formula for block width: https://www.w3.org/TR/CSS2/visudet.html#blockwidth
// Auto must count as zero
total := marginLeft.ToPx() + marginRight.ToPx() + borderLeft.ToPx() + borderRight.ToPx() +
paddingLeft.ToPx() + paddingRight.ToPx() + width.ToPx()
// Checking if the box is too big
// If width is not auto and the total is wider than the container, treat auto margins as 0.
if width != auto && total > containingBlock.Content.Width {
if marginLeft == auto {
marginLeft = css.Value{Length: css.Length{Quantity: 0.0, Unit: css.Px}}
}
if marginRight == auto {
marginRight = css.Value{Length: css.Length{Quantity: 0.0, Unit: css.Px}}
}
}
// Check for over or underflow, and adjust "auto" dimensions accordingly
// (total is unchanged by the zeroing above because auto already counted as 0).
underflow := containingBlock.Content.Width - total
widthAuto := width == auto
marginLeftAuto := marginLeft == auto
marginRightAuto := marginRight == auto
// Exactly one branch below applies; together they cover every combination
// of auto flags (the final branch handles !widthAuto with both margins auto).
if !widthAuto && !marginLeftAuto && !marginRightAuto {
// If the values are overconstrained, calculate margin_right
marginRight = css.Value{Length: css.Length{Quantity: marginRight.ToPx() + underflow, Unit: css.Px}}
} else if !widthAuto && !marginLeftAuto && marginRightAuto {
// If exactly one size is auto, its used value follows from the equality
marginRight = css.Value{Length: css.Length{Quantity: underflow, Unit: css.Px}}
} else if !widthAuto && marginLeftAuto && !marginRightAuto {
// Idem
marginLeft = css.Value{Length: css.Length{Quantity: underflow, Unit: css.Px}}
} else if widthAuto {
// If width is set to auto, any other auto values become 0
if marginLeft == auto {
marginLeft = css.Value{Length: css.Length{Quantity: 0.0, Unit: css.Px}}
}
if marginRight == auto {
marginRight = css.Value{Length: css.Length{Quantity: 0.0, Unit: css.Px}}
}
if underflow >= 0.0 {
// Expand width to fill the underflow
width = css.Value{Length: css.Length{Quantity: underflow, Unit: css.Px}}
} else {
// Width can't be negative. Adjust the right margin instead
width = css.Value{Length: css.Length{Quantity: 0.0, Unit: css.Px}}
marginRight = css.Value{Length: css.Length{Quantity: marginRight.ToPx() + underflow, Unit: css.Px}}
}
} else if !widthAuto && marginLeftAuto && marginRightAuto {
// If margin-left and margin-right are both auto, their used values are equal
marginLeft = css.Value{Length: css.Length{Quantity: underflow / 2.0, Unit: css.Px}}
marginRight = css.Value{Length: css.Length{Quantity: underflow / 2.0, Unit: css.Px}}
}
// Commit the used values to the box's dimensions.
box.Dimensions.Content.Width = width.ToPx()
box.Dimensions.padding.Left = paddingLeft.ToPx()
box.Dimensions.padding.Right = paddingRight.ToPx()
box.Dimensions.Border.Left = borderLeft.ToPx()
box.Dimensions.Border.Right = borderRight.ToPx()
box.Dimensions.margin.Left = marginLeft.ToPx()
box.Dimensions.margin.Right = marginRight.ToPx()
}
// calculatePosition resolves the box's vertical edge sizes (margin, border,
// padding) and the absolute (X, Y) of its content area. X is the container's
// content origin plus this box's own left edges (filled in earlier by
// calculateWidth); Y additionally adds the container's current content
// height so that sibling blocks stack vertically.
func (box *LayoutBox) calculatePosition(containingBlock Dimensions) {
	style := box.StyledNode
	// margin, border, and padding have initial value 0. Each side-specific
	// property falls back to its shorthand, then to zero.
	zero := css.Value{Length: css.Length{Quantity: 0.0, Unit: css.Px}}
	lookup := func(side, shorthand string) css.Value {
		if v, ok := style.Value(side); ok {
			return v
		}
		if v, ok := style.Value(shorthand); ok {
			return v
		}
		return zero
	}
	d := &box.Dimensions
	d.margin.Top = lookup("margin-top", "margin").ToPx()
	d.margin.Bottom = lookup("margin-bottom", "margin").ToPx()
	d.Border.Top = lookup("border-top-width", "border-width").ToPx()
	d.Border.Bottom = lookup("border-bottom-width", "border-width").ToPx()
	d.padding.Top = lookup("padding-top", "padding").ToPx()
	d.padding.Bottom = lookup("padding-bottom", "padding").ToPx()
	d.Content.X = containingBlock.Content.X +
		d.margin.Left + d.Border.Left + d.padding.Left
	// Place this box below everything already laid out in the container.
	d.Content.Y = containingBlock.Content.Y + containingBlock.Content.Height +
		d.margin.Top + d.Border.Top + d.padding.Top
}
// layoutBlockChildren lays out every child inside this box and grows the
// box's content height by each child's full margin box, so the children
// stack one below the other.
func (box *LayoutBox) layoutBlockChildren() {
	for _, child := range box.Children {
		child.Layout(box.Dimensions)
		// Grow the running height so the next child lands below this one.
		box.Dimensions.Content.Height += child.Dimensions.marginBox().Height
	}
}
// calculateHeight replaces the content height accumulated from the children
// with an explicit "height" style value, but only when that value is given
// in pixels; any other unit (or no height at all) keeps the computed value.
func (box *LayoutBox) calculateHeight() {
// If the height is set to an explicit length, use that exact length
// Otherwise, just keep the value set by layoutBlockChildren()
if height, ok := box.StyledNode.Value("height"); ok {
if height.Length.Unit == css.Px {
box.Dimensions.Content.Height = height.Length.Quantity
}
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.