CombinedText stringlengths 4 3.42M |
|---|
package task
import (
"fmt"
"neon/build"
"neon/util"
"sync"
)
// init registers the "threads" task descriptor (constructor + help text)
// in the global build task map under the key "threads".
func init() {
build.TaskMap["threads"] = build.TaskDescriptor{
Constructor: Threads,
Help: `Run steps in threads.
Arguments:
- threads: the number of threads to run. You can set it to _NCPU for the number
of CPUs.
- data: a list filled with values to pass to threads in _data property.
- steps: the steps to run in threads.
Note:
This task sets two properties :
- _data with the data for each thread.
- _thread with the thread number (starting with 0)
Examples:
# compute squares of 10 first integers in threads
- threads: _NCPU
data: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
steps:
- 'square = _data * _data'
- print: '#{_data}^2 = #{square}'`,
}
}
// Threads builds the task that runs build steps in parallel threads.
// The "threads" argument is either an integer or a string expression
// evaluated at run time; "data" is either a list or a string expression
// returning a list. Returns an error when the arguments are malformed.
func Threads(target *build.Target, args util.Object) (build.Task, error) {
	fields := []string{"threads", "data", "steps"}
	if err := CheckFields(args, fields, fields); err != nil {
		return nil, err
	}
	var threads int
	var threadExpression string
	threads, err := args.GetInteger("threads")
	if err != nil {
		// not a plain integer: keep the expression to evaluate at run time
		threadExpression, err = args.GetString("threads")
		if err != nil {
			return nil, fmt.Errorf("'threads' field must be an integer or an expression")
		}
	}
	var data []interface{}
	var dataExpression string
	data, err = args.GetList("data")
	if err != nil {
		// not a plain list: keep the expression to evaluate at run time
		dataExpression, err = args.GetString("data")
		if err != nil {
			return nil, fmt.Errorf("'data' field of 'threads' must be a list or an expression returning a list")
		}
	}
	steps, err := ParseSteps(target, args, "steps")
	if err != nil {
		return nil, err
	}
	return func(context *build.Context) error {
		if data == nil {
			_result, _err := context.EvaluateExpression(dataExpression)
			// BUG FIX: this used to check the captured outer 'err' (which is
			// always nil at this point), so evaluation failures were silently
			// ignored; check the local '_err' instead.
			if _err != nil {
				return fmt.Errorf("evaluating 'data' field: %v", _err)
			}
			var _ok bool
			data, _ok = _result.([]interface{})
			if !_ok {
				return fmt.Errorf("expression in 'data' field must return a list")
			}
		}
		// buffered channel pre-filled with every datum so workers never block
		_data := make(chan interface{}, len(data))
		for _, _d := range data {
			_data <- _d
		}
		if threadExpression != "" {
			_threads, _err := context.EvaluateExpression(threadExpression)
			if _err != nil {
				return fmt.Errorf("evaluating 'threads' field: %v", _err)
			}
			switch _t := _threads.(type) {
			case int:
				threads = _t
			case int64:
				threads = int(_t)
			default:
				return fmt.Errorf("'threads' field must result as an integer")
			}
		}
		// each thread sends at most one error before returning, so a buffer
		// of 'threads' guarantees sends never block
		_error := make(chan error, threads)
		var _waitGroup sync.WaitGroup
		_waitGroup.Add(threads)
		context.Message("Starting %d threads", threads)
		for _i := 0; _i < threads; _i++ {
			go RunThread(steps, context, _i, _data, &_waitGroup, _error)
		}
		_waitGroup.Wait()
		// report the first error, if any thread failed
		select {
		case e, ok := <-_error:
			if ok {
				return e
			}
			return nil
		default:
			return nil
		}
	}, nil
}
// RunThread is one worker goroutine: it drains data from the channel and runs
// the steps once per datum in a per-thread context. It stops when the channel
// is empty or closed, or when a step fails (the error is sent on 'errors').
// Note: defers run LIFO, so wg.Done() fires before the "done" message.
func RunThread(steps []build.Step, ctx *build.Context, index int, data chan interface{}, wg *sync.WaitGroup, errors chan error) {
ctx.Message("Thread %d started", index)
defer ctx.Message("Thread %d done", index)
defer wg.Done()
for {
// non-blocking receive: the channel is pre-filled, so an empty channel
// means there is no more work and the worker exits via 'default'
select {
case datum, ok := <-data:
if ok {
threadContext := ctx.NewThreadContext(index, datum)
threadContext.Message("run with '%v'", datum)
err := threadContext.Run(steps)
if err != nil {
errors <- err
return
}
} else {
return
}
default:
return
}
}
}
Fixed the thread iteration log message
package task
import (
"fmt"
"neon/build"
"neon/util"
"sync"
)
// init registers the "threads" task descriptor (constructor + help text)
// in the global build task map under the key "threads".
func init() {
build.TaskMap["threads"] = build.TaskDescriptor{
Constructor: Threads,
Help: `Run steps in threads.
Arguments:
- threads: the number of threads to run. You can set it to _NCPU for the number
of CPUs.
- data: a list filled with values to pass to threads in _data property.
- steps: the steps to run in threads.
Note:
This task sets two properties :
- _data with the data for each thread.
- _thread with the thread number (starting with 0)
Examples:
# compute squares of 10 first integers in threads
- threads: _NCPU
data: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
steps:
- 'square = _data * _data'
- print: '#{_data}^2 = #{square}'`,
}
}
// Threads builds the task that runs build steps in parallel threads.
// The "threads" argument is either an integer or a string expression
// evaluated at run time; "data" is either a list or a string expression
// returning a list. Returns an error when the arguments are malformed.
func Threads(target *build.Target, args util.Object) (build.Task, error) {
	fields := []string{"threads", "data", "steps"}
	if err := CheckFields(args, fields, fields); err != nil {
		return nil, err
	}
	var threads int
	var threadExpression string
	threads, err := args.GetInteger("threads")
	if err != nil {
		// not a plain integer: keep the expression to evaluate at run time
		threadExpression, err = args.GetString("threads")
		if err != nil {
			return nil, fmt.Errorf("'threads' field must be an integer or an expression")
		}
	}
	var data []interface{}
	var dataExpression string
	data, err = args.GetList("data")
	if err != nil {
		// not a plain list: keep the expression to evaluate at run time
		dataExpression, err = args.GetString("data")
		if err != nil {
			return nil, fmt.Errorf("'data' field of 'threads' must be a list or an expression returning a list")
		}
	}
	steps, err := ParseSteps(target, args, "steps")
	if err != nil {
		return nil, err
	}
	return func(context *build.Context) error {
		if data == nil {
			_result, _err := context.EvaluateExpression(dataExpression)
			// BUG FIX: this used to check the captured outer 'err' (which is
			// always nil at this point), so evaluation failures were silently
			// ignored; check the local '_err' instead.
			if _err != nil {
				return fmt.Errorf("evaluating 'data' field: %v", _err)
			}
			var _ok bool
			data, _ok = _result.([]interface{})
			if !_ok {
				return fmt.Errorf("expression in 'data' field must return a list")
			}
		}
		// buffered channel pre-filled with every datum so workers never block
		_data := make(chan interface{}, len(data))
		for _, _d := range data {
			_data <- _d
		}
		if threadExpression != "" {
			_threads, _err := context.EvaluateExpression(threadExpression)
			if _err != nil {
				return fmt.Errorf("evaluating 'threads' field: %v", _err)
			}
			switch _t := _threads.(type) {
			case int:
				threads = _t
			case int64:
				threads = int(_t)
			default:
				return fmt.Errorf("'threads' field must result as an integer")
			}
		}
		// each thread sends at most one error before returning, so a buffer
		// of 'threads' guarantees sends never block
		_error := make(chan error, threads)
		var _waitGroup sync.WaitGroup
		_waitGroup.Add(threads)
		context.Message("Starting %d threads", threads)
		for _i := 0; _i < threads; _i++ {
			go RunThread(steps, context, _i, _data, &_waitGroup, _error)
		}
		_waitGroup.Wait()
		// report the first error, if any thread failed
		select {
		case e, ok := <-_error:
			if ok {
				return e
			}
			return nil
		default:
			return nil
		}
	}, nil
}
// RunThread is one worker goroutine: it drains data from the channel and runs
// the steps once per datum in a per-thread context. It stops when the channel
// is empty or closed, or when a step fails (the error is sent on 'errors').
// Note: defers run LIFO, so wg.Done() fires before the "done" message.
func RunThread(steps []build.Step, ctx *build.Context, index int, data chan interface{}, wg *sync.WaitGroup, errors chan error) {
ctx.Message("Thread %d started", index)
defer ctx.Message("Thread %d done", index)
defer wg.Done()
for {
// non-blocking receive: the channel is pre-filled, so an empty channel
// means there is no more work and the worker exits via 'default'
select {
case datum, ok := <-data:
if ok {
threadContext := ctx.NewThreadContext(index, datum)
threadContext.Message("Thread iteration with data '%v'", datum)
err := threadContext.Run(steps)
if err != nil {
errors <- err
return
}
} else {
return
}
default:
return
}
}
}
|
package spickspan
import (
"github.com/essentier/spickspan/config"
"github.com/essentier/spickspan/model"
"github.com/essentier/spickspan/probe"
"github.com/essentier/spickspan/provider/kube"
"github.com/essentier/spickspan/provider/local"
"github.com/essentier/spickspan/provider/nomock"
"github.com/essentier/spickspan/servicebuilder"
"github.com/go-errors/errors"
)
// GetHttpService resolves serviceName through the provider and probes its
// HTTP readiness endpoint at readinessPath. It returns the service and a
// non-nil error when the service could not be resolved or is not ready yet.
func GetHttpService(provider model.Provider, serviceName string, readinessPath string) (model.Service, error) {
	service, err := provider.GetService(serviceName)
	if err != nil {
		return service, err
	}
	// idiom: early return instead of else-after-return
	if probe.ProbeHttpService(service, readinessPath) {
		return service, nil
	}
	return service, errors.Errorf("Service is not ready yet. The service is %v", service)
}
// BuildAllInConfig builds every service declared in the given configuration
// by delegating to the servicebuilder package.
func BuildAllInConfig(config config.Model) error {
return servicebuilder.BuildAllInConfig(config)
}
// GetNomockProvider creates a nomock provider from the configuration,
// initializes it, and returns it together with the init error (if any).
func GetNomockProvider(config config.Model) (model.Provider, error) {
	p := nomock.CreateProvider(config)
	if err := p.Init(); err != nil {
		return p, err
	}
	return p, nil
}
// GetDefaultKubeRegistry builds the default provider registry: nomock, kube,
// then local, registered in that order. It never returns a non-nil error.
func GetDefaultKubeRegistry(config config.Model) (*model.ProviderRegistry, error) {
registry := &model.ProviderRegistry{}
registry.RegisterProvider(nomock.CreateProvider(config))
registry.RegisterProvider(kube.CreateProvider(config))
registry.RegisterProvider(local.CreateProvider(config))
return registry, nil
}
Add GetDefaultServiceProvider and GetMongoDBService functions
package spickspan
import (
"github.com/essentier/spickspan"
"github.com/essentier/spickspan/config"
"github.com/essentier/spickspan/model"
"github.com/essentier/spickspan/probe"
"github.com/essentier/spickspan/provider/kube"
"github.com/essentier/spickspan/provider/local"
"github.com/essentier/spickspan/provider/nomock"
"github.com/essentier/spickspan/servicebuilder"
"github.com/go-errors/errors"
)
// GetHttpService resolves serviceName through the provider and probes its
// HTTP readiness endpoint at readinessPath. It returns the service and a
// non-nil error when the service could not be resolved or is not ready yet.
func GetHttpService(provider model.Provider, serviceName string, readinessPath string) (model.Service, error) {
	service, err := provider.GetService(serviceName)
	if err != nil {
		return service, err
	}
	// idiom: early return instead of else-after-return
	if probe.ProbeHttpService(service, readinessPath) {
		return service, nil
	}
	return service, errors.Errorf("Service is not ready yet. The service is %v", service)
}
// GetDefaultServiceProvider loads the spickspan configuration and resolves a
// provider from the default kube registry.
func GetDefaultServiceProvider() (model.Provider, error) {
	// Use a distinct local name so the 'config' package is not shadowed.
	cfg, err := config.GetConfig()
	if err != nil {
		return nil, err
	}
	// BUG FIX: GetDefaultKubeRegistry is declared in this very package, so it
	// must be called unqualified. The previous spickspan.GetDefaultKubeRegistry
	// call relied on the package importing itself (an import cycle, which does
	// not compile). The self-import at the top of the file should be removed.
	registry, err := GetDefaultKubeRegistry(cfg)
	if err != nil {
		return nil, err
	}
	return registry.ResolveProvider()
}
// GetMongoDBService resolves serviceName through the provider and probes the
// MongoDB connection. It returns the service and a non-nil error when the
// service could not be resolved or is not ready yet.
func GetMongoDBService(provider model.Provider, serviceName string) (model.Service, error) {
	mgoService, err := provider.GetService(serviceName)
	if err != nil {
		return mgoService, err
	}
	// idiom: early return instead of else-after-return
	if probe.ProbeMgoService(mgoService) {
		return mgoService, nil
	}
	return mgoService, errors.Errorf("Service is not ready yet. The service is %v", mgoService)
}
// BuildAllInConfig builds every service declared in the given configuration
// by delegating to the servicebuilder package.
func BuildAllInConfig(config config.Model) error {
return servicebuilder.BuildAllInConfig(config)
}
// GetNomockProvider creates a nomock provider from the configuration,
// initializes it, and returns it together with the init error (if any).
func GetNomockProvider(config config.Model) (model.Provider, error) {
provider := nomock.CreateProvider(config)
err := provider.Init()
return provider, err
}
// GetDefaultKubeRegistry builds the default provider registry: nomock, kube,
// then local, registered in that order. It never returns a non-nil error.
func GetDefaultKubeRegistry(config config.Model) (*model.ProviderRegistry, error) {
registry := &model.ProviderRegistry{}
registry.RegisterProvider(nomock.CreateProvider(config))
registry.RegisterProvider(kube.CreateProvider(config))
registry.RegisterProvider(local.CreateProvider(config))
return registry, nil
}
|
package p2p
import (
"bytes"
"fmt"
"runtime"
"testing"
)
// TestClientIdentity exercises SimpleClientIdentity: the key accessors, the
// String() representation, and updating the custom identifier.
func TestClientIdentity(t *testing.T) {
	clientIdentity := NewSimpleClientIdentity("Ethereum(G)", "0.5.16", "test", []byte("privkey"), []byte("pubkey"))
	key := clientIdentity.Privkey()
	if !bytes.Equal(key, []byte("privkey")) {
		// BUG FIX: the expected/got arguments were swapped in the message.
		t.Errorf("Expected Privkey to be %x, got %x", []byte("privkey"), key)
	}
	key = clientIdentity.Pubkey()
	if !bytes.Equal(key, []byte("pubkey")) {
		// BUG FIX: the expected/got arguments were swapped in the message.
		t.Errorf("Expected Pubkey to be %x, got %x", []byte("pubkey"), key)
	}
	clientString := clientIdentity.String()
	expected := fmt.Sprintf("Ethereum(G)/v0.5.16/test/%s/%s", runtime.GOOS, runtime.Version())
	if clientString != expected {
		t.Errorf("Expected clientIdentity to be %v, got %v", expected, clientString)
	}
	customIdentifier := clientIdentity.GetCustomIdentifier()
	if customIdentifier != "test" {
		t.Errorf("Expected clientIdentity.GetCustomIdentifier() to be 'test', got %v", customIdentifier)
	}
	clientIdentity.SetCustomIdentifier("test2")
	customIdentifier = clientIdentity.GetCustomIdentifier()
	if customIdentifier != "test2" {
		t.Errorf("Expected clientIdentity.GetCustomIdentifier() to be 'test2', got %v", customIdentifier)
	}
	clientString = clientIdentity.String()
	expected = fmt.Sprintf("Ethereum(G)/v0.5.16/test2/%s/%s", runtime.GOOS, runtime.Version())
	if clientString != expected {
		t.Errorf("Expected clientIdentity to be %v, got %v", expected, clientString)
	}
}
Fix ClientIdentity test after the private key was removed
package p2p
import (
"bytes"
"fmt"
"runtime"
"testing"
)
// TestClientIdentity exercises SimpleClientIdentity: the public-key accessor,
// the String() representation, and updating the custom identifier.
func TestClientIdentity(t *testing.T) {
	clientIdentity := NewSimpleClientIdentity("Ethereum(G)", "0.5.16", "test", []byte("pubkey"))
	key := clientIdentity.Pubkey()
	if !bytes.Equal(key, []byte("pubkey")) {
		// BUG FIX: the expected/got arguments were swapped in the message.
		t.Errorf("Expected Pubkey to be %x, got %x", []byte("pubkey"), key)
	}
	clientString := clientIdentity.String()
	expected := fmt.Sprintf("Ethereum(G)/v0.5.16/test/%s/%s", runtime.GOOS, runtime.Version())
	if clientString != expected {
		t.Errorf("Expected clientIdentity to be %v, got %v", expected, clientString)
	}
	customIdentifier := clientIdentity.GetCustomIdentifier()
	if customIdentifier != "test" {
		t.Errorf("Expected clientIdentity.GetCustomIdentifier() to be 'test', got %v", customIdentifier)
	}
	clientIdentity.SetCustomIdentifier("test2")
	customIdentifier = clientIdentity.GetCustomIdentifier()
	if customIdentifier != "test2" {
		t.Errorf("Expected clientIdentity.GetCustomIdentifier() to be 'test2', got %v", customIdentifier)
	}
	clientString = clientIdentity.String()
	expected = fmt.Sprintf("Ethereum(G)/v0.5.16/test2/%s/%s", runtime.GOOS, runtime.Version())
	if clientString != expected {
		t.Errorf("Expected clientIdentity to be %v, got %v", expected, clientString)
	}
}
|
// Copyright 2014 The Serviced Authors.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package web
import (
"crypto/tls"
"fmt"
"log"
"mime"
"net/http"
"net/url"
"os"
"os/exec"
"strings"
"sync"
"time"
"github.com/control-center/serviced/coordinator/client"
"github.com/control-center/serviced/node"
"github.com/control-center/serviced/proxy"
"github.com/control-center/serviced/rpc/master"
"github.com/control-center/serviced/utils"
"github.com/control-center/serviced/zzk"
"github.com/control-center/serviced/zzk/registry"
"github.com/control-center/serviced/zzk/service"
"github.com/gorilla/mux"
"github.com/zenoss/glog"
"github.com/zenoss/go-json-rest"
)
// ServiceConfig is the ui/rest handler for control center
type ServiceConfig struct {
bindPort string // host:port the HTTPS UI server binds to
agentPort string // address used to reach the control-plane agent/master
stats bool // whether stat collection endpoints report enabled
hostaliases []string // host names treated as aliases of this host
muxTLS bool
muxPort int
certPEMFile string // TLS cert path; empty means use a generated temp cert
keyPEMFile string // TLS key path; empty means use a generated temp key
localAddrs map[string]struct{} // set of this host's IPv4 addresses
}
// defaultHostAlias is set by Serve to the first entry of hostaliases.
var defaultHostAlias string
// NewServiceConfig creates a new ServiceConfig
// Side effects: sets the package-level adminGroup, and calls glog.Fatal
// (process exit) if the host's IPv4 addresses cannot be enumerated.
func NewServiceConfig(bindPort string, agentPort string, stats bool, hostaliases []string, muxTLS bool, muxPort int, aGroup string, certPEMFile string, keyPEMFile string) *ServiceConfig {
cfg := ServiceConfig{
bindPort: bindPort,
agentPort: agentPort,
stats: stats,
hostaliases: hostaliases,
muxTLS: muxTLS,
muxPort: muxPort,
certPEMFile: certPEMFile,
keyPEMFile: keyPEMFile,
}
hostAddrs, err := utils.GetIPv4Addresses()
if err != nil {
glog.Fatal(err)
}
// record local addresses as a set for O(1) membership checks
cfg.localAddrs = make(map[string]struct{})
for _, host := range hostAddrs {
cfg.localAddrs[host] = struct{}{}
}
adminGroup = aGroup
return &cfg
}
// getPublicEndpointServices returns the set of service IDs registered for the
// public endpoint identified by name and type, whether that endpoint exists,
// and the registry key used for the lookup.
func getPublicEndpointServices(name string, Type registry.PublicEndpointType) (map[string]struct{}, bool, registry.PublicEndpointKey) {
	allvhostsLock.RLock()
	defer allvhostsLock.RUnlock()
	pepKey := registry.GetPublicEndpointKey(name, Type)
	services, ok := allvhosts[pepKey]
	return services, ok, pepKey
}
// getVHostServices returns the list of services for a given vhost (public endpoint)
// by delegating to getPublicEndpointServices with the vhost endpoint type.
func getVHostServices(vhostname string) (map[string]struct{}, bool, registry.PublicEndpointKey) {
return getPublicEndpointServices(vhostname, registry.EPTypeVHost)
}
// getPortServices returns the list of services for a given port (public endpoint)
// NOTE(review): the parameter is uint8, which can only express ports 0-255;
// TCP ports go up to 65535 -- confirm callers never pass larger values, else
// this should be widened to uint16.
func getPortServices(port uint8) (map[string]struct{}, bool, registry.PublicEndpointKey) {
return getPublicEndpointServices(fmt.Sprintf("%d", port), registry.EPTypePort)
}
// Serve handles control center web UI requests and virtual host requests for zenoss web based services.
// The UI server actually listens on port 7878, the uihandler defined here just reverse proxies to it.
// Virtual host routing to zenoss web based services is done by the publicendpointhandler function.
// Serve blocks forever once the HTTP (redirect-only) and HTTPS listeners are
// started in background goroutines.
func (sc *ServiceConfig) Serve(shutdown <-chan (interface{})) {
glog.V(1).Infof("starting vhost synching")
//start getting vhost endpoints
go sc.syncPublicEndpoints(shutdown)
//start watching global vhosts as they are added/deleted/updated in services
go sc.syncAllVhosts(shutdown)
mime.AddExtensionType(".json", "application/json")
mime.AddExtensionType(".woff", "application/font-woff")
accessLogFile, err := os.OpenFile("/var/log/serviced.access.log", os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640)
if err != nil {
// NOTE(review): on failure accessLogFile is nil but is still passed to
// log.New below -- confirm this degraded mode is intended.
glog.Errorf("Could not create access log file.")
}
uiHandler := rest.ResourceHandler{
EnableRelaxedContentType: true,
Logger: log.New(accessLogFile, "", log.LstdFlags),
}
routes := sc.getRoutes()
uiHandler.SetRoutes(routes...)
// Dispatch order: vhost matching the full host name, then the first
// subdomain label, otherwise the UI (redirecting plain HTTP to HTTPS).
httphandler := func(w http.ResponseWriter, r *http.Request) {
glog.V(2).Infof("httphandler handling request: %+v", r)
httphost := strings.Split(r.Host, ":")[0]
parts := strings.Split(httphost, ".")
subdomain := parts[0]
glog.V(2).Infof("httphost: '%s' subdomain: '%s'", httphost, subdomain)
if svcIDs, found, registrykey := getVHostServices(httphost); found {
glog.V(2).Infof("httphost: calling sc.publicendpointhandler")
sc.publicendpointhandler(w, r, registrykey, svcIDs)
} else if svcIDs, found, registrykey := getVHostServices(subdomain); found {
glog.V(2).Infof("httphost: calling sc.publicendpointhandler")
sc.publicendpointhandler(w, r, registrykey, svcIDs)
} else {
glog.V(2).Infof("httphost: calling uiHandler")
if r.TLS == nil {
// NOTE(review): assumes sc.bindPort contains a ':' -- confirm it is
// validated before Serve runs, otherwise the [1] index panics.
http.Redirect(w, r, fmt.Sprintf("https://%s:%s", r.Host, strings.Split(sc.bindPort, ":")[1]), http.StatusMovedPermanently)
return
}
uiHandler.ServeHTTP(w, r)
}
}
r := mux.NewRouter()
if hnm, err := os.Hostname(); err == nil {
sc.hostaliases = append(sc.hostaliases, hnm)
}
cmd := exec.Command("hostname", "--fqdn")
if hnm, err := cmd.CombinedOutput(); err == nil {
// strip the trailing newline from the command output
sc.hostaliases = append(sc.hostaliases, string(hnm[:len(hnm)-1]))
}
// NOTE(review): panics if hostaliases is empty and both lookups above failed.
defaultHostAlias = sc.hostaliases[0]
r.HandleFunc("/", httphandler)
r.HandleFunc("/{path:.*}", httphandler)
http.Handle("/", r)
// FIXME: bubble up these errors to the caller
certFile := sc.certPEMFile
if len(certFile) == 0 {
// fall back to a generated temporary certificate
tempCertFile, err := proxy.TempCertFile()
if err != nil {
glog.Fatalf("Could not prepare cert.pem file: %s", err)
}
certFile = tempCertFile
}
keyFile := sc.keyPEMFile
if len(keyFile) == 0 {
tempKeyFile, err := proxy.TempKeyFile()
if err != nil {
glog.Fatalf("Could not prepare key.pem file: %s", err)
}
keyFile = tempKeyFile
}
// Plain HTTP on :80 only redirects to the HTTPS port.
go func() {
redirect := func(w http.ResponseWriter, req *http.Request) {
http.Redirect(w, req, fmt.Sprintf("https://%s:%s%s", req.Host, strings.Split(sc.bindPort, ":")[1], req.URL), http.StatusMovedPermanently)
}
err := http.ListenAndServe(":80", http.HandlerFunc(redirect))
if err != nil {
glog.Errorf("could not setup HTTP webserver: %s", err)
}
}()
go func() {
// This cipher suites and tls min version change may not be needed with golang 1.5
// https://github.com/golang/go/issues/10094
// https://github.com/golang/go/issues/9364
config := &tls.Config{
MinVersion: utils.MinTLS(),
PreferServerCipherSuites: true,
CipherSuites: utils.CipherSuites(),
}
server := &http.Server{Addr: sc.bindPort, TLSConfig: config}
err := server.ListenAndServeTLS(certFile, keyFile)
if err != nil {
glog.Fatalf("could not setup HTTPS webserver: %s", err)
}
}()
// Block forever: nothing ever writes to blockerChan.
blockerChan := make(chan bool)
<-blockerChan
}
// methods are the HTTP verbs registered for every proxied route.
var methods = []string{"GET", "POST", "PUT", "DELETE"}
// routeToInternalServiceProxy appends routes (for 'path' and all subpaths)
// that reverse-proxy requests to 'target'. If requiresAuth is set, requests
// are rejected with 401 unless loginOK passes. On an unparseable target the
// routes are returned unchanged.
func routeToInternalServiceProxy(path string, target string, requiresAuth bool, routes []rest.Route) []rest.Route {
targetURL, err := url.Parse(target)
if err != nil {
glog.Errorf("Unable to parse proxy target URL: %s", target)
return routes
}
// Wrap the normal http.Handler in a rest.handlerFunc
handlerFunc := func(w *rest.ResponseWriter, r *rest.Request) {
// All proxied requests should be authenticated first
if requiresAuth && !loginOK(r) {
restUnauthorized(w)
return
}
// a new reverse proxy is built per request
proxy := node.NewReverseProxy(path, targetURL)
proxy.ServeHTTP(w.ResponseWriter, r.Request)
}
// Add on a glob to match subpaths
andsubpath := path + "*x"
for _, method := range methods {
routes = append(routes, rest.Route{method, path, handlerFunc})
routes = append(routes, rest.Route{method, andsubpath, handlerFunc})
}
return routes
}
// unAuthorizedClient wraps realfunc with control-client acquisition but NO
// login check; the client is closed when the handler returns.
func (sc *ServiceConfig) unAuthorizedClient(realfunc handlerClientFunc) handlerFunc {
return func(w *rest.ResponseWriter, r *rest.Request) {
client, err := sc.getClient()
if err != nil {
glog.Errorf("Unable to acquire client: %v", err)
restServerError(w, err)
return
}
defer client.Close()
realfunc(w, r, client)
}
}
// authorizedClient is unAuthorizedClient plus a login check: requests that
// fail loginOK get a 401 before any client is acquired.
func (sc *ServiceConfig) authorizedClient(realfunc handlerClientFunc) handlerFunc {
return func(w *rest.ResponseWriter, r *rest.Request) {
if !loginOK(r) {
restUnauthorized(w)
return
}
client, err := sc.getClient()
if err != nil {
glog.Errorf("Unable to acquire client: %v", err)
restServerError(w, err)
return
}
defer client.Close()
realfunc(w, r, client)
}
}
// isCollectingStats returns a handler reporting whether stat collection is
// enabled: 200 OK when sc.stats is set, 501 Not Implemented otherwise.
func (sc *ServiceConfig) isCollectingStats() handlerFunc {
	status := http.StatusNotImplemented
	if sc.stats {
		status = http.StatusOK
	}
	return func(w *rest.ResponseWriter, r *rest.Request) {
		w.WriteHeader(status)
	}
}
// getClient returns a control center client connected to sc.agentPort.
// Errors are logged and returned via the named results.
func (sc *ServiceConfig) getClient() (c *node.ControlClient, err error) {
// setup the client
if c, err = node.NewControlClient(sc.agentPort); err != nil {
glog.Errorf("Could not create a control center client: %s", err)
}
return
}
// getMasterClient returns a new master RPC client for sc.agentPort.
// Errors are logged before being returned.
func (sc *ServiceConfig) getMasterClient() (master.ClientInterface, error) {
glog.V(2).Infof("start getMasterClient ... sc.agentPort: %+v", sc.agentPort)
c, err := master.NewClient(sc.agentPort)
if err != nil {
glog.Errorf("Could not create a control center client to %v: %v", sc.agentPort, err)
return nil, err
}
glog.V(2).Info("end getMasterClient")
return c, nil
}
// newRequestHandler wraps realfunc with a pre-check and a per-request context
// whose resources (the cached master client) are released when it returns.
// If check returns false the handler stops; check is responsible for writing
// the error response.
func (sc *ServiceConfig) newRequestHandler(check checkFunc, realfunc ctxhandlerFunc) handlerFunc {
return func(w *rest.ResponseWriter, r *rest.Request) {
if !check(w, r) {
return
}
reqCtx := newRequestContext(sc)
defer reqCtx.end()
realfunc(w, r, reqCtx)
}
}
// checkAuth wraps realfunc in a request handler whose pre-check rejects
// requests failing loginOK with a 401.
func (sc *ServiceConfig) checkAuth(realfunc ctxhandlerFunc) handlerFunc {
check := func(w *rest.ResponseWriter, r *rest.Request) bool {
if !loginOK(r) {
restUnauthorized(w)
return false
}
return true
}
return sc.newRequestHandler(check, realfunc)
}
// noAuth wraps realfunc in a request handler whose pre-check always passes,
// i.e. the endpoint is reachable without a login.
func (sc *ServiceConfig) noAuth(realfunc ctxhandlerFunc) handlerFunc {
	alwaysOK := func(w *rest.ResponseWriter, r *rest.Request) bool { return true }
	return sc.newRequestHandler(alwaysOK, realfunc)
}
// requestContext carries per-request resources: the owning ServiceConfig and
// a lazily-created master client (see getMasterClient / end).
type requestContext struct {
sc *ServiceConfig
master master.ClientInterface
}
// newRequestContext returns a context with no master client yet.
func newRequestContext(sc *ServiceConfig) *requestContext {
return &requestContext{sc: sc}
}
// getMasterClient lazily creates and caches the master client for this
// request; subsequent calls return the cached instance.
func (ctx *requestContext) getMasterClient() (master.ClientInterface, error) {
if ctx.master == nil {
c, err := ctx.sc.getMasterClient()
if err != nil {
glog.Errorf("Could not create a control center client: %v", err)
return nil, err
}
ctx.master = c
}
return ctx.master, nil
}
// end closes the cached master client, if one was ever created.
func (ctx *requestContext) end() error {
	if ctx.master == nil {
		return nil
	}
	return ctx.master.Close()
}
// ctxhandlerFunc is a rest handler that also receives the per-request context.
type ctxhandlerFunc func(w *rest.ResponseWriter, r *rest.Request, ctx *requestContext)
// checkFunc is a pre-check run before a handler; false aborts the request.
type checkFunc func(w *rest.ResponseWriter, r *rest.Request) bool
type getRoutes func(sc *ServiceConfig) []rest.Route
// allvhosts is rebuilt wholesale by syncAllVhosts and read by the endpoint
// lookup helpers; allvhostsLock guards every access.
var (
allvhostsLock sync.RWMutex
allvhosts map[registry.PublicEndpointKey]map[string]struct{} // map of PublicEndpointKey to service IDs that have the vhost enabled
)
// init seeds allvhosts so lookups before the first sync see an empty map.
func init() {
allvhosts = make(map[registry.PublicEndpointKey]map[string]struct{})
}
// syncAllVhosts watches the ZooKeeper public-endpoint nodes and rebuilds the
// package-level allvhosts map (vhost key -> enabled service IDs) on every
// children change. It loops, retrying every 10 seconds after watch errors,
// until 'shutdown' is closed.
func (sc *ServiceConfig) syncAllVhosts(shutdown <-chan interface{}) error {
rootConn, err := zzk.GetLocalConnection("/")
if err != nil {
glog.Errorf("syncAllVhosts - Error getting root zk connection: %v", err)
return err
}
cancelChan := make(chan interface{})
// syncVhosts is the callback WatchChildren invokes with the current child
// node IDs; it builds a fresh map and swaps it in under the write lock.
syncVhosts := func(conn client.Connection, parentPath string, childIDs ...string) {
glog.V(1).Infof("syncVhosts STARTING for parentPath:%s childIDs:%v", parentPath, childIDs)
newVhosts := make(map[registry.PublicEndpointKey]map[string]struct{})
for _, sv := range childIDs {
//cast to a VHostKey so we don't have to care about the format of the key string
pep := service.PublicEndpointKey(sv)
if pep.Type() == registry.EPTypeVHost {
registryKey := registry.GetPublicEndpointKey(pep.Name(), pep.Type())
vhostServices, found := newVhosts[registryKey]
if !found {
vhostServices = make(map[string]struct{})
newVhosts[registryKey] = vhostServices
}
// disabled endpoints still get a (possibly empty) entry
if pep.IsEnabled() {
vhostServices[pep.ServiceID()] = struct{}{}
}
}
}
//lock for as short a time as possible
allvhostsLock.Lock()
defer allvhostsLock.Unlock()
allvhosts = newVhosts
glog.V(1).Infof("allvhosts: %+v", allvhosts)
}
for {
zkServiceVhost := service.ZKServicePublicEndpoints
// non-blocking shutdown check before (re)starting the watch
select {
case <-shutdown:
close(cancelChan)
return nil
default:
}
glog.V(1).Infof("Running registry.WatchChildren for zookeeper path: %s", zkServiceVhost)
err := registry.WatchChildren(rootConn, zkServiceVhost, cancelChan, syncVhosts, pepWatchError)
if err != nil {
glog.V(1).Infof("Will retry in 10 seconds to WatchChildren(%s) due to error: %v", zkServiceVhost, err)
<-time.After(time.Second * 10)
continue
}
}
}
Add comments about UI port validation
// Copyright 2014 The Serviced Authors.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package web
import (
"crypto/tls"
"fmt"
"log"
"mime"
"net/http"
"net/url"
"os"
"os/exec"
"strings"
"sync"
"time"
"github.com/control-center/serviced/coordinator/client"
"github.com/control-center/serviced/node"
"github.com/control-center/serviced/proxy"
"github.com/control-center/serviced/rpc/master"
"github.com/control-center/serviced/utils"
"github.com/control-center/serviced/zzk"
"github.com/control-center/serviced/zzk/registry"
"github.com/control-center/serviced/zzk/service"
"github.com/gorilla/mux"
"github.com/zenoss/glog"
"github.com/zenoss/go-json-rest"
)
// ServiceConfig is the ui/rest handler for control center
type ServiceConfig struct {
bindPort string // host:port the HTTPS UI server binds to
agentPort string // address used to reach the control-plane agent/master
stats bool // whether stat collection endpoints report enabled
hostaliases []string // host names treated as aliases of this host
muxTLS bool
muxPort int
certPEMFile string // TLS cert path; empty means use a generated temp cert
keyPEMFile string // TLS key path; empty means use a generated temp key
localAddrs map[string]struct{} // set of this host's IPv4 addresses
}
// defaultHostAlias is set by Serve to the first entry of hostaliases.
var defaultHostAlias string
// NewServiceConfig creates a new ServiceConfig
// Side effects: sets the package-level adminGroup, and calls glog.Fatal
// (process exit) if the host's IPv4 addresses cannot be enumerated.
func NewServiceConfig(bindPort string, agentPort string, stats bool, hostaliases []string, muxTLS bool, muxPort int, aGroup string, certPEMFile string, keyPEMFile string) *ServiceConfig {
cfg := ServiceConfig{
bindPort: bindPort,
agentPort: agentPort,
stats: stats,
hostaliases: hostaliases,
muxTLS: muxTLS,
muxPort: muxPort,
certPEMFile: certPEMFile,
keyPEMFile: keyPEMFile,
}
hostAddrs, err := utils.GetIPv4Addresses()
if err != nil {
glog.Fatal(err)
}
// record local addresses as a set for O(1) membership checks
cfg.localAddrs = make(map[string]struct{})
for _, host := range hostAddrs {
cfg.localAddrs[host] = struct{}{}
}
adminGroup = aGroup
return &cfg
}
// getPublicEndpointServices returns the list of services for a given
// public endpoint/type. It reads allvhosts under the read lock and also
// returns the registry key used for the lookup.
func getPublicEndpointServices(name string, Type registry.PublicEndpointType) (map[string]struct{}, bool, registry.PublicEndpointKey) {
allvhostsLock.RLock()
defer allvhostsLock.RUnlock()
key := registry.GetPublicEndpointKey(name, Type)
svcs, found := allvhosts[key]
return svcs, found, key
}
// getVHostServices returns the list of services for a given vhost (public endpoint)
func getVHostServices(vhostname string) (map[string]struct{}, bool, registry.PublicEndpointKey) {
return getPublicEndpointServices(vhostname, registry.EPTypeVHost)
}
// getPortServices returns the list of services for a given port (public endpoint)
// NOTE(review): uint8 only expresses ports 0-255; confirm callers never pass
// larger port numbers, else widen to uint16.
func getPortServices(port uint8) (map[string]struct{}, bool, registry.PublicEndpointKey) {
return getPublicEndpointServices(fmt.Sprintf("%d", port), registry.EPTypePort)
}
// Serve handles control center web UI requests and virtual host requests for zenoss web based services.
// The UI server actually listens on port 7878, the uihandler defined here just reverse proxies to it.
// Virtual host routing to zenoss web based services is done by the publicendpointhandler function.
// Serve blocks forever once the HTTP (redirect-only) and HTTPS listeners are
// started in background goroutines.
func (sc *ServiceConfig) Serve(shutdown <-chan (interface{})) {
glog.V(1).Infof("starting vhost synching")
//start getting vhost endpoints
go sc.syncPublicEndpoints(shutdown)
//start watching global vhosts as they are added/deleted/updated in services
go sc.syncAllVhosts(shutdown)
mime.AddExtensionType(".json", "application/json")
mime.AddExtensionType(".woff", "application/font-woff")
accessLogFile, err := os.OpenFile("/var/log/serviced.access.log", os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640)
if err != nil {
// NOTE(review): on failure accessLogFile is nil but is still passed to
// log.New below -- confirm this degraded mode is intended.
glog.Errorf("Could not create access log file.")
}
uiHandler := rest.ResourceHandler{
EnableRelaxedContentType: true,
Logger: log.New(accessLogFile, "", log.LstdFlags),
}
routes := sc.getRoutes()
uiHandler.SetRoutes(routes...)
// Dispatch order: vhost matching the full host name, then the first
// subdomain label, otherwise the UI (redirecting plain HTTP to HTTPS).
httphandler := func(w http.ResponseWriter, r *http.Request) {
glog.V(2).Infof("httphandler handling request: %+v", r)
httphost := strings.Split(r.Host, ":")[0]
parts := strings.Split(httphost, ".")
subdomain := parts[0]
glog.V(2).Infof("httphost: '%s' subdomain: '%s'", httphost, subdomain)
if svcIDs, found, registrykey := getVHostServices(httphost); found {
glog.V(2).Infof("httphost: calling sc.publicendpointhandler")
sc.publicendpointhandler(w, r, registrykey, svcIDs)
} else if svcIDs, found, registrykey := getVHostServices(subdomain); found {
glog.V(2).Infof("httphost: calling sc.publicendpointhandler")
sc.publicendpointhandler(w, r, registrykey, svcIDs)
} else {
glog.V(2).Infof("httphost: calling uiHandler")
if r.TLS == nil {
// bindPort has already been validated, so the Split/access below won't break.
http.Redirect(w, r, fmt.Sprintf("https://%s:%s", r.Host, strings.Split(sc.bindPort, ":")[1]), http.StatusMovedPermanently)
return
}
uiHandler.ServeHTTP(w, r)
}
}
r := mux.NewRouter()
if hnm, err := os.Hostname(); err == nil {
sc.hostaliases = append(sc.hostaliases, hnm)
}
cmd := exec.Command("hostname", "--fqdn")
if hnm, err := cmd.CombinedOutput(); err == nil {
// strip the trailing newline from the command output
sc.hostaliases = append(sc.hostaliases, string(hnm[:len(hnm)-1]))
}
// NOTE(review): panics if hostaliases is empty and both lookups above failed.
defaultHostAlias = sc.hostaliases[0]
r.HandleFunc("/", httphandler)
r.HandleFunc("/{path:.*}", httphandler)
http.Handle("/", r)
// FIXME: bubble up these errors to the caller
certFile := sc.certPEMFile
if len(certFile) == 0 {
// fall back to a generated temporary certificate
tempCertFile, err := proxy.TempCertFile()
if err != nil {
glog.Fatalf("Could not prepare cert.pem file: %s", err)
}
certFile = tempCertFile
}
keyFile := sc.keyPEMFile
if len(keyFile) == 0 {
tempKeyFile, err := proxy.TempKeyFile()
if err != nil {
glog.Fatalf("Could not prepare key.pem file: %s", err)
}
keyFile = tempKeyFile
}
// Plain HTTP on :80 only redirects to the HTTPS port.
go func() {
redirect := func(w http.ResponseWriter, req *http.Request) {
// bindPort has already been validated, so the Split/access below won't break.
http.Redirect(w, req, fmt.Sprintf("https://%s:%s%s", req.Host, strings.Split(sc.bindPort, ":")[1], req.URL), http.StatusMovedPermanently)
}
err := http.ListenAndServe(":80", http.HandlerFunc(redirect))
if err != nil {
glog.Errorf("could not setup HTTP webserver: %s", err)
}
}()
go func() {
// This cipher suites and tls min version change may not be needed with golang 1.5
// https://github.com/golang/go/issues/10094
// https://github.com/golang/go/issues/9364
config := &tls.Config{
MinVersion: utils.MinTLS(),
PreferServerCipherSuites: true,
CipherSuites: utils.CipherSuites(),
}
server := &http.Server{Addr: sc.bindPort, TLSConfig: config}
err := server.ListenAndServeTLS(certFile, keyFile)
if err != nil {
glog.Fatalf("could not setup HTTPS webserver: %s", err)
}
}()
// Block forever: nothing ever writes to blockerChan.
blockerChan := make(chan bool)
<-blockerChan
}
// methods are the HTTP verbs registered for every proxied route.
var methods = []string{"GET", "POST", "PUT", "DELETE"}
// routeToInternalServiceProxy appends routes (for 'path' and all subpaths)
// that reverse-proxy requests to 'target'. If requiresAuth is set, requests
// are rejected with 401 unless loginOK passes. On an unparseable target the
// routes are returned unchanged.
func routeToInternalServiceProxy(path string, target string, requiresAuth bool, routes []rest.Route) []rest.Route {
targetURL, err := url.Parse(target)
if err != nil {
glog.Errorf("Unable to parse proxy target URL: %s", target)
return routes
}
// Wrap the normal http.Handler in a rest.handlerFunc
handlerFunc := func(w *rest.ResponseWriter, r *rest.Request) {
// All proxied requests should be authenticated first
if requiresAuth && !loginOK(r) {
restUnauthorized(w)
return
}
// a new reverse proxy is built per request
proxy := node.NewReverseProxy(path, targetURL)
proxy.ServeHTTP(w.ResponseWriter, r.Request)
}
// Add on a glob to match subpaths
andsubpath := path + "*x"
for _, method := range methods {
routes = append(routes, rest.Route{method, path, handlerFunc})
routes = append(routes, rest.Route{method, andsubpath, handlerFunc})
}
return routes
}
// unAuthorizedClient wraps realfunc with control-client acquisition but no
// authentication check; the client is closed once realfunc returns.
func (sc *ServiceConfig) unAuthorizedClient(realfunc handlerClientFunc) handlerFunc {
	return func(w *rest.ResponseWriter, r *rest.Request) {
		c, err := sc.getClient()
		if err != nil {
			glog.Errorf("Unable to acquire client: %v", err)
			restServerError(w, err)
			return
		}
		defer c.Close()
		realfunc(w, r, c)
	}
}
// authorizedClient guards realfunc behind authentication and supplies it a
// control-plane client, which is closed when the handler returns.
func (sc *ServiceConfig) authorizedClient(realfunc handlerClientFunc) handlerFunc {
	return func(w *rest.ResponseWriter, r *rest.Request) {
		if !loginOK(r) {
			restUnauthorized(w)
			return
		}
		c, err := sc.getClient()
		if err != nil {
			glog.Errorf("Unable to acquire client: %v", err)
			restServerError(w, err)
			return
		}
		defer c.Close()
		realfunc(w, r, c)
	}
}
// isCollectingStats returns a handler reporting whether statistics collection
// is enabled: 200 OK when sc.stats is set, 501 Not Implemented otherwise.
// The decision is made once, at wrap time, exactly as before.
func (sc *ServiceConfig) isCollectingStats() handlerFunc {
	status := http.StatusNotImplemented
	if sc.stats {
		status = http.StatusOK
	}
	return func(w *rest.ResponseWriter, r *rest.Request) {
		w.WriteHeader(status)
	}
}
// getClient returns a control-plane client connected to sc.agentPort. On
// failure the error is logged and returned alongside a nil client.
func (sc *ServiceConfig) getClient() (c *node.ControlClient, err error) {
	// setup the client
	if c, err = node.NewControlClient(sc.agentPort); err != nil {
		glog.Errorf("Could not create a control center client: %s", err)
	}
	return
}

// getMasterClient dials a master client at sc.agentPort, logging entry and
// exit at verbosity level 2.
func (sc *ServiceConfig) getMasterClient() (master.ClientInterface, error) {
	glog.V(2).Infof("start getMasterClient ... sc.agentPort: %+v", sc.agentPort)
	c, err := master.NewClient(sc.agentPort)
	if err != nil {
		glog.Errorf("Could not create a control center client to %v: %v", sc.agentPort, err)
		return nil, err
	}
	glog.V(2).Info("end getMasterClient")
	return c, nil
}
// newRequestHandler composes a pre-check with a context-aware handler. The
// per-request context is always released via end() after realfunc returns.
func (sc *ServiceConfig) newRequestHandler(check checkFunc, realfunc ctxhandlerFunc) handlerFunc {
	return func(w *rest.ResponseWriter, r *rest.Request) {
		if !check(w, r) {
			return
		}
		reqCtx := newRequestContext(sc)
		defer reqCtx.end()
		realfunc(w, r, reqCtx)
	}
}

// checkAuth wraps realfunc so that unauthenticated requests receive a 401
// and are never passed through.
func (sc *ServiceConfig) checkAuth(realfunc ctxhandlerFunc) handlerFunc {
	check := func(w *rest.ResponseWriter, r *rest.Request) bool {
		if !loginOK(r) {
			restUnauthorized(w)
			return false
		}
		return true
	}
	return sc.newRequestHandler(check, realfunc)
}

// noAuth wraps realfunc with a check that always passes.
func (sc *ServiceConfig) noAuth(realfunc ctxhandlerFunc) handlerFunc {
	check := func(w *rest.ResponseWriter, r *rest.Request) bool {
		return true
	}
	return sc.newRequestHandler(check, realfunc)
}
// requestContext carries per-request state: the owning ServiceConfig and a
// lazily created master client.
type requestContext struct {
	sc     *ServiceConfig
	master master.ClientInterface
}

// newRequestContext returns a context bound to sc with no client dialed yet.
func newRequestContext(sc *ServiceConfig) *requestContext {
	return &requestContext{sc: sc}
}

// getMasterClient returns the context's master client, dialing it on first
// use and caching it for the remainder of the request.
func (ctx *requestContext) getMasterClient() (master.ClientInterface, error) {
	if ctx.master == nil {
		c, err := ctx.sc.getMasterClient()
		if err != nil {
			glog.Errorf("Could not create a control center client: %v", err)
			return nil, err
		}
		ctx.master = c
	}
	return ctx.master, nil
}

// end releases the cached master client, if one was created.
func (ctx *requestContext) end() error {
	if ctx.master != nil {
		return ctx.master.Close()
	}
	return nil
}

// ctxhandlerFunc is a REST handler that also receives the request context.
type ctxhandlerFunc func(w *rest.ResponseWriter, r *rest.Request, ctx *requestContext)

// checkFunc is a pre-handler gate; returning false aborts the request.
type checkFunc func(w *rest.ResponseWriter, r *rest.Request) bool

// getRoutes produces the route table for a ServiceConfig.
type getRoutes func(sc *ServiceConfig) []rest.Route
var (
	// allvhostsLock guards allvhosts.
	allvhostsLock sync.RWMutex
	allvhosts     map[registry.PublicEndpointKey]map[string]struct{} // map of PublicEndpointKey to service IDs that have the vhost enabled
)

// init allocates the vhost map so reads before the first sync are safe.
func init() {
	allvhosts = make(map[registry.PublicEndpointKey]map[string]struct{})
}
// syncAllVhosts keeps the package-level allvhosts map in sync with the
// service public-endpoint nodes in ZooKeeper. It blocks until shutdown is
// closed, retrying the child watch every 10 seconds on error.
func (sc *ServiceConfig) syncAllVhosts(shutdown <-chan interface{}) error {
	rootConn, err := zzk.GetLocalConnection("/")
	if err != nil {
		glog.Errorf("syncAllVhosts - Error getting root zk connection: %v", err)
		return err
	}
	cancelChan := make(chan interface{})
	// syncVhosts rebuilds the vhost map from the current child node names.
	syncVhosts := func(conn client.Connection, parentPath string, childIDs ...string) {
		glog.V(1).Infof("syncVhosts STARTING for parentPath:%s childIDs:%v", parentPath, childIDs)
		newVhosts := make(map[registry.PublicEndpointKey]map[string]struct{})
		for _, sv := range childIDs {
			//cast to a VHostKey so we don't have to care about the format of the key string
			pep := service.PublicEndpointKey(sv)
			if pep.Type() == registry.EPTypeVHost {
				registryKey := registry.GetPublicEndpointKey(pep.Name(), pep.Type())
				vhostServices, found := newVhosts[registryKey]
				if !found {
					vhostServices = make(map[string]struct{})
					newVhosts[registryKey] = vhostServices
				}
				// Disabled endpoints still create an (empty) entry for the
				// key; only enabled ones contribute service IDs.
				if pep.IsEnabled() {
					vhostServices[pep.ServiceID()] = struct{}{}
				}
			}
		}
		//lock for as short a time as possible
		allvhostsLock.Lock()
		defer allvhostsLock.Unlock()
		allvhosts = newVhosts
		glog.V(1).Infof("allvhosts: %+v", allvhosts)
	}
	for {
		zkServiceVhost := service.ZKServicePublicEndpoints
		select {
		case <-shutdown:
			// Cancel any in-flight watch and exit cleanly.
			close(cancelChan)
			return nil
		default:
		}
		glog.V(1).Infof("Running registry.WatchChildren for zookeeper path: %s", zkServiceVhost)
		err := registry.WatchChildren(rootConn, zkServiceVhost, cancelChan, syncVhosts, pepWatchError)
		if err != nil {
			glog.V(1).Infof("Will retry in 10 seconds to WatchChildren(%s) due to error: %v", zkServiceVhost, err)
			<-time.After(time.Second * 10)
			continue
		}
	}
}
|
package main
import (
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"regexp"
"text/template"
"gopkg.in/yaml.v2"
)
// helpText is the usage message printed for -h/--help and on flag errors.
// Flags marked (*) are required unless -v/--version is given.
const helpText = `mktpl is a tool to render Golang text/template with template and YAML data files.
Usage:
mktpl flags
Flags:
-d, --data string path to the YAML data file (*)
-t, --template string path to the template file (*)
-h, --help help for mktpl
-v, --version show program's version information and exit
`

// Process exit codes returned by Run.
const (
	exitCodeOK int = 0
	// Errors start at 11 (10 + iota, where iota is 1 on this line).
	exitCodeError = 10 + iota
	exitCodeParseFlagsError
	exitCodeInvalidFlags
	exitCodeInvalidFilePath
	exitCodeParseTemplateError
)

var (
	// Flags
	tplPath     string // -t/--template
	dataPath    string // -d/--data
	showVersion bool   // -v/--version
	// These values are embedded when building.
	buildVersion  string
	buildRevision string
	buildWith     string
)

// re detects remaining template actions in rendered output, which triggers
// another render pass (see render).
var re = regexp.MustCompile(`{{[-.\s\w]+}}`)

// mktpl holds the writers used for normal and error output.
type mktpl struct {
	outStream, errStream io.Writer
}
// parseFlags registers mktpl's command-line flags on a dedicated FlagSet and
// parses args (args[0] is the program name). Values are stored in the
// package-level tplPath, dataPath and showVersion variables.
//
// Note: the FlagSet uses flag.ExitOnError, so a parse failure prints usage to
// m.errStream and exits the process; the returned error is effectively nil.
func (m *mktpl) parseFlags(args []string) error {
	flags := flag.NewFlagSet(args[0], flag.ExitOnError)
	flags.SetOutput(m.errStream)
	flags.Usage = func() {
		fmt.Fprint(m.errStream, helpText)
	}
	// Long and short spellings share the same destination variable.
	flags.StringVar(&dataPath, "d", "", "")
	flags.StringVar(&dataPath, "data", "", "")
	flags.StringVar(&tplPath, "t", "", "")
	flags.StringVar(&tplPath, "template", "", "")
	// help flags are skippable.
	flags.BoolVar(&showVersion, "v", false, "")
	flags.BoolVar(&showVersion, "version", false, "")
	// Return the parse error as-is instead of flattening it through
	// fmt.Errorf("%s", err), which discarded the original error value.
	return flags.Parse(args[1:])
}
// Run executes mktpl: parse flags, optionally print version information, read
// the YAML data file, parse the template file, render, and write the result
// to m.outStream. It returns one of the exitCode* process exit codes.
func (m *mktpl) Run(args []string) int {
	if err := m.parseFlags(args); err != nil {
		// Typo fix: "faild" -> "failed".
		fmt.Fprintf(m.errStream, "failed in parsing flags: %s\n", err)
		return exitCodeParseFlagsError
	}
	if err := isValidFlags(); err != nil {
		fmt.Fprintf(m.errStream, "invalid flags: %s\n", err)
		return exitCodeInvalidFlags
	}
	if showVersion {
		fmt.Fprintf(m.outStream, "version: %s\nrevision: %s\nwith: %s\n",
			buildVersion, buildRevision, buildWith)
		return exitCodeOK
	}
	data, err := ioutil.ReadFile(dataPath)
	if err != nil {
		fmt.Fprintf(m.errStream, "failed in reading the data file: %s\n", err)
		return exitCodeInvalidFilePath
	}
	tpl, err := template.ParseFiles(tplPath)
	if err != nil {
		fmt.Fprintf(m.errStream, "failed in parsing the template file: %s\n", err)
		return exitCodeParseTemplateError
	}
	var out []byte
	if out, err = render(data, tpl); err != nil {
		fmt.Fprintf(m.errStream, "%s\n", err)
		return exitCodeError
	}
	// %s formats []byte directly; no need for the extra string conversion.
	fmt.Fprintf(m.outStream, "%s", out)
	return exitCodeOK
}
func isValidFlags() error {
if (len(tplPath) == 0 || len(dataPath) == 0) && !showVersion {
return fmt.Errorf("omitting -d[--data] and -t[--template] flags is not allowed")
}
return nil
}
// render unmarshals data as YAML and executes tpl against it. If the rendered
// output still contains template actions (e.g. a data value is itself a
// template), the output is re-parsed and rendered again recursively until no
// actions remain.
//
// NOTE(review): rendering does not terminate if a data value expands to an
// action that reproduces itself.
func render(data []byte, tpl *template.Template) ([]byte, error) {
	mappedData := make(map[interface{}]interface{})
	if err := yaml.Unmarshal(data, &mappedData); err != nil {
		return nil, fmt.Errorf("failed in unmarshalling the YAML data: %s", err)
	}
	buf := new(bytes.Buffer)
	if err := tpl.Execute(buf, mappedData); err != nil {
		return nil, fmt.Errorf("failed in rendering: %s", err)
	}
	// bytes.Buffer reads cannot fail; take the bytes directly instead of
	// going through ioutil.ReadAll with a dead error branch.
	out := buf.Bytes()
	if re.MatchString(string(out)) {
		tpl, err := template.New("").Parse(string(out))
		if err != nil {
			return nil, fmt.Errorf("failed in parsing the buffered template %s", err)
		}
		return render(data, tpl)
	}
	return out, nil
}
Fix typo
package main
import (
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"regexp"
"text/template"
"gopkg.in/yaml.v2"
)
// helpText is the usage message printed for -h/--help and on flag errors.
// Flags marked (*) are required unless -v/--version is given.
const helpText = `mktpl is a tool to render Golang text/template with template and YAML data files.
Usage:
mktpl flags
Flags:
-d, --data string path to the YAML data file (*)
-t, --template string path to the template file (*)
-h, --help help for mktpl
-v, --version show program's version information and exit
`

// Process exit codes returned by Run.
const (
	exitCodeOK int = 0
	// Errors start from 11 (10 + iota, where iota is 1 on this line).
	exitCodeError = 10 + iota
	exitCodeParseFlagsError
	exitCodeInvalidFlags
	exitCodeInvalidFilePath
	exitCodeParseTemplateError
)

var (
	// Flags
	tplPath     string // -t/--template
	dataPath    string // -d/--data
	showVersion bool   // -v/--version
	// These values are embedded when building.
	buildVersion  string
	buildRevision string
	buildWith     string
)

// re detects remaining template actions in rendered output, which triggers
// another render pass (see render).
var re = regexp.MustCompile(`{{[-.\s\w]+}}`)

// mktpl holds the writers used for normal and error output.
type mktpl struct {
	outStream, errStream io.Writer
}
// parseFlags registers mktpl's command-line flags on a dedicated FlagSet and
// parses args (args[0] is the program name). Values are stored in the
// package-level tplPath, dataPath and showVersion variables.
//
// Note: the FlagSet uses flag.ExitOnError, so a parse failure prints usage to
// m.errStream and exits the process; the returned error is effectively nil.
func (m *mktpl) parseFlags(args []string) error {
	flags := flag.NewFlagSet(args[0], flag.ExitOnError)
	flags.SetOutput(m.errStream)
	flags.Usage = func() {
		fmt.Fprint(m.errStream, helpText)
	}
	// Long and short spellings share the same destination variable.
	flags.StringVar(&dataPath, "d", "", "")
	flags.StringVar(&dataPath, "data", "", "")
	flags.StringVar(&tplPath, "t", "", "")
	flags.StringVar(&tplPath, "template", "", "")
	// help flags are skippable.
	flags.BoolVar(&showVersion, "v", false, "")
	flags.BoolVar(&showVersion, "version", false, "")
	// Return the parse error as-is instead of flattening it through
	// fmt.Errorf("%s", err), which discarded the original error value.
	return flags.Parse(args[1:])
}
// Run executes mktpl: parse flags, optionally print version information, read
// the YAML data file, parse the template file, render, and write the result
// to m.outStream. It returns one of the exitCode* process exit codes.
func (m *mktpl) Run(args []string) int {
	if err := m.parseFlags(args); err != nil {
		// Typo fix: "faild" -> "failed".
		fmt.Fprintf(m.errStream, "failed in parsing flags: %s\n", err)
		return exitCodeParseFlagsError
	}
	if err := isValidFlags(); err != nil {
		fmt.Fprintf(m.errStream, "invalid flags: %s\n", err)
		return exitCodeInvalidFlags
	}
	if showVersion {
		fmt.Fprintf(m.outStream, "version: %s\nrevision: %s\nwith: %s\n",
			buildVersion, buildRevision, buildWith)
		return exitCodeOK
	}
	data, err := ioutil.ReadFile(dataPath)
	if err != nil {
		fmt.Fprintf(m.errStream, "failed in reading the data file: %s\n", err)
		return exitCodeInvalidFilePath
	}
	tpl, err := template.ParseFiles(tplPath)
	if err != nil {
		fmt.Fprintf(m.errStream, "failed in parsing the template file: %s\n", err)
		return exitCodeParseTemplateError
	}
	var out []byte
	if out, err = render(data, tpl); err != nil {
		fmt.Fprintf(m.errStream, "%s\n", err)
		return exitCodeError
	}
	// %s formats []byte directly; no need for the extra string conversion.
	fmt.Fprintf(m.outStream, "%s", out)
	return exitCodeOK
}
// isValidFlags verifies that both -d/--data and -t/--template were provided,
// unless -v/--version was requested.
func isValidFlags() error {
	if (len(tplPath) == 0 || len(dataPath) == 0) && !showVersion {
		return fmt.Errorf("omitting -d[--data] and -t[--template] flags is not allowed")
	}
	return nil
}
// render unmarshals data as YAML and executes tpl against it. If the rendered
// output still contains template actions (e.g. a data value is itself a
// template), the output is re-parsed and rendered again recursively until no
// actions remain.
//
// NOTE(review): rendering does not terminate if a data value expands to an
// action that reproduces itself.
func render(data []byte, tpl *template.Template) ([]byte, error) {
	mappedData := make(map[interface{}]interface{})
	if err := yaml.Unmarshal(data, &mappedData); err != nil {
		return nil, fmt.Errorf("failed in unmarshalling the YAML data: %s", err)
	}
	buf := new(bytes.Buffer)
	if err := tpl.Execute(buf, mappedData); err != nil {
		return nil, fmt.Errorf("failed in rendering: %s", err)
	}
	// bytes.Buffer reads cannot fail; take the bytes directly instead of
	// going through ioutil.ReadAll with a dead error branch.
	out := buf.Bytes()
	if re.MatchString(string(out)) {
		tpl, err := template.New("").Parse(string(out))
		if err != nil {
			return nil, fmt.Errorf("failed in parsing the buffered template %s", err)
		}
		return render(data, tpl)
	}
	return out, nil
}
|
/*
Copyright IBM Corp. 2017 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mgmt
import (
"sync"
"github.com/hyperledger/fabric/msp"
"github.com/op/go-logging"
)
// FIXME: AS SOON AS THE CHAIN MANAGEMENT CODE IS COMPLETE,
// THESE MAPS AND HELPER FUNCTIONS SHOULD DISAPPEAR BECAUSE
// OWNERSHIP OF PER-CHAIN MSP MANAGERS WILL BE HANDLED BY IT;
// HOWEVER IN THE INTERIM, THESE HELPER FUNCTIONS ARE REQUIRED

// m guards localMsp and mspMap.
var m sync.Mutex

// localMsp is the lazily-created local MSP; see GetLocalMSP.
var localMsp msp.MSP

// mspMap maps chain IDs to their MSP managers; see GetManagerForChain.
var mspMap map[string]msp.MSPManager = make(map[string]msp.MSPManager)

// peerLogger logs MSP management events under the "peer" module.
var peerLogger = logging.MustGetLogger("peer")
// GetManagerForChain returns the msp manager for the supplied
// chain; if no such manager exists, one is created.
func GetManagerForChain(ChainID string) msp.MSPManager {
	var mspMgr msp.MSPManager
	var created bool
	{
		// defer inside a bare block still runs at function return, so the
		// lock is held through the logging below as well.
		m.Lock()
		defer m.Unlock()
		mspMgr = mspMap[ChainID]
		if mspMgr == nil {
			created = true
			mspMgr = msp.NewMSPManager()
			mspMap[ChainID] = mspMgr
		}
	}
	if created {
		peerLogger.Infof("Created new msp manager for chain %s", ChainID)
	} else {
		// Typo fix: "Returinging" -> "Returning".
		peerLogger.Infof("Returning existing manager for chain %s", ChainID)
	}
	return mspMgr
}
// GetManagers returns a shallow copy of every registered MSP manager, keyed
// by chain ID.
func GetManagers() map[string]msp.MSPManager {
	m.Lock()
	defer m.Unlock()
	clone := make(map[string]msp.MSPManager, len(mspMap))
	for id, mgr := range mspMap {
		clone[id] = mgr
	}
	return clone
}
// GetManagerForChainIfExists returns the MSPManager associated to ChainID
// if it exists (nil otherwise); unlike GetManagerForChain it never creates
// one.
func GetManagerForChainIfExists(ChainID string) msp.MSPManager {
	m.Lock()
	defer m.Unlock()
	return mspMap[ChainID]
}
// GetLocalMSP returns the local msp (and creates it if it doesn't exist).
// Creation failure is fatal: the process cannot operate without a local MSP.
func GetLocalMSP() msp.MSP {
	var lclMsp msp.MSP
	var created bool
	{
		// defer inside a bare block still runs at function return, so the
		// lock is held through the logging below as well.
		m.Lock()
		defer m.Unlock()
		lclMsp = localMsp
		if lclMsp == nil {
			var err error
			created = true
			lclMsp, err = msp.NewBccspMsp()
			if err != nil {
				// Typo fix: "initlaize" -> "initialize".
				peerLogger.Fatalf("Failed to initialize local MSP, received err %s", err)
			}
			localMsp = lclMsp
		}
	}
	if created {
		peerLogger.Infof("Created new local MSP")
	} else {
		peerLogger.Infof("Returning existing local MSP")
	}
	return lclMsp
}
// GetMSPCommon returns the common MSP interface: the per-chain manager when a
// chain ID is given, the local MSP for the empty chain ID.
func GetMSPCommon(chainID string) msp.Common {
	if chainID != "" {
		return GetManagerForChain(chainID)
	}
	return GetLocalMSP()
}
// GetLocalSigningIdentityOrPanic returns the local signing identity or panics
// in case of error.
func GetLocalSigningIdentityOrPanic() msp.SigningIdentity {
	id, err := GetLocalMSP().GetDefaultSigningIdentity()
	if err != nil {
		// Panicf, not Panic: go-logging's Panic is Sprint-style and would
		// never interpolate the %s verb.
		peerLogger.Panicf("Failed getting local signing identity [%s]", err)
	}
	return id
}
Improve MSP logging in msp/mgmt
This change updates the logging module for msp/mgmt/mgmt.go to
'msp' to ensure the log level can be updated uniformly for all
MSP-related log messages. It also changes a few statements to
only display when set to debug as they were flooding the peer
logs.
https://jira.hyperledger.org/browse/FAB-1870
Change-Id: I62b5435aca1e7327a135adf5c47257c36f828d2d
Signed-off-by: Will Lahti <f19a2779710675aad3df5c81e4ecfde11707e3aa@us.ibm.com>
/*
Copyright IBM Corp. 2017 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mgmt
import (
"sync"
"github.com/hyperledger/fabric/msp"
"github.com/op/go-logging"
)
// FIXME: AS SOON AS THE CHAIN MANAGEMENT CODE IS COMPLETE,
// THESE MAPS AND HELPER FUNCTIONS SHOULD DISAPPEAR BECAUSE
// OWNERSHIP OF PER-CHAIN MSP MANAGERS WILL BE HANDLED BY IT;
// HOWEVER IN THE INTERIM, THESE HELPER FUNCTIONS ARE REQUIRED

// m guards localMsp and mspMap.
var m sync.Mutex

// localMsp is the lazily-created local MSP; see GetLocalMSP.
var localMsp msp.MSP

// mspMap maps chain IDs to their MSP managers; see GetManagerForChain.
var mspMap map[string]msp.MSPManager = make(map[string]msp.MSPManager)

// mspLogger logs under the "msp" module so all MSP log levels can be tuned
// uniformly.
var mspLogger = logging.MustGetLogger("msp")
// GetManagerForChain returns the msp manager for the supplied chain,
// creating and registering one on first use.
func GetManagerForChain(ChainID string) msp.MSPManager {
	m.Lock()
	defer m.Unlock()
	mspMgr := mspMap[ChainID]
	if mspMgr != nil {
		mspLogger.Debugf("Returning existing manager for chain %s", ChainID)
		return mspMgr
	}
	mspMgr = msp.NewMSPManager()
	mspMap[ChainID] = mspMgr
	mspLogger.Debugf("Created new msp manager for chain %s", ChainID)
	return mspMgr
}
// GetManagers returns a shallow copy of every registered MSP manager, keyed
// by chain ID.
func GetManagers() map[string]msp.MSPManager {
	m.Lock()
	defer m.Unlock()
	clone := make(map[string]msp.MSPManager, len(mspMap))
	for id, mgr := range mspMap {
		clone[id] = mgr
	}
	return clone
}
// GetManagerForChainIfExists returns the MSPManager associated to ChainID
// if it exists (nil otherwise); unlike GetManagerForChain it never creates
// one.
func GetManagerForChainIfExists(ChainID string) msp.MSPManager {
	m.Lock()
	defer m.Unlock()
	return mspMap[ChainID]
}
// GetLocalMSP returns the local msp (and creates it if it doesn't exist)
func GetLocalMSP() msp.MSP {
	var lclMsp msp.MSP
	var created bool = false
	{
		// NOTE: defer inside a bare block still runs at function return, so
		// the lock is held through the logging below as well.
		m.Lock()
		defer m.Unlock()
		lclMsp = localMsp
		if lclMsp == nil {
			var err error
			created = true
			lclMsp, err = msp.NewBccspMsp()
			if err != nil {
				// Fatal: the process cannot operate without a local MSP.
				mspLogger.Fatalf("Failed to initialize local MSP, received err %s", err)
			}
			localMsp = lclMsp
		}
	}
	if created {
		mspLogger.Debugf("Created new local MSP")
	} else {
		mspLogger.Debugf("Returning existing local MSP")
	}
	return lclMsp
}
// GetMSPCommon returns the common MSP interface: the per-chain manager when a
// chain ID is given, the local MSP for the empty chain ID.
func GetMSPCommon(chainID string) msp.Common {
	if chainID != "" {
		return GetManagerForChain(chainID)
	}
	return GetLocalMSP()
}
// GetLocalSigningIdentityOrPanic returns the local signing identity or panics
// in case of error.
func GetLocalSigningIdentityOrPanic() msp.SigningIdentity {
	id, err := GetLocalMSP().GetDefaultSigningIdentity()
	if err != nil {
		mspLogger.Panicf("Failed getting local signing identity [%s]", err)
	}
	return id
}
|
// List of mocks used for testing
package betwixt
import (
"errors"
"github.com/zubairhamed/canopus"
"github.com/zubairhamed/sugoi"
"time"
)
// TestDeviceObject is a mock device-style LWM2M object used in tests; it
// serves canned values and stores writable time-zone fields.
type TestDeviceObject struct {
	Model       ObjectDefinition
	currentTime time.Time
	utcOffset   string
	timeZone    string
}

// OnExecute acknowledges any execute request.
func (o *TestDeviceObject) OnExecute(instanceId int, resourceId int, req Lwm2mRequest) Lwm2mResponse {
	return Changed()
}

// OnCreate acknowledges any create request.
func (o *TestDeviceObject) OnCreate(instanceId int, resourceId int, req Lwm2mRequest) Lwm2mResponse {
	return Created()
}

// OnDelete acknowledges any delete request.
func (o *TestDeviceObject) OnDelete(instanceId int, req Lwm2mRequest) Lwm2mResponse {
	return Deleted()
}
// OnRead handles an LWM2M read. resourceId == -1 means an object-instance
// read, which this mock does not implement (NotFound). Known resource IDs
// return canned values; IDs with no case (and the commented-out TLV ones)
// return Content with a zero Value, exactly as before.
//
// The redundant `break` statements were removed: Go switch cases do not fall
// through, so they were dead code.
func (o *TestDeviceObject) OnRead(instanceId int, resourceId int, req Lwm2mRequest) Lwm2mResponse {
	if resourceId == -1 {
		// Read Object Instance: not implemented in this mock.
		return NotFound()
	}
	// Read Resource Instance
	var val Value
	// resource := o.Model.GetResource(resourceId)
	switch resourceId {
	case 0:
		val = String("Open Mobile Alliance")
	case 1:
		val = String("Lightweight M2M Client")
	case 2:
		val = String("345000123")
	case 3:
		val = String("1.0")
	case 6:
		// val, _ = values.TlvPayloadFromIntResource(resource, []int{1, 5})
	case 7:
		// val, _ = values.TlvPayloadFromIntResource(resource, []int{3800, 5000})
	case 8:
		// val, _ = values.TlvPayloadFromIntResource(resource, []int{125, 900})
	case 9:
		val = Integer(100)
	case 10:
		val = Integer(15)
	case 11:
		// val, _ = values.TlvPayloadFromIntResource(resource, []int{0})
	case 13:
		val = Time(o.currentTime)
	case 14:
		val = String(o.utcOffset)
	case 15:
		val = String(o.timeZone)
	case 16:
		val = String(string(BINDINGMODE_UDP))
	}
	return Content(val)
}
// OnWrite handles an LWM2M write: resource 14 sets the UTC offset, 15 sets
// the time zone, 13 is accepted but ignored, and anything else is NotFound.
// Redundant `break` statements removed (Go cases do not fall through).
func (o *TestDeviceObject) OnWrite(instanceId int, resourceId int, req Lwm2mRequest) Lwm2mResponse {
	val := req.GetMessage().Payload
	switch resourceId {
	case 13:
		// Write accepted but ignored, matching the original behavior.
	case 14:
		o.utcOffset = val.String()
	case 15:
		o.timeZone = val.String()
	default:
		return NotFound()
	}
	return Changed()
}
// Reboot is a stub device action; it returns an empty value.
func (o *TestDeviceObject) Reboot() Value {
	return Empty()
}

// FactoryReset is a stub device action; it returns an empty value.
func (o *TestDeviceObject) FactoryReset() Value {
	return Empty()
}

// ResetErrorCode is a stub; it always reports an empty error code.
func (o *TestDeviceObject) ResetErrorCode() string {
	return ""
}

// NewTestDeviceObject builds the mock with a fixed timestamp and +02:00
// offsets so tests are deterministic.
func NewTestDeviceObject(def ObjectDefinition) *TestDeviceObject {
	return &TestDeviceObject{
		Model:       def,
		currentTime: time.Unix(1367491215, 0),
		utcOffset:   "+02:00",
		timeZone:    "+02:00",
	}
}
// MockServerStatistics is a no-op ServerStatistics implementation.
type MockServerStatistics struct {
}

// IncrementCoapRequestsCount is a no-op.
func (s *MockServerStatistics) IncrementCoapRequestsCount() {
}

// GetRequestsCount always reports zero requests.
func (s *MockServerStatistics) GetRequestsCount() int {
	return 0
}

// NewMockServer returns a Server with stub statistics and a sugoi HTTP server
// on port 8080.
func NewMockServer() Server {
	return &MockServer{
		stats:      &MockServerStatistics{},
		httpServer: sugoi.NewHttpServer("8080"),
	}
}

// MockServer is a Server whose methods are inert stubs.
type MockServer struct {
	stats      ServerStatistics
	httpServer sugoi.HttpServer
	coapServer canopus.CoapServer
}

// Start is a no-op.
func (server *MockServer) Start() {
}

// UseRegistry is a no-op.
func (server *MockServer) UseRegistry(reg Registry) {
}

// On is a no-op event registration.
func (server *MockServer) On(e EventType, fn FnEvent) {
}

// GetClients returns a fresh, empty client map.
func (server *MockServer) GetClients() map[string]RegisteredClient {
	return make(map[string]RegisteredClient)
}

// GetStats returns the stub statistics.
func (server *MockServer) GetStats() ServerStatistics {
	return server.stats
}

// GetHttpServer returns the embedded sugoi HTTP server.
func (server *MockServer) GetHttpServer() sugoi.HttpServer {
	return server.httpServer
}

// GetCoapServer returns the (zero-valued) CoAP server.
func (server *MockServer) GetCoapServer() canopus.CoapServer {
	return server.coapServer
}

// GetClient always returns nil.
func (server *MockServer) GetClient(id string) RegisteredClient {
	return nil
}
// NewMockRegistry builds a MockRegistry and registers each provided source.
func NewMockRegistry(s ...ObjectSource) Registry {
	reg := &MockRegistry{}
	reg.sources = []ObjectSource{}
	for _, o := range s {
		reg.Register(o)
	}
	return reg
}

// MockRegistry is a Registry backed by a flat list of object sources.
type MockRegistry struct {
	sources []ObjectSource
}

// GetDefinition is a stub; it always returns nil.
func (r *MockRegistry) GetDefinition(t LWM2MObjectType) ObjectDefinition {
	return nil
}

// Register initializes a source and appends it to the registry.
// Receiver renamed m -> r for consistency with the other MockRegistry methods.
func (r *MockRegistry) Register(s ObjectSource) {
	s.Initialize()
	r.sources = append(r.sources, s)
}

// GetMandatory returns every mandatory object definition across all sources.
func (r *MockRegistry) GetMandatory() []ObjectDefinition {
	mandatory := []ObjectDefinition{}
	for _, s := range r.sources {
		objs := s.GetObjects()
		for _, o := range objs {
			if o.IsMandatory() {
				mandatory = append(mandatory, o)
			}
		}
	}
	return mandatory
}

// GetDefinitions returns every object definition across all non-nil sources.
// Receiver renamed m -> r for consistency with the other MockRegistry methods.
func (r *MockRegistry) GetDefinitions() []ObjectDefinition {
	defs := []ObjectDefinition{}
	for _, s := range r.sources {
		if s != nil {
			for _, v := range s.GetObjects() {
				defs = append(defs, v)
			}
		}
	}
	return defs
}
// NewMockObject builds a MockObject, looking its definition up in reg.
func NewMockObject(t LWM2MObjectType, enabler ObjectEnabler, reg Registry) Object {
	def := reg.GetDefinition(t)
	return &MockObject{
		definition: def,
		typeId:     t,
		enabler:    enabler,
		instances:  make(map[int]bool),
	}
}

// MockObject is an Object stub; note that several getters ignore the stored
// fields (see GetType and GetDefinition).
type MockObject struct {
	typeId     LWM2MObjectType
	definition ObjectDefinition
	enabler    ObjectEnabler
	instances  map[int]bool
}

// AddInstance is a no-op.
func (o *MockObject) AddInstance(int) {}

// RemoveInstance is a no-op.
func (o *MockObject) RemoveInstance(int) {}

// GetInstances always returns nil.
func (o *MockObject) GetInstances() []int {
	return nil
}

// GetEnabler returns the enabler supplied at construction.
func (o *MockObject) GetEnabler() ObjectEnabler {
	return o.enabler
}

// GetType always returns type 0, ignoring o.typeId — presumably intentional
// for these tests; confirm before relying on it.
func (o *MockObject) GetType() LWM2MObjectType {
	return LWM2MObjectType(0)
}

// GetDefinition always returns nil, ignoring o.definition.
func (o *MockObject) GetDefinition() ObjectDefinition {
	return nil
}

// SetEnabler is a no-op.
func (o *MockObject) SetEnabler(ObjectEnabler) {}
// NewMockClient returns an LWM2MClient stub with an empty object table.
func NewMockClient() LWM2MClient {
	return &MockClient{
		enabledObjects: make(map[LWM2MObjectType]Object),
	}
}

// MockClient is an LWM2MClient whose only real behavior is tracking enabled
// objects and the registry in use.
type MockClient struct {
	enabledObjects map[LWM2MObjectType]Object
	registry       Registry
}

// AddObjectInstance is a no-op that reports success.
func (c *MockClient) AddObjectInstance(LWM2MObjectType, int) error {
	return nil
}

// Register is a no-op that reports success with an empty ID.
func (c *MockClient) Register(string) (string, error) {
	return "", nil
}

// UseRegistry stores the registry for later NewMockObject lookups.
func (c *MockClient) UseRegistry(r Registry) {
	c.registry = r
}

// EnableObject records a new mock object for t, failing if t is already
// enabled.
func (c *MockClient) EnableObject(t LWM2MObjectType, e ObjectEnabler) error {
	_, ok := c.enabledObjects[t]
	if !ok {
		c.enabledObjects[t] = NewMockObject(t, e, c.GetRegistry())
		return nil
	} else {
		return errors.New("Object already enabled")
	}
}

// GetRegistry returns the registry set via UseRegistry (nil if unset).
func (c *MockClient) GetRegistry() Registry {
	return c.registry
}

// GetEnabledObjects returns the live object table (not a copy).
func (c *MockClient) GetEnabledObjects() map[LWM2MObjectType]Object {
	return c.enabledObjects
}

// The remaining LWM2MClient methods are inert stubs.
func (c *MockClient) AddObjectInstances(LWM2MObjectType, ...int) {}
func (c *MockClient) AddResource()                               {}
func (c *MockClient) AddObject()                                 {}
func (c *MockClient) Deregister()                                {}
func (c *MockClient) Update()                                    {}
func (c *MockClient) SetEnabler(LWM2MObjectType, ObjectEnabler)  {}
func (c *MockClient) Start()                                     {}
func (c *MockClient) OnStartup(FnOnStartup)                      {}
func (c *MockClient) OnRead(FnOnRead)                            {}
func (c *MockClient) OnWrite(FnOnWrite)                          {}
func (c *MockClient) OnExecute(FnOnExecute)                      {}
func (c *MockClient) OnRegistered(FnOnRegistered)                {}
func (c *MockClient) OnDeregistered(FnOnDeregistered)            {}
func (c *MockClient) OnError(FnOnError)                          {}
Fix compilation issue
// List of mocks used for testing
package betwixt
import (
"errors"
"github.com/zubairhamed/canopus"
"time"
)
// TestDeviceObject is a mock device-style LWM2M object used in tests; it
// serves canned values and stores writable time-zone fields.
type TestDeviceObject struct {
	Model       ObjectDefinition
	currentTime time.Time
	utcOffset   string
	timeZone    string
}

// OnExecute acknowledges any execute request.
func (o *TestDeviceObject) OnExecute(instanceId int, resourceId int, req Lwm2mRequest) Lwm2mResponse {
	return Changed()
}

// OnCreate acknowledges any create request.
func (o *TestDeviceObject) OnCreate(instanceId int, resourceId int, req Lwm2mRequest) Lwm2mResponse {
	return Created()
}

// OnDelete acknowledges any delete request.
func (o *TestDeviceObject) OnDelete(instanceId int, req Lwm2mRequest) Lwm2mResponse {
	return Deleted()
}
// OnRead handles an LWM2M read. resourceId == -1 means an object-instance
// read, which this mock does not implement (NotFound). Known resource IDs
// return canned values; IDs with no case (and the commented-out TLV ones)
// return Content with a zero Value, exactly as before.
//
// The redundant `break` statements were removed: Go switch cases do not fall
// through, so they were dead code.
func (o *TestDeviceObject) OnRead(instanceId int, resourceId int, req Lwm2mRequest) Lwm2mResponse {
	if resourceId == -1 {
		// Read Object Instance: not implemented in this mock.
		return NotFound()
	}
	// Read Resource Instance
	var val Value
	// resource := o.Model.GetResource(resourceId)
	switch resourceId {
	case 0:
		val = String("Open Mobile Alliance")
	case 1:
		val = String("Lightweight M2M Client")
	case 2:
		val = String("345000123")
	case 3:
		val = String("1.0")
	case 6:
		// val, _ = values.TlvPayloadFromIntResource(resource, []int{1, 5})
	case 7:
		// val, _ = values.TlvPayloadFromIntResource(resource, []int{3800, 5000})
	case 8:
		// val, _ = values.TlvPayloadFromIntResource(resource, []int{125, 900})
	case 9:
		val = Integer(100)
	case 10:
		val = Integer(15)
	case 11:
		// val, _ = values.TlvPayloadFromIntResource(resource, []int{0})
	case 13:
		val = Time(o.currentTime)
	case 14:
		val = String(o.utcOffset)
	case 15:
		val = String(o.timeZone)
	case 16:
		val = String(string(BINDINGMODE_UDP))
	}
	return Content(val)
}
// OnWrite handles an LWM2M write: resource 14 sets the UTC offset, 15 sets
// the time zone, 13 is accepted but ignored, and anything else is NotFound.
// Redundant `break` statements removed (Go cases do not fall through).
func (o *TestDeviceObject) OnWrite(instanceId int, resourceId int, req Lwm2mRequest) Lwm2mResponse {
	val := req.GetMessage().Payload
	switch resourceId {
	case 13:
		// Write accepted but ignored, matching the original behavior.
	case 14:
		o.utcOffset = val.String()
	case 15:
		o.timeZone = val.String()
	default:
		return NotFound()
	}
	return Changed()
}
// Reboot is a stub device action; it returns an empty value.
func (o *TestDeviceObject) Reboot() Value {
	return Empty()
}

// FactoryReset is a stub device action; it returns an empty value.
func (o *TestDeviceObject) FactoryReset() Value {
	return Empty()
}

// ResetErrorCode is a stub; it always reports an empty error code.
func (o *TestDeviceObject) ResetErrorCode() string {
	return ""
}

// NewTestDeviceObject builds the mock with a fixed timestamp and +02:00
// offsets so tests are deterministic.
func NewTestDeviceObject(def ObjectDefinition) *TestDeviceObject {
	return &TestDeviceObject{
		Model:       def,
		currentTime: time.Unix(1367491215, 0),
		utcOffset:   "+02:00",
		timeZone:    "+02:00",
	}
}
// MockServerStatistics is a no-op ServerStatistics implementation.
type MockServerStatistics struct {
}

// IncrementCoapRequestsCount is a no-op.
func (s *MockServerStatistics) IncrementCoapRequestsCount() {
}

// GetRequestsCount always reports zero requests.
func (s *MockServerStatistics) GetRequestsCount() int {
	return 0
}

// NewMockServer returns a Server with stub statistics.
func NewMockServer() Server {
	return &MockServer{
		stats: &MockServerStatistics{},
	}
}

// MockServer is a Server whose methods are inert stubs.
type MockServer struct {
	stats      ServerStatistics
	coapServer canopus.CoapServer
}

// Start is a no-op.
func (server *MockServer) Start() {
}

// UseRegistry is a no-op.
func (server *MockServer) UseRegistry(reg Registry) {
}

// On is a no-op event registration.
func (server *MockServer) On(e EventType, fn FnEvent) {
}

// GetClients returns a fresh, empty client map.
func (server *MockServer) GetClients() map[string]RegisteredClient {
	return make(map[string]RegisteredClient)
}

// GetStats returns the stub statistics.
func (server *MockServer) GetStats() ServerStatistics {
	return server.stats
}

// GetCoapServer returns the (zero-valued) CoAP server.
func (server *MockServer) GetCoapServer() canopus.CoapServer {
	return server.coapServer
}

// GetClient always returns nil.
func (server *MockServer) GetClient(id string) RegisteredClient {
	return nil
}
// NewMockRegistry builds a MockRegistry and registers each provided source.
func NewMockRegistry(s ...ObjectSource) Registry {
	reg := &MockRegistry{}
	reg.sources = []ObjectSource{}
	for _, o := range s {
		reg.Register(o)
	}
	return reg
}

// MockRegistry is a Registry backed by a flat list of object sources.
type MockRegistry struct {
	sources []ObjectSource
}

// GetDefinition is a stub; it always returns nil.
func (r *MockRegistry) GetDefinition(t LWM2MObjectType) ObjectDefinition {
	return nil
}

// Register initializes a source and appends it to the registry.
// Receiver renamed m -> r for consistency with the other MockRegistry methods.
func (r *MockRegistry) Register(s ObjectSource) {
	s.Initialize()
	r.sources = append(r.sources, s)
}

// GetMandatory returns every mandatory object definition across all sources.
func (r *MockRegistry) GetMandatory() []ObjectDefinition {
	mandatory := []ObjectDefinition{}
	for _, s := range r.sources {
		objs := s.GetObjects()
		for _, o := range objs {
			if o.IsMandatory() {
				mandatory = append(mandatory, o)
			}
		}
	}
	return mandatory
}

// GetDefinitions returns every object definition across all non-nil sources.
// Receiver renamed m -> r for consistency with the other MockRegistry methods.
func (r *MockRegistry) GetDefinitions() []ObjectDefinition {
	defs := []ObjectDefinition{}
	for _, s := range r.sources {
		if s != nil {
			for _, v := range s.GetObjects() {
				defs = append(defs, v)
			}
		}
	}
	return defs
}
// NewMockObject builds a MockObject, looking its definition up in reg.
func NewMockObject(t LWM2MObjectType, enabler ObjectEnabler, reg Registry) Object {
	def := reg.GetDefinition(t)
	return &MockObject{
		definition: def,
		typeId:     t,
		enabler:    enabler,
		instances:  make(map[int]bool),
	}
}

// MockObject is an Object stub; note that several getters ignore the stored
// fields (see GetType and GetDefinition).
type MockObject struct {
	typeId     LWM2MObjectType
	definition ObjectDefinition
	enabler    ObjectEnabler
	instances  map[int]bool
}

// AddInstance is a no-op.
func (o *MockObject) AddInstance(int) {}

// RemoveInstance is a no-op.
func (o *MockObject) RemoveInstance(int) {}

// GetInstances always returns nil.
func (o *MockObject) GetInstances() []int {
	return nil
}

// GetEnabler returns the enabler supplied at construction.
func (o *MockObject) GetEnabler() ObjectEnabler {
	return o.enabler
}

// GetType always returns type 0, ignoring o.typeId — presumably intentional
// for these tests; confirm before relying on it.
func (o *MockObject) GetType() LWM2MObjectType {
	return LWM2MObjectType(0)
}

// GetDefinition always returns nil, ignoring o.definition.
func (o *MockObject) GetDefinition() ObjectDefinition {
	return nil
}

// SetEnabler is a no-op.
func (o *MockObject) SetEnabler(ObjectEnabler) {}
// NewMockClient returns an LWM2MClient stub with an empty object table.
func NewMockClient() LWM2MClient {
	return &MockClient{
		enabledObjects: make(map[LWM2MObjectType]Object),
	}
}

// MockClient is an LWM2MClient whose only real behavior is tracking enabled
// objects and the registry in use.
type MockClient struct {
	enabledObjects map[LWM2MObjectType]Object
	registry       Registry
}

// AddObjectInstance is a no-op that reports success.
func (c *MockClient) AddObjectInstance(LWM2MObjectType, int) error {
	return nil
}

// Register is a no-op that reports success with an empty ID.
func (c *MockClient) Register(string) (string, error) {
	return "", nil
}

// UseRegistry stores the registry for later NewMockObject lookups.
func (c *MockClient) UseRegistry(r Registry) {
	c.registry = r
}

// EnableObject records a new mock object for t, failing if t is already
// enabled.
func (c *MockClient) EnableObject(t LWM2MObjectType, e ObjectEnabler) error {
	_, ok := c.enabledObjects[t]
	if !ok {
		c.enabledObjects[t] = NewMockObject(t, e, c.GetRegistry())
		return nil
	} else {
		return errors.New("Object already enabled")
	}
}

// GetRegistry returns the registry set via UseRegistry (nil if unset).
func (c *MockClient) GetRegistry() Registry {
	return c.registry
}

// GetEnabledObjects returns the live object table (not a copy).
func (c *MockClient) GetEnabledObjects() map[LWM2MObjectType]Object {
	return c.enabledObjects
}

// The remaining LWM2MClient methods are inert stubs.
func (c *MockClient) AddObjectInstances(LWM2MObjectType, ...int) {}
func (c *MockClient) AddResource()                               {}
func (c *MockClient) AddObject()                                 {}
func (c *MockClient) Deregister()                                {}
func (c *MockClient) Update()                                    {}
func (c *MockClient) SetEnabler(LWM2MObjectType, ObjectEnabler)  {}
func (c *MockClient) Start()                                     {}
func (c *MockClient) OnStartup(FnOnStartup)                      {}
func (c *MockClient) OnRead(FnOnRead)                            {}
func (c *MockClient) OnWrite(FnOnWrite)                          {}
func (c *MockClient) OnExecute(FnOnExecute)                      {}
func (c *MockClient) OnRegistered(FnOnRegistered)                {}
func (c *MockClient) OnDeregistered(FnOnDeregistered)            {}
func (c *MockClient) OnError(FnOnError)                          {}
|
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package compress
const (
filterScale = 4096
filterShift = 5
cdfScale = 32768
cdfRate = 5
)
type CDF struct {
CDF []uint16
Mixin [][]uint16
}
func NewCDF(size int) *CDF {
if size != 256 {
panic("size is not 256")
}
cdf, mixin := make([]uint16, size+1), make([][]uint16, size)
sum := 0
for i := range cdf {
cdf[i] = uint16(sum)
sum += 128
}
for i := range mixin {
sum, m := 0, make([]uint16, size+1)
for j := range m {
m[j] = uint16(sum)
sum += 4
if j == i {
sum += cdfScale - 4*size
}
}
mixin[i] = m
}
return &CDF{
CDF: cdf,
Mixin: mixin,
}
}
// Len returns the number of entries in the cumulative table (size + 1).
func (c *CDF) Len() int {
	return len(c.CDF)
}

// Less orders entries by cumulative value (sort.Interface-style helper).
func (c *CDF) Less(i, j int) bool {
	cdf := c.CDF
	return cdf[i] < cdf[j]
}

// Swap exchanges two cumulative entries (sort.Interface-style helper).
func (c *CDF) Swap(i, j int) {
	cdf := c.CDF
	cdf[i], cdf[j] = cdf[j], cdf[i]
}
// Update adapts the table toward symbol s: every interior entry moves
// 1/2^cdfRate of the way toward the corresponding entry of Mixin[s], with
// the step magnitude truncated so it never overshoots. The endpoints stay
// fixed at 0 and cdfScale. Panics if the fixed-total or strict-monotonicity
// invariants are violated.
func (c *CDF) Update(s int) {
	cdf, mixin := c.CDF, c.Mixin[s]
	size := len(cdf) - 1
	for i := 1; i < size; i++ {
		// uint16 -> int conversions are always non-negative, so the
		// original negativity panics on a and b were unreachable and
		// have been removed.
		a, b := int(cdf[i]), int(mixin[i])
		v := b - a
		if v >= 0 {
			v = a + (v >> cdfRate)
		} else {
			v = a - ((-v) >> cdfRate)
		}
		if v < 0 {
			// Defensive only: v stays between a and b, both >= 0.
			panic("c is less than zero")
		}
		cdf[i] = uint16(v)
	}
	if cdf[size] != cdfScale {
		panic("cdf scale is incorrect")
	}
	for i := 1; i < len(cdf); i++ {
		if cdf[i] <= cdf[i-1] {
			panic("invalid cdf")
		}
	}
}
// AdaptiveCoder returns a Model that streams one arithmetic-coding Symbol
// per input symbol, using an order-0 adaptive frequency table whose counts
// all start at 1 (so the initial scale equals the alphabet size).
func (coder Coder16) AdaptiveCoder() Model {
	out := make(chan []Symbol, BUFFER_CHAN_SIZE)
	go func() {
		table, scale, buffer := make([]uint16, coder.Alphabit), uint16(0), [BUFFER_POOL_SIZE]Symbol{}
		for i, _ := range table {
			table[i] = 1
			scale += 1
		}
		current, offset, index := buffer[0:BUFFER_SIZE], BUFFER_SIZE, 0
		for input := range coder.Input {
			for _, s := range input {
				// low = sum of counts below s; the symbol's interval is
				// [low, low+table[s]) out of scale.
				low := uint16(0)
				for _, count := range table[:s] {
					low += count
				}
				current[index], index = Symbol{Scale: scale, Low: low, High: low + table[s]}, index+1
				if index == BUFFER_SIZE {
					// Ship the full window and rotate to the next window of
					// the backing pool (assumes BUFFER_POOL_SIZE is a power
					// of two so the mask wraps the offset — TODO confirm).
					out <- current
					next := offset + BUFFER_SIZE
					current, offset, index = buffer[offset:next], next&BUFFER_POOL_SIZE_MASK, 0
				}
				scale++
				table[s]++
				// Rescale: halve all counts (keeping each >= 1) once the
				// running total exceeds MAX_SCALE16.
				if scale > MAX_SCALE16 {
					scale = 0
					for i, count := range table {
						if count >>= 1; count == 0 {
							table[i], scale = 1, scale+1
						} else {
							table[i], scale = count, scale+count
						}
					}
				}
			}
		}
		out <- current[:index]
		close(out)
	}()
	return Model{Input: out}
}

// AdaptivePredictiveCoder is the order-1 variant of AdaptiveCoder: counts
// and scale are kept per context, where the context is the previous symbol.
func (coder Coder16) AdaptivePredictiveCoder() Model {
	out := make(chan []Symbol, BUFFER_CHAN_SIZE)
	go func() {
		table, scale, context, buffer := make([][]uint16, coder.Alphabit), make([]uint16, coder.Alphabit), uint16(0), [BUFFER_POOL_SIZE]Symbol{}
		for i, _ := range table {
			table[i] = make([]uint16, coder.Alphabit)
			scale[i] = coder.Alphabit
			for j, _ := range table[i] {
				table[i][j] = 1
			}
		}
		current, offset, index := buffer[0:BUFFER_SIZE], BUFFER_SIZE, 0
		for input := range coder.Input {
			for _, s := range input {
				low := uint16(0)
				for _, count := range table[context][:s] {
					low += count
				}
				current[index], index = Symbol{Scale: scale[context], Low: low, High: low + table[context][s]}, index+1
				if index == BUFFER_SIZE {
					// Rotate to the next window of the backing pool.
					out <- current
					next := offset + BUFFER_SIZE
					current, offset, index = buffer[offset:next], next&BUFFER_POOL_SIZE_MASK, 0
				}
				scale[context]++
				table[context][s]++
				// Per-context rescale, same policy as AdaptiveCoder.
				if scale[context] > MAX_SCALE16 {
					scale[context] = 0
					for i, count := range table[context] {
						if count >>= 1; count == 0 {
							table[context][i], scale[context] = 1, scale[context]+1
						} else {
							table[context][i], scale[context] = count, scale[context]+count
						}
					}
				}
				context = s
			}
		}
		out <- current[:index]
		close(out)
	}()
	return Model{Input: out}
}
// AdaptiveBitCoder codes each symbol bit by bit, most-significant bit first,
// with a single two-entry adaptive count table shared by all bits.
func (coder Coder16) AdaptiveBitCoder() Model {
	out := make(chan []Symbol, BUFFER_CHAN_SIZE)
	go func() {
		table, buffer := [2]uint16{}, [BUFFER_POOL_SIZE]Symbol{}
		table[0] = 1
		table[1] = 1
		// highest = bit length of Alphabit-1; mask selects the MSB coded.
		highest := uint32(0)
		for a := coder.Alphabit - 1; a > 0; a >>= 1 {
			highest++
		}
		current, offset, index, mask := buffer[0:BUFFER_SIZE], BUFFER_SIZE, 0, uint16(1)<<(highest-1)
		for input := range coder.Input {
			for _, s := range input {
				for bit := mask; bit > 0; bit >>= 1 {
					// Interval [0, table[0]) codes a 0 bit, [table[0], scale) a 1 bit.
					b, low, high, scale := uint16(0), uint16(0), table[0], table[0]+table[1]
					if bit&s != 0 {
						b, low, high = 1, high, scale
					}
					current[index], index = Symbol{Scale: scale, Low: low, High: high}, index+1
					if index == BUFFER_SIZE {
						// Rotate to the next window of the backing pool.
						out <- current
						next := offset + BUFFER_SIZE
						current, offset, index = buffer[offset:next], next&BUFFER_POOL_SIZE_MASK, 0
					}
					table[b]++
					// Halve both counts (floor 1) once the total reaches MAX_SCALE16.
					if scale >= MAX_SCALE16 {
						table[0] >>= 1
						table[1] >>= 1
						if table[0] == 0 {
							table[0] = 1
						}
						if table[1] == 0 {
							table[1] = 1
						}
					}
				}
			}
		}
		out <- current[:index]
		close(out)
	}()
	return Model{Input: out}
}

// AdaptivePredictiveBitCoder is the contextual variant of AdaptiveBitCoder:
// each bit's counts are selected by a 16-bit sliding window of previously
// coded bits (65536 contexts).
func (coder Coder16) AdaptivePredictiveBitCoder() Model {
	out := make(chan []Symbol, BUFFER_CHAN_SIZE)
	go func() {
		table, context, buffer := make([][2]uint16, 65536), uint16(0), [BUFFER_POOL_SIZE]Symbol{}
		for i, _ := range table {
			table[i][0] = 1
			table[i][1] = 1
		}
		highest := uint32(0)
		for a := coder.Alphabit - 1; a > 0; a >>= 1 {
			highest++
		}
		current, offset, index, mask := buffer[0:BUFFER_SIZE], BUFFER_SIZE, 0, uint16(1)<<(highest-1)
		for input := range coder.Input {
			for _, s := range input {
				for bit := mask; bit > 0; bit >>= 1 {
					b, low, high, scale := uint16(0), uint16(0), table[context][0], table[context][0]+table[context][1]
					if bit&s != 0 {
						b, low, high = 1, high, scale
					}
					current[index], index = Symbol{Scale: scale, Low: low, High: high}, index+1
					if index == BUFFER_SIZE {
						// Rotate to the next window of the backing pool.
						out <- current
						next := offset + BUFFER_SIZE
						current, offset, index = buffer[offset:next], next&BUFFER_POOL_SIZE_MASK, 0
					}
					table[context][b]++
					if scale >= MAX_SCALE16 {
						table[context][0] >>= 1
						table[context][1] >>= 1
						if table[context][0] == 0 {
							table[context][0] = 1
						}
						if table[context][1] == 0 {
							table[context][1] = 1
						}
					}
					// Shift the just-coded bit into the context window.
					context = b | (context << 1)
				}
			}
		}
		out <- current[:index]
		close(out)
	}()
	return Model{Input: out}
}
// https://fgiesen.wordpress.com/2015/05/26/models-for-adaptive-arithmetic-coding/
// FilteredAdaptiveBitCoder codes bits MSB-first with a single shift-filter
// probability p1 (chance of a 1 bit out of filterScale), moving p1 by
// 1/2^filterShift of the remaining gap after every bit.
func (coder Coder16) FilteredAdaptiveBitCoder() Model {
	out := make(chan []Symbol, BUFFER_CHAN_SIZE)
	go func() {
		const scale = uint16(filterScale)
		p1 := scale / 2
		buffer := [BUFFER_POOL_SIZE]Symbol{}
		highest := uint32(0)
		for a := coder.Alphabit - 1; a > 0; a >>= 1 {
			highest++
		}
		current, offset, index, mask := buffer[0:BUFFER_SIZE], BUFFER_SIZE, 0, uint16(1)<<(highest-1)
		for input := range coder.Input {
			for _, s := range input {
				for bit := mask; bit > 0; bit >>= 1 {
					// [0, scale-p1) codes a 0 bit, [scale-p1, scale) a 1 bit.
					b, low, high := uint16(0), uint16(0), scale-p1
					if bit&s != 0 {
						b, low, high = 1, high, scale
					}
					current[index], index = Symbol{Scale: scale, Low: low, High: high}, index+1
					if index == BUFFER_SIZE {
						// Rotate to the next window of the backing pool.
						out <- current
						next := offset + BUFFER_SIZE
						current, offset, index = buffer[offset:next], next&BUFFER_POOL_SIZE_MASK, 0
					}
					if b == 0 {
						p1 -= p1 >> filterShift
					} else {
						p1 += (scale - p1) >> filterShift
					}
				}
			}
		}
		out <- current[:index]
		close(out)
	}()
	return Model{Input: out}
}

// FilteredAdaptivePredictiveBitCoder is FilteredAdaptiveBitCoder with one
// filtered probability per 16-bit bit-history context (65536 contexts).
func (coder Coder16) FilteredAdaptivePredictiveBitCoder() Model {
	out := make(chan []Symbol, BUFFER_CHAN_SIZE)
	go func() {
		const scale = uint16(filterScale)
		table, context, buffer := make([]uint16, 65536), uint16(0), [BUFFER_POOL_SIZE]Symbol{}
		for i, _ := range table {
			table[i] = scale / 2
		}
		highest := uint32(0)
		for a := coder.Alphabit - 1; a > 0; a >>= 1 {
			highest++
		}
		current, offset, index, mask := buffer[0:BUFFER_SIZE], BUFFER_SIZE, 0, uint16(1)<<(highest-1)
		for input := range coder.Input {
			for _, s := range input {
				for bit := mask; bit > 0; bit >>= 1 {
					b, low, high := uint16(0), uint16(0), scale-table[context]
					if bit&s != 0 {
						b, low, high = 1, high, scale
					}
					current[index], index = Symbol{Scale: scale, Low: low, High: high}, index+1
					if index == BUFFER_SIZE {
						// Rotate to the next window of the backing pool.
						out <- current
						next := offset + BUFFER_SIZE
						current, offset, index = buffer[offset:next], next&BUFFER_POOL_SIZE_MASK, 0
					}
					if b == 0 {
						table[context] -= table[context] >> filterShift
					} else {
						table[context] += (scale - table[context]) >> filterShift
					}
					// Shift the just-coded bit into the context window.
					context = b | (context << 1)
				}
			}
		}
		out <- current[:index]
		close(out)
	}()
	return Model{Input: out}
}
// FilteredAdaptiveCoder codes whole symbols against an adaptive CDF table
// with fixed total cdfScale, adapting the table toward each coded symbol.
func (coder Coder16) FilteredAdaptiveCoder() Model {
	out := make(chan []Symbol, BUFFER_CHAN_SIZE)
	go func() {
		cdf := NewCDF(int(coder.Alphabit))
		buffer := [BUFFER_POOL_SIZE]Symbol{}
		current, offset, index := buffer[0:BUFFER_SIZE], BUFFER_SIZE, 0
		for input := range coder.Input {
			for _, s := range input {
				current[index], index = Symbol{Scale: cdfScale, Low: cdf.CDF[s], High: cdf.CDF[s+1]}, index+1
				if index == BUFFER_SIZE {
					// Rotate to the next window of the backing pool.
					out <- current
					next := offset + BUFFER_SIZE
					current, offset, index = buffer[offset:next], next&BUFFER_POOL_SIZE_MASK, 0
				}
				cdf.Update(int(s))
			}
		}
		out <- current[:index]
		close(out)
	}()
	return Model{Input: out}
}
// AdaptiveDecoder returns a Model whose Output lookup decodes one symbol
// per call from an order-0 adaptive frequency table (all counts start at 1,
// mirroring AdaptiveCoder). The returned Symbol carries the decoded
// interval under the pre-update scale; an empty Symbol means the Output
// callback signalled end of stream.
func (decoder Coder16) AdaptiveDecoder() Model {
	table, scale := make([]uint16, decoder.Alphabit), uint16(0)
	for i := range table {
		table[i] = 1
		scale++
	}
	lookup := func(code uint16) Symbol {
		low, high, done := uint16(0), uint16(0), false
		for s, count := range table {
			if high += count; code < high {
				low, done, scale, table[s] = high-count, decoder.Output(uint16(s)), scale+1, table[s]+1
				break
			}
		}
		if done {
			return Symbol{}
		}
		// Rescale: halve all counts (keeping each >= 1) once the total
		// exceeds MAX_SCALE16, matching the encoder.
		if scale > MAX_SCALE16 {
			scale = 0
			for i, count := range table {
				if count >>= 1; count == 0 {
					table[i], scale = 1, scale+1
				} else {
					table[i], scale = count, scale+count
				}
			}
		}
		return Symbol{Scale: scale, Low: low, High: high}
	}
	return Model{Scale: uint32(scale), Output: lookup}
}
// AdaptivePredictiveDecoder is the order-1 decoder matching
// AdaptivePredictiveCoder: per-context counts keyed by the previous symbol.
// The returned Symbol's Scale is the scale for the NEXT lookup (under the
// new context); Low/High describe the symbol just decoded.
func (decoder Coder16) AdaptivePredictiveDecoder() Model {
	table, scale, context := make([][]uint16, decoder.Alphabit), make([]uint16, decoder.Alphabit), uint16(0)
	for i, _ := range table {
		table[i] = make([]uint16, decoder.Alphabit)
		scale[i] = decoder.Alphabit
		for j, _ := range table[i] {
			table[i][j] = 1
		}
	}
	lookup := func(code uint16) Symbol {
		low, high, next, done := uint16(0), uint16(0), uint16(0), false
		for s, count := range table[context] {
			if high += count; code < high {
				next = uint16(s)
				low, done, scale[context], table[context][s] = high-count, decoder.Output(next), scale[context]+1, table[context][s]+1
				break
			}
		}
		if done {
			// Output signalled end of stream.
			return Symbol{}
		} else {
			// Per-context rescale, same policy as the encoder.
			if scale[context] > MAX_SCALE16 {
				scale[context] = 0
				for i, count := range table[context] {
					if count >>= 1; count == 0 {
						table[context][i], scale[context] = 1, scale[context]+1
					} else {
						table[context][i], scale[context] = count, scale[context]+count
					}
				}
			}
			context = next
			return Symbol{Scale: scale[context], Low: low, High: high}
		}
	}
	return Model{Scale: uint32(decoder.Alphabit), Output: lookup}
}

// AdaptiveBitDecoder decodes one bit per lookup with a two-entry adaptive
// count table, reassembling symbols MSB-first in bits; a finished symbol is
// handed to decoder.Output, which returns true to stop decoding.
func (decoder Coder16) AdaptiveBitDecoder() Model {
	table := [2]uint16{}
	table[0] = 1
	table[1] = 1
	highest := uint32(0)
	for a := decoder.Alphabit - 1; a > 0; a >>= 1 {
		highest++
	}
	mask := uint16(1) << (highest - 1)
	bit, bits := mask, uint16(0)
	lookup := func(code uint16) Symbol {
		scale, low, high, b := table[0]+table[1], uint16(0), uint16(0), uint16(0)
		if code < table[0] {
			high, bit = table[0], bit>>1
		} else {
			low, high, bits, bit, b = table[0], scale, bits|bit, bit>>1, 1
		}
		table[b]++
		// Halve both counts (floor 1) once the total reaches MAX_SCALE16.
		if scale >= MAX_SCALE16 {
			table[0] >>= 1
			table[1] >>= 1
			if table[0] == 0 {
				table[0] = 1
			}
			if table[1] == 0 {
				table[1] = 1
			}
		}
		if bit == 0 {
			if decoder.Output(bits) {
				return Symbol{}
			}
			bits, bit = 0, mask
		}
		// Scale here is the post-update total, used for the next lookup.
		return Symbol{Scale: table[0] + table[1], Low: low, High: high}
	}
	return Model{Scale: uint32(2), Output: lookup}
}
// AdaptivePredictiveBitDecoder decodes one bit per lookup with per-context
// counts keyed by a 16-bit window of previously decoded bits, matching
// AdaptivePredictiveBitCoder. NOTE(review): the returned Scale is read from
// the UPDATED context while Low/High came from the old one — this mirrors
// the "Scale is for the next lookup" convention of the other decoders.
func (decoder Coder16) AdaptivePredictiveBitDecoder() Model {
	table, context := make([][2]uint16, 65536), uint16(0)
	for i, _ := range table {
		table[i][0] = 1
		table[i][1] = 1
	}
	highest := uint32(0)
	for a := decoder.Alphabit - 1; a > 0; a >>= 1 {
		highest++
	}
	mask := uint16(1) << (highest - 1)
	bit, bits := mask, uint16(0)
	lookup := func(code uint16) Symbol {
		scale, low, high, b := table[context][0]+table[context][1], uint16(0), uint16(0), uint16(0)
		if code < table[context][0] {
			high, bit = table[context][0], bit>>1
		} else {
			low, high, bits, bit, b = table[context][0], scale, bits|bit, bit>>1, 1
		}
		table[context][b]++
		if scale >= MAX_SCALE16 {
			table[context][0] >>= 1
			table[context][1] >>= 1
			if table[context][0] == 0 {
				table[context][0] = 1
			}
			if table[context][1] == 0 {
				table[context][1] = 1
			}
		}
		// Shift the just-decoded bit into the context window.
		context = b | (context << 1)
		if bit == 0 {
			if decoder.Output(bits) {
				return Symbol{}
			}
			bits, bit = 0, mask
		}
		return Symbol{Scale: table[context][0] + table[context][1], Low: low, High: high}
	}
	return Model{Scale: uint32(2), Output: lookup}
}

// FilteredAdaptiveBitDecoder is the decoder for FilteredAdaptiveBitCoder:
// a single shift-filter probability p1 out of filterScale per bit.
func (decoder Coder16) FilteredAdaptiveBitDecoder() Model {
	const scale = uint16(filterScale)
	p1 := scale / 2
	highest := uint32(0)
	for a := decoder.Alphabit - 1; a > 0; a >>= 1 {
		highest++
	}
	mask := uint16(1) << (highest - 1)
	bit, bits := mask, uint16(0)
	lookup := func(code uint16) Symbol {
		low, high, b := uint16(0), uint16(0), uint16(0)
		if p0 := scale - p1; code < p0 {
			high, bit = p0, bit>>1
		} else {
			low, high, bits, bit, b = p0, scale, bits|bit, bit>>1, 1
		}
		if b == 0 {
			p1 -= p1 >> filterShift
		} else {
			p1 += (scale - p1) >> filterShift
		}
		if bit == 0 {
			if decoder.Output(bits) {
				return Symbol{}
			}
			bits, bit = 0, mask
		}
		return Symbol{Scale: scale, Low: low, High: high}
	}
	return Model{Scale: uint32(scale), Output: lookup}
}
// FilteredAdaptivePredictiveBitDecoder is the decoder for
// FilteredAdaptivePredictiveBitCoder: one filtered probability per 16-bit
// bit-history context.
func (decoder Coder16) FilteredAdaptivePredictiveBitDecoder() Model {
	const scale = uint16(filterScale)
	table, context := make([]uint16, 65536), uint16(0)
	for i, _ := range table {
		table[i] = scale / 2
	}
	highest := uint32(0)
	for a := decoder.Alphabit - 1; a > 0; a >>= 1 {
		highest++
	}
	mask := uint16(1) << (highest - 1)
	bit, bits := mask, uint16(0)
	lookup := func(code uint16) Symbol {
		low, high, b := uint16(0), uint16(0), uint16(0)
		if p0 := scale - table[context]; code < p0 {
			high, bit = p0, bit>>1
		} else {
			low, high, bits, bit, b = p0, scale, bits|bit, bit>>1, 1
		}
		if b == 0 {
			table[context] -= table[context] >> filterShift
		} else {
			table[context] += (scale - table[context]) >> filterShift
		}
		// Shift the just-decoded bit into the context window.
		context = b | (context << 1)
		if bit == 0 {
			if decoder.Output(bits) {
				return Symbol{}
			}
			bits, bit = 0, mask
		}
		return Symbol{Scale: scale, Low: low, High: high}
	}
	return Model{Scale: uint32(scale), Output: lookup}
}

// FilteredAdaptiveDecoder is the decoder for FilteredAdaptiveCoder: a linear
// scan of the CDF table finds the interval containing code, the symbol is
// emitted, and the table is adapted toward it.
func (decoder Coder16) FilteredAdaptiveDecoder() Model {
	cdf := NewCDF(int(decoder.Alphabit))
	lookup := func(code uint16) Symbol {
		low, high, done := uint16(0), uint16(0), false
		for s := 1; s < len(cdf.CDF); s++ {
			if code < cdf.CDF[s] {
				symbol := s - 1
				low, high, done = cdf.CDF[s-1], cdf.CDF[s], decoder.Output(uint16(symbol))
				cdf.Update(symbol)
				break
			}
		}
		if done {
			return Symbol{}
		}
		return Symbol{Scale: cdfScale, Low: low, High: high}
	}
	return Model{Scale: uint32(cdfScale), Output: lookup}
}
// AdaptiveCoder32 is the 32-bit-range variant of AdaptiveCoder (uint32
// counts, MAX_SCALE32 rescale threshold, Symbol32 output).
func (coder Coder16) AdaptiveCoder32() Model32 {
	out := make(chan []Symbol32, BUFFER_CHAN_SIZE)
	go func() {
		table, scale, buffer := make([]uint32, coder.Alphabit), uint32(coder.Alphabit), [BUFFER_POOL_SIZE]Symbol32{}
		for i, _ := range table {
			table[i] = 1
		}
		current, offset, index := buffer[0:BUFFER_SIZE], BUFFER_SIZE, 0
		for input := range coder.Input {
			for _, s := range input {
				low := uint32(0)
				for _, count := range table[:s] {
					low += count
				}
				current[index], index = Symbol32{Scale: scale, Low: low, High: low + table[s]}, index+1
				if index == BUFFER_SIZE {
					// Rotate to the next window of the backing pool.
					out <- current
					next := offset + BUFFER_SIZE
					current, offset, index = buffer[offset:next], next&BUFFER_POOL_SIZE_MASK, 0
				}
				scale++
				table[s]++
				// Rescale: halve all counts (keeping each >= 1).
				if scale > MAX_SCALE32 {
					scale = 0
					for i, count := range table {
						if count >>= 1; count == 0 {
							table[i], scale = 1, scale+1
						} else {
							table[i], scale = count, scale+count
						}
					}
				}
			}
		}
		out <- current[:index]
		close(out)
	}()
	return Model32{Input: out}
}

// AdaptivePredictiveCoder32 is the 32-bit-range variant of
// AdaptivePredictiveCoder: order-1 counts keyed by the previous symbol.
func (coder Coder16) AdaptivePredictiveCoder32() Model32 {
	out := make(chan []Symbol32, BUFFER_CHAN_SIZE)
	go func() {
		table, scale, context, buffer := make([][]uint32, coder.Alphabit), make([]uint32, coder.Alphabit), uint16(0), [BUFFER_POOL_SIZE]Symbol32{}
		for i, _ := range table {
			table[i] = make([]uint32, coder.Alphabit)
			scale[i] = uint32(coder.Alphabit)
			for j, _ := range table[i] {
				table[i][j] = 1
			}
		}
		current, offset, index := buffer[0:BUFFER_SIZE], BUFFER_SIZE, 0
		for input := range coder.Input {
			for _, s := range input {
				low := uint32(0)
				for _, count := range table[context][:s] {
					low += count
				}
				current[index], index = Symbol32{Scale: scale[context], Low: low, High: low + table[context][s]}, index+1
				if index == BUFFER_SIZE {
					// Rotate to the next window of the backing pool.
					out <- current
					next := offset + BUFFER_SIZE
					current, offset, index = buffer[offset:next], next&BUFFER_POOL_SIZE_MASK, 0
				}
				scale[context]++
				table[context][s]++
				if scale[context] > MAX_SCALE32 {
					scale[context] = 0
					for i, count := range table[context] {
						if count >>= 1; count == 0 {
							table[context][i], scale[context] = 1, scale[context]+1
						} else {
							table[context][i], scale[context] = count, scale[context]+count
						}
					}
				}
				context = s
			}
		}
		out <- current[:index]
		close(out)
	}()
	return Model32{Input: out}
}
// AdaptiveDecoder32 is the 32-bit-range variant of AdaptiveDecoder: an
// order-0 adaptive model whose lookup decodes one symbol per call; an empty
// Symbol32 means the Output callback signalled end of stream.
func (decoder Coder16) AdaptiveDecoder32() Model32 {
	table, scale := make([]uint32, decoder.Alphabit), uint32(decoder.Alphabit)
	for i := range table {
		table[i] = 1
	}
	lookup := func(code uint32) Symbol32 {
		low, high, done := uint32(0), uint32(0), false
		for s, count := range table {
			if high += count; code < high {
				low, done, scale, table[s] = high-count, decoder.Output(uint16(s)), scale+1, table[s]+1
				break
			}
		}
		if done {
			return Symbol32{}
		}
		// Rescale: halve all counts (keeping each >= 1) once the total
		// exceeds MAX_SCALE32, matching the encoder.
		if scale > MAX_SCALE32 {
			scale = 0
			for i, count := range table {
				if count >>= 1; count == 0 {
					table[i], scale = 1, scale+1
				} else {
					table[i], scale = count, scale+count
				}
			}
		}
		return Symbol32{Scale: scale, Low: low, High: high}
	}
	return Model32{Scale: uint64(decoder.Alphabit), Output: lookup}
}
// AdaptivePredictiveDecoder32 is the 32-bit-range variant of
// AdaptivePredictiveDecoder: order-1 per-context counts keyed by the
// previous symbol; the returned Scale is for the next lookup.
func (decoder Coder16) AdaptivePredictiveDecoder32() Model32 {
	table, scale, context := make([][]uint32, decoder.Alphabit), make([]uint32, decoder.Alphabit), uint16(0)
	for i, _ := range table {
		table[i] = make([]uint32, decoder.Alphabit)
		scale[i] = uint32(decoder.Alphabit)
		for j, _ := range table[i] {
			table[i][j] = 1
		}
	}
	lookup := func(code uint32) Symbol32 {
		low, high, next, done := uint32(0), uint32(0), uint16(0), false
		for s, count := range table[context] {
			if high += count; code < high {
				next = uint16(s)
				low, done, scale[context], table[context][s] = high-count, decoder.Output(next), scale[context]+1, table[context][s]+1
				break
			}
		}
		if done {
			// Output signalled end of stream.
			return Symbol32{}
		} else {
			// Per-context rescale, same policy as the encoder.
			if scale[context] > MAX_SCALE32 {
				scale[context] = 0
				for i, count := range table[context] {
					if count >>= 1; count == 0 {
						table[context][i], scale[context] = 1, scale[context]+1
					} else {
						table[context][i], scale[context] = count, scale[context]+count
					}
				}
			}
			context = next
			return Symbol32{Scale: scale[context], Low: low, High: high}
		}
	}
	return Model32{Scale: uint64(decoder.Alphabit), Output: lookup}
}
// Use correct scale for cdf
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package compress
const (
	// filterScale is the fixed probability total for the shift-filter bit models.
	filterScale = 4096
	// filterShift is the filter adaptation rate: move 1/2^5 of the gap per update.
	filterShift = 5
	// cdfScale is the fixed total frequency of a CDF table (= 8192).
	cdfScale = 1 << (16 - 3)
	// cdfRate is the CDF adaptation rate: move 1/2^5 toward the mixin per update.
	cdfRate = 5
)

// CDF is an adaptive cumulative distribution. CDF[s]..CDF[s+1] is the coding
// interval of symbol s; Mixin[s] is the target distribution that Update(s)
// pulls the table toward.
type CDF struct {
	CDF []uint16
	Mixin [][]uint16
}
// NewCDF builds an adaptive cumulative-distribution table over a 256-symbol
// alphabet. The main table starts uniform (step 32, totalling cdfScale);
// each of the 256 mixin tables gives every symbol a floor step of 1 and
// concentrates the remaining mass on its own symbol. Panics for any size
// other than 256, the only supported alphabet.
func NewCDF(size int) *CDF {
	if size != 256 {
		panic("size is not 256")
	}
	cdf := make([]uint16, size+1)
	for i := 0; i <= size; i++ {
		cdf[i] = uint16(i * 32)
	}
	mixin := make([][]uint16, size)
	for i := 0; i < size; i++ {
		m, acc := make([]uint16, size+1), 0
		for j := 0; j <= size; j++ {
			m[j] = uint16(acc)
			acc++
			if j == i {
				// Spike: all probability not used by the floor goes to i.
				acc += cdfScale - size
			}
		}
		mixin[i] = m
	}
	return &CDF{
		CDF: cdf,
		Mixin: mixin,
	}
}
// Len returns the number of entries in the cumulative table (size + 1).
func (c *CDF) Len() int {
	return len(c.CDF)
}

// Less orders entries by cumulative value (sort.Interface-style helper).
func (c *CDF) Less(i, j int) bool {
	cdf := c.CDF
	return cdf[i] < cdf[j]
}

// Swap exchanges two cumulative entries (sort.Interface-style helper).
func (c *CDF) Swap(i, j int) {
	cdf := c.CDF
	cdf[i], cdf[j] = cdf[j], cdf[i]
}
// Update adapts the table toward symbol s: every interior entry moves
// 1/2^cdfRate of the way toward the corresponding entry of Mixin[s], with
// the step magnitude truncated so it never overshoots. The endpoints stay
// fixed at 0 and cdfScale. Panics if the fixed-total or strict-monotonicity
// invariants are violated.
func (c *CDF) Update(s int) {
	cdf, mixin := c.CDF, c.Mixin[s]
	size := len(cdf) - 1
	for i := 1; i < size; i++ {
		// uint16 -> int conversions are always non-negative, so the
		// original negativity panics on a and b were unreachable and
		// have been removed.
		a, b := int(cdf[i]), int(mixin[i])
		v := b - a
		if v >= 0 {
			v = a + (v >> cdfRate)
		} else {
			v = a - ((-v) >> cdfRate)
		}
		if v < 0 {
			// Defensive only: v stays between a and b, both >= 0.
			panic("c is less than zero")
		}
		cdf[i] = uint16(v)
	}
	if cdf[size] != cdfScale {
		panic("cdf scale is incorrect")
	}
	for i := 1; i < len(cdf); i++ {
		if cdf[i] <= cdf[i-1] {
			panic("invalid cdf")
		}
	}
}
// AdaptiveCoder returns a Model that streams one arithmetic-coding Symbol
// per input symbol, using an order-0 adaptive frequency table whose counts
// all start at 1 (so the initial scale equals the alphabet size).
func (coder Coder16) AdaptiveCoder() Model {
	out := make(chan []Symbol, BUFFER_CHAN_SIZE)
	go func() {
		table, scale, buffer := make([]uint16, coder.Alphabit), uint16(0), [BUFFER_POOL_SIZE]Symbol{}
		for i, _ := range table {
			table[i] = 1
			scale += 1
		}
		current, offset, index := buffer[0:BUFFER_SIZE], BUFFER_SIZE, 0
		for input := range coder.Input {
			for _, s := range input {
				// low = sum of counts below s; interval is [low, low+table[s]).
				low := uint16(0)
				for _, count := range table[:s] {
					low += count
				}
				current[index], index = Symbol{Scale: scale, Low: low, High: low + table[s]}, index+1
				if index == BUFFER_SIZE {
					// Ship the full window and rotate to the next window of
					// the backing pool (assumes BUFFER_POOL_SIZE is a power
					// of two so the mask wraps the offset — TODO confirm).
					out <- current
					next := offset + BUFFER_SIZE
					current, offset, index = buffer[offset:next], next&BUFFER_POOL_SIZE_MASK, 0
				}
				scale++
				table[s]++
				// Rescale: halve all counts (keeping each >= 1).
				if scale > MAX_SCALE16 {
					scale = 0
					for i, count := range table {
						if count >>= 1; count == 0 {
							table[i], scale = 1, scale+1
						} else {
							table[i], scale = count, scale+count
						}
					}
				}
			}
		}
		out <- current[:index]
		close(out)
	}()
	return Model{Input: out}
}

// AdaptivePredictiveCoder is the order-1 variant of AdaptiveCoder: counts
// and scale are kept per context, where the context is the previous symbol.
func (coder Coder16) AdaptivePredictiveCoder() Model {
	out := make(chan []Symbol, BUFFER_CHAN_SIZE)
	go func() {
		table, scale, context, buffer := make([][]uint16, coder.Alphabit), make([]uint16, coder.Alphabit), uint16(0), [BUFFER_POOL_SIZE]Symbol{}
		for i, _ := range table {
			table[i] = make([]uint16, coder.Alphabit)
			scale[i] = coder.Alphabit
			for j, _ := range table[i] {
				table[i][j] = 1
			}
		}
		current, offset, index := buffer[0:BUFFER_SIZE], BUFFER_SIZE, 0
		for input := range coder.Input {
			for _, s := range input {
				low := uint16(0)
				for _, count := range table[context][:s] {
					low += count
				}
				current[index], index = Symbol{Scale: scale[context], Low: low, High: low + table[context][s]}, index+1
				if index == BUFFER_SIZE {
					// Rotate to the next window of the backing pool.
					out <- current
					next := offset + BUFFER_SIZE
					current, offset, index = buffer[offset:next], next&BUFFER_POOL_SIZE_MASK, 0
				}
				scale[context]++
				table[context][s]++
				// Per-context rescale, same policy as AdaptiveCoder.
				if scale[context] > MAX_SCALE16 {
					scale[context] = 0
					for i, count := range table[context] {
						if count >>= 1; count == 0 {
							table[context][i], scale[context] = 1, scale[context]+1
						} else {
							table[context][i], scale[context] = count, scale[context]+count
						}
					}
				}
				context = s
			}
		}
		out <- current[:index]
		close(out)
	}()
	return Model{Input: out}
}
// AdaptiveBitCoder codes each symbol bit by bit, most-significant bit first,
// with a single two-entry adaptive count table shared by all bits.
func (coder Coder16) AdaptiveBitCoder() Model {
	out := make(chan []Symbol, BUFFER_CHAN_SIZE)
	go func() {
		table, buffer := [2]uint16{}, [BUFFER_POOL_SIZE]Symbol{}
		table[0] = 1
		table[1] = 1
		// highest = bit length of Alphabit-1; mask selects the MSB coded.
		highest := uint32(0)
		for a := coder.Alphabit - 1; a > 0; a >>= 1 {
			highest++
		}
		current, offset, index, mask := buffer[0:BUFFER_SIZE], BUFFER_SIZE, 0, uint16(1)<<(highest-1)
		for input := range coder.Input {
			for _, s := range input {
				for bit := mask; bit > 0; bit >>= 1 {
					// [0, table[0]) codes a 0 bit, [table[0], scale) a 1 bit.
					b, low, high, scale := uint16(0), uint16(0), table[0], table[0]+table[1]
					if bit&s != 0 {
						b, low, high = 1, high, scale
					}
					current[index], index = Symbol{Scale: scale, Low: low, High: high}, index+1
					if index == BUFFER_SIZE {
						// Rotate to the next window of the backing pool.
						out <- current
						next := offset + BUFFER_SIZE
						current, offset, index = buffer[offset:next], next&BUFFER_POOL_SIZE_MASK, 0
					}
					table[b]++
					// Halve both counts (floor 1) once the total reaches MAX_SCALE16.
					if scale >= MAX_SCALE16 {
						table[0] >>= 1
						table[1] >>= 1
						if table[0] == 0 {
							table[0] = 1
						}
						if table[1] == 0 {
							table[1] = 1
						}
					}
				}
			}
		}
		out <- current[:index]
		close(out)
	}()
	return Model{Input: out}
}

// AdaptivePredictiveBitCoder is the contextual variant of AdaptiveBitCoder:
// each bit's counts are selected by a 16-bit sliding window of previously
// coded bits (65536 contexts).
func (coder Coder16) AdaptivePredictiveBitCoder() Model {
	out := make(chan []Symbol, BUFFER_CHAN_SIZE)
	go func() {
		table, context, buffer := make([][2]uint16, 65536), uint16(0), [BUFFER_POOL_SIZE]Symbol{}
		for i, _ := range table {
			table[i][0] = 1
			table[i][1] = 1
		}
		highest := uint32(0)
		for a := coder.Alphabit - 1; a > 0; a >>= 1 {
			highest++
		}
		current, offset, index, mask := buffer[0:BUFFER_SIZE], BUFFER_SIZE, 0, uint16(1)<<(highest-1)
		for input := range coder.Input {
			for _, s := range input {
				for bit := mask; bit > 0; bit >>= 1 {
					b, low, high, scale := uint16(0), uint16(0), table[context][0], table[context][0]+table[context][1]
					if bit&s != 0 {
						b, low, high = 1, high, scale
					}
					current[index], index = Symbol{Scale: scale, Low: low, High: high}, index+1
					if index == BUFFER_SIZE {
						// Rotate to the next window of the backing pool.
						out <- current
						next := offset + BUFFER_SIZE
						current, offset, index = buffer[offset:next], next&BUFFER_POOL_SIZE_MASK, 0
					}
					table[context][b]++
					if scale >= MAX_SCALE16 {
						table[context][0] >>= 1
						table[context][1] >>= 1
						if table[context][0] == 0 {
							table[context][0] = 1
						}
						if table[context][1] == 0 {
							table[context][1] = 1
						}
					}
					// Shift the just-coded bit into the context window.
					context = b | (context << 1)
				}
			}
		}
		out <- current[:index]
		close(out)
	}()
	return Model{Input: out}
}
// https://fgiesen.wordpress.com/2015/05/26/models-for-adaptive-arithmetic-coding/
// FilteredAdaptiveBitCoder codes bits MSB-first with a single shift-filter
// probability p1 (chance of a 1 bit out of filterScale), moving p1 by
// 1/2^filterShift of the remaining gap after every bit.
func (coder Coder16) FilteredAdaptiveBitCoder() Model {
	out := make(chan []Symbol, BUFFER_CHAN_SIZE)
	go func() {
		const scale = uint16(filterScale)
		p1 := scale / 2
		buffer := [BUFFER_POOL_SIZE]Symbol{}
		highest := uint32(0)
		for a := coder.Alphabit - 1; a > 0; a >>= 1 {
			highest++
		}
		current, offset, index, mask := buffer[0:BUFFER_SIZE], BUFFER_SIZE, 0, uint16(1)<<(highest-1)
		for input := range coder.Input {
			for _, s := range input {
				for bit := mask; bit > 0; bit >>= 1 {
					// [0, scale-p1) codes a 0 bit, [scale-p1, scale) a 1 bit.
					b, low, high := uint16(0), uint16(0), scale-p1
					if bit&s != 0 {
						b, low, high = 1, high, scale
					}
					current[index], index = Symbol{Scale: scale, Low: low, High: high}, index+1
					if index == BUFFER_SIZE {
						// Rotate to the next window of the backing pool.
						out <- current
						next := offset + BUFFER_SIZE
						current, offset, index = buffer[offset:next], next&BUFFER_POOL_SIZE_MASK, 0
					}
					if b == 0 {
						p1 -= p1 >> filterShift
					} else {
						p1 += (scale - p1) >> filterShift
					}
				}
			}
		}
		out <- current[:index]
		close(out)
	}()
	return Model{Input: out}
}

// FilteredAdaptivePredictiveBitCoder is FilteredAdaptiveBitCoder with one
// filtered probability per 16-bit bit-history context (65536 contexts).
func (coder Coder16) FilteredAdaptivePredictiveBitCoder() Model {
	out := make(chan []Symbol, BUFFER_CHAN_SIZE)
	go func() {
		const scale = uint16(filterScale)
		table, context, buffer := make([]uint16, 65536), uint16(0), [BUFFER_POOL_SIZE]Symbol{}
		for i, _ := range table {
			table[i] = scale / 2
		}
		highest := uint32(0)
		for a := coder.Alphabit - 1; a > 0; a >>= 1 {
			highest++
		}
		current, offset, index, mask := buffer[0:BUFFER_SIZE], BUFFER_SIZE, 0, uint16(1)<<(highest-1)
		for input := range coder.Input {
			for _, s := range input {
				for bit := mask; bit > 0; bit >>= 1 {
					b, low, high := uint16(0), uint16(0), scale-table[context]
					if bit&s != 0 {
						b, low, high = 1, high, scale
					}
					current[index], index = Symbol{Scale: scale, Low: low, High: high}, index+1
					if index == BUFFER_SIZE {
						// Rotate to the next window of the backing pool.
						out <- current
						next := offset + BUFFER_SIZE
						current, offset, index = buffer[offset:next], next&BUFFER_POOL_SIZE_MASK, 0
					}
					if b == 0 {
						table[context] -= table[context] >> filterShift
					} else {
						table[context] += (scale - table[context]) >> filterShift
					}
					// Shift the just-coded bit into the context window.
					context = b | (context << 1)
				}
			}
		}
		out <- current[:index]
		close(out)
	}()
	return Model{Input: out}
}
// FilteredAdaptiveCoder codes whole symbols against an adaptive CDF table
// with fixed total cdfScale, adapting the table toward each coded symbol.
func (coder Coder16) FilteredAdaptiveCoder() Model {
	out := make(chan []Symbol, BUFFER_CHAN_SIZE)
	go func() {
		cdf := NewCDF(int(coder.Alphabit))
		buffer := [BUFFER_POOL_SIZE]Symbol{}
		current, offset, index := buffer[0:BUFFER_SIZE], BUFFER_SIZE, 0
		for input := range coder.Input {
			for _, s := range input {
				current[index], index = Symbol{Scale: cdfScale, Low: cdf.CDF[s], High: cdf.CDF[s+1]}, index+1
				if index == BUFFER_SIZE {
					// Rotate to the next window of the backing pool.
					out <- current
					next := offset + BUFFER_SIZE
					current, offset, index = buffer[offset:next], next&BUFFER_POOL_SIZE_MASK, 0
				}
				cdf.Update(int(s))
			}
		}
		out <- current[:index]
		close(out)
	}()
	return Model{Input: out}
}
// AdaptiveDecoder returns a Model whose Output lookup decodes one symbol
// per call from an order-0 adaptive frequency table (all counts start at 1,
// mirroring AdaptiveCoder). The returned Symbol carries the decoded
// interval under the pre-update scale; an empty Symbol means the Output
// callback signalled end of stream.
func (decoder Coder16) AdaptiveDecoder() Model {
	table, scale := make([]uint16, decoder.Alphabit), uint16(0)
	for i := range table {
		table[i] = 1
		scale++
	}
	lookup := func(code uint16) Symbol {
		low, high, done := uint16(0), uint16(0), false
		for s, count := range table {
			if high += count; code < high {
				low, done, scale, table[s] = high-count, decoder.Output(uint16(s)), scale+1, table[s]+1
				break
			}
		}
		if done {
			return Symbol{}
		}
		// Rescale: halve all counts (keeping each >= 1) once the total
		// exceeds MAX_SCALE16, matching the encoder.
		if scale > MAX_SCALE16 {
			scale = 0
			for i, count := range table {
				if count >>= 1; count == 0 {
					table[i], scale = 1, scale+1
				} else {
					table[i], scale = count, scale+count
				}
			}
		}
		return Symbol{Scale: scale, Low: low, High: high}
	}
	return Model{Scale: uint32(scale), Output: lookup}
}
// AdaptivePredictiveDecoder is the order-1 decoder matching
// AdaptivePredictiveCoder: per-context counts keyed by the previous symbol.
// The returned Symbol's Scale is the scale for the NEXT lookup (under the
// new context); Low/High describe the symbol just decoded.
func (decoder Coder16) AdaptivePredictiveDecoder() Model {
	table, scale, context := make([][]uint16, decoder.Alphabit), make([]uint16, decoder.Alphabit), uint16(0)
	for i, _ := range table {
		table[i] = make([]uint16, decoder.Alphabit)
		scale[i] = decoder.Alphabit
		for j, _ := range table[i] {
			table[i][j] = 1
		}
	}
	lookup := func(code uint16) Symbol {
		low, high, next, done := uint16(0), uint16(0), uint16(0), false
		for s, count := range table[context] {
			if high += count; code < high {
				next = uint16(s)
				low, done, scale[context], table[context][s] = high-count, decoder.Output(next), scale[context]+1, table[context][s]+1
				break
			}
		}
		if done {
			// Output signalled end of stream.
			return Symbol{}
		} else {
			// Per-context rescale, same policy as the encoder.
			if scale[context] > MAX_SCALE16 {
				scale[context] = 0
				for i, count := range table[context] {
					if count >>= 1; count == 0 {
						table[context][i], scale[context] = 1, scale[context]+1
					} else {
						table[context][i], scale[context] = count, scale[context]+count
					}
				}
			}
			context = next
			return Symbol{Scale: scale[context], Low: low, High: high}
		}
	}
	return Model{Scale: uint32(decoder.Alphabit), Output: lookup}
}

// AdaptiveBitDecoder decodes one bit per lookup with a two-entry adaptive
// count table, reassembling symbols MSB-first in bits; a finished symbol is
// handed to decoder.Output, which returns true to stop decoding.
func (decoder Coder16) AdaptiveBitDecoder() Model {
	table := [2]uint16{}
	table[0] = 1
	table[1] = 1
	highest := uint32(0)
	for a := decoder.Alphabit - 1; a > 0; a >>= 1 {
		highest++
	}
	mask := uint16(1) << (highest - 1)
	bit, bits := mask, uint16(0)
	lookup := func(code uint16) Symbol {
		scale, low, high, b := table[0]+table[1], uint16(0), uint16(0), uint16(0)
		if code < table[0] {
			high, bit = table[0], bit>>1
		} else {
			low, high, bits, bit, b = table[0], scale, bits|bit, bit>>1, 1
		}
		table[b]++
		// Halve both counts (floor 1) once the total reaches MAX_SCALE16.
		if scale >= MAX_SCALE16 {
			table[0] >>= 1
			table[1] >>= 1
			if table[0] == 0 {
				table[0] = 1
			}
			if table[1] == 0 {
				table[1] = 1
			}
		}
		if bit == 0 {
			if decoder.Output(bits) {
				return Symbol{}
			}
			bits, bit = 0, mask
		}
		// Scale here is the post-update total, used for the next lookup.
		return Symbol{Scale: table[0] + table[1], Low: low, High: high}
	}
	return Model{Scale: uint32(2), Output: lookup}
}
// AdaptivePredictiveBitDecoder decodes one bit per lookup with per-context
// counts keyed by a 16-bit window of previously decoded bits, matching
// AdaptivePredictiveBitCoder. NOTE(review): the returned Scale is read from
// the UPDATED context while Low/High came from the old one — this mirrors
// the "Scale is for the next lookup" convention of the other decoders.
func (decoder Coder16) AdaptivePredictiveBitDecoder() Model {
	table, context := make([][2]uint16, 65536), uint16(0)
	for i, _ := range table {
		table[i][0] = 1
		table[i][1] = 1
	}
	highest := uint32(0)
	for a := decoder.Alphabit - 1; a > 0; a >>= 1 {
		highest++
	}
	mask := uint16(1) << (highest - 1)
	bit, bits := mask, uint16(0)
	lookup := func(code uint16) Symbol {
		scale, low, high, b := table[context][0]+table[context][1], uint16(0), uint16(0), uint16(0)
		if code < table[context][0] {
			high, bit = table[context][0], bit>>1
		} else {
			low, high, bits, bit, b = table[context][0], scale, bits|bit, bit>>1, 1
		}
		table[context][b]++
		if scale >= MAX_SCALE16 {
			table[context][0] >>= 1
			table[context][1] >>= 1
			if table[context][0] == 0 {
				table[context][0] = 1
			}
			if table[context][1] == 0 {
				table[context][1] = 1
			}
		}
		// Shift the just-decoded bit into the context window.
		context = b | (context << 1)
		if bit == 0 {
			if decoder.Output(bits) {
				return Symbol{}
			}
			bits, bit = 0, mask
		}
		return Symbol{Scale: table[context][0] + table[context][1], Low: low, High: high}
	}
	return Model{Scale: uint32(2), Output: lookup}
}

// FilteredAdaptiveBitDecoder is the decoder for FilteredAdaptiveBitCoder:
// a single shift-filter probability p1 out of filterScale per bit.
func (decoder Coder16) FilteredAdaptiveBitDecoder() Model {
	const scale = uint16(filterScale)
	p1 := scale / 2
	highest := uint32(0)
	for a := decoder.Alphabit - 1; a > 0; a >>= 1 {
		highest++
	}
	mask := uint16(1) << (highest - 1)
	bit, bits := mask, uint16(0)
	lookup := func(code uint16) Symbol {
		low, high, b := uint16(0), uint16(0), uint16(0)
		if p0 := scale - p1; code < p0 {
			high, bit = p0, bit>>1
		} else {
			low, high, bits, bit, b = p0, scale, bits|bit, bit>>1, 1
		}
		if b == 0 {
			p1 -= p1 >> filterShift
		} else {
			p1 += (scale - p1) >> filterShift
		}
		if bit == 0 {
			if decoder.Output(bits) {
				return Symbol{}
			}
			bits, bit = 0, mask
		}
		return Symbol{Scale: scale, Low: low, High: high}
	}
	return Model{Scale: uint32(scale), Output: lookup}
}
// FilteredAdaptivePredictiveBitDecoder is the decoder for
// FilteredAdaptivePredictiveBitCoder: one filtered probability per 16-bit
// bit-history context.
func (decoder Coder16) FilteredAdaptivePredictiveBitDecoder() Model {
	const scale = uint16(filterScale)
	table, context := make([]uint16, 65536), uint16(0)
	for i, _ := range table {
		table[i] = scale / 2
	}
	highest := uint32(0)
	for a := decoder.Alphabit - 1; a > 0; a >>= 1 {
		highest++
	}
	mask := uint16(1) << (highest - 1)
	bit, bits := mask, uint16(0)
	lookup := func(code uint16) Symbol {
		low, high, b := uint16(0), uint16(0), uint16(0)
		if p0 := scale - table[context]; code < p0 {
			high, bit = p0, bit>>1
		} else {
			low, high, bits, bit, b = p0, scale, bits|bit, bit>>1, 1
		}
		if b == 0 {
			table[context] -= table[context] >> filterShift
		} else {
			table[context] += (scale - table[context]) >> filterShift
		}
		// Shift the just-decoded bit into the context window.
		context = b | (context << 1)
		if bit == 0 {
			if decoder.Output(bits) {
				return Symbol{}
			}
			bits, bit = 0, mask
		}
		return Symbol{Scale: scale, Low: low, High: high}
	}
	return Model{Scale: uint32(scale), Output: lookup}
}

// FilteredAdaptiveDecoder is the decoder for FilteredAdaptiveCoder: a linear
// scan of the CDF table finds the interval containing code, the symbol is
// emitted, and the table is adapted toward it.
func (decoder Coder16) FilteredAdaptiveDecoder() Model {
	cdf := NewCDF(int(decoder.Alphabit))
	lookup := func(code uint16) Symbol {
		low, high, done := uint16(0), uint16(0), false
		for s := 1; s < len(cdf.CDF); s++ {
			if code < cdf.CDF[s] {
				symbol := s - 1
				low, high, done = cdf.CDF[s-1], cdf.CDF[s], decoder.Output(uint16(symbol))
				cdf.Update(symbol)
				break
			}
		}
		if done {
			return Symbol{}
		}
		return Symbol{Scale: cdfScale, Low: low, High: high}
	}
	return Model{Scale: uint32(cdfScale), Output: lookup}
}
// AdaptiveCoder32 returns a Model32 that converts the coder's input symbols
// into 32-bit coding intervals using an adaptive frequency table. Symbols
// are batched through a ring of reusable buffers and sent on the returned
// model's input channel from a dedicated goroutine.
func (coder Coder16) AdaptiveCoder32() Model32 {
	out := make(chan []Symbol32, BUFFER_CHAN_SIZE)
	go func() {
		// Every symbol starts with frequency 1, so the initial scale is
		// the alphabet size.
		table, scale, buffer := make([]uint32, coder.Alphabit), uint32(coder.Alphabit), [BUFFER_POOL_SIZE]Symbol32{}
		for i := range table {
			table[i] = 1
		}
		current, offset, index := buffer[0:BUFFER_SIZE], BUFFER_SIZE, 0
		for input := range coder.Input {
			for _, s := range input {
				// low is the cumulative frequency of all symbols below s.
				low := uint32(0)
				for _, count := range table[:s] {
					low += count
				}
				current[index], index = Symbol32{Scale: scale, Low: low, High: low + table[s]}, index+1
				// Ship a full buffer and rotate to the next slice of the pool.
				if index == BUFFER_SIZE {
					out <- current
					next := offset + BUFFER_SIZE
					current, offset, index = buffer[offset:next], next&BUFFER_POOL_SIZE_MASK, 0
				}
				scale++
				table[s]++
				// Halve all counts (keeping each >= 1) when the total
				// frequency would exceed the coder's precision.
				if scale > MAX_SCALE32 {
					scale = 0
					for i, count := range table {
						if count >>= 1; count == 0 {
							table[i], scale = 1, scale+1
						} else {
							table[i], scale = count, scale+count
						}
					}
				}
			}
		}
		out <- current[:index]
		close(out)
	}()
	return Model32{Input: out}
}
// AdaptivePredictiveCoder32 returns a Model32 like AdaptiveCoder32 but with
// a separate adaptive frequency table per previous symbol (order-1 context).
func (coder Coder16) AdaptivePredictiveCoder32() Model32 {
	out := make(chan []Symbol32, BUFFER_CHAN_SIZE)
	go func() {
		table, scale, context, buffer := make([][]uint32, coder.Alphabit), make([]uint32, coder.Alphabit), uint16(0), [BUFFER_POOL_SIZE]Symbol32{}
		// Every context starts with uniform counts of 1.
		for i := range table {
			table[i] = make([]uint32, coder.Alphabit)
			scale[i] = uint32(coder.Alphabit)
			for j := range table[i] {
				table[i][j] = 1
			}
		}
		current, offset, index := buffer[0:BUFFER_SIZE], BUFFER_SIZE, 0
		for input := range coder.Input {
			for _, s := range input {
				// Cumulative frequency of symbols below s in this context.
				low := uint32(0)
				for _, count := range table[context][:s] {
					low += count
				}
				current[index], index = Symbol32{Scale: scale[context], Low: low, High: low + table[context][s]}, index+1
				// Ship a full buffer and rotate to the next slice of the pool.
				if index == BUFFER_SIZE {
					out <- current
					next := offset + BUFFER_SIZE
					current, offset, index = buffer[offset:next], next&BUFFER_POOL_SIZE_MASK, 0
				}
				scale[context]++
				table[context][s]++
				// Halve this context's counts (keeping each >= 1) on overflow.
				if scale[context] > MAX_SCALE32 {
					scale[context] = 0
					for i, count := range table[context] {
						if count >>= 1; count == 0 {
							table[context][i], scale[context] = 1, scale[context]+1
						} else {
							table[context][i], scale[context] = count, scale[context]+count
						}
					}
				}
				context = s
			}
		}
		out <- current[:index]
		close(out)
	}()
	return Model32{Input: out}
}
// AdaptiveDecoder32 returns a Model32 that maps a 32-bit code back to its
// symbol interval using the same adaptive frequency table as
// AdaptiveCoder32, keeping encoder and decoder state in lock step.
func (decoder Coder16) AdaptiveDecoder32() Model32 {
	table, scale := make([]uint32, decoder.Alphabit), uint32(decoder.Alphabit)
	for i := range table {
		table[i] = 1
	}
	lookup := func(code uint32) Symbol32 {
		low, high, done := uint32(0), uint32(0), false
		// Walk cumulative frequencies until the interval containing code
		// is found, then emit the symbol and bump its count.
		for s, count := range table {
			if high += count; code < high {
				low, done, scale, table[s] = high-count, decoder.Output(uint16(s)), scale+1, table[s]+1
				break
			}
		}
		if done {
			return Symbol32{}
		}
		// Rescale exactly as the encoder does so both stay in sync.
		if scale > MAX_SCALE32 {
			scale = 0
			for i, count := range table {
				if count >>= 1; count == 0 {
					table[i], scale = 1, scale+1
				} else {
					table[i], scale = count, scale+count
				}
			}
		}
		return Symbol32{Scale: scale, Low: low, High: high}
	}
	return Model32{Scale: uint64(decoder.Alphabit), Output: lookup}
}
// AdaptivePredictiveDecoder32 returns a Model32 like AdaptiveDecoder32 but
// with a separate frequency table per previous symbol (order-1 context),
// mirroring AdaptivePredictiveCoder32.
func (decoder Coder16) AdaptivePredictiveDecoder32() Model32 {
	table, scale, context := make([][]uint32, decoder.Alphabit), make([]uint32, decoder.Alphabit), uint16(0)
	// Every context starts with uniform counts of 1.
	for i := range table {
		table[i] = make([]uint32, decoder.Alphabit)
		scale[i] = uint32(decoder.Alphabit)
		for j := range table[i] {
			table[i][j] = 1
		}
	}
	lookup := func(code uint32) Symbol32 {
		low, high, next, done := uint32(0), uint32(0), uint16(0), false
		for s, count := range table[context] {
			if high += count; code < high {
				next = uint16(s)
				low, done, scale[context], table[context][s] = high-count, decoder.Output(next), scale[context]+1, table[context][s]+1
				break
			}
		}
		if done {
			return Symbol32{}
		}
		// Rescale this context's counts exactly as the encoder does.
		if scale[context] > MAX_SCALE32 {
			scale[context] = 0
			for i, count := range table[context] {
				if count >>= 1; count == 0 {
					table[context][i], scale[context] = 1, scale[context]+1
				} else {
					table[context][i], scale[context] = count, scale[context]+count
				}
			}
		}
		context = next
		// NOTE(review): Scale is read AFTER context has advanced to next,
		// so it comes from the new context while Low/High came from the
		// old one. The non-predictive decoder likewise returns its
		// post-update scale — confirm this matches the consumer's protocol.
		return Symbol32{Scale: scale[context], Low: low, High: high}
	}
	return Model32{Scale: uint64(decoder.Alphabit), Output: lookup}
}
|
// Package usl provides functionality to build Universal Scalability Law models
// from sets of observed measurements.
package usl
import (
"fmt"
"math"
"github.com/maorshutman/lm"
)
// Model is a Universal Scalability Law model.
type Model struct {
	Sigma  float64 // The model's coefficient of contention, σ.
	Kappa  float64 // The model's coefficient of crosstalk/coherency, κ.
	Lambda float64 // The model's coefficient of performance, λ.
}

// String implements fmt.Stringer.
func (m *Model) String() string {
	return fmt.Sprintf("Model{σ=%v,κ=%v,λ=%v}", m.Sigma, m.Kappa, m.Lambda)
}

// ThroughputAtConcurrency returns the expected throughput given a number of concurrent events,
// X(N).
//
// See "Practical Scalability Analysis with the Universal Scalability Law, Equation 3".
func (m *Model) ThroughputAtConcurrency(n float64) float64 {
	return (m.Lambda * n) / (1 + (m.Sigma * (n - 1)) + (m.Kappa * n * (n - 1)))
}

// LatencyAtConcurrency returns the expected mean latency given a number of concurrent events,
// R(N).
//
// See "Practical Scalability Analysis with the Universal Scalability Law, Equation 6".
func (m *Model) LatencyAtConcurrency(n float64) float64 {
	return (1 + (m.Sigma * (n - 1)) + (m.Kappa * n * (n - 1))) / m.Lambda
}

// MaxConcurrency returns the maximum expected number of concurrent events the system can handle,
// Nmax.
//
// See "Practical Scalability Analysis with the Universal Scalability Law, Equation 4".
func (m *Model) MaxConcurrency() float64 {
	return math.Floor(math.Sqrt((1 - m.Sigma) / m.Kappa))
}

// MaxThroughput returns the maximum expected throughput the system can handle, Xmax.
//
// Pointer receiver for consistency with every other Model method.
func (m *Model) MaxThroughput() float64 {
	return m.ThroughputAtConcurrency(m.MaxConcurrency())
}

// LatencyAtThroughput returns the expected mean latency given a throughput, R(X).
//
// See "Practical Scalability Analysis with the Universal Scalability Law, Equation 8".
func (m *Model) LatencyAtThroughput(x float64) float64 {
	return (m.Sigma - 1) / (m.Sigma*x - m.Lambda)
}

// ThroughputAtLatency returns the expected throughput given a mean latency, X(R).
//
// See "Practical Scalability Analysis with the Universal Scalability Law, Equation 9".
func (m *Model) ThroughputAtLatency(r float64) float64 {
	return (math.Sqrt(math.Pow(m.Sigma, 2)+math.Pow(m.Kappa, 2)+
		2*m.Kappa*(2*m.Lambda*r+m.Sigma-2)) - m.Kappa + m.Sigma) / (2.0 * m.Kappa * r)
}

// ConcurrencyAtLatency returns the expected number of concurrent events at a particular mean
// latency, N(R).
//
// See "Practical Scalability Analysis with the Universal Scalability Law, Equation 10".
func (m *Model) ConcurrencyAtLatency(r float64) float64 {
	return (m.Kappa - m.Sigma +
		math.Sqrt(math.Pow(m.Sigma, 2)+
			math.Pow(m.Kappa, 2)+
			2*m.Kappa*((2*m.Lambda*r)+m.Sigma-2))) / (2 * m.Kappa)
}

// ConcurrencyAtThroughput returns the expected number of concurrent events at a particular
// throughput, N(X).
func (m *Model) ConcurrencyAtThroughput(x float64) float64 {
	return m.LatencyAtThroughput(x) * x
}

// ContentionConstrained returns true if the system is constrained by contention.
func (m *Model) ContentionConstrained() bool {
	return m.Sigma > m.Kappa
}

// CoherencyConstrained returns true if the system is constrained by coherency costs.
func (m *Model) CoherencyConstrained() bool {
	return m.Sigma < m.Kappa
}

// Limitless returns true if the system is linearly scalable.
func (m *Model) Limitless() bool {
	return m.Kappa == 0
}
// Build returns a model whose parameters are generated from the given measurements.
//
// Finds a set of coefficients for the equation y = λx/(1+σ(x-1)+κx(x-1)) which best fit the
// observed values using unconstrained least-squares regression. The resulting values for λ, κ, and
// σ are the parameters of the returned model.
func Build(measurements []Measurement) (*Model, error) {
	if len(measurements) < minMeasurements {
		return nil, ErrInsufficientMeasurements
	}
	// Initial guesses at σ, κ, λ; the named results were dropped and the
	// loop variable renamed so nothing shadows the eventual return value.
	guess := []float64{0.1, 0.01, 0}
	// Use max(throughput/concurrency) as the initial lambda.
	for _, meas := range measurements {
		if v := meas.Throughput / meas.Concurrency; v > guess[2] {
			guess[2] = v
		}
	}
	// f computes the residuals of a candidate parameter vector against the
	// observed measurements.
	f := func(dst, x []float64) {
		model := Model{Sigma: x[0], Kappa: x[1], Lambda: x[2]}
		for i, v := range measurements {
			dst[i] = v.Throughput - model.ThroughputAtConcurrency(v.Concurrency)
		}
	}
	j := lm.NumJac{Func: f}
	// Formulate an LM problem.
	p := lm.LMProblem{
		Dim:        3,                 // Three parameters in the model.
		Size:       len(measurements), // Use all measurements to calculate residuals.
		Func:       f,                 // Reduce the residuals of model predictions to observations.
		Jac:        j.Jac,             // Approximate the Jacobian by finite differences.
		InitParams: guess,             // Use our initial guesses at parameters.
		Tau:        1e-6,              // Need a non-zero initial damping factor.
		Eps1:       1e-8,              // Small but non-zero values here prevent singular matrices.
		Eps2:       1e-8,
	}
	// Calculate the model parameters.
	results, err := lm.LM(p, nil)
	if err != nil {
		return nil, fmt.Errorf("unable to build model: %w", err)
	}
	// Return the fitted model.
	return &Model{
		Sigma:  results.X[0],
		Kappa:  results.X[1],
		Lambda: results.X[2],
	}, nil
}
const (
	// minMeasurements is the smallest number of measurements from which a useful model can be
	// created.
	minMeasurements = 6
)
// ErrInsufficientMeasurements is returned when fewer than minMeasurements measurements were
// provided to Build.
var ErrInsufficientMeasurements = fmt.Errorf("usl: need at least %d measurements", minMeasurements)
Simplify LM problem setup.
// Package usl provides functionality to build Universal Scalability Law models
// from sets of observed measurements.
package usl
import (
"fmt"
"math"
"github.com/maorshutman/lm"
)
// Model is a Universal Scalability Law model.
type Model struct {
	Sigma  float64 // The model's coefficient of contention, σ.
	Kappa  float64 // The model's coefficient of crosstalk/coherency, κ.
	Lambda float64 // The model's coefficient of performance, λ.
}

// String implements fmt.Stringer.
func (m *Model) String() string {
	return fmt.Sprintf("Model{σ=%v,κ=%v,λ=%v}", m.Sigma, m.Kappa, m.Lambda)
}

// ThroughputAtConcurrency returns the expected throughput given a number of concurrent events,
// X(N).
//
// See "Practical Scalability Analysis with the Universal Scalability Law, Equation 3".
func (m *Model) ThroughputAtConcurrency(n float64) float64 {
	return (m.Lambda * n) / (1 + (m.Sigma * (n - 1)) + (m.Kappa * n * (n - 1)))
}

// LatencyAtConcurrency returns the expected mean latency given a number of concurrent events,
// R(N).
//
// See "Practical Scalability Analysis with the Universal Scalability Law, Equation 6".
func (m *Model) LatencyAtConcurrency(n float64) float64 {
	return (1 + (m.Sigma * (n - 1)) + (m.Kappa * n * (n - 1))) / m.Lambda
}

// MaxConcurrency returns the maximum expected number of concurrent events the system can handle,
// Nmax.
//
// See "Practical Scalability Analysis with the Universal Scalability Law, Equation 4".
func (m *Model) MaxConcurrency() float64 {
	return math.Floor(math.Sqrt((1 - m.Sigma) / m.Kappa))
}

// MaxThroughput returns the maximum expected throughput the system can handle, Xmax.
//
// Pointer receiver for consistency with every other Model method.
func (m *Model) MaxThroughput() float64 {
	return m.ThroughputAtConcurrency(m.MaxConcurrency())
}

// LatencyAtThroughput returns the expected mean latency given a throughput, R(X).
//
// See "Practical Scalability Analysis with the Universal Scalability Law, Equation 8".
func (m *Model) LatencyAtThroughput(x float64) float64 {
	return (m.Sigma - 1) / (m.Sigma*x - m.Lambda)
}

// ThroughputAtLatency returns the expected throughput given a mean latency, X(R).
//
// See "Practical Scalability Analysis with the Universal Scalability Law, Equation 9".
func (m *Model) ThroughputAtLatency(r float64) float64 {
	return (math.Sqrt(math.Pow(m.Sigma, 2)+math.Pow(m.Kappa, 2)+
		2*m.Kappa*(2*m.Lambda*r+m.Sigma-2)) - m.Kappa + m.Sigma) / (2.0 * m.Kappa * r)
}

// ConcurrencyAtLatency returns the expected number of concurrent events at a particular mean
// latency, N(R).
//
// See "Practical Scalability Analysis with the Universal Scalability Law, Equation 10".
func (m *Model) ConcurrencyAtLatency(r float64) float64 {
	return (m.Kappa - m.Sigma +
		math.Sqrt(math.Pow(m.Sigma, 2)+
			math.Pow(m.Kappa, 2)+
			2*m.Kappa*((2*m.Lambda*r)+m.Sigma-2))) / (2 * m.Kappa)
}

// ConcurrencyAtThroughput returns the expected number of concurrent events at a particular
// throughput, N(X).
func (m *Model) ConcurrencyAtThroughput(x float64) float64 {
	return m.LatencyAtThroughput(x) * x
}

// ContentionConstrained returns true if the system is constrained by contention.
func (m *Model) ContentionConstrained() bool {
	return m.Sigma > m.Kappa
}

// CoherencyConstrained returns true if the system is constrained by coherency costs.
func (m *Model) CoherencyConstrained() bool {
	return m.Sigma < m.Kappa
}

// Limitless returns true if the system is linearly scalable.
func (m *Model) Limitless() bool {
	return m.Kappa == 0
}
// Build returns a model whose parameters are generated from the given measurements.
//
// Finds a set of coefficients for the equation y = λx/(1+σ(x-1)+κx(x-1)) which best fit the
// observed values using unconstrained least-squares regression. The resulting values for λ, κ, and
// σ are the parameters of the returned model.
func Build(measurements []Measurement) (*Model, error) {
	if len(measurements) < minMeasurements {
		return nil, ErrInsufficientMeasurements
	}
	// Initial guesses at σ, κ, λ; the named results were dropped and the
	// loop variable renamed so nothing shadows the eventual return value.
	guess := []float64{0.1, 0.01, 0}
	// Use max(throughput/concurrency) as the initial lambda.
	for _, meas := range measurements {
		if v := meas.Throughput / meas.Concurrency; v > guess[2] {
			guess[2] = v
		}
	}
	// f computes the residuals of a candidate parameter vector against the
	// observed measurements.
	f := func(dst, x []float64) {
		model := Model{Sigma: x[0], Kappa: x[1], Lambda: x[2]}
		for i, v := range measurements {
			dst[i] = v.Throughput - model.ThroughputAtConcurrency(v.Concurrency)
		}
	}
	// Formulate an LM problem.
	p := lm.LMProblem{
		Dim:        3,                       // Three parameters in the model.
		Size:       len(measurements),       // Use all measurements to calculate residuals.
		Func:       f,                       // Reduce the residuals of model predictions to observations.
		Jac:        lm.NumJac{Func: f}.Jac,  // Approximate the Jacobian by finite differences.
		InitParams: guess,                   // Use our initial guesses at parameters.
		Tau:        1e-6,                    // Need a non-zero initial damping factor.
		Eps1:       1e-8,                    // Small but non-zero values here prevent singular matrices.
		Eps2:       1e-8,
	}
	// Calculate the model parameters.
	results, err := lm.LM(p, nil)
	if err != nil {
		return nil, fmt.Errorf("unable to build model: %w", err)
	}
	// Return the fitted model.
	return &Model{
		Sigma:  results.X[0],
		Kappa:  results.X[1],
		Lambda: results.X[2],
	}, nil
}
const (
	// minMeasurements is the smallest number of measurements from which a useful model can be
	// created.
	minMeasurements = 6
)
// ErrInsufficientMeasurements is returned when fewer than minMeasurements measurements were
// provided to Build.
var ErrInsufficientMeasurements = fmt.Errorf("usl: need at least %d measurements", minMeasurements)
|
// Package fate is a text generation library.
package fate
import (
cryptorand "crypto/rand"
"encoding/binary"
"log"
"math/rand"
"strings"
)
// token is a compact numeric identifier for a word interned in the
// model's syndict.
type token uint32
// bigram is an ordered pair of adjacent tokens, used as chain context.
type bigram struct {
	tok0, tok1 token
}
// reverse returns b with its two tokens swapped.
func (b bigram) reverse() bigram {
	return bigram{b.tok1, b.tok0}
}
// Model is a trigram language model that can learn and respond to
// text.
type Model struct {
	tokens *syndict
	// We track (tok0 -> tok1) and (tok0 tok1 -> tok2) in
	// the forward direction, so we can efficiently choose
	// random tok0-containing contexts.
	fwd1 obs1
	fwd2 obs2
	// In the reverse direction, we only need to be able
	// to track (tok2 tok1 -> tok0).
	rev2 obs2
	rand *rand.Rand
}
// Config holds Model configuration data. An empty Config struct
// indicates the default values for each.
type Config struct {
	// Stemmer makes all tokens go through a normalization process
	// when created. Words that stem the same mean the same thing.
	Stemmer Stemmer
}
// stemmerOrDefault returns the configured Stemmer, falling back to
// DefaultStemmer when none was set.
func (c Config) stemmerOrDefault() Stemmer {
	if c.Stemmer != nil {
		return c.Stemmer
	}
	return DefaultStemmer
}
// NewModel constructs an empty language model.
func NewModel(opts Config) *Model {
	return &Model{
		tokens: newSyndict(opts.stemmerOrDefault()),
		fwd1: make(obs1),
		fwd2: make(obs2),
		rev2: make(obs2),
		rand: rand.New(randSource()),
	}
}
// randSource seeds a standard math/rand PRNG with a seed drawn from the
// system's secure random source.
func randSource() rand.Source {
	var seed int64
	// The error was previously ignored, which would silently leave the
	// seed at 0 and make every run deterministic. A broken system RNG is
	// fatal, matching how follow treats exhausted chains.
	if err := binary.Read(cryptorand.Reader, binary.LittleEndian, &seed); err != nil {
		log.Fatal("fate: reading random seed: ", err)
	}
	return rand.NewSource(seed)
}
// ends returns the sentinel start (<S>) and end (</S>) tokens.
func (m *Model) ends() (token, token) {
	return m.tokens.ID("<S>"), m.tokens.ID("</S>")
}
// Learn observes the text in a string and makes it available for
// later replies.
func (m *Model) Learn(text string) {
	start, end := m.ends()
	tokens := m.tokens
	words := strings.Fields(text)
	if len(words) == 1 {
		// Refuse to learn single-word inputs.
		return
	}
	// ids: <S> <S> tokens in the input string </S> </S>
	var ids = []token{start, start}
	for _, f := range words {
		ids = append(ids, tokens.ID(f))
	}
	ids = append(ids, end, end)
	// Slide a trigram window over ids, recording forward and reverse
	// observations for each (tok0, tok1, tok2) triple.
	var ctx bigram
	var tok2 token
	for i := 0; i < len(ids)-2; i++ {
		ctx.tok0, ctx.tok1, tok2 = ids[i], ids[i+1], ids[i+2]
		// fwd1 is only updated when fwd2.Observe returns false —
		// presumably meaning the trigram was not already known, keeping
		// fwd1 duplicate-free; confirm against obs2.Observe.
		if !m.fwd2.Observe(ctx, tok2) {
			m.fwd1.Observe(ctx.tok0, ctx.tok1)
		}
		// Reuse ctx as the reversed context (tok2 tok1 -> tok0).
		ctx.tok0, tok2 = tok2, ctx.tok0
		m.rev2.Observe(ctx, tok2)
	}
}
// Reply generates a reply string to str, given the current state of
// the language model.
func (m *Model) Reply(text string) string {
	pivot := m.pickPivot(strings.Fields(text))
	fwdctx := bigram{tok0: pivot, tok1: m.choice(m.fwd1[pivot])}
	start, end := m.ends()
	path := make([]token, 0)
	// Compute the beginning of the sentence by walking from fwdctx
	// back to start, then reverse what we have so far.
	path = m.follow(path, m.rev2, fwdctx.reverse(), start)
	reverse(path)
	// Append the initial context: tok0 only if we weren't already at
	// the start.
	if fwdctx.tok0 != start {
		path = append(path, fwdctx.tok0)
	}
	// And tok1 only if we weren't already at the end, in which case
	// also walk forward from fwdctx to end.
	if fwdctx.tok1 != end {
		path = append(path, fwdctx.tok1)
		path = m.follow(path, m.fwd2, fwdctx, end)
	}
	return join(m.tokens, path)
}
// pickPivot chooses a random token to build a reply around, preferring
// a synonym of one of the input words.
func (m *Model) pickPivot(words []string) token {
	var pivots []token
	for _, w := range words {
		pivots = append(pivots, m.tokens.Syns(w)...)
	}
	if len(pivots) > 0 {
		return pivots[m.rand.Intn(len(pivots))]
	}
	// No valid pivots, so babble. Assume tokens 0 & 1 are start and end.
	return token(m.rand.Intn(m.tokens.Len()-2) + 2)
}
// follow extends path by walking obs from pos until goal is chosen,
// returning the extended path. The goal token itself is not appended.
func (m *Model) follow(path []token, obs obs2, pos bigram, goal token) []token {
	for {
		toks := obs[pos]
		if len(toks) == 0 {
			// log.Fatal("…at", pos) printed no separator before the
			// struct (fmt.Sprint only inserts a space between two
			// non-string operands); Fatalf formats it properly.
			log.Fatalf("ran out of chain at %v", pos)
		}
		tok := m.choice(toks)
		if tok == goal {
			return path
		}
		path = append(path, tok)
		pos.tok0, pos.tok1 = pos.tok1, tok
	}
}
// join concatenates the words for path, separated by single spaces.
func join(tokens *syndict, path []token) string {
	// Guard the buf[:len(buf)-1] below, which panics when path is empty.
	if len(path) == 0 {
		return ""
	}
	buf := make([]byte, 0, joinsize(tokens, path))
	for _, tok := range path {
		buf = append(buf, tokens.Word(tok)...)
		buf = append(buf, ' ')
	}
	// Drop the trailing space.
	return string(buf[:len(buf)-1])
}
// reverse flips toks in place.
func reverse(toks []token) {
	for i, j := 0, len(toks)-1; i < j; i, j = i+1, j-1 {
		toks[i], toks[j] = toks[j], toks[i]
	}
}
// joinsize returns the number of bytes join needs to render path.
func joinsize(tokens *syndict, path []token) int {
	// initialize count assuming a space between each word
	count := len(path)
	for _, tok := range path {
		count += len(tokens.Word(tok))
	}
	return count
}
// choice returns a uniformly random element of toks.
func (m *Model) choice(toks []token) token {
	return toks[m.rand.Intn(len(toks))]
}
Add a few comments to Reply
// Package fate is a text generation library.
package fate
import (
cryptorand "crypto/rand"
"encoding/binary"
"log"
"math/rand"
"strings"
)
type token uint32
type bigram struct {
tok0, tok1 token
}
func (b bigram) reverse() bigram {
return bigram{b.tok1, b.tok0}
}
// Model is a trigram language model that can learn and respond to
// text.
type Model struct {
tokens *syndict
// We track (tok0 -> tok1) and (tok0 tok1 -> tok2) in
// the forward direction, so we can efficiently choose
// random tok0-containing contexts.
fwd1 obs1
fwd2 obs2
// In the reverse direction, we only need to be able
// to track (tok2 tok1 -> tok0).
rev2 obs2
rand *rand.Rand
}
// Config holds Model configuration data. An empty Config struct
// indicates the default values for each.
type Config struct {
// Stemmer makes all tokens go through a normalization process
// when created. Words that stem the same mean the same thing.
Stemmer Stemmer
}
func (c Config) stemmerOrDefault() Stemmer {
if c.Stemmer != nil {
return c.Stemmer
}
return DefaultStemmer
}
// NewModel constructs an empty language model.
func NewModel(opts Config) *Model {
return &Model{
tokens: newSyndict(opts.stemmerOrDefault()),
fwd1: make(obs1),
fwd2: make(obs2),
rev2: make(obs2),
rand: rand.New(randSource()),
}
}
// randSource seeds a standard math/rand PRNG with a seed drawn from the
// system's secure random source.
func randSource() rand.Source {
	var seed int64
	// The error was previously ignored, which would silently leave the
	// seed at 0 and make every run deterministic. A broken system RNG is
	// fatal, matching how follow treats exhausted chains.
	if err := binary.Read(cryptorand.Reader, binary.LittleEndian, &seed); err != nil {
		log.Fatal("fate: reading random seed: ", err)
	}
	return rand.NewSource(seed)
}
func (m *Model) ends() (token, token) {
return m.tokens.ID("<S>"), m.tokens.ID("</S>")
}
// Learn observes the text in a string and makes it available for
// later replies.
func (m *Model) Learn(text string) {
start, end := m.ends()
tokens := m.tokens
words := strings.Fields(text)
if len(words) == 1 {
// Refuse to learn single-word inputs.
return
}
// ids: <S> <S> tokens in the input string </S> </S>
var ids = []token{start, start}
for _, f := range words {
ids = append(ids, tokens.ID(f))
}
ids = append(ids, end, end)
var ctx bigram
var tok2 token
for i := 0; i < len(ids)-2; i++ {
ctx.tok0, ctx.tok1, tok2 = ids[i], ids[i+1], ids[i+2]
if !m.fwd2.Observe(ctx, tok2) {
m.fwd1.Observe(ctx.tok0, ctx.tok1)
}
ctx.tok0, tok2 = tok2, ctx.tok0
m.rev2.Observe(ctx, tok2)
}
}
// Reply generates a reply string to str, given the current state of
// the language model.
func (m *Model) Reply(text string) string {
pivot := m.pickPivot(strings.Fields(text))
fwdctx := bigram{tok0: pivot, tok1: m.choice(m.fwd1[pivot])}
start, end := m.ends()
var path []token
// Compute the beginning of the sentence by walking from
// fwdctx back to start.
path = m.follow(path, m.rev2, fwdctx.reverse(), start)
// Reverse what we have so far.
reverse(path)
// Append the initial context, tok0 and tok1. But tok0 only if
// we weren't already at the start.
if fwdctx.tok0 != start {
path = append(path, fwdctx.tok0)
}
// And tok1 only if we weren't already at the end.
if fwdctx.tok1 != end {
path = append(path, fwdctx.tok1)
// Compute the end of the sentence by walking forward
// from fwdctx to end.
path = m.follow(path, m.fwd2, fwdctx, end)
}
return join(m.tokens, path)
}
func (m *Model) pickPivot(words []string) token {
var pivots []token
for _, w := range words {
pivots = append(pivots, m.tokens.Syns(w)...)
}
if len(pivots) > 0 {
return pivots[m.rand.Intn(len(pivots))]
}
// No valid pivots, so babble. Assume tokens 0 & 1 are start and end.
return token(m.rand.Intn(m.tokens.Len()-2) + 2)
}
// follow extends path by walking obs from pos until goal is chosen,
// returning the extended path. The goal token itself is not appended.
func (m *Model) follow(path []token, obs obs2, pos bigram, goal token) []token {
	for {
		toks := obs[pos]
		if len(toks) == 0 {
			// log.Fatal("…at", pos) printed no separator before the
			// struct (fmt.Sprint only inserts a space between two
			// non-string operands); Fatalf formats it properly.
			log.Fatalf("ran out of chain at %v", pos)
		}
		tok := m.choice(toks)
		if tok == goal {
			return path
		}
		path = append(path, tok)
		pos.tok0, pos.tok1 = pos.tok1, tok
	}
}
// join concatenates the words for path, separated by single spaces.
func join(tokens *syndict, path []token) string {
	// Guard the buf[:len(buf)-1] below, which panics when path is empty.
	if len(path) == 0 {
		return ""
	}
	buf := make([]byte, 0, joinsize(tokens, path))
	for _, tok := range path {
		buf = append(buf, tokens.Word(tok)...)
		buf = append(buf, ' ')
	}
	// Drop the trailing space.
	return string(buf[:len(buf)-1])
}
func reverse(toks []token) {
a, b := 0, len(toks)-1
for a < b {
toks[a], toks[b] = toks[b], toks[a]
a++
b--
}
}
func joinsize(tokens *syndict, path []token) int {
// initialize count assuming a space between each word
count := len(path)
for _, tok := range path {
count += len(tokens.Word(tok))
}
return count
}
func (m *Model) choice(toks []token) token {
return toks[m.rand.Intn(len(toks))]
}
|
// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package network
import (
"strconv"
"github.com/juju/utils/set"
)
// PortSet is a set-like container of Port values.
type PortSet struct {
	// values maps a protocol name (e.g. "tcp") to the set of its port
	// numbers, stored as strings.
	values map[string]set.Strings
}
// NewPortSet creates a map of protocols to sets of stringified port numbers.
func NewPortSet(portRanges ...PortRange) PortSet {
	var portMap PortSet
	portMap.values = make(map[string]set.Strings)
	portMap.AddRanges(portRanges...)
	return portMap
}
// Size returns the number of ports in the set.
func (ps PortSet) Size() int {
	// Counts via Ports, so this materializes the whole port list.
	return len(ps.Ports())
}
// IsEmpty returns true if the PortSet is empty.
func (ps PortSet) IsEmpty() bool {
	return len(ps.values) == 0
}
// Protocols returns a list of protocols known to the PortSet.
// The order is not deterministic (map iteration).
func (ps PortSet) Protocols() []string {
	var result []string
	for key := range ps.values {
		result = append(result, key)
	}
	return result
}
// Ports returns a list of all the ports in the set for the
// given protocols. If no protocols are provided all known
// protocols in the set are used. Protocols with no ports in
// the set are skipped.
func (ps PortSet) Ports(protocols ...string) []Port {
	if len(protocols) == 0 {
		protocols = ps.Protocols()
	}
	var result []Port
	for _, protocol := range protocols {
		ports, ok := ps.values[protocol]
		if !ok {
			// Previously this returned nil, discarding every port
			// already accumulated for the other protocols; skip the
			// unknown protocol instead, consistent with PortStrings.
			continue
		}
		for _, port := range ports.Values() {
			// The set only ever holds strconv.Itoa output (see Add),
			// so the reverse conversion cannot fail.
			portNum, _ := strconv.Atoi(port)
			result = append(result, Port{protocol, portNum})
		}
	}
	return result
}
// PortStrings returns a list of stringified ports in the set
// for the given protocol.
func (ps PortSet) PortStrings(protocol string) []string {
	ports, found := ps.values[protocol]
	if !found {
		return nil
	}
	return ports.Values()
}
// Union returns a new PortSet holding every value that appears
// in either PortSet. (The old comment described an intersection.)
func (ps PortSet) Union(other PortSet) PortSet {
	var result PortSet
	result.values = make(map[string]set.Strings)
	for protocol, value := range ps.values {
		// Union with nil clones the set.
		result.values[protocol] = value.Union(nil)
	}
	for protocol, value := range other.values {
		// The old code nil-ed out value when the protocol was not
		// already present, silently dropping every port of a protocol
		// found only in other. The zero-valued ports already unions
		// correctly with value, so no special case is needed.
		ports := result.values[protocol]
		result.values[protocol] = ports.Union(value)
	}
	return result
}
// Difference returns a new PortSet of the values
// that are not in the other PortSet.
func (ps PortSet) Difference(other PortSet) PortSet {
	var result PortSet
	result.values = make(map[string]set.Strings)
	for protocol, value := range ps.values {
		ports, ok := other.values[protocol]
		if !ok {
			// Nothing to subtract for this protocol. NOTE(review):
			// this stores the receiver's set without copying, so the
			// result aliases ps — confirm callers never mutate it.
			result.values[protocol] = value
		} else {
			result.values[protocol] = value.Difference(ports)
		}
	}
	return result
}
// Add adds a Port to the PortSet.
func (ps *PortSet) Add(port Port) {
	portNum := strconv.Itoa(port.Number)
	ports, ok := ps.values[port.Protocol]
	if !ok {
		ps.values[port.Protocol] = set.NewStrings(portNum)
	} else {
		// Adding to ports updates the stored set in place — assumes
		// set.Strings has reference semantics; confirm.
		ports.Add(portNum)
	}
}
// AddRanges adds portRanges to the PortSet.
func (ps *PortSet) AddRanges(portRanges ...PortRange) {
	for _, port := range PortRangesToPorts(portRanges) {
		ps.Add(port)
	}
}
Move Add* up in the file.
// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package network
import (
"strconv"
"github.com/juju/utils/set"
)
// PortSet is a set-like container of Port values.
type PortSet struct {
values map[string]set.Strings
}
// NewPortSet creates a map of protocols to sets of stringified port numbers.
func NewPortSet(portRanges ...PortRange) PortSet {
var portMap PortSet
portMap.values = make(map[string]set.Strings)
portMap.AddRanges(portRanges...)
return portMap
}
// Size returns the number of ports in the set.
func (ps PortSet) Size() int {
return len(ps.Ports())
}
// IsEmpty returns true if the PortSet is empty.
func (ps PortSet) IsEmpty() bool {
return len(ps.values) == 0
}
// Protocols returns a list of protocols known to the PortSet.
func (ps PortSet) Protocols() []string {
var result []string
for key := range ps.values {
result = append(result, key)
}
return result
}
// Ports returns a list of all the ports in the set for the
// given protocols. If no protocols are provided all known
// protocols in the set are used. Protocols with no ports in
// the set are skipped.
func (ps PortSet) Ports(protocols ...string) []Port {
	if len(protocols) == 0 {
		protocols = ps.Protocols()
	}
	var result []Port
	for _, protocol := range protocols {
		ports, ok := ps.values[protocol]
		if !ok {
			// Previously this returned nil, discarding every port
			// already accumulated for the other protocols; skip the
			// unknown protocol instead, consistent with PortStrings.
			continue
		}
		for _, port := range ports.Values() {
			// The set only ever holds strconv.Itoa output (see Add),
			// so the reverse conversion cannot fail.
			portNum, _ := strconv.Atoi(port)
			result = append(result, Port{protocol, portNum})
		}
	}
	return result
}
// PortStrings returns a list of stringified ports in the set
// for the given protocol.
func (ps PortSet) PortStrings(protocol string) []string {
ports, ok := ps.values[protocol]
if !ok {
return nil
}
return ports.Values()
}
// Add adds a Port to the PortSet.
func (ps *PortSet) Add(port Port) {
portNum := strconv.Itoa(port.Number)
ports, ok := ps.values[port.Protocol]
if !ok {
ps.values[port.Protocol] = set.NewStrings(portNum)
} else {
ports.Add(portNum)
}
}
// AddRanges adds port ranges to the PortSet.
func (ps *PortSet) AddRanges(portRanges ...PortRange) {
for _, port := range PortRangesToPorts(portRanges) {
ps.Add(port)
}
}
// Union returns a new PortSet holding every value that appears
// in either PortSet. (The old comment described an intersection.)
func (ps PortSet) Union(other PortSet) PortSet {
	var result PortSet
	result.values = make(map[string]set.Strings)
	for protocol, value := range ps.values {
		// Union with nil clones the set.
		result.values[protocol] = value.Union(nil)
	}
	for protocol, value := range other.values {
		// The old code nil-ed out value when the protocol was not
		// already present, silently dropping every port of a protocol
		// found only in other. The zero-valued ports already unions
		// correctly with value, so no special case is needed.
		ports := result.values[protocol]
		result.values[protocol] = ports.Union(value)
	}
	return result
}
// Difference returns a new PortSet of the values
// that are not in the other PortSet.
func (ps PortSet) Difference(other PortSet) PortSet {
var result PortSet
result.values = make(map[string]set.Strings)
for protocol, value := range ps.values {
ports, ok := other.values[protocol]
if !ok {
result.values[protocol] = value
} else {
result.values[protocol] = value.Difference(ports)
}
}
return result
}
|
// Copyright 2016 Koichi Shiraishi. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package nvim
import (
"fmt"
"strings"
"github.com/garyburd/neovim-go/vim"
)
// Highlight groups used for error, progress and success messages.
var (
	errorColor = "Identifier"
	progress = "Identifier"
	success = "Function"
)
// Echo provide the vim 'echo' command.
//
// NOTE(review): a single quote in the formatted output would break the
// quoting of the generated command — confirm callers never pass one.
func Echo(v *vim.Vim, format string, a ...interface{}) error {
	return v.Command("echo '" + fmt.Sprintf(format, a...) + "'")
}
// EchoRaw provide the raw output vim 'echo' command.
func EchoRaw(v *vim.Vim, a string) error {
	return v.Command("echo \"" + a + "\"")
}
// Echomsg provide the vim 'echomsg' command.
func Echomsg(v *vim.Vim, a ...interface{}) error {
	return v.Command("echomsg '" + strings.TrimSpace(fmt.Sprintln(a...)) + "'")
}
// Echoerr provide the vim 'echoerr' command.
func Echoerr(v *vim.Vim, format string, a ...interface{}) error {
	return v.Command("echoerr '" + fmt.Sprintf(format, a...) + "'")
}
// EchohlErr provide the vim 'echo' command with the 'echohl' highlighting prefix text.
func EchohlErr(v *vim.Vim, prefix string, a ...interface{}) error {
	// Best-effort redraw before echoing; its error is deliberately ignored.
	v.Command("redraw")
	if prefix != "" {
		prefix += ": "
	}
	text := strings.TrimSpace(fmt.Sprintln(a...))
	return v.Command("echo '" + prefix + "' | echohl " + errorColor + " | echon '" + text + "' | echohl None")
}
// EchohlBefore provide the vim 'echo' command with the 'echohl' highlighting prefix text.
func EchohlBefore(v *vim.Vim, prefix string, highlight string, format string, a ...interface{}) error {
	v.Command("redraw")
	// suffix closes the highlighted prefix and reopens plain echon text.
	suffix := "' | echohl None | echon '"
	if prefix != "" {
		suffix += ": "
	}
	return v.Command("echohl " + highlight + " | echo '" + prefix + suffix + fmt.Sprintf(format, a...) + "'")
}
// EchohlAfter provide the vim 'echo' command with the 'echohl' highlighting message text.
func EchohlAfter(v *vim.Vim, prefix string, highlight string, format string, a ...interface{}) error {
	v.Command("redraw")
	if prefix != "" {
		prefix += ": "
	}
	return v.Command("echo '" + prefix + "' | echohl " + highlight + " | echon '" + fmt.Sprintf(format, a...) + "' | echohl None")
}
// EchoProgress displays a command progress message ("<before> <from> to <to> ...")
// to the echo area, highlighting from and to.
func EchoProgress(v *vim.Vim, prefix, before, from, to string) error {
	// Best-effort redraw; its error is deliberately ignored.
	v.Command("redraw")
	if prefix != "" {
		prefix += ": "
	}
	// TODO(zchee): Refactoring because line too long.
	return v.Command(fmt.Sprintf("echon '%s%s ' | echohl %s | echon '%s' | echohl None | echon ' to ' | echohl %s | echon '%s' | echohl None | echon ' ...'", prefix, before, progress, from, progress, to))
}
// EchoSuccess displays the success of the command to echo area.
func EchoSuccess(v *vim.Vim, prefix string) error {
	v.Command("redraw")
	return v.Command(fmt.Sprintf("echo '%s: ' | echohl %s | echon 'SUCCESS' | echohl None", prefix, success))
}
// ReportError output of the accumulated errors report.
// TODO(zchee): research vim.ReportError behavior
// Why it does not immediately display error?
func ReportError(v *vim.Vim, format string, a ...interface{}) error {
	return v.ReportError(fmt.Sprintf(format, a...))
}
// ClearMsg cleanups the echo area.
func ClearMsg(v *vim.Vim) error {
	return v.Command("echon")
}
Echo: Add suffix message
Signed-off-by: Koichi Shiraishi <13fbd79c3d390e5d6585a21e11ff5ec1970cff0c@zchee.io>
// Copyright 2016 Koichi Shiraishi. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package nvim
import (
"fmt"
"strings"
"github.com/garyburd/neovim-go/vim"
)
// Highlight group names used by the echo helpers below.
var (
	errorColor = "Identifier" // highlight group for EchohlErr message text
	progress   = "Identifier" // highlight group for EchoProgress endpoints
	success    = "Function"   // highlight group for the EchoSuccess tag
)
// Echo provide the vim 'echo' command.
func Echo(v *vim.Vim, format string, a ...interface{}) error {
	msg := fmt.Sprintf(format, a...)
	return v.Command("echo '" + msg + "'")
}
// EchoRaw provide the raw output vim 'echo' command.
func EchoRaw(v *vim.Vim, a string) error {
	// Double quotes let vim interpret escapes in the argument.
	cmd := "echo \"" + a + "\""
	return v.Command(cmd)
}
// Echomsg provide the vim 'echomsg' command.
func Echomsg(v *vim.Vim, a ...interface{}) error {
	// Sprintln adds a trailing newline; trim it before quoting.
	text := strings.TrimSpace(fmt.Sprintln(a...))
	return v.Command("echomsg '" + text + "'")
}
// Echoerr provide the vim 'echoerr' command.
func Echoerr(v *vim.Vim, format string, a ...interface{}) error {
	msg := fmt.Sprintf(format, a...)
	return v.Command("echoerr '" + msg + "'")
}
// EchohlErr provide the vim 'echo' command with the 'echohl' highlighting prefix text.
func EchohlErr(v *vim.Vim, prefix string, a ...interface{}) error {
	v.Command("redraw")

	head := prefix
	if head != "" {
		head += ": "
	}
	body := strings.TrimSpace(fmt.Sprintln(a...))
	cmd := fmt.Sprintf("echo '%s' | echohl %s | echon '%s' | echohl None", head, errorColor, body)
	return v.Command(cmd)
}
// EchohlBefore provide the vim 'echo' command with the 'echohl' highlighting prefix text.
func EchohlBefore(v *vim.Vim, prefix string, highlight string, format string, a ...interface{}) error {
	v.Command("redraw")

	// The ": " separator only appears when a prefix is present; it is
	// emitted inside the un-highlighted echon segment.
	sep := ""
	if prefix != "" {
		sep = ": "
	}
	msg := fmt.Sprintf(format, a...)
	return v.Command("echohl " + highlight + " | echo '" + prefix + "' | echohl None | echon '" + sep + msg + "'")
}
// EchohlAfter provide the vim 'echo' command with the 'echohl' highlighting message text.
func EchohlAfter(v *vim.Vim, prefix string, highlight string, format string, a ...interface{}) error {
	v.Command("redraw")

	head := prefix
	if head != "" {
		head += ": "
	}
	msg := fmt.Sprintf(format, a...)
	return v.Command(fmt.Sprintf("echo '%s' | echohl %s | echon '%s' | echohl None", head, highlight, msg))
}
// EchoProgress displays a command progress message to echo area.
func EchoProgress(v *vim.Vim, prefix, before, from, to string) error {
	v.Command("redraw")

	if prefix != "" {
		prefix += ": "
	}
	// Built in two halves to keep the source line readable; the command
	// string sent to vim is unchanged.
	cmd := fmt.Sprintf("echon '%s%s ' | echohl %s | echon '%s'", prefix, before, progress, from)
	cmd += fmt.Sprintf(" | echohl None | echon ' to ' | echohl %s | echon '%s' | echohl None | echon ' ...'", progress, to)
	return v.Command(cmd)
}
// EchoSuccess displays the success of the command to echo area.
func EchoSuccess(v *vim.Vim, prefix string, msg string) error {
	v.Command("redraw")

	// A non-empty trailing message is separated by a single space.
	suffix := msg
	if suffix != "" {
		suffix = " " + suffix
	}
	cmd := fmt.Sprintf("echon '%s: ' | echohl %s | echon 'SUCCESS' | echohl None | echon '%s'", prefix, success, suffix)
	return v.Command(cmd)
}
// ReportError output of the accumulated errors report.
// TODO(zchee): research vim.ReportError behavior
// Why it does not immediately display error?
func ReportError(v *vim.Vim, format string, a ...interface{}) error {
	return v.ReportError(fmt.Sprintf(format, a...))
}

// ClearMsg cleanups the echo area.
// An argument-less :echon prints nothing, which effectively clears the
// current message line.
func ClearMsg(v *vim.Vim) error {
	return v.Command("echon")
}
|
//
// Copyright (c) 2016 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package main
import (
"context"
"flag"
"fmt"
"log"
"os"
"runtime/pprof"
"sync"
"syscall"
"time"
"github.com/01org/ciao/clogger/gloginterface"
"github.com/01org/ciao/osprepare"
"github.com/01org/ciao/payloads"
"github.com/01org/ciao/ssntp"
"github.com/golang/glog"
"github.com/pkg/errors"
"gopkg.in/yaml.v2"
)
// Command-line flags and scheduler-wide defaults.
var cert = flag.String("cert", "/etc/pki/ciao/cert-Scheduler-localhost.pem", "Server certificate")
var cacert = flag.String("cacert", "/etc/pki/ciao/CAcert-server-localhost.pem", "CA certificate")
var cpuprofile = flag.String("cpuprofile", "", "Write cpu profile to file")
var heartbeat = flag.Bool("heartbeat", false, "Emit status heartbeat text")
var logDir = "/var/lib/ciao/logs/scheduler" // default log directory
var configURI = flag.String("configuration-uri", "file:///etc/ciao/configuration.yaml",
	"Cluster configuration URI")

// ssntpSchedulerServer is the scheduler's central state: SSNTP server
// plumbing plus bookkeeping for connected controllers, compute nodes and
// network nodes.
type ssntpSchedulerServer struct {
	// user config overrides ------------------------------------------
	heartbeat  bool
	cpuprofile string

	// ssntp ----------------------------------------------------------
	config *ssntp.Config
	ssntp  ssntp.Server

	// scheduler internal state ---------------------------------------

	// Command & Status Reporting node(s)
	controllerMap   map[string]*controllerStat
	controllerList  []*controllerStat // 1 controllerMaster at front of list
	controllerMutex sync.RWMutex      // Rlock traversing map, Lock modifying map

	// Compute Nodes
	cnMap      map[string]*nodeStat
	cnList     []*nodeStat
	cnMutex    sync.RWMutex // Rlock traversing map, Lock modifying map
	cnMRU      *nodeStat    // most recently chosen dispatch target (see pickComputeNode)
	cnMRUIndex int          // index of cnMRU in cnList, -1 when unset
	//cnInactiveMap map[string]nodeStat

	// Network Nodes
	nnMap      map[string]*nodeStat
	nnList     []*nodeStat
	nnMutex    sync.RWMutex // Rlock traversing map, Lock modifying map
	nnMRU      *nodeStat    // most recently chosen dispatch target (see pickNetworkNode)
	nnMRUIndex int          // index of nnMRU in nnList, -1 when unset
}
// newSsntpSchedulerServer allocates a scheduler with empty controller and
// node maps and "no MRU yet" (-1) markers.
func newSsntpSchedulerServer() *ssntpSchedulerServer {
	sched := &ssntpSchedulerServer{
		controllerMap: make(map[string]*controllerStat),
		cnMap:         make(map[string]*nodeStat),
		nnMap:         make(map[string]*nodeStat),
		cnMRUIndex:    -1,
		nnMRUIndex:    -1,
	}
	return sched
}
// nodeStat is the scheduler's per-node record for a compute or network
// node; statistics fields are refreshed from the node's READY status
// frames (see updateNodeStat).
type nodeStat struct {
	mutex       sync.Mutex // guards all fields below
	status      ssntp.Status
	uuid        string
	memTotalMB  int // total memory, MB
	memAvailMB  int // available memory, MB (speculatively decremented on dispatch)
	diskTotalMB int
	diskAvailMB int
	load        int
	cpus        int
	isNetNode   bool // true for network nodes, false for compute nodes
	networks    []payloads.NetworkStat
}
// controllerStatus tracks a controller's clustering role.
type controllerStatus uint8

const (
	controllerMaster controllerStatus = iota // active controller, kept at the front of the list
	controllerBackup                         // standby controller
)

// String renders the status for human-readable output; unknown values
// render as the empty string.
func (s controllerStatus) String() string {
	switch s {
	case controllerMaster:
		return "MASTER"
	case controllerBackup:
		return "BACKUP"
	default:
		return ""
	}
}

// controllerStat is the scheduler's per-controller record.
type controllerStat struct {
	mutex  sync.Mutex       // guards status
	status controllerStatus // MASTER or BACKUP
	uuid   string           // ssntp client UUID
}
// prepareNodeConnectionEvent marshals a NodeConnected/NodeDisconnected
// event payload for the given node; the connected flag selects which of
// the two payload wrappers is emitted.
func prepareNodeConnectionEvent(nodeUUID string, nodeType payloads.Resource, connected bool) (b []byte, err error) {
	event := payloads.NodeConnectedEvent{
		NodeUUID: nodeUUID,
		NodeType: nodeType,
	}

	// idiom fix: test the boolean directly instead of "== true"
	if connected {
		payload := payloads.NodeConnected{
			Connected: event,
		}
		b, err = yaml.Marshal(&payload)
	} else {
		payload := payloads.NodeDisconnected{
			Disconnected: event,
		}
		b, err = yaml.Marshal(&payload)
	}

	return
}
// The ssntp server implementation is expected to generate ssntp client
// connect/disconnect events. This function sends to one controller a
// connect event for each currently connected nodes.
func (sched *ssntpSchedulerServer) sendDirectedNodeConnectionEvents(ctlUUID string) {
	sched.cnMutex.RLock()
	defer sched.cnMutex.RUnlock()
	for _, node := range sched.cnList {
		nodeUUID := node.uuid
		b, err := prepareNodeConnectionEvent(nodeUUID, payloads.ComputeNode, true)
		if err != nil {
			// bug fix: errors.Wrap only returns a wrapped error, it does
			// not report anything — previously the wrapped error was
			// silently discarded.  Log it so the lost event is visible.
			glog.Warningf("%v", errors.Wrap(err, "Node connection event lost"))
			continue
		}
		sched.ssntp.SendEvent(ctlUUID, ssntp.NodeConnected, b)
	}

	sched.nnMutex.RLock()
	defer sched.nnMutex.RUnlock()
	for _, node := range sched.nnList {
		nodeUUID := node.uuid
		b, err := prepareNodeConnectionEvent(nodeUUID, payloads.NetworkNode, true)
		if err != nil {
			// same fix as above: report instead of discarding
			glog.Warningf("%v", errors.Wrap(err, "Node connection event lost"))
			continue
		}
		sched.ssntp.SendEvent(ctlUUID, ssntp.NodeConnected, b)
	}
}
// The ssntp server implementation is expected to generate ssntp client
// connect/disconnect events. This function sends them to all controllers.
// NOTE(review): the SSNTP event type is ssntp.NodeConnected even for
// disconnect payloads — confirm receivers key off the payload contents.
func (sched *ssntpSchedulerServer) sendNodeConnectionEvents(nodeUUID string, nodeType payloads.Resource, connected bool) {
	b, err := prepareNodeConnectionEvent(nodeUUID, nodeType, connected)
	if err != nil {
		// bug fix: the errors.Wrap result was previously discarded and a
		// nil payload was still broadcast; log the failure and bail out.
		glog.Warningf("%v", errors.Wrap(err, "Node connection event lost"))
		return
	}

	sched.controllerMutex.RLock()
	defer sched.controllerMutex.RUnlock()
	for _, ctl := range sched.controllerMap {
		sched.ssntp.SendEvent(ctl.uuid, ssntp.NodeConnected, b)
	}
}
// Add state for newly connected Controller
// This function is symmetric with disconnectController().
// The first controller (or any newcomer while only backups exist) becomes
// master and is kept at the front of controllerList.
func connectController(sched *ssntpSchedulerServer, uuid string) {
	sched.controllerMutex.Lock()
	defer sched.controllerMutex.Unlock()

	if sched.controllerMap[uuid] != nil {
		glog.Warningf("Unexpected reconnect from controller %s\n", uuid)
		return
	}
	var controller controllerStat
	controller.uuid = uuid

	// TODO: smarter clustering than "assume master, unless another is master"
	if len(sched.controllerList) == 0 || sched.controllerList[0].status == controllerBackup {
		// master at front of the list
		controller.status = controllerMaster
		sched.controllerList = append([]*controllerStat{&controller}, sched.controllerList...)
	} else { // already have a master
		// backup controllers at the end of the list
		controller.status = controllerBackup
		sched.controllerList = append(sched.controllerList, &controller)
	}
	sched.controllerMap[uuid] = &controller

	// In case launcher clients are already connected, generate a node
	// connection event for all nodes.
	sched.sendDirectedNodeConnectionEvents(uuid)
}

// Undo previous state additions for departed Controller
// This function is symmetric with connectController().
// If the master departed, the first backup found is promoted and moved to
// the front of the list.
func disconnectController(sched *ssntpSchedulerServer, uuid string) {
	sched.controllerMutex.Lock()
	defer sched.controllerMutex.Unlock()

	controller := sched.controllerMap[uuid]
	if controller == nil {
		glog.Warningf("Unexpected disconnect from controller %s\n", uuid)
		return
	}

	// delete from map, remove from list
	delete(sched.controllerMap, uuid)
	for i, c := range sched.controllerList {
		if c != controller {
			continue
		}
		// NOTE(review): iteration continues over the shortened slice after
		// this append; harmless while each controller appears exactly once,
		// but a break here would be clearer — confirm.
		sched.controllerList = append(sched.controllerList[:i], sched.controllerList[i+1:]...)
	}

	if controller.status == controllerBackup {
		return
	} // else promote a new master

	for i, c := range sched.controllerList {
		c.mutex.Lock()
		if c.status == controllerBackup {
			c.status = controllerMaster
			//TODO: inform the Controller it is master
			c.mutex.Unlock()

			// move to front of list
			front := sched.controllerList[:i]
			back := sched.controllerList[i+1:]
			sched.controllerList = append([]*controllerStat{c}, front...)
			sched.controllerList = append(sched.controllerList, back...)
			break
		}
		c.mutex.Unlock()
	}
}
// Add state for newly connected Compute Node
// This function is symmetric with disconnectComputeNode().
func connectComputeNode(sched *ssntpSchedulerServer, uuid string) {
	sched.cnMutex.Lock()
	defer sched.cnMutex.Unlock()

	if sched.cnMap[uuid] != nil {
		glog.Warningf("Unexpected reconnect from compute node %s\n", uuid)
		return
	}

	// Register the node in both the lookup map and the scan list.
	node := &nodeStat{
		status:    ssntp.CONNECTED,
		uuid:      uuid,
		isNetNode: false,
	}
	sched.cnList = append(sched.cnList, node)
	sched.cnMap[uuid] = node

	// Inform all controllers asynchronously.
	go sched.sendNodeConnectionEvents(uuid, payloads.ComputeNode, true)
}
// Undo previous state additions for departed Compute Node
// This function is symmetric with connectComputeNode().
func disconnectComputeNode(sched *ssntpSchedulerServer, uuid string) {
	sched.cnMutex.Lock()
	defer sched.cnMutex.Unlock()

	node := sched.cnMap[uuid]
	if node == nil {
		glog.Warningf("Unexpected disconnect from compute node %s\n", uuid)
		return
	}

	//TODO: consider moving to cnInactiveMap?
	delete(sched.cnMap, uuid)
	// remove the node's list entry; pointer comparison finds the match
	for i, n := range sched.cnList {
		if n != node {
			continue
		}
		// NOTE(review): iteration continues over the shortened slice after
		// this append; safe while the uuid is unique, but a break would be
		// clearer — confirm.
		sched.cnList = append(sched.cnList[:i], sched.cnList[i+1:]...)
	}

	// drop the MRU hint if it pointed at the departed node
	if node == sched.cnMRU {
		sched.cnMRU = nil
		sched.cnMRUIndex = -1
	}

	go sched.sendNodeConnectionEvents(uuid, payloads.ComputeNode, false)
}
// Add state for newly connected Network Node
// This function is symmetric with disconnectNetworkNode().
func connectNetworkNode(sched *ssntpSchedulerServer, uuid string) {
	sched.nnMutex.Lock()
	defer sched.nnMutex.Unlock()

	if sched.nnMap[uuid] != nil {
		glog.Warningf("Unexpected reconnect from network compute node %s\n", uuid)
		return
	}

	// Register the node in both the lookup map and the scan list.
	node := &nodeStat{
		status:    ssntp.CONNECTED,
		uuid:      uuid,
		isNetNode: true,
	}
	sched.nnList = append(sched.nnList, node)
	sched.nnMap[uuid] = node

	// Inform all controllers asynchronously.
	go sched.sendNodeConnectionEvents(uuid, payloads.NetworkNode, true)
}
// Undo previous state additions for departed Network Node
// This function is symmetric with connectNetworkNode().
func disconnectNetworkNode(sched *ssntpSchedulerServer, uuid string) {
	sched.nnMutex.Lock()
	defer sched.nnMutex.Unlock()

	node := sched.nnMap[uuid]
	if node == nil {
		glog.Warningf("Unexpected disconnect from network compute node %s\n", uuid)
		return
	}

	//TODO: consider moving to nnInactiveMap?
	delete(sched.nnMap, uuid)
	// remove the node's list entry; pointer comparison finds the match
	for i, n := range sched.nnList {
		if n != node {
			continue
		}
		// NOTE(review): iteration continues over the shortened slice after
		// this append; safe while the uuid is unique, but a break would be
		// clearer — confirm.
		sched.nnList = append(sched.nnList[:i], sched.nnList[i+1:]...)
	}

	// drop the MRU hint if it pointed at the departed node
	if node == sched.nnMRU {
		sched.nnMRU = nil
		sched.nnMRUIndex = -1
	}

	go sched.sendNodeConnectionEvents(uuid, payloads.NetworkNode, false)
}
// ConnectNotify is the ssntp server connect callback.  A client may hold
// several roles at once, so each role check is deliberately independent
// rather than exclusive.
func (sched *ssntpSchedulerServer) ConnectNotify(uuid string, role ssntp.Role) {
	if role.IsController() {
		connectController(sched, uuid)
	}
	if role.IsAgent() {
		connectComputeNode(sched, uuid)
	}
	if role.IsNetAgent() {
		connectNetworkNode(sched, uuid)
	}

	glog.V(2).Infof("Connect (role 0x%x, uuid=%s)\n", role, uuid)
}
// DisconnectNotify is the ssntp server disconnect callback, tearing down
// the per-role state created in ConnectNotify.  Role checks stay
// independent because a client may hold several roles at once.
func (sched *ssntpSchedulerServer) DisconnectNotify(uuid string, role ssntp.Role) {
	if role.IsController() {
		disconnectController(sched, uuid)
	}
	if role.IsAgent() {
		disconnectComputeNode(sched, uuid)
	}
	if role.IsNetAgent() {
		disconnectNetworkNode(sched, uuid)
	}

	// bug fix: this log line previously said "Connect", which made
	// disconnects indistinguishable from connects in the logs.
	glog.V(2).Infof("Disconnect (role 0x%x, uuid=%s)\n", role, uuid)
}
// updateNodeStat refreshes a node's cached statistics from a status
// frame.  Only READY frames carry statistics; any other status just
// updates the status field.
func (sched *ssntpSchedulerServer) updateNodeStat(node *nodeStat, status ssntp.Status, frame *ssntp.Frame) {
	payload := frame.Payload

	node.mutex.Lock()
	defer node.mutex.Unlock()

	node.status = status
	switch node.status {
	case ssntp.READY:
		//pull in client's READY status frame transmitted statistics
		var stats payloads.Ready
		err := yaml.Unmarshal(payload, &stats)
		if err != nil {
			glog.Errorf("Bad READY yaml for node %s\n", node.uuid)
			return
		}
		node.memTotalMB = stats.MemTotalMB
		node.memAvailMB = stats.MemAvailableMB
		node.diskTotalMB = stats.DiskTotalMB
		node.diskAvailMB = stats.DiskAvailableMB
		node.load = stats.Load
		node.cpus = stats.CpusOnline
		node.networks = stats.Networks
		//any changes to the payloads.Ready struct should be
		//accompanied by a change here
	}
}
// StatusNotify is the ssntp status callback: it routes a client's status
// frame to the matching compute and/or network node record.  Frames from
// clients that are no longer connected are ignored.
func (sched *ssntpSchedulerServer) StatusNotify(uuid string, status ssntp.Status, frame *ssntp.Frame) {
	// for now only pay attention to READY status

	role, err := sched.ssntp.ClientRole(uuid)
	if err != nil {
		glog.Errorf("STATUS ignored from disconnected client %s", uuid)
		return
	}

	glog.V(2).Infof("STATUS %v from %s (%s)\n", status, uuid, role.String())

	if role.IsAgent() {
		// idiom fix: single map lookup instead of lookup-then-index, and
		// the read lock is released as soon as this section is done
		// rather than held (via defer) across the net-agent section too.
		sched.cnMutex.RLock()
		if cn := sched.cnMap[uuid]; cn != nil {
			sched.updateNodeStat(cn, status, frame)
		}
		sched.cnMutex.RUnlock()
	}
	if role.IsNetAgent() {
		sched.nnMutex.RLock()
		if nn := sched.nnMap[uuid]; nn != nil {
			sched.updateNodeStat(nn, status, frame)
		}
		sched.nnMutex.RUnlock()
	}
}
// workResources is the scheduler's distilled view of a START payload's
// resource demands (populated by getWorkloadResources).
type workResources struct {
	instanceUUID string   // instance being started
	memReqMB     int      // requested memory, MB
	diskReqMB    int      // requested local disk, MB
	networkNode  bool     // true when the workload must run on a network node
	physNets     []string // physical networks a network-node workload requires
}
// getWorkloadResources extracts the resource demands of a START payload
// into a workResources struct and validates them.  An error is returned
// when a value is out of range: memory <= 0, local disk < 0, or a
// network_node flag other than 0/1.
func (sched *ssntpSchedulerServer) getWorkloadResources(work *payloads.Start) (workload workResources, err error) {
	// loop the array to find resources
	for idx := range work.Start.RequestedResources {
		reqType := work.Start.RequestedResources[idx].Type
		reqValue := work.Start.RequestedResources[idx].Value
		reqString := work.Start.RequestedResources[idx].ValueString

		// memory:
		if reqType == payloads.MemMB {
			workload.memReqMB = reqValue
		}

		// network node
		if reqType == payloads.NetworkNode {
			wantsNetworkNode := reqValue
			// validate input: requested resource values are always integers
			if wantsNetworkNode != 0 && wantsNetworkNode != 1 {
				return workload, fmt.Errorf("invalid start payload resource demand: network_node (%d) is not 0 or 1", wantsNetworkNode)
			}
			// convert to more natural bool for local struct
			if wantsNetworkNode == 1 {
				workload.networkNode = true
			} else { //wantsNetworkNode == 0
				workload.networkNode = false
			}
		}

		// network node physical networks
		// NOTE(review): this only collects PhysicalNetwork entries that
		// appear after the NetworkNode flag in the resource list — confirm
		// the payload always orders them that way.
		if workload.networkNode {
			if reqType == payloads.PhysicalNetwork {
				workload.physNets = append(workload.physNets, reqString)
			}
		}

		// etc...
	}

	// volumes: only locally attached storage counts against node disk
	// (volume.Size presumably in GB, converted to MB — confirm)
	for _, volume := range work.Start.Storage {
		if volume.Local {
			workload.diskReqMB += volume.Size * 1024
		}
	}

	// validate the found resources
	if workload.memReqMB <= 0 {
		return workload, fmt.Errorf("invalid start payload resource demand: mem_mb (%d) <= 0, must be > 0", workload.memReqMB)
	}
	if workload.diskReqMB < 0 {
		return workload, fmt.Errorf("invalid start payload local disk demand: disk MB (%d) < 0, must be >= 0", workload.diskReqMB)
	}

	// note the uuid
	workload.instanceUUID = work.Start.InstanceUUID

	return workload, nil
}
// networkDemandsSatisfied reports whether every physical network the
// workload requests is present on the node (matched against the node's
// reported NodeIP values).  Non-network nodes carry no network demands
// and trivially satisfy.
func networkDemandsSatisfied(node *nodeStat, workload *workResources) bool {
	if !node.isNetNode {
		return true
	}

	for _, requestedNetwork := range workload.physNets {
		found := false
		for _, availableNetwork := range node.networks {
			if requestedNetwork == availableNetwork.NodeIP {
				found = true
				break
			}
		}
		if !found {
			// improvement: bail out on the first unmatched request
			// instead of counting all matches and comparing totals
			return false
		}
	}
	return true
}
// Check resource demands are satisfiable by the referenced, locked nodeStat object.
// The caller must hold node.mutex.
func (sched *ssntpSchedulerServer) workloadFits(node *nodeStat, workload *workResources) bool {
	// simple scheduling policy == first fit
	// idiom fix: return the condition directly instead of
	// "if cond { return true }; return false"
	return node.memAvailMB >= workload.memReqMB &&
		node.diskAvailMB >= workload.diskReqMB &&
		node.status == ssntp.READY &&
		networkDemandsSatisfied(node, workload)
}
// sendStartFailureError reports a failed START back to the controller as
// an ssntp.StartFailure error carrying the reason and restart flag.
func (sched *ssntpSchedulerServer) sendStartFailureError(clientUUID string, instanceUUID string, reason payloads.StartFailureReason, restart bool) {
	// idiom fix: the local was previously named "error", shadowing the
	// predeclared error type.
	failure := payloads.ErrorStartFailure{
		InstanceUUID: instanceUUID,
		Reason:       reason,
		Restart:      restart,
	}

	payload, err := yaml.Marshal(&failure)
	if err != nil {
		glog.Errorf("Unable to Marshall Status %v", err)
		return
	}

	glog.Warningf("Unable to dispatch: %v\n", reason)
	sched.ssntp.SendError(clientUUID, ssntp.StartFailure, payload)
}
// getCommandConcentratorUUID unwraps a CNCI-directed command payload and
// returns the target concentrator's UUID.  Unsupported command types
// yield an error.
func (sched *ssntpSchedulerServer) getCommandConcentratorUUID(command ssntp.Command, payload []byte) (string, error) {
	switch command {
	default:
		return "", fmt.Errorf("unsupported ssntp.Command type \"%s\"", command)
	case ssntp.AssignPublicIP:
		var cmd payloads.CommandAssignPublicIP
		err := yaml.Unmarshal(payload, &cmd)
		return cmd.AssignIP.ConcentratorUUID, err
	case ssntp.ReleasePublicIP:
		var cmd payloads.CommandReleasePublicIP
		err := yaml.Unmarshal(payload, &cmd)
		return cmd.ReleaseIP.ConcentratorUUID, err
	}
}

// getEventConcentratorUUID unwraps a CNCI-directed event payload and
// returns the target concentrator's UUID.  Unsupported event types yield
// an error.
func (sched *ssntpSchedulerServer) getEventConcentratorUUID(event ssntp.Event, payload []byte) (string, error) {
	switch event {
	default:
		return "", fmt.Errorf("unsupported ssntp.Event type \"%s\"", event)
	case ssntp.TenantAdded:
		var ev payloads.EventTenantAdded
		err := yaml.Unmarshal(payload, &ev)
		return ev.TenantAdded.ConcentratorUUID, err
	case ssntp.TenantRemoved:
		var ev payloads.EventTenantRemoved
		err := yaml.Unmarshal(payload, &ev)
		return ev.TenantRemoved.ConcentratorUUID, err
	}
}
// fwdCmdToCNCI resolves the CNCI recipient for a CNCI-directed command.
// Payloads that cannot be decoded (or name no concentrator) are
// discarded rather than forwarded.
func (sched *ssntpSchedulerServer) fwdCmdToCNCI(command ssntp.Command, payload []byte) (dest ssntp.ForwardDestination) {
	// since the scheduler is the primary ssntp server, it needs to
	// unwrap CNCI directed command payloads and forward to the right CNCI

	concentratorUUID, err := sched.getCommandConcentratorUUID(command, payload)
	if err != nil || concentratorUUID == "" {
		glog.Errorf("Bad %s command yaml. Unable to forward to CNCI.\n", command)
		dest.SetDecision(ssntp.Discard)
		return
	}

	glog.V(2).Infof("Forwarding %s command to CNCI Agent %s\n", command.String(), concentratorUUID)
	dest.AddRecipient(concentratorUUID)
	return dest
}
// fwdEventToCNCI resolves the CNCI recipient for a CNCI-directed event.
// Payloads that cannot be decoded (or name no concentrator) are
// discarded rather than forwarded.
func (sched *ssntpSchedulerServer) fwdEventToCNCI(event ssntp.Event, payload []byte) (dest ssntp.ForwardDestination) {
	// since the scheduler is the primary ssntp server, it needs to
	// unwrap CNCI directed event payloads and forward to the right CNCI

	concentratorUUID, err := sched.getEventConcentratorUUID(event, payload)
	if err != nil || concentratorUUID == "" {
		glog.Errorf("Bad %s event yaml. Unable to forward to CNCI.\n", event)
		dest.SetDecision(ssntp.Discard)
		return
	}

	// bug fix: the log line previously said "command" for an event and
	// was missing the space before the agent UUID ("Agent%s").
	glog.V(2).Infof("Forwarding %s event to CNCI Agent %s\n", event.String(), concentratorUUID)
	dest.AddRecipient(concentratorUUID)
	return dest
}
// getWorkloadAgentUUID unwraps an instance-directed command payload and
// returns (instanceUUID, workloadAgentUUID).  EVACUATE is node-directed,
// so its instance UUID is empty.  Unsupported commands yield an error.
func getWorkloadAgentUUID(sched *ssntpSchedulerServer, command ssntp.Command, payload []byte) (string, string, error) {
	switch command {
	default:
		return "", "", fmt.Errorf("unsupported ssntp.Command type \"%s\"", command)
	case ssntp.RESTART:
		var cmd payloads.Restart
		err := yaml.Unmarshal(payload, &cmd)
		return cmd.Restart.InstanceUUID, cmd.Restart.WorkloadAgentUUID, err
	case ssntp.STOP:
		var cmd payloads.Stop
		err := yaml.Unmarshal(payload, &cmd)
		return cmd.Stop.InstanceUUID, cmd.Stop.WorkloadAgentUUID, err
	case ssntp.DELETE:
		var cmd payloads.Delete
		err := yaml.Unmarshal(payload, &cmd)
		return cmd.Delete.InstanceUUID, cmd.Delete.WorkloadAgentUUID, err
	case ssntp.EVACUATE:
		var cmd payloads.Evacuate
		err := yaml.Unmarshal(payload, &cmd)
		return "", cmd.Evacuate.WorkloadAgentUUID, err
	case ssntp.AttachVolume:
		var cmd payloads.AttachVolume
		err := yaml.Unmarshal(payload, &cmd)
		return cmd.Attach.InstanceUUID, cmd.Attach.WorkloadAgentUUID, err
	case ssntp.DetachVolume:
		var cmd payloads.DetachVolume
		err := yaml.Unmarshal(payload, &cmd)
		return cmd.Detach.InstanceUUID, cmd.Detach.WorkloadAgentUUID, err
	}
}
// fwdCmdToComputeNode forwards an instance/agent-directed command to the
// launcher named in the payload; undecodable payloads are discarded.
func (sched *ssntpSchedulerServer) fwdCmdToComputeNode(command ssntp.Command, payload []byte) (dest ssntp.ForwardDestination, instanceUUID string) {
	// some commands require no scheduling choice, rather the specified
	// agent/launcher needs the command instead of the scheduler
	instanceUUID, cnDestUUID, err := getWorkloadAgentUUID(sched, command, payload)

	if err != nil || cnDestUUID == "" {
		glog.Errorf("Bad %s command yaml from Controller, WorkloadAgentUUID == %s\n", command.String(), cnDestUUID)
		dest.SetDecision(ssntp.Discard)
		return
	}

	glog.V(2).Infof("Forwarding controller %s command to %s\n", command.String(), cnDestUUID)
	dest.AddRecipient(cnDestUUID)

	return
}
// Decrement resource claims for the referenced locked nodeStat object
// (caller must hold node.mutex).  The claim is speculative and is
// corrected by the node's next READY statistics frame.
// NOTE(review): only memory is decremented even though workloadFits()
// also checks diskAvailMB — confirm whether local disk should be claimed
// here as well.
func (sched *ssntpSchedulerServer) decrementResourceUsage(node *nodeStat, workload *workResources) {
	node.memAvailMB -= workload.memReqMB
}
// Find suitable compute node, returning referenced to a locked nodeStat if found
// IMPORTANT: on success the returned node's mutex is HELD; the caller
// must unlock it (see startWorkload).  The MRU index spreads dispatches:
// nodes after the most recently used one are tried first, then the whole
// list is scanned.
func pickComputeNode(sched *ssntpSchedulerServer, controllerUUID string, workload *workResources, restart bool) (node *nodeStat) {
	sched.cnMutex.RLock()
	defer sched.cnMutex.RUnlock()

	if len(sched.cnList) == 0 {
		glog.Errorf("No compute nodes connected, unable to start workload")
		sched.sendStartFailureError(controllerUUID, workload.instanceUUID, payloads.NoComputeNodes, restart)
		return nil
	}

	/* First try nodes after the MRU */
	if sched.cnMRUIndex != -1 && sched.cnMRUIndex < len(sched.cnList)-1 {
		for i, node := range sched.cnList[sched.cnMRUIndex+1:] {
			node.mutex.Lock()
			if node == sched.cnMRU {
				node.mutex.Unlock()
				continue
			}
			if sched.workloadFits(node, workload) == true {
				// i is relative to the sub-slice, hence the offset math
				sched.cnMRUIndex = sched.cnMRUIndex + 1 + i
				sched.cnMRU = node
				return node // locked nodeStat
			}
			node.mutex.Unlock()
		}
	}

	/* Then try the whole list, including the MRU */
	for i, node := range sched.cnList {
		node.mutex.Lock()
		if sched.workloadFits(node, workload) == true {
			sched.cnMRUIndex = i
			sched.cnMRU = node
			return node // locked nodeStat
		}
		node.mutex.Unlock()
	}

	// nothing fits: report the cloud as full
	sched.sendStartFailureError(controllerUUID, workload.instanceUUID, payloads.FullCloud, restart)
	return nil
}

// Find suitable net node, returning referenced to a locked nodeStat if found
// Same contract and MRU scan as pickComputeNode, over the network node
// list; the returned node's mutex is HELD on success.
func pickNetworkNode(sched *ssntpSchedulerServer, controllerUUID string, workload *workResources, restart bool) (node *nodeStat) {
	sched.nnMutex.RLock()
	defer sched.nnMutex.RUnlock()

	if len(sched.nnList) == 0 {
		glog.Errorf("No network nodes connected, unable to start network workload")
		sched.sendStartFailureError(controllerUUID, workload.instanceUUID, payloads.NoNetworkNodes, restart)
		return nil
	}

	/* First try nodes after the MRU */
	if sched.nnMRUIndex != -1 && sched.nnMRUIndex < len(sched.nnList)-1 {
		for i, node := range sched.nnList[sched.nnMRUIndex+1:] {
			node.mutex.Lock()
			if node == sched.nnMRU {
				node.mutex.Unlock()
				continue
			}
			if sched.workloadFits(node, workload) == true {
				// i is relative to the sub-slice, hence the offset math
				sched.nnMRUIndex = sched.nnMRUIndex + 1 + i
				sched.nnMRU = node
				return node // locked nodeStat
			}
			node.mutex.Unlock()
		}
	}

	/* Then try the whole list, including the MRU */
	for i, node := range sched.nnList {
		node.mutex.Lock()
		if sched.workloadFits(node, workload) == true {
			sched.nnMRUIndex = i
			sched.nnMRU = node
			return node // locked nodeStat
		}
		node.mutex.Unlock()
	}

	sched.sendStartFailureError(controllerUUID, workload.instanceUUID, payloads.NoNetworkNodes, restart)
	return nil
}
// startWorkload performs placement for a START command: decode the
// payload, extract its resource demands, pick a target (network or
// compute) node, speculatively claim memory on it and address the frame
// to it.  Failures discard the frame.
func startWorkload(sched *ssntpSchedulerServer, controllerUUID string, payload []byte) (dest ssntp.ForwardDestination, instanceUUID string) {
	var work payloads.Start
	err := yaml.Unmarshal(payload, &work)
	if err != nil {
		glog.Errorf("Bad START workload yaml from Controller %s: %s\n", controllerUUID, err)
		dest.SetDecision(ssntp.Discard)
		return dest, ""
	}

	workload, err := sched.getWorkloadResources(&work)
	if err != nil {
		glog.Errorf("Bad START workload resource list from Controller %s: %s\n", controllerUUID, err)
		dest.SetDecision(ssntp.Discard)
		return dest, ""
	}

	instanceUUID = workload.instanceUUID

	var targetNode *nodeStat
	if workload.networkNode {
		targetNode = pickNetworkNode(sched, controllerUUID, &workload, work.Start.Restart)
	} else { //workload.network_node == false
		targetNode = pickComputeNode(sched, controllerUUID, &workload, work.Start.Restart)
	}

	// pick*Node() returns with targetNode.mutex HELD (or targetNode nil)
	if targetNode != nil {
		//TODO: mark the targetNode as unavailable until next stats / READY checkin?
		// or is subtracting mem demand sufficiently speculative enough?
		// Goal is to have spread, not schedule "too many" workloads back
		// to back on the same targetNode, but also not add latency to dispatch and
		// hopefully not queue when all nodes have just started a workload.
		sched.decrementResourceUsage(targetNode, &workload)

		dest.AddRecipient(targetNode.uuid)
		targetNode.mutex.Unlock()
	} else {
		// TODO Queue the frame ?
		dest.SetDecision(ssntp.Discard)
	}

	return dest, instanceUUID
}
// CommandForward is the ssntp forwarding hook for controller commands.
// START runs through the scheduler's placement logic; instance-directed
// commands are relayed to the owning launcher; public-IP commands go to
// the CNCI.  Commands from unknown or non-master controllers are
// discarded.
func (sched *ssntpSchedulerServer) CommandForward(controllerUUID string, command ssntp.Command, frame *ssntp.Frame) (dest ssntp.ForwardDestination) {
	payload := frame.Payload
	instanceUUID := ""

	sched.controllerMutex.RLock()
	defer sched.controllerMutex.RUnlock()
	if sched.controllerMap[controllerUUID] == nil {
		glog.Warningf("Ignoring %s command from unknown Controller %s\n", command, controllerUUID)
		dest.SetDecision(ssntp.Discard)
		return
	}
	controller := sched.controllerMap[controllerUUID]
	controller.mutex.Lock()
	if controller.status != controllerMaster {
		// only the master controller may drive the cluster
		glog.Warningf("Ignoring %s command from non-master Controller %s\n", command, controllerUUID)
		dest.SetDecision(ssntp.Discard)
		controller.mutex.Unlock()
		return
	}
	controller.mutex.Unlock()

	// time command processing for the trace log below
	start := time.Now()

	glog.V(2).Infof("Command %s from %s\n", command, controllerUUID)

	switch command {
	// the main command with scheduler processing
	case ssntp.START:
		dest, instanceUUID = startWorkload(sched, controllerUUID, payload)

	case ssntp.RESTART:
		fallthrough
	case ssntp.STOP:
		fallthrough
	case ssntp.DELETE:
		fallthrough
	case ssntp.AttachVolume:
		fallthrough
	case ssntp.DetachVolume:
		fallthrough
	case ssntp.EVACUATE:
		// agent/launcher directed commands: forward to the named node
		dest, instanceUUID = sched.fwdCmdToComputeNode(command, payload)

	case ssntp.AssignPublicIP:
		fallthrough
	case ssntp.ReleasePublicIP:
		dest = sched.fwdCmdToCNCI(command, payload)

	default:
		dest.SetDecision(ssntp.Discard)
	}

	elapsed := time.Since(start)
	glog.V(2).Infof("%s command processed for instance %s in %s\n", command, instanceUUID, elapsed)

	return
}
// CommandNotify is the ssntp command callback; it only logs, since real
// handling happens in CommandForward or via role-based forwarding rules.
func (sched *ssntpSchedulerServer) CommandNotify(uuid string, command ssntp.Command, frame *ssntp.Frame) {
	// Currently all commands are handled by CommandForward, the SSNTP command forwader,
	// or directly by role defined forwarding rules.
	glog.V(2).Infof("COMMAND %v from %s\n", command, uuid)
}

// EventForward is the ssntp forwarding hook for events: tenant add/remove
// events are relayed to the owning CNCI; everything else falls through
// untouched.
func (sched *ssntpSchedulerServer) EventForward(uuid string, event ssntp.Event, frame *ssntp.Frame) (dest ssntp.ForwardDestination) {
	payload := frame.Payload

	// time event processing for the trace log below
	start := time.Now()

	switch event {
	case ssntp.TenantAdded:
		fallthrough
	case ssntp.TenantRemoved:
		dest = sched.fwdEventToCNCI(event, payload)
	}

	elapsed := time.Since(start)
	glog.V(2).Infof("%s event processed for instance %s in %s\n", event.String(), uuid, elapsed)

	return dest
}
// EventNotify is the ssntp event callback; it only logs, since real
// handling happens in EventForward or via role-based forwarding rules.
func (sched *ssntpSchedulerServer) EventNotify(uuid string, event ssntp.Event, frame *ssntp.Frame) {
	// Currently all events are handled by EventForward, the SSNTP command forwader,
	// or directly by role defined forwarding rules.
	glog.V(2).Infof("EVENT %v from %s\n", event, uuid)
}

// ErrorNotify is the ssntp error callback; errors are only logged.
func (sched *ssntpSchedulerServer) ErrorNotify(uuid string, error ssntp.Error, frame *ssntp.Frame) {
	glog.V(2).Infof("ERROR %v from %s\n", error, uuid)
}
// setLimits raises the process's open-file soft limit to its hard limit
// so the scheduler can hold many concurrent client connections.
func setLimits() {
	var rlim syscall.Rlimit
	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlim); err != nil {
		glog.Warningf("Getrlimit failed %v", err)
		return
	}

	glog.Infof("Initial nofile limits: cur %d max %d", rlim.Cur, rlim.Max)

	if rlim.Cur < rlim.Max {
		saved := rlim.Cur
		rlim.Cur = rlim.Max
		if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rlim); err != nil {
			glog.Warningf("Setrlimit failed %v", err)
			rlim.Cur = saved
		}
	}

	glog.Infof("Updated nofile limits: cur %d max %d", rlim.Cur, rlim.Max)
}
// heartBeatControllers renders up to two controllers for the heartbeat
// line: the master (front of list) first, then backups, padded with tabs
// so the compute-node column lines up.
func heartBeatControllers(sched *ssntpSchedulerServer) (s string) {
	// show the first two controller's
	controllerMax := 2
	i := 0

	sched.controllerMutex.RLock()
	defer sched.controllerMutex.RUnlock()

	if len(sched.controllerList) == 0 {
		return " -no Controller- \t\t\t\t\t"
	}

	// first show any master, which is at front of list
	controller := sched.controllerList[0]
	controller.mutex.Lock()
	if controller.status == controllerMaster {
		s += fmt.Sprintf("controller-%s:", controller.uuid[:8])
		s += controller.status.String()
		controller.mutex.Unlock()
		i++
		if i <= controllerMax && len(sched.controllerList) > i {
			s += ", "
		} else {
			s += "\t"
		}
	} else {
		// bug fix: the mutex was previously left locked on this path,
		// which would deadlock the backup loop below when it re-locks
		// this same controller (the loop starts at index 0 when i == 0).
		controller.mutex.Unlock()
	}

	// second show any backup(s)
	for _, controller := range sched.controllerList[i:] {
		if i == controllerMax {
			break
		}

		controller.mutex.Lock()
		if controller.status == controllerMaster {
			controller.mutex.Unlock()
			glog.Errorf("multiple controller masters")
			return "ERROR multiple controller masters"
		}
		s += fmt.Sprintf("controller-%s:", controller.uuid[:8])
		s += controller.status.String()
		controller.mutex.Unlock()
		i++
		if i < controllerMax && len(sched.controllerList) > i {
			s += ", "
		} else {
			s += "\t"
		}
	}

	// finish with some whitespace ahead of compute nodes
	if i < controllerMax {
		s += "\t\t\t"
	} else {
		s += "\t"
	}

	return s
}
// heartBeatComputeNodes renders up to four compute nodes as
// "node-<uuid8>:<status>[*]:<memAvail>/<memTotal>,<load>"; the '*' marks
// the most recently used dispatch target.
func heartBeatComputeNodes(sched *ssntpSchedulerServer) (s string) {
	// show the first four compute nodes
	cnMax := 4
	i := 0

	sched.cnMutex.RLock()
	defer sched.cnMutex.RUnlock()

	for _, node := range sched.cnList {
		node.mutex.Lock()
		s += fmt.Sprintf("node-%s:", node.uuid[:8])
		s += node.status.String()
		if node == sched.cnMRU {
			s += "*"
		}
		s += ":" + fmt.Sprintf("%d/%d,%d",
			node.memAvailMB,
			node.memTotalMB,
			node.load)
		node.mutex.Unlock()

		i++
		if i == cnMax {
			break
		}
		if i <= cnMax && len(sched.cnList) > i {
			s += ", "
		}
	}
	if i == 0 {
		s += " -no Compute Nodes-"
	}

	return s
}
// heartBeatHeaderFreq is how many beats pass between column-header reprints.
const heartBeatHeaderFreq = 22

// heartBeat sleeps one second, then renders a single status line (plus an
// occasional column header) covering controllers and compute nodes.
func heartBeat(sched *ssntpSchedulerServer, iter int) string {
	var beatTxt string
	time.Sleep(time.Duration(1) * time.Second)

	// quick check: nothing connected at all
	sched.controllerMutex.RLock()
	sched.cnMutex.RLock()
	if len(sched.controllerList) == 0 && len(sched.cnList) == 0 {
		sched.controllerMutex.RUnlock()
		sched.cnMutex.RUnlock()
		return "** idle / disconnected **\n"
	}
	sched.controllerMutex.RUnlock()
	sched.cnMutex.RUnlock()

	// NOTE(review): iter is a by-value copy, so this increment is only
	// visible within this call — the caller must advance its own counter
	// for the header to reappear periodically.
	iter++
	if iter%heartBeatHeaderFreq == 0 {
		//output a column indication occasionally
		beatTxt = "Controllers\t\t\t\t\tCompute Nodes\n"
	}
	beatTxt += heartBeatControllers(sched) + heartBeatComputeNodes(sched)
	return beatTxt
}
// heartBeatLoop logs scheduler status forever; heartBeat() itself sleeps
// one second per call, pacing this loop.
func heartBeatLoop(sched *ssntpSchedulerServer) {
	iter := 0
	for {
		log.Printf("%s\n", heartBeat(sched, iter))
		// BUG FIX: iter was never incremented, so the column header
		// (emitted every heartBeatHeaderFreq beats) never appeared.
		iter++
	}
}
// toggleDebug enables optional debug facilities: CPU profiling (when
// -cpuprofile is set) and the textual status heartbeat loop.
func toggleDebug(sched *ssntpSchedulerServer) {
	if len(sched.cpuprofile) != 0 {
		f, err := os.Create(sched.cpuprofile)
		if err != nil {
			glog.Warningf("unable to initialize cpuprofile (%s)", err)
		}
		// NOTE(review): if os.Create failed, f is nil here — confirm
		// StartCPUProfile tolerates a nil writer.
		pprof.StartCPUProfile(f)
		// NOTE(review): this defer fires when toggleDebug returns, i.e.
		// almost immediately, so the profile covers only this function
		// rather than the process lifetime — verify intent.
		defer pprof.StopCPUProfile()
	}
	/* glog's --logtostderr and -v=2 are probably really what you want..
	sched.config.Trace = os.Stdout
	sched.config.Error = os.Stdout
	sched.config.DebugInterface = false
	*/
	if sched.heartbeat {
		go heartBeatLoop(sched)
	}
}
// setSSNTPForwardRules installs the scheduler's frame forwarding table:
// stats, events and errors fan out to all Controllers; commands and
// tenant events route through the scheduler's CommandForward/EventForward
// callbacks for per-frame destination decisions.
func setSSNTPForwardRules(sched *ssntpSchedulerServer) {
	sched.config.ForwardRules = []ssntp.FrameForwardRule{
		{ // all STATS commands go to all Controllers
			Operand: ssntp.STATS,
			Dest:    ssntp.Controller,
		},
		{ // all TraceReport events go to all Controllers
			Operand: ssntp.TraceReport,
			Dest:    ssntp.Controller,
		},
		{ // all InstanceDeleted events go to all Controllers
			Operand: ssntp.InstanceDeleted,
			Dest:    ssntp.Controller,
		},
		{ // all InstanceStopped events go to all Controllers
			Operand: ssntp.InstanceStopped,
			Dest:    ssntp.Controller,
		},
		{ // all ConcentratorInstanceAdded events go to all Controllers
			Operand: ssntp.ConcentratorInstanceAdded,
			Dest:    ssntp.Controller,
		},
		{ // all StartFailure errors go to all Controllers
			Operand: ssntp.StartFailure,
			Dest:    ssntp.Controller,
		},
		{ // all StopFailure errors go to all Controllers
			Operand: ssntp.StopFailure,
			Dest:    ssntp.Controller,
		},
		{ // all RestartFailure errors go to all Controllers
			Operand: ssntp.RestartFailure,
			Dest:    ssntp.Controller,
		},
		{ // all DeleteFailure errors go to all Controllers
			Operand: ssntp.DeleteFailure,
			Dest:    ssntp.Controller,
		},
		{ // all PublicIPAssigned events go to all Controllers
			Operand: ssntp.PublicIPAssigned,
			Dest:    ssntp.Controller,
		},
		{ // all AssignPublicIPFailure events go to all Controllers
			Operand: ssntp.AssignPublicIPFailure,
			Dest:    ssntp.Controller,
		},
		{ // all PublicIPUnassigned events go to all Controllers
			Operand: ssntp.PublicIPUnassigned,
			Dest:    ssntp.Controller,
		},
		{ // all UnassignPublicIPFailure events go to all Controllers
			Operand: ssntp.UnassignPublicIPFailure,
			Dest:    ssntp.Controller,
		},
		{ // all START command are processed by the Command forwarder
			Operand:        ssntp.START,
			CommandForward: sched,
		},
		{ // all RESTART command are processed by the Command forwarder
			Operand:        ssntp.RESTART,
			CommandForward: sched,
		},
		{ // all STOP command are processed by the Command forwarder
			Operand:        ssntp.STOP,
			CommandForward: sched,
		},
		{ // all DELETE command are processed by the Command forwarder
			Operand:        ssntp.DELETE,
			CommandForward: sched,
		},
		{ // all EVACUATE command are processed by the Command forwarder
			Operand:        ssntp.EVACUATE,
			CommandForward: sched,
		},
		{ // all TenantAdded events are processed by the Event forwarder
			Operand:      ssntp.TenantAdded,
			EventForward: sched,
		},
		{ // all TenantRemoved events are processed by the Event forwarder
			Operand:      ssntp.TenantRemoved,
			EventForward: sched,
		},
		{ // all AttachVolume command are processed by the Command forwarder
			Operand:        ssntp.AttachVolume,
			CommandForward: sched,
		},
		{ // all DetachVolume command are processed by the Command forwarder
			Operand:        ssntp.DetachVolume,
			CommandForward: sched,
		},
		{ // all AttachVolumeFailure errors go to all Controllers
			Operand: ssntp.AttachVolumeFailure,
			Dest:    ssntp.Controller,
		},
		{ // all DetachVolumeFailure errors go to all Controllers
			Operand: ssntp.DetachVolumeFailure,
			Dest:    ssntp.Controller,
		},
		{ // all AssignPublicIP commands are processed by the Command forwarder
			Operand:        ssntp.AssignPublicIP,
			CommandForward: sched,
		},
		{ // all ReleasePublicIP commands are processed by the Command forwarder
			Operand:        ssntp.ReleasePublicIP,
			CommandForward: sched,
		},
	}
}
// initLogger points glog at the scheduler's default log directory when
// the user gave no -log_dir, and ensures the directory exists.
func initLogger() error {
	f := flag.Lookup("log_dir")
	if f == nil {
		return fmt.Errorf("log_dir does not exist")
	}
	// fall back to the package default only when the flag is unset
	if f.Value.String() == "" {
		if err := f.Value.Set(logDir); err != nil {
			return err
		}
	}
	if err := os.MkdirAll(f.Value.String(), 0755); err != nil {
		return fmt.Errorf("Unable to create log directory (%s) %v", logDir, err)
	}
	return nil
}
// configSchedulerServer builds the scheduler server from the parsed
// command line flags: raises fd limits, starts debug facilities and
// installs the SSNTP forwarding rules.
func configSchedulerServer() (sched *ssntpSchedulerServer) {
	setLimits()
	sched = newSsntpSchedulerServer()
	sched.cpuprofile = *cpuprofile
	sched.heartbeat = *heartbeat
	// toggleDebug reads the two fields assigned just above
	toggleDebug(sched)
	sched.config = &ssntp.Config{
		CAcert: *cacert,
		Cert: *cert,
		ConfigURI: *configURI,
	}
	setSSNTPForwardRules(sched)
	return sched
}
// main parses flags, initialises logging, prepares the host OS, then
// configures and runs the SSNTP scheduler server (Serve blocks for the
// lifetime of the process).
func main() {
	flag.Parse()
	if err := initLogger(); err != nil {
		fmt.Printf("Unable to initialise logs: %v", err)
		return
	}
	glog.Info("Starting Scheduler")
	logger := gloginterface.CiaoGlogLogger{}
	osprepare.Bootstrap(context.TODO(), logger)
	osprepare.InstallDeps(context.TODO(), schedDeps, logger)
	sched := configSchedulerServer()
	if sched == nil {
		glog.Errorf("unable to configure scheduler")
		return
	}
	sched.ssntp.Serve(sched.config, sched)
}
ciao-scheduler: Send node disconnect messages with correct type
Messages for both node connected and disconnected were being erroneously
sent with a type of ssntp.NodeConnected. This meant controller was not
correctly noticing when nodes were being disconnected.
Signed-off-by: Rob Bradford <c49e1dc14cd52e2644816db47123896e0ac79025@intel.com>
//
// Copyright (c) 2016 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package main
import (
"context"
"flag"
"fmt"
"log"
"os"
"runtime/pprof"
"sync"
"syscall"
"time"
"github.com/01org/ciao/clogger/gloginterface"
"github.com/01org/ciao/osprepare"
"github.com/01org/ciao/payloads"
"github.com/01org/ciao/ssntp"
"github.com/golang/glog"
"github.com/pkg/errors"
"gopkg.in/yaml.v2"
)
// Command line flags; certificate paths default to the standard ciao PKI
// locations for a localhost install.
var cert = flag.String("cert", "/etc/pki/ciao/cert-Scheduler-localhost.pem", "Server certificate")
var cacert = flag.String("cacert", "/etc/pki/ciao/CAcert-server-localhost.pem", "CA certificate")
var cpuprofile = flag.String("cpuprofile", "", "Write cpu profile to file")
var heartbeat = flag.Bool("heartbeat", false, "Emit status heartbeat text")
// logDir is the default glog output directory, applied by initLogger()
// when -log_dir is not given on the command line.
var logDir = "/var/lib/ciao/logs/scheduler"
var configURI = flag.String("configuration-uri", "file:///etc/ciao/configuration.yaml",
	"Cluster configuration URI")
// ssntpSchedulerServer carries all scheduler state: the SSNTP server and
// its configuration, plus the tracked controller, compute node and
// network node populations with their MRU dispatch cursors.
type ssntpSchedulerServer struct {
	// user config overrides ------------------------------------------
	heartbeat bool
	cpuprofile string
	// ssntp ----------------------------------------------------------
	config *ssntp.Config
	ssntp ssntp.Server
	// scheduler internal state ---------------------------------------
	// Command & Status Reporting node(s)
	controllerMap map[string]*controllerStat
	controllerList []*controllerStat // 1 controllerMaster at front of list
	controllerMutex sync.RWMutex // Rlock traversing map, Lock modifying map
	// Compute Nodes
	cnMap map[string]*nodeStat
	cnList []*nodeStat
	cnMutex sync.RWMutex // Rlock traversing map, Lock modifying map
	cnMRU *nodeStat // most-recently-used dispatch target, nil if invalid
	cnMRUIndex int // index of cnMRU in cnList, -1 if invalid
	//cnInactiveMap map[string]nodeStat
	// Network Nodes
	nnMap map[string]*nodeStat
	nnList []*nodeStat
	nnMutex sync.RWMutex // Rlock traversing map, Lock modifying map
	nnMRU *nodeStat // most-recently-used dispatch target, nil if invalid
	nnMRUIndex int // index of nnMRU in nnList, -1 if invalid
}
// newSsntpSchedulerServer allocates a scheduler server with empty client
// maps and the MRU indices marked invalid (-1).
func newSsntpSchedulerServer() *ssntpSchedulerServer {
	sched := &ssntpSchedulerServer{}
	sched.controllerMap = make(map[string]*controllerStat)
	sched.cnMap = make(map[string]*nodeStat)
	sched.cnMRUIndex = -1
	sched.nnMap = make(map[string]*nodeStat)
	sched.nnMRUIndex = -1
	return sched
}
// nodeStat is the scheduler's view of one launcher node, refreshed from
// the node's READY status frames (see updateNodeStat).
type nodeStat struct {
	mutex sync.Mutex
	status ssntp.Status
	uuid string
	memTotalMB int
	memAvailMB int
	diskTotalMB int
	diskAvailMB int
	load int
	cpus int
	isNetNode bool // true for network nodes (nnList), false for compute nodes (cnList)
	networks []payloads.NetworkStat
}
// controllerStatus distinguishes the single master controller from
// backup controllers.
type controllerStatus uint8

const (
	controllerMaster controllerStatus = iota
	controllerBackup
)

// String renders a controllerStatus for human-readable status output;
// unknown values render as the empty string.
func (s controllerStatus) String() string {
	if s == controllerMaster {
		return "MASTER"
	}
	if s == controllerBackup {
		return "BACKUP"
	}
	return ""
}
// controllerStat tracks one connected controller and its master/backup
// role. mutex guards status.
type controllerStat struct {
	mutex sync.Mutex
	status controllerStatus
	uuid string
}
// prepareNodeConnectionEvent marshals a NodeConnected or NodeDisconnected
// YAML payload for the given node; connected selects which wrapper type
// is used.
func prepareNodeConnectionEvent(nodeUUID string, nodeType payloads.Resource, connected bool) (b []byte, err error) {
	event := payloads.NodeConnectedEvent{
		NodeUUID: nodeUUID,
		NodeType: nodeType,
	}
	// idiom fix: compare booleans directly, not against true
	if connected {
		payload := payloads.NodeConnected{
			Connected: event,
		}
		b, err = yaml.Marshal(&payload)
	} else {
		payload := payloads.NodeDisconnected{
			Disconnected: event,
		}
		b, err = yaml.Marshal(&payload)
	}
	return
}
// The ssntp server implementation is expected to generate ssntp client
// connect/disconnect events. This function sends to one controller a
// connect event for each currently connected node.
func (sched *ssntpSchedulerServer) sendDirectedNodeConnectionEvents(ctlUUID string) {
	sched.cnMutex.RLock()
	defer sched.cnMutex.RUnlock()
	for _, node := range sched.cnList {
		b, err := prepareNodeConnectionEvent(node.uuid, payloads.ComputeNode, true)
		if err != nil {
			// BUG FIX: errors.Wrap()'s return value was previously
			// discarded, silently swallowing the failure; log it.
			glog.Warningf("Node connection event lost: %v", err)
			continue
		}
		sched.ssntp.SendEvent(ctlUUID, ssntp.NodeConnected, b)
	}
	sched.nnMutex.RLock()
	defer sched.nnMutex.RUnlock()
	for _, node := range sched.nnList {
		b, err := prepareNodeConnectionEvent(node.uuid, payloads.NetworkNode, true)
		if err != nil {
			glog.Warningf("Node connection event lost: %v", err)
			continue
		}
		sched.ssntp.SendEvent(ctlUUID, ssntp.NodeConnected, b)
	}
}
// The ssntp server implementation is expected to generate ssntp client
// connect/disconnect events. This function sends them to all controllers.
func (sched *ssntpSchedulerServer) sendNodeConnectionEvents(nodeUUID string, nodeType payloads.Resource, connected bool) {
	b, err := prepareNodeConnectionEvent(nodeUUID, nodeType, connected)
	if err != nil {
		// BUG FIX: previously the error was dropped (errors.Wrap result
		// discarded) and a nil payload was still sent to all
		// controllers; log and bail out instead.
		glog.Warningf("Node connection event lost: %v", err)
		return
	}
	sched.controllerMutex.RLock()
	defer sched.controllerMutex.RUnlock()
	for _, ctl := range sched.controllerMap {
		if connected {
			sched.ssntp.SendEvent(ctl.uuid, ssntp.NodeConnected, b)
		} else {
			sched.ssntp.SendEvent(ctl.uuid, ssntp.NodeDisconnected, b)
		}
	}
}
// Add state for newly connected Controller
// This function is symmetric with disconnectController().
// The first controller (or any controller arriving while the front of
// the list is a backup) becomes master; later arrivals become backups.
func connectController(sched *ssntpSchedulerServer, uuid string) {
	sched.controllerMutex.Lock()
	defer sched.controllerMutex.Unlock()
	if sched.controllerMap[uuid] != nil {
		glog.Warningf("Unexpected reconnect from controller %s\n", uuid)
		return
	}
	var controller controllerStat
	controller.uuid = uuid
	// TODO: smarter clustering than "assume master, unless another is master"
	if len(sched.controllerList) == 0 || sched.controllerList[0].status == controllerBackup {
		// master at front of the list
		controller.status = controllerMaster
		sched.controllerList = append([]*controllerStat{&controller}, sched.controllerList...)
	} else { // already have a master
		// backup controllers at the end of the list
		controller.status = controllerBackup
		sched.controllerList = append(sched.controllerList, &controller)
	}
	sched.controllerMap[uuid] = &controller
	// In case launcher clients are already connected, generate a node
	// connection event for all nodes, sent only to this new controller.
	sched.sendDirectedNodeConnectionEvents(uuid)
}
// Undo previous state additions for departed Controller
// This function is symmetric with connectController().
// If the departed controller was master, the first backup found is
// promoted and moved to the front of the list.
func disconnectController(sched *ssntpSchedulerServer, uuid string) {
	sched.controllerMutex.Lock()
	defer sched.controllerMutex.Unlock()
	controller := sched.controllerMap[uuid]
	if controller == nil {
		glog.Warningf("Unexpected disconnect from controller %s\n", uuid)
		return
	}
	// delete from map, remove from list
	delete(sched.controllerMap, uuid)
	for i, c := range sched.controllerList {
		if c != controller {
			continue
		}
		sched.controllerList = append(sched.controllerList[:i], sched.controllerList[i+1:]...)
		// BUG FIX: stop after removing the unique entry; continuing to
		// range over a slice that was just mutated in place can revisit
		// shifted elements.
		break
	}
	if controller.status == controllerBackup {
		return
	} // else promote a new master
	for i, c := range sched.controllerList {
		c.mutex.Lock()
		if c.status == controllerBackup {
			c.status = controllerMaster
			//TODO: inform the Controller it is master
			c.mutex.Unlock()
			// move to front of list
			front := sched.controllerList[:i]
			back := sched.controllerList[i+1:]
			sched.controllerList = append([]*controllerStat{c}, front...)
			sched.controllerList = append(sched.controllerList, back...)
			break
		}
		c.mutex.Unlock()
	}
}
// Add state for newly connected Compute Node
// This function is symmetric with disconnectComputeNode().
func connectComputeNode(sched *ssntpSchedulerServer, uuid string) {
	sched.cnMutex.Lock()
	defer sched.cnMutex.Unlock()
	if sched.cnMap[uuid] != nil {
		glog.Warningf("Unexpected reconnect from compute node %s\n", uuid)
		return
	}
	var node nodeStat
	node.status = ssntp.CONNECTED
	node.uuid = uuid
	node.isNetNode = false
	sched.cnList = append(sched.cnList, &node)
	sched.cnMap[uuid] = &node
	// notify all controllers asynchronously; do not hold cnMutex hostage
	// to controller event delivery
	go sched.sendNodeConnectionEvents(uuid, payloads.ComputeNode, true)
}
// Undo previous state additions for departed Compute Node
// This function is symmetric with connectComputeNode().
func disconnectComputeNode(sched *ssntpSchedulerServer, uuid string) {
	sched.cnMutex.Lock()
	defer sched.cnMutex.Unlock()
	node := sched.cnMap[uuid]
	if node == nil {
		glog.Warningf("Unexpected disconnect from compute node %s\n", uuid)
		return
	}
	//TODO: consider moving to cnInactiveMap?
	delete(sched.cnMap, uuid)
	for i, n := range sched.cnList {
		if n != node {
			continue
		}
		sched.cnList = append(sched.cnList[:i], sched.cnList[i+1:]...)
		// BUG FIX: stop after removing the unique entry; continuing to
		// range over a slice mutated in place can revisit elements.
		break
	}
	// NOTE(review): if the removed node sat before cnMRUIndex in the
	// list, cnMRUIndex is now stale by one — confirm pick* tolerates it.
	if node == sched.cnMRU {
		sched.cnMRU = nil
		sched.cnMRUIndex = -1
	}
	go sched.sendNodeConnectionEvents(uuid, payloads.ComputeNode, false)
}
// Add state for newly connected Network Node
// This function is symmetric with disconnectNetworkNode().
func connectNetworkNode(sched *ssntpSchedulerServer, uuid string) {
	sched.nnMutex.Lock()
	defer sched.nnMutex.Unlock()
	if sched.nnMap[uuid] != nil {
		glog.Warningf("Unexpected reconnect from network compute node %s\n", uuid)
		return
	}
	var node nodeStat
	node.status = ssntp.CONNECTED
	node.uuid = uuid
	node.isNetNode = true
	sched.nnList = append(sched.nnList, &node)
	sched.nnMap[uuid] = &node
	// notify all controllers asynchronously
	go sched.sendNodeConnectionEvents(uuid, payloads.NetworkNode, true)
}
// Undo previous state additions for departed Network Node
// This function is symmetric with connectNetworkNode().
func disconnectNetworkNode(sched *ssntpSchedulerServer, uuid string) {
	sched.nnMutex.Lock()
	defer sched.nnMutex.Unlock()
	node := sched.nnMap[uuid]
	if node == nil {
		glog.Warningf("Unexpected disconnect from network compute node %s\n", uuid)
		return
	}
	//TODO: consider moving to nnInactiveMap?
	delete(sched.nnMap, uuid)
	for i, n := range sched.nnList {
		if n != node {
			continue
		}
		sched.nnList = append(sched.nnList[:i], sched.nnList[i+1:]...)
		// BUG FIX: stop after removing the unique entry; continuing to
		// range over a slice mutated in place can revisit elements.
		break
	}
	if node == sched.nnMRU {
		sched.nnMRU = nil
		sched.nnMRUIndex = -1
	}
	go sched.sendNodeConnectionEvents(uuid, payloads.NetworkNode, false)
}
// ConnectNotify is the SSNTP connect callback: it registers the new
// client under every role it holds (a client may carry several roles).
func (sched *ssntpSchedulerServer) ConnectNotify(uuid string, role ssntp.Role) {
	if role.IsController() {
		connectController(sched, uuid)
	}
	if role.IsAgent() {
		connectComputeNode(sched, uuid)
	}
	if role.IsNetAgent() {
		connectNetworkNode(sched, uuid)
	}
	glog.V(2).Infof("Connect (role 0x%x, uuid=%s)\n", role, uuid)
}
// DisconnectNotify is the SSNTP disconnect callback: it tears down the
// per-role state that ConnectNotify built.
func (sched *ssntpSchedulerServer) DisconnectNotify(uuid string, role ssntp.Role) {
	if role.IsController() {
		disconnectController(sched, uuid)
	}
	if role.IsAgent() {
		disconnectComputeNode(sched, uuid)
	}
	if role.IsNetAgent() {
		disconnectNetworkNode(sched, uuid)
	}
	// BUG FIX: this log line previously said "Connect", making
	// disconnects indistinguishable from connects in the logs.
	glog.V(2).Infof("Disconnect (role 0x%x, uuid=%s)\n", role, uuid)
}
// updateNodeStat refreshes a node's cached statistics from an SSNTP
// status frame. Only READY frames carry statistics; any other status
// just updates node.status.
func (sched *ssntpSchedulerServer) updateNodeStat(node *nodeStat, status ssntp.Status, frame *ssntp.Frame) {
	payload := frame.Payload
	node.mutex.Lock()
	defer node.mutex.Unlock()
	node.status = status
	switch node.status {
	case ssntp.READY:
		//pull in client's READY status frame transmitted statistics
		var stats payloads.Ready
		err := yaml.Unmarshal(payload, &stats)
		if err != nil {
			glog.Errorf("Bad READY yaml for node %s\n", node.uuid)
			return
		}
		node.memTotalMB = stats.MemTotalMB
		node.memAvailMB = stats.MemAvailableMB
		node.diskTotalMB = stats.DiskTotalMB
		node.diskAvailMB = stats.DiskAvailableMB
		node.load = stats.Load
		node.cpus = stats.CpusOnline
		node.networks = stats.Networks
		//any changes to the payloads.Ready struct should be
		//accompanied by a change here
	}
}
// StatusNotify is the SSNTP status callback: it refreshes the cached
// stats of the compute and/or network node that sent the frame; frames
// from unknown or disconnected senders are ignored.
func (sched *ssntpSchedulerServer) StatusNotify(uuid string, status ssntp.Status, frame *ssntp.Frame) {
	// for now only pay attention to READY status
	role, err := sched.ssntp.ClientRole(uuid)
	if err != nil {
		glog.Errorf("STATUS ignored from disconnected client %s", uuid)
		return
	}
	glog.V(2).Infof("STATUS %v from %s (%s)\n", status, uuid, role.String())
	if role.IsAgent() {
		var cn *nodeStat
		sched.cnMutex.RLock()
		defer sched.cnMutex.RUnlock()
		if sched.cnMap[uuid] != nil {
			cn = sched.cnMap[uuid]
			sched.updateNodeStat(cn, status, frame)
		}
	}
	if role.IsNetAgent() {
		var nn *nodeStat
		sched.nnMutex.RLock()
		defer sched.nnMutex.RUnlock()
		if sched.nnMap[uuid] != nil {
			nn = sched.nnMap[uuid]
			sched.updateNodeStat(nn, status, frame)
		}
	}
}
// workResources is the distilled resource demand of one START request,
// extracted from its payload by getWorkloadResources().
type workResources struct {
	instanceUUID string // UUID of the instance being started
	memReqMB int // requested memory, MB
	diskReqMB int // requested local disk, MB
	networkNode bool // workload must run on a network node
	physNets []string // physical networks a network node must expose
}
// getWorkloadResources extracts and validates the resource demands of a
// START payload: memory, local disk (from local volumes), network node
// placement and required physical networks.
func (sched *ssntpSchedulerServer) getWorkloadResources(work *payloads.Start) (workload workResources, err error) {
	// loop the array to find resources
	for idx := range work.Start.RequestedResources {
		reqType := work.Start.RequestedResources[idx].Type
		reqValue := work.Start.RequestedResources[idx].Value
		reqString := work.Start.RequestedResources[idx].ValueString
		// memory:
		if reqType == payloads.MemMB {
			workload.memReqMB = reqValue
		}
		// network node
		if reqType == payloads.NetworkNode {
			wantsNetworkNode := reqValue
			// validate input: requested resource values are always integers
			if wantsNetworkNode != 0 && wantsNetworkNode != 1 {
				return workload, fmt.Errorf("invalid start payload resource demand: network_node (%d) is not 0 or 1", wantsNetworkNode)
			}
			// convert to more natural bool for local struct
			if wantsNetworkNode == 1 {
				workload.networkNode = true
			} else { //wantsNetworkNode == 0
				workload.networkNode = false
			}
		}
		// network node physical networks
		// NOTE(review): this only collects PhysicalNetwork entries seen
		// AFTER the NetworkNode entry in RequestedResources — confirm
		// the payload guarantees that ordering.
		if workload.networkNode {
			if reqType == payloads.PhysicalNetwork {
				workload.physNets = append(workload.physNets, reqString)
			}
		}
		// etc...
	}
	// volumes: each local volume adds its size (GB -> MB) to disk demand
	for _, volume := range work.Start.Storage {
		if volume.Local {
			workload.diskReqMB += volume.Size * 1024
		}
	}
	// validate the found resources
	if workload.memReqMB <= 0 {
		return workload, fmt.Errorf("invalid start payload resource demand: mem_mb (%d) <= 0, must be > 0", workload.memReqMB)
	}
	if workload.diskReqMB < 0 {
		return workload, fmt.Errorf("invalid start payload local disk demand: disk MB (%d) < 0, must be >= 0", workload.diskReqMB)
	}
	// note the uuid
	workload.instanceUUID = work.Start.InstanceUUID
	return workload, nil
}
// networkDemandsSatisfied reports whether the node exposes every physical
// network the workload requires. Non-network nodes trivially satisfy
// (they carry no physical network demands).
func networkDemandsSatisfied(node *nodeStat, workload *workResources) bool {
	if !node.isNetNode {
		return true
	}
	for _, wanted := range workload.physNets {
		found := false
		for _, available := range node.networks {
			if wanted == available.NodeIP {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}
// Check resource demands are satisfiable by the referenced, locked nodeStat object
// The policy is first fit: a READY node with enough memory and disk that
// satisfies all network demands is accepted.
func (sched *ssntpSchedulerServer) workloadFits(node *nodeStat, workload *workResources) bool {
	return node.memAvailMB >= workload.memReqMB &&
		node.diskAvailMB >= workload.diskReqMB &&
		node.status == ssntp.READY &&
		networkDemandsSatisfied(node, workload)
}
// sendStartFailureError reports a workload dispatch failure back to the
// requesting controller as an ssntp.StartFailure error frame.
func (sched *ssntpSchedulerServer) sendStartFailureError(clientUUID string, instanceUUID string, reason payloads.StartFailureReason, restart bool) {
	// renamed from "error", which shadowed the builtin error type
	failure := payloads.ErrorStartFailure{
		InstanceUUID: instanceUUID,
		Reason:       reason,
		Restart:      restart,
	}
	payload, err := yaml.Marshal(&failure)
	if err != nil {
		glog.Errorf("Unable to Marshall Status %v", err)
		return
	}
	glog.Warningf("Unable to dispatch: %v\n", reason)
	sched.ssntp.SendError(clientUUID, ssntp.StartFailure, payload)
}
// getCommandConcentratorUUID extracts the destination CNCI uuid from a
// CNCI-directed command payload; unsupported commands are an error.
func (sched *ssntpSchedulerServer) getCommandConcentratorUUID(command ssntp.Command, payload []byte) (string, error) {
	switch command {
	default:
		return "", fmt.Errorf("unsupported ssntp.Command type \"%s\"", command)
	case ssntp.AssignPublicIP:
		var cmd payloads.CommandAssignPublicIP
		err := yaml.Unmarshal(payload, &cmd)
		return cmd.AssignIP.ConcentratorUUID, err
	case ssntp.ReleasePublicIP:
		var cmd payloads.CommandReleasePublicIP
		err := yaml.Unmarshal(payload, &cmd)
		return cmd.ReleaseIP.ConcentratorUUID, err
	}
}
// getEventConcentratorUUID extracts the destination CNCI uuid from a
// CNCI-directed event payload; unsupported events are an error.
func (sched *ssntpSchedulerServer) getEventConcentratorUUID(event ssntp.Event, payload []byte) (string, error) {
	switch event {
	default:
		return "", fmt.Errorf("unsupported ssntp.Event type \"%s\"", event)
	case ssntp.TenantAdded:
		var ev payloads.EventTenantAdded
		err := yaml.Unmarshal(payload, &ev)
		return ev.TenantAdded.ConcentratorUUID, err
	case ssntp.TenantRemoved:
		var ev payloads.EventTenantRemoved
		err := yaml.Unmarshal(payload, &ev)
		return ev.TenantRemoved.ConcentratorUUID, err
	}
}
// fwdCmdToCNCI routes a CNCI-directed command to the CNCI named inside
// its payload, or discards the frame if the payload is unusable.
func (sched *ssntpSchedulerServer) fwdCmdToCNCI(command ssntp.Command, payload []byte) (dest ssntp.ForwardDestination) {
	// since the scheduler is the primary ssntp server, it needs to
	// unwrap CNCI directed command payloads and forward to the right CNCI
	concentratorUUID, err := sched.getCommandConcentratorUUID(command, payload)
	if err != nil || concentratorUUID == "" {
		glog.Errorf("Bad %s command yaml. Unable to forward to CNCI.\n", command)
		dest.SetDecision(ssntp.Discard)
		return
	}
	glog.V(2).Infof("Forwarding %s command to CNCI Agent %s\n", command.String(), concentratorUUID)
	dest.AddRecipient(concentratorUUID)
	return dest
}
// fwdEventToCNCI routes a CNCI-directed event to the CNCI named inside
// its payload, or discards the frame if the payload is unusable.
func (sched *ssntpSchedulerServer) fwdEventToCNCI(event ssntp.Event, payload []byte) (dest ssntp.ForwardDestination) {
	// since the scheduler is the primary ssntp server, it needs to
	// unwrap CNCI directed event payloads and forward to the right CNCI
	concentratorUUID, err := sched.getEventConcentratorUUID(event, payload)
	if err != nil || concentratorUUID == "" {
		glog.Errorf("Bad %s event yaml. Unable to forward to CNCI.\n", event)
		dest.SetDecision(ssntp.Discard)
		return
	}
	// BUG FIX: this log line previously said "command" for an event and
	// was missing the space before the agent UUID.
	glog.V(2).Infof("Forwarding %s event to CNCI Agent %s\n", event.String(), concentratorUUID)
	dest.AddRecipient(concentratorUUID)
	return dest
}
// getWorkloadAgentUUID extracts (instanceUUID, workloadAgentUUID) from an
// agent-directed command payload. EVACUATE targets a node, not an
// instance, so its instance uuid is empty.
func getWorkloadAgentUUID(sched *ssntpSchedulerServer, command ssntp.Command, payload []byte) (string, string, error) {
	switch command {
	default:
		return "", "", fmt.Errorf("unsupported ssntp.Command type \"%s\"", command)
	case ssntp.RESTART:
		var cmd payloads.Restart
		err := yaml.Unmarshal(payload, &cmd)
		return cmd.Restart.InstanceUUID, cmd.Restart.WorkloadAgentUUID, err
	case ssntp.STOP:
		var cmd payloads.Stop
		err := yaml.Unmarshal(payload, &cmd)
		return cmd.Stop.InstanceUUID, cmd.Stop.WorkloadAgentUUID, err
	case ssntp.DELETE:
		var cmd payloads.Delete
		err := yaml.Unmarshal(payload, &cmd)
		return cmd.Delete.InstanceUUID, cmd.Delete.WorkloadAgentUUID, err
	case ssntp.EVACUATE:
		var cmd payloads.Evacuate
		err := yaml.Unmarshal(payload, &cmd)
		return "", cmd.Evacuate.WorkloadAgentUUID, err
	case ssntp.AttachVolume:
		var cmd payloads.AttachVolume
		err := yaml.Unmarshal(payload, &cmd)
		return cmd.Attach.InstanceUUID, cmd.Attach.WorkloadAgentUUID, err
	case ssntp.DetachVolume:
		var cmd payloads.DetachVolume
		err := yaml.Unmarshal(payload, &cmd)
		return cmd.Detach.InstanceUUID, cmd.Detach.WorkloadAgentUUID, err
	}
}
// fwdCmdToComputeNode routes an agent-directed command to the launcher
// named in its payload (no scheduling decision involved); frames with an
// unusable payload are discarded.
func (sched *ssntpSchedulerServer) fwdCmdToComputeNode(command ssntp.Command, payload []byte) (dest ssntp.ForwardDestination, instanceUUID string) {
	// some commands require no scheduling choice, rather the specified
	// agent/launcher needs the command instead of the scheduler
	instanceUUID, cnDestUUID, err := getWorkloadAgentUUID(sched, command, payload)
	if err != nil || cnDestUUID == "" {
		glog.Errorf("Bad %s command yaml from Controller, WorkloadAgentUUID == %s\n", command.String(), cnDestUUID)
		dest.SetDecision(ssntp.Discard)
		return
	}
	glog.V(2).Infof("Forwarding controller %s command to %s\n", command.String(), cnDestUUID)
	dest.AddRecipient(cnDestUUID)
	return
}
// Decrement resource claims for the referenced locked nodeStat object
// NOTE(review): only memory is decremented speculatively; diskAvailMB is
// left unchanged until the node's next READY stats update — confirm
// that is intentional.
func (sched *ssntpSchedulerServer) decrementResourceUsage(node *nodeStat, workload *workResources) {
	node.memAvailMB -= workload.memReqMB
}
// Find suitable compute node, returning referenced to a locked nodeStat if found
// On success the returned node's mutex is HELD — the caller must unlock
// it. The search starts just after the MRU node to spread placement, then
// falls back to a full scan. Failures are reported to the controller.
func pickComputeNode(sched *ssntpSchedulerServer, controllerUUID string, workload *workResources, restart bool) (node *nodeStat) {
	sched.cnMutex.RLock()
	defer sched.cnMutex.RUnlock()
	if len(sched.cnList) == 0 {
		glog.Errorf("No compute nodes connected, unable to start workload")
		sched.sendStartFailureError(controllerUUID, workload.instanceUUID, payloads.NoComputeNodes, restart)
		return nil
	}
	/* First try nodes after the MRU */
	if sched.cnMRUIndex != -1 && sched.cnMRUIndex < len(sched.cnList)-1 {
		for i, node := range sched.cnList[sched.cnMRUIndex+1:] {
			node.mutex.Lock()
			if node == sched.cnMRU {
				node.mutex.Unlock()
				continue
			}
			if sched.workloadFits(node, workload) == true {
				sched.cnMRUIndex = sched.cnMRUIndex + 1 + i
				sched.cnMRU = node
				return node // locked nodeStat
			}
			node.mutex.Unlock()
		}
	}
	/* Then try the whole list, including the MRU */
	for i, node := range sched.cnList {
		node.mutex.Lock()
		if sched.workloadFits(node, workload) == true {
			sched.cnMRUIndex = i
			sched.cnMRU = node
			return node // locked nodeStat
		}
		node.mutex.Unlock()
	}
	sched.sendStartFailureError(controllerUUID, workload.instanceUUID, payloads.FullCloud, restart)
	return nil
}
// Find suitable net node, returning referenced to a locked nodeStat if found
// On success the returned node's mutex is HELD — the caller must unlock
// it. Mirrors pickComputeNode: MRU-relative scan first, then full scan.
func pickNetworkNode(sched *ssntpSchedulerServer, controllerUUID string, workload *workResources, restart bool) (node *nodeStat) {
	sched.nnMutex.RLock()
	defer sched.nnMutex.RUnlock()
	if len(sched.nnList) == 0 {
		glog.Errorf("No network nodes connected, unable to start network workload")
		sched.sendStartFailureError(controllerUUID, workload.instanceUUID, payloads.NoNetworkNodes, restart)
		return nil
	}
	/* First try nodes after the MRU */
	if sched.nnMRUIndex != -1 && sched.nnMRUIndex < len(sched.nnList)-1 {
		for i, node := range sched.nnList[sched.nnMRUIndex+1:] {
			node.mutex.Lock()
			if node == sched.nnMRU {
				node.mutex.Unlock()
				continue
			}
			if sched.workloadFits(node, workload) == true {
				sched.nnMRUIndex = sched.nnMRUIndex + 1 + i
				sched.nnMRU = node
				return node // locked nodeStat
			}
			node.mutex.Unlock()
		}
	}
	/* Then try the whole list, including the MRU */
	for i, node := range sched.nnList {
		node.mutex.Lock()
		if sched.workloadFits(node, workload) == true {
			sched.nnMRUIndex = i
			sched.nnMRU = node
			return node // locked nodeStat
		}
		node.mutex.Unlock()
	}
	sched.sendStartFailureError(controllerUUID, workload.instanceUUID, payloads.NoNetworkNodes, restart)
	return nil
}
// startWorkload schedules one START request: parse the payload, extract
// its resource demands, pick a compute or network node, speculatively
// claim the memory and forward the frame to the chosen node. Unusable or
// unplaceable requests are discarded.
func startWorkload(sched *ssntpSchedulerServer, controllerUUID string, payload []byte) (dest ssntp.ForwardDestination, instanceUUID string) {
	var work payloads.Start
	err := yaml.Unmarshal(payload, &work)
	if err != nil {
		glog.Errorf("Bad START workload yaml from Controller %s: %s\n", controllerUUID, err)
		dest.SetDecision(ssntp.Discard)
		return dest, ""
	}
	workload, err := sched.getWorkloadResources(&work)
	if err != nil {
		glog.Errorf("Bad START workload resource list from Controller %s: %s\n", controllerUUID, err)
		dest.SetDecision(ssntp.Discard)
		return dest, ""
	}
	instanceUUID = workload.instanceUUID
	var targetNode *nodeStat
	// pick* returns with targetNode.mutex HELD on success
	if workload.networkNode {
		targetNode = pickNetworkNode(sched, controllerUUID, &workload, work.Start.Restart)
	} else { //workload.network_node == false
		targetNode = pickComputeNode(sched, controllerUUID, &workload, work.Start.Restart)
	}
	if targetNode != nil {
		//TODO: mark the targetNode as unavailable until next stats / READY checkin?
		// or is subtracting mem demand sufficiently speculative enough?
		// Goal is to have spread, not schedule "too many" workloads back
		// to back on the same targetNode, but also not add latency to dispatch and
		// hopefully not queue when all nodes have just started a workload.
		sched.decrementResourceUsage(targetNode, &workload)
		dest.AddRecipient(targetNode.uuid)
		targetNode.mutex.Unlock()
	} else {
		// TODO Queue the frame ?
		dest.SetDecision(ssntp.Discard)
	}
	return dest, instanceUUID
}
// CommandForward is the SSNTP per-frame command forwarder: it accepts
// commands only from the known master controller, schedules START frames
// and routes the rest to the agent or CNCI named in their payloads.
func (sched *ssntpSchedulerServer) CommandForward(controllerUUID string, command ssntp.Command, frame *ssntp.Frame) (dest ssntp.ForwardDestination) {
	payload := frame.Payload
	instanceUUID := ""
	sched.controllerMutex.RLock()
	defer sched.controllerMutex.RUnlock()
	if sched.controllerMap[controllerUUID] == nil {
		glog.Warningf("Ignoring %s command from unknown Controller %s\n", command, controllerUUID)
		dest.SetDecision(ssntp.Discard)
		return
	}
	controller := sched.controllerMap[controllerUUID]
	controller.mutex.Lock()
	if controller.status != controllerMaster {
		// only the master controller may drive the cluster
		glog.Warningf("Ignoring %s command from non-master Controller %s\n", command, controllerUUID)
		dest.SetDecision(ssntp.Discard)
		controller.mutex.Unlock()
		return
	}
	controller.mutex.Unlock()
	start := time.Now()
	glog.V(2).Infof("Command %s from %s\n", command, controllerUUID)
	switch command {
	// the main command with scheduler processing
	case ssntp.START:
		dest, instanceUUID = startWorkload(sched, controllerUUID, payload)
	case ssntp.RESTART:
		fallthrough
	case ssntp.STOP:
		fallthrough
	case ssntp.DELETE:
		fallthrough
	case ssntp.AttachVolume:
		fallthrough
	case ssntp.DetachVolume:
		fallthrough
	case ssntp.EVACUATE:
		dest, instanceUUID = sched.fwdCmdToComputeNode(command, payload)
	case ssntp.AssignPublicIP:
		fallthrough
	case ssntp.ReleasePublicIP:
		dest = sched.fwdCmdToCNCI(command, payload)
	default:
		dest.SetDecision(ssntp.Discard)
	}
	elapsed := time.Since(start)
	glog.V(2).Infof("%s command processed for instance %s in %s\n", command, instanceUUID, elapsed)
	return
}
// CommandNotify logs commands delivered directly to the scheduler.
func (sched *ssntpSchedulerServer) CommandNotify(uuid string, command ssntp.Command, frame *ssntp.Frame) {
	// Currently all commands are handled by CommandForward, the SSNTP command forwarder,
	// or directly by role defined forwarding rules.
	glog.V(2).Infof("COMMAND %v from %s\n", command, uuid)
}
// EventForward is the SSNTP per-frame event forwarder: tenant add/remove
// events are routed to the CNCI named in their payloads.
func (sched *ssntpSchedulerServer) EventForward(uuid string, event ssntp.Event, frame *ssntp.Frame) (dest ssntp.ForwardDestination) {
	payload := frame.Payload
	start := time.Now()
	switch event {
	case ssntp.TenantAdded:
		fallthrough
	case ssntp.TenantRemoved:
		dest = sched.fwdEventToCNCI(event, payload)
	}
	elapsed := time.Since(start)
	glog.V(2).Infof("%s event processed for instance %s in %s\n", event.String(), uuid, elapsed)
	return dest
}
// EventNotify logs events delivered directly to the scheduler.
func (sched *ssntpSchedulerServer) EventNotify(uuid string, event ssntp.Event, frame *ssntp.Frame) {
	// Currently all events are handled by EventForward, the SSNTP event forwarder,
	// or directly by role defined forwarding rules.
	glog.V(2).Infof("EVENT %v from %s\n", event, uuid)
}
// ErrorNotify logs SSNTP error frames; the scheduler takes no further
// action on them.
func (sched *ssntpSchedulerServer) ErrorNotify(uuid string, ssntpError ssntp.Error, frame *ssntp.Frame) {
	// parameter renamed from "error", which shadowed the builtin type
	glog.V(2).Infof("ERROR %v from %s\n", ssntpError, uuid)
}
// setLimits raises the process RLIMIT_NOFILE soft limit to the hard
// limit so the server can hold many client connections; failures are
// logged but not fatal.
func setLimits() {
	var rlim syscall.Rlimit
	err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlim)
	if err != nil {
		glog.Warningf("Getrlimit failed %v", err)
		return
	}
	glog.Infof("Initial nofile limits: cur %d max %d", rlim.Cur, rlim.Max)
	if rlim.Cur < rlim.Max {
		oldCur := rlim.Cur
		rlim.Cur = rlim.Max
		err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rlim)
		if err != nil {
			glog.Warningf("Setrlimit failed %v", err)
			rlim.Cur = oldCur
		}
	}
	glog.Infof("Updated nofile limits: cur %d max %d", rlim.Cur, rlim.Max)
}
// heartBeatControllers renders a status fragment for up to the first two
// controllers: any master (kept at the front of the list) first, then
// backups, comma separated, padded with tabs so the compute-node column of
// the heartbeat line stays aligned.
func heartBeatControllers(sched *ssntpSchedulerServer) (s string) {
	// show the first two controllers
	controllerMax := 2
	i := 0

	sched.controllerMutex.RLock()
	defer sched.controllerMutex.RUnlock()

	if len(sched.controllerList) == 0 {
		return " -no Controller- \t\t\t\t\t"
	}

	// first show any master, which is at front of list
	controller := sched.controllerList[0]
	controller.mutex.Lock()
	if controller.status == controllerMaster {
		s += fmt.Sprintf("controller-%s:", controller.uuid[:8])
		s += controller.status.String()
		controller.mutex.Unlock()
		i++
		if i <= controllerMax && len(sched.controllerList) > i {
			s += ", "
		} else {
			s += "\t"
		}
	} else {
		// BUG FIX: this unlock was missing. When the head of the list is
		// not a master we fall through to the backup loop below with i == 0,
		// which locks the same controller again; sync.Mutex is not
		// reentrant, so the heartbeat goroutine would deadlock.
		controller.mutex.Unlock()
	}

	// second show any backup(s)
	for _, controller := range sched.controllerList[i:] {
		if i == controllerMax {
			break
		}
		controller.mutex.Lock()
		if controller.status == controllerMaster {
			// Only the list head may be master; anything else is an
			// internal consistency error.
			controller.mutex.Unlock()
			glog.Errorf("multiple controller masters")
			return "ERROR multiple controller masters"
		}
		s += fmt.Sprintf("controller-%s:", controller.uuid[:8])
		s += controller.status.String()
		controller.mutex.Unlock()
		i++
		if i < controllerMax && len(sched.controllerList) > i {
			s += ", "
		} else {
			s += "\t"
		}
	}

	// finish with some whitespace ahead of compute nodes
	if i < controllerMax {
		s += "\t\t\t"
	} else {
		s += "\t"
	}
	return s
}
// heartBeatComputeNodes renders a status fragment for up to the first four
// compute nodes, comma separated, in the form
// "node-<uuid8>:<status>[*]:<availMB>/<totalMB>,<load>". The "*" marks the
// most-recently-used node (sched.cnMRU).
func heartBeatComputeNodes(sched *ssntpSchedulerServer) (s string) {
	// show the first four compute nodes
	cnMax := 4
	i := 0
	sched.cnMutex.RLock()
	defer sched.cnMutex.RUnlock()
	for _, node := range sched.cnList {
		// Per-node lock while reading its mutable stats.
		node.mutex.Lock()
		s += fmt.Sprintf("node-%s:", node.uuid[:8])
		s += node.status.String()
		if node == sched.cnMRU {
			s += "*"
		}
		s += ":" + fmt.Sprintf("%d/%d,%d",
			node.memAvailMB,
			node.memTotalMB,
			node.load)
		node.mutex.Unlock()
		i++
		if i == cnMax {
			break
		}
		// NOTE(review): "i <= cnMax" is always true here (the i == cnMax
		// case broke out above); only the length check decides the comma.
		if i <= cnMax && len(sched.cnList) > i {
			s += ", "
		}
	}
	if i == 0 {
		s += " -no Compute Nodes-"
	}
	return s
}
// heartBeatHeaderFreq is the number of beats between reprints of the
// "Controllers ... Compute Nodes" column header.
const heartBeatHeaderFreq = 22

// heartBeat sleeps one second then renders a single status line describing
// connected controllers and compute nodes. iter selects when the column
// header is prepended.
func heartBeat(sched *ssntpSchedulerServer, iter int) string {
	var beatTxt string

	time.Sleep(time.Duration(1) * time.Second)

	// Quick emptiness check under both read locks; the locks are released
	// before the formatting helpers below re-acquire them, so the state may
	// change in between (cosmetic only — this is a diagnostic line).
	sched.controllerMutex.RLock()
	sched.cnMutex.RLock()
	if len(sched.controllerList) == 0 && len(sched.cnList) == 0 {
		sched.controllerMutex.RUnlock()
		sched.cnMutex.RUnlock()
		return "** idle / disconnected **\n"
	}
	sched.controllerMutex.RUnlock()
	sched.cnMutex.RUnlock()

	// NOTE(review): iter is a local copy; this increment is invisible to the
	// caller, so the header only appears if the caller advances iter itself.
	iter++
	if iter%heartBeatHeaderFreq == 0 {
		//output a column indication occasionally
		beatTxt = "Controllers\t\t\t\t\tCompute Nodes\n"
	}

	beatTxt += heartBeatControllers(sched) + heartBeatComputeNodes(sched)
	return beatTxt
}
// heartBeatLoop prints a one-line scheduler status summary roughly once per
// second (heartBeat itself sleeps one second per call).
func heartBeatLoop(sched *ssntpSchedulerServer) {
	iter := 0
	for {
		log.Printf("%s\n", heartBeat(sched, iter))
		// BUG FIX: iter must advance here. heartBeat increments only its
		// local copy, so without this the periodic column header (every
		// heartBeatHeaderFreq beats) was never printed.
		iter++
	}
}
// toggleDebug enables the optional debug facilities selected on the command
// line: CPU profiling (when sched.cpuprofile names a file) and the periodic
// heartbeat logger.
func toggleDebug(sched *ssntpSchedulerServer) {
	if len(sched.cpuprofile) != 0 {
		f, err := os.Create(sched.cpuprofile)
		if err != nil {
			glog.Warningf("unable to initialize cpuprofile (%s)", err)
		} else {
			// BUG FIX: previously StartCPUProfile ran even when os.Create
			// failed (passing a nil *os.File), and a deferred
			// StopCPUProfile terminated profiling as soon as this function
			// returned — yielding an essentially empty profile. Profiling
			// now runs for the life of the process.
			// NOTE(review): pprof.StopCPUProfile should still be called on
			// shutdown to flush the profile — confirm a shutdown hook exists.
			pprof.StartCPUProfile(f)
		}
	}

	/* glog's --logtostderr and -v=2 are probably really what you want..
	sched.config.Trace = os.Stdout
	sched.config.Error = os.Stdout
	sched.config.DebugInterface = false
	*/

	if sched.heartbeat {
		go heartBeatLoop(sched)
	}
}
// setSSNTPForwardRules installs the scheduler's SSNTP routing table.
// Status/result traffic (STATS, trace reports, lifecycle events, failure
// errors) fans out to all Controllers; actionable commands and tenant
// events are handed to this server's CommandForward/EventForward hooks for
// a per-frame routing decision.
func setSSNTPForwardRules(sched *ssntpSchedulerServer) {
	sched.config.ForwardRules = []ssntp.FrameForwardRule{
		{ // all STATS commands go to all Controllers
			Operand: ssntp.STATS,
			Dest:    ssntp.Controller,
		},
		{ // all TraceReport events go to all Controllers
			Operand: ssntp.TraceReport,
			Dest:    ssntp.Controller,
		},
		{ // all InstanceDeleted events go to all Controllers
			Operand: ssntp.InstanceDeleted,
			Dest:    ssntp.Controller,
		},
		{ // all InstanceStopped events go to all Controllers
			Operand: ssntp.InstanceStopped,
			Dest:    ssntp.Controller,
		},
		{ // all ConcentratorInstanceAdded events go to all Controllers
			Operand: ssntp.ConcentratorInstanceAdded,
			Dest:    ssntp.Controller,
		},
		{ // all StartFailure errors go to all Controllers
			Operand: ssntp.StartFailure,
			Dest:    ssntp.Controller,
		},
		{ // all StopFailure errors go to all Controllers
			Operand: ssntp.StopFailure,
			Dest:    ssntp.Controller,
		},
		{ // all RestartFailure errors go to all Controllers
			Operand: ssntp.RestartFailure,
			Dest:    ssntp.Controller,
		},
		{ // all DeleteFailure errors go to all Controllers
			Operand: ssntp.DeleteFailure,
			Dest:    ssntp.Controller,
		},
		{ // all PublicIPAssigned events go to all Controllers
			Operand: ssntp.PublicIPAssigned,
			Dest:    ssntp.Controller,
		},
		{ // all AssignPublicIPFailure events go to all Controllers
			Operand: ssntp.AssignPublicIPFailure,
			Dest:    ssntp.Controller,
		},
		{ // all PublicIPUnassigned events go to all Controllers
			Operand: ssntp.PublicIPUnassigned,
			Dest:    ssntp.Controller,
		},
		{ // all UnassignPublicIPFailure events go to all Controllers
			Operand: ssntp.UnassignPublicIPFailure,
			Dest:    ssntp.Controller,
		},
		{ // all START command are processed by the Command forwarder
			Operand:        ssntp.START,
			CommandForward: sched,
		},
		{ // all RESTART command are processed by the Command forwarder
			Operand:        ssntp.RESTART,
			CommandForward: sched,
		},
		{ // all STOP command are processed by the Command forwarder
			Operand:        ssntp.STOP,
			CommandForward: sched,
		},
		{ // all DELETE command are processed by the Command forwarder
			Operand:        ssntp.DELETE,
			CommandForward: sched,
		},
		{ // all EVACUATE command are processed by the Command forwarder
			Operand:        ssntp.EVACUATE,
			CommandForward: sched,
		},
		{ // all TenantAdded events are processed by the Event forwarder
			Operand:      ssntp.TenantAdded,
			EventForward: sched,
		},
		{ // all TenantRemoved events are processed by the Event forwarder
			Operand:      ssntp.TenantRemoved,
			EventForward: sched,
		},
		{ // all AttachVolume command are processed by the Command forwarder
			Operand:        ssntp.AttachVolume,
			CommandForward: sched,
		},
		{ // all DetachVolume command are processed by the Command forwarder
			Operand:        ssntp.DetachVolume,
			CommandForward: sched,
		},
		{ // all AttachVolumeFailure errors go to all Controllers
			Operand: ssntp.AttachVolumeFailure,
			Dest:    ssntp.Controller,
		},
		{ // all DetachVolumeFailure errors go to all Controllers
			Operand: ssntp.DetachVolumeFailure,
			Dest:    ssntp.Controller,
		},
		{ // all AssignPublicIP commands are processed by the Command forwarder
			Operand:        ssntp.AssignPublicIP,
			CommandForward: sched,
		},
		{ // all ReleasePublicIP commands are processed by the Command forwarder
			Operand:        ssntp.ReleasePublicIP,
			CommandForward: sched,
		},
	}
}
// initLogger ensures glog's --log_dir flag has a usable value (falling back
// to the compiled-in default logDir) and creates the target directory.
func initLogger() error {
	logDirFlag := flag.Lookup("log_dir")
	if logDirFlag == nil {
		return fmt.Errorf("log_dir does not exist")
	}

	// Fall back to the default when the user did not set --log_dir.
	if logDirFlag.Value.String() == "" {
		if err := logDirFlag.Value.Set(logDir); err != nil {
			return err
		}
	}

	if err := os.MkdirAll(logDirFlag.Value.String(), 0755); err != nil {
		// BUG FIX: report the directory actually used; the old message
		// always printed the default logDir even when --log_dir was set.
		return fmt.Errorf("unable to create log directory (%s) %v", logDirFlag.Value.String(), err)
	}
	return nil
}
// configSchedulerServer builds a fully configured scheduler server: raises
// the open-file limit, applies the profiling/heartbeat debug flags, fills
// in the SSNTP configuration from command-line flags, and installs the
// frame forwarding rules.
func configSchedulerServer() (sched *ssntpSchedulerServer) {
	setLimits()
	sched = newSsntpSchedulerServer()
	sched.cpuprofile = *cpuprofile
	sched.heartbeat = *heartbeat
	toggleDebug(sched)
	sched.config = &ssntp.Config{
		CAcert:    *cacert,
		Cert:      *cert,
		ConfigURI: *configURI,
	}
	setSSNTPForwardRules(sched)
	return sched
}
// main parses flags, initializes logging, prepares the host OS
// dependencies, then runs the SSNTP scheduler server (Serve blocks until
// the server terminates).
func main() {
	flag.Parse()
	if err := initLogger(); err != nil {
		fmt.Printf("Unable to initialise logs: %v", err)
		return
	}
	glog.Info("Starting Scheduler")
	logger := gloginterface.CiaoGlogLogger{}
	osprepare.Bootstrap(context.TODO(), logger)
	osprepare.InstallDeps(context.TODO(), schedDeps, logger)
	sched := configSchedulerServer()
	// NOTE(review): configSchedulerServer as written never returns nil, so
	// this guard looks like dead code — confirm before removing.
	if sched == nil {
		glog.Errorf("unable to configure scheduler")
		return
	}
	sched.ssntp.Serve(sched.config, sched)
}
|
package main
import (
"fmt"
"os"
"strings"
)
// Amount is a money value tagged with its currency code ("inr" or "usd").
type Amount struct {
	Currency string
	Value    float64
}

// indianUnits maps Indian-system unit words to their numeric multipliers.
var indianUnits = map[string]uint64{"arab": 1000000000, "crore": 10000000, "lakh": 100000}

// usUnits maps US short-scale unit words to their numeric multipliers.
var usUnits = map[string]uint64{"trillion": 1000000000000, "billion": 1000000000, "million": 1000000}

// multipliersFor selects the unit table for a currency code.
var multipliersFor = map[string]map[string]uint64{"inr": indianUnits, "usd": usUnits}
// keys returns all keys of m, in unspecified (map-iteration) order.
func keys(m map[string]uint64) []string {
	// Idiom fix: "for k := range" instead of "for k, _ :=", and the slice
	// is preallocated to the map's size to avoid repeated growth.
	ks := make([]string, 0, len(m))
	for k := range m {
		ks = append(ks, k)
	}
	return ks
}
// contains reports whether the lower-cased form of val holds any entry of s
// as a substring; the first matching entry is returned alongside.
func contains(s []string, val string) (bool, string) {
	lowered := strings.ToLower(val)
	for _, candidate := range s {
		if strings.Contains(lowered, candidate) {
			return true, candidate
		}
	}
	return false, ""
}
// inrSignifiers / usdSignifiers are the substrings that identify a currency
// in free-form input; unit words double as signifiers.
var inrSignifiers = append(keys(indianUnits), []string{"rs", "inr", "₹", "rupee"}...)
var usdSignifiers = append(keys(usUnits), []string{"$", "usd", "dollar"}...)
// parseCurrency guesses the currency of s from its signifiers: "inr",
// "usd", or "" when nothing matches. INR signifiers are checked first.
func parseCurrency(s string) string {
	if ok, _ := contains(inrSignifiers, s); ok {
		return "inr"
	}
	if ok, _ := contains(usdSignifiers, s); ok {
		return "usd"
	}
	return ""
}
func parseMultiplier(s string) uint64 {
if c, v := contains(keys(indianUnits), s); c {
return indianUnits[v]
}
if c, v := contains(keys(usUnits), s); c {
return usUnits[v]
}
return 1
}
// parseNumber extracts the leading float from s (e.g. "12.5 lakh" -> 12.5).
// Parsing is best-effort: any failure yields 0.
func parseNumber(s string) float64 {
	var value float64
	// FIX: the Sscanf error was silently discarded; it is now handled
	// explicitly. Behavior is unchanged — an unparsable string produces 0.
	if _, err := fmt.Sscanf(s, "%f", &value); err != nil {
		return 0
	}
	return value
}
// parse extracts the currency, unit multiplier and numeric value from the
// raw input string and combines them into an Amount.
func parse(s string) Amount {
	return Amount{
		Currency: parseCurrency(s),
		Value:    float64(parseMultiplier(s)) * parseNumber(s),
	}
}
// convert exchanges the amount into the other currency at a fixed rate of
// 62 INR per USD; an unknown currency yields the zero Amount.
func convert(amount Amount) Amount {
	const rate = 62.0
	if amount.Currency == "inr" {
		return Amount{Currency: "usd", Value: amount.Value / rate}
	}
	if amount.Currency == "usd" {
		return Amount{Currency: "inr", Value: amount.Value * rate}
	}
	return Amount{}
}
// symbolFor returns the display symbol for the amount's currency; unknown
// currencies map to the empty string.
func symbolFor(amount Amount) string {
	symbols := map[string]string{"inr": "₹", "usd": "$"}
	return symbols[amount.Currency]
}
// otherCurrency returns the counterpart currency code ("inr" <-> "usd");
// unknown codes map to the empty string.
func otherCurrency(currency string) string {
	counterparts := map[string]string{"inr": "usd", "usd": "inr"}
	return counterparts[currency]
}
// humanDivisorFor picks the unit word and divisor used to display amount.
// BUG FIX: the original returned whichever qualifying unit Go's randomized
// map iteration happened to visit first, so e.g. an amount over one crore
// could print in lakh on one run and crore on the next. It now
// deterministically selects the largest unit whose divisor fits.
func humanDivisorFor(amount Amount) (string, uint64) {
	multipliers := multipliersFor[amount.Currency]
	unit, divisor := "", uint64(1)
	for k, v := range multipliers {
		if amount.Value/float64(v) > 1 && v > divisor {
			unit, divisor = k, v
		}
	}
	return unit, divisor
}
// humanize renders the amount with its currency symbol and the largest
// fitting unit, e.g. "$1.5 million" (no space between symbol and number).
func humanize(amount Amount) string {
	symbol := symbolFor(amount)
	unit, divisor := humanDivisorFor(amount)
	return fmt.Sprintf("%s%.1f %s", symbol, amount.Value/float64(divisor), unit)
}
// main reads an amount description from the command line, converts it to
// the other currency, and prints a human-readable result.
func main() {
	amount := convert(parse(strings.Join(os.Args[1:], " ")))
	fmt.Println(humanize(amount))
}
Rewritten in the Go way
package main
import (
"errors"
"fmt"
"os"
"regexp"
"strings"
)
// parseNumber extracts the leading float from s (e.g. "12.5 lakh" -> 12.5).
// Parsing is best-effort: any failure yields 0.
func parseNumber(s string) float64 {
	var value float64
	// FIX: the Sscanf error was silently discarded; it is now handled
	// explicitly. Behavior is unchanged — an unparsable string produces 0.
	if _, err := fmt.Sscanf(s, "%f", &value); err != nil {
		return 0
	}
	return value
}
// Indian-system unit multipliers, in absolute currency units.
const (
	Lakh  = 100000.0
	Crore = 10000000.0
	Arab  = 1000000000.0
)

// InrAmount is a rupee amount held in absolute units.
type InrAmount struct {
	Value float64
}

// US short-scale unit multipliers, in absolute currency units.
const (
	Million  = 1000000.0
	Billion  = 1000000000.0
	Trillion = 1000000000000.0
)

// UsdAmount is a dollar amount held in absolute units.
type UsdAmount struct {
	Value float64
}

// Amount is a money value that can be exchanged into the other currency and
// rendered for humans.
type Amount interface {
	// Convert exchanges the amount at the given USD->INR rate.
	Convert(usdToInr float64) Amount
	// FormatValue renders the amount with its symbol and largest unit.
	FormatValue() string
}
// inrUnits matches the supported Indian unit words.
// PERF FIX: compiled once at package init; the original recompiled the
// pattern on every NewInrAmount call.
var inrUnits = regexp.MustCompile("lakh|crore|arab")

// NewInrAmount parses a string such as "rs 5 lakh" into an InrAmount,
// applying the unit multiplier when a unit word is present.
func NewInrAmount(s string) *InrAmount {
	number := parseNumber(s)
	switch inrUnits.FindString(s) {
	case "lakh":
		return &InrAmount{Value: number * Lakh}
	case "crore":
		return &InrAmount{Value: number * Crore}
	case "arab":
		return &InrAmount{Value: number * Arab}
	default:
		return &InrAmount{Value: number}
	}
}
// Convert exchanges rupees into dollars at the supplied USD->INR rate.
func (amount *InrAmount) Convert(usdToInr float64) Amount {
	return &UsdAmount{Value: amount.Value / usdToInr}
}
// FormatValue renders the amount with the largest fitting Indian unit.
// NOTE(review): amounts under one lakh render as the empty string — confirm
// this is intended rather than falling back to a plain rupee figure.
func (amount *InrAmount) FormatValue() string {
	if v := amount.Value / Arab; v >= 1.0 {
		return fmt.Sprintf("₹ %.1f arab", v)
	} else if v := amount.Value / Crore; v >= 1.0 {
		return fmt.Sprintf("₹ %.1f crore", v)
	} else if v := amount.Value / Lakh; v >= 1.0 {
		return fmt.Sprintf("₹ %.1f lakh", v)
	} else {
		return ""
	}
}
// usdUnits matches the supported US short-scale unit words.
// PERF FIX: compiled once at package init; the original recompiled the
// pattern on every NewUsdAmount call.
var usdUnits = regexp.MustCompile("million|billion|trillion")

// NewUsdAmount parses a string such as "$3 billion" into a UsdAmount,
// applying the unit multiplier when a unit word is present.
func NewUsdAmount(s string) *UsdAmount {
	number := parseNumber(s)
	switch usdUnits.FindString(s) {
	case "million":
		return &UsdAmount{Value: number * Million}
	case "billion":
		return &UsdAmount{Value: number * Billion}
	case "trillion":
		return &UsdAmount{Value: number * Trillion}
	default:
		return &UsdAmount{Value: number}
	}
}
// Convert exchanges dollars into rupees at the supplied USD->INR rate.
func (amount *UsdAmount) Convert(usdToInr float64) Amount {
	return &InrAmount{Value: amount.Value * usdToInr}
}
// FormatValue renders the amount with the largest fitting short-scale unit.
// NOTE(review): amounts under one million render as the empty string —
// confirm this is intended rather than falling back to a plain figure.
func (amount *UsdAmount) FormatValue() string {
	if v := amount.Value / Trillion; v >= 1.0 {
		return fmt.Sprintf("$ %.1f trillion", v)
	} else if v := amount.Value / Billion; v >= 1.0 {
		return fmt.Sprintf("$ %.1f billion", v)
	} else if v := amount.Value / Million; v >= 1.0 {
		return fmt.Sprintf("$ %.1f million", v)
	} else {
		return ""
	}
}
// inrSignifiers / usdSignifiers detect which currency an input string
// refers to; unit words double as signifiers.
var inrSignifiers = regexp.MustCompile(`lakh|crore|arab|rs|inr|₹|rupee`)
var usdSignifiers = regexp.MustCompile(`million|billion|trillion|\$|usd|dollar`)
// parseAmount builds an Amount from raw input, or returns an error when no
// currency signifier is found. INR signifiers win when both match.
func parseAmount(s string) (Amount, error) {
	switch {
	case inrSignifiers.MatchString(s):
		return NewInrAmount(s), nil
	case usdSignifiers.MatchString(s):
		return NewUsdAmount(s), nil
	default:
		return nil, errors.New("no currency recognized")
	}
}
// main converts the command-line amount into the other currency at a fixed
// rate of 62 INR per USD and prints the humanized result.
func main() {
	input := strings.Join(os.Args[1:], " ")
	amount, err := parseAmount(input)
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	fmt.Println(amount.Convert(62.0).FormatValue())
}
|
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// TODO(thockin): This whole pkg is pretty linux-centric. As soon as we have
// an alternate platform, we will need to abstract further.
package mount
import (
"os"
"path/filepath"
)
// FileType classifies a filesystem object as reported by GetFileType.
type FileType string

const (
	// Default mount command if mounter path is not specified
	defaultMountCommand = "mount"
	// MountsInGlobalPDPath is the directory name used for global
	// (per-volume) mount points under a plugin directory.
	MountsInGlobalPDPath = "mounts"

	FileTypeDirectory FileType = "Directory"
	FileTypeFile      FileType = "File"
	FileTypeSocket    FileType = "Socket"
	FileTypeCharDev   FileType = "CharDevice"
	FileTypeBlockDev  FileType = "BlockDevice"
)

// Interface is the set of mount operations a platform-specific mounter
// must implement.
type Interface interface {
	// Mount mounts source to target as fstype with given options.
	Mount(source string, target string, fstype string, options []string) error
	// Unmount unmounts given target.
	Unmount(target string) error
	// List returns a list of all mounted filesystems. This can be large.
	// On some platforms, reading mounts is not guaranteed consistent (i.e.
	// it could change between chunked reads). This is guaranteed to be
	// consistent.
	List() ([]MountPoint, error)
	// IsMountPointMatch determines if the mountpoint matches the dir.
	IsMountPointMatch(mp MountPoint, dir string) bool
	// IsNotMountPoint determines if a directory is a mountpoint.
	// It should return ErrNotExist when the directory does not exist.
	// IsNotMountPoint is more expensive than IsLikelyNotMountPoint.
	// IsNotMountPoint detects bind mounts in linux.
	// IsNotMountPoint enumerates all the mountpoints using List() and
	// the list of mountpoints may be large, then it uses
	// IsMountPointMatch to evaluate whether the directory is a mountpoint.
	IsNotMountPoint(file string) (bool, error)
	// IsLikelyNotMountPoint uses heuristics to determine if a directory
	// is a mountpoint.
	// It should return ErrNotExist when the directory does not exist.
	// IsLikelyNotMountPoint does NOT properly detect all mountpoint types
	// most notably linux bind mounts.
	IsLikelyNotMountPoint(file string) (bool, error)
	// DeviceOpened determines if the device is in use elsewhere
	// on the system, i.e. still mounted.
	DeviceOpened(pathname string) (bool, error)
	// PathIsDevice determines if a path is a device.
	PathIsDevice(pathname string) (bool, error)
	// GetDeviceNameFromMount finds the device name by checking the mount path
	// to get the global mount path which matches its plugin directory.
	GetDeviceNameFromMount(mountPath, pluginDir string) (string, error)
	// MakeRShared checks that given path is on a mount with 'rshared' mount
	// propagation. If not, it bind-mounts the path as rshared.
	MakeRShared(path string) error
	// GetFileType checks for file/directory/socket/block/character devices.
	// Will operate in the host mount namespace if kubelet is running in a container.
	GetFileType(pathname string) (FileType, error)
	// MakeFile creates an empty file.
	// Will operate in the host mount namespace if kubelet is running in a container.
	MakeFile(pathname string) error
	// MakeDir creates a new directory.
	// Will operate in the host mount namespace if kubelet is running in a container.
	MakeDir(pathname string) error
	// ExistsPath checks whether the path exists.
	// Will operate in the host mount namespace if kubelet is running in a container.
	ExistsPath(pathname string) bool
}

// Exec executes command where mount utilities are. This can be either the host,
// container where kubelet runs or even a remote pod with mount utilities.
// Usual pkg/util/exec interface is not used because kubelet.RunInContainer does
// not provide stdin/stdout/stderr streams.
type Exec interface {
	// Run executes a command and returns its stdout + stderr combined in one
	// stream.
	Run(cmd string, args ...string) ([]byte, error)
}

// Compile-time check to ensure all Mounter implementations satisfy
// the mount interface.
var _ Interface = &Mounter{}

// MountPoint represents a single line in /proc/mounts or /etc/fstab.
type MountPoint struct {
	Device string
	Path   string
	Type   string
	Opts   []string
	Freq   int
	Pass   int
}
// SafeFormatAndMount probes a device to see if it is formatted.
// Namely it checks to see if a file system is present. If so it
// mounts it otherwise the device is formatted first then mounted.
type SafeFormatAndMount struct {
	Interface // platform mounter performing the actual mount calls
	Exec      // runner for the probe/format utilities
}
// FormatAndMount formats the given disk, if needed, and mounts it.
// That is if the disk is not formatted and it is not being mounted as
// read-only it will format it first then mount it. Otherwise, if the
// disk is already formatted or it is being mounted as read-only, it
// will be mounted without formatting.
func (mounter *SafeFormatAndMount) FormatAndMount(source string, target string, fstype string, options []string) error {
	// A read-only mount can never be formatted; go straight to mounting.
	readOnly := false
	for _, opt := range options {
		if opt == "ro" {
			readOnly = true
			break
		}
	}
	if readOnly {
		return mounter.Interface.Mount(source, target, fstype, options)
	}
	return mounter.formatAndMount(source, target, fstype, options)
}
// GetMountRefsByDev finds all references to the device provided
// by mountPath; returns a list of paths.
func GetMountRefsByDev(mounter Interface, mountPath string) ([]string, error) {
	mounts, err := mounter.List()
	if err != nil {
		return nil, err
	}

	// Resolve symlinks; fall back to the raw path on failure.
	target, err := filepath.EvalSymlinks(mountPath)
	if err != nil {
		target = mountPath
	}

	// Locate the device mounted at target.
	var device string
	for _, mp := range mounts {
		if mp.Path == target {
			device = mp.Device
			break
		}
	}

	// Collect every other mount of that device (or of target itself).
	var refs []string
	for _, mp := range mounts {
		if (mp.Device == device || mp.Device == target) && mp.Path != target {
			refs = append(refs, mp.Path)
		}
	}
	return refs, nil
}
// GetDeviceNameFromMount: given a mnt point, find the device from /proc/mounts
// returns the device name, reference count, and error code.
func GetDeviceNameFromMount(mounter Interface, mountPath string) (string, int, error) {
	mounts, err := mounter.List()
	if err != nil {
		return "", 0, err
	}

	// If mountPath is a symlink, resolve it; fall back to the raw path.
	target, err := filepath.EvalSymlinks(mountPath)
	if err != nil {
		target = mountPath
	}

	// Find the device name.
	// FIXME if multiple devices mounted on the same mount path, only the first one is returned
	var device string
	for _, mp := range mounts {
		if mp.Path == target {
			device = mp.Device
			break
		}
	}

	// Count every mount of that device.
	refCount := 0
	for _, mp := range mounts {
		if mp.Device == device {
			refCount++
		}
	}
	return device, refCount, nil
}
// IsNotMountPoint determines if a directory is a mountpoint.
// It should return ErrNotExist when the directory does not exist.
// This method uses the List() of all mountpoints.
// It is more extensive than IsLikelyNotMountPoint
// and it detects bind mounts in linux.
func IsNotMountPoint(mounter Interface, file string) (bool, error) {
	// IsLikelyNotMountPoint provides a quick check
	// to determine whether file IS A mountpoint.
	notMnt, notMntErr := mounter.IsLikelyNotMountPoint(file)
	if notMntErr != nil && os.IsPermission(notMntErr) {
		// We were not allowed to do the simple stat() check, e.g. on NFS with
		// root_squash. Fall back to /proc/mounts check below.
		notMnt = true
		notMntErr = nil
	}
	if notMntErr != nil {
		return notMnt, notMntErr
	}
	// Identified as a mountpoint, so return this fact.
	// (idiom fix: compare booleans with !, not "== false")
	if !notMnt {
		return notMnt, nil
	}
	// Check all mountpoints, since IsLikelyNotMountPoint
	// is not reliable for some mountpoint types.
	mountPoints, mountPointsErr := mounter.List()
	if mountPointsErr != nil {
		return notMnt, mountPointsErr
	}
	for _, mp := range mountPoints {
		if mounter.IsMountPointMatch(mp, file) {
			notMnt = false
			break
		}
	}
	return notMnt, nil
}
// isBind detects whether a bind mount is being requested and makes the remount options to
// use in case of bind mount, due to the fact that bind mount doesn't respect mount options.
// The list equals:
//   options - 'bind' + 'remount' (no duplicate)
func isBind(options []string) (bool, []string) {
	// "remount" always leads the result; any "remount" in the input is
	// dropped so it is not duplicated.
	bindRemountOpts := []string{"remount"}
	bind := false

	// IDIOM FIX: Go switch cases do not fall through, so the original
	// "break" statements were redundant; the len(options) != 0 guard was
	// equally unnecessary (ranging over an empty slice is a no-op).
	for _, option := range options {
		switch option {
		case "bind":
			bind = true
		case "remount":
			// already included above
		default:
			bindRemountOpts = append(bindRemountOpts, option)
		}
	}

	return bind, bindRemountOpts
}
Redesign and implement volume reconstruction work
This PR is the first part of redesign of volume reconstruction work. The
changes include
1. Remove dependency on volume spec stored in actual state for volume
cleanup process (UnmountVolume and UnmountDevice)
Modify AttachedVolume struct to add DeviceMountPath so that volume
unmount operation can use this information instead of constructing from
volume spec
2. Modify reconciler's volume reconstruction process (syncState). Currently the
workflow is: when kubelet restarts, syncState() is only called once before
the reconciler starts its loop.
a. If volume plugin supports reconstruction, it will use the
reconstructed volume spec information to update actual state as before.
b. If volume plugin cannot support reconstruction, it will use the
scanned mount path information to clean up the mounts.
In this PR, all the plugins still support reconstruction (except
glusterfs), so reconstruction of some plugins will still have issues.
The next PR will modify those plugins that cannot support reconstruction
well.
This PR addresses issues #52683 and #54108 (this PR includes the changes to
update devicePath after local attach finishes).
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// TODO(thockin): This whole pkg is pretty linux-centric. As soon as we have
// an alternate platform, we will need to abstract further.
package mount
import (
"os"
"path/filepath"
"strings"
)
// FileType classifies a filesystem object as reported by GetFileType.
type FileType string

const (
	// Default mount command if mounter path is not specified
	defaultMountCommand = "mount"
	// MountsInGlobalPDPath is the directory name used for global
	// (per-volume) mount points under a plugin directory.
	MountsInGlobalPDPath = "mounts"

	FileTypeDirectory FileType = "Directory"
	FileTypeFile      FileType = "File"
	FileTypeSocket    FileType = "Socket"
	FileTypeCharDev   FileType = "CharDevice"
	FileTypeBlockDev  FileType = "BlockDevice"
)

// Interface is the set of mount operations a platform-specific mounter
// must implement.
type Interface interface {
	// Mount mounts source to target as fstype with given options.
	Mount(source string, target string, fstype string, options []string) error
	// Unmount unmounts given target.
	Unmount(target string) error
	// List returns a list of all mounted filesystems. This can be large.
	// On some platforms, reading mounts is not guaranteed consistent (i.e.
	// it could change between chunked reads). This is guaranteed to be
	// consistent.
	List() ([]MountPoint, error)
	// IsMountPointMatch determines if the mountpoint matches the dir.
	IsMountPointMatch(mp MountPoint, dir string) bool
	// IsNotMountPoint determines if a directory is a mountpoint.
	// It should return ErrNotExist when the directory does not exist.
	// IsNotMountPoint is more expensive than IsLikelyNotMountPoint.
	// IsNotMountPoint detects bind mounts in linux.
	// IsNotMountPoint enumerates all the mountpoints using List() and
	// the list of mountpoints may be large, then it uses
	// IsMountPointMatch to evaluate whether the directory is a mountpoint.
	IsNotMountPoint(file string) (bool, error)
	// IsLikelyNotMountPoint uses heuristics to determine if a directory
	// is a mountpoint.
	// It should return ErrNotExist when the directory does not exist.
	// IsLikelyNotMountPoint does NOT properly detect all mountpoint types
	// most notably linux bind mounts.
	IsLikelyNotMountPoint(file string) (bool, error)
	// DeviceOpened determines if the device is in use elsewhere
	// on the system, i.e. still mounted.
	DeviceOpened(pathname string) (bool, error)
	// PathIsDevice determines if a path is a device.
	PathIsDevice(pathname string) (bool, error)
	// GetDeviceNameFromMount finds the device name by checking the mount path
	// to get the global mount path which matches its plugin directory.
	GetDeviceNameFromMount(mountPath, pluginDir string) (string, error)
	// MakeRShared checks that given path is on a mount with 'rshared' mount
	// propagation. If not, it bind-mounts the path as rshared.
	MakeRShared(path string) error
	// GetFileType checks for file/directory/socket/block/character devices.
	// Will operate in the host mount namespace if kubelet is running in a container.
	GetFileType(pathname string) (FileType, error)
	// MakeFile creates an empty file.
	// Will operate in the host mount namespace if kubelet is running in a container.
	MakeFile(pathname string) error
	// MakeDir creates a new directory.
	// Will operate in the host mount namespace if kubelet is running in a container.
	MakeDir(pathname string) error
	// ExistsPath checks whether the path exists.
	// Will operate in the host mount namespace if kubelet is running in a container.
	ExistsPath(pathname string) bool
}

// Exec executes command where mount utilities are. This can be either the host,
// container where kubelet runs or even a remote pod with mount utilities.
// Usual pkg/util/exec interface is not used because kubelet.RunInContainer does
// not provide stdin/stdout/stderr streams.
type Exec interface {
	// Run executes a command and returns its stdout + stderr combined in one
	// stream.
	Run(cmd string, args ...string) ([]byte, error)
}

// Compile-time check to ensure all Mounter implementations satisfy
// the mount interface.
var _ Interface = &Mounter{}

// MountPoint represents a single line in /proc/mounts or /etc/fstab.
type MountPoint struct {
	Device string
	Path   string
	Type   string
	Opts   []string
	Freq   int
	Pass   int
}
// SafeFormatAndMount probes a device to see if it is formatted.
// Namely it checks to see if a file system is present. If so it
// mounts it otherwise the device is formatted first then mounted.
type SafeFormatAndMount struct {
	Interface // platform mounter performing the actual mount calls
	Exec      // runner for the probe/format utilities
}
// FormatAndMount formats the given disk, if needed, and mounts it.
// That is if the disk is not formatted and it is not being mounted as
// read-only it will format it first then mount it. Otherwise, if the
// disk is already formatted or it is being mounted as read-only, it
// will be mounted without formatting.
func (mounter *SafeFormatAndMount) FormatAndMount(source string, target string, fstype string, options []string) error {
	// A read-only mount can never be formatted; go straight to mounting.
	readOnly := false
	for _, opt := range options {
		if opt == "ro" {
			readOnly = true
			break
		}
	}
	if readOnly {
		return mounter.Interface.Mount(source, target, fstype, options)
	}
	return mounter.formatAndMount(source, target, fstype, options)
}
// GetMountRefsByDev finds all references to the device provided
// by mountPath; returns a list of paths.
func GetMountRefsByDev(mounter Interface, mountPath string) ([]string, error) {
	mounts, err := mounter.List()
	if err != nil {
		return nil, err
	}

	// Resolve symlinks; fall back to the raw path on failure.
	target, err := filepath.EvalSymlinks(mountPath)
	if err != nil {
		target = mountPath
	}

	// Locate the device mounted at target.
	var device string
	for _, mp := range mounts {
		if mp.Path == target {
			device = mp.Device
			break
		}
	}

	// Collect every other mount of that device (or of target itself).
	var refs []string
	for _, mp := range mounts {
		if (mp.Device == device || mp.Device == target) && mp.Path != target {
			refs = append(refs, mp.Path)
		}
	}
	return refs, nil
}
// GetDeviceNameFromMount: given a mnt point, find the device from /proc/mounts
// returns the device name, reference count, and error code.
func GetDeviceNameFromMount(mounter Interface, mountPath string) (string, int, error) {
	mounts, err := mounter.List()
	if err != nil {
		return "", 0, err
	}

	// If mountPath is a symlink, resolve it; fall back to the raw path.
	target, err := filepath.EvalSymlinks(mountPath)
	if err != nil {
		target = mountPath
	}

	// Find the device name.
	// FIXME if multiple devices mounted on the same mount path, only the first one is returned
	var device string
	for _, mp := range mounts {
		if mp.Path == target {
			device = mp.Device
			break
		}
	}

	// Count every mount of that device.
	refCount := 0
	for _, mp := range mounts {
		if mp.Device == device {
			refCount++
		}
	}
	return device, refCount, nil
}
// IsNotMountPoint determines if a directory is a mountpoint.
// It should return ErrNotExist when the directory does not exist.
// This method uses the List() of all mountpoints.
// It is more extensive than IsLikelyNotMountPoint
// and it detects bind mounts in linux.
func IsNotMountPoint(mounter Interface, file string) (bool, error) {
	// IsLikelyNotMountPoint provides a quick check
	// to determine whether file IS A mountpoint.
	notMnt, notMntErr := mounter.IsLikelyNotMountPoint(file)
	if notMntErr != nil && os.IsPermission(notMntErr) {
		// We were not allowed to do the simple stat() check, e.g. on NFS with
		// root_squash. Fall back to /proc/mounts check below.
		notMnt = true
		notMntErr = nil
	}
	if notMntErr != nil {
		return notMnt, notMntErr
	}
	// Identified as a mountpoint, so return this fact.
	// (idiom fix: compare booleans with !, not "== false")
	if !notMnt {
		return notMnt, nil
	}
	// Check all mountpoints, since IsLikelyNotMountPoint
	// is not reliable for some mountpoint types.
	mountPoints, mountPointsErr := mounter.List()
	if mountPointsErr != nil {
		return notMnt, mountPointsErr
	}
	for _, mp := range mountPoints {
		if mounter.IsMountPointMatch(mp, file) {
			notMnt = false
			break
		}
	}
	return notMnt, nil
}
// isBind detects whether a bind mount is being requested and makes the remount options to
// use in case of bind mount, due to the fact that bind mount doesn't respect mount options.
// The list equals:
//   options - 'bind' + 'remount' (no duplicate)
func isBind(options []string) (bool, []string) {
	// "remount" always leads the result; any "remount" in the input is
	// dropped so it is not duplicated.
	bindRemountOpts := []string{"remount"}
	bind := false

	// IDIOM FIX: Go switch cases do not fall through, so the original
	// "break" statements were redundant; the len(options) != 0 guard was
	// equally unnecessary (ranging over an empty slice is a no-op).
	for _, option := range options {
		switch option {
		case "bind":
			bind = true
		case "remount":
			// already included above
		default:
			bindRemountOpts = append(bindRemountOpts, option)
		}
	}

	return bind, bindRemountOpts
}
// TODO: this is a workaround for the unmount device issue caused by gci mounter.
// In GCI cluster, if gci mounter is used for mounting, the container started by mounter
// script will cause additional mounts created in the container. Since these mounts are
// irrelevant to the original mounts, they should be not considered when checking the
// mount references. Current solution is to filter out those mount paths that contain
// the string of original mount path.
// Plan to work on better approach to solve this issue.

// HasMountRefs reports whether any entry of mountRefs does NOT contain
// mountPath as a substring, i.e. whether genuine outside references to the
// mount remain.
func HasMountRefs(mountPath string, mountRefs []string) bool {
	// Simplified from a counting loop to an early return; one outside
	// reference is enough to answer true.
	for _, ref := range mountRefs {
		if !strings.Contains(ref, mountPath) {
			return true
		}
	}
	return false
}
|
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
"fmt"
"log"
"os"
"os/signal"
"github.com/googlecloudplatform/gcsfuse/fs"
"github.com/googlecloudplatform/gcsfuse/timeutil"
"github.com/jacobsa/fuse"
"golang.org/x/net/context"
)
// usage prints the command-line synopsis and flag defaults to stderr.
func usage() {
	prog := os.Args[0]
	fmt.Fprintf(os.Stderr, "Usage of %s:\n", prog)
	fmt.Fprintf(os.Stderr, "  %s [flags] <mount-point>\n", prog)
	flag.PrintDefaults()
}
// Command-line flags.
var fBucketName = flag.String("bucket", "", "Name of GCS bucket to mount.")
var fImplicitDirs = flag.Bool(
	"implicit_dirs",
	false,
	"Implicitly define directories based on their content. See docs/semantics.md.")
// NOTE(review): variable name is misspelled (fSupprtNlink -> fSupportNlink);
// renaming it requires updating its use in main() in the same change.
var fSupprtNlink = flag.Bool(
	"support_nlink",
	false,
	"Return meaningful values for nlink from fstat(2). See docs/semantics.md.")
// getBucketName returns the value of --bucket, exiting with an error message
// when the flag was not provided.
func getBucketName() string {
	name := *fBucketName
	if name != "" {
		return name
	}
	fmt.Println("You must set --bucket.")
	os.Exit(1)
	return "" // unreachable; satisfies the compiler after os.Exit
}
// registerSIGINTHandler installs a SIGINT handler that attempts to unmount
// the file system. It keeps retrying on each subsequent SIGINT until the
// unmount succeeds, then stops listening.
func registerSIGINTHandler(mountPoint string) {
	// Register for SIGINT.
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, os.Interrupt)
	// Start a goroutine that will unmount when the signal is received.
	go func() {
		for range sigChan {
			log.Println("Received SIGINT, attempting to unmount...")
			if err := fuse.Unmount(mountPoint); err != nil {
				log.Printf("Failed to unmount in response to SIGINT: %v", err)
				continue
			}
			log.Printf("Successfully unmounted in response to SIGINT.")
			return
		}
	}()
}
// main mounts the GCS bucket named by --bucket at the mount point given as
// the single positional argument, then blocks until the file system is
// unmounted (e.g. via Ctrl-C, handled by registerSIGINTHandler).
func main() {
	// Set up flags.
	flag.Usage = usage
	flag.Parse()
	// Grab the mount point.
	if flag.NArg() != 1 {
		usage()
		os.Exit(1)
	}
	mountPoint := flag.Arg(0)
	// Set up a GCS connection.
	log.Println("Initializing GCS connection.")
	conn, err := getConn()
	if err != nil {
		log.Fatal("Couldn't get GCS connection: ", err)
	}
	// Create a file system server.
	serverCfg := &fs.ServerConfig{
		Clock:               timeutil.RealClock(),
		Bucket:              conn.GetBucket(getBucketName()),
		ImplicitDirectories: *fImplicitDirs,
		// NOTE(review): references the misspelled flag variable fSupprtNlink
		// declared above; rename both together if fixing the typo.
		SupportNlink: *fSupprtNlink,
	}
	server, err := fs.NewServer(serverCfg)
	if err != nil {
		log.Fatal("fs.NewServer:", err)
	}
	// Mount the file system.
	mountedFS, err := fuse.Mount(mountPoint, server, &fuse.MountConfig{})
	if err != nil {
		log.Fatal("Mount:", err)
	}
	log.Println("File system has been successfully mounted.")
	// Let the user unmount with Ctrl-C (SIGINT).
	registerSIGINTHandler(mountedFS.Dir())
	// Wait for it to be unmounted.
	if err := mountedFS.Join(context.Background()); err != nil {
		log.Fatal("MountedFileSystem.Join:", err)
	}
	log.Println("Successfully exiting.")
}
Fixed a typo: renamed the --support_nlink flag variable from fSupprtNlink to fSupportNlink.
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
"fmt"
"log"
"os"
"os/signal"
"github.com/googlecloudplatform/gcsfuse/fs"
"github.com/googlecloudplatform/gcsfuse/timeutil"
"github.com/jacobsa/fuse"
"golang.org/x/net/context"
)
// usage prints the command-line synopsis and flag defaults to stderr.
func usage() {
	prog := os.Args[0]
	fmt.Fprintf(os.Stderr, "Usage of %s:\n", prog)
	fmt.Fprintf(os.Stderr, "  %s [flags] <mount-point>\n", prog)
	flag.PrintDefaults()
}
// Command-line flags.
var fBucketName = flag.String("bucket", "", "Name of GCS bucket to mount.")
var fImplicitDirs = flag.Bool(
	"implicit_dirs",
	false,
	"Implicitly define directories based on their content. See docs/semantics.md.")
var fSupportNlink = flag.Bool(
	"support_nlink",
	false,
	"Return meaningful values for nlink from fstat(2). See docs/semantics.md.")
// getBucketName returns the value of --bucket, exiting with an error message
// when the flag was not provided.
func getBucketName() string {
	name := *fBucketName
	if name != "" {
		return name
	}
	fmt.Println("You must set --bucket.")
	os.Exit(1)
	return "" // unreachable; satisfies the compiler after os.Exit
}
// registerSIGINTHandler installs a SIGINT handler that attempts to unmount
// the file system. It keeps retrying on each subsequent SIGINT until the
// unmount succeeds, then stops listening.
func registerSIGINTHandler(mountPoint string) {
	// Register for SIGINT.
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, os.Interrupt)
	// Start a goroutine that will unmount when the signal is received.
	go func() {
		for range sigChan {
			log.Println("Received SIGINT, attempting to unmount...")
			if err := fuse.Unmount(mountPoint); err != nil {
				log.Printf("Failed to unmount in response to SIGINT: %v", err)
				continue
			}
			log.Printf("Successfully unmounted in response to SIGINT.")
			return
		}
	}()
}
// main mounts the GCS bucket named by --bucket at the mount point given as
// the single positional argument, then blocks until the file system is
// unmounted (e.g. via Ctrl-C, handled by registerSIGINTHandler).
func main() {
	// Set up flags.
	flag.Usage = usage
	flag.Parse()
	// Grab the mount point.
	if flag.NArg() != 1 {
		usage()
		os.Exit(1)
	}
	mountPoint := flag.Arg(0)
	// Set up a GCS connection.
	log.Println("Initializing GCS connection.")
	conn, err := getConn()
	if err != nil {
		log.Fatal("Couldn't get GCS connection: ", err)
	}
	// Create a file system server.
	serverCfg := &fs.ServerConfig{
		Clock:               timeutil.RealClock(),
		Bucket:              conn.GetBucket(getBucketName()),
		ImplicitDirectories: *fImplicitDirs,
		SupportNlink:        *fSupportNlink,
	}
	server, err := fs.NewServer(serverCfg)
	if err != nil {
		log.Fatal("fs.NewServer:", err)
	}
	// Mount the file system.
	mountedFS, err := fuse.Mount(mountPoint, server, &fuse.MountConfig{})
	if err != nil {
		log.Fatal("Mount:", err)
	}
	log.Println("File system has been successfully mounted.")
	// Let the user unmount with Ctrl-C (SIGINT).
	registerSIGINTHandler(mountedFS.Dir())
	// Wait for it to be unmounted.
	if err := mountedFS.Join(context.Background()); err != nil {
		log.Fatal("MountedFileSystem.Join:", err)
	}
	log.Println("Successfully exiting.")
}
|
package main
import (
"encoding/json"
"errors"
"flag"
"fmt"
"log"
"os"
"time"
)
// config is the top-level shape of the JSON configuration file.
type config struct {
	TwitterAuth twitterAuth `json:"twitterAuth"`
}

// twitterAuth holds the OAuth1 credentials used to post tweets.
type twitterAuth struct {
	ConsumerKey       string `json:"consumerKey"`
	ConsumerSecret    string `json:"consumerSecret"`
	AccessToken       string `json:"accessToken"`
	AccessTokenSecret string `json:"accessTokenSecret"`
}
// Command-line flags: config file path, tweet data file path, and the
// posting interval in minutes (default 1440 = once per day).
var configFile = flag.String("config", "config.json", "path to config file")
var dataFile = flag.String("data", "tweets.json", "path to json file containing tweets")
var frequency = flag.Int64("freq", 1440, "Frequency to post each tweet (in minutes)")
// main loads the config and the tweet list once, then posts one unposted
// tweet per ticker interval until none remain, persisting the IsPosted flag
// after each successful post. Fatal errors are reported via the deferred
// handler so flag defaults are printed alongside the message.
func main() {
	var fatalErr error
	defer func() {
		if fatalErr != nil {
			flag.PrintDefaults()
			log.Fatalln(fatalErr)
		}
	}()
	flag.Parse()
	var config config
	// NOTE(review): the config file is never closed; consider defer file.Close().
	file, err := os.Open(*configFile)
	if err != nil {
		fatalErr = fmt.Errorf("can't open %s: %s", *configFile, err)
		return
	}
	if err := json.NewDecoder(file).Decode(&config); err != nil {
		fatalErr = fmt.Errorf("can't decode %s: %s", *configFile, err)
		return
	}
	fmt.Println("Go Twitter Bot is running...")
	// Tweets are loaded once; edits to the data file after startup are not seen.
	tweets, err := LoadTweets(*dataFile)
	if err != nil {
		fatalErr = fmt.Errorf("problem loading tweets: %s", err)
		return
	}
	// Note: a Ticker fires only after a full interval, so the first post
	// happens *frequency* minutes after startup.
	ticker := time.NewTicker(time.Minute * time.Duration(*frequency))
	for range ticker.C {
		tweet, err := getNextTweet(tweets)
		if err == errNoMoreTweetsToPost {
			// All tweets posted: stop the ticker and exit the loop for good.
			ticker.Stop()
			break
		}
		fmt.Printf("Tweeting: %s\n\n", tweet.Text)
		err = postTweet(config.TwitterAuth, tweet.Text)
		if err != nil {
			fatalErr = err
			return
		}
		tweet.IsPosted = true
		// Persist progress so a restart does not repost old tweets.
		err = SaveTweets(tweets, *dataFile)
		if err != nil {
			fatalErr = fmt.Errorf("problem saving tweets: %s", err)
			return
		}
	}
	fmt.Println("That's all the tweets")
}
// errNoMoreTweetsToPost signals that every tweet in the list has been posted.
var errNoMoreTweetsToPost = errors.New("No more tweets left to be posted")

// getNextTweet returns a pointer to the first tweet that has not been posted
// yet, or errNoMoreTweetsToPost when none is left.
func getNextTweet(tweets []Tweet) (*Tweet, error) {
	for i := 0; i < len(tweets); i++ {
		if tweets[i].IsPosted {
			continue
		}
		return &tweets[i], nil
	}
	return nil, errNoMoreTweetsToPost
}
Fixed: allow the program to keep running after all tweets have been posted, reloading tweets.json on every tick so newly added tweets are picked up and posted.
package main
import (
"encoding/json"
"errors"
"flag"
"fmt"
"log"
"os"
"time"
)
// config is the top-level shape of the JSON configuration file.
type config struct {
	TwitterAuth twitterAuth `json:"twitterAuth"`
}

// twitterAuth holds the OAuth1 credentials used to post tweets.
type twitterAuth struct {
	ConsumerKey       string `json:"consumerKey"`
	ConsumerSecret    string `json:"consumerSecret"`
	AccessToken       string `json:"accessToken"`
	AccessTokenSecret string `json:"accessTokenSecret"`
}
// Command-line flags: config file path, tweet data file path, and the
// posting interval in minutes (default 1440 = once per day).
var configFile = flag.String("config", "config.json", "path to config file")
var dataFile = flag.String("data", "tweets.json", "path to json file containing tweets")
var frequency = flag.Int64("freq", 1440, "Frequency to post each tweet (in minutes)")
// main loads the config once, then on every ticker interval reloads the
// tweet list from disk, posts the next unposted tweet (if any), and persists
// the IsPosted flag. Reloading each tick means tweets added to the data file
// while the bot is running are picked up without a restart.
func main() {
	var fatalErr error
	defer func() {
		if fatalErr != nil {
			flag.PrintDefaults()
			log.Fatalln(fatalErr)
		}
	}()
	flag.Parse()
	var config config
	// NOTE(review): the config file is never closed; consider defer file.Close().
	file, err := os.Open(*configFile)
	if err != nil {
		fatalErr = fmt.Errorf("can't open %s: %s", *configFile, err)
		return
	}
	if err := json.NewDecoder(file).Decode(&config); err != nil {
		fatalErr = fmt.Errorf("can't decode %s: %s", *configFile, err)
		return
	}
	fmt.Println("Go Twitter Bot is running...")
	// Note: a Ticker fires only after a full interval, so the first post
	// happens *frequency* minutes after startup.
	ticker := time.NewTicker(time.Minute * time.Duration(*frequency))
	for range ticker.C {
		// Re-read the data file so new tweets appended since startup are seen.
		tweets, err := LoadTweets(*dataFile)
		if err != nil {
			fatalErr = fmt.Errorf("problem loading tweets: %s", err)
			return
		}
		tweet, err := getNextTweet(tweets)
		if err == errNoMoreTweetsToPost {
			// Nothing to post this round; keep ticking and try again later.
			fmt.Println("That's all the tweets")
			continue
		}
		fmt.Printf("Tweeting: %s\n\n", tweet.Text)
		err = postTweet(config.TwitterAuth, tweet.Text)
		if err != nil {
			fatalErr = err
			return
		}
		tweet.IsPosted = true
		// Persist progress so a restart does not repost old tweets.
		err = SaveTweets(tweets, *dataFile)
		if err != nil {
			fatalErr = fmt.Errorf("problem saving tweets: %s", err)
			return
		}
	}
}
// errNoMoreTweetsToPost signals that every tweet in the list has been posted.
var errNoMoreTweetsToPost = errors.New("No more tweets left to be posted")

// getNextTweet returns a pointer to the first tweet that has not been posted
// yet, or errNoMoreTweetsToPost when none is left.
func getNextTweet(tweets []Tweet) (*Tweet, error) {
	for i := range tweets {
		if !tweets[i].IsPosted {
			return &tweets[i], nil
		}
	}
	return nil, errNoMoreTweetsToPost
}
|
package app
import (
"context"
"errors"
"fmt"
"io"
"sort"
"strings"
"time"
"github.com/karimra/gnmic/config"
"github.com/karimra/gnmic/formatters"
"github.com/karimra/gnmic/types"
"github.com/manifoldco/promptui"
"github.com/openconfig/gnmi/proto/gnmi"
"github.com/openconfig/grpctunnel/tunnel"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
const (
	// initLockerRetryTimer is the delay between retries when the
	// distributed locker fails to initialize.
	initLockerRetryTimer = 1 * time.Second
)

// SubscribePreRunE merges file-level configuration into the command's local
// flags and prepares the gRPC dial options used by the collector.
func (a *App) SubscribePreRunE(cmd *cobra.Command, args []string) error {
	a.Config.SetLocalFlagsFromFile(cmd)
	a.createCollectorDialOpts()
	return nil
}
// SubscribeRunE is the entry point of the subscribe command. It dispatches to
// prompt/once/poll handling when applicable, otherwise it starts the
// streaming pipeline: tunnel server, configs, locker, API/gNMI servers,
// cluster, and target subscriptions; then blocks until the app context ends.
func (a *App) SubscribeRunE(cmd *cobra.Command, args []string) error {
	defer a.InitSubscribeFlags(cmd)
	// prompt mode
	if a.PromptMode {
		return a.SubscribeRunPrompt(cmd, args)
	}
	//
	subCfg, err := a.Config.GetSubscriptions(cmd)
	if err != nil {
		return fmt.Errorf("failed reading subscriptions config: %v", err)
	}
	if len(subCfg) == 0 {
		return errors.New("no subscriptions configuration found")
	}
	// only once mode subscriptions requested
	if allSubscriptionsModeOnce(subCfg) {
		return a.SubscribeRunONCE(cmd, args, subCfg)
	}
	// only poll mode subscriptions requested
	if allSubscriptionsModePoll(subCfg) {
		return a.SubscribeRunPoll(cmd, args, subCfg)
	}
	// stream subscriptions
	err = a.initTunnelServer(tunnel.ServerConfig{
		AddTargetHandler:    a.tunServerAddTargetSubscribeHandler,
		DeleteTargetHandler: a.tunServerDeleteTargetHandler,
		RegisterHandler:     a.tunServerRegisterHandler,
		Handler:             a.tunServerHandler,
	})
	if err != nil {
		return err
	}
	// Missing targets is tolerated when targets can still arrive later
	// (watched config, a loader, or the tunnel server).
	_, err = a.Config.GetTargets()
	if errors.Is(err, config.ErrNoTargetsFound) {
		if !a.Config.LocalFlags.SubscribeWatchConfig &&
			len(a.Config.FileConfig.GetStringMap("loader")) == 0 &&
			!a.Config.UseTunnelServer {
			return fmt.Errorf("failed reading targets config: %v", err)
		}
	} else if err != nil {
		return fmt.Errorf("failed reading targets config: %v", err)
	}
	err = a.readConfigs()
	if err != nil {
		return err
	}
	err = a.Config.GetClustering()
	if err != nil {
		return err
	}
	err = a.Config.GetGNMIServer()
	if err != nil {
		return err
	}
	err = a.Config.GetAPIServer()
	if err != nil {
		return err
	}
	err = a.Config.GetLoader()
	if err != nil {
		return err
	}
	//
	// Retry locker initialization forever; streaming cannot start without it.
	for {
		err := a.InitLocker()
		if err != nil {
			a.Logger.Printf("failed to init locker: %v", err)
			time.Sleep(initLockerRetryTimer)
			continue
		}
		break
	}
	a.startAPIServer()
	a.startGnmiServer()
	go a.startCluster()
	a.startIO()
	if a.Config.LocalFlags.SubscribeWatchConfig {
		go a.watchConfig()
	}
	// NOTE(review): ranging over a closed Done() channel never executes the
	// body, so this blocks until cancellation and then falls through to
	// `return nil`; `<-a.ctx.Done(); return a.ctx.Err()` would be clearer.
	for range a.ctx.Done() {
		return a.ctx.Err()
	}
	return nil
}
//
// subscribeStream runs a streaming subscription for one target; meant to be
// launched as a goroutine with a.wg incremented by the caller.
func (a *App) subscribeStream(ctx context.Context, tc *types.TargetConfig) {
	defer a.wg.Done()
	a.TargetSubscribeStream(ctx, tc)
}

// subscribeOnce runs a once-mode subscription for one target and logs any error.
func (a *App) subscribeOnce(ctx context.Context, tc *types.TargetConfig) {
	defer a.wg.Done()
	err := a.TargetSubscribeOnce(ctx, tc)
	if err != nil {
		a.logError(err)
	}
}

// subscribePoll runs a poll-mode subscription for one target.
func (a *App) subscribePoll(ctx context.Context, tc *types.TargetConfig) {
	defer a.wg.Done()
	a.TargetSubscribePoll(ctx, tc)
}
// InitSubscribeFlags used to init or reset subscribeCmd flags for gnmic-prompt mode.
// It re-registers every subscribe flag on cmd and binds each one to the
// corresponding "<cmd-name>-<flag-name>" key in the file configuration.
func (a *App) InitSubscribeFlags(cmd *cobra.Command) {
	cmd.ResetFlags()
	cmd.Flags().StringVarP(&a.Config.LocalFlags.SubscribePrefix, "prefix", "", "", "subscribe request prefix")
	cmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SubscribePath, "path", "", []string{}, "subscribe request paths")
	//cmd.MarkFlagRequired("path")
	cmd.Flags().Uint32VarP(&a.Config.LocalFlags.SubscribeQos, "qos", "q", 0, "qos marking")
	cmd.Flags().BoolVarP(&a.Config.LocalFlags.SubscribeUpdatesOnly, "updates-only", "", false, "only updates to current state should be sent")
	cmd.Flags().StringVarP(&a.Config.LocalFlags.SubscribeMode, "mode", "", "stream", "one of: once, stream, poll")
	cmd.Flags().StringVarP(&a.Config.LocalFlags.SubscribeStreamMode, "stream-mode", "", "target-defined", "one of: on-change, sample, target-defined")
	cmd.Flags().DurationVarP(&a.Config.LocalFlags.SubscribeSampleInterval, "sample-interval", "i", 0,
		"sample interval as a decimal number and a suffix unit, such as \"10s\" or \"1m30s\"")
	// Fixed help text: previously read "didn't not change" (double negative).
	cmd.Flags().BoolVarP(&a.Config.LocalFlags.SubscribeSuppressRedundant, "suppress-redundant", "", false, "suppress redundant update if the subscribed value didn't change")
	// NOTE(review): the field name SubscribeHeartbearInterval is misspelled in
	// the config struct; kept as-is to match the declaration.
	cmd.Flags().DurationVarP(&a.Config.LocalFlags.SubscribeHeartbearInterval, "heartbeat-interval", "", 0, "heartbeat interval in case suppress-redundant is enabled")
	cmd.Flags().StringSliceVarP(&a.Config.LocalFlags.SubscribeModel, "model", "", []string{}, "subscribe request used model(s)")
	cmd.Flags().BoolVar(&a.Config.LocalFlags.SubscribeQuiet, "quiet", false, "suppress stdout printing")
	cmd.Flags().StringVarP(&a.Config.LocalFlags.SubscribeTarget, "target", "", "", "subscribe request target")
	cmd.Flags().BoolVarP(&a.Config.LocalFlags.SubscribeSetTarget, "set-target", "", false, "set target name in gNMI Path prefix")
	cmd.Flags().StringSliceVarP(&a.Config.LocalFlags.SubscribeName, "name", "n", []string{}, "reference subscriptions by name, must be defined in gnmic config file")
	cmd.Flags().StringSliceVarP(&a.Config.LocalFlags.SubscribeOutput, "output", "", []string{}, "reference to output groups by name, must be defined in gnmic config file")
	cmd.Flags().BoolVarP(&a.Config.LocalFlags.SubscribeWatchConfig, "watch-config", "", false, "watch configuration changes, add or delete subscribe targets accordingly")
	cmd.Flags().DurationVarP(&a.Config.LocalFlags.SubscribeBackoff, "backoff", "", 0, "backoff time between subscribe requests")
	cmd.Flags().DurationVarP(&a.Config.LocalFlags.SubscribeLockRetry, "lock-retry", "", 5*time.Second, "time to wait between target lock attempts")
	//
	cmd.LocalFlags().VisitAll(func(flag *pflag.Flag) {
		a.Config.FileConfig.BindPFlag(fmt.Sprintf("%s-%s", cmd.Name(), flag.Name), flag)
	})
}
// readConfigs loads each configuration section in order (outputs, inputs,
// actions, event processors, proto files), failing fast with a
// section-specific error message.
func (a *App) readConfigs() error {
	for _, step := range []struct {
		format string
		load   func() error
	}{
		{"failed reading outputs config: %v", func() error { _, err := a.Config.GetOutputs(); return err }},
		{"failed reading inputs config: %v", func() error { _, err := a.Config.GetInputs(); return err }},
		{"failed reading actions config: %v", func() error { _, err := a.Config.GetActions(); return err }},
		{"failed reading event processors config: %v", func() error { _, err := a.Config.GetEventProcessors(); return err }},
		{"failed loading proto files: %v", func() error { _, err := a.LoadProtoFiles(); return err }},
	} {
		if err := step.load(); err != nil {
			return fmt.Errorf(step.format, err)
		}
	}
	return nil
}
// handlePolledSubscriptions drives poll-mode subscriptions interactively: it
// prompts the user to pick a target, then one of that target's
// subscriptions, issues a poll, and prints the formatted response. It loops
// until the application context is canceled.
func (a *App) handlePolledSubscriptions() {
	polledTargetsSubscriptions := a.PolledSubscriptionsTargets()
	if len(polledTargetsSubscriptions) > 0 {
		pollTargets := make([]string, 0, len(polledTargetsSubscriptions))
		for t := range polledTargetsSubscriptions {
			pollTargets = append(pollTargets, t)
		}
		// Sort for a stable prompt ordering (map iteration order is random).
		sort.Slice(pollTargets, func(i, j int) bool {
			return pollTargets[i] < pollTargets[j]
		})
		s := promptui.Select{
			Label:        "select target to poll",
			Items:        pollTargets,
			HideSelected: true,
		}
		// waitChan gates each prompt round; a token is pushed back once a
		// round completes so the next prompt can start.
		waitChan := make(chan struct{}, 1)
		waitChan <- struct{}{}
		mo := &formatters.MarshalOptions{
			Multiline: true,
			Indent:    "  ",
			Format:    a.Config.Format,
		}
		for {
			select {
			case <-waitChan:
				_, name, err := s.Run()
				if err != nil {
					// NOTE(review): this (and the error paths below that
					// `continue` without re-feeding waitChan) leaves the
					// token unreplaced, so no further prompt round can
					// start — verify whether that is intended.
					fmt.Printf("failed selecting target to poll: %v\n", err)
					continue
				}
				ss := promptui.Select{
					Label:        "select subscription to poll",
					Items:        polledTargetsSubscriptions[name],
					HideSelected: true,
				}
				_, subName, err := ss.Run()
				if err != nil {
					fmt.Printf("failed selecting subscription to poll: %v\n", err)
					continue
				}
				response, err := a.clientSubscribePoll(name, subName)
				if err != nil && err != io.EOF {
					fmt.Printf("target '%s', subscription '%s': poll response error:%v\n", name, subName, err)
					continue
				}
				if response == nil {
					fmt.Printf("received empty response from target '%s'\n", name)
					continue
				}
				// Sync responses carry no data; print and start the next round.
				switch rsp := response.Response.(type) {
				case *gnmi.SubscribeResponse_SyncResponse:
					fmt.Printf("received sync response '%t' from '%s'\n", rsp.SyncResponse, name)
					waitChan <- struct{}{}
					continue
				}
				b, err := mo.Marshal(response, nil)
				if err != nil {
					fmt.Printf("target '%s', subscription '%s': poll response formatting error:%v\n", name, subName, err)
					fmt.Println(string(b))
					waitChan <- struct{}{}
					continue
				}
				fmt.Println(string(b))
				waitChan <- struct{}{}
			case <-a.ctx.Done():
				return
			}
		}
	}
}
// startIO starts the collector, outputs and inputs. When not running as a
// cluster member it also launches the target loader and one streaming
// subscription goroutine per configured target (unless the tunnel server is
// used), optionally rate-limited by the --backoff ticker, then blocks until
// all subscription goroutines finish.
func (a *App) startIO() {
	go a.StartCollector(a.ctx)
	a.InitOutputs(a.ctx)
	a.InitInputs(a.ctx)
	if !a.inCluster() {
		go a.startLoader(a.ctx)
		var limiter *time.Ticker
		if a.Config.LocalFlags.SubscribeBackoff > 0 {
			limiter = time.NewTicker(a.Config.LocalFlags.SubscribeBackoff)
		}
		if !a.Config.UseTunnelServer {
			for _, tc := range a.Config.Targets {
				a.wg.Add(1)
				go a.subscribeStream(a.ctx, tc)
				if limiter != nil {
					// Wait one backoff interval before the next target.
					<-limiter.C
				}
			}
		}
		if limiter != nil {
			limiter.Stop()
		}
		a.wg.Wait()
	}
}
// allSubscriptionsModeOnce reports whether every configured subscription
// uses mode "once" (case-insensitive).
func allSubscriptionsModeOnce(sc map[string]*types.SubscriptionConfig) bool {
	for _, sub := range sc {
		if strings.ToUpper(sub.Mode) == "ONCE" {
			continue
		}
		return false
	}
	return true
}

// allSubscriptionsModePoll reports whether every configured subscription
// uses mode "poll" (case-insensitive).
func allSubscriptionsModePoll(sc map[string]*types.SubscriptionConfig) bool {
	for _, sub := range sc {
		if strings.ToUpper(sub.Mode) == "POLL" {
			continue
		}
		return false
	}
	return true
}
Allow running the subscribe command with only inputs configured (no subscriptions defined).
package app
import (
"context"
"errors"
"fmt"
"io"
"sort"
"strings"
"time"
"github.com/karimra/gnmic/config"
"github.com/karimra/gnmic/formatters"
"github.com/karimra/gnmic/types"
"github.com/manifoldco/promptui"
"github.com/openconfig/gnmi/proto/gnmi"
"github.com/openconfig/grpctunnel/tunnel"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
const (
	// initLockerRetryTimer is the delay between retries when the
	// distributed locker fails to initialize.
	initLockerRetryTimer = 1 * time.Second
)

// SubscribePreRunE merges file-level configuration into the command's local
// flags and prepares the gRPC dial options used by the collector.
func (a *App) SubscribePreRunE(cmd *cobra.Command, args []string) error {
	a.Config.SetLocalFlagsFromFile(cmd)
	a.createCollectorDialOpts()
	return nil
}
// SubscribeRunE is the entry point of the subscribe command. Configs are read
// up front so the command can run with only inputs configured (no
// subscriptions); it then dispatches to prompt/once/poll handling when
// applicable, otherwise starts the streaming pipeline (tunnel server, locker,
// API/gNMI servers, cluster, target subscriptions) and blocks until the app
// context ends.
func (a *App) SubscribeRunE(cmd *cobra.Command, args []string) error {
	defer a.InitSubscribeFlags(cmd)
	// prompt mode
	if a.PromptMode {
		return a.SubscribeRunPrompt(cmd, args)
	}
	//
	subCfg, err := a.Config.GetSubscriptions(cmd)
	if err != nil {
		return fmt.Errorf("failed reading subscriptions config: %v", err)
	}
	err = a.readConfigs()
	if err != nil {
		return err
	}
	err = a.Config.GetClustering()
	if err != nil {
		return err
	}
	err = a.Config.GetGNMIServer()
	if err != nil {
		return err
	}
	err = a.Config.GetAPIServer()
	if err != nil {
		return err
	}
	err = a.Config.GetLoader()
	if err != nil {
		return err
	}
	// The command needs at least one subscription or one input to be useful.
	if len(subCfg) == 0 && len(a.Config.Inputs) == 0 {
		return errors.New("no subscriptions or inputs configuration found")
	}
	// only once mode subscriptions requested
	if allSubscriptionsModeOnce(subCfg) {
		return a.SubscribeRunONCE(cmd, args, subCfg)
	}
	// only poll mode subscriptions requested
	if allSubscriptionsModePoll(subCfg) {
		return a.SubscribeRunPoll(cmd, args, subCfg)
	}
	// stream subscriptions
	err = a.initTunnelServer(tunnel.ServerConfig{
		AddTargetHandler:    a.tunServerAddTargetSubscribeHandler,
		DeleteTargetHandler: a.tunServerDeleteTargetHandler,
		RegisterHandler:     a.tunServerRegisterHandler,
		Handler:             a.tunServerHandler,
	})
	if err != nil {
		return err
	}
	// Missing targets is tolerated when targets can still arrive later
	// (watched config, a loader, or the tunnel server).
	_, err = a.Config.GetTargets()
	if errors.Is(err, config.ErrNoTargetsFound) {
		if !a.Config.LocalFlags.SubscribeWatchConfig &&
			len(a.Config.FileConfig.GetStringMap("loader")) == 0 &&
			!a.Config.UseTunnelServer {
			return fmt.Errorf("failed reading targets config: %v", err)
		}
	} else if err != nil {
		return fmt.Errorf("failed reading targets config: %v", err)
	}
	//
	// Retry locker initialization forever; streaming cannot start without it.
	for {
		err := a.InitLocker()
		if err != nil {
			a.Logger.Printf("failed to init locker: %v", err)
			time.Sleep(initLockerRetryTimer)
			continue
		}
		break
	}
	a.startAPIServer()
	a.startGnmiServer()
	go a.startCluster()
	a.startIO()
	if a.Config.LocalFlags.SubscribeWatchConfig {
		go a.watchConfig()
	}
	// NOTE(review): ranging over a closed Done() channel never executes the
	// body, so this blocks until cancellation and then falls through to
	// `return nil`; `<-a.ctx.Done(); return a.ctx.Err()` would be clearer.
	for range a.ctx.Done() {
		return a.ctx.Err()
	}
	return nil
}
//
// subscribeStream runs a streaming subscription for one target; meant to be
// launched as a goroutine with a.wg incremented by the caller.
func (a *App) subscribeStream(ctx context.Context, tc *types.TargetConfig) {
	defer a.wg.Done()
	a.TargetSubscribeStream(ctx, tc)
}

// subscribeOnce runs a once-mode subscription for one target and logs any error.
func (a *App) subscribeOnce(ctx context.Context, tc *types.TargetConfig) {
	defer a.wg.Done()
	err := a.TargetSubscribeOnce(ctx, tc)
	if err != nil {
		a.logError(err)
	}
}

// subscribePoll runs a poll-mode subscription for one target.
func (a *App) subscribePoll(ctx context.Context, tc *types.TargetConfig) {
	defer a.wg.Done()
	a.TargetSubscribePoll(ctx, tc)
}
// InitSubscribeFlags used to init or reset subscribeCmd flags for gnmic-prompt mode.
// It re-registers every subscribe flag on cmd and binds each one to the
// corresponding "<cmd-name>-<flag-name>" key in the file configuration.
func (a *App) InitSubscribeFlags(cmd *cobra.Command) {
	cmd.ResetFlags()
	cmd.Flags().StringVarP(&a.Config.LocalFlags.SubscribePrefix, "prefix", "", "", "subscribe request prefix")
	cmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SubscribePath, "path", "", []string{}, "subscribe request paths")
	//cmd.MarkFlagRequired("path")
	cmd.Flags().Uint32VarP(&a.Config.LocalFlags.SubscribeQos, "qos", "q", 0, "qos marking")
	cmd.Flags().BoolVarP(&a.Config.LocalFlags.SubscribeUpdatesOnly, "updates-only", "", false, "only updates to current state should be sent")
	cmd.Flags().StringVarP(&a.Config.LocalFlags.SubscribeMode, "mode", "", "stream", "one of: once, stream, poll")
	cmd.Flags().StringVarP(&a.Config.LocalFlags.SubscribeStreamMode, "stream-mode", "", "target-defined", "one of: on-change, sample, target-defined")
	cmd.Flags().DurationVarP(&a.Config.LocalFlags.SubscribeSampleInterval, "sample-interval", "i", 0,
		"sample interval as a decimal number and a suffix unit, such as \"10s\" or \"1m30s\"")
	// Fixed help text: previously read "didn't not change" (double negative).
	cmd.Flags().BoolVarP(&a.Config.LocalFlags.SubscribeSuppressRedundant, "suppress-redundant", "", false, "suppress redundant update if the subscribed value didn't change")
	// NOTE(review): the field name SubscribeHeartbearInterval is misspelled in
	// the config struct; kept as-is to match the declaration.
	cmd.Flags().DurationVarP(&a.Config.LocalFlags.SubscribeHeartbearInterval, "heartbeat-interval", "", 0, "heartbeat interval in case suppress-redundant is enabled")
	cmd.Flags().StringSliceVarP(&a.Config.LocalFlags.SubscribeModel, "model", "", []string{}, "subscribe request used model(s)")
	cmd.Flags().BoolVar(&a.Config.LocalFlags.SubscribeQuiet, "quiet", false, "suppress stdout printing")
	cmd.Flags().StringVarP(&a.Config.LocalFlags.SubscribeTarget, "target", "", "", "subscribe request target")
	cmd.Flags().BoolVarP(&a.Config.LocalFlags.SubscribeSetTarget, "set-target", "", false, "set target name in gNMI Path prefix")
	cmd.Flags().StringSliceVarP(&a.Config.LocalFlags.SubscribeName, "name", "n", []string{}, "reference subscriptions by name, must be defined in gnmic config file")
	cmd.Flags().StringSliceVarP(&a.Config.LocalFlags.SubscribeOutput, "output", "", []string{}, "reference to output groups by name, must be defined in gnmic config file")
	cmd.Flags().BoolVarP(&a.Config.LocalFlags.SubscribeWatchConfig, "watch-config", "", false, "watch configuration changes, add or delete subscribe targets accordingly")
	cmd.Flags().DurationVarP(&a.Config.LocalFlags.SubscribeBackoff, "backoff", "", 0, "backoff time between subscribe requests")
	cmd.Flags().DurationVarP(&a.Config.LocalFlags.SubscribeLockRetry, "lock-retry", "", 5*time.Second, "time to wait between target lock attempts")
	//
	cmd.LocalFlags().VisitAll(func(flag *pflag.Flag) {
		a.Config.FileConfig.BindPFlag(fmt.Sprintf("%s-%s", cmd.Name(), flag.Name), flag)
	})
}
// readConfigs loads each configuration section in order (outputs, inputs,
// actions, event processors, proto files), failing fast with a
// section-specific error message.
func (a *App) readConfigs() error {
	for _, step := range []struct {
		format string
		load   func() error
	}{
		{"failed reading outputs config: %v", func() error { _, err := a.Config.GetOutputs(); return err }},
		{"failed reading inputs config: %v", func() error { _, err := a.Config.GetInputs(); return err }},
		{"failed reading actions config: %v", func() error { _, err := a.Config.GetActions(); return err }},
		{"failed reading event processors config: %v", func() error { _, err := a.Config.GetEventProcessors(); return err }},
		{"failed loading proto files: %v", func() error { _, err := a.LoadProtoFiles(); return err }},
	} {
		if err := step.load(); err != nil {
			return fmt.Errorf(step.format, err)
		}
	}
	return nil
}
// handlePolledSubscriptions drives poll-mode subscriptions interactively: it
// prompts the user to pick a target, then one of that target's
// subscriptions, issues a poll, and prints the formatted response. It loops
// until the application context is canceled.
func (a *App) handlePolledSubscriptions() {
	polledTargetsSubscriptions := a.PolledSubscriptionsTargets()
	if len(polledTargetsSubscriptions) > 0 {
		pollTargets := make([]string, 0, len(polledTargetsSubscriptions))
		for t := range polledTargetsSubscriptions {
			pollTargets = append(pollTargets, t)
		}
		// Sort for a stable prompt ordering (map iteration order is random).
		sort.Slice(pollTargets, func(i, j int) bool {
			return pollTargets[i] < pollTargets[j]
		})
		s := promptui.Select{
			Label:        "select target to poll",
			Items:        pollTargets,
			HideSelected: true,
		}
		// waitChan gates each prompt round; a token is pushed back once a
		// round completes so the next prompt can start.
		waitChan := make(chan struct{}, 1)
		waitChan <- struct{}{}
		mo := &formatters.MarshalOptions{
			Multiline: true,
			Indent:    "  ",
			Format:    a.Config.Format,
		}
		for {
			select {
			case <-waitChan:
				_, name, err := s.Run()
				if err != nil {
					// NOTE(review): this (and the error paths below that
					// `continue` without re-feeding waitChan) leaves the
					// token unreplaced, so no further prompt round can
					// start — verify whether that is intended.
					fmt.Printf("failed selecting target to poll: %v\n", err)
					continue
				}
				ss := promptui.Select{
					Label:        "select subscription to poll",
					Items:        polledTargetsSubscriptions[name],
					HideSelected: true,
				}
				_, subName, err := ss.Run()
				if err != nil {
					fmt.Printf("failed selecting subscription to poll: %v\n", err)
					continue
				}
				response, err := a.clientSubscribePoll(name, subName)
				if err != nil && err != io.EOF {
					fmt.Printf("target '%s', subscription '%s': poll response error:%v\n", name, subName, err)
					continue
				}
				if response == nil {
					fmt.Printf("received empty response from target '%s'\n", name)
					continue
				}
				// Sync responses carry no data; print and start the next round.
				switch rsp := response.Response.(type) {
				case *gnmi.SubscribeResponse_SyncResponse:
					fmt.Printf("received sync response '%t' from '%s'\n", rsp.SyncResponse, name)
					waitChan <- struct{}{}
					continue
				}
				b, err := mo.Marshal(response, nil)
				if err != nil {
					fmt.Printf("target '%s', subscription '%s': poll response formatting error:%v\n", name, subName, err)
					fmt.Println(string(b))
					waitChan <- struct{}{}
					continue
				}
				fmt.Println(string(b))
				waitChan <- struct{}{}
			case <-a.ctx.Done():
				return
			}
		}
	}
}
// startIO starts the collector, outputs and inputs. When not running as a
// cluster member it also launches the target loader and one streaming
// subscription goroutine per configured target (unless the tunnel server is
// used), optionally rate-limited by the --backoff ticker, then blocks until
// all subscription goroutines finish.
func (a *App) startIO() {
	go a.StartCollector(a.ctx)
	a.InitOutputs(a.ctx)
	a.InitInputs(a.ctx)
	if !a.inCluster() {
		go a.startLoader(a.ctx)
		var limiter *time.Ticker
		if a.Config.LocalFlags.SubscribeBackoff > 0 {
			limiter = time.NewTicker(a.Config.LocalFlags.SubscribeBackoff)
		}
		if !a.Config.UseTunnelServer {
			for _, tc := range a.Config.Targets {
				a.wg.Add(1)
				go a.subscribeStream(a.ctx, tc)
				if limiter != nil {
					// Wait one backoff interval before the next target.
					<-limiter.C
				}
			}
		}
		if limiter != nil {
			limiter.Stop()
		}
		a.wg.Wait()
	}
}
// allSubscriptionsModeOnce reports whether every configured subscription
// uses mode "once" (case-insensitive).
func allSubscriptionsModeOnce(sc map[string]*types.SubscriptionConfig) bool {
	for _, sub := range sc {
		if strings.ToUpper(sub.Mode) == "ONCE" {
			continue
		}
		return false
	}
	return true
}

// allSubscriptionsModePoll reports whether every configured subscription
// uses mode "poll" (case-insensitive).
func allSubscriptionsModePoll(sc map[string]*types.SubscriptionConfig) bool {
	for _, sub := range sc {
		if strings.ToUpper(sub.Mode) == "POLL" {
			continue
		}
		return false
	}
	return true
}
|
// IronMQ (elastic message queue) client library
package mq
import (
"errors"
"time"
"github.com/iron-io/iron_go/api"
"github.com/iron-io/iron_go/config"
)
// Queue represents a single IronMQ queue, addressed by name, together with
// the project/token settings used to talk to the API.
type Queue struct {
	Settings config.Settings
	Name     string
}

// QueueSubscriber is a push-queue subscriber endpoint and the HTTP headers
// to send with each push.
type QueueSubscriber struct {
	URL     string            `json:"url"`
	Headers map[string]string `json:"headers,omitempty"`
}
// QueueInfo describes a queue's configuration and current state as returned
// by the IronMQ API. Zero-valued fields are omitted when marshaled.
type QueueInfo struct {
	Id       string `json:"id,omitempty"`
	Name     string `json:"name,omitempty"`
	PushType string `json:"push_type,omitempty"`
	Reserved int    `json:"reserved,omitempty"`
	// BUG FIX: the json tags for RetriesDelay and Retries were swapped
	// (RetriesDelay was tagged "retries" and Retries "retries_delay").
	RetriesDelay  int               `json:"retries_delay,omitempty"`
	Retries       int               `json:"retries,omitempty"`
	Size          int               `json:"size,omitempty"`
	Subscribers   []QueueSubscriber `json:"subscribers,omitempty"`
	Alerts        []Alert           `json:"alerts,omitempty"`
	TotalMessages int               `json:"total_messages,omitempty"`
	ErrorQueue    string            `json:"error_queue,omitempty"`
}
// Message is a single queue message. Id is set by the server; q is the
// owning queue, filled in when messages are fetched.
type Message struct {
	Id   string `json:"id,omitempty"`
	Body string `json:"body"`
	// Timeout is the amount of time in seconds allowed for processing the
	// message.
	Timeout int64 `json:"timeout,omitempty"`
	// Delay is the amount of time in seconds to wait before adding the message
	// to the queue.
	Delay int64 `json:"delay,omitempty"`
	q     Queue
}

// PushStatus reports the delivery state of a pushed message.
type PushStatus struct {
	Retried    int    `json:"retried"`
	StatusCode int    `json:"status_code"`
	Status     string `json:"status"`
}
type Subscriber struct {
Retried int `json:"retried"`
StatusCode int `json:"status_code"`
Status string `json:"status"`
URL string `json:"url"`
}
type Alert struct {
Type string `json:"type"`
Direction string `json:direction`
Trigger int `json:trigger`
Queue string `queue`
}
// New returns a Queue bound to queueName using the "iron_mq" section of the
// library configuration.
func New(queueName string) *Queue {
	return &Queue{Settings: config.Config("iron_mq"), Name: queueName}
}
// ListQueues returns one page of queues (perPage entries starting at page)
// for the configured project.
func ListQueues(page, perPage int) (queues []Queue, err error) {
	var listing []struct {
		Id         string
		Project_id string
		Name       string
	}
	q := New("")
	err = q.queues().
		QueryAdd("page", "%d", page).
		QueryAdd("per_page", "%d", perPage).
		Req("GET", nil, &listing)
	if err != nil {
		return nil, err
	}
	queues = make([]Queue, 0, len(listing))
	for _, entry := range listing {
		queues = append(queues, Queue{Settings: q.Settings, Name: entry.Name})
	}
	return queues, nil
}
// queues builds an API URL rooted at the "queues" endpoint with optional
// extra path segments.
func (q Queue) queues(s ...string) *api.URL { return api.Action(q.Settings, "queues", s...) }

// This method is left to support backward compatibility.
// This method is replaced by func ListQueues(page, perPage int) (queues []Queue, err error)
func (q Queue) ListQueues(page, perPage int) (queues []Queue, err error) {
	return ListQueues(page, perPage)
}

// Info fetches the queue's current settings and counters.
func (q Queue) Info() (QueueInfo, error) {
	qi := QueueInfo{}
	err := q.queues(q.Name).Req("GET", nil, &qi)
	return qi, err
}

// Update posts new settings for the queue and returns the resulting state.
func (q Queue) Update(qi QueueInfo) (QueueInfo, error) {
	out := QueueInfo{}
	err := q.queues(q.Name).Req("POST", qi, &out)
	return out, err
}

// Delete removes the queue; success is simply err == nil.
func (q Queue) Delete() (bool, error) {
	err := q.queues(q.Name).Req("DELETE", nil, nil)
	success := err == nil
	return success, err
}
// Subscription describes push-queue behaviour: the push type and the retry
// policy applied when delivery fails.
type Subscription struct {
	PushType     string
	Retries      int
	RetriesDelay int
}
// setSubscribers wraps the given subscriber URLs in a QueueInfo and issues
// the given HTTP method against the queue's "subscribers" endpoint. Shared
// by RemoveSubscribers and AddSubscribers, whose bodies were duplicates.
func (q Queue) setSubscribers(method string, subscribers []string) error {
	qi := QueueInfo{Subscribers: make([]QueueSubscriber, len(subscribers))}
	for i, subscriber := range subscribers {
		qi.Subscribers[i].URL = subscriber
	}
	return q.queues(q.Name, "subscribers").Req(method, &qi, nil)
}

// RemoveSubscribers removes subscribers.
func (q Queue) RemoveSubscribers(subscribers ...string) (err error) {
	return q.setSubscribers("DELETE", subscribers)
}

// AddSubscribers adds subscribers.
func (q Queue) AddSubscribers(subscribers ...string) (err error) {
	return q.setSubscribers("POST", subscribers)
}
// PushString adds a single message body to the queue and returns its id.
func (q Queue) PushString(body string) (id string, err error) {
	ids, err := q.PushStrings(body)
	if err != nil {
		return
	}
	// NOTE(review): indexes ids[0] without checking the slice is non-empty;
	// an empty server response with a nil error would panic here.
	return ids[0], nil
}

// Push adds one or more messages to the end of the queue using IronMQ's defaults:
//	timeout - 60 seconds
//	delay - none
//
// Identical to PushMessages with Message{Timeout: 60, Delay: 0}
func (q Queue) PushStrings(bodies ...string) (ids []string, err error) {
	msgs := make([]*Message, 0, len(bodies))
	for _, body := range bodies {
		msgs = append(msgs, &Message{Body: body})
	}
	return q.PushMessages(msgs...)
}

// PushMessage adds one message to the queue and returns its id.
func (q Queue) PushMessage(msg *Message) (id string, err error) {
	ids, err := q.PushMessages(msg)
	if err != nil {
		return
	}
	return ids[0], nil
}

// PushMessages posts the given messages in a single request and returns the
// ids assigned by the server.
func (q Queue) PushMessages(msgs ...*Message) (ids []string, err error) {
	in := struct {
		Messages []*Message `json:"messages"`
	}{Messages: msgs}
	out := struct {
		IDs []string `json:"ids"`
		Msg string   `json:"msg"`
	}{}
	err = q.queues(q.Name, "messages").Req("POST", &in, &out)
	return out.IDs, err
}
// Get reserves a message from the queue.
// The message will not be deleted, but will be reserved until the timeout
// expires. If the timeout expires before the message is deleted, the message
// will be placed back onto the queue.
// As a result, be sure to Delete a message after you're done with it.
func (q Queue) Get() (msg *Message, err error) {
msgs, err := q.GetN(1)
if err != nil {
return
}
if len(msgs) > 0 {
msg = msgs[0]
} else {
err = errors.New("Couldn't get a single message")
}
return
}
// get N messages
func (q Queue) GetN(n int) (msgs []*Message, err error) {
msgs, err = q.GetNWithTimeout(n, 0)
return
}
func (q Queue) GetNWithTimeout(n, timeout int) (msgs []*Message, err error) {
out := struct {
Messages []*Message `json:"messages"`
}{}
err = q.queues(q.Name, "messages").
QueryAdd("n", "%d", n).
QueryAdd("timeout", "%d", timeout).
Req("GET", nil, &out)
if err != nil {
return
}
for _, msg := range out.Messages {
msg.q = q
}
return out.Messages, nil
}
func (q Queue) Peek() (msg *Message, err error) {
msgs, err := q.PeekN(1)
if err != nil {
return
}
if len(msgs) > 0 {
msg = msgs[0]
} else {
err = errors.New("Couldn't get a single message")
}
return
}
// peek N messages
func (q Queue) PeekN(n int) (msgs []*Message, err error) {
msgs, err = q.PeekNWithTimeout(n, 0)
return
}
func (q Queue) PeekNWithTimeout(n, timeout int) (msgs []*Message, err error) {
out := struct {
Messages []*Message `json:"messages"`
}{}
err = q.queues(q.Name, "messages", "peek").
QueryAdd("n", "%d", n).
QueryAdd("timeout", "%d", timeout).
Req("GET", nil, &out)
if err != nil {
return
}
for _, msg := range out.Messages {
msg.q = q
}
return out.Messages, nil
}
// Delete all messages in the queue
func (q Queue) Clear() (err error) {
return q.queues(q.Name, "clear").Req("POST", nil, nil)
}
// Delete message from queue
func (q Queue) DeleteMessage(msgId string) (err error) {
return q.queues(q.Name, "messages", msgId).Req("DELETE", nil, nil)
}
// Reset timeout of message to keep it reserved
func (q Queue) TouchMessage(msgId string) (err error) {
return q.queues(q.Name, "messages", msgId, "touch").Req("POST", nil, nil)
}
// Put message back in the queue, message will be available after +delay+ seconds.
func (q Queue) ReleaseMessage(msgId string, delay int64) (err error) {
in := struct {
Delay int64 `json:"delay"`
}{Delay: delay}
return q.queues(q.Name, "messages", msgId, "release").Req("POST", &in, nil)
}
func (q Queue) MessageSubscribers(msgId string) ([]*Subscriber, error) {
out := struct {
Subscribers []*Subscriber `json:"subscribers"`
}{}
err := q.queues(q.Name, "messages", msgId, "subscribers").Req("GET", nil, &out)
return out.Subscribers, err
}
// MessageSubscribersPollN polls the message's subscribers every 100ms until
// at least n subscribers are reported and none of them is still in the
// "queued" push state, then returns them. Any API error aborts the poll.
//
// Fixes over the previous version: the initial MessageSubscribers call whose
// result was immediately discarded (one wasted HTTP request) is gone, and so
// is the unreachable `return` after the infinite loop (flagged by go vet).
//
// NOTE(review): this still blocks forever if the condition is never met;
// consider adding a timeout or iteration limit.
func (q Queue) MessageSubscribersPollN(msgId string, n int) ([]*Subscriber, error) {
	for {
		time.Sleep(100 * time.Millisecond)
		subs, err := q.MessageSubscribers(msgId)
		if err != nil {
			return subs, err
		}
		if len(subs) >= n && actualPushStatus(subs) {
			return subs, nil
		}
	}
}
// actualPushStatus reports whether every subscriber has progressed past the
// "queued" push state.
func actualPushStatus(subs []*Subscriber) bool {
	for i := range subs {
		if subs[i].Status == "queued" {
			return false
		}
	}
	return true
}
func (q Queue) AddAlerts(alerts ...*Alert) (err error) {
in := struct {
Alerts []*Alert `json:"alerts"`
}{Alerts: alerts}
return q.queues(q.Name, "alerts").Req("POST", &in, nil)
}
func (q Queue) UpdateAlerts(alerts ...*Alert) (err error) {
in := struct {
Alerts []*Alert `json:"alerts"`
}{Alerts: alerts}
return q.queues(q.Name, "alerts").Req("PUT", &in, nil)
}
func (q Queue) RemoveAllAlerts() (err error) {
return q.queues(q.Name, "alerts").Req("DELETE", nil, nil)
}
type AlertInfo struct {
Id string `json:"id"`
}
// RemoveAlerts deletes the alerts with the given ids from the queue.
func (q Queue) RemoveAlerts(alertIds ...string) (err error) {
	payload := struct {
		Alerts []AlertInfo `json:"alerts"`
	}{Alerts: make([]AlertInfo, 0, len(alertIds))}
	for _, id := range alertIds {
		payload.Alerts = append(payload.Alerts, AlertInfo{Id: id})
	}
	return q.queues(q.Name, "alerts").Req("DELETE", &payload, nil)
}
func (q Queue) RemoveAlert(alertId string) (err error) {
return q.queues(q.Name, "alerts", alertId).Req("DELETE", nil, nil)
}
// Delete message from queue
func (m Message) Delete() (err error) {
return m.q.DeleteMessage(m.Id)
}
// Reset timeout of message to keep it reserved
func (m Message) Touch() (err error) {
return m.q.TouchMessage(m.Id)
}
// Put message back in the queue, message will be available after +delay+ seconds.
func (m Message) Release(delay int64) (err error) {
return m.q.ReleaseMessage(m.Id, delay)
}
func (m Message) Subscribers() (interface{}, error) {
return m.q.MessageSubscribers(m.Id)
}
Add a method for getting the list of queues of a specified project.
// IronMQ (elastic message queue) client library
package mq
import (
"errors"
"time"
"github.com/iron-io/iron_go/api"
"github.com/iron-io/iron_go/config"
)
// Queue identifies a single IronMQ queue together with the connection
// settings used to reach it.
type Queue struct {
Settings config.Settings
Name string
}
// QueueSubscriber is a push-queue subscriber endpoint as sent to the API.
type QueueSubscriber struct {
URL string `json:"url"`
// Headers, when set, are extra HTTP headers included with each push.
Headers map[string]string `json:"headers,omitempty"`
}
// QueueInfo mirrors the queue resource accepted/returned by the IronMQ API.
type QueueInfo struct {
	Id       string `json:"id,omitempty"`
	Name     string `json:"name,omitempty"`
	PushType string `json:"push_type,omitempty"`
	Reserved int    `json:"reserved,omitempty"`
	// BUGFIX: the tags on the next two fields were previously swapped
	// ("retries" was attached to RetriesDelay and vice versa); they now
	// match the field meanings.
	RetriesDelay  int               `json:"retries_delay,omitempty"`
	Retries       int               `json:"retries,omitempty"`
	Size          int               `json:"size,omitempty"`
	Subscribers   []QueueSubscriber `json:"subscribers,omitempty"`
	Alerts        []Alert           `json:"alerts,omitempty"`
	TotalMessages int               `json:"total_messages,omitempty"`
	ErrorQueue    string            `json:"error_queue,omitempty"`
}
// Message is a single queue message.
type Message struct {
Id string `json:"id,omitempty"`
Body string `json:"body"`
// Timeout is the amount of time in seconds allowed for processing the
// message.
Timeout int64 `json:"timeout,omitempty"`
// Delay is the amount of time in seconds to wait before adding the message
// to the queue.
Delay int64 `json:"delay,omitempty"`
// q is the queue this message came from; set by GetNWithTimeout and
// PeekNWithTimeout so Delete/Touch/Release can address the right queue.
q Queue
}
// PushStatus describes the delivery state of a pushed message as reported
// by the API. NOTE(review): semantics of Status values are not visible here;
// actualPushStatus treats "queued" as not-yet-delivered.
type PushStatus struct {
Retried int `json:"retried"`
StatusCode int `json:"status_code"`
Status string `json:"status"`
}
// Subscriber is the per-subscriber push state for a message: the endpoint
// URL plus the same delivery fields as PushStatus.
type Subscriber struct {
Retried int `json:"retried"`
StatusCode int `json:"status_code"`
Status string `json:"status"`
URL string `json:"url"`
}
type Alert struct {
Type string `json:"type"`
Direction string `json:direction`
Trigger int `json:trigger`
Queue string `queue`
}
// New returns a Queue handle named queueName, configured from the "iron_mq"
// section of the Iron.io configuration.
func New(queueName string) *Queue {
return &Queue{Settings: config.Config("iron_mq"), Name: queueName}
}
// ListProjectQueues returns one page of the queues belonging to the given
// project, authenticating with the given token. page is 0-based per the API
// paging parameters "page"/"per_page" sent below; perPage caps the page size.
func ListProjectQueues(projectId string, token string, page int, perPage int) (queues []Queue, err error) {
// Anonymous response shape: the API returns id/project_id/name per queue;
// only the name is kept.
out := []struct {
Id string
Project_id string
Name string
}{}
// Build a throwaway handle just to carry the project/token settings.
q := New("")
q.Settings.ProjectId = projectId
q.Settings.Token = token
err = q.queues().
QueryAdd("page", "%d", page).
QueryAdd("per_page", "%d", perPage).
Req("GET", nil, &out)
if err != nil {
return
}
// Re-wrap each returned name as a Queue sharing the same settings.
queues = make([]Queue, 0, len(out))
for _, item := range out {
queues = append(queues, Queue{
Settings: q.Settings,
Name: item.Name,
})
}
return
}
// ListQueues returns one page of the queues of the project configured in the
// "iron_mq" configuration section.
func ListQueues(page, perPage int) (queues []Queue, err error) {
settings := config.Config("iron_mq")
return ListProjectQueues(settings.ProjectId, settings.Token, page, perPage)
}
// queues builds an API URL rooted at the "queues" collection, with optional
// extra path segments (queue name, "messages", "alerts", ...).
func (q Queue) queues(s ...string) *api.URL { return api.Action(q.Settings, "queues", s...) }
// ListQueues lists queues by delegating to the package-level ListQueues
// function; the receiver is ignored.
//
// Deprecated: kept only for backward compatibility; use the package-level
// ListQueues function instead.
func (q Queue) ListQueues(page, perPage int) (queues []Queue, err error) {
return ListQueues(page, perPage)
}
// Info fetches the queue's current metadata from the service.
func (q Queue) Info() (QueueInfo, error) {
	var info QueueInfo
	if err := q.queues(q.Name).Req("GET", nil, &info); err != nil {
		return info, err
	}
	return info, nil
}
// Update pushes the given settings to the service and returns the resulting
// queue state.
func (q Queue) Update(qi QueueInfo) (QueueInfo, error) {
	var updated QueueInfo
	if err := q.queues(q.Name).Req("POST", qi, &updated); err != nil {
		return updated, err
	}
	return updated, nil
}
// Delete removes the queue; the bool result mirrors err == nil.
func (q Queue) Delete() (bool, error) {
	if err := q.queues(q.Name).Req("DELETE", nil, nil); err != nil {
		return false, err
	}
	return true, nil
}
type Subscription struct {
PushType string
Retries int
RetriesDelay int
}
// RemoveSubscribers removes subscribers.
func (q Queue) RemoveSubscribers(subscribers ...string) (err error) {
qi := QueueInfo{Subscribers: make([]QueueSubscriber, len(subscribers))}
for i, subscriber := range subscribers {
qi.Subscribers[i].URL = subscriber
}
return q.queues(q.Name, "subscribers").Req("DELETE", &qi, nil)
}
// AddSubscribers adds subscribers.
func (q Queue) AddSubscribers(subscribers ...string) (err error) {
qi := QueueInfo{Subscribers: make([]QueueSubscriber, len(subscribers))}
for i, subscriber := range subscribers {
qi.Subscribers[i].URL = subscriber
}
return q.queues(q.Name, "subscribers").Req("POST", &qi, nil)
}
func (q Queue) PushString(body string) (id string, err error) {
ids, err := q.PushStrings(body)
if err != nil {
return
}
return ids[0], nil
}
// Push adds one or more messages to the end of the queue using IronMQ's defaults:
// timeout - 60 seconds
// delay - none
//
// Identical to PushMessages with Message{Timeout: 60, Delay: 0}
func (q Queue) PushStrings(bodies ...string) (ids []string, err error) {
msgs := make([]*Message, 0, len(bodies))
for _, body := range bodies {
msgs = append(msgs, &Message{Body: body})
}
return q.PushMessages(msgs...)
}
func (q Queue) PushMessage(msg *Message) (id string, err error) {
ids, err := q.PushMessages(msg)
if err != nil {
return
}
return ids[0], nil
}
func (q Queue) PushMessages(msgs ...*Message) (ids []string, err error) {
in := struct {
Messages []*Message `json:"messages"`
}{Messages: msgs}
out := struct {
IDs []string `json:"ids"`
Msg string `json:"msg"`
}{}
err = q.queues(q.Name, "messages").Req("POST", &in, &out)
return out.IDs, err
}
// Get reserves a single message from the queue.
// The message is not deleted, only reserved until the timeout expires; if it
// is not deleted before then, it goes back onto the queue. Callers should
// therefore Delete a message once they are done with it.
func (q Queue) Get() (msg *Message, err error) {
	var batch []*Message
	batch, err = q.GetN(1)
	if err != nil {
		return
	}
	if len(batch) == 0 {
		err = errors.New("Couldn't get a single message")
		return
	}
	msg = batch[0]
	return
}
// get N messages
func (q Queue) GetN(n int) (msgs []*Message, err error) {
msgs, err = q.GetNWithTimeout(n, 0)
return
}
func (q Queue) GetNWithTimeout(n, timeout int) (msgs []*Message, err error) {
out := struct {
Messages []*Message `json:"messages"`
}{}
err = q.queues(q.Name, "messages").
QueryAdd("n", "%d", n).
QueryAdd("timeout", "%d", timeout).
Req("GET", nil, &out)
if err != nil {
return
}
for _, msg := range out.Messages {
msg.q = q
}
return out.Messages, nil
}
func (q Queue) Peek() (msg *Message, err error) {
msgs, err := q.PeekN(1)
if err != nil {
return
}
if len(msgs) > 0 {
msg = msgs[0]
} else {
err = errors.New("Couldn't get a single message")
}
return
}
// peek N messages
func (q Queue) PeekN(n int) (msgs []*Message, err error) {
msgs, err = q.PeekNWithTimeout(n, 0)
return
}
func (q Queue) PeekNWithTimeout(n, timeout int) (msgs []*Message, err error) {
out := struct {
Messages []*Message `json:"messages"`
}{}
err = q.queues(q.Name, "messages", "peek").
QueryAdd("n", "%d", n).
QueryAdd("timeout", "%d", timeout).
Req("GET", nil, &out)
if err != nil {
return
}
for _, msg := range out.Messages {
msg.q = q
}
return out.Messages, nil
}
// Delete all messages in the queue
func (q Queue) Clear() (err error) {
return q.queues(q.Name, "clear").Req("POST", nil, nil)
}
// Delete message from queue
func (q Queue) DeleteMessage(msgId string) (err error) {
return q.queues(q.Name, "messages", msgId).Req("DELETE", nil, nil)
}
// Reset timeout of message to keep it reserved
func (q Queue) TouchMessage(msgId string) (err error) {
return q.queues(q.Name, "messages", msgId, "touch").Req("POST", nil, nil)
}
// Put message back in the queue, message will be available after +delay+ seconds.
func (q Queue) ReleaseMessage(msgId string, delay int64) (err error) {
in := struct {
Delay int64 `json:"delay"`
}{Delay: delay}
return q.queues(q.Name, "messages", msgId, "release").Req("POST", &in, nil)
}
func (q Queue) MessageSubscribers(msgId string) ([]*Subscriber, error) {
out := struct {
Subscribers []*Subscriber `json:"subscribers"`
}{}
err := q.queues(q.Name, "messages", msgId, "subscribers").Req("GET", nil, &out)
return out.Subscribers, err
}
// MessageSubscribersPollN polls the message's subscribers every 100ms until
// at least n subscribers are reported and none of them is still in the
// "queued" push state, then returns them. Any API error aborts the poll.
//
// Fixes over the previous version: the initial MessageSubscribers call whose
// result was immediately discarded (one wasted HTTP request) is gone, and so
// is the unreachable `return` after the infinite loop (flagged by go vet).
//
// NOTE(review): this still blocks forever if the condition is never met;
// consider adding a timeout or iteration limit.
func (q Queue) MessageSubscribersPollN(msgId string, n int) ([]*Subscriber, error) {
	for {
		time.Sleep(100 * time.Millisecond)
		subs, err := q.MessageSubscribers(msgId)
		if err != nil {
			return subs, err
		}
		if len(subs) >= n && actualPushStatus(subs) {
			return subs, nil
		}
	}
}
func actualPushStatus(subs []*Subscriber) bool {
for _, sub := range subs {
if sub.Status == "queued" {
return false
}
}
return true
}
func (q Queue) AddAlerts(alerts ...*Alert) (err error) {
in := struct {
Alerts []*Alert `json:"alerts"`
}{Alerts: alerts}
return q.queues(q.Name, "alerts").Req("POST", &in, nil)
}
func (q Queue) UpdateAlerts(alerts ...*Alert) (err error) {
in := struct {
Alerts []*Alert `json:"alerts"`
}{Alerts: alerts}
return q.queues(q.Name, "alerts").Req("PUT", &in, nil)
}
func (q Queue) RemoveAllAlerts() (err error) {
return q.queues(q.Name, "alerts").Req("DELETE", nil, nil)
}
type AlertInfo struct {
Id string `json:"id"`
}
func (q Queue) RemoveAlerts(alertIds ...string) (err error) {
in := struct {
Alerts []AlertInfo `json:"alerts"`
}{Alerts: make([]AlertInfo, len(alertIds))}
for i, alertId := range alertIds {
(in.Alerts[i]).Id = alertId
}
return q.queues(q.Name, "alerts").Req("DELETE", &in, nil)
}
func (q Queue) RemoveAlert(alertId string) (err error) {
return q.queues(q.Name, "alerts", alertId).Req("DELETE", nil, nil)
}
// Delete message from queue
func (m Message) Delete() (err error) {
return m.q.DeleteMessage(m.Id)
}
// Reset timeout of message to keep it reserved
func (m Message) Touch() (err error) {
return m.q.TouchMessage(m.Id)
}
// Put message back in the queue, message will be available after +delay+ seconds.
func (m Message) Release(delay int64) (err error) {
return m.q.ReleaseMessage(m.Id, delay)
}
func (m Message) Subscribers() (interface{}, error) {
return m.q.MessageSubscribers(m.Id)
}
|
package mssql
import (
"database/sql"
"database/sql/driver"
"io"
// "math"
// "math/big"
// "time"
// "unsafe"
"strings"
)
func init() {
sql.Register("go-mssql", &MssqlDriver{})
}
type MssqlDriver struct {
}
type MssqlConn struct {
sess *TdsSession
}
type MssqlTx struct {
c *MssqlConn
}
//func (tx *MssqlTx) Commit() error {
// _, err := oleutil.CallMethod(tx.c.db, "CommitTrans")
// if err != nil {
// return err
// }
// return nil
//}
//
//func (tx *MssqlTx) Rollback() error {
// _, err := oleutil.CallMethod(tx.c.db, "Rollback")
// if err != nil {
// return err
// }
// return nil
//}
//
//func (c *MssqlConn) exec(cmd string) error {
// _, err := oleutil.CallMethod(c.db, "Execute", cmd)
// return err
//}
//
func (c *MssqlConn) Begin() (driver.Tx, error) {
// _, err := oleutil.CallMethod(c.db, "BeginTrans")
// if err != nil {
// return nil, err
// }
// return &AdodbTx{c}, nil
return nil, nil
}
// parseConnectionString splits a semicolon-separated DSN into a map of
// lower-cased option names to their verbatim values. Empty segments and
// segments with an empty name are skipped; a segment with no '=' maps its
// name to the empty string. Only the first '=' splits name from value.
func parseConnectionString(dsn string) (res map[string]string) {
	res = make(map[string]string)
	for _, segment := range strings.Split(dsn, ";") {
		if segment == "" {
			continue
		}
		pair := strings.SplitN(segment, "=", 2)
		key := strings.ToLower(pair[0])
		if key == "" {
			continue
		}
		value := ""
		if len(pair) == 2 {
			value = pair[1]
		}
		res[key] = value
	}
	return res
}
func (d *MssqlDriver) Open(dsn string) (driver.Conn, error) {
params := parseConnectionString(dsn)
buf, err := Connect(params)
if err != nil {
return nil, err
}
return &MssqlConn{buf}, nil
}
func (c *MssqlConn) Close() error {
return c.sess.buf.transport.Close()
}
type MssqlStmt struct {
c *MssqlConn
query string
}
func (c *MssqlConn) Prepare(query string) (driver.Stmt, error) {
return &MssqlStmt{c, query}, nil
}
//func (s *MssqlStmt) Bind(bind []string) error {
// s.b = bind
// return nil
//}
func (s *MssqlStmt) Close() error {
// s.s.Release()
return nil
}
// NumInput reports the number of bound parameters in the statement.
// Parameter binding is not implemented yet, so this always reports zero.
// (The commented-out ADO/oleutil code previously carried here was inherited
// from an adodb driver and does not apply to this TDS implementation.)
func (s *MssqlStmt) NumInput() int {
	return 0
}
//func (s *MssqlStmt) bind(args []driver.Value) error {
// if s.b != nil {
// for i, v := range args {
// var b string = "?"
// if len(s.b) < i {
// b = s.b[i]
// }
// unknown, err := oleutil.CallMethod(s.s, "CreateParameter", b, 12, 1)
// if err != nil {
// return err
// }
// param := unknown.ToIDispatch()
// defer param.Release()
// _, err = oleutil.PutProperty(param, "Value", v)
// if err != nil {
// return err
// }
// _, err = oleutil.CallMethod(s.ps, "Append", param)
// if err != nil {
// return err
// }
// }
// } else {
// for i, v := range args {
// var varval ole.VARIANT
// varval.VT = ole.VT_I4
// varval.Val = int64(i)
// val, err := oleutil.CallMethod(s.ps, "Item", &varval)
// if err != nil {
// return err
// }
// item := val.ToIDispatch()
// defer item.Release()
// _, err = oleutil.PutProperty(item, "Value", v)
// if err != nil {
// return err
// }
// }
// }
// return nil
//}
func (s *MssqlStmt) Query(args []driver.Value) (driver.Rows, error) {
// if err := s.bind(args); err != nil {
// return nil, err
// }
// rc, err := oleutil.CallMethod(s.s, "Execute")
// if err != nil {
// return nil, err
// }
return &MssqlRows{}, nil
}
func (s *MssqlStmt) Exec(args []driver.Value) (driver.Result, error) {
// if err := s.bind(args); err != nil {
// return nil, err
// }
// _, err := oleutil.CallMethod(s.s, "Execute")
// if err != nil {
// return nil, err
// }
return driver.ResultNoRows, nil
}
type MssqlRows struct {
// s *AdodbStmt
// rc *ole.IDispatch
// nc int
// cols []string
}
func (rc *MssqlRows) Close() error {
// _, err := oleutil.CallMethod(rc.rc, "Close")
// if err != nil {
// return err
// }
return nil
}
// Columns returns the result-set column names.
// Column metadata is not wired up yet, so an empty slice is returned. The
// large commented-out ADO/oleutil implementation previously kept here
// targeted a different (adodb) driver and was removed as dead code.
func (rc *MssqlRows) Columns() []string {
	// TODO: populate from the column metadata of the TDS response.
	return []string{}
}
// Next is supposed to copy the next result row into dest; row decoding is
// not implemented yet, so it always signals end-of-rows. The large
// commented-out ADO/oleutil variant-decoding block previously kept here came
// from an adodb driver and was removed as dead code.
func (rc *MssqlRows) Next(dest []driver.Value) error {
	// TODO: decode TDS row tokens into dest.
	return io.EOF
}
Added row parsing.
package mssql
import (
"database/sql"
"database/sql/driver"
"io"
// "math"
// "math/big"
// "time"
// "unsafe"
"strings"
)
func init() {
sql.Register("go-mssql", &MssqlDriver{})
}
type MssqlDriver struct {
}
type MssqlConn struct {
sess *TdsSession
}
type MssqlTx struct {
c *MssqlConn
}
//func (tx *MssqlTx) Commit() error {
// _, err := oleutil.CallMethod(tx.c.db, "CommitTrans")
// if err != nil {
// return err
// }
// return nil
//}
//
//func (tx *MssqlTx) Rollback() error {
// _, err := oleutil.CallMethod(tx.c.db, "Rollback")
// if err != nil {
// return err
// }
// return nil
//}
//
//func (c *MssqlConn) exec(cmd string) error {
// _, err := oleutil.CallMethod(c.db, "Execute", cmd)
// return err
//}
//
func (c *MssqlConn) Begin() (driver.Tx, error) {
// _, err := oleutil.CallMethod(c.db, "BeginTrans")
// if err != nil {
// return nil, err
// }
// return &AdodbTx{c}, nil
return nil, nil
}
// parseConnectionString splits a semicolon-separated DSN into a map of
// lower-cased option names to their verbatim values. Empty segments and
// segments with an empty name are skipped; a segment with no '=' maps its
// name to the empty string. Only the first '=' splits name from value.
func parseConnectionString(dsn string) (res map[string]string) {
	res = make(map[string]string)
	for _, segment := range strings.Split(dsn, ";") {
		if segment == "" {
			continue
		}
		pair := strings.SplitN(segment, "=", 2)
		key := strings.ToLower(pair[0])
		if key == "" {
			continue
		}
		value := ""
		if len(pair) == 2 {
			value = pair[1]
		}
		res[key] = value
	}
	return res
}
func (d *MssqlDriver) Open(dsn string) (driver.Conn, error) {
params := parseConnectionString(dsn)
buf, err := Connect(params)
if err != nil {
return nil, err
}
return &MssqlConn{buf}, nil
}
func (c *MssqlConn) Close() error {
return c.sess.buf.transport.Close()
}
type MssqlStmt struct {
c *MssqlConn
query string
}
func (c *MssqlConn) Prepare(query string) (driver.Stmt, error) {
return &MssqlStmt{c, query}, nil
}
//func (s *MssqlStmt) Bind(bind []string) error {
// s.b = bind
// return nil
//}
func (s *MssqlStmt) Close() error {
// s.s.Release()
return nil
}
func (s *MssqlStmt) NumInput() int {
// if s.b != nil {
// return len(s.b)
// }
// _, err := oleutil.CallMethod(s.ps, "Refresh")
// if err != nil {
// return -1
// }
// val, err := oleutil.GetProperty(s.ps, "Count")
// if err != nil {
// return -1
// }
// c := int(val.Val)
// return c
return 0
}
//func (s *MssqlStmt) bind(args []driver.Value) error {
// if s.b != nil {
// for i, v := range args {
// var b string = "?"
// if len(s.b) < i {
// b = s.b[i]
// }
// unknown, err := oleutil.CallMethod(s.s, "CreateParameter", b, 12, 1)
// if err != nil {
// return err
// }
// param := unknown.ToIDispatch()
// defer param.Release()
// _, err = oleutil.PutProperty(param, "Value", v)
// if err != nil {
// return err
// }
// _, err = oleutil.CallMethod(s.ps, "Append", param)
// if err != nil {
// return err
// }
// }
// } else {
// for i, v := range args {
// var varval ole.VARIANT
// varval.VT = ole.VT_I4
// varval.Val = int64(i)
// val, err := oleutil.CallMethod(s.ps, "Item", &varval)
// if err != nil {
// return err
// }
// item := val.ToIDispatch()
// defer item.Release()
// _, err = oleutil.PutProperty(item, "Value", v)
// if err != nil {
// return err
// }
// }
// }
// return nil
//}
// Query sends the statement's SQL to the server as a plain batch and returns
// a rows iterator. Parameter binding is not implemented; args are ignored.
func (s *MssqlStmt) Query(args []driver.Value) (driver.Rows, error) {
	// BUGFIX: the previous code referenced an undefined identifier `conn`
	// (would not compile); the session buffer is reachable through the
	// statement's connection, as MssqlConn.Close does with c.sess.buf.
	if err := sendSqlBatch72(s.c.sess.buf, s.query); err != nil {
		return nil, err
	}
	return &MssqlRows{}, nil
}
func (s *MssqlStmt) Exec(args []driver.Value) (driver.Result, error) {
// if err := s.bind(args); err != nil {
// return nil, err
// }
// _, err := oleutil.CallMethod(s.s, "Execute")
// if err != nil {
// return nil, err
// }
return driver.ResultNoRows, nil
}
type MssqlRows struct {
// s *AdodbStmt
// rc *ole.IDispatch
// nc int
// cols []string
}
func (rc *MssqlRows) Close() error {
// _, err := oleutil.CallMethod(rc.rc, "Close")
// if err != nil {
// return err
// }
return nil
}
func (rc *MssqlRows) Columns() []string {
// if rc.nc != len(rc.cols) {
// unknown, err := oleutil.GetProperty(rc.rc, "Fields")
// if err != nil {
// return []string{}
// }
// fields := unknown.ToIDispatch()
// defer fields.Release()
// val, err := oleutil.GetProperty(fields, "Count")
// if err != nil {
// return []string{}
// }
// rc.nc = int(val.Val)
// rc.cols = make([]string, rc.nc)
// for i := 0; i < rc.nc; i++ {
// var varval ole.VARIANT
// varval.VT = ole.VT_I4
// varval.Val = int64(i)
// val, err := oleutil.CallMethod(fields, "Item", &varval)
// if err != nil {
// return []string{}
// }
// item := val.ToIDispatch()
// if err != nil {
// return []string{}
// }
// name, err := oleutil.GetProperty(item, "Name")
// if err != nil {
// return []string{}
// }
// rc.cols[i] = name.ToString()
// item.Release()
// }
// }
// return rc.cols
return []string{}
}
func (rc *MssqlRows) Next(dest []driver.Value) error {
// unknown, err := oleutil.GetProperty(rc.rc, "EOF")
// if err != nil {
// return io.EOF
// }
// if unknown.Val != 0 {
// return io.EOF
// }
// unknown, err = oleutil.GetProperty(rc.rc, "Fields")
// if err != nil {
// return err
// }
// fields := unknown.ToIDispatch()
// defer fields.Release()
// for i := range dest {
// var varval ole.VARIANT
// varval.VT = ole.VT_I4
// varval.Val = int64(i)
// val, err := oleutil.CallMethod(fields, "Item", &varval)
// if err != nil {
// return err
// }
// field := val.ToIDispatch()
// defer field.Release()
// typ, err := oleutil.GetProperty(field, "Type")
// if err != nil {
// return err
// }
// val, err = oleutil.GetProperty(field, "Value")
// if err != nil {
// return err
// }
// sc, err := oleutil.GetProperty(field, "NumericScale")
// field.Release()
// switch typ.Val {
// case 0: // ADEMPTY
// dest[i] = nil
// case 2: // ADSMALLINT
// dest[i] = int64(int16(val.Val))
// case 3: // ADINTEGER
// dest[i] = int64(int32(val.Val))
// case 4: // ADSINGLE
// dest[i] = float64(math.Float32frombits(uint32(val.Val)))
// case 5: // ADDOUBLE
// dest[i] = math.Float64frombits(uint64(val.Val))
// case 6: // ADCURRENCY
// dest[i] = float64(val.Val) / 10000
// case 7: // ADDATE
// // see http://blogs.msdn.com/b/ericlippert/archive/2003/09/16/eric-s-complete-guide-to-vt-date.aspx
// d, t := math.Modf(math.Float64frombits(uint64(val.Val)))
// t = math.Abs(t)
// dest[i] = time.Date(1899, 12, 30+int(d), 0, 0, int(t*86400), 0, time.Local)
// case 8: // ADBSTR
// dest[i] = val.ToString()
// case 9: // ADIDISPATCH
// dest[i] = val.ToIDispatch()
// case 10: // ADERROR
// // TODO
// case 11: // ADBOOLEAN
// dest[i] = val.Val != 0
// case 12: // ADVARIANT
// dest[i] = val
// case 13: // ADIUNKNOWN
// dest[i] = val.ToIUnknown()
// case 14: // ADDECIMAL
// dest[i] = float64(val.Val)
// case 16: // ADTINYINT
// dest[i] = int8(val.Val)
// case 17: // ADUNSIGNEDTINYINT
// dest[i] = uint8(val.Val)
// case 18: // ADUNSIGNEDSMALLINT
// dest[i] = uint16(val.Val)
// case 19: // ADUNSIGNEDINT
// dest[i] = uint32(val.Val)
// case 20: // ADBIGINT
// dest[i] = big.NewInt(val.Val)
// case 21: // ADUNSIGNEDBIGINT
// // TODO
// case 72: // ADGUID
// dest[i] = val.ToString()
// case 128: // ADBINARY
// sa := (*ole.SAFEARRAY)(unsafe.Pointer(uintptr(val.Val)))
// dest[i] = (*[1 << 30]byte)(unsafe.Pointer(uintptr(sa.Data)))[0:sa.Bounds.Elements]
// case 129: // ADCHAR
// dest[i] = val.ToString() //uint8(val.Val)
// case 130: // ADWCHAR
// dest[i] = val.ToString() //uint16(val.Val)
// case 131: // ADNUMERIC
// sub := math.Pow(10, float64(sc.Val))
// dest[i] = float64(float64(val.Val) / sub)
// case 132: // ADUSERDEFINED
// dest[i] = uintptr(val.Val)
// case 133: // ADDBDATE
// // see http://blogs.msdn.com/b/ericlippert/archive/2003/09/16/eric-s-complete-guide-to-vt-date.aspx
// d := math.Float64frombits(uint64(val.Val))
// dest[i] = time.Date(1899, 12, 30+int(d), 0, 0, 0, 0, time.Local)
// case 134: // ADDBTIME
// t := math.Float64frombits(uint64(val.Val))
// dest[i] = time.Date(0, 1, 1, 0, 0, int(t*86400), 0, time.Local)
// case 135: // ADDBTIMESTAMP
// d, t := math.Modf(math.Float64frombits(uint64(val.Val)))
// t = math.Abs(t)
// dest[i] = time.Date(1899, 12, 30+int(d), 0, 0, int(t*86400), 0, time.Local)
// case 136: // ADCHAPTER
// dest[i] = val.ToString()
// case 200: // ADVARCHAR
// dest[i] = val.ToString()
// case 201: // ADLONGVARCHAR
// dest[i] = val.ToString()
// case 202: // ADVARWCHAR
// dest[i] = val.ToString()
// case 203: // ADLONGVARWCHAR
// dest[i] = val.ToString()
// case 204: // ADVARBINARY
// // TODO
// case 205: // ADLONGVARBINARY
// sa := (*ole.SAFEARRAY)(unsafe.Pointer(uintptr(val.Val)))
// dest[i] = (*[1 << 30]byte)(unsafe.Pointer(uintptr(sa.Data)))[0:sa.Bounds.Elements]
// }
// }
// _, err = oleutil.CallMethod(rc.rc, "MoveNext")
// if err != nil {
// return err
// }
// return nil
return io.EOF
}
|
// Copyright 2018 Frédéric Guillot. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package daemon
import (
"context"
"os"
"os/signal"
"runtime"
"syscall"
"time"
"github.com/miniflux/miniflux/config"
"github.com/miniflux/miniflux/locale"
"github.com/miniflux/miniflux/logger"
"github.com/miniflux/miniflux/reader/feed"
"github.com/miniflux/miniflux/scheduler"
"github.com/miniflux/miniflux/storage"
)
// Run starts the daemon.
// Run starts the daemon.
func Run(cfg *config.Config, store *storage.Storage) {
	logger.Info("Starting Miniflux...")

	// Trap SIGINT and SIGTERM to trigger a graceful shutdown.
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, os.Interrupt)
	signal.Notify(stop, syscall.SIGTERM)

	// Emit memory/goroutine statistics every 30 seconds at debug level.
	go func() {
		for {
			var m runtime.MemStats
			runtime.ReadMemStats(&m)
			logger.Debug("Alloc=%vK, TotalAlloc=%vK, Sys=%vK, NumGC=%v, GoRoutines=%d, NumCPU=%d",
				m.Alloc/1024, m.TotalAlloc/1024, m.Sys/1024, m.NumGC, runtime.NumGoroutine(), runtime.NumCPU())
			time.Sleep(30 * time.Second)
		}
	}()

	translator := locale.Load()
	feedHandler := feed.NewFeedHandler(store, translator)
	pool := scheduler.NewWorkerPool(feedHandler, cfg.WorkerPoolSize())
	server := newServer(cfg, store, pool, feedHandler, translator)

	scheduler.NewFeedScheduler(
		store,
		pool,
		cfg.PollingFrequency(),
		cfg.BatchSize(),
	)

	scheduler.NewSessionScheduler(store, cfg.SessionCleanupFrequency())

	<-stop
	logger.Info("Shutting down the server...")

	// Give in-flight requests up to 5 seconds to complete. The cancel
	// function must be called to release the context's timer; the
	// original discarded it (`ctx, _ :=`), which go vet flags as a
	// lost cancel / resource leak.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	server.Shutdown(ctx)
	store.Close()
	logger.Info("Server gracefully stopped")
}
Improve graceful shutdown: retain and defer the cancel function of the shutdown timeout context instead of discarding it.
// Copyright 2018 Frédéric Guillot. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package daemon
import (
"context"
"os"
"os/signal"
"runtime"
"syscall"
"time"
"github.com/miniflux/miniflux/config"
"github.com/miniflux/miniflux/locale"
"github.com/miniflux/miniflux/logger"
"github.com/miniflux/miniflux/reader/feed"
"github.com/miniflux/miniflux/scheduler"
"github.com/miniflux/miniflux/storage"
)
// Run starts the daemon.
func Run(cfg *config.Config, store *storage.Storage) {
	logger.Info("Starting Miniflux...")

	// Shut down on SIGINT or SIGTERM.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	signal.Notify(signals, syscall.SIGTERM)

	// Periodic runtime statistics, logged at debug level.
	go func() {
		for {
			var stats runtime.MemStats
			runtime.ReadMemStats(&stats)
			logger.Debug("Alloc=%vK, TotalAlloc=%vK, Sys=%vK, NumGC=%v, GoRoutines=%d, NumCPU=%d",
				stats.Alloc/1024, stats.TotalAlloc/1024, stats.Sys/1024, stats.NumGC, runtime.NumGoroutine(), runtime.NumCPU())
			time.Sleep(30 * time.Second)
		}
	}()

	translator := locale.Load()
	feedHandler := feed.NewFeedHandler(store, translator)
	workers := scheduler.NewWorkerPool(feedHandler, cfg.WorkerPoolSize())
	httpServer := newServer(cfg, store, workers, feedHandler, translator)

	scheduler.NewFeedScheduler(store, workers, cfg.PollingFrequency(), cfg.BatchSize())
	scheduler.NewSessionScheduler(store, cfg.SessionCleanupFrequency())

	<-signals
	logger.Info("Shutting down the server...")

	// Bounded shutdown window; cancel releases the timeout's resources.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	httpServer.Shutdown(ctx)
	store.Close()
	logger.Info("Server gracefully stopped")
}
|
package daemon
import (
"os"
"reflect"
"time"
"fmt"
"github.com/sevlyar/go-daemon"
"github.com/Sirupsen/logrus"
"github.com/BluePecker/JwtAuth/server"
"github.com/BluePecker/JwtAuth/storage"
"github.com/BluePecker/JwtAuth/server/router"
_ "github.com/BluePecker/JwtAuth/storage/redis"
//_ "github.com/BluePecker/JwtAuth/storage/ram"
"github.com/dgrijalva/jwt-go"
"github.com/BluePecker/JwtAuth/server/types/token"
RouteToken "github.com/BluePecker/JwtAuth/server/router/token"
"crypto/md5"
"encoding/hex"
)
// Application-wide defaults for token issuance.
const (
	// TOKEN_TTL is the token lifetime in seconds (2 hours).
	TOKEN_TTL = 2 * 3600
	// VERSION is the application version string printed by --version.
	VERSION = "1.0.0"
	// ALLOW_LOGIN_NUM is passed to Storage.LKeep; presumably the number
	// of tokens kept per unique identifier — TODO confirm against the
	// storage driver's LKeep semantics.
	ALLOW_LOGIN_NUM = 3
)
// Storage holds the storage-backend connection settings; matching fields
// are copied into storage.Option by storageOptionInject.
type Storage struct {
	Driver     string // backend driver name (e.g. "redis", registered via blank import)
	Path       string
	Host       string
	Port       int
	MaxRetries int
	Username   string
	Password   string
	PoolSize   int
	Database   string
}
// Security groups the TLS settings handed to server.TLS when enabled.
type Security struct {
	TLS  bool   // serve over TLS when true
	Key  string // TLS private key (passed to server.TLS.Key)
	Cert string // TLS certificate (passed to server.TLS.Cert)
}
// Options carries every start-up option accepted by the daemon.
type Options struct {
	PidFile  string // pid file path used when daemonizing
	LogFile  string // log file path used when daemonizing
	LogLevel string // logrus level name
	Port     int    // listen port
	Host     string // listen host
	Daemon   bool   // fork into the background when true
	Version  bool   // print the version and exit when true
	Security Security
	Storage  Storage
	Secret   string // jwt signing secret; auto-generated when empty
}
// Daemon ties together the configured options, the HTTP server and the
// storage driver, and implements token generation and authentication.
type Daemon struct {
	Options *Options
	Server  *server.Server
	Storage storage.Driver
	// Secret is the jwt signing secret, copied from Options by secret().
	Secret string
}
type (
	// CustomClaims extends the standard JWT claims with per-session
	// metadata embedded in every issued token.
	CustomClaims struct {
		Device    string `json:"device"`
		Unique    string `json:"unique"` // identifier the token is stored under
		Timestamp int64  `json:"timestamp"` // issuance time (Unix seconds)
		Addr      string `json:"addr"`
		jwt.StandardClaims
	}
)
// storageOptionInject copies every field of the daemon's storage
// configuration into p2 when the destination has a field with the same
// name and type.
func (d *Daemon) storageOptionInject(p2 *storage.Option) {
	src := reflect.ValueOf(&d.Options.Storage).Elem()
	dst := reflect.ValueOf(p2).Elem()
	for i := 0; i < dst.NumField(); i++ {
		name := dst.Type().Field(i).Name
		from := src.FieldByName(name)
		if from.IsValid() && dst.FieldByName(name).Type() == from.Type() {
			dst.FieldByName(name).Set(from)
		}
	}
}
// NewStorage builds a storage driver from the configured driver name,
// injecting the daemon's storage settings into the driver options.
func (d *Daemon) NewStorage() (*storage.Driver, error) {
	var opt storage.Option
	d.storageOptionInject(&opt)
	driver, err := storage.New(d.Options.Storage.Driver, opt)
	return &driver, err
}
// NewServer lazily allocates the HTTP server instance.
func (d *Daemon) NewServer() {
	d.Server = new(server.Server)
}
// secret ensures a JWT signing secret is available: when none was
// supplied in the options, one is derived from the MD5 of the current
// Unix timestamp.
func (d *Daemon) secret() {
	if d.Options.Secret == "" {
		hash := md5.New()
		// []byte(int64) is not a legal Go conversion — the original
		// `hash.Write([]byte(time.Now().Unix()))` does not compile.
		// Hash the decimal string form of the timestamp instead.
		hash.Write([]byte(fmt.Sprintf("%d", time.Now().Unix())))
		d.Options.Secret = hex.EncodeToString(hash.Sum(nil))
		logrus.Infof("auto generate jwt secret %s", d.Options.Secret)
	}
	d.Secret = d.Options.Secret
}
// Listen maps the daemon options onto server options and starts
// accepting connections, creating the server first if needed.
func (d *Daemon) Listen() {
	if d.Server == nil {
		d.NewServer()
	}
	opts := server.Options{Host: d.Options.Host, Port: d.Options.Port}
	if sec := d.Options.Security; sec.TLS {
		opts.Tls = &server.TLS{Cert: sec.Cert, Key: sec.Key}
	}
	d.Server.Accept(opts)
}
// addRouter registers one or more routers with the HTTP server,
// creating the server first if needed.
func (d *Daemon) addRouter(routers ...router.Router) {
	if d.Server == nil {
		d.NewServer()
	}
	for i := range routers {
		d.Server.AddRouter(routers[i])
	}
}
// Generate signs a new HS256 JWT for the request and records it in
// storage under the request's unique identifier.
func (d *Daemon) Generate(req token.GenerateRequest) (string, error) {
	now := time.Now()
	claims := CustomClaims{
		req.Device,
		req.Unique,
		now.Unix(),
		req.Addr,
		jwt.StandardClaims{
			ExpiresAt: now.Add(time.Second * TOKEN_TTL).Unix(),
			Issuer:    "shuc324@gmail.com",
		},
	}
	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString([]byte(d.Secret))
	if err != nil {
		return "", err
	}
	if err := d.Storage.LKeep(req.Unique, signed, ALLOW_LOGIN_NUM, TOKEN_TTL); err != nil {
		return "", err
	}
	return signed, nil
}
// Auth parses and validates a JWT, then verifies that it is still
// present in storage for its unique identifier. Returns the claims on
// success, (nil, nil) when the token is valid but no longer stored.
func (d *Daemon) Auth(req token.AuthRequest) (interface{}, error) {
	keyFunc := func(t *jwt.Token) (interface{}, error) {
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("Unexpected signing method %v", t.Header["alg"])
		}
		return []byte(d.Secret), nil
	}
	parsed, err := jwt.ParseWithClaims(req.JsonWebToken, &CustomClaims{}, keyFunc)
	if err != nil || !parsed.Valid {
		return nil, err
	}
	if claims, ok := parsed.Claims.(*CustomClaims); ok && d.Storage.LExist(claims.Unique, req.JsonWebToken) {
		return claims, nil
	}
	return nil, err
}
// NewStart boots the daemon: prints the version when requested,
// optionally forks into the background, then wires storage and routing
// and starts listening.
func NewStart(args Options) {
	if args.Version {
		fmt.Printf("JwtAuth version %s.\n", VERSION)
		os.Exit(0)
	}
	if args.Daemon {
		dCtx := daemon.Context{
			PidFileName: args.PidFile,
			PidFilePerm: 0644,
			LogFilePerm: 0640,
			Umask:       027,
			WorkDir:     "/",
			LogFileName: args.LogFile,
		}
		level, err := logrus.ParseLevel(args.LogLevel)
		if err != nil {
			logrus.Fatal(err)
		}
		logrus.SetLevel(level)
		logrus.SetFormatter(&logrus.TextFormatter{
			TimestampFormat: "2006-01-02 15:04:05",
		})
		defer dCtx.Release()
		if child, err := dCtx.Reborn(); err != nil {
			logrus.Fatal(err)
		} else if child != nil {
			// Parent process: the child continues as the daemon.
			return
		}
	}
	// Renamed from `Daemon` — the original variable shadowed the Daemon type.
	d := &Daemon{
		Options: &args,
	}
	d.secret()
	store, err := d.NewStorage()
	if err != nil {
		logrus.Error(err)
		// Exit non-zero: the original exited with status 0 on storage
		// failure, hiding the error from process supervisors.
		os.Exit(1)
	}
	d.Storage = *store
	d.addRouter(RouteToken.NewRouter(d))
	d.Listen()
}
Add secret: always log the effective JWT secret after assignment, whether user-supplied or auto-generated.
package daemon
import (
"os"
"reflect"
"time"
"fmt"
"github.com/sevlyar/go-daemon"
"github.com/Sirupsen/logrus"
"github.com/BluePecker/JwtAuth/server"
"github.com/BluePecker/JwtAuth/storage"
"github.com/BluePecker/JwtAuth/server/router"
_ "github.com/BluePecker/JwtAuth/storage/redis"
//_ "github.com/BluePecker/JwtAuth/storage/ram"
"github.com/dgrijalva/jwt-go"
"github.com/BluePecker/JwtAuth/server/types/token"
RouteToken "github.com/BluePecker/JwtAuth/server/router/token"
"crypto/md5"
"encoding/hex"
)
// Application-wide defaults for token issuance.
const (
	// TOKEN_TTL is the token lifetime in seconds (2 hours).
	TOKEN_TTL = 2 * 3600
	// VERSION is the application version string printed by --version.
	VERSION = "1.0.0"
	// ALLOW_LOGIN_NUM is passed to Storage.LKeep; presumably the number
	// of tokens kept per unique identifier — TODO confirm against the
	// storage driver's LKeep semantics.
	ALLOW_LOGIN_NUM = 3
)
// Storage holds the storage-backend connection settings; matching fields
// are copied into storage.Option by storageOptionInject.
type Storage struct {
	Driver     string // backend driver name (e.g. "redis", registered via blank import)
	Path       string
	Host       string
	Port       int
	MaxRetries int
	Username   string
	Password   string
	PoolSize   int
	Database   string
}
// Security groups the TLS settings handed to server.TLS when enabled.
type Security struct {
	TLS  bool   // serve over TLS when true
	Key  string // TLS private key (passed to server.TLS.Key)
	Cert string // TLS certificate (passed to server.TLS.Cert)
}
// Options carries every start-up option accepted by the daemon.
type Options struct {
	PidFile  string // pid file path used when daemonizing
	LogFile  string // log file path used when daemonizing
	LogLevel string // logrus level name
	Port     int    // listen port
	Host     string // listen host
	Daemon   bool   // fork into the background when true
	Version  bool   // print the version and exit when true
	Security Security
	Storage  Storage
	Secret   string // jwt signing secret; auto-generated when empty
}
// Daemon ties together the configured options, the HTTP server and the
// storage driver, and implements token generation and authentication.
type Daemon struct {
	Options *Options
	Server  *server.Server
	Storage storage.Driver
	// Secret is the jwt signing secret, copied from Options by secret().
	Secret string
}
type (
	// CustomClaims extends the standard JWT claims with per-session
	// metadata embedded in every issued token.
	CustomClaims struct {
		Device    string `json:"device"`
		Unique    string `json:"unique"` // identifier the token is stored under
		Timestamp int64  `json:"timestamp"` // issuance time (Unix seconds)
		Addr      string `json:"addr"`
		jwt.StandardClaims
	}
)
// storageOptionInject copies every field of the daemon's storage
// configuration into p2 when the destination has a field with the same
// name and type.
func (d *Daemon) storageOptionInject(p2 *storage.Option) {
	src := reflect.ValueOf(&d.Options.Storage).Elem()
	dst := reflect.ValueOf(p2).Elem()
	for i := 0; i < dst.NumField(); i++ {
		name := dst.Type().Field(i).Name
		from := src.FieldByName(name)
		if from.IsValid() && dst.FieldByName(name).Type() == from.Type() {
			dst.FieldByName(name).Set(from)
		}
	}
}
// NewStorage builds a storage driver from the configured driver name,
// injecting the daemon's storage settings into the driver options.
func (d *Daemon) NewStorage() (*storage.Driver, error) {
	var opt storage.Option
	d.storageOptionInject(&opt)
	driver, err := storage.New(d.Options.Storage.Driver, opt)
	return &driver, err
}
// NewServer lazily allocates the HTTP server instance.
func (d *Daemon) NewServer() {
	d.Server = new(server.Server)
}
// secret ensures a JWT signing secret is available: when none was
// supplied in the options, one is derived from the MD5 of the current
// Unix timestamp. The effective secret is always logged.
func (d *Daemon) secret() {
	if d.Options.Secret == "" {
		hash := md5.New()
		// []byte(int64) is not a legal Go conversion — the original
		// `hash.Write([]byte(time.Now().Unix()))` does not compile.
		// Hash the decimal string form of the timestamp instead.
		hash.Write([]byte(fmt.Sprintf("%d", time.Now().Unix())))
		d.Options.Secret = hex.EncodeToString(hash.Sum(nil))
	}
	d.Secret = d.Options.Secret
	// NOTE(review): this logs the signing secret in plaintext — confirm
	// that is acceptable for this deployment's log handling.
	logrus.Infof("jwt secret: %s", d.Secret)
}
// Listen maps the daemon options onto server options and starts
// accepting connections, creating the server first if needed.
func (d *Daemon) Listen() {
	if d.Server == nil {
		d.NewServer()
	}
	opts := server.Options{Host: d.Options.Host, Port: d.Options.Port}
	if sec := d.Options.Security; sec.TLS {
		opts.Tls = &server.TLS{Cert: sec.Cert, Key: sec.Key}
	}
	d.Server.Accept(opts)
}
// addRouter registers one or more routers with the HTTP server,
// creating the server first if needed.
func (d *Daemon) addRouter(routers ...router.Router) {
	if d.Server == nil {
		d.NewServer()
	}
	for i := range routers {
		d.Server.AddRouter(routers[i])
	}
}
// Generate signs a new HS256 JWT for the request and records it in
// storage under the request's unique identifier.
func (d *Daemon) Generate(req token.GenerateRequest) (string, error) {
	now := time.Now()
	claims := CustomClaims{
		req.Device,
		req.Unique,
		now.Unix(),
		req.Addr,
		jwt.StandardClaims{
			ExpiresAt: now.Add(time.Second * TOKEN_TTL).Unix(),
			Issuer:    "shuc324@gmail.com",
		},
	}
	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString([]byte(d.Secret))
	if err != nil {
		return "", err
	}
	if err := d.Storage.LKeep(req.Unique, signed, ALLOW_LOGIN_NUM, TOKEN_TTL); err != nil {
		return "", err
	}
	return signed, nil
}
// Auth parses and validates a JWT, then verifies that it is still
// present in storage for its unique identifier. Returns the claims on
// success, (nil, nil) when the token is valid but no longer stored.
func (d *Daemon) Auth(req token.AuthRequest) (interface{}, error) {
	keyFunc := func(t *jwt.Token) (interface{}, error) {
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("Unexpected signing method %v", t.Header["alg"])
		}
		return []byte(d.Secret), nil
	}
	parsed, err := jwt.ParseWithClaims(req.JsonWebToken, &CustomClaims{}, keyFunc)
	if err != nil || !parsed.Valid {
		return nil, err
	}
	if claims, ok := parsed.Claims.(*CustomClaims); ok && d.Storage.LExist(claims.Unique, req.JsonWebToken) {
		return claims, nil
	}
	return nil, err
}
func NewStart(args Options) {
var err error;
if args.Version == true {
fmt.Printf("JwtAuth version %s.\n", VERSION)
os.Exit(0)
}
if args.Daemon == true {
dCtx := daemon.Context{
PidFileName: args.PidFile,
PidFilePerm: 0644,
LogFilePerm: 0640,
Umask: 027,
WorkDir: "/",
LogFileName: args.LogFile,
}
level, err := logrus.ParseLevel(args.LogLevel)
if err == nil {
logrus.SetLevel(level)
logrus.SetFormatter(&logrus.TextFormatter{
TimestampFormat: "2006-01-02 15:04:05",
})
} else {
logrus.Fatal(err)
}
defer dCtx.Release()
if child, err := dCtx.Reborn(); err != nil {
logrus.Fatal(err)
} else if child != nil {
return
}
}
Daemon := &Daemon{
Options: &args,
}
Daemon.secret()
Storage, err := Daemon.NewStorage()
if err != nil {
logrus.Error(err)
os.Exit(0)
}
Daemon.Storage = *Storage
Daemon.addRouter(RouteToken.NewRouter(Daemon))
Daemon.Listen()
} |
// Copyright 2016-2019 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bufio"
"context"
"fmt"
"net"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
"github.com/cilium/cilium/api/v1/models"
. "github.com/cilium/cilium/api/v1/server/restapi/daemon"
health "github.com/cilium/cilium/cilium-health/launch"
"github.com/cilium/cilium/common"
monitorLaunch "github.com/cilium/cilium/monitor/launch"
"github.com/cilium/cilium/pkg/api"
"github.com/cilium/cilium/pkg/bpf"
"github.com/cilium/cilium/pkg/cidr"
"github.com/cilium/cilium/pkg/clustermesh"
"github.com/cilium/cilium/pkg/command/exec"
"github.com/cilium/cilium/pkg/completion"
"github.com/cilium/cilium/pkg/controller"
"github.com/cilium/cilium/pkg/counter"
"github.com/cilium/cilium/pkg/datapath"
"github.com/cilium/cilium/pkg/datapath/alignchecker"
bpfIPCache "github.com/cilium/cilium/pkg/datapath/ipcache"
"github.com/cilium/cilium/pkg/datapath/iptables"
"github.com/cilium/cilium/pkg/datapath/linux/ipsec"
"github.com/cilium/cilium/pkg/datapath/loader"
"github.com/cilium/cilium/pkg/datapath/prefilter"
"github.com/cilium/cilium/pkg/debug"
"github.com/cilium/cilium/pkg/defaults"
"github.com/cilium/cilium/pkg/endpoint"
"github.com/cilium/cilium/pkg/endpoint/connector"
"github.com/cilium/cilium/pkg/endpointmanager"
"github.com/cilium/cilium/pkg/envoy"
"github.com/cilium/cilium/pkg/fqdn"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/identity/cache"
"github.com/cilium/cilium/pkg/ip"
"github.com/cilium/cilium/pkg/ipam"
"github.com/cilium/cilium/pkg/ipcache"
"github.com/cilium/cilium/pkg/k8s"
"github.com/cilium/cilium/pkg/loadbalancer"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/maps/ctmap"
"github.com/cilium/cilium/pkg/maps/eppolicymap"
ipcachemap "github.com/cilium/cilium/pkg/maps/ipcache"
"github.com/cilium/cilium/pkg/maps/lbmap"
"github.com/cilium/cilium/pkg/maps/lxcmap"
"github.com/cilium/cilium/pkg/maps/metricsmap"
"github.com/cilium/cilium/pkg/maps/sockmap"
"github.com/cilium/cilium/pkg/maps/tunnel"
monitorAPI "github.com/cilium/cilium/pkg/monitor/api"
"github.com/cilium/cilium/pkg/mtu"
"github.com/cilium/cilium/pkg/node"
nodemanager "github.com/cilium/cilium/pkg/node/manager"
nodeStore "github.com/cilium/cilium/pkg/node/store"
"github.com/cilium/cilium/pkg/nodediscovery"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy"
policyApi "github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/proxy"
"github.com/cilium/cilium/pkg/proxy/logger"
"github.com/cilium/cilium/pkg/revert"
"github.com/cilium/cilium/pkg/sockops"
"github.com/cilium/cilium/pkg/status"
"github.com/cilium/cilium/pkg/trigger"
"github.com/cilium/cilium/pkg/workloads"
"github.com/go-openapi/runtime/middleware"
"github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
"golang.org/x/sync/semaphore"
)
const (
	// AutoCIDR indicates that a CIDR should be allocated automatically
	// by the agent instead of being taken from configuration.
	AutoCIDR = "auto"
)
// Positional indices of the arguments passed to the datapath init
// script (init.sh, see compileBase); initArgMax is the count of fixed
// arguments.
const (
	initArgLib int = iota
	initArgRundir
	initArgIPv4NodeIP
	initArgIPv6NodeIP
	initArgMode
	initArgDevice
	initArgDevicePreFilter
	initArgModePreFilter
	initArgMTU
	initArgIPSec
	initArgMax
)
// Daemon is the cilium daemon that is in charge of perform all necessary plumbing,
// monitoring when a LXC starts.
type Daemon struct {
	// buildEndpointSem bounds how many endpoint builds may run at once;
	// see QueueEndpointBuild.
	buildEndpointSem *semaphore.Weighted
	// l7Proxy is the L7 proxy used for redirects and network policies;
	// nil when the proxy is disabled (checked by the proxy methods below).
	l7Proxy      *proxy.Proxy
	loadBalancer *loadbalancer.LoadBalancer
	policy       *policy.Repository
	preFilter    *prefilter.PreFilter
	// Only used for CRI-O since it does not support events.
	workloadsEventsCh chan<- *workloads.EventMessage

	// statusCollectMutex guards statusResponse.
	statusCollectMutex lock.RWMutex
	statusResponse     models.StatusResponse
	statusCollector    *status.Collector

	// uniqueIDMU protects uniqueID, which maps a queued endpoint ID to
	// the cancel function of its pending build-permit wait.
	uniqueIDMU lock.Mutex
	uniqueID   map[uint64]context.CancelFunc

	nodeMonitor  *monitorLaunch.NodeMonitor
	ciliumHealth *health.CiliumHealth

	// dnsRuleGen manages toFQDNs rules
	dnsRuleGen *fqdn.RuleGen

	// dnsPoller polls DNS names and sends them to dnsRuleGen
	dnsPoller *fqdn.DNSPoller

	// k8sAPIs is a set of k8s API in use. They are setup in EnableK8sWatcher,
	// and may be disabled while the agent runs.
	// This is on this object, instead of a global, because EnableK8sWatcher is
	// on Daemon.
	k8sAPIGroups k8sAPIGroupsUsed

	// Used to synchronize generation of daemon's BPF programs and endpoint BPF
	// programs.
	compilationMutex *lock.RWMutex

	// prefixLengths tracks a mapping from CIDR prefix length to the count
	// of rules that refer to that prefix length.
	prefixLengths *counter.PrefixLengthCounter

	clustermesh *clustermesh.ClusterMesh

	// k8sResourceSyncWaitGroup is used to block the starting of the daemon,
	// including regenerating restored endpoints (if specified) until all
	// policies, services, ingresses, and endpoints stored in Kubernetes at the
	// time of bootstrapping of the agent are consumed by Cilium.
	// This prevents regeneration of endpoints, restoring of loadbalancer BPF
	// maps, etc. being performed without crucial information in securing said
	// components. See GH-5038 and GH-4457.
	k8sResourceSyncWaitGroup sync.WaitGroup

	// k8sResourceSyncedMu protects the k8sResourceSynced map.
	k8sResourceSyncedMu lock.RWMutex

	// k8sResourceSynced maps a resource name to a channel. Once the given
	// resource name is synchronized with k8s, the channel for which that
	// resource name maps to is closed.
	k8sResourceSynced map[string]chan struct{}

	// k8sSvcCache is a cache of all Kubernetes services and endpoints
	k8sSvcCache k8s.ServiceCache

	mtuConfig     mtu.Configuration
	policyTrigger *trigger.Trigger

	// datapath is the underlying datapath implementation to use to
	// implement all aspects of an agent
	datapath datapath.Datapath

	// nodeDiscovery defines the node discovery logic of the agent
	nodeDiscovery *nodediscovery.NodeDiscovery

	// ipam is the IP address manager of the agent
	ipam *ipam.IPAM
}
// Datapath returns a reference to the datapath implementation in use by
// the agent.
func (d *Daemon) Datapath() datapath.Datapath {
	return d.datapath
}
// UpdateProxyRedirect updates the redirect rules in the proxy for a particular
// endpoint using the provided L4 filter. Returns the allocated proxy port
func (d *Daemon) UpdateProxyRedirect(e *endpoint.Endpoint, l4 *policy.L4Filter, proxyWaitGroup *completion.WaitGroup) (uint16, error, revert.FinalizeFunc, revert.RevertFunc) {
	if d.l7Proxy == nil {
		return 0, fmt.Errorf("can't redirect, proxy disabled"), nil, nil
	}

	redirect, err, finalize, revertFn := d.l7Proxy.CreateOrUpdateRedirect(l4, e.ProxyID(l4), e, proxyWaitGroup)
	if err != nil {
		return 0, err, nil, nil
	}

	return redirect.ProxyPort, nil, finalize, revertFn
}
// RemoveProxyRedirect removes a previously installed proxy redirect for an
// endpoint
func (d *Daemon) RemoveProxyRedirect(e *endpoint.Endpoint, id string, proxyWaitGroup *completion.WaitGroup) (error, revert.FinalizeFunc, revert.RevertFunc) {
	if d.l7Proxy == nil {
		return nil, nil, nil
	}

	scopedLog := log.WithFields(logrus.Fields{
		logfields.EndpointID: e.ID,
		logfields.L4PolicyID: id,
	})
	scopedLog.Debug("Removing redirect to endpoint")
	return d.l7Proxy.RemoveRedirect(id, proxyWaitGroup)
}
// UpdateNetworkPolicy adds or updates a network policy in the set
// published to L7 proxies.
func (d *Daemon) UpdateNetworkPolicy(e *endpoint.Endpoint, policy *policy.L4Policy,
	labelsMap, deniedIngressIdentities, deniedEgressIdentities cache.IdentityCache, proxyWaitGroup *completion.WaitGroup) (error, revert.RevertFunc) {
	if d.l7Proxy == nil {
		return fmt.Errorf("can't update network policy, proxy disabled"), nil
	}
	ingressEnabled := e.GetIngressPolicyEnabledLocked()
	egressEnabled := e.GetEgressPolicyEnabledLocked()
	err, revertFunc := d.l7Proxy.UpdateNetworkPolicy(e, policy, ingressEnabled, egressEnabled,
		labelsMap, deniedIngressIdentities, deniedEgressIdentities, proxyWaitGroup)
	return err, revert.RevertFunc(revertFunc)
}
// RemoveNetworkPolicy removes a network policy from the set published to
// L7 proxies.
func (d *Daemon) RemoveNetworkPolicy(e *endpoint.Endpoint) {
	if d.l7Proxy != nil {
		d.l7Proxy.RemoveNetworkPolicy(e)
	}
}
// QueueEndpointBuild waits for a "build permit" for the endpoint
// identified by 'epID'. This function blocks until the endpoint can
// start building. The returned function must then be called to
// release the "build permit" when the most resource intensive parts
// of the build are done. The returned function is idempotent, so it
// may be called more than once. Returns nil if the caller should NOT
// start building the endpoint. This may happen due to a build being
// queued for the endpoint already, or due to the wait for the build
// permit being canceled. The latter case happens when the endpoint is
// being deleted.
func (d *Daemon) QueueEndpointBuild(epID uint64) func() {
	d.uniqueIDMU.Lock()
	// Skip new build requests if the endpoint is already in the queue
	// waiting. In this case the queued build will pick up any changes
	// made so far, so there is no need to queue another build now.
	if _, queued := d.uniqueID[epID]; queued {
		d.uniqueIDMU.Unlock()
		return nil
	}
	// Store a cancel function to the 'uniqueID' map so that we can
	// cancel the wait when the endpoint is being deleted.
	ctx, cancel := context.WithCancel(context.Background())
	d.uniqueID[epID] = cancel
	d.uniqueIDMU.Unlock()

	// Acquire build permit. This may block. The mutex must NOT be held
	// here, so RemoveFromEndpointQueue can cancel the wait.
	err := d.buildEndpointSem.Acquire(ctx, 1)

	// Not queueing any more, so remove the cancel func from 'uniqueID' map.
	// The caller may still cancel the build by calling the cancel func
	// after we return it. After this point another build may be queued for
	// this endpoint.
	d.uniqueIDMU.Lock()
	delete(d.uniqueID, epID)
	d.uniqueIDMU.Unlock()

	if err != nil {
		return nil // Acquire failed
	}

	// Acquire succeeded, but the context was canceled after?
	if ctx.Err() == context.Canceled {
		d.buildEndpointSem.Release(1)
		return nil
	}

	// At this point the build permit has been acquired. It must
	// be released by the caller by calling the returned function
	// when the heavy lifting of the build is done.
	// Using sync.Once to make the returned function idempotent.
	var once sync.Once
	return func() {
		once.Do(func() {
			d.buildEndpointSem.Release(1)
		})
	}
}
// RemoveFromEndpointQueue removes the endpoint from the "build permit" queue,
// canceling the wait for the build permit if still waiting.
func (d *Daemon) RemoveFromEndpointQueue(epID uint64) {
	d.uniqueIDMU.Lock()
	defer d.uniqueIDMU.Unlock()
	cancel, queued := d.uniqueID[epID]
	if !queued || cancel == nil {
		return
	}
	delete(d.uniqueID, epID)
	cancel()
}
// GetPolicyRepository returns the policy repository of the daemon,
// holding all imported policy rules.
func (d *Daemon) GetPolicyRepository() *policy.Repository {
	return d.policy
}
// DebugEnabled returns if debug mode is enabled, i.e. whether the
// option.Debug runtime option is set in the daemon configuration.
func (d *Daemon) DebugEnabled() bool {
	return option.Config.Opts.IsEnabled(option.Debug)
}
// writeNetdevHeader writes the netdev configuration header file into
// dir so the datapath programs can be compiled against it.
func (d *Daemon) writeNetdevHeader(dir string) error {
	path := filepath.Join(dir, common.NetdevHeaderFileName)
	log.WithField(logfields.Path, path).Debug("writing configuration")

	f, err := os.Create(path)
	if err != nil {
		return fmt.Errorf("failed to open file %s for writing: %s", path, err)
	}
	defer f.Close()

	return d.datapath.WriteNetdevConfig(f, d)
}
// GetCIDRPrefixLengths returns the sorted list of unique prefix lengths used
// by CIDR policies, as (IPv6 lengths, IPv4 lengths).
func (d *Daemon) GetCIDRPrefixLengths() (s6, s4 []int) {
	return d.prefixLengths.ToBPFData()
}
// writePreFilterHeader writes the XDP prefilter configuration header
// into dir.
// Must be called with option.Config.EnablePolicyMU locked.
func (d *Daemon) writePreFilterHeader(dir string) error {
	path := filepath.Join(dir, common.PreFilterHeaderFileName)
	log.WithField(logfields.Path, path).Debug("writing configuration")

	f, err := os.Create(path)
	if err != nil {
		return fmt.Errorf("failed to open file %s for writing: %s", path, err)
	}
	defer f.Close()

	fw := bufio.NewWriter(f)
	fmt.Fprint(fw, "/*\n")
	fmt.Fprintf(fw, " * XDP device: %s\n", option.Config.DevicePreFilter)
	fmt.Fprintf(fw, " * XDP mode: %s\n", option.Config.ModePreFilter)
	fmt.Fprint(fw, " */\n\n")
	d.preFilter.WriteConfig(fw)
	return fw.Flush()
}
// GetOptions returns the datapath configuration options of the daemon
// (the shared option.Config.Opts set).
func (d *Daemon) GetOptions() *option.IntOptions {
	return option.Config.Opts
}
// setHostAddresses looks up the LB interface and records its first
// universe-scoped IPv4/IPv6 addresses in the global configuration
// (HostV4Addr/HostV6Addr), for the address families that are enabled.
func (d *Daemon) setHostAddresses() error {
	l, err := netlink.LinkByName(option.Config.LBInterface)
	if err != nil {
		// Report the interface that was actually looked up: the link is
		// fetched by LBInterface, but the original message printed
		// option.Config.Device, producing misleading diagnostics.
		return fmt.Errorf("unable to get network device %s: %s", option.Config.LBInterface, err)
	}

	// getAddr returns the first SCOPE_UNIVERSE address of the given
	// family on the link, or nil when none is present.
	getAddr := func(netLinkFamily int) (net.IP, error) {
		addrs, err := netlink.AddrList(l, netLinkFamily)
		if err != nil {
			return nil, fmt.Errorf("error while getting %s's addresses: %s", option.Config.LBInterface, err)
		}
		for _, possibleAddr := range addrs {
			if netlink.Scope(possibleAddr.Scope) == netlink.SCOPE_UNIVERSE {
				return possibleAddr.IP, nil
			}
		}
		return nil, nil
	}

	if option.Config.EnableIPv4 {
		hostV4Addr, err := getAddr(netlink.FAMILY_V4)
		if err != nil {
			return err
		}
		if hostV4Addr != nil {
			option.Config.HostV4Addr = hostV4Addr
			log.Infof("Using IPv4 host address: %s", option.Config.HostV4Addr)
		}
	}

	if option.Config.EnableIPv6 {
		hostV6Addr, err := getAddr(netlink.FAMILY_V6)
		if err != nil {
			return err
		}
		if hostV6Addr != nil {
			option.Config.HostV6Addr = hostV6Addr
			log.Infof("Using IPv6 host address: %s", option.Config.HostV6Addr)
		}
	}
	return nil
}
// GetCompilationLock returns the mutex responsible for synchronizing compilation
// of BPF programs (held exclusively by compileBase, shared by endpoint builds).
func (d *Daemon) GetCompilationLock() *lock.RWMutex {
	return d.compilationMutex
}
// compileBase writes the datapath configuration headers, assembles the
// argument vector for the init.sh datapath setup script, runs it, and
// then restores iptables rules. It holds the compilation lock for its
// whole duration so endpoint builds cannot run concurrently.
func (d *Daemon) compileBase() error {
	var args []string
	var mode string
	var ret error

	args = make([]string, initArgMax)

	// Lock so that endpoints cannot be built while we are compile base programs.
	d.compilationMutex.Lock()
	defer d.compilationMutex.Unlock()

	if err := d.writeNetdevHeader("./"); err != nil {
		log.WithError(err).Warn("Unable to write netdev header")
		return err
	}

	loader.Init(d.datapath, &d.nodeDiscovery.LocalConfig)

	// Probe the XDP prefilter device; on probe failure the device is
	// reset to "undefined", which disables the prefilter below.
	scopedLog := log.WithField(logfields.XDPDevice, option.Config.DevicePreFilter)
	if option.Config.DevicePreFilter != "undefined" {
		if err := prefilter.ProbePreFilter(option.Config.DevicePreFilter, option.Config.ModePreFilter); err != nil {
			scopedLog.WithError(err).Warn("Turning off prefilter")
			option.Config.DevicePreFilter = "undefined"
		}
	}
	if option.Config.DevicePreFilter != "undefined" {
		if d.preFilter, ret = prefilter.NewPreFilter(); ret != nil {
			scopedLog.WithError(ret).Warn("Unable to init prefilter")
			return ret
		}

		if err := d.writePreFilterHeader("./"); err != nil {
			scopedLog.WithError(err).Warn("Unable to write prefilter header")
			return err
		}

		args[initArgDevicePreFilter] = option.Config.DevicePreFilter
		args[initArgModePreFilter] = option.Config.ModePreFilter
	}

	args[initArgLib] = option.Config.BpfDir
	args[initArgRundir] = option.Config.StateDir

	if option.Config.EnableIPv4 {
		args[initArgIPv4NodeIP] = node.GetInternalIPv4().String()
	} else {
		args[initArgIPv4NodeIP] = "<nil>"
	}

	if option.Config.EnableIPv6 {
		args[initArgIPv6NodeIP] = node.GetIPv6().String()
	} else {
		args[initArgIPv6NodeIP] = "<nil>"
	}

	args[initArgMTU] = fmt.Sprintf("%d", d.mtuConfig.GetDeviceMTU())

	if option.Config.EnableIPSec {
		args[initArgIPSec] = "true"
	} else {
		args[initArgIPSec] = "false"
	}

	// Select the datapath mode: a specific device implies lb/ipvlan/direct
	// mode; otherwise the tunnel (or flannel) mode is used.
	if option.Config.Device != "undefined" {
		_, err := netlink.LinkByName(option.Config.Device)
		if err != nil {
			log.WithError(err).WithField("device", option.Config.Device).Warn("Link does not exist")
			return err
		}

		if option.Config.IsLBEnabled() {
			if option.Config.Device != option.Config.LBInterface {
				//FIXME: allow different interfaces
				return fmt.Errorf("Unable to have an interface for LB mode different than snooping interface")
			}
			if err := d.setHostAddresses(); err != nil {
				return err
			}
			mode = "lb"
		} else {
			if option.Config.DatapathMode == option.DatapathModeIpvlan {
				mode = "ipvlan"
			} else {
				mode = "direct"
			}
		}

		args[initArgMode] = mode
		args[initArgDevice] = option.Config.Device

		args = append(args, option.Config.Device)
	} else {
		if option.Config.IsLBEnabled() && strings.ToLower(option.Config.Tunnel) != "disabled" {
			//FIXME: allow LBMode in tunnel
			return fmt.Errorf("Unable to run LB mode with tunnel mode")
		}

		args[initArgMode] = option.Config.Tunnel

		if option.Config.IsFlannelMasterDeviceSet() {
			args[initArgMode] = "flannel"
			args[initArgDevice] = option.Config.FlannelMasterDevice
		}
	}

	// Run the datapath setup script with a bounded execution time.
	prog := filepath.Join(option.Config.BpfDir, "init.sh")
	ctx, cancel := context.WithTimeout(context.Background(), defaults.ExecTimeout)
	defer cancel()
	cmd := exec.CommandContext(ctx, prog, args...)
	cmd.Env = bpf.Environment()
	if _, err := cmd.CombinedOutput(log, true); err != nil {
		return err
	}

	if canDisableDwarfRelocations {
		// Validate alignments of C and Go equivalent structs
		if err := alignchecker.CheckStructAlignments(defaults.AlignCheckerName); err != nil {
			log.WithError(err).Fatal("C and Go structs alignment check failed")
		}
	} else {
		log.Warning("Cannot check matching of C and Go common struct alignments due to old LLVM/clang version")
	}

	if !option.Config.IsFlannelMasterDeviceSet() {
		d.ipam.ReserveLocalRoutes()
	}

	if err := d.datapath.Node().NodeConfigurationChanged(d.nodeDiscovery.LocalConfig); err != nil {
		return err
	}

	if option.Config.EnableIPv4 {
		// Always remove masquerade rule and then re-add it if required
		iptables.RemoveRules()
		if option.Config.InstallIptRules {
			if err := iptables.InstallRules(option.Config.HostDevice); err != nil {
				return err
			}
		}
	}

	// NOTE(review): these lines only log the sysctl settings — the code
	// applying them is not visible in this function; presumably init.sh
	// sets them. Confirm.
	log.Info("Setting sysctl net.core.bpf_jit_enable=1")
	log.Info("Setting sysctl net.ipv4.conf.all.rp_filter=0")
	log.Info("Setting sysctl net.ipv6.conf.all.disable_ipv6=0")

	return nil
}
// initMaps opens all BPF maps (and creates them if they do not exist). This
// must be done *before* any operations which read BPF maps, especially
// restoring endpoints and services.
func (d *Daemon) initMaps() error {
	// In dry mode no BPF maps are touched at all.
	if option.Config.DryMode {
		return nil
	}

	if _, err := lxcmap.LXCMap.OpenOrCreate(); err != nil {
		return err
	}

	// The ipcache is shared between endpoints. Parallel mode needs to be
	// used to allow existing endpoints that have not been regenerated yet
	// to continue using the existing ipcache until the endpoint is
	// regenerated for the first time. Existing endpoints are using a
	// policy map which is potentially out of sync as local identities are
	// re-allocated on startup. Parallel mode allows to continue using the
	// old version until regeneration. Note that the old version is not
	// updated with new identities. This is fine as any new identity
	// appearing would require a regeneration of the endpoint anyway in
	// order for the endpoint to gain the privilege of communication.
	if _, err := ipcachemap.IPCache.OpenParallel(); err != nil {
		return err
	}

	if _, err := metricsmap.Metrics.OpenOrCreate(); err != nil {
		return err
	}

	if _, err := tunnel.TunnelMap.OpenOrCreate(); err != nil {
		return err
	}

	if err := openServiceMaps(); err != nil {
		log.WithError(err).Fatal("Unable to open service maps")
	}

	// Set up the list of IPCache listeners in the daemon, to be
	// used by syncLXCMap().
	ipcache.IPIdentityCache.SetListeners([]ipcache.IPIdentityMappingListener{
		&envoy.NetworkPolicyHostsCache,
		bpfIPCache.NewListener(d),
	})

	// Insert local host entries to bpf maps
	if err := d.syncLXCMap(); err != nil {
		return err
	}

	// Start the controller for periodic sync
	// The purpose of the controller is to ensure that the host entries are
	// reinserted to the bpf maps if they are ever removed from them.
	// TODO: Determine if we can get rid of this when we have more rigorous
	// desired/realized state implementation for the bpf maps.
	controller.NewManager().UpdateController("lxcmap-bpf-host-sync",
		controller.ControllerParams{
			DoFunc: func(ctx context.Context) error {
				return d.syncLXCMap()
			},
			RunInterval: 5 * time.Second,
		})

	// Start the controller for periodic sync of the metrics map with
	// the prometheus server.
	controller.NewManager().UpdateController("metricsmap-bpf-prom-sync",
		controller.ControllerParams{
			DoFunc:      metricsmap.SyncMetricsMap,
			RunInterval: 5 * time.Second,
		})

	// Clean all lb entries
	if !option.Config.RestoreState {
		log.Debug("cleaning up all BPF LB maps")

		d.loadBalancer.BPFMapMU.Lock()
		defer d.loadBalancer.BPFMapMU.Unlock()

		if option.Config.EnableIPv6 {
			if err := lbmap.Service6Map.DeleteAll(); err != nil {
				return err
			}
			if err := lbmap.RRSeq6Map.DeleteAll(); err != nil {
				return err
			}
		}
		if err := d.RevNATDeleteAll(); err != nil {
			return err
		}
		if option.Config.EnableIPv4 {
			if err := lbmap.Service4Map.DeleteAll(); err != nil {
				return err
			}
			if err := lbmap.RRSeq4Map.DeleteAll(); err != nil {
				return err
			}
		}

		// If we are not restoring state, all endpoints can be
		// deleted. Entries will be re-populated.
		lxcmap.LXCMap.DeleteAll()
	}

	return nil
}
// init prepares the daemon's runtime environment: it creates the globals
// directory, changes the working directory to the state directory, resets
// any old sockops/skmsg BPF programs and, unless running in dry mode,
// writes the node configuration header, optionally enables sockops, and
// compiles the base datapath programs.
//
// Directory setup failures are Fatal (they terminate the process).
func (d *Daemon) init() error {
	globalsDir := option.Config.GetGlobalsDir()
	if err := os.MkdirAll(globalsDir, defaults.RuntimePathRights); err != nil {
		log.WithError(err).WithField(logfields.Path, globalsDir).Fatal("Could not create runtime directory")
	}

	if err := os.Chdir(option.Config.StateDir); err != nil {
		log.WithError(err).WithField(logfields.Path, option.Config.StateDir).Fatal("Could not change to runtime directory")
	}

	// Remove any old sockops and re-enable with _new_ programs if flag is set
	sockops.SockmapDisable()
	sockops.SkmsgDisable()

	if !option.Config.DryMode {
		// The node config header must exist before the base programs
		// are compiled below.
		if err := d.createNodeConfigHeaderfile(); err != nil {
			return err
		}

		if option.Config.SockopsEnable {
			eppolicymap.CreateEPPolicyMap()
			sockops.SockmapEnable()
			sockops.SkmsgEnable()
			sockmap.SockmapCreate()
		}

		if err := d.compileBase(); err != nil {
			return err
		}
	}
	return nil
}
// createNodeConfigHeaderfile writes the node configuration C header used
// by the datapath to the path given by the configuration.
//
// NOTE(review): both error paths call log.Fatal, which terminates the
// process, so the subsequent `return err` statements are effectively
// unreachable; they are kept for defensive symmetry with the signature.
func (d *Daemon) createNodeConfigHeaderfile() error {
	nodeConfigPath := option.Config.GetNodeConfigPath()
	f, err := os.Create(nodeConfigPath)
	if err != nil {
		log.WithError(err).WithField(logfields.Path, nodeConfigPath).Fatal("Failed to create node configuration file")
		return err
	}
	defer f.Close()

	if err = d.datapath.WriteNodeConfig(f, &d.nodeDiscovery.LocalConfig); err != nil {
		log.WithError(err).WithField(logfields.Path, nodeConfigPath).Fatal("Failed to write node configuration file")
		return err
	}
	return nil
}
// syncLXCMap adds local host enties to bpf lxcmap, as well as
// ipcache, if needed, and also notifies the daemon and network policy
// hosts cache if changes were made.
func (d *Daemon) syncLXCMap() error {
	// TODO: Update addresses first, in case node addressing has changed.
	// TODO: Once these start changing on runtime, figure out the locking strategy.

	// Collect the "special" identities to sync: every local host IP maps
	// to the reserved host identity, and the all-zero prefix per enabled
	// family maps to the reserved world identity.
	specialIdentities := []identity.IPIdentityPair{}

	if option.Config.EnableIPv4 {
		ip := node.GetInternalIPv4()
		if len(ip) > 0 {
			specialIdentities = append(specialIdentities,
				identity.IPIdentityPair{
					IP: ip,
					ID: identity.ReservedIdentityHost,
				})
		}

		ip = node.GetExternalIPv4()
		if len(ip) > 0 {
			specialIdentities = append(specialIdentities,
				identity.IPIdentityPair{
					IP: ip,
					ID: identity.ReservedIdentityHost,
				})
		}

		// 0.0.0.0/0 -> reserved world identity.
		specialIdentities = append(specialIdentities,
			identity.IPIdentityPair{
				IP:   net.IPv4zero,
				Mask: net.CIDRMask(0, net.IPv4len*8),
				ID:   identity.ReservedIdentityWorld,
			})
	}

	if option.Config.EnableIPv6 {
		ip := node.GetIPv6()
		if len(ip) > 0 {
			specialIdentities = append(specialIdentities,
				identity.IPIdentityPair{
					IP: ip,
					ID: identity.ReservedIdentityHost,
				})
		}

		ip = node.GetIPv6Router()
		if len(ip) > 0 {
			specialIdentities = append(specialIdentities,
				identity.IPIdentityPair{
					IP: ip,
					ID: identity.ReservedIdentityHost,
				})
		}

		// ::/0 -> reserved world identity.
		specialIdentities = append(specialIdentities,
			identity.IPIdentityPair{
				IP:   net.IPv6zero,
				Mask: net.CIDRMask(0, net.IPv6len*8),
				ID:   identity.ReservedIdentityWorld,
			})
	}

	// Snapshot the current BPF map contents so stale entries can be
	// detected after the sync pass below.
	existingEndpoints, err := lxcmap.DumpToMap()
	if err != nil {
		return err
	}

	for _, ipIDPair := range specialIdentities {
		isHost := ipIDPair.ID == identity.ReservedIdentityHost
		if isHost {
			added, err := lxcmap.SyncHostEntry(ipIDPair.IP)
			if err != nil {
				return fmt.Errorf("Unable to add host entry to endpoint map: %s", err)
			}
			if added {
				log.WithField(logfields.IPAddr, ipIDPair.IP).Debugf("Added local ip to endpoint map")
			}
		}

		// Still valid; remove from the stale-candidate set.
		delete(existingEndpoints, ipIDPair.IP.String())

		// Upsert will not propagate (reserved:foo->ID) mappings across the cluster,
		// and we specifically don't want to do so.
		ipcache.IPIdentityCache.Upsert(ipIDPair.PrefixString(), nil, ipcache.Identity{
			ID:     ipIDPair.ID,
			Source: ipcache.FromAgentLocal,
		})
	}

	// Whatever remains are host entries that no longer correspond to a
	// local address; delete them from the BPF map (best effort).
	for hostIP, info := range existingEndpoints {
		if ip := net.ParseIP(hostIP); info.IsHost() && ip != nil {
			if err := lxcmap.DeleteEntry(ip); err != nil {
				log.WithError(err).WithFields(logrus.Fields{
					logfields.IPAddr: hostIP,
				}).Warn("Unable to delete obsolete host IP from BPF map")
			} else {
				log.Debugf("Removed outdated host ip %s from endpoint map", hostIP)
			}
		}
	}

	return nil
}
// createIPNet builds a *net.IPNet that carries only a CIDR mask of the
// given prefix length over the given total bit width; the IP field is
// left as its zero value (nil).
func createIPNet(ones, bits int) *net.IPNet {
	mask := net.CIDRMask(ones, bits)
	ipnet := net.IPNet{Mask: mask}
	return &ipnet
}
// createPrefixLengthCounter wraps around the counter library, providing
// references to prefix lengths that will always be present: the world
// (/0) and host (full-length) prefixes for each address family.
// Failure to register the defaults is Fatal.
func createPrefixLengthCounter() *counter.PrefixLengthCounter {
	prefixLengths4 := ipcachemap.IPCache.GetMaxPrefixLengths(false)
	prefixLengths6 := ipcachemap.IPCache.GetMaxPrefixLengths(true)
	// Named "plc" rather than "counter" so the imported counter package
	// is not shadowed by the local variable.
	plc := counter.NewPrefixLengthCounter(prefixLengths6, prefixLengths4)

	// This is a bit ugly, but there's not a great way to define an IPNet
	// without parsing strings, etc.
	defaultPrefixes := []*net.IPNet{
		// IPv4
		createIPNet(0, net.IPv4len*8),             // world
		createIPNet(net.IPv4len*8, net.IPv4len*8), // hosts

		// IPv6
		createIPNet(0, net.IPv6len*8),             // world
		createIPNet(net.IPv6len*8, net.IPv6len*8), // hosts
	}
	if _, err := plc.Add(defaultPrefixes); err != nil {
		log.WithError(err).Fatal("Failed to create default prefix lengths")
	}
	return plc
}
func deleteHostDevice() {
link, err := netlink.LinkByName(option.Config.HostDevice)
if err != nil {
log.WithError(err).Warningf("Unable to lookup host device %s. No old cilium_host interface exists", option.Config.HostDevice)
return
}
if err := netlink.LinkDel(link); err != nil {
log.WithError(err).Errorf("Unable to delete host device %s to change allocation CIDR", option.Config.HostDevice)
}
}
// prepareAllocationCIDR reserves the external node IP within the
// allocation range of the given address family if required, validates any
// restored router IP against the (possibly changed) allocation CIDR, and
// allocates a router IP from the range, returning it.
//
// This function is invoked for both the IPv4 and the IPv6 addressing
// family, so error messages are family-neutral (the original messages
// hard-coded "IPv4", which was misleading on the IPv6 path).
func (d *Daemon) prepareAllocationCIDR(family datapath.NodeAddressingFamily) (routerIP net.IP, err error) {
	allocRange := family.AllocationCIDR()

	// Reserve the external node IP within the allocation range if
	// required.
	nodeIP := family.PrimaryExternal()
	if allocRange.Contains(nodeIP) {
		if err = d.ipam.AllocateIP(nodeIP); err != nil {
			err = fmt.Errorf("Unable to allocate external node IP %s from allocation range %s: %s",
				nodeIP, allocRange, err)
			return
		}
	}

	routerIP = family.Router()
	if routerIP != nil && !allocRange.Contains(routerIP) {
		log.Warningf("Detected allocation CIDR change to %s, previous router IP %s", allocRange, routerIP)

		// The restored router IP is not part of the allocation range.
		// This indicates that the allocation range has changed.
		if !option.Config.IsFlannelMasterDeviceSet() {
			deleteHostDevice()
		}

		// force re-allocation of the router IP
		routerIP = nil
	}

	if routerIP == nil {
		routerIP = ip.GetNextIP(allocRange.IP)
	}

	if err = d.ipam.AllocateIP(routerIP); err != nil {
		err = fmt.Errorf("Unable to allocate router IP %s from allocation range %s: %s",
			routerIP, allocRange, err)
	}
	return
}
// NewDaemon creates and returns a new Daemon with the parameters set in c.
// On partial failure it may return a non-nil *endpointRestoreState
// alongside the error so the caller can inspect what was restored.
func NewDaemon(dp datapath.Datapath) (*Daemon, *endpointRestoreState, error) {
	bootstrapStats.daemonInit.Start()

	// Validate the daemon-specific global options.
	if err := option.Config.Validate(); err != nil {
		return nil, nil, fmt.Errorf("invalid daemon configuration: %s", err)
	}

	ctmap.InitMapInfo(option.Config.CTMapEntriesGlobalTCP, option.Config.CTMapEntriesGlobalAny)

	mtuConfig := mtu.NewConfiguration(option.Config.Tunnel != option.TunnelDisabled, option.Config.MTU)

	// Load IPsec keys before anything else when transparent encryption
	// is requested; failure here aborts bootstrap.
	if option.Config.EnableIPSec {
		if err := ipsec.LoadIPSecKeysFile(option.Config.IPSecKeyFile); err != nil {
			return nil, nil, err
		}
		if err := ipsec.EnableIPv6Forwarding(); err != nil {
			return nil, nil, err
		}
	}

	nodeMngr, err := nodemanager.NewManager("all", dp.Node())
	if err != nil {
		return nil, nil, err
	}

	d := Daemon{
		loadBalancer:      loadbalancer.NewLoadBalancer(),
		k8sSvcCache:       k8s.NewServiceCache(),
		policy:            policy.NewPolicyRepository(),
		uniqueID:          map[uint64]context.CancelFunc{},
		nodeMonitor:       monitorLaunch.NewNodeMonitor(option.Config.MonitorQueueSize),
		prefixLengths:     createPrefixLengthCounter(),
		k8sResourceSynced: map[string]chan struct{}{},
		buildEndpointSem:  semaphore.NewWeighted(int64(numWorkerThreads())),
		compilationMutex:  new(lock.RWMutex),
		mtuConfig:         mtuConfig,
		datapath:          dp,
		nodeDiscovery:     nodediscovery.NewNodeDiscovery(nodeMngr, mtuConfig),
	}
	bootstrapStats.daemonInit.End(true)

	// Open or create BPF maps.
	bootstrapStats.mapsInit.Start()
	err = d.initMaps()
	bootstrapStats.mapsInit.EndError(err)
	if err != nil {
		log.WithError(err).Error("Error while opening/creating BPF maps")
		return nil, nil, err
	}

	// Read the service IDs of existing services from the BPF map and
	// reserve them. This must be done *before* connecting to the
	// Kubernetes apiserver and serving the API to ensure service IDs are
	// not changing across restarts or that a new service could accidentally
	// use an existing service ID.
	if option.Config.RestoreState && !option.Config.DryMode {
		bootstrapStats.restore.Start()
		restoreServiceIDs()
		bootstrapStats.restore.End(true)
	}

	// Rate-limited trigger for full policy recalculation.
	t, err := trigger.NewTrigger(trigger.Parameters{
		Name:              "policy_update",
		PrometheusMetrics: true,
		MinInterval:       time.Second,
		TriggerFunc:       d.policyUpdateTrigger,
	})
	if err != nil {
		return nil, nil, err
	}
	d.policyTrigger = t

	debug.RegisterStatusObject("k8s-service-cache", &d.k8sSvcCache)

	bootstrapStats.k8sInit.Start()
	k8s.Configure(option.Config.K8sAPIServer, option.Config.K8sKubeConfigPath)
	bootstrapStats.k8sInit.End(true)
	d.runK8sServiceHandler()
	policyApi.InitEntities(option.Config.ClusterName)

	bootstrapStats.workloadsInit.Start()
	workloads.Init(&d)
	bootstrapStats.workloadsInit.End(true)

	// Remove leftover veth interfaces from previous runs; failure is
	// only a warning, not fatal.
	bootstrapStats.cleanup.Start()
	err = d.clearCiliumVeths()
	bootstrapStats.cleanup.EndError(err)
	if err != nil {
		log.WithError(err).Warning("Unable to clean stale endpoint interfaces")
	}

	if k8s.IsEnabled() {
		bootstrapStats.k8sInit.Start()
		if err := k8s.Init(); err != nil {
			log.WithError(err).Fatal("Unable to initialize Kubernetes subsystem")
		}

		// Kubernetes demands that the localhost can always reach local
		// pods. Therefore unless the AllowLocalhost policy is set to a
		// specific mode, always allow localhost to reach local
		// endpoints.
		if option.Config.AllowLocalhost == option.AllowLocalhostAuto {
			option.Config.AllowLocalhost = option.AllowLocalhostAlways
			log.Info("k8s mode: Allowing localhost to reach local endpoints")
		}

		// In Cilium 1.0, due to limitations on the data path, traffic
		// from the outside world on ingress was treated as though it
		// was from the host for policy purposes. In order to not break
		// existing policies, this option retains the behavior.
		if option.Config.K8sLegacyHostAllowsWorld == "true" {
			log.Warn("k8s mode: Configuring ingress policy for host to also allow from world. This option will be removed in Cilium 1.5. For more information, see https://cilium.link/host-vs-world")
			option.Config.HostAllowsWorld = true
		} else {
			option.Config.HostAllowsWorld = false
		}

		bootstrapStats.k8sInit.End(true)
	}

	// If the device has been specified, the IPv4AllocPrefix and the
	// IPv6AllocPrefix were already allocated before the k8s.Init().
	//
	// If the device hasn't been specified, k8s.Init() allocated the
	// IPv4AllocPrefix and the IPv6AllocPrefix from k8s node annotations.
	//
	// If k8s.Init() failed to retrieve the IPv4AllocPrefix we can try to derive
	// it from an existing node_config.h file or from previous cilium_host
	// interfaces.
	//
	// Then, we will calculate the IPv4 or IPv6 alloc prefix based on the IPv6
	// or IPv4 alloc prefix, respectively, retrieved by k8s node annotations.
	bootstrapStats.ipam.Start()
	log.Info("Initializing node addressing")
	node.SetIPv4ClusterCidrMaskSize(option.Config.IPv4ClusterCIDRMaskSize)

	if option.Config.IPv4Range != AutoCIDR {
		allocCIDR, err := cidr.ParseCIDR(option.Config.IPv4Range)
		if err != nil {
			log.WithError(err).WithField(logfields.V4Prefix, option.Config.IPv4Range).Fatal("Invalid IPv4 allocation prefix")
		}
		node.SetIPv4AllocRange(allocCIDR)
	}

	if option.Config.IPv6Range != AutoCIDR {
		// NOTE(review): the local "net" below shadows the imported net
		// package for the remainder of this block.
		_, net, err := net.ParseCIDR(option.Config.IPv6Range)
		if err != nil {
			log.WithError(err).WithField(logfields.V6Prefix, option.Config.IPv6Range).Fatal("Invalid IPv6 allocation prefix")
		}

		if err := node.SetIPv6NodeRange(net); err != nil {
			log.WithError(err).WithField(logfields.V6Prefix, net).Fatal("Invalid per node IPv6 allocation prefix")
		}
	}

	if err := node.AutoComplete(); err != nil {
		log.WithError(err).Fatal("Cannot autocomplete node addresses")
	}

	// Set up ipam conf after init() because we might be running d.conf.KVStoreIPv4Registration
	log.WithFields(logrus.Fields{
		logfields.V4Prefix: dp.LocalNodeAddressing().IPv4().AllocationCIDR(),
		logfields.V6Prefix: dp.LocalNodeAddressing().IPv6().AllocationCIDR(),
	}).Info("Initializing IPAM")

	d.ipam = ipam.NewIPAM(dp.LocalNodeAddressing(), ipam.Configuration{
		EnableIPv4: option.Config.EnableIPv4,
		EnableIPv6: option.Config.EnableIPv6,
	})
	bootstrapStats.ipam.End(true)

	if option.Config.WorkloadsEnabled() {
		bootstrapStats.workloadsInit.Start()
		// workaround for to use the values of the deprecated dockerEndpoint
		// variable if it is set with a different value than defaults.
		defaultDockerEndpoint := workloads.GetRuntimeDefaultOpt(workloads.Docker, "endpoint")
		if defaultDockerEndpoint != option.Config.DockerEndpoint {
			option.Config.ContainerRuntimeEndpoint[string(workloads.Docker)] = option.Config.DockerEndpoint
			log.Warn(`"docker" flag is deprecated.` +
				`Please use "--container-runtime-endpoint=docker=` + defaultDockerEndpoint + `" instead`)
		}

		opts := make(map[workloads.WorkloadRuntimeType]map[string]string)
		for rt, ep := range option.Config.ContainerRuntimeEndpoint {
			opts[workloads.WorkloadRuntimeType(rt)] = make(map[string]string)
			opts[workloads.WorkloadRuntimeType(rt)][workloads.EpOpt] = ep
		}
		if opts[workloads.Docker] == nil {
			opts[workloads.Docker] = make(map[string]string)
		}
		opts[workloads.Docker][workloads.DatapathModeOpt] = option.Config.DatapathMode

		// Workloads must be initialized after IPAM has started as it requires
		// to allocate IPs.
		if err := workloads.Setup(d.ipam, option.Config.Workloads, opts); err != nil {
			return nil, nil, fmt.Errorf("unable to setup workload: %s", err)
		}

		log.Infof("Container runtime options set: %s", workloads.GetRuntimeOptions())
		bootstrapStats.workloadsInit.End(true)
	}

	bootstrapStats.restore.Start()
	// restore endpoints before any IPs are allocated to avoid eventual IP
	// conflicts later on, otherwise any IP conflict will result in the
	// endpoint not being able to be restored.
	restoredEndpoints, err := d.restoreOldEndpoints(option.Config.StateDir, true)
	if err != nil {
		log.WithError(err).Error("Unable to restore existing endpoints")
	}
	bootstrapStats.restore.End(true)

	// Reserve node and router IPs per enabled address family.
	bootstrapStats.ipam.Start()
	if option.Config.EnableIPv4 {
		routerIP, err := d.prepareAllocationCIDR(dp.LocalNodeAddressing().IPv4())
		if err != nil {
			return nil, nil, err
		}
		if routerIP != nil {
			node.SetInternalIPv4(routerIP)
		}
	}

	if option.Config.EnableIPv6 {
		routerIP, err := d.prepareAllocationCIDR(dp.LocalNodeAddressing().IPv6())
		if err != nil {
			return nil, nil, err
		}
		if routerIP != nil {
			node.SetIPv6Router(routerIP)
		}
	}

	log.Info("Addressing information:")
	log.Infof(" Cluster-Name: %s", option.Config.ClusterName)
	log.Infof(" Cluster-ID: %d", option.Config.ClusterID)
	log.Infof(" Local node-name: %s", node.GetName())
	log.Infof(" Node-IPv6: %s", node.GetIPv6())

	if option.Config.EnableIPv6 {
		log.Infof(" IPv6 node prefix: %s", node.GetIPv6NodeRange())
		log.Infof(" IPv6 allocation prefix: %s", node.GetIPv6AllocRange())
		log.Infof(" IPv6 router address: %s", node.GetIPv6Router())
	}

	log.Infof(" External-Node IPv4: %s", node.GetExternalIPv4())
	log.Infof(" Internal-Node IPv4: %s", node.GetInternalIPv4())

	if option.Config.EnableIPv4 {
		log.Infof(" Cluster IPv4 prefix: %s", node.GetIPv4ClusterRange())
		log.Infof(" IPv4 allocation prefix: %s", node.GetIPv4AllocRange())

		// Allocate IPv4 service loopback IP
		loopbackIPv4, err := d.ipam.AllocateNextFamily(ipam.IPv4)
		if err != nil {
			return nil, restoredEndpoints, fmt.Errorf("Unable to reserve IPv4 loopback address: %s", err)
		}
		node.SetIPv4Loopback(loopbackIPv4)
		log.Infof(" Loopback IPv4: %s", node.GetIPv4Loopback().String())
	}
	bootstrapStats.ipam.End(true)

	bootstrapStats.healthCheck.Start()
	if option.Config.EnableHealthChecking {
		if option.Config.EnableIPv4 {
			health4, err := d.ipam.AllocateNextFamily(ipam.IPv4)
			if err != nil {
				return nil, restoredEndpoints, fmt.Errorf("unable to allocate health IPs: %s,see https://cilium.link/ipam-range-full", err)
			}
			d.nodeDiscovery.LocalNode.IPv4HealthIP = health4
			log.Debugf("IPv4 health endpoint address: %s", health4)
		}

		if option.Config.EnableIPv6 {
			health6, err := d.ipam.AllocateNextFamily(ipam.IPv6)
			if err != nil {
				// Release the IPv4 health IP allocated above so it is
				// not leaked when bootstrap fails here.
				if d.nodeDiscovery.LocalNode.IPv4HealthIP != nil {
					d.ipam.ReleaseIP(d.nodeDiscovery.LocalNode.IPv4HealthIP)
				}
				return nil, restoredEndpoints, fmt.Errorf("unable to allocate health IPs: %s,see https://cilium.link/ipam-range-full", err)
			}
			d.nodeDiscovery.LocalNode.IPv6HealthIP = health6
			log.Debugf("IPv6 health endpoint address: %s", health6)
		}
	}
	bootstrapStats.healthCheck.End(true)

	// Annotation of the k8s node must happen after discovery of the
	// PodCIDR range and allocation of the health IPs.
	if k8s.IsEnabled() {
		bootstrapStats.k8sInit.Start()
		log.WithFields(logrus.Fields{
			logfields.V4Prefix:       node.GetIPv4AllocRange(),
			logfields.V6Prefix:       node.GetIPv6NodeRange(),
			logfields.V4HealthIP:     d.nodeDiscovery.LocalNode.IPv4HealthIP,
			logfields.V6HealthIP:     d.nodeDiscovery.LocalNode.IPv6HealthIP,
			logfields.V4CiliumHostIP: node.GetInternalIPv4(),
			logfields.V6CiliumHostIP: node.GetIPv6Router(),
		}).Info("Annotating k8s node")

		err := k8s.Client().AnnotateNode(node.GetName(),
			node.GetIPv4AllocRange(), node.GetIPv6NodeRange(),
			d.nodeDiscovery.LocalNode.IPv4HealthIP, d.nodeDiscovery.LocalNode.IPv6HealthIP,
			node.GetInternalIPv4(), node.GetIPv6Router())
		if err != nil {
			log.WithError(err).Warning("Cannot annotate k8s node with CIDR range")
		}
		bootstrapStats.k8sInit.End(true)
	}

	d.nodeDiscovery.StartDiscovery(node.GetName())

	// This needs to be done after the node addressing has been configured
	// as the node address is required as suffix.
	go cache.InitIdentityAllocator(&d)

	bootstrapStats.clusterMeshInit.Start()
	if path := option.Config.ClusterMeshConfig; path != "" {
		if option.Config.ClusterID == 0 {
			log.Info("Cluster-ID is not specified, skipping ClusterMesh initialization")
		} else {
			log.WithField("path", path).Info("Initializing ClusterMesh routing")
			clustermesh, err := clustermesh.NewClusterMesh(clustermesh.Configuration{
				Name:            "clustermesh",
				ConfigDirectory: path,
				NodeKeyCreator:  nodeStore.KeyCreator,
				ServiceMerger:   &d.k8sSvcCache,
				NodeManager:     nodeMngr,
			})
			if err != nil {
				log.WithError(err).Fatal("Unable to initialize ClusterMesh")
			}

			d.clustermesh = clustermesh
		}
	}
	bootstrapStats.clusterMeshInit.End(true)

	// Compile/load the base datapath programs (d.init).
	bootstrapStats.bpfBase.Start()
	err = d.init()
	bootstrapStats.bpfBase.EndError(err)
	if err != nil {
		log.WithError(err).Error("Error while initializing daemon")
		return nil, restoredEndpoints, err
	}
	if err := loader.RestoreTemplates(option.Config.StateDir); err != nil {
		log.WithError(err).Error("Unable to restore previous BPF templates")
	}

	// Start watcher for endpoint IP --> identity mappings in key-value store.
	// this needs to be done *after* init() for the daemon in that function,
	// we populate the IPCache with the host's IP(s).
	ipcache.InitIPIdentityWatcher()

	bootstrapStats.proxyStart.Start()
	// FIXME: Make the port range configurable.
	d.l7Proxy = proxy.StartProxySupport(10000, 20000, option.Config.RunDir,
		option.Config.AccessLog, &d, option.Config.AgentLabels)
	bootstrapStats.proxyStart.End(true)

	bootstrapStats.fqdn.Start()
	if err := fqdn.ConfigFromResolvConf(); err != nil {
		bootstrapStats.fqdn.EndError(err)
		return nil, nil, err
	}

	err = d.bootstrapFQDN(restoredEndpoints, option.Config.ToFQDNsPreCache)
	if err != nil {
		bootstrapStats.fqdn.EndError(err)
		return nil, restoredEndpoints, err
	}
	bootstrapStats.fqdn.End(true)

	return &d, restoredEndpoints, nil
}
// Close shuts down a daemon
func (d *Daemon) Close() {
	// policyTrigger may be nil if NewDaemon failed before the trigger
	// was created.
	if d.policyTrigger != nil {
		d.policyTrigger.Shutdown()
	}
	d.nodeDiscovery.Close()
}
// attachExistingInfraContainers derives endpoints from the infra
// containers already known to the workload runtime (using the flannel
// master device) and attaches BPF programs to them by creating the
// corresponding endpoints. Per-container failures are logged and skipped;
// they do not abort the loop.
func (d *Daemon) attachExistingInfraContainers() {
	m, err := workloads.Client().GetAllInfraContainersPID()
	if err != nil {
		log.WithError(err).Error("Unable to get all infra containers PIDs")
		return
	}
	log.Debugf("Containers found %+v", m)
	for containerID, pid := range m {
		epModel, err := connector.DeriveEndpointFrom(option.Config.FlannelMasterDevice, containerID, pid)
		if err != nil {
			log.WithError(err).WithField(logfields.ContainerID, containerID).
				Warning("Unable to derive endpoint from existing infra container")
			continue
		}
		log.Debugf("Adding endpoint %+v", epModel)
		ep, err := d.createEndpoint(context.Background(), epModel)
		if err != nil {
			log.WithError(err).WithField(logfields.ContainerID, containerID).
				Warning("Unable to attach existing infra container")
			continue
		}
		log.WithFields(logrus.Fields{
			logfields.ContainerID: epModel.ContainerID,
			logfields.EndpointID:  ep.ID,
		}).Info("Attached BPF program to existing container")
	}
}
// TriggerReloadWithoutCompile reloads all BPF programs and maps without
// recompiling the datapath logic for each endpoint. The base programs are
// recompiled first; if that fails an error is returned and nothing is
// regenerated. On success, regeneration of every endpoint is triggered at
// the datapath-load level and the returned waitgroup lets the caller wait
// for all endpoint regenerations to finish (individual endpoints may
// still fail to regenerate).
func (d *Daemon) TriggerReloadWithoutCompile(reason string) (*sync.WaitGroup, error) {
	log.Debugf("BPF reload triggered from %s", reason)

	if err := d.compileBase(); err != nil {
		return nil, fmt.Errorf("Unable to recompile base programs from %s: %s", reason, err)
	}

	request := &endpoint.ExternalRegenerationMetadata{
		Reason:            reason,
		RegenerationLevel: endpoint.RegenerateWithDatapathLoad,
	}
	wg := endpointmanager.RegenerateAllEndpoints(d, request)
	return wg, nil
}
// changedOption is the callback invoked for each daemon option that is
// changed through the configuration API; data carries the *Daemon.
// Any option change bumps the policy revision to force recalculation.
func changedOption(key string, value option.OptionSetting, data interface{}) {
	d := data.(*Daemon)
	if key == option.Debug {
		// Set the debug toggle (this can be a no-op)
		logging.ToggleDebugLogs(d.DebugEnabled())
		// Reflect log level change to proxies
		proxy.ChangeLogLevel(logging.GetLevel(logging.DefaultLogger))
	}
	d.policy.BumpRevision() // force policy recalculation
}
// patchConfig implements the PATCH /config API handler.
type patchConfig struct {
	daemon *Daemon
}

// NewPatchConfigHandler returns a handler serving PATCH /config requests
// against the given daemon.
func NewPatchConfigHandler(d *Daemon) PatchConfigHandler {
	return &patchConfig{daemon: d}
}
// Handle implements PATCH /config: it validates the requested option
// changes, restarts the node monitor if its page count changed, applies
// the remaining options and the policy-enforcement mode to the daemon,
// and recompiles the base programs when anything changed.
//
// Bug fix: the original outer condition evaluated nmArgs[0] before
// checking len(nmArgs), panicking with an index-out-of-range when the
// node monitor was started without arguments. The length guard that the
// inner condition already performs is now the only index guard.
func (h *patchConfig) Handle(params PatchConfigParams) middleware.Responder {
	log.WithField(logfields.Params, logfields.Repr(params)).Debug("PATCH /config request")

	d := h.daemon
	cfgSpec := params.Configuration

	om, err := option.Config.Opts.Library.ValidateConfigurationMap(cfgSpec.Options)
	if err != nil {
		msg := fmt.Errorf("Invalid configuration option %s", err)
		return api.Error(PatchConfigBadRequestCode, msg)
	}

	// Serialize configuration updates to the daemon.
	option.Config.ConfigPatchMutex.Lock()
	defer option.Config.ConfigPatchMutex.Unlock()

	nmArgs := d.nodeMonitor.GetArgs()
	if numPagesEntry, ok := cfgSpec.Options["MonitorNumPages"]; ok {
		// Restart the monitor only when the requested page count differs
		// from the current first argument (or no arguments exist yet).
		if len(nmArgs) == 0 || nmArgs[0] != numPagesEntry {
			args := []string{"--num-pages %s", numPagesEntry}
			d.nodeMonitor.Restart(args)
		}
		if len(cfgSpec.Options) == 0 {
			return NewPatchConfigOK()
		}
		delete(cfgSpec.Options, "MonitorNumPages")
	}

	// Track changes to daemon's configuration
	var changes int

	// Only update if value provided for PolicyEnforcement.
	if enforcement := cfgSpec.PolicyEnforcement; enforcement != "" {
		switch enforcement {
		case option.NeverEnforce, option.DefaultEnforcement, option.AlwaysEnforce:
			// Update policy enforcement configuration if needed.
			oldEnforcementValue := policy.GetPolicyEnabled()

			// If the policy enforcement configuration has indeed changed, we have
			// to regenerate endpoints and update daemon's configuration.
			if enforcement != oldEnforcementValue {
				log.Debug("configuration request to change PolicyEnforcement for daemon")
				changes++
				policy.SetPolicyEnabled(enforcement)
			}

		default:
			msg := fmt.Errorf("Invalid option for PolicyEnforcement %s", enforcement)
			log.Warn(msg)
			return api.Error(PatchConfigFailureCode, msg)
		}
		log.Debug("finished configuring PolicyEnforcement for daemon")
	}

	changes += option.Config.Opts.ApplyValidated(om, changedOption, d)

	log.WithField("count", changes).Debug("Applied changes to daemon's configuration")

	if changes > 0 {
		// Only recompile if configuration has changed.
		log.Debug("daemon configuration has changed; recompiling base programs")
		if err := d.compileBase(); err != nil {
			msg := fmt.Errorf("Unable to recompile base programs: %s", err)
			return api.Error(PatchConfigFailureCode, msg)
		}
		d.TriggerPolicyUpdates(true, "agent configuration update")
	}

	return NewPatchConfigOK()
}
// getConfig implements the GET /config API handler.
type getConfig struct {
	daemon *Daemon
}

// NewGetConfigHandler returns a handler serving GET /config requests
// against the given daemon.
func NewGetConfigHandler(d *Daemon) GetConfigHandler {
	return &getConfig{daemon: d}
}
// Handle implements GET /config: it returns the daemon's mutable
// configuration (spec) together with a status snapshot derived from the
// current runtime state (addressing, k8s, kvstore, MTU, datapath mode).
func (h *getConfig) Handle(params GetConfigParams) middleware.Responder {
	log.WithField(logfields.Params, logfields.Repr(params)).Debug("GET /config request")

	d := h.daemon

	spec := &models.DaemonConfigurationSpec{
		Options:           *option.Config.Opts.GetMutableModel(),
		PolicyEnforcement: policy.GetPolicyEnabled(),
	}

	status := &models.DaemonConfigurationStatus{
		Addressing:       node.GetNodeAddressing(),
		K8sConfiguration: k8s.GetKubeconfigPath(),
		K8sEndpoint:      k8s.GetAPIServer(),
		NodeMonitor:      d.nodeMonitor.State(),
		KvstoreConfiguration: &models.KVstoreConfiguration{
			Type:    option.Config.KVStore,
			Options: option.Config.KVStoreOpt,
		},
		// Realized reflects the same spec that was just read; there is no
		// separate pending state here.
		Realized:     spec,
		DeviceMTU:    int64(d.mtuConfig.GetDeviceMTU()),
		RouteMTU:     int64(d.mtuConfig.GetRouteMTU()),
		DatapathMode: models.DatapathMode(option.Config.DatapathMode),
		IpvlanConfiguration: &models.IpvlanConfiguration{
			MasterDeviceIndex: int64(option.Config.Ipvlan.MasterDeviceIndex),
			OperationMode:     option.Config.Ipvlan.OperationMode,
		},
	}

	cfg := &models.DaemonConfiguration{
		Spec:   spec,
		Status: status,
	}

	return NewGetConfigOK().WithPayload(cfg)
}
// listFilterIfs returns a map of interfaces based on the given filter.
// The filter should take a link and, if found, return the index of that
// interface, if not found return -1.
func listFilterIfs(filter func(netlink.Link) int) (map[int]netlink.Link, error) {
	links, err := netlink.LinkList()
	if err != nil {
		return nil, err
	}

	matched := map[int]netlink.Link{}
	for _, link := range links {
		idx := filter(link)
		if idx == -1 {
			continue
		}
		matched[idx] = link
	}
	return matched, nil
}
// clearCiliumVeths checks all veths created by cilium and removes all that
// are considered a leftover from failed attempts to connect the container.
// A veth is considered stale when its peer is itself a known veth whose
// name starts with "lxc". Deletion failures are logged, not fatal.
func (d *Daemon) clearCiliumVeths() error {
	log.Info("Removing stale endpoint interfaces")

	// Index every veth device on the host by its interface index.
	leftVeths, err := listFilterIfs(func(intf netlink.Link) int {
		if intf.Type() == "veth" {
			return intf.Attrs().Index
		}
		return -1
	})
	if err != nil {
		return fmt.Errorf("unable to retrieve host network interfaces: %s", err)
	}

	for _, v := range leftVeths {
		peerIndex := v.Attrs().ParentIndex
		parentVeth, found := leftVeths[peerIndex]
		if !found || peerIndex == 0 || !strings.HasPrefix(parentVeth.Attrs().Name, "lxc") {
			continue
		}
		if delErr := netlink.LinkDel(v); delErr != nil {
			log.WithError(delErr).Warningf("Unable to delete stale veth device %s", v.Attrs().Name)
		}
	}
	return nil
}
// numWorkerThreads returns the number of worker threads: the number of
// CPUs, with a minimum of 2.
//
// Fix: the previous comment claimed a minimum of 4, but the code has
// always enforced a minimum of 2; the documentation now matches the
// behavior.
func numWorkerThreads() int {
	const minWorkerThreads = 2

	if ncpu := runtime.NumCPU(); ncpu > minWorkerThreads {
		return ncpu
	}
	return minWorkerThreads
}
// GetServiceList returns list of services
func (d *Daemon) GetServiceList() []*models.Service {
	list := []*models.Service{}

	// Hold the read lock while iterating the service map so concurrent
	// writers cannot mutate it mid-iteration.
	d.loadBalancer.BPFMapMU.RLock()
	defer d.loadBalancer.BPFMapMU.RUnlock()

	for _, v := range d.loadBalancer.SVCMap {
		list = append(list, v.GetModel())
	}
	return list
}
// SendNotification sends an agent notification to the monitor
// It is a no-op in dry mode.
func (d *Daemon) SendNotification(typ monitorAPI.AgentNotification, text string) error {
	if option.Config.DryMode {
		return nil
	}
	event := monitorAPI.AgentNotify{Type: typ, Text: text}
	return d.nodeMonitor.SendEvent(monitorAPI.MessageTypeAgent, event)
}
// NewProxyLogRecord is invoked by the proxy accesslog on each new access log entry
// and forwards the record to the node monitor.
func (d *Daemon) NewProxyLogRecord(l *logger.LogRecord) error {
	return d.nodeMonitor.SendEvent(monitorAPI.MessageTypeAccessLog, l.LogRecord)
}
// GetNodeSuffix returns the suffix to be appended to kvstore keys of this
// agent: the external IPv4 address when IPv4 is enabled, otherwise the
// node's IPv6 address. If neither is available yet the call is Fatal.
func (d *Daemon) GetNodeSuffix() string {
	var addr net.IP
	if option.Config.EnableIPv4 {
		addr = node.GetExternalIPv4()
	} else if option.Config.EnableIPv6 {
		addr = node.GetIPv6()
	}

	if addr == nil {
		log.Fatal("Node IP not available yet")
	}

	return addr.String()
}
cilium: ipsec, support kernel without ipv6 support
If the kernel does not have IPv6 support, the forwarding file will not
exist, so do not try to set the file when IPv6 is disabled.
Signed-off-by: John Fastabend <ee461b60978bea1c76b3018c0b89685175b248e2@gmail.com>
// Copyright 2016-2019 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bufio"
"context"
"fmt"
"net"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
"github.com/cilium/cilium/api/v1/models"
. "github.com/cilium/cilium/api/v1/server/restapi/daemon"
health "github.com/cilium/cilium/cilium-health/launch"
"github.com/cilium/cilium/common"
monitorLaunch "github.com/cilium/cilium/monitor/launch"
"github.com/cilium/cilium/pkg/api"
"github.com/cilium/cilium/pkg/bpf"
"github.com/cilium/cilium/pkg/cidr"
"github.com/cilium/cilium/pkg/clustermesh"
"github.com/cilium/cilium/pkg/command/exec"
"github.com/cilium/cilium/pkg/completion"
"github.com/cilium/cilium/pkg/controller"
"github.com/cilium/cilium/pkg/counter"
"github.com/cilium/cilium/pkg/datapath"
"github.com/cilium/cilium/pkg/datapath/alignchecker"
bpfIPCache "github.com/cilium/cilium/pkg/datapath/ipcache"
"github.com/cilium/cilium/pkg/datapath/iptables"
"github.com/cilium/cilium/pkg/datapath/linux/ipsec"
"github.com/cilium/cilium/pkg/datapath/loader"
"github.com/cilium/cilium/pkg/datapath/prefilter"
"github.com/cilium/cilium/pkg/debug"
"github.com/cilium/cilium/pkg/defaults"
"github.com/cilium/cilium/pkg/endpoint"
"github.com/cilium/cilium/pkg/endpoint/connector"
"github.com/cilium/cilium/pkg/endpointmanager"
"github.com/cilium/cilium/pkg/envoy"
"github.com/cilium/cilium/pkg/fqdn"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/identity/cache"
"github.com/cilium/cilium/pkg/ip"
"github.com/cilium/cilium/pkg/ipam"
"github.com/cilium/cilium/pkg/ipcache"
"github.com/cilium/cilium/pkg/k8s"
"github.com/cilium/cilium/pkg/loadbalancer"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/maps/ctmap"
"github.com/cilium/cilium/pkg/maps/eppolicymap"
ipcachemap "github.com/cilium/cilium/pkg/maps/ipcache"
"github.com/cilium/cilium/pkg/maps/lbmap"
"github.com/cilium/cilium/pkg/maps/lxcmap"
"github.com/cilium/cilium/pkg/maps/metricsmap"
"github.com/cilium/cilium/pkg/maps/sockmap"
"github.com/cilium/cilium/pkg/maps/tunnel"
monitorAPI "github.com/cilium/cilium/pkg/monitor/api"
"github.com/cilium/cilium/pkg/mtu"
"github.com/cilium/cilium/pkg/node"
nodemanager "github.com/cilium/cilium/pkg/node/manager"
nodeStore "github.com/cilium/cilium/pkg/node/store"
"github.com/cilium/cilium/pkg/nodediscovery"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy"
policyApi "github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/proxy"
"github.com/cilium/cilium/pkg/proxy/logger"
"github.com/cilium/cilium/pkg/revert"
"github.com/cilium/cilium/pkg/sockops"
"github.com/cilium/cilium/pkg/status"
"github.com/cilium/cilium/pkg/trigger"
"github.com/cilium/cilium/pkg/workloads"
"github.com/go-openapi/runtime/middleware"
"github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
"golang.org/x/sync/semaphore"
)
const (
	// AutoCIDR indicates that a CIDR should be allocated automatically
	// rather than taken from explicit configuration (compared against
	// option.Config.IPv4Range/IPv6Range in NewDaemon).
	AutoCIDR = "auto"
)
// Positional indices of the arguments passed to the datapath init.sh
// script (see compileBase, which fills an args slice of length initArgMax).
const (
	initArgLib int = iota
	initArgRundir
	initArgIPv4NodeIP
	initArgIPv6NodeIP
	initArgMode
	initArgDevice
	initArgDevicePreFilter
	initArgModePreFilter
	initArgMTU
	initArgIPSec
	// initArgMax is the number of positional arguments; it is not an
	// argument itself.
	initArgMax
)
// Daemon is the cilium daemon that is in charge of performing all necessary
// plumbing and monitoring when a LXC starts.
type Daemon struct {
	// buildEndpointSem limits the number of concurrent endpoint builds;
	// permits are acquired and released in QueueEndpointBuild.
	buildEndpointSem *semaphore.Weighted
	// l7Proxy implements L7 policy redirects; nil when the proxy is
	// disabled (checked by the Update*/Remove* proxy methods below).
	l7Proxy      *proxy.Proxy
	loadBalancer *loadbalancer.LoadBalancer
	policy       *policy.Repository
	preFilter    *prefilter.PreFilter
	// Only used for CRI-O since it does not support events.
	workloadsEventsCh chan<- *workloads.EventMessage

	// statusCollectMutex presumably guards statusResponse and
	// statusCollector — TODO confirm against the status collection code.
	statusCollectMutex lock.RWMutex
	statusResponse     models.StatusResponse
	statusCollector    *status.Collector

	// uniqueIDMU protects uniqueID.
	uniqueIDMU lock.Mutex
	// uniqueID maps an endpoint ID to the cancel function of its pending
	// build-permit wait (see QueueEndpointBuild / RemoveFromEndpointQueue).
	uniqueID map[uint64]context.CancelFunc

	nodeMonitor  *monitorLaunch.NodeMonitor
	ciliumHealth *health.CiliumHealth

	// dnsRuleGen manages toFQDNs rules
	dnsRuleGen *fqdn.RuleGen

	// dnsPoller polls DNS names and sends them to dnsRuleGen
	dnsPoller *fqdn.DNSPoller

	// k8sAPIs is a set of k8s API in use. They are setup in EnableK8sWatcher,
	// and may be disabled while the agent runs.
	// This is on this object, instead of a global, because EnableK8sWatcher is
	// on Daemon.
	k8sAPIGroups k8sAPIGroupsUsed

	// Used to synchronize generation of daemon's BPF programs and endpoint BPF
	// programs.
	compilationMutex *lock.RWMutex

	// prefixLengths tracks a mapping from CIDR prefix length to the count
	// of rules that refer to that prefix length.
	prefixLengths *counter.PrefixLengthCounter

	clustermesh *clustermesh.ClusterMesh

	// k8sResourceSyncWaitGroup is used to block the starting of the daemon,
	// including regenerating restored endpoints (if specified) until all
	// policies, services, ingresses, and endpoints stored in Kubernetes at the
	// time of bootstrapping of the agent are consumed by Cilium.
	// This prevents regeneration of endpoints, restoring of loadbalancer BPF
	// maps, etc. being performed without crucial information in securing said
	// components. See GH-5038 and GH-4457.
	k8sResourceSyncWaitGroup sync.WaitGroup

	// k8sResourceSyncedMu protects the k8sResourceSynced map.
	k8sResourceSyncedMu lock.RWMutex

	// k8sResourceSynced maps a resource name to a channel. Once the given
	// resource name is synchronized with k8s, the channel for which that
	// resource name maps to is closed.
	k8sResourceSynced map[string]chan struct{}

	// k8sSvcCache is a cache of all Kubernetes services and endpoints
	k8sSvcCache k8s.ServiceCache

	// mtuConfig is the MTU configuration passed to the datapath (see
	// compileBase, which derives the device MTU from it).
	mtuConfig mtu.Configuration

	// policyTrigger serializes/rate-limits policy update runs (wired up in
	// NewDaemon with the "policy_update" trigger).
	policyTrigger *trigger.Trigger

	// datapath is the underlying datapath implementation to use to
	// implement all aspects of an agent
	datapath datapath.Datapath

	// nodeDiscovery defines the node discovery logic of the agent
	nodeDiscovery *nodediscovery.NodeDiscovery

	// ipam is the IP address manager of the agent
	ipam *ipam.IPAM
}
// Datapath returns the datapath implementation used by this daemon.
func (d *Daemon) Datapath() datapath.Datapath {
	dp := d.datapath
	return dp
}
// UpdateProxyRedirect updates the redirect rules in the proxy for a particular
// endpoint using the provided L4 filter. Returns the allocated proxy port
func (d *Daemon) UpdateProxyRedirect(e *endpoint.Endpoint, l4 *policy.L4Filter, proxyWaitGroup *completion.WaitGroup) (uint16, error, revert.FinalizeFunc, revert.RevertFunc) {
	if d.l7Proxy == nil {
		return 0, fmt.Errorf("can't redirect, proxy disabled"), nil, nil
	}

	redirect, err, finalize, undo := d.l7Proxy.CreateOrUpdateRedirect(l4, e.ProxyID(l4), e, proxyWaitGroup)
	if err != nil {
		return 0, err, nil, nil
	}

	return redirect.ProxyPort, nil, finalize, undo
}
// RemoveProxyRedirect removes a previously installed proxy redirect for an
// endpoint
func (d *Daemon) RemoveProxyRedirect(e *endpoint.Endpoint, id string, proxyWaitGroup *completion.WaitGroup) (error, revert.FinalizeFunc, revert.RevertFunc) {
	lp := d.l7Proxy
	if lp == nil {
		return nil, nil, nil
	}

	scopedLog := log.WithFields(logrus.Fields{
		logfields.EndpointID: e.ID,
		logfields.L4PolicyID: id,
	})
	scopedLog.Debug("Removing redirect to endpoint")
	return lp.RemoveRedirect(id, proxyWaitGroup)
}
// UpdateNetworkPolicy adds or updates a network policy in the set
// published to L7 proxies.
func (d *Daemon) UpdateNetworkPolicy(e *endpoint.Endpoint, policy *policy.L4Policy,
	labelsMap, deniedIngressIdentities, deniedEgressIdentities cache.IdentityCache, proxyWaitGroup *completion.WaitGroup) (error, revert.RevertFunc) {
	if d.l7Proxy == nil {
		return fmt.Errorf("can't update network policy, proxy disabled"), nil
	}

	// Snapshot the endpoint's current enforcement state before publishing.
	ingressEnabled := e.GetIngressPolicyEnabledLocked()
	egressEnabled := e.GetEgressPolicyEnabledLocked()
	err, undo := d.l7Proxy.UpdateNetworkPolicy(e, policy, ingressEnabled, egressEnabled,
		labelsMap, deniedIngressIdentities, deniedEgressIdentities, proxyWaitGroup)
	return err, revert.RevertFunc(undo)
}
// RemoveNetworkPolicy removes a network policy from the set published to
// L7 proxies.
func (d *Daemon) RemoveNetworkPolicy(e *endpoint.Endpoint) {
	if d.l7Proxy != nil {
		d.l7Proxy.RemoveNetworkPolicy(e)
	}
}
// QueueEndpointBuild waits for a "build permit" for the endpoint
// identified by 'epID'. This function blocks until the endpoint can
// start building. The returned function must then be called to
// release the "build permit" when the most resource intensive parts
// of the build are done. The returned function is idempotent, so it
// may be called more than once. Returns nil if the caller should NOT
// start building the endpoint. This may happen due to a build being
// queued for the endpoint already, or due to the wait for the build
// permit being canceled. The latter case happens when the endpoint is
// being deleted.
func (d *Daemon) QueueEndpointBuild(epID uint64) func() {
	d.uniqueIDMU.Lock()
	// Skip new build requests if the endpoint is already in the queue
	// waiting. In this case the queued build will pick up any changes
	// made so far, so there is no need to queue another build now.
	if _, queued := d.uniqueID[epID]; queued {
		d.uniqueIDMU.Unlock()
		return nil
	}
	// Store a cancel function to the 'uniqueID' map so that we can
	// cancel the wait when the endpoint is being deleted.
	ctx, cancel := context.WithCancel(context.Background())
	d.uniqueID[epID] = cancel
	d.uniqueIDMU.Unlock()

	// Acquire build permit. This may block. Note that the mutex must NOT
	// be held here, or RemoveFromEndpointQueue could never cancel us.
	err := d.buildEndpointSem.Acquire(ctx, 1)

	// Not queueing any more, so remove the cancel func from 'uniqueID' map.
	// The caller may still cancel the build by calling the cancel func
	// after we return it. After this point another build may be queued for
	// this endpoint.
	d.uniqueIDMU.Lock()
	delete(d.uniqueID, epID)
	d.uniqueIDMU.Unlock()

	if err != nil {
		return nil // Acquire failed
	}

	// Acquire succeeded, but the context was canceled after?
	if ctx.Err() == context.Canceled {
		d.buildEndpointSem.Release(1)
		return nil
	}

	// At this point the build permit has been acquired. It must
	// be released by the caller by calling the returned function
	// when the heavy lifting of the build is done.
	// Using sync.Once to make the returned function idempotent.
	var once sync.Once
	return func() {
		once.Do(func() {
			d.buildEndpointSem.Release(1)
		})
	}
}
// RemoveFromEndpointQueue removes the endpoint from the "build permit" queue,
// canceling the wait for the build permit if still waiting.
func (d *Daemon) RemoveFromEndpointQueue(epID uint64) {
	d.uniqueIDMU.Lock()
	defer d.uniqueIDMU.Unlock()

	cancel, queued := d.uniqueID[epID]
	if !queued || cancel == nil {
		return
	}
	delete(d.uniqueID, epID)
	cancel()
}
// GetPolicyRepository returns the policy repository of the daemon
func (d *Daemon) GetPolicyRepository() *policy.Repository {
	repo := d.policy
	return repo
}
// DebugEnabled reports whether the daemon runs with debug mode enabled.
func (d *Daemon) DebugEnabled() bool {
	opts := option.Config.Opts
	return opts.IsEnabled(option.Debug)
}
// writeNetdevHeader writes the netdev configuration header file into dir,
// delegating the content to the datapath's WriteNetdevConfig.
func (d *Daemon) writeNetdevHeader(dir string) error {
	headerPath := filepath.Join(dir, common.NetdevHeaderFileName)
	log.WithField(logfields.Path, headerPath).Debug("writing configuration")

	f, err := os.Create(headerPath)
	if err != nil {
		return fmt.Errorf("failed to open file %s for writing: %s", headerPath, err)
	}
	defer f.Close()

	return d.datapath.WriteNetdevConfig(f, d)
}
// GetCIDRPrefixLengths returns the sorted list of unique prefix lengths used
// by CIDR policies, as (IPv6, IPv4) slices.
func (d *Daemon) GetCIDRPrefixLengths() (s6, s4 []int) {
	s6, s4 = d.prefixLengths.ToBPFData()
	return s6, s4
}
// writePreFilterHeader writes the XDP prefilter configuration header into
// dir. Must be called with option.Config.EnablePolicyMU locked.
func (d *Daemon) writePreFilterHeader(dir string) error {
	headerPath := filepath.Join(dir, common.PreFilterHeaderFileName)
	log.WithField(logfields.Path, headerPath).Debug("writing configuration")

	f, err := os.Create(headerPath)
	if err != nil {
		return fmt.Errorf("failed to open file %s for writing: %s", headerPath, err)
	}
	defer f.Close()

	fw := bufio.NewWriter(f)
	// Emit a small C comment banner describing the XDP configuration,
	// followed by the prefilter configuration itself.
	fmt.Fprintf(fw, "/*\n * XDP device: %s\n * XDP mode: %s\n */\n\n",
		option.Config.DevicePreFilter, option.Config.ModePreFilter)
	d.preFilter.WriteConfig(fw)
	return fw.Flush()
}
// GetOptions returns the datapath configuration options of the daemon.
func (d *Daemon) GetOptions() *option.IntOptions {
	opts := option.Config.Opts
	return opts
}
// setHostAddresses discovers the first universe-scoped IPv4 and IPv6
// addresses of the load-balancer interface (option.Config.LBInterface) and
// stores them in option.Config.HostV4Addr / HostV6Addr. Families are only
// probed when the corresponding EnableIPv4/EnableIPv6 option is set.
func (d *Daemon) setHostAddresses() error {
	l, err := netlink.LinkByName(option.Config.LBInterface)
	if err != nil {
		// Bug fix: report the interface that was actually looked up
		// (LBInterface), not option.Config.Device.
		return fmt.Errorf("unable to get network device %s: %s", option.Config.LBInterface, err)
	}

	// getAddr returns the first address of the given family with universe
	// scope on the link, or nil if no such address exists.
	getAddr := func(netLinkFamily int) (net.IP, error) {
		addrs, err := netlink.AddrList(l, netLinkFamily)
		if err != nil {
			return nil, fmt.Errorf("error while getting %s's addresses: %s", option.Config.LBInterface, err)
		}
		for _, possibleAddr := range addrs {
			if netlink.Scope(possibleAddr.Scope) == netlink.SCOPE_UNIVERSE {
				return possibleAddr.IP, nil
			}
		}
		return nil, nil
	}

	if option.Config.EnableIPv4 {
		hostV4Addr, err := getAddr(netlink.FAMILY_V4)
		if err != nil {
			return err
		}
		if hostV4Addr != nil {
			option.Config.HostV4Addr = hostV4Addr
			log.Infof("Using IPv4 host address: %s", option.Config.HostV4Addr)
		}
	}

	if option.Config.EnableIPv6 {
		hostV6Addr, err := getAddr(netlink.FAMILY_V6)
		if err != nil {
			return err
		}
		if hostV6Addr != nil {
			option.Config.HostV6Addr = hostV6Addr
			log.Infof("Using IPv6 host address: %s", option.Config.HostV6Addr)
		}
	}
	return nil
}
// GetCompilationLock returns the mutex responsible for synchronizing compilation
// of BPF programs.
func (d *Daemon) GetCompilationLock() *lock.RWMutex {
	mu := d.compilationMutex
	return mu
}
// compileBase builds and loads the base (non-endpoint) BPF datapath by
// invoking the init.sh script shipped in the BPF directory with positional
// arguments indexed by the initArg* constants. The compilation lock is held
// for the entire duration so endpoint builds cannot run concurrently.
func (d *Daemon) compileBase() error {
	var args []string
	var mode string
	var ret error
	args = make([]string, initArgMax)
	// Lock so that endpoints cannot be built while we are compiling base programs.
	d.compilationMutex.Lock()
	defer d.compilationMutex.Unlock()
	if err := d.writeNetdevHeader("./"); err != nil {
		log.WithError(err).Warn("Unable to write netdev header")
		return err
	}
	loader.Init(d.datapath, &d.nodeDiscovery.LocalConfig)
	scopedLog := log.WithField(logfields.XDPDevice, option.Config.DevicePreFilter)
	// "undefined" means no XDP prefilter device is configured. Probe the
	// configured device first and fall back to disabling prefilter when
	// probing fails.
	if option.Config.DevicePreFilter != "undefined" {
		if err := prefilter.ProbePreFilter(option.Config.DevicePreFilter, option.Config.ModePreFilter); err != nil {
			scopedLog.WithError(err).Warn("Turning off prefilter")
			option.Config.DevicePreFilter = "undefined"
		}
	}
	if option.Config.DevicePreFilter != "undefined" {
		if d.preFilter, ret = prefilter.NewPreFilter(); ret != nil {
			scopedLog.WithError(ret).Warn("Unable to init prefilter")
			return ret
		}
		if err := d.writePreFilterHeader("./"); err != nil {
			scopedLog.WithError(err).Warn("Unable to write prefilter header")
			return err
		}
		args[initArgDevicePreFilter] = option.Config.DevicePreFilter
		args[initArgModePreFilter] = option.Config.ModePreFilter
	}
	args[initArgLib] = option.Config.BpfDir
	args[initArgRundir] = option.Config.StateDir
	// Node IPs are passed as "<nil>" for disabled address families; the
	// script is expected to treat that literal as "no address".
	if option.Config.EnableIPv4 {
		args[initArgIPv4NodeIP] = node.GetInternalIPv4().String()
	} else {
		args[initArgIPv4NodeIP] = "<nil>"
	}
	if option.Config.EnableIPv6 {
		args[initArgIPv6NodeIP] = node.GetIPv6().String()
	} else {
		args[initArgIPv6NodeIP] = "<nil>"
	}
	args[initArgMTU] = fmt.Sprintf("%d", d.mtuConfig.GetDeviceMTU())
	if option.Config.EnableIPSec {
		args[initArgIPSec] = "true"
	} else {
		args[initArgIPSec] = "false"
	}
	if option.Config.Device != "undefined" {
		// A specific snooping device was configured; it must exist.
		_, err := netlink.LinkByName(option.Config.Device)
		if err != nil {
			log.WithError(err).WithField("device", option.Config.Device).Warn("Link does not exist")
			return err
		}
		if option.Config.IsLBEnabled() {
			if option.Config.Device != option.Config.LBInterface {
				//FIXME: allow different interfaces
				return fmt.Errorf("Unable to have an interface for LB mode different than snooping interface")
			}
			if err := d.setHostAddresses(); err != nil {
				return err
			}
			mode = "lb"
		} else {
			if option.Config.DatapathMode == option.DatapathModeIpvlan {
				mode = "ipvlan"
			} else {
				mode = "direct"
			}
		}
		args[initArgMode] = mode
		args[initArgDevice] = option.Config.Device
		// The device is also appended as an extra trailing argument
		// beyond the fixed initArg* positions.
		args = append(args, option.Config.Device)
	} else {
		// No device configured: run in tunnel (or flannel) mode.
		if option.Config.IsLBEnabled() && strings.ToLower(option.Config.Tunnel) != "disabled" {
			//FIXME: allow LBMode in tunnel
			return fmt.Errorf("Unable to run LB mode with tunnel mode")
		}
		args[initArgMode] = option.Config.Tunnel
		if option.Config.IsFlannelMasterDeviceSet() {
			args[initArgMode] = "flannel"
			args[initArgDevice] = option.Config.FlannelMasterDevice
		}
	}
	// Run init.sh with a bounded execution time.
	prog := filepath.Join(option.Config.BpfDir, "init.sh")
	ctx, cancel := context.WithTimeout(context.Background(), defaults.ExecTimeout)
	defer cancel()
	cmd := exec.CommandContext(ctx, prog, args...)
	cmd.Env = bpf.Environment()
	if _, err := cmd.CombinedOutput(log, true); err != nil {
		return err
	}
	if canDisableDwarfRelocations {
		// Validate alignments of C and Go equivalent structs
		if err := alignchecker.CheckStructAlignments(defaults.AlignCheckerName); err != nil {
			log.WithError(err).Fatal("C and Go structs alignment check failed")
		}
	} else {
		log.Warning("Cannot check matching of C and Go common struct alignments due to old LLVM/clang version")
	}
	if !option.Config.IsFlannelMasterDeviceSet() {
		d.ipam.ReserveLocalRoutes()
	}
	if err := d.datapath.Node().NodeConfigurationChanged(d.nodeDiscovery.LocalConfig); err != nil {
		return err
	}
	if option.Config.EnableIPv4 {
		// Always remove masquerade rule and then re-add it if required
		iptables.RemoveRules()
		if option.Config.InstallIptRules {
			if err := iptables.InstallRules(option.Config.HostDevice); err != nil {
				return err
			}
		}
	}
	// NOTE(review): these log lines announce sysctl settings but no sysctl
	// write is visible here — presumably init.sh performs them; confirm.
	log.Info("Setting sysctl net.core.bpf_jit_enable=1")
	log.Info("Setting sysctl net.ipv4.conf.all.rp_filter=0")
	log.Info("Setting sysctl net.ipv6.conf.all.disable_ipv6=0")
	return nil
}
// initMaps opens all BPF maps (and creates them if they do not exist). This
// must be done *before* any operations which read BPF maps, especially
// restoring endpoints and services.
func (d *Daemon) initMaps() error {
	// In dry mode no BPF maps are touched at all.
	if option.Config.DryMode {
		return nil
	}

	if _, err := lxcmap.LXCMap.OpenOrCreate(); err != nil {
		return err
	}

	// The ipcache is shared between endpoints. Parallel mode needs to be
	// used to allow existing endpoints that have not been regenerated yet
	// to continue using the existing ipcache until the endpoint is
	// regenerated for the first time. Existing endpoints are using a
	// policy map which is potentially out of sync as local identities are
	// re-allocated on startup. Parallel mode allows to continue using the
	// old version until regeneration. Note that the old version is not
	// updated with new identities. This is fine as any new identity
	// appearing would require a regeneration of the endpoint anyway in
	// order for the endpoint to gain the privilege of communication.
	if _, err := ipcachemap.IPCache.OpenParallel(); err != nil {
		return err
	}

	if _, err := metricsmap.Metrics.OpenOrCreate(); err != nil {
		return err
	}

	if _, err := tunnel.TunnelMap.OpenOrCreate(); err != nil {
		return err
	}

	if err := openServiceMaps(); err != nil {
		log.WithError(err).Fatal("Unable to open service maps")
	}

	// Set up the list of IPCache listeners in the daemon, to be
	// used by syncLXCMap().
	ipcache.IPIdentityCache.SetListeners([]ipcache.IPIdentityMappingListener{
		&envoy.NetworkPolicyHostsCache,
		bpfIPCache.NewListener(d),
	})

	// Insert local host entries to bpf maps
	if err := d.syncLXCMap(); err != nil {
		return err
	}

	// Start the controller for periodic sync
	// The purpose of the controller is to ensure that the host entries are
	// reinserted to the bpf maps if they are ever removed from them.
	// TODO: Determine if we can get rid of this when we have more rigorous
	//       desired/realized state implementation for the bpf maps.
	controller.NewManager().UpdateController("lxcmap-bpf-host-sync",
		controller.ControllerParams{
			DoFunc: func(ctx context.Context) error {
				return d.syncLXCMap()
			},
			RunInterval: 5 * time.Second,
		})

	// Start the controller for periodic sync of the metrics map with
	// the prometheus server.
	controller.NewManager().UpdateController("metricsmap-bpf-prom-sync",
		controller.ControllerParams{
			DoFunc:      metricsmap.SyncMetricsMap,
			RunInterval: 5 * time.Second,
		})

	// Clean all lb entries
	if !option.Config.RestoreState {
		log.Debug("cleaning up all BPF LB maps")

		d.loadBalancer.BPFMapMU.Lock()
		// NOTE: the deferred unlock fires when initMaps returns, which is
		// fine here since only this branch remains before return.
		defer d.loadBalancer.BPFMapMU.Unlock()

		if option.Config.EnableIPv6 {
			if err := lbmap.Service6Map.DeleteAll(); err != nil {
				return err
			}
			if err := lbmap.RRSeq6Map.DeleteAll(); err != nil {
				return err
			}
		}
		if err := d.RevNATDeleteAll(); err != nil {
			return err
		}
		if option.Config.EnableIPv4 {
			if err := lbmap.Service4Map.DeleteAll(); err != nil {
				return err
			}
			if err := lbmap.RRSeq4Map.DeleteAll(); err != nil {
				return err
			}
		}

		// If we are not restoring state, all endpoints can be
		// deleted. Entries will be re-populated.
		lxcmap.LXCMap.DeleteAll()
	}
	return nil
}
// init prepares the runtime directory, resets sockops state and, unless
// running in dry mode, writes the node config header, optionally enables
// sockops/sockmap and compiles the base datapath.
func (d *Daemon) init() error {
	globalsDir := option.Config.GetGlobalsDir()
	if err := os.MkdirAll(globalsDir, defaults.RuntimePathRights); err != nil {
		log.WithError(err).WithField(logfields.Path, globalsDir).Fatal("Could not create runtime directory")
	}

	if err := os.Chdir(option.Config.StateDir); err != nil {
		log.WithError(err).WithField(logfields.Path, option.Config.StateDir).Fatal("Could not change to runtime directory")
	}

	// Remove any old sockops and re-enable with _new_ programs if flag is set
	sockops.SockmapDisable()
	sockops.SkmsgDisable()

	if option.Config.DryMode {
		// Nothing else to do in dry mode.
		return nil
	}

	if err := d.createNodeConfigHeaderfile(); err != nil {
		return err
	}

	if option.Config.SockopsEnable {
		eppolicymap.CreateEPPolicyMap()
		sockops.SockmapEnable()
		sockops.SkmsgEnable()
		sockmap.SockmapCreate()
	}

	return d.compileBase()
}
// createNodeConfigHeaderfile writes the node configuration header file for
// the datapath into the state directory.
func (d *Daemon) createNodeConfigHeaderfile() error {
	nodeConfigPath := option.Config.GetNodeConfigPath()
	f, err := os.Create(nodeConfigPath)
	if err != nil {
		// NOTE(review): log.Fatal exits the process, so the following
		// return is unreachable as written; kept in case the log level is
		// ever downgraded — confirm intent.
		log.WithError(err).WithField(logfields.Path, nodeConfigPath).Fatal("Failed to create node configuration file")
		return err
	}
	defer f.Close()
	if err = d.datapath.WriteNodeConfig(f, &d.nodeDiscovery.LocalConfig); err != nil {
		// Same note as above: Fatal exits before the return executes.
		log.WithError(err).WithField(logfields.Path, nodeConfigPath).Fatal("Failed to write node configuration file")
		return err
	}
	return nil
}
// syncLXCMap adds local host entries to bpf lxcmap, as well as
// ipcache, if needed, and also notifies the daemon and network policy
// hosts cache if changes were made.
func (d *Daemon) syncLXCMap() error {
	// TODO: Update addresses first, in case node addressing has changed.
	// TODO: Once these start changing on runtime, figure out the locking strategy.
	specialIdentities := []identity.IPIdentityPair{}

	// Collect the node's own IPs (reserved "host" identity) plus a
	// zero-prefix "world" entry for each enabled address family.
	if option.Config.EnableIPv4 {
		ip := node.GetInternalIPv4()
		if len(ip) > 0 {
			specialIdentities = append(specialIdentities,
				identity.IPIdentityPair{
					IP: ip,
					ID: identity.ReservedIdentityHost,
				})
		}

		ip = node.GetExternalIPv4()
		if len(ip) > 0 {
			specialIdentities = append(specialIdentities,
				identity.IPIdentityPair{
					IP: ip,
					ID: identity.ReservedIdentityHost,
				})
		}

		specialIdentities = append(specialIdentities,
			identity.IPIdentityPair{
				IP:   net.IPv4zero,
				Mask: net.CIDRMask(0, net.IPv4len*8),
				ID:   identity.ReservedIdentityWorld,
			})
	}

	if option.Config.EnableIPv6 {
		ip := node.GetIPv6()
		if len(ip) > 0 {
			specialIdentities = append(specialIdentities,
				identity.IPIdentityPair{
					IP: ip,
					ID: identity.ReservedIdentityHost,
				})
		}

		ip = node.GetIPv6Router()
		if len(ip) > 0 {
			specialIdentities = append(specialIdentities,
				identity.IPIdentityPair{
					IP: ip,
					ID: identity.ReservedIdentityHost,
				})
		}

		specialIdentities = append(specialIdentities,
			identity.IPIdentityPair{
				IP:   net.IPv6zero,
				Mask: net.CIDRMask(0, net.IPv6len*8),
				ID:   identity.ReservedIdentityWorld,
			})
	}

	existingEndpoints, err := lxcmap.DumpToMap()
	if err != nil {
		return err
	}

	for _, ipIDPair := range specialIdentities {
		isHost := ipIDPair.ID == identity.ReservedIdentityHost
		if isHost {
			added, err := lxcmap.SyncHostEntry(ipIDPair.IP)
			if err != nil {
				return fmt.Errorf("Unable to add host entry to endpoint map: %s", err)
			}
			if added {
				log.WithField(logfields.IPAddr, ipIDPair.IP).Debugf("Added local ip to endpoint map")
			}
		}

		// Entries still present after this loop are stale and removed below.
		delete(existingEndpoints, ipIDPair.IP.String())

		// Upsert will not propagate (reserved:foo->ID) mappings across the cluster,
		// and we specifically don't want to do so.
		ipcache.IPIdentityCache.Upsert(ipIDPair.PrefixString(), nil, ipcache.Identity{
			ID:     ipIDPair.ID,
			Source: ipcache.FromAgentLocal,
		})
	}

	// Delete any host entries in the map that no longer correspond to a
	// current special identity.
	for hostIP, info := range existingEndpoints {
		if ip := net.ParseIP(hostIP); info.IsHost() && ip != nil {
			if err := lxcmap.DeleteEntry(ip); err != nil {
				log.WithError(err).WithFields(logrus.Fields{
					logfields.IPAddr: hostIP,
				}).Warn("Unable to delete obsolete host IP from BPF map")
			} else {
				log.Debugf("Removed outdated host ip %s from endpoint map", hostIP)
			}
		}
	}
	return nil
}
func createIPNet(ones, bits int) *net.IPNet {
return &net.IPNet{
Mask: net.CIDRMask(ones, bits),
}
}
// createPrefixLengthCounter wraps around the counter library, providing
// references to prefix lengths that will always be present: the world
// (/0) and single-host prefixes for both IPv4 and IPv6.
func createPrefixLengthCounter() *counter.PrefixLengthCounter {
	prefixLengths4 := ipcachemap.IPCache.GetMaxPrefixLengths(false)
	prefixLengths6 := ipcachemap.IPCache.GetMaxPrefixLengths(true)
	// Idiom fix: the result must not be named "counter", which would
	// shadow the counter package used on the line above.
	plc := counter.NewPrefixLengthCounter(prefixLengths6, prefixLengths4)

	// This is a bit ugly, but there's not a great way to define an IPNet
	// without parsing strings, etc.
	defaultPrefixes := []*net.IPNet{
		// IPv4
		createIPNet(0, net.IPv4len*8),             // world
		createIPNet(net.IPv4len*8, net.IPv4len*8), // hosts
		// IPv6
		createIPNet(0, net.IPv6len*8),             // world
		createIPNet(net.IPv6len*8, net.IPv6len*8), // hosts
	}
	if _, err := plc.Add(defaultPrefixes); err != nil {
		log.WithError(err).Fatal("Failed to create default prefix lengths")
	}
	return plc
}
func deleteHostDevice() {
link, err := netlink.LinkByName(option.Config.HostDevice)
if err != nil {
log.WithError(err).Warningf("Unable to lookup host device %s. No old cilium_host interface exists", option.Config.HostDevice)
return
}
if err := netlink.LinkDel(link); err != nil {
log.WithError(err).Errorf("Unable to delete host device %s to change allocation CIDR", option.Config.HostDevice)
}
}
// prepareAllocationCIDR reserves special IPs within the allocation CIDR of
// the given address family: the node's primary external IP (when it falls
// inside the range) and the router IP. If a previously restored router IP
// no longer fits the allocation range, the old cilium_host device is
// deleted (unless flannel manages it) and a fresh router IP is allocated.
// Returns the router IP to use for this family.
func (d *Daemon) prepareAllocationCIDR(family datapath.NodeAddressingFamily) (routerIP net.IP, err error) {
	// Reserve the external node IP within the allocation range if required.
	allocRange := family.AllocationCIDR()
	nodeIP := family.PrimaryExternal()
	if allocRange.Contains(nodeIP) {
		err = d.ipam.AllocateIP(nodeIP)
		if err != nil {
			// Bug fix: this function handles both IPv4 and IPv6 (see the
			// callers in NewDaemon), so do not hard-code "IPv4" here.
			err = fmt.Errorf("Unable to allocate external node IP %s from allocation range %s: %s",
				nodeIP, allocRange, err)
			return
		}
	}

	routerIP = family.Router()
	if routerIP != nil && !allocRange.Contains(routerIP) {
		log.Warningf("Detected allocation CIDR change to %s, previous router IP %s", allocRange, routerIP)

		// The restored router IP is not part of the allocation range.
		// This indicates that the allocation range has changed.
		if !option.Config.IsFlannelMasterDeviceSet() {
			deleteHostDevice()
		}

		// force re-allocation of the router IP
		routerIP = nil
	}

	if routerIP == nil {
		routerIP = ip.GetNextIP(family.AllocationCIDR().IP)
	}
	err = d.ipam.AllocateIP(routerIP)
	if err != nil {
		// Family-neutral message, same rationale as above.
		err = fmt.Errorf("Unable to allocate router IP %s from allocation range %s: %s",
			routerIP, allocRange, err)
		return
	}

	return
}
// NewDaemon creates and returns a new Daemon with the parameters set in c.
func NewDaemon(dp datapath.Datapath) (*Daemon, *endpointRestoreState, error) {
bootstrapStats.daemonInit.Start()
// Validate the daemon-specific global options.
if err := option.Config.Validate(); err != nil {
return nil, nil, fmt.Errorf("invalid daemon configuration: %s", err)
}
ctmap.InitMapInfo(option.Config.CTMapEntriesGlobalTCP, option.Config.CTMapEntriesGlobalAny)
mtuConfig := mtu.NewConfiguration(option.Config.Tunnel != option.TunnelDisabled, option.Config.MTU)
if option.Config.EnableIPSec {
if err := ipsec.LoadIPSecKeysFile(option.Config.IPSecKeyFile); err != nil {
return nil, nil, err
}
if option.Config.EnableIPv6 {
if err := ipsec.EnableIPv6Forwarding(); err != nil {
return nil, nil, err
}
}
}
nodeMngr, err := nodemanager.NewManager("all", dp.Node())
if err != nil {
return nil, nil, err
}
d := Daemon{
loadBalancer: loadbalancer.NewLoadBalancer(),
k8sSvcCache: k8s.NewServiceCache(),
policy: policy.NewPolicyRepository(),
uniqueID: map[uint64]context.CancelFunc{},
nodeMonitor: monitorLaunch.NewNodeMonitor(option.Config.MonitorQueueSize),
prefixLengths: createPrefixLengthCounter(),
k8sResourceSynced: map[string]chan struct{}{},
buildEndpointSem: semaphore.NewWeighted(int64(numWorkerThreads())),
compilationMutex: new(lock.RWMutex),
mtuConfig: mtuConfig,
datapath: dp,
nodeDiscovery: nodediscovery.NewNodeDiscovery(nodeMngr, mtuConfig),
}
bootstrapStats.daemonInit.End(true)
// Open or create BPF maps.
bootstrapStats.mapsInit.Start()
err = d.initMaps()
bootstrapStats.mapsInit.EndError(err)
if err != nil {
log.WithError(err).Error("Error while opening/creating BPF maps")
return nil, nil, err
}
// Read the service IDs of existing services from the BPF map and
// reserve them. This must be done *before* connecting to the
// Kubernetes apiserver and serving the API to ensure service IDs are
// not changing across restarts or that a new service could accidentally
// use an existing service ID.
if option.Config.RestoreState && !option.Config.DryMode {
bootstrapStats.restore.Start()
restoreServiceIDs()
bootstrapStats.restore.End(true)
}
t, err := trigger.NewTrigger(trigger.Parameters{
Name: "policy_update",
PrometheusMetrics: true,
MinInterval: time.Second,
TriggerFunc: d.policyUpdateTrigger,
})
if err != nil {
return nil, nil, err
}
d.policyTrigger = t
debug.RegisterStatusObject("k8s-service-cache", &d.k8sSvcCache)
bootstrapStats.k8sInit.Start()
k8s.Configure(option.Config.K8sAPIServer, option.Config.K8sKubeConfigPath)
bootstrapStats.k8sInit.End(true)
d.runK8sServiceHandler()
policyApi.InitEntities(option.Config.ClusterName)
bootstrapStats.workloadsInit.Start()
workloads.Init(&d)
bootstrapStats.workloadsInit.End(true)
bootstrapStats.cleanup.Start()
err = d.clearCiliumVeths()
bootstrapStats.cleanup.EndError(err)
if err != nil {
log.WithError(err).Warning("Unable to clean stale endpoint interfaces")
}
if k8s.IsEnabled() {
bootstrapStats.k8sInit.Start()
if err := k8s.Init(); err != nil {
log.WithError(err).Fatal("Unable to initialize Kubernetes subsystem")
}
// Kubernetes demands that the localhost can always reach local
// pods. Therefore unless the AllowLocalhost policy is set to a
// specific mode, always allow localhost to reach local
// endpoints.
if option.Config.AllowLocalhost == option.AllowLocalhostAuto {
option.Config.AllowLocalhost = option.AllowLocalhostAlways
log.Info("k8s mode: Allowing localhost to reach local endpoints")
}
// In Cilium 1.0, due to limitations on the data path, traffic
// from the outside world on ingress was treated as though it
// was from the host for policy purposes. In order to not break
// existing policies, this option retains the behavior.
if option.Config.K8sLegacyHostAllowsWorld == "true" {
log.Warn("k8s mode: Configuring ingress policy for host to also allow from world. This option will be removed in Cilium 1.5. For more information, see https://cilium.link/host-vs-world")
option.Config.HostAllowsWorld = true
} else {
option.Config.HostAllowsWorld = false
}
bootstrapStats.k8sInit.End(true)
}
// If the device has been specified, the IPv4AllocPrefix and the
// IPv6AllocPrefix were already allocated before the k8s.Init().
//
// If the device hasn't been specified, k8s.Init() allocated the
// IPv4AllocPrefix and the IPv6AllocPrefix from k8s node annotations.
//
// If k8s.Init() failed to retrieve the IPv4AllocPrefix we can try to derive
// it from an existing node_config.h file or from previous cilium_host
// interfaces.
//
// Then, we will calculate the IPv4 or IPv6 alloc prefix based on the IPv6
// or IPv4 alloc prefix, respectively, retrieved by k8s node annotations.
bootstrapStats.ipam.Start()
log.Info("Initializing node addressing")
node.SetIPv4ClusterCidrMaskSize(option.Config.IPv4ClusterCIDRMaskSize)
if option.Config.IPv4Range != AutoCIDR {
allocCIDR, err := cidr.ParseCIDR(option.Config.IPv4Range)
if err != nil {
log.WithError(err).WithField(logfields.V4Prefix, option.Config.IPv4Range).Fatal("Invalid IPv4 allocation prefix")
}
node.SetIPv4AllocRange(allocCIDR)
}
if option.Config.IPv6Range != AutoCIDR {
_, net, err := net.ParseCIDR(option.Config.IPv6Range)
if err != nil {
log.WithError(err).WithField(logfields.V6Prefix, option.Config.IPv6Range).Fatal("Invalid IPv6 allocation prefix")
}
if err := node.SetIPv6NodeRange(net); err != nil {
log.WithError(err).WithField(logfields.V6Prefix, net).Fatal("Invalid per node IPv6 allocation prefix")
}
}
if err := node.AutoComplete(); err != nil {
log.WithError(err).Fatal("Cannot autocomplete node addresses")
}
// Set up ipam conf after init() because we might be running d.conf.KVStoreIPv4Registration
log.WithFields(logrus.Fields{
logfields.V4Prefix: dp.LocalNodeAddressing().IPv4().AllocationCIDR(),
logfields.V6Prefix: dp.LocalNodeAddressing().IPv6().AllocationCIDR(),
}).Info("Initializing IPAM")
d.ipam = ipam.NewIPAM(dp.LocalNodeAddressing(), ipam.Configuration{
EnableIPv4: option.Config.EnableIPv4,
EnableIPv6: option.Config.EnableIPv6,
})
bootstrapStats.ipam.End(true)
if option.Config.WorkloadsEnabled() {
bootstrapStats.workloadsInit.Start()
// workaround for to use the values of the deprecated dockerEndpoint
// variable if it is set with a different value than defaults.
defaultDockerEndpoint := workloads.GetRuntimeDefaultOpt(workloads.Docker, "endpoint")
if defaultDockerEndpoint != option.Config.DockerEndpoint {
option.Config.ContainerRuntimeEndpoint[string(workloads.Docker)] = option.Config.DockerEndpoint
log.Warn(`"docker" flag is deprecated.` +
`Please use "--container-runtime-endpoint=docker=` + defaultDockerEndpoint + `" instead`)
}
opts := make(map[workloads.WorkloadRuntimeType]map[string]string)
for rt, ep := range option.Config.ContainerRuntimeEndpoint {
opts[workloads.WorkloadRuntimeType(rt)] = make(map[string]string)
opts[workloads.WorkloadRuntimeType(rt)][workloads.EpOpt] = ep
}
if opts[workloads.Docker] == nil {
opts[workloads.Docker] = make(map[string]string)
}
opts[workloads.Docker][workloads.DatapathModeOpt] = option.Config.DatapathMode
// Workloads must be initialized after IPAM has started as it requires
// to allocate IPs.
if err := workloads.Setup(d.ipam, option.Config.Workloads, opts); err != nil {
return nil, nil, fmt.Errorf("unable to setup workload: %s", err)
}
log.Infof("Container runtime options set: %s", workloads.GetRuntimeOptions())
bootstrapStats.workloadsInit.End(true)
}
bootstrapStats.restore.Start()
// restore endpoints before any IPs are allocated to avoid eventual IP
// conflicts later on, otherwise any IP conflict will result in the
// endpoint not being able to be restored.
restoredEndpoints, err := d.restoreOldEndpoints(option.Config.StateDir, true)
if err != nil {
log.WithError(err).Error("Unable to restore existing endpoints")
}
bootstrapStats.restore.End(true)
bootstrapStats.ipam.Start()
if option.Config.EnableIPv4 {
routerIP, err := d.prepareAllocationCIDR(dp.LocalNodeAddressing().IPv4())
if err != nil {
return nil, nil, err
}
if routerIP != nil {
node.SetInternalIPv4(routerIP)
}
}
if option.Config.EnableIPv6 {
routerIP, err := d.prepareAllocationCIDR(dp.LocalNodeAddressing().IPv6())
if err != nil {
return nil, nil, err
}
if routerIP != nil {
node.SetIPv6Router(routerIP)
}
}
log.Info("Addressing information:")
log.Infof(" Cluster-Name: %s", option.Config.ClusterName)
log.Infof(" Cluster-ID: %d", option.Config.ClusterID)
log.Infof(" Local node-name: %s", node.GetName())
log.Infof(" Node-IPv6: %s", node.GetIPv6())
if option.Config.EnableIPv6 {
log.Infof(" IPv6 node prefix: %s", node.GetIPv6NodeRange())
log.Infof(" IPv6 allocation prefix: %s", node.GetIPv6AllocRange())
log.Infof(" IPv6 router address: %s", node.GetIPv6Router())
}
log.Infof(" External-Node IPv4: %s", node.GetExternalIPv4())
log.Infof(" Internal-Node IPv4: %s", node.GetInternalIPv4())
if option.Config.EnableIPv4 {
log.Infof(" Cluster IPv4 prefix: %s", node.GetIPv4ClusterRange())
log.Infof(" IPv4 allocation prefix: %s", node.GetIPv4AllocRange())
// Allocate IPv4 service loopback IP
loopbackIPv4, err := d.ipam.AllocateNextFamily(ipam.IPv4)
if err != nil {
return nil, restoredEndpoints, fmt.Errorf("Unable to reserve IPv4 loopback address: %s", err)
}
node.SetIPv4Loopback(loopbackIPv4)
log.Infof(" Loopback IPv4: %s", node.GetIPv4Loopback().String())
}
bootstrapStats.ipam.End(true)
bootstrapStats.healthCheck.Start()
if option.Config.EnableHealthChecking {
if option.Config.EnableIPv4 {
health4, err := d.ipam.AllocateNextFamily(ipam.IPv4)
if err != nil {
return nil, restoredEndpoints, fmt.Errorf("unable to allocate health IPs: %s,see https://cilium.link/ipam-range-full", err)
}
d.nodeDiscovery.LocalNode.IPv4HealthIP = health4
log.Debugf("IPv4 health endpoint address: %s", health4)
}
if option.Config.EnableIPv6 {
health6, err := d.ipam.AllocateNextFamily(ipam.IPv6)
if err != nil {
if d.nodeDiscovery.LocalNode.IPv4HealthIP != nil {
d.ipam.ReleaseIP(d.nodeDiscovery.LocalNode.IPv4HealthIP)
}
return nil, restoredEndpoints, fmt.Errorf("unable to allocate health IPs: %s,see https://cilium.link/ipam-range-full", err)
}
d.nodeDiscovery.LocalNode.IPv6HealthIP = health6
log.Debugf("IPv6 health endpoint address: %s", health6)
}
}
bootstrapStats.healthCheck.End(true)
// Annotation of the k8s node must happen after discovery of the
// PodCIDR range and allocation of the health IPs.
if k8s.IsEnabled() {
bootstrapStats.k8sInit.Start()
log.WithFields(logrus.Fields{
logfields.V4Prefix: node.GetIPv4AllocRange(),
logfields.V6Prefix: node.GetIPv6NodeRange(),
logfields.V4HealthIP: d.nodeDiscovery.LocalNode.IPv4HealthIP,
logfields.V6HealthIP: d.nodeDiscovery.LocalNode.IPv6HealthIP,
logfields.V4CiliumHostIP: node.GetInternalIPv4(),
logfields.V6CiliumHostIP: node.GetIPv6Router(),
}).Info("Annotating k8s node")
err := k8s.Client().AnnotateNode(node.GetName(),
node.GetIPv4AllocRange(), node.GetIPv6NodeRange(),
d.nodeDiscovery.LocalNode.IPv4HealthIP, d.nodeDiscovery.LocalNode.IPv6HealthIP,
node.GetInternalIPv4(), node.GetIPv6Router())
if err != nil {
log.WithError(err).Warning("Cannot annotate k8s node with CIDR range")
}
bootstrapStats.k8sInit.End(true)
}
d.nodeDiscovery.StartDiscovery(node.GetName())
// This needs to be done after the node addressing has been configured
// as the node address is required as suffix.
go cache.InitIdentityAllocator(&d)
bootstrapStats.clusterMeshInit.Start()
if path := option.Config.ClusterMeshConfig; path != "" {
if option.Config.ClusterID == 0 {
log.Info("Cluster-ID is not specified, skipping ClusterMesh initialization")
} else {
log.WithField("path", path).Info("Initializing ClusterMesh routing")
clustermesh, err := clustermesh.NewClusterMesh(clustermesh.Configuration{
Name: "clustermesh",
ConfigDirectory: path,
NodeKeyCreator: nodeStore.KeyCreator,
ServiceMerger: &d.k8sSvcCache,
NodeManager: nodeMngr,
})
if err != nil {
log.WithError(err).Fatal("Unable to initialize ClusterMesh")
}
d.clustermesh = clustermesh
}
}
bootstrapStats.clusterMeshInit.End(true)
bootstrapStats.bpfBase.Start()
err = d.init()
bootstrapStats.bpfBase.EndError(err)
if err != nil {
log.WithError(err).Error("Error while initializing daemon")
return nil, restoredEndpoints, err
}
if err := loader.RestoreTemplates(option.Config.StateDir); err != nil {
log.WithError(err).Error("Unable to restore previous BPF templates")
}
// Start watcher for endpoint IP --> identity mappings in key-value store.
// this needs to be done *after* init() for the daemon in that function,
// we populate the IPCache with the host's IP(s).
ipcache.InitIPIdentityWatcher()
bootstrapStats.proxyStart.Start()
// FIXME: Make the port range configurable.
d.l7Proxy = proxy.StartProxySupport(10000, 20000, option.Config.RunDir,
option.Config.AccessLog, &d, option.Config.AgentLabels)
bootstrapStats.proxyStart.End(true)
bootstrapStats.fqdn.Start()
if err := fqdn.ConfigFromResolvConf(); err != nil {
bootstrapStats.fqdn.EndError(err)
return nil, nil, err
}
err = d.bootstrapFQDN(restoredEndpoints, option.Config.ToFQDNsPreCache)
if err != nil {
bootstrapStats.fqdn.EndError(err)
return nil, restoredEndpoints, err
}
bootstrapStats.fqdn.End(true)
return &d, restoredEndpoints, nil
}
// Close shuts down a daemon: it stops the policy update trigger, if one
// was set up, and terminates local node discovery.
func (d *Daemon) Close() {
	if trigger := d.policyTrigger; trigger != nil {
		trigger.Shutdown()
	}
	d.nodeDiscovery.Close()
}
// attachExistingInfraContainers enumerates the infra (pause) containers
// already known to the container runtime and creates a Cilium endpoint for
// each, so containers started before the agent get a BPF program attached.
// Failures for individual containers are logged and skipped.
func (d *Daemon) attachExistingInfraContainers() {
	m, err := workloads.Client().GetAllInfraContainersPID()
	if err != nil {
		log.WithError(err).Error("Unable to get all infra containers PIDs")
		return
	}
	log.Debugf("Containers found %+v", m)
	for containerID, pid := range m {
		// Derive an endpoint model from the container's PID and the
		// configured flannel master device.
		epModel, err := connector.DeriveEndpointFrom(option.Config.FlannelMasterDevice, containerID, pid)
		if err != nil {
			log.WithError(err).WithField(logfields.ContainerID, containerID).
				Warning("Unable to derive endpoint from existing infra container")
			continue
		}
		log.Debugf("Adding endpoint %+v", epModel)
		ep, err := d.createEndpoint(context.Background(), epModel)
		if err != nil {
			log.WithError(err).WithField(logfields.ContainerID, containerID).
				Warning("Unable to attach existing infra container")
			continue
		}
		log.WithFields(logrus.Fields{
			logfields.ContainerID: epModel.ContainerID,
			logfields.EndpointID:  ep.ID,
		}).Info("Attached BPF program to existing container")
	}
}
// TriggerReloadWithoutCompile reloads all BPF programs and maps without
// recompiling the per-endpoint datapath logic. The base programs are
// recompiled first; if that fails an error is returned and nothing is
// regenerated. On success, regeneration of every endpoint is triggered and
// the returned WaitGroup may be used to wait for it to finish. Individual
// endpoints may still fail to regenerate even when no error is returned.
func (d *Daemon) TriggerReloadWithoutCompile(reason string) (*sync.WaitGroup, error) {
	log.Debugf("BPF reload triggered from %s", reason)
	if err := d.compileBase(); err != nil {
		return nil, fmt.Errorf("Unable to recompile base programs from %s: %s", reason, err)
	}

	req := &endpoint.ExternalRegenerationMetadata{
		Reason:            reason,
		RegenerationLevel: endpoint.RegenerateWithDatapathLoad,
	}
	return endpointmanager.RegenerateAllEndpoints(d, req), nil
}
// changedOption is the callback invoked for each daemon option modified at
// runtime. Toggling Debug adjusts the agent's and proxies' log levels; any
// change bumps the policy revision to force a recalculation.
func changedOption(key string, value option.OptionSetting, data interface{}) {
	daemon := data.(*Daemon)
	if key == option.Debug {
		// Set the debug toggle (this can be a no-op)
		logging.ToggleDebugLogs(daemon.DebugEnabled())
		// Reflect log level change to proxies
		proxy.ChangeLogLevel(logging.GetLevel(logging.DefaultLogger))
	}
	daemon.policy.BumpRevision() // force policy recalculation
}
// patchConfig is the API handler state for PATCH /config requests.
type patchConfig struct {
	daemon *Daemon // daemon whose configuration is patched
}
// NewPatchConfigHandler creates the handler serving PATCH /config requests.
func NewPatchConfigHandler(d *Daemon) PatchConfigHandler {
	handler := &patchConfig{daemon: d}
	return handler
}
// Handle applies a partial daemon configuration update received via the
// PATCH /config API: it validates the requested option map, applies the
// changes, and recompiles the base programs / triggers policy updates when
// anything actually changed.
func (h *patchConfig) Handle(params PatchConfigParams) middleware.Responder {
	log.WithField(logfields.Params, logfields.Repr(params)).Debug("PATCH /config request")

	d := h.daemon
	cfgSpec := params.Configuration

	om, err := option.Config.Opts.Library.ValidateConfigurationMap(cfgSpec.Options)
	if err != nil {
		msg := fmt.Errorf("Invalid configuration option %s", err)
		return api.Error(PatchConfigBadRequestCode, msg)
	}

	// Serialize configuration updates to the daemon.
	option.Config.ConfigPatchMutex.Lock()
	defer option.Config.ConfigPatchMutex.Unlock()

	nmArgs := d.nodeMonitor.GetArgs()
	if numPagesEntry, ok := cfgSpec.Options["MonitorNumPages"]; ok {
		// BUG FIX: the original outer condition also tested nmArgs[0]
		// before checking len(nmArgs), panicking with an index
		// out-of-range when the monitor had no args yet. Only restart the
		// monitor when the requested page count differs from the current.
		if len(nmArgs) == 0 || nmArgs[0] != numPagesEntry {
			args := []string{"--num-pages %s", numPagesEntry}
			d.nodeMonitor.Restart(args)
		}
		if len(cfgSpec.Options) == 0 {
			return NewPatchConfigOK()
		}
		// MonitorNumPages is handled above, not by ApplyValidated.
		delete(cfgSpec.Options, "MonitorNumPages")
	}

	// Track changes to daemon's configuration
	var changes int

	// Only update if value provided for PolicyEnforcement.
	if enforcement := cfgSpec.PolicyEnforcement; enforcement != "" {
		switch enforcement {
		case option.NeverEnforce, option.DefaultEnforcement, option.AlwaysEnforce:
			// Update policy enforcement configuration if needed.
			oldEnforcementValue := policy.GetPolicyEnabled()

			// If the policy enforcement configuration has indeed changed, we have
			// to regenerate endpoints and update daemon's configuration.
			if enforcement != oldEnforcementValue {
				log.Debug("configuration request to change PolicyEnforcement for daemon")
				changes++
				policy.SetPolicyEnabled(enforcement)
			}
		default:
			msg := fmt.Errorf("Invalid option for PolicyEnforcement %s", enforcement)
			log.Warn(msg)
			return api.Error(PatchConfigFailureCode, msg)
		}
		log.Debug("finished configuring PolicyEnforcement for daemon")
	}

	changes += option.Config.Opts.ApplyValidated(om, changedOption, d)
	log.WithField("count", changes).Debug("Applied changes to daemon's configuration")

	if changes > 0 {
		// Only recompile if configuration has changed.
		log.Debug("daemon configuration has changed; recompiling base programs")
		if err := d.compileBase(); err != nil {
			msg := fmt.Errorf("Unable to recompile base programs: %s", err)
			return api.Error(PatchConfigFailureCode, msg)
		}
		d.TriggerPolicyUpdates(true, "agent configuration update")
	}

	return NewPatchConfigOK()
}
// getConfig is the API handler state for GET /config requests.
type getConfig struct {
	daemon *Daemon // daemon whose configuration is reported
}
// NewGetConfigHandler creates the handler serving GET /config requests.
func NewGetConfigHandler(d *Daemon) GetConfigHandler {
	handler := &getConfig{daemon: d}
	return handler
}
// Handle serves GET /config by reporting the daemon's mutable option
// settings (spec) alongside derived runtime state (status).
func (h *getConfig) Handle(params GetConfigParams) middleware.Responder {
	log.WithField(logfields.Params, logfields.Repr(params)).Debug("GET /config request")
	daemon := h.daemon

	spec := &models.DaemonConfigurationSpec{
		Options:           *option.Config.Opts.GetMutableModel(),
		PolicyEnforcement: policy.GetPolicyEnabled(),
	}

	kvstore := &models.KVstoreConfiguration{
		Type:    option.Config.KVStore,
		Options: option.Config.KVStoreOpt,
	}
	ipvlan := &models.IpvlanConfiguration{
		MasterDeviceIndex: int64(option.Config.Ipvlan.MasterDeviceIndex),
		OperationMode:     option.Config.Ipvlan.OperationMode,
	}
	status := &models.DaemonConfigurationStatus{
		Addressing:           node.GetNodeAddressing(),
		K8sConfiguration:     k8s.GetKubeconfigPath(),
		K8sEndpoint:          k8s.GetAPIServer(),
		NodeMonitor:          daemon.nodeMonitor.State(),
		KvstoreConfiguration: kvstore,
		Realized:             spec,
		DeviceMTU:            int64(daemon.mtuConfig.GetDeviceMTU()),
		RouteMTU:             int64(daemon.mtuConfig.GetRouteMTU()),
		DatapathMode:         models.DatapathMode(option.Config.DatapathMode),
		IpvlanConfiguration:  ipvlan,
	}

	return NewGetConfigOK().WithPayload(&models.DaemonConfiguration{
		Spec:   spec,
		Status: status,
	})
}
// listFilterIfs returns the host's network interfaces keyed by the index
// produced by filter. Interfaces for which filter returns -1 are omitted.
func listFilterIfs(filter func(netlink.Link) int) (map[int]netlink.Link, error) {
	links, err := netlink.LinkList()
	if err != nil {
		return nil, err
	}
	filtered := map[int]netlink.Link{}
	for _, link := range links {
		idx := filter(link)
		if idx == -1 {
			continue
		}
		filtered[idx] = link
	}
	return filtered, nil
}
// clearCiliumVeths removes veth devices that are leftovers from failed
// attempts to connect a container. A veth is considered stale when its
// peer interface is also present on the host and its peer's name carries
// the "lxc" prefix.
func (d *Daemon) clearCiliumVeths() error {
	log.Info("Removing stale endpoint interfaces")

	veths, err := listFilterIfs(func(link netlink.Link) int {
		// Keep only veth devices, keyed by interface index.
		if link.Type() != "veth" {
			return -1
		}
		return link.Attrs().Index
	})
	if err != nil {
		return fmt.Errorf("unable to retrieve host network interfaces: %s", err)
	}

	for _, veth := range veths {
		peerIdx := veth.Attrs().ParentIndex
		peer, ok := veths[peerIdx]
		if !ok || peerIdx == 0 || !strings.HasPrefix(peer.Attrs().Name, "lxc") {
			continue
		}
		if err := netlink.LinkDel(veth); err != nil {
			log.WithError(err).Warningf("Unable to delete stale veth device %s", veth.Attrs().Name)
		}
	}
	return nil
}
// numWorkerThreads returns the number of worker threads with a minimum of 4.
func numWorkerThreads() int {
ncpu := runtime.NumCPU()
minWorkerThreads := 2
if ncpu < minWorkerThreads {
return minWorkerThreads
}
return ncpu
}
// GetServiceList returns the load-balancer service models currently known
// to the daemon. The result is never nil.
func (d *Daemon) GetServiceList() []*models.Service {
	d.loadBalancer.BPFMapMU.RLock()
	defer d.loadBalancer.BPFMapMU.RUnlock()

	services := make([]*models.Service, 0, len(d.loadBalancer.SVCMap))
	for _, svc := range d.loadBalancer.SVCMap {
		services = append(services, svc.GetModel())
	}
	return services
}
// SendNotification sends an agent notification to the monitor. In dry-run
// mode nothing is emitted and nil is returned.
func (d *Daemon) SendNotification(typ monitorAPI.AgentNotification, text string) error {
	if option.Config.DryMode {
		return nil
	}
	notification := monitorAPI.AgentNotify{Type: typ, Text: text}
	return d.nodeMonitor.SendEvent(monitorAPI.MessageTypeAgent, notification)
}
// NewProxyLogRecord is invoked by the proxy accesslog on each new access
// log entry and forwards the record to the node monitor.
func (d *Daemon) NewProxyLogRecord(l *logger.LogRecord) error {
	return d.nodeMonitor.SendEvent(monitorAPI.MessageTypeAccessLog, l.LogRecord)
}
// GetNodeSuffix returns the suffix to be appended to kvstore keys of this
// agent: the external IPv4 address when IPv4 is enabled, otherwise the
// node's IPv6 address. Fatals if no address is available yet.
func (d *Daemon) GetNodeSuffix() string {
	var ip net.IP
	if option.Config.EnableIPv4 {
		ip = node.GetExternalIPv4()
	} else if option.Config.EnableIPv6 {
		ip = node.GetIPv6()
	}
	if ip == nil {
		log.Fatal("Node IP not available yet")
	}
	return ip.String()
}
|
package daemon
import (
log "github.com/Sirupsen/logrus"
"github.com/alecthomas/units"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"github.com/docker/go-plugins-helpers/volume"
"github.com/j-griffith/solidfire-docker-driver/sfapi"
)
// SolidFireDriver implements the Docker volume plugin API backed by a
// SolidFire storage cluster.
type SolidFireDriver struct {
	TenantID       int64         // SolidFire account ID volumes are created under
	DefaultVolSz   int64         // default volume size in bytes (already multiplied, see Create)
	VagID          int64         // volume access group ID (not used in this file)
	MountPoint     string        // base directory under which volumes are mounted
	InitiatorIFace string        // iSCSI initiator interface name
	Client         *sfapi.Client // SolidFire API client
	Mutex          *sync.Mutex   // serializes Create and Mount operations
}
// verifyConfiguration fatals unless the config carries every field the
// Docker driver needs to run.
func verifyConfiguration(cfg *sfapi.Config) {
	// We want to verify we have everything we need to run the Docker driver
	switch {
	case cfg.TenantName == "":
		log.Fatal("TenantName required in SolidFire Docker config")
	case cfg.EndPoint == "":
		log.Fatal("EndPoint required in SolidFire Docker config")
	case cfg.DefaultVolSz == 0:
		log.Fatal("DefaultVolSz required in SolidFire Docker config")
	case cfg.SVIP == "":
		log.Fatal("SVIP required in SolidFire Docker config")
	}
}
// New builds a SolidFireDriver from the config file at cfgFile. The
// configured tenant account is looked up and created on demand, and the
// base mount directory is created if missing.
func New(cfgFile string) SolidFireDriver {
	var tenantID int64
	client, _ := sfapi.NewFromConfig(cfgFile)
	req := sfapi.GetAccountByNameRequest{
		Name: client.DefaultTenantName,
	}
	account, err := client.GetAccountByName(&req)
	if err != nil {
		// Account does not exist yet; create it.
		req := sfapi.AddAccountRequest{
			Username: client.DefaultTenantName,
		}
		actID, err := client.AddAccount(&req)
		if err != nil {
			log.Fatalf("Failed init, unable to create Tenant (%s): %+v", client.DefaultTenantName, err)
		}
		tenantID = actID
		log.Debug("Set tenantID: ", tenantID)
	} else {
		tenantID = account.AccountID
		log.Debug("Set tenantID: ", tenantID)
	}

	baseMountPoint := "/var/lib/solidfire/mount"
	if client.Config.MountPoint != "" {
		baseMountPoint = client.Config.MountPoint
	}

	iscsiInterface := "default"
	if client.Config.InitiatorIFace != "" {
		iscsiInterface = client.Config.InitiatorIFace
	}

	if _, err = os.Lstat(baseMountPoint); os.IsNotExist(err) {
		if err := os.MkdirAll(baseMountPoint, 0755); err != nil {
			// BUG FIX: log.Fatal does not interpret format verbs; use Fatalf.
			log.Fatalf("Failed to create Mount directory during driver init: %v", err)
		}
	}

	d := SolidFireDriver{
		TenantID:     tenantID,
		Client:       client,
		Mutex:        &sync.Mutex{},
		DefaultVolSz: client.DefaultVolSize,
		// BUG FIX: store the computed baseMountPoint so the default mount
		// directory is honored when the config leaves MountPoint empty
		// (previously the raw, possibly empty, config value was stored).
		MountPoint:     baseMountPoint,
		InitiatorIFace: iscsiInterface,
	}
	return d
}
// NewSolidFireDriverFromConfig builds a SolidFireDriver from an already
// parsed configuration, creating the tenant account and base mount
// directory on demand.
func NewSolidFireDriverFromConfig(c *sfapi.Config) SolidFireDriver {
	var tenantID int64

	client, _ := sfapi.NewFromConfig("")
	req := sfapi.GetAccountByNameRequest{
		Name: c.TenantName,
	}
	account, err := client.GetAccountByName(&req)
	if err != nil {
		// Account does not exist yet; create it.
		req := sfapi.AddAccountRequest{
			Username: c.TenantName,
		}
		tenantID, err = client.AddAccount(&req)
		if err != nil {
			log.Fatal("Failed to initialize solidfire driver while creating tenant: ", err)
		}
	} else {
		tenantID = account.AccountID
	}

	baseMountPoint := "/var/lib/solidfire/mount"
	if c.MountPoint != "" {
		baseMountPoint = c.MountPoint
	}

	iscsiInterface := "default"
	if c.InitiatorIFace != "" {
		iscsiInterface = c.InitiatorIFace
	}

	if c.Types != nil {
		client.VolumeTypes = c.Types
	}

	defaultVolSize := int64(1)
	if c.DefaultVolSz != 0 {
		defaultVolSize = c.DefaultVolSz
	}

	if _, err = os.Lstat(baseMountPoint); os.IsNotExist(err) {
		if err := os.MkdirAll(baseMountPoint, 0755); err != nil {
			// BUG FIX: log.Fatal does not interpret format verbs; use Fatalf.
			log.Fatalf("Failed to create Mount directory during driver init: %v", err)
		}
	}

	d := SolidFireDriver{
		TenantID:     tenantID,
		Client:       client,
		Mutex:        &sync.Mutex{},
		DefaultVolSz: defaultVolSize,
		// BUG FIX: store the computed baseMountPoint so the default mount
		// directory is honored when the config leaves MountPoint empty
		// (previously the raw, possibly empty, config value was stored).
		MountPoint:     baseMountPoint,
		InitiatorIFace: iscsiInterface,
	}
	log.Debugf("Driver initialized with the following settings:\n%+v\n", d)
	log.Info("Succesfuly initialized SolidFire Docker driver")
	return d
}
// Create provisions a new volume on the SolidFire cluster. Supported
// request options: "size" (GiB), "qos" ("minIOPS,maxIOPS,burstIOPS") and
// "Type" (a named QoS profile from the config). Creating a volume that
// already exists is treated as success.
func (d SolidFireDriver) Create(r volume.Request) volume.Response {
	log.Infof("Create volume %s on %s\n", r.Name, "solidfire")
	d.Mutex.Lock()
	defer d.Mutex.Unlock()
	var req sfapi.CreateVolumeRequest
	var qos sfapi.QoS
	var vsz int64

	log.Debugf("GetVolumeByName: %s, %d", r.Name, d.TenantID)
	v, err := d.Client.GetVolumeByName(r.Name, d.TenantID)
	if err == nil && v.VolumeID != 0 {
		log.Infof("Found existing Volume by Name: %s", r.Name)
		return volume.Response{}
	}

	if r.Options["size"] != "" {
		s, parseErr := strconv.ParseInt(r.Options["size"], 10, 64)
		if parseErr != nil {
			// BUG FIX: a malformed size used to be silently treated as 0 bytes.
			log.Error("Invalid size option in Create: ", r.Options["size"])
			return volume.Response{Err: "invalid size option: " + r.Options["size"]}
		}
		log.Info("Received size request in Create: ", s)
		vsz = int64(units.GiB) * s
	} else {
		// NOTE(jdg): We need to cleanup the conversions and such when we read
		// in from the config file, it's sort of ugly. BUT, just remember that
		// when we pull the value from d.DefaultVolSz it's already been
		// multiplied
		vsz = d.DefaultVolSz
		log.Info("Creating with default size of: ", vsz)
	}

	if r.Options["qos"] != "" {
		iops := strings.Split(r.Options["qos"], ",")
		// BUG FIX: guard the element count; fewer than three values used
		// to panic with an index out-of-range.
		if len(iops) != 3 {
			log.Error("Invalid qos option in Create: ", r.Options["qos"])
			return volume.Response{Err: "invalid qos option, expected \"minIOPS,maxIOPS,burstIOPS\""}
		}
		qos.MinIOPS, _ = strconv.ParseInt(iops[0], 10, 64)
		qos.MaxIOPS, _ = strconv.ParseInt(iops[1], 10, 64)
		qos.BurstIOPS, _ = strconv.ParseInt(iops[2], 10, 64)
		req.Qos = qos
	}

	// BUG FIX: VolumeTypes is a pointer and may be nil when no types are
	// configured; dereferencing it unconditionally panicked.
	if r.Options["Type"] != "" && d.Client.VolumeTypes != nil {
		for _, t := range *d.Client.VolumeTypes {
			if t.Type == r.Options["Type"] {
				req.Qos = t.QOS
			}
		}
	}

	req.TotalSize = vsz
	req.AccountID = d.TenantID
	req.Name = r.Name
	if _, err = d.Client.CreateVolume(&req); err != nil {
		return volume.Response{Err: err.Error()}
	}
	return volume.Response{}
}
// Remove detaches and deletes the named volume from the cluster. Delete
// errors are logged but not returned to the caller.
func (d SolidFireDriver) Remove(r volume.Request) volume.Response {
	log.Info("Remove/Delete Volume: ", r.Name)
	v, lookupErr := d.Client.GetVolumeByName(r.Name, d.TenantID)
	if lookupErr != nil {
		log.Error("Failed to retrieve volume named ", r.Name, "during Remove operation: ", lookupErr)
		return volume.Response{Err: lookupErr.Error()}
	}
	d.Client.DetachVolume(v)
	if delErr := d.Client.DeleteVolume(v.VolumeID); delErr != nil {
		// FIXME(jdg): Check if it's a "DNE" error in that case we're golden
		log.Error("Error encountered during delete: ", delErr)
	}
	return volume.Response{}
}
// Path reports the expected mountpoint of the named volume.
func (d SolidFireDriver) Path(r volume.Request) volume.Response {
	log.Info("Retrieve path info for volume: ", r.Name)
	mountpoint := filepath.Join(d.MountPoint, r.Name)
	log.Debug("Path reported as: ", mountpoint)
	return volume.Response{Mountpoint: mountpoint}
}
// Mount attaches the named volume over iSCSI, formats it with ext4 if it
// carries no filesystem yet, and mounts it under the driver's mount point.
func (d SolidFireDriver) Mount(r volume.Request) volume.Response {
	d.Mutex.Lock()
	defer d.Mutex.Unlock()
	log.Infof("Mounting volume %s on %s\n", r.Name, "solidfire")
	v, err := d.Client.GetVolumeByName(r.Name, d.TenantID)
	if err != nil {
		// BUG FIX: the message had no format verb for r.Name; add one.
		log.Errorf("Failed to retrieve volume by name in mount operation: %s", r.Name)
		return volume.Response{Err: err.Error()}
	}

	path, device, err := d.Client.AttachVolume(&v, d.InitiatorIFace)
	if err != nil {
		log.Errorf("Failed to perform iscsi attach of volume %s: %v", r.Name, err)
		return volume.Response{Err: err.Error()}
	}
	// BUG FIX: the original condition `path == "" || device == "" && err == nil`
	// bound && tighter than || and the branch then called err.Error() on a
	// nil error, panicking. Report a real error message instead.
	if path == "" || device == "" {
		log.Error("Missing path or device, but err not set?")
		log.Debug("Path: ", path, ",Device: ", device)
		return volume.Response{Err: "attach returned empty path or device"}
	}
	log.Debugf("Attached volume at (path, devfile): %s, %s", path, device)

	if sfapi.GetFSType(device) == "" {
		//TODO(jdg): Enable selection of *other* fs types
		if formatErr := sfapi.FormatVolume(device, "ext4"); formatErr != nil {
			// BUG FIX: the message had no format verb for device; add one.
			log.Errorf("Failed to format device: %s", device)
			return volume.Response{Err: formatErr.Error()}
		}
	}

	// BUG FIX: the mount error was previously discarded and the stale (nil)
	// err was dereferenced; capture and return the actual mount error.
	if mountErr := sfapi.Mount(device, d.MountPoint+"/"+r.Name); mountErr != nil {
		log.Error("Failed to mount volume: ", r.Name)
		return volume.Response{Err: mountErr.Error()}
	}
	return volume.Response{Mountpoint: d.MountPoint + "/" + r.Name}
}
// Unmount unmounts the named volume and detaches it from this host.
func (d SolidFireDriver) Unmount(r volume.Request) volume.Response {
	log.Info("Unmounting volume: ", r.Name)
	sfapi.Umount(filepath.Join(d.MountPoint, r.Name))
	vol, lookupErr := d.Client.GetVolumeByName(r.Name, d.TenantID)
	if lookupErr != nil {
		return volume.Response{Err: lookupErr.Error()}
	}
	d.Client.DetachVolume(vol)
	return volume.Response{}
}
Add Get and List commands

Version 1.10 of Docker introduced Get and List commands in the
Volume API, so add them to the daemon driver so that we respond
correctly when they are called.
package daemon
import (
log "github.com/Sirupsen/logrus"
"github.com/alecthomas/units"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"github.com/docker/go-plugins-helpers/volume"
"github.com/j-griffith/solidfire-docker-driver/sfapi"
)
// SolidFireDriver implements the Docker volume plugin API backed by a
// SolidFire storage cluster.
type SolidFireDriver struct {
	TenantID       int64         // SolidFire account ID volumes are created under
	DefaultVolSz   int64         // default volume size in bytes (already multiplied, see Create)
	VagID          int64         // volume access group ID (not used in this file)
	MountPoint     string        // base directory under which volumes are mounted
	InitiatorIFace string        // iSCSI initiator interface name
	Client         *sfapi.Client // SolidFire API client
	Mutex          *sync.Mutex   // serializes Create and Mount operations
}
// verifyConfiguration fatals unless the config carries every field the
// Docker driver needs to run.
func verifyConfiguration(cfg *sfapi.Config) {
	// We want to verify we have everything we need to run the Docker driver
	switch {
	case cfg.TenantName == "":
		log.Fatal("TenantName required in SolidFire Docker config")
	case cfg.EndPoint == "":
		log.Fatal("EndPoint required in SolidFire Docker config")
	case cfg.DefaultVolSz == 0:
		log.Fatal("DefaultVolSz required in SolidFire Docker config")
	case cfg.SVIP == "":
		log.Fatal("SVIP required in SolidFire Docker config")
	}
}
// New builds a SolidFireDriver from the config file at cfgFile. The
// configured tenant account is looked up and created on demand, and the
// base mount directory is created if missing.
func New(cfgFile string) SolidFireDriver {
	var tenantID int64
	client, _ := sfapi.NewFromConfig(cfgFile)
	req := sfapi.GetAccountByNameRequest{
		Name: client.DefaultTenantName,
	}
	account, err := client.GetAccountByName(&req)
	if err != nil {
		// Account does not exist yet; create it.
		req := sfapi.AddAccountRequest{
			Username: client.DefaultTenantName,
		}
		actID, err := client.AddAccount(&req)
		if err != nil {
			log.Fatalf("Failed init, unable to create Tenant (%s): %+v", client.DefaultTenantName, err)
		}
		tenantID = actID
		log.Debug("Set tenantID: ", tenantID)
	} else {
		tenantID = account.AccountID
		log.Debug("Set tenantID: ", tenantID)
	}

	baseMountPoint := "/var/lib/solidfire/mount"
	if client.Config.MountPoint != "" {
		baseMountPoint = client.Config.MountPoint
	}

	iscsiInterface := "default"
	if client.Config.InitiatorIFace != "" {
		iscsiInterface = client.Config.InitiatorIFace
	}

	if _, err = os.Lstat(baseMountPoint); os.IsNotExist(err) {
		if err := os.MkdirAll(baseMountPoint, 0755); err != nil {
			// BUG FIX: log.Fatal does not interpret format verbs; use Fatalf.
			log.Fatalf("Failed to create Mount directory during driver init: %v", err)
		}
	}

	d := SolidFireDriver{
		TenantID:     tenantID,
		Client:       client,
		Mutex:        &sync.Mutex{},
		DefaultVolSz: client.DefaultVolSize,
		// BUG FIX: store the computed baseMountPoint so the default mount
		// directory is honored when the config leaves MountPoint empty
		// (previously the raw, possibly empty, config value was stored).
		MountPoint:     baseMountPoint,
		InitiatorIFace: iscsiInterface,
	}
	return d
}
// NewSolidFireDriverFromConfig builds a SolidFireDriver from an already
// parsed configuration, creating the tenant account and base mount
// directory on demand.
func NewSolidFireDriverFromConfig(c *sfapi.Config) SolidFireDriver {
	var tenantID int64

	client, _ := sfapi.NewFromConfig("")
	req := sfapi.GetAccountByNameRequest{
		Name: c.TenantName,
	}
	account, err := client.GetAccountByName(&req)
	if err != nil {
		// Account does not exist yet; create it.
		req := sfapi.AddAccountRequest{
			Username: c.TenantName,
		}
		tenantID, err = client.AddAccount(&req)
		if err != nil {
			log.Fatal("Failed to initialize solidfire driver while creating tenant: ", err)
		}
	} else {
		tenantID = account.AccountID
	}

	baseMountPoint := "/var/lib/solidfire/mount"
	if c.MountPoint != "" {
		baseMountPoint = c.MountPoint
	}

	iscsiInterface := "default"
	if c.InitiatorIFace != "" {
		iscsiInterface = c.InitiatorIFace
	}

	if c.Types != nil {
		client.VolumeTypes = c.Types
	}

	defaultVolSize := int64(1)
	if c.DefaultVolSz != 0 {
		defaultVolSize = c.DefaultVolSz
	}

	if _, err = os.Lstat(baseMountPoint); os.IsNotExist(err) {
		if err := os.MkdirAll(baseMountPoint, 0755); err != nil {
			// BUG FIX: log.Fatal does not interpret format verbs; use Fatalf.
			log.Fatalf("Failed to create Mount directory during driver init: %v", err)
		}
	}

	d := SolidFireDriver{
		TenantID:     tenantID,
		Client:       client,
		Mutex:        &sync.Mutex{},
		DefaultVolSz: defaultVolSize,
		// BUG FIX: store the computed baseMountPoint so the default mount
		// directory is honored when the config leaves MountPoint empty
		// (previously the raw, possibly empty, config value was stored).
		MountPoint:     baseMountPoint,
		InitiatorIFace: iscsiInterface,
	}
	log.Debugf("Driver initialized with the following settings:\n%+v\n", d)
	log.Info("Succesfuly initialized SolidFire Docker driver")
	return d
}
// Create provisions a new volume on the SolidFire cluster. Supported
// request options: "size" (GiB), "qos" ("minIOPS,maxIOPS,burstIOPS") and
// "Type" (a named QoS profile from the config). Creating a volume that
// already exists is treated as success.
func (d SolidFireDriver) Create(r volume.Request) volume.Response {
	log.Infof("Create volume %s on %s\n", r.Name, "solidfire")
	d.Mutex.Lock()
	defer d.Mutex.Unlock()
	var req sfapi.CreateVolumeRequest
	var qos sfapi.QoS
	var vsz int64

	log.Debugf("GetVolumeByName: %s, %d", r.Name, d.TenantID)
	v, err := d.Client.GetVolumeByName(r.Name, d.TenantID)
	if err == nil && v.VolumeID != 0 {
		log.Infof("Found existing Volume by Name: %s", r.Name)
		return volume.Response{}
	}

	if r.Options["size"] != "" {
		s, parseErr := strconv.ParseInt(r.Options["size"], 10, 64)
		if parseErr != nil {
			// BUG FIX: a malformed size used to be silently treated as 0 bytes.
			log.Error("Invalid size option in Create: ", r.Options["size"])
			return volume.Response{Err: "invalid size option: " + r.Options["size"]}
		}
		log.Info("Received size request in Create: ", s)
		vsz = int64(units.GiB) * s
	} else {
		// NOTE(jdg): We need to cleanup the conversions and such when we read
		// in from the config file, it's sort of ugly. BUT, just remember that
		// when we pull the value from d.DefaultVolSz it's already been
		// multiplied
		vsz = d.DefaultVolSz
		log.Info("Creating with default size of: ", vsz)
	}

	if r.Options["qos"] != "" {
		iops := strings.Split(r.Options["qos"], ",")
		// BUG FIX: guard the element count; fewer than three values used
		// to panic with an index out-of-range.
		if len(iops) != 3 {
			log.Error("Invalid qos option in Create: ", r.Options["qos"])
			return volume.Response{Err: "invalid qos option, expected \"minIOPS,maxIOPS,burstIOPS\""}
		}
		qos.MinIOPS, _ = strconv.ParseInt(iops[0], 10, 64)
		qos.MaxIOPS, _ = strconv.ParseInt(iops[1], 10, 64)
		qos.BurstIOPS, _ = strconv.ParseInt(iops[2], 10, 64)
		req.Qos = qos
	}

	// BUG FIX: VolumeTypes is a pointer and may be nil when no types are
	// configured; dereferencing it unconditionally panicked.
	if r.Options["Type"] != "" && d.Client.VolumeTypes != nil {
		for _, t := range *d.Client.VolumeTypes {
			if t.Type == r.Options["Type"] {
				req.Qos = t.QOS
			}
		}
	}

	req.TotalSize = vsz
	req.AccountID = d.TenantID
	req.Name = r.Name
	if _, err = d.Client.CreateVolume(&req); err != nil {
		return volume.Response{Err: err.Error()}
	}
	return volume.Response{}
}
// Remove detaches and deletes the named volume from the cluster. Delete
// errors are logged but not returned to the caller.
func (d SolidFireDriver) Remove(r volume.Request) volume.Response {
	log.Info("Remove/Delete Volume: ", r.Name)
	v, lookupErr := d.Client.GetVolumeByName(r.Name, d.TenantID)
	if lookupErr != nil {
		log.Error("Failed to retrieve volume named ", r.Name, "during Remove operation: ", lookupErr)
		return volume.Response{Err: lookupErr.Error()}
	}
	d.Client.DetachVolume(v)
	if delErr := d.Client.DeleteVolume(v.VolumeID); delErr != nil {
		// FIXME(jdg): Check if it's a "DNE" error in that case we're golden
		log.Error("Error encountered during delete: ", delErr)
	}
	return volume.Response{}
}
// Path reports the expected mountpoint of the named volume.
func (d SolidFireDriver) Path(r volume.Request) volume.Response {
	log.Info("Retrieve path info for volume: ", r.Name)
	mountpoint := filepath.Join(d.MountPoint, r.Name)
	log.Debug("Path reported as: ", mountpoint)
	return volume.Response{Mountpoint: mountpoint}
}
// Mount attaches the named volume over iSCSI, formats it with ext4 if it
// carries no filesystem yet, and mounts it under the driver's mount point.
func (d SolidFireDriver) Mount(r volume.Request) volume.Response {
	d.Mutex.Lock()
	defer d.Mutex.Unlock()
	log.Infof("Mounting volume %s on %s\n", r.Name, "solidfire")
	v, err := d.Client.GetVolumeByName(r.Name, d.TenantID)
	if err != nil {
		// BUG FIX: the message had no format verb for r.Name; add one.
		log.Errorf("Failed to retrieve volume by name in mount operation: %s", r.Name)
		return volume.Response{Err: err.Error()}
	}

	path, device, err := d.Client.AttachVolume(&v, d.InitiatorIFace)
	if err != nil {
		log.Errorf("Failed to perform iscsi attach of volume %s: %v", r.Name, err)
		return volume.Response{Err: err.Error()}
	}
	// BUG FIX: the original condition `path == "" || device == "" && err == nil`
	// bound && tighter than || and the branch then called err.Error() on a
	// nil error, panicking. Report a real error message instead.
	if path == "" || device == "" {
		log.Error("Missing path or device, but err not set?")
		log.Debug("Path: ", path, ",Device: ", device)
		return volume.Response{Err: "attach returned empty path or device"}
	}
	log.Debugf("Attached volume at (path, devfile): %s, %s", path, device)

	if sfapi.GetFSType(device) == "" {
		//TODO(jdg): Enable selection of *other* fs types
		if formatErr := sfapi.FormatVolume(device, "ext4"); formatErr != nil {
			// BUG FIX: the message had no format verb for device; add one.
			log.Errorf("Failed to format device: %s", device)
			return volume.Response{Err: formatErr.Error()}
		}
	}

	// BUG FIX: the mount error was previously discarded and the stale (nil)
	// err was dereferenced; capture and return the actual mount error.
	if mountErr := sfapi.Mount(device, d.MountPoint+"/"+r.Name); mountErr != nil {
		log.Error("Failed to mount volume: ", r.Name)
		return volume.Response{Err: mountErr.Error()}
	}
	return volume.Response{Mountpoint: d.MountPoint + "/" + r.Name}
}
// Unmount unmounts the named volume and detaches it from this host.
func (d SolidFireDriver) Unmount(r volume.Request) volume.Response {
	log.Info("Unmounting volume: ", r.Name)
	sfapi.Umount(filepath.Join(d.MountPoint, r.Name))
	vol, lookupErr := d.Client.GetVolumeByName(r.Name, d.TenantID)
	if lookupErr != nil {
		return volume.Response{Err: lookupErr.Error()}
	}
	d.Client.DetachVolume(vol)
	return volume.Response{}
}
// Get looks up a single volume by name and reports its name and expected
// mountpoint, as required by the Docker volume API.
func (d SolidFireDriver) Get(r volume.Request) volume.Response {
	log.Info("Get volume: ", r.Name)
	mountpoint := filepath.Join(d.MountPoint, r.Name)
	v, lookupErr := d.Client.GetVolumeByName(r.Name, d.TenantID)
	if lookupErr != nil {
		log.Error("Failed to retrieve volume named ", r.Name, "during Get operation: ", lookupErr)
		return volume.Response{Err: lookupErr.Error()}
	}
	return volume.Response{Volume: &volume.Volume{Name: v.Name, Mountpoint: mountpoint}}
}
// List reports every active volume belonging to the driver's tenant
// account, as required by the Docker volume API.
func (d SolidFireDriver) List(r volume.Request) volume.Response {
	// BUG FIX: the log line was copy-pasted from Get ("Get volume").
	log.Info("List volumes")
	var vols []*volume.Volume
	var req sfapi.ListVolumesForAccountRequest
	req.AccountID = d.TenantID
	vlist, err := d.Client.ListVolumesForAccount(&req)
	if err != nil {
		log.Error("Failed to retrieve volume list:", err)
		return volume.Response{Err: err.Error()}
	}
	for _, v := range vlist {
		if v.Status == "Active" && v.AccountID == d.TenantID {
			// BUG FIX: the mountpoint was computed once from r.Name (which
			// is empty for List requests), so every volume reported the
			// same wrong path; derive it from each volume's own name.
			vols = append(vols, &volume.Volume{
				Name:       v.Name,
				Mountpoint: filepath.Join(d.MountPoint, v.Name),
			})
		}
	}
	return volume.Response{Volumes: vols}
}
|
package main
import (
"errors"
"strings"
"log/syslog"
log "code.google.com/p/log4go"
)
// SysLogWriter is a log4go LogWriter that forwards records over a channel
// to a background goroutine writing them to the local syslog daemon.
type SysLogWriter chan *log.LogRecord
// LogWrite queues a record for delivery to syslog; it blocks when the
// channel buffer is full.
func (w SysLogWriter) LogWrite(rec *log.LogRecord) {
	w <- rec
}
// Close closes the channel, which stops the background syslog-writing
// goroutine. It must not be called more than once.
func (w SysLogWriter) Close() {
	close(w)
}
func GetSysLogFacility(name string) (syslog.Priority, bool) {
switch strings.ToLower(name) {
case "syslog": return syslog.LOG_SYSLOG, true
case "local0": return syslog.LOG_LOCAL0, true
case "local1": return syslog.LOG_LOCAL1, true
case "local2": return syslog.LOG_LOCAL2, true
case "local3": return syslog.LOG_LOCAL3, true
case "local4": return syslog.LOG_LOCAL4, true
case "local5": return syslog.LOG_LOCAL5, true
case "local6": return syslog.LOG_LOCAL6, true
case "local7": return syslog.LOG_LOCAL7, true
default: return syslog.LOG_SYSLOG, false
}
}
// GetWriter selects the syslog.Writer method that matches a log4go level
// tag. Debug-like tags map to Debug; unrecognized tags map to Crit.
func GetWriter(writer *syslog.Writer, level string) func(string) error {
	handlers := map[string]func(string) error{
		"DEBG": writer.Debug,
		"TRAC": writer.Debug,
		"FINE": writer.Debug,
		"FNST": writer.Debug,
		"INFO": writer.Info,
		"WARN": writer.Warning,
		"EROR": writer.Err,
	}
	if h, ok := handlers[level]; ok {
		return h
	}
	return writer.Crit
}
// Log writes message to writer at the syslog severity matching the log4go
// level tag. The write error is deliberately ignored (best-effort logging).
func Log(writer *syslog.Writer, level string, message string) {
	GetWriter(writer, level)(message)
}
// connectSyslogDaemon tries to reach the local syslog daemon over the
// common unix socket transports and paths, returning the first writer that
// connects successfully.
func connectSyslogDaemon(priority syslog.Priority) (writer *syslog.Writer, err error) {
	networks := []string{"unixgram", "unix"}
	paths := []string{"/dev/log", "/var/run/syslog"}
	for _, network := range networks {
		for _, path := range paths {
			writer, err = syslog.Dial(network, path, priority, "influxdb")
			if err == nil {
				return writer, nil
			}
		}
	}
	return nil, errors.New("cannot connect to Syslog Daemon")
}
// NewSysLogWriter connects to the local syslog daemon and returns a
// log4go-compatible writer that forwards records to it. A background
// goroutine drains the channel; calling Close() on the returned writer
// closes the channel, which terminates the goroutine.
func NewSysLogWriter(priority syslog.Priority) (w SysLogWriter, err error) {
	writer, err := connectSyslogDaemon(priority)
	if err != nil {
		return
	}
	w = SysLogWriter(make(chan *log.LogRecord, log.LogBufferLength))
	go func() {
		// BUG FIX: the previous version deferred w.Close() here, but the
		// range loop below only exits after the channel has already been
		// closed by the owner, so the deferred close panicked with
		// "close of closed channel". The channel owner (Close()) is the
		// only closer now.
		for rec := range w {
			m := log.FormatLogRecord("(%S) %M", rec)
			Log(writer, rec.Level.String(), m)
		}
	}()
	return
}
Reformat code
package main
import (
"errors"
"log/syslog"
"strings"
log "code.google.com/p/log4go"
)
// sysLogWriter is a log4go output sink that forwards records to the local
// syslog daemon through a buffered channel.
type sysLogWriter chan *log.LogRecord
// LogWrite queues rec for delivery; it blocks if the channel buffer is full.
func (w sysLogWriter) LogWrite(rec *log.LogRecord) {
w <- rec
}
// Close closes the channel, letting the draining goroutine exit.
func (w sysLogWriter) Close() {
close(w)
}
// GetSysLogFacility maps a facility name (case-insensitive) to its syslog
// priority. The boolean reports whether the name was recognized; unknown
// names fall back to LOG_SYSLOG.
func GetSysLogFacility(name string) (syslog.Priority, bool) {
	facilities := map[string]syslog.Priority{
		"syslog": syslog.LOG_SYSLOG,
		"local0": syslog.LOG_LOCAL0,
		"local1": syslog.LOG_LOCAL1,
		"local2": syslog.LOG_LOCAL2,
		"local3": syslog.LOG_LOCAL3,
		"local4": syslog.LOG_LOCAL4,
		"local5": syslog.LOG_LOCAL5,
		"local6": syslog.LOG_LOCAL6,
		"local7": syslog.LOG_LOCAL7,
	}
	if p, ok := facilities[strings.ToLower(name)]; ok {
		return p, true
	}
	return syslog.LOG_SYSLOG, false
}
// getWriter selects the syslog severity method matching a log4go level
// string. Unknown levels are treated as critical.
func getWriter(writer *syslog.Writer, level string) func(string) error {
	if level == "DEBG" || level == "TRAC" || level == "FINE" || level == "FNST" {
		return writer.Debug
	}
	if level == "INFO" {
		return writer.Info
	}
	if level == "WARN" {
		return writer.Warning
	}
	if level == "EROR" {
		return writer.Err
	}
	return writer.Crit
}
// connectSyslogDaemon tries the usual unix-socket endpoints of the local
// syslog daemon in order and returns the first successful connection.
// When every combination fails, a generic error is returned.
func connectSyslogDaemon(priority syslog.Priority) (writer *syslog.Writer, err error) {
	networks := []string{"unixgram", "unix"}
	paths := []string{"/dev/log", "/var/run/syslog"}
	for _, network := range networks {
		for _, path := range paths {
			if writer, err = syslog.Dial(network, path, priority, "influxdb"); err == nil {
				return writer, nil
			}
		}
	}
	return nil, errors.New("cannot connect to Syslog Daemon")
}
// NewSysLogWriter connects to the local syslog daemon and returns a
// log4go-compatible writer that forwards records to it. A background
// goroutine drains the channel; calling Close() on the returned writer
// closes the channel, which terminates the goroutine.
func NewSysLogWriter(priority syslog.Priority) (w sysLogWriter, err error) {
	writer, err := connectSyslogDaemon(priority)
	if err != nil {
		return
	}
	w = sysLogWriter(make(chan *log.LogRecord, log.LogBufferLength))
	go func() {
		// BUG FIX: the previous version deferred w.Close() here, but the
		// range loop below only exits after the channel has already been
		// closed by the owner, so the deferred close panicked with
		// "close of closed channel". The channel owner (Close()) is the
		// only closer now.
		for rec := range w {
			m := log.FormatLogRecord("(%S) %M", rec)
			getWriter(writer, rec.Level.String())(m)
		}
	}()
	return
}
|
package main
import (
"fmt"
"io/ioutil"
"net/http"
"os"
"github.com/gorilla/mux"
)
// main wires up GET/POST routes for /files/{id} and serves on the port
// named by the PORT environment variable.
func main() {
	r := mux.NewRouter()
	file := r.Path("/files/{id}").Subrouter()
	file.Methods("GET").HandlerFunc(FileGetHandler)
	file.Methods("POST").HandlerFunc(FileCreateHandler)
	// ListenAndServe only returns on failure (e.g. PORT unset or in use);
	// previously the error was silently discarded.
	if err := http.ListenAndServe(":"+os.Getenv("PORT"), r); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
// FileCreateHandler stores the request body as a file named by the {id}
// route variable under the hard-coded base directory.
// NOTE(review): id comes straight from the URL and is concatenated into
// the path — confirm mux path matching cannot yield traversal ("..").
func FileCreateHandler(rw http.ResponseWriter, r *http.Request) {
	id := mux.Vars(r)["id"]
	path := "/Users/sebastian/" + id
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		// Previously all errors in this handler were silently ignored.
		rw.WriteHeader(http.StatusInternalServerError)
		return
	}
	file, err := os.Create(path)
	if err != nil {
		rw.WriteHeader(http.StatusInternalServerError)
		return
	}
	defer file.Close()
	if _, err := file.Write(body); err != nil {
		rw.WriteHeader(http.StatusInternalServerError)
		return
	}
	file.Sync()
	fmt.Printf("Created file with id: %s", id)
	rw.WriteHeader(http.StatusOK)
}
// FileGetHandler serves the file named by the {id} route variable from the
// hard-coded base directory, responding 404 when the file cannot be read.
// NOTE(review): id is user-controlled and joined into the path — looks
// vulnerable to path traversal; confirm mux rejects "/" and "..".
func FileGetHandler(rw http.ResponseWriter, r *http.Request) {
id := mux.Vars(r)["id"]
path := "/Users/sebastian/" + id
data, err := ioutil.ReadFile(path)
if err != nil {
rw.WriteHeader(http.StatusNotFound)
return
}
rw.WriteHeader(http.StatusOK)
rw.Write(data)
}
Handling errors for POSTs
package main
import (
"fmt"
"io/ioutil"
"net/http"
"os"
"github.com/gorilla/mux"
)
// main wires up GET/POST routes for /files/{id} and serves on the port
// named by the PORT environment variable.
func main() {
	r := mux.NewRouter()
	file := r.Path("/files/{id}").Subrouter()
	file.Methods("GET").HandlerFunc(FileGetHandler)
	file.Methods("POST").HandlerFunc(FileCreateHandler)
	// ListenAndServe only returns on failure (e.g. PORT unset or in use);
	// previously the error was silently discarded.
	if err := http.ListenAndServe(":"+os.Getenv("PORT"), r); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
// FileCreateHandler stores the request body as a file named by the {id}
// route variable under the hard-coded base directory. Errors are reported
// as 500 with a short message.
// NOTE(review): id is user-controlled and joined into the path — confirm
// traversal ("..") cannot escape the base directory.
func FileCreateHandler(rw http.ResponseWriter, r *http.Request) {
	id := mux.Vars(r)["id"]
	path := "/Users/sebastian/" + id
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		rw.WriteHeader(http.StatusInternalServerError)
		rw.Write([]byte("Error: " + err.Error()))
		return
	}
	file, err := os.Create(path)
	if err != nil {
		rw.WriteHeader(http.StatusInternalServerError)
		rw.Write([]byte("Error: " + err.Error()))
		// BUG FIX: without this return, the nil *os.File was dereferenced
		// by the deferred Close and the Write below, panicking the handler.
		return
	}
	defer file.Close()
	if _, err := file.Write(body); err != nil {
		rw.WriteHeader(http.StatusInternalServerError)
		rw.Write([]byte("Error: " + err.Error()))
		return
	}
	file.Sync()
	fmt.Printf("Created file with id: %s", id)
	rw.WriteHeader(http.StatusOK)
}
// FileGetHandler serves the file named by the {id} route variable from the
// hard-coded base directory, responding 404 when the file cannot be read.
// NOTE(review): id is user-controlled and joined into the path — looks
// vulnerable to path traversal; confirm mux rejects "/" and "..".
func FileGetHandler(rw http.ResponseWriter, r *http.Request) {
id := mux.Vars(r)["id"]
path := "/Users/sebastian/" + id
data, err := ioutil.ReadFile(path)
if err != nil {
rw.WriteHeader(http.StatusNotFound)
return
}
rw.WriteHeader(http.StatusOK)
rw.Write(data)
}
|
package main
import (
"bytes"
"fmt"
"os"
"path/filepath"
"regexp"
"runtime"
"strings"
)
type NinjaGenerator struct {
f *os.File
nodes []*DepNode
vars Vars
exports map[string]bool
ex *Executor
ruleId int
done map[string]bool
ccRe *regexp.Regexp
}
// NewNinjaGenerator builds a generator over the given dependency graph.
// The compiler-detection regexp is used to wrap compile commands with
// gomacc when a goma directory is configured.
func NewNinjaGenerator(g *DepGraph) *NinjaGenerator {
	return &NinjaGenerator{
		nodes:   g.nodes,
		vars:    g.vars,
		exports: g.exports,
		done:    make(map[string]bool),
		// MustCompile panics on a malformed pattern, exactly what the
		// previous Compile-then-panic dance did for this constant regexp.
		ccRe: regexp.MustCompile(`^prebuilts/(gcc|clang)/.*(gcc|g\+\+|clang|clang\+\+) .* -c `),
	}
}
// getDepfileImpl scans a gcc-style command line and guesses the path of
// the dependency (.d) file it will write. Returns "" with no error when
// the command does not generate deps at all (neither -MD nor -MMD).
func getDepfileImpl(ss string) (string, error) {
// Pad with a space so a flag at the very end of the line still matches " -MD ".
tss := ss + " "
if !strings.Contains(tss, " -MD ") && !strings.Contains(tss, " -MMD ") {
return "", nil
}
mfIndex := strings.Index(ss, " -MF ")
if mfIndex >= 0 {
// Explicit -MF: the depfile is its argument (trimLeftSpace skips the
// separator between the flag and the argument).
mf := trimLeftSpace(ss[mfIndex+4:])
if strings.Index(mf, " -MF ") >= 0 {
return "", fmt.Errorf("Multiple output file candidates in %s", ss)
}
mfEndIndex := strings.IndexAny(mf, " \t\n")
if mfEndIndex >= 0 {
mf = mf[:mfEndIndex]
}
return mf, nil
}
// No -MF: derive the depfile from the -o output path instead.
outIndex := strings.Index(ss, " -o ")
if outIndex < 0 {
return "", fmt.Errorf("Cannot find the depfile in %s", ss)
}
out := trimLeftSpace(ss[outIndex+4:])
if strings.Index(out, " -o ") >= 0 {
return "", fmt.Errorf("Multiple output file candidates in %s", ss)
}
outEndIndex := strings.IndexAny(out, " \t\n")
if outEndIndex >= 0 {
out = out[:outEndIndex]
}
// gcc with -MD/-MMD writes <output-without-extension>.d by default.
return stripExt(out) + ".d", nil
}
// getDepfile returns the dependency-file path for the given shell command,
// applying several Android-specific workarounds, or "" when no depfile is
// expected. It delegates the flag parsing to getDepfileImpl.
func getDepfile(ss string) (string, error) {
// A hack for Android - llvm-rs-cc seems not to emit a dep file.
if strings.Contains(ss, "bin/llvm-rs-cc ") {
return "", nil
}
r, err := getDepfileImpl(ss)
if r == "" || err != nil {
return r, err
}
// A hack for Android to get .P files instead of .d.
p := stripExt(r) + ".P"
if strings.Contains(ss, p) {
return p, nil
}
// A hack for Android. For .s files, GCC does not use
// C preprocessor, so it ignores -MF flag.
as := "/" + stripExt(filepath.Base(r)) + ".s"
if strings.Contains(ss, as) {
return "", nil
}
return r, nil
}
// stripShellComment removes a trailing '#' comment from a shell command,
// honoring single/double/back quotes and backslash escapes so that a '#'
// inside quotes or escaped is left alone.
func stripShellComment(s string) string {
	if strings.IndexByte(s, '#') < 0 {
		// No comment marker anywhere; nothing to strip.
		return s
	}
	inEscape := false
	var openQuote rune
	for pos, ch := range s {
		switch {
		case openQuote > 0:
			// Inside a quoted region only the matching close quote ends it.
			// A single quote cannot be escaped in shell; others can.
			if openQuote == ch && (openQuote == '\'' || !inEscape) {
				openQuote = 0
			}
		case !inEscape:
			if ch == '#' {
				return s[:pos]
			}
			if ch == '\'' || ch == '"' || ch == '`' {
				openQuote = ch
			}
		}
		// A backslash escapes exactly the next character.
		inEscape = !inEscape && ch == '\\'
	}
	return s
}
func (n *NinjaGenerator) genShellScript(runners []runner) (string, bool) {
useGomacc := false
var buf bytes.Buffer
for i, r := range runners {
if i > 0 {
if runners[i-1].ignoreError {
buf.WriteString(" ; ")
} else {
buf.WriteString(" && ")
}
}
cmd := stripShellComment(r.cmd)
cmd = trimLeftSpace(cmd)
cmd = strings.Replace(cmd, "\\\n", " ", -1)
cmd = strings.TrimRight(cmd, " \t\n;")
cmd = strings.Replace(cmd, "$", "$$", -1)
cmd = strings.Replace(cmd, "\t", " ", -1)
if cmd == "" {
cmd = "true"
}
if gomaDir != "" && n.ccRe.MatchString(cmd) {
cmd = fmt.Sprintf("%s/gomacc %s", gomaDir, cmd)
useGomacc = true
}
needsSubShell := i > 0 || len(runners) > 1
if cmd[0] == '(' {
needsSubShell = false
}
if needsSubShell {
buf.WriteByte('(')
}
buf.WriteString(cmd)
if i == len(runners)-1 && r.ignoreError {
buf.WriteString(" ; true")
}
if needsSubShell {
buf.WriteByte(')')
}
}
return buf.String(), gomaDir != "" && !useGomacc
}
// genRuleName returns a unique ninja rule name, advancing the counter.
func (n *NinjaGenerator) genRuleName() string {
	id := n.ruleId
	n.ruleId++
	return fmt.Sprintf("rule%d", id)
}
func (n *NinjaGenerator) emitBuild(output, rule, dep string) {
fmt.Fprintf(n.f, "build %s: %s%s\n", output, rule, dep)
}
func getDepString(node *DepNode) string {
var deps []string
var orderOnlys []string
for _, d := range node.Deps {
if d.IsOrderOnly {
orderOnlys = append(orderOnlys, d.Output)
} else {
deps = append(deps, d.Output)
}
}
dep := ""
if len(deps) > 0 {
dep += fmt.Sprintf(" %s", strings.Join(deps, " "))
}
if len(orderOnlys) > 0 {
dep += fmt.Sprintf(" || %s", strings.Join(orderOnlys, " "))
}
return dep
}
func (n *NinjaGenerator) emitNode(node *DepNode) {
if n.done[node.Output] {
return
}
n.done[node.Output] = true
if len(node.Cmds) == 0 && len(node.Deps) == 0 && !node.IsPhony {
return
}
runners, _ := n.ex.createRunners(node, true)
ruleName := "phony"
useLocalPool := false
if len(runners) > 0 {
ruleName = n.genRuleName()
fmt.Fprintf(n.f, "rule %s\n", ruleName)
fmt.Fprintf(n.f, " description = build $out\n")
ss, ulp := n.genShellScript(runners)
if ulp {
useLocalPool = true
}
depfile, err := getDepfile(ss)
if err != nil {
panic(err)
}
if depfile != "" {
fmt.Fprintf(n.f, " depfile = %s\n", depfile)
}
// It seems Linux is OK with ~130kB.
// TODO: Find this number automatically.
ArgLenLimit := 100 * 1000
if len(ss) > ArgLenLimit {
fmt.Fprintf(n.f, " rspfile = $out.rsp\n")
fmt.Fprintf(n.f, " rspfile_content = %s\n", ss)
ss = "sh $out.rsp"
}
fmt.Fprintf(n.f, " command = %s\n", ss)
}
n.emitBuild(node.Output, ruleName, getDepString(node))
if useLocalPool {
fmt.Fprintf(n.f, " pool = local_pool\n")
}
for _, d := range node.Deps {
n.emitNode(d)
}
}
func (n *NinjaGenerator) generateShell() {
f, err := os.Create("ninja.sh")
if err != nil {
panic(err)
}
defer f.Close()
ev := newEvaluator(n.vars)
shell := ev.EvaluateVar("SHELL")
if shell == "" {
shell = "/bin/sh"
}
fmt.Fprintf(f, "#!%s\n", shell)
for name, export := range n.exports {
if export {
fmt.Fprintf(f, "export %s=%s\n", name, ev.EvaluateVar(name))
} else {
fmt.Fprintf(f, "unset %s\n", name)
}
}
if gomaDir == "" {
fmt.Fprintf(f, "exec ninja\n")
} else {
fmt.Fprintf(f, "exec ninja -j300\n")
}
err = f.Chmod(0755)
if err != nil {
panic(err)
}
}
func (n *NinjaGenerator) generateNinja() {
f, err := os.Create("build.ninja")
if err != nil {
panic(err)
}
defer f.Close()
n.f = f
fmt.Fprintf(n.f, "# Generated by kati\n")
fmt.Fprintf(n.f, "\n")
if gomaDir != "" {
fmt.Fprintf(n.f, "pool local_pool\n")
fmt.Fprintf(n.f, " depth = %d\n", runtime.NumCPU())
}
n.ex = NewExecutor(n.vars)
for _, node := range n.nodes {
n.emitNode(node)
}
}
func GenerateNinja(g *DepGraph) {
n := NewNinjaGenerator(g)
n.generateShell()
n.generateNinja()
}
Pass parameters passed to ninja.sh
package main
import (
"bytes"
"fmt"
"os"
"path/filepath"
"regexp"
"runtime"
"strings"
)
type NinjaGenerator struct {
f *os.File
nodes []*DepNode
vars Vars
exports map[string]bool
ex *Executor
ruleId int
done map[string]bool
ccRe *regexp.Regexp
}
// NewNinjaGenerator builds a generator over the given dependency graph.
// The compiler-detection regexp is used to wrap compile commands with
// gomacc when a goma directory is configured.
func NewNinjaGenerator(g *DepGraph) *NinjaGenerator {
	return &NinjaGenerator{
		nodes:   g.nodes,
		vars:    g.vars,
		exports: g.exports,
		done:    make(map[string]bool),
		// MustCompile panics on a malformed pattern, exactly what the
		// previous Compile-then-panic dance did for this constant regexp.
		ccRe: regexp.MustCompile(`^prebuilts/(gcc|clang)/.*(gcc|g\+\+|clang|clang\+\+) .* -c `),
	}
}
// getDepfileImpl scans a gcc-style command line and guesses the path of
// the dependency (.d) file it will write. Returns "" with no error when
// the command does not generate deps at all (neither -MD nor -MMD).
func getDepfileImpl(ss string) (string, error) {
// Pad with a space so a flag at the very end of the line still matches " -MD ".
tss := ss + " "
if !strings.Contains(tss, " -MD ") && !strings.Contains(tss, " -MMD ") {
return "", nil
}
mfIndex := strings.Index(ss, " -MF ")
if mfIndex >= 0 {
// Explicit -MF: the depfile is its argument (trimLeftSpace skips the
// separator between the flag and the argument).
mf := trimLeftSpace(ss[mfIndex+4:])
if strings.Index(mf, " -MF ") >= 0 {
return "", fmt.Errorf("Multiple output file candidates in %s", ss)
}
mfEndIndex := strings.IndexAny(mf, " \t\n")
if mfEndIndex >= 0 {
mf = mf[:mfEndIndex]
}
return mf, nil
}
// No -MF: derive the depfile from the -o output path instead.
outIndex := strings.Index(ss, " -o ")
if outIndex < 0 {
return "", fmt.Errorf("Cannot find the depfile in %s", ss)
}
out := trimLeftSpace(ss[outIndex+4:])
if strings.Index(out, " -o ") >= 0 {
return "", fmt.Errorf("Multiple output file candidates in %s", ss)
}
outEndIndex := strings.IndexAny(out, " \t\n")
if outEndIndex >= 0 {
out = out[:outEndIndex]
}
// gcc with -MD/-MMD writes <output-without-extension>.d by default.
return stripExt(out) + ".d", nil
}
// getDepfile returns the dependency-file path for the given shell command,
// applying several Android-specific workarounds, or "" when no depfile is
// expected. It delegates the flag parsing to getDepfileImpl.
func getDepfile(ss string) (string, error) {
// A hack for Android - llvm-rs-cc seems not to emit a dep file.
if strings.Contains(ss, "bin/llvm-rs-cc ") {
return "", nil
}
r, err := getDepfileImpl(ss)
if r == "" || err != nil {
return r, err
}
// A hack for Android to get .P files instead of .d.
p := stripExt(r) + ".P"
if strings.Contains(ss, p) {
return p, nil
}
// A hack for Android. For .s files, GCC does not use
// C preprocessor, so it ignores -MF flag.
as := "/" + stripExt(filepath.Base(r)) + ".s"
if strings.Contains(ss, as) {
return "", nil
}
return r, nil
}
// stripShellComment removes a trailing '#' comment from a shell command,
// honoring single/double/back quotes and backslash escapes so that a '#'
// inside quotes or escaped is left alone.
func stripShellComment(s string) string {
	if strings.IndexByte(s, '#') < 0 {
		// No comment marker anywhere; nothing to strip.
		return s
	}
	inEscape := false
	var openQuote rune
	for pos, ch := range s {
		switch {
		case openQuote > 0:
			// Inside a quoted region only the matching close quote ends it.
			// A single quote cannot be escaped in shell; others can.
			if openQuote == ch && (openQuote == '\'' || !inEscape) {
				openQuote = 0
			}
		case !inEscape:
			if ch == '#' {
				return s[:pos]
			}
			if ch == '\'' || ch == '"' || ch == '`' {
				openQuote = ch
			}
		}
		// A backslash escapes exactly the next character.
		inEscape = !inEscape && ch == '\\'
	}
	return s
}
func (n *NinjaGenerator) genShellScript(runners []runner) (string, bool) {
useGomacc := false
var buf bytes.Buffer
for i, r := range runners {
if i > 0 {
if runners[i-1].ignoreError {
buf.WriteString(" ; ")
} else {
buf.WriteString(" && ")
}
}
cmd := stripShellComment(r.cmd)
cmd = trimLeftSpace(cmd)
cmd = strings.Replace(cmd, "\\\n", " ", -1)
cmd = strings.TrimRight(cmd, " \t\n;")
cmd = strings.Replace(cmd, "$", "$$", -1)
cmd = strings.Replace(cmd, "\t", " ", -1)
if cmd == "" {
cmd = "true"
}
if gomaDir != "" && n.ccRe.MatchString(cmd) {
cmd = fmt.Sprintf("%s/gomacc %s", gomaDir, cmd)
useGomacc = true
}
needsSubShell := i > 0 || len(runners) > 1
if cmd[0] == '(' {
needsSubShell = false
}
if needsSubShell {
buf.WriteByte('(')
}
buf.WriteString(cmd)
if i == len(runners)-1 && r.ignoreError {
buf.WriteString(" ; true")
}
if needsSubShell {
buf.WriteByte(')')
}
}
return buf.String(), gomaDir != "" && !useGomacc
}
func (n *NinjaGenerator) genRuleName() string {
ruleName := fmt.Sprintf("rule%d", n.ruleId)
n.ruleId++
return ruleName
}
func (n *NinjaGenerator) emitBuild(output, rule, dep string) {
fmt.Fprintf(n.f, "build %s: %s%s\n", output, rule, dep)
}
func getDepString(node *DepNode) string {
var deps []string
var orderOnlys []string
for _, d := range node.Deps {
if d.IsOrderOnly {
orderOnlys = append(orderOnlys, d.Output)
} else {
deps = append(deps, d.Output)
}
}
dep := ""
if len(deps) > 0 {
dep += fmt.Sprintf(" %s", strings.Join(deps, " "))
}
if len(orderOnlys) > 0 {
dep += fmt.Sprintf(" || %s", strings.Join(orderOnlys, " "))
}
return dep
}
func (n *NinjaGenerator) emitNode(node *DepNode) {
if n.done[node.Output] {
return
}
n.done[node.Output] = true
if len(node.Cmds) == 0 && len(node.Deps) == 0 && !node.IsPhony {
return
}
runners, _ := n.ex.createRunners(node, true)
ruleName := "phony"
useLocalPool := false
if len(runners) > 0 {
ruleName = n.genRuleName()
fmt.Fprintf(n.f, "rule %s\n", ruleName)
fmt.Fprintf(n.f, " description = build $out\n")
ss, ulp := n.genShellScript(runners)
if ulp {
useLocalPool = true
}
depfile, err := getDepfile(ss)
if err != nil {
panic(err)
}
if depfile != "" {
fmt.Fprintf(n.f, " depfile = %s\n", depfile)
}
// It seems Linux is OK with ~130kB.
// TODO: Find this number automatically.
ArgLenLimit := 100 * 1000
if len(ss) > ArgLenLimit {
fmt.Fprintf(n.f, " rspfile = $out.rsp\n")
fmt.Fprintf(n.f, " rspfile_content = %s\n", ss)
ss = "sh $out.rsp"
}
fmt.Fprintf(n.f, " command = %s\n", ss)
}
n.emitBuild(node.Output, ruleName, getDepString(node))
if useLocalPool {
fmt.Fprintf(n.f, " pool = local_pool\n")
}
for _, d := range node.Deps {
n.emitNode(d)
}
}
func (n *NinjaGenerator) generateShell() {
f, err := os.Create("ninja.sh")
if err != nil {
panic(err)
}
defer f.Close()
ev := newEvaluator(n.vars)
shell := ev.EvaluateVar("SHELL")
if shell == "" {
shell = "/bin/sh"
}
fmt.Fprintf(f, "#!%s\n", shell)
for name, export := range n.exports {
if export {
fmt.Fprintf(f, "export %s=%s\n", name, ev.EvaluateVar(name))
} else {
fmt.Fprintf(f, "unset %s\n", name)
}
}
if gomaDir == "" {
fmt.Fprintln(f, `exec ninja "$@"`)
} else {
fmt.Fprintln(f, `exec ninja -j300 "$@"`)
}
err = f.Chmod(0755)
if err != nil {
panic(err)
}
}
func (n *NinjaGenerator) generateNinja() {
f, err := os.Create("build.ninja")
if err != nil {
panic(err)
}
defer f.Close()
n.f = f
fmt.Fprintf(n.f, "# Generated by kati\n")
fmt.Fprintf(n.f, "\n")
if gomaDir != "" {
fmt.Fprintf(n.f, "pool local_pool\n")
fmt.Fprintf(n.f, " depth = %d\n", runtime.NumCPU())
}
n.ex = NewExecutor(n.vars)
for _, node := range n.nodes {
n.emitNode(node)
}
}
func GenerateNinja(g *DepGraph) {
n := NewNinjaGenerator(g)
n.generateShell()
n.generateNinja()
}
|
Fix infinite-loop bug: move i.Next() out of the len(word) >= b.nGramSize condition so the iterator always advances.
|
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stackdriver
import (
"math"
"sync"
"time"
"github.com/Stackdriver/stackdriver-prometheus-sidecar/tail"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/config"
"golang.org/x/time/rate"
monitoring_pb "google.golang.org/genproto/googleapis/monitoring/v3"
)
// String constants for instrumentation.
const (
namespace = "prometheus"
subsystem = "remote_storage"
queue = "queue"
// We track samples in/out and how long pushes take using an Exponentially
// Weighted Moving Average.
ewmaWeight = 0.2
shardUpdateDuration = 15 * time.Second
// Limit to 1 log event every 10s
logRateLimit = 0.1
logBurst = 10
)
var (
succeededSamplesTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "succeeded_samples_total",
Help: "Total number of samples successfully sent to remote storage.",
},
[]string{queue},
)
failedSamplesTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "failed_samples_total",
Help: "Total number of samples which failed on send to remote storage.",
},
[]string{queue},
)
droppedSamplesTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "dropped_samples_total",
Help: "Total number of samples which were dropped due to the queue being full.",
},
[]string{queue},
)
sentBatchDuration = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "sent_batch_duration_seconds",
Help: "Duration of sample batch send calls to the remote storage.",
Buckets: prometheus.DefBuckets,
},
[]string{queue},
)
queueLength = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "queue_length",
Help: "The number of processed samples queued to be sent to the remote storage.",
},
[]string{queue},
)
queueCapacity = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "queue_capacity",
Help: "The capacity of the queue of samples to be sent to the remote storage.",
},
[]string{queue},
)
numShards = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "shards",
Help: "The number of shards used for parallel sending to the remote storage.",
},
[]string{queue},
)
)
func init() {
prometheus.MustRegister(succeededSamplesTotal)
prometheus.MustRegister(failedSamplesTotal)
prometheus.MustRegister(droppedSamplesTotal)
prometheus.MustRegister(sentBatchDuration)
prometheus.MustRegister(queueLength)
prometheus.MustRegister(queueCapacity)
prometheus.MustRegister(numShards)
}
// StorageClient defines an interface for sending a batch of samples to an
// external timeseries database.
type StorageClient interface {
// Store stores the given metric families in the remote storage.
Store(*monitoring_pb.CreateTimeSeriesRequest) error
// Release the resources allocated by the client.
Close() error
}
// StorageClientFactory creates StorageClients. Name identifies the backing
// queue and is used as the metric label value for instrumentation.
type StorageClientFactory interface {
New() StorageClient
Name() string
}
// QueueManager manages a queue of samples to be sent to the Storage
// indicated by the provided StorageClient.
type QueueManager struct {
logger log.Logger
cfg config.QueueConfig
clientFactory StorageClientFactory
queueName string
logLimiter *rate.Limiter
shardsMtx sync.RWMutex
shards *shardCollection
numShards int
reshardChan chan int
quit chan struct{}
wg sync.WaitGroup
samplesIn, samplesOut, samplesOutDuration *ewmaRate
walSize, walOffset *ewmaRate
tailer *tail.Tailer
lastSize, lastOffset int
}
// NewQueueManager builds a new QueueManager.
func NewQueueManager(logger log.Logger, cfg config.QueueConfig, clientFactory StorageClientFactory, tailer *tail.Tailer) (*QueueManager, error) {
if logger == nil {
logger = log.NewNopLogger()
}
t := &QueueManager{
logger: logger,
cfg: cfg,
clientFactory: clientFactory,
queueName: clientFactory.Name(),
logLimiter: rate.NewLimiter(logRateLimit, logBurst),
numShards: 1,
reshardChan: make(chan int),
quit: make(chan struct{}),
samplesIn: newEWMARate(ewmaWeight, shardUpdateDuration),
samplesOut: newEWMARate(ewmaWeight, shardUpdateDuration),
samplesOutDuration: newEWMARate(ewmaWeight, shardUpdateDuration),
walSize: newEWMARate(ewmaWeight, shardUpdateDuration),
walOffset: newEWMARate(ewmaWeight, shardUpdateDuration),
tailer: tailer,
}
lastSize, err := tailer.Size()
if err != nil {
return nil, errors.Wrap(err, "get WAL size")
}
t.lastSize = lastSize
t.lastOffset = tailer.Offset()
t.shards = t.newShardCollection(t.numShards)
numShards.WithLabelValues(t.queueName).Set(float64(t.numShards))
queueCapacity.WithLabelValues(t.queueName).Set(float64(t.cfg.Capacity))
// Initialise counter labels to zero.
sentBatchDuration.WithLabelValues(t.queueName)
succeededSamplesTotal.WithLabelValues(t.queueName)
failedSamplesTotal.WithLabelValues(t.queueName)
droppedSamplesTotal.WithLabelValues(t.queueName)
return t, nil
}
// Append queues a sample to be sent to the remote storage. The enqueue is
// a channel send on the target shard's buffered queue, so this call
// blocks (rather than dropping the sample) when that shard is full.
// Always returns nil.
func (t *QueueManager) Append(hash uint64, sample *monitoring_pb.TimeSeries) error {
queueLength.WithLabelValues(t.queueName).Inc()
t.shardsMtx.RLock()
t.shards.enqueue(hash, sample)
t.shardsMtx.RUnlock()
return nil
}
// Start the queue manager sending samples to the remote storage.
// Does not block.
func (t *QueueManager) Start() error {
t.wg.Add(2)
go t.updateShardsLoop()
go t.reshardLoop()
t.shardsMtx.Lock()
defer t.shardsMtx.Unlock()
t.shards.start()
return nil
}
// Stop stops sending samples to the remote storage and waits for pending
// sends to complete.
func (t *QueueManager) Stop() error {
level.Info(t.logger).Log("msg", "Stopping remote storage...")
close(t.quit)
t.wg.Wait()
t.shardsMtx.Lock()
defer t.shardsMtx.Unlock()
t.shards.stop()
level.Info(t.logger).Log("msg", "Remote storage stopped.")
return nil
}
func (t *QueueManager) updateShardsLoop() {
defer t.wg.Done()
ticker := time.NewTicker(shardUpdateDuration)
defer ticker.Stop()
for {
select {
case <-ticker.C:
t.calculateDesiredShards()
case <-t.quit:
return
}
}
}
func (t *QueueManager) calculateDesiredShards() {
// Get current wal size and offset but don't return on failure so we can
// always call tick() for all rates below.
wsz, err := t.tailer.Size()
if err != nil {
level.Error(t.logger).Log("msg", "get WAL size", "err", err)
}
woff := t.tailer.Offset()
t.walSize.incr(int64(wsz - t.lastSize))
t.walOffset.incr(int64(woff - t.lastOffset))
// The ewma rates are intialized with a specific interval at which we have to guarantee that
// tick is called for each.
// Since the current function is called every interval, this is the point where we do this
// for all rates at once. This ensures they are sensical to use for comparisons and computations
// with each other.
t.samplesIn.tick()
t.samplesOut.tick()
t.samplesOutDuration.tick()
t.walSize.tick()
t.walOffset.tick()
if err != nil {
return
}
var (
sizeRate = t.walSize.rate()
offsetRate = t.walOffset.rate()
samplesIn = t.samplesIn.rate()
samplesOut = t.samplesOut.rate()
samplesOutDuration = t.samplesOutDuration.rate()
)
t.lastSize = wsz
t.lastOffset = woff
if samplesOut == 0 {
return
}
// We compute desired amount of shards based on the time required to delivered a sample.
// We multiply by a weight of 1.5 to overprovision our number of shards. This ensures
// that if we can send more samples, the picked shard count has capacity for them.
// This ensures that we have a feedback loop that keeps growing shards on subsequent
// calculations until further increase does not increase the throughput anymore.
timePerSample := samplesOutDuration / samplesOut
desiredShards := (timePerSample / float64(time.Second)) * 1.5 * samplesIn
// If the WAL grows faster than we can process it, we are about to build up a backlog.
// We increase the shards proportionally to get the processing and growth rate to the same level.
// If we are processing the WAL faster than it grows, we are already working down a backlog
// and increase throughput as well.
if sizeRate >= offsetRate {
desiredShards *= sizeRate / offsetRate
} else {
desiredShards *= 1 + (1-(sizeRate/offsetRate))*1.5
}
level.Debug(t.logger).Log("msg", "QueueManager.calculateDesiredShards", "samplesIn", samplesIn,
"samplesOut", samplesOut, "samplesOutDuration", samplesOutDuration, "timePerSample", timePerSample,
"sizeRate", sizeRate, "offsetRate", offsetRate, "desiredShards", desiredShards)
// Only change number of shards if the change up or down is significant enough
// to justifty the caused disruption.
// We are more eager to increase the number of shards than to decrease it.
var (
lowerBound = float64(t.numShards) * 0.7
upperBound = float64(t.numShards) * 1.1
)
level.Debug(t.logger).Log("msg", "QueueManager.updateShardsLoop",
"lowerBound", lowerBound, "desiredShards", desiredShards, "upperBound", upperBound)
if lowerBound <= desiredShards && desiredShards <= upperBound {
return
}
numShards := int(math.Ceil(desiredShards))
if numShards > t.cfg.MaxShards {
numShards = t.cfg.MaxShards
} else if numShards < 1 {
numShards = 1
}
if numShards == t.numShards {
return
}
// Resharding can take some time, and we want this loop
// to stay close to shardUpdateDuration.
select {
case t.reshardChan <- numShards:
level.Debug(t.logger).Log("msg", "Remote storage resharding", "from", t.numShards, "to", numShards)
t.numShards = numShards
default:
level.Debug(t.logger).Log("msg", "Currently resharding, skipping", "to", numShards)
}
}
func (t *QueueManager) reshardLoop() {
defer t.wg.Done()
for {
select {
case numShards := <-t.reshardChan:
t.reshard(numShards)
case <-t.quit:
return
}
}
}
func (t *QueueManager) reshard(n int) {
numShards.WithLabelValues(t.queueName).Set(float64(n))
t.shardsMtx.Lock()
newShards := t.newShardCollection(n)
oldShards := t.shards
t.shards = newShards
oldShards.stop()
t.shardsMtx.Unlock()
// We start the newShards after we have stopped (the therefore completely
// flushed) the oldShards, to guarantee we only every deliver samples in
// order.
newShards.start()
}
// queueEntry pairs a sample with the series hash used for sharding and
// for duplicate detection within a batch.
type queueEntry struct {
hash uint64
sample *monitoring_pb.TimeSeries
}
// shard is a single send queue plus its per-batch duplicate-detection set.
type shard struct {
queue chan queueEntry
// A reusable cache of samples that were already seen in a sample batch.
seen map[uint64]struct{}
}
// resetSeen empties the dedup set in place, keeping the map's storage
// allocated for reuse across batches.
func (s *shard) resetSeen() {
for k := range s.seen {
delete(s.seen, k)
}
}
// newShard builds a shard whose queue buffer is sized by cfg.Capacity.
func newShard(cfg config.QueueConfig) shard {
return shard{
queue: make(chan queueEntry, cfg.Capacity),
seen: map[uint64]struct{}{},
}
}
type shardCollection struct {
qm *QueueManager
shards []shard
done chan struct{}
wg sync.WaitGroup
}
func (t *QueueManager) newShardCollection(numShards int) *shardCollection {
shards := make([]shard, numShards)
for i := 0; i < numShards; i++ {
shards[i] = newShard(t.cfg)
}
s := &shardCollection{
qm: t,
shards: shards,
done: make(chan struct{}),
}
s.wg.Add(numShards)
return s
}
// start launches one worker goroutine per shard.
func (s *shardCollection) start() {
for i := range s.shards {
go s.runShard(i)
}
}
// stop closes every shard queue and waits for the workers to flush their
// pending batches and exit. Must only be called once per collection.
func (s *shardCollection) stop() {
for _, shard := range s.shards {
close(shard.queue)
}
s.wg.Wait()
level.Debug(s.qm.logger).Log("msg", "Stopped resharding")
}
// enqueue routes a sample to its shard by hash. The channel send blocks
// when that shard's buffer is full, applying backpressure to the caller.
func (s *shardCollection) enqueue(hash uint64, sample *monitoring_pb.TimeSeries) {
s.qm.samplesIn.incr(1)
shardIndex := hash % uint64(len(s.shards))
s.shards[shardIndex].queue <- queueEntry{sample: sample, hash: hash}
}
func (s *shardCollection) runShard(i int) {
defer s.wg.Done()
client := s.qm.clientFactory.New()
defer client.Close()
shard := s.shards[i]
// Send batches of at most MaxSamplesPerSend samples to the remote storage.
// If we have fewer samples than that, flush them out after a deadline
// anyways.
pendingSamples := make([]*monitoring_pb.TimeSeries, 0, s.qm.cfg.MaxSamplesPerSend)
// Fingerprint of time series contained in pendingSamples. Gets reset
// whenever samples are extracted from pendingSamples.
shard.resetSeen()
timer := time.NewTimer(time.Duration(s.qm.cfg.BatchSendDeadline))
stop := func() {
if !timer.Stop() {
select {
case <-timer.C:
default:
}
}
}
defer stop()
for {
select {
case entry, ok := <-shard.queue:
fp, sample := entry.hash, entry.sample
if !ok {
if len(pendingSamples) > 0 {
s.sendSamples(client, pendingSamples)
}
return
}
queueLength.WithLabelValues(s.qm.queueName).Dec()
// If pendingSamples contains a point for the
// incoming time series, send all pending points
// to Stackdriver, and start a new list. This
// prevents adding two points for the same time
// series to a single request, which Stackdriver
// rejects.
_, seen := shard.seen[fp]
if !seen {
pendingSamples = append(pendingSamples, sample)
shard.seen[fp] = struct{}{}
}
if len(pendingSamples) >= s.qm.cfg.MaxSamplesPerSend || seen {
s.sendSamples(client, pendingSamples)
pendingSamples = pendingSamples[:0]
shard.resetSeen()
stop()
timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline))
}
if seen {
pendingSamples = append(pendingSamples, sample)
shard.seen[fp] = struct{}{}
}
case <-timer.C:
if len(pendingSamples) > 0 {
s.sendSamples(client, pendingSamples)
pendingSamples = pendingSamples[:0]
shard.resetSeen()
}
timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline))
}
}
}
func (s *shardCollection) sendSamples(client StorageClient, samples []*monitoring_pb.TimeSeries) {
begin := time.Now()
s.sendSamplesWithBackoff(client, samples)
// These counters are used to calculate the dynamic sharding, and as such
// should be maintained irrespective of success or failure.
s.qm.samplesOut.incr(int64(len(samples)))
s.qm.samplesOutDuration.incr(int64(time.Since(begin)))
}
// sendSamplesWithBackoff sends one batch to the remote storage, retrying
// indefinitely with exponential backoff (capped at cfg.MaxBackoff) while
// the error is a recoverableError. Any other error abandons the batch
// immediately; failedSamplesTotal is incremented only on abandonment.
func (s *shardCollection) sendSamplesWithBackoff(client StorageClient, samples []*monitoring_pb.TimeSeries) {
backoff := s.qm.cfg.MinBackoff
for {
begin := time.Now()
err := client.Store(&monitoring_pb.CreateTimeSeriesRequest{TimeSeries: samples})
sentBatchDuration.WithLabelValues(s.qm.queueName).Observe(time.Since(begin).Seconds())
if err == nil {
succeededSamplesTotal.WithLabelValues(s.qm.queueName).Add(float64(len(samples)))
return
}
if _, ok := err.(recoverableError); !ok {
level.Warn(s.qm.logger).Log("msg", "Unrecoverable error sending samples to remote storage", "err", err)
break
}
time.Sleep(time.Duration(backoff))
// Double the backoff each attempt, up to the configured maximum.
backoff = backoff * 2
if backoff > s.qm.cfg.MaxBackoff {
backoff = s.qm.cfg.MaxBackoff
}
}
failedSamplesTotal.WithLabelValues(s.qm.queueName).Add(float64(len(samples)))
}
Removed obsolete comment. (#165)
The intended behavior was always to block. Dropping data was one of the problems with the original remote storage that we intended to solve with our integration design (go/stackdriver-prometheus-ext). This code was adapted from upstream Prometheus server, and the comment came with the original code.
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stackdriver
import (
"math"
"sync"
"time"
"github.com/Stackdriver/stackdriver-prometheus-sidecar/tail"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/config"
"golang.org/x/time/rate"
monitoring_pb "google.golang.org/genproto/googleapis/monitoring/v3"
)
// String constants for instrumentation.
const (
namespace = "prometheus"
subsystem = "remote_storage"
queue = "queue"
// We track samples in/out and how long pushes take using an Exponentially
// Weighted Moving Average.
ewmaWeight = 0.2
shardUpdateDuration = 15 * time.Second
// Limit to 1 log event every 10s
logRateLimit = 0.1
logBurst = 10
)
var (
succeededSamplesTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "succeeded_samples_total",
Help: "Total number of samples successfully sent to remote storage.",
},
[]string{queue},
)
failedSamplesTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "failed_samples_total",
Help: "Total number of samples which failed on send to remote storage.",
},
[]string{queue},
)
droppedSamplesTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "dropped_samples_total",
Help: "Total number of samples which were dropped due to the queue being full.",
},
[]string{queue},
)
sentBatchDuration = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "sent_batch_duration_seconds",
Help: "Duration of sample batch send calls to the remote storage.",
Buckets: prometheus.DefBuckets,
},
[]string{queue},
)
queueLength = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "queue_length",
Help: "The number of processed samples queued to be sent to the remote storage.",
},
[]string{queue},
)
queueCapacity = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "queue_capacity",
Help: "The capacity of the queue of samples to be sent to the remote storage.",
},
[]string{queue},
)
numShards = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "shards",
Help: "The number of shards used for parallel sending to the remote storage.",
},
[]string{queue},
)
)
// init registers every queue-manager metric with the default
// Prometheus registry at program start-up.
func init() {
	collectors := []prometheus.Collector{
		succeededSamplesTotal,
		failedSamplesTotal,
		droppedSamplesTotal,
		sentBatchDuration,
		queueLength,
		queueCapacity,
		numShards,
	}
	for _, c := range collectors {
		prometheus.MustRegister(c)
	}
}
// StorageClient defines an interface for sending a batch of samples to an
// external timeseries database.
type StorageClient interface {
	// Store stores the given metric families in the remote storage.
	Store(*monitoring_pb.CreateTimeSeriesRequest) error
	// Release the resources allocated by the client.
	Close() error
}

// StorageClientFactory creates StorageClients; each shard worker
// obtains its own client via New (see runShard).
type StorageClientFactory interface {
	New() StorageClient
	Name() string
}
// QueueManager manages a queue of samples to be sent to the Storage
// indicated by the provided StorageClient.
type QueueManager struct {
	logger        log.Logger
	cfg           config.QueueConfig
	clientFactory StorageClientFactory
	queueName     string
	logLimiter    *rate.Limiter // constructed with logRateLimit/logBurst

	shardsMtx sync.RWMutex // guards shards
	shards    *shardCollection
	numShards int

	reshardChan chan int      // desired shard counts from calculateDesiredShards
	quit        chan struct{} // closed by Stop to end the background loops
	wg          sync.WaitGroup

	// EWMA rates feeding the dynamic sharding calculation.
	samplesIn, samplesOut, samplesOutDuration *ewmaRate
	walSize, walOffset                        *ewmaRate

	tailer *tail.Tailer
	// WAL size/offset observed at the previous sharding calculation.
	lastSize, lastOffset int
}
// NewQueueManager builds a new QueueManager. A nil logger is replaced
// with a no-op logger. It starts with a single shard; dynamic sharding
// adjusts the count later. Returns an error if the WAL size cannot be
// read from the tailer.
func NewQueueManager(logger log.Logger, cfg config.QueueConfig, clientFactory StorageClientFactory, tailer *tail.Tailer) (*QueueManager, error) {
	if logger == nil {
		logger = log.NewNopLogger()
	}
	t := &QueueManager{
		logger:             logger,
		cfg:                cfg,
		clientFactory:      clientFactory,
		queueName:          clientFactory.Name(),
		logLimiter:         rate.NewLimiter(logRateLimit, logBurst),
		numShards:          1,
		reshardChan:        make(chan int),
		quit:               make(chan struct{}),
		samplesIn:          newEWMARate(ewmaWeight, shardUpdateDuration),
		samplesOut:         newEWMARate(ewmaWeight, shardUpdateDuration),
		samplesOutDuration: newEWMARate(ewmaWeight, shardUpdateDuration),
		walSize:            newEWMARate(ewmaWeight, shardUpdateDuration),
		walOffset:          newEWMARate(ewmaWeight, shardUpdateDuration),
		tailer:             tailer,
	}
	// Seed the WAL baselines used by calculateDesiredShards.
	lastSize, err := tailer.Size()
	if err != nil {
		return nil, errors.Wrap(err, "get WAL size")
	}
	t.lastSize = lastSize
	t.lastOffset = tailer.Offset()
	t.shards = t.newShardCollection(t.numShards)
	numShards.WithLabelValues(t.queueName).Set(float64(t.numShards))
	queueCapacity.WithLabelValues(t.queueName).Set(float64(t.cfg.Capacity))
	// Initialise counter labels to zero.
	sentBatchDuration.WithLabelValues(t.queueName)
	succeededSamplesTotal.WithLabelValues(t.queueName)
	failedSamplesTotal.WithLabelValues(t.queueName)
	droppedSamplesTotal.WithLabelValues(t.queueName)
	return t, nil
}
// Append queues a sample to be sent to the Stackdriver API.
// Always returns nil.
func (t *QueueManager) Append(hash uint64, sample *monitoring_pb.TimeSeries) error {
	queueLength.WithLabelValues(t.queueName).Inc()
	t.shardsMtx.RLock()
	defer t.shardsMtx.RUnlock()
	t.shards.enqueue(hash, sample)
	return nil
}
// Start the queue manager sending samples to the remote storage.
// Does not block.
func (t *QueueManager) Start() error {
	// One Done per background loop: updateShardsLoop and reshardLoop.
	t.wg.Add(2)
	go t.updateShardsLoop()
	go t.reshardLoop()
	t.shardsMtx.Lock()
	defer t.shardsMtx.Unlock()
	t.shards.start()
	return nil
}
// Stop stops sending samples to the remote storage and waits for pending
// sends to complete. The background loops are stopped first (via quit),
// then the shards are closed and drained.
func (t *QueueManager) Stop() error {
	level.Info(t.logger).Log("msg", "Stopping remote storage...")
	close(t.quit)
	t.wg.Wait()
	t.shardsMtx.Lock()
	defer t.shardsMtx.Unlock()
	t.shards.stop()
	level.Info(t.logger).Log("msg", "Remote storage stopped.")
	return nil
}
// updateShardsLoop recomputes the desired shard count every
// shardUpdateDuration until the manager is told to quit.
func (t *QueueManager) updateShardsLoop() {
	defer t.wg.Done()
	tick := time.NewTicker(shardUpdateDuration)
	defer tick.Stop()
	for {
		select {
		case <-t.quit:
			return
		case <-tick.C:
			t.calculateDesiredShards()
		}
	}
}
// calculateDesiredShards recomputes the number of shards from the
// observed sample throughput and WAL growth/consumption rates, and
// hands a significantly different count to reshardLoop. Invoked every
// shardUpdateDuration by updateShardsLoop.
func (t *QueueManager) calculateDesiredShards() {
	// Get current wal size and offset but don't return on failure so we can
	// always call tick() for all rates below.
	wsz, err := t.tailer.Size()
	if err != nil {
		level.Error(t.logger).Log("msg", "get WAL size", "err", err)
	}
	woff := t.tailer.Offset()
	t.walSize.incr(int64(wsz - t.lastSize))
	t.walOffset.incr(int64(woff - t.lastOffset))
	// The ewma rates are initialized with a specific interval at which we have to guarantee that
	// tick is called for each.
	// Since the current function is called every interval, this is the point where we do this
	// for all rates at once. This ensures they are sensical to use for comparisons and computations
	// with each other.
	t.samplesIn.tick()
	t.samplesOut.tick()
	t.samplesOutDuration.tick()
	t.walSize.tick()
	t.walOffset.tick()
	// Bail out (after ticking) if the WAL size could not be read.
	if err != nil {
		return
	}
	var (
		sizeRate           = t.walSize.rate()
		offsetRate         = t.walOffset.rate()
		samplesIn          = t.samplesIn.rate()
		samplesOut         = t.samplesOut.rate()
		samplesOutDuration = t.samplesOutDuration.rate()
	)
	t.lastSize = wsz
	t.lastOffset = woff
	// No deliveries yet: no basis for a calculation.
	if samplesOut == 0 {
		return
	}
	// We compute desired amount of shards based on the time required to deliver a sample.
	// We multiply by a weight of 1.5 to overprovision our number of shards. This ensures
	// that if we can send more samples, the picked shard count has capacity for them.
	// This ensures that we have a feedback loop that keeps growing shards on subsequent
	// calculations until further increase does not increase the throughput anymore.
	timePerSample := samplesOutDuration / samplesOut
	desiredShards := (timePerSample / float64(time.Second)) * 1.5 * samplesIn
	// If the WAL grows faster than we can process it, we are about to build up a backlog.
	// We increase the shards proportionally to get the processing and growth rate to the same level.
	// If we are processing the WAL faster than it grows, we are already working down a backlog
	// and increase throughput as well.
	if sizeRate >= offsetRate {
		desiredShards *= sizeRate / offsetRate
	} else {
		desiredShards *= 1 + (1-(sizeRate/offsetRate))*1.5
	}
	level.Debug(t.logger).Log("msg", "QueueManager.calculateDesiredShards", "samplesIn", samplesIn,
		"samplesOut", samplesOut, "samplesOutDuration", samplesOutDuration, "timePerSample", timePerSample,
		"sizeRate", sizeRate, "offsetRate", offsetRate, "desiredShards", desiredShards)
	// Only change number of shards if the change up or down is significant enough
	// to justify the caused disruption.
	// We are more eager to increase the number of shards than to decrease it.
	var (
		lowerBound = float64(t.numShards) * 0.7
		upperBound = float64(t.numShards) * 1.1
	)
	level.Debug(t.logger).Log("msg", "QueueManager.updateShardsLoop",
		"lowerBound", lowerBound, "desiredShards", desiredShards, "upperBound", upperBound)
	if lowerBound <= desiredShards && desiredShards <= upperBound {
		return
	}
	// Clamp to [1, MaxShards].
	numShards := int(math.Ceil(desiredShards))
	if numShards > t.cfg.MaxShards {
		numShards = t.cfg.MaxShards
	} else if numShards < 1 {
		numShards = 1
	}
	if numShards == t.numShards {
		return
	}
	// Resharding can take some time, and we want this loop
	// to stay close to shardUpdateDuration.
	select {
	case t.reshardChan <- numShards:
		level.Debug(t.logger).Log("msg", "Remote storage resharding", "from", t.numShards, "to", numShards)
		t.numShards = numShards
	default:
		level.Debug(t.logger).Log("msg", "Currently resharding, skipping", "to", numShards)
	}
}
// reshardLoop applies shard-count changes requested by
// calculateDesiredShards until the manager quits.
func (t *QueueManager) reshardLoop() {
	defer t.wg.Done()
	for {
		select {
		case <-t.quit:
			return
		case n := <-t.reshardChan:
			t.reshard(n)
		}
	}
}
// reshard swaps in a fresh collection of n shards. The old collection
// is stopped (and therefore fully flushed) while the shards mutex is
// held, so Append blocks during the swap.
func (t *QueueManager) reshard(n int) {
	numShards.WithLabelValues(t.queueName).Set(float64(n))
	t.shardsMtx.Lock()
	newShards := t.newShardCollection(n)
	oldShards := t.shards
	t.shards = newShards
	oldShards.stop()
	t.shardsMtx.Unlock()
	// We start the newShards after we have stopped (and therefore completely
	// flushed) the oldShards, to guarantee we only ever deliver samples in
	// order.
	newShards.start()
}
// queueEntry is one sample paired with the series hash used for shard
// routing and duplicate detection within a batch.
type queueEntry struct {
	hash   uint64
	sample *monitoring_pb.TimeSeries
}

type shard struct {
	queue chan queueEntry
	// A reusable cache of samples that were already seen in a sample batch.
	seen map[uint64]struct{}
}

// resetSeen empties the seen-set in place so the map (and its buckets)
// can be reused for the next batch.
func (s *shard) resetSeen() {
	for k := range s.seen {
		delete(s.seen, k)
	}
}
// newShard builds one shard whose queue is buffered to cfg.Capacity.
func newShard(cfg config.QueueConfig) shard {
	return shard{
		queue: make(chan queueEntry, cfg.Capacity),
		seen:  make(map[uint64]struct{}),
	}
}
// shardCollection groups a fixed set of shards with the WaitGroup used
// to wait for their worker goroutines to exit.
type shardCollection struct {
	qm     *QueueManager
	shards []shard
	done   chan struct{}
	wg     sync.WaitGroup
}

// newShardCollection allocates numShards shards for this manager. The
// WaitGroup is armed here; each runShard goroutine calls Done on exit.
func (t *QueueManager) newShardCollection(numShards int) *shardCollection {
	shards := make([]shard, numShards)
	for i := 0; i < numShards; i++ {
		shards[i] = newShard(t.cfg)
	}
	s := &shardCollection{
		qm:     t,
		shards: shards,
		done:   make(chan struct{}),
	}
	s.wg.Add(numShards)
	return s
}
// start launches one worker goroutine per shard.
func (s *shardCollection) start() {
	for i := 0; i < len(s.shards); i++ {
		go s.runShard(i)
	}
}
// stop closes every shard queue and waits for the worker goroutines to
// drain their pending batches and exit.
func (s *shardCollection) stop() {
	for _, shard := range s.shards {
		close(shard.queue)
	}
	s.wg.Wait()
	level.Debug(s.qm.logger).Log("msg", "Stopped resharding")
}
// enqueue routes a sample to the shard selected by its series hash, so
// all points of one series go to the same shard (preserving per-series
// order). Blocks when that shard's queue is full.
func (s *shardCollection) enqueue(hash uint64, sample *monitoring_pb.TimeSeries) {
	s.qm.samplesIn.incr(1)
	shardIndex := hash % uint64(len(s.shards))
	s.shards[shardIndex].queue <- queueEntry{sample: sample, hash: hash}
}
// runShard is the per-shard worker: it batches queued samples and
// flushes a batch when it reaches MaxSamplesPerSend, when a duplicate
// series fingerprint arrives, or when BatchSendDeadline expires.
func (s *shardCollection) runShard(i int) {
	defer s.wg.Done()
	client := s.qm.clientFactory.New()
	defer client.Close()
	shard := s.shards[i]
	// Send batches of at most MaxSamplesPerSend samples to the remote storage.
	// If we have fewer samples than that, flush them out after a deadline
	// anyways.
	pendingSamples := make([]*monitoring_pb.TimeSeries, 0, s.qm.cfg.MaxSamplesPerSend)
	// Fingerprint of time series contained in pendingSamples. Gets reset
	// whenever samples are extracted from pendingSamples.
	shard.resetSeen()
	timer := time.NewTimer(time.Duration(s.qm.cfg.BatchSendDeadline))
	// stop halts and drains the timer so a later Reset starts clean.
	stop := func() {
		if !timer.Stop() {
			select {
			case <-timer.C:
			default:
			}
		}
	}
	defer stop()
	for {
		select {
		case entry, ok := <-shard.queue:
			fp, sample := entry.hash, entry.sample
			if !ok {
				// Queue closed (collection stopping): flush and exit.
				if len(pendingSamples) > 0 {
					s.sendSamples(client, pendingSamples)
				}
				return
			}
			queueLength.WithLabelValues(s.qm.queueName).Dec()
			// If pendingSamples contains a point for the
			// incoming time series, send all pending points
			// to Stackdriver, and start a new list. This
			// prevents adding two points for the same time
			// series to a single request, which Stackdriver
			// rejects.
			_, seen := shard.seen[fp]
			if !seen {
				pendingSamples = append(pendingSamples, sample)
				shard.seen[fp] = struct{}{}
			}
			if len(pendingSamples) >= s.qm.cfg.MaxSamplesPerSend || seen {
				s.sendSamples(client, pendingSamples)
				pendingSamples = pendingSamples[:0]
				shard.resetSeen()
				stop()
				timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline))
			}
			if seen {
				// The duplicate forced a flush above; it now becomes the
				// first sample of the fresh batch.
				pendingSamples = append(pendingSamples, sample)
				shard.seen[fp] = struct{}{}
			}
		case <-timer.C:
			if len(pendingSamples) > 0 {
				s.sendSamples(client, pendingSamples)
				pendingSamples = pendingSamples[:0]
				shard.resetSeen()
			}
			timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline))
		}
	}
}
// sendSamples pushes one batch (with retry/backoff) and records the
// batch size and duration.
func (s *shardCollection) sendSamples(client StorageClient, samples []*monitoring_pb.TimeSeries) {
	start := time.Now()
	s.sendSamplesWithBackoff(client, samples)
	// Feed the dynamic-sharding counters regardless of whether the
	// delivery ultimately succeeded or failed.
	s.qm.samplesOut.incr(int64(len(samples)))
	s.qm.samplesOutDuration.incr(int64(time.Since(start)))
}
// sendSamplesWithBackoff sends samples to the remote storage, retrying
// recoverable errors forever with exponential backoff (doubled per try,
// capped at MaxBackoff). Non-recoverable errors abort immediately and
// are counted in failedSamplesTotal.
func (s *shardCollection) sendSamplesWithBackoff(client StorageClient, samples []*monitoring_pb.TimeSeries) {
	backoff := s.qm.cfg.MinBackoff
	for {
		begin := time.Now()
		err := client.Store(&monitoring_pb.CreateTimeSeriesRequest{TimeSeries: samples})
		sentBatchDuration.WithLabelValues(s.qm.queueName).Observe(time.Since(begin).Seconds())
		if err == nil {
			succeededSamplesTotal.WithLabelValues(s.qm.queueName).Add(float64(len(samples)))
			return
		}
		// Only errors explicitly wrapped as recoverable are retried.
		if _, ok := err.(recoverableError); !ok {
			level.Warn(s.qm.logger).Log("msg", "Unrecoverable error sending samples to remote storage", "err", err)
			break
		}
		time.Sleep(time.Duration(backoff))
		backoff = backoff * 2
		if backoff > s.qm.cfg.MaxBackoff {
			backoff = s.qm.cfg.MaxBackoff
		}
	}
	failedSamplesTotal.WithLabelValues(s.qm.queueName).Add(float64(len(samples)))
}
|
// Copyright (c) 2015, Ștefan Talpalaru <stefantalpalaru@yahoo.com>
// All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package main
import (
// "fmt"
"reflect"
"runtime"
"time"
"unsafe"
)
// keep the channels around to avoid garbage collection while they are used in C
var channels = map[chan *int]bool{}
// Golib_init is a no-op initialization hook exposed to the C side.
func Golib_init() {
}
// Chan_make creates a chan *int buffered to size and records it in the
// package-level channels map so the GC keeps it alive while only the C
// side holds a reference.
func Chan_make(size int) chan *int {
	c := make(chan *int, size)
	channels[c] = true
	return c
}
// Chan_send delivers v on c, blocking until a receiver is ready or a
// buffer slot is free — identical to a plain channel send.
func Chan_send(c chan *int, v *int) {
	c <- v
}
// Chan_recv blocks until a value arrives on c and returns it.
func Chan_recv(c chan *int) *int {
	return <-c
}
// Chan_recv2 receives from c and also reports whether the value came
// from a real send (ok is false once c is closed and drained).
func Chan_recv2(c chan *int) (*int, bool) {
	v, ok := <-c
	return v, ok
}
// Chan_close closes c; subsequent receives yield nil with ok == false.
func Chan_close(c chan *int) {
	close(c)
}
// Chan_dispose drops the liveness reference taken by Chan_make,
// allowing c to be garbage collected once C no longer uses it.
func Chan_dispose(c chan *int) {
	delete(channels, c)
}
// Chan_select_case mirrors reflect.SelectCase for the C API: Dir is a
// reflect.SelectDir value, Chan the channel involved (may be nil), and
// Send the value to send for send-direction cases (nil otherwise).
type Chan_select_case struct {
	Dir  int
	Chan chan *int
	Send *int
}
// Chan_select runs a reflect-based select over the num_cases cases in
// the C-side array at cases. It returns the index of the chosen case,
// the received value (nil for send cases or closed channels), and
// whether the received value corresponds to a real send.
func Chan_select(cases *Chan_select_case, num_cases int) (chosen int, recv *int, recv_ok bool) {
	select_cases := make([]reflect.SelectCase, num_cases)
	// Reinterpret the C pointer as a (nominally huge) Go array so it
	// can be indexed; only the first num_cases entries are touched.
	cases2 := (*[1 << 30]Chan_select_case)(unsafe.Pointer(cases))
	for i := 0; i < num_cases; i++ {
		dir := reflect.SelectDir(cases2[i].Dir)
		// somehow the Value of a typed nil is not the zero Value
		var c reflect.Value
		if cases2[i].Chan == (chan *int)(nil) {
			c = reflect.ValueOf(nil)
		} else {
			c = reflect.ValueOf(cases2[i].Chan)
		}
		var send reflect.Value
		if cases2[i].Send == (*int)(nil) {
			send = reflect.ValueOf(nil)
		} else {
			send = reflect.ValueOf(cases2[i].Send)
		}
		select_cases[i] = reflect.SelectCase{dir, c, send}
	}
	var recv_val reflect.Value
	chosen, recv_val, recv_ok = reflect.Select(select_cases)
	// recv_val is invalid for send cases; map that to a nil pointer.
	if recv_val.IsValid() {
		recv = recv_val.Interface().(*int)
	} else {
		recv = (*int)(nil)
	}
	return
}
func Sleep_ms(n int64) {
time.Sleep((time.Duration)(n) * time.Millisecond)
}
// Set_finalizer attempts to attach finalizer to the object at obj.
// NOTE(review): runtime.SetFinalizer requires its first argument to be
// a pointer to an object allocated by Go; an unsafe.Pointer boxed into
// an interface{} does not satisfy that and SetFinalizer will panic at
// runtime — confirm whether this path is ever exercised.
func Set_finalizer(obj unsafe.Pointer, finalizer func(unsafe.Pointer)) {
	runtime.SetFinalizer((interface{})(obj), finalizer)
}
try []byte
// Copyright (c) 2015, Ștefan Talpalaru <stefantalpalaru@yahoo.com>
// All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package main
import (
// "fmt"
"reflect"
"runtime"
"time"
"unsafe"
)
// keep the channels around to avoid garbage collection while they are used in C
var channels = map[chan *int]bool{}
// Golib_init is a no-op initialization hook exposed to the C side.
func Golib_init() {
}
// Chan_make creates a chan *int buffered to size and records it in the
// package-level channels map so the GC keeps it alive for the C side.
func Chan_make(size int) chan *int {
	c := make(chan *int, size)
	channels[c] = true
	return c
}
// Chan_send sends v on c, blocking like a plain channel send.
func Chan_send(c chan *int, v *int) {
	c <- v
}
// Chan_recv blocks until a value arrives on c and returns it.
func Chan_recv(c chan *int) (v *int) {
	v = <-c
	return
}
// Chan_recv2 receives from c and reports whether the value came from a
// real send (ok is false once c is closed and drained).
func Chan_recv2(c chan *int) (v *int, ok bool) {
	v, ok = <-c
	return
}
// Chan_close closes c; further receives yield nil with ok == false.
func Chan_close(c chan *int) {
	close(c)
}
// Chan_dispose drops the liveness reference taken by Chan_make.
func Chan_dispose(c chan *int) {
	delete(channels, c)
}
// Chan_select_case mirrors reflect.SelectCase for the C API: Dir is a
// reflect.SelectDir value, Chan the channel (may be nil), and Send the
// value for send-direction cases (nil otherwise).
type Chan_select_case struct {
	Dir  int
	Chan chan *int
	Send *int
}
// Chan_select runs a reflect-based select over the num_cases cases in
// the C-side array at cases, returning the chosen case index, the
// received value (nil for sends / closed channels) and whether the
// receive corresponds to a real send.
func Chan_select(cases *Chan_select_case, num_cases int) (chosen int, recv *int, recv_ok bool) {
	select_cases := make([]reflect.SelectCase, num_cases)
	// Reinterpret the C pointer as an indexable Go array; only the
	// first num_cases entries are touched.
	cases2 := (*[1 << 30]Chan_select_case)(unsafe.Pointer(cases))
	for i := 0; i < num_cases; i++ {
		dir := reflect.SelectDir(cases2[i].Dir)
		// somehow the Value of a typed nil is not the zero Value
		var c reflect.Value
		if cases2[i].Chan == (chan *int)(nil) {
			c = reflect.ValueOf(nil)
		} else {
			c = reflect.ValueOf(cases2[i].Chan)
		}
		var send reflect.Value
		if cases2[i].Send == (*int)(nil) {
			send = reflect.ValueOf(nil)
		} else {
			send = reflect.ValueOf(cases2[i].Send)
		}
		select_cases[i] = reflect.SelectCase{dir, c, send}
	}
	var recv_val reflect.Value
	chosen, recv_val, recv_ok = reflect.Select(select_cases)
	// recv_val is invalid for send cases; map that to a nil pointer.
	if recv_val.IsValid() {
		recv = recv_val.Interface().(*int)
	} else {
		recv = (*int)(nil)
	}
	return
}
// Sleep_ms blocks the calling goroutine for n milliseconds.
func Sleep_ms(n int64) {
	time.Sleep((time.Duration)(n) * time.Millisecond)
}
// Set_finalizer attempts to attach finalizer to obj.
// NOTE(review): runtime.SetFinalizer requires its first argument to be
// a pointer to an allocated object; passing a []byte slice value will
// panic at runtime — confirm whether this path is ever exercised.
func Set_finalizer(obj []byte, finalizer func([]byte)) {
	runtime.SetFinalizer(obj, finalizer)
}
|
// Package npyio provides read/write access to files following the NumPy data file format:
// http://docs.scipy.org/doc/numpy-1.10.1/neps/npy-format.html
//
// Example:
//
// f, err := os.Open("data.npz")
// r, err := npyio.NewReader(f)
// data, err := r.Read()
// nrows, ncols := data.Dims()
// for i := 0; i < nrows; i++ {
// for j := 0; j < ncols; j++ {
// fmt.Printf("data[%d][%d] = %v\n", i, j, data.At(i,j))
// }
// }
package npyio
npyio: update doc
// Package npyio provides read/write access to files following the NumPy data file format:
// http://docs.scipy.org/doc/numpy-1.10.1/neps/npy-format.html
//
// Example:
//
// f, err := os.Open("data.npz")
// r, err := npyio.NewReader(f)
// var m mat64.Dense
// err = r.Read(&m)
// fmt.Printf("data = %v\n", mat64.Formatted(&m, mat64.Prefix(" ")))
//
// npyio can also read data directly into slices, arrays or scalars, provided
// there is a valid type conversion [numpy-data-type]->[go-type].
//
// Example:
// var data []float64
// err = r.Read(&data)
//
// var data uint64
// err = r.Read(&data)
package npyio
|
// Copyright 2010-2012 The W32 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package w32
import (
	"errors"
	"strconv"
	"syscall"
	"unsafe"
)
// NtAlpcCreatePort lives in ntdll.dll. The previous declaration named
// the module modkernel32 (despite loading ntdll.dll) and resolved the
// proc from an undefined modadvapi32, which did not compile.
var (
	modntdll             = syscall.NewLazyDLL("ntdll.dll")
	procNtAlpcCreatePort = modntdll.NewProc("NtAlpcCreatePort")
)
// newUnicodeString builds a UNICODE_STRING backed by the UTF-16
// encoding of s (including the terminating NUL added by
// UTF16FromString).
// NOTE(review): Length/MaximumLength are set to the count of UTF-16
// code units including the terminator; the Windows convention is byte
// counts, with Length excluding the terminator — confirm against the
// native consumers of UNICODE_STRING.
// NOTE(review): Buffer points into a Go-allocated slice; the caller
// must keep the result alive while the native side uses it.
func newUnicodeString(s string) (us UNICODE_STRING, e error) {
	ustr, err := syscall.UTF16FromString(s)
	if err != nil {
		e = err
		return
	}
	us.Length = len(ustr)
	us.MaximumLength = len(ustr)
	us.Buffer = unsafe.Pointer(&ustr[0])
	return
}
// (this is a macro)
// VOID InitializeObjectAttributes(
// [out] POBJECT_ATTRIBUTES InitializedAttributes,
// [in] PUNICODE_STRING ObjectName,
// [in] ULONG Attributes,
// [in] HANDLE RootDirectory,
// [in, optional] PSECURITY_DESCRIPTOR SecurityDescriptor
// )
// NewObjectAttributes mirrors the InitializeObjectAttributes macro
// (see the comment above): it fills an OBJECT_ATTRIBUTES from a name,
// attribute flags, an optional root-directory handle and an optional
// security descriptor. Fails only if the name cannot be encoded.
func NewObjectAttributes(
	name string,
	attributes uint32,
	rootDir HANDLE,
	pSecurityDescriptor *SECURITY_DESCRIPTOR,
) (objectAttributes OBJECT_ATTRIBUTES, e error) {
	unicodeString, err := newUnicodeString(name)
	if err != nil {
		e = err
		return
	}
	objectAttributes = OBJECT_ATTRIBUTES{
		RootDirectory:      rootDir,
		ObjectName:         &unicodeString,
		Attributes:         attributes,
		SecurityDescriptor: pSecurityDescriptor,
	}
	return
}
// # NTSTATUS
// # NtAlpcCreatePort(
// #   __out PHANDLE PortHandle,
// #   __in POBJECT_ATTRIBUTES ObjectAttributes,
// #   __in_opt PALPC_PORT_ATTRIBUTES PortAttributes
// # );
//
// NtAlpcCreatePort wraps the native call, returning the created port
// handle or an error carrying the NTSTATUS code on failure.
func NtAlpcCreatePort(pObjectAttributes *OBJECT_ATTRIBUTES, pPortAttributes *ALPC_PORT_ATTRIBUTES) (hPort HANDLE, e error) {
	pHandle := &hPort
	ret, _, _ := procNtAlpcCreatePort.Call(
		uintptr(unsafe.Pointer(pHandle)),
		uintptr(unsafe.Pointer(pObjectAttributes)),
		uintptr(unsafe.Pointer(pPortAttributes)),
	)
	if ret != ERROR_SUCCESS {
		// errors.New(ret) did not compile: errors.New takes a string,
		// not a uintptr. Report the raw NTSTATUS in hex instead.
		e = errors.New("NtAlpcCreatePort failed: NTSTATUS 0x" + strconv.FormatUint(uint64(ret), 16))
	}
	return
}
bugfix
// Copyright 2010-2012 The W32 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package w32
import (
	"errors"
	"strconv"
	"syscall"
	"unsafe"
)
var (
modntdll = syscall.NewLazyDLL("ntdll.dll")
procNtAlpcCreatePort = modntdll.NewProc("NtAlpcCreatePort")
)
// newUnicodeString builds a UNICODE_STRING backed by the UTF-16
// encoding of s (including the NUL added by UTF16FromString).
// NOTE(review): Length/MaximumLength are code-unit counts including
// the terminator; the Windows convention is byte counts with Length
// excluding the terminator — confirm against the native consumers.
func newUnicodeString(s string) (us UNICODE_STRING, e error) {
	ustr, err := syscall.UTF16FromString(s)
	if err != nil {
		e = err
		return
	}
	us.Length = len(ustr)
	us.MaximumLength = len(ustr)
	us.Buffer = unsafe.Pointer(&ustr[0])
	return
}
// (this is a macro)
// VOID InitializeObjectAttributes(
// [out] POBJECT_ATTRIBUTES InitializedAttributes,
// [in] PUNICODE_STRING ObjectName,
// [in] ULONG Attributes,
// [in] HANDLE RootDirectory,
// [in, optional] PSECURITY_DESCRIPTOR SecurityDescriptor
// )
// NewObjectAttributes mirrors the InitializeObjectAttributes macro
// (see the comment above): it fills an OBJECT_ATTRIBUTES from a name,
// attribute flags, an optional root-directory handle and an optional
// security descriptor. Fails only if the name cannot be encoded.
func NewObjectAttributes(
	name string,
	attributes uint32,
	rootDir HANDLE,
	pSecurityDescriptor *SECURITY_DESCRIPTOR,
) (objectAttributes OBJECT_ATTRIBUTES, e error) {
	unicodeString, err := newUnicodeString(name)
	if err != nil {
		e = err
		return
	}
	objectAttributes = OBJECT_ATTRIBUTES{
		RootDirectory:      rootDir,
		ObjectName:         &unicodeString,
		Attributes:         attributes,
		SecurityDescriptor: pSecurityDescriptor,
	}
	return
}
// # NTSTATUS
// # NtAlpcCreatePort(
// #   __out PHANDLE PortHandle,
// #   __in POBJECT_ATTRIBUTES ObjectAttributes,
// #   __in_opt PALPC_PORT_ATTRIBUTES PortAttributes
// # );
//
// NtAlpcCreatePort wraps the native call, returning the created port
// handle or an error carrying the NTSTATUS code on failure.
func NtAlpcCreatePort(pObjectAttributes *OBJECT_ATTRIBUTES, pPortAttributes *ALPC_PORT_ATTRIBUTES) (hPort HANDLE, e error) {
	pHandle := &hPort
	ret, _, _ := procNtAlpcCreatePort.Call(
		uintptr(unsafe.Pointer(pHandle)),
		uintptr(unsafe.Pointer(pObjectAttributes)),
		uintptr(unsafe.Pointer(pPortAttributes)),
	)
	if ret != ERROR_SUCCESS {
		// errors.New(ret) did not compile: errors.New takes a string,
		// not a uintptr. Report the raw NTSTATUS in hex instead.
		e = errors.New("NtAlpcCreatePort failed: NTSTATUS 0x" + strconv.FormatUint(uint64(ret), 16))
	}
	return
}
|
// OAuth 1.0 consumer implementation.
// See http://www.oauth.net and RFC 5849
//
// There are typically three parties involved in an OAuth exchange:
// (1) The "Service Provider" (e.g. Google, Twitter, NetFlix) who operates the
// service where the data resides.
// (2) The "End User" who owns that data, and wants to grant access to a third-party.
// (3) That third-party who wants access to the data (after first be authorized by the
// user). This third-party is referred to as the "Consumer" in OAuth terminology.
//
// This library is designed to help implement the third-party consumer by handling the
// low-level authentication tasks, and allowing for authenticated requests to the
// service provider on behalf of the user.
//
// Caveats:
// - Currently only supports HMAC-SHA1 signatures.
// - Currently only supports HTTP-Get requests.
// - Currently only supports OAuth 1.0
//
// Overview of how to use this library:
// (1) First create a new Consumer instance with the NewConsumer function
// (2) Get a RequestToken, and "authorization url" from GetRequestTokenAndUrl()
// (3) Save the RequestToken, you will need it again in step 6.
// (4) Redirect the user to the "authorization url" from step 2, where they will authorize
// your access to the service provider.
// (5) Wait. You will be called back on the CallbackUrl that you provide, and you
// will receive a "verification code".
// (6) Call AuthorizeToken() with the RequestToken from step 2 and the "verification code"
// from step 5.
// (7) You will get back an AccessToken. Save this for as long as you need access to
// the user's data, and treat it like a password; it is a secret.
// (8) You can now throw away the RequestToken from step 2, it is no longer necessary.
// (9) Call "Get" using the AccessToken from step 7 to access protected resources.
package oauth
import (
"crypto/hmac"
"encoding/base64"
"fmt"
"http"
"io"
"io/ioutil"
"os"
"rand"
"sort"
"strconv"
"strings"
"time"
)
const (
OAUTH_VERSION = "1.0"
SIGNATURE_METHOD = "HMAC-SHA1"
CALLBACK_PARAM = "oauth_callback"
CONSUMER_KEY_PARAM = "oauth_consumer_key"
NONCE_PARAM = "oauth_nonce"
SIGNATURE_METHOD_PARAM = "oauth_signature_method"
SIGNATURE_PARAM = "oauth_signature"
TIMESTAMP_PARAM = "oauth_timestamp"
TOKEN_PARAM = "oauth_token"
TOKEN_SECRET_PARAM = "oauth_token_secret"
VERIFIER_PARAM = "oauth_verifier"
VERSION_PARAM = "oauth_version"
)
// TODO(mrjones) Do we definitely want separate "Request" and "Access" token classes?
// They're identical structurally, but used for different purposes.

// RequestToken is the temporary token/secret pair issued during the
// authorization handshake (steps 2-6 in the package comment).
type RequestToken struct {
	Token  string
	Secret string
}

// AccessToken is the long-lived token/secret pair used to sign
// requests for protected resources. Treat the secret like a password.
type AccessToken struct {
	Token  string
	Secret string
}
// Information about how to contact the service provider (see #1 above).
// You usually find all of these URLs by reading the documentation for the service
// that you're trying to connect to.
// Some common examples are:
// (1) Google, standard APIs:
// http://code.google.com/apis/accounts/docs/OAuth_ref.html
// - RequestTokenUrl: https://www.google.com/accounts/OAuthGetRequestToken
// - AuthorizeTokenUrl: https://www.google.com/accounts/OAuthAuthorizeToken
// - AccessTokenUrl: https://www.google.com/accounts/OAuthGetAccessToken
// Note: Some Google APIs (for example, Google Latitude) use different values for
// one or more of those URLs.
// (2) Twitter API:
// http://dev.twitter.com/pages/auth
// - RequestTokenUrl: http://api.twitter.com/oauth/request_token
// - AuthorizeTokenUrl: https://api.twitter.com/oauth/authorize
// - AccessTokenUrl: https://api.twitter.com/oauth/access_token
// (3) NetFlix API:
// http://developer.netflix.com/docs/Security
// - RequestTokenUrl: http://api.netflix.com/oauth/request_token
// - AuthroizeTokenUrl: https://api-user.netflix.com/oauth/login
// - AccessTokenUrl: http://api.netflix.com/oauth/access_token
// ServiceProvider holds the three OAuth endpoint URLs of a provider;
// see the worked examples in the comment above.
type ServiceProvider struct {
	RequestTokenUrl   string
	AuthorizeTokenUrl string
	AccessTokenUrl    string
}
// Consumers are stateless, you can call the various methods (GetRequestTokenAndUrl,
// AuthorizeToken, and Get) on various different instances of Consumers *as long as
// they were set up in the same way.* It is up to you, as the caller to persist the
// necessary state (RequestTokens and AccessTokens).
type Consumer struct {
	// Some ServiceProviders require extra parameters to be passed for various reasons.
	// For example Google APIs require you to set a scope= parameter to specify how much
	// access is being granted. The proper values for scope= depend on the service:
	// For more, see: http://code.google.com/apis/accounts/docs/OAuth.html#prepScope
	AdditionalParams map[string]string

	// The rest of this class is configured via the NewConsumer function.
	consumerKey     string
	consumerSecret  string
	serviceProvider ServiceProvider
	callbackUrl     string
	debug           bool

	// Private seams for mocking dependencies when testing
	httpClient     httpClient
	clock          clock
	nonceGenerator nonceGenerator
	signer         signer
}
// Creates a new Consumer instance.
// - consumerKey and consumerSecret
// values you should obtain from the ServiceProvider when you register your application.
//
// - serviceProvider:
// see the documentation for ServiceProvider for how to create this.
//
// - callbackURL
// Authorizing a token *requires* redirecting to the service provider. This is the URL
// which the service provider will redirect the user back to after that authorization
// is completed. The service provider will pass back a verification code which is
// necessary to complete the rest of the process (in AuthorizeToken).
// Notes on callbackURL:
// - Some (all?) service providers allow for setting "oob" (for out-of-band) as a callback
// url. If this is set the service provider will present the verification code directly
// to the user, and you must provide a place for them to copy-and-paste it into.
// - Otherwise, the user will be redirected to callbackUrl in the browser, and will
// append a "oauth_verifier=<verifier>" parameter.
// NewConsumer wires a Consumer with production defaults: a real HTTP
// client, the wall clock, a time-seeded nonce generator and an
// HMAC-SHA1 signer. See the parameter documentation in the comment
// above.
func NewConsumer(consumerKey string, consumerSecret string,
	serviceProvider ServiceProvider, callbackUrl string) *Consumer {
	clock := &defaultClock{}
	return &Consumer{
		consumerKey:     consumerKey,
		consumerSecret:  consumerSecret,
		serviceProvider: serviceProvider,
		callbackUrl:     callbackUrl,
		clock:           clock,
		httpClient:      &http.Client{},
		// NOTE(review): nonces come from a PRNG seeded with the current
		// time in seconds, so they are predictable — confirm whether a
		// cryptographic source is required by the providers in use.
		nonceGenerator:   rand.New(rand.NewSource(clock.Seconds())),
		signer:           &SHA1Signer{},
		AdditionalParams: make(map[string]string),
	}
}
// Kicks off the OAuth authorization process. This function returns:
//   - rtoken:
//     A temporary RequestToken, used during the authorization process. You must save this
//     since it will be necessary later in the process when calling AuthorizeToken().
//   - url:
//     A URL that you should redirect the user to in order that they may authorize you to
//     the service provider.
//   - err:
//     Set only if there was an error, nil otherwise.
func (c *Consumer) GetRequestTokenAndUrl() (rtoken *RequestToken, url string, err os.Error) {
	params := c.baseParams(c.consumerKey, c.AdditionalParams)
	params.Add(CALLBACK_PARAM, c.callbackUrl)
	// NOTE: req.oauthParams aliases params, so signRequest's side effect of
	// appending oauth_signature lands in params — which is what getBody
	// actually sends. req itself is not used after signing.
	req := newGetRequest(c.serviceProvider.RequestTokenUrl, params)
	c.signRequest(req, c.makeKey("")) // We don't have a token secret for the key yet
	resp, err := c.getBody(c.serviceProvider.RequestTokenUrl, params)
	if err != nil {
		return nil, "", err
	}
	token, secret, err := parseTokenAndSecret(*resp)
	if err != nil {
		return nil, "", err
	}
	url = c.serviceProvider.AuthorizeTokenUrl + "?oauth_token=" + token
	return &RequestToken{Token: token, Secret: secret}, url, nil
}

// After the user has authorized you to the service provider, use this method to turn
// your temporary RequestToken into a permanent AccessToken. You must pass in two values:
//   - rtoken:
//     The RequestToken returned from GetRequestTokenAndUrl()
//   - verificationCode:
//     The string passed back from the server, either as the oauth_verifier
//     query param appended to callbackUrl *OR* a string manually entered by the user
//     if callbackUrl is "oob"
//
// It will return:
//   - atoken:
//     A permanent AccessToken which can be used to access the user's data (until it is
//     revoked by the user or the service provider).
//   - err:
//     Set only if there was an error, nil otherwise.
func (c *Consumer) AuthorizeToken(rtoken *RequestToken, verificationCode string) (atoken *AccessToken, err os.Error) {
	params := c.baseParams(c.consumerKey, c.AdditionalParams)
	params.Add(VERIFIER_PARAM, verificationCode)
	params.Add(TOKEN_PARAM, rtoken.Token)
	// As in GetRequestTokenAndUrl, signing mutates params through req.
	req := newGetRequest(c.serviceProvider.AccessTokenUrl, params)
	c.signRequest(req, c.makeKey(rtoken.Secret)) // Key now includes the request-token secret.
	resp, err := c.getBody(c.serviceProvider.AccessTokenUrl, params)
	if err != nil {
		return nil, err
	}
	token, secret, err := parseTokenAndSecret(*resp)
	if err != nil {
		return nil, err
	}
	return &AccessToken{Token: token, Secret: secret}, nil
}
// Executes an HTTP GET, authorized via the AccessToken.
//   - url:
//     The base url, without any query params, which is being accessed
//   - userParams:
//     Any key=value params to be included in the query string
//   - token:
//     The AccessToken returned by AuthorizeToken()
//
// This method returns:
//   - resp:
//     The HTTP Response resulting from making this request.
//   - err:
//     Set only if there was an error, nil otherwise.
func (c *Consumer) Get(url string, userParams map[string]string, token *AccessToken) (resp *http.Response, err os.Error) {
	return c.makeAuthorizedRequest("GET", url, "", userParams, token)
}

// Post executes an authorized HTTP POST with the given body; see Get for the
// meaning of the remaining parameters.
func (c *Consumer) Post(url string, body string, userParams map[string]string, token *AccessToken) (resp *http.Response, err os.Error) {
	return c.makeAuthorizedRequest("POST", url, body, userParams, token)
}

// Delete executes an authorized HTTP DELETE; see Get for parameter semantics.
func (c *Consumer) Delete(url string, userParams map[string]string, token *AccessToken) (resp *http.Response, err os.Error) {
	return c.makeAuthorizedRequest("DELETE", url, "", userParams, token)
}

// Put executes an authorized HTTP PUT with the given body; see Get for the
// meaning of the remaining parameters.
func (c *Consumer) Put(url string, body string, userParams map[string]string, token *AccessToken) (resp *http.Response, err os.Error) {
	return c.makeAuthorizedRequest("PUT", url, body, userParams, token)
}

// Debug toggles verbose logging on the consumer and its signer.
func (c *Consumer) Debug(enabled bool) {
	c.debug = enabled
	c.signer.Debug(enabled)
}
// makeAuthorizedRequest signs and issues an HTTP request on behalf of the
// user represented by token.
//   - method: HTTP verb ("GET", "POST", ...).
//   - url: base URL without query parameters.
//   - body: request body (empty for GET/DELETE).
//   - userParams: optional key=value pairs sent in the query string.
//   - token: the AccessToken returned by AuthorizeToken().
//
// Returns the raw *http.Response, or an error.
func (c *Consumer) makeAuthorizedRequest(method string, url string, body string, userParams map[string]string, token *AccessToken) (resp *http.Response, err os.Error) {
	allParams := c.baseParams(c.consumerKey, c.AdditionalParams)
	allParams.Add(TOKEN_PARAM, token.Token)
	// Clone before user params are added: only the oauth_* parameters (plus
	// AdditionalParams) go into the Authorization header; user params travel
	// in the query string but still participate in the signature.
	authParams := allParams.Clone()
	queryParams := ""
	separator := "?"
	if userParams != nil {
		for key, value := range userParams {
			allParams.Add(key, value)
			queryParams += separator + escape(key) + "=" + escape(value)
			separator = "&"
		}
	}
	key := c.makeKey(token.Secret)
	// Per OAuth 1.0 (RFC 5849) the signature base string is built from the
	// *base* URL only; the user params are already normalized into allParams.
	// Previously queryParams was appended to the URL here as well, which
	// double-counted the user parameters in the base string.
	base_string := c.requestString(method, url, allParams)
	authParams.Add(SIGNATURE_PARAM, c.signer.Sign(base_string, key))
	return c.httpExecute(method, url+queryParams, body, authParams)
}
// request bundles everything needed to sign and issue one HTTP request.
type request struct {
	method      string
	url         string
	oauthParams *OrderedParams
	userParams  map[string]string
}

// httpClient is a private seam over *http.Client so tests can mock transport.
type httpClient interface {
	Do(req *http.Request) (resp *http.Response, err os.Error)
}

// clock abstracts wall-clock time (seconds since the epoch) for testing.
type clock interface {
	Seconds() int64
}

// nonceGenerator yields random numbers used as oauth_nonce values.
type nonceGenerator interface {
	Int63() int64
}

// signer computes (and optionally logs) request signatures.
type signer interface {
	Sign(message, key string) string
	Debug(enabled bool)
}

// defaultClock reads the real system time.
type defaultClock struct{}

func (*defaultClock) Seconds() int64 {
	return time.Seconds()
}
// newGetRequest wraps a URL and its OAuth parameters as a GET request.
func newGetRequest(url string, oauthParams *OrderedParams) *request {
	return &request{
		method:      "GET",
		url:         url,
		oauthParams: oauthParams,
	}
}

// signRequest computes the signature base string for req and appends the
// resulting oauth_signature to req.oauthParams (mutating it in place).
func (c *Consumer) signRequest(req *request, key string) *request {
	base_string := c.requestString(req.method, req.url, req.oauthParams)
	req.oauthParams.Add(SIGNATURE_PARAM, c.signer.Sign(base_string, key))
	return req
}

// makeKey builds the HMAC key: the escaped consumer secret and token secret
// joined by "&". tokenSecret may be empty (request-token phase).
func (c *Consumer) makeKey(tokenSecret string) string {
	return escape(c.consumerSecret) + "&" + escape(tokenSecret)
}

// parseTokenAndSecret extracts oauth_token and oauth_token_secret from a
// url-encoded response body, or returns an error naming the missing field.
func parseTokenAndSecret(data string) (string, string, os.Error) {
	parts, err := http.ParseQuery(data)
	if err != nil {
		return "", "", err
	}
	if len(parts[TOKEN_PARAM]) < 1 {
		return "", "", os.NewError("Missing " + TOKEN_PARAM + " in response. " +
			"Full response body: '" + data + "'")
	}
	if len(parts[TOKEN_SECRET_PARAM]) < 1 {
		return "", "", os.NewError("Missing " + TOKEN_SECRET_PARAM + " in response." +
			"Full response body: '" + data + "'")
	}
	return parts[TOKEN_PARAM][0], parts[TOKEN_SECRET_PARAM][0], nil
}

// baseParams assembles the OAuth parameters common to every request: version,
// signature method, a fresh timestamp and nonce, the consumer key, and any
// caller-supplied additional parameters (e.g. scope= for Google APIs).
func (c *Consumer) baseParams(consumerKey string, additionalParams map[string]string) *OrderedParams {
	params := NewOrderedParams()
	params.Add(VERSION_PARAM, OAUTH_VERSION)
	params.Add(SIGNATURE_METHOD_PARAM, SIGNATURE_METHOD)
	params.Add(TIMESTAMP_PARAM, strconv.Itoa64(c.clock.Seconds()))
	params.Add(NONCE_PARAM, strconv.Itoa64(c.nonceGenerator.Int63()))
	params.Add(CONSUMER_KEY_PARAM, consumerKey)
	for key, value := range additionalParams {
		params.Add(key, value)
	}
	return params
}
// SHA1Signer computes HMAC-SHA1 signatures over signature base strings.
// It is the only signature method this library supports.
type SHA1Signer struct {
	debug bool
}

// Debug toggles logging of every message/key pair passed to Sign.
func (s *SHA1Signer) Debug(enabled bool) {
	// (dropped a stray trailing semicolon; gofmt-clean now)
	s.debug = enabled
}

// Sign returns the base64-encoded HMAC-SHA1 of message under key.
func (s *SHA1Signer) Sign(message string, key string) string {
	if s.debug {
		fmt.Println("Signing:" + message)
		fmt.Println("Key:" + key)
	}
	hashfun := hmac.NewSHA1([]byte(key))
	hashfun.Write([]byte(message))
	rawsignature := hashfun.Sum()
	base64signature := make([]byte, base64.StdEncoding.EncodedLen(len(rawsignature)))
	base64.StdEncoding.Encode(base64signature, rawsignature)
	return string(base64signature)
}
// escape url-encodes a single parameter name or value.
func escape(input string) string {
	return http.URLEscape(input)
}

// requestString builds the OAuth signature base string:
// METHOD&escape(url)&escape(k1=v1)%26escape(k2=v2)...
// Keys() sorts the parameters first, as signature normalization requires.
func (c *Consumer) requestString(method string, url string, params *OrderedParams) string {
	result := method + "&" + escape(url)
	for pos, key := range params.Keys() {
		if pos == 0 {
			result += "&"
		} else {
			// Separators *between* normalized params are escaped ("%26").
			result += escape("&")
		}
		result += escape(fmt.Sprintf("%s=%s", key, params.Get(key)))
	}
	return result
}

// getBody issues a signed GET and returns the response body as a string.
// Non-200 responses are converted to errors by httpExecute.
func (c *Consumer) getBody(url string, oauthParams *OrderedParams) (*string, os.Error) {
	resp, err := c.httpExecute("GET", url, "", oauthParams)
	if err != nil {
		return nil, err
	}
	bytes, err := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	if err != nil {
		return nil, err
	}
	str := string(bytes)
	if c.debug {
		fmt.Printf("STATUS: %d %s\n", resp.StatusCode, resp.Status)
		fmt.Println("BODY RESPONSE: " + str)
	}
	return &str, nil
}
// httpExecute performs an HTTP request with the OAuth parameters packed into
// an "Authorization: OAuth ..." header. Any response other than 200/OK is
// converted into an error carrying the status and body.
func (c *Consumer) httpExecute(
	method string, url string, body string, oauthParams *OrderedParams) (*http.Response, os.Error) {
	if c.debug {
		fmt.Println("httpExecute(method: " + method + ", url: " + url)
	}
	var req http.Request
	req.Method = method
	req.Header = http.Header{}
	req.Body = newStringReadCloser(body)
	parsedurl, err := http.ParseURL(url)
	if err != nil {
		return nil, err
	}
	req.URL = parsedurl
	oauthHdr := "OAuth "
	for pos, key := range oauthParams.Keys() {
		if pos > 0 {
			oauthHdr += ",\n "
		}
		oauthHdr += key + "=\"" + oauthParams.Get(key) + "\""
	}
	if c.debug {
		fmt.Println("AUTH-HDR: " + oauthHdr)
	}
	req.Header.Add("Authorization", oauthHdr)
	resp, err := c.httpClient.Do(&req)
	// BUG FIX: previously resp.StatusCode was read before checking err,
	// which dereferences a nil response when the transport fails.
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		bytes, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		return nil, os.NewError("HTTP response is not 200/OK as expected. Actual response: \n" +
			"\tStatus: '" + resp.Status + "'\n" +
			"\tCode: " + strconv.Itoa(resp.StatusCode) + "\n" +
			"\tBody: " + string(bytes))
	}
	return resp, nil
}
// stringReadCloser adapts a strings.Reader into an io.ReadCloser so an
// in-memory string can serve as an http.Request body.
type stringReadCloser struct {
	io.Reader
}

func newStringReadCloser(data string) io.ReadCloser {
	return stringReadCloser{strings.NewReader(data)}
}

// Close is a no-op; an in-memory reader has nothing to release.
func (rc stringReadCloser) Close() os.Error {
	return nil
}

//
// ORDERED PARAMS
//

// OrderedParams is a string->string map whose keys can be iterated in sorted
// order (it implements sort.Interface), as OAuth normalization requires.
type OrderedParams struct {
	allParams   map[string]string
	keyOrdering []string
}

func NewOrderedParams() *OrderedParams {
	return &OrderedParams{
		allParams:   make(map[string]string),
		keyOrdering: make([]string, 0),
	}
}

func (o *OrderedParams) Get(key string) string {
	return o.allParams[key]
}

// Keys sorts the key list in place and returns it.
func (o *OrderedParams) Keys() []string {
	sort.Sort(o)
	return o.keyOrdering
}

// Add stores a url-escaped copy of value under key.
func (o *OrderedParams) Add(key, value string) {
	o.add(key, http.URLEscape(value))
}

// add stores value verbatim (callers pass already-escaped values).
func (o *OrderedParams) add(key, value string) {
	o.allParams[key] = value
	o.keyOrdering = append(o.keyOrdering, key)
}

// Len, Less and Swap implement sort.Interface over the key list.
func (o *OrderedParams) Len() int {
	return len(o.keyOrdering)
}

func (o *OrderedParams) Less(i int, j int) bool {
	return o.keyOrdering[i] < o.keyOrdering[j]
}

func (o *OrderedParams) Swap(i int, j int) {
	o.keyOrdering[i], o.keyOrdering[j] = o.keyOrdering[j], o.keyOrdering[i]
}

// Clone returns a deep copy; values are copied verbatim via add since they
// are already escaped.
func (o *OrderedParams) Clone() *OrderedParams {
	clone := NewOrderedParams()
	for _, key := range o.Keys() {
		clone.add(key, o.Get(key))
	}
	return clone
}
another attempt
// OAuth 1.0 consumer implementation.
// See http://www.oauth.net and RFC 5849
//
// There are typically three parties involved in an OAuth exchange:
// (1) The "Service Provider" (e.g. Google, Twitter, NetFlix) who operates the
// service where the data resides.
// (2) The "End User" who owns that data, and wants to grant access to a third-party.
// (3) That third-party who wants access to the data (after first being authorized by the
// user). This third-party is referred to as the "Consumer" in OAuth terminology.
//
// This library is designed to help implement the third-party consumer by handling the
// low-level authentication tasks, and allowing for authenticated requests to the
// service provider on behalf of the user.
//
// Caveats:
// - Currently only supports HMAC-SHA1 signatures.
// - Currently only supports HTTP-Get requests.
// - Currently only supports OAuth 1.0
//
// Overview of how to use this library:
// (1) First create a new Consumer instance with the NewConsumer function
// (2) Get a RequestToken, and "authorization url" from GetRequestTokenAndUrl()
// (3) Save the RequestToken, you will need it again in step 6.
// (4) Redirect the user to the "authorization url" from step 2, where they will authorize
// your access to the service provider.
// (5) Wait. You will be called back on the CallbackUrl that you provide, and you
// will receive a "verification code".
// (6) Call AuthorizeToken() with the RequestToken from step 2 and the "verification code"
// from step 5.
// (7) You will get back an AccessToken. Save this for as long as you need access to
// the user's data, and treat it like a password; it is a secret.
// (8) You can now throw away the RequestToken from step 2, it is no longer necessary.
// (9) Call "Get" using the AccessToken from step 7 to access protected resources.
package oauth
import (
"crypto/hmac"
"encoding/base64"
"fmt"
"http"
"io"
"io/ioutil"
"os"
"rand"
"sort"
"strconv"
"strings"
"time"
)
// Standard OAuth 1.0 protocol parameter names and fixed values.
const (
	OAUTH_VERSION    = "1.0"
	SIGNATURE_METHOD = "HMAC-SHA1"

	CALLBACK_PARAM         = "oauth_callback"
	CONSUMER_KEY_PARAM     = "oauth_consumer_key"
	NONCE_PARAM            = "oauth_nonce"
	SIGNATURE_METHOD_PARAM = "oauth_signature_method"
	SIGNATURE_PARAM        = "oauth_signature"
	TIMESTAMP_PARAM        = "oauth_timestamp"
	TOKEN_PARAM            = "oauth_token"
	TOKEN_SECRET_PARAM     = "oauth_token_secret"
	VERIFIER_PARAM         = "oauth_verifier"
	VERSION_PARAM          = "oauth_version"
)

// TODO(mrjones) Do we definitely want separate "Request" and "Access" token classes?
// They're identical structurally, but used for different purposes.

// RequestToken is the temporary token used during the authorization handshake.
type RequestToken struct {
	Token  string
	Secret string
}

// AccessToken is the long-lived token used to access protected resources.
type AccessToken struct {
	Token  string
	Secret string
}
// Information about how to contact the service provider (see #1 above).
// You usually find all of these URLs by reading the documentation for the service
// that you're trying to connect to.
// Some common examples are:
//
//	(1) Google, standard APIs:
//	    http://code.google.com/apis/accounts/docs/OAuth_ref.html
//	    - RequestTokenUrl:   https://www.google.com/accounts/OAuthGetRequestToken
//	    - AuthorizeTokenUrl: https://www.google.com/accounts/OAuthAuthorizeToken
//	    - AccessTokenUrl:    https://www.google.com/accounts/OAuthGetAccessToken
//	    Note: Some Google APIs (for example, Google Latitude) use different values for
//	    one or more of those URLs.
//	(2) Twitter API:
//	    http://dev.twitter.com/pages/auth
//	    - RequestTokenUrl:   http://api.twitter.com/oauth/request_token
//	    - AuthorizeTokenUrl: https://api.twitter.com/oauth/authorize
//	    - AccessTokenUrl:    https://api.twitter.com/oauth/access_token
//	(3) NetFlix API:
//	    http://developer.netflix.com/docs/Security
//	    - RequestTokenUrl:   http://api.netflix.com/oauth/request_token
//	    - AuthorizeTokenUrl: https://api-user.netflix.com/oauth/login
//	    - AccessTokenUrl:    http://api.netflix.com/oauth/access_token
type ServiceProvider struct {
	RequestTokenUrl   string
	AuthorizeTokenUrl string
	AccessTokenUrl    string
}

// Consumers are stateless, you can call the various methods (GetRequestTokenAndUrl,
// AuthorizeToken, and Get) on various different instances of Consumers *as long as
// they were set up in the same way.* It is up to you, as the caller, to persist the
// necessary state (RequestTokens and AccessTokens).
type Consumer struct {
	// Some ServiceProviders require extra parameters to be passed for various reasons.
	// For example Google APIs require you to set a scope= parameter to specify how much
	// access is being granted. The proper values for scope= depend on the service:
	// For more, see: http://code.google.com/apis/accounts/docs/OAuth.html#prepScope
	AdditionalParams map[string]string

	// The rest of this struct is configured via the NewConsumer function.
	consumerKey     string
	consumerSecret  string
	serviceProvider ServiceProvider
	callbackUrl     string
	debug           bool

	// Private seams for mocking dependencies when testing.
	httpClient     httpClient
	clock          clock
	nonceGenerator nonceGenerator
	signer         signer
}
// NewConsumer creates a Consumer.
//
//   - consumerKey / consumerSecret: the credentials the service provider
//     issued when you registered your application.
//   - serviceProvider: the three OAuth endpoint URLs; see ServiceProvider.
//   - callbackUrl: where the service provider redirects the user after
//     authorization, with an "oauth_verifier=<verifier>" parameter appended.
//     Many providers also accept "oob" (out-of-band), in which case the
//     verification code is shown to the user to copy-and-paste manually.
func NewConsumer(consumerKey string, consumerSecret string,
	serviceProvider ServiceProvider, callbackUrl string) *Consumer {
	clk := &defaultClock{}
	consumer := &Consumer{
		AdditionalParams: make(map[string]string),
		consumerKey:      consumerKey,
		consumerSecret:   consumerSecret,
		serviceProvider:  serviceProvider,
		callbackUrl:      callbackUrl,
		clock:            clk,
		httpClient:       &http.Client{},
		nonceGenerator:   rand.New(rand.NewSource(clk.Seconds())),
		signer:           &SHA1Signer{},
	}
	return consumer
}
// Kicks off the OAuth authorization process. This function returns:
//   - rtoken:
//     A temporary RequestToken, used during the authorization process. You must save this
//     since it will be necessary later in the process when calling AuthorizeToken().
//   - url:
//     A URL that you should redirect the user to in order that they may authorize you to
//     the service provider.
//   - err:
//     Set only if there was an error, nil otherwise.
func (c *Consumer) GetRequestTokenAndUrl() (rtoken *RequestToken, url string, err os.Error) {
	params := c.baseParams(c.consumerKey, c.AdditionalParams)
	params.Add(CALLBACK_PARAM, c.callbackUrl)
	// req shares its oauthParams with params, so signing below appends
	// oauth_signature to params — which is what getBody sends.
	req := newGetRequest(c.serviceProvider.RequestTokenUrl, params)
	c.signRequest(req, c.makeKey("")) // We don't have a token secret for the key yet
	resp, err := c.getBody(c.serviceProvider.RequestTokenUrl, params)
	if err != nil {
		return nil, "", err
	}
	token, secret, err := parseTokenAndSecret(*resp)
	if err != nil {
		return nil, "", err
	}
	url = c.serviceProvider.AuthorizeTokenUrl + "?oauth_token=" + token
	return &RequestToken{Token: token, Secret: secret}, url, nil
}

// After the user has authorized you to the service provider, use this method to turn
// your temporary RequestToken into a permanent AccessToken. You must pass in two values:
//   - rtoken:
//     The RequestToken returned from GetRequestTokenAndUrl()
//   - verificationCode:
//     The string passed back from the server, either as the oauth_verifier
//     query param appended to callbackUrl *OR* a string manually entered by the user
//     if callbackUrl is "oob"
//
// It will return:
//   - atoken:
//     A permanent AccessToken which can be used to access the user's data (until it is
//     revoked by the user or the service provider).
//   - err:
//     Set only if there was an error, nil otherwise.
func (c *Consumer) AuthorizeToken(rtoken *RequestToken, verificationCode string) (atoken *AccessToken, err os.Error) {
	params := c.baseParams(c.consumerKey, c.AdditionalParams)
	params.Add(VERIFIER_PARAM, verificationCode)
	params.Add(TOKEN_PARAM, rtoken.Token)
	// Signing mutates params through req, as in GetRequestTokenAndUrl.
	req := newGetRequest(c.serviceProvider.AccessTokenUrl, params)
	c.signRequest(req, c.makeKey(rtoken.Secret)) // Key now includes the request-token secret.
	resp, err := c.getBody(c.serviceProvider.AccessTokenUrl, params)
	if err != nil {
		return nil, err
	}
	token, secret, err := parseTokenAndSecret(*resp)
	if err != nil {
		return nil, err
	}
	return &AccessToken{Token: token, Secret: secret}, nil
}
// Executes an HTTP GET, authorized via the AccessToken.
//   - url:
//     The base url, without any query params, which is being accessed
//   - userParams:
//     Any key=value params to be included in the query string
//   - token:
//     The AccessToken returned by AuthorizeToken()
//
// This method returns:
//   - resp:
//     The HTTP Response resulting from making this request.
//   - err:
//     Set only if there was an error, nil otherwise.
func (c *Consumer) Get(url string, userParams map[string]string, token *AccessToken) (resp *http.Response, err os.Error) {
	return c.makeAuthorizedRequest("GET", url, "", userParams, token)
}

// Post executes an authorized HTTP POST with the given body; see Get for the
// meaning of the remaining parameters.
func (c *Consumer) Post(url string, body string, userParams map[string]string, token *AccessToken) (resp *http.Response, err os.Error) {
	return c.makeAuthorizedRequest("POST", url, body, userParams, token)
}

// Delete executes an authorized HTTP DELETE; see Get for parameter semantics.
func (c *Consumer) Delete(url string, userParams map[string]string, token *AccessToken) (resp *http.Response, err os.Error) {
	return c.makeAuthorizedRequest("DELETE", url, "", userParams, token)
}

// Put executes an authorized HTTP PUT with the given body; see Get for the
// meaning of the remaining parameters.
func (c *Consumer) Put(url string, body string, userParams map[string]string, token *AccessToken) (resp *http.Response, err os.Error) {
	return c.makeAuthorizedRequest("PUT", url, body, userParams, token)
}

// Debug toggles verbose logging on the consumer and its signer.
func (c *Consumer) Debug(enabled bool) {
	c.debug = enabled
	c.signer.Debug(enabled)
}
// makeAuthorizedRequest signs and issues an HTTP request on behalf of the
// user represented by token. User params travel in the query string but are
// signed via allParams; only OAuth params (authParams) go into the
// Authorization header.
func (c *Consumer) makeAuthorizedRequest(method string, url string, body string, userParams map[string]string, token *AccessToken) (resp *http.Response, err os.Error) {
	allParams := c.baseParams(c.consumerKey, c.AdditionalParams)
	allParams.Add(TOKEN_PARAM, token.Token)
	// Clone before user params are added: the header must carry only the
	// oauth_* parameters (plus AdditionalParams).
	authParams := allParams.Clone()
	queryParams := ""
	separator := "?"
	if userParams != nil {
		for key, value := range userParams {
			allParams.Add(key, value)
			queryParams += separator + escape(key) + "=" + escape(value)
			separator = "&"
		}
	}
	key := c.makeKey(token.Secret)
	// The signature base string uses the bare URL; user params are already
	// normalized into allParams, per the OAuth 1.0 spec.
	base_string := c.requestString(method, url, allParams)
	authParams.Add(SIGNATURE_PARAM, c.signer.Sign(base_string, key))
	return c.httpExecute(method, url+queryParams, body, authParams)
}
// request bundles everything needed to sign and issue one HTTP request.
type request struct {
	method      string
	url         string
	oauthParams *OrderedParams
	userParams  map[string]string
}

// httpClient is a private seam over *http.Client so tests can mock transport.
type httpClient interface {
	Do(req *http.Request) (resp *http.Response, err os.Error)
}

// clock abstracts wall-clock time (seconds since the epoch) for testing.
type clock interface {
	Seconds() int64
}

// nonceGenerator yields random numbers used as oauth_nonce values.
type nonceGenerator interface {
	Int63() int64
}

// signer computes (and optionally logs) request signatures.
type signer interface {
	Sign(message, key string) string
	Debug(enabled bool)
}

// defaultClock reads the real system time.
type defaultClock struct{}

func (*defaultClock) Seconds() int64 {
	return time.Seconds()
}

// newGetRequest wraps a URL and its OAuth parameters as a GET request.
func newGetRequest(url string, oauthParams *OrderedParams) *request {
	return &request{
		method:      "GET",
		url:         url,
		oauthParams: oauthParams,
	}
}

// signRequest computes the signature base string for req and appends the
// resulting oauth_signature to req.oauthParams (mutating it in place).
func (c *Consumer) signRequest(req *request, key string) *request {
	base_string := c.requestString(req.method, req.url, req.oauthParams)
	req.oauthParams.Add(SIGNATURE_PARAM, c.signer.Sign(base_string, key))
	return req
}

// makeKey builds the HMAC key: the escaped consumer secret and token secret
// joined by "&". tokenSecret may be empty (request-token phase).
func (c *Consumer) makeKey(tokenSecret string) string {
	return escape(c.consumerSecret) + "&" + escape(tokenSecret)
}

// parseTokenAndSecret extracts oauth_token and oauth_token_secret from a
// url-encoded response body, or returns an error naming the missing field.
func parseTokenAndSecret(data string) (string, string, os.Error) {
	parts, err := http.ParseQuery(data)
	if err != nil {
		return "", "", err
	}
	if len(parts[TOKEN_PARAM]) < 1 {
		return "", "", os.NewError("Missing " + TOKEN_PARAM + " in response. " +
			"Full response body: '" + data + "'")
	}
	if len(parts[TOKEN_SECRET_PARAM]) < 1 {
		return "", "", os.NewError("Missing " + TOKEN_SECRET_PARAM + " in response." +
			"Full response body: '" + data + "'")
	}
	return parts[TOKEN_PARAM][0], parts[TOKEN_SECRET_PARAM][0], nil
}

// baseParams assembles the OAuth parameters common to every request: version,
// signature method, a fresh timestamp and nonce, the consumer key, and any
// caller-supplied additional parameters (e.g. scope= for Google APIs).
func (c *Consumer) baseParams(consumerKey string, additionalParams map[string]string) *OrderedParams {
	params := NewOrderedParams()
	params.Add(VERSION_PARAM, OAUTH_VERSION)
	params.Add(SIGNATURE_METHOD_PARAM, SIGNATURE_METHOD)
	params.Add(TIMESTAMP_PARAM, strconv.Itoa64(c.clock.Seconds()))
	params.Add(NONCE_PARAM, strconv.Itoa64(c.nonceGenerator.Int63()))
	params.Add(CONSUMER_KEY_PARAM, consumerKey)
	for key, value := range additionalParams {
		params.Add(key, value)
	}
	return params
}
// SHA1Signer computes HMAC-SHA1 signatures over signature base strings.
// It is the only signature method this library supports.
type SHA1Signer struct {
	debug bool
}

// Debug toggles logging of every message/key pair passed to Sign.
func (s *SHA1Signer) Debug(enabled bool) {
	// (dropped a stray trailing semicolon; gofmt-clean now)
	s.debug = enabled
}

// Sign returns the base64-encoded HMAC-SHA1 of message under key.
func (s *SHA1Signer) Sign(message string, key string) string {
	if s.debug {
		fmt.Println("Signing:" + message)
		fmt.Println("Key:" + key)
	}
	hashfun := hmac.NewSHA1([]byte(key))
	hashfun.Write([]byte(message))
	rawsignature := hashfun.Sum()
	base64signature := make([]byte, base64.StdEncoding.EncodedLen(len(rawsignature)))
	base64.StdEncoding.Encode(base64signature, rawsignature)
	return string(base64signature)
}
// escape url-encodes a single parameter name or value.
func escape(input string) string {
	return http.URLEscape(input)
}

// requestString builds the OAuth signature base string:
// METHOD&escape(url)&escape(k1=v1)%26escape(k2=v2)...
// Keys() sorts the parameters first, as signature normalization requires.
func (c *Consumer) requestString(method string, url string, params *OrderedParams) string {
	result := method + "&" + escape(url)
	for pos, key := range params.Keys() {
		if pos == 0 {
			result += "&"
		} else {
			// Separators *between* normalized params are escaped ("%26").
			result += escape("&")
		}
		result += escape(fmt.Sprintf("%s=%s", key, params.Get(key)))
	}
	return result
}

// getBody issues a signed GET and returns the response body as a string.
// Non-200 responses are converted to errors by httpExecute.
func (c *Consumer) getBody(url string, oauthParams *OrderedParams) (*string, os.Error) {
	resp, err := c.httpExecute("GET", url, "", oauthParams)
	if err != nil {
		return nil, err
	}
	bytes, err := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	if err != nil {
		return nil, err
	}
	str := string(bytes)
	if c.debug {
		fmt.Printf("STATUS: %d %s\n", resp.StatusCode, resp.Status)
		fmt.Println("BODY RESPONSE: " + str)
	}
	return &str, nil
}
// httpExecute performs an HTTP request with the OAuth parameters packed into
// an "Authorization: OAuth ..." header. Any response other than 200/OK is
// converted into an error carrying the status and body.
func (c *Consumer) httpExecute(
	method string, url string, body string, oauthParams *OrderedParams) (*http.Response, os.Error) {
	if c.debug {
		fmt.Println("httpExecute(method: " + method + ", url: " + url)
	}
	var req http.Request
	req.Method = method
	req.Header = http.Header{}
	req.Body = newStringReadCloser(body)
	parsedurl, err := http.ParseURL(url)
	if err != nil {
		return nil, err
	}
	req.URL = parsedurl
	oauthHdr := "OAuth "
	for pos, key := range oauthParams.Keys() {
		if pos > 0 {
			oauthHdr += ",\n "
		}
		oauthHdr += key + "=\"" + oauthParams.Get(key) + "\""
	}
	if c.debug {
		fmt.Println("AUTH-HDR: " + oauthHdr)
	}
	req.Header.Add("Authorization", oauthHdr)
	resp, err := c.httpClient.Do(&req)
	// BUG FIX: previously resp.StatusCode was read before checking err,
	// which dereferences a nil response when the transport fails.
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		bytes, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		return nil, os.NewError("HTTP response is not 200/OK as expected. Actual response: \n" +
			"\tStatus: '" + resp.Status + "'\n" +
			"\tCode: " + strconv.Itoa(resp.StatusCode) + "\n" +
			"\tBody: " + string(bytes))
	}
	return resp, nil
}
// stringReadCloser adapts a strings.Reader into an io.ReadCloser so an
// in-memory string can serve as an http.Request body.
type stringReadCloser struct {
	io.Reader
}

func newStringReadCloser(data string) io.ReadCloser {
	return stringReadCloser{strings.NewReader(data)}
}

// Close is a no-op; an in-memory reader has nothing to release.
func (rc stringReadCloser) Close() os.Error {
	return nil
}

//
// ORDERED PARAMS
//

// OrderedParams is a string->string map whose keys can be iterated in sorted
// order (it implements sort.Interface), as OAuth normalization requires.
type OrderedParams struct {
	allParams   map[string]string
	keyOrdering []string
}

func NewOrderedParams() *OrderedParams {
	return &OrderedParams{
		allParams:   make(map[string]string),
		keyOrdering: make([]string, 0),
	}
}

func (o *OrderedParams) Get(key string) string {
	return o.allParams[key]
}

// Keys sorts the key list in place and returns it.
func (o *OrderedParams) Keys() []string {
	sort.Sort(o)
	return o.keyOrdering
}

// Add stores a url-escaped copy of value under key.
func (o *OrderedParams) Add(key, value string) {
	o.AddUnescaped(key, http.URLEscape(value))
}

// AddUnescaped stores value verbatim (callers pass already-escaped values).
func (o *OrderedParams) AddUnescaped(key, value string) {
	o.allParams[key] = value
	o.keyOrdering = append(o.keyOrdering, key)
}

// Len, Less and Swap implement sort.Interface over the key list.
func (o *OrderedParams) Len() int {
	return len(o.keyOrdering)
}

func (o *OrderedParams) Less(i int, j int) bool {
	return o.keyOrdering[i] < o.keyOrdering[j]
}

func (o *OrderedParams) Swap(i int, j int) {
	o.keyOrdering[i], o.keyOrdering[j] = o.keyOrdering[j], o.keyOrdering[i]
}
// Clone returns a deep copy of the parameter set.
func (o *OrderedParams) Clone() *OrderedParams {
	clone := NewOrderedParams()
	for _, key := range o.Keys() {
		// Values are already escaped, so bypass Add's escaping. This copy of
		// the type names the raw-insert method AddUnescaped; the previous
		// call to clone.add(...) referenced a method that no longer exists
		// and failed to compile.
		clone.AddUnescaped(key, o.Get(key))
	}
	return clone
}
|
package cache
import (
"bytes"
bin "encoding/binary"
"errors"
"github.com/jmhodges/levigo"
"goposm/binary"
"goposm/element"
"os"
"path/filepath"
"strconv"
)
// Tuning knobs, read from the environment in init(); 0 means "use default".
var levelDbWriteBufferSize, levelDbWriteBlockSize int64
var deltaCacheBunchSize int64

// init reads optional LevelDB / delta-cache tuning values from environment
// variables. Parse errors are deliberately ignored: an unset or invalid
// variable leaves the value 0, which is treated as "use the default".
func init() {
	levelDbWriteBufferSize, _ = strconv.ParseInt(
		os.Getenv("GOPOSM_LEVELDB_BUFFERSIZE"), 10, 32)
	levelDbWriteBlockSize, _ = strconv.ParseInt(
		os.Getenv("GOPOSM_LEVELDB_BLOCKSIZE"), 10, 32)

	// bunchSize defines how many coordinates should be stored in a
	// single record. This is the maximum and a bunch will typically contain
	// fewer coordinates (e.g. when nodes are removed).
	//
	// A higher number improves -read mode (writing the cache) but also
	// increases the overhead during -write mode (reading coords).
	deltaCacheBunchSize, _ = strconv.ParseInt(
		os.Getenv("GOPOSM_DELTACACHE_BUNCHSIZE"), 10, 32)
	if deltaCacheBunchSize == 0 {
		deltaCacheBunchSize = 128
	}
}

var (
	// NotFound is returned by the Get* methods when a key is absent.
	NotFound = errors.New("not found")
)

// OSMCache groups the per-element-type LevelDB caches stored under one
// directory (coords, nodes, ways, relations).
type OSMCache struct {
	Dir       string
	Coords    *DeltaCoordsCache
	Ways      *WaysCache
	Nodes     *NodesCache
	Relations *RelationsCache
	opened    bool
}
// Close shuts down every sub-cache that is open and nils the pointers so
// Close is safe to call more than once (Open's error paths rely on this).
func (c *OSMCache) Close() {
	if c.Coords != nil {
		c.Coords.close()
		c.Coords = nil
	}
	if c.Nodes != nil {
		c.Nodes.close()
		c.Nodes = nil
	}
	if c.Ways != nil {
		c.Ways.close()
		c.Ways = nil
	}
	if c.Relations != nil {
		c.Relations.close()
		c.Relations = nil
	}
}

// NewOSMCache returns an unopened cache rooted at dir; call Open before use.
func NewOSMCache(dir string) *OSMCache {
	cache := &OSMCache{Dir: dir}
	return cache
}

// Open creates/opens all four sub-caches under c.Dir. On any failure the
// caches opened so far are closed and the error is returned.
func (c *OSMCache) Open() error {
	var err error
	c.Coords, err = NewDeltaCoordsCache(filepath.Join(c.Dir, "coords"))
	if err != nil {
		return err
	}
	c.Nodes, err = NewNodesCache(filepath.Join(c.Dir, "nodes"))
	if err != nil {
		c.Close()
		return err
	}
	c.Ways, err = NewWaysCache(filepath.Join(c.Dir, "ways"))
	if err != nil {
		c.Close()
		return err
	}
	c.Relations, err = NewRelationsCache(filepath.Join(c.Dir, "relations"))
	if err != nil {
		c.Close()
		return err
	}
	c.opened = true
	return nil
}
// Exists reports whether the cache is open or any of its directories are
// present on disk.
// NOTE(review): !os.IsNotExist(err) is also true for other stat errors
// (e.g. permission denied), which then counts as "exists" — confirm that
// this is the intended behavior.
func (c *OSMCache) Exists() bool {
	if c.opened {
		return true
	}
	if _, err := os.Stat(filepath.Join(c.Dir, "coords")); !os.IsNotExist(err) {
		return true
	}
	if _, err := os.Stat(filepath.Join(c.Dir, "nodes")); !os.IsNotExist(err) {
		return true
	}
	if _, err := os.Stat(filepath.Join(c.Dir, "ways")); !os.IsNotExist(err) {
		return true
	}
	if _, err := os.Stat(filepath.Join(c.Dir, "relations")); !os.IsNotExist(err) {
		return true
	}
	return false
}

// Remove closes the cache (if open) and deletes all four cache directories,
// stopping at the first removal error.
func (c *OSMCache) Remove() error {
	if c.opened {
		c.Close()
	}
	if err := os.RemoveAll(filepath.Join(c.Dir, "coords")); err != nil {
		return err
	}
	if err := os.RemoveAll(filepath.Join(c.Dir, "nodes")); err != nil {
		return err
	}
	if err := os.RemoveAll(filepath.Join(c.Dir, "ways")); err != nil {
		return err
	}
	if err := os.RemoveAll(filepath.Join(c.Dir, "relations")); err != nil {
		return err
	}
	return nil
}
// Cache wraps a single LevelDB database together with shared read/write
// options. It is embedded by the concrete per-element caches below.
type Cache struct {
	db *levigo.DB
	wo *levigo.WriteOptions
	ro *levigo.ReadOptions
}

// open creates/opens the LevelDB at path using the tuning values read in
// init() (0 leaves levigo's defaults in place).
func (c *Cache) open(path string) error {
	opts := levigo.NewOptions()
	opts.SetCache(levigo.NewLRUCache(1024 * 1024 * 8))
	opts.SetCreateIfMissing(true)
	opts.SetMaxOpenFiles(64)
	// save a few bytes by allowing leveldb to use delta encoding
	// for up to n keys (instead of only 16)
	opts.SetBlockRestartInterval(128)
	if levelDbWriteBufferSize != 0 {
		opts.SetWriteBufferSize(int(levelDbWriteBufferSize))
	}
	if levelDbWriteBlockSize != 0 {
		opts.SetBlockSize(int(levelDbWriteBlockSize))
	}
	db, err := levigo.Open(path, opts)
	if err != nil {
		return err
	}
	c.db = db
	c.wo = levigo.NewWriteOptions()
	c.ro = levigo.NewReadOptions()
	return nil
}

// close shuts the underlying LevelDB handle.
func (c *Cache) close() {
	c.db.Close()
}
// NodesCache stores tagged nodes, keyed by node id.
type NodesCache struct {
	Cache
}
// idToKeyBuf serializes an element id into the 8-byte big-endian form used
// as the LevelDB key for all caches (big-endian preserves numeric ordering
// of non-negative ids in the key space).
func idToKeyBuf(id int64) []byte {
	var buf bytes.Buffer
	bin.Write(&buf, bin.BigEndian, &id)
	return buf.Bytes()
}
// NewNodesCache opens (or creates) a nodes cache at path.
func NewNodesCache(path string) (*NodesCache, error) {
	cache := NodesCache{}
	err := cache.open(path)
	if err != nil {
		return nil, err
	}
	return &cache, err
}

// CoordsCache stores one coordinate record per node id (no delta encoding).
type CoordsCache struct {
	Cache
}

// NewCoordsCache opens (or creates) a coords cache at path.
func NewCoordsCache(path string) (*CoordsCache, error) {
	cache := CoordsCache{}
	err := cache.open(path)
	if err != nil {
		return nil, err
	}
	return &cache, err
}
// WaysCache stores ways keyed by way id. Besides the synchronous Put/Get
// methods it owns a background writer goroutine fed through toWrite
// (see _PutWays / wayWriter).
type WaysCache struct {
	Cache
	toWrite chan []element.Way
}

// NewWaysCache opens (or creates) a ways cache at path and starts the
// background writer goroutine used by _PutWays.
func NewWaysCache(path string) (*WaysCache, error) {
	cache := WaysCache{}
	cache.toWrite = make(chan []element.Way)
	// Open the database before starting the writer goroutine. Previously the
	// goroutine was launched first, so a failed open leaked the goroutine
	// (nothing ever closes toWrite) and any queued write would have gone
	// through a nil db handle.
	err := cache.open(path)
	if err != nil {
		return nil, err
	}
	go cache.wayWriter()
	return &cache, err
}
// RelationsCache stores relations keyed by relation id.
type RelationsCache struct {
	Cache
}

// NewRelationsCache opens (or creates) a relations cache at path.
func NewRelationsCache(path string) (*RelationsCache, error) {
	cache := RelationsCache{}
	err := cache.open(path)
	if err != nil {
		return nil, err
	}
	return &cache, err
}

// PutCoord stores a single node's coordinate.
func (p *CoordsCache) PutCoord(node *element.Node) error {
	keyBuf := idToKeyBuf(node.Id)
	data, err := binary.MarshalCoord(node)
	if err != nil {
		return err
	}
	return p.db.Put(p.wo, keyBuf, data)
}

// PutCoords stores a slice of coordinates in a single LevelDB write batch.
func (p *CoordsCache) PutCoords(nodes []element.Node) error {
	batch := levigo.NewWriteBatch()
	defer batch.Close()
	for _, node := range nodes {
		keyBuf := idToKeyBuf(node.Id)
		data, err := binary.MarshalCoord(&node)
		if err != nil {
			return err
		}
		batch.Put(keyBuf, data)
	}
	return p.db.Write(p.wo, batch)
}

// GetCoord looks up a coordinate by node id; returns NotFound if absent.
func (p *CoordsCache) GetCoord(id int64) (*element.Node, error) {
	keyBuf := idToKeyBuf(id)
	data, err := p.db.Get(p.ro, keyBuf)
	if err != nil {
		return nil, err
	}
	if data == nil {
		return nil, NotFound
	}
	node, err := binary.UnmarshalCoord(id, data)
	if err != nil {
		return nil, err
	}
	return node, nil
}
// PutNode stores a node. Untagged nodes are silently skipped — only tagged
// nodes belong in this cache (coordinates live in the coords cache).
// NOTE(review): this checks Tags == nil while PutNodes checks len(Tags) == 0;
// a non-nil empty map is stored here but skipped there — confirm intended.
func (p *NodesCache) PutNode(node *element.Node) error {
	if node.Tags == nil {
		return nil
	}
	keyBuf := idToKeyBuf(node.Id)
	data, err := binary.MarshalNode(node)
	if err != nil {
		return err
	}
	return p.db.Put(p.wo, keyBuf, data)
}

// PutNodes batch-stores all tagged nodes of the slice and returns how many
// were actually written (untagged nodes are skipped).
func (p *NodesCache) PutNodes(nodes []element.Node) (int, error) {
	batch := levigo.NewWriteBatch()
	defer batch.Close()
	var n int
	for _, node := range nodes {
		if len(node.Tags) == 0 {
			continue
		}
		keyBuf := idToKeyBuf(node.Id)
		data, err := binary.MarshalNode(&node)
		if err != nil {
			return 0, err
		}
		batch.Put(keyBuf, data)
		n += 1
	}
	return n, p.db.Write(p.wo, batch)
}

// GetNode looks up a node by id; returns NotFound if absent.
func (p *NodesCache) GetNode(id int64) (*element.Node, error) {
	keyBuf := idToKeyBuf(id)
	data, err := p.db.Get(p.ro, keyBuf)
	if err != nil {
		return nil, err
	}
	if data == nil {
		return nil, NotFound
	}
	node, err := binary.UnmarshalNode(data)
	if err != nil {
		return nil, err
	}
	return node, nil
}
// Iter streams all stored nodes over an unbuffered channel; the
// channel is closed when iteration finishes. Unmarshal failures
// panic inside the goroutine.
func (p *NodesCache) Iter() chan *element.Node {
	node := make(chan *element.Node)
	go func() {
		ro := levigo.NewReadOptions()
		ro.SetFillCache(false)
		// ReadOptions wrap C allocations; without this Close every
		// call to Iter leaked them.
		defer ro.Close()
		it := p.db.NewIterator(ro)
		defer it.Close()
		it.SeekToFirst()
		for ; it.Valid(); it.Next() {
			nodes, err := binary.UnmarshalNode(it.Value())
			if err != nil {
				panic(err)
			}
			node <- nodes
		}
		close(node)
	}()
	return node
}
// PutWay stores a single way.
func (p *WaysCache) PutWay(way *element.Way) error {
	keyBuf := idToKeyBuf(way.Id)
	data, err := binary.MarshalWay(way)
	if err != nil {
		return err
	}
	return p.db.Put(p.wo, keyBuf, data)
}

// PutWays stores many ways in a single LevelDB write batch.
func (p *WaysCache) PutWays(ways []element.Way) error {
	batch := levigo.NewWriteBatch()
	defer batch.Close()
	for _, way := range ways {
		keyBuf := idToKeyBuf(way.Id)
		data, err := binary.MarshalWay(&way)
		if err != nil {
			return err
		}
		batch.Put(keyBuf, data)
	}
	return p.db.Write(p.wo, batch)
}

// _PutWays hands a slice of ways to the background writer goroutine
// (see wayWriter); it blocks until the writer accepts the batch.
func (p *WaysCache) _PutWays(ways []element.Way) {
	p.toWrite <- ways
}
// wayWriter consumes way batches from toWrite and writes each as one
// LevelDB batch. It runs until the toWrite channel is closed.
func (p *WaysCache) wayWriter() {
	for ways := range p.toWrite {
		// Close the batch at the end of each iteration. The previous
		// `defer batch.Close()` inside the loop never ran (the
		// function normally never returns) and accumulated one
		// pending Close — i.e. one leaked C allocation — per batch.
		batch := levigo.NewWriteBatch()
		for _, way := range ways {
			keyBuf := idToKeyBuf(way.Id)
			data, err := binary.MarshalWay(&way)
			if err != nil {
				batch.Close()
				panic(err)
			}
			batch.Put(keyBuf, data)
		}
		_ = p.db.Write(p.wo, batch)
		batch.Close()
	}
}
// GetWay returns the way for id, or NotFound.
func (p *WaysCache) GetWay(id int64) (*element.Way, error) {
	keyBuf := idToKeyBuf(id)
	data, err := p.db.Get(p.ro, keyBuf)
	if err != nil {
		return nil, err
	}
	if data == nil {
		return nil, NotFound
	}
	way, err := binary.UnmarshalWay(data)
	if err != nil {
		return nil, err
	}
	return way, nil
}
// Iter streams all stored ways over an unbuffered channel; the
// channel is closed when iteration finishes. Unmarshal failures
// panic inside the goroutine.
func (p *WaysCache) Iter() chan *element.Way {
	way := make(chan *element.Way)
	go func() {
		ro := levigo.NewReadOptions()
		ro.SetFillCache(false)
		// ReadOptions wrap C allocations; without this Close every
		// call to Iter leaked them.
		defer ro.Close()
		it := p.db.NewIterator(ro)
		defer it.Close()
		it.SeekToFirst()
		for ; it.Valid(); it.Next() {
			ways, err := binary.UnmarshalWay(it.Value())
			if err != nil {
				panic(err)
			}
			way <- ways
		}
		close(way)
	}()
	return way
}
// FillMembers resolves the Way pointer of every WAY member in place;
// members of other types are left untouched.
func (self *WaysCache) FillMembers(members []element.Member) error {
	// NOTE(review): the nil check is redundant — len(nil) is 0.
	if members == nil || len(members) == 0 {
		return nil
	}
	for i, member := range members {
		if member.Type != element.WAY {
			continue
		}
		way, err := self.GetWay(member.Id)
		if err != nil {
			return err
		}
		members[i].Way = way
	}
	return nil
}
// PutRelation stores a single relation.
func (p *RelationsCache) PutRelation(relation *element.Relation) error {
	keyBuf := idToKeyBuf(relation.Id)
	data, err := binary.MarshalRelation(relation)
	if err != nil {
		return err
	}
	return p.db.Put(p.wo, keyBuf, data)
}

// PutRelations stores many relations in a single LevelDB write batch.
func (p *RelationsCache) PutRelations(rels []element.Relation) error {
	batch := levigo.NewWriteBatch()
	defer batch.Close()
	for _, rel := range rels {
		keyBuf := idToKeyBuf(rel.Id)
		data, err := binary.MarshalRelation(&rel)
		if err != nil {
			return err
		}
		batch.Put(keyBuf, data)
	}
	return p.db.Write(p.wo, batch)
}
// Iter streams all stored relations over an unbuffered channel; the
// channel is closed when iteration finishes. Unmarshal failures
// panic inside the goroutine.
func (p *RelationsCache) Iter() chan *element.Relation {
	rel := make(chan *element.Relation)
	go func() {
		ro := levigo.NewReadOptions()
		ro.SetFillCache(false)
		// ReadOptions wrap C allocations; without this Close every
		// call to Iter leaked them.
		defer ro.Close()
		it := p.db.NewIterator(ro)
		defer it.Close()
		it.SeekToFirst()
		for ; it.Valid(); it.Next() {
			relation, err := binary.UnmarshalRelation(it.Value())
			if err != nil {
				panic(err)
			}
			rel <- relation
		}
		close(rel)
	}()
	return rel
}
// GetRelation returns the relation for id, or NotFound.
func (p *RelationsCache) GetRelation(id int64) (*element.Relation, error) {
	keyBuf := idToKeyBuf(id)
	data, err := p.db.Get(p.ro, keyBuf)
	if err != nil {
		return nil, err
	}
	if data == nil {
		return nil, NotFound
	}
	relation, err := binary.UnmarshalRelation(data)
	if err != nil {
		return nil, err
	}
	// err is always nil at this point.
	return relation, err
}
// Close closes the underlying LevelDB handle. NOTE(review): the
// WriteOptions/ReadOptions created in open are not released here —
// confirm whether levigo requires closing them.
func (p *Cache) Close() {
	p.db.Close()
}
make cache dir on Open() if missing
package cache
import (
"bytes"
bin "encoding/binary"
"errors"
"github.com/jmhodges/levigo"
"goposm/binary"
"goposm/element"
"os"
"path/filepath"
"strconv"
)
// LevelDB tuning knobs, read once at startup from the environment.
var levelDbWriteBufferSize, levelDbWriteBlockSize int64
var deltaCacheBunchSize int64

func init() {
	// Parse errors are deliberately ignored: unset or invalid values
	// leave the variables at 0, which means "use the default".
	levelDbWriteBufferSize, _ = strconv.ParseInt(
		os.Getenv("GOPOSM_LEVELDB_BUFFERSIZE"), 10, 32)
	levelDbWriteBlockSize, _ = strconv.ParseInt(
		os.Getenv("GOPOSM_LEVELDB_BLOCKSIZE"), 10, 32)
	// bunchSize defines how many coordinates should be stored in a
	// single record. This is the maximum and a bunch will typically contain
	// less coordinates (e.g. when nodes are removed).
	//
	// A higher number improves -read mode (writing the cache) but also
	// increases the overhead during -write mode (reading coords).
	deltaCacheBunchSize, _ = strconv.ParseInt(
		os.Getenv("GOPOSM_DELTACACHE_BUNCHSIZE"), 10, 32)
	if deltaCacheBunchSize == 0 {
		deltaCacheBunchSize = 128
	}
}
var (
	// NotFound is the sentinel returned by the Get* methods for a
	// missing key. (Go convention would name it ErrNotFound, but the
	// name is exported API and therefore kept.)
	NotFound = errors.New("not found")
)

// OSMCache bundles the four element caches living below one
// directory.
type OSMCache struct {
	Dir string
	Coords *DeltaCoordsCache
	Ways *WaysCache
	Nodes *NodesCache
	Relations *RelationsCache
	opened bool
}

// Close closes every sub-cache that is currently open and nils the
// pointers, so a second Close is safe.
func (c *OSMCache) Close() {
	if c.Coords != nil {
		c.Coords.close()
		c.Coords = nil
	}
	if c.Nodes != nil {
		c.Nodes.close()
		c.Nodes = nil
	}
	if c.Ways != nil {
		c.Ways.close()
		c.Ways = nil
	}
	if c.Relations != nil {
		c.Relations.close()
		c.Relations = nil
	}
}
// NewOSMCache returns an unopened cache rooted at dir; call Open
// before use.
func NewOSMCache(dir string) *OSMCache {
	return &OSMCache{Dir: dir}
}
// Open creates the cache directory if it is missing and opens all
// four sub-caches. On failure of a later sub-cache, the already
// opened ones are closed again.
func (c *OSMCache) Open() error {
	err := os.MkdirAll(c.Dir, 0755)
	if err != nil {
		return err
	}
	c.Coords, err = NewDeltaCoordsCache(filepath.Join(c.Dir, "coords"))
	if err != nil {
		return err
	}
	c.Nodes, err = NewNodesCache(filepath.Join(c.Dir, "nodes"))
	if err != nil {
		c.Close()
		return err
	}
	c.Ways, err = NewWaysCache(filepath.Join(c.Dir, "ways"))
	if err != nil {
		c.Close()
		return err
	}
	c.Relations, err = NewRelationsCache(filepath.Join(c.Dir, "relations"))
	if err != nil {
		c.Close()
		return err
	}
	c.opened = true
	return nil
}

// Exists reports whether the cache is open or any sub-cache
// directory is already present on disk.
func (c *OSMCache) Exists() bool {
	if c.opened {
		return true
	}
	if _, err := os.Stat(filepath.Join(c.Dir, "coords")); !os.IsNotExist(err) {
		return true
	}
	if _, err := os.Stat(filepath.Join(c.Dir, "nodes")); !os.IsNotExist(err) {
		return true
	}
	if _, err := os.Stat(filepath.Join(c.Dir, "ways")); !os.IsNotExist(err) {
		return true
	}
	if _, err := os.Stat(filepath.Join(c.Dir, "relations")); !os.IsNotExist(err) {
		return true
	}
	return false
}
// Remove closes the cache (if open) and deletes all four sub-cache
// directories below c.Dir. The base directory itself is kept.
func (c *OSMCache) Remove() error {
	if c.opened {
		c.Close()
	}
	for _, sub := range []string{"coords", "nodes", "ways", "relations"} {
		if err := os.RemoveAll(filepath.Join(c.Dir, sub)); err != nil {
			return err
		}
	}
	return nil
}
// Cache wraps a LevelDB handle together with its default read and
// write options.
type Cache struct {
	db *levigo.DB
	wo *levigo.WriteOptions
	ro *levigo.ReadOptions
}

// open opens (creating if necessary) the LevelDB database at path,
// applying the tuning values read from the environment in init.
func (c *Cache) open(path string) error {
	opts := levigo.NewOptions()
	opts.SetCache(levigo.NewLRUCache(1024 * 1024 * 8))
	opts.SetCreateIfMissing(true)
	opts.SetMaxOpenFiles(64)
	// save a few bytes by allowing leveldb to use delta encoding
	// for up to n keys (instead of only 16)
	opts.SetBlockRestartInterval(128)
	if levelDbWriteBufferSize != 0 {
		opts.SetWriteBufferSize(int(levelDbWriteBufferSize))
	}
	if levelDbWriteBlockSize != 0 {
		opts.SetBlockSize(int(levelDbWriteBlockSize))
	}
	db, err := levigo.Open(path, opts)
	if err != nil {
		return err
	}
	c.db = db
	c.wo = levigo.NewWriteOptions()
	c.ro = levigo.NewReadOptions()
	return nil
}

// close closes the LevelDB handle.
func (c *Cache) close() {
	c.db.Close()
}

// NodesCache stores nodes that carry tags.
type NodesCache struct {
	Cache
}
// idToKeyBuf encodes id as an 8-byte big-endian key. Big-endian keeps
// LevelDB's lexicographic key order identical to numeric id order.
// Writing into a fixed slice avoids the bytes.Buffer plus
// reflection-based bin.Write of the previous implementation while
// producing byte-identical keys.
func idToKeyBuf(id int64) []byte {
	b := make([]byte, 8)
	bin.BigEndian.PutUint64(b, uint64(id))
	return b
}
func NewNodesCache(path string) (*NodesCache, error) {
cache := NodesCache{}
err := cache.open(path)
if err != nil {
return nil, err
}
return &cache, err
}
type CoordsCache struct {
Cache
}
func NewCoordsCache(path string) (*CoordsCache, error) {
cache := CoordsCache{}
err := cache.open(path)
if err != nil {
return nil, err
}
return &cache, err
}
type WaysCache struct {
Cache
toWrite chan []element.Way
}
// NewWaysCache opens (or creates) a ways cache at path.
//
// The background writer goroutine is started only after the database
// opened successfully. The previous code started it before open, so
// _PutWays could race against an unopened db and the goroutine leaked
// whenever open failed.
func NewWaysCache(path string) (*WaysCache, error) {
	cache := WaysCache{}
	cache.toWrite = make(chan []element.Way)
	err := cache.open(path)
	if err != nil {
		return nil, err
	}
	go cache.wayWriter()
	return &cache, nil
}
type RelationsCache struct {
Cache
}
func NewRelationsCache(path string) (*RelationsCache, error) {
cache := RelationsCache{}
err := cache.open(path)
if err != nil {
return nil, err
}
return &cache, err
}
func (p *CoordsCache) PutCoord(node *element.Node) error {
keyBuf := idToKeyBuf(node.Id)
data, err := binary.MarshalCoord(node)
if err != nil {
return err
}
return p.db.Put(p.wo, keyBuf, data)
}
func (p *CoordsCache) PutCoords(nodes []element.Node) error {
batch := levigo.NewWriteBatch()
defer batch.Close()
for _, node := range nodes {
keyBuf := idToKeyBuf(node.Id)
data, err := binary.MarshalCoord(&node)
if err != nil {
return err
}
batch.Put(keyBuf, data)
}
return p.db.Write(p.wo, batch)
}
func (p *CoordsCache) GetCoord(id int64) (*element.Node, error) {
keyBuf := idToKeyBuf(id)
data, err := p.db.Get(p.ro, keyBuf)
if err != nil {
return nil, err
}
if data == nil {
return nil, NotFound
}
node, err := binary.UnmarshalCoord(id, data)
if err != nil {
return nil, err
}
return node, nil
}
// PutNode stores a tagged node; nodes without tags are skipped (they
// are plain coordinates and belong in the coords cache).
func (p *NodesCache) PutNode(node *element.Node) error {
	// Use len() == 0 like PutNodes does, so an empty-but-non-nil tag
	// map is skipped as well (the old `Tags == nil` check stored it).
	if len(node.Tags) == 0 {
		return nil
	}
	keyBuf := idToKeyBuf(node.Id)
	data, err := binary.MarshalNode(node)
	if err != nil {
		return err
	}
	return p.db.Put(p.wo, keyBuf, data)
}
func (p *NodesCache) PutNodes(nodes []element.Node) (int, error) {
batch := levigo.NewWriteBatch()
defer batch.Close()
var n int
for _, node := range nodes {
if len(node.Tags) == 0 {
continue
}
keyBuf := idToKeyBuf(node.Id)
data, err := binary.MarshalNode(&node)
if err != nil {
return 0, err
}
batch.Put(keyBuf, data)
n += 1
}
return n, p.db.Write(p.wo, batch)
}
func (p *NodesCache) GetNode(id int64) (*element.Node, error) {
keyBuf := idToKeyBuf(id)
data, err := p.db.Get(p.ro, keyBuf)
if err != nil {
return nil, err
}
if data == nil {
return nil, NotFound
}
node, err := binary.UnmarshalNode(data)
if err != nil {
return nil, err
}
return node, nil
}
// Iter streams all stored nodes over an unbuffered channel; the
// channel is closed when iteration finishes. Unmarshal failures
// panic inside the goroutine.
func (p *NodesCache) Iter() chan *element.Node {
	node := make(chan *element.Node)
	go func() {
		ro := levigo.NewReadOptions()
		ro.SetFillCache(false)
		// ReadOptions wrap C allocations; without this Close every
		// call to Iter leaked them.
		defer ro.Close()
		it := p.db.NewIterator(ro)
		defer it.Close()
		it.SeekToFirst()
		for ; it.Valid(); it.Next() {
			nodes, err := binary.UnmarshalNode(it.Value())
			if err != nil {
				panic(err)
			}
			node <- nodes
		}
		close(node)
	}()
	return node
}
func (p *WaysCache) PutWay(way *element.Way) error {
keyBuf := idToKeyBuf(way.Id)
data, err := binary.MarshalWay(way)
if err != nil {
return err
}
return p.db.Put(p.wo, keyBuf, data)
}
func (p *WaysCache) PutWays(ways []element.Way) error {
batch := levigo.NewWriteBatch()
defer batch.Close()
for _, way := range ways {
keyBuf := idToKeyBuf(way.Id)
data, err := binary.MarshalWay(&way)
if err != nil {
return err
}
batch.Put(keyBuf, data)
}
return p.db.Write(p.wo, batch)
}
func (p *WaysCache) _PutWays(ways []element.Way) {
p.toWrite <- ways
}
// wayWriter consumes way batches from toWrite and writes each as one
// LevelDB batch. It runs until the toWrite channel is closed.
func (p *WaysCache) wayWriter() {
	for ways := range p.toWrite {
		// Close the batch at the end of each iteration. The previous
		// `defer batch.Close()` inside the loop never ran (the
		// function normally never returns) and accumulated one
		// pending Close — i.e. one leaked C allocation — per batch.
		batch := levigo.NewWriteBatch()
		for _, way := range ways {
			keyBuf := idToKeyBuf(way.Id)
			data, err := binary.MarshalWay(&way)
			if err != nil {
				batch.Close()
				panic(err)
			}
			batch.Put(keyBuf, data)
		}
		_ = p.db.Write(p.wo, batch)
		batch.Close()
	}
}
func (p *WaysCache) GetWay(id int64) (*element.Way, error) {
keyBuf := idToKeyBuf(id)
data, err := p.db.Get(p.ro, keyBuf)
if err != nil {
return nil, err
}
if data == nil {
return nil, NotFound
}
way, err := binary.UnmarshalWay(data)
if err != nil {
return nil, err
}
return way, nil
}
// Iter streams all stored ways over an unbuffered channel; the
// channel is closed when iteration finishes. Unmarshal failures
// panic inside the goroutine.
func (p *WaysCache) Iter() chan *element.Way {
	way := make(chan *element.Way)
	go func() {
		ro := levigo.NewReadOptions()
		ro.SetFillCache(false)
		// ReadOptions wrap C allocations; without this Close every
		// call to Iter leaked them.
		defer ro.Close()
		it := p.db.NewIterator(ro)
		defer it.Close()
		it.SeekToFirst()
		for ; it.Valid(); it.Next() {
			ways, err := binary.UnmarshalWay(it.Value())
			if err != nil {
				panic(err)
			}
			way <- ways
		}
		close(way)
	}()
	return way
}
// FillMembers resolves the Way pointer of every WAY member in place;
// members of other types are left untouched.
func (self *WaysCache) FillMembers(members []element.Member) error {
	// len() of a nil slice is 0, so the former explicit nil check
	// was redundant.
	if len(members) == 0 {
		return nil
	}
	for i, member := range members {
		if member.Type != element.WAY {
			continue
		}
		way, err := self.GetWay(member.Id)
		if err != nil {
			return err
		}
		members[i].Way = way
	}
	return nil
}
func (p *RelationsCache) PutRelation(relation *element.Relation) error {
keyBuf := idToKeyBuf(relation.Id)
data, err := binary.MarshalRelation(relation)
if err != nil {
return err
}
return p.db.Put(p.wo, keyBuf, data)
}
func (p *RelationsCache) PutRelations(rels []element.Relation) error {
batch := levigo.NewWriteBatch()
defer batch.Close()
for _, rel := range rels {
keyBuf := idToKeyBuf(rel.Id)
data, err := binary.MarshalRelation(&rel)
if err != nil {
return err
}
batch.Put(keyBuf, data)
}
return p.db.Write(p.wo, batch)
}
// Iter streams all stored relations over an unbuffered channel; the
// channel is closed when iteration finishes. Unmarshal failures
// panic inside the goroutine.
func (p *RelationsCache) Iter() chan *element.Relation {
	rel := make(chan *element.Relation)
	go func() {
		ro := levigo.NewReadOptions()
		ro.SetFillCache(false)
		// ReadOptions wrap C allocations; without this Close every
		// call to Iter leaked them.
		defer ro.Close()
		it := p.db.NewIterator(ro)
		defer it.Close()
		it.SeekToFirst()
		for ; it.Valid(); it.Next() {
			relation, err := binary.UnmarshalRelation(it.Value())
			if err != nil {
				panic(err)
			}
			rel <- relation
		}
		close(rel)
	}()
	return rel
}
// GetRelation returns the relation for id, or NotFound.
func (p *RelationsCache) GetRelation(id int64) (*element.Relation, error) {
	keyBuf := idToKeyBuf(id)
	data, err := p.db.Get(p.ro, keyBuf)
	if err != nil {
		return nil, err
	}
	if data == nil {
		return nil, NotFound
	}
	relation, err := binary.UnmarshalRelation(data)
	if err != nil {
		return nil, err
	}
	// err is provably nil here; return nil explicitly, matching the
	// other getters in this file.
	return relation, nil
}
// Close closes the underlying LevelDB handle. NOTE(review): the
// WriteOptions/ReadOptions created in open are not released here —
// confirm whether levigo requires closing them.
func (p *Cache) Close() {
	p.db.Close()
}
|
package ogdat
import (
"code.google.com/p/go-uuid/uuid"
"encoding/json"
"net/url"
"time"
// "fmt"
)
const OGDTimeSpecifier = "2006-01-02T15:04:05" // RFC 3339 = ISO 8601 ohne Zeitzone
const (
OGDTime1 = time.RFC3339Nano
OGDTime2 = time.RFC3339
OGDTime3 = OGDTimeSpecifier
OGDTimeUnknow
)
// Beschreibung is one row of the OGD field-description table
// (German column names preserved from the specification; note the
// pre-existing "Defintion_DE" spelling is exported API).
type Beschreibung struct {
	ID int
	Bezeichner string
	OGD_Kurzname string
	CKAN_Feld string
	Defintion_DE string
	Erlauterung string
	Beispiel string
	ONA2270 string
	ISO19115 string
	RDFProperty string
	Definition_EN string
}

// Kategorie is an OGD dataset category; only ID takes part in JSON
// (un)marshalling.
type Kategorie struct {
	NumID int `json:"-"`
	ID string
	PrettyName string `json:"-"`
	RDFProperty string `json:"-"`
}

// String returns the human-readable category name.
func (kat *Kategorie) String() string {
	return kat.PrettyName
}
var (
Arbeit = Kategorie{NumID: 1, ID: "arbeit", PrettyName: "Arbeit", RDFProperty: ""}
Bevoelkerung = Kategorie{NumID: 2, ID: "bevölkerung", PrettyName: "Bevölkerung", RDFProperty: ""}
BildungForschung = Kategorie{NumID: 3, ID: "bildung-und-forschung", PrettyName: "Bildung und Forschung", RDFProperty: ""}
FinanzRW = Kategorie{NumID: 4, ID: "finanzen-und-rechnungswesen", PrettyName: "Finanzen und Rechnungswesen", RDFProperty: ""}
GeographPlanung = Kategorie{NumID: 5, ID: "geographie-und-planung", PrettyName: "Geographie und Planung", RDFProperty: ""}
GesellSoziales = Kategorie{NumID: 6, ID: "gesellschaft-und-soziales", PrettyName: "Gesellschaft und Soziales", RDFProperty: ""}
Gesundheit = Kategorie{NumID: 7, ID: "gesundheit", PrettyName: "Gesundheit", RDFProperty: ""}
KunstKultur = Kategorie{NumID: 8, ID: "kunst-und-kultur", PrettyName: "Kunst und Kultur", RDFProperty: ""}
LandFW = Kategorie{NumID: 9, ID: "land-und-forstwirtschaft", PrettyName: "Land und Forstwirtschaft", RDFProperty: ""}
SportFZ = Kategorie{NumID: 10, ID: "sport-und-freizeit", PrettyName: "Sport und Freizeit", RDFProperty: ""}
Umwelt = Kategorie{NumID: 11, ID: "umwelt", PrettyName: "Umwelt", RDFProperty: ""}
VerkehrTechnik = Kategorie{NumID: 12, ID: "verkehr-und-technik", PrettyName: "Verkehr und Technik", RDFProperty: ""}
VerwaltPol = Kategorie{NumID: 13, ID: "verwaltung-und-politik", PrettyName: "Verwaltung und Politik", RDFProperty: ""}
WirtTourism = Kategorie{NumID: 14, ID: "wirtschaft-und-tourismus", PrettyName: "Wirtschaft und Tourismus", RDFProperty: ""}
)
var categories = []Kategorie{Arbeit,
Bevoelkerung,
BildungForschung,
FinanzRW,
GeographPlanung,
GesellSoziales,
Gesundheit,
KunstKultur,
LandFW,
SportFZ,
Umwelt,
VerkehrTechnik,
VerwaltPol,
WirtTourism,
}
var categorymap = make(map[string]Kategorie)
// Tags is a single dataset keyword.
type Tags string

// ResourceSpecifier describes a resource format (e.g. "csv").
type ResourceSpecifier string

// Url keeps the raw JSON string and, when parseable, the parsed URL.
type Url struct {
	*url.URL
	Raw string
}

// Identfier (sic — exported name kept) keeps the raw identifier and,
// when it is a valid UUID, the parsed UUID.
type Identfier struct {
	*uuid.UUID
	Raw string
}

func (id *Identfier) String() string {
	return id.Raw
}

// OGDTime keeps the raw timestamp text, the detected layout and,
// when parseable, the parsed time.
type OGDTime struct {
	*time.Time
	Raw string
	Format string
}

// String returns the raw timestamp text. NOTE(review): the receiver
// name shadows the time package inside this method.
func (time *OGDTime) String() string {
	return time.Raw
}
// UnmarshalJSON tries the known timestamp layouts from most to least
// specific and records the one that matched in Format. It never
// reports a parse failure as an error; an undetectable layout is
// flagged via Format = OGDTimeUnknow instead.
func (ogdtime *OGDTime) UnmarshalJSON(data []byte) error {
	var raw string
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}
	ogdtime.Raw = raw
	ogdtime.Format = OGDTime1
	t, err := time.Parse(ogdtime.Format, raw)
	if err != nil {
		ogdtime.Format = OGDTime2
		t, err = time.Parse(ogdtime.Format, raw)
		if err != nil {
			ogdtime.Format = OGDTime3
			t, err = time.Parse(ogdtime.Format, raw)
			if err != nil {
				// No layout matched; Time below points at the zero time.
				ogdtime.Format = OGDTimeUnknow
			}
		}
	}
	ogdtime.Time = &t
	return nil
}
// UnmarshalJSON accepts a JSON string, always keeps the raw text and
// additionally stores the parsed form when it is a valid URL.
func (u *Url) UnmarshalJSON(data []byte) error {
	var raw string
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}
	u.Raw = raw
	// Distinct local name so the net/url package is not shadowed.
	parsed, err := url.Parse(raw)
	if err != nil {
		return err
	}
	u.URL = parsed
	return nil
}
// UnmarshalJSON keeps the raw string and, when it parses as a UUID,
// the parsed UUID as well; non-UUID identifiers are accepted.
func (id *Identfier) UnmarshalJSON(data []byte) error {
	var raw string
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}
	// raw is already a string; the previous string(raw) was a no-op.
	id.Raw = raw
	if uuid := uuid.Parse(raw); uuid != nil {
		id.UUID = &uuid
	}
	return nil
}
// UnmarshalJSON maps the raw category id onto one of the core
// categories. Unknown ids are kept but flagged with NumID -1 and a
// marker PrettyName so validators can surface them.
func (kat *Kategorie) UnmarshalJSON(data []byte) error {
	var raw string
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}
	corecat, found := categorymap[raw]
	if !found {
		kat.NumID = -1
		kat.ID = raw
		kat.PrettyName = "**** NON core category **** - " + kat.ID
	} else {
		*kat = corecat
	}
	return nil
}
// Extras holds the OGD metadata carried in CKAN's "extras" section.
type Extras struct {
	// Core
	Metadata_Identifier Identfier `json:"metadata_identifier"` // CKAN uses since API Version 2 a UUID V4, cf. https://github.com/okfn/ckan/blob/master/ckan/model/types.py
	Metadata_Modified string `json:"metadata_modified"`
	Categorization []Kategorie `json:"categorization"`
	Begin_DateTime OGDTime `json:"begin_datetime"`
	// Optional
	Schema_Name string `json:"schema_name"`
}

// Resource describes one downloadable representation of a dataset.
type Resource struct {
	// Core
	URL *Url `json:"url"`
	Format ResourceSpecifier `json:"format"`
	// Optional
	Resource_Name string
}

// MetaData is the top-level CKAN dataset record.
type MetaData struct {
	// Core
	Title string `json:"title"`
	Description string `json:"notes"`
	Schlagworte []Tags `json:"tags"`
	Maintainer string `json:"maintainer"`
	License string `json:"license"` // should be the URI of the license document
	// Optional
	// nested structures
	Extras `json:"extras"`
	Resource []Resource `json:"resources"`
}

// init indexes the core categories by their CKAN id for the lookup
// done in Kategorie.UnmarshalJSON.
func init() {
	for _, val := range categories {
		categorymap[val.ID] = val
	}
}
add some more optional OGD fields
package ogdat
import (
"code.google.com/p/go-uuid/uuid"
"encoding/json"
"net/url"
"time"
)
// OGDTimeSpecifier is RFC 3339 / ISO 8601 without a time zone.
const OGDTimeSpecifier = "2006-01-02T15:04:05"

// Accepted timestamp layouts, tried in order by OGDTime.UnmarshalJSON.
const (
	OGDTime1 = time.RFC3339Nano
	OGDTime2 = time.RFC3339
	OGDTime3 = OGDTimeSpecifier
	// OGDTimeUnknow marks a timestamp whose layout could not be
	// detected. The valueless declaration it replaces silently
	// repeated the previous expression (a Go const-block rule), so
	// OGDTimeUnknow == OGDTime3 and an unknown format was
	// indistinguishable from a valid OGDTime3 one.
	OGDTimeUnknow = ""
)
type Beschreibung struct {
ID int
Bezeichner string
OGD_Kurzname string
CKAN_Feld string
Defintion_DE string
Erlauterung string
Beispiel string
ONA2270 string
ISO19115 string
RDFProperty string
Definition_EN string
}
type Kategorie struct {
NumID int `json:"-"`
ID string
PrettyName string `json:"-"`
RDFProperty string `json:"-"`
}
func (kat *Kategorie) String() string {
return kat.PrettyName
}
var (
Arbeit = Kategorie{NumID: 1, ID: "arbeit", PrettyName: "Arbeit", RDFProperty: ""}
Bevoelkerung = Kategorie{NumID: 2, ID: "bevölkerung", PrettyName: "Bevölkerung", RDFProperty: ""}
BildungForschung = Kategorie{NumID: 3, ID: "bildung-und-forschung", PrettyName: "Bildung und Forschung", RDFProperty: ""}
FinanzRW = Kategorie{NumID: 4, ID: "finanzen-und-rechnungswesen", PrettyName: "Finanzen und Rechnungswesen", RDFProperty: ""}
GeographPlanung = Kategorie{NumID: 5, ID: "geographie-und-planung", PrettyName: "Geographie und Planung", RDFProperty: ""}
GesellSoziales = Kategorie{NumID: 6, ID: "gesellschaft-und-soziales", PrettyName: "Gesellschaft und Soziales", RDFProperty: ""}
Gesundheit = Kategorie{NumID: 7, ID: "gesundheit", PrettyName: "Gesundheit", RDFProperty: ""}
KunstKultur = Kategorie{NumID: 8, ID: "kunst-und-kultur", PrettyName: "Kunst und Kultur", RDFProperty: ""}
LandFW = Kategorie{NumID: 9, ID: "land-und-forstwirtschaft", PrettyName: "Land und Forstwirtschaft", RDFProperty: ""}
SportFZ = Kategorie{NumID: 10, ID: "sport-und-freizeit", PrettyName: "Sport und Freizeit", RDFProperty: ""}
Umwelt = Kategorie{NumID: 11, ID: "umwelt", PrettyName: "Umwelt", RDFProperty: ""}
VerkehrTechnik = Kategorie{NumID: 12, ID: "verkehr-und-technik", PrettyName: "Verkehr und Technik", RDFProperty: ""}
VerwaltPol = Kategorie{NumID: 13, ID: "verwaltung-und-politik", PrettyName: "Verwaltung und Politik", RDFProperty: ""}
WirtTourism = Kategorie{NumID: 14, ID: "wirtschaft-und-tourismus", PrettyName: "Wirtschaft und Tourismus", RDFProperty: ""}
)
var categories = []Kategorie{Arbeit,
Bevoelkerung,
BildungForschung,
FinanzRW,
GeographPlanung,
GesellSoziales,
Gesundheit,
KunstKultur,
LandFW,
SportFZ,
Umwelt,
VerkehrTechnik,
VerwaltPol,
WirtTourism,
}
var categorymap = make(map[string]Kategorie)
type Tags string
type ResourceSpecifier string
type Url struct {
*url.URL
Raw string
}
type Identfier struct {
*uuid.UUID
Raw string
}
func (id *Identfier) String() string {
return id.Raw
}
type OGDTime struct {
*time.Time
Raw string
Format string
}
func (time *OGDTime) String() string {
return time.Raw
}
func (ogdtime *OGDTime) UnmarshalJSON(data []byte) error {
var raw string
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
ogdtime.Raw = raw
ogdtime.Format = OGDTime1
t, err := time.Parse(ogdtime.Format, raw)
if err != nil {
ogdtime.Format = OGDTime2
t, err = time.Parse(ogdtime.Format, raw)
if err != nil {
ogdtime.Format = OGDTime3
t, err = time.Parse(ogdtime.Format, raw)
if err != nil {
ogdtime.Format = OGDTimeUnknow
}
}
}
ogdtime.Time = &t
return nil
}
// UnmarshalJSON accepts a JSON string, always keeps the raw text and
// additionally stores the parsed form when it is a valid URL.
func (u *Url) UnmarshalJSON(data []byte) error {
	var raw string
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}
	u.Raw = raw
	// Distinct local name so the net/url package is not shadowed.
	parsed, err := url.Parse(raw)
	if err != nil {
		return err
	}
	u.URL = parsed
	return nil
}
// UnmarshalJSON keeps the raw string and, when it parses as a UUID,
// the parsed UUID as well; non-UUID identifiers are accepted.
func (id *Identfier) UnmarshalJSON(data []byte) error {
	var raw string
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}
	// raw is already a string; the previous string(raw) was a no-op.
	id.Raw = raw
	if uuid := uuid.Parse(raw); uuid != nil {
		id.UUID = &uuid
	}
	return nil
}
func (kat *Kategorie) UnmarshalJSON(data []byte) error {
var raw string
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
corecat, found := categorymap[raw]
if !found {
kat.NumID = -1
kat.ID = raw
kat.PrettyName = "**** NON core category **** - " + kat.ID
} else {
*kat = corecat
}
return nil
}
type Extras struct {
// Core
Metadata_Identifier Identfier `json:"metadata_identifier"` // CKAN uses since API Version 2 a UUID V4, cf. https://github.com/okfn/ckan/blob/master/ckan/model/types.py
Metadata_Modified string `json:"metadata_modified"`
Categorization []Kategorie `json:"categorization"`
Begin_DateTime OGDTime `json:"begin_datetime"`
// Optional
Schema_Name string `json:"schema_name"`
}
type Resource struct {
// Core
URL *Url `json:"url"`
Format ResourceSpecifier `json:"format"`
// Optional
Resource_Name string
Schema_Language string
}
type MetaData struct {
// Core
Title string `json:"title"`
Description string `json:"notes"`
Schlagworte []Tags `json:"tags"`
Maintainer string `json:"maintainer"`
License string `json:"license"` // Sollte URI des Lizenzdokuments sein
// Optional
// nested structures
Extras `json:"extras"`
Resource []Resource `json:"resources"`
}
func init() {
for _, val := range categories {
categorymap[val.ID] = val
}
}
|
// +build linux
package net
import (
"encoding/hex"
"errors"
"fmt"
"io/ioutil"
"net"
"os"
"strconv"
"strings"
"syscall"
"github.com/shirou/gopsutil/internal/common"
)
// NetIOCounters returns network I/O statistics for every network
// interface installed on the system. If the pernic argument is false,
// only the sum of all interfaces is returned (under the name 'all');
// if true, every network interface is returned separately.
func NetIOCounters(pernic bool) ([]NetIOCountersStat, error) {
	// HostProc honors an overridden proc root (e.g. inside containers).
	filename := common.HostProc("net/dev")
	return NetIOCountersByFile(pernic, filename)
}
// NetIOCountersByFile parses a /proc/net/dev-style file. When pernic
// is false the per-interface counters are summed into a single "all"
// entry by getNetIOCountersAll.
func NetIOCountersByFile(pernic bool, filename string) ([]NetIOCountersStat, error) {
	lines, err := common.ReadLines(filename)
	if err != nil {
		return nil, err
	}
	statlen := len(lines) - 1
	ret := make([]NetIOCountersStat, 0, statlen)
	// The first two lines are column headers.
	for _, line := range lines[2:] {
		parts := strings.SplitN(line, ":", 2)
		if len(parts) != 2 {
			continue
		}
		interfaceName := strings.TrimSpace(parts[0])
		if interfaceName == "" {
			continue
		}
		fields := strings.Fields(strings.TrimSpace(parts[1]))
		// /proc/net/dev has 8 receive columns (bytes packets errs drop
		// fifo frame compressed multicast) followed by 8 transmit
		// columns (bytes packets errs drop fifo colls carrier
		// compressed). Skip truncated lines instead of panicking on a
		// short slice.
		if len(fields) < 12 {
			continue
		}
		bytesRecv, err := strconv.ParseUint(fields[0], 10, 64)
		if err != nil {
			return ret, err
		}
		packetsRecv, err := strconv.ParseUint(fields[1], 10, 64)
		if err != nil {
			return ret, err
		}
		errIn, err := strconv.ParseUint(fields[2], 10, 64)
		if err != nil {
			return ret, err
		}
		dropIn, err := strconv.ParseUint(fields[3], 10, 64)
		if err != nil {
			return ret, err
		}
		bytesSent, err := strconv.ParseUint(fields[8], 10, 64)
		if err != nil {
			return ret, err
		}
		packetsSent, err := strconv.ParseUint(fields[9], 10, 64)
		if err != nil {
			return ret, err
		}
		errOut, err := strconv.ParseUint(fields[10], 10, 64)
		if err != nil {
			return ret, err
		}
		// Transmit drops are column 11; the previous code read
		// fields[13], which is the collisions column.
		dropOut, err := strconv.ParseUint(fields[11], 10, 64)
		if err != nil {
			return ret, err
		}
		nic := NetIOCountersStat{
			Name:        interfaceName,
			BytesRecv:   bytesRecv,
			PacketsRecv: packetsRecv,
			Errin:       errIn,
			Dropin:      dropIn,
			BytesSent:   bytesSent,
			PacketsSent: packetsSent,
			Errout:      errOut,
			Dropout:     dropOut,
		}
		ret = append(ret, nic)
	}
	if !pernic {
		return getNetIOCountersAll(ret)
	}
	return ret, nil
}
var netProtocols = []string{
"ip",
"icmp",
"icmpmsg",
"tcp",
"udp",
"udplite",
}
// NetProtoCounters returns network statistics for the entire system.
// If protocols is empty then all protocols are returned, otherwise
// just the protocols in the list are returned.
// Available protocols:
// ip,icmp,icmpmsg,tcp,udp,udplite
func NetProtoCounters(protocols []string) ([]NetProtoCountersStat, error) {
	if len(protocols) == 0 {
		protocols = netProtocols
	}
	stats := make([]NetProtoCountersStat, 0, len(protocols))
	protos := make(map[string]bool, len(protocols))
	for _, p := range protocols {
		protos[p] = true
	}
	filename := common.HostProc("net/snmp")
	lines, err := common.ReadLines(filename)
	if err != nil {
		return nil, err
	}
	linecount := len(lines)
	for i := 0; i < linecount; i++ {
		line := lines[i]
		r := strings.IndexRune(line, ':')
		if r == -1 {
			// "fomatted" typo in the original message corrected.
			return nil, errors.New(filename + " is not formatted correctly, expected ':'.")
		}
		proto := strings.ToLower(line[:r])
		if !protos[proto] {
			// skip protocol header and its data line
			i++
			continue
		}
		// Header line: "Proto: Name1 Name2 ..."
		statNames := strings.Split(line[r+2:], " ")
		// The data line must follow the header; guard against a
		// truncated file instead of indexing past the slice.
		i++
		if i >= linecount {
			return nil, errors.New(filename + " is not formatted correctly, missing data line.")
		}
		statValues := strings.Split(lines[i][r+2:], " ")
		if len(statNames) != len(statValues) {
			return nil, errors.New(filename + " is not formatted correctly, expected same number of columns.")
		}
		stat := NetProtoCountersStat{
			Protocol: proto,
			Stats:    make(map[string]int64, len(statNames)),
		}
		for j := range statNames {
			value, err := strconv.ParseInt(statValues[j], 10, 64)
			if err != nil {
				return nil, err
			}
			stat.Stats[statNames[j]] = value
		}
		stats = append(stats, stat)
	}
	return stats, nil
}
// NetFilterCounters returns iptables conntrack statistics: the
// currently used conntrack count and the configured maximum.
// NOTE(review): contrary to the original comment, a missing or
// invalid file yields a non-nil error, not a silent nil result.
func NetFilterCounters() ([]NetFilterStat, error) {
	countfile := common.HostProc("sys/net/netfilter/nf_conntrack_count")
	maxfile := common.HostProc("sys/net/netfilter/nf_conntrack_max")
	count, err := common.ReadInts(countfile)
	if err != nil {
		return nil, err
	}
	stats := make([]NetFilterStat, 0, 1)
	max, err := common.ReadInts(maxfile)
	if err != nil {
		return nil, err
	}
	payload := NetFilterStat{
		ConnTrackCount: count[0],
		ConnTrackMax: max[0],
	}
	stats = append(stats, payload)
	return stats, nil
}
// TCPStatuses maps the hex state codes used in /proc/net/tcp to
// their symbolic names, per
// http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h
var TCPStatuses = map[string]string{
	"01": "ESTABLISHED",
	"02": "SYN_SENT",
	"03": "SYN_RECV",
	"04": "FIN_WAIT1",
	"05": "FIN_WAIT2",
	"06": "TIME_WAIT",
	"07": "CLOSE",
	"08": "CLOSE_WAIT",
	"09": "LAST_ACK",
	"0A": "LISTEN",
	"0B": "CLOSING",
}
type netConnectionKindType struct {
family uint32
sockType uint32
f string // file name
}
var kindTCP4 = netConnectionKindType{
family: syscall.AF_INET,
sockType: syscall.SOCK_STREAM,
f: "tcp",
}
var kindTCP6 = netConnectionKindType{
family: syscall.AF_INET6,
sockType: syscall.SOCK_STREAM,
f: "tcp6",
}
var kindUDP4 = netConnectionKindType{
family: syscall.AF_INET,
sockType: syscall.SOCK_DGRAM,
f: "udp",
}
var kindUDP6 = netConnectionKindType{
family: syscall.AF_INET6,
sockType: syscall.SOCK_DGRAM,
f: "udp6",
}
var kindUNIX = netConnectionKindType{
family: syscall.AF_UNIX,
f: "unix",
}
var netConnectionKindMap = map[string][]netConnectionKindType{
"all": []netConnectionKindType{kindTCP4, kindTCP6, kindUDP4, kindUDP6, kindUNIX},
"tcp": []netConnectionKindType{kindTCP4, kindTCP6},
"tcp4": []netConnectionKindType{kindTCP4},
"tcp6": []netConnectionKindType{kindTCP6},
"udp": []netConnectionKindType{kindUDP4, kindUDP6},
"udp4": []netConnectionKindType{kindUDP4},
"udp6": []netConnectionKindType{kindUDP6},
"unix": []netConnectionKindType{kindUNIX},
"inet": []netConnectionKindType{kindTCP4, kindTCP6, kindUDP4, kindUDP6},
"inet4": []netConnectionKindType{kindTCP4, kindUDP4},
"inet6": []netConnectionKindType{kindTCP6, kindUDP6},
}
// inodeMap records which (pid, fd) pair holds a socket inode.
type inodeMap struct {
	pid int32
	fd uint32
}

// connTmp is the intermediate parse result for one socket line.
type connTmp struct {
	fd uint32
	family uint32
	sockType uint32
	laddr Addr
	raddr Addr
	status string
	pid int32
	boundPid int32
	path string
}

// NetConnections returns the network connections opened system-wide
// (pid 0 means all processes).
func NetConnections(kind string) ([]NetConnectionStat, error) {
	return NetConnectionsPid(kind, 0)
}
// NetConnectionsPid returns the network connections opened by pid
// (0 means all processes), filtered by kind (see
// netConnectionKindMap for the accepted kind strings).
func NetConnectionsPid(kind string, pid int32) ([]NetConnectionStat, error) {
	tmap, ok := netConnectionKindMap[kind]
	if !ok {
		return nil, fmt.Errorf("invalid kind, %s", kind)
	}
	root := common.HostProc()
	var err error
	var inodes map[string][]inodeMap
	if pid == 0 {
		inodes, err = getProcInodesAll(root)
	} else {
		inodes, err = getProcInodes(root, pid)
		if len(inodes) == 0 {
			// no connection for the pid
			return []NetConnectionStat{}, nil
		}
	}
	if err != nil {
		// "cound" typo in the original message corrected.
		return nil, fmt.Errorf("could not get pid(s), %d", pid)
	}
	var ret []NetConnectionStat
	for _, t := range tmap {
		var ls []connTmp
		path := fmt.Sprintf("%s/net/%s", root, t.f)
		switch t.family {
		case syscall.AF_INET, syscall.AF_INET6:
			ls, err = processInet(path, t, inodes, pid)
		case syscall.AF_UNIX:
			ls, err = processUnix(path, t, inodes, pid)
		}
		if err != nil {
			return nil, err
		}
		for _, c := range ls {
			conn := NetConnectionStat{
				Fd:     c.fd,
				Family: t.family,
				Type:   t.sockType,
				Laddr:  c.laddr,
				Raddr:  c.raddr,
				Status: c.status,
				Pid:    c.pid,
			}
			// When no owning pid is known, fall back to the pid found
			// via the bound unix-socket path. (The former else-branch
			// re-assigned c.pid, which was already set above.)
			if c.pid == 0 {
				conn.Pid = c.boundPid
			}
			ret = append(ret, conn)
		}
	}
	return ret, nil
}
// getProcInodes returns the socket inodes opened by pid, mapped to
// the (pid, fd) pairs that hold them.
func getProcInodes(root string, pid int32) (map[string][]inodeMap, error) {
	ret := make(map[string][]inodeMap)
	dir := fmt.Sprintf("%s/%d/fd", root, pid)
	entries, err := ioutil.ReadDir(dir)
	if err != nil {
		// Process may have exited or access was denied; report no inodes.
		return ret, nil
	}
	for _, entry := range entries {
		linkPath := fmt.Sprintf("%s/%d/fd/%s", root, pid, entry.Name())
		inode, err := os.Readlink(linkPath)
		if err != nil {
			continue
		}
		if strings.HasPrefix(inode, "socket:[") {
			// Strip the "socket:[" prefix and the trailing "]".
			inode = inode[8 : len(inode)-1]
		}
		if _, ok := ret[inode]; !ok {
			ret[inode] = make([]inodeMap, 0)
		}
		fdNum, err := strconv.Atoi(entry.Name())
		if err != nil {
			continue
		}
		ret[inode] = append(ret[inode], inodeMap{pid: pid, fd: uint32(fdNum)})
	}
	return ret, nil
}
// Pids returns all pids found under the proc root.
// Note: this is a copy of process_linux.Pids().
// FIXME: importing the process package causes an import cycle, and
// moving this into common would break other platforms. Needs thought.
func Pids() ([]int32, error) {
	var ret []int32
	d, err := os.Open(common.HostProc())
	if err != nil {
		return nil, err
	}
	defer d.Close()
	fnames, err := d.Readdirnames(-1)
	if err != nil {
		return nil, err
	}
	for _, fname := range fnames {
		pid, err := strconv.ParseInt(fname, 10, 32)
		if err != nil {
			// if not numeric name, just skip
			continue
		}
		ret = append(ret, int32(pid))
	}
	return ret, nil
}
// getProcInodesAll aggregates getProcInodes over every pid on the system
// into a single inode -> []inodeMap map.
func getProcInodesAll(root string) (map[string][]inodeMap, error) {
	pids, err := Pids()
	if err != nil {
		return nil, err
	}
	ret := make(map[string][]inodeMap)

	for _, pid := range pids {
		t, err := getProcInodes(root, pid)
		if err != nil {
			// Partial results are returned alongside the error.
			return ret, err
		}
		if len(t) == 0 {
			// Nothing to merge for this pid.
			continue
		}
		ret = updateMap(ret, t)
	}
	return ret, nil
}
// decodeAddress decodes an address as represented in /proc/net/* files
// into an Addr (IP string plus port).
// ex:
// "0500000A:0016" -> "10.0.0.5", 22
// "0085002452100113070057A13F025401:0035" -> "2400:8500:1301:1052:a157:7:154:23f", 53
func decodeAddress(family uint32, src string) (Addr, error) {
	t := strings.Split(src, ":")
	if len(t) != 2 {
		return Addr{}, fmt.Errorf("does not contain port, %s", src)
	}
	addr := t[0]
	// The port is hexadecimal.
	port, err := strconv.ParseInt("0x"+t[1], 0, 64)
	if err != nil {
		return Addr{}, fmt.Errorf("invalid port, %s", src)
	}
	decoded, err := hex.DecodeString(addr)
	if err != nil {
		// BUG FIX: the original call passed err with no formatting verb
		// ("decode error:"), which go vet flags and which rendered as
		// "%!(EXTRA ...)" at runtime.
		return Addr{}, fmt.Errorf("decode error: %v", err)
	}
	var ip net.IP
	// Assumes the kernel wrote the address little-endian.
	if family == syscall.AF_INET {
		ip = net.IP(Reverse(decoded))
	} else { // IPv6
		ip, err = parseIPv6HexString(decoded)
		if err != nil {
			return Addr{}, err
		}
	}
	return Addr{
		IP:   ip.String(),
		Port: uint32(port),
	}, nil
}
// Reverse reverses a byte slice in place and returns the same slice.
func Reverse(s []byte) []byte {
	n := len(s)
	for i := 0; i < n/2; i++ {
		s[i], s[n-1-i] = s[n-1-i], s[i]
	}
	return s
}
// parseIPv6HexString converts the 16 raw bytes decoded from /proc/net/*6
// into a net.IP, reversing each little-endian 4-byte group in place.
func parseIPv6HexString(src []byte) (net.IP, error) {
	if len(src) != 16 {
		return nil, fmt.Errorf("invalid IPv6 string")
	}
	buf := make([]byte, 0, 16)
	for i := 0; i < 16; i += 4 {
		// Reverse the group in place (same side effect on the caller's
		// slice as the original Reverse call).
		group := src[i : i+4]
		group[0], group[3] = group[3], group[0]
		group[1], group[2] = group[2], group[1]
		buf = append(buf, group...)
	}
	return net.IP(buf), nil
}
// processInet parses a /proc/net/{tcp,tcp6,udp,udp6}-format file and
// returns the connections found there, attributed to pids via the inodes
// map. filterPid > 0 restricts the result to that pid.
func processInet(file string, kind netConnectionKindType, inodes map[string][]inodeMap, filterPid int32) ([]connTmp, error) {
	if strings.HasSuffix(file, "6") && !common.PathExists(file) {
		// IPv6 not supported, return empty.
		return []connTmp{}, nil
	}
	lines, err := common.ReadLines(file)
	if err != nil {
		return nil, err
	}
	var ret []connTmp
	// skip first line (column header)
	for _, line := range lines[1:] {
		l := strings.Fields(line)
		if len(l) < 10 {
			continue
		}
		// Columns: l[1]=local address, l[2]=remote address, l[3]=state,
		// l[9]=inode.
		laddr := l[1]
		raddr := l[2]
		status := l[3]
		inode := l[9]
		pid := int32(0)
		fd := uint32(0)
		// Attribute the socket to the first pid/fd pair known to hold
		// this inode, if any.
		i, exists := inodes[inode]
		if exists {
			pid = i[0].pid
			fd = i[0].fd
		}
		if filterPid > 0 && filterPid != pid {
			continue
		}
		// Only TCP sockets carry a meaningful state code; UDP is reported
		// as NONE.
		if kind.sockType == syscall.SOCK_STREAM {
			status = TCPStatuses[status]
		} else {
			status = "NONE"
		}
		// Rows whose addresses cannot be decoded are skipped, not fatal.
		la, err := decodeAddress(kind.family, laddr)
		if err != nil {
			continue
		}
		ra, err := decodeAddress(kind.family, raddr)
		if err != nil {
			continue
		}
		ret = append(ret, connTmp{
			fd:       fd,
			family:   kind.family,
			sockType: kind.sockType,
			laddr:    la,
			raddr:    ra,
			status:   status,
			pid:      pid,
		})
	}
	return ret, nil
}
// processUnix parses a /proc/net/unix-format file and returns the
// connections found there, attributed to pids via the inodes map.
// filterPid > 0 restricts the result to that pid.
func processUnix(file string, kind netConnectionKindType, inodes map[string][]inodeMap, filterPid int32) ([]connTmp, error) {
	lines, err := common.ReadLines(file)
	if err != nil {
		return nil, err
	}
	var ret []connTmp
	// skip first line (column header)
	for _, line := range lines[1:] {
		tokens := strings.Fields(line)
		// tokens[6] (inode) is read below, so at least 7 fields are needed.
		if len(tokens) < 7 {
			continue
		}
		// tokens[4] is the socket type, in decimal.
		st, err := strconv.Atoi(tokens[4])
		if err != nil {
			continue
		}
		inode := tokens[6]
		pairs, exists := inodes[inode]
		if !exists {
			// No known owner: emit nothing for this row.
			pairs = []inodeMap{}
		}
		for _, pair := range pairs {
			if filterPid > 0 && filterPid != pair.pid {
				continue
			}
			// The optional 8th token is the socket path.
			var path string
			if len(tokens) == 8 {
				path = tokens[len(tokens)-1]
			}
			ret = append(ret, connTmp{
				family:   kind.family,
				sockType: uint32(st),
				raddr:    Addr{},
				pid:      pair.pid,
				status:   "NONE",
				path:     path,
			})
		}
	}
	// BUG FIX: this previously returned a fresh empty slice, silently
	// discarding every connection accumulated above.
	return ret, nil
}
// updateMap merges the entries of add into src, appending to any slice
// already stored under the same key, and returns src.
func updateMap(src map[string][]inodeMap, add map[string][]inodeMap) map[string][]inodeMap {
	for key, extra := range add {
		if existing, ok := src[key]; ok {
			src[key] = append(existing, extra...)
		} else {
			src[key] = extra
		}
	}
	return src
}
[net]linux: fix various bugs in NetConnections.
// +build linux
package net
import (
"encoding/hex"
"errors"
"fmt"
"io/ioutil"
"net"
"os"
"strconv"
"strings"
"syscall"
"github.com/shirou/gopsutil/internal/common"
)
// NetIOCounters returns network I/O statistics for every network
// interface installed on the system, read from /proc/net/dev. If the
// pernic argument is false, only the sum of all interfaces is returned
// (under the name 'all'); if true, each interface is returned separately.
func NetIOCounters(pernic bool) ([]NetIOCountersStat, error) {
	filename := common.HostProc("net/dev")
	return NetIOCountersByFile(pernic, filename)
}
// NetIOCountersByFile reads network I/O statistics from the given file in
// /proc/net/dev format. If pernic is false, only the sum of all
// interfaces is returned; otherwise one entry per interface.
func NetIOCountersByFile(pernic bool, filename string) ([]NetIOCountersStat, error) {
	lines, err := common.ReadLines(filename)
	if err != nil {
		return nil, err
	}

	statlen := len(lines) - 1
	ret := make([]NetIOCountersStat, 0, statlen)

	// The first two lines are column headers.
	for _, line := range lines[2:] {
		parts := strings.SplitN(line, ":", 2)
		if len(parts) != 2 {
			continue
		}
		interfaceName := strings.TrimSpace(parts[0])
		if interfaceName == "" {
			continue
		}
		fields := strings.Fields(strings.TrimSpace(parts[1]))
		// ROBUSTNESS FIX: fields[13] is indexed below; a truncated line
		// previously caused an index-out-of-range panic.
		if len(fields) < 14 {
			continue
		}
		bytesRecv, err := strconv.ParseUint(fields[0], 10, 64)
		if err != nil {
			return ret, err
		}
		packetsRecv, err := strconv.ParseUint(fields[1], 10, 64)
		if err != nil {
			return ret, err
		}
		errIn, err := strconv.ParseUint(fields[2], 10, 64)
		if err != nil {
			return ret, err
		}
		dropIn, err := strconv.ParseUint(fields[3], 10, 64)
		if err != nil {
			return ret, err
		}
		bytesSent, err := strconv.ParseUint(fields[8], 10, 64)
		if err != nil {
			return ret, err
		}
		packetsSent, err := strconv.ParseUint(fields[9], 10, 64)
		if err != nil {
			return ret, err
		}
		errOut, err := strconv.ParseUint(fields[10], 10, 64)
		if err != nil {
			return ret, err
		}
		dropOut, err := strconv.ParseUint(fields[13], 10, 64)
		if err != nil {
			return ret, err
		}
		ret = append(ret, NetIOCountersStat{
			Name:        interfaceName,
			BytesRecv:   bytesRecv,
			PacketsRecv: packetsRecv,
			Errin:       errIn,
			Dropin:      dropIn,
			BytesSent:   bytesSent,
			PacketsSent: packetsSent,
			Errout:      errOut,
			Dropout:     dropOut,
		})
	}
	if !pernic {
		return getNetIOCountersAll(ret)
	}
	return ret, nil
}
// netProtocols is the default protocol filter used by NetProtoCounters
// when the caller passes an empty list; the names are compared (lowercased)
// against the row prefixes of /proc/net/snmp.
var netProtocols = []string{
	"ip",
	"icmp",
	"icmpmsg",
	"tcp",
	"udp",
	"udplite",
}
// NetProtoCounters returns network statistics for the entire system.
// If protocols is empty then all protocols are returned, otherwise
// just the protocols in the list are returned.
// Available protocols:
//   ip,icmp,icmpmsg,tcp,udp,udplite
func NetProtoCounters(protocols []string) ([]NetProtoCountersStat, error) {
	if len(protocols) == 0 {
		protocols = netProtocols
	}
	stats := make([]NetProtoCountersStat, 0, len(protocols))
	protos := make(map[string]bool, len(protocols))
	for _, p := range protocols {
		protos[p] = true
	}

	// /proc/net/snmp alternates a header line ("Tcp: RtoAlgorithm ...")
	// with a matching data line ("Tcp: 1 200 ...").
	filename := common.HostProc("net/snmp")
	lines, err := common.ReadLines(filename)
	if err != nil {
		return nil, err
	}

	linecount := len(lines)
	for i := 0; i < linecount; i++ {
		line := lines[i]
		r := strings.IndexRune(line, ':')
		if r == -1 {
			// TYPO FIX: error message previously read "fomatted".
			return nil, errors.New(filename + " is not formatted correctly, expected ':'.")
		}
		proto := strings.ToLower(line[:r])
		if !protos[proto] {
			// skip protocol and data line
			i++
			continue
		}

		// Read header line
		statNames := strings.Split(line[r+2:], " ")

		// Read data line.
		i++
		// ROBUSTNESS FIX: a header line at EOF previously indexed past
		// the end of lines.
		if i >= linecount {
			return nil, errors.New(filename + " is not formatted correctly, expected data line.")
		}
		statValues := strings.Split(lines[i][r+2:], " ")
		if len(statNames) != len(statValues) {
			return nil, errors.New(filename + " is not formatted correctly, expected same number of columns.")
		}
		stat := NetProtoCountersStat{
			Protocol: proto,
			Stats:    make(map[string]int64, len(statNames)),
		}
		for j := range statNames {
			value, err := strconv.ParseInt(statValues[j], 10, 64)
			if err != nil {
				return nil, err
			}
			stat.Stats[statNames[j]] = value
		}
		stats = append(stats, stat)
	}
	return stats, nil
}
// NetFilterCounters returns iptables conntrack statistics: the conntrack
// entries currently in use and the configured maximum. An error is
// returned when either proc file is missing or unreadable.
func NetFilterCounters() ([]NetFilterStat, error) {
	countfile := common.HostProc("sys/net/netfilter/nf_conntrack_count")
	count, err := common.ReadInts(countfile)
	if err != nil {
		return nil, err
	}

	maxfile := common.HostProc("sys/net/netfilter/nf_conntrack_max")
	max, err := common.ReadInts(maxfile)
	if err != nil {
		return nil, err
	}

	return []NetFilterStat{
		{
			ConnTrackCount: count[0],
			ConnTrackMax:   max[0],
		},
	}, nil
}
// TCPStatuses maps the hexadecimal socket-state codes found in
// /proc/net/tcp* to human-readable names.
// http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h
var TCPStatuses = map[string]string{
	"01": "ESTABLISHED",
	"02": "SYN_SENT",
	"03": "SYN_RECV",
	"04": "FIN_WAIT1",
	"05": "FIN_WAIT2",
	"06": "TIME_WAIT",
	"07": "CLOSE",
	"08": "CLOSE_WAIT",
	"09": "LAST_ACK",
	"0A": "LISTEN",
	"0B": "CLOSING",
}

// netConnectionKindType describes one /proc/net file to scan along with
// the socket family/type its rows represent.
type netConnectionKindType struct {
	family   uint32 // e.g. syscall.AF_INET
	sockType uint32 // e.g. syscall.SOCK_STREAM
	f        string // file name under <root>/net/
}

var kindTCP4 = netConnectionKindType{
	family:   syscall.AF_INET,
	sockType: syscall.SOCK_STREAM,
	f:        "tcp",
}
var kindTCP6 = netConnectionKindType{
	family:   syscall.AF_INET6,
	sockType: syscall.SOCK_STREAM,
	f:        "tcp6",
}
var kindUDP4 = netConnectionKindType{
	family:   syscall.AF_INET,
	sockType: syscall.SOCK_DGRAM,
	f:        "udp",
}
var kindUDP6 = netConnectionKindType{
	family:   syscall.AF_INET6,
	sockType: syscall.SOCK_DGRAM,
	f:        "udp6",
}
// kindUNIX leaves sockType zero: unix sockets carry their type per row
// (parsed from /proc/net/unix by processUnix).
var kindUNIX = netConnectionKindType{
	family: syscall.AF_UNIX,
	f:      "unix",
}

// netConnectionKindMap translates the user-facing "kind" argument of
// NetConnections/NetConnectionsPid into the set of files to scan.
var netConnectionKindMap = map[string][]netConnectionKindType{
	"all":   []netConnectionKindType{kindTCP4, kindTCP6, kindUDP4, kindUDP6, kindUNIX},
	"tcp":   []netConnectionKindType{kindTCP4, kindTCP6},
	"tcp4":  []netConnectionKindType{kindTCP4},
	"tcp6":  []netConnectionKindType{kindTCP6},
	"udp":   []netConnectionKindType{kindUDP4, kindUDP6},
	"udp4":  []netConnectionKindType{kindUDP4},
	"udp6":  []netConnectionKindType{kindUDP6},
	"unix":  []netConnectionKindType{kindUNIX},
	"inet":  []netConnectionKindType{kindTCP4, kindTCP6, kindUDP4, kindUDP6},
	"inet4": []netConnectionKindType{kindTCP4, kindUDP4},
	"inet6": []netConnectionKindType{kindTCP6, kindUDP6},
}

// inodeMap records one pid/fd pair that holds a given socket inode open.
type inodeMap struct {
	pid int32
	fd  uint32
}

// connTmp is the intermediate parse result produced by processInet and
// processUnix before conversion to NetConnectionStat.
type connTmp struct {
	fd       uint32
	family   uint32
	sockType uint32
	laddr    Addr
	raddr    Addr
	status   string
	pid      int32
	boundPid int32
	path     string
}
// NetConnections returns a list of network connections opened system-wide.
// kind selects the socket families/types to inspect (see
// netConnectionKindMap); pid 0 means "all processes".
func NetConnections(kind string) ([]NetConnectionStat, error) {
	return NetConnectionsPid(kind, 0)
}
// NetConnectionsPid returns a list of network connections opened by a
// process (pid 0 means all processes). kind selects the socket
// families/types to inspect (see netConnectionKindMap).
func NetConnectionsPid(kind string, pid int32) ([]NetConnectionStat, error) {
	tmap, ok := netConnectionKindMap[kind]
	if !ok {
		return nil, fmt.Errorf("invalid kind, %s", kind)
	}
	root := common.HostProc()
	var err error
	var inodes map[string][]inodeMap
	if pid == 0 {
		inodes, err = getProcInodesAll(root)
	} else {
		inodes, err = getProcInodes(root, pid)
		if len(inodes) == 0 {
			// no connection for the pid
			return []NetConnectionStat{}, nil
		}
	}
	if err != nil {
		// FIX: "cound" typo corrected, and the underlying error is now
		// included instead of being silently discarded.
		return nil, fmt.Errorf("could not get pid(s), %d: %v", pid, err)
	}

	// The same connection may be produced more than once; deduplicate
	// entries using their JSON rendering as the key.
	dupCheckMap := make(map[string]bool)
	var ret []NetConnectionStat

	for _, t := range tmap {
		var path string
		var ls []connTmp
		path = fmt.Sprintf("%s/net/%s", root, t.f)
		switch t.family {
		case syscall.AF_INET:
			fallthrough
		case syscall.AF_INET6:
			ls, err = processInet(path, t, inodes, pid)
		case syscall.AF_UNIX:
			ls, err = processUnix(path, t, inodes, pid)
		}
		if err != nil {
			return nil, err
		}
		for _, c := range ls {
			conn := NetConnectionStat{
				Fd:     c.fd,
				Family: c.family,
				Type:   c.sockType,
				Laddr:  c.laddr,
				Raddr:  c.raddr,
				Status: c.status,
				Pid:    c.pid,
			}
			// Fall back to the bound pid when the owning pid is unknown.
			if c.pid == 0 {
				conn.Pid = c.boundPid
			} else {
				conn.Pid = c.pid
			}

			// check duplicate using JSON format
			json := conn.String()
			if _, exists := dupCheckMap[json]; !exists {
				ret = append(ret, conn)
				dupCheckMap[json] = true
			}
		}
	}
	return ret, nil
}
// getProcInodes returns a map from socket inode number to the pid/fd
// pairs of the given pid that hold that socket open. Unreadable /proc
// entries are treated as "no inodes" rather than an error.
func getProcInodes(root string, pid int32) (map[string][]inodeMap, error) {
	ret := make(map[string][]inodeMap)

	files, err := ioutil.ReadDir(fmt.Sprintf("%s/%d/fd", root, pid))
	if err != nil {
		// Process may have exited or be unreadable; report no inodes.
		return ret, nil
	}
	for _, entry := range files {
		link, err := os.Readlink(fmt.Sprintf("%s/%d/fd/%s", root, pid, entry.Name()))
		if err != nil {
			continue
		}
		// Only socket descriptors matter; their links read "socket:[<inode>]".
		if !strings.HasPrefix(link, "socket:[") {
			continue
		}
		inode := link[8 : len(link)-1]
		if _, seen := ret[inode]; !seen {
			ret[inode] = make([]inodeMap, 0)
		}
		fdNum, err := strconv.Atoi(entry.Name())
		if err != nil {
			continue
		}
		ret[inode] = append(ret[inode], inodeMap{pid: pid, fd: uint32(fdNum)})
	}
	return ret, nil
}
// Pids returns every pid currently visible under the proc filesystem.
// Note: this is a copy of process_linux.Pids().
// FIXME: importing the process package would create an import cycle, and
// moving this into common would break other platforms. Needs thought.
func Pids() ([]int32, error) {
	d, err := os.Open(common.HostProc())
	if err != nil {
		return nil, err
	}
	defer d.Close()

	names, err := d.Readdirnames(-1)
	if err != nil {
		return nil, err
	}

	var pids []int32
	for _, name := range names {
		// Directory names that are not purely numeric are not processes.
		if pid, err := strconv.ParseInt(name, 10, 32); err == nil {
			pids = append(pids, int32(pid))
		}
	}
	return pids, nil
}
// getProcInodesAll aggregates getProcInodes over every pid on the system
// into a single inode -> []inodeMap map.
func getProcInodesAll(root string) (map[string][]inodeMap, error) {
	pids, err := Pids()
	if err != nil {
		return nil, err
	}
	ret := make(map[string][]inodeMap)

	for _, pid := range pids {
		t, err := getProcInodes(root, pid)
		if err != nil {
			// Partial results are returned alongside the error.
			return ret, err
		}
		if len(t) == 0 {
			// Pid holds no sockets; nothing to merge.
			continue
		}
		ret = updateMap(ret, t)
	}
	return ret, nil
}
// decodeAddress decodes an address as represented in /proc/net/* files
// into an Addr (IP string plus port).
// ex:
// "0500000A:0016" -> "10.0.0.5", 22
// "0085002452100113070057A13F025401:0035" -> "2400:8500:1301:1052:a157:7:154:23f", 53
func decodeAddress(family uint32, src string) (Addr, error) {
	t := strings.Split(src, ":")
	if len(t) != 2 {
		return Addr{}, fmt.Errorf("does not contain port, %s", src)
	}
	addr := t[0]
	// The port is hexadecimal.
	port, err := strconv.ParseInt("0x"+t[1], 0, 64)
	if err != nil {
		return Addr{}, fmt.Errorf("invalid port, %s", src)
	}
	decoded, err := hex.DecodeString(addr)
	if err != nil {
		// BUG FIX: the original call passed err with no formatting verb
		// ("decode error:"), which go vet flags and which rendered as
		// "%!(EXTRA ...)" at runtime.
		return Addr{}, fmt.Errorf("decode error: %v", err)
	}
	var ip net.IP
	// Assumes the kernel wrote the address little-endian.
	if family == syscall.AF_INET {
		ip = net.IP(Reverse(decoded))
	} else { // IPv6
		ip, err = parseIPv6HexString(decoded)
		if err != nil {
			return Addr{}, err
		}
	}
	return Addr{
		IP:   ip.String(),
		Port: uint32(port),
	}, nil
}
// Reverse reverses a byte slice in place and returns the same slice.
func Reverse(s []byte) []byte {
	n := len(s)
	for i := 0; i < n/2; i++ {
		s[i], s[n-1-i] = s[n-1-i], s[i]
	}
	return s
}
// parseIPv6HexString converts the 16 raw bytes decoded from /proc/net/*6
// into a net.IP, reversing each little-endian 4-byte group in place.
func parseIPv6HexString(src []byte) (net.IP, error) {
	if len(src) != 16 {
		return nil, fmt.Errorf("invalid IPv6 string")
	}
	buf := make([]byte, 0, 16)
	for i := 0; i < 16; i += 4 {
		// Reverse the group in place (same side effect on the caller's
		// slice as the original Reverse call).
		group := src[i : i+4]
		group[0], group[3] = group[3], group[0]
		group[1], group[2] = group[2], group[1]
		buf = append(buf, group...)
	}
	return net.IP(buf), nil
}
// processInet parses a /proc/net/{tcp,tcp6,udp,udp6}-format file and
// returns the connections found there, attributed to pids via the inodes
// map. filterPid > 0 restricts the result to that pid.
func processInet(file string, kind netConnectionKindType, inodes map[string][]inodeMap, filterPid int32) ([]connTmp, error) {
	if strings.HasSuffix(file, "6") && !common.PathExists(file) {
		// IPv6 not supported, return empty.
		return []connTmp{}, nil
	}
	lines, err := common.ReadLines(file)
	if err != nil {
		return nil, err
	}
	var ret []connTmp
	// skip first line (column header)
	for _, line := range lines[1:] {
		l := strings.Fields(line)
		if len(l) < 10 {
			continue
		}
		// Columns: l[1]=local address, l[2]=remote address, l[3]=state,
		// l[9]=inode.
		laddr := l[1]
		raddr := l[2]
		status := l[3]
		inode := l[9]
		pid := int32(0)
		fd := uint32(0)
		// Attribute the socket to the first pid/fd pair known to hold
		// this inode, if any.
		i, exists := inodes[inode]
		if exists {
			pid = i[0].pid
			fd = i[0].fd
		}
		if filterPid > 0 && filterPid != pid {
			continue
		}
		// Only TCP sockets carry a meaningful state code; UDP is reported
		// as NONE.
		if kind.sockType == syscall.SOCK_STREAM {
			status = TCPStatuses[status]
		} else {
			status = "NONE"
		}
		// Rows whose addresses cannot be decoded are skipped, not fatal.
		la, err := decodeAddress(kind.family, laddr)
		if err != nil {
			continue
		}
		ra, err := decodeAddress(kind.family, raddr)
		if err != nil {
			continue
		}
		ret = append(ret, connTmp{
			fd:       fd,
			family:   kind.family,
			sockType: kind.sockType,
			laddr:    la,
			raddr:    ra,
			status:   status,
			pid:      pid,
		})
	}
	return ret, nil
}
// processUnix parses a /proc/net/unix-format file and returns the
// connections found there, attributed to pids via the inodes map.
// filterPid > 0 restricts the result to that pid.
func processUnix(file string, kind netConnectionKindType, inodes map[string][]inodeMap, filterPid int32) ([]connTmp, error) {
	lines, err := common.ReadLines(file)
	if err != nil {
		return nil, err
	}
	var ret []connTmp
	// skip first line (column header)
	for _, line := range lines[1:] {
		tokens := strings.Fields(line)
		// BUG FIX: tokens[6] (the inode) is read below, so at least 7
		// fields are required; the previous `< 6` guard let a 6-field
		// line panic with an index out of range.
		if len(tokens) < 7 {
			continue
		}
		// tokens[4] is the socket type, in decimal.
		st, err := strconv.Atoi(tokens[4])
		if err != nil {
			return nil, err
		}
		inode := tokens[6]
		pairs, exists := inodes[inode]
		if !exists {
			// Keep the socket visible even when no owning pid is known,
			// by emitting one entry with a zero pid/fd.
			pairs = []inodeMap{
				{},
			}
		}
		for _, pair := range pairs {
			if filterPid > 0 && filterPid != pair.pid {
				continue
			}
			// The optional 8th token is the socket path.
			var path string
			if len(tokens) == 8 {
				path = tokens[len(tokens)-1]
			}
			ret = append(ret, connTmp{
				fd:       pair.fd,
				family:   kind.family,
				sockType: uint32(st),
				laddr: Addr{
					IP: path,
				},
				pid:    pair.pid,
				status: "NONE",
				path:   path,
			})
		}
	}
	return ret, nil
}
// updateMap merges the entries of add into src, appending to any slice
// already stored under the same key, and returns src.
func updateMap(src map[string][]inodeMap, add map[string][]inodeMap) map[string][]inodeMap {
	for key, extra := range add {
		if existing, ok := src[key]; ok {
			src[key] = append(existing, extra...)
		} else {
			src[key] = extra
		}
	}
	return src
}
|
// Copyright 2017 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package files
import (
"os"
"github.com/coreos/ignition/tests/register"
"github.com/coreos/ignition/tests/types"
)
// init registers this file's positive directory-creation tests with the
// blackbox test harness.
func init() {
	register.Register(register.PositiveTest, CreateDirectoryOnRoot())
	register.Register(register.PositiveTest, ForceDirCreation())
	register.Register(register.PositiveTest, ForceDirCreationOverNonemptyDir())
	register.Register(register.PositiveTest, ApplyDefaultDirectoryPermissions())
}
// CreateDirectoryOnRoot checks that a directory declared in the config is
// created on the root filesystem.
func CreateDirectoryOnRoot() types.Test {
	name := "Create a Directory on the Root Filesystem"
	in := types.GetBaseDisk()
	out := types.GetBaseDisk()
	config := `{
		"ignition": { "version": "$version" },
		"storage": {
			"directories": [{
				"path": "/foo/bar"
			}]
		}
	}`
	// Expected output: /foo/bar exists as a directory.
	out[0].Partitions.AddDirectories("ROOT", []types.Directory{
		{
			Node: types.Node{
				Name:      "bar",
				Directory: "foo",
			},
		},
	})
	configMinVersion := "3.0.0-experimental"

	return types.Test{
		Name:             name,
		In:               in,
		Out:              out,
		Config:           config,
		ConfigMinVersion: configMinVersion,
	}
}
// ForceDirCreation checks that with "overwrite": true an existing regular
// file at the target path is replaced by the requested directory.
func ForceDirCreation() types.Test {
	name := "Force Directory Creation"
	in := types.GetBaseDisk()
	out := types.GetBaseDisk()
	config := `{
		"ignition": { "version": "$version" },
		"storage": {
			"directories": [{
				"path": "/foo/bar",
				"overwrite": true
			}]
		}
	}`
	// Input: a regular file already occupies /foo/bar.
	in[0].Partitions.AddFiles("ROOT", []types.File{
		{
			Node: types.Node{
				Directory: "foo",
				Name:      "bar",
			},
			Contents: "hello, world",
		},
	})
	// Expected output: /foo/bar is now a directory.
	out[0].Partitions.AddDirectories("ROOT", []types.Directory{
		{
			Node: types.Node{
				Directory: "foo",
				Name:      "bar",
			},
		},
	})
	configMinVersion := "3.0.0-experimental"

	return types.Test{
		Name:             name,
		In:               in,
		Out:              out,
		Config:           config,
		ConfigMinVersion: configMinVersion,
	}
}
// ForceDirCreationOverNonemptyDir checks that with "overwrite": true a
// non-empty directory at the target path is replaced by the requested
// (empty) directory.
func ForceDirCreationOverNonemptyDir() types.Test {
	name := "Force Directory Creation Over Nonempty Directory"
	in := types.GetBaseDisk()
	out := types.GetBaseDisk()
	config := `{
		"ignition": { "version": "$version" },
		"storage": {
			"directories": [{
				"path": "/foo/bar",
				"overwrite": true
			}]
		}
	}`
	// Input: /foo/bar already contains a file, making it non-empty.
	in[0].Partitions.AddFiles("ROOT", []types.File{
		{
			Node: types.Node{
				Directory: "foo/bar",
				Name:      "baz",
			},
			Contents: "hello, world",
		},
	})
	out[0].Partitions.AddDirectories("ROOT", []types.Directory{
		{
			Node: types.Node{
				Directory: "foo",
				Name:      "bar",
			},
		},
	})
	configMinVersion := "3.0.0-experimental"

	// TODO: add ability to ensure that foo/bar/baz doesn't exist here.
	return types.Test{
		Name:             name,
		In:               in,
		Out:              out,
		Config:           config,
		ConfigMinVersion: configMinVersion,
	}
}
// ApplyDefaultDirectoryPermissions checks that a directory created without
// an explicit mode receives 0755 permissions.
func ApplyDefaultDirectoryPermissions() types.Test {
	name := "Apply Default Directory Permissions"
	in := types.GetBaseDisk()
	out := types.GetBaseDisk()
	// NOTE(review): the config still carries a "filesystem" key; confirm
	// it is accepted by spec version 3.0.0-experimental.
	config := `{
		"ignition": { "version": "$version" },
		"storage": {
			"directories": [{
				"filesystem": "root",
				"path": "/foo/bar"
			}]
		}
	}`
	// Expected output: default mode 0755 plus the directory bit.
	out[0].Partitions.AddDirectories("ROOT", []types.Directory{
		{
			Node: types.Node{
				Name:      "bar",
				Directory: "foo",
			},
			Mode: 0755 | int(os.ModeDir),
		},
	})
	configMinVersion := "3.0.0-experimental"

	return types.Test{
		Name:             name,
		In:               in,
		Out:              out,
		Config:           config,
		ConfigMinVersion: configMinVersion,
	}
}
tests: add test ensuring correct ordering of nodes
Add a blackbox test to ensure that the order of creation is based on
symlink-resolved paths, not the paths as specified.
// Copyright 2017 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package files
import (
"os"
"github.com/coreos/ignition/tests/register"
"github.com/coreos/ignition/tests/types"
)
// init registers this file's positive directory-creation tests with the
// blackbox test harness.
func init() {
	register.Register(register.PositiveTest, CreateDirectoryOnRoot())
	register.Register(register.PositiveTest, ForceDirCreation())
	register.Register(register.PositiveTest, ForceDirCreationOverNonemptyDir())
	register.Register(register.PositiveTest, CheckOrdering())
	register.Register(register.PositiveTest, ApplyDefaultDirectoryPermissions())
}
// CreateDirectoryOnRoot checks that a directory declared in the config is
// created on the root filesystem.
func CreateDirectoryOnRoot() types.Test {
	name := "Create a Directory on the Root Filesystem"
	in := types.GetBaseDisk()
	out := types.GetBaseDisk()
	config := `{
		"ignition": { "version": "$version" },
		"storage": {
			"directories": [{
				"path": "/foo/bar"
			}]
		}
	}`
	// Expected output: /foo/bar exists as a directory.
	out[0].Partitions.AddDirectories("ROOT", []types.Directory{
		{
			Node: types.Node{
				Name:      "bar",
				Directory: "foo",
			},
		},
	})
	configMinVersion := "3.0.0-experimental"

	return types.Test{
		Name:             name,
		In:               in,
		Out:              out,
		Config:           config,
		ConfigMinVersion: configMinVersion,
	}
}
// ForceDirCreation checks that with "overwrite": true an existing regular
// file at the target path is replaced by the requested directory.
func ForceDirCreation() types.Test {
	name := "Force Directory Creation"
	in := types.GetBaseDisk()
	out := types.GetBaseDisk()
	config := `{
		"ignition": { "version": "$version" },
		"storage": {
			"directories": [{
				"path": "/foo/bar",
				"overwrite": true
			}]
		}
	}`
	// Input: a regular file already occupies /foo/bar.
	in[0].Partitions.AddFiles("ROOT", []types.File{
		{
			Node: types.Node{
				Directory: "foo",
				Name:      "bar",
			},
			Contents: "hello, world",
		},
	})
	// Expected output: /foo/bar is now a directory.
	out[0].Partitions.AddDirectories("ROOT", []types.Directory{
		{
			Node: types.Node{
				Directory: "foo",
				Name:      "bar",
			},
		},
	})
	configMinVersion := "3.0.0-experimental"

	return types.Test{
		Name:             name,
		In:               in,
		Out:              out,
		Config:           config,
		ConfigMinVersion: configMinVersion,
	}
}
// ForceDirCreationOverNonemptyDir checks that with "overwrite": true a
// non-empty directory at the target path is replaced by the requested
// (empty) directory.
func ForceDirCreationOverNonemptyDir() types.Test {
	name := "Force Directory Creation Over Nonempty Directory"
	in := types.GetBaseDisk()
	out := types.GetBaseDisk()
	config := `{
		"ignition": { "version": "$version" },
		"storage": {
			"directories": [{
				"path": "/foo/bar",
				"overwrite": true
			}]
		}
	}`
	// Input: /foo/bar already contains a file, making it non-empty.
	in[0].Partitions.AddFiles("ROOT", []types.File{
		{
			Node: types.Node{
				Directory: "foo/bar",
				Name:      "baz",
			},
			Contents: "hello, world",
		},
	})
	out[0].Partitions.AddDirectories("ROOT", []types.Directory{
		{
			Node: types.Node{
				Directory: "foo",
				Name:      "bar",
			},
		},
	})
	configMinVersion := "3.0.0-experimental"

	// TODO: add ability to ensure that foo/bar/baz doesn't exist here.
	return types.Test{
		Name:             name,
		In:               in,
		Out:              out,
		Config:           config,
		ConfigMinVersion: configMinVersion,
	}
}
// CheckOrdering verifies that node creation is ordered by symlink-resolved
// paths rather than by the paths as written in the config: /foo/bar is a
// symlink to /, so "/foo/bar/baz" actually creates /baz, which the second
// entry "/baz/quux" depends on.
func CheckOrdering() types.Test {
	// BUG FIX: the test name was left as the placeholder "TODO".
	name := "Create Directories in Order of Symlink-Resolved Paths"
	in := types.GetBaseDisk()
	out := types.GetBaseDisk()
	// Modes: 511 == 0777, 493 == 0755.
	config := `{
		"ignition": { "version": "$version" },
		"storage": {
			"directories": [{
				"path": "/foo/bar/baz",
				"mode": 511,
				"overwrite": false
			},
			{
				"path": "/baz/quux",
				"mode": 493,
				"overwrite": false
			}]
		}
	}`
	// Input: /foo/bar is a symlink to the filesystem root.
	in[0].Partitions.AddLinks("ROOT", []types.Link{
		{
			Node: types.Node{
				Directory: "foo",
				Name:      "bar",
			},
			Target: "/",
		},
	})
	// Expected output: /baz (0777) and /baz/quux (0755), both directories.
	out[0].Partitions.AddDirectories("ROOT", []types.Directory{
		{
			Node: types.Node{
				Directory: "/",
				Name:      "baz",
			},
			Mode: 0777 | int(os.ModeDir),
		},
		{
			Node: types.Node{
				Directory: "baz",
				Name:      "quux",
			},
			Mode: 0755 | int(os.ModeDir),
		},
	})
	configMinVersion := "3.0.0-experimental"

	return types.Test{
		Name:             name,
		In:               in,
		Out:              out,
		Config:           config,
		ConfigMinVersion: configMinVersion,
	}
}
// ApplyDefaultDirectoryPermissions checks that a directory created without
// an explicit mode receives 0755 permissions.
func ApplyDefaultDirectoryPermissions() types.Test {
	name := "Apply Default Directory Permissions"
	in := types.GetBaseDisk()
	out := types.GetBaseDisk()
	// NOTE(review): the config still carries a "filesystem" key; confirm
	// it is accepted by spec version 3.0.0-experimental.
	config := `{
		"ignition": { "version": "$version" },
		"storage": {
			"directories": [{
				"filesystem": "root",
				"path": "/foo/bar"
			}]
		}
	}`
	// Expected output: default mode 0755 plus the directory bit.
	out[0].Partitions.AddDirectories("ROOT", []types.Directory{
		{
			Node: types.Node{
				Name:      "bar",
				Directory: "foo",
			},
			Mode: 0755 | int(os.ModeDir),
		},
	})
	configMinVersion := "3.0.0-experimental"

	return types.Test{
		Name:             name,
		In:               in,
		Out:              out,
		Config:           config,
		ConfigMinVersion: configMinVersion,
	}
}
|
package sparta
import (
"bytes"
"errors"
"fmt"
"strings"
"text/template"
// Also included in lambda_permissions.go, but doubly included
// here as the package's init() function handles registering
// the resources we look up in this package.
_ "github.com/mweagle/cloudformationresources"
"github.com/Sirupsen/logrus"
gocf "github.com/mweagle/go-cloudformation"
)
// resourceOutputs is responsible for returning the conditional
// set of CloudFormation outputs for a given resource type. resourceName
// is currently unused; the outputs are chosen purely from the concrete
// type of resource. Unknown types produce a warning so missing support
// is visible in the logs.
func resourceOutputs(resourceName string,
	resource gocf.ResourceProperties,
	logger *logrus.Logger) ([]string, error) {
	outputProps := []string{}
	switch typedResource := resource.(type) {
	case gocf.IAMRole:
		// NOP
	case gocf.DynamoDBTable:
		// StreamArn only exists when a stream is configured.
		if typedResource.StreamSpecification != nil {
			outputProps = append(outputProps, "StreamArn")
		}
	case gocf.KinesisStream:
		outputProps = append(outputProps, "Arn")
	case gocf.Route53RecordSet:
		// TODO
	case gocf.S3Bucket:
		outputProps = append(outputProps, "DomainName", "WebsiteURL")
	case gocf.SNSTopic:
		outputProps = append(outputProps, "TopicName")
	case gocf.SQSQueue:
		outputProps = append(outputProps, "Arn", "QueueName")
	default:
		logger.WithFields(logrus.Fields{
			"ResourceType": fmt.Sprintf("%T", typedResource),
		}).Warn("Discovery information for dependency not yet implemented")
	}
	return outputProps, nil
}
// newCloudFormationResource returns an empty CloudFormation resource
// properties object for the given resource type, or an error when the
// type is unsupported.
func newCloudFormationResource(resourceType string, logger *logrus.Logger) (gocf.ResourceProperties, error) {
	resProps := gocf.NewResourceByType(resourceType)
	if nil == resProps {
		// BUG FIX: log at Error rather than Fatal — logrus.Fatal exits the
		// process, which made the error return below unreachable and
		// prevented callers from ever handling the failure.
		logger.WithFields(logrus.Fields{
			"Type": resourceType,
		}).Error("Failed to create CloudFormation CustomResource!")
		return nil, fmt.Errorf("Unsupported CustomResourceType: %s", resourceType)
	}
	return resProps, nil
}
// discoveryDataTemplate carries the values substituted into
// discoveryDataForResourceDependency.
type discoveryDataTemplate struct {
	ResourceID         string
	ResourceType       string
	ResourceProperties string
}

// discoveryDataForResourceDependency is a text/template (delimited with
// << >> so the embedded JSON braces survive) rendered once per dependency.
// NOTE(review): the unescaped inner quotes around the Ref value make the
// rendered output invalid strict JSON — confirm downstream consumers
// expect exactly this form before changing the literal.
var discoveryDataForResourceDependency = `
	{
		"ResourceID" : "<< .ResourceID >>",
		"ResourceRef" : "{"Ref":"<< .ResourceID >>"}",
		"ResourceType" : "<< .ResourceType >>",
		"Properties" : {
			<< .ResourceProperties >>
		}
	}
	`
// discoveryResourceInfoForDependency renders the discovery JSON blob for a
// single CloudFormation resource dependency. It returns (nil, nil) when
// logicalResourceName is not present in the template.
func discoveryResourceInfoForDependency(cfTemplate *gocf.Template,
	logicalResourceName string,
	logger *logrus.Logger) ([]byte, error) {

	item, ok := cfTemplate.Resources[logicalResourceName]
	if !ok {
		return nil, nil
	}
	resourceOutputs, resourceOutputsErr := resourceOutputs(logicalResourceName,
		item.Properties,
		logger)
	if resourceOutputsErr != nil {
		return nil, resourceOutputsErr
	}
	// Template data
	templateData := &discoveryDataTemplate{
		ResourceID:   logicalResourceName,
		ResourceType: item.Properties.CfnResourceType(),
	}
	// One `"Name" : "{ Fn::GetAtt ... }"` entry per discoverable output.
	var quotedAttrs []string
	for _, eachOutput := range resourceOutputs {
		quotedAttrs = append(quotedAttrs,
			fmt.Sprintf(`"%s" :"{ "Fn::GetAtt" : [ "%s", "%s" ] }"`,
				eachOutput,
				logicalResourceName,
				eachOutput))
	}
	templateData.ResourceProperties = strings.Join(quotedAttrs, ",")

	// Create the data that can be stuffed into Environment
	discoveryTemplate, discoveryTemplateErr := template.New("discoveryResourceData").
		Delims("<<", ">>").
		Parse(discoveryDataForResourceDependency)
	if nil != discoveryTemplateErr {
		return nil, discoveryTemplateErr
	}
	var templateResults bytes.Buffer
	evalResultErr := discoveryTemplate.Execute(&templateResults, templateData)
	return templateResults.Bytes(), evalResultErr
	// CLEANUP: removed the large block of commented-out legacy
	// map-building code that previously followed the return.
}
// safeAppendDependency adds dependencyName to the resource's DependsOn
// list, initializing the list on first use.
func safeAppendDependency(resource *gocf.Resource, dependencyName string) {
	deps := resource.DependsOn
	if deps == nil {
		deps = []string{}
	}
	resource.DependsOn = append(deps, dependencyName)
}
// safeMetadataInsert stores value under key in the resource's Metadata,
// allocating the map on first use.
func safeMetadataInsert(resource *gocf.Resource, key string, value interface{}) {
	if resource.Metadata == nil {
		resource.Metadata = map[string]interface{}{}
	}
	resource.Metadata[key] = value
}
// safeMergeTemplates copies the Resources, Mappings, and Outputs of
// sourceTemplate into destTemplate. Key collisions are collected, logged,
// and reported as a single error; non-colliding entries are still merged
// into destTemplate even when an error is returned.
func safeMergeTemplates(sourceTemplate *gocf.Template, destTemplate *gocf.Template, logger *logrus.Logger) error {
	var mergeErrors []string

	// Append the custom resources
	for eachKey, eachLambdaResource := range sourceTemplate.Resources {
		_, exists := destTemplate.Resources[eachKey]
		if exists {
			errorMsg := fmt.Sprintf("Duplicate CloudFormation resource name: %s", eachKey)
			mergeErrors = append(mergeErrors, errorMsg)
		} else {
			destTemplate.Resources[eachKey] = eachLambdaResource
		}
	}

	// Append the custom Mappings
	for eachKey, eachMapping := range sourceTemplate.Mappings {
		_, exists := destTemplate.Mappings[eachKey]
		if exists {
			errorMsg := fmt.Sprintf("Duplicate CloudFormation Mapping name: %s", eachKey)
			mergeErrors = append(mergeErrors, errorMsg)
		} else {
			destTemplate.Mappings[eachKey] = eachMapping
		}
	}

	// Append the custom outputs
	for eachKey, eachLambdaOutput := range sourceTemplate.Outputs {
		_, exists := destTemplate.Outputs[eachKey]
		if exists {
			errorMsg := fmt.Sprintf("Duplicate CloudFormation output key name: %s", eachKey)
			mergeErrors = append(mergeErrors, errorMsg)
		} else {
			destTemplate.Outputs[eachKey] = eachLambdaOutput
		}
	}

	// Report every collision, then fail once.
	if len(mergeErrors) > 0 {
		logger.Error("Failed to update template. The following collisions were found:")
		for _, eachError := range mergeErrors {
			logger.Error("\t" + eachError)
		}
		return errors.New("Template merge failed")
	}
	return nil
}
Cleanup inits, obsolete code
package sparta
import (
"bytes"
"errors"
"fmt"
"strings"
"text/template"
// Also included in lambda_permissions.go, but doubly included
// here as the package's init() function handles registering
// the resources we look up in this package.
_ "github.com/mweagle/cloudformationresources"
"github.com/Sirupsen/logrus"
gocf "github.com/mweagle/go-cloudformation"
)
// resourceOutputs is responsible for returning the conditional
// set of CloudFormation outputs for a given resource type. resourceName
// is currently unused; the outputs are chosen purely from the concrete
// type of resource. Unknown types produce a warning so missing support
// is visible in the logs.
func resourceOutputs(resourceName string,
	resource gocf.ResourceProperties,
	logger *logrus.Logger) ([]string, error) {
	outputProps := []string{}
	switch typedResource := resource.(type) {
	case gocf.IAMRole:
		// NOP
	case gocf.DynamoDBTable:
		// StreamArn only exists when a stream is configured.
		if typedResource.StreamSpecification != nil {
			outputProps = append(outputProps, "StreamArn")
		}
	case gocf.KinesisStream:
		outputProps = append(outputProps, "Arn")
	case gocf.Route53RecordSet:
		// TODO
	case gocf.S3Bucket:
		outputProps = append(outputProps, "DomainName", "WebsiteURL")
	case gocf.SNSTopic:
		outputProps = append(outputProps, "TopicName")
	case gocf.SQSQueue:
		outputProps = append(outputProps, "Arn", "QueueName")
	default:
		logger.WithFields(logrus.Fields{
			"ResourceType": fmt.Sprintf("%T", typedResource),
		}).Warn("Discovery information for dependency not yet implemented")
	}
	return outputProps, nil
}
// newCloudFormationResource returns an empty properties object for the given
// CloudFormation resource type, or an error for unsupported types.
func newCloudFormationResource(resourceType string, logger *logrus.Logger) (gocf.ResourceProperties, error) {
	resProps := gocf.NewResourceByType(resourceType)
	if nil == resProps {
		// Bug fix: this previously logged at Fatal level, which calls
		// os.Exit(1) and made the error return below unreachable. Log at
		// Error level instead so the caller can handle the failure.
		logger.WithFields(logrus.Fields{
			"Type": resourceType,
		}).Error("Failed to create CloudFormation CustomResource!")
		return nil, fmt.Errorf("Unsupported CustomResourceType: %s", resourceType)
	}
	return resProps, nil
}
// discoveryDataTemplate is the view model consumed by the
// discoveryDataForResourceDependency text/template below.
type discoveryDataTemplate struct {
// Logical resource name within the CloudFormation template.
ResourceID string
// CloudFormation resource type string (e.g. "AWS::S3::Bucket").
ResourceType string
// Pre-rendered, comma-joined `"name" : value` property pairs.
ResourceProperties string
}
// discoveryDataForResourceDependency renders discovery info for a single
// dependency. << >> delimiters are used so the template syntax does not
// collide with CloudFormation's own {} braces.
// NOTE(review): the quotes around the Ref object make this non-strict JSON;
// presumably it is post-processed downstream — confirm before changing.
var discoveryDataForResourceDependency = `
{
"ResourceID" : "<< .ResourceID >>",
"ResourceRef" : "{"Ref":"<< .ResourceID >>"}",
"ResourceType" : "<< .ResourceType >>",
"Properties" : {
<< .ResourceProperties >>
}
}
`
// discoveryResourceInfoForDependency renders the discovery payload for
// logicalResourceName from cfTemplate. Returns (nil, nil) when the resource
// is not present in the template.
func discoveryResourceInfoForDependency(cfTemplate *gocf.Template,
logicalResourceName string,
logger *logrus.Logger) ([]byte, error) {
item, ok := cfTemplate.Resources[logicalResourceName]
if !ok {
// Not an error: the dependency simply isn't in this template.
return nil, nil
}
resourceOutputs, resourceOutputsErr := resourceOutputs(logicalResourceName,
item.Properties,
logger)
if resourceOutputsErr != nil {
return nil, resourceOutputsErr
}
// Template data
templateData := &discoveryDataTemplate{
ResourceID: logicalResourceName,
ResourceType: item.Properties.CfnResourceType(),
}
var quotedAttrs []string
for _, eachOutput := range resourceOutputs {
// NOTE(review): quotes wrap the Fn::GetAtt object, making this
// non-strict JSON; presumably unescaped downstream — confirm.
quotedAttrs = append(quotedAttrs,
fmt.Sprintf(`"%s" :"{ "Fn::GetAtt" : [ "%s", "%s" ] }"`,
eachOutput,
logicalResourceName,
eachOutput))
}
templateData.ResourceProperties = strings.Join(quotedAttrs, ",")
// Create the data that can be stuffed into Environment
// << >> delimiters avoid colliding with CloudFormation's braces.
discoveryTemplate, discoveryTemplateErr := template.New("discoveryResourceData").
Delims("<<", ">>").
Parse(discoveryDataForResourceDependency)
if nil != discoveryTemplateErr {
return nil, discoveryTemplateErr
}
var templateResults bytes.Buffer
evalResultErr := discoveryTemplate.Execute(&templateResults, templateData)
return templateResults.Bytes(), evalResultErr
}
// safeAppendDependency adds dependencyName to the resource's DependsOn list.
// Idiom fix: append handles a nil slice, so the explicit empty-slice
// initialization the original performed was redundant.
func safeAppendDependency(resource *gocf.Resource, dependencyName string) {
	resource.DependsOn = append(resource.DependsOn, dependencyName)
}
// safeMetadataInsert stores value under key in the resource's Metadata map,
// lazily allocating the map first (writing to a nil map panics).
func safeMetadataInsert(resource *gocf.Resource, key string, value interface{}) {
	if resource.Metadata == nil {
		resource.Metadata = map[string]interface{}{}
	}
	resource.Metadata[key] = value
}
// safeMergeTemplates copies the Resources, Mappings, and Outputs of
// sourceTemplate into destTemplate. Rather than silently overwriting on a
// key collision, every duplicate name is collected and reported as an error.
func safeMergeTemplates(sourceTemplate *gocf.Template, destTemplate *gocf.Template, logger *logrus.Logger) error {
	var duplicates []string
	// Resources
	for name, res := range sourceTemplate.Resources {
		if _, collision := destTemplate.Resources[name]; collision {
			duplicates = append(duplicates, fmt.Sprintf("Duplicate CloudFormation resource name: %s", name))
		} else {
			destTemplate.Resources[name] = res
		}
	}
	// Mappings
	for name, mapping := range sourceTemplate.Mappings {
		if _, collision := destTemplate.Mappings[name]; collision {
			duplicates = append(duplicates, fmt.Sprintf("Duplicate CloudFormation Mapping name: %s", name))
		} else {
			destTemplate.Mappings[name] = mapping
		}
	}
	// Outputs
	for name, output := range sourceTemplate.Outputs {
		if _, collision := destTemplate.Outputs[name]; collision {
			duplicates = append(duplicates, fmt.Sprintf("Duplicate CloudFormation output key name: %s", name))
		} else {
			destTemplate.Outputs[name] = output
		}
	}
	if len(duplicates) == 0 {
		return nil
	}
	logger.Error("Failed to update template. The following collisions were found:")
	for _, msg := range duplicates {
		logger.Error("\t" + msg)
	}
	return errors.New("Template merge failed")
}
|
package paypalsdk
import "fmt"
// GetOrder retrieves order by ID
// Endpoint: GET /v2/checkout/orders/ID
func (c *Client) GetOrder(orderID string) (*Order, error) {
	order := &Order{}
	// Plain concatenation builds the same URL as the original Sprintf.
	req, err := c.NewRequest("GET", c.APIBase+"/v2/checkout/orders/"+orderID, nil)
	if err != nil {
		return order, err
	}
	err = c.SendWithAuth(req, order)
	return order, err
}
// CreateOrder - Use this call to create an order
// Endpoint: POST /v2/checkout/orders
func (c *Client) CreateOrder(intent string, purchaseUnits []PurchaseUnitRequest, payer *PayerInfo, appContext *ApplicationContext) (*Order, error) {
	type createOrderRequest struct {
		Intent             string                `json:"intent"`
		Payer              *PayerInfo            `json:"payer,omitempty"`
		PurchaseUnits      []PurchaseUnitRequest `json:"purchase_units"`
		ApplicationContext *ApplicationContext   `json:"application_context,omitempty"`
	}
	order := &Order{}
	// Bug fixes: the URL was missing the c.APIBase prefix that every other
	// endpoint in this file includes, and the NewRequest error was never
	// checked before the request was sent.
	req, err := c.NewRequest("POST", fmt.Sprintf("%s%s", c.APIBase, "/v2/checkout/orders"), createOrderRequest{Intent: intent, PurchaseUnits: purchaseUnits, Payer: payer, ApplicationContext: appContext})
	if err != nil {
		return order, err
	}
	if err = c.SendWithAuth(req, order); err != nil {
		return order, err
	}
	return order, nil
}
// AuthorizeOrder - Use this call to authorize an order.
// Endpoint: POST /v2/checkout/orders/ID/authorize
func (c *Client) AuthorizeOrder(orderID string, amount *Amount) (*Authorization, error) {
	type authRequest struct {
		Amount *Amount `json:"amount"`
	}
	auth := &Authorization{}
	endpoint := c.APIBase + "/v2/checkout/orders/" + orderID + "/authorize"
	req, err := c.NewRequest("POST", endpoint, authRequest{Amount: amount})
	if err != nil {
		return auth, err
	}
	err = c.SendWithAuth(req, auth)
	return auth, err
}
// CaptureOrder - Use this call to capture a payment on an order. To use this call, an original payment call must specify an intent of order.
// Endpoint: POST /v2/checkout/orders/ID/capture
func (c *Client) CaptureOrder(orderID string, amount *Amount, isFinalCapture bool, currency *Currency) (*Capture, error) {
	type captureRequest struct {
		Amount         *Amount `json:"amount"`
		IsFinalCapture bool    `json:"is_final_capture"`
		// NOTE(review): "transaction_fee" looks mismatched for a Currency
		// field — confirm against the PayPal API before changing.
		Currency *Currency `json:"transaction_fee"`
	}
	capture := &Capture{}
	endpoint := c.APIBase + "/v2/checkout/orders/" + orderID + "/capture"
	body := captureRequest{Amount: amount, IsFinalCapture: isFinalCapture, Currency: currency}
	req, err := c.NewRequest("POST", endpoint, body)
	if err != nil {
		return capture, err
	}
	err = c.SendWithAuth(req, capture)
	return capture, err
}
[Tiny fix] Create Order
package paypalsdk
import "fmt"
// GetOrder retrieves order by ID
// Endpoint: GET /v2/checkout/orders/ID
func (c *Client) GetOrder(orderID string) (*Order, error) {
// The (possibly empty) Order is returned even on error.
order := &Order{}
req, err := c.NewRequest("GET", fmt.Sprintf("%s%s%s", c.APIBase, "/v2/checkout/orders/", orderID), nil)
if err != nil {
return order, err
}
if err = c.SendWithAuth(req, order); err != nil {
return order, err
}
return order, nil
}
// CreateOrder - Use this call to create an order
// Endpoint: POST /v2/checkout/orders
func (c *Client) CreateOrder(intent string, purchaseUnits []PurchaseUnitRequest, payer *PayerInfo, appContext *ApplicationContext) (*Order, error) {
	type createOrderRequest struct {
		Intent             string                `json:"intent"`
		Payer              *PayerInfo            `json:"payer,omitempty"`
		PurchaseUnits      []PurchaseUnitRequest `json:"purchase_units"`
		ApplicationContext *ApplicationContext   `json:"application_context,omitempty"`
	}
	order := &Order{}
	req, err := c.NewRequest("POST", fmt.Sprintf("%s%s", c.APIBase, "/v2/checkout/orders"), createOrderRequest{Intent: intent, PurchaseUnits: purchaseUnits, Payer: payer, ApplicationContext: appContext})
	// Bug fix: the NewRequest error was previously ignored, so a malformed
	// request would be sent anyway. Every other endpoint checks it.
	if err != nil {
		return order, err
	}
	if err = c.SendWithAuth(req, order); err != nil {
		return order, err
	}
	return order, nil
}
// AuthorizeOrder - Use this call to authorize an order.
// Endpoint: POST /v2/checkout/orders/ID/authorize
func (c *Client) AuthorizeOrder(orderID string, amount *Amount) (*Authorization, error) {
// Request body is a local type: only the amount is posted.
type authRequest struct {
Amount *Amount `json:"amount"`
}
auth := &Authorization{}
req, err := c.NewRequest("POST", fmt.Sprintf("%s%s", c.APIBase, "/v2/checkout/orders/"+orderID+"/authorize"), authRequest{Amount: amount})
if err != nil {
return auth, err
}
if err = c.SendWithAuth(req, auth); err != nil {
return auth, err
}
return auth, nil
}
// CaptureOrder - Use this call to capture a payment on an order. To use this call, an original payment call must specify an intent of order.
// Endpoint: POST /v2/checkout/orders/ID/capture
func (c *Client) CaptureOrder(orderID string, amount *Amount, isFinalCapture bool, currency *Currency) (*Capture, error) {
type captureRequest struct {
Amount *Amount `json:"amount"`
IsFinalCapture bool `json:"is_final_capture"`
// NOTE(review): "transaction_fee" looks mismatched for a Currency
// field — confirm against the PayPal API before changing.
Currency *Currency `json:"transaction_fee"`
}
capture := &Capture{}
req, err := c.NewRequest("POST", fmt.Sprintf("%s%s", c.APIBase, "/v2/checkout/orders/"+orderID+"/capture"), captureRequest{Amount: amount, IsFinalCapture: isFinalCapture, Currency: currency})
if err != nil {
return capture, err
}
if err = c.SendWithAuth(req, capture); err != nil {
return capture, err
}
return capture, nil
}
|
// Copyright (c) 2015 Spinpunch, Inc. All Rights Reserved.
// See License.txt for license information.
package store
import (
"github.com/mattermost/platform/model"
"testing"
)
// TestTeamStoreSave verifies a team can be saved once, that re-saving the
// same (already-Id'd) team fails, and that clearing the Id while reusing the
// same domain violates the unique-domain constraint.
func TestTeamStoreSave(t *testing.T) {
Setup()
o1 := model.Team{}
o1.Name = "Name"
o1.Domain = "a" + model.NewId() + "b"
o1.Email = model.NewId() + "@nowhere.com"
o1.Type = model.TEAM_OPEN
if err := (<-store.Team().Save(&o1)).Err; err != nil {
t.Fatal("couldn't save item", err)
}
// o1.Id is now populated, so Save must reject it as an update attempt.
if err := (<-store.Team().Save(&o1)).Err; err == nil {
t.Fatal("shouldn't be able to update from save")
}
o1.Id = ""
if err := (<-store.Team().Save(&o1)).Err; err == nil {
t.Fatal("should be unique domain")
}
}
// TestTeamStoreUpdate verifies a saved team can be updated, and that updates
// fail for a missing key and for a changed (unknown) Id.
func TestTeamStoreUpdate(t *testing.T) {
	Setup()
	o1 := model.Team{}
	o1.Name = "Name"
	o1.Domain = "a" + model.NewId() + "b"
	o1.Email = model.NewId() + "@nowhere.com"
	o1.Type = model.TEAM_OPEN
	if err := (<-store.Team().Save(&o1)).Err; err != nil {
		t.Fatal(err)
	}
	if err := (<-store.Team().Update(&o1)).Err; err != nil {
		t.Fatal(err)
	}
	o1.Id = "missing"
	if err := (<-store.Team().Update(&o1)).Err; err == nil {
		t.Fatal("Update should have failed because of missing key")
	}
	o1.Id = model.NewId()
	if err := (<-store.Team().Update(&o1)).Err; err == nil {
		// Typo fix: message read "should have faile because id change".
		t.Fatal("Update should have failed because the id changed")
	}
}
// TestTeamStoreUpdateName verifies UpdateName persists a new team name,
// confirmed by re-fetching the team.
func TestTeamStoreUpdateName(t *testing.T) {
Setup()
o1 := &model.Team{}
o1.Name = "Name"
o1.Domain = "a" + model.NewId() + "b"
o1.Email = model.NewId() + "@nowhere.com"
o1.Type = model.TEAM_OPEN
o1 = (<-store.Team().Save(o1)).Data.(*model.Team)
newName := "NewName"
if err := (<-store.Team().UpdateName(newName, o1.Id)).Err; err != nil {
t.Fatal(err)
}
ro1 := (<-store.Team().Get(o1.Id)).Data.(*model.Team)
if ro1.Name != newName {
t.Fatal("Name not updated")
}
}
// TestTeamStoreGet verifies Get returns the saved team (compared via JSON
// serialization) and fails for an empty id.
func TestTeamStoreGet(t *testing.T) {
Setup()
o1 := model.Team{}
o1.Name = "Name"
o1.Domain = "a" + model.NewId() + "b"
o1.Email = model.NewId() + "@nowhere.com"
o1.Type = model.TEAM_OPEN
<-store.Team().Save(&o1)
if r1 := <-store.Team().Get(o1.Id); r1.Err != nil {
t.Fatal(r1.Err)
} else {
if r1.Data.(*model.Team).ToJson() != o1.ToJson() {
t.Fatal("invalid returned team")
}
}
if err := (<-store.Team().Get("")).Err; err == nil {
t.Fatal("Missing id should have failed")
}
}
// TestTeamStoreGetByDomain verifies lookup by domain returns the saved team
// and fails for an empty domain.
func TestTeamStoreGetByDomain(t *testing.T) {
Setup()
o1 := model.Team{}
o1.Name = "Name"
o1.Domain = "a" + model.NewId() + "b"
o1.Email = model.NewId() + "@nowhere.com"
o1.Type = model.TEAM_OPEN
if err := (<-store.Team().Save(&o1)).Err; err != nil {
t.Fatal(err)
}
if r1 := <-store.Team().GetByDomain(o1.Domain); r1.Err != nil {
t.Fatal(r1.Err)
} else {
if r1.Data.(*model.Team).ToJson() != o1.ToJson() {
t.Fatal("invalid returned team")
}
}
if err := (<-store.Team().GetByDomain("")).Err; err == nil {
t.Fatal("Missing id should have failed")
}
}
// TestTeamStoreGetForEmail verifies GetTeamsForEmail finds the team of a
// user saved with that email; an unknown email is not an error.
func TestTeamStoreGetForEmail(t *testing.T) {
Setup()
o1 := model.Team{}
o1.Name = "Name"
o1.Domain = "a" + model.NewId() + "b"
o1.Email = model.NewId() + "@nowhere.com"
o1.Type = model.TEAM_OPEN
<-store.Team().Save(&o1)
u1 := model.User{}
u1.TeamId = o1.Id
u1.Email = model.NewId()
<-store.User().Save(&u1)
if r1 := <-store.Team().GetTeamsForEmail(u1.Email); r1.Err != nil {
t.Fatal(r1.Err)
} else {
teams := r1.Data.([]*model.Team)
if teams[0].Id != o1.Id {
t.Fatal("failed to lookup by email")
}
}
// A miss returns an empty result, not an error.
if r1 := <-store.Team().GetTeamsForEmail("missing"); r1.Err != nil {
t.Fatal(r1.Err)
}
}
Fix unit test
// Copyright (c) 2015 Spinpunch, Inc. All Rights Reserved.
// See License.txt for license information.
package store
import (
"github.com/mattermost/platform/model"
"testing"
"time"
)
// TestTeamStoreSave verifies a team can be saved once, that re-saving the
// same (already-Id'd) team fails, and that clearing the Id while reusing the
// same domain violates the unique-domain constraint.
func TestTeamStoreSave(t *testing.T) {
Setup()
o1 := model.Team{}
o1.Name = "Name"
o1.Domain = "a" + model.NewId() + "b"
o1.Email = model.NewId() + "@nowhere.com"
o1.Type = model.TEAM_OPEN
if err := (<-store.Team().Save(&o1)).Err; err != nil {
t.Fatal("couldn't save item", err)
}
// NOTE(review): the sleep presumably separates timestamps between the two
// Save calls to avoid a flake — confirm against the store implementation.
time.Sleep(100 * time.Millisecond)
if err := (<-store.Team().Save(&o1)).Err; err == nil {
t.Fatal("shouldn't be able to update from save")
}
o1.Id = ""
if err := (<-store.Team().Save(&o1)).Err; err == nil {
t.Fatal("should be unique domain")
}
}
// TestTeamStoreUpdate verifies a saved team can be updated, and that updates
// fail for a missing key and for a changed (unknown) Id.
func TestTeamStoreUpdate(t *testing.T) {
	Setup()
	o1 := model.Team{}
	o1.Name = "Name"
	o1.Domain = "a" + model.NewId() + "b"
	o1.Email = model.NewId() + "@nowhere.com"
	o1.Type = model.TEAM_OPEN
	if err := (<-store.Team().Save(&o1)).Err; err != nil {
		t.Fatal(err)
	}
	if err := (<-store.Team().Update(&o1)).Err; err != nil {
		t.Fatal(err)
	}
	o1.Id = "missing"
	if err := (<-store.Team().Update(&o1)).Err; err == nil {
		t.Fatal("Update should have failed because of missing key")
	}
	o1.Id = model.NewId()
	if err := (<-store.Team().Update(&o1)).Err; err == nil {
		// Typo fix: message read "should have faile because id change".
		t.Fatal("Update should have failed because the id changed")
	}
}
// TestTeamStoreUpdateName verifies UpdateName persists a new team name,
// confirmed by re-fetching the team.
func TestTeamStoreUpdateName(t *testing.T) {
Setup()
o1 := &model.Team{}
o1.Name = "Name"
o1.Domain = "a" + model.NewId() + "b"
o1.Email = model.NewId() + "@nowhere.com"
o1.Type = model.TEAM_OPEN
o1 = (<-store.Team().Save(o1)).Data.(*model.Team)
newName := "NewName"
if err := (<-store.Team().UpdateName(newName, o1.Id)).Err; err != nil {
t.Fatal(err)
}
ro1 := (<-store.Team().Get(o1.Id)).Data.(*model.Team)
if ro1.Name != newName {
t.Fatal("Name not updated")
}
}
// TestTeamStoreGet verifies Get returns the saved team (compared via JSON
// serialization) and fails for an empty id.
func TestTeamStoreGet(t *testing.T) {
Setup()
o1 := model.Team{}
o1.Name = "Name"
o1.Domain = "a" + model.NewId() + "b"
o1.Email = model.NewId() + "@nowhere.com"
o1.Type = model.TEAM_OPEN
<-store.Team().Save(&o1)
if r1 := <-store.Team().Get(o1.Id); r1.Err != nil {
t.Fatal(r1.Err)
} else {
if r1.Data.(*model.Team).ToJson() != o1.ToJson() {
t.Fatal("invalid returned team")
}
}
if err := (<-store.Team().Get("")).Err; err == nil {
t.Fatal("Missing id should have failed")
}
}
// TestTeamStoreGetByDomain verifies lookup by domain returns the saved team
// and fails for an empty domain.
func TestTeamStoreGetByDomain(t *testing.T) {
Setup()
o1 := model.Team{}
o1.Name = "Name"
o1.Domain = "a" + model.NewId() + "b"
o1.Email = model.NewId() + "@nowhere.com"
o1.Type = model.TEAM_OPEN
if err := (<-store.Team().Save(&o1)).Err; err != nil {
t.Fatal(err)
}
if r1 := <-store.Team().GetByDomain(o1.Domain); r1.Err != nil {
t.Fatal(r1.Err)
} else {
if r1.Data.(*model.Team).ToJson() != o1.ToJson() {
t.Fatal("invalid returned team")
}
}
if err := (<-store.Team().GetByDomain("")).Err; err == nil {
t.Fatal("Missing id should have failed")
}
}
// TestTeamStoreGetForEmail verifies GetTeamsForEmail finds the team of a
// user saved with that email; an unknown email is not an error.
func TestTeamStoreGetForEmail(t *testing.T) {
Setup()
o1 := model.Team{}
o1.Name = "Name"
o1.Domain = "a" + model.NewId() + "b"
o1.Email = model.NewId() + "@nowhere.com"
o1.Type = model.TEAM_OPEN
<-store.Team().Save(&o1)
u1 := model.User{}
u1.TeamId = o1.Id
u1.Email = model.NewId()
<-store.User().Save(&u1)
if r1 := <-store.Team().GetTeamsForEmail(u1.Email); r1.Err != nil {
t.Fatal(r1.Err)
} else {
teams := r1.Data.([]*model.Team)
if teams[0].Id != o1.Id {
t.Fatal("failed to lookup by email")
}
}
// A miss returns an empty result, not an error.
if r1 := <-store.Team().GetTeamsForEmail("missing"); r1.Err != nil {
t.Fatal(r1.Err)
}
}
|
package oleutil
import "ole"
import "os"
// CreateDispatch resolves progId to a CLSID, instantiates the COM object,
// and queries it for the IDispatch interface.
func CreateDispatch(progId string) (dispatch *ole.IDispatch, err os.Error) {
	clsid, err := ole.CLSIDFromProgID(progId)
	if err != nil {
		return
	}
	unknown, err := ole.CreateInstance(clsid)
	if err != nil {
		return
	}
	dispatch, err = unknown.QueryInterface(ole.IID_IDispatch)
	return
}
// CallMethod invokes the named method on disp with the given parameters.
// Bug fix: the GetIDsOfName error is now checked before dispid[0] is
// indexed; the original would panic on an empty slice when lookup failed.
func CallMethod(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err os.Error) {
	var dispid []int32
	dispid, err = disp.GetIDsOfName([]string{name})
	if err != nil {
		return
	}
	result, err = disp.Invoke(dispid[0], ole.DISPATCH_METHOD, params...)
	return
}
// GetProperty reads the named property from disp.
// Bug fix: the GetIDsOfName error is now checked before dispid[0] is
// indexed; the original would panic on an empty slice when lookup failed.
func GetProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err os.Error) {
	var dispid []int32
	dispid, err = disp.GetIDsOfName([]string{name})
	if err != nil {
		return
	}
	result, err = disp.Invoke(dispid[0], ole.DISPATCH_PROPERTYGET, params...)
	return
}
// PutProperty writes the named property on disp.
// Bug fix: the GetIDsOfName error is now checked before dispid[0] is
// indexed; the original would panic on an empty slice when lookup failed.
func PutProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err os.Error) {
	var dispid []int32
	dispid, err = disp.GetIDsOfName([]string{name})
	if err != nil {
		return
	}
	result, err = disp.Invoke(dispid[0], ole.DISPATCH_PROPERTYPUT, params...)
	return
}
Get IDispatch directly.
package oleutil
import "ole"
import "os"
import "unsafe"
// CreateDispatch resolves progId to a class id — falling back to parsing it
// as a literal CLSID string — instantiates the object requesting
// IID_IDispatch, and returns the dispatch pointer.
func CreateDispatch(progId string) (dispatch *ole.IDispatch, err os.Error) {
var clsid *ole.GUID
clsid, err = ole.CLSIDFromProgID(progId)
if err != nil {
// Fallback: treat progId as a raw CLSID string.
clsid, err = ole.CLSIDFromString(progId)
if err != nil {
return
}
}
var unknown *ole.IUnknown
unknown, err = ole.CreateInstance(clsid, ole.IID_IDispatch)
if err != nil {
return
}
// CreateInstance was asked for IID_IDispatch, so the returned pointer is
// reinterpreted directly instead of calling QueryInterface.
dispatch = (*ole.IDispatch)(unsafe.Pointer(unknown))
return
}
// CallMethod invokes the named method on disp with the given parameters.
func CallMethod(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err os.Error) {
	dispid, err := disp.GetIDsOfName([]string{name})
	if err != nil {
		return
	}
	return disp.Invoke(dispid[0], ole.DISPATCH_METHOD, params...)
}
// GetProperty reads the named property from disp.
func GetProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err os.Error) {
	dispid, err := disp.GetIDsOfName([]string{name})
	if err != nil {
		return
	}
	return disp.Invoke(dispid[0], ole.DISPATCH_PROPERTYGET, params...)
}
// PutProperty writes the named property on disp.
func PutProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err os.Error) {
	dispid, err := disp.GetIDsOfName([]string{name})
	if err != nil {
		return
	}
	return disp.Invoke(dispid[0], ole.DISPATCH_PROPERTYPUT, params...)
}
|
package openvpn
import (
"bufio"
"fmt"
"net"
"os"
"os/exec"
"os/user"
"path"
"regexp"
"strconv"
"syscall"
"github.com/subgraph/oz"
)
// StartOpenVPN launches /usr/sbin/openvpn with arguments derived from the
// named config file, running the child under the configured OpenVPN group.
// The child's stdin/stderr are wired to the parent's.
func StartOpenVPN(c *oz.Config, conf string, ip *net.IP, table, dev, auth, runtoken string) (cmd *exec.Cmd, err error) {
	confFile := path.Join(c.OpenVPNConfDir, conf)
	cmdArgs, err := parseOpenVPNConf(c, confFile, ip, table, dev, auth, runtoken)
	if err != nil {
		// Bug fix: these messages used Fprintln with printf verbs, so "%v"
		// was printed literally; use Fprintf so the error is formatted.
		fmt.Fprintf(os.Stderr, "Error %v\n", err)
		return nil, err
	}
	runcmd := exec.Command("/usr/sbin/openvpn", cmdArgs...)
	runcmd.Stdin = os.Stdin
	runcmd.Stderr = os.Stderr
	ovpngroup, err := user.LookupGroup(c.OpenVPNGroup)
	if err != nil {
		fmt.Fprintf(os.Stderr, "[FATAL] OpenVPN group: %v\n", err)
		return nil, err
	}
	ovpngid, err := strconv.Atoi(ovpngroup.Gid)
	if err != nil {
		fmt.Fprintf(os.Stderr, "[FATAL] OpenVPN group: %v\n", err)
		return nil, err
	}
	runcmd.SysProcAttr = &syscall.SysProcAttr{}
	runcmd.SysProcAttr.Credential = &syscall.Credential{
		Gid: uint32(ovpngid),
	}
	err = runcmd.Start()
	if err != nil {
		fmt.Fprintf(os.Stderr, "[FATAL] Error (exec): %v %s\n", err, cmdArgs[0])
		// NOTE(review): exiting here prevents the caller from handling the
		// failure despite the error return value — confirm this is intended.
		os.Exit(1)
	}
	return runcmd, nil
}
// writeInlineSection copies scanner lines into a newly created file at dest
// until endTag is seen, then syncs. The file is closed before returning (the
// original deferred Close inside a loop, holding descriptors open until the
// caller returned). Error messages were inconsistent across the four copies
// of this logic and are normalized here via the `what` label.
func writeInlineSection(scanner *bufio.Scanner, dest, endTag, what string) error {
	f, err := os.Create(dest)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error writing %s to file: %v", what, err)
		return err
	}
	defer f.Close()
	for scanner.Scan() {
		if scanner.Text() == endTag {
			f.Sync()
			return nil
		}
		if _, err := f.WriteString(scanner.Text() + "\n"); err != nil {
			fmt.Fprintf(os.Stderr, "error writing %s contents to file: %v", what, err)
			return err
		}
	}
	return nil
}

// parseOpenVPNConf translates a client OpenVPN config file into a sanitized
// command-line argument list. Directives that could run scripts, change
// privileges, or conflict with oz's own network setup are dropped; inline
// <cert>/<ca>/<key>/<tls-auth> blocks are extracted to files under
// c.OpenVPNRunPath named after runtoken.
func parseOpenVPNConf(c *oz.Config, filename string, ip *net.IP, table, dev, auth, runtoken string) (cmdargs []string, err error) {
	var cmd []string
	pidfilepath := path.Join(c.OpenVPNRunPath, runtoken+".pid")
	file, err := os.Open(filename)
	if err != nil {
		return []string{}, err
	}
	defer file.Close()
	tokenRE := regexp.MustCompile("[^\\s]+")
	scanner := bufio.NewScanner(bufio.NewReader(file))
	scanner.Split(bufio.ScanLines)
	cmd = append(cmd, "--client")
	for scanner.Scan() {
		x := tokenRE.FindAllString(scanner.Text(), -1)
		if len(x) == 0 {
			continue
		}
		switch x[0] {
		/* TODO: Need to review all OpenVPN client params and filter here */
		case "auth-user-pass":
			cmd = append(cmd, "--auth-nocache", "--auth-user-pass", path.Join(c.OpenVPNConfDir, auth))
			continue
		// Dropped directives: lifecycle/script hooks, logging, privilege and
		// environment changes are all managed by oz itself.
		case "persist-tun", "auth-nocache", "iproute", "route-up", "config",
			"route-pre-down", "down", "script-security", "ipchange", "up",
			"cd", "chroot", "setenv", "setenv-safe", "group", "user",
			"daemon", "syslog", "log", "log-append", "echo", "status",
			"mode", "client", "server", "management", "plugin", "ifconfig",
			"writepid":
			continue
		case "ca", "crl-verify":
			// Referenced files are re-rooted into the oz config directory.
			if len(x) == 2 {
				cmd = append(cmd, "--"+x[0], path.Join(c.OpenVPNConfDir, x[1]))
			}
			continue
		case "<cert>":
			certpath := path.Join(c.OpenVPNRunPath, runtoken+"-cert.cert")
			if err := writeInlineSection(scanner, certpath, "</cert>", "cert"); err != nil {
				return cmd, err
			}
			cmd = append(cmd, "--cert", certpath)
			continue
		case "<ca>":
			capath := path.Join(c.OpenVPNRunPath, runtoken+"-ca.cert")
			if err := writeInlineSection(scanner, capath, "</ca>", "ca"); err != nil {
				return cmd, err
			}
			cmd = append(cmd, "--ca", capath)
			continue
		case "<key>":
			keypath := path.Join(c.OpenVPNRunPath, runtoken+"-key.key")
			if err := writeInlineSection(scanner, keypath, "</key>", "key"); err != nil {
				return cmd, err
			}
			cmd = append(cmd, "--key", keypath)
			continue
		case "<tls-auth>":
			tlsauthpath := path.Join(c.OpenVPNRunPath, runtoken+"-tls-auth.key")
			if err := writeInlineSection(scanner, tlsauthpath, "</tls-auth>", "tls-auth"); err != nil {
				return cmd, err
			}
			cmd = append(cmd, "--tls-auth", tlsauthpath)
			continue
		default:
		}
		// Any other directive is passed through as "--name arg...".
		cmd = append(cmd, "--"+x[0])
		cmd = append(cmd, x[1:]...)
	}
	extra := []string{"--writepid", pidfilepath, "--ping", "10", "--ping-restart", "60", "--daemon", "--auth-retry", "nointeract", "--route-noexec", "--route-up", "/usr/bin/oz-ovpn-route-up", "--route-pre-down", "/usr/bin/oz-ovpn-route-down", "--script-security", "2", "--setenv", "bridge_addr", ip.String(), "--setenv", "routing_table", table, "--setenv", "bridge_dev", dev}
	cmd = append(cmd, extra...)
	// Debug: echo the assembled argument list to stderr.
	for _, arg := range cmd {
		fmt.Fprintf(os.Stderr, "%s ", arg)
	}
	return cmd, nil
}
Update ovpn config parsing to strip comments
package openvpn
import (
"bufio"
"fmt"
"net"
"os"
"os/exec"
"os/user"
"path"
"regexp"
"strconv"
"syscall"
"github.com/subgraph/oz"
)
// StartOpenVPN launches /usr/sbin/openvpn with arguments derived from the
// named config file, running the child under the configured OpenVPN group.
// The child's stdin/stderr are wired to the parent's.
func StartOpenVPN(c *oz.Config, conf string, ip *net.IP, table, dev, auth, runtoken string) (cmd *exec.Cmd, err error) {
	confFile := path.Join(c.OpenVPNConfDir, conf)
	cmdArgs, err := parseOpenVPNConf(c, confFile, ip, table, dev, auth, runtoken)
	if err != nil {
		// Bug fix: these messages used Fprintln with printf verbs, so "%v"
		// was printed literally; use Fprintf so the error is formatted.
		fmt.Fprintf(os.Stderr, "Error %v\n", err)
		return nil, err
	}
	runcmd := exec.Command("/usr/sbin/openvpn", cmdArgs...)
	runcmd.Stdin = os.Stdin
	runcmd.Stderr = os.Stderr
	ovpngroup, err := user.LookupGroup(c.OpenVPNGroup)
	if err != nil {
		fmt.Fprintf(os.Stderr, "[FATAL] OpenVPN group: %v\n", err)
		return nil, err
	}
	ovpngid, err := strconv.Atoi(ovpngroup.Gid)
	if err != nil {
		fmt.Fprintf(os.Stderr, "[FATAL] OpenVPN group: %v\n", err)
		return nil, err
	}
	runcmd.SysProcAttr = &syscall.SysProcAttr{}
	runcmd.SysProcAttr.Credential = &syscall.Credential{
		Gid: uint32(ovpngid),
	}
	err = runcmd.Start()
	if err != nil {
		fmt.Fprintf(os.Stderr, "[FATAL] Error (exec): %v %s\n", err, cmdArgs[0])
		// NOTE(review): exiting here prevents the caller from handling the
		// failure despite the error return value — confirm this is intended.
		os.Exit(1)
	}
	return runcmd, nil
}
// commentStripRE captures a line's content before a trailing "#" comment.
// Hoisted to package level so it is compiled once instead of per call.
var commentStripRE = regexp.MustCompile("^(.*)(#.*)$")

// stripLineComments removes a trailing "#" comment from a token list: the
// token containing the marker keeps its pre-"#" portion (if non-empty) and
// every following token on the line is dropped.
func stripLineComments(tokens []string) []string {
	var kept []string
	for _, tok := range tokens {
		stripped := commentStripRE.ReplaceAllString(tok, `$1`)
		if stripped != "" {
			kept = append(kept, stripped)
		}
		if stripped != tok {
			break
		}
	}
	return kept
}

// writeInlineBlock copies scanner lines into a newly created file at dest
// until endTag is seen, then syncs. The file is closed before returning (the
// original deferred Close inside a loop, holding descriptors open until the
// caller returned). Error messages were inconsistent across the four copies
// of this logic and are normalized here via the `what` label.
func writeInlineBlock(scanner *bufio.Scanner, dest, endTag, what string) error {
	f, err := os.Create(dest)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error writing %s to file: %v", what, err)
		return err
	}
	defer f.Close()
	for scanner.Scan() {
		if scanner.Text() == endTag {
			f.Sync()
			return nil
		}
		if _, err := f.WriteString(scanner.Text() + "\n"); err != nil {
			fmt.Fprintf(os.Stderr, "error writing %s contents to file: %v", what, err)
			return err
		}
	}
	return nil
}

// parseOpenVPNConf translates a client OpenVPN config file into a sanitized
// command-line argument list, stripping "#" comments. Directives that could
// run scripts, change privileges, or conflict with oz's own network setup
// are dropped; inline <cert>/<ca>/<key>/<tls-auth> blocks are extracted to
// files under c.OpenVPNRunPath named after runtoken.
func parseOpenVPNConf(c *oz.Config, filename string, ip *net.IP, table, dev, auth, runtoken string) (cmdargs []string, err error) {
	var cmd []string
	pidfilepath := path.Join(c.OpenVPNRunPath, runtoken+".pid")
	file, err := os.Open(filename)
	if err != nil {
		return []string{}, err
	}
	defer file.Close()
	tokenRE := regexp.MustCompile("[^\\s]+")
	scanner := bufio.NewScanner(bufio.NewReader(file))
	scanner.Split(bufio.ScanLines)
	cmd = append(cmd, "--client")
	for scanner.Scan() {
		x := stripLineComments(tokenRE.FindAllString(scanner.Text(), -1))
		if len(x) == 0 {
			continue
		}
		switch x[0] {
		/* TODO: Need to review all OpenVPN client params and filter here */
		case "auth-user-pass":
			cmd = append(cmd, "--auth-nocache", "--auth-user-pass", path.Join(c.OpenVPNConfDir, auth))
			continue
		// Dropped directives: lifecycle/script hooks, logging, privilege and
		// environment changes are all managed by oz itself.
		case "persist-tun", "auth-nocache", "iproute", "route-up", "config",
			"route-pre-down", "down", "script-security", "ipchange", "up",
			"cd", "chroot", "setenv", "setenv-safe", "group", "user",
			"daemon", "syslog", "log", "log-append", "echo", "status",
			"mode", "client", "server", "management", "plugin", "ifconfig",
			"writepid":
			continue
		case "ca", "crl-verify":
			// Referenced files are re-rooted into the oz config directory.
			if len(x) == 2 {
				cmd = append(cmd, "--"+x[0], path.Join(c.OpenVPNConfDir, x[1]))
			}
			continue
		case "<cert>":
			certpath := path.Join(c.OpenVPNRunPath, runtoken+"-cert.cert")
			if err := writeInlineBlock(scanner, certpath, "</cert>", "cert"); err != nil {
				return cmd, err
			}
			cmd = append(cmd, "--cert", certpath)
			continue
		case "<ca>":
			capath := path.Join(c.OpenVPNRunPath, runtoken+"-ca.cert")
			if err := writeInlineBlock(scanner, capath, "</ca>", "ca"); err != nil {
				return cmd, err
			}
			cmd = append(cmd, "--ca", capath)
			continue
		case "<key>":
			keypath := path.Join(c.OpenVPNRunPath, runtoken+"-key.key")
			if err := writeInlineBlock(scanner, keypath, "</key>", "key"); err != nil {
				return cmd, err
			}
			cmd = append(cmd, "--key", keypath)
			continue
		case "<tls-auth>":
			tlsauthpath := path.Join(c.OpenVPNRunPath, runtoken+"-tls-auth.key")
			if err := writeInlineBlock(scanner, tlsauthpath, "</tls-auth>", "tls-auth"); err != nil {
				return cmd, err
			}
			cmd = append(cmd, "--tls-auth", tlsauthpath)
			continue
		default:
		}
		// Any other directive is passed through as "--name arg...".
		cmd = append(cmd, "--"+x[0])
		cmd = append(cmd, x[1:]...)
	}
	extra := []string{"--writepid", pidfilepath, "--ping", "10", "--ping-restart", "60", "--daemon", "--auth-retry", "nointeract", "--route-noexec", "--route-up", "/usr/bin/oz-ovpn-route-up", "--route-pre-down", "/usr/bin/oz-ovpn-route-down", "--script-security", "2", "--setenv", "bridge_addr", ip.String(), "--setenv", "routing_table", table, "--setenv", "bridge_dev", dev}
	cmd = append(cmd, extra...)
	// Debug: echo the assembled argument list to stderr.
	for _, arg := range cmd {
		fmt.Fprintf(os.Stderr, "%s ", arg)
	}
	return cmd, nil
}
|
// Package panos interacts with Palo Alto and Panorama devices using the XML API.
package panos
import (
"crypto/tls"
"encoding/xml"
"errors"
"fmt"
"io/ioutil"
"net/url"
"regexp"
"strconv"
"strings"
"github.com/parnurzeal/gorequest"
)
// PaloAlto is a container for our session state. It also holds information about the device
// that is gathered upon a successful connection to it.
type PaloAlto struct {
// Connection / session state.
Host string
Key string
URI string
// Device identification.
Platform string
Model string
Serial string
SoftwareVersion string
DeviceType string
Panorama bool
Shared bool
// Management network configuration.
IPAddress string
Netmask string
DefaultGateway string
MACAddress string
Time string
Uptime string
// GlobalProtect versions.
GPClientPackageVersion string
GPDatafileVersion string
GPDatafileReleaseDate string
GPClientlessVPNVersion string
GPClientlessVPNReleaseDate string
// Content / signature package versions.
AppVersion string
AppReleaseDate string
AntiVirusVersion string
AntiVirusReleaseDate string
ThreatVersion string
ThreatReleaseDate string
WildfireVersion string
WildfireReleaseDate string
URLDB string
URLFilteringVersion string
LogDBVersion string
MultiVsys string
OperationalMode string
}
// AuthMethod defines how we want to authenticate to the device. If using a
// username and password to authenticate, the Credentials field must contain
// the username and password, respectively (e.g. []string{"admin", "password"}).
// If you are using the API key for authentication, provide the entire key
// for the APIKey field.
type AuthMethod struct {
// Credentials holds the username and password, in that order.
Credentials []string
// APIKey is a pre-generated API key, used instead of Credentials.
APIKey string
}
// Jobs holds information about all jobs on the device.
type Jobs struct {
XMLName xml.Name `xml:"response"`
// Status/Code mirror the API response envelope attributes.
Status string `xml:"status,attr"`
Code string `xml:"code,attr"`
Jobs []Job `xml:"result>job"`
}
// Job holds information about each individual job.
type Job struct {
ID int `xml:"id"`
User string `xml:"user"`
Type string `xml:"type"`
Status string `xml:"status"`
Queued string `xml:"queued"`
Stoppable string `xml:"stoppable"`
Result string `xml:"result"`
Description string `xml:"description,omitempty"`
QueuePosition int `xml:"positionInQ"`
Progress string `xml:"progress"`
Details []string `xml:"details>line"`
Warnings string `xml:"warnings,omitempty"`
// StartTime/EndTime map the device's tdeq/tfin timestamps.
StartTime string `xml:"tdeq"`
EndTime string `xml:"tfin"`
}
// Logs holds all of the log data retrieved from querying the system.
type Logs struct {
XMLName xml.Name `xml:"response"`
Status string `xml:"status,attr"`
Code string `xml:"code,attr"`
// Job metadata for the asynchronous log query.
StartTime string `xml:"result>job>tdeq"`
EndTime string `xml:"result>job>tfin"`
JobStatus string `xml:"result>job>status"`
JobID int `xml:"result>job>id"`
Logs []Log `xml:"result>log>logs>entry"`
}
// Log holds information about each individual log retrieved for the following log-types:
//
// config, system, traffic, threat, wildfire, url, data
//
// Certain fields are omitted or populated based on the log type that is specified when querying the system. See https://goo.gl/PPLjVZ for the
// fields assigned for the different log types.
type Log struct {
	DeviceName string `xml:"device_name,omitempty"`
	Serial string `xml:"serial,omitempty"`
	Rule string `xml:"rule,omitempty"`
	TimeGenerated string `xml:"time_generated,omitempty"`
	TimeReceived string `xml:"time_received,omitempty"`
	Type string `xml:"type,omitempty"`
	Subtype string `xml:"subtype,omitempty"`
	// Source/destination fields (traffic, threat, url, data log types).
	From string `xml:"from,omitempty"`
	To string `xml:"to,omitempty"`
	Source string `xml:"src,omitempty"`
	SourceUser string `xml:"srcuser,omitempty"`
	SourcePort int `xml:"sport,omitempty"`
	SourceCountry string `xml:"srcloc,omitempty"`
	Destination string `xml:"dst,omitempty"`
	DestinationPort int `xml:"dport,omitempty"`
	DestinationCountry string `xml:"dstloc,omitempty"`
	Application string `xml:"app,omitempty"`
	Action string `xml:"action,omitempty"`
	// NAT translation fields.
	NATSourceIP string `xml:"natsrc,omitempty"`
	NATSourcePort int `xml:"natsport,omitempty"`
	NATDestinationIP string `xml:"natdst,omitempty"`
	NATDestinationPort int `xml:"natdport,omitempty"`
	// Session volume counters.
	Packets int `xml:"packets,omitempty"`
	PacketsSent int `xml:"pkts_sent,omitempty"`
	PacketsReceived int `xml:"pkts_received,omitempty"`
	Bytes int `xml:"bytes,omitempty"`
	BytesSent int `xml:"bytes_sent,omitempty"`
	BytesReceived int `xml:"bytes_received,omitempty"`
	SessionID int `xml:"sessionid,omitempty"`
	SessionEndReason string `xml:"session_end_reason,omitempty"`
	RepeatCount int `xml:"repeatcnt,omitempty"`
	Start string `xml:"start,omitempty"`
	Elapsed string `xml:"elapsed,omitempty"`
	Category string `xml:"category,omitempty"`
	// Threat / wildfire fields.
	ThreatCategory string `xml:"thr_category,omitempty"`
	ThreatName string `xml:"threatid,omitempty"`
	ThreatID int `xml:"tid,omitempty"`
	Misc string `xml:"misc,omitempty"`
	Severity string `xml:"severity,omitempty"`
	Direction string `xml:"direction,omitempty"`
	InboundInterface string `xml:"inbound_if,omitempty"`
	OutboundInterface string `xml:"outbound_if,omitempty"`
	ID int `xml:"logid,attr"`
	Domain int `xml:"domain,omitempty"`
	ReceiveTime string `xml:"receive_time,omitempty"`
	SequenceNumber string `xml:"seqno,omitempty"`
	ActionFlags string `xml:"actionflags,omitempty"`
	ConfigVersion int `xml:"config_ver,omitempty"`
	Vsys string `xml:"vsys,omitempty"`
	Logset string `xml:"logset,omitempty"`
	// Session flag fields (presence indicators from the firewall).
	Flags string `xml:"flags,omitempty"`
	Pcap string `xml:"flag-pcap,omitempty"`
	PcapID int `xml:"pcap_id,omitempty"`
	Flagged string `xml:"flag-flagged,omitempty"`
	Proxy string `xml:"flag-proxy,omitempty"`
	URLDenied string `xml:"flag-url-denied,omitempty"`
	NAT string `xml:"flag-nat,omitempty"`
	CaptivePortal string `xml:"captive-portal"`
	NonStandardDestinationPort string `xml:"non-std-dport"`
	Transaction string `xml:"transaction,omitempty"`
	PBFClient2Server string `xml:"pbf-c2s,omitempty"`
	PBFServer2Client string `xml:"pbf-s2c,omitempty"`
	TemporaryMatch string `xml:"temporary-match,omitempty"`
	SymmetricReturn string `xml:"sym-return,omitempty"`
	SSLDecryptMirror string `xml:"decrypt-mirror,omitempty"`
	CredentialDetected string `xml:"credential-detected,omitempty"`
	MPTCP string `xml:"flag-mptcp-set,omitempty"`
	TunnelInspected string `xml:"flag-tunnel-inspected,omitempty"`
	ReconExcluded string `xml:"flag-recon-excluded,omitempty"`
	Protocol string `xml:"proto,omitempty"`
	// Tunnel inspection fields.
	TunnelType string `xml:"tunnel,omitempty"`
	TPadding int `xml:"tpadding,omitempty"`
	CPadding int `xml:"cpadding,omitempty"`
	TunnelIMSI int `xml:"tunnelid_imsi,omitempty"`
	VsysID int `xml:"vsys_id,omitempty"`
	ParentSessionID int `xml:"parent_session_id,omitempty"`
	ReportID int `xml:"reportid,omitempty"`
	// URL filtering fields.
	URLIndex int `xml:"url_idx,omitempty"`
	HTTPMethod string `xml:"http_method,omitempty"`
	XForwardedFor string `xml:"xff,omitempty"`
	Referer string `xml:"referer,omitempty"`
	UserAgent string `xml:"user_agent,omitempty"`
	SignatureFlags string `xml:"sig_flags,omitempty"`
	ContentVersion string `xml:"contentver,omitempty"`
	// File / email fields (data and wildfire log types).
	FileDigest string `xml:"filedigest,omitempty"`
	Filetype string `xml:"filetype,omitempty"`
	Sender string `xml:"sender,omitempty"`
	Recipient string `xml:"recipient,omitempty"`
	Subject string `xml:"subject,omitempty"`
	Cloud string `xml:"cloud,omitempty"`
	Padding int `xml:"padding,omitempty"`
	ActionSource string `xml:"action_source,omitempty"`
	TunnelID int `xml:"tunnelid,omitempty"`
	IMSI string `xml:"imsi,omitempty"`
	MonitorTag string `xml:"monitortag,omitempty"`
	IMEI string `xml:"imei,omitempty"`
	DeviceGroupHierarchy1 int `xml:"dg_hier_level_1,omitempty"`
	DeviceGroupHierarchy2 int `xml:"dg_hier_level_2,omitempty"`
	DeviceGroupHierarchy3 int `xml:"dg_hier_level_3,omitempty"`
	DeviceGroupHierarchy4 int `xml:"dg_hier_level_4,omitempty"`
	// Config / system log fields.
	Host string `xml:"host,omitempty"`
	Command string `xml:"cmd,omitempty"`
	Admin string `xml:"admin,omitempty"`
	Client string `xml:"client,omitempty"`
	Result string `xml:"result,omitempty"`
	Path string `xml:"path,omitempty"`
	BeforeChangePreview string `xml:"before-change-preview,omitempty"`
	AfterChangePreview string `xml:"after-change-preview,omitempty"`
	FullPath string `xml:"full-path,omitempty"`
	EventID string `xml:"eventid,omitempty"`
	Module string `xml:"module,omitempty"`
	Description string `xml:"opaque,omitempty"`
}
// LogParameters specifies additional parameters that can be used when retrieving logs. These are all optional.
type LogParameters struct {
	// Query specifies the match criteria for the logs. This is similar to the query provided in the web interface under the Monitor
	// tab when viewing the logs.
	// NOTE(review): QueryLogs URL-encodes this value via url.QueryEscape, so
	// pass the raw (unencoded) query string here.
	Query string
	// NLogs specifies the number of logs to retrieve. The default is 20 when the parameter is not specified. The maximum is 5000.
	NLogs int
	// Skip specifies the number of logs to skip when doing a log retrieval. The default is 0. This is useful when retrieving
	// logs in batches where you can skip the previously retrieved logs.
	Skip int
	// Direction specifies whether logs are shown oldest first (forward) or newest first (backward). Default is backward.
	Direction string
	// Action is not used at the moment. Log data sizes can be large so the API uses an asynchronous job scheduling approach to retrieve
	// log data. The initial query returns a Job ID (job-id) that you can then use for future queries with the action parameter: action=get
	// will check status of an active job or retrieve the log data when the status is FIN (finished).
	Action string
}
// RoutingTable contains all of the routes in the devices routing table.
type RoutingTable struct {
	XMLName xml.Name `xml:"response"`
	Status string `xml:"status,attr"`
	Code string `xml:"code,attr"`
	// Flags is the device's legend explaining the per-route flag letters.
	Flags string `xml:"result>flags"`
	// Routes lists each route entry across all virtual routers returned.
	Routes []Route `xml:"result>entry"`
}
// Route contains information about each individual route in the devices routing table.
type Route struct {
	VirtualRouter string `xml:"virtual-router"`
	Destination string `xml:"destination"`
	NextHop string `xml:"nexthop"`
	Metric int `xml:"metric"`
	// Flags holds the route's flag letters; see RoutingTable.Flags for the legend.
	Flags string `xml:"flags"`
	// Age is the route age as reported by the device; presumably seconds — TODO confirm.
	Age int64 `xml:"age"`
	Interface string `xml:"interface"`
}
// authKey holds our API key as returned by a type=keygen request.
type authKey struct {
	XMLName xml.Name `xml:"response"`
	Status string `xml:"status,attr"`
	Code string `xml:"code,attr"`
	// Key is the generated API key used on all subsequent requests.
	Key string `xml:"result>key"`
}
// systemInfo holds basic system information, unmarshaled from the
// "<show><system><info>" operational command. The fields mirror the exported
// fields of PaloAlto, which NewSession populates from this struct.
type systemInfo struct {
	XMLName xml.Name `xml:"response"`
	Status string `xml:"status,attr"`
	Code string `xml:"code,attr"`
	Platform string `xml:"result>system>platform-family"`
	Model string `xml:"result>system>model"`
	Serial string `xml:"result>system>serial"`
	SoftwareVersion string `xml:"result>system>sw-version"`
	IPAddress string `xml:"result>system>ip-address"`
	Netmask string `xml:"result>system>netmask"`
	DefaultGateway string `xml:"result>system>default-gateway"`
	MACAddress string `xml:"result>system>mac-address"`
	Time string `xml:"result>system>time"`
	Uptime string `xml:"result>system>uptime"`
	GPClientPackageVersion string `xml:"result>system>global-protect-client-package-version"`
	GPDatafileVersion string `xml:"result>system>global-protect-datafile-version"`
	GPDatafileReleaseDate string `xml:"result>system>global-protect-datafile-release-date"`
	GPClientlessVPNVersion string `xml:"result>system>global-protect-clientless-vpn-version"`
	GPClientlessVPNReleaseDate string `xml:"result>system>global-protect-clientless-vpn-release-date"`
	AppVersion string `xml:"result>system>app-version"`
	AppReleaseDate string `xml:"result>system>app-release-date"`
	AntiVirusVersion string `xml:"result>system>av-version"`
	AntiVirusReleaseDate string `xml:"result>system>av-release-date"`
	ThreatVersion string `xml:"result>system>threat-version"`
	ThreatReleaseDate string `xml:"result>system>threat-release-date"`
	WildfireVersion string `xml:"result>system>wildfire-version"`
	WildfireReleaseDate string `xml:"result>system>wildfire-release-date"`
	URLDB string `xml:"result>system>url-db"`
	URLFilteringVersion string `xml:"result>system>url-filtering-version"`
	LogDBVersion string `xml:"result>system>logdb-version"`
	MultiVsys string `xml:"result>system>multi-vsys"`
	OperationalMode string `xml:"result>system>operational-mode"`
}
// commandOutput holds the results of our operational mode commands that were issued.
type commandOutput struct {
	XMLName xml.Name `xml:"response"`
	Status string `xml:"status,attr"`
	Code string `xml:"code,attr"`
	// Data is the raw character data inside the <result> element.
	Data string `xml:"result"`
}
// requestError contains information about any error we get from a request.
type requestError struct {
	XMLName xml.Name `xml:"response"`
	// Status is "success" for accepted requests; anything else is an error.
	Status string `xml:"status,attr"`
	// Code is looked up in errorCodes to produce a readable message.
	Code string `xml:"code,attr"`
	Message string `xml:"result>msg,omitempty"`
}
// testURL contains the results of the operational command test url.
type testURL struct {
	XMLName xml.Name `xml:"response"`
	Status string `xml:"status,attr"`
	Code string `xml:"code,attr"`
	// Result is free-form text that TestURL parses with a regexp to extract
	// the base-db and cloud-db categorizations.
	Result string `xml:"result"`
}
// logID contains the job ID when querying the device for log retrieval.
type logID struct {
	XMLName xml.Name `xml:"response"`
	Status string `xml:"status,attr"`
	Code string `xml:"code,attr"`
	// ID is the asynchronous job ID to pass to RetrieveLogs.
	ID int `xml:"result>job"`
}
// routeLookupResults contains the results of testing a route lookup. It is the
// unexported XML mirror of the exported RouteLookup type.
type routeLookupResults struct {
	XMLName xml.Name `xml:"response"`
	Status string `xml:"status,attr"`
	Code string `xml:"code,attr"`
	NextHop string `xml:"result>nh"`
	Source string `xml:"result>src"`
	IP string `xml:"result>ip"`
	Metric int `xml:"result>metric"`
	Interface string `xml:"result>interface"`
	DataPlane string `xml:"result>dp"`
}
// RouteLookup contains the results of the operational command: test routing fib-lookup <ip> <virtual-router>.
// It is populated by TestRouteLookup from the device's routeLookupResults response.
type RouteLookup struct {
	NextHop string
	Source string
	IP string
	Metric int
	Interface string
	DataPlane string
}
// SessionTable holds information about every session on the device.
type SessionTable struct {
	XMLName xml.Name `xml:"response"`
	Status string `xml:"status,attr"`
	Code string `xml:"code,attr"`
	// Sessions lists every active session returned by "show session all".
	Sessions []Session `xml:"result>entry"`
}
// Session holds information about each individual session on the device.
type Session struct {
	Application string `xml:"application"`
	IngressInterface string `xml:"ingress"`
	EgressInterface string `xml:"egress"`
	VsysID int `xml:"vsys-idx"`
	// NATSourceAddress/NATDestinationAddress map to the translated (x-prefixed) elements.
	NATSourceAddress string `xml:"xsource"`
	SourceNAT string `xml:"srcnat"`
	SourcePort int `xml:"sport"`
	SecurityRule string `xml:"security-rule"`
	From string `xml:"from"`
	DestinationAddress string `xml:"dst"`
	To string `xml:"to"`
	State string `xml:"state"`
	NATDestinationAddress string `xml:"xdst"`
	NAT string `xml:"nat"`
	Type string `xml:"type"`
	StartTime string `xml:"start-time"`
	Proxy string `xml:"proxy"`
	DecryptMirror string `xml:"decrypt-mirror"`
	// ID is the session index ("idx") in the session table.
	ID int `xml:"idx"`
	TotalByteCount int `xml:"total-byte-count"`
	DestinationNAT string `xml:"dstnat"`
	Vsys string `xml:"vsys"`
	NATSourcePort int `xml:"xsport"`
	NATDestinationPort int `xml:"xdport"`
	Flags string `xml:"flags,omitempty"`
	SourceAddress string `xml:"source"`
	DestinationPort int `xml:"dport"`
}
var (
	// r is the shared HTTP client used for every API request. Certificate
	// verification is disabled because PAN-OS devices commonly run with
	// self-signed certificates. NOTE(review): consider making verification
	// configurable rather than always skipping it.
	r = gorequest.New().TLSClientConfig(&tls.Config{InsecureSkipVerify: true})
	// errorCodes maps PAN-OS API response codes (plus the HTTP statuses 400
	// and 403) to human-readable descriptions used in error messages.
	errorCodes = map[string]string{
	"400": "Bad request - Returned when a required parameter is missing, an illegal parameter value is used",
	"403": "Forbidden - Returned for authentication or authorization errors including invalid key, insufficient admin access rights",
	"1": "Unknown command - The specific config or operational command is not recognized",
	"2": "Internal error - Check with technical support when seeing these errors",
	"3": "Internal error - Check with technical support when seeing these errors",
	"4": "Internal error - Check with technical support when seeing these errors",
	"5": "Internal error - Check with technical support when seeing these errors",
	"6": "Bad Xpath - The xpath specified in one or more attributes of the command is invalid. Check the API browser for proper xpath values",
	"7": "Object not present - Object specified by the xpath is not present. For example, entry[@name=’value’] where no object with name ‘value’ is present",
	"8": "Object not unique - For commands that operate on a single object, the specified object is not unique",
	"9": "Internal error - Check with technical support when seeing these errors",
	"10": "Reference count not zero - Object cannot be deleted as there are other objects that refer to it. For example, address object still in use in policy",
	"11": "Internal error - Check with technical support when seeing these errors",
	"12": "Invalid object - Xpath or element values provided are not complete",
	"13": "Operation failed - A descriptive error message is returned in the response",
	"14": "Operation not possible - Operation is not possible. For example, moving a rule up one position when it is already at the top",
	"15": "Operation denied - For example, Admin not allowed to delete own account, Running a command that is not allowed on a passive device",
	"16": "Unauthorized - The API role does not have access rights to run this query",
	"17": "Invalid command - Invalid command or parameters",
	"18": "Malformed command - The XML is malformed",
	"19": "Success - Command completed successfully",
	"20": "Success - Command completed successfully",
	"21": "Internal error - Check with technical support when seeing these errors",
	"22": "Session timed out - The session for this query timed out",
	}
)
// splitSWVersion parses a PAN-OS software version string of the form
// "major.minor.release" (e.g. "8.1.3") and returns the three numeric
// components as []int{major, minor, release}. If the string does not contain
// a dotted three-part version, it returns []int{0, 0, 0} rather than
// panicking on a nil regexp match (the previous behavior).
func splitSWVersion(version string) []int {
	re := regexp.MustCompile(`(\d+)\.(\d+)\.(\d+)`)
	match := re.FindStringSubmatch(version)
	// FindStringSubmatch returns nil when there is no match; indexing it
	// unchecked would panic.
	if len(match) < 4 {
		return []int{0, 0, 0}
	}
	// The submatches are guaranteed to be digit runs, so Atoi cannot fail.
	maj, _ := strconv.Atoi(match[1])
	min, _ := strconv.Atoi(match[2])
	rel, _ := strconv.Atoi(match[3])
	return []int{maj, min, rel}
}
// NewSession sets up our connection to the Palo Alto firewall or Panorama device. The authmethod parameter
// is used to define two ways of authenticating to the device. One is via username/password, the other is with
// the API key if you already have generated it. Please see the documentation for the AuthMethod struct for further
// details.
//
// It generates (or accepts) an API key, gathers system information, and probes
// panorama-status to determine whether the device is managed by Panorama.
func NewSession(host string, authmethod *AuthMethod) (*PaloAlto, error) {
	var keygen authKey
	var key string
	var info systemInfo
	var pan commandOutput
	status := false
	deviceType := "panos"
	if len(authmethod.Credentials) > 0 {
		// Guard the slice length before indexing: the original code only
		// checked len > 0 and then read Credentials[1], which panics when
		// a caller supplies a username without a password.
		if len(authmethod.Credentials) < 2 {
			return nil, errors.New("you must provide both a username and password in the Credentials field")
		}
		_, body, errs := r.Get(fmt.Sprintf("https://%s/api/?type=keygen&user=%s&password=%s", host, authmethod.Credentials[0], authmethod.Credentials[1])).End()
		if errs != nil {
			return nil, errs[0]
		}
		err := xml.Unmarshal([]byte(body), &keygen)
		if err != nil {
			return nil, err
		}
		if keygen.Status != "success" {
			return nil, fmt.Errorf("error code %s: %s (keygen)", keygen.Code, errorCodes[keygen.Code])
		}
		key = keygen.Key
	}
	if len(authmethod.APIKey) > 0 {
		key = authmethod.APIKey
	}
	uri := fmt.Sprintf("https://%s/api/?", host)
	// Gather the basic system information used to populate the session.
	_, getInfo, errs := r.Get(fmt.Sprintf("%s&key=%s&type=op&cmd=<show><system><info></info></system></show>", uri, key)).End()
	if errs != nil {
		return nil, errs[0]
	}
	err := xml.Unmarshal([]byte(getInfo), &info)
	if err != nil {
		return nil, err
	}
	if info.Status != "success" {
		return nil, fmt.Errorf("error code %s: %s (show system info)", info.Code, errorCodes[info.Code])
	}
	// Probe whether the device is connected to a Panorama instance.
	_, panStatus, errs := r.Get(fmt.Sprintf("%s&key=%s&type=op&cmd=<show><panorama-status></panorama-status></show>", uri, key)).End()
	if errs != nil {
		return nil, errs[0]
	}
	err = xml.Unmarshal([]byte(panStatus), &pan)
	if err != nil {
		return nil, err
	}
	if info.Platform == "m" || info.Model == "Panorama" {
		deviceType = "panorama"
	}
	if strings.Contains(pan.Data, ": yes") {
		status = true
	}
	return &PaloAlto{
		Host: host,
		Key: key,
		// Reuse the already-formatted uri instead of re-formatting it.
		URI: uri,
		Platform: info.Platform,
		Model: info.Model,
		Serial: info.Serial,
		SoftwareVersion: info.SoftwareVersion,
		DeviceType: deviceType,
		Panorama: status,
		Shared: false,
		IPAddress: info.IPAddress,
		Netmask: info.Netmask,
		DefaultGateway: info.DefaultGateway,
		MACAddress: info.MACAddress,
		Time: strings.Trim(info.Time, "[\r\n]"),
		Uptime: info.Uptime,
		GPClientPackageVersion: info.GPClientPackageVersion,
		GPDatafileVersion: info.GPDatafileVersion,
		GPDatafileReleaseDate: info.GPDatafileReleaseDate,
		GPClientlessVPNVersion: info.GPClientlessVPNVersion,
		GPClientlessVPNReleaseDate: info.GPClientlessVPNReleaseDate,
		AppVersion: info.AppVersion,
		AppReleaseDate: info.AppReleaseDate,
		AntiVirusVersion: info.AntiVirusVersion,
		AntiVirusReleaseDate: info.AntiVirusReleaseDate,
		ThreatVersion: info.ThreatVersion,
		ThreatReleaseDate: info.ThreatReleaseDate,
		WildfireVersion: info.WildfireVersion,
		WildfireReleaseDate: info.WildfireReleaseDate,
		URLDB: info.URLDB,
		URLFilteringVersion: info.URLFilteringVersion,
		LogDBVersion: info.LogDBVersion,
		MultiVsys: info.MultiVsys,
		OperationalMode: info.OperationalMode,
	}, nil
}
// Commit issues a commit on the device. When issuing a commit against a Panorama device,
// the configuration will only be committed to Panorama, and not an individual device-group.
func (p *PaloAlto) Commit() error {
	var result requestError
	query := fmt.Sprintf("type=commit&cmd=<commit></commit>&key=%s", p.Key)
	_, resp, errs := r.Get(p.URI).Query(query).End()
	if errs != nil {
		return errs[0]
	}
	if err := xml.Unmarshal([]byte(resp), &result); err != nil {
		return err
	}
	if result.Status == "success" {
		return nil
	}
	return fmt.Errorf("error code %s: %s", result.Code, errorCodes[result.Code])
}
// CommitAll issues a commit to a Panorama device, for the given devicegroup. If you wish to push to specific
// firewalls within the specified device group only, add each firewalls serial number as an additional parameter,
// (e.g. CommitAll("Some-DeviceGroup", "000000000001", "000000000002")).
func (p *PaloAlto) CommitAll(devicegroup string, devices ...string) error {
	var reqError requestError
	var cmd string
	if p.DeviceType == "panorama" && len(devices) <= 0 {
		cmd = fmt.Sprintf("<commit-all><shared-policy><device-group><entry name=\"%s\"/></device-group></shared-policy></commit-all>", devicegroup)
	}
	if p.DeviceType == "panorama" && len(devices) > 0 {
		// The <entry> element must stay open here: it wraps the <devices>
		// list and is closed with </entry> below. The previous code emitted
		// `<entry name="..."/>>` — a self-closed entry plus a stray '>' —
		// producing malformed XML with an unmatched </entry>.
		cmd = fmt.Sprintf("<commit-all><shared-policy><device-group><entry name=\"%s\"><devices>", devicegroup)
		for _, d := range devices {
			cmd += fmt.Sprintf("<entry name=\"%s\"/>", d)
		}
		cmd += "</devices></entry></device-group></shared-policy></commit-all>"
	}
	_, resp, errs := r.Get(p.URI).Query(fmt.Sprintf("type=commit&action=all&cmd=%s&key=%s", cmd, p.Key)).End()
	if errs != nil {
		return errs[0]
	}
	if err := xml.Unmarshal([]byte(resp), &reqError); err != nil {
		return err
	}
	if reqError.Status != "success" {
		return fmt.Errorf("error code %s: %s", reqError.Code, errorCodes[reqError.Code])
	}
	return nil
}
// RestartSystem will issue a system restart to the device.
func (p *PaloAlto) RestartSystem() error {
	var result requestError
	query := fmt.Sprintf("type=op&cmd=%s&key=%s", "<request><restart><system></system></restart></request>", p.Key)
	_, resp, errs := r.Post(p.URI).Query(query).End()
	if errs != nil {
		return errs[0]
	}
	if err := xml.Unmarshal([]byte(resp), &result); err != nil {
		return err
	}
	if result.Status == "success" {
		return nil
	}
	return fmt.Errorf("error code %s: %s", result.Code, errorCodes[result.Code])
}
// TestURL will verify what category the given URL falls under. It will return two results in a string slice ([]string). The
// first one is from the Base db categorization, and the second is from the Cloud db categorization. If you specify a URL
// with a wildcard, such as *.paloaltonetworks.com, it will not return a result.
func (p *PaloAlto) TestURL(url string) ([]string, error) {
	var urlResults testURL
	// The device returns free-form text; pull out the base-db and cloud-db
	// categorizations (capture groups 2 and 4).
	rex := regexp.MustCompile(`(?m)^([\d\.a-zA-Z-]+)\s([\w-]+)\s.*seconds\s([\d\.a-zA-Z-]+)\s([\w-]+)\s`)
	command := fmt.Sprintf("<test><url>%s</url></test>", url)
	if p.DeviceType == "panorama" {
		return nil, errors.New("you can only test URL's from a local device")
	}
	_, resp, errs := r.Post(p.URI).Query(fmt.Sprintf("type=op&cmd=%s&key=%s", command, p.Key)).End()
	if errs != nil {
		return nil, errs[0]
	}
	if err := xml.Unmarshal([]byte(resp), &urlResults); err != nil {
		return nil, err
	}
	if urlResults.Status != "success" {
		return nil, fmt.Errorf("error code %s: %s", urlResults.Code, errorCodes[urlResults.Code])
	}
	categorization := rex.FindStringSubmatch(urlResults.Result)
	if len(categorization) == 0 {
		return nil, fmt.Errorf("cannot resolve the site %s", url)
	}
	// The submatches are already strings; the previous fmt.Sprintf("%s", ...)
	// wrappers were redundant.
	results := []string{categorization[2], categorization[4]}
	return results, nil
}
// TestRouteLookup will lookup the given destination IP in the virtual-router "vr" and return the results.
func (p *PaloAlto) TestRouteLookup(vr, destination string) (*RouteLookup, error) {
	if p.DeviceType == "panorama" {
		return nil, errors.New("you can only test route lookups from a local device")
	}
	var lookup routeLookupResults
	command := fmt.Sprintf("<test><routing><fib-lookup><virtual-router>%s</virtual-router><ip>%s</ip></fib-lookup></routing></test>", vr, destination)
	_, resp, errs := r.Post(p.URI).Query(fmt.Sprintf("type=op&cmd=%s&key=%s", command, p.Key)).End()
	if errs != nil {
		return nil, errs[0]
	}
	if err := xml.Unmarshal([]byte(resp), &lookup); err != nil {
		return nil, err
	}
	if lookup.Status != "success" {
		return nil, fmt.Errorf("error code %s: %s", lookup.Code, errorCodes[lookup.Code])
	}
	result := &RouteLookup{
		NextHop: lookup.NextHop,
		Source: lookup.Source,
		IP: lookup.IP,
		Metric: lookup.Metric,
		Interface: lookup.Interface,
		DataPlane: lookup.DataPlane,
	}
	return result, nil
}
// Jobs returns information about every job on the device. Status can be one of: all, pending, or processed. If you want
// information about a specific job, specify the job ID instead of one of the other options.
// An error is returned when status is neither a recognized string nor an int;
// previously an invalid value sent an empty cmd to the device.
func (p *PaloAlto) Jobs(status interface{}) (*Jobs, error) {
	var jobs Jobs
	var cmd string
	// Bind the type-switched value so each case works with a concrete type.
	switch s := status.(type) {
	case string:
		switch s {
		case "all":
			cmd = "<show><jobs><all></all></jobs></show>"
		case "pending":
			cmd = "<show><jobs><pending></pending></jobs></show>"
		case "processed":
			cmd = "<show><jobs><processed></processed></jobs></show>"
		default:
			return nil, fmt.Errorf("status must be one of: all, pending, processed (got %q)", s)
		}
	case int:
		cmd = fmt.Sprintf("<show><jobs><id>%d</id></jobs></show>", s)
	default:
		return nil, errors.New("status must be a string or a job ID (int)")
	}
	_, res, errs := r.Get(fmt.Sprintf("%s&key=%s&type=op&cmd=%s", p.URI, p.Key, cmd)).End()
	if errs != nil {
		return nil, errs[0]
	}
	err := xml.Unmarshal([]byte(res), &jobs)
	if err != nil {
		return nil, err
	}
	return &jobs, nil
}
// QueryLogs allows you to pull logs from the system, given a specific log-type. Currently, the
// supported log types are as follows:
//
// config, system, traffic, threat, wildfire, url, data
//
// The LogParameters struct lists optional parameters you can use in your query. See the documentation for a full
// description of options. If you do not wish to use any of the optional parameters, just specify nil. The job ID is
// returned from the query, and should be passed to RetrieveLogs().
func (p *PaloAlto) QueryLogs(logtype string, parameters *LogParameters) (int, error) {
	var id logID
	request := fmt.Sprintf("%s&key=%s&type=log&log-type=%s", p.URI, p.Key, logtype)
	// Append each optional parameter that the caller populated.
	if parameters != nil {
		if q := parameters.Query; q != "" {
			request += fmt.Sprintf("&query=%s", url.QueryEscape(q))
		}
		if n := parameters.NLogs; n > 0 {
			request += fmt.Sprintf("&nlogs=%d", n)
		}
		if d := parameters.Direction; d != "" {
			request += fmt.Sprintf("&dir=%s", d)
		}
		if s := parameters.Skip; s > 0 {
			request += fmt.Sprintf("&skip=%d", s)
		}
	}
	_, res, errs := r.Get(request).End()
	if errs != nil {
		return 0, errs[0]
	}
	if err := xml.Unmarshal([]byte(res), &id); err != nil {
		return 0, err
	}
	return id.ID, nil
}
// RetrieveLogs will return the log data as specified in the QueryLogs() function, given the job ID. If the job
// status is not FIN, then you will have to query the job ID until it has finished and then it will return the
// results.
func (p *PaloAlto) RetrieveLogs(id int) (*Logs, error) {
	var logData Logs
	request := fmt.Sprintf("%s&key=%s&type=log&action=get&job-id=%d", p.URI, p.Key, id)
	_, res, errs := r.Get(request).End()
	if errs != nil {
		return nil, errs[0]
	}
	if err := xml.Unmarshal([]byte(res), &logData); err != nil {
		return nil, err
	}
	return &logData, nil
}
// XpathConfig allows you to configure the device using an Xpath expression for the given xpath parameter.
// The element parameter can either be an XML file or an XML string when configuring the device. The action parameter can be one of:
//
// set, edit, rename, override or delete
//
// Set actions add, update, or merge configuration nodes, while edit actions replace configuration nodes
// - use the edit action with caution! If you are renaming an object, specify the new name for the
// object in the element parameter. If you are deleting a part of the configuration, you do not need
// the element parameter. For all other actions you will need to provide it.
//
// See https://goo.gl/G1vzJT for details regarding all of the actions available.
func (p *PaloAlto) XpathConfig(action, xpath string, element ...string) error {
	var reqError requestError
	var query string
	switch action {
	case "set", "edit", "override":
		if len(element) <= 0 {
			return errors.New("you must specify the element parameter")
		}
		// An element ending in .xml is treated as a file on disk; read its
		// contents and send those instead of the literal string.
		if strings.Contains(element[0], ".xml") {
			c, err := ioutil.ReadFile(element[0])
			if err != nil {
				return err
			}
			xmlcontents := string(c)
			query = fmt.Sprintf("type=config&action=%s&xpath=%s&element=%s&key=%s", action, xpath, xmlcontents, p.Key)
		} else {
			query = fmt.Sprintf("type=config&action=%s&xpath=%s&element=%s&key=%s", action, xpath, element[0], p.Key)
		}
	case "rename":
		if len(element) <= 0 {
			return errors.New("you must specify the element parameter when renaming an object")
		}
		query = fmt.Sprintf("type=config&action=%s&xpath=%s&newname=%s&key=%s", action, xpath, element[0], p.Key)
	case "delete":
		query = fmt.Sprintf("type=config&action=%s&xpath=%s&key=%s", action, xpath, p.Key)
	default:
		// Previously an unknown action fell through and POSTed an empty
		// query string to the device; fail fast instead.
		return fmt.Errorf("action must be one of: set, edit, rename, override or delete (got %q)", action)
	}
	_, resp, errs := r.Post(p.URI).Query(query).End()
	if errs != nil {
		return errs[0]
	}
	if err := xml.Unmarshal([]byte(resp), &reqError); err != nil {
		return err
	}
	if reqError.Status != "success" {
		return fmt.Errorf("error code %s: %s", reqError.Code, errorCodes[reqError.Code])
	}
	return nil
}
// XpathClone allows you to clone an existing part of the devices configuration. Use the xpath parameter
// to specify the location of the object to be cloned. Use the from parameter to specify the source object,
// and the newname parameter to provide a name for the cloned object.
//
// See https://goo.gl/ZfmBB6 for details.
func (p *PaloAlto) XpathClone(xpath, from, newname string) error {
	var result requestError
	_, resp, errs := r.Post(p.URI).Query(fmt.Sprintf("type=config&action=clone&xpath=%s&from=%s&newname=%s&key=%s", xpath, from, newname, p.Key)).End()
	if errs != nil {
		return errs[0]
	}
	if err := xml.Unmarshal([]byte(resp), &result); err != nil {
		return err
	}
	if result.Status == "success" {
		return nil
	}
	return fmt.Errorf("error code %s: %s", result.Code, errorCodes[result.Code])
}
// XpathMove allows you to move the location of an existing configuration object. Use the xpath parameter to specify
// the location of the object to be moved, and the where parameter to specify type of move. You can optionally use the
// destination parameter to specify the destination path.
//
// See https://goo.gl/LbkQDG for details.
func (p *PaloAlto) XpathMove(xpath, where string, destination ...string) error {
	var result requestError
	var query string
	// Include the dst parameter only when a destination was supplied.
	if len(destination) == 0 {
		query = fmt.Sprintf("type=config&action=move&xpath=%s&where=%s&key=%s", xpath, where, p.Key)
	} else {
		query = fmt.Sprintf("type=config&action=move&xpath=%s&where=%s&dst=%s&key=%s", xpath, where, destination[0], p.Key)
	}
	_, resp, errs := r.Post(p.URI).Query(query).End()
	if errs != nil {
		return errs[0]
	}
	if err := xml.Unmarshal([]byte(resp), &result); err != nil {
		return err
	}
	if result.Status == "success" {
		return nil
	}
	return fmt.Errorf("error code %s: %s", result.Code, errorCodes[result.Code])
}
// XpathMulti allows you to move and clone multiple objects across device groups and virtual systems. The element parameter
// can be either an XML file or XML string. The action parameter must be one of: clone or move.
// The xpath parameter is for the destination where the addresses will be moved to. The element parameter must
// include in the XML the xpath for the source and the list of objects within the specified source.
//
// See https://goo.gl/oeufnu for details.
func (p *PaloAlto) XpathMulti(action, xpath, element string) error {
	var result requestError
	// An element ending in .xml is read from disk; otherwise it is sent verbatim.
	payload := element
	if strings.Contains(element, ".xml") {
		contents, err := ioutil.ReadFile(element)
		if err != nil {
			return err
		}
		payload = string(contents)
	}
	query := fmt.Sprintf("type=config&action=multi-%s&xpath=%s&element=%s&key=%s", action, xpath, payload, p.Key)
	_, resp, errs := r.Post(p.URI).Query(query).End()
	if errs != nil {
		return errs[0]
	}
	if err := xml.Unmarshal([]byte(resp), &result); err != nil {
		return err
	}
	if result.Status == "success" {
		return nil
	}
	return fmt.Errorf("error code %s: %s", result.Code, errorCodes[result.Code])
}
// XpathGetConfig allows you to view the active or candidate configuration at the location specified in the
// xpath parameter. The configtype parameter must be "active" or "candidate";
// any other value returns an error.
func (p *PaloAlto) XpathGetConfig(configtype, xpath string) (string, error) {
	var reqError requestError
	var query string
	switch configtype {
	case "active":
		query = fmt.Sprintf("type=config&action=show&xpath=%s&key=%s", xpath, p.Key)
	case "candidate":
		query = fmt.Sprintf("type=config&action=get&xpath=%s&key=%s", xpath, p.Key)
	default:
		// Previously an unknown configtype fell through and POSTed an empty
		// query string to the device; fail fast instead.
		return "", fmt.Errorf("configtype must be one of: active or candidate (got %q)", configtype)
	}
	_, resp, errs := r.Post(p.URI).Query(query).End()
	if errs != nil {
		return "", errs[0]
	}
	if err := xml.Unmarshal([]byte(resp), &reqError); err != nil {
		return "", err
	}
	if reqError.Status != "success" {
		return "", fmt.Errorf("error code %s: %s", reqError.Code, errorCodes[reqError.Code])
	}
	return resp, nil
}
// Command lets you run any operational mode command against the given device, and it returns the output. You
// must use the XML-formatted version of the command string as if you were calling the API yourself,
// (e.g. "<show><running><ippool></ippool></running></show>")
func (p *PaloAlto) Command(command string) (string, error) {
	var output commandOutput
	request := fmt.Sprintf("%s&key=%s&type=op&cmd=%s", p.URI, p.Key, command)
	_, res, errs := r.Get(request).End()
	if errs != nil {
		return "", errs[0]
	}
	if err := xml.Unmarshal([]byte(res), &output); err != nil {
		return "", err
	}
	return output.Data, nil
}
// Routes will retrieve information about each route in the devices routing table(s). You can (optionally) specify
// a specific virtual router to retrieve routes from.
func (p *PaloAlto) Routes(vr ...string) (*RoutingTable, error) {
	// Routing tables are only available on a firewall, not Panorama.
	if p.DeviceType != "panos" {
		return nil, errors.New("you can only retrieve the routing table on a local firewall")
	}
	var table RoutingTable
	var query string
	if len(vr) > 0 {
		query = fmt.Sprintf("%s&key=%s&type=op&cmd=<show><routing><route><virtual-router>%s</virtual-router></route></routing></show>", p.URI, p.Key, vr[0])
	} else {
		query = fmt.Sprintf("%s&key=%s&type=op&cmd=<show><routing><route></route></routing></show>", p.URI, p.Key)
	}
	_, resp, errs := r.Post(p.URI).Query(query).End()
	if errs != nil {
		return nil, errs[0]
	}
	if err := xml.Unmarshal([]byte(resp), &table); err != nil {
		return nil, err
	}
	if table.Status != "success" {
		return nil, fmt.Errorf("error code %s: %s", table.Code, errorCodes[table.Code])
	}
	return &table, nil
}
// Sessions will retrieve information about each session within the session table on a firewall.
func (p *PaloAlto) Sessions() (*SessionTable, error) {
	// The session table is only available on firewalls, not on Panorama.
	if p.DeviceType != "panos" {
		return nil, errors.New("you can only retrieve the session table on a local firewall")
	}
	var table SessionTable
	query := fmt.Sprintf("%s&key=%s&type=op&cmd=<show><session><all></all></session></show>", p.URI, p.Key)
	_, body, errs := r.Post(p.URI).Query(query).End()
	if errs != nil {
		return nil, errs[0]
	}
	if err := xml.Unmarshal([]byte(body), &table); err != nil {
		return nil, err
	}
	if table.Status != "success" {
		return nil, fmt.Errorf("error code %s: %s", table.Code, errorCodes[table.Code])
	}
	return &table, nil
}
// Added protocol field
// Package panos interacts with Palo Alto and Panorama devices using the XML API.
package panos
import (
"crypto/tls"
"encoding/xml"
"errors"
"fmt"
"io/ioutil"
"net/url"
"regexp"
"strconv"
"strings"
"github.com/parnurzeal/gorequest"
)
// PaloAlto is a container for our session state. It also holds information about the device
// that is gathered upon a successful connection to it (see NewSession).
type PaloAlto struct {
// Host is the hostname or IP address used to reach the device.
Host string
// Key is the API key used to authenticate every request.
Key string
// URI is the base API endpoint, e.g. "https://<host>/api/?".
URI string
Platform string
Model string
Serial string
SoftwareVersion string
// DeviceType is "panos" for a firewall or "panorama" for a Panorama appliance.
DeviceType string
// Panorama is true when the firewall reports being connected to a Panorama.
Panorama bool
Shared bool
IPAddress string
Netmask string
DefaultGateway string
MACAddress string
Time string
Uptime string
// Fields below mirror the content/package versions reported by "show system info".
GPClientPackageVersion string
GPDatafileVersion string
GPDatafileReleaseDate string
GPClientlessVPNVersion string
GPClientlessVPNReleaseDate string
AppVersion string
AppReleaseDate string
AntiVirusVersion string
AntiVirusReleaseDate string
ThreatVersion string
ThreatReleaseDate string
WildfireVersion string
WildfireReleaseDate string
URLDB string
URLFilteringVersion string
LogDBVersion string
MultiVsys string
OperationalMode string
}
// AuthMethod defines how we want to authenticate to the device. If using a
// username and password to authenticate, the Credentials field must contain the username and password,
// respectively (e.g. []string{"admin", "password"}). If you are using the API key for
// authentication, provide the entire key for the APIKey field.
type AuthMethod struct {
// Credentials holds the username and password, in that order.
Credentials []string
// APIKey holds a pre-generated API key; when set it overrides any key generated from Credentials.
APIKey string
}
// Jobs holds information about all jobs on the device.
type Jobs struct {
XMLName xml.Name `xml:"response"`
// Status and Code mirror the attributes of the XML response envelope.
Status string `xml:"status,attr"`
Code string `xml:"code,attr"`
Jobs []Job `xml:"result>job"`
}
// Job holds information about each individual job.
type Job struct {
ID int `xml:"id"`
User string `xml:"user"`
Type string `xml:"type"`
Status string `xml:"status"`
Queued string `xml:"queued"`
Stoppable string `xml:"stoppable"`
Result string `xml:"result"`
Description string `xml:"description,omitempty"`
QueuePosition int `xml:"positionInQ"`
Progress string `xml:"progress"`
Details []string `xml:"details>line"`
Warnings string `xml:"warnings,omitempty"`
// StartTime/EndTime map the device's tdeq/tfin timestamp elements.
StartTime string `xml:"tdeq"`
EndTime string `xml:"tfin"`
}
// Logs holds all of the log data retrieved from querying the system.
type Logs struct {
XMLName xml.Name `xml:"response"`
Status string `xml:"status,attr"`
Code string `xml:"code,attr"`
StartTime string `xml:"result>job>tdeq"`
EndTime string `xml:"result>job>tfin"`
// JobStatus is the status of the asynchronous log-retrieval job (see RetrieveLogs).
JobStatus string `xml:"result>job>status"`
JobID int `xml:"result>job>id"`
Logs []Log `xml:"result>log>logs>entry"`
}
// Log holds information about each individual log retrieved for the following log-types:
//
// config, system, traffic, threat, wildfire, url, data
//
// Certain fields are omitted or populated based on the log type that is specified when querying the system. See https://goo.gl/PPLjVZ for the
// fields assigned for the different log types.
type Log struct {
// Common identification fields.
DeviceName string `xml:"device_name,omitempty"`
Serial string `xml:"serial,omitempty"`
Rule string `xml:"rule,omitempty"`
TimeGenerated string `xml:"time_generated,omitempty"`
TimeReceived string `xml:"time_received,omitempty"`
Type string `xml:"type,omitempty"`
Subtype string `xml:"subtype,omitempty"`
// Traffic/session fields (source, destination, NAT, counters).
From string `xml:"from,omitempty"`
To string `xml:"to,omitempty"`
Source string `xml:"src,omitempty"`
SourceUser string `xml:"srcuser,omitempty"`
SourcePort int `xml:"sport,omitempty"`
SourceCountry string `xml:"srcloc,omitempty"`
Destination string `xml:"dst,omitempty"`
DestinationPort int `xml:"dport,omitempty"`
DestinationCountry string `xml:"dstloc,omitempty"`
Application string `xml:"app,omitempty"`
Action string `xml:"action,omitempty"`
NATSourceIP string `xml:"natsrc,omitempty"`
NATSourcePort int `xml:"natsport,omitempty"`
NATDestinationIP string `xml:"natdst,omitempty"`
NATDestinationPort int `xml:"natdport,omitempty"`
Packets int `xml:"packets,omitempty"`
PacketsSent int `xml:"pkts_sent,omitempty"`
PacketsReceived int `xml:"pkts_received,omitempty"`
Bytes int `xml:"bytes,omitempty"`
BytesSent int `xml:"bytes_sent,omitempty"`
BytesReceived int `xml:"bytes_received,omitempty"`
SessionID int `xml:"sessionid,omitempty"`
SessionEndReason string `xml:"session_end_reason,omitempty"`
RepeatCount int `xml:"repeatcnt,omitempty"`
Start string `xml:"start,omitempty"`
Elapsed string `xml:"elapsed,omitempty"`
// Threat/URL fields.
Category string `xml:"category,omitempty"`
ThreatCategory string `xml:"thr_category,omitempty"`
ThreatName string `xml:"threatid,omitempty"`
ThreatID int `xml:"tid,omitempty"`
Misc string `xml:"misc,omitempty"`
Severity string `xml:"severity,omitempty"`
Direction string `xml:"direction,omitempty"`
InboundInterface string `xml:"inbound_if,omitempty"`
OutboundInterface string `xml:"outbound_if,omitempty"`
ID int `xml:"logid,attr"`
Domain int `xml:"domain,omitempty"`
ReceiveTime string `xml:"receive_time,omitempty"`
SequenceNumber string `xml:"seqno,omitempty"`
ActionFlags string `xml:"actionflags,omitempty"`
ConfigVersion int `xml:"config_ver,omitempty"`
Vsys string `xml:"vsys,omitempty"`
Logset string `xml:"logset,omitempty"`
// Per-session flag fields.
Flags string `xml:"flags,omitempty"`
Pcap string `xml:"flag-pcap,omitempty"`
PcapID int `xml:"pcap_id,omitempty"`
Flagged string `xml:"flag-flagged,omitempty"`
Proxy string `xml:"flag-proxy,omitempty"`
URLDenied string `xml:"flag-url-denied,omitempty"`
NAT string `xml:"flag-nat,omitempty"`
CaptivePortal string `xml:"captive-portal"`
NonStandardDestinationPort string `xml:"non-std-dport"`
Transaction string `xml:"transaction,omitempty"`
PBFClient2Server string `xml:"pbf-c2s,omitempty"`
PBFServer2Client string `xml:"pbf-s2c,omitempty"`
TemporaryMatch string `xml:"temporary-match,omitempty"`
SymmetricReturn string `xml:"sym-return,omitempty"`
SSLDecryptMirror string `xml:"decrypt-mirror,omitempty"`
CredentialDetected string `xml:"credential-detected,omitempty"`
MPTCP string `xml:"flag-mptcp-set,omitempty"`
TunnelInspected string `xml:"flag-tunnel-inspected,omitempty"`
ReconExcluded string `xml:"flag-recon-excluded,omitempty"`
Protocol string `xml:"proto,omitempty"`
// Tunnel/GTP related fields.
TunnelType string `xml:"tunnel,omitempty"`
TPadding int `xml:"tpadding,omitempty"`
CPadding int `xml:"cpadding,omitempty"`
TunnelIMSI int `xml:"tunnelid_imsi,omitempty"`
VsysID int `xml:"vsys_id,omitempty"`
ParentSessionID int `xml:"parent_session_id,omitempty"`
ReportID int `xml:"reportid,omitempty"`
// HTTP/URL-filtering fields.
URLIndex int `xml:"url_idx,omitempty"`
HTTPMethod string `xml:"http_method,omitempty"`
XForwardedFor string `xml:"xff,omitempty"`
Referer string `xml:"referer,omitempty"`
UserAgent string `xml:"user_agent,omitempty"`
SignatureFlags string `xml:"sig_flags,omitempty"`
ContentVersion string `xml:"contentver,omitempty"`
// Wildfire/file fields.
FileDigest string `xml:"filedigest,omitempty"`
Filetype string `xml:"filetype,omitempty"`
Sender string `xml:"sender,omitempty"`
Recipient string `xml:"recipient,omitempty"`
Subject string `xml:"subject,omitempty"`
Cloud string `xml:"cloud,omitempty"`
Padding int `xml:"padding,omitempty"`
ActionSource string `xml:"action_source,omitempty"`
TunnelID int `xml:"tunnelid,omitempty"`
IMSI string `xml:"imsi,omitempty"`
MonitorTag string `xml:"monitortag,omitempty"`
IMEI string `xml:"imei,omitempty"`
DeviceGroupHierarchy1 int `xml:"dg_hier_level_1,omitempty"`
DeviceGroupHierarchy2 int `xml:"dg_hier_level_2,omitempty"`
DeviceGroupHierarchy3 int `xml:"dg_hier_level_3,omitempty"`
DeviceGroupHierarchy4 int `xml:"dg_hier_level_4,omitempty"`
// Config/system log fields.
Host string `xml:"host,omitempty"`
Command string `xml:"cmd,omitempty"`
Admin string `xml:"admin,omitempty"`
Client string `xml:"client,omitempty"`
Result string `xml:"result,omitempty"`
Path string `xml:"path,omitempty"`
BeforeChangePreview string `xml:"before-change-preview,omitempty"`
AfterChangePreview string `xml:"after-change-preview,omitempty"`
FullPath string `xml:"full-path,omitempty"`
EventID string `xml:"eventid,omitempty"`
Module string `xml:"module,omitempty"`
Description string `xml:"opaque,omitempty"`
}
// LogParameters specifies additional parameters that can be used when retrieving logs. These are all optional.
// A nil *LogParameters passed to QueryLogs() uses the device defaults for every option.
type LogParameters struct {
// Query specifies the match criteria for the logs. This is similar to the query provided in the web interface under the Monitor
// tab when viewing the logs. The query must be URL encoded.
Query string
// NLogs specifies the number of logs to retrieve. The default is 20 when the parameter is not specified. The maximum is 5000.
NLogs int
// Skip specifies the number of logs to skip when doing a log retrieval. The default is 0. This is useful when retrieving
// logs in batches where you can skip the previously retrieved logs.
Skip int
// Direction specifies whether logs are shown oldest first (forward) or newest first (backward). Default is backward.
Direction string
// Action is not used at the moment. Log data sizes can be large so the API uses an asynchronous job scheduling approach to retrieve
// log data. The initial query returns a Job ID (job-id) that you can then use for future queries with the action parameter: action=get
// will check status of an active job or retrieve the log data when the status is FIN (finished).
Action string
}
// RoutingTable contains all of the routes in the devices routing table.
type RoutingTable struct {
XMLName xml.Name `xml:"response"`
// Status and Code mirror the attributes of the XML response envelope.
Status string `xml:"status,attr"`
Code string `xml:"code,attr"`
Flags string `xml:"result>flags"`
Routes []Route `xml:"result>entry"`
}
// Route contains information about each individual route in the devices routing table.
type Route struct {
VirtualRouter string `xml:"virtual-router"`
Destination string `xml:"destination"`
NextHop string `xml:"nexthop"`
Metric int `xml:"metric"`
Flags string `xml:"flags"`
// Age is the route age as reported by the device (units as reported by PAN-OS).
Age int64 `xml:"age"`
Interface string `xml:"interface"`
}
// authKey holds our API key, as returned by a type=keygen request (see NewSession).
type authKey struct {
XMLName xml.Name `xml:"response"`
Status string `xml:"status,attr"`
Code string `xml:"code,attr"`
Key string `xml:"result>key"`
}
// systemInfo holds basic system information, unmarshalled from the
// "show system info" operational command response.
type systemInfo struct {
XMLName xml.Name `xml:"response"`
Status string `xml:"status,attr"`
Code string `xml:"code,attr"`
Platform string `xml:"result>system>platform-family"`
Model string `xml:"result>system>model"`
Serial string `xml:"result>system>serial"`
SoftwareVersion string `xml:"result>system>sw-version"`
IPAddress string `xml:"result>system>ip-address"`
Netmask string `xml:"result>system>netmask"`
DefaultGateway string `xml:"result>system>default-gateway"`
MACAddress string `xml:"result>system>mac-address"`
Time string `xml:"result>system>time"`
Uptime string `xml:"result>system>uptime"`
GPClientPackageVersion string `xml:"result>system>global-protect-client-package-version"`
GPDatafileVersion string `xml:"result>system>global-protect-datafile-version"`
GPDatafileReleaseDate string `xml:"result>system>global-protect-datafile-release-date"`
GPClientlessVPNVersion string `xml:"result>system>global-protect-clientless-vpn-version"`
GPClientlessVPNReleaseDate string `xml:"result>system>global-protect-clientless-vpn-release-date"`
AppVersion string `xml:"result>system>app-version"`
AppReleaseDate string `xml:"result>system>app-release-date"`
AntiVirusVersion string `xml:"result>system>av-version"`
AntiVirusReleaseDate string `xml:"result>system>av-release-date"`
ThreatVersion string `xml:"result>system>threat-version"`
ThreatReleaseDate string `xml:"result>system>threat-release-date"`
WildfireVersion string `xml:"result>system>wildfire-version"`
WildfireReleaseDate string `xml:"result>system>wildfire-release-date"`
URLDB string `xml:"result>system>url-db"`
URLFilteringVersion string `xml:"result>system>url-filtering-version"`
LogDBVersion string `xml:"result>system>logdb-version"`
MultiVsys string `xml:"result>system>multi-vsys"`
OperationalMode string `xml:"result>system>operational-mode"`
}
// commandOutput holds the results of our operational mode commands that were issued.
type commandOutput struct {
XMLName xml.Name `xml:"response"`
Status string `xml:"status,attr"`
Code string `xml:"code,attr"`
// Data is the raw inner XML of the <result> element.
Data string `xml:"result"`
}
// requestError contains information about any error we get from a request.
type requestError struct {
XMLName xml.Name `xml:"response"`
Status string `xml:"status,attr"`
Code string `xml:"code,attr"`
Message string `xml:"result>msg,omitempty"`
}
// testURL contains the results of the operational command test url.
type testURL struct {
XMLName xml.Name `xml:"response"`
Status string `xml:"status,attr"`
Code string `xml:"code,attr"`
Result string `xml:"result"`
}
// logID contains the job ID when querying the device for log retrieval.
type logID struct {
XMLName xml.Name `xml:"response"`
Status string `xml:"status,attr"`
Code string `xml:"code,attr"`
ID int `xml:"result>job"`
}
// routeLookupResults contains the results of testing a route lookup,
// as unmarshalled from the device's XML response.
type routeLookupResults struct {
XMLName xml.Name `xml:"response"`
Status string `xml:"status,attr"`
Code string `xml:"code,attr"`
NextHop string `xml:"result>nh"`
Source string `xml:"result>src"`
IP string `xml:"result>ip"`
Metric int `xml:"result>metric"`
Interface string `xml:"result>interface"`
DataPlane string `xml:"result>dp"`
}
// RouteLookup contains the results of the operational command: test routing fib-lookup <ip> <virtual-router>.
// It is the exported mirror of routeLookupResults, without the XML envelope fields.
type RouteLookup struct {
NextHop string
Source string
IP string
Metric int
Interface string
DataPlane string
}
// SessionTable holds information about every session on the device.
type SessionTable struct {
XMLName xml.Name `xml:"response"`
// Status and Code mirror the attributes of the XML response envelope.
Status string `xml:"status,attr"`
Code string `xml:"code,attr"`
Sessions []Session `xml:"result>entry"`
}
// Session holds information about each individual session on the device.
type Session struct {
Application string `xml:"application"`
IngressInterface string `xml:"ingress"`
EgressInterface string `xml:"egress"`
VsysID int `xml:"vsys-idx"`
NATSourceAddress string `xml:"xsource"`
SourceNAT string `xml:"srcnat"`
SourcePort int `xml:"sport"`
SecurityRule string `xml:"security-rule"`
From string `xml:"from"`
// Protocol is the numeric IP protocol (e.g. 6 for TCP) as reported by the device.
Protocol int `xml:"proto"`
DestinationAddress string `xml:"dst"`
To string `xml:"to"`
State string `xml:"state"`
NATDestinationAddress string `xml:"xdst"`
NAT string `xml:"nat"`
Type string `xml:"type"`
StartTime string `xml:"start-time"`
Proxy string `xml:"proxy"`
DecryptMirror string `xml:"decrypt-mirror"`
ID int `xml:"idx"`
TotalByteCount int `xml:"total-byte-count"`
DestinationNAT string `xml:"dstnat"`
Vsys string `xml:"vsys"`
NATSourcePort int `xml:"xsport"`
NATDestinationPort int `xml:"xdport"`
Flags string `xml:"flags,omitempty"`
SourceAddress string `xml:"source"`
DestinationPort int `xml:"dport"`
}
// Package-level shared state used by every API call.
var (
// r is the shared HTTP client. TLS certificate verification is disabled
// because PAN-OS devices commonly present self-signed certificates.
r = gorequest.New().TLSClientConfig(&tls.Config{InsecureSkipVerify: true})
// errorCodes maps PAN-OS API status/error codes to human-readable
// descriptions, used when constructing error messages from responses.
errorCodes = map[string]string{
"400": "Bad request - Returned when a required parameter is missing, an illegal parameter value is used",
"403": "Forbidden - Returned for authentication or authorization errors including invalid key, insufficient admin access rights",
"1": "Unknown command - The specific config or operational command is not recognized",
"2": "Internal error - Check with technical support when seeing these errors",
"3": "Internal error - Check with technical support when seeing these errors",
"4": "Internal error - Check with technical support when seeing these errors",
"5": "Internal error - Check with technical support when seeing these errors",
"6": "Bad Xpath - The xpath specified in one or more attributes of the command is invalid. Check the API browser for proper xpath values",
"7": "Object not present - Object specified by the xpath is not present. For example, entry[@name=’value’] where no object with name ‘value’ is present",
"8": "Object not unique - For commands that operate on a single object, the specified object is not unique",
"9": "Internal error - Check with technical support when seeing these errors",
"10": "Reference count not zero - Object cannot be deleted as there are other objects that refer to it. For example, address object still in use in policy",
"11": "Internal error - Check with technical support when seeing these errors",
"12": "Invalid object - Xpath or element values provided are not complete",
"13": "Operation failed - A descriptive error message is returned in the response",
"14": "Operation not possible - Operation is not possible. For example, moving a rule up one position when it is already at the top",
"15": "Operation denied - For example, Admin not allowed to delete own account, Running a command that is not allowed on a passive device",
"16": "Unauthorized - The API role does not have access rights to run this query",
"17": "Invalid command - Invalid command or parameters",
"18": "Malformed command - The XML is malformed",
"19": "Success - Command completed successfully",
"20": "Success - Command completed successfully",
"21": "Internal error - Check with technical support when seeing these errors",
"22": "Session timed out - The session for this query timed out",
}
)
// splitSWVersion parses a PAN-OS software version string (e.g. "8.1.3") into
// its major, minor and release integer components. It returns nil when the
// string does not contain a dotted three-part numeric version.
func splitSWVersion(version string) []int {
	re := regexp.MustCompile(`(\d+)\.(\d+)\.(\d+)`)
	match := re.FindStringSubmatch(version)
	// FindStringSubmatch returns nil when nothing matches; guard against the
	// index-out-of-range panic the original code had on unexpected input.
	if len(match) < 4 {
		return nil
	}
	maj, _ := strconv.Atoi(match[1])
	min, _ := strconv.Atoi(match[2])
	rel, _ := strconv.Atoi(match[3])
	return []int{maj, min, rel}
}
// NewSession sets up our connection to the Palo Alto firewall or Panorama device. The authmethod parameter
// is used to define two ways of authenticating to the device. One is via username/password, the other is with
// the API key if you already have generated it. Please see the documentation for the AuthMethod struct for further
// details.
func NewSession(host string, authmethod *AuthMethod) (*PaloAlto, error) {
	var keygen authKey
	var key string
	var info systemInfo
	var pan commandOutput
	status := false
	deviceType := "panos"
	// Guard against a nil-pointer dereference on the parameter.
	if authmethod == nil {
		return nil, errors.New("you must specify an authentication method (credentials or API key)")
	}
	if len(authmethod.Credentials) > 0 {
		// Guard against an index-out-of-range panic when only a username was supplied.
		if len(authmethod.Credentials) < 2 {
			return nil, errors.New("credentials must contain both a username and a password")
		}
		_, body, errs := r.Get(fmt.Sprintf("https://%s/api/?type=keygen&user=%s&password=%s", host, authmethod.Credentials[0], authmethod.Credentials[1])).End()
		if errs != nil {
			return nil, errs[0]
		}
		err := xml.Unmarshal([]byte(body), &keygen)
		if err != nil {
			return nil, err
		}
		if keygen.Status != "success" {
			return nil, fmt.Errorf("error code %s: %s (keygen)", keygen.Code, errorCodes[keygen.Code])
		}
		key = keygen.Key
	}
	// A pre-generated API key, when provided, overrides the generated one.
	if len(authmethod.APIKey) > 0 {
		key = authmethod.APIKey
	}
	uri := fmt.Sprintf("https://%s/api/?", host)
	// Gather basic platform/content-version details via "show system info".
	_, getInfo, errs := r.Get(fmt.Sprintf("%s&key=%s&type=op&cmd=<show><system><info></info></system></show>", uri, key)).End()
	if errs != nil {
		return nil, errs[0]
	}
	err := xml.Unmarshal([]byte(getInfo), &info)
	if err != nil {
		return nil, err
	}
	if info.Status != "success" {
		return nil, fmt.Errorf("error code %s: %s (show system info)", info.Code, errorCodes[info.Code])
	}
	// Determine whether the firewall is connected to a Panorama.
	_, panStatus, errs := r.Get(fmt.Sprintf("%s&key=%s&type=op&cmd=<show><panorama-status></panorama-status></show>", uri, key)).End()
	if errs != nil {
		return nil, errs[0]
	}
	err = xml.Unmarshal([]byte(panStatus), &pan)
	if err != nil {
		return nil, err
	}
	if info.Platform == "m" || info.Model == "Panorama" {
		deviceType = "panorama"
	}
	if strings.Contains(pan.Data, ": yes") {
		status = true
	}
	return &PaloAlto{
		Host:                       host,
		Key:                        key,
		URI:                        uri, // reuse the base URI built above instead of formatting it again
		Platform:                   info.Platform,
		Model:                      info.Model,
		Serial:                     info.Serial,
		SoftwareVersion:            info.SoftwareVersion,
		DeviceType:                 deviceType,
		Panorama:                   status,
		Shared:                     false,
		IPAddress:                  info.IPAddress,
		Netmask:                    info.Netmask,
		DefaultGateway:             info.DefaultGateway,
		MACAddress:                 info.MACAddress,
		Time:                       strings.Trim(info.Time, "[\r\n]"),
		Uptime:                     info.Uptime,
		GPClientPackageVersion:     info.GPClientPackageVersion,
		GPDatafileVersion:          info.GPDatafileVersion,
		GPDatafileReleaseDate:      info.GPDatafileReleaseDate,
		GPClientlessVPNVersion:     info.GPClientlessVPNVersion,
		GPClientlessVPNReleaseDate: info.GPClientlessVPNReleaseDate,
		AppVersion:                 info.AppVersion,
		AppReleaseDate:             info.AppReleaseDate,
		AntiVirusVersion:           info.AntiVirusVersion,
		AntiVirusReleaseDate:       info.AntiVirusReleaseDate,
		ThreatVersion:              info.ThreatVersion,
		ThreatReleaseDate:          info.ThreatReleaseDate,
		WildfireVersion:            info.WildfireVersion,
		WildfireReleaseDate:        info.WildfireReleaseDate,
		URLDB:                      info.URLDB,
		URLFilteringVersion:        info.URLFilteringVersion,
		LogDBVersion:               info.LogDBVersion,
		MultiVsys:                  info.MultiVsys,
		OperationalMode:            info.OperationalMode,
	}, nil
}
// Commit issues a commit on the device. When issuing a commit against a Panorama device,
// the configuration will only be committed to Panorama, and not an individual device-group.
func (p *PaloAlto) Commit() error {
	var apiErr requestError
	q := fmt.Sprintf("type=commit&cmd=<commit></commit>&key=%s", p.Key)
	_, body, errs := r.Get(p.URI).Query(q).End()
	if errs != nil {
		return errs[0]
	}
	if err := xml.Unmarshal([]byte(body), &apiErr); err != nil {
		return err
	}
	if apiErr.Status == "success" {
		return nil
	}
	return fmt.Errorf("error code %s: %s", apiErr.Code, errorCodes[apiErr.Code])
}
// CommitAll issues a commit to a Panorama device, for the given devicegroup. If you wish to push to specific
// firewalls within the specified device group only, add each firewalls serial number as an additional parameter,
// (e.g. CommitAll("Some-DeviceGroup", "000000000001", "000000000002")).
func (p *PaloAlto) CommitAll(devicegroup string, devices ...string) error {
	var reqError requestError
	var cmd string
	if p.DeviceType == "panorama" && len(devices) <= 0 {
		cmd = fmt.Sprintf("<commit-all><shared-policy><device-group><entry name=\"%s\"/></device-group></shared-policy></commit-all>", devicegroup)
	}
	if p.DeviceType == "panorama" && len(devices) > 0 {
		// BUG FIX: the device-group entry was previously emitted self-closed with a
		// stray '>' ("<entry name=\"...\"/>>"), producing malformed XML that never
		// matched the closing </entry> appended below. Open the element properly so
		// the <devices> list nests inside it.
		cmd = fmt.Sprintf("<commit-all><shared-policy><device-group><entry name=\"%s\"><devices>", devicegroup)
		for _, d := range devices {
			cmd += fmt.Sprintf("<entry name=\"%s\"/>", d)
		}
		cmd += "</devices></entry></device-group></shared-policy></commit-all>"
	}
	_, resp, errs := r.Get(p.URI).Query(fmt.Sprintf("type=commit&action=all&cmd=%s&key=%s", cmd, p.Key)).End()
	if errs != nil {
		return errs[0]
	}
	if err := xml.Unmarshal([]byte(resp), &reqError); err != nil {
		return err
	}
	if reqError.Status != "success" {
		return fmt.Errorf("error code %s: %s", reqError.Code, errorCodes[reqError.Code])
	}
	return nil
}
// RestartSystem will issue a system restart to the device.
func (p *PaloAlto) RestartSystem() error {
	var apiErr requestError
	const cmd = "<request><restart><system></system></restart></request>"
	_, body, errs := r.Post(p.URI).Query(fmt.Sprintf("type=op&cmd=%s&key=%s", cmd, p.Key)).End()
	if errs != nil {
		return errs[0]
	}
	if err := xml.Unmarshal([]byte(body), &apiErr); err != nil {
		return err
	}
	if apiErr.Status == "success" {
		return nil
	}
	return fmt.Errorf("error code %s: %s", apiErr.Code, errorCodes[apiErr.Code])
}
// TestURL will verify what category the given URL falls under. It will return two results in a string slice ([]string). The
// first one is from the Base db categorization, and the second is from the Cloud db categorization. If you specify a URL
// with a wildcard, such as *.paloaltonetworks.com, it will not return a result.
func (p *PaloAlto) TestURL(url string) ([]string, error) {
	var urlResults testURL
	// Captures "<site> <category>" pairs for the base and cloud lookups from the
	// device's plain-text result.
	rex := regexp.MustCompile(`(?m)^([\d\.a-zA-Z-]+)\s([\w-]+)\s.*seconds\s([\d\.a-zA-Z-]+)\s([\w-]+)\s`)
	command := fmt.Sprintf("<test><url>%s</url></test>", url)
	if p.DeviceType == "panorama" {
		return nil, errors.New("you can only test URL's from a local device")
	}
	_, resp, errs := r.Post(p.URI).Query(fmt.Sprintf("type=op&cmd=%s&key=%s", command, p.Key)).End()
	if errs != nil {
		return nil, errs[0]
	}
	if err := xml.Unmarshal([]byte(resp), &urlResults); err != nil {
		return nil, err
	}
	if urlResults.Status != "success" {
		return nil, fmt.Errorf("error code %s: %s", urlResults.Code, errorCodes[urlResults.Code])
	}
	categorization := rex.FindStringSubmatch(urlResults.Result)
	if len(categorization) == 0 {
		return nil, fmt.Errorf("cannot resolve the site %s", url)
	}
	// The submatches are already strings; the fmt.Sprintf("%s", ...) wrappers the
	// original used were redundant no-ops.
	results := []string{
		categorization[2], // Base db category
		categorization[4], // Cloud db category
	}
	return results, nil
}
// TestRouteLookup will lookup the given destination IP in the virtual-router "vr" and return the results.
func (p *PaloAlto) TestRouteLookup(vr, destination string) (*RouteLookup, error) {
	// FIB lookups can only be run on a firewall, not on Panorama.
	if p.DeviceType == "panorama" {
		return nil, errors.New("you can only test route lookups from a local device")
	}
	var result routeLookupResults
	cmd := fmt.Sprintf("<test><routing><fib-lookup><virtual-router>%s</virtual-router><ip>%s</ip></fib-lookup></routing></test>", vr, destination)
	_, body, errs := r.Post(p.URI).Query(fmt.Sprintf("type=op&cmd=%s&key=%s", cmd, p.Key)).End()
	if errs != nil {
		return nil, errs[0]
	}
	if err := xml.Unmarshal([]byte(body), &result); err != nil {
		return nil, err
	}
	if result.Status != "success" {
		return nil, fmt.Errorf("error code %s: %s", result.Code, errorCodes[result.Code])
	}
	lookup := &RouteLookup{
		NextHop:   result.NextHop,
		Source:    result.Source,
		IP:        result.IP,
		Metric:    result.Metric,
		Interface: result.Interface,
		DataPlane: result.DataPlane,
	}
	return lookup, nil
}
// Jobs returns information about every job on the device. Status can be one of: all, pending, or processed. If you want
// information about a specific job, specify the job ID instead of one of the other options.
func (p *PaloAlto) Jobs(status interface{}) (*Jobs, error) {
	var jobs Jobs
	var cmd string
	switch s := status.(type) {
	case string:
		switch s {
		case "all":
			cmd = "<show><jobs><all></all></jobs></show>"
		case "pending":
			cmd = "<show><jobs><pending></pending></jobs></show>"
		case "processed":
			cmd = "<show><jobs><processed></processed></jobs></show>"
		default:
			// Previously an unrecognized string left cmd empty and sent a
			// meaningless request to the device; fail fast locally instead.
			return nil, fmt.Errorf("status must be one of: all, pending, processed; got %q", s)
		}
	case int:
		cmd = fmt.Sprintf("<show><jobs><id>%d</id></jobs></show>", s)
	default:
		return nil, errors.New("status must be a string (all, pending, processed) or a job ID (int)")
	}
	_, res, errs := r.Get(fmt.Sprintf("%s&key=%s&type=op&cmd=%s", p.URI, p.Key, cmd)).End()
	if errs != nil {
		return nil, errs[0]
	}
	err := xml.Unmarshal([]byte(res), &jobs)
	if err != nil {
		return nil, err
	}
	return &jobs, nil
}
// QueryLogs allows you to pull logs from the system, given a specific log-type. Currently, the
// supported log types are as follows:
//
// config, system, traffic, threat, wildfire, url, data
//
// The LogParameters struct lists optional parameters you can use in your query. See the documentation for a full
// description of options. If you do not wish to use any of the optional parameters, just specify nil. The job ID is
// returned from the query, and should be passed to RetrieveLogs().
func (p *PaloAlto) QueryLogs(logtype string, parameters *LogParameters) (int, error) {
	var job logID
	endpoint := fmt.Sprintf("%s&key=%s&type=log&log-type=%s", p.URI, p.Key, logtype)
	// Append only the optional parameters the caller actually set.
	if parameters != nil {
		if parameters.Query != "" {
			endpoint += fmt.Sprintf("&query=%s", url.QueryEscape(parameters.Query))
		}
		if parameters.NLogs > 0 {
			endpoint += fmt.Sprintf("&nlogs=%d", parameters.NLogs)
		}
		if parameters.Direction != "" {
			endpoint += fmt.Sprintf("&dir=%s", parameters.Direction)
		}
		if parameters.Skip > 0 {
			endpoint += fmt.Sprintf("&skip=%d", parameters.Skip)
		}
	}
	_, res, errs := r.Get(endpoint).End()
	if errs != nil {
		return 0, errs[0]
	}
	if err := xml.Unmarshal([]byte(res), &job); err != nil {
		return 0, err
	}
	return job.ID, nil
}
// RetrieveLogs will return the log data as specified in the QueryLogs() function, given the job ID. If the job
// status is not FIN, then you will have to query the job ID until it has finished and then it will return the
// results.
func (p *PaloAlto) RetrieveLogs(id int) (*Logs, error) {
	var logs Logs
	endpoint := fmt.Sprintf("%s&key=%s&type=log&action=get&job-id=%d", p.URI, p.Key, id)
	_, res, errs := r.Get(endpoint).End()
	if errs != nil {
		return nil, errs[0]
	}
	if err := xml.Unmarshal([]byte(res), &logs); err != nil {
		return nil, err
	}
	return &logs, nil
}
// XpathConfig allows you to configure the device using an Xpath expression for the given xpath parameter.
// The element parameter can either be an XML file or an XML string when configuring the device. The action parameter can be one of:
//
// set, edit, rename, override or delete
//
// Set actions add, update, or merge configuration nodes, while edit actions replace configuration nodes
// - use the edit action with caution! If you are renaming an object, specify the new name for the
// object in the element parameter. If you are deleting a part of the configuration, you do not need
// the element parameter. For all other actions you will need to provide it.
//
// See https://goo.gl/G1vzJT for details regarding all of the actions available.
func (p *PaloAlto) XpathConfig(action, xpath string, element ...string) error {
	var reqError requestError
	var query string
	switch action {
	case "set", "edit", "override":
		if len(element) <= 0 {
			return errors.New("you must specify the element parameter")
		}
		// An element containing ".xml" is treated as a file path to read the XML from.
		if strings.Contains(element[0], ".xml") {
			c, err := ioutil.ReadFile(element[0])
			if err != nil {
				return err
			}
			xmlcontents := string(c)
			query = fmt.Sprintf("type=config&action=%s&xpath=%s&element=%s&key=%s", action, xpath, xmlcontents, p.Key)
		} else {
			query = fmt.Sprintf("type=config&action=%s&xpath=%s&element=%s&key=%s", action, xpath, element[0], p.Key)
		}
	case "rename":
		if len(element) <= 0 {
			return errors.New("you must specify the element parameter when renaming an object")
		}
		query = fmt.Sprintf("type=config&action=%s&xpath=%s&newname=%s&key=%s", action, xpath, element[0], p.Key)
	case "delete":
		query = fmt.Sprintf("type=config&action=%s&xpath=%s&key=%s", action, xpath, p.Key)
	default:
		// Previously an unrecognized action fell through and posted an empty
		// query to the device; fail fast locally with a descriptive error instead.
		return fmt.Errorf("action must be one of: set, edit, rename, override, delete; got %q", action)
	}
	_, resp, errs := r.Post(p.URI).Query(query).End()
	if errs != nil {
		return errs[0]
	}
	if err := xml.Unmarshal([]byte(resp), &reqError); err != nil {
		return err
	}
	if reqError.Status != "success" {
		return fmt.Errorf("error code %s: %s", reqError.Code, errorCodes[reqError.Code])
	}
	return nil
}
// XpathClone allows you to clone an existing part of the devices configuration. Use the xpath parameter
// to specify the location of the object to be cloned. Use the from parameter to specify the source object,
// and the newname parameter to provide a name for the cloned object.
//
// See https://goo.gl/ZfmBB6 for details.
func (p *PaloAlto) XpathClone(xpath, from, newname string) error {
	var apiErr requestError
	q := fmt.Sprintf("type=config&action=clone&xpath=%s&from=%s&newname=%s&key=%s", xpath, from, newname, p.Key)
	_, body, errs := r.Post(p.URI).Query(q).End()
	if errs != nil {
		return errs[0]
	}
	err := xml.Unmarshal([]byte(body), &apiErr)
	if err != nil {
		return err
	}
	if apiErr.Status == "success" {
		return nil
	}
	return fmt.Errorf("error code %s: %s", apiErr.Code, errorCodes[apiErr.Code])
}
// XpathMove allows you to move the location of an existing configuration object. Use the xpath parameter to specify
// the location of the object to be moved, and the where parameter to specify type of move. You can optionally use the
// destination parameter to specify the destination path.
//
// See https://goo.gl/LbkQDG for details.
func (p *PaloAlto) XpathMove(xpath, where string, destination ...string) error {
	var apiErr requestError
	var q string
	if len(destination) > 0 {
		q = fmt.Sprintf("type=config&action=move&xpath=%s&where=%s&dst=%s&key=%s", xpath, where, destination[0], p.Key)
	} else {
		q = fmt.Sprintf("type=config&action=move&xpath=%s&where=%s&key=%s", xpath, where, p.Key)
	}
	_, body, errs := r.Post(p.URI).Query(q).End()
	if errs != nil {
		return errs[0]
	}
	if err := xml.Unmarshal([]byte(body), &apiErr); err != nil {
		return err
	}
	if apiErr.Status == "success" {
		return nil
	}
	return fmt.Errorf("error code %s: %s", apiErr.Code, errorCodes[apiErr.Code])
}
// XpathMulti allows you to move and clone multiple objects across device groups and virtual systems. The element parameter
// can be either an XML file or XML string. The action parameter must be one of: clone or move.
// The xpath parameter is for the destination where the addresses will be moved to. The element parameter must
// include in the XML the xpath for the source and the list of objects within the specified source.
//
// See https://goo.gl/oeufnu for details.
func (p *PaloAlto) XpathMulti(action, xpath, element string) error {
	var apiErr requestError
	// When element names an .xml file, send the file contents; otherwise
	// treat element itself as the XML payload.
	payload := element
	if strings.Contains(element, ".xml") {
		contents, readErr := ioutil.ReadFile(element)
		if readErr != nil {
			return readErr
		}
		payload = string(contents)
	}
	query := fmt.Sprintf("type=config&action=multi-%s&xpath=%s&element=%s&key=%s", action, xpath, payload, p.Key)
	_, body, errs := r.Post(p.URI).Query(query).End()
	if errs != nil {
		return errs[0]
	}
	if unmarshalErr := xml.Unmarshal([]byte(body), &apiErr); unmarshalErr != nil {
		return unmarshalErr
	}
	if apiErr.Status == "success" {
		return nil
	}
	return fmt.Errorf("error code %s: %s", apiErr.Code, errorCodes[apiErr.Code])
}
// XpathGetConfig allows you to view the active or candidate configuration at the location specified in the
// xpath parameter.
//
// configtype must be "active" (action=show) or "candidate" (action=get); any
// other value returns an error.
func (p *PaloAlto) XpathGetConfig(configtype, xpath string) (string, error) {
	var reqError requestError
	var query string
	switch configtype {
	case "active":
		query = fmt.Sprintf("type=config&action=show&xpath=%s&key=%s", xpath, p.Key)
	case "candidate":
		query = fmt.Sprintf("type=config&action=get&xpath=%s&key=%s", xpath, p.Key)
	default:
		// Previously an unknown configtype fell through and POSTed an empty
		// query string to the device; fail fast instead.
		return "", fmt.Errorf("configtype must be 'active' or 'candidate', got %q", configtype)
	}
	_, resp, errs := r.Post(p.URI).Query(query).End()
	if errs != nil {
		return "", errs[0]
	}
	if err := xml.Unmarshal([]byte(resp), &reqError); err != nil {
		return "", err
	}
	if reqError.Status != "success" {
		return "", fmt.Errorf("error code %s: %s", reqError.Code, errorCodes[reqError.Code])
	}
	return resp, nil
}
// Command lets you run any operational mode command against the given device, and it returns the output. You
// must use the XML-formatted version of the command string as if you were calling the API yourself,
// (e.g. "<show><running><ippool></ippool></running></show>")
func (p *PaloAlto) Command(command string) (string, error) {
	var output commandOutput
	url := fmt.Sprintf("%s&key=%s&type=op&cmd=%s", p.URI, p.Key, command)
	_, res, errs := r.Get(url).End()
	if errs != nil {
		return "", errs[0]
	}
	if err := xml.Unmarshal([]byte(res), &output); err != nil {
		return "", err
	}
	return output.Data, nil
}
// Routes will retrieve information about each route in the devices routing table(s). You can (optionally) specify
// a specific virtual router to retrieve routes from.
func (p *PaloAlto) Routes(vr ...string) (*RoutingTable, error) {
	// Only a local firewall (not Panorama) exposes its routing table.
	if p.DeviceType != "panos" {
		return nil, errors.New("you can only retrieve the routing table on a local firewall")
	}
	var table RoutingTable
	query := fmt.Sprintf("%s&key=%s&type=op&cmd=<show><routing><route></route></routing></show>", p.URI, p.Key)
	if len(vr) > 0 {
		query = fmt.Sprintf("%s&key=%s&type=op&cmd=<show><routing><route><virtual-router>%s</virtual-router></route></routing></show>", p.URI, p.Key, vr[0])
	}
	_, body, errs := r.Post(p.URI).Query(query).End()
	if errs != nil {
		return nil, errs[0]
	}
	if err := xml.Unmarshal([]byte(body), &table); err != nil {
		return nil, err
	}
	if table.Status != "success" {
		return nil, fmt.Errorf("error code %s: %s", table.Code, errorCodes[table.Code])
	}
	return &table, nil
}
// Sessions will retrieve information about each session within the session table on a firewall.
func (p *PaloAlto) Sessions() (*SessionTable, error) {
	// Only a local firewall (not Panorama) exposes its session table.
	if p.DeviceType != "panos" {
		return nil, errors.New("you can only retrieve the session table on a local firewall")
	}
	var table SessionTable
	query := fmt.Sprintf("%s&key=%s&type=op&cmd=<show><session><all></all></session></show>", p.URI, p.Key)
	_, body, errs := r.Post(p.URI).Query(query).End()
	if errs != nil {
		return nil, errs[0]
	}
	if err := xml.Unmarshal([]byte(body), &table); err != nil {
		return nil, err
	}
	if table.Status != "success" {
		return nil, fmt.Errorf("error code %s: %s", table.Code, errorCodes[table.Code])
	}
	return &table, nil
}
|
package sous
import (
"container/ring"
"sort"
"sync"
"github.com/pborman/uuid"
)
// MaxRefsPerR11nQueue is the maximum number of rectifications to cache in memory.
const MaxRefsPerR11nQueue = 100

// R11nQueue is a queue of rectifications.
type R11nQueue struct {
	// cap is the size of the buffered queue channel.
	cap   int
	queue chan *QueuedR11n
	// refs holds items not yet fully handled; allRefs additionally keeps a
	// bounded history (MaxRefsPerR11nQueue entries) so Wait can look up
	// items even after handling.
	refs, allRefs map[R11nID]*QueuedR11n
	// fifoRefs records insertion order so the oldest allRefs entry can be
	// evicted once the ring wraps around.
	fifoRefs *ring.Ring
	handler  func(*QueuedR11n) DiffResolution
	// start tells NewR11nQueue to begin processing immediately.
	start bool
	sync.Mutex
}

// R11nQueueCapDefault is the default capacity for a new R11nQueue.
const R11nQueueCapDefault = 10

// NewR11nQueue creates a freshly initialised R11nQueue.
func NewR11nQueue(opts ...R11nQueueOpt) *R11nQueue {
	rq := &R11nQueue{
		cap: R11nQueueCapDefault,
	}
	// Apply options before init so e.g. R11nQueueCap affects the channel size.
	for _, opt := range opts {
		opt(rq)
	}
	rq.init()
	if rq.start {
		rq.Start(rq.handler)
	}
	return rq
}

// R11nQueueOpt is an option for configuring an R11nQueue.
type R11nQueueOpt func(*R11nQueue)

// R11nQueueCap sets the max capacity of an R11nQueue to the supplied cap.
func R11nQueueCap(cap int) R11nQueueOpt {
	return func(rq *R11nQueue) {
		rq.cap = cap
	}
}

// R11nQueueStartWithHandler starts processing the queue using the supplied
// handler.
func R11nQueueStartWithHandler(handler func(*QueuedR11n) DiffResolution) R11nQueueOpt {
	return func(rq *R11nQueue) {
		// Wrap the handler so the item records its own resolution.
		rq.handler = func(qr *QueuedR11n) DiffResolution {
			dr := handler(qr)
			// TODO SS:
			// This oddity ensures the resolution on the queued rectification
			// matches that returned by the handler. This is only really
			// important in testing where we don't want to run rectifications
			// just to test the queue. However I would rather clean up the
			// implementation to remove the need for this.
			qr.Rectification.Resolution = dr
			return dr
		}
		rq.start = true
	}
}

// Snapshot returns a slice of items to be processed in the queue ordered by
// their queue position. It includes the item being worked on at the head of the
// queue.
func (rq *R11nQueue) Snapshot() []QueuedR11n {
	rq.Lock()
	defer rq.Unlock()
	var snapshot []QueuedR11n
	// Copy values (not pointers) so callers get a stable view.
	for _, qr := range rq.refs {
		snapshot = append(snapshot, *qr)
	}
	sort.Slice(snapshot, func(i, j int) bool {
		return snapshot[i].Pos < snapshot[j].Pos
	})
	return snapshot
}

// ByID returns the queued rectification matching ID and true if it exists, nil
// and false otherwise.
func (rq *R11nQueue) ByID(id R11nID) (*QueuedR11n, bool) {
	rq.Lock()
	defer rq.Unlock()
	qr, ok := rq.refs[id]
	return qr, ok
}

// init (re)creates the queue channel, the ref maps and the FIFO eviction ring.
func (rq *R11nQueue) init() *R11nQueue {
	rq.Lock()
	defer rq.Unlock()
	rq.queue = make(chan *QueuedR11n, rq.cap)
	rq.refs = map[R11nID]*QueuedR11n{}
	rq.allRefs = map[R11nID]*QueuedR11n{}
	rq.fifoRefs = ring.New(MaxRefsPerR11nQueue)
	return rq
}

// QueuedR11n is a queue item wrapping a Rectification with an ID and position.
type QueuedR11n struct {
	ID            R11nID
	Pos           int
	Rectification *Rectification
	// done is closed once this item has been handled; Wait blocks on it.
	done chan struct{}
}

// R11nID is a QueuedR11n identifier.
type R11nID string

// NewR11nID returns a new random R11nID.
func NewR11nID() R11nID {
	return R11nID(uuid.New())
}
// Start starts applying handler to each item on the queue in order, and
// returns a channel streaming each handler's result.
//
// The send into results is non-blocking: results is buffered to 100, and if
// no reader drains it the processing goroutine would otherwise block forever
// once the buffer fills (a channel-send deadlock that stalled the whole
// queue). Results that cannot be delivered immediately are dropped so the
// queue keeps draining.
func (rq *R11nQueue) Start(handler func(*QueuedR11n) DiffResolution) <-chan DiffResolution {
	rq.Lock()
	defer rq.Unlock()
	results := make(chan DiffResolution, 100)
	go func() {
		for {
			qr := rq.next()
			dr := handler(qr)
			select {
			case results <- dr:
			default:
				// No reader and the buffer is full: drop the result rather
				// than deadlocking the processing goroutine.
			}
			rq.Lock()
			// Closing done releases any Wait callers for this item.
			close(qr.done)
			delete(rq.refs, qr.ID)
			rq.Unlock()
		}
	}()
	return results
}
// Wait waits for a particular rectification to be processed then returns its
// result. If that rectification is not in this queue, it immediately returns a
// zero DiffResolution and false.
func (rq *R11nQueue) Wait(id R11nID) (DiffResolution, bool) {
	rq.Lock()
	// allRefs (not refs) is consulted so items that already finished can
	// still be waited on, up to the MaxRefsPerR11nQueue history bound.
	qr, ok := rq.allRefs[id]
	rq.Unlock()
	if !ok {
		return DiffResolution{}, false
	}
	// done is closed by the processing goroutine once the item is handled.
	<-qr.done
	return qr.Rectification.Resolution, true
}

// Push adds r to the queue, wrapped in a *QueuedR11n. It returns the wrapper.
// If the push was successful, it returns the wrapper and true, otherwise it
// returns nil and false.
func (rq *R11nQueue) Push(r *Rectification) (*QueuedR11n, bool) {
	rq.Lock()
	defer rq.Unlock()
	// Reject pushes once the channel buffer is full.
	if len(rq.queue) == rq.cap {
		return nil, false
	}
	return rq.internalPush(r), true
}

// internalPush assumes rq is already locked.
func (rq *R11nQueue) internalPush(r *Rectification) *QueuedR11n {
	id := NewR11nID()
	qr := &QueuedR11n{
		ID:            id,
		Pos:           len(rq.queue),
		Rectification: r,
		done:          make(chan struct{}),
	}
	rq.refs[id] = qr
	rq.allRefs[id] = qr
	// Advance the FIFO ring; once it has wrapped, the slot we land on holds
	// the oldest remembered ID, which is evicted from allRefs.
	rq.fifoRefs = rq.fifoRefs.Next()
	if rq.fifoRefs.Value != nil {
		idToDelete := rq.fifoRefs.Value.(R11nID)
		delete(rq.allRefs, idToDelete)
	}
	rq.fifoRefs.Value = id
	rq.queue <- qr
	return qr
}

// PushIfEmpty adds an item to the queue if it is empty, and returns the wrapper
// added and true if successful. If the queue is not empty, or is full, it
// returns nil, false.
func (rq *R11nQueue) PushIfEmpty(r *Rectification) (*QueuedR11n, bool) {
	rq.Lock()
	defer rq.Unlock()
	// We look at refs since we only delete the ref after handling has happened.
	// If we are busy handling a r11n, then we consider the queue non-empty.
	if len(rq.refs) != 0 {
		return nil, false
	}
	return rq.internalPush(r), true
}

// Len returns the current number of items in the queue.
func (rq *R11nQueue) Len() int {
	return len(rq.queue)
}

// next waits until there is something on the queue to
// return and then returns it.
func (rq *R11nQueue) next() *QueuedR11n {
	qr := <-rq.queue
	rq.Lock()
	defer rq.Unlock()
	rq.handlePopped(qr.ID)
	return qr
}

// handlePopped assumes rq is locked.
// NOTE(review): the id parameter is unused; every remaining ref simply has
// its queue position decremented.
func (rq *R11nQueue) handlePopped(id R11nID) {
	for _, r := range rq.refs {
		r.Pos--
	}
}
lib: Fix channel send deadlock.
- Nothing was reading from the results channel, so after 100 attempted
resolutions the queue handler blocked forever.
package sous
import (
"container/ring"
"sort"
"sync"
"github.com/pborman/uuid"
)
// MaxRefsPerR11nQueue is the maximum number of rectifications to cache in memory.
const MaxRefsPerR11nQueue = 100

// R11nQueue is a queue of rectifications.
type R11nQueue struct {
	// cap is the size of the buffered queue channel.
	cap   int
	queue chan *QueuedR11n
	// refs holds items not yet fully handled; allRefs additionally keeps a
	// bounded history (MaxRefsPerR11nQueue entries) so Wait can look up
	// items even after handling.
	refs, allRefs map[R11nID]*QueuedR11n
	// fifoRefs records insertion order so the oldest allRefs entry can be
	// evicted once the ring wraps around.
	fifoRefs *ring.Ring
	handler  func(*QueuedR11n) DiffResolution
	// start tells NewR11nQueue to begin processing immediately.
	start bool
	sync.Mutex
}

// R11nQueueCapDefault is the default capacity for a new R11nQueue.
const R11nQueueCapDefault = 10

// NewR11nQueue creates a freshly initialised R11nQueue.
func NewR11nQueue(opts ...R11nQueueOpt) *R11nQueue {
	rq := &R11nQueue{
		cap: R11nQueueCapDefault,
	}
	// Apply options before init so e.g. R11nQueueCap affects the channel size.
	for _, opt := range opts {
		opt(rq)
	}
	rq.init()
	if rq.start {
		rq.Start(rq.handler)
	}
	return rq
}

// R11nQueueOpt is an option for configuring an R11nQueue.
type R11nQueueOpt func(*R11nQueue)

// R11nQueueCap sets the max capacity of an R11nQueue to the supplied cap.
func R11nQueueCap(cap int) R11nQueueOpt {
	return func(rq *R11nQueue) {
		rq.cap = cap
	}
}

// R11nQueueStartWithHandler starts processing the queue using the supplied
// handler.
func R11nQueueStartWithHandler(handler func(*QueuedR11n) DiffResolution) R11nQueueOpt {
	return func(rq *R11nQueue) {
		// Wrap the handler so the item records its own resolution.
		rq.handler = func(qr *QueuedR11n) DiffResolution {
			dr := handler(qr)
			// TODO SS:
			// This oddity ensures the resolution on the queued rectification
			// matches that returned by the handler. This is only really
			// important in testing where we don't want to run rectifications
			// just to test the queue. However I would rather clean up the
			// implementation to remove the need for this.
			qr.Rectification.Resolution = dr
			return dr
		}
		rq.start = true
	}
}

// Snapshot returns a slice of items to be processed in the queue ordered by
// their queue position. It includes the item being worked on at the head of the
// queue.
func (rq *R11nQueue) Snapshot() []QueuedR11n {
	rq.Lock()
	defer rq.Unlock()
	var snapshot []QueuedR11n
	// Copy values (not pointers) so callers get a stable view.
	for _, qr := range rq.refs {
		snapshot = append(snapshot, *qr)
	}
	sort.Slice(snapshot, func(i, j int) bool {
		return snapshot[i].Pos < snapshot[j].Pos
	})
	return snapshot
}

// ByID returns the queued rectification matching ID and true if it exists, nil
// and false otherwise.
func (rq *R11nQueue) ByID(id R11nID) (*QueuedR11n, bool) {
	rq.Lock()
	defer rq.Unlock()
	qr, ok := rq.refs[id]
	return qr, ok
}

// init (re)creates the queue channel, the ref maps and the FIFO eviction ring.
func (rq *R11nQueue) init() *R11nQueue {
	rq.Lock()
	defer rq.Unlock()
	rq.queue = make(chan *QueuedR11n, rq.cap)
	rq.refs = map[R11nID]*QueuedR11n{}
	rq.allRefs = map[R11nID]*QueuedR11n{}
	rq.fifoRefs = ring.New(MaxRefsPerR11nQueue)
	return rq
}

// QueuedR11n is a queue item wrapping a Rectification with an ID and position.
type QueuedR11n struct {
	ID            R11nID
	Pos           int
	Rectification *Rectification
	// done is closed once this item has been handled; Wait blocks on it.
	done chan struct{}
}

// R11nID is a QueuedR11n identifier.
type R11nID string

// NewR11nID returns a new random R11nID.
func NewR11nID() R11nID {
	return R11nID(uuid.New())
}

// Start starts applying handler to each item on the queue in order.
// NOTE(review): the processing goroutine runs for the lifetime of the
// process; there is no visible mechanism to stop it.
func (rq *R11nQueue) Start(handler func(*QueuedR11n) DiffResolution) {
	rq.Lock()
	defer rq.Unlock()
	go func() {
		for {
			qr := rq.next()
			handler(qr)
			rq.Lock()
			// Closing done releases any Wait callers for this item.
			close(qr.done)
			delete(rq.refs, qr.ID)
			rq.Unlock()
		}
	}()
}

// Wait waits for a particular rectification to be processed then returns its
// result. If that rectification is not in this queue, it immediately returns a
// zero DiffResolution and false.
func (rq *R11nQueue) Wait(id R11nID) (DiffResolution, bool) {
	rq.Lock()
	// allRefs (not refs) is consulted so already-handled items can still be
	// waited on, up to the MaxRefsPerR11nQueue history bound.
	qr, ok := rq.allRefs[id]
	rq.Unlock()
	if !ok {
		return DiffResolution{}, false
	}
	<-qr.done
	return qr.Rectification.Resolution, true
}

// Push adds r to the queue, wrapped in a *QueuedR11n. It returns the wrapper.
// If the push was successful, it returns the wrapper and true, otherwise it
// returns nil and false.
func (rq *R11nQueue) Push(r *Rectification) (*QueuedR11n, bool) {
	rq.Lock()
	defer rq.Unlock()
	// Reject pushes once the channel buffer is full.
	if len(rq.queue) == rq.cap {
		return nil, false
	}
	return rq.internalPush(r), true
}

// internalPush assumes rq is already locked.
func (rq *R11nQueue) internalPush(r *Rectification) *QueuedR11n {
	id := NewR11nID()
	qr := &QueuedR11n{
		ID:            id,
		Pos:           len(rq.queue),
		Rectification: r,
		done:          make(chan struct{}),
	}
	rq.refs[id] = qr
	rq.allRefs[id] = qr
	// Advance the FIFO ring; once it has wrapped, the slot we land on holds
	// the oldest remembered ID, which is evicted from allRefs.
	rq.fifoRefs = rq.fifoRefs.Next()
	if rq.fifoRefs.Value != nil {
		idToDelete := rq.fifoRefs.Value.(R11nID)
		delete(rq.allRefs, idToDelete)
	}
	rq.fifoRefs.Value = id
	rq.queue <- qr
	return qr
}

// PushIfEmpty adds an item to the queue if it is empty, and returns the wrapper
// added and true if successful. If the queue is not empty, or is full, it
// returns nil, false.
func (rq *R11nQueue) PushIfEmpty(r *Rectification) (*QueuedR11n, bool) {
	rq.Lock()
	defer rq.Unlock()
	// We look at refs since we only delete the ref after handling has happened.
	// If we are busy handling a r11n, then we consider the queue non-empty.
	if len(rq.refs) != 0 {
		return nil, false
	}
	return rq.internalPush(r), true
}

// Len returns the current number of items in the queue.
func (rq *R11nQueue) Len() int {
	return len(rq.queue)
}

// next waits until there is something on the queue to
// return and then returns it.
func (rq *R11nQueue) next() *QueuedR11n {
	qr := <-rq.queue
	rq.Lock()
	defer rq.Unlock()
	rq.handlePopped(qr.ID)
	return qr
}

// handlePopped assumes rq is locked.
// NOTE(review): the id parameter is unused; every remaining ref simply has
// its queue position decremented.
func (rq *R11nQueue) handlePopped(id R11nID) {
	for _, r := range rq.refs {
		r.Pos--
	}
}
|
// Copyright (c) 2016, Daniel Martí <mvdan@mvdan.cc>
// See LICENSE for licensing information
package sh
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"regexp"
"strconv"
"strings"
)
// Mode controls the parser behaviour via a set of flags.
type Mode uint

const (
	ParseComments Mode = 1 << iota // add comments to the AST
)

// Parse reads and parses a shell program with an optional name. It
// returns the parsed program if no issues were encountered. Otherwise,
// an error is returned.
//
// The type of src must be []byte, string or io.Reader.
func Parse(src interface{}, name string, mode Mode) (*File, error) {
	p := parser{
		f:    &File{Name: name},
		mode: mode,
	}
	if p.src, p.err = getSource(src); p.err != nil {
		return nil, p.err
	}
	// lines records the byte offset at which each line starts; line 1
	// starts at offset 0, and further offsets are appended as newlines
	// are consumed during tokenization.
	p.f.lines = make([]int, 1, 16)
	p.next()
	p.f.Stmts = p.stmts()
	return p.f, p.err
}
// getSource normalises the accepted src types ([]byte, string and
// io.Reader) into a single []byte for the parser, erroring on any other
// type.
func getSource(src interface{}) ([]byte, error) {
	if b, ok := src.([]byte); ok {
		return b, nil
	}
	if s, ok := src.(string); ok {
		return []byte(s), nil
	}
	if r, ok := src.(io.Reader); ok {
		return ioutil.ReadAll(r)
	}
	return nil, fmt.Errorf("invalid src type: %T", src)
}
// parser holds all tokenization and parsing state for one source input.
type parser struct {
	src []byte // raw input bytes
	f   *File  // file node being built
	mode Mode  // parser behaviour flags

	// spaced is true if the current token was preceded by whitespace;
	// newLine is true if it was preceded by a newline.
	spaced, newLine bool
	// stopNewline makes the next newline emit STOPPED (used for heredocs);
	// forbidNested rejects nested statements (e.g. inside some words).
	stopNewline, forbidNested bool

	err error // sticky first error encountered

	tok Token  // current token
	val string // current token's literal value, if any

	buf [8]byte // small scratch buffer reused when building literals

	pos  Pos // position of the current token (1-based byte offset)
	npos int // byte offset of the next character to consume

	quote Token // current quoting/nesting context (e.g. DQUOTE, RPAREN)

	// list of pending heredoc bodies
	heredocs []*Redirect
}
// regOps reports whether b forms or starts a token in a regular
// (unquoted) context.
func regOps(b byte) bool {
	return strings.IndexByte(";\"'()$|&><`", b) >= 0
}

// paramOps reports whether b is tokenized inside parameter expansions.
func paramOps(b byte) bool {
	return strings.IndexByte("}#:-+=?%[/", b) >= 0
}

// arithmOps reports whether b is tokenized inside arithmetic expansions.
func arithmOps(b byte) bool {
	return strings.IndexByte("+-!*/%()^<>:=,?|&", b) >= 0
}
// next advances the parser to the next token. Behaviour depends on the
// current quoting context p.quote: inside quotes/expansions only the
// relevant delimiter bytes are tokenized; in regular contexts whitespace,
// comments and escaped newlines are skipped first.
func (p *parser) next() {
	if p.tok == EOF {
		return
	}
	if p.npos >= len(p.src) {
		p.errPass(io.EOF)
		return
	}
	b := p.src[p.npos]
	// A STOPPED token followed by a newline is the point where pending
	// heredoc bodies are read.
	if p.tok == STOPPED && b == '\n' {
		p.npos++
		p.f.lines = append(p.f.lines, p.npos)
		p.doHeredocs()
		if p.npos >= len(p.src) {
			p.errPass(io.EOF)
			return
		}
		b = p.src[p.npos]
		p.spaced, p.newLine = true, true
	} else {
		p.spaced, p.newLine = false, false
	}
	q := p.quote
	switch q {
	case QUO:
		// Inside the pattern part of a ${x/.../...} replacement.
		p.pos = Pos(p.npos + 1)
		switch b {
		case '}':
			p.npos++
			p.advanceTok(RBRACE)
		case '/':
			p.npos++
			p.advanceTok(QUO)
		case '`', '"', '$':
			p.advanceTok(p.regToken(b))
		default:
			p.advanceLitOther(q)
		}
		return
	case DQUOTE:
		p.pos = Pos(p.npos + 1)
		switch b {
		case '`', '"', '$':
			p.advanceTok(p.dqToken(b))
		case '\n':
			// NOTE(review): this case is identical to the default branch;
			// it looks redundant — confirm before removing.
			p.advanceLitDquote()
		default:
			p.advanceLitDquote()
		}
		return
	case RBRACE:
		p.pos = Pos(p.npos + 1)
		switch b {
		case '}':
			p.npos++
			p.advanceTok(RBRACE)
		case '`', '"', '$':
			p.advanceTok(p.regToken(b))
		default:
			p.advanceLitOther(q)
		}
		return
	case SQUOTE:
		p.pos = Pos(p.npos + 1)
		if b == '\'' {
			p.npos++
			p.advanceTok(SQUOTE)
		} else {
			p.advanceLitOther(q)
		}
		return
	}
skipSpace:
	// Regular context: consume whitespace and escaped newlines before the
	// next token.
	for {
		switch b {
		case ' ', '\t', '\r':
			p.spaced = true
			p.npos++
		case '\n':
			if p.stopNewline {
				p.stopNewline = false
				p.advanceTok(STOPPED)
				return
			}
			p.spaced = true
			if p.npos < len(p.src) {
				p.npos++
			}
			p.f.lines = append(p.f.lines, p.npos)
			p.newLine = true
		case '\\':
			// A backslash-newline is a line continuation.
			if p.npos < len(p.src)-1 && p.src[p.npos+1] == '\n' {
				p.npos += 2
				p.f.lines = append(p.f.lines, p.npos)
			} else {
				break skipSpace
			}
		default:
			break skipSpace
		}
		if p.npos >= len(p.src) {
			p.errPass(io.EOF)
			return
		}
		b = p.src[p.npos]
	}
	p.pos = Pos(p.npos + 1)
	switch {
	case q == ILLEGAL, q == RPAREN, q == BQUOTE, q == DSEMICOLON:
		switch b {
		case ';', '"', '\'', '(', ')', '$', '|', '&', '>', '<', '`':
			p.advanceTok(p.regToken(b))
		case '#':
			// Comment runs to end of line; recorded only with ParseComments.
			p.npos++
			bs, _ := p.readUntil('\n')
			p.npos += len(bs)
			if p.mode&ParseComments > 0 {
				p.f.Comments = append(p.f.Comments, &Comment{
					Hash: p.pos,
					Text: string(bs),
				})
			}
			p.next()
		default:
			p.advanceLitNone()
		}
	case q == LBRACE && paramOps(b):
		p.advanceTok(p.paramToken(b))
	case (q == DLPAREN || q == DRPAREN || q == LPAREN) && arithmOps(b):
		p.advanceTok(p.arithmToken(b))
	case q == RBRACK && b == ']':
		p.npos++
		p.advanceTok(RBRACK)
	case regOps(b):
		p.advanceTok(p.regToken(b))
	default:
		p.advanceLitOther(q)
	}
}
// advanceLitOther consumes a literal token in the quoting context q,
// stopping at whichever delimiter bytes end a literal in that context.
func (p *parser) advanceLitOther(q Token) {
	bs := p.buf[:0]
	for {
		if p.npos >= len(p.src) {
			p.advanceBoth(LIT, string(bs))
			return
		}
		b := p.src[p.npos]
		switch {
		case b == '\\': // escaped byte follows
			if p.npos == len(p.src)-1 {
				// Trailing backslash at EOF is kept literally.
				p.npos++
				bs = append(bs, '\\')
				p.advanceBoth(LIT, string(bs))
				return
			}
			b = p.src[p.npos+1]
			p.npos += 2
			if b == '\n' {
				// Escaped newline: line continuation, not part of the literal.
				p.f.lines = append(p.f.lines, p.npos)
			} else {
				bs = append(bs, '\\', b)
			}
			continue
		case q == SQUOTE:
			switch b {
			case '\n':
				p.f.lines = append(p.f.lines, p.npos+1)
			case '\'':
				p.advanceBoth(LIT, string(bs))
				return
			}
		case b == '`', b == '$':
			p.advanceBoth(LIT, string(bs))
			return
		case q == RBRACE:
			if b == '}' || b == '"' {
				p.advanceBoth(LIT, string(bs))
				return
			}
		case q == LBRACE && paramOps(b), q == RBRACK && b == ']':
			p.advanceBoth(LIT, string(bs))
			return
		case q == QUO:
			if b == '/' || b == '}' {
				p.advanceBoth(LIT, string(bs))
				return
			}
		case wordBreak(b), regOps(b):
			p.advanceBoth(LIT, string(bs))
			return
		case (q == DLPAREN || q == DRPAREN || q == LPAREN) && arithmOps(b):
			p.advanceBoth(LIT, string(bs))
			return
		}
		bs = append(bs, p.src[p.npos])
		p.npos++
	}
}

// advanceLitNone consumes a literal token in an unquoted context,
// producing LITWORD when the literal ends on a word break and LIT when
// it is followed by more word parts (quotes, expansions).
func (p *parser) advanceLitNone() {
	bs := p.buf[:0]
	for {
		if p.npos >= len(p.src) {
			p.advanceBoth(LITWORD, string(bs))
			return
		}
		switch p.src[p.npos] {
		case '\\': // escaped byte follows
			if p.npos == len(p.src)-1 {
				p.npos++
				bs = append(bs, '\\')
				p.advanceBoth(LIT, string(bs))
				return
			}
			b := p.src[p.npos+1]
			p.npos += 2
			if b == '\n' {
				p.f.lines = append(p.f.lines, p.npos)
			} else {
				bs = append(bs, '\\', b)
			}
		case ' ', '\t', '\n', '\r', '&', '>', '<', '|', ';', '(', ')', '`':
			p.advanceBoth(LITWORD, string(bs))
			return
		case '"', '\'', '$':
			// More word parts follow, so this is a LIT, not a full word.
			p.advanceBoth(LIT, string(bs))
			return
		default:
			bs = append(bs, p.src[p.npos])
			p.npos++
		}
	}
}

// advanceLitDquote consumes a literal inside double quotes, stopping
// before backquotes, closing quotes and dollar expansions; backslash
// escapes the following byte.
func (p *parser) advanceLitDquote() {
	var i int
loop:
	for i = p.npos; i < len(p.src); i++ {
		switch p.src[i] {
		case '\\': // escaped byte follows
			i++
			if i == len(p.src) {
				break loop
			}
			if p.src[i] == '\n' {
				p.f.lines = append(p.f.lines, i+1)
			}
		case '`', '"', '$':
			break loop
		case '\n':
			p.f.lines = append(p.f.lines, i+1)
		}
	}
	// Copy the consumed span out so the literal does not alias p.src.
	bs := make([]byte, i-p.npos)
	copy(bs, p.src[p.npos:i])
	p.npos = i
	p.advanceBoth(LIT, string(bs))
}
// advanceTok sets the current token with an empty literal value.
func (p *parser) advanceTok(tok Token) { p.advanceBoth(tok, "") }

// advanceBoth sets the current token and its literal value.
func (p *parser) advanceBoth(tok Token, val string) { p.tok, p.val = tok, val }

// readUntil returns the bytes from the current position up to (not
// including) the first occurrence of b, and whether b was found. It does
// not advance p.npos.
func (p *parser) readUntil(b byte) ([]byte, bool) {
	rem := p.src[p.npos:]
	i := bytes.IndexByte(rem, b)
	if i < 0 {
		bs := rem
		return bs, false
	}
	return rem[:i], true
}
// doHeredocs reads the body for every pending heredoc redirect, in the
// order the redirects appeared, then clears the pending list.
func (p *parser) doHeredocs() {
	for _, r := range p.heredocs {
		// The delimiter word is unquoted to get the literal end marker.
		end := unquotedWordStr(p.f, &r.Word)
		r.Hdoc.ValuePos = Pos(p.npos + 1)
		// DHEREDOC (<<-) allows the end marker to be indented with tabs.
		r.Hdoc.Value, _ = p.readHdocBody(end, r.Op == DHEREDOC)
	}
	p.heredocs = nil
}

// readHdocBody consumes lines until one equals end (optionally after
// stripping leading tabs, when noTabs is set), returning the accumulated
// body and whether the end marker was found before EOF.
func (p *parser) readHdocBody(end string, noTabs bool) (string, bool) {
	var buf bytes.Buffer
	for p.npos < len(p.src) {
		bs, found := p.readUntil('\n')
		p.npos += len(bs) + 1
		if found {
			p.f.lines = append(p.f.lines, p.npos)
		}
		line := string(bs)
		if line == end || (noTabs && strings.TrimLeft(line, "\t") == end) {
			// add trailing tabs
			buf.Write(bs[:len(bs)-len(end)])
			return buf.String(), true
		}
		buf.Write(bs)
		if found {
			buf.WriteByte('\n')
		}
	}
	return buf.String(), false
}
// wordBreak reports whether b terminates a word: whitespace or an
// unquoted shell operator byte.
func wordBreak(b byte) bool {
	return strings.IndexByte(" \t\r\n&><|;()`", b) >= 0
}
// got consumes the current token and returns true if it matches tok;
// otherwise it leaves the parser untouched and returns false.
func (p *parser) got(tok Token) bool {
	if p.tok == tok {
		p.next()
		return true
	}
	return false
}

// gotRsrv consumes the current token and returns true if it is the
// literal word val (used for reserved words like "if" or "done").
func (p *parser) gotRsrv(val string) bool {
	if p.tok == LITWORD && p.val == val {
		p.next()
		return true
	}
	return false
}

// gotSameLine is like got, but refuses to match across a newline.
func (p *parser) gotSameLine(tok Token) bool {
	if !p.newLine && p.tok == tok {
		p.next()
		return true
	}
	return false
}
// readableStr returns s quoted for use in error messages when it is a
// word-like token (starts with a lowercase letter, e.g. "if"), and
// unchanged otherwise.
func readableStr(s string) string {
	// Guard the empty string: indexing s[0] below would panic.
	if s == "" {
		return s
	}
	// don't quote tokens like & or }
	if s[0] >= 'a' && s[0] <= 'z' {
		return strconv.Quote(s)
	}
	return s
}
// followErr records a "left must be followed by right" parse error at pos.
func (p *parser) followErr(pos Pos, left, right string) {
	leftStr := readableStr(left)
	p.posErr(pos, "%s must be followed by %s", leftStr, right)
}

// follow consumes tok, recording a follow error anchored at lpos if the
// current token does not match. It returns the position of the token.
func (p *parser) follow(lpos Pos, left string, tok Token) Pos {
	pos := p.pos
	if !p.got(tok) {
		p.followErr(lpos, left, fmt.Sprintf(`%q`, tok))
	}
	return pos
}

// followRsrv is follow for reserved words instead of tokens.
func (p *parser) followRsrv(lpos Pos, left, val string) Pos {
	pos := p.pos
	if !p.gotRsrv(val) {
		p.followErr(lpos, left, fmt.Sprintf(`%q`, val))
	}
	return pos
}

// followStmts parses the statement list that must follow left (e.g. after
// "then"), allowing an empty list only when terminated by ; or a newline.
func (p *parser) followStmts(left string, lpos Pos, stops ...string) []*Stmt {
	if p.gotSameLine(SEMICOLON) {
		return nil
	}
	sts := p.stmts(stops...)
	if len(sts) < 1 && !p.newLine {
		p.followErr(lpos, left, "a statement list")
	}
	return sts
}

// followWordTok parses the word that must follow the token at pos,
// recording an error if none is present.
func (p *parser) followWordTok(tok Token, pos Pos) Word {
	w, ok := p.gotWord()
	if !ok {
		p.followErr(pos, tok.String(), "a word")
	}
	return w
}

// followWord is followWordTok with a plain string describing the left side.
func (p *parser) followWord(s string, pos Pos) Word {
	w, ok := p.gotWord()
	if !ok {
		p.followErr(pos, s, "a word")
	}
	return w
}

// stmtEnd consumes the reserved word that closes a compound statement
// (e.g. "fi", "done"), recording an error anchored at the statement start.
func (p *parser) stmtEnd(n Node, start, end string) Pos {
	pos := p.pos
	if !p.gotRsrv(end) {
		p.posErr(n.Pos(), `%s statement must end with %q`, start, end)
	}
	return pos
}

// quoteErr records an unclosed-quote error anchored at lpos.
func (p *parser) quoteErr(lpos Pos, quote Token) {
	p.posErr(lpos, `reached %s without closing quote %s`, p.tok, quote)
}

// matchingErr records an unmatched-token error (e.g. "(" without ")").
func (p *parser) matchingErr(lpos Pos, left, right Token) {
	p.posErr(lpos, `reached %s without matching token %s with %s`,
		p.tok, left, right)
}

// matched consumes right, recording a matching error anchored at the
// position of left if it is missing. It returns right's position.
func (p *parser) matched(lpos Pos, left, right Token) Pos {
	pos := p.pos
	if !p.got(right) {
		p.matchingErr(lpos, left, right)
	}
	return pos
}

// errPass records err as the parser's sticky error (io.EOF is not an
// error) and forces the token stream to EOF so parsing unwinds.
func (p *parser) errPass(err error) {
	if p.err == nil {
		if err != io.EOF {
			p.err = err
		}
		p.advanceTok(EOF)
	}
}
// ParseError represents an error found when parsing a source file.
type ParseError struct {
	Position
	Filename, Text string
}

// Error formats the error as "file:line:col: text", omitting the file
// prefix when no filename was given.
func (e *ParseError) Error() string {
	prefix := ""
	if e.Filename != "" {
		prefix = e.Filename + ":"
	}
	return fmt.Sprintf("%s%d:%d: %s", prefix, e.Line, e.Column, e.Text)
}

// posErr records a formatted ParseError at pos via errPass.
func (p *parser) posErr(pos Pos, format string, a ...interface{}) {
	p.errPass(&ParseError{
		Position: p.f.Position(pos),
		Filename: p.f.Name,
		Text:     fmt.Sprintf(format, a...),
	})
}

// curErr is posErr at the current token's position.
func (p *parser) curErr(format string, a ...interface{}) {
	p.posErr(p.pos, format, a...)
}

// dsemicolon reports whether t is one of the case-clause terminators
// (;; ;& ;;&).
func dsemicolon(t Token) bool {
	return t == DSEMICOLON || t == SEMIFALL || t == DSEMIFALL
}
// stmts parses a list of statements, stopping at EOF, at the token that
// closes the current quoting context, or at any of the literal stop words
// (e.g. "done", "fi").
func (p *parser) stmts(stops ...string) (sts []*Stmt) {
	if p.forbidNested {
		p.curErr("nested statements not allowed in this word")
	}
	q := p.quote
	gotEnd := true
	for p.tok != EOF {
		switch p.tok {
		case LITWORD:
			for _, stop := range stops {
				if p.val == stop {
					return
				}
			}
		case q:
			// Closing token of the current context (e.g. ")" or "`").
			return
		case SEMIFALL, DSEMIFALL:
			if q == DSEMICOLON {
				return
			}
		}
		// Two statements on one line must be separated explicitly.
		if !p.newLine && !gotEnd {
			p.curErr("statements must be separated by &, ; or a newline")
		}
		if p.tok == EOF {
			break
		}
		if s, end := p.getStmt(true); s == nil {
			p.invalidStmtStart()
		} else {
			sts = append(sts, s)
			gotEnd = end
		}
		p.got(STOPPED)
	}
	return
}

// invalidStmtStart records a targeted error for a token that cannot start
// a statement.
func (p *parser) invalidStmtStart() {
	switch p.tok {
	case SEMICOLON, AND, OR, LAND, LOR:
		p.curErr("%s can only immediately follow a statement", p.tok)
	case RPAREN:
		p.curErr("%s can only be used to close a subshell", p.tok)
	default:
		p.curErr("%s is not a valid start for a statement", p.tok)
	}
}

// getWord parses a word: a single literal in the common case, otherwise a
// sequence of word parts (quotes, expansions, literals).
func (p *parser) getWord() (w Word) {
	if p.tok == LITWORD {
		w.Parts = append(w.Parts, &Lit{ValuePos: p.pos, Value: p.val})
		p.next()
	} else {
		w.Parts = p.wordParts()
	}
	return
}

// gotWord parses a word and reports whether it was non-empty.
func (p *parser) gotWord() (Word, bool) {
	w := p.getWord()
	return w, len(w.Parts) > 0
}

// gotLit fills l from the current literal token, reporting whether one
// was present; l.ValuePos is set either way.
func (p *parser) gotLit(l *Lit) bool {
	l.ValuePos = p.pos
	if p.tok == LIT || p.tok == LITWORD {
		l.Value = p.val
		p.next()
		return true
	}
	return false
}

// wordParts collects consecutive word parts until whitespace or a
// non-word token ends the word.
func (p *parser) wordParts() (wps []WordPart) {
	for {
		n := p.wordPart()
		if n == nil {
			return
		}
		wps = append(wps, n)
		if p.spaced {
			return
		}
	}
}
// wordPart parses a single word part — a literal, parameter expansion,
// arithmetic expansion, command substitution, process substitution or a
// quoted section — returning nil when the current token cannot start one.
func (p *parser) wordPart() WordPart {
	switch p.tok {
	case LIT, LITWORD:
		l := &Lit{ValuePos: p.pos, Value: p.val}
		p.next()
		return l
	case p.quote:
		// The closing token of the current context ends the word.
		return nil
	case DOLLBR:
		return p.paramExp()
	case DOLLDP:
		// $(( ... )) arithmetic expansion.
		ar := &ArithmExp{Dollar: p.pos}
		old := p.quote
		p.quote = DRPAREN
		p.next()
		ar.X = p.arithmExpr(DOLLDP, ar.Dollar)
		ar.Rparen = p.arithmEnd(ar.Dollar, old)
		return ar
	case DOLLPR:
		// $( ... ) command substitution.
		cs := &CmdSubst{Left: p.pos}
		old := p.quote
		p.quote = RPAREN
		p.next()
		cs.Stmts = p.stmts()
		p.quote = old
		cs.Right = p.matched(cs.Left, LPAREN, RPAREN)
		return cs
	case DOLLAR:
		// Bare $: either a literal dollar or a short parameter expansion.
		var b byte
		if p.npos >= len(p.src) {
			p.errPass(io.EOF)
		} else {
			b = p.src[p.npos]
		}
		if p.tok == EOF || wordBreak(b) || b == '"' {
			l := &Lit{ValuePos: p.pos, Value: "$"}
			p.next()
			return l
		}
		pe := &ParamExp{Dollar: p.pos, Short: true}
		if b == '#' || b == '$' || b == '?' {
			// Special single-byte parameters are consumed directly.
			p.npos++
			p.pos++
			p.advanceBoth(LIT, string(b))
		} else {
			p.next()
		}
		p.gotLit(&pe.Param)
		return pe
	case CMDIN, CMDOUT:
		// <( ... ) and >( ... ) process substitutions.
		ps := &ProcSubst{Op: p.tok, OpPos: p.pos}
		old := p.quote
		p.quote = RPAREN
		p.next()
		ps.Stmts = p.stmts()
		p.quote = old
		ps.Rparen = p.matched(ps.OpPos, ps.Op, RPAREN)
		return ps
	case SQUOTE:
		sq := &SglQuoted{Quote: p.pos}
		bs, found := p.readUntil('\'')
		// Record line starts for any newlines inside the quoted span.
		rem := bs
		for {
			i := bytes.IndexByte(rem, '\n')
			if i < 0 {
				p.npos += len(rem)
				break
			}
			p.npos += i + 1
			p.f.lines = append(p.f.lines, p.npos)
			rem = rem[i+1:]
		}
		p.npos++
		if !found {
			p.posErr(sq.Pos(), `reached EOF without closing quote %s`, SQUOTE)
		}
		sq.Value = string(bs)
		p.next()
		return sq
	case DOLLSQ, DQUOTE, DOLLDQ:
		// $'...', "..." and $"..." quoted sections.
		q := &Quoted{Quote: p.tok, QuotePos: p.pos}
		stop := quotedStop(q.Quote)
		old := p.quote
		p.quote = stop
		p.next()
		q.Parts = p.wordParts()
		p.quote = old
		if !p.got(stop) {
			p.quoteErr(q.Pos(), stop)
		}
		return q
	case BQUOTE:
		// `...` backquoted command substitution.
		cs := &CmdSubst{Backquotes: true, Left: p.pos}
		old := p.quote
		p.quote = BQUOTE
		p.next()
		cs.Stmts = p.stmts()
		p.quote = old
		cs.Right = p.pos
		if !p.got(BQUOTE) {
			p.quoteErr(cs.Pos(), BQUOTE)
		}
		return cs
	}
	return nil
}
// quotedStop returns the token that closes the quoted section opened by
// start: the dollar-quote forms map to their plain counterparts, every
// other opener closes itself.
func quotedStop(start Token) Token {
	if start == DOLLSQ {
		return SQUOTE
	}
	if start == DOLLDQ {
		return DQUOTE
	}
	return start
}
// arithmExpr parses an arithmetic expression, building right-leaning
// binary expressions; ftok/fpos describe the construct that required the
// expression, for error messages.
func (p *parser) arithmExpr(ftok Token, fpos Pos) ArithmExpr {
	if p.tok == EOF || p.peekArithmEnd() {
		return nil
	}
	left := p.arithmExprBase(ftok, fpos)
	q := p.quote
	// Outside (( ... )) contexts, a space ends the expression.
	if q != DRPAREN && q != LPAREN && p.spaced {
		return left
	}
	switch p.tok {
	case EOF, STOPPED, RPAREN, SEMICOLON, DSEMICOLON, SEMIFALL, DSEMIFALL:
		return left
	case LIT, LITWORD:
		p.curErr("not a valid arithmetic operator: %s", p.val)
	}
	b := &BinaryExpr{
		OpPos: p.pos,
		Op:    p.tok,
		X:     left,
	}
	p.next()
	if q != DRPAREN && q != LPAREN && p.spaced {
		p.followErr(b.OpPos, b.Op.String(), "an expression")
	}
	if b.Y = p.arithmExpr(b.Op, b.OpPos); b.Y == nil {
		p.followErr(b.OpPos, b.Op.String(), "an expression")
	}
	return b
}

// arithmExprBase parses a primary arithmetic expression: a prefix unary
// expression, a parenthesised expression, or a word, optionally followed
// by a postfix ++/--.
func (p *parser) arithmExprBase(ftok Token, fpos Pos) ArithmExpr {
	if p.tok == INC || p.tok == DEC || p.tok == NOT {
		pre := &UnaryExpr{OpPos: p.pos, Op: p.tok}
		p.next()
		pre.X = p.arithmExprBase(pre.Op, pre.OpPos)
		return pre
	}
	var x ArithmExpr
	q := p.quote
	switch p.tok {
	case LPAREN:
		pe := &ParenExpr{Lparen: p.pos}
		old := p.quote
		p.quote = LPAREN
		p.next()
		pe.X = p.arithmExpr(LPAREN, pe.Lparen)
		if pe.X == nil {
			p.posErr(pe.Lparen, "parentheses must enclose an expression")
		}
		p.quote = old
		pe.Rparen = p.matched(pe.Lparen, LPAREN, RPAREN)
		x = pe
	case ADD, SUB:
		ue := &UnaryExpr{OpPos: p.pos, Op: p.tok}
		p.next()
		if q != DRPAREN && q != LPAREN && p.spaced {
			p.followErr(ue.OpPos, ue.Op.String(), "an expression")
		}
		ue.X = p.arithmExpr(ue.Op, ue.OpPos)
		if ue.X == nil {
			p.followErr(ue.OpPos, ue.Op.String(), "an expression")
		}
		x = ue
	default:
		w := p.followWordTok(ftok, fpos)
		x = &w
	}
	if q != DRPAREN && q != LPAREN && p.spaced {
		return x
	}
	if p.tok == INC || p.tok == DEC {
		u := &UnaryExpr{
			Post:  true,
			OpPos: p.pos,
			Op:    p.tok,
			X:     x,
		}
		p.next()
		return u
	}
	return x
}

// gotParamLit fills l from the current token when it can name a
// parameter: a literal, or the special $ and ? parameters.
func (p *parser) gotParamLit(l *Lit) bool {
	switch p.tok {
	case LIT, LITWORD:
		l.ValuePos, l.Value = p.pos, p.val
	case DOLLAR:
		l.ValuePos, l.Value = p.pos, "$"
	case QUEST:
		l.ValuePos, l.Value = p.pos, "?"
	default:
		l.ValuePos = p.pos
		return false
	}
	p.next()
	return true
}
func (p *parser) paramExp() *ParamExp {
pe := &ParamExp{Dollar: p.pos}
old := p.quote
p.quote = LBRACE
p.next()
pe.Length = p.got(HASH)
if !p.gotParamLit(&pe.Param) && !pe.Length {
p.posErr(pe.Dollar, "parameter expansion requires a literal")
}
if p.tok == RBRACE {
p.quote = old
p.next()
return pe
}
if p.tok == LBRACK {
lpos := p.pos
p.quote = RBRACK
p.next()
pe.Ind = &Index{Word: p.getWord()}
p.quote = LBRACE
p.matched(lpos, LBRACK, RBRACK)
}
if p.tok == RBRACE {
p.quote = old
p.next()
return pe
}
if pe.Length {
p.curErr(`can only get length of a simple parameter`)
}
if p.tok == QUO || p.tok == DQUO {
pe.Repl = &Replace{All: p.tok == DQUO}
p.quote = QUO
p.next()
pe.Repl.Orig = p.getWord()
if p.tok == QUO {
p.quote = RBRACE
p.next()
pe.Repl.With = p.getWord()
}
} else {
pe.Exp = &Expansion{Op: p.tok}
p.quote = RBRACE
p.next()
pe.Exp.Word = p.getWord()
}
p.quote = old
p.matched(pe.Dollar, DOLLBR, RBRACE)
return pe
}
// peekArithmEnd reports whether the arithmetic expression is about to
// close, i.e. the current RPAREN token is immediately followed by a
// second ')' byte in the source.
func (p *parser) peekArithmEnd() bool {
	if p.tok != RPAREN {
		return false
	}
	return p.npos < len(p.src) && p.src[p.npos] == ')'
}
// arithmEnd consumes the closing "))" of an arithmetic expression that
// began at left, restoring the previous quote state old. If the
// expression is not properly closed, a matching error is recorded.
// The position of the closing token is returned.
func (p *parser) arithmEnd(left Pos, old Token) Pos {
	if p.peekArithmEnd() {
		// p.tok is RPAREN and the next source byte is ')'; skip that
		// byte so both parens are consumed together.
		p.npos++
	} else {
		p.matchingErr(left, DLPAREN, DRPAREN)
	}
	p.quote = old
	pos := p.pos
	p.next()
	return pos
}
// peekEnd reports whether the current token terminates a statement:
// end of input, a newline, or a semicolon.
func (p *parser) peekEnd() bool {
	if p.newLine {
		return true
	}
	return p.tok == EOF || p.tok == SEMICOLON
}
// peekStop reports whether the current token stops a command: a
// statement end, a command separator or pipe operator, the token that
// closes the current quote state, or a case-clause terminator when
// inside a case clause.
func (p *parser) peekStop() bool {
	if p.peekEnd() {
		return true
	}
	switch p.tok {
	case AND, OR, LAND, LOR, PIPEALL, p.quote:
		return true
	}
	return p.quote == DSEMICOLON && dsemicolon(p.tok)
}
var identRe = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*$`)
// assignSplit returns the index of the '=' (or of the '+' in a '+='
// append) when s starts a well-formed assignment to a valid shell
// identifier, and -1 otherwise.
func assignSplit(s string) int {
	i := strings.IndexByte(s, '=')
	switch {
	case i <= 0:
		return -1
	case s[i-1] == '+':
		// point at the '+' of a '+=' append
		i--
	}
	if identRe.MatchString(s[:i]) {
		return i
	}
	return -1
}
func (p *parser) getAssign() (*Assign, bool) {
i := assignSplit(p.val)
if i < 0 {
return nil, false
}
as := &Assign{}
as.Name = &Lit{ValuePos: p.pos, Value: p.val[:i]}
if p.val[i] == '+' {
as.Append = true
i++
}
start := &Lit{ValuePos: p.pos + 1, Value: p.val[i+1:]}
if start.Value != "" {
start.ValuePos += Pos(i)
as.Value.Parts = append(as.Value.Parts, start)
}
p.next()
if p.spaced {
return as, true
}
if start.Value == "" && p.tok == LPAREN {
ae := &ArrayExpr{Lparen: p.pos}
p.next()
for p.tok != EOF && p.tok != RPAREN {
if w, ok := p.gotWord(); !ok {
p.curErr("array elements must be words")
} else {
ae.List = append(ae.List, w)
}
}
ae.Rparen = p.matched(ae.Lparen, LPAREN, RPAREN)
as.Value.Parts = append(as.Value.Parts, ae)
} else if !p.peekStop() {
w := p.getWord()
if start.Value == "" {
as.Value = w
} else {
as.Value.Parts = append(as.Value.Parts, w.Parts...)
}
}
return as, true
}
// peekRedir reports whether the current token starts a redirection.
// A literal word counts too when immediately followed by '>' or '<'
// in the raw source (e.g. the "2" in "2>err"), hence the byte check.
func (p *parser) peekRedir() bool {
	switch p.tok {
	case LITWORD:
		return p.npos < len(p.src) && (p.src[p.npos] == '>' || p.src[p.npos] == '<')
	case GTR, SHR, LSS, DPLIN, DPLOUT, RDRINOUT,
		SHL, DHEREDOC, WHEREDOC, RDRALL, APPALL:
		return true
	}
	return false
}
func (p *parser) doRedirect(s *Stmt) {
r := &Redirect{}
var l Lit
if p.gotLit(&l) {
r.N = &l
}
r.Op, r.OpPos = p.tok, p.pos
p.next()
switch r.Op {
case SHL, DHEREDOC:
p.stopNewline = true
p.forbidNested = true
r.Word = p.followWordTok(r.Op, r.OpPos)
p.forbidNested = false
r.Hdoc = &Lit{}
p.heredocs = append(p.heredocs, r)
p.got(STOPPED)
default:
r.Word = p.followWordTok(r.Op, r.OpPos)
}
s.Redirs = append(s.Redirs, r)
}
func (p *parser) getStmt(readEnd bool) (s *Stmt, gotEnd bool) {
s = &Stmt{Position: p.pos}
if p.gotRsrv("!") {
s.Negated = true
}
preLoop:
for {
switch p.tok {
case LIT, LITWORD:
if as, ok := p.getAssign(); ok {
s.Assigns = append(s.Assigns, as)
} else if p.npos < len(p.src) && (p.src[p.npos] == '>' || p.src[p.npos] == '<') {
p.doRedirect(s)
} else {
break preLoop
}
case GTR, SHR, LSS, DPLIN, DPLOUT, RDRINOUT,
SHL, DHEREDOC, WHEREDOC, RDRALL, APPALL:
p.doRedirect(s)
default:
break preLoop
}
if p.peekEnd() {
gotEnd = p.gotSameLine(SEMICOLON)
return
}
}
if s = p.gotStmtPipe(s); s == nil {
return
}
switch p.tok {
case LAND, LOR:
s = p.binaryCmdAndOr(s)
case AND:
p.next()
s.Background = true
gotEnd = true
}
if readEnd && p.gotSameLine(SEMICOLON) {
gotEnd = true
}
return
}
func (p *parser) gotStmtPipe(s *Stmt) *Stmt {
switch p.tok {
case LPAREN:
s.Cmd = p.subshell()
case LITWORD:
switch p.val {
case "}":
p.curErr("%s can only be used to close a block", p.val)
case "{":
s.Cmd = p.block()
case "if":
s.Cmd = p.ifClause()
case "while":
s.Cmd = p.whileClause()
case "until":
s.Cmd = p.untilClause()
case "for":
s.Cmd = p.forClause()
case "case":
s.Cmd = p.caseClause()
case "declare":
s.Cmd = p.declClause(false)
case "local":
s.Cmd = p.declClause(true)
case "eval":
s.Cmd = p.evalClause()
case "let":
s.Cmd = p.letClause()
case "function":
s.Cmd = p.bashFuncDecl()
default:
name := Lit{ValuePos: p.pos, Value: p.val}
w := p.getWord()
if p.gotSameLine(LPAREN) {
p.follow(name.ValuePos, "foo(", RPAREN)
s.Cmd = p.funcDecl(name, name.ValuePos)
} else {
s.Cmd = p.callExpr(s, w)
}
}
case LIT, DOLLBR, DOLLDP, DOLLPR, DOLLAR, CMDIN, CMDOUT,
SQUOTE, DOLLSQ, DQUOTE, DOLLDQ, BQUOTE:
w := p.getWord()
if p.gotSameLine(LPAREN) && p.err == nil {
p.posErr(w.Pos(), "invalid func name: %s", wordStr(p.f, w))
}
s.Cmd = p.callExpr(s, w)
}
for !p.newLine && p.peekRedir() {
p.doRedirect(s)
}
if s.Cmd == nil && len(s.Redirs) == 0 && !s.Negated && len(s.Assigns) == 0 {
return nil
}
if p.tok == OR || p.tok == PIPEALL {
s = p.binaryCmdPipe(s)
}
return s
}
func (p *parser) binaryCmdAndOr(left *Stmt) *Stmt {
b := &BinaryCmd{OpPos: p.pos, Op: p.tok, X: left}
p.next()
p.got(STOPPED)
if b.Y, _ = p.getStmt(false); b.Y == nil {
p.followErr(b.OpPos, b.Op.String(), "a statement")
}
return &Stmt{Position: left.Position, Cmd: b}
}
func (p *parser) binaryCmdPipe(left *Stmt) *Stmt {
b := &BinaryCmd{OpPos: p.pos, Op: p.tok, X: left}
p.next()
p.got(STOPPED)
if b.Y = p.gotStmtPipe(&Stmt{Position: p.pos}); b.Y == nil {
p.followErr(b.OpPos, b.Op.String(), "a statement")
}
return &Stmt{Position: left.Position, Cmd: b}
}
func (p *parser) subshell() *Subshell {
s := &Subshell{Lparen: p.pos}
old := p.quote
p.quote = RPAREN
p.next()
s.Stmts = p.stmts()
p.quote = old
s.Rparen = p.matched(s.Lparen, LPAREN, RPAREN)
if len(s.Stmts) == 0 {
p.posErr(s.Lparen, "a subshell must contain at least one statement")
}
return s
}
func (p *parser) block() *Block {
b := &Block{Lbrace: p.pos}
p.next()
b.Stmts = p.stmts("}")
b.Rbrace = p.pos
if !p.gotRsrv("}") {
p.posErr(b.Lbrace, `reached %s without matching word { with }`, p.tok)
}
return b
}
func (p *parser) ifClause() *IfClause {
ic := &IfClause{If: p.pos}
p.next()
ic.Cond = p.cond("if", ic.If, "then")
ic.Then = p.followRsrv(ic.If, "if [stmts]", "then")
ic.ThenStmts = p.followStmts("then", ic.Then, "fi", "elif", "else")
elifPos := p.pos
for p.gotRsrv("elif") {
elf := &Elif{Elif: elifPos}
elf.Cond = p.cond("elif", elf.Elif, "then")
elf.Then = p.followRsrv(elf.Elif, "elif [stmts]", "then")
elf.ThenStmts = p.followStmts("then", elf.Then, "fi", "elif", "else")
ic.Elifs = append(ic.Elifs, elf)
elifPos = p.pos
}
elsePos := p.pos
if p.gotRsrv("else") {
ic.Else = elsePos
ic.ElseStmts = p.followStmts("else", ic.Else, "fi")
}
ic.Fi = p.stmtEnd(ic, "if", "fi")
return ic
}
func (p *parser) cond(left string, lpos Pos, stop string) Cond {
if p.tok == LPAREN && p.npos < len(p.src) && p.src[p.npos] == '(' {
p.npos++
c := &CStyleCond{Lparen: p.pos}
old := p.quote
p.quote = DRPAREN
p.next()
c.X = p.arithmExpr(DLPAREN, c.Lparen)
c.Rparen = p.arithmEnd(c.Lparen, old)
p.gotSameLine(SEMICOLON)
return c
}
stmts := p.followStmts(left, lpos, stop)
if len(stmts) == 0 {
return nil
}
return &StmtCond{Stmts: stmts}
}
func (p *parser) whileClause() *WhileClause {
wc := &WhileClause{While: p.pos}
p.next()
wc.Cond = p.cond("while", wc.While, "do")
wc.Do = p.followRsrv(wc.While, "while [stmts]", "do")
wc.DoStmts = p.followStmts("do", wc.Do, "done")
wc.Done = p.stmtEnd(wc, "while", "done")
return wc
}
func (p *parser) untilClause() *UntilClause {
uc := &UntilClause{Until: p.pos}
p.next()
uc.Cond = p.cond("until", uc.Until, "do")
uc.Do = p.followRsrv(uc.Until, "until [stmts]", "do")
uc.DoStmts = p.followStmts("do", uc.Do, "done")
uc.Done = p.stmtEnd(uc, "until", "done")
return uc
}
func (p *parser) forClause() *ForClause {
fc := &ForClause{For: p.pos}
p.next()
fc.Loop = p.loop(fc.For)
fc.Do = p.followRsrv(fc.For, "for foo [in words]", "do")
fc.DoStmts = p.followStmts("do", fc.Do, "done")
fc.Done = p.stmtEnd(fc, "for", "done")
return fc
}
func (p *parser) loop(forPos Pos) Loop {
if p.tok == LPAREN && p.npos < len(p.src) && p.src[p.npos] == '(' {
p.npos++
cl := &CStyleLoop{Lparen: p.pos}
old := p.quote
p.quote = DRPAREN
p.next()
cl.Init = p.arithmExpr(DLPAREN, cl.Lparen)
scPos := p.pos
p.follow(p.pos, "expression", SEMICOLON)
cl.Cond = p.arithmExpr(SEMICOLON, scPos)
scPos = p.pos
p.follow(p.pos, "expression", SEMICOLON)
cl.Post = p.arithmExpr(SEMICOLON, scPos)
cl.Rparen = p.arithmEnd(cl.Lparen, old)
p.gotSameLine(SEMICOLON)
return cl
}
wi := &WordIter{}
if !p.gotLit(&wi.Name) {
p.followErr(forPos, "for", "a literal")
}
if p.gotRsrv("in") {
for !p.peekEnd() {
if w, ok := p.gotWord(); !ok {
p.curErr("word list can only contain words")
} else {
wi.List = append(wi.List, w)
}
}
p.gotSameLine(SEMICOLON)
} else if !p.gotSameLine(SEMICOLON) && !p.newLine {
p.followErr(forPos, "for foo", `"in", ; or a newline`)
}
return wi
}
func (p *parser) caseClause() *CaseClause {
cc := &CaseClause{Case: p.pos}
p.next()
cc.Word = p.followWord("case", cc.Case)
p.followRsrv(cc.Case, "case x", "in")
cc.List = p.patLists()
cc.Esac = p.stmtEnd(cc, "case", "esac")
return cc
}
func (p *parser) patLists() (pls []*PatternList) {
if p.gotSameLine(SEMICOLON) {
return
}
for p.tok != EOF && !(p.tok == LITWORD && p.val == "esac") {
pl := &PatternList{}
p.got(LPAREN)
for p.tok != EOF {
if w, ok := p.gotWord(); !ok {
p.curErr("case patterns must consist of words")
} else {
pl.Patterns = append(pl.Patterns, w)
}
if p.tok == RPAREN {
break
}
if !p.got(OR) {
p.curErr("case patterns must be separated with |")
}
}
old := p.quote
p.quote = DSEMICOLON
p.next()
pl.Stmts = p.stmts("esac")
p.quote = old
if !dsemicolon(p.tok) {
pl.Op, pl.OpPos = DSEMICOLON, p.pos
pls = append(pls, pl)
break
}
pl.Op, pl.OpPos = p.tok, p.pos
p.next()
pls = append(pls, pl)
}
return
}
func (p *parser) declClause(local bool) *DeclClause {
ds := &DeclClause{Declare: p.pos, Local: local}
p.next()
for p.tok == LITWORD && p.val[0] == '-' {
ds.Opts = append(ds.Opts, p.getWord())
}
for !p.peekStop() {
if as, ok := p.getAssign(); ok {
ds.Assigns = append(ds.Assigns, as)
} else if w, ok := p.gotWord(); !ok {
p.followErr(p.pos, "declare", "words")
} else {
ds.Assigns = append(ds.Assigns, &Assign{Value: w})
}
}
return ds
}
func (p *parser) evalClause() *EvalClause {
ec := &EvalClause{Eval: p.pos}
p.next()
ec.Stmt, _ = p.getStmt(false)
return ec
}
func (p *parser) letClause() *LetClause {
lc := &LetClause{Let: p.pos}
old := p.quote
p.quote = DLPAREN
p.next()
p.stopNewline = true
for !p.peekStop() && p.tok != STOPPED && !dsemicolon(p.tok) {
x := p.arithmExpr(LET, lc.Let)
if x == nil {
p.followErr(p.pos, "let", "arithmetic expressions")
}
lc.Exprs = append(lc.Exprs, x)
}
if len(lc.Exprs) == 0 {
p.posErr(lc.Let, "let clause requires at least one expression")
}
p.stopNewline = false
p.quote = old
p.got(STOPPED)
return lc
}
func (p *parser) bashFuncDecl() *FuncDecl {
fpos := p.pos
p.next()
if p.tok != LITWORD {
w := p.followWord("function", fpos)
if p.err == nil {
p.posErr(w.Pos(), "invalid func name: %s", wordStr(p.f, w))
}
}
name := Lit{ValuePos: p.pos, Value: p.val}
p.next()
if p.gotSameLine(LPAREN) {
p.follow(name.ValuePos, "foo(", RPAREN)
}
return p.funcDecl(name, fpos)
}
func (p *parser) callExpr(s *Stmt, w Word) *CallExpr {
ce := &CallExpr{Args: []Word{w}}
for !p.peekStop() {
switch p.tok {
case STOPPED:
p.next()
case LITWORD:
if p.npos < len(p.src) && (p.src[p.npos] == '>' || p.src[p.npos] == '<') {
p.doRedirect(s)
continue
}
fallthrough
case LIT, DOLLBR, DOLLDP, DOLLPR, DOLLAR, CMDIN, CMDOUT,
SQUOTE, DOLLSQ, DQUOTE, DOLLDQ, BQUOTE:
ce.Args = append(ce.Args, p.getWord())
case GTR, SHR, LSS, DPLIN, DPLOUT, RDRINOUT,
SHL, DHEREDOC, WHEREDOC, RDRALL, APPALL:
p.doRedirect(s)
default:
p.curErr("a command can only contain words and redirects")
}
}
return ce
}
func (p *parser) funcDecl(name Lit, pos Pos) *FuncDecl {
fd := &FuncDecl{
Position: pos,
BashStyle: pos != name.ValuePos,
Name: name,
}
if fd.Body, _ = p.getStmt(false); fd.Body == nil {
p.followErr(fd.Pos(), "foo()", "a statement")
}
return fd
}
parse: inline advanceTok/Both
These functions were trivial, and using the assignment directly is
clearer.
// Copyright (c) 2016, Daniel Martí <mvdan@mvdan.cc>
// See LICENSE for licensing information
package sh
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"regexp"
"strconv"
"strings"
)
// Mode controls the parser behaviour via a set of flags.
type Mode uint
const (
ParseComments Mode = 1 << iota // add comments to the AST
)
// Parse reads and parses a shell program with an optional name. It
// returns the parsed program if no issues were encountered. Otherwise,
// an error is returned.
//
// The type of src must be []byte, string or io.Reader.
func Parse(src interface{}, name string, mode Mode) (*File, error) {
	p := parser{
		f:    &File{Name: name},
		mode: mode,
	}
	if p.src, p.err = getSource(src); p.err != nil {
		return nil, p.err
	}
	// lines[0] is implicit: line 1 starts at byte offset 0.
	p.f.lines = make([]int, 1, 16)
	// prime the tokenizer, then parse statements until EOF
	p.next()
	p.f.Stmts = p.stmts()
	return p.f, p.err
}
// getSource normalizes the accepted source types into a byte slice:
// []byte is used directly, a string is converted, and an io.Reader is
// read to completion. Any other type is an error.
func getSource(src interface{}) ([]byte, error) {
	if b, ok := src.([]byte); ok {
		return b, nil
	}
	if s, ok := src.(string); ok {
		return []byte(s), nil
	}
	if r, ok := src.(io.Reader); ok {
		return ioutil.ReadAll(r)
	}
	return nil, fmt.Errorf("invalid src type: %T", src)
}
// parser holds the state of the single-pass recursive-descent shell
// parser, including its embedded tokenizer state.
type parser struct {
	src []byte // input bytes
	f   *File  // file being built, including line offsets
	mode Mode

	spaced, newLine bool // whether the current token follows a space / newline

	stopNewline, forbidNested bool // heredoc-related tokenizer switches

	err error // first error encountered; parsing stops after it

	tok Token  // current token
	val string // current literal value, when tok is LIT or LITWORD

	buf [8]byte // small scratch buffer backing short literals

	pos  Pos // position of the current token (1-based byte offset)
	npos int // offset of the next byte to tokenize

	quote Token // token closing the current quote state, e.g. RBRACE inside ${}

	// list of pending heredoc bodies
	heredocs []*Redirect
}
// bytes that form or start a token
// regOps reports whether b forms or starts a token in regular
// (unquoted) parsing mode.
func regOps(b byte) bool {
	switch b {
	case ';', '"', '\'', '(', ')', '$', '|', '&', '>', '<', '`':
		return true
	}
	return false
}
// tokenize these inside parameter expansions
// paramOps reports whether b is tokenized inside a ${...} parameter
// expansion.
func paramOps(b byte) bool {
	switch b {
	case '}', '#', ':', '-', '+', '=', '?', '%', '[', '/':
		return true
	}
	return false
}
// tokenize these inside arithmetic expansions
// arithmOps reports whether b is tokenized inside an arithmetic
// expansion.
func arithmOps(b byte) bool {
	switch b {
	case '+', '-', '!', '*', '/', '%', '(', ')', '^',
		'<', '>', ':', '=', ',', '?', '|', '&':
		return true
	}
	return false
}
// next advances the tokenizer to the next token, dispatching on the
// current quote state (p.quote). It maintains p.spaced/p.newLine, the
// file's line offsets, and — after the newline following a STOPPED
// token — reads any pending heredoc bodies.
//
// Fix: the DQUOTE sub-switch had a '\n' case whose body was identical
// to the default case; the dead duplicate branch is merged into default.
func (p *parser) next() {
	if p.tok == EOF {
		return
	}
	if p.npos >= len(p.src) {
		p.errPass(io.EOF)
		return
	}
	b := p.src[p.npos]
	if p.tok == STOPPED && b == '\n' {
		// newline ending the previous statement; heredoc bodies
		// start immediately after it
		p.npos++
		p.f.lines = append(p.f.lines, p.npos)
		p.doHeredocs()
		if p.npos >= len(p.src) {
			p.errPass(io.EOF)
			return
		}
		b = p.src[p.npos]
		p.spaced, p.newLine = true, true
	} else {
		p.spaced, p.newLine = false, false
	}
	q := p.quote
	switch q {
	case QUO:
		// inside the pattern of a ${x/pat/repl} replacement
		p.pos = Pos(p.npos + 1)
		switch b {
		case '}':
			p.npos++
			p.tok = RBRACE
		case '/':
			p.npos++
			p.tok = QUO
		case '`', '"', '$':
			p.tok = p.regToken(b)
		default:
			p.advanceLitOther(q)
		}
		return
	case DQUOTE:
		// inside double quotes; only `, " and $ are special
		p.pos = Pos(p.npos + 1)
		switch b {
		case '`', '"', '$':
			p.tok = p.dqToken(b)
		default:
			// includes '\n'; advanceLitDquote records line offsets itself
			p.advanceLitDquote()
		}
		return
	case RBRACE:
		// inside the value part of a ${...} expansion
		p.pos = Pos(p.npos + 1)
		switch b {
		case '}':
			p.npos++
			p.tok = RBRACE
		case '`', '"', '$':
			p.tok = p.regToken(b)
		default:
			p.advanceLitOther(q)
		}
		return
	case SQUOTE:
		// inside single quotes; nothing is special but the close quote
		p.pos = Pos(p.npos + 1)
		if b == '\'' {
			p.npos++
			p.tok = SQUOTE
		} else {
			p.advanceLitOther(q)
		}
		return
	}
skipSpace:
	for {
		switch b {
		case ' ', '\t', '\r':
			p.spaced = true
			p.npos++
		case '\n':
			if p.stopNewline {
				// a heredoc operator was just read; stop here so
				// its body can be collected
				p.stopNewline = false
				p.tok = STOPPED
				return
			}
			p.spaced = true
			if p.npos < len(p.src) {
				p.npos++
			}
			p.f.lines = append(p.f.lines, p.npos)
			p.newLine = true
		case '\\':
			if p.npos < len(p.src)-1 && p.src[p.npos+1] == '\n' {
				// escaped newline: line continuation
				p.npos += 2
				p.f.lines = append(p.f.lines, p.npos)
			} else {
				break skipSpace
			}
		default:
			break skipSpace
		}
		if p.npos >= len(p.src) {
			p.errPass(io.EOF)
			return
		}
		b = p.src[p.npos]
	}
	p.pos = Pos(p.npos + 1)
	switch {
	case q == ILLEGAL, q == RPAREN, q == BQUOTE, q == DSEMICOLON:
		switch b {
		case ';', '"', '\'', '(', ')', '$', '|', '&', '>', '<', '`':
			p.tok = p.regToken(b)
		case '#':
			// comment runs until end of line; recorded only when
			// ParseComments is set
			p.npos++
			bs, _ := p.readUntil('\n')
			p.npos += len(bs)
			if p.mode&ParseComments > 0 {
				p.f.Comments = append(p.f.Comments, &Comment{
					Hash: p.pos,
					Text: string(bs),
				})
			}
			p.next()
		default:
			p.advanceLitNone()
		}
	case q == LBRACE && paramOps(b):
		p.tok = p.paramToken(b)
	case (q == DLPAREN || q == DRPAREN || q == LPAREN) && arithmOps(b):
		p.tok = p.arithmToken(b)
	case q == RBRACK && b == ']':
		p.npos++
		p.tok = RBRACK
	case regOps(b):
		p.tok = p.regToken(b)
	default:
		p.advanceLitOther(q)
	}
}
func (p *parser) advanceLitOther(q Token) {
bs := p.buf[:0]
for {
if p.npos >= len(p.src) {
p.tok, p.val = LIT, string(bs)
return
}
b := p.src[p.npos]
switch {
case b == '\\': // escaped byte follows
if p.npos == len(p.src)-1 {
p.npos++
bs = append(bs, '\\')
p.tok, p.val = LIT, string(bs)
return
}
b = p.src[p.npos+1]
p.npos += 2
if b == '\n' {
p.f.lines = append(p.f.lines, p.npos)
} else {
bs = append(bs, '\\', b)
}
continue
case q == SQUOTE:
switch b {
case '\n':
p.f.lines = append(p.f.lines, p.npos+1)
case '\'':
p.tok, p.val = LIT, string(bs)
return
}
case b == '`', b == '$':
p.tok, p.val = LIT, string(bs)
return
case q == RBRACE:
if b == '}' || b == '"' {
p.tok, p.val = LIT, string(bs)
return
}
case q == LBRACE && paramOps(b), q == RBRACK && b == ']':
p.tok, p.val = LIT, string(bs)
return
case q == QUO:
if b == '/' || b == '}' {
p.tok, p.val = LIT, string(bs)
return
}
case wordBreak(b), regOps(b):
p.tok, p.val = LIT, string(bs)
return
case (q == DLPAREN || q == DRPAREN || q == LPAREN) && arithmOps(b):
p.tok, p.val = LIT, string(bs)
return
}
bs = append(bs, p.src[p.npos])
p.npos++
}
}
func (p *parser) advanceLitNone() {
bs := p.buf[:0]
for {
if p.npos >= len(p.src) {
p.tok, p.val = LITWORD, string(bs)
return
}
switch p.src[p.npos] {
case '\\': // escaped byte follows
if p.npos == len(p.src)-1 {
p.npos++
bs = append(bs, '\\')
p.tok, p.val = LIT, string(bs)
return
}
b := p.src[p.npos+1]
p.npos += 2
if b == '\n' {
p.f.lines = append(p.f.lines, p.npos)
} else {
bs = append(bs, '\\', b)
}
case ' ', '\t', '\n', '\r', '&', '>', '<', '|', ';', '(', ')', '`':
p.tok, p.val = LITWORD, string(bs)
return
case '"', '\'', '$':
p.tok, p.val = LIT, string(bs)
return
default:
bs = append(bs, p.src[p.npos])
p.npos++
}
}
}
func (p *parser) advanceLitDquote() {
var i int
loop:
for i = p.npos; i < len(p.src); i++ {
switch p.src[i] {
case '\\': // escaped byte follows
i++
if i == len(p.src) {
break loop
}
if p.src[i] == '\n' {
p.f.lines = append(p.f.lines, i+1)
}
case '`', '"', '$':
break loop
case '\n':
p.f.lines = append(p.f.lines, i+1)
}
}
bs := make([]byte, i-p.npos)
copy(bs, p.src[p.npos:i])
p.npos = i
p.tok, p.val = LIT, string(bs)
}
// readUntil returns the bytes from the current offset up to (but not
// including) the first occurrence of b, and whether b was found at all.
// It does not advance p.npos; callers do that themselves.
func (p *parser) readUntil(b byte) ([]byte, bool) {
	rem := p.src[p.npos:]
	if i := bytes.IndexByte(rem, b); i >= 0 {
		return rem[:i], true
	}
	// b never occurs; the rest of the input is returned
	return rem, false
}
func (p *parser) doHeredocs() {
for _, r := range p.heredocs {
end := unquotedWordStr(p.f, &r.Word)
r.Hdoc.ValuePos = Pos(p.npos + 1)
r.Hdoc.Value, _ = p.readHdocBody(end, r.Op == DHEREDOC)
}
p.heredocs = nil
}
func (p *parser) readHdocBody(end string, noTabs bool) (string, bool) {
var buf bytes.Buffer
for p.npos < len(p.src) {
bs, found := p.readUntil('\n')
p.npos += len(bs) + 1
if found {
p.f.lines = append(p.f.lines, p.npos)
}
line := string(bs)
if line == end || (noTabs && strings.TrimLeft(line, "\t") == end) {
// add trailing tabs
buf.Write(bs[:len(bs)-len(end)])
return buf.String(), true
}
buf.Write(bs)
if found {
buf.WriteByte('\n')
}
}
return buf.String(), false
}
// wordBreak reports whether b ends a word: whitespace or a shell
// operator byte.
func wordBreak(b byte) bool {
	switch b {
	case ' ', '\t', '\r', '\n', '&', '>', '<', '|', ';', '(', ')', '`':
		return true
	}
	return false
}
// got consumes the current token and reports true if it equals tok;
// otherwise it leaves the parser untouched and reports false.
func (p *parser) got(tok Token) bool {
	if p.tok != tok {
		return false
	}
	p.next()
	return true
}
// gotRsrv consumes the current token if it is the literal word val,
// e.g. a reserved word such as "fi" or "done".
func (p *parser) gotRsrv(val string) bool {
	if p.tok != LITWORD || p.val != val {
		return false
	}
	p.next()
	return true
}
// gotSameLine is like got, but refuses to consume a token that was
// preceded by a newline.
func (p *parser) gotSameLine(tok Token) bool {
	if p.newLine || p.tok != tok {
		return false
	}
	p.next()
	return true
}
// readableStr prepares a token string for use in an error message.
// Lowercase words (reserved words like "fi") are quoted so they stand
// out; operator tokens like & or } are left as-is. An empty string is
// returned unchanged rather than panicking on the s[0] access.
func readableStr(s string) string {
	if s == "" {
		return s
	}
	// don't quote tokens like & or }
	if s[0] >= 'a' && s[0] <= 'z' {
		return strconv.Quote(s)
	}
	return s
}
func (p *parser) followErr(pos Pos, left, right string) {
leftStr := readableStr(left)
p.posErr(pos, "%s must be followed by %s", leftStr, right)
}
func (p *parser) follow(lpos Pos, left string, tok Token) Pos {
pos := p.pos
if !p.got(tok) {
p.followErr(lpos, left, fmt.Sprintf(`%q`, tok))
}
return pos
}
func (p *parser) followRsrv(lpos Pos, left, val string) Pos {
pos := p.pos
if !p.gotRsrv(val) {
p.followErr(lpos, left, fmt.Sprintf(`%q`, val))
}
return pos
}
func (p *parser) followStmts(left string, lpos Pos, stops ...string) []*Stmt {
if p.gotSameLine(SEMICOLON) {
return nil
}
sts := p.stmts(stops...)
if len(sts) < 1 && !p.newLine {
p.followErr(lpos, left, "a statement list")
}
return sts
}
func (p *parser) followWordTok(tok Token, pos Pos) Word {
w, ok := p.gotWord()
if !ok {
p.followErr(pos, tok.String(), "a word")
}
return w
}
func (p *parser) followWord(s string, pos Pos) Word {
w, ok := p.gotWord()
if !ok {
p.followErr(pos, s, "a word")
}
return w
}
func (p *parser) stmtEnd(n Node, start, end string) Pos {
pos := p.pos
if !p.gotRsrv(end) {
p.posErr(n.Pos(), `%s statement must end with %q`, start, end)
}
return pos
}
func (p *parser) quoteErr(lpos Pos, quote Token) {
p.posErr(lpos, `reached %s without closing quote %s`, p.tok, quote)
}
func (p *parser) matchingErr(lpos Pos, left, right Token) {
p.posErr(lpos, `reached %s without matching token %s with %s`,
p.tok, left, right)
}
// matched consumes the closing token right and returns its position,
// recording a matching error against the opening token left at lpos if
// the close is missing.
func (p *parser) matched(lpos Pos, left, right Token) Pos {
	pos := p.pos
	if !p.got(right) {
		p.matchingErr(lpos, left, right)
	}
	return pos
}
// errPass records err and puts the parser into its terminal EOF state.
// A plain io.EOF is swallowed — running out of input is not itself an
// error — and only the first error ever recorded is kept.
func (p *parser) errPass(err error) {
	if p.err == nil {
		if err != io.EOF {
			p.err = err
		}
		p.tok = EOF
	}
}
// ParseError represents an error found when parsing a source file.
type ParseError struct {
	Position
	// Filename is the name given to Parse, if any; Text is the
	// human-readable description of the error.
	Filename, Text string
}
// Error formats the parse error as "file:line:col: text", omitting the
// filename prefix when none was given.
func (e *ParseError) Error() string {
	msg := fmt.Sprintf("%d:%d: %s", e.Line, e.Column, e.Text)
	if e.Filename == "" {
		return msg
	}
	return e.Filename + ":" + msg
}
// posErr records a formatted parse error at pos, resolving it to a
// line/column Position. Via errPass, only the first error wins.
func (p *parser) posErr(pos Pos, format string, a ...interface{}) {
	p.errPass(&ParseError{
		Position: p.f.Position(pos),
		Filename: p.f.Name,
		Text:     fmt.Sprintf(format, a...),
	})
}
// curErr records a formatted parse error at the current token position.
func (p *parser) curErr(format string, a ...interface{}) {
	p.posErr(p.pos, format, a...)
}
// dsemicolon reports whether t is one of the case-clause terminators
// ;;, ;& or ;;&.
func dsemicolon(t Token) bool {
	switch t {
	case DSEMICOLON, SEMIFALL, DSEMIFALL:
		return true
	}
	return false
}
// stmts parses statements until EOF, the token closing the current
// quote state, or any of the stop words (e.g. "fi", "done"). The
// terminating token or word is left unconsumed for the caller.
func (p *parser) stmts(stops ...string) (sts []*Stmt) {
	if p.forbidNested {
		p.curErr("nested statements not allowed in this word")
	}
	q := p.quote
	gotEnd := true // whether the previous statement was properly terminated
	for p.tok != EOF {
		switch p.tok {
		case LITWORD:
			for _, stop := range stops {
				if p.val == stop {
					return
				}
			}
		case q:
			return
		case SEMIFALL, DSEMIFALL:
			// ;& and ;;& only end the list inside a case clause
			if q == DSEMICOLON {
				return
			}
		}
		if !p.newLine && !gotEnd {
			p.curErr("statements must be separated by &, ; or a newline")
		}
		// curErr above may have moved us to EOF; re-check before parsing
		if p.tok == EOF {
			break
		}
		if s, end := p.getStmt(true); s == nil {
			p.invalidStmtStart()
		} else {
			sts = append(sts, s)
			gotEnd = end
		}
		p.got(STOPPED)
	}
	return
}
func (p *parser) invalidStmtStart() {
switch p.tok {
case SEMICOLON, AND, OR, LAND, LOR:
p.curErr("%s can only immediately follow a statement", p.tok)
case RPAREN:
p.curErr("%s can only be used to close a subshell", p.tok)
default:
p.curErr("%s is not a valid start for a statement", p.tok)
}
}
func (p *parser) getWord() (w Word) {
if p.tok == LITWORD {
w.Parts = append(w.Parts, &Lit{ValuePos: p.pos, Value: p.val})
p.next()
} else {
w.Parts = p.wordParts()
}
return
}
// gotWord reads a word and reports whether it contains any parts,
// i.e. whether a word was actually present.
func (p *parser) gotWord() (Word, bool) {
	w := p.getWord()
	if len(w.Parts) == 0 {
		return w, false
	}
	return w, true
}
// gotLit consumes the current token into l if it is a literal,
// reporting whether it did so. l.ValuePos is set either way.
func (p *parser) gotLit(l *Lit) bool {
	l.ValuePos = p.pos
	if p.tok == LIT || p.tok == LITWORD {
		l.Value = p.val
		p.next()
		return true
	}
	return false
}
func (p *parser) wordParts() (wps []WordPart) {
for {
n := p.wordPart()
if n == nil {
return
}
wps = append(wps, n)
if p.spaced {
return
}
}
}
func (p *parser) wordPart() WordPart {
switch p.tok {
case LIT, LITWORD:
l := &Lit{ValuePos: p.pos, Value: p.val}
p.next()
return l
case p.quote:
return nil
case DOLLBR:
return p.paramExp()
case DOLLDP:
ar := &ArithmExp{Dollar: p.pos}
old := p.quote
p.quote = DRPAREN
p.next()
ar.X = p.arithmExpr(DOLLDP, ar.Dollar)
ar.Rparen = p.arithmEnd(ar.Dollar, old)
return ar
case DOLLPR:
cs := &CmdSubst{Left: p.pos}
old := p.quote
p.quote = RPAREN
p.next()
cs.Stmts = p.stmts()
p.quote = old
cs.Right = p.matched(cs.Left, LPAREN, RPAREN)
return cs
case DOLLAR:
var b byte
if p.npos >= len(p.src) {
p.errPass(io.EOF)
} else {
b = p.src[p.npos]
}
if p.tok == EOF || wordBreak(b) || b == '"' {
l := &Lit{ValuePos: p.pos, Value: "$"}
p.next()
return l
}
pe := &ParamExp{Dollar: p.pos, Short: true}
if b == '#' || b == '$' || b == '?' {
p.npos++
p.pos++
p.tok, p.val = LIT, string(b)
} else {
p.next()
}
p.gotLit(&pe.Param)
return pe
case CMDIN, CMDOUT:
ps := &ProcSubst{Op: p.tok, OpPos: p.pos}
old := p.quote
p.quote = RPAREN
p.next()
ps.Stmts = p.stmts()
p.quote = old
ps.Rparen = p.matched(ps.OpPos, ps.Op, RPAREN)
return ps
case SQUOTE:
sq := &SglQuoted{Quote: p.pos}
bs, found := p.readUntil('\'')
rem := bs
for {
i := bytes.IndexByte(rem, '\n')
if i < 0 {
p.npos += len(rem)
break
}
p.npos += i + 1
p.f.lines = append(p.f.lines, p.npos)
rem = rem[i+1:]
}
p.npos++
if !found {
p.posErr(sq.Pos(), `reached EOF without closing quote %s`, SQUOTE)
}
sq.Value = string(bs)
p.next()
return sq
case DOLLSQ, DQUOTE, DOLLDQ:
q := &Quoted{Quote: p.tok, QuotePos: p.pos}
stop := quotedStop(q.Quote)
old := p.quote
p.quote = stop
p.next()
q.Parts = p.wordParts()
p.quote = old
if !p.got(stop) {
p.quoteErr(q.Pos(), stop)
}
return q
case BQUOTE:
cs := &CmdSubst{Backquotes: true, Left: p.pos}
old := p.quote
p.quote = BQUOTE
p.next()
cs.Stmts = p.stmts()
p.quote = old
cs.Right = p.pos
if !p.got(BQUOTE) {
p.quoteErr(cs.Pos(), BQUOTE)
}
return cs
}
return nil
}
// quotedStop maps a quote-opening token to the token that closes it;
// tokens that close with themselves (e.g. DQUOTE) map to themselves.
func quotedStop(start Token) Token {
	if start == DOLLSQ {
		return SQUOTE
	}
	if start == DOLLDQ {
		return DQUOTE
	}
	return start
}
func (p *parser) arithmExpr(ftok Token, fpos Pos) ArithmExpr {
if p.tok == EOF || p.peekArithmEnd() {
return nil
}
left := p.arithmExprBase(ftok, fpos)
q := p.quote
if q != DRPAREN && q != LPAREN && p.spaced {
return left
}
switch p.tok {
case EOF, STOPPED, RPAREN, SEMICOLON, DSEMICOLON, SEMIFALL, DSEMIFALL:
return left
case LIT, LITWORD:
p.curErr("not a valid arithmetic operator: %s", p.val)
}
b := &BinaryExpr{
OpPos: p.pos,
Op: p.tok,
X: left,
}
p.next()
if q != DRPAREN && q != LPAREN && p.spaced {
p.followErr(b.OpPos, b.Op.String(), "an expression")
}
if b.Y = p.arithmExpr(b.Op, b.OpPos); b.Y == nil {
p.followErr(b.OpPos, b.Op.String(), "an expression")
}
return b
}
func (p *parser) arithmExprBase(ftok Token, fpos Pos) ArithmExpr {
if p.tok == INC || p.tok == DEC || p.tok == NOT {
pre := &UnaryExpr{OpPos: p.pos, Op: p.tok}
p.next()
pre.X = p.arithmExprBase(pre.Op, pre.OpPos)
return pre
}
var x ArithmExpr
q := p.quote
switch p.tok {
case LPAREN:
pe := &ParenExpr{Lparen: p.pos}
old := p.quote
p.quote = LPAREN
p.next()
pe.X = p.arithmExpr(LPAREN, pe.Lparen)
if pe.X == nil {
p.posErr(pe.Lparen, "parentheses must enclose an expression")
}
p.quote = old
pe.Rparen = p.matched(pe.Lparen, LPAREN, RPAREN)
x = pe
case ADD, SUB:
ue := &UnaryExpr{OpPos: p.pos, Op: p.tok}
p.next()
if q != DRPAREN && q != LPAREN && p.spaced {
p.followErr(ue.OpPos, ue.Op.String(), "an expression")
}
ue.X = p.arithmExpr(ue.Op, ue.OpPos)
if ue.X == nil {
p.followErr(ue.OpPos, ue.Op.String(), "an expression")
}
x = ue
default:
w := p.followWordTok(ftok, fpos)
x = &w
}
if q != DRPAREN && q != LPAREN && p.spaced {
return x
}
if p.tok == INC || p.tok == DEC {
u := &UnaryExpr{
Post: true,
OpPos: p.pos,
Op: p.tok,
X: x,
}
p.next()
return u
}
return x
}
func (p *parser) gotParamLit(l *Lit) bool {
switch p.tok {
case LIT, LITWORD:
l.ValuePos, l.Value = p.pos, p.val
case DOLLAR:
l.ValuePos, l.Value = p.pos, "$"
case QUEST:
l.ValuePos, l.Value = p.pos, "?"
default:
l.ValuePos = p.pos
return false
}
p.next()
return true
}
func (p *parser) paramExp() *ParamExp {
pe := &ParamExp{Dollar: p.pos}
old := p.quote
p.quote = LBRACE
p.next()
pe.Length = p.got(HASH)
if !p.gotParamLit(&pe.Param) && !pe.Length {
p.posErr(pe.Dollar, "parameter expansion requires a literal")
}
if p.tok == RBRACE {
p.quote = old
p.next()
return pe
}
if p.tok == LBRACK {
lpos := p.pos
p.quote = RBRACK
p.next()
pe.Ind = &Index{Word: p.getWord()}
p.quote = LBRACE
p.matched(lpos, LBRACK, RBRACK)
}
if p.tok == RBRACE {
p.quote = old
p.next()
return pe
}
if pe.Length {
p.curErr(`can only get length of a simple parameter`)
}
if p.tok == QUO || p.tok == DQUO {
pe.Repl = &Replace{All: p.tok == DQUO}
p.quote = QUO
p.next()
pe.Repl.Orig = p.getWord()
if p.tok == QUO {
p.quote = RBRACE
p.next()
pe.Repl.With = p.getWord()
}
} else {
pe.Exp = &Expansion{Op: p.tok}
p.quote = RBRACE
p.next()
pe.Exp.Word = p.getWord()
}
p.quote = old
p.matched(pe.Dollar, DOLLBR, RBRACE)
return pe
}
// peekArithmEnd reports whether the arithmetic expression is about to
// close, i.e. the current RPAREN token is immediately followed by a
// second ')' byte in the source.
func (p *parser) peekArithmEnd() bool {
	if p.tok != RPAREN {
		return false
	}
	return p.npos < len(p.src) && p.src[p.npos] == ')'
}
func (p *parser) arithmEnd(left Pos, old Token) Pos {
if p.peekArithmEnd() {
p.npos++
} else {
p.matchingErr(left, DLPAREN, DRPAREN)
}
p.quote = old
pos := p.pos
p.next()
return pos
}
// peekEnd reports whether the current token terminates a statement:
// end of input, a newline, or a semicolon.
func (p *parser) peekEnd() bool {
	if p.newLine {
		return true
	}
	return p.tok == EOF || p.tok == SEMICOLON
}
// peekStop reports whether the current token stops a command: a
// statement end, a command separator or pipe operator, the token that
// closes the current quote state, or a case-clause terminator when
// inside a case clause.
func (p *parser) peekStop() bool {
	if p.peekEnd() {
		return true
	}
	switch p.tok {
	case AND, OR, LAND, LOR, PIPEALL, p.quote:
		return true
	}
	return p.quote == DSEMICOLON && dsemicolon(p.tok)
}
var identRe = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*$`)
// assignSplit returns the index of the '=' (or of the '+' in a '+='
// append) when s starts a well-formed assignment to a valid shell
// identifier, and -1 otherwise.
func assignSplit(s string) int {
	i := strings.IndexByte(s, '=')
	switch {
	case i <= 0:
		return -1
	case s[i-1] == '+':
		// point at the '+' of a '+=' append
		i--
	}
	if identRe.MatchString(s[:i]) {
		return i
	}
	return -1
}
func (p *parser) getAssign() (*Assign, bool) {
i := assignSplit(p.val)
if i < 0 {
return nil, false
}
as := &Assign{}
as.Name = &Lit{ValuePos: p.pos, Value: p.val[:i]}
if p.val[i] == '+' {
as.Append = true
i++
}
start := &Lit{ValuePos: p.pos + 1, Value: p.val[i+1:]}
if start.Value != "" {
start.ValuePos += Pos(i)
as.Value.Parts = append(as.Value.Parts, start)
}
p.next()
if p.spaced {
return as, true
}
if start.Value == "" && p.tok == LPAREN {
ae := &ArrayExpr{Lparen: p.pos}
p.next()
for p.tok != EOF && p.tok != RPAREN {
if w, ok := p.gotWord(); !ok {
p.curErr("array elements must be words")
} else {
ae.List = append(ae.List, w)
}
}
ae.Rparen = p.matched(ae.Lparen, LPAREN, RPAREN)
as.Value.Parts = append(as.Value.Parts, ae)
} else if !p.peekStop() {
w := p.getWord()
if start.Value == "" {
as.Value = w
} else {
as.Value.Parts = append(as.Value.Parts, w.Parts...)
}
}
return as, true
}
func (p *parser) peekRedir() bool {
switch p.tok {
case LITWORD:
return p.npos < len(p.src) && (p.src[p.npos] == '>' || p.src[p.npos] == '<')
case GTR, SHR, LSS, DPLIN, DPLOUT, RDRINOUT,
SHL, DHEREDOC, WHEREDOC, RDRALL, APPALL:
return true
}
return false
}
func (p *parser) doRedirect(s *Stmt) {
r := &Redirect{}
var l Lit
if p.gotLit(&l) {
r.N = &l
}
r.Op, r.OpPos = p.tok, p.pos
p.next()
switch r.Op {
case SHL, DHEREDOC:
p.stopNewline = true
p.forbidNested = true
r.Word = p.followWordTok(r.Op, r.OpPos)
p.forbidNested = false
r.Hdoc = &Lit{}
p.heredocs = append(p.heredocs, r)
p.got(STOPPED)
default:
r.Word = p.followWordTok(r.Op, r.OpPos)
}
s.Redirs = append(s.Redirs, r)
}
// getStmt parses a full statement: optional negation, leading
// assignments and redirects, the command pipeline itself, and any
// trailing && || or & operators. gotEnd reports whether a statement
// terminator was seen; when readEnd is true, a same-line trailing
// semicolon is also consumed.
func (p *parser) getStmt(readEnd bool) (s *Stmt, gotEnd bool) {
	s = &Stmt{Position: p.pos}
	if p.gotRsrv("!") {
		s.Negated = true
	}
preLoop:
	for {
		switch p.tok {
		case LIT, LITWORD:
			if as, ok := p.getAssign(); ok {
				s.Assigns = append(s.Assigns, as)
			} else if p.npos < len(p.src) && (p.src[p.npos] == '>' || p.src[p.npos] == '<') {
				// a literal immediately followed by > or < is an fd
				// number, e.g. the 2 in "2>err"
				p.doRedirect(s)
			} else {
				break preLoop
			}
		case GTR, SHR, LSS, DPLIN, DPLOUT, RDRINOUT,
			SHL, DHEREDOC, WHEREDOC, RDRALL, APPALL:
			p.doRedirect(s)
		default:
			break preLoop
		}
		if p.peekEnd() {
			// the statement consisted only of assignments/redirects
			gotEnd = p.gotSameLine(SEMICOLON)
			return
		}
	}
	if s = p.gotStmtPipe(s); s == nil {
		return
	}
	switch p.tok {
	case LAND, LOR:
		s = p.binaryCmdAndOr(s)
	case AND:
		// trailing & runs the statement in the background
		p.next()
		s.Background = true
		gotEnd = true
	}
	if readEnd && p.gotSameLine(SEMICOLON) {
		gotEnd = true
	}
	return
}
// gotStmtPipe parses a single command (compound or simple) along with
// its redirects, then any | or |& pipeline continuation. It returns
// nil if no command was found.
func (p *parser) gotStmtPipe(s *Stmt) *Stmt {
	switch p.tok {
	case LPAREN:
		s.Cmd = p.subshell()
	case LITWORD:
		// reserved words dispatch to their compound-command parsers
		switch p.val {
		case "}":
			p.curErr("%s can only be used to close a block", p.val)
		case "{":
			s.Cmd = p.block()
		case "if":
			s.Cmd = p.ifClause()
		case "while":
			s.Cmd = p.whileClause()
		case "until":
			s.Cmd = p.untilClause()
		case "for":
			s.Cmd = p.forClause()
		case "case":
			s.Cmd = p.caseClause()
		case "declare":
			s.Cmd = p.declClause(false)
		case "local":
			s.Cmd = p.declClause(true)
		case "eval":
			s.Cmd = p.evalClause()
		case "let":
			s.Cmd = p.letClause()
		case "function":
			s.Cmd = p.bashFuncDecl()
		default:
			// a plain word: either a "foo()" function declaration or
			// a simple command
			name := Lit{ValuePos: p.pos, Value: p.val}
			w := p.getWord()
			if p.gotSameLine(LPAREN) {
				p.follow(name.ValuePos, "foo(", RPAREN)
				s.Cmd = p.funcDecl(name, name.ValuePos)
			} else {
				s.Cmd = p.callExpr(s, w)
			}
		}
	case LIT, DOLLBR, DOLLDP, DOLLPR, DOLLAR, CMDIN, CMDOUT,
		SQUOTE, DOLLSQ, DQUOTE, DOLLDQ, BQUOTE:
		w := p.getWord()
		if p.gotSameLine(LPAREN) && p.err == nil {
			// non-literal words cannot name functions
			p.posErr(w.Pos(), "invalid func name: %s", wordStr(p.f, w))
		}
		s.Cmd = p.callExpr(s, w)
	}
	for !p.newLine && p.peekRedir() {
		p.doRedirect(s)
	}
	if s.Cmd == nil && len(s.Redirs) == 0 && !s.Negated && len(s.Assigns) == 0 {
		return nil
	}
	if p.tok == OR || p.tok == PIPEALL {
		s = p.binaryCmdPipe(s)
	}
	return s
}
// binaryCmdAndOr parses the right-hand side of a && or || operator,
// wrapping left and the parsed statement in a BinaryCmd.
func (p *parser) binaryCmdAndOr(left *Stmt) *Stmt {
	cmd := &BinaryCmd{OpPos: p.pos, Op: p.tok, X: left}
	p.next()
	p.got(STOPPED)
	y, _ := p.getStmt(false)
	if y == nil {
		p.followErr(cmd.OpPos, cmd.Op.String(), "a statement")
	}
	cmd.Y = y
	return &Stmt{Position: left.Position, Cmd: cmd}
}
// binaryCmdPipe parses the right-hand side of a | or |& operator,
// wrapping left and the parsed statement in a BinaryCmd.
func (p *parser) binaryCmdPipe(left *Stmt) *Stmt {
	cmd := &BinaryCmd{OpPos: p.pos, Op: p.tok, X: left}
	p.next()
	p.got(STOPPED)
	y := p.gotStmtPipe(&Stmt{Position: p.pos})
	if y == nil {
		p.followErr(cmd.OpPos, cmd.Op.String(), "a statement")
	}
	cmd.Y = y
	return &Stmt{Position: left.Position, Cmd: cmd}
}
// subshell parses a parenthesized subshell, which must contain at
// least one statement.
func (p *parser) subshell() *Subshell {
	s := &Subshell{Lparen: p.pos}
	old := p.quote
	p.quote = RPAREN
	p.next()
	s.Stmts = p.stmts()
	p.quote = old
	s.Rparen = p.matched(s.Lparen, LPAREN, RPAREN)
	if len(s.Stmts) == 0 {
		p.posErr(s.Lparen, "a subshell must contain at least one statement")
	}
	return s
}
// block parses a { } block, which must be closed by a "}" word.
func (p *parser) block() *Block {
	b := &Block{Lbrace: p.pos}
	p.next()
	b.Stmts = p.stmts("}")
	b.Rbrace = p.pos
	if !p.gotRsrv("}") {
		p.posErr(b.Lbrace, `reached %s without matching word { with }`, p.tok)
	}
	return b
}
// ifClause parses an if clause with any elif and else branches,
// terminated by fi.
func (p *parser) ifClause() *IfClause {
	ic := &IfClause{If: p.pos}
	p.next()
	ic.Cond = p.cond("if", ic.If, "then")
	ic.Then = p.followRsrv(ic.If, "if [stmts]", "then")
	ic.ThenStmts = p.followStmts("then", ic.Then, "fi", "elif", "else")
	elifPos := p.pos
	for p.gotRsrv("elif") {
		elf := &Elif{Elif: elifPos}
		elf.Cond = p.cond("elif", elf.Elif, "then")
		elf.Then = p.followRsrv(elf.Elif, "elif [stmts]", "then")
		elf.ThenStmts = p.followStmts("then", elf.Then, "fi", "elif", "else")
		ic.Elifs = append(ic.Elifs, elf)
		// remember the position before checking for the next elif
		elifPos = p.pos
	}
	elsePos := p.pos
	if p.gotRsrv("else") {
		ic.Else = elsePos
		ic.ElseStmts = p.followStmts("else", ic.Else, "fi")
	}
	ic.Fi = p.stmtEnd(ic, "if", "fi")
	return ic
}
// cond parses the condition of an if/elif/while/until clause: a
// C-style ((expr)) arithmetic condition if one follows, otherwise a
// statement list ending before stop.
func (p *parser) cond(left string, lpos Pos, stop string) Cond {
	if p.tok == LPAREN && p.npos < len(p.src) && p.src[p.npos] == '(' {
		// "((" begins a C-style arithmetic condition
		p.npos++
		c := &CStyleCond{Lparen: p.pos}
		old := p.quote
		p.quote = DRPAREN
		p.next()
		c.X = p.arithmExpr(DLPAREN, c.Lparen)
		c.Rparen = p.arithmEnd(c.Lparen, old)
		p.gotSameLine(SEMICOLON)
		return c
	}
	stmts := p.followStmts(left, lpos, stop)
	if len(stmts) == 0 {
		return nil
	}
	return &StmtCond{Stmts: stmts}
}
// whileClause parses a while clause terminated by done.
func (p *parser) whileClause() *WhileClause {
	wc := &WhileClause{While: p.pos}
	p.next()
	wc.Cond = p.cond("while", wc.While, "do")
	wc.Do = p.followRsrv(wc.While, "while [stmts]", "do")
	wc.DoStmts = p.followStmts("do", wc.Do, "done")
	wc.Done = p.stmtEnd(wc, "while", "done")
	return wc
}
// untilClause parses an until clause terminated by done.
func (p *parser) untilClause() *UntilClause {
	uc := &UntilClause{Until: p.pos}
	p.next()
	uc.Cond = p.cond("until", uc.Until, "do")
	uc.Do = p.followRsrv(uc.Until, "until [stmts]", "do")
	uc.DoStmts = p.followStmts("do", uc.Do, "done")
	uc.Done = p.stmtEnd(uc, "until", "done")
	return uc
}
// forClause parses a for clause terminated by done.
func (p *parser) forClause() *ForClause {
	fc := &ForClause{For: p.pos}
	p.next()
	fc.Loop = p.loop(fc.For)
	fc.Do = p.followRsrv(fc.For, "for foo [in words]", "do")
	fc.DoStmts = p.followStmts("do", fc.Do, "done")
	fc.Done = p.stmtEnd(fc, "for", "done")
	return fc
}
// loop parses the loop header of a for clause: either a C-style
// ((init; cond; post)) loop or a "name [in words]" word iteration.
func (p *parser) loop(forPos Pos) Loop {
	if p.tok == LPAREN && p.npos < len(p.src) && p.src[p.npos] == '(' {
		// "((" begins a C-style loop
		p.npos++
		cl := &CStyleLoop{Lparen: p.pos}
		old := p.quote
		p.quote = DRPAREN
		p.next()
		cl.Init = p.arithmExpr(DLPAREN, cl.Lparen)
		scPos := p.pos
		p.follow(p.pos, "expression", SEMICOLON)
		cl.Cond = p.arithmExpr(SEMICOLON, scPos)
		scPos = p.pos
		p.follow(p.pos, "expression", SEMICOLON)
		cl.Post = p.arithmExpr(SEMICOLON, scPos)
		cl.Rparen = p.arithmEnd(cl.Lparen, old)
		p.gotSameLine(SEMICOLON)
		return cl
	}
	wi := &WordIter{}
	if !p.gotLit(&wi.Name) {
		p.followErr(forPos, "for", "a literal")
	}
	if p.gotRsrv("in") {
		for !p.peekEnd() {
			if w, ok := p.gotWord(); !ok {
				p.curErr("word list can only contain words")
			} else {
				wi.List = append(wi.List, w)
			}
		}
		p.gotSameLine(SEMICOLON)
	} else if !p.gotSameLine(SEMICOLON) && !p.newLine {
		// "for foo" must be followed by in, ; or a newline
		p.followErr(forPos, "for foo", `"in", ; or a newline`)
	}
	return wi
}
// caseClause parses a case clause terminated by esac.
func (p *parser) caseClause() *CaseClause {
	cc := &CaseClause{Case: p.pos}
	p.next()
	cc.Word = p.followWord("case", cc.Case)
	p.followRsrv(cc.Case, "case x", "in")
	cc.List = p.patLists()
	cc.Esac = p.stmtEnd(cc, "case", "esac")
	return cc
}
// patLists parses the pattern lists of a case clause. Each list is a
// series of |-separated patterns, the statements to run, and a ;;
// style operator (implied for the final list before esac).
func (p *parser) patLists() (pls []*PatternList) {
	if p.gotSameLine(SEMICOLON) {
		return
	}
	for p.tok != EOF && !(p.tok == LITWORD && p.val == "esac") {
		pl := &PatternList{}
		// the opening parenthesis before a pattern is optional
		p.got(LPAREN)
		for p.tok != EOF {
			if w, ok := p.gotWord(); !ok {
				p.curErr("case patterns must consist of words")
			} else {
				pl.Patterns = append(pl.Patterns, w)
			}
			if p.tok == RPAREN {
				break
			}
			if !p.got(OR) {
				p.curErr("case patterns must be separated with |")
			}
		}
		old := p.quote
		p.quote = DSEMICOLON
		p.next()
		pl.Stmts = p.stmts("esac")
		p.quote = old
		if !dsemicolon(p.tok) {
			// no ;; operator; this must be the final pattern list
			pl.Op, pl.OpPos = DSEMICOLON, p.pos
			pls = append(pls, pl)
			break
		}
		pl.Op, pl.OpPos = p.tok, p.pos
		p.next()
		pls = append(pls, pl)
	}
	return
}
// declClause parses a declare or local clause: option words starting
// with '-' followed by assignments or bare words.
func (p *parser) declClause(local bool) *DeclClause {
	ds := &DeclClause{Declare: p.pos, Local: local}
	p.next()
	for p.tok == LITWORD && p.val[0] == '-' {
		ds.Opts = append(ds.Opts, p.getWord())
	}
	for !p.peekStop() {
		if as, ok := p.getAssign(); ok {
			ds.Assigns = append(ds.Assigns, as)
		} else if w, ok := p.gotWord(); !ok {
			p.followErr(p.pos, "declare", "words")
		} else {
			// a bare word declares a name without a value
			ds.Assigns = append(ds.Assigns, &Assign{Value: w})
		}
	}
	return ds
}
// evalClause parses an eval clause: the eval keyword followed by an
// optional statement.
func (p *parser) evalClause() *EvalClause {
	pos := p.pos
	p.next()
	stmt, _ := p.getStmt(false)
	return &EvalClause{Eval: pos, Stmt: stmt}
}
// letClause parses a let clause, which requires at least one
// arithmetic expression.
func (p *parser) letClause() *LetClause {
	lc := &LetClause{Let: p.pos}
	old := p.quote
	p.quote = DLPAREN
	p.next()
	// a newline terminates the clause, so make it a token
	p.stopNewline = true
	for !p.peekStop() && p.tok != STOPPED && !dsemicolon(p.tok) {
		x := p.arithmExpr(LET, lc.Let)
		if x == nil {
			p.followErr(p.pos, "let", "arithmetic expressions")
		}
		lc.Exprs = append(lc.Exprs, x)
	}
	if len(lc.Exprs) == 0 {
		p.posErr(lc.Let, "let clause requires at least one expression")
	}
	p.stopNewline = false
	p.quote = old
	p.got(STOPPED)
	return lc
}
// bashFuncDecl parses a bash-style function declaration using the
// "function" keyword, with optional trailing parentheses.
func (p *parser) bashFuncDecl() *FuncDecl {
	fpos := p.pos
	p.next()
	if p.tok != LITWORD {
		// only literal words can name a function
		w := p.followWord("function", fpos)
		if p.err == nil {
			p.posErr(w.Pos(), "invalid func name: %s", wordStr(p.f, w))
		}
	}
	name := Lit{ValuePos: p.pos, Value: p.val}
	p.next()
	if p.gotSameLine(LPAREN) {
		p.follow(name.ValuePos, "foo(", RPAREN)
	}
	return p.funcDecl(name, fpos)
}
// callExpr parses the arguments and redirects of a simple command,
// starting from its first word w.
func (p *parser) callExpr(s *Stmt, w Word) *CallExpr {
	ce := &CallExpr{Args: []Word{w}}
	for !p.peekStop() {
		switch p.tok {
		case STOPPED:
			p.next()
		case LITWORD:
			if p.npos < len(p.src) && (p.src[p.npos] == '>' || p.src[p.npos] == '<') {
				// fd number immediately followed by a redirect operator
				p.doRedirect(s)
				continue
			}
			fallthrough
		case LIT, DOLLBR, DOLLDP, DOLLPR, DOLLAR, CMDIN, CMDOUT,
			SQUOTE, DOLLSQ, DQUOTE, DOLLDQ, BQUOTE:
			ce.Args = append(ce.Args, p.getWord())
		case GTR, SHR, LSS, DPLIN, DPLOUT, RDRINOUT,
			SHL, DHEREDOC, WHEREDOC, RDRALL, APPALL:
			p.doRedirect(s)
		default:
			p.curErr("a command can only contain words and redirects")
		}
	}
	return ce
}
// funcDecl parses the body of a function declaration with the given
// name. BashStyle records whether the "function" keyword was used.
func (p *parser) funcDecl(name Lit, pos Pos) *FuncDecl {
	fd := &FuncDecl{
		Position: pos,
		BashStyle: pos != name.ValuePos,
		Name: name,
	}
	if fd.Body, _ = p.getStmt(false); fd.Body == nil {
		p.followErr(fd.Pos(), "foo()", "a statement")
	}
	return fd
}
|
// Copyright (c) 2016, Daniel Martí <mvdan@mvdan.cc>
// See LICENSE for licensing information
package sh
import (
"bufio"
"bytes"
"fmt"
"io"
"regexp"
"strconv"
"strings"
)
// Parse reads and parses a shell program with an optional name. It
// returns the parsed program if no issues were encountered. Otherwise,
// an error is returned.
func Parse(r io.Reader, name string) (File, error) {
	p := &parser{
		br: bufio.NewReader(r),
		file: File{
			Name: name,
		},
		// positions are 1-indexed
		npos: Pos{
			Line: 1,
			Column: 1,
		},
	}
	p.next()
	p.file.Stmts = p.stmts()
	return p.file, p.err
}
// parser holds the state of the shell lexer and recursive-descent
// parser.
type parser struct {
	br *bufio.Reader // buffered input source
	file File // the resulting AST
	err error // the first error encountered, if any
	spaced, newLine bool // whether the current token followed space / a newline
	ltok, tok Token // last and current tokens
	lval, val string // last and current token string values
	lpos, pos, npos Pos // last, current and next positions
	// stack of stop tokens
	stops []Token
	stopNewline bool // treat the next newline as a token (heredocs)
	heredocs []*Word // pending heredocs to read at the next newline
}
// enterStops pushes the given stop tokens and advances to the next
// token under the new quoting context.
func (p *parser) enterStops(stops ...Token) {
	p.stops = append(p.stops, stops...)
	p.next()
}
// quoted reports whether tok is the innermost (top of stack) stop
// token.
func (p *parser) quoted(tok Token) bool {
	return len(p.stops) > 0 && p.stops[len(p.stops)-1] == tok
}
// popStops removes the n most recently pushed stop tokens.
func (p *parser) popStops(n int) { p.stops = p.stops[:len(p.stops)-n] }
func (p *parser) popStop() { p.popStops(1) }
// readByte consumes and returns the next input byte, updating the
// position counters. On error, the parser is moved to EOF.
func (p *parser) readByte() (byte, error) {
	b, err := p.br.ReadByte()
	if err != nil {
		p.errPass(err)
		return 0, err
	}
	p.moveWith(b)
	return b, nil
}
// consumeByte reads and discards one byte.
func (p *parser) consumeByte() { p.readByte() }
// moveWith advances the next position npos past the byte b, starting
// a new line when b is a newline.
func (p *parser) moveWith(b byte) {
	if b != '\n' {
		p.npos.Column++
		return
	}
	p.npos.Line++
	p.npos.Column = 1
}
// peekByte returns the next input byte without consuming it.
func (p *parser) peekByte() (byte, error) {
	bs, err := p.br.Peek(1)
	if err != nil {
		return 0, err
	}
	return bs[0], nil
}
// peekString reports whether the upcoming input starts with s.
func (p *parser) peekString(s string) bool {
	bs, err := p.br.Peek(len(s))
	return err == nil && string(bs) == s
}
// peekAnyByte reports whether the next input byte is any of bs.
func (p *parser) peekAnyByte(bs ...byte) bool {
	peek, err := p.br.Peek(1)
	if err != nil {
		return false
	}
	return bytes.IndexByte(bs, peek[0]) >= 0
}
// readOnly consumes s if and only if the input starts with it,
// reporting whether it did.
func (p *parser) readOnly(s string) bool {
	if p.peekString(s) {
		for i := 0; i < len(s); i++ {
			p.consumeByte()
		}
		return true
	}
	return false
}
var (
// bytes that form or start a token
reserved = map[byte]bool{
'&': true,
'>': true,
'<': true,
'|': true,
';': true,
'(': true,
')': true,
'$': true,
'"': true,
'\'': true,
'`': true,
}
// subset of the above that mark the end of a word
wordBreak = map[byte]bool{
'&': true,
'>': true,
'<': true,
'|': true,
';': true,
'(': true,
')': true,
}
// tokenize these inside parameter expansions
paramOps = map[byte]bool{
'}': true,
'#': true,
':': true,
'-': true,
'+': true,
'=': true,
'?': true,
'%': true,
}
// tokenize these inside arithmetic expansions
arithmOps = map[byte]bool{
'+': true,
'-': true,
'!': true,
'*': true,
'/': true,
'%': true,
'^': true,
'<': true,
'>': true,
':': true,
'=': true,
}
// bytes that will be treated as space
space = map[byte]bool{
' ': true,
'\t': true,
'\n': true,
}
)
// next advances the lexer to the next token, skipping whitespace and
// escaped newlines, and dispatching on the current quoting context.
func (p *parser) next() {
	if p.tok == EOF {
		return
	}
	p.lpos, p.pos = p.pos, p.npos
	p.spaced, p.newLine = false, false
	var b byte
	for {
		if p.readOnly("\\\n") {
			// escaped newlines are invisible to the lexer
			continue
		}
		var err error
		if b, err = p.peekByte(); err != nil {
			p.errPass(err)
			return
		}
		if p.stopNewline && b == '\n' {
			// a heredoc operator was seen; stop at the newline so
			// its body can be read
			p.advanceTok(STOPPED)
			return
		}
		if p.quoted('"') || !space[b] {
			break
		}
		p.consumeByte()
		p.pos = p.npos
		p.spaced = true
		if b == '\n' {
			p.newLine = true
			if len(p.heredocs) > 0 {
				p.doHeredocs()
				return
			}
		}
	}
	switch {
	case p.quoted(RBRACE) && b == '}', p.quoted(LBRACE) && paramOps[b]:
		if p.readOnly("}") {
			// '}' is a token only in this context
			p.advanceTok(RBRACE)
		} else {
			p.advanceTok(p.doToken(b))
		}
	case b == '#' && !p.quoted('"'):
		p.advanceBoth(COMMENT, p.readLine())
	case reserved[b]:
		// Between double quotes, only under certain
		// circumstances do we tokenize
		if p.quoted('"') {
			switch {
			case b == '`', b == '"', b == '$', p.tok == DOLLAR:
			default:
				p.advanceReadLit()
				return
			}
		}
		fallthrough
	case p.quoted(DRPAREN) && arithmOps[b]:
		p.advanceTok(p.doToken(b))
	default:
		p.advanceReadLit()
	}
}
// advanceReadLit reads a literal and makes it the current token.
func (p *parser) advanceReadLit() { p.advanceBoth(LIT, string(p.readLitBytes())) }
// readLitBytes reads bytes until something ends the literal: a
// reserved byte, space, or a byte special to the current quoting
// context. Escaped bytes are kept with their backslash, except for
// escaped newlines, which are dropped entirely.
func (p *parser) readLitBytes() (bs []byte) {
	for {
		if p.readOnly("\\") { // escaped byte
			if b, _ := p.readByte(); b != '\n' {
				bs = append(bs, '\\', b)
			}
			continue
		}
		b, err := p.peekByte()
		if err != nil {
			return
		}
		switch {
		case b == '$', b == '`':
			return
		case p.quoted(RBRACE) && b == '}':
			return
		case p.quoted(LBRACE) && paramOps[b]:
			return
		case p.quoted('"'):
			if b == '"' {
				return
			}
		case reserved[b], space[b]:
			return
		case p.quoted(DRPAREN) && arithmOps[b]:
			return
		}
		p.consumeByte()
		bs = append(bs, b)
	}
}
// advanceTok makes tok the current token, using its name as the value.
func (p *parser) advanceTok(tok Token) { p.advanceBoth(tok, tok.String()) }
// advanceBoth makes tok/val the current token, saving the previous
// ones in ltok/lval.
func (p *parser) advanceBoth(tok Token, val string) {
	if p.tok != EOF {
		p.ltok = p.tok
		p.lval = p.val
	}
	p.tok = tok
	p.val = val
}
// readUntil reads bytes until the input begins with s, reporting
// whether s was found before EOF. s itself is not consumed.
func (p *parser) readUntil(s string) (string, bool) {
	var bs []byte
	for {
		if p.peekString(s) {
			return string(bs), true
		}
		b, err := p.readByte()
		if err != nil {
			return string(bs), false
		}
		bs = append(bs, b)
	}
}
// readLine reads up to (but not including) the next newline.
func (p *parser) readLine() string {
	s, _ := p.readUntil("\n")
	return s
}
// doHeredocs reads the bodies of all pending heredocs, replacing each
// delimiter word's parts with a single literal holding the content.
func (p *parser) doHeredocs() {
	for i, w := range p.heredocs {
		// the delimiter is matched with quotes removed
		endLine := unquote(*w).String()
		if i > 0 {
			p.readOnly("\n")
		}
		s, _ := p.readHeredocContent(endLine)
		w.Parts[0] = Lit{
			ValuePos: w.Pos(),
			Value: fmt.Sprintf("%s\n%s", w, s),
		}
		w.Parts = w.Parts[:1]
	}
	p.heredocs = nil
	p.next()
}
// readHeredocContent reads lines until endLine is found on its own,
// reporting whether the delimiter was seen before EOF.
func (p *parser) readHeredocContent(endLine string) (string, bool) {
	var buf bytes.Buffer
	for !p.eof() {
		line := p.readLine()
		if line == endLine {
			fmt.Fprint(&buf, line)
			return buf.String(), true
		}
		fmt.Fprintln(&buf, line)
		p.readOnly("\n")
	}
	fmt.Fprint(&buf, endLine)
	return buf.String(), false
}
// peek reports whether the current token is tok, skipping comments
// and also matching reserved words by string value.
func (p *parser) peek(tok Token) bool {
	for p.tok == COMMENT {
		p.next()
	}
	return p.tok == tok || p.peekReservedWord(tok)
}
// peekReservedWord reports whether the current word is the reserved
// word for tok, followed by a word-ending byte.
func (p *parser) peekReservedWord(tok Token) bool {
	return p.val == tokNames[tok] && p.peekSpaced()
}
// peekSpaced reports whether the current token is followed by space,
// a word break, or the end of input.
func (p *parser) peekSpaced() bool {
	b, err := p.peekByte()
	return err != nil || space[b] || wordBreak[b]
}
// eof reports whether all input has been consumed.
func (p *parser) eof() bool {
	p.peek(COMMENT)
	return p.tok == EOF
}
// peekAny reports whether the current token matches any of toks.
func (p *parser) peekAny(toks ...Token) bool {
	for _, tok := range toks {
		if p.peek(tok) {
			return true
		}
	}
	return false
}
// got consumes the current token if it matches tok.
func (p *parser) got(tok Token) bool {
	if p.peek(tok) {
		p.next()
		return true
	}
	return false
}
// gotSameLine is like got, but only matches when no newline intervened.
func (p *parser) gotSameLine(tok Token) bool { return !p.newLine && p.got(tok) }
// gotAny consumes the current token if it matches any of toks.
func (p *parser) gotAny(toks ...Token) bool {
	for _, tok := range toks {
		if p.got(tok) {
			return true
		}
	}
	return false
}
// readableStr returns a human-readable form of v — a string or a
// Token — for use in error messages. Word-like values are quoted so
// that they stand out from punctuation tokens.
func readableStr(v interface{}) string {
	var s string
	switch x := v.(type) {
	case string:
		s = x
	case Token:
		s = x.String()
	}
	// guard against an empty (or unrecognized) value so the index
	// below cannot panic
	if s == "" {
		return s
	}
	// don't quote tokens like & or }
	if s[0] >= 'a' && s[0] <= 'z' {
		return strconv.Quote(s)
	}
	return s
}
// followErr reports that right did not follow left at lpos.
func (p *parser) followErr(lpos Pos, left interface{}, right string) {
	leftStr := readableStr(left)
	p.posErr(lpos, "%s must be followed by %s", leftStr, right)
}
// followTok consumes tok, reporting an error if it is absent.
func (p *parser) followTok(lpos Pos, left string, tok Token) {
	if !p.got(tok) {
		p.followErr(lpos, left, fmt.Sprintf(`%q`, tok))
	}
}
// followStmt parses the statement required after left, erroring if
// none is found.
func (p *parser) followStmt(lpos Pos, left string) (s Stmt) {
	if !p.gotStmt(&s) {
		p.followErr(lpos, left, "a statement")
	}
	return
}
// followStmts parses the statement list required after left; an empty
// list is allowed when terminated by a same-line semicolon or newline.
func (p *parser) followStmts(left Token, stops ...Token) []Stmt {
	if p.gotSameLine(SEMICOLON) {
		return nil
	}
	sts := p.stmts(stops...)
	if len(sts) < 1 && !p.newLine {
		p.followErr(p.lpos, left, "a statement list")
	}
	return sts
}
// followWord parses the word required after left.
func (p *parser) followWord(left Token) (w Word) {
	if !p.gotWord(&w) {
		p.followErr(p.lpos, left, "a word")
	}
	return
}
// stmtEnd consumes the closing token of a compound statement (fi,
// done, esac...), recording its position in pos.
func (p *parser) stmtEnd(startPos Pos, startTok, tok Token, pos *Pos) {
	if !p.got(tok) {
		p.posErr(startPos, `%s statement must end with %q`, startTok, tok)
	}
	*pos = p.lpos
}
// closingQuote consumes the closing quote b, erroring if missing.
func (p *parser) closingQuote(lpos Pos, b byte) {
	tok := Token(b)
	if !p.got(tok) {
		p.posErr(lpos, `reached %s without closing quote %s`, p.tok, tok)
	}
}
// matchingErr reports an unmatched opening token at lpos.
func (p *parser) matchingErr(lpos Pos, left, right Token) {
	p.posErr(lpos, `reached %s without matching token %s with %s`,
		p.tok, left, right)
}
// matchedTok consumes right, which must close the left token opened
// at lpos, returning its position.
func (p *parser) matchedTok(lpos Pos, left, right Token) Pos {
	if !p.got(right) {
		p.matchingErr(lpos, left, right)
	}
	return p.lpos
}
// errPass records the first non-EOF error and stops the lexer.
func (p *parser) errPass(err error) {
	if p.err == nil && err != io.EOF {
		p.err = err
	}
	p.advanceTok(EOF)
}
// lineErr is a parse error tied to a position within the file.
type lineErr struct {
	pos Position
	text string
}
func (e lineErr) Error() string {
	return fmt.Sprintf("%s: %s", e.pos, e.text)
}
// posErr records a formatted parse error at pos.
func (p *parser) posErr(pos Pos, format string, v ...interface{}) {
	p.errPass(lineErr{
		pos: Position{
			Filename: p.file.Name,
			Line: pos.Line,
			Column: pos.Column,
		},
		text: fmt.Sprintf(format, v...),
	})
}
// curErr records a formatted parse error at the current position.
func (p *parser) curErr(format string, v ...interface{}) {
	p.posErr(p.pos, format, v...)
}
// stmts parses statements until EOF or one of the stop tokens,
// requiring &, ; or a newline between consecutive statements.
func (p *parser) stmts(stops ...Token) (sts []Stmt) {
	for !p.eof() && !p.peekAny(stops...) {
		gotEnd := p.newLine || p.ltok == AND || p.ltok == SEMICOLON
		if len(sts) > 0 && !gotEnd {
			p.curErr("statements must be separated by &, ; or a newline")
		}
		var s Stmt
		if !p.gotStmt(&s, stops...) {
			p.invalidStmtStart()
		}
		sts = append(sts, s)
	}
	return
}
func (p *parser) invalidStmtStart() {
switch {
case p.peekAny(SEMICOLON, AND, OR, LAND, LOR):
p.curErr("%s can only immediately follow a statement", p.tok)
case p.peek(RBRACE):
p.curErr("%s can only be used to close a block", p.val)
case p.peek(RPAREN):
p.curErr("%s can only be used to close a subshell", p.tok)
default:
p.curErr("%s is not a valid start for a statement", p.tok)
}
}
func (p *parser) stmtsNested(stops ...Token) []Stmt {
p.enterStops(stops...)
sts := p.stmts(stops...)
p.popStops(len(stops))
return sts
}
func (p *parser) gotWord(w *Word) bool {
p.readParts(&w.Parts)
return len(w.Parts) > 0
}
func (p *parser) gotLit(l *Lit) bool {
l.ValuePos = p.pos
if p.got(LIT) {
l.Value = p.lval
return true
}
return false
}
func (p *parser) readParts(ns *[]Node) {
for {
n := p.wordPart()
if n == nil {
break
}
*ns = append(*ns, n)
if p.spaced {
break
}
}
}
func (p *parser) wordPart() Node {
switch {
case p.peek(DOLLAR):
switch {
case p.peekAnyByte('('):
// otherwise it is seen as a word break
case p.peekAnyByte('\'', '"', '`'), p.peekSpaced():
p.next()
return Lit{
ValuePos: p.lpos,
Value: p.lval,
}
}
return p.dollar()
case p.got(LIT):
return Lit{
ValuePos: p.lpos,
Value: p.lval,
}
case p.peek('\''):
sq := SglQuoted{Quote: p.pos}
s, found := p.readUntil("'")
if !found {
p.closingQuote(sq.Quote, '\'')
}
sq.Value = s
p.readOnly("'")
p.next()
return sq
case !p.quoted('"') && p.peek('"'):
dq := DblQuoted{Quote: p.pos}
p.enterStops('"')
p.readParts(&dq.Parts)
p.popStop()
p.closingQuote(dq.Quote, '"')
return dq
case !p.quoted('`') && p.peek('`'):
cs := CmdSubst{Backquotes: true, Left: p.pos}
cs.Stmts = p.stmtsNested('`')
p.closingQuote(cs.Left, '`')
cs.Right = p.lpos
return cs
}
return nil
}
func (p *parser) dollar() Node {
dpos := p.pos
if p.peekAnyByte('{') {
return p.paramExp(dpos)
}
if p.readOnly("#") {
p.advanceTok(Token('#'))
} else {
p.next()
}
lpos := p.pos
switch {
case p.gotArithmStart():
ar := ArithmExpr{
Dollar: dpos,
X: p.arithmExpr(DLPAREN),
}
ar.Rparen = p.arithmEnd(lpos)
return ar
case p.peek(LPAREN):
cs := CmdSubst{Left: dpos}
cs.Stmts = p.stmtsNested(RPAREN)
cs.Right = p.matchedTok(lpos, LPAREN, RPAREN)
return cs
default:
p.next()
return ParamExp{
Dollar: dpos,
Short: true,
Param: Lit{
ValuePos: p.lpos,
Value: p.lval,
},
}
}
}
func (p *parser) arithmExpr(following Token) Node {
if p.eof() || p.peekArithmEnd() {
return nil
}
var left Node
if p.gotAny(INC, DEC) {
pre := UnaryExpr{
OpPos: p.lpos,
Op: p.ltok,
}
pre.X = p.arithmExprBase(pre.Op)
left = pre
} else if left = p.arithmExprBase(following); p.gotAny(INC, DEC) {
left = UnaryExpr{
Post: true,
OpPos: p.lpos,
Op: p.ltok,
X: left,
}
}
if p.eof() || p.peekAny(RPAREN, SEMICOLON) {
return left
}
if !p.gotAny(ADD, SUB, REM, MUL, QUO, XOR, AND, OR, LSS, GTR,
SHR, SHL, QUEST, COLON, ASSIGN) {
p.curErr("not a valid arithmetic operator")
}
b := BinaryExpr{
OpPos: p.lpos,
Op: p.ltok,
X: left,
}
if b.Y = p.arithmExpr(b.Op); b.Y == nil {
p.followErr(b.OpPos, b.Op, "an expression")
}
return b
}
func (p *parser) arithmExprBase(following Token) Node {
switch {
case p.got(LPAREN):
pe := ParenExpr{Lparen: p.lpos}
pe.X = p.arithmExpr(LPAREN)
if pe.X == nil {
p.posErr(pe.Lparen, "parentheses must enclose an expression")
}
pe.Rparen = p.matchedTok(pe.Lparen, LPAREN, RPAREN)
return pe
case p.gotAny(ADD, SUB):
ue := UnaryExpr{
OpPos: p.lpos,
Op: p.ltok,
}
ue.X = p.arithmExpr(ue.Op)
if ue.X == nil {
p.followErr(ue.OpPos, ue.Op, "an expression")
}
return ue
default:
return p.followWord(following)
}
}
func (p *parser) gotParamLit(l *Lit) bool {
if p.gotLit(l) {
return true
}
switch {
case p.got(DOLLAR), p.got(QUEST):
l.Value = p.lval
default:
return false
}
return true
}
func (p *parser) paramExp(dpos Pos) (pe ParamExp) {
pe.Dollar = dpos
lpos := p.npos
p.readOnly("{")
p.enterStops(LBRACE)
pe.Length = p.got(HASH)
if !p.gotParamLit(&pe.Param) && !pe.Length {
p.posErr(pe.Dollar, "parameter expansion requires a literal")
}
if p.peek(RBRACE) {
p.popStop()
p.next()
return
}
if pe.Length {
p.posErr(pe.Dollar, `string lengths must be like "${#foo}"`)
}
pe.Exp = &Expansion{Op: p.tok}
p.popStop()
p.enterStops(RBRACE)
p.gotWord(&pe.Exp.Word)
p.popStop()
if !p.got(RBRACE) {
p.matchingErr(lpos, LBRACE, RBRACE)
}
return
}
func (p *parser) gotArithmStart() bool {
if p.peek(LPAREN) && p.readOnly("(") {
p.enterStops(DRPAREN)
return true
}
return false
}
func (p *parser) peekArithmEnd() bool {
return p.peek(RPAREN) && p.peekAnyByte(')')
}
func (p *parser) arithmEnd(left Pos) Pos {
if !p.peekArithmEnd() {
p.matchingErr(left, DLPAREN, DRPAREN)
}
right := p.pos
p.readOnly(")")
p.popStop()
p.next()
return right
}
func (p *parser) wordList(ws *[]Word, stops ...Token) {
for !p.peekEnd() && !p.peekAny(stops...) {
var w Word
if !p.gotWord(&w) {
p.curErr("word list can only contain words")
}
*ws = append(*ws, w)
}
p.gotSameLine(SEMICOLON)
}
func (p *parser) peekEnd() bool {
return p.eof() || p.newLine || p.peek(SEMICOLON)
}
func (p *parser) peekStop() bool {
if p.peekEnd() || p.peekAny(AND, OR, LAND, LOR) {
return true
}
for i := len(p.stops) - 1; i >= 0; i-- {
stop := p.stops[i]
if p.peek(stop) {
return true
}
if stop == '`' || stop == RPAREN {
break
}
}
return false
}
func (p *parser) peekRedir() bool {
if p.peek(LIT) && p.peekAnyByte('>', '<') {
return true
}
return p.peekAny(GTR, SHR, LSS, DPLIN, DPLOUT, RDRINOUT,
SHL, DHEREDOC, WHEREDOC)
}
var identRe = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*$`)
func (p *parser) assignSplit() int {
if !p.peek(LIT) {
return -1
}
i := strings.IndexByte(p.val, '=')
if i == -1 || !identRe.MatchString(p.val[:i]) {
return -1
}
return i
}
func (p *parser) getAssign() (Assign, bool) {
var as Assign
i := p.assignSplit()
if i < 0 {
return as, false
}
as.Name = Lit{ValuePos: p.pos, Value: p.val[:i]}
start := Lit{
ValuePos: p.pos,
Value: p.val[i+1:],
}
if start.Value != "" {
start.ValuePos.Column += len(as.Name.Value)
as.Value.Parts = append(as.Value.Parts, start)
}
p.next()
if p.spaced {
return as, true
}
if start.Value == "" && p.got(LPAREN) {
ae := ArrayExpr{Lparen: p.lpos}
p.wordList(&ae.List, RPAREN)
ae.Rparen = p.matchedTok(ae.Lparen, LPAREN, RPAREN)
as.Value.Parts = append(as.Value.Parts, ae)
} else if !p.peekStop() {
p.gotWord(&as.Value)
}
return as, true
}
func (p *parser) gotStmt(s *Stmt, stops ...Token) bool {
if p.peek(RBRACE) {
// don't let it be a LIT
return false
}
s.Position = p.pos
if p.got(NOT) {
s.Negated = true
}
addRedir := func() {
s.Redirs = append(s.Redirs, p.redirect())
}
for {
if as, ok := p.getAssign(); ok {
s.Assigns = append(s.Assigns, as)
} else if p.peekRedir() {
addRedir()
} else {
break
}
if p.newLine {
return true
}
}
p.gotStmtAndOr(s, addRedir)
if !p.newLine {
for p.peekRedir() {
addRedir()
}
}
if !s.Negated && s.Node == nil && len(s.Assigns) == 0 && len(s.Redirs) == 0 {
return false
}
if _, ok := s.Node.(FuncDecl); ok {
return true
}
switch {
case p.got(LAND), p.got(LOR):
*s = p.binaryStmt(*s, addRedir)
return true
case p.got(AND):
s.Background = true
}
p.gotSameLine(SEMICOLON)
return true
}
func (p *parser) gotStmtAndOr(s *Stmt, addRedir func()) bool {
s.Position = p.pos
switch {
case p.peek(LPAREN):
s.Node = p.subshell()
case p.got(LBRACE):
s.Node = p.block()
case p.got(IF):
s.Node = p.ifStmt()
case p.got(WHILE):
s.Node = p.whileStmt()
case p.got(UNTIL):
s.Node = p.untilStmt()
case p.got(FOR):
s.Node = p.forStmt()
case p.got(CASE):
s.Node = p.caseStmt()
case p.got(DECLARE):
s.Node = p.declStmt()
case p.peekAny(LIT, DOLLAR, '"', '\'', '`'):
s.Node = p.cmdOrFunc(addRedir)
default:
return false
}
if p.got(OR) {
*s = p.binaryStmt(*s, addRedir)
}
return true
}
func (p *parser) binaryStmt(left Stmt, addRedir func()) Stmt {
b := BinaryExpr{
OpPos: p.lpos,
Op: p.ltok,
X: left,
}
var s Stmt
if b.Op == LAND || b.Op == LOR {
s = p.followStmt(b.OpPos, b.Op.String())
} else if !p.gotStmtAndOr(&s, addRedir) {
p.followErr(b.OpPos, b.Op, "a statement")
}
b.Y = s
return Stmt{
Position: left.Position,
Node: b,
}
}
// unquote returns a copy of w with one level of quoting removed:
// single-quoted parts become plain literals and double-quoted parts
// are flattened into their contents.
func unquote(w Word) Word {
	var out Word
	for _, part := range w.Parts {
		switch q := part.(type) {
		case SglQuoted:
			out.Parts = append(out.Parts, Lit{Value: q.Value})
		case DblQuoted:
			out.Parts = append(out.Parts, q.Parts...)
		default:
			out.Parts = append(out.Parts, part)
		}
	}
	return out
}
func (p *parser) redirect() (r Redirect) {
p.gotLit(&r.N)
r.Op = p.tok
r.OpPos = p.pos
p.next()
switch r.Op {
case SHL, DHEREDOC:
p.stopNewline = true
r.Word = p.followWord(r.Op)
p.stopNewline = false
p.heredocs = append(p.heredocs, &r.Word)
p.got(STOPPED)
default:
r.Word = p.followWord(r.Op)
}
return
}
func (p *parser) subshell() (s Subshell) {
s.Lparen = p.pos
s.Stmts = p.stmtsNested(RPAREN)
s.Rparen = p.matchedTok(s.Lparen, LPAREN, RPAREN)
return
}
func (p *parser) block() (b Block) {
b.Lbrace = p.lpos
b.Stmts = p.stmts(RBRACE)
b.Rbrace = p.matchedTok(b.Lbrace, LBRACE, RBRACE)
return
}
func (p *parser) ifStmt() (fs IfStmt) {
fs.If = p.lpos
fs.Conds = p.followStmts(IF, THEN)
p.followTok(fs.If, "if [stmts]", THEN)
fs.ThenStmts = p.followStmts(THEN, FI, ELIF, ELSE)
for p.got(ELIF) {
elf := Elif{Elif: p.lpos}
elf.Conds = p.followStmts(ELIF, THEN)
p.followTok(elf.Elif, "elif [stmts]", THEN)
elf.ThenStmts = p.followStmts(THEN, FI, ELIF, ELSE)
fs.Elifs = append(fs.Elifs, elf)
}
if p.got(ELSE) {
fs.ElseStmts = p.followStmts(ELSE, FI)
}
p.stmtEnd(fs.If, IF, FI, &fs.Fi)
return
}
func (p *parser) whileStmt() (ws WhileStmt) {
ws.While = p.lpos
ws.Conds = p.followStmts(WHILE, DO)
p.followTok(ws.While, "while [stmts]", DO)
ws.DoStmts = p.followStmts(DO, DONE)
p.stmtEnd(ws.While, WHILE, DONE, &ws.Done)
return
}
func (p *parser) untilStmt() (us UntilStmt) {
us.Until = p.lpos
us.Conds = p.followStmts(UNTIL, DO)
p.followTok(us.Until, "until [stmts]", DO)
us.DoStmts = p.followStmts(DO, DONE)
p.stmtEnd(us.Until, UNTIL, DONE, &us.Done)
return
}
func (p *parser) forStmt() (fs ForStmt) {
fs.For = p.lpos
if p.gotArithmStart() {
c := CStyleLoop{Lparen: p.lpos}
c.Init = p.arithmExpr(DLPAREN)
p.followTok(c.Init.Pos(), "expression", SEMICOLON)
c.Cond = p.arithmExpr(SEMICOLON)
p.followTok(c.Cond.Pos(), "expression", SEMICOLON)
c.Post = p.arithmExpr(SEMICOLON)
c.Rparen = p.arithmEnd(c.Lparen)
p.gotSameLine(SEMICOLON)
fs.Cond = c
} else {
var w WordIter
if !p.gotLit(&w.Name) {
p.followErr(fs.For, FOR, "a literal")
}
if p.got(IN) {
p.wordList(&w.List)
} else if !p.gotSameLine(SEMICOLON) && !p.newLine {
p.followErr(fs.For, "for foo", `"in", ; or a newline`)
}
fs.Cond = w
}
p.followTok(fs.For, "for foo [in words]", DO)
fs.DoStmts = p.followStmts(DO, DONE)
p.stmtEnd(fs.For, FOR, DONE, &fs.Done)
return
}
func (p *parser) caseStmt() (cs CaseStmt) {
cs.Case = p.lpos
cs.Word = p.followWord(CASE)
p.followTok(cs.Case, "case x", IN)
cs.List = p.patLists()
p.stmtEnd(cs.Case, CASE, ESAC, &cs.Esac)
return
}
func (p *parser) patLists() (pls []PatternList) {
if p.gotSameLine(SEMICOLON) {
return
}
for !p.eof() && !p.peek(ESAC) {
var pl PatternList
p.got(LPAREN)
for !p.eof() {
var w Word
if !p.gotWord(&w) {
p.curErr("case patterns must consist of words")
}
pl.Patterns = append(pl.Patterns, w)
if p.peek(RPAREN) {
break
}
if !p.got(OR) {
p.curErr("case patterns must be separated with |")
}
}
pl.Stmts = p.stmtsNested(DSEMICOLON, ESAC)
pls = append(pls, pl)
if !p.got(DSEMICOLON) {
break
}
}
return
}
func (p *parser) declStmt() Node {
ds := DeclStmt{Declare: p.lpos}
for p.peek(LIT) && p.peekSpaced() && p.val[0] == '-' {
var w Word
p.gotWord(&w)
ds.Opts = append(ds.Opts, w)
}
for !p.peekStop() {
if as, ok := p.getAssign(); ok {
ds.Assigns = append(ds.Assigns, as)
} else if p.peek(LIT) && p.peekSpaced() {
if !identRe.MatchString(p.val) {
p.curErr("invalid var name: %s", p.val)
}
ds.Assigns = append(ds.Assigns, Assign{
NameOnly: true,
Name: Lit{
ValuePos: p.pos,
Value: p.val,
},
})
p.next()
} else {
p.curErr("declare statements must be followed by literals")
}
}
return ds
}
func (p *parser) cmdOrFunc(addRedir func()) Node {
if p.got(FUNCTION) {
fpos := p.lpos
w := p.followWord(FUNCTION)
if p.gotSameLine(LPAREN) {
p.followTok(w.Pos(), "foo(", RPAREN)
}
return p.funcDecl(w, fpos)
}
var w Word
p.gotWord(&w)
if p.gotSameLine(LPAREN) {
p.followTok(w.Pos(), "foo(", RPAREN)
return p.funcDecl(w, w.Pos())
}
cmd := Command{Args: []Word{w}}
for !p.peekStop() {
var w Word
switch {
case p.peekRedir():
addRedir()
case p.gotWord(&w):
cmd.Args = append(cmd.Args, w)
default:
p.curErr("a command can only contain words and redirects")
}
}
return cmd
}
func (p *parser) funcDecl(w Word, pos Pos) FuncDecl {
fd := FuncDecl{
Position: pos,
BashStyle: pos != w.Pos(),
Name: Lit{
Value: w.String(),
ValuePos: w.Pos(),
},
}
if !identRe.MatchString(fd.Name.Value) {
p.posErr(fd.Pos(), "invalid func name: %s", fd.Name.Value)
}
fd.Body = p.followStmt(fd.Pos(), "foo()")
return fd
}
Stop treating func decls differently as statements
This special-casing was only necessary back when gotEnd/wantEnd were around.
// Copyright (c) 2016, Daniel Martí <mvdan@mvdan.cc>
// See LICENSE for licensing information
package sh
import (
"bufio"
"bytes"
"fmt"
"io"
"regexp"
"strconv"
"strings"
)
// Parse reads and parses a shell program with an optional name. It
// returns the parsed program if no issues were encountered. Otherwise,
// an error is returned.
func Parse(r io.Reader, name string) (File, error) {
p := &parser{
br: bufio.NewReader(r),
file: File{
Name: name,
},
npos: Pos{
Line: 1,
Column: 1,
},
}
p.next()
p.file.Stmts = p.stmts()
return p.file, p.err
}
type parser struct {
br *bufio.Reader
file File
err error
spaced, newLine bool
ltok, tok Token
lval, val string
lpos, pos, npos Pos
// stack of stop tokens
stops []Token
stopNewline bool
heredocs []*Word
}
func (p *parser) enterStops(stops ...Token) {
p.stops = append(p.stops, stops...)
p.next()
}
func (p *parser) quoted(tok Token) bool {
return len(p.stops) > 0 && p.stops[len(p.stops)-1] == tok
}
func (p *parser) popStops(n int) { p.stops = p.stops[:len(p.stops)-n] }
func (p *parser) popStop() { p.popStops(1) }
func (p *parser) readByte() (byte, error) {
b, err := p.br.ReadByte()
if err != nil {
p.errPass(err)
return 0, err
}
p.moveWith(b)
return b, nil
}
func (p *parser) consumeByte() { p.readByte() }
func (p *parser) moveWith(b byte) {
if b == '\n' {
p.npos.Line++
p.npos.Column = 1
} else {
p.npos.Column++
}
}
func (p *parser) peekByte() (byte, error) {
bs, err := p.br.Peek(1)
if err != nil {
return 0, err
}
return bs[0], nil
}
func (p *parser) peekString(s string) bool {
bs, err := p.br.Peek(len(s))
return err == nil && string(bs) == s
}
func (p *parser) peekAnyByte(bs ...byte) bool {
peek, err := p.br.Peek(1)
if err != nil {
return false
}
return bytes.IndexByte(bs, peek[0]) >= 0
}
func (p *parser) readOnly(s string) bool {
if p.peekString(s) {
for i := 0; i < len(s); i++ {
p.consumeByte()
}
return true
}
return false
}
var (
// bytes that form or start a token
reserved = map[byte]bool{
'&': true,
'>': true,
'<': true,
'|': true,
';': true,
'(': true,
')': true,
'$': true,
'"': true,
'\'': true,
'`': true,
}
// subset of the above that mark the end of a word
wordBreak = map[byte]bool{
'&': true,
'>': true,
'<': true,
'|': true,
';': true,
'(': true,
')': true,
}
// tokenize these inside parameter expansions
paramOps = map[byte]bool{
'}': true,
'#': true,
':': true,
'-': true,
'+': true,
'=': true,
'?': true,
'%': true,
}
// tokenize these inside arithmetic expansions
arithmOps = map[byte]bool{
'+': true,
'-': true,
'!': true,
'*': true,
'/': true,
'%': true,
'^': true,
'<': true,
'>': true,
':': true,
'=': true,
}
// bytes that will be treated as space
space = map[byte]bool{
' ': true,
'\t': true,
'\n': true,
}
)
// next advances the tokenizer to the following token, skipping
// whitespace and escaped newlines, and choosing how to tokenize the
// next byte based on the active stop-token context.
func (p *parser) next() {
	if p.tok == EOF {
		return
	}
	p.lpos, p.pos = p.pos, p.npos
	p.spaced, p.newLine = false, false
	var b byte
	for {
		// An escaped newline is a pure line continuation; skip it.
		if p.readOnly("\\\n") {
			continue
		}
		var err error
		if b, err = p.peekByte(); err != nil {
			p.errPass(err)
			return
		}
		// With a heredoc pending, a newline halts the token stream so
		// the heredoc body can be consumed.
		if p.stopNewline && b == '\n' {
			p.advanceTok(STOPPED)
			return
		}
		// Whitespace is not skipped inside double quotes.
		if p.quoted('"') || !space[b] {
			break
		}
		p.consumeByte()
		p.pos = p.npos
		p.spaced = true
		if b == '\n' {
			p.newLine = true
			if len(p.heredocs) > 0 {
				p.doHeredocs()
				return
			}
		}
	}
	switch {
	case p.quoted(RBRACE) && b == '}', p.quoted(LBRACE) && paramOps[b]:
		if p.readOnly("}") {
			// '}' is a token only in this context
			p.advanceTok(RBRACE)
		} else {
			p.advanceTok(p.doToken(b))
		}
	case b == '#' && !p.quoted('"'):
		p.advanceBoth(COMMENT, p.readLine())
	case reserved[b]:
		// Between double quotes, only under certain
		// circumstances do we tokenize
		if p.quoted('"') {
			switch {
			case b == '`', b == '"', b == '$', p.tok == DOLLAR:
			default:
				p.advanceReadLit()
				return
			}
		}
		fallthrough
	case p.quoted(DRPAREN) && arithmOps[b]:
		p.advanceTok(p.doToken(b))
	default:
		p.advanceReadLit()
	}
}
func (p *parser) advanceReadLit() { p.advanceBoth(LIT, string(p.readLitBytes())) }
func (p *parser) readLitBytes() (bs []byte) {
for {
if p.readOnly("\\") { // escaped byte
if b, _ := p.readByte(); b != '\n' {
bs = append(bs, '\\', b)
}
continue
}
b, err := p.peekByte()
if err != nil {
return
}
switch {
case b == '$', b == '`':
return
case p.quoted(RBRACE) && b == '}':
return
case p.quoted(LBRACE) && paramOps[b]:
return
case p.quoted('"'):
if b == '"' {
return
}
case reserved[b], space[b]:
return
case p.quoted(DRPAREN) && arithmOps[b]:
return
}
p.consumeByte()
bs = append(bs, b)
}
}
func (p *parser) advanceTok(tok Token) { p.advanceBoth(tok, tok.String()) }
func (p *parser) advanceBoth(tok Token, val string) {
if p.tok != EOF {
p.ltok = p.tok
p.lval = p.val
}
p.tok = tok
p.val = val
}
func (p *parser) readUntil(s string) (string, bool) {
var bs []byte
for {
if p.peekString(s) {
return string(bs), true
}
b, err := p.readByte()
if err != nil {
return string(bs), false
}
bs = append(bs, b)
}
}
func (p *parser) readLine() string {
s, _ := p.readUntil("\n")
return s
}
// doHeredocs reads the pending heredoc bodies, rewriting each
// redirect's word to a single literal containing the delimiter plus
// body, then resumes tokenizing.
func (p *parser) doHeredocs() {
	for i, w := range p.heredocs {
		// The terminating line is the delimiter word, unquoted.
		endLine := unquote(*w).String()
		if i > 0 {
			p.readOnly("\n")
		}
		s, _ := p.readHeredocContent(endLine)
		w.Parts[0] = Lit{
			ValuePos: w.Pos(),
			Value:    fmt.Sprintf("%s\n%s", w, s),
		}
		w.Parts = w.Parts[:1]
	}
	p.heredocs = nil
	p.next()
}

// readHeredocContent reads lines until endLine is found, returning the
// accumulated body and whether the delimiter was seen before EOF.
func (p *parser) readHeredocContent(endLine string) (string, bool) {
	var buf bytes.Buffer
	for !p.eof() {
		line := p.readLine()
		if line == endLine {
			fmt.Fprint(&buf, line)
			return buf.String(), true
		}
		fmt.Fprintln(&buf, line)
		p.readOnly("\n")
	}
	// Delimiter never found before EOF; append it anyway.
	fmt.Fprint(&buf, endLine)
	return buf.String(), false
}
func (p *parser) peek(tok Token) bool {
for p.tok == COMMENT {
p.next()
}
return p.tok == tok || p.peekReservedWord(tok)
}
func (p *parser) peekReservedWord(tok Token) bool {
return p.val == tokNames[tok] && p.peekSpaced()
}
func (p *parser) peekSpaced() bool {
b, err := p.peekByte()
return err != nil || space[b] || wordBreak[b]
}
func (p *parser) eof() bool {
p.peek(COMMENT)
return p.tok == EOF
}
func (p *parser) peekAny(toks ...Token) bool {
for _, tok := range toks {
if p.peek(tok) {
return true
}
}
return false
}
func (p *parser) got(tok Token) bool {
if p.peek(tok) {
p.next()
return true
}
return false
}
func (p *parser) gotSameLine(tok Token) bool { return !p.newLine && p.got(tok) }
func (p *parser) gotAny(toks ...Token) bool {
for _, tok := range toks {
if p.got(tok) {
return true
}
}
return false
}
// readableStr returns a human-readable form of v — either a string or
// a Token — for use in error messages. Word-like values (starting with
// a lowercase letter, e.g. reserved words) are quoted; symbolic tokens
// like & or } are left bare.
func readableStr(v interface{}) string {
	var s string
	switch x := v.(type) {
	case string:
		s = x
	case Token:
		s = x.String()
	}
	// Guard against an empty string (or a value of an unexpected
	// type, which leaves s empty) — indexing s[0] would panic.
	if s == "" {
		return s
	}
	// don't quote tokens like & or }
	if s[0] >= 'a' && s[0] <= 'z' {
		return strconv.Quote(s)
	}
	return s
}
func (p *parser) followErr(lpos Pos, left interface{}, right string) {
leftStr := readableStr(left)
p.posErr(lpos, "%s must be followed by %s", leftStr, right)
}
func (p *parser) followTok(lpos Pos, left string, tok Token) {
if !p.got(tok) {
p.followErr(lpos, left, fmt.Sprintf(`%q`, tok))
}
}
func (p *parser) followStmt(lpos Pos, left string) (s Stmt) {
if !p.gotStmt(&s) {
p.followErr(lpos, left, "a statement")
}
return
}
func (p *parser) followStmts(left Token, stops ...Token) []Stmt {
if p.gotSameLine(SEMICOLON) {
return nil
}
sts := p.stmts(stops...)
if len(sts) < 1 && !p.newLine {
p.followErr(p.lpos, left, "a statement list")
}
return sts
}
func (p *parser) followWord(left Token) (w Word) {
if !p.gotWord(&w) {
p.followErr(p.lpos, left, "a word")
}
return
}
func (p *parser) stmtEnd(startPos Pos, startTok, tok Token, pos *Pos) {
if !p.got(tok) {
p.posErr(startPos, `%s statement must end with %q`, startTok, tok)
}
*pos = p.lpos
}
func (p *parser) closingQuote(lpos Pos, b byte) {
tok := Token(b)
if !p.got(tok) {
p.posErr(lpos, `reached %s without closing quote %s`, p.tok, tok)
}
}
func (p *parser) matchingErr(lpos Pos, left, right Token) {
p.posErr(lpos, `reached %s without matching token %s with %s`,
p.tok, left, right)
}
func (p *parser) matchedTok(lpos Pos, left, right Token) Pos {
if !p.got(right) {
p.matchingErr(lpos, left, right)
}
return p.lpos
}
func (p *parser) errPass(err error) {
if p.err == nil && err != io.EOF {
p.err = err
}
p.advanceTok(EOF)
}
type lineErr struct {
pos Position
text string
}
func (e lineErr) Error() string {
return fmt.Sprintf("%s: %s", e.pos, e.text)
}
func (p *parser) posErr(pos Pos, format string, v ...interface{}) {
p.errPass(lineErr{
pos: Position{
Filename: p.file.Name,
Line: pos.Line,
Column: pos.Column,
},
text: fmt.Sprintf(format, v...),
})
}
func (p *parser) curErr(format string, v ...interface{}) {
p.posErr(p.pos, format, v...)
}
// stmts parses statements until EOF or one of the stop tokens,
// requiring &, ; or a newline between consecutive statements.
func (p *parser) stmts(stops ...Token) (sts []Stmt) {
	for !p.eof() && !p.peekAny(stops...) {
		gotEnd := p.newLine || p.ltok == AND || p.ltok == SEMICOLON
		if len(sts) > 0 && !gotEnd {
			p.curErr("statements must be separated by &, ; or a newline")
		}
		var s Stmt
		if !p.gotStmt(&s, stops...) {
			p.invalidStmtStart()
		}
		sts = append(sts, s)
	}
	return
}

// invalidStmtStart reports a token that cannot begin a statement,
// with a message tailored to the offending token.
func (p *parser) invalidStmtStart() {
	switch {
	case p.peekAny(SEMICOLON, AND, OR, LAND, LOR):
		p.curErr("%s can only immediately follow a statement", p.tok)
	case p.peek(RBRACE):
		p.curErr("%s can only be used to close a block", p.val)
	case p.peek(RPAREN):
		p.curErr("%s can only be used to close a subshell", p.tok)
	default:
		p.curErr("%s is not a valid start for a statement", p.tok)
	}
}

// stmtsNested parses statements with additional stop tokens pushed,
// popping them once done.
func (p *parser) stmtsNested(stops ...Token) []Stmt {
	p.enterStops(stops...)
	sts := p.stmts(stops...)
	p.popStops(len(stops))
	return sts
}
func (p *parser) gotWord(w *Word) bool {
p.readParts(&w.Parts)
return len(w.Parts) > 0
}
func (p *parser) gotLit(l *Lit) bool {
l.ValuePos = p.pos
if p.got(LIT) {
l.Value = p.lval
return true
}
return false
}
func (p *parser) readParts(ns *[]Node) {
for {
n := p.wordPart()
if n == nil {
break
}
*ns = append(*ns, n)
if p.spaced {
break
}
}
}
func (p *parser) wordPart() Node {
switch {
case p.peek(DOLLAR):
switch {
case p.peekAnyByte('('):
// otherwise it is seen as a word break
case p.peekAnyByte('\'', '"', '`'), p.peekSpaced():
p.next()
return Lit{
ValuePos: p.lpos,
Value: p.lval,
}
}
return p.dollar()
case p.got(LIT):
return Lit{
ValuePos: p.lpos,
Value: p.lval,
}
case p.peek('\''):
sq := SglQuoted{Quote: p.pos}
s, found := p.readUntil("'")
if !found {
p.closingQuote(sq.Quote, '\'')
}
sq.Value = s
p.readOnly("'")
p.next()
return sq
case !p.quoted('"') && p.peek('"'):
dq := DblQuoted{Quote: p.pos}
p.enterStops('"')
p.readParts(&dq.Parts)
p.popStop()
p.closingQuote(dq.Quote, '"')
return dq
case !p.quoted('`') && p.peek('`'):
cs := CmdSubst{Backquotes: true, Left: p.pos}
cs.Stmts = p.stmtsNested('`')
p.closingQuote(cs.Left, '`')
cs.Right = p.lpos
return cs
}
return nil
}
func (p *parser) dollar() Node {
dpos := p.pos
if p.peekAnyByte('{') {
return p.paramExp(dpos)
}
if p.readOnly("#") {
p.advanceTok(Token('#'))
} else {
p.next()
}
lpos := p.pos
switch {
case p.gotArithmStart():
ar := ArithmExpr{
Dollar: dpos,
X: p.arithmExpr(DLPAREN),
}
ar.Rparen = p.arithmEnd(lpos)
return ar
case p.peek(LPAREN):
cs := CmdSubst{Left: dpos}
cs.Stmts = p.stmtsNested(RPAREN)
cs.Right = p.matchedTok(lpos, LPAREN, RPAREN)
return cs
default:
p.next()
return ParamExp{
Dollar: dpos,
Short: true,
Param: Lit{
ValuePos: p.lpos,
Value: p.lval,
},
}
}
}
func (p *parser) arithmExpr(following Token) Node {
if p.eof() || p.peekArithmEnd() {
return nil
}
var left Node
if p.gotAny(INC, DEC) {
pre := UnaryExpr{
OpPos: p.lpos,
Op: p.ltok,
}
pre.X = p.arithmExprBase(pre.Op)
left = pre
} else if left = p.arithmExprBase(following); p.gotAny(INC, DEC) {
left = UnaryExpr{
Post: true,
OpPos: p.lpos,
Op: p.ltok,
X: left,
}
}
if p.eof() || p.peekAny(RPAREN, SEMICOLON) {
return left
}
if !p.gotAny(ADD, SUB, REM, MUL, QUO, XOR, AND, OR, LSS, GTR,
SHR, SHL, QUEST, COLON, ASSIGN) {
p.curErr("not a valid arithmetic operator")
}
b := BinaryExpr{
OpPos: p.lpos,
Op: p.ltok,
X: left,
}
if b.Y = p.arithmExpr(b.Op); b.Y == nil {
p.followErr(b.OpPos, b.Op, "an expression")
}
return b
}
func (p *parser) arithmExprBase(following Token) Node {
switch {
case p.got(LPAREN):
pe := ParenExpr{Lparen: p.lpos}
pe.X = p.arithmExpr(LPAREN)
if pe.X == nil {
p.posErr(pe.Lparen, "parentheses must enclose an expression")
}
pe.Rparen = p.matchedTok(pe.Lparen, LPAREN, RPAREN)
return pe
case p.gotAny(ADD, SUB):
ue := UnaryExpr{
OpPos: p.lpos,
Op: p.ltok,
}
ue.X = p.arithmExpr(ue.Op)
if ue.X == nil {
p.followErr(ue.OpPos, ue.Op, "an expression")
}
return ue
default:
return p.followWord(following)
}
}
func (p *parser) gotParamLit(l *Lit) bool {
if p.gotLit(l) {
return true
}
switch {
case p.got(DOLLAR), p.got(QUEST):
l.Value = p.lval
default:
return false
}
return true
}
func (p *parser) paramExp(dpos Pos) (pe ParamExp) {
pe.Dollar = dpos
lpos := p.npos
p.readOnly("{")
p.enterStops(LBRACE)
pe.Length = p.got(HASH)
if !p.gotParamLit(&pe.Param) && !pe.Length {
p.posErr(pe.Dollar, "parameter expansion requires a literal")
}
if p.peek(RBRACE) {
p.popStop()
p.next()
return
}
if pe.Length {
p.posErr(pe.Dollar, `string lengths must be like "${#foo}"`)
}
pe.Exp = &Expansion{Op: p.tok}
p.popStop()
p.enterStops(RBRACE)
p.gotWord(&pe.Exp.Word)
p.popStop()
if !p.got(RBRACE) {
p.matchingErr(lpos, LBRACE, RBRACE)
}
return
}
func (p *parser) gotArithmStart() bool {
if p.peek(LPAREN) && p.readOnly("(") {
p.enterStops(DRPAREN)
return true
}
return false
}
func (p *parser) peekArithmEnd() bool {
return p.peek(RPAREN) && p.peekAnyByte(')')
}
func (p *parser) arithmEnd(left Pos) Pos {
if !p.peekArithmEnd() {
p.matchingErr(left, DLPAREN, DRPAREN)
}
right := p.pos
p.readOnly(")")
p.popStop()
p.next()
return right
}
func (p *parser) wordList(ws *[]Word, stops ...Token) {
for !p.peekEnd() && !p.peekAny(stops...) {
var w Word
if !p.gotWord(&w) {
p.curErr("word list can only contain words")
}
*ws = append(*ws, w)
}
p.gotSameLine(SEMICOLON)
}
// peekEnd reports whether the current position ends a statement:
// EOF, a newline, or a semicolon.
func (p *parser) peekEnd() bool {
	return p.eof() || p.newLine || p.peek(SEMICOLON)
}

// peekStop reports whether word/command consumption should stop here:
// a statement end, a binary operator, or an active stop token.
func (p *parser) peekStop() bool {
	if p.peekEnd() || p.peekAny(AND, OR, LAND, LOR) {
		return true
	}
	for i := len(p.stops) - 1; i >= 0; i-- {
		stop := p.stops[i]
		if p.peek(stop) {
			return true
		}
		// Backquotes and subshells shield the outer stop tokens.
		if stop == '`' || stop == RPAREN {
			break
		}
	}
	return false
}

// peekRedir reports whether a redirection follows, possibly preceded
// by a file-descriptor literal (e.g. 2>).
func (p *parser) peekRedir() bool {
	if p.peek(LIT) && p.peekAnyByte('>', '<') {
		return true
	}
	return p.peekAny(GTR, SHR, LSS, DPLIN, DPLOUT, RDRINOUT,
		SHL, DHEREDOC, WHEREDOC)
}
var identRe = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*$`)
func (p *parser) assignSplit() int {
if !p.peek(LIT) {
return -1
}
i := strings.IndexByte(p.val, '=')
if i == -1 || !identRe.MatchString(p.val[:i]) {
return -1
}
return i
}
// getAssign tries to parse a variable assignment — name=value or an
// array assignment name=(a b c) — reporting whether one was consumed.
func (p *parser) getAssign() (Assign, bool) {
	var as Assign
	i := p.assignSplit()
	if i < 0 {
		return as, false
	}
	as.Name = Lit{ValuePos: p.pos, Value: p.val[:i]}
	// Whatever follows '=' within the same literal starts the value.
	start := Lit{
		ValuePos: p.pos,
		Value:    p.val[i+1:],
	}
	if start.Value != "" {
		start.ValuePos.Column += len(as.Name.Value)
		as.Value.Parts = append(as.Value.Parts, start)
	}
	p.next()
	if p.spaced {
		// A space after the literal ends the assignment value.
		return as, true
	}
	if start.Value == "" && p.got(LPAREN) {
		// Array assignment: name=(word ...).
		ae := ArrayExpr{Lparen: p.lpos}
		p.wordList(&ae.List, RPAREN)
		ae.Rparen = p.matchedTok(ae.Lparen, LPAREN, RPAREN)
		as.Value.Parts = append(as.Value.Parts, ae)
	} else if !p.peekStop() {
		p.gotWord(&as.Value)
	}
	return as, true
}
func (p *parser) gotStmt(s *Stmt, stops ...Token) bool {
if p.peek(RBRACE) {
// don't let it be a LIT
return false
}
s.Position = p.pos
if p.got(NOT) {
s.Negated = true
}
addRedir := func() {
s.Redirs = append(s.Redirs, p.redirect())
}
for {
if as, ok := p.getAssign(); ok {
s.Assigns = append(s.Assigns, as)
} else if p.peekRedir() {
addRedir()
} else {
break
}
if p.newLine {
return true
}
}
p.gotStmtAndOr(s, addRedir)
if !p.newLine {
for p.peekRedir() {
addRedir()
}
}
if !s.Negated && s.Node == nil && len(s.Assigns) == 0 && len(s.Redirs) == 0 {
return false
}
switch {
case p.got(LAND), p.got(LOR):
*s = p.binaryStmt(*s, addRedir)
return true
case p.got(AND):
s.Background = true
}
p.gotSameLine(SEMICOLON)
return true
}
func (p *parser) gotStmtAndOr(s *Stmt, addRedir func()) bool {
s.Position = p.pos
switch {
case p.peek(LPAREN):
s.Node = p.subshell()
case p.got(LBRACE):
s.Node = p.block()
case p.got(IF):
s.Node = p.ifStmt()
case p.got(WHILE):
s.Node = p.whileStmt()
case p.got(UNTIL):
s.Node = p.untilStmt()
case p.got(FOR):
s.Node = p.forStmt()
case p.got(CASE):
s.Node = p.caseStmt()
case p.got(DECLARE):
s.Node = p.declStmt()
case p.peekAny(LIT, DOLLAR, '"', '\'', '`'):
s.Node = p.cmdOrFunc(addRedir)
default:
return false
}
if p.got(OR) {
*s = p.binaryStmt(*s, addRedir)
}
return true
}
func (p *parser) binaryStmt(left Stmt, addRedir func()) Stmt {
b := BinaryExpr{
OpPos: p.lpos,
Op: p.ltok,
X: left,
}
var s Stmt
if b.Op == LAND || b.Op == LOR {
s = p.followStmt(b.OpPos, b.Op.String())
} else if !p.gotStmtAndOr(&s, addRedir) {
p.followErr(b.OpPos, b.Op, "a statement")
}
b.Y = s
return Stmt{
Position: left.Position,
Node: b,
}
}
// unquote returns a copy of w with one level of quoting removed:
// single-quoted parts become plain literals and double-quoted parts
// are flattened into their contents.
func unquote(w Word) (unq Word) {
	for _, n := range w.Parts {
		switch x := n.(type) {
		case SglQuoted:
			unq.Parts = append(unq.Parts, Lit{Value: x.Value})
		case DblQuoted:
			unq.Parts = append(unq.Parts, x.Parts...)
		default:
			unq.Parts = append(unq.Parts, n)
		}
	}
	return unq
}
func (p *parser) redirect() (r Redirect) {
p.gotLit(&r.N)
r.Op = p.tok
r.OpPos = p.pos
p.next()
switch r.Op {
case SHL, DHEREDOC:
p.stopNewline = true
r.Word = p.followWord(r.Op)
p.stopNewline = false
p.heredocs = append(p.heredocs, &r.Word)
p.got(STOPPED)
default:
r.Word = p.followWord(r.Op)
}
return
}
func (p *parser) subshell() (s Subshell) {
s.Lparen = p.pos
s.Stmts = p.stmtsNested(RPAREN)
s.Rparen = p.matchedTok(s.Lparen, LPAREN, RPAREN)
return
}
func (p *parser) block() (b Block) {
b.Lbrace = p.lpos
b.Stmts = p.stmts(RBRACE)
b.Rbrace = p.matchedTok(b.Lbrace, LBRACE, RBRACE)
return
}
func (p *parser) ifStmt() (fs IfStmt) {
fs.If = p.lpos
fs.Conds = p.followStmts(IF, THEN)
p.followTok(fs.If, "if [stmts]", THEN)
fs.ThenStmts = p.followStmts(THEN, FI, ELIF, ELSE)
for p.got(ELIF) {
elf := Elif{Elif: p.lpos}
elf.Conds = p.followStmts(ELIF, THEN)
p.followTok(elf.Elif, "elif [stmts]", THEN)
elf.ThenStmts = p.followStmts(THEN, FI, ELIF, ELSE)
fs.Elifs = append(fs.Elifs, elf)
}
if p.got(ELSE) {
fs.ElseStmts = p.followStmts(ELSE, FI)
}
p.stmtEnd(fs.If, IF, FI, &fs.Fi)
return
}
func (p *parser) whileStmt() (ws WhileStmt) {
ws.While = p.lpos
ws.Conds = p.followStmts(WHILE, DO)
p.followTok(ws.While, "while [stmts]", DO)
ws.DoStmts = p.followStmts(DO, DONE)
p.stmtEnd(ws.While, WHILE, DONE, &ws.Done)
return
}
func (p *parser) untilStmt() (us UntilStmt) {
us.Until = p.lpos
us.Conds = p.followStmts(UNTIL, DO)
p.followTok(us.Until, "until [stmts]", DO)
us.DoStmts = p.followStmts(DO, DONE)
p.stmtEnd(us.Until, UNTIL, DONE, &us.Done)
return
}
func (p *parser) forStmt() (fs ForStmt) {
fs.For = p.lpos
if p.gotArithmStart() {
c := CStyleLoop{Lparen: p.lpos}
c.Init = p.arithmExpr(DLPAREN)
p.followTok(c.Init.Pos(), "expression", SEMICOLON)
c.Cond = p.arithmExpr(SEMICOLON)
p.followTok(c.Cond.Pos(), "expression", SEMICOLON)
c.Post = p.arithmExpr(SEMICOLON)
c.Rparen = p.arithmEnd(c.Lparen)
p.gotSameLine(SEMICOLON)
fs.Cond = c
} else {
var w WordIter
if !p.gotLit(&w.Name) {
p.followErr(fs.For, FOR, "a literal")
}
if p.got(IN) {
p.wordList(&w.List)
} else if !p.gotSameLine(SEMICOLON) && !p.newLine {
p.followErr(fs.For, "for foo", `"in", ; or a newline`)
}
fs.Cond = w
}
p.followTok(fs.For, "for foo [in words]", DO)
fs.DoStmts = p.followStmts(DO, DONE)
p.stmtEnd(fs.For, FOR, DONE, &fs.Done)
return
}
func (p *parser) caseStmt() (cs CaseStmt) {
cs.Case = p.lpos
cs.Word = p.followWord(CASE)
p.followTok(cs.Case, "case x", IN)
cs.List = p.patLists()
p.stmtEnd(cs.Case, CASE, ESAC, &cs.Esac)
return
}
func (p *parser) patLists() (pls []PatternList) {
if p.gotSameLine(SEMICOLON) {
return
}
for !p.eof() && !p.peek(ESAC) {
var pl PatternList
p.got(LPAREN)
for !p.eof() {
var w Word
if !p.gotWord(&w) {
p.curErr("case patterns must consist of words")
}
pl.Patterns = append(pl.Patterns, w)
if p.peek(RPAREN) {
break
}
if !p.got(OR) {
p.curErr("case patterns must be separated with |")
}
}
pl.Stmts = p.stmtsNested(DSEMICOLON, ESAC)
pls = append(pls, pl)
if !p.got(DSEMICOLON) {
break
}
}
return
}
func (p *parser) declStmt() Node {
ds := DeclStmt{Declare: p.lpos}
for p.peek(LIT) && p.peekSpaced() && p.val[0] == '-' {
var w Word
p.gotWord(&w)
ds.Opts = append(ds.Opts, w)
}
for !p.peekStop() {
if as, ok := p.getAssign(); ok {
ds.Assigns = append(ds.Assigns, as)
} else if p.peek(LIT) && p.peekSpaced() {
if !identRe.MatchString(p.val) {
p.curErr("invalid var name: %s", p.val)
}
ds.Assigns = append(ds.Assigns, Assign{
NameOnly: true,
Name: Lit{
ValuePos: p.pos,
Value: p.val,
},
})
p.next()
} else {
p.curErr("declare statements must be followed by literals")
}
}
return ds
}
func (p *parser) cmdOrFunc(addRedir func()) Node {
if p.got(FUNCTION) {
fpos := p.lpos
w := p.followWord(FUNCTION)
if p.gotSameLine(LPAREN) {
p.followTok(w.Pos(), "foo(", RPAREN)
}
return p.funcDecl(w, fpos)
}
var w Word
p.gotWord(&w)
if p.gotSameLine(LPAREN) {
p.followTok(w.Pos(), "foo(", RPAREN)
return p.funcDecl(w, w.Pos())
}
cmd := Command{Args: []Word{w}}
for !p.peekStop() {
var w Word
switch {
case p.peekRedir():
addRedir()
case p.gotWord(&w):
cmd.Args = append(cmd.Args, w)
default:
p.curErr("a command can only contain words and redirects")
}
}
return cmd
}
func (p *parser) funcDecl(w Word, pos Pos) FuncDecl {
fd := FuncDecl{
Position: pos,
BashStyle: pos != w.Pos(),
Name: Lit{
Value: w.String(),
ValuePos: w.Pos(),
},
}
if !identRe.MatchString(fd.Name.Value) {
p.posErr(fd.Pos(), "invalid func name: %s", fd.Name.Value)
}
fd.Body = p.followStmt(fd.Pos(), "foo()")
return fd
}
|
// Copyright (C) 2016 Kohei YOSHIDA. All rights reserved.
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of The BSD 3-Clause License
// that can be found in the LICENSE file.
package uritemplate
import (
"fmt"
"unicode"
"unicode/utf8"
)
type parseOp int
const (
parseOpSimple parseOp = iota
parseOpPlus
parseOpCrosshatch
parseOpDot
parseOpSlash
parseOpSemicolon
parseOpQuestion
parseOpAmpersand
)
var (
rangeVarchar = &unicode.RangeTable{
R16: []unicode.Range16{
{Lo: 0x0030, Hi: 0x0039, Stride: 1}, // '0' - '9'
{Lo: 0x0041, Hi: 0x005A, Stride: 1}, // 'A' - 'Z'
{Lo: 0x005F, Hi: 0x005F, Stride: 1}, // '_'
{Lo: 0x0061, Hi: 0x007A, Stride: 1}, // 'a' - 'z'
},
LatinOffset: 4,
}
rangeLiterals = &unicode.RangeTable{
R16: []unicode.Range16{
{Lo: 0x0021, Hi: 0x0021, Stride: 1}, // '!'
{Lo: 0x0023, Hi: 0x0024, Stride: 1}, // '#' - '$'
{Lo: 0x0026, Hi: 0x0026, Stride: 1}, // '&'
{Lo: 0x0028, Hi: 0x003B, Stride: 1}, // '(' - ';'
{Lo: 0x003D, Hi: 0x003D, Stride: 1}, // '='
{Lo: 0x003F, Hi: 0x005B, Stride: 1}, // '?' - '['
{Lo: 0x005D, Hi: 0x005D, Stride: 1}, // ']'
{Lo: 0x005F, Hi: 0x005F, Stride: 1}, // '_'
{Lo: 0x0061, Hi: 0x007A, Stride: 1}, // 'a' - 'z'
{Lo: 0x007E, Hi: 0x007E, Stride: 1}, // '~'
{Lo: 0x00A0, Hi: 0xD7FF, Stride: 1}, // ucschar
{Lo: 0xE000, Hi: 0xF8FF, Stride: 1}, // iprivate
{Lo: 0xF900, Hi: 0xFDCF, Stride: 1}, // ucschar
{Lo: 0xFDF0, Hi: 0xFFEF, Stride: 1}, // ucschar
},
R32: []unicode.Range32{
{Lo: 0x00010000, Hi: 0x0001FFFD, Stride: 1}, // ucschar
{Lo: 0x00020000, Hi: 0x0002FFFD, Stride: 1}, // ucschar
{Lo: 0x00030000, Hi: 0x0003FFFD, Stride: 1}, // ucschar
{Lo: 0x00040000, Hi: 0x0004FFFD, Stride: 1}, // ucschar
{Lo: 0x00050000, Hi: 0x0005FFFD, Stride: 1}, // ucschar
{Lo: 0x00060000, Hi: 0x0006FFFD, Stride: 1}, // ucschar
{Lo: 0x00070000, Hi: 0x0007FFFD, Stride: 1}, // ucschar
{Lo: 0x00080000, Hi: 0x0008FFFD, Stride: 1}, // ucschar
{Lo: 0x00090000, Hi: 0x0009FFFD, Stride: 1}, // ucschar
{Lo: 0x000A0000, Hi: 0x000AFFFD, Stride: 1}, // ucschar
{Lo: 0x000B0000, Hi: 0x000BFFFD, Stride: 1}, // ucschar
{Lo: 0x000C0000, Hi: 0x000CFFFD, Stride: 1}, // ucschar
{Lo: 0x000D0000, Hi: 0x000DFFFD, Stride: 1}, // ucschar
{Lo: 0x000E1000, Hi: 0x000EFFFD, Stride: 1}, // ucschar
{Lo: 0x000F0000, Hi: 0x000FFFFD, Stride: 1}, // iprivate
{Lo: 0x00100000, Hi: 0x0010FFFD, Stride: 1}, // iprivate
},
LatinOffset: 10,
}
)
type parser struct {
r string
start int
stop int
state parseState
}
func (p *parser) errorf(format string, a ...interface{}) error {
return errorf(p.stop, format, a...)
}
// rune decodes and consumes the next rune from the input, returning
// the rune and its size in bytes.
func (p *parser) rune() (rune, int) {
	r, size := utf8.DecodeRuneInString(p.r[p.stop:])
	p.stop += size
	return r, size
}

// unread pushes the most recently read rune r back onto the input.
func (p *parser) unread(r rune) {
	p.stop -= utf8.RuneLen(r)
}
type parseState int
const (
parseStateDefault = parseState(iota)
parseStateOperator
parseStateVarList
parseStateVarName
parseStatePrefix
)
func (p *parser) setState(state parseState) {
p.state = state
p.start = p.stop
}
func (p *parser) parseURITemplate() (*Template, error) {
tmpl := Template{
raw: p.r,
exprs: []template{},
}
var exp *expression
for {
r, size := p.rune()
if r == utf8.RuneError {
if size == 0 {
if p.state != parseStateDefault {
return nil, p.errorf("incomplete template")
}
if p.start < p.stop {
tmpl.exprs = append(tmpl.exprs, literals(p.r[p.start:p.stop]))
}
return &tmpl, nil
}
return nil, p.errorf("invalid UTF-8 encoding")
}
switch p.state {
case parseStateDefault:
switch r {
case '{':
if stop := p.stop - size; stop > p.start {
tmpl.exprs = append(tmpl.exprs, literals(p.r[p.start:stop]))
}
exp = &expression{}
tmpl.exprs = append(tmpl.exprs, exp)
p.setState(parseStateOperator)
case '%':
p.unread(r)
if _, err := p.consumeTriplets(); err != nil {
return nil, err
}
default:
if !unicode.Is(rangeLiterals, r) {
return nil, p.errorf("invalid literals")
}
}
case parseStateOperator:
switch r {
default:
p.unread(r)
exp.op = parseOpSimple
case '+':
exp.op = parseOpPlus
case '#':
exp.op = parseOpCrosshatch
case '.':
exp.op = parseOpDot
case '/':
exp.op = parseOpSlash
case ';':
exp.op = parseOpSemicolon
case '?':
exp.op = parseOpQuestion
case '&':
exp.op = parseOpAmpersand
case '=', ',', '!', '@', '|': // op-reserved
return nil, p.errorf("unsupported operator")
}
p.setState(parseStateVarName)
case parseStateVarList:
switch r {
case ',':
p.setState(parseStateVarName)
case '}':
exp.init()
p.setState(parseStateDefault)
default:
return nil, p.errorf("invalid variable-list")
}
case parseStateVarName:
switch r {
case ':':
exp.vars = append(exp.vars, varspec{
name: p.r[p.start : p.stop-size],
})
p.setState(parseStatePrefix)
case '*':
exp.vars = append(exp.vars, varspec{
name: p.r[p.start : p.stop-size],
explode: true,
})
p.setState(parseStateVarList)
case ',', '}':
p.unread(r)
exp.vars = append(exp.vars, varspec{
name: p.r[p.start:p.stop],
})
p.setState(parseStateVarList)
case '%':
p.unread(r)
if _, err := p.consumeTriplets(); err != nil {
return nil, err
}
default:
if !unicode.Is(rangeVarchar, r) {
return nil, p.errorf("invalid varname")
}
}
case parseStatePrefix:
spec := &(exp.vars[len(exp.vars)-1])
switch {
case '0' <= r && r <= '9':
spec.maxlen *= 10
spec.maxlen += int(r - '0')
if spec.maxlen == 0 || spec.maxlen > 9999 {
return nil, p.errorf("max-length must be (0, 9999]")
}
default:
if spec.maxlen == 0 {
return nil, p.errorf("max-length must be (0, 9999]")
}
p.unread(r)
p.setState(parseStateVarList)
}
default:
panic(fmt.Errorf("unhandled parseState(%d)", p.state))
}
}
}
// consumeTriplets consumes one percent-encoded triplet ("%XX") at the
// current position and returns it. An error is returned when fewer
// than three bytes remain or the bytes are not a valid triplet.
func (p *parser) consumeTriplets() (string, error) {
	if len(p.r)-p.stop < 3 || p.r[p.stop] != '%' || !ishex(p.r[p.stop+1]) || !ishex(p.r[p.stop+2]) {
		// "pct-encoded" per RFC 6570; fixes the "encodeed" typo.
		return "", errorf(p.stop, "incomplete pct-encoded")
	}
	triplets := p.r[p.stop : p.stop+3]
	p.stop += 3
	return triplets, nil
}
fix: parser did not accept dots in the middle of varnames
// Copyright (C) 2016 Kohei YOSHIDA. All rights reserved.
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of The BSD 3-Clause License
// that can be found in the LICENSE file.
package uritemplate
import (
"fmt"
"unicode"
"unicode/utf8"
)
type parseOp int
const (
parseOpSimple parseOp = iota
parseOpPlus
parseOpCrosshatch
parseOpDot
parseOpSlash
parseOpSemicolon
parseOpQuestion
parseOpAmpersand
)
var (
rangeVarchar = &unicode.RangeTable{
R16: []unicode.Range16{
{Lo: 0x0030, Hi: 0x0039, Stride: 1}, // '0' - '9'
{Lo: 0x0041, Hi: 0x005A, Stride: 1}, // 'A' - 'Z'
{Lo: 0x005F, Hi: 0x005F, Stride: 1}, // '_'
{Lo: 0x0061, Hi: 0x007A, Stride: 1}, // 'a' - 'z'
},
LatinOffset: 4,
}
rangeLiterals = &unicode.RangeTable{
R16: []unicode.Range16{
{Lo: 0x0021, Hi: 0x0021, Stride: 1}, // '!'
{Lo: 0x0023, Hi: 0x0024, Stride: 1}, // '#' - '$'
{Lo: 0x0026, Hi: 0x0026, Stride: 1}, // '&'
{Lo: 0x0028, Hi: 0x003B, Stride: 1}, // '(' - ';'
{Lo: 0x003D, Hi: 0x003D, Stride: 1}, // '='
{Lo: 0x003F, Hi: 0x005B, Stride: 1}, // '?' - '['
{Lo: 0x005D, Hi: 0x005D, Stride: 1}, // ']'
{Lo: 0x005F, Hi: 0x005F, Stride: 1}, // '_'
{Lo: 0x0061, Hi: 0x007A, Stride: 1}, // 'a' - 'z'
{Lo: 0x007E, Hi: 0x007E, Stride: 1}, // '~'
{Lo: 0x00A0, Hi: 0xD7FF, Stride: 1}, // ucschar
{Lo: 0xE000, Hi: 0xF8FF, Stride: 1}, // iprivate
{Lo: 0xF900, Hi: 0xFDCF, Stride: 1}, // ucschar
{Lo: 0xFDF0, Hi: 0xFFEF, Stride: 1}, // ucschar
},
R32: []unicode.Range32{
{Lo: 0x00010000, Hi: 0x0001FFFD, Stride: 1}, // ucschar
{Lo: 0x00020000, Hi: 0x0002FFFD, Stride: 1}, // ucschar
{Lo: 0x00030000, Hi: 0x0003FFFD, Stride: 1}, // ucschar
{Lo: 0x00040000, Hi: 0x0004FFFD, Stride: 1}, // ucschar
{Lo: 0x00050000, Hi: 0x0005FFFD, Stride: 1}, // ucschar
{Lo: 0x00060000, Hi: 0x0006FFFD, Stride: 1}, // ucschar
{Lo: 0x00070000, Hi: 0x0007FFFD, Stride: 1}, // ucschar
{Lo: 0x00080000, Hi: 0x0008FFFD, Stride: 1}, // ucschar
{Lo: 0x00090000, Hi: 0x0009FFFD, Stride: 1}, // ucschar
{Lo: 0x000A0000, Hi: 0x000AFFFD, Stride: 1}, // ucschar
{Lo: 0x000B0000, Hi: 0x000BFFFD, Stride: 1}, // ucschar
{Lo: 0x000C0000, Hi: 0x000CFFFD, Stride: 1}, // ucschar
{Lo: 0x000D0000, Hi: 0x000DFFFD, Stride: 1}, // ucschar
{Lo: 0x000E1000, Hi: 0x000EFFFD, Stride: 1}, // ucschar
{Lo: 0x000F0000, Hi: 0x000FFFFD, Stride: 1}, // iprivate
{Lo: 0x00100000, Hi: 0x0010FFFD, Stride: 1}, // iprivate
},
LatinOffset: 10,
}
)
type parser struct {
r string
start int
stop int
state parseState
}
func (p *parser) errorf(format string, a ...interface{}) error {
return errorf(p.stop, format, a...)
}
func (p *parser) rune() (rune, int) {
r, size := utf8.DecodeRuneInString(p.r[p.stop:])
p.stop += size
return r, size
}
func (p *parser) unread(r rune) {
p.stop -= utf8.RuneLen(r)
}
type parseState int
const (
parseStateDefault = parseState(iota)
parseStateOperator
parseStateVarList
parseStateVarName
parseStatePrefix
)
func (p *parser) setState(state parseState) {
p.state = state
p.start = p.stop
}
func (p *parser) parseURITemplate() (*Template, error) {
tmpl := Template{
raw: p.r,
exprs: []template{},
}
var exp *expression
for {
r, size := p.rune()
if r == utf8.RuneError {
if size == 0 {
if p.state != parseStateDefault {
return nil, p.errorf("incomplete template")
}
if p.start < p.stop {
tmpl.exprs = append(tmpl.exprs, literals(p.r[p.start:p.stop]))
}
return &tmpl, nil
}
return nil, p.errorf("invalid UTF-8 encoding")
}
switch p.state {
case parseStateDefault:
switch r {
case '{':
if stop := p.stop - size; stop > p.start {
tmpl.exprs = append(tmpl.exprs, literals(p.r[p.start:stop]))
}
exp = &expression{}
tmpl.exprs = append(tmpl.exprs, exp)
p.setState(parseStateOperator)
case '%':
p.unread(r)
if _, err := p.consumeTriplets(); err != nil {
return nil, err
}
default:
if !unicode.Is(rangeLiterals, r) {
return nil, p.errorf("invalid literals")
}
}
case parseStateOperator:
switch r {
default:
p.unread(r)
exp.op = parseOpSimple
case '+':
exp.op = parseOpPlus
case '#':
exp.op = parseOpCrosshatch
case '.':
exp.op = parseOpDot
case '/':
exp.op = parseOpSlash
case ';':
exp.op = parseOpSemicolon
case '?':
exp.op = parseOpQuestion
case '&':
exp.op = parseOpAmpersand
case '=', ',', '!', '@', '|': // op-reserved
return nil, p.errorf("unsupported operator")
}
p.setState(parseStateVarName)
case parseStateVarList:
switch r {
case ',':
p.setState(parseStateVarName)
case '}':
exp.init()
p.setState(parseStateDefault)
default:
return nil, p.errorf("invalid variable-list")
}
case parseStateVarName:
switch r {
case ':', '*':
name := p.r[p.start : p.stop-size]
if !validVarname(name) {
return nil, p.errorf("invalid varname")
}
explode := r == '*'
exp.vars = append(exp.vars, varspec{
name: name,
explode: explode,
})
if explode {
p.setState(parseStateVarList)
} else {
p.setState(parseStatePrefix)
}
case ',', '}':
p.unread(r)
name := p.r[p.start:p.stop]
if !validVarname(name) {
return nil, p.errorf("invalid varname")
}
exp.vars = append(exp.vars, varspec{
name: name,
})
p.setState(parseStateVarList)
case '%':
p.unread(r)
if _, err := p.consumeTriplets(); err != nil {
return nil, err
}
case '.':
if dot := p.stop - size; dot == p.start || p.r[dot-1] == '.' {
return nil, p.errorf("invalid varname")
}
default:
if !unicode.Is(rangeVarchar, r) {
return nil, p.errorf("invalid varname")
}
}
case parseStatePrefix:
spec := &(exp.vars[len(exp.vars)-1])
switch {
case '0' <= r && r <= '9':
spec.maxlen *= 10
spec.maxlen += int(r - '0')
if spec.maxlen == 0 || spec.maxlen > 9999 {
return nil, p.errorf("max-length must be (0, 9999]")
}
default:
if spec.maxlen == 0 {
return nil, p.errorf("max-length must be (0, 9999]")
}
p.unread(r)
p.setState(parseStateVarList)
}
default:
panic(fmt.Errorf("unhandled parseState(%d)", p.state))
}
}
}
// validVarname reports whether name is a well-formed variable name
// with respect to dots: it must be non-empty, must not start or end
// with a dot, and must not contain two dots in a row.
func validVarname(name string) bool {
	if len(name) == 0 {
		return false
	}
	// A sentinel '.' makes a leading dot trip the adjacency check.
	prev := byte('.')
	for i := 0; i < len(name); i++ {
		if name[i] == '.' && prev == '.' {
			return false
		}
		prev = name[i]
	}
	// prev now holds the final byte; reject a trailing dot.
	return prev != '.'
}
// consumeTriplets consumes one percent-encoded triplet ("%XX") at the
// current position and returns it. An error is returned when fewer
// than three bytes remain or the bytes are not a valid triplet.
func (p *parser) consumeTriplets() (string, error) {
	if len(p.r)-p.stop < 3 || p.r[p.stop] != '%' || !ishex(p.r[p.stop+1]) || !ishex(p.r[p.stop+2]) {
		// "pct-encoded" per RFC 6570; fixes the "encodeed" typo.
		return "", errorf(p.stop, "incomplete pct-encoded")
	}
	triplets := p.r[p.stop : p.stop+3]
	p.stop += 3
	return triplets, nil
}
|
package main
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"text/template"
"github.com/go-xorm/core"
"github.com/go-xorm/xorm"
"github.com/go-xweb/log"
_ "github.com/go-sql-driver/mysql"
_ "github.com/lib/pq"
_ "github.com/ziutek/mymysql/godrv"
)
// CmdReverse describes the "reverse" sub-command, which generates source
// code from an existing database schema.
//
// BUG FIX: the usage line and help text documented a "-m" flag, but init
// registers "-s" and runReverse reads cmd.Flags["-s"]; the documentation
// now matches the flag that actually exists.
var CmdReverse = &Command{
	UsageLine: "reverse [-s] driverName datasourceName tmplPath [generatedPath]",
	Short:     "reverse a db to codes",
	Long: `
according database's tables and columns to generate codes for Go, C++ and etc.

    -s                Generated one go file for every table
    driverName        Database driver name, now supported four: mysql mymysql sqlite3 postgres
    datasourceName    Database connection uri, for detail infomation please visit driver's project page
    tmplPath          Template dir for generated. the default templates dir has provide 1 template
    generatedPath     This parameter is optional, if blank, the default value is model, then will
                      generated all codes in model dir
`,
}
// init wires the reverse command to its runner and declares its boolean
// flags. "-s" selects single-file output (see runReverse); "-l" is
// registered here but not read anywhere in this file — NOTE(review):
// confirm whether "-l" is consumed elsewhere or is dead.
func init() {
	CmdReverse.Run = runReverse
	CmdReverse.Flags = map[string]bool{
		"-s": false,
		"-l": false,
	}
}
var (
	// genJson is toggled from the template directory's "config" file in
	// runReverse (key "genJson"); presumably controls JSON tag emission in
	// templates — TODO confirm against the template set.
	genJson bool = false
)

// printReversePrompt is the per-flag prompt callback passed to checkFlags;
// the reverse command intentionally prints nothing.
func printReversePrompt(flag string) {
}
// Tmpl is the data object passed to every code-generation template.
type Tmpl struct {
	Tables  []*core.Table     // tables to render in this output file
	Imports map[string]string // import paths required by the generated code
	Model   string            // target package (output directory base) name
}
func dirExists(dir string) bool {
d, e := os.Stat(dir)
switch {
case e != nil:
return false
case !d.IsDir():
return false
}
return true
}
// runReverse implements the "reverse" command: it connects to the database
// described by args, reads its table metadata, and renders every template
// in the template directory into generated source files.
//
// args layout after flag extraction: [0] driver name, [1] datasource
// string, [2] template directory, [3] optional output directory (defaults
// to ./model).
func runReverse(cmd *Command, args []string) {
	num := checkFlags(cmd.Flags, args, printReversePrompt)
	if num == -1 {
		return
	}
	args = args[num:]
	if len(args) < 3 {
		fmt.Println("params error, please see xorm help reverse")
		return
	}
	// -s collapses all tables into a single generated file.
	var isMultiFile bool = true
	if use, ok := cmd.Flags["-s"]; ok {
		isMultiFile = !use
	}
	curPath, err := os.Getwd()
	if err != nil {
		fmt.Println(err)
		return
	}
	var genDir string
	var model string
	if len(args) == 4 {
		genDir, err = filepath.Abs(args[3])
		if err != nil {
			fmt.Println(err)
			return
		}
		// path.Base cannot parse Windows "\" separators, so normalize
		// them to "/" first (translated from the original Chinese note).
		genDir = strings.Replace(genDir, "\\", "/", -1)
		model = path.Base(genDir)
	} else {
		model = "model"
		genDir = path.Join(curPath, model)
	}
	dir, err := filepath.Abs(args[2])
	if err != nil {
		log.Errorf("%v", err)
		return
	}
	if !dirExists(dir) {
		log.Errorf("Template %v path is not exist", dir)
		return
	}
	var langTmpl LangTmpl
	var ok bool
	var lang string = "go"
	var prefix string = ""
	// An optional "config" file inside the template dir can override the
	// target language, JSON generation, and a table-name prefix to strip.
	cfgPath := path.Join(dir, "config")
	info, err := os.Stat(cfgPath)
	var configs map[string]string
	if err == nil && !info.IsDir() {
		configs = loadConfig(cfgPath)
		if l, ok := configs["lang"]; ok {
			lang = l
		}
		if j, ok := configs["genJson"]; ok {
			// NOTE(review): the ParseBool error is silently discarded;
			// a malformed value leaves genJson false.
			genJson, err = strconv.ParseBool(j)
		}
		if j, ok := configs["prefix"]; ok {
			prefix = j
		}
	}
	if langTmpl, ok = langTmpls[lang]; !ok {
		fmt.Println("Unsupported programing language", lang)
		return
	}
	os.MkdirAll(genDir, os.ModePerm)
	Orm, err := xorm.NewEngine(args[0], args[1])
	if err != nil {
		log.Errorf("%v", err)
		return
	}
	tables, err := Orm.DBMetas()
	if err != nil {
		log.Errorf("%v", err)
		return
	}
	// Render every template file found under dir.
	// NOTE(review): the callback dereferences info without checking the
	// incoming err; a walk error would pass info == nil and panic.
	filepath.Walk(dir, func(f string, info os.FileInfo, err error) error {
		if info.IsDir() {
			return nil
		}
		if info.Name() == "config" {
			return nil
		}
		bs, err := ioutil.ReadFile(f)
		if err != nil {
			log.Errorf("%v", err)
			return err
		}
		t := template.New(f)
		t.Funcs(langTmpl.Funcs)
		tmpl, err := t.Parse(string(bs))
		if err != nil {
			log.Errorf("%v", err)
			return err
		}
		var w *os.File
		fileName := info.Name()
		// Strip the last 4 characters — assumes a fixed-length template
		// suffix such as ".tpl"; TODO confirm against the template set.
		newFileName := fileName[:len(fileName)-4]
		ext := path.Ext(newFileName)
		if !isMultiFile {
			// Single-file mode: render all tables into one output file.
			w, err = os.Create(path.Join(genDir, newFileName))
			if err != nil {
				log.Errorf("%v", err)
				return err
			}
			imports := langTmpl.GenImports(tables)
			tbls := make([]*core.Table, 0)
			for _, table := range tables {
				if prefix != "" {
					table.Name = strings.TrimPrefix(table.Name, prefix)
				}
				tbls = append(tbls, table)
			}
			newbytes := bytes.NewBufferString("")
			t := &Tmpl{Tables: tbls, Imports: imports, Model: model}
			err = tmpl.Execute(newbytes, t)
			if err != nil {
				log.Errorf("%v", err)
				return err
			}
			tplcontent, err := ioutil.ReadAll(newbytes)
			if err != nil {
				log.Errorf("%v", err)
				return err
			}
			var source string
			if langTmpl.Formater != nil {
				source, err = langTmpl.Formater(string(tplcontent))
				if err != nil {
					log.Errorf("%v", err)
					return err
				}
			} else {
				source = string(tplcontent)
			}
			w.WriteString(source)
			w.Close()
		} else {
			// Multi-file mode: one output file per table.
			for _, table := range tables {
				if prefix != "" {
					table.Name = strings.TrimPrefix(table.Name, prefix)
				}
				tbs := []*core.Table{table}
				imports := langTmpl.GenImports(tbs)
				w, err := os.Create(path.Join(genDir, unTitle(mapper.Table2Obj(table.Name))+ext))
				if err != nil {
					log.Errorf("%v", err)
					return err
				}
				newbytes := bytes.NewBufferString("")
				t := &Tmpl{Tables: tbs, Imports: imports, Model: model}
				err = tmpl.Execute(newbytes, t)
				if err != nil {
					log.Errorf("%v", err)
					return err
				}
				tplcontent, err := ioutil.ReadAll(newbytes)
				if err != nil {
					log.Errorf("%v", err)
					return err
				}
				var source string
				if langTmpl.Formater != nil {
					source, err = langTmpl.Formater(string(tplcontent))
					if err != nil {
						log.Errorf("%v-%v", err, string(tplcontent))
						return err
					}
				} else {
					source = string(tplcontent)
				}
				w.WriteString(source)
				w.Close()
			}
		}
		return nil
	})
}
Fixed #8: the usage line documented a -m flag while the command actually registers -s.
package main
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"text/template"
"github.com/go-xorm/core"
"github.com/go-xorm/xorm"
"github.com/go-xweb/log"
_ "github.com/go-sql-driver/mysql"
_ "github.com/lib/pq"
_ "github.com/ziutek/mymysql/godrv"
)
// CmdReverse describes the "reverse" sub-command, which generates source
// code (Go, C++, etc.) from an existing database schema. The -s flag in
// the usage line matches the flag registered in init.
var CmdReverse = &Command{
	UsageLine: "reverse [-s] driverName datasourceName tmplPath [generatedPath]",
	Short:     "reverse a db to codes",
	Long: `
according database's tables and columns to generate codes for Go, C++ and etc.

    -s                Generated one go file for every table
    driverName        Database driver name, now supported four: mysql mymysql sqlite3 postgres
    datasourceName    Database connection uri, for detail infomation please visit driver's project page
    tmplPath          Template dir for generated. the default templates dir has provide 1 template
    generatedPath     This parameter is optional, if blank, the default value is model, then will
                      generated all codes in model dir
`,
}
// init wires the reverse command to its runner and declares its boolean
// flags. "-s" selects single-file output (see runReverse); "-l" is
// registered but not read in this file — NOTE(review): confirm "-l" is
// consumed elsewhere or remove it.
func init() {
	CmdReverse.Run = runReverse
	CmdReverse.Flags = map[string]bool{
		"-s": false,
		"-l": false,
	}
}
var (
	// genJson is toggled from the template directory's "config" file in
	// runReverse (key "genJson"); presumably controls JSON tag emission in
	// templates — TODO confirm against the template set.
	genJson bool = false
)

// printReversePrompt is the per-flag prompt callback passed to checkFlags;
// the reverse command intentionally prints nothing.
func printReversePrompt(flag string) {
}
// Tmpl is the data object passed to every code-generation template.
type Tmpl struct {
	Tables  []*core.Table     // tables to render in this output file
	Imports map[string]string // import paths required by the generated code
	Model   string            // target package (output directory base) name
}
func dirExists(dir string) bool {
d, e := os.Stat(dir)
switch {
case e != nil:
return false
case !d.IsDir():
return false
}
return true
}
// runReverse implements the "reverse" command: it connects to the database
// described by args, reads its table metadata, and renders every template
// in the template directory into generated source files.
//
// args layout after flag extraction: [0] driver name, [1] datasource
// string, [2] template directory, [3] optional output directory (defaults
// to ./model).
func runReverse(cmd *Command, args []string) {
	num := checkFlags(cmd.Flags, args, printReversePrompt)
	if num == -1 {
		return
	}
	args = args[num:]
	if len(args) < 3 {
		fmt.Println("params error, please see xorm help reverse")
		return
	}
	// -s collapses all tables into a single generated file.
	var isMultiFile bool = true
	if use, ok := cmd.Flags["-s"]; ok {
		isMultiFile = !use
	}
	curPath, err := os.Getwd()
	if err != nil {
		fmt.Println(err)
		return
	}
	var genDir string
	var model string
	if len(args) == 4 {
		genDir, err = filepath.Abs(args[3])
		if err != nil {
			fmt.Println(err)
			return
		}
		// path.Base cannot parse Windows "\" separators, so normalize
		// them to "/" first (translated from the original Chinese note).
		genDir = strings.Replace(genDir, "\\", "/", -1)
		model = path.Base(genDir)
	} else {
		model = "model"
		genDir = path.Join(curPath, model)
	}
	dir, err := filepath.Abs(args[2])
	if err != nil {
		log.Errorf("%v", err)
		return
	}
	if !dirExists(dir) {
		log.Errorf("Template %v path is not exist", dir)
		return
	}
	var langTmpl LangTmpl
	var ok bool
	var lang string = "go"
	var prefix string = ""
	// An optional "config" file inside the template dir can override the
	// target language, JSON generation, and a table-name prefix to strip.
	cfgPath := path.Join(dir, "config")
	info, err := os.Stat(cfgPath)
	var configs map[string]string
	if err == nil && !info.IsDir() {
		configs = loadConfig(cfgPath)
		if l, ok := configs["lang"]; ok {
			lang = l
		}
		if j, ok := configs["genJson"]; ok {
			// NOTE(review): the ParseBool error is silently discarded;
			// a malformed value leaves genJson false.
			genJson, err = strconv.ParseBool(j)
		}
		if j, ok := configs["prefix"]; ok {
			prefix = j
		}
	}
	if langTmpl, ok = langTmpls[lang]; !ok {
		fmt.Println("Unsupported programing language", lang)
		return
	}
	os.MkdirAll(genDir, os.ModePerm)
	Orm, err := xorm.NewEngine(args[0], args[1])
	if err != nil {
		log.Errorf("%v", err)
		return
	}
	tables, err := Orm.DBMetas()
	if err != nil {
		log.Errorf("%v", err)
		return
	}
	// Render every template file found under dir.
	// NOTE(review): the callback dereferences info without checking the
	// incoming err; a walk error would pass info == nil and panic.
	filepath.Walk(dir, func(f string, info os.FileInfo, err error) error {
		if info.IsDir() {
			return nil
		}
		if info.Name() == "config" {
			return nil
		}
		bs, err := ioutil.ReadFile(f)
		if err != nil {
			log.Errorf("%v", err)
			return err
		}
		t := template.New(f)
		t.Funcs(langTmpl.Funcs)
		tmpl, err := t.Parse(string(bs))
		if err != nil {
			log.Errorf("%v", err)
			return err
		}
		var w *os.File
		fileName := info.Name()
		// Strip the last 4 characters — assumes a fixed-length template
		// suffix such as ".tpl"; TODO confirm against the template set.
		newFileName := fileName[:len(fileName)-4]
		ext := path.Ext(newFileName)
		if !isMultiFile {
			// Single-file mode: render all tables into one output file.
			w, err = os.Create(path.Join(genDir, newFileName))
			if err != nil {
				log.Errorf("%v", err)
				return err
			}
			imports := langTmpl.GenImports(tables)
			tbls := make([]*core.Table, 0)
			for _, table := range tables {
				if prefix != "" {
					table.Name = strings.TrimPrefix(table.Name, prefix)
				}
				tbls = append(tbls, table)
			}
			newbytes := bytes.NewBufferString("")
			t := &Tmpl{Tables: tbls, Imports: imports, Model: model}
			err = tmpl.Execute(newbytes, t)
			if err != nil {
				log.Errorf("%v", err)
				return err
			}
			tplcontent, err := ioutil.ReadAll(newbytes)
			if err != nil {
				log.Errorf("%v", err)
				return err
			}
			var source string
			if langTmpl.Formater != nil {
				source, err = langTmpl.Formater(string(tplcontent))
				if err != nil {
					log.Errorf("%v", err)
					return err
				}
			} else {
				source = string(tplcontent)
			}
			w.WriteString(source)
			w.Close()
		} else {
			// Multi-file mode: one output file per table.
			for _, table := range tables {
				if prefix != "" {
					table.Name = strings.TrimPrefix(table.Name, prefix)
				}
				tbs := []*core.Table{table}
				imports := langTmpl.GenImports(tbs)
				w, err := os.Create(path.Join(genDir, unTitle(mapper.Table2Obj(table.Name))+ext))
				if err != nil {
					log.Errorf("%v", err)
					return err
				}
				newbytes := bytes.NewBufferString("")
				t := &Tmpl{Tables: tbs, Imports: imports, Model: model}
				err = tmpl.Execute(newbytes, t)
				if err != nil {
					log.Errorf("%v", err)
					return err
				}
				tplcontent, err := ioutil.ReadAll(newbytes)
				if err != nil {
					log.Errorf("%v", err)
					return err
				}
				var source string
				if langTmpl.Formater != nil {
					source, err = langTmpl.Formater(string(tplcontent))
					if err != nil {
						log.Errorf("%v-%v", err, string(tplcontent))
						return err
					}
				} else {
					source = string(tplcontent)
				}
				w.WriteString(source)
				w.Close()
			}
		}
		return nil
	})
}
|
Add test cases for MgoStore.
|
/* Copyright (c) 2014, Daniel Martí <mvdan@mvdan.cc> */
/* See LICENSE for licensing information */
package main
import (
"compress/zlib"
"errors"
"flag"
"fmt"
"html/template"
"io"
"log"
"math/rand"
"net/http"
"os"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
)
const (
	idSize    = 8 // length of a paste id; should be between 6 and 256
	indexTmpl = "index.html"
	chars     = "abcdefghijklmnopqrstuvwxyz0123456789" // alphabet for paste ids
	// GET error messages
	invalidId     = "Invalid paste id."
	pasteNotFound = "Paste doesn't exist."
	unknownError  = "Something went terribly wrong."
	// POST error messages
	missingForm = "Paste could not be found inside the posted form."
)

var (
	// Command-line configuration flags.
	siteUrl     = flag.String("u", "http://localhost:9090", "URL of the site")
	listen      = flag.String("l", "localhost:9090", "Host and port to listen to")
	dataDir     = flag.String("d", "data", "Directory to store all the pastes in")
	lifeTimeStr = flag.String("t", "12h", "Lifetime of the pastes (units: s,m,h)")
	maxSizeStr  = flag.String("s", "1M", "Maximum size of POSTs in bytes (units: B,K,M)")
	// Parsed forms of the flags above, filled in by main.
	lifeTime time.Duration
	maxSize  ByteSize
	// validId matches exactly idSize alphanumeric characters.
	validId       = regexp.MustCompile("^[a-zA-Z0-9]{" + strconv.FormatInt(idSize, 10) + "}$")
	regexByteSize = regexp.MustCompile(`^([\d\.]+)\s*([KM]?B|[BKM])$`)
	indexTemplate *template.Template
)
// pathId maps a paste id to its sharded on-disk location, e.g.
// "abcdefgh" -> "ab/cd/efgh".
func pathId(id string) string {
	first, second, rest := id[:2], id[2:4], id[4:]
	return path.Join(first, second, rest)
}
// ByteSize is a byte count that prints itself with human-readable units.
type ByteSize int64

const (
	B ByteSize = 1 << (10 * iota) // 1 byte
	KB                            // kilobyte (1024 B)
	MB                            // megabyte (1024 KB)
)
// String renders the byte count with its largest applicable unit.
func (b ByteSize) String() string {
	if b >= MB {
		return fmt.Sprintf("%.2f MB", float64(b)/float64(MB))
	}
	if b >= KB {
		return fmt.Sprintf("%.2f KB", float64(b)/float64(KB))
	}
	return fmt.Sprintf("%d B", b)
}
// parseByteSize parses a human-readable size such as "1M", "512KB" or
// "100B" into a ByteSize. Recognized units: B, K/KB, M/MB.
// (Fix: FindStringSubmatch already yields strings, so the redundant
// string(...) conversions are removed.)
func parseByteSize(str string) (ByteSize, error) {
	if !regexByteSize.MatchString(str) {
		return 0, errors.New("Could not parse size in bytes")
	}
	parts := regexByteSize.FindStringSubmatch(str)
	size, _ := strconv.ParseFloat(parts[1], 64)
	switch parts[2] {
	case "KB", "K":
		size *= float64(KB)
	case "MB", "M":
		size *= float64(MB)
	}
	return ByteSize(size), nil
}
// randomId generates a random idSize-character paste id from the chars
// alphabet, consuming up to 8 low-order bytes of each rand.Int63 value.
//
// BUG FIX: the original ended with `return strings.Repeat(chars[0:1],
// idSize)` after an infinite for-loop — unreachable code flagged by go
// vet. The function now returns directly once the buffer is full.
func randomId() string {
	s := make([]byte, idSize)
	var offset uint = 0
	for {
		r := rand.Int63()
		for i := 0; i < 8; i++ {
			randbyte := int(r&0xff) % len(chars)
			s[offset] = chars[randbyte]
			offset++
			if offset == idSize {
				return string(s)
			}
			r >>= 8
		}
	}
}
// endLife deletes the paste at path. If removal fails the deletion is
// rescheduled for two minutes later via programDeath.
func endLife(path string) {
	err := os.Remove(path)
	if err == nil {
		log.Printf("Removed paste: %s", path)
	} else {
		log.Printf("Could not end the life of %s: %s", path, err)
		programDeath(path, 2*time.Minute)
	}
}
// programDeath schedules deletion of the paste at path once the given
// duration has elapsed.
func programDeath(path string, after time.Duration) {
	go func() {
		<-time.After(after)
		endLife(path)
	}()
}
// handler serves paste retrieval (GET /<id>) and paste creation (POST /).
// GET decompresses the stored zlib blob; POST stores a new paste and
// schedules its deletion after lifeTime.
func handler(w http.ResponseWriter, r *http.Request) {
	var err error
	switch r.Method {
	case "GET":
		var id, pastePath string
		id = r.URL.Path[1:]
		if len(id) == 0 {
			// Bare "/" serves the index page.
			indexTemplate.Execute(w, *siteUrl)
			return
		}
		if !validId.MatchString(id) {
			w.WriteHeader(http.StatusBadRequest)
			fmt.Fprintf(w, "%s\n", invalidId)
			return
		}
		id = strings.ToLower(id)
		pastePath = pathId(id)
		pasteFile, err := os.Open(pastePath)
		if err != nil {
			w.WriteHeader(http.StatusNotFound)
			fmt.Fprintf(w, "%s\n", pasteNotFound)
			return
		}
		compReader, err := zlib.NewReader(pasteFile)
		if err != nil {
			log.Printf("Could not open a compression reader for %s: %s", pastePath, err)
			w.WriteHeader(http.StatusInternalServerError)
			fmt.Fprintf(w, "%s\n", unknownError)
			return
		}
		io.Copy(w, compReader)
		compReader.Close()
		pasteFile.Close()
	case "POST":
		// Cap the request body at maxSize.
		r.Body = http.MaxBytesReader(w, r.Body, int64(maxSize))
		var id, pastePath string
		// Draw random ids until one is unused on disk.
		for {
			id = randomId()
			pastePath = pathId(id)
			if _, err := os.Stat(pastePath); os.IsNotExist(err) {
				break
			}
		}
		if err = r.ParseMultipartForm(int64(maxSize)); err != nil {
			log.Printf("Could not parse POST multipart form: %s", err)
			w.WriteHeader(http.StatusBadRequest)
			fmt.Fprintf(w, "%s\n", err)
			return
		}
		var content string
		if vs, found := r.Form["paste"]; found {
			content = vs[0]
		} else {
			w.WriteHeader(http.StatusBadRequest)
			fmt.Fprintf(w, "%s\n", missingForm)
			return
		}
		dir, _ := path.Split(pastePath)
		if err = os.MkdirAll(dir, 0700); err != nil {
			log.Printf("Could not create directories leading to %s: %s", pastePath, err)
			w.WriteHeader(http.StatusInternalServerError)
			fmt.Fprintf(w, "%s\n", unknownError)
			return
		}
		// Schedule deletion before the file is even written; the delayed
		// endLife retries if the file cannot be removed.
		programDeath(pastePath, lifeTime)
		pasteFile, err := os.OpenFile(pastePath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			log.Printf("Could not create new paste pasteFile %s: %s", pastePath, err)
			w.WriteHeader(http.StatusInternalServerError)
			fmt.Fprintf(w, "%s\n", unknownError)
			return
		}
		compWriter := zlib.NewWriter(pasteFile)
		b, err := io.WriteString(compWriter, content)
		compWriter.Close()
		pasteFile.Close()
		if err != nil {
			log.Printf("Could not write compressed data into %s: %s", pastePath, err)
			w.WriteHeader(http.StatusInternalServerError)
			fmt.Fprintf(w, "%s\n", unknownError)
			return
		}
		// b counts uncompressed input bytes, not bytes written to disk.
		writtenSize := ByteSize(b)
		log.Printf("Created a new paste: %s (%s)", pastePath, writtenSize)
		fmt.Fprintf(w, "%s/%s\n", *siteUrl, id)
	}
}
// walkFunc re-schedules deletion for pastes recovered from a previous run:
// already-expired pastes are removed immediately, the rest get a timer for
// their remaining lifetime (capped at lifeTime).
func walkFunc(path string, info os.FileInfo, err error) error {
	if err != nil {
		return err
	}
	if info.IsDir() {
		return nil
	}
	// Expiry is derived from the file's modification time.
	deathTime := info.ModTime().Add(lifeTime)
	now := time.Now()
	if deathTime.Before(now) {
		go endLife(path)
		return nil
	}
	var lifeLeft time.Duration
	if deathTime.After(now.Add(lifeTime)) {
		lifeLeft = lifeTime
	} else {
		lifeLeft = deathTime.Sub(now)
	}
	log.Printf("Recovered paste %s has %s left", path, lifeLeft)
	programDeath(path, lifeLeft)
	return nil
}
// main parses flags, recovers existing pastes from the data directory,
// and starts the HTTP server.
//
// BUG FIX (go vet): the two Fatalf calls for lifetime and max size passed
// the *string flag pointers to %s, printing a pointer instead of the
// value; they now dereference the flags.
func main() {
	var err error
	flag.Parse()
	if lifeTime, err = time.ParseDuration(*lifeTimeStr); err != nil {
		log.Fatalf("Invalid lifetime '%s': %s", *lifeTimeStr, err)
	}
	if maxSize, err = parseByteSize(*maxSizeStr); err != nil {
		log.Fatalf("Invalid max size '%s': %s", *maxSizeStr, err)
	}
	if indexTemplate, err = template.ParseFiles(indexTmpl); err != nil {
		log.Fatalf("Could not load template %s: %s", indexTmpl, err)
	}
	if err = os.MkdirAll(*dataDir, 0700); err != nil {
		log.Fatalf("Could not create data directory %s: %s", *dataDir, err)
	}
	if err = os.Chdir(*dataDir); err != nil {
		log.Fatalf("Could not enter data directory %s: %s", *dataDir, err)
	}
	// Re-arm deletion timers for pastes surviving a restart.
	if err = filepath.Walk(".", walkFunc); err != nil {
		log.Fatalf("Could not recover data directory %s: %s", *dataDir, err)
	}
	log.Printf("idSize   = %d", idSize)
	log.Printf("maxSize  = %s", maxSize)
	log.Printf("siteUrl  = %s", *siteUrl)
	log.Printf("listen   = %s", *listen)
	log.Printf("dataDir  = %s", *dataDir)
	log.Printf("lifeTime = %s", lifeTime)
	http.HandleFunc("/", handler)
	log.Fatal(http.ListenAndServe(*listen, nil))
}
Fix all issues found by "go vet"
/* Copyright (c) 2014, Daniel Martí <mvdan@mvdan.cc> */
/* See LICENSE for licensing information */
package main
import (
"compress/zlib"
"errors"
"flag"
"fmt"
"html/template"
"io"
"log"
"math/rand"
"net/http"
"os"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
)
const (
	idSize    = 8 // length of a paste id; should be between 6 and 256
	indexTmpl = "index.html"
	chars     = "abcdefghijklmnopqrstuvwxyz0123456789" // alphabet for paste ids
	// GET error messages
	invalidId     = "Invalid paste id."
	pasteNotFound = "Paste doesn't exist."
	unknownError  = "Something went terribly wrong."
	// POST error messages
	missingForm = "Paste could not be found inside the posted form."
)

var (
	// Command-line configuration flags.
	siteUrl     = flag.String("u", "http://localhost:9090", "URL of the site")
	listen      = flag.String("l", "localhost:9090", "Host and port to listen to")
	dataDir     = flag.String("d", "data", "Directory to store all the pastes in")
	lifeTimeStr = flag.String("t", "12h", "Lifetime of the pastes (units: s,m,h)")
	maxSizeStr  = flag.String("s", "1M", "Maximum size of POSTs in bytes (units: B,K,M)")
	// Parsed forms of the flags above, filled in by main.
	lifeTime time.Duration
	maxSize  ByteSize
	// validId matches exactly idSize alphanumeric characters.
	validId       = regexp.MustCompile("^[a-zA-Z0-9]{" + strconv.FormatInt(idSize, 10) + "}$")
	regexByteSize = regexp.MustCompile(`^([\d\.]+)\s*([KM]?B|[BKM])$`)
	indexTemplate *template.Template
)
// pathId maps a paste id to its sharded on-disk location, e.g.
// "abcdefgh" -> "ab/cd/efgh".
func pathId(id string) string {
	first, second, rest := id[:2], id[2:4], id[4:]
	return path.Join(first, second, rest)
}
// ByteSize is a byte count that prints itself with human-readable units.
type ByteSize int64

const (
	B ByteSize = 1 << (10 * iota) // 1 byte
	KB                            // kilobyte (1024 B)
	MB                            // megabyte (1024 KB)
)
// String renders the byte count with its largest applicable unit.
func (b ByteSize) String() string {
	if b >= MB {
		return fmt.Sprintf("%.2f MB", float64(b)/float64(MB))
	}
	if b >= KB {
		return fmt.Sprintf("%.2f KB", float64(b)/float64(KB))
	}
	return fmt.Sprintf("%d B", b)
}
// parseByteSize parses a human-readable size such as "1M", "512KB" or
// "100B" into a ByteSize. Recognized units: B, K/KB, M/MB.
// (Fix: FindStringSubmatch already yields strings, so the redundant
// string(...) conversions are removed.)
func parseByteSize(str string) (ByteSize, error) {
	if !regexByteSize.MatchString(str) {
		return 0, errors.New("Could not parse size in bytes")
	}
	parts := regexByteSize.FindStringSubmatch(str)
	size, _ := strconv.ParseFloat(parts[1], 64)
	switch parts[2] {
	case "KB", "K":
		size *= float64(KB)
	case "MB", "M":
		size *= float64(MB)
	}
	return ByteSize(size), nil
}
// randomId generates a random idSize-character paste id from the chars
// alphabet, consuming up to 8 low-order bytes of each rand.Int63 value.
func randomId() string {
	id := make([]byte, idSize)
	var filled uint = 0
	for {
		bits := rand.Int63()
		for i := 0; i < 8; i++ {
			id[filled] = chars[int(bits&0xff)%len(chars)]
			filled++
			if filled == idSize {
				// Buffer full: no labeled break needed, just return.
				return string(id)
			}
			bits >>= 8
		}
	}
}
// endLife deletes the paste at path. If removal fails the deletion is
// rescheduled for two minutes later via programDeath.
func endLife(path string) {
	err := os.Remove(path)
	if err == nil {
		log.Printf("Removed paste: %s", path)
	} else {
		log.Printf("Could not end the life of %s: %s", path, err)
		programDeath(path, 2*time.Minute)
	}
}
// programDeath schedules deletion of the paste at path once the given
// duration has elapsed.
func programDeath(path string, after time.Duration) {
	go func() {
		<-time.After(after)
		endLife(path)
	}()
}
// handler serves paste retrieval (GET /<id>) and paste creation (POST /).
// GET decompresses the stored zlib blob; POST stores a new paste and
// schedules its deletion after lifeTime.
func handler(w http.ResponseWriter, r *http.Request) {
	var err error
	switch r.Method {
	case "GET":
		var id, pastePath string
		id = r.URL.Path[1:]
		if len(id) == 0 {
			// Bare "/" serves the index page.
			indexTemplate.Execute(w, *siteUrl)
			return
		}
		if !validId.MatchString(id) {
			w.WriteHeader(http.StatusBadRequest)
			fmt.Fprintf(w, "%s\n", invalidId)
			return
		}
		id = strings.ToLower(id)
		pastePath = pathId(id)
		pasteFile, err := os.Open(pastePath)
		if err != nil {
			w.WriteHeader(http.StatusNotFound)
			fmt.Fprintf(w, "%s\n", pasteNotFound)
			return
		}
		compReader, err := zlib.NewReader(pasteFile)
		if err != nil {
			log.Printf("Could not open a compression reader for %s: %s", pastePath, err)
			w.WriteHeader(http.StatusInternalServerError)
			fmt.Fprintf(w, "%s\n", unknownError)
			return
		}
		io.Copy(w, compReader)
		compReader.Close()
		pasteFile.Close()
	case "POST":
		// Cap the request body at maxSize.
		r.Body = http.MaxBytesReader(w, r.Body, int64(maxSize))
		var id, pastePath string
		// Draw random ids until one is unused on disk.
		for {
			id = randomId()
			pastePath = pathId(id)
			if _, err := os.Stat(pastePath); os.IsNotExist(err) {
				break
			}
		}
		if err = r.ParseMultipartForm(int64(maxSize)); err != nil {
			log.Printf("Could not parse POST multipart form: %s", err)
			w.WriteHeader(http.StatusBadRequest)
			fmt.Fprintf(w, "%s\n", err)
			return
		}
		var content string
		if vs, found := r.Form["paste"]; found {
			content = vs[0]
		} else {
			w.WriteHeader(http.StatusBadRequest)
			fmt.Fprintf(w, "%s\n", missingForm)
			return
		}
		dir, _ := path.Split(pastePath)
		if err = os.MkdirAll(dir, 0700); err != nil {
			log.Printf("Could not create directories leading to %s: %s", pastePath, err)
			w.WriteHeader(http.StatusInternalServerError)
			fmt.Fprintf(w, "%s\n", unknownError)
			return
		}
		// Schedule deletion before the file is even written; the delayed
		// endLife retries if the file cannot be removed.
		programDeath(pastePath, lifeTime)
		pasteFile, err := os.OpenFile(pastePath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			log.Printf("Could not create new paste pasteFile %s: %s", pastePath, err)
			w.WriteHeader(http.StatusInternalServerError)
			fmt.Fprintf(w, "%s\n", unknownError)
			return
		}
		compWriter := zlib.NewWriter(pasteFile)
		b, err := io.WriteString(compWriter, content)
		compWriter.Close()
		pasteFile.Close()
		if err != nil {
			log.Printf("Could not write compressed data into %s: %s", pastePath, err)
			w.WriteHeader(http.StatusInternalServerError)
			fmt.Fprintf(w, "%s\n", unknownError)
			return
		}
		// b counts uncompressed input bytes, not bytes written to disk.
		writtenSize := ByteSize(b)
		log.Printf("Created a new paste: %s (%s)", pastePath, writtenSize)
		fmt.Fprintf(w, "%s/%s\n", *siteUrl, id)
	}
}
// walkFunc re-schedules deletion for pastes recovered from a previous run:
// already-expired pastes are removed immediately, the rest get a timer for
// their remaining lifetime (capped at lifeTime).
func walkFunc(path string, info os.FileInfo, err error) error {
	if err != nil {
		return err
	}
	if info.IsDir() {
		return nil
	}
	// Expiry is derived from the file's modification time.
	deathTime := info.ModTime().Add(lifeTime)
	now := time.Now()
	if deathTime.Before(now) {
		go endLife(path)
		return nil
	}
	var lifeLeft time.Duration
	if deathTime.After(now.Add(lifeTime)) {
		lifeLeft = lifeTime
	} else {
		lifeLeft = deathTime.Sub(now)
	}
	log.Printf("Recovered paste %s has %s left", path, lifeLeft)
	programDeath(path, lifeLeft)
	return nil
}
// main parses flags, recovers existing pastes from the data directory,
// and starts the HTTP server.
func main() {
	var err error
	flag.Parse()
	if lifeTime, err = time.ParseDuration(*lifeTimeStr); err != nil {
		log.Fatalf("Invalid lifetime '%s': %s", *lifeTimeStr, err)
	}
	if maxSize, err = parseByteSize(*maxSizeStr); err != nil {
		log.Fatalf("Invalid max size '%s': %s", *maxSizeStr, err)
	}
	if indexTemplate, err = template.ParseFiles(indexTmpl); err != nil {
		log.Fatalf("Could not load template %s: %s", indexTmpl, err)
	}
	if err = os.MkdirAll(*dataDir, 0700); err != nil {
		log.Fatalf("Could not create data directory %s: %s", *dataDir, err)
	}
	if err = os.Chdir(*dataDir); err != nil {
		log.Fatalf("Could not enter data directory %s: %s", *dataDir, err)
	}
	// Re-arm deletion timers for pastes surviving a restart.
	if err = filepath.Walk(".", walkFunc); err != nil {
		log.Fatalf("Could not recover data directory %s: %s", *dataDir, err)
	}
	log.Printf("idSize = %d", idSize)
	log.Printf("maxSize = %s", maxSize)
	log.Printf("siteUrl = %s", *siteUrl)
	log.Printf("listen = %s", *listen)
	log.Printf("dataDir = %s", *dataDir)
	log.Printf("lifeTime = %s", lifeTime)
	http.HandleFunc("/", handler)
	log.Fatal(http.ListenAndServe(*listen, nil))
}
|
// Copyright (C) 2017 Damon Revoe. All rights reserved.
// Use of this source code is governed by the MIT
// license, which can be found in the LICENSE file.
package main
import (
"fmt"
"strings"
)
// templateParams maps template parameter names to their values; a value is
// either a string or a []string (see verbatim.subst).
type templateParams map[string]interface{}

// fileParams couples one expanded filename with the parameter set that
// produced it.
type fileParams struct {
	filename string
	params   templateParams
}

// array is a node representing a multi-valued parameter: each value in
// paramValues forks the expansion, and continuation holds the text that
// follows the placeholder.
type array struct {
	paramName    string
	paramValues  []string
	continuation verbatim
}

// verbatim is a run of literal template text; next points to the array
// node that follows it, if any.
type verbatim struct {
	text string
	next *array
}
// subst updates the 'verbatim' receiver by replacing all instances of
// paramName surrounded by braces with paramValue, which can be either a
// string or a slice of strings. In the latter case, the text in the
// receiver gets truncated at the substitution point and the receiver is
// extended with a new array node holding the remainder.
// (Idiom: the nested type assertions are flattened into a type switch
// with identical behavior.)
func (v *verbatim) subst(paramName string, paramValue interface{}) {
	placeholder := "{" + paramName + "}"
	switch value := paramValue.(type) {
	case string:
		v.text = strings.Replace(v.text, placeholder, value, -1)
	case []string:
		// A slice splits the text: keep the prefix here and push the
		// suffix into a new array continuation node.
		if pos := strings.Index(v.text, placeholder); pos >= 0 {
			v.next = &array{paramName, value,
				verbatim{v.text[pos+len(placeholder):], v.next}}
			v.text = v.text[:pos]
		}
	default:
		// Any other value type is formatted with fmt.Sprint.
		v.text = strings.Replace(v.text, placeholder,
			fmt.Sprint(paramValue), -1)
	}
}
// expandPathnameTemplate takes a pathname template and substitutes
// template parameter names with their values. Parameter values can be
// either strings or slices of strings. Each template value that is a
// slice of strings multiplies the number of output strings by the number
// of strings in the slice.
//
// FIX: removed a discarded `strings.Index(pathname, ...)` call (its result
// was never used — flagged by staticcheck).
// NOTE(review): expansion of slice-valued parameters is visibly incomplete
// — when any array node exists the function falls back to returning the
// unexpanded pathname. The fmt.Println calls look like debug output; they
// are kept to preserve observable behavior.
func expandPathnameTemplate(pathname string, params templateParams) []fileParams {
	root := verbatim{pathname, nil}
	for paramName, paramValue := range params {
		root.subst(paramName, paramValue)
		for node := root.next; node != nil; node = node.continuation.next {
			node.continuation.subst(paramName, paramValue)
		}
		fmt.Println(paramName, paramValue) // debug output (kept)
	}
	if root.next == nil {
		return []fileParams{{root.text, params}}
	}
	fmt.Println(root) // debug output (kept)
	return []fileParams{{pathname, params}}
}
Simplify verbatim.subst implementation
// Copyright (C) 2017 Damon Revoe. All rights reserved.
// Use of this source code is governed by the MIT
// license, which can be found in the LICENSE file.
package main
import (
"fmt"
"strings"
)
// templateParams maps template parameter names to their values; a value is
// either a string or a []string (see verbatim.subst).
type templateParams map[string]interface{}

// fileParams couples one expanded filename with the parameter set that
// produced it.
type fileParams struct {
	filename string
	params   templateParams
}

// array is a node representing a multi-valued parameter: each value in
// paramValues forks the expansion, and continuation holds the text that
// follows the placeholder.
type array struct {
	paramName    string
	paramValues  []string
	continuation verbatim
}

// verbatim is a run of literal template text; next points to the array
// node that follows it, if any.
type verbatim struct {
	text string
	next *array
}
// subst updates the 'verbatim' receiver by replacing all instances of
// paramName surrounded by braces with paramValue, which can be either a
// string or a slice of strings. In the latter case, the text in the
// receiver gets truncated at the substitution point and the receiver is
// extended with a new array node holding the remainder.
func (v *verbatim) subst(paramName string, paramValue interface{}) {
	placeholder := "{" + paramName + "}"
	switch value := paramValue.(type) {
	case string:
		v.text = strings.Replace(v.text, placeholder, value, -1)
	case []string:
		// A slice splits the text: keep the prefix here and push the
		// suffix into a new array continuation node.
		if pos := strings.Index(v.text, placeholder); pos >= 0 {
			v.next = &array{paramName, value,
				verbatim{v.text[pos+len(placeholder):], v.next}}
			v.text = v.text[:pos]
		}
	default:
		v.text = strings.Replace(v.text, placeholder,
			fmt.Sprint(paramValue), -1)
	}
}
// expandPathnameTemplate takes a pathname template and substitutes
// template parameter names with their values. Parameter values can be
// either strings or slices of strings. Each template value that is a
// slice of strings multiplies the number of output strings by the number
// of strings in the slice.
//
// FIX: removed a discarded `strings.Index(pathname, ...)` call (its result
// was never used — flagged by staticcheck).
// NOTE(review): expansion of slice-valued parameters is visibly incomplete
// — when any array node exists the function falls back to returning the
// unexpanded pathname. The fmt.Println calls look like debug output; they
// are kept to preserve observable behavior.
func expandPathnameTemplate(pathname string, params templateParams) []fileParams {
	root := verbatim{pathname, nil}
	for paramName, paramValue := range params {
		root.subst(paramName, paramValue)
		for node := root.next; node != nil; node = node.continuation.next {
			node.continuation.subst(paramName, paramValue)
		}
		fmt.Println(paramName, paramValue) // debug output (kept)
	}
	if root.next == nil {
		return []fileParams{{root.text, params}}
	}
	fmt.Println(root) // debug output (kept)
	return []fileParams{{pathname, params}}
}
|
package perfm
import (
"fmt"
"math"
"sort"
"sync"
"sync/atomic"
"time"
hist "github.com/arthurkiller/perfm/histogram"
)
// Job describes one benchmark job for parallel execution. Lifecycle:
//  1. start workers
//     1. each worker calls job.Copy()
//     2. in a loop: job.Pre() then job.Do()
//     3. after the loop: job.After()
//  2. calculate the summary
type Job interface {
	// Copy returns an independent copy of the job for one worker.
	Copy() (Job, error)
	// Pre is called before each Do and is not timed.
	Pre() error
	// Do contains the core, timed work.
	Do() error
	// After contains the cleanup once the worker finishes.
	After()
}
// PerfMonitor defines the actions a perf monitor supports.
type PerfMonitor interface {
	Regist(Job) // register the job to perfm
	Start()     // start the perf monitor
}
// Regist registers the job with the package-level monitor.
// NOTE(review): relies on a package-level monitor `p` declared elsewhere
// in the package — confirm its initialization order.
func Regist(j Job) {
	p.Regist(j)
}

// Start starts the package-level perf monitor.
func Start() {
	p.Start()
}
// perfmonitor is the default PerfMonitor implementation.
type perfmonitor struct {
	Sum    float64 // sum of the per-request costs
	Stdev  float64 // standard deviation
	Mean   float64 // mean of the latency distribution
	Total  int64   // total request count
	Config         // configuration for perfm

	done           chan int           // closed to stop the perfm
	startTime      time.Time          // benchmark start time
	timer          <-chan time.Time   // frequency-sampling ticker
	collector      chan time.Duration // receives the cost of every completed request
	errCount       int64              // count of failed requests
	localCount     int                // requests seen within the current sampling window
	localTimeCount time.Duration      // total cost within the current sampling window
	buffer         chan int64         // buffers raw timings for the final histogram
	histogram      hist.Histogram     // used to print the histogram
	wg             sync.WaitGroup     // blocks Start until collection finishes

	// job implements the benchmark; errors from job.Do are counted in
	// errCount.
	job Job
}
// New builds a PerfMonitor configured from the given options.
func New(options ...Options) PerfMonitor {
	return &perfmonitor{Config: newConfig(options...)}
}
// Regist registers a job into the perfmonitor for benchmarking and resets
// all counters and channels so the monitor starts from a clean state.
// (Fix: `make(chan int, 0)` is the non-idiomatic spelling of an unbuffered
// channel — staticcheck S1019.)
func (p *perfmonitor) Regist(job Job) {
	p.timer = time.Tick(time.Second * time.Duration(p.Frequency))
	p.collector = make(chan time.Duration, p.BufferSize)
	p.histogram = hist.NewHistogram(p.BinsNumber)
	p.done = make(chan int)
	// NOTE(review): this pre-allocates buffer space for 1e8 samples, a
	// very large allocation — confirm the bound is intentional or make it
	// configurable.
	p.buffer = make(chan int64, 100000000)
	p.wg = sync.WaitGroup{}
	p.job = job
	p.Sum = 0
	p.Stdev = 0
	p.Mean = 0
	p.Total = 0
	p.errCount = 0
	p.localCount = 0
	p.localTimeCount = 0
}
// Start runs the registered benchmark: it spawns a collector goroutine
// plus p.Parallel worker goroutines, runs either until p.Number requests
// complete (count mode) or for p.Duration seconds (duration mode), then
// computes and prints min/max/mean/stdev, a histogram and percentiles.
func (p *perfmonitor) Start() {
	if p.job == nil {
		panic("error job does not registed yet")
	}
	var localwg sync.WaitGroup
	// Print the job description when it implements fmt.Stringer.
	if _, ok := p.job.(fmt.Stringer); ok {
		fmt.Println(p.job)
	}
	fmt.Println("===============================================")
	p.wg.Add(1)
	// Collector goroutine: drains per-request costs, prints periodic
	// QPS/latency samples, and flushes everything into p.buffer on stop.
	go func() {
		p.startTime = time.Now()
		var cost time.Duration
		for {
			select {
			case cost = <-p.collector:
				p.localCount++
				p.localTimeCount += cost
				p.buffer <- int64(cost)
			case <-p.timer:
				// Periodic sampling tick.
				if p.localCount == 0 {
					continue
				}
				if !p.NoPrint {
					fmt.Printf("%s \t Qps: %d \t Avg Latency: %.3fms\n", time.Now().Format("15:04:05.000"),
						p.localCount, float64(p.localTimeCount.Nanoseconds()/int64(p.localCount))/1000000)
				}
				p.localCount = 0
				p.localTimeCount = 0
			case <-p.done:
				// Wait for workers, then drain remaining samples.
				localwg.Wait()
				close(p.collector)
				for cost := range p.collector {
					p.localCount++
					p.localTimeCount += cost
					p.buffer <- int64(cost)
				}
				if !p.NoPrint {
					fmt.Printf("%s \t Qps: %d \t Avg Latency: %.3fms\n", time.Now().Format("15:04:05.000"),
						p.localCount, float64(p.localTimeCount.Nanoseconds()/int64(p.localCount))/1000000)
				}
				close(p.buffer)
				p.wg.Done()
				return
			}
		}
	}()
	if p.Number > 0 {
		// Count mode: stop after p.Number requests in total.
		sum := int64(p.Number)
		for i := 0; i < p.Parallel; i++ {
			localwg.Add(1)
			go func() {
				defer localwg.Done()
				var err error
				job, err := p.job.Copy()
				if err != nil {
					fmt.Println("error in do copy", err)
					return
				}
				defer job.After()
				var start time.Time
				var l int64
				for {
					// Claim one slot; the worker that overshoots by
					// exactly one closes p.done.
					if l = atomic.AddInt64(&p.Total, 1); l > sum {
						if l == sum+1 {
							close(p.done)
						}
						// check if the request reach the goal
						return
					}
					if err = job.Pre(); err != nil {
						fmt.Println("error in do pre job", err)
						return
					}
					start = time.Now()
					err = job.Do()
					p.collector <- time.Since(start)
					if err != nil {
						atomic.AddInt64(&p.errCount, 1)
					}
				}
			}()
		}
	} else {
		// Duration mode: start all workers simultaneously (via starter)
		// and run until p.done is closed by the timer goroutine.
		starter := make(chan struct{})
		for i := 0; i < p.Parallel; i++ {
			localwg.Add(1)
			go func() {
				defer localwg.Done()
				var err error
				job, err := p.job.Copy()
				if err != nil {
					fmt.Println("error in do copy", err)
					return
				}
				defer job.After()
				var start time.Time
				<-starter
				for {
					select {
					case <-p.done:
						return
					default:
						if err = job.Pre(); err != nil {
							fmt.Println("error in do pre job", err)
							return
						}
						start = time.Now()
						err = job.Do()
						p.collector <- time.Since(start)
						if err != nil {
							atomic.AddInt64(&p.errCount, 1)
						}
						atomic.AddInt64(&p.Total, 1)
					}
				}
			}()
		}
		p.wg.Add(1)
		// Stopper goroutine: releases the workers, then closes p.done
		// after the configured duration.
		// NOTE(review): wg.Done() is called immediately, so this Add/Done
		// pair never delays p.wg.Wait(); confirm this is intentional.
		go func() {
			p.wg.Done()
			close(starter)
			time.Sleep(time.Second * time.Duration(p.Duration))
			close(p.done)
			return
		}()
	}
	var sum2, max, min, p70, p80, p90, p95 float64
	min = 0x7fffffffffffffff
	p.wg.Wait()
	// NOTE(review): Total is decremented to undo the one-past overshoot of
	// count mode; in duration mode this drops one real sample — verify.
	p.Total--
	sortSlice := make([]float64, 0, len(p.buffer))
	for d := range p.buffer {
		sortSlice = append(sortSlice, float64(d))
		p.histogram.Add(float64(d))
		p.Sum += float64(d)
		sum2 += float64(d * d)
	}
	sort.Slice(sortSlice, func(i, j int) bool { return sortSlice[i] < sortSlice[j] })
	// Percentiles and extrema, reported in milliseconds.
	p70 = sortSlice[int(float64(p.Total)*0.7)] / 1000000
	p80 = sortSlice[int(float64(p.Total)*0.8)] / 1000000
	p90 = sortSlice[int(float64(p.Total)*0.9)] / 1000000
	p95 = sortSlice[int(float64(p.Total)*0.95)] / 1000000
	min = sortSlice[0]
	max = sortSlice[p.Total-1]
	p.Mean = p.histogram.(*hist.NumericHistogram).Mean()
	p.Stdev = math.Sqrt((float64(sum2) - 2*float64(p.Mean*p.Sum) + float64(float64(p.Total)*p.Mean*p.Mean)) / float64(p.Total))
	fmt.Println("\n===============================================")
	// here show the histogram
	if p.errCount != 0 {
		fmt.Printf("Total errors: %v\t Error percentage: %.3f%%\n", p.errCount, float64(p.errCount*100)/float64(p.Total))
	}
	fmt.Printf("MAX: %.3fms MIN: %.3fms MEAN: %.3fms STDEV: %.3f CV: %.3f%% ", max/1000000, min/1000000, p.Mean/1000000, p.Stdev/1000000, p.Stdev/float64(p.Mean)*100)
	fmt.Println(p.histogram)
	fmt.Println("===============================================")
	fmt.Printf("Summary:\n70%% in:\t%.3fms\n80%% in:\t%.3fms\n90%% in:\t%.3fms\n95%% in:\t%.3fms\n", p70, p80, p90, p95)
}
Update code: simplify the processing flow.
package perfm
import (
"fmt"
"math"
"sort"
"sync"
"sync/atomic"
"time"
hist "github.com/arthurkiller/perfm/histogram"
)
// Job describes one unit of benchmark work that can be run in parallel.
// Lifecycle as driven by Start:
//  1. each worker calls job.Copy() to obtain its own instance
//  2. the worker loops: job.Pre(), then job.Do() (only Do is timed)
//  3. when the worker exits it calls job.After() for cleanup
// Errors returned by Do are counted and reported in the final summary.
type Job interface {
	// Copy returns an independent copy of the job for one worker.
	Copy() (Job, error)
	// Pre is called before each Do; its duration is not measured.
	Pre() error
	// Do contains the core work; its duration is what gets measured.
	Do() error
	// After performs cleanup once the worker finishes.
	After()
}

// PerfMonitor defines the actions of the performance monitor.
type PerfMonitor interface {
	Regist(Job) // register the job to benchmark
	Start()     // start the benchmark run and print the summary
}
// Regist registers the job on the package-level monitor p
// (declared elsewhere in this package).
func Regist(j Job) {
	p.Regist(j)
}

// Start runs the benchmark on the package-level monitor p.
func Start() {
	p.Start()
}
// perfmonitor is the default PerfMonitor implementation. It fans work out to
// Parallel workers, samples QPS/latency on a timer, and accumulates the raw
// per-request costs for the final histogram and summary.
type perfmonitor struct {
	Sum            float64            // sum of all per-request costs (nanoseconds)
	Stdev          float64            // standard deviation of the cost distribution
	Mean           float64            // mean of the cost distribution
	Total          int64              // total number of requests issued
	Config                            // configuration for perfm (embedded)
	done           chan int           // closed to stop the run
	startTime      time.Time          // when Start began the run
	timer          <-chan time.Time   // frequency-sampling ticker channel
	collector      chan time.Duration // receives the cost of every finished request
	errCount       int64              // number of requests whose Do returned an error
	localCount     int                // request count within the current sampling window
	localTimeCount time.Duration      // total cost within the current sampling window
	buffer         chan int64         // holds every raw cost for the final histogram pass
	histogram      hist.Histogram     // used to build and print the latency histogram
	wg             sync.WaitGroup     // blocks Start until collection has drained

	// job is the benchmark job under test; errors returned by job.Do
	// are collected into errCount.
	job Job
}
// New builds a PerfMonitor backed by the default implementation, applying the
// given functional options to its configuration.
func New(options ...Options) PerfMonitor {
	cfg := newConfig(options...)
	return &perfmonitor{Config: cfg}
}
// Regist attaches a job to the monitor for benchmarking and (re)initializes
// all run state so the monitor can be started fresh.
//
// NOTE(review): p.buffer is created with capacity 100,000,000 int64s, which
// eagerly allocates roughly 800MB; it must hold every sample of the run
// because Start only drains it after all workers finish — confirm the sizing
// is intentional.
// NOTE(review): time.Tick's underlying ticker is never stopped, so calling
// Regist again leaks the previous ticker — verify.
func (p *perfmonitor) Regist(job Job) {
	p.timer = time.Tick(time.Second * time.Duration(p.Frequency))
	p.collector = make(chan time.Duration, p.BufferSize)
	p.histogram = hist.NewHistogram(p.BinsNumber)
	p.buffer = make(chan int64, 100000000)
	p.done = make(chan int, 0)
	p.wg = sync.WaitGroup{}
	p.job = job

	// reset all counters left over from any previous run
	p.Sum = 0
	p.Mean = 0
	p.Stdev = 0
	p.Total = 0
	p.errCount = 0
	p.localCount = 0
	p.localTimeCount = 0
}
// Start runs the registered job until either the configured total request
// count (p.Number > 0) or the configured duration (p.Duration) elapses, then
// prints periodic QPS/latency lines, a histogram, and percentile summary.
//
// NOTE(review): fragile spots worth confirming:
//   - the drain branch divides by p.localCount, which panics (integer divide
//     by zero) if no samples were pending when p.done closed — verify
//   - in duration mode the stopper goroutine calls p.wg.Done() before it
//     sleeps, so p.wg.Wait() does not actually wait on it — verify intent
//   - in count mode each worker increments p.Total before checking the
//     bound, so Total can overshoot by up to Parallel-1 even after the
//     single p.Total-- compensation below — verify
func (p *perfmonitor) Start() {
	if p.job == nil {
		panic("error job does not registed yet")
	}
	var localwg sync.WaitGroup
	// If the job implements fmt.Stringer, print its description first.
	if _, ok := p.job.(fmt.Stringer); ok {
		fmt.Println(p.job)
	}
	fmt.Println("===============================================")

	// Collector goroutine: aggregates per-request costs, prints periodic
	// QPS/latency lines, and on shutdown drains everything into p.buffer.
	p.wg.Add(1)
	go func() {
		p.startTime = time.Now()
		var cost time.Duration
		for {
			select {
			case cost = <-p.collector:
				// One finished request: update the sampling-window stats and
				// stash the raw cost for the final histogram pass.
				p.localCount++
				p.localTimeCount += cost
				p.buffer <- int64(cost)
			case <-p.timer:
				// Sampling tick: report QPS and average latency for the
				// window, then reset the window counters.
				if p.localCount == 0 {
					continue
				}
				if !p.NoPrint {
					fmt.Printf("%s \t Qps: %d \t Avg Latency: %.3fms\n", time.Now().Format("15:04:05.000"),
						p.localCount, float64(p.localTimeCount.Nanoseconds()/int64(p.localCount))/1000000)
				}
				p.localCount = 0
				p.localTimeCount = 0
			case <-p.done:
				// Shutdown: wait for all workers to exit, then drain the
				// remaining costs out of the (now closed) collector channel.
				localwg.Wait()
				close(p.collector)
				for cost := range p.collector {
					p.localCount++
					p.localTimeCount += cost
					p.buffer <- int64(cost)
				}
				if !p.NoPrint {
					fmt.Printf("%s \t Qps: %d \t Avg Latency: %.3fms\n", time.Now().Format("15:04:05.000"),
						p.localCount, float64(p.localTimeCount.Nanoseconds()/int64(p.localCount))/1000000)
				}
				close(p.buffer)
				p.wg.Done()
				return
			}
		}
	}()
	if p.Number > 0 {
		// Total-request mode: workers race on an atomic counter until the
		// configured number of requests has been issued.
		sum := int64(p.Number)
		for i := 0; i < p.Parallel; i++ {
			localwg.Add(1)
			go func() {
				defer localwg.Done()
				var err error
				job, err := p.job.Copy()
				if err != nil {
					fmt.Println("error in do copy", err)
					return
				}
				defer job.After()
				var start time.Time
				var l int64
				for {
					// check if the request count reached the goal
					if l = atomic.AddInt64(&p.Total, 1); l > sum {
						if l == sum+1 { // make sure done is closed only once
							close(p.done)
						}
						return
					}
					if err = job.Pre(); err != nil {
						fmt.Println("error in do pre job", err)
						return
					}
					// Only Do is timed; Pre is excluded from the measurement.
					start = time.Now()
					err = job.Do()
					p.collector <- time.Since(start)
					if err != nil {
						atomic.AddInt64(&p.errCount, 1)
					}
				}
			}()
		}
	} else {
		// Duration mode: start all workers behind a common starter gate and
		// let them run until p.done is closed by the stopper goroutine.
		starter := make(chan struct{})
		for i := 0; i < p.Parallel; i++ {
			localwg.Add(1)
			go func() {
				defer localwg.Done()
				var err error
				job, err := p.job.Copy()
				if err != nil {
					fmt.Println("error in do copy", err)
					return
				}
				defer job.After()
				var start time.Time
				// Block until every worker is ready so they start together.
				<-starter
				for {
					select {
					case <-p.done:
						return
					default:
						if err = job.Pre(); err != nil {
							fmt.Println("error in do pre job", err)
							return
						}
						start = time.Now()
						err = job.Do()
						p.collector <- time.Since(start)
						if err != nil {
							atomic.AddInt64(&p.errCount, 1)
						}
						atomic.AddInt64(&p.Total, 1)
					}
				}
			}()
		}
		p.wg.Add(1)
		go func() {
			// Stopper: release the workers, sleep for the configured
			// duration, then cancel them all by closing p.done.
			p.wg.Done()
			close(starter)
			time.Sleep(time.Second * time.Duration(p.Duration))
			close(p.done)
			return
		}()
	}
	// Wait for collection to finish, then summarize: percentiles, min/max,
	// mean, standard deviation, and the histogram.
	p.wg.Wait()
	var sum2, max, min, p70, p80, p90, p95 float64
	min = 0x7fffffffffffffff
	p.Total--
	sortedSlice := make([]float64, 0, len(p.buffer))
	for d := range p.buffer {
		sortedSlice = append(sortedSlice, float64(d))
		p.histogram.Add(float64(d))
		p.Sum += float64(d)
		sum2 += float64(d * d)
	}
	sort.Slice(sortedSlice, func(i, j int) bool { return sortedSlice[i] < sortedSlice[j] })
	// Percentiles are converted from nanoseconds to milliseconds here.
	p70 = sortedSlice[int(float64(p.Total)*0.7)] / 1000000
	p80 = sortedSlice[int(float64(p.Total)*0.8)] / 1000000
	p90 = sortedSlice[int(float64(p.Total)*0.9)] / 1000000
	p95 = sortedSlice[int(float64(p.Total)*0.95)] / 1000000
	min = sortedSlice[0]
	max = sortedSlice[p.Total-1]
	p.Mean = p.histogram.(*hist.NumericHistogram).Mean()
	// Stdev via E[x^2] - 2*mean*E[x] + mean^2 over the sample count.
	p.Stdev = math.Sqrt((float64(sum2) - 2*float64(p.Mean*p.Sum) +
		float64(float64(p.Total)*p.Mean*p.Mean)) / float64(p.Total))
	fmt.Println("\n===============================================")
	// here show the histogram
	if p.errCount != 0 {
		fmt.Printf("Total errors: %v\t Error percentage: %.3f%%\n", p.errCount,
			float64(p.errCount*100)/float64(p.Total))
	}
	fmt.Printf("MAX: %.3fms MIN: %.3fms MEAN: %.3fms STDEV: %.3f CV: %.3f%% ", max/1000000,
		min/1000000, p.Mean/1000000, p.Stdev/1000000, p.Stdev/float64(p.Mean)*100)
	fmt.Println(p.histogram)
	fmt.Println("===============================================")
	fmt.Printf("Summary:\n70%% in:\t%.3fms\n80%% in:\t%.3fms\n90%% in:\t%.3fms\n95%% in:\t%.3fms\n",
		p70, p80, p90, p95)
}
|
package pkcs7
import (
"bytes"
"crypto"
"crypto/cipher"
"crypto/des"
"crypto/hmac"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"errors"
"fmt"
"math/big"
_ "crypto/sha1" // for crypto.SHA1
)
// PKCS7 Represents a PKCS7 structure: the parsed content plus any
// certificates, CRLs and signer infos that were carried alongside it.
type PKCS7 struct {
	Content      []byte
	Certificates []*x509.Certificate
	CRLs         []pkix.CertificateList
	Signers      []signerInfo
	raw          interface{} // the underlying signedData or envelopedData
}

// contentInfo is the outermost ASN.1 wrapper: an OID naming the content type
// plus the (optional, explicitly tagged) content itself.
type contentInfo struct {
	ContentType asn1.ObjectIdentifier
	Content     asn1.RawValue `asn1:"explicit,optional,tag:0"`
}

// ErrUnsupportedContentType is returned when a PKCS7 content is not supported.
// Currently only Data (1.2.840.113549.1.7.1), Signed Data (1.2.840.113549.1.7.2),
// and Enveloped Data are supported (1.2.840.113549.1.7.3)
var ErrUnsupportedContentType = errors.New("pkcs7: cannot parse data: unimplemented content type")

// unsignedData is the raw payload carried inside a data content block.
type unsignedData []byte

// PKCS#7 content-type and authenticated-attribute object identifiers
// (all rooted under 1.2.840.113549).
var (
	oidData                   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 1}
	oidSignedData             = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 2}
	oidEnvelopedData          = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 3}
	oidSignedAndEnvelopedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 4}
	oidDigestedData           = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 5}
	oidEncryptedData          = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 6}
	oidAttributeContentType   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 3}
	oidAttributeMessageDigest = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 4}
	oidAttributeSigningTime   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 5}
)

// signedData mirrors the SignedData ASN.1 structure of PKCS#7.
type signedData struct {
	Version                    int                        `asn1:"default:1"`
	DigestAlgorithmIdentifiers []pkix.AlgorithmIdentifier `asn1:"set"`
	ContentInfo                contentInfo
	Certificates               asn1.RawValue          `asn1:"optional,tag:0"`
	CRLs                       []pkix.CertificateList `asn1:"optional,tag:1"`
	SignerInfos                []signerInfo           `asn1:"set"`
}

// envelopedData mirrors the EnvelopedData ASN.1 structure of PKCS#7.
type envelopedData struct {
	Version              int
	RecipientInfos       []recipientInfo `asn1:"set"`
	EncryptedContentInfo encryptedContentInfo
}

// recipientInfo carries the symmetric content key encrypted to one recipient.
type recipientInfo struct {
	Version                int
	IssuerAndSerialNumber  issuerAndSerial
	KeyEncryptionAlgorithm pkix.AlgorithmIdentifier
	EncryptedKey           []byte
}

// encryptedContentInfo holds the symmetric algorithm and the ciphertext.
type encryptedContentInfo struct {
	ContentType                asn1.ObjectIdentifier
	ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
	EncryptedContent           asn1.RawValue `asn1:"tag:0,optional,explicit"`
}

// attribute is one authenticated/unauthenticated attribute: a type OID and a
// SET-wrapped value.
type attribute struct {
	Type  asn1.ObjectIdentifier
	Value asn1.RawValue `asn1:"set"`
}

// issuerAndSerial identifies a certificate by issuer name and serial number.
type issuerAndSerial struct {
	IssuerName   pkix.RDNSequence
	SerialNumber *big.Int
}

// MessageDigestMismatchError is returned when the signer data digest does not
// match the computed digest for the contained content
type MessageDigestMismatchError struct {
	ExpectedDigest []byte
	ActualDigest   []byte
}

// Error formats both digests so the mismatch is visible in logs.
func (err *MessageDigestMismatchError) Error() string {
	return fmt.Sprintf("pkcs7: Message digest mismatch\n\tExpected: %X\n\tActual : %X", err.ExpectedDigest, err.ActualDigest)
}

// signerInfo mirrors the SignerInfo ASN.1 structure of PKCS#7.
type signerInfo struct {
	Version                   int `asn1:"default:1"`
	IssuerAndSerialNumber     issuerAndSerial
	DigestAlgorithm           pkix.AlgorithmIdentifier
	AuthenticatedAttributes   []attribute `asn1:"optional,tag:0"`
	DigestEncryptionAlgorithm pkix.AlgorithmIdentifier
	EncryptedDigest           []byte
	UnauthenticatedAttributes []attribute `asn1:"optional,tag:1"`
}
// Parse decodes a DER encoded PKCS7 package (BER input is normalized to DER
// first). It dispatches on the content type OID; unsupported types yield
// ErrUnsupportedContentType.
//
// Fixes: the unmarshal error is now checked before inspecting rest (the
// original consulted rest first, masking the real error), and the leftover
// debug fmt.Printf has been removed from this library path.
func Parse(data []byte) (p7 *PKCS7, err error) {
	var info contentInfo
	der, err := ber2der(data)
	if err != nil {
		return nil, err
	}
	rest, err := asn1.Unmarshal(der, &info)
	if err != nil {
		return nil, err
	}
	if len(rest) > 0 {
		return nil, asn1.SyntaxError{Msg: "trailing data"}
	}
	switch {
	case info.ContentType.Equal(oidSignedData):
		return parseSignedData(info.Content.Bytes)
	case info.ContentType.Equal(oidEnvelopedData):
		return parseEnvelopedData(info.Content.Bytes)
	}
	return nil, ErrUnsupportedContentType
}
// parseSignedData unmarshals a signedData payload, extracts its certificates
// and inner content, and wraps everything in a PKCS7.
//
// Fixes: the asn1.Unmarshal error was silently ignored (a malformed payload
// would previously continue with a zero-valued signedData); debug Printf
// statements have been removed.
func parseSignedData(data []byte) (*PKCS7, error) {
	var sd signedData
	if _, err := asn1.Unmarshal(data, &sd); err != nil {
		return nil, err
	}
	certs, err := x509.ParseCertificates(sd.Certificates.Bytes)
	if err != nil {
		return nil, err
	}
	var compound asn1.RawValue
	var content unsignedData
	if _, err := asn1.Unmarshal(sd.ContentInfo.Content.Bytes, &compound); err != nil {
		return nil, err
	}
	// Compound octet string: unwrap one more level; otherwise the bytes are
	// assumed to be a primitive OCTET STRING (tag 04) payload.
	if compound.IsCompound {
		if _, err = asn1.Unmarshal(compound.Bytes, &content); err != nil {
			return nil, err
		}
	} else {
		content = compound.Bytes
	}
	return &PKCS7{
		Content:      content,
		Certificates: certs,
		CRLs:         sd.CRLs,
		Signers:      sd.SignerInfos,
		raw:          sd}, nil
}
// parseEnvelopedData unmarshals an envelopedData payload. Only the raw
// structure is retained; decryption happens later via PKCS7.Decrypt.
func parseEnvelopedData(data []byte) (*PKCS7, error) {
	ed := envelopedData{}
	_, err := asn1.Unmarshal(data, &ed)
	if err != nil {
		return nil, err
	}
	return &PKCS7{raw: ed}, nil
}
// digestInfo is the PKCS#1 DigestInfo structure: the digest algorithm
// identifier plus the digest bytes themselves.
type digestInfo struct {
	Algorithm pkix.AlgorithmIdentifier
	Digest    []byte
}
// Verify checks the signatures of a PKCS7 object, returning the first
// signer whose signature fails to validate; nil means every signer passed.
func (p7 *PKCS7) Verify() (err error) {
	if len(p7.Signers) == 0 {
		return errors.New("pkcs7: Message has no signers")
	}
	for i := range p7.Signers {
		if err = verifySignature(p7, p7.Signers[i]); err != nil {
			return err
		}
	}
	return nil
}
// verifySignature validates one signerInfo against the PKCS7 content:
// if authenticated attributes are present, the embedded messageDigest must
// match the SHA1 of the content, and the signature is then checked over the
// DER encoding of those attributes.
//
// NOTE(review): the digest and signature algorithms are hard-coded to SHA1
// and SHA1-with-RSA here regardless of signer.DigestAlgorithm — confirm
// this restriction is intended.
func verifySignature(p7 *PKCS7, signer signerInfo) error {
	if len(signer.AuthenticatedAttributes) > 0 {
		// TODO(fullsailor): First check the content type match
		digest, err := getDigestFromAttributes(signer.AuthenticatedAttributes)
		if err != nil {
			return err
		}
		// Recompute the content digest and compare in constant time.
		h := crypto.SHA1.New()
		h.Write(p7.Content)
		computed := h.Sum(nil)
		if !hmac.Equal(digest, computed) {
			return &MessageDigestMismatchError{
				ExpectedDigest: digest,
				ActualDigest:   computed,
			}
		}
	}
	// Locate the signer's certificate among those shipped in the payload.
	cert := getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber)
	if cert == nil {
		return errors.New("pkcs7: No certificate for signer")
	}
	encodedAttributes, err := marshalAttributes(signer.AuthenticatedAttributes)
	if err != nil {
		return err
	}
	/*
		h := crypto.SHA1.New()
		h.Write(p7.raw.ContentInfo.Content.Bytes)
		h.Write(encodedAttributes)
		messageDigest := h.Sum(nil)
		di := digestInfo{
			Algorithm: signer.DigestAlgorithm,
			Digest:    messageDigest,
		}
		fmt.Printf("--> digestInfo %+v\n", di)
		info, err := asn1.Marshal(di)
		if err != nil {
			return err
		}
		fmt.Printf("--> asn.1 digestInfo %x\n---> length:%d\n", info, len(info))
	*/
	algo := x509.SHA1WithRSA
	return cert.CheckSignature(algo, encodedAttributes, signer.EncryptedDigest)
}
// marshalAttributes DER encodes the attributes as a SET and strips the outer
// SET header, returning only the element bytes (the form that gets signed).
//
// Fix: the second asn1.Unmarshal's error was silently ignored; it is now
// propagated to the caller.
func marshalAttributes(attrs []attribute) ([]byte, error) {
	encodedAttributes, err := asn1.Marshal(struct {
		A []attribute `asn1:"set"`
	}{A: attrs})
	if err != nil {
		return nil, err
	}
	// Remove the leading sequence octets
	var raw asn1.RawValue
	if _, err := asn1.Unmarshal(encodedAttributes, &raw); err != nil {
		return nil, err
	}
	return raw.Bytes, nil
}
// Algorithm OIDs used when producing signatures: SHA1 for digests and RSA
// (PKCS#1 v1.5) for signing and key encryption.
var (
	oidDigestAlgorithmSHA1    = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 26}
	oidEncryptionAlgorithmRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1}
)
// getDigestFromAttributes extracts the value of the messageDigest attribute,
// or errors if the attribute is absent.
func getDigestFromAttributes(attributes []attribute) (digest []byte, err error) {
	for i := range attributes {
		if !attributes[i].Type.Equal(oidAttributeMessageDigest) {
			continue
		}
		_, err = asn1.Unmarshal(attributes[i].Value.Bytes, &digest)
		return
	}
	return nil, errors.New("pkcs7: Missing messageDigest attribute")
}
// getCertFromCertsByIssuerAndSerial scans certs for the certificate matching
// the given issuer-and-serial pair; nil is returned when none matches.
func getCertFromCertsByIssuerAndSerial(certs []*x509.Certificate, ias issuerAndSerial) *x509.Certificate {
	for i := range certs {
		if isCertMatchForIssuerAndSerial(certs[i], ias) {
			return certs[i]
		}
	}
	return nil
}
// GetOnlySigner returns an x509.Certificate for the first signer of the signed
// data payload. If there are more or less than one signer, nil is returned
func (p7 *PKCS7) GetOnlySigner() *x509.Certificate {
	if len(p7.Signers) == 1 {
		return getCertFromCertsByIssuerAndSerial(p7.Certificates, p7.Signers[0].IssuerAndSerialNumber)
	}
	return nil
}
// ErrUnsupportedAlgorithm tells you when our quick dev assumptions have failed.
var ErrUnsupportedAlgorithm = errors.New("pkcs7: cannot decrypt data: only RSA, DES, & DES-EDE3 supported")

// ErrNotEncryptedContent is returned when attempting to Decrypt data that is
// not envelopedData content.
var ErrNotEncryptedContent = errors.New("pkcs7: content data is a decryptable data type")
// Decrypt decrypts encrypted content info for the given recipient cert and
// private key: the content key is RSA-decrypted from the matching
// recipientInfo, then used to decrypt the enveloped content.
//
// Fix: the original used a non-comma-ok type assertion (pk.(*rsa.PrivateKey))
// which panicked for any non-RSA private key; it now returns
// ErrUnsupportedAlgorithm instead. The leftover debug Printf was removed.
func (p7 *PKCS7) Decrypt(cert *x509.Certificate, pk crypto.PrivateKey) ([]byte, error) {
	data, ok := p7.raw.(envelopedData)
	if !ok {
		return nil, ErrNotEncryptedContent
	}
	recipient := selectRecipientForCertificate(data.RecipientInfos, cert)
	if recipient.EncryptedKey == nil {
		return nil, errors.New("pkcs7: no enveloped recipient for provided certificate")
	}
	priv, ok := pk.(*rsa.PrivateKey)
	if !ok || priv == nil {
		return nil, ErrUnsupportedAlgorithm
	}
	contentKey, err := rsa.DecryptPKCS1v15(rand.Reader, priv, recipient.EncryptedKey)
	if err != nil {
		return nil, err
	}
	return data.EncryptedContentInfo.decrypt(contentKey)
}
// Content-encryption algorithm OIDs accepted by decrypt: single DES-CBC and
// three-key triple-DES-CBC.
var oidEncryptionAlgorithmDESCBC = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 7}
var oidEncryptionAlgorithmDESEDE3CBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7}
// decrypt decrypts the enveloped ciphertext with the given symmetric key.
// Only DES-CBC and DES-EDE3-CBC are supported; the IV comes from the
// algorithm parameters and PKCS#7 padding is stripped from the result.
//
// NOTE(review): the asn1.Unmarshal error inside the concat loop is ignored
// (only the nil-rest condition breaks the loop) — verify malformed compound
// content cannot loop or silently truncate. The Printf on the unsupported-
// algorithm path looks like leftover debugging.
func (eci encryptedContentInfo) decrypt(key []byte) ([]byte, error) {
	alg := eci.ContentEncryptionAlgorithm.Algorithm
	if !alg.Equal(oidEncryptionAlgorithmDESCBC) && !alg.Equal(oidEncryptionAlgorithmDESEDE3CBC) {
		fmt.Printf("Unsupported Content Encryption Algorithm: %s\n", alg)
		return nil, ErrUnsupportedAlgorithm
	}
	// EncryptedContent can either be constructed of multple OCTET STRINGs
	// or _be_ a tagged OCTET STRING
	var cyphertext []byte
	if eci.EncryptedContent.IsCompound {
		// Complex case to concat all of the children OCTET STRINGs
		var buf bytes.Buffer
		cypherbytes := eci.EncryptedContent.Bytes
		for {
			var part []byte
			cypherbytes, _ = asn1.Unmarshal(cypherbytes, &part)
			buf.Write(part)
			if cypherbytes == nil {
				break
			}
		}
		cyphertext = buf.Bytes()
	} else {
		// Simple case, the bytes _are_ the cyphertext
		cyphertext = eci.EncryptedContent.Bytes
	}
	var block cipher.Block
	var err error
	switch {
	case alg.Equal(oidEncryptionAlgorithmDESCBC):
		block, err = des.NewCipher(key)
	case alg.Equal(oidEncryptionAlgorithmDESEDE3CBC):
		block, err = des.NewTripleDESCipher(key)
	}
	if err != nil {
		return nil, err
	}
	// Both supported ciphers use an 8-byte block, hence an 8-byte IV.
	iv := eci.ContentEncryptionAlgorithm.Parameters.Bytes
	if len(iv) != 8 {
		return nil, errors.New("pkcs7: encryption algorithm parameters are malformed")
	}
	mode := cipher.NewCBCDecrypter(block, iv)
	plaintext := make([]byte, len(cyphertext))
	mode.CryptBlocks(plaintext, cyphertext)
	if plaintext, err = unpad(plaintext, mode.BlockSize()); err != nil {
		return nil, err
	}
	return plaintext, nil
}
// selectRecipientForCertificate picks the recipientInfo whose issuer and
// serial match cert; the zero recipientInfo is returned when none matches.
func selectRecipientForCertificate(recipients []recipientInfo, cert *x509.Certificate) recipientInfo {
	for i := range recipients {
		if isCertMatchForIssuerAndSerial(cert, recipients[i].IssuerAndSerialNumber) {
			return recipients[i]
		}
	}
	var none recipientInfo
	return none
}
// isCertMatchForIssuerAndSerial reports whether cert matches ias by comparing
// the serial number exactly and the issuer only by CommonName.
func isCertMatchForIssuerAndSerial(cert *x509.Certificate, ias issuerAndSerial) bool {
	// TODO(fullsailor): openssl's implementation of comparing issuer names compares
	// the DER encoding of both for exact byte match. Which is much stricter than
	// what we have here. This would require marshalling ias.IssuerName and comparing
	// with cert.RawIssuer using bytes.Compare
	issuer := new(pkix.Name)
	issuer.FillFromRDNSequence(&ias.IssuerName)
	return cert.SerialNumber.Cmp(ias.SerialNumber) == 0 && issuer.CommonName == cert.Issuer.CommonName
}
// pad appends PKCS#7-style padding so len(result) is a multiple of blocklen.
// Between 1 and blocklen bytes are always appended, each equal to the pad
// length; when data is already aligned a full block of padding is added.
//
// Fix: the original's "if padlen == 0 { padlen = blocklen }" branch was dead
// code — blocklen - (len(data) % blocklen) is already in [1, blocklen].
func pad(data []byte, blocklen int) ([]byte, error) {
	if blocklen < 1 {
		return nil, fmt.Errorf("invalid blocklen %d", blocklen)
	}
	padlen := blocklen - (len(data) % blocklen)
	return append(data, bytes.Repeat([]byte{byte(padlen)}, padlen)...), nil
}
// unpad removes PKCS#7 padding from data, validating that every pad byte
// equals the pad length.
//
// Fixes: the original accepted a pad length of 0 (returning data unchanged
// with no error) and panicked with a slice-bounds error when the final byte
// exceeded len(data); both cases now return an "invalid padding" error.
func unpad(data []byte, blocklen int) ([]byte, error) {
	if blocklen < 1 {
		return nil, fmt.Errorf("invalid blocklen %d", blocklen)
	}
	if len(data)%blocklen != 0 || len(data) == 0 {
		return nil, fmt.Errorf("invalid data len %d", len(data))
	}
	// the last byte is the length of padding
	padlen := int(data[len(data)-1])
	// a valid pad length is 1..blocklen and can never exceed the data
	if padlen == 0 || padlen > blocklen || padlen > len(data) {
		return nil, errors.New("invalid padding")
	}
	// check padding integrity, all bytes should be the same
	pad := data[len(data)-padlen:]
	for _, padbyte := range pad {
		if padbyte != byte(padlen) {
			return nil, errors.New("invalid padding")
		}
	}
	return data[:len(data)-padlen], nil
}
// UnmarshalSignedAttribute decodes into out the first signer's authenticated
// attribute whose type equals attributeType.
func (p7 *PKCS7) UnmarshalSignedAttribute(attributeType asn1.ObjectIdentifier, out interface{}) error {
	sd, ok := p7.raw.(signedData)
	if !ok {
		return errors.New("pkcs7: payload is not signedData content")
	}
	if len(sd.SignerInfos) < 1 {
		return errors.New("pkcs7: payload has no signers")
	}
	for _, attr := range sd.SignerInfos[0].AuthenticatedAttributes {
		if !attr.Type.Equal(attributeType) {
			continue
		}
		_, err := asn1.Unmarshal(attr.Value.Bytes, out)
		return err
	}
	return errors.New("pkcs7: attribute type not in signed attributes")
}
// SignedData is an opaque data structure for creating signed data payloads:
// accumulate certificates and signers, then call Finish to marshal.
type SignedData struct {
	sd            signedData           // the structure being built
	certs         []*x509.Certificate  // certificates to embed in the payload
	messageDigest []byte               // precomputed digest of the content
}

// Attribute represents a key value pair attribute. Value must be marshalable
// by `encoding/asn1`.
type Attribute struct {
	Type  asn1.ObjectIdentifier
	Value interface{}
}

// SignerInfoConfig are optional values to include when adding a signer
type SignerInfoConfig struct {
	ExtraSignedAttributes []Attribute
}
// NewSignedData initializes a SignedData builder around data: the content is
// wrapped in a data-typed contentInfo and its SHA1 digest is precomputed for
// the messageDigest signed attribute.
func NewSignedData(data []byte) (*SignedData, error) {
	content, err := asn1.Marshal(data)
	if err != nil {
		return nil, err
	}
	hasher := crypto.SHA1.New()
	hasher.Write(data)
	sd := signedData{
		Version: 1,
		DigestAlgorithmIdentifiers: []pkix.AlgorithmIdentifier{
			{Algorithm: oidDigestAlgorithmSHA1},
		},
		ContentInfo: contentInfo{
			ContentType: oidData,
			Content:     asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true},
		},
	}
	return &SignedData{sd: sd, messageDigest: hasher.Sum(nil)}, nil
}
// attributes collects attribute type/value pairs in insertion order prior to
// marshaling.
type attributes struct {
	types  []asn1.ObjectIdentifier
	values []interface{}
}

// Add adds the attribute, maintaining insertion order
func (attrs *attributes) Add(attrType asn1.ObjectIdentifier, value interface{}) {
	attrs.types = append(attrs.types, attrType)
	attrs.values = append(attrs.values, value)
}

// ForMarshaling converts the accumulated pairs into []attribute, ASN.1
// encoding each value and wrapping it in a SET (tag 17) RawValue.
//
// NOTE(review): DER requires SET OF elements to be sorted by their encoding;
// attributes are emitted here in insertion order — confirm callers do not
// require strict DER output.
func (attrs *attributes) ForMarshaling() ([]attribute, error) {
	results := make([]attribute, len(attrs.types))
	for i := range results {
		attrType := attrs.types[i]
		attrValue := attrs.values[i]
		asn1Value, err := asn1.Marshal(attrValue)
		if err != nil {
			return nil, err
		}
		results[i] = attribute{
			Type:  attrType,
			Value: asn1.RawValue{Tag: 17, IsCompound: true, Bytes: asn1Value}, // 17 == SET tag
		}
	}
	return results, nil
}
// AddSigner signs attributes about the content and adds certificate to payload.
// The signed attributes always include the content type and the precomputed
// message digest, plus any extras from config; the DER encoding of those
// attributes is signed with SHA1-with-RSA.
func (sd *SignedData) AddSigner(cert *x509.Certificate, pkey crypto.PrivateKey, config SignerInfoConfig) error {
	attrs := &attributes{}
	attrs.Add(oidAttributeContentType, sd.sd.ContentInfo.ContentType)
	attrs.Add(oidAttributeMessageDigest, sd.messageDigest)
	for _, attr := range config.ExtraSignedAttributes {
		attrs.Add(attr.Type, attr.Value)
	}
	finalAttrs, err := attrs.ForMarshaling()
	if err != nil {
		return err
	}
	// create signature over the DER-encoded signed attributes
	signature, err := signAttributes(finalAttrs, pkey, crypto.SHA1)
	if err != nil {
		return err
	}
	ias, err := cert2issuerAndSerial(cert)
	if err != nil {
		return err
	}
	signer := signerInfo{
		AuthenticatedAttributes:   finalAttrs,
		DigestAlgorithm:           pkix.AlgorithmIdentifier{Algorithm: oidDigestAlgorithmSHA1},
		DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: oidEncryptionAlgorithmRSA},
		IssuerAndSerialNumber:     ias,
		EncryptedDigest:           signature,
		Version:                   1,
	}
	// record the signer certificate and its signerInfo in the payload
	sd.certs = append(sd.certs, cert)
	sd.sd.SignerInfos = append(sd.sd.SignerInfos, signer)
	return nil
}
// AddCertificate adds the certificate to the payload without adding a signer.
// Useful for parent certificates that verifiers may need to build a chain.
func (sd *SignedData) AddCertificate(cert *x509.Certificate) {
	sd.certs = append(sd.certs, cert)
}
// Finish marshals the accumulated content, certificates and signers into the
// final DER-encoded signedData wrapped in a PKCS7 contentInfo.
func (sd *SignedData) Finish() ([]byte, error) {
	sd.sd.Certificates = marshalCertificates(sd.certs)
	inner, err := asn1.Marshal(sd.sd)
	if err != nil {
		return nil, err
	}
	return asn1.Marshal(contentInfo{
		ContentType: oidSignedData,
		Content:     asn1.RawValue{Class: 2, Tag: 0, Bytes: inner, IsCompound: true},
	})
}
// cert2issuerAndSerial extracts the issuer RDNSequence and serial number from
// cert. The issuer is re-parsed from cert.RawIssuer because
// cert.Issuer.ToRDNSequence() mangles the original sequence.
func cert2issuerAndSerial(cert *x509.Certificate) (issuerAndSerial, error) {
	var issuer pkix.RDNSequence
	extra, err := asn1.Unmarshal(cert.RawIssuer, &issuer)
	switch {
	case err != nil:
		return issuerAndSerial{}, err
	case len(extra) > 0:
		return issuerAndSerial{}, errors.New("pkcs7: extra data remains after parsing certificate issuer")
	}
	return issuerAndSerial{IssuerName: issuer, SerialNumber: cert.SerialNumber}, nil
}
// signAttributes signs the DER encoded form of the attributes with the
// private key, hashing with the supplied hash. Only RSA keys are supported.
//
// Fix: the original hashed with the `hash` parameter but then signed with a
// hard-coded crypto.SHA1, producing invalid signatures for any caller that
// passed a different hash; the parameter is now used consistently.
func signAttributes(attrs []attribute, pkey crypto.PrivateKey, hash crypto.Hash) ([]byte, error) {
	attrBytes, err := marshalAttributes(attrs)
	if err != nil {
		return nil, err
	}
	h := hash.New()
	h.Write(attrBytes)
	hashed := h.Sum(nil)
	switch priv := pkey.(type) {
	case *rsa.PrivateKey:
		return rsa.SignPKCS1v15(rand.Reader, priv, hash, hashed)
	}
	return nil, ErrUnsupportedAlgorithm
}
// marshalCertificates concatenates the raw DER of all certificates and wraps
// them in an implicitly tagged [0] RawValue for the signedData structure.
func marshalCertificates(certs []*x509.Certificate) asn1.RawValue {
	var der bytes.Buffer
	for i := range certs {
		der.Write(certs[i].Raw)
	}
	return asn1.RawValue{Class: 2, Tag: 0, Bytes: der.Bytes(), IsCompound: true}
}
// DegenerateCertificate creates a signed data structure containing only the
// provided certificate
func DegenerateCertificate(cert []byte) ([]byte, error) {
certs := asn1.RawValue{Class: 2, Tag: 0, Bytes: cert, IsCompound: true}
emptyContent := contentInfo{ContentType: oidData}
sd := signedData{
Version: 1,
ContentInfo: emptyContent,
Certificates: certs,
CRLs: []pkix.CertificateList{},
}
content, err := asn1.Marshal(sd)
if err != nil {
return nil, err
}
signedContent := contentInfo{
ContentType: oidSignedData,
Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true},
}
return asn1.Marshal(signedContent)
}
// Encrypt creates and returns an envelope data PKCS7 structure with encrypted
// recipient keys for each recipient public key. The content is encrypted with
// a random single-DES key in CBC mode; the key is RSA-encrypted per recipient.
//
// Fix: the error returned by pad was silently discarded before use; it is
// now checked.
// NOTE(review): single DES (56-bit key) is obsolete — callers needing real
// confidentiality should not rely on this.
// TODO(fullsailor): Add support for encrypting content with other algorithms
func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) {
	// Create DES key & CBC IV
	key := make([]byte, 8)
	iv := make([]byte, des.BlockSize)
	if _, err := rand.Read(key); err != nil {
		return nil, err
	}
	if _, err := rand.Read(iv); err != nil {
		return nil, err
	}
	// Encrypt padded content
	block, err := des.NewCipher(key)
	if err != nil {
		return nil, err
	}
	mode := cipher.NewCBCEncrypter(block, iv)
	plaintext, err := pad(content, mode.BlockSize())
	if err != nil {
		return nil, err
	}
	cyphertext := make([]byte, len(plaintext))
	mode.CryptBlocks(cyphertext, plaintext)
	// Prepare ASN.1 Encrypted Content Info
	eci := encryptedContentInfo{
		ContentType: oidData,
		ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{
			Algorithm:  oidEncryptionAlgorithmDESCBC,
			Parameters: asn1.RawValue{Tag: 4, Bytes: iv},
		},
		EncryptedContent: marshalEncryptedContent(cyphertext),
	}
	// Prepare each recipient's encrypted copy of the content key
	recipientInfos := make([]recipientInfo, len(recipients))
	for i, recipient := range recipients {
		encrypted, err := encryptKey(key, recipient)
		if err != nil {
			return nil, err
		}
		ias, err := cert2issuerAndSerial(recipient)
		if err != nil {
			return nil, err
		}
		info := recipientInfo{
			Version:               0,
			IssuerAndSerialNumber: ias,
			KeyEncryptionAlgorithm: pkix.AlgorithmIdentifier{
				Algorithm: oidEncryptionAlgorithmRSA,
			},
			EncryptedKey: encrypted,
		}
		recipientInfos[i] = info
	}
	// Prepare envelope content
	envelope := envelopedData{
		EncryptedContentInfo: eci,
		Version:              0,
		RecipientInfos:       recipientInfos,
	}
	innerContent, err := asn1.Marshal(envelope)
	if err != nil {
		return nil, err
	}
	// Prepare outer payload structure
	wrapper := contentInfo{
		ContentType: oidEnvelopedData,
		Content:     asn1.RawValue{Class: 2, Tag: 0, IsCompound: true, Bytes: innerContent},
	}
	return asn1.Marshal(wrapper)
}
// marshalEncryptedContent wraps the ciphertext in an OCTET STRING inside an
// implicitly tagged [0] value, matching the EncryptedContent field encoding.
func marshalEncryptedContent(content []byte) asn1.RawValue {
	octets, _ := asn1.Marshal(content) // marshaling a plain []byte cannot fail
	return asn1.RawValue{Tag: 0, Class: 2, Bytes: octets, IsCompound: true}
}
// encryptKey encrypts the symmetric content key for one recipient using RSA
// PKCS#1 v1.5 with the recipient certificate's public key.
//
// Fix: the original used a non-comma-ok type assertion on
// recipient.PublicKey, which panicked for any non-RSA certificate; it now
// returns ErrUnsupportedAlgorithm instead.
func encryptKey(key []byte, recipient *x509.Certificate) ([]byte, error) {
	pub, ok := recipient.PublicKey.(*rsa.PublicKey)
	if !ok || pub == nil {
		return nil, ErrUnsupportedAlgorithm
	}
	return rsa.EncryptPKCS1v15(rand.Reader, pub, key)
}
Sort attribute SETs to obey the DER specification.
DER requires that SET OF values order their elements lexicographically.
package pkcs7
import (
"bytes"
"crypto"
"crypto/cipher"
"crypto/des"
"crypto/hmac"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"errors"
"fmt"
"math/big"
"sort"
_ "crypto/sha1" // for crypto.SHA1
)
// PKCS7 Represents a PKCS7 structure: parsed content plus the certificates,
// CRLs and signer infos carried alongside it.
type PKCS7 struct {
	Content      []byte
	Certificates []*x509.Certificate
	CRLs         []pkix.CertificateList
	Signers      []signerInfo
	raw          interface{} // the underlying signedData or envelopedData
}

// contentInfo is the outermost ASN.1 wrapper: content type OID + content.
type contentInfo struct {
	ContentType asn1.ObjectIdentifier
	Content     asn1.RawValue `asn1:"explicit,optional,tag:0"`
}

// ErrUnsupportedContentType is returned when a PKCS7 content is not supported.
// Currently only Data (1.2.840.113549.1.7.1), Signed Data (1.2.840.113549.1.7.2),
// and Enveloped Data are supported (1.2.840.113549.1.7.3)
var ErrUnsupportedContentType = errors.New("pkcs7: cannot parse data: unimplemented content type")

// unsignedData is the raw payload carried inside a data content block.
type unsignedData []byte

// PKCS#7 content-type and authenticated-attribute OIDs.
var (
	oidData                   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 1}
	oidSignedData             = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 2}
	oidEnvelopedData          = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 3}
	oidSignedAndEnvelopedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 4}
	oidDigestedData           = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 5}
	oidEncryptedData          = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 6}
	oidAttributeContentType   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 3}
	oidAttributeMessageDigest = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 4}
	oidAttributeSigningTime   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 5}
)

// signedData mirrors the SignedData ASN.1 structure of PKCS#7.
type signedData struct {
	Version                    int                        `asn1:"default:1"`
	DigestAlgorithmIdentifiers []pkix.AlgorithmIdentifier `asn1:"set"`
	ContentInfo                contentInfo
	Certificates               asn1.RawValue          `asn1:"optional,tag:0"`
	CRLs                       []pkix.CertificateList `asn1:"optional,tag:1"`
	SignerInfos                []signerInfo           `asn1:"set"`
}

// envelopedData mirrors the EnvelopedData ASN.1 structure of PKCS#7.
type envelopedData struct {
	Version              int
	RecipientInfos       []recipientInfo `asn1:"set"`
	EncryptedContentInfo encryptedContentInfo
}

// recipientInfo carries the symmetric content key encrypted to one recipient.
type recipientInfo struct {
	Version                int
	IssuerAndSerialNumber  issuerAndSerial
	KeyEncryptionAlgorithm pkix.AlgorithmIdentifier
	EncryptedKey           []byte
}

// encryptedContentInfo holds the symmetric algorithm and the ciphertext.
type encryptedContentInfo struct {
	ContentType                asn1.ObjectIdentifier
	ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
	EncryptedContent           asn1.RawValue `asn1:"tag:0,optional,explicit"`
}

// attribute is one signed/unsigned attribute: a type OID and SET-wrapped value.
type attribute struct {
	Type  asn1.ObjectIdentifier
	Value asn1.RawValue `asn1:"set"`
}

// issuerAndSerial identifies a certificate by issuer name and serial number.
type issuerAndSerial struct {
	IssuerName   pkix.RDNSequence
	SerialNumber *big.Int
}

// MessageDigestMismatchError is returned when the signer data digest does not
// match the computed digest for the contained content
type MessageDigestMismatchError struct {
	ExpectedDigest []byte
	ActualDigest   []byte
}

// Error formats both digests so the mismatch is visible in logs.
func (err *MessageDigestMismatchError) Error() string {
	return fmt.Sprintf("pkcs7: Message digest mismatch\n\tExpected: %X\n\tActual : %X", err.ExpectedDigest, err.ActualDigest)
}

// signerInfo mirrors the SignerInfo ASN.1 structure of PKCS#7.
type signerInfo struct {
	Version                   int `asn1:"default:1"`
	IssuerAndSerialNumber     issuerAndSerial
	DigestAlgorithm           pkix.AlgorithmIdentifier
	AuthenticatedAttributes   []attribute `asn1:"optional,tag:0"`
	DigestEncryptionAlgorithm pkix.AlgorithmIdentifier
	EncryptedDigest           []byte
	UnauthenticatedAttributes []attribute `asn1:"optional,tag:1"`
}
// Parse decodes a DER encoded PKCS7 package (BER input is normalized to DER
// first) and dispatches on the content type OID.
//
// Fixes: the unmarshal error is now checked before inspecting rest (the
// original consulted rest first, masking the real error), and the leftover
// debug fmt.Printf has been removed from this library path.
func Parse(data []byte) (p7 *PKCS7, err error) {
	var info contentInfo
	der, err := ber2der(data)
	if err != nil {
		return nil, err
	}
	rest, err := asn1.Unmarshal(der, &info)
	if err != nil {
		return nil, err
	}
	if len(rest) > 0 {
		return nil, asn1.SyntaxError{Msg: "trailing data"}
	}
	switch {
	case info.ContentType.Equal(oidSignedData):
		return parseSignedData(info.Content.Bytes)
	case info.ContentType.Equal(oidEnvelopedData):
		return parseEnvelopedData(info.Content.Bytes)
	}
	return nil, ErrUnsupportedContentType
}
// parseSignedData unmarshals a SignedData structure and collects the signed
// content, embedded certificates, CRLs and signer infos into a PKCS7 value.
func parseSignedData(data []byte) (*PKCS7, error) {
	var sd signedData
	// The unmarshal error was previously ignored, which could leave sd
	// empty and produce confusing downstream failures.
	if _, err := asn1.Unmarshal(data, &sd); err != nil {
		return nil, err
	}
	certs, err := x509.ParseCertificates(sd.Certificates.Bytes)
	if err != nil {
		return nil, err
	}
	var compound asn1.RawValue
	var content unsignedData
	if _, err := asn1.Unmarshal(sd.ContentInfo.Content.Bytes, &compound); err != nil {
		return nil, err
	}
	// Compound octet string
	if compound.IsCompound {
		if _, err = asn1.Unmarshal(compound.Bytes, &content); err != nil {
			return nil, err
		}
	} else {
		// assuming this is tag 04
		content = compound.Bytes
	}
	return &PKCS7{
		Content:      content,
		Certificates: certs,
		CRLs:         sd.CRLs,
		Signers:      sd.SignerInfos,
		raw:          sd}, nil
}
// parseEnvelopedData unmarshals an EnvelopedData structure into a PKCS7
// value whose raw field holds the envelope for later decryption.
func parseEnvelopedData(data []byte) (*PKCS7, error) {
	var envelope envelopedData
	_, err := asn1.Unmarshal(data, &envelope)
	if err != nil {
		return nil, err
	}
	return &PKCS7{raw: envelope}, nil
}
// digestInfo is the ASN.1 DigestInfo structure pairing a digest with the
// algorithm that produced it (used when building signatures).
type digestInfo struct {
	Algorithm pkix.AlgorithmIdentifier
	Digest    []byte
}
// Verify checks the signatures of a PKCS7 object
func (p7 *PKCS7) Verify() (err error) {
	if len(p7.Signers) == 0 {
		return errors.New("pkcs7: Message has no signers")
	}
	// Every signer must verify; stop at the first failure.
	for idx := range p7.Signers {
		if verr := verifySignature(p7, p7.Signers[idx]); verr != nil {
			return verr
		}
	}
	return nil
}
// verifySignature checks a single signerInfo against the PKCS7 content.
//
// When authenticated attributes are present, the messageDigest attribute is
// compared against a SHA-1 digest computed over the content, and the
// signature is then verified over the DER encoding of the attribute set.
//
// NOTE(review): the digest and signature algorithms are hard-coded to
// SHA-1 with RSA; signer.DigestAlgorithm is not consulted — confirm before
// relying on this for other algorithms. A large block of dead, commented-out
// digestInfo code was removed here.
func verifySignature(p7 *PKCS7, signer signerInfo) error {
	if len(signer.AuthenticatedAttributes) > 0 {
		// TODO(fullsailor): First check the content type match
		digest, err := getDigestFromAttributes(signer.AuthenticatedAttributes)
		if err != nil {
			return err
		}
		h := crypto.SHA1.New()
		h.Write(p7.Content)
		computed := h.Sum(nil)
		// hmac.Equal gives a constant-time byte comparison.
		if !hmac.Equal(digest, computed) {
			return &MessageDigestMismatchError{
				ExpectedDigest: digest,
				ActualDigest:   computed,
			}
		}
	}
	cert := getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber)
	if cert == nil {
		return errors.New("pkcs7: No certificate for signer")
	}
	encodedAttributes, err := marshalAttributes(signer.AuthenticatedAttributes)
	if err != nil {
		return err
	}
	algo := x509.SHA1WithRSA
	return cert.CheckSignature(algo, encodedAttributes, signer.EncryptedDigest)
}
// marshalAttributes DER encodes the attribute list as a SET and strips the
// outer wrapper introduced by encoding the anonymous struct, returning only
// the inner bytes.
func marshalAttributes(attrs []attribute) ([]byte, error) {
	encodedAttributes, err := asn1.Marshal(struct {
		A []attribute `asn1:"set"`
	}{A: attrs})
	if err != nil {
		return nil, err
	}
	// Remove the leading sequence octets. The unmarshal error was
	// previously ignored; surface it instead of returning garbage bytes.
	var raw asn1.RawValue
	if _, err := asn1.Unmarshal(encodedAttributes, &raw); err != nil {
		return nil, err
	}
	return raw.Bytes, nil
}
var (
	// oidDigestAlgorithmSHA1 identifies the SHA-1 digest algorithm.
	oidDigestAlgorithmSHA1 = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 26}
	// oidEncryptionAlgorithmRSA identifies PKCS#1 RSA encryption.
	oidEncryptionAlgorithmRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1}
)
// getDigestFromAttributes extracts the messageDigest attribute value from a
// signed attribute list, or errors when no such attribute is present.
func getDigestFromAttributes(attributes []attribute) (digest []byte, err error) {
	for idx := range attributes {
		if !attributes[idx].Type.Equal(oidAttributeMessageDigest) {
			continue
		}
		_, err = asn1.Unmarshal(attributes[idx].Value.Bytes, &digest)
		return
	}
	return nil, errors.New("pkcs7: Missing messageDigest attribute")
}
// getCertFromCertsByIssuerAndSerial returns the first certificate matching
// the given issuer and serial number, or nil when none matches.
func getCertFromCertsByIssuerAndSerial(certs []*x509.Certificate, ias issuerAndSerial) *x509.Certificate {
	for idx := range certs {
		if isCertMatchForIssuerAndSerial(certs[idx], ias) {
			return certs[idx]
		}
	}
	return nil
}
// GetOnlySigner returns an x509.Certificate for the first signer of the signed
// data payload. If there are more or less than one signer, nil is returned
func (p7 *PKCS7) GetOnlySigner() *x509.Certificate {
	if len(p7.Signers) != 1 {
		return nil
	}
	return getCertFromCertsByIssuerAndSerial(p7.Certificates, p7.Signers[0].IssuerAndSerialNumber)
}
// ErrUnsupportedAlgorithm tells you when our quick dev assumptions have failed
var ErrUnsupportedAlgorithm = errors.New("pkcs7: cannot decrypt data: only RSA, DES, & DES-EDE3 supported")

// ErrNotEncryptedContent is returned when attempting to Decrypt data that is not encrypted data
// NOTE(review): the message text ("is a decryptable data type") reads inverted
// relative to the meaning; left unchanged because callers may match on it —
// confirm before rewording.
var ErrNotEncryptedContent = errors.New("pkcs7: content data is a decryptable data type")
// Decrypt decrypts encrypted content info for recipient cert and private key.
// Only RSA recipient keys are supported; other key types yield
// ErrUnsupportedAlgorithm.
func (p7 *PKCS7) Decrypt(cert *x509.Certificate, pk crypto.PrivateKey) ([]byte, error) {
	data, ok := p7.raw.(envelopedData)
	if !ok {
		return nil, ErrNotEncryptedContent
	}
	recipient := selectRecipientForCertificate(data.RecipientInfos, cert)
	if recipient.EncryptedKey == nil {
		return nil, errors.New("pkcs7: no enveloped recipient for provided certificate")
	}
	// Use the comma-ok form: the previous bare type assertion panicked for
	// any non-RSA private key instead of returning ErrUnsupportedAlgorithm.
	if priv, ok := pk.(*rsa.PrivateKey); ok {
		contentKey, err := rsa.DecryptPKCS1v15(rand.Reader, priv, recipient.EncryptedKey)
		if err != nil {
			return nil, err
		}
		return data.EncryptedContentInfo.decrypt(contentKey)
	}
	return nil, ErrUnsupportedAlgorithm
}
// oidEncryptionAlgorithmDESCBC identifies DES in CBC mode.
var oidEncryptionAlgorithmDESCBC = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 7}

// oidEncryptionAlgorithmDESEDE3CBC identifies triple-DES (EDE3) in CBC mode.
var oidEncryptionAlgorithmDESEDE3CBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7}
// decrypt decrypts the encrypted content with the supplied symmetric key.
// Only DES-CBC and DES-EDE3-CBC content encryption algorithms are supported.
func (eci encryptedContentInfo) decrypt(key []byte) ([]byte, error) {
	alg := eci.ContentEncryptionAlgorithm.Algorithm
	if !alg.Equal(oidEncryptionAlgorithmDESCBC) && !alg.Equal(oidEncryptionAlgorithmDESEDE3CBC) {
		return nil, ErrUnsupportedAlgorithm
	}
	// EncryptedContent can either be constructed of multple OCTET STRINGs
	// or _be_ a tagged OCTET STRING
	var cyphertext []byte
	if eci.EncryptedContent.IsCompound {
		// Complex case to concat all of the children OCTET STRINGs
		var buf bytes.Buffer
		cypherbytes := eci.EncryptedContent.Bytes
		for len(cypherbytes) > 0 {
			var part []byte
			var err error
			// The unmarshal error was previously ignored and the loop
			// terminated only on a nil remainder, which could spin
			// forever on malformed input; fail fast instead.
			if cypherbytes, err = asn1.Unmarshal(cypherbytes, &part); err != nil {
				return nil, err
			}
			buf.Write(part)
		}
		cyphertext = buf.Bytes()
	} else {
		// Simple case, the bytes _are_ the cyphertext
		cyphertext = eci.EncryptedContent.Bytes
	}
	var block cipher.Block
	var err error
	switch {
	case alg.Equal(oidEncryptionAlgorithmDESCBC):
		block, err = des.NewCipher(key)
	case alg.Equal(oidEncryptionAlgorithmDESEDE3CBC):
		block, err = des.NewTripleDESCipher(key)
	}
	if err != nil {
		return nil, err
	}
	iv := eci.ContentEncryptionAlgorithm.Parameters.Bytes
	if len(iv) != 8 {
		return nil, errors.New("pkcs7: encryption algorithm parameters are malformed")
	}
	// CBC requires whole blocks; CryptBlocks panics otherwise.
	if len(cyphertext)%block.BlockSize() != 0 {
		return nil, errors.New("pkcs7: encrypted content is not a multiple of the block size")
	}
	mode := cipher.NewCBCDecrypter(block, iv)
	plaintext := make([]byte, len(cyphertext))
	mode.CryptBlocks(plaintext, cyphertext)
	if plaintext, err = unpad(plaintext, mode.BlockSize()); err != nil {
		return nil, err
	}
	return plaintext, nil
}
// selectRecipientForCertificate returns the recipientInfo whose issuer and
// serial match the given certificate, or the zero value when none matches.
func selectRecipientForCertificate(recipients []recipientInfo, cert *x509.Certificate) recipientInfo {
	for idx := range recipients {
		if isCertMatchForIssuerAndSerial(cert, recipients[idx].IssuerAndSerialNumber) {
			return recipients[idx]
		}
	}
	return recipientInfo{}
}
// isCertMatchForIssuerAndSerial reports whether the certificate matches the
// given issuer/serial pair (serial compared exactly, issuer by CommonName).
func isCertMatchForIssuerAndSerial(cert *x509.Certificate, ias issuerAndSerial) bool {
	// TODO(fullsailor): openssl's implementation of comparing issuer names compares
	// the DER encoding of both for exact byte match. Which is much stricter than
	// what we have here. This would require marshalling ias.IssuerName and comparing
	// with cert.RawIssuer using bytes.Compare
	if cert.SerialNumber.Cmp(ias.SerialNumber) != 0 {
		return false
	}
	issuer := new(pkix.Name)
	issuer.FillFromRDNSequence(&ias.IssuerName)
	return issuer.CommonName == cert.Issuer.CommonName
}
// pad appends PKCS#5/7-style padding so that the result length is a multiple
// of blocklen. Every pad byte carries the pad length; when the data already
// ends on a block boundary a full extra block of padding is added.
func pad(data []byte, blocklen int) ([]byte, error) {
	if blocklen < 1 {
		return nil, fmt.Errorf("invalid blocklen %d", blocklen)
	}
	// len(data)%blocklen is in [0, blocklen-1], so padlen is always in
	// [1, blocklen]; the previous "if padlen == 0" branch was unreachable
	// and has been removed.
	padlen := blocklen - (len(data) % blocklen)
	pad := bytes.Repeat([]byte{byte(padlen)}, padlen)
	return append(data, pad...), nil
}
// unpad strips PKCS#5/7-style padding, validating that the pad length is in
// range and that every pad byte matches it.
func unpad(data []byte, blocklen int) ([]byte, error) {
	if blocklen < 1 {
		return nil, fmt.Errorf("invalid blocklen %d", blocklen)
	}
	if len(data)%blocklen != 0 || len(data) == 0 {
		return nil, fmt.Errorf("invalid data len %d", len(data))
	}
	// the last byte is the length of padding
	padlen := int(data[len(data)-1])
	// Previously a pad length of zero silently returned the data unchanged
	// and a pad length larger than the data caused a slice-bounds panic;
	// both are invalid padding and are rejected here.
	if padlen == 0 || padlen > len(data) {
		return nil, errors.New("invalid padding")
	}
	// check padding integrity, all bytes should be the same
	pad := data[len(data)-padlen:]
	for _, padbyte := range pad {
		if padbyte != byte(padlen) {
			return nil, errors.New("invalid padding")
		}
	}
	return data[:len(data)-padlen], nil
}
// UnmarshalSignedAttribute decodes a single attribute from the signer info
func (p7 *PKCS7) UnmarshalSignedAttribute(attributeType asn1.ObjectIdentifier, out interface{}) error {
	sd, ok := p7.raw.(signedData)
	if !ok {
		return errors.New("pkcs7: payload is not signedData content")
	}
	if len(sd.SignerInfos) < 1 {
		return errors.New("pkcs7: payload has no signers")
	}
	// Only the first signer's authenticated attributes are consulted.
	for _, attr := range sd.SignerInfos[0].AuthenticatedAttributes {
		if !attr.Type.Equal(attributeType) {
			continue
		}
		_, err := asn1.Unmarshal(attr.Value.Bytes, out)
		return err
	}
	return errors.New("pkcs7: attribute type not in signed attributes")
}
// SignedData is an opaque data structure for creating signed data payloads
type SignedData struct {
	sd            signedData          // the SignedData structure being assembled
	certs         []*x509.Certificate // certificates to embed in the payload
	messageDigest []byte              // SHA-1 digest of the content, set by NewSignedData
}

// Attribute represents a key value pair attribute. Value must be marshalable by
// `encoding/asn1`
type Attribute struct {
	Type  asn1.ObjectIdentifier
	Value interface{}
}

// SignerInfoConfig are optional values to include when adding a signer
type SignerInfoConfig struct {
	ExtraSignedAttributes []Attribute
}
// NewSignedData initializes a SignedData with content
func NewSignedData(data []byte) (*SignedData, error) {
	content, err := asn1.Marshal(data)
	if err != nil {
		return nil, err
	}
	// Wrap the content in a contentInfo with an implicit context tag.
	ci := contentInfo{
		ContentType: oidData,
		Content:     asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true},
	}
	// Digest the raw content up front; AddSigner embeds this value as the
	// messageDigest authenticated attribute.
	hasher := crypto.SHA1.New()
	hasher.Write(data)
	digest := hasher.Sum(nil)
	sd := signedData{
		ContentInfo: ci,
		Version:     1,
		DigestAlgorithmIdentifiers: []pkix.AlgorithmIdentifier{
			{Algorithm: oidDigestAlgorithmSHA1},
		},
	}
	return &SignedData{sd: sd, messageDigest: digest}, nil
}
// attributes collects attribute types and values, preserving the order in
// which they were added.
type attributes struct {
	types  []asn1.ObjectIdentifier
	values []interface{}
}

// Add adds the attribute, maintaining insertion order
func (attrs *attributes) Add(attrType asn1.ObjectIdentifier, value interface{}) {
	attrs.types = append(attrs.types, attrType)
	attrs.values = append(attrs.values, value)
}
// sortableAttribute pairs an attribute with its DER encoding, which serves
// as the sort key for producing a canonical SET OF ordering.
type sortableAttribute struct {
	SortKey   []byte
	Attribute attribute
}

// attributeSet implements sort.Interface over the DER-encoded sort keys.
type attributeSet []sortableAttribute

// Len implements sort.Interface.
func (sa attributeSet) Len() int {
	return len(sa)
}

// Less implements sort.Interface by byte-comparing the DER encodings.
func (sa attributeSet) Less(i, j int) bool {
	return bytes.Compare(sa[i].SortKey, sa[j].SortKey) < 0
}

// Swap implements sort.Interface.
func (sa attributeSet) Swap(i, j int) {
	sa[i], sa[j] = sa[j], sa[i]
}

// Attributes returns the attributes in their current (sorted) order.
func (sa attributeSet) Attributes() []attribute {
	attrs := make([]attribute, len(sa))
	for i, attr := range sa {
		attrs[i] = attr.Attribute
	}
	return attrs
}
// ForMarshaling returns the attributes ready for DER marshaling: each value
// is ASN.1 encoded and wrapped in a SET, and the attributes are sorted by
// their full DER encodings, as required for a canonical SET OF.
func (attrs *attributes) ForMarshaling() ([]attribute, error) {
	sortables := make(attributeSet, len(attrs.types))
	for i := range sortables {
		attrType := attrs.types[i]
		attrValue := attrs.values[i]
		asn1Value, err := asn1.Marshal(attrValue)
		if err != nil {
			return nil, err
		}
		attr := attribute{
			Type:  attrType,
			Value: asn1.RawValue{Tag: 17, IsCompound: true, Bytes: asn1Value}, // 17 == SET tag
		}
		// Encode the whole attribute once more to obtain its sort key.
		encoded, err := asn1.Marshal(attr)
		if err != nil {
			return nil, err
		}
		sortables[i] = sortableAttribute{
			SortKey:   encoded,
			Attribute: attr,
		}
	}
	sort.Sort(sortables)
	return sortables.Attributes(), nil
}
// AddSigner signs attributes about the content and adds certificate to payload
func (sd *SignedData) AddSigner(cert *x509.Certificate, pkey crypto.PrivateKey, config SignerInfoConfig) error {
	// Assemble the authenticated attributes: content type and message
	// digest first, then any extras supplied by the caller.
	attrList := &attributes{}
	attrList.Add(oidAttributeContentType, sd.sd.ContentInfo.ContentType)
	attrList.Add(oidAttributeMessageDigest, sd.messageDigest)
	for _, extra := range config.ExtraSignedAttributes {
		attrList.Add(extra.Type, extra.Value)
	}
	finalAttrs, err := attrList.ForMarshaling()
	if err != nil {
		return err
	}
	// Create the signature over the DER-encoded signed attributes.
	signature, err := signAttributes(finalAttrs, pkey, crypto.SHA1)
	if err != nil {
		return err
	}
	ias, err := cert2issuerAndSerial(cert)
	if err != nil {
		return err
	}
	sd.certs = append(sd.certs, cert)
	sd.sd.SignerInfos = append(sd.sd.SignerInfos, signerInfo{
		AuthenticatedAttributes:   finalAttrs,
		DigestAlgorithm:           pkix.AlgorithmIdentifier{Algorithm: oidDigestAlgorithmSHA1},
		DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: oidEncryptionAlgorithmRSA},
		IssuerAndSerialNumber:     ias,
		EncryptedDigest:           signature,
		Version:                   1,
	})
	return nil
}
// AddCertificate adds the certificate to the payload. Useful for parent certificates
func (sd *SignedData) AddCertificate(cert *x509.Certificate) {
	sd.certs = append(sd.certs, cert)
}
// Finish marshals the content and its signers
func (sd *SignedData) Finish() ([]byte, error) {
	sd.sd.Certificates = marshalCertificates(sd.certs)
	inner, err := asn1.Marshal(sd.sd)
	if err != nil {
		return nil, err
	}
	// Wrap the SignedData in the outer contentInfo envelope.
	return asn1.Marshal(contentInfo{
		ContentType: oidSignedData,
		Content:     asn1.RawValue{Class: 2, Tag: 0, Bytes: inner, IsCompound: true},
	})
}
// cert2issuerAndSerial builds an issuerAndSerial from a certificate, parsing
// the raw issuer bytes so the RDN sequence matches the certificate exactly.
func cert2issuerAndSerial(cert *x509.Certificate) (issuerAndSerial, error) {
	// The issuer RDNSequence has to match exactly the sequence in the certificate
	// We cannot use cert.Issuer.ToRDNSequence() here since it mangles the sequence
	var issuer pkix.RDNSequence
	extra, err := asn1.Unmarshal(cert.RawIssuer, &issuer)
	if err != nil {
		return issuerAndSerial{}, err
	}
	if len(extra) > 0 {
		return issuerAndSerial{}, errors.New("pkcs7: extra data remains after parsing certificate issuer")
	}
	return issuerAndSerial{
		IssuerName:   issuer,
		SerialNumber: cert.SerialNumber,
	}, nil
}
// signAttributes signs the DER encoded form of the attributes with the
// private key, hashing them with the supplied hash function. Only RSA keys
// are supported.
func signAttributes(attrs []attribute, pkey crypto.PrivateKey, hash crypto.Hash) ([]byte, error) {
	attrBytes, err := marshalAttributes(attrs)
	if err != nil {
		return nil, err
	}
	h := hash.New()
	h.Write(attrBytes)
	hashed := h.Sum(nil)
	switch priv := pkey.(type) {
	case *rsa.PrivateKey:
		// Pass the caller's hash through to the signer: the previous code
		// hard-coded crypto.SHA1 here, silently ignoring the parameter and
		// producing an inconsistent signature for any other hash.
		return rsa.SignPKCS1v15(rand.Reader, priv, hash, hashed)
	}
	return nil, ErrUnsupportedAlgorithm
}
// marshalCertificates concatenates the raw DER bytes of the certificates and
// wraps them in an implicitly tagged, constructed RawValue structure.
func marshalCertificates(certs []*x509.Certificate) asn1.RawValue {
	var raw bytes.Buffer
	for idx := range certs {
		raw.Write(certs[idx].Raw)
	}
	return asn1.RawValue{Class: 2, Tag: 0, Bytes: raw.Bytes(), IsCompound: true}
}
// DegenerateCertificate creates a signed data structure containing only the
// provided certificate
func DegenerateCertificate(cert []byte) ([]byte, error) {
certs := asn1.RawValue{Class: 2, Tag: 0, Bytes: cert, IsCompound: true}
emptyContent := contentInfo{ContentType: oidData}
sd := signedData{
Version: 1,
ContentInfo: emptyContent,
Certificates: certs,
CRLs: []pkix.CertificateList{},
}
content, err := asn1.Marshal(sd)
if err != nil {
return nil, err
}
signedContent := contentInfo{
ContentType: oidSignedData,
Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true},
}
return asn1.Marshal(signedContent)
}
// Encrypt creates and returns an envelope data PKCS7 structure with encrypted
// recipient keys for each recipient public key. The content is encrypted
// with a freshly generated DES-CBC key, and that key is RSA-encrypted for
// every recipient certificate.
// TODO(fullsailor): Add support for encrypting content with other algorithms
func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) {
	// Create DES key & CBC IV
	key := make([]byte, 8)
	iv := make([]byte, des.BlockSize)
	if _, err := rand.Read(key); err != nil {
		return nil, err
	}
	if _, err := rand.Read(iv); err != nil {
		return nil, err
	}
	// Encrypt padded content
	block, err := des.NewCipher(key)
	if err != nil {
		return nil, err
	}
	mode := cipher.NewCBCEncrypter(block, iv)
	plaintext, err := pad(content, mode.BlockSize())
	// The pad error was previously assigned but never checked before use;
	// check it so a padding failure cannot feed CryptBlocks bad input.
	if err != nil {
		return nil, err
	}
	cyphertext := make([]byte, len(plaintext))
	mode.CryptBlocks(cyphertext, plaintext)
	// Prepare ASN.1 Encrypted Content Info
	eci := encryptedContentInfo{
		ContentType: oidData,
		ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{
			Algorithm:  oidEncryptionAlgorithmDESCBC,
			Parameters: asn1.RawValue{Tag: 4, Bytes: iv},
		},
		EncryptedContent: marshalEncryptedContent(cyphertext),
	}
	// Prepare each recipient's encrypted cipher key
	recipientInfos := make([]recipientInfo, len(recipients))
	for i, recipient := range recipients {
		encrypted, err := encryptKey(key, recipient)
		if err != nil {
			return nil, err
		}
		ias, err := cert2issuerAndSerial(recipient)
		if err != nil {
			return nil, err
		}
		info := recipientInfo{
			Version:               0,
			IssuerAndSerialNumber: ias,
			KeyEncryptionAlgorithm: pkix.AlgorithmIdentifier{
				Algorithm: oidEncryptionAlgorithmRSA,
			},
			EncryptedKey: encrypted,
		}
		recipientInfos[i] = info
	}
	// Prepare envelope content
	envelope := envelopedData{
		EncryptedContentInfo: eci,
		Version:              0,
		RecipientInfos:       recipientInfos,
	}
	innerContent, err := asn1.Marshal(envelope)
	if err != nil {
		return nil, err
	}
	// Prepare outer payload structure
	wrapper := contentInfo{
		ContentType: oidEnvelopedData,
		Content:     asn1.RawValue{Class: 2, Tag: 0, IsCompound: true, Bytes: innerContent},
	}
	return asn1.Marshal(wrapper)
}
// marshalEncryptedContent wraps the cyphertext in an OCTET STRING inside an
// implicitly tagged, constructed RawValue.
func marshalEncryptedContent(content []byte) asn1.RawValue {
	// NOTE(review): the asn1.Marshal error is ignored; marshaling a []byte
	// should not fail, but consider surfacing the error to callers.
	asn1Content, _ := asn1.Marshal(content)
	return asn1.RawValue{Tag: 0, Class: 2, Bytes: asn1Content, IsCompound: true}
}
// encryptKey encrypts the symmetric content key for a recipient using RSA
// PKCS#1 v1.5 against the recipient certificate's public key. Non-RSA keys
// yield ErrUnsupportedAlgorithm.
func encryptKey(key []byte, recipient *x509.Certificate) ([]byte, error) {
	// Use the comma-ok form: the previous bare type assertion panicked for
	// certificates carrying a non-RSA public key.
	if pub, ok := recipient.PublicKey.(*rsa.PublicKey); ok {
		return rsa.EncryptPKCS1v15(rand.Reader, pub, key)
	}
	return nil, ErrUnsupportedAlgorithm
}
|
package interp
import (
"bytes"
"fmt"
"go/ast"
"go/parser"
"go/scanner"
"go/token"
"log"
"strings"
"code.google.com/p/go.tools/go/types"
"code.google.com/p/go.tools/go/types/typeutil"
)
// interp is the concrete Interpreter implementation. It accumulates the
// source the user has typed and re-parses/re-checks it on each Run call.
type interp struct {
	oldSrc  string              // buffered incomplete input awaiting a continuation line
	topEnv  *environ            // top-level evaluation environment
	pkgs    map[string]*Package // available packages, keyed by package name
	checker *checker            // type checker that accumulates errors
	typeMap *typeutil.Map       // type map seeded via addBasicTypes (see newInterp)
}
// newInterp constructs an Interpreter over the given packages, package map
// and type map, wiring the top-level environment back to the interpreter.
func newInterp(pkgs []*Package, pkgMap map[string]*types.Package, typeMap *typeutil.Map) Interpreter {
	// Index the packages by name for quick lookup.
	byName := make(map[string]*Package)
	for _, pkg := range pkgs {
		byName[pkg.Name] = pkg
	}
	addBasicTypes(typeMap)
	it := &interp{
		pkgs:    byName,
		topEnv:  &environ{objs: map[string]Object{}},
		checker: newChecker(pkgs, pkgMap),
		typeMap: typeMap,
	}
	it.topEnv.interp = it
	return it
}
// checker wraps a types.Config whose error callback collects type-check
// errors into errs (filtered by newChecker).
type checker struct {
	config types.Config
	errs   []error
}
// newChecker builds a type checker whose error callback accumulates errors,
// dropping "not used" complaints which are expected for REPL-style input.
func newChecker(pkgs []*Package, pkgMap map[string]*types.Package) *checker {
	c := &checker{errs: []error{}}
	c.config = types.Config{
		Packages: pkgMap,
		Error: func(err error) {
			if te, ok := err.(types.Error); ok {
				// Ignore errors about unused variables, imports, and labels
				if strings.Contains(te.Msg, "but not used") || strings.Contains(te.Msg, "is not used") {
					return
				}
			}
			c.errs = append(c.errs, err)
		},
	}
	return c
}
// Run feeds one line (or continuation) of user source to the interpreter.
// It returns (true, nil) when the input is incomplete and more lines are
// expected, and (false, err) when the input parsed/checked/ran (err non-nil
// on a parse or type error).
//
// The input is wrapped into a synthetic file of the form
//   package p;import(...);func _(){ <prior decls> { <src> } }
// so prior declarations stay in scope for the new statements.
func (i *interp) Run(src string) (bool, error) {
	src = strings.TrimSpace(src)
	if len(src) == 0 {
		// An empty line finishes a pending multi-line input, if any.
		if i.oldSrc == "" {
			return false, nil
		}
		return true, nil
	}
	if i.oldSrc != "" {
		// Prepend the buffered incomplete input.
		src = i.oldSrc + "\n" + src
		i.oldSrc = ""
	}
	declSrc, numDecls := i.topEnv.dumpScope()
	var allSrcBuf bytes.Buffer
	allSrcBuf.WriteString("package p;import(")
	for _, pkg := range i.pkgs {
		fmt.Fprintf(&allSrcBuf, "%q;", pkg.Pkg.Path())
	}
	allSrcBuf.WriteString(");func _(){")
	allSrcBuf.WriteString(declSrc)
	allSrcBuf.WriteString("{")
	allSrcBuf.WriteString(src)
	allSrcBuf.WriteString("\n}}")
	allSrc := allSrcBuf.String()
	fileSize := len(allSrc)
	// Parse it
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "input", allSrc, 0)
	if err != nil {
		if errList, ok := err.(scanner.ErrorList); ok {
			for j, err := range errList {
				// Check if the error is at EOF or at a closing brace we added
				if err.Pos.Offset >= fileSize-2 {
					// If this is the first error, it actually just means the source is incomplete,
					// unless there is a superfluous '}' at the end of their code
					if j == 0 && err.Msg != "expected declaration, found '}'" {
						i.oldSrc = src
						return true, nil
					}
				}
			}
		} else {
			// NOTE(review): log.Fatal terminates the whole process from
			// library code — consider returning the error instead.
			log.Fatal("Parsing yielded a non-nil error that's not a scanner.ErrorList")
		}
		return false, err
	}
	if len(file.Decls) != 2 {
		// There must be an extra closing brace that escaped our _ function
		err := fmt.Errorf("Unexpected '}'")
		return false, err
	}
	stmtList := file.Decls[len(file.Decls)-1].(*ast.FuncDecl).Body.List
	if len(stmtList) != numDecls+1 {
		// There must be an extra closing brace that escaped our block statement
		err := fmt.Errorf("Unexpected '}'")
		return false, err
	}
	// Clear the type-checker errors and create a struct to hold type info
	i.checker.errs = i.checker.errs[:0]
	info := types.Info{
		Types:      map[ast.Expr]types.TypeAndValue{},
		Selections: map[*ast.SelectorExpr]*types.Selection{},
		Defs:       map[*ast.Ident]types.Object{},
		Uses:       map[*ast.Ident]types.Object{},
	}
	// Type check the statement list
	files := []*ast.File{file}
	pkg, _ := i.checker.config.Check("", fset, files, &info)
	if len(i.checker.errs) > 0 {
		return false, i.checker.errs[0]
	}
	// get the scope of the block stmt containing user code
	pkgScope := pkg.Scope().Child(0)
	undScope := pkgScope.Child(pkgScope.NumChildren() - 1)
	i.topEnv.scope = undScope.Child(undScope.NumChildren() - 1)
	i.topEnv.info = &info
	// Extract the statement list provided by the user
	stmtList = stmtList[numDecls].(*ast.BlockStmt).List
	if len(stmtList) == 0 {
		return false, nil
	}
	// Run each statement in the list
	for _, stmt := range stmtList {
		i.topEnv.runStmt(stmt, true)
	}
	return false, nil
}
Fixed an issue where the generated source code included invalid accesses to unexported package objects.
The current solution is not optimal: the entire history of the source
code the user has typed is re-parsed and type-checked after every input.
It could fairly easily be cut down to just the declarations (that's a TODO).
package interp
import (
"bytes"
"fmt"
"go/ast"
"go/parser"
"go/scanner"
"go/token"
"log"
"strings"
"code.google.com/p/go.tools/go/types"
"code.google.com/p/go.tools/go/types/typeutil"
)
// interp is the concrete Interpreter implementation. It accumulates the
// source the user has typed and re-parses/re-checks it on each Run call.
type interp struct {
	oldSrc  string              // buffered incomplete input awaiting a continuation line
	topEnv  *environ            // top-level evaluation environment
	pkgs    map[string]*Package // available packages, keyed by package name
	checker *checker            // type checker that accumulates errors
	typeMap *typeutil.Map       // type map seeded via addBasicTypes (see newInterp)
	// stmtLists holds each previously accepted input verbatim; Run nests
	// them in successive scopes when rebuilding the synthetic file.
	stmtLists []string
	// stmtListLens records how many statements each prior input parsed to,
	// used to validate the rebuilt file's structure.
	stmtListLens []int
}
// newInterp constructs an Interpreter over the given packages, package map
// and type map, wiring the top-level environment back to the interpreter.
func newInterp(pkgs []*Package, pkgMap map[string]*types.Package, typeMap *typeutil.Map) Interpreter {
	// Index the packages by name for quick lookup.
	byName := make(map[string]*Package)
	for _, pkg := range pkgs {
		byName[pkg.Name] = pkg
	}
	addBasicTypes(typeMap)
	it := &interp{
		pkgs:    byName,
		topEnv:  &environ{objs: map[string]Object{}},
		checker: newChecker(pkgs, pkgMap),
		typeMap: typeMap,
	}
	it.topEnv.interp = it
	return it
}
// checker wraps a types.Config whose error callback collects type-check
// errors into errs (filtered by newChecker).
type checker struct {
	config types.Config
	errs   []error
}
// newChecker builds a type checker whose error callback accumulates errors,
// dropping "not used" complaints which are expected for REPL-style input.
func newChecker(pkgs []*Package, pkgMap map[string]*types.Package) *checker {
	c := &checker{errs: []error{}}
	c.config = types.Config{
		Packages: pkgMap,
		Error: func(err error) {
			if te, ok := err.(types.Error); ok {
				// Ignore errors about unused variables, imports, and labels
				if strings.Contains(te.Msg, "but not used") || strings.Contains(te.Msg, "is not used") {
					return
				}
			}
			c.errs = append(c.errs, err)
		},
	}
	return c
}
// Run feeds one line (or continuation) of user source to the interpreter.
// It returns (true, nil) when the input is incomplete and more lines are
// expected, and (false, err) when the input parsed/checked/ran (err non-nil
// on a parse or type error).
//
// Each previously accepted input is replayed inside its own nested block so
// that earlier declarations stay in scope without being re-exported; the new
// input goes in the innermost scope.
func (i *interp) Run(src string) (bool, error) {
	src = strings.TrimSpace(src)
	if len(src) == 0 {
		// An empty line finishes a pending multi-line input, if any.
		if i.oldSrc == "" {
			return false, nil
		}
		return true, nil
	}
	if i.oldSrc != "" {
		// Prepend the buffered incomplete input.
		src = i.oldSrc + "\n" + src
		i.oldSrc = ""
	}
	// TODO: Only dump out declarations after each input rather than entire history of source.
	var allSrcBuf bytes.Buffer
	allSrcBuf.WriteString("package p;import(")
	for _, pkg := range i.pkgs {
		fmt.Fprintf(&allSrcBuf, "%q;", pkg.Pkg.Path())
	}
	allSrcBuf.WriteString(");func _(){")
	// Add previous code, one stmtList at a time, each in a nested scope
	for _, stmtList := range i.stmtLists {
		allSrcBuf.WriteString(stmtList)
		allSrcBuf.WriteString("\n{")
	}
	// Add current code in the innermost scope and close the scopes
	allSrcBuf.WriteString(src)
	allSrcBuf.WriteString("\n")
	for _ = range i.stmtLists {
		allSrcBuf.WriteString("}")
	}
	allSrcBuf.WriteString("}")
	// Get the source "file" as a string
	allSrc := allSrcBuf.String()
	fileSize := len(allSrc)
	// Parse it
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "input", allSrc, 0)
	if err != nil {
		if errList, ok := err.(scanner.ErrorList); ok {
			for j, err := range errList {
				// Check if the error is at EOF or at a closing brace we added
				if err.Pos.Offset >= fileSize-2 {
					// If this is the first error, it actually just means the source is incomplete,
					// unless there is a superfluous '}' at the end of their code
					if j == 0 && err.Msg != "expected declaration, found '}'" {
						i.oldSrc = src
						return true, nil
					}
				}
			}
		} else {
			// NOTE(review): log.Fatal terminates the whole process from
			// library code — consider returning the error instead.
			log.Fatal("Parsing yielded a non-nil error that's not a scanner.ErrorList")
		}
		return false, err
	}
	if len(file.Decls) != 2 {
		// The input must have done something strange with braces
		err := fmt.Errorf("Unexpected '}'")
		return false, err
	}
	// Walk down the scopes to the inner statement list, checking that nothing
	// looks wrong along the way
	stmtList := file.Decls[len(file.Decls)-1].(*ast.FuncDecl).Body.List
	for j := range i.stmtLists {
		if len(stmtList) != i.stmtListLens[j]+1 {
			// There must be an extra closing brace that escaped our block statement
			err := fmt.Errorf("Unexpected '}'")
			return false, err
		}
		blockStmt, ok := stmtList[len(stmtList)-1].(*ast.BlockStmt)
		if !ok {
			err := fmt.Errorf("Parse error")
			return false, err
		}
		stmtList = blockStmt.List
	}
	if len(stmtList) == 0 {
		return false, nil
	}
	// Clear the type-checker errors and create a struct to hold type info
	i.checker.errs = i.checker.errs[:0]
	info := types.Info{
		Types:      map[ast.Expr]types.TypeAndValue{},
		Selections: map[*ast.SelectorExpr]*types.Selection{},
		Defs:       map[*ast.Ident]types.Object{},
		Uses:       map[*ast.Ident]types.Object{},
	}
	// Type check the statement list
	files := []*ast.File{file}
	pkg, _ := i.checker.config.Check("", fset, files, &info)
	if len(i.checker.errs) > 0 {
		return false, i.checker.errs[0]
	}
	// Walk down the scopes to the inner statement list, checking that nothing
	// looks wrong along the way
	pkgScope := pkg.Scope().Child(0)
	undScope := pkgScope.Child(pkgScope.NumChildren() - 1)
	currScope := undScope
	for _ = range i.stmtLists {
		currScope = currScope.Child(currScope.NumChildren() - 1)
	}
	// get the scope of the block stmt containing user code
	i.topEnv.scope = currScope
	i.topEnv.info = &info
	// Run each statement in the list
	for _, stmt := range stmtList {
		i.topEnv.runStmt(stmt, true)
	}
	// Add current input to the stmtLists slice for next time
	i.stmtLists = append(i.stmtLists, src)
	i.stmtListLens = append(i.stmtListLens, len(stmtList))
	return false, nil
}
|
package identify
import (
"context"
"fmt"
"io"
"runtime/debug"
"sync"
"time"
"github.com/libp2p/go-libp2p-core/crypto"
"github.com/libp2p/go-libp2p-core/event"
"github.com/libp2p/go-libp2p-core/host"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/libp2p/go-libp2p-core/peerstore"
"github.com/libp2p/go-libp2p-core/record"
"github.com/libp2p/go-eventbus"
"github.com/libp2p/go-msgio/protoio"
pb "github.com/libp2p/go-libp2p/p2p/protocol/identify/pb"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
msmux "github.com/multiformats/go-multistream"
"github.com/gogo/protobuf/proto"
logging "github.com/ipfs/go-log/v2"
)
// log is the logger for the identify service.
var log = logging.Logger("net/identify")

// ID is the protocol.ID of version 1.0.0 of the identify
// service.
const ID = "/ipfs/id/1.0.0"

// LibP2PVersion holds the current protocol version for a client running this code
// TODO(jbenet): fix the versioning mess.
// XXX: Don't change this till 2020. You'll break all go-ipfs versions prior to
// 0.4.17 which asserted an exact version match.
const LibP2PVersion = "ipfs/0.1.0"

// StreamReadTimeout is the read timeout on all incoming Identify family streams.
var StreamReadTimeout = 60 * time.Second

var (
	legacyIDSize = 2 * 1024 // 2k Bytes
	signedIDSize = 8 * 1024 // 8K
	maxMessages  = 10
	// defaultUserAgent is replaced by init() with the binary's build info
	// when that information is available.
	defaultUserAgent = "github.com/libp2p/go-libp2p"
)
// init derives the default user agent from the importing binary's build
// info, falling back to the bare module path for (devel) builds.
func init() {
	bi, ok := debug.ReadBuildInfo()
	if !ok {
		return
	}
	if bi.Main.Version == "(devel)" {
		defaultUserAgent = bi.Main.Path
		return
	}
	defaultUserAgent = fmt.Sprintf("%s@%s", bi.Main.Path, bi.Main.Version)
}
// addPeerHandlerReq asks the identify loop to create (or fetch) the
// peerHandler for rp; the handler (possibly nil) is delivered on resp.
type addPeerHandlerReq struct {
	rp   peer.ID
	resp chan *peerHandler
}

// rmPeerHandlerReq asks the identify loop to stop and remove the
// peerHandler for p.
type rmPeerHandlerReq struct {
	p peer.ID
}
// IDService is the interface exposed by the identify service: it triggers
// identify exchanges on connections and reports the addresses remote peers
// have observed for us.
type IDService interface {
	// IdentifyConn synchronously triggers an identify request on the connection and
	// waits for it to complete. If the connection is being identified by another
	// caller, this call will wait. If the connection has already been identified,
	// it will return immediately.
	IdentifyConn(network.Conn)
	// IdentifyWait triggers an identify (if the connection has not already been
	// identified) and returns a channel that is closed when the identify protocol
	// completes.
	IdentifyWait(network.Conn) <-chan struct{}
	// OwnObservedAddrs returns the addresses peers have reported we've dialed from
	OwnObservedAddrs() []ma.Multiaddr
	// ObservedAddrsFor returns the addresses peers have reported we've dialed from,
	// for a specific local address.
	ObservedAddrsFor(local ma.Multiaddr) []ma.Multiaddr
	io.Closer
}
// idService is a structure that implements ProtocolIdentify.
// It is a trivial service that gives the other peer some
// useful information about the local peer. A sort of hello.
//
// The idService sends:
// * Our IPFS Protocol Version
// * Our IPFS Agent Version
// * Our public Listen Addresses
type idService struct {
	Host      host.Host
	UserAgent string

	ctx       context.Context
	ctxCancel context.CancelFunc
	// track resources that need to be shut down before we shut down
	refCount sync.WaitGroup

	disableSignedPeerRecord bool

	// Identified connections (finished and in progress).
	connsMu sync.RWMutex
	conns   map[network.Conn]chan struct{}

	addrMu sync.Mutex

	// our own observed addresses.
	observedAddrs *ObservedAddrManager

	// emitters publish identify lifecycle events on the host's event bus;
	// each may be nil if emitter creation failed (see NewIDService).
	emitters struct {
		evtPeerProtocolsUpdated        event.Emitter
		evtPeerIdentificationCompleted event.Emitter
		evtPeerIdentificationFailed    event.Emitter
	}

	// addPeerHandlerCh and rmPeerHandlerCh carry requests to the loop()
	// goroutine, which owns the per-peer handler map.
	addPeerHandlerCh chan addPeerHandlerReq
	rmPeerHandlerCh  chan rmPeerHandlerReq
}
// NewIDService constructs a new *idService and activates it by
// attaching its stream handler to the given host.Host.
func NewIDService(h host.Host, opts ...Option) (*idService, error) {
	var cfg config
	for _, opt := range opts {
		opt(&cfg)
	}
	// Fall back to the build-info derived agent when none is configured.
	userAgent := defaultUserAgent
	if cfg.userAgent != "" {
		userAgent = cfg.userAgent
	}
	s := &idService{
		Host:                    h,
		UserAgent:               userAgent,
		conns:                   make(map[network.Conn]chan struct{}),
		disableSignedPeerRecord: cfg.disableSignedPeerRecord,
		addPeerHandlerCh:        make(chan addPeerHandlerReq),
		rmPeerHandlerCh:         make(chan rmPeerHandlerReq),
	}
	s.ctx, s.ctxCancel = context.WithCancel(context.Background())
	// handle local protocol handler updates, and push deltas to peers.
	var err error
	observedAddrs, err := NewObservedAddrManager(h)
	if err != nil {
		return nil, fmt.Errorf("failed to create observed address manager: %s", err)
	}
	s.observedAddrs = observedAddrs
	// Account for the loop goroutine before starting it so Close can wait.
	s.refCount.Add(1)
	go s.loop()
	// Emitter creation failures are non-fatal: the service runs without
	// publishing the corresponding events.
	s.emitters.evtPeerProtocolsUpdated, err = h.EventBus().Emitter(&event.EvtPeerProtocolsUpdated{})
	if err != nil {
		log.Warnf("identify service not emitting peer protocol updates; err: %s", err)
	}
	s.emitters.evtPeerIdentificationCompleted, err = h.EventBus().Emitter(&event.EvtPeerIdentificationCompleted{})
	if err != nil {
		log.Warnf("identify service not emitting identification completed events; err: %s", err)
	}
	s.emitters.evtPeerIdentificationFailed, err = h.EventBus().Emitter(&event.EvtPeerIdentificationFailed{})
	if err != nil {
		log.Warnf("identify service not emitting identification failed events; err: %s", err)
	}
	// register protocols that do not depend on peer records.
	h.SetStreamHandler(IDDelta, s.deltaHandler)
	h.SetStreamHandler(ID, s.sendIdentifyResp)
	h.SetStreamHandler(IDPush, s.pushHandler)
	h.Network().Notify((*netNotifiee)(s))
	return s, nil
}
// loop is the service's main event loop. It owns the peerHandler table:
// it creates handlers on demand, retires them after disconnects, restarts
// handlers that raced with a reconnect, and fans local address/protocol
// updates out to every live handler. It exits when the service context is
// canceled or the event-bus subscription closes.
func (ids *idService) loop() {
	defer ids.refCount.Done()
	phs := make(map[peer.ID]*peerHandler)
	sub, err := ids.Host.EventBus().Subscribe([]interface{}{&event.EvtLocalProtocolsUpdated{},
		&event.EvtLocalAddressesUpdated{}}, eventbus.BufSize(256))
	if err != nil {
		log.Errorf("failed to subscribe to events on the bus, err=%s", err)
		return
	}
	// Each handler announces itself on phClosedCh exactly once after it has
	// fully stopped.
	phClosedCh := make(chan peer.ID)
	defer func() {
		sub.Close()
		// The context will cancel the workers. Now, wait for them to
		// exit.
		for range phs {
			<-phClosedCh
		}
	}()
	// Use a fresh context for the handlers. Otherwise, they'll get canceled
	// before we're ready to shutdown and they'll have "stopped" without us
	// _calling_ stop.
	handlerCtx, cancel := context.WithCancel(context.Background())
	defer cancel()
	for {
		select {
		case addReq := <-ids.addPeerHandlerCh:
			rp := addReq.rp
			ph, ok := phs[rp]
			// Only create a handler while the peer is still connected; the
			// requester receives nil otherwise.
			if !ok && ids.Host.Network().Connectedness(rp) == network.Connected {
				ph = newPeerHandler(rp, ids)
				ph.start(handlerCtx, func() { phClosedCh <- rp })
				phs[rp] = ph
			}
			addReq.resp <- ph
		case rmReq := <-ids.rmPeerHandlerCh:
			rp := rmReq.p
			if ids.Host.Network().Connectedness(rp) != network.Connected {
				// before we remove the peerhandler, we should ensure that it will not send any
				// more messages. Otherwise, we might create a new handler and the Identify response
				// synchronized with the new handler might be overwritten by a message sent by this "old" handler.
				ph, ok := phs[rp]
				if !ok {
					// move on, move on, there's nothing to see here.
					continue
				}
				// This is idempotent if already stopped.
				ph.stop()
			}
		case rp := <-phClosedCh:
			ph := phs[rp]
			// If we are connected to the peer, it means that we got a connection from the peer
			// before we could finish removing it's handler on the previous disconnection.
			// If we delete the handler, we wont be able to push updates to it
			// till we see a new connection. So, we should restart the handler.
			// The fact that we got the handler on this channel means that it's context and handler
			// have completed because we write the handler to this chanel only after it closed.
			if ids.Host.Network().Connectedness(rp) == network.Connected {
				ph.start(handlerCtx, func() { phClosedCh <- rp })
			} else {
				delete(phs, rp)
			}
		case e, more := <-sub.Out():
			if !more {
				return
			}
			switch e.(type) {
			case event.EvtLocalAddressesUpdated:
				// Signal each handler; drop the signal when a push is
				// already pending (buffer full).
				for pid := range phs {
					select {
					case phs[pid].pushCh <- struct{}{}:
					default:
						log.Debugf("dropping addr updated message for %s as buffer full", pid.Pretty())
					}
				}
			case event.EvtLocalProtocolsUpdated:
				for pid := range phs {
					select {
					case phs[pid].deltaCh <- struct{}{}:
					default:
						log.Debugf("dropping protocol updated message for %s as buffer full", pid.Pretty())
					}
				}
			}
		case <-ids.ctx.Done():
			return
		}
	}
}
// Close shuts down the idService: it cancels the service context (which
// stops the background loop and its handlers), closes the observed-address
// manager, and waits for the loop to exit before returning. Always nil.
func (ids *idService) Close() error {
	ids.ctxCancel()
	ids.observedAddrs.Close()
	ids.refCount.Wait()
	return nil
}
// OwnObservedAddrs returns the addresses peers have reported we've dialed from.
func (ids *idService) OwnObservedAddrs() []ma.Multiaddr {
	return ids.observedAddrs.Addrs()
}

// ObservedAddrsFor returns the addresses peers have reported we've dialed
// from, restricted to the given local address.
func (ids *idService) ObservedAddrsFor(local ma.Multiaddr) []ma.Multiaddr {
	return ids.observedAddrs.AddrsFor(local)
}

// IdentifyConn triggers identification of the connection (if not already
// done or in flight) and blocks until it completes.
func (ids *idService) IdentifyConn(c network.Conn) {
	<-ids.IdentifyWait(c)
}
// IdentifyWait triggers identification of the connection (unless one is
// already in flight or finished) and returns a channel that is closed when
// identification completes — successfully or not.
func (ids *idService) IdentifyWait(c network.Conn) <-chan struct{} {
	// Fast path: identify already started for this connection.
	ids.connsMu.RLock()
	wait, found := ids.conns[c]
	ids.connsMu.RUnlock()
	if found {
		return wait
	}
	ids.connsMu.Lock()
	defer ids.connsMu.Unlock()
	// Re-check under the write lock: another goroutine may have started an
	// identify between the two lock acquisitions.
	wait, found = ids.conns[c]
	if !found {
		wait = make(chan struct{})
		ids.conns[c] = wait
		// Spawn an identify. The connection may actually be closed
		// already, but that doesn't really matter. We'll fail to open a
		// stream then forget the connection.
		go func() {
			defer close(wait)
			if err := ids.identifyConn(c); err != nil {
				ids.emitters.evtPeerIdentificationFailed.Emit(event.EvtPeerIdentificationFailed{Peer: c.RemotePeer(), Reason: err})
				return
			}
			ids.emitters.evtPeerIdentificationCompleted.Emit(event.EvtPeerIdentificationCompleted{Peer: c.RemotePeer()})
		}()
	}
	return wait
}
// removeConn stops tracking the identify state of the given connection.
func (ids *idService) removeConn(c network.Conn) {
	ids.connsMu.Lock()
	defer ids.connsMu.Unlock()
	delete(ids.conns, c)
}
// identifyConn opens an identify stream on the connection, negotiates the
// identify protocol and consumes the peer's response. It blocks until
// identification finishes and returns the first error encountered.
func (ids *idService) identifyConn(c network.Conn) error {
	s, err := c.NewStream(network.WithUseTransient(context.TODO(), "identify"))
	if err != nil {
		log.Debugw("error opening identify stream", "error", err)
		// Don't close the connection: failing to open an identify stream
		// doesn't mean the connection itself is unusable (see the later
		// revision of this file, which removes the c.Close() that used to
		// live here).
		// We usually do this on disconnect, but we may have already
		// processed the disconnect event.
		ids.removeConn(c)
		return err
	}
	s.SetProtocol(ID)
	// ok give the response to our handler.
	if err := msmux.SelectProtoOrFail(ID, s); err != nil {
		log.Infow("failed negotiate identify protocol with peer", "peer", c.RemotePeer(), "error", err)
		s.Reset()
		return err
	}
	return ids.handleIdentifyResponse(s)
}
// sendIdentifyResp serves an inbound identify request: it fetches (or
// creates) the peer's handler via the main loop and writes the handler's
// current snapshot back on the stream.
func (ids *idService) sendIdentifyResp(s network.Stream) {
	defer s.Close()
	c := s.Conn()
	phCh := make(chan *peerHandler, 1)
	// Ask the main loop for this peer's handler; abort if the service is
	// shutting down.
	select {
	case ids.addPeerHandlerCh <- addPeerHandlerReq{c.RemotePeer(), phCh}:
	case <-ids.ctx.Done():
		return
	}
	var ph *peerHandler
	select {
	case ph = <-phCh:
	case <-ids.ctx.Done():
		return
	}
	if ph == nil {
		// Peer disconnected, abort.
		s.Reset()
		return
	}
	// Read the snapshot under its lock; the handler may refresh it
	// concurrently.
	ph.snapshotMu.RLock()
	snapshot := ph.snapshot
	ph.snapshotMu.RUnlock()
	ids.writeChunkedIdentifyMsg(c, snapshot, s)
	log.Debugf("%s sent message to %s %s", ID, c.RemotePeer(), c.RemoteMultiaddr())
}
// handleIdentifyResponse reads the (possibly multi-part) identify response
// from the stream, enforcing StreamReadTimeout, and feeds the resulting
// message into consumeMessage.
func (ids *idService) handleIdentifyResponse(s network.Stream) error {
	_ = s.SetReadDeadline(time.Now().Add(StreamReadTimeout))
	c := s.Conn()
	r := protoio.NewDelimitedReader(s, signedIDSize)
	mes := &pb.Identify{}
	if err := readAllIDMessages(r, mes); err != nil {
		log.Warn("error reading identify message: ", err)
		s.Reset()
		return err
	}
	defer s.Close()
	log.Debugf("%s received message from %s %s", s.Protocol(), c.RemotePeer(), c.RemoteMultiaddr())
	ids.consumeMessage(mes, c)
	return nil
}
// readAllIDMessages reads up to maxMessages delimited messages from r and
// merges each into finalMsg, stopping cleanly at EOF. It errors if the
// response spans more than maxMessages parts.
func readAllIDMessages(r protoio.Reader, finalMsg proto.Message) error {
	part := &pb.Identify{}
	for i := 0; i < maxMessages; i++ {
		err := r.ReadMsg(part)
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		proto.Merge(finalMsg, part)
	}
	return fmt.Errorf("too many parts")
}
// getSnapshot captures the host's current identify state: its addresses,
// its mux protocols and (unless disabled) its signed peer record.
func (ids *idService) getSnapshot() *identifySnapshot {
	s := &identifySnapshot{
		addrs:     ids.Host.Addrs(),
		protocols: ids.Host.Mux().Protocols(),
	}
	if !ids.disableSignedPeerRecord {
		if cab, ok := peerstore.GetCertifiedAddrBook(ids.Host.Peerstore()); ok {
			s.record = cab.GetPeerRecord(ids.Host.ID())
		}
	}
	return s
}
// writeChunkedIdentifyMsg writes the identify response for c onto s.
// Small (or unsigned) responses go out as a single message; otherwise the
// base message and the signed peer record are sent as two parts.
func (ids *idService) writeChunkedIdentifyMsg(c network.Conn, snapshot *identifySnapshot, s network.Stream) error {
	w := protoio.NewDelimitedWriter(s)
	msg := ids.createBaseIdentifyResponse(c, snapshot)
	signedRecord := ids.getSignedRecord(snapshot)
	msg.SignedPeerRecord = signedRecord
	// Fits in one legacy-sized message (or there is no signed record).
	if signedRecord == nil || proto.Size(msg) <= legacyIDSize {
		return w.WriteMsg(msg)
	}
	// Too big: send the base message first, then the record on its own.
	msg.SignedPeerRecord = nil
	if err := w.WriteMsg(msg); err != nil {
		return err
	}
	return w.WriteMsg(&pb.Identify{SignedPeerRecord: signedRecord})
}
// createBaseIdentifyResponse assembles the identify message for the given
// connection from a snapshot: protocols, listen addresses, the observed
// remote address, our public key and version strings. The signed peer
// record is attached separately by the caller.
func (ids *idService) createBaseIdentifyResponse(
	conn network.Conn,
	snapshot *identifySnapshot,
) *pb.Identify {
	mes := &pb.Identify{}
	remoteAddr := conn.RemoteMultiaddr()
	localAddr := conn.LocalMultiaddr()
	// set protocols this node is currently handling
	mes.Protocols = snapshot.protocols
	// observed address so other side is informed of their
	// "public" address, at least in relation to us.
	mes.ObservedAddr = remoteAddr.Bytes()
	// populate unsigned addresses.
	// peers that do not yet support signed addresses will need this.
	// Note: LocalMultiaddr is sometimes 0.0.0.0
	viaLoopback := manet.IsIPLoopback(localAddr) || manet.IsIPLoopback(remoteAddr)
	mes.ListenAddrs = make([][]byte, 0, len(snapshot.addrs))
	for _, addr := range snapshot.addrs {
		// Only advertise loopback addresses to peers that reached us over
		// loopback themselves.
		if !viaLoopback && manet.IsIPLoopback(addr) {
			continue
		}
		mes.ListenAddrs = append(mes.ListenAddrs, addr.Bytes())
	}
	// set our public key
	ownKey := ids.Host.Peerstore().PubKey(ids.Host.ID())
	// check if we even have a public key.
	if ownKey == nil {
		// public key is nil. We are either using insecure transport or something erratic happened.
		// check if we're even operating in "secure mode"
		if ids.Host.Peerstore().PrivKey(ids.Host.ID()) != nil {
			// private key is present. But NO public key. Something bad happened.
			log.Errorf("did not have own public key in Peerstore")
		}
		// if neither of the key is present it is safe to assume that we are using an insecure transport.
	} else {
		// public key is present. Safe to proceed.
		if kb, err := crypto.MarshalPublicKey(ownKey); err != nil {
			log.Errorf("failed to convert key to bytes")
		} else {
			mes.PublicKey = kb
		}
	}
	// set protocol versions
	pv := LibP2PVersion
	av := ids.UserAgent
	mes.ProtocolVersion = &pv
	mes.AgentVersion = &av
	return mes
}
// getSignedRecord returns the marshaled signed peer record from the
// snapshot, or nil when signed records are disabled, absent, or fail to
// marshal (the failure is logged).
func (ids *idService) getSignedRecord(snapshot *identifySnapshot) []byte {
	if ids.disableSignedPeerRecord || snapshot.record == nil {
		return nil
	}
	b, err := snapshot.record.Marshal()
	if err != nil {
		log.Errorw("failed to marshal signed record", "err", err)
		return nil
	}
	return b
}
// consumeMessage applies a received identify message to our view of the
// remote peer: supported protocols, listen addresses (signed record when
// available, unsigned otherwise), protocol/agent versions, and the peer's
// public key.
func (ids *idService) consumeMessage(mes *pb.Identify, c network.Conn) {
	p := c.RemotePeer()
	// mes.Protocols
	ids.Host.Peerstore().SetProtocols(p, mes.Protocols...)
	// mes.ObservedAddr
	ids.consumeObservedAddress(mes.GetObservedAddr(), c)
	// mes.ListenAddrs
	laddrs := mes.GetListenAddrs()
	lmaddrs := make([]ma.Multiaddr, 0, len(laddrs))
	for _, addr := range laddrs {
		maddr, err := ma.NewMultiaddrBytes(addr)
		if err != nil {
			log.Debugf("%s failed to parse multiaddr from %s %s", ID,
				p, c.RemoteMultiaddr())
			continue
		}
		lmaddrs = append(lmaddrs, maddr)
	}
	// NOTE: Do not add `c.RemoteMultiaddr()` to the peerstore if the remote
	// peer doesn't tell us to do so. Otherwise, we'll advertise it.
	//
	// This can cause an "addr-splosion" issue where the network will slowly
	// gossip and collect observed but unadvertised addresses. Given a NAT
	// that picks random source ports, this can cause DHT nodes to collect
	// many undialable addresses for other peers.
	// add certified addresses for the peer, if they sent us a signed peer record
	// otherwise use the unsigned addresses.
	var signedPeerRecord *record.Envelope
	signedPeerRecord, err := signedPeerRecordFromMessage(mes)
	if err != nil {
		log.Errorf("error getting peer record from Identify message: %v", err)
	}
	// Extend the TTLs on the known (probably) good addresses.
	// Taking the lock ensures that we don't concurrently process a disconnect.
	ids.addrMu.Lock()
	ttl := peerstore.RecentlyConnectedAddrTTL
	if ids.Host.Network().Connectedness(p) == network.Connected {
		ttl = peerstore.ConnectedAddrTTL
	}
	// Downgrade connected and recently connected addrs to a temporary TTL.
	// (The loop variable deliberately shadows the outer ttl.)
	for _, ttl := range []time.Duration{
		peerstore.RecentlyConnectedAddrTTL,
		peerstore.ConnectedAddrTTL,
	} {
		ids.Host.Peerstore().UpdateAddrs(p, ttl, peerstore.TempAddrTTL)
	}
	// add signed addrs if we have them and the peerstore supports them
	cab, ok := peerstore.GetCertifiedAddrBook(ids.Host.Peerstore())
	if ok && signedPeerRecord != nil {
		_, addErr := cab.ConsumePeerRecord(signedPeerRecord, ttl)
		if addErr != nil {
			log.Debugf("error adding signed addrs to peerstore: %v", addErr)
		}
	} else {
		ids.Host.Peerstore().AddAddrs(p, lmaddrs, ttl)
	}
	// Finally, expire all temporary addrs.
	ids.Host.Peerstore().UpdateAddrs(p, peerstore.TempAddrTTL, 0)
	ids.addrMu.Unlock()
	log.Debugf("%s received listen addrs for %s: %s", c.LocalPeer(), c.RemotePeer(), lmaddrs)
	// get protocol versions
	pv := mes.GetProtocolVersion()
	av := mes.GetAgentVersion()
	ids.Host.Peerstore().Put(p, "ProtocolVersion", pv)
	ids.Host.Peerstore().Put(p, "AgentVersion", av)
	// get the key from the other side. we may not have it (no-auth transport)
	ids.consumeReceivedPubKey(c, mes.PublicKey)
}
// consumeReceivedPubKey validates the public key the remote peer sent us
// against its peer.ID and stores it in the peerstore when safe to do so.
// Mismatches and inconsistencies are logged but never overwrite existing
// peerstore state.
func (ids *idService) consumeReceivedPubKey(c network.Conn, kb []byte) {
	lp := c.LocalPeer()
	rp := c.RemotePeer()
	if kb == nil {
		log.Debugf("%s did not receive public key for remote peer: %s", lp, rp)
		return
	}
	newKey, err := crypto.UnmarshalPublicKey(kb)
	if err != nil {
		log.Warnf("%s cannot unmarshal key from remote peer: %s, %s", lp, rp, err)
		return
	}
	// verify key matches peer.ID
	np, err := peer.IDFromPublicKey(newKey)
	if err != nil {
		log.Debugf("%s cannot get peer.ID from key of remote peer: %s, %s", lp, rp, err)
		return
	}
	if np != rp {
		// if the newKey's peer.ID does not match known peer.ID...
		if rp == "" && np != "" {
			// if local peerid is empty, then use the new, sent key.
			err := ids.Host.Peerstore().AddPubKey(rp, newKey)
			if err != nil {
				log.Debugf("%s could not add key for %s to peerstore: %s", lp, rp, err)
			}
		} else {
			// we have a local peer.ID and it does not match the sent key... error.
			log.Errorf("%s received key for remote peer %s mismatch: %s", lp, rp, np)
		}
		return
	}
	currKey := ids.Host.Peerstore().PubKey(rp)
	if currKey == nil {
		// no key? no auth transport. set this one.
		err := ids.Host.Peerstore().AddPubKey(rp, newKey)
		if err != nil {
			log.Debugf("%s could not add key for %s to peerstore: %s", lp, rp, err)
		}
		return
	}
	// ok, we have a local key, we should verify they match.
	if currKey.Equals(newKey) {
		return // ok great. we're done.
	}
	// weird, got a different key... but the different key MATCHES the peer.ID.
	// this odd. let's log error and investigate. this should basically never happen
	// and it means we have something funky going on and possibly a bug.
	log.Errorf("%s identify got a different key for: %s", lp, rp)
	// okay... does ours NOT match the remote peer.ID?
	cp, err := peer.IDFromPublicKey(currKey)
	if err != nil {
		log.Errorf("%s cannot get peer.ID from local key of remote peer: %s, %s", lp, rp, err)
		return
	}
	if cp != rp {
		log.Errorf("%s local key for remote peer %s yields different peer.ID: %s", lp, rp, cp)
		return
	}
	// okay... curr key DOES NOT match new key. both match peer.ID. wat?
	log.Errorf("%s local key and received key for %s do not match, but match peer.ID", lp, rp)
}
// HasConsistentTransport returns true if the address 'a' shares a
// protocol set with any address in the green set. This is used
// to check if a given address might be one of the addresses a peer is
// listening on.
func HasConsistentTransport(a ma.Multiaddr, green []ma.Multiaddr) bool {
	sameProtos := func(x, y []ma.Protocol) bool {
		if len(x) != len(y) {
			return false
		}
		for i := range x {
			if x[i].Code != y[i].Code {
				return false
			}
		}
		return true
	}
	want := a.Protocols()
	for _, candidate := range green {
		if sameProtos(want, candidate.Protocols()) {
			return true
		}
	}
	return false
}
// consumeObservedAddress records the address the remote peer claims to
// have observed us dialing from. A nil or unparsable address is ignored.
func (ids *idService) consumeObservedAddress(observed []byte, c network.Conn) {
	if observed == nil {
		return
	}
	observedAddr, err := ma.NewMultiaddrBytes(observed)
	if err != nil {
		log.Debugf("error parsing received observed addr for %s: %s", c, err)
		return
	}
	ids.observedAddrs.Record(c, observedAddr)
}
// addrInAddrs reports whether a is equal to any multiaddr in as.
func addrInAddrs(a ma.Multiaddr, as []ma.Multiaddr) bool {
	for _, candidate := range as {
		if a.Equal(candidate) {
			return true
		}
	}
	return false
}
// signedPeerRecordFromMessage extracts and validates the signed peer
// record carried in an identify message. It returns (nil, nil) when the
// message carries no record.
func signedPeerRecordFromMessage(msg *pb.Identify) (*record.Envelope, error) {
	// len() is defined on a nil slice, so the previous explicit nil check
	// was redundant (staticcheck S1009).
	if len(msg.SignedPeerRecord) == 0 {
		return nil, nil
	}
	env, _, err := record.ConsumeEnvelope(msg.SignedPeerRecord, peer.PeerRecordEnvelopeDomain)
	return env, err
}
// netNotifiee is an alias of idService used to receive network
// notifications (the original comment referencing "IpfsDHT" was a
// copy-paste leftover).
type netNotifiee idService

// IDService converts the notifiee back to the underlying *idService.
func (nn *netNotifiee) IDService() *idService {
	return (*idService)(nn)
}

// Connected kicks off asynchronous identification of every new connection.
func (nn *netNotifiee) Connected(n network.Network, v network.Conn) {
	nn.IDService().IdentifyWait(v)
}
// Disconnected drops the per-connection identify state and, when this was
// the last connection to the peer, asks the main loop to retire the
// peer's handler and downgrades the peer's address TTLs in the peerstore.
func (nn *netNotifiee) Disconnected(n network.Network, v network.Conn) {
	ids := nn.IDService()
	// Stop tracking the connection.
	ids.removeConn(v)
	// undo the setting of addresses to peer.ConnectedAddrTTL we did
	ids.addrMu.Lock()
	defer ids.addrMu.Unlock()
	if ids.Host.Network().Connectedness(v.RemotePeer()) != network.Connected {
		// consider removing the peer handler for this
		select {
		case ids.rmPeerHandlerCh <- rmPeerHandlerReq{v.RemotePeer()}:
		case <-ids.ctx.Done():
			return
		}
		// Last disconnect.
		ps := ids.Host.Peerstore()
		ps.UpdateAddrs(v.RemotePeer(), peerstore.ConnectedAddrTTL, peerstore.RecentlyConnectedAddrTTL)
	}
}

// The remaining notifiee callbacks are unused; they exist only so that
// netNotifiee satisfies the network.Notifiee interface.
func (nn *netNotifiee) OpenedStream(n network.Network, v network.Stream) {}
func (nn *netNotifiee) ClosedStream(n network.Network, v network.Stream) {}
func (nn *netNotifiee) Listen(n network.Network, a ma.Multiaddr)         {}
func (nn *netNotifiee) ListenClose(n network.Network, a ma.Multiaddr)    {}
Don't close the connection when opening the identify stream fails.
package identify
import (
"context"
"fmt"
"io"
"runtime/debug"
"sync"
"time"
"github.com/libp2p/go-libp2p-core/crypto"
"github.com/libp2p/go-libp2p-core/event"
"github.com/libp2p/go-libp2p-core/host"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/libp2p/go-libp2p-core/peerstore"
"github.com/libp2p/go-libp2p-core/record"
"github.com/libp2p/go-eventbus"
"github.com/libp2p/go-msgio/protoio"
pb "github.com/libp2p/go-libp2p/p2p/protocol/identify/pb"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
msmux "github.com/multiformats/go-multistream"
"github.com/gogo/protobuf/proto"
logging "github.com/ipfs/go-log/v2"
)
// log is the package-wide logger for the identify service.
var log = logging.Logger("net/identify")

// ID is the protocol.ID of version 1.0.0 of the identify
// service.
const ID = "/ipfs/id/1.0.0"

// LibP2PVersion holds the current protocol version for a client running this code
// TODO(jbenet): fix the versioning mess.
// XXX: Don't change this till 2020. You'll break all go-ipfs versions prior to
// 0.4.17 which asserted an exact version match.
const LibP2PVersion = "ipfs/0.1.0"

// StreamReadTimeout is the read timeout on all incoming Identify family streams.
var StreamReadTimeout = 60 * time.Second

// Wire-format limits and the fallback user agent for the identify protocol.
var (
	legacyIDSize = 2 * 1024 // 2k Bytes
	signedIDSize = 8 * 1024 // 8K
	// maxMessages bounds how many chunks one identify response may span
	// (see readAllIDMessages).
	maxMessages      = 10
	defaultUserAgent = "github.com/libp2p/go-libp2p"
)
// init derives the default user agent from the binary's build info when
// available: "module@version", or just the module path for devel builds.
func init() {
	bi, ok := debug.ReadBuildInfo()
	if !ok {
		return
	}
	if bi.Main.Version == "(devel)" {
		defaultUserAgent = bi.Main.Path
	} else {
		defaultUserAgent = fmt.Sprintf("%s@%s", bi.Main.Path, bi.Main.Version)
	}
}
// addPeerHandlerReq asks the main loop for the peerHandler of peer rp,
// creating one if the peer is connected; the handler (or nil) is sent
// back on resp.
type addPeerHandlerReq struct {
	rp   peer.ID
	resp chan *peerHandler
}

// rmPeerHandlerReq asks the main loop to stop the peerHandler for peer p
// if that peer is no longer connected.
type rmPeerHandlerReq struct {
	p peer.ID
}
// IDService is the interface exposed by the identify service. It drives
// the identify protocol over connections and reports the dial addresses
// remote peers have observed for us.
type IDService interface {
	// IdentifyConn synchronously triggers an identify request on the connection and
	// waits for it to complete. If the connection is being identified by another
	// caller, this call will wait. If the connection has already been identified,
	// it will return immediately.
	IdentifyConn(network.Conn)
	// IdentifyWait triggers an identify (if the connection has not already been
	// identified) and returns a channel that is closed when the identify protocol
	// completes.
	IdentifyWait(network.Conn) <-chan struct{}
	// OwnObservedAddrs returns the addresses peers have reported we've dialed from
	OwnObservedAddrs() []ma.Multiaddr
	// ObservedAddrsFor returns the addresses peers have reported we've dialed from,
	// for a specific local address.
	ObservedAddrsFor(local ma.Multiaddr) []ma.Multiaddr
	io.Closer
}
// idService is a structure that implements ProtocolIdentify.
// It is a trivial service that gives the other peer some
// useful information about the local peer. A sort of hello.
//
// The idService sends:
//  * Our IPFS Protocol Version
//  * Our IPFS Agent Version
//  * Our public Listen Addresses
type idService struct {
	Host      host.Host
	UserAgent string

	// ctx governs the lifetime of the service; ctxCancel is invoked by Close.
	ctx       context.Context
	ctxCancel context.CancelFunc
	// track resources that need to be shut down before we shut down
	refCount sync.WaitGroup

	disableSignedPeerRecord bool

	// Identified connections (finished and in progress).
	connsMu sync.RWMutex
	conns   map[network.Conn]chan struct{}

	// addrMu serializes peerstore address updates against disconnect handling.
	addrMu sync.Mutex

	// our own observed addresses.
	observedAddrs *ObservedAddrManager

	// event emitters; an emitter may be unset if the bus refused it
	// (NewIDService logs a warning in that case).
	emitters struct {
		evtPeerProtocolsUpdated        event.Emitter
		evtPeerIdentificationCompleted event.Emitter
		evtPeerIdentificationFailed    event.Emitter
	}

	// requests to the main loop to add/remove peer handlers.
	addPeerHandlerCh chan addPeerHandlerReq
	rmPeerHandlerCh  chan rmPeerHandlerReq
}
// NewIDService constructs a new *idService and activates it by
// attaching its stream handler to the given host.Host.
func NewIDService(h host.Host, opts ...Option) (*idService, error) {
	var cfg config
	for _, opt := range opts {
		opt(&cfg)
	}
	// Use the package-wide default user agent unless the caller overrode it.
	userAgent := defaultUserAgent
	if cfg.userAgent != "" {
		userAgent = cfg.userAgent
	}
	s := &idService{
		Host:                    h,
		UserAgent:               userAgent,
		conns:                   make(map[network.Conn]chan struct{}),
		disableSignedPeerRecord: cfg.disableSignedPeerRecord,
		addPeerHandlerCh:        make(chan addPeerHandlerReq),
		rmPeerHandlerCh:         make(chan rmPeerHandlerReq),
	}
	s.ctx, s.ctxCancel = context.WithCancel(context.Background())
	// handle local protocol handler updates, and push deltas to peers.
	var err error
	observedAddrs, err := NewObservedAddrManager(h)
	if err != nil {
		return nil, fmt.Errorf("failed to create observed address manager: %s", err)
	}
	s.observedAddrs = observedAddrs
	// Account for the background loop before starting it so Close can wait
	// for it through refCount.
	s.refCount.Add(1)
	go s.loop()
	// Emitter failures are non-fatal: identification still works, we just
	// won't publish the corresponding events.
	s.emitters.evtPeerProtocolsUpdated, err = h.EventBus().Emitter(&event.EvtPeerProtocolsUpdated{})
	if err != nil {
		log.Warnf("identify service not emitting peer protocol updates; err: %s", err)
	}
	s.emitters.evtPeerIdentificationCompleted, err = h.EventBus().Emitter(&event.EvtPeerIdentificationCompleted{})
	if err != nil {
		log.Warnf("identify service not emitting identification completed events; err: %s", err)
	}
	s.emitters.evtPeerIdentificationFailed, err = h.EventBus().Emitter(&event.EvtPeerIdentificationFailed{})
	if err != nil {
		log.Warnf("identify service not emitting identification failed events; err: %s", err)
	}
	// register protocols that do not depend on peer records.
	h.SetStreamHandler(IDDelta, s.deltaHandler)
	h.SetStreamHandler(ID, s.sendIdentifyResp)
	h.SetStreamHandler(IDPush, s.pushHandler)
	h.Network().Notify((*netNotifiee)(s))
	return s, nil
}
// loop is the service's main event loop. It owns the peerHandler table:
// it creates handlers on demand, retires them after disconnects, restarts
// handlers that raced with a reconnect, and fans local address/protocol
// updates out to every live handler. It exits when the service context is
// canceled or the event-bus subscription closes.
func (ids *idService) loop() {
	defer ids.refCount.Done()
	phs := make(map[peer.ID]*peerHandler)
	sub, err := ids.Host.EventBus().Subscribe([]interface{}{&event.EvtLocalProtocolsUpdated{},
		&event.EvtLocalAddressesUpdated{}}, eventbus.BufSize(256))
	if err != nil {
		log.Errorf("failed to subscribe to events on the bus, err=%s", err)
		return
	}
	// Each handler announces itself on phClosedCh exactly once after it has
	// fully stopped.
	phClosedCh := make(chan peer.ID)
	defer func() {
		sub.Close()
		// The context will cancel the workers. Now, wait for them to
		// exit.
		for range phs {
			<-phClosedCh
		}
	}()
	// Use a fresh context for the handlers. Otherwise, they'll get canceled
	// before we're ready to shutdown and they'll have "stopped" without us
	// _calling_ stop.
	handlerCtx, cancel := context.WithCancel(context.Background())
	defer cancel()
	for {
		select {
		case addReq := <-ids.addPeerHandlerCh:
			rp := addReq.rp
			ph, ok := phs[rp]
			// Only create a handler while the peer is still connected; the
			// requester receives nil otherwise.
			if !ok && ids.Host.Network().Connectedness(rp) == network.Connected {
				ph = newPeerHandler(rp, ids)
				ph.start(handlerCtx, func() { phClosedCh <- rp })
				phs[rp] = ph
			}
			addReq.resp <- ph
		case rmReq := <-ids.rmPeerHandlerCh:
			rp := rmReq.p
			if ids.Host.Network().Connectedness(rp) != network.Connected {
				// before we remove the peerhandler, we should ensure that it will not send any
				// more messages. Otherwise, we might create a new handler and the Identify response
				// synchronized with the new handler might be overwritten by a message sent by this "old" handler.
				ph, ok := phs[rp]
				if !ok {
					// move on, move on, there's nothing to see here.
					continue
				}
				// This is idempotent if already stopped.
				ph.stop()
			}
		case rp := <-phClosedCh:
			ph := phs[rp]
			// If we are connected to the peer, it means that we got a connection from the peer
			// before we could finish removing it's handler on the previous disconnection.
			// If we delete the handler, we wont be able to push updates to it
			// till we see a new connection. So, we should restart the handler.
			// The fact that we got the handler on this channel means that it's context and handler
			// have completed because we write the handler to this chanel only after it closed.
			if ids.Host.Network().Connectedness(rp) == network.Connected {
				ph.start(handlerCtx, func() { phClosedCh <- rp })
			} else {
				delete(phs, rp)
			}
		case e, more := <-sub.Out():
			if !more {
				return
			}
			switch e.(type) {
			case event.EvtLocalAddressesUpdated:
				// Signal each handler; drop the signal when a push is
				// already pending (buffer full).
				for pid := range phs {
					select {
					case phs[pid].pushCh <- struct{}{}:
					default:
						log.Debugf("dropping addr updated message for %s as buffer full", pid.Pretty())
					}
				}
			case event.EvtLocalProtocolsUpdated:
				for pid := range phs {
					select {
					case phs[pid].deltaCh <- struct{}{}:
					default:
						log.Debugf("dropping protocol updated message for %s as buffer full", pid.Pretty())
					}
				}
			}
		case <-ids.ctx.Done():
			return
		}
	}
}
// Close shuts down the idService: it cancels the service context (which
// stops the background loop and its handlers), closes the observed-address
// manager, and waits for the loop to exit before returning. Always nil.
func (ids *idService) Close() error {
	ids.ctxCancel()
	ids.observedAddrs.Close()
	ids.refCount.Wait()
	return nil
}
// OwnObservedAddrs returns the addresses peers have reported we've dialed from.
func (ids *idService) OwnObservedAddrs() []ma.Multiaddr {
	return ids.observedAddrs.Addrs()
}

// ObservedAddrsFor returns the addresses peers have reported we've dialed
// from, restricted to the given local address.
func (ids *idService) ObservedAddrsFor(local ma.Multiaddr) []ma.Multiaddr {
	return ids.observedAddrs.AddrsFor(local)
}

// IdentifyConn triggers identification of the connection (if not already
// done or in flight) and blocks until it completes.
func (ids *idService) IdentifyConn(c network.Conn) {
	<-ids.IdentifyWait(c)
}
// IdentifyWait triggers identification of the connection (unless one is
// already in flight or finished) and returns a channel that is closed when
// identification completes — successfully or not.
func (ids *idService) IdentifyWait(c network.Conn) <-chan struct{} {
	// Fast path: identify already started for this connection.
	ids.connsMu.RLock()
	wait, found := ids.conns[c]
	ids.connsMu.RUnlock()
	if found {
		return wait
	}
	ids.connsMu.Lock()
	defer ids.connsMu.Unlock()
	// Re-check under the write lock: another goroutine may have started an
	// identify between the two lock acquisitions.
	wait, found = ids.conns[c]
	if !found {
		wait = make(chan struct{})
		ids.conns[c] = wait
		// Spawn an identify. The connection may actually be closed
		// already, but that doesn't really matter. We'll fail to open a
		// stream then forget the connection.
		go func() {
			defer close(wait)
			if err := ids.identifyConn(c); err != nil {
				ids.emitters.evtPeerIdentificationFailed.Emit(event.EvtPeerIdentificationFailed{Peer: c.RemotePeer(), Reason: err})
				return
			}
			ids.emitters.evtPeerIdentificationCompleted.Emit(event.EvtPeerIdentificationCompleted{Peer: c.RemotePeer()})
		}()
	}
	return wait
}
// removeConn stops tracking the identify state of the given connection.
func (ids *idService) removeConn(c network.Conn) {
	ids.connsMu.Lock()
	defer ids.connsMu.Unlock()
	delete(ids.conns, c)
}
// identifyConn opens an identify stream on the connection, negotiates the
// identify protocol and consumes the peer's response. It blocks until
// identification finishes and returns the first error encountered.
func (ids *idService) identifyConn(c network.Conn) error {
	s, err := c.NewStream(network.WithUseTransient(context.TODO(), "identify"))
	if err != nil {
		log.Debugw("error opening identify stream", "error", err)
		// Deliberately don't close the connection here: failing to open an
		// identify stream doesn't mean the connection itself is unusable.
		// We usually do this on disconnect, but we may have already
		// processed the disconnect event.
		ids.removeConn(c)
		return err
	}
	s.SetProtocol(ID)
	// ok give the response to our handler.
	if err := msmux.SelectProtoOrFail(ID, s); err != nil {
		log.Infow("failed negotiate identify protocol with peer", "peer", c.RemotePeer(), "error", err)
		s.Reset()
		return err
	}
	return ids.handleIdentifyResponse(s)
}
// sendIdentifyResp serves an inbound identify request: it fetches (or
// creates) the peer's handler via the main loop and writes the handler's
// current snapshot back on the stream.
func (ids *idService) sendIdentifyResp(s network.Stream) {
	defer s.Close()
	c := s.Conn()
	phCh := make(chan *peerHandler, 1)
	// Ask the main loop for this peer's handler; abort if the service is
	// shutting down.
	select {
	case ids.addPeerHandlerCh <- addPeerHandlerReq{c.RemotePeer(), phCh}:
	case <-ids.ctx.Done():
		return
	}
	var ph *peerHandler
	select {
	case ph = <-phCh:
	case <-ids.ctx.Done():
		return
	}
	if ph == nil {
		// Peer disconnected, abort.
		s.Reset()
		return
	}
	// Read the snapshot under its lock; the handler may refresh it
	// concurrently.
	ph.snapshotMu.RLock()
	snapshot := ph.snapshot
	ph.snapshotMu.RUnlock()
	ids.writeChunkedIdentifyMsg(c, snapshot, s)
	log.Debugf("%s sent message to %s %s", ID, c.RemotePeer(), c.RemoteMultiaddr())
}
// handleIdentifyResponse reads the (possibly multi-part) identify response
// from the stream, enforcing StreamReadTimeout, and feeds the resulting
// message into consumeMessage.
func (ids *idService) handleIdentifyResponse(s network.Stream) error {
	_ = s.SetReadDeadline(time.Now().Add(StreamReadTimeout))
	c := s.Conn()
	r := protoio.NewDelimitedReader(s, signedIDSize)
	mes := &pb.Identify{}
	if err := readAllIDMessages(r, mes); err != nil {
		log.Warn("error reading identify message: ", err)
		s.Reset()
		return err
	}
	defer s.Close()
	log.Debugf("%s received message from %s %s", s.Protocol(), c.RemotePeer(), c.RemoteMultiaddr())
	ids.consumeMessage(mes, c)
	return nil
}
// readAllIDMessages reads up to maxMessages delimited messages from r and
// merges each into finalMsg, stopping cleanly at EOF. It errors if the
// response spans more than maxMessages parts.
func readAllIDMessages(r protoio.Reader, finalMsg proto.Message) error {
	part := &pb.Identify{}
	for i := 0; i < maxMessages; i++ {
		err := r.ReadMsg(part)
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		proto.Merge(finalMsg, part)
	}
	return fmt.Errorf("too many parts")
}
// getSnapshot captures the host's current identify state: its addresses,
// its mux protocols and (unless disabled) its signed peer record.
func (ids *idService) getSnapshot() *identifySnapshot {
	s := &identifySnapshot{
		addrs:     ids.Host.Addrs(),
		protocols: ids.Host.Mux().Protocols(),
	}
	if !ids.disableSignedPeerRecord {
		if cab, ok := peerstore.GetCertifiedAddrBook(ids.Host.Peerstore()); ok {
			s.record = cab.GetPeerRecord(ids.Host.ID())
		}
	}
	return s
}
// writeChunkedIdentifyMsg writes the identify response for c onto s.
// Small (or unsigned) responses go out as a single message; otherwise the
// base message and the signed peer record are sent as two parts.
func (ids *idService) writeChunkedIdentifyMsg(c network.Conn, snapshot *identifySnapshot, s network.Stream) error {
	w := protoio.NewDelimitedWriter(s)
	msg := ids.createBaseIdentifyResponse(c, snapshot)
	signedRecord := ids.getSignedRecord(snapshot)
	msg.SignedPeerRecord = signedRecord
	// Fits in one legacy-sized message (or there is no signed record).
	if signedRecord == nil || proto.Size(msg) <= legacyIDSize {
		return w.WriteMsg(msg)
	}
	// Too big: send the base message first, then the record on its own.
	msg.SignedPeerRecord = nil
	if err := w.WriteMsg(msg); err != nil {
		return err
	}
	return w.WriteMsg(&pb.Identify{SignedPeerRecord: signedRecord})
}
// createBaseIdentifyResponse assembles the identify message for the given
// connection from a snapshot: protocols, listen addresses, the observed
// remote address, our public key and version strings. The signed peer
// record is attached separately by the caller.
func (ids *idService) createBaseIdentifyResponse(
	conn network.Conn,
	snapshot *identifySnapshot,
) *pb.Identify {
	mes := &pb.Identify{}
	remoteAddr := conn.RemoteMultiaddr()
	localAddr := conn.LocalMultiaddr()
	// set protocols this node is currently handling
	mes.Protocols = snapshot.protocols
	// observed address so other side is informed of their
	// "public" address, at least in relation to us.
	mes.ObservedAddr = remoteAddr.Bytes()
	// populate unsigned addresses.
	// peers that do not yet support signed addresses will need this.
	// Note: LocalMultiaddr is sometimes 0.0.0.0
	viaLoopback := manet.IsIPLoopback(localAddr) || manet.IsIPLoopback(remoteAddr)
	mes.ListenAddrs = make([][]byte, 0, len(snapshot.addrs))
	for _, addr := range snapshot.addrs {
		// Only advertise loopback addresses to peers that reached us over
		// loopback themselves.
		if !viaLoopback && manet.IsIPLoopback(addr) {
			continue
		}
		mes.ListenAddrs = append(mes.ListenAddrs, addr.Bytes())
	}
	// set our public key
	ownKey := ids.Host.Peerstore().PubKey(ids.Host.ID())
	// check if we even have a public key.
	if ownKey == nil {
		// public key is nil. We are either using insecure transport or something erratic happened.
		// check if we're even operating in "secure mode"
		if ids.Host.Peerstore().PrivKey(ids.Host.ID()) != nil {
			// private key is present. But NO public key. Something bad happened.
			log.Errorf("did not have own public key in Peerstore")
		}
		// if neither of the key is present it is safe to assume that we are using an insecure transport.
	} else {
		// public key is present. Safe to proceed.
		if kb, err := crypto.MarshalPublicKey(ownKey); err != nil {
			log.Errorf("failed to convert key to bytes")
		} else {
			mes.PublicKey = kb
		}
	}
	// set protocol versions
	pv := LibP2PVersion
	av := ids.UserAgent
	mes.ProtocolVersion = &pv
	mes.AgentVersion = &av
	return mes
}
// getSignedRecord returns the marshaled signed peer record from the
// snapshot, or nil when signed records are disabled, absent, or fail to
// marshal (the failure is logged).
func (ids *idService) getSignedRecord(snapshot *identifySnapshot) []byte {
	if ids.disableSignedPeerRecord || snapshot.record == nil {
		return nil
	}
	b, err := snapshot.record.Marshal()
	if err != nil {
		log.Errorw("failed to marshal signed record", "err", err)
		return nil
	}
	return b
}
// consumeMessage applies a received identify message to our view of the
// remote peer: supported protocols, listen addresses (signed record when
// available, unsigned otherwise), protocol/agent versions, and the peer's
// public key.
func (ids *idService) consumeMessage(mes *pb.Identify, c network.Conn) {
	p := c.RemotePeer()
	// mes.Protocols
	ids.Host.Peerstore().SetProtocols(p, mes.Protocols...)
	// mes.ObservedAddr
	ids.consumeObservedAddress(mes.GetObservedAddr(), c)
	// mes.ListenAddrs
	laddrs := mes.GetListenAddrs()
	lmaddrs := make([]ma.Multiaddr, 0, len(laddrs))
	for _, addr := range laddrs {
		maddr, err := ma.NewMultiaddrBytes(addr)
		if err != nil {
			log.Debugf("%s failed to parse multiaddr from %s %s", ID,
				p, c.RemoteMultiaddr())
			continue
		}
		lmaddrs = append(lmaddrs, maddr)
	}
	// NOTE: Do not add `c.RemoteMultiaddr()` to the peerstore if the remote
	// peer doesn't tell us to do so. Otherwise, we'll advertise it.
	//
	// This can cause an "addr-splosion" issue where the network will slowly
	// gossip and collect observed but unadvertised addresses. Given a NAT
	// that picks random source ports, this can cause DHT nodes to collect
	// many undialable addresses for other peers.
	// add certified addresses for the peer, if they sent us a signed peer record
	// otherwise use the unsigned addresses.
	var signedPeerRecord *record.Envelope
	signedPeerRecord, err := signedPeerRecordFromMessage(mes)
	if err != nil {
		log.Errorf("error getting peer record from Identify message: %v", err)
	}
	// Extend the TTLs on the known (probably) good addresses.
	// Taking the lock ensures that we don't concurrently process a disconnect.
	ids.addrMu.Lock()
	ttl := peerstore.RecentlyConnectedAddrTTL
	if ids.Host.Network().Connectedness(p) == network.Connected {
		ttl = peerstore.ConnectedAddrTTL
	}
	// Downgrade connected and recently connected addrs to a temporary TTL.
	// (The loop variable deliberately shadows the outer ttl.)
	for _, ttl := range []time.Duration{
		peerstore.RecentlyConnectedAddrTTL,
		peerstore.ConnectedAddrTTL,
	} {
		ids.Host.Peerstore().UpdateAddrs(p, ttl, peerstore.TempAddrTTL)
	}
	// add signed addrs if we have them and the peerstore supports them
	cab, ok := peerstore.GetCertifiedAddrBook(ids.Host.Peerstore())
	if ok && signedPeerRecord != nil {
		_, addErr := cab.ConsumePeerRecord(signedPeerRecord, ttl)
		if addErr != nil {
			log.Debugf("error adding signed addrs to peerstore: %v", addErr)
		}
	} else {
		ids.Host.Peerstore().AddAddrs(p, lmaddrs, ttl)
	}
	// Finally, expire all temporary addrs.
	ids.Host.Peerstore().UpdateAddrs(p, peerstore.TempAddrTTL, 0)
	ids.addrMu.Unlock()
	log.Debugf("%s received listen addrs for %s: %s", c.LocalPeer(), c.RemotePeer(), lmaddrs)
	// get protocol versions
	pv := mes.GetProtocolVersion()
	av := mes.GetAgentVersion()
	ids.Host.Peerstore().Put(p, "ProtocolVersion", pv)
	ids.Host.Peerstore().Put(p, "AgentVersion", av)
	// get the key from the other side. we may not have it (no-auth transport)
	ids.consumeReceivedPubKey(c, mes.PublicKey)
}
// consumeReceivedPubKey validates and stores the public key the remote
// peer sent in its Identify message. The key is only stored when it
// derives the remote peer's ID and agrees with any key already in the
// peerstore; every mismatch case is logged and the key dropped.
func (ids *idService) consumeReceivedPubKey(c network.Conn, kb []byte) {
	lp := c.LocalPeer()
	rp := c.RemotePeer()
	if kb == nil {
		log.Debugf("%s did not receive public key for remote peer: %s", lp, rp)
		return
	}
	newKey, err := crypto.UnmarshalPublicKey(kb)
	if err != nil {
		log.Warnf("%s cannot unmarshal key from remote peer: %s, %s", lp, rp, err)
		return
	}
	// verify key matches peer.ID
	np, err := peer.IDFromPublicKey(newKey)
	if err != nil {
		log.Debugf("%s cannot get peer.ID from key of remote peer: %s, %s", lp, rp, err)
		return
	}
	if np != rp {
		// if the newKey's peer.ID does not match known peer.ID...
		if rp == "" && np != "" {
			// if local peerid is empty, then use the new, sent key.
			// NOTE(review): this stores the key under the empty rp rather
			// than np — looks questionable; confirm against upstream libp2p.
			err := ids.Host.Peerstore().AddPubKey(rp, newKey)
			if err != nil {
				log.Debugf("%s could not add key for %s to peerstore: %s", lp, rp, err)
			}
		} else {
			// we have a local peer.ID and it does not match the sent key... error.
			log.Errorf("%s received key for remote peer %s mismatch: %s", lp, rp, np)
		}
		return
	}
	currKey := ids.Host.Peerstore().PubKey(rp)
	if currKey == nil {
		// no key? no auth transport. set this one.
		err := ids.Host.Peerstore().AddPubKey(rp, newKey)
		if err != nil {
			log.Debugf("%s could not add key for %s to peerstore: %s", lp, rp, err)
		}
		return
	}
	// ok, we have a local key, we should verify they match.
	if currKey.Equals(newKey) {
		return // ok great. we're done.
	}
	// weird, got a different key... but the different key MATCHES the peer.ID.
	// this odd. let's log error and investigate. this should basically never happen
	// and it means we have something funky going on and possibly a bug.
	log.Errorf("%s identify got a different key for: %s", lp, rp)
	// okay... does ours NOT match the remote peer.ID?
	cp, err := peer.IDFromPublicKey(currKey)
	if err != nil {
		log.Errorf("%s cannot get peer.ID from local key of remote peer: %s, %s", lp, rp, err)
		return
	}
	if cp != rp {
		log.Errorf("%s local key for remote peer %s yields different peer.ID: %s", lp, rp, cp)
		return
	}
	// okay... curr key DOES NOT match new key. both match peer.ID. wat?
	log.Errorf("%s local key and received key for %s do not match, but match peer.ID", lp, rp)
}
// HasConsistentTransport reports whether the address a uses the same
// protocol stack (same protocol codes, in order) as at least one address
// in the green set. It is used to check whether a given address might be
// one of the addresses a peer is listening on.
func HasConsistentTransport(a ma.Multiaddr, green []ma.Multiaddr) bool {
	sameStack := func(x, y []ma.Protocol) bool {
		if len(x) != len(y) {
			return false
		}
		for i := range x {
			if x[i].Code != y[i].Code {
				return false
			}
		}
		return true
	}
	want := a.Protocols()
	for _, candidate := range green {
		if sameStack(want, candidate.Protocols()) {
			return true
		}
	}
	return false
}
// consumeObservedAddress records the address the remote side observed
// for this connection, if the message carried one.
func (ids *idService) consumeObservedAddress(observed []byte, c network.Conn) {
	if observed == nil {
		return
	}
	addr, err := ma.NewMultiaddrBytes(observed)
	if err != nil {
		log.Debugf("error parsing received observed addr for %s: %s", c, err)
		return
	}
	ids.observedAddrs.Record(c, addr)
}
// addrInAddrs reports whether a equals any address in as.
func addrInAddrs(a ma.Multiaddr, as []ma.Multiaddr) bool {
	for i := range as {
		if a.Equal(as[i]) {
			return true
		}
	}
	return false
}
// signedPeerRecordFromMessage extracts and verifies the signed peer
// record carried in an Identify message. It returns (nil, nil) when the
// message carries no record.
func signedPeerRecordFromMessage(msg *pb.Identify) (*record.Envelope, error) {
	// len of a nil slice is 0, so a single length check covers both the
	// "field absent" and "field empty" cases (staticcheck S1009).
	if len(msg.SignedPeerRecord) == 0 {
		return nil, nil
	}
	env, _, err := record.ConsumeEnvelope(msg.SignedPeerRecord, peer.PeerRecordEnvelopeDomain)
	return env, err
}
// netNotifiee defines methods to be used with the IpfsDHT
// (a type alias of idService carrying the network callback methods).
type netNotifiee idService

// IDService converts the notifiee back to the underlying *idService.
func (nn *netNotifiee) IDService() *idService {
	return (*idService)(nn)
}
// Connected kicks off identification for every new connection via
// IdentifyWait.
func (nn *netNotifiee) Connected(n network.Network, v network.Conn) {
	nn.IDService().IdentifyWait(v)
}
// Disconnected stops tracking a closed connection and, when it was the
// last connection to the peer, downgrades the peer's address TTLs and
// requests removal of its peer handler.
func (nn *netNotifiee) Disconnected(n network.Network, v network.Conn) {
	ids := nn.IDService()
	// Stop tracking the connection.
	ids.removeConn(v)
	// undo the setting of addresses to peer.ConnectedAddrTTL we did
	ids.addrMu.Lock()
	defer ids.addrMu.Unlock()
	if ids.Host.Network().Connectedness(v.RemotePeer()) != network.Connected {
		// consider removing the peer handler for this
		select {
		case ids.rmPeerHandlerCh <- rmPeerHandlerReq{v.RemotePeer()}:
		case <-ids.ctx.Done():
			// service shutting down; skip the TTL downgrade
			return
		}
		// Last disconnect.
		ps := ids.Host.Peerstore()
		ps.UpdateAddrs(v.RemotePeer(), peerstore.ConnectedAddrTTL, peerstore.RecentlyConnectedAddrTTL)
	}
}
// The remaining notifiee callbacks are no-ops: identify does not act on
// stream open/close or listener events.
func (nn *netNotifiee) OpenedStream(n network.Network, v network.Stream) {}
func (nn *netNotifiee) ClosedStream(n network.Network, v network.Stream) {}
func (nn *netNotifiee) Listen(n network.Network, a ma.Multiaddr)         {}
func (nn *netNotifiee) ListenClose(n network.Network, a ma.Multiaddr)    {}
|
package main
import (
"fmt"
"github.com/hashicorp/terraform/helper/schema"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/types"
)
// resourceVm returns the Terraform resource definition for a vSphere
// virtual machine cloned from the snapshot of an existing source VM.
func resourceVm() *schema.Resource {
	return &schema.Resource{
		Create: resourceVmCreate,
		Read:   resourceVmRead,
		Delete: resourceVmDelete,
		Schema: map[string]*schema.Schema{
			// name of the new virtual machine
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			// name of the VM (under <datacenter>/vm/) to clone from
			"source": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"datacenter": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			// destination folder for the clone
			"folder": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"host": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			// resource pool under <datacenter>/host/<host>/Resources/
			"pool": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			// when true, share the source's disks via a child disk backing
			"linked_clone": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
			},
		},
	}
}
// resourceVmCreate clones a new virtual machine from the current
// snapshot of the configured source VM (optionally as a linked clone)
// and stores the clone's managed object reference value as the
// resource ID.
func resourceVmCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*govmomi.Client)
	// Resolve the source VM by inventory path.
	ref, err := client.SearchIndex().FindByInventoryPath(fmt.Sprintf("%s/vm/%s", d.Get("datacenter").(string), d.Get("source").(string)))
	if err != nil {
		return fmt.Errorf("Error reading vm: %s", err)
	}
	vm, ok := ref.(*govmomi.VirtualMachine)
	if !ok {
		return fmt.Errorf("Error reading vm")
	}
	// Resolve the destination folder.
	ref, err = client.SearchIndex().FindByInventoryPath(fmt.Sprintf("%v/vm/%v", d.Get("datacenter").(string), d.Get("folder").(string)))
	if err != nil {
		return fmt.Errorf("Error reading folder: %s", err)
	}
	f, ok := ref.(*govmomi.Folder)
	if !ok {
		return fmt.Errorf("Error reading folder")
	}
	// Resolve the target resource pool.
	ref, err = client.SearchIndex().FindByInventoryPath(fmt.Sprintf("%v/host/%v/Resources/%v", d.Get("datacenter").(string), d.Get("host").(string), d.Get("pool").(string)))
	if err != nil {
		return fmt.Errorf("Error reading resource pool: %s", err)
	}
	p, ok := ref.(*govmomi.ResourcePool)
	if !ok {
		return fmt.Errorf("Error reading resource pool")
	}
	pref := p.Reference()
	// The clone is taken from the source VM's current snapshot, so the
	// source must have at least one snapshot.
	var o mo.VirtualMachine
	err = client.Properties(vm.Reference(), []string{"snapshot"}, &o)
	if err != nil {
		return fmt.Errorf("Error reading snapshot")
	}
	if o.Snapshot == nil {
		return fmt.Errorf("Base VM has no snapshots")
	}
	sref := o.Snapshot.CurrentSnapshot
	relocateSpec := types.VirtualMachineRelocateSpec{
		Pool: &pref,
	}
	// A linked clone shares the parent's disks via a child disk backing.
	linkedClone := d.Get("linked_clone").(bool)
	if linkedClone {
		relocateSpec.DiskMoveType = "createNewChildDiskBacking"
	}
	cloneSpec := types.VirtualMachineCloneSpec{
		Snapshot: sref,
		Location: relocateSpec,
	}
	name := d.Get("name").(string)
	task, err := vm.Clone(f, name, cloneSpec)
	if err != nil {
		return fmt.Errorf("Error cloning vm: %s", err) // fixed typo: was "clonning"
	}
	info, err := task.WaitForResult(nil)
	if err != nil {
		return fmt.Errorf("Error cloning vm: %s", err)
	}
	d.SetId(info.Result.(types.ManagedObjectReference).Value)
	return nil
}
// resourceVmRead is a no-op: this provider does not refresh VM state.
func resourceVmRead(d *schema.ResourceData, meta interface{}) error {
	return nil
}
// resourceVmDelete is a no-op: this provider does not yet destroy VMs.
func resourceVmDelete(d *schema.ResourceData, meta interface{}) error {
	return nil
}
Support the 'terraform destroy' command.
package main
import (
"fmt"
"github.com/hashicorp/terraform/helper/schema"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/types"
)
// resourceVm returns the Terraform resource definition for a vSphere
// virtual machine cloned from the snapshot of an existing source VM.
func resourceVm() *schema.Resource {
	return &schema.Resource{
		Create: resourceVmCreate,
		Read:   resourceVmRead,
		Delete: resourceVmDelete,
		Schema: map[string]*schema.Schema{
			// name of the new virtual machine
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			// name of the VM (under <datacenter>/vm/) to clone from
			"source": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"datacenter": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			// destination folder for the clone
			"folder": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"host": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			// resource pool under <datacenter>/host/<host>/Resources/
			"pool": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			// when true, share the source's disks via a child disk backing
			"linked_clone": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
			},
		},
	}
}
// resourceVmCreate clones a new virtual machine from the current
// snapshot of the configured source VM (optionally as a linked clone)
// and stores the clone's managed object reference value as the
// resource ID.
func resourceVmCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*govmomi.Client)
	// Resolve the source VM by inventory path.
	ref, err := client.SearchIndex().FindByInventoryPath(fmt.Sprintf("%s/vm/%s", d.Get("datacenter").(string), d.Get("source").(string)))
	if err != nil {
		return fmt.Errorf("Error reading vm: %s", err)
	}
	vm, ok := ref.(*govmomi.VirtualMachine)
	if !ok {
		return fmt.Errorf("Error reading vm")
	}
	// Resolve the destination folder.
	ref, err = client.SearchIndex().FindByInventoryPath(fmt.Sprintf("%v/vm/%v", d.Get("datacenter").(string), d.Get("folder").(string)))
	if err != nil {
		return fmt.Errorf("Error reading folder: %s", err)
	}
	f, ok := ref.(*govmomi.Folder)
	if !ok {
		return fmt.Errorf("Error reading folder")
	}
	// Resolve the target resource pool.
	ref, err = client.SearchIndex().FindByInventoryPath(fmt.Sprintf("%v/host/%v/Resources/%v", d.Get("datacenter").(string), d.Get("host").(string), d.Get("pool").(string)))
	if err != nil {
		return fmt.Errorf("Error reading resource pool: %s", err)
	}
	p, ok := ref.(*govmomi.ResourcePool)
	if !ok {
		return fmt.Errorf("Error reading resource pool")
	}
	pref := p.Reference()
	// The clone is taken from the source VM's current snapshot, so the
	// source must have at least one snapshot.
	var o mo.VirtualMachine
	err = client.Properties(vm.Reference(), []string{"snapshot"}, &o)
	if err != nil {
		return fmt.Errorf("Error reading snapshot")
	}
	if o.Snapshot == nil {
		return fmt.Errorf("Base VM has no snapshots")
	}
	sref := o.Snapshot.CurrentSnapshot
	relocateSpec := types.VirtualMachineRelocateSpec{
		Pool: &pref,
	}
	// A linked clone shares the parent's disks via a child disk backing.
	linkedClone := d.Get("linked_clone").(bool)
	if linkedClone {
		relocateSpec.DiskMoveType = "createNewChildDiskBacking"
	}
	cloneSpec := types.VirtualMachineCloneSpec{
		Snapshot: sref,
		Location: relocateSpec,
	}
	name := d.Get("name").(string)
	task, err := vm.Clone(f, name, cloneSpec)
	if err != nil {
		return fmt.Errorf("Error cloning vm: %s", err) // fixed typo: was "clonning"
	}
	info, err := task.WaitForResult(nil)
	if err != nil {
		return fmt.Errorf("Error cloning vm: %s", err)
	}
	d.SetId(info.Result.(types.ManagedObjectReference).Value)
	return nil
}
// resourceVmRead is a no-op: this provider does not refresh VM state.
func resourceVmRead(d *schema.ResourceData, meta interface{}) error {
	return nil
}
// resourceVmDelete destroys the virtual machine identified by the
// resource ID and waits for the destroy task to complete.
func resourceVmDelete(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*govmomi.Client)
	// The resource ID holds the managed object reference value of the VM.
	ref := types.ManagedObjectReference{Type: "VirtualMachine", Value: d.Id()}
	vm := govmomi.NewVirtualMachine(client, ref)
	task, err := vm.Destroy()
	if err != nil {
		return fmt.Errorf("Error deleting vm: %s", err)
	}
	_, err = task.WaitForResult(nil)
	if err != nil {
		return fmt.Errorf("Error deleting vm: %s", err)
	}
	return nil
}
|
package main
import (
"fmt"
"log"
"github.com/hashicorp/terraform/helper/schema"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/types"
)
// resourceVm returns the Terraform resource definition for a vSphere
// virtual machine cloned from the snapshot of an existing source VM.
func resourceVm() *schema.Resource {
	return &schema.Resource{
		Create: resourceVmCreate,
		Read:   resourceVmRead,
		Update: resourceVmUpdate,
		Delete: resourceVmDelete,
		Schema: map[string]*schema.Schema{
			// name of the new virtual machine
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			// name of the VM (under <datacenter>/vm/) to clone from
			"source": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"datacenter": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			// destination folder for the clone
			"folder": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"host": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			// resource pool under <datacenter>/host/<host>/Resources/
			"pool": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			// when true, share the source's disks via a child disk backing
			"linked_clone": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
			},
		},
	}
}
// resourceVmCreate clones a new virtual machine from the current
// snapshot of the configured source VM (optionally as a linked clone)
// and stores the clone's BIOS UUID as the resource ID.
func resourceVmCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*govmomi.Client)
	// Resolve the source VM by inventory path.
	ref, err := client.SearchIndex().FindByInventoryPath(fmt.Sprintf("%s/vm/%s", d.Get("datacenter").(string), d.Get("source").(string)))
	if err != nil {
		return fmt.Errorf("Error reading vm: %s", err)
	}
	vm, ok := ref.(*govmomi.VirtualMachine)
	if !ok {
		return fmt.Errorf("Error reading vm")
	}
	// Resolve the destination folder.
	ref, err = client.SearchIndex().FindByInventoryPath(fmt.Sprintf("%v/vm/%v", d.Get("datacenter").(string), d.Get("folder").(string)))
	if err != nil {
		return fmt.Errorf("Error reading folder: %s", err)
	}
	f, ok := ref.(*govmomi.Folder)
	if !ok {
		return fmt.Errorf("Error reading folder")
	}
	// Resolve the target resource pool.
	ref, err = client.SearchIndex().FindByInventoryPath(fmt.Sprintf("%v/host/%v/Resources/%v", d.Get("datacenter").(string), d.Get("host").(string), d.Get("pool").(string)))
	if err != nil {
		return fmt.Errorf("Error reading resource pool: %s", err)
	}
	p, ok := ref.(*govmomi.ResourcePool)
	if !ok {
		return fmt.Errorf("Error reading resource pool")
	}
	pref := p.Reference()
	// The clone is taken from the source VM's current snapshot, so the
	// source must have at least one snapshot.
	var o mo.VirtualMachine
	err = client.Properties(vm.Reference(), []string{"snapshot"}, &o)
	if err != nil {
		return fmt.Errorf("Error reading snapshot")
	}
	if o.Snapshot == nil {
		return fmt.Errorf("Base VM has no snapshots")
	}
	sref := o.Snapshot.CurrentSnapshot
	relocateSpec := types.VirtualMachineRelocateSpec{
		Pool: &pref,
	}
	// A linked clone shares the parent's disks via a child disk backing.
	linkedClone := d.Get("linked_clone").(bool)
	if linkedClone {
		relocateSpec.DiskMoveType = "createNewChildDiskBacking"
	}
	cloneSpec := types.VirtualMachineCloneSpec{
		Snapshot: sref,
		Location: relocateSpec,
	}
	name := d.Get("name").(string)
	task, err := vm.Clone(f, name, cloneSpec)
	if err != nil {
		return fmt.Errorf("Error cloning vm: %s", err) // fixed typo: was "clonning"
	}
	info, err := task.WaitForResult(nil)
	if err != nil {
		return fmt.Errorf("Error cloning vm: %s", err)
	}
	// Read the BIOS UUID of the newly created clone (the task result),
	// not the source VM: the original queried vm.Reference() and would
	// have stored the source VM's UUID as the resource ID.
	var n mo.VirtualMachine
	err = client.Properties(info.Result.(types.ManagedObjectReference), []string{"config.uuid"}, &n)
	if err != nil {
		return fmt.Errorf("Error reading UUID")
	}
	d.SetId(n.Config.Uuid)
	return nil
}
// resourceVmRead is a no-op: this provider does not refresh VM state.
func resourceVmRead(d *schema.ResourceData, meta interface{}) error {
	return nil
}
// resourceVmUpdate is a no-op: in-place updates are not implemented.
func resourceVmUpdate(d *schema.ResourceData, meta interface{}) error {
	return nil
}
// resourceVmDelete is a no-op: this provider does not yet destroy VMs.
func resourceVmDelete(d *schema.ResourceData, meta interface{}) error {
	return nil
}
Do not support resource update.
package main
import (
"fmt"
"github.com/hashicorp/terraform/helper/schema"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/types"
)
// resourceVm returns the Terraform resource definition for a vSphere
// virtual machine cloned from the snapshot of an existing source VM.
func resourceVm() *schema.Resource {
	return &schema.Resource{
		Create: resourceVmCreate,
		Read:   resourceVmRead,
		Delete: resourceVmDelete,
		Schema: map[string]*schema.Schema{
			// name of the new virtual machine
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			// name of the VM (under <datacenter>/vm/) to clone from
			"source": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"datacenter": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			// destination folder for the clone
			"folder": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"host": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			// resource pool under <datacenter>/host/<host>/Resources/
			"pool": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			// when true, share the source's disks via a child disk backing
			"linked_clone": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
			},
		},
	}
}
// resourceVmCreate clones a new virtual machine from the current
// snapshot of the configured source VM (optionally as a linked clone)
// and stores the clone's BIOS UUID as the resource ID.
func resourceVmCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*govmomi.Client)
	// Resolve the source VM by inventory path.
	ref, err := client.SearchIndex().FindByInventoryPath(fmt.Sprintf("%s/vm/%s", d.Get("datacenter").(string), d.Get("source").(string)))
	if err != nil {
		return fmt.Errorf("Error reading vm: %s", err)
	}
	vm, ok := ref.(*govmomi.VirtualMachine)
	if !ok {
		return fmt.Errorf("Error reading vm")
	}
	// Resolve the destination folder.
	ref, err = client.SearchIndex().FindByInventoryPath(fmt.Sprintf("%v/vm/%v", d.Get("datacenter").(string), d.Get("folder").(string)))
	if err != nil {
		return fmt.Errorf("Error reading folder: %s", err)
	}
	f, ok := ref.(*govmomi.Folder)
	if !ok {
		return fmt.Errorf("Error reading folder")
	}
	// Resolve the target resource pool.
	ref, err = client.SearchIndex().FindByInventoryPath(fmt.Sprintf("%v/host/%v/Resources/%v", d.Get("datacenter").(string), d.Get("host").(string), d.Get("pool").(string)))
	if err != nil {
		return fmt.Errorf("Error reading resource pool: %s", err)
	}
	p, ok := ref.(*govmomi.ResourcePool)
	if !ok {
		return fmt.Errorf("Error reading resource pool")
	}
	pref := p.Reference()
	// The clone is taken from the source VM's current snapshot, so the
	// source must have at least one snapshot.
	var o mo.VirtualMachine
	err = client.Properties(vm.Reference(), []string{"snapshot"}, &o)
	if err != nil {
		return fmt.Errorf("Error reading snapshot")
	}
	if o.Snapshot == nil {
		return fmt.Errorf("Base VM has no snapshots")
	}
	sref := o.Snapshot.CurrentSnapshot
	relocateSpec := types.VirtualMachineRelocateSpec{
		Pool: &pref,
	}
	// A linked clone shares the parent's disks via a child disk backing.
	linkedClone := d.Get("linked_clone").(bool)
	if linkedClone {
		relocateSpec.DiskMoveType = "createNewChildDiskBacking"
	}
	cloneSpec := types.VirtualMachineCloneSpec{
		Snapshot: sref,
		Location: relocateSpec,
	}
	name := d.Get("name").(string)
	task, err := vm.Clone(f, name, cloneSpec)
	if err != nil {
		return fmt.Errorf("Error cloning vm: %s", err) // fixed typo: was "clonning"
	}
	info, err := task.WaitForResult(nil)
	if err != nil {
		return fmt.Errorf("Error cloning vm: %s", err)
	}
	// Read the BIOS UUID of the newly created clone (the task result),
	// not the source VM: the original queried vm.Reference() and would
	// have stored the source VM's UUID as the resource ID.
	var n mo.VirtualMachine
	err = client.Properties(info.Result.(types.ManagedObjectReference), []string{"config.uuid"}, &n)
	if err != nil {
		return fmt.Errorf("Error reading UUID")
	}
	d.SetId(n.Config.Uuid)
	return nil
}
// resourceVmRead is a no-op: this provider does not refresh VM state.
func resourceVmRead(d *schema.ResourceData, meta interface{}) error {
	return nil
}
// resourceVmDelete is a no-op: this provider does not yet destroy VMs.
func resourceVmDelete(d *schema.ResourceData, meta interface{}) error {
	return nil
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package expvar provides a standardized interface to public variables, such
// as operation counters in servers. It exposes these variables via HTTP at
// /debug/vars in JSON format.
//
// Operations to set or modify these public variables are atomic.
//
// In addition to adding the HTTP handler, this package registers the
// following variables:
//
// cmdline os.Args
// memstats runtime.Memstats
//
// The package is sometimes only imported for the side effect of
// registering its HTTP handler and the above variables. To use it
// this way, link this package into your program:
// import _ "expvar"
//
package expvar
import (
"bytes"
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"runtime"
"strconv"
"sync"
)
// Var is an abstract type for all exported variables.
type Var interface {
	// String returns the variable's value formatted for /debug/vars output.
	String() string
}
// Int is a 64-bit integer variable that satisfies the Var interface.
type Int struct {
	i  int64
	mu sync.Mutex
}

// String formats the current value in base 10.
func (v *Int) String() string {
	v.mu.Lock()
	s := strconv.FormatInt(v.i, 10)
	v.mu.Unlock()
	return s
}

// Add increments the value by delta.
func (v *Int) Add(delta int64) {
	v.mu.Lock()
	v.i += delta
	v.mu.Unlock()
}

// Set replaces the value.
func (v *Int) Set(value int64) {
	v.mu.Lock()
	v.i = value
	v.mu.Unlock()
}
// Float is a 64-bit float variable that satisfies the Var interface.
type Float struct {
	f  float64
	mu sync.Mutex
}

// String formats the current value in shortest round-trip 'g' form.
func (v *Float) String() string {
	v.mu.Lock()
	s := strconv.FormatFloat(v.f, 'g', -1, 64)
	v.mu.Unlock()
	return s
}

// Add adds delta to v.
func (v *Float) Add(delta float64) {
	v.mu.Lock()
	v.f += delta
	v.mu.Unlock()
}

// Set sets v to value.
func (v *Float) Set(value float64) {
	v.mu.Lock()
	v.f = value
	v.mu.Unlock()
}
// Map is a string-to-Var map variable that satisfies the Var interface.
type Map struct {
	m  map[string]Var // guarded by mu
	mu sync.RWMutex
}

// KeyValue represents a single entry in a Map.
type KeyValue struct {
	Key   string
	Value Var
}

// String renders the map as a JSON-like object; entry order follows
// Go's randomized map iteration order.
func (v *Map) String() string {
	v.mu.RLock()
	defer v.mu.RUnlock()
	var b bytes.Buffer
	fmt.Fprintf(&b, "{")
	first := true
	for key, val := range v.m {
		if !first {
			fmt.Fprintf(&b, ", ")
		}
		fmt.Fprintf(&b, "\"%s\": %v", key, val)
		first = false
	}
	fmt.Fprintf(&b, "}")
	return b.String()
}

// Init allocates the underlying map and returns v. A zero Map must be
// initialized before entries are stored (writes to a nil map panic).
func (v *Map) Init() *Map {
	v.m = make(map[string]Var)
	return v
}

// Get returns the Var stored under key, or nil if absent.
func (v *Map) Get(key string) Var {
	v.mu.RLock()
	defer v.mu.RUnlock()
	return v.m[key]
}

// Set stores av under key.
func (v *Map) Set(key string, av Var) {
	v.mu.Lock()
	defer v.mu.Unlock()
	v.m[key] = av
}
// Add adds delta to the *Int value stored under the given map key,
// creating the entry if it does not exist. Keys holding a non-Int value
// are left untouched.
func (v *Map) Add(key string, delta int64) {
	v.mu.RLock()
	av, ok := v.m[key]
	v.mu.RUnlock()
	if !ok {
		// check again under the write lock: another goroutine may have
		// inserted the key between RUnlock and Lock. Reassign av (the
		// original discarded the value with `_`, leaving av nil and
		// silently dropping delta in that race).
		v.mu.Lock()
		if av, ok = v.m[key]; !ok {
			av = new(Int)
			v.m[key] = av
		}
		v.mu.Unlock()
	}
	// Add to Int; ignore otherwise.
	if iv, ok := av.(*Int); ok {
		iv.Add(delta)
	}
}
// AddFloat adds delta to the *Float value stored under the given map
// key, creating the entry if it does not exist. Keys holding a
// non-Float value are left untouched.
func (v *Map) AddFloat(key string, delta float64) {
	v.mu.RLock()
	av, ok := v.m[key]
	v.mu.RUnlock()
	if !ok {
		// check again under the write lock: another goroutine may have
		// inserted the key between RUnlock and Lock. Reassign av (the
		// original discarded the value with `_`, leaving av nil and
		// silently dropping delta in that race).
		v.mu.Lock()
		if av, ok = v.m[key]; !ok {
			av = new(Float)
			v.m[key] = av
		}
		v.mu.Unlock()
	}
	// Add to Float; ignore otherwise.
	if iv, ok := av.(*Float); ok {
		iv.Add(delta)
	}
}
// Do calls f for each entry in the map.
// The map is locked during the iteration,
// but existing entries may be concurrently updated.
func (v *Map) Do(f func(KeyValue)) {
	v.mu.RLock()
	defer v.mu.RUnlock()
	// note: the loop variable v shadows the receiver inside the body.
	for k, v := range v.m {
		f(KeyValue{k, v})
	}
}
// String is a string variable, and satisfies the Var interface.
type String struct {
s string
}
func (v *String) String() string { return strconv.Quote(v.s) }
func (v *String) Set(value string) { v.s = value }
// Func implements Var by calling the function
// and formatting the returned value using JSON.
type Func func() interface{}

// String returns the JSON encoding of the wrapped function's result.
// Marshal errors are ignored, yielding an empty string.
func (f Func) String() string {
	encoded, _ := json.Marshal(f())
	return string(encoded)
}
// All published variables.
var (
	mutex sync.RWMutex // guards vars
	vars map[string]Var = make(map[string]Var) // registry of every published variable, by name
)
// Publish declares a named exported variable. This should be called from a
// package's init function when it creates its Vars. If the name is already
// registered then this will log.Panic.
func Publish(name string, v Var) {
	mutex.Lock()
	defer mutex.Unlock()
	_, dup := vars[name]
	if dup {
		log.Panicln("Reuse of exported var name:", name)
	}
	vars[name] = v
}
// Get retrieves a named exported variable, or nil if the name has not
// been registered.
func Get(name string) Var {
	mutex.RLock()
	v := vars[name]
	mutex.RUnlock()
	return v
}
// Convenience functions for creating new exported variables.

// NewInt publishes and returns a new Int with the given name.
func NewInt(name string) *Int {
	v := new(Int)
	Publish(name, v)
	return v
}

// NewFloat publishes and returns a new Float with the given name.
func NewFloat(name string) *Float {
	v := new(Float)
	Publish(name, v)
	return v
}

// NewMap publishes and returns a new initialized Map with the given name.
func NewMap(name string) *Map {
	v := new(Map).Init()
	Publish(name, v)
	return v
}

// NewString publishes and returns a new String with the given name.
func NewString(name string) *String {
	v := new(String)
	Publish(name, v)
	return v
}
// Do calls f for each exported variable.
// The global variable map is locked during the iteration,
// but existing entries may be concurrently updated.
func Do(f func(KeyValue)) {
	mutex.RLock()
	defer mutex.RUnlock()
	for k, v := range vars {
		f(KeyValue{k, v})
	}
}
// expvarHandler serves all published variables as a JSON object at
// /debug/vars, one "name": value entry per variable.
func expvarHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	fmt.Fprint(w, "{\n")
	sep := ""
	Do(func(kv KeyValue) {
		fmt.Fprintf(w, "%s%q: %s", sep, kv.Key, kv.Value)
		sep = ",\n"
	})
	fmt.Fprint(w, "\n}\n")
}
func cmdline() interface{} {
return os.Args
}
func memstats() interface{} {
stats := new(runtime.MemStats)
runtime.ReadMemStats(stats)
return *stats
}
// init registers the /debug/vars HTTP handler and publishes the
// built-in cmdline and memstats variables.
func init() {
	http.HandleFunc("/debug/vars", expvarHandler)
	Publish("cmdline", Func(cmdline))
	Publish("memstats", Func(memstats))
}
expvar: add locking to String, and use RWMutex properly throughout.
R=bradfitz
CC=golang-dev
https://golang.org/cl/5754043
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package expvar provides a standardized interface to public variables, such
// as operation counters in servers. It exposes these variables via HTTP at
// /debug/vars in JSON format.
//
// Operations to set or modify these public variables are atomic.
//
// In addition to adding the HTTP handler, this package registers the
// following variables:
//
// cmdline os.Args
// memstats runtime.Memstats
//
// The package is sometimes only imported for the side effect of
// registering its HTTP handler and the above variables. To use it
// this way, link this package into your program:
// import _ "expvar"
//
package expvar
import (
"bytes"
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"runtime"
"strconv"
"sync"
)
// Var is an abstract type for all exported variables.
type Var interface {
	// String returns the variable's value formatted for /debug/vars output.
	String() string
}
// Int is a 64-bit integer variable that satisfies the Var interface.
type Int struct {
	i  int64
	mu sync.RWMutex
}

// String formats the current value in base 10.
func (v *Int) String() string {
	v.mu.RLock()
	s := strconv.FormatInt(v.i, 10)
	v.mu.RUnlock()
	return s
}

// Add increments the value by delta.
func (v *Int) Add(delta int64) {
	v.mu.Lock()
	v.i += delta
	v.mu.Unlock()
}

// Set replaces the value.
func (v *Int) Set(value int64) {
	v.mu.Lock()
	v.i = value
	v.mu.Unlock()
}
// Float is a 64-bit float variable that satisfies the Var interface.
type Float struct {
	f  float64
	mu sync.RWMutex
}

// String formats the current value in shortest round-trip 'g' form.
func (v *Float) String() string {
	v.mu.RLock()
	s := strconv.FormatFloat(v.f, 'g', -1, 64)
	v.mu.RUnlock()
	return s
}

// Add adds delta to v.
func (v *Float) Add(delta float64) {
	v.mu.Lock()
	v.f += delta
	v.mu.Unlock()
}

// Set sets v to value.
func (v *Float) Set(value float64) {
	v.mu.Lock()
	v.f = value
	v.mu.Unlock()
}
// Map is a string-to-Var map variable that satisfies the Var interface.
type Map struct {
	m  map[string]Var // guarded by mu
	mu sync.RWMutex
}

// KeyValue represents a single entry in a Map.
type KeyValue struct {
	Key   string
	Value Var
}

// String renders the map as a JSON-like object; entry order follows
// Go's randomized map iteration order.
func (v *Map) String() string {
	v.mu.RLock()
	defer v.mu.RUnlock()
	var b bytes.Buffer
	fmt.Fprintf(&b, "{")
	first := true
	for key, val := range v.m {
		if !first {
			fmt.Fprintf(&b, ", ")
		}
		fmt.Fprintf(&b, "\"%s\": %v", key, val)
		first = false
	}
	fmt.Fprintf(&b, "}")
	return b.String()
}

// Init allocates the underlying map and returns v. A zero Map must be
// initialized before entries are stored (writes to a nil map panic).
func (v *Map) Init() *Map {
	v.m = make(map[string]Var)
	return v
}

// Get returns the Var stored under key, or nil if absent.
func (v *Map) Get(key string) Var {
	v.mu.RLock()
	defer v.mu.RUnlock()
	return v.m[key]
}

// Set stores av under key.
func (v *Map) Set(key string, av Var) {
	v.mu.Lock()
	defer v.mu.Unlock()
	v.m[key] = av
}
// Add adds delta to the *Int value stored under the given map key,
// creating the entry if it does not exist. Keys holding a non-Int value
// are left untouched.
func (v *Map) Add(key string, delta int64) {
	v.mu.RLock()
	av, ok := v.m[key]
	v.mu.RUnlock()
	if !ok {
		// check again under the write lock: another goroutine may have
		// inserted the key between RUnlock and Lock. Reassign av (the
		// original discarded the value with `_`, leaving av nil and
		// silently dropping delta in that race).
		v.mu.Lock()
		if av, ok = v.m[key]; !ok {
			av = new(Int)
			v.m[key] = av
		}
		v.mu.Unlock()
	}
	// Add to Int; ignore otherwise.
	if iv, ok := av.(*Int); ok {
		iv.Add(delta)
	}
}
// AddFloat adds delta to the *Float value stored under the given map
// key, creating the entry if it does not exist. Keys holding a
// non-Float value are left untouched.
func (v *Map) AddFloat(key string, delta float64) {
	v.mu.RLock()
	av, ok := v.m[key]
	v.mu.RUnlock()
	if !ok {
		// check again under the write lock: another goroutine may have
		// inserted the key between RUnlock and Lock. Reassign av (the
		// original discarded the value with `_`, leaving av nil and
		// silently dropping delta in that race).
		v.mu.Lock()
		if av, ok = v.m[key]; !ok {
			av = new(Float)
			v.m[key] = av
		}
		v.mu.Unlock()
	}
	// Add to Float; ignore otherwise.
	if iv, ok := av.(*Float); ok {
		iv.Add(delta)
	}
}
// Do calls f for each entry in the map.
// The map is locked during the iteration,
// but existing entries may be concurrently updated.
func (v *Map) Do(f func(KeyValue)) {
	v.mu.RLock()
	defer v.mu.RUnlock()
	// note: the loop variable v shadows the receiver inside the body.
	for k, v := range v.m {
		f(KeyValue{k, v})
	}
}
// String is a string variable, and satisfies the Var interface.
type String struct {
s string
mu sync.RWMutex
}
func (v *String) String() string {
v.mu.RLock()
defer v.mu.RUnlock()
return strconv.Quote(v.s)
}
func (v *String) Set(value string) {
v.mu.Lock()
defer v.mu.Unlock()
v.s = value
}
// Func implements Var by calling the function
// and formatting the returned value using JSON.
type Func func() interface{}

// String returns the JSON encoding of the wrapped function's result.
// Marshal errors are ignored, yielding an empty string.
func (f Func) String() string {
	encoded, _ := json.Marshal(f())
	return string(encoded)
}
// All published variables.
var (
	mutex sync.RWMutex // guards vars
	vars map[string]Var = make(map[string]Var) // registry of every published variable, by name
)
// Publish declares a named exported variable. This should be called from a
// package's init function when it creates its Vars. If the name is already
// registered then this will log.Panic.
func Publish(name string, v Var) {
	mutex.Lock()
	defer mutex.Unlock()
	_, dup := vars[name]
	if dup {
		log.Panicln("Reuse of exported var name:", name)
	}
	vars[name] = v
}
// Get retrieves a named exported variable, or nil if the name has not
// been registered.
func Get(name string) Var {
	mutex.RLock()
	v := vars[name]
	mutex.RUnlock()
	return v
}
// Convenience functions for creating new exported variables.

// NewInt publishes and returns a new Int with the given name.
func NewInt(name string) *Int {
	v := new(Int)
	Publish(name, v)
	return v
}

// NewFloat publishes and returns a new Float with the given name.
func NewFloat(name string) *Float {
	v := new(Float)
	Publish(name, v)
	return v
}

// NewMap publishes and returns a new initialized Map with the given name.
func NewMap(name string) *Map {
	v := new(Map).Init()
	Publish(name, v)
	return v
}

// NewString publishes and returns a new String with the given name.
func NewString(name string) *String {
	v := new(String)
	Publish(name, v)
	return v
}
// Do calls f for each exported variable.
// The global variable map is locked during the iteration,
// but existing entries may be concurrently updated.
func Do(f func(KeyValue)) {
	mutex.RLock()
	defer mutex.RUnlock()
	for k, v := range vars {
		f(KeyValue{k, v})
	}
}
// expvarHandler serves all published variables as a JSON object at
// /debug/vars, one "name": value entry per variable.
func expvarHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	fmt.Fprint(w, "{\n")
	sep := ""
	Do(func(kv KeyValue) {
		fmt.Fprintf(w, "%s%q: %s", sep, kv.Key, kv.Value)
		sep = ",\n"
	})
	fmt.Fprint(w, "\n}\n")
}
func cmdline() interface{} {
return os.Args
}
func memstats() interface{} {
stats := new(runtime.MemStats)
runtime.ReadMemStats(stats)
return *stats
}
// init registers the /debug/vars HTTP handler and publishes the
// built-in cmdline and memstats variables.
func init() {
	http.HandleFunc("/debug/vars", expvarHandler)
	Publish("cmdline", Func(cmdline))
	Publish("memstats", Func(memstats))
}
|
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package net
import (
"flag"
"fmt"
"io"
"os"
"regexp"
"runtime"
"testing"
"time"
)
// newLocalListener returns a TCP listener bound to an ephemeral
// loopback port, trying IPv4 first and falling back to IPv6. It fails
// the test if neither is available.
func newLocalListener(t *testing.T) Listener {
	ln, err := Listen("tcp", "127.0.0.1:0")
	if err != nil {
		ln, err = Listen("tcp6", "[::1]:0")
	}
	if err != nil {
		t.Fatal(err)
	}
	return ln
}
// TestDialTimeout checks that DialTimeout returns an error satisfying
// the timeout interface when the connection cannot complete in time.
// The setup is platform-specific: on Linux the listener's kernel
// backlog is filled first; on darwin/windows a reserved port on a
// hopefully-dead 127/8 address is dialed; other platforms are skipped.
func TestDialTimeout(t *testing.T) {
	ln := newLocalListener(t)
	defer ln.Close()
	errc := make(chan error)
	numConns := listenerBacklog + 10
	// TODO(bradfitz): It's hard to test this in a portable
	// way. This is unfortunate, but works for now.
	switch runtime.GOOS {
	case "linux":
		// The kernel will start accepting TCP connections before userspace
		// gets a chance to not accept them, so fire off a bunch to fill up
		// the kernel's backlog. Then we test we get a failure after that.
		for i := 0; i < numConns; i++ {
			go func() {
				_, err := DialTimeout("tcp", ln.Addr().String(), 200*time.Millisecond)
				errc <- err
			}()
		}
	case "darwin", "windows":
		// At least OS X 10.7 seems to accept any number of
		// connections, ignoring listen's backlog, so resort
		// to connecting to a hopefully-dead 127/8 address.
		// Same for windows.
		//
		// Use an IANA reserved port (49151) instead of 80, because
		// on our 386 builder, this Dial succeeds, connecting
		// to an IIS web server somewhere. The data center
		// or VM or firewall must be stealing the TCP connection.
		//
		// IANA Service Name and Transport Protocol Port Number Registry
		// <http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xml>
		go func() {
			c, err := DialTimeout("tcp", "127.0.71.111:49151", 200*time.Millisecond)
			if err == nil {
				err = fmt.Errorf("unexpected: connected to %s!", c.RemoteAddr())
				c.Close()
			}
			errc <- err
		}()
	default:
		// TODO(bradfitz):
		// OpenBSD may have a reject route to 127/8 except 127.0.0.1/32
		// by default. FreeBSD likely works, but is untested.
		// TODO(rsc):
		// The timeout never happens on Windows. Why? Issue 3016.
		t.Skipf("skipping test on %q; untested.", runtime.GOOS)
	}
	// Collect results: the test passes on the first timeout error seen,
	// and fails if every dial connects or anything takes too long.
	connected := 0
	for {
		select {
		case <-time.After(15 * time.Second):
			t.Fatal("too slow")
		case err := <-errc:
			if err == nil {
				connected++
				if connected == numConns {
					t.Fatal("all connections connected; expected some to time out")
				}
			} else {
				terr, ok := err.(timeout)
				if !ok {
					t.Fatalf("got error %q; want error with timeout interface", err)
				}
				if !terr.Timeout() {
					t.Fatalf("got error %q; not a timeout", err)
				}
				// Pass. We saw a timeout error.
				return
			}
		}
	}
}
// TestSelfConnect checks that Dial never completes via a TCP
// self-connection (where the chosen ephemeral source port equals the
// destination port on the same address). See the comment in DialTCP.
func TestSelfConnect(t *testing.T) {
	if runtime.GOOS == "windows" {
		// TODO(brainman): do not know why it hangs.
		t.Skip("skipping known-broken test on windows")
	}
	// Test that Dial does not honor self-connects.
	// See the comment in DialTCP.
	// Find a port that would be used as a local address.
	l, err := Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	c, err := Dial("tcp", l.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	addr := c.LocalAddr().String()
	c.Close()
	l.Close()
	// Try to connect to that address repeatedly.
	// Nothing is listening there any more, so every Dial should fail;
	// a success would indicate a self-connect.
	n := 100000
	if testing.Short() {
		n = 1000
	}
	switch runtime.GOOS {
	case "darwin", "freebsd", "netbsd", "openbsd", "plan9", "windows":
		// Non-Linux systems take a long time to figure
		// out that there is nothing listening on localhost.
		n = 100
	}
	for i := 0; i < n; i++ {
		c, err := Dial("tcp", addr)
		if err == nil {
			c.Close()
			t.Errorf("#%d: Dial %q succeeded", i, addr)
		}
	}
}
// runErrorTest gates TestDialError, which depends on external DNS
// behavior and is therefore opt-in.
var runErrorTest = flag.Bool("run_error_test", false, "let TestDialError check for dns errors")

// DialErrorTest describes one expected-failure case for Dial: a
// network, a remote address, and a regexp the error string must match.
type DialErrorTest struct {
	Net     string
	Raddr   string
	Pattern string
}

// dialErrorTests enumerates dial attempts that must fail, with the
// expected error-message pattern for each.
var dialErrorTests = []DialErrorTest{
	{
		"datakit", "mh/astro/r70",
		"dial datakit mh/astro/r70: unknown network datakit",
	},
	{
		"tcp", "127.0.0.1:☺",
		"dial tcp 127.0.0.1:☺: unknown port tcp/☺",
	},
	{
		"tcp", "no-such-name.google.com.:80",
		"dial tcp no-such-name.google.com.:80: lookup no-such-name.google.com.( on .*)?: no (.*)",
	},
	{
		"tcp", "no-such-name.no-such-top-level-domain.:80",
		"dial tcp no-such-name.no-such-top-level-domain.:80: lookup no-such-name.no-such-top-level-domain.( on .*)?: no (.*)",
	},
	{
		"tcp", "no-such-name:80",
		`dial tcp no-such-name:80: lookup no-such-name\.(.*\.)?( on .*)?: no (.*)`,
	},
	{
		"tcp", "mh/astro/r70:http",
		"dial tcp mh/astro/r70:http: lookup mh/astro/r70: invalid domain name",
	},
	{
		"unix", "/etc/file-not-found",
		"dial unix /etc/file-not-found: no such file or directory",
	},
	{
		"unix", "/etc/",
		"dial unix /etc/: (permission denied|socket operation on non-socket|connection refused)",
	},
	{
		"unixpacket", "/etc/file-not-found",
		"dial unixpacket /etc/file-not-found: no such file or directory",
	},
	{
		"unixpacket", "/etc/",
		"dial unixpacket /etc/: (permission denied|socket operation on non-socket|connection refused)",
	},
}

// duplicateErrorPattern matches an error mentioning "dial" twice,
// which would indicate redundant error wrapping.
var duplicateErrorPattern = `dial (.*) dial (.*)`
// TestDialError checks that Dial returns the expected error string for
// a table of bad network/address pairs, and that no error mentions
// "dial" twice. It is disabled by default because it depends on DNS
// behavior; enable with -run_error_test.
func TestDialError(t *testing.T) {
	if !*runErrorTest {
		t.Logf("test disabled; use -run_error_test to enable")
		return
	}
	for i, tt := range dialErrorTests {
		c, err := Dial(tt.Net, tt.Raddr)
		if c != nil {
			c.Close()
		}
		if err == nil {
			t.Errorf("#%d: nil error, want match for %#q", i, tt.Pattern)
			continue
		}
		s := err.Error()
		// Was: the MatchString error was discarded, so a malformed
		// pattern silently disabled the check. Report it instead.
		match, merr := regexp.MatchString(tt.Pattern, s)
		if merr != nil {
			t.Errorf("#%d: bad pattern %#q: %v", i, tt.Pattern, merr)
			continue
		}
		if !match {
			t.Errorf("#%d: %q, want match for %#q", i, s, tt.Pattern)
		}
		match, merr = regexp.MatchString(duplicateErrorPattern, s)
		if merr != nil {
			t.Errorf("#%d: bad pattern %#q: %v", i, duplicateErrorPattern, merr)
		} else if match {
			t.Errorf("#%d: %q, duplicate error return from Dial", i, s)
		}
	}
}
// TestDialTimeoutFDLeak fires many concurrent DialTimeout calls at a
// local listener that never accepts, and then checks (via
// /proc/self/fd) that timed-out dials released their file
// descriptors. Linux-only because numFD is Linux-only.
func TestDialTimeoutFDLeak(t *testing.T) {
	if runtime.GOOS != "linux" {
		// TODO(bradfitz): test on other platforms
		t.Skipf("skipping test on %q", runtime.GOOS)
	}
	ln := newLocalListener(t)
	defer ln.Close()
	type connErr struct {
		conn Conn
		err  error
	}
	dials := listenerBacklog + 100
	// used to be listenerBacklog + 5, but was found to be unreliable, issue 4384.
	maxGoodConnect := listenerBacklog + runtime.NumCPU()*10
	resc := make(chan connErr)
	for i := 0; i < dials; i++ {
		go func() {
			conn, err := DialTimeout("tcp", ln.Addr().String(), 500*time.Millisecond)
			resc <- connErr{conn, err}
		}()
	}
	var firstErr string
	var ngood int
	var toClose []io.Closer
	// Collect every result; successful connections are closed later so
	// their FDs don't distort the leak count below.
	for i := 0; i < dials; i++ {
		ce := <-resc
		if ce.err == nil {
			ngood++
			if ngood > maxGoodConnect {
				t.Errorf("%d good connects; expected at most %d", ngood, maxGoodConnect)
			}
			toClose = append(toClose, ce.conn)
			continue
		}
		err := ce.err
		if firstErr == "" {
			firstErr = err.Error()
		} else if err.Error() != firstErr {
			t.Fatalf("inconsistent error messages: first was %q, then later %q", firstErr, err)
		}
	}
	for _, c := range toClose {
		c.Close()
	}
	// FDs may be released asynchronously; poll for up to ~1s before
	// declaring a leak.
	for i := 0; i < 100; i++ {
		if got := numFD(); got < dials {
			// Test passes.
			return
		}
		time.Sleep(10 * time.Millisecond)
	}
	if got := numFD(); got >= dials {
		t.Errorf("num fds after %d timeouts = %d; want <%d", dials, got, dials)
	}
}
// numFD reports the number of file descriptors currently open in this
// process, by counting entries in /proc/self/fd. Linux only; it
// panics elsewhere.
func numFD() int {
	if runtime.GOOS != "linux" {
		// All tests using this should be skipped anyway, but:
		panic("numFDs not implemented on " + runtime.GOOS)
	}
	f, err := os.Open("/proc/self/fd")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	names, err := f.Readdirnames(0)
	if err != nil {
		panic(err)
	}
	return len(names)
}
net: add test for Dial and Listen arguments
R=dave, bradfitz
CC=golang-dev
https://golang.org/cl/7305081
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package net
import (
"flag"
"fmt"
"io"
"os"
"reflect"
"regexp"
"runtime"
"testing"
"time"
)
// newLocalListener opens a TCP listener on an ephemeral localhost
// port, preferring IPv4 and falling back to IPv6. It fails the test
// immediately if neither address family is usable.
func newLocalListener(t *testing.T) Listener {
	ln, err := Listen("tcp", "127.0.0.1:0")
	if err == nil {
		return ln
	}
	ln, err = Listen("tcp6", "[::1]:0")
	if err != nil {
		t.Fatal(err)
	}
	return ln
}
// TestDialTimeout verifies that DialTimeout reports an error
// satisfying the timeout interface when a connection cannot complete
// within the deadline. The setup is platform-specific: on Linux the
// listener's kernel backlog is filled first so later dials stall; on
// darwin/windows a reserved, hopefully-dead address is dialed instead;
// other platforms are skipped.
func TestDialTimeout(t *testing.T) {
	ln := newLocalListener(t)
	defer ln.Close()
	errc := make(chan error)
	numConns := listenerBacklog + 10
	// TODO(bradfitz): It's hard to test this in a portable
	// way. This is unfortunate, but works for now.
	switch runtime.GOOS {
	case "linux":
		// The kernel will start accepting TCP connections before userspace
		// gets a chance to not accept them, so fire off a bunch to fill up
		// the kernel's backlog. Then we test we get a failure after that.
		for i := 0; i < numConns; i++ {
			go func() {
				_, err := DialTimeout("tcp", ln.Addr().String(), 200*time.Millisecond)
				errc <- err
			}()
		}
	case "darwin", "windows":
		// At least OS X 10.7 seems to accept any number of
		// connections, ignoring listen's backlog, so resort
		// to connecting to a hopefully-dead 127/8 address.
		// Same for windows.
		//
		// Use an IANA reserved port (49151) instead of 80, because
		// on our 386 builder, this Dial succeeds, connecting
		// to an IIS web server somewhere. The data center
		// or VM or firewall must be stealing the TCP connection.
		//
		// IANA Service Name and Transport Protocol Port Number Registry
		// <http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xml>
		go func() {
			c, err := DialTimeout("tcp", "127.0.71.111:49151", 200*time.Millisecond)
			if err == nil {
				err = fmt.Errorf("unexpected: connected to %s!", c.RemoteAddr())
				c.Close()
			}
			errc <- err
		}()
	default:
		// TODO(bradfitz):
		// OpenBSD may have a reject route to 127/8 except 127.0.0.1/32
		// by default. FreeBSD likely works, but is untested.
		// TODO(rsc):
		// The timeout never happens on Windows. Why? Issue 3016.
		t.Skipf("skipping test on %q; untested.", runtime.GOOS)
	}
	// Drain results: the test passes on the first timeout error seen;
	// it fails if every dial connects or nothing arrives in 15s.
	connected := 0
	for {
		select {
		case <-time.After(15 * time.Second):
			t.Fatal("too slow")
		case err := <-errc:
			if err == nil {
				connected++
				if connected == numConns {
					t.Fatal("all connections connected; expected some to time out")
				}
			} else {
				terr, ok := err.(timeout)
				if !ok {
					t.Fatalf("got error %q; want error with timeout interface", err)
				}
				if !terr.Timeout() {
					t.Fatalf("got error %q; not a timeout", err)
				}
				// Pass. We saw a timeout error.
				return
			}
		}
	}
}
// TestSelfConnect checks that Dial never completes via a TCP
// self-connection (where the chosen ephemeral source port equals the
// destination port on the same address). See the comment in DialTCP.
func TestSelfConnect(t *testing.T) {
	if runtime.GOOS == "windows" {
		// TODO(brainman): do not know why it hangs.
		t.Skip("skipping known-broken test on windows")
	}
	// Test that Dial does not honor self-connects.
	// See the comment in DialTCP.
	// Find a port that would be used as a local address.
	l, err := Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	c, err := Dial("tcp", l.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	addr := c.LocalAddr().String()
	c.Close()
	l.Close()
	// Try to connect to that address repeatedly.
	// Nothing is listening there any more, so every Dial should fail;
	// a success would indicate a self-connect.
	n := 100000
	if testing.Short() {
		n = 1000
	}
	switch runtime.GOOS {
	case "darwin", "freebsd", "netbsd", "openbsd", "plan9", "windows":
		// Non-Linux systems take a long time to figure
		// out that there is nothing listening on localhost.
		n = 100
	}
	for i := 0; i < n; i++ {
		c, err := Dial("tcp", addr)
		if err == nil {
			c.Close()
			t.Errorf("#%d: Dial %q succeeded", i, addr)
		}
	}
}
// runErrorTest gates TestDialError, which depends on external DNS
// behavior and is therefore opt-in.
var runErrorTest = flag.Bool("run_error_test", false, "let TestDialError check for dns errors")

// DialErrorTest describes one expected-failure case for Dial: a
// network, a remote address, and a regexp the error string must match.
type DialErrorTest struct {
	Net     string
	Raddr   string
	Pattern string
}

// dialErrorTests enumerates dial attempts that must fail, with the
// expected error-message pattern for each.
var dialErrorTests = []DialErrorTest{
	{
		"datakit", "mh/astro/r70",
		"dial datakit mh/astro/r70: unknown network datakit",
	},
	{
		"tcp", "127.0.0.1:☺",
		"dial tcp 127.0.0.1:☺: unknown port tcp/☺",
	},
	{
		"tcp", "no-such-name.google.com.:80",
		"dial tcp no-such-name.google.com.:80: lookup no-such-name.google.com.( on .*)?: no (.*)",
	},
	{
		"tcp", "no-such-name.no-such-top-level-domain.:80",
		"dial tcp no-such-name.no-such-top-level-domain.:80: lookup no-such-name.no-such-top-level-domain.( on .*)?: no (.*)",
	},
	{
		"tcp", "no-such-name:80",
		`dial tcp no-such-name:80: lookup no-such-name\.(.*\.)?( on .*)?: no (.*)`,
	},
	{
		"tcp", "mh/astro/r70:http",
		"dial tcp mh/astro/r70:http: lookup mh/astro/r70: invalid domain name",
	},
	{
		"unix", "/etc/file-not-found",
		"dial unix /etc/file-not-found: no such file or directory",
	},
	{
		"unix", "/etc/",
		"dial unix /etc/: (permission denied|socket operation on non-socket|connection refused)",
	},
	{
		"unixpacket", "/etc/file-not-found",
		"dial unixpacket /etc/file-not-found: no such file or directory",
	},
	{
		"unixpacket", "/etc/",
		"dial unixpacket /etc/: (permission denied|socket operation on non-socket|connection refused)",
	},
}

// duplicateErrorPattern matches an error mentioning "dial" twice,
// which would indicate redundant error wrapping.
var duplicateErrorPattern = `dial (.*) dial (.*)`
// TestDialError checks that Dial returns the expected error string for
// a table of bad network/address pairs, and that no error mentions
// "dial" twice. It is disabled by default because it depends on DNS
// behavior; enable with -run_error_test.
func TestDialError(t *testing.T) {
	if !*runErrorTest {
		t.Logf("test disabled; use -run_error_test to enable")
		return
	}
	for i, tt := range dialErrorTests {
		c, err := Dial(tt.Net, tt.Raddr)
		if c != nil {
			c.Close()
		}
		if err == nil {
			t.Errorf("#%d: nil error, want match for %#q", i, tt.Pattern)
			continue
		}
		s := err.Error()
		// Was: the MatchString error was discarded, so a malformed
		// pattern silently disabled the check. Report it instead.
		match, merr := regexp.MatchString(tt.Pattern, s)
		if merr != nil {
			t.Errorf("#%d: bad pattern %#q: %v", i, tt.Pattern, merr)
			continue
		}
		if !match {
			t.Errorf("#%d: %q, want match for %#q", i, s, tt.Pattern)
		}
		match, merr = regexp.MatchString(duplicateErrorPattern, s)
		if merr != nil {
			t.Errorf("#%d: bad pattern %#q: %v", i, duplicateErrorPattern, merr)
		} else if match {
			t.Errorf("#%d: %q, duplicate error return from Dial", i, s)
		}
	}
}
// invalidDialAndListenArgTests lists network/address pairs that must
// be rejected up front, with the exact *OpError each call should
// return (the Op field also selects whether Dial or Listen is tested).
var invalidDialAndListenArgTests = []struct {
	net  string
	addr string
	err  error
}{
	{"foo", "bar", &OpError{Op: "dial", Net: "foo", Addr: nil, Err: UnknownNetworkError("foo")}},
	{"baz", "", &OpError{Op: "listen", Net: "baz", Addr: nil, Err: UnknownNetworkError("baz")}},
	{"tcp", "", &OpError{Op: "dial", Net: "tcp", Addr: nil, Err: errMissingAddress}},
}
// TestInvalidDialAndListenArgs verifies that Dial and Listen reject
// malformed arguments with exactly the expected *OpError values.
func TestInvalidDialAndListenArgs(t *testing.T) {
	for _, tt := range invalidDialAndListenArgTests {
		var err error
		op := tt.err.(*OpError).Op
		if op == "dial" {
			_, err = Dial(tt.net, tt.addr)
		} else if op == "listen" {
			_, err = Listen(tt.net, tt.addr)
		}
		if !reflect.DeepEqual(tt.err, err) {
			t.Fatalf("got %#v; expected %#v", err, tt.err)
		}
	}
}
// TestDialTimeoutFDLeak fires many concurrent DialTimeout calls at a
// local listener that never accepts, and then checks (via
// /proc/self/fd) that timed-out dials released their file
// descriptors. Linux-only because numFD is Linux-only.
func TestDialTimeoutFDLeak(t *testing.T) {
	if runtime.GOOS != "linux" {
		// TODO(bradfitz): test on other platforms
		t.Skipf("skipping test on %q", runtime.GOOS)
	}
	ln := newLocalListener(t)
	defer ln.Close()
	type connErr struct {
		conn Conn
		err  error
	}
	dials := listenerBacklog + 100
	// used to be listenerBacklog + 5, but was found to be unreliable, issue 4384.
	maxGoodConnect := listenerBacklog + runtime.NumCPU()*10
	resc := make(chan connErr)
	for i := 0; i < dials; i++ {
		go func() {
			conn, err := DialTimeout("tcp", ln.Addr().String(), 500*time.Millisecond)
			resc <- connErr{conn, err}
		}()
	}
	var firstErr string
	var ngood int
	var toClose []io.Closer
	// Collect every result; successful connections are closed later so
	// their FDs don't distort the leak count below.
	for i := 0; i < dials; i++ {
		ce := <-resc
		if ce.err == nil {
			ngood++
			if ngood > maxGoodConnect {
				t.Errorf("%d good connects; expected at most %d", ngood, maxGoodConnect)
			}
			toClose = append(toClose, ce.conn)
			continue
		}
		err := ce.err
		if firstErr == "" {
			firstErr = err.Error()
		} else if err.Error() != firstErr {
			t.Fatalf("inconsistent error messages: first was %q, then later %q", firstErr, err)
		}
	}
	for _, c := range toClose {
		c.Close()
	}
	// FDs may be released asynchronously; poll for up to ~1s before
	// declaring a leak.
	for i := 0; i < 100; i++ {
		if got := numFD(); got < dials {
			// Test passes.
			return
		}
		time.Sleep(10 * time.Millisecond)
	}
	if got := numFD(); got >= dials {
		t.Errorf("num fds after %d timeouts = %d; want <%d", dials, got, dials)
	}
}
// numFD reports the number of file descriptors currently open in this
// process, by counting entries in /proc/self/fd. Linux only; it
// panics elsewhere.
func numFD() int {
	if runtime.GOOS != "linux" {
		// All tests using this should be skipped anyway, but:
		panic("numFDs not implemented on " + runtime.GOOS)
	}
	f, err := os.Open("/proc/self/fd")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	names, err := f.Readdirnames(0)
	if err != nil {
		panic(err)
	}
	return len(names)
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import (
"bytes"
"expvar"
"flag"
"fmt"
"io"
"log"
"net/http"
"os"
"strconv"
)
// hello world, the web server
var helloRequests = expvar.NewInt("hello-requests")

// HelloServer counts the request in helloRequests and greets the
// client.
func HelloServer(w http.ResponseWriter, req *http.Request) {
	helloRequests.Add(1)
	fmt.Fprint(w, "hello, world!\n")
}
// Simple counter server. POSTing to it will set the value.
type Counter struct {
	n int
}

// This makes Counter satisfy the expvar.Var interface, so we can export
// it directly.
func (ctr *Counter) String() string { return strconv.Itoa(ctr.n) }

// ServeHTTP increments the counter on GET, resets it from the request
// body on POST, and always reports the current value.
func (ctr *Counter) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	switch req.Method {
	case "GET":
		ctr.n++
	case "POST":
		buf := new(bytes.Buffer)
		io.Copy(buf, req.Body)
		body := buf.String()
		n, err := strconv.Atoi(body)
		if err != nil {
			fmt.Fprintf(w, "bad POST: %v\nbody: [%v]\n", err, body)
		} else {
			ctr.n = n
			fmt.Fprint(w, "counter reset\n")
		}
	}
	fmt.Fprintf(w, "counter = %d\n", ctr.n)
}
// simple flag server
var booleanflag = flag.Bool("boolean", true, "another flag for testing")

// FlagServer lists every registered flag, marking those whose current
// value differs from the default.
func FlagServer(w http.ResponseWriter, req *http.Request) {
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	fmt.Fprint(w, "Flags:\n")
	flag.VisitAll(func(f *flag.Flag) {
		if f.Value.String() != f.DefValue {
			fmt.Fprintf(w, "%s = %s [default = %s]\n", f.Name, f.Value.String(), f.DefValue)
		} else {
			fmt.Fprintf(w, "%s = %s\n", f.Name, f.Value.String())
		}
	})
}
// simple argument server
//
// ArgServer echoes the process's command-line arguments, separated by
// spaces.
func ArgServer(w http.ResponseWriter, req *http.Request) {
	for _, arg := range os.Args {
		fmt.Fprint(w, arg, " ")
	}
}
// a channel (just for the fun of it)
type Chan chan int

// ChanCreate returns a channel fed by a goroutine that sends the
// integers 0, 1, 2, ... forever.
func ChanCreate() Chan {
	ch := make(Chan)
	go func() {
		for i := 0; ; i++ {
			ch <- i
		}
	}()
	return ch
}

// ServeHTTP reports the next value received from the channel.
func (ch Chan) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	fmt.Fprintf(w, "channel send #%d\n", <-ch)
}
// exec a program, redirecting output
//
// DateServer runs /bin/date with stdout and stderr wired to a pipe
// and streams its output to the client, reporting failures of each
// stage (pipe, fork/exec, wait, non-zero exit) in the response body.
// NOTE(review): this uses the pre-Go 1 os API (p.Wait(0),
// wait.Exited(), wait.ExitStatus()); it will not compile against a
// modern standard library.
func DateServer(rw http.ResponseWriter, req *http.Request) {
	rw.Header().Set("Content-Type", "text/plain; charset=utf-8")
	r, w, err := os.Pipe()
	if err != nil {
		fmt.Fprintf(rw, "pipe: %s\n", err)
		return
	}
	// Child gets w as fds 1 and 2; the parent reads the output from r.
	p, err := os.StartProcess("/bin/date", []string{"date"}, &os.ProcAttr{Files: []*os.File{nil, w, w}})
	defer r.Close()
	// Close the parent's copy of the write end so the read below sees EOF.
	w.Close()
	if err != nil {
		fmt.Fprintf(rw, "fork/exec: %s\n", err)
		return
	}
	io.Copy(rw, r)
	wait, err := p.Wait(0)
	if err != nil {
		fmt.Fprintf(rw, "wait: %s\n", err)
		return
	}
	if !wait.Exited() || wait.ExitStatus() != 0 {
		fmt.Fprintf(rw, "date: %v\n", wait)
		return
	}
}
// Logger logs the request URL and replies 404 with body "oops".
// NOTE(review): req.URL.Raw is a pre-Go 1 field; modern url.URL has
// no such field, so this revision will not compile today.
func Logger(w http.ResponseWriter, req *http.Request) {
	log.Print(req.URL.Raw)
	w.WriteHeader(404)
	w.Write([]byte("oops"))
}
// webroot is the directory served under the /go/ prefix.
var webroot = flag.String("root", "/home/rsc", "web root directory")

// main registers all demo handlers and serves HTTP on port 12345,
// panicking if the listener cannot be started.
func main() {
	flag.Parse()
	// The counter is published as a variable directly.
	ctr := new(Counter)
	http.Handle("/counter", ctr)
	expvar.Publish("counter", ctr)
	http.Handle("/", http.HandlerFunc(Logger))
	http.Handle("/go/", http.StripPrefix("/go/", http.FileServer(http.Dir(*webroot))))
	http.Handle("/flags", http.HandlerFunc(FlagServer))
	http.Handle("/args", http.HandlerFunc(ArgServer))
	http.Handle("/go/hello", http.HandlerFunc(HelloServer))
	http.Handle("/chan", ChanCreate())
	http.Handle("/date", http.HandlerFunc(DateServer))
	err := http.ListenAndServe(":12345", nil)
	if err != nil {
		log.Panicln("ListenAndServe:", err)
	}
}
net/http: ensure triv.go compiles and runs
R=golang-dev, bradfitz, dsymonds, dave, r
CC=golang-dev
https://golang.org/cl/5795069
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import (
"bytes"
"expvar"
"flag"
"fmt"
"io"
"log"
"net/http"
"os"
"os/exec"
"strconv"
)
// hello world, the web server
var helloRequests = expvar.NewInt("hello-requests")

// HelloServer counts the request in helloRequests and greets the
// client.
func HelloServer(w http.ResponseWriter, req *http.Request) {
	helloRequests.Add(1)
	fmt.Fprint(w, "hello, world!\n")
}
// Simple counter server. POSTing to it will set the value.
type Counter struct {
	n int
}

// This makes Counter satisfy the expvar.Var interface, so we can export
// it directly.
func (ctr *Counter) String() string { return strconv.Itoa(ctr.n) }

// ServeHTTP increments the counter on GET, resets it from the request
// body on POST, and always reports the current value.
func (ctr *Counter) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	switch req.Method {
	case "GET":
		ctr.n++
	case "POST":
		buf := new(bytes.Buffer)
		io.Copy(buf, req.Body)
		body := buf.String()
		n, err := strconv.Atoi(body)
		if err != nil {
			fmt.Fprintf(w, "bad POST: %v\nbody: [%v]\n", err, body)
		} else {
			ctr.n = n
			fmt.Fprint(w, "counter reset\n")
		}
	}
	fmt.Fprintf(w, "counter = %d\n", ctr.n)
}
// simple flag server
var booleanflag = flag.Bool("boolean", true, "another flag for testing")

// FlagServer lists every registered flag, marking those whose current
// value differs from the default.
func FlagServer(w http.ResponseWriter, req *http.Request) {
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	fmt.Fprint(w, "Flags:\n")
	flag.VisitAll(func(f *flag.Flag) {
		if f.Value.String() != f.DefValue {
			fmt.Fprintf(w, "%s = %s [default = %s]\n", f.Name, f.Value.String(), f.DefValue)
		} else {
			fmt.Fprintf(w, "%s = %s\n", f.Name, f.Value.String())
		}
	})
}
// simple argument server
//
// ArgServer echoes the process's command-line arguments, separated by
// spaces.
func ArgServer(w http.ResponseWriter, req *http.Request) {
	for _, arg := range os.Args {
		fmt.Fprint(w, arg, " ")
	}
}
// a channel (just for the fun of it)
type Chan chan int

// ChanCreate returns a channel fed by a goroutine that sends the
// integers 0, 1, 2, ... forever.
func ChanCreate() Chan {
	ch := make(Chan)
	go func() {
		for i := 0; ; i++ {
			ch <- i
		}
	}()
	return ch
}

// ServeHTTP reports the next value received from the channel.
func (ch Chan) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	fmt.Fprintf(w, "channel send #%d\n", <-ch)
}
// exec a program, redirecting output
//
// DateServer runs /bin/date and writes its output to the client,
// replying 500 if the command fails.
func DateServer(rw http.ResponseWriter, req *http.Request) {
	rw.Header().Set("Content-Type", "text/plain; charset=utf-8")
	out, err := exec.Command("/bin/date").Output()
	if err != nil {
		http.Error(rw, err.Error(), 500)
		return
	}
	rw.Write(out)
}
// Logger logs each request's URL and replies 404 with body "oops".
func Logger(w http.ResponseWriter, req *http.Request) {
	log.Print(req.URL)
	http.Error(w, "oops", 404)
}
// webroot is the directory served under the /go/ prefix.
var webroot = flag.String("root", "/home/rsc", "web root directory")

// main registers all demo handlers and serves HTTP on port 12345,
// panicking if the listener cannot be started.
func main() {
	flag.Parse()
	// The counter is published as a variable directly.
	ctr := new(Counter)
	http.Handle("/counter", ctr)
	expvar.Publish("counter", ctr)
	http.Handle("/", http.HandlerFunc(Logger))
	http.Handle("/go/", http.StripPrefix("/go/", http.FileServer(http.Dir(*webroot))))
	http.Handle("/flags", http.HandlerFunc(FlagServer))
	http.Handle("/args", http.HandlerFunc(ArgServer))
	http.Handle("/go/hello", http.HandlerFunc(HelloServer))
	http.Handle("/chan", ChanCreate())
	http.Handle("/date", http.HandlerFunc(DateServer))
	err := http.ListenAndServe(":12345", nil)
	if err != nil {
		log.Panicln("ListenAndServe:", err)
	}
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package os
import (
"os";
"syscall";
)
var Args []string; // provided by runtime
var Envs []string; // provided by runtime

// Exit causes the current program to exit with the given status code.
// Conventionally, code zero indicates success, non-zero an error.
// NOTE(review): this issues the exit_group syscall directly, so no
// deferred cleanup runs before the process terminates. (Pre-Go 1
// syscall API; will not compile with a modern standard library.)
func Exit(code int) {
	syscall.Syscall(syscall.SYS_EXIT_GROUP, int64(code), 0, 0)
}
dreg
R=r
DELTA=19 (0 added, 19 deleted, 0 changed)
OCL=30739
CL=30751
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package reflect
import (
"math"
"runtime"
"strconv"
"unsafe"
)
// bigEndian records the byte order assumed by the word-packing code.
const bigEndian = false // can be smarter if we find a big-endian machine

// ptrSize is the size in bytes of a pointer on the target machine.
const ptrSize = unsafe.Sizeof((*byte)(nil))

// cannotSet is the panic message used when mutating a value obtained
// through an unexported struct field.
const cannotSet = "cannot set value obtained from unexported struct field"
// TODO: This will have to go away when
// the new gc goes in.
//
// memmove copies n bytes from asrc to adst, handling overlapping
// ranges: it copies backward when the source precedes an overlapping
// destination, byte-by-byte when any pointer or the length is not
// word-aligned, and word-by-word otherwise.
func memmove(adst, asrc unsafe.Pointer, n uintptr) {
	dst := uintptr(adst)
	src := uintptr(asrc)
	switch {
	case src < dst && src+n > dst:
		// byte copy backward
		// careful: i is unsigned
		for i := n; i > 0; {
			i--
			*(*byte)(unsafe.Pointer(dst + i)) = *(*byte)(unsafe.Pointer(src + i))
		}
	case (n|src|dst)&(ptrSize-1) != 0:
		// byte copy forward
		for i := uintptr(0); i < n; i++ {
			*(*byte)(unsafe.Pointer(dst + i)) = *(*byte)(unsafe.Pointer(src + i))
		}
	default:
		// word copy forward
		for i := uintptr(0); i < n; i += ptrSize {
			*(*uintptr)(unsafe.Pointer(dst + i)) = *(*uintptr)(unsafe.Pointer(src + i))
		}
	}
}
// Value is the reflection interface to a Go value.
//
// Not all methods apply to all kinds of values. Restrictions,
// if any, are noted in the documentation for each method.
// Use the Kind method to find out the kind of value before
// calling kind-specific methods. Calling a method
// inappropriate to the kind of type causes a run time panic.
//
// The zero Value represents no value.
// Its IsValid method returns false, its Kind method returns Invalid,
// its String method returns "<invalid Value>", and all other methods panic.
// Most functions and methods never return an invalid value.
// If one does, its documentation states the conditions explicitly.
//
// A Value can be used concurrently by multiple goroutines provided that
// the underlying Go value can be used concurrently for the equivalent
// direct operations.
//
// A Value is three words: type pointer, data word, and flag word.
type Value struct {
	// typ holds the type of the value represented by a Value.
	typ *rtype
	// val holds the 1-word representation of the value.
	// If flag's flagIndir bit is set, then val is a pointer to the data.
	// Otherwise val is a word holding the actual data.
	// When the data is smaller than a word, it begins at
	// the first byte (in the memory address sense) of val.
	// We use unsafe.Pointer so that the garbage collector
	// knows that val could be a pointer.
	val unsafe.Pointer
	// flag holds metadata about the value.
	// The lowest bits are flag bits:
	//	- flagRO: obtained via unexported field, so read-only
	//	- flagIndir: val holds a pointer to the data
	//	- flagAddr: v.CanAddr is true (implies flagIndir)
	//	- flagMethod: v is a method value.
	// The next five bits give the Kind of the value.
	// This repeats typ.Kind() except for method values.
	// The remaining 23+ bits give a method number for method values.
	// If flag.kind() != Func, code can assume that flagMethod is unset.
	// If typ.size > ptrSize, code can assume that flagIndir is set.
	flag
	// A method value represents a curried method invocation
	// like r.Read for some receiver r. The typ+val+flag bits describe
	// the receiver r, but the flag's Kind bits say Func (methods are
	// functions), and the top bits of the flag give the method number
	// in r's type's method table.
}
// flag packs a Value's read-only/indirect/addressable/method bits,
// its Kind (5 bits), and — for method values — a method index into a
// single uintptr. The layout is defined by the constants below.
type flag uintptr

const (
	flagRO flag = 1 << iota
	flagIndir
	flagAddr
	flagMethod
	flagKindShift        = iota // Kind bits start just above the flag bits
	flagKindWidth        = 5    // there are 27 kinds
	flagKindMask    flag = 1<<flagKindWidth - 1
	flagMethodShift      = flagKindShift + flagKindWidth
)

// kind extracts the Kind bits from the flag word.
func (f flag) kind() Kind {
	return Kind((f >> flagKindShift) & flagKindMask)
}
// A ValueError occurs when a Value method is invoked on
// a Value that does not support it. Such cases are documented
// in the description of each method.
type ValueError struct {
	Method string
	Kind   Kind
}

// Error formats the failure, distinguishing a zero Value (Kind 0)
// from a Value of the wrong kind.
func (e *ValueError) Error() string {
	if e.Kind != 0 {
		return "reflect: call of " + e.Method + " on " + e.Kind.String() + " Value"
	}
	return "reflect: call of " + e.Method + " on zero Value"
}
// methodName returns the name of the calling method,
// assumed to be two stack frames above.
func methodName() string {
	pc, _, _, _ := runtime.Caller(2)
	if f := runtime.FuncForPC(pc); f != nil {
		return f.Name()
	}
	return "unknown method"
}
// An iword is the word that would be stored in an
// interface to represent a given value v. Specifically, if v is
// bigger than a pointer, its word is a pointer to v's data.
// Otherwise, its word holds the data stored
// in its leading bytes (so is not a pointer).
// Because the value sometimes holds a pointer, we use
// unsafe.Pointer to represent it, so that if iword appears
// in a struct, the garbage collector knows that might be
// a pointer.
type iword unsafe.Pointer

// iword returns the interface word for v: when the value is stored
// indirectly but fits in one word, the data itself is loaded;
// otherwise v.val is already the correct word.
func (v Value) iword() iword {
	if v.flag&flagIndir != 0 && v.typ.size <= ptrSize {
		// Have indirect but want direct word.
		return loadIword(v.val, v.typ.size)
	}
	return iword(v.val)
}
// loadIword loads n bytes at p from memory into an iword.
// n must be 0 through 8 (at most one word); any other size is an
// internal error.
func loadIword(p unsafe.Pointer, n uintptr) iword {
	// Run the copy ourselves instead of calling memmove
	// to avoid moving w to the heap.
	var w iword
	// Each case copies exactly n bytes into the low bytes of w.
	switch n {
	default:
		panic("reflect: internal error: loadIword of " + strconv.Itoa(int(n)) + "-byte value")
	case 0:
	case 1:
		*(*uint8)(unsafe.Pointer(&w)) = *(*uint8)(p)
	case 2:
		*(*uint16)(unsafe.Pointer(&w)) = *(*uint16)(p)
	case 3:
		*(*[3]byte)(unsafe.Pointer(&w)) = *(*[3]byte)(p)
	case 4:
		*(*uint32)(unsafe.Pointer(&w)) = *(*uint32)(p)
	case 5:
		*(*[5]byte)(unsafe.Pointer(&w)) = *(*[5]byte)(p)
	case 6:
		*(*[6]byte)(unsafe.Pointer(&w)) = *(*[6]byte)(p)
	case 7:
		*(*[7]byte)(unsafe.Pointer(&w)) = *(*[7]byte)(p)
	case 8:
		*(*uint64)(unsafe.Pointer(&w)) = *(*uint64)(p)
	}
	return w
}
// storeIword stores n bytes from w into p.
// n must be 0 through 8 (at most one word); any other size is an
// internal error. This is the inverse of loadIword.
func storeIword(p unsafe.Pointer, w iword, n uintptr) {
	// Run the copy ourselves instead of calling memmove
	// to avoid moving w to the heap.
	switch n {
	default:
		panic("reflect: internal error: storeIword of " + strconv.Itoa(int(n)) + "-byte value")
	case 0:
	case 1:
		*(*uint8)(p) = *(*uint8)(unsafe.Pointer(&w))
	case 2:
		*(*uint16)(p) = *(*uint16)(unsafe.Pointer(&w))
	case 3:
		*(*[3]byte)(p) = *(*[3]byte)(unsafe.Pointer(&w))
	case 4:
		*(*uint32)(p) = *(*uint32)(unsafe.Pointer(&w))
	case 5:
		*(*[5]byte)(p) = *(*[5]byte)(unsafe.Pointer(&w))
	case 6:
		*(*[6]byte)(p) = *(*[6]byte)(unsafe.Pointer(&w))
	case 7:
		*(*[7]byte)(p) = *(*[7]byte)(unsafe.Pointer(&w))
	case 8:
		*(*uint64)(p) = *(*uint64)(unsafe.Pointer(&w))
	}
}
// emptyInterface is the header for an interface{} value.
// It mirrors the runtime's layout: a type pointer and a data word.
type emptyInterface struct {
	typ  *rtype
	word iword
}

// nonEmptyInterface is the header for a interface value with methods.
type nonEmptyInterface struct {
	// see ../runtime/iface.c:/Itab
	itab *struct {
		ityp   *rtype // static interface type
		typ    *rtype // dynamic concrete type
		link   unsafe.Pointer
		bad    int32
		unused int32
		fun    [100000]unsafe.Pointer // method table
	}
	word iword
}
// mustBe panics if f's kind is not expected.
// Making this a method on flag instead of on Value
// (and embedding flag in Value) means that we can write
// the very clear v.mustBe(Bool) and have it compile into
// v.flag.mustBe(Bool), which will only bother to copy the
// single important word for the receiver.
func (f flag) mustBe(expected Kind) {
	if k := f.kind(); k != expected {
		panic(&ValueError{methodName(), k})
	}
}
// mustBeExported panics if f records that the value was obtained using
// an unexported field.
func (f flag) mustBeExported() {
	switch {
	case f == 0:
		panic(&ValueError{methodName(), 0})
	case f&flagRO != 0:
		panic("reflect: " + methodName() + " using value obtained using unexported field")
	}
}
// mustBeAssignable panics if f records that the value is not assignable,
// which is to say that either it was obtained using an unexported field
// or it is not addressable.
func (f flag) mustBeAssignable() {
	switch {
	case f == 0:
		panic(&ValueError{methodName(), Invalid})
	// Assignable if addressable and not read-only.
	case f&flagRO != 0:
		panic("reflect: " + methodName() + " using value obtained using unexported field")
	case f&flagAddr == 0:
		panic("reflect: " + methodName() + " using unaddressable value")
	}
}
// Addr returns a pointer value representing the address of v.
// It panics if CanAddr() returns false.
// Addr is typically used to obtain a pointer to a struct field
// or slice element in order to call a method that requires a
// pointer receiver.
func (v Value) Addr() Value {
	if v.flag&flagAddr == 0 {
		panic("reflect.Value.Addr of unaddressable value")
	}
	// Keep only the read-only bit and stamp in the Ptr kind.
	fl := (v.flag & flagRO) | flag(Ptr)<<flagKindShift
	return Value{v.typ.ptrTo(), v.val, fl}
}
// Bool returns v's underlying value.
// It panics if v's kind is not Bool.
func (v Value) Bool() bool {
	v.mustBe(Bool)
	// Indirect values store a pointer to the data in val;
	// otherwise the data lives in the val word itself.
	p := unsafe.Pointer(&v.val)
	if v.flag&flagIndir != 0 {
		p = v.val
	}
	return *(*bool)(p)
}
// Bytes returns v's underlying value.
// It panics if v's underlying value is not a slice of bytes.
func (v Value) Bytes() []byte {
	v.mustBe(Slice)
	if k := v.typ.Elem().Kind(); k != Uint8 {
		panic("reflect.Value.Bytes of non-byte slice")
	}
	// Slice is always bigger than a word; assume flagIndir.
	return *(*[]byte)(v.val)
}
// runes returns v's underlying value.
// It panics if v's underlying value is not a slice of runes (int32s).
func (v Value) runes() []rune {
	v.mustBe(Slice)
	if v.typ.Elem().Kind() != Int32 {
		// Was "reflect.Value.Bytes of non-rune slice" — a copy-paste
		// from Bytes above; name the actual method in the panic.
		panic("reflect.Value.runes of non-rune slice")
	}
	// Slice is always bigger than a word; assume flagIndir.
	return *(*[]rune)(v.val)
}
// CanAddr returns true if the value's address can be obtained with Addr.
// Such values are called addressable. A value is addressable if it is
// an element of a slice, an element of an addressable array,
// a field of an addressable struct, or the result of dereferencing a pointer.
// If CanAddr returns false, calling Addr will panic.
func (v Value) CanAddr() bool {
	return v.flag&flagAddr == flagAddr
}
// CanSet returns true if the value of v can be changed.
// A Value can be changed only if it is addressable and was not
// obtained by the use of unexported struct fields.
// If CanSet returns false, calling Set or any type-specific
// setter (e.g., SetBool, SetInt64) will panic.
func (v Value) CanSet() bool {
	return v.flag&flagAddr != 0 && v.flag&flagRO == 0
}
// Call calls the function v with the input arguments in.
// For example, if len(in) == 3, v.Call(in) represents the Go call v(in[0], in[1], in[2]).
// Call panics if v's Kind is not Func.
// It returns the output results as Values.
// As in Go, each input argument must be assignable to the
// type of the function's corresponding input parameter.
// If v is a variadic function, Call creates the variadic slice parameter
// itself, copying in the corresponding values.
func (v Value) Call(in []Value) []Value {
	// Validate kind and exportedness, then defer to the shared helper.
	v.mustBe(Func)
	v.mustBeExported()
	return v.call("Call", in)
}
// CallSlice calls the variadic function v with the input arguments in,
// assigning the slice in[len(in)-1] to v's final variadic argument.
// For example, if len(in) == 3, v.Call(in) represents the Go call v(in[0], in[1], in[2]...).
// Call panics if v's Kind is not Func or if v is not variadic.
// It returns the output results as Values.
// As in Go, each input argument must be assignable to the
// type of the function's corresponding input parameter.
func (v Value) CallSlice(in []Value) []Value {
	// Validate kind and exportedness, then defer to the shared helper.
	v.mustBe(Func)
	v.mustBeExported()
	return v.call("CallSlice", in)
}
// call is the shared implementation of Call and CallSlice: it validates
// the arguments against t's signature, lays out a concrete argument
// frame, invokes the function, and unpacks the results into Values.
// op is "Call" or "CallSlice" and is used only in panic messages.
func (v Value) call(op string, in []Value) []Value {
	// Get function pointer, type.
	t := v.typ
	var (
		fn   unsafe.Pointer
		rcvr iword
	)
	if v.flag&flagMethod != 0 {
		// Method value: resolve the concrete method func and receiver word.
		t, fn, rcvr = methodReceiver(op, v, int(v.flag)>>flagMethodShift)
	} else if v.flag&flagIndir != 0 {
		fn = *(*unsafe.Pointer)(v.val)
	} else {
		fn = v.val
	}
	if fn == nil {
		panic("reflect.Value.Call: call of nil function")
	}
	isSlice := op == "CallSlice"
	n := t.NumIn()
	if isSlice {
		// CallSlice requires exactly NumIn arguments, the last being
		// the pre-built variadic slice.
		if !t.IsVariadic() {
			panic("reflect: CallSlice of non-variadic function")
		}
		if len(in) < n {
			panic("reflect: CallSlice with too few input arguments")
		}
		if len(in) > n {
			panic("reflect: CallSlice with too many input arguments")
		}
	} else {
		// Plain Call of a variadic function: the last declared parameter
		// absorbs any number of trailing arguments, so only n-1 are fixed.
		if t.IsVariadic() {
			n--
		}
		if len(in) < n {
			panic("reflect: Call with too few input arguments")
		}
		if !t.IsVariadic() && len(in) > n {
			panic("reflect: Call with too many input arguments")
		}
	}
	for _, x := range in {
		if x.Kind() == Invalid {
			panic("reflect: " + op + " using zero Value argument")
		}
	}
	// Type-check the fixed arguments before any copying happens.
	for i := 0; i < n; i++ {
		if xt, targ := in[i].Type(), t.In(i); !xt.AssignableTo(targ) {
			panic("reflect: " + op + " using " + xt.String() + " as type " + targ.String())
		}
	}
	if !isSlice && t.IsVariadic() {
		// prepare slice for remaining values
		m := len(in) - n
		slice := MakeSlice(t.In(n), m, m)
		elem := t.In(n).Elem()
		for i := 0; i < m; i++ {
			x := in[n+i]
			if xt := x.Type(); !xt.AssignableTo(elem) {
				panic("reflect: cannot use " + xt.String() + " as type " + elem.String() + " in " + op)
			}
			slice.Index(i).Set(x)
		}
		// Replace the trailing arguments with the single variadic slice.
		origIn := in
		in = make([]Value, n+1)
		copy(in[:n], origIn)
		in[n] = slice
	}
	nin := len(in)
	if nin != t.NumIn() {
		panic("reflect.Value.Call: wrong argument count")
	}
	nout := t.NumOut()
	// Compute arg size & allocate.
	// This computation is 5g/6g/8g-dependent
	// and probably wrong for gccgo, but so
	// is most of this function.
	size, _, _, _ := frameSize(t, v.flag&flagMethod != 0)
	// Copy into args.
	//
	// TODO(rsc): This will need to be updated for any new garbage collector.
	// For now make everything look like a pointer by allocating
	// a []unsafe.Pointer.
	args := make([]unsafe.Pointer, size/ptrSize)
	ptr := uintptr(unsafe.Pointer(&args[0]))
	off := uintptr(0)
	if v.flag&flagMethod != 0 {
		// Hard-wired first argument.
		*(*iword)(unsafe.Pointer(ptr)) = rcvr
		off = ptrSize
	}
	for i, v := range in {
		v.mustBeExported()
		targ := t.In(i).(*rtype)
		// Round off up to targ's alignment, then copy the argument value.
		a := uintptr(targ.align)
		off = (off + a - 1) &^ (a - 1)
		n := targ.size
		addr := unsafe.Pointer(ptr + off)
		v = v.assignTo("reflect.Value.Call", targ, (*interface{})(addr))
		if v.flag&flagIndir == 0 {
			storeIword(addr, iword(v.val), n)
		} else {
			memmove(addr, v.val, n)
		}
		off += n
	}
	// Results begin at the next pointer-aligned offset after the arguments.
	off = (off + ptrSize - 1) &^ (ptrSize - 1)
	// Call.
	call(fn, unsafe.Pointer(ptr), uint32(size))
	// Copy return values out of args.
	//
	// TODO(rsc): revisit like above.
	ret := make([]Value, nout)
	for i := 0; i < nout; i++ {
		tv := t.Out(i)
		a := uintptr(tv.Align())
		off = (off + a - 1) &^ (a - 1)
		// Results stay in the frame; flagIndir marks val as a pointer to data.
		fl := flagIndir | flag(tv.Kind())<<flagKindShift
		ret[i] = Value{tv.common(), unsafe.Pointer(ptr + off), fl}
		off += tv.Size()
	}
	return ret
}
// callReflect is the call implementation used by a function
// returned by MakeFunc. In many ways it is the opposite of the
// method Value.call above. The method above converts a call using Values
// into a call of a function with a concrete argument frame, while
// callReflect converts a call of a function with a concrete argument
// frame into a call using Values.
// It is in this file so that it can be next to the call method above.
// The remainder of the MakeFunc implementation is in makefunc.go.
//
// NOTE: This function must be marked as a "wrapper" in the generated code,
// so that the linker can make it work correctly for panic and recover.
// The gc compilers know to do that for the name "reflect.callReflect".
func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer) {
	ftyp := ctxt.typ
	f := ctxt.fn
	// Copy argument frame into Values.
	ptr := frame
	off := uintptr(0)
	in := make([]Value, 0, len(ftyp.in))
	for _, arg := range ftyp.in {
		typ := arg
		// Advance off to typ's alignment boundary.
		off += -off & uintptr(typ.align-1)
		v := Value{typ, nil, flag(typ.Kind()) << flagKindShift}
		if typ.size <= ptrSize {
			// value fits in word.
			v.val = unsafe.Pointer(loadIword(unsafe.Pointer(uintptr(ptr)+off), typ.size))
		} else {
			// value does not fit in word.
			// Must make a copy, because f might keep a reference to it,
			// and we cannot let f keep a reference to the stack frame
			// after this function returns, not even a read-only reference.
			v.val = unsafe_New(typ)
			memmove(v.val, unsafe.Pointer(uintptr(ptr)+off), typ.size)
			v.flag |= flagIndir
		}
		in = append(in, v)
		off += typ.size
	}
	// Call underlying function.
	out := f(in)
	if len(out) != len(ftyp.out) {
		panic("reflect: wrong return count from function created by MakeFunc")
	}
	// Copy results back into argument frame.
	if len(ftyp.out) > 0 {
		// Results start at the next pointer-aligned offset.
		off += -off & (ptrSize - 1)
		for i, arg := range ftyp.out {
			typ := arg
			v := out[i]
			// Result types must match exactly; assignability is not enough here.
			if v.typ != typ {
				panic("reflect: function created by MakeFunc using " + funcName(f) +
					" returned wrong type: have " +
					out[i].typ.String() + " for " + typ.String())
			}
			if v.flag&flagRO != 0 {
				panic("reflect: function created by MakeFunc using " + funcName(f) +
					" returned value obtained from unexported field")
			}
			off += -off & uintptr(typ.align-1)
			addr := unsafe.Pointer(uintptr(ptr) + off)
			if v.flag&flagIndir == 0 {
				storeIword(addr, iword(v.val), typ.size)
			} else {
				memmove(addr, v.val, typ.size)
			}
			off += typ.size
		}
	}
}
// methodReceiver returns information about the receiver
// described by v: the method's function type t, a pointer fn to its
// code pointer, and the receiver word rcvr to pass as first argument.
// The Value v may or may not have the
// flagMethod bit set, so the kind cached in v.flag should
// not be used. op is used only in panic messages.
func methodReceiver(op string, v Value, methodIndex int) (t *rtype, fn unsafe.Pointer, rcvr iword) {
	i := methodIndex
	if v.typ.Kind() == Interface {
		// Interface method: look up the code pointer through the itab.
		tt := (*interfaceType)(unsafe.Pointer(v.typ))
		if i < 0 || i >= len(tt.methods) {
			panic("reflect: internal error: invalid method index")
		}
		m := &tt.methods[i]
		// A non-nil pkgPath marks an unexported method.
		if m.pkgPath != nil {
			panic("reflect: " + op + " of unexported method")
		}
		t = m.typ
		iface := (*nonEmptyInterface)(v.val)
		if iface.itab == nil {
			panic("reflect: " + op + " of method on nil interface value")
		}
		fn = unsafe.Pointer(&iface.itab.fun[i])
		rcvr = iface.word
	} else {
		// Concrete type: method table lives in the uncommon type data.
		ut := v.typ.uncommon()
		if ut == nil || i < 0 || i >= len(ut.methods) {
			panic("reflect: internal error: invalid method index")
		}
		m := &ut.methods[i]
		if m.pkgPath != nil {
			panic("reflect: " + op + " of unexported method")
		}
		fn = unsafe.Pointer(&m.ifn)
		t = m.mtyp
		rcvr = v.iword()
	}
	return
}
// align returns the result of rounding x up to a multiple of n.
// n must be a power of two.
func align(x, n uintptr) uintptr {
	mask := n - 1
	return (x + mask) &^ mask
}
// frameSize returns the sizes of the argument and result frame
// for a function of the given type. The rcvr bool specifies whether
// a one-word receiver should be included in the total.
// total is the whole frame size, in the byte size of the argument
// section, outOffset the offset where results begin, and out the byte
// size of the result section.
func frameSize(t *rtype, rcvr bool) (total, in, outOffset, out uintptr) {
	if rcvr {
		// extra word for receiver interface word
		total += ptrSize
	}
	nin := t.NumIn()
	// in starts at -total so that after the loop in == argsEnd - argsStart
	// (unsigned uintptr arithmetic wraps, making this subtraction trick valid).
	in = -total
	for i := 0; i < nin; i++ {
		tv := t.In(i)
		total = align(total, uintptr(tv.Align()))
		total += tv.Size()
	}
	in += total
	total = align(total, ptrSize)
	nout := t.NumOut()
	outOffset = total
	// Same wrapping trick as above: out ends up as the result-section size.
	out = -total
	for i := 0; i < nout; i++ {
		tv := t.Out(i)
		total = align(total, uintptr(tv.Align()))
		total += tv.Size()
	}
	out += total
	// total must be > 0 in order for &args[0] to be valid.
	// the argument copying is going to round it up to
	// a multiple of ptrSize anyway, so make it ptrSize to begin with.
	if total < ptrSize {
		total = ptrSize
	}
	// round to pointer
	total = align(total, ptrSize)
	return
}
// callMethod is the call implementation used by a function returned
// by makeMethodValue (used by v.Method(i).Interface()).
// It is a streamlined version of the usual reflect call: the caller has
// already laid out the argument frame for us, so we don't have
// to deal with individual Values for each argument.
// It is in this file so that it can be next to the two similar functions above.
// The remainder of the makeMethodValue implementation is in makefunc.go.
//
// NOTE: This function must be marked as a "wrapper" in the generated code,
// so that the linker can make it work correctly for panic and recover.
// The gc compilers know to do that for the name "reflect.callMethod".
func callMethod(ctxt *methodValue, frame unsafe.Pointer) {
	t, fn, rcvr := methodReceiver("call", ctxt.rcvr, ctxt.method)
	total, in, outOffset, out := frameSize(t, true)
	// Copy into args.
	//
	// TODO(rsc): This will need to be updated for any new garbage collector.
	// For now make everything look like a pointer by allocating
	// a []unsafe.Pointer.
	args := make([]unsafe.Pointer, total/ptrSize)
	// Receiver word goes first, then the caller's argument frame after it.
	args[0] = unsafe.Pointer(rcvr)
	base := unsafe.Pointer(&args[0])
	memmove(unsafe.Pointer(uintptr(base)+ptrSize), frame, in)
	// Call.
	call(fn, unsafe.Pointer(&args[0]), uint32(total))
	// Copy return values.
	// The caller's frame has no receiver word, so its result section
	// sits one word earlier than in our frame (outOffset - ptrSize).
	memmove(unsafe.Pointer(uintptr(frame)+outOffset-ptrSize), unsafe.Pointer(uintptr(base)+outOffset), out)
}
// funcName returns the name of f, for use in error messages.
// When the runtime cannot resolve the code pointer, the generic
// string "closure" is returned instead.
func funcName(f func([]Value) []Value) string {
	pc := *(*uintptr)(unsafe.Pointer(&f))
	if rf := runtime.FuncForPC(pc); rf != nil {
		return rf.Name()
	}
	return "closure"
}
// Cap returns v's capacity.
// It panics if v's Kind is not Array, Chan, or Slice.
func (v Value) Cap() int {
	k := v.kind()
	switch k {
	case Array:
		// Array capacity equals its fixed length.
		return v.typ.Len()
	case Chan:
		return int(chancap(v.iword()))
	case Slice:
		// Slice is always bigger than a word; assume flagIndir.
		return (*SliceHeader)(v.val).Cap
	}
	panic(&ValueError{"reflect.Value.Cap", k})
}
// Close closes the channel v.
// It panics if v's Kind is not Chan.
func (v Value) Close() {
	v.mustBe(Chan)
	v.mustBeExported()
	// Delegate to the runtime; closing a closed or nil channel panics there.
	chanclose(v.iword())
}
// Complex returns v's underlying value, as a complex128.
// It panics if v's Kind is not Complex64 or Complex128.
func (v Value) Complex() complex128 {
	k := v.kind()
	switch k {
	case Complex64:
		if v.flag&flagIndir != 0 {
			// v.val points at the data.
			return complex128(*(*complex64)(v.val))
		}
		// Value stored directly in the word; read it in place.
		return complex128(*(*complex64)(unsafe.Pointer(&v.val)))
	case Complex128:
		// complex128 is always bigger than a word; assume flagIndir.
		return *(*complex128)(v.val)
	}
	panic(&ValueError{"reflect.Value.Complex", k})
}
// Elem returns the value that the interface v contains
// or that the pointer v points to.
// It panics if v's Kind is not Interface or Ptr.
// It returns the zero Value if v is nil.
func (v Value) Elem() Value {
	k := v.kind()
	switch k {
	case Interface:
		var (
			typ *rtype
			val unsafe.Pointer
		)
		// Empty and non-empty interfaces have different layouts.
		if v.typ.NumMethod() == 0 {
			eface := (*emptyInterface)(v.val)
			if eface.typ == nil {
				// nil interface value
				return Value{}
			}
			typ = eface.typ
			val = unsafe.Pointer(eface.word)
		} else {
			iface := (*nonEmptyInterface)(v.val)
			if iface.itab == nil {
				// nil interface value
				return Value{}
			}
			typ = iface.itab.typ
			val = unsafe.Pointer(iface.word)
		}
		// Propagate read-only, record the dynamic kind, and mark indirect
		// when the data does not fit in the interface word.
		fl := v.flag & flagRO
		fl |= flag(typ.Kind()) << flagKindShift
		if typ.size > ptrSize {
			fl |= flagIndir
		}
		return Value{typ, val, fl}
	case Ptr:
		val := v.val
		if v.flag&flagIndir != 0 {
			val = *(*unsafe.Pointer)(val)
		}
		// The returned value's address is v's value.
		if val == nil {
			return Value{}
		}
		tt := (*ptrType)(unsafe.Pointer(v.typ))
		typ := tt.elem
		// Pointer targets are addressable and always indirect.
		fl := v.flag&flagRO | flagIndir | flagAddr
		// Convert the Kind to flag before shifting, consistent with every
		// other flag-construction site in this file (the previous form
		// shifted the raw Kind first, which is equivalent only while the
		// shifted value fits in Kind's underlying type).
		fl |= flag(typ.Kind()) << flagKindShift
		return Value{typ, val, fl}
	}
	panic(&ValueError{"reflect.Value.Elem", k})
}
// Field returns the i'th field of the struct v.
// It panics if v's Kind is not Struct or i is out of range.
func (v Value) Field(i int) Value {
	v.mustBe(Struct)
	tt := (*structType)(unsafe.Pointer(v.typ))
	if i < 0 || i >= len(tt.fields) {
		panic("reflect: Field index out of range")
	}
	field := &tt.fields[i]
	typ := field.typ
	// Inherit permission bits from v.
	fl := v.flag & (flagRO | flagIndir | flagAddr)
	// Using an unexported field forces flagRO.
	if field.pkgPath != nil {
		fl |= flagRO
	}
	fl |= flag(typ.Kind()) << flagKindShift
	var val unsafe.Pointer
	switch {
	case fl&flagIndir != 0:
		// Indirect. Just bump pointer.
		val = unsafe.Pointer(uintptr(v.val) + field.offset)
	case bigEndian:
		// Direct. Discard leading bytes.
		// The whole struct is packed into the word; shift the field's
		// bytes into position (shift direction depends on endianness).
		val = unsafe.Pointer(uintptr(v.val) << (field.offset * 8))
	default:
		// Direct. Discard leading bytes.
		val = unsafe.Pointer(uintptr(v.val) >> (field.offset * 8))
	}
	return Value{typ, val, fl}
}
// FieldByIndex returns the nested field corresponding to index,
// following each element of index one struct level deeper and
// implicitly dereferencing intermediate pointers to structs.
// It panics if v's Kind is not struct.
func (v Value) FieldByIndex(index []int) Value {
	v.mustBe(Struct)
	for depth, fieldIndex := range index {
		// After the first hop, step through pointer-to-struct links.
		if depth > 0 && v.Kind() == Ptr && v.Elem().Kind() == Struct {
			v = v.Elem()
		}
		v = v.Field(fieldIndex)
	}
	return v
}
// FieldByName returns the struct field with the given name.
// It returns the zero Value if no field was found.
// It panics if v's Kind is not struct.
func (v Value) FieldByName(name string) Value {
	v.mustBe(Struct)
	f, ok := v.typ.FieldByName(name)
	if !ok {
		return Value{}
	}
	return v.FieldByIndex(f.Index)
}
// FieldByNameFunc returns the struct field with a name
// that satisfies the match function.
// It panics if v's Kind is not struct.
// It returns the zero Value if no field was found.
func (v Value) FieldByNameFunc(match func(string) bool) Value {
	v.mustBe(Struct)
	f, ok := v.typ.FieldByNameFunc(match)
	if !ok {
		return Value{}
	}
	return v.FieldByIndex(f.Index)
}
// Float returns v's underlying value, as a float64.
// It panics if v's Kind is not Float32 or Float64.
func (v Value) Float() float64 {
	k := v.kind()
	// Locate the data: either v.val points at it (indirect) or the
	// value is stored directly in the word itself.
	p := v.val
	if v.flag&flagIndir == 0 {
		p = unsafe.Pointer(&v.val)
	}
	switch k {
	case Float32:
		return float64(*(*float32)(p))
	case Float64:
		return *(*float64)(p)
	}
	panic(&ValueError{"reflect.Value.Float", k})
}
// uint8Type is the *rtype for uint8, used by Index to build the
// byte-element Values it returns for strings.
var uint8Type = TypeOf(uint8(0)).(*rtype)
// Index returns v's i'th element.
// It panics if v's Kind is not Array, Slice, or String or i is out of range.
func (v Value) Index(i int) Value {
	k := v.kind()
	switch k {
	case Array:
		tt := (*arrayType)(unsafe.Pointer(v.typ))
		// Valid indices are 0 <= i < len. The previous check used
		// i > int(tt.len), which was off by one and accepted i == len,
		// allowing a read past the end of the array.
		if i < 0 || i >= int(tt.len) {
			panic("reflect: array index out of range")
		}
		typ := tt.elem
		fl := v.flag & (flagRO | flagIndir | flagAddr) // bits same as overall array
		fl |= flag(typ.Kind()) << flagKindShift
		offset := uintptr(i) * typ.size
		var val unsafe.Pointer
		switch {
		case fl&flagIndir != 0:
			// Indirect. Just bump pointer.
			val = unsafe.Pointer(uintptr(v.val) + offset)
		case bigEndian:
			// Direct. Discard leading bytes.
			val = unsafe.Pointer(uintptr(v.val) << (offset * 8))
		default:
			// Direct. Discard leading bytes.
			val = unsafe.Pointer(uintptr(v.val) >> (offset * 8))
		}
		return Value{typ, val, fl}
	case Slice:
		// Element flag same as Elem of Ptr.
		// Addressable, indirect, possibly read-only.
		fl := flagAddr | flagIndir | v.flag&flagRO
		s := (*SliceHeader)(v.val)
		if i < 0 || i >= s.Len {
			panic("reflect: slice index out of range")
		}
		tt := (*sliceType)(unsafe.Pointer(v.typ))
		typ := tt.elem
		fl |= flag(typ.Kind()) << flagKindShift
		val := unsafe.Pointer(s.Data + uintptr(i)*typ.size)
		return Value{typ, val, fl}
	case String:
		// String bytes are read-only if the string is; element kind is Uint8.
		fl := v.flag&flagRO | flag(Uint8<<flagKindShift)
		s := (*StringHeader)(v.val)
		if i < 0 || i >= s.Len {
			panic("reflect: string index out of range")
		}
		// Store the byte's value directly in the word of the Value.
		val := *(*byte)(unsafe.Pointer(s.Data + uintptr(i)))
		return Value{uint8Type, unsafe.Pointer(uintptr(val)), fl}
	}
	panic(&ValueError{"reflect.Value.Index", k})
}
// Int returns v's underlying value, as an int64.
// It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64.
func (v Value) Int() int64 {
	k := v.kind()
	// Locate the data: v.val points at it when indirect; otherwise the
	// value lives in the word itself. Escape analysis is good enough
	// that &v.val does not trigger a heap allocation.
	ptr := v.val
	if v.flag&flagIndir == 0 {
		ptr = unsafe.Pointer(&v.val)
	}
	switch k {
	case Int:
		return int64(*(*int)(ptr))
	case Int8:
		return int64(*(*int8)(ptr))
	case Int16:
		return int64(*(*int16)(ptr))
	case Int32:
		return int64(*(*int32)(ptr))
	case Int64:
		return int64(*(*int64)(ptr))
	}
	panic(&ValueError{"reflect.Value.Int", k})
}
// CanInterface returns true if Interface can be used without panicking.
func (v Value) CanInterface() bool {
	if !v.IsValid() {
		panic(&ValueError{"reflect.Value.CanInterface", Invalid})
	}
	// Values reached through unexported fields may not be exported.
	return v.flag&flagRO == 0
}
// Interface returns v's current value as an interface{}.
// It is equivalent to:
//	var i interface{} = (v's underlying value)
// If v is a method obtained by invoking Value.Method
// (as opposed to Type.Method), Interface cannot return an
// interface value, so it panics.
// It also panics if the Value was obtained by accessing
// unexported struct fields.
func (v Value) Interface() (i interface{}) {
	// safe=true enforces the unexported-field restriction; the package
	// uses valueInterface directly when it must bypass that check.
	return valueInterface(v, true)
}
// valueInterface is the implementation of Interface. When safe is true
// it refuses values reached through unexported fields; internal callers
// pass safe=false to read such values (e.g. for DeepEqual).
func valueInterface(v Value, safe bool) interface{} {
	if v.flag == 0 {
		panic(&ValueError{"reflect.Value.Interface", 0})
	}
	if safe && v.flag&flagRO != 0 {
		// Do not allow access to unexported values via Interface,
		// because they might be pointers that should not be
		// writable or methods or function that should not be callable.
		panic("reflect.Value.Interface: cannot return value obtained from unexported field or method")
	}
	if v.flag&flagMethod != 0 {
		// Materialize a method value as a real func before exporting it.
		v = makeMethodValue("Interface", v)
	}
	k := v.kind()
	if k == Interface {
		// Special case: return the element inside the interface.
		// Empty interface has one layout, all interfaces with
		// methods have a second layout.
		if v.NumMethod() == 0 {
			return *(*interface{})(v.val)
		}
		return *(*interface {
			M()
		})(v.val)
	}
	// Non-interface value.
	var eface emptyInterface
	eface.typ = v.typ
	eface.word = v.iword()
	// Don't need to allocate if v is not addressable or fits in one word.
	if v.flag&flagAddr != 0 && v.typ.size > ptrSize {
		// eface.word is a pointer to the actual data,
		// which might be changed. We need to return
		// a pointer to unchanging data, so make a copy.
		ptr := unsafe_New(v.typ)
		memmove(ptr, unsafe.Pointer(eface.word), v.typ.size)
		eface.word = iword(ptr)
	}
	return *(*interface{})(unsafe.Pointer(&eface))
}
// InterfaceData returns the interface v's value as a uintptr pair
// (type word, data word).
// It panics if v's Kind is not Interface.
func (v Value) InterfaceData() [2]uintptr {
	v.mustBe(Interface)
	// We treat this as a read operation, so we allow
	// it even for unexported data, because the caller
	// has to import "unsafe" to turn it into something
	// that can be abused.
	// Interface value is always bigger than a word; assume flagIndir.
	return *(*[2]uintptr)(v.val)
}
// IsNil returns true if v is a nil value.
// It panics if v's Kind is not Chan, Func, Interface, Map, Ptr, or Slice.
func (v Value) IsNil() bool {
	k := v.kind()
	switch k {
	case Chan, Func, Map, Ptr:
		// A method value always wraps a non-nil func.
		if v.flag&flagMethod != 0 {
			return false
		}
		ptr := v.val
		if v.flag&flagIndir != 0 {
			ptr = *(*unsafe.Pointer)(ptr)
		}
		return ptr == nil
	case Interface, Slice:
		// Both interface and slice are nil if first word is 0.
		// Both are always bigger than a word; assume flagIndir.
		return *(*unsafe.Pointer)(v.val) == nil
	}
	panic(&ValueError{"reflect.Value.IsNil", k})
}
// IsValid returns true if v represents a value.
// It returns false if v is the zero Value.
// If IsValid returns false, all other methods except String panic.
// Most functions and methods never return an invalid value.
// If one does, its documentation states the conditions explicitly.
func (v Value) IsValid() bool {
	// The zero Value has all flag bits clear.
	return v.flag != 0
}
// Kind returns v's Kind.
// If v is the zero Value (IsValid returns false), Kind returns Invalid.
func (v Value) Kind() Kind {
	// The kind is cached in v.flag; kind() extracts it.
	return v.kind()
}
// Len returns v's length.
// It panics if v's Kind is not Array, Chan, Map, Slice, or String.
func (v Value) Len() int {
	k := v.kind()
	switch k {
	case Array:
		// Array length is part of the type.
		tt := (*arrayType)(unsafe.Pointer(v.typ))
		return int(tt.len)
	case Chan:
		return chanlen(v.iword())
	case Map:
		return maplen(v.iword())
	case Slice:
		// Slice is bigger than a word; assume flagIndir.
		return (*SliceHeader)(v.val).Len
	case String:
		// String is bigger than a word; assume flagIndir.
		return (*StringHeader)(v.val).Len
	}
	panic(&ValueError{"reflect.Value.Len", k})
}
// MapIndex returns the value associated with key in the map v.
// It panics if v's Kind is not Map.
// It returns the zero Value if key is not found in the map or if v represents a nil map.
// As in Go, the key's value must be assignable to the map's key type.
func (v Value) MapIndex(key Value) Value {
	v.mustBe(Map)
	tt := (*mapType)(unsafe.Pointer(v.typ))
	// Do not require key to be exported, so that DeepEqual
	// and other programs can use all the keys returned by
	// MapKeys as arguments to MapIndex. If either the map
	// or the key is unexported, though, the result will be
	// considered unexported. This is consistent with the
	// behavior for structs, which allow read but not write
	// of unexported fields.
	key = key.assignTo("reflect.Value.MapIndex", tt.key, nil)
	word, ok := mapaccess(v.typ, v.iword(), key.iword())
	if !ok {
		return Value{}
	}
	typ := tt.elem
	// Result is read-only if either the map or the key was.
	fl := (v.flag | key.flag) & flagRO
	if typ.size > ptrSize {
		fl |= flagIndir
	}
	fl |= flag(typ.Kind()) << flagKindShift
	return Value{typ, unsafe.Pointer(word), fl}
}
// MapKeys returns a slice containing all the keys present in the map,
// in unspecified order.
// It panics if v's Kind is not Map.
// It returns an empty slice if v represents a nil map.
func (v Value) MapKeys() []Value {
	v.mustBe(Map)
	tt := (*mapType)(unsafe.Pointer(v.typ))
	keyType := tt.key
	// All returned key Values share these flags.
	fl := v.flag & flagRO
	fl |= flag(keyType.Kind()) << flagKindShift
	if keyType.size > ptrSize {
		fl |= flagIndir
	}
	m := v.iword()
	mlen := int(0)
	if m != nil {
		mlen = maplen(m)
	}
	it := mapiterinit(v.typ, m)
	a := make([]Value, mlen)
	var i int
	for i = 0; i < len(a); i++ {
		keyWord, ok := mapiterkey(it)
		if !ok {
			// The map may have shrunk since maplen; stop early.
			break
		}
		a[i] = Value{keyType, unsafe.Pointer(keyWord), fl}
		mapiternext(it)
	}
	// Trim in case iteration produced fewer keys than maplen reported.
	return a[:i]
}
// Method returns a function value corresponding to v's i'th method.
// The arguments to a Call on the returned function should not include
// a receiver; the returned function will always use v as the receiver.
// Method panics if i is out of range or if v is a nil interface value.
func (v Value) Method(i int) Value {
	if v.typ == nil {
		panic(&ValueError{"reflect.Value.Method", Invalid})
	}
	// A Value that is already a method value cannot be indexed again.
	if v.flag&flagMethod != 0 || i < 0 || i >= v.typ.NumMethod() {
		panic("reflect: Method index out of range")
	}
	if v.typ.Kind() == Interface && v.IsNil() {
		panic("reflect: Method on nil interface value")
	}
	// The method is represented lazily: keep the receiver's typ/val and
	// encode the method index into the flag word alongside flagMethod.
	fl := v.flag & (flagRO | flagIndir)
	fl |= flag(Func) << flagKindShift
	fl |= flag(i)<<flagMethodShift | flagMethod
	return Value{v.typ, v.val, fl}
}
// NumMethod returns the number of methods in the value's method set.
func (v Value) NumMethod() int {
	if v.typ == nil {
		panic(&ValueError{"reflect.Value.NumMethod", Invalid})
	}
	// A method value is a plain func; it has no methods of its own.
	if v.flag&flagMethod != 0 {
		return 0
	}
	return v.typ.NumMethod()
}
// MethodByName returns a function value corresponding to the method
// of v with the given name.
// The arguments to a Call on the returned function should not include
// a receiver; the returned function will always use v as the receiver.
// It returns the zero Value if no method was found.
func (v Value) MethodByName(name string) Value {
	if v.typ == nil {
		panic(&ValueError{"reflect.Value.MethodByName", Invalid})
	}
	// Method values have no methods of their own.
	if v.flag&flagMethod != 0 {
		return Value{}
	}
	if m, ok := v.typ.MethodByName(name); ok {
		return v.Method(m.Index)
	}
	return Value{}
}
// NumField returns the number of fields in the struct v.
// It panics if v's Kind is not Struct.
func (v Value) NumField() int {
	v.mustBe(Struct)
	// Field count is read straight from the struct type descriptor.
	tt := (*structType)(unsafe.Pointer(v.typ))
	return len(tt.fields)
}
// OverflowComplex returns true if the complex128 x cannot be represented by v's type.
// It panics if v's Kind is not Complex64 or Complex128.
func (v Value) OverflowComplex(x complex128) bool {
	switch k := v.kind(); k {
	case Complex64:
		// Overflows when either component does not fit in a float32.
		return overflowFloat32(real(x)) || overflowFloat32(imag(x))
	case Complex128:
		// complex128 holds any complex128 by definition.
		return false
	default:
		panic(&ValueError{"reflect.Value.OverflowComplex", k})
	}
}
// OverflowFloat returns true if the float64 x cannot be represented by v's type.
// It panics if v's Kind is not Float32 or Float64.
func (v Value) OverflowFloat(x float64) bool {
	switch k := v.kind(); k {
	case Float32:
		return overflowFloat32(x)
	case Float64:
		// float64 holds any float64 by definition.
		return false
	default:
		panic(&ValueError{"reflect.Value.OverflowFloat", k})
	}
}
// overflowFloat32 reports whether the float64 x is finite but too large
// in magnitude to be represented as a float32. Infinities and NaN are
// not considered overflow (both comparisons fail for them).
func overflowFloat32(x float64) bool {
	abs := math.Abs(x)
	return math.MaxFloat32 < abs && abs <= math.MaxFloat64
}
// OverflowInt returns true if the int64 x cannot be represented by v's type.
// It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64.
func (v Value) OverflowInt(x int64) bool {
	k := v.kind()
	switch k {
	case Int, Int8, Int16, Int32, Int64:
		bitSize := v.typ.size * 8
		// Sign-extend the low bitSize bits; x overflows iff the
		// round trip changes it.
		trunc := (x << (64 - bitSize)) >> (64 - bitSize)
		return x != trunc
	}
	panic(&ValueError{"reflect.Value.OverflowInt", k})
}
// OverflowUint returns true if the uint64 x cannot be represented by v's type.
// It panics if v's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64.
func (v Value) OverflowUint(x uint64) bool {
	k := v.kind()
	switch k {
	case Uint, Uintptr, Uint8, Uint16, Uint32, Uint64:
		bitSize := v.typ.size * 8
		// Zero-extend the low bitSize bits; x overflows iff the
		// round trip changes it.
		trunc := (x << (64 - bitSize)) >> (64 - bitSize)
		return x != trunc
	}
	panic(&ValueError{"reflect.Value.OverflowUint", k})
}
// Pointer returns v's value as a uintptr.
// It returns uintptr instead of unsafe.Pointer so that
// code using reflect cannot obtain unsafe.Pointers
// without importing the unsafe package explicitly.
// It panics if v's Kind is not Chan, Func, Map, Ptr, Slice, or UnsafePointer.
//
// If v's Kind is Func, the returned pointer is an underlying
// code pointer, but not necessarily enough to identify a
// single function uniquely. The only guarantee is that the
// result is zero if and only if v is a nil func Value.
func (v Value) Pointer() uintptr {
	k := v.kind()
	switch k {
	case Chan, Map, Ptr, UnsafePointer:
		p := v.val
		if v.flag&flagIndir != 0 {
			p = *(*unsafe.Pointer)(p)
		}
		return uintptr(p)
	case Func:
		if v.flag&flagMethod != 0 {
			// As the doc comment says, the returned pointer is an
			// underlying code pointer but not necessarily enough to
			// identify a single function uniquely. All method expressions
			// created via reflect have the same underlying code pointer,
			// so their Pointers are equal. The function used here must
			// match the one used in makeMethodValue.
			f := methodValueCall
			return **(**uintptr)(unsafe.Pointer(&f))
		}
		p := v.val
		if v.flag&flagIndir != 0 {
			p = *(*unsafe.Pointer)(p)
		}
		// Non-nil func value points at data block.
		// First word of data block is actual code.
		if p != nil {
			p = *(*unsafe.Pointer)(p)
		}
		return uintptr(p)
	case Slice:
		return (*SliceHeader)(v.val).Data
	}
	panic(&ValueError{"reflect.Value.Pointer", k})
}
// Recv receives and returns a value from the channel v.
// It panics if v's Kind is not Chan.
// The receive blocks until a value is ready.
// The boolean value ok is true if the value x corresponds to a send
// on the channel, false if it is a zero value received because the channel is closed.
func (v Value) Recv() (x Value, ok bool) {
	v.mustBe(Chan)
	v.mustBeExported()
	// nb=false: blocking receive.
	return v.recv(false)
}
// internal recv, possibly non-blocking (nb).
// v is known to be a channel. ok is false when a closed channel
// delivered its zero value; selected is false only for a non-blocking
// receive that would have blocked, in which case val is the zero Value.
func (v Value) recv(nb bool) (val Value, ok bool) {
	tt := (*chanType)(unsafe.Pointer(v.typ))
	if ChanDir(tt.dir)&RecvDir == 0 {
		panic("reflect: recv on send-only channel")
	}
	word, selected, ok := chanrecv(v.typ, v.iword(), nb)
	if selected {
		typ := tt.elem
		fl := flag(typ.Kind()) << flagKindShift
		// Mark indirect when the element does not fit in the word.
		if typ.size > ptrSize {
			fl |= flagIndir
		}
		val = Value{typ, unsafe.Pointer(word), fl}
	}
	return
}
// Send sends x on the channel v.
// It panics if v's kind is not Chan or if x's type is not the same type as v's element type.
// As in Go, x's value must be assignable to the channel's element type.
func (v Value) Send(x Value) {
	v.mustBe(Chan)
	v.mustBeExported()
	// nb=false: blocking send.
	v.send(x, false)
}
// internal send, possibly non-blocking.
// v is known to be a channel. selected reports whether the send
// happened (always true for a blocking send that returns).
func (v Value) send(x Value, nb bool) (selected bool) {
	tt := (*chanType)(unsafe.Pointer(v.typ))
	if ChanDir(tt.dir)&SendDir == 0 {
		panic("reflect: send on recv-only channel")
	}
	x.mustBeExported()
	// Convert x to the channel's element type before handing to the runtime.
	x = x.assignTo("reflect.Value.Send", tt.elem, nil)
	return chansend(v.typ, v.iword(), x.iword(), nb)
}
// Set assigns x to the value v.
// It panics if CanSet returns false.
// As in Go, x's value must be assignable to v's type.
func (v Value) Set(x Value) {
	v.mustBeAssignable()
	x.mustBeExported() // do not let unexported x leak
	var target *interface{}
	if v.kind() == Interface {
		// Assigning into an interface: let assignTo write directly
		// into v's interface word.
		target = (*interface{})(v.val)
	}
	x = x.assignTo("reflect.Set", v.typ, target)
	if x.flag&flagIndir != 0 {
		// x's data is behind a pointer; copy the bytes.
		memmove(v.val, x.val, v.typ.size)
	} else {
		// x's data is in the word itself; store it as a word.
		storeIword(v.val, iword(x.val), v.typ.size)
	}
}
// SetBool sets v's underlying value.
// It panics if v's Kind is not Bool or if CanSet() is false.
func (v Value) SetBool(x bool) {
	v.mustBeAssignable()
	v.mustBe(Bool)
	// Assignable values are always addressable, so v.val points at the data.
	*(*bool)(v.val) = x
}
// SetBytes sets v's underlying value.
// It panics if v's underlying value is not a slice of bytes.
func (v Value) SetBytes(x []byte) {
	v.mustBeAssignable()
	v.mustBe(Slice)
	// The element type must be exactly uint8 (byte), not a named variant check beyond kind.
	if v.typ.Elem().Kind() != Uint8 {
		panic("reflect.Value.SetBytes of non-byte slice")
	}
	*(*[]byte)(v.val) = x
}
// setRunes sets v's underlying value.
// It panics if v's underlying value is not a slice of runes (int32s).
func (v Value) setRunes(x []rune) {
	v.mustBeAssignable()
	v.mustBe(Slice)
	// rune is an alias for int32, so the element kind must be Int32.
	if v.typ.Elem().Kind() != Int32 {
		panic("reflect.Value.setRunes of non-rune slice")
	}
	*(*[]rune)(v.val) = x
}
// SetComplex sets v's underlying value to x.
// It panics if v's Kind is not Complex64 or Complex128, or if CanSet() is false.
func (v Value) SetComplex(x complex128) {
	v.mustBeAssignable()
	switch k := v.kind(); k {
	case Complex64:
		*(*complex64)(v.val) = complex64(x)
	case Complex128:
		*(*complex128)(v.val) = x
	default:
		panic(&ValueError{"reflect.Value.SetComplex", k})
	}
}
// SetFloat sets v's underlying value to x.
// It panics if v's Kind is not Float32 or Float64, or if CanSet() is false.
func (v Value) SetFloat(x float64) {
	v.mustBeAssignable()
	switch k := v.kind(); k {
	case Float32:
		*(*float32)(v.val) = float32(x)
	case Float64:
		*(*float64)(v.val) = x
	default:
		panic(&ValueError{"reflect.Value.SetFloat", k})
	}
}
// SetInt sets v's underlying value to x.
// It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64, or if CanSet() is false.
func (v Value) SetInt(x int64) {
	v.mustBeAssignable()
	// Assignable values are addressable, so v.val points at the data;
	// narrower kinds silently truncate x.
	switch k := v.kind(); k {
	default:
		panic(&ValueError{"reflect.Value.SetInt", k})
	case Int:
		*(*int)(v.val) = int(x)
	case Int8:
		*(*int8)(v.val) = int8(x)
	case Int16:
		*(*int16)(v.val) = int16(x)
	case Int32:
		*(*int32)(v.val) = int32(x)
	case Int64:
		*(*int64)(v.val) = x
	}
}
// SetLen sets v's length to n.
// It panics if v's Kind is not Slice or if n is negative or
// greater than the capacity of the slice.
func (v Value) SetLen(n int) {
	v.mustBeAssignable()
	v.mustBe(Slice)
	// Edit the slice header in place; the data pointer is untouched.
	s := (*SliceHeader)(v.val)
	if n < 0 || n > int(s.Cap) {
		panic("reflect: slice length out of range in SetLen")
	}
	s.Len = n
}
// SetCap sets v's capacity to n.
// It panics if v's Kind is not Slice or if n is smaller than the length or
// greater than the capacity of the slice.
func (v Value) SetCap(n int) {
	v.mustBeAssignable()
	v.mustBe(Slice)
	// Edit the slice header in place; capacity can only shrink.
	s := (*SliceHeader)(v.val)
	if n < int(s.Len) || n > int(s.Cap) {
		panic("reflect: slice capacity out of range in SetCap")
	}
	s.Cap = n
}
// SetMapIndex sets the value associated with key in the map v to val.
// It panics if v's Kind is not Map.
// If val is the zero Value, SetMapIndex deletes the key from the map.
// As in Go, key's value must be assignable to the map's key type,
// and val's value must be assignable to the map's value type.
func (v Value) SetMapIndex(key, val Value) {
	v.mustBe(Map)
	v.mustBeExported()
	key.mustBeExported()
	tt := (*mapType)(unsafe.Pointer(v.typ))
	key = key.assignTo("reflect.Value.SetMapIndex", tt.key, nil)
	// val.typ == nil means the zero Value: delete rather than assign.
	if val.typ != nil {
		val.mustBeExported()
		val = val.assignTo("reflect.Value.SetMapIndex", tt.elem, nil)
	}
	// The final bool tells the runtime whether this is a store or a delete.
	mapassign(v.typ, v.iword(), key.iword(), val.iword(), val.typ != nil)
}
// SetUint sets v's underlying value to x.
// It panics if v's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64, or if CanSet() is false.
func (v Value) SetUint(x uint64) {
	v.mustBeAssignable()
	// Assignable values are addressable, so v.val points at the data;
	// narrower kinds silently truncate x.
	switch k := v.kind(); k {
	default:
		panic(&ValueError{"reflect.Value.SetUint", k})
	case Uint:
		*(*uint)(v.val) = uint(x)
	case Uint8:
		*(*uint8)(v.val) = uint8(x)
	case Uint16:
		*(*uint16)(v.val) = uint16(x)
	case Uint32:
		*(*uint32)(v.val) = uint32(x)
	case Uint64:
		*(*uint64)(v.val) = x
	case Uintptr:
		*(*uintptr)(v.val) = uintptr(x)
	}
}
// SetPointer sets the unsafe.Pointer value v to x.
// It panics if v's Kind is not UnsafePointer.
func (v Value) SetPointer(x unsafe.Pointer) {
	v.mustBeAssignable()
	v.mustBe(UnsafePointer)
	// Assignable values are addressable, so v.val points at the data.
	*(*unsafe.Pointer)(v.val) = x
}
// SetString sets v's underlying value to x.
// It panics if v's Kind is not String or if CanSet() is false.
func (v Value) SetString(x string) {
	v.mustBeAssignable()
	v.mustBe(String)
	// Assignable implies indirect: v.val points at the string header.
	*(*string)(v.val) = x
}
// Slice returns v[i:j].
// It panics if v's Kind is not Array, Slice or String, or if v is an unaddressable array,
// or if the indexes are out of bounds.
func (v Value) Slice(i, j int) Value {
	// cap, typ, and base describe the sliceable region; they are
	// filled in per-kind below, then used by the common tail.
	var (
		cap  int
		typ  *sliceType
		base unsafe.Pointer
	)
	switch kind := v.kind(); kind {
	default:
		panic(&ValueError{"reflect.Value.Slice", kind})
	case Array:
		if v.flag&flagAddr == 0 {
			panic("reflect.Value.Slice: slice of unaddressable array")
		}
		tt := (*arrayType)(unsafe.Pointer(v.typ))
		cap = int(tt.len)
		typ = (*sliceType)(unsafe.Pointer(tt.slice))
		base = v.val
	case Slice:
		typ = (*sliceType)(unsafe.Pointer(v.typ))
		s := (*SliceHeader)(v.val)
		base = unsafe.Pointer(s.Data)
		cap = s.Cap
	case String:
		// Strings are a special case: build a new string header
		// sharing the original data and return early.
		s := (*StringHeader)(v.val)
		if i < 0 || j < i || j > s.Len {
			panic("reflect.Value.Slice: string slice index out of bounds")
		}
		var x string
		val := (*StringHeader)(unsafe.Pointer(&x))
		val.Data = s.Data + uintptr(i)
		val.Len = j - i
		return Value{v.typ, unsafe.Pointer(&x), v.flag}
	}
	if i < 0 || j < i || j > cap {
		panic("reflect.Value.Slice: slice index out of bounds")
	}
	// Declare slice so that gc can see the base pointer in it.
	var x []unsafe.Pointer
	// Reinterpret as *SliceHeader to edit.
	s := (*SliceHeader)(unsafe.Pointer(&x))
	s.Data = uintptr(base) + uintptr(i)*typ.elem.Size()
	s.Len = j - i
	s.Cap = cap - i
	// Result keeps v's read-only bit; slice headers are always
	// stored indirectly.
	fl := v.flag&flagRO | flagIndir | flag(Slice)<<flagKindShift
	return Value{typ.common(), unsafe.Pointer(&x), fl}
}
// Slice3 is the 3-index form of the slice operation: it returns v[i:j:k].
// It panics if v's Kind is not Array or Slice, or if v is an unaddressable array,
// or if the indexes are out of bounds.
func (v Value) Slice3(i, j, k int) Value {
	// cap, typ, and base describe the sliceable region; they are
	// filled in per-kind below, then used by the common tail.
	var (
		cap  int
		typ  *sliceType
		base unsafe.Pointer
	)
	switch kind := v.kind(); kind {
	default:
		panic(&ValueError{"reflect.Value.Slice3", kind})
	case Array:
		if v.flag&flagAddr == 0 {
			// Fix: name the actual entry point in the message.
			// (Previously said "reflect.Value.Slice", copied from Slice.)
			panic("reflect.Value.Slice3: slice of unaddressable array")
		}
		tt := (*arrayType)(unsafe.Pointer(v.typ))
		cap = int(tt.len)
		typ = (*sliceType)(unsafe.Pointer(tt.slice))
		base = v.val
	case Slice:
		typ = (*sliceType)(unsafe.Pointer(v.typ))
		s := (*SliceHeader)(v.val)
		base = unsafe.Pointer(s.Data)
		cap = s.Cap
	}
	// Full three-index bounds check: 0 <= i <= j <= k <= cap.
	if i < 0 || j < i || k < j || k > cap {
		panic("reflect.Value.Slice3: slice index out of bounds")
	}
	// Declare slice so that the garbage collector
	// can see the base pointer in it.
	var x []unsafe.Pointer
	// Reinterpret as *SliceHeader to edit.
	s := (*SliceHeader)(unsafe.Pointer(&x))
	s.Data = uintptr(base) + uintptr(i)*typ.elem.Size()
	s.Len = j - i
	s.Cap = k - i
	// Result keeps v's read-only bit; slice headers are always
	// stored indirectly.
	fl := v.flag&flagRO | flagIndir | flag(Slice)<<flagKindShift
	return Value{typ.common(), unsafe.Pointer(&x), fl}
}
// String returns the string v's underlying value, as a string.
// String is a special case because of Go's String method convention.
// Unlike the other getters, it does not panic if v's Kind is not String.
// Instead, it returns a string of the form "<T value>" where T is v's type.
func (v Value) String() string {
	k := v.kind()
	if k == Invalid {
		return "<invalid Value>"
	}
	if k == String {
		return *(*string)(v.val)
	}
	// If you call String on a reflect.Value of other type, it's better to
	// print something than to panic. Useful in debugging.
	return "<" + v.typ.String() + " Value>"
}
// TryRecv attempts to receive a value from the channel v but will not block.
// It panics if v's Kind is not Chan.
// If the receive cannot finish without blocking, x is the zero Value.
// The boolean ok is true if the value x corresponds to a send
// on the channel, false if it is a zero value received because the channel is closed.
func (v Value) TryRecv() (x Value, ok bool) {
	v.mustBe(Chan)
	v.mustBeExported()
	// nb=true selects the non-blocking receive path.
	return v.recv(true)
}
// TrySend attempts to send x on the channel v but will not block.
// It panics if v's Kind is not Chan.
// It returns true if the value was sent, false otherwise.
// As in Go, x's value must be assignable to the channel's element type.
func (v Value) TrySend(x Value) bool {
	v.mustBe(Chan)
	v.mustBeExported()
	// nb=true selects the non-blocking send path.
	return v.send(x, true)
}
// Type returns v's type.
// It panics when called on the zero Value.
func (v Value) Type() Type {
	f := v.flag
	if f == 0 {
		panic(&ValueError{"reflect.Value.Type", Invalid})
	}
	if f&flagMethod == 0 {
		// Easy case
		return v.typ
	}
	// Method value.
	// v.typ describes the receiver, not the method type.
	// The method index lives in the top bits of the flag word.
	i := int(v.flag) >> flagMethodShift
	if v.typ.Kind() == Interface {
		// Method on interface.
		tt := (*interfaceType)(unsafe.Pointer(v.typ))
		if i < 0 || i >= len(tt.methods) {
			panic("reflect: internal error: invalid method index")
		}
		m := &tt.methods[i]
		return m.typ
	}
	// Method on concrete type.
	ut := v.typ.uncommon()
	if ut == nil || i < 0 || i >= len(ut.methods) {
		panic("reflect: internal error: invalid method index")
	}
	m := &ut.methods[i]
	return m.mtyp
}
// Uint returns v's underlying value, as a uint64.
// It panics if v's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64.
func (v Value) Uint() uint64 {
	k := v.kind()
	// p points at the data: either v.val itself (indirect) or the
	// word stored inline in v.val (direct).
	var p unsafe.Pointer
	if v.flag&flagIndir != 0 {
		p = v.val
	} else {
		// The escape analysis is good enough that &v.val
		// does not trigger a heap allocation.
		p = unsafe.Pointer(&v.val)
	}
	switch k {
	case Uint:
		return uint64(*(*uint)(p))
	case Uint8:
		return uint64(*(*uint8)(p))
	case Uint16:
		return uint64(*(*uint16)(p))
	case Uint32:
		return uint64(*(*uint32)(p))
	case Uint64:
		return uint64(*(*uint64)(p))
	case Uintptr:
		return uint64(*(*uintptr)(p))
	}
	panic(&ValueError{"reflect.Value.Uint", k})
}
// UnsafeAddr returns a pointer to v's data.
// It is for advanced clients that also import the "unsafe" package.
// It panics if v is not addressable.
func (v Value) UnsafeAddr() uintptr {
	if v.typ == nil {
		panic(&ValueError{"reflect.Value.UnsafeAddr", Invalid})
	}
	if v.flag&flagAddr == 0 {
		panic("reflect.Value.UnsafeAddr of unaddressable value")
	}
	// flagAddr implies flagIndir, so v.val is the address of the data.
	return uintptr(v.val)
}
// StringHeader is the runtime representation of a string.
// It cannot be used safely or portably and its representation may
// change in a later release.
// Moreover, the Data field is not sufficient to guarantee the data
// it references will not be garbage collected, so programs must keep
// a separate, correctly typed pointer to the underlying data.
type StringHeader struct {
	Data uintptr // address of the string bytes (not a GC-visible pointer)
	Len  int     // length in bytes
}

// SliceHeader is the runtime representation of a slice.
// It cannot be used safely or portably and its representation may
// change in a later release.
// Moreover, the Data field is not sufficient to guarantee the data
// it references will not be garbage collected, so programs must keep
// a separate, correctly typed pointer to the underlying data.
type SliceHeader struct {
	Data uintptr // address of the backing array (not a GC-visible pointer)
	Len  int     // number of elements
	Cap  int     // capacity of the backing array
}
// typesMustMatch panics with a message prefixed by what unless
// t1 and t2 are the same type.
func typesMustMatch(what string, t1, t2 Type) {
	if t1 == t2 {
		return
	}
	panic(what + ": " + t1.String() + " != " + t2.String())
}
// grow grows the slice s so that it can hold extra more values, allocating
// more capacity if needed. It also returns the old and new slice lengths.
func grow(s Value, extra int) (Value, int, int) {
	i0 := s.Len()
	i1 := i0 + extra
	if i1 < i0 {
		// i0 + extra overflowed int.
		panic("reflect.Append: slice overflow")
	}
	m := s.Cap()
	if i1 <= m {
		// Fits in the existing capacity; just extend the length.
		return s.Slice(0, i1), i0, i1
	}
	if m == 0 {
		m = extra
	} else {
		// Amortized growth: double while small, grow by 25% once
		// the old length reaches 1024.
		for m < i1 {
			if i0 < 1024 {
				m += m
			} else {
				m += m / 4
			}
		}
	}
	t := MakeSlice(s.Type(), i1, m)
	Copy(t, s)
	return t, i0, i1
}
// Append appends the values x to a slice s and returns the resulting slice.
// As in Go, each x's value must be assignable to the slice's element type.
func Append(s Value, x ...Value) Value {
	s.mustBe(Slice)
	// Reserve room for len(x) extra elements, then copy them in.
	grown, lo, hi := grow(s, len(x))
	for idx := lo; idx < hi; idx++ {
		grown.Index(idx).Set(x[idx-lo])
	}
	return grown
}
// AppendSlice appends a slice t to a slice s and returns the resulting slice.
// The slices s and t must have the same element type.
func AppendSlice(s, t Value) Value {
	s.mustBe(Slice)
	t.mustBe(Slice)
	typesMustMatch("reflect.AppendSlice", s.Type().Elem(), t.Type().Elem())
	// Reserve room for t's elements, then bulk-copy them into the tail.
	grown, lo, hi := grow(s, t.Len())
	Copy(grown.Slice(lo, hi), t)
	return grown
}
// Copy copies the contents of src into dst until either
// dst has been filled or src has been exhausted.
// It returns the number of elements copied.
// Dst and src each must have kind Slice or Array, and
// dst and src must have the same element type.
func Copy(dst, src Value) int {
	dk := dst.kind()
	if dk != Array && dk != Slice {
		panic(&ValueError{"reflect.Copy", dk})
	}
	// Copying into an array overwrites it wholesale, so it must be
	// assignable; a slice destination only needs to be exported.
	if dk == Array {
		dst.mustBeAssignable()
	}
	dst.mustBeExported()
	sk := src.kind()
	if sk != Array && sk != Slice {
		panic(&ValueError{"reflect.Copy", sk})
	}
	src.mustBeExported()
	de := dst.typ.Elem()
	se := src.typ.Elem()
	typesMustMatch("reflect.Copy", de, se)
	// n = min(len(dst), len(src)).
	n := dst.Len()
	if sn := src.Len(); n > sn {
		n = sn
	}
	// If sk is an in-line array, cannot take its address.
	// Instead, copy element by element.
	if src.flag&flagIndir == 0 {
		for i := 0; i < n; i++ {
			dst.Index(i).Set(src.Index(i))
		}
		return n
	}
	// Copy via memmove.
	var da, sa unsafe.Pointer
	if dk == Array {
		da = dst.val
	} else {
		da = unsafe.Pointer((*SliceHeader)(dst.val).Data)
	}
	if sk == Array {
		sa = src.val
	} else {
		sa = unsafe.Pointer((*SliceHeader)(src.val).Data)
	}
	memmove(da, sa, uintptr(n)*de.Size())
	return n
}
// A runtimeSelect is a single case passed to rselect.
// This must match ../runtime/chan.c:/runtimeSelect
type runtimeSelect struct {
	dir uintptr // 0, SendDir, or RecvDir
	typ *rtype  // channel type
	ch  iword   // interface word for channel
	val iword   // interface word for value (for SendDir)
}
// rselect runs a select. It returns the index of the chosen case,
// and if the case was a receive, the interface word of the received
// value and the conventional OK bool to indicate whether the receive
// corresponds to a sent value.
// It is implemented in the runtime (assembly/C), hence no body here.
func rselect([]runtimeSelect) (chosen int, recv iword, recvOK bool)
// A SelectDir describes the communication direction of a select case.
type SelectDir int

// NOTE: These values must match ../runtime/chan.c:/SelectDir.
// The zero value is deliberately unused so a zero SelectDir is invalid.
const (
	_             SelectDir = iota
	SelectSend              // case Chan <- Send
	SelectRecv              // case <-Chan:
	SelectDefault           // default
)
// A SelectCase describes a single case in a select operation.
// The kind of case depends on Dir, the communication direction.
//
// If Dir is SelectDefault, the case represents a default case.
// Chan and Send must be zero Values.
//
// If Dir is SelectSend, the case represents a send operation.
// Normally Chan's underlying value must be a channel, and Send's underlying value must be
// assignable to the channel's element type. As a special case, if Chan is a zero Value,
// then the case is ignored, and the field Send will also be ignored and may be either zero
// or non-zero.
//
// If Dir is SelectRecv, the case represents a receive operation.
// Normally Chan's underlying value must be a channel and Send must be a zero Value.
// If Chan is a zero Value, then the case is ignored, but Send must still be a zero Value.
// When a receive operation is selected, the received Value is returned by Select.
//
type SelectCase struct {
	Dir  SelectDir // direction of case
	Chan Value     // channel to use (for send or receive)
	Send Value     // value to send (for send)
}
// Select executes a select operation described by the list of cases.
// Like the Go select statement, it blocks until at least one of the cases
// can proceed, makes a uniform pseudo-random choice,
// and then executes that case. It returns the index of the chosen case
// and, if that case was a receive operation, the value received and a
// boolean indicating whether the value corresponds to a send on the channel
// (as opposed to a zero value received because the channel is closed).
func Select(cases []SelectCase) (chosen int, recv Value, recvOK bool) {
	// NOTE: Do not trust that caller is not modifying cases data underfoot.
	// The range is safe because the caller cannot modify our copy of the len
	// and each iteration makes its own copy of the value c.
	runcases := make([]runtimeSelect, len(cases))
	haveDefault := false
	for i, c := range cases {
		rc := &runcases[i]
		rc.dir = uintptr(c.Dir)
		switch c.Dir {
		default:
			panic("reflect.Select: invalid Dir")
		case SelectDefault: // default
			// At most one default case, with no channel or value.
			if haveDefault {
				panic("reflect.Select: multiple default cases")
			}
			haveDefault = true
			if c.Chan.IsValid() {
				panic("reflect.Select: default case has Chan value")
			}
			if c.Send.IsValid() {
				panic("reflect.Select: default case has Send value")
			}
		case SelectSend:
			ch := c.Chan
			if !ch.IsValid() {
				// Zero Chan: the case is ignored.
				break
			}
			ch.mustBe(Chan)
			ch.mustBeExported()
			tt := (*chanType)(unsafe.Pointer(ch.typ))
			if ChanDir(tt.dir)&SendDir == 0 {
				panic("reflect.Select: SendDir case using recv-only channel")
			}
			rc.ch = ch.iword()
			rc.typ = &tt.rtype
			v := c.Send
			if !v.IsValid() {
				panic("reflect.Select: SendDir case missing Send value")
			}
			v.mustBeExported()
			// Convert the value to the channel's element type.
			v = v.assignTo("reflect.Select", tt.elem, nil)
			rc.val = v.iword()
		case SelectRecv:
			if c.Send.IsValid() {
				panic("reflect.Select: RecvDir case has Send value")
			}
			ch := c.Chan
			if !ch.IsValid() {
				// Zero Chan: the case is ignored.
				break
			}
			ch.mustBe(Chan)
			ch.mustBeExported()
			tt := (*chanType)(unsafe.Pointer(ch.typ))
			rc.typ = &tt.rtype
			if ChanDir(tt.dir)&RecvDir == 0 {
				panic("reflect.Select: RecvDir case using send-only channel")
			}
			rc.ch = ch.iword()
		}
	}
	chosen, word, recvOK := rselect(runcases)
	if runcases[chosen].dir == uintptr(SelectRecv) {
		// Wrap the received interface word back into a Value of the
		// channel's element type.
		tt := (*chanType)(unsafe.Pointer(runcases[chosen].typ))
		typ := tt.elem
		fl := flag(typ.Kind()) << flagKindShift
		if typ.size > ptrSize {
			fl |= flagIndir
		}
		recv = Value{typ, unsafe.Pointer(word), fl}
	}
	return chosen, recv, recvOK
}
/*
 * constructors
 */

// implemented in package runtime

// unsafe_New allocates one zeroed value of the given type;
// unsafe_NewArray allocates a zeroed array of n such values.
func unsafe_New(*rtype) unsafe.Pointer
func unsafe_NewArray(*rtype, int) unsafe.Pointer
// MakeSlice creates a new zero-initialized slice value
// for the specified slice type, length, and capacity.
// It panics if typ is not a slice type, if len or cap is negative,
// or if len > cap.
func MakeSlice(typ Type, len, cap int) Value {
	if typ.Kind() != Slice {
		panic("reflect.MakeSlice of non-slice type")
	}
	if len < 0 {
		panic("reflect.MakeSlice: negative len")
	}
	if cap < 0 {
		panic("reflect.MakeSlice: negative cap")
	}
	if len > cap {
		panic("reflect.MakeSlice: len > cap")
	}
	// Declare slice so that gc can see the base pointer in it.
	var x []unsafe.Pointer
	// Reinterpret as *SliceHeader to edit.
	s := (*SliceHeader)(unsafe.Pointer(&x))
	s.Data = uintptr(unsafe_NewArray(typ.Elem().(*rtype), cap))
	s.Len = len
	s.Cap = cap
	return Value{typ.common(), unsafe.Pointer(&x), flagIndir | flag(Slice)<<flagKindShift}
}
// MakeChan creates a new channel with the specified type and buffer size.
// It panics if typ is not a bidirectional channel type or buffer is negative.
func MakeChan(typ Type, buffer int) Value {
	if typ.Kind() != Chan {
		panic("reflect.MakeChan of non-chan type")
	}
	if buffer < 0 {
		panic("reflect.MakeChan: negative buffer size")
	}
	if typ.ChanDir() != BothDir {
		panic("reflect.MakeChan: unidirectional channel type")
	}
	ch := makechan(typ.(*rtype), uint64(buffer))
	// Channels fit in a word, so the value is stored directly (no flagIndir).
	return Value{typ.common(), unsafe.Pointer(ch), flag(Chan) << flagKindShift}
}
// MakeMap creates a new map of the specified type.
// It panics if typ is not a map type.
func MakeMap(typ Type) Value {
	if typ.Kind() != Map {
		panic("reflect.MakeMap of non-map type")
	}
	// Maps fit in a word, so the value is stored directly (no flagIndir).
	return Value{typ.common(), unsafe.Pointer(makemap(typ.(*rtype))), flag(Map) << flagKindShift}
}
// Indirect returns the value that v points to.
// If v is a nil pointer, Indirect returns a zero Value.
// If v is not a pointer, Indirect returns v.
func Indirect(v Value) Value {
	if v.Kind() == Ptr {
		return v.Elem()
	}
	return v
}
// ValueOf returns a new Value initialized to the concrete value
// stored in the interface i. ValueOf(nil) returns the zero Value.
func ValueOf(i interface{}) Value {
	if i == nil {
		return Value{}
	}
	// TODO(rsc): Eliminate this terrible hack.
	// In the call to packValue, eface.typ doesn't escape,
	// and eface.word is an integer. So it looks like
	// i (= eface) doesn't escape. But really it does,
	// because eface.word is actually a pointer.
	escapes(i)
	// For an interface value with the noAddr bit set,
	// the representation is identical to an empty interface.
	// Decode the interface header to recover type and data word.
	eface := *(*emptyInterface)(unsafe.Pointer(&i))
	typ := eface.typ
	fl := flag(typ.Kind()) << flagKindShift
	// Values bigger than a word are stored indirectly.
	if typ.size > ptrSize {
		fl |= flagIndir
	}
	return Value{typ, unsafe.Pointer(eface.word), fl}
}
// Zero returns a Value representing the zero value for the specified type.
// The result is different from the zero value of the Value struct,
// which represents no value at all.
// For example, Zero(TypeOf(42)) returns a Value with Kind Int and value 0.
// The returned value is neither addressable nor settable.
func Zero(typ Type) Value {
	if typ == nil {
		panic("reflect: Zero(nil)")
	}
	t := typ.common()
	fl := flag(t.Kind()) << flagKindShift
	if t.size > ptrSize {
		// Too big for a word: allocate zeroed storage and point at it.
		return Value{t, unsafe_New(typ.(*rtype)), fl | flagIndir}
	}
	// Fits in a word: a nil word already is the zero value.
	return Value{t, nil, fl}
}
// New returns a Value representing a pointer to a new zero value
// for the specified type. That is, the returned Value's Type is PtrTo(t).
func New(typ Type) Value {
	if typ == nil {
		panic("reflect: New(nil)")
	}
	return Value{typ.common().ptrTo(), unsafe_New(typ.(*rtype)), flag(Ptr) << flagKindShift}
}
// NewAt returns a Value representing a pointer to a value of the
// specified type, using p as that pointer.
func NewAt(typ Type, p unsafe.Pointer) Value {
	fl := flag(Ptr) << flagKindShift
	return Value{typ.common().ptrTo(), p, fl}
}
// assignTo returns a value v that can be assigned directly to typ.
// It panics if v is not assignable to typ.
// For a conversion to an interface type, target is a suggested scratch space to use.
func (v Value) assignTo(context string, dst *rtype, target *interface{}) Value {
	if v.flag&flagMethod != 0 {
		v = makeMethodValue(context, v)
	}
	switch {
	case directlyAssignable(dst, v.typ):
		// Overwrite type so that they match.
		// Same memory layout, so no harm done.
		v.typ = dst
		fl := v.flag & (flagRO | flagAddr | flagIndir)
		fl |= flag(dst.Kind()) << flagKindShift
		return Value{dst, v.val, fl}
	case implements(dst, v.typ):
		if target == nil {
			target = new(interface{})
		}
		x := valueInterface(v, false)
		// An empty interface stores the value directly; a non-empty
		// interface goes through ifaceE2I to build the itab.
		if dst.NumMethod() == 0 {
			*target = x
		} else {
			ifaceE2I(dst, x, unsafe.Pointer(target))
		}
		return Value{dst, unsafe.Pointer(target), flagIndir | flag(Interface)<<flagKindShift}
	}
	// Failed.
	panic(context + ": value of type " + v.typ.String() + " is not assignable to type " + dst.String())
}
// Convert returns the value v converted to type t.
// If the usual Go conversion rules do not allow conversion
// of the value v to type t, Convert panics.
func (v Value) Convert(t Type) Value {
	if v.flag&flagMethod != 0 {
		v = makeMethodValue("Convert", v)
	}
	// convertOp returns nil when no conversion exists.
	op := convertOp(t.common(), v.typ)
	if op == nil {
		panic("reflect.Value.Convert: value of type " + v.typ.String() + " cannot be converted to type " + t.String())
	}
	return op(v, t)
}
// convertOp returns the function to convert a value of type src
// to a value of type dst. If the conversion is illegal, convertOp returns nil.
func convertOp(dst, src *rtype) func(Value, Type) Value {
	// First dispatch on the source kind for the numeric/string rules.
	switch src.Kind() {
	case Int, Int8, Int16, Int32, Int64:
		switch dst.Kind() {
		case Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
			return cvtInt
		case Float32, Float64:
			return cvtIntFloat
		case String:
			return cvtIntString
		}
	case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
		switch dst.Kind() {
		case Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
			return cvtUint
		case Float32, Float64:
			return cvtUintFloat
		case String:
			return cvtUintString
		}
	case Float32, Float64:
		switch dst.Kind() {
		case Int, Int8, Int16, Int32, Int64:
			return cvtFloatInt
		case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
			return cvtFloatUint
		case Float32, Float64:
			return cvtFloat
		}
	case Complex64, Complex128:
		switch dst.Kind() {
		case Complex64, Complex128:
			return cvtComplex
		}
	case String:
		// string -> []byte / []rune, but only for unnamed element types
		// (PkgPath() == "" means a predeclared/unnamed type).
		if dst.Kind() == Slice && dst.Elem().PkgPath() == "" {
			switch dst.Elem().Kind() {
			case Uint8:
				return cvtStringBytes
			case Int32:
				return cvtStringRunes
			}
		}
	case Slice:
		// []byte / []rune -> string, same unnamed-element restriction.
		if dst.Kind() == String && src.Elem().PkgPath() == "" {
			switch src.Elem().Kind() {
			case Uint8:
				return cvtBytesString
			case Int32:
				return cvtRunesString
			}
		}
	}
	// dst and src have same underlying type.
	if haveIdenticalUnderlyingType(dst, src) {
		return cvtDirect
	}
	// dst and src are unnamed pointer types with same underlying base type.
	if dst.Kind() == Ptr && dst.Name() == "" &&
		src.Kind() == Ptr && src.Name() == "" &&
		haveIdenticalUnderlyingType(dst.Elem().common(), src.Elem().common()) {
		return cvtDirect
	}
	if implements(dst, src) {
		if src.Kind() == Interface {
			return cvtI2I
		}
		return cvtT2I
	}
	return nil
}
// makeInt returns a Value of type t equal to bits (possibly truncated),
// where t is a signed or unsigned int type.
// f carries flag bits (e.g. flagRO) to merge into the result.
func makeInt(f flag, bits uint64, t Type) Value {
	typ := t.common()
	if typ.size > ptrSize {
		// Assume ptrSize >= 4, so this must be uint64.
		ptr := unsafe_New(typ)
		*(*uint64)(unsafe.Pointer(ptr)) = bits
		return Value{typ, ptr, f | flagIndir | flag(typ.Kind())<<flagKindShift}
	}
	// Fits in a word: pack the low typ.size bytes into an iword.
	var w iword
	switch typ.size {
	case 1:
		*(*uint8)(unsafe.Pointer(&w)) = uint8(bits)
	case 2:
		*(*uint16)(unsafe.Pointer(&w)) = uint16(bits)
	case 4:
		*(*uint32)(unsafe.Pointer(&w)) = uint32(bits)
	case 8:
		*(*uint64)(unsafe.Pointer(&w)) = uint64(bits)
	}
	return Value{typ, unsafe.Pointer(w), f | flag(typ.Kind())<<flagKindShift}
}
// makeFloat returns a Value of type t equal to v (possibly truncated to float32),
// where t is a float32 or float64 type.
// f carries flag bits (e.g. flagRO) to merge into the result.
func makeFloat(f flag, v float64, t Type) Value {
	typ := t.common()
	if typ.size > ptrSize {
		// Assume ptrSize >= 4, so this must be float64.
		ptr := unsafe_New(typ)
		*(*float64)(unsafe.Pointer(ptr)) = v
		return Value{typ, ptr, f | flagIndir | flag(typ.Kind())<<flagKindShift}
	}
	// Fits in a word: pack the float into an iword.
	var w iword
	switch typ.size {
	case 4:
		*(*float32)(unsafe.Pointer(&w)) = float32(v)
	case 8:
		*(*float64)(unsafe.Pointer(&w)) = v
	}
	return Value{typ, unsafe.Pointer(w), f | flag(typ.Kind())<<flagKindShift}
}
// makeComplex returns a Value of type t equal to v (possibly truncated to complex64),
// where t is a complex64 or complex128 type.
// f carries flag bits (e.g. flagRO) to merge into the result.
func makeComplex(f flag, v complex128, t Type) Value {
	typ := t.common()
	if typ.size > ptrSize {
		// Too big for a word: allocate and store indirectly.
		ptr := unsafe_New(typ)
		switch typ.size {
		case 8:
			*(*complex64)(unsafe.Pointer(ptr)) = complex64(v)
		case 16:
			*(*complex128)(unsafe.Pointer(ptr)) = v
		}
		return Value{typ, ptr, f | flagIndir | flag(typ.Kind())<<flagKindShift}
	}
	// Assume ptrSize <= 8 so this must be complex64.
	var w iword
	*(*complex64)(unsafe.Pointer(&w)) = complex64(v)
	return Value{typ, unsafe.Pointer(w), f | flag(typ.Kind())<<flagKindShift}
}
// makeString returns a Value of string type t holding v,
// with flag bits f (e.g. flagRO) merged in and flagAddr cleared
// so the result is not settable.
func makeString(f flag, v string, t Type) Value {
	ret := New(t).Elem()
	ret.SetString(v)
	ret.flag = ret.flag&^flagAddr | f
	return ret
}
// makeBytes returns a Value of byte-slice type t holding v,
// with flag bits f merged in and flagAddr cleared.
func makeBytes(f flag, v []byte, t Type) Value {
	ret := New(t).Elem()
	ret.SetBytes(v)
	ret.flag = ret.flag&^flagAddr | f
	return ret
}
// makeRunes returns a Value of rune-slice type t holding v,
// with flag bits f merged in and flagAddr cleared.
func makeRunes(f flag, v []rune, t Type) Value {
	ret := New(t).Elem()
	ret.setRunes(v)
	ret.flag = ret.flag&^flagAddr | f
	return ret
}
// These conversion functions are returned by convertOp
// for classes of conversions. For example, the first function, cvtInt,
// takes any value v of signed int type and returns the value converted
// to type t, where t is any signed or unsigned int type.

// convertOp: intXX -> [u]intXX
func cvtInt(v Value, t Type) Value {
	ro := v.flag & flagRO
	return makeInt(ro, uint64(v.Int()), t)
}
// convertOp: uintXX -> [u]intXX
func cvtUint(v Value, t Type) Value {
	ro := v.flag & flagRO
	return makeInt(ro, v.Uint(), t)
}
// convertOp: floatXX -> intXX
func cvtFloatInt(v Value, t Type) Value {
	ro := v.flag & flagRO
	return makeInt(ro, uint64(int64(v.Float())), t)
}
// convertOp: floatXX -> uintXX
func cvtFloatUint(v Value, t Type) Value {
	ro := v.flag & flagRO
	return makeInt(ro, uint64(v.Float()), t)
}
// convertOp: intXX -> floatXX
func cvtIntFloat(v Value, t Type) Value {
	ro := v.flag & flagRO
	return makeFloat(ro, float64(v.Int()), t)
}
// convertOp: uintXX -> floatXX
func cvtUintFloat(v Value, t Type) Value {
	ro := v.flag & flagRO
	return makeFloat(ro, float64(v.Uint()), t)
}
// convertOp: floatXX -> floatXX
func cvtFloat(v Value, t Type) Value {
	ro := v.flag & flagRO
	return makeFloat(ro, v.Float(), t)
}
// convertOp: complexXX -> complexXX
func cvtComplex(v Value, t Type) Value {
	ro := v.flag & flagRO
	return makeComplex(ro, v.Complex(), t)
}
// convertOp: intXX -> string
func cvtIntString(v Value, t Type) Value {
	ro := v.flag & flagRO
	return makeString(ro, string(v.Int()), t)
}
// convertOp: uintXX -> string
func cvtUintString(v Value, t Type) Value {
	ro := v.flag & flagRO
	return makeString(ro, string(v.Uint()), t)
}
// convertOp: []byte -> string
func cvtBytesString(v Value, t Type) Value {
	ro := v.flag & flagRO
	return makeString(ro, string(v.Bytes()), t)
}
// convertOp: string -> []byte
func cvtStringBytes(v Value, t Type) Value {
	ro := v.flag & flagRO
	return makeBytes(ro, []byte(v.String()), t)
}
// convertOp: []rune -> string
func cvtRunesString(v Value, t Type) Value {
	ro := v.flag & flagRO
	return makeString(ro, string(v.runes()), t)
}
// convertOp: string -> []rune
func cvtStringRunes(v Value, t Type) Value {
	ro := v.flag & flagRO
	return makeRunes(ro, []rune(v.String()), t)
}
// convertOp: direct copy
// Used when src and dst share an identical underlying representation,
// so only the type descriptor (and flags) need to change.
func cvtDirect(v Value, typ Type) Value {
	f := v.flag
	t := typ.common()
	val := v.val
	if f&flagAddr != 0 {
		// indirect, mutable word - make a copy
		// (so the converted value does not alias the original storage).
		ptr := unsafe_New(t)
		memmove(ptr, val, t.size)
		val = ptr
		f &^= flagAddr
	}
	return Value{t, val, v.flag&flagRO | f}
}
// convertOp: concrete -> interface
func cvtT2I(v Value, typ Type) Value {
	target := new(interface{})
	x := valueInterface(v, false)
	// Empty interface stores x directly; a non-empty interface
	// needs ifaceE2I to construct the method table.
	if typ.NumMethod() == 0 {
		*target = x
	} else {
		ifaceE2I(typ.(*rtype), x, unsafe.Pointer(target))
	}
	return Value{typ.common(), unsafe.Pointer(target), v.flag&flagRO | flagIndir | flag(Interface)<<flagKindShift}
}
// convertOp: interface -> interface
func cvtI2I(v Value, typ Type) Value {
	if !v.IsNil() {
		// Convert the dynamic value held in the interface.
		return cvtT2I(v.Elem(), typ)
	}
	// A nil interface converts to the zero value of the target
	// interface type, keeping the read-only flag.
	ret := Zero(typ)
	ret.flag |= v.flag & flagRO
	return ret
}
// implemented in ../pkg/runtime

// Channel operations.
func chancap(ch iword) int
func chanclose(ch iword)
func chanlen(ch iword) int
func chanrecv(t *rtype, ch iword, nb bool) (val iword, selected, received bool)
func chansend(t *rtype, ch iword, val iword, nb bool) bool

// Channel and map constructors and map operations.
func makechan(typ *rtype, size uint64) (ch iword)
func makemap(t *rtype) (m iword)
func mapaccess(t *rtype, m iword, key iword) (val iword, ok bool)
func mapassign(t *rtype, m iword, key, val iword, ok bool)
func mapiterinit(t *rtype, m iword) *byte
func mapiterkey(it *byte) (key iword, ok bool)
func mapiternext(it *byte)
func maplen(m iword) int

// Function call and interface conversion support.
func call(fn, arg unsafe.Pointer, n uint32)
func ifaceE2I(t *rtype, src interface{}, dst unsafe.Pointer)
// Dummy annotation marking that the value x escapes,
// for use in cases where the reflect code is so clever that
// the compiler cannot follow.
func escapes(x interface{}) {
	// dummy.b is always false, so the store never happens at run time,
	// but the compiler must assume x is stored in a global.
	if dummy.b {
		dummy.x = x
	}
}

// dummy exists only as the escape target for escapes above.
var dummy struct {
	b bool
	x interface{}
}
reflect: update docs; Interface can return a method value
Fixes #6460.
R=golang-dev, r
CC=golang-dev
https://golang.org/cl/13761046
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package reflect
import (
"math"
"runtime"
"strconv"
"unsafe"
)
const bigEndian = false // can be smarter if we find a big-endian machine
const ptrSize = unsafe.Sizeof((*byte)(nil))

// cannotSet is the shared panic message for writes through values
// reached via unexported struct fields.
const cannotSet = "cannot set value obtained from unexported struct field"
// TODO: This will have to go away when
// the new gc goes in.
func memmove(adst, asrc unsafe.Pointer, n uintptr) {
	dst := uintptr(adst)
	src := uintptr(asrc)
	if src < dst && src+n > dst {
		// Source overlaps destination from below:
		// byte copy backward. Careful: i is unsigned.
		for i := n; i > 0; {
			i--
			*(*byte)(unsafe.Pointer(dst + i)) = *(*byte)(unsafe.Pointer(src + i))
		}
		return
	}
	if (n|src|dst)&(ptrSize-1) != 0 {
		// Unaligned pointers or length: byte copy forward.
		for i := uintptr(0); i < n; i++ {
			*(*byte)(unsafe.Pointer(dst + i)) = *(*byte)(unsafe.Pointer(src + i))
		}
		return
	}
	// Everything word-aligned: word copy forward.
	for i := uintptr(0); i < n; i += ptrSize {
		*(*uintptr)(unsafe.Pointer(dst + i)) = *(*uintptr)(unsafe.Pointer(src + i))
	}
}
// Value is the reflection interface to a Go value.
//
// Not all methods apply to all kinds of values. Restrictions,
// if any, are noted in the documentation for each method.
// Use the Kind method to find out the kind of value before
// calling kind-specific methods. Calling a method
// inappropriate to the kind of type causes a run time panic.
//
// The zero Value represents no value.
// Its IsValid method returns false, its Kind method returns Invalid,
// its String method returns "<invalid Value>", and all other methods panic.
// Most functions and methods never return an invalid value.
// If one does, its documentation states the conditions explicitly.
//
// A Value can be used concurrently by multiple goroutines provided that
// the underlying Go value can be used concurrently for the equivalent
// direct operations.
type Value struct {
	// typ holds the type of the value represented by a Value.
	typ *rtype
	// val holds the 1-word representation of the value.
	// If flag's flagIndir bit is set, then val is a pointer to the data.
	// Otherwise val is a word holding the actual data.
	// When the data is smaller than a word, it begins at
	// the first byte (in the memory address sense) of val.
	// We use unsafe.Pointer so that the garbage collector
	// knows that val could be a pointer.
	val unsafe.Pointer
	// flag holds metadata about the value.
	// The lowest bits are flag bits:
	//	- flagRO: obtained via unexported field, so read-only
	//	- flagIndir: val holds a pointer to the data
	//	- flagAddr: v.CanAddr is true (implies flagIndir)
	//	- flagMethod: v is a method value.
	// The next five bits give the Kind of the value.
	// This repeats typ.Kind() except for method values.
	// The remaining 23+ bits give a method number for method values.
	// If flag.kind() != Func, code can assume that flagMethod is unset.
	// If typ.size > ptrSize, code can assume that flagIndir is set.
	// flag is embedded so flag's methods (kind, mustBe, ...) are
	// promoted onto Value.
	flag

	// A method value represents a curried method invocation
	// like r.Read for some receiver r. The typ+val+flag bits describe
	// the receiver r, but the flag's Kind bits say Func (methods are
	// functions), and the top bits of the flag give the method number
	// in r's type's method table.
}
// flag packs the metadata bits described on Value.flag above.
type flag uintptr

const (
	flagRO flag = 1 << iota
	flagIndir
	flagAddr
	flagMethod
	// Kind bits start immediately after the four flag bits above.
	flagKindShift        = iota
	flagKindWidth        = 5 // there are 27 kinds
	flagKindMask    flag = 1<<flagKindWidth - 1
	// Method index occupies the bits above the kind field.
	flagMethodShift      = flagKindShift + flagKindWidth
)
// kind extracts the Kind field from the flag word.
func (f flag) kind() Kind {
	return Kind((f >> flagKindShift) & flagKindMask)
}
// A ValueError occurs when a Value method is invoked on
// a Value that does not support it. Such cases are documented
// in the description of each method.
type ValueError struct {
	Method string // name of the offending method, e.g. "reflect.Value.SetInt"
	Kind   Kind   // kind the method was called on (0 means the zero Value)
}
// Error implements the error interface for ValueError.
func (e *ValueError) Error() string {
	if e.Kind != 0 {
		return "reflect: call of " + e.Method + " on " + e.Kind.String() + " Value"
	}
	// Kind 0 means the method was called on the zero Value.
	return "reflect: call of " + e.Method + " on zero Value"
}
// methodName returns the name of the calling method,
// assumed to be two stack frames above.
func methodName() string {
	pc, _, _, _ := runtime.Caller(2)
	if f := runtime.FuncForPC(pc); f != nil {
		return f.Name()
	}
	return "unknown method"
}
// An iword is the word that would be stored in an
// interface to represent a given value v. Specifically, if v is
// bigger than a pointer, its word is a pointer to v's data.
// Otherwise, its word holds the data stored
// in its leading bytes (so is not a pointer).
// Because the value sometimes holds a pointer, we use
// unsafe.Pointer to represent it, so that if iword appears
// in a struct, the garbage collector knows that might be
// a pointer.
type iword unsafe.Pointer

// iword returns v's representation as an interface word, loading
// small indirect values into a direct word when necessary.
func (v Value) iword() iword {
	if v.flag&flagIndir != 0 && v.typ.size <= ptrSize {
		// Have indirect but want direct word.
		return loadIword(v.val, v.typ.size)
	}
	return iword(v.val)
}
// NOTE: documented together with the iword type above; this method
// converts v's storage into the word an interface would hold.
func (v Value) iword() iword {
	if v.flag&flagIndir != 0 && v.typ.size <= ptrSize {
		// Have indirect but want direct word.
		return loadIword(v.val, v.typ.size)
	}
	return iword(v.val)
}
// loadIword loads n bytes at p from memory into an iword.
// n must be at most 8 (one word); each size has its own case so the
// copy is a single typed load/store.
func loadIword(p unsafe.Pointer, n uintptr) iword {
	// Run the copy ourselves instead of calling memmove
	// to avoid moving w to the heap.
	var w iword
	switch n {
	default:
		panic("reflect: internal error: loadIword of " + strconv.Itoa(int(n)) + "-byte value")
	case 0:
	case 1:
		*(*uint8)(unsafe.Pointer(&w)) = *(*uint8)(p)
	case 2:
		*(*uint16)(unsafe.Pointer(&w)) = *(*uint16)(p)
	case 3:
		*(*[3]byte)(unsafe.Pointer(&w)) = *(*[3]byte)(p)
	case 4:
		*(*uint32)(unsafe.Pointer(&w)) = *(*uint32)(p)
	case 5:
		*(*[5]byte)(unsafe.Pointer(&w)) = *(*[5]byte)(p)
	case 6:
		*(*[6]byte)(unsafe.Pointer(&w)) = *(*[6]byte)(p)
	case 7:
		*(*[7]byte)(unsafe.Pointer(&w)) = *(*[7]byte)(p)
	case 8:
		*(*uint64)(unsafe.Pointer(&w)) = *(*uint64)(p)
	}
	return w
}
// storeIword stores n bytes from w into p.
// Inverse of loadIword; only sizes 0..8 are valid.
func storeIword(p unsafe.Pointer, w iword, n uintptr) {
	// Run the copy ourselves instead of calling memmove
	// to avoid moving w to the heap.
	switch n {
	default:
		panic("reflect: internal error: storeIword of " + strconv.Itoa(int(n)) + "-byte value")
	case 0:
	case 1:
		*(*uint8)(p) = *(*uint8)(unsafe.Pointer(&w))
	case 2:
		*(*uint16)(p) = *(*uint16)(unsafe.Pointer(&w))
	case 3:
		*(*[3]byte)(p) = *(*[3]byte)(unsafe.Pointer(&w))
	case 4:
		*(*uint32)(p) = *(*uint32)(unsafe.Pointer(&w))
	case 5:
		*(*[5]byte)(p) = *(*[5]byte)(unsafe.Pointer(&w))
	case 6:
		*(*[6]byte)(p) = *(*[6]byte)(unsafe.Pointer(&w))
	case 7:
		*(*[7]byte)(p) = *(*[7]byte)(unsafe.Pointer(&w))
	case 8:
		*(*uint64)(p) = *(*uint64)(unsafe.Pointer(&w))
	}
}
// emptyInterface is the header for an interface{} value.
// Layout must match the runtime's eface representation.
type emptyInterface struct {
	typ  *rtype // dynamic type
	word iword  // value word (pointer or inline data; see iword)
}
// nonEmptyInterface is the header for a interface value with methods.
// Layout must match the runtime's iface/Itab representation.
type nonEmptyInterface struct {
	// see ../runtime/iface.c:/Itab
	itab *struct {
		ityp   *rtype // static interface type
		typ    *rtype // dynamic concrete type
		link   unsafe.Pointer
		bad    int32
		unused int32
		// 100000 is an arbitrary large bound; only the first
		// NumMethod entries are valid.
		fun [100000]unsafe.Pointer // method table
	}
	word iword
}
// mustBe panics if f's kind is not expected.
// Making this a method on flag instead of on Value
// (and embedding flag in Value) means that we can write
// the very clear v.mustBe(Bool) and have it compile into
// v.flag.mustBe(Bool), which will only bother to copy the
// single important word for the receiver.
func (f flag) mustBe(expected Kind) {
	if actual := f.kind(); actual != expected {
		panic(&ValueError{methodName(), actual})
	}
}
// mustBeExported panics if f records that the value was obtained using
// an unexported field, or if f describes the zero Value.
func (f flag) mustBeExported() {
	switch {
	case f == 0:
		panic(&ValueError{methodName(), 0})
	case f&flagRO != 0:
		panic("reflect: " + methodName() + " using value obtained using unexported field")
	}
}
// mustBeAssignable panics if f records that the value is not assignable,
// which is to say that either it was obtained using an unexported field
// or it is not addressable.
func (f flag) mustBeAssignable() {
	// Assignable means addressable and not read-only.
	switch {
	case f == 0:
		panic(&ValueError{methodName(), Invalid})
	case f&flagRO != 0:
		panic("reflect: " + methodName() + " using value obtained using unexported field")
	case f&flagAddr == 0:
		panic("reflect: " + methodName() + " using unaddressable value")
	}
}
// Addr returns a pointer value representing the address of v.
// It panics if CanAddr() returns false.
// Addr is typically used to obtain a pointer to a struct field
// or slice element in order to call a method that requires a
// pointer receiver.
func (v Value) Addr() Value {
	if v.flag&flagAddr == 0 {
		panic("reflect.Value.Addr of unaddressable value")
	}
	// The result is a direct Ptr holding v's address; only the
	// read-only bit is inherited (a pointer is not itself addressable).
	return Value{v.typ.ptrTo(), v.val, (v.flag & flagRO) | flag(Ptr)<<flagKindShift}
}
// Bool returns v's underlying value.
// It panics if v's kind is not Bool.
func (v Value) Bool() bool {
	v.mustBe(Bool)
	if v.flag&flagIndir != 0 {
		// val points at the bool.
		return *(*bool)(v.val)
	}
	// val holds the bool inline in its leading byte.
	return *(*bool)(unsafe.Pointer(&v.val))
}
// Bytes returns v's underlying value.
// It panics if v's underlying value is not a slice of bytes.
func (v Value) Bytes() []byte {
	v.mustBe(Slice)
	if v.typ.Elem().Kind() != Uint8 {
		panic("reflect.Value.Bytes of non-byte slice")
	}
	// Slice is always bigger than a word; assume flagIndir.
	return *(*[]byte)(v.val)
}
// runes returns v's underlying value.
// It panics if v's underlying value is not a slice of runes (int32s).
func (v Value) runes() []rune {
	v.mustBe(Slice)
	if v.typ.Elem().Kind() != Int32 {
		// Fix: the panic previously named Bytes, a copy-paste error;
		// the failing method is runes.
		panic("reflect.Value.runes of non-rune slice")
	}
	// Slice is always bigger than a word; assume flagIndir.
	return *(*[]rune)(v.val)
}
// CanAddr returns true if the value's address can be obtained with Addr.
// Such values are called addressable. A value is addressable if it is
// an element of a slice, an element of an addressable array,
// a field of an addressable struct, or the result of dereferencing a pointer.
// If CanAddr returns false, calling Addr will panic.
func (v Value) CanAddr() bool {
	return v.flag&flagAddr != 0
}
// CanSet returns true if the value of v can be changed.
// A Value can be changed only if it is addressable and was not
// obtained by the use of unexported struct fields.
// If CanSet returns false, calling Set or any type-specific
// setter (e.g., SetBool, SetInt64) will panic.
func (v Value) CanSet() bool {
	// Addressable and not read-only, in one mask compare.
	return v.flag&(flagAddr|flagRO) == flagAddr
}
// Call calls the function v with the input arguments in.
// For example, if len(in) == 3, v.Call(in) represents the Go call v(in[0], in[1], in[2]).
// Call panics if v's Kind is not Func.
// It returns the output results as Values.
// As in Go, each input argument must be assignable to the
// type of the function's corresponding input parameter.
// If v is a variadic function, Call creates the variadic slice parameter
// itself, copying in the corresponding values.
func (v Value) Call(in []Value) []Value {
	v.mustBe(Func)
	v.mustBeExported()
	// The op string selects Call semantics inside the shared helper.
	return v.call("Call", in)
}
// CallSlice calls the variadic function v with the input arguments in,
// assigning the slice in[len(in)-1] to v's final variadic argument.
// For example, if len(in) == 3, v.Call(in) represents the Go call v(in[0], in[1], in[2]...).
// Call panics if v's Kind is not Func or if v is not variadic.
// It returns the output results as Values.
// As in Go, each input argument must be assignable to the
// type of the function's corresponding input parameter.
func (v Value) CallSlice(in []Value) []Value {
	v.mustBe(Func)
	v.mustBeExported()
	// The op string selects CallSlice semantics inside the shared helper.
	return v.call("CallSlice", in)
}
// call implements both Call and CallSlice (op selects which).
// It validates the arguments against t's signature, lays them out
// in a contiguous argument frame, invokes the function, and unpacks
// the results into Values. The frame layout is compiler-ABI dependent.
func (v Value) call(op string, in []Value) []Value {
	// Get function pointer, type.
	t := v.typ
	var (
		fn   unsafe.Pointer
		rcvr iword
	)
	if v.flag&flagMethod != 0 {
		// Method value: resolve the concrete func pointer and receiver.
		t, fn, rcvr = methodReceiver(op, v, int(v.flag)>>flagMethodShift)
	} else if v.flag&flagIndir != 0 {
		fn = *(*unsafe.Pointer)(v.val)
	} else {
		fn = v.val
	}
	if fn == nil {
		panic("reflect.Value.Call: call of nil function")
	}
	isSlice := op == "CallSlice"
	n := t.NumIn()
	if isSlice {
		// CallSlice: caller supplies the variadic slice explicitly,
		// so the argument count must match exactly.
		if !t.IsVariadic() {
			panic("reflect: CallSlice of non-variadic function")
		}
		if len(in) < n {
			panic("reflect: CallSlice with too few input arguments")
		}
		if len(in) > n {
			panic("reflect: CallSlice with too many input arguments")
		}
	} else {
		if t.IsVariadic() {
			n--
		}
		if len(in) < n {
			panic("reflect: Call with too few input arguments")
		}
		if !t.IsVariadic() && len(in) > n {
			panic("reflect: Call with too many input arguments")
		}
	}
	for _, x := range in {
		if x.Kind() == Invalid {
			panic("reflect: " + op + " using zero Value argument")
		}
	}
	// Check assignability of the fixed (non-variadic) arguments.
	for i := 0; i < n; i++ {
		if xt, targ := in[i].Type(), t.In(i); !xt.AssignableTo(targ) {
			panic("reflect: " + op + " using " + xt.String() + " as type " + targ.String())
		}
	}
	if !isSlice && t.IsVariadic() {
		// prepare slice for remaining values
		m := len(in) - n
		slice := MakeSlice(t.In(n), m, m)
		elem := t.In(n).Elem()
		for i := 0; i < m; i++ {
			x := in[n+i]
			if xt := x.Type(); !xt.AssignableTo(elem) {
				panic("reflect: cannot use " + xt.String() + " as type " + elem.String() + " in " + op)
			}
			slice.Index(i).Set(x)
		}
		origIn := in
		in = make([]Value, n+1)
		copy(in[:n], origIn)
		in[n] = slice
	}
	nin := len(in)
	if nin != t.NumIn() {
		panic("reflect.Value.Call: wrong argument count")
	}
	nout := t.NumOut()
	// Compute arg size & allocate.
	// This computation is 5g/6g/8g-dependent
	// and probably wrong for gccgo, but so
	// is most of this function.
	size, _, _, _ := frameSize(t, v.flag&flagMethod != 0)
	// Copy into args.
	//
	// TODO(rsc): This will need to be updated for any new garbage collector.
	// For now make everything look like a pointer by allocating
	// a []unsafe.Pointer.
	args := make([]unsafe.Pointer, size/ptrSize)
	ptr := uintptr(unsafe.Pointer(&args[0]))
	off := uintptr(0)
	if v.flag&flagMethod != 0 {
		// Hard-wired first argument.
		*(*iword)(unsafe.Pointer(ptr)) = rcvr
		off = ptrSize
	}
	for i, v := range in {
		v.mustBeExported()
		targ := t.In(i).(*rtype)
		a := uintptr(targ.align)
		// Round off up to targ's alignment before placing the argument.
		off = (off + a - 1) &^ (a - 1)
		n := targ.size
		addr := unsafe.Pointer(ptr + off)
		v = v.assignTo("reflect.Value.Call", targ, (*interface{})(addr))
		if v.flag&flagIndir == 0 {
			storeIword(addr, iword(v.val), n)
		} else {
			memmove(addr, v.val, n)
		}
		off += n
	}
	// Results begin at the next pointer-aligned offset.
	off = (off + ptrSize - 1) &^ (ptrSize - 1)
	// Call.
	call(fn, unsafe.Pointer(ptr), uint32(size))
	// Copy return values out of args.
	//
	// TODO(rsc): revisit like above.
	ret := make([]Value, nout)
	for i := 0; i < nout; i++ {
		tv := t.Out(i)
		a := uintptr(tv.Align())
		off = (off + a - 1) &^ (a - 1)
		// Results are copies in the frame; mark them indirect.
		fl := flagIndir | flag(tv.Kind())<<flagKindShift
		ret[i] = Value{tv.common(), unsafe.Pointer(ptr + off), fl}
		off += tv.Size()
	}
	return ret
}
// callReflect is the call implementation used by a function
// returned by MakeFunc. In many ways it is the opposite of the
// method Value.call above. The method above converts a call using Values
// into a call of a function with a concrete argument frame, while
// callReflect converts a call of a function with a concrete argument
// frame into a call using Values.
// It is in this file so that it can be next to the call method above.
// The remainder of the MakeFunc implementation is in makefunc.go.
//
// NOTE: This function must be marked as a "wrapper" in the generated code,
// so that the linker can make it work correctly for panic and recover.
// The gc compilers know to do that for the name "reflect.callReflect".
func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer) {
	ftyp := ctxt.typ
	f := ctxt.fn
	// Copy argument frame into Values.
	ptr := frame
	off := uintptr(0)
	in := make([]Value, 0, len(ftyp.in))
	for _, arg := range ftyp.in {
		typ := arg
		// Advance off to typ's alignment (align is a power of two).
		off += -off & uintptr(typ.align-1)
		v := Value{typ, nil, flag(typ.Kind()) << flagKindShift}
		if typ.size <= ptrSize {
			// value fits in word.
			v.val = unsafe.Pointer(loadIword(unsafe.Pointer(uintptr(ptr)+off), typ.size))
		} else {
			// value does not fit in word.
			// Must make a copy, because f might keep a reference to it,
			// and we cannot let f keep a reference to the stack frame
			// after this function returns, not even a read-only reference.
			v.val = unsafe_New(typ)
			memmove(v.val, unsafe.Pointer(uintptr(ptr)+off), typ.size)
			v.flag |= flagIndir
		}
		in = append(in, v)
		off += typ.size
	}
	// Call underlying function.
	out := f(in)
	if len(out) != len(ftyp.out) {
		panic("reflect: wrong return count from function created by MakeFunc")
	}
	// Copy results back into argument frame.
	if len(ftyp.out) > 0 {
		// Results start at the next pointer-aligned offset.
		off += -off & (ptrSize - 1)
		for i, arg := range ftyp.out {
			typ := arg
			v := out[i]
			if v.typ != typ {
				panic("reflect: function created by MakeFunc using " + funcName(f) +
					" returned wrong type: have " +
					out[i].typ.String() + " for " + typ.String())
			}
			if v.flag&flagRO != 0 {
				panic("reflect: function created by MakeFunc using " + funcName(f) +
					" returned value obtained from unexported field")
			}
			off += -off & uintptr(typ.align-1)
			addr := unsafe.Pointer(uintptr(ptr) + off)
			if v.flag&flagIndir == 0 {
				storeIword(addr, iword(v.val), typ.size)
			} else {
				memmove(addr, v.val, typ.size)
			}
			off += typ.size
		}
	}
}
// methodReceiver returns information about the receiver
// described by v. The Value v may or may not have the
// flagMethod bit set, so the kind cached in v.flag should
// not be used.
// It returns the method's (receiver-less) type t, a pointer fn to its
// code pointer, and the receiver interface word rcvr.
func methodReceiver(op string, v Value, methodIndex int) (t *rtype, fn unsafe.Pointer, rcvr iword) {
	i := methodIndex
	if v.typ.Kind() == Interface {
		// Interface method: look the func pointer up in the itab.
		tt := (*interfaceType)(unsafe.Pointer(v.typ))
		if i < 0 || i >= len(tt.methods) {
			panic("reflect: internal error: invalid method index")
		}
		m := &tt.methods[i]
		if m.pkgPath != nil {
			panic("reflect: " + op + " of unexported method")
		}
		t = m.typ
		iface := (*nonEmptyInterface)(v.val)
		if iface.itab == nil {
			panic("reflect: " + op + " of method on nil interface value")
		}
		fn = unsafe.Pointer(&iface.itab.fun[i])
		rcvr = iface.word
	} else {
		// Concrete method: use the type's method table.
		ut := v.typ.uncommon()
		if ut == nil || i < 0 || i >= len(ut.methods) {
			panic("reflect: internal error: invalid method index")
		}
		m := &ut.methods[i]
		if m.pkgPath != nil {
			panic("reflect: " + op + " of unexported method")
		}
		fn = unsafe.Pointer(&m.ifn)
		t = m.mtyp
		rcvr = v.iword()
	}
	return
}
// align returns the result of rounding x up to a multiple of n.
// n must be a power of two.
func align(x, n uintptr) uintptr {
	mask := n - 1
	return (x + mask) &^ mask
}
// frameSize returns the sizes of the argument and result frame
// for a function of the given type. The rcvr bool specifies whether
// a one-word receiver should be included in the total.
// NOTE: in and out are accumulated via unsigned negation
// (start = -total, then += total later), which yields the span between
// the two totals despite uintptr being unsigned.
func frameSize(t *rtype, rcvr bool) (total, in, outOffset, out uintptr) {
	if rcvr {
		// extra word for receiver interface word
		total += ptrSize
	}
	nin := t.NumIn()
	in = -total
	for i := 0; i < nin; i++ {
		tv := t.In(i)
		total = align(total, uintptr(tv.Align()))
		total += tv.Size()
	}
	// in = (total after args) - (total before args).
	in += total
	total = align(total, ptrSize)
	nout := t.NumOut()
	outOffset = total
	out = -total
	for i := 0; i < nout; i++ {
		tv := t.Out(i)
		total = align(total, uintptr(tv.Align()))
		total += tv.Size()
	}
	out += total
	// total must be > 0 in order for &args[0] to be valid.
	// the argument copying is going to round it up to
	// a multiple of ptrSize anyway, so make it ptrSize to begin with.
	if total < ptrSize {
		total = ptrSize
	}
	// round to pointer
	total = align(total, ptrSize)
	return
}
// callMethod is the call implementation used by a function returned
// by makeMethodValue (used by v.Method(i).Interface()).
// It is a streamlined version of the usual reflect call: the caller has
// already laid out the argument frame for us, so we don't have
// to deal with individual Values for each argument.
// It is in this file so that it can be next to the two similar functions above.
// The remainder of the makeMethodValue implementation is in makefunc.go.
//
// NOTE: This function must be marked as a "wrapper" in the generated code,
// so that the linker can make it work correctly for panic and recover.
// The gc compilers know to do that for the name "reflect.callMethod".
func callMethod(ctxt *methodValue, frame unsafe.Pointer) {
	t, fn, rcvr := methodReceiver("call", ctxt.rcvr, ctxt.method)
	total, in, outOffset, out := frameSize(t, true)
	// Copy into args.
	//
	// TODO(rsc): This will need to be updated for any new garbage collector.
	// For now make everything look like a pointer by allocating
	// a []unsafe.Pointer.
	args := make([]unsafe.Pointer, total/ptrSize)
	// Receiver goes in the first word; caller's args follow.
	args[0] = unsafe.Pointer(rcvr)
	base := unsafe.Pointer(&args[0])
	memmove(unsafe.Pointer(uintptr(base)+ptrSize), frame, in)
	// Call.
	call(fn, unsafe.Pointer(&args[0]), uint32(total))
	// Copy return values.
	// The caller's frame has no receiver word, so results land
	// one word earlier (outOffset-ptrSize) than in our frame.
	memmove(unsafe.Pointer(uintptr(frame)+outOffset-ptrSize), unsafe.Pointer(uintptr(base)+outOffset), out)
}
// funcName returns the name of f, for use in error messages.
func funcName(f func([]Value) []Value) string {
	// A func value's first word is its code pointer; read it directly.
	pc := *(*uintptr)(unsafe.Pointer(&f))
	rf := runtime.FuncForPC(pc)
	if rf != nil {
		return rf.Name()
	}
	return "closure"
}
// Cap returns v's capacity.
// It panics if v's Kind is not Array, Chan, or Slice.
func (v Value) Cap() int {
	k := v.kind()
	switch k {
	case Array:
		// An array's capacity equals its fixed length.
		return v.typ.Len()
	case Chan:
		return int(chancap(v.iword()))
	case Slice:
		// Slice is always bigger than a word; assume flagIndir.
		return (*SliceHeader)(v.val).Cap
	}
	panic(&ValueError{"reflect.Value.Cap", k})
}
// Close closes the channel v.
// It panics if v's Kind is not Chan.
func (v Value) Close() {
	v.mustBe(Chan)
	v.mustBeExported()
	// Delegate to the runtime's channel close.
	chanclose(v.iword())
}
// Complex returns v's underlying value, as a complex128.
// It panics if v's Kind is not Complex64 or Complex128
func (v Value) Complex() complex128 {
	k := v.kind()
	switch k {
	case Complex64:
		if v.flag&flagIndir != 0 {
			return complex128(*(*complex64)(v.val))
		}
		// complex64 may be stored inline in the word.
		return complex128(*(*complex64)(unsafe.Pointer(&v.val)))
	case Complex128:
		// complex128 is always bigger than a word; assume flagIndir.
		return *(*complex128)(v.val)
	}
	panic(&ValueError{"reflect.Value.Complex", k})
}
// Elem returns the value that the interface v contains
// or that the pointer v points to.
// It panics if v's Kind is not Interface or Ptr.
// It returns the zero Value if v is nil.
func (v Value) Elem() Value {
	k := v.kind()
	switch k {
	case Interface:
		var (
			typ *rtype
			val unsafe.Pointer
		)
		if v.typ.NumMethod() == 0 {
			// Empty interface layout (eface).
			eface := (*emptyInterface)(v.val)
			if eface.typ == nil {
				// nil interface value
				return Value{}
			}
			typ = eface.typ
			val = unsafe.Pointer(eface.word)
		} else {
			// Non-empty interface layout (iface).
			iface := (*nonEmptyInterface)(v.val)
			if iface.itab == nil {
				// nil interface value
				return Value{}
			}
			typ = iface.itab.typ
			val = unsafe.Pointer(iface.word)
		}
		fl := v.flag & flagRO
		fl |= flag(typ.Kind()) << flagKindShift
		if typ.size > ptrSize {
			fl |= flagIndir
		}
		return Value{typ, val, fl}
	case Ptr:
		val := v.val
		if v.flag&flagIndir != 0 {
			val = *(*unsafe.Pointer)(val)
		}
		// The returned value's address is v's value.
		if val == nil {
			return Value{}
		}
		tt := (*ptrType)(unsafe.Pointer(v.typ))
		typ := tt.elem
		// The pointee is addressable and indirect.
		fl := v.flag&flagRO | flagIndir | flagAddr
		// NOTE(review): written flag(Kind << shift) rather than the usual
		// flag(Kind) << shift; same value, since kind values fit the width.
		fl |= flag(typ.Kind() << flagKindShift)
		return Value{typ, val, fl}
	}
	panic(&ValueError{"reflect.Value.Elem", k})
}
// Field returns the i'th field of the struct v.
// It panics if v's Kind is not Struct or i is out of range.
func (v Value) Field(i int) Value {
	v.mustBe(Struct)
	tt := (*structType)(unsafe.Pointer(v.typ))
	if i < 0 || i >= len(tt.fields) {
		panic("reflect: Field index out of range")
	}
	field := &tt.fields[i]
	typ := field.typ
	// Inherit permission bits from v.
	fl := v.flag & (flagRO | flagIndir | flagAddr)
	// Using an unexported field forces flagRO.
	if field.pkgPath != nil {
		fl |= flagRO
	}
	fl |= flag(typ.Kind()) << flagKindShift
	var val unsafe.Pointer
	switch {
	case fl&flagIndir != 0:
		// Indirect. Just bump pointer.
		val = unsafe.Pointer(uintptr(v.val) + field.offset)
	case bigEndian:
		// Direct. Discard leading bytes.
		// (Value stored inline in the word; shift selects the field's bytes.)
		val = unsafe.Pointer(uintptr(v.val) << (field.offset * 8))
	default:
		// Direct. Discard leading bytes.
		val = unsafe.Pointer(uintptr(v.val) >> (field.offset * 8))
	}
	return Value{typ, val, fl}
}
// FieldByIndex returns the nested field corresponding to index.
// It panics if v's Kind is not struct.
func (v Value) FieldByIndex(index []int) Value {
	v.mustBe(Struct)
	for depth, fieldIndex := range index {
		// After the first step, follow pointers to embedded structs.
		if depth > 0 && v.Kind() == Ptr && v.Elem().Kind() == Struct {
			v = v.Elem()
		}
		v = v.Field(fieldIndex)
	}
	return v
}
// FieldByName returns the struct field with the given name.
// It returns the zero Value if no field was found.
// It panics if v's Kind is not struct.
func (v Value) FieldByName(name string) Value {
	v.mustBe(Struct)
	f, ok := v.typ.FieldByName(name)
	if !ok {
		return Value{}
	}
	return v.FieldByIndex(f.Index)
}
// FieldByNameFunc returns the struct field with a name
// that satisfies the match function.
// It panics if v's Kind is not struct.
// It returns the zero Value if no field was found.
func (v Value) FieldByNameFunc(match func(string) bool) Value {
	v.mustBe(Struct)
	f, ok := v.typ.FieldByNameFunc(match)
	if !ok {
		return Value{}
	}
	return v.FieldByIndex(f.Index)
}
// Float returns v's underlying value, as a float64.
// It panics if v's Kind is not Float32 or Float64
func (v Value) Float() float64 {
	k := v.kind()
	switch k {
	case Float32:
		if v.flag&flagIndir != 0 {
			return float64(*(*float32)(v.val))
		}
		// float32 stored inline in the word.
		return float64(*(*float32)(unsafe.Pointer(&v.val)))
	case Float64:
		if v.flag&flagIndir != 0 {
			return *(*float64)(v.val)
		}
		// float64 stored inline (64-bit platforms only).
		return *(*float64)(unsafe.Pointer(&v.val))
	}
	panic(&ValueError{"reflect.Value.Float", k})
}
// uint8Type is the *rtype for uint8, used by Index on strings.
var uint8Type = TypeOf(uint8(0)).(*rtype)
// Index returns v's i'th element.
// It panics if v's Kind is not Array, Slice, or String or i is out of range.
func (v Value) Index(i int) Value {
	k := v.kind()
	switch k {
	case Array:
		tt := (*arrayType)(unsafe.Pointer(v.typ))
		// Fix: was `i > int(tt.len)`, which wrongly accepted i == len
		// and read one element past the end of the array. The slice and
		// string cases below already use >=.
		if i < 0 || i >= int(tt.len) {
			panic("reflect: array index out of range")
		}
		typ := tt.elem
		fl := v.flag & (flagRO | flagIndir | flagAddr) // bits same as overall array
		fl |= flag(typ.Kind()) << flagKindShift
		offset := uintptr(i) * typ.size
		var val unsafe.Pointer
		switch {
		case fl&flagIndir != 0:
			// Indirect. Just bump pointer.
			val = unsafe.Pointer(uintptr(v.val) + offset)
		case bigEndian:
			// Direct. Discard leading bytes.
			val = unsafe.Pointer(uintptr(v.val) << (offset * 8))
		default:
			// Direct. Discard leading bytes.
			val = unsafe.Pointer(uintptr(v.val) >> (offset * 8))
		}
		return Value{typ, val, fl}
	case Slice:
		// Element flag same as Elem of Ptr.
		// Addressable, indirect, possibly read-only.
		fl := flagAddr | flagIndir | v.flag&flagRO
		s := (*SliceHeader)(v.val)
		if i < 0 || i >= s.Len {
			panic("reflect: slice index out of range")
		}
		tt := (*sliceType)(unsafe.Pointer(v.typ))
		typ := tt.elem
		fl |= flag(typ.Kind()) << flagKindShift
		val := unsafe.Pointer(s.Data + uintptr(i)*typ.size)
		return Value{typ, val, fl}
	case String:
		fl := v.flag&flagRO | flag(Uint8<<flagKindShift)
		s := (*StringHeader)(v.val)
		if i < 0 || i >= s.Len {
			panic("reflect: string index out of range")
		}
		// The byte value itself is stored directly in the word.
		val := *(*byte)(unsafe.Pointer(s.Data + uintptr(i)))
		return Value{uint8Type, unsafe.Pointer(uintptr(val)), fl}
	}
	panic(&ValueError{"reflect.Value.Index", k})
}
// Int returns v's underlying value, as an int64.
// It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64.
func (v Value) Int() int64 {
	k := v.kind()
	var p unsafe.Pointer
	if v.flag&flagIndir != 0 {
		p = v.val
	} else {
		// The escape analysis is good enough that &v.val
		// does not trigger a heap allocation.
		p = unsafe.Pointer(&v.val)
	}
	// p now points at the integer, whichever representation was used.
	switch k {
	case Int:
		return int64(*(*int)(p))
	case Int8:
		return int64(*(*int8)(p))
	case Int16:
		return int64(*(*int16)(p))
	case Int32:
		return int64(*(*int32)(p))
	case Int64:
		return int64(*(*int64)(p))
	}
	panic(&ValueError{"reflect.Value.Int", k})
}
// CanInterface returns true if Interface can be used without panicking.
func (v Value) CanInterface() bool {
	fl := v.flag
	if fl == 0 {
		// Zero Value: no kind, no data.
		panic(&ValueError{"reflect.Value.CanInterface", Invalid})
	}
	return fl&flagRO == 0
}
// Interface returns v's current value as an interface{}.
// It is equivalent to:
//	var i interface{} = (v's underlying value)
// It panics if the Value was obtained by accessing
// unexported struct fields.
func (v Value) Interface() (i interface{}) {
	// safe=true: enforce the unexported-field restriction.
	return valueInterface(v, true)
}
// valueInterface implements Interface; safe controls whether access to
// values derived from unexported fields panics (true for the public API).
func valueInterface(v Value, safe bool) interface{} {
	if v.flag == 0 {
		panic(&ValueError{"reflect.Value.Interface", 0})
	}
	if safe && v.flag&flagRO != 0 {
		// Do not allow access to unexported values via Interface,
		// because they might be pointers that should not be
		// writable or methods or function that should not be callable.
		panic("reflect.Value.Interface: cannot return value obtained from unexported field or method")
	}
	if v.flag&flagMethod != 0 {
		// Materialize a method value into a real func value.
		v = makeMethodValue("Interface", v)
	}
	k := v.kind()
	if k == Interface {
		// Special case: return the element inside the interface.
		// Empty interface has one layout, all interfaces with
		// methods have a second layout.
		if v.NumMethod() == 0 {
			return *(*interface{})(v.val)
		}
		return *(*interface {
			M()
		})(v.val)
	}
	// Non-interface value.
	var eface emptyInterface
	eface.typ = v.typ
	eface.word = v.iword()
	// Don't need to allocate if v is not addressable or fits in one word.
	if v.flag&flagAddr != 0 && v.typ.size > ptrSize {
		// eface.word is a pointer to the actual data,
		// which might be changed. We need to return
		// a pointer to unchanging data, so make a copy.
		ptr := unsafe_New(v.typ)
		memmove(ptr, unsafe.Pointer(eface.word), v.typ.size)
		eface.word = iword(ptr)
	}
	return *(*interface{})(unsafe.Pointer(&eface))
}
// InterfaceData returns the interface v's value as a uintptr pair.
// It panics if v's Kind is not Interface.
func (v Value) InterfaceData() [2]uintptr {
	v.mustBe(Interface)
	// We treat this as a read operation, so we allow
	// it even for unexported data, because the caller
	// has to import "unsafe" to turn it into something
	// that can be abused.
	// Interface value is always bigger than a word; assume flagIndir.
	return *(*[2]uintptr)(v.val)
}
// IsNil returns true if v is a nil value.
// It panics if v's Kind is not Chan, Func, Interface, Map, Ptr, or Slice.
func (v Value) IsNil() bool {
	k := v.kind()
	switch k {
	case Chan, Func, Map, Ptr:
		if v.flag&flagMethod != 0 {
			// A method value is never nil.
			return false
		}
		ptr := v.val
		if v.flag&flagIndir != 0 {
			ptr = *(*unsafe.Pointer)(ptr)
		}
		return ptr == nil
	case Interface, Slice:
		// Both interface and slice are nil if first word is 0.
		// Both are always bigger than a word; assume flagIndir.
		return *(*unsafe.Pointer)(v.val) == nil
	}
	panic(&ValueError{"reflect.Value.IsNil", k})
}
// IsValid returns true if v represents a value.
// It returns false if v is the zero Value.
// If IsValid returns false, all other methods except String panic.
// Most functions and methods never return an invalid value.
// If one does, its documentation states the conditions explicitly.
func (v Value) IsValid() bool {
	// The zero Value has a zero flag word.
	return v.flag != 0
}
// Kind returns v's Kind.
// If v is the zero Value (IsValid returns false), Kind returns Invalid.
func (v Value) Kind() Kind {
	return v.kind()
}
// Len returns v's length.
// It panics if v's Kind is not Array, Chan, Map, Slice, or String.
func (v Value) Len() int {
	k := v.kind()
	switch k {
	case Array:
		// Array length is part of the type.
		tt := (*arrayType)(unsafe.Pointer(v.typ))
		return int(tt.len)
	case Chan:
		return chanlen(v.iword())
	case Map:
		return maplen(v.iword())
	case Slice:
		// Slice is bigger than a word; assume flagIndir.
		return (*SliceHeader)(v.val).Len
	case String:
		// String is bigger than a word; assume flagIndir.
		return (*StringHeader)(v.val).Len
	}
	panic(&ValueError{"reflect.Value.Len", k})
}
// MapIndex returns the value associated with key in the map v.
// It panics if v's Kind is not Map.
// It returns the zero Value if key is not found in the map or if v represents a nil map.
// As in Go, the key's value must be assignable to the map's key type.
func (v Value) MapIndex(key Value) Value {
	v.mustBe(Map)
	tt := (*mapType)(unsafe.Pointer(v.typ))
	// Do not require key to be exported, so that DeepEqual
	// and other programs can use all the keys returned by
	// MapKeys as arguments to MapIndex. If either the map
	// or the key is unexported, though, the result will be
	// considered unexported. This is consistent with the
	// behavior for structs, which allow read but not write
	// of unexported fields.
	key = key.assignTo("reflect.Value.MapIndex", tt.key, nil)
	word, ok := mapaccess(v.typ, v.iword(), key.iword())
	if !ok {
		return Value{}
	}
	typ := tt.elem
	// Result is read-only if either the map or the key was.
	fl := (v.flag | key.flag) & flagRO
	if typ.size > ptrSize {
		fl |= flagIndir
	}
	fl |= flag(typ.Kind()) << flagKindShift
	return Value{typ, unsafe.Pointer(word), fl}
}
// MapKeys returns a slice containing all the keys present in the map,
// in unspecified order.
// It panics if v's Kind is not Map.
// It returns an empty slice if v represents a nil map.
func (v Value) MapKeys() []Value {
	v.mustBe(Map)
	tt := (*mapType)(unsafe.Pointer(v.typ))
	keyType := tt.key
	// All keys share the same flag bits.
	fl := v.flag & flagRO
	fl |= flag(keyType.Kind()) << flagKindShift
	if keyType.size > ptrSize {
		fl |= flagIndir
	}
	m := v.iword()
	mlen := int(0)
	if m != nil {
		mlen = maplen(m)
	}
	it := mapiterinit(v.typ, m)
	a := make([]Value, mlen)
	var i int
	for i = 0; i < len(a); i++ {
		keyWord, ok := mapiterkey(it)
		if !ok {
			// Map shrank concurrently; stop early.
			break
		}
		a[i] = Value{keyType, unsafe.Pointer(keyWord), fl}
		mapiternext(it)
	}
	// Trim in case iteration ended before mlen keys.
	return a[:i]
}
// Method returns a function value corresponding to v's i'th method.
// The arguments to a Call on the returned function should not include
// a receiver; the returned function will always use v as the receiver.
// Method panics if i is out of range or if v is a nil interface value.
func (v Value) Method(i int) Value {
	if v.typ == nil {
		panic(&ValueError{"reflect.Value.Method", Invalid})
	}
	// A method value has no methods of its own.
	if v.flag&flagMethod != 0 || i < 0 || i >= v.typ.NumMethod() {
		panic("reflect: Method index out of range")
	}
	if v.typ.Kind() == Interface && v.IsNil() {
		panic("reflect: Method on nil interface value")
	}
	// Keep the receiver's typ/val; record the method index in the flag.
	fl := v.flag & (flagRO | flagIndir)
	fl |= flag(Func) << flagKindShift
	fl |= flag(i)<<flagMethodShift | flagMethod
	return Value{v.typ, v.val, fl}
}
// NumMethod returns the number of methods in the value's method set.
func (v Value) NumMethod() int {
	switch {
	case v.typ == nil:
		panic(&ValueError{"reflect.Value.NumMethod", Invalid})
	case v.flag&flagMethod != 0:
		// A method value itself has no methods.
		return 0
	default:
		return v.typ.NumMethod()
	}
}
// MethodByName returns a function value corresponding to the method
// of v with the given name.
// The arguments to a Call on the returned function should not include
// a receiver; the returned function will always use v as the receiver.
// It returns the zero Value if no method was found.
func (v Value) MethodByName(name string) Value {
	if v.typ == nil {
		panic(&ValueError{"reflect.Value.MethodByName", Invalid})
	}
	// Method values have no methods of their own.
	if v.flag&flagMethod == 0 {
		if m, ok := v.typ.MethodByName(name); ok {
			return v.Method(m.Index)
		}
	}
	return Value{}
}
// NumField returns the number of fields in the struct v.
// It panics if v's Kind is not Struct.
func (v Value) NumField() int {
	v.mustBe(Struct)
	tt := (*structType)(unsafe.Pointer(v.typ))
	return len(tt.fields)
}
// OverflowComplex returns true if the complex128 x cannot be represented by v's type.
// It panics if v's Kind is not Complex64 or Complex128.
func (v Value) OverflowComplex(x complex128) bool {
	switch k := v.kind(); k {
	case Complex64:
		// Overflows if either component overflows float32.
		return overflowFloat32(real(x)) || overflowFloat32(imag(x))
	case Complex128:
		return false
	default:
		panic(&ValueError{"reflect.Value.OverflowComplex", k})
	}
}
// OverflowFloat returns true if the float64 x cannot be represented by v's type.
// It panics if v's Kind is not Float32 or Float64.
func (v Value) OverflowFloat(x float64) bool {
	switch k := v.kind(); k {
	case Float32:
		return overflowFloat32(x)
	case Float64:
		return false
	default:
		panic(&ValueError{"reflect.Value.OverflowFloat", k})
	}
}
// overflowFloat32 reports whether the finite magnitude of x exceeds
// the largest float32 (so converting to float32 would overflow).
// Infinities and NaN return false.
func overflowFloat32(x float64) bool {
	x = math.Abs(x)
	return math.MaxFloat32 < x && x <= math.MaxFloat64
}
// OverflowInt returns true if the int64 x cannot be represented by v's type.
// It panics if v's Kind is not Int, Int8, int16, Int32, or Int64.
func (v Value) OverflowInt(x int64) bool {
	switch k := v.kind(); k {
	case Int, Int8, Int16, Int32, Int64:
		// Sign-extend x through the type's bit width; any change
		// means the value does not fit.
		shift := 64 - v.typ.size*8
		return x != x<<shift>>shift
	default:
		panic(&ValueError{"reflect.Value.OverflowInt", k})
	}
}
// OverflowUint returns true if the uint64 x cannot be represented by v's type.
// It panics if v's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64.
func (v Value) OverflowUint(x uint64) bool {
	switch k := v.kind(); k {
	case Uint, Uintptr, Uint8, Uint16, Uint32, Uint64:
		// Zero-extend x through the type's bit width; any change
		// means the value does not fit.
		shift := 64 - v.typ.size*8
		return x != x<<shift>>shift
	default:
		panic(&ValueError{"reflect.Value.OverflowUint", k})
	}
}
// Pointer returns v's value as a uintptr.
// It returns uintptr instead of unsafe.Pointer so that
// code using reflect cannot obtain unsafe.Pointers
// without importing the unsafe package explicitly.
// It panics if v's Kind is not Chan, Func, Map, Ptr, Slice, or UnsafePointer.
//
// If v's Kind is Func, the returned pointer is an underlying
// code pointer, but not necessarily enough to identify a
// single function uniquely. The only guarantee is that the
// result is zero if and only if v is a nil func Value.
func (v Value) Pointer() uintptr {
	k := v.kind()
	switch k {
	case Chan, Map, Ptr, UnsafePointer:
		p := v.val
		if v.flag&flagIndir != 0 {
			p = *(*unsafe.Pointer)(p)
		}
		return uintptr(p)
	case Func:
		if v.flag&flagMethod != 0 {
			// As the doc comment says, the returned pointer is an
			// underlying code pointer but not necessarily enough to
			// identify a single function uniquely. All method expressions
			// created via reflect have the same underlying code pointer,
			// so their Pointers are equal. The function used here must
			// match the one used in makeMethodValue.
			f := methodValueCall
			return **(**uintptr)(unsafe.Pointer(&f))
		}
		p := v.val
		if v.flag&flagIndir != 0 {
			p = *(*unsafe.Pointer)(p)
		}
		// Non-nil func value points at data block.
		// First word of data block is actual code.
		if p != nil {
			p = *(*unsafe.Pointer)(p)
		}
		return uintptr(p)
	case Slice:
		return (*SliceHeader)(v.val).Data
	}
	panic(&ValueError{"reflect.Value.Pointer", k})
}
// Recv receives and returns a value from the channel v.
// It panics if v's Kind is not Chan.
// The receive blocks until a value is ready.
// The boolean value ok is true if the value x corresponds to a send
// on the channel, false if it is a zero value received because the channel is closed.
func (v Value) Recv() (x Value, ok bool) {
	v.mustBe(Chan)
	v.mustBeExported()
	return v.recv(false) // blocking receive
}

// internal recv, possibly non-blocking (nb).
// v is known to be a channel.
func (v Value) recv(nb bool) (val Value, ok bool) {
	tt := (*chanType)(unsafe.Pointer(v.typ))
	if ChanDir(tt.dir)&RecvDir == 0 {
		panic("reflect: recv on send-only channel")
	}
	word, selected, ok := chanrecv(v.typ, v.iword(), nb)
	if selected {
		// Wrap the received word in a Value of the channel's element type.
		typ := tt.elem
		fl := flag(typ.Kind()) << flagKindShift
		if typ.size > ptrSize {
			// Value does not fit in a word, so word is a pointer to it.
			fl |= flagIndir
		}
		val = Value{typ, unsafe.Pointer(word), fl}
	}
	return
}

// Send sends x on the channel v.
// It panics if v's kind is not Chan or if x's type is not the same type as v's element type.
// As in Go, x's value must be assignable to the channel's element type.
func (v Value) Send(x Value) {
	v.mustBe(Chan)
	v.mustBeExported()
	v.send(x, false) // blocking send
}

// internal send, possibly non-blocking.
// v is known to be a channel.
func (v Value) send(x Value, nb bool) (selected bool) {
	tt := (*chanType)(unsafe.Pointer(v.typ))
	if ChanDir(tt.dir)&SendDir == 0 {
		panic("reflect: send on recv-only channel")
	}
	x.mustBeExported()
	// Convert x to the channel's element type before handing it to the runtime.
	x = x.assignTo("reflect.Value.Send", tt.elem, nil)
	return chansend(v.typ, v.iword(), x.iword(), nb)
}
// Set assigns x to the value v.
// It panics if CanSet returns false.
// As in Go, x's value must be assignable to v's type.
func (v Value) Set(x Value) {
	v.mustBeAssignable()
	x.mustBeExported() // do not let unexported x leak
	var target *interface{}
	if v.kind() == Interface {
		// Assigning into an interface: let assignTo build the interface
		// value directly in v's storage.
		target = (*interface{})(v.val)
	}
	x = x.assignTo("reflect.Set", v.typ, target)
	if x.flag&flagIndir != 0 {
		// x's data is stored indirectly; copy the bytes.
		memmove(v.val, x.val, v.typ.size)
	} else {
		// x's data fits in a word; store it directly.
		storeIword(v.val, iword(x.val), v.typ.size)
	}
}
// SetBool sets v's underlying value.
// It panics if v's Kind is not Bool or if CanSet() is false.
func (v Value) SetBool(x bool) {
	v.mustBeAssignable()
	v.mustBe(Bool)
	*(*bool)(v.val) = x
}

// SetBytes sets v's underlying value.
// It panics if v's underlying value is not a slice of bytes.
func (v Value) SetBytes(x []byte) {
	v.mustBeAssignable()
	v.mustBe(Slice)
	if v.typ.Elem().Kind() != Uint8 {
		panic("reflect.Value.SetBytes of non-byte slice")
	}
	*(*[]byte)(v.val) = x
}

// setRunes sets v's underlying value.
// It panics if v's underlying value is not a slice of runes (int32s).
func (v Value) setRunes(x []rune) {
	v.mustBeAssignable()
	v.mustBe(Slice)
	if v.typ.Elem().Kind() != Int32 {
		panic("reflect.Value.setRunes of non-rune slice")
	}
	*(*[]rune)(v.val) = x
}

// SetComplex sets v's underlying value to x.
// It panics if v's Kind is not Complex64 or Complex128, or if CanSet() is false.
func (v Value) SetComplex(x complex128) {
	v.mustBeAssignable()
	switch k := v.kind(); k {
	default:
		panic(&ValueError{"reflect.Value.SetComplex", k})
	case Complex64:
		*(*complex64)(v.val) = complex64(x)
	case Complex128:
		*(*complex128)(v.val) = x
	}
}

// SetFloat sets v's underlying value to x.
// It panics if v's Kind is not Float32 or Float64, or if CanSet() is false.
func (v Value) SetFloat(x float64) {
	v.mustBeAssignable()
	switch k := v.kind(); k {
	default:
		panic(&ValueError{"reflect.Value.SetFloat", k})
	case Float32:
		*(*float32)(v.val) = float32(x)
	case Float64:
		*(*float64)(v.val) = x
	}
}

// SetInt sets v's underlying value to x.
// It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64, or if CanSet() is false.
func (v Value) SetInt(x int64) {
	v.mustBeAssignable()
	// Store x truncated to the exact width of v's type.
	switch k := v.kind(); k {
	default:
		panic(&ValueError{"reflect.Value.SetInt", k})
	case Int:
		*(*int)(v.val) = int(x)
	case Int8:
		*(*int8)(v.val) = int8(x)
	case Int16:
		*(*int16)(v.val) = int16(x)
	case Int32:
		*(*int32)(v.val) = int32(x)
	case Int64:
		*(*int64)(v.val) = x
	}
}

// SetLen sets v's length to n.
// It panics if v's Kind is not Slice or if n is negative or
// greater than the capacity of the slice.
func (v Value) SetLen(n int) {
	v.mustBeAssignable()
	v.mustBe(Slice)
	s := (*SliceHeader)(v.val)
	if n < 0 || n > int(s.Cap) {
		panic("reflect: slice length out of range in SetLen")
	}
	s.Len = n
}

// SetCap sets v's capacity to n.
// It panics if v's Kind is not Slice or if n is smaller than the length or
// greater than the capacity of the slice.
func (v Value) SetCap(n int) {
	v.mustBeAssignable()
	v.mustBe(Slice)
	s := (*SliceHeader)(v.val)
	if n < int(s.Len) || n > int(s.Cap) {
		panic("reflect: slice capacity out of range in SetCap")
	}
	s.Cap = n
}

// SetMapIndex sets the value associated with key in the map v to val.
// It panics if v's Kind is not Map.
// If val is the zero Value, SetMapIndex deletes the key from the map.
// As in Go, key's value must be assignable to the map's key type,
// and val's value must be assignable to the map's value type.
func (v Value) SetMapIndex(key, val Value) {
	v.mustBe(Map)
	v.mustBeExported()
	key.mustBeExported()
	tt := (*mapType)(unsafe.Pointer(v.typ))
	key = key.assignTo("reflect.Value.SetMapIndex", tt.key, nil)
	if val.typ != nil {
		val.mustBeExported()
		val = val.assignTo("reflect.Value.SetMapIndex", tt.elem, nil)
	}
	// The final argument tells the runtime whether this is a store
	// (val present) or a delete (val is the zero Value).
	mapassign(v.typ, v.iword(), key.iword(), val.iword(), val.typ != nil)
}

// SetUint sets v's underlying value to x.
// It panics if v's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64, or if CanSet() is false.
func (v Value) SetUint(x uint64) {
	v.mustBeAssignable()
	switch k := v.kind(); k {
	default:
		panic(&ValueError{"reflect.Value.SetUint", k})
	case Uint:
		*(*uint)(v.val) = uint(x)
	case Uint8:
		*(*uint8)(v.val) = uint8(x)
	case Uint16:
		*(*uint16)(v.val) = uint16(x)
	case Uint32:
		*(*uint32)(v.val) = uint32(x)
	case Uint64:
		*(*uint64)(v.val) = x
	case Uintptr:
		*(*uintptr)(v.val) = uintptr(x)
	}
}

// SetPointer sets the unsafe.Pointer value v to x.
// It panics if v's Kind is not UnsafePointer.
func (v Value) SetPointer(x unsafe.Pointer) {
	v.mustBeAssignable()
	v.mustBe(UnsafePointer)
	*(*unsafe.Pointer)(v.val) = x
}

// SetString sets v's underlying value to x.
// It panics if v's Kind is not String or if CanSet() is false.
func (v Value) SetString(x string) {
	v.mustBeAssignable()
	v.mustBe(String)
	*(*string)(v.val) = x
}
// Slice returns v[i:j].
// It panics if v's Kind is not Array, Slice or String, or if v is an unaddressable array,
// or if the indexes are out of bounds.
func (v Value) Slice(i, j int) Value {
	var (
		cap  int
		typ  *sliceType
		base unsafe.Pointer
	)
	switch kind := v.kind(); kind {
	default:
		panic(&ValueError{"reflect.Value.Slice", kind})
	case Array:
		if v.flag&flagAddr == 0 {
			panic("reflect.Value.Slice: slice of unaddressable array")
		}
		tt := (*arrayType)(unsafe.Pointer(v.typ))
		cap = int(tt.len)
		typ = (*sliceType)(unsafe.Pointer(tt.slice))
		base = v.val
	case Slice:
		typ = (*sliceType)(unsafe.Pointer(v.typ))
		s := (*SliceHeader)(v.val)
		base = unsafe.Pointer(s.Data)
		cap = s.Cap
	case String:
		// Strings are handled separately: the result is a new string
		// header sharing the original's bytes, not a slice.
		s := (*StringHeader)(v.val)
		if i < 0 || j < i || j > s.Len {
			panic("reflect.Value.Slice: string slice index out of bounds")
		}
		var x string
		val := (*StringHeader)(unsafe.Pointer(&x))
		val.Data = s.Data + uintptr(i)
		val.Len = j - i
		return Value{v.typ, unsafe.Pointer(&x), v.flag}
	}
	if i < 0 || j < i || j > cap {
		panic("reflect.Value.Slice: slice index out of bounds")
	}
	// Declare slice so that gc can see the base pointer in it.
	var x []unsafe.Pointer
	// Reinterpret as *SliceHeader to edit.
	s := (*SliceHeader)(unsafe.Pointer(&x))
	s.Data = uintptr(base) + uintptr(i)*typ.elem.Size()
	s.Len = j - i
	s.Cap = cap - i
	fl := v.flag&flagRO | flagIndir | flag(Slice)<<flagKindShift
	return Value{typ.common(), unsafe.Pointer(&x), fl}
}
// Slice3 is the 3-index form of the slice operation: it returns v[i:j:k].
// It panics if v's Kind is not Array or Slice, or if v is an unaddressable array,
// or if the indexes are out of bounds.
func (v Value) Slice3(i, j, k int) Value {
	var (
		cap  int
		typ  *sliceType
		base unsafe.Pointer
	)
	switch kind := v.kind(); kind {
	default:
		panic(&ValueError{"reflect.Value.Slice3", kind})
	case Array:
		if v.flag&flagAddr == 0 {
			// Fixed: the panic message previously said "reflect.Value.Slice",
			// copied from the 2-index Slice method; report Slice3 so the
			// failing operation is identified correctly.
			panic("reflect.Value.Slice3: slice of unaddressable array")
		}
		tt := (*arrayType)(unsafe.Pointer(v.typ))
		cap = int(tt.len)
		typ = (*sliceType)(unsafe.Pointer(tt.slice))
		base = v.val
	case Slice:
		typ = (*sliceType)(unsafe.Pointer(v.typ))
		s := (*SliceHeader)(v.val)
		base = unsafe.Pointer(s.Data)
		cap = s.Cap
	}
	// Full-slice bounds: 0 <= i <= j <= k <= cap.
	if i < 0 || j < i || k < j || k > cap {
		panic("reflect.Value.Slice3: slice index out of bounds")
	}
	// Declare slice so that the garbage collector
	// can see the base pointer in it.
	var x []unsafe.Pointer
	// Reinterpret as *SliceHeader to edit.
	s := (*SliceHeader)(unsafe.Pointer(&x))
	s.Data = uintptr(base) + uintptr(i)*typ.elem.Size()
	s.Len = j - i
	s.Cap = k - i
	fl := v.flag&flagRO | flagIndir | flag(Slice)<<flagKindShift
	return Value{typ.common(), unsafe.Pointer(&x), fl}
}
// String returns the string v's underlying value, as a string.
// String is a special case because of Go's String method convention.
// Unlike the other getters, it does not panic if v's Kind is not String.
// Instead, it returns a string of the form "<T value>" where T is v's type.
func (v Value) String() string {
	k := v.kind()
	if k == Invalid {
		return "<invalid Value>"
	}
	if k == String {
		return *(*string)(v.val)
	}
	// If you call String on a reflect.Value of other type, it's better to
	// print something than to panic. Useful in debugging.
	return "<" + v.typ.String() + " Value>"
}
// TryRecv attempts to receive a value from the channel v but will not block.
// It panics if v's Kind is not Chan.
// If the receive cannot finish without blocking, x is the zero Value.
// The boolean ok is true if the value x corresponds to a send
// on the channel, false if it is a zero value received because the channel is closed.
func (v Value) TryRecv() (x Value, ok bool) {
	v.mustBe(Chan)
	v.mustBeExported()
	return v.recv(true) // non-blocking receive
}

// TrySend attempts to send x on the channel v but will not block.
// It panics if v's Kind is not Chan.
// It returns true if the value was sent, false otherwise.
// As in Go, x's value must be assignable to the channel's element type.
func (v Value) TrySend(x Value) bool {
	v.mustBe(Chan)
	v.mustBeExported()
	return v.send(x, true) // non-blocking send
}
// Type returns v's type.
func (v Value) Type() Type {
	f := v.flag
	if f == 0 {
		panic(&ValueError{"reflect.Value.Type", Invalid})
	}
	if f&flagMethod == 0 {
		// Easy case: not a method value, the stored type is the answer.
		return v.typ
	}
	// Method value.
	// v.typ describes the receiver, not the method type.
	// The method index is packed into the flag word.
	i := int(v.flag) >> flagMethodShift
	if v.typ.Kind() == Interface {
		// Method on interface.
		tt := (*interfaceType)(unsafe.Pointer(v.typ))
		if i < 0 || i >= len(tt.methods) {
			panic("reflect: internal error: invalid method index")
		}
		m := &tt.methods[i]
		return m.typ
	}
	// Method on concrete type.
	ut := v.typ.uncommon()
	if ut == nil || i < 0 || i >= len(ut.methods) {
		panic("reflect: internal error: invalid method index")
	}
	m := &ut.methods[i]
	return m.mtyp
}
// Uint returns v's underlying value, as a uint64.
// It panics if v's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64.
func (v Value) Uint() uint64 {
	k := v.kind()
	var p unsafe.Pointer
	if v.flag&flagIndir != 0 {
		// Data is stored indirectly; val is already the address.
		p = v.val
	} else {
		// The escape analysis is good enough that &v.val
		// does not trigger a heap allocation.
		p = unsafe.Pointer(&v.val)
	}
	// Widen the value at p from its exact width to uint64.
	switch k {
	case Uint:
		return uint64(*(*uint)(p))
	case Uint8:
		return uint64(*(*uint8)(p))
	case Uint16:
		return uint64(*(*uint16)(p))
	case Uint32:
		return uint64(*(*uint32)(p))
	case Uint64:
		return uint64(*(*uint64)(p))
	case Uintptr:
		return uint64(*(*uintptr)(p))
	}
	panic(&ValueError{"reflect.Value.Uint", k})
}
// UnsafeAddr returns a pointer to v's data.
// It is for advanced clients that also import the "unsafe" package.
// It panics if v is not addressable.
func (v Value) UnsafeAddr() uintptr {
	if v.typ == nil {
		// Zero Value: report Invalid rather than a misleading nil deref.
		panic(&ValueError{"reflect.Value.UnsafeAddr", Invalid})
	}
	if v.flag&flagAddr == 0 {
		panic("reflect.Value.UnsafeAddr of unaddressable value")
	}
	return uintptr(v.val)
}
// StringHeader is the runtime representation of a string.
// It cannot be used safely or portably and its representation may
// change in a later release.
// Moreover, the Data field is not sufficient to guarantee the data
// it references will not be garbage collected, so programs must keep
// a separate, correctly typed pointer to the underlying data.
type StringHeader struct {
	Data uintptr // address of the first byte
	Len  int     // length in bytes
}

// SliceHeader is the runtime representation of a slice.
// It cannot be used safely or portably and its representation may
// change in a later release.
// Moreover, the Data field is not sufficient to guarantee the data
// it references will not be garbage collected, so programs must keep
// a separate, correctly typed pointer to the underlying data.
type SliceHeader struct {
	Data uintptr // address of the first element
	Len  int     // number of elements
	Cap  int     // capacity of the backing array
}
// typesMustMatch panics, with a message prefixed by what, unless t1 and t2
// are the same type.
func typesMustMatch(what string, t1, t2 Type) {
	if t1 == t2 {
		return
	}
	panic(what + ": " + t1.String() + " != " + t2.String())
}
// grow grows the slice s so that it can hold extra more values, allocating
// more capacity if needed. It also returns the old and new slice lengths.
func grow(s Value, extra int) (Value, int, int) {
	i0 := s.Len()
	i1 := i0 + extra
	if i1 < i0 {
		// int overflow on the new length.
		panic("reflect.Append: slice overflow")
	}
	m := s.Cap()
	if i1 <= m {
		// Enough capacity already; just extend the length.
		return s.Slice(0, i1), i0, i1
	}
	if m == 0 {
		m = extra
	} else {
		// Growth policy: double while small, grow by 25% once the
		// current length reaches 1024.
		for m < i1 {
			if i0 < 1024 {
				m += m
			} else {
				m += m / 4
			}
		}
	}
	t := MakeSlice(s.Type(), i1, m)
	Copy(t, s)
	return t, i0, i1
}
// Append appends the values x to a slice s and returns the resulting slice.
// As in Go, each x's value must be assignable to the slice's element type.
func Append(s Value, x ...Value) Value {
	s.mustBe(Slice)
	s, base, _ := grow(s, len(x))
	for j, v := range x {
		s.Index(base + j).Set(v)
	}
	return s
}
// AppendSlice appends a slice t to a slice s and returns the resulting slice.
// The slices s and t must have the same element type.
func AppendSlice(s, t Value) Value {
	s.mustBe(Slice)
	t.mustBe(Slice)
	typesMustMatch("reflect.AppendSlice", s.Type().Elem(), t.Type().Elem())
	// Grow s by t's length, then bulk-copy t into the new tail.
	s, i0, i1 := grow(s, t.Len())
	Copy(s.Slice(i0, i1), t)
	return s
}
// Copy copies the contents of src into dst until either
// dst has been filled or src has been exhausted.
// It returns the number of elements copied.
// Dst and src each must have kind Slice or Array, and
// dst and src must have the same element type.
func Copy(dst, src Value) int {
	dk := dst.kind()
	if dk != Array && dk != Slice {
		panic(&ValueError{"reflect.Copy", dk})
	}
	if dk == Array {
		// Copying into an array mutates it in place, so it must be settable.
		dst.mustBeAssignable()
	}
	dst.mustBeExported()
	sk := src.kind()
	if sk != Array && sk != Slice {
		panic(&ValueError{"reflect.Copy", sk})
	}
	src.mustBeExported()
	de := dst.typ.Elem()
	se := src.typ.Elem()
	typesMustMatch("reflect.Copy", de, se)
	// Copy min(len(dst), len(src)) elements, as the built-in copy does.
	n := dst.Len()
	if sn := src.Len(); n > sn {
		n = sn
	}
	// If sk is an in-line array, cannot take its address.
	// Instead, copy element by element.
	if src.flag&flagIndir == 0 {
		for i := 0; i < n; i++ {
			dst.Index(i).Set(src.Index(i))
		}
		return n
	}
	// Copy via memmove.
	var da, sa unsafe.Pointer
	if dk == Array {
		da = dst.val
	} else {
		da = unsafe.Pointer((*SliceHeader)(dst.val).Data)
	}
	if sk == Array {
		sa = src.val
	} else {
		sa = unsafe.Pointer((*SliceHeader)(src.val).Data)
	}
	memmove(da, sa, uintptr(n)*de.Size())
	return n
}
// A runtimeSelect is a single case passed to rselect.
// This must match ../runtime/chan.c:/runtimeSelect
type runtimeSelect struct {
	dir uintptr // 0, SendDir, or RecvDir
	typ *rtype  // channel type
	ch  iword   // interface word for channel
	val iword   // interface word for value (for SendDir)
}

// rselect runs a select. It returns the index of the chosen case,
// and if the case was a receive, the interface word of the received
// value and the conventional OK bool to indicate whether the receive
// corresponds to a sent value.
// Implemented in the runtime (assembly/C); no Go body here.
func rselect([]runtimeSelect) (chosen int, recv iword, recvOK bool)

// A SelectDir describes the communication direction of a select case.
type SelectDir int

// NOTE: These values must match ../runtime/chan.c:/SelectDir.
const (
	_             SelectDir = iota
	SelectSend              // case Chan <- Send
	SelectRecv              // case <-Chan:
	SelectDefault           // default
)

// A SelectCase describes a single case in a select operation.
// The kind of case depends on Dir, the communication direction.
//
// If Dir is SelectDefault, the case represents a default case.
// Chan and Send must be zero Values.
//
// If Dir is SelectSend, the case represents a send operation.
// Normally Chan's underlying value must be a channel, and Send's underlying value must be
// assignable to the channel's element type. As a special case, if Chan is a zero Value,
// then the case is ignored, and the field Send will also be ignored and may be either zero
// or non-zero.
//
// If Dir is SelectRecv, the case represents a receive operation.
// Normally Chan's underlying value must be a channel and Send must be a zero Value.
// If Chan is a zero Value, then the case is ignored, but Send must still be a zero Value.
// When a receive operation is selected, the received Value is returned by Select.
//
type SelectCase struct {
	Dir  SelectDir // direction of case
	Chan Value     // channel to use (for send or receive)
	Send Value     // value to send (for send)
}
// Select executes a select operation described by the list of cases.
// Like the Go select statement, it blocks until at least one of the cases
// can proceed, makes a uniform pseudo-random choice,
// and then executes that case. It returns the index of the chosen case
// and, if that case was a receive operation, the value received and a
// boolean indicating whether the value corresponds to a send on the channel
// (as opposed to a zero value received because the channel is closed).
func Select(cases []SelectCase) (chosen int, recv Value, recvOK bool) {
	// NOTE: Do not trust that caller is not modifying cases data underfoot.
	// The range is safe because the caller cannot modify our copy of the len
	// and each iteration makes its own copy of the value c.
	runcases := make([]runtimeSelect, len(cases))
	haveDefault := false
	// Validate each case and translate it into the runtime's representation.
	for i, c := range cases {
		rc := &runcases[i]
		rc.dir = uintptr(c.Dir)
		switch c.Dir {
		default:
			panic("reflect.Select: invalid Dir")
		case SelectDefault: // default
			if haveDefault {
				panic("reflect.Select: multiple default cases")
			}
			haveDefault = true
			if c.Chan.IsValid() {
				panic("reflect.Select: default case has Chan value")
			}
			if c.Send.IsValid() {
				panic("reflect.Select: default case has Send value")
			}
		case SelectSend:
			ch := c.Chan
			if !ch.IsValid() {
				// Zero Chan: case is ignored (rc.ch stays zero).
				break
			}
			ch.mustBe(Chan)
			ch.mustBeExported()
			tt := (*chanType)(unsafe.Pointer(ch.typ))
			if ChanDir(tt.dir)&SendDir == 0 {
				panic("reflect.Select: SendDir case using recv-only channel")
			}
			rc.ch = ch.iword()
			rc.typ = &tt.rtype
			v := c.Send
			if !v.IsValid() {
				panic("reflect.Select: SendDir case missing Send value")
			}
			v.mustBeExported()
			v = v.assignTo("reflect.Select", tt.elem, nil)
			rc.val = v.iword()
		case SelectRecv:
			if c.Send.IsValid() {
				panic("reflect.Select: RecvDir case has Send value")
			}
			ch := c.Chan
			if !ch.IsValid() {
				// Zero Chan: case is ignored.
				break
			}
			ch.mustBe(Chan)
			ch.mustBeExported()
			tt := (*chanType)(unsafe.Pointer(ch.typ))
			rc.typ = &tt.rtype
			if ChanDir(tt.dir)&RecvDir == 0 {
				panic("reflect.Select: RecvDir case using send-only channel")
			}
			rc.ch = ch.iword()
		}
	}
	chosen, word, recvOK := rselect(runcases)
	if runcases[chosen].dir == uintptr(SelectRecv) {
		// Wrap the received word in a Value of the channel's element type,
		// same convention as Value.recv.
		tt := (*chanType)(unsafe.Pointer(runcases[chosen].typ))
		typ := tt.elem
		fl := flag(typ.Kind()) << flagKindShift
		if typ.size > ptrSize {
			fl |= flagIndir
		}
		recv = Value{typ, unsafe.Pointer(word), fl}
	}
	return chosen, recv, recvOK
}
/*
* constructors
*/
// implemented in package runtime
func unsafe_New(*rtype) unsafe.Pointer
func unsafe_NewArray(*rtype, int) unsafe.Pointer

// MakeSlice creates a new zero-initialized slice value
// for the specified slice type, length, and capacity.
func MakeSlice(typ Type, len, cap int) Value {
	if typ.Kind() != Slice {
		panic("reflect.MakeSlice of non-slice type")
	}
	if len < 0 {
		panic("reflect.MakeSlice: negative len")
	}
	if cap < 0 {
		panic("reflect.MakeSlice: negative cap")
	}
	if len > cap {
		panic("reflect.MakeSlice: len > cap")
	}
	// Declare slice so that gc can see the base pointer in it.
	var x []unsafe.Pointer
	// Reinterpret as *SliceHeader to edit.
	s := (*SliceHeader)(unsafe.Pointer(&x))
	s.Data = uintptr(unsafe_NewArray(typ.Elem().(*rtype), cap))
	s.Len = len
	s.Cap = cap
	return Value{typ.common(), unsafe.Pointer(&x), flagIndir | flag(Slice)<<flagKindShift}
}

// MakeChan creates a new channel with the specified type and buffer size.
func MakeChan(typ Type, buffer int) Value {
	if typ.Kind() != Chan {
		panic("reflect.MakeChan of non-chan type")
	}
	if buffer < 0 {
		panic("reflect.MakeChan: negative buffer size")
	}
	if typ.ChanDir() != BothDir {
		// Can only make bidirectional channels, as in Go source.
		panic("reflect.MakeChan: unidirectional channel type")
	}
	ch := makechan(typ.(*rtype), uint64(buffer))
	return Value{typ.common(), unsafe.Pointer(ch), flag(Chan) << flagKindShift}
}

// MakeMap creates a new map of the specified type.
func MakeMap(typ Type) Value {
	if typ.Kind() != Map {
		panic("reflect.MakeMap of non-map type")
	}
	m := makemap(typ.(*rtype))
	return Value{typ.common(), unsafe.Pointer(m), flag(Map) << flagKindShift}
}
// Indirect returns the value that v points to.
// If v is a nil pointer, Indirect returns a zero Value.
// If v is not a pointer, Indirect returns v.
func Indirect(v Value) Value {
	if v.Kind() == Ptr {
		return v.Elem()
	}
	return v
}
// ValueOf returns a new Value initialized to the concrete value
// stored in the interface i. ValueOf(nil) returns the zero Value.
func ValueOf(i interface{}) Value {
	if i == nil {
		return Value{}
	}
	// TODO(rsc): Eliminate this terrible hack.
	// In the call to packValue, eface.typ doesn't escape,
	// and eface.word is an integer. So it looks like
	// i (= eface) doesn't escape. But really it does,
	// because eface.word is actually a pointer.
	escapes(i)
	// For an interface value with the noAddr bit set,
	// the representation is identical to an empty interface.
	eface := *(*emptyInterface)(unsafe.Pointer(&i))
	typ := eface.typ
	fl := flag(typ.Kind()) << flagKindShift
	if typ.size > ptrSize {
		// Value does not fit in a word, so the word is a pointer to it.
		fl |= flagIndir
	}
	return Value{typ, unsafe.Pointer(eface.word), fl}
}
// Zero returns a Value representing the zero value for the specified type.
// The result is different from the zero value of the Value struct,
// which represents no value at all.
// For example, Zero(TypeOf(42)) returns a Value with Kind Int and value 0.
// The returned value is neither addressable nor settable.
func Zero(typ Type) Value {
	if typ == nil {
		panic("reflect: Zero(nil)")
	}
	t := typ.common()
	fl := flag(t.Kind()) << flagKindShift
	if t.size <= ptrSize {
		// Small value: a nil word already reads as zero.
		return Value{t, nil, fl}
	}
	// Larger value: allocate zeroed storage and point at it.
	return Value{t, unsafe_New(typ.(*rtype)), fl | flagIndir}
}

// New returns a Value representing a pointer to a new zero value
// for the specified type. That is, the returned Value's Type is PtrTo(t).
func New(typ Type) Value {
	if typ == nil {
		panic("reflect: New(nil)")
	}
	ptr := unsafe_New(typ.(*rtype))
	fl := flag(Ptr) << flagKindShift
	return Value{typ.common().ptrTo(), ptr, fl}
}

// NewAt returns a Value representing a pointer to a value of the
// specified type, using p as that pointer.
func NewAt(typ Type, p unsafe.Pointer) Value {
	fl := flag(Ptr) << flagKindShift
	return Value{typ.common().ptrTo(), p, fl}
}
// assignTo returns a value v that can be assigned directly to typ.
// It panics if v is not assignable to typ.
// For a conversion to an interface type, target is a suggested scratch space to use.
func (v Value) assignTo(context string, dst *rtype, target *interface{}) Value {
	if v.flag&flagMethod != 0 {
		// Materialize a method value before assigning it anywhere.
		v = makeMethodValue(context, v)
	}
	switch {
	case directlyAssignable(dst, v.typ):
		// Overwrite type so that they match.
		// Same memory layout, so no harm done.
		v.typ = dst
		fl := v.flag & (flagRO | flagAddr | flagIndir)
		fl |= flag(dst.Kind()) << flagKindShift
		return Value{dst, v.val, fl}
	case implements(dst, v.typ):
		// Assigning a concrete value to an interface type.
		if target == nil {
			target = new(interface{})
		}
		x := valueInterface(v, false)
		if dst.NumMethod() == 0 {
			*target = x
		} else {
			ifaceE2I(dst, x, unsafe.Pointer(target))
		}
		return Value{dst, unsafe.Pointer(target), flagIndir | flag(Interface)<<flagKindShift}
	}
	// Failed.
	panic(context + ": value of type " + v.typ.String() + " is not assignable to type " + dst.String())
}

// Convert returns the value v converted to type t.
// If the usual Go conversion rules do not allow conversion
// of the value v to type t, Convert panics.
func (v Value) Convert(t Type) Value {
	if v.flag&flagMethod != 0 {
		v = makeMethodValue("Convert", v)
	}
	op := convertOp(t.common(), v.typ)
	if op == nil {
		panic("reflect.Value.Convert: value of type " + v.typ.String() + " cannot be converted to type " + t.String())
	}
	return op(v, t)
}
// convertOp returns the function to convert a value of type src
// to a value of type dst. If the conversion is illegal, convertOp returns nil.
func convertOp(dst, src *rtype) func(Value, Type) Value {
	// First, the kind-directed numeric and string conversions.
	switch src.Kind() {
	case Int, Int8, Int16, Int32, Int64:
		switch dst.Kind() {
		case Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
			return cvtInt
		case Float32, Float64:
			return cvtIntFloat
		case String:
			return cvtIntString
		}
	case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
		switch dst.Kind() {
		case Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
			return cvtUint
		case Float32, Float64:
			return cvtUintFloat
		case String:
			return cvtUintString
		}
	case Float32, Float64:
		switch dst.Kind() {
		case Int, Int8, Int16, Int32, Int64:
			return cvtFloatInt
		case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
			return cvtFloatUint
		case Float32, Float64:
			return cvtFloat
		}
	case Complex64, Complex128:
		switch dst.Kind() {
		case Complex64, Complex128:
			return cvtComplex
		}
	case String:
		// string -> []byte / []rune, but only for unnamed element types
		// (PkgPath == "" means the element is predeclared byte/rune).
		if dst.Kind() == Slice && dst.Elem().PkgPath() == "" {
			switch dst.Elem().Kind() {
			case Uint8:
				return cvtStringBytes
			case Int32:
				return cvtStringRunes
			}
		}
	case Slice:
		// []byte / []rune -> string, same unnamed-element restriction.
		if dst.Kind() == String && src.Elem().PkgPath() == "" {
			switch src.Elem().Kind() {
			case Uint8:
				return cvtBytesString
			case Int32:
				return cvtRunesString
			}
		}
	}
	// dst and src have same underlying type.
	if haveIdenticalUnderlyingType(dst, src) {
		return cvtDirect
	}
	// dst and src are unnamed pointer types with same underlying base type.
	if dst.Kind() == Ptr && dst.Name() == "" &&
		src.Kind() == Ptr && src.Name() == "" &&
		haveIdenticalUnderlyingType(dst.Elem().common(), src.Elem().common()) {
		return cvtDirect
	}
	if implements(dst, src) {
		if src.Kind() == Interface {
			return cvtI2I
		}
		return cvtT2I
	}
	return nil
}
// makeInt returns a Value of type t equal to bits (possibly truncated),
// where t is a signed or unsigned int type.
func makeInt(f flag, bits uint64, t Type) Value {
	typ := t.common()
	if typ.size > ptrSize {
		// Assume ptrSize >= 4, so this must be uint64.
		ptr := unsafe_New(typ)
		*(*uint64)(unsafe.Pointer(ptr)) = bits
		return Value{typ, ptr, f | flagIndir | flag(typ.Kind())<<flagKindShift}
	}
	// Value fits in a word: write the low typ.size bytes into w.
	var w iword
	switch typ.size {
	case 1:
		*(*uint8)(unsafe.Pointer(&w)) = uint8(bits)
	case 2:
		*(*uint16)(unsafe.Pointer(&w)) = uint16(bits)
	case 4:
		*(*uint32)(unsafe.Pointer(&w)) = uint32(bits)
	case 8:
		*(*uint64)(unsafe.Pointer(&w)) = uint64(bits)
	}
	return Value{typ, unsafe.Pointer(w), f | flag(typ.Kind())<<flagKindShift}
}

// makeFloat returns a Value of type t equal to v (possibly truncated to float32),
// where t is a float32 or float64 type.
func makeFloat(f flag, v float64, t Type) Value {
	typ := t.common()
	if typ.size > ptrSize {
		// Assume ptrSize >= 4, so this must be float64.
		ptr := unsafe_New(typ)
		*(*float64)(unsafe.Pointer(ptr)) = v
		return Value{typ, ptr, f | flagIndir | flag(typ.Kind())<<flagKindShift}
	}
	var w iword
	switch typ.size {
	case 4:
		*(*float32)(unsafe.Pointer(&w)) = float32(v)
	case 8:
		*(*float64)(unsafe.Pointer(&w)) = v
	}
	return Value{typ, unsafe.Pointer(w), f | flag(typ.Kind())<<flagKindShift}
}

// makeComplex returns a Value of type t equal to v (possibly truncated to complex64),
// where t is a complex64 or complex128 type.
func makeComplex(f flag, v complex128, t Type) Value {
	typ := t.common()
	if typ.size > ptrSize {
		ptr := unsafe_New(typ)
		switch typ.size {
		case 8:
			*(*complex64)(unsafe.Pointer(ptr)) = complex64(v)
		case 16:
			*(*complex128)(unsafe.Pointer(ptr)) = v
		}
		return Value{typ, ptr, f | flagIndir | flag(typ.Kind())<<flagKindShift}
	}
	// Assume ptrSize <= 8 so this must be complex64.
	var w iword
	*(*complex64)(unsafe.Pointer(&w)) = complex64(v)
	return Value{typ, unsafe.Pointer(w), f | flag(typ.Kind())<<flagKindShift}
}
// makeString returns a Value of string type t holding v,
// carrying the read-only bits from f instead of the Addr flag.
func makeString(f flag, v string, t Type) Value {
	ret := New(t).Elem()
	ret.SetString(v)
	// Strip flagAddr (the result is not addressable) and merge in f.
	ret.flag = ret.flag&^flagAddr | f
	return ret
}

// makeBytes is the []byte analogue of makeString.
func makeBytes(f flag, v []byte, t Type) Value {
	ret := New(t).Elem()
	ret.SetBytes(v)
	ret.flag = ret.flag&^flagAddr | f
	return ret
}

// makeRunes is the []rune analogue of makeString.
func makeRunes(f flag, v []rune, t Type) Value {
	ret := New(t).Elem()
	ret.setRunes(v)
	ret.flag = ret.flag&^flagAddr | f
	return ret
}
// These conversion functions are returned by convertOp
// for classes of conversions. For example, the first function, cvtInt,
// takes any value v of signed int type and returns the value converted
// to type t, where t is any signed or unsigned int type.
// Each one propagates only the read-only bit (flagRO) of v's flags.

// convertOp: intXX -> [u]intXX
func cvtInt(v Value, t Type) Value {
	return makeInt(v.flag&flagRO, uint64(v.Int()), t)
}

// convertOp: uintXX -> [u]intXX
func cvtUint(v Value, t Type) Value {
	return makeInt(v.flag&flagRO, v.Uint(), t)
}

// convertOp: floatXX -> intXX
func cvtFloatInt(v Value, t Type) Value {
	return makeInt(v.flag&flagRO, uint64(int64(v.Float())), t)
}

// convertOp: floatXX -> uintXX
func cvtFloatUint(v Value, t Type) Value {
	return makeInt(v.flag&flagRO, uint64(v.Float()), t)
}

// convertOp: intXX -> floatXX
func cvtIntFloat(v Value, t Type) Value {
	return makeFloat(v.flag&flagRO, float64(v.Int()), t)
}

// convertOp: uintXX -> floatXX
func cvtUintFloat(v Value, t Type) Value {
	return makeFloat(v.flag&flagRO, float64(v.Uint()), t)
}

// convertOp: floatXX -> floatXX
func cvtFloat(v Value, t Type) Value {
	return makeFloat(v.flag&flagRO, v.Float(), t)
}

// convertOp: complexXX -> complexXX
func cvtComplex(v Value, t Type) Value {
	return makeComplex(v.flag&flagRO, v.Complex(), t)
}

// convertOp: intXX -> string
func cvtIntString(v Value, t Type) Value {
	return makeString(v.flag&flagRO, string(v.Int()), t)
}

// convertOp: uintXX -> string
func cvtUintString(v Value, t Type) Value {
	return makeString(v.flag&flagRO, string(v.Uint()), t)
}

// convertOp: []byte -> string
func cvtBytesString(v Value, t Type) Value {
	return makeString(v.flag&flagRO, string(v.Bytes()), t)
}

// convertOp: string -> []byte
func cvtStringBytes(v Value, t Type) Value {
	return makeBytes(v.flag&flagRO, []byte(v.String()), t)
}

// convertOp: []rune -> string
func cvtRunesString(v Value, t Type) Value {
	return makeString(v.flag&flagRO, string(v.runes()), t)
}

// convertOp: string -> []rune
func cvtStringRunes(v Value, t Type) Value {
	return makeRunes(v.flag&flagRO, []rune(v.String()), t)
}
// convertOp: direct copy
func cvtDirect(v Value, typ Type) Value {
	f := v.flag
	t := typ.common()
	val := v.val
	if f&flagAddr != 0 {
		// indirect, mutable word - make a copy
		ptr := unsafe_New(t)
		memmove(ptr, val, t.size)
		val = ptr
		f &^= flagAddr // the copy is no longer addressable
	}
	return Value{t, val, v.flag&flagRO | f}
}

// convertOp: concrete -> interface
func cvtT2I(v Value, typ Type) Value {
	target := new(interface{})
	x := valueInterface(v, false)
	if typ.NumMethod() == 0 {
		// Empty interface: store directly.
		*target = x
	} else {
		// Non-empty interface: let the runtime build the itab.
		ifaceE2I(typ.(*rtype), x, unsafe.Pointer(target))
	}
	return Value{typ.common(), unsafe.Pointer(target), v.flag&flagRO | flagIndir | flag(Interface)<<flagKindShift}
}

// convertOp: interface -> interface
func cvtI2I(v Value, typ Type) Value {
	if v.IsNil() {
		// nil interface converts to the nil value of the target type,
		// preserving the read-only bit.
		ret := Zero(typ)
		ret.flag |= v.flag & flagRO
		return ret
	}
	return cvtT2I(v.Elem(), typ)
}
// implemented in ../pkg/runtime

// Channel primitives backing reflect's channel operations.
func chancap(ch iword) int
func chanclose(ch iword)
func chanlen(ch iword) int
func chanrecv(t *rtype, ch iword, nb bool) (val iword, selected, received bool)
func chansend(t *rtype, ch iword, val iword, nb bool) bool
func makechan(typ *rtype, size uint64) (ch iword)

// Map primitives backing reflect's map operations.
func makemap(t *rtype) (m iword)
func mapaccess(t *rtype, m iword, key iword) (val iword, ok bool)
func mapassign(t *rtype, m iword, key, val iword, ok bool)
func mapiterinit(t *rtype, m iword) *byte
func mapiterkey(it *byte) (key iword, ok bool)
func mapiternext(it *byte)
func maplen(m iword) int

// Function-call and interface-conversion helpers.
func call(fn, arg unsafe.Pointer, n uint32)
func ifaceE2I(t *rtype, src interface{}, dst unsafe.Pointer)
// Dummy annotation marking that the value x escapes,
// for use in cases where the reflect code is so clever that
// the compiler cannot follow.
func escapes(x interface{}) {
	// The store is never executed (dummy.b stays false), but its mere
	// presence forces the compiler to treat x as escaping to the heap.
	if dummy.b {
		dummy.x = x
	}
}
// dummy is the escape sink used by escapes; it is never actually
// read or written at runtime.
var dummy struct {
	b bool
	x interface{}
}
|
package brightbox
import (
"time"
)
// ServerGroup represents a server group
// https://api.gb1.brightbox.com/1.0/#server_group
//
// Fields without an explicit json tag rely on encoding/json's default
// case-insensitive field-name match when decoding API responses.
type ServerGroup struct {
	Id          string
	Name        string
	CreatedAt   *time.Time `json:"created_at"`
	Description string
	Default     bool
	Account     Account `json:"account"`
	Servers     []Server
	FirewallPolicy FirewallPolicy
}
// ServerGroupOptions is used in combination with CreateServerGroup and
// UpdateServerGroup to create and update server groups
//
// Pointer fields with omitempty are only serialized when set, so unset
// attributes are left untouched by an update.
type ServerGroupOptions struct {
	Id          string  `json:"-"` // identifies the group to update; never serialized
	Name        *string `json:"name,omitempty"`
	Description *string `json:"description,omitempty"`
}

// serverGroupMemberOptions is the request body for the add_servers,
// remove_servers and move_servers membership endpoints.
type serverGroupMemberOptions struct {
	Servers     []serverGroupMember `json:"servers"`
	Destination string              `json:"destination,omitempty"` // target group for move_servers
}

// serverGroupMember wraps a single server identifier for the API.
type serverGroupMember struct {
	Server string `json:"server,omitempty"`
}
// ServerGroups retrieves a list of all server groups
func (c *Client) ServerGroups() ([]ServerGroup, error) {
	var groups []ServerGroup
	// MakeApiRequest decodes the JSON response body into groups.
	_, err := c.MakeApiRequest("GET", "/1.0/server_groups", nil, &groups)
	if err != nil {
		return nil, err
	}
	// err is known to be nil here; return an explicit nil for clarity,
	// consistent with the other methods in this file.
	return groups, nil
}
// ServerGroup retrieves a detailed view on one server group
func (c *Client) ServerGroup(identifier string) (*ServerGroup, error) {
	group := new(ServerGroup)
	_, err := c.MakeApiRequest("GET", "/1.0/server_groups/"+identifier, nil, group)
	if err != nil {
		return nil, err
	}
	// err is known to be nil here; return an explicit nil for clarity,
	// consistent with the other methods in this file.
	return group, nil
}
// CreateServerGroup creates a new server group
//
// It takes an instance of ServerGroupOptions. Not all attributes can be
// specified at create time (such as Id, which is allocated for you).
func (c *Client) CreateServerGroup(newServerGroup *ServerGroupOptions) (*ServerGroup, error) {
	group := new(ServerGroup)
	// NOTE(review): &group is a **ServerGroup here, while ServerGroup()
	// above passes the *ServerGroup directly — both decode identically
	// with encoding/json, but the style is inconsistent; confirm before
	// unifying.
	_, err := c.MakeApiRequest("POST", "/1.0/server_groups", newServerGroup, &group)
	if err != nil {
		return nil, err
	}
	return group, nil
}
// UpdateServerGroup updates an existing server groups's attributes. Not all
// attributes can be changed (such as Id).
//
// Specify the server group you want to update using the ServerGroupOptions Id
// field.
//
// To change group memberships, use AddServersToServerGroup,
// RemoveServersFromServerGroup and MoveServersToServerGroup.
func (c *Client) UpdateServerGroup(updateServerGroup *ServerGroupOptions) (*ServerGroup, error) {
	group := new(ServerGroup)
	// Only set (non-nil) option fields are serialized, so untouched
	// attributes are left unchanged by the API.
	_, err := c.MakeApiRequest("PUT", "/1.0/server_groups/"+updateServerGroup.Id, updateServerGroup, &group)
	if err != nil {
		return nil, err
	}
	return group, nil
}
// DestroyServerGroup destroys an existing server group
func (c *Client) DestroyServerGroup(identifier string) error {
	// Issue the DELETE and surface any transport/API error directly;
	// a nil error means the group was destroyed.
	_, err := c.MakeApiRequest("DELETE", "/1.0/server_groups/"+identifier, nil, nil)
	return err
}
// AddServersToServerGroup adds servers to an existing server group.
//
// The identifier parameter specifies the destination group.
//
// The serverIds parameter specifies the identifiers of the servers you want to add.
func (c *Client) AddServersToServerGroup(identifier string, serverIds []string) (*ServerGroup, error) {
	group := new(ServerGroup)
	opts := new(serverGroupMemberOptions)
	// Wrap each id in the JSON member shape the API expects.
	for _, id := range serverIds {
		opts.Servers = append(opts.Servers, serverGroupMember{Server: id})
	}
	_, err := c.MakeApiRequest("POST", "/1.0/server_groups/"+identifier+"/add_servers", opts, &group)
	if err != nil {
		return nil, err
	}
	return group, nil
}
// RemoveServersFromServerGroup removes servers from an existing server group.
//
// The identifier parameter specifies the group.
//
// The serverIds parameter specifies the identifiers of the servers you want to remove.
func (c *Client) RemoveServersFromServerGroup(identifier string, serverIds []string) (*ServerGroup, error) {
	group := new(ServerGroup)
	opts := new(serverGroupMemberOptions)
	// Wrap each id in the JSON member shape the API expects.
	for _, id := range serverIds {
		opts.Servers = append(opts.Servers, serverGroupMember{Server: id})
	}
	_, err := c.MakeApiRequest("POST", "/1.0/server_groups/"+identifier+"/remove_servers", opts, &group)
	if err != nil {
		return nil, err
	}
	return group, nil
}
// MoveServersToServerGroup atomically moves servers from one group to another.
//
// The src parameter specifies the group to which the servers currently belong
//
// The dst parameter specifies the group to which you want to move the servers.
//
// The serverIds parameter specifies the identifiers of the servers you want to move.
func (c *Client) MoveServersToServerGroup(src string, dst string, serverIds []string) (*ServerGroup, error) {
	group := new(ServerGroup)
	// Destination carries the target group; the POST goes to the source.
	opts := serverGroupMemberOptions{Destination: dst}
	for _, id := range serverIds {
		opts.Servers = append(opts.Servers, serverGroupMember{Server: id})
	}
	_, err := c.MakeApiRequest("POST", "/1.0/server_groups/"+src+"/move_servers", opts, &group)
	if err != nil {
		return nil, err
	}
	return group, nil
}
Make firewall policy link a pointer in server groups
So we can deal with server groups before a firewall policy is allocated
package brightbox
import (
"time"
)
// ServerGroup represents a server group
// https://api.gb1.brightbox.com/1.0/#server_group
//
// FirewallPolicy is a pointer so a group that has no firewall policy
// allocated yet decodes as nil rather than a zero-valued struct.
type ServerGroup struct {
	Id          string
	Name        string
	CreatedAt   *time.Time `json:"created_at"`
	Description string
	Default     bool
	Account     Account `json:"account"`
	Servers     []Server
	FirewallPolicy *FirewallPolicy `json:"firewall_policy"`
}
// ServerGroupOptions is used in combination with CreateServerGroup and
// UpdateServerGroup to create and update server groups
//
// Pointer fields with omitempty are only serialized when set, so unset
// attributes are left untouched by an update.
type ServerGroupOptions struct {
	Id          string  `json:"-"` // identifies the group to update; never serialized
	Name        *string `json:"name,omitempty"`
	Description *string `json:"description,omitempty"`
}

// serverGroupMemberOptions is the request body for the add_servers,
// remove_servers and move_servers membership endpoints.
type serverGroupMemberOptions struct {
	Servers     []serverGroupMember `json:"servers"`
	Destination string              `json:"destination,omitempty"` // target group for move_servers
}

// serverGroupMember wraps a single server identifier for the API.
type serverGroupMember struct {
	Server string `json:"server,omitempty"`
}
// ServerGroups retrieves a list of all server groups
func (c *Client) ServerGroups() ([]ServerGroup, error) {
	var groups []ServerGroup
	// MakeApiRequest decodes the JSON response body into groups.
	_, err := c.MakeApiRequest("GET", "/1.0/server_groups", nil, &groups)
	if err != nil {
		return nil, err
	}
	// err is known to be nil here; return an explicit nil for clarity,
	// consistent with the other methods in this file.
	return groups, nil
}
// ServerGroup retrieves a detailed view on one server group
func (c *Client) ServerGroup(identifier string) (*ServerGroup, error) {
	group := new(ServerGroup)
	_, err := c.MakeApiRequest("GET", "/1.0/server_groups/"+identifier, nil, group)
	if err != nil {
		return nil, err
	}
	// err is known to be nil here; return an explicit nil for clarity,
	// consistent with the other methods in this file.
	return group, nil
}
// CreateServerGroup creates a new server group
//
// It takes an instance of ServerGroupOptions. Not all attributes can be
// specified at create time (such as Id, which is allocated for you).
func (c *Client) CreateServerGroup(newServerGroup *ServerGroupOptions) (*ServerGroup, error) {
	group := new(ServerGroup)
	// NOTE(review): &group is a **ServerGroup here, while ServerGroup()
	// above passes the *ServerGroup directly — both decode identically
	// with encoding/json, but the style is inconsistent; confirm before
	// unifying.
	_, err := c.MakeApiRequest("POST", "/1.0/server_groups", newServerGroup, &group)
	if err != nil {
		return nil, err
	}
	return group, nil
}
// UpdateServerGroup updates an existing server groups's attributes. Not all
// attributes can be changed (such as Id).
//
// Specify the server group you want to update using the ServerGroupOptions Id
// field.
//
// To change group memberships, use AddServersToServerGroup,
// RemoveServersFromServerGroup and MoveServersToServerGroup.
func (c *Client) UpdateServerGroup(updateServerGroup *ServerGroupOptions) (*ServerGroup, error) {
	group := new(ServerGroup)
	// Only set (non-nil) option fields are serialized, so untouched
	// attributes are left unchanged by the API.
	_, err := c.MakeApiRequest("PUT", "/1.0/server_groups/"+updateServerGroup.Id, updateServerGroup, &group)
	if err != nil {
		return nil, err
	}
	return group, nil
}
// DestroyServerGroup destroys an existing server group
func (c *Client) DestroyServerGroup(identifier string) error {
	// Issue the DELETE and surface any transport/API error directly;
	// a nil error means the group was destroyed.
	_, err := c.MakeApiRequest("DELETE", "/1.0/server_groups/"+identifier, nil, nil)
	return err
}
// AddServersToServerGroup adds servers to an existing server group.
//
// The identifier parameter specifies the destination group.
//
// The serverIds parameter specifies the identifiers of the servers you want to add.
func (c *Client) AddServersToServerGroup(identifier string, serverIds []string) (*ServerGroup, error) {
	group := new(ServerGroup)
	opts := new(serverGroupMemberOptions)
	// Wrap each id in the JSON member shape the API expects.
	for _, id := range serverIds {
		opts.Servers = append(opts.Servers, serverGroupMember{Server: id})
	}
	_, err := c.MakeApiRequest("POST", "/1.0/server_groups/"+identifier+"/add_servers", opts, &group)
	if err != nil {
		return nil, err
	}
	return group, nil
}
// RemoveServersFromServerGroup removes servers from an existing server group.
//
// The identifier parameter specifies the group.
//
// The serverIds parameter specifies the identifiers of the servers you want to remove.
func (c *Client) RemoveServersFromServerGroup(identifier string, serverIds []string) (*ServerGroup, error) {
	group := new(ServerGroup)
	opts := new(serverGroupMemberOptions)
	// Wrap each id in the JSON member shape the API expects.
	for _, id := range serverIds {
		opts.Servers = append(opts.Servers, serverGroupMember{Server: id})
	}
	_, err := c.MakeApiRequest("POST", "/1.0/server_groups/"+identifier+"/remove_servers", opts, &group)
	if err != nil {
		return nil, err
	}
	return group, nil
}
// MoveServersToServerGroup atomically moves servers from one group to another.
//
// The src parameter specifies the group to which the servers currently belong
//
// The dst parameter specifies the group to which you want to move the servers.
//
// The serverIds parameter specifies the identifiers of the servers you want to move.
func (c *Client) MoveServersToServerGroup(src string, dst string, serverIds []string) (*ServerGroup, error) {
	group := new(ServerGroup)
	// Destination carries the target group; the POST goes to the source.
	opts := serverGroupMemberOptions{Destination: dst}
	for _, id := range serverIds {
		opts.Servers = append(opts.Servers, serverGroupMember{Server: id})
	}
	_, err := c.MakeApiRequest("POST", "/1.0/server_groups/"+src+"/move_servers", opts, &group)
	if err != nil {
		return nil, err
	}
	return group, nil
}
|
// Copyright 2014, The Serviced Authors. All rights reserved.
// Use of this source code is governed by a
// license that can be found in the LICENSE file.
// Package agent implements a service that runs on a serviced node. It is
// responsible for ensuring that a particular node is running the correct services
// and reporting the state and health of those services back to the master
// serviced.
package isvcs
import (
"github.com/zenoss/glog"
"github.com/zenoss/go-dockerclient"
"errors"
"os"
"os/exec"
"path"
)
// managerOp is a type of manager operation (stop, start, notify)
type managerOp int

// constants for the manager operations, dispatched in loop()
const (
	managerOpStart managerOp = iota // Start the subservices
	managerOpStop // stop the subservices
	managerOpNotify // notify config in subservices
	managerOpExit // exit the loop of the manager
	managerOpRegisterContainer // register a given container
	managerOpInit // make sure manager is ready to run containers
	managerOpWipe // wipe all data associated with volumes
)
// Sentinel errors returned by Manager operations. Initialized directly
// rather than via init(): there is no ordering requirement, and direct
// initialization cannot leave them nil if init order ever changes.
var (
	// ErrManagerUnknownOp is returned when loop() receives an unrecognized operation.
	ErrManagerUnknownOp = errors.New("manager: unknown operation")
	// ErrManagerNotRunning is returned by operations that require a running manager.
	ErrManagerNotRunning = errors.New("manager: not running")
	// ErrManagerRunning is returned by operations that require a stopped manager.
	ErrManagerRunning = errors.New("manager: already running")
	// ErrImageNotExists is returned when a required docker image cannot be found.
	ErrImageNotExists = errors.New("manager: image does not exist")
)
// A managerRequest describes an operation for the manager loop() to perform and a response channel
type managerRequest struct {
	op managerOp // the operation to perform
	val interface{} // optional argument (e.g. *Container for register, payload for notify)
	response chan error // the response channel
}

// A manager of docker services run in ephemeral containers
type Manager struct {
	dockerAddress string // the docker endpoint address to talk to
	imagesDir string // local directory where images could be loaded from
	volumesDir string // local directory where volumes are stored
	requests chan managerRequest // the main loops request channel
	containers map[string]*Container // registered containers, keyed by name
}
// Returns a new Manager struct and starts the Manager's main loop()
func NewManager(dockerAddress, imagesDir, volumesDir string) *Manager {
	manager := &Manager{
		dockerAddress: dockerAddress,
		imagesDir: imagesDir,
		volumesDir: volumesDir,
		requests: make(chan managerRequest),
		containers: make(map[string]*Container),
	}
	// The loop goroutine owns all manager state; it runs until a
	// managerOpExit request arrives (see TearDown).
	go manager.loop()
	return manager
}
// newDockerClient is a function pointer to the client constructor so that it can be mocked in tests
var newDockerClient func(address string) (*docker.Client, error)

func init() {
	newDockerClient = docker.NewClient
}
// imageExists checks whether the given repo:tag exists in docker.
func (m *Manager) imageExists(repo, tag string) (bool, error) {
	client, err := newDockerClient(m.dockerAddress)
	if err != nil {
		return false, err
	}
	images, err := client.ListImages(false)
	if err != nil {
		return false, err
	}
	repoTag := repo + ":" + tag
	for _, image := range images {
		for _, tagi := range image.RepoTags {
			if string(tagi) == repoTag {
				return true, nil
			}
		}
	}
	return false, nil
}
// SetVolumesDir sets the volumes dir for *Manager
// NOTE(review): this mutates manager state outside the loop() goroutine;
// confirm callers only invoke it before Start().
func (m *Manager) SetVolumesDir(dir string) {
	m.volumesDir = dir
}
// allImagesExist checks for the existence of all registered container
// images, returning ErrImageNotExists if any is missing.
func (m *Manager) allImagesExist() error {
	for _, c := range m.containers {
		exists, err := m.imageExists(c.Repo, c.Tag)
		if err != nil {
			return err
		}
		if !exists {
			return ErrImageNotExists
		}
	}
	return nil
}
// loadImage loads a docker image from a tar export by piping the file
// into `docker import`.
func loadImage(tarball, dockerAddress, repoTag string) error {
	file, err := os.Open(tarball)
	if err != nil {
		return err
	}
	defer file.Close()
	cmd := exec.Command("docker", "-H", dockerAddress, "import", "-", repoTag)
	cmd.Stdin = file
	glog.Infof("Loading docker image")
	return cmd.Run()
}
// wipe() removes the data directory associate with the manager
func (m *Manager) wipe() error {
	// remove volumeDir by running a container as root
	// FIXME: detect if already root and avoid running docker
	// NOTE(review): "-rm" is the old single-dash docker flag; modern
	// docker expects "--rm" — confirm against the docker version in use.
	cmd := exec.Command("docker", "-H", m.dockerAddress,
		"run", "-rm", "-v", m.volumesDir+":/mnt/volumes", "ubuntu", "/bin/sh", "-c", "rm -Rf /mnt/volumes/*")
	return cmd.Run()
}
// loadImages loads all images defined in the registered services from
// local tarballs, skipping images already present in docker and tarballs
// already imported during this call.
func (m *Manager) loadImages() error {
	loadedImages := make(map[string]bool)
	for _, c := range m.containers {
		exists, err := m.imageExists(c.Repo, c.Tag)
		if err != nil {
			return err
		}
		if exists {
			continue
		}
		localTar := path.Join(m.imagesDir, c.Repo, c.Tag+".tar.gz")
		glog.Infof("Looking for %s", localTar)
		if loadedImages[localTar] {
			continue
		}
		// Only import when the tarball is present; a missing tarball is
		// skipped here (allImagesExist later reports missing images).
		if _, err := os.Stat(localTar); err == nil {
			if err := loadImage(localTar, m.dockerAddress, c.Repo+":"+c.Tag); err != nil {
				return err
			}
			loadedImages[localTar] = true
		}
	}
	return nil
}
// containerStartResponse carries the result of one container's Start()
// back to the loop() goroutine.
type containerStartResponse struct {
	name string // container name
	err error // nil on successful start
}
// loop() maintains the Manager's state. It is the single goroutine that
// owns the running-container set; all mutations arrive as managerRequest
// values on m.requests and are acknowledged on request.response.
func (m *Manager) loop() {
	// running is non-nil exactly while the manager is started.
	var running map[string]*Container
	for {
		select {
		case request := <-m.requests:
			switch request.op {
			case managerOpWipe:
				// Wiping is only permitted while the manager is stopped.
				if running != nil {
					request.response <- ErrManagerRunning
					continue
				}
				// running is guaranteed nil here, so there are no
				// containers to stop first (the original stop loop at
				// this point was dead code); just wipe the volumes.
				request.response <- m.wipe()
			case managerOpNotify:
				// Forward the payload to every running container that
				// registered a Notify callback; report the last error.
				var retErr error
				for _, c := range running {
					if c.Notify != nil {
						if err := c.Notify(c, request.val); err != nil {
							retErr = err
						}
					}
				}
				request.response <- retErr
				continue
			case managerOpExit:
				request.response <- nil
				return // this will exit the loop()
			case managerOpStart:
				if running != nil {
					request.response <- ErrManagerRunning
					continue
				}
				if err := m.loadImages(); err != nil {
					request.response <- err
					continue
				}
				if err := m.allImagesExist(); err != nil {
					request.response <- err
				} else {
					// start a map of running containers
					running = make(map[string]*Container)
					// start a channel to track responses
					started := make(chan containerStartResponse, len(m.containers))
					// start containers in parallel
					for _, c := range m.containers {
						running[c.Name] = c
						go func(con *Container, respc chan containerStartResponse) {
							glog.Infof("calling start on %s", con.Name)
							con.SetVolumesDir(m.volumesDir)
							resp := containerStartResponse{
								name: con.Name,
								err: con.Start(),
							}
							respc <- resp
						}(c, started)
					}
					// wait for every container to report; failed
					// containers are dropped from the running set and
					// the last start error is returned to the caller.
					var returnErr error
					for range m.containers {
						res := <-started
						if res.err != nil {
							returnErr = res.err
							glog.Errorf("%s failed with %s", res.name, res.err)
							delete(running, res.name)
						} else {
							glog.Infof("%s started", res.name)
						}
					}
					request.response <- returnErr
				}
			case managerOpStop:
				if running == nil {
					request.response <- ErrManagerNotRunning
					continue
				}
				// Stop all containers concurrently and wait for each
				// Stop() to finish; individual stop errors are dropped.
				responses := make(chan error, len(running))
				for _, c := range running {
					go func(con *Container) {
						responses <- con.Stop()
					}(c)
				}
				runningCount := len(running)
				for i := 0; i < runningCount; i++ {
					<-responses
				}
				running = nil
				request.response <- nil
			case managerOpRegisterContainer:
				// Registration is only allowed while stopped.
				if running != nil {
					request.response <- ErrManagerRunning
					continue
				}
				if container, ok := request.val.(*Container); !ok {
					panic(errors.New("manager unknown arg type"))
				} else {
					m.containers[container.Name] = container
					request.response <- nil
				}
				continue
			case managerOpInit:
				request.response <- nil
			default:
				request.response <- ErrManagerUnknownOp
			}
		}
	}
}
// makeRequest sends a manager operation request to the *Manager's loop()
// and blocks until the loop answers on the response channel.
func (m *Manager) makeRequest(op managerOp) error {
	request := managerRequest{
		op: op,
		response: make(chan error),
	}
	m.requests <- request
	return <-request.response
}

// Register() registers a container to be managed by the *Manager
func (m *Manager) Register(c *Container) error {
	request := managerRequest{
		op: managerOpRegisterContainer,
		val: c,
		response: make(chan error),
	}
	m.requests <- request
	return <-request.response
}
// Wipe() removes the data directory associated with the Manager
func (m *Manager) Wipe() error {
	glog.V(2).Infof("manager sending wipe request")
	defer glog.V(2).Infof("received wipe response")
	return m.makeRequest(managerOpWipe)
}

// Stop() stops all the containers currently registered to the *Manager
func (m *Manager) Stop() error {
	glog.V(2).Infof("manager sending stop request")
	defer glog.V(2).Infof("received stop response")
	return m.makeRequest(managerOpStop)
}

// Start() starts all the containers managed by the *Manager
func (m *Manager) Start() error {
	glog.V(2).Infof("manager sending start request")
	defer glog.V(2).Infof("received start response")
	return m.makeRequest(managerOpStart)
}

// Notify() sends a notify() message to all the containers with the given data val
// (only containers with a Notify callback receive it; see loop()).
func (m *Manager) Notify(val interface{}) error {
	glog.V(2).Infof("manager sending notify request")
	defer glog.V(2).Infof("received notify response")
	request := managerRequest{
		op: managerOpNotify,
		val: val,
		response: make(chan error),
	}
	m.requests <- request
	return <-request.response
}

// TearDown() causes the *Manager's loop() to exit
func (m *Manager) TearDown() error {
	glog.V(2).Infof("manager sending exit request")
	defer glog.V(2).Infof("received exit response")
	return m.makeRequest(managerOpExit)
}
don't use docker to remove if directory not present
// Copyright 2014, The Serviced Authors. All rights reserved.
// Use of this source code is governed by a
// license that can be found in the LICENSE file.
// Package agent implements a service that runs on a serviced node. It is
// responsible for ensuring that a particular node is running the correct services
// and reporting the state and health of those services back to the master
// serviced.
package isvcs
import (
"github.com/zenoss/glog"
"github.com/zenoss/go-dockerclient"
"errors"
"os"
"os/exec"
"path"
)
// managerOp is a type of manager operation (stop, start, notify)
type managerOp int

// constants for the manager operations, dispatched in loop()
const (
	managerOpStart managerOp = iota // Start the subservices
	managerOpStop // stop the subservices
	managerOpNotify // notify config in subservices
	managerOpExit // exit the loop of the manager
	managerOpRegisterContainer // register a given container
	managerOpInit // make sure manager is ready to run containers
	managerOpWipe // wipe all data associated with volumes
)
// Sentinel errors returned by Manager operations. Initialized directly
// rather than via init(): there is no ordering requirement, and direct
// initialization cannot leave them nil if init order ever changes.
var (
	// ErrManagerUnknownOp is returned when loop() receives an unrecognized operation.
	ErrManagerUnknownOp = errors.New("manager: unknown operation")
	// ErrManagerNotRunning is returned by operations that require a running manager.
	ErrManagerNotRunning = errors.New("manager: not running")
	// ErrManagerRunning is returned by operations that require a stopped manager.
	ErrManagerRunning = errors.New("manager: already running")
	// ErrImageNotExists is returned when a required docker image cannot be found.
	ErrImageNotExists = errors.New("manager: image does not exist")
)
// A managerRequest describes an operation for the manager loop() to perform and a response channel
type managerRequest struct {
	op managerOp // the operation to perform
	val interface{} // optional argument (e.g. *Container for register, payload for notify)
	response chan error // the response channel
}

// A manager of docker services run in ephemeral containers
type Manager struct {
	dockerAddress string // the docker endpoint address to talk to
	imagesDir string // local directory where images could be loaded from
	volumesDir string // local directory where volumes are stored
	requests chan managerRequest // the main loops request channel
	containers map[string]*Container // registered containers, keyed by name
}

// Returns a new Manager struct and starts the Manager's main loop()
func NewManager(dockerAddress, imagesDir, volumesDir string) *Manager {
	manager := &Manager{
		dockerAddress: dockerAddress,
		imagesDir: imagesDir,
		volumesDir: volumesDir,
		requests: make(chan managerRequest),
		containers: make(map[string]*Container),
	}
	// The loop goroutine owns all manager state; it runs until a
	// managerOpExit request arrives (see TearDown).
	go manager.loop()
	return manager
}

// newDockerClient is a function pointer to the client constructor so that it can be mocked in tests
var newDockerClient func(address string) (*docker.Client, error)

func init() {
	newDockerClient = docker.NewClient
}
// imageExists checks whether the given repo:tag exists in docker.
func (m *Manager) imageExists(repo, tag string) (bool, error) {
	client, err := newDockerClient(m.dockerAddress)
	if err != nil {
		return false, err
	}
	images, err := client.ListImages(false)
	if err != nil {
		return false, err
	}
	repoTag := repo + ":" + tag
	for _, image := range images {
		for _, tagi := range image.RepoTags {
			if string(tagi) == repoTag {
				return true, nil
			}
		}
	}
	return false, nil
}
// SetVolumesDir sets the volumes dir for *Manager
// NOTE(review): this mutates manager state outside the loop() goroutine;
// confirm callers only invoke it before Start().
func (m *Manager) SetVolumesDir(dir string) {
	m.volumesDir = dir
}
// allImagesExist checks for the existence of all registered container
// images, returning ErrImageNotExists if any is missing.
func (m *Manager) allImagesExist() error {
	for _, c := range m.containers {
		exists, err := m.imageExists(c.Repo, c.Tag)
		if err != nil {
			return err
		}
		if !exists {
			return ErrImageNotExists
		}
	}
	return nil
}
// loadImage loads a docker image from a tar export by piping the file
// into `docker import`.
func loadImage(tarball, dockerAddress, repoTag string) error {
	file, err := os.Open(tarball)
	if err != nil {
		return err
	}
	defer file.Close()
	cmd := exec.Command("docker", "-H", dockerAddress, "import", "-", repoTag)
	cmd.Stdin = file
	glog.Infof("Loading docker image")
	return cmd.Run()
}
// wipe() removes the data directory associate with the manager
func (m *Manager) wipe() error {
	// First try a direct removal; failure is only logged because the
	// docker fallback below may still succeed (e.g. permission issues).
	if err := os.RemoveAll(m.volumesDir); err != nil {
		glog.V(2).Infof("could not remove %s: %v", m.volumesDir, err)
	}
	//nothing to wipe if the volumesDir doesn't exist
	if _, err := os.Stat(m.volumesDir); os.IsNotExist(err) {
		glog.V(2).Infof("Not using docker to remove directories as %s doesn't exist", m.volumesDir)
		return nil
	}
	glog.Infof("Using docker to remove directories in %s", m.volumesDir)
	// remove volumeDir by running a container as root
	// FIXME: detect if already root and avoid running docker
	// NOTE(review): "-rm" is the old single-dash docker flag; modern
	// docker expects "--rm" — confirm against the docker version in use.
	cmd := exec.Command("docker", "-H", m.dockerAddress,
		"run", "-rm", "-v", m.volumesDir+":/mnt/volumes", "ubuntu", "/bin/sh", "-c", "rm -Rf /mnt/volumes/*")
	return cmd.Run()
}
// loadImages loads all images defined in the registered services from
// local tarballs, skipping images already present in docker and tarballs
// already imported during this call.
func (m *Manager) loadImages() error {
	loadedImages := make(map[string]bool)
	for _, c := range m.containers {
		exists, err := m.imageExists(c.Repo, c.Tag)
		if err != nil {
			return err
		}
		if exists {
			continue
		}
		localTar := path.Join(m.imagesDir, c.Repo, c.Tag+".tar.gz")
		glog.Infof("Looking for %s", localTar)
		if loadedImages[localTar] {
			continue
		}
		// Only import when the tarball is present; a missing tarball is
		// skipped here (allImagesExist later reports missing images).
		if _, err := os.Stat(localTar); err == nil {
			if err := loadImage(localTar, m.dockerAddress, c.Repo+":"+c.Tag); err != nil {
				return err
			}
			loadedImages[localTar] = true
		}
	}
	return nil
}
// containerStartResponse carries the result of one container's Start()
// back to the loop() goroutine.
type containerStartResponse struct {
	name string // container name
	err error // nil on successful start
}
// loop() maintains the Manager's state. It is the single goroutine that
// owns the running-container set; all mutations arrive as managerRequest
// values on m.requests and are acknowledged on request.response.
func (m *Manager) loop() {
	// running is non-nil exactly while the manager is started.
	var running map[string]*Container
	for {
		select {
		case request := <-m.requests:
			switch request.op {
			case managerOpWipe:
				// Wiping is only permitted while the manager is stopped.
				if running != nil {
					request.response <- ErrManagerRunning
					continue
				}
				// running is guaranteed nil here, so there are no
				// containers to stop first (the original stop loop at
				// this point was dead code); just wipe the volumes.
				request.response <- m.wipe()
			case managerOpNotify:
				// Forward the payload to every running container that
				// registered a Notify callback; report the last error.
				var retErr error
				for _, c := range running {
					if c.Notify != nil {
						if err := c.Notify(c, request.val); err != nil {
							retErr = err
						}
					}
				}
				request.response <- retErr
				continue
			case managerOpExit:
				request.response <- nil
				return // this will exit the loop()
			case managerOpStart:
				if running != nil {
					request.response <- ErrManagerRunning
					continue
				}
				if err := m.loadImages(); err != nil {
					request.response <- err
					continue
				}
				if err := m.allImagesExist(); err != nil {
					request.response <- err
				} else {
					// start a map of running containers
					running = make(map[string]*Container)
					// start a channel to track responses
					started := make(chan containerStartResponse, len(m.containers))
					// start containers in parallel
					for _, c := range m.containers {
						running[c.Name] = c
						go func(con *Container, respc chan containerStartResponse) {
							glog.Infof("calling start on %s", con.Name)
							con.SetVolumesDir(m.volumesDir)
							resp := containerStartResponse{
								name: con.Name,
								err: con.Start(),
							}
							respc <- resp
						}(c, started)
					}
					// wait for every container to report; failed
					// containers are dropped from the running set and
					// the last start error is returned to the caller.
					var returnErr error
					for range m.containers {
						res := <-started
						if res.err != nil {
							returnErr = res.err
							glog.Errorf("%s failed with %s", res.name, res.err)
							delete(running, res.name)
						} else {
							glog.Infof("%s started", res.name)
						}
					}
					request.response <- returnErr
				}
			case managerOpStop:
				if running == nil {
					request.response <- ErrManagerNotRunning
					continue
				}
				// Stop all containers concurrently and wait for each
				// Stop() to finish; individual stop errors are dropped.
				responses := make(chan error, len(running))
				for _, c := range running {
					go func(con *Container) {
						responses <- con.Stop()
					}(c)
				}
				runningCount := len(running)
				for i := 0; i < runningCount; i++ {
					<-responses
				}
				running = nil
				request.response <- nil
			case managerOpRegisterContainer:
				// Registration is only allowed while stopped.
				if running != nil {
					request.response <- ErrManagerRunning
					continue
				}
				if container, ok := request.val.(*Container); !ok {
					panic(errors.New("manager unknown arg type"))
				} else {
					m.containers[container.Name] = container
					request.response <- nil
				}
				continue
			case managerOpInit:
				request.response <- nil
			default:
				request.response <- ErrManagerUnknownOp
			}
		}
	}
}
// makeRequest sends a manager operation request to the *Manager's loop()
// and blocks until the loop answers on the response channel.
func (m *Manager) makeRequest(op managerOp) error {
	request := managerRequest{
		op: op,
		response: make(chan error),
	}
	m.requests <- request
	return <-request.response
}

// Register() registers a container to be managed by the *Manager
func (m *Manager) Register(c *Container) error {
	request := managerRequest{
		op: managerOpRegisterContainer,
		val: c,
		response: make(chan error),
	}
	m.requests <- request
	return <-request.response
}
// Wipe() removes the data directory associated with the Manager
func (m *Manager) Wipe() error {
	glog.V(2).Infof("manager sending wipe request")
	defer glog.V(2).Infof("received wipe response")
	return m.makeRequest(managerOpWipe)
}

// Stop() stops all the containers currently registered to the *Manager
func (m *Manager) Stop() error {
	glog.V(2).Infof("manager sending stop request")
	defer glog.V(2).Infof("received stop response")
	return m.makeRequest(managerOpStop)
}

// Start() starts all the containers managed by the *Manager
func (m *Manager) Start() error {
	glog.V(2).Infof("manager sending start request")
	defer glog.V(2).Infof("received start response")
	return m.makeRequest(managerOpStart)
}

// Notify() sends a notify() message to all the containers with the given data val
// (only containers with a Notify callback receive it; see loop()).
func (m *Manager) Notify(val interface{}) error {
	glog.V(2).Infof("manager sending notify request")
	defer glog.V(2).Infof("received notify response")
	request := managerRequest{
		op: managerOpNotify,
		val: val,
		response: make(chan error),
	}
	m.requests <- request
	return <-request.response
}

// TearDown() causes the *Manager's loop() to exit
func (m *Manager) TearDown() error {
	glog.V(2).Infof("manager sending exit request")
	defer glog.V(2).Infof("received exit response")
	return m.makeRequest(managerOpExit)
}
|
// Copyright (c) 2014, B3log
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package session
import (
"crypto/md5"
"encoding/hex"
"encoding/json"
"math/rand"
"net/http"
"os"
"path/filepath"
"runtime"
"strconv"
"sync"
"text/template"
"time"
"github.com/b3log/wide/conf"
"github.com/b3log/wide/i18n"
"github.com/b3log/wide/util"
)
// Result messages reported by the user-management handlers.
const (
	// TODO: i18n
	userExists = "user exists"
	emailExists = "email exists"
	userCreated = "user created"
	userCreateError = "user create error"
)

// Exclusive lock for adding user.
var addUserMutex sync.Mutex
// PreferenceHandler handles request of preference page.
//
// GET renders the preference view; any other method is treated as a save
// request whose JSON body carries the user's new preference values, and
// the JSON result {"succ": bool} is always written via util.RetJSON.
func PreferenceHandler(w http.ResponseWriter, r *http.Request) {
	httpSession, _ := HTTPSession.Get(r, "wide-session")
	if httpSession.IsNew {
		// Not logged in: bounce to the login page.
		http.Redirect(w, r, conf.Wide.Context+"login", http.StatusFound)
		return
	}
	// Refresh the session lifetime on every visit.
	httpSession.Options.MaxAge = conf.Wide.HTTPSessionMaxAge
	if "" != conf.Wide.Context {
		httpSession.Options.Path = conf.Wide.Context
	}
	httpSession.Save(r, w)
	// NOTE(review): assumes a non-new session always carries a string
	// "username" value; the type assertion panics otherwise — confirm.
	username := httpSession.Values["username"].(string)
	user := conf.GetUser(username)
	if "GET" == r.Method {
		model := map[string]interface{}{"conf": conf.Wide, "i18n": i18n.GetAll(user.Locale), "user": user,
			"ver": conf.WideVersion, "goos": runtime.GOOS, "goarch": runtime.GOARCH, "gover": runtime.Version(),
			"locales": i18n.GetLocalesNames(), "gofmts": util.Go.GetGoFormats(),
			"themes": conf.GetThemes(), "editorThemes": conf.GetEditorThemes()}
		t, err := template.ParseFiles("views/preference.html")
		if nil != err {
			logger.Error(err)
			http.Error(w, err.Error(), 500)
			return
		}
		// Log (rather than silently drop) render failures; the response
		// may be partially written, so http.Error is not usable here.
		if err := t.Execute(w, model); nil != err {
			logger.Error(err)
		}
		return
	}
	// non-GET request as save request
	succ := true
	data := map[string]interface{}{"succ": &succ}
	defer util.RetJSON(w, r, data)
	args := struct {
		FontFamily string
		FontSize string
		GoFmt string
		Workspace string
		Username string
		Password string
		Email string
		Locale string
		Theme string
		EditorFontFamily string
		EditorFontSize string
		EditorLineHeight string
		EditorTheme string
		EditorTabSize string
	}{}
	if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
		logger.Error(err)
		succ = false
		return
	}
	// Copy the submitted values onto the user record.
	user.FontFamily = args.FontFamily
	user.FontSize = args.FontSize
	user.GoFormat = args.GoFmt
	user.Workspace = args.Workspace
	user.Password = conf.Salt(args.Password, user.Salt)
	user.Email = args.Email
	// The Gravatar id is the hex MD5 of the e-mail address.
	hash := md5.New()
	hash.Write([]byte(user.Email))
	user.Gravatar = hex.EncodeToString(hash.Sum(nil))
	user.Locale = args.Locale
	user.Theme = args.Theme
	user.Editor.FontFamily = args.EditorFontFamily
	user.Editor.FontSize = args.EditorFontSize
	user.Editor.LineHeight = args.EditorLineHeight
	user.Editor.Theme = args.EditorTheme
	user.Editor.TabSize = args.EditorTabSize
	conf.UpdateCustomizedConf(username)
	now := time.Now().UnixNano()
	user.Lived = now
	user.Updated = now
	succ = user.Save()
}
// LoginHandler handles request of user login.
//
// GET renders the login page; any other method is treated as a login
// attempt whose JSON body carries username/password.
func LoginHandler(w http.ResponseWriter, r *http.Request) {
	if "GET" == r.Method {
		// show the login page
		model := map[string]interface{}{"conf": conf.Wide, "i18n": i18n.GetAll(conf.Wide.Locale),
			"locale": conf.Wide.Locale, "ver": conf.WideVersion}
		t, err := template.ParseFiles("views/login.html")
		if nil != err {
			logger.Error(err)
			http.Error(w, err.Error(), 500)
			return
		}
		t.Execute(w, model)
		return
	}

	// non-GET request as login request
	succ := true
	data := map[string]interface{}{"succ": &succ}
	// RetJSON writes the final value of succ after the handler returns
	defer util.RetJSON(w, r, data)

	args := struct {
		Username string
		Password string
	}{}
	if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
		logger.Error("login error: ", err)
		succ = false
		return
	}

	// check the salted password against every configured user.
	// NOTE(review): plain == comparison is not constant-time; a timing
	// side channel is theoretically possible — consider
	// subtle.ConstantTimeCompare. Confirm threat model before changing.
	succ = false
	for _, user := range conf.Users {
		if user.Name == args.Username && user.Password == conf.Salt(args.Password, user.Salt) {
			succ = true
			break
		}
	}
	if !succ {
		return
	}

	// create a HTTP session
	httpSession, _ := HTTPSession.Get(r, "wide-session")
	httpSession.Values["username"] = args.Username
	// NOTE(review): session id comes from math/rand, which is not
	// cryptographically secure — confirm whether this id is security-relevant
	httpSession.Values["id"] = strconv.Itoa(rand.Int())
	httpSession.Options.MaxAge = conf.Wide.HTTPSessionMaxAge
	if "" != conf.Wide.Context {
		httpSession.Options.Path = conf.Wide.Context
	}
	httpSession.Save(r, w)

	logger.Debugf("Created a HTTP session [%s] for user [%s]", httpSession.Values["id"].(string), args.Username)
}
// LogoutHandler handles request of user logout (exit).
//
// It expires the "wide-session" cookie and always reports success.
func LogoutHandler(w http.ResponseWriter, r *http.Request) {
	result := map[string]interface{}{"succ": true}
	defer util.RetJSON(w, r, result)

	session, _ := HTTPSession.Get(r, "wide-session")
	session.Options.MaxAge = -1 // delete the cookie immediately
	session.Save(r, w)
}
// SignUpUser handles request of registering user.
//
// GET renders the sign-up page; any other method is treated as an
// add-user request with a JSON body of username/password/email.
func SignUpUser(w http.ResponseWriter, r *http.Request) {
	if "GET" == r.Method {
		// show the user sign up page
		firstUserWorkspace := conf.GetUserWorkspace(conf.Users[0].Name)
		dir := filepath.Dir(firstUserWorkspace)
		model := map[string]interface{}{"conf": conf.Wide, "i18n": i18n.GetAll(conf.Wide.Locale),
			"locale": conf.Wide.Locale, "ver": conf.WideVersion, "dir": dir,
			"pathSeparator": conf.PathSeparator}
		t, err := template.ParseFiles("views/sign_up.html")
		if nil != err {
			logger.Error(err)
			http.Error(w, err.Error(), 500)
			return
		}
		t.Execute(w, model)
		return
	}

	// non-GET request as add user request
	succ := true
	data := map[string]interface{}{"succ": &succ}
	defer util.RetJSON(w, r, data)

	var args map[string]interface{}
	if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
		logger.Error(err)
		succ = false
		return
	}

	// Checked type assertions: a request body missing any field (or with a
	// non-string value) previously panicked the handler; report it as a
	// failed request instead.
	username, uOK := args["username"].(string)
	password, pOK := args["password"].(string)
	email, eOK := args["email"].(string)
	if !uOK || !pOK || !eOK {
		succ = false
		return
	}

	msg := addUser(username, password, email)
	if userCreated != msg {
		succ = false
		data["msg"] = msg
	}
}
// FixedTimeSave saves online users' configurations periodically (1 minute).
//
// Main goal of this function is to save user session content, for restoring session content while user open Wide next time.
func FixedTimeSave() {
	go func() {
		// idiomatic "for range" — the tick value itself is unused
		// (was "for _ = range", which go vet/gofmt flag)
		for range time.Tick(time.Minute) {
			users := getOnlineUsers()
			for _, u := range users {
				if u.Save() {
					logger.Tracef("Saved online user [%s]'s configurations", u.Name)
				}
			}
		}
	}()
}
// getOnlineUsers returns the configuration objects of every user that
// currently owns at least one Wide session.
func getOnlineUsers() []*conf.User {
	ret := []*conf.User{}

	// collect the distinct usernames of all live sessions
	seen := map[string]string{}
	for _, session := range WideSessions {
		seen[session.Username] = session.Username
	}

	for _, name := range seen {
		user := conf.GetUser(name)
		if nil == user {
			logger.Warnf("Not found user [%s]", name)
			continue
		}
		ret = append(ret, user)
	}

	return ret
}
// addUser add a user with the specified username, password and email.
//
// 1. create the user's workspace
// 2. generate 'Hello, 世界' demo code in the workspace
// 3. update the user customized configurations, such as style.css
// 4. serve files of the user's workspace via HTTP
//
// Returns one of userExists, emailExists, userCreateError or userCreated.
func addUser(username, password, email string) string {
	addUserMutex.Lock()
	defer addUserMutex.Unlock()

	// reject duplicate name or email
	for _, user := range conf.Users {
		if user.Name == username {
			return userExists
		}
		if user.Email == email {
			return emailExists
		}
	}

	// the new workspace is created next to the first user's workspace
	firstUserWorkspace := conf.GetUserWorkspace(conf.Users[0].Name)
	dir := filepath.Dir(firstUserWorkspace)
	workspace := filepath.Join(dir, username)

	newUser := conf.NewUser(username, password, email, workspace)
	conf.Users = append(conf.Users, newUser)
	if !newUser.Save() {
		// roll back the in-memory registration: previously a failed save
		// left a phantom (unsaved) user in conf.Users
		conf.Users = conf.Users[:len(conf.Users)-1]
		return userCreateError
	}

	conf.CreateWorkspaceDir(workspace)
	helloWorld(workspace)
	conf.UpdateCustomizedConf(username)

	// serve the new user's workspace files over HTTP
	http.Handle("/workspace/"+username+"/",
		http.StripPrefix("/workspace/"+username+"/", http.FileServer(http.Dir(newUser.GetWorkspace()))))

	logger.Infof("Created a user [%s]", username)

	return userCreated
}
// helloWorld generates the 'Hello, 世界' source code in workspace/src/hello/main.go.
//
// Failures are logged and skipped: a demo-file error must not abort user
// creation (the previous os.Exit(-1) took down the whole server).
func helloWorld(workspace string) {
	dir := workspace + conf.PathSeparator + "src" + conf.PathSeparator + "hello"
	if err := os.MkdirAll(dir, 0755); nil != err {
		logger.Error(err)
		return
	}

	fout, err := os.Create(dir + conf.PathSeparator + "main.go")
	if nil != err {
		logger.Error(err)
		return
	}
	defer fout.Close()

	if _, err := fout.WriteString(`package main
import "fmt"
func main() {
fmt.Println("Hello, 世界")
}
`); nil != err {
		logger.Error(err)
	}
}
:bug:
// Copyright (c) 2014, B3log
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package session
import (
"crypto/md5"
"encoding/hex"
"encoding/json"
"math/rand"
"net/http"
"os"
"path/filepath"
"runtime"
"strconv"
"sync"
"text/template"
"time"
"github.com/b3log/wide/conf"
"github.com/b3log/wide/i18n"
"github.com/b3log/wide/util"
)
const (
	// TODO: i18n
	// Result messages returned by addUser; compared by callers (e.g. SignUpUser).
	userExists      = "user exists"
	emailExists     = "email exists"
	userCreated     = "user created"
	userCreateError = "user create error"
)

// Exclusive lock for adding user.
var addUserMutex sync.Mutex
// PreferenceHandler handles request of preference page.
//
// GET renders the preference page; any other method is treated as a save
// request whose JSON body carries the new settings.
func PreferenceHandler(w http.ResponseWriter, r *http.Request) {
	httpSession, _ := HTTPSession.Get(r, "wide-session")
	if httpSession.IsNew {
		http.Redirect(w, r, conf.Wide.Context+"login", http.StatusFound)
		return
	}

	// refresh the session lifetime on every visit
	httpSession.Options.MaxAge = conf.Wide.HTTPSessionMaxAge
	if "" != conf.Wide.Context {
		httpSession.Options.Path = conf.Wide.Context
	}
	httpSession.Save(r, w)

	username := httpSession.Values["username"].(string)
	user := conf.GetUser(username)

	if "GET" == r.Method {
		model := map[string]interface{}{"conf": conf.Wide, "i18n": i18n.GetAll(user.Locale), "user": user,
			"ver": conf.WideVersion, "goos": runtime.GOOS, "goarch": runtime.GOARCH, "gover": runtime.Version(),
			"locales": i18n.GetLocalesNames(), "gofmts": util.Go.GetGoFormats(),
			"themes": conf.GetThemes(), "editorThemes": conf.GetEditorThemes()}
		t, err := template.ParseFiles("views/preference.html")
		if nil != err {
			logger.Error(err)
			http.Error(w, err.Error(), 500)
			return
		}
		t.Execute(w, model)
		return
	}

	// non-GET request as save request
	succ := true
	data := map[string]interface{}{"succ": &succ}
	defer util.RetJSON(w, r, data)

	args := struct {
		FontFamily       string
		FontSize         string
		GoFmt            string
		Workspace        string
		Username         string
		Password         string
		Email            string
		Locale           string
		Theme            string
		EditorFontFamily string
		EditorFontSize   string
		EditorLineHeight string
		EditorTheme      string
		EditorTabSize    string
	}{}
	if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
		logger.Error(err)
		succ = false
		return
	}

	user.FontFamily = args.FontFamily
	user.FontSize = args.FontSize
	user.GoFormat = args.GoFmt
	user.Workspace = args.Workspace
	// the form echoes back the stored (already salted) password; only
	// re-salt when the user actually typed a new one
	if user.Password != args.Password {
		user.Password = conf.Salt(args.Password, user.Salt)
	}
	user.Email = args.Email

	// Gravatar id is the hex MD5 of the email address
	hash := md5.New()
	hash.Write([]byte(user.Email))
	user.Gravatar = hex.EncodeToString(hash.Sum(nil))

	user.Locale = args.Locale
	user.Theme = args.Theme
	user.Editor.FontFamily = args.EditorFontFamily
	user.Editor.FontSize = args.EditorFontSize
	user.Editor.LineHeight = args.EditorLineHeight
	user.Editor.Theme = args.EditorTheme
	user.Editor.TabSize = args.EditorTabSize

	conf.UpdateCustomizedConf(username)

	now := time.Now().UnixNano()
	user.Lived = now
	user.Updated = now

	succ = user.Save()
}
// LoginHandler handles request of user login.
//
// GET renders the login page; any other method is treated as a login
// attempt whose JSON body carries username/password.
func LoginHandler(w http.ResponseWriter, r *http.Request) {
	if "GET" == r.Method {
		// show the login page
		model := map[string]interface{}{"conf": conf.Wide, "i18n": i18n.GetAll(conf.Wide.Locale),
			"locale": conf.Wide.Locale, "ver": conf.WideVersion}
		t, err := template.ParseFiles("views/login.html")
		if nil != err {
			logger.Error(err)
			http.Error(w, err.Error(), 500)
			return
		}
		t.Execute(w, model)
		return
	}

	// non-GET request as login request
	succ := true
	data := map[string]interface{}{"succ": &succ}
	defer util.RetJSON(w, r, data)

	args := struct {
		Username string
		Password string
	}{}
	if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
		logger.Error("login error: ", err)
		succ = false
		return
	}

	// check the salted password against every configured user.
	// NOTE(review): plain == comparison is not constant-time — consider
	// subtle.ConstantTimeCompare; confirm threat model first.
	succ = false
	for _, user := range conf.Users {
		if user.Name == args.Username && user.Password == conf.Salt(args.Password, user.Salt) {
			succ = true
			break
		}
	}
	if !succ {
		return
	}

	// create a HTTP session
	httpSession, _ := HTTPSession.Get(r, "wide-session")
	httpSession.Values["username"] = args.Username
	// NOTE(review): math/rand session id is not cryptographically secure
	httpSession.Values["id"] = strconv.Itoa(rand.Int())
	httpSession.Options.MaxAge = conf.Wide.HTTPSessionMaxAge
	if "" != conf.Wide.Context {
		httpSession.Options.Path = conf.Wide.Context
	}
	httpSession.Save(r, w)

	logger.Debugf("Created a HTTP session [%s] for user [%s]", httpSession.Values["id"].(string), args.Username)
}
// LogoutHandler handles request of user logout (exit).
//
// It expires the "wide-session" cookie (MaxAge = -1) and always reports success.
func LogoutHandler(w http.ResponseWriter, r *http.Request) {
	data := map[string]interface{}{"succ": true}
	defer util.RetJSON(w, r, data)

	httpSession, _ := HTTPSession.Get(r, "wide-session")
	httpSession.Options.MaxAge = -1
	httpSession.Save(r, w)
}
// SignUpUser handles request of registering user.
//
// GET renders the sign-up page; any other method is treated as an
// add-user request with a JSON body of username/password/email.
func SignUpUser(w http.ResponseWriter, r *http.Request) {
	if "GET" == r.Method {
		// show the user sign up page
		firstUserWorkspace := conf.GetUserWorkspace(conf.Users[0].Name)
		dir := filepath.Dir(firstUserWorkspace)
		model := map[string]interface{}{"conf": conf.Wide, "i18n": i18n.GetAll(conf.Wide.Locale),
			"locale": conf.Wide.Locale, "ver": conf.WideVersion, "dir": dir,
			"pathSeparator": conf.PathSeparator}
		t, err := template.ParseFiles("views/sign_up.html")
		if nil != err {
			logger.Error(err)
			http.Error(w, err.Error(), 500)
			return
		}
		t.Execute(w, model)
		return
	}

	// non-GET request as add user request
	succ := true
	data := map[string]interface{}{"succ": &succ}
	defer util.RetJSON(w, r, data)

	var args map[string]interface{}
	if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
		logger.Error(err)
		succ = false
		return
	}

	// Checked type assertions: a request body missing any field (or with a
	// non-string value) previously panicked the handler; report it as a
	// failed request instead.
	username, uOK := args["username"].(string)
	password, pOK := args["password"].(string)
	email, eOK := args["email"].(string)
	if !uOK || !pOK || !eOK {
		succ = false
		return
	}

	msg := addUser(username, password, email)
	if userCreated != msg {
		succ = false
		data["msg"] = msg
	}
}
// FixedTimeSave saves online users' configurations periodically (1 minute).
//
// Main goal of this function is to save user session content, for restoring session content while user open Wide next time.
func FixedTimeSave() {
	go func() {
		// idiomatic "for range" — the tick value itself is unused
		// (was "for _ = range", which go vet/gofmt flag)
		for range time.Tick(time.Minute) {
			users := getOnlineUsers()
			for _, u := range users {
				if u.Save() {
					logger.Tracef("Saved online user [%s]'s configurations", u.Name)
				}
			}
		}
	}()
}
// getOnlineUsers returns the configuration objects of every user that
// currently owns at least one Wide session.
func getOnlineUsers() []*conf.User {
	ret := []*conf.User{}

	usernames := map[string]string{} // distinct username
	for _, s := range WideSessions {
		usernames[s.Username] = s.Username
	}

	for _, username := range usernames {
		u := conf.GetUser(username)
		if nil == u {
			// a session may outlive its user's configuration
			logger.Warnf("Not found user [%s]", username)
			continue
		}
		ret = append(ret, u)
	}

	return ret
}
// addUser add a user with the specified username, password and email.
//
// 1. create the user's workspace
// 2. generate 'Hello, 世界' demo code in the workspace
// 3. update the user customized configurations, such as style.css
// 4. serve files of the user's workspace via HTTP
//
// Returns one of userExists, emailExists, userCreateError or userCreated.
func addUser(username, password, email string) string {
	addUserMutex.Lock()
	defer addUserMutex.Unlock()

	// reject duplicate name or email
	for _, user := range conf.Users {
		if user.Name == username {
			return userExists
		}
		if user.Email == email {
			return emailExists
		}
	}

	// the new workspace is created next to the first user's workspace
	firstUserWorkspace := conf.GetUserWorkspace(conf.Users[0].Name)
	dir := filepath.Dir(firstUserWorkspace)
	workspace := filepath.Join(dir, username)

	newUser := conf.NewUser(username, password, email, workspace)
	conf.Users = append(conf.Users, newUser)
	if !newUser.Save() {
		// roll back the in-memory registration: previously a failed save
		// left a phantom (unsaved) user in conf.Users
		conf.Users = conf.Users[:len(conf.Users)-1]
		return userCreateError
	}

	conf.CreateWorkspaceDir(workspace)
	helloWorld(workspace)
	conf.UpdateCustomizedConf(username)

	// serve the new user's workspace files over HTTP
	http.Handle("/workspace/"+username+"/",
		http.StripPrefix("/workspace/"+username+"/", http.FileServer(http.Dir(newUser.GetWorkspace()))))

	logger.Infof("Created a user [%s]", username)

	return userCreated
}
// helloWorld generates the 'Hello, 世界' source code in workspace/src/hello/main.go.
//
// Failures are logged and skipped: a demo-file error must not abort user
// creation (the previous os.Exit(-1) took down the whole server).
func helloWorld(workspace string) {
	dir := workspace + conf.PathSeparator + "src" + conf.PathSeparator + "hello"
	if err := os.MkdirAll(dir, 0755); nil != err {
		logger.Error(err)
		return
	}

	fout, err := os.Create(dir + conf.PathSeparator + "main.go")
	if nil != err {
		logger.Error(err)
		return
	}
	defer fout.Close()

	if _, err := fout.WriteString(`package main
import "fmt"
func main() {
fmt.Println("Hello, 世界")
}
`); nil != err {
		logger.Error(err)
	}
}
|
// Copyright 2013 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package set_test
import (
"github.com/juju/names"
"github.com/juju/testing"
gc "launchpad.net/gocheck"
"github.com/juju/utils/set"
)
// tagSetSuite exercises the set.Tags collection type.
type tagSetSuite struct {
	testing.IsolationSuite
}

var _ = gc.Suite(tagSetSuite{})

// An empty constructor yields a zero-size set.
func (tagSetSuite) TestEmpty(c *gc.C) {
	t := set.NewTags()
	c.Assert(t.Size(), gc.Equals, 0)
}

func (tagSetSuite) TestInitialValues(c *gc.C) {
	foo, _ := names.ParseTag("unit-wordpress-0")
	bar, _ := names.ParseTag("unit-rabbitmq-server-0")
	t := set.NewTags(foo, bar)
	c.Assert(t.Size(), gc.Equals, 2)
}

func (tagSetSuite) TestInitialStringValues(c *gc.C) {
	t, err := set.NewTagsFromStrings("unit-wordpress-0", "unit-rabbitmq-server-0")
	c.Assert(err, gc.IsNil)
	c.Assert(t.Size(), gc.Equals, 2)
}

func (tagSetSuite) TestSize(c *gc.C) {
	// Empty sets are empty.
	s := set.NewTags()
	c.Assert(s.Size(), gc.Equals, 0)
	// Size returns number of unique values.
	s, err := set.NewTagsFromStrings(
		"unit-wordpress-0",
		"unit-rabbitmq-server-0",
		"unit-wordpress-0",
	)
	c.Assert(err, gc.IsNil)
	c.Assert(s.Size(), gc.Equals, 2)
}

func (tagSetSuite) TestIsEmpty(c *gc.C) {
	// Empty sets are empty.
	s := set.NewTags()
	c.Assert(s.IsEmpty(), gc.Equals, true)
	// Non-empty sets are not empty.
	tag, _ := names.ParseTag("unit-wordpress-0")
	s = set.NewTags(tag)
	c.Assert(s.IsEmpty(), gc.Equals, false)
	// Newly empty sets work too.
	s.Remove(tag)
	c.Assert(s.IsEmpty(), gc.Equals, true)
}

func (tagSetSuite) TestAdd(c *gc.C) {
	t := set.NewTags()
	foo, _ := names.ParseTag("unit-wordpress-0")
	t.Add(foo)
	c.Assert(t.Size(), gc.Equals, 1)
	c.Assert(t.Contains(foo), gc.Equals, true)
}

// Adding an equal tag twice must not grow the set.
func (tagSetSuite) TestAddDuplicate(c *gc.C) {
	t := set.NewTags()
	foo, _ := names.ParseTag("unit-wordpress-0")
	bar, _ := names.ParseTag("unit-rabbitmq-server-0")
	t.Add(foo)
	t.Add(bar)
	bar, _ = names.ParseTag("unit-wordpress-0")
	t.Add(bar)
	c.Assert(t.Size(), gc.Equals, 2)
}

func (tagSetSuite) TestRemove(c *gc.C) {
	foo, _ := names.ParseTag("unit-wordpress-0")
	bar, _ := names.ParseTag("unit-rabbitmq-server-0")
	t := set.NewTags(foo, bar)
	t.Remove(foo)
	c.Assert(t.Contains(foo), gc.Equals, false)
	c.Assert(t.Contains(bar), gc.Equals, true)
}

func (tagSetSuite) TestContains(c *gc.C) {
	t, err := set.NewTagsFromStrings("unit-wordpress-0", "unit-rabbitmq-server-0")
	c.Assert(err, gc.IsNil)
	foo, _ := names.ParseTag("unit-wordpress-0")
	bar, _ := names.ParseTag("unit-rabbitmq-server-0")
	baz, _ := names.ParseTag("unit-mongodb-0")
	c.Assert(t.Contains(foo), gc.Equals, true)
	c.Assert(t.Contains(bar), gc.Equals, true)
	c.Assert(t.Contains(baz), gc.Equals, false)
}

// SortedValues orders by tag string, independent of insertion order.
func (tagSetSuite) TestSortedValues(c *gc.C) {
	m1, _ := names.ParseTag("machine-0")
	z1, _ := names.ParseTag("unit-z-server-0")
	z2, _ := names.ParseTag("unit-z-server-1")
	a1, _ := names.ParseTag("unit-a-server-0")
	t := set.NewTags(z2, a1, z1, m1)
	values := t.SortedValues()
	c.Assert(values, gc.DeepEquals, []names.Tag{m1, a1, z1, z2})
}

// Removing a missing element is a no-op, not a panic.
func (tagSetSuite) TestRemoveNonExistent(c *gc.C) {
	t := set.NewTags()
	foo, _ := names.ParseTag("unit-wordpress-0")
	t.Remove(foo)
	c.Assert(t.Size(), gc.Equals, 0)
}

func (tagSetSuite) TestUnion(c *gc.C) {
	foo, _ := names.ParseTag("unit-wordpress-0")
	bar, _ := names.ParseTag("unit-mongodb-0")
	baz, _ := names.ParseTag("unit-rabbitmq-server-0")
	bang, _ := names.ParseTag("unit-mysql-server-0")
	t1 := set.NewTags(foo, bar)
	t2 := set.NewTags(foo, baz, bang)
	union1 := t1.Union(t2)
	union2 := t2.Union(t1)
	c.Assert(union1.Size(), gc.Equals, 4)
	c.Assert(union2.Size(), gc.Equals, 4)
	c.Assert(union1, gc.DeepEquals, union2)
	c.Assert(union1, gc.DeepEquals, set.NewTags(foo, bar, baz, bang))
}

func (tagSetSuite) TestIntersection(c *gc.C) {
	foo, _ := names.ParseTag("unit-wordpress-0")
	bar, _ := names.ParseTag("unit-mongodb-0")
	baz, _ := names.ParseTag("unit-rabbitmq-server-0")
	bang, _ := names.ParseTag("unit-mysql-server-0")
	t1 := set.NewTags(foo, bar)
	t2 := set.NewTags(foo, baz, bang)
	int1 := t1.Intersection(t2)
	int2 := t2.Intersection(t1)
	c.Assert(int1.Size(), gc.Equals, 1)
	c.Assert(int2.Size(), gc.Equals, 1)
	c.Assert(int1, gc.DeepEquals, int2)
	c.Assert(int1, gc.DeepEquals, set.NewTags(foo))
}

// Difference is asymmetric: t1 - t2 differs from t2 - t1.
func (tagSetSuite) TestDifference(c *gc.C) {
	foo, _ := names.ParseTag("unit-wordpress-0")
	bar, _ := names.ParseTag("unit-mongodb-0")
	baz, _ := names.ParseTag("unit-rabbitmq-server-0")
	bang, _ := names.ParseTag("unit-mysql-server-0")
	t1 := set.NewTags(foo, bar)
	t2 := set.NewTags(foo, baz, bang)
	diff1 := t1.Difference(t2)
	diff2 := t2.Difference(t1)
	c.Assert(diff1, gc.DeepEquals, set.NewTags(bar))
	c.Assert(diff2, gc.DeepEquals, set.NewTags(baz, bang))
}

// The zero value of set.Tags must be usable for all read operations.
func (tagSetSuite) TestUninitialized(c *gc.C) {
	var uninitialized set.Tags
	foo, _ := names.ParseTag("unit-wordpress-0")
	bar, _ := names.ParseTag("unit-mongodb-0")
	c.Assert(uninitialized.Size(), gc.Equals, 0)
	c.Assert(uninitialized.IsEmpty(), gc.Equals, true)
	// You can get values and sorted values from an uninitialized set.
	c.Assert(uninitialized.Values(), gc.DeepEquals, []names.Tag{})
	// All contains checks are false
	c.Assert(uninitialized.Contains(foo), gc.Equals, false)
	// Remove works on an uninitialized Tags
	uninitialized.Remove(foo)
	var other set.Tags
	// Union returns a new set that is empty but initialized.
	c.Assert(uninitialized.Union(other), gc.DeepEquals, set.NewTags())
	c.Assert(uninitialized.Intersection(other), gc.DeepEquals, set.NewTags())
	c.Assert(uninitialized.Difference(other), gc.DeepEquals, set.NewTags())
	other = set.NewTags(foo, bar)
	c.Assert(uninitialized.Union(other), gc.DeepEquals, other)
	c.Assert(uninitialized.Intersection(other), gc.DeepEquals, set.NewTags())
	c.Assert(uninitialized.Difference(other), gc.DeepEquals, set.NewTags())
	c.Assert(other.Union(uninitialized), gc.DeepEquals, other)
	c.Assert(other.Intersection(uninitialized), gc.DeepEquals, set.NewTags())
	c.Assert(other.Difference(uninitialized), gc.DeepEquals, other)
	// Once something is added, the set becomes initialized.
	uninitialized.Add(foo)
	c.Assert(uninitialized.Contains(foo), gc.Equals, true)
}
set/tags: more test updates
// Copyright 2013 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package set_test
import (
"github.com/juju/names"
"github.com/juju/testing"
gc "launchpad.net/gocheck"
"github.com/juju/utils/set"
)
// tagSetSuite exercises the set.Tags collection type.
type tagSetSuite struct {
	testing.IsolationSuite
}

var _ = gc.Suite(tagSetSuite{})

// An empty constructor yields a zero-size set.
func (tagSetSuite) TestEmpty(c *gc.C) {
	t := set.NewTags()
	c.Assert(t.Size(), gc.Equals, 0)
}

func (tagSetSuite) TestInitialValues(c *gc.C) {
	foo, _ := names.ParseTag("unit-wordpress-0")
	bar, _ := names.ParseTag("unit-rabbitmq-server-0")
	t := set.NewTags(foo, bar)
	c.Assert(t.Size(), gc.Equals, 2)
}

func (tagSetSuite) TestInitialStringValues(c *gc.C) {
	t, err := set.NewTagsFromStrings("unit-wordpress-0", "unit-rabbitmq-server-0")
	c.Assert(err, gc.IsNil)
	c.Assert(t.Size(), gc.Equals, 2)
}

func (tagSetSuite) TestSize(c *gc.C) {
	// Empty sets are empty.
	s := set.NewTags()
	c.Assert(s.Size(), gc.Equals, 0)
	s, err := set.NewTagsFromStrings(
		"unit-wordpress-0",
		"unit-rabbitmq-server-0",
	)
	c.Assert(err, gc.IsNil)
	c.Assert(s.Size(), gc.Equals, 2)
}

// Duplicate input strings collapse to one element.
func (tagSetSuite) TestSizeDuplicate(c *gc.C) {
	// Empty sets are empty.
	s := set.NewTags()
	c.Assert(s.Size(), gc.Equals, 0)
	// Size returns number of unique values.
	s, err := set.NewTagsFromStrings(
		"unit-wordpress-0",
		"unit-rabbitmq-server-0",
		"unit-wordpress-0",
	)
	c.Assert(err, gc.IsNil)
	c.Assert(s.Size(), gc.Equals, 2)
}

func (tagSetSuite) TestIsEmpty(c *gc.C) {
	// Empty sets are empty.
	s := set.NewTags()
	c.Assert(s.IsEmpty(), gc.Equals, true)
	// Non-empty sets are not empty.
	tag, _ := names.ParseTag("unit-wordpress-0")
	s = set.NewTags(tag)
	c.Assert(s.IsEmpty(), gc.Equals, false)
	// Newly empty sets work too.
	s.Remove(tag)
	c.Assert(s.IsEmpty(), gc.Equals, true)
}

func (tagSetSuite) TestAdd(c *gc.C) {
	t := set.NewTags()
	foo, _ := names.ParseTag("unit-wordpress-0")
	t.Add(foo)
	c.Assert(t.Size(), gc.Equals, 1)
	c.Assert(t.Contains(foo), gc.Equals, true)
}

// Adding an equal tag twice must not grow the set.
func (tagSetSuite) TestAddDuplicate(c *gc.C) {
	t := set.NewTags()
	foo, _ := names.ParseTag("unit-wordpress-0")
	bar, _ := names.ParseTag("unit-rabbitmq-server-0")
	t.Add(foo)
	t.Add(bar)
	bar, _ = names.ParseTag("unit-wordpress-0")
	t.Add(bar)
	c.Assert(t.Size(), gc.Equals, 2)
}

func (tagSetSuite) TestRemove(c *gc.C) {
	foo, _ := names.ParseTag("unit-wordpress-0")
	bar, _ := names.ParseTag("unit-rabbitmq-server-0")
	t := set.NewTags(foo, bar)
	t.Remove(foo)
	c.Assert(t.Contains(foo), gc.Equals, false)
	c.Assert(t.Contains(bar), gc.Equals, true)
}

func (tagSetSuite) TestContains(c *gc.C) {
	t, err := set.NewTagsFromStrings("unit-wordpress-0", "unit-rabbitmq-server-0")
	c.Assert(err, gc.IsNil)
	foo, _ := names.ParseTag("unit-wordpress-0")
	bar, _ := names.ParseTag("unit-rabbitmq-server-0")
	baz, _ := names.ParseTag("unit-mongodb-0")
	c.Assert(t.Contains(foo), gc.Equals, true)
	c.Assert(t.Contains(bar), gc.Equals, true)
	c.Assert(t.Contains(baz), gc.Equals, false)
}

// SortedValues orders by tag string, independent of insertion order.
func (tagSetSuite) TestSortedValues(c *gc.C) {
	m1, _ := names.ParseTag("machine-0")
	z1, _ := names.ParseTag("unit-z-server-0")
	z2, _ := names.ParseTag("unit-z-server-1")
	a1, _ := names.ParseTag("unit-a-server-0")
	t := set.NewTags(z2, a1, z1, m1)
	values := t.SortedValues()
	c.Assert(values, gc.DeepEquals, []names.Tag{m1, a1, z1, z2})
}

// Removing a missing element is a no-op, not a panic.
func (tagSetSuite) TestRemoveNonExistent(c *gc.C) {
	t := set.NewTags()
	foo, _ := names.ParseTag("unit-wordpress-0")
	t.Remove(foo)
	c.Assert(t.Size(), gc.Equals, 0)
}

func (tagSetSuite) TestUnion(c *gc.C) {
	foo, _ := names.ParseTag("unit-wordpress-0")
	bar, _ := names.ParseTag("unit-mongodb-0")
	baz, _ := names.ParseTag("unit-rabbitmq-server-0")
	bang, _ := names.ParseTag("unit-mysql-server-0")
	t1 := set.NewTags(foo, bar)
	t2 := set.NewTags(foo, baz, bang)
	union1 := t1.Union(t2)
	union2 := t2.Union(t1)
	c.Assert(union1.Size(), gc.Equals, 4)
	c.Assert(union2.Size(), gc.Equals, 4)
	c.Assert(union1, gc.DeepEquals, union2)
	c.Assert(union1, gc.DeepEquals, set.NewTags(foo, bar, baz, bang))
}

func (tagSetSuite) TestIntersection(c *gc.C) {
	foo, _ := names.ParseTag("unit-wordpress-0")
	bar, _ := names.ParseTag("unit-mongodb-0")
	baz, _ := names.ParseTag("unit-rabbitmq-server-0")
	bang, _ := names.ParseTag("unit-mysql-server-0")
	t1 := set.NewTags(foo, bar)
	t2 := set.NewTags(foo, baz, bang)
	int1 := t1.Intersection(t2)
	int2 := t2.Intersection(t1)
	c.Assert(int1.Size(), gc.Equals, 1)
	c.Assert(int2.Size(), gc.Equals, 1)
	c.Assert(int1, gc.DeepEquals, int2)
	c.Assert(int1, gc.DeepEquals, set.NewTags(foo))
}

// Difference is asymmetric: t1 - t2 differs from t2 - t1.
func (tagSetSuite) TestDifference(c *gc.C) {
	foo, _ := names.ParseTag("unit-wordpress-0")
	bar, _ := names.ParseTag("unit-mongodb-0")
	baz, _ := names.ParseTag("unit-rabbitmq-server-0")
	bang, _ := names.ParseTag("unit-mysql-server-0")
	t1 := set.NewTags(foo, bar)
	t2 := set.NewTags(foo, baz, bang)
	diff1 := t1.Difference(t2)
	diff2 := t2.Difference(t1)
	c.Assert(diff1, gc.DeepEquals, set.NewTags(bar))
	c.Assert(diff2, gc.DeepEquals, set.NewTags(baz, bang))
}

// The zero value of set.Tags must be usable for all read operations.
func (tagSetSuite) TestUninitialized(c *gc.C) {
	var uninitialized set.Tags
	foo, _ := names.ParseTag("unit-wordpress-0")
	bar, _ := names.ParseTag("unit-mongodb-0")
	c.Assert(uninitialized.Size(), gc.Equals, 0)
	c.Assert(uninitialized.IsEmpty(), gc.Equals, true)
	// You can get values and sorted values from an uninitialized set.
	c.Assert(uninitialized.Values(), gc.DeepEquals, []names.Tag{})
	// All contains checks are false
	c.Assert(uninitialized.Contains(foo), gc.Equals, false)
	// Remove works on an uninitialized Tags
	uninitialized.Remove(foo)
	var other set.Tags
	// Union returns a new set that is empty but initialized.
	c.Assert(uninitialized.Union(other), gc.DeepEquals, set.NewTags())
	c.Assert(uninitialized.Intersection(other), gc.DeepEquals, set.NewTags())
	c.Assert(uninitialized.Difference(other), gc.DeepEquals, set.NewTags())
	other = set.NewTags(foo, bar)
	c.Assert(uninitialized.Union(other), gc.DeepEquals, other)
	c.Assert(uninitialized.Intersection(other), gc.DeepEquals, set.NewTags())
	c.Assert(uninitialized.Difference(other), gc.DeepEquals, set.NewTags())
	c.Assert(other.Union(uninitialized), gc.DeepEquals, other)
	c.Assert(other.Intersection(uninitialized), gc.DeepEquals, set.NewTags())
	c.Assert(other.Difference(uninitialized), gc.DeepEquals, other)
	// Once something is added, the set becomes initialized.
	uninitialized.Add(foo)
	c.Assert(uninitialized.Contains(foo), gc.Equals, true)
}
|
package zego
import (
"encoding/json"
)
// TicketArray is a paged Zendesk ticket listing; Next_page/Previous_page
// carry the pagination URLs that ListTickets accepts as its argument.
type TicketArray struct {
	Count         int    `json:"count"`
	Created       string `json:"created"`
	Next_page     string `json:"next_page"`
	Previous_page string `json:"previous_page"`
	Tickets       []Ticket
}

// SingleTicket is the envelope returned for a single-ticket request.
type SingleTicket struct {
	Ticket *Ticket `json:"ticket"`
}

// Ticket mirrors the Zendesk ticket JSON object.
// NOTE(review): timestamps (CreatedAt, UpdatedAt, DueAt) are kept as raw
// strings rather than time.Time; Via/Custom_Fields/Fields are untyped here.
type Ticket struct {
	Id                    uint64      `json:"id"`
	URL                   string      `json:"url"`
	ExternalId            string      `json:"external_id"`
	CreatedAt             string      `json:"created_at"`
	UpdatedAt             string      `json:"updated_at"`
	Type                  string      `json:"type"`
	Subject               string      `json:"subject"`
	RawSubject            string      `json:"raw_subject"`
	Description           string      `json:"description"`
	Priority              string      `json:"priority"`
	Status                string      `json:"status"`
	Recipient             string      `json:"recipient"`
	RequesterId           uint32      `json:"requester_id"`
	SubmitterId           uint32      `json:"submitter_id"`
	AssigneeId            uint32      `json:"assignee_id"`
	OrganizationId        uint32      `json:"organization_id"`
	GroupId               uint32      `json:"group_id"`
	CollaboratorIds       []int32     `json:"collaborator_ids"`
	ForumTopicId          uint32      `json:"forum_topic_id"`
	ProblemId             uint32      `json:"problem_id"`
	HasIncidents          bool        `json:"has_incidents"`
	DueAt                 string      `json:"due_at"`
	Tags                  []string    `json:"tags"`
	Satisfaction_rating   string      `json:"satisfaction_rating"`
	Ticket_form_id        uint32      `json:"ticket_form_id"`
	Sharing_agreement_ids interface{} `json:"sharing_agreement_ids"`
	Via                   interface{} `json:"via"`
	Custom_Fields         interface{} `json:"custom_fields"`
	Fields                interface{} `json:"fields"`
}
// ListTickets retrieves a page of tickets. With no argument the first page
// ("/tickets.json") is fetched; otherwise pag[0] is used as the request
// path (e.g. a Next_page value from a previous TicketArray).
func (a Auth) ListTickets(pag ...string) (*TicketArray, error) {
	TicketStruct := &TicketArray{}

	var path string
	if len(pag) < 1 {
		path = "/tickets.json"
	} else {
		path = pag[0]
	}

	resource, err := api(a, "GET", path, "")
	if err != nil {
		return nil, err
	}

	// previously the unmarshal error was silently dropped, so a malformed
	// response produced an empty-but-"successful" result
	if err := json.Unmarshal([]byte(resource.Raw), TicketStruct); err != nil {
		return nil, err
	}

	return TicketStruct, nil
}
// GetTicket fetches a single ticket by id and decodes it.
func (a Auth) GetTicket(ticket_id string) (*SingleTicket, error) {
	TicketStruct := &SingleTicket{}
	path := "/tickets/" + ticket_id + ".json"

	resource, err := api(a, "GET", path, "")
	if err != nil {
		return nil, err
	}

	// previously the unmarshal error was silently dropped, so a malformed
	// response produced an empty-but-"successful" result
	if err := json.Unmarshal([]byte(resource.Raw), TicketStruct); err != nil {
		return nil, err
	}

	return TicketStruct, nil
}
// GetMultipleTickets fetches the raw resource for the given ticket id(s),
// leaving decoding to the caller.
func (a Auth) GetMultipleTickets(ticket_id string) (*Resource, error) {
	res, err := api(a, "GET", "/tickets/"+ticket_id+".json", "")
	if err != nil {
		return nil, err
	}
	return res, nil
}
// GetTicketComments fetches the raw comment list of a ticket.
func (a Auth) GetTicketComments(ticket_id string) (*Resource, error) {
	res, err := api(a, "GET", "/tickets/"+ticket_id+"/comments.json", "")
	if err != nil {
		return nil, err
	}
	return res, nil
}
// DeleteTicket deletes the ticket with the given id.
func (a Auth) DeleteTicket(ticket_id string) (*Resource, error) {
	res, err := api(a, "DELETE", "/tickets/"+ticket_id+".json", "")
	if err != nil {
		return nil, err
	}
	return res, nil
}
// CreateTicket posts the given JSON payload to create a new ticket.
func (a Auth) CreateTicket(data string) (*Resource, error) {
	res, err := api(a, "POST", "/tickets.json", data)
	if err != nil {
		return nil, err
	}
	return res, nil
}
Added structs for FieldValue and Via
package zego
import (
"encoding/json"
)
// TicketArray is a paged Zendesk ticket listing; Next_page/Previous_page
// carry the pagination URLs that ListTickets accepts as its argument.
type TicketArray struct {
	Count         int    `json:"count"`
	Created       string `json:"created"`
	Next_page     string `json:"next_page"`
	Previous_page string `json:"previous_page"`
	Tickets       []Ticket
}

// SingleTicket is the envelope returned for a single-ticket request.
type SingleTicket struct {
	Ticket *Ticket `json:"ticket"`
}

// FieldValue is one custom-field entry (id plus its string value).
type FieldValue struct {
	Id    uint64 `json:"id"`
	Value string `json:"value"`
}

// Source identifies where a via-channel event originated and was addressed.
type Source struct {
	From interface{} `json:"from"`
	To   interface{} `json:"to"`
	Rel  string      `json:"rel"`
}

// Via describes the channel through which a ticket was created.
type Via struct {
	Channel string `json:"channel"`
	Source  Source `json:"source"`
}

// Ticket mirrors the Zendesk ticket JSON object.
// NOTE(review): timestamps (CreatedAt, UpdatedAt, DueAt) are kept as raw strings.
type Ticket struct {
	Id                    uint64       `json:"id"`
	URL                   string       `json:"url"`
	ExternalId            string       `json:"external_id"`
	CreatedAt             string       `json:"created_at"`
	UpdatedAt             string       `json:"updated_at"`
	Type                  string       `json:"type"`
	Subject               string       `json:"subject"`
	RawSubject            string       `json:"raw_subject"`
	Description           string       `json:"description"`
	Priority              string       `json:"priority"`
	Status                string       `json:"status"`
	Recipient             string       `json:"recipient"`
	RequesterId           uint32       `json:"requester_id"`
	SubmitterId           uint32       `json:"submitter_id"`
	AssigneeId            uint32       `json:"assignee_id"`
	OrganizationId        uint32       `json:"organization_id"`
	GroupId               uint32       `json:"group_id"`
	CollaboratorIds       []int32      `json:"collaborator_ids"`
	ForumTopicId          uint32       `json:"forum_topic_id"`
	ProblemId             uint32       `json:"problem_id"`
	HasIncidents          bool         `json:"has_incidents"`
	DueAt                 string       `json:"due_at"`
	Tags                  []string     `json:"tags"`
	Satisfaction_rating   string       `json:"satisfaction_rating"`
	Ticket_form_id        uint32       `json:"ticket_form_id"`
	Sharing_agreement_ids interface{}  `json:"sharing_agreement_ids"`
	Via                   Via          `json:"via"`
	Custom_Fields         []FieldValue `json:"custom_fields"`
	Fields                []FieldValue `json:"fields"`
}
// ListTickets retrieves a page of tickets. With no argument the first page
// ("/tickets.json") is fetched; otherwise pag[0] is used as the request
// path (e.g. a Next_page value from a previous TicketArray).
func (a Auth) ListTickets(pag ...string) (*TicketArray, error) {
	TicketStruct := &TicketArray{}

	var path string
	if len(pag) < 1 {
		path = "/tickets.json"
	} else {
		path = pag[0]
	}

	resource, err := api(a, "GET", path, "")
	if err != nil {
		return nil, err
	}

	// previously the unmarshal error was silently dropped, so a malformed
	// response produced an empty-but-"successful" result
	if err := json.Unmarshal([]byte(resource.Raw), TicketStruct); err != nil {
		return nil, err
	}

	return TicketStruct, nil
}
// GetTicket fetches a single ticket by its string id.
func (a Auth) GetTicket(ticket_id string) (*SingleTicket, error) {
	TicketStruct := &SingleTicket{}
	path := "/tickets/" + ticket_id + ".json"
	resource, err := api(a, "GET", path, "")
	if err != nil {
		return nil, err
	}
	// The original discarded this error, returning an empty struct on
	// malformed responses; surface it instead.
	if err := json.Unmarshal([]byte(resource.Raw), TicketStruct); err != nil {
		return nil, err
	}
	return TicketStruct, nil
}
// GetMultipleTickets fetches the raw resource for the given ticket id(s).
func (a Auth) GetMultipleTickets(ticket_id string) (*Resource, error) {
	endpoint := "/tickets/" + ticket_id + ".json"
	res, err := api(a, "GET", endpoint, "")
	if err != nil {
		return nil, err
	}
	return res, nil
}
// GetTicketComments fetches the raw comments resource for a ticket.
func (a Auth) GetTicketComments(ticket_id string) (*Resource, error) {
	endpoint := "/tickets/" + ticket_id + "/comments.json"
	res, err := api(a, "GET", endpoint, "")
	if err != nil {
		return nil, err
	}
	return res, nil
}
// DeleteTicket deletes the ticket with the given id.
func (a Auth) DeleteTicket(ticket_id string) (*Resource, error) {
	endpoint := "/tickets/" + ticket_id + ".json"
	res, err := api(a, "DELETE", endpoint, "")
	if err != nil {
		return nil, err
	}
	return res, nil
}
// CreateTicket creates a new ticket from the supplied JSON payload.
func (a Auth) CreateTicket(data string) (*Resource, error) {
	res, err := api(a, "POST", "/tickets.json", data)
	if err != nil {
		return nil, err
	}
	return res, nil
}
|
package zkill
import (
"encoding/json"
"errors"
"io/ioutil"
"log"
"net/http"
)
// Package-level state shared by the RedisQ* free functions.
var (
// running gates the polling goroutine started by RedisQStart.
// NOTE(review): read/written from multiple goroutines without
// synchronization — confirm whether this race is acceptable.
running = false
redisQRecievers []RedisQReciever
redisQChannels []chan Kill
redisqErrors chan error
)
// RedisQReciever is a callback invoked with each received kill.
type RedisQReciever func(Kill)
// ZKillboardRedisQ default endpoint
const ZKillboardRedisQ = "https://redisq.zkillboard.com/listen.php"
// NOTE(review): alreadyRunning appears unused in this file — candidate
// for removal if nothing else references it.
var alreadyRunning = false
// RedisQClient is a client to zKillboard's RedisQ service
type RedisQClient struct {
RedisQURI string
webClient http.Client
UserAgent string
}
// NewRedisQ returns a client with default endpoints
func NewRedisQ() *RedisQClient {
c := &RedisQClient{
RedisQURI: ZKillboardRedisQ,
webClient: http.Client{},
}
return c
}
// redisqResp is the JSON envelope RedisQ wraps each kill in.
type redisqResp struct {
Kill Kill `json:"package"`
}
// RedisQStart launches a background goroutine that polls RedisQ in a loop,
// dispatching each kill via redisqSend and routing fetch errors to
// logRedisQError. It returns an error if polling is already active.
// NOTE(review): the `running` flag is read here and written by RedisQStop
// without synchronization — a data race under `go test -race`; confirm
// whether an atomic/mutex is needed.
func RedisQStart() error {
if running {
return errors.New("already watching redisq")
}
running = true
go func() {
for running {
kill, err := fetchRedisQ()
if err != nil {
logRedisQError(err)
} else {
redisqSend(kill)
}
}
}()
return nil
}
// RedisQSetErrorChannel redirects polling errors to errChan instead of the
// standard logger (see logRedisQError).
func RedisQSetErrorChannel(errChan chan error) {
redisqErrors = errChan
}
// RedisQStop signals the polling goroutine started by RedisQStart to exit
// after its current iteration.
func RedisQStop() {
running = false
}
// redisqSend fans a kill out to every registered channel and receiver,
// each on its own goroutine so a slow consumer cannot block the poll loop.
func redisqSend(k Kill) {
	for _, c := range redisQChannels {
		// Pass the channel as an argument: closing over the loop
		// variable would make every goroutine share one variable
		// (pre-Go-1.22 capture bug), so sends could all target the
		// last channel in the slice.
		go func(out chan Kill) {
			out <- k
		}(c)
	}
	for _, r := range redisQRecievers {
		// `go r(k)` evaluates r immediately, so no capture issue here.
		go r(k)
	}
}
// RedisQAddChannel registers a channel to receive every polled kill.
func RedisQAddChannel(output chan Kill) {
redisQChannels = append(redisQChannels, output)
}
// RedisQAddReciever registers a callback invoked for every polled kill.
func RedisQAddReciever(reciever RedisQReciever) {
redisQRecievers = append(redisQRecievers, reciever)
}
// FetchKillmails starts retrieving Killmails from ZKillboard RedisQ, sending
// them (and any errors encountered) through the channels passed.
// NOTE(review): the goroutine loops forever with no stop/cancellation
// mechanism — confirm whether a context or stop channel should be added.
func (c *RedisQClient) FetchKillmails(output chan Kill, errChan chan error) {
if c.UserAgent == "" {
errChan <- errors.New("user-agent must be set first")
return
}
go func() {
for {
kill, err := fetchRedisQ()
if err != nil {
errChan <- err
} else {
output <- kill
}
}
}()
}
// fetchRedisQ performs one long-poll GET against RedisQ and decodes the
// returned kill package. Named results let the early returns propagate the
// zero Kill alongside the error.
func fetchRedisQ() (k Kill, err error) {
	request, err := http.NewRequest("GET", RedisQAddr, nil)
	if err != nil {
		return
	}
	request.Header.Add("User-Agent", UserAgent)
	rawresp, err := webClient.Do(request)
	if err != nil {
		return
	}
	// The original never closed the body, leaking the connection on every
	// poll; close it so the transport can reuse the connection.
	defer rawresp.Body.Close()
	body, err := ioutil.ReadAll(rawresp.Body)
	if err != nil {
		return
	}
	zresp := redisqResp{}
	err = json.Unmarshal(body, &zresp)
	k = zresp.Kill
	return
}
// logRedisQError forwards err to the configured error channel when one is
// set (on a goroutine, so an unread channel never blocks polling);
// otherwise it falls back to the standard logger.
func logRedisQError(err error) {
	if redisqErrors == nil {
		log.Printf("[ERROR][REDISQ] - %v", err)
		return
	}
	go func() {
		redisqErrors <- err
	}()
}
removed old code
package zkill
import (
"encoding/json"
"errors"
"io/ioutil"
"log"
"net/http"
)
// Package-level state shared by the RedisQ* free functions.
var (
// running gates the polling goroutine started by RedisQStart.
// NOTE(review): accessed from multiple goroutines without
// synchronization — confirm whether this race is acceptable.
running = false
redisQRecievers []RedisQReciever
redisQChannels []chan Kill
redisqErrors chan error
)
// RedisQReciever is a callback invoked with each received kill.
type RedisQReciever func(Kill)
// ZKillboardRedisQ default endpoint
const ZKillboardRedisQ = "https://redisq.zkillboard.com/listen.php"
// NOTE(review): alreadyRunning appears unused in this file.
var alreadyRunning = false
// redisqResp is the JSON envelope RedisQ wraps each kill in.
type redisqResp struct {
Kill Kill `json:"package"`
}
// RedisQStart launches a background goroutine that polls RedisQ in a loop,
// dispatching each kill via redisqSend and routing fetch errors to
// logRedisQError. Returns an error if polling is already active.
// NOTE(review): `running` is read here and written by RedisQStop without
// synchronization — a data race under `go test -race`.
func RedisQStart() error {
if running {
return errors.New("already watching redisq")
}
running = true
go func() {
for running {
kill, err := fetchRedisQ()
if err != nil {
logRedisQError(err)
} else {
redisqSend(kill)
}
}
}()
return nil
}
// RedisQSetErrorChannel redirects polling errors to errChan instead of the
// standard logger (see logRedisQError).
func RedisQSetErrorChannel(errChan chan error) {
redisqErrors = errChan
}
// RedisQStop signals the polling goroutine to exit after its current
// iteration.
func RedisQStop() {
running = false
}
// redisqSend fans a kill out to every registered channel and receiver,
// each on its own goroutine so a slow consumer cannot block the poll loop.
func redisqSend(k Kill) {
	for _, c := range redisQChannels {
		// Pass the channel as an argument: closing over the loop
		// variable would make every goroutine share one variable
		// (pre-Go-1.22 capture bug), so sends could all target the
		// last channel in the slice.
		go func(out chan Kill) {
			out <- k
		}(c)
	}
	for _, r := range redisQRecievers {
		// `go r(k)` evaluates r immediately, so no capture issue here.
		go r(k)
	}
}
// RedisQAddChannel registers a channel to receive every polled kill.
func RedisQAddChannel(output chan Kill) {
redisQChannels = append(redisQChannels, output)
}
// RedisQAddReciever registers a callback invoked for every polled kill.
func RedisQAddReciever(reciever RedisQReciever) {
redisQRecievers = append(redisQRecievers, reciever)
}
// fetchRedisQ performs one long-poll GET against RedisQ and decodes the
// returned kill package. Named results let the early returns propagate the
// zero Kill alongside the error.
func fetchRedisQ() (k Kill, err error) {
	request, err := http.NewRequest("GET", RedisQAddr, nil)
	if err != nil {
		return
	}
	request.Header.Add("User-Agent", UserAgent)
	rawresp, err := webClient.Do(request)
	if err != nil {
		return
	}
	// The original never closed the body, leaking the connection on every
	// poll; close it so the transport can reuse the connection.
	defer rawresp.Body.Close()
	body, err := ioutil.ReadAll(rawresp.Body)
	if err != nil {
		return
	}
	zresp := redisqResp{}
	err = json.Unmarshal(body, &zresp)
	k = zresp.Kill
	return
}
// logRedisQError forwards err to the configured error channel when one is
// set (on a goroutine, so an unread channel never blocks polling);
// otherwise it falls back to the standard logger.
func logRedisQError(err error) {
	if redisqErrors == nil {
		log.Printf("[ERROR][REDISQ] - %v", err)
		return
	}
	go func() {
		redisqErrors <- err
	}()
}
|
package main
import (
"encoding/json"
"fmt"
"github.com/nutrun/lentil"
"testing"
"os"
)
// resetConfig replaces the global Config with a fresh, empty configuration
// so each test starts with no tube dependencies.
func resetConfig() {
Config = NewConfig("", "", "")
Config.deps = make(map[string][]string)
}
// TestPriority verifies that a job in a dependency tube (tube2) is served
// before the job in the tube that depends on it (tube1).
func TestPriority(t *testing.T) {
q := connect(t)
resetConfig()
Config.deps["tube1"] = []string{"tube2"}
put(t, "job1", "tube1", 0, q)
put(t, "job2", "tube2", 0, q)
jobs := NewJobQueue(q, false, make([]string, 0))
assertNextJob(t, jobs, "job2")
assertNextJob(t, jobs, "job1")
}
// TestIncludeExclude exercises JobQueue.Include under the four
// combinations of inclusive/exclusive mode and an empty/non-empty filter.
func TestIncludeExclude(t *testing.T) {
q := connect(t)
resetConfig()
all := NewJobQueue(q, false, make([]string, 0))
if !all.Include("tube") {
t.Errorf("Should include tube")
}
if !all.Include("another") {
t.Errorf("Should include another")
}
none := NewJobQueue(q, true, make([]string, 0))
if none.Include("none") {
t.Errorf("Should not include tube none")
}
include := NewJobQueue(q, true, []string{"in"})
if !include.Include("in") {
t.Errorf("Should include tube in")
}
if include.Include("out") {
t.Errorf("Should not include tube out")
}
exclude := NewJobQueue(q, false, []string{"out"})
if !exclude.Include("in") {
t.Errorf("Should not include tube in")
}
if exclude.Include("out") {
t.Errorf("Should not include tube out")
}
}
// TestMoarPriorities verifies ordering across a two-level dependency chain
// (tube1 -> tube3 -> tube2): all tube2 jobs drain first, then tube3, then
// tube1, FIFO within each tube.
func TestMoarPriorities(t *testing.T) {
q := connect(t)
resetConfig()
Config.deps["tube3"] = []string{"tube2"}
Config.deps["tube1"] = []string{"tube3"}
put(t, "job11", "tube1", 0, q)
put(t, "job21", "tube2", 0, q)
put(t, "job31", "tube3", 0, q)
put(t, "job22", "tube2", 0, q)
put(t, "job32", "tube3", 0, q)
put(t, "job12", "tube1", 0, q)
jobs := NewJobQueue(q, false, make([]string, 0))
assertNextJob(t, jobs, "job21")
assertNextJob(t, jobs, "job22")
assertNextJob(t, jobs, "job31")
assertNextJob(t, jobs, "job32")
assertNextJob(t, jobs, "job11")
assertNextJob(t, jobs, "job12")
}
// TestSleepWhenNoJobs verifies that Next times out (rather than returning
// a job) when the queue is empty.
func TestSleepWhenNoJobs(t *testing.T) {
q := connect(t)
resetConfig()
jobs := NewJobQueue(q, false, make([]string, 0))
no_job, err := reserveNextJob(t, jobs, "job11")
if no_job != nil {
t.Error(fmt.Sprintf("Reserved %v when should not have", no_job))
}
if err == nil {
t.Error(fmt.Sprintf("Should have thrown a TIME_OUT, threw %v instead", err))
}
}
// TestBlockOnReserved verifies that a dependent job (tube1) stays blocked
// while a job from its dependency tube (tube2) is still reserved.
func TestBlockOnReserved(t *testing.T) {
q := connect(t)
resetConfig()
Config.deps["tube1"] = []string{"tube2"}
put(t, "job1", "tube1", 0, q)
put(t, "job2", "tube2", 0, q)
jobs := NewJobQueue(q, false, make([]string, 0))
job, err := reserveNextJob(t, jobs, "job2")
if err != nil {
t.Error(fmt.Sprintf("Could not reserve job %s", job))
}
no_job, err := reserveNextJob(t, jobs, "job1")
if no_job != nil {
t.Error(fmt.Sprintf("Reserved %v when should not have", no_job))
}
if err == nil {
t.Error(fmt.Sprintf("Should have thrown a TIME_OUT, threw %v instead", err))
}
}
// TestBlockOnIgnored verifies that a job stays blocked on a dependency
// tube even when that tube is excluded from the queue's filter.
func TestBlockOnIgnored(t *testing.T) {
q := connect(t)
resetConfig()
Config.deps["another"] = []string{"block_on"}
put(t, "job", "block_on", 0, q)
put(t, "another", "another", 0, q)
jobs := NewJobQueue(q, false, []string{"block_on"})
no_job, err := reserveNextJob(t, jobs, "job")
if no_job != nil {
t.Error(fmt.Sprintf("Reserved %v when should not have", no_job))
}
if err == nil {
t.Error(fmt.Sprintf("Should have thrown a TIME_OUT, threw %v instead", err))
}
}
// assertNextJob reserves the next job from [jobqueue], verifies its "name"
// field matches [expected], and deletes it from the queue.
func assertNextJob(t *testing.T, jobqueue *JobQueue, expected string) {
	jobinfo := make(map[string]string)
	job, e := jobqueue.Next()
	if e != nil {
		t.Error(fmt.Sprintf("%v on [%v]", e, expected))
		return
	}
	// Surface malformed job payloads instead of silently comparing
	// against an empty map (the original ignored this error).
	if e := json.Unmarshal(job.Body, &jobinfo); e != nil {
		t.Error(e)
		return
	}
	if jobinfo["name"] != expected {
		t.Errorf("%s != %s\n", expected, jobinfo["name"])
	}
	jobqueue.Delete(job.Id)
}
// reserveNextJob attempts to reserve the next job from [jobqueue],
// returning (nil, err) on failure. The t and expected arguments are
// unused; they are kept for signature parity with assertNextJob.
func reserveNextJob(t *testing.T, jobqueue *JobQueue, expected string) (*lentil.Job, error) {
	next, err := jobqueue.Next()
	if err != nil {
		return nil, err
	}
	return next, err
}
// put enqueues a job named [jobName] into [tube] with the given delay,
// failing the test on any queue error.
func put(t *testing.T, jobName, tube string, delay int, q *lentil.Beanstalkd) {
	job := map[string]string{
		"tube": tube,
		"name": jobName,
	}
	// The original discarded the marshal error with `_`; fail fast
	// instead of enqueueing a nil payload.
	jobjson, err := json.Marshal(job)
	if err != nil {
		t.Fatal(err)
	}
	if e := q.Use(tube); e != nil {
		t.Fatal(e)
	}
	if _, e := q.Put(0, delay, 60, jobjson); e != nil {
		t.Error(e)
	}
}
// connect dials the beanstalkd instance named by the GLOW_QUEUE environment
// variable (falling back to the default local address) and drains every
// non-default tube so each test starts from a clean queue.
func connect(t *testing.T) *lentil.Beanstalkd {
	addr := os.Getenv("GLOW_QUEUE")
	if addr == "" {
		// Fall back to beanstalkd's default address so the tests run
		// without any environment setup; the original dialed "" and
		// failed cryptically when GLOW_QUEUE was unset.
		addr = "localhost:11300"
	}
	q, e := lentil.Dial(addr)
	if e != nil {
		t.Fatal(e)
	}
	// Clear beanstalkd
	tubes, e := q.ListTubes()
	if e != nil {
		t.Fatal(e)
	}
	for _, tube := range tubes {
		if tube == "default" {
			continue
		}
		_, e = q.Watch(tube)
		if e != nil {
			t.Fatal(e)
		}
		// Reserve-and-delete until the tube reports empty.
		for {
			job, e := q.ReserveWithTimeout(0)
			if e != nil {
				break
			}
			q.Delete(job.Id)
		}
		_, e := q.Ignore(tube)
		if e != nil {
			t.Fatal(e)
		}
	}
	return q
}
use glow queue in jobqueue_tests
package main
import (
"encoding/json"
"fmt"
"github.com/nutrun/lentil"
"testing"
"os"
)
// resetConfig replaces the global Config with a fresh, empty configuration
// so each test starts with no tube dependencies.
func resetConfig() {
Config = NewConfig("", "", "")
Config.deps = make(map[string][]string)
}
// TestPriority verifies that a job in a dependency tube (tube2) is served
// before the job in the tube that depends on it (tube1).
func TestPriority(t *testing.T) {
q := connect(t)
resetConfig()
Config.deps["tube1"] = []string{"tube2"}
put(t, "job1", "tube1", 0, q)
put(t, "job2", "tube2", 0, q)
jobs := NewJobQueue(q, false, make([]string, 0))
assertNextJob(t, jobs, "job2")
assertNextJob(t, jobs, "job1")
}
// TestIncludeExclude exercises JobQueue.Include under the four
// combinations of inclusive/exclusive mode and an empty/non-empty filter.
func TestIncludeExclude(t *testing.T) {
q := connect(t)
resetConfig()
all := NewJobQueue(q, false, make([]string, 0))
if !all.Include("tube") {
t.Errorf("Should include tube")
}
if !all.Include("another") {
t.Errorf("Should include another")
}
none := NewJobQueue(q, true, make([]string, 0))
if none.Include("none") {
t.Errorf("Should not include tube none")
}
include := NewJobQueue(q, true, []string{"in"})
if !include.Include("in") {
t.Errorf("Should include tube in")
}
if include.Include("out") {
t.Errorf("Should not include tube out")
}
exclude := NewJobQueue(q, false, []string{"out"})
if !exclude.Include("in") {
t.Errorf("Should not include tube in")
}
if exclude.Include("out") {
t.Errorf("Should not include tube out")
}
}
// TestMoarPriorities verifies ordering across a two-level dependency chain
// (tube1 -> tube3 -> tube2): all tube2 jobs drain first, then tube3, then
// tube1, FIFO within each tube.
func TestMoarPriorities(t *testing.T) {
q := connect(t)
resetConfig()
Config.deps["tube3"] = []string{"tube2"}
Config.deps["tube1"] = []string{"tube3"}
put(t, "job11", "tube1", 0, q)
put(t, "job21", "tube2", 0, q)
put(t, "job31", "tube3", 0, q)
put(t, "job22", "tube2", 0, q)
put(t, "job32", "tube3", 0, q)
put(t, "job12", "tube1", 0, q)
jobs := NewJobQueue(q, false, make([]string, 0))
assertNextJob(t, jobs, "job21")
assertNextJob(t, jobs, "job22")
assertNextJob(t, jobs, "job31")
assertNextJob(t, jobs, "job32")
assertNextJob(t, jobs, "job11")
assertNextJob(t, jobs, "job12")
}
// TestSleepWhenNoJobs verifies that Next times out (rather than returning
// a job) when the queue is empty.
func TestSleepWhenNoJobs(t *testing.T) {
q := connect(t)
resetConfig()
jobs := NewJobQueue(q, false, make([]string, 0))
no_job, err := reserveNextJob(t, jobs, "job11")
if no_job != nil {
t.Error(fmt.Sprintf("Reserved %v when should not have", no_job))
}
if err == nil {
t.Error(fmt.Sprintf("Should have thrown a TIME_OUT, threw %v instead", err))
}
}
// TestBlockOnReserved verifies that a dependent job (tube1) stays blocked
// while a job from its dependency tube (tube2) is still reserved.
func TestBlockOnReserved(t *testing.T) {
q := connect(t)
resetConfig()
Config.deps["tube1"] = []string{"tube2"}
put(t, "job1", "tube1", 0, q)
put(t, "job2", "tube2", 0, q)
jobs := NewJobQueue(q, false, make([]string, 0))
job, err := reserveNextJob(t, jobs, "job2")
if err != nil {
t.Error(fmt.Sprintf("Could not reserve job %s", job))
}
no_job, err := reserveNextJob(t, jobs, "job1")
if no_job != nil {
t.Error(fmt.Sprintf("Reserved %v when should not have", no_job))
}
if err == nil {
t.Error(fmt.Sprintf("Should have thrown a TIME_OUT, threw %v instead", err))
}
}
// TestBlockOnIgnored verifies that a job stays blocked on a dependency
// tube even when that tube is excluded from the queue's filter.
func TestBlockOnIgnored(t *testing.T) {
q := connect(t)
resetConfig()
Config.deps["another"] = []string{"block_on"}
put(t, "job", "block_on", 0, q)
put(t, "another", "another", 0, q)
jobs := NewJobQueue(q, false, []string{"block_on"})
no_job, err := reserveNextJob(t, jobs, "job")
if no_job != nil {
t.Error(fmt.Sprintf("Reserved %v when should not have", no_job))
}
if err == nil {
t.Error(fmt.Sprintf("Should have thrown a TIME_OUT, threw %v instead", err))
}
}
// assertNextJob reserves the next job from [jobqueue], verifies its "name"
// field matches [expected], and deletes it from the queue.
func assertNextJob(t *testing.T, jobqueue *JobQueue, expected string) {
	jobinfo := make(map[string]string)
	job, e := jobqueue.Next()
	if e != nil {
		t.Error(fmt.Sprintf("%v on [%v]", e, expected))
		return
	}
	// Surface malformed job payloads instead of silently comparing
	// against an empty map (the original ignored this error).
	if e := json.Unmarshal(job.Body, &jobinfo); e != nil {
		t.Error(e)
		return
	}
	if jobinfo["name"] != expected {
		t.Errorf("%s != %s\n", expected, jobinfo["name"])
	}
	jobqueue.Delete(job.Id)
}
// reserveNextJob attempts to reserve the next job from [jobqueue],
// returning (nil, err) on failure. The t and expected arguments are
// unused; they are kept for signature parity with assertNextJob.
func reserveNextJob(t *testing.T, jobqueue *JobQueue, expected string) (*lentil.Job, error) {
	next, err := jobqueue.Next()
	if err != nil {
		return nil, err
	}
	return next, err
}
// put enqueues a job named [jobName] into [tube] with the given delay,
// failing the test on any queue error.
func put(t *testing.T, jobName, tube string, delay int, q *lentil.Beanstalkd) {
	job := map[string]string{
		"tube": tube,
		"name": jobName,
	}
	// The original discarded the marshal error with `_`; fail fast
	// instead of enqueueing a nil payload.
	jobjson, err := json.Marshal(job)
	if err != nil {
		t.Fatal(err)
	}
	if e := q.Use(tube); e != nil {
		t.Fatal(e)
	}
	if _, e := q.Put(0, delay, 60, jobjson); e != nil {
		t.Error(e)
	}
}
// connect dials the beanstalkd instance named by the GLOW_QUEUE environment
// variable (defaulting to localhost:11300) and drains every non-default
// tube so each test starts from a clean queue.
func connect(t *testing.T) *lentil.Beanstalkd {
gq := os.Getenv("GLOW_QUEUE")
if gq == "" {
gq = "localhost:11300"
}
q, e := lentil.Dial(gq)
if e != nil {
t.Fatal(e)
}
// Clear beanstalkd
tubes, e := q.ListTubes()
if e != nil {
t.Fatal(e)
}
for _, tube := range tubes {
if tube == "default" {
continue
}
_, e = q.Watch(tube)
if e != nil {
t.Fatal(e)
}
// Reserve-and-delete until the tube reports empty.
for {
job, e := q.ReserveWithTimeout(0)
if e != nil {
break
}
q.Delete(job.Id)
}
_, e := q.Ignore(tube)
if e != nil {
t.Fatal(e)
}
}
return q
}
|
package benchlist
import (
"container/list"
"math/rand"
"sync"
"time"
"github.com/ava-labs/avalanchego/ids"
"github.com/ava-labs/avalanchego/snow"
"github.com/ava-labs/avalanchego/snow/validators"
"github.com/ava-labs/avalanchego/utils/timer"
safemath "github.com/ava-labs/avalanchego/utils/math"
)
// QueryBenchlist tracks outstanding queries per validator and temporarily
// benches validators that repeatedly fail to respond.
type QueryBenchlist interface {
// RegisterQuery registers a sent query and returns whether the query is subject to benchlist
RegisterQuery(ids.ShortID, uint32) bool
// RegisterResponse registers the response to a query message
RegisterResponse(ids.ShortID, uint32)
// QueryFailed registers that a query did not receive a response within our synchrony bound
QueryFailed(ids.ShortID, uint32)
}
// If a peer consistently does not respond to queries, it will
// increase latencies on the network whenever that peer is polled.
// If we cannot terminate the poll early, then the poll will wait
// the full timeout before finalizing the poll and making progress.
// This can increase network latencies to an undesirable level.
// Therefore, a benchlist is used as a heurstic to immediately fail
// queries to nodes that are consistently not responding.
type queryBenchlist struct {
vdrs validators.Set
// Validator ID --> Request ID --> non-empty iff
// there is an outstanding request to this validator
// with the corresponding requestID
pendingQueries map[[20]byte]map[uint32]struct{}
// Map of consecutive query failures
consecutiveFailures map[[20]byte]int
// Maintain benchlist
// benchlistTimes maps a benched validator to the time its bench expires;
// benchlistOrder keeps benched validators in expiration order;
// benchlistSet is the membership set for O(1) lookups.
benchlistTimes map[[20]byte]time.Time
benchlistOrder *list.List
benchlistSet ids.ShortSet
threshold int
halfDuration time.Duration
maxPortion float64
clock timer.Clock
metrics *metrics
ctx *snow.Context
// lock guards all of the mutable state above.
lock sync.Mutex
}
// NewQueryBenchlist returns a benchlist that benches a validator after
// [threshold] consecutive failures for a randomized period between
// [duration]/2 and [duration], capping benched stake at [maxPortion] of
// total weight.
func NewQueryBenchlist(validators validators.Set, ctx *snow.Context, threshold int, duration time.Duration, maxPortion float64) QueryBenchlist {
metrics := &metrics{}
metrics.Initialize(ctx.Namespace, ctx.Metrics)
return &queryBenchlist{
pendingQueries: make(map[[20]byte]map[uint32]struct{}),
consecutiveFailures: make(map[[20]byte]int),
benchlistTimes: make(map[[20]byte]time.Time),
benchlistOrder: list.New(),
benchlistSet: ids.ShortSet{},
vdrs: validators,
threshold: threshold,
// Store half the duration: bench times are halfDuration plus a
// random value in [0, halfDuration).
halfDuration: duration / 2,
maxPortion: maxPortion,
ctx: ctx,
metrics: metrics,
}
}
// RegisterQuery records an outstanding query to [validatorID] and reports
// whether the query should actually be sent (false when the validator is
// currently benched).
func (b *queryBenchlist) RegisterQuery(validatorID ids.ShortID, requestID uint32) bool {
	b.lock.Lock()
	defer b.lock.Unlock()

	if b.benched(validatorID) {
		return false
	}
	key := validatorID.Key()
	requests, exists := b.pendingQueries[key]
	if !exists {
		requests = make(map[uint32]struct{})
		b.pendingQueries[key] = requests
	}
	requests[requestID] = struct{}{}
	return true
}
// RegisterResponse removes the query from pending
// and resets the validator's consecutive-failure count.
func (b *queryBenchlist) RegisterResponse(validatorID ids.ShortID, requestID uint32) {
b.lock.Lock()
defer b.lock.Unlock()
// Ignore responses to queries we are not tracking.
if ok := b.removeQuery(validatorID, requestID); !ok {
return
}
// Reset consecutive failures on success
delete(b.consecutiveFailures, validatorID.Key())
}
// QueryFailed notes a failure and benchlists [validatorID] if necessary
func (b *queryBenchlist) QueryFailed(validatorID ids.ShortID, requestID uint32) {
b.lock.Lock()
defer b.lock.Unlock()
// Ignore failures for queries we are not tracking.
if ok := b.removeQuery(validatorID, requestID); !ok {
return
}
key := validatorID.Key()
// Add a failure and benches [validatorID] if it has
// passed the threshold
b.consecutiveFailures[key]++
if b.consecutiveFailures[key] >= b.threshold {
b.bench(validatorID)
}
}
// bench adds [validatorID] to the benchlist for a randomized period in
// [halfDuration, 2*halfDuration) and clears its failure count. No-op when
// already benched. Caller must hold b.lock.
func (b *queryBenchlist) bench(validatorID ids.ShortID) {
if b.benchlistSet.Contains(validatorID) {
return
}
key := validatorID.Key()
// Add to benchlist times with randomized delay
randomizedDuration := time.Duration(rand.Float64()*float64(b.halfDuration)) + b.halfDuration // #nosec G404
b.benchlistTimes[key] = b.clock.Time().Add(randomizedDuration)
b.benchlistOrder.PushBack(validatorID)
b.benchlistSet.Add(validatorID)
delete(b.consecutiveFailures, key)
b.ctx.Log.Debug("Benching validator %s for %v after %d consecutive failed queries", validatorID, randomizedDuration, b.threshold)
// Note: there could be a memory leak if a large number of
// validators were added, sampled, benched, and never sampled
// again. Due to the minimum staking amount and durations this
// is not a realistic concern.
b.cleanup()
}
// benched reports whether [validatorID] is still within its benching
// window; an expired entry triggers a cleanup pass before returning false.
// Caller must hold b.lock.
func (b *queryBenchlist) benched(validatorID ids.ShortID) bool {
	deadline, active := b.benchlistTimes[validatorID.Key()]
	if !active {
		return false
	}
	if b.clock.Time().Before(deadline) {
		return true
	}
	// The benching period has elapsed; purge expired entries.
	b.cleanup()
	return false
}
// cleanup removes benchlist entries whose time has expired and ensures the
// total benched stake stays below [maxPortion] of total validator weight.
// Caller must hold b.lock.
func (b *queryBenchlist) cleanup() {
	currentWeight, err := b.vdrs.SubsetWeight(b.benchlistSet)
	if err != nil {
		// Should never happen; reset to a safe state. (%s, not %w:
		// the %w verb is only meaningful in fmt.Errorf.)
		b.ctx.Log.Error("Failed to calculate subset weight due to: %s. Resetting benchlist.", err)
		b.reset()
		return
	}

	benchLen := b.benchlistSet.Len()
	updatedWeight := currentWeight
	totalWeight := b.vdrs.Weight()
	maxBenchlistWeight := uint64(float64(totalWeight) * b.maxPortion)

	// Iterate over elements of the benchlist in order of expiration.
	// Capture e.Next() before removal: list.Remove nils the element's
	// links, so advancing afterwards would end the loop after one removal.
	for e := b.benchlistOrder.Front(); e != nil; {
		next := e.Next()
		validatorID := e.Value.(ids.ShortID)
		key := validatorID.Key()
		end := b.benchlistTimes[key]
		// Stop once the next entry has not expired and the bench is back
		// under the maximum weight. Compare against the running
		// [updatedWeight], not the stale starting weight, so removals
		// made in this loop are observed by the cap check.
		// Note: this creates an edge case where benchlisting a validator
		// with a sufficient stake may clear the benchlist
		if b.clock.Time().Before(end) && updatedWeight < maxBenchlistWeight {
			break
		}

		removeWeight, ok := b.vdrs.GetWeight(validatorID)
		if ok {
			// Subtract from the running total so each removal is
			// accounted for cumulatively.
			newWeight, err := safemath.Sub64(updatedWeight, removeWeight)
			if err != nil {
				b.ctx.Log.Error("Failed to calculate new subset weight due to: %s. Resetting benchlist.", err)
				b.reset()
				return
			}
			updatedWeight = newWeight
		}

		b.benchlistOrder.Remove(e)
		delete(b.benchlistTimes, key)
		b.benchlistSet.Remove(validatorID)
		e = next
	}

	updatedBenchLen := b.benchlistSet.Len()
	b.ctx.Log.Debug("Benchlist weight: (%v/%v) -> (%v/%v). Benched Validators: %d -> %d",
		currentWeight,
		totalWeight,
		updatedWeight,
		totalWeight,
		benchLen,
		updatedBenchLen,
	)
	b.metrics.weightBenched.Set(float64(updatedWeight))
	b.metrics.numBenched.Set(float64(updatedBenchLen))
}
// reset clears all benchlist state and zeroes the metrics. Used as a
// fail-safe when weight bookkeeping cannot be trusted. Caller must hold
// b.lock.
func (b *queryBenchlist) reset() {
b.pendingQueries = make(map[[20]byte]map[uint32]struct{})
b.consecutiveFailures = make(map[[20]byte]int)
b.benchlistTimes = make(map[[20]byte]time.Time)
b.benchlistOrder.Init()
b.benchlistSet.Clear()
b.metrics.weightBenched.Set(0)
b.metrics.numBenched.Set(0)
}
// removeQuery returns true if the query was present
// (and removes it from the pending set). Caller must hold b.lock.
func (b *queryBenchlist) removeQuery(validatorID ids.ShortID, requestID uint32) bool {
key := validatorID.Key()
validatorRequests, ok := b.pendingQueries[key]
if !ok {
return false
}
_, ok = validatorRequests[requestID]
if ok {
delete(validatorRequests, requestID)
// Drop the outer map entry once the last request is gone.
if len(validatorRequests) == 0 {
delete(b.pendingQueries, key)
}
}
return ok
}
Update benchlist metrics namespace
package benchlist
import (
"container/list"
"fmt"
"math/rand"
"sync"
"time"
"github.com/ava-labs/avalanchego/ids"
"github.com/ava-labs/avalanchego/snow"
"github.com/ava-labs/avalanchego/snow/validators"
"github.com/ava-labs/avalanchego/utils/timer"
safemath "github.com/ava-labs/avalanchego/utils/math"
)
// QueryBenchlist tracks outstanding queries per validator and temporarily
// benches validators that repeatedly fail to respond.
type QueryBenchlist interface {
// RegisterQuery registers a sent query and returns whether the query is subject to benchlist
RegisterQuery(ids.ShortID, uint32) bool
// RegisterResponse registers the response to a query message
RegisterResponse(ids.ShortID, uint32)
// QueryFailed registers that a query did not receive a response within our synchrony bound
QueryFailed(ids.ShortID, uint32)
}
// If a peer consistently does not respond to queries, it will
// increase latencies on the network whenever that peer is polled.
// If we cannot terminate the poll early, then the poll will wait
// the full timeout before finalizing the poll and making progress.
// This can increase network latencies to an undesirable level.
// Therefore, a benchlist is used as a heurstic to immediately fail
// queries to nodes that are consistently not responding.
type queryBenchlist struct {
vdrs validators.Set
// Validator ID --> Request ID --> non-empty iff
// there is an outstanding request to this validator
// with the corresponding requestID
pendingQueries map[[20]byte]map[uint32]struct{}
// Map of consecutive query failures
consecutiveFailures map[[20]byte]int
// Maintain benchlist
// benchlistTimes maps a benched validator to the time its bench expires;
// benchlistOrder keeps benched validators in expiration order;
// benchlistSet is the membership set for O(1) lookups.
benchlistTimes map[[20]byte]time.Time
benchlistOrder *list.List
benchlistSet ids.ShortSet
threshold int
halfDuration time.Duration
maxPortion float64
clock timer.Clock
metrics *metrics
ctx *snow.Context
// lock guards all of the mutable state above.
lock sync.Mutex
}
// NewQueryBenchlist returns a benchlist that benches a validator after
// [threshold] consecutive failures for a randomized period between
// [duration]/2 and [duration], capping benched stake at [maxPortion] of
// total weight. Metrics are registered under a "_benchlist"-suffixed
// namespace to avoid colliding with the chain's own metrics.
func NewQueryBenchlist(validators validators.Set, ctx *snow.Context, threshold int, duration time.Duration, maxPortion float64) QueryBenchlist {
metrics := &metrics{}
metrics.Initialize(fmt.Sprintf("%s_benchlist", ctx.Namespace), ctx.Metrics)
return &queryBenchlist{
pendingQueries: make(map[[20]byte]map[uint32]struct{}),
consecutiveFailures: make(map[[20]byte]int),
benchlistTimes: make(map[[20]byte]time.Time),
benchlistOrder: list.New(),
benchlistSet: ids.ShortSet{},
vdrs: validators,
threshold: threshold,
// Store half the duration: bench times are halfDuration plus a
// random value in [0, halfDuration).
halfDuration: duration / 2,
maxPortion: maxPortion,
ctx: ctx,
metrics: metrics,
}
}
// RegisterQuery records an outstanding query to [validatorID] and reports
// whether the query should actually be sent (false when the validator is
// currently benched).
func (b *queryBenchlist) RegisterQuery(validatorID ids.ShortID, requestID uint32) bool {
	b.lock.Lock()
	defer b.lock.Unlock()

	if b.benched(validatorID) {
		return false
	}
	key := validatorID.Key()
	requests, exists := b.pendingQueries[key]
	if !exists {
		requests = make(map[uint32]struct{})
		b.pendingQueries[key] = requests
	}
	requests[requestID] = struct{}{}
	return true
}
// RegisterResponse removes the query from pending
// and resets the validator's consecutive-failure count.
func (b *queryBenchlist) RegisterResponse(validatorID ids.ShortID, requestID uint32) {
b.lock.Lock()
defer b.lock.Unlock()
// Ignore responses to queries we are not tracking.
if ok := b.removeQuery(validatorID, requestID); !ok {
return
}
// Reset consecutive failures on success
delete(b.consecutiveFailures, validatorID.Key())
}
// QueryFailed notes a failure and benchlists [validatorID] if necessary
func (b *queryBenchlist) QueryFailed(validatorID ids.ShortID, requestID uint32) {
b.lock.Lock()
defer b.lock.Unlock()
// Ignore failures for queries we are not tracking.
if ok := b.removeQuery(validatorID, requestID); !ok {
return
}
key := validatorID.Key()
// Add a failure and benches [validatorID] if it has
// passed the threshold
b.consecutiveFailures[key]++
if b.consecutiveFailures[key] >= b.threshold {
b.bench(validatorID)
}
}
// bench adds [validatorID] to the benchlist for a randomized period in
// [halfDuration, 2*halfDuration) and clears its failure count. No-op when
// already benched. Caller must hold b.lock.
func (b *queryBenchlist) bench(validatorID ids.ShortID) {
if b.benchlistSet.Contains(validatorID) {
return
}
key := validatorID.Key()
// Add to benchlist times with randomized delay
randomizedDuration := time.Duration(rand.Float64()*float64(b.halfDuration)) + b.halfDuration // #nosec G404
b.benchlistTimes[key] = b.clock.Time().Add(randomizedDuration)
b.benchlistOrder.PushBack(validatorID)
b.benchlistSet.Add(validatorID)
delete(b.consecutiveFailures, key)
b.ctx.Log.Debug("Benching validator %s for %v after %d consecutive failed queries", validatorID, randomizedDuration, b.threshold)
// Note: there could be a memory leak if a large number of
// validators were added, sampled, benched, and never sampled
// again. Due to the minimum staking amount and durations this
// is not a realistic concern.
b.cleanup()
}
// benched reports whether [validatorID] is still within its benching
// window; an expired entry triggers a cleanup pass before returning false.
// Caller must hold b.lock.
func (b *queryBenchlist) benched(validatorID ids.ShortID) bool {
	deadline, active := b.benchlistTimes[validatorID.Key()]
	if !active {
		return false
	}
	if b.clock.Time().Before(deadline) {
		return true
	}
	// The benching period has elapsed; purge expired entries.
	b.cleanup()
	return false
}
// cleanup removes benchlist entries whose time has expired and ensures the
// total benched stake stays below [maxPortion] of total validator weight.
// Caller must hold b.lock.
func (b *queryBenchlist) cleanup() {
	currentWeight, err := b.vdrs.SubsetWeight(b.benchlistSet)
	if err != nil {
		// Should never happen; reset to a safe state. (%s, not %w:
		// the %w verb is only meaningful in fmt.Errorf.)
		b.ctx.Log.Error("Failed to calculate subset weight due to: %s. Resetting benchlist.", err)
		b.reset()
		return
	}

	benchLen := b.benchlistSet.Len()
	updatedWeight := currentWeight
	totalWeight := b.vdrs.Weight()
	maxBenchlistWeight := uint64(float64(totalWeight) * b.maxPortion)

	// Iterate over elements of the benchlist in order of expiration.
	// Capture e.Next() before removal: list.Remove nils the element's
	// links, so advancing afterwards would end the loop after one removal.
	for e := b.benchlistOrder.Front(); e != nil; {
		next := e.Next()
		validatorID := e.Value.(ids.ShortID)
		key := validatorID.Key()
		end := b.benchlistTimes[key]
		// Stop once the next entry has not expired and the bench is back
		// under the maximum weight. Compare against the running
		// [updatedWeight], not the stale starting weight, so removals
		// made in this loop are observed by the cap check.
		// Note: this creates an edge case where benchlisting a validator
		// with a sufficient stake may clear the benchlist
		if b.clock.Time().Before(end) && updatedWeight < maxBenchlistWeight {
			break
		}

		removeWeight, ok := b.vdrs.GetWeight(validatorID)
		if ok {
			// Subtract from the running total so each removal is
			// accounted for cumulatively.
			newWeight, err := safemath.Sub64(updatedWeight, removeWeight)
			if err != nil {
				b.ctx.Log.Error("Failed to calculate new subset weight due to: %s. Resetting benchlist.", err)
				b.reset()
				return
			}
			updatedWeight = newWeight
		}

		b.benchlistOrder.Remove(e)
		delete(b.benchlistTimes, key)
		b.benchlistSet.Remove(validatorID)
		e = next
	}

	updatedBenchLen := b.benchlistSet.Len()
	b.ctx.Log.Debug("Benchlist weight: (%v/%v) -> (%v/%v). Benched Validators: %d -> %d",
		currentWeight,
		totalWeight,
		updatedWeight,
		totalWeight,
		benchLen,
		updatedBenchLen,
	)
	b.metrics.weightBenched.Set(float64(updatedWeight))
	b.metrics.numBenched.Set(float64(updatedBenchLen))
}
// reset clears all benchlist state and zeroes the metrics. Used as a
// fail-safe when weight bookkeeping cannot be trusted. Caller must hold
// b.lock.
func (b *queryBenchlist) reset() {
b.pendingQueries = make(map[[20]byte]map[uint32]struct{})
b.consecutiveFailures = make(map[[20]byte]int)
b.benchlistTimes = make(map[[20]byte]time.Time)
b.benchlistOrder.Init()
b.benchlistSet.Clear()
b.metrics.weightBenched.Set(0)
b.metrics.numBenched.Set(0)
}
// removeQuery returns true if the query was present
// (and removes it from the pending set). Caller must hold b.lock.
func (b *queryBenchlist) removeQuery(validatorID ids.ShortID, requestID uint32) bool {
key := validatorID.Key()
validatorRequests, ok := b.pendingQueries[key]
if !ok {
return false
}
_, ok = validatorRequests[requestID]
if ok {
delete(validatorRequests, requestID)
// Drop the outer map entry once the last request is gone.
if len(validatorRequests) == 0 {
delete(b.pendingQueries, key)
}
}
return ok
}
|
// CookieJar - A contestant's algorithm toolbox
// Copyright 2014 Peter Szilagyi. All rights reserved.
//
// CookieJar is dual licensed: you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free Software
// Foundation, either version 3 of the License, or (at your option) any later
// version.
//
// The toolbox is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// Alternatively, the CookieJar toolbox may be used in accordance with the terms
// and conditions contained in a signed written agreement between you and the
// author(s).
package main
import (
"io/ioutil"
"net"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/gorilla/websocket"
"gopkg.in/fsnotify.v1"
"gopkg.in/inconshreveable/log15.v2"
"gopkg.in/karalabe/cookiejar.v2/tools/deps"
)
// Constants used by the arena backend
// wrapperStart/wrapperEnd delimit the comment block that solutions are
// wrapped in when uploaded; endpoint strips them on download.
const wrapperStart = "/**************\nOriginal source\n"
const wrapperEnd = "\nOriginal source\n**************/\n\n"
// Creates a new arena backend to negotiate code snippets. It resolves a
// local TCP address, starts the file-system monitor, serves the websocket
// endpoint on a background goroutine, and returns the port the backend
// listens on.
func backend() (int, error) {
	// Find an unused port and listen on that
	addr, err := net.ResolveTCPAddr("tcp4", "localhost:33214")
	if err != nil {
		return -1, err
	}
	// Create the file system monitor
	watcher, err = fsnotify.NewWatcher()
	if err != nil {
		// The original returned `-1, nil` here, silently swallowing the
		// watcher failure; propagate the error instead.
		return -1, err
	}
	watches = make(map[string]*websocket.Conn)
	go monitor()
	// Register the websocket handlers
	http.HandleFunc("/", endpoint)
	go func() {
		log15.Info("Starting backend", "address", addr.String())
		if err := http.ListenAndServe(addr.String(), nil); err != nil {
			log15.Crit("failed to start backend", "error", err)
			os.Exit(-1)
		}
	}()
	return addr.Port, nil
}
// Upgrader to convert a simple HTTP request to a websocket connection.
// CheckOrigin accepts every origin since the backend only listens locally.
var upgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
CheckOrigin: func(r *http.Request) bool { return true },
}
// Data associated with a challenge.
type challenge struct {
Name string `json:"name"`
Source string `json:"source"`
}
// Websocket inbound connection handler.
// It upgrades the HTTP request, then loops reading challenge messages:
// new challenges are written into the repository, existing ones are
// compared against the stored solution (conflicting uploads are denied),
// and accepted challenges are registered for file-system monitoring so
// local edits are pushed back over this connection.
func endpoint(w http.ResponseWriter, r *http.Request) {
log15.Info("inbound websocket connection")
// Upgrade the request to a websocket connection
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
log15.Error("failed to upgrade to ws connection", "error", err)
return
}
defer conn.Close()
// open tracks the challenge currently accepting uploads; conflict tracks
// the last conflicting challenge so the warning is only sent once per
// repeated message.
open, conflict := "", ""
for {
// Fetch the challenge details
msg := new(challenge)
if err := conn.ReadJSON(&msg); err != nil {
log15.Error("failed to retrieve challenge data", "error", err)
return
}
// Pre process the source code
msg.Name = strings.TrimSpace(msg.Name)
// Strip the wrapper comment block if the source was previously
// round-tripped through the arena.
if strings.Contains(msg.Source, wrapperStart) && strings.Contains(msg.Source, wrapperEnd) {
msg.Source = strings.Split(msg.Source, wrapperStart)[1]
msg.Source = strings.Split(msg.Source, wrapperEnd)[0]
}
// If it's a new challenge, add it to the repository
root := filepath.Join(*repo, msg.Name)
main := filepath.Join(root, "main.go")
if _, err := os.Stat(root); err != nil {
log15.Info("new challenge found", "name", msg.Name)
if err := os.MkdirAll(root, 0700); err != nil {
log15.Error("failed to create challenge", "error", err)
return
}
if err := ioutil.WriteFile(main, []byte(msg.Source), 0700); err != nil {
log15.Error("failed to write challenge", "error", err)
return
}
notify(conn, "success", "New challenge registered: "+msg.Name)
} else {
// Otherwise make sure we're not conflicting
if source, err := ioutil.ReadFile(main); err != nil {
log15.Error("failed to retrieve existing solution", "error", err)
return
} else if string(source) != msg.Source {
log15.Warn("solution conflict, download denied", "name", msg.Name)
// Only warn on the second consecutive conflict for the same
// challenge to avoid spamming the client.
if conflict == msg.Name {
notify(conn, "warning", "Solution conflict, download denied: "+msg.Name)
}
conflict = msg.Name
continue
}
conflict = ""
if open != msg.Name {
open = msg.Name
notify(conn, "information", "Challenge accepting uploads: "+msg.Name)
}
}
// Try to monitor the file
if _, ok := watches[msg.Name]; !ok {
log15.Info("starting challenge monitoring", "name", msg.Name)
if err := watcher.Add(root); err != nil {
log15.Error("failed to monitor the challenge", "error", err)
return
}
watches[msg.Name] = conn
}
}
}
// File system monitor to detect changes.
var watcher *fsnotify.Watcher

// Challenge name -> websocket connection that should receive uploads for it.
// NOTE(review): written by endpoint handlers and read by the monitor
// goroutine without synchronization — confirm whether a mutex is needed.
var watches map[string]*websocket.Conn
// Keeps processing monitoring events and reacts to source changes.
//
// Whenever a watched challenge's main.go is written or created, the current
// solution is read back, wrapped into a comment block, merged with its
// dependencies and uploaded through the websocket connection registered for
// that challenge.
func monitor() {
	for {
		select {
		case event := <-watcher.Events:
			if path.Base(event.Name) == "main.go" {
				if event.Op&fsnotify.Write == fsnotify.Write || event.Op&fsnotify.Create == fsnotify.Create {
					dir, _ := path.Split(event.Name)
					name := path.Base(dir)
					log15.Info("uploading modified solution", "name", name)
					// Retrieve the user solution and wrap it in a comment block
					source, err := ioutil.ReadFile(event.Name)
					if err != nil {
						log15.Error("failed to retrieve solution", "error", err)
						continue
					}
					wrapped := wrapperStart + string(source) + wrapperEnd
					// Merge all the dependencies to generate the submission
					merged, err := deps.Merge(event.Name)
					if err != nil {
						// fixed structured-log key typo: "errir" -> "error"
						log15.Error("failed to merge submit dependencies", "error", err)
						continue
					}
					// Serialize the wrapped original and the standalone submission
					conn := watches[name]
					if err := conn.WriteJSON(&challenge{Name: name, Source: wrapped + string(merged)}); err != nil {
						log15.Error("failed to upload new solution", "error", err)
						continue
					}
					notify(conn, "success", "Submission uploaded: "+name+" at "+time.Now().Format("15:04:05"))
				}
			}
		case err := <-watcher.Errors:
			log15.Error("file system monitor failure", "error", err)
		}
	}
}
Fix submission upload.
// CookieJar - A contestant's algorithm toolbox
// Copyright 2014 Peter Szilagyi. All rights reserved.
//
// CookieJar is dual licensed: you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free Software
// Foundation, either version 3 of the License, or (at your option) any later
// version.
//
// The toolbox is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// Alternatively, the CookieJar toolbox may be used in accordance with the terms
// and conditions contained in a signed written agreement between you and the
// author(s).
package main
import (
"io/ioutil"
"net"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/gorilla/websocket"
"gopkg.in/fsnotify.v1"
"gopkg.in/inconshreveable/log15.v2"
"gopkg.in/karalabe/cookiejar.v2/tools/deps"
)
// Constants used by the arena backend

// wrapperStart and wrapperEnd delimit the original (pre-merge) source that is
// embedded as a leading comment block in every uploaded submission; endpoint
// strips this wrapper back off when a submission is downloaded.
const wrapperStart = "/**************\nOriginal source\n"
const wrapperEnd = "\nOriginal source\n**************/\n\n"
// Creates a new arena backend to negotiate code snippets.
//
// It resolves the listener address, starts the file system monitor and the
// websocket endpoint in background goroutines, and returns the port the HTTP
// server listens on.
func backend() (int, error) {
	// Resolve the backend address. NOTE(review): the original comment said
	// "find an unused port", but the port is hard coded to 33214 — confirm.
	addr, err := net.ResolveTCPAddr("tcp4", "localhost:33214")
	if err != nil {
		return -1, err
	}
	// Create the file system monitor
	watcher, err = fsnotify.NewWatcher()
	if err != nil {
		return -1, err // was `return -1, nil`, which silently dropped the error
	}
	watches = make(map[string]*websocket.Conn)
	go monitor()
	// Register the websocket handlers
	http.HandleFunc("/", endpoint)
	go func() {
		log15.Info("Starting backend", "address", addr.String())
		if err := http.ListenAndServe(addr.String(), nil); err != nil {
			log15.Crit("failed to start backend", "error", err)
			os.Exit(-1)
		}
	}()
	return addr.Port, nil
}
// Upgrader to convert a simple HTTP request to a websocket connection.
// NOTE(review): CheckOrigin accepts every origin, so any web page may connect
// to this backend — confirm this is acceptable for a localhost-only tool.
var upgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
	CheckOrigin:     func(r *http.Request) bool { return true },
}

// Data associated with a challenge.
type challenge struct {
	Name   string `json:"name"`   // directory name of the challenge inside the repository
	Source string `json:"source"` // solution source code (contents of main.go)
}
// Websocket inbound connection handler.
//
// Runs one negotiation loop per connection: it reads challenge descriptors
// from the client, registers new challenges into the local repository, denies
// downloads that conflict with the locally stored solution, and starts file
// system monitoring for accepted challenges.
func endpoint(w http.ResponseWriter, r *http.Request) {
	log15.Info("inbound websocket connection")
	// Upgrade the request to a websocket connection
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log15.Error("failed to upgrade to ws connection", "error", err)
		return
	}
	defer conn.Close()
	// open/conflict remember the last notified challenge so the user is not
	// spammed with the same notification on every incoming message.
	open, conflict := "", ""
	for {
		// Fetch the challenge details
		msg := new(challenge)
		// msg is already a *challenge; the original passed &msg (a **challenge),
		// which decodes identically but adds a pointless level of indirection.
		if err := conn.ReadJSON(msg); err != nil {
			log15.Error("failed to retrieve challenge data", "error", err)
			return
		}
		// Pre process the source code: strip the embedded original-source
		// wrapper if this submission round-tripped through the arena before.
		msg.Name = strings.TrimSpace(msg.Name)
		if strings.Contains(msg.Source, wrapperStart) && strings.Contains(msg.Source, wrapperEnd) {
			msg.Source = strings.Split(msg.Source, wrapperStart)[1]
			msg.Source = strings.Split(msg.Source, wrapperEnd)[0]
		}
		// If it's a new challenge, add it to the repository
		root := filepath.Join(*repo, msg.Name)
		main := filepath.Join(root, "main.go")
		if _, err := os.Stat(root); err != nil {
			log15.Info("new challenge found", "name", msg.Name)
			if err := os.MkdirAll(root, 0700); err != nil {
				log15.Error("failed to create challenge", "error", err)
				return
			}
			if err := ioutil.WriteFile(main, []byte(msg.Source), 0700); err != nil {
				log15.Error("failed to write challenge", "error", err)
				return
			}
			notify(conn, "success", "New challenge registered: "+msg.Name)
		} else {
			// Otherwise make sure we're not conflicting
			if source, err := ioutil.ReadFile(main); err != nil {
				log15.Error("failed to retrieve existing solution", "error", err)
				return
			} else if string(source) != msg.Source {
				log15.Warn("solution conflict, download denied", "name", msg.Name)
				// Only notify on the second consecutive conflict for this name
				if conflict == msg.Name {
					notify(conn, "warning", "Solution conflict, download denied: "+msg.Name)
				}
				conflict = msg.Name
				continue
			}
			conflict = ""
			if open != msg.Name {
				open = msg.Name
				notify(conn, "information", "Challenge accepting uploads: "+msg.Name)
			}
		}
		// Try to monitor the file. NOTE(review): watches is also read by the
		// monitor goroutine without synchronization — confirm whether a lock
		// is needed.
		if _, ok := watches[msg.Name]; !ok {
			log15.Info("starting challenge monitoring", "name", msg.Name)
			if err := watcher.Add(root); err != nil {
				log15.Error("failed to monitor the challenge", "error", err)
				return
			}
			watches[msg.Name] = conn
		}
	}
}
// File system monitor to detect changes.
var watcher *fsnotify.Watcher

// Challenge name -> websocket connection that should receive uploads for it.
// NOTE(review): written by endpoint handlers and read by the monitor
// goroutine without synchronization — confirm whether a mutex is needed.
var watches map[string]*websocket.Conn
// Keeps processing monitoring events and reacts to source changes.
//
// Whenever a watched challenge's main.go is written or created, the current
// solution is read back, wrapped into a comment block, merged with its
// dependencies and uploaded through the websocket connection registered for
// that challenge.
func monitor() {
	for {
		select {
		case event := <-watcher.Events:
			if path.Base(event.Name) == "main.go" {
				if event.Op&fsnotify.Write == fsnotify.Write || event.Op&fsnotify.Create == fsnotify.Create {
					dir, _ := path.Split(event.Name)
					name := path.Base(dir)
					log15.Info("uploading modified solution", "name", name)
					// Retrieve the user solution and wrap it in a comment block
					source, err := ioutil.ReadFile(event.Name)
					if err != nil {
						log15.Error("failed to retrieve solution", "error", err)
						continue
					}
					wrapped := wrapperStart + string(source) + wrapperEnd
					// Merge all the dependencies to generate the submission
					merged, err := deps.Merge(event.Name, true)
					if err != nil {
						// fixed structured-log key typo: "errir" -> "error"
						log15.Error("failed to merge submit dependencies", "error", err)
						continue
					}
					// Serialize the wrapped original and the standalone submission
					conn := watches[name]
					if err := conn.WriteJSON(&challenge{Name: name, Source: wrapped + string(merged)}); err != nil {
						log15.Error("failed to upload new solution", "error", err)
						continue
					}
					notify(conn, "success", "Submission uploaded: "+name+" at "+time.Now().Format("15:04:05"))
				}
			}
		case err := <-watcher.Errors:
			log15.Error("file system monitor failure", "error", err)
		}
	}
}
|
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bufio"
"context"
"errors"
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"cloud.google.com/go/compute/metadata"
"github.com/briandowns/spinner"
"github.com/fatih/color"
)
const (
	// Names of the flags passed in by the Cloud Shell launch URL.
	flRepoURL   = "repo_url"
	flGitBranch = "git_branch"
	flSubDir    = "dir"
	flPage      = "page"
	flContext   = "context"
	// How long, and how often, to poll the metadata server for credentials.
	reauthCredentialsWaitTimeout     = time.Minute * 2
	reauthCredentialsPollingInterval = time.Second
	// Console page where the user can create projects / billing accounts.
	projectCreateURL = "https://console.cloud.google.com/cloud-resource-manager"
)
var (
	// Terminal styles used for the various message types printed to the user.
	linkLabel      = color.New(color.Bold, color.Underline)
	parameterLabel = color.New(color.FgHiYellow, color.Bold, color.Underline)
	errorLabel     = color.New(color.FgRed, color.Bold)
	warningLabel   = color.New(color.Bold, color.FgHiYellow)
	successLabel   = color.New(color.Bold, color.FgGreen)
	// Status-line prefixes, e.g. "[ ✓ ]".
	successPrefix = fmt.Sprintf("[ %s ]", successLabel.Sprint("✓"))
	errorPrefix   = fmt.Sprintf("[ %s ]", errorLabel.Sprint("✖"))
	infoPrefix    = fmt.Sprintf("[ %s ]", warningLabel.Sprint("!"))
	// we have to reset the inherited color first from survey.QuestionIcon
	// see https://github.com/AlecAivazis/survey/issues/193
	questionPrefix = fmt.Sprintf("%s %s ]",
		color.New(color.Reset).Sprint("["),
		color.New(color.Bold, color.FgYellow).Sprint("?"))
	questionSelectFocusIcon = "❯"
	// Parsed command line options and the flag set that fills them in.
	opts  runOpts
	flags = flag.NewFlagSet("cloudshell_open", flag.ContinueOnError)
)
// init registers the command line flags the Cloud Shell launcher passes to
// this program. The "page" flag is accepted but intentionally ignored.
func init() {
	flags.StringVar(&opts.repoURL, flRepoURL, "", "url to git repo")
	flags.StringVar(&opts.gitBranch, flGitBranch, "", "(optional) branch/revision to use from the git repo")
	flags.StringVar(&opts.subDir, flSubDir, "", "(optional) sub-directory to deploy in the repo")
	flags.StringVar(&opts.context, flContext, "", "(optional) arbitrary context")
	_ = flags.String(flPage, "", "ignored")
}
// main parses the command line flags and drives the deployment flow, exiting
// with a non-zero status code when the flow fails.
func main() {
	printUsage := flags.Usage
	flags.Usage = func() {} // suppress the automatic usage print; we decide when
	switch err := flags.Parse(os.Args[1:]); {
	case err == flag.ErrHelp:
		printUsage()
		return
	case err != nil:
		fmt.Printf("%s flag parsing issue: %+v\n", warningLabel.Sprint("internal warning:"), err)
	}
	if err := run(opts); err != nil {
		fmt.Printf("%s %+v\n", errorLabel.Sprint("Error:"), err)
		os.Exit(1)
	}
}
// runOpts carries the parsed command line flag values for a single run.
type runOpts struct {
	repoURL   string // git repository to clone (required)
	gitBranch string // optional branch/revision to check out
	subDir    string // optional sub-directory of the repo to deploy
	context   string // optional arbitrary context string from the launch URL
}
// logProgress starts an animated spinner labelled msg and returns a completion
// callback. Calling the callback with true stops the spinner and prints endMsg
// (when non-empty); calling it with false prints errMsg instead.
func logProgress(msg, endMsg, errMsg string) func(bool) {
	spin := spinner.New(spinner.CharSets[9], 300*time.Millisecond)
	spin.Prefix = "[ "
	spin.Suffix = " ] " + msg
	spin.Start()
	return func(success bool) {
		spin.Stop()
		if !success {
			fmt.Printf("%s %s\n", errorPrefix, errMsg)
			return
		}
		if endMsg != "" {
			fmt.Printf("%s %s\n", successPrefix, endMsg)
		}
	}
}
// run executes the end-to-end "open in Cloud Shell" deployment flow: it waits
// for credentials, clones the repository, prompts for a project and region,
// builds and pushes the container image, and deploys it to Cloud Run, printing
// progress and the equivalent commands along the way.
func run(opts runOpts) error {
	ctx := context.Background()
	highlight := func(s string) string { return color.CyanString(s) }
	parameter := func(s string) string { return parameterLabel.Sprint(s) }
	cmdColor := color.New(color.FgHiBlue)
	repo := opts.repoURL
	if repo == "" {
		return fmt.Errorf("--%s not specified", flRepoURL)
	}
	// Refuse to continue in an untrusted Cloud Shell: no credentials there.
	trusted := os.Getenv("TRUSTED_ENVIRONMENT") == "true"
	if !trusted {
		fmt.Printf("%s You launched this custom Cloud Shell image as \"Do not trust\".\n"+
			"In this mode, your credentials are not available and this experience\n"+
			"cannot deploy to Cloud Run. Start over and \"Trust\" the image.\n", errorLabel.Sprint("Error:"))
		return errors.New("aborting due to untrusted cloud shell environment")
	}
	// Poll until GCP credentials are visible on the metadata server.
	end := logProgress("Waiting for your approval to 'Authorize' Cloud Shell...",
		"",
		"Failed to get GCP credentials. Please authorize Cloud Shell if you're presented with a prompt.",
	)
	time.Sleep(time.Second * 2)
	waitCtx, cancelWait := context.WithTimeout(ctx, reauthCredentialsWaitTimeout)
	err := waitCredsAvailable(waitCtx, reauthCredentialsPollingInterval)
	cancelWait()
	end(err == nil)
	if err != nil {
		return err
	}
	// Clone the repository and check out the requested revision, if any.
	end = logProgress(fmt.Sprintf("Cloning git repository %s...", highlight(repo)),
		fmt.Sprintf("Cloned git repository %s.", highlight(repo)),
		fmt.Sprintf("Failed to clone git repository %s", highlight(repo)))
	cloneDir, err := handleRepo(repo)
	end(err == nil)
	if err != nil {
		return err
	}
	if opts.gitBranch != "" {
		if err := gitCheckout(cloneDir, opts.gitBranch); err != nil {
			return fmt.Errorf("failed to checkout revision %q: %+v", opts.gitBranch, err)
		}
	}
	appDir := cloneDir
	if opts.subDir != "" {
		// verify if --dir is valid
		appDir = filepath.Join(cloneDir, opts.subDir)
		if fi, err := os.Stat(appDir); err != nil {
			if os.IsNotExist(err) {
				return fmt.Errorf("sub-directory doesn't exist in the cloned repository: %s", appDir)
			}
			return fmt.Errorf("failed to check sub-directory in the repo: %v", err)
		} else if !fi.IsDir() {
			return fmt.Errorf("specified sub-directory path %s is not a directory", appDir)
		}
	}
	appFile, err := getAppFile(appDir)
	if err != nil {
		return fmt.Errorf("error attempting to read the app.json from the cloned repository: %+v", err)
	}
	// Keep prompting until the user has at least one project to deploy into.
	var projects []string
	for len(projects) == 0 {
		end = logProgress("Retrieving your GCP projects...",
			"Queried list of your GCP projects",
			"Failed to retrieve your GCP projects.",
		)
		projects, err = listProjects()
		end(err == nil)
		if err != nil {
			return err
		}
		if len(projects) == 0 {
			fmt.Print(errorPrefix+" "+
				warningLabel.Sprint("You don't have any GCP projects to deploy into!")+
				"\n 1. Visit "+linkLabel.Sprint(projectCreateURL),
				"\n 2. Create a new GCP project with a billing account",
				"\n 3. Once you're done, press "+parameterLabel.Sprint("Enter")+" to continue: ")
			if _, err := bufio.NewReader(os.Stdin).ReadBytes('\n'); err != nil {
				return err
			}
		}
	}
	if len(projects) > 1 {
		fmt.Printf(successPrefix+" Found %s projects in your GCP account.\n",
			successLabel.Sprintf("%d", len(projects)))
	}
	project, err := promptProject(projects)
	if err != nil {
		return err
	}
	// Block until the chosen project has billing enabled.
	if err := waitForBilling(project, func(p string) error {
		fmt.Print(errorPrefix+" "+
			warningLabel.Sprint("GCP project you chose does not have an active billing account!")+
			"\n 1. Visit "+linkLabel.Sprint(projectCreateURL),
			"\n 2. Associate a billing account for project "+parameterLabel.Sprint(p),
			"\n 3. Once you're done, press "+parameterLabel.Sprint("Enter")+" to continue: ")
		if _, err := bufio.NewReader(os.Stdin).ReadBytes('\n'); err != nil {
			return err
		}
		return nil
	}); err != nil {
		return err
	}
	end = logProgress(
		fmt.Sprintf("Enabling Cloud Run API on project %s...", highlight(project)),
		fmt.Sprintf("Enabled Cloud Run API on project %s.", highlight(project)),
		fmt.Sprintf("Failed to enable required APIs on project %s.", highlight(project)))
	err = enableAPIs(project, []string{"run.googleapis.com", "containerregistry.googleapis.com"})
	end(err == nil)
	if err != nil {
		return err
	}
	region, err := promptDeploymentRegion(ctx, project)
	if err != nil {
		return err
	}
	// Derive the service name and image URL from app.json or the repo name.
	repoName := filepath.Base(appDir)
	serviceName := repoName
	if appFile.Name != "" {
		serviceName = appFile.Name
	}
	serviceName = tryFixServiceName(serviceName)
	image := fmt.Sprintf("gcr.io/%s/%s", project, serviceName)
	existingEnvVars := make(map[string]struct{})
	// todo(jamesward) actually determine if the service exists instead of assuming it doesn't if we get an error
	existingService, err := getService(project, serviceName, region)
	if err == nil {
		// service exists
		// NOTE(review): an error from envVars is overwritten by the
		// promptOrGenerateEnvs call below and silently lost — confirm.
		existingEnvVars, err = envVars(project, serviceName, region)
	}
	neededEnvs := needEnvs(appFile.Env, existingEnvVars)
	envs, err := promptOrGenerateEnvs(neededEnvs)
	if err != nil {
		return err
	}
	// Environment passed to the app.json lifecycle hooks.
	projectEnv := fmt.Sprintf("GOOGLE_CLOUD_PROJECT=%s", project)
	regionEnv := fmt.Sprintf("GOOGLE_CLOUD_REGION=%s", region)
	serviceEnv := fmt.Sprintf("K_SERVICE=%s", serviceName)
	imageEnv := fmt.Sprintf("IMAGE_URL=%s", image)
	appDirEnv := fmt.Sprintf("APP_DIR=%s", appDir)
	inheritedEnv := os.Environ()
	hookEnvs := append([]string{projectEnv, regionEnv, serviceEnv, imageEnv, appDirEnv}, envs...)
	hookEnvs = append(hookEnvs, inheritedEnv...)
	pushImage := true
	if appFile.Hooks.PreBuild.Commands != nil {
		// NOTE(review): this error can be reassigned by the build branches
		// below before it is checked, masking a pre-build failure — confirm.
		err = runScripts(appDir, appFile.Hooks.PreBuild.Commands, hookEnvs)
	}
	skipBuild := appFile.Build.Skip != nil && *appFile.Build.Skip == true
	builderImage := "heroku/buildpacks"
	if opts.context == "cloudrun-gbp" {
		builderImage = "gcr.io/buildpacks/builder"
	}
	// Pick a build strategy: none, Dockerfile, Jib (Maven) or buildpacks.
	if skipBuild {
		fmt.Println(infoPrefix + " Skipping built-in build methods")
	} else if dockerFileExists, _ := dockerFileExists(appDir); dockerFileExists {
		fmt.Println(infoPrefix + " Attempting to build this application with its Dockerfile...")
		fmt.Println(infoPrefix + " FYI, running the following command:")
		cmdColor.Printf("\tdocker build -t %s %s\n", parameter(image), parameter("."))
		err = dockerBuild(appDir, image)
	} else if jibMaven, _ := jibMavenConfigured(appDir); jibMaven {
		pushImage = false
		fmt.Println(infoPrefix + " Attempting to build this application with Jib Maven plugin...")
		fmt.Println(infoPrefix + " FYI, running the following command:")
		cmdColor.Printf("\tmvn package jib:build -Dimage=%s\n", parameter(image))
		err = jibMavenBuild(appDir, image)
	} else {
		fmt.Println(infoPrefix + " Attempting to build this application with Cloud Native Buildpacks (buildpacks.io)...")
		fmt.Println(infoPrefix + " FYI, running the following command:")
		cmdColor.Printf("\tpack build %s --path %s --builder %s\n", parameter(image), parameter(appDir), parameter(builderImage))
		err = packBuild(appDir, image, builderImage)
	}
	if !skipBuild {
		end = logProgress(fmt.Sprintf("Building container image %s", highlight(image)),
			fmt.Sprintf("Built container image %s", highlight(image)),
			"Failed to build container image.")
	}
	// NOTE(review): when skipBuild is true, this re-invokes the stale `end`
	// callback left over from the "Enabling Cloud Run API" step — confirm.
	end(err == nil)
	if err != nil {
		return fmt.Errorf("attempted to build and failed: %s", err)
	}
	if appFile.Hooks.PostBuild.Commands != nil {
		// NOTE(review): this error is overwritten by dockerPush below when
		// pushImage is true, so a post-build failure may be lost — confirm.
		err = runScripts(appDir, appFile.Hooks.PostBuild.Commands, hookEnvs)
	}
	if pushImage {
		fmt.Println(infoPrefix + " FYI, running the following command:")
		cmdColor.Printf("\tdocker push %s\n", parameter(image))
		end = logProgress("Pushing container image...",
			"Pushed container image to Google Container Registry.",
			"Failed to push container image to Google Container Registry.")
		err = dockerPush(image)
		end(err == nil)
		if err != nil {
			return fmt.Errorf("failed to push image to %s: %+v", image, err)
		}
	}
	// First-time deployments run the pre-create hook before deploying.
	if existingService == nil {
		err = runScripts(appDir, appFile.Hooks.PreCreate.Commands, hookEnvs)
		if err != nil {
			return err
		}
	}
	optionsFlags := optionsToFlags(appFile.Options)
	serviceLabel := highlight(serviceName)
	// Echo the equivalent gcloud command line for transparency.
	fmt.Println(infoPrefix + " FYI, running the following command:")
	cmdColor.Printf("\tgcloud run deploy %s", parameter(serviceName))
	cmdColor.Println("\\")
	cmdColor.Printf("\t --project=%s", parameter(project))
	cmdColor.Println("\\")
	cmdColor.Printf("\t --platform=%s", parameter("managed"))
	cmdColor.Println("\\")
	cmdColor.Printf("\t --region=%s", parameter(region))
	cmdColor.Println("\\")
	cmdColor.Printf("\t --image=%s", parameter(image))
	if len(envs) > 0 {
		cmdColor.Println("\\")
		cmdColor.Printf("\t --update-env-vars=%s", parameter(strings.Join(envs, ",")))
	}
	for _, optionFlag := range optionsFlags {
		cmdColor.Println("\\")
		cmdColor.Printf("\t %s", optionFlag)
	}
	cmdColor.Println("")
	end = logProgress(fmt.Sprintf("Deploying service %s to Cloud Run...", serviceLabel),
		fmt.Sprintf("Successfully deployed service %s to Cloud Run.", serviceLabel),
		"Failed deploying the application to Cloud Run.")
	url, err := deploy(project, serviceName, image, region, envs, appFile.Options)
	end(err == nil)
	if err != nil {
		return err
	}
	// First-time deployments also run the post-create hook.
	if existingService == nil {
		err = runScripts(appDir, appFile.Hooks.PostCreate.Commands, hookEnvs)
		if err != nil {
			return err
		}
	}
	// Final success summary with links to the deployed service.
	fmt.Printf("* This application is billed only when it's handling requests.\n")
	fmt.Printf("* Manage this application at Cloud Console:\n\t")
	color.New(color.Underline, color.Bold).Printf("https://console.cloud.google.com/run/detail/%s/%s?project=%s\n", region, serviceName, project)
	fmt.Printf("* Learn more about Cloud Run:\n\t")
	color.New(color.Underline, color.Bold).Println("https://cloud.google.com/run/docs")
	fmt.Printf(successPrefix+" %s%s\n",
		color.New(color.Bold).Sprint("Your application is now live here:\n\t"),
		color.New(color.Bold, color.FgGreen, color.Underline).Sprint(url))
	return nil
}
// optionsToFlags converts the app.json deployment options into the matching
// `gcloud run deploy` command line flags. The authentication flag is always
// emitted; memory and CPU flags only when configured.
func optionsToFlags(options options) []string {
	auth := "--allow-unauthenticated"
	if options.AllowUnauthenticated != nil && !*options.AllowUnauthenticated {
		auth = "--no-allow-unauthenticated"
	}
	out := []string{auth}
	if options.Memory != "" {
		out = append(out, fmt.Sprintf("--memory=%s", options.Memory))
	}
	if options.CPU != "" {
		out = append(out, fmt.Sprintf("--cpu=%s", options.CPU))
	}
	return out
}
// waitCredsAvailable polls until Cloud Shell VM has available credentials.
// Credentials might be missing in the environment for some GSuite users that
// need to authenticate every N hours. See internal bug 154573156 for details.
//
// It returns nil immediately when not running on GCE (unless SKIP_GCE_CHECK
// forces the check), a descriptive error when ctx expires, or nil once the
// metadata server reports a non-empty service account list.
func waitCredsAvailable(ctx context.Context, pollInterval time.Duration) error {
	if os.Getenv("SKIP_GCE_CHECK") == "" && !metadata.OnGCE() {
		return nil
	}
	for {
		select {
		case <-ctx.Done():
			err := ctx.Err()
			// Translate a deadline into an actionable message for the user.
			if err == context.DeadlineExceeded {
				return errors.New("credentials were not available in the VM, try re-authenticating if Cloud Shell presents an authentication prompt and click the button again")
			}
			return err
		default:
			// Non-empty response means at least one service account exists.
			v, err := metadata.Get("instance/service-accounts/")
			if err != nil {
				return fmt.Errorf("failed to query metadata service to see if credentials are present: %w", err)
			}
			if strings.TrimSpace(v) != "" {
				return nil
			}
			time.Sleep(pollInterval)
		}
	}
}
// waitForBilling blocks until projectID has an active billing account,
// invoking prompt between checks so the user can fix the account and retry.
// It returns the first error from either the billing check or the prompt.
func waitForBilling(projectID string, prompt func(string) error) error {
	for {
		switch enabled, err := checkBillingEnabled(projectID); {
		case err != nil:
			return err
		case enabled:
			return nil
		}
		if err := prompt(projectID); err != nil {
			return err
		}
	}
}
// hasSubDirsInPATH determines if anything in PATH is a sub-directory of dir.
// It errors when PATH is empty or when a PATH entry cannot be compared.
func hasSubDirsInPATH(dir string) (bool, error) {
	pathEnv := os.Getenv("PATH")
	if pathEnv == "" {
		return false, errors.New("PATH is empty")
	}
	for _, entry := range strings.Split(pathEnv, string(os.PathListSeparator)) {
		sub, err := isSubPath(dir, entry)
		if err != nil {
			return false, fmt.Errorf("failure assessing if paths are the same: %v", err)
		}
		if sub {
			return true, nil
		}
	}
	return false, nil
}
// isSubPath determines whether b is located under (or equals) a. Both paths
// are normalized to absolute paths before comparison.
//
// It returns an error when either path cannot be made absolute or when no
// relative path between the two can be computed.
func isSubPath(a, b string) (bool, error) {
	a, err := filepath.Abs(a)
	if err != nil {
		return false, fmt.Errorf("failed to get absolute path for %s: %+v", a, err)
	}
	b, err = filepath.Abs(b)
	if err != nil {
		return false, fmt.Errorf("failed to get absolute path for %s: %+v", b, err)
	}
	v, err := filepath.Rel(a, b)
	if err != nil {
		return false, fmt.Errorf("failed to calculate relative path: %v", err)
	}
	// b is outside a when the relative path has to climb out of a: either it
	// is exactly ".." (b is the direct parent — previously mis-classified as
	// a sub-path, since ".." does not have the "../" prefix) or it starts
	// with "../".
	if v == ".." || strings.HasPrefix(v, ".."+string(os.PathSeparator)) {
		return false, nil
	}
	return true, nil
}
reword message for shell authorization (#178)
This message almost always shows for ~1s, so changing
to something less confusing that doesn't tell the user
there's an interaction required.
Signed-off-by: Ahmet Alp Balkan <c786c8527fd6be7431bb94ca48b6f76cc06787cc@google.com>
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bufio"
"context"
"errors"
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"cloud.google.com/go/compute/metadata"
"github.com/briandowns/spinner"
"github.com/fatih/color"
)
const (
	// Names of the flags passed in by the Cloud Shell launch URL.
	flRepoURL   = "repo_url"
	flGitBranch = "git_branch"
	flSubDir    = "dir"
	flPage      = "page"
	flContext   = "context"
	// How long, and how often, to poll the metadata server for credentials.
	reauthCredentialsWaitTimeout     = time.Minute * 2
	reauthCredentialsPollingInterval = time.Second
	// Console page where the user can create projects / billing accounts.
	projectCreateURL = "https://console.cloud.google.com/cloud-resource-manager"
)
var (
	// Terminal styles used for the various message types printed to the user.
	linkLabel      = color.New(color.Bold, color.Underline)
	parameterLabel = color.New(color.FgHiYellow, color.Bold, color.Underline)
	errorLabel     = color.New(color.FgRed, color.Bold)
	warningLabel   = color.New(color.Bold, color.FgHiYellow)
	successLabel   = color.New(color.Bold, color.FgGreen)
	// Status-line prefixes, e.g. "[ ✓ ]".
	successPrefix = fmt.Sprintf("[ %s ]", successLabel.Sprint("✓"))
	errorPrefix   = fmt.Sprintf("[ %s ]", errorLabel.Sprint("✖"))
	infoPrefix    = fmt.Sprintf("[ %s ]", warningLabel.Sprint("!"))
	// we have to reset the inherited color first from survey.QuestionIcon
	// see https://github.com/AlecAivazis/survey/issues/193
	questionPrefix = fmt.Sprintf("%s %s ]",
		color.New(color.Reset).Sprint("["),
		color.New(color.Bold, color.FgYellow).Sprint("?"))
	questionSelectFocusIcon = "❯"
	// Parsed command line options and the flag set that fills them in.
	opts  runOpts
	flags = flag.NewFlagSet("cloudshell_open", flag.ContinueOnError)
)
// init registers the command line flags the Cloud Shell launcher passes to
// this program. The "page" flag is accepted but intentionally ignored.
func init() {
	flags.StringVar(&opts.repoURL, flRepoURL, "", "url to git repo")
	flags.StringVar(&opts.gitBranch, flGitBranch, "", "(optional) branch/revision to use from the git repo")
	flags.StringVar(&opts.subDir, flSubDir, "", "(optional) sub-directory to deploy in the repo")
	flags.StringVar(&opts.context, flContext, "", "(optional) arbitrary context")
	_ = flags.String(flPage, "", "ignored")
}
// main parses the command line flags and drives the deployment flow, exiting
// with a non-zero status code when the flow fails.
func main() {
	printUsage := flags.Usage
	flags.Usage = func() {} // suppress the automatic usage print; we decide when
	switch err := flags.Parse(os.Args[1:]); {
	case err == flag.ErrHelp:
		printUsage()
		return
	case err != nil:
		fmt.Printf("%s flag parsing issue: %+v\n", warningLabel.Sprint("internal warning:"), err)
	}
	if err := run(opts); err != nil {
		fmt.Printf("%s %+v\n", errorLabel.Sprint("Error:"), err)
		os.Exit(1)
	}
}
// runOpts carries the parsed command line flag values for a single run.
type runOpts struct {
	repoURL   string // git repository to clone (required)
	gitBranch string // optional branch/revision to check out
	subDir    string // optional sub-directory of the repo to deploy
	context   string // optional arbitrary context string from the launch URL
}
// logProgress starts an animated spinner labelled msg and returns a completion
// callback. Calling the callback with true stops the spinner and prints endMsg
// (when non-empty); calling it with false prints errMsg instead.
func logProgress(msg, endMsg, errMsg string) func(bool) {
	spin := spinner.New(spinner.CharSets[9], 300*time.Millisecond)
	spin.Prefix = "[ "
	spin.Suffix = " ] " + msg
	spin.Start()
	return func(success bool) {
		spin.Stop()
		if !success {
			fmt.Printf("%s %s\n", errorPrefix, errMsg)
			return
		}
		if endMsg != "" {
			fmt.Printf("%s %s\n", successPrefix, endMsg)
		}
	}
}
func run(opts runOpts) error {
ctx := context.Background()
highlight := func(s string) string { return color.CyanString(s) }
parameter := func(s string) string { return parameterLabel.Sprint(s) }
cmdColor := color.New(color.FgHiBlue)
repo := opts.repoURL
if repo == "" {
return fmt.Errorf("--%s not specified", flRepoURL)
}
trusted := os.Getenv("TRUSTED_ENVIRONMENT") == "true"
if !trusted {
fmt.Printf("%s You launched this custom Cloud Shell image as \"Do not trust\".\n"+
"In this mode, your credentials are not available and this experience\n"+
"cannot deploy to Cloud Run. Start over and \"Trust\" the image.\n", errorLabel.Sprint("Error:"))
return errors.New("aborting due to untrusted cloud shell environment")
}
end := logProgress("Waiting for Cloud Shell authorization...",
"",
"Failed to get GCP credentials. Please authorize Cloud Shell if you're presented with a prompt.",
)
time.Sleep(time.Second * 2)
waitCtx, cancelWait := context.WithTimeout(ctx, reauthCredentialsWaitTimeout)
err := waitCredsAvailable(waitCtx, reauthCredentialsPollingInterval)
cancelWait()
end(err == nil)
if err != nil {
return err
}
end = logProgress(fmt.Sprintf("Cloning git repository %s...", highlight(repo)),
fmt.Sprintf("Cloned git repository %s.", highlight(repo)),
fmt.Sprintf("Failed to clone git repository %s", highlight(repo)))
cloneDir, err := handleRepo(repo)
end(err == nil)
if err != nil {
return err
}
if opts.gitBranch != "" {
if err := gitCheckout(cloneDir, opts.gitBranch); err != nil {
return fmt.Errorf("failed to checkout revision %q: %+v", opts.gitBranch, err)
}
}
appDir := cloneDir
if opts.subDir != "" {
// verify if --dir is valid
appDir = filepath.Join(cloneDir, opts.subDir)
if fi, err := os.Stat(appDir); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("sub-directory doesn't exist in the cloned repository: %s", appDir)
}
return fmt.Errorf("failed to check sub-directory in the repo: %v", err)
} else if !fi.IsDir() {
return fmt.Errorf("specified sub-directory path %s is not a directory", appDir)
}
}
appFile, err := getAppFile(appDir)
if err != nil {
return fmt.Errorf("error attempting to read the app.json from the cloned repository: %+v", err)
}
var projects []string
for len(projects) == 0 {
end = logProgress("Retrieving your GCP projects...",
"Queried list of your GCP projects",
"Failed to retrieve your GCP projects.",
)
projects, err = listProjects()
end(err == nil)
if err != nil {
return err
}
if len(projects) == 0 {
fmt.Print(errorPrefix+" "+
warningLabel.Sprint("You don't have any GCP projects to deploy into!")+
"\n 1. Visit "+linkLabel.Sprint(projectCreateURL),
"\n 2. Create a new GCP project with a billing account",
"\n 3. Once you're done, press "+parameterLabel.Sprint("Enter")+" to continue: ")
if _, err := bufio.NewReader(os.Stdin).ReadBytes('\n'); err != nil {
return err
}
}
}
if len(projects) > 1 {
fmt.Printf(successPrefix+" Found %s projects in your GCP account.\n",
successLabel.Sprintf("%d", len(projects)))
}
project, err := promptProject(projects)
if err != nil {
return err
}
if err := waitForBilling(project, func(p string) error {
fmt.Print(errorPrefix+" "+
warningLabel.Sprint("GCP project you chose does not have an active billing account!")+
"\n 1. Visit "+linkLabel.Sprint(projectCreateURL),
"\n 2. Associate a billing account for project "+parameterLabel.Sprint(p),
"\n 3. Once you're done, press "+parameterLabel.Sprint("Enter")+" to continue: ")
if _, err := bufio.NewReader(os.Stdin).ReadBytes('\n'); err != nil {
return err
}
return nil
}); err != nil {
return err
}
end = logProgress(
fmt.Sprintf("Enabling Cloud Run API on project %s...", highlight(project)),
fmt.Sprintf("Enabled Cloud Run API on project %s.", highlight(project)),
fmt.Sprintf("Failed to enable required APIs on project %s.", highlight(project)))
err = enableAPIs(project, []string{"run.googleapis.com", "containerregistry.googleapis.com"})
end(err == nil)
if err != nil {
return err
}
region, err := promptDeploymentRegion(ctx, project)
if err != nil {
return err
}
repoName := filepath.Base(appDir)
serviceName := repoName
if appFile.Name != "" {
serviceName = appFile.Name
}
serviceName = tryFixServiceName(serviceName)
image := fmt.Sprintf("gcr.io/%s/%s", project, serviceName)
existingEnvVars := make(map[string]struct{})
// todo(jamesward) actually determine if the service exists instead of assuming it doesn't if we get an error
existingService, err := getService(project, serviceName, region)
if err == nil {
// service exists
existingEnvVars, err = envVars(project, serviceName, region)
}
neededEnvs := needEnvs(appFile.Env, existingEnvVars)
envs, err := promptOrGenerateEnvs(neededEnvs)
if err != nil {
return err
}
projectEnv := fmt.Sprintf("GOOGLE_CLOUD_PROJECT=%s", project)
regionEnv := fmt.Sprintf("GOOGLE_CLOUD_REGION=%s", region)
serviceEnv := fmt.Sprintf("K_SERVICE=%s", serviceName)
imageEnv := fmt.Sprintf("IMAGE_URL=%s", image)
appDirEnv := fmt.Sprintf("APP_DIR=%s", appDir)
inheritedEnv := os.Environ()
hookEnvs := append([]string{projectEnv, regionEnv, serviceEnv, imageEnv, appDirEnv}, envs...)
hookEnvs = append(hookEnvs, inheritedEnv...)
pushImage := true
if appFile.Hooks.PreBuild.Commands != nil {
err = runScripts(appDir, appFile.Hooks.PreBuild.Commands, hookEnvs)
}
skipBuild := appFile.Build.Skip != nil && *appFile.Build.Skip == true
builderImage := "heroku/buildpacks"
if opts.context == "cloudrun-gbp" {
builderImage = "gcr.io/buildpacks/builder"
}
if skipBuild {
fmt.Println(infoPrefix + " Skipping built-in build methods")
} else if dockerFileExists, _ := dockerFileExists(appDir); dockerFileExists {
fmt.Println(infoPrefix + " Attempting to build this application with its Dockerfile...")
fmt.Println(infoPrefix + " FYI, running the following command:")
cmdColor.Printf("\tdocker build -t %s %s\n", parameter(image), parameter("."))
err = dockerBuild(appDir, image)
} else if jibMaven, _ := jibMavenConfigured(appDir); jibMaven {
pushImage = false
fmt.Println(infoPrefix + " Attempting to build this application with Jib Maven plugin...")
fmt.Println(infoPrefix + " FYI, running the following command:")
cmdColor.Printf("\tmvn package jib:build -Dimage=%s\n", parameter(image))
err = jibMavenBuild(appDir, image)
} else {
fmt.Println(infoPrefix + " Attempting to build this application with Cloud Native Buildpacks (buildpacks.io)...")
fmt.Println(infoPrefix + " FYI, running the following command:")
cmdColor.Printf("\tpack build %s --path %s --builder %s\n", parameter(image), parameter(appDir), parameter(builderImage))
err = packBuild(appDir, image, builderImage)
}
if !skipBuild {
end = logProgress(fmt.Sprintf("Building container image %s", highlight(image)),
fmt.Sprintf("Built container image %s", highlight(image)),
"Failed to build container image.")
}
end(err == nil)
if err != nil {
return fmt.Errorf("attempted to build and failed: %s", err)
}
if appFile.Hooks.PostBuild.Commands != nil {
err = runScripts(appDir, appFile.Hooks.PostBuild.Commands, hookEnvs)
}
if pushImage {
fmt.Println(infoPrefix + " FYI, running the following command:")
cmdColor.Printf("\tdocker push %s\n", parameter(image))
end = logProgress("Pushing container image...",
"Pushed container image to Google Container Registry.",
"Failed to push container image to Google Container Registry.")
err = dockerPush(image)
end(err == nil)
if err != nil {
return fmt.Errorf("failed to push image to %s: %+v", image, err)
}
}
if existingService == nil {
err = runScripts(appDir, appFile.Hooks.PreCreate.Commands, hookEnvs)
if err != nil {
return err
}
}
optionsFlags := optionsToFlags(appFile.Options)
serviceLabel := highlight(serviceName)
fmt.Println(infoPrefix + " FYI, running the following command:")
cmdColor.Printf("\tgcloud run deploy %s", parameter(serviceName))
cmdColor.Println("\\")
cmdColor.Printf("\t --project=%s", parameter(project))
cmdColor.Println("\\")
cmdColor.Printf("\t --platform=%s", parameter("managed"))
cmdColor.Println("\\")
cmdColor.Printf("\t --region=%s", parameter(region))
cmdColor.Println("\\")
cmdColor.Printf("\t --image=%s", parameter(image))
if len(envs) > 0 {
cmdColor.Println("\\")
cmdColor.Printf("\t --update-env-vars=%s", parameter(strings.Join(envs, ",")))
}
for _, optionFlag := range optionsFlags {
cmdColor.Println("\\")
cmdColor.Printf("\t %s", optionFlag)
}
cmdColor.Println("")
end = logProgress(fmt.Sprintf("Deploying service %s to Cloud Run...", serviceLabel),
fmt.Sprintf("Successfully deployed service %s to Cloud Run.", serviceLabel),
"Failed deploying the application to Cloud Run.")
url, err := deploy(project, serviceName, image, region, envs, appFile.Options)
end(err == nil)
if err != nil {
return err
}
if existingService == nil {
err = runScripts(appDir, appFile.Hooks.PostCreate.Commands, hookEnvs)
if err != nil {
return err
}
}
fmt.Printf("* This application is billed only when it's handling requests.\n")
fmt.Printf("* Manage this application at Cloud Console:\n\t")
color.New(color.Underline, color.Bold).Printf("https://console.cloud.google.com/run/detail/%s/%s?project=%s\n", region, serviceName, project)
fmt.Printf("* Learn more about Cloud Run:\n\t")
color.New(color.Underline, color.Bold).Println("https://cloud.google.com/run/docs")
fmt.Printf(successPrefix+" %s%s\n",
color.New(color.Bold).Sprint("Your application is now live here:\n\t"),
color.New(color.Bold, color.FgGreen, color.Underline).Sprint(url))
return nil
}
// optionsToFlags converts deployment options from the app manifest into the
// equivalent `gcloud run deploy` command-line flags (used for display and
// for building the deploy invocation).
func optionsToFlags(options options) []string {
	// At most three flags can be produced; pre-size accordingly.
	flags := make([]string, 0, 3)

	// Unauthenticated access is allowed unless explicitly disabled.
	authSetting := "--allow-unauthenticated"
	if options.AllowUnauthenticated != nil && !*options.AllowUnauthenticated {
		authSetting = "--no-allow-unauthenticated"
	}
	flags = append(flags, authSetting)

	if options.Memory != "" {
		flags = append(flags, fmt.Sprintf("--memory=%s", options.Memory))
	}
	if options.CPU != "" {
		flags = append(flags, fmt.Sprintf("--cpu=%s", options.CPU))
	}
	return flags
}
// waitCredsAvailable polls until Cloud Shell VM has available credentials.
// Credentials might be missing in the environment for some GSuite users that
// need to authenticate every N hours. See internal bug 154573156 for details.
//
// It returns nil immediately when not running on GCE (unless SKIP_GCE_CHECK
// is set, which forces the poll loop — presumably for testing; confirm).
func waitCredsAvailable(ctx context.Context, pollInterval time.Duration) error {
	if os.Getenv("SKIP_GCE_CHECK") == "" && !metadata.OnGCE() {
		return nil
	}
	for {
		select {
		case <-ctx.Done():
			err := ctx.Err()
			// Use errors.Is so wrapped deadline errors are also recognized.
			if errors.Is(err, context.DeadlineExceeded) {
				return errors.New("credentials were not available in the VM, try re-authenticating if Cloud Shell presents an authentication prompt and click the button again")
			}
			return err
		default:
			// Any non-empty service-account listing means credentials exist.
			v, err := metadata.Get("instance/service-accounts/")
			if err != nil {
				return fmt.Errorf("failed to query metadata service to see if credentials are present: %w", err)
			}
			if strings.TrimSpace(v) != "" {
				return nil
			}
			time.Sleep(pollInterval)
		}
	}
}
// waitForBilling blocks until the given project has billing enabled,
// invoking prompt between successive checks (typically asking the user to
// attach a billing account and press Enter).
func waitForBilling(projectID string, prompt func(string) error) error {
	for {
		enabled, err := checkBillingEnabled(projectID)
		if err != nil {
			return err
		}
		if enabled {
			return nil
		}
		if promptErr := prompt(projectID); promptErr != nil {
			return promptErr
		}
	}
}
// hasSubDirsInPATH determines if anything in PATH is a sub-directory of dir.
func hasSubDirsInPATH(dir string) (bool, error) {
path := os.Getenv("PATH")
if path == "" {
return false, errors.New("PATH is empty")
}
paths := strings.Split(path, string(os.PathListSeparator))
for _, p := range paths {
ok, err := isSubPath(dir, p)
if err != nil {
return false, fmt.Errorf("failure assessing if paths are the same: %v", err)
}
if ok {
return true, nil
}
}
return false, nil
}
// isSubPath determines b is under a. Both paths are evaluated by computing
// their abs paths before comparison. Note that a path is considered to be
// under itself (Rel yields "."), matching the previous behavior.
func isSubPath(a, b string) (bool, error) {
	a, err := filepath.Abs(a)
	if err != nil {
		return false, fmt.Errorf("failed to get absolute path for %s: %+v", a, err)
	}
	b, err = filepath.Abs(b)
	if err != nil {
		return false, fmt.Errorf("failed to get absolute path for %s: %+v", b, err)
	}
	v, err := filepath.Rel(a, b)
	if err != nil {
		return false, fmt.Errorf("failed to calculate relative path: %v", err)
	}
	// b is outside a when the relative path escapes upward. The bare ".."
	// case (b is the direct parent of a) must be checked explicitly: it has
	// no trailing separator, so the HasPrefix test alone misses it.
	if v == ".." || strings.HasPrefix(v, ".."+string(os.PathSeparator)) {
		return false, nil
	}
	return true, nil
}
|
// Enumerates USB devices, finds and identifies CrazyRadio USB dongle.
package main
import (
"fmt"
"log"
"os"
"time"
"github.com/kylelemons/gousb/usb"
)
// USB vendor/product IDs used to identify a CrazyRadio dongle (see the
// device filter in listDongles).
const (
	Vendor  = 0x1915
	Product = 0x7777
)

// Request is a vendor-specific USB control request code understood by the
// CrazyRadio firmware.
type Request uint8

// Control request codes sent via control().
// NOTE(review): ARD/ARC presumably stand for auto-retransmit delay/count
// (nRF24 terminology) — confirm against the CrazyRadio protocol docs.
const (
	SET_RADIO_CHANNEL Request = 0x01
	SET_RADIO_ADDRESS Request = 0x02
	SET_DATA_RATE     Request = 0x03
	SET_RADIO_POWER   Request = 0x04
	SET_RADIO_ARD     Request = 0x05
	SET_RADIO_ARC     Request = 0x06
	ACK_ENABLE        Request = 0x10
	SET_CONT_CARRIER  Request = 0x20
	CHANNEL_SCANN     Request = 0x21
	LAUNCH_BOOTLOADER Request = 0xFF
)

// DataRate selects the radio's air data rate.
type DataRate uint16

// Data-rate values for SET_DATA_RATE and power levels for SET_RADIO_POWER.
const (
	DATA_RATE_250K DataRate = 0
	DATA_RATE_1M   DataRate = 1
	DATA_RATE_2M   DataRate = 2

	RADIO_POWER_M18dBm = 0
	RADIO_POWER_M12dBm = 1
	RADIO_POWER_M6dBm  = 2
	RADIO_POWER_0dBm   = 3
)
// control issues a vendor-typed USB control transfer carrying the given
// request code, value and optional payload to the dongle.
func control(d *usb.Device, req Request, val uint16, data []byte) error {
	if _, err := d.Control(usb.REQUEST_TYPE_VENDOR, uint8(req), val, 0, data); err != nil {
		return err
	}
	return nil
}
// initDongle configures the radio with a fixed sequence of control
// transfers, applying the requested channel and data rate last. The first
// failing transfer aborts the sequence and its error is returned.
//
// NOTE(review): SET_RADIO_ARC is written twice (3, then 10) and a temporary
// channel (2) / data rate (250K) are set before the requested values —
// presumably mirroring a reference implementation where ordering matters;
// confirm against the CrazyRadio protocol documentation.
func initDongle(d *usb.Device, ch uint16, rate DataRate) (err error) {
	if err = control(d, SET_DATA_RATE, uint16(DATA_RATE_250K), nil); err != nil {
		return
	}
	if err = control(d, SET_RADIO_CHANNEL, 2, nil); err != nil {
		return
	}
	if err = control(d, SET_CONT_CARRIER, 0, nil); err != nil {
		return
	}
	// Default Crazyflie address E7:E7:E7:E7:E7.
	if err = control(d, SET_RADIO_ADDRESS, 0, []byte{0xE7, 0xE7, 0xE7, 0xE7, 0xE7}); err != nil {
		return
	}
	if err = control(d, SET_RADIO_POWER, RADIO_POWER_0dBm, nil); err != nil {
		return
	}
	if err = control(d, SET_RADIO_ARC, 3, nil); err != nil {
		return
	}
	if err = control(d, SET_RADIO_ARD, 32, nil); err != nil {
		return
	}
	if err = control(d, SET_RADIO_ARC, 10, nil); err != nil {
		return
	}
	// Finally apply the caller-requested channel and data rate.
	if err = control(d, SET_RADIO_CHANNEL, ch, nil); err != nil {
		return
	}
	if err = control(d, SET_DATA_RATE, uint16(rate), nil); err != nil {
		return
	}
	return
}
// reader loops forever, reading packets from the IN endpoint and logging
// each one; read errors are logged and the loop continues.
func reader(in usb.Endpoint) {
	packet := make([]byte, 128)
	for {
		n, err := in.Read(packet)
		if err != nil {
			log.Printf("Error: reader: %v", err)
			continue
		}
		log.Printf("Reader, len: %d, package: %v", n, packet[:n])
	}
}
// writer loops forever, sending a single 0xFF byte to the OUT endpoint;
// write errors are logged and the loop continues.
func writer(out usb.Endpoint) {
	ping := []byte{0xFF}
	for {
		if _, err := out.Write(ping); err != nil {
			log.Printf("Error: writer: %v", err)
		}
	}
}
// listDongles enumerates CrazyRadio dongles, opens the IN/OUT endpoints of
// the first one, initializes the radio, and then runs the reader and writer
// loops until the process is interrupted.
func listDongles() error {
	ctx := usb.NewContext()
	defer ctx.Close()
	// Match devices by the declared Vendor/Product constants rather than
	// repeating the magic numbers.
	devs, err := ctx.ListDevices(func(desc *usb.Descriptor) bool {
		return desc.Vendor == Vendor && desc.Product == Product
	})
	// ListDevices may have opened devices even when it returns an error, so
	// register cleanup before the error check.
	defer func() {
		for _, d := range devs {
			d.Close()
		}
	}()
	if err != nil {
		return err
	}
	if len(devs) == 0 {
		// Error strings are lowercase without punctuation per Go convention.
		return fmt.Errorf("no CrazyRadio dongles found")
	}
	for _, dev := range devs {
		fmt.Printf("CrazyRadio USB dongle v%s\n", dev.Device)
	}
	controller := devs[0]
	in, err := controller.OpenEndpoint(
		/* config */ 1,
		/* iface */ 0,
		/* setup */ 0,
		/* endpoint */ 1|uint8(usb.ENDPOINT_DIR_IN))
	if err != nil {
		return fmt.Errorf("OpenEndpoint(IN): %v", err)
	}
	out, err := controller.OpenEndpoint(
		/* config */ 1,
		/* iface */ 0,
		/* setup */ 0,
		/* endpoint */ 1|uint8(usb.ENDPOINT_DIR_OUT))
	if err != nil {
		return fmt.Errorf("OpenEndpoint(OUT): %v", err)
	}
	if err = initDongle(controller, 10, DATA_RATE_250K); err != nil {
		return fmt.Errorf("initDongle: %v", err)
	}
	go reader(in)
	go writer(out)
	fmt.Printf("Press Ctrl+C to exit\n")
	// Block forever; the goroutines above do the work. (The previous
	// unreachable `return nil` after this loop has been removed.)
	for {
		time.Sleep(time.Second)
	}
}
// main runs the dongle discovery/IO loop and reports any failure on stderr.
func main() {
	err := listDongles()
	if err == nil {
		return
	}
	fmt.Fprintf(os.Stderr, "Error: %v\n", err)
	os.Exit(1)
}
Implement an async reader and a semi-sync writer, and allow sending packages to the Crazyflie. I can spin the motors, but am still unable to receive any meaningful packages from the flie.
// Enumerates USB devices, finds and identifies CrazyRadio USB dongle.
package main
import (
"fmt"
"log"
"os"
"time"
"github.com/kylelemons/gousb/usb"
)
// USB vendor/product IDs used to identify a CrazyRadio dongle (see the
// device filter in listDongles).
const (
	Vendor  = 0x1915
	Product = 0x7777
)

// Request is a vendor-specific USB control request code understood by the
// CrazyRadio firmware.
type Request uint8

// Control request codes sent via control().
// NOTE(review): ARD/ARC presumably stand for auto-retransmit delay/count
// (nRF24 terminology) — confirm against the CrazyRadio protocol docs.
const (
	SET_RADIO_CHANNEL Request = 0x01
	SET_RADIO_ADDRESS Request = 0x02
	SET_DATA_RATE     Request = 0x03
	SET_RADIO_POWER   Request = 0x04
	SET_RADIO_ARD     Request = 0x05
	SET_RADIO_ARC     Request = 0x06
	ACK_ENABLE        Request = 0x10
	SET_CONT_CARRIER  Request = 0x20
	CHANNEL_SCANN     Request = 0x21
	LAUNCH_BOOTLOADER Request = 0xFF
)

// DataRate selects the radio's air data rate.
type DataRate uint16

// Data-rate values for SET_DATA_RATE and power levels for SET_RADIO_POWER.
const (
	DATA_RATE_250K DataRate = 0
	DATA_RATE_1M   DataRate = 1
	DATA_RATE_2M   DataRate = 2

	RADIO_POWER_M18dBm = 0
	RADIO_POWER_M12dBm = 1
	RADIO_POWER_M6dBm  = 2
	RADIO_POWER_0dBm   = 3
)
// control issues a vendor-typed USB control transfer carrying the given
// request code, value and optional payload to the dongle.
func control(d *usb.Device, req Request, val uint16, data []byte) error {
	if _, err := d.Control(usb.REQUEST_TYPE_VENDOR, uint8(req), val, 0, data); err != nil {
		return err
	}
	return nil
}
// initDongle configures the radio with a fixed sequence of control
// transfers, applying the requested channel and data rate last. The first
// failing transfer aborts the sequence and its error is returned.
//
// NOTE(review): SET_RADIO_ARC is written twice (3, then 10) and a temporary
// channel (2) / data rate (250K) are set before the requested values —
// presumably mirroring a reference implementation where ordering matters;
// confirm against the CrazyRadio protocol documentation.
func initDongle(d *usb.Device, ch uint16, rate DataRate) (err error) {
	if err = control(d, SET_DATA_RATE, uint16(DATA_RATE_250K), nil); err != nil {
		return
	}
	if err = control(d, SET_RADIO_CHANNEL, 2, nil); err != nil {
		return
	}
	if err = control(d, SET_CONT_CARRIER, 0, nil); err != nil {
		return
	}
	// Default Crazyflie address E7:E7:E7:E7:E7.
	if err = control(d, SET_RADIO_ADDRESS, 0, []byte{0xE7, 0xE7, 0xE7, 0xE7, 0xE7}); err != nil {
		return
	}
	if err = control(d, SET_RADIO_POWER, RADIO_POWER_0dBm, nil); err != nil {
		return
	}
	if err = control(d, SET_RADIO_ARC, 3, nil); err != nil {
		return
	}
	if err = control(d, SET_RADIO_ARD, 32, nil); err != nil {
		return
	}
	if err = control(d, SET_RADIO_ARC, 10, nil); err != nil {
		return
	}
	// Finally apply the caller-requested channel and data rate.
	if err = control(d, SET_RADIO_CHANNEL, ch, nil); err != nil {
		return
	}
	if err = control(d, SET_DATA_RATE, uint16(rate), nil); err != nil {
		return
	}
	return
}
// reader loops forever, reading packets from the IN endpoint, forwarding a
// copy of each packet on ch, and logging it. Read errors are logged and the
// loop continues.
func reader(in usb.Endpoint, ch chan<- []byte) {
	scratch := make([]byte, 128)
	for {
		n, err := in.Read(scratch)
		if err != nil {
			log.Printf("Error: reader: %v", err)
			continue
		}
		// Copy out of the reusable scratch buffer before handing off.
		pkt := append([]byte(nil), scratch[:n]...)
		ch <- pkt
		log.Printf("Reader, len: %d, package: %v", n, scratch[:n])
	}
}
// consume reads and logs at least cnt packets from readCh (blocking), then
// drains any additionally buffered packets. Whenever the non-blocking drain
// finds another packet, the whole cycle repeats, blocking for cnt more.
//
// NOTE(review): with cnt > 0, an extra buffered packet causes another
// *blocking* wait for cnt more packets — if no further packet arrives this
// never returns. Looks intentional as a "sync to quiet" heuristic, but
// confirm it cannot stall the writer loop.
func consume(cnt int, readCh <-chan []byte) {
	for {
		log.Printf("Consuming at least %d package", cnt)
		// Blocking phase: wait for exactly cnt packets.
		for i := 0; i < cnt; i++ {
			p := <-readCh
			log.Printf("Writer, incoming package: %v", p)
		}
		// Non-blocking phase: if nothing else is pending, we are done.
		select {
		case p := <-readCh:
			log.Printf("Writer, incoming package: %v", p)
		default:
			return
		}
	}
}
// sendPackage drains any pending inbound packets, writes p to the OUT
// endpoint, and then waits for at least one response packet.
func sendPackage(out usb.Endpoint, readCh <-chan []byte, p []byte) (err error) {
	log.Printf("sendPackage: %v", p)
	consume(0, readCh)
	if _, writeErr := out.Write(p); writeErr != nil {
		return fmt.Errorf("sendPackage: %v", writeErr)
	}
	consume(1, readCh)
	return nil
}
// writer forwards packets from writeCh to the dongle; when no packet is
// pending it sends a 0xFF ping instead. Send errors are logged and the loop
// continues.
func writer(out usb.Endpoint, writeCh <-chan []byte, readCh <-chan []byte) {
	ping := []byte{0xFF}
	for {
		p := ping
		select {
		case p = <-writeCh:
		default:
		}
		if err := sendPackage(out, readCh, p); err != nil {
			log.Printf("Error: writer: %v", err)
		}
	}
}
// listDongles enumerates CrazyRadio dongles, opens the IN/OUT endpoints of
// the first one, initializes the radio, starts the reader/writer goroutines
// and pushes an initial sequence of packets, then blocks until the process
// is interrupted.
func listDongles() error {
	ctx := usb.NewContext()
	defer ctx.Close()
	// Match devices by the declared Vendor/Product constants rather than
	// repeating the magic numbers.
	devs, err := ctx.ListDevices(func(desc *usb.Descriptor) bool {
		return desc.Vendor == Vendor && desc.Product == Product
	})
	// ListDevices may have opened devices even when it returns an error, so
	// register cleanup before the error check.
	defer func() {
		for _, d := range devs {
			d.Close()
		}
	}()
	if err != nil {
		return err
	}
	if len(devs) == 0 {
		// Error strings are lowercase without punctuation per Go convention.
		return fmt.Errorf("no CrazyRadio dongles found")
	}
	for _, dev := range devs {
		fmt.Printf("CrazyRadio USB dongle v%s\n", dev.Device)
	}
	controller := devs[0]
	in, err := controller.OpenEndpoint(
		/* config */ 1,
		/* iface */ 0,
		/* setup */ 0,
		/* endpoint */ 1|uint8(usb.ENDPOINT_DIR_IN))
	if err != nil {
		return fmt.Errorf("OpenEndpoint(IN): %v", err)
	}
	out, err := controller.OpenEndpoint(
		/* config */ 1,
		/* iface */ 0,
		/* setup */ 0,
		/* endpoint */ 1|uint8(usb.ENDPOINT_DIR_OUT))
	if err != nil {
		return fmt.Errorf("OpenEndpoint(OUT): %v", err)
	}
	if err = initDongle(controller, 10, DATA_RATE_250K); err != nil {
		return fmt.Errorf("initDongle: %v", err)
	}
	readCh := make(chan []byte, 10)
	writeCh := make(chan []byte)
	go reader(in, readCh)
	go writer(out, writeCh, readCh)
	// Initial packet sequence sent to the Crazyflie (TOC requests etc. —
	// values preserved from the original implementation).
	writeCh <- []byte{44, 1}
	writeCh <- []byte{44, 1}
	for i := 0; i <= 26; i++ {
		writeCh <- []byte{44, 0, byte(i)}
	}
	writeCh <- []byte{92, 1}
	for i := 0; i <= 14; i++ {
		writeCh <- []byte{92, 0, byte(i)}
	}
	writeCh <- []byte{60, 0, 0, 0, 0, 0, 0, 0, 128, 250, 117, 61, 64, 48, 117}
	fmt.Printf("Press Ctrl+C to exit\n")
	// Block forever; the goroutines above do the work. (The previous
	// unreachable `return nil` after this loop has been removed.)
	for {
		time.Sleep(time.Second)
	}
}
// main runs the dongle discovery/IO loop and reports any failure on stderr.
func main() {
	err := listDongles()
	if err == nil {
		return
	}
	fmt.Fprintf(os.Stderr, "Error: %v\n", err)
	os.Exit(1)
}
|
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"math/rand"
"net/http"
"os"
"path"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/dustin/go-humanize"
"github.com/klauspost/readahead"
"github.com/minio/madmin-go"
"github.com/minio/minio-go/v7/pkg/s3utils"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/internal/bpool"
"github.com/minio/minio/internal/bucket/lifecycle"
"github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/bucket/replication"
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/sync/errgroup"
"github.com/minio/pkg/mimedb"
)
// erasureSingle - Implements single drive XL layer
type erasureSingle struct {
	GatewayUnsupported

	// the single backing drive
	disk StorageAPI

	// endpoint of the backing drive
	endpoint Endpoint

	// Locker mutex map.
	nsMutex *nsLockMap

	// Byte pools used for temporary i/o buffers.
	bp *bpool.BytePoolCap

	// paces removal of previously deleted objects (see cleanupDeletedObjectsInner)
	deletedCleanupSleeper *dynamicSleeper

	// Shut down async operations
	shutdown context.CancelFunc

	// on-disk format metadata this layer was initialized with
	format *formatErasureV3
}
// Initialize new set of erasure coded sets.
func newErasureSingle(ctx context.Context, storageDisk StorageAPI, format *formatErasureV3) (ObjectLayer, error) {
	// Number of buffers, max 2GB
	n := (2 * humanize.GiByte) / (blockSizeV2 * 2)

	// Initialize byte pool once for all sets, bpool size is set to
	// setCount * setDriveCount with each memory upto blockSizeV2.
	bp := bpool.NewBytePoolCap(n, blockSizeV2, blockSizeV2*2)

	// Initialize the erasure sets instance.
	s := &erasureSingle{
		disk:                  storageDisk,
		endpoint:              storageDisk.Endpoint(),
		format:                format,
		nsMutex:               newNSLock(false),
		bp:                    bp,
		deletedCleanupSleeper: newDynamicSleeper(10, 2*time.Second, false),
	}

	// Derive the cancellable context BEFORE spawning the background
	// goroutines so that Shutdown (which invokes s.shutdown) actually stops
	// them. Previously the cleanup goroutines captured the parent context
	// and never observed the cancellation.
	ctx, s.shutdown = context.WithCancel(ctx)

	// start cleanup stale uploads go-routine.
	go s.cleanupStaleUploads(ctx)

	// start cleanup of deleted objects.
	go s.cleanupDeletedObjects(ctx)

	go intDataUpdateTracker.start(ctx, s.endpoint.Path)

	return s, nil
}
// List all buckets from one of the set, we are not doing merge
// sort here just for simplification. As per design it is assumed
// that all buckets are present on all sets.
// ListBuckets lists all buckets on the single drive, optionally including
// deleted buckets (opts.Deleted), sorted by name. Creation times are
// replaced with the authoritative value from the bucket metadata subsystem
// when available.
func (es *erasureSingle) ListBuckets(ctx context.Context, opts BucketOptions) (buckets []BucketInfo, err error) {
	var listBuckets []BucketInfo
	healBuckets := map[string]VolInfo{}
	// lists all unique buckets across drives.
	if err := listAllBuckets(ctx, []StorageAPI{es.disk}, healBuckets, 0); err != nil {
		return nil, err
	}

	// include deleted buckets in listBuckets output
	deletedBuckets := map[string]VolInfo{}

	if opts.Deleted {
		// lists all deleted buckets across drives.
		if err := listDeletedBuckets(ctx, []StorageAPI{es.disk}, deletedBuckets, 0); err != nil {
			return nil, err
		}
	}

	// Live buckets, annotated with their deletion time if a tombstone exists.
	for _, v := range healBuckets {
		bi := BucketInfo{
			Name:    v.Name,
			Created: v.Created,
		}
		if vi, ok := deletedBuckets[v.Name]; ok {
			bi.Deleted = vi.Created
		}
		listBuckets = append(listBuckets, bi)
	}

	// Deleted buckets with no live counterpart.
	for _, v := range deletedBuckets {
		if _, ok := healBuckets[v.Name]; !ok {
			listBuckets = append(listBuckets, BucketInfo{
				Name:    v.Name,
				Deleted: v.Created,
			})
		}
	}

	sort.Slice(listBuckets, func(i, j int) bool {
		return listBuckets[i].Name < listBuckets[j].Name
	})

	// Prefer creation time recorded in bucket metadata, when present.
	for i := range listBuckets {
		meta, err := globalBucketMetadataSys.Get(listBuckets[i].Name)
		if err == nil {
			listBuckets[i].Created = meta.Created
		}
	}

	return listBuckets, nil
}
// cleanupStaleUploads periodically removes expired multipart uploads from
// the disk until ctx is cancelled. The interval is re-read from
// globalAPIConfig on every reset so runtime reconfiguration takes effect.
func (es *erasureSingle) cleanupStaleUploads(ctx context.Context) {
	timer := time.NewTimer(globalAPIConfig.getStaleUploadsCleanupInterval())
	defer timer.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-timer.C:
			es.cleanupStaleUploadsOnDisk(ctx, es.disk, globalAPIConfig.getStaleUploadsExpiry())

			// Reset for the next interval
			timer.Reset(globalAPIConfig.getStaleUploadsCleanupInterval())
		}
	}
}
// cleanup ".trash/" folder every 5m minutes with sufficient sleep cycles, between each
// deletes a dynamic sleeper is used with a factor of 10 ratio with max delay between
// deletes to be 2 seconds.
// cleanupDeletedObjects periodically purges the deleted-objects trash until
// ctx is cancelled; each tick delegates to cleanupDeletedObjectsInner.
func (es *erasureSingle) cleanupDeletedObjects(ctx context.Context) {
	timer := time.NewTimer(globalAPIConfig.getDeleteCleanupInterval())
	defer timer.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-timer.C:
			es.cleanupDeletedObjectsInner(ctx)

			// Reset for the next interval
			timer.Reset(globalAPIConfig.getDeleteCleanupInterval())
		}
	}
}
// NewNSLock - initialize a new namespace RWLocker instance.
// NOTE(review): the nil first argument presumably selects purely local
// (non-distributed) locking — confirm against nsLockMap.NewNSLock.
func (es *erasureSingle) NewNSLock(bucket string, objects ...string) RWLocker {
	return es.nsMutex.NewNSLock(nil, bucket, objects...)
}
// Shutdown function for object storage interface.
// It cancels the background goroutines (via es.shutdown) and closes the
// backing storage disk.
func (es *erasureSingle) Shutdown(ctx context.Context) error {
	defer es.shutdown()

	// Add any object layer shutdown activities here.
	closeStorageDisks(es.disk)
	return nil
}
// SetDriveCounts returns the per-set drive counts; the single-drive layer
// always has exactly one set with one drive.
func (es *erasureSingle) SetDriveCounts() []int {
	return []int{1}
}
// BackendInfo reports the erasure backend layout. For the single-drive
// layer parity is zero for both storage classes, so all drives count as
// data drives.
func (es *erasureSingle) BackendInfo() (b madmin.BackendInfo) {
	b.Type = madmin.Erasure

	scParity := 0
	rrSCParity := 0

	// Data blocks can vary per pool, but parity is same.
	for _, setDriveCount := range es.SetDriveCounts() {
		b.StandardSCData = append(b.StandardSCData, setDriveCount-scParity)
		b.RRSCData = append(b.RRSCData, setDriveCount-rrSCParity)
	}

	b.StandardSCParity = scParity
	b.RRSCParity = rrSCParity
	return
}
// StorageInfo - returns underlying storage statistics.
func (es *erasureSingle) StorageInfo(ctx context.Context) (StorageInfo, []error) {
	disks := []StorageAPI{es.disk}
	endpoints := []Endpoint{es.endpoint}

	storageInfo, errs := getStorageInfo(disks, endpoints)
	storageInfo.Backend = es.BackendInfo()
	return storageInfo, errs
}
// LocalStorageInfo - returns underlying local storage statistics.
// Only endpoints marked local are included in the result.
func (es *erasureSingle) LocalStorageInfo(ctx context.Context) (StorageInfo, []error) {
	disks := []StorageAPI{es.disk}
	endpoints := []Endpoint{es.endpoint}

	var localDisks []StorageAPI
	var localEndpoints []Endpoint

	for i := range endpoints {
		if !endpoints[i].IsLocal {
			continue
		}
		localDisks = append(localDisks, disks[i])
		localEndpoints = append(localEndpoints, endpoints[i])
	}

	return getStorageInfo(localDisks, localEndpoints)
}
// Clean-up previously deleted objects. from .minio.sys/tmp/.trash/
// Removals are paced by deletedCleanupSleeper to limit I/O pressure.
func (es *erasureSingle) cleanupDeletedObjectsInner(ctx context.Context) {
	diskPath := es.disk.Endpoint().Path
	readDirFn(pathJoin(diskPath, minioMetaTmpDeletedBucket), func(ddir string, typ os.FileMode) error {
		wait := es.deletedCleanupSleeper.Timer(ctx)
		removeAll(pathJoin(diskPath, minioMetaTmpDeletedBucket, ddir))
		wait()
		return nil
	})
}
// renameAll moves bucket/prefix into the trash bucket under a random UUID,
// deferring the actual deletion to the background trash cleanup.
func (es *erasureSingle) renameAll(ctx context.Context, bucket, prefix string) {
	if es.disk != nil {
		es.disk.RenameFile(ctx, bucket, prefix, minioMetaTmpDeletedBucket, mustGetUUID())
	}
}

// renameAllStorager is implemented by layers that support trash-based
// asynchronous deletion via renameAll.
type renameAllStorager interface {
	renameAll(ctx context.Context, bucket, prefix string)
}
// Bucket operations

// MakeBucket - make a bucket.
// The bucket name is validated (unless it is an internal MinIO bucket), the
// volume is created on disk, and fresh bucket metadata is persisted with
// optional versioning/object-lock configuration.
func (es *erasureSingle) MakeBucketWithLocation(ctx context.Context, bucket string, opts MakeBucketOptions) error {
	defer NSUpdated(bucket, slashSeparator)

	// Lock the bucket name before creating.
	lk := es.NewNSLock(minioMetaTmpBucket, bucket+".lck")
	lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return err
	}
	ctx = lkctx.Context()
	defer lk.Unlock(lkctx.Cancel)

	// Verify if bucket is valid.
	if !isMinioMetaBucketName(bucket) {
		if err := s3utils.CheckValidBucketNameStrict(bucket); err != nil {
			return BucketNameInvalid{Bucket: bucket}
		}
	}

	if err := es.disk.MakeVol(ctx, bucket); err != nil {
		if opts.ForceCreate && errors.Is(err, errVolumeExists) {
			// No need to return error when force create was
			// requested.
			return nil
		}
		if !errors.Is(err, errVolumeExists) {
			logger.LogIf(ctx, err)
		}
		return toObjectErr(err, bucket)
	}

	// If it doesn't exist we get a new, so ignore errors
	meta := newBucketMetadata(bucket)
	meta.SetCreatedAt(opts.CreatedAt)
	// Object locking implies versioning.
	if opts.LockEnabled {
		meta.VersioningConfigXML = enabledBucketVersioningConfig
		meta.ObjectLockConfigXML = enabledBucketObjectLockConfig
	}
	if opts.VersioningEnabled {
		meta.VersioningConfigXML = enabledBucketVersioningConfig
	}
	if err := meta.Save(context.Background(), es); err != nil {
		return toObjectErr(err, bucket)
	}

	globalBucketMetadataSys.Set(bucket, meta)

	return nil
}
// GetBucketInfo - returns BucketInfo for a bucket.
// When the bucket does not exist and opts.Deleted is set, a tombstone entry
// under the deleted-buckets prefix is consulted instead.
func (es *erasureSingle) GetBucketInfo(ctx context.Context, bucket string, opts BucketOptions) (bi BucketInfo, e error) {
	volInfo, err := es.disk.StatVol(ctx, bucket)
	if err != nil {
		if opts.Deleted {
			if dvi, derr := es.disk.StatVol(ctx, pathJoin(minioMetaBucket, bucketMetaPrefix, deletedBucketsPrefix, bucket)); derr == nil {
				return BucketInfo{Name: bucket, Deleted: dvi.Created}, nil
			}
		}
		return bi, toObjectErr(err, bucket)
	}
	return BucketInfo{Name: volInfo.Name, Created: volInfo.Created}, nil
}
// DeleteBucket - deletes a bucket.
// Bucket metadata is purged even if the volume deletion fails; on success
// (or when the volume was already gone) a site-replication tombstone may be
// recorded depending on opts.SRDeleteOp.
func (es *erasureSingle) DeleteBucket(ctx context.Context, bucket string, opts DeleteBucketOptions) error {
	// Collect if all disks report volume not found.
	defer NSUpdated(bucket, slashSeparator)

	err := es.disk.DeleteVol(ctx, bucket, opts.Force)

	// Purge the entire bucket metadata entirely.
	deleteBucketMetadata(ctx, es, bucket)
	globalBucketMetadataSys.Remove(bucket)

	if err == nil || errors.Is(err, errVolumeNotFound) {
		if opts.SRDeleteOp == MarkDelete {
			es.markDelete(ctx, minioMetaBucket, pathJoin(bucketMetaPrefix, deletedBucketsPrefix, bucket))
		}
	}
	return toObjectErr(err, bucket)
}
// markDelete creates a vol entry in .minio.sys/buckets/.deleted until site replication
// syncs the delete to peers
func (es *erasureSingle) markDelete(ctx context.Context, bucket, prefix string) error {
	err := es.disk.MakeVol(ctx, pathJoin(bucket, prefix))
	// An already-existing tombstone is not an error.
	if err != nil && errors.Is(err, errVolumeExists) {
		return nil
	}
	return toObjectErr(err, bucket)
}

// purgeDelete deletes vol entry in .minio.sys/buckets/.deleted after site replication
// syncs the delete to peers OR on a new MakeBucket call.
func (es *erasureSingle) purgeDelete(ctx context.Context, bucket, prefix string) error {
	err := es.disk.DeleteVol(ctx, pathJoin(bucket, prefix), true)
	return toObjectErr(err, bucket)
}
// IsNotificationSupported returns whether bucket notification is applicable for this layer.
func (es *erasureSingle) IsNotificationSupported() bool {
	return true
}

// IsListenSupported returns whether listen bucket notification is applicable for this layer.
func (es *erasureSingle) IsListenSupported() bool {
	return true
}

// IsEncryptionSupported returns whether server side encryption is implemented for this layer.
func (es *erasureSingle) IsEncryptionSupported() bool {
	return true
}

// IsCompressionSupported returns whether compression is applicable for this layer.
func (es *erasureSingle) IsCompressionSupported() bool {
	return true
}

// IsTaggingSupported indicates whether *erasureSingle implements tagging support.
func (es *erasureSingle) IsTaggingSupported() bool {
	return true
}
// Object Operations

// CopyObject - copy object source object to destination object.
// if source object and destination object are same we only
// update metadata.
func (es *erasureSingle) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (oi ObjectInfo, err error) {
	defer NSUpdated(dstBucket, dstObject)

	srcObject = encodeDirObject(srcObject)
	dstObject = encodeDirObject(dstObject)

	// Metadata-only fast path is possible only when copying onto itself.
	cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))

	// Take a write lock on the destination unless the caller holds it.
	if !dstOpts.NoLock {
		ns := es.NewNSLock(dstBucket, dstObject)
		lkctx, err := ns.GetLock(ctx, globalOperationTimeout)
		if err != nil {
			return ObjectInfo{}, err
		}
		ctx = lkctx.Context()
		defer ns.Unlock(lkctx.Cancel)
		dstOpts.NoLock = true
	}

	if cpSrcDstSame && srcInfo.metadataOnly {
		// Read metadata associated with the object from all disks.
		storageDisks := []StorageAPI{es.disk}

		var metaArr []FileInfo
		var errs []error

		// Read metadata associated with the object from all disks.
		if srcOpts.VersionID != "" {
			metaArr, errs = readAllFileInfo(ctx, storageDisks, srcBucket, srcObject, srcOpts.VersionID, true)
		} else {
			metaArr, errs = readAllXL(ctx, storageDisks, srcBucket, srcObject, true)
		}

		readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, metaArr, errs, 0)
		if err != nil {
			return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject)
		}

		// List all online disks.
		onlineDisks, modTime := listOnlineDisks(storageDisks, metaArr, errs)

		// Pick latest valid metadata.
		fi, err := pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
		if err != nil {
			return oi, toObjectErr(err, srcBucket, srcObject)
		}

		// A delete marker is only addressable with an explicit version ID.
		if fi.Deleted {
			if srcOpts.VersionID == "" {
				return oi, toObjectErr(errFileNotFound, srcBucket, srcObject)
			}
			return fi.ToObjectInfo(srcBucket, srcObject, srcOpts.Versioned || srcOpts.VersionSuspended), toObjectErr(errMethodNotAllowed, srcBucket, srcObject)
		}

		filterOnlineDisksInplace(fi, metaArr, onlineDisks)

		versionID := srcInfo.VersionID
		if srcInfo.versionOnly {
			versionID = dstOpts.VersionID
			// preserve destination versionId if specified.
			if versionID == "" {
				versionID = mustGetUUID()
				fi.IsLatest = true // we are creating a new version so this is latest.
			}
			modTime = UTCNow()
		}

		// If the data is not inlined, we may end up incorrectly
		// inlining the data here, that leads to an inconsistent
		// situation where some objects are were not inlined
		// were now inlined, make sure to `nil` the Data such
		// that xl.meta is written as expected.
		if !fi.InlineData() {
			fi.Data = nil
		}

		fi.VersionID = versionID // set any new versionID we might have created
		fi.ModTime = modTime     // set modTime for the new versionID
		if !dstOpts.MTime.IsZero() {
			modTime = dstOpts.MTime
			fi.ModTime = dstOpts.MTime
		}
		fi.Metadata = srcInfo.UserDefined
		srcInfo.UserDefined["etag"] = srcInfo.ETag

		// Update `xl.meta` content on each disks.
		for index := range metaArr {
			if metaArr[index].IsValid() {
				metaArr[index].ModTime = modTime
				metaArr[index].VersionID = versionID
				metaArr[index].Metadata = srcInfo.UserDefined
				if !metaArr[index].InlineData() {
					// If the data is not inlined, we may end up incorrectly
					// inlining the data here, that leads to an inconsistent
					// situation where some objects are were not inlined
					// were now inlined, make sure to `nil` the Data such
					// that xl.meta is written as expected.
					metaArr[index].Data = nil
				}
			}
		}

		// Write unique `xl.meta` for each disk.
		if _, err = writeUniqueFileInfo(ctx, onlineDisks, srcBucket, srcObject, metaArr, writeQuorum); err != nil {
			return oi, toObjectErr(err, srcBucket, srcObject)
		}

		return fi.ToObjectInfo(srcBucket, srcObject, srcOpts.Versioned || srcOpts.VersionSuspended), nil
	}

	// Full copy: re-put the source reader under the destination key.
	putOpts := ObjectOptions{
		ServerSideEncryption: dstOpts.ServerSideEncryption,
		UserDefined:          srcInfo.UserDefined,
		Versioned:            dstOpts.Versioned,
		VersionID:            dstOpts.VersionID,
		MTime:                dstOpts.MTime,
		NoLock:               true,
	}

	return es.PutObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, putOpts)
}
// GetObjectNInfo - returns object info and an object
// Read(Closer). When err != nil, the returned reader is always nil.
// The requested lock (read/write/none) is held for the lifetime of the
// returned reader and released via its cleanup funcs.
func (es *erasureSingle) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
	if err = checkGetObjArgs(ctx, bucket, object); err != nil {
		return nil, err
	}

	object = encodeDirObject(object)

	// unlockOnDefer controls whether the lock is released on early return;
	// once the reader takes ownership of the cleanup it is set to false.
	var unlockOnDefer bool
	nsUnlocker := func() {}
	defer func() {
		if unlockOnDefer {
			nsUnlocker()
		}
	}()

	// Acquire lock
	if lockType != noLock {
		lock := es.NewNSLock(bucket, object)
		switch lockType {
		case writeLock:
			lkctx, err := lock.GetLock(ctx, globalOperationTimeout)
			if err != nil {
				return nil, err
			}
			ctx = lkctx.Context()
			nsUnlocker = func() { lock.Unlock(lkctx.Cancel) }
		case readLock:
			lkctx, err := lock.GetRLock(ctx, globalOperationTimeout)
			if err != nil {
				return nil, err
			}
			ctx = lkctx.Context()
			nsUnlocker = func() { lock.RUnlock(lkctx.Cancel) }
		}
		unlockOnDefer = true
	}

	fi, metaArr, onlineDisks, err := es.getObjectFileInfo(ctx, bucket, object, opts, true)
	if err != nil {
		return nil, toObjectErr(err, bucket, object)
	}

	objInfo := fi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended)
	// Delete markers are only addressable with an explicit version ID.
	if objInfo.DeleteMarker {
		if opts.VersionID == "" {
			return &GetObjectReader{
				ObjInfo: objInfo,
			}, toObjectErr(errFileNotFound, bucket, object)
		}
		// Make sure to return object info to provide extra information.
		return &GetObjectReader{
			ObjInfo: objInfo,
		}, toObjectErr(errMethodNotAllowed, bucket, object)
	}

	// Transitioned (tiered) objects are read from the remote tier.
	if objInfo.IsRemote() {
		gr, err := getTransitionedObjectReader(ctx, bucket, object, rs, h, objInfo, opts)
		if err != nil {
			return nil, err
		}
		unlockOnDefer = false
		return gr.WithCleanupFuncs(nsUnlocker), nil
	}

	fn, off, length, err := NewGetObjectReader(rs, objInfo, opts)
	if err != nil {
		return nil, err
	}
	unlockOnDefer = false

	// Stream the object through a pipe fed by a background goroutine.
	pr, pw := xioutil.WaitPipe()
	go func() {
		pw.CloseWithError(es.getObjectWithFileInfo(ctx, bucket, object, off, length, pw, fi, metaArr, onlineDisks))
	}()

	// Cleanup function to cause the go routine above to exit, in
	// case of incomplete read.
	pipeCloser := func() {
		pr.CloseWithError(nil)
	}

	return fn(pr, h, pipeCloser, nsUnlocker)
}
// getObjectWithFileInfo streams the requested byte range of the object to
// writer, erasure-decoding each part from the online disks using the
// already-resolved FileInfo metadata.
func (es *erasureSingle) getObjectWithFileInfo(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, fi FileInfo, metaArr []FileInfo, onlineDisks []StorageAPI) error {
	// Reorder online disks based on erasure distribution order.
	// Reorder parts metadata based on erasure distribution order.
	onlineDisks, metaArr = shuffleDisksAndPartsMetadataByIndex(onlineDisks, metaArr, fi)

	// For negative length read everything.
	if length < 0 {
		length = fi.Size - startOffset
	}

	// Reply back invalid range if the input offset and length fall out of range.
	if startOffset > fi.Size || startOffset+length > fi.Size {
		logger.LogIf(ctx, InvalidRange{startOffset, length, fi.Size}, logger.Application)
		return InvalidRange{startOffset, length, fi.Size}
	}

	// Get start part index and offset.
	partIndex, partOffset, err := fi.ObjectToPartOffset(ctx, startOffset)
	if err != nil {
		return InvalidRange{startOffset, length, fi.Size}
	}

	// Calculate endOffset according to length
	endOffset := startOffset
	if length > 0 {
		endOffset += length - 1
	}

	// Get last part index to read given length.
	lastPartIndex, _, err := fi.ObjectToPartOffset(ctx, endOffset)
	if err != nil {
		return InvalidRange{startOffset, length, fi.Size}
	}

	var totalBytesRead int64
	erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize)
	if err != nil {
		return toObjectErr(err, bucket, object)
	}

	// once we have obtained a common FileInfo i.e latest, we should stick
	// to single dataDir to read the content to avoid reading from some other
	// dataDir that has stale FileInfo{} to ensure that we fail appropriately
	// during reads and expect the same dataDir everywhere.
	dataDir := fi.DataDir
	for ; partIndex <= lastPartIndex; partIndex++ {
		if length == totalBytesRead {
			break
		}

		partNumber := fi.Parts[partIndex].Number

		// Save the current part name and size.
		partSize := fi.Parts[partIndex].Size

		partLength := partSize - partOffset
		// partLength should be adjusted so that we don't write more data than what was requested.
		if partLength > (length - totalBytesRead) {
			partLength = length - totalBytesRead
		}

		tillOffset := erasure.ShardFileOffset(partOffset, partLength, partSize)

		// Get the checksums of the current part.
		readers := make([]io.ReaderAt, len(onlineDisks))
		prefer := make([]bool, len(onlineDisks))
		for index, disk := range onlineDisks {
			if disk == OfflineDisk {
				continue
			}
			if !metaArr[index].IsValid() {
				continue
			}
			checksumInfo := metaArr[index].Erasure.GetChecksumInfo(partNumber)
			partPath := pathJoin(object, dataDir, fmt.Sprintf("part.%d", partNumber))
			readers[index] = newBitrotReader(disk, metaArr[index].Data, bucket, partPath, tillOffset,
				checksumInfo.Algorithm, checksumInfo.Hash, erasure.ShardSize())

			// Prefer local disks
			prefer[index] = disk.Hostname() == ""
		}

		_, err = erasure.Decode(ctx, writer, readers, partOffset, partLength, partSize, prefer)
		// Note: we should not be defer'ing the following closeBitrotReaders() call as
		// we are inside a for loop i.e if we use defer, we would accumulate a lot of open files by the time
		// we return from this function.
		closeBitrotReaders(readers)
		if err != nil {
			return toObjectErr(err, bucket, object)
		}

		// Disks for which no reader could be constructed are treated as
		// offline for the remaining parts.
		for i, r := range readers {
			if r == nil {
				onlineDisks[i] = OfflineDisk
			}
		}

		// Track total bytes read from disk and written to the client.
		totalBytesRead += partLength

		// partOffset will be valid only for the first part, hence reset it to 0 for
		// the remaining parts.
		partOffset = 0
	} // End of read all parts loop.

	// Return success.
	return nil
}
// GetObjectInfo - reads object metadata and replies back ObjectInfo.
func (es *erasureSingle) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (info ObjectInfo, err error) {
	if err = checkGetObjArgs(ctx, bucket, object); err != nil {
		return info, err
	}

	object = encodeDirObject(object)

	if !opts.NoLock {
		// Take a shared lock so writers cannot mutate the object while
		// its metadata is being read.
		lock := es.NewNSLock(bucket, object)
		lkctx, lerr := lock.GetRLock(ctx, globalOperationTimeout)
		if lerr != nil {
			return ObjectInfo{}, lerr
		}
		ctx = lkctx.Context()
		defer lock.RUnlock(lkctx.Cancel)
	}

	return es.getObjectInfo(ctx, bucket, object, opts)
}
// getObjectFileInfo reads the object metadata from the single disk,
// validates read quorum, and returns the latest valid FileInfo together
// with the per-disk metadata array and the filtered online-disk list.
// readData controls whether inline object data is loaded with the metadata.
func (es *erasureSingle) getObjectFileInfo(ctx context.Context, bucket, object string, opts ObjectOptions, readData bool) (fi FileInfo, metaArr []FileInfo, onlineDisks []StorageAPI, err error) {
	disks := []StorageAPI{es.disk}

	var errs []error

	// Read metadata associated with the object from all disks.
	metaArr, errs = readAllFileInfo(ctx, disks, bucket, object, opts.VersionID, readData)

	readQuorum, _, err := objectQuorumFromMeta(ctx, metaArr, errs, 0)
	if err != nil {
		return fi, nil, nil, toObjectErr(err, bucket, object)
	}

	// Fail if too many disks reported non-ignorable errors to satisfy quorum.
	if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
		return fi, nil, nil, toObjectErr(reducedErr, bucket, object)
	}

	// List all online disks.
	onlineDisks, modTime := listOnlineDisks(disks, metaArr, errs)

	// Pick latest valid metadata.
	fi, err = pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
	if err != nil {
		return fi, nil, nil, err
	}

	// Drop disks whose metadata does not agree with the picked FileInfo.
	filterOnlineDisksInplace(fi, metaArr, onlineDisks)
	return fi, metaArr, onlineDisks, nil
}
// getObjectInfo - wrapper for reading object metadata and constructs ObjectInfo.
func (es *erasureSingle) getObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	fi, _, _, err := es.getObjectFileInfo(ctx, bucket, object, opts, false)
	if err != nil {
		return objInfo, toObjectErr(err, bucket, object)
	}

	objInfo = fi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended)
	if !fi.Deleted {
		return objInfo, nil
	}

	// Delete markers: an unversioned lookup (or an explicit delete-marker
	// request) reports not-found; otherwise the operation is disallowed.
	// Either way objInfo is returned to provide extra information.
	if opts.VersionID == "" || opts.DeleteMarker {
		return objInfo, toObjectErr(errFileNotFound, bucket, object)
	}
	return objInfo, toObjectErr(errMethodNotAllowed, bucket, object)
}
// getObjectInfoAndQuorum - wrapper for reading object metadata and constructs ObjectInfo, additionally returns write quorum for the object.
func (es *erasureSingle) getObjectInfoAndQuorum(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, wquorum int, err error) {
	fi, _, _, err := es.getObjectFileInfo(ctx, bucket, object, opts, false)
	if err != nil {
		// A default write quorum of 1 is reported when metadata could not be read.
		return objInfo, 1, toObjectErr(err, bucket, object)
	}

	// Write quorum equals the data-block count; bump by one when data and
	// parity counts are equal so a strict majority is still required.
	wquorum = fi.Erasure.DataBlocks
	if fi.Erasure.DataBlocks == fi.Erasure.ParityBlocks {
		wquorum++
	}

	objInfo = fi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended)
	if !fi.VersionPurgeStatus().Empty() && opts.VersionID != "" {
		// Make sure to return object info to provide extra information.
		return objInfo, wquorum, toObjectErr(errMethodNotAllowed, bucket, object)
	}

	if fi.Deleted {
		if opts.VersionID == "" || opts.DeleteMarker {
			return objInfo, wquorum, toObjectErr(errFileNotFound, bucket, object)
		}
		// Make sure to return object info to provide extra information.
		return objInfo, wquorum, toObjectErr(errMethodNotAllowed, bucket, object)
	}

	return objInfo, wquorum, nil
}
// putMetacacheObject writes a metacache listing object under the minio meta
// bucket. The data is always inlined into `xl.meta` (no separate part file)
// and no versioning is applied.
func (es *erasureSingle) putMetacacheObject(ctx context.Context, key string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	data := r.Reader

	// No metadata is set, allocate a new one.
	if opts.UserDefined == nil {
		opts.UserDefined = make(map[string]string)
	}

	storageDisks := []StorageAPI{es.disk}

	// Get parity and data drive count based on storage class metadata
	parityDrives := 0
	dataDrives := len(storageDisks) - parityDrives

	// we now know the number of blocks this object needs for data and parity.
	// writeQuorum is dataBlocks + 1
	writeQuorum := dataDrives
	if dataDrives == parityDrives {
		writeQuorum++
	}

	// Validate input data size and it can never be less than zero.
	if data.Size() < -1 {
		logger.LogIf(ctx, errInvalidArgument, logger.Application)
		return ObjectInfo{}, toObjectErr(errInvalidArgument)
	}

	// Initialize parts metadata
	partsMetadata := make([]FileInfo, len(storageDisks))

	fi := newFileInfo(pathJoin(minioMetaBucket, key), dataDrives, parityDrives)
	fi.DataDir = mustGetUUID()

	// Initialize erasure metadata.
	for index := range partsMetadata {
		partsMetadata[index] = fi
	}

	// Order disks according to erasure distribution
	var onlineDisks []StorageAPI
	onlineDisks, partsMetadata = shuffleDisksAndPartsMetadata(storageDisks, partsMetadata, fi)

	erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize)
	if err != nil {
		return ObjectInfo{}, toObjectErr(err, minioMetaBucket, key)
	}

	// Fetch buffer for I/O, returns from the pool if not allocates a new one and returns.
	var buffer []byte
	switch size := data.Size(); {
	case size == 0:
		buffer = make([]byte, 1) // Allocate atleast a byte to reach EOF
	case size >= fi.Erasure.BlockSize:
		buffer = es.bp.Get()
		defer es.bp.Put(buffer)
	case size < fi.Erasure.BlockSize:
		// No need to allocate fully blockSizeV1 buffer if the incoming data is smaller.
		buffer = make([]byte, size, 2*size+int64(fi.Erasure.ParityBlocks+fi.Erasure.DataBlocks-1))
	}

	if len(buffer) > int(fi.Erasure.BlockSize) {
		buffer = buffer[:fi.Erasure.BlockSize]
	}

	shardFileSize := erasure.ShardFileSize(data.Size())
	writers := make([]io.Writer, len(onlineDisks))
	// Metacache objects are always inlined: each shard is written into an
	// in-memory buffer that later becomes FileInfo.Data.
	inlineBuffers := make([]*bytes.Buffer, len(onlineDisks))
	for i, disk := range onlineDisks {
		if disk == nil {
			continue
		}
		if disk.IsOnline() {
			inlineBuffers[i] = bytes.NewBuffer(make([]byte, 0, shardFileSize))
			writers[i] = newStreamingBitrotWriterBuffer(inlineBuffers[i], DefaultBitrotAlgorithm, erasure.ShardSize())
		}
	}

	n, erasureErr := erasure.Encode(ctx, data, writers, buffer, writeQuorum)
	closeBitrotWriters(writers)
	if erasureErr != nil {
		return ObjectInfo{}, toObjectErr(erasureErr, minioMetaBucket, key)
	}

	// Should return IncompleteBody{} error when reader has fewer bytes
	// than specified in request header.
	if n < data.Size() {
		return ObjectInfo{}, IncompleteBody{Bucket: minioMetaBucket, Object: key}
	}

	var index []byte
	if opts.IndexCB != nil {
		index = opts.IndexCB()
	}

	modTime := UTCNow()

	for i, w := range writers {
		if w == nil {
			// Make sure to avoid writing to disks which we couldn't complete in erasure.Encode()
			onlineDisks[i] = nil
			continue
		}
		partsMetadata[i].Data = inlineBuffers[i].Bytes()
		partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), modTime, index)
		partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
			PartNumber: 1,
			Algorithm:  DefaultBitrotAlgorithm,
			Hash:       bitrotWriterSum(w),
		})
	}

	// Fill all the necessary metadata.
	// Update `xl.meta` content on each disks.
	for index := range partsMetadata {
		partsMetadata[index].Size = n
		partsMetadata[index].Fresh = true
		partsMetadata[index].ModTime = modTime
		partsMetadata[index].Metadata = opts.UserDefined
	}

	// Set an additional header when data is inlined.
	for index := range partsMetadata {
		partsMetadata[index].SetInlineData()
	}

	for i := 0; i < len(onlineDisks); i++ {
		if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
			// Object info is the same in all disks, so we can pick
			// the first meta from online disk
			fi = partsMetadata[i]
			break
		}
	}

	if _, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaBucket, key, partsMetadata, writeQuorum); err != nil {
		return ObjectInfo{}, toObjectErr(err, minioMetaBucket, key)
	}

	return fi.ToObjectInfo(minioMetaBucket, key, opts.Versioned || opts.VersionSuspended), nil
}
// PutObject - creates an object upon reading from the input stream
// until EOF, erasure codes the data across all disk and additionally
// writes `xl.meta` which carries the necessary metadata for future
// object operations.
func (es *erasureSingle) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	// Reject invalid bucket/object names up front.
	if err := checkPutObjectArgs(ctx, bucket, object, es); err != nil {
		return ObjectInfo{}, err
	}

	object = encodeDirObject(object)

	// For non-internal buckets, fail fast when the disk lacks space for
	// the incoming payload.
	if !isMinioMetaBucketName(bucket) {
		if !hasSpaceFor(getDiskInfos(ctx, es.disk), data.Size()) {
			return ObjectInfo{}, toObjectErr(errDiskFull)
		}
	}

	return es.putObject(ctx, bucket, object, data, opts)
}
// putObject wrapper for erasureObjects PutObject.
//
// Streams the payload to a temporary location (or into in-memory inline
// buffers for small objects), then commits metadata and renames the data
// into place under the namespace lock.
func (es *erasureSingle) putObject(ctx context.Context, bucket string, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	data := r.Reader

	// No metadata is set, allocate a new one.
	if opts.UserDefined == nil {
		opts.UserDefined = make(map[string]string)
	}

	storageDisks := []StorageAPI{es.disk}

	// Single disk: no parity drives, all drives hold data.
	parityDrives := 0
	dataDrives := len(storageDisks) - parityDrives

	// we now know the number of blocks this object needs for data and parity.
	// writeQuorum is dataBlocks + 1
	writeQuorum := dataDrives
	if dataDrives == parityDrives {
		writeQuorum++
	}

	// Validate input data size and it can never be less than zero.
	if data.Size() < -1 {
		logger.LogIf(ctx, errInvalidArgument, logger.Application)
		return ObjectInfo{}, toObjectErr(errInvalidArgument)
	}

	// Initialize parts metadata
	partsMetadata := make([]FileInfo, len(storageDisks))

	fi := newFileInfo(pathJoin(bucket, object), dataDrives, parityDrives)
	fi.VersionID = opts.VersionID
	if opts.Versioned && fi.VersionID == "" {
		fi.VersionID = mustGetUUID()
	}

	fi.DataDir = mustGetUUID()
	uniqueID := mustGetUUID()
	tempObj := uniqueID

	// Initialize erasure metadata.
	for index := range partsMetadata {
		partsMetadata[index] = fi
	}

	// Order disks according to erasure distribution
	var onlineDisks []StorageAPI
	onlineDisks, partsMetadata = shuffleDisksAndPartsMetadata(storageDisks, partsMetadata, fi)

	erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize)
	if err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}

	// Fetch buffer for I/O, returns from the pool if not allocates a new one and returns.
	var buffer []byte
	switch size := data.Size(); {
	case size == 0:
		buffer = make([]byte, 1) // Allocate atleast a byte to reach EOF
	case size == -1:
		if size := data.ActualSize(); size > 0 && size < fi.Erasure.BlockSize {
			// Unknown stream size but small known actual size: allocate a
			// right-sized buffer with slack rather than a full pool buffer.
			buffer = make([]byte, data.ActualSize()+256, data.ActualSize()*2+512)
		} else {
			buffer = es.bp.Get()
			defer es.bp.Put(buffer)
		}
	case size >= fi.Erasure.BlockSize:
		buffer = es.bp.Get()
		defer es.bp.Put(buffer)
	case size < fi.Erasure.BlockSize:
		// No need to allocate fully blockSizeV1 buffer if the incoming data is smaller.
		buffer = make([]byte, size, 2*size+int64(fi.Erasure.ParityBlocks+fi.Erasure.DataBlocks-1))
	}

	if len(buffer) > int(fi.Erasure.BlockSize) {
		buffer = buffer[:fi.Erasure.BlockSize]
	}

	partName := "part.1"
	tempErasureObj := pathJoin(uniqueID, fi.DataDir, partName)

	// Delete temporary object in the event of failure.
	// If PutObject succeeded there would be no temporary
	// object to delete.
	var online int
	defer func() {
		if online != len(onlineDisks) {
			es.disk.RenameFile(context.Background(), minioMetaTmpBucket, tempObj, minioMetaTmpDeletedBucket, mustGetUUID())
		}
	}()

	shardFileSize := erasure.ShardFileSize(data.Size())
	writers := make([]io.Writer, len(onlineDisks))
	// inlineBuffers is non-nil when the object is small enough to be stored
	// inline inside `xl.meta` instead of as a separate part file.
	var inlineBuffers []*bytes.Buffer
	if shardFileSize >= 0 {
		if !opts.Versioned && shardFileSize < smallFileThreshold {
			inlineBuffers = make([]*bytes.Buffer, len(onlineDisks))
		} else if shardFileSize < smallFileThreshold/8 {
			inlineBuffers = make([]*bytes.Buffer, len(onlineDisks))
		}
	} else {
		// If compressed, use actual size to determine.
		if sz := erasure.ShardFileSize(data.ActualSize()); sz > 0 {
			if !opts.Versioned && sz < smallFileThreshold {
				inlineBuffers = make([]*bytes.Buffer, len(onlineDisks))
			} else if sz < smallFileThreshold/8 {
				inlineBuffers = make([]*bytes.Buffer, len(onlineDisks))
			}
		}
	}

	for i, disk := range onlineDisks {
		if disk == nil {
			continue
		}
		if !disk.IsOnline() {
			continue
		}
		if len(inlineBuffers) > 0 {
			sz := shardFileSize
			if sz < 0 {
				sz = data.ActualSize()
			}
			inlineBuffers[i] = bytes.NewBuffer(make([]byte, 0, sz))
			writers[i] = newStreamingBitrotWriterBuffer(inlineBuffers[i], DefaultBitrotAlgorithm, erasure.ShardSize())
			continue
		}
		writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tempErasureObj, shardFileSize, DefaultBitrotAlgorithm, erasure.ShardSize())
	}

	toEncode := io.Reader(data)
	if data.Size() > bigFileThreshold {
		// We use 2 buffers, so we always have a full buffer of input.
		bufA := es.bp.Get()
		bufB := es.bp.Get()
		defer es.bp.Put(bufA)
		defer es.bp.Put(bufB)
		ra, err := readahead.NewReaderBuffer(data, [][]byte{bufA[:fi.Erasure.BlockSize], bufB[:fi.Erasure.BlockSize]})
		if err == nil {
			toEncode = ra
			defer ra.Close()
		}
		logger.LogIf(ctx, err)
	}

	n, erasureErr := erasure.Encode(ctx, toEncode, writers, buffer, writeQuorum)
	closeBitrotWriters(writers)
	if erasureErr != nil {
		return ObjectInfo{}, toObjectErr(erasureErr, minioMetaTmpBucket, tempErasureObj)
	}

	// Should return IncompleteBody{} error when reader has fewer bytes
	// than specified in request header.
	if n < data.Size() {
		return ObjectInfo{}, IncompleteBody{Bucket: bucket, Object: object}
	}

	// Lock only after the data has been streamed to the temporary location,
	// so the namespace lock is held just for the metadata commit below.
	if !opts.NoLock {
		lk := es.NewNSLock(bucket, object)
		lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
		if err != nil {
			return ObjectInfo{}, err
		}
		ctx = lkctx.Context()
		defer lk.Unlock(lkctx.Cancel)
	}

	var index []byte
	if opts.IndexCB != nil {
		index = opts.IndexCB()
	}

	modTime := opts.MTime
	if opts.MTime.IsZero() {
		modTime = UTCNow()
	}

	for i, w := range writers {
		if w == nil {
			// Make sure to avoid writing to disks which we couldn't complete in erasure.Encode()
			onlineDisks[i] = nil
			continue
		}
		if len(inlineBuffers) > 0 && inlineBuffers[i] != nil {
			partsMetadata[i].Data = inlineBuffers[i].Bytes()
		} else {
			partsMetadata[i].Data = nil
		}
		partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), modTime, index)
		partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
			PartNumber: 1,
			Algorithm:  DefaultBitrotAlgorithm,
			Hash:       bitrotWriterSum(w),
		})
	}

	if opts.UserDefined["etag"] == "" {
		opts.UserDefined["etag"] = r.MD5CurrentHexString()
	}

	// Guess content-type from the extension if possible.
	if opts.UserDefined["content-type"] == "" {
		opts.UserDefined["content-type"] = mimedb.TypeByExtension(path.Ext(object))
	}

	// Fill all the necessary metadata.
	// Update `xl.meta` content on each disks.
	for index := range partsMetadata {
		partsMetadata[index].Metadata = opts.UserDefined
		partsMetadata[index].Size = n
		partsMetadata[index].ModTime = modTime
	}

	if len(inlineBuffers) > 0 {
		// Set an additional header when data is inlined.
		for index := range partsMetadata {
			partsMetadata[index].SetInlineData()
		}
	}

	// Rename the successfully written temporary object to final location.
	if onlineDisks, err = renameData(ctx, onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, bucket, object, writeQuorum); err != nil {
		if errors.Is(err, errFileNotFound) {
			return ObjectInfo{}, toObjectErr(errErasureWriteQuorum, bucket, object)
		}
		logger.LogIf(ctx, err)
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}

	for i := 0; i < len(onlineDisks); i++ {
		if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
			// Object info is the same in all disks, so we can pick
			// the first meta from online disk
			fi = partsMetadata[i]
			break
		}
	}

	fi.ReplicationState = opts.PutReplicationState()
	// Record the online-disk count so the deferred cleanup above knows
	// whether the write fully succeeded.
	online = countOnlineDisks(onlineDisks)

	// we are adding a new version to this object under the namespace lock, so this is the latest version.
	fi.IsLatest = true

	return fi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended), nil
}
// deleteObjectVersion deletes a single object version on the underlying
// disk. writeQuorum is accepted for signature parity with the multi-disk
// implementation but is unused here since there is only one disk.
func (es *erasureSingle) deleteObjectVersion(ctx context.Context, bucket, object string, writeQuorum int, fi FileInfo, forceDelMarker bool) error {
	return es.disk.DeleteVersion(ctx, bucket, object, fi, forceDelMarker)
}
// DeleteObjects deletes objects/versions in bulk, this function will still automatically split objects list
// into smaller bulks if some object names are found to be duplicated in the delete list, splitting
// into smaller bulks will avoid holding twice the write lock of the duplicated object names.
func (es *erasureSingle) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) {
	errs := make([]error, len(objects))
	dobjects := make([]DeletedObject, len(objects))
	objSets := set.NewStringSet()
	for i := range errs {
		objects[i].ObjectName = encodeDirObject(objects[i].ObjectName)

		errs[i] = checkDelObjArgs(ctx, bucket, objects[i].ObjectName)
		objSets.Add(objects[i].ObjectName)
	}

	// Acquire a bulk write lock across 'objects'
	multiDeleteLock := es.NewNSLock(bucket, objSets.ToSlice()...)
	lkctx, err := multiDeleteLock.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		// Lock failure fails the entire batch.
		for i := range errs {
			errs[i] = err
		}
		return dobjects, errs
	}
	ctx = lkctx.Context()
	defer multiDeleteLock.Unlock(lkctx.Cancel)

	writeQuorums := make([]int, len(objects))
	storageDisks := []StorageAPI{es.disk}

	for i := range objects {
		// Single drive write quorum is '1'
		writeQuorums[i] = 1
	}

	// De-duplicate requests per object name: multiple versions of the same
	// object are collected into one FileInfoVersions entry.
	versionsMap := make(map[string]FileInfoVersions, len(objects))
	for i := range objects {
		// Construct the FileInfo data that needs to be preserved on the disk.
		vr := FileInfo{
			Name:             objects[i].ObjectName,
			VersionID:        objects[i].VersionID,
			ReplicationState: objects[i].ReplicationState(),
			// save the index to set correct error at this index.
			Idx: i,
		}
		vr.SetTierFreeVersionID(mustGetUUID())
		// VersionID is not set means delete is not specific about
		// any version, look for if the bucket is versioned or not.
		if objects[i].VersionID == "" {
			// MinIO extension to bucket version configuration
			suspended := opts.VersionSuspended
			versioned := opts.Versioned
			if opts.PrefixEnabledFn != nil {
				versioned = opts.PrefixEnabledFn(objects[i].ObjectName)
			}
			if versioned || suspended {
				// Bucket is versioned and no version was explicitly
				// mentioned for deletes, create a delete marker instead.
				vr.ModTime = UTCNow()
				vr.Deleted = true
				// Versioning suspended means that we add a `null` version
				// delete marker, if not add a new version for this delete
				// marker.
				if versioned {
					vr.VersionID = mustGetUUID()
				}
			}
		}
		// De-dup same object name to collect multiple versions for same object.
		v, ok := versionsMap[objects[i].ObjectName]
		if ok {
			v.Versions = append(v.Versions, vr)
		} else {
			v = FileInfoVersions{
				Name:     vr.Name,
				Versions: []FileInfo{vr},
			}
		}
		if vr.Deleted {
			dobjects[i] = DeletedObject{
				DeleteMarker:          vr.Deleted,
				DeleteMarkerVersionID: vr.VersionID,
				DeleteMarkerMTime:     DeleteMarkerMTime{vr.ModTime},
				ObjectName:            vr.Name,
				ReplicationState:      vr.ReplicationState,
			}
		} else {
			dobjects[i] = DeletedObject{
				ObjectName:       vr.Name,
				VersionID:        vr.VersionID,
				ReplicationState: vr.ReplicationState,
			}
		}
		versionsMap[objects[i].ObjectName] = v
	}

	dedupVersions := make([]FileInfoVersions, 0, len(versionsMap))
	for _, version := range versionsMap {
		dedupVersions = append(dedupVersions, version)
	}

	// Initialize list of errors.
	delObjErrs := make([][]error, len(storageDisks))

	var wg sync.WaitGroup
	// Remove versions in bulk for each disk
	for index, disk := range storageDisks {
		wg.Add(1)
		go func(index int, disk StorageAPI) {
			defer wg.Done()
			delObjErrs[index] = make([]error, len(objects))
			if disk == nil {
				for i := range objects {
					delObjErrs[index][i] = errDiskNotFound
				}
				return
			}
			errs := disk.DeleteVersions(ctx, bucket, dedupVersions)
			for i, err := range errs {
				if err == nil {
					continue
				}
				// Map the per-dedup-entry error back to the original
				// request index via FileInfo.Idx.
				for _, v := range dedupVersions[i].Versions {
					if err == errFileNotFound || err == errFileVersionNotFound {
						if !dobjects[v.Idx].DeleteMarker {
							// Not delete marker, if not found, ok.
							continue
						}
					}
					delObjErrs[index][v.Idx] = err
				}
			}
		}(index, disk)
	}
	wg.Wait()

	// Reduce errors for each object
	for objIndex := range objects {
		diskErrs := make([]error, len(storageDisks))
		// Iterate over disks to fetch the error
		// of deleting of the current object
		for i := range delObjErrs {
			// delObjErrs[i] is not nil when disks[i] is also not nil
			if delObjErrs[i] != nil {
				diskErrs[i] = delObjErrs[i][objIndex]
			}
		}
		err := reduceWriteQuorumErrs(ctx, diskErrs, objectOpIgnoredErrs, writeQuorums[objIndex])
		if objects[objIndex].VersionID != "" {
			errs[objIndex] = toObjectErr(err, bucket, objects[objIndex].ObjectName, objects[objIndex].VersionID)
		} else {
			errs[objIndex] = toObjectErr(err, bucket, objects[objIndex].ObjectName)
		}

		// Deferred on purpose: namespace-update notifications fire once all
		// deletes in the batch have been processed, at function return.
		defer NSUpdated(bucket, objects[objIndex].ObjectName)
	}

	return dobjects, errs
}
// deletePrefix recursively force-deletes everything under the given prefix,
// removing both the plain prefix and its directory-encoded form.
func (es *erasureSingle) deletePrefix(ctx context.Context, bucket, prefix string) error {
	delOpts := DeleteOptions{
		Recursive: true,
		Force:     true,
	}
	// The directory-encoded form is removed after the plain-prefix delete
	// returns (deferred call fires on function exit).
	defer es.disk.Delete(ctx, bucket, encodeDirObject(prefix), delOpts)
	return es.disk.Delete(ctx, bucket, prefix, delOpts)
}
// DeleteObject - deletes an object, this call doesn't necessary reply
// any error as it is not necessary for the handler to reply back a
// response to the client request.
func (es *erasureSingle) DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	if err = checkDelObjArgs(ctx, bucket, object); err != nil {
		return objInfo, err
	}

	// Prefix deletes bypass the per-object path entirely.
	if opts.DeletePrefix {
		return ObjectInfo{}, toObjectErr(es.deletePrefix(ctx, bucket, object), bucket, object)
	}

	object = encodeDirObject(object)
	var lc *lifecycle.Lifecycle
	var rcfg lock.Retention
	if opts.Expiration.Expire {
		// Check if the current bucket has a configured lifecycle policy
		lc, _ = globalLifecycleSys.Get(bucket)
		rcfg, _ = globalBucketObjectLockSys.Get(bucket)
	}

	// expiration attempted on a bucket with no lifecycle
	// rules shall be rejected.
	if lc == nil && opts.Expiration.Expire {
		if opts.VersionID != "" {
			return objInfo, VersionNotFound{
				Bucket:    bucket,
				Object:    object,
				VersionID: opts.VersionID,
			}
		}
		return objInfo, ObjectNotFound{
			Bucket: bucket,
			Object: object,
		}
	}

	// Acquire a write lock before deleting the object.
	lk := es.NewNSLock(bucket, object)
	lkctx, err := lk.GetLock(ctx, globalDeleteOperationTimeout)
	if err != nil {
		return ObjectInfo{}, err
	}
	ctx = lkctx.Context()
	defer lk.Unlock(lkctx.Cancel)

	versionFound := true
	objInfo = ObjectInfo{VersionID: opts.VersionID} // version id needed in Delete API response.
	goi, writeQuorum, gerr := es.getObjectInfoAndQuorum(ctx, bucket, object, opts)
	if gerr != nil && goi.Name == "" {
		switch gerr.(type) {
		case InsufficientReadQuorum:
			return objInfo, InsufficientWriteQuorum{}
		}
		// For delete marker replication, versionID being replicated will not exist on disk
		if opts.DeleteMarker {
			versionFound = false
		} else {
			return objInfo, gerr
		}
	}

	if opts.Expiration.Expire {
		// Only proceed with expiry when the lifecycle evaluation actually
		// selects a delete action for this object.
		action := evalActionFromLifecycle(ctx, *lc, rcfg, goi, false)
		var isErr bool
		switch action {
		case lifecycle.NoneAction:
			isErr = true
		case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
			isErr = true
		}
		if isErr {
			if goi.VersionID != "" {
				return goi, VersionNotFound{
					Bucket:    bucket,
					Object:    object,
					VersionID: goi.VersionID,
				}
			}
			return goi, ObjectNotFound{
				Bucket: bucket,
				Object: object,
			}
		}
	}

	defer NSUpdated(bucket, object)

	var markDelete bool
	// Determine whether to mark object deleted for replication
	if goi.VersionID != "" {
		markDelete = true
	}

	// Default deleteMarker to true if object is under versioning
	deleteMarker := opts.Versioned

	if opts.VersionID != "" {
		// case where replica version needs to be deleted on target cluster
		if versionFound && opts.DeleteMarkerReplicationStatus() == replication.Replica {
			markDelete = false
		}
		if opts.VersionPurgeStatus().Empty() && opts.DeleteMarkerReplicationStatus().Empty() {
			markDelete = false
		}
		if opts.VersionPurgeStatus() == Complete {
			markDelete = false
		}

		// Version is found but we do not wish to create more delete markers
		// now, since VersionPurgeStatus() is already set, we can let the
		// lower layers decide this. This fixes a regression that was introduced
		// in PR #14555 where !VersionPurgeStatus.Empty() is automatically
		// considered as Delete marker true to avoid listing such objects by
		// regular ListObjects() calls. However for delete replication this
		// ends up being a problem because "upon" a successful delete this
		// ends up creating a new delete marker that is spurious and unnecessary.
		if versionFound {
			if !goi.VersionPurgeStatus.Empty() {
				deleteMarker = false
			} else if !goi.DeleteMarker { // implies a versioned delete of object
				deleteMarker = false
			}
		}
	}

	modTime := opts.MTime
	if opts.MTime.IsZero() {
		modTime = UTCNow()
	}
	fvID := mustGetUUID()
	if markDelete {
		if opts.Versioned || opts.VersionSuspended {
			if !deleteMarker {
				// versioning suspended means we add `null` version as
				// delete marker, if its not decided already.
				deleteMarker = opts.VersionSuspended && opts.VersionID == ""
			}
			fi := FileInfo{
				Name:             object,
				Deleted:          deleteMarker,
				MarkDeleted:      markDelete,
				ModTime:          modTime,
				ReplicationState: opts.DeleteReplication,
				TransitionStatus: opts.Transition.Status,
				ExpireRestored:   opts.Transition.ExpireRestored,
			}
			fi.SetTierFreeVersionID(fvID)
			if opts.Versioned {
				fi.VersionID = mustGetUUID()
				if opts.VersionID != "" {
					fi.VersionID = opts.VersionID
				}
			}
			// versioning suspended means we add `null` version as
			// delete marker. Add delete marker, since we don't have
			// any version specified explicitly. Or if a particular
			// version id needs to be replicated.
			if err = es.deleteObjectVersion(ctx, bucket, object, writeQuorum, fi, opts.DeleteMarker); err != nil {
				return objInfo, toObjectErr(err, bucket, object)
			}
			return fi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended), nil
		}
	}

	// Delete the object version on all disks.
	dfi := FileInfo{
		Name:             object,
		VersionID:        opts.VersionID,
		MarkDeleted:      markDelete,
		Deleted:          deleteMarker,
		ModTime:          modTime,
		ReplicationState: opts.DeleteReplication,
		TransitionStatus: opts.Transition.Status,
		ExpireRestored:   opts.Transition.ExpireRestored,
	}
	dfi.SetTierFreeVersionID(fvID)
	if err = es.deleteObjectVersion(ctx, bucket, object, writeQuorum, dfi, opts.DeleteMarker); err != nil {
		return objInfo, toObjectErr(err, bucket, object)
	}

	return ObjectInfo{
		Bucket:                     bucket,
		Name:                       object,
		VersionID:                  opts.VersionID,
		VersionPurgeStatusInternal: opts.DeleteReplication.VersionPurgeStatusInternal,
		ReplicationStatusInternal:  opts.DeleteReplication.ReplicationStatusInternal,
	}, nil
}
// PutObjectMetadata updates the user-defined metadata of an existing object
// version in place (no new version is created) and returns the refreshed
// ObjectInfo.
func (es *erasureSingle) PutObjectMetadata(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
	if !opts.NoLock {
		// Lock the object before updating metadata.
		lk := es.NewNSLock(bucket, object)
		lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
		if err != nil {
			return ObjectInfo{}, err
		}
		ctx = lkctx.Context()
		defer lk.Unlock(lkctx.Cancel)
	}

	disks := []StorageAPI{es.disk}

	var metaArr []FileInfo
	var errs []error

	// Read metadata associated with the object from all disks.
	metaArr, errs = readAllFileInfo(ctx, disks, bucket, object, opts.VersionID, false)

	readQuorum, _, err := objectQuorumFromMeta(ctx, metaArr, errs, 0)
	if err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}

	// List all online disks.
	onlineDisks, modTime := listOnlineDisks(disks, metaArr, errs)

	// Pick latest valid metadata.
	fi, err := pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
	if err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}

	// Metadata updates are not allowed on delete markers.
	if fi.Deleted {
		return ObjectInfo{}, toObjectErr(errMethodNotAllowed, bucket, object)
	}

	filterOnlineDisksInplace(fi, metaArr, onlineDisks)

	// if version-id is not specified retention is supposed to be set on the latest object.
	if opts.VersionID == "" {
		opts.VersionID = fi.VersionID
	}

	objInfo := fi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended)
	// Give the caller a chance to validate/mutate metadata before commit.
	if opts.EvalMetadataFn != nil {
		if err := opts.EvalMetadataFn(objInfo); err != nil {
			return ObjectInfo{}, err
		}
	}
	// Merge caller-supplied metadata over the existing keys.
	for k, v := range objInfo.UserDefined {
		fi.Metadata[k] = v
	}
	// NOTE(review): ModTime is overwritten with opts.MTime even when it is
	// the zero value — confirm callers always populate MTime here.
	fi.ModTime = opts.MTime
	fi.VersionID = opts.VersionID

	if err = es.updateObjectMeta(ctx, bucket, object, fi, onlineDisks...); err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}

	return fi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended), nil
}
// PutObjectTags - replace or add tags to an existing object.
//
// The tag string is stored verbatim under the x-amz-tagging metadata key of
// the object's xl.meta; only metadata is rewritten, not object data.
func (es *erasureSingle) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) (ObjectInfo, error) {
	// Lock the object before updating tags.
	lk := es.NewNSLock(bucket, object)
	lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return ObjectInfo{}, err
	}
	ctx = lkctx.Context()
	defer lk.Unlock(lkctx.Cancel)

	disks := []StorageAPI{es.disk}

	var metaArr []FileInfo
	var errs []error

	// Read metadata associated with the object from all disks. With an
	// explicit version ID the exact version is read; otherwise the full
	// xl.meta is read to resolve the latest version.
	if opts.VersionID != "" {
		metaArr, errs = readAllFileInfo(ctx, disks, bucket, object, opts.VersionID, false)
	} else {
		metaArr, errs = readAllXL(ctx, disks, bucket, object, false)
	}

	readQuorum, _, err := objectQuorumFromMeta(ctx, metaArr, errs, 0)
	if err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}

	// List all online disks.
	onlineDisks, modTime := listOnlineDisks(disks, metaArr, errs)

	// Pick latest valid metadata.
	fi, err := pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
	if err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}
	if fi.Deleted {
		// Tagging a delete marker: without an explicit version the object is
		// treated as absent; with one, the operation is not allowed.
		if opts.VersionID == "" {
			return ObjectInfo{}, toObjectErr(errFileNotFound, bucket, object)
		}
		return ObjectInfo{}, toObjectErr(errMethodNotAllowed, bucket, object)
	}

	filterOnlineDisksInplace(fi, metaArr, onlineDisks)

	// Record the tag string, replication state and any extra user metadata.
	fi.Metadata[xhttp.AmzObjectTagging] = tags
	fi.ReplicationState = opts.PutReplicationState()
	for k, v := range opts.UserDefined {
		fi.Metadata[k] = v
	}

	if err = es.updateObjectMeta(ctx, bucket, object, fi, onlineDisks...); err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}

	return fi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended), nil
}
// updateObjectMeta will update the metadata of a file.
//
// The write fans out to all provided disks in parallel and the per-disk
// results are reduced against a write quorum of one.
func (es *erasureSingle) updateObjectMeta(ctx context.Context, bucket, object string, fi FileInfo, onlineDisks ...StorageAPI) error {
	// Nothing to persist when no metadata is set.
	if len(fi.Metadata) == 0 {
		return nil
	}

	// Start writing `xl.meta` to all disks in parallel.
	g := errgroup.WithNErrs(len(onlineDisks))
	for i := range onlineDisks {
		i := i
		g.Go(func() error {
			disk := onlineDisks[i]
			if disk == nil {
				return errDiskNotFound
			}
			return disk.UpdateMetadata(ctx, bucket, object, fi)
		}, i)
	}

	// Wait for all routines and reduce their errors to a single verdict.
	return reduceWriteQuorumErrs(ctx, g.Wait(), objectOpIgnoredErrs, 1)
}
// DeleteObjectTags - delete object tags from an existing object.
//
// Implemented as replacing the stored tag string with the empty string via
// PutObjectTags; locking and metadata handling are inherited from there.
func (es *erasureSingle) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
	return es.PutObjectTags(ctx, bucket, object, "", opts)
}
// GetObjectTags - get object tags from an existing object.
//
// The tag string is already resolved by GetObjectInfo; this merely parses it
// into a structured tag set.
func (es *erasureSingle) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) {
	// GetObjectInfo will return tag value as well
	objInfo, err := es.GetObjectInfo(ctx, bucket, object, opts)
	if err != nil {
		return nil, err
	}

	return tags.ParseObjectTags(objInfo.UserTags)
}
// TransitionObject - transition object content to target tier.
//
// Streams the object's content to the configured remote tier, records the
// transition in xl.meta, then deletes the local data blocks (the version's
// metadata is kept so the object remains listable and restorable).
func (es *erasureSingle) TransitionObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
	tgtClient, err := globalTierConfigMgr.getDriver(opts.Transition.Tier)
	if err != nil {
		return err
	}

	// Acquire write lock before starting to transition the object.
	lk := es.NewNSLock(bucket, object)
	lkctx, err := lk.GetLock(ctx, globalDeleteOperationTimeout)
	if err != nil {
		return err
	}
	ctx = lkctx.Context()
	defer lk.Unlock(lkctx.Cancel)

	fi, metaArr, onlineDisks, err := es.getObjectFileInfo(ctx, bucket, object, opts, true)
	if err != nil {
		return toObjectErr(err, bucket, object)
	}
	if fi.Deleted {
		if opts.VersionID == "" {
			return toObjectErr(errFileNotFound, bucket, object)
		}
		// Make sure to return object info to provide extra information.
		return toObjectErr(errMethodNotAllowed, bucket, object)
	}
	// verify that the object queued for transition is identical to that on disk.
	if !opts.MTime.Equal(fi.ModTime) || !strings.EqualFold(opts.Transition.ETag, extractETag(fi.Metadata)) {
		return toObjectErr(errFileNotFound, bucket, object)
	}
	// if object already transitioned, return
	if fi.TransitionStatus == lifecycle.TransitionComplete {
		return nil
	}
	defer NSUpdated(bucket, object)

	destObj, err := genTransitionObjName(bucket)
	if err != nil {
		return err
	}

	// Stream the object through a pipe: a goroutine feeds the writer side
	// from local disks while the tier client consumes the reader side.
	pr, pw := xioutil.WaitPipe()
	go func() {
		err := es.getObjectWithFileInfo(ctx, bucket, object, 0, fi.Size, pw, fi, metaArr, onlineDisks)
		pw.CloseWithError(err)
	}()

	var rv remoteVersionID
	rv, err = tgtClient.Put(ctx, destObj, pr, fi.Size)
	pr.CloseWithError(err)
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("Unable to transition %s/%s(%s) to %s tier: %w", bucket, object, opts.VersionID, opts.Transition.Tier, err))
		return err
	}
	// Mark the version as transitioned and remember where it now lives.
	fi.TransitionStatus = lifecycle.TransitionComplete
	fi.TransitionedObjName = destObj
	fi.TransitionTier = opts.Transition.Tier
	fi.TransitionVersionID = string(rv)
	eventName := event.ObjectTransitionComplete

	// we now know the number of blocks this object needs for data and parity.
	// writeQuorum is dataBlocks + 1
	writeQuorum := fi.Erasure.DataBlocks
	if fi.Erasure.DataBlocks == fi.Erasure.ParityBlocks {
		writeQuorum++
	}

	// Free the local data blocks; fi carries the transition metadata so the
	// version stays visible. Failure is reported via the event, not returned
	// early, so the notification below always fires.
	if err = es.deleteObjectVersion(ctx, bucket, object, writeQuorum, fi, false); err != nil {
		eventName = event.ObjectTransitionFailed
	}

	objInfo := fi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended)
	sendEvent(eventArgs{
		EventName:  eventName,
		BucketName: bucket,
		Object:     objInfo,
		Host:       "Internal: [ILM-Transition]",
	})
	auditLogLifecycle(ctx, objInfo, ILMTransition)
	return err
}
// RestoreTransitionedObject - restore transitioned object content locally on this cluster.
// This is similar to PostObjectRestore from AWS GLACIER
// storage class. When PostObjectRestore API is called, a temporary copy of the object
// is restored locally to the bucket on source cluster until the restore expiry date.
// The copy that was transitioned continues to reside in the transitioned tier.
//
// Thin public wrapper; the actual work happens in restoreTransitionedObject.
func (es *erasureSingle) RestoreTransitionedObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
	return es.restoreTransitionedObject(ctx, bucket, object, opts)
}
// update restore status header in the metadata
//
// On successful restore the x-amz-restore header is stamped with the expiry;
// on failure it is removed so the restore can be retried later. The change is
// persisted via a metadata-only self-copy.
func (es *erasureSingle) updateRestoreMetadata(ctx context.Context, bucket, object string, objInfo ObjectInfo, opts ObjectOptions, rerr error) error {
	meta := objInfo.Clone()
	meta.metadataOnly = true // Perform only metadata updates.

	if rerr != nil { // allow retry in the case of failure to restore
		delete(meta.UserDefined, xhttp.AmzRestore)
	} else {
		meta.UserDefined[xhttp.AmzRestore] = completedRestoreObj(opts.Transition.RestoreExpiry).String()
	}

	versionOpts := ObjectOptions{VersionID: meta.VersionID}
	_, err := es.CopyObject(ctx, bucket, object, bucket, object, meta, versionOpts, versionOpts)
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("Unable to update transition restore metadata for %s/%s(%s): %s", bucket, object, meta.VersionID, err))
	}
	return err
}
// restoreTransitionedObject for multipart object chunks the file stream from remote tier into the same number of parts
// as in the xl.meta for this version and rehydrates the part.n into the fi.DataDir for this version as in the xl.meta
func (es *erasureSingle) restoreTransitionedObject(ctx context.Context, bucket string, object string, opts ObjectOptions) error {
	// Every exit goes through this helper so the restore outcome is always
	// recorded in the object's metadata. The metadata update is best-effort:
	// its own error is intentionally dropped in favor of the restore error.
	setRestoreHeaderFn := func(oi ObjectInfo, rerr error) error {
		es.updateRestoreMetadata(ctx, bucket, object, oi, opts, rerr)
		return rerr
	}
	var oi ObjectInfo
	// get the file info on disk for transitioned object
	actualfi, _, _, err := es.getObjectFileInfo(ctx, bucket, object, opts, false)
	if err != nil {
		return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object))
	}

	oi = actualfi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended)
	ropts := putRestoreOpts(bucket, object, opts.Transition.RestoreRequest, oi)

	// Single-part objects are restored with a plain PutObject.
	if len(oi.Parts) == 1 {
		var rs *HTTPRangeSpec
		gr, err := getTransitionedObjectReader(ctx, bucket, object, rs, http.Header{}, oi, opts)
		if err != nil {
			return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object))
		}
		defer gr.Close()
		hashReader, err := hash.NewReader(gr, gr.ObjInfo.Size, "", "", gr.ObjInfo.Size)
		if err != nil {
			return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object))
		}
		pReader := NewPutObjReader(hashReader)
		// Stamp the restore-completed header up front for the new copy.
		ropts.UserDefined[xhttp.AmzRestore] = completedRestoreObj(opts.Transition.RestoreExpiry).String()
		_, err = es.PutObject(ctx, bucket, object, pReader, ropts)
		return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object))
	}

	// Multipart: re-upload each original part from the tiered stream.
	uploadID, err := es.NewMultipartUpload(ctx, bucket, object, ropts)
	if err != nil {
		return setRestoreHeaderFn(oi, err)
	}

	var uploadedParts []CompletePart
	var rs *HTTPRangeSpec
	// get reader from the warm backend - note that even in the case of encrypted objects, this stream is still encrypted.
	gr, err := getTransitionedObjectReader(ctx, bucket, object, rs, http.Header{}, oi, opts)
	if err != nil {
		return setRestoreHeaderFn(oi, err)
	}
	defer gr.Close()

	// rehydrate the parts back on disk as per the original xl.meta prior to transition
	for _, partInfo := range oi.Parts {
		// Each hash.NewReader consumes exactly partInfo.Size bytes of the
		// single continuous stream, reproducing the original part boundaries.
		hr, err := hash.NewReader(gr, partInfo.Size, "", "", partInfo.Size)
		if err != nil {
			return setRestoreHeaderFn(oi, err)
		}
		pInfo, err := es.PutObjectPart(ctx, bucket, object, uploadID, partInfo.Number, NewPutObjReader(hr), ObjectOptions{})
		if err != nil {
			return setRestoreHeaderFn(oi, err)
		}
		// A size mismatch means the tiered stream no longer matches the
		// recorded part layout.
		if pInfo.Size != partInfo.Size {
			return setRestoreHeaderFn(oi, InvalidObjectState{Bucket: bucket, Object: object})
		}
		uploadedParts = append(uploadedParts, CompletePart{
			PartNumber: pInfo.PartNumber,
			ETag:       pInfo.ETag,
		})
	}
	_, err = es.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{
		MTime: oi.ModTime,
	})
	return setRestoreHeaderFn(oi, err)
}
// getUploadIDDir returns the path (relative to minioMetaMultipartBucket) that
// holds the metadata for the given upload ID of bucket/object.
func (es *erasureSingle) getUploadIDDir(bucket, object, uploadID string) string {
	return pathJoin(es.getMultipartSHADir(bucket, object), uploadID)
}
// getMultipartSHADir returns the SHA256 hash of "bucket/object", used as the
// per-object directory name under the multipart metadata bucket.
func (es *erasureSingle) getMultipartSHADir(bucket, object string) string {
	return getSHA256Hash([]byte(pathJoin(bucket, object)))
}
// checkUploadIDExists - verify if a given uploadID exists and is valid.
//
// Reads the upload's xl.meta from the multipart metadata bucket and validates
// it against the read quorum.
func (es *erasureSingle) checkUploadIDExists(ctx context.Context, bucket, object, uploadID string) (err error) {
	defer func() {
		// Translate a missing xl.meta into the upload-specific error.
		if err == errFileNotFound {
			err = errUploadIDNotFound
		}
	}()

	disks := []StorageAPI{es.disk}

	// Read metadata associated with the object from all disks.
	metaArr, errs := readAllFileInfo(ctx, disks, minioMetaMultipartBucket, es.getUploadIDDir(bucket, object, uploadID), "", false)

	readQuorum, _, err := objectQuorumFromMeta(ctx, metaArr, errs, 0)
	if err != nil {
		return err
	}

	if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
		return reducedErr
	}

	// List all online disks.
	_, modTime := listOnlineDisks(disks, metaArr, errs)

	// Pick latest valid metadata; only its validity matters here, the value
	// itself is discarded.
	_, err = pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
	return err
}
// Removes part given by partName belonging to a mulitpart upload from minioMetaBucket
func (es *erasureSingle) removeObjectPart(bucket, object, uploadID, dataDir string, partNumber int) {
	uploadIDPath := es.getUploadIDDir(bucket, object, uploadID)
	curpartPath := pathJoin(uploadIDPath, dataDir, fmt.Sprintf("part.%d", partNumber))
	storageDisks := []StorageAPI{es.disk}

	// Fan the delete out to every disk and wait for all of them.
	g := errgroup.WithNErrs(len(storageDisks))
	for i := range storageDisks {
		if storageDisks[i] == nil {
			continue
		}
		i := i
		g.Go(func() error {
			// Ignoring failure to remove parts that weren't present in CompleteMultipartUpload
			// requests. xl.meta is the authoritative source of truth on which parts constitute
			// the object. The presence of parts that don't belong in the object doesn't affect correctness.
			_ = storageDisks[i].Delete(context.TODO(), minioMetaMultipartBucket, curpartPath, DeleteOptions{
				Recursive: false,
				Force:     false,
			})
			return nil
		}, i)
	}
	g.Wait()
}
// Remove the old multipart uploads on the given disk.
//
// Two sweeps: expired entries under the multipart metadata bucket are moved
// to the tmp-deleted bucket (removed later by the trash routines), and stale
// volumes under the tmp bucket are deleted directly.
func (es *erasureSingle) cleanupStaleUploadsOnDisk(ctx context.Context, disk StorageAPI, expiry time.Duration) {
	now := time.Now()
	diskPath := disk.Endpoint().Path

	// Walk <multipart>/<shaDir>/<uploadIDDir> looking for expired uploads.
	readDirFn(pathJoin(diskPath, minioMetaMultipartBucket), func(shaDir string, typ os.FileMode) error {
		return readDirFn(pathJoin(diskPath, minioMetaMultipartBucket, shaDir), func(uploadIDDir string, typ os.FileMode) error {
			uploadIDPath := pathJoin(shaDir, uploadIDDir)
			fi, err := disk.ReadVersion(ctx, minioMetaMultipartBucket, uploadIDPath, "", false)
			if err != nil {
				// Unreadable entries are skipped; they will be re-examined on
				// the next cleanup cycle.
				return nil
			}
			// Throttle so cleanup does not monopolize disk I/O.
			wait := es.deletedCleanupSleeper.Timer(ctx)
			if now.Sub(fi.ModTime) > expiry {
				es.disk.RenameFile(context.Background(), minioMetaMultipartBucket, uploadIDPath, minioMetaTmpDeletedBucket, mustGetUUID())
			}
			wait()
			return nil
		})
	})

	// Second sweep: expire stale temporary volumes.
	readDirFn(pathJoin(diskPath, minioMetaTmpBucket), func(tmpDir string, typ os.FileMode) error {
		if tmpDir == ".trash/" { // do not remove .trash/ here, it has its own routines
			return nil
		}
		vi, err := disk.StatVol(ctx, pathJoin(minioMetaTmpBucket, tmpDir))
		if err != nil {
			return nil
		}
		wait := es.deletedCleanupSleeper.Timer(ctx)
		if now.Sub(vi.Created) > expiry {
			// Best-effort removal; the error is intentionally ignored.
			disk.Delete(ctx, minioMetaTmpBucket, tmpDir, DeleteOptions{
				Recursive: true,
				Force:     false,
			})
		}
		wait()
		return nil
	})
}
// ListMultipartUploads - lists all the pending multipart
// uploads for a particular object in a bucket.
//
// Implements minimal S3 compatible ListMultipartUploads API. We do
// not support prefix based listing, this is a deliberate attempt
// towards simplification of multipart APIs.
// The resulting ListMultipartsInfo structure is unmarshalled directly as XML.
func (es *erasureSingle) ListMultipartUploads(ctx context.Context, bucket, object, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) {
	if err := checkListMultipartArgs(ctx, bucket, object, keyMarker, uploadIDMarker, delimiter, es); err != nil {
		return ListMultipartsInfo{}, err
	}

	result.MaxUploads = maxUploads
	result.KeyMarker = keyMarker
	result.Prefix = object
	result.Delimiter = delimiter

	uploadIDs, err := es.disk.ListDir(ctx, minioMetaMultipartBucket, es.getMultipartSHADir(bucket, object), -1)
	if err != nil {
		if err == errFileNotFound {
			// No multipart directory means no pending uploads.
			return result, nil
		}
		logger.LogIf(ctx, err)
		return result, toObjectErr(err, bucket, object)
	}

	for i := range uploadIDs {
		uploadIDs[i] = strings.TrimSuffix(uploadIDs[i], SlashSeparator)
	}

	// S3 spec says uploadIDs should be sorted based on initiated time, we need
	// to read the metadata entry.
	var uploads []MultipartInfo

	populatedUploadIds := set.NewStringSet()

	for _, uploadID := range uploadIDs {
		if populatedUploadIds.Contains(uploadID) {
			continue
		}
		fi, err := es.disk.ReadVersion(ctx, minioMetaMultipartBucket, pathJoin(es.getUploadIDDir(bucket, object, uploadID)), "", false)
		if err != nil {
			return result, toObjectErr(err, bucket, object)
		}
		populatedUploadIds.Add(uploadID)
		uploads = append(uploads, MultipartInfo{
			Object:    object,
			UploadID:  uploadID,
			Initiated: fi.ModTime,
		})
	}

	sort.Slice(uploads, func(i int, j int) bool {
		return uploads[i].Initiated.Before(uploads[j].Initiated)
	})

	// Resume listing just past the requested marker, if any. A marker that is
	// not found skips the whole list, same as a marker past the end.
	// (The previous version carried an always-true second condition and an
	// unreachable trailing increment; this loop is behaviorally identical.)
	uploadIndex := 0
	if uploadIDMarker != "" {
		for uploadIndex < len(uploads) {
			if uploads[uploadIndex].UploadID == uploadIDMarker {
				uploadIndex++
				break
			}
			uploadIndex++
		}
	}

	// Collect up to maxUploads entries, remembering the last one returned so
	// the client can resume from it.
	for uploadIndex < len(uploads) {
		result.Uploads = append(result.Uploads, uploads[uploadIndex])
		result.NextUploadIDMarker = uploads[uploadIndex].UploadID
		uploadIndex++
		if len(result.Uploads) == maxUploads {
			break
		}
	}

	result.IsTruncated = uploadIndex < len(uploads)
	if !result.IsTruncated {
		result.NextKeyMarker = ""
		result.NextUploadIDMarker = ""
	}
	return result, nil
}
// newMultipartUpload - wrapper for initializing a new multipart
// request; returns a unique upload id.
//
// Internally this function creates 'uploads.json' associated for the
// incoming object at
// '.minio.sys/multipart/bucket/object/uploads.json' on all the
// disks. `uploads.json` carries metadata regarding on-going multipart
// operation(s) on the object.
func (es *erasureSingle) newMultipartUpload(ctx context.Context, bucket string, object string, opts ObjectOptions) (string, error) {
	onlineDisks := []StorageAPI{es.disk}
	// Single-disk setup: all data, no parity.
	parityDrives := 0
	dataDrives := len(onlineDisks) - parityDrives

	// we now know the number of blocks this object needs for data and parity.
	// establish the writeQuorum using this data
	writeQuorum := dataDrives
	if dataDrives == parityDrives {
		writeQuorum++
	}

	// Initialize parts metadata
	partsMetadata := make([]FileInfo, len(onlineDisks))

	fi := newFileInfo(pathJoin(bucket, object), dataDrives, parityDrives)
	fi.VersionID = opts.VersionID
	if opts.Versioned && fi.VersionID == "" {
		fi.VersionID = mustGetUUID()
	}
	fi.DataDir = mustGetUUID()

	// Initialize erasure metadata.
	for index := range partsMetadata {
		partsMetadata[index] = fi
	}

	// Guess content-type from the extension if possible.
	if opts.UserDefined["content-type"] == "" {
		opts.UserDefined["content-type"] = mimedb.TypeByExtension(path.Ext(object))
	}

	modTime := opts.MTime
	if opts.MTime.IsZero() {
		modTime = UTCNow()
	}

	onlineDisks, partsMetadata = shuffleDisksAndPartsMetadata(onlineDisks, partsMetadata, fi)

	// Fill all the necessary metadata.
	// Update `xl.meta` content on each disks.
	for index := range partsMetadata {
		partsMetadata[index].Fresh = true
		partsMetadata[index].ModTime = modTime
		partsMetadata[index].Metadata = opts.UserDefined
	}

	uploadID := mustGetUUID()
	uploadIDPath := es.getUploadIDDir(bucket, object, uploadID)

	// Write updated `xl.meta` to all disks.
	if _, err := writeUniqueFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum); err != nil {
		return "", toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
	}

	// Return success.
	return uploadID, nil
}
// NewMultipartUpload - initialize a new multipart upload, returns a
// unique id. The unique id returned here is of UUID form, for each
// subsequent request each UUID is unique.
//
// Implements S3 compatible initiate multipart API.
func (es *erasureSingle) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) {
	err := checkNewMultipartArgs(ctx, bucket, object, es)
	if err != nil {
		return "", err
	}

	// No metadata is set, allocate a new one.
	if opts.UserDefined == nil {
		opts.UserDefined = map[string]string{}
	}
	return es.newMultipartUpload(ctx, bucket, object, opts)
}
// CopyObjectPart - reads incoming stream and internally erasure codes
// them. This call is similar to put object part operation but the source
// data is read from an existing object.
//
// Implements S3 compatible Upload Part Copy API.
func (es *erasureSingle) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) {
	// Delegate the actual work to PutObjectPart, feeding it the source
	// object's reader directly.
	info, err := es.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, NewPutObjReader(srcInfo.Reader), dstOpts)
	if err == nil {
		// Success.
		return info, nil
	}
	return pi, toObjectErr(err, dstBucket, dstObject)
}
// PutObjectPart - reads incoming stream and internally erasure codes
// them. This call is similar to single put operation but it is part
// of the multipart transaction.
//
// Implements S3 compatible Upload Part API.
//
// Lock ordering: a write lock on the part ID is held for the whole call; a
// read lock on the upload ID is taken only while reading upload metadata and
// a write lock on the upload ID only while committing it.
func (es *erasureSingle) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (pi PartInfo, err error) {
	if err := checkPutObjectPartArgs(ctx, bucket, object, es); err != nil {
		return PartInfo{}, err
	}

	// Write lock for this part ID.
	// Held throughout the operation.
	partIDLock := es.NewNSLock(bucket, pathJoin(object, uploadID, strconv.Itoa(partID)))
	plkctx, err := partIDLock.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return PartInfo{}, err
	}
	pctx := plkctx.Context()
	defer partIDLock.Unlock(plkctx.Cancel)

	// Read lock for upload id.
	// Only held while reading the upload metadata.
	uploadIDRLock := es.NewNSLock(bucket, pathJoin(object, uploadID))
	rlkctx, err := uploadIDRLock.GetRLock(ctx, globalOperationTimeout)
	if err != nil {
		return PartInfo{}, err
	}
	rctx := rlkctx.Context()
	defer func() {
		// The lock variable is nil-ed after the early release below, so this
		// deferred unlock fires only on paths that exit before that point.
		if uploadIDRLock != nil {
			uploadIDRLock.RUnlock(rlkctx.Cancel)
		}
	}()

	data := r.Reader
	// Validate input data size and it can never be less than zero.
	if data.Size() < -1 {
		logger.LogIf(rctx, errInvalidArgument, logger.Application)
		return pi, toObjectErr(errInvalidArgument)
	}

	var partsMetadata []FileInfo
	var errs []error
	uploadIDPath := es.getUploadIDDir(bucket, object, uploadID)

	// Validates if upload ID exists.
	if err = es.checkUploadIDExists(rctx, bucket, object, uploadID); err != nil {
		return pi, toObjectErr(err, bucket, object, uploadID)
	}

	storageDisks := []StorageAPI{es.disk}

	// Read metadata associated with the object from all disks.
	partsMetadata, errs = readAllFileInfo(rctx, storageDisks, minioMetaMultipartBucket,
		uploadIDPath, "", false)

	// Unlock upload id locks before, so others can get it.
	uploadIDRLock.RUnlock(rlkctx.Cancel)
	uploadIDRLock = nil

	// get Quorum for this object
	_, writeQuorum, err := objectQuorumFromMeta(pctx, partsMetadata, errs, 0)
	if err != nil {
		return pi, toObjectErr(err, bucket, object)
	}

	reducedErr := reduceWriteQuorumErrs(pctx, errs, objectOpIgnoredErrs, writeQuorum)
	if reducedErr == errErasureWriteQuorum {
		return pi, toObjectErr(reducedErr, bucket, object)
	}

	// List all online disks.
	onlineDisks, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)

	// Pick one from the first valid metadata.
	fi, err := pickValidFileInfo(pctx, partsMetadata, modTime, writeQuorum)
	if err != nil {
		return pi, err
	}

	onlineDisks = shuffleDisks(onlineDisks, fi.Erasure.Distribution)

	// Need a unique name for the part being written in minioMetaBucket to
	// accommodate concurrent PutObjectPart requests
	partSuffix := fmt.Sprintf("part.%d", partID)
	tmpPart := mustGetUUID()
	tmpPartPath := pathJoin(tmpPart, partSuffix)

	// Delete the temporary object part. If PutObjectPart succeeds there would be nothing to delete.
	var online int
	defer func() {
		// online is only set at the very end, so any early return moves the
		// temporary part into the deleted-tmp bucket for cleanup.
		if online != len(onlineDisks) {
			es.disk.RenameFile(context.Background(), minioMetaTmpBucket, tmpPart, minioMetaTmpDeletedBucket, mustGetUUID())
		}
	}()

	erasure, err := NewErasure(pctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize)
	if err != nil {
		return pi, toObjectErr(err, bucket, object)
	}

	// Fetch buffer for I/O, returns from the pool if not allocates a new one and returns.
	var buffer []byte
	switch size := data.Size(); {
	case size == 0:
		buffer = make([]byte, 1) // Allocate atleast a byte to reach EOF
	case size == -1:
		// Unknown size: pick a small exact buffer for known-small payloads,
		// otherwise fall back to the shared pool.
		if size := data.ActualSize(); size > 0 && size < fi.Erasure.BlockSize {
			buffer = make([]byte, data.ActualSize()+256, data.ActualSize()*2+512)
		} else {
			buffer = es.bp.Get()
			defer es.bp.Put(buffer)
		}
	case size >= fi.Erasure.BlockSize:
		buffer = es.bp.Get()
		defer es.bp.Put(buffer)
	case size < fi.Erasure.BlockSize:
		// No need to allocate fully fi.Erasure.BlockSize buffer if the incoming data is smalles.
		buffer = make([]byte, size, 2*size+int64(fi.Erasure.ParityBlocks+fi.Erasure.DataBlocks-1))
	}

	if len(buffer) > int(fi.Erasure.BlockSize) {
		buffer = buffer[:fi.Erasure.BlockSize]
	}

	// One bitrot-protected writer per online disk; nil disks keep nil slots.
	writers := make([]io.Writer, len(onlineDisks))
	for i, disk := range onlineDisks {
		if disk == nil {
			continue
		}
		writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tmpPartPath, erasure.ShardFileSize(data.Size()), DefaultBitrotAlgorithm, erasure.ShardSize())
	}

	toEncode := io.Reader(data)
	if data.Size() > bigFileThreshold {
		// Add input readahead.
		// We use 2 buffers, so we always have a full buffer of input.
		bufA := es.bp.Get()
		bufB := es.bp.Get()
		defer es.bp.Put(bufA)
		defer es.bp.Put(bufB)
		ra, err := readahead.NewReaderBuffer(data, [][]byte{bufA[:fi.Erasure.BlockSize], bufB[:fi.Erasure.BlockSize]})
		if err == nil {
			toEncode = ra
			defer ra.Close()
		}
	}

	n, err := erasure.Encode(pctx, toEncode, writers, buffer, writeQuorum)
	closeBitrotWriters(writers)
	if err != nil {
		return pi, toObjectErr(err, bucket, object)
	}

	// Should return IncompleteBody{} error when reader has fewer bytes
	// than specified in request header.
	if n < data.Size() {
		return pi, IncompleteBody{Bucket: bucket, Object: object}
	}

	// A nil writer slot marks a disk that went offline during encode.
	for i := range writers {
		if writers[i] == nil {
			onlineDisks[i] = nil
		}
	}

	// Acquire write lock to update metadata.
	uploadIDWLock := es.NewNSLock(bucket, pathJoin(object, uploadID))
	wlkctx, err := uploadIDWLock.GetLock(pctx, globalOperationTimeout)
	if err != nil {
		return PartInfo{}, err
	}
	wctx := wlkctx.Context()
	defer uploadIDWLock.Unlock(wlkctx.Cancel)

	// Validates if upload ID exists (it may have been aborted/completed
	// while this part was being written).
	if err = es.checkUploadIDExists(wctx, bucket, object, uploadID); err != nil {
		return pi, toObjectErr(err, bucket, object, uploadID)
	}

	// Rename temporary part file to its final location.
	partPath := pathJoin(uploadIDPath, fi.DataDir, partSuffix)
	onlineDisks, err = renamePart(wctx, onlineDisks, minioMetaTmpBucket, tmpPartPath, minioMetaMultipartBucket, partPath, writeQuorum)
	if err != nil {
		return pi, toObjectErr(err, minioMetaMultipartBucket, partPath)
	}

	// Read metadata again because it might be updated with parallel upload of another part.
	partsMetadata, errs = readAllFileInfo(wctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, "", false)
	reducedErr = reduceWriteQuorumErrs(wctx, errs, objectOpIgnoredErrs, writeQuorum)
	if reducedErr == errErasureWriteQuorum {
		return pi, toObjectErr(reducedErr, bucket, object)
	}

	// Get current highest version based on re-read partsMetadata.
	onlineDisks, modTime = listOnlineDisks(onlineDisks, partsMetadata, errs)

	// Pick one from the first valid metadata.
	fi, err = pickValidFileInfo(wctx, partsMetadata, modTime, writeQuorum)
	if err != nil {
		return pi, err
	}

	// Once part is successfully committed, proceed with updating erasure metadata.
	fi.ModTime = UTCNow()

	md5hex := r.MD5CurrentHexString()
	var index []byte
	if opts.IndexCB != nil {
		index = opts.IndexCB()
	}

	// Add the current part.
	fi.AddObjectPart(partID, md5hex, n, data.ActualSize(), fi.ModTime, index)

	// Propagate the new part list and bitrot checksums to every online disk.
	for i, disk := range onlineDisks {
		if disk == OfflineDisk {
			continue
		}
		partsMetadata[i].Size = fi.Size
		partsMetadata[i].ModTime = fi.ModTime
		partsMetadata[i].Parts = fi.Parts
		partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
			PartNumber: partID,
			Algorithm:  DefaultBitrotAlgorithm,
			Hash:       bitrotWriterSum(writers[i]),
		})
	}

	// Writes update `xl.meta` format for each disk.
	if _, err = writeUniqueFileInfo(wctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum); err != nil {
		return pi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
	}

	// Marks success for the cleanup defer above.
	online = countOnlineDisks(onlineDisks)

	// Return success.
	return PartInfo{
		PartNumber:   partID,
		ETag:         md5hex,
		LastModified: fi.ModTime,
		Size:         n,
		ActualSize:   data.ActualSize(),
	}, nil
}
// GetMultipartInfo returns multipart metadata uploaded during newMultipartUpload, used
// by callers to verify object states
// - encrypted
// - compressed
func (es *erasureSingle) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (MultipartInfo, error) {
	if err := checkListPartsArgs(ctx, bucket, object, es); err != nil {
		return MultipartInfo{}, err
	}

	result := MultipartInfo{
		Bucket:   bucket,
		Object:   object,
		UploadID: uploadID,
	}

	// Hold a read lock on the upload ID while its metadata is read.
	uploadIDLock := es.NewNSLock(bucket, pathJoin(object, uploadID))
	lkctx, err := uploadIDLock.GetRLock(ctx, globalOperationTimeout)
	if err != nil {
		return MultipartInfo{}, err
	}
	ctx = lkctx.Context()
	defer uploadIDLock.RUnlock(lkctx.Cancel)

	if err := es.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
		return result, toObjectErr(err, bucket, object, uploadID)
	}

	uploadIDPath := es.getUploadIDDir(bucket, object, uploadID)

	storageDisks := []StorageAPI{es.disk}

	// Read metadata associated with the object from all disks.
	partsMetadata, errs := readAllFileInfo(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath, opts.VersionID, false)

	// get Quorum for this object
	readQuorum, _, err := objectQuorumFromMeta(ctx, partsMetadata, errs, 0)
	if err != nil {
		return result, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
	}

	// This is a read path: reduce against the READ quorum. The previous code
	// called reduceWriteQuorumErrs here, which reports errErasureWriteQuorum
	// and therefore could never satisfy the errErasureReadQuorum check below.
	reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum)
	if reducedErr == errErasureReadQuorum {
		return result, toObjectErr(reducedErr, minioMetaMultipartBucket, uploadIDPath)
	}

	_, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)

	// Pick one from the first valid metadata.
	fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, readQuorum)
	if err != nil {
		return result, err
	}

	result.UserDefined = cloneMSS(fi.Metadata)
	return result, nil
}
// ListObjectParts - lists all previously uploaded parts for a given
// object and uploadID. Takes additional input of part-number-marker
// to indicate where the listing should begin from.
//
// Implements S3 compatible ListObjectParts API. The resulting
// ListPartsInfo structure is marshaled directly into XML and
// replied back to the client.
func (es *erasureSingle) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int, opts ObjectOptions) (result ListPartsInfo, err error) {
	if err := checkListPartsArgs(ctx, bucket, object, es); err != nil {
		return ListPartsInfo{}, err
	}

	// Hold a read lock on the upload ID while its parts are listed.
	uploadIDLock := es.NewNSLock(bucket, pathJoin(object, uploadID))
	lkctx, err := uploadIDLock.GetRLock(ctx, globalOperationTimeout)
	if err != nil {
		return ListPartsInfo{}, err
	}
	ctx = lkctx.Context()
	defer uploadIDLock.RUnlock(lkctx.Cancel)

	if err := es.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
		return result, toObjectErr(err, bucket, object, uploadID)
	}

	uploadIDPath := es.getUploadIDDir(bucket, object, uploadID)

	storageDisks := []StorageAPI{es.disk}

	// Read metadata associated with the object from all disks.
	partsMetadata, errs := readAllFileInfo(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath, "", false)

	// get Quorum for this object
	_, writeQuorum, err := objectQuorumFromMeta(ctx, partsMetadata, errs, 0)
	if err != nil {
		return result, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
	}

	reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
	if reducedErr == errErasureWriteQuorum {
		return result, toObjectErr(reducedErr, minioMetaMultipartBucket, uploadIDPath)
	}

	_, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)

	// Pick one from the first valid metadata.
	fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum)
	if err != nil {
		return result, err
	}

	// Populate the result stub.
	result.Bucket = bucket
	result.Object = object
	result.UploadID = uploadID
	result.MaxParts = maxParts
	result.PartNumberMarker = partNumberMarker
	result.UserDefined = cloneMSS(fi.Metadata)

	// For empty number of parts or maxParts as zero, return right here.
	if len(fi.Parts) == 0 || maxParts == 0 {
		return result, nil
	}

	// Limit output to maxPartsList.
	if maxParts > maxPartsList {
		maxParts = maxPartsList
	}

	// Only parts with higher part numbers will be listed.
	partIdx := objectPartIndex(fi.Parts, partNumberMarker)
	parts := fi.Parts
	if partIdx != -1 {
		parts = fi.Parts[partIdx+1:]
	}
	count := maxParts
	for _, part := range parts {
		result.Parts = append(result.Parts, PartInfo{
			PartNumber:   part.Number,
			ETag:         part.ETag,
			LastModified: fi.ModTime,
			Size:         part.Size,
		})
		count--
		if count == 0 {
			break
		}
	}

	// If listed entries are more than maxParts, we set IsTruncated as true.
	if len(parts) > len(result.Parts) {
		result.IsTruncated = true
		// Make sure to fill next part number marker if IsTruncated is
		// true for subsequent listing.
		nextPartNumberMarker := result.Parts[len(result.Parts)-1].PartNumber
		result.NextPartNumberMarker = nextPartNumberMarker
	}
	return result, nil
}
// CompleteMultipartUpload - completes an ongoing multipart
// transaction after receiving all the parts indicated by the client.
// Returns an md5sum calculated by concatenating all the individual
// md5sums of all the parts.
//
// Implements S3 compatible Complete multipart API.
func (es *erasureSingle) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart, opts ObjectOptions) (oi ObjectInfo, err error) {
	if err = checkCompleteMultipartArgs(ctx, bucket, object, es); err != nil {
		return oi, err
	}

	// Hold read-locks to verify uploaded parts, also disallows
	// parallel part uploads as well.
	uploadIDLock := es.NewNSLock(bucket, pathJoin(object, uploadID))
	rlkctx, err := uploadIDLock.GetRLock(ctx, globalOperationTimeout)
	if err != nil {
		return oi, err
	}
	rctx := rlkctx.Context()
	defer uploadIDLock.RUnlock(rlkctx.Cancel)

	if err = es.checkUploadIDExists(rctx, bucket, object, uploadID); err != nil {
		return oi, toObjectErr(err, bucket, object, uploadID)
	}

	uploadIDPath := es.getUploadIDDir(bucket, object, uploadID)

	// Single-drive layout: the only disk forms the whole "disk set".
	storageDisks := []StorageAPI{es.disk}

	// Read metadata associated with the object from all disks.
	partsMetadata, errs := readAllFileInfo(rctx, storageDisks, minioMetaMultipartBucket, uploadIDPath, "", false)

	// get Quorum for this object (write quorum is 1 on a single drive).
	_, writeQuorum, err := objectQuorumFromMeta(rctx, partsMetadata, errs, 0)
	if err != nil {
		return oi, toObjectErr(err, bucket, object)
	}

	reducedErr := reduceWriteQuorumErrs(rctx, errs, objectOpIgnoredErrs, writeQuorum)
	if reducedErr == errErasureWriteQuorum {
		return oi, toObjectErr(reducedErr, bucket, object)
	}

	onlineDisks, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)

	// Pick one from the first valid metadata.
	fi, err := pickValidFileInfo(rctx, partsMetadata, modTime, writeQuorum)
	if err != nil {
		return oi, err
	}

	// Calculate full object size.
	var objectSize int64

	// Calculate consolidated actual size (pre-compression/encryption size).
	var objectActualSize int64

	// Order online disks in accordance with distribution order.
	// Order parts metadata in accordance with distribution order.
	onlineDisks, partsMetadata = shuffleDisksAndPartsMetadataByIndex(onlineDisks, partsMetadata, fi)

	// Save current erasure metadata for validation; fi.Parts is rebuilt
	// below from the client-supplied part list.
	currentFI := fi

	// Allocate parts similar to incoming slice.
	fi.Parts = make([]ObjectPartInfo, len(parts))

	// Validate each part and then commit to disk.
	for i, part := range parts {
		partIdx := objectPartIndex(currentFI.Parts, part.PartNumber)
		// All parts should have same part number.
		if partIdx == -1 {
			invp := InvalidPart{
				PartNumber: part.PartNumber,
				GotETag:    part.ETag,
			}
			return oi, invp
		}

		// ensure that part ETag is canonicalized to strip off extraneous quotes
		part.ETag = canonicalizeETag(part.ETag)
		if currentFI.Parts[partIdx].ETag != part.ETag {
			invp := InvalidPart{
				PartNumber: part.PartNumber,
				ExpETag:    currentFI.Parts[partIdx].ETag,
				GotETag:    part.ETag,
			}
			return oi, invp
		}

		// All parts except the last part has to be atleast 5MB.
		if (i < len(parts)-1) && !isMinAllowedPartSize(currentFI.Parts[partIdx].ActualSize) {
			return oi, PartTooSmall{
				PartNumber: part.PartNumber,
				PartSize:   currentFI.Parts[partIdx].ActualSize,
				PartETag:   part.ETag,
			}
		}

		// Save for total object size.
		objectSize += currentFI.Parts[partIdx].Size

		// Save the consolidated actual size.
		objectActualSize += currentFI.Parts[partIdx].ActualSize

		// Add incoming parts.
		fi.Parts[i] = ObjectPartInfo{
			Number:     part.PartNumber,
			Size:       currentFI.Parts[partIdx].Size,
			ActualSize: currentFI.Parts[partIdx].ActualSize,
			Index:      currentFI.Parts[partIdx].Index,
		}
	}

	// Save the final object size and modtime.
	fi.Size = objectSize
	fi.ModTime = opts.MTime
	if opts.MTime.IsZero() {
		fi.ModTime = UTCNow()
	}

	// Save successfully calculated md5sum; prefer a caller-provided etag,
	// otherwise derive the S3-style multipart md5 from the part etags.
	fi.Metadata["etag"] = opts.UserDefined["etag"]
	if fi.Metadata["etag"] == "" {
		fi.Metadata["etag"] = getCompleteMultipartMD5(parts)
	}

	// Save the consolidated actual size.
	fi.Metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(objectActualSize, 10)

	// Update all erasure metadata, make sure to not modify fields like
	// checksum which are different on each disks.
	for index := range partsMetadata {
		if partsMetadata[index].IsValid() {
			partsMetadata[index].Size = fi.Size
			partsMetadata[index].ModTime = fi.ModTime
			partsMetadata[index].Metadata = fi.Metadata
			partsMetadata[index].Parts = fi.Parts
		}
	}

	// Hold namespace to complete the transaction
	lk := es.NewNSLock(bucket, object)
	lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return oi, err
	}
	ctx = lkctx.Context()
	defer lk.Unlock(lkctx.Cancel)

	// Write final `xl.meta` at uploadID location
	onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum)
	if err != nil {
		return oi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
	}

	// Remove parts that weren't present in CompleteMultipartUpload request.
	for _, curpart := range currentFI.Parts {
		if objectPartIndex(fi.Parts, curpart.Number) == -1 {
			// Delete the missing part files. e.g,
			// Request 1: NewMultipart
			// Request 2: PutObjectPart 1
			// Request 3: PutObjectPart 2
			// Request 4: CompleteMultipartUpload --part 2
			// N.B. 1st part is not present. This part should be removed from the storage.
			es.removeObjectPart(bucket, object, uploadID, fi.DataDir, curpart.Number)
		}
	}

	// Rename the multipart object to final location.
	if onlineDisks, err = renameData(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath,
		partsMetadata, bucket, object, writeQuorum); err != nil {
		return oi, toObjectErr(err, bucket, object)
	}

	for i := 0; i < len(onlineDisks); i++ {
		if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
			// Object info is the same in all disks, so we can pick
			// the first meta from online disk
			fi = partsMetadata[i]
			break
		}
	}

	// we are adding a new version to this object under the namespace lock, so this is the latest version.
	fi.IsLatest = true

	// Success, return object info.
	return fi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended), nil
}
// AbortMultipartUpload - aborts an ongoing multipart operation
// signified by the input uploadID. This is an atomic operation
// doesn't require clients to initiate multiple such requests.
//
// All parts are purged from all disks and reference to the uploadID
// would be removed from the system, rollback is not possible on this
// operation.
func (es *erasureSingle) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (err error) {
	if err = checkAbortMultipartArgs(ctx, bucket, object, es); err != nil {
		return err
	}

	// Take an exclusive lock on the upload before touching it.
	uploadLock := es.NewNSLock(bucket, pathJoin(object, uploadID))
	lockCtx, err := uploadLock.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return err
	}
	ctx = lockCtx.Context()
	defer uploadLock.Unlock(lockCtx.Cancel)

	// Validates if upload ID exists.
	if err := es.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
		return toObjectErr(err, bucket, object, uploadID)
	}

	// Move the whole upload directory into the trash; best-effort,
	// the error is intentionally not propagated.
	es.disk.RenameFile(ctx, minioMetaMultipartBucket, es.getUploadIDDir(bucket, object, uploadID), minioMetaTmpDeletedBucket, mustGetUUID())

	// Successfully purged.
	return nil
}
// ListObjects lists objects under prefix in bucket, honoring marker,
// delimiter and maxKeys, and applies lifecycle filtering where configured.
func (es *erasureSingle) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
	var loi ListObjectsInfo

	// Automatically remove the object/version is an expiry lifecycle rule can be applied
	lc, _ := globalLifecycleSys.Get(bucket)

	// Check if bucket is object locked.
	rcfg, _ := globalBucketObjectLockSys.Get(bucket)

	if len(prefix) > 0 && maxKeys == 1 && delimiter == "" && marker == "" {
		// Optimization for certain applications like
		// - Cohesity
		// - Actifio, Splunk etc.
		// which send ListObjects requests where the actual object
		// itself is the prefix and max-keys=1 in such scenarios
		// we can simply verify locally if such an object exists
		// to avoid the need for ListObjects().
		objInfo, err := es.GetObjectInfo(ctx, bucket, prefix, ObjectOptions{NoLock: true})
		if err == nil {
			if lc != nil {
				// Pretend the object is absent when a delete-style
				// lifecycle action applies to it.
				action := evalActionFromLifecycle(ctx, *lc, rcfg, objInfo, false)
				switch action {
				case lifecycle.DeleteVersionAction, lifecycle.DeleteAction:
					fallthrough
				case lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction:
					return loi, nil
				}
			}
			loi.Objects = append(loi.Objects, objInfo)
			return loi, nil
		}
		// On stat failure fall through to the regular listing path.
	}

	opts := listPathOptions{
		Bucket:      bucket,
		Prefix:      prefix,
		Separator:   delimiter,
		Limit:       maxKeysPlusOne(maxKeys, marker != ""),
		Marker:      marker,
		InclDeleted: false,
		AskDisks:    globalAPIConfig.getListQuorum(),
		Lifecycle:   lc,
		Retention:   rcfg,
	}

	merged, err := es.listPath(ctx, &opts)
	if err != nil && err != io.EOF {
		if !isErrBucketNotFound(err) {
			logger.LogIf(ctx, err)
		}
		return loi, err
	}

	merged.forwardPast(opts.Marker)
	defer merged.truncate(0) // Release when returning

	// Default is recursive, if delimiter is set then list non recursive.
	objects := merged.fileInfos(bucket, prefix, delimiter)
	// io.EOF from listPath means the listing is exhausted (not truncated).
	loi.IsTruncated = err == nil && len(objects) > 0
	if maxKeys > 0 && len(objects) > maxKeys {
		objects = objects[:maxKeys]
		loi.IsTruncated = true
	}
	for _, obj := range objects {
		if obj.IsDir && obj.ModTime.IsZero() && delimiter != "" {
			// Zero-modtime directory entries are common prefixes.
			loi.Prefixes = append(loi.Prefixes, obj.Name)
		} else {
			loi.Objects = append(loi.Objects, obj)
		}
	}
	if loi.IsTruncated {
		last := objects[len(objects)-1]
		loi.NextMarker = opts.encodeMarker(last.Name)
	}
	return loi, nil
}
// ListObjectsV2 implements S3 V2 listing by delegating to ListObjects
// with the appropriate resume marker.
func (es *erasureSingle) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (ListObjectsV2Info, error) {
	// V2 semantics: the continuation token takes precedence over start-after.
	resumeFrom := continuationToken
	if resumeFrom == "" {
		resumeFrom = startAfter
	}

	listed, err := es.ListObjects(ctx, bucket, prefix, resumeFrom, delimiter, maxKeys)
	if err != nil {
		return ListObjectsV2Info{}, err
	}

	return ListObjectsV2Info{
		IsTruncated:           listed.IsTruncated,
		ContinuationToken:     continuationToken,
		NextContinuationToken: listed.NextMarker,
		Objects:               listed.Objects,
		Prefixes:              listed.Prefixes,
	}, err
}
// ListObjectVersions lists object versions under prefix, honoring the
// (marker, versionMarker) pair for pagination.
func (es *erasureSingle) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (ListObjectVersionsInfo, error) {
	loi := ListObjectVersionsInfo{}
	// A version marker without an object marker is not a valid resume point.
	if marker == "" && versionMarker != "" {
		return loi, NotImplemented{}
	}
	opts := listPathOptions{
		Bucket:      bucket,
		Prefix:      prefix,
		Separator:   delimiter,
		Limit:       maxKeysPlusOne(maxKeys, marker != ""),
		Marker:      marker,
		InclDeleted: true,
		AskDisks:    "strict",
		Versioned:   true,
	}

	merged, err := es.listPath(ctx, &opts)
	if err != nil && err != io.EOF {
		return loi, err
	}
	defer merged.truncate(0) // Release when returning

	if versionMarker == "" {
		o := listPathOptions{Marker: marker}
		// If we are not looking for a specific version skip it.
		o.parseMarker()
		merged.forwardPast(o.Marker)
	}
	objects := merged.fileInfoVersions(bucket, prefix, delimiter, versionMarker)
	// io.EOF from listPath means the listing is exhausted (not truncated).
	loi.IsTruncated = err == nil && len(objects) > 0
	if maxKeys > 0 && len(objects) > maxKeys {
		objects = objects[:maxKeys]
		loi.IsTruncated = true
	}
	for _, obj := range objects {
		if obj.IsDir && obj.ModTime.IsZero() && delimiter != "" {
			// Zero-modtime directory entries are common prefixes.
			loi.Prefixes = append(loi.Prefixes, obj.Name)
		} else {
			loi.Objects = append(loi.Objects, obj)
		}
	}
	if loi.IsTruncated {
		last := objects[len(objects)-1]
		loi.NextMarker = opts.encodeMarker(last.Name)
		loi.NextVersionIDMarker = last.VersionID
	}
	return loi, nil
}
// Walk a bucket, optionally prefix recursively, until we have returned
// all the content to objectInfo channel, it is callers responsibility
// to allocate a receive channel for ObjectInfo, upon any unhandled
// error walker returns error. Optionally if context.Done() is received
// then Walk() stops the walker.
func (es *erasureSingle) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo, opts ObjectOptions) error {
	if err := checkListObjsArgs(ctx, bucket, prefix, "", es); err != nil {
		// Upon error close the channel.
		close(results)
		return err
	}

	ctx, cancel := context.WithCancel(ctx)
	go func() {
		defer cancel()
		defer close(results)

		versioned := opts.Versioned || opts.VersionSuspended

		var wg sync.WaitGroup
		wg.Add(1)
		go func() {
			defer wg.Done()

			// loadEntry pushes every version of an object entry to the
			// results channel; directories are ignored.
			loadEntry := func(entry metaCacheEntry) {
				if entry.isDir() {
					return
				}

				fivs, err := entry.fileInfoVersions(bucket)
				if err != nil {
					cancel()
					return
				}
				if opts.WalkAscending {
					for i := len(fivs.Versions) - 1; i >= 0; i-- {
						version := fivs.Versions[i]
						results <- version.ToObjectInfo(bucket, version.Name, versioned)
					}
					return
				}
				for _, version := range fivs.Versions {
					results <- version.ToObjectInfo(bucket, version.Name, versioned)
				}
			}

			// How to resolve partial results.
			resolver := metadataResolutionParams{
				dirQuorum: 1,
				objQuorum: 1,
				bucket:    bucket,
			}

			path := baseDirFromPrefix(prefix)
			filterPrefix := strings.Trim(strings.TrimPrefix(prefix, path), slashSeparator)
			if path == prefix {
				filterPrefix = ""
			}

			lopts := listPathRawOptions{
				disks:          []StorageAPI{es.disk},
				bucket:         bucket,
				path:           path,
				filterPrefix:   filterPrefix,
				recursive:      true,
				forwardTo:      "",
				minDisks:       1,
				reportNotFound: false,
				agreed:         loadEntry,
				partial: func(entries metaCacheEntries, _ []error) {
					entry, ok := entries.resolve(&resolver)
					if !ok {
						// check if we can get one entry atleast
						// proceed to heal nonetheless.
						entry, _ = entries.firstFound()
					}
					// firstFound() may legitimately return nil when no
					// entry is usable; dereferencing it would panic.
					if entry == nil {
						return
					}
					loadEntry(*entry)
				},
				finished: nil,
			}

			if err := listPathRaw(ctx, lopts); err != nil {
				logger.LogIf(ctx, fmt.Errorf("listPathRaw returned %w: opts(%#v)", err, lopts))
				return
			}
		}()
		wg.Wait()
	}()

	return nil
}
// nsScanner will start scanning buckets and send updated totals as they are traversed.
// Updates are sent on a regular basis and the caller *must* consume them.
func (es *erasureSingle) nsScanner(ctx context.Context, buckets []BucketInfo, bf *bloomFilter, wantCycle uint32, updates chan<- dataUsageCache, healScanMode madmin.HealScanMode) error {
	if len(buckets) == 0 {
		return nil
	}

	// Collect disks we can use (always the single drive here).
	disks := []StorageAPI{es.disk}

	// Load bucket totals
	oldCache := dataUsageCache{}
	if err := oldCache.load(ctx, es, dataUsageCacheName); err != nil {
		return err
	}

	// New cache..
	cache := dataUsageCache{
		Info: dataUsageCacheInfo{
			Name:      dataUsageRoot,
			NextCycle: oldCache.Info.NextCycle,
		},
		Cache: make(map[string]dataUsageEntry, len(oldCache.Cache)),
	}
	bloom := bf.bytes()

	// Put all buckets into channel.
	bucketCh := make(chan BucketInfo, len(buckets))
	// Add new buckets first: buckets not present in the old cache are
	// scanned before already-known ones.
	for _, b := range buckets {
		if oldCache.find(b.Name) == nil {
			bucketCh <- b
		}
	}

	// Add existing buckets, seeding the new cache with their old totals.
	for _, b := range buckets {
		e := oldCache.find(b.Name)
		if e != nil {
			cache.replace(b.Name, dataUsageRoot, *e)
			bucketCh <- b
		}
	}

	close(bucketCh)
	bucketResults := make(chan dataUsageEntryInfo, len(disks))

	// Start async collector/saver.
	// This goroutine owns the cache: all mutation happens here, so no
	// locking is required on `cache`.
	var saverWg sync.WaitGroup
	saverWg.Add(1)
	go func() {
		// Add jitter to the update time so multiple sets don't sync up.
		updateTime := 30*time.Second + time.Duration(float64(10*time.Second)*rand.Float64())
		t := time.NewTicker(updateTime)
		defer t.Stop()
		defer saverWg.Done()
		var lastSave time.Time

		for {
			select {
			case <-ctx.Done():
				// Return without saving.
				return
			case <-t.C:
				// Periodic save, skipped if nothing changed since last save.
				if cache.Info.LastUpdate.Equal(lastSave) {
					continue
				}
				logger.LogIf(ctx, cache.save(ctx, es, dataUsageCacheName))
				updates <- cache.clone()
				lastSave = cache.Info.LastUpdate
			case v, ok := <-bucketResults:
				if !ok {
					// bucketResults closed: all scanners finished.
					// Save final state...
					cache.Info.NextCycle = wantCycle
					cache.Info.LastUpdate = time.Now()
					logger.LogIf(ctx, cache.save(ctx, es, dataUsageCacheName))
					updates <- cache
					return
				}
				cache.replace(v.Name, v.Parent, v.Entry)
				cache.Info.LastUpdate = time.Now()
			}
		}
	}()

	// Shuffle disks to ensure a total randomness of bucket/disk association to ensure
	// that objects that are not present in all disks are accounted and ILM applied.
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	r.Shuffle(len(disks), func(i, j int) { disks[i], disks[j] = disks[j], disks[i] })

	// Start one scanner per disk
	var wg sync.WaitGroup
	wg.Add(len(disks))
	for i := range disks {
		go func(i int) {
			defer wg.Done()
			disk := disks[i]

			for bucket := range bucketCh {
				select {
				case <-ctx.Done():
					return
				default:
				}

				// Load cache for bucket
				cacheName := pathJoin(bucket.Name, dataUsageCacheName)
				cache := dataUsageCache{}
				logger.LogIf(ctx, cache.load(ctx, es, cacheName))
				if cache.Info.Name == "" {
					cache.Info.Name = bucket.Name
				}
				cache.Info.BloomFilter = bloom
				cache.Info.SkipHealing = true
				cache.Info.NextCycle = wantCycle
				if cache.Info.Name != bucket.Name {
					// Stale/corrupt per-bucket cache, start fresh.
					logger.LogIf(ctx, fmt.Errorf("cache name mismatch: %s != %s", cache.Info.Name, bucket.Name))
					cache.Info = dataUsageCacheInfo{
						Name:       bucket.Name,
						LastUpdate: time.Time{},
						NextCycle:  wantCycle,
					}
				}

				// Collect updates.
				updates := make(chan dataUsageEntry, 1)
				var wg sync.WaitGroup
				wg.Add(1)
				go func(name string) {
					defer wg.Done()
					// Forward per-bucket updates to the collector/saver.
					for update := range updates {
						bucketResults <- dataUsageEntryInfo{
							Name:   name,
							Parent: dataUsageRoot,
							Entry:  update,
						}
					}
				}(cache.Info.Name)

				// Calc usage
				before := cache.Info.LastUpdate
				var err error
				cache, err = disk.NSScanner(ctx, cache, updates, healScanMode)
				cache.Info.BloomFilter = nil
				if err != nil {
					// Save partial progress if the scanner advanced at all.
					if !cache.Info.LastUpdate.IsZero() && cache.Info.LastUpdate.After(before) {
						logger.LogIf(ctx, cache.save(ctx, es, cacheName))
					} else {
						logger.LogIf(ctx, err)
					}
					// This ensures that we don't close
					// bucketResults channel while the
					// updates-collector goroutine still
					// holds a reference to this.
					wg.Wait()
					continue
				}

				wg.Wait()
				var root dataUsageEntry
				if r := cache.root(); r != nil {
					root = cache.flatten(*r)
				}
				t := time.Now()
				bucketResults <- dataUsageEntryInfo{
					Name:   cache.Info.Name,
					Parent: dataUsageRoot,
					Entry:  root,
				}
				// We want to avoid synchronizing up all writes in case
				// the results are piled up.
				time.Sleep(time.Duration(float64(time.Since(t)) * rand.Float64()))
				// Save cache
				logger.LogIf(ctx, cache.save(ctx, es, cacheName))
			}
		}(i)
	}
	wg.Wait()
	// All senders done: close so the collector can save final state.
	close(bucketResults)
	saverWg.Wait()
	return nil
}
// NSScanner runs a full namespace scan and streams DataUsageInfo snapshots
// on updates until the scan completes or ctx is canceled.
func (es *erasureSingle) NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo, wantCycle uint32, healScanMode madmin.HealScanMode) error {
	// Updates must be closed before we return.
	defer close(updates)
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	var wg sync.WaitGroup
	var mu sync.Mutex
	// Single slot: one scanner feeds results[0]; mu guards it.
	results := make([]dataUsageCache, 1)
	var firstErr error

	allBuckets, err := es.ListBuckets(ctx, BucketOptions{})
	if err != nil {
		return err
	}

	if len(allBuckets) == 0 {
		updates <- DataUsageInfo{} // no buckets found update data usage to reflect latest state
		return nil
	}

	// Scanner latest allBuckets first.
	sort.Slice(allBuckets, func(i, j int) bool {
		return allBuckets[i].Created.After(allBuckets[j].Created)
	})

	wg.Add(1)
	go func() {
		// Local updates channel bridges nsScanner to the shared results slot.
		updates := make(chan dataUsageCache, 1)
		defer close(updates)
		// Start update collector.
		go func() {
			defer wg.Done()
			for info := range updates {
				mu.Lock()
				results[0] = info
				mu.Unlock()
			}
		}()
		// Start scanner. Blocks until done.
		err := es.nsScanner(ctx, allBuckets, bf, wantCycle, updates, healScanMode)
		if err != nil {
			logger.LogIf(ctx, err)
			mu.Lock()
			if firstErr == nil {
				firstErr = err
			}
			// Cancel remaining...
			cancel()
			mu.Unlock()
			return
		}
	}()

	// updateCloser carries an ack channel so the caller can request one
	// final update and wait for it to be delivered.
	updateCloser := make(chan chan struct{})
	go func() {
		updateTicker := time.NewTicker(30 * time.Second)
		defer updateTicker.Stop()
		var lastUpdate time.Time

		// We need to merge since we will get the same buckets from each pool.
		// Therefore to get the exact bucket sizes we must merge before we can convert.
		var allMerged dataUsageCache

		update := func() {
			mu.Lock()
			defer mu.Unlock()

			allMerged = dataUsageCache{Info: dataUsageCacheInfo{Name: dataUsageRoot}}
			for _, info := range results {
				if info.Info.LastUpdate.IsZero() {
					// Not filled yet.
					return
				}
				allMerged.merge(info)
			}
			// Only push when there is actual content newer than the last push.
			if allMerged.root() != nil && allMerged.Info.LastUpdate.After(lastUpdate) {
				updates <- allMerged.dui(allMerged.Info.Name, allBuckets)
				lastUpdate = allMerged.Info.LastUpdate
			}
		}
		for {
			select {
			case <-ctx.Done():
				return
			case v := <-updateCloser:
				// Final flush requested; ack by closing v.
				update()
				close(v)
				return
			case <-updateTicker.C:
				update()
			}
		}
	}()

	wg.Wait()
	ch := make(chan struct{})
	select {
	case updateCloser <- ch:
		<-ch
	case <-ctx.Done():
		if firstErr == nil {
			firstErr = ctx.Err()
		}
	}
	return firstErr
}
// GetRawData will return all files with a given raw path to the callback.
// Errors are ignored, only errors from the callback are returned.
// For now only direct file paths are supported.
func (es *erasureSingle) GetRawData(ctx context.Context, volume, file string, fn func(r io.Reader, host string, disk string, filename string, info StatInfo) error) error {
	stats, err := es.disk.StatInfoFile(ctx, volume, file, true)
	if err != nil {
		return err
	}

	matched := 0
	for _, si := range stats {
		matched++

		var rc io.ReadCloser
		if si.Dir {
			// Directories have no content; hand the callback an empty reader.
			rc = io.NopCloser(bytes.NewBuffer([]byte{}))
		} else {
			var rerr error
			rc, rerr = es.disk.ReadFileStream(ctx, volume, si.Name, 0, si.Size)
			if rerr != nil {
				// Per contract, read errors on individual files are skipped.
				continue
			}
		}

		// Keep disk path instead of ID, to ensure that the downloaded zip file can be
		// easily automated with `minio server hostname{1...n}/disk{1...m}`.
		cbErr := fn(rc, es.disk.Hostname(), es.disk.Endpoint().Path, pathJoin(volume, si.Name), si)
		rc.Close()
		if cbErr != nil {
			return cbErr
		}
	}
	if matched == 0 {
		return errFileNotFound
	}
	return nil
}
fix: cluster healthcheck for single drive setups (#15415)
Single drive setups must return '200 OK' when the drive
is accessible; current master incorrectly returns '503'.
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"math/rand"
"net/http"
"os"
"path"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/dustin/go-humanize"
"github.com/klauspost/readahead"
"github.com/minio/madmin-go"
"github.com/minio/minio-go/v7/pkg/s3utils"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/internal/bpool"
"github.com/minio/minio/internal/bucket/lifecycle"
"github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/bucket/replication"
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/sync/errgroup"
"github.com/minio/pkg/mimedb"
)
// erasureSingle - Implements single drive XL layer
type erasureSingle struct {
	GatewayUnsupported

	// The single backing drive and its endpoint.
	disk StorageAPI

	endpoint Endpoint

	// Locker mutex map.
	nsMutex *nsLockMap

	// Byte pools used for temporary i/o buffers.
	bp *bpool.BytePoolCap

	// Rate-limits trash cleanup between deletes.
	deletedCleanupSleeper *dynamicSleeper

	// Shut down async operations
	shutdown context.CancelFunc

	// On-disk format metadata for this drive.
	format *formatErasureV3
}
// newErasureSingle initializes the single-drive erasure object layer
// backed by storageDisk, starts its background cleanup goroutines, and
// wires Shutdown so that it cancels all of them.
func newErasureSingle(ctx context.Context, storageDisk StorageAPI, format *formatErasureV3) (ObjectLayer, error) {
	// Number of buffers, max 2GB
	n := (2 * humanize.GiByte) / (blockSizeV2 * 2)

	// Initialize byte pool once for all sets, bpool size is set to
	// setCount * setDriveCount with each memory upto blockSizeV2.
	bp := bpool.NewBytePoolCap(n, blockSizeV2, blockSizeV2*2)

	// Initialize the erasure sets instance.
	s := &erasureSingle{
		disk:                  storageDisk,
		endpoint:              storageDisk.Endpoint(),
		format:                format,
		nsMutex:               newNSLock(false),
		bp:                    bp,
		deletedCleanupSleeper: newDynamicSleeper(10, 2*time.Second, false),
	}

	// Derive the cancelable context BEFORE launching background work, so
	// Shutdown() stops the cleanup goroutines as well (previously they
	// were started with the parent context and outlived Shutdown).
	ctx, s.shutdown = context.WithCancel(ctx)

	// start cleanup stale uploads go-routine.
	go s.cleanupStaleUploads(ctx)

	// start cleanup of deleted objects.
	go s.cleanupDeletedObjects(ctx)

	go intDataUpdateTracker.start(ctx, s.endpoint.Path)
	return s, nil
}
// ListBuckets returns every bucket found on the drive, optionally
// including deleted buckets still tracked for replication. Results are
// sorted by name; creation times prefer bucket metadata when available.
func (es *erasureSingle) ListBuckets(ctx context.Context, opts BucketOptions) (buckets []BucketInfo, err error) {
	// Gather live buckets present on the drive.
	live := map[string]VolInfo{}
	if err := listAllBuckets(ctx, []StorageAPI{es.disk}, live, 0); err != nil {
		return nil, err
	}

	// Optionally gather deleted-bucket tombstones as well.
	tombstones := map[string]VolInfo{}
	if opts.Deleted {
		if err := listDeletedBuckets(ctx, []StorageAPI{es.disk}, tombstones, 0); err != nil {
			return nil, err
		}
	}

	var all []BucketInfo
	for _, v := range live {
		entry := BucketInfo{
			Name:    v.Name,
			Created: v.Created,
		}
		if t, ok := tombstones[v.Name]; ok {
			entry.Deleted = t.Created
		}
		all = append(all, entry)
	}

	// Report tombstones that have no live counterpart.
	for _, v := range tombstones {
		if _, ok := live[v.Name]; !ok {
			all = append(all, BucketInfo{
				Name:    v.Name,
				Deleted: v.Created,
			})
		}
	}

	sort.Slice(all, func(i, j int) bool {
		return all[i].Name < all[j].Name
	})

	// Prefer the creation time recorded in bucket metadata when known.
	for i := range all {
		if meta, merr := globalBucketMetadataSys.Get(all[i].Name); merr == nil {
			all[i].Created = meta.Created
		}
	}

	return all, nil
}
// cleanupStaleUploads periodically purges stale multipart uploads from
// the drive until ctx is canceled.
func (es *erasureSingle) cleanupStaleUploads(ctx context.Context) {
	t := time.NewTimer(globalAPIConfig.getStaleUploadsCleanupInterval())
	defer t.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
			es.cleanupStaleUploadsOnDisk(ctx, es.disk, globalAPIConfig.getStaleUploadsExpiry())

			// Re-arm the timer for the next round.
			t.Reset(globalAPIConfig.getStaleUploadsCleanupInterval())
		}
	}
}
// cleanup ".trash/" folder every 5m minutes with sufficient sleep cycles, between each
// deletes a dynamic sleeper is used with a factor of 10 ratio with max delay between
// deletes to be 2 seconds.
func (es *erasureSingle) cleanupDeletedObjects(ctx context.Context) {
	t := time.NewTimer(globalAPIConfig.getDeleteCleanupInterval())
	defer t.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
			es.cleanupDeletedObjectsInner(ctx)

			// Re-arm the timer for the next round.
			t.Reset(globalAPIConfig.getDeleteCleanupInterval())
		}
	}
}
// NewNSLock - initialize a new namespace RWLocker instance.
// The nil argument selects local-only locking — presumably because a
// single-drive setup has no peers; verify against nsLockMap.NewNSLock.
func (es *erasureSingle) NewNSLock(bucket string, objects ...string) RWLocker {
	return es.nsMutex.NewNSLock(nil, bucket, objects...)
}
// Shutdown function for object storage interface.
// Cancels background operations via es.shutdown and closes the drive.
func (es *erasureSingle) Shutdown(ctx context.Context) error {
	defer es.shutdown()

	// Add any object layer shutdown activities here.
	closeStorageDisks(es.disk)
	return nil
}
// SetDriveCounts returns the drive count per erasure set; a single-drive
// setup always has exactly one set of one drive.
func (es *erasureSingle) SetDriveCounts() []int {
	return []int{1}
}
// BackendInfo reports backend details for the single-drive layout;
// both storage-class parities are zero since there is no redundancy.
func (es *erasureSingle) BackendInfo() (b madmin.BackendInfo) {
	b.Type = madmin.Erasure

	const (
		scParity   = 0
		rrSCParity = 0
	)

	// Data blocks can vary per pool, but parity is same.
	for _, driveCount := range es.SetDriveCounts() {
		b.StandardSCData = append(b.StandardSCData, driveCount-scParity)
		b.RRSCData = append(b.RRSCData, driveCount-rrSCParity)
	}

	b.StandardSCParity = scParity
	b.RRSCParity = rrSCParity
	return b
}
// StorageInfo - returns underlying storage statistics.
func (es *erasureSingle) StorageInfo(ctx context.Context) (StorageInfo, []error) {
	info, errs := getStorageInfo([]StorageAPI{es.disk}, []Endpoint{es.endpoint})
	info.Backend = es.BackendInfo()
	return info, errs
}
// LocalStorageInfo - returns underlying local storage statistics,
// filtering to endpoints local to this process.
func (es *erasureSingle) LocalStorageInfo(ctx context.Context) (StorageInfo, []error) {
	var (
		localDisks     []StorageAPI
		localEndpoints []Endpoint
	)

	disks := []StorageAPI{es.disk}
	for i, ep := range []Endpoint{es.endpoint} {
		if ep.IsLocal {
			localDisks = append(localDisks, disks[i])
			localEndpoints = append(localEndpoints, ep)
		}
	}

	return getStorageInfo(localDisks, localEndpoints)
}
// Clean-up previously deleted objects. from .minio.sys/tmp/.trash/
// Best-effort: readDirFn and removeAll errors are intentionally ignored;
// the dynamic sleeper throttles deletes between entries.
func (es *erasureSingle) cleanupDeletedObjectsInner(ctx context.Context) {
	diskPath := es.disk.Endpoint().Path
	readDirFn(pathJoin(diskPath, minioMetaTmpDeletedBucket), func(ddir string, typ os.FileMode) error {
		wait := es.deletedCleanupSleeper.Timer(ctx)
		removeAll(pathJoin(diskPath, minioMetaTmpDeletedBucket, ddir))
		wait()
		return nil
	})
}
// renameAll moves bucket/prefix into the trash bucket under a random
// name for asynchronous deletion. Best-effort: the error is ignored.
func (es *erasureSingle) renameAll(ctx context.Context, bucket, prefix string) {
	if es.disk != nil {
		es.disk.RenameFile(ctx, bucket, prefix, minioMetaTmpDeletedBucket, mustGetUUID())
	}
}
// renameAllStorager abstracts layers capable of trashing a prefix for
// later asynchronous deletion.
type renameAllStorager interface {
	renameAll(ctx context.Context, bucket, prefix string)
}
// Bucket operations

// MakeBucket - make a bucket.
func (es *erasureSingle) MakeBucketWithLocation(ctx context.Context, bucket string, opts MakeBucketOptions) error {
	defer NSUpdated(bucket, slashSeparator)

	// Lock the bucket name before creating.
	lk := es.NewNSLock(minioMetaTmpBucket, bucket+".lck")
	lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return err
	}
	ctx = lkctx.Context()
	defer lk.Unlock(lkctx.Cancel)

	// Verify if bucket is valid. Internal .minio.sys buckets bypass the
	// strict S3 name check.
	if !isMinioMetaBucketName(bucket) {
		if err := s3utils.CheckValidBucketNameStrict(bucket); err != nil {
			return BucketNameInvalid{Bucket: bucket}
		}
	}

	if err := es.disk.MakeVol(ctx, bucket); err != nil {
		if opts.ForceCreate && errors.Is(err, errVolumeExists) {
			// No need to return error when force create was
			// requested.
			return nil
		}
		if !errors.Is(err, errVolumeExists) {
			logger.LogIf(ctx, err)
		}
		return toObjectErr(err, bucket)
	}

	// If it doesn't exist we get a new, so ignore errors
	meta := newBucketMetadata(bucket)
	meta.SetCreatedAt(opts.CreatedAt)
	// Object locking implies versioning.
	if opts.LockEnabled {
		meta.VersioningConfigXML = enabledBucketVersioningConfig
		meta.ObjectLockConfigXML = enabledBucketObjectLockConfig
	}

	if opts.VersioningEnabled {
		meta.VersioningConfigXML = enabledBucketVersioningConfig
	}

	// Background context: metadata save should not be aborted by the
	// request's lock context once the volume exists.
	if err := meta.Save(context.Background(), es); err != nil {
		return toObjectErr(err, bucket)
	}

	globalBucketMetadataSys.Set(bucket, meta)

	return nil
}
// GetBucketInfo - returns BucketInfo for a bucket, or its deletion
// tombstone when opts.Deleted is set and the bucket no longer exists.
func (es *erasureSingle) GetBucketInfo(ctx context.Context, bucket string, opts BucketOptions) (bi BucketInfo, e error) {
	volInfo, err := es.disk.StatVol(ctx, bucket)
	if err == nil {
		return BucketInfo{Name: volInfo.Name, Created: volInfo.Created}, nil
	}

	// Bucket missing: optionally surface the tracked tombstone instead.
	if opts.Deleted {
		if dvi, derr := es.disk.StatVol(ctx, pathJoin(minioMetaBucket, bucketMetaPrefix, deletedBucketsPrefix, bucket)); derr == nil {
			return BucketInfo{Name: bucket, Deleted: dvi.Created}, nil
		}
	}
	return bi, toObjectErr(err, bucket)
}
// DeleteBucket - deletes a bucket.
// Bucket metadata is purged regardless of whether DeleteVol succeeds;
// on success (or an already-missing volume) a site-replication tombstone
// may be recorded.
func (es *erasureSingle) DeleteBucket(ctx context.Context, bucket string, opts DeleteBucketOptions) error {
	// Collect if all disks report volume not found.
	defer NSUpdated(bucket, slashSeparator)

	err := es.disk.DeleteVol(ctx, bucket, opts.Force)

	// Purge the entire bucket metadata entirely.
	deleteBucketMetadata(ctx, es, bucket)
	globalBucketMetadataSys.Remove(bucket)

	if err == nil || errors.Is(err, errVolumeNotFound) {
		if opts.SRDeleteOp == MarkDelete {
			es.markDelete(ctx, minioMetaBucket, pathJoin(bucketMetaPrefix, deletedBucketsPrefix, bucket))
		}
	}
	return toObjectErr(err, bucket)
}
// markDelete creates a vol entry in .minio.sys/buckets/.deleted until site replication
// syncs the delete to peers
// An already-existing tombstone is treated as success.
func (es *erasureSingle) markDelete(ctx context.Context, bucket, prefix string) error {
	err := es.disk.MakeVol(ctx, pathJoin(bucket, prefix))
	if err != nil && errors.Is(err, errVolumeExists) {
		return nil
	}
	return toObjectErr(err, bucket)
}
// purgeDelete deletes vol entry in .minio.sys/buckets/.deleted after site replication
// syncs the delete to peers OR on a new MakeBucket call.
func (es *erasureSingle) purgeDelete(ctx context.Context, bucket, prefix string) error {
	// Force-delete the tombstone volume and map any error to an object-layer error.
	return toObjectErr(es.disk.DeleteVol(ctx, pathJoin(bucket, prefix), true), bucket)
}
// IsNotificationSupported returns whether bucket notification is applicable
// for this layer. Always true for the single-drive erasure backend.
func (es *erasureSingle) IsNotificationSupported() bool {
	return true
}
// IsListenSupported returns whether listen bucket notification is applicable
// for this layer. Always true for the single-drive erasure backend.
func (es *erasureSingle) IsListenSupported() bool {
	return true
}
// IsEncryptionSupported returns whether server side encryption is implemented
// for this layer. Always true for the single-drive erasure backend.
func (es *erasureSingle) IsEncryptionSupported() bool {
	return true
}
// IsCompressionSupported returns whether compression is applicable for this
// layer. Always true for the single-drive erasure backend.
func (es *erasureSingle) IsCompressionSupported() bool {
	return true
}
// IsTaggingSupported indicates whether *erasureSingle implements tagging
// support. Always true for the single-drive erasure backend.
func (es *erasureSingle) IsTaggingSupported() bool {
	return true
}
// Object Operations
// CopyObject - copy object source object to destination object.
// if source object and destination object are same we only
// update metadata.
//
// Two paths: (1) same src/dst and metadata-only copy — rewrite xl.meta in
// place without touching object data; (2) everything else — delegate to
// PutObject with the source's PutObjReader.
func (es *erasureSingle) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (oi ObjectInfo, err error) {
	defer NSUpdated(dstBucket, dstObject)
	srcObject = encodeDirObject(srcObject)
	dstObject = encodeDirObject(dstObject)
	cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))
	// Take a write lock on the destination unless the caller already holds
	// one; flag NoLock afterwards so the nested PutObject does not re-lock.
	if !dstOpts.NoLock {
		ns := es.NewNSLock(dstBucket, dstObject)
		lkctx, err := ns.GetLock(ctx, globalOperationTimeout)
		if err != nil {
			return ObjectInfo{}, err
		}
		ctx = lkctx.Context()
		defer ns.Unlock(lkctx.Cancel)
		dstOpts.NoLock = true
	}
	if cpSrcDstSame && srcInfo.metadataOnly {
		// Read metadata associated with the object from all disks.
		storageDisks := []StorageAPI{es.disk}
		var metaArr []FileInfo
		var errs []error
		// Read metadata associated with the object from all disks.
		if srcOpts.VersionID != "" {
			metaArr, errs = readAllFileInfo(ctx, storageDisks, srcBucket, srcObject, srcOpts.VersionID, true)
		} else {
			metaArr, errs = readAllXL(ctx, storageDisks, srcBucket, srcObject, true)
		}
		readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, metaArr, errs, 0)
		if err != nil {
			return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject)
		}
		// List all online disks.
		onlineDisks, modTime := listOnlineDisks(storageDisks, metaArr, errs)
		// Pick latest valid metadata.
		fi, err := pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
		if err != nil {
			return oi, toObjectErr(err, srcBucket, srcObject)
		}
		// A delete marker cannot be copied: not-found without a version,
		// method-not-allowed (with metadata) when a version was requested.
		if fi.Deleted {
			if srcOpts.VersionID == "" {
				return oi, toObjectErr(errFileNotFound, srcBucket, srcObject)
			}
			return fi.ToObjectInfo(srcBucket, srcObject, srcOpts.Versioned || srcOpts.VersionSuspended), toObjectErr(errMethodNotAllowed, srcBucket, srcObject)
		}
		filterOnlineDisksInplace(fi, metaArr, onlineDisks)
		versionID := srcInfo.VersionID
		if srcInfo.versionOnly {
			versionID = dstOpts.VersionID
			// preserve destination versionId if specified.
			if versionID == "" {
				versionID = mustGetUUID()
				fi.IsLatest = true // we are creating a new version so this is latest.
			}
			modTime = UTCNow()
		}
		// If the data is not inlined, we may end up incorrectly
		// inlining the data here, that leads to an inconsistent
		// situation where some objects are were not inlined
		// were now inlined, make sure to `nil` the Data such
		// that xl.meta is written as expected.
		if !fi.InlineData() {
			fi.Data = nil
		}
		fi.VersionID = versionID // set any new versionID we might have created
		fi.ModTime = modTime     // set modTime for the new versionID
		if !dstOpts.MTime.IsZero() {
			modTime = dstOpts.MTime
			fi.ModTime = dstOpts.MTime
		}
		fi.Metadata = srcInfo.UserDefined
		srcInfo.UserDefined["etag"] = srcInfo.ETag
		// Update `xl.meta` content on each disks.
		for index := range metaArr {
			if metaArr[index].IsValid() {
				metaArr[index].ModTime = modTime
				metaArr[index].VersionID = versionID
				metaArr[index].Metadata = srcInfo.UserDefined
				if !metaArr[index].InlineData() {
					// If the data is not inlined, we may end up incorrectly
					// inlining the data here, that leads to an inconsistent
					// situation where some objects are were not inlined
					// were now inlined, make sure to `nil` the Data such
					// that xl.meta is written as expected.
					metaArr[index].Data = nil
				}
			}
		}
		// Write unique `xl.meta` for each disk.
		if _, err = writeUniqueFileInfo(ctx, onlineDisks, srcBucket, srcObject, metaArr, writeQuorum); err != nil {
			return oi, toObjectErr(err, srcBucket, srcObject)
		}
		return fi.ToObjectInfo(srcBucket, srcObject, srcOpts.Versioned || srcOpts.VersionSuspended), nil
	}
	// Full data copy: stream the source through PutObject. NoLock is set
	// because the destination lock (if any) was already taken above.
	putOpts := ObjectOptions{
		ServerSideEncryption: dstOpts.ServerSideEncryption,
		UserDefined:          srcInfo.UserDefined,
		Versioned:            dstOpts.Versioned,
		VersionID:            dstOpts.VersionID,
		MTime:                dstOpts.MTime,
		NoLock:               true,
	}
	return es.PutObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, putOpts)
}
// GetObjectNInfo - returns object info and an object
// Read(Closer). When err != nil, the returned reader is always nil.
//
// Lock ownership: the namespace lock is released via the deferred
// nsUnlocker while unlockOnDefer is true; once a reader is handed to the
// caller, unlockOnDefer is flipped to false and the unlock responsibility
// moves into the reader's cleanup funcs.
func (es *erasureSingle) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
	if err = checkGetObjArgs(ctx, bucket, object); err != nil {
		return nil, err
	}
	object = encodeDirObject(object)
	var unlockOnDefer bool
	nsUnlocker := func() {}
	defer func() {
		if unlockOnDefer {
			nsUnlocker()
		}
	}()
	// Acquire lock
	if lockType != noLock {
		lock := es.NewNSLock(bucket, object)
		switch lockType {
		case writeLock:
			lkctx, err := lock.GetLock(ctx, globalOperationTimeout)
			if err != nil {
				return nil, err
			}
			ctx = lkctx.Context()
			nsUnlocker = func() { lock.Unlock(lkctx.Cancel) }
		case readLock:
			lkctx, err := lock.GetRLock(ctx, globalOperationTimeout)
			if err != nil {
				return nil, err
			}
			ctx = lkctx.Context()
			nsUnlocker = func() { lock.RUnlock(lkctx.Cancel) }
		}
		unlockOnDefer = true
	}
	fi, metaArr, onlineDisks, err := es.getObjectFileInfo(ctx, bucket, object, opts, true)
	if err != nil {
		return nil, toObjectErr(err, bucket, object)
	}
	objInfo := fi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended)
	if objInfo.DeleteMarker {
		if opts.VersionID == "" {
			return &GetObjectReader{
				ObjInfo: objInfo,
			}, toObjectErr(errFileNotFound, bucket, object)
		}
		// Make sure to return object info to provide extra information.
		return &GetObjectReader{
			ObjInfo: objInfo,
		}, toObjectErr(errMethodNotAllowed, bucket, object)
	}
	// Transitioned (tiered) objects are read from the remote tier.
	if objInfo.IsRemote() {
		gr, err := getTransitionedObjectReader(ctx, bucket, object, rs, h, objInfo, opts)
		if err != nil {
			return nil, err
		}
		unlockOnDefer = false
		return gr.WithCleanupFuncs(nsUnlocker), nil
	}
	fn, off, length, err := NewGetObjectReader(rs, objInfo, opts)
	if err != nil {
		return nil, err
	}
	unlockOnDefer = false
	// Feed decoded object bytes through a pipe; the goroutine's error (or
	// nil on success) is propagated to the reader via CloseWithError.
	pr, pw := xioutil.WaitPipe()
	go func() {
		pw.CloseWithError(es.getObjectWithFileInfo(ctx, bucket, object, off, length, pw, fi, metaArr, onlineDisks))
	}()
	// Cleanup function to cause the go routine above to exit, in
	// case of incomplete read.
	pipeCloser := func() {
		pr.CloseWithError(nil)
	}
	return fn(pr, h, pipeCloser, nsUnlocker)
}
// getObjectWithFileInfo streams [startOffset, startOffset+length) of the
// object to writer, erasure-decoding each part with bitrot verification.
// A negative length means "read to the end of the object".
func (es *erasureSingle) getObjectWithFileInfo(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, fi FileInfo, metaArr []FileInfo, onlineDisks []StorageAPI) error {
	// Reorder online disks based on erasure distribution order.
	// Reorder parts metadata based on erasure distribution order.
	onlineDisks, metaArr = shuffleDisksAndPartsMetadataByIndex(onlineDisks, metaArr, fi)
	// For negative length read everything.
	if length < 0 {
		length = fi.Size - startOffset
	}
	// Reply back invalid range if the input offset and length fall out of range.
	if startOffset > fi.Size || startOffset+length > fi.Size {
		logger.LogIf(ctx, InvalidRange{startOffset, length, fi.Size}, logger.Application)
		return InvalidRange{startOffset, length, fi.Size}
	}
	// Get start part index and offset.
	partIndex, partOffset, err := fi.ObjectToPartOffset(ctx, startOffset)
	if err != nil {
		return InvalidRange{startOffset, length, fi.Size}
	}
	// Calculate endOffset according to length
	endOffset := startOffset
	if length > 0 {
		endOffset += length - 1
	}
	// Get last part index to read given length.
	lastPartIndex, _, err := fi.ObjectToPartOffset(ctx, endOffset)
	if err != nil {
		return InvalidRange{startOffset, length, fi.Size}
	}
	var totalBytesRead int64
	erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize)
	if err != nil {
		return toObjectErr(err, bucket, object)
	}
	// once we have obtained a common FileInfo i.e latest, we should stick
	// to single dataDir to read the content to avoid reading from some other
	// dataDir that has stale FileInfo{} to ensure that we fail appropriately
	// during reads and expect the same dataDir everywhere.
	dataDir := fi.DataDir
	for ; partIndex <= lastPartIndex; partIndex++ {
		if length == totalBytesRead {
			break
		}
		partNumber := fi.Parts[partIndex].Number
		// Save the current part name and size.
		partSize := fi.Parts[partIndex].Size
		partLength := partSize - partOffset
		// partLength should be adjusted so that we don't write more data than what was requested.
		if partLength > (length - totalBytesRead) {
			partLength = length - totalBytesRead
		}
		tillOffset := erasure.ShardFileOffset(partOffset, partLength, partSize)
		// Get the checksums of the current part.
		readers := make([]io.ReaderAt, len(onlineDisks))
		prefer := make([]bool, len(onlineDisks))
		for index, disk := range onlineDisks {
			if disk == OfflineDisk {
				continue
			}
			if !metaArr[index].IsValid() {
				continue
			}
			checksumInfo := metaArr[index].Erasure.GetChecksumInfo(partNumber)
			partPath := pathJoin(object, dataDir, fmt.Sprintf("part.%d", partNumber))
			readers[index] = newBitrotReader(disk, metaArr[index].Data, bucket, partPath, tillOffset,
				checksumInfo.Algorithm, checksumInfo.Hash, erasure.ShardSize())
			// Prefer local disks
			prefer[index] = disk.Hostname() == ""
		}
		_, err = erasure.Decode(ctx, writer, readers, partOffset, partLength, partSize, prefer)
		// Note: we should not be defer'ing the following closeBitrotReaders() call as
		// we are inside a for loop i.e if we use defer, we would accumulate a lot of open files by the time
		// we return from this function.
		closeBitrotReaders(readers)
		if err != nil {
			return toObjectErr(err, bucket, object)
		}
		// Mark disks whose reader was never constructed as offline for
		// subsequent parts.
		for i, r := range readers {
			if r == nil {
				onlineDisks[i] = OfflineDisk
			}
		}
		// Track total bytes read from disk and written to the client.
		totalBytesRead += partLength
		// partOffset will be valid only for the first part, hence reset it to 0 for
		// the remaining parts.
		partOffset = 0
	} // End of read all parts loop.
	// Return success.
	return nil
}
// GetObjectInfo - reads object metadata and replies back ObjectInfo.
func (es *erasureSingle) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (info ObjectInfo, err error) {
	if verr := checkGetObjArgs(ctx, bucket, object); verr != nil {
		return info, verr
	}

	object = encodeDirObject(object)

	// Caller already holds a lock on this object.
	if opts.NoLock {
		return es.getObjectInfo(ctx, bucket, object, opts)
	}

	// Take a shared lock while the metadata is read.
	lk := es.NewNSLock(bucket, object)
	lkctx, lerr := lk.GetRLock(ctx, globalOperationTimeout)
	if lerr != nil {
		return ObjectInfo{}, lerr
	}
	ctx = lkctx.Context()
	defer lk.RUnlock(lkctx.Cancel)

	return es.getObjectInfo(ctx, bucket, object, opts)
}
// getObjectFileInfo reads per-disk xl.meta for the object, enforces read
// quorum, and returns the latest valid FileInfo plus the per-disk metadata
// and the (filtered) list of online disks. readData controls whether any
// inlined object data is loaded alongside the metadata.
func (es *erasureSingle) getObjectFileInfo(ctx context.Context, bucket, object string, opts ObjectOptions, readData bool) (fi FileInfo, metaArr []FileInfo, onlineDisks []StorageAPI, err error) {
	disks := []StorageAPI{es.disk}
	var errs []error
	// Read metadata associated with the object from all disks.
	metaArr, errs = readAllFileInfo(ctx, disks, bucket, object, opts.VersionID, readData)
	readQuorum, _, err := objectQuorumFromMeta(ctx, metaArr, errs, 0)
	if err != nil {
		return fi, nil, nil, toObjectErr(err, bucket, object)
	}
	// Fail early if the per-disk errors already violate read quorum.
	if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
		return fi, nil, nil, toObjectErr(reducedErr, bucket, object)
	}
	// List all online disks.
	onlineDisks, modTime := listOnlineDisks(disks, metaArr, errs)
	// Pick latest valid metadata.
	fi, err = pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
	if err != nil {
		return fi, nil, nil, err
	}
	// Drop disks whose metadata does not match the chosen FileInfo.
	filterOnlineDisksInplace(fi, metaArr, onlineDisks)
	return fi, metaArr, onlineDisks, nil
}
// getObjectInfo - wrapper for reading object metadata and constructs ObjectInfo.
func (es *erasureSingle) getObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	fi, _, _, ferr := es.getObjectFileInfo(ctx, bucket, object, opts, false)
	if ferr != nil {
		return objInfo, toObjectErr(ferr, bucket, object)
	}
	objInfo = fi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended)

	if !fi.Deleted {
		return objInfo, nil
	}

	// The resolved version is a delete marker.
	switch {
	case opts.VersionID == "" || opts.DeleteMarker:
		// No explicit version requested (or the delete marker itself was
		// asked for): report the object as not found.
		return objInfo, toObjectErr(errFileNotFound, bucket, object)
	default:
		// A specific delete-marker version was requested; return its info
		// together with errMethodNotAllowed so callers get the metadata.
		return objInfo, toObjectErr(errMethodNotAllowed, bucket, object)
	}
}
// getObjectInfoAndQuorum - wrapper for reading object metadata and constructs ObjectInfo, additionally returns write quorum for the object.
func (es *erasureSingle) getObjectInfoAndQuorum(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, wquorum int, err error) {
	fi, _, _, err := es.getObjectFileInfo(ctx, bucket, object, opts, false)
	if err != nil {
		// On failure report a write quorum of 1 so callers still receive
		// a usable value.
		return objInfo, 1, toObjectErr(err, bucket, object)
	}
	// Write quorum is the data-block count, bumped by one when data and
	// parity are split evenly.
	wquorum = fi.Erasure.DataBlocks
	if fi.Erasure.DataBlocks == fi.Erasure.ParityBlocks {
		wquorum++
	}
	objInfo = fi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended)
	// A version with a pending/failed purge cannot be operated on directly.
	if !fi.VersionPurgeStatus().Empty() && opts.VersionID != "" {
		// Make sure to return object info to provide extra information.
		return objInfo, wquorum, toObjectErr(errMethodNotAllowed, bucket, object)
	}
	if fi.Deleted {
		if opts.VersionID == "" || opts.DeleteMarker {
			return objInfo, wquorum, toObjectErr(errFileNotFound, bucket, object)
		}
		// Make sure to return object info to provide extra information.
		return objInfo, wquorum, toObjectErr(errMethodNotAllowed, bucket, object)
	}
	return objInfo, wquorum, nil
}
// putMetacacheObject writes a metacache entry under minioMetaBucket/key.
// The payload is always inlined into xl.meta (no separate part files) and
// is written without versioning or namespace locking.
func (es *erasureSingle) putMetacacheObject(ctx context.Context, key string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	data := r.Reader
	// No metadata is set, allocate a new one.
	if opts.UserDefined == nil {
		opts.UserDefined = make(map[string]string)
	}
	storageDisks := []StorageAPI{es.disk}
	// Get parity and data drive count based on storage class metadata
	// Single drive: no parity, all data.
	parityDrives := 0
	dataDrives := len(storageDisks) - parityDrives
	// we now know the number of blocks this object needs for data and parity.
	// writeQuorum is dataBlocks + 1
	writeQuorum := dataDrives
	if dataDrives == parityDrives {
		writeQuorum++
	}
	// Validate input data size and it can never be less than zero.
	// (-1 itself is allowed and means "size unknown".)
	if data.Size() < -1 {
		logger.LogIf(ctx, errInvalidArgument, logger.Application)
		return ObjectInfo{}, toObjectErr(errInvalidArgument)
	}
	// Initialize parts metadata
	partsMetadata := make([]FileInfo, len(storageDisks))
	fi := newFileInfo(pathJoin(minioMetaBucket, key), dataDrives, parityDrives)
	fi.DataDir = mustGetUUID()
	// Initialize erasure metadata.
	for index := range partsMetadata {
		partsMetadata[index] = fi
	}
	// Order disks according to erasure distribution
	var onlineDisks []StorageAPI
	onlineDisks, partsMetadata = shuffleDisksAndPartsMetadata(storageDisks, partsMetadata, fi)
	erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize)
	if err != nil {
		return ObjectInfo{}, toObjectErr(err, minioMetaBucket, key)
	}
	// Fetch buffer for I/O, returns from the pool if not allocates a new one and returns.
	var buffer []byte
	switch size := data.Size(); {
	case size == 0:
		buffer = make([]byte, 1) // Allocate atleast a byte to reach EOF
	case size >= fi.Erasure.BlockSize:
		buffer = es.bp.Get()
		defer es.bp.Put(buffer)
	case size < fi.Erasure.BlockSize:
		// No need to allocate fully blockSizeV1 buffer if the incoming data is smaller.
		buffer = make([]byte, size, 2*size+int64(fi.Erasure.ParityBlocks+fi.Erasure.DataBlocks-1))
	}
	if len(buffer) > int(fi.Erasure.BlockSize) {
		buffer = buffer[:fi.Erasure.BlockSize]
	}
	shardFileSize := erasure.ShardFileSize(data.Size())
	writers := make([]io.Writer, len(onlineDisks))
	// Metacache payloads are always inlined: each shard is written into an
	// in-memory buffer that is later embedded in xl.meta.
	inlineBuffers := make([]*bytes.Buffer, len(onlineDisks))
	for i, disk := range onlineDisks {
		if disk == nil {
			continue
		}
		if disk.IsOnline() {
			inlineBuffers[i] = bytes.NewBuffer(make([]byte, 0, shardFileSize))
			writers[i] = newStreamingBitrotWriterBuffer(inlineBuffers[i], DefaultBitrotAlgorithm, erasure.ShardSize())
		}
	}
	n, erasureErr := erasure.Encode(ctx, data, writers, buffer, writeQuorum)
	closeBitrotWriters(writers)
	if erasureErr != nil {
		return ObjectInfo{}, toObjectErr(erasureErr, minioMetaBucket, key)
	}
	// Should return IncompleteBody{} error when reader has fewer bytes
	// than specified in request header.
	if n < data.Size() {
		return ObjectInfo{}, IncompleteBody{Bucket: minioMetaBucket, Object: key}
	}
	var index []byte
	if opts.IndexCB != nil {
		index = opts.IndexCB()
	}
	modTime := UTCNow()
	for i, w := range writers {
		if w == nil {
			// Make sure to avoid writing to disks which we couldn't complete in erasure.Encode()
			onlineDisks[i] = nil
			continue
		}
		partsMetadata[i].Data = inlineBuffers[i].Bytes()
		partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), modTime, index)
		partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
			PartNumber: 1,
			Algorithm:  DefaultBitrotAlgorithm,
			Hash:       bitrotWriterSum(w),
		})
	}
	// Fill all the necessary metadata.
	// Update `xl.meta` content on each disks.
	for index := range partsMetadata {
		partsMetadata[index].Size = n
		partsMetadata[index].Fresh = true
		partsMetadata[index].ModTime = modTime
		partsMetadata[index].Metadata = opts.UserDefined
	}
	// Set an additional header when data is inlined.
	for index := range partsMetadata {
		partsMetadata[index].SetInlineData()
	}
	for i := 0; i < len(onlineDisks); i++ {
		if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
			// Object info is the same in all disks, so we can pick
			// the first meta from online disk
			fi = partsMetadata[i]
			break
		}
	}
	if _, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaBucket, key, partsMetadata, writeQuorum); err != nil {
		return ObjectInfo{}, toObjectErr(err, minioMetaBucket, key)
	}
	return fi.ToObjectInfo(minioMetaBucket, key, opts.Versioned || opts.VersionSuspended), nil
}
// PutObject - creates an object upon reading from the input stream
// until EOF, erasure codes the data across all disk and additionally
// writes `xl.meta` which carries the necessary metadata for future
// object operations.
func (es *erasureSingle) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	// Validate put object input args.
	if verr := checkPutObjectArgs(ctx, bucket, object, es); verr != nil {
		return ObjectInfo{}, verr
	}

	object = encodeDirObject(object)

	// Reject the write up front if the drive cannot hold the payload.
	// Internal buckets under .minio.sys are exempt from this check.
	if !isMinioMetaBucketName(bucket) {
		if !hasSpaceFor(getDiskInfos(ctx, es.disk), data.Size()) {
			return ObjectInfo{}, toObjectErr(errDiskFull)
		}
	}

	return es.putObject(ctx, bucket, object, data, opts)
}
// putObject wrapper for erasureObjects PutObject
//
// Writes the object's single part to a temporary location (or inline
// buffers for small objects), then atomically renames it into place.
// The deferred cleanup moves any partial temporary data aside when not
// every disk came back online.
func (es *erasureSingle) putObject(ctx context.Context, bucket string, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	data := r.Reader
	// No metadata is set, allocate a new one.
	if opts.UserDefined == nil {
		opts.UserDefined = make(map[string]string)
	}
	storageDisks := []StorageAPI{es.disk}
	// Single drive: no parity, all data.
	parityDrives := 0
	dataDrives := len(storageDisks) - parityDrives
	// we now know the number of blocks this object needs for data and parity.
	// writeQuorum is dataBlocks + 1
	writeQuorum := dataDrives
	if dataDrives == parityDrives {
		writeQuorum++
	}
	// Validate input data size and it can never be less than zero.
	// (-1 itself is allowed and means "size unknown".)
	if data.Size() < -1 {
		logger.LogIf(ctx, errInvalidArgument, logger.Application)
		return ObjectInfo{}, toObjectErr(errInvalidArgument)
	}
	// Initialize parts metadata
	partsMetadata := make([]FileInfo, len(storageDisks))
	fi := newFileInfo(pathJoin(bucket, object), dataDrives, parityDrives)
	fi.VersionID = opts.VersionID
	if opts.Versioned && fi.VersionID == "" {
		fi.VersionID = mustGetUUID()
	}
	fi.DataDir = mustGetUUID()
	uniqueID := mustGetUUID()
	tempObj := uniqueID
	// Initialize erasure metadata.
	for index := range partsMetadata {
		partsMetadata[index] = fi
	}
	// Order disks according to erasure distribution
	var onlineDisks []StorageAPI
	onlineDisks, partsMetadata = shuffleDisksAndPartsMetadata(storageDisks, partsMetadata, fi)
	erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize)
	if err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}
	// Fetch buffer for I/O, returns from the pool if not allocates a new one and returns.
	var buffer []byte
	switch size := data.Size(); {
	case size == 0:
		buffer = make([]byte, 1) // Allocate atleast a byte to reach EOF
	case size == -1:
		if size := data.ActualSize(); size > 0 && size < fi.Erasure.BlockSize {
			buffer = make([]byte, data.ActualSize()+256, data.ActualSize()*2+512)
		} else {
			buffer = es.bp.Get()
			defer es.bp.Put(buffer)
		}
	case size >= fi.Erasure.BlockSize:
		buffer = es.bp.Get()
		defer es.bp.Put(buffer)
	case size < fi.Erasure.BlockSize:
		// No need to allocate fully blockSizeV1 buffer if the incoming data is smaller.
		buffer = make([]byte, size, 2*size+int64(fi.Erasure.ParityBlocks+fi.Erasure.DataBlocks-1))
	}
	if len(buffer) > int(fi.Erasure.BlockSize) {
		buffer = buffer[:fi.Erasure.BlockSize]
	}
	partName := "part.1"
	tempErasureObj := pathJoin(uniqueID, fi.DataDir, partName)
	// Delete temporary object in the event of failure.
	// If PutObject succeeded there would be no temporary
	// object to delete.
	// NOTE: uses context.Background() so cleanup still runs after ctx
	// cancellation.
	var online int
	defer func() {
		if online != len(onlineDisks) {
			es.disk.RenameFile(context.Background(), minioMetaTmpBucket, tempObj, minioMetaTmpDeletedBucket, mustGetUUID())
		}
	}()
	shardFileSize := erasure.ShardFileSize(data.Size())
	writers := make([]io.Writer, len(onlineDisks))
	// Decide whether the payload is small enough to inline into xl.meta;
	// a non-nil inlineBuffers slice switches writers to in-memory buffers.
	var inlineBuffers []*bytes.Buffer
	if shardFileSize >= 0 {
		if !opts.Versioned && shardFileSize < smallFileThreshold {
			inlineBuffers = make([]*bytes.Buffer, len(onlineDisks))
		} else if shardFileSize < smallFileThreshold/8 {
			inlineBuffers = make([]*bytes.Buffer, len(onlineDisks))
		}
	} else {
		// If compressed, use actual size to determine.
		if sz := erasure.ShardFileSize(data.ActualSize()); sz > 0 {
			if !opts.Versioned && sz < smallFileThreshold {
				inlineBuffers = make([]*bytes.Buffer, len(onlineDisks))
			} else if sz < smallFileThreshold/8 {
				inlineBuffers = make([]*bytes.Buffer, len(onlineDisks))
			}
		}
	}
	for i, disk := range onlineDisks {
		if disk == nil {
			continue
		}
		if !disk.IsOnline() {
			continue
		}
		if len(inlineBuffers) > 0 {
			sz := shardFileSize
			if sz < 0 {
				sz = data.ActualSize()
			}
			inlineBuffers[i] = bytes.NewBuffer(make([]byte, 0, sz))
			writers[i] = newStreamingBitrotWriterBuffer(inlineBuffers[i], DefaultBitrotAlgorithm, erasure.ShardSize())
			continue
		}
		writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tempErasureObj, shardFileSize, DefaultBitrotAlgorithm, erasure.ShardSize())
	}
	toEncode := io.Reader(data)
	if data.Size() > bigFileThreshold {
		// We use 2 buffers, so we always have a full buffer of input.
		bufA := es.bp.Get()
		bufB := es.bp.Get()
		defer es.bp.Put(bufA)
		defer es.bp.Put(bufB)
		ra, err := readahead.NewReaderBuffer(data, [][]byte{bufA[:fi.Erasure.BlockSize], bufB[:fi.Erasure.BlockSize]})
		if err == nil {
			toEncode = ra
			defer ra.Close()
		}
		logger.LogIf(ctx, err)
	}
	n, erasureErr := erasure.Encode(ctx, toEncode, writers, buffer, writeQuorum)
	closeBitrotWriters(writers)
	if erasureErr != nil {
		return ObjectInfo{}, toObjectErr(erasureErr, minioMetaTmpBucket, tempErasureObj)
	}
	// Should return IncompleteBody{} error when reader has fewer bytes
	// than specified in request header.
	if n < data.Size() {
		return ObjectInfo{}, IncompleteBody{Bucket: bucket, Object: object}
	}
	// Lock is taken only now — after the data has been staged — and held
	// through the final rename below.
	if !opts.NoLock {
		lk := es.NewNSLock(bucket, object)
		lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
		if err != nil {
			return ObjectInfo{}, err
		}
		ctx = lkctx.Context()
		defer lk.Unlock(lkctx.Cancel)
	}
	var index []byte
	if opts.IndexCB != nil {
		index = opts.IndexCB()
	}
	modTime := opts.MTime
	if opts.MTime.IsZero() {
		modTime = UTCNow()
	}
	for i, w := range writers {
		if w == nil {
			// Skip disks which we couldn't complete in erasure.Encode().
			onlineDisks[i] = nil
			continue
		}
		if len(inlineBuffers) > 0 && inlineBuffers[i] != nil {
			partsMetadata[i].Data = inlineBuffers[i].Bytes()
		} else {
			partsMetadata[i].Data = nil
		}
		partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), modTime, index)
		partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
			PartNumber: 1,
			Algorithm:  DefaultBitrotAlgorithm,
			Hash:       bitrotWriterSum(w),
		})
	}
	if opts.UserDefined["etag"] == "" {
		opts.UserDefined["etag"] = r.MD5CurrentHexString()
	}
	// Guess content-type from the extension if possible.
	if opts.UserDefined["content-type"] == "" {
		opts.UserDefined["content-type"] = mimedb.TypeByExtension(path.Ext(object))
	}
	// Fill all the necessary metadata.
	// Update `xl.meta` content on each disks.
	for index := range partsMetadata {
		partsMetadata[index].Metadata = opts.UserDefined
		partsMetadata[index].Size = n
		partsMetadata[index].ModTime = modTime
	}
	if len(inlineBuffers) > 0 {
		// Set an additional header when data is inlined.
		for index := range partsMetadata {
			partsMetadata[index].SetInlineData()
		}
	}
	// Rename the successfully written temporary object to final location.
	if onlineDisks, err = renameData(ctx, onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, bucket, object, writeQuorum); err != nil {
		if errors.Is(err, errFileNotFound) {
			return ObjectInfo{}, toObjectErr(errErasureWriteQuorum, bucket, object)
		}
		logger.LogIf(ctx, err)
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}
	for i := 0; i < len(onlineDisks); i++ {
		if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
			// Object info is the same in all disks, so we can pick
			// the first meta from online disk
			fi = partsMetadata[i]
			break
		}
	}
	fi.ReplicationState = opts.PutReplicationState()
	// Setting online disables the deferred temp-object cleanup above.
	online = countOnlineDisks(onlineDisks)
	// we are adding a new version to this object under the namespace lock, so this is the latest version.
	fi.IsLatest = true
	return fi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended), nil
}
// deleteObjectVersion deletes a single object version on the backing drive.
// writeQuorum is not referenced in the body — presumably kept for signature
// parity with the multi-drive implementation; TODO confirm.
func (es *erasureSingle) deleteObjectVersion(ctx context.Context, bucket, object string, writeQuorum int, fi FileInfo, forceDelMarker bool) error {
	return es.disk.DeleteVersion(ctx, bucket, object, fi, forceDelMarker)
}
// DeleteObjects deletes objects/versions in bulk, this function will still automatically split objects list
// into smaller bulks if some object names are found to be duplicated in the delete list, splitting
// into smaller bulks will avoid holding twice the write lock of the duplicated object names.
//
// Returns per-object results and per-object errors, index-aligned with the
// input objects slice.
func (es *erasureSingle) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) {
	errs := make([]error, len(objects))
	dobjects := make([]DeletedObject, len(objects))
	objSets := set.NewStringSet()
	for i := range errs {
		objects[i].ObjectName = encodeDirObject(objects[i].ObjectName)
		errs[i] = checkDelObjArgs(ctx, bucket, objects[i].ObjectName)
		objSets.Add(objects[i].ObjectName)
	}
	// Acquire a bulk write lock across 'objects'
	multiDeleteLock := es.NewNSLock(bucket, objSets.ToSlice()...)
	lkctx, err := multiDeleteLock.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		// Lock acquisition failed: report the same error for every object.
		for i := range errs {
			errs[i] = err
		}
		return dobjects, errs
	}
	ctx = lkctx.Context()
	defer multiDeleteLock.Unlock(lkctx.Cancel)
	writeQuorums := make([]int, len(objects))
	storageDisks := []StorageAPI{es.disk}
	for i := range objects {
		// Single drive write quorum is '1'
		writeQuorums[i] = 1
	}
	versionsMap := make(map[string]FileInfoVersions, len(objects))
	for i := range objects {
		// Construct the FileInfo data that needs to be preserved on the disk.
		vr := FileInfo{
			Name:             objects[i].ObjectName,
			VersionID:        objects[i].VersionID,
			ReplicationState: objects[i].ReplicationState(),
			// save the index to set correct error at this index.
			Idx: i,
		}
		vr.SetTierFreeVersionID(mustGetUUID())
		// VersionID is not set means delete is not specific about
		// any version, look for if the bucket is versioned or not.
		if objects[i].VersionID == "" {
			// MinIO extension to bucket version configuration
			suspended := opts.VersionSuspended
			versioned := opts.Versioned
			if opts.PrefixEnabledFn != nil {
				versioned = opts.PrefixEnabledFn(objects[i].ObjectName)
			}
			if versioned || suspended {
				// Bucket is versioned and no version was explicitly
				// mentioned for deletes, create a delete marker instead.
				vr.ModTime = UTCNow()
				vr.Deleted = true
				// Versioning suspended means that we add a `null` version
				// delete marker, if not add a new version for this delete
				// marker.
				if versioned {
					vr.VersionID = mustGetUUID()
				}
			}
		}
		// De-dup same object name to collect multiple versions for same object.
		v, ok := versionsMap[objects[i].ObjectName]
		if ok {
			v.Versions = append(v.Versions, vr)
		} else {
			v = FileInfoVersions{
				Name:     vr.Name,
				Versions: []FileInfo{vr},
			}
		}
		if vr.Deleted {
			dobjects[i] = DeletedObject{
				DeleteMarker:          vr.Deleted,
				DeleteMarkerVersionID: vr.VersionID,
				DeleteMarkerMTime:     DeleteMarkerMTime{vr.ModTime},
				ObjectName:            vr.Name,
				ReplicationState:      vr.ReplicationState,
			}
		} else {
			dobjects[i] = DeletedObject{
				ObjectName:       vr.Name,
				VersionID:        vr.VersionID,
				ReplicationState: vr.ReplicationState,
			}
		}
		versionsMap[objects[i].ObjectName] = v
	}
	dedupVersions := make([]FileInfoVersions, 0, len(versionsMap))
	for _, version := range versionsMap {
		dedupVersions = append(dedupVersions, version)
	}
	// Initialize list of errors.
	delObjErrs := make([][]error, len(storageDisks))
	var wg sync.WaitGroup
	// Remove versions in bulk for each disk
	for index, disk := range storageDisks {
		wg.Add(1)
		go func(index int, disk StorageAPI) {
			defer wg.Done()
			delObjErrs[index] = make([]error, len(objects))
			if disk == nil {
				for i := range objects {
					delObjErrs[index][i] = errDiskNotFound
				}
				return
			}
			errs := disk.DeleteVersions(ctx, bucket, dedupVersions)
			// Map per-dedup-entry errors back to the original input
			// indexes recorded in FileInfo.Idx.
			for i, err := range errs {
				if err == nil {
					continue
				}
				for _, v := range dedupVersions[i].Versions {
					if err == errFileNotFound || err == errFileVersionNotFound {
						if !dobjects[v.Idx].DeleteMarker {
							// Not delete marker, if not found, ok.
							continue
						}
					}
					delObjErrs[index][v.Idx] = err
				}
			}
		}(index, disk)
	}
	wg.Wait()
	// Reduce errors for each object
	for objIndex := range objects {
		diskErrs := make([]error, len(storageDisks))
		// Iterate over disks to fetch the error
		// of deleting of the current object
		for i := range delObjErrs {
			// delObjErrs[i] is not nil when disks[i] is also not nil
			if delObjErrs[i] != nil {
				diskErrs[i] = delObjErrs[i][objIndex]
			}
		}
		err := reduceWriteQuorumErrs(ctx, diskErrs, objectOpIgnoredErrs, writeQuorums[objIndex])
		if objects[objIndex].VersionID != "" {
			errs[objIndex] = toObjectErr(err, bucket, objects[objIndex].ObjectName, objects[objIndex].VersionID)
		} else {
			errs[objIndex] = toObjectErr(err, bucket, objects[objIndex].ObjectName)
		}
		// NOTE: deferred inside a loop — all NSUpdated notifications fire
		// together when this function returns, not per iteration.
		defer NSUpdated(bucket, objects[objIndex].ObjectName)
	}
	return dobjects, errs
}
// deletePrefix recursively force-deletes everything under the given prefix,
// including its directory-encoded variant, returning the error from the
// plain-prefix delete.
func (es *erasureSingle) deletePrefix(ctx context.Context, bucket, prefix string) error {
	opts := DeleteOptions{
		Recursive: true,
		Force:     true,
	}
	err := es.disk.Delete(ctx, bucket, prefix, opts)
	// Best-effort removal of the directory-encoded form; its error is
	// intentionally ignored, matching the original deferred call.
	es.disk.Delete(ctx, bucket, encodeDirObject(prefix), opts)
	return err
}
// DeleteObject - deletes an object, this call doesn't necessary reply
// any error as it is not necessary for the handler to reply back a
// response to the client request.
//
// Handles plain deletes, versioned deletes (delete markers) and
// delete-replication bookkeeping on a single-drive setup.
func (es *erasureSingle) DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	if err = checkDelObjArgs(ctx, bucket, object); err != nil {
		return objInfo, err
	}

	// Prefix deletes take a separate, non-versioned fast path.
	if opts.DeletePrefix {
		return ObjectInfo{}, toObjectErr(es.deletePrefix(ctx, bucket, object), bucket, object)
	}

	object = encodeDirObject(object)
	var lc *lifecycle.Lifecycle
	var rcfg lock.Retention
	if opts.Expiration.Expire {
		// Check if the current bucket has a configured lifecycle policy
		lc, _ = globalLifecycleSys.Get(bucket)
		rcfg, _ = globalBucketObjectLockSys.Get(bucket)
	}

	// expiration attempted on a bucket with no lifecycle
	// rules shall be rejected.
	if lc == nil && opts.Expiration.Expire {
		if opts.VersionID != "" {
			return objInfo, VersionNotFound{
				Bucket:    bucket,
				Object:    object,
				VersionID: opts.VersionID,
			}
		}
		return objInfo, ObjectNotFound{
			Bucket: bucket,
			Object: object,
		}
	}

	// Acquire a write lock before deleting the object.
	lk := es.NewNSLock(bucket, object)
	lkctx, err := lk.GetLock(ctx, globalDeleteOperationTimeout)
	if err != nil {
		return ObjectInfo{}, err
	}
	ctx = lkctx.Context()
	defer lk.Unlock(lkctx.Cancel)

	versionFound := true
	objInfo = ObjectInfo{VersionID: opts.VersionID} // version id needed in Delete API response.
	goi, writeQuorum, gerr := es.getObjectInfoAndQuorum(ctx, bucket, object, opts)
	if gerr != nil && goi.Name == "" {
		switch gerr.(type) {
		case InsufficientReadQuorum:
			// Surface read-quorum loss as a write-quorum failure for deletes.
			return objInfo, InsufficientWriteQuorum{}
		}
		// For delete marker replication, versionID being replicated will not exist on disk
		if opts.DeleteMarker {
			versionFound = false
		} else {
			return objInfo, gerr
		}
	}

	if opts.Expiration.Expire {
		// Re-evaluate lifecycle: only proceed when the computed action is
		// still a delete; a None/Transition action means the object should
		// not be expired any more.
		action := evalActionFromLifecycle(ctx, *lc, rcfg, goi, false)
		var isErr bool
		switch action {
		case lifecycle.NoneAction:
			isErr = true
		case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
			isErr = true
		}
		if isErr {
			if goi.VersionID != "" {
				return goi, VersionNotFound{
					Bucket:    bucket,
					Object:    object,
					VersionID: goi.VersionID,
				}
			}
			return goi, ObjectNotFound{
				Bucket: bucket,
				Object: object,
			}
		}
	}

	defer NSUpdated(bucket, object)

	var markDelete bool
	// Determine whether to mark object deleted for replication
	if goi.VersionID != "" {
		markDelete = true
	}

	// Default deleteMarker to true if object is under versioning
	deleteMarker := opts.Versioned

	if opts.VersionID != "" {
		// case where replica version needs to be deleted on target cluster
		if versionFound && opts.DeleteMarkerReplicationStatus() == replication.Replica {
			markDelete = false
		}
		if opts.VersionPurgeStatus().Empty() && opts.DeleteMarkerReplicationStatus().Empty() {
			markDelete = false
		}
		if opts.VersionPurgeStatus() == Complete {
			markDelete = false
		}
		// Version is found but we do not wish to create more delete markers
		// now, since VersionPurgeStatus() is already set, we can let the
		// lower layers decide this. This fixes a regression that was introduced
		// in PR #14555 where !VersionPurgeStatus.Empty() is automatically
		// considered as Delete marker true to avoid listing such objects by
		// regular ListObjects() calls. However for delete replication this
		// ends up being a problem because "upon" a successful delete this
		// ends up creating a new delete marker that is spurious and unnecessary.
		if versionFound {
			if !goi.VersionPurgeStatus.Empty() {
				deleteMarker = false
			} else if !goi.DeleteMarker { // implies a versioned delete of object
				deleteMarker = false
			}
		}
	}

	modTime := opts.MTime
	if opts.MTime.IsZero() {
		modTime = UTCNow()
	}
	fvID := mustGetUUID()
	if markDelete {
		if opts.Versioned || opts.VersionSuspended {
			if !deleteMarker {
				// versioning suspended means we add `null` version as
				// delete marker, if its not decided already.
				deleteMarker = opts.VersionSuspended && opts.VersionID == ""
			}
			fi := FileInfo{
				Name:             object,
				Deleted:          deleteMarker,
				MarkDeleted:      markDelete,
				ModTime:          modTime,
				ReplicationState: opts.DeleteReplication,
				TransitionStatus: opts.Transition.Status,
				ExpireRestored:   opts.Transition.ExpireRestored,
			}
			fi.SetTierFreeVersionID(fvID)
			if opts.Versioned {
				fi.VersionID = mustGetUUID()
				if opts.VersionID != "" {
					fi.VersionID = opts.VersionID
				}
			}
			// versioning suspended means we add `null` version as
			// delete marker. Add delete marker, since we don't have
			// any version specified explicitly. Or if a particular
			// version id needs to be replicated.
			if err = es.deleteObjectVersion(ctx, bucket, object, writeQuorum, fi, opts.DeleteMarker); err != nil {
				return objInfo, toObjectErr(err, bucket, object)
			}
			return fi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended), nil
		}
	}

	// Delete the object version on all disks.
	dfi := FileInfo{
		Name:             object,
		VersionID:        opts.VersionID,
		MarkDeleted:      markDelete,
		Deleted:          deleteMarker,
		ModTime:          modTime,
		ReplicationState: opts.DeleteReplication,
		TransitionStatus: opts.Transition.Status,
		ExpireRestored:   opts.Transition.ExpireRestored,
	}
	dfi.SetTierFreeVersionID(fvID)
	if err = es.deleteObjectVersion(ctx, bucket, object, writeQuorum, dfi, opts.DeleteMarker); err != nil {
		return objInfo, toObjectErr(err, bucket, object)
	}

	return ObjectInfo{
		Bucket:                     bucket,
		Name:                       object,
		VersionID:                  opts.VersionID,
		VersionPurgeStatusInternal: opts.DeleteReplication.VersionPurgeStatusInternal,
		ReplicationStatusInternal:  opts.DeleteReplication.ReplicationStatusInternal,
	}, nil
}
// PutObjectMetadata updates an existing object's user-defined metadata in
// place: it reads the current xl.meta, merges in the (possibly
// callback-modified) user metadata, and writes it back. The object's data
// is untouched.
func (es *erasureSingle) PutObjectMetadata(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
	if !opts.NoLock {
		// Lock the object before updating metadata.
		lk := es.NewNSLock(bucket, object)
		lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
		if err != nil {
			return ObjectInfo{}, err
		}
		ctx = lkctx.Context()
		defer lk.Unlock(lkctx.Cancel)
	}

	disks := []StorageAPI{es.disk}

	var metaArr []FileInfo
	var errs []error

	// Read metadata associated with the object from all disks.
	metaArr, errs = readAllFileInfo(ctx, disks, bucket, object, opts.VersionID, false)

	readQuorum, _, err := objectQuorumFromMeta(ctx, metaArr, errs, 0)
	if err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}

	// List all online disks.
	onlineDisks, modTime := listOnlineDisks(disks, metaArr, errs)

	// Pick latest valid metadata.
	fi, err := pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
	if err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}
	// Metadata updates on a delete marker are not allowed.
	if fi.Deleted {
		return ObjectInfo{}, toObjectErr(errMethodNotAllowed, bucket, object)
	}

	filterOnlineDisksInplace(fi, metaArr, onlineDisks)

	// if version-id is not specified retention is supposed to be set on the latest object.
	if opts.VersionID == "" {
		opts.VersionID = fi.VersionID
	}

	objInfo := fi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended)

	// Give the caller a chance to inspect/mutate the metadata before it is
	// persisted (e.g. legal-hold/retention validation).
	if opts.EvalMetadataFn != nil {
		if err := opts.EvalMetadataFn(objInfo); err != nil {
			return ObjectInfo{}, err
		}
	}
	for k, v := range objInfo.UserDefined {
		fi.Metadata[k] = v
	}
	// NOTE(review): opts.MTime is applied verbatim even when zero — confirm
	// callers always populate it for this path.
	fi.ModTime = opts.MTime
	fi.VersionID = opts.VersionID

	if err = es.updateObjectMeta(ctx, bucket, object, fi, onlineDisks...); err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}

	return fi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended), nil
}
// PutObjectTags - replace or add tags to an existing object
//
// Stores the encoded tag string in the object's metadata under the
// x-amz-tagging key, together with any replication state carried in opts.
func (es *erasureSingle) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) (ObjectInfo, error) {
	// Lock the object before updating tags.
	lk := es.NewNSLock(bucket, object)
	lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return ObjectInfo{}, err
	}
	ctx = lkctx.Context()
	defer lk.Unlock(lkctx.Cancel)

	disks := []StorageAPI{es.disk}

	var metaArr []FileInfo
	var errs []error

	// Read metadata associated with the object from all disks.
	// With an explicit version-id, read that exact version; otherwise read
	// the latest via readAllXL.
	if opts.VersionID != "" {
		metaArr, errs = readAllFileInfo(ctx, disks, bucket, object, opts.VersionID, false)
	} else {
		metaArr, errs = readAllXL(ctx, disks, bucket, object, false)
	}

	readQuorum, _, err := objectQuorumFromMeta(ctx, metaArr, errs, 0)
	if err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}

	// List all online disks.
	onlineDisks, modTime := listOnlineDisks(disks, metaArr, errs)

	// Pick latest valid metadata.
	fi, err := pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
	if err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}
	// Tagging a delete marker: not-found for the latest version,
	// method-not-allowed for an explicit version.
	if fi.Deleted {
		if opts.VersionID == "" {
			return ObjectInfo{}, toObjectErr(errFileNotFound, bucket, object)
		}
		return ObjectInfo{}, toObjectErr(errMethodNotAllowed, bucket, object)
	}

	filterOnlineDisksInplace(fi, metaArr, onlineDisks)

	fi.Metadata[xhttp.AmzObjectTagging] = tags
	fi.ReplicationState = opts.PutReplicationState()
	for k, v := range opts.UserDefined {
		fi.Metadata[k] = v
	}

	if err = es.updateObjectMeta(ctx, bucket, object, fi, onlineDisks...); err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}

	return fi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended), nil
}
// updateObjectMeta persists an updated `xl.meta` for the object to the
// supplied disks in parallel and reduces the per-disk results to a single
// quorum error. A FileInfo carrying no metadata is a no-op.
func (es *erasureSingle) updateObjectMeta(ctx context.Context, bucket, object string, fi FileInfo, onlineDisks ...StorageAPI) error {
	// Nothing to write when there is no metadata.
	if len(fi.Metadata) == 0 {
		return nil
	}

	g := errgroup.WithNErrs(len(onlineDisks))
	for i := range onlineDisks {
		i := i // capture for the goroutine below
		g.Go(func() error {
			disk := onlineDisks[i]
			if disk == nil {
				return errDiskNotFound
			}
			return disk.UpdateMetadata(ctx, bucket, object, fi)
		}, i)
	}

	// Wait for every writer, then reduce with a write quorum of one
	// (single-drive deployment).
	return reduceWriteQuorumErrs(ctx, g.Wait(), objectOpIgnoredErrs, 1)
}
// DeleteObjectTags removes all tags from an existing object by storing an
// empty tag set through PutObjectTags.
func (es *erasureSingle) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
	const emptyTags = ""
	return es.PutObjectTags(ctx, bucket, object, emptyTags, opts)
}
// GetObjectTags returns the tag set of an existing object, parsed from the
// user-tag string carried in its object info.
func (es *erasureSingle) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) {
	// GetObjectInfo already surfaces the stored tag string.
	info, err := es.GetObjectInfo(ctx, bucket, object, opts)
	if err != nil {
		return nil, err
	}
	return tags.ParseObjectTags(info.UserTags)
}
// TransitionObject - transition object content to target tier.
//
// Streams the object's data to the configured remote tier, then rewrites
// the local version with transition metadata and emits a bucket event.
func (es *erasureSingle) TransitionObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
	tgtClient, err := globalTierConfigMgr.getDriver(opts.Transition.Tier)
	if err != nil {
		return err
	}

	// Acquire write lock before starting to transition the object.
	lk := es.NewNSLock(bucket, object)
	lkctx, err := lk.GetLock(ctx, globalDeleteOperationTimeout)
	if err != nil {
		return err
	}
	ctx = lkctx.Context()
	defer lk.Unlock(lkctx.Cancel)

	fi, metaArr, onlineDisks, err := es.getObjectFileInfo(ctx, bucket, object, opts, true)
	if err != nil {
		return toObjectErr(err, bucket, object)
	}
	if fi.Deleted {
		if opts.VersionID == "" {
			return toObjectErr(errFileNotFound, bucket, object)
		}
		// Make sure to return object info to provide extra information.
		return toObjectErr(errMethodNotAllowed, bucket, object)
	}
	// verify that the object queued for transition is identical to that on disk.
	if !opts.MTime.Equal(fi.ModTime) || !strings.EqualFold(opts.Transition.ETag, extractETag(fi.Metadata)) {
		return toObjectErr(errFileNotFound, bucket, object)
	}
	// if object already transitioned, return
	if fi.TransitionStatus == lifecycle.TransitionComplete {
		return nil
	}
	defer NSUpdated(bucket, object)

	destObj, err := genTransitionObjName(bucket)
	if err != nil {
		return err
	}

	// Pump the object's bytes through a pipe: reader side goes to the tier
	// client, writer side is fed by a background read of the local object.
	pr, pw := xioutil.WaitPipe()
	go func() {
		err := es.getObjectWithFileInfo(ctx, bucket, object, 0, fi.Size, pw, fi, metaArr, onlineDisks)
		pw.CloseWithError(err)
	}()

	var rv remoteVersionID
	rv, err = tgtClient.Put(ctx, destObj, pr, fi.Size)
	pr.CloseWithError(err)
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("Unable to transition %s/%s(%s) to %s tier: %w", bucket, object, opts.VersionID, opts.Transition.Tier, err))
		return err
	}
	// Record where/what the transitioned copy is so it can be fetched or
	// expired later.
	fi.TransitionStatus = lifecycle.TransitionComplete
	fi.TransitionedObjName = destObj
	fi.TransitionTier = opts.Transition.Tier
	fi.TransitionVersionID = string(rv)
	eventName := event.ObjectTransitionComplete

	// we now know the number of blocks this object needs for data and parity.
	// writeQuorum is dataBlocks + 1
	writeQuorum := fi.Erasure.DataBlocks
	if fi.Erasure.DataBlocks == fi.Erasure.ParityBlocks {
		writeQuorum++
	}

	// NOTE(review): deleteObjectVersion here persists the transition
	// metadata for this version; confirm it also releases the local data
	// parts as intended.
	if err = es.deleteObjectVersion(ctx, bucket, object, writeQuorum, fi, false); err != nil {
		eventName = event.ObjectTransitionFailed
	}

	objInfo := fi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended)
	sendEvent(eventArgs{
		EventName:  eventName,
		BucketName: bucket,
		Object:     objInfo,
		Host:       "Internal: [ILM-Transition]",
	})
	auditLogLifecycle(ctx, objInfo, ILMTransition)
	return err
}
// RestoreTransitionedObject - restore transitioned object content locally on this cluster.
// This is similar to PostObjectRestore from AWS GLACIER
// storage class. When PostObjectRestore API is called, a temporary copy of the object
// is restored locally to the bucket on source cluster until the restore expiry date.
// The copy that was transitioned continues to reside in the transitioned tier.
func (es *erasureSingle) RestoreTransitionedObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
	// Delegate to the internal restore implementation.
	restoreErr := es.restoreTransitionedObject(ctx, bucket, object, opts)
	return restoreErr
}
// updateRestoreMetadata records the outcome of a restore attempt in the
// object's metadata via a metadata-only self-copy: on success the
// x-amz-restore header is stamped with the restore expiry; on failure the
// header is removed so the restore can be retried.
func (es *erasureSingle) updateRestoreMetadata(ctx context.Context, bucket, object string, objInfo ObjectInfo, opts ObjectOptions, rerr error) error {
	oi := objInfo.Clone()
	oi.metadataOnly = true // self-copy must only touch metadata
	if rerr != nil {
		// allow retry in the case of failure to restore
		delete(oi.UserDefined, xhttp.AmzRestore)
	} else {
		oi.UserDefined[xhttp.AmzRestore] = completedRestoreObj(opts.Transition.RestoreExpiry).String()
	}
	versioned := ObjectOptions{
		VersionID: oi.VersionID,
	}
	_, err := es.CopyObject(ctx, bucket, object, bucket, object, oi, versioned, versioned)
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("Unable to update transition restore metadata for %s/%s(%s): %s", bucket, object, oi.VersionID, err))
	}
	return err
}
// restoreTransitionedObject for multipart object chunks the file stream from remote tier into the same number of parts
// as in the xl.meta for this version and rehydrates the part.n into the fi.DataDir for this version as in the xl.meta
//
// Whatever the outcome, the restore status header is updated via
// setRestoreHeaderFn so clients can observe success or retry on failure.
func (es *erasureSingle) restoreTransitionedObject(ctx context.Context, bucket string, object string, opts ObjectOptions) error {
	// Wrap every exit path: persist the restore status, then return the
	// original error.
	setRestoreHeaderFn := func(oi ObjectInfo, rerr error) error {
		es.updateRestoreMetadata(ctx, bucket, object, oi, opts, rerr)
		return rerr
	}
	var oi ObjectInfo
	// get the file info on disk for transitioned object
	actualfi, _, _, err := es.getObjectFileInfo(ctx, bucket, object, opts, false)
	if err != nil {
		return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object))
	}

	oi = actualfi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended)
	ropts := putRestoreOpts(bucket, object, opts.Transition.RestoreRequest, oi)
	// Single-part objects restore via one plain PutObject.
	if len(oi.Parts) == 1 {
		var rs *HTTPRangeSpec
		gr, err := getTransitionedObjectReader(ctx, bucket, object, rs, http.Header{}, oi, opts)
		if err != nil {
			return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object))
		}
		defer gr.Close()
		hashReader, err := hash.NewReader(gr, gr.ObjInfo.Size, "", "", gr.ObjInfo.Size)
		if err != nil {
			return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object))
		}
		pReader := NewPutObjReader(hashReader)
		ropts.UserDefined[xhttp.AmzRestore] = completedRestoreObj(opts.Transition.RestoreExpiry).String()
		_, err = es.PutObject(ctx, bucket, object, pReader, ropts)
		return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object))
	}

	// Multipart restore: replay the original part layout through a fresh
	// multipart upload fed from one sequential tier read.
	uploadID, err := es.NewMultipartUpload(ctx, bucket, object, ropts)
	if err != nil {
		return setRestoreHeaderFn(oi, err)
	}

	var uploadedParts []CompletePart
	var rs *HTTPRangeSpec
	// get reader from the warm backend - note that even in the case of encrypted objects, this stream is still encrypted.
	gr, err := getTransitionedObjectReader(ctx, bucket, object, rs, http.Header{}, oi, opts)
	if err != nil {
		return setRestoreHeaderFn(oi, err)
	}
	defer gr.Close()

	// rehydrate the parts back on disk as per the original xl.meta prior to transition
	for _, partInfo := range oi.Parts {
		// Each hash.NewReader caps the shared stream at this part's size,
		// slicing the tier stream into the original part boundaries.
		hr, err := hash.NewReader(gr, partInfo.Size, "", "", partInfo.Size)
		if err != nil {
			return setRestoreHeaderFn(oi, err)
		}
		pInfo, err := es.PutObjectPart(ctx, bucket, object, uploadID, partInfo.Number, NewPutObjReader(hr), ObjectOptions{})
		if err != nil {
			return setRestoreHeaderFn(oi, err)
		}
		// A short part means the tier copy no longer matches xl.meta.
		if pInfo.Size != partInfo.Size {
			return setRestoreHeaderFn(oi, InvalidObjectState{Bucket: bucket, Object: object})
		}
		uploadedParts = append(uploadedParts, CompletePart{
			PartNumber: pInfo.PartNumber,
			ETag:       pInfo.ETag,
		})
	}
	_, err = es.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{
		MTime: oi.ModTime,
	})
	return setRestoreHeaderFn(oi, err)
}
// getUploadIDDir returns the multipart metadata directory for the given
// upload ID, nested under the object's SHA directory.
func (es *erasureSingle) getUploadIDDir(bucket, object, uploadID string) string {
	shaDir := es.getMultipartSHADir(bucket, object)
	return pathJoin(shaDir, uploadID)
}
// getMultipartSHADir derives the multipart directory name for an object as
// the SHA-256 hex digest of "bucket/object".
func (es *erasureSingle) getMultipartSHADir(bucket, object string) string {
	key := pathJoin(bucket, object)
	return getSHA256Hash([]byte(key))
}
// checkUploadIDExists - verify if a given uploadID exists and is valid.
func (es *erasureSingle) checkUploadIDExists(ctx context.Context, bucket, object, uploadID string) (err error) {
	// Translate a generic file-not-found into the multipart-specific
	// error before returning (named result makes the remap visible to
	// all return paths).
	defer func() {
		if err == errFileNotFound {
			err = errUploadIDNotFound
		}
	}()

	disks := []StorageAPI{es.disk}

	// Read metadata associated with the object from all disks.
	metaArr, errs := readAllFileInfo(ctx, disks, minioMetaMultipartBucket, es.getUploadIDDir(bucket, object, uploadID), "", false)

	readQuorum, _, err := objectQuorumFromMeta(ctx, metaArr, errs, 0)
	if err != nil {
		return err
	}

	if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
		return reducedErr
	}

	// List all online disks.
	_, modTime := listOnlineDisks(disks, metaArr, errs)

	// Pick latest valid metadata.
	// Only validity is checked here; the FileInfo itself is discarded.
	_, err = pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
	return err
}
// removeObjectPart removes the part file identified by partNumber for the
// given multipart upload from minioMetaMultipartBucket. Deletion is best
// effort: xl.meta is the authoritative record of which parts make up the
// object, so a stray or missing part file does not affect correctness.
func (es *erasureSingle) removeObjectPart(bucket, object, uploadID, dataDir string, partNumber int) {
	partPath := pathJoin(es.getUploadIDDir(bucket, object, uploadID), dataDir, fmt.Sprintf("part.%d", partNumber))
	storageDisks := []StorageAPI{es.disk}

	g := errgroup.WithNErrs(len(storageDisks))
	for i, disk := range storageDisks {
		if disk == nil {
			continue
		}
		i := i // capture for the goroutine below
		g.Go(func() error {
			// Failure to remove is deliberately ignored; see doc comment.
			_ = storageDisks[i].Delete(context.TODO(), minioMetaMultipartBucket, partPath, DeleteOptions{
				Recursive: false,
				Force:     false,
			})
			return nil
		}, i)
	}
	g.Wait()
}
// Remove the old multipart uploads on the given disk.
//
// Walks the multipart and tmp meta buckets on disk and expires entries
// older than `expiry`. Each entry is throttled via deletedCleanupSleeper
// to avoid saturating the disk.
func (es *erasureSingle) cleanupStaleUploadsOnDisk(ctx context.Context, disk StorageAPI, expiry time.Duration) {
	now := time.Now()
	diskPath := disk.Endpoint().Path

	// Pass 1: .minio.sys/multipart/<shaDir>/<uploadIDDir> — stale uploads
	// are renamed into the deleted-trash bucket for asynchronous removal.
	readDirFn(pathJoin(diskPath, minioMetaMultipartBucket), func(shaDir string, typ os.FileMode) error {
		return readDirFn(pathJoin(diskPath, minioMetaMultipartBucket, shaDir), func(uploadIDDir string, typ os.FileMode) error {
			uploadIDPath := pathJoin(shaDir, uploadIDDir)
			fi, err := disk.ReadVersion(ctx, minioMetaMultipartBucket, uploadIDPath, "", false)
			if err != nil {
				// Unreadable entries are skipped rather than treated as fatal.
				return nil
			}
			wait := es.deletedCleanupSleeper.Timer(ctx)
			if now.Sub(fi.ModTime) > expiry {
				es.disk.RenameFile(context.Background(), minioMetaMultipartBucket, uploadIDPath, minioMetaTmpDeletedBucket, mustGetUUID())
			}
			wait()
			return nil
		})
	})

	// Pass 2: .minio.sys/tmp — delete stale temporary volumes directly.
	readDirFn(pathJoin(diskPath, minioMetaTmpBucket), func(tmpDir string, typ os.FileMode) error {
		if tmpDir == ".trash/" { // do not remove .trash/ here, it has its own routines
			return nil
		}
		vi, err := disk.StatVol(ctx, pathJoin(minioMetaTmpBucket, tmpDir))
		if err != nil {
			// Stat failures are skipped; the entry may already be gone.
			return nil
		}
		wait := es.deletedCleanupSleeper.Timer(ctx)
		if now.Sub(vi.Created) > expiry {
			disk.Delete(ctx, minioMetaTmpBucket, tmpDir, DeleteOptions{
				Recursive: true,
				Force:     false,
			})
		}
		wait()
		return nil
	})
}
// ListMultipartUploads - lists all the pending multipart
// uploads for a particular object in a bucket.
//
// Implements minimal S3 compatible ListMultipartUploads API. We do
// not support prefix based listing, this is a deliberate attempt
// towards simplification of multipart APIs.
// The resulting ListMultipartsInfo structure is unmarshalled directly as XML.
func (es *erasureSingle) ListMultipartUploads(ctx context.Context, bucket, object, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) {
	if err := checkListMultipartArgs(ctx, bucket, object, keyMarker, uploadIDMarker, delimiter, es); err != nil {
		return ListMultipartsInfo{}, err
	}

	// Echo request parameters into the response, as S3 requires.
	result.MaxUploads = maxUploads
	result.KeyMarker = keyMarker
	result.Prefix = object
	result.Delimiter = delimiter

	// Enumerate upload-id directories under the object's multipart SHA dir.
	uploadIDs, err := es.disk.ListDir(ctx, minioMetaMultipartBucket, es.getMultipartSHADir(bucket, object), -1)
	if err != nil {
		if err == errFileNotFound {
			// No pending uploads for this object.
			return result, nil
		}
		logger.LogIf(ctx, err)
		return result, toObjectErr(err, bucket, object)
	}

	for i := range uploadIDs {
		uploadIDs[i] = strings.TrimSuffix(uploadIDs[i], SlashSeparator)
	}

	// S3 spec says uploadIDs should be sorted based on initiated time, we need
	// to read the metadata entry.
	var uploads []MultipartInfo

	populatedUploadIds := set.NewStringSet()

	for _, uploadID := range uploadIDs {
		if populatedUploadIds.Contains(uploadID) {
			continue
		}
		fi, err := es.disk.ReadVersion(ctx, minioMetaMultipartBucket, pathJoin(es.getUploadIDDir(bucket, object, uploadID)), "", false)
		if err != nil {
			return result, toObjectErr(err, bucket, object)
		}
		populatedUploadIds.Add(uploadID)
		uploads = append(uploads, MultipartInfo{
			Object:    object,
			UploadID:  uploadID,
			Initiated: fi.ModTime,
		})
	}

	sort.Slice(uploads, func(i int, j int) bool {
		return uploads[i].Initiated.Before(uploads[j].Initiated)
	})

	// Resume listing just past uploadIDMarker, when given. (The previous
	// implementation carried an unreachable second comparison and a dead
	// trailing increment inside this scan; behavior is unchanged.)
	uploadIndex := 0
	if uploadIDMarker != "" {
		for uploadIndex < len(uploads) {
			id := uploads[uploadIndex].UploadID
			uploadIndex++
			if id == uploadIDMarker {
				break
			}
		}
	}

	// Accumulate up to maxUploads entries, tracking the continuation marker.
	for uploadIndex < len(uploads) {
		result.Uploads = append(result.Uploads, uploads[uploadIndex])
		result.NextUploadIDMarker = uploads[uploadIndex].UploadID
		uploadIndex++
		if len(result.Uploads) == maxUploads {
			break
		}
	}

	result.IsTruncated = uploadIndex < len(uploads)

	if !result.IsTruncated {
		// Markers are only meaningful on truncated responses.
		result.NextKeyMarker = ""
		result.NextUploadIDMarker = ""
	}

	return result, nil
}
// newMultipartUpload - wrapper for initializing a new multipart
// request; returns a unique upload id.
//
// Internally this function creates 'uploads.json' associated for the
// incoming object at
// '.minio.sys/multipart/bucket/object/uploads.json' on all the
// disks. `uploads.json` carries metadata regarding on-going multipart
// operation(s) on the object.
func (es *erasureSingle) newMultipartUpload(ctx context.Context, bucket string, object string, opts ObjectOptions) (string, error) {
	// Single drive: no parity, all data.
	onlineDisks := []StorageAPI{es.disk}
	parityDrives := 0
	dataDrives := len(onlineDisks) - parityDrives

	// we now know the number of blocks this object needs for data and parity.
	// establish the writeQuorum using this data
	writeQuorum := dataDrives
	if dataDrives == parityDrives {
		writeQuorum++
	}

	// Initialize parts metadata
	partsMetadata := make([]FileInfo, len(onlineDisks))

	fi := newFileInfo(pathJoin(bucket, object), dataDrives, parityDrives)
	fi.VersionID = opts.VersionID
	if opts.Versioned && fi.VersionID == "" {
		fi.VersionID = mustGetUUID()
	}
	fi.DataDir = mustGetUUID()

	// Initialize erasure metadata.
	for index := range partsMetadata {
		partsMetadata[index] = fi
	}

	// Guess content-type from the extension if possible.
	if opts.UserDefined["content-type"] == "" {
		opts.UserDefined["content-type"] = mimedb.TypeByExtension(path.Ext(object))
	}

	modTime := opts.MTime
	if opts.MTime.IsZero() {
		modTime = UTCNow()
	}

	onlineDisks, partsMetadata = shuffleDisksAndPartsMetadata(onlineDisks, partsMetadata, fi)

	// Fill all the necessary metadata.
	// Update `xl.meta` content on each disks.
	for index := range partsMetadata {
		partsMetadata[index].Fresh = true
		partsMetadata[index].ModTime = modTime
		partsMetadata[index].Metadata = opts.UserDefined
	}

	uploadID := mustGetUUID()
	uploadIDPath := es.getUploadIDDir(bucket, object, uploadID)

	// Write updated `xl.meta` to all disks.
	if _, err := writeUniqueFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum); err != nil {
		return "", toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
	}
	// Return success.
	return uploadID, nil
}
// NewMultipartUpload - initialize a new multipart upload, returns a
// unique id. The unique id returned here is of UUID form, for each
// subsequent request each UUID is unique.
//
// Implements S3 compatible initiate multipart API.
func (es *erasureSingle) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) {
	if err := checkNewMultipartArgs(ctx, bucket, object, es); err != nil {
		return "", err
	}

	// Guarantee a non-nil metadata map before delegating.
	if opts.UserDefined == nil {
		opts.UserDefined = map[string]string{}
	}
	return es.newMultipartUpload(ctx, bucket, object, opts)
}
// CopyObjectPart - reads incoming stream and internally erasure codes
// them. This call is similar to put object part operation but the source
// data is read from an existing object.
//
// Implements S3 compatible Upload Part Copy API.
func (es *erasureSingle) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) {
	// Delegate to PutObjectPart, streaming from the source object's reader.
	pInfo, err := es.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, NewPutObjReader(srcInfo.Reader), dstOpts)
	if err == nil {
		// Success.
		return pInfo, nil
	}
	return pi, toObjectErr(err, dstBucket, dstObject)
}
// PutObjectPart - reads incoming stream and internally erasure codes
// them. This call is similar to single put operation but it is part
// of the multipart transaction.
//
// Implements S3 compatible Upload Part API.
func (es *erasureSingle) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (pi PartInfo, err error) {
if err := checkPutObjectPartArgs(ctx, bucket, object, es); err != nil {
return PartInfo{}, err
}
// Write lock for this part ID.
// Held throughout the operation.
partIDLock := es.NewNSLock(bucket, pathJoin(object, uploadID, strconv.Itoa(partID)))
plkctx, err := partIDLock.GetLock(ctx, globalOperationTimeout)
if err != nil {
return PartInfo{}, err
}
pctx := plkctx.Context()
defer partIDLock.Unlock(plkctx.Cancel)
// Read lock for upload id.
// Only held while reading the upload metadata.
uploadIDRLock := es.NewNSLock(bucket, pathJoin(object, uploadID))
rlkctx, err := uploadIDRLock.GetRLock(ctx, globalOperationTimeout)
if err != nil {
return PartInfo{}, err
}
rctx := rlkctx.Context()
defer func() {
if uploadIDRLock != nil {
uploadIDRLock.RUnlock(rlkctx.Cancel)
}
}()
data := r.Reader
// Validate input data size and it can never be less than zero.
if data.Size() < -1 {
logger.LogIf(rctx, errInvalidArgument, logger.Application)
return pi, toObjectErr(errInvalidArgument)
}
var partsMetadata []FileInfo
var errs []error
uploadIDPath := es.getUploadIDDir(bucket, object, uploadID)
// Validates if upload ID exists.
if err = es.checkUploadIDExists(rctx, bucket, object, uploadID); err != nil {
return pi, toObjectErr(err, bucket, object, uploadID)
}
storageDisks := []StorageAPI{es.disk}
// Read metadata associated with the object from all disks.
partsMetadata, errs = readAllFileInfo(rctx, storageDisks, minioMetaMultipartBucket,
uploadIDPath, "", false)
// Unlock upload id locks before, so others can get it.
uploadIDRLock.RUnlock(rlkctx.Cancel)
uploadIDRLock = nil
// get Quorum for this object
_, writeQuorum, err := objectQuorumFromMeta(pctx, partsMetadata, errs, 0)
if err != nil {
return pi, toObjectErr(err, bucket, object)
}
reducedErr := reduceWriteQuorumErrs(pctx, errs, objectOpIgnoredErrs, writeQuorum)
if reducedErr == errErasureWriteQuorum {
return pi, toObjectErr(reducedErr, bucket, object)
}
// List all online disks.
onlineDisks, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)
// Pick one from the first valid metadata.
fi, err := pickValidFileInfo(pctx, partsMetadata, modTime, writeQuorum)
if err != nil {
return pi, err
}
onlineDisks = shuffleDisks(onlineDisks, fi.Erasure.Distribution)
// Need a unique name for the part being written in minioMetaBucket to
// accommodate concurrent PutObjectPart requests
partSuffix := fmt.Sprintf("part.%d", partID)
tmpPart := mustGetUUID()
tmpPartPath := pathJoin(tmpPart, partSuffix)
// Delete the temporary object part. If PutObjectPart succeeds there would be nothing to delete.
var online int
defer func() {
if online != len(onlineDisks) {
es.disk.RenameFile(context.Background(), minioMetaTmpBucket, tmpPart, minioMetaTmpDeletedBucket, mustGetUUID())
}
}()
erasure, err := NewErasure(pctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize)
if err != nil {
return pi, toObjectErr(err, bucket, object)
}
// Fetch buffer for I/O, returns from the pool if not allocates a new one and returns.
var buffer []byte
switch size := data.Size(); {
case size == 0:
buffer = make([]byte, 1) // Allocate atleast a byte to reach EOF
case size == -1:
if size := data.ActualSize(); size > 0 && size < fi.Erasure.BlockSize {
buffer = make([]byte, data.ActualSize()+256, data.ActualSize()*2+512)
} else {
buffer = es.bp.Get()
defer es.bp.Put(buffer)
}
case size >= fi.Erasure.BlockSize:
buffer = es.bp.Get()
defer es.bp.Put(buffer)
case size < fi.Erasure.BlockSize:
// No need to allocate fully fi.Erasure.BlockSize buffer if the incoming data is smalles.
buffer = make([]byte, size, 2*size+int64(fi.Erasure.ParityBlocks+fi.Erasure.DataBlocks-1))
}
if len(buffer) > int(fi.Erasure.BlockSize) {
buffer = buffer[:fi.Erasure.BlockSize]
}
writers := make([]io.Writer, len(onlineDisks))
for i, disk := range onlineDisks {
if disk == nil {
continue
}
writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tmpPartPath, erasure.ShardFileSize(data.Size()), DefaultBitrotAlgorithm, erasure.ShardSize())
}
toEncode := io.Reader(data)
if data.Size() > bigFileThreshold {
// Add input readahead.
// We use 2 buffers, so we always have a full buffer of input.
bufA := es.bp.Get()
bufB := es.bp.Get()
defer es.bp.Put(bufA)
defer es.bp.Put(bufB)
ra, err := readahead.NewReaderBuffer(data, [][]byte{bufA[:fi.Erasure.BlockSize], bufB[:fi.Erasure.BlockSize]})
if err == nil {
toEncode = ra
defer ra.Close()
}
}
n, err := erasure.Encode(pctx, toEncode, writers, buffer, writeQuorum)
closeBitrotWriters(writers)
if err != nil {
return pi, toObjectErr(err, bucket, object)
}
// Should return IncompleteBody{} error when reader has fewer bytes
// than specified in request header.
if n < data.Size() {
return pi, IncompleteBody{Bucket: bucket, Object: object}
}
for i := range writers {
if writers[i] == nil {
onlineDisks[i] = nil
}
}
// Acquire write lock to update metadata.
uploadIDWLock := es.NewNSLock(bucket, pathJoin(object, uploadID))
wlkctx, err := uploadIDWLock.GetLock(pctx, globalOperationTimeout)
if err != nil {
return PartInfo{}, err
}
wctx := wlkctx.Context()
defer uploadIDWLock.Unlock(wlkctx.Cancel)
// Validates if upload ID exists.
if err = es.checkUploadIDExists(wctx, bucket, object, uploadID); err != nil {
return pi, toObjectErr(err, bucket, object, uploadID)
}
// Rename temporary part file to its final location.
partPath := pathJoin(uploadIDPath, fi.DataDir, partSuffix)
onlineDisks, err = renamePart(wctx, onlineDisks, minioMetaTmpBucket, tmpPartPath, minioMetaMultipartBucket, partPath, writeQuorum)
if err != nil {
return pi, toObjectErr(err, minioMetaMultipartBucket, partPath)
}
// Read metadata again because it might be updated with parallel upload of another part.
partsMetadata, errs = readAllFileInfo(wctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, "", false)
reducedErr = reduceWriteQuorumErrs(wctx, errs, objectOpIgnoredErrs, writeQuorum)
if reducedErr == errErasureWriteQuorum {
return pi, toObjectErr(reducedErr, bucket, object)
}
// Get current highest version based on re-read partsMetadata.
onlineDisks, modTime = listOnlineDisks(onlineDisks, partsMetadata, errs)
// Pick one from the first valid metadata.
fi, err = pickValidFileInfo(wctx, partsMetadata, modTime, writeQuorum)
if err != nil {
return pi, err
}
// Once part is successfully committed, proceed with updating erasure metadata.
fi.ModTime = UTCNow()
md5hex := r.MD5CurrentHexString()
var index []byte
if opts.IndexCB != nil {
index = opts.IndexCB()
}
// Add the current part.
fi.AddObjectPart(partID, md5hex, n, data.ActualSize(), fi.ModTime, index)
for i, disk := range onlineDisks {
if disk == OfflineDisk {
continue
}
partsMetadata[i].Size = fi.Size
partsMetadata[i].ModTime = fi.ModTime
partsMetadata[i].Parts = fi.Parts
partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
PartNumber: partID,
Algorithm: DefaultBitrotAlgorithm,
Hash: bitrotWriterSum(writers[i]),
})
}
// Writes update `xl.meta` format for each disk.
if _, err = writeUniqueFileInfo(wctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum); err != nil {
return pi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
}
online = countOnlineDisks(onlineDisks)
// Return success.
return PartInfo{
PartNumber: partID,
ETag: md5hex,
LastModified: fi.ModTime,
Size: n,
ActualSize: data.ActualSize(),
}, nil
}
// GetMultipartInfo returns multipart metadata uploaded during newMultipartUpload, used
// by callers to verify object states
// - encrypted
// - compressed
func (es *erasureSingle) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (MultipartInfo, error) {
	if err := checkListPartsArgs(ctx, bucket, object, es); err != nil {
		return MultipartInfo{}, err
	}

	result := MultipartInfo{
		Bucket:   bucket,
		Object:   object,
		UploadID: uploadID,
	}

	// Hold a read lock on the upload so its metadata is not mutated while we read it.
	uploadIDLock := es.NewNSLock(bucket, pathJoin(object, uploadID))
	lkctx, err := uploadIDLock.GetRLock(ctx, globalOperationTimeout)
	if err != nil {
		return MultipartInfo{}, err
	}
	ctx = lkctx.Context()
	defer uploadIDLock.RUnlock(lkctx.Cancel)

	if err := es.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
		return result, toObjectErr(err, bucket, object, uploadID)
	}

	uploadIDPath := es.getUploadIDDir(bucket, object, uploadID)

	storageDisks := []StorageAPI{es.disk}

	// Read metadata associated with the object from all disks.
	partsMetadata, errs := readAllFileInfo(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath, opts.VersionID, false)

	// get Quorum for this object
	readQuorum, _, err := objectQuorumFromMeta(ctx, partsMetadata, errs, 0)
	if err != nil {
		return result, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
	}

	// This is a read path: reduce the per-disk errors against the read
	// quorum. The previous code used reduceWriteQuorumErrs here, whose
	// quorum-miss sentinel is errErasureWriteQuorum, so the comparison
	// against errErasureReadQuorum below could never match (dead code).
	reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum)
	if reducedErr == errErasureReadQuorum {
		return result, toObjectErr(reducedErr, minioMetaMultipartBucket, uploadIDPath)
	}

	_, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)

	// Pick one from the first valid metadata.
	fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, readQuorum)
	if err != nil {
		return result, err
	}

	result.UserDefined = cloneMSS(fi.Metadata)
	return result, nil
}
// ListObjectParts - lists all previously uploaded parts for a given
// object and uploadID. Takes additional input of part-number-marker
// to indicate where the listing should begin from.
//
// Implements S3 compatible ListObjectParts API. The resulting
// ListPartsInfo structure is marshaled directly into XML and
// replied back to the client.
func (es *erasureSingle) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int, opts ObjectOptions) (result ListPartsInfo, err error) {
	if err := checkListPartsArgs(ctx, bucket, object, es); err != nil {
		return ListPartsInfo{}, err
	}

	// Hold a read lock on the upload for the duration of the listing so
	// that concurrent part uploads cannot mutate the metadata under us.
	uploadIDLock := es.NewNSLock(bucket, pathJoin(object, uploadID))
	lkctx, err := uploadIDLock.GetRLock(ctx, globalOperationTimeout)
	if err != nil {
		return ListPartsInfo{}, err
	}
	ctx = lkctx.Context()
	defer uploadIDLock.RUnlock(lkctx.Cancel)

	if err := es.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
		return result, toObjectErr(err, bucket, object, uploadID)
	}

	uploadIDPath := es.getUploadIDDir(bucket, object, uploadID)

	// Single-drive deployment: the disk set is always just one entry.
	storageDisks := []StorageAPI{es.disk}

	// Read metadata associated with the object from all disks.
	partsMetadata, errs := readAllFileInfo(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath, "", false)

	// get Quorum for this object
	_, writeQuorum, err := objectQuorumFromMeta(ctx, partsMetadata, errs, 0)
	if err != nil {
		return result, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
	}

	reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
	if reducedErr == errErasureWriteQuorum {
		return result, toObjectErr(reducedErr, minioMetaMultipartBucket, uploadIDPath)
	}

	_, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)

	// Pick one from the first valid metadata.
	fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum)
	if err != nil {
		return result, err
	}

	// Populate the result stub.
	result.Bucket = bucket
	result.Object = object
	result.UploadID = uploadID
	result.MaxParts = maxParts
	result.PartNumberMarker = partNumberMarker
	result.UserDefined = cloneMSS(fi.Metadata)

	// For empty number of parts or maxParts as zero, return right here.
	if len(fi.Parts) == 0 || maxParts == 0 {
		return result, nil
	}

	// Limit output to maxPartsList.
	if maxParts > maxPartsList {
		maxParts = maxPartsList
	}

	// Only parts with higher part numbers will be listed; a marker that
	// matches no known part (partIdx == -1) lists from the beginning.
	partIdx := objectPartIndex(fi.Parts, partNumberMarker)
	parts := fi.Parts
	if partIdx != -1 {
		parts = fi.Parts[partIdx+1:]
	}
	count := maxParts
	for _, part := range parts {
		result.Parts = append(result.Parts, PartInfo{
			PartNumber:   part.Number,
			ETag:         part.ETag,
			LastModified: fi.ModTime,
			Size:         part.Size,
		})
		count--
		if count == 0 {
			break
		}
	}
	// If listed entries are more than maxParts, we set IsTruncated as true.
	if len(parts) > len(result.Parts) {
		result.IsTruncated = true
		// Make sure to fill next part number marker if IsTruncated is
		// true for subsequent listing.
		nextPartNumberMarker := result.Parts[len(result.Parts)-1].PartNumber
		result.NextPartNumberMarker = nextPartNumberMarker
	}
	return result, nil
}
// CompleteMultipartUpload - completes an ongoing multipart
// transaction after receiving all the parts indicated by the client.
// Returns an md5sum calculated by concatenating all the individual
// md5sums of all the parts.
//
// Implements S3 compatible Complete multipart API.
func (es *erasureSingle) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart, opts ObjectOptions) (oi ObjectInfo, err error) {
	if err = checkCompleteMultipartArgs(ctx, bucket, object, es); err != nil {
		return oi, err
	}

	// Hold read-locks to verify uploaded parts, also disallows
	// parallel part uploads as well.
	uploadIDLock := es.NewNSLock(bucket, pathJoin(object, uploadID))
	rlkctx, err := uploadIDLock.GetRLock(ctx, globalOperationTimeout)
	if err != nil {
		return oi, err
	}
	rctx := rlkctx.Context()
	defer uploadIDLock.RUnlock(rlkctx.Cancel)

	if err = es.checkUploadIDExists(rctx, bucket, object, uploadID); err != nil {
		return oi, toObjectErr(err, bucket, object, uploadID)
	}

	uploadIDPath := es.getUploadIDDir(bucket, object, uploadID)

	// Single-drive deployment: the disk set is always just one entry.
	storageDisks := []StorageAPI{es.disk}

	// Read metadata associated with the object from all disks.
	partsMetadata, errs := readAllFileInfo(rctx, storageDisks, minioMetaMultipartBucket, uploadIDPath, "", false)

	// get Quorum for this object
	_, writeQuorum, err := objectQuorumFromMeta(rctx, partsMetadata, errs, 0)
	if err != nil {
		return oi, toObjectErr(err, bucket, object)
	}

	reducedErr := reduceWriteQuorumErrs(rctx, errs, objectOpIgnoredErrs, writeQuorum)
	if reducedErr == errErasureWriteQuorum {
		return oi, toObjectErr(reducedErr, bucket, object)
	}

	onlineDisks, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)

	// Pick one from the first valid metadata.
	fi, err := pickValidFileInfo(rctx, partsMetadata, modTime, writeQuorum)
	if err != nil {
		return oi, err
	}

	// Calculate full object size.
	var objectSize int64

	// Calculate consolidated actual size.
	var objectActualSize int64

	// Order online disks in accordance with distribution order.
	// Order parts metadata in accordance with distribution order.
	onlineDisks, partsMetadata = shuffleDisksAndPartsMetadataByIndex(onlineDisks, partsMetadata, fi)

	// Save current erasure metadata for validation: fi.Parts is about to be
	// rebuilt from the client-provided part list, while currentFI keeps what
	// is actually on disk.
	currentFI := fi

	// Allocate parts similar to incoming slice.
	fi.Parts = make([]ObjectPartInfo, len(parts))

	// Validate each part and then commit to disk.
	for i, part := range parts {
		partIdx := objectPartIndex(currentFI.Parts, part.PartNumber)
		// All parts should have same part number.
		if partIdx == -1 {
			invp := InvalidPart{
				PartNumber: part.PartNumber,
				GotETag:    part.ETag,
			}
			return oi, invp
		}

		// ensure that part ETag is canonicalized to strip off extraneous quotes
		part.ETag = canonicalizeETag(part.ETag)
		if currentFI.Parts[partIdx].ETag != part.ETag {
			invp := InvalidPart{
				PartNumber: part.PartNumber,
				ExpETag:    currentFI.Parts[partIdx].ETag,
				GotETag:    part.ETag,
			}
			return oi, invp
		}

		// All parts except the last part has to be atleast 5MB.
		if (i < len(parts)-1) && !isMinAllowedPartSize(currentFI.Parts[partIdx].ActualSize) {
			return oi, PartTooSmall{
				PartNumber: part.PartNumber,
				PartSize:   currentFI.Parts[partIdx].ActualSize,
				PartETag:   part.ETag,
			}
		}

		// Save for total object size.
		objectSize += currentFI.Parts[partIdx].Size

		// Save the consolidated actual size.
		objectActualSize += currentFI.Parts[partIdx].ActualSize

		// Add incoming parts.
		fi.Parts[i] = ObjectPartInfo{
			Number:     part.PartNumber,
			Size:       currentFI.Parts[partIdx].Size,
			ActualSize: currentFI.Parts[partIdx].ActualSize,
			Index:      currentFI.Parts[partIdx].Index,
		}
	}

	// Save the final object size and modtime; a zero opts.MTime falls back
	// to the current time.
	fi.Size = objectSize
	fi.ModTime = opts.MTime
	if opts.MTime.IsZero() {
		fi.ModTime = UTCNow()
	}

	// Save successfully calculated md5sum.
	fi.Metadata["etag"] = opts.UserDefined["etag"]
	if fi.Metadata["etag"] == "" {
		fi.Metadata["etag"] = getCompleteMultipartMD5(parts)
	}

	// Save the consolidated actual size.
	fi.Metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(objectActualSize, 10)

	// Update all erasure metadata, make sure to not modify fields like
	// checksum which are different on each disks.
	for index := range partsMetadata {
		if partsMetadata[index].IsValid() {
			partsMetadata[index].Size = fi.Size
			partsMetadata[index].ModTime = fi.ModTime
			partsMetadata[index].Metadata = fi.Metadata
			partsMetadata[index].Parts = fi.Parts
		}
	}

	// Hold namespace to complete the transaction
	lk := es.NewNSLock(bucket, object)
	lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return oi, err
	}
	ctx = lkctx.Context()
	defer lk.Unlock(lkctx.Cancel)

	// Write final `xl.meta` at uploadID location
	onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum)
	if err != nil {
		return oi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
	}

	// Remove parts that weren't present in CompleteMultipartUpload request.
	// Removal is best-effort: the return value is intentionally ignored.
	for _, curpart := range currentFI.Parts {
		if objectPartIndex(fi.Parts, curpart.Number) == -1 {
			// Delete the missing part files. e.g,
			// Request 1: NewMultipart
			// Request 2: PutObjectPart 1
			// Request 3: PutObjectPart 2
			// Request 4: CompleteMultipartUpload --part 2
			// N.B. 1st part is not present. This part should be removed from the storage.
			es.removeObjectPart(bucket, object, uploadID, fi.DataDir, curpart.Number)
		}
	}

	// Rename the multipart object to final location.
	if onlineDisks, err = renameData(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath,
		partsMetadata, bucket, object, writeQuorum); err != nil {
		return oi, toObjectErr(err, bucket, object)
	}

	for i := 0; i < len(onlineDisks); i++ {
		if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
			// Object info is the same in all disks, so we can pick
			// the first meta from online disk
			fi = partsMetadata[i]
			break
		}
	}

	// we are adding a new version to this object under the namespace lock, so this is the latest version.
	fi.IsLatest = true

	// Success, return object info.
	return fi.ToObjectInfo(bucket, object, opts.Versioned || opts.VersionSuspended), nil
}
// AbortMultipartUpload - aborts an ongoing multipart operation
// signified by the input uploadID. This is an atomic operation
// doesn't require clients to initiate multiple such requests.
//
// All parts are purged from all disks and reference to the uploadID
// would be removed from the system, rollback is not possible on this
// operation.
func (es *erasureSingle) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (err error) {
	if err = checkAbortMultipartArgs(ctx, bucket, object, es); err != nil {
		return err
	}

	// Take an exclusive lock on the upload namespace while aborting.
	uploadLock := es.NewNSLock(bucket, pathJoin(object, uploadID))
	lockCtx, err := uploadLock.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return err
	}
	ctx = lockCtx.Context()
	defer uploadLock.Unlock(lockCtx.Cancel)

	// Validates if upload ID exists.
	if err := es.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
		return toObjectErr(err, bucket, object, uploadID)
	}

	// Cleanup all uploaded parts by moving the entire upload directory
	// into the trash bucket under a fresh UUID. The rename result is
	// intentionally not inspected: cleanup is best-effort.
	es.disk.RenameFile(ctx, minioMetaMultipartBucket, es.getUploadIDDir(bucket, object, uploadID), minioMetaTmpDeletedBucket, mustGetUUID())

	// Successfully purged.
	return nil
}
// ListObjects - lists objects under the given prefix, honoring marker,
// delimiter and maxKeys, and applying lifecycle/object-lock configuration
// where present.
func (es *erasureSingle) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
	var loi ListObjectsInfo

	// Automatically remove the object/version if an expiry lifecycle rule can be applied.
	lc, _ := globalLifecycleSys.Get(bucket)

	// Check if bucket is object locked.
	rcfg, _ := globalBucketObjectLockSys.Get(bucket)

	if len(prefix) > 0 && maxKeys == 1 && delimiter == "" && marker == "" {
		// Optimization for certain applications like
		// - Cohesity
		// - Actifio, Splunk etc.
		// which send ListObjects requests where the actual object
		// itself is the prefix and max-keys=1 in such scenarios
		// we can simply verify locally if such an object exists
		// to avoid the need for ListObjects().
		objInfo, err := es.GetObjectInfo(ctx, bucket, prefix, ObjectOptions{NoLock: true})
		if err == nil {
			if lc != nil {
				action := evalActionFromLifecycle(ctx, *lc, rcfg, objInfo, false)
				switch action {
				case lifecycle.DeleteVersionAction, lifecycle.DeleteAction:
					fallthrough
				case lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction:
					// Object is subject to expiry: report it as absent.
					return loi, nil
				}
			}
			loi.Objects = append(loi.Objects, objInfo)
			return loi, nil
		}
		// GetObjectInfo failed: fall through to the regular listing path.
	}

	opts := listPathOptions{
		Bucket:      bucket,
		Prefix:      prefix,
		Separator:   delimiter,
		Limit:       maxKeysPlusOne(maxKeys, marker != ""),
		Marker:      marker,
		InclDeleted: false,
		AskDisks:    globalAPIConfig.getListQuorum(),
		Lifecycle:   lc,
		Retention:   rcfg,
	}

	merged, err := es.listPath(ctx, &opts)
	if err != nil && err != io.EOF {
		if !isErrBucketNotFound(err) {
			logger.LogIf(ctx, err)
		}
		return loi, err
	}

	merged.forwardPast(opts.Marker)
	defer merged.truncate(0) // Release when returning

	// Default is recursive, if delimiter is set then list non recursive.
	objects := merged.fileInfos(bucket, prefix, delimiter)
	// err == io.EOF here means the listing is exhausted, so only a nil err
	// (more entries may exist) can mark the result truncated.
	loi.IsTruncated = err == nil && len(objects) > 0
	if maxKeys > 0 && len(objects) > maxKeys {
		objects = objects[:maxKeys]
		loi.IsTruncated = true
	}
	for _, obj := range objects {
		if obj.IsDir && obj.ModTime.IsZero() && delimiter != "" {
			// Zero-modtime directory entries become common prefixes.
			loi.Prefixes = append(loi.Prefixes, obj.Name)
		} else {
			loi.Objects = append(loi.Objects, obj)
		}
	}
	if loi.IsTruncated {
		last := objects[len(objects)-1]
		loi.NextMarker = opts.encodeMarker(last.Name)
	}
	return loi, nil
}
// ListObjectsV2 implements the V2 listing API by translating the
// continuation token / start-after pair into a V1 marker and
// delegating to ListObjects.
func (es *erasureSingle) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (ListObjectsV2Info, error) {
	// The continuation token takes precedence; otherwise resume from
	// the start-after key.
	marker := continuationToken
	if marker == "" {
		marker = startAfter
	}

	loi, err := es.ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
	if err != nil {
		return ListObjectsV2Info{}, err
	}

	return ListObjectsV2Info{
		IsTruncated:           loi.IsTruncated,
		ContinuationToken:     continuationToken,
		NextContinuationToken: loi.NextMarker,
		Objects:               loi.Objects,
		Prefixes:              loi.Prefixes,
	}, err
}
// ListObjectVersions - lists object versions (including delete markers)
// under the given prefix, honoring marker/versionMarker, delimiter and
// maxKeys.
func (es *erasureSingle) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (ListObjectVersionsInfo, error) {
	loi := ListObjectVersionsInfo{}
	// A version marker without a key marker is not a supported request.
	if marker == "" && versionMarker != "" {
		return loi, NotImplemented{}
	}

	opts := listPathOptions{
		Bucket:      bucket,
		Prefix:      prefix,
		Separator:   delimiter,
		Limit:       maxKeysPlusOne(maxKeys, marker != ""),
		Marker:      marker,
		InclDeleted: true,
		AskDisks:    "strict",
		Versioned:   true,
	}

	merged, err := es.listPath(ctx, &opts)
	if err != nil && err != io.EOF {
		return loi, err
	}
	defer merged.truncate(0) // Release when returning

	if versionMarker == "" {
		o := listPathOptions{Marker: marker}
		// If we are not looking for a specific version skip it.
		o.parseMarker()
		merged.forwardPast(o.Marker)
	}
	objects := merged.fileInfoVersions(bucket, prefix, delimiter, versionMarker)
	// err == io.EOF means the listing is exhausted; only a nil err can
	// mark the result truncated.
	loi.IsTruncated = err == nil && len(objects) > 0
	if maxKeys > 0 && len(objects) > maxKeys {
		objects = objects[:maxKeys]
		loi.IsTruncated = true
	}
	for _, obj := range objects {
		if obj.IsDir && obj.ModTime.IsZero() && delimiter != "" {
			// Zero-modtime directory entries become common prefixes.
			loi.Prefixes = append(loi.Prefixes, obj.Name)
		} else {
			loi.Objects = append(loi.Objects, obj)
		}
	}
	if loi.IsTruncated {
		last := objects[len(objects)-1]
		loi.NextMarker = opts.encodeMarker(last.Name)
		loi.NextVersionIDMarker = last.VersionID
	}
	return loi, nil
}
// Walk a bucket, optionally prefix recursively, until we have returned
// all the content to objectInfo channel, it is callers responsibility
// to allocate a receive channel for ObjectInfo, upon any unhandled
// error walker returns error. Optionally if context.Done() is received
// then Walk() stops the walker.
//
// The walk itself runs asynchronously; this method returns immediately
// after spawning the walker goroutine, which closes results when done.
func (es *erasureSingle) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo, opts ObjectOptions) error {
	if err := checkListObjsArgs(ctx, bucket, prefix, "", es); err != nil {
		// Upon error close the channel.
		close(results)
		return err
	}

	ctx, cancel := context.WithCancel(ctx)
	go func() {
		defer cancel()
		defer close(results)

		versioned := opts.Versioned || opts.VersionSuspended

		var wg sync.WaitGroup
		wg.Add(1)
		go func() {
			defer wg.Done()

			// loadEntry emits all versions of a metacache entry to the
			// results channel; directories are skipped, and a malformed
			// entry aborts the whole walk via cancel().
			loadEntry := func(entry metaCacheEntry) {
				if entry.isDir() {
					return
				}

				fivs, err := entry.fileInfoVersions(bucket)
				if err != nil {
					cancel()
					return
				}
				if opts.WalkAscending {
					// Emit versions in reverse slice order.
					for i := len(fivs.Versions) - 1; i >= 0; i-- {
						version := fivs.Versions[i]
						results <- version.ToObjectInfo(bucket, version.Name, versioned)
					}
					return
				}
				for _, version := range fivs.Versions {
					results <- version.ToObjectInfo(bucket, version.Name, versioned)
				}
			}

			// How to resolve partial results.
			resolver := metadataResolutionParams{
				dirQuorum: 1,
				objQuorum: 1,
				bucket:    bucket,
			}

			// Split the prefix into a listable base directory plus a
			// remaining filter applied to entry names.
			path := baseDirFromPrefix(prefix)
			filterPrefix := strings.Trim(strings.TrimPrefix(prefix, path), slashSeparator)
			if path == prefix {
				filterPrefix = ""
			}

			lopts := listPathRawOptions{
				disks:          []StorageAPI{es.disk},
				bucket:         bucket,
				path:           path,
				filterPrefix:   filterPrefix,
				recursive:      true,
				forwardTo:      "",
				minDisks:       1,
				reportNotFound: false,
				agreed:         loadEntry,
				partial: func(entries metaCacheEntries, _ []error) {
					entry, ok := entries.resolve(&resolver)
					if !ok {
						// check if we can get one entry atleast
						// proceed to heal nonetheless.
						entry, _ = entries.firstFound()
					}
					loadEntry(*entry)
				},
				finished: nil,
			}

			if err := listPathRaw(ctx, lopts); err != nil {
				logger.LogIf(ctx, fmt.Errorf("listPathRaw returned %w: opts(%#v)", err, lopts))
				return
			}
		}()
		wg.Wait()
	}()

	return nil
}
// Health - returns current status of the object layer health, for single drive
// its as simple as returning healthy as long as drive is accessible.
func (es *erasureSingle) Health(ctx context.Context, opts HealthOptions) HealthResult {
	if _, err := es.disk.DiskInfo(ctx); err != nil {
		// Drive is unreachable: report the zero (unhealthy) result.
		return HealthResult{}
	}
	// A single drive cannot be put under maintenance, so a maintenance
	// query is always reported as not healthy; otherwise the reachable
	// drive means we are healthy. Write quorum on a single drive is 1.
	return HealthResult{
		Healthy:     !opts.Maintenance,
		WriteQuorum: 1,
	}
}
// ReadHealth - returns current status of the object layer health for reads,
// for single drive its as simple as returning healthy as long as drive is accessible.
func (es *erasureSingle) ReadHealth(ctx context.Context) bool {
	return es.Health(ctx, HealthOptions{}).Healthy
}
// nsScanner will start scanning buckets and send updated totals as they are traversed.
// Updates are sent on a regular basis and the caller *must* consume them.
func (es *erasureSingle) nsScanner(ctx context.Context, buckets []BucketInfo, bf *bloomFilter, wantCycle uint32, updates chan<- dataUsageCache, healScanMode madmin.HealScanMode) error {
	if len(buckets) == 0 {
		return nil
	}

	// Collect disks we can use. Single-drive setup: exactly one disk.
	disks := []StorageAPI{es.disk}

	// Load bucket totals
	oldCache := dataUsageCache{}
	if err := oldCache.load(ctx, es, dataUsageCacheName); err != nil {
		return err
	}

	// New cache..
	cache := dataUsageCache{
		Info: dataUsageCacheInfo{
			Name:      dataUsageRoot,
			NextCycle: oldCache.Info.NextCycle,
		},
		Cache: make(map[string]dataUsageEntry, len(oldCache.Cache)),
	}
	bloom := bf.bytes()

	// Put all buckets into channel.
	bucketCh := make(chan BucketInfo, len(buckets))
	// Add new buckets first: buckets without a previous cache entry are
	// queued ahead of known ones.
	for _, b := range buckets {
		if oldCache.find(b.Name) == nil {
			bucketCh <- b
		}
	}

	// Add existing buckets, carrying their previous totals into the new cache.
	for _, b := range buckets {
		e := oldCache.find(b.Name)
		if e != nil {
			cache.replace(b.Name, dataUsageRoot, *e)
			bucketCh <- b
		}
	}

	close(bucketCh)
	bucketResults := make(chan dataUsageEntryInfo, len(disks))

	// Start async collector/saver.
	// This goroutine owns the cache.
	var saverWg sync.WaitGroup
	saverWg.Add(1)
	go func() {
		// Add jitter to the update time so multiple sets don't sync up.
		updateTime := 30*time.Second + time.Duration(float64(10*time.Second)*rand.Float64())
		t := time.NewTicker(updateTime)
		defer t.Stop()
		defer saverWg.Done()
		var lastSave time.Time

		for {
			select {
			case <-ctx.Done():
				// Return without saving.
				return
			case <-t.C:
				// Periodic save/publish, skipped when nothing changed
				// since the previous save.
				if cache.Info.LastUpdate.Equal(lastSave) {
					continue
				}
				logger.LogIf(ctx, cache.save(ctx, es, dataUsageCacheName))
				updates <- cache.clone()
				lastSave = cache.Info.LastUpdate
			case v, ok := <-bucketResults:
				if !ok {
					// bucketResults closed: all scanners are done.
					// Save final state...
					cache.Info.NextCycle = wantCycle
					cache.Info.LastUpdate = time.Now()
					logger.LogIf(ctx, cache.save(ctx, es, dataUsageCacheName))
					updates <- cache
					return
				}
				cache.replace(v.Name, v.Parent, v.Entry)
				cache.Info.LastUpdate = time.Now()
			}
		}
	}()

	// Shuffle disks to ensure a total randomness of bucket/disk association to ensure
	// that objects that are not present in all disks are accounted and ILM applied.
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	r.Shuffle(len(disks), func(i, j int) { disks[i], disks[j] = disks[j], disks[i] })

	// Start one scanner per disk
	var wg sync.WaitGroup
	wg.Add(len(disks))
	for i := range disks {
		go func(i int) {
			defer wg.Done()
			disk := disks[i]

			for bucket := range bucketCh {
				select {
				case <-ctx.Done():
					return
				default:
				}

				// Load cache for bucket
				cacheName := pathJoin(bucket.Name, dataUsageCacheName)
				cache := dataUsageCache{}
				logger.LogIf(ctx, cache.load(ctx, es, cacheName))
				if cache.Info.Name == "" {
					cache.Info.Name = bucket.Name
				}
				cache.Info.BloomFilter = bloom
				cache.Info.SkipHealing = true
				cache.Info.NextCycle = wantCycle
				if cache.Info.Name != bucket.Name {
					// Loaded cache belongs to a different bucket:
					// discard and start fresh.
					logger.LogIf(ctx, fmt.Errorf("cache name mismatch: %s != %s", cache.Info.Name, bucket.Name))
					cache.Info = dataUsageCacheInfo{
						Name:       bucket.Name,
						LastUpdate: time.Time{},
						NextCycle:  wantCycle,
					}
				}

				// Collect updates: forward per-bucket entries to the
				// cache-owner goroutine above.
				updates := make(chan dataUsageEntry, 1)
				var wg sync.WaitGroup
				wg.Add(1)
				go func(name string) {
					defer wg.Done()
					for update := range updates {
						bucketResults <- dataUsageEntryInfo{
							Name:   name,
							Parent: dataUsageRoot,
							Entry:  update,
						}
					}
				}(cache.Info.Name)

				// Calc usage
				before := cache.Info.LastUpdate
				var err error
				cache, err = disk.NSScanner(ctx, cache, updates, healScanMode)
				cache.Info.BloomFilter = nil
				if err != nil {
					if !cache.Info.LastUpdate.IsZero() && cache.Info.LastUpdate.After(before) {
						// Partial progress was made: persist it despite the error.
						logger.LogIf(ctx, cache.save(ctx, es, cacheName))
					} else {
						logger.LogIf(ctx, err)
					}
					// This ensures that we don't close
					// bucketResults channel while the
					// updates-collector goroutine still
					// holds a reference to this.
					wg.Wait()
					continue
				}

				wg.Wait()
				var root dataUsageEntry
				if r := cache.root(); r != nil {
					root = cache.flatten(*r)
				}
				t := time.Now()
				bucketResults <- dataUsageEntryInfo{
					Name:   cache.Info.Name,
					Parent: dataUsageRoot,
					Entry:  root,
				}
				// We want to avoid synchronizing up all writes in case
				// the results are piled up.
				time.Sleep(time.Duration(float64(time.Since(t)) * rand.Float64()))
				// Save cache
				logger.LogIf(ctx, cache.save(ctx, es, cacheName))
			}
		}(i)
	}
	wg.Wait()
	// All scanners finished: closing bucketResults triggers the final
	// save in the collector goroutine; wait for it before returning.
	close(bucketResults)
	saverWg.Wait()
	return nil
}
// NSScanner scans the namespace of all buckets and streams consolidated
// DataUsageInfo snapshots on the updates channel until the scan finishes
// or ctx is cancelled. The updates channel is closed before returning.
func (es *erasureSingle) NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo, wantCycle uint32, healScanMode madmin.HealScanMode) error {
	// Updates must be closed before we return.
	defer close(updates)
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	var wg sync.WaitGroup
	// mu guards results, firstErr and the updates send inside update().
	var mu sync.Mutex
	results := make([]dataUsageCache, 1)
	var firstErr error

	allBuckets, err := es.ListBuckets(ctx, BucketOptions{})
	if err != nil {
		return err
	}

	if len(allBuckets) == 0 {
		updates <- DataUsageInfo{} // no buckets found update data usage to reflect latest state
		return nil
	}

	// Scanner latest allBuckets first.
	sort.Slice(allBuckets, func(i, j int) bool {
		return allBuckets[i].Created.After(allBuckets[j].Created)
	})

	wg.Add(1)
	go func() {
		// Local channel bridging the inner scanner to the shared results
		// slice; closing it lets the collector below release wg.
		updates := make(chan dataUsageCache, 1)
		defer close(updates)
		// Start update collector.
		go func() {
			defer wg.Done()
			for info := range updates {
				mu.Lock()
				results[0] = info
				mu.Unlock()
			}
		}()
		// Start scanner. Blocks until done.
		err := es.nsScanner(ctx, allBuckets, bf, wantCycle, updates, healScanMode)
		if err != nil {
			logger.LogIf(ctx, err)
			mu.Lock()
			if firstErr == nil {
				firstErr = err
			}
			// Cancel remaining...
			cancel()
			mu.Unlock()
			return
		}
	}()

	// updateCloser lets the final flush below ask the publisher goroutine
	// to emit one last update and then stop.
	updateCloser := make(chan chan struct{})
	go func() {
		updateTicker := time.NewTicker(30 * time.Second)
		defer updateTicker.Stop()
		var lastUpdate time.Time

		// We need to merge since we will get the same buckets from each pool.
		// Therefore to get the exact bucket sizes we must merge before we can convert.
		var allMerged dataUsageCache

		// update publishes a merged snapshot when every result slot has
		// been filled and something changed since the last publish.
		update := func() {
			mu.Lock()
			defer mu.Unlock()

			allMerged = dataUsageCache{Info: dataUsageCacheInfo{Name: dataUsageRoot}}
			for _, info := range results {
				if info.Info.LastUpdate.IsZero() {
					// Not filled yet.
					return
				}
				allMerged.merge(info)
			}
			if allMerged.root() != nil && allMerged.Info.LastUpdate.After(lastUpdate) {
				updates <- allMerged.dui(allMerged.Info.Name, allBuckets)
				lastUpdate = allMerged.Info.LastUpdate
			}
		}
		for {
			select {
			case <-ctx.Done():
				return
			case v := <-updateCloser:
				// Final flush requested: publish once and signal done.
				update()
				close(v)
				return
			case <-updateTicker.C:
				update()
			}
		}
	}()

	wg.Wait()
	// Scan finished: request a final publish (or give up on cancellation).
	ch := make(chan struct{})
	select {
	case updateCloser <- ch:
		<-ch
	case <-ctx.Done():
		if firstErr == nil {
			firstErr = ctx.Err()
		}
	}
	return firstErr
}
// GetRawData will return all files with a given raw path to the callback.
// Errors are ignored, only errors from the callback are returned.
// For now only direct file paths are supported.
func (es *erasureSingle) GetRawData(ctx context.Context, volume, file string, fn func(r io.Reader, host string, disk string, filename string, info StatInfo) error) error {
	stats, err := es.disk.StatInfoFile(ctx, volume, file, true)
	if err != nil {
		return err
	}

	matched := 0
	for _, info := range stats {
		matched++

		var rc io.ReadCloser
		if info.Dir {
			// Directories carry no content: hand the callback an empty reader.
			rc = io.NopCloser(bytes.NewBuffer([]byte{}))
		} else {
			rc, err = es.disk.ReadFileStream(ctx, volume, info.Name, 0, info.Size)
			if err != nil {
				// Unreadable entries are skipped; only callback errors abort.
				continue
			}
		}

		// Keep disk path instead of ID, to ensure that the downloaded zip file can be
		// easily automated with `minio server hostname{1...n}/disk{1...m}`.
		cbErr := fn(rc, es.disk.Hostname(), es.disk.Endpoint().Path, pathJoin(volume, info.Name), info)
		rc.Close()
		if cbErr != nil {
			return cbErr
		}
	}

	if matched == 0 {
		return errFileNotFound
	}
	return nil
}
|
// Copyright 2015 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package commands
import (
"fmt"
"strings"
"github.com/juju/bundlechanges"
"github.com/juju/errors"
"gopkg.in/juju/charm.v6-unstable"
"gopkg.in/yaml.v1"
"github.com/juju/juju/api"
"github.com/juju/juju/constraints"
"github.com/juju/juju/environs/config"
)
// deploymentLogger is used to notify clients about the bundle deployment
// progress.
type deploymentLogger interface {
	// Infof formats and logs the given message, following the
	// fmt.Sprintf argument convention.
	Infof(string, ...interface{})
}
// deployBundle deploys the given bundle data using the given API client and
// charm store client. The deployment is not transactional, and its progress is
// notified using the given deployment logger.
func deployBundle(data *charm.BundleData, client *api.Client, csclient *csClient, repoPath string, conf *config.Config, log deploymentLogger) error {
	// Verify the bundle, checking that any declared constraints parse.
	verifyConstraints := func(s string) error {
		_, err := constraints.Parse(s)
		return err
	}
	if err := data.Verify(verifyConstraints); err != nil {
		return errors.Annotate(err, "cannot deploy bundle")
	}

	// Retrieve bundle changes.
	changes := bundlechanges.FromData(data)
	handler := &bundleHandler{
		changes:  make(map[string]bundlechanges.Change, len(changes)),
		results:  make(map[string]string, len(changes)),
		client:   client,
		csclient: csclient,
		repoPath: repoPath,
		conf:     conf,
		log:      log,
		data:     data,
	}
	for _, change := range changes {
		handler.changes[change.Id()] = change
	}

	// Deploy the bundle by executing each change in order.
	for _, change := range changes {
		var err error
		switch change := change.(type) {
		case *bundlechanges.AddCharmChange:
			err = handler.addCharm(change.Id(), change.Params)
		case *bundlechanges.AddMachineChange:
			err = handler.addMachine(change.Id(), change.Params)
		case *bundlechanges.AddRelationChange:
			err = handler.addRelation(change.Id(), change.Params)
		case *bundlechanges.AddServiceChange:
			err = handler.addService(change.Id(), change.Params)
		case *bundlechanges.AddUnitChange:
			err = handler.addUnit(change.Id(), change.Params)
		case *bundlechanges.SetAnnotationsChange:
			err = handler.setAnnotations(change.Id(), change.Params)
		default:
			return errors.Errorf("unknown change type: %T", change)
		}
		if err != nil {
			return errors.Annotate(err, "cannot deploy bundle")
		}
	}
	return nil
}
// bundleHandler holds the state shared by the per-change deployment
// methods (addCharm, addService, ...) invoked from deployBundle.
type bundleHandler struct {
	// changes maps change ids to their change.
	changes map[string]bundlechanges.Change
	// results records string outcomes keyed by change id (and, for
	// charms, by a "resolved-<charm>" key — see addCharm).
	results map[string]string
	// client is the API client used to perform deployment operations.
	client *api.Client
	// csclient is the charm store client.
	csclient *csClient
	// repoPath is passed to charm URL resolution; presumably the local
	// charm repository path.
	repoPath string
	// conf is the environment configuration used during URL resolution.
	conf *config.Config
	// log receives progress notifications.
	log deploymentLogger
	// data is the parsed bundle being deployed.
	data *charm.BundleData
}
// addCharm adds a charm to the environment.
func (h *bundleHandler) addCharm(id string, p bundlechanges.AddCharmParams) error {
	curl, repo, err := resolveCharmStoreEntityURL(p.Charm, h.csclient.params, h.repoPath, h.conf)
	if err != nil {
		return errors.Annotatef(err, "cannot resolve URL %q", p.Charm)
	}
	// Only charm URLs can be added here; reject bundle URLs.
	if curl.Series == "bundle" {
		return errors.Errorf("expected charm URL, got bundle URL %q", p.Charm)
	}
	curl, err = addCharmViaAPI(h.client, curl, repo, h.csclient)
	if err != nil {
		return errors.Annotatef(err, "cannot add charm %q", p.Charm)
	}
	h.log.Infof("added charm %s", curl)
	// TODO frankban: the key here should really be the change id, but in the
	// current bundlechanges format the charm name is included in the service
	// change, not a placeholder pointing to the corresponding charm change, as
	// it should be instead.
	h.results["resolved-"+p.Charm] = curl.String()
	return nil
}
// addService deploys or update a service with no units. Service options are
// also set or updated.
func (h *bundleHandler) addService(id string, p bundlechanges.AddServiceParams) error {
	// TODO frankban: the charm should really be resolved using
	// resolve(p.Charm, h.results) at this point: see TODO in addCharm.
	ch := h.results["resolved-"+p.Charm]
	// TODO frankban: handle service constraints in the bundle changes.
	numUnits, configYAML, cons, toMachineSpec := 0, "", constraints.Value{}, ""
	if err := h.client.ServiceDeploy(ch, p.Service, numUnits, configYAML, cons, toMachineSpec); err == nil {
		h.log.Infof("service %s deployed (charm: %s)", p.Service, ch)
		// TODO frankban (bug 1495952): do this check using the cause rather
		// than the string when a specific cause is available.
	} else if strings.HasSuffix(err.Error(), "service already exists") {
		// The service is already deployed in the environment: check that its
		// charm is compatible with the one declared in the bundle. If it is,
		// reuse the existing service or upgrade to a specified revision.
		// Exit with an error otherwise.
		if err := upgradeCharm(h.client, h.log, p.Service, ch); err != nil {
			return errors.Annotatef(err, "cannot upgrade service %q", p.Service)
		}
	} else {
		return errors.Annotatef(err, "cannot deploy service %q", p.Service)
	}
	// Apply bundle-declared options after deploying or upgrading.
	if len(p.Options) > 0 {
		if err := setServiceOptions(h.client, p.Service, p.Options); err != nil {
			return errors.Trace(err)
		}
		h.log.Infof("service %s configured", p.Service)
	}
	// Record the real service name so later changes can resolve "$<id>".
	h.results[id] = p.Service
	return nil
}
// addMachine creates a new top-level machine or container in the environment.
// Currently a no-op stub.
func (h *bundleHandler) addMachine(id string, p bundlechanges.AddMachineParams) error {
	// TODO frankban: implement this method.
	return nil
}
// addRelation creates a relationship between two services. A relation that
// already exists in the environment is treated as success.
func (h *bundleHandler) addRelation(id string, p bundlechanges.AddRelationParams) error {
	// Resolve "$<change-id>:iface" placeholders to real service names.
	ep1 := resolveRelation(p.Endpoint1, h.results)
	ep2 := resolveRelation(p.Endpoint2, h.results)
	_, err := h.client.AddRelation(ep1, ep2)
	if err == nil {
		// A new relation has been established.
		h.log.Infof("related %s and %s", ep1, ep2)
		return nil
	}
	// TODO frankban (bug 1495952): do this check using the cause rather than
	// the string when a specific cause is available.
	if strings.HasSuffix(err.Error(), "relation already exists") {
		// The relation is already present in the environment.
		h.log.Infof("%s and %s are already related", ep1, ep2)
		return nil
	}
	return errors.Annotatef(err, "cannot add relation between %q and %q", ep1, ep2)
}
// addUnit adds a single unit to a service already present in the environment.
// Currently a no-op stub.
func (h *bundleHandler) addUnit(id string, p bundlechanges.AddUnitParams) error {
	// TODO frankban: implement this method.
	return nil
}
// setAnnotations sets annotations for a service or a machine.
// Currently a no-op stub.
func (h *bundleHandler) setAnnotations(id string, p bundlechanges.SetAnnotationsParams) error {
	// TODO frankban: implement this method.
	return nil
}
// upgradeCharm upgrades the charm for the given service to the given charm id.
// If the service is already deployed using the given charm id, do nothing.
// This function returns an error if the existing charm and the target one are
// incompatible, meaning an upgrade from one to the other is not allowed.
func upgradeCharm(client *api.Client, log deploymentLogger, service, id string) error {
	existing, err := client.ServiceGetCharmURL(service)
	if err != nil {
		return errors.Annotatef(err, "cannot retrieve info for service %q", service)
	}
	// Nothing to do when the service already runs the requested charm.
	if existing.String() == id {
		log.Infof("reusing service %s (charm: %s)", service, id)
		return nil
	}
	url, err := charm.ParseURL(id)
	if err != nil {
		return errors.Annotatef(err, "cannot parse charm URL %q", id)
	}
	// Charms are considered compatible only when they differ by revision
	// alone (the revision-less paths must match).
	if url.WithRevision(-1).Path() != existing.WithRevision(-1).Path() {
		return errors.Errorf("bundle charm %q is incompatible with existing charm %q", id, existing)
	}
	if err := client.ServiceSetCharm(service, id, false); err != nil {
		return errors.Annotatef(err, "cannot upgrade charm to %q", id)
	}
	log.Infof("upgraded charm for existing service %s (from %s to %s)", service, existing, id)
	return nil
}
// setServiceOptions changes the configuration for the given service.
func setServiceOptions(client *api.Client, service string, options map[string]interface{}) error {
	// The API expects YAML of the form {service: {option: value, ...}}.
	config, err := yaml.Marshal(map[string]map[string]interface{}{service: options})
	if err != nil {
		return errors.Annotatef(err, "cannot marshal options for service %q", service)
	}
	if err := client.ServiceSetYAML(service, string(config)); err != nil {
		return errors.Annotatef(err, "cannot set options for service %q", service)
	}
	return nil
}
// resolve returns the real entity name for the bundle entity (for instance a
// service or a machine) with the given placeholder id.
// A placeholder id is a string like "$deploy-42" or "$addCharm-2", indicating
// the results of a previously applied change: a dollar sign followed by the
// id of the referred change. resolve panics if the placeholder does not
// start with "$"; it returns the empty string for an unknown change id.
func resolve(placeholder string, results map[string]string) string {
	if !strings.HasPrefix(placeholder, "$") {
		panic(`placeholder does not start with "$"`)
	}
	// Strip the leading "$" and look the change id up.
	return results[placeholder[1:]]
}
// resolveRelation returns the relation endpoint with the leading service
// placeholder in e replaced by its real service name. A trailing
// ":interface" part, when present, is preserved unchanged.
func resolveRelation(e string, results map[string]string) string {
	pieces := strings.SplitN(e, ":", 2)
	name := resolve(pieces[0], results)
	if len(pieces) == 2 {
		return fmt.Sprintf("%s:%s", name, pieces[1])
	}
	return name
}
Bundle deployment: introduce isErrServiceExists and isErrRelationExists.
// Copyright 2015 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package commands
import (
"fmt"
"strings"
"github.com/juju/bundlechanges"
"github.com/juju/errors"
"gopkg.in/juju/charm.v6-unstable"
"gopkg.in/yaml.v1"
"github.com/juju/juju/api"
"github.com/juju/juju/constraints"
"github.com/juju/juju/environs/config"
)
// deploymentLogger is used to notify clients about the bundle deployment
// progress.
type deploymentLogger interface {
	// Infof formats and logs the given message using Printf-style verbs.
	Infof(string, ...interface{})
}
// deployBundle deploys the given bundle data using the given API client and
// charm store client. The deployment is not transactional, and its progress is
// notified using the given deployment logger.
func deployBundle(data *charm.BundleData, client *api.Client, csclient *csClient, repoPath string, conf *config.Config, log deploymentLogger) error {
	// Validate the bundle up front, including its constraint strings.
	if err := data.Verify(func(s string) error {
		_, err := constraints.Parse(s)
		return err
	}); err != nil {
		return errors.Annotate(err, "cannot deploy bundle")
	}
	// Retrieve bundle changes.
	changes := bundlechanges.FromData(data)
	h := &bundleHandler{
		changes:  make(map[string]bundlechanges.Change, len(changes)),
		results:  make(map[string]string, len(changes)),
		client:   client,
		csclient: csclient,
		repoPath: repoPath,
		conf:     conf,
		log:      log,
		data:     data,
	}
	// Index the changes by id so placeholders can be resolved later.
	for _, change := range changes {
		h.changes[change.Id()] = change
	}
	// Deploy the bundle: changes are applied in the order produced by
	// bundlechanges, aborting on the first failure.
	var err error
	for _, change := range changes {
		switch change := change.(type) {
		case *bundlechanges.AddCharmChange:
			err = h.addCharm(change.Id(), change.Params)
		case *bundlechanges.AddMachineChange:
			err = h.addMachine(change.Id(), change.Params)
		case *bundlechanges.AddRelationChange:
			err = h.addRelation(change.Id(), change.Params)
		case *bundlechanges.AddServiceChange:
			err = h.addService(change.Id(), change.Params)
		case *bundlechanges.AddUnitChange:
			err = h.addUnit(change.Id(), change.Params)
		case *bundlechanges.SetAnnotationsChange:
			err = h.setAnnotations(change.Id(), change.Params)
		default:
			return errors.Errorf("unknown change type: %T", change)
		}
		if err != nil {
			return errors.Annotate(err, "cannot deploy bundle")
		}
	}
	return nil
}
// bundleHandler holds the state required to deploy a bundle: the parsed
// changes, the results of changes already applied, and the clients used to
// talk to the Juju API and the charm store.
type bundleHandler struct {
	// changes maps change ids to the change objects produced from the bundle.
	changes map[string]bundlechanges.Change
	// results records the real entity name produced by each applied change,
	// keyed by change id (looked up via resolve).
	results map[string]string
	// client is the Juju API client used to perform the deployment.
	client *api.Client
	// csclient is the charm store client used to resolve and add charms.
	csclient *csClient
	// repoPath is the path of the local charm repository.
	repoPath string
	// conf is the current environment configuration.
	conf *config.Config
	// log notifies clients about deployment progress.
	log deploymentLogger
	// data is the parsed bundle being deployed.
	data *charm.BundleData
}
// addCharm adds a charm to the environment, recording the resolved charm URL
// in h.results under the "resolved-<charm>" key.
func (h *bundleHandler) addCharm(id string, p bundlechanges.AddCharmParams) error {
	curl, repo, err := resolveCharmStoreEntityURL(p.Charm, h.csclient.params, h.repoPath, h.conf)
	if err != nil {
		return errors.Annotatef(err, "cannot resolve URL %q", p.Charm)
	}
	if curl.Series == "bundle" {
		return errors.Errorf("expected charm URL, got bundle URL %q", p.Charm)
	}
	if curl, err = addCharmViaAPI(h.client, curl, repo, h.csclient); err != nil {
		return errors.Annotatef(err, "cannot add charm %q", p.Charm)
	}
	h.log.Infof("added charm %s", curl)
	// TODO frankban: the key here should really be the change id, but in the
	// current bundlechanges format the charm name is included in the service
	// change, not a placeholder pointing to the corresponding charm change, as
	// it should be instead.
	h.results["resolved-"+p.Charm] = curl.String()
	return nil
}
// addService deploys or update a service with no units. Service options are
// also set or updated.
func (h *bundleHandler) addService(id string, p bundlechanges.AddServiceParams) error {
	// TODO frankban: the charm should really be resolved using
	// resolve(p.Charm, h.results) at this point: see TODO in addCharm.
	ch := h.results["resolved-"+p.Charm]
	// TODO frankban: handle service constraints in the bundle changes.
	numUnits, configYAML, cons, toMachineSpec := 0, "", constraints.Value{}, ""
	if err := h.client.ServiceDeploy(ch, p.Service, numUnits, configYAML, cons, toMachineSpec); err == nil {
		h.log.Infof("service %s deployed (charm: %s)", p.Service, ch)
	} else if isErrServiceExists(err) {
		// The service is already deployed in the environment: check that its
		// charm is compatible with the one declared in the bundle. If it is,
		// reuse the existing service or upgrade to a specified revision.
		// Exit with an error otherwise.
		if err := upgradeCharm(h.client, h.log, p.Service, ch); err != nil {
			return errors.Annotatef(err, "cannot upgrade service %q", p.Service)
		}
	} else {
		return errors.Annotatef(err, "cannot deploy service %q", p.Service)
	}
	// Apply bundle-declared options after deploying or upgrading.
	if len(p.Options) > 0 {
		if err := setServiceOptions(h.client, p.Service, p.Options); err != nil {
			return errors.Trace(err)
		}
		h.log.Infof("service %s configured", p.Service)
	}
	// Record the real service name so later changes can resolve "$<id>".
	h.results[id] = p.Service
	return nil
}
// addMachine creates a new top-level machine or container in the environment.
// Currently a no-op stub.
func (h *bundleHandler) addMachine(id string, p bundlechanges.AddMachineParams) error {
	// TODO frankban: implement this method.
	return nil
}
// addRelation creates a relationship between two services, treating a
// relation that already exists in the environment as a success.
func (h *bundleHandler) addRelation(id string, p bundlechanges.AddRelationParams) error {
	ep1 := resolveRelation(p.Endpoint1, h.results)
	ep2 := resolveRelation(p.Endpoint2, h.results)
	_, err := h.client.AddRelation(ep1, ep2)
	switch {
	case err == nil:
		// A new relation has been established.
		h.log.Infof("related %s and %s", ep1, ep2)
	case isErrRelationExists(err):
		// The relation is already present in the environment.
		h.log.Infof("%s and %s are already related", ep1, ep2)
	default:
		return errors.Annotatef(err, "cannot add relation between %q and %q", ep1, ep2)
	}
	return nil
}
// addUnit adds a single unit to a service already present in the environment.
// Currently a no-op stub.
func (h *bundleHandler) addUnit(id string, p bundlechanges.AddUnitParams) error {
	// TODO frankban: implement this method.
	return nil
}
// setAnnotations sets annotations for a service or a machine.
// Currently a no-op stub.
func (h *bundleHandler) setAnnotations(id string, p bundlechanges.SetAnnotationsParams) error {
	// TODO frankban: implement this method.
	return nil
}
// upgradeCharm upgrades the charm for the given service to the given charm id.
// If the service is already deployed using the given charm id, do nothing.
// This function returns an error if the existing charm and the target one are
// incompatible, meaning an upgrade from one to the other is not allowed.
func upgradeCharm(client *api.Client, log deploymentLogger, service, id string) error {
	existing, err := client.ServiceGetCharmURL(service)
	if err != nil {
		return errors.Annotatef(err, "cannot retrieve info for service %q", service)
	}
	// Nothing to do when the service already runs the requested charm.
	if existing.String() == id {
		log.Infof("reusing service %s (charm: %s)", service, id)
		return nil
	}
	url, err := charm.ParseURL(id)
	if err != nil {
		return errors.Annotatef(err, "cannot parse charm URL %q", id)
	}
	// Charms are considered compatible only when they differ by revision
	// alone (the revision-less paths must match).
	if url.WithRevision(-1).Path() != existing.WithRevision(-1).Path() {
		return errors.Errorf("bundle charm %q is incompatible with existing charm %q", id, existing)
	}
	if err := client.ServiceSetCharm(service, id, false); err != nil {
		return errors.Annotatef(err, "cannot upgrade charm to %q", id)
	}
	log.Infof("upgraded charm for existing service %s (from %s to %s)", service, existing, id)
	return nil
}
// setServiceOptions changes the configuration for the given service.
func setServiceOptions(client *api.Client, service string, options map[string]interface{}) error {
	// The API expects YAML of the form {service: {option: value, ...}}.
	config, err := yaml.Marshal(map[string]map[string]interface{}{service: options})
	if err != nil {
		return errors.Annotatef(err, "cannot marshal options for service %q", service)
	}
	if err := client.ServiceSetYAML(service, string(config)); err != nil {
		return errors.Annotatef(err, "cannot set options for service %q", service)
	}
	return nil
}
// resolve returns the real entity name for the bundle entity (for instance a
// service or a machine) with the given placeholder id.
// A placeholder id is a string like "$deploy-42" or "$addCharm-2": a dollar
// sign followed by the id of a previously applied change. resolve panics if
// the placeholder does not start with "$" and returns the empty string for
// an unknown change id.
func resolve(placeholder string, results map[string]string) string {
	if !strings.HasPrefix(placeholder, "$") {
		panic(`placeholder does not start with "$"`)
	}
	return results[strings.TrimPrefix(placeholder, "$")]
}
// resolveRelation returns the relation endpoint with the leading service
// placeholder in e replaced by its real service name; an optional
// ":interface" suffix is preserved unchanged.
func resolveRelation(e string, results map[string]string) string {
	idx := strings.Index(e, ":")
	if idx < 0 {
		return resolve(e, results)
	}
	return fmt.Sprintf("%s:%s", resolve(e[:idx], results), e[idx+1:])
}
// isErrServiceExists reports whether the given error has been generated
// from trying to deploy a service that already exists.
func isErrServiceExists(err error) bool {
// TODO frankban (bug 1495952): do this check using the cause rather than
// the string when a specific cause is available.
return strings.HasSuffix(err.Error(), "service already exists")
}
// isErrRelationExists reports whether the given error has been generated
// from trying to create an already established relation.
func isErrRelationExists(err error) bool {
// TODO frankban (bug 1495952): do this check using the cause rather than
// the string when a specific cause is available.
return strings.HasSuffix(err.Error(), "relation already exists")
}
|
// Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"crypto/tls"
"flag"
"fmt"
"html/template"
"io/ioutil"
"log"
"net/http"
"net/http/pprof"
"os"
"path/filepath"
"strings"
"time"
"github.com/google/zoekt/build"
"github.com/google/zoekt/shards"
"github.com/google/zoekt/web"
)
// logFormat is the reference-time layout used in rotated log file names;
// dashes replace colons so the names are safe on all filesystems.
const logFormat = "2006-01-02T15-04-05.999999999Z07"

// Version is the version string reported by the web UI.
// To be set from the linker.
var Version string
// divertLogs redirects the standard logger to timestamped files under dir,
// switching to a fresh file every interval. It never returns, and exits the
// process if a log file cannot be created; intended to run in a goroutine.
func divertLogs(dir string, interval time.Duration) {
	t := time.NewTicker(interval)
	var last *os.File
	for {
		nm := filepath.Join(dir, fmt.Sprintf("zoekt-webserver.%s.%d.log", time.Now().Format(logFormat), os.Getpid()))
		fmt.Fprintf(os.Stderr, "writing logs to %s\n", nm)

		f, err := os.Create(nm)
		if err != nil {
			// There is not much we can do now.
			fmt.Fprintf(os.Stderr, "can't create output file %s: %v\n", nm, err)
			os.Exit(2)
		}

		log.SetOutput(f)
		// Close the previous log file; explicit nil guard instead of relying
		// on (*os.File).Close tolerating a nil receiver on the first pass.
		if last != nil {
			last.Close()
		}
		last = f

		<-t.C
	}
}
// templateExtension is the file-name extension used for template files
// loaded from and written to --template_dir.
const templateExtension = ".html.tpl"
// loadTemplates parses every "*.html.tpl" file in dir and registers it on
// tpl under its base file name with a ".html" suffix trimmed.
// NOTE(review): the globbed names end in ".tpl", so TrimSuffix(base, ".html")
// is normally a no-op — confirm the intended registered template names.
func loadTemplates(tpl *template.Template, dir string) error {
	fs, err := filepath.Glob(dir + "/*" + templateExtension)
	if err != nil {
		// Return the error instead of log.Fatalf: this function already
		// reports errors to its caller, which decides how to fail.
		return fmt.Errorf("Glob(%s): %v", dir, err)
	}
	for _, fn := range fs {
		content, err := ioutil.ReadFile(fn)
		if err != nil {
			return err
		}

		base := filepath.Base(fn)
		base = strings.TrimSuffix(base, ".html")
		if _, err := tpl.New(base).Parse(string(content)); err != nil {
			return fmt.Errorf("Parse(%s): %v", fn, err)
		}
	}
	return nil
}
// writeTemplates dumps the built-in templates into dir, one file per
// template, so they can be customized and reloaded with --template_dir.
func writeTemplates(dir string) error {
	if dir == "" {
		return fmt.Errorf("must set --template_dir")
	}
	for name, text := range web.TemplateText {
		dest := filepath.Join(dir, name+templateExtension)
		if err := ioutil.WriteFile(dest, []byte(text), 0644); err != nil {
			return err
		}
	}
	return nil
}
// main parses flags, optionally diverts logs, builds the index searcher and
// web mux, starts the watchdog, and serves HTTP or HTTPS until failure.
func main() {
	logDir := flag.String("log_dir", "", "log to this directory rather than stderr.")
	logRefresh := flag.Duration("log_refresh", 24*time.Hour, "if using --log_dir, start writing a new file this often.")
	listen := flag.String("listen", ":6070", "listen on this address.")
	index := flag.String("index", build.DefaultDir, "set index directory to use")
	html := flag.Bool("html", true, "enable HTML interface")
	restAPI := flag.Bool("rest_api", false, "enable REST API")
	print := flag.Bool("print", false, "enable local result URLs")
	enablePprof := flag.Bool("pprof", false, "set to enable remote profiling.")
	sslCert := flag.String("ssl_cert", "", "set path to SSL .pem holding certificate.")
	sslKey := flag.String("ssl_key", "", "set path to SSL .pem holding key.")
	hostCustomization := flag.String(
		"host_customization", "",
		"specify host customization, as HOST1=QUERY,HOST2=QUERY")
	templateDir := flag.String("template_dir", "", "set directory from which to load custom .html.tpl template files")
	dumpTemplates := flag.Bool("dump_templates", false, "dump templates into --template_dir and exit.")
	flag.Parse()

	if *dumpTemplates {
		if err := writeTemplates(*templateDir); err != nil {
			log.Fatal(err)
		}
		os.Exit(0)
	}

	if *logDir != "" {
		if fi, err := os.Lstat(*logDir); err != nil || !fi.IsDir() {
			// Fixed: log.Fatal does not interpret format verbs; Fatalf does.
			log.Fatalf("%s is not a directory", *logDir)
		}
		// We could do fdup acrobatics to also redirect
		// stderr, but it is simpler and more portable for the
		// caller to divert stderr output if necessary.
		go divertLogs(*logDir, *logRefresh)
	}

	searcher, err := shards.NewDirectorySearcher(*index)
	if err != nil {
		log.Fatal(err)
	}

	s := &web.Server{
		Searcher: searcher,
		Top:      web.Top,
		Version:  Version,
	}

	if *templateDir != "" {
		if err := loadTemplates(s.Top, *templateDir); err != nil {
			// Fixed: format verb requires Fatalf.
			log.Fatalf("loadTemplates: %v", err)
		}
	}

	s.Print = *print
	s.HTML = *html
	s.RESTAPI = *restAPI

	if *hostCustomization != "" {
		s.HostCustomQueries = map[string]string{}
		// strings.Split is the idiomatic spelling of SplitN with n < 0.
		for _, h := range strings.Split(*hostCustomization, ",") {
			if len(h) == 0 {
				continue
			}
			fields := strings.SplitN(h, "=", 2)
			if len(fields) < 2 {
				// Fixed: format verb requires Fatalf.
				log.Fatalf("invalid host_customization %q", h)
			}
			s.HostCustomQueries[fields[0]] = fields[1]
		}
	}

	handler, err := web.NewMux(s)
	if err != nil {
		log.Fatal(err)
	}

	if *enablePprof {
		handler.HandleFunc("/debug/pprof/", pprof.Index)
		handler.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
		handler.HandleFunc("/debug/pprof/profile", pprof.Profile)
		handler.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		handler.HandleFunc("/debug/pprof/trace", pprof.Trace)
	}

	// The watchdog probes the same address this process serves on.
	watchdogAddr := "http://" + *listen
	if *sslCert != "" || *sslKey != "" {
		watchdogAddr = "https://" + *listen
	}
	go watchdog(30*time.Second, watchdogAddr)

	if *sslCert != "" || *sslKey != "" {
		log.Printf("serving HTTPS on %s", *listen)
		err = http.ListenAndServeTLS(*listen, *sslCert, *sslKey, handler)
	} else {
		log.Printf("serving HTTP on %s", *listen)
		err = http.ListenAndServe(*listen, handler)
	}
	log.Printf("ListenAndServe: %v", err)
}
func watchdogOnce(ctx context.Context, client *http.Client, addr string) error {
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(5*time.Second))
defer cancel()
req, err := http.NewRequest("GET", addr, nil)
if err != nil {
return err
}
req = req.WithContext(ctx)
resp, err := client.Do(req)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("watchdog: status %v", resp.StatusCode)
}
return nil
}
// watchdog probes addr every dt and terminates the process via log.Fatalf if
// a single probe fails. It never returns; run it in its own goroutine.
func watchdog(dt time.Duration, addr string) {
	tr := &http.Transport{
		// The server may be running with a self-signed certificate.
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	client := &http.Client{
		Transport: tr,
	}
	tick := time.NewTicker(dt)
	// "for range" is the idiomatic form of "for _ = range".
	for range tick.C {
		if err := watchdogOnce(context.Background(), client, addr); err != nil {
			log.Fatalf("watchdog: %v", err)
		}
	}
}
cmd/zoekt-webserver: abort watchdog with log.Panicf
Fixes #34.
Change-Id: I4b8fc35956c2aee44458065b96f357f5a6836dd8
// Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"crypto/tls"
"flag"
"fmt"
"html/template"
"io/ioutil"
"log"
"net/http"
"net/http/pprof"
"os"
"path/filepath"
"strings"
"time"
"github.com/google/zoekt/build"
"github.com/google/zoekt/shards"
"github.com/google/zoekt/web"
)
// logFormat is the reference-time layout used in rotated log file names;
// dashes replace colons so the names are safe on all filesystems.
const logFormat = "2006-01-02T15-04-05.999999999Z07"

// Version is the version string reported by the web UI.
// To be set from the linker.
var Version string
// divertLogs redirects the standard logger to timestamped files under dir,
// switching to a fresh file every interval. It never returns, and exits the
// process if a log file cannot be created; intended to run in a goroutine.
func divertLogs(dir string, interval time.Duration) {
	t := time.NewTicker(interval)
	var last *os.File
	for {
		nm := filepath.Join(dir, fmt.Sprintf("zoekt-webserver.%s.%d.log", time.Now().Format(logFormat), os.Getpid()))
		fmt.Fprintf(os.Stderr, "writing logs to %s\n", nm)

		f, err := os.Create(nm)
		if err != nil {
			// There is not much we can do now.
			fmt.Fprintf(os.Stderr, "can't create output file %s: %v\n", nm, err)
			os.Exit(2)
		}

		log.SetOutput(f)
		// Close the previous log file; explicit nil guard instead of relying
		// on (*os.File).Close tolerating a nil receiver on the first pass.
		if last != nil {
			last.Close()
		}
		last = f

		<-t.C
	}
}
// templateExtension is the file-name extension used for template files
// loaded from and written to --template_dir.
const templateExtension = ".html.tpl"
// loadTemplates parses every "*.html.tpl" file in dir and registers it on
// tpl under its base file name with a ".html" suffix trimmed.
// NOTE(review): the globbed names end in ".tpl", so TrimSuffix(base, ".html")
// is normally a no-op — confirm the intended registered template names.
func loadTemplates(tpl *template.Template, dir string) error {
	fs, err := filepath.Glob(dir + "/*" + templateExtension)
	if err != nil {
		// Return the error instead of log.Fatalf: this function already
		// reports errors to its caller, which decides how to fail.
		return fmt.Errorf("Glob(%s): %v", dir, err)
	}
	for _, fn := range fs {
		content, err := ioutil.ReadFile(fn)
		if err != nil {
			return err
		}

		base := filepath.Base(fn)
		base = strings.TrimSuffix(base, ".html")
		if _, err := tpl.New(base).Parse(string(content)); err != nil {
			return fmt.Errorf("Parse(%s): %v", fn, err)
		}
	}
	return nil
}
// writeTemplates dumps the built-in templates into dir, one file per
// template, so they can be customized and reloaded with --template_dir.
func writeTemplates(dir string) error {
	if dir == "" {
		return fmt.Errorf("must set --template_dir")
	}
	for name, text := range web.TemplateText {
		dest := filepath.Join(dir, name+templateExtension)
		if err := ioutil.WriteFile(dest, []byte(text), 0644); err != nil {
			return err
		}
	}
	return nil
}
// main parses flags, optionally diverts logs, builds the index searcher and
// web mux, starts the watchdog, and serves HTTP or HTTPS until failure.
func main() {
	logDir := flag.String("log_dir", "", "log to this directory rather than stderr.")
	logRefresh := flag.Duration("log_refresh", 24*time.Hour, "if using --log_dir, start writing a new file this often.")
	listen := flag.String("listen", ":6070", "listen on this address.")
	index := flag.String("index", build.DefaultDir, "set index directory to use")
	html := flag.Bool("html", true, "enable HTML interface")
	restAPI := flag.Bool("rest_api", false, "enable REST API")
	print := flag.Bool("print", false, "enable local result URLs")
	enablePprof := flag.Bool("pprof", false, "set to enable remote profiling.")
	sslCert := flag.String("ssl_cert", "", "set path to SSL .pem holding certificate.")
	sslKey := flag.String("ssl_key", "", "set path to SSL .pem holding key.")
	hostCustomization := flag.String(
		"host_customization", "",
		"specify host customization, as HOST1=QUERY,HOST2=QUERY")
	templateDir := flag.String("template_dir", "", "set directory from which to load custom .html.tpl template files")
	dumpTemplates := flag.Bool("dump_templates", false, "dump templates into --template_dir and exit.")
	flag.Parse()

	if *dumpTemplates {
		if err := writeTemplates(*templateDir); err != nil {
			log.Fatal(err)
		}
		os.Exit(0)
	}

	if *logDir != "" {
		if fi, err := os.Lstat(*logDir); err != nil || !fi.IsDir() {
			// Fixed: log.Fatal does not interpret format verbs; Fatalf does.
			log.Fatalf("%s is not a directory", *logDir)
		}
		// We could do fdup acrobatics to also redirect
		// stderr, but it is simpler and more portable for the
		// caller to divert stderr output if necessary.
		go divertLogs(*logDir, *logRefresh)
	}

	searcher, err := shards.NewDirectorySearcher(*index)
	if err != nil {
		log.Fatal(err)
	}

	s := &web.Server{
		Searcher: searcher,
		Top:      web.Top,
		Version:  Version,
	}

	if *templateDir != "" {
		if err := loadTemplates(s.Top, *templateDir); err != nil {
			// Fixed: format verb requires Fatalf.
			log.Fatalf("loadTemplates: %v", err)
		}
	}

	s.Print = *print
	s.HTML = *html
	s.RESTAPI = *restAPI

	if *hostCustomization != "" {
		s.HostCustomQueries = map[string]string{}
		// strings.Split is the idiomatic spelling of SplitN with n < 0.
		for _, h := range strings.Split(*hostCustomization, ",") {
			if len(h) == 0 {
				continue
			}
			fields := strings.SplitN(h, "=", 2)
			if len(fields) < 2 {
				// Fixed: format verb requires Fatalf.
				log.Fatalf("invalid host_customization %q", h)
			}
			s.HostCustomQueries[fields[0]] = fields[1]
		}
	}

	handler, err := web.NewMux(s)
	if err != nil {
		log.Fatal(err)
	}

	if *enablePprof {
		handler.HandleFunc("/debug/pprof/", pprof.Index)
		handler.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
		handler.HandleFunc("/debug/pprof/profile", pprof.Profile)
		handler.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		handler.HandleFunc("/debug/pprof/trace", pprof.Trace)
	}

	// The watchdog probes the same address this process serves on.
	watchdogAddr := "http://" + *listen
	if *sslCert != "" || *sslKey != "" {
		watchdogAddr = "https://" + *listen
	}
	go watchdog(30*time.Second, watchdogAddr)

	if *sslCert != "" || *sslKey != "" {
		log.Printf("serving HTTPS on %s", *listen)
		err = http.ListenAndServeTLS(*listen, *sslCert, *sslKey, handler)
	} else {
		log.Printf("serving HTTP on %s", *listen)
		err = http.ListenAndServe(*listen, handler)
	}
	log.Printf("ListenAndServe: %v", err)
}
func watchdogOnce(ctx context.Context, client *http.Client, addr string) error {
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(5*time.Second))
defer cancel()
req, err := http.NewRequest("GET", addr, nil)
if err != nil {
return err
}
req = req.WithContext(ctx)
resp, err := client.Do(req)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("watchdog: status %v", resp.StatusCode)
}
return nil
}
// watchdog probes addr every dt and aborts the process via log.Panicf if a
// single probe fails. It never returns; run it in its own goroutine.
func watchdog(dt time.Duration, addr string) {
	tr := &http.Transport{
		// The server may be running with a self-signed certificate.
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	client := &http.Client{
		Transport: tr,
	}
	tick := time.NewTicker(dt)
	// "for range" is the idiomatic form of "for _ = range".
	for range tick.C {
		if err := watchdogOnce(context.Background(), client, addr); err != nil {
			log.Panicf("watchdog: %v", err)
		}
	}
}
|
package test
import (
"fmt"
"strings"
"sync"
)
// lineWriter is an io.WriteCloser that buffers written bytes and emits them
// one complete line at a time through WriteStringFunc.
type lineWriter struct {
	// mu guards buff. A concrete sync.Mutex is usable at the zero value;
	// the previous sync.Locker interface field was never initialized, so
	// the first Lock call would panic on a nil interface.
	mu sync.Mutex
	// buff accumulates written bytes until a newline is seen.
	buff string
	// WriteStringFunc, if non-nil, receives each complete line including
	// its trailing newline.
	WriteStringFunc func(line string)
}
// newLineWriter returns a lineWriter that forwards each complete line to
// writeStringFunc. The zero-value buffer needs no explicit initialization.
func newLineWriter(writeStringFunc func(line string)) *lineWriter {
	return &lineWriter{WriteStringFunc: writeStringFunc}
}
// Write implements io.Writer: input is buffered and flushed to
// WriteStringFunc one line at a time (each emitted line keeps its trailing
// newline); a partial final line stays buffered. The full input length is
// always reported as written.
func (w *lineWriter) Write(p []byte) (n int, err error) {
	// Hold the mutex for the whole call: the original released it before the
	// flush loop, so concurrent Write calls raced on buff.
	// NOTE: WriteStringFunc now runs with the lock held and must not call
	// back into Write.
	w.mu.Lock()
	defer w.mu.Unlock()
	w.buff += string(p)
	for len(w.buff) > 0 {
		items := strings.SplitN(w.buff, "\n", 2)
		if len(items) < 2 {
			// No complete line buffered yet.
			break
		}
		line := items[0]
		fmt.Printf("line:%s\n", line)
		w.buff = items[1]
		if w.WriteStringFunc != nil {
			w.WriteStringFunc(line + "\n")
		}
	}
	return len(p), nil
}
// Close implements io.Closer. Bytes still buffered without a trailing
// newline are discarded.
func (w *lineWriter) Close() error {
	return nil
}
test: make lineWriter's mutex usable (replace the never-initialized sync.Locker field with a concrete sync.Mutex)
package test
import (
"fmt"
"strings"
"sync"
)
// lineWriter is an io.WriteCloser that buffers written bytes and emits them
// one complete line at a time through WriteStringFunc.
type lineWriter struct {
	// mu guards buff; usable at its zero value.
	mu sync.Mutex
	// buff accumulates written bytes until a newline is seen.
	buff string
	// WriteStringFunc, if non-nil, receives each complete line including
	// its trailing newline.
	WriteStringFunc func(line string)
}
// newLineWriter returns a lineWriter that forwards each complete line to
// writeStringFunc. The zero-value buffer needs no explicit initialization.
func newLineWriter(writeStringFunc func(line string)) *lineWriter {
	return &lineWriter{WriteStringFunc: writeStringFunc}
}
// Write implements io.Writer: input is buffered and flushed to
// WriteStringFunc one line at a time (each emitted line keeps its trailing
// newline); a partial final line stays buffered. The full input length is
// always reported as written.
func (w *lineWriter) Write(p []byte) (n int, err error) {
	// Hold the mutex for the whole call: the original released it before the
	// flush loop, so concurrent Write calls raced on buff.
	// NOTE: WriteStringFunc now runs with the lock held and must not call
	// back into Write.
	w.mu.Lock()
	defer w.mu.Unlock()
	w.buff += string(p)
	for len(w.buff) > 0 {
		items := strings.SplitN(w.buff, "\n", 2)
		if len(items) < 2 {
			// No complete line buffered yet.
			break
		}
		line := items[0]
		fmt.Printf("line:%s\n", line)
		w.buff = items[1]
		if w.WriteStringFunc != nil {
			w.WriteStringFunc(line + "\n")
		}
	}
	return len(p), nil
}
// Close implements io.Closer. Bytes still buffered without a trailing
// newline are discarded.
func (w *lineWriter) Close() error {
	return nil
}
|
package main
import (
"fmt"
"go/ast"
"go/token"
"reflect"
)
// varState tracks the lifecycle of a variable while walking a function body.
type varState int

const (
	Nil    varState = iota // declared via var, never assigned
	Unused                 // assigned, value not read since
	Used                   // value has been read
)
// varUsage records a variable's current state together with the position of
// its declaration/assignment, used for error reporting.
type varUsage struct {
	state    varState
	position token.Pos
}
// copyVarMap returns a shallow copy of a.
func copyVarMap(a map[string]varUsage) map[string]varUsage {
	// Pre-size the destination to avoid rehashing while copying.
	b := make(map[string]varUsage, len(a))
	for k, v := range a {
		b[k] = v
	}
	return b
}
// checkNoAssignUnusedBody runs the assignment/usage check over every
// statement in body, mutating declared in place.
func checkNoAssignUnusedBody(info fileMetadata, body []ast.Stmt, declared map[string]varUsage) {
	for i := range body {
		noAssignStatement(info, body[i], declared)
	}
}
// assignUsedForExpr walks expr and marks every identifier it reads as Used
// in scope. Identifiers not yet tracked are inserted directly in the Used
// state with position 0.
func assignUsedForExpr(info fileMetadata, expr ast.Expr, scope map[string]varUsage) {
	if expr == nil {
		return
	}
	switch v := expr.(type) {
	case *ast.Ident:
		if vinfo, ok := scope[v.Name]; ok {
			if vinfo.state != Used {
				// Promote to Used, keeping the recorded position.
				scope[v.Name] = varUsage{Used, vinfo.position}
			}
		} else {
			scope[v.Name] = varUsage{Used, 0}
		}
	case *ast.StarExpr:
		assignUsedForExpr(info, v.X, scope)
	case *ast.BinaryExpr:
		assignUsedForExpr(info, v.X, scope)
		assignUsedForExpr(info, v.Y, scope)
	case *ast.CallExpr:
		assignUsedForExpr(info, v.Fun, scope)
		for _, arg := range v.Args {
			assignUsedForExpr(info, arg, scope)
		}
	case *ast.CompositeLit:
		assignUsedForExpr(info, v.Type, scope)
		for _, e := range v.Elts {
			assignUsedForExpr(info, e, scope)
		}
	case *ast.IndexExpr:
		assignUsedForExpr(info, v.X, scope)
		assignUsedForExpr(info, v.Index, scope)
	case *ast.KeyValueExpr:
		//Only the value counts
		assignUsedForExpr(info, v.Value, scope)
	case *ast.ParenExpr:
		assignUsedForExpr(info, v.X, scope)
	case *ast.SelectorExpr:
		// The selected field doesn't count
		assignUsedForExpr(info, v.X, scope)
	case *ast.SliceExpr:
		assignUsedForExpr(info, v.X, scope)
		assignUsedForExpr(info, v.Low, scope)
		assignUsedForExpr(info, v.High, scope)
		assignUsedForExpr(info, v.Max, scope)
	case *ast.UnaryExpr:
		assignUsedForExpr(info, v.X, scope)
	case *ast.TypeAssertExpr:
		assignUsedForExpr(info, v.X, scope)
	default:
		// Other expression kinds (basic literals, func types, ...) are
		// intentionally ignored.
	}
}
// lhsForExpr processes a single left-hand-side expression of an assignment
// with token tok (ASSIGN or DEFINE): it reports variables whose previous
// value was never read before being overwritten, and records the new
// assignment state in scope.
func lhsForExpr(info fileMetadata, exp ast.Expr, tok token.Token, scope map[string]varUsage) {
	if exp == nil {
		return
	}
	var name string
	var pos token.Pos
	switch expv := exp.(type) {
	case *ast.Ident:
		name = expv.Name
		pos = expv.Pos()
	case *ast.StarExpr:
		// Assignment through a pointer selector: track the field name.
		if x, ok := expv.X.(*ast.SelectorExpr); ok {
			name = x.Sel.String()
			pos = expv.Pos()
		} else {
			cpos := info.fset.Position(expv.Pos())
			fmt.Printf("%s:Weird type of StarExpr: %s\n", cpos, reflect.TypeOf(expv.X))
			return
		}
	case *ast.IndexExpr:
		// m[k] = ... reads both m and k; nothing new is tracked.
		assignUsedForExpr(info, expv.X, scope)
		assignUsedForExpr(info, expv.Index, scope)
		return
	case *ast.SelectorExpr:
		// x.f = ... reads x; the field itself is not tracked.
		assignUsedForExpr(info, expv.X, scope)
		return
	default:
		fmt.Println("noAssignStatement: weird assign type", reflect.TypeOf(exp))
		return
	}
	vinfo, ok := scope[name]
	if ok {
		// Overwriting a value that was never read: report it.
		if tok == token.ASSIGN && vinfo.state == Unused {
			vpos := info.fset.Position(vinfo.position)
			cpos := info.fset.Position(pos)
			fmt.Printf("%s:Declared variable `%s` which goes unused\n", vpos, name)
			fmt.Printf("%s:`%s` gets covered here\n", cpos, name)
			hasErrors = true
		}
		// First assignment to a var-declared (Nil) variable.
		if tok == token.ASSIGN && vinfo.state == Nil {
			scope[name] = varUsage{Unused, vinfo.position}
		}
	} else {
		if tok == token.DEFINE || tok == token.ASSIGN {
			// "_" never matters; "err" is deliberately exempt —
			// presumably because it is conventionally reassigned.
			if name != "_" && name != "err" {
				scope[name] = varUsage{Unused, pos}
			}
		}
	}
}
// noAssignStatement walks a single statement and updates the variable-usage
// state in localScope: right-hand sides mark variables as Used (via
// assignUsedForExpr), left-hand sides are checked/recorded via lhsForExpr,
// and compound statements recurse into their sub-statements and bodies.
func noAssignStatement(info fileMetadata, st ast.Stmt, localScope map[string]varUsage) {
	if st == nil {
		return
	}
	switch v := st.(type) {
	// Things which we're checking
	case *ast.AssignStmt:
		// Process the RHS first so `x = x + 1` counts as a read of x
		// before the LHS assignment is examined.
		for _, rhs := range v.Rhs {
			assignUsedForExpr(info, rhs, localScope)
		}
		for _, exp := range v.Lhs {
			lhsForExpr(info, exp, v.Tok, localScope)
		}
	case *ast.DeclStmt:
		decl := v.Decl.(*ast.GenDecl)
		if decl.Tok == token.VAR {
			for _, spec := range decl.Specs {
				s := spec.(*ast.ValueSpec)
				// `var x error` declarations are deliberately exempt
				// from tracking.
				if typ, ok := s.Type.(*ast.Ident); ok {
					if typ.Name == "error" {
						continue
					}
				}
				for _, varname := range s.Names {
					if _, ok := localScope[varname.Name]; !ok {
						if varname.Name != "_" {
							// Declared but not yet assigned: state Nil.
							localScope[varname.Name] = varUsage{Nil, varname.Pos()}
						}
					}
				}
			}
		}
	// Things which may recurse
	case *ast.BlockStmt:
		checkNoAssignUnusedBody(info, v.List, localScope)
	case *ast.DeferStmt:
		//TODO(barakmich): Check this
		if *debug {
			fmt.Println("TODO: Check a defer statement")
		}
	case *ast.GoStmt:
		assignUsedForExpr(info, v.Call, localScope)
	case *ast.ForStmt:
		noAssignStatement(info, v.Init, localScope)
		assignUsedForExpr(info, v.Cond, localScope)
		noAssignStatement(info, v.Post, localScope)
		checkNoAssignUnusedBody(info, v.Body.List, localScope)
	case *ast.IfStmt:
		// Each branch gets its own scope; only variables that end up Used
		// inside a branch are merged back, so an assignment in one branch
		// is not reported against the other.
		ifScope := make(map[string]varUsage)
		elseScope := make(map[string]varUsage)
		noAssignStatement(info, v.Init, localScope)
		assignUsedForExpr(info, v.Cond, localScope)
		checkNoAssignUnusedBody(info, v.Body.List, ifScope)
		noAssignStatement(info, v.Else, elseScope)
		for k, varinfo := range ifScope {
			if varinfo.state == Used {
				localScope[k] = varinfo
			}
		}
		for k, varinfo := range elseScope {
			if varinfo.state == Used {
				localScope[k] = varinfo
			}
		}
	case *ast.RangeStmt:
		if v.Tok == token.ILLEGAL {
			pos := info.fset.Position(v.Pos())
			fmt.Printf("%s:Illegal range\n", pos)
		}
		// The ranged-over expression is a read; key/value are targets.
		assignUsedForExpr(info, v.X, localScope)
		lhsForExpr(info, v.Key, v.Tok, localScope)
		lhsForExpr(info, v.Value, v.Tok, localScope)
		checkNoAssignUnusedBody(info, v.Body.List, localScope)
	case *ast.SelectStmt:
		checkNoAssignUnusedBody(info, v.Body.List, localScope)
	case *ast.SwitchStmt:
		noAssignStatement(info, v.Init, localScope)
		checkNoAssignUnusedBody(info, v.Body.List, localScope)
	case *ast.TypeSwitchStmt:
		noAssignStatement(info, v.Init, localScope)
		noAssignStatement(info, v.Assign, localScope)
		checkNoAssignUnusedBody(info, v.Body.List, localScope)
	case *ast.CaseClause:
		// Like if/else: track the case body in a fresh scope and merge
		// back only variables that became Used.
		newScope := make(map[string]varUsage)
		checkNoAssignUnusedBody(info, v.Body, newScope)
		for k, varinfo := range newScope {
			if varinfo.state == Used {
				localScope[k] = varinfo
			}
		}
	case *ast.CommClause:
		checkNoAssignUnusedBody(info, v.Body, localScope)
	// Things which are easy
	case *ast.IncDecStmt:
		// IncDec shouldn't count as usage...
	case *ast.ReturnStmt:
		for _, res := range v.Results {
			assignUsedForExpr(info, res, localScope)
		}
	case *ast.SendStmt:
		assignUsedForExpr(info, v.Chan, localScope)
		assignUsedForExpr(info, v.Value, localScope)
	case *ast.ExprStmt:
		assignUsedForExpr(info, v.X, localScope)
	case *ast.LabeledStmt:
		noAssignStatement(info, v.Stmt, localScope)
	case *ast.BranchStmt:
	case *ast.EmptyStmt:
	case *ast.BadStmt:
		pos := info.fset.Position(v.Pos())
		if *debug {
			fmt.Printf("%s:Bad statement?\n", pos)
		}
	default:
		pos := info.fset.Position(v.Pos())
		if *debug {
			fmt.Println("The hell is", reflect.TypeOf(st), pos)
		}
	}
}
// CheckNoAssignUnused runs the unused-assignment check over every function
// declared at the top level of the file. Each function's parameters are
// seeded as Unused, the body is walked, and (in debug mode) anything that
// never reached the Used state is printed.
func CheckNoAssignUnused(info fileMetadata, file *ast.File) {
	for _, obj := range file.Scope.Objects {
		if obj.Kind != ast.Fun {
			continue
		}
		funcScope := make(map[string]varUsage)
		fn := obj.Decl.(*ast.FuncDecl)
		for _, field := range fn.Type.Params.List {
			for _, ident := range field.Names {
				funcScope[ident.Name] = varUsage{Unused, ident.Pos()}
			}
		}
		checkNoAssignUnusedBody(info, fn.Body.List, funcScope)
		if *debug {
			for name, usage := range funcScope {
				if usage.state == Used {
					continue
				}
				pos := info.fset.Position(usage.position)
				fmt.Printf("%s:DEBUG: `%s` apparently Unused?\n", pos, name)
			}
		}
	}
}
add better star checking
package main
import (
"fmt"
"go/ast"
"go/token"
"reflect"
)
// varState tracks what the checker currently knows about a variable.
type varState int

const (
	// Nil: declared (e.g. `var x int`) but not yet assigned a value.
	Nil varState = iota
	// Unused: assigned a value that has not been read since.
	Unused
	// Used: the variable's value has been read.
	Used
)

// varUsage pairs a variable's state with the source position that
// produced it, so diagnostics can point back at the declaration or
// assignment site.
type varUsage struct {
	state    varState
	position token.Pos
}
// copyVarMap returns a shallow copy of the given usage map.
func copyVarMap(a map[string]varUsage) map[string]varUsage {
	dup := make(map[string]varUsage, len(a))
	for name, usage := range a {
		dup[name] = usage
	}
	return dup
}
// checkNoAssignUnusedBody runs the assignment/usage check over each
// statement of a body in order, mutating the declared map as it goes.
func checkNoAssignUnusedBody(info fileMetadata, body []ast.Stmt, declared map[string]varUsage) {
	for i := range body {
		noAssignStatement(info, body[i], declared)
	}
}
// assignUsedForExpr recursively walks an expression that is being read and
// marks every identifier it reaches as Used in scope. Identifiers not yet
// tracked are recorded as Used with position 0 (no declaration seen here).
func assignUsedForExpr(info fileMetadata, expr ast.Expr, scope map[string]varUsage) {
	if expr == nil {
		return
	}
	switch v := expr.(type) {
	case *ast.Ident:
		if vinfo, ok := scope[v.Name]; ok {
			if vinfo.state != Used {
				// Keep the original position so later reports can point
				// back at the declaration/assignment.
				scope[v.Name] = varUsage{Used, vinfo.position}
			}
		} else {
			scope[v.Name] = varUsage{Used, 0}
		}
	case *ast.StarExpr:
		assignUsedForExpr(info, v.X, scope)
	case *ast.BinaryExpr:
		assignUsedForExpr(info, v.X, scope)
		assignUsedForExpr(info, v.Y, scope)
	case *ast.CallExpr:
		// Both the callee expression and every argument are reads.
		assignUsedForExpr(info, v.Fun, scope)
		for _, arg := range v.Args {
			assignUsedForExpr(info, arg, scope)
		}
	case *ast.CompositeLit:
		assignUsedForExpr(info, v.Type, scope)
		for _, e := range v.Elts {
			assignUsedForExpr(info, e, scope)
		}
	case *ast.IndexExpr:
		assignUsedForExpr(info, v.X, scope)
		assignUsedForExpr(info, v.Index, scope)
	case *ast.KeyValueExpr:
		//Only the value counts
		assignUsedForExpr(info, v.Value, scope)
	case *ast.ParenExpr:
		assignUsedForExpr(info, v.X, scope)
	case *ast.SelectorExpr:
		// The selected field doesn't count
		assignUsedForExpr(info, v.X, scope)
	case *ast.SliceExpr:
		assignUsedForExpr(info, v.X, scope)
		assignUsedForExpr(info, v.Low, scope)
		assignUsedForExpr(info, v.High, scope)
		assignUsedForExpr(info, v.Max, scope)
	case *ast.UnaryExpr:
		assignUsedForExpr(info, v.X, scope)
	case *ast.TypeAssertExpr:
		assignUsedForExpr(info, v.X, scope)
	default:
		// Literals and other leaf expressions contain no identifiers.
	}
}
// lhsForExpr inspects a single assignment target (left-hand side) under
// the given assignment token and updates the usage state in scope.
//
// Targets that resolve to a trackable name (identifiers, and starred
// identifiers/selected fields) are checked: a plain `=` over a variable
// still in the Unused state means its previous value was never read,
// which is reported; a plain `=` to a declared-only (Nil) variable
// promotes it to Unused. Index and selector targets are treated as reads
// of their sub-expressions instead. New names introduced by `=` or `:=`
// start as Unused, except the blank identifier and the conventional
// `err`, which are deliberately exempt.
func lhsForExpr(info fileMetadata, exp ast.Expr, tok token.Token, scope map[string]varUsage) {
	if exp == nil {
		return
	}
	var name string
	var pos token.Pos
	switch expv := exp.(type) {
	case *ast.Ident:
		name = expv.Name
		pos = expv.Pos()
	case *ast.StarExpr:
		// Assignment through a dereference: track the pointed-to
		// identifier or selected field by name.
		switch x := expv.X.(type) {
		case *ast.SelectorExpr:
			name = x.Sel.String()
			pos = expv.Pos()
		case *ast.Ident:
			name = x.Name
			pos = expv.Pos()
		default:
			cpos := info.fset.Position(expv.Pos())
			fmt.Printf("%s:Weird type of StarExpr: %s\n", cpos, reflect.TypeOf(expv.X))
			return
		}
	case *ast.IndexExpr:
		// Writing to an element uses both the container and the index.
		assignUsedForExpr(info, expv.X, scope)
		assignUsedForExpr(info, expv.Index, scope)
		return
	case *ast.SelectorExpr:
		// Writing to a field uses the receiver expression.
		assignUsedForExpr(info, expv.X, scope)
		return
	default:
		fmt.Println("noAssignStatement: weird assign type", reflect.TypeOf(exp))
		return
	}
	vinfo, ok := scope[name]
	if ok {
		if tok == token.ASSIGN && vinfo.state == Unused {
			// Previous assignment never read before being overwritten:
			// report both positions.
			vpos := info.fset.Position(vinfo.position)
			cpos := info.fset.Position(pos)
			fmt.Printf("%s:Declared variable `%s` which goes unused\n", vpos, name)
			fmt.Printf("%s:`%s` gets covered here\n", cpos, name)
			hasErrors = true
		}
		if tok == token.ASSIGN && vinfo.state == Nil {
			scope[name] = varUsage{Unused, vinfo.position}
		}
	} else {
		if tok == token.DEFINE || tok == token.ASSIGN {
			if name != "_" && name != "err" {
				scope[name] = varUsage{Unused, pos}
			}
		}
	}
}
// noAssignStatement walks a single statement and updates the variable-usage
// state in localScope: right-hand sides mark variables as Used (via
// assignUsedForExpr), left-hand sides are checked/recorded via lhsForExpr,
// and compound statements recurse into their sub-statements and bodies.
func noAssignStatement(info fileMetadata, st ast.Stmt, localScope map[string]varUsage) {
	if st == nil {
		return
	}
	switch v := st.(type) {
	// Things which we're checking
	case *ast.AssignStmt:
		// Process the RHS first so `x = x + 1` counts as a read of x
		// before the LHS assignment is examined.
		for _, rhs := range v.Rhs {
			assignUsedForExpr(info, rhs, localScope)
		}
		for _, exp := range v.Lhs {
			lhsForExpr(info, exp, v.Tok, localScope)
		}
	case *ast.DeclStmt:
		decl := v.Decl.(*ast.GenDecl)
		if decl.Tok == token.VAR {
			for _, spec := range decl.Specs {
				s := spec.(*ast.ValueSpec)
				// `var x error` declarations are deliberately exempt
				// from tracking.
				if typ, ok := s.Type.(*ast.Ident); ok {
					if typ.Name == "error" {
						continue
					}
				}
				for _, varname := range s.Names {
					if _, ok := localScope[varname.Name]; !ok {
						if varname.Name != "_" {
							// Declared but not yet assigned: state Nil.
							localScope[varname.Name] = varUsage{Nil, varname.Pos()}
						}
					}
				}
			}
		}
	// Things which may recurse
	case *ast.BlockStmt:
		checkNoAssignUnusedBody(info, v.List, localScope)
	case *ast.DeferStmt:
		//TODO(barakmich): Check this
		if *debug {
			fmt.Println("TODO: Check a defer statement")
		}
	case *ast.GoStmt:
		assignUsedForExpr(info, v.Call, localScope)
	case *ast.ForStmt:
		noAssignStatement(info, v.Init, localScope)
		assignUsedForExpr(info, v.Cond, localScope)
		noAssignStatement(info, v.Post, localScope)
		checkNoAssignUnusedBody(info, v.Body.List, localScope)
	case *ast.IfStmt:
		// Each branch gets its own scope; only variables that end up Used
		// inside a branch are merged back, so an assignment in one branch
		// is not reported against the other.
		ifScope := make(map[string]varUsage)
		elseScope := make(map[string]varUsage)
		noAssignStatement(info, v.Init, localScope)
		assignUsedForExpr(info, v.Cond, localScope)
		checkNoAssignUnusedBody(info, v.Body.List, ifScope)
		noAssignStatement(info, v.Else, elseScope)
		for k, varinfo := range ifScope {
			if varinfo.state == Used {
				localScope[k] = varinfo
			}
		}
		for k, varinfo := range elseScope {
			if varinfo.state == Used {
				localScope[k] = varinfo
			}
		}
	case *ast.RangeStmt:
		if v.Tok == token.ILLEGAL {
			pos := info.fset.Position(v.Pos())
			fmt.Printf("%s:Illegal range\n", pos)
		}
		// The ranged-over expression is a read; key/value are targets.
		assignUsedForExpr(info, v.X, localScope)
		lhsForExpr(info, v.Key, v.Tok, localScope)
		lhsForExpr(info, v.Value, v.Tok, localScope)
		checkNoAssignUnusedBody(info, v.Body.List, localScope)
	case *ast.SelectStmt:
		checkNoAssignUnusedBody(info, v.Body.List, localScope)
	case *ast.SwitchStmt:
		noAssignStatement(info, v.Init, localScope)
		checkNoAssignUnusedBody(info, v.Body.List, localScope)
	case *ast.TypeSwitchStmt:
		noAssignStatement(info, v.Init, localScope)
		noAssignStatement(info, v.Assign, localScope)
		checkNoAssignUnusedBody(info, v.Body.List, localScope)
	case *ast.CaseClause:
		// Like if/else: track the case body in a fresh scope and merge
		// back only variables that became Used.
		newScope := make(map[string]varUsage)
		checkNoAssignUnusedBody(info, v.Body, newScope)
		for k, varinfo := range newScope {
			if varinfo.state == Used {
				localScope[k] = varinfo
			}
		}
	case *ast.CommClause:
		checkNoAssignUnusedBody(info, v.Body, localScope)
	// Things which are easy
	case *ast.IncDecStmt:
		// IncDec shouldn't count as usage...
	case *ast.ReturnStmt:
		for _, res := range v.Results {
			assignUsedForExpr(info, res, localScope)
		}
	case *ast.SendStmt:
		assignUsedForExpr(info, v.Chan, localScope)
		assignUsedForExpr(info, v.Value, localScope)
	case *ast.ExprStmt:
		assignUsedForExpr(info, v.X, localScope)
	case *ast.LabeledStmt:
		noAssignStatement(info, v.Stmt, localScope)
	case *ast.BranchStmt:
	case *ast.EmptyStmt:
	case *ast.BadStmt:
		pos := info.fset.Position(v.Pos())
		if *debug {
			fmt.Printf("%s:Bad statement?\n", pos)
		}
	default:
		pos := info.fset.Position(v.Pos())
		if *debug {
			fmt.Println("The hell is", reflect.TypeOf(st), pos)
		}
	}
}
// CheckNoAssignUnused runs the unused-assignment check over every function
// declared at the top level of the file. Each function's parameters are
// seeded as Unused, the body is walked, and (in debug mode) anything that
// never reached the Used state is printed.
func CheckNoAssignUnused(info fileMetadata, file *ast.File) {
	for _, obj := range file.Scope.Objects {
		if obj.Kind != ast.Fun {
			continue
		}
		funcScope := make(map[string]varUsage)
		fn := obj.Decl.(*ast.FuncDecl)
		for _, field := range fn.Type.Params.List {
			for _, ident := range field.Names {
				funcScope[ident.Name] = varUsage{Unused, ident.Pos()}
			}
		}
		checkNoAssignUnusedBody(info, fn.Body.List, funcScope)
		if *debug {
			for name, usage := range funcScope {
				if usage.state == Used {
					continue
				}
				pos := info.fset.Position(usage.position)
				fmt.Printf("%s:DEBUG: `%s` apparently Unused?\n", pos, name)
			}
		}
	}
}
|
package aggregated_logging
import (
"errors"
"testing"
kapi "k8s.io/kubernetes/pkg/api"
deployapi "github.com/openshift/origin/pkg/deploy/api"
"github.com/openshift/origin/pkg/diagnostics/log"
)
const (
	// Keys into fakeDeploymentConfigsDiagnostic.clienterrors selecting
	// which fake client call should return a canned error.
	testDcPodsKey = "pods"
	testDcKey     = "deploymentconfigs"
	// Sentinel component name: pods added with this name skip the
	// DeploymentConfig annotation. (The "Annoation" typo is in the
	// original literal; it is only ever compared against itself, so it
	// is harmless.)
	testSkipAnnotation = "skipAddAnnoation"
)
// fakeDeploymentConfigsDiagnostic is a test double serving canned
// DeploymentConfig/Pod lists, or canned errors keyed by testDcKey /
// testDcPodsKey, to checkDeploymentConfigs.
type fakeDeploymentConfigsDiagnostic struct {
	fakeDiagnostic
	fakePods     kapi.PodList
	fakeDcs      deployapi.DeploymentConfigList
	clienterrors map[string]error
}
// newFakeDeploymentConfigsDiagnostic builds an empty fake with no canned
// errors configured.
func newFakeDeploymentConfigsDiagnostic(t *testing.T) *fakeDeploymentConfigsDiagnostic {
	fake := &fakeDeploymentConfigsDiagnostic{
		fakeDiagnostic: *newFakeDiagnostic(t),
		clienterrors:   map[string]error{},
	}
	return fake
}
// addDeployConfigFor registers a fake DeploymentConfig labeled with the
// given logging component.
func (f *fakeDeploymentConfigsDiagnostic) addDeployConfigFor(component string) {
	dc := deployapi.DeploymentConfig{
		ObjectMeta: kapi.ObjectMeta{
			Name:   component + "Name",
			Labels: map[string]string{componentKey: component},
		},
	}
	f.fakeDcs.Items = append(f.fakeDcs.Items, dc)
}
// addPodFor registers a fake pod in the given phase; unless comp is the
// skip sentinel, the pod is annotated as belonging to that deployment.
func (f *fakeDeploymentConfigsDiagnostic) addPodFor(comp string, state kapi.PodPhase) {
	meta := kapi.ObjectMeta{
		Name:        comp,
		Annotations: map[string]string{},
	}
	if comp != testSkipAnnotation {
		meta.Annotations[deployapi.DeploymentConfigAnnotation] = comp
	}
	f.fakePods.Items = append(f.fakePods.Items, kapi.Pod{
		ObjectMeta: meta,
		Spec:       kapi.PodSpec{},
		Status:     kapi.PodStatus{Phase: state},
	})
}
// deploymentconfigs returns the canned DC list, or the canned error if one
// was registered under testDcKey.
func (f *fakeDeploymentConfigsDiagnostic) deploymentconfigs(project string, options kapi.ListOptions) (*deployapi.DeploymentConfigList, error) {
	f.test.Logf(">> calling deploymentconfigs: %s", f.clienterrors)
	if err, found := f.clienterrors[testDcKey]; found {
		f.test.Logf(">> error key found..returning %s", err)
		return nil, err
	}
	f.test.Logf(">> error key not found..")
	return &f.fakeDcs, nil
}
// pods returns the canned pod list, or the canned error if one was
// registered under testDcPodsKey.
func (f *fakeDeploymentConfigsDiagnostic) pods(project string, options kapi.ListOptions) (*kapi.PodList, error) {
	if err, found := f.clienterrors[testDcPodsKey]; found {
		return nil, err
	}
	return &f.fakePods, nil
}
// TestCheckDcWhenErrorResponseFromClientRetrievingDc makes the DC listing
// fail and expects AGL0045.
func TestCheckDcWhenErrorResponseFromClientRetrievingDc(t *testing.T) {
	diag := newFakeDeploymentConfigsDiagnostic(t)
	diag.clienterrors[testDcKey] = errors.New("error")
	checkDeploymentConfigs(diag, diag, fakeProject)
	diag.assertMessage("AGL0045", "Exp. an error when client returns error retrieving dcs", log.ErrorLevel)
	diag.dumpMessages()
}
// TestCheckDcWhenNoDeployConfigsFound expects AGL0047 when the project has
// no DeploymentConfigs at all.
func TestCheckDcWhenNoDeployConfigsFound(t *testing.T) {
	diag := newFakeDeploymentConfigsDiagnostic(t)
	checkDeploymentConfigs(diag, diag, fakeProject)
	diag.assertMessage("AGL0047", "Exp. an error when no DeploymentConfigs are found", log.ErrorLevel)
	diag.dumpMessages()
}
// TestCheckDcWhenOpsOrOtherDeployConfigsMissing seeds only the ES
// component and expects AGL0060 (missing ops DCs, informational) plus
// AGL0065 (missing non-ops DCs, error).
func TestCheckDcWhenOpsOrOtherDeployConfigsMissing(t *testing.T) {
	diag := newFakeDeploymentConfigsDiagnostic(t)
	diag.addDeployConfigFor(componentNameEs)
	checkDeploymentConfigs(diag, diag, fakeProject)
	diag.assertMessage("AGL0060", "Exp. a warning when ops DeploymentConfigs are missing", log.InfoLevel)
	diag.assertMessage("AGL0065", "Exp. an error when non-ops DeploymentConfigs are missing", log.ErrorLevel)
	diag.dumpMessages()
}
// TestCheckDcWhenClientErrorListingPods registers every logging
// component's DC but makes the pod listing fail, expecting AGL0075.
func TestCheckDcWhenClientErrorListingPods(t *testing.T) {
	diag := newFakeDeploymentConfigsDiagnostic(t)
	diag.clienterrors[testDcPodsKey] = errors.New("New pod error")
	for _, comp := range loggingComponents.List() {
		diag.addDeployConfigFor(comp)
	}
	checkDeploymentConfigs(diag, diag, fakeProject)
	diag.assertMessage("AGL0075", "Exp. an error when retrieving pods errors", log.ErrorLevel)
	diag.dumpMessages()
}
// TestCheckDcWhenNoPodsFoundMatchingDeployConfig registers DCs for every
// logging component but no pods, expecting AGL0080.
func TestCheckDcWhenNoPodsFoundMatchingDeployConfig(t *testing.T) {
	d := newFakeDeploymentConfigsDiagnostic(t)
	for _, comp := range loggingComponents.List() {
		d.addDeployConfigFor(comp)
	}
	checkDeploymentConfigs(d, d, fakeProject)
	// Fixed failure description: it previously read "when retrieving pods
	// errors", copy-pasted from the client-error test; AGL0080 is about
	// DCs with no matching pods.
	d.assertMessage("AGL0080", "Exp. an error when no pods are found matching a DeploymentConfig", log.ErrorLevel)
	d.dumpMessages()
}
// TestCheckDcWhenInVariousStates covers the per-pod checks: a pod missing
// the DeploymentConfig annotation (AGL0085), a pod that is not running
// (AGL0090), and a DeploymentConfig with no pods at all (AGL0095).
func TestCheckDcWhenInVariousStates(t *testing.T) {
	diag := newFakeDeploymentConfigsDiagnostic(t)
	for _, comp := range loggingComponents.List() {
		diag.addDeployConfigFor(comp)
		diag.addPodFor(comp, kapi.PodRunning)
	}
	diag.addPodFor(testSkipAnnotation, kapi.PodRunning)
	diag.addPodFor("someothercomponent", kapi.PodPending)
	diag.addDeployConfigFor("somerandom component")
	checkDeploymentConfigs(diag, diag, fakeProject)
	diag.assertMessage("AGL0085", "Exp. a warning when pod is missing DeployConfig annotation", log.WarnLevel)
	diag.assertMessage("AGL0090", "Exp. an error when pod is not in running state", log.ErrorLevel)
	diag.assertMessage("AGL0095", "Exp. an error when pods not found for a DeployConfig", log.ErrorLevel)
	diag.dumpMessages()
}
Fix comment style in pkg/diagnostics/cluster/aggregated_logging/deploymentconfigs_test.go
package aggregated_logging
import (
"errors"
"testing"
kapi "k8s.io/kubernetes/pkg/api"
deployapi "github.com/openshift/origin/pkg/deploy/api"
"github.com/openshift/origin/pkg/diagnostics/log"
)
const (
	// Keys into fakeDeploymentConfigsDiagnostic.clienterrors selecting
	// which fake client call should return a canned error.
	testDcPodsKey = "pods"
	testDcKey     = "deploymentconfigs"
	// Sentinel component name: pods added with this name skip the
	// DeploymentConfig annotation. (The "Annoation" typo is in the
	// original literal; it is only ever compared against itself, so it
	// is harmless.)
	testSkipAnnotation = "skipAddAnnoation"
)
// fakeDeploymentConfigsDiagnostic is a test double serving canned
// DeploymentConfig/Pod lists, or canned errors keyed by testDcKey /
// testDcPodsKey, to checkDeploymentConfigs.
type fakeDeploymentConfigsDiagnostic struct {
	fakeDiagnostic
	fakePods     kapi.PodList
	fakeDcs      deployapi.DeploymentConfigList
	clienterrors map[string]error
}
// newFakeDeploymentConfigsDiagnostic builds an empty fake with no canned
// errors configured.
func newFakeDeploymentConfigsDiagnostic(t *testing.T) *fakeDeploymentConfigsDiagnostic {
	fake := &fakeDeploymentConfigsDiagnostic{
		fakeDiagnostic: *newFakeDiagnostic(t),
		clienterrors:   map[string]error{},
	}
	return fake
}
// addDeployConfigFor registers a fake DeploymentConfig labeled with the
// given logging component.
func (f *fakeDeploymentConfigsDiagnostic) addDeployConfigFor(component string) {
	dc := deployapi.DeploymentConfig{
		ObjectMeta: kapi.ObjectMeta{
			Name:   component + "Name",
			Labels: map[string]string{componentKey: component},
		},
	}
	f.fakeDcs.Items = append(f.fakeDcs.Items, dc)
}
// addPodFor registers a fake pod in the given phase; unless comp is the
// skip sentinel, the pod is annotated as belonging to that deployment.
func (f *fakeDeploymentConfigsDiagnostic) addPodFor(comp string, state kapi.PodPhase) {
	meta := kapi.ObjectMeta{
		Name:        comp,
		Annotations: map[string]string{},
	}
	if comp != testSkipAnnotation {
		meta.Annotations[deployapi.DeploymentConfigAnnotation] = comp
	}
	f.fakePods.Items = append(f.fakePods.Items, kapi.Pod{
		ObjectMeta: meta,
		Spec:       kapi.PodSpec{},
		Status:     kapi.PodStatus{Phase: state},
	})
}
// deploymentconfigs returns the canned DC list, or the canned error if one
// was registered under testDcKey.
func (f *fakeDeploymentConfigsDiagnostic) deploymentconfigs(project string, options kapi.ListOptions) (*deployapi.DeploymentConfigList, error) {
	f.test.Logf(">> calling deploymentconfigs: %s", f.clienterrors)
	if err, found := f.clienterrors[testDcKey]; found {
		f.test.Logf(">> error key found..returning %s", err)
		return nil, err
	}
	f.test.Logf(">> error key not found..")
	return &f.fakeDcs, nil
}
// pods returns the canned pod list, or the canned error if one was
// registered under testDcPodsKey.
func (f *fakeDeploymentConfigsDiagnostic) pods(project string, options kapi.ListOptions) (*kapi.PodList, error) {
	if err, found := f.clienterrors[testDcPodsKey]; found {
		return nil, err
	}
	return &f.fakePods, nil
}
// TestCheckDcWhenErrorResponseFromClientRetrievingDc makes the DC listing
// fail and expects AGL0045.
func TestCheckDcWhenErrorResponseFromClientRetrievingDc(t *testing.T) {
	diag := newFakeDeploymentConfigsDiagnostic(t)
	diag.clienterrors[testDcKey] = errors.New("error")
	checkDeploymentConfigs(diag, diag, fakeProject)
	diag.assertMessage("AGL0045", "Exp. an error when client returns error retrieving dcs", log.ErrorLevel)
	diag.dumpMessages()
}
// TestCheckDcWhenNoDeployConfigsFound expects AGL0047 when the project has
// no DeploymentConfigs at all.
func TestCheckDcWhenNoDeployConfigsFound(t *testing.T) {
	diag := newFakeDeploymentConfigsDiagnostic(t)
	checkDeploymentConfigs(diag, diag, fakeProject)
	diag.assertMessage("AGL0047", "Exp. an error when no DeploymentConfigs are found", log.ErrorLevel)
	diag.dumpMessages()
}
// TestCheckDcWhenOpsOrOtherDeployConfigsMissing seeds only the ES
// component and expects AGL0060 (missing ops DCs, informational) plus
// AGL0065 (missing non-ops DCs, error).
func TestCheckDcWhenOpsOrOtherDeployConfigsMissing(t *testing.T) {
	diag := newFakeDeploymentConfigsDiagnostic(t)
	diag.addDeployConfigFor(componentNameEs)
	checkDeploymentConfigs(diag, diag, fakeProject)
	diag.assertMessage("AGL0060", "Exp. a warning when ops DeploymentConfigs are missing", log.InfoLevel)
	diag.assertMessage("AGL0065", "Exp. an error when non-ops DeploymentConfigs are missing", log.ErrorLevel)
	diag.dumpMessages()
}
// TestCheckDcWhenClientErrorListingPods registers every logging
// component's DC but makes the pod listing fail, expecting AGL0075.
func TestCheckDcWhenClientErrorListingPods(t *testing.T) {
	diag := newFakeDeploymentConfigsDiagnostic(t)
	diag.clienterrors[testDcPodsKey] = errors.New("New pod error")
	for _, comp := range loggingComponents.List() {
		diag.addDeployConfigFor(comp)
	}
	checkDeploymentConfigs(diag, diag, fakeProject)
	diag.assertMessage("AGL0075", "Exp. an error when retrieving pods errors", log.ErrorLevel)
	diag.dumpMessages()
}
// TestCheckDcWhenNoPodsFoundMatchingDeployConfig registers DCs for every
// logging component but no pods, expecting AGL0080.
func TestCheckDcWhenNoPodsFoundMatchingDeployConfig(t *testing.T) {
	d := newFakeDeploymentConfigsDiagnostic(t)
	for _, comp := range loggingComponents.List() {
		d.addDeployConfigFor(comp)
	}
	checkDeploymentConfigs(d, d, fakeProject)
	// Fixed failure description: it previously read "when retrieving pods
	// errors", copy-pasted from the client-error test; AGL0080 is about
	// DCs with no matching pods.
	d.assertMessage("AGL0080", "Exp. an error when no pods are found matching a DeploymentConfig", log.ErrorLevel)
	d.dumpMessages()
}
// TestCheckDcWhenInVariousStates covers the per-pod checks: a pod missing
// the DeploymentConfig annotation (AGL0085), a pod that is not running
// (AGL0090), and a DeploymentConfig with no pods at all (AGL0095).
func TestCheckDcWhenInVariousStates(t *testing.T) {
	diag := newFakeDeploymentConfigsDiagnostic(t)
	for _, comp := range loggingComponents.List() {
		diag.addDeployConfigFor(comp)
		diag.addPodFor(comp, kapi.PodRunning)
	}
	diag.addPodFor(testSkipAnnotation, kapi.PodRunning)
	diag.addPodFor("someothercomponent", kapi.PodPending)
	diag.addDeployConfigFor("somerandom component")
	checkDeploymentConfigs(diag, diag, fakeProject)
	diag.assertMessage("AGL0085", "Exp. a warning when pod is missing DeployConfig annotation", log.WarnLevel)
	diag.assertMessage("AGL0090", "Exp. an error when pod is not in running state", log.ErrorLevel)
	diag.assertMessage("AGL0095", "Exp. an error when pods not found for a DeployConfig", log.ErrorLevel)
	diag.dumpMessages()
}
|
package acceptance_test
import (
"encoding/json"
"fmt"
"github.com/cloudfoundry-incubator/cf-test-helpers/cf"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gexec"
)
// Verifies that each backing service advertised by the Cloud Controller
// v2 API (/v2/services) declares the expected 'shareable' flag in its
// catalog "extra" metadata blob.
var _ = Describe("Common service tests", func() {
	Context("Shareable services", func() {
		// Expected shareability per service label.
		shareableServices := map[string]bool{
			"elasticsearch": true,
			"influxdb":      true,
			"mysql":         true,
			"postgres":      true,
			"redis":         true,
			"aws-s3-bucket": false,
			"cdn-route":     false,
		}
		It("is service shareable", func() {
			retrieveServicesCommand := cf.Cf("curl", "/v2/services")
			Expect(retrieveServicesCommand.Wait(testConfig.DefaultTimeoutDuration())).To(Exit(0))
			// Only the fields needed from the /v2/services response.
			// Entity.Extra carries the broker catalog metadata as a raw
			// JSON string; the shareable flag is matched by substring.
			var servicesCommandResp struct {
				TotalResults int `json:"total_results"`
				Resources    []struct {
					Entity struct {
						Label       string `json:"label"`
						Description string `json:"description"`
						Extra       string `json:"extra"`
					}
				}
			}
			err := json.Unmarshal(retrieveServicesCommand.Buffer().Contents(), &servicesCommandResp)
			Expect(err).NotTo(HaveOccurred())
			var message string
			// During the pipeline run several acceptance tests run in
			// parallel, which can push the total number of services in the
			// CF instance above 7 (the normal count); so while checking for
			// 'shareable' services we filter out the fake services created
			// by other tests. A fake service is identified by the value
			// 'fake service' in its description field.
			fakeServicesCount := 0
			for _, service := range servicesCommandResp.Resources {
				if service.Entity.Description == "fake service" {
					fakeServicesCount++
					continue
				}
				message = fmt.Sprintf("verifying that %s backing service is shareable", service.Entity.Label)
				By(message)
				if shareableServices[service.Entity.Label] {
					Expect(service.Entity.Extra).To(ContainSubstring("\"shareable\":"),
						"Expected %s to have 'shareable' parameter", service.Entity.Label)
					Expect(service.Entity.Extra).To(ContainSubstring("\"shareable\": true"),
						"Expected %s to be shareable - i.e.: 'shareable' parameter set to 'true'", service.Entity.Label)
				} else {
					Expect(service.Entity.Extra).ToNot(ContainSubstring("\"shareable\": false"),
						"Expected %s NOT to have 'shareable' parameter or to be set to 'false'", service.Entity.Label)
				}
			}
			Expect(servicesCommandResp.TotalResults-fakeServicesCount).To(BeNumerically("==", len(shareableServices)), "the amount of services doesn't match")
		})
	})
})
Refactor common_service_test to use the CAPI v3 API.
common_service_test refactored to use the /v3/service_offerings REST API.
package acceptance_test
import (
"encoding/json"
"fmt"
"github.com/cloudfoundry-incubator/cf-test-helpers/cf"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gexec"
)
// Verifies that each backing service advertised by the Cloud Controller
// v3 API (/v3/service_offerings) declares the expected 'shareable' flag
// in its broker catalog metadata.
var _ = Describe("Common service tests", func() {
	Context("Shareable services", func() {
		// Expected shareability per service-offering name.
		shareableServices := map[string]bool{
			"elasticsearch": true,
			"influxdb":      true,
			"mysql":         true,
			"postgres":      true,
			"redis":         true,
			"aws-s3-bucket": false,
			"cdn-route":     false,
		}
		It("is service shareable", func() {
			retrieveServicesCommand := cf.Cf("curl", "/v3/service_offerings")
			Expect(retrieveServicesCommand.Wait(testConfig.DefaultTimeoutDuration())).To(Exit(0))
			var servicesCommandResp struct {
				Pagination struct {
					TotalResults int `json:"total_results"`
				}
				Resources []struct {
					Name          string `json:"name"`
					Description   string `json:"description"`
					BrokerCatalog struct {
						Metadata struct {
							// Pointer so "parameter absent" (nil) can be
							// distinguished from an explicit false; with a
							// plain bool, the NotTo(BeNil()) assertion below
							// could never fail and the "has 'shareable'
							// parameter" check was vacuous.
							Shareable *bool `json:"shareable"`
						}
					} `json:"broker_catalog"`
				}
			}
			err := json.Unmarshal(retrieveServicesCommand.Buffer().Contents(), &servicesCommandResp)
			Expect(err).NotTo(HaveOccurred())
			// During the pipeline run several acceptance tests run in
			// parallel, which can push the total service count above the
			// normal 7, so fake services (description == "fake service")
			// created by other tests are excluded from the count.
			fakeServicesCount := 0
			for _, service := range servicesCommandResp.Resources {
				if service.Description == "fake service" {
					fakeServicesCount++
					continue
				}
				By(fmt.Sprintf("verifying that %s backing service is shareable", service.Name))
				shareable := service.BrokerCatalog.Metadata.Shareable
				if shareableServices[service.Name] {
					Expect(shareable).NotTo(BeNil(),
						"Expected %s to have 'shareable' parameter", service.Name)
					Expect(*shareable).To(BeTrue(),
						"Expected %s to be shareable - i.e.: 'shareable' parameter set to 'true'", service.Name)
				} else if shareable != nil {
					// Absent parameter is acceptable; an explicit value
					// must be false.
					Expect(*shareable).To(BeFalse(),
						"Expected %s NOT to have 'shareable' parameter or to be set to 'false'", service.Name)
				}
			}
			Expect(servicesCommandResp.Pagination.TotalResults-fakeServicesCount).To(BeNumerically("==", len(shareableServices)), "the amount of services doesn't match")
		})
	})
})
|
package rest
import (
"sort"
"strings"
"github.com/lfq7413/tomato/auth"
)
// Query represents a single REST query against a class, carrying the
// caller's auth context, the parsed where clause, and the options
// extracted from the request by NewQuery.
type Query struct {
	auth        *auth.Auth
	className   string
	where       map[string]interface{}
	findOptions map[string]interface{} // "skip", "limit", "sort" entries
	response    map[string]interface{}
	doCount     bool       // set when the caller sent a "count" option
	include     [][]string // nested include paths, e.g. [["user"],["user","session"]]
	keys        []string   // restricted field list; objectId/createdAt/updatedAt always appended
}
// NewQuery builds a Query for className from a where clause and the raw
// request options. Recognized options:
//   - keys:    comma-separated field list; objectId, createdAt and
//     updatedAt are always appended
//   - count:   request a count. NOTE(review): the option's value is
//     ignored — any "count" key sets doCount, even count=false; confirm
//     callers only send it when a count is wanted.
//   - skip / limit: forwarded verbatim into findOptions
//   - order:   comma-separated field names, "-" prefix for descending
//   - include: comma-separated dotted paths, expanded so every prefix of
//     each path is included
func NewQuery(
	auth *auth.Auth,
	className string,
	where map[string]interface{},
	options map[string]interface{},
) *Query {
	query := &Query{
		auth:        auth,
		className:   className,
		where:       where,
		findOptions: map[string]interface{}{},
		response:    nil,
		doCount:     false,
		include:     [][]string{},
		keys:        []string{},
	}
	for k, v := range options {
		switch k {
		case "keys":
			if s, ok := v.(string); ok {
				query.keys = strings.Split(s, ",")
				query.keys = append(query.keys, "objectId", "createdAt", "updatedAt")
			}
		case "count":
			query.doCount = true
		case "skip":
			query.findOptions["skip"] = v
		case "limit":
			query.findOptions["limit"] = v
		case "order":
			if s, ok := v.(string); ok {
				fields := strings.Split(s, ",")
				sortMap := map[string]int{}
				// Renamed the loop variable (was `v`, shadowing the outer
				// option value) for clarity.
				for _, field := range fields {
					if strings.HasPrefix(field, "-") {
						sortMap[field[1:]] = -1 // descending
					} else {
						sortMap[field] = 1 // ascending
					}
				}
				query.findOptions["sort"] = sortMap
			}
		case "include":
			if s, ok := v.(string); ok { // v = "user.session,name.friend"
				paths := strings.Split(s, ",") // paths = ["user.session","name.friend"]
				pathSet := []string{}
				for _, path := range paths {
					// Add every prefix of the dotted path so parent
					// objects are included before their children.
					parts := strings.Split(path, ".") // parts = ["user","session"]
					for length := 1; length <= len(parts); length++ {
						pathSet = append(pathSet, strings.Join(parts[0:length], "."))
					} // pathSet = ["user","user.session"]
				} // pathSet = ["user","user.session","name","name.friend"]
				sort.Strings(pathSet) // pathSet = ["name","name.friend","user","user.session"]
				for _, set := range pathSet {
					query.include = append(query.include, strings.Split(set, "."))
				} // query.include = [["name"],["name","friend"],["user"],["user","session"]]
			}
		default:
		}
	}
	return query
}
定义查询过程
package rest
import (
"sort"
"strings"
"github.com/lfq7413/tomato/auth"
)
// Query represents a single REST query against a class, carrying the
// caller's auth context, the parsed where clause, and the options
// extracted from the request by NewQuery.
type Query struct {
	auth        *auth.Auth
	className   string
	where       map[string]interface{}
	findOptions map[string]interface{} // "skip", "limit", "sort" entries
	response    map[string]interface{}
	doCount     bool       // set when the caller sent a "count" option
	include     [][]string // nested include paths, e.g. [["user"],["user","session"]]
	keys        []string   // restricted field list; objectId/createdAt/updatedAt always appended
}
// NewQuery builds a Query for className from a where clause and the raw
// request options. Recognized options:
//   - keys:    comma-separated field list; objectId, createdAt and
//     updatedAt are always appended
//   - count:   request a count. NOTE(review): the option's value is
//     ignored — any "count" key sets doCount, even count=false; confirm
//     callers only send it when a count is wanted.
//   - skip / limit: forwarded verbatim into findOptions
//   - order:   comma-separated field names, "-" prefix for descending
//   - include: comma-separated dotted paths, expanded so every prefix of
//     each path is included
func NewQuery(
	auth *auth.Auth,
	className string,
	where map[string]interface{},
	options map[string]interface{},
) *Query {
	query := &Query{
		auth:        auth,
		className:   className,
		where:       where,
		findOptions: map[string]interface{}{},
		response:    nil,
		doCount:     false,
		include:     [][]string{},
		keys:        []string{},
	}
	for k, v := range options {
		switch k {
		case "keys":
			if s, ok := v.(string); ok {
				query.keys = strings.Split(s, ",")
				query.keys = append(query.keys, "objectId", "createdAt", "updatedAt")
			}
		case "count":
			query.doCount = true
		case "skip":
			query.findOptions["skip"] = v
		case "limit":
			query.findOptions["limit"] = v
		case "order":
			if s, ok := v.(string); ok {
				fields := strings.Split(s, ",")
				sortMap := map[string]int{}
				// Renamed the loop variable (was `v`, shadowing the outer
				// option value) for clarity.
				for _, field := range fields {
					if strings.HasPrefix(field, "-") {
						sortMap[field[1:]] = -1 // descending
					} else {
						sortMap[field] = 1 // ascending
					}
				}
				query.findOptions["sort"] = sortMap
			}
		case "include":
			if s, ok := v.(string); ok { // v = "user.session,name.friend"
				paths := strings.Split(s, ",") // paths = ["user.session","name.friend"]
				pathSet := []string{}
				for _, path := range paths {
					// Add every prefix of the dotted path so parent
					// objects are included before their children.
					parts := strings.Split(path, ".") // parts = ["user","session"]
					for length := 1; length <= len(parts); length++ {
						pathSet = append(pathSet, strings.Join(parts[0:length], "."))
					} // pathSet = ["user","user.session"]
				} // pathSet = ["user","user.session","name","name.friend"]
				sort.Strings(pathSet) // pathSet = ["name","name.friend","user","user.session"]
				for _, set := range pathSet {
					query.include = append(query.include, strings.Split(set, "."))
				} // query.include = [["name"],["name","friend"],["user"],["user","session"]]
			}
		default:
		}
	}
	return query
}
// Execute runs the full query pipeline in order and returns the response
// map. NOTE(review): every step returns an error that is silently
// discarded here; all steps are currently stubs returning nil, but once
// implemented these errors should be propagated or reported.
func (q *Query) Execute() map[string]interface{} {
	q.getUserAndRoleACL()
	q.validateClientClassCreation()
	q.replaceSelect()
	q.replaceDontSelect()
	q.replaceInQuery()
	q.replaceNotInQuery()
	q.runFind()
	q.runCount()
	q.handleInclude()
	return q.response
}
// The methods below are placeholders for the query pipeline steps invoked
// by Execute; each currently succeeds without doing any work.

// getUserAndRoleACL resolves the ACL for the requesting user and roles.
// TODO: not yet implemented.
func (q *Query) getUserAndRoleACL() error {
	return nil
}

// validateClientClassCreation checks whether the client may create the
// class. TODO: not yet implemented.
func (q *Query) validateClientClassCreation() error {
	return nil
}

// replaceSelect rewrites $select clauses. TODO: not yet implemented.
func (q *Query) replaceSelect() error {
	return nil
}

// replaceDontSelect rewrites $dontSelect clauses. TODO: not yet implemented.
func (q *Query) replaceDontSelect() error {
	return nil
}

// replaceInQuery rewrites $inQuery clauses. TODO: not yet implemented.
func (q *Query) replaceInQuery() error {
	return nil
}

// replaceNotInQuery rewrites $notInQuery clauses. TODO: not yet implemented.
func (q *Query) replaceNotInQuery() error {
	return nil
}

// runFind performs the find operation. TODO: not yet implemented.
func (q *Query) runFind() error {
	return nil
}

// runCount performs the count operation when doCount is set.
// TODO: not yet implemented.
func (q *Query) runCount() error {
	return nil
}

// handleInclude resolves the nested include paths. TODO: not yet implemented.
func (q *Query) handleInclude() error {
	return nil
}
|
package goformkeeper
import (
"testing"
)
// TestResultRequired exercises the "required" failure path: an empty
// Result gains a failure via putRequiredFailure and then reports it
// through every accessor.
func TestResultRequired(t *testing.T) {
	result := NewResult()
	if result.HasFailure() {
		t.Errorf("Result shouldn't have failure")
	}
	result.putRequiredFailure("Field01", "Field01 Is Empty")
	if !result.HasFailure() {
		t.Errorf("Result should have failure")
	}
	if !result.FailedOn("Field01") {
		t.Errorf("FailedOn(\"Field01\") should return true")
	}
	if !result.FailedOnConstraint("Field01", "required") {
		t.Errorf("FailedOnConstraint(\"Field01\", \"required\") should return true")
	}
	if got := result.MessageOn("Field01"); got != "Field01 Is Empty" {
		t.Errorf("MessageOn(\"required\") returns invalid value %s", got)
	}
	if got := result.MessageOnConstraint("Field01", "required"); got != "Field01 Is Empty" {
		t.Errorf("MessageOnConstraint(\"Field01\", \"required\") returns invalid value %s", got)
	}
}
// TestResult2 exercises failures added via NewFailureForField/AddFailure:
// per-field and per-constraint lookups, plus the ordering of Messages()
// and MessagesOn() when a field carries multiple constraint failures.
func TestResult2(t *testing.T) {
	r := NewResult()
	if r.HasFailure() {
		t.Errorf("Result shouldn't have failure")
	}
	f01 := NewFailureForField("Field02", "Field02 Has Error")
	f01.failOnConstraint("length", "Field02 Length is Invalid")
	r.AddFailure(f01)
	if !r.HasFailure() {
		t.Errorf("Result should have failure")
	}
	// Field01 was never registered; only Field02 should report failure.
	if r.FailedOn("Field01") {
		t.Errorf("FailedOn(\"Field01\") should return false")
	}
	if !r.FailedOn("Field02") {
		t.Errorf("FailedOn(\"Field02\") should return true")
	}
	if !r.FailedOnConstraint("Field02", "length") {
		t.Errorf("FailedOnConstraint(\"Field02\", \"required\") should return true")
	}
	// MessageOn yields the field-level message; MessageOnConstraint the
	// per-constraint one.
	if r.MessageOn("Field02") != "Field02 Has Error" {
		t.Errorf("MessageOn(\"Field02\") returns invalid value %s", r.MessageOn("Field02"))
	}
	if r.MessageOnConstraint("Field02", "length") != "Field02 Length is Invalid" {
		t.Errorf("MessageOnConstraint(\"Field02\", \"length\") returns invalid value %s", r.MessageOnConstraint("Field02", "length"))
	}
	// Add a second constraint to the existing failure and a failure for a
	// new field: Messages() should report one field-level message per
	// failed field, in the order the failures were added.
	f01.failOnConstraint("email", "Field02 is not Email Address")
	f02 := NewFailureForField("Field03", "Field03 Has Error")
	f02.failOnConstraint("length", "Field03 Length is Invalid")
	r.AddFailure(f02)
	m := r.Messages()
	if len(m) != 2 {
		t.Errorf("Messages() returns invalid number")
	}
	if m[0] != "Field02 Has Error" {
		t.Errorf("first error message returns wrong string: %s", m[0])
	}
	if m[1] != "Field03 Has Error" {
		t.Errorf("second error message returns wrong string: %s", m[1])
	}
	// MessagesOn returns the per-constraint messages for one field, in
	// the order the constraints failed.
	m2 := r.MessagesOn("Field02")
	if len(m2) != 2 {
		t.Errorf("MessagesOn(\"Field02\") returns invalid number")
	}
	if m2[0] != "Field02 Length is Invalid" {
		t.Errorf("first error message returns wrong string: %s", m2[0])
	}
	if m2[1] != "Field02 is not Email Address" {
		t.Errorf("second error message returns wrong string: %s", m2[1])
	}
}
Add test for Result.FailedFields
package goformkeeper
import (
"testing"
)
// TestResultRequired verifies that putRequiredFailure records a failure
// that is visible through FailedOn, FailedOnConstraint, MessageOn and
// MessageOnConstraint under the "required" constraint.
func TestResultRequired(t *testing.T) {
	r := NewResult()
	if r.HasFailure() {
		t.Errorf("Result shouldn't have failure")
	}
	r.putRequiredFailure("Field01", "Field01 Is Empty")
	if !r.HasFailure() {
		t.Errorf("Result should have failure")
	}
	if !r.FailedOn("Field01") {
		t.Errorf("FailedOn(\"Field01\") should return true")
	}
	if !r.FailedOnConstraint("Field01", "required") {
		t.Errorf("FailedOnConstraint(\"Field01\", \"required\") should return true")
	}
	if r.MessageOn("Field01") != "Field01 Is Empty" {
		// Fixed: the message previously said MessageOn("required"); the argument
		// under test is the field name "Field01".
		t.Errorf("MessageOn(\"Field01\") returns invalid value %s", r.MessageOn("Field01"))
	}
	if r.MessageOnConstraint("Field01", "required") != "Field01 Is Empty" {
		t.Errorf("MessageOnConstraint(\"Field01\", \"required\") returns invalid value %s", r.MessageOnConstraint("Field01", "required"))
	}
}
// TestResult2 exercises failure accumulation on a Result: adding failures
// for multiple fields, querying by field and by constraint, collecting the
// aggregated messages, and listing the failed field names.
func TestResult2(t *testing.T) {
	r := NewResult()
	if r.HasFailure() {
		t.Errorf("Result shouldn't have failure")
	}
	f01 := NewFailureForField("Field02", "Field02 Has Error")
	f01.failOnConstraint("length", "Field02 Length is Invalid")
	r.AddFailure(f01)
	if !r.HasFailure() {
		t.Errorf("Result should have failure")
	}
	if r.FailedOn("Field01") {
		t.Errorf("FailedOn(\"Field01\") should return false")
	}
	if !r.FailedOn("Field02") {
		t.Errorf("FailedOn(\"Field02\") should return true")
	}
	if !r.FailedOnConstraint("Field02", "length") {
		// Fixed: the message previously referred to the "required" constraint,
		// but the constraint being checked here is "length".
		t.Errorf("FailedOnConstraint(\"Field02\", \"length\") should return true")
	}
	if r.MessageOn("Field02") != "Field02 Has Error" {
		t.Errorf("MessageOn(\"Field02\") returns invalid value %s", r.MessageOn("Field02"))
	}
	if r.MessageOnConstraint("Field02", "length") != "Field02 Length is Invalid" {
		t.Errorf("MessageOnConstraint(\"Field02\", \"length\") returns invalid value %s", r.MessageOnConstraint("Field02", "length"))
	}
	// Add a second constraint failure on the same field and a failure on a new field.
	f01.failOnConstraint("email", "Field02 is not Email Address")
	f02 := NewFailureForField("Field03", "Field03 Has Error")
	f02.failOnConstraint("length", "Field03 Length is Invalid")
	r.AddFailure(f02)
	// Messages() returns one top-level message per failed field, in insertion order.
	m := r.Messages()
	if len(m) != 2 {
		t.Errorf("Messages() returns invalid number")
	}
	if m[0] != "Field02 Has Error" {
		t.Errorf("first error message returns wrong string: %s", m[0])
	}
	if m[1] != "Field03 Has Error" {
		t.Errorf("second error message returns wrong string: %s", m[1])
	}
	// MessagesOn() returns the per-constraint messages for a single field.
	m2 := r.MessagesOn("Field02")
	if len(m2) != 2 {
		t.Errorf("MessagesOn(\"Field02\") returns invalid number")
	}
	if m2[0] != "Field02 Length is Invalid" {
		t.Errorf("first error message returns wrong string: %s", m2[0])
	}
	if m2[1] != "Field02 is not Email Address" {
		t.Errorf("second error message returns wrong string: %s", m2[1])
	}
	// FailedFields() lists the failed field names in insertion order.
	errorFields := r.FailedFields()
	if len(errorFields) != 2 {
		t.Errorf("FailedFields() returns invalid number")
	}
	if errorFields[0] != "Field02" {
		t.Errorf("FailedFields() returns wrong field: %s", errorFields[0])
	}
	if errorFields[1] != "Field03" {
		t.Errorf("FailedFields() returns wrong field: %s", errorFields[1])
	}
}
|
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"fmt"
"net/http"
"strings"
"sync"
"time"
systemd "github.com/coreos/go-systemd/daemon"
"github.com/emicklei/go-restful-swagger12"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/apimachinery"
"k8s.io/apimachinery/pkg/apimachinery/registered"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/audit"
genericapi "k8s.io/apiserver/pkg/endpoints"
"k8s.io/apiserver/pkg/endpoints/discovery"
apirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/apiserver/pkg/server/healthz"
"k8s.io/apiserver/pkg/server/routes"
restclient "k8s.io/client-go/rest"
openapicommon "k8s.io/kube-openapi/pkg/common"
)
// APIGroupInfo holds everything needed to install one API group: its meta
// information plus the REST storage backing each version/resource.
type APIGroupInfo struct {
	GroupMeta apimachinery.GroupMeta
	// Info about the resources in this group. It is a map from version to resource to the storage.
	VersionedResourcesStorageMap map[string]map[string]rest.Storage
	// OptionsExternalVersion controls the APIVersion used for common objects in the
	// schema like api.Status, api.DeleteOptions, and metav1.ListOptions. Other implementors may
	// define a version "v1beta1" but want to use the Kubernetes "v1" internal objects.
	// If nil, defaults to groupMeta.GroupVersion.
	// TODO: Remove this when https://github.com/kubernetes/kubernetes/issues/19018 is fixed.
	OptionsExternalVersion *schema.GroupVersion
	// MetaGroupVersion defaults to "meta.k8s.io/v1" and is the scheme group version used to decode
	// common API implementations like ListOptions. Future changes will allow this to vary by group
	// version (for when the inevitable meta/v2 group emerges).
	MetaGroupVersion *schema.GroupVersion
	// Scheme includes all of the types used by this group and how to convert between them (or
	// to convert objects from outside of this group that are accepted in this API).
	// TODO: replace with interfaces
	Scheme *runtime.Scheme
	// NegotiatedSerializer controls how this group encodes and decodes data
	NegotiatedSerializer runtime.NegotiatedSerializer
	// ParameterCodec performs conversions for query parameters passed to API calls
	ParameterCodec runtime.ParameterCodec
	// SubresourceGroupVersionKind contains the GroupVersionKind overrides for each subresource that is
	// accessible from this API group version. The GroupVersionKind is that of the external version of
	// the subresource. The key of this map should be the path of the subresource. The keys here should
	// match the keys in the Storage map above for subresources.
	SubresourceGroupVersionKind map[string]schema.GroupVersionKind
}
// GenericAPIServer contains state for a Kubernetes cluster api server.
type GenericAPIServer struct {
	// discoveryAddresses is used to build cluster IPs for discovery.
	discoveryAddresses discovery.Addresses
	// LoopbackClientConfig is a config for a privileged loopback connection to the API server
	LoopbackClientConfig *restclient.Config
	// minRequestTimeout is how short the request timeout can be. This is used to build the RESTHandler
	minRequestTimeout time.Duration
	// legacyAPIGroupPrefixes is used to set up URL parsing for authorization and for validating requests
	// to InstallLegacyAPIGroup
	legacyAPIGroupPrefixes sets.String
	// admissionControl is used to build the RESTStorage that backs an API Group.
	admissionControl admission.Interface
	// requestContextMapper provides a way to get the context for a request. It may be nil.
	requestContextMapper apirequest.RequestContextMapper
	// SecureServingInfo configures the secure (TLS) listener; when nil, no secure serving is started.
	SecureServingInfo *SecureServingInfo
	// effectiveSecurePort is the numerical port actually bound, set after listening starts.
	effectiveSecurePort int
	// ExternalAddress is the address (hostname or IP and port) that should be used in
	// external (public internet) URLs for this GenericAPIServer.
	ExternalAddress string
	// Serializer controls how common API objects not in a group/version prefix are serialized for this server.
	// Individual APIGroups may define their own serializers.
	Serializer runtime.NegotiatedSerializer
	// "Outputs"
	// Handler holds the handlers being used by this API server
	Handler *APIServerHandler
	// listedPathProvider is a lister which provides the set of paths to show at /
	listedPathProvider routes.ListedPathProvider
	// DiscoveryGroupManager serves /apis
	DiscoveryGroupManager discovery.GroupManager
	// Enable swagger and/or OpenAPI if these configs are non-nil.
	swaggerConfig *swagger.Config
	openAPIConfig *openapicommon.Config
	// PostStartHooks are each called after the server has started listening, in a separate go func for each
	// with no guarantee of ordering between them. The map key is a name used for error reporting.
	// It may kill the process with a panic if it wishes to by returning an error.
	postStartHookLock      sync.Mutex
	postStartHooks         map[string]postStartHookEntry
	postStartHooksCalled   bool
	disabledPostStartHooks sets.String
	// preShutdownHooks are run before the server stops; all access is guarded by preShutdownHookLock.
	preShutdownHookLock    sync.Mutex
	preShutdownHooks       map[string]preShutdownHookEntry
	preShutdownHooksCalled bool
	// healthz checks; all access is guarded by healthzLock.
	healthzLock    sync.Mutex
	healthzChecks  []healthz.HealthzChecker
	healthzCreated bool
	// auditing. The backend is started after the server starts listening.
	AuditBackend audit.Backend
	// enableAPIResponseCompression indicates whether API Responses should support compression
	// if the client requests it via Accept-Encoding
	enableAPIResponseCompression bool
	// delegationTarget is the next delegate in the chain or nil
	delegationTarget DelegationTarget
}
// DelegationTarget is an interface which allows for composition of API servers with top level handling that works
// as expected.
type DelegationTarget interface {
	// UnprotectedHandler returns a handler that is NOT protected by a normal chain
	UnprotectedHandler() http.Handler
	// RequestContextMapper returns the existing RequestContextMapper. Because we cannot rewire all existing
	// uses of this function, this will be used in any delegating API server
	RequestContextMapper() apirequest.RequestContextMapper
	// PostStartHooks returns the post-start hooks that need to be combined
	PostStartHooks() map[string]postStartHookEntry
	// PreShutdownHooks returns the pre-stop hooks that need to be combined
	PreShutdownHooks() map[string]preShutdownHookEntry
	// HealthzChecks returns the healthz checks that need to be combined
	HealthzChecks() []healthz.HealthzChecker
	// ListedPaths returns the paths for supporting an index
	ListedPaths() []string
	// NextDelegate returns the next delegationTarget in the chain of delegations, or nil at the end of the chain.
	NextDelegate() DelegationTarget
}
// UnprotectedHandler returns the handler chain without the normal protection filters.
func (s *GenericAPIServer) UnprotectedHandler() http.Handler {
	// when we delegate, we need the server we're delegating to choose whether or not to use gorestful
	return s.Handler.Director
}

// PostStartHooks returns the registered post-start hooks, keyed by name.
func (s *GenericAPIServer) PostStartHooks() map[string]postStartHookEntry {
	return s.postStartHooks
}

// PreShutdownHooks returns the registered pre-shutdown hooks, keyed by name.
func (s *GenericAPIServer) PreShutdownHooks() map[string]preShutdownHookEntry {
	return s.preShutdownHooks
}

// HealthzChecks returns the healthz checks registered on this server.
func (s *GenericAPIServer) HealthzChecks() []healthz.HealthzChecker {
	return s.healthzChecks
}

// ListedPaths returns the set of paths to show at /.
func (s *GenericAPIServer) ListedPaths() []string {
	return s.listedPathProvider.ListedPaths()
}

// NextDelegate returns the next delegation target in the chain, or nil.
func (s *GenericAPIServer) NextDelegate() DelegationTarget {
	return s.delegationTarget
}

// EmptyDelegate is a DelegationTarget that terminates a delegation chain:
// it serves nothing and contributes no hooks, checks or paths.
var EmptyDelegate = emptyDelegate{
	requestContextMapper: apirequest.NewRequestContextMapper(),
}

// emptyDelegate backs EmptyDelegate; it only carries a request context mapper.
type emptyDelegate struct {
	requestContextMapper apirequest.RequestContextMapper
}

// UnprotectedHandler returns nil: the empty delegate serves nothing.
func (s emptyDelegate) UnprotectedHandler() http.Handler {
	return nil
}

// PostStartHooks returns an empty hook map.
func (s emptyDelegate) PostStartHooks() map[string]postStartHookEntry {
	return map[string]postStartHookEntry{}
}

// PreShutdownHooks returns an empty hook map.
func (s emptyDelegate) PreShutdownHooks() map[string]preShutdownHookEntry {
	return map[string]preShutdownHookEntry{}
}

// HealthzChecks returns no checks.
func (s emptyDelegate) HealthzChecks() []healthz.HealthzChecker {
	return []healthz.HealthzChecker{}
}

// ListedPaths returns no paths.
func (s emptyDelegate) ListedPaths() []string {
	return []string{}
}

// RequestContextMapper returns the mapper the empty delegate was created with.
func (s emptyDelegate) RequestContextMapper() apirequest.RequestContextMapper {
	return s.requestContextMapper
}

// NextDelegate returns nil: the empty delegate is the end of the chain.
func (s emptyDelegate) NextDelegate() DelegationTarget {
	return nil
}

// RequestContextMapper is exposed so that third party resource storage can be built in a different location.
// TODO refactor third party resource storage
func (s *GenericAPIServer) RequestContextMapper() apirequest.RequestContextMapper {
	return s.requestContextMapper
}

// MinRequestTimeout is exposed so that third party resource storage can be built in a different location.
// TODO refactor third party resource storage
func (s *GenericAPIServer) MinRequestTimeout() time.Duration {
	return s.minRequestTimeout
}
// preparedGenericAPIServer is a private wrapper that enforces a call of
// PrepareRun() before Run can be invoked.
type preparedGenericAPIServer struct {
	*GenericAPIServer
}

// PrepareRun does post API installation setup steps.
func (s *GenericAPIServer) PrepareRun() preparedGenericAPIServer {
	// Swagger/OpenAPI routes are installed only when their configs are non-nil.
	if s.swaggerConfig != nil {
		routes.Swagger{Config: s.swaggerConfig}.Install(s.Handler.GoRestfulContainer)
	}
	if s.openAPIConfig != nil {
		routes.OpenAPI{
			Config: s.openAPIConfig,
		}.Install(s.Handler.GoRestfulContainer, s.Handler.NonGoRestfulMux)
	}
	s.installHealthz()
	return preparedGenericAPIServer{s}
}

// Run spawns the secure http server. It only returns if stopCh is closed
// or the secure port cannot be listened on initially.
func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error {
	err := s.NonBlockingRun(stopCh)
	if err != nil {
		return err
	}
	<-stopCh
	// The audit backend is shut down after serving stops and before the
	// pre-shutdown hooks run.
	if s.GenericAPIServer.AuditBackend != nil {
		s.GenericAPIServer.AuditBackend.Shutdown()
	}
	return s.RunPreShutdownHooks()
}
// NonBlockingRun spawns the secure http server. An error is
// returned if the secure port cannot be listened on.
func (s preparedGenericAPIServer) NonBlockingRun(stopCh <-chan struct{}) error {
	// Start the audit backend BEFORE the secure listener starts serving.
	// If it were started after (as previously), requests arriving between
	// listener start and backend start could block in Backend.ProcessEvents.
	// This also means it cannot be a post start hook.
	if s.AuditBackend != nil {
		if err := s.AuditBackend.Run(stopCh); err != nil {
			return fmt.Errorf("failed to run the audit backend: %v", err)
		}
	}
	// Use an internal stop channel to allow cleanup of the listeners on error.
	internalStopCh := make(chan struct{})
	if s.SecureServingInfo != nil && s.Handler != nil {
		if err := s.serveSecurely(internalStopCh); err != nil {
			close(internalStopCh)
			return err
		}
	}
	// Now that listener have bound successfully, it is the
	// responsibility of the caller to close the provided channel to
	// ensure cleanup.
	go func() {
		<-stopCh
		close(internalStopCh)
	}()
	s.RunPostStartHooks(stopCh)
	// Tell systemd (when supervising) that startup is complete.
	if _, err := systemd.SdNotify(true, "READY=1\n"); err != nil {
		glog.Errorf("Unable to send systemd daemon successful start message: %v\n", err)
	}
	return nil
}
// EffectiveSecurePort returns the secure port we bound to.
// It is only meaningful after the secure listener has started.
func (s *GenericAPIServer) EffectiveSecurePort() int {
	return s.effectiveSecurePort
}

// installAPIResources is a private method for installing the REST storage backing each api groupversionresource
func (s *GenericAPIServer) installAPIResources(apiPrefix string, apiGroupInfo *APIGroupInfo) error {
	for _, groupVersion := range apiGroupInfo.GroupMeta.GroupVersions {
		// Versions without any registered storage are skipped, not treated as errors.
		if len(apiGroupInfo.VersionedResourcesStorageMap[groupVersion.Version]) == 0 {
			glog.Warningf("Skipping API %v because it has no resources.", groupVersion)
			continue
		}
		apiGroupVersion := s.getAPIGroupVersion(apiGroupInfo, groupVersion, apiPrefix)
		if apiGroupInfo.OptionsExternalVersion != nil {
			apiGroupVersion.OptionsExternalVersion = apiGroupInfo.OptionsExternalVersion
		}
		if err := apiGroupVersion.InstallREST(s.Handler.GoRestfulContainer); err != nil {
			return fmt.Errorf("Unable to setup API %v: %v", apiGroupInfo, err)
		}
	}
	return nil
}

// InstallLegacyAPIGroup installs the legacy ("core") API group under apiPrefix,
// which must be one of the configured legacyAPIGroupPrefixes.
func (s *GenericAPIServer) InstallLegacyAPIGroup(apiPrefix string, apiGroupInfo *APIGroupInfo) error {
	if !s.legacyAPIGroupPrefixes.Has(apiPrefix) {
		return fmt.Errorf("%q is not in the allowed legacy API prefixes: %v", apiPrefix, s.legacyAPIGroupPrefixes.List())
	}
	if err := s.installAPIResources(apiPrefix, apiGroupInfo); err != nil {
		return err
	}
	// setup discovery
	apiVersions := []string{}
	for _, groupVersion := range apiGroupInfo.GroupMeta.GroupVersions {
		apiVersions = append(apiVersions, groupVersion.Version)
	}
	// Install the version handler.
	// Add a handler at /<apiPrefix> to enumerate the supported api versions.
	s.Handler.GoRestfulContainer.Add(discovery.NewLegacyRootAPIHandler(s.discoveryAddresses, s.Serializer, apiPrefix, apiVersions, s.requestContextMapper).WebService())
	return nil
}
// InstallAPIGroup exposes the given api group in the API at /apis/<group>,
// installing its REST storage and registering it for discovery.
func (s *GenericAPIServer) InstallAPIGroup(apiGroupInfo *APIGroupInfo) error {
	// Do not register empty group or empty version. Doing so claims /apis/ for the wrong entity to be returned.
	// Catching these here places the error much closer to its origin
	if len(apiGroupInfo.GroupMeta.GroupVersion.Group) == 0 {
		return fmt.Errorf("cannot register handler with an empty group for %#v", *apiGroupInfo)
	}
	if len(apiGroupInfo.GroupMeta.GroupVersion.Version) == 0 {
		return fmt.Errorf("cannot register handler with an empty version for %#v", *apiGroupInfo)
	}
	if err := s.installAPIResources(APIGroupPrefix, apiGroupInfo); err != nil {
		return err
	}
	// setup discovery
	// Install the version handler.
	// Add a handler at /apis/<groupName> to enumerate all versions supported by this group.
	apiVersionsForDiscovery := []metav1.GroupVersionForDiscovery{}
	for _, groupVersion := range apiGroupInfo.GroupMeta.GroupVersions {
		// Check the config to make sure that we elide versions that don't have any resources
		if len(apiGroupInfo.VersionedResourcesStorageMap[groupVersion.Version]) == 0 {
			continue
		}
		apiVersionsForDiscovery = append(apiVersionsForDiscovery, metav1.GroupVersionForDiscovery{
			GroupVersion: groupVersion.String(),
			Version:      groupVersion.Version,
		})
	}
	preferredVersionForDiscovery := metav1.GroupVersionForDiscovery{
		GroupVersion: apiGroupInfo.GroupMeta.GroupVersion.String(),
		Version:      apiGroupInfo.GroupMeta.GroupVersion.Version,
	}
	apiGroup := metav1.APIGroup{
		Name:             apiGroupInfo.GroupMeta.GroupVersion.Group,
		Versions:         apiVersionsForDiscovery,
		PreferredVersion: preferredVersionForDiscovery,
	}
	s.DiscoveryGroupManager.AddGroup(apiGroup)
	s.Handler.GoRestfulContainer.Add(discovery.NewAPIGroupHandler(s.Serializer, apiGroup, s.requestContextMapper).WebService())
	return nil
}
// getAPIGroupVersion builds the genericapi.APIGroupVersion for one version of a
// group, rooted at apiPrefix, with resource names lower-cased for URL matching.
func (s *GenericAPIServer) getAPIGroupVersion(apiGroupInfo *APIGroupInfo, groupVersion schema.GroupVersion, apiPrefix string) *genericapi.APIGroupVersion {
	version := s.newAPIGroupVersion(apiGroupInfo, groupVersion)
	version.Root = apiPrefix
	version.Storage = make(map[string]rest.Storage)
	for name, store := range apiGroupInfo.VersionedResourcesStorageMap[groupVersion.Version] {
		version.Storage[strings.ToLower(name)] = store
	}
	return version
}
// newAPIGroupVersion assembles a genericapi.APIGroupVersion from the group's
// scheme, serializers and codecs plus the server-wide admission, context and
// timeout settings.
func (s *GenericAPIServer) newAPIGroupVersion(apiGroupInfo *APIGroupInfo, groupVersion schema.GroupVersion) *genericapi.APIGroupVersion {
	return &genericapi.APIGroupVersion{
		GroupVersion:     groupVersion,
		MetaGroupVersion: apiGroupInfo.MetaGroupVersion,

		ParameterCodec:  apiGroupInfo.ParameterCodec,
		Serializer:      apiGroupInfo.NegotiatedSerializer,
		Creater:         apiGroupInfo.Scheme,
		Convertor:       apiGroupInfo.Scheme,
		UnsafeConvertor: runtime.UnsafeObjectConvertor(apiGroupInfo.Scheme),
		Defaulter:       apiGroupInfo.Scheme,
		Typer:           apiGroupInfo.Scheme,
		SubresourceGroupVersionKind: apiGroupInfo.SubresourceGroupVersionKind,
		Linker: apiGroupInfo.GroupMeta.SelfLinker,
		Mapper: apiGroupInfo.GroupMeta.RESTMapper,

		Admit:             s.admissionControl,
		Context:           s.RequestContextMapper(),
		MinRequestTimeout: s.minRequestTimeout,
		EnableAPIResponseCompression: s.enableAPIResponseCompression,
	}
}

// NewDefaultAPIGroupInfo returns an APIGroupInfo stubbed with "normal" values
// exposed for easier composition from other packages
func NewDefaultAPIGroupInfo(group string, registry *registered.APIRegistrationManager, scheme *runtime.Scheme, parameterCodec runtime.ParameterCodec, codecs serializer.CodecFactory) APIGroupInfo {
	// GroupOrDie panics if the group was never registered with the registry.
	groupMeta := registry.GroupOrDie(group)
	return APIGroupInfo{
		GroupMeta: *groupMeta,
		VersionedResourcesStorageMap: map[string]map[string]rest.Storage{},
		// TODO unhardcode this. It was hardcoded before, but we need to re-evaluate
		OptionsExternalVersion: &schema.GroupVersion{Version: "v1"},
		Scheme:                 scheme,
		ParameterCodec:         parameterCodec,
		NegotiatedSerializer:   codecs,
	}
}
Run the audit backend before the HTTP server starts, and register a preShutdown hook.
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"fmt"
"net/http"
"strings"
"sync"
"time"
systemd "github.com/coreos/go-systemd/daemon"
"github.com/emicklei/go-restful-swagger12"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/apimachinery"
"k8s.io/apimachinery/pkg/apimachinery/registered"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/audit"
genericapi "k8s.io/apiserver/pkg/endpoints"
"k8s.io/apiserver/pkg/endpoints/discovery"
apirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/apiserver/pkg/server/healthz"
"k8s.io/apiserver/pkg/server/routes"
restclient "k8s.io/client-go/rest"
openapicommon "k8s.io/kube-openapi/pkg/common"
)
// APIGroupInfo holds everything needed to install one API group: its meta
// information plus the REST storage backing each version/resource.
type APIGroupInfo struct {
	GroupMeta apimachinery.GroupMeta
	// Info about the resources in this group. It is a map from version to resource to the storage.
	VersionedResourcesStorageMap map[string]map[string]rest.Storage
	// OptionsExternalVersion controls the APIVersion used for common objects in the
	// schema like api.Status, api.DeleteOptions, and metav1.ListOptions. Other implementors may
	// define a version "v1beta1" but want to use the Kubernetes "v1" internal objects.
	// If nil, defaults to groupMeta.GroupVersion.
	// TODO: Remove this when https://github.com/kubernetes/kubernetes/issues/19018 is fixed.
	OptionsExternalVersion *schema.GroupVersion
	// MetaGroupVersion defaults to "meta.k8s.io/v1" and is the scheme group version used to decode
	// common API implementations like ListOptions. Future changes will allow this to vary by group
	// version (for when the inevitable meta/v2 group emerges).
	MetaGroupVersion *schema.GroupVersion
	// Scheme includes all of the types used by this group and how to convert between them (or
	// to convert objects from outside of this group that are accepted in this API).
	// TODO: replace with interfaces
	Scheme *runtime.Scheme
	// NegotiatedSerializer controls how this group encodes and decodes data
	NegotiatedSerializer runtime.NegotiatedSerializer
	// ParameterCodec performs conversions for query parameters passed to API calls
	ParameterCodec runtime.ParameterCodec
	// SubresourceGroupVersionKind contains the GroupVersionKind overrides for each subresource that is
	// accessible from this API group version. The GroupVersionKind is that of the external version of
	// the subresource. The key of this map should be the path of the subresource. The keys here should
	// match the keys in the Storage map above for subresources.
	SubresourceGroupVersionKind map[string]schema.GroupVersionKind
}
// GenericAPIServer contains state for a Kubernetes cluster api server.
type GenericAPIServer struct {
	// discoveryAddresses is used to build cluster IPs for discovery.
	discoveryAddresses discovery.Addresses
	// LoopbackClientConfig is a config for a privileged loopback connection to the API server
	LoopbackClientConfig *restclient.Config
	// minRequestTimeout is how short the request timeout can be. This is used to build the RESTHandler
	minRequestTimeout time.Duration
	// legacyAPIGroupPrefixes is used to set up URL parsing for authorization and for validating requests
	// to InstallLegacyAPIGroup
	legacyAPIGroupPrefixes sets.String
	// admissionControl is used to build the RESTStorage that backs an API Group.
	admissionControl admission.Interface
	// requestContextMapper provides a way to get the context for a request. It may be nil.
	requestContextMapper apirequest.RequestContextMapper
	// SecureServingInfo configures the secure (TLS) listener; when nil, no secure serving is started.
	SecureServingInfo *SecureServingInfo
	// effectiveSecurePort is the numerical port actually bound, set after listening starts.
	effectiveSecurePort int
	// ExternalAddress is the address (hostname or IP and port) that should be used in
	// external (public internet) URLs for this GenericAPIServer.
	ExternalAddress string
	// Serializer controls how common API objects not in a group/version prefix are serialized for this server.
	// Individual APIGroups may define their own serializers.
	Serializer runtime.NegotiatedSerializer
	// "Outputs"
	// Handler holds the handlers being used by this API server
	Handler *APIServerHandler
	// listedPathProvider is a lister which provides the set of paths to show at /
	listedPathProvider routes.ListedPathProvider
	// DiscoveryGroupManager serves /apis
	DiscoveryGroupManager discovery.GroupManager
	// Enable swagger and/or OpenAPI if these configs are non-nil.
	swaggerConfig *swagger.Config
	openAPIConfig *openapicommon.Config
	// PostStartHooks are each called after the server has started listening, in a separate go func for each
	// with no guarantee of ordering between them. The map key is a name used for error reporting.
	// It may kill the process with a panic if it wishes to by returning an error.
	postStartHookLock      sync.Mutex
	postStartHooks         map[string]postStartHookEntry
	postStartHooksCalled   bool
	disabledPostStartHooks sets.String
	// preShutdownHooks are run before the server stops; all access is guarded by preShutdownHookLock.
	preShutdownHookLock    sync.Mutex
	preShutdownHooks       map[string]preShutdownHookEntry
	preShutdownHooksCalled bool
	// healthz checks; all access is guarded by healthzLock.
	healthzLock    sync.Mutex
	healthzChecks  []healthz.HealthzChecker
	healthzCreated bool
	// auditing. The backend is started before the server starts serving
	// (see NonBlockingRun) and shut down via a pre-shutdown hook.
	AuditBackend audit.Backend
	// enableAPIResponseCompression indicates whether API Responses should support compression
	// if the client requests it via Accept-Encoding
	enableAPIResponseCompression bool
	// delegationTarget is the next delegate in the chain or nil
	delegationTarget DelegationTarget
}
// DelegationTarget is an interface which allows for composition of API servers with top level handling that works
// as expected.
type DelegationTarget interface {
	// UnprotectedHandler returns a handler that is NOT protected by a normal chain
	UnprotectedHandler() http.Handler
	// RequestContextMapper returns the existing RequestContextMapper. Because we cannot rewire all existing
	// uses of this function, this will be used in any delegating API server
	RequestContextMapper() apirequest.RequestContextMapper
	// PostStartHooks returns the post-start hooks that need to be combined
	PostStartHooks() map[string]postStartHookEntry
	// PreShutdownHooks returns the pre-stop hooks that need to be combined
	PreShutdownHooks() map[string]preShutdownHookEntry
	// HealthzChecks returns the healthz checks that need to be combined
	HealthzChecks() []healthz.HealthzChecker
	// ListedPaths returns the paths for supporting an index
	ListedPaths() []string
	// NextDelegate returns the next delegationTarget in the chain of delegations, or nil at the end of the chain.
	NextDelegate() DelegationTarget
}
// UnprotectedHandler returns the handler chain without the normal protection filters.
func (s *GenericAPIServer) UnprotectedHandler() http.Handler {
	// when we delegate, we need the server we're delegating to choose whether or not to use gorestful
	return s.Handler.Director
}

// PostStartHooks returns the registered post-start hooks, keyed by name.
func (s *GenericAPIServer) PostStartHooks() map[string]postStartHookEntry {
	return s.postStartHooks
}

// PreShutdownHooks returns the registered pre-shutdown hooks, keyed by name.
func (s *GenericAPIServer) PreShutdownHooks() map[string]preShutdownHookEntry {
	return s.preShutdownHooks
}

// HealthzChecks returns the healthz checks registered on this server.
func (s *GenericAPIServer) HealthzChecks() []healthz.HealthzChecker {
	return s.healthzChecks
}

// ListedPaths returns the set of paths to show at /.
func (s *GenericAPIServer) ListedPaths() []string {
	return s.listedPathProvider.ListedPaths()
}

// NextDelegate returns the next delegation target in the chain, or nil.
func (s *GenericAPIServer) NextDelegate() DelegationTarget {
	return s.delegationTarget
}

// EmptyDelegate is a DelegationTarget that terminates a delegation chain:
// it serves nothing and contributes no hooks, checks or paths.
var EmptyDelegate = emptyDelegate{
	requestContextMapper: apirequest.NewRequestContextMapper(),
}

// emptyDelegate backs EmptyDelegate; it only carries a request context mapper.
type emptyDelegate struct {
	requestContextMapper apirequest.RequestContextMapper
}

// UnprotectedHandler returns nil: the empty delegate serves nothing.
func (s emptyDelegate) UnprotectedHandler() http.Handler {
	return nil
}

// PostStartHooks returns an empty hook map.
func (s emptyDelegate) PostStartHooks() map[string]postStartHookEntry {
	return map[string]postStartHookEntry{}
}

// PreShutdownHooks returns an empty hook map.
func (s emptyDelegate) PreShutdownHooks() map[string]preShutdownHookEntry {
	return map[string]preShutdownHookEntry{}
}

// HealthzChecks returns no checks.
func (s emptyDelegate) HealthzChecks() []healthz.HealthzChecker {
	return []healthz.HealthzChecker{}
}

// ListedPaths returns no paths.
func (s emptyDelegate) ListedPaths() []string {
	return []string{}
}

// RequestContextMapper returns the mapper the empty delegate was created with.
func (s emptyDelegate) RequestContextMapper() apirequest.RequestContextMapper {
	return s.requestContextMapper
}

// NextDelegate returns nil: the empty delegate is the end of the chain.
func (s emptyDelegate) NextDelegate() DelegationTarget {
	return nil
}

// RequestContextMapper is exposed so that third party resource storage can be built in a different location.
// TODO refactor third party resource storage
func (s *GenericAPIServer) RequestContextMapper() apirequest.RequestContextMapper {
	return s.requestContextMapper
}

// MinRequestTimeout is exposed so that third party resource storage can be built in a different location.
// TODO refactor third party resource storage
func (s *GenericAPIServer) MinRequestTimeout() time.Duration {
	return s.minRequestTimeout
}
// preparedGenericAPIServer is a private wrapper that enforces a call of
// PrepareRun() before Run can be invoked.
type preparedGenericAPIServer struct {
	*GenericAPIServer
}

// PrepareRun does post API installation setup steps.
func (s *GenericAPIServer) PrepareRun() preparedGenericAPIServer {
	// Swagger/OpenAPI routes are installed only when their configs are non-nil.
	if s.swaggerConfig != nil {
		routes.Swagger{Config: s.swaggerConfig}.Install(s.Handler.GoRestfulContainer)
	}
	if s.openAPIConfig != nil {
		routes.OpenAPI{
			Config: s.openAPIConfig,
		}.Install(s.Handler.GoRestfulContainer, s.Handler.NonGoRestfulMux)
	}
	s.installHealthz()
	return preparedGenericAPIServer{s}
}

// Run spawns the secure http server. It only returns if stopCh is closed
// or the secure port cannot be listened on initially.
func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error {
	// Register audit backend preShutdownHook.
	// NOTE(review): any error returned by AddPreShutdownHook is discarded here —
	// confirm that registration cannot fail at this point in the lifecycle.
	if s.AuditBackend != nil {
		s.AddPreShutdownHook("audit-backend", func() error {
			s.AuditBackend.Shutdown()
			return nil
		})
	}
	err := s.NonBlockingRun(stopCh)
	if err != nil {
		return err
	}
	<-stopCh
	return s.RunPreShutdownHooks()
}
// NonBlockingRun spawns the secure http server. An error is
// returned if the secure port cannot be listened on.
func (s preparedGenericAPIServer) NonBlockingRun(stopCh <-chan struct{}) error {
	// Start the audit backend before any request comes in. This means we must call Backend.Run
	// before http server start serving. Otherwise the Backend.ProcessEvents call might block.
	if s.AuditBackend != nil {
		if err := s.AuditBackend.Run(stopCh); err != nil {
			return fmt.Errorf("failed to run the audit backend: %v", err)
		}
	}
	// Use an internal stop channel to allow cleanup of the listeners on error.
	internalStopCh := make(chan struct{})
	if s.SecureServingInfo != nil && s.Handler != nil {
		if err := s.serveSecurely(internalStopCh); err != nil {
			close(internalStopCh)
			return err
		}
	}
	// Now that listener have bound successfully, it is the
	// responsibility of the caller to close the provided channel to
	// ensure cleanup.
	go func() {
		<-stopCh
		close(internalStopCh)
	}()
	s.RunPostStartHooks(stopCh)
	// Tell systemd (when supervising) that startup is complete.
	if _, err := systemd.SdNotify(true, "READY=1\n"); err != nil {
		glog.Errorf("Unable to send systemd daemon successful start message: %v\n", err)
	}
	return nil
}
// EffectiveSecurePort returns the secure port we bound to.
func (s *GenericAPIServer) EffectiveSecurePort() int {
return s.effectiveSecurePort
}
// installAPIResources is a private method for installing the REST storage backing each api groupversionresource
func (s *GenericAPIServer) installAPIResources(apiPrefix string, apiGroupInfo *APIGroupInfo) error {
for _, groupVersion := range apiGroupInfo.GroupMeta.GroupVersions {
if len(apiGroupInfo.VersionedResourcesStorageMap[groupVersion.Version]) == 0 {
glog.Warningf("Skipping API %v because it has no resources.", groupVersion)
continue
}
apiGroupVersion := s.getAPIGroupVersion(apiGroupInfo, groupVersion, apiPrefix)
if apiGroupInfo.OptionsExternalVersion != nil {
apiGroupVersion.OptionsExternalVersion = apiGroupInfo.OptionsExternalVersion
}
if err := apiGroupVersion.InstallREST(s.Handler.GoRestfulContainer); err != nil {
return fmt.Errorf("Unable to setup API %v: %v", apiGroupInfo, err)
}
}
return nil
}
// InstallLegacyAPIGroup exposes the given legacy api group under apiPrefix,
// which must be one of the server's allowed legacy prefixes. It installs the
// REST storage for every group version and adds a discovery handler at
// /<apiPrefix> enumerating the supported versions.
func (s *GenericAPIServer) InstallLegacyAPIGroup(apiPrefix string, apiGroupInfo *APIGroupInfo) error {
	if !s.legacyAPIGroupPrefixes.Has(apiPrefix) {
		return fmt.Errorf("%q is not in the allowed legacy API prefixes: %v", apiPrefix, s.legacyAPIGroupPrefixes.List())
	}
	if err := s.installAPIResources(apiPrefix, apiGroupInfo); err != nil {
		return err
	}
	// setup discovery
	apiVersions := []string{}
	for _, groupVersion := range apiGroupInfo.GroupMeta.GroupVersions {
		apiVersions = append(apiVersions, groupVersion.Version)
	}
	// Install the version handler.
	// Add a handler at /<apiPrefix> to enumerate the supported api versions.
	s.Handler.GoRestfulContainer.Add(discovery.NewLegacyRootAPIHandler(s.discoveryAddresses, s.Serializer, apiPrefix, apiVersions, s.requestContextMapper).WebService())
	return nil
}
// InstallAPIGroup exposes the given api group in the API, installing its
// REST resources under the standard API group prefix and registering the
// group with discovery.
func (s *GenericAPIServer) InstallAPIGroup(apiGroupInfo *APIGroupInfo) error {
	// Do not register empty group or empty version. Doing so claims /apis/ for the wrong entity to be returned.
	// Catching these here places the error much closer to its origin
	if len(apiGroupInfo.GroupMeta.GroupVersion.Group) == 0 {
		return fmt.Errorf("cannot register handler with an empty group for %#v", *apiGroupInfo)
	}
	if len(apiGroupInfo.GroupMeta.GroupVersion.Version) == 0 {
		return fmt.Errorf("cannot register handler with an empty version for %#v", *apiGroupInfo)
	}
	if err := s.installAPIResources(APIGroupPrefix, apiGroupInfo); err != nil {
		return err
	}
	// setup discovery
	// Install the version handler.
	// Add a handler at /apis/<groupName> to enumerate all versions supported by this group.
	apiVersionsForDiscovery := []metav1.GroupVersionForDiscovery{}
	for _, groupVersion := range apiGroupInfo.GroupMeta.GroupVersions {
		// Check the config to make sure that we elide versions that don't have any resources
		if len(apiGroupInfo.VersionedResourcesStorageMap[groupVersion.Version]) == 0 {
			continue
		}
		apiVersionsForDiscovery = append(apiVersionsForDiscovery, metav1.GroupVersionForDiscovery{
			GroupVersion: groupVersion.String(),
			Version:      groupVersion.Version,
		})
	}
	preferredVersionForDiscovery := metav1.GroupVersionForDiscovery{
		GroupVersion: apiGroupInfo.GroupMeta.GroupVersion.String(),
		Version:      apiGroupInfo.GroupMeta.GroupVersion.Version,
	}
	apiGroup := metav1.APIGroup{
		Name:             apiGroupInfo.GroupMeta.GroupVersion.Group,
		Versions:         apiVersionsForDiscovery,
		PreferredVersion: preferredVersionForDiscovery,
	}
	s.DiscoveryGroupManager.AddGroup(apiGroup)
	s.Handler.GoRestfulContainer.Add(discovery.NewAPIGroupHandler(s.Serializer, apiGroup, s.requestContextMapper).WebService())
	return nil
}
// getAPIGroupVersion builds the APIGroupVersion for groupVersion rooted at
// apiPrefix, lower-casing every resource name used as a storage key.
func (s *GenericAPIServer) getAPIGroupVersion(apiGroupInfo *APIGroupInfo, groupVersion schema.GroupVersion, apiPrefix string) *genericapi.APIGroupVersion {
	version := s.newAPIGroupVersion(apiGroupInfo, groupVersion)
	version.Root = apiPrefix
	storageMap := make(map[string]rest.Storage)
	for name, store := range apiGroupInfo.VersionedResourcesStorageMap[groupVersion.Version] {
		storageMap[strings.ToLower(name)] = store
	}
	version.Storage = storageMap
	return version
}
// newAPIGroupVersion assembles a genericapi.APIGroupVersion from the group's
// scheme, serializers and metadata plus server-wide settings (admission
// control, request context, timeouts). Root and Storage are left for the
// caller to fill in.
func (s *GenericAPIServer) newAPIGroupVersion(apiGroupInfo *APIGroupInfo, groupVersion schema.GroupVersion) *genericapi.APIGroupVersion {
	return &genericapi.APIGroupVersion{
		GroupVersion:     groupVersion,
		MetaGroupVersion: apiGroupInfo.MetaGroupVersion,
		ParameterCodec:   apiGroupInfo.ParameterCodec,
		Serializer:       apiGroupInfo.NegotiatedSerializer,
		// The group's Scheme serves creation, conversion, defaulting and
		// typing duties all at once.
		Creater:                      apiGroupInfo.Scheme,
		Convertor:                    apiGroupInfo.Scheme,
		UnsafeConvertor:              runtime.UnsafeObjectConvertor(apiGroupInfo.Scheme),
		Defaulter:                    apiGroupInfo.Scheme,
		Typer:                        apiGroupInfo.Scheme,
		SubresourceGroupVersionKind:  apiGroupInfo.SubresourceGroupVersionKind,
		Linker:                       apiGroupInfo.GroupMeta.SelfLinker,
		Mapper:                       apiGroupInfo.GroupMeta.RESTMapper,
		Admit:                        s.admissionControl,
		Context:                      s.RequestContextMapper(),
		MinRequestTimeout:            s.minRequestTimeout,
		EnableAPIResponseCompression: s.enableAPIResponseCompression,
	}
}
// NewDefaultAPIGroupInfo returns an APIGroupInfo stubbed with "normal" values
// exposed for easier composition from other packages.
// NOTE(review): GroupOrDie suggests the group must already be registered with
// the registration manager — confirm callers always register first.
func NewDefaultAPIGroupInfo(group string, registry *registered.APIRegistrationManager, scheme *runtime.Scheme, parameterCodec runtime.ParameterCodec, codecs serializer.CodecFactory) APIGroupInfo {
	groupMeta := registry.GroupOrDie(group)
	return APIGroupInfo{
		GroupMeta:                    *groupMeta,
		VersionedResourcesStorageMap: map[string]map[string]rest.Storage{},
		// TODO unhardcode this. It was hardcoded before, but we need to re-evaluate
		OptionsExternalVersion: &schema.GroupVersion{Version: "v1"},
		Scheme:                 scheme,
		ParameterCodec:         parameterCodec,
		NegotiatedSerializer:   codecs,
	}
}
|
package sofa
import (
"crypto/hmac"
"crypto/sha1"
"crypto/tls"
"crypto/x509"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/cookiejar"
"strings"
// Authentication with certificates can break if this is not included even though no methods
// are called directly.
// TODO: See if this can be controlled with tags.
_ "crypto/sha512"
)
// Authenticator is an interface for anything which can supply authentication to a CouchDB server.
// The Authenticator is given access to every request made & also allowed to perform an initial setup
// on the connection. Implementations in this package cover null, basic,
// client-certificate, cookie and proxy authentication.
type Authenticator interface {
	// Authenticate adds authentication to an existing http.Request.
	Authenticate(req *http.Request)
	// Client returns a client with the correct authentication setup to contact the CouchDB server.
	Client() (*http.Client, error)
	// Setup uses the provided connection to setup any authentication information which requires accessing
	// the CouchDB server.
	Setup(*Connection) error
}
// nullAuthenticator is the no-op implementation backing NullAuthenticator.
type nullAuthenticator struct{}

// Authenticate is a no-op: no credentials are added to the request.
func (t *nullAuthenticator) Authenticate(req *http.Request) {}

// Client returns a default http.Client with no authentication state.
func (t *nullAuthenticator) Client() (*http.Client, error) {
	return &http.Client{}, nil
}

// Setup performs no connection-level setup.
func (t *nullAuthenticator) Setup(con *Connection) error {
	return nil
}

// NullAuthenticator is an Authenticator which does no work - it implements the interface but
// does not supply any authentication information to the CouchDB server.
func NullAuthenticator() Authenticator {
	return &nullAuthenticator{}
}
// basicAuthenticator carries the credentials used for HTTP basic auth.
type basicAuthenticator struct {
	Username string
	Password string
}

// Authenticate attaches the basic-auth Authorization header to req.
func (a *basicAuthenticator) Authenticate(req *http.Request) {
	// Basic auth headers must be set for every individual request
	req.SetBasicAuth(a.Username, a.Password)
}

// Client returns a default http.Client; basic auth keeps no client state.
func (a *basicAuthenticator) Client() (*http.Client, error) {
	return &http.Client{}, nil
}

// Setup performs no connection-level setup.
func (a *basicAuthenticator) Setup(con *Connection) error {
	return nil
}

// BasicAuthenticator returns an implementation of the Authenticator interface which does HTTP basic
// authentication. If you are not using SSL then this will result in credentials being sent in plain
// text.
func BasicAuthenticator(user, pass string) Authenticator {
	return &basicAuthenticator{
		Username: user,
		Password: pass,
	}
}
// clientCertAuthenticator holds the file paths used to build a TLS
// client-certificate transport.
type clientCertAuthenticator struct {
	CertPath string
	KeyPath  string
	CaPath   string
}

// ClientCertAuthenticator provides an Authenticator which uses a client SSL certificate
// to authenticate to the couchdb server. caPath may be empty, in which case
// the system roots are used to verify the server.
func ClientCertAuthenticator(certPath, keyPath, caPath string) (Authenticator, error) {
	return &clientCertAuthenticator{
		CertPath: certPath,
		KeyPath:  keyPath,
		CaPath:   caPath,
	}, nil
}

// Authenticate is a no-op: the certificate is presented during the TLS
// handshake, not via request headers.
func (c *clientCertAuthenticator) Authenticate(req *http.Request) {}

// Client returns an http.Client whose transport presents the client
// certificate, optionally trusting only the CAs found at CaPath.
func (c *clientCertAuthenticator) Client() (*http.Client, error) {
	cert, err := tls.LoadX509KeyPair(c.CertPath, c.KeyPath)
	if err != nil {
		return nil, err
	}
	tlsConfig := &tls.Config{
		Certificates: []tls.Certificate{cert},
	}
	if c.CaPath != "" {
		caCert, err := ioutil.ReadFile(c.CaPath)
		if err != nil {
			return nil, err
		}
		caCertPool := x509.NewCertPool()
		// Previously a CA file containing no parseable certificates was
		// silently ignored; fail loudly instead.
		if !caCertPool.AppendCertsFromPEM(caCert) {
			return nil, fmt.Errorf("no CA certificates parsed from %s", c.CaPath)
		}
		tlsConfig.RootCAs = caCertPool
	}
	tlsConfig.BuildNameToCertificate()
	transport := &http.Transport{TLSClientConfig: tlsConfig}
	return &http.Client{
		Transport: transport,
	}, nil
}

// Setup performs no connection-level setup.
func (c *clientCertAuthenticator) Setup(con *Connection) error {
	return nil
}
// cookieAuthenticator backs CookieAuthenticator; the cookie jar on the
// returned client carries cookies set by the server between requests.
type cookieAuthenticator struct{}

// CookieAuthenticator returns an implementation of the Authenticator interface which supports
// cookie-based authentication by giving the client an in-memory cookie jar.
func CookieAuthenticator() Authenticator {
	return &cookieAuthenticator{}
}

// Authenticate is a no-op: cookies are carried by the client's jar.
func (a *cookieAuthenticator) Authenticate(req *http.Request) {}

// Client returns an http.Client with a fresh in-memory cookie jar.
func (a *cookieAuthenticator) Client() (*http.Client, error) {
	jar, err := cookiejar.New(nil)
	if err != nil {
		return nil, err
	}
	return &http.Client{Jar: jar}, nil
}

// Setup performs no connection-level setup.
// NOTE(review): no session is established here — presumably the session
// cookie is obtained elsewhere; confirm against Connection's usage.
func (a *cookieAuthenticator) Setup(con *Connection) error {
	return nil
}
// proxyAuthenticator carries the username, comma-joined roles and the
// precomputed HMAC token sent via the CouchDB proxy-auth headers.
type proxyAuthenticator struct {
	Username string
	Roles    string
	Token    string
}

// ProxyAuthenticator returns an implementation of the Authenticator interface which supports
// the proxy authentication method described in the CouchDB documentation. This should not be
// used against a production server as the proxy would be expected to set the headers in that
// case.
func ProxyAuthenticator(username string, roles []string, secret string) Authenticator {
	token := ""
	if secret != "" {
		// The token is a hex-encoded HMAC-SHA1 of the username keyed by
		// the shared proxy secret.
		mac := hmac.New(sha1.New, []byte(secret))
		io.WriteString(mac, username)
		token = fmt.Sprintf("%x", mac.Sum(nil))
	}
	return &proxyAuthenticator{
		Username: username,
		Roles:    strings.Join(roles, ","),
		Token:    token,
	}
}

// Authenticate sets the CouchDB proxy-auth headers on req. The token header
// is only sent when a secret was supplied.
func (a *proxyAuthenticator) Authenticate(req *http.Request) {
	req.Header.Set("X-Auth-CouchDB-UserName", a.Username)
	req.Header.Set("X-Auth-CouchDB-Roles", a.Roles)
	if a.Token != "" {
		req.Header.Set("X-Auth-CouchDB-Token", a.Token)
	}
}

// Client returns a plain http.Client; proxy auth keeps no client state.
func (a *proxyAuthenticator) Client() (*http.Client, error) {
	return &http.Client{}, nil
}

// Setup performs no connection-level setup.
func (a *proxyAuthenticator) Setup(con *Connection) error {
	return nil
}
Start adding a way to use password-protected certs
package sofa
import (
"crypto/hmac"
"crypto/sha1"
"crypto/tls"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/cookiejar"
"strings"
// Authentication with certificates can break if this is not included even though no methods
// are called directly.
// TODO: See if this can be controlled with tags.
_ "crypto/sha512"
)
// Authenticator is an interface for anything which can supply authentication to a CouchDB server.
// The Authenticator is given access to every request made & also allowed to perform an initial setup
// on the connection. Implementations in this package cover null, basic,
// client-certificate (optionally password-protected), cookie and proxy
// authentication.
type Authenticator interface {
	// Authenticate adds authentication to an existing http.Request.
	Authenticate(req *http.Request)
	// Client returns a client with the correct authentication setup to contact the CouchDB server.
	Client() (*http.Client, error)
	// Setup uses the provided connection to setup any authentication information which requires accessing
	// the CouchDB server.
	Setup(*Connection) error
}
// nullAuthenticator is the no-op implementation returned by NullAuthenticator.
type nullAuthenticator struct{}

// Authenticate is a no-op: the request is left unauthenticated.
func (t *nullAuthenticator) Authenticate(req *http.Request) {}

// Client returns a default http.Client with no authentication state.
func (t *nullAuthenticator) Client() (*http.Client, error) {
	return &http.Client{}, nil
}

// Setup performs no connection-level setup.
func (t *nullAuthenticator) Setup(con *Connection) error {
	return nil
}

// NullAuthenticator is an Authenticator which does no work - it implements the interface but
// does not supply any authentication information to the CouchDB server.
func NullAuthenticator() Authenticator {
	return &nullAuthenticator{}
}
// basicAuthenticator carries the credentials used for HTTP basic auth.
type basicAuthenticator struct {
	Username string
	Password string
}

// Authenticate attaches the basic-auth Authorization header to req.
func (a *basicAuthenticator) Authenticate(req *http.Request) {
	// Basic auth headers must be set for every individual request
	req.SetBasicAuth(a.Username, a.Password)
}

// Client returns a default http.Client; basic auth keeps no client state.
func (a *basicAuthenticator) Client() (*http.Client, error) {
	return &http.Client{}, nil
}

// Setup performs no connection-level setup.
func (a *basicAuthenticator) Setup(con *Connection) error {
	return nil
}

// BasicAuthenticator returns an implementation of the Authenticator interface which does HTTP basic
// authentication. If you are not using SSL then this will result in credentials being sent in plain
// text.
func BasicAuthenticator(user, pass string) Authenticator {
	return &basicAuthenticator{
		Username: user,
		Password: pass,
	}
}
// clientCertAuthenticator holds the file paths (and optional key password)
// used to build a TLS client-certificate transport.
type clientCertAuthenticator struct {
	CertPath string
	KeyPath  string
	CaPath   string
	Password string
}

// ClientCertAuthenticator provides an Authenticator which uses a client SSL certificate
// to authenticate to the couchdb server. The private key must be unencrypted;
// use ClientCertAuthenticatorPassword for password-protected keys.
func ClientCertAuthenticator(certPath, keyPath, caPath string) (Authenticator, error) {
	return &clientCertAuthenticator{
		CertPath: certPath,
		KeyPath:  keyPath,
		CaPath:   caPath,
	}, nil
}

// ClientCertAuthenticatorPassword provides an Authenticator which uses a client SSL certificate
// to authenticate to the couchdb server. This version allows the user to specify the password
// the key is encrypted with.
func ClientCertAuthenticatorPassword(certPath, keyPath, caPath, password string) (Authenticator, error) {
	return &clientCertAuthenticator{
		CertPath: certPath,
		KeyPath:  keyPath,
		CaPath:   caPath,
		Password: password,
	}, nil
}

// Authenticate is a no-op: the certificate is presented during the TLS
// handshake, not via request headers.
func (c *clientCertAuthenticator) Authenticate(req *http.Request) {}

// loadKeyPair loads the certificate/key pair, decrypting the PEM private key
// with Password when one is set.
func (c *clientCertAuthenticator) loadKeyPair() (tls.Certificate, error) {
	if c.Password == "" {
		return tls.LoadX509KeyPair(c.CertPath, c.KeyPath)
	}
	keyBytes, err := ioutil.ReadFile(c.KeyPath)
	if err != nil {
		return tls.Certificate{}, err
	}
	pemBlock, _ := pem.Decode(keyBytes)
	if pemBlock == nil {
		return tls.Certificate{}, errors.New("expecting a PEM block in encrypted private key file")
	}
	// NOTE: DecryptPEMBlock only handles legacy RFC 1423 PEM encryption.
	decBytes, err := x509.DecryptPEMBlock(pemBlock, []byte(c.Password))
	if err != nil {
		return tls.Certificate{}, err
	}
	certBytes, err := ioutil.ReadFile(c.CertPath)
	if err != nil {
		return tls.Certificate{}, err
	}
	// DecryptPEMBlock returns raw DER bytes, but X509KeyPair expects PEM
	// input, so re-encode before pairing. (The original code passed the
	// DER straight in; the resulting error was then lost to a shadowed
	// err variable, silently producing a client with an empty cert.)
	keyPEM := pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: decBytes})
	return tls.X509KeyPair(certBytes, keyPEM)
}

// Client returns an http.Client whose transport presents the client
// certificate, optionally trusting only the CAs found at CaPath.
func (c *clientCertAuthenticator) Client() (*http.Client, error) {
	cert, err := c.loadKeyPair()
	if err != nil {
		return nil, err
	}
	tlsConfig := &tls.Config{
		Certificates: []tls.Certificate{cert},
	}
	if c.CaPath != "" {
		caCert, err := ioutil.ReadFile(c.CaPath)
		if err != nil {
			return nil, err
		}
		caCertPool := x509.NewCertPool()
		// A CA file with no parseable certificates used to be silently
		// ignored; fail loudly instead.
		if !caCertPool.AppendCertsFromPEM(caCert) {
			return nil, errors.New("no CA certificates parsed from " + c.CaPath)
		}
		tlsConfig.RootCAs = caCertPool
	}
	tlsConfig.BuildNameToCertificate()
	transport := &http.Transport{TLSClientConfig: tlsConfig}
	return &http.Client{
		Transport: transport,
	}, nil
}

// Setup performs no connection-level setup.
func (c *clientCertAuthenticator) Setup(con *Connection) error {
	return nil
}
// cookieAuthenticator backs CookieAuthenticator; the cookie jar on the
// returned client carries cookies set by the server between requests.
type cookieAuthenticator struct{}

// CookieAuthenticator returns an implementation of the Authenticator interface which supports
// cookie-based authentication by giving the client an in-memory cookie jar.
func CookieAuthenticator() Authenticator {
	return &cookieAuthenticator{}
}

// Authenticate is a no-op: cookies are carried by the client's jar.
func (a *cookieAuthenticator) Authenticate(req *http.Request) {}

// Client returns an http.Client with a fresh in-memory cookie jar.
func (a *cookieAuthenticator) Client() (*http.Client, error) {
	jar, err := cookiejar.New(nil)
	if err != nil {
		return nil, err
	}
	return &http.Client{Jar: jar}, nil
}

// Setup performs no connection-level setup.
// NOTE(review): no session is established here — presumably the session
// cookie is obtained elsewhere; confirm against Connection's usage.
func (a *cookieAuthenticator) Setup(con *Connection) error {
	return nil
}
// proxyAuthenticator carries the username, comma-joined roles and the
// precomputed HMAC token sent via the CouchDB proxy-auth headers.
type proxyAuthenticator struct {
	Username string
	Roles    string
	Token    string
}

// ProxyAuthenticator returns an implementation of the Authenticator interface which supports
// the proxy authentication method described in the CouchDB documentation. This should not be
// used against a production server as the proxy would be expected to set the headers in that
// case.
func ProxyAuthenticator(username string, roles []string, secret string) Authenticator {
	token := ""
	if secret != "" {
		// The token is a hex-encoded HMAC-SHA1 of the username keyed by
		// the shared proxy secret.
		mac := hmac.New(sha1.New, []byte(secret))
		io.WriteString(mac, username)
		token = fmt.Sprintf("%x", mac.Sum(nil))
	}
	return &proxyAuthenticator{
		Username: username,
		Roles:    strings.Join(roles, ","),
		Token:    token,
	}
}

// Authenticate sets the CouchDB proxy-auth headers on req. The token header
// is only sent when a secret was supplied.
func (a *proxyAuthenticator) Authenticate(req *http.Request) {
	req.Header.Set("X-Auth-CouchDB-UserName", a.Username)
	req.Header.Set("X-Auth-CouchDB-Roles", a.Roles)
	if a.Token != "" {
		req.Header.Set("X-Auth-CouchDB-Token", a.Token)
	}
}

// Client returns a plain http.Client; proxy auth keeps no client state.
func (a *proxyAuthenticator) Client() (*http.Client, error) {
	return &http.Client{}, nil
}

// Setup performs no connection-level setup.
func (a *proxyAuthenticator) Setup(con *Connection) error {
	return nil
}
|
package main
import (
"crypto/tls"
"fmt"
"io"
"io/ioutil"
"log"
"net/url"
"os"
"strings"
"sync"
"golang.org/x/net/context"
"github.com/codegangsta/cli"
pb "github.com/creiht/formic/proto"
mb "github.com/letterj/oohhc/proto/filesystem"
"bazil.org/fuse"
"github.com/satori/go.uuid"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
// server pumps FUSE requests from the kernel connection to the filesystem
// handler, tracking in-flight requests with a WaitGroup.
type server struct {
	fs *fs
	wg sync.WaitGroup
}

// newserver returns a server that dispatches requests to fs.
func newserver(fs *fs) *server {
	return &server{fs: fs}
}
// serve reads FUSE requests from the kernel connection in a loop, handling
// each in its own goroutine. It returns nil on clean shutdown (io.EOF from
// the connection) or the first read error otherwise. All in-flight handlers
// are waited for before returning.
func (s *server) serve() error {
	defer s.wg.Wait()
	for {
		req, err := s.fs.conn.ReadRequest()
		if err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		s.wg.Add(1)
		// req is redeclared each iteration, so the closure safely
		// captures this iteration's request.
		go func() {
			defer s.wg.Done()
			s.fs.handle(req)
		}()
	}
	return nil
}
// debuglog writes msg to standard error followed by a newline.
func debuglog(msg interface{}) {
	fmt.Fprintln(os.Stderr, msg)
}
// rpc bundles a gRPC client connection with the formic API client created
// from it.
type rpc struct {
	conn *grpc.ClientConn
	api  pb.ApiClient
}

// newrpc wraps conn in an rpc with a ready-to-use API client.
func newrpc(conn *grpc.ClientConn) *rpc {
	return &rpc{
		conn: conn,
		api:  pb.NewApiClient(conn),
	}
}
// NullWriter is an io.Writer that discards everything written to it.
type NullWriter int

// Write discards p, reporting len(p) bytes written so callers honoring the
// io.Writer contract do not treat the write as short or failed. (It
// previously returned 0 with a nil error, which violates that contract.)
func (NullWriter) Write(p []byte) (int, error) { return len(p), nil }
// main wires up the cfs command-line client: a global access-token flag plus
// subcommands for managing (show/create/list/delete/update),
// access-controlling (grant/revoke/verify) and FUSE-mounting oohhc file
// systems over gRPC.
func main() {
	// Process command line arguments
	var token string
	var acctNum string
	var fsNum string
	var serverAddr string
	app := cli.NewApp()
	app.Name = "cfs"
	app.Usage = "Client used to test filesysd"
	app.Version = "0.5.0"
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:        "token, T",
			Value:       "",
			Usage:       "Access token",
			EnvVar:      "OOHHC_TOKEN_KEY",
			Destination: &token,
		},
	}
	app.Commands = []cli.Command{
		{
			// show: display a single file system.
			Name:      "show",
			Usage:     "Show a File Systems",
			ArgsUsage: "<region>://<account uuid>/<file system uuid>",
			Action: func(c *cli.Context) {
				if !c.Args().Present() {
					fmt.Println("Invalid syntax for show.")
					os.Exit(1)
				}
				if token == "" {
					fmt.Println("Token is required")
					os.Exit(1)
				}
				serverAddr, acctNum, fsNum = parseurl(c.Args().Get(0))
				if fsNum == "" {
					fmt.Println("Missing file system id")
					os.Exit(1)
				}
				conn := setupWS(serverAddr)
				ws := mb.NewFileSystemAPIClient(conn)
				result, err := ws.ShowFS(context.Background(), &mb.ShowFSRequest{Acctnum: acctNum, FSid: fsNum, Token: token})
				if err != nil {
					// NOTE(review): log.Fatalf exits immediately, so the
					// Close/Exit lines below it never run (same pattern in
					// every command).
					log.Fatalf("Bad Request: %v", err)
					conn.Close()
					os.Exit(1)
				}
				conn.Close()
				log.Printf("Result: %s\n", result.Status)
				log.Printf("SHOW Results: %s", result.Payload)
			},
		},
		{
			// create: create a file system under an account.
			Name:      "create",
			Usage:     "Create a File Systems",
			ArgsUsage: "<region>://<account uuid> -N <file system name>",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "name, N",
					Value: "",
					Usage: "Name of the file system",
				},
			},
			Action: func(c *cli.Context) {
				if !c.Args().Present() {
					fmt.Println("Invalid syntax for show.")
					os.Exit(1)
				}
				// NOTE(review): unlike show/list/update this does not
				// os.Exit(1) on a missing token — confirm intentional.
				if token == "" {
					fmt.Println("Token is required")
				}
				// For create serverAddr and acctnum are required
				serverAddr, acctNum, _ = parseurl(c.Args().Get(0))
				if c.String("name") == "" {
					fmt.Println("File system name is a required field.")
					os.Exit(1)
				}
				conn := setupWS(serverAddr)
				ws := mb.NewFileSystemAPIClient(conn)
				result, err := ws.CreateFS(context.Background(), &mb.CreateFSRequest{Acctnum: acctNum, FSName: c.String("name"), Token: token})
				if err != nil {
					log.Fatalf("Bad Request: %v", err)
					conn.Close()
					os.Exit(1)
				}
				conn.Close()
				log.Printf("Result: %s\n", result.Status)
				log.Printf("Create Results: %s", result.Payload)
			},
		},
		{
			// list: list all file systems for an account.
			Name:      "list",
			Usage:     "List File Systems for an account",
			ArgsUsage: "<region>://<account uuid>",
			Action: func(c *cli.Context) {
				if !c.Args().Present() {
					fmt.Println("Invalid syntax for list.")
					os.Exit(1)
				}
				if token == "" {
					fmt.Println("Token is required")
					os.Exit(1)
				}
				serverAddr, acctNum, _ = parseurl(c.Args().Get(0))
				conn := setupWS(serverAddr)
				ws := mb.NewFileSystemAPIClient(conn)
				result, err := ws.ListFS(context.Background(), &mb.ListFSRequest{Acctnum: acctNum, Token: token})
				if err != nil {
					log.Fatalf("Bad Request: %v", err)
					conn.Close()
					os.Exit(1)
				}
				conn.Close()
				log.Printf("Result: %s\n", result.Status)
				log.Printf("LIST Results: %s", result.Payload)
			},
		},
		{
			// delete: delete one file system.
			Name:      "delete",
			Usage:     "Delete a File Systems",
			ArgsUsage: "<region>://<account uuid>/<file system uuid>",
			Action: func(c *cli.Context) {
				if !c.Args().Present() {
					fmt.Println("Invalid syntax for delete.")
					os.Exit(1)
				}
				// NOTE(review): missing os.Exit(1) after the token check,
				// unlike show/list/update — confirm intentional.
				if token == "" {
					fmt.Println("Token is required")
				}
				serverAddr, acctNum, fsNum = parseurl(c.Args().Get(0))
				if fsNum == "" {
					fmt.Println("Missing file system id")
					os.Exit(1)
				}
				conn := setupWS(serverAddr)
				ws := mb.NewFileSystemAPIClient(conn)
				result, err := ws.DeleteFS(context.Background(), &mb.DeleteFSRequest{Acctnum: acctNum, FSid: fsNum, Token: token})
				if err != nil {
					log.Fatalf("Bad Request: %v", err)
					conn.Close()
					os.Exit(1)
				}
				conn.Close()
				log.Printf("Result: %s\n", result.Status)
				log.Printf("Delete Results: %s", result.Payload)
			},
		},
		{
			// update: modify a file system's name/status.
			Name:      "update",
			Usage:     "Update a File Systems",
			ArgsUsage: "<region>://<account uuid>/<file system uuid> -o [OPTIONS]",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "name, N",
					Value: "",
					Usage: "Name of the file system",
				},
				cli.StringFlag{
					Name:  "S, status",
					Value: "",
					Usage: "Status of the file system",
				},
			},
			Action: func(c *cli.Context) {
				if !c.Args().Present() {
					fmt.Println("Invalid syntax for update.")
					os.Exit(1)
				}
				if token == "" {
					fmt.Println("Token is required")
					os.Exit(1)
				}
				serverAddr, acctNum, fsNum = parseurl(c.Args().Get(0))
				if fsNum == "" {
					fmt.Println("Missing file system id")
					os.Exit(1)
				}
				// NOTE(review): this exits whenever -N IS provided, yet the
				// request below sends c.String("name") — the condition looks
				// inverted (a supplied name can never reach the server).
				// Confirm intended behavior.
				if c.String("name") != "" {
					fmt.Printf("Invalid File System String: %q\n", c.String("name"))
					os.Exit(1)
				}
				fsMod := &mb.ModFS{
					Name:   c.String("name"),
					Status: c.String("status"),
				}
				conn := setupWS(serverAddr)
				ws := mb.NewFileSystemAPIClient(conn)
				result, err := ws.UpdateFS(context.Background(), &mb.UpdateFSRequest{Acctnum: acctNum, FSid: fsNum, Token: token, Filesys: fsMod})
				if err != nil {
					log.Fatalf("Bad Request: %v", err)
					conn.Close()
					os.Exit(1)
				}
				conn.Close()
				log.Printf("Result: %s\n", result.Status)
				log.Printf("Update Results: %s", result.Payload)
			},
		},
		{
			// grant: allow an IP address to access a file system.
			Name:      "grant",
			Usage:     "Grant an Addr access to a File Systems",
			ArgsUsage: "<region>://<account uuid>/<file system uuid> -addr <IP Address>",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "addr",
					Value: "",
					Usage: "Address to Grant",
				},
			},
			Action: func(c *cli.Context) {
				if !c.Args().Present() {
					fmt.Println("Invalid syntax for delete.")
					os.Exit(1)
				}
				if token == "" {
					fmt.Println("Token is required")
					os.Exit(1)
				}
				if c.String("addr") == "" {
					fmt.Println("addr is required")
					os.Exit(1)
				}
				serverAddr, acctNum, fsNum = parseurl(c.Args().Get(0))
				if fsNum == "" {
					fmt.Println("Missing file system id")
					os.Exit(1)
				}
				conn := setupWS(serverAddr)
				ws := mb.NewFileSystemAPIClient(conn)
				result, err := ws.GrantAddrFS(context.Background(), &mb.GrantAddrFSRequest{Acctnum: acctNum, FSid: fsNum, Token: token, Addr: c.String("addr")})
				if err != nil {
					log.Fatalf("Bad Request: %v", err)
					conn.Close()
					os.Exit(1)
				}
				conn.Close()
				log.Printf("Result: %s\n", result.Status)
			},
		},
		{
			// revoke: remove an IP address's access to a file system.
			Name:      "revoke",
			Usage:     "Revoke an Addr's access to a File Systems",
			ArgsUsage: "<region>://<account uuid>/<file system uuid> -addr <IP Address>",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "addr",
					Value: "",
					Usage: "Address to Revoke",
				},
			},
			Action: func(c *cli.Context) {
				if !c.Args().Present() {
					fmt.Println("Invalid syntax for revoke.")
					os.Exit(1)
				}
				if token == "" {
					fmt.Println("Token is required")
					os.Exit(1)
				}
				if c.String("addr") == "" {
					fmt.Println("addr is required")
					os.Exit(1)
				}
				serverAddr, acctNum, fsNum = parseurl(c.Args().Get(0))
				if fsNum == "" {
					fmt.Println("Missing file system id")
					os.Exit(1)
				}
				conn := setupWS(serverAddr)
				ws := mb.NewFileSystemAPIClient(conn)
				result, err := ws.RevokeAddrFS(context.Background(), &mb.RevokeAddrFSRequest{Acctnum: acctNum, FSid: fsNum, Token: token, Addr: c.String("addr")})
				if err != nil {
					log.Fatalf("Bad Request: %v", err)
					conn.Close()
					os.Exit(1)
				}
				conn.Close()
				log.Printf("Result: %s\n", result.Status)
			},
		},
		{
			// verify: check whether an IP address can access a file system.
			Name:      "verify",
			Usage:     "Verify an Addr has access to a file system",
			ArgsUsage: "<region>://<account uuid>/<file system uuid> -addr <IP Address>",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "addr",
					Value: "",
					Usage: "Address to check",
				},
			},
			Action: func(c *cli.Context) {
				if !c.Args().Present() {
					fmt.Println("Invalid syntax for revoke.")
					os.Exit(1)
				}
				if c.String("addr") == "" {
					fmt.Println("addr is required")
					os.Exit(1)
				}
				// NOTE(review): parseurl returns (server, host, path); here
				// the HOST portion of the URL is used as the file system id
				// — confirm callers pass <region>://<fsid> for this command.
				serverAddr, fsNum, _ = parseurl(c.Args().Get(0))
				conn := setupWS(serverAddr)
				ws := mb.NewFileSystemAPIClient(conn)
				result, err := ws.LookupAddrFS(context.Background(), &mb.LookupAddrFSRequest{FSid: fsNum, Addr: c.String("addr")})
				if err != nil {
					log.Fatalf("Bad Request: %v", err)
					conn.Close()
					os.Exit(1)
				}
				conn.Close()
				log.Printf("Result: %s\n", result.Status)
			},
		},
		{
			// mount: FUSE-mount a file system at a local mount point.
			Name:      "mount",
			Usage:     "mount a file system",
			ArgsUsage: "<region>://<file system uuid> <[mount point> -o [OPTIONS]",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "o",
					Value: "",
					Usage: "mount options",
				},
			},
			Action: func(c *cli.Context) {
				if !c.Args().Present() {
					fmt.Println("Invalid syntax for revoke.")
					os.Exit(1)
				}
				// The URL host portion is the file system id for mount.
				serverAddr, fsNum, _ = parseurl(c.Args().Get(0))
				fsnum, err := uuid.FromString(fsNum)
				if err != nil {
					// NOTE(review): no os.Exit here — an invalid id falls
					// through and mounts with a zero UUID. Confirm intended.
					fmt.Print("File System id is not valid: ", err)
				}
				mountpoint := c.Args().Get(1)
				// check mountpoint exists
				if _, ferr := os.Stat(mountpoint); os.IsNotExist(ferr) {
					log.Printf("Mount point %s does not exist\n\n", mountpoint)
					os.Exit(1)
				}
				fusermountPath()
				// process file system options
				if c.String("o") != "" {
					clargs := getArgs(c.String("o"))
					// crapy debug log handling :)
					// Logging is silenced unless debug is explicitly
					// enabled via -o debug=... (any value except "false").
					if debug, ok := clargs["debug"]; ok {
						if debug == "false" {
							log.SetFlags(0)
							log.SetOutput(ioutil.Discard)
						}
					} else {
						log.SetFlags(0)
						log.SetOutput(ioutil.Discard)
					}
				}
				// Setup grpc
				// NOTE(review): InsecureSkipVerify disables server cert
				// verification — acceptable for test tooling only.
				var opts []grpc.DialOption
				creds := credentials.NewTLS(&tls.Config{
					InsecureSkipVerify: true,
				})
				opts = append(opts, grpc.WithTransportCredentials(creds))
				conn, err := grpc.Dial(serverAddr, opts...)
				if err != nil {
					log.Fatalf("failed to dial: %v", err)
				}
				defer conn.Close()
				// Work with fuse
				cfs, err := fuse.Mount(
					mountpoint,
					fuse.FSName("cfs"),
					fuse.Subtype("cfs"),
					fuse.LocalVolume(),
					fuse.VolumeName("CFS"),
					fuse.AllowOther(),
					fuse.DefaultPermissions(),
				)
				if err != nil {
					log.Fatal(err)
				}
				defer cfs.Close()
				rpc := newrpc(conn)
				fs := newfs(cfs, rpc, fsnum.String())
				err = fs.InitFs()
				if err != nil {
					log.Fatal(err)
				}
				srv := newserver(fs)
				// serve blocks until the kernel connection is closed
				// (i.e. the file system is unmounted).
				if err := srv.serve(); err != nil {
					log.Fatal(err)
				}
				<-cfs.Ready
				if err := cfs.MountError; err != nil {
					log.Fatal(err)
				}
			},
		},
	}
	app.Run(os.Args)
}
// getArgs parses a comma-separated mount-option string (the value passed to
// -o) into a map. Options may be bare flags ("ro", stored with an empty
// value) or key=value pairs ("debug=true"). Values may themselves contain
// '=' characters. An option with an empty key or empty value terminates the
// process, matching the original CLI behavior.
func getArgs(args string) map[string]string {
	// No options are currently mandatory; add names here to enforce some.
	requiredOptions := []string{}
	clargs := make(map[string]string)
	// process options -o
	for _, item := range strings.Split(args, ",") {
		if strings.Contains(item, "=") {
			// Split on the first '=' only so values such as "opt=a=b"
			// keep their full right-hand side (plain Split truncated it).
			kv := strings.SplitN(item, "=", 2)
			if kv[0] == "" || kv[1] == "" {
				log.Printf("Invalid option %s, %s no value\n\n", kv[0], kv[1])
				os.Exit(1)
			} else {
				clargs[kv[0]] = kv[1]
			}
		} else {
			clargs[item] = ""
		}
	}
	// Verify required options exist
	for _, v := range requiredOptions {
		_, ok := clargs[v]
		if !ok {
			log.Printf("%s is a required option", v)
			os.Exit(1)
		}
	}
	return clargs
}
func fusermountPath() {
// Grab the current path
currentPath := os.Getenv("PATH")
if len(currentPath) == 0 {
// using mount seem to not have a path
// fusermount is in /bin
os.Setenv("PATH", "/bin")
}
}
// setupWS dials the filesystem API endpoint over TLS and returns the gRPC
// client connection; dial failures are fatal. The caller owns the
// connection and must Close it.
// NOTE(review): InsecureSkipVerify disables server certificate
// verification, leaving the connection open to MITM — confirm this is
// intentional for test environments only.
func setupWS(svr string) *grpc.ClientConn {
	var opts []grpc.DialOption
	creds := credentials.NewTLS(&tls.Config{
		InsecureSkipVerify: true,
	})
	opts = append(opts, grpc.WithTransportCredentials(creds))
	conn, err := grpc.Dial(svr, opts...)
	if err != nil {
		log.Fatalf("failed to dial: %v", err)
	}
	return conn
}
// parseurl maps a cfs URL of the form <region>://<account>/<fsid> onto
// (server address, account id, file system id). The region scheme selects a
// hard-coded API endpoint. Parse failures, unknown regions and a missing
// host terminate the process. The stray debug print of the server address
// to stdout has been removed.
func parseurl(urlstr string) (string, string, string) {
	u, err := url.Parse(urlstr)
	if err != nil {
		fmt.Printf("Url parse error: %v\n", err)
		os.Exit(1)
	}
	var srv string
	switch u.Scheme {
	case "aio":
		srv = "127.0.0.1:8448"
	case "iad":
		srv = "api.ea.iad.rackfs.com:8448"
	default:
		fmt.Printf("Invalid region %s\n", u.Scheme)
		os.Exit(1)
	}
	if u.Host == "" {
		fmt.Println("Invalid URL no account or file system id")
		os.Exit(1)
	}
	// Strip the leading '/' from the path to get the bare file system id.
	if u.Path != "" {
		return srv, u.Host, u.Path[1:]
	}
	return srv, u.Host, u.Path
}
reset aio port to 8445
package main
import (
"crypto/tls"
"fmt"
"io"
"io/ioutil"
"log"
"net/url"
"os"
"strings"
"sync"
"golang.org/x/net/context"
"github.com/codegangsta/cli"
pb "github.com/creiht/formic/proto"
mb "github.com/letterj/oohhc/proto/filesystem"
"bazil.org/fuse"
"github.com/satori/go.uuid"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
// server dispatches FUSE requests from the kernel connection to the
// filesystem handler; wg tracks requests still being handled.
type server struct {
	fs *fs
	wg sync.WaitGroup
}

// newserver constructs a server bound to the given filesystem.
func newserver(fs *fs) *server {
	return &server{fs: fs}
}
// serve reads FUSE requests from the kernel connection in a loop, handling
// each in its own goroutine. It returns nil on clean shutdown (io.EOF from
// the connection) or the first read error otherwise. All in-flight handlers
// are waited for before returning.
func (s *server) serve() error {
	defer s.wg.Wait()
	for {
		req, err := s.fs.conn.ReadRequest()
		if err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		s.wg.Add(1)
		// req is redeclared each iteration, so the closure safely
		// captures this iteration's request.
		go func() {
			defer s.wg.Done()
			s.fs.handle(req)
		}()
	}
	return nil
}
// debuglog prints msg to standard error, terminated by a newline.
func debuglog(msg interface{}) {
	fmt.Fprintln(os.Stderr, msg)
}
// rpc pairs a gRPC client connection with the formic API client built on
// top of it.
type rpc struct {
	conn *grpc.ClientConn
	api  pb.ApiClient
}

// newrpc returns an rpc wrapping conn with a ready API client.
func newrpc(conn *grpc.ClientConn) *rpc {
	return &rpc{
		conn: conn,
		api:  pb.NewApiClient(conn),
	}
}
// NullWriter is an io.Writer that discards everything written to it.
type NullWriter int

// Write discards p, reporting len(p) bytes written so callers honoring the
// io.Writer contract do not treat the write as short or failed. (It
// previously returned 0 with a nil error, which violates that contract.)
func (NullWriter) Write(p []byte) (int, error) { return len(p), nil }
// main wires up the "cfs" command-line client. It registers one sub-command
// per file system operation (show, create, list, delete, update, grant,
// revoke, verify, mount) against the filesysd gRPC API and dispatches based
// on os.Args.
func main() {
	// Shared state: token comes from the global flag/env var; the others are
	// filled in per-command from the positional URL argument.
	var token string
	var acctNum string
	var fsNum string
	var serverAddr string
	app := cli.NewApp()
	app.Name = "cfs"
	app.Usage = "Client used to test filesysd"
	app.Version = "0.5.0"
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:        "token, T",
			Value:       "",
			Usage:       "Access token",
			EnvVar:      "OOHHC_TOKEN_KEY",
			Destination: &token,
		},
	}
	app.Commands = []cli.Command{
		{
			Name:      "show",
			Usage:     "Show a File Systems",
			ArgsUsage: "<region>://<account uuid>/<file system uuid>",
			Action: func(c *cli.Context) {
				if !c.Args().Present() {
					fmt.Println("Invalid syntax for show.")
					os.Exit(1)
				}
				if token == "" {
					fmt.Println("Token is required")
					os.Exit(1)
				}
				serverAddr, acctNum, fsNum = parseurl(c.Args().Get(0))
				if fsNum == "" {
					fmt.Println("Missing file system id")
					os.Exit(1)
				}
				conn := setupWS(serverAddr)
				ws := mb.NewFileSystemAPIClient(conn)
				result, err := ws.ShowFS(context.Background(), &mb.ShowFSRequest{Acctnum: acctNum, FSid: fsNum, Token: token})
				if err != nil {
					// Close before log.Fatalf terminates the process; the
					// original closed after Fatalf, which never runs.
					conn.Close()
					log.Fatalf("Bad Request: %v", err)
				}
				conn.Close()
				log.Printf("Result: %s\n", result.Status)
				log.Printf("SHOW Results: %s", result.Payload)
			},
		},
		{
			Name:      "create",
			Usage:     "Create a File Systems",
			ArgsUsage: "<region>://<account uuid> -N <file system name>",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "name, N",
					Value: "",
					Usage: "Name of the file system",
				},
			},
			Action: func(c *cli.Context) {
				if !c.Args().Present() {
					fmt.Println("Invalid syntax for create.")
					os.Exit(1)
				}
				if token == "" {
					fmt.Println("Token is required")
					// The original fell through and sent an empty token;
					// abort like every other command does.
					os.Exit(1)
				}
				// For create, serverAddr and acctNum are required.
				serverAddr, acctNum, _ = parseurl(c.Args().Get(0))
				if c.String("name") == "" {
					fmt.Println("File system name is a required field.")
					os.Exit(1)
				}
				conn := setupWS(serverAddr)
				ws := mb.NewFileSystemAPIClient(conn)
				result, err := ws.CreateFS(context.Background(), &mb.CreateFSRequest{Acctnum: acctNum, FSName: c.String("name"), Token: token})
				if err != nil {
					conn.Close()
					log.Fatalf("Bad Request: %v", err)
				}
				conn.Close()
				log.Printf("Result: %s\n", result.Status)
				log.Printf("Create Results: %s", result.Payload)
			},
		},
		{
			Name:      "list",
			Usage:     "List File Systems for an account",
			ArgsUsage: "<region>://<account uuid>",
			Action: func(c *cli.Context) {
				if !c.Args().Present() {
					fmt.Println("Invalid syntax for list.")
					os.Exit(1)
				}
				if token == "" {
					fmt.Println("Token is required")
					os.Exit(1)
				}
				serverAddr, acctNum, _ = parseurl(c.Args().Get(0))
				conn := setupWS(serverAddr)
				ws := mb.NewFileSystemAPIClient(conn)
				result, err := ws.ListFS(context.Background(), &mb.ListFSRequest{Acctnum: acctNum, Token: token})
				if err != nil {
					conn.Close()
					log.Fatalf("Bad Request: %v", err)
				}
				conn.Close()
				log.Printf("Result: %s\n", result.Status)
				log.Printf("LIST Results: %s", result.Payload)
			},
		},
		{
			Name:      "delete",
			Usage:     "Delete a File Systems",
			ArgsUsage: "<region>://<account uuid>/<file system uuid>",
			Action: func(c *cli.Context) {
				if !c.Args().Present() {
					fmt.Println("Invalid syntax for delete.")
					os.Exit(1)
				}
				if token == "" {
					fmt.Println("Token is required")
					// Abort instead of deleting with an empty token.
					os.Exit(1)
				}
				serverAddr, acctNum, fsNum = parseurl(c.Args().Get(0))
				if fsNum == "" {
					fmt.Println("Missing file system id")
					os.Exit(1)
				}
				conn := setupWS(serverAddr)
				ws := mb.NewFileSystemAPIClient(conn)
				result, err := ws.DeleteFS(context.Background(), &mb.DeleteFSRequest{Acctnum: acctNum, FSid: fsNum, Token: token})
				if err != nil {
					conn.Close()
					log.Fatalf("Bad Request: %v", err)
				}
				conn.Close()
				log.Printf("Result: %s\n", result.Status)
				log.Printf("Delete Results: %s", result.Payload)
			},
		},
		{
			Name:      "update",
			Usage:     "Update a File Systems",
			ArgsUsage: "<region>://<account uuid>/<file system uuid> -o [OPTIONS]",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "name, N",
					Value: "",
					Usage: "Name of the file system",
				},
				cli.StringFlag{
					Name:  "S, status",
					Value: "",
					Usage: "Status of the file system",
				},
			},
			Action: func(c *cli.Context) {
				if !c.Args().Present() {
					fmt.Println("Invalid syntax for update.")
					os.Exit(1)
				}
				if token == "" {
					fmt.Println("Token is required")
					os.Exit(1)
				}
				serverAddr, acctNum, fsNum = parseurl(c.Args().Get(0))
				if fsNum == "" {
					fmt.Println("Missing file system id")
					os.Exit(1)
				}
				// NOTE(review): this rejects any non-empty -N value and then
				// builds ModFS with the (necessarily empty) name, so names can
				// never actually be updated. The condition looks inverted;
				// behavior kept as-is — confirm intent before changing.
				if c.String("name") != "" {
					fmt.Printf("Invalid File System String: %q\n", c.String("name"))
					os.Exit(1)
				}
				fsMod := &mb.ModFS{
					Name:   c.String("name"),
					Status: c.String("status"),
				}
				conn := setupWS(serverAddr)
				ws := mb.NewFileSystemAPIClient(conn)
				result, err := ws.UpdateFS(context.Background(), &mb.UpdateFSRequest{Acctnum: acctNum, FSid: fsNum, Token: token, Filesys: fsMod})
				if err != nil {
					conn.Close()
					log.Fatalf("Bad Request: %v", err)
				}
				conn.Close()
				log.Printf("Result: %s\n", result.Status)
				log.Printf("Update Results: %s", result.Payload)
			},
		},
		{
			Name:      "grant",
			Usage:     "Grant an Addr access to a File Systems",
			ArgsUsage: "<region>://<account uuid>/<file system uuid> -addr <IP Address>",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "addr",
					Value: "",
					Usage: "Address to Grant",
				},
			},
			Action: func(c *cli.Context) {
				if !c.Args().Present() {
					fmt.Println("Invalid syntax for grant.")
					os.Exit(1)
				}
				if token == "" {
					fmt.Println("Token is required")
					os.Exit(1)
				}
				if c.String("addr") == "" {
					fmt.Println("addr is required")
					os.Exit(1)
				}
				serverAddr, acctNum, fsNum = parseurl(c.Args().Get(0))
				if fsNum == "" {
					fmt.Println("Missing file system id")
					os.Exit(1)
				}
				conn := setupWS(serverAddr)
				ws := mb.NewFileSystemAPIClient(conn)
				result, err := ws.GrantAddrFS(context.Background(), &mb.GrantAddrFSRequest{Acctnum: acctNum, FSid: fsNum, Token: token, Addr: c.String("addr")})
				if err != nil {
					conn.Close()
					log.Fatalf("Bad Request: %v", err)
				}
				conn.Close()
				log.Printf("Result: %s\n", result.Status)
			},
		},
		{
			Name:      "revoke",
			Usage:     "Revoke an Addr's access to a File Systems",
			ArgsUsage: "<region>://<account uuid>/<file system uuid> -addr <IP Address>",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "addr",
					Value: "",
					Usage: "Address to Revoke",
				},
			},
			Action: func(c *cli.Context) {
				if !c.Args().Present() {
					fmt.Println("Invalid syntax for revoke.")
					os.Exit(1)
				}
				if token == "" {
					fmt.Println("Token is required")
					os.Exit(1)
				}
				if c.String("addr") == "" {
					fmt.Println("addr is required")
					os.Exit(1)
				}
				serverAddr, acctNum, fsNum = parseurl(c.Args().Get(0))
				if fsNum == "" {
					fmt.Println("Missing file system id")
					os.Exit(1)
				}
				conn := setupWS(serverAddr)
				ws := mb.NewFileSystemAPIClient(conn)
				result, err := ws.RevokeAddrFS(context.Background(), &mb.RevokeAddrFSRequest{Acctnum: acctNum, FSid: fsNum, Token: token, Addr: c.String("addr")})
				if err != nil {
					conn.Close()
					log.Fatalf("Bad Request: %v", err)
				}
				conn.Close()
				log.Printf("Result: %s\n", result.Status)
			},
		},
		{
			Name:      "verify",
			Usage:     "Verify an Addr has access to a file system",
			ArgsUsage: "<region>://<account uuid>/<file system uuid> -addr <IP Address>",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "addr",
					Value: "",
					Usage: "Address to check",
				},
			},
			Action: func(c *cli.Context) {
				if !c.Args().Present() {
					fmt.Println("Invalid syntax for verify.")
					os.Exit(1)
				}
				if c.String("addr") == "" {
					fmt.Println("addr is required")
					os.Exit(1)
				}
				// Per ArgsUsage the file system id is the path component;
				// the original mistakenly used the host (account) part.
				serverAddr, _, fsNum = parseurl(c.Args().Get(0))
				if fsNum == "" {
					fmt.Println("Missing file system id")
					os.Exit(1)
				}
				conn := setupWS(serverAddr)
				ws := mb.NewFileSystemAPIClient(conn)
				result, err := ws.LookupAddrFS(context.Background(), &mb.LookupAddrFSRequest{FSid: fsNum, Addr: c.String("addr")})
				if err != nil {
					conn.Close()
					log.Fatalf("Bad Request: %v", err)
				}
				conn.Close()
				log.Printf("Result: %s\n", result.Status)
			},
		},
		{
			Name:      "mount",
			Usage:     "mount a file system",
			ArgsUsage: "<region>://<file system uuid> <[mount point> -o [OPTIONS]",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "o",
					Value: "",
					Usage: "mount options",
				},
			},
			Action: func(c *cli.Context) {
				if !c.Args().Present() {
					fmt.Println("Invalid syntax for mount.")
					os.Exit(1)
				}
				// For mount the URL host component is the file system id.
				serverAddr, fsNum, _ = parseurl(c.Args().Get(0))
				fsnum, err := uuid.FromString(fsNum)
				if err != nil {
					// Abort instead of carrying on with a zero-value UUID.
					fmt.Print("File System id is not valid: ", err)
					os.Exit(1)
				}
				mountpoint := c.Args().Get(1)
				// The mount point must already exist.
				if _, ferr := os.Stat(mountpoint); os.IsNotExist(ferr) {
					log.Printf("Mount point %s does not exist\n\n", mountpoint)
					os.Exit(1)
				}
				fusermountPath()
				// Process file system options; logging is silenced unless
				// "debug" is present and not "false".
				if c.String("o") != "" {
					clargs := getArgs(c.String("o"))
					if debug, ok := clargs["debug"]; ok {
						if debug == "false" {
							log.SetFlags(0)
							log.SetOutput(ioutil.Discard)
						}
					} else {
						log.SetFlags(0)
						log.SetOutput(ioutil.Discard)
					}
				}
				// Set up the gRPC connection (TLS, verification skipped).
				var opts []grpc.DialOption
				creds := credentials.NewTLS(&tls.Config{
					InsecureSkipVerify: true,
				})
				opts = append(opts, grpc.WithTransportCredentials(creds))
				conn, err := grpc.Dial(serverAddr, opts...)
				if err != nil {
					log.Fatalf("failed to dial: %v", err)
				}
				defer conn.Close()
				// Mount via FUSE and serve requests until unmounted.
				cfs, err := fuse.Mount(
					mountpoint,
					fuse.FSName("cfs"),
					fuse.Subtype("cfs"),
					fuse.LocalVolume(),
					fuse.VolumeName("CFS"),
					fuse.AllowOther(),
					fuse.DefaultPermissions(),
				)
				if err != nil {
					log.Fatal(err)
				}
				defer cfs.Close()
				rpc := newrpc(conn)
				fs := newfs(cfs, rpc, fsnum.String())
				err = fs.InitFs()
				if err != nil {
					log.Fatal(err)
				}
				srv := newserver(fs)
				if err := srv.serve(); err != nil {
					log.Fatal(err)
				}
				// Wait for the kernel mount to complete and surface any error.
				<-cfs.Ready
				if err := cfs.MountError; err != nil {
					log.Fatal(err)
				}
			},
		},
	}
	app.Run(os.Args)
}
// getArgs parses a comma-separated "-o" option string into a key/value map.
// Entries of the form "k=v" map k to v; bare entries map to the empty
// string. An entry with an empty key or empty value aborts the program, as
// does a missing required option.
func getArgs(args string) map[string]string {
	parsed := make(map[string]string)
	requiredOptions := []string{}
	for _, opt := range strings.Split(args, ",") {
		if !strings.Contains(opt, "=") {
			parsed[opt] = ""
			continue
		}
		kv := strings.Split(opt, "=")
		if kv[0] == "" || kv[1] == "" {
			log.Printf("Invalid option %s, %s no value\n\n", kv[0], kv[1])
			os.Exit(1)
		}
		parsed[kv[0]] = kv[1]
	}
	// Verify required options exist (none are currently required).
	for _, name := range requiredOptions {
		if _, found := parsed[name]; !found {
			log.Printf("%s is a required option", name)
			os.Exit(1)
		}
	}
	return parsed
}
func fusermountPath() {
// Grab the current path
currentPath := os.Getenv("PATH")
if len(currentPath) == 0 {
// using mount seem to not have a path
// fusermount is in /bin
os.Setenv("PATH", "/bin")
}
}
// setupWS dials the gRPC endpoint svr over TLS (certificate verification is
// skipped) and returns the live connection; a dial failure is fatal.
func setupWS(svr string) *grpc.ClientConn {
	creds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true})
	dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(creds)}
	conn, err := grpc.Dial(svr, dialOpts...)
	if err != nil {
		log.Fatalf("failed to dial: %v", err)
	}
	return conn
}
// parseurl splits a cfs URL of the form <region>://<host>[/<path>] into
// (server address, host, path). The region scheme selects the API server
// address ("aio" is local, "iad" is the IAD endpoint); an unparsable URL,
// unknown region, or empty host is fatal. Depending on the command, host is
// an account id (with path a file system id) or a file system id itself.
func parseurl(urlstr string) (string, string, string) {
	var srv string
	u, err := url.Parse(urlstr)
	if err != nil {
		fmt.Printf("Url parse error: %v\n", err)
		os.Exit(1)
	}
	switch u.Scheme {
	case "aio":
		srv = "127.0.0.1:8445"
	case "iad":
		srv = "api.ea.iad.rackfs.com:8448"
	default:
		fmt.Printf("Invalid region %s\n", u.Scheme)
		os.Exit(1)
	}
	if u.Host == "" {
		fmt.Println("Invalid URL no account or file system id")
		os.Exit(1)
	}
	// The stray debug fmt.Println(srv) has been removed: it printed the
	// server address to stdout on every command.
	if u.Path != "" {
		// Strip the leading "/" from the path component.
		return srv, u.Host, u.Path[1:]
	}
	return srv, u.Host, u.Path
}
|
// Copyright 2015 Square Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transform
import (
"fmt"
"math"
"strconv"
"github.com/square/metrics/api"
"github.com/square/metrics/function"
)
// Timeshift (transform.timeshift) evaluates its first argument over a
// timerange shifted by the duration given as its second argument, then
// stamps the result with the original timerange so the shifted series
// lines up against the rest of the query.
var Timeshift = function.MetricFunction{
	Name:         "transform.timeshift",
	MinArguments: 2,
	MaxArguments: 2,
	Compute: func(context function.EvaluationContext, arguments []function.Expression, groups function.Groups) (function.Value, error) {
		// Second argument: the shift duration.
		value, err := arguments[1].Evaluate(context)
		if err != nil {
			return nil, err
		}
		duration, err := value.ToDuration()
		if err != nil {
			return nil, err
		}
		// Evaluate the first argument over the shifted timerange.
		newContext := context
		newContext.Timerange = newContext.Timerange.Shift(duration)
		result, err := arguments[0].Evaluate(newContext)
		if err != nil {
			return nil, err
		}
		if seriesValue, ok := result.(api.SeriesList); ok {
			// Re-label the series with the original (unshifted) timerange
			// and a query string describing this transform.
			seriesValue.Timerange = context.Timerange
			seriesValue.Query = fmt.Sprintf("transform.timeshift(%s,%s)", result.GetName(), value.GetName())
			seriesValue.Name = seriesValue.Query
			return seriesValue, nil
		}
		// Non-series results pass through unchanged.
		return result, nil
	},
}
// MovingAverage (transform.moving_average) replaces each point of a series
// with the mean of the samples in the trailing window whose width is given
// by the duration argument. Extra data is fetched before the start of the
// timerange so early points have a full window; NaN samples are excluded
// from the running sum and count.
var MovingAverage = function.MetricFunction{
	Name:         "transform.moving_average",
	MinArguments: 2,
	MaxArguments: 2,
	Compute: func(context function.EvaluationContext, arguments []function.Expression, groups function.Groups) (function.Value, error) {
		// Applying a similar trick as did TimeshiftFunction. It fetches data prior to the start of the timerange.
		sizeValue, err := arguments[1].Evaluate(context)
		if err != nil {
			return nil, err
		}
		size, err := sizeValue.ToDuration()
		if err != nil {
			return nil, err
		}
		limit := int(float64(size)/float64(context.Timerange.Resolution()) + 0.5) // Limit is the number of items to include in the average
		if limit < 1 {
			// At least one value must be included at all times
			limit = 1
		}
		// Widen the queried timerange to the left by (limit-1) samples.
		newContext := context
		timerange := context.Timerange
		newContext.Timerange, err = api.NewSnappedTimerange(timerange.Start()-int64(limit-1)*timerange.ResolutionMillis(), timerange.End(), timerange.ResolutionMillis())
		if err != nil {
			return nil, err
		}
		// The new context has a timerange which is extended beyond the query's.
		listValue, err := arguments[0].Evaluate(newContext)
		if err != nil {
			return nil, err
		}
		// This value must be a SeriesList.
		list, err := listValue.ToSeriesList(newContext.Timerange)
		if err != nil {
			return nil, err
		}
		// The timerange must be reverted.
		list.Timerange = context.Timerange
		// Update each series in the list.
		for index, series := range list.Series {
			// The series will be given a (shorter) replaced list of values.
			results := make([]float64, context.Timerange.Slots())
			// Running NaN-aware window sum and element count.
			count := 0
			sum := 0.0
			for i := range series.Values {
				// Add the new element, if it isn't NaN.
				if !math.IsNaN(series.Values[i]) {
					sum += series.Values[i]
					count++
				}
				// Remove the oldest element, if it isn't NaN, and it's in range.
				// (e.g., if limit = 1, then this removes the previous element from the sum).
				if i >= limit && !math.IsNaN(series.Values[i-limit]) {
					sum -= series.Values[i-limit]
					count--
				}
				// Numerical error could (possibly) cause count == 0 but sum != 0.
				if i-limit+1 >= 0 {
					if count == 0 {
						results[i-limit+1] = math.NaN()
					} else {
						results[i-limit+1] = sum / float64(count)
					}
				}
			}
			list.Series[index].Values = results
		}
		list.Query = fmt.Sprintf("transform.moving_average(%s, %s)", listValue.GetName(), sizeValue.GetName())
		list.Name = list.Query
		return list, nil
	},
}
// Alias (transform.alias) returns its series-list argument renamed to the
// string given as the second argument.
var Alias = function.MetricFunction{
	Name:         "transform.alias",
	MinArguments: 2,
	MaxArguments: 2,
	Compute: func(context function.EvaluationContext, arguments []function.Expression, groups function.Groups) (function.Value, error) {
		value, err := arguments[0].Evaluate(context)
		if err != nil {
			return nil, err
		}
		list, err := value.ToSeriesList(context.Timerange)
		if err != nil {
			return nil, err
		}
		// Second argument: the new name.
		nameValue, err := arguments[1].Evaluate(context)
		if err != nil {
			return nil, err
		}
		name, err := nameValue.ToString()
		if err != nil {
			return nil, err
		}
		list.Name = name
		// Record the originating expression, quoting the alias string.
		list.Query = fmt.Sprintf("transform.alias(%s, %s)", value.GetName(), strconv.Quote(name))
		return list, nil
	},
}
// Derivative is special because it needs to get one extra data point to the
// left. This transform estimates the "change per second" between each pair
// of consecutive samples (scaled consecutive difference).
var Derivative = newDerivativeBasedTransform("derivative", derivative)

// derivative returns the scaled consecutive differences of values:
// result[i] = (values[i+1] - values[i]) / scale. The result is one element
// shorter than the input. parameters is accepted to satisfy the transform
// signature but is unused.
func derivative(values []float64, parameters []function.Value, scale float64) ([]float64, error) {
	// Guard against empty input: make([]float64, len(values)-1) would
	// panic with a negative length.
	if len(values) == 0 {
		return []float64{}, nil
	}
	result := make([]float64, len(values)-1)
	for i := 1; i < len(values); i++ {
		// Scaled difference between sample i-1 and sample i.
		result[i-1] = (values[i] - values[i-1]) / scale
	}
	return result, nil
}
// Rate is special because it needs to get one extra data point to the left.
// It functions mostly like Derivative but bounds each result below by zero.
// It is designed for strictly increasing counters that only decrease when
// reset to zero: it returns consecutive differences that are at least 0, or
// math.Max of the newly reported value and 0, scaled.
var Rate = newDerivativeBasedTransform("rate", rate)

// rate returns the scaled consecutive differences of values, clamped to be
// non-negative; a negative difference is treated as a counter reset. The
// result is one element shorter than the input. parameters is unused.
func rate(values []float64, parameters []function.Value, scale float64) ([]float64, error) {
	// Guard against empty input: make([]float64, len(values)-1) would
	// panic with a negative length.
	if len(values) == 0 {
		return []float64{}, nil
	}
	result := make([]float64, len(values)-1)
	for i := 1; i < len(values); i++ {
		// Scaled difference between sample i-1 and sample i.
		result[i-1] = (values[i] - values[i-1]) / scale
		if result[i-1] < 0 {
			// The counter reset between i-1 and i, so at least values[i]
			// increments happened; use that as the best approximation of
			// the delta.
			result[i-1] = math.Max(values[i], 0) / scale
		}
	}
	return result, nil
}
// newDerivativeBasedTransform returns a function.MetricFunction named
// "transform.<name>" that computes a delta between consecutive data points.
// The queried timerange is widened by one sample to the left so the first
// output point has a predecessor. transformer must return exactly one fewer
// value than it is given for each series; this is verified after applying it.
func newDerivativeBasedTransform(name string, transformer transform) function.MetricFunction {
	return function.MetricFunction{
		Name:         "transform." + name,
		MinArguments: 1,
		MaxArguments: 1,
		Compute: func(context function.EvaluationContext, arguments []function.Expression, groups function.Groups) (function.Value, error) {
			var err error
			// Calculate the new timerange to include one extra point to the left.
			newContext := context
			timerange := context.Timerange
			newContext.Timerange, err = api.NewSnappedTimerange(timerange.Start()-timerange.ResolutionMillis(), timerange.End(), timerange.ResolutionMillis())
			if err != nil {
				return nil, err
			}
			// The new context has a timerange which is extended beyond the query's.
			listValue, err := arguments[0].Evaluate(newContext)
			if err != nil {
				return nil, err
			}
			// This value must be a SeriesList.
			list, err := listValue.ToSeriesList(newContext.Timerange)
			if err != nil {
				return nil, err
			}
			// Reset the timerange to the one originally requested.
			list.Timerange = context.Timerange
			result, err := ApplyTransform(list, transformer, []function.Value{})
			if err != nil {
				return nil, err
			}
			// Validate our series are the correct length.
			for i := range result.Series {
				if len(result.Series[i].Values) != len(list.Series[i].Values)-1 {
					return nil, fmt.Errorf("Expected transform to return %d values, received %d", len(list.Series[i].Values)-1, len(result.Series[i].Values))
				}
			}
			result.Query = fmt.Sprintf("transform.%s(%s)", name, listValue.GetName())
			result.Name = result.Query
			return result, nil
		},
	}
}
Add comment about the changing behavior of Rate
// Copyright 2015 Square Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transform
import (
"fmt"
"math"
"strconv"
"github.com/square/metrics/api"
"github.com/square/metrics/function"
)
// Timeshift (transform.timeshift) evaluates its first argument over a
// timerange shifted by the duration given as its second argument, then
// stamps the result with the original timerange so the shifted series
// lines up against the rest of the query.
var Timeshift = function.MetricFunction{
	Name:         "transform.timeshift",
	MinArguments: 2,
	MaxArguments: 2,
	Compute: func(context function.EvaluationContext, arguments []function.Expression, groups function.Groups) (function.Value, error) {
		// Second argument: the shift duration.
		value, err := arguments[1].Evaluate(context)
		if err != nil {
			return nil, err
		}
		duration, err := value.ToDuration()
		if err != nil {
			return nil, err
		}
		// Evaluate the first argument over the shifted timerange.
		newContext := context
		newContext.Timerange = newContext.Timerange.Shift(duration)
		result, err := arguments[0].Evaluate(newContext)
		if err != nil {
			return nil, err
		}
		if seriesValue, ok := result.(api.SeriesList); ok {
			// Re-label the series with the original (unshifted) timerange
			// and a query string describing this transform.
			seriesValue.Timerange = context.Timerange
			seriesValue.Query = fmt.Sprintf("transform.timeshift(%s,%s)", result.GetName(), value.GetName())
			seriesValue.Name = seriesValue.Query
			return seriesValue, nil
		}
		// Non-series results pass through unchanged.
		return result, nil
	},
}
// MovingAverage (transform.moving_average) replaces each point of a series
// with the mean of the samples in the trailing window whose width is given
// by the duration argument. Extra data is fetched before the start of the
// timerange so early points have a full window; NaN samples are excluded
// from the running sum and count.
var MovingAverage = function.MetricFunction{
	Name:         "transform.moving_average",
	MinArguments: 2,
	MaxArguments: 2,
	Compute: func(context function.EvaluationContext, arguments []function.Expression, groups function.Groups) (function.Value, error) {
		// Applying a similar trick as did TimeshiftFunction. It fetches data prior to the start of the timerange.
		sizeValue, err := arguments[1].Evaluate(context)
		if err != nil {
			return nil, err
		}
		size, err := sizeValue.ToDuration()
		if err != nil {
			return nil, err
		}
		limit := int(float64(size)/float64(context.Timerange.Resolution()) + 0.5) // Limit is the number of items to include in the average
		if limit < 1 {
			// At least one value must be included at all times
			limit = 1
		}
		// Widen the queried timerange to the left by (limit-1) samples.
		newContext := context
		timerange := context.Timerange
		newContext.Timerange, err = api.NewSnappedTimerange(timerange.Start()-int64(limit-1)*timerange.ResolutionMillis(), timerange.End(), timerange.ResolutionMillis())
		if err != nil {
			return nil, err
		}
		// The new context has a timerange which is extended beyond the query's.
		listValue, err := arguments[0].Evaluate(newContext)
		if err != nil {
			return nil, err
		}
		// This value must be a SeriesList.
		list, err := listValue.ToSeriesList(newContext.Timerange)
		if err != nil {
			return nil, err
		}
		// The timerange must be reverted.
		list.Timerange = context.Timerange
		// Update each series in the list.
		for index, series := range list.Series {
			// The series will be given a (shorter) replaced list of values.
			results := make([]float64, context.Timerange.Slots())
			// Running NaN-aware window sum and element count.
			count := 0
			sum := 0.0
			for i := range series.Values {
				// Add the new element, if it isn't NaN.
				if !math.IsNaN(series.Values[i]) {
					sum += series.Values[i]
					count++
				}
				// Remove the oldest element, if it isn't NaN, and it's in range.
				// (e.g., if limit = 1, then this removes the previous element from the sum).
				if i >= limit && !math.IsNaN(series.Values[i-limit]) {
					sum -= series.Values[i-limit]
					count--
				}
				// Numerical error could (possibly) cause count == 0 but sum != 0.
				if i-limit+1 >= 0 {
					if count == 0 {
						results[i-limit+1] = math.NaN()
					} else {
						results[i-limit+1] = sum / float64(count)
					}
				}
			}
			list.Series[index].Values = results
		}
		list.Query = fmt.Sprintf("transform.moving_average(%s, %s)", listValue.GetName(), sizeValue.GetName())
		list.Name = list.Query
		return list, nil
	},
}
// Alias (transform.alias) returns its series-list argument renamed to the
// string given as the second argument.
var Alias = function.MetricFunction{
	Name:         "transform.alias",
	MinArguments: 2,
	MaxArguments: 2,
	Compute: func(context function.EvaluationContext, arguments []function.Expression, groups function.Groups) (function.Value, error) {
		value, err := arguments[0].Evaluate(context)
		if err != nil {
			return nil, err
		}
		list, err := value.ToSeriesList(context.Timerange)
		if err != nil {
			return nil, err
		}
		// Second argument: the new name.
		nameValue, err := arguments[1].Evaluate(context)
		if err != nil {
			return nil, err
		}
		name, err := nameValue.ToString()
		if err != nil {
			return nil, err
		}
		list.Name = name
		// Record the originating expression, quoting the alias string.
		list.Query = fmt.Sprintf("transform.alias(%s, %s)", value.GetName(), strconv.Quote(name))
		return list, nil
	},
}
// Derivative is special because it needs to get one extra data point to the
// left. This transform estimates the "change per second" between each pair
// of consecutive samples (scaled consecutive difference).
var Derivative = newDerivativeBasedTransform("derivative", derivative)

// derivative returns the scaled consecutive differences of values:
// result[i] = (values[i+1] - values[i]) / scale. The result is one element
// shorter than the input. parameters is accepted to satisfy the transform
// signature but is unused.
func derivative(values []float64, parameters []function.Value, scale float64) ([]float64, error) {
	// Guard against empty input: make([]float64, len(values)-1) would
	// panic with a negative length.
	if len(values) == 0 {
		return []float64{}, nil
	}
	result := make([]float64, len(values)-1)
	for i := 1; i < len(values); i++ {
		// Scaled difference between sample i-1 and sample i.
		result[i-1] = (values[i] - values[i-1]) / scale
	}
	return result, nil
}
// Rate is special because it needs to get one extra data point to the left.
// It functions mostly like Derivative but bounds each result below by zero.
// It is designed for strictly increasing counters that only decrease when
// reset to zero: it returns consecutive differences that are at least 0, or
// math.Max of the newly reported value and 0, scaled.
var Rate = newDerivativeBasedTransform("rate", rate)

// rate returns the scaled consecutive differences of values, clamped to be
// non-negative; a negative difference is treated as a counter reset. The
// result is one element shorter than the input. parameters is unused.
func rate(values []float64, parameters []function.Value, scale float64) ([]float64, error) {
	// Guard against empty input: make([]float64, len(values)-1) would
	// panic with a negative length.
	if len(values) == 0 {
		return []float64{}, nil
	}
	result := make([]float64, len(values)-1)
	for i := 1; i < len(values); i++ {
		// Scaled difference between sample i-1 and sample i.
		result[i-1] = (values[i] - values[i-1]) / scale
		if result[i-1] < 0 {
			// values[i] is our best approximation of the delta between i-1 and i.
			// Why? This should only be used on counters, so if v[i] - v[i-1] < 0
			// then the counter has reset, and we know *at least* v[i] increments
			// have happened.
			result[i-1] = math.Max(values[i], 0) / scale
		}
	}
	return result, nil
}
// newDerivativeBasedTransform returns a function.MetricFunction named
// "transform.<name>" that computes a delta between consecutive data points.
// The queried timerange is widened by one sample to the left so the first
// output point has a predecessor. transformer must return exactly one fewer
// value than it is given for each series; this is verified after applying it.
func newDerivativeBasedTransform(name string, transformer transform) function.MetricFunction {
	return function.MetricFunction{
		Name:         "transform." + name,
		MinArguments: 1,
		MaxArguments: 1,
		Compute: func(context function.EvaluationContext, arguments []function.Expression, groups function.Groups) (function.Value, error) {
			var err error
			// Calculate the new timerange to include one extra point to the left.
			newContext := context
			timerange := context.Timerange
			newContext.Timerange, err = api.NewSnappedTimerange(timerange.Start()-timerange.ResolutionMillis(), timerange.End(), timerange.ResolutionMillis())
			if err != nil {
				return nil, err
			}
			// The new context has a timerange which is extended beyond the query's.
			listValue, err := arguments[0].Evaluate(newContext)
			if err != nil {
				return nil, err
			}
			// This value must be a SeriesList.
			list, err := listValue.ToSeriesList(newContext.Timerange)
			if err != nil {
				return nil, err
			}
			// Reset the timerange to the one originally requested.
			list.Timerange = context.Timerange
			result, err := ApplyTransform(list, transformer, []function.Value{})
			if err != nil {
				return nil, err
			}
			// Validate our series are the correct length.
			for i := range result.Series {
				if len(result.Series[i].Values) != len(list.Series[i].Values)-1 {
					return nil, fmt.Errorf("Expected transform to return %d values, received %d", len(list.Series[i].Values)-1, len(result.Series[i].Values))
				}
			}
			result.Query = fmt.Sprintf("transform.%s(%s)", name, listValue.GetName())
			result.Name = result.Query
			return result, nil
		},
	}
}
|
[ION-1218] Add CreatedAt and UpdatedAt to SBOMEntry
|
package apidef
import (
"encoding/base64"
"encoding/json"
"encoding/xml"
"text/template"
"github.com/clbanning/mxj"
"github.com/lonelycode/osin"
"gopkg.in/mgo.v2/bson"
"time"
"github.com/TykTechnologies/gojsonschema"
"github.com/TykTechnologies/tyk/regexp"
)
// String-typed aliases that give domain meaning to plain string values
// stored in API definitions.
type AuthProviderCode string
type SessionProviderCode string
type StorageEngineCode string
type TykEvent string            // A type so we can ENUM event types easily, e.g. EventQuotaExceeded
type TykEventHandlerName string // A type for handler codes in API definitions
type EndpointMethodAction string
type TemplateMode string
type MiddlewareDriver string
type IdExtractorSource string
type IdExtractorType string
type AuthTypeEnum string
type RoutingTriggerOnType string
// Enumerated values for the string-typed codes declared above.
const (
	// Endpoint method actions.
	NoAction EndpointMethodAction = "no_action"
	Reply    EndpointMethodAction = "reply"
	// Body-transform template modes.
	UseBlob TemplateMode = "blob"
	UseFile TemplateMode = "file"
	// Request body input formats.
	RequestXML  RequestInputType = "xml"
	RequestJSON RequestInputType = "json"
	// Custom-middleware drivers.
	OttoDriver     MiddlewareDriver = "otto"
	PythonDriver   MiddlewareDriver = "python"
	LuaDriver      MiddlewareDriver = "lua"
	GrpcDriver     MiddlewareDriver = "grpc"
	GoPluginDriver MiddlewareDriver = "goplugin"
	// ID-extractor sources.
	BodySource        IdExtractorSource = "body"
	HeaderSource      IdExtractorSource = "header"
	QuerystringSource IdExtractorSource = "querystring"
	FormSource        IdExtractorSource = "form"
	// ID-extractor strategies.
	ValueExtractor IdExtractorType = "value"
	XPathExtractor IdExtractorType = "xpath"
	RegexExtractor IdExtractorType = "regex"
	// For multi-type auth
	AuthToken     AuthTypeEnum = "auth_token"
	HMACKey       AuthTypeEnum = "hmac_key"
	BasicAuthUser AuthTypeEnum = "basic_auth_user"
	JWTClaim      AuthTypeEnum = "jwt_claim"
	OIDCUser      AuthTypeEnum = "oidc_user"
	OAuthKey      AuthTypeEnum = "oauth_key"
	UnsetAuth     AuthTypeEnum = ""
	// For routing triggers
	All    RoutingTriggerOnType = "all"
	Any    RoutingTriggerOnType = "any"
	Ignore RoutingTriggerOnType = ""
)
// EndpointMethodMeta describes what to do for one HTTP method on an
// endpoint: the action (no_action/reply) plus the reply's status code,
// body data and headers.
type EndpointMethodMeta struct {
	Action  EndpointMethodAction `bson:"action" json:"action"`
	Code    int                  `bson:"code" json:"code"`
	Data    string               `bson:"data" json:"data"`
	Headers map[string]string    `bson:"headers" json:"headers"`
}

// EndPointMeta binds a request path (optionally matched case-insensitively)
// to per-HTTP-method actions.
type EndPointMeta struct {
	Path          string                        `bson:"path" json:"path"`
	IgnoreCase    bool                          `bson:"ignore_case" json:"ignore_case"`
	MethodActions map[string]EndpointMethodMeta `bson:"method_actions" json:"method_actions"`
}

// CacheMeta configures response caching for one method/path pair, including
// the cache-key regex and the response codes eligible for caching.
type CacheMeta struct {
	Method                 string `bson:"method" json:"method"`
	Path                   string `bson:"path" json:"path"`
	CacheKeyRegex          string `bson:"cache_key_regex" json:"cache_key_regex"`
	CacheOnlyResponseCodes []int  `bson:"cache_response_codes" json:"cache_response_codes"`
}
// RequestInputType selects the request body format (xml/json) handed to
// body-transform templates.
type RequestInputType string

// TemplateData configures a body-transform template: the input format, the
// template mode (inline blob vs file), whether session data is exposed to
// the template, and the template source itself.
type TemplateData struct {
	Input          RequestInputType `bson:"input_type" json:"input_type"`
	Mode           TemplateMode     `bson:"template_mode" json:"template_mode"`
	EnableSession  bool             `bson:"enable_session" json:"enable_session"`
	TemplateSource string           `bson:"template_source" json:"template_source"`
}

// TemplateMeta attaches a body-transform template to a method/path pair.
type TemplateMeta struct {
	TemplateData TemplateData `bson:"template_data" json:"template_data"`
	Path         string       `bson:"path" json:"path"`
	Method       string       `bson:"method" json:"method"`
}

// TransformJQMeta attaches a jq filter to a method/path pair.
type TransformJQMeta struct {
	Filter string `bson:"filter" json:"filter"`
	Path   string `bson:"path" json:"path"`
	Method string `bson:"method" json:"method"`
}
// HeaderInjectionMeta adds and removes headers for a method/path pair;
// ActOnResponse applies the changes to the response instead of the request.
type HeaderInjectionMeta struct {
	DeleteHeaders []string          `bson:"delete_headers" json:"delete_headers"`
	AddHeaders    map[string]string `bson:"add_headers" json:"add_headers"`
	Path          string            `bson:"path" json:"path"`
	Method        string            `bson:"method" json:"method"`
	ActOnResponse bool              `bson:"act_on" json:"act_on"`
}

// HardTimeoutMeta sets a hard timeout for a method/path pair.
type HardTimeoutMeta struct {
	Path    string `bson:"path" json:"path"`
	Method  string `bson:"method" json:"method"`
	TimeOut int    `bson:"timeout" json:"timeout"`
}

// TrackEndpointMeta identifies a method/path pair for endpoint tracking
// (used by both the track and do-not-track lists).
type TrackEndpointMeta struct {
	Path   string `bson:"path" json:"path"`
	Method string `bson:"method" json:"method"`
}

// InternalMeta marks a method/path pair as internal.
type InternalMeta struct {
	Path   string `bson:"path" json:"path"`
	Method string `bson:"method" json:"method"`
}

// RequestSizeMeta enforces a maximum request size (bytes — confirm against
// gateway usage) for a method/path pair.
type RequestSizeMeta struct {
	Path      string `bson:"path" json:"path"`
	Method    string `bson:"method" json:"method"`
	SizeLimit int64  `bson:"size_limit" json:"size_limit"`
}
// CircuitBreakerMeta configures circuit breaking for a method/path pair:
// a trip threshold, the sample count it is measured over, and how long to
// wait before returning to service.
type CircuitBreakerMeta struct {
	Path                 string  `bson:"path" json:"path"`
	Method               string  `bson:"method" json:"method"`
	ThresholdPercent     float64 `bson:"threshold_percent" json:"threshold_percent"`
	Samples              int64   `bson:"samples" json:"samples"`
	ReturnToServiceAfter int     `bson:"return_to_service_after" json:"return_to_service_after"`
}

// StringRegexMap is a regex match pattern plus a Reverse flag that inverts
// the match; matchRegex caches the compiled pattern.
type StringRegexMap struct {
	MatchPattern string `bson:"match_rx" json:"match_rx"`
	Reverse      bool   `bson:"reverse" json:"reverse"`
	matchRegex   *regexp.Regexp
}

// RoutingTriggerOptions lists the request attributes a routing trigger can
// match against: headers, query values, path parts, session metadata,
// request context and the payload.
type RoutingTriggerOptions struct {
	HeaderMatches         map[string]StringRegexMap `bson:"header_matches" json:"header_matches"`
	QueryValMatches       map[string]StringRegexMap `bson:"query_val_matches" json:"query_val_matches"`
	PathPartMatches       map[string]StringRegexMap `bson:"path_part_matches" json:"path_part_matches"`
	SessionMetaMatches    map[string]StringRegexMap `bson:"session_meta_matches" json:"session_meta_matches"`
	RequestContextMatches map[string]StringRegexMap `bson:"request_context_matches" json:"request_context_matches"`
	PayloadMatches        StringRegexMap            `bson:"payload_matches" json:"payload_matches"`
}

// RoutingTrigger rewrites the route to RewriteTo when its Options match —
// either "all" of them or "any" of them, selected by On.
type RoutingTrigger struct {
	On        RoutingTriggerOnType  `bson:"on" json:"on"`
	Options   RoutingTriggerOptions `bson:"options" json:"options"`
	RewriteTo string                `bson:"rewrite_to" json:"rewrite_to"`
}
// URLRewriteMeta rewrites a URL matching MatchPattern to RewriteTo for a
// method/path pair; Triggers allow conditional rewrites. MatchRegexp caches
// the compiled pattern.
type URLRewriteMeta struct {
	Path         string           `bson:"path" json:"path"`
	Method       string           `bson:"method" json:"method"`
	MatchPattern string           `bson:"match_pattern" json:"match_pattern"`
	RewriteTo    string           `bson:"rewrite_to" json:"rewrite_to"`
	Triggers     []RoutingTrigger `bson:"triggers" json:"triggers"`
	MatchRegexp  *regexp.Regexp   `json:"-"`
}

// VirtualMeta configures a virtual endpoint for a method/path pair, backed
// by a response function loaded from the given source; ProxyOnError falls
// back to proxying when the function fails.
type VirtualMeta struct {
	ResponseFunctionName string `bson:"response_function_name" json:"response_function_name"`
	FunctionSourceType   string `bson:"function_source_type" json:"function_source_type"`
	FunctionSourceURI    string `bson:"function_source_uri" json:"function_source_uri"`
	Path                 string `bson:"path" json:"path"`
	Method               string `bson:"method" json:"method"`
	UseSession           bool   `bson:"use_session" json:"use_session"`
	ProxyOnError         bool   `bson:"proxy_on_error" json:"proxy_on_error"`
}

// MethodTransformMeta rewrites the HTTP method of a method/path pair to
// ToMethod.
type MethodTransformMeta struct {
	Path     string `bson:"path" json:"path"`
	Method   string `bson:"method" json:"method"`
	ToMethod string `bson:"to_method" json:"to_method"`
}

// ValidatePathMeta validates request bodies for a method/path pair against
// a JSON schema (inline or base64-encoded); SchemaCache holds the prepared
// loader.
type ValidatePathMeta struct {
	Path        string                  `bson:"path" json:"path"`
	Method      string                  `bson:"method" json:"method"`
	Schema      map[string]interface{}  `bson:"schema" json:"schema"`
	SchemaB64   string                  `bson:"schema_b64" json:"schema_b64,omitempty"`
	SchemaCache gojsonschema.JSONLoader `bson:"-" json:"-"`
	// Allows override of default 422 Unprocessable Entity response code for validation errors.
	ErrorResponseCode int `bson:"error_response_code" json:"error_response_code"`
}
type ExtendedPathsSet struct {
Ignored []EndPointMeta `bson:"ignored" json:"ignored,omitempty"`
WhiteList []EndPointMeta `bson:"white_list" json:"white_list,omitempty"`
BlackList []EndPointMeta `bson:"black_list" json:"black_list,omitempty"`
Cached []string `bson:"cache" json:"cache,omitempty"`
AdvanceCacheConfig []CacheMeta `bson:"advance_cache_config" json:"advance_cache_config,omitempty"`
Transform []TemplateMeta `bson:"transform" json:"transform,omitempty"`
TransformResponse []TemplateMeta `bson:"transform_response" json:"transform_response,omitempty"`
TransformJQ []TransformJQMeta `bson:"transform_jq" json:"transform_jq,omitempty"`
TransformJQResponse []TransformJQMeta `bson:"transform_jq_response" json:"transform_jq_response,omitempty"`
TransformHeader []HeaderInjectionMeta `bson:"transform_headers" json:"transform_headers,omitempty"`
TransformResponseHeader []HeaderInjectionMeta `bson:"transform_response_headers" json:"transform_response_headers,omitempty"`
HardTimeouts []HardTimeoutMeta `bson:"hard_timeouts" json:"hard_timeouts,omitempty"`
CircuitBreaker []CircuitBreakerMeta `bson:"circuit_breakers" json:"circuit_breakers,omitempty"`
URLRewrite []URLRewriteMeta `bson:"url_rewrites" json:"url_rewrites,omitempty"`
Virtual []VirtualMeta `bson:"virtual" json:"virtual,omitempty"`
SizeLimit []RequestSizeMeta `bson:"size_limits" json:"size_limits,omitempty"`
MethodTransforms []MethodTransformMeta `bson:"method_transforms" json:"method_transforms,omitempty"`
TrackEndpoints []TrackEndpointMeta `bson:"track_endpoints" json:"track_endpoints,omitempty"`
DoNotTrackEndpoints []TrackEndpointMeta `bson:"do_not_track_endpoints" json:"do_not_track_endpoints,omitempty"`
ValidateJSON []ValidatePathMeta `bson:"validate_json" json:"validate_json,omitempty"`
Internal []InternalMeta `bson:"internal" json:"internal,omitempty"`
}
// GraphQLDataSourceConfig identifies one upstream data source for a GraphQL
// API; DataSourceConfig is an opaque, source-type-specific payload.
type GraphQLDataSourceConfig struct {
	DataSourceType   string      `bson:"data_source_type" json:"data_source_type"`
	DataSourceConfig interface{} `bson:"data_source_config" json:"data_source_config"`
}

// GraphQLConfig holds the GraphQL schema plus its configured data sources.
type GraphQLConfig struct {
	Schema             string                    `bson:"schema" json:"schema"`
	GraphQLDataSources []GraphQLDataSourceConfig `bson:"graphql_data_sources" json:"graphql_data_sources"`
}

// VersionInfo describes a single API version: its expiry, the legacy
// flat path lists, and (when UseExtendedPaths is set) the extended
// per-endpoint configuration, plus version-global header/size overrides.
type VersionInfo struct {
	Name    string `bson:"name" json:"name"`
	Expires string `bson:"expires" json:"expires"`
	// ExpiresTs is runtime-only (parsed from Expires elsewhere; not stored).
	ExpiresTs time.Time `bson:"-" json:"-"`
	Paths     struct {
		Ignored   []string `bson:"ignored" json:"ignored"`
		WhiteList []string `bson:"white_list" json:"white_list"`
		BlackList []string `bson:"black_list" json:"black_list"`
	} `bson:"paths" json:"paths"`
	UseExtendedPaths    bool              `bson:"use_extended_paths" json:"use_extended_paths"`
	ExtendedPaths       ExtendedPathsSet  `bson:"extended_paths" json:"extended_paths"`
	GlobalHeaders       map[string]string `bson:"global_headers" json:"global_headers"`
	GlobalHeadersRemove []string          `bson:"global_headers_remove" json:"global_headers_remove"`
	GlobalSizeLimit     int64             `bson:"global_size_limit" json:"global_size_limit"`
	OverrideTarget      string            `bson:"override_target" json:"override_target"`
	GraphQL             GraphQLConfig     `bson:"graphql" json:"graphql"`
}
// AuthProviderMeta names an auth provider and its storage engine, with an
// opaque provider-specific Meta payload.
type AuthProviderMeta struct {
	Name          AuthProviderCode       `bson:"name" json:"name"`
	StorageEngine StorageEngineCode      `bson:"storage_engine" json:"storage_engine"`
	Meta          map[string]interface{} `bson:"meta" json:"meta"`
}

// SessionProviderMeta mirrors AuthProviderMeta for session providers.
type SessionProviderMeta struct {
	Name          SessionProviderCode    `bson:"name" json:"name"`
	StorageEngine StorageEngineCode      `bson:"storage_engine" json:"storage_engine"`
	Meta          map[string]interface{} `bson:"meta" json:"meta"`
}

// EventHandlerTriggerConfig binds a named handler to its configuration map.
type EventHandlerTriggerConfig struct {
	Handler     TykEventHandlerName    `bson:"handler_name" json:"handler_name"`
	HandlerMeta map[string]interface{} `bson:"handler_meta" json:"handler_meta"`
}

// EventHandlerMetaConfig maps each event type to the handlers fired for it.
type EventHandlerMetaConfig struct {
	Events map[TykEvent][]EventHandlerTriggerConfig `bson:"events" json:"events"`
}

// MiddlewareDefinition describes one custom middleware: its name, source
// path, and session/body requirements.
type MiddlewareDefinition struct {
	Name           string `bson:"name" json:"name"`
	Path           string `bson:"path" json:"path"`
	RequireSession bool   `bson:"require_session" json:"require_session"`
	RawBodyOnly    bool   `bson:"raw_body_only" json:"raw_body_only"`
}

// MiddlewareIdExtractor configures ID extraction (source + extractor type);
// Extractor holds the runtime extractor instance and is never serialized.
type MiddlewareIdExtractor struct {
	ExtractFrom     IdExtractorSource      `bson:"extract_from" json:"extract_from"`
	ExtractWith     IdExtractorType        `bson:"extract_with" json:"extract_with"`
	ExtractorConfig map[string]interface{} `bson:"extractor_config" json:"extractor_config"`
	Extractor       interface{}            `bson:"-" json:"-"`
}

// MiddlewareSection groups the custom middleware hooks by lifecycle stage
// (pre, post, post-key-auth, auth check, response) plus the driver used.
type MiddlewareSection struct {
	Pre         []MiddlewareDefinition `bson:"pre" json:"pre"`
	Post        []MiddlewareDefinition `bson:"post" json:"post"`
	PostKeyAuth []MiddlewareDefinition `bson:"post_key_auth" json:"post_key_auth"`
	AuthCheck   MiddlewareDefinition   `bson:"auth_check" json:"auth_check"`
	Response    []MiddlewareDefinition `bson:"response" json:"response"`
	Driver      MiddlewareDriver       `bson:"driver" json:"driver"`
	IdExtractor MiddlewareIdExtractor  `bson:"id_extractor" json:"id_extractor"`
}
// CacheOptions controls response caching for an API: TTL, which requests
// and response codes are cacheable, and upstream cache-control passthrough.
type CacheOptions struct {
	CacheTimeout               int64    `bson:"cache_timeout" json:"cache_timeout"`
	EnableCache                bool     `bson:"enable_cache" json:"enable_cache"`
	CacheAllSafeRequests       bool     `bson:"cache_all_safe_requests" json:"cache_all_safe_requests"`
	CacheOnlyResponseCodes     []int    `bson:"cache_response_codes" json:"cache_response_codes"`
	EnableUpstreamCacheControl bool     `bson:"enable_upstream_cache_control" json:"enable_upstream_cache_control"`
	CacheControlTTLHeader      string   `bson:"cache_control_ttl_header" json:"cache_control_ttl_header"`
	CacheByHeaders             []string `bson:"cache_by_headers" json:"cache_by_headers"`
}

// ResponseProcessor names a response-path processor and its opaque options.
type ResponseProcessor struct {
	Name    string      `bson:"name" json:"name"`
	Options interface{} `bson:"options" json:"options"`
}

// HostCheckObject describes one uptime-test probe: target URL, protocol,
// timeout, optional commands, and the HTTP request shape to send.
type HostCheckObject struct {
	CheckURL            string            `bson:"url" json:"url"`
	Protocol            string            `bson:"protocol" json:"protocol"`
	Timeout             time.Duration     `bson:"timeout" json:"timeout"`
	EnableProxyProtocol bool              `bson:"enable_proxy_protocol" json:"enable_proxy_protocol"`
	Commands            []CheckCommand    `bson:"commands" json:"commands"`
	Method              string            `bson:"method" json:"method"`
	Headers             map[string]string `bson:"headers" json:"headers"`
	Body                string            `bson:"body" json:"body"`
}

// CheckCommand is a named command with a message payload, used by host checks.
type CheckCommand struct {
	Name    string `bson:"name" json:"name"`
	Message string `bson:"message" json:"message"`
}

// ServiceDiscoveryConfiguration configures dynamic upstream discovery:
// where to query and which JSON paths yield hosts/ports/targets.
type ServiceDiscoveryConfiguration struct {
	UseDiscoveryService bool   `bson:"use_discovery_service" json:"use_discovery_service"`
	QueryEndpoint       string `bson:"query_endpoint" json:"query_endpoint"`
	UseNestedQuery      bool   `bson:"use_nested_query" json:"use_nested_query"`
	ParentDataPath      string `bson:"parent_data_path" json:"parent_data_path"`
	DataPath            string `bson:"data_path" json:"data_path"`
	PortDataPath        string `bson:"port_data_path" json:"port_data_path"`
	TargetPath          string `bson:"target_path" json:"target_path"`
	UseTargetList       bool   `bson:"use_target_list" json:"use_target_list"`
	CacheTimeout        int64  `bson:"cache_timeout" json:"cache_timeout"`
	EndpointReturnsList bool   `bson:"endpoint_returns_list" json:"endpoint_returns_list"`
}

// OIDProviderConfig identifies an OpenID Connect issuer and its known
// client IDs (client ID -> policy mapping value).
type OIDProviderConfig struct {
	Issuer    string            `bson:"issuer" json:"issuer"`
	ClientIDs map[string]string `bson:"client_ids" json:"client_ids"`
}

// OpenIDOptions lists accepted OIDC providers and whether sessions are
// segregated per client.
type OpenIDOptions struct {
	Providers         []OIDProviderConfig `bson:"providers" json:"providers"`
	SegregateByClient bool                `bson:"segregate_by_client" json:"segregate_by_client"`
}
// APIDefinition represents the configuration for a single proxied API and it's versions.
//
// NOTE(review): the bson tag "jwt_identit_base_field" (sic) is missing a
// trailing "y"; it is presumably kept misspelled for stored-document
// compatibility — do not "fix" it without a migration.
//
// swagger:model
type APIDefinition struct {
	Id       bson.ObjectId `bson:"_id,omitempty" json:"id,omitempty"`
	Name     string        `bson:"name" json:"name"`
	Slug     string        `bson:"slug" json:"slug"`
	ListenPort          int    `bson:"listen_port" json:"listen_port"`
	Protocol            string `bson:"protocol" json:"protocol"`
	EnableProxyProtocol bool   `bson:"enable_proxy_protocol" json:"enable_proxy_protocol"`
	APIID               string `bson:"api_id" json:"api_id"`
	OrgID               string `bson:"org_id" json:"org_id"`
	// --- Authentication mode toggles ---
	UseKeylessAccess bool          `bson:"use_keyless" json:"use_keyless"`
	UseOauth2        bool          `bson:"use_oauth2" json:"use_oauth2"`
	UseOpenID        bool          `bson:"use_openid" json:"use_openid"`
	OpenIDOptions    OpenIDOptions `bson:"openid_options" json:"openid_options"`
	Oauth2Meta       struct {
		AllowedAccessTypes     []osin.AccessRequestType   `bson:"allowed_access_types" json:"allowed_access_types"`
		AllowedAuthorizeTypes  []osin.AuthorizeRequestType `bson:"allowed_authorize_types" json:"allowed_authorize_types"`
		AuthorizeLoginRedirect string                      `bson:"auth_login_redirect" json:"auth_login_redirect"`
	} `bson:"oauth_meta" json:"oauth_meta"`
	Auth         AuthConfig            `bson:"auth" json:"auth"` // Deprecated: Use AuthConfigs instead.
	AuthConfigs  map[string]AuthConfig `bson:"auth_configs" json:"auth_configs"`
	UseBasicAuth bool                  `bson:"use_basic_auth" json:"use_basic_auth"`
	BasicAuth    struct {
		DisableCaching     bool   `bson:"disable_caching" json:"disable_caching"`
		CacheTTL           int    `bson:"cache_ttl" json:"cache_ttl"`
		ExtractFromBody    bool   `bson:"extract_from_body" json:"extract_from_body"`
		BodyUserRegexp     string `bson:"body_user_regexp" json:"body_user_regexp"`
		BodyPasswordRegexp string `bson:"body_password_regexp" json:"body_password_regexp"`
	} `bson:"basic_auth" json:"basic_auth"`
	UseMutualTLSAuth     bool              `bson:"use_mutual_tls_auth" json:"use_mutual_tls_auth"`
	ClientCertificates   []string          `bson:"client_certificates" json:"client_certificates"`
	UpstreamCertificates map[string]string `bson:"upstream_certificates" json:"upstream_certificates"`
	PinnedPublicKeys     map[string]string `bson:"pinned_public_keys" json:"pinned_public_keys"`
	// --- JWT configuration ---
	EnableJWT           bool   `bson:"enable_jwt" json:"enable_jwt"`
	UseStandardAuth     bool   `bson:"use_standard_auth" json:"use_standard_auth"`
	UseGoPluginAuth     bool   `bson:"use_go_plugin_auth" json:"use_go_plugin_auth"`
	EnableCoProcessAuth bool   `bson:"enable_coprocess_auth" json:"enable_coprocess_auth"`
	JWTSigningMethod    string `bson:"jwt_signing_method" json:"jwt_signing_method"`
	JWTSource           string `bson:"jwt_source" json:"jwt_source"`
	JWTIdentityBaseField string `bson:"jwt_identit_base_field" json:"jwt_identity_base_field"`
	JWTClientIDBaseField string `bson:"jwt_client_base_field" json:"jwt_client_base_field"`
	JWTPolicyFieldName   string `bson:"jwt_policy_field_name" json:"jwt_policy_field_name"`
	JWTDefaultPolicies   []string `bson:"jwt_default_policies" json:"jwt_default_policies"`
	JWTIssuedAtValidationSkew  uint64 `bson:"jwt_issued_at_validation_skew" json:"jwt_issued_at_validation_skew"`
	JWTExpiresAtValidationSkew uint64 `bson:"jwt_expires_at_validation_skew" json:"jwt_expires_at_validation_skew"`
	JWTNotBeforeValidationSkew uint64 `bson:"jwt_not_before_validation_skew" json:"jwt_not_before_validation_skew"`
	JWTSkipKid                 bool   `bson:"jwt_skip_kid" json:"jwt_skip_kid"`
	JWTScopeToPolicyMapping    map[string]string `bson:"jwt_scope_to_policy_mapping" json:"jwt_scope_to_policy_mapping"`
	JWTScopeClaimName          string            `bson:"jwt_scope_claim_name" json:"jwt_scope_claim_name"`
	NotificationsDetails       NotificationsManager `bson:"notifications" json:"notifications"`
	// --- HMAC / request signing ---
	EnableSignatureChecking bool               `bson:"enable_signature_checking" json:"enable_signature_checking"`
	HmacAllowedClockSkew    float64            `bson:"hmac_allowed_clock_skew" json:"hmac_allowed_clock_skew"`
	HmacAllowedAlgorithms   []string           `bson:"hmac_allowed_algorithms" json:"hmac_allowed_algorithms"`
	RequestSigning          RequestSigningMeta `bson:"request_signing" json:"request_signing"`
	BaseIdentityProvidedBy  AuthTypeEnum       `bson:"base_identity_provided_by" json:"base_identity_provided_by"`
	// --- Versioning ---
	VersionDefinition struct {
		Location  string `bson:"location" json:"location"`
		Key       string `bson:"key" json:"key"`
		StripPath bool   `bson:"strip_path" json:"strip_path"`
	} `bson:"definition" json:"definition"`
	VersionData struct {
		NotVersioned   bool                   `bson:"not_versioned" json:"not_versioned"`
		DefaultVersion string                 `bson:"default_version" json:"default_version"`
		Versions       map[string]VersionInfo `bson:"versions" json:"versions"`
	} `bson:"version_data" json:"version_data"`
	UptimeTests struct {
		CheckList []HostCheckObject `bson:"check_list" json:"check_list"`
		Config    struct {
			ExpireUptimeAnalyticsAfter int64                         `bson:"expire_utime_after" json:"expire_utime_after"` // must have an expireAt TTL index set (http://docs.mongodb.org/manual/tutorial/expire-data/)
			ServiceDiscovery           ServiceDiscoveryConfiguration `bson:"service_discovery" json:"service_discovery"`
			RecheckWait                int                           `bson:"recheck_wait" json:"recheck_wait"`
		} `bson:"config" json:"config"`
	} `bson:"uptime_tests" json:"uptime_tests"`
	// --- Proxy / upstream routing ---
	Proxy struct {
		PreserveHostHeader          bool                          `bson:"preserve_host_header" json:"preserve_host_header"`
		ListenPath                  string                        `bson:"listen_path" json:"listen_path"`
		TargetURL                   string                        `bson:"target_url" json:"target_url"`
		DisableStripSlash           bool                          `bson:"disable_strip_slash" json:"disable_strip_slash"`
		StripListenPath             bool                          `bson:"strip_listen_path" json:"strip_listen_path"`
		EnableLoadBalancing         bool                          `bson:"enable_load_balancing" json:"enable_load_balancing"`
		Targets                     []string                      `bson:"target_list" json:"target_list"`
		StructuredTargetList        *HostList                     `bson:"-" json:"-"`
		CheckHostAgainstUptimeTests bool                          `bson:"check_host_against_uptime_tests" json:"check_host_against_uptime_tests"`
		ServiceDiscovery            ServiceDiscoveryConfiguration `bson:"service_discovery" json:"service_discovery"`
		Transport                   struct {
			SSLInsecureSkipVerify   bool     `bson:"ssl_insecure_skip_verify" json:"ssl_insecure_skip_verify"`
			SSLCipherSuites         []string `bson:"ssl_ciphers" json:"ssl_ciphers"`
			SSLMinVersion           uint16   `bson:"ssl_min_version" json:"ssl_min_version"`
			SSLForceCommonNameCheck bool     `json:"ssl_force_common_name_check"`
			ProxyURL                string   `bson:"proxy_url" json:"proxy_url"`
		} `bson:"transport" json:"transport"`
	} `bson:"proxy" json:"proxy"`
	DisableRateLimit       bool              `bson:"disable_rate_limit" json:"disable_rate_limit"`
	DisableQuota           bool              `bson:"disable_quota" json:"disable_quota"`
	CustomMiddleware       MiddlewareSection `bson:"custom_middleware" json:"custom_middleware"`
	CustomMiddlewareBundle string            `bson:"custom_middleware_bundle" json:"custom_middleware_bundle"`
	CacheOptions           CacheOptions      `bson:"cache_options" json:"cache_options"`
	SessionLifetime        int64             `bson:"session_lifetime" json:"session_lifetime"`
	Active                 bool              `bson:"active" json:"active"`
	Internal               bool              `bson:"internal" json:"internal"`
	AuthProvider           AuthProviderMeta  `bson:"auth_provider" json:"auth_provider"`
	SessionProvider        SessionProviderMeta `bson:"session_provider" json:"session_provider"`
	EventHandlers          EventHandlerMetaConfig `bson:"event_handlers" json:"event_handlers"`
	EnableBatchRequestSupport bool `bson:"enable_batch_request_support" json:"enable_batch_request_support"`
	// --- IP allow/deny lists ---
	EnableIpWhiteListing bool     `mapstructure:"enable_ip_whitelisting" bson:"enable_ip_whitelisting" json:"enable_ip_whitelisting"`
	AllowedIPs           []string `mapstructure:"allowed_ips" bson:"allowed_ips" json:"allowed_ips"`
	EnableIpBlacklisting bool     `mapstructure:"enable_ip_blacklisting" bson:"enable_ip_blacklisting" json:"enable_ip_blacklisting"`
	BlacklistedIPs       []string `mapstructure:"blacklisted_ips" bson:"blacklisted_ips" json:"blacklisted_ips"`
	DontSetQuotasOnCreate bool    `mapstructure:"dont_set_quota_on_create" bson:"dont_set_quota_on_create" json:"dont_set_quota_on_create"`
	ExpireAnalyticsAfter  int64   `mapstructure:"expire_analytics_after" bson:"expire_analytics_after" json:"expire_analytics_after"` // must have an expireAt TTL index set (http://docs.mongodb.org/manual/tutorial/expire-data/)
	ResponseProcessors    []ResponseProcessor `bson:"response_processors" json:"response_processors"`
	CORS                  struct {
		Enable             bool     `bson:"enable" json:"enable"`
		AllowedOrigins     []string `bson:"allowed_origins" json:"allowed_origins"`
		AllowedMethods     []string `bson:"allowed_methods" json:"allowed_methods"`
		AllowedHeaders     []string `bson:"allowed_headers" json:"allowed_headers"`
		ExposedHeaders     []string `bson:"exposed_headers" json:"exposed_headers"`
		AllowCredentials   bool     `bson:"allow_credentials" json:"allow_credentials"`
		MaxAge             int      `bson:"max_age" json:"max_age"`
		OptionsPassthrough bool     `bson:"options_passthrough" json:"options_passthrough"`
		Debug              bool     `bson:"debug" json:"debug"`
	} `bson:"CORS" json:"CORS"`
	Domain            string                 `bson:"domain" json:"domain"`
	Certificates      []string               `bson:"certificates" json:"certificates"`
	DoNotTrack        bool                   `bson:"do_not_track" json:"do_not_track"`
	Tags              []string               `bson:"tags" json:"tags"`
	EnableContextVars bool                   `bson:"enable_context_vars" json:"enable_context_vars"`
	ConfigData        map[string]interface{} `bson:"config_data" json:"config_data"`
	TagHeaders        []string               `bson:"tag_headers" json:"tag_headers"`
	GlobalRateLimit   GlobalRateLimit        `bson:"global_rate_limit" json:"global_rate_limit"`
	StripAuthData     bool                   `bson:"strip_auth_data" json:"strip_auth_data"`
	EnableDetailedRecording bool             `bson:"enable_detailed_recording" json:"enable_detailed_recording"`
}
// AuthConfig describes where a credential is read from (header, query
// param, cookie, or client certificate) and optional signature validation.
type AuthConfig struct {
	UseParam          bool            `mapstructure:"use_param" bson:"use_param" json:"use_param"`
	ParamName         string          `mapstructure:"param_name" bson:"param_name" json:"param_name"`
	UseCookie         bool            `mapstructure:"use_cookie" bson:"use_cookie" json:"use_cookie"`
	CookieName        string          `mapstructure:"cookie_name" bson:"cookie_name" json:"cookie_name"`
	AuthHeaderName    string          `mapstructure:"auth_header_name" bson:"auth_header_name" json:"auth_header_name"`
	UseCertificate    bool            `mapstructure:"use_certificate" bson:"use_certificate" json:"use_certificate"`
	ValidateSignature bool            `mapstructure:"validate_signature" bson:"validate_signature" json:"validate_signature"`
	Signature         SignatureConfig `mapstructure:"signature" bson:"signature" json:"signature,omitempty"`
}

// SignatureConfig configures request-signature validation for an AuthConfig,
// including the error returned on failure.
type SignatureConfig struct {
	Algorithm        string `mapstructure:"algorithm" bson:"algorithm" json:"algorithm"`
	Header           string `mapstructure:"header" bson:"header" json:"header"`
	Secret           string `mapstructure:"secret" bson:"secret" json:"secret"`
	AllowedClockSkew int64  `mapstructure:"allowed_clock_skew" bson:"allowed_clock_skew" json:"allowed_clock_skew"`
	ErrorCode        int    `mapstructure:"error_code" bson:"error_code" json:"error_code"`
	ErrorMessage     string `mapstructure:"error_message" bson:"error_message" json:"error_message"`
}

// GlobalRateLimit is an API-wide rate limit of Rate requests per Per seconds
// (units assumed from convention — confirm against the limiter that reads it).
type GlobalRateLimit struct {
	Rate float64 `bson:"rate" json:"rate"`
	Per  float64 `bson:"per" json:"per"`
}

// BundleManifest describes a custom-middleware bundle: its files, the
// middleware it provides, and integrity/authenticity fields.
type BundleManifest struct {
	FileList         []string          `bson:"file_list" json:"file_list"`
	CustomMiddleware MiddlewareSection `bson:"custom_middleware" json:"custom_middleware"`
	Checksum         string            `bson:"checksum" json:"checksum"`
	Signature        string            `bson:"signature" json:"signature"`
}

// RequestSigningMeta configures outbound request signing toward the upstream.
type RequestSigningMeta struct {
	IsEnabled       bool     `bson:"is_enabled" json:"is_enabled"`
	Secret          string   `bson:"secret" json:"secret"`
	KeyId           string   `bson:"key_id" json:"key_id"`
	Algorithm       string   `bson:"algorithm" json:"algorithm"`
	HeaderList      []string `bson:"header_list" json:"header_list"`
	CertificateId   string   `bson:"certificate_id" json:"certificate_id"`
	SignatureHeader string   `bson:"signature_header" json:"signature_header"`
}
// EncodeForDB prepares the definition for persistence. It base64-encodes
// (StdEncoding — the original comment saying "URL encode" was inaccurate)
// the keys of VersionData.Versions, UpstreamCertificates and
// PinnedPublicKeys so arbitrary strings are safe as stored map keys,
// converts every ValidateJSON schema into its SchemaB64 form, and
// back-fills the deprecated Auth field from AuthConfigs["authToken"]
// when Auth is unset. The receiver is mutated in place.
func (a *APIDefinition) EncodeForDB() {
	newVersion := make(map[string]VersionInfo)
	for k, v := range a.VersionData.Versions {
		// Re-key the version map with the base64 form; keep Name in sync
		// with the new key so DecodeFromDB can restore both.
		newK := base64.StdEncoding.EncodeToString([]byte(k))
		v.Name = newK
		newVersion[newK] = v
	}
	a.VersionData.Versions = newVersion
	newUpstreamCerts := make(map[string]string)
	for domain, cert := range a.UpstreamCertificates {
		newD := base64.StdEncoding.EncodeToString([]byte(domain))
		newUpstreamCerts[newD] = cert
	}
	a.UpstreamCertificates = newUpstreamCerts
	newPinnedPublicKeys := make(map[string]string)
	for domain, cert := range a.PinnedPublicKeys {
		newD := base64.StdEncoding.EncodeToString([]byte(domain))
		newPinnedPublicKeys[newD] = cert
	}
	a.PinnedPublicKeys = newPinnedPublicKeys
	for i, version := range a.VersionData.Versions {
		for j, oldSchema := range version.ExtendedPaths.ValidateJSON {
			// Persist the schema as base64-encoded JSON and drop the raw
			// map form (marshal error deliberately ignored; a nil/invalid
			// schema simply encodes to the base64 of what Marshal returned).
			jsBytes, _ := json.Marshal(oldSchema.Schema)
			oldSchema.SchemaB64 = base64.StdEncoding.EncodeToString(jsBytes)
			oldSchema.Schema = nil
			a.VersionData.Versions[i].ExtendedPaths.ValidateJSON[j] = oldSchema
		}
	}
	// Auth is deprecated so this code tries to maintain backward compatibility
	if a.Auth.AuthHeaderName == "" {
		a.Auth = a.AuthConfigs["authToken"]
	}
}
// DecodeFromDB reverses EncodeForDB after loading a stored definition:
// base64-decodes the keys of VersionData.Versions, UpstreamCertificates
// and PinnedPublicKeys (keys that fail to decode are kept verbatim, on
// the assumption they are pre-encoding legacy data), re-inflates each
// ValidateJSON schema from SchemaB64, and seeds AuthConfigs entries for
// "authToken" and "jwt" from the deprecated Auth field when absent.
// The receiver is mutated in place.
func (a *APIDefinition) DecodeFromDB() {
	newVersion := make(map[string]VersionInfo)
	for k, v := range a.VersionData.Versions {
		newK, err := base64.StdEncoding.DecodeString(k)
		if err != nil {
			// Not valid base64: legacy record stored before EncodeForDB
			// existed — leave the key untouched.
			log.Error("Couldn't Decode, leaving as it may be legacy...")
			newVersion[k] = v
		} else {
			v.Name = string(newK)
			newVersion[string(newK)] = v
		}
	}
	a.VersionData.Versions = newVersion
	newUpstreamCerts := make(map[string]string)
	for domain, cert := range a.UpstreamCertificates {
		newD, err := base64.StdEncoding.DecodeString(domain)
		if err != nil {
			log.Error("Couldn't Decode, leaving as it may be legacy...")
			newUpstreamCerts[domain] = cert
		} else {
			newUpstreamCerts[string(newD)] = cert
		}
	}
	a.UpstreamCertificates = newUpstreamCerts
	newPinnedPublicKeys := make(map[string]string)
	for domain, cert := range a.PinnedPublicKeys {
		newD, err := base64.StdEncoding.DecodeString(domain)
		if err != nil {
			log.Error("Couldn't Decode, leaving as it may be legacy...")
			newPinnedPublicKeys[domain] = cert
		} else {
			newPinnedPublicKeys[string(newD)] = cert
		}
	}
	a.PinnedPublicKeys = newPinnedPublicKeys
	for i, version := range a.VersionData.Versions {
		for j, oldSchema := range version.ExtendedPaths.ValidateJSON {
			// Restore the map form of the schema; decode/unmarshal errors
			// are deliberately ignored (best-effort, mirrors EncodeForDB).
			jsBytes, _ := base64.StdEncoding.DecodeString(oldSchema.SchemaB64)
			json.Unmarshal(jsBytes, &oldSchema.Schema)
			oldSchema.SchemaB64 = ""
			a.VersionData.Versions[i].ExtendedPaths.ValidateJSON[j] = oldSchema
		}
	}
	// Auth is deprecated so this code tries to maintain backward compatibility
	makeCompatible := func(authType string) {
		if a.AuthConfigs == nil {
			a.AuthConfigs = make(map[string]AuthConfig)
		}
		_, ok := a.AuthConfigs[authType]
		if !ok {
			a.AuthConfigs[authType] = a.Auth
		}
	}
	makeCompatible("authToken")
	makeCompatible("jwt")
}
// Check returns the first substring of value matched by the compiled
// pattern. It returns the empty string when nothing matches or when the
// matcher was never initialised (Init not called, or compilation failed).
func (s *StringRegexMap) Check(value string) (match string) {
	if rx := s.matchRegex; rx != nil {
		match = rx.FindString(value)
	}
	return
}
// FindStringSubmatch runs the compiled pattern against value and returns
// the submatch slice plus a matched flag. When Reverse is set the flag is
// inverted (true means "did not match"). An uninitialised matcher yields
// (false, nil).
func (s *StringRegexMap) FindStringSubmatch(value string) (matched bool, match []string) {
	if s.matchRegex == nil {
		return
	}
	match = s.matchRegex.FindStringSubmatch(value)
	if s.Reverse {
		return len(match) == 0, match
	}
	return len(match) > 0, match
}
// FindAllStringSubmatch runs the compiled pattern against value, returning
// up to n submatch groups plus a matched flag; when Reverse is set the
// flag is inverted (true means "did not match").
func (s *StringRegexMap) FindAllStringSubmatch(value string, n int) (matched bool, matches [][]string) {
	// Guard against an uninitialised matcher (Init never called, or its
	// compile failed). The sibling methods Check and FindStringSubmatch
	// already do this; without it the call below panics on a nil regexp.
	if s.matchRegex == nil {
		return
	}
	matches = s.matchRegex.FindAllStringSubmatch(value, n)
	if !s.Reverse {
		matched = len(matches) > 0
	} else {
		matched = len(matches) == 0
	}
	return
}
// Init compiles MatchPattern and caches the result in matchRegex for use
// by Check/FindStringSubmatch/FindAllStringSubmatch. On failure it logs
// the bad pattern and returns the compile error, leaving matchRegex unset.
func (s *StringRegexMap) Init() error {
	rx, err := regexp.Compile(s.MatchPattern)
	if err != nil {
		log.WithError(err).WithField("MatchPattern", s.MatchPattern).
			Error("Could not compile matchRegex for StringRegexMap")
		return err
	}
	s.matchRegex = rx
	return nil
}
// DummyAPI returns a fully-populated APIDefinition useful as a fixture:
// a single "Default" version whose extended paths exercise every
// middleware meta type, with all slice/map fields initialised to empty
// (non-nil) values so JSON round-trips produce [] / {} rather than null.
func DummyAPI() APIDefinition {
	// One endpoint meta reused for the ignored/white/black lists.
	endpointMeta := EndPointMeta{
		Path: "abc",
		MethodActions: map[string]EndpointMethodMeta{
			"GET": {
				Action:  Reply,
				Code:    200,
				Data:    "testdata",
				Headers: map[string]string{"header": "value"},
			},
		},
	}
	templateMeta := TemplateMeta{
		TemplateData: TemplateData{Input: RequestJSON, Mode: UseBlob},
	}
	transformJQMeta := TransformJQMeta{
		Filter: "filter",
		Path:   "path",
		Method: "method",
	}
	headerInjectionMeta := HeaderInjectionMeta{
		DeleteHeaders: []string{"header1", "header2"},
		AddHeaders:    map[string]string{},
		Path:          "path",
		Method:        "method",
	}
	hardTimeoutMeta := HardTimeoutMeta{Path: "path", Method: "method", TimeOut: 0}
	circuitBreakerMeta := CircuitBreakerMeta{
		Path:                 "path",
		Method:               "method",
		ThresholdPercent:     0.0,
		Samples:              0,
		ReturnToServiceAfter: 0,
	}
	// TODO: Extend triggers
	urlRewriteMeta := URLRewriteMeta{
		Path:         "",
		Method:       "method",
		MatchPattern: "matchpattern",
		RewriteTo:    "rewriteto",
		Triggers:     []RoutingTrigger{},
	}
	virtualMeta := VirtualMeta{
		ResponseFunctionName: "responsefunctioname",
		FunctionSourceType:   "functionsourcetype",
		FunctionSourceURI:    "functionsourceuri",
		Path:                 "path",
		Method:               "method",
	}
	sizeLimit := RequestSizeMeta{
		Path:      "path",
		Method:    "method",
		SizeLimit: 0,
	}
	methodTransformMeta := MethodTransformMeta{Path: "path", Method: "method", ToMethod: "tomethod"}
	trackEndpointMeta := TrackEndpointMeta{Path: "path", Method: "method"}
	internalMeta := InternalMeta{Path: "path", Method: "method"}
	validatePathMeta := ValidatePathMeta{Path: "path", Method: "method", Schema: map[string]interface{}{}, SchemaB64: ""}
	// Anonymous struct literal must mirror VersionInfo.Paths exactly.
	paths := struct {
		Ignored   []string `bson:"ignored" json:"ignored"`
		WhiteList []string `bson:"white_list" json:"white_list"`
		BlackList []string `bson:"black_list" json:"black_list"`
	}{
		Ignored:   []string{},
		WhiteList: []string{},
		BlackList: []string{},
	}
	versionInfo := VersionInfo{
		Name:             "Default",
		UseExtendedPaths: true,
		Paths:            paths,
		ExtendedPaths: ExtendedPathsSet{
			Ignored:                 []EndPointMeta{endpointMeta},
			WhiteList:               []EndPointMeta{endpointMeta},
			BlackList:               []EndPointMeta{endpointMeta},
			Cached:                  []string{},
			Transform:               []TemplateMeta{templateMeta},
			TransformResponse:       []TemplateMeta{templateMeta},
			TransformJQ:             []TransformJQMeta{transformJQMeta},
			TransformJQResponse:     []TransformJQMeta{transformJQMeta},
			TransformHeader:         []HeaderInjectionMeta{headerInjectionMeta},
			TransformResponseHeader: []HeaderInjectionMeta{headerInjectionMeta},
			HardTimeouts:            []HardTimeoutMeta{hardTimeoutMeta},
			CircuitBreaker:          []CircuitBreakerMeta{circuitBreakerMeta},
			URLRewrite:              []URLRewriteMeta{urlRewriteMeta},
			Virtual:                 []VirtualMeta{virtualMeta},
			SizeLimit:               []RequestSizeMeta{sizeLimit},
			MethodTransforms:        []MethodTransformMeta{methodTransformMeta},
			TrackEndpoints:          []TrackEndpointMeta{trackEndpointMeta},
			DoNotTrackEndpoints:     []TrackEndpointMeta{trackEndpointMeta},
			Internal:                []InternalMeta{internalMeta},
			ValidateJSON:            []ValidatePathMeta{validatePathMeta},
		},
	}
	// Anonymous struct literal must mirror APIDefinition.VersionData exactly.
	versionData := struct {
		NotVersioned   bool                   `bson:"not_versioned" json:"not_versioned"`
		DefaultVersion string                 `bson:"default_version" json:"default_version"`
		Versions       map[string]VersionInfo `bson:"versions" json:"versions"`
	}{
		NotVersioned:   true,
		DefaultVersion: "",
		Versions: map[string]VersionInfo{
			"Default": versionInfo,
		},
	}
	return APIDefinition{
		VersionData:             versionData,
		ConfigData:              map[string]interface{}{},
		AllowedIPs:              []string{},
		PinnedPublicKeys:        map[string]string{},
		ResponseProcessors:      []ResponseProcessor{},
		ClientCertificates:      []string{},
		BlacklistedIPs:          []string{},
		TagHeaders:              []string{},
		UpstreamCertificates:    map[string]string{},
		JWTScopeToPolicyMapping: map[string]string{},
		HmacAllowedAlgorithms:   []string{},
		CustomMiddleware: MiddlewareSection{
			Post:        []MiddlewareDefinition{},
			Pre:         []MiddlewareDefinition{},
			PostKeyAuth: []MiddlewareDefinition{},
			AuthCheck:   MiddlewareDefinition{},
			IdExtractor: MiddlewareIdExtractor{
				ExtractorConfig: map[string]interface{}{},
			},
		},
		Tags: []string{},
	}
}
// Template is the base text/template used by transform middleware. It
// exposes two helper functions to template authors:
//   - jsonMarshal: render any value as a JSON string.
//   - xmlMarshal:  render a value as XML, using mxj (with XML escaping
//     enabled) for mxj.Map and map[string]interface{} values, and
//     falling back to encoding/xml for everything else.
var Template = template.New("").Funcs(map[string]interface{}{
	"jsonMarshal": func(v interface{}) (string, error) {
		bs, err := json.Marshal(v)
		return string(bs), err
	},
	"xmlMarshal": func(v interface{}) (string, error) {
		var err error
		var xmlValue []byte
		mv, ok := v.(mxj.Map)
		if ok {
			// Already an mxj map — escape and serialise directly.
			mxj.XMLEscapeChars(true)
			xmlValue, err = mv.Xml()
		} else {
			res, ok := v.(map[string]interface{})
			if ok {
				mxj.XMLEscapeChars(true)
				xmlValue, err = mxj.Map(res).Xml()
			} else {
				// Arbitrary value: use the standard library marshaller.
				xmlValue, err = xml.MarshalIndent(v, "", "  ")
			}
		}
		return string(xmlValue), err
	},
})
// update & extend GraphQL API definition
package apidef
import (
"encoding/base64"
"encoding/json"
"encoding/xml"
"text/template"
"github.com/clbanning/mxj"
"github.com/lonelycode/osin"
"gopkg.in/mgo.v2/bson"
"time"
"github.com/TykTechnologies/gojsonschema"
"github.com/TykTechnologies/tyk/regexp"
)
// String-typed codes used throughout the API definition so enum values
// are distinct types rather than bare strings.
type AuthProviderCode string
type SessionProviderCode string
type StorageEngineCode string
type TykEvent string            // A type so we can ENUM event types easily, e.g. EventQuotaExceeded
type TykEventHandlerName string // A type for handler codes in API definitions
type EndpointMethodAction string
type TemplateMode string
type MiddlewareDriver string
type IdExtractorSource string
type IdExtractorType string
type AuthTypeEnum string
type RoutingTriggerOnType string

// Enumerated values for the code types above.
const (
	// Endpoint method actions.
	NoAction EndpointMethodAction = "no_action"
	Reply    EndpointMethodAction = "reply"
	// Template modes and request input types.
	UseBlob     TemplateMode     = "blob"
	UseFile     TemplateMode     = "file"
	RequestXML  RequestInputType = "xml"
	RequestJSON RequestInputType = "json"
	// Custom middleware drivers.
	OttoDriver     MiddlewareDriver = "otto"
	PythonDriver   MiddlewareDriver = "python"
	LuaDriver      MiddlewareDriver = "lua"
	GrpcDriver     MiddlewareDriver = "grpc"
	GoPluginDriver MiddlewareDriver = "goplugin"
	// ID extractor sources and types.
	BodySource        IdExtractorSource = "body"
	HeaderSource      IdExtractorSource = "header"
	QuerystringSource IdExtractorSource = "querystring"
	FormSource        IdExtractorSource = "form"
	ValueExtractor    IdExtractorType   = "value"
	XPathExtractor    IdExtractorType   = "xpath"
	RegexExtractor    IdExtractorType   = "regex"
	// For multi-type auth
	AuthToken     AuthTypeEnum = "auth_token"
	HMACKey       AuthTypeEnum = "hmac_key"
	BasicAuthUser AuthTypeEnum = "basic_auth_user"
	JWTClaim      AuthTypeEnum = "jwt_claim"
	OIDCUser      AuthTypeEnum = "oidc_user"
	OAuthKey      AuthTypeEnum = "oauth_key"
	UnsetAuth     AuthTypeEnum = ""
	// For routing triggers
	All    RoutingTriggerOnType = "all"
	Any    RoutingTriggerOnType = "any"
	Ignore RoutingTriggerOnType = ""
)
// EndpointMethodMeta describes what to do for one HTTP method on an
// endpoint: the action plus the canned response (code/data/headers)
// used when Action is Reply.
type EndpointMethodMeta struct {
	Action  EndpointMethodAction `bson:"action" json:"action"`
	Code    int                  `bson:"code" json:"code"`
	Data    string               `bson:"data" json:"data"`
	Headers map[string]string    `bson:"headers" json:"headers"`
}

// EndPointMeta maps a path to per-method actions (keyed by HTTP method).
type EndPointMeta struct {
	Path          string                        `bson:"path" json:"path"`
	IgnoreCase    bool                          `bson:"ignore_case" json:"ignore_case"`
	MethodActions map[string]EndpointMethodMeta `bson:"method_actions" json:"method_actions"`
}

// CacheMeta configures advanced caching for one path/method, optionally
// keyed by a regex and limited to certain response codes.
type CacheMeta struct {
	Method                 string `bson:"method" json:"method"`
	Path                   string `bson:"path" json:"path"`
	CacheKeyRegex          string `bson:"cache_key_regex" json:"cache_key_regex"`
	CacheOnlyResponseCodes []int  `bson:"cache_response_codes" json:"cache_response_codes"`
}

// RequestInputType selects the request body format a template consumes
// (see RequestXML / RequestJSON constants).
type RequestInputType string

// TemplateData configures a body-transform template: input format, mode
// (blob vs file), and the template source itself.
type TemplateData struct {
	Input          RequestInputType `bson:"input_type" json:"input_type"`
	Mode           TemplateMode     `bson:"template_mode" json:"template_mode"`
	EnableSession  bool             `bson:"enable_session" json:"enable_session"`
	TemplateSource string           `bson:"template_source" json:"template_source"`
}

// TemplateMeta attaches TemplateData to a path/method pair.
type TemplateMeta struct {
	TemplateData TemplateData `bson:"template_data" json:"template_data"`
	Path         string       `bson:"path" json:"path"`
	Method       string       `bson:"method" json:"method"`
}

// TransformJQMeta applies a JQ filter to a path/method.
type TransformJQMeta struct {
	Filter string `bson:"filter" json:"filter"`
	Path   string `bson:"path" json:"path"`
	Method string `bson:"method" json:"method"`
}

// HeaderInjectionMeta adds/deletes headers for a path/method; ActOnResponse
// selects the response side instead of the request side.
type HeaderInjectionMeta struct {
	DeleteHeaders []string          `bson:"delete_headers" json:"delete_headers"`
	AddHeaders    map[string]string `bson:"add_headers" json:"add_headers"`
	Path          string            `bson:"path" json:"path"`
	Method        string            `bson:"method" json:"method"`
	ActOnResponse bool              `bson:"act_on" json:"act_on"`
}

// HardTimeoutMeta sets a hard upstream timeout for a path/method.
type HardTimeoutMeta struct {
	Path    string `bson:"path" json:"path"`
	Method  string `bson:"method" json:"method"`
	TimeOut int    `bson:"timeout" json:"timeout"`
}

// TrackEndpointMeta marks a path/method for (or against) analytics tracking.
type TrackEndpointMeta struct {
	Path   string `bson:"path" json:"path"`
	Method string `bson:"method" json:"method"`
}

// InternalMeta marks a path/method as internal-only.
type InternalMeta struct {
	Path   string `bson:"path" json:"path"`
	Method string `bson:"method" json:"method"`
}

// RequestSizeMeta limits request size (bytes, presumably — confirm against
// the enforcing middleware) for a path/method.
type RequestSizeMeta struct {
	Path      string `bson:"path" json:"path"`
	Method    string `bson:"method" json:"method"`
	SizeLimit int64  `bson:"size_limit" json:"size_limit"`
}

// CircuitBreakerMeta configures a circuit breaker for a path/method.
type CircuitBreakerMeta struct {
	Path                 string  `bson:"path" json:"path"`
	Method               string  `bson:"method" json:"method"`
	ThresholdPercent     float64 `bson:"threshold_percent" json:"threshold_percent"`
	Samples              int64   `bson:"samples" json:"samples"`
	ReturnToServiceAfter int     `bson:"return_to_service_after" json:"return_to_service_after"`
}

// StringRegexMap is a serializable regex match rule; matchRegex is the
// compiled form populated by Init and is never serialized. Reverse
// inverts the matched flag in the Find* helpers.
type StringRegexMap struct {
	MatchPattern string `bson:"match_rx" json:"match_rx"`
	Reverse      bool   `bson:"reverse" json:"reverse"`
	matchRegex   *regexp.Regexp
}

// RoutingTriggerOptions is the set of match rules a RoutingTrigger
// evaluates against headers, query values, path parts, session metadata,
// request context and the payload.
type RoutingTriggerOptions struct {
	HeaderMatches         map[string]StringRegexMap `bson:"header_matches" json:"header_matches"`
	QueryValMatches       map[string]StringRegexMap `bson:"query_val_matches" json:"query_val_matches"`
	PathPartMatches       map[string]StringRegexMap `bson:"path_part_matches" json:"path_part_matches"`
	SessionMetaMatches    map[string]StringRegexMap `bson:"session_meta_matches" json:"session_meta_matches"`
	RequestContextMatches map[string]StringRegexMap `bson:"request_context_matches" json:"request_context_matches"`
	PayloadMatches        StringRegexMap            `bson:"payload_matches" json:"payload_matches"`
}
type RoutingTrigger struct {
On RoutingTriggerOnType `bson:"on" json:"on"`
Options RoutingTriggerOptions `bson:"options" json:"options"`
RewriteTo string `bson:"rewrite_to" json:"rewrite_to"`
}
type URLRewriteMeta struct {
Path string `bson:"path" json:"path"`
Method string `bson:"method" json:"method"`
MatchPattern string `bson:"match_pattern" json:"match_pattern"`
RewriteTo string `bson:"rewrite_to" json:"rewrite_to"`
Triggers []RoutingTrigger `bson:"triggers" json:"triggers"`
MatchRegexp *regexp.Regexp `json:"-"`
}
type VirtualMeta struct {
ResponseFunctionName string `bson:"response_function_name" json:"response_function_name"`
FunctionSourceType string `bson:"function_source_type" json:"function_source_type"`
FunctionSourceURI string `bson:"function_source_uri" json:"function_source_uri"`
Path string `bson:"path" json:"path"`
Method string `bson:"method" json:"method"`
UseSession bool `bson:"use_session" json:"use_session"`
ProxyOnError bool `bson:"proxy_on_error" json:"proxy_on_error"`
}
type MethodTransformMeta struct {
Path string `bson:"path" json:"path"`
Method string `bson:"method" json:"method"`
ToMethod string `bson:"to_method" json:"to_method"`
}
type ValidatePathMeta struct {
Path string `bson:"path" json:"path"`
Method string `bson:"method" json:"method"`
Schema map[string]interface{} `bson:"schema" json:"schema"`
SchemaB64 string `bson:"schema_b64" json:"schema_b64,omitempty"`
SchemaCache gojsonschema.JSONLoader `bson:"-" json:"-"`
// Allows override of default 422 Unprocessible Entity response code for validation errors.
ErrorResponseCode int `bson:"error_response_code" json:"error_response_code"`
}
type ExtendedPathsSet struct {
Ignored []EndPointMeta `bson:"ignored" json:"ignored,omitempty"`
WhiteList []EndPointMeta `bson:"white_list" json:"white_list,omitempty"`
BlackList []EndPointMeta `bson:"black_list" json:"black_list,omitempty"`
Cached []string `bson:"cache" json:"cache,omitempty"`
AdvanceCacheConfig []CacheMeta `bson:"advance_cache_config" json:"advance_cache_config,omitempty"`
Transform []TemplateMeta `bson:"transform" json:"transform,omitempty"`
TransformResponse []TemplateMeta `bson:"transform_response" json:"transform_response,omitempty"`
TransformJQ []TransformJQMeta `bson:"transform_jq" json:"transform_jq,omitempty"`
TransformJQResponse []TransformJQMeta `bson:"transform_jq_response" json:"transform_jq_response,omitempty"`
TransformHeader []HeaderInjectionMeta `bson:"transform_headers" json:"transform_headers,omitempty"`
TransformResponseHeader []HeaderInjectionMeta `bson:"transform_response_headers" json:"transform_response_headers,omitempty"`
HardTimeouts []HardTimeoutMeta `bson:"hard_timeouts" json:"hard_timeouts,omitempty"`
CircuitBreaker []CircuitBreakerMeta `bson:"circuit_breakers" json:"circuit_breakers,omitempty"`
URLRewrite []URLRewriteMeta `bson:"url_rewrites" json:"url_rewrites,omitempty"`
Virtual []VirtualMeta `bson:"virtual" json:"virtual,omitempty"`
SizeLimit []RequestSizeMeta `bson:"size_limits" json:"size_limits,omitempty"`
MethodTransforms []MethodTransformMeta `bson:"method_transforms" json:"method_transforms,omitempty"`
TrackEndpoints []TrackEndpointMeta `bson:"track_endpoints" json:"track_endpoints,omitempty"`
DoNotTrackEndpoints []TrackEndpointMeta `bson:"do_not_track_endpoints" json:"do_not_track_endpoints,omitempty"`
ValidateJSON []ValidatePathMeta `bson:"validate_json" json:"validate_json,omitempty"`
Internal []InternalMeta `bson:"internal" json:"internal,omitempty"`
}
// GraphQLConfig is the root config object for GraphQL Middlewares
type GraphQLConfig struct {
	// BasePath is the base path for the GraphQL API, Subscriptions Endpoint as well as the GraphQLPlayground
	BasePath string `bson:"base_path" json:"base_path"`
	// GraphQLApi is the API specific configuration
	GraphQLApi GraphQLApi `bson:"graphql_api" json:"graphql_api"`
	// GraphQLPlayground is the Playground specific configuration
	GraphQLPlayground GraphQLPlayground `bson:"playground" json:"playground"`
}

// GraphQLApi is the configuration for the GraphQL Middleware
type GraphQLApi struct {
	// Endpoint is, combined with the base path, the route which the GraphQL Middleware reacts to
	Endpoint string `bson:"endpoint" json:"endpoint"`
	// Schema is the GraphQL Schema exposed by the GraphQL API/Upstream/Engine
	Schema string `bson:"schema" json:"schema"`
	// Subscriptions is the configuration regarding websocket subscriptions
	Subscriptions GraphQLSubscriptions `bson:"subscriptions" json:"subscriptions"`
	// Execution defines the mode and configuration in which the GraphQL middleware should operate
	Execution GraphQLExecution `bson:"execution" json:"execution"`
}

// GraphQLSubscriptions is the configuration for websocket based GraphQL Subscriptions
type GraphQLSubscriptions struct {
	// Enabled defines if Subscriptions should be accepted
	Enabled bool `bson:"enabled" json:"enabled"`
	// Endpoint is, combined with the base path, the route on which websocket upgrade requests will be accepted
	Endpoint string `bson:"endpoint" json:"endpoint"`
	// SecureWebsocketProtocolHeaderVariable is the SecWebsocketProtocolHeader variable expected by the websocket client
	SecureWebsocketProtocolHeaderVariable string `bson:"secure_websocket_protocol_header_variable" json:"secure_websocket_protocol_header_variable"`
}

// GraphQLExecution defines the GraphQL Execution Mode as well as its configuration
type GraphQLExecution struct {
	// Mode is the mode in which the Middleware should operate
	Mode GraphQLExecutionMode `bson:"mode" json:"mode"`
	// Validation defines the behaviour regarding GraphQL request validation
	Validation GraphQLExecutionValidation `bson:"validation" json:"validation"`
	// Config is the GraphQLExecutionMode specific configuration object;
	// kept raw so each mode can decode its own shape.
	Config json.RawMessage `bson:"config" json:"config"`
}

// GraphQLExecutionMode is the mode in which the GraphQL Middleware should operate
type GraphQLExecutionMode int

const (
	// GraphQLExecutionModeProxyOnly is the mode in which the GraphQL Middleware doesn't evaluate the GraphQL request
	// In other terms, the GraphQL Middleware will not act as a GraphQL server in itself.
	// The GraphQL Middleware will (optionally) validate the request and leave the execution up to the upstream.
	GraphQLExecutionModeProxyOnly GraphQLExecutionMode = iota + 1
	// GraphQLExecutionModeExecutionEngine is the mode in which the GraphQL Middleware will evaluate every request.
	// This means the Middleware will act as a independent GraphQL service which might delegate partial execution to upstreams.
	GraphQLExecutionModeExecutionEngine
)

// GraphQLExecutionValidation toggles validation of incoming GraphQL requests.
type GraphQLExecutionValidation struct {
	Enabled bool `bson:"enabled" json:"enabled"`
}

// GraphQLPlayground configures the GraphQL Playground UI: whether it is
// served and at which path.
type GraphQLPlayground struct {
	Enabled bool   `bson:"enabled" json:"enabled"`
	Path    string `bson:"path" json:"path"`
}
// VersionInfo describes a single version of an API: its expiry, path
// allow/ignore/deny lists (simple string form or extended per-endpoint
// form), global header rewrites and size limit, target override and
// GraphQL settings. Name is base64-encoded for storage by
// APIDefinition.EncodeForDB.
type VersionInfo struct {
	Name    string `bson:"name" json:"name"`
	Expires string `bson:"expires" json:"expires"`
	// ExpiresTs is the parsed Expires value; never persisted.
	ExpiresTs time.Time `bson:"-" json:"-"`
	Paths struct {
		Ignored   []string `bson:"ignored" json:"ignored"`
		WhiteList []string `bson:"white_list" json:"white_list"`
		BlackList []string `bson:"black_list" json:"black_list"`
	} `bson:"paths" json:"paths"`
	// UseExtendedPaths selects ExtendedPaths over the simple Paths lists.
	UseExtendedPaths    bool              `bson:"use_extended_paths" json:"use_extended_paths"`
	ExtendedPaths       ExtendedPathsSet  `bson:"extended_paths" json:"extended_paths"`
	GlobalHeaders       map[string]string `bson:"global_headers" json:"global_headers"`
	GlobalHeadersRemove []string          `bson:"global_headers_remove" json:"global_headers_remove"`
	GlobalSizeLimit     int64             `bson:"global_size_limit" json:"global_size_limit"`
	OverrideTarget      string            `bson:"override_target" json:"override_target"`
	GraphQL             GraphQLConfig     `bson:"graphql" json:"graphql"`
}

// AuthProviderMeta selects the auth provider and its storage backend,
// with provider-specific settings in Meta.
type AuthProviderMeta struct {
	Name          AuthProviderCode       `bson:"name" json:"name"`
	StorageEngine StorageEngineCode      `bson:"storage_engine" json:"storage_engine"`
	Meta          map[string]interface{} `bson:"meta" json:"meta"`
}

// SessionProviderMeta selects the session provider and its storage backend,
// with provider-specific settings in Meta.
type SessionProviderMeta struct {
	Name          SessionProviderCode    `bson:"name" json:"name"`
	StorageEngine StorageEngineCode      `bson:"storage_engine" json:"storage_engine"`
	Meta          map[string]interface{} `bson:"meta" json:"meta"`
}

// EventHandlerTriggerConfig names one event handler and its configuration.
type EventHandlerTriggerConfig struct {
	Handler     TykEventHandlerName    `bson:"handler_name" json:"handler_name"`
	HandlerMeta map[string]interface{} `bson:"handler_meta" json:"handler_meta"`
}

// EventHandlerMetaConfig maps each event type to the handlers fired for it.
type EventHandlerMetaConfig struct {
	Events map[TykEvent][]EventHandlerTriggerConfig `bson:"events" json:"events"`
}

// MiddlewareDefinition describes one custom middleware plugin: its name,
// source path, and whether it needs the session / only the raw body.
type MiddlewareDefinition struct {
	Name           string `bson:"name" json:"name"`
	Path           string `bson:"path" json:"path"`
	RequireSession bool   `bson:"require_session" json:"require_session"`
	RawBodyOnly    bool   `bson:"raw_body_only" json:"raw_body_only"`
}

// MiddlewareIdExtractor configures how a caller ID is extracted from a
// request (source + method + extractor-specific config). Extractor holds
// the instantiated extractor and is never persisted.
type MiddlewareIdExtractor struct {
	ExtractFrom     IdExtractorSource      `bson:"extract_from" json:"extract_from"`
	ExtractWith     IdExtractorType        `bson:"extract_with" json:"extract_with"`
	ExtractorConfig map[string]interface{} `bson:"extractor_config" json:"extractor_config"`
	Extractor       interface{}            `bson:"-" json:"-"`
}

// MiddlewareSection groups the custom middleware hooks by phase
// (pre, post, post-key-auth, auth check, response) plus the driver used to
// run them and the optional ID extractor.
type MiddlewareSection struct {
	Pre         []MiddlewareDefinition `bson:"pre" json:"pre"`
	Post        []MiddlewareDefinition `bson:"post" json:"post"`
	PostKeyAuth []MiddlewareDefinition `bson:"post_key_auth" json:"post_key_auth"`
	AuthCheck   MiddlewareDefinition   `bson:"auth_check" json:"auth_check"`
	Response    []MiddlewareDefinition `bson:"response" json:"response"`
	Driver      MiddlewareDriver       `bson:"driver" json:"driver"`
	IdExtractor MiddlewareIdExtractor  `bson:"id_extractor" json:"id_extractor"`
}

// CacheOptions configures response caching for the API: TTL, which requests
// and response codes are cached, upstream cache-control handling and the
// headers that form the cache key.
type CacheOptions struct {
	CacheTimeout               int64    `bson:"cache_timeout" json:"cache_timeout"`
	EnableCache                bool     `bson:"enable_cache" json:"enable_cache"`
	CacheAllSafeRequests       bool     `bson:"cache_all_safe_requests" json:"cache_all_safe_requests"`
	CacheOnlyResponseCodes     []int    `bson:"cache_response_codes" json:"cache_response_codes"`
	EnableUpstreamCacheControl bool     `bson:"enable_upstream_cache_control" json:"enable_upstream_cache_control"`
	CacheControlTTLHeader      string   `bson:"cache_control_ttl_header" json:"cache_control_ttl_header"`
	CacheByHeaders             []string `bson:"cache_by_headers" json:"cache_by_headers"`
}

// ResponseProcessor names one response processor and its options.
type ResponseProcessor struct {
	Name    string      `bson:"name" json:"name"`
	Options interface{} `bson:"options" json:"options"`
}

// HostCheckObject describes one uptime-test probe: target URL, protocol,
// timeout, optional commands, and the HTTP request (method/headers/body)
// to send.
type HostCheckObject struct {
	CheckURL string `bson:"url" json:"url"`
	Protocol string `bson:"protocol" json:"protocol"`
	// Timeout is serialized via its tags as a raw duration value —
	// presumably nanoseconds when given as a bare number; confirm with the
	// uptime-test loader.
	Timeout             time.Duration     `bson:"timeout" json:"timeout"`
	EnableProxyProtocol bool              `bson:"enable_proxy_protocol" json:"enable_proxy_protocol"`
	Commands            []CheckCommand    `bson:"commands" json:"commands"`
	Method              string            `bson:"method" json:"method"`
	Headers             map[string]string `bson:"headers" json:"headers"`
	Body                string            `bson:"body" json:"body"`
}

// CheckCommand is a named command (with message payload) run as part of a
// host check.
type CheckCommand struct {
	Name    string `bson:"name" json:"name"`
	Message string `bson:"message" json:"message"`
}

// ServiceDiscoveryConfiguration configures dynamic upstream discovery:
// where to query, how to navigate the returned document (data/port/target
// paths, nesting), list handling, and how long results are cached.
type ServiceDiscoveryConfiguration struct {
	UseDiscoveryService bool   `bson:"use_discovery_service" json:"use_discovery_service"`
	QueryEndpoint       string `bson:"query_endpoint" json:"query_endpoint"`
	UseNestedQuery      bool   `bson:"use_nested_query" json:"use_nested_query"`
	ParentDataPath      string `bson:"parent_data_path" json:"parent_data_path"`
	DataPath            string `bson:"data_path" json:"data_path"`
	PortDataPath        string `bson:"port_data_path" json:"port_data_path"`
	TargetPath          string `bson:"target_path" json:"target_path"`
	UseTargetList       bool   `bson:"use_target_list" json:"use_target_list"`
	CacheTimeout        int64  `bson:"cache_timeout" json:"cache_timeout"`
	EndpointReturnsList bool   `bson:"endpoint_returns_list" json:"endpoint_returns_list"`
}

// OIDProviderConfig maps an OpenID Connect issuer to its allowed client IDs.
type OIDProviderConfig struct {
	Issuer    string            `bson:"issuer" json:"issuer"`
	ClientIDs map[string]string `bson:"client_ids" json:"client_ids"`
}

// OpenIDOptions lists the accepted OIDC providers and whether access is
// segregated per client.
type OpenIDOptions struct {
	Providers         []OIDProviderConfig `bson:"providers" json:"providers"`
	SegregateByClient bool                `bson:"segregate_by_client" json:"segregate_by_client"`
}
// APIDefinition represents the configuration for a single proxied API and it's versions.
//
// swagger:model
type APIDefinition struct {
	// Identity and listener configuration.
	Id                  bson.ObjectId `bson:"_id,omitempty" json:"id,omitempty"`
	Name                string        `bson:"name" json:"name"`
	Slug                string        `bson:"slug" json:"slug"`
	ListenPort          int           `bson:"listen_port" json:"listen_port"`
	Protocol            string        `bson:"protocol" json:"protocol"`
	EnableProxyProtocol bool          `bson:"enable_proxy_protocol" json:"enable_proxy_protocol"`
	APIID               string        `bson:"api_id" json:"api_id"`
	OrgID               string        `bson:"org_id" json:"org_id"`
	// Authentication selection flags — one of keyless, OAuth2, OpenID,
	// basic, JWT, standard token, Go-plugin or coprocess auth.
	UseKeylessAccess bool          `bson:"use_keyless" json:"use_keyless"`
	UseOauth2        bool          `bson:"use_oauth2" json:"use_oauth2"`
	UseOpenID        bool          `bson:"use_openid" json:"use_openid"`
	OpenIDOptions    OpenIDOptions `bson:"openid_options" json:"openid_options"`
	Oauth2Meta struct {
		AllowedAccessTypes     []osin.AccessRequestType   `bson:"allowed_access_types" json:"allowed_access_types"`
		AllowedAuthorizeTypes  []osin.AuthorizeRequestType `bson:"allowed_authorize_types" json:"allowed_authorize_types"`
		AuthorizeLoginRedirect string                     `bson:"auth_login_redirect" json:"auth_login_redirect"`
	} `bson:"oauth_meta" json:"oauth_meta"`
	Auth        AuthConfig            `bson:"auth" json:"auth"` // Deprecated: Use AuthConfigs instead.
	AuthConfigs map[string]AuthConfig `bson:"auth_configs" json:"auth_configs"`
	UseBasicAuth bool `bson:"use_basic_auth" json:"use_basic_auth"`
	BasicAuth struct {
		DisableCaching     bool   `bson:"disable_caching" json:"disable_caching"`
		CacheTTL           int    `bson:"cache_ttl" json:"cache_ttl"`
		ExtractFromBody    bool   `bson:"extract_from_body" json:"extract_from_body"`
		BodyUserRegexp     string `bson:"body_user_regexp" json:"body_user_regexp"`
		BodyPasswordRegexp string `bson:"body_password_regexp" json:"body_password_regexp"`
	} `bson:"basic_auth" json:"basic_auth"`
	// Mutual TLS / certificate pinning. Map keys of UpstreamCertificates
	// and PinnedPublicKeys are base64-encoded for storage by EncodeForDB.
	UseMutualTLSAuth     bool              `bson:"use_mutual_tls_auth" json:"use_mutual_tls_auth"`
	ClientCertificates   []string          `bson:"client_certificates" json:"client_certificates"`
	UpstreamCertificates map[string]string `bson:"upstream_certificates" json:"upstream_certificates"`
	PinnedPublicKeys     map[string]string `bson:"pinned_public_keys" json:"pinned_public_keys"`
	EnableJWT            bool              `bson:"enable_jwt" json:"enable_jwt"`
	UseStandardAuth      bool              `bson:"use_standard_auth" json:"use_standard_auth"`
	UseGoPluginAuth      bool              `bson:"use_go_plugin_auth" json:"use_go_plugin_auth"`
	EnableCoProcessAuth  bool              `bson:"enable_coprocess_auth" json:"enable_coprocess_auth"`
	// JWT validation settings.
	JWTSigningMethod string `bson:"jwt_signing_method" json:"jwt_signing_method"`
	JWTSource        string `bson:"jwt_source" json:"jwt_source"`
	// NOTE(review): the bson tag misspells "identity"; kept as-is for
	// compatibility with already-stored documents.
	JWTIdentityBaseField       string            `bson:"jwt_identit_base_field" json:"jwt_identity_base_field"`
	JWTClientIDBaseField       string            `bson:"jwt_client_base_field" json:"jwt_client_base_field"`
	JWTPolicyFieldName         string            `bson:"jwt_policy_field_name" json:"jwt_policy_field_name"`
	JWTDefaultPolicies         []string          `bson:"jwt_default_policies" json:"jwt_default_policies"`
	JWTIssuedAtValidationSkew  uint64            `bson:"jwt_issued_at_validation_skew" json:"jwt_issued_at_validation_skew"`
	JWTExpiresAtValidationSkew uint64            `bson:"jwt_expires_at_validation_skew" json:"jwt_expires_at_validation_skew"`
	JWTNotBeforeValidationSkew uint64            `bson:"jwt_not_before_validation_skew" json:"jwt_not_before_validation_skew"`
	JWTSkipKid                 bool              `bson:"jwt_skip_kid" json:"jwt_skip_kid"`
	JWTScopeToPolicyMapping    map[string]string `bson:"jwt_scope_to_policy_mapping" json:"jwt_scope_to_policy_mapping"`
	JWTScopeClaimName          string            `bson:"jwt_scope_claim_name" json:"jwt_scope_claim_name"`
	NotificationsDetails       NotificationsManager `bson:"notifications" json:"notifications"`
	// HMAC request signature verification.
	EnableSignatureChecking bool               `bson:"enable_signature_checking" json:"enable_signature_checking"`
	HmacAllowedClockSkew    float64            `bson:"hmac_allowed_clock_skew" json:"hmac_allowed_clock_skew"`
	HmacAllowedAlgorithms   []string           `bson:"hmac_allowed_algorithms" json:"hmac_allowed_algorithms"`
	RequestSigning          RequestSigningMeta `bson:"request_signing" json:"request_signing"`
	BaseIdentityProvidedBy  AuthTypeEnum       `bson:"base_identity_provided_by" json:"base_identity_provided_by"`
	// How the version is extracted from a request (header/url/param).
	VersionDefinition struct {
		Location  string `bson:"location" json:"location"`
		Key       string `bson:"key" json:"key"`
		StripPath bool   `bson:"strip_path" json:"strip_path"`
	} `bson:"definition" json:"definition"`
	// Version map; keys are base64-encoded for storage by EncodeForDB.
	VersionData struct {
		NotVersioned   bool                   `bson:"not_versioned" json:"not_versioned"`
		DefaultVersion string                 `bson:"default_version" json:"default_version"`
		Versions       map[string]VersionInfo `bson:"versions" json:"versions"`
	} `bson:"version_data" json:"version_data"`
	UptimeTests struct {
		CheckList []HostCheckObject `bson:"check_list" json:"check_list"`
		Config struct {
			// NOTE(review): the bson tag abbreviates "uptime" as "utime";
			// kept as-is for stored-data compatibility.
			ExpireUptimeAnalyticsAfter int64                         `bson:"expire_utime_after" json:"expire_utime_after"` // must have an expireAt TTL index set (http://docs.mongodb.org/manual/tutorial/expire-data/)
			ServiceDiscovery           ServiceDiscoveryConfiguration `bson:"service_discovery" json:"service_discovery"`
			RecheckWait                int                           `bson:"recheck_wait" json:"recheck_wait"`
		} `bson:"config" json:"config"`
	} `bson:"uptime_tests" json:"uptime_tests"`
	// Proxy describes where and how traffic is forwarded upstream.
	Proxy struct {
		PreserveHostHeader          bool     `bson:"preserve_host_header" json:"preserve_host_header"`
		ListenPath                  string   `bson:"listen_path" json:"listen_path"`
		TargetURL                   string   `bson:"target_url" json:"target_url"`
		DisableStripSlash           bool     `bson:"disable_strip_slash" json:"disable_strip_slash"`
		StripListenPath             bool     `bson:"strip_listen_path" json:"strip_listen_path"`
		EnableLoadBalancing         bool     `bson:"enable_load_balancing" json:"enable_load_balancing"`
		Targets                     []string `bson:"target_list" json:"target_list"`
		// StructuredTargetList is runtime-only; never persisted.
		StructuredTargetList        *HostList `bson:"-" json:"-"`
		CheckHostAgainstUptimeTests bool      `bson:"check_host_against_uptime_tests" json:"check_host_against_uptime_tests"`
		ServiceDiscovery            ServiceDiscoveryConfiguration `bson:"service_discovery" json:"service_discovery"`
		Transport struct {
			SSLInsecureSkipVerify   bool     `bson:"ssl_insecure_skip_verify" json:"ssl_insecure_skip_verify"`
			SSLCipherSuites         []string `bson:"ssl_ciphers" json:"ssl_ciphers"`
			SSLMinVersion           uint16   `bson:"ssl_min_version" json:"ssl_min_version"`
			SSLForceCommonNameCheck bool     `json:"ssl_force_common_name_check"`
			ProxyURL                string   `bson:"proxy_url" json:"proxy_url"`
		} `bson:"transport" json:"transport"`
	} `bson:"proxy" json:"proxy"`
	// Gateway behaviour toggles and middleware wiring.
	DisableRateLimit          bool                   `bson:"disable_rate_limit" json:"disable_rate_limit"`
	DisableQuota              bool                   `bson:"disable_quota" json:"disable_quota"`
	CustomMiddleware          MiddlewareSection      `bson:"custom_middleware" json:"custom_middleware"`
	CustomMiddlewareBundle    string                 `bson:"custom_middleware_bundle" json:"custom_middleware_bundle"`
	CacheOptions              CacheOptions           `bson:"cache_options" json:"cache_options"`
	SessionLifetime           int64                  `bson:"session_lifetime" json:"session_lifetime"`
	Active                    bool                   `bson:"active" json:"active"`
	Internal                  bool                   `bson:"internal" json:"internal"`
	AuthProvider              AuthProviderMeta       `bson:"auth_provider" json:"auth_provider"`
	SessionProvider           SessionProviderMeta    `bson:"session_provider" json:"session_provider"`
	EventHandlers             EventHandlerMetaConfig `bson:"event_handlers" json:"event_handlers"`
	EnableBatchRequestSupport bool                   `bson:"enable_batch_request_support" json:"enable_batch_request_support"`
	// IP allow/deny lists.
	EnableIpWhiteListing bool     `mapstructure:"enable_ip_whitelisting" bson:"enable_ip_whitelisting" json:"enable_ip_whitelisting"`
	AllowedIPs           []string `mapstructure:"allowed_ips" bson:"allowed_ips" json:"allowed_ips"`
	EnableIpBlacklisting bool     `mapstructure:"enable_ip_blacklisting" bson:"enable_ip_blacklisting" json:"enable_ip_blacklisting"`
	BlacklistedIPs       []string `mapstructure:"blacklisted_ips" bson:"blacklisted_ips" json:"blacklisted_ips"`
	DontSetQuotasOnCreate bool  `mapstructure:"dont_set_quota_on_create" bson:"dont_set_quota_on_create" json:"dont_set_quota_on_create"`
	ExpireAnalyticsAfter  int64 `mapstructure:"expire_analytics_after" bson:"expire_analytics_after" json:"expire_analytics_after"` // must have an expireAt TTL index set (http://docs.mongodb.org/manual/tutorial/expire-data/)
	ResponseProcessors    []ResponseProcessor `bson:"response_processors" json:"response_processors"`
	CORS struct {
		Enable             bool     `bson:"enable" json:"enable"`
		AllowedOrigins     []string `bson:"allowed_origins" json:"allowed_origins"`
		AllowedMethods     []string `bson:"allowed_methods" json:"allowed_methods"`
		AllowedHeaders     []string `bson:"allowed_headers" json:"allowed_headers"`
		ExposedHeaders     []string `bson:"exposed_headers" json:"exposed_headers"`
		AllowCredentials   bool     `bson:"allow_credentials" json:"allow_credentials"`
		MaxAge             int      `bson:"max_age" json:"max_age"`
		OptionsPassthrough bool     `bson:"options_passthrough" json:"options_passthrough"`
		Debug              bool     `bson:"debug" json:"debug"`
	} `bson:"CORS" json:"CORS"`
	Domain                  string                 `bson:"domain" json:"domain"`
	Certificates            []string               `bson:"certificates" json:"certificates"`
	DoNotTrack              bool                   `bson:"do_not_track" json:"do_not_track"`
	Tags                    []string               `bson:"tags" json:"tags"`
	EnableContextVars       bool                   `bson:"enable_context_vars" json:"enable_context_vars"`
	ConfigData              map[string]interface{} `bson:"config_data" json:"config_data"`
	TagHeaders              []string               `bson:"tag_headers" json:"tag_headers"`
	GlobalRateLimit         GlobalRateLimit        `bson:"global_rate_limit" json:"global_rate_limit"`
	StripAuthData           bool                   `bson:"strip_auth_data" json:"strip_auth_data"`
	EnableDetailedRecording bool                   `bson:"enable_detailed_recording" json:"enable_detailed_recording"`
}
// AuthConfig describes where the auth token is read from (query param,
// cookie, header or client certificate) and optional signature validation
// of the token.
type AuthConfig struct {
	UseParam          bool            `mapstructure:"use_param" bson:"use_param" json:"use_param"`
	ParamName         string          `mapstructure:"param_name" bson:"param_name" json:"param_name"`
	UseCookie         bool            `mapstructure:"use_cookie" bson:"use_cookie" json:"use_cookie"`
	CookieName        string          `mapstructure:"cookie_name" bson:"cookie_name" json:"cookie_name"`
	AuthHeaderName    string          `mapstructure:"auth_header_name" bson:"auth_header_name" json:"auth_header_name"`
	UseCertificate    bool            `mapstructure:"use_certificate" bson:"use_certificate" json:"use_certificate"`
	ValidateSignature bool            `mapstructure:"validate_signature" bson:"validate_signature" json:"validate_signature"`
	Signature         SignatureConfig `mapstructure:"signature" bson:"signature" json:"signature,omitempty"`
}

// SignatureConfig configures token signature validation: algorithm, the
// header carrying the signature, shared secret, tolerated clock skew, and
// the error returned on failure.
type SignatureConfig struct {
	Algorithm        string `mapstructure:"algorithm" bson:"algorithm" json:"algorithm"`
	Header           string `mapstructure:"header" bson:"header" json:"header"`
	Secret           string `mapstructure:"secret" bson:"secret" json:"secret"`
	AllowedClockSkew int64  `mapstructure:"allowed_clock_skew" bson:"allowed_clock_skew" json:"allowed_clock_skew"`
	ErrorCode        int    `mapstructure:"error_code" bson:"error_code" json:"error_code"`
	ErrorMessage     string `mapstructure:"error_message" bson:"error_message" json:"error_message"`
}

// GlobalRateLimit is an API-wide rate limit of Rate requests per Per
// (presumably seconds — confirm with the rate limiter).
type GlobalRateLimit struct {
	Rate float64 `bson:"rate" json:"rate"`
	Per  float64 `bson:"per" json:"per"`
}

// BundleManifest describes a custom middleware bundle: its files, the
// middleware it wires up, and integrity/authenticity data.
type BundleManifest struct {
	FileList         []string          `bson:"file_list" json:"file_list"`
	CustomMiddleware MiddlewareSection `bson:"custom_middleware" json:"custom_middleware"`
	Checksum         string            `bson:"checksum" json:"checksum"`
	Signature        string            `bson:"signature" json:"signature"`
}

// RequestSigningMeta configures signing of upstream requests: key/secret or
// certificate, algorithm, which headers are covered, and the header that
// carries the signature.
type RequestSigningMeta struct {
	IsEnabled       bool     `bson:"is_enabled" json:"is_enabled"`
	Secret          string   `bson:"secret" json:"secret"`
	KeyId           string   `bson:"key_id" json:"key_id"`
	Algorithm       string   `bson:"algorithm" json:"algorithm"`
	HeaderList      []string `bson:"header_list" json:"header_list"`
	CertificateId   string   `bson:"certificate_id" json:"certificate_id"`
	SignatureHeader string   `bson:"signature_header" json:"signature_header"`
}
// EncodeForDB prepares the definition for storage: it base64-encodes the
// keys of the versions, upstream-certificate and pinned-public-key maps,
// moves each JSON validation schema into its base64 field, and keeps the
// deprecated Auth field populated from AuthConfigs["authToken"].
// (Despite the historical wording, the encoding is base64, not URL
// encoding.)
func (a *APIDefinition) EncodeForDB() {
	enc := func(s string) string {
		return base64.StdEncoding.EncodeToString([]byte(s))
	}

	// Re-key the version map with encoded names; Name mirrors the new key.
	encodedVersions := make(map[string]VersionInfo, len(a.VersionData.Versions))
	for name, info := range a.VersionData.Versions {
		encodedName := enc(name)
		info.Name = encodedName
		encodedVersions[encodedName] = info
	}
	a.VersionData.Versions = encodedVersions

	// Encode the domain keys of both certificate maps.
	encodedCerts := make(map[string]string, len(a.UpstreamCertificates))
	for domain, cert := range a.UpstreamCertificates {
		encodedCerts[enc(domain)] = cert
	}
	a.UpstreamCertificates = encodedCerts

	encodedKeys := make(map[string]string, len(a.PinnedPublicKeys))
	for domain, key := range a.PinnedPublicKeys {
		encodedKeys[enc(domain)] = key
	}
	a.PinnedPublicKeys = encodedKeys

	// Store each validation schema as base64 JSON and drop the decoded copy.
	for name, info := range a.VersionData.Versions {
		for idx, schema := range info.ExtendedPaths.ValidateJSON {
			raw, _ := json.Marshal(schema.Schema)
			schema.SchemaB64 = base64.StdEncoding.EncodeToString(raw)
			schema.Schema = nil
			a.VersionData.Versions[name].ExtendedPaths.ValidateJSON[idx] = schema
		}
	}

	// Auth is deprecated so this code tries to maintain backward compatibility
	if a.Auth.AuthHeaderName == "" {
		a.Auth = a.AuthConfigs["authToken"]
	}
}
// DecodeFromDB reverses EncodeForDB after loading from storage: it
// base64-decodes the keys of the versions, upstream-certificate and
// pinned-public-key maps (leaving any non-decodable legacy key untouched),
// restores each JSON validation schema from its base64 field, and seeds
// AuthConfigs from the deprecated Auth field for backward compatibility.
func (a *APIDefinition) DecodeFromDB() {
	// decode returns the decoded string and true, or the original string
	// and false when it is not valid base64 (legacy data).
	decode := func(s string) (string, bool) {
		raw, err := base64.StdEncoding.DecodeString(s)
		if err != nil {
			log.Error("Couldn't Decode, leaving as it may be legacy...")
			return s, false
		}
		return string(raw), true
	}

	decodedVersions := make(map[string]VersionInfo, len(a.VersionData.Versions))
	for name, info := range a.VersionData.Versions {
		plain, ok := decode(name)
		if ok {
			info.Name = plain
		}
		decodedVersions[plain] = info
	}
	a.VersionData.Versions = decodedVersions

	decodedCerts := make(map[string]string, len(a.UpstreamCertificates))
	for domain, cert := range a.UpstreamCertificates {
		plain, _ := decode(domain)
		decodedCerts[plain] = cert
	}
	a.UpstreamCertificates = decodedCerts

	decodedKeys := make(map[string]string, len(a.PinnedPublicKeys))
	for domain, key := range a.PinnedPublicKeys {
		plain, _ := decode(domain)
		decodedKeys[plain] = key
	}
	a.PinnedPublicKeys = decodedKeys

	// Restore each validation schema from its base64 form.
	for name, info := range a.VersionData.Versions {
		for idx, schema := range info.ExtendedPaths.ValidateJSON {
			raw, _ := base64.StdEncoding.DecodeString(schema.SchemaB64)
			json.Unmarshal(raw, &schema.Schema)
			schema.SchemaB64 = ""
			a.VersionData.Versions[name].ExtendedPaths.ValidateJSON[idx] = schema
		}
	}

	// Auth is deprecated so this code tries to maintain backward compatibility
	ensureAuthConfig := func(authType string) {
		if a.AuthConfigs == nil {
			a.AuthConfigs = make(map[string]AuthConfig)
		}
		if _, exists := a.AuthConfigs[authType]; !exists {
			a.AuthConfigs[authType] = a.Auth
		}
	}
	ensureAuthConfig("authToken")
	ensureAuthConfig("jwt")
}
// Check returns the first substring of value matched by the compiled
// pattern, or "" when the pattern was never compiled (Init not called or
// failed) or nothing matched. NOTE(review): unlike the Find* helpers, the
// Reverse flag is not consulted here.
func (s *StringRegexMap) Check(value string) (match string) {
	if rx := s.matchRegex; rx != nil {
		match = rx.FindString(value)
	}
	return
}
// FindStringSubmatch runs the compiled pattern against value and returns
// whether it matched plus the submatch slice. When Reverse is set the
// matched flag is inverted (true means "did not match"). A nil (uncompiled)
// pattern yields (false, nil).
func (s *StringRegexMap) FindStringSubmatch(value string) (matched bool, match []string) {
	if s.matchRegex == nil {
		return false, nil
	}
	match = s.matchRegex.FindStringSubmatch(value)
	hit := len(match) > 0
	if s.Reverse {
		return !hit, match
	}
	return hit, match
}
// FindAllStringSubmatch runs the compiled pattern against value (up to n
// matches, as in regexp.FindAllStringSubmatch) and returns whether it
// matched plus all submatch slices. When Reverse is set the matched flag is
// inverted (true means "did not match").
func (s *StringRegexMap) FindAllStringSubmatch(value string, n int) (matched bool, matches [][]string) {
	// FIX: guard against an uncompiled pattern (Init never called or it
	// failed), mirroring Check and FindStringSubmatch; previously this
	// dereferenced a nil *regexp.Regexp and panicked.
	if s.matchRegex == nil {
		return
	}
	matches = s.matchRegex.FindAllStringSubmatch(value, n)
	if !s.Reverse {
		matched = len(matches) > 0
	} else {
		matched = len(matches) == 0
	}
	return
}
// Init compiles MatchPattern into the private matchRegex field. It logs and
// returns the compile error on failure, leaving matchRegex unset.
func (s *StringRegexMap) Init() error {
	rx, err := regexp.Compile(s.MatchPattern)
	if err != nil {
		log.WithError(err).WithField("MatchPattern", s.MatchPattern).
			Error("Could not compile matchRegex for StringRegexMap")
		return err
	}
	s.matchRegex = rx
	return nil
}
// DummyAPI returns a fully populated APIDefinition skeleton: a single
// non-versioned "Default" version whose extended paths exercise every
// endpoint-level feature with placeholder values, and empty (non-nil)
// collections everywhere else. Presumably used as a template/fixture —
// TODO confirm callers.
func DummyAPI() APIDefinition {
	// One sample entry per extended-path feature.
	endpointMeta := EndPointMeta{
		Path: "abc",
		MethodActions: map[string]EndpointMethodMeta{
			"GET": {
				Action:  Reply,
				Code:    200,
				Data:    "testdata",
				Headers: map[string]string{"header": "value"},
			},
		},
	}
	templateMeta := TemplateMeta{
		TemplateData: TemplateData{Input: RequestJSON, Mode: UseBlob},
	}
	transformJQMeta := TransformJQMeta{
		Filter: "filter",
		Path:   "path",
		Method: "method",
	}
	headerInjectionMeta := HeaderInjectionMeta{
		DeleteHeaders: []string{"header1", "header2"},
		AddHeaders:    map[string]string{},
		Path:          "path",
		Method:        "method",
	}
	hardTimeoutMeta := HardTimeoutMeta{Path: "path", Method: "method", TimeOut: 0}
	circuitBreakerMeta := CircuitBreakerMeta{
		Path:                 "path",
		Method:               "method",
		ThresholdPercent:     0.0,
		Samples:              0,
		ReturnToServiceAfter: 0,
	}
	// TODO: Extend triggers
	urlRewriteMeta := URLRewriteMeta{
		Path:         "",
		Method:       "method",
		MatchPattern: "matchpattern",
		RewriteTo:    "rewriteto",
		Triggers:     []RoutingTrigger{},
	}
	virtualMeta := VirtualMeta{
		ResponseFunctionName: "responsefunctioname",
		FunctionSourceType:   "functionsourcetype",
		FunctionSourceURI:    "functionsourceuri",
		Path:                 "path",
		Method:               "method",
	}
	sizeLimit := RequestSizeMeta{
		Path:      "path",
		Method:    "method",
		SizeLimit: 0,
	}
	methodTransformMeta := MethodTransformMeta{Path: "path", Method: "method", ToMethod: "tomethod"}
	trackEndpointMeta := TrackEndpointMeta{Path: "path", Method: "method"}
	internalMeta := InternalMeta{Path: "path", Method: "method"}
	validatePathMeta := ValidatePathMeta{Path: "path", Method: "method", Schema: map[string]interface{}{}, SchemaB64: ""}
	// Empty simple path lists (anonymous struct matches VersionInfo.Paths).
	paths := struct {
		Ignored   []string `bson:"ignored" json:"ignored"`
		WhiteList []string `bson:"white_list" json:"white_list"`
		BlackList []string `bson:"black_list" json:"black_list"`
	}{
		Ignored:   []string{},
		WhiteList: []string{},
		BlackList: []string{},
	}
	// The single "Default" version carrying all sample extended paths.
	versionInfo := VersionInfo{
		Name:             "Default",
		UseExtendedPaths: true,
		Paths:            paths,
		ExtendedPaths: ExtendedPathsSet{
			Ignored:                 []EndPointMeta{endpointMeta},
			WhiteList:               []EndPointMeta{endpointMeta},
			BlackList:               []EndPointMeta{endpointMeta},
			Cached:                  []string{},
			Transform:               []TemplateMeta{templateMeta},
			TransformResponse:       []TemplateMeta{templateMeta},
			TransformJQ:             []TransformJQMeta{transformJQMeta},
			TransformJQResponse:     []TransformJQMeta{transformJQMeta},
			TransformHeader:         []HeaderInjectionMeta{headerInjectionMeta},
			TransformResponseHeader: []HeaderInjectionMeta{headerInjectionMeta},
			HardTimeouts:            []HardTimeoutMeta{hardTimeoutMeta},
			CircuitBreaker:          []CircuitBreakerMeta{circuitBreakerMeta},
			URLRewrite:              []URLRewriteMeta{urlRewriteMeta},
			Virtual:                 []VirtualMeta{virtualMeta},
			SizeLimit:               []RequestSizeMeta{sizeLimit},
			MethodTransforms:        []MethodTransformMeta{methodTransformMeta},
			TrackEndpoints:          []TrackEndpointMeta{trackEndpointMeta},
			DoNotTrackEndpoints:     []TrackEndpointMeta{trackEndpointMeta},
			Internal:                []InternalMeta{internalMeta},
			ValidateJSON:            []ValidatePathMeta{validatePathMeta},
		},
	}
	// Anonymous struct matches APIDefinition.VersionData.
	versionData := struct {
		NotVersioned   bool                   `bson:"not_versioned" json:"not_versioned"`
		DefaultVersion string                 `bson:"default_version" json:"default_version"`
		Versions       map[string]VersionInfo `bson:"versions" json:"versions"`
	}{
		NotVersioned:   true,
		DefaultVersion: "",
		Versions: map[string]VersionInfo{
			"Default": versionInfo,
		},
	}
	// Collections are initialized empty (non-nil) so JSON output is [] / {}
	// rather than null.
	return APIDefinition{
		VersionData:             versionData,
		ConfigData:              map[string]interface{}{},
		AllowedIPs:              []string{},
		PinnedPublicKeys:        map[string]string{},
		ResponseProcessors:      []ResponseProcessor{},
		ClientCertificates:      []string{},
		BlacklistedIPs:          []string{},
		TagHeaders:              []string{},
		UpstreamCertificates:    map[string]string{},
		JWTScopeToPolicyMapping: map[string]string{},
		HmacAllowedAlgorithms:   []string{},
		CustomMiddleware: MiddlewareSection{
			Post:        []MiddlewareDefinition{},
			Pre:         []MiddlewareDefinition{},
			PostKeyAuth: []MiddlewareDefinition{},
			AuthCheck:   MiddlewareDefinition{},
			IdExtractor: MiddlewareIdExtractor{
				ExtractorConfig: map[string]interface{}{},
			},
		},
		Tags: []string{},
	}
}
// Template exposes helper functions to transform templates: "jsonMarshal"
// renders a value as JSON; "xmlMarshal" renders mxj maps (and plain
// map[string]interface{}) as escaped XML via mxj, falling back to
// xml.MarshalIndent for other values.
var Template = template.New("").Funcs(map[string]interface{}{
	"jsonMarshal": func(v interface{}) (string, error) {
		out, err := json.Marshal(v)
		return string(out), err
	},
	"xmlMarshal": func(v interface{}) (string, error) {
		switch val := v.(type) {
		case mxj.Map:
			mxj.XMLEscapeChars(true)
			data, err := val.Xml()
			return string(data), err
		case map[string]interface{}:
			// Wrap a plain map so mxj handles escaping.
			mxj.XMLEscapeChars(true)
			data, err := mxj.Map(val).Xml()
			return string(data), err
		default:
			data, err := xml.MarshalIndent(v, "", " ")
			return string(data), err
		}
	},
})
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package init_repo
import (
"log"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/kubernetes-incubator/apiserver-builder/cmd/apiserver-boot/boot/util"
"github.com/spf13/cobra"
)
// glideInstallCmd implements `apiserver-boot init glide`: it writes a
// default glide.yaml and then populates vendor/ (see RunGlideInstall).
var glideInstallCmd = &cobra.Command{
	Use:   "glide",
	Short: "Install glide.yaml, glide.lock and vendor/.",
	Long:  `Install glide.yaml, glide.lock and vendor/.`,
	Example: `# Bootstrap vendor/ from the src packaged with apiserver-boot
apiserver-boot init glide
# Install vendor/ from using "glide install --strip-vendor"
apiserver-boot init glide --fetch
`,
	Run: RunGlideInstall,
}
// fetch selects between running glide to download dependencies (the
// default) and unpacking the copy packaged with the release tools.
var fetch bool

// builderCommit, when set together with --fetch, pins the
// apiserver-builder dependency in the generated glide.yaml.
var builderCommit string
// AddGlideInstallCmd wires the glide subcommand and its flags into cmd.
func AddGlideInstallCmd(cmd *cobra.Command) {
	flags := glideInstallCmd.Flags()
	flags.BoolVar(&fetch, "fetch", true,
		"if true, fetch new glide deps instead of copying the ones packaged with the tools")
	flags.StringVar(&builderCommit, "commit", "",
		"if specified with fetch, use this commit for the apiserver-builder deps")
	cmd.AddCommand(glideInstallCmd)
}
// retrieveVersion extracts a dotted version number from a tool's version
// banner. It looks for the word "version" (case-insensitive), then returns
// the first run of digits and dots at or after it; if "version" is absent
// it scans from the start. Returns "" when no digit is found.
func retrieveVersion(versionString string) string {
	versionString = strings.ToLower(versionString)

	// Start scanning just past "version" when present, else at the front.
	start := 0
	if i := strings.Index(versionString, "version"); i >= 0 {
		start = i + len("version")
	}

	// Locate the first digit at or after start.
	digit := -1
	for idx, r := range versionString[start:] {
		if '0' <= r && r <= '9' {
			digit = start + idx
			break
		}
	}
	if digit < 0 {
		return ""
	}

	// The version runs until the first character that is neither a digit
	// nor a dot.
	for idx, r := range versionString[digit:] {
		if (r < '0' || r > '9') && r != '.' {
			return versionString[digit : digit+idx]
		}
	}
	return versionString[digit:]
}
// fetchGlide verifies that a supported glide release (0.12.x or 0.13.x)
// is on the PATH and then runs `glide install --strip-vendor` to populate
// vendor/. Any failure aborts the process via log.Fatal.
func fetchGlide() {
	o, err := exec.Command("glide", "-v").CombinedOutput()
	if err != nil {
		log.Fatal("must install glide v0.12 or later")
	}
	v := retrieveVersion(string(o))
	// Only the 0.12.x and 0.13.x release lines are accepted.
	if !strings.HasPrefix(v, "0.12") && !strings.HasPrefix(v, "0.13") {
		// Fix: the original message dropped the required version
		// ("must install glide or later").
		log.Fatalf("must install glide v0.12 or later, was %s", o)
	}
	c := exec.Command("glide", "install", "--strip-vendor")
	c.Stderr = os.Stderr
	c.Stdout = os.Stdout
	if err = c.Run(); err != nil {
		log.Fatalf("failed to run glide install\n%v\n", err)
	}
}
// copyGlide unpacks the vendored dependencies packaged with the
// apiserver-builder release into the current directory.
//
// Move up two directories from the location of the `apiserver-boot`
// executable to find the `vendor` directory we package with our
// releases. TODO(campbellalex@google.com): this doesn't work for people
// who used `go install` to put `apiserver-boot` in their $GOPATH/bin.
func copyGlide() {
	exe, err := os.Executable()
	if err != nil {
		log.Fatal("unable to get directory of apiserver-builder tools")
	}
	root := filepath.Dir(filepath.Dir(exe))
	untar := exec.Command("tar", "-xzvf", filepath.Join(root, "bin", "glide.tar.gz"))
	untar.Stderr = os.Stderr
	untar.Stdout = os.Stdout
	if err := untar.Run(); err != nil {
		log.Fatalf("failed to copy go dependencies %v", err)
	}
}
// RunGlideInstall writes glide.yaml (if absent) and then populates
// vendor/ — by running glide when --fetch is set, otherwise by unpacking
// the dependencies packaged with the release.
func RunGlideInstall(cmd *cobra.Command, args []string) {
	createGlide()
	if !fetch {
		copyGlide()
		return
	}
	fetchGlide()
}
// glideTemplateArguments are the substitutions applied to glideTemplate.
type glideTemplateArguments struct {
	Repo          string // the user's repo, used as the glide package name and ignored path
	BuilderCommit string // optional commit pin for the apiserver-builder dependency
}

// glideTemplate is the default glide.yaml written by createGlide.
// NOTE(review): the `version:` lines appear flush-left here, but glide.yaml
// expects them indented under their `- package:` entry — this looks like
// lost leading whitespace; confirm against the original file before relying
// on this template.
var glideTemplate = `
package: {{.Repo}}
import:
{{ if .BuilderCommit -}}
- package: github.com/kubernetes-incubator/apiserver-builder
version: {{ .BuilderCommit }}
{{ end -}}
- package: k8s.io/api
version: c9fffff41e45e3c00186ac6b00d2cb585734d43e
- package: k8s.io/apimachinery
version: 7da60ba7ddca684051555f2c558eef2dfebc70d5
- package: k8s.io/apiserver
version: e24df9a2e58151a85874948908a454d511066460
- package: k8s.io/client-go
version: 1be407b92aa39a2f63ddbb3d46104a1fd425fda0
- package: github.com/go-openapi/analysis
version: b44dc874b601d9e4e2f6e19140e794ba24bead3b
- package: github.com/go-openapi/jsonpointer
version: 46af16f9f7b149af66e5d1bd010e3574dc06de98
- package: github.com/go-openapi/jsonreference
version: 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272
- package: github.com/go-openapi/loads
version: 18441dfa706d924a39a030ee2c3b1d8d81917b38
- package: github.com/go-openapi/spec
version: 6aced65f8501fe1217321abf0749d354824ba2ff
- package: github.com/go-openapi/swag
version: 1d0bd113de87027671077d3c71eb3ac5d7dbba72
- package: github.com/golang/glog
version: 44145f04b68cf362d9c4df2182967c2275eaefed
- package: github.com/pkg/errors
version: a22138067af1c4942683050411a841ade67fe1eb
- package: github.com/spf13/cobra
version: 7b1b6e8dc027253d45fc029bc269d1c019f83a34
- package: github.com/spf13/pflag
version: d90f37a48761fe767528f31db1955e4f795d652f
ignore:
- {{.Repo}}
`
func createGlide() {
dir, err := os.Getwd()
if err != nil {
log.Fatal(err)
}
path := filepath.Join(dir, "glide.yaml")
util.WriteIfNotFound(path, "glide-template", glideTemplate,
glideTemplateArguments{
util.Repo,
builderCommit,
})
}
Don't use `tar` command for unpacking vendored libs
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package init_repo
import (
	"archive/tar"
	"compress/gzip"
	"io"
	"io/ioutil"
	"log"
	"os"
	"os/exec"
	"path/filepath"
	"strings"

	"github.com/kubernetes-incubator/apiserver-builder/cmd/apiserver-boot/boot/util"
	"github.com/spf13/cobra"
)
// glideInstallCmd implements `apiserver-boot init glide`: it writes a
// default glide.yaml and then populates vendor/ (see RunGlideInstall).
var glideInstallCmd = &cobra.Command{
	Use:   "glide",
	Short: "Install glide.yaml, glide.lock and vendor/.",
	Long:  `Install glide.yaml, glide.lock and vendor/.`,
	Example: `# Bootstrap vendor/ from the src packaged with apiserver-boot
apiserver-boot init glide
# Install vendor/ from using "glide install --strip-vendor"
apiserver-boot init glide --fetch
`,
	Run: RunGlideInstall,
}
// fetch selects between running glide to download dependencies (the
// default) and unpacking the copy packaged with the release tools.
var fetch bool

// builderCommit, when set together with --fetch, pins the
// apiserver-builder dependency in the generated glide.yaml.
var builderCommit string
// AddGlideInstallCmd wires the glide subcommand and its flags into cmd.
func AddGlideInstallCmd(cmd *cobra.Command) {
	flags := glideInstallCmd.Flags()
	flags.BoolVar(&fetch, "fetch", true,
		"if true, fetch new glide deps instead of copying the ones packaged with the tools")
	flags.StringVar(&builderCommit, "commit", "",
		"if specified with fetch, use this commit for the apiserver-builder deps")
	cmd.AddCommand(glideInstallCmd)
}
// retrieveVersion extracts a dotted version number from a tool's version
// banner. It looks for the word "version" (case-insensitive), then returns
// the first run of digits and dots at or after it; if "version" is absent
// it scans from the start. Returns "" when no digit is found.
func retrieveVersion(versionString string) string {
	versionString = strings.ToLower(versionString)

	// Start scanning just past "version" when present, else at the front.
	start := 0
	if i := strings.Index(versionString, "version"); i >= 0 {
		start = i + len("version")
	}

	// Locate the first digit at or after start.
	digit := -1
	for idx, r := range versionString[start:] {
		if '0' <= r && r <= '9' {
			digit = start + idx
			break
		}
	}
	if digit < 0 {
		return ""
	}

	// The version runs until the first character that is neither a digit
	// nor a dot.
	for idx, r := range versionString[digit:] {
		if (r < '0' || r > '9') && r != '.' {
			return versionString[digit : digit+idx]
		}
	}
	return versionString[digit:]
}
// fetchGlide verifies that a supported glide release (0.12.x or 0.13.x)
// is on the PATH and then runs `glide install --strip-vendor` to populate
// vendor/. Any failure aborts the process via log.Fatal.
func fetchGlide() {
	o, err := exec.Command("glide", "-v").CombinedOutput()
	if err != nil {
		log.Fatal("must install glide v0.12 or later")
	}
	v := retrieveVersion(string(o))
	// Only the 0.12.x and 0.13.x release lines are accepted.
	if !strings.HasPrefix(v, "0.12") && !strings.HasPrefix(v, "0.13") {
		// Fix: the original message dropped the required version
		// ("must install glide or later").
		log.Fatalf("must install glide v0.12 or later, was %s", o)
	}
	c := exec.Command("glide", "install", "--strip-vendor")
	c.Stderr = os.Stderr
	c.Stdout = os.Stdout
	if err = c.Run(); err != nil {
		log.Fatalf("failed to run glide install\n%v\n", err)
	}
}
// copyGlide populates the current directory with the vendored dependencies
// packaged alongside the apiserver-boot binary, by unpacking
// bin/glide.tar.gz found two directories above the executable.
// All failures abort the process via log.Fatal.
func copyGlide() {
	// Move up two directories from the location of the `apiserver-boot`
	// executable to find the `vendor` directory we package with our
	// releases.
	e, err := os.Executable()
	if err != nil {
		log.Fatal("unable to get directory of apiserver-builder tools")
	}
	e = filepath.Dir(filepath.Dir(e))

	// Open the packaged tarball.
	f := filepath.Join(e, "bin", "glide.tar.gz")
	fr, err := os.Open(f)
	if err != nil {
		log.Fatalf("failed to read vendor tar file %s %v", f, err)
	}
	defer fr.Close()

	// Decompress the gzip stream.
	gr, err := gzip.NewReader(fr)
	if err != nil {
		log.Fatalf("failed to read vendor tar file %s %v", f, err)
	}
	defer gr.Close()

	// Walk the tar entries, recreating files under the current directory.
	tr := tar.NewReader(gr)
	for {
		file, err := tr.Next()
		if err == io.EOF {
			break // end of archive
		}
		if err != nil {
			// Fix: the original loop stopped silently on ANY error from
			// Next, so a corrupt or truncated archive looked like success.
			log.Fatalf("failed to read vendor tar file %s %v", f, err)
		}
		p := filepath.Join(".", file.Name)
		if file.Typeflag == tar.TypeDir {
			// Fix: directory entries must become directories; writing them
			// as regular files breaks extraction of their children.
			if err := os.MkdirAll(p, 0755); err != nil {
				log.Fatalf("Could not create directory %s: %v", p, err)
			}
			continue
		}
		if err := os.MkdirAll(filepath.Dir(p), 0755); err != nil {
			log.Fatalf("Could not create directory %s: %v", filepath.Dir(p), err)
		}
		b, err := ioutil.ReadAll(tr)
		if err != nil {
			log.Fatalf("Could not read file %s: %v", file.Name, err)
		}
		if err := ioutil.WriteFile(p, b, os.FileMode(file.Mode)); err != nil {
			log.Fatalf("Could not write file %s: %v", p, err)
		}
	}
}
// RunGlideInstall writes glide.yaml (if absent) and then populates
// vendor/ — by running glide when --fetch is set, otherwise by unpacking
// the dependencies packaged with the release.
func RunGlideInstall(cmd *cobra.Command, args []string) {
	createGlide()
	if !fetch {
		copyGlide()
		return
	}
	fetchGlide()
}
// glideTemplateArguments are the substitutions applied to glideTemplate.
type glideTemplateArguments struct {
	Repo          string // the user's repo, used as the glide package name and ignored path
	BuilderCommit string // optional commit pin for the apiserver-builder dependency
}

// glideTemplate is the default glide.yaml written by createGlide.
// NOTE(review): the `version:` lines appear flush-left here, but glide.yaml
// expects them indented under their `- package:` entry — this looks like
// lost leading whitespace; confirm against the original file before relying
// on this template.
var glideTemplate = `
package: {{.Repo}}
import:
{{ if .BuilderCommit -}}
- package: github.com/kubernetes-incubator/apiserver-builder
version: {{ .BuilderCommit }}
{{ end -}}
- package: k8s.io/api
version: c9fffff41e45e3c00186ac6b00d2cb585734d43e
- package: k8s.io/apimachinery
version: 7da60ba7ddca684051555f2c558eef2dfebc70d5
- package: k8s.io/apiserver
version: e24df9a2e58151a85874948908a454d511066460
- package: k8s.io/client-go
version: 1be407b92aa39a2f63ddbb3d46104a1fd425fda0
- package: github.com/go-openapi/analysis
version: b44dc874b601d9e4e2f6e19140e794ba24bead3b
- package: github.com/go-openapi/jsonpointer
version: 46af16f9f7b149af66e5d1bd010e3574dc06de98
- package: github.com/go-openapi/jsonreference
version: 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272
- package: github.com/go-openapi/loads
version: 18441dfa706d924a39a030ee2c3b1d8d81917b38
- package: github.com/go-openapi/spec
version: 6aced65f8501fe1217321abf0749d354824ba2ff
- package: github.com/go-openapi/swag
version: 1d0bd113de87027671077d3c71eb3ac5d7dbba72
- package: github.com/golang/glog
version: 44145f04b68cf362d9c4df2182967c2275eaefed
- package: github.com/pkg/errors
version: a22138067af1c4942683050411a841ade67fe1eb
- package: github.com/spf13/cobra
version: 7b1b6e8dc027253d45fc029bc269d1c019f83a34
- package: github.com/spf13/pflag
version: d90f37a48761fe767528f31db1955e4f795d652f
ignore:
- {{.Repo}}
`
func createGlide() {
dir, err := os.Getwd()
if err != nil {
log.Fatal(err)
}
path := filepath.Join(dir, "glide.yaml")
util.WriteIfNotFound(path, "glide-template", glideTemplate,
glideTemplateArguments{
util.Repo,
builderCommit,
})
}
|
package remote
import (
"code.google.com/p/go.net/context"
)
// key is the context key under which the Remote implementation is stored.
const key = "remote"

// Setter defines a context that enables setting values.
type Setter interface {
	Set(string, interface{})
}

// FromContext returns the Remote associated with this context.
// NOTE(review): the type assertion is unchecked, so this panics when no
// Remote was stored via ToContext — confirm callers always register one.
func FromContext(c context.Context) Remote {
	return c.Value(key).(Remote)
}

// ToContext adds the Remote to this context if it supports
// the Setter interface.
func ToContext(c Setter, r Remote) {
	c.Set(key, r)
}
ability to get remote from context
package remote
import (
"golang.org/x/net/context"
)
// key is the context key under which the Remote implementation is stored.
const key = "remote"

// Setter defines a context that enables setting values.
type Setter interface {
	Set(string, interface{})
}

// FromContext returns the Remote associated with this context.
// NOTE(review): the type assertion is unchecked, so this panics when no
// Remote was stored via ToContext — confirm callers always register one.
func FromContext(c context.Context) Remote {
	return c.Value(key).(Remote)
}

// ToContext adds the Remote to this context if it supports
// the Setter interface.
func ToContext(c Setter, r Remote) {
	c.Set(key, r)
}
|
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime_test
import (
"fmt"
"math"
"math/rand"
. "runtime"
"strings"
"testing"
"unsafe"
)
// TestMemHash32Equality checks that the 4-byte specialized hash agrees
// with the generic memory hash on random inputs (fallback hash only).
func TestMemHash32Equality(t *testing.T) {
	if *UseAeshash {
		t.Skip("skipping since AES hash implementation is used")
	}
	r := rand.New(rand.NewSource(1234))
	seed := uintptr(r.Uint64())
	var buf [4]byte
	for trial := 0; trial < 100; trial++ {
		randBytes(r, buf[:])
		got := MemHash32(unsafe.Pointer(&buf), seed)
		if want := MemHash(unsafe.Pointer(&buf), seed, 4); got != want {
			t.Errorf("MemHash32(%x, %v) = %v; want %v", buf, seed, got, want)
		}
	}
}
// TestMemHash64Equality checks that the 8-byte specialized hash agrees
// with the generic memory hash on random inputs (fallback hash only).
func TestMemHash64Equality(t *testing.T) {
	if *UseAeshash {
		t.Skip("skipping since AES hash implementation is used")
	}
	r := rand.New(rand.NewSource(1234))
	seed := uintptr(r.Uint64())
	var buf [8]byte
	for trial := 0; trial < 100; trial++ {
		randBytes(r, buf[:])
		got := MemHash64(unsafe.Pointer(&buf), seed)
		if want := MemHash(unsafe.Pointer(&buf), seed, 8); got != want {
			t.Errorf("MemHash64(%x, %v) = %v; want %v", buf, seed, got, want)
		}
	}
}
// Smhasher is a torture test for hash functions.
// https://code.google.com/p/smhasher/
// This code is a port of some of the Smhasher tests to Go.
//
// The current AES hash function passes Smhasher. Our fallback
// hash functions don't, so we only enable the difficult tests when
// we know the AES implementation is available.

// Sanity checks.
// hash should not depend on values outside key.
// hash should not depend on alignment.
func TestSmhasherSanity(t *testing.T) {
	r := rand.New(rand.NewSource(1234))
	const REP = 10
	const KEYMAX = 128
	const PAD = 16
	const OFFMAX = 16
	for k := 0; k < REP; k++ {
		for n := 0; n < KEYMAX; n++ {
			for i := 0; i < OFFMAX; i++ {
				// Place the same n-byte key at two different offsets (PAD
				// in b, PAD+i in c) inside independently random padding;
				// the two hashes must agree.
				var b [KEYMAX + OFFMAX + 2*PAD]byte
				var c [KEYMAX + OFFMAX + 2*PAD]byte
				randBytes(r, b[:])
				randBytes(r, c[:])
				copy(c[PAD+i:PAD+i+n], b[PAD:PAD+n])
				if BytesHash(b[PAD:PAD+n], 0) != BytesHash(c[PAD+i:PAD+i+n], 0) {
					t.Errorf("hash depends on bytes outside key")
				}
			}
		}
	}
}
// HashSet accumulates hash values and remembers how many were added so
// that check can estimate the collision rate.
type HashSet struct {
	m map[uintptr]struct{} // set of hashes added
	n int                  // number of hashes added
}

// newHashSet returns an empty HashSet.
func newHashSet() *HashSet {
	return &HashSet{make(map[uintptr]struct{}), 0}
}

// add records one hash value.
func (s *HashSet) add(h uintptr) {
	s.m[h] = struct{}{}
	s.n++
}

// addS hashes a string with seed 0 and records the result.
func (s *HashSet) addS(x string) {
	s.add(StringHash(x, 0))
}

// addB hashes a byte slice with seed 0 and records the result.
func (s *HashSet) addB(x []byte) {
	s.add(BytesHash(x, 0))
}

// addS_seed hashes a string with the given seed and records the result.
func (s *HashSet) addS_seed(x string, seed uintptr) {
	s.add(StringHash(x, seed))
}

// check fails the test when the observed collision count is far above the
// birthday-bound expectation for an ideal hashSize-bit hash.
func (s *HashSet) check(t *testing.T) {
	const SLOP = 10.0
	collisions := s.n - len(s.m)
	//fmt.Printf("%d/%d\n", len(s.m), s.n)
	pairs := int64(s.n) * int64(s.n-1) / 2
	// expected collisions = C(n,2) / 2^hashSize; stddev ≈ sqrt(expected).
	expected := float64(pairs) / math.Pow(2.0, float64(hashSize))
	stddev := math.Sqrt(expected)
	if float64(collisions) > expected+SLOP*(3*stddev+1) {
		t.Errorf("unexpected number of collisions: got=%d mean=%f stddev=%f", collisions, expected, stddev)
	}
}
// Appending zero bytes to a string must keep producing distinct hashes.
func TestSmhasherAppendedZeros(t *testing.T) {
	str := "hello" + strings.Repeat("\x00", 256)
	set := newHashSet()
	for n := 0; n <= len(str); n++ {
		set.addS(str[:n])
	}
	set.check(t)
}
// All 0-3 byte strings have distinct hashes.
func TestSmhasherSmallKeys(t *testing.T) {
	h := newHashSet()
	var b [3]byte
	for i := 0; i < 256; i++ {
		b[0] = byte(i)
		h.addB(b[:1]) // all 256 one-byte keys
		for j := 0; j < 256; j++ {
			b[1] = byte(j)
			h.addB(b[:2]) // all 64K two-byte keys
			if !testing.Short() {
				// The ~16.7M three-byte keys only run in long mode.
				for k := 0; k < 256; k++ {
					b[2] = byte(k)
					h.addB(b[:3])
				}
			}
		}
	}
	h.check(t)
}
// Different length strings of all zeros must have distinct hashes.
func TestSmhasherZeros(t *testing.T) {
	limit := 256 * 1024
	if testing.Short() {
		limit = 1024
	}
	set := newHashSet()
	zeros := make([]byte, limit)
	for n := 0; n <= limit; n++ {
		set.addB(zeros[:n])
	}
	set.check(t)
}
// Strings with up to two nonzero bytes all have distinct hashes.
func TestSmhasherTwoNonzero(t *testing.T) {
	if GOARCH == "wasm" {
		t.Skip("Too slow on wasm")
	}
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	set := newHashSet()
	for size := 2; size <= 16; size++ {
		twoNonZero(set, size)
	}
	set.check(t)
}
// twoNonZero adds to h the hash of every length-n key that holds at most
// two non-zero bytes. The buffer is mutated in place and restored after
// each key. (Note: b[:] on a slice is the slice itself, so the redundant
// re-slicing of the original is dropped.)
func twoNonZero(h *HashSet, n int) {
	b := make([]byte, n)
	// the all-zero key
	h.addB(b)
	// every key with exactly one non-zero byte
	for i := range b {
		for x := 1; x < 256; x++ {
			b[i] = byte(x)
			h.addB(b)
			b[i] = 0
		}
	}
	// every key with exactly two non-zero bytes
	for i := range b {
		for x := 1; x < 256; x++ {
			b[i] = byte(x)
			for j := i + 1; j < n; j++ {
				for y := 1; y < 256; y++ {
					b[j] = byte(y)
					h.addB(b)
					b[j] = 0
				}
			}
			b[i] = 0
		}
	}
}
// Test strings with repeats, like "abcdabcdabcdabcd..."
func TestSmhasherCyclic(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	r := rand.New(rand.NewSource(1234))
	const REPEAT = 8
	const N = 1000000
	for n := 4; n <= 12; n++ {
		h := newHashSet()
		b := make([]byte, REPEAT*n)
		for i := 0; i < N; i++ {
			// The first four bytes are derived from i; the remainder of
			// the n-byte period is random.
			b[0] = byte(i * 79 % 97)
			b[1] = byte(i * 43 % 137)
			b[2] = byte(i * 151 % 197)
			b[3] = byte(i * 199 % 251)
			randBytes(r, b[4:n])
			// Repeat the n-byte period REPEAT times to fill the key.
			for j := n; j < n*REPEAT; j++ {
				b[j] = b[j-n]
			}
			h.addB(b)
		}
		h.check(t)
	}
}
// Test strings with only a few bits set.
func TestSmhasherSparse(t *testing.T) {
	if GOARCH == "wasm" {
		t.Skip("Too slow on wasm")
	}
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	// key size in bits, max bits set
	for _, tc := range []struct{ n, k int }{
		{32, 6}, {40, 6}, {48, 5}, {56, 5}, {64, 5}, {96, 4}, {256, 3}, {2048, 2},
	} {
		sparse(t, tc.n, tc.k)
	}
}
// sparse hashes every n-bit key with at most k bits set and checks the
// collision rate.
func sparse(t *testing.T, n int, k int) {
	set := newHashSet()
	setbits(set, make([]byte, n/8), 0, k)
	set.check(t)
}
// set up to k bits at index i and greater
// (recursively enumerates every key reachable by setting at most k more
// bits at positions >= i, hashing each; b is mutated in place and each bit
// is cleared again before returning)
func setbits(h *HashSet, b []byte, i int, k int) {
	h.addB(b)
	if k == 0 {
		return
	}
	for j := i; j < len(b)*8; j++ {
		b[j/8] |= byte(1 << uint(j&7))
		setbits(h, b, j+1, k-1)
		b[j/8] &= byte(^(1 << uint(j&7)))
	}
}
// Test all possible combinations of n blocks from the set s.
// "permutation" is a bad name here, but it is what Smhasher uses.
func TestSmhasherPermutation(t *testing.T) {
	if GOARCH == "wasm" {
		t.Skip("Too slow on wasm")
	}
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	cases := []struct {
		s []uint32
		n int
	}{
		{[]uint32{0, 1, 2, 3, 4, 5, 6, 7}, 8},
		{[]uint32{0, 1 << 29, 2 << 29, 3 << 29, 4 << 29, 5 << 29, 6 << 29, 7 << 29}, 8},
		{[]uint32{0, 1}, 20},
		{[]uint32{0, 1 << 31}, 20},
		{[]uint32{0, 1, 2, 3, 4, 5, 6, 7, 1 << 29, 2 << 29, 3 << 29, 4 << 29, 5 << 29, 6 << 29, 7 << 29}, 6},
	}
	for _, c := range cases {
		permutation(t, c.s, c.n)
	}
}
// permutation hashes every sequence of n blocks drawn from s and checks
// the collision rate.
func permutation(t *testing.T, s []uint32, n int) {
	set := newHashSet()
	genPerm(set, make([]byte, n*4), s, 0)
	set.check(t)
}
// genPerm recursively fills b (4 bytes per block) with every sequence of
// blocks drawn from s, hashing each prefix along the way. n is the number
// of bytes already filled.
func genPerm(h *HashSet, b []byte, s []uint32, n int) {
	h.addB(b[:n])
	if n == len(b) {
		return
	}
	for _, v := range s {
		// encode v into b[n:n+4], least-significant byte first
		b[n] = byte(v)
		b[n+1] = byte(v >> 8)
		b[n+2] = byte(v >> 16)
		b[n+3] = byte(v >> 24)
		genPerm(h, b, s, n+4)
	}
}
// Key abstracts a hashable value so the avalanche/windowed tests can run
// the same logic over byte slices, integers, and interface values.
type Key interface {
	clear()              // set bits all to 0
	random(r *rand.Rand) // set key to something random
	bits() int           // how many bits key has
	flipBit(i int)       // flip bit i of the key
	hash() uintptr       // hash the key
	name() string        // for error reporting
}

// BytesKey adapts a byte slice to the Key interface.
type BytesKey struct {
	b []byte
}

func (k *BytesKey) clear() {
	for i := range k.b {
		k.b[i] = 0
	}
}
func (k *BytesKey) random(r *rand.Rand) {
	randBytes(r, k.b)
}
func (k *BytesKey) bits() int {
	return len(k.b) * 8
}
func (k *BytesKey) flipBit(i int) {
	// byte i>>3, bit i&7 within that byte
	k.b[i>>3] ^= byte(1 << uint(i&7))
}
func (k *BytesKey) hash() uintptr {
	return BytesHash(k.b, 0)
}
func (k *BytesKey) name() string {
	return fmt.Sprintf("bytes%d", len(k.b))
}

// Int32Key adapts a uint32 to the Key interface.
type Int32Key struct {
	i uint32
}

func (k *Int32Key) clear() {
	k.i = 0
}
func (k *Int32Key) random(r *rand.Rand) {
	k.i = r.Uint32()
}
func (k *Int32Key) bits() int {
	return 32
}
func (k *Int32Key) flipBit(i int) {
	k.i ^= 1 << uint(i)
}
func (k *Int32Key) hash() uintptr {
	return Int32Hash(k.i, 0)
}
func (k *Int32Key) name() string {
	return "int32"
}

// Int64Key adapts a uint64 to the Key interface.
type Int64Key struct {
	i uint64
}

func (k *Int64Key) clear() {
	k.i = 0
}
func (k *Int64Key) random(r *rand.Rand) {
	// two 32-bit draws combined into a full 64-bit value
	k.i = uint64(r.Uint32()) + uint64(r.Uint32())<<32
}
func (k *Int64Key) bits() int {
	return 64
}
func (k *Int64Key) flipBit(i int) {
	k.i ^= 1 << uint(i)
}
func (k *Int64Key) hash() uintptr {
	return Int64Hash(k.i, 0)
}
func (k *Int64Key) name() string {
	return "int64"
}

// EfaceKey adapts an empty-interface value (holding a uint64) to Key.
type EfaceKey struct {
	i interface{}
}

func (k *EfaceKey) clear() {
	k.i = nil
}
func (k *EfaceKey) random(r *rand.Rand) {
	k.i = uint64(r.Int63())
}
func (k *EfaceKey) bits() int {
	// use 64 bits. This tests inlined interfaces
	// on 64-bit targets and indirect interfaces on
	// 32-bit targets.
	return 64
}
func (k *EfaceKey) flipBit(i int) {
	k.i = k.i.(uint64) ^ uint64(1)<<uint(i)
}
func (k *EfaceKey) hash() uintptr {
	return EfaceHash(k.i, 0)
}
func (k *EfaceKey) name() string {
	return "Eface"
}

// IfaceKey adapts a non-empty interface value (holding an fInter) to Key.
type IfaceKey struct {
	i interface {
		F()
	}
}

// fInter is a trivial implementation of the F() interface above.
type fInter uint64

func (x fInter) F() {
}

func (k *IfaceKey) clear() {
	k.i = nil
}
func (k *IfaceKey) random(r *rand.Rand) {
	k.i = fInter(r.Int63())
}
func (k *IfaceKey) bits() int {
	// use 64 bits. This tests inlined interfaces
	// on 64-bit targets and indirect interfaces on
	// 32-bit targets.
	return 64
}
func (k *IfaceKey) flipBit(i int) {
	k.i = k.i.(fInter) ^ fInter(1)<<uint(i)
}
func (k *IfaceKey) hash() uintptr {
	return IfaceHash(k.i, 0)
}
func (k *IfaceKey) name() string {
	return "Iface"
}
// Flipping a single bit of a key should flip each output bit with 50% probability.
func TestSmhasherAvalanche(t *testing.T) {
	if GOARCH == "wasm" {
		t.Skip("Too slow on wasm")
	}
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	keys := []Key{
		&BytesKey{make([]byte, 2)},
		&BytesKey{make([]byte, 4)},
		&BytesKey{make([]byte, 8)},
		&BytesKey{make([]byte, 16)},
		&BytesKey{make([]byte, 32)},
		&BytesKey{make([]byte, 200)},
		&Int32Key{},
		&Int64Key{},
		&EfaceKey{},
		&IfaceKey{},
	}
	for _, k := range keys {
		avalancheTest1(t, k)
	}
}
// avalancheTest1 checks that, over REP random keys, flipping any single
// input bit of k flips each output bit roughly half the time.
func avalancheTest1(t *testing.T, k Key) {
	const REP = 100000
	r := rand.New(rand.NewSource(1234))
	n := k.bits()
	// grid[i][j] is a count of whether flipping
	// input bit i affects output bit j.
	grid := make([][hashSize]int, n)
	for z := 0; z < REP; z++ {
		// pick a random key, hash it
		k.random(r)
		h := k.hash()
		// flip each bit, hash & compare the results
		for i := 0; i < n; i++ {
			k.flipBit(i)
			d := h ^ k.hash()
			k.flipBit(i) // restore the key
			// record the effects of that bit flip
			g := &grid[i]
			for j := 0; j < hashSize; j++ {
				g[j] += int(d & 1)
				d >>= 1
			}
		}
	}
	// Each entry in the grid should be about REP/2.
	// More precisely, we did N = k.bits() * hashSize experiments where
	// each is the sum of REP coin flips. We want to find bounds on the
	// sum of coin flips such that a truly random experiment would have
	// all sums inside those bounds with 99% probability.
	N := n * hashSize
	var c float64
	// find c such that Prob(mean-c*stddev < x < mean+c*stddev)^N > .9999
	for c = 0.0; math.Pow(math.Erf(c/math.Sqrt(2)), float64(N)) < .9999; c += .1 {
	}
	c *= 4.0 // allowed slack - we don't need to be perfectly random
	mean := .5 * REP
	stddev := .5 * math.Sqrt(REP)
	low := int(mean - c*stddev)
	high := int(mean + c*stddev)
	for i := 0; i < n; i++ {
		for j := 0; j < hashSize; j++ {
			x := grid[i][j]
			if x < low || x > high {
				t.Errorf("bad bias for %s bit %d -> bit %d: %d/%d\n", k.name(), i, j, x, REP)
			}
		}
	}
}
// All bit rotations of a set of distinct keys
func TestSmhasherWindowed(t *testing.T) {
windowed(t, &Int32Key{})
windowed(t, &Int64Key{})
windowed(t, &BytesKey{make([]byte, 128)})
}
// windowed hashes, for every rotation r, all 2^BITS keys whose set bits
// lie in a BITS-wide window starting at bit r (wrapping around), and
// checks that they collide no more than expected.
func windowed(t *testing.T, k Key) {
	if GOARCH == "wasm" {
		t.Skip("Too slow on wasm")
	}
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	const BITS = 16
	for r := 0; r < k.bits(); r++ {
		h := newHashSet()
		for i := 0; i < 1<<BITS; i++ {
			// build the key whose window contents are the bits of i
			k.clear()
			for j := 0; j < BITS; j++ {
				if i>>uint(j)&1 != 0 {
					k.flipBit((j + r) % k.bits())
				}
			}
			h.add(k.hash())
		}
		h.check(t)
	}
}
// All keys of the form prefix + [A-Za-z0-9]*N + suffix.
func TestSmhasherText(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	for _, affix := range []struct{ prefix, suffix string }{
		{"Foo", "Bar"},
		{"FooBar", ""},
		{"", "FooBar"},
	} {
		text(t, affix.prefix, affix.suffix)
	}
}
// text hashes every key of the form prefix + cccc + suffix, where each c
// is drawn from the alphanumeric alphabet S, and checks the collision rate.
func text(t *testing.T, prefix, suffix string) {
	const N = 4
	const S = "ABCDEFGHIJKLMNOPQRSTabcdefghijklmnopqrst0123456789"
	const L = len(S)
	b := make([]byte, len(prefix)+N+len(suffix))
	copy(b, prefix)
	copy(b[len(prefix)+N:], suffix)
	h := newHashSet()
	// c is the N-byte variable window between prefix and suffix.
	c := b[len(prefix):]
	for i := 0; i < L; i++ {
		c[0] = S[i]
		for j := 0; j < L; j++ {
			c[1] = S[j]
			for k := 0; k < L; k++ {
				c[2] = S[k]
				for x := 0; x < L; x++ {
					c[3] = S[x]
					h.addB(b)
				}
			}
		}
	}
	h.check(t)
}
// Make sure different seed values generate different hashes.
func TestSmhasherSeed(t *testing.T) {
	const N = 100000
	set := newHashSet()
	for seed := 0; seed < N; seed++ {
		set.addS_seed("hello", uintptr(seed))
	}
	set.check(t)
}
// size of the hash output (32 or 64 bits)
// (^uintptr(0)>>63 evaluates to 1 on 64-bit platforms and 0 on 32-bit
// ones, so the <<5 term adds 32 exactly when uintptr is 64 bits wide)
const hashSize = 32 + int(^uintptr(0)>>63<<5)
func randBytes(r *rand.Rand, b []byte) {
for i := range b {
b[i] = byte(r.Uint32())
}
}
// benchmarkHash measures StringHash throughput on an n-byte string.
func benchmarkHash(b *testing.B, n int) {
	s := strings.Repeat("A", n)
	for i := 0; i < b.N; i++ {
		StringHash(s, 0)
	}
	b.SetBytes(int64(n))
}

// Benchmarks of string hashing at several representative key sizes.
func BenchmarkHash5(b *testing.B)     { benchmarkHash(b, 5) }
func BenchmarkHash16(b *testing.B)    { benchmarkHash(b, 16) }
func BenchmarkHash64(b *testing.B)    { benchmarkHash(b, 64) }
func BenchmarkHash1024(b *testing.B)  { benchmarkHash(b, 1024) }
func BenchmarkHash65536(b *testing.B) { benchmarkHash(b, 65536) }
// TestArrayHash checks that string arrays containing "" hash well enough
// that map buckets stay balanced (measured via allocation count).
func TestArrayHash(t *testing.T) {
	// Make sure that "" in arrays hash correctly. The hash
	// should at least scramble the input seed so that, e.g.,
	// {"","foo"} and {"foo",""} have different hashes.
	// If the hash is bad, then all (8 choose 4) = 70 keys
	// have the same hash. If so, we allocate 70/8 = 8
	// overflow buckets. If the hash is good we don't
	// normally allocate any overflow buckets, and the
	// probability of even one or two overflows goes down rapidly.
	// (There is always 1 allocation of the bucket array. The map
	// header is allocated on the stack.)
	f := func() {
		// Make the key type at most 128 bytes. Otherwise,
		// we get an allocation per key.
		type key [8]string
		m := make(map[key]bool, 70)
		// fill m with keys that have 4 "foo"s and 4 ""s.
		for i := 0; i < 256; i++ {
			var k key
			cnt := 0
			// bit j of i decides whether element j is "foo" or ""
			for j := uint(0); j < 8; j++ {
				if i>>j&1 != 0 {
					k[j] = "foo"
					cnt++
				}
			}
			if cnt == 4 {
				m[k] = true
			}
		}
		if len(m) != 70 {
			t.Errorf("bad test: (8 choose 4) should be 70, not %d", len(m))
		}
	}
	if n := testing.AllocsPerRun(10, f); n > 6 {
		t.Errorf("too many allocs %f - hash not balanced", n)
	}
}
// TestStructHash is the struct-typed analogue of TestArrayHash: keys with
// 4 "foo" fields and 4 empty fields must spread across map buckets.
// (The field selection is deliberately written as straight-line ifs —
// collecting field pointers in a slice would add heap allocations and
// upset the AllocsPerRun bound below.)
func TestStructHash(t *testing.T) {
	// See the comment in TestArrayHash.
	f := func() {
		type key struct {
			a, b, c, d, e, f, g, h string
		}
		m := make(map[key]bool, 70)
		// fill m with keys that have 4 "foo"s and 4 ""s.
		for i := 0; i < 256; i++ {
			var k key
			cnt := 0
			// bit j of i decides whether the j-th field is "foo" or ""
			if i&1 != 0 {
				k.a = "foo"
				cnt++
			}
			if i&2 != 0 {
				k.b = "foo"
				cnt++
			}
			if i&4 != 0 {
				k.c = "foo"
				cnt++
			}
			if i&8 != 0 {
				k.d = "foo"
				cnt++
			}
			if i&16 != 0 {
				k.e = "foo"
				cnt++
			}
			if i&32 != 0 {
				k.f = "foo"
				cnt++
			}
			if i&64 != 0 {
				k.g = "foo"
				cnt++
			}
			if i&128 != 0 {
				k.h = "foo"
				cnt++
			}
			if cnt == 4 {
				m[k] = true
			}
		}
		if len(m) != 70 {
			t.Errorf("bad test: (8 choose 4) should be 70, not %d", len(m))
		}
	}
	if n := testing.AllocsPerRun(10, f); n > 6 {
		t.Errorf("too many allocs %f - hash not balanced", n)
	}
}
// sink receives benchmark results so the loop bodies are not dead code
// (presumably to keep the compiler from eliminating them — standard
// benchmark idiom).
var sink uint64

// BenchmarkAlignedLoad measures ReadUnaligned64 on an aligned pointer.
func BenchmarkAlignedLoad(b *testing.B) {
	var buf [16]byte
	p := unsafe.Pointer(&buf[0])
	var s uint64
	for i := 0; i < b.N; i++ {
		s += ReadUnaligned64(p)
	}
	sink = s
}

// BenchmarkUnalignedLoad measures ReadUnaligned64 on a pointer offset one
// byte from the buffer start.
func BenchmarkUnalignedLoad(b *testing.B) {
	var buf [16]byte
	p := unsafe.Pointer(&buf[1])
	var s uint64
	for i := 0; i < b.N; i++ {
		s += ReadUnaligned64(p)
	}
	sink = s
}
// TestCollisions varies two bytes (at every pair of positions i, j) of a
// 16-byte key through all 65536 combinations and requires that the low 16
// bits of the hash take on more than half of the possible values.
func TestCollisions(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	for i := 0; i < 16; i++ {
		for j := 0; j < 16; j++ {
			if j == i {
				continue
			}
			var a [16]byte
			m := make(map[uint16]struct{}, 1<<16)
			for n := 0; n < 1<<16; n++ {
				// spread the 16 bits of n across positions i and j
				a[i] = byte(n)
				a[j] = byte(n >> 8)
				m[uint16(BytesHash(a[:], 0))] = struct{}{}
			}
			if len(m) <= 1<<15 {
				t.Errorf("too many collisions i=%d j=%d outputs=%d out of 65536\n", i, j, len(m))
			}
		}
	}
}
runtime: remove redundant slicing
In the twoNonZero function in hash_test, the buffer is redundantly sliced as b[:] three times. This change removes those redundant slice expressions.
Change-Id: I0701d0c810b4f3e267f80133a0dcdb4ed81fe356
Reviewed-on: https://go-review.googlesource.com/c/156138
Reviewed-by: Keith Randall <8c99c3a9284e493be632950b84cd789d08ed3e9d@golang.org>
Run-TryBot: Keith Randall <8c99c3a9284e493be632950b84cd789d08ed3e9d@golang.org>
TryBot-Result: Gobot Gobot <66cb808b70d30c07676d5e946fee83fd561249e5@golang.org>
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime_test
import (
"fmt"
"math"
"math/rand"
. "runtime"
"strings"
"testing"
"unsafe"
)
func TestMemHash32Equality(t *testing.T) {
if *UseAeshash {
t.Skip("skipping since AES hash implementation is used")
}
var b [4]byte
r := rand.New(rand.NewSource(1234))
seed := uintptr(r.Uint64())
for i := 0; i < 100; i++ {
randBytes(r, b[:])
got := MemHash32(unsafe.Pointer(&b), seed)
want := MemHash(unsafe.Pointer(&b), seed, 4)
if got != want {
t.Errorf("MemHash32(%x, %v) = %v; want %v", b, seed, got, want)
}
}
}
func TestMemHash64Equality(t *testing.T) {
if *UseAeshash {
t.Skip("skipping since AES hash implementation is used")
}
var b [8]byte
r := rand.New(rand.NewSource(1234))
seed := uintptr(r.Uint64())
for i := 0; i < 100; i++ {
randBytes(r, b[:])
got := MemHash64(unsafe.Pointer(&b), seed)
want := MemHash(unsafe.Pointer(&b), seed, 8)
if got != want {
t.Errorf("MemHash64(%x, %v) = %v; want %v", b, seed, got, want)
}
}
}
// Smhasher is a torture test for hash functions.
// https://code.google.com/p/smhasher/
// This code is a port of some of the Smhasher tests to Go.
//
// The current AES hash function passes Smhasher. Our fallback
// hash functions don't, so we only enable the difficult tests when
// we know the AES implementation is available.
// Sanity checks.
// hash should not depend on values outside key.
// hash should not depend on alignment.
func TestSmhasherSanity(t *testing.T) {
r := rand.New(rand.NewSource(1234))
const REP = 10
const KEYMAX = 128
const PAD = 16
const OFFMAX = 16
for k := 0; k < REP; k++ {
for n := 0; n < KEYMAX; n++ {
for i := 0; i < OFFMAX; i++ {
var b [KEYMAX + OFFMAX + 2*PAD]byte
var c [KEYMAX + OFFMAX + 2*PAD]byte
randBytes(r, b[:])
randBytes(r, c[:])
copy(c[PAD+i:PAD+i+n], b[PAD:PAD+n])
if BytesHash(b[PAD:PAD+n], 0) != BytesHash(c[PAD+i:PAD+i+n], 0) {
t.Errorf("hash depends on bytes outside key")
}
}
}
}
}
type HashSet struct {
m map[uintptr]struct{} // set of hashes added
n int // number of hashes added
}
func newHashSet() *HashSet {
return &HashSet{make(map[uintptr]struct{}), 0}
}
func (s *HashSet) add(h uintptr) {
s.m[h] = struct{}{}
s.n++
}
func (s *HashSet) addS(x string) {
s.add(StringHash(x, 0))
}
func (s *HashSet) addB(x []byte) {
s.add(BytesHash(x, 0))
}
func (s *HashSet) addS_seed(x string, seed uintptr) {
s.add(StringHash(x, seed))
}
func (s *HashSet) check(t *testing.T) {
const SLOP = 10.0
collisions := s.n - len(s.m)
//fmt.Printf("%d/%d\n", len(s.m), s.n)
pairs := int64(s.n) * int64(s.n-1) / 2
expected := float64(pairs) / math.Pow(2.0, float64(hashSize))
stddev := math.Sqrt(expected)
if float64(collisions) > expected+SLOP*(3*stddev+1) {
t.Errorf("unexpected number of collisions: got=%d mean=%f stddev=%f", collisions, expected, stddev)
}
}
// a string plus adding zeros must make distinct hashes
func TestSmhasherAppendedZeros(t *testing.T) {
s := "hello" + strings.Repeat("\x00", 256)
h := newHashSet()
for i := 0; i <= len(s); i++ {
h.addS(s[:i])
}
h.check(t)
}
// All 0-3 byte strings have distinct hashes.
func TestSmhasherSmallKeys(t *testing.T) {
h := newHashSet()
var b [3]byte
for i := 0; i < 256; i++ {
b[0] = byte(i)
h.addB(b[:1])
for j := 0; j < 256; j++ {
b[1] = byte(j)
h.addB(b[:2])
if !testing.Short() {
for k := 0; k < 256; k++ {
b[2] = byte(k)
h.addB(b[:3])
}
}
}
}
h.check(t)
}
// Different length strings of all zeros have distinct hashes.
func TestSmhasherZeros(t *testing.T) {
N := 256 * 1024
if testing.Short() {
N = 1024
}
h := newHashSet()
b := make([]byte, N)
for i := 0; i <= N; i++ {
h.addB(b[:i])
}
h.check(t)
}
// Strings with up to two nonzero bytes all have distinct hashes.
func TestSmhasherTwoNonzero(t *testing.T) {
if GOARCH == "wasm" {
t.Skip("Too slow on wasm")
}
if testing.Short() {
t.Skip("Skipping in short mode")
}
h := newHashSet()
for n := 2; n <= 16; n++ {
twoNonZero(h, n)
}
h.check(t)
}
// twoNonZero adds to h the hash of every length-n key that holds at most
// two non-zero bytes. The buffer is mutated in place and restored after
// each key.
func twoNonZero(h *HashSet, n int) {
	key := make([]byte, n)
	// the all-zero key
	h.addB(key)
	// every key with exactly one non-zero byte
	for pos := 0; pos < n; pos++ {
		for v := 1; v < 256; v++ {
			key[pos] = byte(v)
			h.addB(key)
			key[pos] = 0
		}
	}
	// every key with exactly two non-zero bytes
	for p1 := 0; p1 < n; p1++ {
		for v1 := 1; v1 < 256; v1++ {
			key[p1] = byte(v1)
			for p2 := p1 + 1; p2 < n; p2++ {
				for v2 := 1; v2 < 256; v2++ {
					key[p2] = byte(v2)
					h.addB(key)
					key[p2] = 0
				}
			}
			key[p1] = 0
		}
	}
}
// Test strings with repeats, like "abcdabcdabcdabcd..."
func TestSmhasherCyclic(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	r := rand.New(rand.NewSource(1234))
	const REPEAT = 8
	const N = 1000000
	for n := 4; n <= 12; n++ {
		h := newHashSet()
		b := make([]byte, REPEAT*n)
		for i := 0; i < N; i++ {
			// First 4 bytes are derived from i via distinct moduli so
			// consecutive iterations differ; the rest of the n-byte
			// block is random.
			b[0] = byte(i * 79 % 97)
			b[1] = byte(i * 43 % 137)
			b[2] = byte(i * 151 % 197)
			b[3] = byte(i * 199 % 251)
			randBytes(r, b[4:n])
			// Repeat the n-byte block to fill all REPEAT copies.
			for j := n; j < n*REPEAT; j++ {
				b[j] = b[j-n]
			}
			h.addB(b)
		}
		h.check(t)
	}
}
// Test strings with only a few bits set
func TestSmhasherSparse(t *testing.T) {
	if GOARCH == "wasm" {
		t.Skip("Too slow on wasm")
	}
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	// Fewer set bits are allowed as the key widens, to bound runtime.
	sparse(t, 32, 6)
	sparse(t, 40, 6)
	sparse(t, 48, 5)
	sparse(t, 56, 5)
	sparse(t, 64, 5)
	sparse(t, 96, 4)
	sparse(t, 256, 3)
	sparse(t, 2048, 2)
}

// sparse checks that all n-bit keys with at most k bits set hash distinctly.
func sparse(t *testing.T, n int, k int) {
	b := make([]byte, n/8)
	h := newHashSet()
	setbits(h, b, 0, k)
	h.check(t)
}

// set up to k bits at index i and greater
func setbits(h *HashSet, b []byte, i int, k int) {
	h.addB(b)
	if k == 0 {
		return
	}
	for j := i; j < len(b)*8; j++ {
		b[j/8] |= byte(1 << uint(j&7))
		setbits(h, b, j+1, k-1)
		// Clear the bit again so sibling recursions start from the same state.
		b[j/8] &= byte(^(1 << uint(j&7)))
	}
}
// Test all possible combinations of n blocks from the set s.
// "permutation" is a bad name here, but it is what Smhasher uses.
func TestSmhasherPermutation(t *testing.T) {
	if GOARCH == "wasm" {
		t.Skip("Too slow on wasm")
	}
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	permutation(t, []uint32{0, 1, 2, 3, 4, 5, 6, 7}, 8)
	permutation(t, []uint32{0, 1 << 29, 2 << 29, 3 << 29, 4 << 29, 5 << 29, 6 << 29, 7 << 29}, 8)
	permutation(t, []uint32{0, 1}, 20)
	permutation(t, []uint32{0, 1 << 31}, 20)
	permutation(t, []uint32{0, 1, 2, 3, 4, 5, 6, 7, 1 << 29, 2 << 29, 3 << 29, 4 << 29, 5 << 29, 6 << 29, 7 << 29}, 6)
}

// permutation hashes every sequence of n 32-bit blocks drawn from s and
// checks for collisions.
func permutation(t *testing.T, s []uint32, n int) {
	b := make([]byte, n*4)
	h := newHashSet()
	genPerm(h, b, s, 0)
	h.check(t)
}

// genPerm recursively fills b from byte offset n onward with the
// little-endian encodings of values from s, hashing every prefix.
func genPerm(h *HashSet, b []byte, s []uint32, n int) {
	h.addB(b[:n])
	if n == len(b) {
		return
	}
	for _, v := range s {
		b[n] = byte(v)
		b[n+1] = byte(v >> 8)
		b[n+2] = byte(v >> 16)
		b[n+3] = byte(v >> 24)
		genPerm(h, b, s, n+4)
	}
}
// Key abstracts a hashable key so the avalanche and windowed tests can
// run over several underlying representations.
type Key interface {
	clear()              // set bits all to 0
	random(r *rand.Rand) // set key to something random
	bits() int           // how many bits key has
	flipBit(i int)       // flip bit i of the key
	hash() uintptr       // hash the key
	name() string        // for error reporting
}
// BytesKey is a Key backed by a mutable byte slice.
type BytesKey struct {
	b []byte
}

// clear zeroes every byte of the key.
func (k *BytesKey) clear() {
	for i := 0; i < len(k.b); i++ {
		k.b[i] = 0
	}
}

// random replaces the key with pseudo-random bytes from r.
func (k *BytesKey) random(r *rand.Rand) {
	randBytes(r, k.b)
}

// bits reports the key width in bits.
func (k *BytesKey) bits() int {
	return 8 * len(k.b)
}

// flipBit toggles bit i (bit i%8 of byte i/8).
func (k *BytesKey) flipBit(i int) {
	k.b[i/8] ^= byte(1) << uint(i%8)
}

// hash hashes the key with seed 0.
func (k *BytesKey) hash() uintptr {
	return BytesHash(k.b, 0)
}

// name identifies the key type and width for error reports.
func (k *BytesKey) name() string {
	return fmt.Sprintf("bytes%d", len(k.b))
}
// Int32Key is a Key backed by a single uint32.
type Int32Key struct {
	i uint32
}

func (k *Int32Key) clear()               { k.i = 0 }
func (k *Int32Key) random(r *rand.Rand)  { k.i = r.Uint32() }
func (k *Int32Key) bits() int            { return 32 }
func (k *Int32Key) flipBit(i int)        { k.i ^= 1 << uint(i) }
func (k *Int32Key) hash() uintptr        { return Int32Hash(k.i, 0) }
func (k *Int32Key) name() string         { return "int32" }
// Int64Key is a Key backed by a single uint64.
type Int64Key struct {
	i uint64
}

func (k *Int64Key) clear() { k.i = 0 }

// random builds a 64-bit value from two independent 32-bit draws.
func (k *Int64Key) random(r *rand.Rand) {
	lo := uint64(r.Uint32())
	hi := uint64(r.Uint32())
	k.i = lo | hi<<32
}

func (k *Int64Key) bits() int     { return 64 }
func (k *Int64Key) flipBit(i int) { k.i ^= 1 << uint(i) }
func (k *Int64Key) hash() uintptr { return Int64Hash(k.i, 0) }
func (k *Int64Key) name() string  { return "int64" }
// EfaceKey is a Key whose uint64 value is stored in an empty interface,
// hashed through EfaceHash.
type EfaceKey struct {
	i interface{}
}

func (k *EfaceKey) clear() {
	k.i = nil
}
func (k *EfaceKey) random(r *rand.Rand) {
	k.i = uint64(r.Int63())
}
func (k *EfaceKey) bits() int {
	// use 64 bits. This tests inlined interfaces
	// on 64-bit targets and indirect interfaces on
	// 32-bit targets.
	return 64
}
func (k *EfaceKey) flipBit(i int) {
	// The dynamic type is always uint64 (set by random), so this
	// assertion cannot fail after random has run.
	k.i = k.i.(uint64) ^ uint64(1)<<uint(i)
}
func (k *EfaceKey) hash() uintptr {
	return EfaceHash(k.i, 0)
}
func (k *EfaceKey) name() string {
	return "Eface"
}

// IfaceKey is a Key whose value is stored in a non-empty (method-bearing)
// interface, hashed through IfaceHash.
type IfaceKey struct {
	i interface {
		F()
	}
}

// fInter is the concrete type stored in IfaceKey; F exists only to
// satisfy the interface.
type fInter uint64

func (x fInter) F() {
}
func (k *IfaceKey) clear() {
	k.i = nil
}
func (k *IfaceKey) random(r *rand.Rand) {
	k.i = fInter(r.Int63())
}
func (k *IfaceKey) bits() int {
	// use 64 bits. This tests inlined interfaces
	// on 64-bit targets and indirect interfaces on
	// 32-bit targets.
	return 64
}
func (k *IfaceKey) flipBit(i int) {
	k.i = k.i.(fInter) ^ fInter(1)<<uint(i)
}
func (k *IfaceKey) hash() uintptr {
	return IfaceHash(k.i, 0)
}
func (k *IfaceKey) name() string {
	return "Iface"
}
// Flipping a single bit of a key should flip each output bit with 50% probability.
func TestSmhasherAvalanche(t *testing.T) {
	if GOARCH == "wasm" {
		t.Skip("Too slow on wasm")
	}
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	keys := []Key{
		&BytesKey{make([]byte, 2)},
		&BytesKey{make([]byte, 4)},
		&BytesKey{make([]byte, 8)},
		&BytesKey{make([]byte, 16)},
		&BytesKey{make([]byte, 32)},
		&BytesKey{make([]byte, 200)},
		&Int32Key{},
		&Int64Key{},
		&EfaceKey{},
		&IfaceKey{},
	}
	for _, k := range keys {
		avalancheTest1(t, k)
	}
}
// avalancheTest1 flips each input bit of k repeatedly and verifies that
// every output bit changes roughly half the time, within statistical
// bounds derived from the number of experiments.
func avalancheTest1(t *testing.T, k Key) {
	const REP = 100000
	r := rand.New(rand.NewSource(1234))
	n := k.bits()
	// grid[i][j] is a count of whether flipping
	// input bit i affects output bit j.
	grid := make([][hashSize]int, n)
	for z := 0; z < REP; z++ {
		// pick a random key, hash it
		k.random(r)
		h := k.hash()
		// flip each bit, hash & compare the results
		for i := 0; i < n; i++ {
			k.flipBit(i)
			d := h ^ k.hash()
			k.flipBit(i) // restore bit i before moving on
			// record the effects of that bit flip
			g := &grid[i]
			for j := 0; j < hashSize; j++ {
				g[j] += int(d & 1)
				d >>= 1
			}
		}
	}
	// Each entry in the grid should be about REP/2.
	// More precisely, we did N = k.bits() * hashSize experiments where
	// each is the sum of REP coin flips. We want to find bounds on the
	// sum of coin flips such that a truly random experiment would have
	// all sums inside those bounds with 99% probability.
	N := n * hashSize
	var c float64
	// find c such that Prob(mean-c*stddev < x < mean+c*stddev)^N > .9999
	for c = 0.0; math.Pow(math.Erf(c/math.Sqrt(2)), float64(N)) < .9999; c += .1 {
	}
	c *= 4.0 // allowed slack - we don't need to be perfectly random
	mean := .5 * REP
	stddev := .5 * math.Sqrt(REP)
	low := int(mean - c*stddev)
	high := int(mean + c*stddev)
	for i := 0; i < n; i++ {
		for j := 0; j < hashSize; j++ {
			x := grid[i][j]
			if x < low || x > high {
				t.Errorf("bad bias for %s bit %d -> bit %d: %d/%d\n", k.name(), i, j, x, REP)
			}
		}
	}
}
// All bit rotations of a set of distinct keys
func TestSmhasherWindowed(t *testing.T) {
	windowed(t, &Int32Key{})
	windowed(t, &Int64Key{})
	windowed(t, &BytesKey{make([]byte, 128)})
}

// windowed hashes every 16-bit value placed (with wraparound) at every
// bit offset r of the key, checking each offset for collisions.
func windowed(t *testing.T, k Key) {
	if GOARCH == "wasm" {
		t.Skip("Too slow on wasm")
	}
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	const BITS = 16
	for r := 0; r < k.bits(); r++ {
		h := newHashSet()
		for i := 0; i < 1<<BITS; i++ {
			k.clear()
			// Copy the bits of i into the key, rotated by r positions.
			for j := 0; j < BITS; j++ {
				if i>>uint(j)&1 != 0 {
					k.flipBit((j + r) % k.bits())
				}
			}
			h.add(k.hash())
		}
		h.check(t)
	}
}
// All keys of the form prefix + [A-Za-z0-9]*N + suffix.
func TestSmhasherText(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	text(t, "Foo", "Bar")
	text(t, "FooBar", "")
	text(t, "", "FooBar")
}

// text hashes every key prefix + c0c1c2c3 + suffix, where each ci is
// drawn from the alphabet S, and checks for collisions.
func text(t *testing.T, prefix, suffix string) {
	const N = 4
	const S = "ABCDEFGHIJKLMNOPQRSTabcdefghijklmnopqrst0123456789"
	const L = len(S)
	b := make([]byte, len(prefix)+N+len(suffix))
	copy(b, prefix)
	copy(b[len(prefix)+N:], suffix)
	h := newHashSet()
	// c is the N-byte variable window between prefix and suffix.
	c := b[len(prefix):]
	for i := 0; i < L; i++ {
		c[0] = S[i]
		for j := 0; j < L; j++ {
			c[1] = S[j]
			for k := 0; k < L; k++ {
				c[2] = S[k]
				for x := 0; x < L; x++ {
					c[3] = S[x]
					h.addB(b)
				}
			}
		}
	}
	h.check(t)
}
// Make sure different seed values generate different hashes.
func TestSmhasherSeed(t *testing.T) {
	const seeds = 100000
	set := newHashSet()
	for seed := 0; seed < seeds; seed++ {
		set.addS_seed("hello", uintptr(seed))
	}
	set.check(t)
}
// size of the hash output (32 or 64 bits)
// ^uintptr(0)>>63 is 1 on 64-bit platforms and 0 on 32-bit ones; shifting
// it left by 5 adds 32 only on 64-bit, giving 64 there and 32 otherwise.
const hashSize = 32 + int(^uintptr(0)>>63<<5)

// randBytes fills b with pseudo-random bytes drawn from r.
func randBytes(r *rand.Rand, b []byte) {
	for i := range b {
		b[i] = byte(r.Uint32())
	}
}
// benchmarkHash hashes an n-byte string b.N times and reports throughput
// via SetBytes.
func benchmarkHash(b *testing.B, n int) {
	s := strings.Repeat("A", n)
	for i := 0; i < b.N; i++ {
		StringHash(s, 0)
	}
	b.SetBytes(int64(n))
}

func BenchmarkHash5(b *testing.B)     { benchmarkHash(b, 5) }
func BenchmarkHash16(b *testing.B)    { benchmarkHash(b, 16) }
func BenchmarkHash64(b *testing.B)    { benchmarkHash(b, 64) }
func BenchmarkHash1024(b *testing.B)  { benchmarkHash(b, 1024) }
func BenchmarkHash65536(b *testing.B) { benchmarkHash(b, 65536) }
// TestArrayHash verifies that empty strings inside array keys still
// perturb the hash, by measuring map overflow-bucket allocations.
func TestArrayHash(t *testing.T) {
	// Make sure that "" in arrays hash correctly. The hash
	// should at least scramble the input seed so that, e.g.,
	// {"","foo"} and {"foo",""} have different hashes.
	// If the hash is bad, then all (8 choose 4) = 70 keys
	// have the same hash. If so, we allocate 70/8 = 8
	// overflow buckets. If the hash is good we don't
	// normally allocate any overflow buckets, and the
	// probability of even one or two overflows goes down rapidly.
	// (There is always 1 allocation of the bucket array. The map
	// header is allocated on the stack.)
	f := func() {
		// Make the key type at most 128 bytes. Otherwise,
		// we get an allocation per key.
		type key [8]string
		m := make(map[key]bool, 70)
		// fill m with keys that have 4 "foo"s and 4 ""s.
		for i := 0; i < 256; i++ {
			var k key
			cnt := 0
			// Bit j of i selects whether element j is "foo" or "".
			for j := uint(0); j < 8; j++ {
				if i>>j&1 != 0 {
					k[j] = "foo"
					cnt++
				}
			}
			if cnt == 4 {
				m[k] = true
			}
		}
		if len(m) != 70 {
			t.Errorf("bad test: (8 choose 4) should be 70, not %d", len(m))
		}
	}
	if n := testing.AllocsPerRun(10, f); n > 6 {
		t.Errorf("too many allocs %f - hash not balanced", n)
	}
}
// TestStructHash is the struct-key analogue of TestArrayHash: empty
// strings in struct fields must still perturb the hash.
func TestStructHash(t *testing.T) {
	// See the comment in TestArrayHash.
	f := func() {
		type key struct {
			a, b, c, d, e, f, g, h string
		}
		m := make(map[key]bool, 70)
		// fill m with keys that have 4 "foo"s and 4 ""s.
		// Bit b of i decides whether the b'th field is "foo" or "".
		for i := 0; i < 256; i++ {
			var k key
			cnt := 0
			if i&1 != 0 {
				k.a = "foo"
				cnt++
			}
			if i&2 != 0 {
				k.b = "foo"
				cnt++
			}
			if i&4 != 0 {
				k.c = "foo"
				cnt++
			}
			if i&8 != 0 {
				k.d = "foo"
				cnt++
			}
			if i&16 != 0 {
				k.e = "foo"
				cnt++
			}
			if i&32 != 0 {
				k.f = "foo"
				cnt++
			}
			if i&64 != 0 {
				k.g = "foo"
				cnt++
			}
			if i&128 != 0 {
				k.h = "foo"
				cnt++
			}
			if cnt == 4 {
				m[k] = true
			}
		}
		if len(m) != 70 {
			t.Errorf("bad test: (8 choose 4) should be 70, not %d", len(m))
		}
	}
	if n := testing.AllocsPerRun(10, f); n > 6 {
		t.Errorf("too many allocs %f - hash not balanced", n)
	}
}
// sink receives benchmark results so the benchmarked loads are not
// optimized away.
var sink uint64

// BenchmarkAlignedLoad measures ReadUnaligned64 at the start of a buffer.
func BenchmarkAlignedLoad(b *testing.B) {
	var buf [16]byte
	p := unsafe.Pointer(&buf[0])
	var s uint64
	for i := 0; i < b.N; i++ {
		s += ReadUnaligned64(p)
	}
	sink = s
}

// BenchmarkUnalignedLoad measures ReadUnaligned64 one byte into the
// buffer, so the address is (almost certainly) not 8-byte aligned.
func BenchmarkUnalignedLoad(b *testing.B) {
	var buf [16]byte
	p := unsafe.Pointer(&buf[1])
	var s uint64
	for i := 0; i < b.N; i++ {
		s += ReadUnaligned64(p)
	}
	sink = s
}
// TestCollisions varies two byte positions of a 16-byte key over all
// 2^16 combinations and requires that the low 16 bits of the hash stay
// mostly distinct (more than half of the outputs unique).
func TestCollisions(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	for i := 0; i < 16; i++ {
		for j := 0; j < 16; j++ {
			if j == i {
				continue
			}
			var a [16]byte
			m := make(map[uint16]struct{}, 1<<16)
			// Position i carries the low byte of n, position j the high byte.
			for n := 0; n < 1<<16; n++ {
				a[i] = byte(n)
				a[j] = byte(n >> 8)
				m[uint16(BytesHash(a[:], 0))] = struct{}{}
			}
			if len(m) <= 1<<15 {
				t.Errorf("too many collisions i=%d j=%d outputs=%d out of 65536\n", i, j, len(m))
			}
		}
	}
}
|
package report_test
import (
"testing"
"github.com/weaveworks/scope/report"
)
// Fixture IDs shared by the report tests: two known hosts (client and
// server), one unknown host, and endpoint/address node IDs derived from them.
var (
	clientHostID              = "client.host.com"
	clientHostName            = clientHostID
	clientHostNodeID          = report.MakeHostNodeID(clientHostID)
	clientAddress             = "10.10.10.20"
	serverHostID              = "server.host.com"
	serverHostName            = serverHostID
	serverHostNodeID          = report.MakeHostNodeID(serverHostID)
	serverAddress             = "10.10.10.1"
	unknownHostID             = ""              // by definition, we don't know it
	unknownAddress            = "172.16.93.112" // will be a pseudonode, no corresponding host
	client54001EndpointNodeID = report.MakeEndpointNodeID(clientHostID, "", clientAddress, "54001") // i.e. curl
	client54002EndpointNodeID = report.MakeEndpointNodeID(clientHostID, "", clientAddress, "54002") // also curl
	server80EndpointNodeID    = report.MakeEndpointNodeID(serverHostID, "", serverAddress, "80")    // i.e. apache
	unknown1EndpointNodeID    = report.MakeEndpointNodeID(unknownHostID, "", unknownAddress, "10001")
	unknown2EndpointNodeID    = report.MakeEndpointNodeID(unknownHostID, "", unknownAddress, "10002")
	unknown3EndpointNodeID    = report.MakeEndpointNodeID(unknownHostID, "", unknownAddress, "10003")
	clientAddressNodeID       = report.MakeAddressNodeID(clientHostID, clientAddress)
	serverAddressNodeID       = report.MakeAddressNodeID(serverHostID, serverAddress)
	unknownAddressNodeID      = report.MakeAddressNodeID(unknownHostID, unknownAddress)
)
// TestEndpointNodeID exercises ParseEndpointNodeID against malformed
// inputs (which must be rejected) and well-formed ones.
func TestEndpointNodeID(t *testing.T) {
	// None of these are valid endpoint node IDs; parsing must fail.
	for _, bad := range []string{
		clientAddressNodeID,
		serverAddressNodeID,
		unknownAddressNodeID,
		clientHostNodeID,
		serverHostNodeID,
		"host.com;1.2.3.4",
		"a;b",
		"a;",
		";b",
		";",
		"",
	} {
		if haveName, haveAddress, havePort, ok := report.ParseEndpointNodeID(bad); ok {
			t.Errorf("%q: expected failure, but got {%q, %q, %q}", bad, haveName, haveAddress, havePort)
		}
	}
	// Valid IDs parse into (name, address, port). Note that an empty
	// namespace yields an empty parsed name.
	for input, want := range map[string]struct{ name, address, port string }{
		report.MakeEndpointNodeID("host.com", "namespaceid", "127.0.0.1", "c"): {"host.com-namespaceid", "127.0.0.1", "c"},
		report.MakeEndpointNodeID("host.com", "", "1.2.3.4", "c"):              {"", "1.2.3.4", "c"},
		"a;b;c": {"a", "b", "c"},
	} {
		haveName, haveAddress, havePort, ok := report.ParseEndpointNodeID(input)
		if !ok {
			t.Errorf("%q: not OK", input)
			continue
		}
		if want.name != haveName ||
			want.address != haveAddress ||
			want.port != havePort {
			t.Errorf("%q: want %q, have {%q, %q, %q}", input, want, haveName, haveAddress, havePort)
		}
	}
}
Add test to check that ECS service node ID parsing is backwards-compatible
package report_test
import (
"testing"
"github.com/weaveworks/scope/report"
)
// Fixture IDs shared by the report tests: two known hosts (client and
// server), one unknown host, and endpoint/address node IDs derived from them.
var (
	clientHostID              = "client.host.com"
	clientHostName            = clientHostID
	clientHostNodeID          = report.MakeHostNodeID(clientHostID)
	clientAddress             = "10.10.10.20"
	serverHostID              = "server.host.com"
	serverHostName            = serverHostID
	serverHostNodeID          = report.MakeHostNodeID(serverHostID)
	serverAddress             = "10.10.10.1"
	unknownHostID             = ""              // by definition, we don't know it
	unknownAddress            = "172.16.93.112" // will be a pseudonode, no corresponding host
	client54001EndpointNodeID = report.MakeEndpointNodeID(clientHostID, "", clientAddress, "54001") // i.e. curl
	client54002EndpointNodeID = report.MakeEndpointNodeID(clientHostID, "", clientAddress, "54002") // also curl
	server80EndpointNodeID    = report.MakeEndpointNodeID(serverHostID, "", serverAddress, "80")    // i.e. apache
	unknown1EndpointNodeID    = report.MakeEndpointNodeID(unknownHostID, "", unknownAddress, "10001")
	unknown2EndpointNodeID    = report.MakeEndpointNodeID(unknownHostID, "", unknownAddress, "10002")
	unknown3EndpointNodeID    = report.MakeEndpointNodeID(unknownHostID, "", unknownAddress, "10003")
	clientAddressNodeID       = report.MakeAddressNodeID(clientHostID, clientAddress)
	serverAddressNodeID       = report.MakeAddressNodeID(serverHostID, serverAddress)
	unknownAddressNodeID      = report.MakeAddressNodeID(unknownHostID, unknownAddress)
)
// TestEndpointNodeID exercises ParseEndpointNodeID against malformed
// inputs (which must be rejected) and well-formed ones.
func TestEndpointNodeID(t *testing.T) {
	// None of these are valid endpoint node IDs; parsing must fail.
	for _, bad := range []string{
		clientAddressNodeID,
		serverAddressNodeID,
		unknownAddressNodeID,
		clientHostNodeID,
		serverHostNodeID,
		"host.com;1.2.3.4",
		"a;b",
		"a;",
		";b",
		";",
		"",
	} {
		if haveName, haveAddress, havePort, ok := report.ParseEndpointNodeID(bad); ok {
			t.Errorf("%q: expected failure, but got {%q, %q, %q}", bad, haveName, haveAddress, havePort)
		}
	}
	// Valid IDs parse into (name, address, port). Note that an empty
	// namespace yields an empty parsed name.
	for input, want := range map[string]struct{ name, address, port string }{
		report.MakeEndpointNodeID("host.com", "namespaceid", "127.0.0.1", "c"): {"host.com-namespaceid", "127.0.0.1", "c"},
		report.MakeEndpointNodeID("host.com", "", "1.2.3.4", "c"):              {"", "1.2.3.4", "c"},
		"a;b;c": {"a", "b", "c"},
	} {
		haveName, haveAddress, havePort, ok := report.ParseEndpointNodeID(input)
		if !ok {
			t.Errorf("%q: not OK", input)
			continue
		}
		if want.name != haveName ||
			want.address != haveAddress ||
			want.port != havePort {
			t.Errorf("%q: want %q, have {%q, %q, %q}", input, want, haveName, haveAddress, havePort)
		}
	}
}
// TestECSServiceNodeIDCompat checks that ParseECSServiceNodeID still
// understands the legacy "<name>;<ecs_service>" node ID format.
func TestECSServiceNodeIDCompat(t *testing.T) {
	testID := "my-service;<ecs_service>"
	testName := "my-service"
	_, name, ok := report.ParseECSServiceNodeID(testID)
	if !ok {
		// Stop here: comparing name after a failed parse would only
		// produce a second, misleading failure message.
		t.Fatalf("Failed to parse backwards-compatible id %q", testID)
	}
	if name != testName {
		t.Errorf("Backwards-compatible id %q parsed name to %q, expected %q", testID, name, testName)
	}
}
|
// Copyright 2022 the u-root Authors. All rights reserved
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package kexec
import (
"bytes"
"debug/elf"
"encoding/binary"
"fmt"
"log"
"github.com/u-root/u-root/pkg/boot/align"
)
// PurgeLoad loads an elf file at the desired address.
//
// It parses the ELF image in elfBuf (which must contain exactly one
// program header), patches start and param into the image at fixed
// offsets 8 and 16, reserves physical memory for the segment in kmem,
// and returns the ELF entry point.
func PurgeLoad(kmem *Memory, elfBuf []byte, start, param uintptr) (uintptr, error) {
	elfFile, err := elf.NewFile(bytes.NewReader(elfBuf))
	if err != nil {
		return 0, fmt.Errorf("parse elf file from elf buffer: %v", err)
	}
	log.Printf("Elf file: %#v, %d Progs", elfFile, len(elfFile.Progs))
	if len(elfFile.Progs) != 1 {
		return 0, fmt.Errorf("parse elf file: can only handle one Prog, not %d", len(elfFile.Progs))
	}
	p := elfFile.Progs[0]
	// the package really wants things page-sized, and rather than
	// deal with all the bugs that arise from that, just keep it happy.
	p.Memsz = uint64(align.AlignUpPageSize(uint(p.Memsz)))
	b := make([]byte, p.Memsz)
	if _, err := p.ReadAt(b[:p.Filesz], 0); err != nil {
		return 0, err
	}
	entry := elfFile.Entry
	Debug("Start is %#x, param is %#x", start, param)
	binary.LittleEndian.PutUint64(b[8:], uint64(start))
	binary.LittleEndian.PutUint64(b[16:], uint64(param))
	min := uintptr(p.Vaddr)
	max := uintptr(p.Vaddr + uint64(len(b)))
	phyRange, err := kmem.ReservePhys(uint(len(b)), RangeFromInterval(min, max))
	if err != nil {
		// Return 0, not entry: the address is meaningless when the
		// reservation failed and must not be used by the caller.
		return 0, fmt.Errorf("reserve phys ram of size %d between range(%d, %d): %v", len(b), min, max, err)
	}
	kmem.Segments.Insert(NewSegment(b, phyRange))
	return uintptr(entry), nil
}
kexec PurgeLoad: clarify docs + errors
Signed-off-by: Chris Koch <cd2178739c1fb8f241acc6c7bd5abe02c150d7e5@google.com>
// Copyright 2022 the u-root Authors. All rights reserved
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package kexec
import (
"bytes"
"debug/elf"
"encoding/binary"
"fmt"
"log"
"github.com/u-root/u-root/pkg/boot/align"
)
// PurgeLoad loads an ELF (which must be one of the purgatories from the
// pkg/boot/linux package) into kmem, instructing it to jump to start with
// RSI set to param.
func PurgeLoad(kmem *Memory, elfBuf []byte, start, param uintptr) (uintptr, error) {
	elfFile, err := elf.NewFile(bytes.NewReader(elfBuf))
	if err != nil {
		return 0, fmt.Errorf("parse purgatory ELF file from ELF buffer: %v", err)
	}
	log.Printf("Elf file: %#v, %d Progs", elfFile, len(elfFile.Progs))
	if len(elfFile.Progs) != 1 {
		return 0, fmt.Errorf("parse purgatory ELF file: can only handle one Prog, not %d", len(elfFile.Progs))
	}
	p := elfFile.Progs[0]
	// the package really wants things page-sized, and rather than
	// deal with all the bugs that arise from that, just keep it happy.
	p.Memsz = uint64(align.AlignUpPageSize(uint(p.Memsz)))
	b := make([]byte, p.Memsz)
	if _, err := p.ReadAt(b[:p.Filesz], 0); err != nil {
		return 0, err
	}
	entry := uintptr(elfFile.Entry)
	Debug("Start is %#x, param is %#x", start, param)
	// Patch start and param into the image at fixed offsets 8 and 16.
	binary.LittleEndian.PutUint64(b[8:], uint64(start))
	binary.LittleEndian.PutUint64(b[16:], uint64(param))
	// TODO: Purgatories would be easier to handle if they were position-independent.
	min := uintptr(p.Vaddr)
	max := uintptr(p.Vaddr + uint64(len(b)))
	phyRange, err := kmem.ReservePhys(uint(len(b)), RangeFromInterval(min, max))
	if err != nil {
		return 0, fmt.Errorf("purgatory: reserve phys ram of size %d between range(%d, %d): %v", len(b), min, max, err)
	}
	kmem.Segments.Insert(NewSegment(b, phyRange))
	return entry, nil
}
|
package cmd
import (
"bufio"
"fmt"
"io"
"os"
"strings"
"github.com/spf13/cobra"
kclientcmd "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
kcmd "k8s.io/kubernetes/pkg/kubectl/cmd"
"k8s.io/kubernetes/pkg/kubectl/cmd/config"
kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
kvalidation "k8s.io/kubernetes/pkg/util/validation"
"github.com/openshift/origin/pkg/cmd/cli/cmd/create"
cmdconfig "github.com/openshift/origin/pkg/cmd/cli/config"
"github.com/openshift/origin/pkg/cmd/cli/describe"
"github.com/openshift/origin/pkg/cmd/util/clientcmd"
)
// tab prefixes every line of original with a space and rejoins them with
// newlines; any trailing newline in the input is dropped.
func tab(original string) string {
	var indented []string
	sc := bufio.NewScanner(strings.NewReader(original))
	for sc.Scan() {
		indented = append(indented, " "+sc.Text())
	}
	return strings.Join(indented, "\n")
}
const (
	// %[1]s in these templates is replaced with the full command name
	// (e.g. "oc") when the command is built.
	getLong = `Display one or many resources
Possible resources include builds, buildConfigs, services, pods, etc.
Some resources may omit advanced details that you can see with '-o wide'.
If you want an even more detailed view, use '%[1]s describe'.`
	getExample = ` # List all pods in ps output format.
%[1]s get pods
# List a single replication controller with specified ID in ps output format.
%[1]s get rc redis
# List all pods and show more details about them.
%[1]s get -o wide pods
# List a single pod in JSON output format.
%[1]s get -o json pod redis-pod
# Return only the status value of the specified pod.
%[1]s get -o template pod redis-pod --template={{.currentState.status}}`
)

// NewCmdGet is a wrapper for the Kubernetes cli get command
func NewCmdGet(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	cmd := kcmd.NewCmdGet(f.Factory, out)
	cmd.Long = fmt.Sprintf(getLong, fullName)
	cmd.Example = fmt.Sprintf(getExample, fullName)
	// "list" is not a command; suggest "get" when users type it.
	cmd.SuggestFor = []string{"list"}
	return cmd
}
const (
	replaceLong = `Replace a resource by filename or stdin
JSON and YAML formats are accepted.`
	replaceExample = ` # Replace a pod using the data in pod.json.
%[1]s replace -f pod.json
# Replace a pod based on the JSON passed into stdin.
cat pod.json | %[1]s replace -f -
# Force replace, delete and then re-create the resource
%[1]s replace --force -f pod.json`
)

// NewCmdReplace is a wrapper for the Kubernetes cli replace command
func NewCmdReplace(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	cmd := kcmd.NewCmdReplace(f.Factory, out)
	// replaceLong has no %[1]s placeholder, so it is used verbatim.
	cmd.Long = replaceLong
	cmd.Example = fmt.Sprintf(replaceExample, fullName)
	return cmd
}
const (
	patchLong = `Update field(s) of a resource using strategic merge patch
JSON and YAML formats are accepted.`
	patchExample = ` # Partially update a node using strategic merge patch
%[1]s patch node k8s-node-1 -p '{"spec":{"unschedulable":true}}'`
)

// NewCmdPatch is a wrapper for the Kubernetes cli patch command
func NewCmdPatch(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	cmd := kcmd.NewCmdPatch(f.Factory, out)
	cmd.Long = patchLong
	cmd.Example = fmt.Sprintf(patchExample, fullName)
	return cmd
}
const (
	deleteLong = `Delete a resource
JSON and YAML formats are accepted.
If both a filename and command line arguments are passed, the command line
arguments are used and the filename is ignored.
Note that the delete command does NOT do resource version checks, so if someone
submits an update to a resource right when you submit a delete, their update
will be lost along with the rest of the resource.`
	deleteExample = ` # Delete a pod using the type and ID specified in pod.json.
%[1]s delete -f pod.json
# Delete a pod based on the type and ID in the JSON passed into stdin.
cat pod.json | %[1]s delete -f -
# Delete pods and services with label name=myLabel.
%[1]s delete pods,services -l name=myLabel
# Delete a pod with ID 1234-56-7890-234234-456456.
%[1]s delete pod 1234-56-7890-234234-456456
# Delete all resources associated with a running app, includes
# buildconfig,deploymentconfig,service,imagestream,route and pod,
# where 'appName' is listed in 'Labels' of 'oc describe [resource] [resource name]' output.
%[1]s delete all -l app=appName
# Delete all pods
%[1]s delete pods --all`
)

// NewCmdDelete is a wrapper for the Kubernetes cli delete command
func NewCmdDelete(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	cmd := kcmd.NewCmdDelete(f.Factory, out)
	cmd.Long = deleteLong
	// Override the upstream short description.
	cmd.Short = "Delete one or more resources"
	cmd.Example = fmt.Sprintf(deleteExample, fullName)
	cmd.SuggestFor = []string{"remove", "stop"}
	return cmd
}
const (
	createLong = `Create a resource by filename or stdin
JSON and YAML formats are accepted.`
	createExample = ` # Create a pod using the data in pod.json.
%[1]s create -f pod.json
# Create a pod based on the JSON passed into stdin.
cat pod.json | %[1]s create -f -`
)

// NewCmdCreate is a wrapper for the Kubernetes cli create command
func NewCmdCreate(parentName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	cmd := kcmd.NewCmdCreate(f.Factory, out)
	cmd.Long = createLong
	cmd.Example = fmt.Sprintf(createExample, parentName)
	// create subcommands: OpenShift-specific resource creators are
	// registered on top of the upstream create command.
	cmd.AddCommand(NewCmdCreateRoute(parentName, f, out))
	cmd.AddCommand(create.NewCmdCreatePolicyBinding(create.PolicyBindingRecommendedName, parentName+" create "+create.PolicyBindingRecommendedName, f, out))
	cmd.AddCommand(create.NewCmdCreateDeploymentConfig(create.DeploymentConfigRecommendedName, parentName+" create "+create.DeploymentConfigRecommendedName, f, out))
	cmd.AddCommand(create.NewCmdCreateClusterQuota(create.ClusterQuotaRecommendedName, parentName+" create "+create.ClusterQuotaRecommendedName, f, out))
	cmd.AddCommand(create.NewCmdCreateUser(create.UserRecommendedName, parentName+" create "+create.UserRecommendedName, f, out))
	cmd.AddCommand(create.NewCmdCreateIdentity(create.IdentityRecommendedName, parentName+" create "+create.IdentityRecommendedName, f, out))
	cmd.AddCommand(create.NewCmdCreateUserIdentityMapping(create.UserIdentityMappingRecommendedName, parentName+" create "+create.UserIdentityMappingRecommendedName, f, out))
	cmd.AddCommand(create.NewCmdCreateImageStream(create.ImageStreamRecommendedName, parentName+" create "+create.ImageStreamRecommendedName, f, out))
	adjustCmdExamples(cmd, parentName, "create")
	return cmd
}
const (
	execLong = `Execute a command in a container`
	execExample = ` # Get output from running 'date' in ruby-container from pod 'mypod'
%[1]s exec mypod -c ruby-container date
# Switch to raw terminal mode, sends stdin to 'bash' in ruby-container from pod 'mypod' and sends stdout/stderr from 'bash' back to the client
%[1]s exec mypod -c ruby-container -i -t -- bash -il`
	completionLong = `This command prints shell code which must be evaluated to provide interactive
completion of openshift cli commands.`
	completionExample = ` # Generate the openshift cli completion code for bash
source <(oc completion bash)
# The above example depends on the bash-completion
framework. It must be sourced before sourcing the openshift cli completion, i.e. on the Mac:
brew install bash-completion
source $(brew --prefix)/etc/bash_completion
source <(oc completion bash)
# In zsh, the following will load openshift cli zsh completion:
source <(oc completion zsh)`
)

// NewCmdCompletion is a wrapper for the Kubernetes cli completion command.
func NewCmdCompletion(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	cmd := kcmd.NewCmdCompletion(f.Factory, out)
	cmd.Long = completionLong
	cmd.Example = completionExample
	return cmd
}
// NewCmdExec is a wrapper for the Kubernetes cli exec command
func NewCmdExec(fullName string, f *clientcmd.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer) *cobra.Command {
	cmd := kcmd.NewCmdExec(f.Factory, cmdIn, cmdOut, cmdErr)
	cmd.Use = "exec [options] POD [-c CONTAINER] -- COMMAND [args...]"
	cmd.Long = execLong
	cmd.Example = fmt.Sprintf(execExample, fullName)
	// Keep the upstream --pod flag but mark it deprecated in help output.
	cmd.Flag("pod").Usage = cmd.Flag("pod").Usage + " (deprecated)"
	return cmd
}
const (
	portForwardLong = `Forward 1 or more local ports to a pod`
	portForwardExample = ` # Listens on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in the pod
%[1]s port-forward mypod 5000 6000
# Listens on port 8888 locally, forwarding to 5000 in the pod
%[1]s port-forward mypod 8888:5000
# Listens on a random port locally, forwarding to 5000 in the pod
%[1]s port-forward mypod :5000
# Listens on a random port locally, forwarding to 5000 in the pod
%[1]s port-forward mypod 0:5000`
)

// NewCmdPortForward is a wrapper for the Kubernetes cli port-forward command
func NewCmdPortForward(fullName string, f *clientcmd.Factory, out, errout io.Writer) *cobra.Command {
	cmd := kcmd.NewCmdPortForward(f.Factory, out, errout)
	cmd.Long = portForwardLong
	cmd.Example = fmt.Sprintf(portForwardExample, fullName)
	// Keep the upstream --pod flag but mark it deprecated in help output.
	cmd.Flag("pod").Usage = cmd.Flag("pod").Usage + " (deprecated)"
	return cmd
}
const (
	describeLong = `Show details of a specific resource
This command joins many API calls together to form a detailed description of a
given resource.`
	describeExample = ` # Provide details about the ruby-22-centos7 image repository
%[1]s describe imageRepository ruby-22-centos7
# Provide details about the ruby-sample-build build configuration
%[1]s describe bc ruby-sample-build`
)

// NewCmdDescribe is a wrapper for the Kubernetes cli describe command
func NewCmdDescribe(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	cmd := kcmd.NewCmdDescribe(f.Factory, out)
	cmd.Long = describeLong
	cmd.Example = fmt.Sprintf(describeExample, fullName)
	// Restrict completion/validation to resources we can describe.
	cmd.ValidArgs = describe.DescribableResources()
	return cmd
}
const (
	proxyLong = `Run a proxy to the API server`
	proxyExample = ` # Run a proxy to the api server on port 8011, serving static content from ./local/www/
%[1]s proxy --port=8011 --www=./local/www/
# Run a proxy to the api server on an arbitrary local port.
# The chosen port for the server will be output to stdout.
%[1]s proxy --port=0
# Run a proxy to the api server, changing the api prefix to my-api
# This makes e.g. the pods api available at localhost:8011/my-api/api/v1/pods/
%[1]s proxy --api-prefix=/my-api`
)

// NewCmdProxy is a wrapper for the Kubernetes cli proxy command
func NewCmdProxy(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	cmd := kcmd.NewCmdProxy(f.Factory, out)
	cmd.Long = proxyLong
	cmd.Example = fmt.Sprintf(proxyExample, fullName)
	return cmd
}
const (
	scaleLong = `Set a new size for a deployment or replication controller
Scale also allows users to specify one or more preconditions for the scale action.
If --current-replicas or --resource-version is specified, it is validated before the
scale is attempted, and it is guaranteed that the precondition holds true when the
scale is sent to the server.
Note that scaling a deployment configuration with no deployments will update the
desired replicas in the configuration template.`
	scaleExample = ` # Scale replication controller named 'foo' to 3.
%[1]s scale --replicas=3 replicationcontrollers foo
# If the replication controller named foo's current size is 2, scale foo to 3.
%[1]s scale --current-replicas=2 --replicas=3 replicationcontrollers foo
# Scale the latest deployment of 'bar'. In case of no deployment, bar's template
# will be scaled instead.
%[1]s scale --replicas=10 dc bar`
)

// NewCmdScale is a wrapper for the Kubernetes cli scale command
func NewCmdScale(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	cmd := kcmd.NewCmdScale(f.Factory, out)
	// Override the upstream short description.
	cmd.Short = "Change the number of pods in a deployment"
	cmd.Long = scaleLong
	cmd.Example = fmt.Sprintf(scaleExample, fullName)
	cmd.ValidArgs = []string{"deploymentconfig", "job", "replicationcontroller"}
	return cmd
}
const (
autoScaleLong = `Autoscale a deployment config or replication controller.
Looks up a deployment config or replication controller by name and creates an autoscaler that uses
this deployment config or replication controller as a reference. An autoscaler can automatically
increase or decrease number of pods deployed within the system as needed.`
autoScaleExample = ` # Auto scale a deployment config "foo", with the number of pods between 2 to 10, target CPU utilization at a default value that server applies:
%[1]s autoscale dc/foo --min=2 --max=10
# Auto scale a replication controller "foo", with the number of pods between 1 to 5, target CPU utilization at 80%%
%[1]s autoscale rc/foo --max=5 --cpu-percent=80`
)
// NewCmdAutoscale is a wrapper for the Kubernetes cli autoscale command
func NewCmdAutoscale(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	c := kcmd.NewCmdAutoscale(f.Factory, out)
	c.Example = fmt.Sprintf(autoScaleExample, fullName)
	c.Long = autoScaleLong
	c.Short = "Autoscale a deployment config or replication controller"
	return c
}
const (
// runLong is the long help text for the run command.
runLong = `Create and run a particular image, possibly replicated
Creates a deployment config to manage the created container(s). You can choose to run in the
foreground for an interactive container execution. You may pass 'run/v1' to
--generator to create a replication controller instead of a deployment config.`
// runExample holds usage examples; %[1]s is substituted with the CLI name.
runExample = ` # Starts a single instance of nginx.
%[1]s run nginx --image=nginx
# Starts a replicated instance of nginx.
%[1]s run nginx --image=nginx --replicas=5
# Dry run. Print the corresponding API objects without creating them.
%[1]s run nginx --image=nginx --dry-run
# Start a single instance of nginx, but overload the spec of the replication
# controller with a partial set of values parsed from JSON.
%[1]s run nginx --image=nginx --overrides='{ "apiVersion": "v1", "spec": { ... } }'
# Start a single instance of nginx and keep it in the foreground, don't restart it if it exits.
%[1]s run -i --tty nginx --image=nginx --restart=Never`
// TODO: uncomment these when arguments are delivered upstream
// Start the nginx container using the default command, but use custom
// arguments (arg1 .. argN) for that command.
//%[1]s run nginx --image=nginx -- <arg1> <arg2> ... <argN>
// Start the nginx container using a different command and custom arguments
//%[1]s run nginx --image=nginx --command -- <cmd> <arg1> ... <argN>`
)
// NewCmdRun is a wrapper for the Kubernetes cli run command
func NewCmdRun(fullName string, f *clientcmd.Factory, in io.Reader, out, errout io.Writer) *cobra.Command {
	// Pick deploymentconfig/v1 when --restart=Always, run-pod/v1 otherwise.
	runOpts := &kcmd.RunOptions{
		DefaultRestartAlwaysGenerator: "deploymentconfig/v1",
		DefaultGenerator:              kcmdutil.RunPodV1GeneratorName,
	}
	c := kcmd.NewCmdRunWithOptions(f.Factory, runOpts, in, out, errout)
	c.Long = runLong
	c.Example = fmt.Sprintf(runExample, fullName)
	c.SuggestFor = []string{"image"}
	// Clear the upstream generator default so the options above decide it;
	// the Set error is deliberately ignored (the flag is known to exist).
	c.Flags().Set("generator", "")
	gen := c.Flag("generator")
	gen.Usage = "The name of the API generator to use. Default is 'deploymentconfig/v1' if --restart=Always, otherwise the default is 'run-pod/v1'."
	gen.DefValue = ""
	gen.Changed = false
	return c
}
const (
// attachLong is the long help text for the attach command.
attachLong = `Attach to a running container
Attach the current shell to a remote container, returning output or setting up a full
terminal session. Can be used to debug containers and invoke interactive commands.`
// attachExample holds usage examples; %[1]s is substituted with the CLI name.
attachExample = ` # Get output from running pod 123456-7890, using the first container by default
%[1]s attach 123456-7890
# Get output from ruby-container from pod 123456-7890
%[1]s attach 123456-7890 -c ruby-container
# Switch to raw terminal mode, sends stdin to 'bash' in ruby-container from pod 123456-780
# and sends stdout/stderr from 'bash' back to the client
%[1]s attach 123456-7890 -c ruby-container -i -t`
)
// NewCmdAttach is a wrapper for the Kubernetes cli attach command
func NewCmdAttach(fullName string, f *clientcmd.Factory, in io.Reader, out, errout io.Writer) *cobra.Command {
	c := kcmd.NewCmdAttach(f.Factory, in, out, errout)
	c.Example = fmt.Sprintf(attachExample, fullName)
	c.Long = attachLong
	return c
}
const (
// annotateLong is the long help text for the annotate command; %[1]s is the CLI name.
annotateLong = `Update the annotations on one or more resources
An annotation is a key/value pair that can hold larger (compared to a label),
and possibly not human-readable, data. It is intended to store non-identifying
auxiliary data, especially data manipulated by tools and system extensions. If
--overwrite is true, then existing annotations can be overwritten, otherwise
attempting to overwrite an annotation will result in an error. If
--resource-version is specified, then updates will use this resource version,
otherwise the existing resource-version will be used.
Run '%[1]s types' for a list of valid resources.`
// annotateExample holds usage examples; %[1]s is substituted with the CLI name.
annotateExample = ` # Update pod 'foo' with the annotation 'description' and the value 'my frontend'.
# If the same annotation is set multiple times, only the last value will be applied
%[1]s annotate pods foo description='my frontend'
# Update pod 'foo' with the annotation 'description' and the value
# 'my frontend running nginx', overwriting any existing value.
%[1]s annotate --overwrite pods foo description='my frontend running nginx'
# Update all pods in the namespace
%[1]s annotate pods --all description='my frontend running nginx'
# Update pod 'foo' only if the resource is unchanged from version 1.
%[1]s annotate pods foo description='my frontend running nginx' --resource-version=1
# Update pod 'foo' by removing an annotation named 'description' if it exists.
# Does not require the --overwrite flag.
%[1]s annotate pods foo description-`
)
// NewCmdAnnotate is a wrapper for the Kubernetes cli annotate command
func NewCmdAnnotate(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	c := kcmd.NewCmdAnnotate(f.Factory, out)
	// Unlike most wrappers, the long text itself references the CLI name.
	c.Example = fmt.Sprintf(annotateExample, fullName)
	c.Long = fmt.Sprintf(annotateLong, fullName)
	return c
}
const (
// labelLong is the long help text for the label command; %[1]d is the max label value length.
labelLong = `Update the labels on one or more resources
A valid label value is consisted of letters and/or numbers with a max length of %[1]d
characters. If --overwrite is true, then existing labels can be overwritten, otherwise
attempting to overwrite a label will result in an error. If --resource-version is
specified, then updates will use this resource version, otherwise the existing
resource-version will be used.`
// labelExample holds usage examples; %[1]s is substituted with the CLI name.
labelExample = ` # Update pod 'foo' with the label 'unhealthy' and the value 'true'.
%[1]s label pods foo unhealthy=true
# Update pod 'foo' with the label 'status' and the value 'unhealthy', overwriting any existing value.
%[1]s label --overwrite pods foo status=unhealthy
# Update all pods in the namespace
%[1]s label pods --all status=unhealthy
# Update pod 'foo' only if the resource is unchanged from version 1.
%[1]s label pods foo status=unhealthy --resource-version=1
# Update pod 'foo' by removing a label named 'bar' if it exists.
# Does not require the --overwrite flag.
%[1]s label pods foo bar-`
)
// NewCmdLabel is a wrapper for the Kubernetes cli label command
func NewCmdLabel(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	c := kcmd.NewCmdLabel(f.Factory, out)
	c.Example = fmt.Sprintf(labelExample, fullName)
	// The long text embeds the server-enforced maximum label value length.
	c.Long = fmt.Sprintf(labelLong, kvalidation.LabelValueMaxLength)
	return c
}
const (
// applyLong is the long help text for the apply command.
applyLong = `Apply a configuration to a resource by filename or stdin.
JSON and YAML formats are accepted.`
// applyExample holds usage examples; %[1]s is substituted with the CLI name.
applyExample = `# Apply the configuration in pod.json to a pod.
%[1]s apply -f ./pod.json
# Apply the JSON passed into stdin to a pod.
cat pod.json | %[1]s apply -f -`
)
// NewCmdApply is a wrapper for the Kubernetes cli apply command
func NewCmdApply(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	c := kcmd.NewCmdApply(f.Factory, out)
	c.Example = fmt.Sprintf(applyExample, fullName)
	c.Long = applyLong
	return c
}
const (
// explainLong is the long help text for the explain command.
explainLong = `Documentation of resources.
Possible resource types include: pods (po), services (svc),
replicationcontrollers (rc), nodes (no), events (ev), componentstatuses (cs),
limitranges (limits), persistentvolumes (pv), persistentvolumeclaims (pvc),
resourcequotas (quota), namespaces (ns) or endpoints (ep).`
// explainExample holds usage examples; %[1]s is substituted with the CLI name.
explainExample = `# Get the documentation of the resource and its fields
%[1]s explain pods
# Get the documentation of a specific field of a resource
%[1]s explain pods.spec.containers`
)
// NewCmdExplain is a wrapper for the Kubernetes cli explain command
func NewCmdExplain(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	c := kcmd.NewCmdExplain(f.Factory, out)
	c.Example = fmt.Sprintf(explainExample, fullName)
	c.Long = explainLong
	return c
}
const (
// convertLong is the long help text for the convert command.
convertLong = `Convert config files between different API versions. Both YAML
and JSON formats are accepted.
The command takes filename, directory, or URL as input, and convert it into format
of version specified by --output-version flag. If target version is not specified or
not supported, convert to latest version.
The default output will be printed to stdout in YAML format. One can use -o option
to change to output destination.
`
// convertExample holds usage examples; %[1]s is substituted with the CLI name.
convertExample = `# Convert 'pod.yaml' to latest version and print to stdout.
%[1]s convert -f pod.yaml
# Convert the live state of the resource specified by 'pod.yaml' to the latest version
# and print to stdout in json format.
%[1]s convert -f pod.yaml --local -o json
# Convert all files under current directory to latest version and create them all.
%[1]s convert -f . | kubectl create -f -
`
)
// NewCmdConvert is a wrapper for the Kubernetes cli convert command
func NewCmdConvert(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	c := kcmd.NewCmdConvert(f.Factory, out)
	c.Example = fmt.Sprintf(convertExample, fullName)
	c.Long = convertLong
	return c
}
const (
// editLong is the long help text for the edit command.
editLong = `
Edit a resource from the default editor
The edit command allows you to directly edit any API resource you can retrieve via the
command line tools. It will open the editor defined by your OC_EDITOR, or EDITOR environment
variables, or fall back to 'vi' for Linux or 'notepad' for Windows. You can edit multiple
objects, although changes are applied one at a time. The command accepts filenames as well
as command line arguments, although the files you point to must be previously saved versions
of resources.
The files to edit will be output in the default API version, or a version specified
by --output-version. The default format is YAML - if you would like to edit in JSON
pass -o json. The flag --windows-line-endings can be used to force Windows line endings,
otherwise the default for your operating system will be used.
In the event an error occurs while updating, a temporary file will be created on disk
that contains your unapplied changes. The most common error when updating a resource
is another editor changing the resource on the server. When this occurs, you will have
to apply your changes to the newer version of the resource, or update your temporary
saved copy to include the latest resource version.`
// editExample holds usage examples; %[1]s is substituted with the CLI name.
editExample = ` # Edit the service named 'docker-registry':
%[1]s edit svc/docker-registry
# Edit the DeploymentConfig named 'my-deployment':
%[1]s edit dc/my-deployment
# Use an alternative editor
OC_EDITOR="nano" %[1]s edit dc/my-deployment
# Edit the service 'docker-registry' in JSON using the v1beta3 API format:
%[1]s edit svc/docker-registry --output-version=v1beta3 -o json`
)
// NewCmdEdit is a wrapper for the Kubernetes cli edit command
func NewCmdEdit(fullName string, f *clientcmd.Factory, out, errout io.Writer) *cobra.Command {
	c := kcmd.NewCmdEdit(f.Factory, out, errout)
	c.Example = fmt.Sprintf(editExample, fullName)
	c.Long = editLong
	return c
}
const (
// configLong is the long help text for the config command.
configLong = `
Manage the client config files
The client stores configuration in the current user's home directory (under the .kube directory as
config). When you login the first time, a new config file is created, and subsequent project changes with the
'project' command will set the current context. These subcommands allow you to manage the config directly.
Reference: https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/kubeconfig-file.md`
// configExample holds usage examples; %[1]s is the parent command, %[2]s the subcommand name.
configExample = ` # Change the config context to use
%[1]s %[2]s use-context my-context
# Set the value of a config preference
%[1]s %[2]s set preferences.some true`
)
// NewCmdConfig wraps the kubectl config command, pointed at the OpenShift
// client configuration files and environment variables.
func NewCmdConfig(parentName, name string) *cobra.Command {
	opts := &kclientcmd.PathOptions{
		GlobalFile:        cmdconfig.RecommendedHomeFile,
		EnvVar:            cmdconfig.OpenShiftConfigPathEnvVar,
		ExplicitFileFlag:  cmdconfig.OpenShiftConfigFlagName,
		GlobalFileSubpath: cmdconfig.OpenShiftConfigHomeDirFileName,
		LoadingRules:      cmdconfig.NewOpenShiftClientConfigLoadingRules(),
	}
	opts.LoadingRules.DoNotResolvePaths = true
	c := config.NewCmdConfig(opts, os.Stdout)
	c.Short = "Change configuration files for the client"
	c.Long = configLong
	c.Example = fmt.Sprintf(configExample, parentName, name)
	// Rewrite upstream "kubectl" examples to use this binary's name.
	adjustCmdExamples(c, parentName, name)
	return c
}
// adjustCmdExamples rewrites the Example text of cmd and, recursively, all of
// its subcommands: "kubectl" is replaced by parentName and every line is
// trimmed and re-indented uniformly.
func adjustCmdExamples(cmd *cobra.Command, parentName string, name string) {
	// Recurse first; children receive this command's name as their parent.
	for _, child := range cmd.Commands() {
		adjustCmdExamples(child, parentName, cmd.Name())
	}
	cmd.Example = strings.Replace(cmd.Example, "kubectl", parentName, -1)
	tabbing := " "
	var reindented []string
	sc := bufio.NewScanner(strings.NewReader(cmd.Example))
	for sc.Scan() {
		reindented = append(reindented, tabbing+strings.TrimSpace(sc.Text()))
	}
	cmd.Example = strings.Join(reindented, "\n")
}
// update completion help to use root command in examples
package cmd
import (
"bufio"
"fmt"
"io"
"os"
"strings"
"github.com/spf13/cobra"
kclientcmd "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
kcmd "k8s.io/kubernetes/pkg/kubectl/cmd"
"k8s.io/kubernetes/pkg/kubectl/cmd/config"
kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
kvalidation "k8s.io/kubernetes/pkg/util/validation"
"github.com/openshift/origin/pkg/cmd/cli/cmd/create"
cmdconfig "github.com/openshift/origin/pkg/cmd/cli/config"
"github.com/openshift/origin/pkg/cmd/cli/describe"
"github.com/openshift/origin/pkg/cmd/util/clientcmd"
)
// tab prefixes every line of original with a uniform indent and rejoins the
// lines with newlines (any trailing newline in the input is dropped).
func tab(original string) string {
	var indented []string
	sc := bufio.NewScanner(strings.NewReader(original))
	for sc.Scan() {
		indented = append(indented, " "+sc.Text())
	}
	return strings.Join(indented, "\n")
}
const (
// getLong is the long help text for the get command; %[1]s is the CLI name.
getLong = `Display one or many resources
Possible resources include builds, buildConfigs, services, pods, etc.
Some resources may omit advanced details that you can see with '-o wide'.
If you want an even more detailed view, use '%[1]s describe'.`
// getExample holds usage examples; %[1]s is substituted with the CLI name.
getExample = ` # List all pods in ps output format.
%[1]s get pods
# List a single replication controller with specified ID in ps output format.
%[1]s get rc redis
# List all pods and show more details about them.
%[1]s get -o wide pods
# List a single pod in JSON output format.
%[1]s get -o json pod redis-pod
# Return only the status value of the specified pod.
%[1]s get -o template pod redis-pod --template={{.currentState.status}}`
)
// NewCmdGet is a wrapper for the Kubernetes cli get command
func NewCmdGet(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	c := kcmd.NewCmdGet(f.Factory, out)
	c.Example = fmt.Sprintf(getExample, fullName)
	c.Long = fmt.Sprintf(getLong, fullName)
	c.SuggestFor = []string{"list"}
	return c
}
const (
// replaceLong is the long help text for the replace command.
replaceLong = `Replace a resource by filename or stdin
JSON and YAML formats are accepted.`
// replaceExample holds usage examples; %[1]s is substituted with the CLI name.
replaceExample = ` # Replace a pod using the data in pod.json.
%[1]s replace -f pod.json
# Replace a pod based on the JSON passed into stdin.
cat pod.json | %[1]s replace -f -
# Force replace, delete and then re-create the resource
%[1]s replace --force -f pod.json`
)
// NewCmdReplace is a wrapper for the Kubernetes cli replace command
func NewCmdReplace(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	c := kcmd.NewCmdReplace(f.Factory, out)
	c.Example = fmt.Sprintf(replaceExample, fullName)
	c.Long = replaceLong
	return c
}
const (
// patchLong is the long help text for the patch command.
patchLong = `Update field(s) of a resource using strategic merge patch
JSON and YAML formats are accepted.`
// patchExample holds usage examples; %[1]s is substituted with the CLI name.
patchExample = ` # Partially update a node using strategic merge patch
%[1]s patch node k8s-node-1 -p '{"spec":{"unschedulable":true}}'`
)
// NewCmdPatch is a wrapper for the Kubernetes cli patch command
func NewCmdPatch(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	c := kcmd.NewCmdPatch(f.Factory, out)
	c.Example = fmt.Sprintf(patchExample, fullName)
	c.Long = patchLong
	return c
}
const (
// deleteLong is the long help text for the delete command.
deleteLong = `Delete a resource
JSON and YAML formats are accepted.
If both a filename and command line arguments are passed, the command line
arguments are used and the filename is ignored.
Note that the delete command does NOT do resource version checks, so if someone
submits an update to a resource right when you submit a delete, their update
will be lost along with the rest of the resource.`
// deleteExample holds usage examples; %[1]s is substituted with the CLI name.
deleteExample = ` # Delete a pod using the type and ID specified in pod.json.
%[1]s delete -f pod.json
# Delete a pod based on the type and ID in the JSON passed into stdin.
cat pod.json | %[1]s delete -f -
# Delete pods and services with label name=myLabel.
%[1]s delete pods,services -l name=myLabel
# Delete a pod with ID 1234-56-7890-234234-456456.
%[1]s delete pod 1234-56-7890-234234-456456
# Delete all resources associated with a running app, includes
# buildconfig,deploymentconfig,service,imagestream,route and pod,
# where 'appName' is listed in 'Labels' of 'oc describe [resource] [resource name]' output.
%[1]s delete all -l app=appName
# Delete all pods
%[1]s delete pods --all`
)
// NewCmdDelete is a wrapper for the Kubernetes cli delete command
func NewCmdDelete(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	c := kcmd.NewCmdDelete(f.Factory, out)
	c.Short = "Delete one or more resources"
	c.Example = fmt.Sprintf(deleteExample, fullName)
	c.Long = deleteLong
	c.SuggestFor = []string{"remove", "stop"}
	return c
}
const (
// createLong is the long help text for the create command.
createLong = `Create a resource by filename or stdin
JSON and YAML formats are accepted.`
// createExample holds usage examples; %[1]s is substituted with the CLI name.
createExample = ` # Create a pod using the data in pod.json.
%[1]s create -f pod.json
# Create a pod based on the JSON passed into stdin.
cat pod.json | %[1]s create -f -`
)
// NewCmdCreate is a wrapper for the Kubernetes cli create command
// It registers OpenShift-specific create subcommands on top of the upstream
// command; each subcommand receives its recommended name and the full
// "<parent> create <name>" invocation path for help output.
func NewCmdCreate(parentName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
cmd := kcmd.NewCmdCreate(f.Factory, out)
cmd.Long = createLong
cmd.Example = fmt.Sprintf(createExample, parentName)
// create subcommands
cmd.AddCommand(NewCmdCreateRoute(parentName, f, out))
cmd.AddCommand(create.NewCmdCreatePolicyBinding(create.PolicyBindingRecommendedName, parentName+" create "+create.PolicyBindingRecommendedName, f, out))
cmd.AddCommand(create.NewCmdCreateDeploymentConfig(create.DeploymentConfigRecommendedName, parentName+" create "+create.DeploymentConfigRecommendedName, f, out))
cmd.AddCommand(create.NewCmdCreateClusterQuota(create.ClusterQuotaRecommendedName, parentName+" create "+create.ClusterQuotaRecommendedName, f, out))
cmd.AddCommand(create.NewCmdCreateUser(create.UserRecommendedName, parentName+" create "+create.UserRecommendedName, f, out))
cmd.AddCommand(create.NewCmdCreateIdentity(create.IdentityRecommendedName, parentName+" create "+create.IdentityRecommendedName, f, out))
cmd.AddCommand(create.NewCmdCreateUserIdentityMapping(create.UserIdentityMappingRecommendedName, parentName+" create "+create.UserIdentityMappingRecommendedName, f, out))
cmd.AddCommand(create.NewCmdCreateImageStream(create.ImageStreamRecommendedName, parentName+" create "+create.ImageStreamRecommendedName, f, out))
// Rewrite upstream "kubectl" examples for this binary and its subcommands.
adjustCmdExamples(cmd, parentName, "create")
return cmd
}
const (
// execLong is the long help text for the exec command.
execLong = `Execute a command in a container`
// execExample holds usage examples; %[1]s is substituted with the CLI name.
execExample = ` # Get output from running 'date' in ruby-container from pod 'mypod'
%[1]s exec mypod -c ruby-container date
# Switch to raw terminal mode, sends stdin to 'bash' in ruby-container from pod 'mypod' and sends stdout/stderr from 'bash' back to the client
%[1]s exec mypod -c ruby-container -i -t -- bash -il`
)
// NewCmdCompletion wraps the kubectl completion command and tailors the help
// text to whichever binary name is in use (cli, admin, or root "openshift").
func NewCmdCompletion(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	helpName := fullName
	helpString := "openshift cli"
	switch {
	case strings.HasSuffix(fullName, "admin"), strings.HasSuffix(fullName, "oadm"):
		helpString = "openshift admin"
	case strings.HasSuffix(fullName, "completion"):
		// Invoked as "openshift completion": use the root command in examples.
		helpName = "openshift"
		helpString = "openshift"
	}
	c := kcmd.NewCmdCompletion(f.Factory, out)
	c.Long = `This command prints shell code which must be evaluated to provide interactive
completion of ` + helpString + ` commands.`
	c.Example = ` # Generate the ` + helpString + ` completion code for bash
source <(` + helpName + ` completion bash)
# The above example depends on the bash-completion
framework. It must be sourced before sourcing the openshift cli completion, i.e. on the Mac:
brew install bash-completion
source $(brew --prefix)/etc/bash_completion
source <(` + helpName + ` completion bash)
# In zsh, the following will load openshift cli zsh completion:
source <(` + helpName + ` completion zsh)`
	return c
}
// NewCmdExec is a wrapper for the Kubernetes cli exec command
func NewCmdExec(fullName string, f *clientcmd.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer) *cobra.Command {
	c := kcmd.NewCmdExec(f.Factory, cmdIn, cmdOut, cmdErr)
	c.Use = "exec [options] POD [-c CONTAINER] -- COMMAND [args...]"
	c.Example = fmt.Sprintf(execExample, fullName)
	c.Long = execLong
	// Upstream still exposes --pod; flag it as deprecated in our help output.
	c.Flag("pod").Usage = c.Flag("pod").Usage + " (deprecated)"
	return c
}
const (
// portForwardLong is the long help text for the port-forward command.
portForwardLong = `Forward 1 or more local ports to a pod`
// portForwardExample holds usage examples; %[1]s is substituted with the CLI name.
portForwardExample = ` # Listens on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in the pod
%[1]s port-forward mypod 5000 6000
# Listens on port 8888 locally, forwarding to 5000 in the pod
%[1]s port-forward mypod 8888:5000
# Listens on a random port locally, forwarding to 5000 in the pod
%[1]s port-forward mypod :5000
# Listens on a random port locally, forwarding to 5000 in the pod
%[1]s port-forward mypod 0:5000`
)
// NewCmdPortForward is a wrapper for the Kubernetes cli port-forward command
func NewCmdPortForward(fullName string, f *clientcmd.Factory, out, errout io.Writer) *cobra.Command {
	c := kcmd.NewCmdPortForward(f.Factory, out, errout)
	c.Example = fmt.Sprintf(portForwardExample, fullName)
	c.Long = portForwardLong
	// Upstream still exposes --pod; flag it as deprecated in our help output.
	c.Flag("pod").Usage = c.Flag("pod").Usage + " (deprecated)"
	return c
}
const (
// describeLong is the long help text for the describe command.
describeLong = `Show details of a specific resource
This command joins many API calls together to form a detailed description of a
given resource.`
// describeExample holds usage examples; %[1]s is substituted with the CLI name.
describeExample = ` # Provide details about the ruby-22-centos7 image repository
%[1]s describe imageRepository ruby-22-centos7
# Provide details about the ruby-sample-build build configuration
%[1]s describe bc ruby-sample-build`
)
// NewCmdDescribe is a wrapper for the Kubernetes cli describe command
func NewCmdDescribe(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	c := kcmd.NewCmdDescribe(f.Factory, out)
	c.Example = fmt.Sprintf(describeExample, fullName)
	c.Long = describeLong
	// Offer shell completion for every resource type we can describe.
	c.ValidArgs = describe.DescribableResources()
	return c
}
const (
// proxyLong is the long help text for the proxy command.
proxyLong = `Run a proxy to the API server`
// proxyExample holds usage examples; %[1]s is substituted with the CLI name.
proxyExample = ` # Run a proxy to the api server on port 8011, serving static content from ./local/www/
%[1]s proxy --port=8011 --www=./local/www/
# Run a proxy to the api server on an arbitrary local port.
# The chosen port for the server will be output to stdout.
%[1]s proxy --port=0
# Run a proxy to the api server, changing the api prefix to my-api
# This makes e.g. the pods api available at localhost:8011/my-api/api/v1/pods/
%[1]s proxy --api-prefix=/my-api`
)
// NewCmdProxy is a wrapper for the Kubernetes cli proxy command
func NewCmdProxy(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	c := kcmd.NewCmdProxy(f.Factory, out)
	c.Example = fmt.Sprintf(proxyExample, fullName)
	c.Long = proxyLong
	return c
}
const (
// scaleLong is the long help text for the scale command.
scaleLong = `Set a new size for a deployment or replication controller
Scale also allows users to specify one or more preconditions for the scale action.
If --current-replicas or --resource-version is specified, it is validated before the
scale is attempted, and it is guaranteed that the precondition holds true when the
scale is sent to the server.
Note that scaling a deployment configuration with no deployments will update the
desired replicas in the configuration template.`
// scaleExample holds usage examples; %[1]s is substituted with the CLI name.
scaleExample = ` # Scale replication controller named 'foo' to 3.
%[1]s scale --replicas=3 replicationcontrollers foo
# If the replication controller named foo's current size is 2, scale foo to 3.
%[1]s scale --current-replicas=2 --replicas=3 replicationcontrollers foo
# Scale the latest deployment of 'bar'. In case of no deployment, bar's template
# will be scaled instead.
%[1]s scale --replicas=10 dc bar`
)
// NewCmdScale is a wrapper for the Kubernetes cli scale command
func NewCmdScale(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	c := kcmd.NewCmdScale(f.Factory, out)
	c.Short = "Change the number of pods in a deployment"
	c.Example = fmt.Sprintf(scaleExample, fullName)
	c.Long = scaleLong
	// Resource types offered for shell completion of the positional argument.
	c.ValidArgs = []string{"deploymentconfig", "job", "replicationcontroller"}
	return c
}
const (
// autoScaleLong is the long help text for the autoscale command.
autoScaleLong = `Autoscale a deployment config or replication controller.
Looks up a deployment config or replication controller by name and creates an autoscaler that uses
this deployment config or replication controller as a reference. An autoscaler can automatically
increase or decrease number of pods deployed within the system as needed.`
// autoScaleExample holds usage examples; %[1]s is substituted with the CLI name.
autoScaleExample = ` # Auto scale a deployment config "foo", with the number of pods between 2 to 10, target CPU utilization at a default value that server applies:
%[1]s autoscale dc/foo --min=2 --max=10
# Auto scale a replication controller "foo", with the number of pods between 1 to 5, target CPU utilization at 80%%
%[1]s autoscale rc/foo --max=5 --cpu-percent=80`
)
// NewCmdAutoscale is a wrapper for the Kubernetes cli autoscale command
func NewCmdAutoscale(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	c := kcmd.NewCmdAutoscale(f.Factory, out)
	c.Example = fmt.Sprintf(autoScaleExample, fullName)
	c.Long = autoScaleLong
	c.Short = "Autoscale a deployment config or replication controller"
	return c
}
const (
// runLong is the long help text for the run command.
runLong = `Create and run a particular image, possibly replicated
Creates a deployment config to manage the created container(s). You can choose to run in the
foreground for an interactive container execution. You may pass 'run/v1' to
--generator to create a replication controller instead of a deployment config.`
// runExample holds usage examples; %[1]s is substituted with the CLI name.
runExample = ` # Starts a single instance of nginx.
%[1]s run nginx --image=nginx
# Starts a replicated instance of nginx.
%[1]s run nginx --image=nginx --replicas=5
# Dry run. Print the corresponding API objects without creating them.
%[1]s run nginx --image=nginx --dry-run
# Start a single instance of nginx, but overload the spec of the replication
# controller with a partial set of values parsed from JSON.
%[1]s run nginx --image=nginx --overrides='{ "apiVersion": "v1", "spec": { ... } }'
# Start a single instance of nginx and keep it in the foreground, don't restart it if it exits.
%[1]s run -i --tty nginx --image=nginx --restart=Never`
// TODO: uncomment these when arguments are delivered upstream
// Start the nginx container using the default command, but use custom
// arguments (arg1 .. argN) for that command.
//%[1]s run nginx --image=nginx -- <arg1> <arg2> ... <argN>
// Start the nginx container using a different command and custom arguments
//%[1]s run nginx --image=nginx --command -- <cmd> <arg1> ... <argN>`
)
// NewCmdRun is a wrapper for the Kubernetes cli run command
func NewCmdRun(fullName string, f *clientcmd.Factory, in io.Reader, out, errout io.Writer) *cobra.Command {
	// Pick deploymentconfig/v1 when --restart=Always, run-pod/v1 otherwise.
	runOpts := &kcmd.RunOptions{
		DefaultRestartAlwaysGenerator: "deploymentconfig/v1",
		DefaultGenerator:              kcmdutil.RunPodV1GeneratorName,
	}
	c := kcmd.NewCmdRunWithOptions(f.Factory, runOpts, in, out, errout)
	c.Long = runLong
	c.Example = fmt.Sprintf(runExample, fullName)
	c.SuggestFor = []string{"image"}
	// Clear the upstream generator default so the options above decide it;
	// the Set error is deliberately ignored (the flag is known to exist).
	c.Flags().Set("generator", "")
	gen := c.Flag("generator")
	gen.Usage = "The name of the API generator to use. Default is 'deploymentconfig/v1' if --restart=Always, otherwise the default is 'run-pod/v1'."
	gen.DefValue = ""
	gen.Changed = false
	return c
}
const (
// attachLong is the long help text for the attach command.
attachLong = `Attach to a running container
Attach the current shell to a remote container, returning output or setting up a full
terminal session. Can be used to debug containers and invoke interactive commands.`
// attachExample holds usage examples; %[1]s is substituted with the CLI name.
attachExample = ` # Get output from running pod 123456-7890, using the first container by default
%[1]s attach 123456-7890
# Get output from ruby-container from pod 123456-7890
%[1]s attach 123456-7890 -c ruby-container
# Switch to raw terminal mode, sends stdin to 'bash' in ruby-container from pod 123456-780
# and sends stdout/stderr from 'bash' back to the client
%[1]s attach 123456-7890 -c ruby-container -i -t`
)
// NewCmdAttach is a wrapper for the Kubernetes cli attach command
func NewCmdAttach(fullName string, f *clientcmd.Factory, in io.Reader, out, errout io.Writer) *cobra.Command {
	c := kcmd.NewCmdAttach(f.Factory, in, out, errout)
	c.Example = fmt.Sprintf(attachExample, fullName)
	c.Long = attachLong
	return c
}
const (
// annotateLong is the long help text for the annotate command; %[1]s is the CLI name.
annotateLong = `Update the annotations on one or more resources
An annotation is a key/value pair that can hold larger (compared to a label),
and possibly not human-readable, data. It is intended to store non-identifying
auxiliary data, especially data manipulated by tools and system extensions. If
--overwrite is true, then existing annotations can be overwritten, otherwise
attempting to overwrite an annotation will result in an error. If
--resource-version is specified, then updates will use this resource version,
otherwise the existing resource-version will be used.
Run '%[1]s types' for a list of valid resources.`
// annotateExample holds usage examples; %[1]s is substituted with the CLI name.
annotateExample = ` # Update pod 'foo' with the annotation 'description' and the value 'my frontend'.
# If the same annotation is set multiple times, only the last value will be applied
%[1]s annotate pods foo description='my frontend'
# Update pod 'foo' with the annotation 'description' and the value
# 'my frontend running nginx', overwriting any existing value.
%[1]s annotate --overwrite pods foo description='my frontend running nginx'
# Update all pods in the namespace
%[1]s annotate pods --all description='my frontend running nginx'
# Update pod 'foo' only if the resource is unchanged from version 1.
%[1]s annotate pods foo description='my frontend running nginx' --resource-version=1
# Update pod 'foo' by removing an annotation named 'description' if it exists.
# Does not require the --overwrite flag.
%[1]s annotate pods foo description-`
)
// NewCmdAnnotate is a wrapper for the Kubernetes cli annotate command
func NewCmdAnnotate(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	c := kcmd.NewCmdAnnotate(f.Factory, out)
	// Unlike most wrappers, the long text itself references the CLI name.
	c.Example = fmt.Sprintf(annotateExample, fullName)
	c.Long = fmt.Sprintf(annotateLong, fullName)
	return c
}
const (
// labelLong is the long help text for the label command; %[1]d is the max label value length.
labelLong = `Update the labels on one or more resources
A valid label value is consisted of letters and/or numbers with a max length of %[1]d
characters. If --overwrite is true, then existing labels can be overwritten, otherwise
attempting to overwrite a label will result in an error. If --resource-version is
specified, then updates will use this resource version, otherwise the existing
resource-version will be used.`
// labelExample holds usage examples; %[1]s is substituted with the CLI name.
labelExample = ` # Update pod 'foo' with the label 'unhealthy' and the value 'true'.
%[1]s label pods foo unhealthy=true
# Update pod 'foo' with the label 'status' and the value 'unhealthy', overwriting any existing value.
%[1]s label --overwrite pods foo status=unhealthy
# Update all pods in the namespace
%[1]s label pods --all status=unhealthy
# Update pod 'foo' only if the resource is unchanged from version 1.
%[1]s label pods foo status=unhealthy --resource-version=1
# Update pod 'foo' by removing a label named 'bar' if it exists.
# Does not require the --overwrite flag.
%[1]s label pods foo bar-`
)
// NewCmdLabel is a wrapper for the Kubernetes cli label command
func NewCmdLabel(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	c := kcmd.NewCmdLabel(f.Factory, out)
	c.Example = fmt.Sprintf(labelExample, fullName)
	// The long text embeds the server-enforced maximum label value length.
	c.Long = fmt.Sprintf(labelLong, kvalidation.LabelValueMaxLength)
	return c
}
const (
// applyLong is the long help text for the apply command.
applyLong = `Apply a configuration to a resource by filename or stdin.
JSON and YAML formats are accepted.`
// applyExample holds usage examples; %[1]s is substituted with the CLI name.
applyExample = `# Apply the configuration in pod.json to a pod.
%[1]s apply -f ./pod.json
# Apply the JSON passed into stdin to a pod.
cat pod.json | %[1]s apply -f -`
)
func NewCmdApply(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
cmd := kcmd.NewCmdApply(f.Factory, out)
cmd.Long = applyLong
cmd.Example = fmt.Sprintf(applyExample, fullName)
return cmd
}
// Help text for the explain command.
const (
	explainLong = `Documentation of resources.
Possible resource types include: pods (po), services (svc),
replicationcontrollers (rc), nodes (no), events (ev), componentstatuses (cs),
limitranges (limits), persistentvolumes (pv), persistentvolumeclaims (pvc),
resourcequotas (quota), namespaces (ns) or endpoints (ep).`
	explainExample = `# Get the documentation of the resource and its fields
%[1]s explain pods
# Get the documentation of a specific field of a resource
%[1]s explain pods.spec.containers`
)

// NewCmdExplain is a wrapper for the Kubernetes cli explain command
func NewCmdExplain(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	explain := kcmd.NewCmdExplain(f.Factory, out)
	explain.Example = fmt.Sprintf(explainExample, fullName)
	explain.Long = explainLong
	return explain
}
// Help text for the convert command.
const (
	convertLong = `Convert config files between different API versions. Both YAML
and JSON formats are accepted.
The command takes filename, directory, or URL as input, and convert it into format
of version specified by --output-version flag. If target version is not specified or
not supported, convert to latest version.
The default output will be printed to stdout in YAML format. One can use -o option
to change to output destination.
`
	convertExample = `# Convert 'pod.yaml' to latest version and print to stdout.
%[1]s convert -f pod.yaml
# Convert the live state of the resource specified by 'pod.yaml' to the latest version
# and print to stdout in json format.
%[1]s convert -f pod.yaml --local -o json
# Convert all files under current directory to latest version and create them all.
%[1]s convert -f . | kubectl create -f -
`
)

// NewCmdConvert is a wrapper for the Kubernetes cli convert command
func NewCmdConvert(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	convert := kcmd.NewCmdConvert(f.Factory, out)
	convert.Example = fmt.Sprintf(convertExample, fullName)
	convert.Long = convertLong
	return convert
}
// Help text for the edit command.
const (
	editLong = `
Edit a resource from the default editor
The edit command allows you to directly edit any API resource you can retrieve via the
command line tools. It will open the editor defined by your OC_EDITOR, or EDITOR environment
variables, or fall back to 'vi' for Linux or 'notepad' for Windows. You can edit multiple
objects, although changes are applied one at a time. The command accepts filenames as well
as command line arguments, although the files you point to must be previously saved versions
of resources.
The files to edit will be output in the default API version, or a version specified
by --output-version. The default format is YAML - if you would like to edit in JSON
pass -o json. The flag --windows-line-endings can be used to force Windows line endings,
otherwise the default for your operating system will be used.
In the event an error occurs while updating, a temporary file will be created on disk
that contains your unapplied changes. The most common error when updating a resource
is another editor changing the resource on the server. When this occurs, you will have
to apply your changes to the newer version of the resource, or update your temporary
saved copy to include the latest resource version.`
	editExample = ` # Edit the service named 'docker-registry':
%[1]s edit svc/docker-registry
# Edit the DeploymentConfig named 'my-deployment':
%[1]s edit dc/my-deployment
# Use an alternative editor
OC_EDITOR="nano" %[1]s edit dc/my-deployment
# Edit the service 'docker-registry' in JSON using the v1beta3 API format:
%[1]s edit svc/docker-registry --output-version=v1beta3 -o json`
)

// NewCmdEdit is a wrapper for the Kubernetes cli edit command
func NewCmdEdit(fullName string, f *clientcmd.Factory, out, errout io.Writer) *cobra.Command {
	edit := kcmd.NewCmdEdit(f.Factory, out, errout)
	edit.Example = fmt.Sprintf(editExample, fullName)
	edit.Long = editLong
	return edit
}
// Help text for the config command; the example placeholders are the parent
// command name and this subcommand's name.
const (
	configLong = `
Manage the client config files
The client stores configuration in the current user's home directory (under the .kube directory as
config). When you login the first time, a new config file is created, and subsequent project changes with the
'project' command will set the current context. These subcommands allow you to manage the config directly.
Reference: https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/kubeconfig-file.md`
	configExample = ` # Change the config context to use
%[1]s %[2]s use-context my-context
# Set the value of a config preference
%[1]s %[2]s set preferences.some true`
)

// NewCmdConfig is a wrapper for the Kubernetes cli config command
func NewCmdConfig(parentName, name string) *cobra.Command {
	pathOptions := &kclientcmd.PathOptions{
		EnvVar:            cmdconfig.OpenShiftConfigPathEnvVar,
		ExplicitFileFlag:  cmdconfig.OpenShiftConfigFlagName,
		GlobalFile:        cmdconfig.RecommendedHomeFile,
		GlobalFileSubpath: cmdconfig.OpenShiftConfigHomeDirFileName,
		LoadingRules:      cmdconfig.NewOpenShiftClientConfigLoadingRules(),
	}
	// Keep config file paths exactly as the user supplied them.
	pathOptions.LoadingRules.DoNotResolvePaths = true

	cmd := config.NewCmdConfig(pathOptions, os.Stdout)
	cmd.Short = "Change configuration files for the client"
	cmd.Long = configLong
	cmd.Example = fmt.Sprintf(configExample, parentName, name)
	adjustCmdExamples(cmd, parentName, name)
	return cmd
}
// adjustCmdExamples rewrites the example text of cmd and all of its
// subcommands: every "kubectl" is replaced with parentName and each example
// line is trimmed and re-indented uniformly.
func adjustCmdExamples(cmd *cobra.Command, parentName string, name string) {
	// Fix up children first; each child inherits this command's name.
	for _, subCmd := range cmd.Commands() {
		adjustCmdExamples(subCmd, parentName, cmd.Name())
	}
	cmd.Example = strings.Replace(cmd.Example, "kubectl", parentName, -1)
	const tabbing = " "
	var lines []string
	scanner := bufio.NewScanner(strings.NewReader(cmd.Example))
	for scanner.Scan() {
		lines = append(lines, tabbing+strings.TrimSpace(scanner.Text()))
	}
	cmd.Example = strings.Join(lines, "\n")
}
|
package command
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestParseOptions verifies flag parsing: default values, multi-session mode
// (including the bookmark restriction), URL prefix normalization, and the
// flag prerequisites of --connect-backend.
func TestParseOptions(t *testing.T) {
	// No arguments: everything at its zero/default value.
	opts, err := ParseOptions([]string{})
	assert.NoError(t, err)
	assert.False(t, opts.Sessions)
	assert.Empty(t, opts.Prefix)
	assert.Empty(t, opts.ConnectToken)
	assert.Empty(t, opts.ConnectHeaders)
	assert.False(t, opts.DisableSSH)
	assert.False(t, opts.DisablePrettyJson)
	assert.False(t, opts.DisableConnectionIdleTimeout)
	assert.Equal(t, 180, opts.ConnectionIdleTimeout)
	assert.False(t, opts.Cors)
	assert.Equal(t, "*", opts.CorsOrigin)

	// Session mode; bookmarks are rejected when sessions are enabled.
	opts, err = ParseOptions([]string{"--sessions", "1"})
	assert.NoError(t, err)
	assert.True(t, opts.Sessions)
	opts, err = ParseOptions([]string{"--sessions", "1", "--bookmark", "test"})
	assert.EqualError(t, err, "--bookmark is not allowed in multi-session mode")

	// The URL prefix is normalized to end with a slash.
	for _, prefix := range []string{"pgweb", "pgweb/"} {
		opts, err = ParseOptions([]string{"--prefix", prefix})
		assert.NoError(t, err)
		assert.Equal(t, "pgweb/", opts.Prefix)
	}

	// connect-backend requires sessions and a connect token.
	opts, err = ParseOptions([]string{"--connect-backend", "test"})
	assert.EqualError(t, err, "--sessions flag must be set")
	opts, err = ParseOptions([]string{"--connect-backend", "test", "--sessions"})
	assert.EqualError(t, err, "--connect-token flag must be set")
	opts, err = ParseOptions([]string{"--connect-backend", "test", "--sessions", "--connect-token", "token"})
	assert.NoError(t, err)
}
Take out failing test due to new sessions behavior
package command
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestParseOptions verifies flag parsing: default values, multi-session
// mode, URL prefix normalization, and the flag prerequisites of
// --connect-backend.
// NOTE(review): this revision drops the "--bookmark in session mode" case —
// presumably removed because session behavior changed; confirm against the
// current ParseOptions implementation.
func TestParseOptions(t *testing.T) {
	// Test default behavior
	opts, err := ParseOptions([]string{})
	assert.NoError(t, err)
	assert.Equal(t, false, opts.Sessions)
	assert.Equal(t, "", opts.Prefix)
	assert.Equal(t, "", opts.ConnectToken)
	assert.Equal(t, "", opts.ConnectHeaders)
	assert.Equal(t, false, opts.DisableSSH)
	assert.Equal(t, false, opts.DisablePrettyJson)
	assert.Equal(t, false, opts.DisableConnectionIdleTimeout)
	assert.Equal(t, 180, opts.ConnectionIdleTimeout)
	assert.Equal(t, false, opts.Cors)
	assert.Equal(t, "*", opts.CorsOrigin)
	// Test sessions
	opts, err = ParseOptions([]string{"--sessions", "1"})
	assert.NoError(t, err)
	assert.Equal(t, true, opts.Sessions)
	// Test url prefix (normalized to end with a slash)
	opts, err = ParseOptions([]string{"--prefix", "pgweb"})
	assert.NoError(t, err)
	assert.Equal(t, "pgweb/", opts.Prefix)
	opts, err = ParseOptions([]string{"--prefix", "pgweb/"})
	assert.NoError(t, err)
	assert.Equal(t, "pgweb/", opts.Prefix)
	// Test connect backend options (requires --sessions and --connect-token)
	opts, err = ParseOptions([]string{"--connect-backend", "test"})
	assert.EqualError(t, err, "--sessions flag must be set")
	opts, err = ParseOptions([]string{"--connect-backend", "test", "--sessions"})
	assert.EqualError(t, err, "--connect-token flag must be set")
	opts, err = ParseOptions([]string{"--connect-backend", "test", "--sessions", "--connect-token", "token"})
	assert.NoError(t, err)
}
|
/*
Copyright 2016 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package osd
import (
"fmt"
"io"
"os"
"path"
"path/filepath"
"github.com/rook/rook/pkg/clusterd"
cephconfig "github.com/rook/rook/pkg/daemon/ceph/config"
)
// Well-known file names for OSD bootstrap artifacts.
const (
	keyringFileName     = "keyring"
	bootstrapOsdKeyring = "bootstrap-osd/ceph.keyring"
)

// get the bootstrap OSD root dir
func getBootstrapOSDDir(configDir string) string {
	return path.Join(configDir, "bootstrap-osd")
}

// getOSDRootDir returns the root directory of the OSD with the given id,
// e.g. <root>/osd3.
func getOSDRootDir(root string, osdID int) string {
	return filepath.Join(root, fmt.Sprintf("osd%d", osdID))
}

// get the full path to the given OSD's config file.
// Fixed to build the path with filepath.Join, consistent with the other
// helpers here, instead of hard-coding '/' in a Sprintf format.
func getOSDConfFilePath(osdDataPath, clusterName string) string {
	return filepath.Join(osdDataPath, clusterName+".config")
}

// get the full path to the given OSD's keyring
func getOSDKeyringPath(osdDataPath string) string {
	return filepath.Join(osdDataPath, keyringFileName)
}

// get the full path to the given OSD's journal
func getOSDJournalPath(osdDataPath string) string {
	return filepath.Join(osdDataPath, "journal")
}

// get the full path to the given OSD's temporary mon map
func getOSDTempMonMapPath(osdDataPath string) string {
	return filepath.Join(osdDataPath, "tmp", "activate.monmap")
}
// createOSDBootstrapKeyring creates a keyring for the bootstrap-osd client;
// the client gets only the limited "bootstrap-osd" mon profile.
func createOSDBootstrapKeyring(context *clusterd.Context, clusterName, rootDir string) error {
	keyringPath := path.Join(rootDir, bootstrapOsdKeyring)
	access := []string{"mon", "allow profile bootstrap-osd"}
	// Render the keyring file contents from the generated key.
	generateContents := func(key string) string {
		return fmt.Sprintf(bootstrapOSDKeyringTemplate, key)
	}
	return cephconfig.CreateKeyring(context, clusterName, "client.bootstrap-osd", keyringPath, access, generateContents)
}
// CopyBinariesForDaemon copies the "tini" and "rook" binaries to a shared volume at the target path.
// This is necessary for the filestore on a device scenario when rook needs to mount a directory
// in the same container as the ceph process so it can be unmounted upon exit.
func CopyBinariesForDaemon(target string) error {
	err := copyBinary("/usr/local/bin", target, "rook")
	if err == nil {
		err = copyBinary("/", target, "tini")
	}
	return err
}
// copyBinary copies sourceDir/filename to targetDir/filename and marks the
// copy executable (0755).
// Fix: skip the copy when the target already exists — unconditionally
// re-copying is wasted work on restart and can fail when the binary at the
// target is already in use.
func copyBinary(sourceDir, targetDir, filename string) error {
	sourcePath := path.Join(sourceDir, filename)
	targetPath := path.Join(targetDir, filename)
	logger.Infof("copying %s to %s", sourcePath, targetPath)
	// Check if the target path exists, and skip the copy if it does.
	if _, err := os.Stat(targetPath); err == nil {
		return nil
	}
	sourceFile, err := os.Open(sourcePath)
	if err != nil {
		return err
	}
	defer sourceFile.Close()
	destinationFile, err := os.Create(targetPath)
	if err != nil {
		return err
	}
	defer destinationFile.Close()
	if _, err := io.Copy(destinationFile, sourceFile); err != nil {
		return err
	}
	return os.Chmod(targetPath, 0755)
}
ceph: Check before copying binaries in osd pods (#3099)
ceph: Check before copying binaries in osd pods
/*
Copyright 2016 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package osd
import (
"fmt"
"io"
"os"
"path"
"path/filepath"
"github.com/rook/rook/pkg/clusterd"
cephconfig "github.com/rook/rook/pkg/daemon/ceph/config"
)
// Well-known file names for OSD bootstrap artifacts.
const (
	keyringFileName     = "keyring"
	bootstrapOsdKeyring = "bootstrap-osd/ceph.keyring"
)

// get the bootstrap OSD root dir
func getBootstrapOSDDir(configDir string) string {
	return path.Join(configDir, "bootstrap-osd")
}

// getOSDRootDir returns the root directory of the OSD with the given id,
// e.g. <root>/osd3.
func getOSDRootDir(root string, osdID int) string {
	return filepath.Join(root, fmt.Sprintf("osd%d", osdID))
}

// get the full path to the given OSD's config file.
// Fixed to build the path with filepath.Join, consistent with the other
// helpers here, instead of hard-coding '/' in a Sprintf format.
func getOSDConfFilePath(osdDataPath, clusterName string) string {
	return filepath.Join(osdDataPath, clusterName+".config")
}

// get the full path to the given OSD's keyring
func getOSDKeyringPath(osdDataPath string) string {
	return filepath.Join(osdDataPath, keyringFileName)
}

// get the full path to the given OSD's journal
func getOSDJournalPath(osdDataPath string) string {
	return filepath.Join(osdDataPath, "journal")
}

// get the full path to the given OSD's temporary mon map
func getOSDTempMonMapPath(osdDataPath string) string {
	return filepath.Join(osdDataPath, "tmp", "activate.monmap")
}
// createOSDBootstrapKeyring creates the keyring for the bootstrap-osd
// client, granting only the "bootstrap-osd" mon profile.
func createOSDBootstrapKeyring(context *clusterd.Context, clusterName, rootDir string) error {
	const username = "client.bootstrap-osd"
	var (
		keyringPath = path.Join(rootDir, bootstrapOsdKeyring)
		access      = []string{"mon", "allow profile bootstrap-osd"}
	)
	render := func(key string) string {
		return fmt.Sprintf(bootstrapOSDKeyringTemplate, key)
	}
	return cephconfig.CreateKeyring(context, clusterName, username, keyringPath, access, render)
}
// CopyBinariesForDaemon copies the "tini" and "rook" binaries to a shared volume at the target path.
// This is necessary for the filestore on a device scenario when rook needs to mount a directory
// in the same container as the ceph process so it can be unmounted upon exit.
func CopyBinariesForDaemon(target string) error {
	binaries := []struct{ dir, name string }{
		{"/usr/local/bin", "rook"},
		{"/", "tini"},
	}
	for _, b := range binaries {
		if err := copyBinary(b.dir, target, b.name); err != nil {
			return err
		}
	}
	return nil
}
// copyBinary installs sourceDir/filename into targetDir with mode 0755,
// skipping the copy entirely when the target file already exists.
func copyBinary(sourceDir, targetDir, filename string) error {
	src := path.Join(sourceDir, filename)
	dst := path.Join(targetDir, filename)
	logger.Infof("copying %s to %s", src, dst)
	// Check if the target path exists, and skip the copy if it does
	if _, err := os.Stat(dst); err == nil {
		return nil
	}
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()
	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer out.Close()
	if _, err = io.Copy(out, in); err != nil {
		return err
	}
	return os.Chmod(dst, 0755)
}
|
// Copyright 2017 The OpenSDS Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
This module implements the etcd database operation of data structure
defined in api module.
*/
package etcd
import (
"encoding/json"
"errors"
"fmt"
"reflect"
"sort"
"strconv"
"strings"
"time"
log "github.com/golang/glog"
c "github.com/opensds/opensds/pkg/context"
"github.com/opensds/opensds/pkg/model"
"github.com/opensds/opensds/pkg/utils"
"github.com/opensds/opensds/pkg/utils/constants"
"github.com/opensds/opensds/pkg/utils/urls"
uuid "github.com/satori/go.uuid"
)
// Default sort key plus the names and kinds of the built-in profiles.
const (
	defaultSortKey          = "ID"
	defaultBlockProfileName = "default_block"
	defaultFileProfileName  = "default_file"
	typeBlock               = "block"
	typeFile                = "file"
)

// validKey lists the query parameters that drive paging/sorting rather than
// field-based selection.
var validKey = []string{"limit", "offset", "sortDir", "sortKey"}

// Resource-kind labels, used as the keys of sortableKeysMap.
const (
	typeFileShares         string = "FileShares"
	typeFileShareSnapshots string = "FileShareSnapshots"
	typeDocks              string = "Docks"
	typePools              string = "Pools"
	typeProfiles           string = "Profiles"
	typeVolumes            string = "Volumes"
	typeAttachments        string = "Attachments"
	typeVolumeSnapshots    string = "VolumeSnapshots"
)

// sortableKeysMap enumerates, per resource kind, the upper-cased field names
// accepted as sortKey values.
var sortableKeysMap = map[string][]string{
	typeFileShares:         {"ID", "NAME", "STATUS", "AVAILABILITYZONE", "PROFILEID", "TENANTID", "SIZE", "POOLID", "DESCRIPTION"},
	typeFileShareSnapshots: {"ID", "NAME", "VOLUMEID", "STATUS", "USERID", "TENANTID", "SIZE"},
	typeDocks:              {"ID", "NAME", "STATUS", "ENDPOINT", "DRIVERNAME", "DESCRIPTION"},
	typePools:              {"ID", "NAME", "STATUS", "AVAILABILITYZONE", "DOCKID"},
	typeProfiles:           {"ID", "NAME", "DESCRIPTION"},
	typeVolumes:            {"ID", "NAME", "STATUS", "AVAILABILITYZONE", "PROFILEID", "TENANTID", "SIZE", "POOLID", "DESCRIPTION", "GROUPID"},
	typeAttachments:        {"ID", "VOLUMEID", "STATUS", "USERID", "TENANTID", "SIZE"},
	typeVolumeSnapshots:    {"ID", "NAME", "VOLUMEID", "STATUS", "USERID", "TENANTID", "SIZE"},
}
// IsAdminContext reports whether the request context carries admin
// privileges.
func IsAdminContext(ctx *c.Context) bool {
	return ctx.IsAdmin
}

// AuthorizeProjectContext reports whether the request context belongs to the
// given tenant.
func AuthorizeProjectContext(ctx *c.Context, tenantId string) bool {
	return ctx.TenantId == tenantId
}

// NewClient builds an etcd-backed db client talking to the given endpoints.
func NewClient(edps []string) *Client {
	return &Client{
		clientInterface: Init(edps),
	}
}

// Client wraps the low-level etcd client interface with OpenSDS resource
// CRUD helpers.
type Client struct {
	clientInterface
}

// Parameter carries the paging window [beginIdx, endIdx) and the sort
// settings derived from query parameters.
type Parameter struct {
	beginIdx, endIdx int
	sortDir, sortKey string
}
// IsInArray reports whether e equals (case-insensitively) any element of s.
func (c *Client) IsInArray(e string, s []string) bool {
	for i := range s {
		if strings.EqualFold(e, s[i]) {
			return true
		}
	}
	return false
}
// SelectOrNot reports whether m contains any key other than the recognized
// paging/sorting parameters, i.e. whether field-based selection is required.
func (c *Client) SelectOrNot(m map[string][]string) bool {
	for k := range m {
		if utils.Contained(k, validKey) {
			continue
		}
		return true
	}
	return false
}
// GetLimit extracts the "limit" query parameter, falling back to the default
// when it is absent or not a non-negative integer.
func (c *Client) GetLimit(m map[string][]string) int {
	v, ok := m["limit"]
	if !ok {
		log.Warning("The parameter limit is not present,use default value instead:50")
		return constants.DefaultLimit
	}
	limit, err := strconv.Atoi(v[0])
	if err != nil || limit < 0 {
		log.Warning("Invalid input limit:", limit, ",use default value instead:50")
		return constants.DefaultLimit
	}
	return limit
}

// GetOffset extracts the "offset" query parameter, falling back to the
// default when it is absent, negative, non-numeric, or beyond size.
func (c *Client) GetOffset(m map[string][]string, size int) int {
	v, ok := m["offset"]
	if !ok {
		log.Warning("The parameter offset is not present,use default value instead:0")
		return constants.DefaultOffset
	}
	offset, err := strconv.Atoi(v[0])
	if err != nil || offset < 0 || offset > size {
		log.Warning("Invalid input offset or input offset is out of bounds:", offset, ",use default value instead:0")
		return constants.DefaultOffset
	}
	return offset
}

// GetSortDir extracts the "sortDir" query parameter; only "asc"/"desc"
// (case-insensitive) are accepted, anything else yields the default.
func (c *Client) GetSortDir(m map[string][]string) string {
	v, ok := m["sortDir"]
	if !ok {
		log.Warning("The parameter sortDir is not present,use default value instead:desc")
		return constants.DefaultSortDir
	}
	sortDir := v[0]
	if !strings.EqualFold(sortDir, "desc") && !strings.EqualFold(sortDir, "asc") {
		log.Warning("Invalid input sortDir:", sortDir, ",use default value instead:desc")
		return constants.DefaultSortDir
	}
	return sortDir
}

// GetSortKey extracts and upper-cases the "sortKey" query parameter; a key
// not present in sortKeys yields the default ("ID").
func (c *Client) GetSortKey(m map[string][]string, sortKeys []string) string {
	v, ok := m["sortKey"]
	if !ok {
		log.Warning("The parameter sortKey is not present,use default value instead:ID")
		return defaultSortKey
	}
	sortKey := strings.ToUpper(v[0])
	if !c.IsInArray(sortKey, sortKeys) {
		log.Warning("Invalid input sortKey:", sortKey, ",use default value instead:ID")
		return defaultSortKey
	}
	return sortKey
}
// FilterAndSort filters src by the query parameters, sorts it when a valid
// sortKey is given, then slices out the requested page.
func (c *Client) FilterAndSort(src interface{}, params map[string][]string, sortableKeys []string) interface{} {
	var ret interface{}
	ret = utils.Filter(src, params)
	if len(params["sortKey"]) > 0 && utils.ContainsIgnoreCase(sortableKeys, params["sortKey"][0]) {
		ret = utils.Sort(ret, params["sortKey"][0], c.GetSortDir(params))
	}
	// NOTE(review): the offset is validated against the length of the
	// ORIGINAL src, not the filtered result — confirm that is intended.
	ret = utils.Slice(ret, c.GetOffset(params, reflect.ValueOf(src).Len()), c.GetLimit(params))
	return ret
}
// ParameterFilter derives the paging window and sort settings from the query
// parameters for a collection of the given size.
func (c *Client) ParameterFilter(m map[string][]string, size int, sortKeys []string) *Parameter {
	limit := c.GetLimit(m)
	offset := c.GetOffset(m, size)
	begin, end := offset, offset+limit
	// When no limit was specified (default) or the window overruns the
	// collection, clamp to size so all remaining items are returned.
	if limit == constants.DefaultLimit || end > size {
		end = size
	}
	return &Parameter{begin, end, c.GetSortDir(m), c.GetSortKey(m, sortKeys)}
}
// ************* FileShare code block *************

// fileshare_sortKey selects the field FileShareSlice.Less compares on; it is
// assigned by SortFileShares immediately before sorting.
// NOTE(review): package-level mutable state — concurrent sorts would race;
// confirm callers are serialized.
var fileshare_sortKey string

// FileShareSlice implements sort.Interface for fileshare specs, ordered by
// the field named in fileshare_sortKey.
type FileShareSlice []*model.FileShareSpec

func (fileshare FileShareSlice) Len() int { return len(fileshare) }
func (fileshare FileShareSlice) Swap(i, j int) {
	fileshare[i], fileshare[j] = fileshare[j], fileshare[i]
}

// Less orders ascending by the field named in fileshare_sortKey; an
// unrecognized key makes all elements compare equal (always false).
func (fileshare FileShareSlice) Less(i, j int) bool {
	switch fileshare_sortKey {
	case "ID":
		return fileshare[i].Id < fileshare[j].Id
	case "NAME":
		return fileshare[i].Name < fileshare[j].Name
	case "STATUS":
		return fileshare[i].Status < fileshare[j].Status
	case "AVAILABILITYZONE":
		return fileshare[i].AvailabilityZone < fileshare[j].AvailabilityZone
	case "PROFILEID":
		return fileshare[i].ProfileId < fileshare[j].ProfileId
	case "TENANTID":
		return fileshare[i].TenantId < fileshare[j].TenantId
	case "SIZE":
		return fileshare[i].Size < fileshare[j].Size
	case "POOLID":
		return fileshare[i].PoolId < fileshare[j].PoolId
	case "DESCRIPTION":
		return fileshare[i].Description < fileshare[j].Description
	}
	return false
}
// FindFileShareValue returns the string form of field k of p, or "" when the
// field name is not recognized. Size is rendered in base 10.
func (c *Client) FindFileShareValue(k string, p *model.FileShareSpec) string {
	fields := map[string]string{
		"Id":               p.Id,
		"CreatedAt":        p.CreatedAt,
		"UpdatedAt":        p.UpdatedAt,
		"TenantId":         p.TenantId,
		"UserId":           p.UserId,
		"Name":             p.Name,
		"Description":      p.Description,
		"AvailabilityZone": p.AvailabilityZone,
		"Size":             strconv.FormatInt(p.Size, 10),
		"Status":           p.Status,
		"PoolId":           p.PoolId,
		"ProfileId":        p.ProfileId,
	}
	// Missing keys fall through to the map's zero value, "".
	return fields[k]
}
// CreateFileShareAcl persists a new fileshare access rule, rejecting a
// duplicate rule for the same AccessTo address.
func (c *Client) CreateFileShareAcl(ctx *c.Context, fshare *model.FileShareAclSpec) (*model.FileShareAclSpec, error) {
	acls, err := c.ListFileSharesAcl(ctx)
	if err != nil {
		log.Error("failed to list acls")
		return nil, err
	}
	for _, acl := range acls {
		if acl.AccessTo == fshare.AccessTo {
			errstr := "acl already exists for this ip: " + acl.AccessTo + ". If you want to set new acl, first delete the existing one"
			log.Error(errstr)
			// Use errors.New: fmt.Errorf(errstr) treats the message as a
			// format string (vet violation; a '%' in AccessTo would garble it).
			return nil, errors.New(errstr)
		}
	}
	fshare.TenantId = ctx.TenantId
	fshareBody, err := json.Marshal(fshare)
	if err != nil {
		return nil, err
	}
	dbReq := &Request{
		Url:     urls.GenerateFileShareAclURL(urls.Etcd, ctx.TenantId, fshare.Id),
		Content: string(fshareBody),
	}
	dbRes := c.Create(dbReq)
	if dbRes.Status != "Success" {
		log.Error("when create fileshare access rules in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return fshare, nil
}
// UpdateFileShareAcl refreshes the metadata and update timestamp of an
// existing fileshare access rule and writes it back to etcd.
func (c *Client) UpdateFileShareAcl(ctx *c.Context, acl *model.FileShareAclSpec) (*model.FileShareAclSpec, error) {
	result, err := c.GetFileShareAcl(ctx, acl.Id)
	if err != nil {
		return nil, err
	}
	// Authorize before doing any work: non-admins may only modify resources
	// of their own tenant. (Also fixes the "opertaion" typo in the message.)
	if !IsAdminContext(ctx) && !AuthorizeProjectContext(ctx, result.TenantId) {
		return nil, fmt.Errorf("operation is not permitted")
	}
	// Set update time
	result.UpdatedAt = time.Now().Format(constants.TimeFormat)
	result.Metadata = acl.Metadata
	jsonBody, err := json.Marshal(result)
	if err != nil {
		return nil, err
	}
	dbReq := &Request{
		Url:        urls.GenerateFileShareAclURL(urls.Etcd, result.TenantId, acl.Id),
		NewContent: string(jsonBody),
	}
	dbRes := c.Update(dbReq)
	if dbRes.Status != "Success" {
		log.Error("when update fileshare acl in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return result, nil
}
// CreateFileShare persists a new fileshare record under the caller's tenant.
func (c *Client) CreateFileShare(ctx *c.Context, fshare *model.FileShareSpec) (*model.FileShareSpec, error) {
	fshare.TenantId = ctx.TenantId
	body, err := json.Marshal(fshare)
	if err != nil {
		return nil, err
	}
	res := c.Create(&Request{
		Url:     urls.GenerateFileShareURL(urls.Etcd, ctx.TenantId, fshare.Id),
		Content: string(body),
	})
	if res.Status != "Success" {
		log.Error("when create fileshare in db:", res.Error)
		return nil, errors.New(res.Error)
	}
	return fshare, nil
}
// SortFileShares sorts shares in place by p.sortKey/p.sortDir and returns
// the same slice.
//
// Fixes two defects: the key was stored into volume_sortKey (the volume
// slice's key) instead of fileshare_sortKey, so FileShareSlice.Less compared
// on a stale key; and the direction test matched "dsc", which GetSortDir can
// never return (it only yields "asc"/"desc"), so every sort came out
// reversed.
func (c *Client) SortFileShares(shares []*model.FileShareSpec, p *Parameter) []*model.FileShareSpec {
	// FileShareSlice.Less reads this package-level key.
	fileshare_sortKey = p.sortKey
	if strings.EqualFold(p.sortDir, "asc") {
		sort.Sort(FileShareSlice(shares))
	} else {
		sort.Sort(sort.Reverse(FileShareSlice(shares)))
	}
	return shares
}
// ListFileSharesAclWithFilter returns all fileshare access rules visible to
// the caller.
// NOTE(review): the filter parameter m is currently ignored — no filtering,
// sorting or pagination is applied to ACLs (sortableKeysMap has no ACL
// entry). Confirm whether this is intentional.
func (c *Client) ListFileSharesAclWithFilter(ctx *c.Context, m map[string][]string) ([]*model.FileShareAclSpec, error) {
	fileshares, err := c.ListFileSharesAcl(ctx)
	if err != nil {
		log.Error("list fileshare failed: ", err)
		return nil, err
	}
	return fileshares, nil
}
// ListFileSharesAcl returns every fileshare access rule visible to the
// caller (all tenants when the context is admin).
func (c *Client) ListFileSharesAcl(ctx *c.Context) ([]*model.FileShareAclSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateFileShareAclURL(urls.Etcd, ctx.TenantId),
	}
	// Admin user should get all fileshares including the fileshares whose tenant is not admin.
	if IsAdminContext(ctx) {
		dbReq.Url = urls.GenerateFileShareAclURL(urls.Etcd, "")
	}
	dbRes := c.List(dbReq)
	if dbRes.Status != "Success" {
		log.Error("when list fileshares in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var fileshares = []*model.FileShareAclSpec{}
	if len(dbRes.Message) == 0 {
		return fileshares, nil
	}
	for _, msg := range dbRes.Message {
		var share = &model.FileShareAclSpec{}
		if err := json.Unmarshal([]byte(msg), share); err != nil {
			// Log and return the actual unmarshal error; dbRes.Error is
			// empty on a successful response, so the previous code
			// returned a blank error here.
			log.Error("when parsing fileshare acl in db:", err)
			return nil, err
		}
		fileshares = append(fileshares, share)
	}
	return fileshares, nil
}
// ListFileShareAclsByShareId returns the access rules attached to the given
// fileshare.
func (c *Client) ListFileShareAclsByShareId(ctx *c.Context, fileshareId string) ([]*model.FileShareAclSpec, error) {
	all, err := c.ListFileSharesAcl(ctx)
	if err != nil {
		return nil, err
	}
	var matched []*model.FileShareAclSpec
	for _, a := range all {
		if a.FileShareId != fileshareId {
			continue
		}
		matched = append(matched, a)
	}
	return matched, nil
}

// ListSnapshotsByShareId returns the snapshots taken of the given fileshare.
func (c *Client) ListSnapshotsByShareId(ctx *c.Context, fileshareId string) ([]*model.FileShareSnapshotSpec, error) {
	all, err := c.ListFileShareSnapshots(ctx)
	if err != nil {
		return nil, err
	}
	var matched []*model.FileShareSnapshotSpec
	for _, s := range all {
		if s.FileShareId != fileshareId {
			continue
		}
		matched = append(matched, s)
	}
	return matched, nil
}
// ListFileSharesWithFilter lists fileshares and applies the filtering,
// sorting and pagination requested via the query parameters.
func (c *Client) ListFileSharesWithFilter(ctx *c.Context, m map[string][]string) ([]*model.FileShareSpec, error) {
	fileshares, err := c.ListFileShares(ctx)
	if err != nil {
		log.Error("list fileshare failed: ", err)
		return nil, err
	}
	filtered := c.FilterAndSort(fileshares, m, sortableKeysMap[typeFileShares])
	items := filtered.([]interface{})
	res := make([]*model.FileShareSpec, 0, len(items))
	for _, it := range items {
		res = append(res, it.(*model.FileShareSpec))
	}
	return res, nil
}
// ListFileShares returns every fileshare visible to the caller (all tenants
// when the context is admin).
func (c *Client) ListFileShares(ctx *c.Context) ([]*model.FileShareSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateFileShareURL(urls.Etcd, ctx.TenantId),
	}
	// Admin user should get all fileshares including the fileshares whose tenant is not admin.
	if IsAdminContext(ctx) {
		dbReq.Url = urls.GenerateFileShareURL(urls.Etcd, "")
	}
	dbRes := c.List(dbReq)
	if dbRes.Status != "Success" {
		log.Error("when list fileshares in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var fileshares = []*model.FileShareSpec{}
	if len(dbRes.Message) == 0 {
		return fileshares, nil
	}
	for _, msg := range dbRes.Message {
		var share = &model.FileShareSpec{}
		if err := json.Unmarshal([]byte(msg), share); err != nil {
			// Log and return the actual unmarshal error; dbRes.Error is
			// empty on a successful response, so the previous code
			// returned a blank error here.
			log.Error("when parsing fileshare in db:", err)
			return nil, err
		}
		fileshares = append(fileshares, share)
	}
	return fileshares, nil
}
// ListFileSharesByProfileId returns the names of all fileshares bound to the
// given profile.
func (c *Client) ListFileSharesByProfileId(ctx *c.Context, prfId string) ([]string, error) {
	shares, err := c.ListFileShares(ctx)
	if err != nil {
		return nil, err
	}
	var names []string
	for _, s := range shares {
		if s.ProfileId == prfId {
			names = append(names, s.Name)
		}
	}
	return names, nil
}
// GetFileShareAcl fetches the access rule with the given id.
// Non-admin callers get the tenant-scoped lookup as-is; for admins a failed
// direct lookup falls back to scanning all tenants' rules.
func (c *Client) GetFileShareAcl(ctx *c.Context, aclID string) (*model.FileShareAclSpec, error) {
	acl, err := c.getFileShareAcl(ctx, aclID)
	// Return immediately when the caller is not admin (no cross-tenant
	// access) or the direct lookup already succeeded.
	if !IsAdminContext(ctx) || err == nil {
		return acl, err
	}
	acls, err := c.ListFileSharesAcl(ctx)
	if err != nil {
		return nil, err
	}
	for _, f := range acls {
		if f.Id == aclID {
			return f, nil
		}
	}
	return nil, fmt.Errorf("specified fileshare acl(%s) can't find", aclID)
}
// getFileShareAcl fetches one access rule by id within the caller's tenant.
func (c *Client) getFileShareAcl(ctx *c.Context, aclID string) (*model.FileShareAclSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateFileShareAclURL(urls.Etcd, ctx.TenantId, aclID),
	}
	dbRes := c.Get(dbReq)
	if dbRes.Status != "Success" {
		log.Error("when get fileshare acl in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	// Guard against an empty payload before indexing Message[0] (previously
	// this would panic).
	if len(dbRes.Message) == 0 {
		return nil, fmt.Errorf("fileshare acl(%s) is not found", aclID)
	}
	var acl = &model.FileShareAclSpec{}
	if err := json.Unmarshal([]byte(dbRes.Message[0]), acl); err != nil {
		// Return the actual unmarshal error; dbRes.Error is empty here.
		log.Error("when parsing fileshare acl in db:", err)
		return nil, err
	}
	return acl, nil
}
// GetFileShare fetches the fileshare with the given id.
// Non-admin callers get the tenant-scoped lookup as-is; for admins a failed
// direct lookup falls back to scanning all tenants' fileshares.
func (c *Client) GetFileShare(ctx *c.Context, fshareID string) (*model.FileShareSpec, error) {
	fshare, err := c.getFileShare(ctx, fshareID)
	// Return immediately when the caller is not admin (no cross-tenant
	// access) or the direct lookup already succeeded.
	if !IsAdminContext(ctx) || err == nil {
		return fshare, err
	}
	fshares, err := c.ListFileShares(ctx)
	if err != nil {
		return nil, err
	}
	for _, f := range fshares {
		if f.Id == fshareID {
			return f, nil
		}
	}
	return nil, fmt.Errorf("specified fileshare(%s) can't find", fshareID)
}
// getFileShare fetches one fileshare by id within the caller's tenant.
func (c *Client) getFileShare(ctx *c.Context, fshareID string) (*model.FileShareSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateFileShareURL(urls.Etcd, ctx.TenantId, fshareID),
	}
	dbRes := c.Get(dbReq)
	if dbRes.Status != "Success" {
		log.Error("when get fileshare in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	// Guard against an empty payload before indexing Message[0] (previously
	// this would panic).
	if len(dbRes.Message) == 0 {
		return nil, fmt.Errorf("fileshare(%s) is not found", fshareID)
	}
	var fshare = &model.FileShareSpec{}
	if err := json.Unmarshal([]byte(dbRes.Message[0]), fshare); err != nil {
		// Return the actual unmarshal error; dbRes.Error is empty here.
		log.Error("when parsing fileshare in db:", err)
		return nil, err
	}
	return fshare, nil
}
// UpdateFileShare applies the non-empty fields of fshare to the stored
// record, stamps UpdatedAt, and writes the result back to etcd.
func (c *Client) UpdateFileShare(ctx *c.Context, fshare *model.FileShareSpec) (*model.FileShareSpec, error) {
	result, err := c.GetFileShare(ctx, fshare.Id)
	if err != nil {
		return nil, err
	}
	// Authorize before mutating anything: non-admins may only update
	// fileshares of their own tenant. (Also fixes the "opertaion" typo.)
	if !IsAdminContext(ctx) && !AuthorizeProjectContext(ctx, result.TenantId) {
		return nil, fmt.Errorf("operation is not permitted")
	}
	// Only overwrite fields the caller actually supplied.
	if fshare.Name != "" {
		result.Name = fshare.Name
	}
	if fshare.Description != "" {
		result.Description = fshare.Description
	}
	if fshare.ExportLocations != nil {
		result.ExportLocations = fshare.ExportLocations
	}
	if fshare.Protocols != nil {
		result.Protocols = fshare.Protocols
	}
	if fshare.Metadata != nil {
		result.Metadata = fshare.Metadata
	}
	if fshare.Status != "" {
		result.Status = fshare.Status
	}
	if fshare.PoolId != "" {
		result.PoolId = fshare.PoolId
	}
	// Set update time
	result.UpdatedAt = time.Now().Format(constants.TimeFormat)
	log.V(5).Infof("update file share object %+v into db", result)
	body, err := json.Marshal(result)
	if err != nil {
		return nil, err
	}
	dbReq := &Request{
		Url:        urls.GenerateFileShareURL(urls.Etcd, result.TenantId, fshare.Id),
		NewContent: string(body),
	}
	dbRes := c.Update(dbReq)
	if dbRes.Status != "Success" {
		log.Error("when update fileshare in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return result, nil
}
// DeleteFileShareAcl removes a file share ACL record. Admin callers may
// delete ACLs owned by other tenants, so the owning tenant id is resolved
// from the ACL itself in that case.
func (c *Client) DeleteFileShareAcl(ctx *c.Context, aclID string) error {
	// If an admin want to access other tenant's resource just fake other's tenantId.
	tenantId := ctx.TenantId
	if IsAdminContext(ctx) {
		fshare, err := c.GetFileShareAcl(ctx, aclID)
		if err != nil {
			log.Error(err)
			return err
		}
		tenantId = fshare.TenantId
	}
	dbReq := &Request{
		Url: urls.GenerateFileShareAclURL(urls.Etcd, tenantId, aclID),
	}
	dbRes := c.Delete(dbReq)
	if dbRes.Status != "Success" {
		// Log mentions the ACL (the original message said "fileshare").
		log.Error("when delete fileshare acl in db:", dbRes.Error)
		return errors.New(dbRes.Error)
	}
	return nil
}
// DeleteFileShare removes a file share record from etcd. Admin callers may
// delete shares owned by other tenants; the owning tenant id is resolved
// from the share itself in that case.
func (c *Client) DeleteFileShare(ctx *c.Context, fileshareID string) error {
	tenantId := ctx.TenantId
	if IsAdminContext(ctx) {
		// Resolve the real owner so the correct etcd key is deleted.
		fshare, err := c.GetFileShare(ctx, fileshareID)
		if err != nil {
			log.Error(err)
			return err
		}
		tenantId = fshare.TenantId
	}
	dbRes := c.Delete(&Request{
		Url: urls.GenerateFileShareURL(urls.Etcd, tenantId, fileshareID),
	})
	if dbRes.Status != "Success" {
		log.Error("when delete fileshare in db:", dbRes.Error)
		return errors.New(dbRes.Error)
	}
	return nil
}
// CreateFileShareSnapshot stores a new file share snapshot record under the
// caller's tenant.
func (c *Client) CreateFileShareSnapshot(ctx *c.Context, snp *model.FileShareSnapshotSpec) (*model.FileShareSnapshotSpec, error) {
	snp.TenantId = ctx.TenantId
	body, err := json.Marshal(snp)
	if err != nil {
		return nil, err
	}
	dbRes := c.Create(&Request{
		Url:     urls.GenerateFileShareSnapshotURL(urls.Etcd, ctx.TenantId, snp.Id),
		Content: string(body),
	})
	if dbRes.Status != "Success" {
		log.Error("when create fileshare snapshot in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return snp, nil
}
// GetFileShareSnapshot retrieves a snapshot by id. Admin callers fall back to
// scanning all snapshots when the direct tenant-scoped lookup fails.
func (c *Client) GetFileShareSnapshot(ctx *c.Context, snpID string) (*model.FileShareSnapshotSpec, error) {
	snap, err := c.getFileShareSnapshot(ctx, snpID)
	if err == nil || !IsAdminContext(ctx) {
		return snap, err
	}
	// Admin fallback: the snapshot may belong to another tenant.
	all, listErr := c.ListFileShareSnapshots(ctx)
	if listErr != nil {
		return nil, listErr
	}
	for _, candidate := range all {
		if candidate.Id == snpID {
			return candidate, nil
		}
	}
	return nil, fmt.Errorf("specified fileshare snapshot(%s) can't find", snpID)
}
// getFileShareSnapshot fetches a single snapshot from etcd under the
// caller's tenant key space.
func (c *Client) getFileShareSnapshot(ctx *c.Context, snpID string) (*model.FileShareSnapshotSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateFileShareSnapshotURL(urls.Etcd, ctx.TenantId, snpID),
	}
	dbRes := c.Get(dbReq)
	if dbRes.Status != "Success" {
		// The original message said "fileshare attachment" by mistake.
		log.Error("when get fileshare snapshot in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var fs = &model.FileShareSnapshotSpec{}
	// Report the actual unmarshal error; dbRes.Error is empty on success.
	if err := json.Unmarshal([]byte(dbRes.Message[0]), fs); err != nil {
		log.Error("when parsing fileshare snapshot in db:", err)
		return nil, err
	}
	return fs, nil
}
// ListFileShareSnapshots returns the caller's snapshots; admin callers get
// the snapshots of every tenant.
func (c *Client) ListFileShareSnapshots(ctx *c.Context) ([]*model.FileShareSnapshotSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateFileShareSnapshotURL(urls.Etcd, ctx.TenantId),
	}
	if IsAdminContext(ctx) {
		// Empty tenant widens the key prefix to all tenants.
		dbReq.Url = urls.GenerateFileShareSnapshotURL(urls.Etcd, "")
	}
	dbRes := c.List(dbReq)
	if dbRes.Status != "Success" {
		log.Error("when list fileshare snapshots in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var fss = []*model.FileShareSnapshotSpec{}
	if len(dbRes.Message) == 0 {
		return fss, nil
	}
	for _, msg := range dbRes.Message {
		var fs = &model.FileShareSnapshotSpec{}
		// Report the actual unmarshal error; dbRes.Error is empty after a
		// successful List.
		if err := json.Unmarshal([]byte(msg), fs); err != nil {
			log.Error("When parsing fileshare snapshot in db:", err)
			return nil, err
		}
		fss = append(fss, fs)
	}
	return fss, nil
}
// ListFileShareSnapshotsWithFilter lists snapshots and then applies the
// request's filter/sort parameters.
func (c *Client) ListFileShareSnapshotsWithFilter(ctx *c.Context, m map[string][]string) ([]*model.FileShareSnapshotSpec, error) {
	snapshots, err := c.ListFileShareSnapshots(ctx)
	if err != nil {
		log.Error("list fileshareSnapshots failed: ", err)
		return nil, err
	}
	filtered := c.FilterAndSort(snapshots, m, sortableKeysMap[typeFileShareSnapshots])
	res := []*model.FileShareSnapshotSpec{}
	for _, item := range filtered.([]interface{}) {
		res = append(res, item.(*model.FileShareSnapshotSpec))
	}
	return res, nil
}
// UpdateFileShareSnapshot merges the caller-supplied mutable fields into the
// stored snapshot and persists the result. Returns the updated record.
func (c *Client) UpdateFileShareSnapshot(ctx *c.Context, snpID string, snp *model.FileShareSnapshotSpec) (*model.FileShareSnapshotSpec, error) {
	result, err := c.GetFileShareSnapshot(ctx, snpID)
	if err != nil {
		return nil, err
	}
	if snp.Name != "" {
		result.Name = snp.Name
	}
	if snp.Description != "" {
		result.Description = snp.Description
	}
	if snp.Status != "" {
		result.Status = snp.Status
	}
	if snp.SnapshotSize > 0 {
		result.SnapshotSize = snp.SnapshotSize
	}
	// Set update time
	result.UpdatedAt = time.Now().Format(constants.TimeFormat)
	// NOTE(review): Metadata is copied unconditionally (even when nil),
	// unlike the other fields — confirm this is intended.
	result.Metadata = snp.Metadata
	atcBody, err := json.Marshal(result)
	if err != nil {
		return nil, err
	}
	// Non-admin callers may only touch resources in their own project.
	if !IsAdminContext(ctx) && !AuthorizeProjectContext(ctx, result.TenantId) {
		return nil, fmt.Errorf("operation is not permitted")
	}
	dbReq := &Request{
		Url:        urls.GenerateFileShareSnapshotURL(urls.Etcd, result.TenantId, snpID),
		NewContent: string(atcBody),
	}
	dbRes := c.Update(dbReq)
	if dbRes.Status != "Success" {
		log.Error("when update fileshare snapshot in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return result, nil
}
// DeleteFileShareSnapshot removes a snapshot record from etcd. Admin callers
// may delete snapshots owned by other tenants; the owning tenant id is
// resolved from the snapshot itself in that case.
func (c *Client) DeleteFileShareSnapshot(ctx *c.Context, snpID string) error {
	tenantId := ctx.TenantId
	if IsAdminContext(ctx) {
		// Resolve the real owner so the correct etcd key is deleted.
		snap, err := c.GetFileShareSnapshot(ctx, snpID)
		if err != nil {
			log.Error(err)
			return err
		}
		tenantId = snap.TenantId
	}
	dbRes := c.Delete(&Request{
		Url: urls.GenerateFileShareSnapshotURL(urls.Etcd, tenantId, snpID),
	})
	if dbRes.Status != "Success" {
		log.Error("when delete fileshare snapshot in db:", dbRes.Error)
		return errors.New(dbRes.Error)
	}
	return nil
}
// ********************** End Of FileShare *********************
// CreateDock persists a dock record, generating an id and a creation
// timestamp when they are not supplied.
func (c *Client) CreateDock(ctx *c.Context, dck *model.DockSpec) (*model.DockSpec, error) {
	if dck.Id == "" {
		dck.Id = uuid.NewV4().String()
	}
	if dck.CreatedAt == "" {
		dck.CreatedAt = time.Now().Format(constants.TimeFormat)
	}
	body, err := json.Marshal(dck)
	if err != nil {
		return nil, err
	}
	dbRes := c.Create(&Request{
		Url:     urls.GenerateDockURL(urls.Etcd, "", dck.Id),
		Content: string(body),
	})
	if dbRes.Status != "Success" {
		log.Error("when create dock in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return dck, nil
}
// GetDock fetches a dock record by id. Docks are global resources, so no
// tenant id is used in the key.
func (c *Client) GetDock(ctx *c.Context, dckID string) (*model.DockSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateDockURL(urls.Etcd, "", dckID),
	}
	dbRes := c.Get(dbReq)
	if dbRes.Status != "Success" {
		log.Error("when get dock in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var dck = &model.DockSpec{}
	// Report the actual unmarshal error; dbRes.Error is empty on success.
	if err := json.Unmarshal([]byte(dbRes.Message[0]), dck); err != nil {
		log.Error("when parsing dock in db:", err)
		return nil, err
	}
	return dck, nil
}
// GetDockByPoolId resolves the dock that owns the given storage pool by
// matching the pool's DockId against all known docks.
func (c *Client) GetDockByPoolId(ctx *c.Context, poolId string) (*model.DockSpec, error) {
	pool, err := c.GetPool(ctx, poolId)
	if err != nil {
		log.Error("Get pool failed in db: ", err)
		return nil, err
	}
	docks, err := c.ListDocks(ctx)
	if err != nil {
		// The original message duplicated the word "failed".
		log.Error("List docks failed in db: ", err)
		return nil, err
	}
	for _, dock := range docks {
		if pool.DockId == dock.Id {
			return dock, nil
		}
	}
	return nil, errors.New("Get dock failed by pool id: " + poolId)
}
// ListDocks returns every dock record in the database.
func (c *Client) ListDocks(ctx *c.Context) ([]*model.DockSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateDockURL(urls.Etcd, ""),
	}
	dbRes := c.List(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When list docks in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var dcks = []*model.DockSpec{}
	if len(dbRes.Message) == 0 {
		return dcks, nil
	}
	for _, msg := range dbRes.Message {
		var dck = &model.DockSpec{}
		// Report the actual unmarshal error; dbRes.Error is empty after a
		// successful List.
		if err := json.Unmarshal([]byte(msg), dck); err != nil {
			log.Error("When parsing dock in db:", err)
			return nil, err
		}
		dcks = append(dcks, dck)
	}
	return dcks, nil
}
// ListDocksWithFilter lists docks and then applies the request's filter/sort
// parameters.
func (c *Client) ListDocksWithFilter(ctx *c.Context, m map[string][]string) ([]*model.DockSpec, error) {
	docks, err := c.ListDocks(ctx)
	if err != nil {
		log.Error("List docks failed: ", err.Error())
		return nil, err
	}
	filtered := c.FilterAndSort(docks, m, sortableKeysMap[typeDocks])
	res := []*model.DockSpec{}
	for _, item := range filtered.([]interface{}) {
		res = append(res, item.(*model.DockSpec))
	}
	return res, nil
}
// UpdateDock updates a dock's name and/or description (empty values leave the
// existing field untouched) and persists the result.
func (c *Client) UpdateDock(ctx *c.Context, dckID, name, desp string) (*model.DockSpec, error) {
	dck, err := c.GetDock(ctx, dckID)
	if err != nil {
		return nil, err
	}
	if name != "" {
		dck.Name = name
	}
	if desp != "" {
		dck.Description = desp
	}
	dck.UpdatedAt = time.Now().Format(constants.TimeFormat)
	body, err := json.Marshal(dck)
	if err != nil {
		return nil, err
	}
	dbRes := c.Update(&Request{
		Url:        urls.GenerateDockURL(urls.Etcd, "", dckID),
		NewContent: string(body),
	})
	if dbRes.Status != "Success" {
		log.Error("When update dock in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return dck, nil
}
// DeleteDock removes a dock record from etcd by id.
func (c *Client) DeleteDock(ctx *c.Context, dckID string) error {
	dbRes := c.Delete(&Request{
		Url: urls.GenerateDockURL(urls.Etcd, "", dckID),
	})
	if dbRes.Status != "Success" {
		log.Error("When delete dock in db:", dbRes.Error)
		return errors.New(dbRes.Error)
	}
	return nil
}
// CreatePool persists a storage pool record, generating an id and a creation
// timestamp when they are not supplied.
func (c *Client) CreatePool(ctx *c.Context, pol *model.StoragePoolSpec) (*model.StoragePoolSpec, error) {
	if pol.Id == "" {
		pol.Id = uuid.NewV4().String()
	}
	if pol.CreatedAt == "" {
		pol.CreatedAt = time.Now().Format(constants.TimeFormat)
	}
	body, err := json.Marshal(pol)
	if err != nil {
		return nil, err
	}
	dbRes := c.Create(&Request{
		Url:     urls.GeneratePoolURL(urls.Etcd, "", pol.Id),
		Content: string(body),
	})
	if dbRes.Status != "Success" {
		log.Error("When create pol in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return pol, nil
}
// ListPoolsWithFilter lists storage pools and then applies the request's
// filter/sort parameters.
func (c *Client) ListPoolsWithFilter(ctx *c.Context, m map[string][]string) ([]*model.StoragePoolSpec, error) {
	pools, err := c.ListPools(ctx)
	if err != nil {
		log.Error("List pools failed: ", err.Error())
		return nil, err
	}
	filtered := c.FilterAndSort(pools, m, sortableKeysMap[typePools])
	res := []*model.StoragePoolSpec{}
	for _, item := range filtered.([]interface{}) {
		res = append(res, item.(*model.StoragePoolSpec))
	}
	return res, nil
}
// GetPool fetches a storage pool record by id. Pools are global resources,
// so no tenant id is used in the key.
func (c *Client) GetPool(ctx *c.Context, polID string) (*model.StoragePoolSpec, error) {
	dbReq := &Request{
		Url: urls.GeneratePoolURL(urls.Etcd, "", polID),
	}
	dbRes := c.Get(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When get pool in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var pol = &model.StoragePoolSpec{}
	// Report the actual unmarshal error; dbRes.Error is empty on success.
	if err := json.Unmarshal([]byte(dbRes.Message[0]), pol); err != nil {
		log.Error("When parsing pool in db:", err)
		return nil, err
	}
	return pol, nil
}
// ListAvailabilityZones returns the distinct availability zones across all
// storage pools.
func (c *Client) ListAvailabilityZones(ctx *c.Context) ([]string, error) {
	dbReq := &Request{
		Url: urls.GeneratePoolURL(urls.Etcd, ""),
	}
	dbRes := c.List(dbReq)
	if dbRes.Status != "Success" {
		log.Error("Failed to get AZ for pools in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var azs = []string{}
	if len(dbRes.Message) == 0 {
		return azs, nil
	}
	for _, msg := range dbRes.Message {
		var pol = &model.StoragePoolSpec{}
		// Report the actual unmarshal error; dbRes.Error is empty after a
		// successful List.
		if err := json.Unmarshal([]byte(msg), pol); err != nil {
			log.Error("When parsing pool in db:", err)
			return nil, err
		}
		azs = append(azs, pol.AvailabilityZone)
	}
	// Remove duplicate zone names before returning.
	azs = utils.RvRepElement(azs)
	return azs, nil
}
// ListPools returns every storage pool record in the database.
func (c *Client) ListPools(ctx *c.Context) ([]*model.StoragePoolSpec, error) {
	dbReq := &Request{
		Url: urls.GeneratePoolURL(urls.Etcd, ""),
	}
	dbRes := c.List(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When list pools in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var pols = []*model.StoragePoolSpec{}
	if len(dbRes.Message) == 0 {
		return pols, nil
	}
	for _, msg := range dbRes.Message {
		var pol = &model.StoragePoolSpec{}
		// Report the actual unmarshal error; dbRes.Error is empty after a
		// successful List.
		if err := json.Unmarshal([]byte(msg), pol); err != nil {
			log.Error("When parsing pool in db:", err)
			return nil, err
		}
		pols = append(pols, pol)
	}
	return pols, nil
}
// UpdatePool updates a storage pool's name and/or description (empty values
// leave the existing field untouched) and persists the merged record.
//
// NOTE(review): the usedCapacity and used parameters are accepted but never
// applied to the pool record — confirm whether capacity updates were intended
// here or are handled elsewhere.
func (c *Client) UpdatePool(ctx *c.Context, polID, name, desp string, usedCapacity int64, used bool) (*model.StoragePoolSpec, error) {
	pol, err := c.GetPool(ctx, polID)
	if err != nil {
		return nil, err
	}
	if name != "" {
		pol.Name = name
	}
	if desp != "" {
		pol.Description = desp
	}
	// Refresh the update timestamp before persisting.
	pol.UpdatedAt = time.Now().Format(constants.TimeFormat)
	polBody, err := json.Marshal(pol)
	if err != nil {
		return nil, err
	}
	dbReq := &Request{
		Url: urls.GeneratePoolURL(urls.Etcd, "", polID),
		NewContent: string(polBody),
	}
	dbRes := c.Update(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When update pool in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return pol, nil
}
// DeletePool removes a storage pool record from etcd by id.
func (c *Client) DeletePool(ctx *c.Context, polID string) error {
	dbRes := c.Delete(&Request{
		Url: urls.GeneratePoolURL(urls.Etcd, "", polID),
	})
	if dbRes.Status != "Success" {
		log.Error("When delete pool in db:", dbRes.Error)
		return errors.New(dbRes.Error)
	}
	return nil
}
// CreateProfile persists a new profile. An id and a creation timestamp are
// generated when absent, and the profile name must be unique.
func (c *Client) CreateProfile(ctx *c.Context, prf *model.ProfileSpec) (*model.ProfileSpec, error) {
	if prf.Id == "" {
		prf.Id = uuid.NewV4().String()
	}
	if prf.CreatedAt == "" {
		prf.CreatedAt = time.Now().Format(constants.TimeFormat)
	}
	// A successful name lookup means the name is already taken.
	if _, err := c.getProfileByName(ctx, prf.Name); err == nil {
		return nil, fmt.Errorf("the profile name '%s' already exists", prf.Name)
	}
	body, err := json.Marshal(prf)
	if err != nil {
		return nil, err
	}
	dbRes := c.Create(&Request{
		Url:     urls.GenerateProfileURL(urls.Etcd, "", prf.Id),
		Content: string(body),
	})
	if dbRes.Status != "Success" {
		log.Error("When create profile in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return prf, nil
}
// GetProfile fetches a profile record by id. Profiles are global resources,
// so no tenant id is used in the key.
func (c *Client) GetProfile(ctx *c.Context, prfID string) (*model.ProfileSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateProfileURL(urls.Etcd, "", prfID),
	}
	dbRes := c.Get(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When get profile in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var prf = &model.ProfileSpec{}
	// Report the actual unmarshal error; dbRes.Error is empty on success.
	if err := json.Unmarshal([]byte(dbRes.Message[0]), prf); err != nil {
		log.Error("When parsing profile in db:", err)
		return nil, err
	}
	return prf, nil
}
// getProfileByName scans all profiles for one with the given name and
// returns a not-found error when no profile matches.
func (c *Client) getProfileByName(ctx *c.Context, name string) (*model.ProfileSpec, error) {
	profiles, err := c.ListProfiles(ctx)
	if err != nil {
		log.Error("List profile failed: ", err)
		return nil, err
	}
	for _, p := range profiles {
		if p.Name == name {
			return p, nil
		}
	}
	return nil, model.NewNotFoundError(fmt.Sprintf("can't find profile(name: %s)", name))
}
// getProfileByNameAndType scans all profiles for one matching both the given
// name and storage type; returns a not-found error when no profile matches.
func (c *Client) getProfileByNameAndType(ctx *c.Context, name, storageType string) (*model.ProfileSpec, error) {
	profiles, err := c.ListProfiles(ctx)
	if err != nil {
		log.Error("List profile failed: ", err)
		return nil, err
	}
	for _, p := range profiles {
		if p.Name == name && p.StorageType == storageType {
			return p, nil
		}
	}
	return nil, model.NewNotFoundError(fmt.Sprintf("can't find profile(name: %s, storageType:%s)", name, storageType))
}
// GetDefaultProfile returns the default block-storage profile, matched by
// defaultBlockProfileName and the typeBlock storage type.
func (c *Client) GetDefaultProfile(ctx *c.Context) (*model.ProfileSpec, error) {
	return c.getProfileByNameAndType(ctx, defaultBlockProfileName, typeBlock)
}
// GetDefaultProfileFileShare returns the default file-share profile, matched
// by defaultFileProfileName and the typeFile storage type.
func (c *Client) GetDefaultProfileFileShare(ctx *c.Context) (*model.ProfileSpec, error) {
	return c.getProfileByNameAndType(ctx, defaultFileProfileName, typeFile)
}
// ListProfiles returns every profile record in the database.
func (c *Client) ListProfiles(ctx *c.Context) ([]*model.ProfileSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateProfileURL(urls.Etcd, ""),
	}
	dbRes := c.List(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When list profiles in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var prfs = []*model.ProfileSpec{}
	if len(dbRes.Message) == 0 {
		return prfs, nil
	}
	for _, msg := range dbRes.Message {
		var prf = &model.ProfileSpec{}
		// Report the actual unmarshal error; dbRes.Error is empty after a
		// successful List.
		if err := json.Unmarshal([]byte(msg), prf); err != nil {
			log.Error("When parsing profile in db:", err)
			return nil, err
		}
		prfs = append(prfs, prf)
	}
	return prfs, nil
}
// ListProfilesWithFilter lists profiles and then applies the request's
// filter/sort parameters.
func (c *Client) ListProfilesWithFilter(ctx *c.Context, m map[string][]string) ([]*model.ProfileSpec, error) {
	profiles, err := c.ListProfiles(ctx)
	if err != nil {
		log.Error("List profiles failed: ", err)
		return nil, err
	}
	filtered := c.FilterAndSort(profiles, m, sortableKeysMap[typeProfiles])
	res := []*model.ProfileSpec{}
	for _, item := range filtered.([]interface{}) {
		res = append(res, item.(*model.ProfileSpec))
	}
	return res, nil
}
// UpdateProfile merges the caller-supplied name, description, and custom
// properties into the stored profile and persists the result.
func (c *Client) UpdateProfile(ctx *c.Context, prfID string, input *model.ProfileSpec) (*model.ProfileSpec, error) {
	prf, err := c.GetProfile(ctx, prfID)
	if err != nil {
		return nil, err
	}
	if name := input.Name; name != "" {
		prf.Name = name
	}
	if desp := input.Description; desp != "" {
		prf.Description = desp
	}
	// Merge (not replace) custom properties so existing keys survive.
	if props := input.CustomProperties; len(props) != 0 {
		if prf.CustomProperties == nil {
			prf.CustomProperties = make(map[string]interface{})
		}
		for k, v := range props {
			prf.CustomProperties[k] = v
		}
	}
	// Refresh the update timestamp once (the original set it twice).
	prf.UpdatedAt = time.Now().Format(constants.TimeFormat)
	prfBody, err := json.Marshal(prf)
	if err != nil {
		return nil, err
	}
	dbReq := &Request{
		Url:        urls.GenerateProfileURL(urls.Etcd, "", prfID),
		NewContent: string(prfBody),
	}
	dbRes := c.Update(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When update profile in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return prf, nil
}
// DeleteProfile removes a profile record from etcd by id.
func (c *Client) DeleteProfile(ctx *c.Context, prfID string) error {
	dbRes := c.Delete(&Request{
		Url: urls.GenerateProfileURL(urls.Etcd, "", prfID),
	})
	if dbRes.Status != "Success" {
		log.Error("When delete profile in db:", dbRes.Error)
		return errors.New(dbRes.Error)
	}
	return nil
}
// AddCustomProperty merges the given custom properties into the profile and
// persists the result, returning the merged property set.
func (c *Client) AddCustomProperty(ctx *c.Context, prfID string, ext model.CustomPropertiesSpec) (*model.CustomPropertiesSpec, error) {
	prf, err := c.GetProfile(ctx, prfID)
	if err != nil {
		return nil, err
	}
	if prf.CustomProperties == nil {
		prf.CustomProperties = make(map[string]interface{})
	}
	for k, v := range ext {
		prf.CustomProperties[k] = v
	}
	prf.UpdatedAt = time.Now().Format(constants.TimeFormat)
	// Persist with an Update request: the original called CreateProfile,
	// which rejects the write because the profile name already exists
	// (name-uniqueness check in CreateProfile).
	prfBody, err := json.Marshal(prf)
	if err != nil {
		return nil, err
	}
	dbReq := &Request{
		Url:        urls.GenerateProfileURL(urls.Etcd, "", prfID),
		NewContent: string(prfBody),
	}
	dbRes := c.Update(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When update profile in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return &prf.CustomProperties, nil
}
// ListCustomProperties returns the custom-properties map of the profile with
// the given id.
func (c *Client) ListCustomProperties(ctx *c.Context, prfID string) (*model.CustomPropertiesSpec, error) {
	prf, err := c.GetProfile(ctx, prfID)
	if err != nil {
		return nil, err
	}
	return &prf.CustomProperties, nil
}
// RemoveCustomProperty deletes one custom-property key from the profile and
// persists the change.
func (c *Client) RemoveCustomProperty(ctx *c.Context, prfID, customKey string) error {
	prf, err := c.GetProfile(ctx, prfID)
	if err != nil {
		return err
	}
	delete(prf.CustomProperties, customKey)
	prf.UpdatedAt = time.Now().Format(constants.TimeFormat)
	// Persist with an Update request: the original called CreateProfile,
	// which rejects the write because the profile name already exists
	// (name-uniqueness check in CreateProfile).
	prfBody, err := json.Marshal(prf)
	if err != nil {
		return err
	}
	dbReq := &Request{
		Url:        urls.GenerateProfileURL(urls.Etcd, "", prfID),
		NewContent: string(prfBody),
	}
	dbRes := c.Update(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When update profile in db:", dbRes.Error)
		return errors.New(dbRes.Error)
	}
	return nil
}
// CreateVolume persists a new volume record under the caller's tenant.
// Creation is refused when no profiles exist in the database.
func (c *Client) CreateVolume(ctx *c.Context, vol *model.VolumeSpec) (*model.VolumeSpec, error) {
	profiles, err := c.ListProfiles(ctx)
	if err != nil {
		return nil, err
	}
	if len(profiles) == 0 {
		return nil, errors.New("No profile in db.")
	}
	vol.TenantId = ctx.TenantId
	body, err := json.Marshal(vol)
	if err != nil {
		return nil, err
	}
	dbRes := c.Create(&Request{
		Url:     urls.GenerateVolumeURL(urls.Etcd, ctx.TenantId, vol.Id),
		Content: string(body),
	})
	if dbRes.Status != "Success" {
		log.Error("When create volume in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return vol, nil
}
// GetVolume retrieves a volume by id. Admin callers fall back to scanning
// all volumes when the direct tenant-scoped lookup fails.
func (c *Client) GetVolume(ctx *c.Context, volID string) (*model.VolumeSpec, error) {
	vol, err := c.getVolume(ctx, volID)
	if err == nil || !IsAdminContext(ctx) {
		return vol, err
	}
	// Admin fallback: the volume may belong to another tenant.
	all, listErr := c.ListVolumes(ctx)
	if listErr != nil {
		return nil, listErr
	}
	for _, candidate := range all {
		if candidate.Id == volID {
			return candidate, nil
		}
	}
	return nil, fmt.Errorf("specified volume(%s) can't find", volID)
}
// getVolume fetches a single volume from etcd under the caller's tenant
// key space.
func (c *Client) getVolume(ctx *c.Context, volID string) (*model.VolumeSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateVolumeURL(urls.Etcd, ctx.TenantId, volID),
	}
	dbRes := c.Get(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When get volume in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var vol = &model.VolumeSpec{}
	// Report the actual unmarshal error; dbRes.Error is empty on success.
	if err := json.Unmarshal([]byte(dbRes.Message[0]), vol); err != nil {
		log.Error("When parsing volume in db:", err)
		return nil, err
	}
	return vol, nil
}
// ListVolumes returns the caller's volumes; admin callers get the volumes of
// every tenant.
func (c *Client) ListVolumes(ctx *c.Context) ([]*model.VolumeSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateVolumeURL(urls.Etcd, ctx.TenantId),
	}
	// Admin user should get all volumes including the volumes whose tenant is not admin.
	if IsAdminContext(ctx) {
		dbReq.Url = urls.GenerateVolumeURL(urls.Etcd, "")
	}
	dbRes := c.List(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When list volumes in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var vols = []*model.VolumeSpec{}
	if len(dbRes.Message) == 0 {
		return vols, nil
	}
	for _, msg := range dbRes.Message {
		var vol = &model.VolumeSpec{}
		// Report the actual unmarshal error; dbRes.Error is empty after a
		// successful List.
		if err := json.Unmarshal([]byte(msg), vol); err != nil {
			log.Error("When parsing volume in db:", err)
			return nil, err
		}
		vols = append(vols, vol)
	}
	return vols, nil
}
// ListVolumesByProfileId returns the names of all volumes whose ProfileId
// matches prfID.
// NOTE(review): this returns volume names, not ids — confirm callers expect
// names despite the function name.
func (c *Client) ListVolumesByProfileId(ctx *c.Context, prfID string) ([]string, error) {
	vols, err := c.ListVolumes(ctx)
	if err != nil {
		return nil, err
	}
	var names []string
	for _, vol := range vols {
		if vol.ProfileId == prfID {
			names = append(names, vol.Name)
		}
	}
	return names, nil
}
// volume_sortKey holds the field key VolumeSlice.Less compares by; it is set
// by SortVolumes before sorting.
// NOTE(review): package-level mutable state — concurrent sorts with different
// keys would race. Confirm sorting is single-threaded.
var volume_sortKey string
// VolumeSlice implements sort.Interface over volumes, ordering by the
// package-level volume_sortKey (see Less).
type VolumeSlice []*model.VolumeSpec
// Len reports the number of volumes in the slice.
func (volume VolumeSlice) Len() int { return len(volume) }
// Swap exchanges the volumes at indices i and j.
func (volume VolumeSlice) Swap(i, j int) { volume[i], volume[j] = volume[j], volume[i] }
// Less compares two volumes by the field named in the package-level
// volume_sortKey (set by SortVolumes). Unknown or empty keys compare as
// equal, so the sort leaves the original order for them.
func (volume VolumeSlice) Less(i, j int) bool {
	switch volume_sortKey {
	case "ID":
		return volume[i].Id < volume[j].Id
	case "NAME":
		return volume[i].Name < volume[j].Name
	case "STATUS":
		return volume[i].Status < volume[j].Status
	case "AVAILABILITYZONE":
		return volume[i].AvailabilityZone < volume[j].AvailabilityZone
	case "PROFILEID":
		return volume[i].ProfileId < volume[j].ProfileId
	case "TENANTID":
		return volume[i].TenantId < volume[j].TenantId
	case "SIZE":
		return volume[i].Size < volume[j].Size
	case "POOLID":
		return volume[i].PoolId < volume[j].PoolId
	case "DESCRIPTION":
		return volume[i].Description < volume[j].Description
	case "GROUPID":
		return volume[i].GroupId < volume[j].GroupId
	// TODO:case "lun_id" (admin_only)
	}
	return false
}
// FindVolumeValue returns the string form of the named field of a volume,
// used by the generic filtering code. Unknown keys yield "".
//
// NOTE(review): the "DurableName"/"DurableNameFormat" cases dereference
// p.Identifier without a nil check — confirm Identifier is always populated
// when those keys are filtered on.
func (c *Client) FindVolumeValue(k string, p *model.VolumeSpec) string {
	switch k {
	case "Id":
		return p.Id
	case "CreatedAt":
		return p.CreatedAt
	case "UpdatedAt":
		return p.UpdatedAt
	case "TenantId":
		return p.TenantId
	case "UserId":
		return p.UserId
	case "Name":
		return p.Name
	case "Description":
		return p.Description
	case "AvailabilityZone":
		return p.AvailabilityZone
	case "Size":
		return strconv.FormatInt(p.Size, 10)
	case "Status":
		return p.Status
	case "PoolId":
		return p.PoolId
	case "ProfileId":
		return p.ProfileId
	case "GroupId":
		return p.GroupId
	case "DurableName":
		return p.Identifier.DurableName
	case "DurableNameFormat":
		return p.Identifier.DurableNameFormat
	}
	return ""
}
// SortVolumes sorts volumes in place by p.sortKey, ascending when p.sortDir
// is "asc" (case-insensitive), descending otherwise.
// NOTE(review): communicates the key via the package-level volume_sortKey,
// so concurrent calls with different keys would race.
func (c *Client) SortVolumes(volumes []*model.VolumeSpec, p *Parameter) []*model.VolumeSpec {
	volume_sortKey = p.sortKey
	if strings.EqualFold(p.sortDir, "asc") {
		sort.Sort(VolumeSlice(volumes))
	} else {
		sort.Sort(sort.Reverse(VolumeSlice(volumes)))
	}
	return volumes
}
// ListVolumesWithFilter lists volumes and then applies the request's
// filter/sort parameters.
func (c *Client) ListVolumesWithFilter(ctx *c.Context, m map[string][]string) ([]*model.VolumeSpec, error) {
	volumes, err := c.ListVolumes(ctx)
	if err != nil {
		log.Error("List volumes failed: ", err)
		return nil, err
	}
	filtered := c.FilterAndSort(volumes, m, sortableKeysMap[typeVolumes])
	res := []*model.VolumeSpec{}
	for _, item := range filtered.([]interface{}) {
		res = append(res, item.(*model.VolumeSpec))
	}
	return res, nil
}
// UpdateVolume merges the caller-supplied mutable fields into the stored
// volume and persists the result. Metadata is merged key-by-key; all other
// supplied fields replace the stored values.
func (c *Client) UpdateVolume(ctx *c.Context, vol *model.VolumeSpec) (*model.VolumeSpec, error) {
	result, err := c.GetVolume(ctx, vol.Id)
	if err != nil {
		return nil, err
	}
	// Only overwrite fields the caller actually supplied.
	if vol.Name != "" {
		result.Name = vol.Name
	}
	if vol.AvailabilityZone != "" {
		result.AvailabilityZone = vol.AvailabilityZone
	}
	if vol.Description != "" {
		result.Description = vol.Description
	}
	if vol.Metadata != nil {
		result.Metadata = utils.MergeStringMaps(result.Metadata, vol.Metadata)
	}
	if vol.Identifier != nil {
		result.Identifier = vol.Identifier
	}
	if vol.PoolId != "" {
		result.PoolId = vol.PoolId
	}
	if vol.ProfileId != "" {
		result.ProfileId = vol.ProfileId
	}
	if vol.Size != 0 {
		result.Size = vol.Size
	}
	if vol.Status != "" {
		result.Status = vol.Status
	}
	if vol.ReplicationDriverData != nil {
		result.ReplicationDriverData = vol.ReplicationDriverData
	}
	// MultiAttach can only be switched on here, never off (false is the
	// zero value and indistinguishable from "not supplied").
	if vol.MultiAttach {
		result.MultiAttach = vol.MultiAttach
	}
	if vol.GroupId != "" {
		result.GroupId = vol.GroupId
	}
	// Set update time
	result.UpdatedAt = time.Now().Format(constants.TimeFormat)
	body, err := json.Marshal(result)
	if err != nil {
		return nil, err
	}
	// Non-admin callers may only touch resources in their own project.
	if !IsAdminContext(ctx) && !AuthorizeProjectContext(ctx, result.TenantId) {
		return nil, fmt.Errorf("operation is not permitted")
	}
	dbReq := &Request{
		Url:        urls.GenerateVolumeURL(urls.Etcd, result.TenantId, vol.Id),
		NewContent: string(body),
	}
	dbRes := c.Update(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When update volume in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return result, nil
}
// DeleteVolume removes a volume record from etcd. Admin callers may delete
// volumes owned by other tenants; the owning tenant id is resolved from the
// volume itself in that case.
func (c *Client) DeleteVolume(ctx *c.Context, volID string) error {
	tenantId := ctx.TenantId
	if IsAdminContext(ctx) {
		// Resolve the real owner so the correct etcd key is deleted.
		vol, err := c.GetVolume(ctx, volID)
		if err != nil {
			log.Error(err)
			return err
		}
		tenantId = vol.TenantId
	}
	dbRes := c.Delete(&Request{
		Url: urls.GenerateVolumeURL(urls.Etcd, tenantId, volID),
	})
	if dbRes.Status != "Success" {
		log.Error("When delete volume in db:", dbRes.Error)
		return errors.New(dbRes.Error)
	}
	return nil
}
// ExtendVolume records the new size and status of a volume after an extend
// operation and persists the record under the caller's tenant.
func (c *Client) ExtendVolume(ctx *c.Context, vol *model.VolumeSpec) (*model.VolumeSpec, error) {
	result, err := c.GetVolume(ctx, vol.Id)
	if err != nil {
		return nil, err
	}
	if vol.Size > 0 {
		result.Size = vol.Size
	}
	// Status is copied unconditionally, matching the extend workflow.
	result.Status = vol.Status
	result.UpdatedAt = time.Now().Format(constants.TimeFormat)
	body, err := json.Marshal(result)
	if err != nil {
		return nil, err
	}
	dbRes := c.Update(&Request{
		Url:        urls.GenerateVolumeURL(urls.Etcd, ctx.TenantId, vol.Id),
		NewContent: string(body),
	})
	if dbRes.Status != "Success" {
		log.Error("When extend volume in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return result, nil
}
// CreateVolumeAttachment persists a new volume attachment, generating its id
// when absent and stamping the creation time and owning tenant.
func (c *Client) CreateVolumeAttachment(ctx *c.Context, attachment *model.VolumeAttachmentSpec) (*model.VolumeAttachmentSpec, error) {
	if attachment.Id == "" {
		attachment.Id = uuid.NewV4().String()
	}
	attachment.CreatedAt = time.Now().Format(constants.TimeFormat)
	attachment.TenantId = ctx.TenantId
	body, err := json.Marshal(attachment)
	if err != nil {
		return nil, err
	}
	dbRes := c.Create(&Request{
		Url:     urls.GenerateAttachmentURL(urls.Etcd, ctx.TenantId, attachment.Id),
		Content: string(body),
	})
	if dbRes.Status != "Success" {
		log.Error("When create volume attachment in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return attachment, nil
}
// GetVolumeAttachment retrieves an attachment by id. Admin callers fall back
// to scanning all attachments when the direct tenant-scoped lookup fails.
func (c *Client) GetVolumeAttachment(ctx *c.Context, attachmentId string) (*model.VolumeAttachmentSpec, error) {
	attach, err := c.getVolumeAttachment(ctx, attachmentId)
	if err == nil || !IsAdminContext(ctx) {
		return attach, err
	}
	// Admin fallback: the attachment may belong to another tenant.
	all, listErr := c.ListVolumeAttachments(ctx, "")
	if listErr != nil {
		return nil, listErr
	}
	for _, candidate := range all {
		if candidate.Id == attachmentId {
			return candidate, nil
		}
	}
	return nil, fmt.Errorf("specified volume attachment(%s) can't find", attachmentId)
}
// getVolumeAttachment fetches a single attachment from etcd under the
// caller's tenant key space.
func (c *Client) getVolumeAttachment(ctx *c.Context, attachmentId string) (*model.VolumeAttachmentSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateAttachmentURL(urls.Etcd, ctx.TenantId, attachmentId),
	}
	dbRes := c.Get(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When get volume attachment in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var atc = &model.VolumeAttachmentSpec{}
	// Report the actual unmarshal error; dbRes.Error is empty on success.
	if err := json.Unmarshal([]byte(dbRes.Message[0]), atc); err != nil {
		log.Error("When parsing volume attachment in db:", err)
		return nil, err
	}
	return atc, nil
}
// ListVolumeAttachments returns the caller's attachments (all tenants'
// attachments for admins), optionally filtered to a single volume when
// volumeId is non-empty.
func (c *Client) ListVolumeAttachments(ctx *c.Context, volumeId string) ([]*model.VolumeAttachmentSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateAttachmentURL(urls.Etcd, ctx.TenantId),
	}
	if IsAdminContext(ctx) {
		dbReq.Url = urls.GenerateAttachmentURL(urls.Etcd, "")
	}
	dbRes := c.List(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When list volume attachments in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var atcs = []*model.VolumeAttachmentSpec{}
	for _, msg := range dbRes.Message {
		var atc = &model.VolumeAttachmentSpec{}
		// Report the actual unmarshal error; dbRes.Error is empty after a
		// successful List.
		if err := json.Unmarshal([]byte(msg), atc); err != nil {
			log.Error("When parsing volume attachment in db:", err)
			return nil, err
		}
		if len(volumeId) == 0 || atc.VolumeId == volumeId {
			atcs = append(atcs, atc)
		}
	}
	return atcs, nil
}
// ListVolumeAttachmentsWithFilter lists attachments (pre-filtered by the
// VolumeId query parameter when present) and applies the remaining
// filter/sort parameters.
func (c *Client) ListVolumeAttachmentsWithFilter(ctx *c.Context, m map[string][]string) ([]*model.VolumeAttachmentSpec, error) {
	var volumeId string
	if v, ok := m["VolumeId"]; ok {
		volumeId = v[0]
	}
	attachments, err := c.ListVolumeAttachments(ctx, volumeId)
	if err != nil {
		// The original message said "List volumes failed" by mistake.
		log.Error("List volume attachments failed: ", err)
		return nil, err
	}
	tmpAttachments := c.FilterAndSort(attachments, m, sortableKeysMap[typeAttachments])
	var res = []*model.VolumeAttachmentSpec{}
	for _, data := range tmpAttachments.([]interface{}) {
		res = append(res, data.(*model.VolumeAttachmentSpec))
	}
	return res, nil
}
// UpdateVolumeAttachment merges the caller-supplied mountpoint, status,
// driver volume type, and connection data into the stored attachment and
// persists the result.
func (c *Client) UpdateVolumeAttachment(ctx *c.Context, attachmentId string, attachment *model.VolumeAttachmentSpec) (*model.VolumeAttachmentSpec, error) {
	result, err := c.GetVolumeAttachment(ctx, attachmentId)
	if err != nil {
		return nil, err
	}
	if len(attachment.Mountpoint) > 0 {
		result.Mountpoint = attachment.Mountpoint
	}
	if len(attachment.Status) > 0 {
		result.Status = attachment.Status
	}
	// Update DriverVolumeType
	if len(attachment.DriverVolumeType) > 0 {
		result.DriverVolumeType = attachment.DriverVolumeType
	}
	// Update connectionData
	// Debug
	log.V(8).Infof("etcd: update volume attachment connection data from db: %v", result.ConnectionData)
	log.V(8).Infof("etcd: update volume attachment connection data from target: %v", attachment.ConnectionData)
	// Merge connection data key-by-key so existing entries survive.
	if attachment.ConnectionData != nil {
		if result.ConnectionData == nil {
			result.ConnectionData = make(map[string]interface{})
		}
		for k, v := range attachment.ConnectionData {
			result.ConnectionData[k] = v
		}
	}
	// Set update time
	result.UpdatedAt = time.Now().Format(constants.TimeFormat)
	atcBody, err := json.Marshal(result)
	if err != nil {
		return nil, err
	}
	// Non-admin callers may only touch resources in their own project.
	if !IsAdminContext(ctx) && !AuthorizeProjectContext(ctx, result.TenantId) {
		return nil, fmt.Errorf("operation is not permitted")
	}
	dbReq := &Request{
		Url:        urls.GenerateAttachmentURL(urls.Etcd, result.TenantId, attachmentId),
		NewContent: string(atcBody),
	}
	dbRes := c.Update(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When update volume attachment in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return result, nil
}
// DeleteVolumeAttachment removes the attachment record identified by
// attachmentId. An admin may delete attachments owned by other tenants.
func (c *Client) DeleteVolumeAttachment(ctx *c.Context, attachmentId string) error {
	// Non-admins can only touch their own tenant's resources.
	tenantId := ctx.TenantId
	if IsAdminContext(ctx) {
		// Look the attachment up so the delete targets the owner tenant.
		attach, err := c.GetVolumeAttachment(ctx, attachmentId)
		if err != nil {
			log.Error(err)
			return err
		}
		tenantId = attach.TenantId
	}
	req := &Request{
		Url: urls.GenerateAttachmentURL(urls.Etcd, tenantId, attachmentId),
	}
	if res := c.Delete(req); res.Status != "Success" {
		log.Error("When delete volume attachment in db:", res.Error)
		return errors.New(res.Error)
	}
	return nil
}
// CreateVolumeSnapshot persists a new volume snapshot record under the
// caller's tenant and returns it unchanged on success.
func (c *Client) CreateVolumeSnapshot(ctx *c.Context, snp *model.VolumeSnapshotSpec) (*model.VolumeSnapshotSpec, error) {
	snp.TenantId = ctx.TenantId
	body, err := json.Marshal(snp)
	if err != nil {
		return nil, err
	}
	req := &Request{
		Url:     urls.GenerateSnapshotURL(urls.Etcd, ctx.TenantId, snp.Id),
		Content: string(body),
	}
	if res := c.Create(req); res.Status != "Success" {
		log.Error("When create volume snapshot in db:", res.Error)
		return nil, errors.New(res.Error)
	}
	return snp, nil
}
// GetVolumeSnapshot fetches the snapshot with the given id. For admins the
// lookup falls back to scanning all tenants when the direct read fails.
func (c *Client) GetVolumeSnapshot(ctx *c.Context, snpID string) (*model.VolumeSnapshotSpec, error) {
	snap, err := c.getVolumeSnapshot(ctx, snpID)
	if err == nil || !IsAdminContext(ctx) {
		return snap, err
	}
	// Admin fallback: search every tenant's snapshots.
	all, err := c.ListVolumeSnapshots(ctx)
	if err != nil {
		return nil, err
	}
	for _, s := range all {
		if s.Id == snpID {
			return s, nil
		}
	}
	return nil, fmt.Errorf("specified volume snapshot(%s) can't find", snpID)
}
// getVolumeSnapshot reads a snapshot record from the caller's own tenant
// namespace in etcd.
func (c *Client) getVolumeSnapshot(ctx *c.Context, snpID string) (*model.VolumeSnapshotSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateSnapshotURL(urls.Etcd, ctx.TenantId, snpID),
	}
	dbRes := c.Get(dbReq)
	if dbRes.Status != "Success" {
		// Fixed copy-paste: this is a snapshot lookup, not an attachment one.
		log.Error("When get volume snapshot in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var vs = &model.VolumeSnapshotSpec{}
	if err := json.Unmarshal([]byte(dbRes.Message[0]), vs); err != nil {
		// Log and return the actual unmarshal error; dbRes.Error is empty
		// here because the request itself succeeded.
		log.Error("When parsing volume snapshot in db:", err)
		return nil, err
	}
	return vs, nil
}
// ListVolumeSnapshots returns all snapshots visible to the caller; admins
// see snapshots from every tenant.
func (c *Client) ListVolumeSnapshots(ctx *c.Context) ([]*model.VolumeSnapshotSpec, error) {
	url := urls.GenerateSnapshotURL(urls.Etcd, ctx.TenantId)
	if IsAdminContext(ctx) {
		url = urls.GenerateSnapshotURL(urls.Etcd, "")
	}
	dbRes := c.List(&Request{Url: url})
	if dbRes.Status != "Success" {
		log.Error("When list volume snapshots in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	vss := []*model.VolumeSnapshotSpec{}
	for _, msg := range dbRes.Message {
		vs := &model.VolumeSnapshotSpec{}
		if err := json.Unmarshal([]byte(msg), vs); err != nil {
			log.Error("When parsing volume snapshot in db:", dbRes.Error)
			return nil, errors.New(dbRes.Error)
		}
		vss = append(vss, vs)
	}
	return vss, nil
}
// ListVolumeSnapshotsWithFilter lists snapshots matching the given query
// parameters, applying filtering, sorting and pagination.
func (c *Client) ListVolumeSnapshotsWithFilter(ctx *c.Context, m map[string][]string) ([]*model.VolumeSnapshotSpec, error) {
	snapshots, err := c.ListVolumeSnapshots(ctx)
	if err != nil {
		log.Error("List volumeSnapshots failed: ", err)
		return nil, err
	}
	filtered := c.FilterAndSort(snapshots, m, sortableKeysMap[typeVolumeSnapshots])
	res := []*model.VolumeSnapshotSpec{}
	for _, item := range filtered.([]interface{}) {
		res = append(res, item.(*model.VolumeSnapshotSpec))
	}
	return res, nil
}
// UpdateVolumeSnapshot merges the non-empty fields of snp into the stored
// snapshot identified by snpID and persists the result back to etcd.
func (c *Client) UpdateVolumeSnapshot(ctx *c.Context, snpID string, snp *model.VolumeSnapshotSpec) (*model.VolumeSnapshotSpec, error) {
	result, err := c.GetVolumeSnapshot(ctx, snpID)
	if err != nil {
		return nil, err
	}
	// Only non-empty / positive fields overwrite the stored values.
	if snp.Name != "" {
		result.Name = snp.Name
	}
	if snp.Metadata != nil {
		result.Metadata = utils.MergeStringMaps(result.Metadata, snp.Metadata)
	}
	if snp.Size > 0 {
		result.Size = snp.Size
	}
	if snp.VolumeId != "" {
		result.VolumeId = snp.VolumeId
	}
	if snp.Description != "" {
		result.Description = snp.Description
	}
	if snp.Status != "" {
		result.Status = snp.Status
	}
	// Set update time
	result.UpdatedAt = time.Now().Format(constants.TimeFormat)
	atcBody, err := json.Marshal(result)
	if err != nil {
		return nil, err
	}
	// Reject cross-tenant updates unless the caller is an admin.
	if !IsAdminContext(ctx) && !AuthorizeProjectContext(ctx, result.TenantId) {
		return nil, fmt.Errorf("opertaion is not permitted")
	}
	dbReq := &Request{
		Url: urls.GenerateSnapshotURL(urls.Etcd, result.TenantId, snpID),
		NewContent: string(atcBody),
	}
	dbRes := c.Update(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When update volume snapshot in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return result, nil
}
// DeleteVolumeSnapshot removes the snapshot record identified by snpID.
// An admin may delete snapshots owned by other tenants.
func (c *Client) DeleteVolumeSnapshot(ctx *c.Context, snpID string) error {
	// Non-admins can only touch their own tenant's resources.
	tenantId := ctx.TenantId
	if IsAdminContext(ctx) {
		// Resolve the owner tenant so the delete hits the right namespace.
		snap, err := c.GetVolumeSnapshot(ctx, snpID)
		if err != nil {
			log.Error(err)
			return err
		}
		tenantId = snap.TenantId
	}
	req := &Request{
		Url: urls.GenerateSnapshotURL(urls.Etcd, tenantId, snpID),
	}
	if res := c.Delete(req); res.Status != "Success" {
		log.Error("When delete volume snapshot in db:", res.Error)
		return errors.New(res.Error)
	}
	return nil
}
// CreateReplication persists a new replication record, generating an id and
// a creation timestamp.
func (c *Client) CreateReplication(ctx *c.Context, r *model.ReplicationSpec) (*model.ReplicationSpec, error) {
	if len(r.Id) == 0 {
		r.Id = uuid.NewV4().String()
	}
	r.TenantId = ctx.TenantId
	r.CreatedAt = time.Now().Format(constants.TimeFormat)
	body, err := json.Marshal(r)
	if err != nil {
		return nil, err
	}
	req := &Request{
		Url:     urls.GenerateReplicationURL(urls.Etcd, ctx.TenantId, r.Id),
		Content: string(body),
	}
	if resp := c.Create(req); resp.Status != "Success" {
		log.Error("When create replication in db:", resp.Error)
		return nil, errors.New(resp.Error)
	}
	return r, nil
}
// GetReplication fetches the replication with the given id. For admins the
// lookup falls back to scanning all tenants when the direct read fails.
func (c *Client) GetReplication(ctx *c.Context, replicationId string) (*model.ReplicationSpec, error) {
	replication, err := c.getReplication(ctx, replicationId)
	if err == nil || !IsAdminContext(ctx) {
		return replication, err
	}
	// Admin fallback: search every tenant's replications.
	all, err := c.ListReplication(ctx)
	if err != nil {
		return nil, err
	}
	for _, rep := range all {
		if rep.Id == replicationId {
			return rep, nil
		}
	}
	return nil, fmt.Errorf("specified replication(%s) can't find", replicationId)
}
// GetReplicationByVolumeId returns the replication in which volumeId is
// either the primary or the secondary volume.
func (c *Client) GetReplicationByVolumeId(ctx *c.Context, volumeId string) (*model.ReplicationSpec, error) {
	replications, err := c.ListReplication(ctx)
	if err != nil {
		return nil, err
	}
	for _, rep := range replications {
		if rep.PrimaryVolumeId == volumeId || rep.SecondaryVolumeId == volumeId {
			return rep, nil
		}
	}
	return nil, model.NewNotFoundError(fmt.Sprintf("can't find specified replication by volume id %s", volumeId))
}
// getReplication reads a replication record from the caller's own tenant
// namespace in etcd.
func (c *Client) getReplication(ctx *c.Context, replicationId string) (*model.ReplicationSpec, error) {
	req := &Request{
		Url: urls.GenerateReplicationURL(urls.Etcd, ctx.TenantId, replicationId),
	}
	resp := c.Get(req)
	if resp.Status != "Success" {
		// Fixed copy-paste: this is a replication lookup, not a pool one.
		log.Error("When get replication in db:", resp.Error)
		return nil, errors.New(resp.Error)
	}
	var r = &model.ReplicationSpec{}
	if err := json.Unmarshal([]byte(resp.Message[0]), r); err != nil {
		// Log and return the actual unmarshal error; resp.Error is empty
		// here because the request itself succeeded.
		log.Error("When parsing replication in db:", err)
		return nil, err
	}
	return r, nil
}
// ListReplication returns all replications visible to the caller; admins see
// replications from every tenant.
func (c *Client) ListReplication(ctx *c.Context) ([]*model.ReplicationSpec, error) {
	url := urls.GenerateReplicationURL(urls.Etcd, ctx.TenantId)
	if IsAdminContext(ctx) {
		url = urls.GenerateReplicationURL(urls.Etcd, "")
	}
	resp := c.List(&Request{Url: url})
	if resp.Status != "Success" {
		log.Error("When list replication in db:", resp.Error)
		return nil, errors.New(resp.Error)
	}
	replicas := []*model.ReplicationSpec{}
	for _, msg := range resp.Message {
		r := &model.ReplicationSpec{}
		if err := json.Unmarshal([]byte(msg), r); err != nil {
			log.Error("When parsing replication in db:", resp.Error)
			return nil, errors.New(resp.Error)
		}
		replicas = append(replicas, r)
	}
	return replicas, nil
}
// filterByName reports whether spec matches every query parameter in param
// whose key appears in filterList. Matching compares the parameter value
// case-insensitively against the string form of the struct field that has
// the same name; fields of unsupported kinds never match.
func (c *Client) filterByName(param map[string][]string, spec interface{}, filterList map[string]interface{}) bool {
	v := reflect.ValueOf(spec)
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	for key, values := range param {
		if _, filterable := filterList[key]; !filterable {
			continue
		}
		field := v.FieldByName(key)
		if !field.IsValid() {
			continue
		}
		var val string
		switch field.Kind() {
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			val = strconv.FormatInt(field.Int(), 10)
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
			val = strconv.FormatUint(field.Uint(), 10)
		case reflect.String:
			val = field.String()
		default:
			return false
		}
		if !strings.EqualFold(values[0], val) {
			return false
		}
	}
	return true
}
// SelectReplication filters replications by the query parameters in param;
// when no filterable parameter is present the input is returned as-is.
func (c *Client) SelectReplication(param map[string][]string, replications []*model.ReplicationSpec) []*model.ReplicationSpec {
	if !c.SelectOrNot(param) {
		return replications
	}
	// Fields the query is allowed to filter on.
	supported := map[string]interface{}{
		"Id":                nil,
		"CreatedAt":         nil,
		"UpdatedAt":         nil,
		"Name":              nil,
		"Description":       nil,
		"PrimaryVolumeId":   nil,
		"SecondaryVolumeId": nil,
	}
	selected := []*model.ReplicationSpec{}
	for _, candidate := range replications {
		if c.filterByName(param, candidate, supported) {
			selected = append(selected, candidate)
		}
	}
	return selected
}
// ReplicationsCompareFunc compares two replications for sorting.
type ReplicationsCompareFunc func(a *model.ReplicationSpec, b *model.ReplicationSpec) bool

// replicationsCompareFunc holds the comparator selected by SortReplications;
// package-level because the sort.Interface methods cannot carry extra state.
var replicationsCompareFunc ReplicationsCompareFunc

// ReplicationSlice adapts []*model.ReplicationSpec to sort.Interface using
// the currently selected comparator.
type ReplicationSlice []*model.ReplicationSpec

func (r ReplicationSlice) Len() int { return len(r) }
func (r ReplicationSlice) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
func (r ReplicationSlice) Less(i, j int) bool { return replicationsCompareFunc(r[i], r[j]) }
// replicationSortKey2Func maps an upper-cased sort key to the comparator
// used by SortReplications (all comparators order descending by field).
var replicationSortKey2Func = map[string]ReplicationsCompareFunc{
	"ID": func(a *model.ReplicationSpec, b *model.ReplicationSpec) bool { return a.Id > b.Id },
	"NAME": func(a *model.ReplicationSpec, b *model.ReplicationSpec) bool { return a.Name > b.Name },
	"REPLICATIONSTATUS": func(a *model.ReplicationSpec, b *model.ReplicationSpec) bool {
		return a.ReplicationStatus > b.ReplicationStatus
	},
	"AVAILABILITYZONE": func(a *model.ReplicationSpec, b *model.ReplicationSpec) bool {
		return a.AvailabilityZone > b.AvailabilityZone
	},
	"PROFILEID": func(a *model.ReplicationSpec, b *model.ReplicationSpec) bool { return a.ProfileId > b.ProfileId },
	"TENANTID": func(a *model.ReplicationSpec, b *model.ReplicationSpec) bool { return a.TenantId > b.TenantId },
	"POOLID": func(a *model.ReplicationSpec, b *model.ReplicationSpec) bool { return a.PoolId > b.PoolId },
}
// SortReplications sorts replications in place according to p's sort key and
// direction and returns the same slice.
func (c *Client) SortReplications(replications []*model.ReplicationSpec, p *Parameter) []*model.ReplicationSpec {
	replicationsCompareFunc = replicationSortKey2Func[p.sortKey]
	if strings.EqualFold(p.sortDir, "asc") {
		sort.Sort(ReplicationSlice(replications))
		return replications
	}
	sort.Sort(sort.Reverse(ReplicationSlice(replications)))
	return replications
}
// ListReplicationWithFilter lists replications matching the given query
// parameters, applying selection, sorting and pagination.
func (c *Client) ListReplicationWithFilter(ctx *c.Context, m map[string][]string) ([]*model.ReplicationSpec, error) {
	replicas, err := c.ListReplication(ctx)
	if err != nil {
		log.Error("List replications failed: ", err)
		return nil, err
	}
	selected := c.SelectReplication(m, replicas)
	sortKeys := make([]string, 0, len(replicationSortKey2Func))
	for key := range replicationSortKey2Func {
		sortKeys = append(sortKeys, key)
	}
	p := c.ParameterFilter(m, len(selected), sortKeys)
	return c.SortReplications(selected, p)[p.beginIdx:p.endIdx], nil
}
// DeleteReplication removes the replication record identified by
// replicationId. An admin may delete replications owned by other tenants.
func (c *Client) DeleteReplication(ctx *c.Context, replicationId string) error {
	tenantId := ctx.TenantId
	if IsAdminContext(ctx) {
		// Resolve the owner tenant so the delete hits the right namespace.
		rep, err := c.GetReplication(ctx, replicationId)
		if err != nil {
			return err
		}
		tenantId = rep.TenantId
	}
	req := &Request{
		Url: urls.GenerateReplicationURL(urls.Etcd, tenantId, replicationId),
	}
	if resp := c.Delete(req); resp.Status != "Success" {
		log.Error("When delete replication in db:", resp.Error)
		return errors.New(resp.Error)
	}
	return nil
}
// UpdateReplication merges the non-empty fields of input into the stored
// replication identified by replicationId and persists the result.
func (c *Client) UpdateReplication(ctx *c.Context, replicationId string, input *model.ReplicationSpec) (*model.ReplicationSpec, error) {
	r, err := c.GetReplication(ctx, replicationId)
	if err != nil {
		return nil, err
	}
	// Only non-empty / non-nil fields overwrite the stored values.
	if input.ProfileId != "" {
		r.ProfileId = input.ProfileId
	}
	if input.Name != "" {
		r.Name = input.Name
	}
	if input.Description != "" {
		r.Description = input.Description
	}
	if input.PrimaryReplicationDriverData != nil {
		r.PrimaryReplicationDriverData = input.PrimaryReplicationDriverData
	}
	if input.SecondaryReplicationDriverData != nil {
		r.SecondaryReplicationDriverData = input.SecondaryReplicationDriverData
	}
	if input.Metadata != nil {
		r.Metadata = utils.MergeStringMaps(r.Metadata, input.Metadata)
	}
	if input.ReplicationStatus != "" {
		r.ReplicationStatus = input.ReplicationStatus
	}
	r.UpdatedAt = time.Now().Format(constants.TimeFormat)
	b, err := json.Marshal(r)
	if err != nil {
		return nil, err
	}
	// Admins write into the owner tenant's namespace.
	tenantId := ctx.TenantId
	if IsAdminContext(ctx) {
		tenantId = r.TenantId
	}
	req := &Request{
		Url: urls.GenerateReplicationURL(urls.Etcd, tenantId, replicationId),
		NewContent: string(b),
	}
	resp := c.Update(req)
	if resp.Status != "Success" {
		log.Error("When update replication in db:", resp.Error)
		return nil, errors.New(resp.Error)
	}
	return r, nil
}
// CreateVolumeGroup persists a new volume group record under the caller's
// tenant and returns it unchanged on success.
func (c *Client) CreateVolumeGroup(ctx *c.Context, vg *model.VolumeGroupSpec) (*model.VolumeGroupSpec, error) {
	vg.TenantId = ctx.TenantId
	body, err := json.Marshal(vg)
	if err != nil {
		return nil, err
	}
	req := &Request{
		Url:     urls.GenerateVolumeGroupURL(urls.Etcd, ctx.TenantId, vg.Id),
		Content: string(body),
	}
	if res := c.Create(req); res.Status != "Success" {
		log.Error("When create volume group in db:", res.Error)
		return nil, errors.New(res.Error)
	}
	return vg, nil
}
// GetVolumeGroup reads the volume group identified by vgId from the caller's
// tenant namespace.
func (c *Client) GetVolumeGroup(ctx *c.Context, vgId string) (*model.VolumeGroupSpec, error) {
	res := c.Get(&Request{
		Url: urls.GenerateVolumeGroupURL(urls.Etcd, ctx.TenantId, vgId),
	})
	if res.Status != "Success" {
		log.Error("When get volume group in db:", res.Error)
		return nil, errors.New(res.Error)
	}
	vg := &model.VolumeGroupSpec{}
	if err := json.Unmarshal([]byte(res.Message[0]), vg); err != nil {
		log.Error("When parsing volume group in db:", res.Error)
		return nil, errors.New(res.Error)
	}
	return vg, nil
}
// UpdateVolumeGroup merges the non-empty, changed fields of vgUpdate into the
// stored volume group record and writes it back to etcd.
func (c *Client) UpdateVolumeGroup(ctx *c.Context, vgUpdate *model.VolumeGroupSpec) (*model.VolumeGroupSpec, error) {
	vg, err := c.GetVolumeGroup(ctx, vgUpdate.Id)
	if err != nil {
		return nil, err
	}
	if vgUpdate.Name != "" && vgUpdate.Name != vg.Name {
		vg.Name = vgUpdate.Name
	}
	if vgUpdate.AvailabilityZone != "" && vgUpdate.AvailabilityZone != vg.AvailabilityZone {
		vg.AvailabilityZone = vgUpdate.AvailabilityZone
	}
	if vgUpdate.Description != "" && vgUpdate.Description != vg.Description {
		vg.Description = vgUpdate.Description
	}
	// Note: the original code had this PoolId check duplicated; kept once.
	if vgUpdate.PoolId != "" && vgUpdate.PoolId != vg.PoolId {
		vg.PoolId = vgUpdate.PoolId
	}
	// Fixed: the old code tested vg.Status (the stored value) instead of
	// vgUpdate.Status, so a status could never be set on a group whose
	// stored status was still empty.
	if vgUpdate.Status != "" && vgUpdate.Status != vg.Status {
		vg.Status = vgUpdate.Status
	}
	if vgUpdate.CreatedAt != "" && vgUpdate.CreatedAt != vg.CreatedAt {
		vg.CreatedAt = vgUpdate.CreatedAt
	}
	if vgUpdate.UpdatedAt != "" && vgUpdate.UpdatedAt != vg.UpdatedAt {
		vg.UpdatedAt = vgUpdate.UpdatedAt
	}
	vgBody, err := json.Marshal(vg)
	if err != nil {
		return nil, err
	}
	dbReq := &Request{
		Url:        urls.GenerateVolumeGroupURL(urls.Etcd, ctx.TenantId, vgUpdate.Id),
		NewContent: string(vgBody),
	}
	dbRes := c.Update(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When update volume group in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return vg, nil
}
// UpdateStatus sets the status field on the given resource object and
// persists the change through the matching Update* method. A type it does
// not recognize is silently ignored and nil is returned.
func (c *Client) UpdateStatus(ctx *c.Context, in interface{}, status string) error {
	// Bind the concrete value in the switch instead of re-asserting in every
	// case (staticcheck S1034).
	switch obj := in.(type) {
	case *model.VolumeSnapshotSpec:
		obj.Status = status
		if _, errUpdate := c.UpdateVolumeSnapshot(ctx, obj.Id, obj); errUpdate != nil {
			log.Error("Error occurs when update volume snapshot status in db:", errUpdate.Error())
			return errUpdate
		}
	case *model.VolumeAttachmentSpec:
		obj.Status = status
		if _, errUpdate := c.UpdateVolumeAttachment(ctx, obj.Id, obj); errUpdate != nil {
			log.Error("Error occurred in dock module when update volume attachment status in db:", errUpdate)
			return errUpdate
		}
	case *model.VolumeSpec:
		obj.Status = status
		if _, errUpdate := c.UpdateVolume(ctx, obj); errUpdate != nil {
			log.Error("When update volume status in db:", errUpdate.Error())
			return errUpdate
		}
	case *model.FileShareSpec:
		obj.Status = status
		if _, errUpdate := c.UpdateFileShare(ctx, obj); errUpdate != nil {
			log.Error("when update fileshare status in db:", errUpdate.Error())
			return errUpdate
		}
	case *model.FileShareSnapshotSpec:
		obj.Status = status
		if _, errUpdate := c.UpdateFileShareSnapshot(ctx, obj.Id, obj); errUpdate != nil {
			log.Error("when update fileshare status in db:", errUpdate.Error())
			return errUpdate
		}
	case *model.VolumeGroupSpec:
		obj.Status = status
		if _, errUpdate := c.UpdateVolumeGroup(ctx, obj); errUpdate != nil {
			log.Error("When update volume status in db:", errUpdate.Error())
			return errUpdate
		}
	case []*model.VolumeSpec:
		// Batch form: statuses are not set here — this only persists the
		// given specs, matching the original behavior.
		if _, errUpdate := c.VolumesToUpdate(ctx, obj); errUpdate != nil {
			return errUpdate
		}
	case *model.ReplicationSpec:
		obj.ReplicationStatus = status
		if _, errUpdate := c.UpdateReplication(ctx, obj.Id, obj); errUpdate != nil {
			return errUpdate
		}
	}
	return nil
}
// ListVolumesByGroupId returns the volumes whose GroupId equals vgId.
func (c *Client) ListVolumesByGroupId(ctx *c.Context, vgId string) ([]*model.VolumeSpec, error) {
	volumes, err := c.ListVolumes(ctx)
	if err != nil {
		return nil, err
	}
	var members []*model.VolumeSpec
	for _, vol := range volumes {
		if vol.GroupId == vgId {
			members = append(members, vol)
		}
	}
	return members, nil
}
// VolumesToUpdate persists every volume in volumeList and returns the
// updated records; the first failure aborts the batch.
func (c *Client) VolumesToUpdate(ctx *c.Context, volumeList []*model.VolumeSpec) ([]*model.VolumeSpec, error) {
	var updated []*model.VolumeSpec
	for _, vol := range volumeList {
		ref, err := c.UpdateVolume(ctx, vol)
		if err != nil {
			return nil, err
		}
		updated = append(updated, ref)
	}
	return updated, nil
}
// ListVolumeGroups returns all volume groups visible to the caller; admins
// see groups from every tenant. (Comment fixed: this lists groups, not
// volumes.)
func (c *Client) ListVolumeGroups(ctx *c.Context) ([]*model.VolumeGroupSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateVolumeGroupURL(urls.Etcd, ctx.TenantId),
	}
	if IsAdminContext(ctx) {
		dbReq.Url = urls.GenerateVolumeGroupURL(urls.Etcd, "")
	}
	dbRes := c.List(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When list volume groups in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var groups []*model.VolumeGroupSpec
	if len(dbRes.Message) == 0 {
		// Note: returns a nil slice here, unlike the empty-slice returns of
		// the other List* helpers.
		return groups, nil
	}
	for _, msg := range dbRes.Message {
		var group = &model.VolumeGroupSpec{}
		if err := json.Unmarshal([]byte(msg), group); err != nil {
			log.Error("When parsing volume group in db:", dbRes.Error)
			return nil, errors.New(dbRes.Error)
		}
		groups = append(groups, group)
	}
	return groups, nil
}
// DeleteVolumeGroup removes the volume group record identified by
// volumeGroupId. An admin may delete groups owned by other tenants.
func (c *Client) DeleteVolumeGroup(ctx *c.Context, volumeGroupId string) error {
	// Non-admins can only touch their own tenant's resources.
	tenantId := ctx.TenantId
	if IsAdminContext(ctx) {
		// Resolve the owner tenant so the delete hits the right namespace.
		group, err := c.GetVolumeGroup(ctx, volumeGroupId)
		if err != nil {
			log.Error(err)
			return err
		}
		tenantId = group.TenantId
	}
	req := &Request{
		Url: urls.GenerateVolumeGroupURL(urls.Etcd, tenantId, volumeGroupId),
	}
	if res := c.Delete(req); res.Status != "Success" {
		log.Error("When delete volume group in db:", res.Error)
		return errors.New(res.Error)
	}
	return nil
}
// ListSnapshotsByVolumeId returns the snapshots that belong to volumeId.
func (c *Client) ListSnapshotsByVolumeId(ctx *c.Context, volumeId string) ([]*model.VolumeSnapshotSpec, error) {
	snaps, err := c.ListVolumeSnapshots(ctx)
	if err != nil {
		return nil, err
	}
	var matched []*model.VolumeSnapshotSpec
	for _, s := range snaps {
		if s.VolumeId == volumeId {
			matched = append(matched, s)
		}
	}
	return matched, nil
}
// ListAttachmentsByVolumeId returns all attachments referencing volumeId;
// thin wrapper over ListVolumeAttachments.
func (c *Client) ListAttachmentsByVolumeId(ctx *c.Context, volumeId string) ([]*model.VolumeAttachmentSpec, error) {
	return c.ListVolumeAttachments(ctx, volumeId)
}
// ListVolumeGroupsWithFilter lists volume groups matching the given query
// parameters, applying selection, sorting and pagination.
func (c *Client) ListVolumeGroupsWithFilter(ctx *c.Context, m map[string][]string) ([]*model.VolumeGroupSpec, error) {
	vgs, err := c.ListVolumeGroups(ctx)
	if err != nil {
		log.Error("List volume groups failed: ", err)
		return nil, err
	}
	selected := c.SelectVolumeGroup(m, vgs)
	sortKeys := make([]string, 0, len(volumeGroupSortKey2Func))
	for key := range volumeGroupSortKey2Func {
		sortKeys = append(sortKeys, key)
	}
	p := c.ParameterFilter(m, len(selected), sortKeys)
	return c.SortVolumeGroups(selected, p)[p.beginIdx:p.endIdx], nil
}
// VolumeGroupCompareFunc compares two volume groups for sorting.
type VolumeGroupCompareFunc func(a *model.VolumeGroupSpec, b *model.VolumeGroupSpec) bool

// volumeGroupCompareFunc holds the comparator selected by SortVolumeGroups;
// package-level because the sort.Interface methods cannot carry extra state.
var volumeGroupCompareFunc VolumeGroupCompareFunc

// VolumeGroupSlice adapts []*model.VolumeGroupSpec to sort.Interface using
// the currently selected comparator.
type VolumeGroupSlice []*model.VolumeGroupSpec

func (v VolumeGroupSlice) Len() int { return len(v) }
func (v VolumeGroupSlice) Swap(i, j int) { v[i], v[j] = v[j], v[i] }
func (v VolumeGroupSlice) Less(i, j int) bool { return volumeGroupCompareFunc(v[i], v[j]) }
// volumeGroupSortKey2Func maps an upper-cased sort key to the comparator
// used by SortVolumeGroups (all comparators order descending by field).
var volumeGroupSortKey2Func = map[string]VolumeGroupCompareFunc{
	"ID": func(a *model.VolumeGroupSpec, b *model.VolumeGroupSpec) bool { return a.Id > b.Id },
	"NAME": func(a *model.VolumeGroupSpec, b *model.VolumeGroupSpec) bool { return a.Name > b.Name },
	"STATUS": func(a *model.VolumeGroupSpec, b *model.VolumeGroupSpec) bool {
		return a.Status > b.Status
	},
	"AVAILABILITYZONE": func(a *model.VolumeGroupSpec, b *model.VolumeGroupSpec) bool {
		return a.AvailabilityZone > b.AvailabilityZone
	},
	"TENANTID": func(a *model.VolumeGroupSpec, b *model.VolumeGroupSpec) bool { return a.TenantId > b.TenantId },
	"POOLID": func(a *model.VolumeGroupSpec, b *model.VolumeGroupSpec) bool { return a.PoolId > b.PoolId },
}
// SortVolumeGroups sorts vgs in place according to p's sort key and
// direction and returns the same slice.
func (c *Client) SortVolumeGroups(vgs []*model.VolumeGroupSpec, p *Parameter) []*model.VolumeGroupSpec {
	volumeGroupCompareFunc = volumeGroupSortKey2Func[p.sortKey]
	if strings.EqualFold(p.sortDir, "asc") {
		sort.Sort(VolumeGroupSlice(vgs))
		return vgs
	}
	sort.Sort(sort.Reverse(VolumeGroupSlice(vgs)))
	return vgs
}
// SelectVolumeGroup filters vgs by the query parameters in param; when no
// filterable parameter is present the input is returned as-is.
func (c *Client) SelectVolumeGroup(param map[string][]string, vgs []*model.VolumeGroupSpec) []*model.VolumeGroupSpec {
	if !c.SelectOrNot(param) {
		return vgs
	}
	// Fields the query is allowed to filter on.
	supported := map[string]interface{}{
		"Id":               nil,
		"CreatedAt":        nil,
		"UpdatedAt":        nil,
		"Name":             nil,
		"Status":           nil,
		"TenantId":         nil,
		"UserId":           nil,
		"Description":      nil,
		"AvailabilityZone": nil,
		"PoolId":           nil,
	}
	selected := []*model.VolumeGroupSpec{}
	for _, candidate := range vgs {
		if c.filterByName(param, candidate, supported) {
			selected = append(selected, candidate)
		}
	}
	return selected
}
// ListHosts returns the hosts visible to the caller (all tenants for
// admins), filtered, optionally sorted, and paginated by the query
// parameters in m.
func (c *Client) ListHosts(ctx *c.Context, m map[string][]string) ([]*model.HostSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateHostURL(urls.Etcd, ctx.TenantId),
	}
	if IsAdminContext(ctx) {
		dbReq.Url = urls.GenerateHostURL(urls.Etcd, "")
	}
	dbRes := c.List(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When list hosts in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var hosts = []*model.HostSpec{}
	if len(dbRes.Message) == 0 {
		return hosts, nil
	}
	for _, msg := range dbRes.Message {
		var host = &model.HostSpec{}
		if err := json.Unmarshal([]byte(msg), host); err != nil {
			log.Error("When parsing host in db:", dbRes.Error)
			return nil, errors.New(dbRes.Error)
		}
		hosts = append(hosts, host)
	}
	// Filter by query params, then sort only on the whitelisted keys.
	tmpHosts := utils.Filter(hosts, m)
	if len(m["sortKey"]) > 0 && utils.Contains([]string{"hostName", "createdAt"}, m["sortKey"][0]) {
		tmpHosts = utils.Sort(tmpHosts, m["sortKey"][0], c.GetSortDir(m))
	}
	// Paginate; note the offset bound uses the unfiltered host count.
	tmpHosts = utils.Slice(tmpHosts, c.GetOffset(m, len(hosts)), c.GetLimit(m))
	var res = []*model.HostSpec{}
	for _, data := range tmpHosts.([]interface{}) {
		res = append(res, data.(*model.HostSpec))
	}
	return res, nil
}
// ListHostsByName returns the hosts whose HostName equals hostName exactly.
func (c *Client) ListHostsByName(ctx *c.Context, hostName string) ([]*model.HostSpec, error) {
	hosts, err := c.ListHosts(ctx, map[string][]string{"hostName": {hostName}})
	if err != nil {
		log.Error("List hosts failed: ", err)
		return nil, err
	}
	// Re-check exactly, since ListHosts filtering may be case-insensitive.
	var matched []*model.HostSpec
	for _, h := range hosts {
		if h.HostName == hostName {
			matched = append(matched, h)
		}
	}
	return matched, nil
}
// CreateHost persists a new host record, assigning an id when absent and a
// creation timestamp.
func (c *Client) CreateHost(ctx *c.Context, host *model.HostSpec) (*model.HostSpec, error) {
	host.TenantId = ctx.TenantId
	if len(host.Id) == 0 {
		host.Id = uuid.NewV4().String()
	}
	host.CreatedAt = time.Now().Format(constants.TimeFormat)
	body, err := json.Marshal(host)
	if err != nil {
		return nil, err
	}
	req := &Request{
		Url:     urls.GenerateHostURL(urls.Etcd, ctx.TenantId, host.Id),
		Content: string(body),
	}
	if res := c.Create(req); res.Status != "Success" {
		log.Error("When create host in db:", res.Error)
		return nil, errors.New(res.Error)
	}
	return host, nil
}
// UpdateHost merges the non-empty fields of host into the stored record and
// persists it back to etcd. Only the owner tenant or an admin may update.
func (c *Client) UpdateHost(ctx *c.Context, host *model.HostSpec) (*model.HostSpec, error) {
	result, err := c.GetHost(ctx, host.Id)
	if err != nil {
		return nil, err
	}
	if host.HostName != "" {
		result.HostName = host.HostName
	}
	// Fixed: OsType was never copied, so the host's osType could not be
	// updated.
	if host.OsType != "" {
		result.OsType = host.OsType
	}
	if host.IP != "" {
		result.IP = host.IP
	}
	if host.Port > 0 {
		result.Port = host.Port
	}
	if host.AccessMode != "" {
		result.AccessMode = host.AccessMode
	}
	if host.Username != "" {
		result.Username = host.Username
	}
	if host.Password != "" {
		result.Password = host.Password
	}
	if len(host.AvailabilityZones) > 0 {
		result.AvailabilityZones = host.AvailabilityZones
	}
	if len(host.Initiators) > 0 {
		result.Initiators = host.Initiators
	}
	// Set update time
	result.UpdatedAt = time.Now().Format(constants.TimeFormat)
	body, err := json.Marshal(result)
	if err != nil {
		return nil, err
	}
	// Reject cross-tenant updates unless the caller is an admin.
	if !IsAdminContext(ctx) && !AuthorizeProjectContext(ctx, result.TenantId) {
		return nil, fmt.Errorf("opertaion is not permitted")
	}
	dbReq := &Request{
		Url:        urls.GenerateHostURL(urls.Etcd, result.TenantId, result.Id),
		NewContent: string(body),
	}
	dbRes := c.Update(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When update host in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return result, nil
}
// GetHost fetches the host with the given id. For admins the lookup falls
// back to scanning all tenants when the direct read fails.
func (c *Client) GetHost(ctx *c.Context, hostId string) (*model.HostSpec, error) {
	host, err := c.getHost(ctx, hostId)
	if err == nil || !IsAdminContext(ctx) {
		return host, err
	}
	// Admin fallback: search every tenant's hosts.
	hosts, err := c.ListHosts(ctx, map[string][]string{"id": {hostId}})
	if err != nil {
		return nil, err
	}
	for _, h := range hosts {
		if h.Id == hostId {
			return h, nil
		}
	}
	return nil, fmt.Errorf("specified host(%s) can't find", hostId)
}
// getHost reads a host record from the caller's own tenant namespace.
func (c *Client) getHost(ctx *c.Context, hostId string) (*model.HostSpec, error) {
	res := c.Get(&Request{
		Url: urls.GenerateHostURL(urls.Etcd, ctx.TenantId, hostId),
	})
	if res.Status != "Success" {
		log.Error("When get host in db:", res.Error)
		return nil, errors.New(res.Error)
	}
	host := &model.HostSpec{}
	if err := json.Unmarshal([]byte(res.Message[0]), host); err != nil {
		log.Error("When parsing host in db:", res.Error)
		return nil, errors.New(res.Error)
	}
	return host, nil
}
// DeleteHost removes the host record identified by hostId. An admin may
// delete hosts owned by other tenants.
func (c *Client) DeleteHost(ctx *c.Context, hostId string) error {
	// Non-admins can only touch their own tenant's resources.
	tenantId := ctx.TenantId
	if IsAdminContext(ctx) {
		// Resolve the owner tenant so the delete hits the right namespace.
		host, err := c.GetHost(ctx, hostId)
		if err != nil {
			log.Error(err)
			return err
		}
		tenantId = host.TenantId
	}
	req := &Request{
		Url: urls.GenerateHostURL(urls.Etcd, tenantId, hostId),
	}
	if res := c.Delete(req); res.Status != "Success" {
		log.Error("When delete host in db:", res.Error)
		return errors.New(res.Error)
	}
	return nil
}
Fix issue where osType can't be updated
// Copyright 2017 The OpenSDS Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
This module implements the etcd database operation of data structure
defined in api module.
*/
package etcd
import (
"encoding/json"
"errors"
"fmt"
"reflect"
"sort"
"strconv"
"strings"
"time"
log "github.com/golang/glog"
c "github.com/opensds/opensds/pkg/context"
"github.com/opensds/opensds/pkg/model"
"github.com/opensds/opensds/pkg/utils"
"github.com/opensds/opensds/pkg/utils/constants"
"github.com/opensds/opensds/pkg/utils/urls"
uuid "github.com/satori/go.uuid"
)
// Default sort key plus the names of the two built-in profiles; typeBlock
// and typeFile distinguish block-storage from file-storage resources.
const (
	defaultSortKey = "ID"
	defaultBlockProfileName = "default_block"
	defaultFileProfileName = "default_file"
	typeBlock = "block"
	typeFile = "file"
)

// validKey lists the query parameters that control paging/sorting rather
// than field filtering (see SelectOrNot).
var validKey = []string{"limit", "offset", "sortDir", "sortKey"}
// Resource-type identifiers used to index sortableKeysMap.
const (
	typeFileShares string = "FileShares"
	typeFileShareSnapshots string = "FileShareSnapshots"
	typeDocks string = "Docks"
	typePools string = "Pools"
	typeProfiles string = "Profiles"
	typeVolumes string = "Volumes"
	typeAttachments string = "Attachments"
	typeVolumeSnapshots string = "VolumeSnapshots"
)

// sortableKeysMap lists, per resource type, the upper-cased field names a
// listing may be sorted on (consumed by FilterAndSort).
var sortableKeysMap = map[string][]string{
	typeFileShares: {"ID", "NAME", "STATUS", "AVAILABILITYZONE", "PROFILEID", "TENANTID", "SIZE", "POOLID", "DESCRIPTION"},
	typeFileShareSnapshots: {"ID", "NAME", "VOLUMEID", "STATUS", "USERID", "TENANTID", "SIZE"},
	typeDocks: {"ID", "NAME", "STATUS", "ENDPOINT", "DRIVERNAME", "DESCRIPTION"},
	typePools: {"ID", "NAME", "STATUS", "AVAILABILITYZONE", "DOCKID"},
	typeProfiles: {"ID", "NAME", "DESCRIPTION"},
	typeVolumes: {"ID", "NAME", "STATUS", "AVAILABILITYZONE", "PROFILEID", "TENANTID", "SIZE", "POOLID", "DESCRIPTION", "GROUPID"},
	typeAttachments: {"ID", "VOLUMEID", "STATUS", "USERID", "TENANTID", "SIZE"},
	typeVolumeSnapshots: {"ID", "NAME", "VOLUMEID", "STATUS", "USERID", "TENANTID", "SIZE"},
}
// IsAdminContext reports whether the request context carries admin
// privileges.
func IsAdminContext(ctx *c.Context) bool {
	return ctx.IsAdmin
}
// AuthorizeProjectContext reports whether the context's tenant owns the
// resource belonging to tenantId.
func AuthorizeProjectContext(ctx *c.Context, tenantId string) bool {
	return ctx.TenantId == tenantId
}
// NewClient builds an etcd-backed db client connected to the given
// endpoints.
func NewClient(edps []string) *Client {
	return &Client{
		clientInterface: Init(edps),
	}
}
// Client is the etcd implementation of the db client; it embeds the raw
// etcd request interface and layers resource CRUD on top.
type Client struct {
	clientInterface
}
// Parameter holds resolved paging (half-open index window) and sorting
// options for a list request.
type Parameter struct {
	beginIdx, endIdx int
	sortDir, sortKey string
}
// IsInArray reports whether e equals any element of s, ignoring case.
func (c *Client) IsInArray(e string, s []string) bool {
	for _, candidate := range s {
		if strings.EqualFold(candidate, e) {
			return true
		}
	}
	return false
}
// SelectOrNot reports whether m contains any key other than the paging and
// sorting parameters, i.e. whether field filtering was requested.
func (c *Client) SelectOrNot(m map[string][]string) bool {
	for key := range m {
		if utils.Contained(key, validKey) {
			continue
		}
		return true
	}
	return false
}
// GetLimit parses the "limit" query parameter, falling back to the default
// when it is absent, non-numeric, or negative.
func (c *Client) GetLimit(m map[string][]string) int {
	v, ok := m["limit"]
	if !ok {
		log.Warning("The parameter limit is not present,use default value instead:50")
		return constants.DefaultLimit
	}
	limit, err := strconv.Atoi(v[0])
	if err != nil || limit < 0 {
		log.Warning("Invalid input limit:", limit, ",use default value instead:50")
		return constants.DefaultLimit
	}
	return limit
}
// GetOffset parses the "offset" query parameter, falling back to the default
// when it is absent, non-numeric, negative, or beyond size.
func (c *Client) GetOffset(m map[string][]string, size int) int {
	v, ok := m["offset"]
	if !ok {
		log.Warning("The parameter offset is not present,use default value instead:0")
		return constants.DefaultOffset
	}
	offset, err := strconv.Atoi(v[0])
	if err != nil || offset < 0 || offset > size {
		log.Warning("Invalid input offset or input offset is out of bounds:", offset, ",use default value instead:0")
		return constants.DefaultOffset
	}
	return offset
}
// GetSortDir parses the "sortDir" query parameter; only "asc"/"desc"
// (case-insensitive) are accepted, anything else yields the default.
func (c *Client) GetSortDir(m map[string][]string) string {
	v, ok := m["sortDir"]
	if !ok {
		log.Warning("The parameter sortDir is not present,use default value instead:desc")
		return constants.DefaultSortDir
	}
	dir := v[0]
	if !strings.EqualFold(dir, "desc") && !strings.EqualFold(dir, "asc") {
		log.Warning("Invalid input sortDir:", dir, ",use default value instead:desc")
		return constants.DefaultSortDir
	}
	return dir
}
// GetSortKey parses the "sortKey" query parameter (upper-cased) and
// validates it against the allowed keys; invalid/absent keys yield the
// default.
func (c *Client) GetSortKey(m map[string][]string, sortKeys []string) string {
	v, ok := m["sortKey"]
	if !ok {
		log.Warning("The parameter sortKey is not present,use default value instead:ID")
		return defaultSortKey
	}
	key := strings.ToUpper(v[0])
	if !c.IsInArray(key, sortKeys) {
		log.Warning("Invalid input sortKey:", key, ",use default value instead:ID")
		return defaultSortKey
	}
	return key
}
// FilterAndSort applies query-parameter filtering, optional sorting (only
// when "sortKey" is present and listed in sortableKeys), and offset/limit
// slicing to src, returning the resulting slice as interface{}.
// NOTE(review): src is assumed to be a slice — reflect.ValueOf(src).Len()
// panics otherwise; confirm all callers pass slices.
func (c *Client) FilterAndSort(src interface{}, params map[string][]string, sortableKeys []string) interface{} {
	var ret interface{}
	ret = utils.Filter(src, params)
	if len(params["sortKey"]) > 0 && utils.ContainsIgnoreCase(sortableKeys, params["sortKey"][0]) {
		ret = utils.Sort(ret, params["sortKey"][0], c.GetSortDir(params))
	}
	// Slice the (possibly sorted) result to the requested page window.
	ret = utils.Slice(ret, c.GetOffset(params, reflect.ValueOf(src).Len()), c.GetLimit(params))
	return ret
}
// ParameterFilter resolves the pagination window and sort settings from
// the raw query map for a result set of the given size.
func (c *Client) ParameterFilter(m map[string][]string, size int, sortKeys []string) *Parameter {
	limit := c.GetLimit(m)
	offset := c.GetOffset(m, size)
	beginIdx := offset
	endIdx := limit + offset
	// If the user did not specify a limit (default), or the window would
	// run past the end, return all remaining items.
	if limit == constants.DefaultLimit || endIdx > size {
		endIdx = size
	}
	sortDir := c.GetSortDir(m)
	sortKey := c.GetSortKey(m, sortKeys)
	return &Parameter{beginIdx, endIdx, sortDir, sortKey}
}
// ************* FileShare code block *************

// fileshare_sortKey selects the field compared by FileShareSlice.Less; it
// must be set before sort.Sort is invoked.
// NOTE(review): a package-level sort key makes concurrent sorts racy —
// confirm callers never sort from multiple goroutines.
var fileshare_sortKey string

// FileShareSlice implements sort.Interface over fileshare records, ordered
// ascending by the field named in fileshare_sortKey.
type FileShareSlice []*model.FileShareSpec

func (fileshare FileShareSlice) Len() int { return len(fileshare) }
func (fileshare FileShareSlice) Swap(i, j int) {
	fileshare[i], fileshare[j] = fileshare[j], fileshare[i]
}

// Less compares by the selected field; an unrecognized key yields false,
// leaving the order unchanged.
func (fileshare FileShareSlice) Less(i, j int) bool {
	switch fileshare_sortKey {
	case "ID":
		return fileshare[i].Id < fileshare[j].Id
	case "NAME":
		return fileshare[i].Name < fileshare[j].Name
	case "STATUS":
		return fileshare[i].Status < fileshare[j].Status
	case "AVAILABILITYZONE":
		return fileshare[i].AvailabilityZone < fileshare[j].AvailabilityZone
	case "PROFILEID":
		return fileshare[i].ProfileId < fileshare[j].ProfileId
	case "TENANTID":
		return fileshare[i].TenantId < fileshare[j].TenantId
	case "SIZE":
		return fileshare[i].Size < fileshare[j].Size
	case "POOLID":
		return fileshare[i].PoolId < fileshare[j].PoolId
	case "DESCRIPTION":
		return fileshare[i].Description < fileshare[j].Description
	}
	return false
}
// FindFileShareValue returns the string form of the fileshare field named
// k; an unknown field name yields "".
func (c *Client) FindFileShareValue(k string, p *model.FileShareSpec) string {
	fields := map[string]string{
		"Id":               p.Id,
		"CreatedAt":        p.CreatedAt,
		"UpdatedAt":        p.UpdatedAt,
		"TenantId":         p.TenantId,
		"UserId":           p.UserId,
		"Name":             p.Name,
		"Description":      p.Description,
		"AvailabilityZone": p.AvailabilityZone,
		"Size":             strconv.FormatInt(p.Size, 10),
		"Status":           p.Status,
		"PoolId":           p.PoolId,
		"ProfileId":        p.ProfileId,
	}
	// Missing keys fall through to the map's zero value "".
	return fields[k]
}
// CreateFileShareAcl persists a new fileshare ACL record under the
// caller's tenant, rejecting a duplicate ACL for the same AccessTo
// address.
func (c *Client) CreateFileShareAcl(ctx *c.Context, fshare *model.FileShareAclSpec) (*model.FileShareAclSpec, error) {
	acls, err := c.ListFileSharesAcl(ctx)
	if err != nil {
		log.Error("failed to list acls")
		return nil, err
	}
	for _, acl := range acls {
		if acl.AccessTo == fshare.AccessTo {
			errstr := "acl already exists for this ip: " + acl.AccessTo + ". If you want to set new acl, first delete the existing one"
			log.Error(errstr)
			// FIX: use errors.New instead of fmt.Errorf — the message is
			// not a format string and could contain '%' (go vet printf).
			return nil, errors.New(errstr)
		}
	}
	fshare.TenantId = ctx.TenantId
	fshareBody, err := json.Marshal(fshare)
	if err != nil {
		return nil, err
	}
	dbReq := &Request{
		Url:     urls.GenerateFileShareAclURL(urls.Etcd, ctx.TenantId, fshare.Id),
		Content: string(fshareBody),
	}
	dbRes := c.Create(dbReq)
	if dbRes.Status != "Success" {
		log.Error("when create fileshare access rules in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return fshare, nil
}
// UpdateFileShareAcl refreshes the stored ACL's metadata and update
// timestamp, then persists it. Non-admin callers may only update ACLs of
// their own tenant.
func (c *Client) UpdateFileShareAcl(ctx *c.Context, acl *model.FileShareAclSpec) (*model.FileShareAclSpec, error) {
	result, err := c.GetFileShareAcl(ctx, acl.Id)
	if err != nil {
		return nil, err
	}
	// FIX: authorize before mutating/marshalling anything (the original
	// built the update payload first). If an admin wants to access another
	// tenant's resource it just fakes that tenantId.
	if !IsAdminContext(ctx) && !AuthorizeProjectContext(ctx, result.TenantId) {
		// FIX: "opertaion" typo in the error message.
		return nil, fmt.Errorf("operation is not permitted")
	}
	// Set update time
	result.UpdatedAt = time.Now().Format(constants.TimeFormat)
	result.Metadata = acl.Metadata
	jsonBody, err := json.Marshal(result)
	if err != nil {
		return nil, err
	}
	dbReq := &Request{
		Url:        urls.GenerateFileShareAclURL(urls.Etcd, result.TenantId, acl.Id),
		NewContent: string(jsonBody),
	}
	dbRes := c.Update(dbReq)
	if dbRes.Status != "Success" {
		log.Error("when update fileshare acl in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return result, nil
}
// CreateFileShare persists a fileshare record under the caller's tenant.
func (c *Client) CreateFileShare(ctx *c.Context, fshare *model.FileShareSpec) (*model.FileShareSpec, error) {
	fshare.TenantId = ctx.TenantId
	body, err := json.Marshal(fshare)
	if err != nil {
		return nil, err
	}
	req := &Request{
		Url:     urls.GenerateFileShareURL(urls.Etcd, ctx.TenantId, fshare.Id),
		Content: string(body),
	}
	if res := c.Create(req); res.Status != "Success" {
		log.Error("when create fileshare in db:", res.Error)
		return nil, errors.New(res.Error)
	}
	return fshare, nil
}
// SortFileShares sorts the fileshare list in place according to the
// parameter's sort key and direction, and returns it.
func (c *Client) SortFileShares(shares []*model.FileShareSpec, p *Parameter) []*model.FileShareSpec {
	// BUG FIX: the original assigned p.sortKey to volume_sortKey, so
	// FileShareSlice.Less (which reads fileshare_sortKey) never saw it.
	fileshare_sortKey = p.sortKey
	// BUG FIX: the original compared sortDir against "dsc", which matches
	// neither "asc" nor "desc" (GetSortDir only ever yields those), so the
	// list was always reverse-sorted. Sort ascending for "asc", descending
	// otherwise.
	if strings.EqualFold(p.sortDir, "asc") {
		sort.Sort(FileShareSlice(shares))
	} else {
		sort.Sort(sort.Reverse(FileShareSlice(shares)))
	}
	return shares
}
// ListFileSharesAclWithFilter returns all fileshare ACLs visible to the
// caller.
// NOTE(review): the query map m is accepted but never applied — unlike
// the other *WithFilter methods there is no FilterAndSort call; confirm
// whether ACL filtering was intentionally omitted.
func (c *Client) ListFileSharesAclWithFilter(ctx *c.Context, m map[string][]string) ([]*model.FileShareAclSpec, error) {
	fileshares, err := c.ListFileSharesAcl(ctx)
	if err != nil {
		log.Error("list fileshare failed: ", err)
		return nil, err
	}
	return fileshares, nil
}
// ListFileSharesAcl returns every fileshare ACL record visible to the
// caller; admins see ACLs across all tenants.
func (c *Client) ListFileSharesAcl(ctx *c.Context) ([]*model.FileShareAclSpec, error) {
	tenant := ctx.TenantId
	// Admin user should get all fileshares including the fileshares whose tenant is not admin.
	if IsAdminContext(ctx) {
		tenant = ""
	}
	dbRes := c.List(&Request{Url: urls.GenerateFileShareAclURL(urls.Etcd, tenant)})
	if dbRes.Status != "Success" {
		log.Error("when list fileshares in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	acls := []*model.FileShareAclSpec{}
	for _, raw := range dbRes.Message {
		item := &model.FileShareAclSpec{}
		if err := json.Unmarshal([]byte(raw), item); err != nil {
			log.Error("when parsing fileshare in db:", dbRes.Error)
			return nil, errors.New(dbRes.Error)
		}
		acls = append(acls, item)
	}
	return acls, nil
}
// ListFileShareAclsByShareId returns the ACLs attached to the given
// fileshare.
func (c *Client) ListFileShareAclsByShareId(ctx *c.Context, fileshareId string) ([]*model.FileShareAclSpec, error) {
	all, err := c.ListFileSharesAcl(ctx)
	if err != nil {
		return nil, err
	}
	var matched []*model.FileShareAclSpec
	for _, item := range all {
		if item.FileShareId != fileshareId {
			continue
		}
		matched = append(matched, item)
	}
	return matched, nil
}
// ListSnapshotsByShareId returns the snapshots taken from the given
// fileshare.
func (c *Client) ListSnapshotsByShareId(ctx *c.Context, fileshareId string) ([]*model.FileShareSnapshotSpec, error) {
	all, err := c.ListFileShareSnapshots(ctx)
	if err != nil {
		return nil, err
	}
	var matched []*model.FileShareSnapshotSpec
	for _, item := range all {
		if item.FileShareId != fileshareId {
			continue
		}
		matched = append(matched, item)
	}
	return matched, nil
}
// ListFileSharesWithFilter returns the caller-visible fileshares after
// applying query-parameter filtering, sorting and pagination.
func (c *Client) ListFileSharesWithFilter(ctx *c.Context, m map[string][]string) ([]*model.FileShareSpec, error) {
	fileshares, err := c.ListFileShares(ctx)
	if err != nil {
		log.Error("list fileshare failed: ", err)
		return nil, err
	}
	// FilterAndSort returns interface{}; re-assert each element back to
	// the concrete spec type.
	tmpFileshares := c.FilterAndSort(fileshares, m, sortableKeysMap[typeFileShares])
	var res = []*model.FileShareSpec{}
	for _, data := range tmpFileshares.([]interface{}) {
		res = append(res, data.(*model.FileShareSpec))
	}
	return res, nil
}
// ListFileShares returns every fileshare record visible to the caller;
// admins see fileshares across all tenants.
func (c *Client) ListFileShares(ctx *c.Context) ([]*model.FileShareSpec, error) {
	tenant := ctx.TenantId
	// Admin user should get all fileshares including the fileshares whose tenant is not admin.
	if IsAdminContext(ctx) {
		tenant = ""
	}
	dbRes := c.List(&Request{Url: urls.GenerateFileShareURL(urls.Etcd, tenant)})
	if dbRes.Status != "Success" {
		log.Error("when list fileshares in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	shares := []*model.FileShareSpec{}
	for _, raw := range dbRes.Message {
		item := &model.FileShareSpec{}
		if err := json.Unmarshal([]byte(raw), item); err != nil {
			log.Error("when parsing fileshare in db:", dbRes.Error)
			return nil, errors.New(dbRes.Error)
		}
		shares = append(shares, item)
	}
	return shares, nil
}
// ListFileSharesByProfileId returns the names of all fileshares created
// from the given profile.
func (c *Client) ListFileSharesByProfileId(ctx *c.Context, prfId string) ([]string, error) {
	shares, err := c.ListFileShares(ctx)
	if err != nil {
		return nil, err
	}
	var names []string
	for _, s := range shares {
		if s.ProfileId == prfId {
			names = append(names, s.Name)
		}
	}
	return names, nil
}
// GetFileShareAcl fetches a fileshare ACL by id. Non-admin callers get
// the result of the tenant-scoped lookup directly; an admin whose
// tenant-scoped lookup failed falls back to scanning ACLs of all tenants.
func (c *Client) GetFileShareAcl(ctx *c.Context, aclID string) (*model.FileShareAclSpec, error) {
	acl, err := c.getFileShareAcl(ctx, aclID)
	// Short-circuit: non-admins never get the cross-tenant fallback, and a
	// successful lookup needs no fallback.
	if !IsAdminContext(ctx) || err == nil {
		return acl, err
	}
	acls, err := c.ListFileSharesAcl(ctx)
	if err != nil {
		return nil, err
	}
	for _, f := range acls {
		if f.Id == aclID {
			return f, nil
		}
	}
	return nil, fmt.Errorf("specified fileshare acl(%s) can't find", aclID)
}
// getFileShareAcl fetches a fileshare ACL by id within the caller's
// tenant scope.
func (c *Client) getFileShareAcl(ctx *c.Context, aclID string) (*model.FileShareAclSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateFileShareAclURL(urls.Etcd, ctx.TenantId, aclID),
	}
	dbRes := c.Get(dbReq)
	if dbRes.Status != "Success" {
		log.Error("when get fileshare acl in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	// FIX: guard against an empty result — indexing Message[0] blindly
	// panicked when the backend reported success with no payload.
	if len(dbRes.Message) == 0 {
		return nil, fmt.Errorf("fileshare acl(%s) not found", aclID)
	}
	var acl = &model.FileShareAclSpec{}
	if err := json.Unmarshal([]byte(dbRes.Message[0]), acl); err != nil {
		log.Error("when parsing fileshare acl in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return acl, nil
}
// GetFileShare fetches a fileshare by id. Non-admin callers get the
// tenant-scoped lookup result directly; an admin whose tenant-scoped
// lookup failed falls back to scanning fileshares of all tenants.
func (c *Client) GetFileShare(ctx *c.Context, fshareID string) (*model.FileShareSpec, error) {
	fshare, err := c.getFileShare(ctx, fshareID)
	// Short-circuit: only a failed admin lookup needs the fallback scan.
	if !IsAdminContext(ctx) || err == nil {
		return fshare, err
	}
	fshares, err := c.ListFileShares(ctx)
	if err != nil {
		return nil, err
	}
	for _, f := range fshares {
		if f.Id == fshareID {
			return f, nil
		}
	}
	return nil, fmt.Errorf("specified fileshare(%s) can't find", fshareID)
}
// getFileShare fetches a fileshare by id within the caller's tenant scope.
func (c *Client) getFileShare(ctx *c.Context, fshareID string) (*model.FileShareSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateFileShareURL(urls.Etcd, ctx.TenantId, fshareID),
	}
	dbRes := c.Get(dbReq)
	if dbRes.Status != "Success" {
		log.Error("when get fileshare in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	// FIX: guard against an empty result — indexing Message[0] blindly
	// panicked when the backend reported success with no payload.
	if len(dbRes.Message) == 0 {
		return nil, fmt.Errorf("fileshare(%s) not found", fshareID)
	}
	var fshare = &model.FileShareSpec{}
	if err := json.Unmarshal([]byte(dbRes.Message[0]), fshare); err != nil {
		log.Error("when parsing fileshare in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return fshare, nil
}
// UpdateFileShare merges non-zero fields of fshare into the stored record
// and persists it. Non-admin callers may only update fileshares of their
// own tenant.
func (c *Client) UpdateFileShare(ctx *c.Context, fshare *model.FileShareSpec) (*model.FileShareSpec, error) {
	result, err := c.GetFileShare(ctx, fshare.Id)
	if err != nil {
		return nil, err
	}
	// FIX: authorize before mutating/marshalling anything (the original
	// built the full update payload first). If an admin wants to access
	// another tenant's resource it just fakes that tenantId.
	if !IsAdminContext(ctx) && !AuthorizeProjectContext(ctx, result.TenantId) {
		// FIX: "opertaion" typo in the error message.
		return nil, fmt.Errorf("operation is not permitted")
	}
	if fshare.Name != "" {
		result.Name = fshare.Name
	}
	if fshare.Description != "" {
		result.Description = fshare.Description
	}
	if fshare.ExportLocations != nil {
		result.ExportLocations = fshare.ExportLocations
	}
	if fshare.Protocols != nil {
		result.Protocols = fshare.Protocols
	}
	if fshare.Metadata != nil {
		result.Metadata = fshare.Metadata
	}
	if fshare.Status != "" {
		result.Status = fshare.Status
	}
	if fshare.PoolId != "" {
		result.PoolId = fshare.PoolId
	}
	// Set update time
	result.UpdatedAt = time.Now().Format(constants.TimeFormat)
	log.V(5).Infof("update file share object %+v into db", result)
	body, err := json.Marshal(result)
	if err != nil {
		return nil, err
	}
	dbReq := &Request{
		Url:        urls.GenerateFileShareURL(urls.Etcd, result.TenantId, fshare.Id),
		NewContent: string(body),
	}
	dbRes := c.Update(dbReq)
	if dbRes.Status != "Success" {
		log.Error("when update fileshare in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return result, nil
}
// DeleteFileShareAcl removes a fileshare ACL record. For an admin the
// ACL's own tenant is resolved first so the delete targets the right key.
func (c *Client) DeleteFileShareAcl(ctx *c.Context, aclID string) error {
	// If an admin want to access other tenant's resource just fake other's tenantId.
	tenantId := ctx.TenantId
	if IsAdminContext(ctx) {
		fshare, err := c.GetFileShareAcl(ctx, aclID)
		if err != nil {
			log.Error(err)
			return err
		}
		tenantId = fshare.TenantId
	}
	dbReq := &Request{
		Url: urls.GenerateFileShareAclURL(urls.Etcd, tenantId, aclID),
	}
	dbRes := c.Delete(dbReq)
	if dbRes.Status != "Success" {
		// FIX: the original log message said "delete fileshare", hiding
		// that the ACL delete is what failed.
		log.Error("when delete fileshare acl in db:", dbRes.Error)
		return errors.New(dbRes.Error)
	}
	return nil
}
// DeleteFileShare removes a fileshare record. For an admin the share's
// own tenant is resolved first so the delete targets the right key.
func (c *Client) DeleteFileShare(ctx *c.Context, fileshareID string) error {
	// If an admin want to access other tenant's resource just fake other's tenantId.
	tenant := ctx.TenantId
	if IsAdminContext(ctx) {
		share, err := c.GetFileShare(ctx, fileshareID)
		if err != nil {
			log.Error(err)
			return err
		}
		tenant = share.TenantId
	}
	res := c.Delete(&Request{
		Url: urls.GenerateFileShareURL(urls.Etcd, tenant, fileshareID),
	})
	if res.Status != "Success" {
		log.Error("when delete fileshare in db:", res.Error)
		return errors.New(res.Error)
	}
	return nil
}
// CreateFileShareSnapshot persists a fileshare snapshot record under the
// caller's tenant.
func (c *Client) CreateFileShareSnapshot(ctx *c.Context, snp *model.FileShareSnapshotSpec) (*model.FileShareSnapshotSpec, error) {
	snp.TenantId = ctx.TenantId
	body, err := json.Marshal(snp)
	if err != nil {
		return nil, err
	}
	req := &Request{
		Url:     urls.GenerateFileShareSnapshotURL(urls.Etcd, ctx.TenantId, snp.Id),
		Content: string(body),
	}
	if res := c.Create(req); res.Status != "Success" {
		log.Error("when create fileshare snapshot in db:", res.Error)
		return nil, errors.New(res.Error)
	}
	return snp, nil
}
// GetFileShareSnapshot fetches a fileshare snapshot by id. Non-admin
// callers get the tenant-scoped lookup result directly; an admin whose
// tenant-scoped lookup failed falls back to scanning all tenants'
// snapshots.
func (c *Client) GetFileShareSnapshot(ctx *c.Context, snpID string) (*model.FileShareSnapshotSpec, error) {
	snap, err := c.getFileShareSnapshot(ctx, snpID)
	// Short-circuit: only a failed admin lookup needs the fallback scan.
	if !IsAdminContext(ctx) || err == nil {
		return snap, err
	}
	snaps, err := c.ListFileShareSnapshots(ctx)
	if err != nil {
		return nil, err
	}
	for _, v := range snaps {
		if v.Id == snpID {
			return v, nil
		}
	}
	return nil, fmt.Errorf("specified fileshare snapshot(%s) can't find", snpID)
}
// getFileShareSnapshot fetches a fileshare snapshot by id within the
// caller's tenant scope.
func (c *Client) getFileShareSnapshot(ctx *c.Context, snpID string) (*model.FileShareSnapshotSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateFileShareSnapshotURL(urls.Etcd, ctx.TenantId, snpID),
	}
	dbRes := c.Get(dbReq)
	if dbRes.Status != "Success" {
		// FIX: the original log message said "fileshare attachment"; this
		// function reads snapshots.
		log.Error("when get fileshare snapshot in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	// FIX: guard against an empty result — indexing Message[0] blindly
	// panicked when the backend reported success with no payload.
	if len(dbRes.Message) == 0 {
		return nil, fmt.Errorf("fileshare snapshot(%s) not found", snpID)
	}
	var fs = &model.FileShareSnapshotSpec{}
	if err := json.Unmarshal([]byte(dbRes.Message[0]), fs); err != nil {
		log.Error("when parsing fileshare snapshot in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return fs, nil
}
// ListFileShareSnapshots returns every fileshare snapshot visible to the
// caller; admins see snapshots across all tenants.
func (c *Client) ListFileShareSnapshots(ctx *c.Context) ([]*model.FileShareSnapshotSpec, error) {
	tenant := ctx.TenantId
	if IsAdminContext(ctx) {
		tenant = ""
	}
	dbRes := c.List(&Request{Url: urls.GenerateFileShareSnapshotURL(urls.Etcd, tenant)})
	if dbRes.Status != "Success" {
		log.Error("when list fileshare snapshots in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	snaps := []*model.FileShareSnapshotSpec{}
	for _, raw := range dbRes.Message {
		item := &model.FileShareSnapshotSpec{}
		if err := json.Unmarshal([]byte(raw), item); err != nil {
			log.Error("When parsing fileshare snapshot in db:", dbRes.Error)
			return nil, errors.New(dbRes.Error)
		}
		snaps = append(snaps, item)
	}
	return snaps, nil
}
// ListFileShareSnapshotsWithFilter returns the caller-visible fileshare
// snapshots after applying query-parameter filtering, sorting and
// pagination.
func (c *Client) ListFileShareSnapshotsWithFilter(ctx *c.Context, m map[string][]string) ([]*model.FileShareSnapshotSpec, error) {
	fileshareSnapshots, err := c.ListFileShareSnapshots(ctx)
	if err != nil {
		log.Error("list fileshareSnapshots failed: ", err)
		return nil, err
	}
	// FilterAndSort returns interface{}; re-assert each element back to
	// the concrete spec type.
	tmpFileshareSnapshots := c.FilterAndSort(fileshareSnapshots, m, sortableKeysMap[typeFileShareSnapshots])
	var res = []*model.FileShareSnapshotSpec{}
	for _, data := range tmpFileshareSnapshots.([]interface{}) {
		res = append(res, data.(*model.FileShareSnapshotSpec))
	}
	return res, nil
}
// UpdateFileShareSnapshot merges non-zero fields of snp into the stored
// snapshot record and persists it. Non-admin callers may only update
// snapshots of their own tenant.
func (c *Client) UpdateFileShareSnapshot(ctx *c.Context, snpID string, snp *model.FileShareSnapshotSpec) (*model.FileShareSnapshotSpec, error) {
	result, err := c.GetFileShareSnapshot(ctx, snpID)
	if err != nil {
		return nil, err
	}
	// FIX: authorize before mutating/marshalling anything (the original
	// built the update payload first). If an admin wants to access another
	// tenant's resource it just fakes that tenantId.
	if !IsAdminContext(ctx) && !AuthorizeProjectContext(ctx, result.TenantId) {
		// FIX: "opertaion" typo in the error message.
		return nil, fmt.Errorf("operation is not permitted")
	}
	if snp.Name != "" {
		result.Name = snp.Name
	}
	if snp.Description != "" {
		result.Description = snp.Description
	}
	if snp.Status != "" {
		result.Status = snp.Status
	}
	if snp.SnapshotSize > 0 {
		result.SnapshotSize = snp.SnapshotSize
	}
	// Set update time
	result.UpdatedAt = time.Now().Format(constants.TimeFormat)
	result.Metadata = snp.Metadata
	atcBody, err := json.Marshal(result)
	if err != nil {
		return nil, err
	}
	dbReq := &Request{
		Url:        urls.GenerateFileShareSnapshotURL(urls.Etcd, result.TenantId, snpID),
		NewContent: string(atcBody),
	}
	dbRes := c.Update(dbReq)
	if dbRes.Status != "Success" {
		log.Error("when update fileshare snapshot in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return result, nil
}
// DeleteFileShareSnapshot removes a fileshare snapshot record. For an
// admin the snapshot's own tenant is resolved first so the delete targets
// the right key.
func (c *Client) DeleteFileShareSnapshot(ctx *c.Context, snpID string) error {
	// If an admin want to access other tenant's resource just fake other's tenantId.
	tenant := ctx.TenantId
	if IsAdminContext(ctx) {
		snap, err := c.GetFileShareSnapshot(ctx, snpID)
		if err != nil {
			log.Error(err)
			return err
		}
		tenant = snap.TenantId
	}
	res := c.Delete(&Request{
		Url: urls.GenerateFileShareSnapshotURL(urls.Etcd, tenant, snpID),
	})
	if res.Status != "Success" {
		log.Error("when delete fileshare snapshot in db:", res.Error)
		return errors.New(res.Error)
	}
	return nil
}
// ********************** End Of FileShare *********************
// CreateDock persists a dock record, generating an id and creation
// timestamp when they are absent.
func (c *Client) CreateDock(ctx *c.Context, dck *model.DockSpec) (*model.DockSpec, error) {
	if dck.Id == "" {
		dck.Id = uuid.NewV4().String()
	}
	if dck.CreatedAt == "" {
		dck.CreatedAt = time.Now().Format(constants.TimeFormat)
	}
	body, err := json.Marshal(dck)
	if err != nil {
		return nil, err
	}
	req := &Request{
		Url:     urls.GenerateDockURL(urls.Etcd, "", dck.Id),
		Content: string(body),
	}
	if res := c.Create(req); res.Status != "Success" {
		log.Error("when create dock in db:", res.Error)
		return nil, errors.New(res.Error)
	}
	return dck, nil
}
// GetDock fetches a dock record by id. Docks are global resources, so no
// tenant scoping applies.
func (c *Client) GetDock(ctx *c.Context, dckID string) (*model.DockSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateDockURL(urls.Etcd, "", dckID),
	}
	dbRes := c.Get(dbReq)
	if dbRes.Status != "Success" {
		log.Error("when get dock in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	// FIX: guard against an empty result — indexing Message[0] blindly
	// panicked when the backend reported success with no payload.
	if len(dbRes.Message) == 0 {
		return nil, fmt.Errorf("dock(%s) not found", dckID)
	}
	var dck = &model.DockSpec{}
	if err := json.Unmarshal([]byte(dbRes.Message[0]), dck); err != nil {
		log.Error("when parsing dock in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return dck, nil
}
// GetDockByPoolId resolves the dock that owns the given storage pool by
// matching the pool's DockId against the dock list.
func (c *Client) GetDockByPoolId(ctx *c.Context, poolId string) (*model.DockSpec, error) {
	pool, err := c.GetPool(ctx, poolId)
	if err != nil {
		log.Error("Get pool failed in db: ", err)
		return nil, err
	}
	docks, err := c.ListDocks(ctx)
	if err != nil {
		// FIX: the original log message repeated "failed failed".
		log.Error("List docks failed in db: ", err)
		return nil, err
	}
	for _, dock := range docks {
		if pool.DockId == dock.Id {
			return dock, nil
		}
	}
	return nil, errors.New("Get dock failed by pool id: " + poolId)
}
// ListDocks returns every dock record; docks are global resources.
func (c *Client) ListDocks(ctx *c.Context) ([]*model.DockSpec, error) {
	dbRes := c.List(&Request{Url: urls.GenerateDockURL(urls.Etcd, "")})
	if dbRes.Status != "Success" {
		log.Error("When list docks in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	docks := []*model.DockSpec{}
	for _, raw := range dbRes.Message {
		item := &model.DockSpec{}
		if err := json.Unmarshal([]byte(raw), item); err != nil {
			log.Error("When parsing dock in db:", dbRes.Error)
			return nil, errors.New(dbRes.Error)
		}
		docks = append(docks, item)
	}
	return docks, nil
}
// ListDocksWithFilter returns docks after applying query-parameter
// filtering, sorting and pagination.
func (c *Client) ListDocksWithFilter(ctx *c.Context, m map[string][]string) ([]*model.DockSpec, error) {
	docks, err := c.ListDocks(ctx)
	if err != nil {
		log.Error("List docks failed: ", err.Error())
		return nil, err
	}
	// FilterAndSort returns interface{}; re-assert each element back to
	// the concrete spec type.
	tmpDocks := c.FilterAndSort(docks, m, sortableKeysMap[typeDocks])
	var res = []*model.DockSpec{}
	for _, data := range tmpDocks.([]interface{}) {
		res = append(res, data.(*model.DockSpec))
	}
	return res, nil
}
// UpdateDock updates name and/or description of a stored dock and
// refreshes its UpdatedAt timestamp.
func (c *Client) UpdateDock(ctx *c.Context, dckID, name, desp string) (*model.DockSpec, error) {
	dck, err := c.GetDock(ctx, dckID)
	if err != nil {
		return nil, err
	}
	if name != "" {
		dck.Name = name
	}
	if desp != "" {
		dck.Description = desp
	}
	dck.UpdatedAt = time.Now().Format(constants.TimeFormat)
	body, err := json.Marshal(dck)
	if err != nil {
		return nil, err
	}
	req := &Request{
		Url:        urls.GenerateDockURL(urls.Etcd, "", dckID),
		NewContent: string(body),
	}
	if res := c.Update(req); res.Status != "Success" {
		log.Error("When update dock in db:", res.Error)
		return nil, errors.New(res.Error)
	}
	return dck, nil
}
// DeleteDock removes a dock record by id.
func (c *Client) DeleteDock(ctx *c.Context, dckID string) error {
	res := c.Delete(&Request{Url: urls.GenerateDockURL(urls.Etcd, "", dckID)})
	if res.Status != "Success" {
		log.Error("When delete dock in db:", res.Error)
		return errors.New(res.Error)
	}
	return nil
}
// CreatePool persists a storage-pool record, generating an id and
// creation timestamp when they are absent.
func (c *Client) CreatePool(ctx *c.Context, pol *model.StoragePoolSpec) (*model.StoragePoolSpec, error) {
	if pol.Id == "" {
		pol.Id = uuid.NewV4().String()
	}
	if pol.CreatedAt == "" {
		pol.CreatedAt = time.Now().Format(constants.TimeFormat)
	}
	body, err := json.Marshal(pol)
	if err != nil {
		return nil, err
	}
	req := &Request{
		Url:     urls.GeneratePoolURL(urls.Etcd, "", pol.Id),
		Content: string(body),
	}
	if res := c.Create(req); res.Status != "Success" {
		log.Error("When create pol in db:", res.Error)
		return nil, errors.New(res.Error)
	}
	return pol, nil
}
// ListPoolsWithFilter returns storage pools after applying
// query-parameter filtering, sorting and pagination.
func (c *Client) ListPoolsWithFilter(ctx *c.Context, m map[string][]string) ([]*model.StoragePoolSpec, error) {
	pools, err := c.ListPools(ctx)
	if err != nil {
		log.Error("List pools failed: ", err.Error())
		return nil, err
	}
	// FilterAndSort returns interface{}; re-assert each element back to
	// the concrete spec type.
	tmpPools := c.FilterAndSort(pools, m, sortableKeysMap[typePools])
	var res = []*model.StoragePoolSpec{}
	for _, data := range tmpPools.([]interface{}) {
		res = append(res, data.(*model.StoragePoolSpec))
	}
	return res, nil
}
// GetPool fetches a storage-pool record by id. Pools are global
// resources, so no tenant scoping applies.
func (c *Client) GetPool(ctx *c.Context, polID string) (*model.StoragePoolSpec, error) {
	dbReq := &Request{
		Url: urls.GeneratePoolURL(urls.Etcd, "", polID),
	}
	dbRes := c.Get(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When get pool in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	// FIX: guard against an empty result — indexing Message[0] blindly
	// panicked when the backend reported success with no payload.
	if len(dbRes.Message) == 0 {
		return nil, fmt.Errorf("pool(%s) not found", polID)
	}
	var pol = &model.StoragePoolSpec{}
	if err := json.Unmarshal([]byte(dbRes.Message[0]), pol); err != nil {
		log.Error("When parsing pool in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return pol, nil
}
// ListAvailabilityZones returns the distinct availability zones of all
// storage pools.
func (c *Client) ListAvailabilityZones(ctx *c.Context) ([]string, error) {
	dbRes := c.List(&Request{Url: urls.GeneratePoolURL(urls.Etcd, "")})
	if dbRes.Status != "Success" {
		log.Error("Failed to get AZ for pools in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	azs := []string{}
	if len(dbRes.Message) == 0 {
		return azs, nil
	}
	for _, raw := range dbRes.Message {
		pool := &model.StoragePoolSpec{}
		if err := json.Unmarshal([]byte(raw), pool); err != nil {
			log.Error("When parsing pool in db:", dbRes.Error)
			return nil, errors.New(dbRes.Error)
		}
		azs = append(azs, pool.AvailabilityZone)
	}
	// Collapse duplicate zone names before returning.
	return utils.RvRepElement(azs), nil
}
// ListPools returns every storage-pool record; pools are global resources.
func (c *Client) ListPools(ctx *c.Context) ([]*model.StoragePoolSpec, error) {
	dbRes := c.List(&Request{Url: urls.GeneratePoolURL(urls.Etcd, "")})
	if dbRes.Status != "Success" {
		log.Error("When list pools in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	pools := []*model.StoragePoolSpec{}
	for _, raw := range dbRes.Message {
		item := &model.StoragePoolSpec{}
		if err := json.Unmarshal([]byte(raw), item); err != nil {
			log.Error("When parsing pool in db:", dbRes.Error)
			return nil, errors.New(dbRes.Error)
		}
		pools = append(pools, item)
	}
	return pools, nil
}
// UpdatePool updates a pool's name and/or description and refreshes its
// UpdatedAt timestamp.
// NOTE(review): the usedCapacity and used parameters are accepted but
// never applied to the record — confirm whether capacity updates were
// meant to be persisted here.
func (c *Client) UpdatePool(ctx *c.Context, polID, name, desp string, usedCapacity int64, used bool) (*model.StoragePoolSpec, error) {
	pol, err := c.GetPool(ctx, polID)
	if err != nil {
		return nil, err
	}
	if name != "" {
		pol.Name = name
	}
	if desp != "" {
		pol.Description = desp
	}
	pol.UpdatedAt = time.Now().Format(constants.TimeFormat)
	polBody, err := json.Marshal(pol)
	if err != nil {
		return nil, err
	}
	dbReq := &Request{
		Url:        urls.GeneratePoolURL(urls.Etcd, "", polID),
		NewContent: string(polBody),
	}
	dbRes := c.Update(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When update pool in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return pol, nil
}
// DeletePool removes a storage-pool record by id.
func (c *Client) DeletePool(ctx *c.Context, polID string) error {
	res := c.Delete(&Request{Url: urls.GeneratePoolURL(urls.Etcd, "", polID)})
	if res.Status != "Success" {
		log.Error("When delete pool in db:", res.Error)
		return errors.New(res.Error)
	}
	return nil
}
// CreateProfile persists a new profile record, generating an id and
// creation timestamp when absent. Profile names must be unique.
// NOTE(review): AddCustomProperty/RemoveCustomProperty route their
// persistence through this function, but the name-uniqueness check below
// rejects any profile whose name already exists — confirm those callers
// still work.
func (c *Client) CreateProfile(ctx *c.Context, prf *model.ProfileSpec) (*model.ProfileSpec, error) {
	if prf.Id == "" {
		prf.Id = uuid.NewV4().String()
	}
	if prf.CreatedAt == "" {
		prf.CreatedAt = time.Now().Format(constants.TimeFormat)
	}
	// profile name must be unique.
	if _, err := c.getProfileByName(ctx, prf.Name); err == nil {
		return nil, fmt.Errorf("the profile name '%s' already exists", prf.Name)
	}
	prfBody, err := json.Marshal(prf)
	if err != nil {
		return nil, err
	}
	dbReq := &Request{
		Url:     urls.GenerateProfileURL(urls.Etcd, "", prf.Id),
		Content: string(prfBody),
	}
	dbRes := c.Create(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When create profile in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return prf, nil
}
// GetProfile fetches a profile record by id. Profiles are global
// resources, so no tenant scoping applies.
func (c *Client) GetProfile(ctx *c.Context, prfID string) (*model.ProfileSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateProfileURL(urls.Etcd, "", prfID),
	}
	dbRes := c.Get(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When get profile in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	// FIX: guard against an empty result — indexing Message[0] blindly
	// panicked when the backend reported success with no payload.
	if len(dbRes.Message) == 0 {
		return nil, fmt.Errorf("profile(%s) not found", prfID)
	}
	var prf = &model.ProfileSpec{}
	if err := json.Unmarshal([]byte(dbRes.Message[0]), prf); err != nil {
		log.Error("When parsing profile in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return prf, nil
}
// getProfileByName scans all profiles for one with the given name and
// returns a NotFound error when none matches.
func (c *Client) getProfileByName(ctx *c.Context, name string) (*model.ProfileSpec, error) {
	profiles, err := c.ListProfiles(ctx)
	if err != nil {
		log.Error("List profile failed: ", err)
		return nil, err
	}
	for i := range profiles {
		if profiles[i].Name == name {
			return profiles[i], nil
		}
	}
	return nil, model.NewNotFoundError(fmt.Sprintf("can't find profile(name: %s)", name))
}
// getProfileByNameAndType scans all profiles for one matching both the
// given name and storage type, returning a NotFound error otherwise.
func (c *Client) getProfileByNameAndType(ctx *c.Context, name, storageType string) (*model.ProfileSpec, error) {
	profiles, err := c.ListProfiles(ctx)
	if err != nil {
		log.Error("List profile failed: ", err)
		return nil, err
	}
	for i := range profiles {
		if profiles[i].Name == name && profiles[i].StorageType == storageType {
			return profiles[i], nil
		}
	}
	return nil, model.NewNotFoundError(fmt.Sprintf("can't find profile(name: %s, storageType:%s)", name, storageType))
}
// GetDefaultProfile returns the default block-storage profile, looked up
// by the well-known default name and the block storage type.
func (c *Client) GetDefaultProfile(ctx *c.Context) (*model.ProfileSpec, error) {
	return c.getProfileByNameAndType(ctx, defaultBlockProfileName, typeBlock)
}
// GetDefaultProfileFileShare returns the default fileshare profile,
// looked up by the well-known default name and the file storage type.
func (c *Client) GetDefaultProfileFileShare(ctx *c.Context) (*model.ProfileSpec, error) {
	return c.getProfileByNameAndType(ctx, defaultFileProfileName, typeFile)
}
// ListProfiles returns every profile record; profiles are global
// resources.
func (c *Client) ListProfiles(ctx *c.Context) ([]*model.ProfileSpec, error) {
	dbRes := c.List(&Request{Url: urls.GenerateProfileURL(urls.Etcd, "")})
	if dbRes.Status != "Success" {
		log.Error("When list profiles in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	profiles := []*model.ProfileSpec{}
	for _, raw := range dbRes.Message {
		item := &model.ProfileSpec{}
		if err := json.Unmarshal([]byte(raw), item); err != nil {
			log.Error("When parsing profile in db:", dbRes.Error)
			return nil, errors.New(dbRes.Error)
		}
		profiles = append(profiles, item)
	}
	return profiles, nil
}
// ListProfilesWithFilter returns profiles after applying query-parameter
// filtering, sorting and pagination.
func (c *Client) ListProfilesWithFilter(ctx *c.Context, m map[string][]string) ([]*model.ProfileSpec, error) {
	profiles, err := c.ListProfiles(ctx)
	if err != nil {
		log.Error("List profiles failed: ", err)
		return nil, err
	}
	// FilterAndSort returns interface{}; re-assert each element back to
	// the concrete spec type.
	tmpProfiles := c.FilterAndSort(profiles, m, sortableKeysMap[typeProfiles])
	var res = []*model.ProfileSpec{}
	for _, data := range tmpProfiles.([]interface{}) {
		res = append(res, data.(*model.ProfileSpec))
	}
	return res, nil
}
// UpdateProfile merges the non-zero fields of input (name, description,
// custom properties) into the stored profile, refreshes its UpdatedAt
// timestamp, and persists it.
func (c *Client) UpdateProfile(ctx *c.Context, prfID string, input *model.ProfileSpec) (*model.ProfileSpec, error) {
	prf, err := c.GetProfile(ctx, prfID)
	if err != nil {
		return nil, err
	}
	if name := input.Name; name != "" {
		prf.Name = name
	}
	if desp := input.Description; desp != "" {
		prf.Description = desp
	}
	if props := input.CustomProperties; len(props) != 0 {
		if prf.CustomProperties == nil {
			prf.CustomProperties = make(map[string]interface{})
		}
		for k, v := range props {
			prf.CustomProperties[k] = v
		}
	}
	// FIX: the original assigned UpdatedAt twice (before and after the
	// property merge); one assignment after all mutations suffices.
	prf.UpdatedAt = time.Now().Format(constants.TimeFormat)
	prfBody, err := json.Marshal(prf)
	if err != nil {
		return nil, err
	}
	dbReq := &Request{
		Url:        urls.GenerateProfileURL(urls.Etcd, "", prfID),
		NewContent: string(prfBody),
	}
	dbRes := c.Update(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When update profile in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return prf, nil
}
// DeleteProfile removes a profile record by id.
func (c *Client) DeleteProfile(ctx *c.Context, prfID string) error {
	res := c.Delete(&Request{Url: urls.GenerateProfileURL(urls.Etcd, "", prfID)})
	if res.Status != "Success" {
		log.Error("When delete profile in db:", res.Error)
		return errors.New(res.Error)
	}
	return nil
}
// AddCustomProperty merges the given custom properties into the profile
// and persists the result, returning the updated property map.
func (c *Client) AddCustomProperty(ctx *c.Context, prfID string, ext model.CustomPropertiesSpec) (*model.CustomPropertiesSpec, error) {
	prf, err := c.GetProfile(ctx, prfID)
	if err != nil {
		return nil, err
	}
	if prf.CustomProperties == nil {
		prf.CustomProperties = make(map[string]interface{})
	}
	for k, v := range ext {
		prf.CustomProperties[k] = v
	}
	prf.UpdatedAt = time.Now().Format(constants.TimeFormat)
	// BUG FIX: the original persisted via CreateProfile, whose
	// profile-name uniqueness check always rejects the already-existing
	// profile; write the updated record through an Update request instead.
	prfBody, err := json.Marshal(prf)
	if err != nil {
		return nil, err
	}
	dbReq := &Request{
		Url:        urls.GenerateProfileURL(urls.Etcd, "", prfID),
		NewContent: string(prfBody),
	}
	dbRes := c.Update(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When update profile in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return &prf.CustomProperties, nil
}
// ListCustomProperties returns the custom-properties map of the given
// profile.
func (c *Client) ListCustomProperties(ctx *c.Context, prfID string) (*model.CustomPropertiesSpec, error) {
	prf, getErr := c.GetProfile(ctx, prfID)
	if getErr != nil {
		return nil, getErr
	}
	return &prf.CustomProperties, nil
}
// RemoveCustomProperty deletes one custom property from the profile and
// persists the result. Deleting a key that is absent is a no-op.
func (c *Client) RemoveCustomProperty(ctx *c.Context, prfID, customKey string) error {
	prf, err := c.GetProfile(ctx, prfID)
	if err != nil {
		return err
	}
	delete(prf.CustomProperties, customKey)
	prf.UpdatedAt = time.Now().Format(constants.TimeFormat)
	// BUG FIX: the original persisted via CreateProfile, whose
	// profile-name uniqueness check always rejects the already-existing
	// profile; write the updated record through an Update request instead.
	prfBody, err := json.Marshal(prf)
	if err != nil {
		return err
	}
	dbReq := &Request{
		Url:        urls.GenerateProfileURL(urls.Etcd, "", prfID),
		NewContent: string(prfBody),
	}
	dbRes := c.Update(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When update profile in db:", dbRes.Error)
		return errors.New(dbRes.Error)
	}
	return nil
}
// CreateVolume stores the given volume in etcd under the caller's tenant. It
// refuses to create a volume while no profile exists in the database.
func (c *Client) CreateVolume(ctx *c.Context, vol *model.VolumeSpec) (*model.VolumeSpec, error) {
	profiles, err := c.ListProfiles(ctx)
	if err != nil {
		return nil, err
	}
	if len(profiles) == 0 {
		return nil, errors.New("No profile in db.")
	}
	vol.TenantId = ctx.TenantId
	body, err := json.Marshal(vol)
	if err != nil {
		return nil, err
	}
	res := c.Create(&Request{
		Url:     urls.GenerateVolumeURL(urls.Etcd, ctx.TenantId, vol.Id),
		Content: string(body),
	})
	if res.Status != "Success" {
		log.Error("When create volume in db:", res.Error)
		return nil, errors.New(res.Error)
	}
	return vol, nil
}
// GetVolume fetches the volume identified by volID. For an admin context a
// miss under the admin's own tenant falls back to scanning all tenants.
func (c *Client) GetVolume(ctx *c.Context, volID string) (*model.VolumeSpec, error) {
	vol, err := c.getVolume(ctx, volID)
	if err == nil || !IsAdminContext(ctx) {
		return vol, err
	}
	// Admin fallback: look through every tenant's volumes.
	all, listErr := c.ListVolumes(ctx)
	if listErr != nil {
		return nil, listErr
	}
	for _, candidate := range all {
		if candidate.Id == volID {
			return candidate, nil
		}
	}
	return nil, fmt.Errorf("specified volume(%s) can't find", volID)
}
// getVolume reads the volume identified by volID from etcd under the
// caller's own tenant and unmarshals it.
func (c *Client) getVolume(ctx *c.Context, volID string) (*model.VolumeSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateVolumeURL(urls.Etcd, ctx.TenantId, volID),
	}
	dbRes := c.Get(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When get volume in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var vol = &model.VolumeSpec{}
	// Return the real unmarshal error; dbRes.Error is empty after a
	// successful get, so the original logged and returned an empty error.
	if err := json.Unmarshal([]byte(dbRes.Message[0]), vol); err != nil {
		log.Error("When parsing volume in db:", err)
		return nil, err
	}
	return vol, nil
}
// ListVolumes returns all volumes visible to the caller. An admin context
// lists volumes across every tenant; other contexts see only their own.
func (c *Client) ListVolumes(ctx *c.Context) ([]*model.VolumeSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateVolumeURL(urls.Etcd, ctx.TenantId),
	}
	// Admin user should get all volumes including the volumes whose tenant is not admin.
	if IsAdminContext(ctx) {
		dbReq.Url = urls.GenerateVolumeURL(urls.Etcd, "")
	}
	dbRes := c.List(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When list volumes in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var vols = []*model.VolumeSpec{}
	if len(dbRes.Message) == 0 {
		return vols, nil
	}
	for _, msg := range dbRes.Message {
		var vol = &model.VolumeSpec{}
		// Return the real unmarshal error; dbRes.Error is empty after a
		// successful list, so the original returned an empty error here.
		if err := json.Unmarshal([]byte(msg), vol); err != nil {
			log.Error("When parsing volume in db:", err)
			return nil, err
		}
		vols = append(vols, vol)
	}
	return vols, nil
}
// ListVolumesByProfileId returns an entry for each volume bound to the
// profile prfID. NOTE(review): the returned strings are volume NAMES, not
// ids — confirm callers expect names.
func (c *Client) ListVolumesByProfileId(ctx *c.Context, prfID string) ([]string, error) {
	volumes, err := c.ListVolumes(ctx)
	if err != nil {
		return nil, err
	}
	var names []string
	for _, volume := range volumes {
		if volume.ProfileId == prfID {
			names = append(names, volume.Name)
		}
	}
	return names, nil
}
// volume_sortKey selects the field VolumeSlice.Less compares on. It is
// package-level state assigned by SortVolumes just before sorting.
// NOTE(review): shared mutable state — not safe for concurrent SortVolumes
// calls; confirm callers are serialized.
var volume_sortKey string
// VolumeSlice implements sort.Interface over volumes, ordering by the field
// named in volume_sortKey.
type VolumeSlice []*model.VolumeSpec
func (volume VolumeSlice) Len() int { return len(volume) }
func (volume VolumeSlice) Swap(i, j int) { volume[i], volume[j] = volume[j], volume[i] }
// Less compares the field selected by volume_sortKey; an unrecognized key
// makes every comparison false, leaving the slice order unchanged.
func (volume VolumeSlice) Less(i, j int) bool {
	switch volume_sortKey {
	case "ID":
		return volume[i].Id < volume[j].Id
	case "NAME":
		return volume[i].Name < volume[j].Name
	case "STATUS":
		return volume[i].Status < volume[j].Status
	case "AVAILABILITYZONE":
		return volume[i].AvailabilityZone < volume[j].AvailabilityZone
	case "PROFILEID":
		return volume[i].ProfileId < volume[j].ProfileId
	case "TENANTID":
		return volume[i].TenantId < volume[j].TenantId
	case "SIZE":
		return volume[i].Size < volume[j].Size
	case "POOLID":
		return volume[i].PoolId < volume[j].PoolId
	case "DESCRIPTION":
		return volume[i].Description < volume[j].Description
	case "GROUPID":
		return volume[i].GroupId < volume[j].GroupId
	// TODO:case "lun_id" (admin_only)
	}
	return false
}
// FindVolumeValue returns the string form of the volume field named k
// (sizes are rendered base-10; durable-name fields are read from
// p.Identifier). An unknown key yields "".
// NOTE(review): the DurableName cases dereference p.Identifier without a nil
// check — confirm callers guarantee it is set for those keys.
func (c *Client) FindVolumeValue(k string, p *model.VolumeSpec) string {
	switch k {
	case "Id":
		return p.Id
	case "CreatedAt":
		return p.CreatedAt
	case "UpdatedAt":
		return p.UpdatedAt
	case "TenantId":
		return p.TenantId
	case "UserId":
		return p.UserId
	case "Name":
		return p.Name
	case "Description":
		return p.Description
	case "AvailabilityZone":
		return p.AvailabilityZone
	case "Size":
		return strconv.FormatInt(p.Size, 10)
	case "Status":
		return p.Status
	case "PoolId":
		return p.PoolId
	case "ProfileId":
		return p.ProfileId
	case "GroupId":
		return p.GroupId
	case "DurableName":
		return p.Identifier.DurableName
	case "DurableNameFormat":
		return p.Identifier.DurableNameFormat
	}
	return ""
}
// SortVolumes sorts volumes in place by p.sortKey, ascending when p.sortDir
// equals "asc" (case-insensitively) and descending otherwise, and returns
// the same slice.
func (c *Client) SortVolumes(volumes []*model.VolumeSpec, p *Parameter) []*model.VolumeSpec {
	volume_sortKey = p.sortKey
	if !strings.EqualFold(p.sortDir, "asc") {
		sort.Sort(sort.Reverse(VolumeSlice(volumes)))
		return volumes
	}
	sort.Sort(VolumeSlice(volumes))
	return volumes
}
// ListVolumesWithFilter lists the caller's volumes, then filters and sorts
// them according to the query parameters in m.
func (c *Client) ListVolumesWithFilter(ctx *c.Context, m map[string][]string) ([]*model.VolumeSpec, error) {
	volumes, err := c.ListVolumes(ctx)
	if err != nil {
		log.Error("List volumes failed: ", err)
		return nil, err
	}
	filtered := c.FilterAndSort(volumes, m, sortableKeysMap[typeVolumes])
	result := []*model.VolumeSpec{}
	for _, item := range filtered.([]interface{}) {
		result = append(result, item.(*model.VolumeSpec))
	}
	return result, nil
}
// UpdateVolume merges the non-zero fields of vol into the stored volume
// record, refreshes the update timestamp and persists the result under the
// volume's owning tenant. Non-admin callers must be authorized for that
// tenant's project.
func (c *Client) UpdateVolume(ctx *c.Context, vol *model.VolumeSpec) (*model.VolumeSpec, error) {
	result, err := c.GetVolume(ctx, vol.Id)
	if err != nil {
		return nil, err
	}
	// Merge: only non-zero input fields overwrite the stored record.
	if vol.Name != "" {
		result.Name = vol.Name
	}
	if vol.AvailabilityZone != "" {
		result.AvailabilityZone = vol.AvailabilityZone
	}
	if vol.Description != "" {
		result.Description = vol.Description
	}
	if vol.Metadata != nil {
		result.Metadata = utils.MergeStringMaps(result.Metadata, vol.Metadata)
	}
	if vol.Identifier != nil {
		result.Identifier = vol.Identifier
	}
	if vol.PoolId != "" {
		result.PoolId = vol.PoolId
	}
	if vol.ProfileId != "" {
		result.ProfileId = vol.ProfileId
	}
	if vol.Size != 0 {
		result.Size = vol.Size
	}
	if vol.Status != "" {
		result.Status = vol.Status
	}
	if vol.ReplicationDriverData != nil {
		result.ReplicationDriverData = vol.ReplicationDriverData
	}
	if vol.MultiAttach {
		result.MultiAttach = vol.MultiAttach
	}
	if vol.GroupId != "" {
		result.GroupId = vol.GroupId
	}
	// Set update time
	result.UpdatedAt = time.Now().Format(constants.TimeFormat)
	body, err := json.Marshal(result)
	if err != nil {
		return nil, err
	}
	// If an admin want to access other tenant's resource just fake other's tenantId.
	if !IsAdminContext(ctx) && !AuthorizeProjectContext(ctx, result.TenantId) {
		// Typo fixed in the error message ("opertaion" -> "operation").
		return nil, fmt.Errorf("operation is not permitted")
	}
	dbReq := &Request{
		Url:        urls.GenerateVolumeURL(urls.Etcd, result.TenantId, vol.Id),
		NewContent: string(body),
	}
	dbRes := c.Update(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When update volume in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return result, nil
}
// DeleteVolume removes the volume identified by volID. For an admin context
// the volume is looked up first so it is deleted under its owning tenant.
func (c *Client) DeleteVolume(ctx *c.Context, volID string) error {
	tenantId := ctx.TenantId
	if IsAdminContext(ctx) {
		// An admin may delete another tenant's volume; resolve its real owner.
		vol, err := c.GetVolume(ctx, volID)
		if err != nil {
			log.Error(err)
			return err
		}
		tenantId = vol.TenantId
	}
	res := c.Delete(&Request{
		Url: urls.GenerateVolumeURL(urls.Etcd, tenantId, volID),
	})
	if res.Status == "Success" {
		return nil
	}
	log.Error("When delete volume in db:", res.Error)
	return errors.New(res.Error)
}
// ExtendVolume persists a new size for the volume identified by vol.Id.
// Only a positive vol.Size replaces the stored size; the stored status is
// overwritten unconditionally with vol.Status. NOTE(review): an empty
// vol.Status therefore clears the stored status — confirm intended.
func (c *Client) ExtendVolume(ctx *c.Context, vol *model.VolumeSpec) (*model.VolumeSpec, error) {
	result, err := c.GetVolume(ctx, vol.Id)
	if err != nil {
		return nil, err
	}
	if vol.Size > 0 {
		result.Size = vol.Size
	}
	result.Status = vol.Status
	// Set update time
	result.UpdatedAt = time.Now().Format(constants.TimeFormat)
	body, err := json.Marshal(result)
	if err != nil {
		return nil, err
	}
	// NOTE(review): unlike UpdateVolume, the URL uses ctx.TenantId rather
	// than result.TenantId — an admin extending another tenant's volume
	// would write under the admin's tenant; verify against callers.
	dbReq := &Request{
		Url:        urls.GenerateVolumeURL(urls.Etcd, ctx.TenantId, vol.Id),
		NewContent: string(body),
	}
	dbRes := c.Update(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When extend volume in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return result, nil
}
// CreateVolumeAttachment stores a new volume attachment in etcd under the
// caller's tenant, generating an id when absent and stamping creation time.
func (c *Client) CreateVolumeAttachment(ctx *c.Context, attachment *model.VolumeAttachmentSpec) (*model.VolumeAttachmentSpec, error) {
	if attachment.Id == "" {
		attachment.Id = uuid.NewV4().String()
	}
	attachment.CreatedAt = time.Now().Format(constants.TimeFormat)
	attachment.TenantId = ctx.TenantId
	body, err := json.Marshal(attachment)
	if err != nil {
		return nil, err
	}
	res := c.Create(&Request{
		Url:     urls.GenerateAttachmentURL(urls.Etcd, ctx.TenantId, attachment.Id),
		Content: string(body),
	})
	if res.Status != "Success" {
		log.Error("When create volume attachment in db:", res.Error)
		return nil, errors.New(res.Error)
	}
	return attachment, nil
}
// GetVolumeAttachment fetches the attachment identified by attachmentId.
// For an admin context a miss under the admin's own tenant falls back to
// scanning attachments across all tenants.
func (c *Client) GetVolumeAttachment(ctx *c.Context, attachmentId string) (*model.VolumeAttachmentSpec, error) {
	attach, err := c.getVolumeAttachment(ctx, attachmentId)
	if err == nil || !IsAdminContext(ctx) {
		return attach, err
	}
	all, listErr := c.ListVolumeAttachments(ctx, "")
	if listErr != nil {
		return nil, listErr
	}
	for _, candidate := range all {
		if candidate.Id == attachmentId {
			return candidate, nil
		}
	}
	return nil, fmt.Errorf("specified volume attachment(%s) can't find", attachmentId)
}
// getVolumeAttachment reads the attachment identified by attachmentId from
// etcd under the caller's own tenant and unmarshals it.
func (c *Client) getVolumeAttachment(ctx *c.Context, attachmentId string) (*model.VolumeAttachmentSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateAttachmentURL(urls.Etcd, ctx.TenantId, attachmentId),
	}
	dbRes := c.Get(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When get volume attachment in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var atc = &model.VolumeAttachmentSpec{}
	// Return the real unmarshal error; dbRes.Error is empty after a
	// successful get, so the original returned an empty error here.
	if err := json.Unmarshal([]byte(dbRes.Message[0]), atc); err != nil {
		log.Error("When parsing volume attachment in db:", err)
		return nil, err
	}
	return atc, nil
}
// ListVolumeAttachments returns the attachments visible to the caller,
// optionally restricted to those of volumeId (empty string means no
// restriction). An admin context lists across every tenant.
func (c *Client) ListVolumeAttachments(ctx *c.Context, volumeId string) ([]*model.VolumeAttachmentSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateAttachmentURL(urls.Etcd, ctx.TenantId),
	}
	if IsAdminContext(ctx) {
		dbReq.Url = urls.GenerateAttachmentURL(urls.Etcd, "")
	}
	dbRes := c.List(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When list volume attachments in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var atcs = []*model.VolumeAttachmentSpec{}
	for _, msg := range dbRes.Message {
		var atc = &model.VolumeAttachmentSpec{}
		// Return the real unmarshal error; dbRes.Error is empty after a
		// successful list, so the original returned an empty error here.
		if err := json.Unmarshal([]byte(msg), atc); err != nil {
			log.Error("When parsing volume attachment in db:", err)
			return nil, err
		}
		if len(volumeId) == 0 || atc.VolumeId == volumeId {
			atcs = append(atcs, atc)
		}
	}
	return atcs, nil
}
// ListVolumeAttachmentsWithFilter lists attachments (optionally restricted
// by the VolumeId query parameter), then filters and sorts them per m.
func (c *Client) ListVolumeAttachmentsWithFilter(ctx *c.Context, m map[string][]string) ([]*model.VolumeAttachmentSpec, error) {
	var volumeId string
	if v, ok := m["VolumeId"]; ok {
		volumeId = v[0]
	}
	attachments, err := c.ListVolumeAttachments(ctx, volumeId)
	if err != nil {
		// Log message fixed: this lists attachments, not volumes.
		log.Error("List volume attachments failed: ", err)
		return nil, err
	}
	tmpAttachments := c.FilterAndSort(attachments, m, sortableKeysMap[typeAttachments])
	var res = []*model.VolumeAttachmentSpec{}
	for _, data := range tmpAttachments.([]interface{}) {
		res = append(res, data.(*model.VolumeAttachmentSpec))
	}
	return res, nil
}
// UpdateVolumeAttachment merges the non-empty fields of attachment into the
// stored record, refreshes the update timestamp and persists the result
// under the attachment's owning tenant. Non-admin callers must be
// authorized for that tenant's project.
func (c *Client) UpdateVolumeAttachment(ctx *c.Context, attachmentId string, attachment *model.VolumeAttachmentSpec) (*model.VolumeAttachmentSpec, error) {
	result, err := c.GetVolumeAttachment(ctx, attachmentId)
	if err != nil {
		return nil, err
	}
	if len(attachment.Mountpoint) > 0 {
		result.Mountpoint = attachment.Mountpoint
	}
	if len(attachment.Status) > 0 {
		result.Status = attachment.Status
	}
	// Update DriverVolumeType
	if len(attachment.DriverVolumeType) > 0 {
		result.DriverVolumeType = attachment.DriverVolumeType
	}
	// Update connectionData
	// Debug
	log.V(8).Infof("etcd: update volume attachment connection data from db: %v", result.ConnectionData)
	log.V(8).Infof("etcd: update volume attachment connection data from target: %v", attachment.ConnectionData)
	if attachment.ConnectionData != nil {
		if result.ConnectionData == nil {
			result.ConnectionData = make(map[string]interface{})
		}
		for k, v := range attachment.ConnectionData {
			result.ConnectionData[k] = v
		}
	}
	// Set update time
	result.UpdatedAt = time.Now().Format(constants.TimeFormat)
	atcBody, err := json.Marshal(result)
	if err != nil {
		return nil, err
	}
	// If an admin want to access other tenant's resource just fake other's tenantId.
	if !IsAdminContext(ctx) && !AuthorizeProjectContext(ctx, result.TenantId) {
		// Typo fixed in the error message ("opertaion" -> "operation").
		return nil, fmt.Errorf("operation is not permitted")
	}
	dbReq := &Request{
		Url:        urls.GenerateAttachmentURL(urls.Etcd, result.TenantId, attachmentId),
		NewContent: string(atcBody),
	}
	dbRes := c.Update(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When update volume attachment in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return result, nil
}
// DeleteVolumeAttachment removes the attachment identified by attachmentId,
// resolving the owning tenant first when called by an admin.
func (c *Client) DeleteVolumeAttachment(ctx *c.Context, attachmentId string) error {
	tenantId := ctx.TenantId
	if IsAdminContext(ctx) {
		attach, err := c.GetVolumeAttachment(ctx, attachmentId)
		if err != nil {
			log.Error(err)
			return err
		}
		tenantId = attach.TenantId
	}
	res := c.Delete(&Request{
		Url: urls.GenerateAttachmentURL(urls.Etcd, tenantId, attachmentId),
	})
	if res.Status == "Success" {
		return nil
	}
	log.Error("When delete volume attachment in db:", res.Error)
	return errors.New(res.Error)
}
// CreateVolumeSnapshot stores the given snapshot in etcd under the caller's
// tenant.
func (c *Client) CreateVolumeSnapshot(ctx *c.Context, snp *model.VolumeSnapshotSpec) (*model.VolumeSnapshotSpec, error) {
	snp.TenantId = ctx.TenantId
	body, err := json.Marshal(snp)
	if err != nil {
		return nil, err
	}
	res := c.Create(&Request{
		Url:     urls.GenerateSnapshotURL(urls.Etcd, ctx.TenantId, snp.Id),
		Content: string(body),
	})
	if res.Status != "Success" {
		log.Error("When create volume snapshot in db:", res.Error)
		return nil, errors.New(res.Error)
	}
	return snp, nil
}
// GetVolumeSnapshot fetches the snapshot identified by snpID. For an admin
// context a miss under the admin's own tenant falls back to scanning all
// tenants' snapshots.
func (c *Client) GetVolumeSnapshot(ctx *c.Context, snpID string) (*model.VolumeSnapshotSpec, error) {
	snap, err := c.getVolumeSnapshot(ctx, snpID)
	if err == nil || !IsAdminContext(ctx) {
		return snap, err
	}
	all, listErr := c.ListVolumeSnapshots(ctx)
	if listErr != nil {
		return nil, listErr
	}
	for _, candidate := range all {
		if candidate.Id == snpID {
			return candidate, nil
		}
	}
	return nil, fmt.Errorf("specified volume snapshot(%s) can't find", snpID)
}
// getVolumeSnapshot reads the snapshot identified by snpID from etcd under
// the caller's own tenant and unmarshals it.
func (c *Client) getVolumeSnapshot(ctx *c.Context, snpID string) (*model.VolumeSnapshotSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateSnapshotURL(urls.Etcd, ctx.TenantId, snpID),
	}
	dbRes := c.Get(dbReq)
	if dbRes.Status != "Success" {
		// Message fixed: this fetches a snapshot, not an attachment.
		log.Error("When get volume snapshot in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var vs = &model.VolumeSnapshotSpec{}
	// Return the real unmarshal error; dbRes.Error is empty after a
	// successful get, so the original returned an empty error here.
	if err := json.Unmarshal([]byte(dbRes.Message[0]), vs); err != nil {
		log.Error("When parsing volume snapshot in db:", err)
		return nil, err
	}
	return vs, nil
}
// ListVolumeSnapshots returns all snapshots visible to the caller; an admin
// context lists snapshots across every tenant.
func (c *Client) ListVolumeSnapshots(ctx *c.Context) ([]*model.VolumeSnapshotSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateSnapshotURL(urls.Etcd, ctx.TenantId),
	}
	if IsAdminContext(ctx) {
		dbReq.Url = urls.GenerateSnapshotURL(urls.Etcd, "")
	}
	dbRes := c.List(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When list volume snapshots in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var vss = []*model.VolumeSnapshotSpec{}
	if len(dbRes.Message) == 0 {
		return vss, nil
	}
	for _, msg := range dbRes.Message {
		var vs = &model.VolumeSnapshotSpec{}
		// Return the real unmarshal error; dbRes.Error is empty after a
		// successful list, so the original returned an empty error here.
		if err := json.Unmarshal([]byte(msg), vs); err != nil {
			log.Error("When parsing volume snapshot in db:", err)
			return nil, err
		}
		vss = append(vss, vs)
	}
	return vss, nil
}
// ListVolumeSnapshotsWithFilter lists snapshots, then filters and sorts them
// according to the query parameters in m.
func (c *Client) ListVolumeSnapshotsWithFilter(ctx *c.Context, m map[string][]string) ([]*model.VolumeSnapshotSpec, error) {
	snapshots, err := c.ListVolumeSnapshots(ctx)
	if err != nil {
		log.Error("List volumeSnapshots failed: ", err)
		return nil, err
	}
	filtered := c.FilterAndSort(snapshots, m, sortableKeysMap[typeVolumeSnapshots])
	result := []*model.VolumeSnapshotSpec{}
	for _, item := range filtered.([]interface{}) {
		result = append(result, item.(*model.VolumeSnapshotSpec))
	}
	return result, nil
}
// UpdateVolumeSnapshot merges the non-zero fields of snp into the stored
// snapshot, refreshes the update timestamp and persists the result under the
// snapshot's owning tenant. Non-admin callers must be authorized for that
// tenant's project.
func (c *Client) UpdateVolumeSnapshot(ctx *c.Context, snpID string, snp *model.VolumeSnapshotSpec) (*model.VolumeSnapshotSpec, error) {
	result, err := c.GetVolumeSnapshot(ctx, snpID)
	if err != nil {
		return nil, err
	}
	if snp.Name != "" {
		result.Name = snp.Name
	}
	if snp.Metadata != nil {
		result.Metadata = utils.MergeStringMaps(result.Metadata, snp.Metadata)
	}
	if snp.Size > 0 {
		result.Size = snp.Size
	}
	if snp.VolumeId != "" {
		result.VolumeId = snp.VolumeId
	}
	if snp.Description != "" {
		result.Description = snp.Description
	}
	if snp.Status != "" {
		result.Status = snp.Status
	}
	// Set update time
	result.UpdatedAt = time.Now().Format(constants.TimeFormat)
	atcBody, err := json.Marshal(result)
	if err != nil {
		return nil, err
	}
	// If an admin want to access other tenant's resource just fake other's tenantId.
	if !IsAdminContext(ctx) && !AuthorizeProjectContext(ctx, result.TenantId) {
		// Typo fixed in the error message ("opertaion" -> "operation").
		return nil, fmt.Errorf("operation is not permitted")
	}
	dbReq := &Request{
		Url:        urls.GenerateSnapshotURL(urls.Etcd, result.TenantId, snpID),
		NewContent: string(atcBody),
	}
	dbRes := c.Update(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When update volume snapshot in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return result, nil
}
// DeleteVolumeSnapshot removes the snapshot identified by snpID, resolving
// the owning tenant first when called by an admin.
func (c *Client) DeleteVolumeSnapshot(ctx *c.Context, snpID string) error {
	tenantId := ctx.TenantId
	if IsAdminContext(ctx) {
		snapshot, err := c.GetVolumeSnapshot(ctx, snpID)
		if err != nil {
			log.Error(err)
			return err
		}
		tenantId = snapshot.TenantId
	}
	res := c.Delete(&Request{
		Url: urls.GenerateSnapshotURL(urls.Etcd, tenantId, snpID),
	})
	if res.Status == "Success" {
		return nil
	}
	log.Error("When delete volume snapshot in db:", res.Error)
	return errors.New(res.Error)
}
// CreateReplication stores a new replication in etcd under the caller's
// tenant, generating an id when absent and stamping creation time.
func (c *Client) CreateReplication(ctx *c.Context, r *model.ReplicationSpec) (*model.ReplicationSpec, error) {
	if r.Id == "" {
		r.Id = uuid.NewV4().String()
	}
	r.TenantId = ctx.TenantId
	r.CreatedAt = time.Now().Format(constants.TimeFormat)
	body, err := json.Marshal(r)
	if err != nil {
		return nil, err
	}
	result := c.Create(&Request{
		Url:     urls.GenerateReplicationURL(urls.Etcd, ctx.TenantId, r.Id),
		Content: string(body),
	})
	if result.Status != "Success" {
		log.Error("When create replication in db:", result.Error)
		return nil, errors.New(result.Error)
	}
	return r, nil
}
// GetReplication fetches the replication identified by replicationId. For an
// admin context a miss under the admin's own tenant falls back to scanning
// all tenants' replications.
func (c *Client) GetReplication(ctx *c.Context, replicationId string) (*model.ReplicationSpec, error) {
	replication, err := c.getReplication(ctx, replicationId)
	if err == nil || !IsAdminContext(ctx) {
		return replication, err
	}
	all, listErr := c.ListReplication(ctx)
	if listErr != nil {
		return nil, listErr
	}
	for _, candidate := range all {
		if candidate.Id == replicationId {
			return candidate, nil
		}
	}
	return nil, fmt.Errorf("specified replication(%s) can't find", replicationId)
}
// GetReplicationByVolumeId returns the replication in which volumeId acts as
// either the primary or the secondary volume, or a not-found error.
func (c *Client) GetReplicationByVolumeId(ctx *c.Context, volumeId string) (*model.ReplicationSpec, error) {
	replications, err := c.ListReplication(ctx)
	if err != nil {
		return nil, err
	}
	for _, rep := range replications {
		if rep.PrimaryVolumeId == volumeId || rep.SecondaryVolumeId == volumeId {
			return rep, nil
		}
	}
	return nil, model.NewNotFoundError(fmt.Sprintf("can't find specified replication by volume id %s", volumeId))
}
// getReplication reads the replication identified by replicationId from etcd
// under the caller's own tenant and unmarshals it.
func (c *Client) getReplication(ctx *c.Context, replicationId string) (*model.ReplicationSpec, error) {
	req := &Request{
		Url: urls.GenerateReplicationURL(urls.Etcd, ctx.TenantId, replicationId),
	}
	resp := c.Get(req)
	if resp.Status != "Success" {
		// Message fixed: this fetches a replication, not a pool.
		log.Error("When get replication in db:", resp.Error)
		return nil, errors.New(resp.Error)
	}
	var r = &model.ReplicationSpec{}
	// Return the real unmarshal error; resp.Error is empty after a
	// successful get, so the original returned an empty error here.
	if err := json.Unmarshal([]byte(resp.Message[0]), r); err != nil {
		log.Error("When parsing replication in db:", err)
		return nil, err
	}
	return r, nil
}
// ListReplication returns all replications visible to the caller; an admin
// context lists replications across every tenant.
func (c *Client) ListReplication(ctx *c.Context) ([]*model.ReplicationSpec, error) {
	req := &Request{
		Url: urls.GenerateReplicationURL(urls.Etcd, ctx.TenantId),
	}
	if IsAdminContext(ctx) {
		req.Url = urls.GenerateReplicationURL(urls.Etcd, "")
	}
	resp := c.List(req)
	if resp.Status != "Success" {
		log.Error("When list replication in db:", resp.Error)
		return nil, errors.New(resp.Error)
	}
	var replicas = []*model.ReplicationSpec{}
	if len(resp.Message) == 0 {
		return replicas, nil
	}
	for _, msg := range resp.Message {
		var r = &model.ReplicationSpec{}
		// Return the real unmarshal error; resp.Error is empty after a
		// successful list, so the original returned an empty error here.
		if err := json.Unmarshal([]byte(msg), r); err != nil {
			log.Error("When parsing replication in db:", err)
			return nil, err
		}
		replicas = append(replicas, r)
	}
	return replicas, nil
}
// filterByName reports whether spec matches every query parameter in param
// whose key appears in filterList. Matching compares the parameter's first
// value, case-insensitively, against the string form of the spec's
// same-named struct field; a field of an unsupported kind makes the whole
// match fail. Keys absent from filterList or from the struct are skipped.
func (c *Client) filterByName(param map[string][]string, spec interface{}, filterList map[string]interface{}) bool {
	v := reflect.ValueOf(spec)
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	for key := range param {
		// Single map lookup (the original did a separate lookup and test).
		if _, ok := filterList[key]; !ok {
			continue
		}
		// Local renamed from the misspelled "filed".
		field := v.FieldByName(key)
		if !field.IsValid() {
			continue
		}
		paramVal := param[key][0]
		var val string
		switch field.Kind() {
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			val = strconv.FormatInt(field.Int(), 10)
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
			val = strconv.FormatUint(field.Uint(), 10)
		case reflect.String:
			val = field.String()
		default:
			return false
		}
		if !strings.EqualFold(paramVal, val) {
			return false
		}
	}
	return true
}
// SelectReplication filters replications by the query parameters in param,
// matching only on the whitelisted fields below. When no selection is
// requested the input slice is returned unchanged.
func (c *Client) SelectReplication(param map[string][]string, replications []*model.ReplicationSpec) []*model.ReplicationSpec {
	if !c.SelectOrNot(param) {
		return replications
	}
	// Fields a caller is allowed to filter on.
	allowed := map[string]interface{}{
		"Id":                nil,
		"CreatedAt":         nil,
		"UpdatedAt":         nil,
		"Name":              nil,
		"Description":       nil,
		"PrimaryVolumeId":   nil,
		"SecondaryVolumeId": nil,
	}
	selected := []*model.ReplicationSpec{}
	for _, rep := range replications {
		if c.filterByName(param, rep, allowed) {
			selected = append(selected, rep)
		}
	}
	return selected
}
// ReplicationsCompareFunc reports the ordering between two replications.
type ReplicationsCompareFunc func(a *model.ReplicationSpec, b *model.ReplicationSpec) bool
// replicationsCompareFunc is package-level state consumed by
// ReplicationSlice.Less; SortReplications assigns it before sorting.
// NOTE(review): shared mutable state — not safe for concurrent
// SortReplications calls; confirm callers are serialized.
var replicationsCompareFunc ReplicationsCompareFunc
// ReplicationSlice implements sort.Interface using replicationsCompareFunc.
type ReplicationSlice []*model.ReplicationSpec
func (r ReplicationSlice) Len() int { return len(r) }
func (r ReplicationSlice) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
func (r ReplicationSlice) Less(i, j int) bool { return replicationsCompareFunc(r[i], r[j]) }
// replicationSortKey2Func maps an upper-cased sort key to the comparison
// used by SortReplications. NOTE(review): each comparison uses '>', so a
// plain sort with these orders descending; confirm the asc/desc mapping in
// SortReplications is the intended one.
var replicationSortKey2Func = map[string]ReplicationsCompareFunc{
	"ID": func(a *model.ReplicationSpec, b *model.ReplicationSpec) bool { return a.Id > b.Id },
	"NAME": func(a *model.ReplicationSpec, b *model.ReplicationSpec) bool { return a.Name > b.Name },
	"REPLICATIONSTATUS": func(a *model.ReplicationSpec, b *model.ReplicationSpec) bool {
		return a.ReplicationStatus > b.ReplicationStatus
	},
	"AVAILABILITYZONE": func(a *model.ReplicationSpec, b *model.ReplicationSpec) bool {
		return a.AvailabilityZone > b.AvailabilityZone
	},
	"PROFILEID": func(a *model.ReplicationSpec, b *model.ReplicationSpec) bool { return a.ProfileId > b.ProfileId },
	"TENANTID": func(a *model.ReplicationSpec, b *model.ReplicationSpec) bool { return a.TenantId > b.TenantId },
	"POOLID": func(a *model.ReplicationSpec, b *model.ReplicationSpec) bool { return a.PoolId > b.PoolId },
}
// SortReplications sorts replications in place using the comparison mapped
// to p.sortKey, reversed unless p.sortDir equals "asc" (case-insensitively),
// and returns the same slice.
func (c *Client) SortReplications(replications []*model.ReplicationSpec, p *Parameter) []*model.ReplicationSpec {
	replicationsCompareFunc = replicationSortKey2Func[p.sortKey]
	if !strings.EqualFold(p.sortDir, "asc") {
		sort.Sort(sort.Reverse(ReplicationSlice(replications)))
		return replications
	}
	sort.Sort(ReplicationSlice(replications))
	return replications
}
// ListReplicationWithFilter lists replications, applies the selection in m,
// then sorts and paginates per the standard query parameters.
func (c *Client) ListReplicationWithFilter(ctx *c.Context, m map[string][]string) ([]*model.ReplicationSpec, error) {
	replicas, err := c.ListReplication(ctx)
	if err != nil {
		log.Error("List replications failed: ", err)
		return nil, err
	}
	selected := c.SelectReplication(m, replicas)
	var sortKeys []string
	for key := range replicationSortKey2Func {
		sortKeys = append(sortKeys, key)
	}
	p := c.ParameterFilter(m, len(selected), sortKeys)
	sorted := c.SortReplications(selected, p)
	return sorted[p.beginIdx:p.endIdx], nil
}
// DeleteReplication removes the replication identified by replicationId,
// resolving the owning tenant first when called by an admin.
func (c *Client) DeleteReplication(ctx *c.Context, replicationId string) error {
	tenantId := ctx.TenantId
	if IsAdminContext(ctx) {
		rep, err := c.GetReplication(ctx, replicationId)
		if err != nil {
			return err
		}
		tenantId = rep.TenantId
	}
	res := c.Delete(&Request{
		Url: urls.GenerateReplicationURL(urls.Etcd, tenantId, replicationId),
	})
	if res.Status == "Success" {
		return nil
	}
	log.Error("When delete replication in db:", res.Error)
	return errors.New(res.Error)
}
// UpdateReplication merges the non-empty fields of input into the stored
// replication, refreshes the update timestamp and persists the result —
// under the replication's owning tenant when called by an admin, otherwise
// under the caller's tenant.
func (c *Client) UpdateReplication(ctx *c.Context, replicationId string, input *model.ReplicationSpec) (*model.ReplicationSpec, error) {
	r, err := c.GetReplication(ctx, replicationId)
	if err != nil {
		return nil, err
	}
	// Merge: only non-empty/non-nil input fields overwrite the stored record.
	if input.ProfileId != "" {
		r.ProfileId = input.ProfileId
	}
	if input.Name != "" {
		r.Name = input.Name
	}
	if input.Description != "" {
		r.Description = input.Description
	}
	if input.PrimaryReplicationDriverData != nil {
		r.PrimaryReplicationDriverData = input.PrimaryReplicationDriverData
	}
	if input.SecondaryReplicationDriverData != nil {
		r.SecondaryReplicationDriverData = input.SecondaryReplicationDriverData
	}
	if input.Metadata != nil {
		r.Metadata = utils.MergeStringMaps(r.Metadata, input.Metadata)
	}
	if input.ReplicationStatus != "" {
		r.ReplicationStatus = input.ReplicationStatus
	}
	r.UpdatedAt = time.Now().Format(constants.TimeFormat)
	b, err := json.Marshal(r)
	if err != nil {
		return nil, err
	}
	// Admins write under the record's real owner so the update lands on the
	// same key it was read from.
	tenantId := ctx.TenantId
	if IsAdminContext(ctx) {
		tenantId = r.TenantId
	}
	req := &Request{
		Url:        urls.GenerateReplicationURL(urls.Etcd, tenantId, replicationId),
		NewContent: string(b),
	}
	resp := c.Update(req)
	if resp.Status != "Success" {
		log.Error("When update replication in db:", resp.Error)
		return nil, errors.New(resp.Error)
	}
	return r, nil
}
// CreateVolumeGroup stores the given volume group in etcd under the caller's
// tenant.
func (c *Client) CreateVolumeGroup(ctx *c.Context, vg *model.VolumeGroupSpec) (*model.VolumeGroupSpec, error) {
	vg.TenantId = ctx.TenantId
	body, err := json.Marshal(vg)
	if err != nil {
		return nil, err
	}
	res := c.Create(&Request{
		Url:     urls.GenerateVolumeGroupURL(urls.Etcd, ctx.TenantId, vg.Id),
		Content: string(body),
	})
	if res.Status != "Success" {
		log.Error("When create volume group in db:", res.Error)
		return nil, errors.New(res.Error)
	}
	return vg, nil
}
// GetVolumeGroup reads the volume group identified by vgId from etcd under
// the caller's tenant and unmarshals it.
func (c *Client) GetVolumeGroup(ctx *c.Context, vgId string) (*model.VolumeGroupSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateVolumeGroupURL(urls.Etcd, ctx.TenantId, vgId),
	}
	dbRes := c.Get(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When get volume group in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var vg = &model.VolumeGroupSpec{}
	// Return the real unmarshal error; dbRes.Error is empty after a
	// successful get, so the original returned an empty error here.
	if err := json.Unmarshal([]byte(dbRes.Message[0]), vg); err != nil {
		log.Error("When parsing volume group in db:", err)
		return nil, err
	}
	return vg, nil
}
// UpdateVolumeGroup merges the non-empty, changed fields of vgUpdate into
// the stored volume group and persists the result to etcd under the
// caller's tenant.
func (c *Client) UpdateVolumeGroup(ctx *c.Context, vgUpdate *model.VolumeGroupSpec) (*model.VolumeGroupSpec, error) {
	vg, err := c.GetVolumeGroup(ctx, vgUpdate.Id)
	if err != nil {
		return nil, err
	}
	if vgUpdate.Name != "" && vgUpdate.Name != vg.Name {
		vg.Name = vgUpdate.Name
	}
	if vgUpdate.AvailabilityZone != "" && vgUpdate.AvailabilityZone != vg.AvailabilityZone {
		vg.AvailabilityZone = vgUpdate.AvailabilityZone
	}
	if vgUpdate.Description != "" && vgUpdate.Description != vg.Description {
		vg.Description = vgUpdate.Description
	}
	// The original had this PoolId merge twice; one copy suffices.
	if vgUpdate.PoolId != "" && vgUpdate.PoolId != vg.PoolId {
		vg.PoolId = vgUpdate.PoolId
	}
	// Guard on the UPDATE's status (the original tested vg.Status, letting
	// an empty vgUpdate.Status wipe a stored status).
	if vgUpdate.Status != "" && vgUpdate.Status != vg.Status {
		vg.Status = vgUpdate.Status
	}
	if vgUpdate.CreatedAt != "" && vgUpdate.CreatedAt != vg.CreatedAt {
		vg.CreatedAt = vgUpdate.CreatedAt
	}
	if vgUpdate.UpdatedAt != "" && vgUpdate.UpdatedAt != vg.UpdatedAt {
		vg.UpdatedAt = vgUpdate.UpdatedAt
	}
	vgBody, err := json.Marshal(vg)
	if err != nil {
		return nil, err
	}
	dbReq := &Request{
		Url:        urls.GenerateVolumeGroupURL(urls.Etcd, ctx.TenantId, vgUpdate.Id),
		NewContent: string(vgBody),
	}
	dbRes := c.Update(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When update volume group in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return vg, nil
}
// UpdateStatus sets the status field of the given resource and persists it
// through the matching Update* helper. Replications update their
// ReplicationStatus; a volume slice is updated element by element without a
// status change. Unrecognized types are ignored and nil is returned.
func (c *Client) UpdateStatus(ctx *c.Context, in interface{}, status string) error {
	switch res := in.(type) {
	case *model.VolumeSnapshotSpec:
		res.Status = status
		if _, err := c.UpdateVolumeSnapshot(ctx, res.Id, res); err != nil {
			log.Error("Error occurs when update volume snapshot status in db:", err.Error())
			return err
		}
	case *model.VolumeAttachmentSpec:
		res.Status = status
		if _, err := c.UpdateVolumeAttachment(ctx, res.Id, res); err != nil {
			log.Error("Error occurred in dock module when update volume attachment status in db:", err)
			return err
		}
	case *model.VolumeSpec:
		res.Status = status
		if _, err := c.UpdateVolume(ctx, res); err != nil {
			log.Error("When update volume status in db:", err.Error())
			return err
		}
	case *model.FileShareSpec:
		res.Status = status
		if _, err := c.UpdateFileShare(ctx, res); err != nil {
			log.Error("when update fileshare status in db:", err.Error())
			return err
		}
	case *model.FileShareSnapshotSpec:
		res.Status = status
		if _, err := c.UpdateFileShareSnapshot(ctx, res.Id, res); err != nil {
			log.Error("when update fileshare status in db:", err.Error())
			return err
		}
	case *model.VolumeGroupSpec:
		res.Status = status
		if _, err := c.UpdateVolumeGroup(ctx, res); err != nil {
			log.Error("When update volume status in db:", err.Error())
			return err
		}
	case []*model.VolumeSpec:
		if _, err := c.VolumesToUpdate(ctx, res); err != nil {
			return err
		}
	case *model.ReplicationSpec:
		res.ReplicationStatus = status
		if _, err := c.UpdateReplication(ctx, res.Id, res); err != nil {
			return err
		}
	}
	return nil
}
// ListVolumesByGroupId returns every volume that belongs to the group vgId.
func (c *Client) ListVolumesByGroupId(ctx *c.Context, vgId string) ([]*model.VolumeSpec, error) {
	all, err := c.ListVolumes(ctx)
	if err != nil {
		return nil, err
	}
	var members []*model.VolumeSpec
	for _, volume := range all {
		if volume.GroupId == vgId {
			members = append(members, volume)
		}
	}
	return members, nil
}
// VolumesToUpdate applies UpdateVolume to each element of volumeList and
// returns the updated records; the first failure aborts the batch.
func (c *Client) VolumesToUpdate(ctx *c.Context, volumeList []*model.VolumeSpec) ([]*model.VolumeSpec, error) {
	var updated []*model.VolumeSpec
	for _, vol := range volumeList {
		ref, err := c.UpdateVolume(ctx, vol)
		if err != nil {
			return nil, err
		}
		updated = append(updated, ref)
	}
	return updated, nil
}
// ListVolumeGroups returns all volume groups visible to the caller; an admin
// context lists groups across every tenant. (Header comment fixed — it
// previously read "ListVolumes".)
func (c *Client) ListVolumeGroups(ctx *c.Context) ([]*model.VolumeGroupSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateVolumeGroupURL(urls.Etcd, ctx.TenantId),
	}
	if IsAdminContext(ctx) {
		dbReq.Url = urls.GenerateVolumeGroupURL(urls.Etcd, "")
	}
	dbRes := c.List(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When list volume groups in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var groups []*model.VolumeGroupSpec
	if len(dbRes.Message) == 0 {
		return groups, nil
	}
	for _, msg := range dbRes.Message {
		var group = &model.VolumeGroupSpec{}
		// Return the real unmarshal error; dbRes.Error is empty after a
		// successful list, so the original returned an empty error here.
		if err := json.Unmarshal([]byte(msg), group); err != nil {
			log.Error("When parsing volume group in db:", err)
			return nil, err
		}
		groups = append(groups, group)
	}
	return groups, nil
}
// DeleteVolumeGroup removes the volume group identified by volumeGroupId,
// resolving the owning tenant first when called by an admin.
func (c *Client) DeleteVolumeGroup(ctx *c.Context, volumeGroupId string) error {
	tenantId := ctx.TenantId
	if IsAdminContext(ctx) {
		group, err := c.GetVolumeGroup(ctx, volumeGroupId)
		if err != nil {
			log.Error(err)
			return err
		}
		tenantId = group.TenantId
	}
	res := c.Delete(&Request{
		Url: urls.GenerateVolumeGroupURL(urls.Etcd, tenantId, volumeGroupId),
	})
	if res.Status == "Success" {
		return nil
	}
	log.Error("When delete volume group in db:", res.Error)
	return errors.New(res.Error)
}
// ListSnapshotsByVolumeId returns every snapshot taken from the given volume.
func (c *Client) ListSnapshotsByVolumeId(ctx *c.Context, volumeId string) ([]*model.VolumeSnapshotSpec, error) {
	all, err := c.ListVolumeSnapshots(ctx)
	if err != nil {
		return nil, err
	}
	// Keep only snapshots whose source is the requested volume.
	var matched []*model.VolumeSnapshotSpec
	for _, snap := range all {
		if snap.VolumeId != volumeId {
			continue
		}
		matched = append(matched, snap)
	}
	return matched, nil
}
// ListAttachmentsByVolumeId returns the attachments of the given volume by
// delegating to ListVolumeAttachments, which already filters on volume id.
func (c *Client) ListAttachmentsByVolumeId(ctx *c.Context, volumeId string) ([]*model.VolumeAttachmentSpec, error) {
	return c.ListVolumeAttachments(ctx, volumeId)
}
// ListVolumeGroupsWithFilter lists volume groups matching the query
// parameters in m, applying field filtering, sorting, and pagination.
func (c *Client) ListVolumeGroupsWithFilter(ctx *c.Context, m map[string][]string) ([]*model.VolumeGroupSpec, error) {
	vgs, err := c.ListVolumeGroups(ctx)
	if err != nil {
		log.Error("List volume groups failed: ", err)
		return nil, err
	}
	// Keep only the groups matching the filter parameters.
	rlist := c.SelectVolumeGroup(m, vgs)
	// The supported sort keys are exactly the keys of volumeGroupSortKey2Func.
	var sortKeys []string
	for k := range volumeGroupSortKey2Func {
		sortKeys = append(sortKeys, k)
	}
	// ParameterFilter validates sortKey/sortDir and computes the page window
	// [beginIdx, endIdx) applied after sorting.
	p := c.ParameterFilter(m, len(rlist), sortKeys)
	return c.SortVolumeGroups(rlist, p)[p.beginIdx:p.endIdx], nil
}
// VolumeGroupCompareFunc reports the ordering of two volume groups for one
// sort key (see volumeGroupSortKey2Func).
type VolumeGroupCompareFunc func(a *model.VolumeGroupSpec, b *model.VolumeGroupSpec) bool

// volumeGroupCompareFunc holds the comparator consumed by
// VolumeGroupSlice.Less.
// NOTE(review): package-level mutable state — concurrent sorts with
// different keys would race; confirm callers are serialized.
var volumeGroupCompareFunc VolumeGroupCompareFunc

// VolumeGroupSlice adapts []*model.VolumeGroupSpec to sort.Interface using
// whatever comparator is currently stored in volumeGroupCompareFunc.
type VolumeGroupSlice []*model.VolumeGroupSpec

func (v VolumeGroupSlice) Len() int           { return len(v) }
func (v VolumeGroupSlice) Swap(i, j int)      { v[i], v[j] = v[j], v[i] }
func (v VolumeGroupSlice) Less(i, j int) bool { return volumeGroupCompareFunc(v[i], v[j]) }
// volumeGroupSortKey2Func maps an upper-case sort key to the comparator for
// that field. All comparators use ">"; SortVolumeGroups applies sort.Reverse
// for the non-"asc" direction.
var volumeGroupSortKey2Func = map[string]VolumeGroupCompareFunc{
	"ID":     func(a *model.VolumeGroupSpec, b *model.VolumeGroupSpec) bool { return a.Id > b.Id },
	"NAME":   func(a *model.VolumeGroupSpec, b *model.VolumeGroupSpec) bool { return a.Name > b.Name },
	"STATUS": func(a *model.VolumeGroupSpec, b *model.VolumeGroupSpec) bool {
		return a.Status > b.Status
	},
	"AVAILABILITYZONE": func(a *model.VolumeGroupSpec, b *model.VolumeGroupSpec) bool {
		return a.AvailabilityZone > b.AvailabilityZone
	},
	"TENANTID": func(a *model.VolumeGroupSpec, b *model.VolumeGroupSpec) bool { return a.TenantId > b.TenantId },
	"POOLID":   func(a *model.VolumeGroupSpec, b *model.VolumeGroupSpec) bool { return a.PoolId > b.PoolId },
}
// SortVolumeGroups sorts vgs in place according to p.sortKey and p.sortDir
// and returns the same slice.
// NOTE(review): the comparators in volumeGroupSortKey2Func use ">", so the
// "asc" branch actually yields descending field order (and vice versa) —
// confirm this matches the intended API contract.
func (c *Client) SortVolumeGroups(vgs []*model.VolumeGroupSpec, p *Parameter) []*model.VolumeGroupSpec {
	// Install the comparator consumed by VolumeGroupSlice.Less (package-level
	// state; see the note on volumeGroupCompareFunc).
	volumeGroupCompareFunc = volumeGroupSortKey2Func[p.sortKey]
	if strings.EqualFold(p.sortDir, "asc") {
		sort.Sort(VolumeGroupSlice(vgs))
	} else {
		sort.Sort(sort.Reverse(VolumeGroupSlice(vgs)))
	}
	return vgs
}
// SelectVolumeGroup filters vgs by the query parameters in param, matching
// only against the whitelisted VolumeGroupSpec fields.
func (c *Client) SelectVolumeGroup(param map[string][]string, vgs []*model.VolumeGroupSpec) []*model.VolumeGroupSpec {
	if !c.SelectOrNot(param) {
		return vgs
	}
	// Fields that may legally be used as filter criteria.
	supported := map[string]interface{}{
		"Id":               nil,
		"CreatedAt":        nil,
		"UpdatedAt":        nil,
		"Name":             nil,
		"Status":           nil,
		"TenantId":         nil,
		"UserId":           nil,
		"Description":      nil,
		"AvailabilityZone": nil,
		"PoolId":           nil,
	}
	selected := []*model.VolumeGroupSpec{}
	for _, candidate := range vgs {
		if c.filterByName(param, candidate, supported) {
			selected = append(selected, candidate)
		}
	}
	return selected
}
// ListHosts returns the hosts visible to the tenant in ctx, filtered, sorted
// (on hostName/createdAt only), and paginated per the query parameters in m.
func (c *Client) ListHosts(ctx *c.Context, m map[string][]string) ([]*model.HostSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateHostURL(urls.Etcd, ctx.TenantId),
	}
	if IsAdminContext(ctx) {
		// Admin context: list hosts of every tenant.
		dbReq.Url = urls.GenerateHostURL(urls.Etcd, "")
	}
	dbRes := c.List(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When list hosts in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var hosts = []*model.HostSpec{}
	if len(dbRes.Message) == 0 {
		return hosts, nil
	}
	for _, msg := range dbRes.Message {
		var host = &model.HostSpec{}
		if err := json.Unmarshal([]byte(msg), host); err != nil {
			// Report the actual unmarshal error. The previous code logged and
			// returned dbRes.Error, which is empty here because the db request
			// itself succeeded, so the real failure was silently lost.
			log.Error("When parsing host in db:", err)
			return nil, err
		}
		hosts = append(hosts, host)
	}
	// Apply field filtering, then optional sorting on hostName/createdAt.
	tmpHosts := utils.Filter(hosts, m)
	if len(m["sortKey"]) > 0 && utils.Contains([]string{"hostName", "createdAt"}, m["sortKey"][0]) {
		tmpHosts = utils.Sort(tmpHosts, m["sortKey"][0], c.GetSortDir(m))
	}
	// NOTE(review): the offset is bounded by len(hosts) (the pre-filter
	// count), not the filtered length — confirm this is intended.
	tmpHosts = utils.Slice(tmpHosts, c.GetOffset(m, len(hosts)), c.GetLimit(m))
	var res = []*model.HostSpec{}
	for _, data := range tmpHosts.([]interface{}) {
		res = append(res, data.(*model.HostSpec))
	}
	return res, nil
}
// ListHostsByName returns the hosts whose HostName equals hostName exactly.
func (c *Client) ListHostsByName(ctx *c.Context, hostName string) ([]*model.HostSpec, error) {
	hosts, err := c.ListHosts(ctx, map[string][]string{"hostName": {hostName}})
	if err != nil {
		log.Error("List hosts failed: ", err)
		return nil, err
	}
	// Keep exact name matches only.
	var exact []*model.HostSpec
	for _, h := range hosts {
		if h.HostName == hostName {
			exact = append(exact, h)
		}
	}
	return exact, nil
}
// CreateHost stores a new host record owned by the context's tenant,
// generating an id when absent and stamping the creation time.
func (c *Client) CreateHost(ctx *c.Context, host *model.HostSpec) (*model.HostSpec, error) {
	host.TenantId = ctx.TenantId
	if host.Id == "" {
		host.Id = uuid.NewV4().String()
	}
	host.CreatedAt = time.Now().Format(constants.TimeFormat)
	body, err := json.Marshal(host)
	if err != nil {
		return nil, err
	}
	dbRes := c.Create(&Request{
		Url:     urls.GenerateHostURL(urls.Etcd, ctx.TenantId, host.Id),
		Content: string(body),
	})
	if dbRes.Status != "Success" {
		log.Error("When create host in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return host, nil
}
// UpdateHost merges the caller-supplied non-zero fields of host into the
// stored record, stamps UpdatedAt, and persists the result. The merged
// record is returned on success.
func (c *Client) UpdateHost(ctx *c.Context, host *model.HostSpec) (*model.HostSpec, error) {
	result, err := c.GetHost(ctx, host.Id)
	if err != nil {
		return nil, err
	}
	// Only overwrite the fields the caller actually provided.
	if host.HostName != "" {
		result.HostName = host.HostName
	}
	if host.OsType != "" {
		result.OsType = host.OsType
	}
	if host.IP != "" {
		result.IP = host.IP
	}
	if host.Port > 0 {
		result.Port = host.Port
	}
	if host.AccessMode != "" {
		result.AccessMode = host.AccessMode
	}
	if host.Username != "" {
		result.Username = host.Username
	}
	if host.Password != "" {
		result.Password = host.Password
	}
	if len(host.AvailabilityZones) > 0 {
		result.AvailabilityZones = host.AvailabilityZones
	}
	if len(host.Initiators) > 0 {
		result.Initiators = host.Initiators
	}
	// Set update time
	result.UpdatedAt = time.Now().Format(constants.TimeFormat)
	body, err := json.Marshal(result)
	if err != nil {
		return nil, err
	}
	// Non-admins may only update hosts owned by their own tenant.
	if !IsAdminContext(ctx) && !AuthorizeProjectContext(ctx, result.TenantId) {
		// Fixed typo in the error message ("opertaion" -> "operation").
		return nil, fmt.Errorf("operation is not permitted")
	}
	dbReq := &Request{
		Url:        urls.GenerateHostURL(urls.Etcd, result.TenantId, result.Id),
		NewContent: string(body),
	}
	dbRes := c.Update(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When update host in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	return result, nil
}
// GetHost fetches a single host by id. For admin contexts, a failed
// tenant-scoped lookup falls back to scanning the full host list.
func (c *Client) GetHost(ctx *c.Context, hostId string) (*model.HostSpec, error) {
	host, err := c.getHost(ctx, hostId)
	if !IsAdminContext(ctx) || err == nil {
		return host, err
	}
	// Admin fallback: the host may belong to another tenant.
	hosts, err := c.ListHosts(ctx, map[string][]string{"id": {hostId}})
	if err != nil {
		return nil, err
	}
	for _, h := range hosts {
		if h.Id == hostId {
			return h, nil
		}
	}
	return nil, fmt.Errorf("specified host(%s) can't find", hostId)
}
// getHost fetches one host record scoped to the context's tenant.
func (c *Client) getHost(ctx *c.Context, hostId string) (*model.HostSpec, error) {
	dbReq := &Request{
		Url: urls.GenerateHostURL(urls.Etcd, ctx.TenantId, hostId),
	}
	dbRes := c.Get(dbReq)
	if dbRes.Status != "Success" {
		log.Error("When get host in db:", dbRes.Error)
		return nil, errors.New(dbRes.Error)
	}
	var host = &model.HostSpec{}
	if err := json.Unmarshal([]byte(dbRes.Message[0]), host); err != nil {
		// Report the actual unmarshal error. The previous code logged and
		// returned dbRes.Error, which is empty here because the db request
		// itself succeeded, so the real failure was silently lost.
		log.Error("When parsing host in db:", err)
		return nil, err
	}
	return host, nil
}
// DeleteHost removes the host record identified by hostId.
func (c *Client) DeleteHost(ctx *c.Context, hostId string) error {
	// Admins may delete a host owned by another tenant, so resolve the
	// owner's tenant id from the stored record first.
	tenantId := ctx.TenantId
	if IsAdminContext(ctx) {
		host, err := c.GetHost(ctx, hostId)
		if err != nil {
			log.Error(err)
			return err
		}
		tenantId = host.TenantId
	}
	dbRes := c.Delete(&Request{
		Url: urls.GenerateHostURL(urls.Etcd, tenantId, hostId),
	})
	if dbRes.Status != "Success" {
		log.Error("When delete host in db:", dbRes.Error)
		return errors.New(dbRes.Error)
	}
	return nil
}
|
package k8sutil
import (
"fmt"
"testing"
"github.com/upmc-enterprises/elasticsearch-operator/pkg/apis/elasticsearchoperator/v1"
)
// TestGetESURL verifies that GetESURL selects the https/http scheme based on
// the useSSL flag and appends port 9200.
func TestGetESURL(t *testing.T) {
	for _, v := range []struct {
		host     string
		expected string
		useSSL   bool
	}{
		{"es-ssl", "https://es-ssl:9200", true},
		{"es-bla", "http://es-bla:9200", false},
	} {
		esURL := GetESURL(v.host, &v.useSSL)
		if esURL != v.expected {
			// Pass the format string directly to Errorf instead of
			// pre-formatting with fmt.Sprintf (flagged by go vet's printf
			// check and redundant).
			t.Errorf("Expected %s, got %s", v.expected, esURL)
		}
	}
}
// TestSSLCertConfig checks that the certificate volume, named
// "<secretName>-<clusterName>", is attached to the stateful set only when
// useSSL is true.
// NOTE(review): buildStatefulSet is invoked here with 13 arguments — confirm
// this matches the function's current signature.
func TestSSLCertConfig(t *testing.T) {
	memoryCPU := v1.MemoryCPU{
		Memory: "128Mi",
		CPU:    "100m",
	}
	resources := v1.Resources{
		Requests: memoryCPU,
		Limits:   memoryCPU,
	}
	clusterName := "test"
	// SSL disabled: the cert volume must not be present.
	useSSL := false
	statefulSet := buildStatefulSet("test", clusterName, "master", "foo/image", "test", "1G", "",
		"", "", nil, &useSSL, resources, nil)
	for _, volume := range statefulSet.Spec.Template.Spec.Volumes {
		if volume.Name == fmt.Sprintf("%s-%s", secretName, clusterName) {
			t.Errorf("Found volume for certificates, was not expecting it since useSSL is false")
		}
	}
	// SSL enabled: the cert volume must be present.
	useSSL = true
	statefulSet = buildStatefulSet("test", clusterName, "master", "foo/image", "test", "1G", "",
		"", "", nil, &useSSL, resources, nil)
	found := false
	for _, volume := range statefulSet.Spec.Template.Spec.Volumes {
		if volume.Name == fmt.Sprintf("%s-%s", secretName, clusterName) {
			found = true
			break
		}
	}
	if !found {
		t.Errorf("Volume for certificates not found, was expecting it since useSSL is true")
	}
}
Fix tests: buildStatefulSet now takes an additional string argument, so the test calls gain an extra "" parameter.
package k8sutil
import (
"fmt"
"testing"
"github.com/upmc-enterprises/elasticsearch-operator/pkg/apis/elasticsearchoperator/v1"
)
// TestGetESURL verifies that GetESURL selects the https/http scheme based on
// the useSSL flag and appends port 9200.
func TestGetESURL(t *testing.T) {
	for _, v := range []struct {
		host     string
		expected string
		useSSL   bool
	}{
		{"es-ssl", "https://es-ssl:9200", true},
		{"es-bla", "http://es-bla:9200", false},
	} {
		esURL := GetESURL(v.host, &v.useSSL)
		if esURL != v.expected {
			// Pass the format string directly to Errorf instead of
			// pre-formatting with fmt.Sprintf (flagged by go vet's printf
			// check and redundant).
			t.Errorf("Expected %s, got %s", v.expected, esURL)
		}
	}
}
// TestSSLCertConfig checks that the certificate volume, named
// "<secretName>-<clusterName>", is attached to the stateful set only when
// useSSL is true.
func TestSSLCertConfig(t *testing.T) {
	memoryCPU := v1.MemoryCPU{
		Memory: "128Mi",
		CPU:    "100m",
	}
	resources := v1.Resources{
		Requests: memoryCPU,
		Limits:   memoryCPU,
	}
	clusterName := "test"
	certVolume := fmt.Sprintf("%s-%s", secretName, clusterName)

	// SSL disabled: the cert volume must not be present.
	useSSL := false
	statefulSet := buildStatefulSet("test", clusterName, "master", "foo/image", "test", "1G", "",
		"", "", "", nil, &useSSL, resources, nil)
	for _, volume := range statefulSet.Spec.Template.Spec.Volumes {
		if volume.Name == certVolume {
			t.Errorf("Found volume for certificates, was not expecting it since useSSL is false")
		}
	}

	// SSL enabled: the cert volume must be present.
	useSSL = true
	statefulSet = buildStatefulSet("test", clusterName, "master", "foo/image", "test", "1G", "",
		"", "", "", nil, &useSSL, resources, nil)
	found := false
	for _, volume := range statefulSet.Spec.Template.Spec.Volumes {
		if volume.Name == certVolume {
			found = true
			break
		}
	}
	if !found {
		t.Errorf("Volume for certificates not found, was expecting it since useSSL is true")
	}
}
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"runtime"
"sort"
"strings"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
utilvalidation "k8s.io/apimachinery/pkg/util/validation"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-helpers/storage/ephemeral"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/api/v1/resource"
podshelper "k8s.io/kubernetes/pkg/apis/core/pods"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/fieldpath"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward"
remotecommandserver "k8s.io/kubernetes/pkg/kubelet/cri/streaming/remotecommand"
"k8s.io/kubernetes/pkg/kubelet/envvars"
"k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/pkg/kubelet/status"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/subpath"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
volumevalidation "k8s.io/kubernetes/pkg/volume/validation"
"k8s.io/kubernetes/third_party/forked/golang/expansion"
utilnet "k8s.io/utils/net"
)
// Headers written at the top of a kubelet-managed hosts file so it can be
// distinguished from a hand-edited one; the host-network variant marks files
// derived from the node's own /etc/hosts.
const (
	managedHostsHeader                = "# Kubernetes-managed hosts file.\n"
	managedHostsHeaderWithHostNetwork = "# Kubernetes-managed hosts file (host network).\n"
)

// Container state reason list
const (
	// PodInitializing is the container state reason used while the pod is
	// still initializing.
	PodInitializing = "PodInitializing"
	// ContainerCreating is the container state reason used while the
	// container is being created.
	ContainerCreating = "ContainerCreating"
)
// listPodsFromDisk returns the UIDs of pods that still have a data directory
// under the kubelet's pods dir (each pod's directory is named by its UID).
func (kl *Kubelet) listPodsFromDisk() ([]types.UID, error) {
	// os.ReadDir replaces the deprecated ioutil.ReadDir; like its
	// predecessor it returns entries sorted by filename, and it avoids the
	// per-entry Stat since only the name and directory bit are needed here.
	entries, err := os.ReadDir(kl.getPodsDir())
	if err != nil {
		return nil, err
	}
	pods := []types.UID{}
	for _, entry := range entries {
		if entry.IsDir() {
			pods = append(pods, types.UID(entry.Name()))
		}
	}
	return pods, nil
}
// GetActivePods returns pods that have been admitted to the kubelet that
// are not fully terminated. This is mapped to the "desired state" of the
// kubelet - what pods should be running.
//
// WARNING: Currently this list does not include pods that have been force
// deleted but may still be terminating, which means resources assigned to
// those pods during admission may still be in use. See
// https://github.com/kubernetes/kubernetes/issues/104824
func (kl *Kubelet) GetActivePods() []*v1.Pod {
	return kl.filterOutInactivePods(kl.podManager.GetPods())
}
// makeBlockVolumes maps the raw block devices specified in the path of the container
// Experimental
//
// For each VolumeDevice it validates the in-container path, looks up the
// pod's block volume mapper, and — if the device symlink already exists under
// the pod device path — emits a DeviceInfo for the runtime.
func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVolumes kubecontainer.VolumeMap, blkutil volumepathhandler.BlockVolumePathHandler) ([]kubecontainer.DeviceInfo, error) {
	var devices []kubecontainer.DeviceInfo
	for _, device := range container.VolumeDevices {
		// check path is absolute
		if !filepath.IsAbs(device.DevicePath) {
			return nil, fmt.Errorf("error DevicePath `%s` must be an absolute path", device.DevicePath)
		}
		// The named volume must be attached to the pod and support block mode.
		vol, ok := podVolumes[device.Name]
		if !ok || vol.BlockVolumeMapper == nil {
			klog.ErrorS(nil, "Block volume cannot be satisfied for container, because the volume is missing or the volume mapper is nil", "containerName", container.Name, "device", device)
			return nil, fmt.Errorf("cannot find volume %q to pass into container %q", device.Name, container.Name)
		}
		// Get a symbolic link associated to a block device under pod device path
		dirPath, volName := vol.BlockVolumeMapper.GetPodDeviceMapPath()
		symlinkPath := path.Join(dirPath, volName)
		if islinkExist, checkErr := blkutil.IsSymlinkExist(symlinkPath); checkErr != nil {
			return nil, checkErr
		} else if islinkExist {
			// Check readOnly in PVCVolumeSource and set read only permission if it's true.
			// Read-only volumes get just "r"; otherwise full "mrw" access.
			permission := "mrw"
			if vol.ReadOnly {
				permission = "r"
			}
			klog.V(4).InfoS("Device will be attached to container in the corresponding path on host", "containerName", container.Name, "path", symlinkPath)
			devices = append(devices, kubecontainer.DeviceInfo{PathOnHost: symlinkPath, PathInContainer: device.DevicePath, Permissions: permission})
		}
		// NOTE(review): when the symlink does not exist the device is silently
		// skipped (no entry, no error) — confirm this is intended.
	}
	return devices, nil
}
// shouldMountHostsFile checks if the nodes /etc/hosts should be mounted
// Kubernetes only mounts on /etc/hosts if:
// - container is not an infrastructure (pause) container
// - container is not already mounting on /etc/hosts
// - if it is Windows and ContainerD is used.
// Kubernetes will not mount /etc/hosts if:
// - when the Pod sandbox is being created, its IP is still unknown. Hence, PodIP will not have been set.
// - Windows pod contains a hostProcess container
func shouldMountHostsFile(pod *v1.Pod, podIPs []string, supportsSingleFileMapping bool) bool {
	// No pod IP yet, or the runtime cannot mount single files: never mount.
	if len(podIPs) == 0 || !supportsSingleFileMapping {
		return false
	}
	// Windows host-process pods must not get a managed hosts file.
	if runtime.GOOS == "windows" && utilfeature.DefaultFeatureGate.Enabled(features.WindowsHostProcessContainers) {
		return !kubecontainer.HasWindowsHostProcessContainer(pod)
	}
	return true
}
// makeMounts determines the mount points for the given container.
// It resolves each VolumeMount against the pod's attached volumes, prepares
// any subPath bind mounts, normalizes Windows paths, and appends the managed
// /etc/hosts mount when appropriate. The returned cleanupAction (which may
// be nil) must be run by the caller once the mounts are no longer needed.
func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain string, podIPs []string, podVolumes kubecontainer.VolumeMap, hu hostutil.HostUtils, subpather subpath.Interface, expandEnvs []kubecontainer.EnvVar, supportsSingleFileMapping bool) ([]kubecontainer.Mount, func(), error) {
	mountEtcHostsFile := shouldMountHostsFile(pod, podIPs, supportsSingleFileMapping)
	klog.V(3).InfoS("Creating hosts mount for container", "pod", klog.KObj(pod), "containerName", container.Name, "podIPs", podIPs, "path", mountEtcHostsFile)
	mounts := []kubecontainer.Mount{}
	var cleanupAction func()
	for i, mount := range container.VolumeMounts {
		// do not mount /etc/hosts if container is already mounting on the path
		mountEtcHostsFile = mountEtcHostsFile && (mount.MountPath != etcHostsPath)
		vol, ok := podVolumes[mount.Name]
		if !ok || vol.Mounter == nil {
			klog.ErrorS(nil, "Mount cannot be satisfied for the container, because the volume is missing or the volume mounter (vol.Mounter) is nil",
				"containerName", container.Name, "ok", ok, "volumeMounter", mount)
			return nil, cleanupAction, fmt.Errorf("cannot find volume %q to mount into container %q", mount.Name, container.Name)
		}
		relabelVolume := false
		// If the volume supports SELinux and it has not been
		// relabeled already and it is not a read-only volume,
		// relabel it and mark it as labeled
		if vol.Mounter.GetAttributes().Managed && vol.Mounter.GetAttributes().SupportsSELinux && !vol.SELinuxLabeled {
			vol.SELinuxLabeled = true
			relabelVolume = true
		}
		hostPath, err := volumeutil.GetPath(vol.Mounter)
		if err != nil {
			return nil, cleanupAction, err
		}
		// A non-empty SubPathExpr (env-var expanded) takes precedence over SubPath.
		subPath := mount.SubPath
		if mount.SubPathExpr != "" {
			subPath, err = kubecontainer.ExpandContainerVolumeMounts(mount, expandEnvs)
			if err != nil {
				return nil, cleanupAction, err
			}
		}
		if subPath != "" {
			// subPath must stay a relative path inside the volume.
			if filepath.IsAbs(subPath) {
				return nil, cleanupAction, fmt.Errorf("error SubPath `%s` must not be an absolute path", subPath)
			}
			err = volumevalidation.ValidatePathNoBacksteps(subPath)
			if err != nil {
				return nil, cleanupAction, fmt.Errorf("unable to provision SubPath `%s`: %v", subPath, err)
			}
			volumePath := hostPath
			hostPath = filepath.Join(volumePath, subPath)
			if subPathExists, err := hu.PathExists(hostPath); err != nil {
				klog.ErrorS(nil, "Could not determine if subPath exists, will not attempt to change its permissions", "path", hostPath)
			} else if !subPathExists {
				// Create the sub path now because if it's auto-created later when referenced, it may have an
				// incorrect ownership and mode. For example, the sub path directory must have at least g+rwx
				// when the pod specifies an fsGroup, and if the directory is not created here, Docker will
				// later auto-create it with the incorrect mode 0750
				// Make extra care not to escape the volume!
				perm, err := hu.GetMode(volumePath)
				if err != nil {
					return nil, cleanupAction, err
				}
				if err := subpather.SafeMakeDir(subPath, volumePath, perm); err != nil {
					// Don't pass detailed error back to the user because it could give information about host filesystem
					klog.ErrorS(err, "Failed to create subPath directory for volumeMount of the container", "containerName", container.Name, "volumeMountName", mount.Name)
					return nil, cleanupAction, fmt.Errorf("failed to create subPath directory for volumeMount %q of container %q", mount.Name, container.Name)
				}
			}
			hostPath, cleanupAction, err = subpather.PrepareSafeSubpath(subpath.Subpath{
				VolumeMountIndex: i,
				Path:             hostPath,
				VolumeName:       vol.InnerVolumeSpecName,
				VolumePath:       volumePath,
				PodDir:           podDir,
				ContainerName:    container.Name,
			})
			if err != nil {
				// Don't pass detailed error back to the user because it could give information about host filesystem
				klog.ErrorS(err, "Failed to prepare subPath for volumeMount of the container", "containerName", container.Name, "volumeMountName", mount.Name)
				return nil, cleanupAction, fmt.Errorf("failed to prepare subPath for volumeMount %q of container %q", mount.Name, container.Name)
			}
		}
		// Docker Volume Mounts fail on Windows if it is not of the form C:/
		if volumeutil.IsWindowsLocalPath(runtime.GOOS, hostPath) {
			hostPath = volumeutil.MakeAbsolutePath(runtime.GOOS, hostPath)
		}
		containerPath := mount.MountPath
		// IsAbs returns false for UNC path/SMB shares/named pipes in Windows. So check for those specifically and skip MakeAbsolutePath
		if !volumeutil.IsWindowsUNCPath(runtime.GOOS, containerPath) && !filepath.IsAbs(containerPath) {
			containerPath = volumeutil.MakeAbsolutePath(runtime.GOOS, containerPath)
		}
		propagation, err := translateMountPropagation(mount.MountPropagation)
		if err != nil {
			return nil, cleanupAction, err
		}
		klog.V(5).InfoS("Mount has propagation", "pod", klog.KObj(pod), "containerName", container.Name, "volumeMountName", mount.Name, "propagation", propagation)
		// The mount is read-only if either the spec or the mounter demands it.
		mustMountRO := vol.Mounter.GetAttributes().ReadOnly
		mounts = append(mounts, kubecontainer.Mount{
			Name:           mount.Name,
			ContainerPath:  containerPath,
			HostPath:       hostPath,
			ReadOnly:       mount.ReadOnly || mustMountRO,
			SELinuxRelabel: relabelVolume,
			Propagation:    propagation,
		})
	}
	// Append the managed /etc/hosts mount unless disabled above (no pod IP,
	// unsupported runtime, or the container mounts that path itself).
	if mountEtcHostsFile {
		hostAliases := pod.Spec.HostAliases
		hostsMount, err := makeHostsMount(podDir, podIPs, hostName, hostDomain, hostAliases, pod.Spec.HostNetwork)
		if err != nil {
			return nil, cleanupAction, err
		}
		mounts = append(mounts, *hostsMount)
	}
	return mounts, cleanupAction, nil
}
// translateMountPropagation transforms v1.MountPropagationMode to
// runtimeapi.MountPropagation.
func translateMountPropagation(mountMode *v1.MountPropagationMode) (runtimeapi.MountPropagation, error) {
	// Windows containers doesn't support mount propagation, use private for it.
	// Refer https://docs.docker.com/storage/bind-mounts/#configure-bind-propagation.
	if runtime.GOOS == "windows" {
		return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil
	}
	// A nil mode means the API default, which is private.
	if mountMode == nil {
		return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil
	}
	switch *mountMode {
	case v1.MountPropagationHostToContainer:
		return runtimeapi.MountPropagation_PROPAGATION_HOST_TO_CONTAINER, nil
	case v1.MountPropagationBidirectional:
		return runtimeapi.MountPropagation_PROPAGATION_BIDIRECTIONAL, nil
	case v1.MountPropagationNone:
		return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil
	default:
		return 0, fmt.Errorf("invalid MountPropagation mode: %q", *mountMode)
	}
}
// getEtcHostsPath returns the full host-side path to a pod's generated
// etc-hosts file, normalized for the current OS (volume mounts fail on
// Windows unless the path is of the form C:/...).
func getEtcHostsPath(podDir string) string {
	return volumeutil.MakeAbsolutePath(runtime.GOOS, path.Join(podDir, "etc-hosts"))
}
// makeHostsMount builds the mount for the kubelet-managed hosts file that is
// injected into the pod's containers. podIPs is used (rather than a single
// podIP) because podIPs is present even when the dual-stack feature flag is
// not enabled.
func makeHostsMount(podDir string, podIPs []string, hostName, hostDomainName string, hostAliases []v1.HostAlias, useHostNetwork bool) (*kubecontainer.Mount, error) {
	hostsFilePath := getEtcHostsPath(podDir)
	if err := ensureHostsFile(hostsFilePath, podIPs, hostName, hostDomainName, hostAliases, useHostNetwork); err != nil {
		return nil, err
	}
	mount := &kubecontainer.Mount{
		Name:           "k8s-managed-etc-hosts",
		ContainerPath:  etcHostsPath,
		HostPath:       hostsFilePath,
		ReadOnly:       false,
		SELinuxRelabel: true,
	}
	return mount, nil
}
// ensureHostsFile writes an up-to-date hosts file (ip, host name, domain
// name, aliases) to fileName.
func ensureHostsFile(fileName string, hostIPs []string, hostName, hostDomainName string, hostAliases []v1.HostAlias, useHostNetwork bool) error {
	var content []byte
	if useHostNetwork {
		// Host-network pods reuse the node's own hosts file; etcHostsPath is
		// its location on the node (`/etc/hosts` on *nix systems).
		var err error
		content, err = nodeHostsFileContent(etcHostsPath, hostAliases)
		if err != nil {
			return err
		}
	} else {
		// Otherwise generate a managed hosts file carrying the pod's IPs and
		// identity.
		content = managedHostsFileContent(hostIPs, hostName, hostDomainName, hostAliases)
	}
	return ioutil.WriteFile(fileName, content, 0644)
}
// nodeHostsFileContent returns the node's own hosts file prefixed with the
// host-network header and suffixed with any HostAliases entries.
func nodeHostsFileContent(hostsFilePath string, hostAliases []v1.HostAlias) ([]byte, error) {
	nodeContent, err := ioutil.ReadFile(hostsFilePath)
	if err != nil {
		return nil, err
	}
	var out bytes.Buffer
	out.WriteString(managedHostsHeaderWithHostNetwork)
	out.Write(nodeContent)
	out.Write(hostsEntriesFromHostAliases(hostAliases))
	return out.Bytes(), nil
}
// managedHostsFileContent generates the kubelet-managed /etc/hosts content
// from the pod's IPs, hostname, optional domain name, and HostAliases.
func managedHostsFileContent(hostIPs []string, hostName, hostDomainName string, hostAliases []v1.HostAlias) []byte {
	var buf bytes.Buffer
	buf.WriteString(managedHostsHeader)
	// Standard localhost entries.
	buf.WriteString("127.0.0.1\tlocalhost\n")                      // ipv4 localhost
	buf.WriteString("::1\tlocalhost ip6-localhost ip6-loopback\n") // ipv6 localhost
	buf.WriteString("fe00::0\tip6-localnet\n")
	buf.WriteString("fe00::0\tip6-mcastprefix\n")
	buf.WriteString("fe00::1\tip6-allnodes\n")
	buf.WriteString("fe00::2\tip6-allrouters\n")
	// One self-entry per pod IP (podIPs is populated even when the
	// dual-stack feature flag is disabled); the FQDN form is used when a
	// domain name is set.
	for _, ip := range hostIPs {
		if len(hostDomainName) > 0 {
			buf.WriteString(fmt.Sprintf("%s\t%s.%s\t%s\n", ip, hostName, hostDomainName, hostName))
		} else {
			buf.WriteString(fmt.Sprintf("%s\t%s\n", ip, hostName))
		}
	}
	buf.Write(hostsEntriesFromHostAliases(hostAliases))
	return buf.Bytes()
}
// hostsEntriesFromHostAliases renders the HostAliases section appended to a
// pod's hosts file; it returns an empty slice when there are no aliases.
func hostsEntriesFromHostAliases(hostAliases []v1.HostAlias) []byte {
	if len(hostAliases) == 0 {
		return []byte{}
	}
	var b bytes.Buffer
	b.WriteString("\n")
	b.WriteString("# Entries added by HostAliases.\n")
	// All hostnames for one IP share a single tab-separated line.
	for _, alias := range hostAliases {
		fmt.Fprintf(&b, "%s\t%s\n", alias.IP, strings.Join(alias.Hostnames, "\t"))
	}
	return b.Bytes()
}
// truncatePodHostnameIfNeeded truncates the pod hostname if it's longer than 63 chars.
func truncatePodHostnameIfNeeded(podName, hostname string) (string, error) {
	// Cap hostname at 63 chars (specification is 64 bytes, which is 63 chars
	// plus the null terminating char).
	const hostnameMaxLen = 63
	if len(hostname) <= hostnameMaxLen {
		return hostname, nil
	}
	shortened := hostname[:hostnameMaxLen]
	klog.ErrorS(nil, "Hostname for pod was too long, truncated it", "podName", podName, "hostnameMaxLen", hostnameMaxLen, "truncatedHostname", shortened)
	// A hostname must not end with '-' or '.', which the cut may expose.
	shortened = strings.TrimRight(shortened, "-.")
	if shortened == "" {
		// Only reachable if the first 63 characters were all '-' or '.'.
		return "", fmt.Errorf("hostname for pod %q was invalid: %q", podName, hostname)
	}
	return shortened, nil
}
// GeneratePodHostNameAndDomain creates a hostname and domain name for a pod,
// given that pod's spec and annotations or returns an error.
func (kl *Kubelet) GeneratePodHostNameAndDomain(pod *v1.Pod) (string, string, error) {
	// The pod name is the default hostname; spec.hostname overrides it when
	// it is a valid DNS label.
	hostname := pod.Name
	if len(pod.Spec.Hostname) > 0 {
		if msgs := utilvalidation.IsDNS1123Label(pod.Spec.Hostname); len(msgs) != 0 {
			return "", "", fmt.Errorf("pod Hostname %q is not a valid DNS label: %s", pod.Spec.Hostname, strings.Join(msgs, ";"))
		}
		hostname = pod.Spec.Hostname
	}
	hostname, err := truncatePodHostnameIfNeeded(pod.Name, hostname)
	if err != nil {
		return "", "", err
	}
	// A subdomain yields a "<subdomain>.<namespace>.svc.<cluster-domain>" domain.
	hostDomain := ""
	if len(pod.Spec.Subdomain) > 0 {
		if msgs := utilvalidation.IsDNS1123Label(pod.Spec.Subdomain); len(msgs) != 0 {
			return "", "", fmt.Errorf("pod Subdomain %q is not a valid DNS label: %s", pod.Spec.Subdomain, strings.Join(msgs, ";"))
		}
		hostDomain = fmt.Sprintf("%s.%s.svc.%s", pod.Spec.Subdomain, pod.Namespace, kl.dnsConfigurer.ClusterDomain)
	}
	return hostname, hostDomain, nil
}
// GetPodCgroupParent gets pod cgroup parent from container manager.
func (kl *Kubelet) GetPodCgroupParent(pod *v1.Pod) string {
	pcm := kl.containerManager.NewPodContainerManager()
	_, parent := pcm.GetPodContainerName(pod)
	return parent
}
// GenerateRunContainerOptions generates the RunContainerOptions, which can be used by
// the container runtime to set parameters for launching a container.
// It assembles, in order: cgroup/device resources, the kernel hostname, raw
// block devices, environment variables, volume mounts (including the managed
// /etc/hosts), and the termination-message directory. The returned cleanup
// func (which may be nil) must be invoked by the caller.
func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (*kubecontainer.RunContainerOptions, func(), error) {
	opts, err := kl.containerManager.GetResources(pod, container)
	if err != nil {
		return nil, nil, err
	}
	// The value of hostname is the short host name and it is sent to makeMounts to create /etc/hosts file.
	hostname, hostDomainName, err := kl.GeneratePodHostNameAndDomain(pod)
	if err != nil {
		return nil, nil, err
	}
	// nodename will be equal to hostname if SetHostnameAsFQDN is nil or false. If SetHostnameAsFQDN
	// is true and hostDomainName is defined, nodename will be the FQDN (hostname.hostDomainName)
	nodename, err := util.GetNodenameForKernel(hostname, hostDomainName, pod.Spec.SetHostnameAsFQDN)
	if err != nil {
		return nil, nil, err
	}
	opts.Hostname = nodename
	podName := volumeutil.GetUniquePodName(pod)
	volumes := kl.volumeManager.GetMountedVolumesForPod(podName)
	// Resolve raw block devices for the container's volumeDevices.
	blkutil := volumepathhandler.NewBlockVolumePathHandler()
	blkVolumes, err := kl.makeBlockVolumes(pod, container, volumes, blkutil)
	if err != nil {
		return nil, nil, err
	}
	opts.Devices = append(opts.Devices, blkVolumes...)
	envs, err := kl.makeEnvironmentVariables(pod, container, podIP, podIPs)
	if err != nil {
		return nil, nil, err
	}
	opts.Envs = append(opts.Envs, envs...)
	// we can only mount individual files (e.g.: /etc/hosts, termination-log files) on Windows only if we're using Containerd.
	supportsSingleFileMapping := kl.containerRuntime.SupportsSingleFileMapping()
	// only podIPs is sent to makeMounts, as podIPs is populated even if dual-stack feature flag is not enabled.
	mounts, cleanupAction, err := makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIPs, volumes, kl.hostutil, kl.subpather, opts.Envs, supportsSingleFileMapping)
	if err != nil {
		return nil, cleanupAction, err
	}
	opts.Mounts = append(opts.Mounts, mounts...)
	// adding TerminationMessagePath on Windows is only allowed if ContainerD is used. Individual files cannot
	// be mounted as volumes using Docker for Windows.
	if len(container.TerminationMessagePath) != 0 && supportsSingleFileMapping {
		p := kl.getPodContainerDir(pod.UID, container.Name)
		// A failure to create the dir is logged but not fatal; the option is
		// simply left unset in that case.
		if err := os.MkdirAll(p, 0750); err != nil {
			klog.ErrorS(err, "Error on creating dir", "path", p)
		} else {
			opts.PodContainerDir = p
		}
	}
	// only do this check if the experimental behavior is enabled, otherwise allow it to default to false
	if kl.experimentalHostUserNamespaceDefaulting {
		opts.EnableHostUserNamespace = kl.enableHostUserNamespace(pod)
	}
	return opts, cleanupAction, nil
}
// masterServices is the set of service names in the master service namespace
// whose environment variables are always injected into pods, regardless of
// the pod's enableServiceLinks setting (currently only "kubernetes").
var masterServices = sets.NewString("kubernetes")
// getServiceEnvVarMap makes a map[string]string of env vars for services a
// pod in namespace ns should see. Master services (see masterServices) are
// always included; other services in ns are included only when
// enableServiceLinks is true.
func (kl *Kubelet) getServiceEnvVarMap(ns string, enableServiceLinks bool) (map[string]string, error) {
	var (
		serviceMap = make(map[string]*v1.Service)
		m          = make(map[string]string)
	)
	// Get all service resources from the master (via a cache),
	// and populate them into service environment variables.
	if kl.serviceLister == nil {
		// Kubelets without masters (e.g. plain GCE ContainerVM) don't set env vars.
		return m, nil
	}
	services, err := kl.serviceLister.List(labels.Everything())
	if err != nil {
		// Include the underlying error; the original message dropped it, which
		// made list failures impossible to diagnose from the returned error.
		return m, fmt.Errorf("failed to list services when setting up env vars: %v", err)
	}
	// project the services in namespace ns onto the master services
	for i := range services {
		service := services[i]
		// ignore services where ClusterIP is "None" or empty
		if !v1helper.IsServiceIPSet(service) {
			continue
		}
		serviceName := service.Name
		// We always want to add environment variables for master services
		// from the master service namespace, even if enableServiceLinks is false.
		// We also add environment variables for other services in the same
		// namespace, if enableServiceLinks is true.
		if service.Namespace == kl.masterServiceNamespace && masterServices.Has(serviceName) {
			if _, exists := serviceMap[serviceName]; !exists {
				serviceMap[serviceName] = service
			}
		} else if service.Namespace == ns && enableServiceLinks {
			serviceMap[serviceName] = service
		}
	}
	// Flatten the deduplicated services and convert them into env vars.
	mappedServices := make([]*v1.Service, 0, len(serviceMap))
	for key := range serviceMap {
		mappedServices = append(mappedServices, serviceMap[key])
	}
	for _, e := range envvars.FromServices(mappedServices) {
		m[e.Name] = e.Value
	}
	return m, nil
}
// Make the environment variables for a pod in the given namespace.
//
// Resolution order: EnvFrom sources (ConfigMaps/Secrets) are processed first,
// then container.Env entries override them, then any remaining service env
// vars are appended. Returns an error if service links cannot be determined
// or a required (non-optional) source is missing.
func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) ([]kubecontainer.EnvVar, error) {
	if pod.Spec.EnableServiceLinks == nil {
		return nil, fmt.Errorf("nil pod.spec.enableServiceLinks encountered, cannot construct envvars")
	}
	// If the pod originates from the kube-api, then we know that the kube-apiserver is responding and the kubelet's credentials are valid.
	// Knowing this, it is reasonable to wait until the service lister has synchronized at least once before attempting to build
	// a service env var map. This doesn't prevent the race below from happening entirely, but it does prevent the "obvious"
	// failure case of services simply not having completed a list operation that can reasonably be expected to succeed.
	// One common case this prevents is a kubelet restart reading pods before services and some pod not having the
	// KUBERNETES_SERVICE_HOST injected because we didn't wait a short time for services to sync before proceeding.
	// The KUBERNETES_SERVICE_HOST link is special because it is unconditionally injected into pods and is read by the
	// in-cluster-config for pod clients.
	if !kubetypes.IsStaticPod(pod) && !kl.serviceHasSynced() {
		return nil, fmt.Errorf("services have not yet been read at least once, cannot construct envvars")
	}
	var result []kubecontainer.EnvVar
	// Note: These are added to the docker Config, but are not included in the checksum computed
	// by kubecontainer.HashContainer(...). That way, we can still determine whether an
	// v1.Container is already running by its hash. (We don't want to restart a container just
	// because some service changed.)
	//
	// Note that there is a race between Kubelet seeing the pod and kubelet seeing the service.
	// To avoid this users can: (1) wait between starting a service and starting the pods that
	// use it; or (2) detect missing service env var and exit and be restarted; or (3) use DNS
	// instead of env vars and keep trying to resolve the DNS name of the service (recommended).
	serviceEnv, err := kl.getServiceEnvVarMap(pod.Namespace, *pod.Spec.EnableServiceLinks)
	if err != nil {
		return result, err
	}
	// Per-call caches: each referenced ConfigMap/Secret is fetched at most once,
	// and tmpEnv accumulates the final name->value mapping before ordering.
	var (
		configMaps = make(map[string]*v1.ConfigMap)
		secrets    = make(map[string]*v1.Secret)
		tmpEnv     = make(map[string]string)
	)
	// Env will override EnvFrom variables.
	// Process EnvFrom first then allow Env to replace existing values.
	for _, envFrom := range container.EnvFrom {
		switch {
		case envFrom.ConfigMapRef != nil:
			cm := envFrom.ConfigMapRef
			name := cm.Name
			configMap, ok := configMaps[name]
			if !ok {
				if kl.kubeClient == nil {
					return result, fmt.Errorf("couldn't get configMap %v/%v, no kubeClient defined", pod.Namespace, name)
				}
				optional := cm.Optional != nil && *cm.Optional
				configMap, err = kl.configMapManager.GetConfigMap(pod.Namespace, name)
				if err != nil {
					if errors.IsNotFound(err) && optional {
						// ignore error when marked optional
						continue
					}
					return result, err
				}
				configMaps[name] = configMap
			}
			// Copy every valid key; invalid env var names are collected and
			// reported once per source via an event rather than failing the pod.
			invalidKeys := []string{}
			for k, v := range configMap.Data {
				if len(envFrom.Prefix) > 0 {
					k = envFrom.Prefix + k
				}
				if errMsgs := utilvalidation.IsEnvVarName(k); len(errMsgs) != 0 {
					invalidKeys = append(invalidKeys, k)
					continue
				}
				tmpEnv[k] = v
			}
			if len(invalidKeys) > 0 {
				sort.Strings(invalidKeys)
				kl.recorder.Eventf(pod, v1.EventTypeWarning, "InvalidEnvironmentVariableNames", "Keys [%s] from the EnvFrom configMap %s/%s were skipped since they are considered invalid environment variable names.", strings.Join(invalidKeys, ", "), pod.Namespace, name)
			}
		case envFrom.SecretRef != nil:
			s := envFrom.SecretRef
			name := s.Name
			secret, ok := secrets[name]
			if !ok {
				if kl.kubeClient == nil {
					return result, fmt.Errorf("couldn't get secret %v/%v, no kubeClient defined", pod.Namespace, name)
				}
				optional := s.Optional != nil && *s.Optional
				secret, err = kl.secretManager.GetSecret(pod.Namespace, name)
				if err != nil {
					if errors.IsNotFound(err) && optional {
						// ignore error when marked optional
						continue
					}
					return result, err
				}
				secrets[name] = secret
			}
			invalidKeys := []string{}
			for k, v := range secret.Data {
				if len(envFrom.Prefix) > 0 {
					k = envFrom.Prefix + k
				}
				if errMsgs := utilvalidation.IsEnvVarName(k); len(errMsgs) != 0 {
					invalidKeys = append(invalidKeys, k)
					continue
				}
				// Secret data is []byte; env var values are strings.
				tmpEnv[k] = string(v)
			}
			if len(invalidKeys) > 0 {
				sort.Strings(invalidKeys)
				kl.recorder.Eventf(pod, v1.EventTypeWarning, "InvalidEnvironmentVariableNames", "Keys [%s] from the EnvFrom secret %s/%s were skipped since they are considered invalid environment variable names.", strings.Join(invalidKeys, ", "), pod.Namespace, name)
			}
		}
	}
	// Determine the final values of variables:
	//
	// 1. Determine the final value of each variable:
	// a. If the variable's Value is set, expand the `$(var)` references to other
	// variables in the .Value field; the sources of variables are the declared
	// variables of the container and the service environment variables
	// b. If a source is defined for an environment variable, resolve the source
	// 2. Create the container's environment in the order variables are declared
	// 3. Add remaining service environment vars
	var (
		mappingFunc = expansion.MappingFuncFor(tmpEnv, serviceEnv)
	)
	for _, envVar := range container.Env {
		runtimeVal := envVar.Value
		if runtimeVal != "" {
			// Step 1a: expand variable references
			runtimeVal = expansion.Expand(runtimeVal, mappingFunc)
		} else if envVar.ValueFrom != nil {
			// Step 1b: resolve alternate env var sources
			switch {
			case envVar.ValueFrom.FieldRef != nil:
				// Downward API field (e.g. status.podIP, spec.nodeName).
				runtimeVal, err = kl.podFieldSelectorRuntimeValue(envVar.ValueFrom.FieldRef, pod, podIP, podIPs)
				if err != nil {
					return result, err
				}
			case envVar.ValueFrom.ResourceFieldRef != nil:
				// Resource requests/limits, defaulted first so missing limits
				// resolve to node-level values for the downward API.
				defaultedPod, defaultedContainer, err := kl.defaultPodLimitsForDownwardAPI(pod, container)
				if err != nil {
					return result, err
				}
				runtimeVal, err = containerResourceRuntimeValue(envVar.ValueFrom.ResourceFieldRef, defaultedPod, defaultedContainer)
				if err != nil {
					return result, err
				}
			case envVar.ValueFrom.ConfigMapKeyRef != nil:
				cm := envVar.ValueFrom.ConfigMapKeyRef
				name := cm.Name
				key := cm.Key
				optional := cm.Optional != nil && *cm.Optional
				configMap, ok := configMaps[name]
				if !ok {
					if kl.kubeClient == nil {
						return result, fmt.Errorf("couldn't get configMap %v/%v, no kubeClient defined", pod.Namespace, name)
					}
					configMap, err = kl.configMapManager.GetConfigMap(pod.Namespace, name)
					if err != nil {
						if errors.IsNotFound(err) && optional {
							// ignore error when marked optional
							continue
						}
						return result, err
					}
					configMaps[name] = configMap
				}
				runtimeVal, ok = configMap.Data[key]
				if !ok {
					if optional {
						continue
					}
					return result, fmt.Errorf("couldn't find key %v in ConfigMap %v/%v", key, pod.Namespace, name)
				}
			case envVar.ValueFrom.SecretKeyRef != nil:
				s := envVar.ValueFrom.SecretKeyRef
				name := s.Name
				key := s.Key
				optional := s.Optional != nil && *s.Optional
				secret, ok := secrets[name]
				if !ok {
					if kl.kubeClient == nil {
						return result, fmt.Errorf("couldn't get secret %v/%v, no kubeClient defined", pod.Namespace, name)
					}
					secret, err = kl.secretManager.GetSecret(pod.Namespace, name)
					if err != nil {
						if errors.IsNotFound(err) && optional {
							// ignore error when marked optional
							continue
						}
						return result, err
					}
					secrets[name] = secret
				}
				runtimeValBytes, ok := secret.Data[key]
				if !ok {
					if optional {
						continue
					}
					return result, fmt.Errorf("couldn't find key %v in Secret %v/%v", key, pod.Namespace, name)
				}
				runtimeVal = string(runtimeValBytes)
			}
		}
		tmpEnv[envVar.Name] = runtimeVal
	}
	// Append the env vars
	for k, v := range tmpEnv {
		result = append(result, kubecontainer.EnvVar{Name: k, Value: v})
	}
	// Append remaining service env vars.
	for k, v := range serviceEnv {
		// Accesses apiserver+Pods.
		// So, the master may set service env vars, or kubelet may. In case both are doing
		// it, we skip the key from the kubelet-generated ones so we don't have duplicate
		// env vars.
		// TODO: remove this next line once all platforms use apiserver+Pods.
		if _, present := tmpEnv[k]; !present {
			result = append(result, kubecontainer.EnvVar{Name: k, Value: v})
		}
	}
	return result, nil
}
// podFieldSelectorRuntimeValue returns the runtime value of the given
// selector for a pod.
func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod *v1.Pod, podIP string, podIPs []string) (string, error) {
	path, _, err := podshelper.ConvertDownwardAPIFieldLabel(fs.APIVersion, fs.FieldPath, "")
	if err != nil {
		return "", err
	}
	// Re-order podIPs to match the node's IP family preference (#97979) and
	// let the primary podIP track the new ordering.
	if podIPs = kl.sortPodIPs(podIPs); len(podIPs) > 0 {
		podIP = podIPs[0]
	}
	switch path {
	case "spec.nodeName":
		return pod.Spec.NodeName, nil
	case "spec.serviceAccountName":
		return pod.Spec.ServiceAccountName, nil
	case "status.podIP":
		return podIP, nil
	case "status.podIPs":
		return strings.Join(podIPs, ","), nil
	case "status.hostIP":
		hostIPs, err := kl.getHostIPsAnyWay()
		if err != nil {
			return "", err
		}
		return hostIPs[0].String(), nil
	}
	// Anything else is extracted generically from the pod object.
	return fieldpath.ExtractFieldPathAsString(pod, path)
}
// containerResourceRuntimeValue returns the value of the provided container resource
func containerResourceRuntimeValue(fs *v1.ResourceFieldSelector, pod *v1.Pod, container *v1.Container) (string, error) {
	// An empty ContainerName selects the current container; otherwise the
	// named container is looked up in the pod spec.
	if name := fs.ContainerName; name != "" {
		return resource.ExtractResourceValueByContainerName(fs, pod, name)
	}
	return resource.ExtractContainerResourceValue(fs, container)
}
// killPod instructs the container runtime to kill the pod. This method requires that
// the pod status contains the result of the last syncPod, otherwise it may fail to
// terminate newly created containers and sandboxes.
func (kl *Kubelet) killPod(pod *v1.Pod, p kubecontainer.Pod, gracePeriodOverride *int64) error {
	// Ask the runtime to stop every known running container of the pod.
	if killErr := kl.containerRuntime.KillPod(pod, p, gracePeriodOverride); killErr != nil {
		return killErr
	}
	// Best effort: refresh QoS cgroup configuration now that containers are gone.
	// Failure here is logged, not returned, since the pod itself was killed.
	if qosErr := kl.containerManager.UpdateQOSCgroups(); qosErr != nil {
		klog.V(2).InfoS("Failed to update QoS cgroups while killing pod", "err", qosErr)
	}
	return nil
}
// makePodDataDirs creates the dirs for the pod datas.
func (kl *Kubelet) makePodDataDirs(pod *v1.Pod) error {
	uid := pod.UID
	// Create the pod's root, volumes and plugins directories; a directory that
	// already exists is not treated as an error.
	for _, dir := range []string{
		kl.getPodDir(uid),
		kl.getPodVolumesDir(uid),
		kl.getPodPluginsDir(uid),
	} {
		if err := os.MkdirAll(dir, 0750); err != nil && !os.IsExist(err) {
			return err
		}
	}
	return nil
}
// getPullSecretsForPod inspects the Pod and retrieves the referenced pull
// secrets. Unresolvable references are logged and skipped, so the returned
// slice may contain fewer entries than pod.Spec.ImagePullSecrets.
func (kl *Kubelet) getPullSecretsForPod(pod *v1.Pod) []v1.Secret {
	pullSecrets := []v1.Secret{}
	for _, secretRef := range pod.Spec.ImagePullSecrets {
		if len(secretRef.Name) == 0 {
			// API validation permitted entries with empty names (http://issue.k8s.io/99454#issuecomment-787838112).
			// Ignore to avoid unnecessary warnings.
			continue
		}
		secret, err := kl.secretManager.GetSecret(pod.Namespace, secretRef.Name)
		if err != nil {
			// Fix: on error GetSecret returns a nil *v1.Secret; passing that to
			// klog.KObj dereferences the typed-nil pointer (panic in GetName).
			// Log the reference we tried to resolve instead of the nil result.
			klog.InfoS("Unable to retrieve pull secret, the image pull may not succeed.", "pod", klog.KObj(pod), "secret", klog.KRef(pod.Namespace, secretRef.Name), "err", err)
			continue
		}
		pullSecrets = append(pullSecrets, *secret)
	}
	return pullSecrets
}
// countRunningContainerStatus returns how many container statuses across the
// init, regular, and ephemeral lists report a Running state.
func countRunningContainerStatus(status v1.PodStatus) int {
	running := 0
	for _, statuses := range [][]v1.ContainerStatus{
		status.InitContainerStatuses,
		status.ContainerStatuses,
		status.EphemeralContainerStatuses,
	} {
		for _, c := range statuses {
			if c.State.Running != nil {
				running++
			}
		}
	}
	return running
}
// PodCouldHaveRunningContainers returns true if the pod with the given UID could still have running
// containers. This returns false if the pod has not yet been started or the pod is unknown.
func (kl *Kubelet) PodCouldHaveRunningContainers(pod *v1.Pod) bool {
	// Delegates to the pod workers, which are the authority on container lifecycle.
	return kl.podWorkers.CouldHaveRunningContainers(pod.UID)
}
// PodResourcesAreReclaimed returns true if all required node-level resources that a pod was consuming have
// been reclaimed by the kubelet. Reclaiming resources is a prerequisite to deleting a pod from the API server.
func (kl *Kubelet) PodResourcesAreReclaimed(pod *v1.Pod, status v1.PodStatus) bool {
	// Guard 1: no deletion while containers may still be running.
	if kl.podWorkers.CouldHaveRunningContainers(pod.UID) {
		klog.V(3).InfoS("Pod is terminated, but some containers are still running", "pod", klog.KObj(pod))
		return false
	}
	// Guard 2: the reported status must show zero running containers. The guard
	// above ensures no new status can be generated; this one verifies we have
	// observed enough of the existing status.
	running := countRunningContainerStatus(status)
	if running > 0 {
		klog.V(3).InfoS("Pod is terminated, but some container status has not yet been reported", "pod", klog.KObj(pod), "running", running)
		return false
	}
	// Guard 3: volumes must be gone unless we deliberately keep terminated pod volumes.
	if kl.podVolumesExist(pod.UID) && !kl.keepTerminatedPodVolumes {
		klog.V(3).InfoS("Pod is terminated, but some volumes have not been cleaned up", "pod", klog.KObj(pod))
		return false
	}
	// Guard 4: with per-QoS cgroups, the pod's cgroup sandbox must be removed.
	if kl.kubeletConfiguration.CgroupsPerQOS && kl.containerManager.NewPodContainerManager().Exists(pod) {
		klog.V(3).InfoS("Pod is terminated, but pod cgroup sandbox has not been cleaned up", "pod", klog.KObj(pod))
		return false
	}
	// Note: we leave pod containers to be reclaimed in the background since dockershim requires the
	// container for retrieving logs and we want to make sure logs are available until the pod is
	// physically deleted.
	klog.V(3).InfoS("Pod is terminated and all resources are reclaimed", "pod", klog.KObj(pod))
	return true
}
// podResourcesAreReclaimed simply calls PodResourcesAreReclaimed with the most up-to-date status.
func (kl *Kubelet) podResourcesAreReclaimed(pod *v1.Pod) bool {
	// Prefer the status manager's view when available; fall back to the pod
	// object's own status otherwise.
	status := pod.Status
	if cached, ok := kl.statusManager.GetPodStatus(pod.UID); ok {
		status = cached
	}
	return kl.PodResourcesAreReclaimed(pod, status)
}
// filterOutInactivePods returns pods that are not in a terminal phase
// or are known to be fully terminated. This method should only be used
// when the set of pods being filtered is upstream of the pod worker, i.e.
// the pods the pod manager is aware of.
func (kl *Kubelet) filterOutInactivePods(pods []*v1.Pod) []*v1.Pod {
	active := make([]*v1.Pod, 0, len(pods))
	for _, pod := range pods {
		switch {
		case kl.podWorkers.IsPodKnownTerminated(pod.UID):
			// Fully terminated (by UID) pods are always excluded.
		case kl.isAdmittedPodTerminal(pod) && !kl.podWorkers.IsPodTerminationRequested(pod.UID):
			// Terminal pods are inactive UNLESS they are actively terminating.
		default:
			active = append(active, pod)
		}
	}
	return active
}
// isAdmittedPodTerminal returns true if the provided config source pod is in
// a terminal phase, or if the Kubelet has already indicated the pod has reached
// a terminal phase but the config source has not accepted it yet. This method
// should only be used within the pod configuration loops that notify the pod
// worker, other components should treat the pod worker as authoritative.
func (kl *Kubelet) isAdmittedPodTerminal(pod *v1.Pod) bool {
	terminal := func(phase v1.PodPhase) bool {
		return phase == v1.PodSucceeded || phase == v1.PodFailed
	}
	// The config source has observed a terminal phase; once recorded, the pod
	// must never be restarted.
	if terminal(pod.Status.Phase) {
		return true
	}
	// The Kubelet itself has marked the pod terminal (it may have been rejected
	// by Kubelet admission) even if the config source has not caught up yet.
	if status, ok := kl.statusManager.GetPodStatus(pod.UID); ok && terminal(status.Phase) {
		return true
	}
	return false
}
// removeOrphanedPodStatuses removes obsolete entries in podStatus where
// the pod is no longer considered bound to this node.
func (kl *Kubelet) removeOrphanedPodStatuses(pods []*v1.Pod, mirrorPods []*v1.Pod) {
	// Collect every UID still bound to this node (regular and mirror pods);
	// any cached status outside this set is stale.
	alive := make(map[types.UID]bool, len(pods)+len(mirrorPods))
	for _, list := range [][]*v1.Pod{pods, mirrorPods} {
		for _, pod := range list {
			alive[pod.UID] = true
		}
	}
	kl.statusManager.RemoveOrphanedStatuses(alive)
}
// deleteOrphanedMirrorPods checks whether pod killer has done with orphaned mirror pod.
// If pod killing is done, podManager.DeleteMirrorPod() is called to delete mirror pod
// from the API server
func (kl *Kubelet) deleteOrphanedMirrorPods() {
	for _, podFullname := range kl.podManager.GetOrphanedMirrorPodNames() {
		// Leave mirror pods whose workers are still terminating for a later
		// housekeeping pass.
		if kl.podWorkers.IsPodForMirrorPodTerminatingByFullName(podFullname) {
			continue
		}
		if _, err := kl.podManager.DeleteMirrorPod(podFullname, nil); err != nil {
			klog.ErrorS(err, "Encountered error when deleting mirror pod", "podName", podFullname)
		} else {
			klog.V(3).InfoS("Deleted pod", "podName", podFullname)
		}
	}
}
// HandlePodCleanups performs a series of cleanup work, including terminating
// pod workers, killing unwanted pods, and removing orphaned volumes/pod
// directories. No config changes are sent to pod workers while this method
// is executing which means no new pods can appear.
// NOTE: This function is executed by the main sync loop, so it
// should not contain any blocking calls.
func (kl *Kubelet) HandlePodCleanups() error {
	// The kubelet lacks checkpointing, so we need to introspect the set of pods
	// in the cgroup tree prior to inspecting the set of pods in our pod manager.
	// this ensures our view of the cgroup tree does not mistakenly observe pods
	// that are added after the fact...
	var (
		cgroupPods map[types.UID]cm.CgroupName
		err        error
	)
	if kl.cgroupsPerQOS {
		pcm := kl.containerManager.NewPodContainerManager()
		cgroupPods, err = pcm.GetAllPodsFromCgroups()
		if err != nil {
			return fmt.Errorf("failed to get list of pods that still exist on cgroup mounts: %v", err)
		}
	}
	allPods, mirrorPods := kl.podManager.GetPodsAndMirrorPods()
	// Pod phase progresses monotonically. Once a pod has reached a final state,
	// it should never leave regardless of the restart policy. The statuses
	// of such pods should not be changed, and there is no need to sync them.
	// TODO: the logic here does not handle two cases:
	// 1. If the containers were removed immediately after they died, kubelet
	// may fail to generate correct statuses, let alone filtering correctly.
	// 2. If kubelet restarted before writing the terminated status for a pod
	// to the apiserver, it could still restart the terminated pod (even
	// though the pod was not considered terminated by the apiserver).
	// These two conditions could be alleviated by checkpointing kubelet.
	// Stop the workers for terminated pods not in the config source
	klog.V(3).InfoS("Clean up pod workers for terminated pods")
	workingPods := kl.podWorkers.SyncKnownPods(allPods)
	// Index all known pods by UID for quick lookups below.
	allPodsByUID := make(map[types.UID]*v1.Pod)
	for _, pod := range allPods {
		allPodsByUID[pod.UID] = pod
	}
	// Identify the set of pods that have workers, which should be all pods
	// from config that are not terminated, as well as any terminating pods
	// that have already been removed from config. Pods that are terminating
	// will be added to possiblyRunningPods, to prevent overly aggressive
	// cleanup of pod cgroups.
	runningPods := make(map[types.UID]sets.Empty)
	possiblyRunningPods := make(map[types.UID]sets.Empty)
	restartablePods := make(map[types.UID]sets.Empty)
	for uid, sync := range workingPods {
		switch sync {
		case SyncPod:
			runningPods[uid] = struct{}{}
			possiblyRunningPods[uid] = struct{}{}
		case TerminatingPod:
			possiblyRunningPods[uid] = struct{}{}
		case TerminatedAndRecreatedPod:
			restartablePods[uid] = struct{}{}
		}
	}
	// Stop probing pods that are not running
	klog.V(3).InfoS("Clean up probes for terminating and terminated pods")
	kl.probeManager.CleanupPods(runningPods)
	// Terminate any pods that are observed in the runtime but not
	// present in the list of known running pods from config.
	runningRuntimePods, err := kl.runtimeCache.GetPods()
	if err != nil {
		klog.ErrorS(err, "Error listing containers")
		return err
	}
	for _, runningPod := range runningRuntimePods {
		switch workerState, ok := workingPods[runningPod.ID]; {
		case ok && workerState == SyncPod, ok && workerState == TerminatingPod:
			// if the pod worker is already in charge of this pod, we don't need to do anything
			continue
		default:
			// If the pod isn't in the set that should be running and isn't already terminating, terminate
			// now. This termination is aggressive because all known pods should already be in a known state
			// (i.e. a removed static pod should already be terminating), so these are pods that were
			// orphaned due to kubelet restart or bugs. Since housekeeping blocks other config changes, we
			// know that another pod wasn't started in the background so we are safe to terminate the
			// unknown pods.
			if _, ok := allPodsByUID[runningPod.ID]; !ok {
				klog.V(3).InfoS("Clean up orphaned pod containers", "podUID", runningPod.ID)
				// Use a one-second grace period: these pods are already orphaned.
				one := int64(1)
				kl.podWorkers.UpdatePod(UpdatePodOptions{
					UpdateType: kubetypes.SyncPodKill,
					RunningPod: runningPod,
					KillPodOptions: &KillPodOptions{
						PodTerminationGracePeriodSecondsOverride: &one,
					},
				})
			}
		}
	}
	// Remove orphaned pod statuses not in the total list of known config pods
	klog.V(3).InfoS("Clean up orphaned pod statuses")
	kl.removeOrphanedPodStatuses(allPods, mirrorPods)
	// Note that we just killed the unwanted pods. This may not have reflected
	// in the cache. We need to bypass the cache to get the latest set of
	// running pods to clean up the volumes.
	// TODO: Evaluate the performance impact of bypassing the runtime cache.
	runningRuntimePods, err = kl.containerRuntime.GetPods(false)
	if err != nil {
		klog.ErrorS(err, "Error listing containers")
		return err
	}
	// Remove orphaned volumes from pods that are known not to have any
	// containers. Note that we pass all pods (including terminated pods) to
	// the function, so that we don't remove volumes associated with terminated
	// but not yet deleted pods.
	// TODO: this method could more aggressively cleanup terminated pods
	// in the future (volumes, mount dirs, logs, and containers could all be
	// better separated)
	klog.V(3).InfoS("Clean up orphaned pod directories")
	err = kl.cleanupOrphanedPodDirs(allPods, runningRuntimePods)
	if err != nil {
		// We want all cleanup tasks to be run even if one of them failed. So
		// we just log an error here and continue other cleanup tasks.
		// This also applies to the other clean up tasks.
		klog.ErrorS(err, "Failed cleaning up orphaned pod directories")
	}
	// Remove any orphaned mirror pods (mirror pods are tracked by name via the
	// pod worker)
	klog.V(3).InfoS("Clean up orphaned mirror pods")
	kl.deleteOrphanedMirrorPods()
	// Remove any cgroups in the hierarchy for pods that are definitely no longer
	// running (not in the container runtime).
	if kl.cgroupsPerQOS {
		pcm := kl.containerManager.NewPodContainerManager()
		klog.V(3).InfoS("Clean up orphaned pod cgroups")
		kl.cleanupOrphanedPodCgroups(pcm, cgroupPods, possiblyRunningPods)
	}
	kl.backOff.GC()
	// If two pods with the same UID are observed in rapid succession, we need to
	// resynchronize the pod worker after the first pod completes and decide whether
	// to restart the pod. This happens last to avoid confusing the desired state
	// in other components and to increase the likelihood transient OS failures during
	// container start are mitigated. In general only static pods will ever reuse UIDs
	// since the apiserver uses randomly generated UUIDv4 UIDs with a very low
	// probability of collision.
	for uid := range restartablePods {
		pod, ok := allPodsByUID[uid]
		if !ok {
			continue
		}
		if kl.isAdmittedPodTerminal(pod) {
			klog.V(3).InfoS("Pod is restartable after termination due to UID reuse, but pod phase is terminal", "pod", klog.KObj(pod), "podUID", pod.UID)
			continue
		}
		start := kl.clock.Now()
		mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod)
		klog.V(3).InfoS("Pod is restartable after termination due to UID reuse", "pod", klog.KObj(pod), "podUID", pod.UID)
		kl.dispatchWork(pod, kubetypes.SyncPodCreate, mirrorPod, start)
	}
	return nil
}
// validateContainerLogStatus returns the container ID for the desired container to retrieve logs for, based on the state
// of the container. The previous flag will only return the logs for the last terminated container, otherwise, the current
// running container is preferred over a previous termination. If info about the container is not available then a specific
// error is returned to the end user.
//
// NOTE: the order of the switch cases below is significant: previous beats
// running, which beats terminated, which beats last-termination, which beats
// waiting. Do not reorder without revisiting the contract above.
func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *v1.PodStatus, containerName string, previous bool) (containerID kubecontainer.ContainerID, err error) {
	var cID string
	// Look the container up in regular, then init, then (if the feature gate
	// is on) ephemeral container statuses.
	cStatus, found := podutil.GetContainerStatus(podStatus.ContainerStatuses, containerName)
	if !found {
		cStatus, found = podutil.GetContainerStatus(podStatus.InitContainerStatuses, containerName)
	}
	if !found && utilfeature.DefaultFeatureGate.Enabled(features.EphemeralContainers) {
		cStatus, found = podutil.GetContainerStatus(podStatus.EphemeralContainerStatuses, containerName)
	}
	if !found {
		return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is not available", containerName, podName)
	}
	lastState := cStatus.LastTerminationState
	waiting, running, terminated := cStatus.State.Waiting, cStatus.State.Running, cStatus.State.Terminated
	switch {
	case previous:
		if lastState.Terminated == nil || lastState.Terminated.ContainerID == "" {
			return kubecontainer.ContainerID{}, fmt.Errorf("previous terminated container %q in pod %q not found", containerName, podName)
		}
		cID = lastState.Terminated.ContainerID
	case running != nil:
		cID = cStatus.ContainerID
	case terminated != nil:
		// in cases where the next container didn't start, terminated.ContainerID will be empty, so get logs from the lastState.Terminated.
		if terminated.ContainerID == "" {
			if lastState.Terminated != nil && lastState.Terminated.ContainerID != "" {
				cID = lastState.Terminated.ContainerID
			} else {
				return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is terminated", containerName, podName)
			}
		} else {
			cID = terminated.ContainerID
		}
	case lastState.Terminated != nil:
		if lastState.Terminated.ContainerID == "" {
			return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is terminated", containerName, podName)
		}
		cID = lastState.Terminated.ContainerID
	case waiting != nil:
		// output some info for the most common pending failures
		switch reason := waiting.Reason; reason {
		case images.ErrImagePull.Error():
			return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: image can't be pulled", containerName, podName)
		case images.ErrImagePullBackOff.Error():
			return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: trying and failing to pull image", containerName, podName)
		default:
			return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: %v", containerName, podName, reason)
		}
	default:
		// unrecognized state
		return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start - no logs yet", containerName, podName)
	}
	return kubecontainer.ParseContainerID(cID), nil
}
// GetKubeletContainerLogs returns logs from the container
// TODO: this method is returning logs of random container attempts, when it should be returning the most recent attempt
// or all of them.
func (kl *Kubelet) GetKubeletContainerLogs(ctx context.Context, podFullName, containerName string, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error {
	// Pod workers periodically write status to statusManager. If status is not
	// cached there, something is wrong (or kubelet just restarted and hasn't
	// caught up yet). Just assume the pod is not ready yet.
	name, namespace, err := kubecontainer.ParsePodFullName(podFullName)
	if err != nil {
		return fmt.Errorf("unable to parse pod full name %q: %v", podFullName, err)
	}
	pod, ok := kl.GetPodByName(namespace, name)
	if !ok {
		return fmt.Errorf("pod %q cannot be found - no logs available", name)
	}
	// For static pods, status is tracked under the mirror pod's UID.
	podUID := pod.UID
	if mirrorPod, ok := kl.podManager.GetMirrorPodByPod(pod); ok {
		podUID = mirrorPod.UID
	}
	podStatus, found := kl.statusManager.GetPodStatus(podUID)
	if !found {
		// If there is no cached status, use the status from the
		// apiserver. This is useful if kubelet has recently been
		// restarted.
		podStatus = pod.Status
	}
	// TODO: Consolidate the logic here with kuberuntime.GetContainerLogs, here we convert container name to containerID,
	// but inside kuberuntime we convert container id back to container name and restart count.
	// TODO: After separate container log lifecycle management, we should get log based on the existing log files
	// instead of container status.
	containerID, err := kl.validateContainerLogStatus(pod.Name, &podStatus, containerName, logOptions.Previous)
	if err != nil {
		return err
	}
	// Do a zero-byte write to stdout before handing off to the container runtime.
	// This ensures at least one Write call is made to the writer when copying starts,
	// even if we then block waiting for log output from the container.
	if _, err := stdout.Write([]byte{}); err != nil {
		return err
	}
	if kl.dockerLegacyService != nil {
		// dockerLegacyService should only be non-nil when we actually need it, so
		// inject it into the runtimeService.
		// TODO(random-liu): Remove this hack after deprecating unsupported log driver.
		return kl.dockerLegacyService.GetContainerLogs(ctx, pod, containerID, logOptions, stdout, stderr)
	}
	return kl.containerRuntime.GetContainerLogs(ctx, pod, containerID, logOptions, stdout, stderr)
}
// getPhase returns the phase of a pod given its container info.
//
// Init containers and regular containers are tallied separately: a failed
// init container (combined with RestartPolicy=Never) short-circuits the pod
// to Failed, while the regular-container counts drive the
// Pending/Running/Succeeded/Failed decision in the final switch.
func getPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase {
	// Tally init containers: "pending" covers not-yet-reported, running, and
	// waiting-without-a-prior-failure; "failed" counts non-zero exit codes.
	pendingInitialization := 0
	failedInitialization := 0
	for _, container := range spec.InitContainers {
		containerStatus, ok := podutil.GetContainerStatus(info, container.Name)
		if !ok {
			// No status reported yet for this init container.
			pendingInitialization++
			continue
		}
		switch {
		case containerStatus.State.Running != nil:
			// Still running: initialization is in progress.
			pendingInitialization++
		case containerStatus.State.Terminated != nil:
			if containerStatus.State.Terminated.ExitCode != 0 {
				failedInitialization++
			}
		case containerStatus.State.Waiting != nil:
			// Waiting after a previously failed run counts as failed;
			// waiting with no prior termination is still pending.
			if containerStatus.LastTerminationState.Terminated != nil {
				if containerStatus.LastTerminationState.Terminated.ExitCode != 0 {
					failedInitialization++
				}
			} else {
				pendingInitialization++
			}
		default:
			pendingInitialization++
		}
	}
	// Tally regular containers by their reported state.
	unknown := 0
	running := 0
	waiting := 0
	stopped := 0
	succeeded := 0
	for _, container := range spec.Containers {
		containerStatus, ok := podutil.GetContainerStatus(info, container.Name)
		if !ok {
			unknown++
			continue
		}
		switch {
		case containerStatus.State.Running != nil:
			running++
		case containerStatus.State.Terminated != nil:
			stopped++
			if containerStatus.State.Terminated.ExitCode == 0 {
				succeeded++
			}
		case containerStatus.State.Waiting != nil:
			// A waiting container with a prior termination has already run
			// and exited, so it counts as stopped; otherwise it has never
			// started and counts as waiting.
			if containerStatus.LastTerminationState.Terminated != nil {
				stopped++
			} else {
				waiting++
			}
		default:
			unknown++
		}
	}
	// A failed init container is terminal when the pod will never restart.
	if failedInitialization > 0 && spec.RestartPolicy == v1.RestartPolicyNever {
		return v1.PodFailed
	}
	switch {
	case pendingInitialization > 0:
		fallthrough
	case waiting > 0:
		klog.V(5).InfoS("Pod waiting > 0, pending")
		// One or more containers has not been started
		return v1.PodPending
	case running > 0 && unknown == 0:
		// All containers have been started, and at least
		// one container is running
		return v1.PodRunning
	case running == 0 && stopped > 0 && unknown == 0:
		// All containers are terminated
		if spec.RestartPolicy == v1.RestartPolicyAlways {
			// All containers are in the process of restarting
			return v1.PodRunning
		}
		if stopped == succeeded {
			// RestartPolicy is not Always, and all
			// containers are terminated in success
			return v1.PodSucceeded
		}
		if spec.RestartPolicy == v1.RestartPolicyNever {
			// RestartPolicy is Never, and all containers are
			// terminated with at least one in failure
			return v1.PodFailed
		}
		// RestartPolicy is OnFailure, and at least one in failure
		// and in the process of restarting
		return v1.PodRunning
	default:
		// Some container state could not be determined; report Pending.
		klog.V(5).InfoS("Pod default case, pending")
		return v1.PodPending
	}
}
// generateAPIPodStatus creates the final API pod status for a pod, given the
// internal pod status. This method should only be called from within sync*Pod methods.
//
// The resulting status merges the runtime-reported state (podStatus) with the
// previously reported status (from the status manager, or the API server as a
// fallback), enforcing that pods never leave a terminal phase.
func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodStatus {
	klog.V(3).InfoS("Generating pod status", "pod", klog.KObj(pod))
	// use the previous pod status, or the api status, as the basis for this pod
	oldPodStatus, found := kl.statusManager.GetPodStatus(pod.UID)
	if !found {
		oldPodStatus = pod.Status
	}
	s := kl.convertStatusToAPIStatus(pod, podStatus, oldPodStatus)
	// calculate the next phase and preserve reason
	// (phase is computed over regular and init container statuses combined)
	allStatus := append(append([]v1.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...)
	s.Phase = getPhase(&pod.Spec, allStatus)
	klog.V(4).InfoS("Got phase for pod", "pod", klog.KObj(pod), "oldPhase", oldPodStatus.Phase, "phase", s.Phase)
	// Perform a three-way merge between the statuses from the status manager,
	// runtime, and generated status to ensure terminal status is correctly set.
	if s.Phase != v1.PodFailed && s.Phase != v1.PodSucceeded {
		switch {
		case oldPodStatus.Phase == v1.PodFailed || oldPodStatus.Phase == v1.PodSucceeded:
			klog.V(4).InfoS("Status manager phase was terminal, updating phase to match", "pod", klog.KObj(pod), "phase", oldPodStatus.Phase)
			s.Phase = oldPodStatus.Phase
		case pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded:
			klog.V(4).InfoS("API phase was terminal, updating phase to match", "pod", klog.KObj(pod), "phase", pod.Status.Phase)
			s.Phase = pod.Status.Phase
		}
	}
	if s.Phase == oldPodStatus.Phase {
		// preserve the reason and message which is associated with the phase
		s.Reason = oldPodStatus.Reason
		s.Message = oldPodStatus.Message
		if len(s.Reason) == 0 {
			s.Reason = pod.Status.Reason
		}
		if len(s.Message) == 0 {
			s.Message = pod.Status.Message
		}
	}
	// check if an internal module has requested the pod is evicted and override the reason and message
	for _, podSyncHandler := range kl.PodSyncHandlers {
		if result := podSyncHandler.ShouldEvict(pod); result.Evict {
			s.Phase = v1.PodFailed
			s.Reason = result.Reason
			s.Message = result.Message
			break
		}
	}
	// pods are not allowed to transition out of terminal phases
	if pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded {
		// API server shows terminal phase; transitions are not allowed
		if s.Phase != pod.Status.Phase {
			klog.ErrorS(nil, "Pod attempted illegal phase transition", "pod", klog.KObj(pod), "originalStatusPhase", pod.Status.Phase, "apiStatusPhase", s.Phase, "apiStatus", s)
			// Force back to phase from the API server
			s.Phase = pod.Status.Phase
		}
	}
	// ensure the probe managers have up to date status for containers
	kl.probeManager.UpdatePodStatus(pod.UID, s)
	// preserve all conditions not owned by the kubelet
	s.Conditions = make([]v1.PodCondition, 0, len(pod.Status.Conditions)+1)
	for _, c := range pod.Status.Conditions {
		if !kubetypes.PodConditionByKubelet(c.Type) {
			s.Conditions = append(s.Conditions, c)
		}
	}
	// set all Kubelet-owned conditions
	s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(&pod.Spec, s.InitContainerStatuses, s.Phase))
	s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(&pod.Spec, s.Conditions, s.ContainerStatuses, s.Phase))
	s.Conditions = append(s.Conditions, status.GenerateContainersReadyCondition(&pod.Spec, s.ContainerStatuses, s.Phase))
	s.Conditions = append(s.Conditions, v1.PodCondition{
		Type:   v1.PodScheduled,
		Status: v1.ConditionTrue,
	})
	// set HostIP and initialize PodIP/PodIPs for host network pods
	if kl.kubeClient != nil {
		hostIPs, err := kl.getHostIPsAnyWay()
		if err != nil {
			klog.V(4).InfoS("Cannot get host IPs", "err", err)
		} else {
			s.HostIP = hostIPs[0].String()
			// Host-network pods report the node's IP(s) as their pod IPs.
			if kubecontainer.IsHostNetworkPod(pod) && s.PodIP == "" {
				s.PodIP = hostIPs[0].String()
				s.PodIPs = []v1.PodIP{{IP: s.PodIP}}
				if len(hostIPs) == 2 {
					s.PodIPs = append(s.PodIPs, v1.PodIP{IP: hostIPs[1].String()})
				}
			}
		}
	}
	return *s
}
// sortPodIPs returns the PodIPs sorted and truncated by the cluster IP family
// preference. The runtime pod status may carry any number of IPs in any order
// (see kubeGenericRuntimeManager.determinePodSandboxIPs). We keep at most two:
// the first IP matching the node's primary IP family, followed by the first IP
// of the opposite family (if present). These feed Pod.Status.PodIPs and the
// Downward API environment variables.
func (kl *Kubelet) sortPodIPs(podIPs []string) []string {
	// Default to IPv4-primary; only flip when the node's first IP is IPv6.
	isPrimary, isSecondary := utilnet.IsIPv4String, utilnet.IsIPv6String
	if len(kl.nodeIPs) != 0 && !utilnet.IsIPv4(kl.nodeIPs[0]) {
		isPrimary, isSecondary = utilnet.IsIPv6String, utilnet.IsIPv4String
	}
	sorted := make([]string, 0, 2)
	for _, candidate := range podIPs {
		if isPrimary(candidate) {
			sorted = append(sorted, candidate)
			break
		}
	}
	for _, candidate := range podIPs {
		if isSecondary(candidate) {
			sorted = append(sorted, candidate)
			break
		}
	}
	return sorted
}
// convertStatusToAPIStatus initializes an API PodStatus for the given pod from
// the given internal pod status and the previous state of the pod from the API.
// It is purely transformative and does not alter the kubelet state at all.
func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus, oldPodStatus v1.PodStatus) *v1.PodStatus {
	var apiPodStatus v1.PodStatus

	// Work on a private copy of the IP list to avoid racing with concurrent
	// readers of PodStatus (#102806), and reorder it so that the node's IP
	// family preference comes first (#97979).
	podIPs := make([]string, len(podStatus.IPs))
	copy(podIPs, podStatus.IPs)
	podIPs = kl.sortPodIPs(podIPs)
	for _, ip := range podIPs {
		apiPodStatus.PodIPs = append(apiPodStatus.PodIPs, v1.PodIP{IP: ip})
	}
	if len(apiPodStatus.PodIPs) > 0 {
		apiPodStatus.PodIP = apiPodStatus.PodIPs[0].IP
	}

	// set status for Pods created on versions of kube older than 1.6
	apiPodStatus.QOSClass = v1qos.GetPodQOS(pod)

	hasInitContainers := len(pod.Spec.InitContainers) > 0
	apiPodStatus.ContainerStatuses = kl.convertToAPIContainerStatuses(
		pod, podStatus, oldPodStatus.ContainerStatuses, pod.Spec.Containers, hasInitContainers, false)
	apiPodStatus.InitContainerStatuses = kl.convertToAPIContainerStatuses(
		pod, podStatus, oldPodStatus.InitContainerStatuses, pod.Spec.InitContainers, hasInitContainers, true)

	if utilfeature.DefaultFeatureGate.Enabled(features.EphemeralContainers) {
		// Ephemeral containers share the Container status machinery; strip
		// the wrapper type before conversion.
		var ecSpecs []v1.Container
		for i := range pod.Spec.EphemeralContainers {
			ecSpecs = append(ecSpecs, v1.Container(pod.Spec.EphemeralContainers[i].EphemeralContainerCommon))
		}
		// #80875: By now we've iterated podStatus 3 times. We could refactor
		// this to make a single pass through podStatus.ContainerStatuses.
		apiPodStatus.EphemeralContainerStatuses = kl.convertToAPIContainerStatuses(
			pod, podStatus, oldPodStatus.EphemeralContainerStatuses, ecSpecs, hasInitContainers, false)
	}

	return &apiPodStatus
}
// convertToAPIContainerStatuses converts the given internal container
// statuses into API container statuses.
//
// It merges three sources of information:
//   - previousStatus: the statuses from the last generated API status, used to
//     preserve restart counts / last termination states and to detect
//     containers that disappeared from the runtime while running;
//   - podStatus: the current runtime-reported container statuses;
//   - kl.reasonCache: recorded start-failure reasons for containers that
//     should be restarted in the next sync.
//
// hasInitContainers selects the default waiting reason (PodInitializing vs
// ContainerCreating); isInitContainer switches to init-container ordering for
// the returned slice.
func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecontainer.PodStatus, previousStatus []v1.ContainerStatus, containers []v1.Container, hasInitContainers, isInitContainer bool) []v1.ContainerStatus {
	// convertContainerStatus maps a single runtime status to an API status.
	// oldStatus is consulted only for the "state unknown but previously
	// running" case, where the container is reported as terminated rather
	// than waiting.
	convertContainerStatus := func(cs *kubecontainer.Status, oldStatus *v1.ContainerStatus) *v1.ContainerStatus {
		cid := cs.ID.String()
		status := &v1.ContainerStatus{
			Name:         cs.Name,
			RestartCount: int32(cs.RestartCount),
			Image:        cs.Image,
			ImageID:      cs.ImageID,
			ContainerID:  cid,
		}
		switch {
		case cs.State == kubecontainer.ContainerStateRunning:
			status.State.Running = &v1.ContainerStateRunning{StartedAt: metav1.NewTime(cs.StartedAt)}
		case cs.State == kubecontainer.ContainerStateCreated:
			// Treat containers in the "created" state as if they are exited.
			// The pod workers are supposed start all containers it creates in
			// one sync (syncPod) iteration. There should not be any normal
			// "created" containers when the pod worker generates the status at
			// the beginning of a sync iteration.
			fallthrough
		case cs.State == kubecontainer.ContainerStateExited:
			status.State.Terminated = &v1.ContainerStateTerminated{
				ExitCode:    int32(cs.ExitCode),
				Reason:      cs.Reason,
				Message:     cs.Message,
				StartedAt:   metav1.NewTime(cs.StartedAt),
				FinishedAt:  metav1.NewTime(cs.FinishedAt),
				ContainerID: cid,
			}
		case cs.State == kubecontainer.ContainerStateUnknown &&
			oldStatus != nil && // we have an old status
			oldStatus.State.Running != nil: // our previous status was running
			// if this happens, then we know that this container was previously running and isn't anymore (assuming the CRI isn't failing to return running containers).
			// you can imagine this happening in cases where a container failed and the kubelet didn't ask about it in time to see the result.
			// in this case, the container should not to into waiting state immediately because that can make cases like runonce pods actually run
			// twice. "container never ran" is different than "container ran and failed". This is handled differently in the kubelet
			// and it is handled differently in higher order logic like crashloop detection and handling
			status.State.Terminated = &v1.ContainerStateTerminated{
				Reason:   "ContainerStatusUnknown",
				Message:  "The container could not be located when the pod was terminated",
				ExitCode: 137, // this code indicates an error
			}
			// the restart count normally comes from the CRI (see near the top of this method), but since this is being added explicitly
			// for the case where the CRI did not return a status, we need to manually increment the restart count to be accurate.
			status.RestartCount = oldStatus.RestartCount + 1
		default:
			// this collapses any unknown state to container waiting. If any container is waiting, then the pod status moves to pending even if it is running.
			// if I'm reading this correctly, then any failure to read status on any container results in the entire pod going pending even if the containers
			// are actually running.
			// see https://github.com/kubernetes/kubernetes/blob/5d1b3e26af73dde33ecb6a3e69fb5876ceab192f/pkg/kubelet/kuberuntime/kuberuntime_container.go#L497 to
			// https://github.com/kubernetes/kubernetes/blob/8976e3620f8963e72084971d9d4decbd026bf49f/pkg/kubelet/kuberuntime/helpers.go#L58-L71
			// and interpreted here https://github.com/kubernetes/kubernetes/blob/b27e78f590a0d43e4a23ca3b2bf1739ca4c6e109/pkg/kubelet/kubelet_pods.go#L1434-L1439
			status.State.Waiting = &v1.ContainerStateWaiting{}
		}
		return status
	}

	// Fetch old containers statuses from old pod status.
	oldStatuses := make(map[string]v1.ContainerStatus, len(containers))
	for _, status := range previousStatus {
		oldStatuses[status.Name] = status
	}

	// Set all container statuses to default waiting state
	statuses := make(map[string]*v1.ContainerStatus, len(containers))
	defaultWaitingState := v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: ContainerCreating}}
	if hasInitContainers {
		defaultWaitingState = v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: PodInitializing}}
	}
	for _, container := range containers {
		status := &v1.ContainerStatus{
			Name:  container.Name,
			Image: container.Image,
			State: defaultWaitingState,
		}
		oldStatus, found := oldStatuses[container.Name]
		if found {
			if oldStatus.State.Terminated != nil {
				// A previously terminated container keeps its full old status.
				status = &oldStatus
			} else {
				// Apply some values from the old statuses as the default values.
				status.RestartCount = oldStatus.RestartCount
				status.LastTerminationState = oldStatus.LastTerminationState
			}
		}
		statuses[container.Name] = status
	}

	// Handle containers that the runtime no longer reports at all.
	for _, container := range containers {
		found := false
		for _, cStatus := range podStatus.ContainerStatuses {
			if container.Name == cStatus.Name {
				found = true
				break
			}
		}
		if found {
			continue
		}
		// if no container is found, then assuming it should be waiting seems plausible, but the status code requires
		// that a previous termination be present. If we're offline long enough or something removed the container, then
		// the previous termination may not be present. This next code block ensures that if the container was previously running
		// then when that container status disappears, we can infer that it terminated even if we don't know the status code.
		// By setting the lasttermination state we are able to leave the container status waiting and present more accurate
		// data via the API.
		oldStatus, ok := oldStatuses[container.Name]
		if !ok {
			continue
		}
		if oldStatus.State.Terminated != nil {
			// if the old container status was terminated, the lasttermination status is correct
			continue
		}
		if oldStatus.State.Running == nil {
			// if the old container status isn't running, then waiting is an appropriate status and we have nothing to do
			continue
		}
		// If we're here, we know the pod was previously running, but doesn't have a terminated status. We will check now to
		// see if it's in a pending state.
		status := statuses[container.Name]
		// If the status we're about to write indicates the default, the Waiting status will force this pod back into Pending.
		// That isn't true, we know the pod was previously running.
		isDefaultWaitingStatus := status.State.Waiting != nil && status.State.Waiting.Reason == ContainerCreating
		if hasInitContainers {
			isDefaultWaitingStatus = status.State.Waiting != nil && status.State.Waiting.Reason == PodInitializing
		}
		if !isDefaultWaitingStatus {
			// the status was written, don't override
			continue
		}
		if status.LastTerminationState.Terminated != nil {
			// if we already have a termination state, nothing to do
			continue
		}
		// setting this value ensures that we show as stopped here, not as waiting:
		// https://github.com/kubernetes/kubernetes/blob/90c9f7b3e198e82a756a68ffeac978a00d606e55/pkg/kubelet/kubelet_pods.go#L1440-L1445
		// This prevents the pod from becoming pending
		status.LastTerminationState.Terminated = &v1.ContainerStateTerminated{
			Reason:   "ContainerStatusUnknown",
			Message:  "The container could not be located when the pod was deleted.  The container used to be Running",
			ExitCode: 137,
		}
		// If the pod was not deleted, then it's been restarted. Increment restart count.
		if pod.DeletionTimestamp == nil {
			status.RestartCount += 1
		}
		statuses[container.Name] = status
	}

	// Copy the slice before sorting it
	containerStatusesCopy := make([]*kubecontainer.Status, len(podStatus.ContainerStatuses))
	copy(containerStatusesCopy, podStatus.ContainerStatuses)

	// Make the latest container status comes first.
	sort.Sort(sort.Reverse(kubecontainer.SortContainerStatusesByCreationTime(containerStatusesCopy)))
	// Set container statuses according to the statuses seen in pod status.
	// The newest status becomes the current state; the second-newest becomes
	// the last termination state; older ones are ignored.
	containerSeen := map[string]int{}
	for _, cStatus := range containerStatusesCopy {
		cName := cStatus.Name
		if _, ok := statuses[cName]; !ok {
			// This would also ignore the infra container.
			continue
		}
		if containerSeen[cName] >= 2 {
			continue
		}
		var oldStatusPtr *v1.ContainerStatus
		if oldStatus, ok := oldStatuses[cName]; ok {
			oldStatusPtr = &oldStatus
		}
		status := convertContainerStatus(cStatus, oldStatusPtr)
		if containerSeen[cName] == 0 {
			statuses[cName] = status
		} else {
			statuses[cName].LastTerminationState = status.State
		}
		containerSeen[cName] = containerSeen[cName] + 1
	}

	// Handle the containers failed to be started, which should be in Waiting state.
	for _, container := range containers {
		if isInitContainer {
			// If the init container is terminated with exit code 0, it won't be restarted.
			// TODO(random-liu): Handle this in a cleaner way.
			s := podStatus.FindContainerStatusByName(container.Name)
			if s != nil && s.State == kubecontainer.ContainerStateExited && s.ExitCode == 0 {
				continue
			}
		}
		// If a container should be restarted in next syncpod, it is *Waiting*.
		if !kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) {
			continue
		}
		status := statuses[container.Name]
		reason, ok := kl.reasonCache.Get(pod.UID, container.Name)
		if !ok {
			// In fact, we could also apply Waiting state here, but it is less informative,
			// and the container will be restarted soon, so we prefer the original state here.
			// Note that with the current implementation of ShouldContainerBeRestarted the original state here
			// could be:
			// * Waiting: There is no associated historical container and start failure reason record.
			// * Terminated: The container is terminated.
			continue
		}
		if status.State.Terminated != nil {
			status.LastTerminationState = status.State
		}
		status.State = v1.ContainerState{
			Waiting: &v1.ContainerStateWaiting{
				Reason:  reason.Err.Error(),
				Message: reason.Message,
			},
		}
		statuses[container.Name] = status
	}

	// Sort the container statuses since clients of this interface expect the list
	// of containers in a pod has a deterministic order.
	if isInitContainer {
		return kubetypes.SortStatusesOfInitContainers(pod, statuses)
	}
	containerStatuses := make([]v1.ContainerStatus, 0, len(statuses))
	for _, status := range statuses {
		containerStatuses = append(containerStatuses, *status)
	}
	sort.Sort(kubetypes.SortedContainerStatuses(containerStatuses))
	return containerStatuses
}
// ServeLogs returns logs of current machine.
// It delegates the request directly to the kubelet's log server handler.
func (kl *Kubelet) ServeLogs(w http.ResponseWriter, req *http.Request) {
	// TODO: allowlist logs we are willing to serve
	kl.logServer.ServeHTTP(w, req)
}
// findContainer finds and returns the container with the given pod ID, full name, and container name.
// It returns nil if not found.
func (kl *Kubelet) findContainer(podFullName string, podUID types.UID, containerName string) (*kubecontainer.Container, error) {
	runningPods, err := kl.containerRuntime.GetPods(false)
	if err != nil {
		return nil, err
	}
	// Resolve to the static pod UID and convert back for the kubecontainer
	// API, which works with types.UID.
	resolvedUID := types.UID(kl.podManager.TranslatePodUID(podUID))
	pod := kubecontainer.Pods(runningPods).FindPod(podFullName, resolvedUID)
	return pod.FindContainerByName(containerName), nil
}
// RunInContainer runs a command in a container, returns the combined stdout, stderr as an array of bytes
func (kl *Kubelet) RunInContainer(podFullName string, podUID types.UID, containerName string, cmd []string) ([]byte, error) {
	container, err := kl.findContainer(podFullName, podUID, containerName)
	switch {
	case err != nil:
		return nil, err
	case container == nil:
		return nil, fmt.Errorf("container not found (%q)", containerName)
	}
	// TODO(tallclair): Pass a proper timeout value.
	return kl.runner.RunInContainer(container.ID, cmd, 0)
}
// GetExec gets the URL the exec will be served from, or nil if the Kubelet will serve it.
func (kl *Kubelet) GetExec(podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error) {
	switch container, err := kl.findContainer(podFullName, podUID, containerName); {
	case err != nil:
		return nil, err
	case container == nil:
		return nil, fmt.Errorf("container not found (%q)", containerName)
	default:
		return kl.streamingRuntime.GetExec(container.ID, cmd, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, streamOpts.TTY)
	}
}
// GetAttach gets the URL the attach will be served from, or nil if the Kubelet will serve it.
func (kl *Kubelet) GetAttach(podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error) {
	container, err := kl.findContainer(podFullName, podUID, containerName)
	if err != nil {
		return nil, err
	}
	if container == nil {
		return nil, fmt.Errorf("container %s not found in pod %s", containerName, podFullName)
	}

	// The TTY setting for attach must match the TTY setting in the initial container configuration,
	// since whether the process is running in a TTY cannot be changed after it has started. We
	// need the api.Pod to get the TTY status.
	pod, found := kl.GetPodByFullName(podFullName)
	if !found || (string(podUID) != "" && pod.UID != podUID) {
		return nil, fmt.Errorf("pod %s not found", podFullName)
	}
	containerSpec := kubecontainer.GetContainerSpec(pod, containerName)
	if containerSpec == nil {
		return nil, fmt.Errorf("container %s not found in pod %s", containerName, podFullName)
	}
	return kl.streamingRuntime.GetAttach(container.ID, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, containerSpec.TTY)
}
// GetPortForward gets the URL the port-forward will be served from, or nil if the Kubelet will serve it.
func (kl *Kubelet) GetPortForward(podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error) {
	runningPods, err := kl.containerRuntime.GetPods(false)
	if err != nil {
		return nil, err
	}
	// Resolve to the static pod UID; the kubecontainer API expects types.UID.
	resolvedUID := types.UID(kl.podManager.TranslatePodUID(podUID))
	podFullName := kubecontainer.BuildPodFullName(podName, podNamespace)
	if kubecontainer.Pods(runningPods).FindPod(podFullName, resolvedUID).IsEmpty() {
		return nil, fmt.Errorf("pod not found (%q)", podFullName)
	}
	return kl.streamingRuntime.GetPortForward(podName, podNamespace, resolvedUID, portForwardOpts.Ports)
}
// cleanupOrphanedPodCgroups removes cgroups that should no longer exist.
// It reconciles the cached state of cgroupPods with the specified set of
// possibly-running pods: any cgroup whose pod is absent from that set is a
// cleanup candidate.
func (kl *Kubelet) cleanupOrphanedPodCgroups(pcm cm.PodContainerManager, cgroupPods map[types.UID]cm.CgroupName, possiblyRunningPods map[types.UID]sets.Empty) {
	for uid, cgroupName := range cgroupPods {
		// Pods that may still be running are never candidates for cleanup.
		if _, running := possiblyRunningPods[uid]; running {
			continue
		}
		// If volumes have not been unmounted/detached, do not delete the cgroup
		// so any memory backed volumes don't have their charges propagated to the
		// parent cgroup. While the volumes still exist, reduce the cpu shares for
		// any process in the cgroup to the minimum value. If the kubelet is
		// configured to keep terminated volumes, delete the cgroup without blocking.
		if kl.podVolumesExist(uid) && !kl.keepTerminatedPodVolumes {
			klog.V(3).InfoS("Orphaned pod found, but volumes not yet removed.  Reducing cpu to minimum", "podUID", uid)
			if err := pcm.ReduceCPULimits(cgroupName); err != nil {
				klog.InfoS("Failed to reduce cpu time for pod pending volume cleanup", "podUID", uid, "err", err)
			}
			continue
		}
		klog.V(3).InfoS("Orphaned pod found, removing pod cgroups", "podUID", uid)
		// Destroy all cgroups of the pod by first killing all attached
		// processes. Errors are ignored; the housekeeping loop will retry
		// deletion of any cgroups that remain.
		go pcm.Destroy(cgroupName)
	}
}
// enableHostUserNamespace determines if the host user namespace should be used by the container runtime.
// Returns true if the pod is using a host pid, ipc, or network namespace, the pod is using a non-namespaced
// capability, the pod contains a privileged container, or the pod has a host path volume.
//
// NOTE: if a container shares any namespace with another container it must also share the user namespace
// or it will not have the correct capabilities in the namespace. This means that host user namespace
// is enabled per pod, not per container.
func (kl *Kubelet) enableHostUserNamespace(pod *v1.Pod) bool {
	// Return the boolean expression directly rather than the redundant
	// `if cond { return true }; return false` form.
	return kubecontainer.HasPrivilegedContainer(pod) || hasHostNamespace(pod) ||
		hasHostVolume(pod) || hasNonNamespacedCapability(pod) || kl.hasHostMountPVC(pod)
}
// hasNonNamespacedCapability returns true if MKNOD, SYS_TIME, or SYS_MODULE is requested for any container.
// These capabilities are not namespaced, so granting them requires the host user namespace.
func hasNonNamespacedCapability(pod *v1.Pod) bool {
	for _, c := range pod.Spec.Containers {
		// Skip containers with no security context or no added capabilities.
		if c.SecurityContext == nil || c.SecurityContext.Capabilities == nil {
			continue
		}
		// Renamed from `cap` to avoid shadowing the builtin cap().
		for _, capability := range c.SecurityContext.Capabilities.Add {
			if capability == "MKNOD" || capability == "SYS_TIME" || capability == "SYS_MODULE" {
				return true
			}
		}
	}
	return false
}
// hasHostVolume returns true if the pod spec has a HostPath volume.
func hasHostVolume(pod *v1.Pod) bool {
	for i := range pod.Spec.Volumes {
		if pod.Spec.Volumes[i].HostPath != nil {
			return true
		}
	}
	return false
}
// hasHostNamespace returns true if hostIPC, hostNetwork, or hostPID are set to true.
// A nil SecurityContext always yields false, matching the original gate.
func hasHostNamespace(pod *v1.Pod) bool {
	usesHostNS := pod.Spec.HostIPC || pod.Spec.HostNetwork || pod.Spec.HostPID
	return pod.Spec.SecurityContext != nil && usesHostNS
}
// hasHostMountPVC returns true if a PVC is referencing a HostPath volume.
// Lookup failures are logged and skipped, so a transient API error never
// causes a false positive.
func (kl *Kubelet) hasHostMountPVC(pod *v1.Pod) bool {
	for i := range pod.Spec.Volumes {
		volume := &pod.Spec.Volumes[i]
		var pvcName string
		switch {
		case volume.PersistentVolumeClaim != nil:
			pvcName = volume.PersistentVolumeClaim.ClaimName
		case volume.Ephemeral != nil:
			pvcName = ephemeral.VolumeClaimName(pod, volume)
		default:
			continue
		}
		pvc, err := kl.kubeClient.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(context.TODO(), pvcName, metav1.GetOptions{})
		if err != nil {
			klog.InfoS("Unable to retrieve pvc", "pvc", klog.KRef(pod.Namespace, pvcName), "err", err)
			continue
		}
		if pvc == nil {
			continue
		}
		referencedVolume, err := kl.kubeClient.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
		if err != nil {
			klog.InfoS("Unable to retrieve pv", "pvName", pvc.Spec.VolumeName, "err", err)
			continue
		}
		if referencedVolume != nil && referencedVolume.Spec.HostPath != nil {
			return true
		}
	}
	return false
}
UPSTREAM: 110191: kubelet: only shutdown probes for pods that are terminated
This fixes a bug where terminating pods would not run their readiness
probes. Terminating pods are found within the possiblyRunningPods map.
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"runtime"
"sort"
"strings"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
utilvalidation "k8s.io/apimachinery/pkg/util/validation"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-helpers/storage/ephemeral"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/api/v1/resource"
podshelper "k8s.io/kubernetes/pkg/apis/core/pods"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/fieldpath"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward"
remotecommandserver "k8s.io/kubernetes/pkg/kubelet/cri/streaming/remotecommand"
"k8s.io/kubernetes/pkg/kubelet/envvars"
"k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/pkg/kubelet/status"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/subpath"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
volumevalidation "k8s.io/kubernetes/pkg/volume/validation"
"k8s.io/kubernetes/third_party/forked/golang/expansion"
utilnet "k8s.io/utils/net"
)
const (
managedHostsHeader = "# Kubernetes-managed hosts file.\n"
managedHostsHeaderWithHostNetwork = "# Kubernetes-managed hosts file (host network).\n"
)
// Container state reason list
const (
PodInitializing = "PodInitializing"
ContainerCreating = "ContainerCreating"
)
// listPodsFromDisk returns the UIDs of all pods that have a data directory
// under the kubelet's pods directory. Each subdirectory name is treated as a
// pod UID; plain files are ignored.
//
// NOTE(review): ioutil.ReadDir is deprecated in favor of os.ReadDir; it is
// kept here so the file-level io/ioutil import is not orphaned — migrate
// file-wide in one pass.
func (kl *Kubelet) listPodsFromDisk() ([]types.UID, error) {
	podInfos, err := ioutil.ReadDir(kl.getPodsDir())
	if err != nil {
		return nil, err
	}
	// Preallocate to the upper bound to avoid repeated slice growth.
	pods := make([]types.UID, 0, len(podInfos))
	for i := range podInfos {
		if podInfos[i].IsDir() {
			pods = append(pods, types.UID(podInfos[i].Name()))
		}
	}
	return pods, nil
}
// GetActivePods returns pods that have been admitted to the kubelet that
// are not fully terminated. This is mapped to the "desired state" of the
// kubelet - what pods should be running.
//
// WARNING: Currently this list does not include pods that have been force
// deleted but may still be terminating, which means resources assigned to
// those pods during admission may still be in use. See
// https://github.com/kubernetes/kubernetes/issues/104824
func (kl *Kubelet) GetActivePods() []*v1.Pod {
	return kl.filterOutInactivePods(kl.podManager.GetPods())
}
// makeBlockVolumes maps the raw block devices specified in the path of the container
// Experimental
func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVolumes kubecontainer.VolumeMap, blkutil volumepathhandler.BlockVolumePathHandler) ([]kubecontainer.DeviceInfo, error) {
	var devices []kubecontainer.DeviceInfo
	for _, device := range container.VolumeDevices {
		// Device paths inside the container must be absolute.
		if !filepath.IsAbs(device.DevicePath) {
			return nil, fmt.Errorf("error DevicePath `%s` must be an absolute path", device.DevicePath)
		}
		vol, ok := podVolumes[device.Name]
		if !ok || vol.BlockVolumeMapper == nil {
			klog.ErrorS(nil, "Block volume cannot be satisfied for container, because the volume is missing or the volume mapper is nil", "containerName", container.Name, "device", device)
			return nil, fmt.Errorf("cannot find volume %q to pass into container %q", device.Name, container.Name)
		}
		// Locate the symbolic link for the block device under the pod device path.
		// NOTE(review): path.Join (slash-separated) is used on a host path here;
		// filepath.Join would be the OS-correct choice — confirm before changing,
		// since other users of the `path` import may depend on it.
		dirPath, volName := vol.BlockVolumeMapper.GetPodDeviceMapPath()
		symlinkPath := path.Join(dirPath, volName)
		linkExists, checkErr := blkutil.IsSymlinkExist(symlinkPath)
		if checkErr != nil {
			return nil, checkErr
		}
		if !linkExists {
			continue
		}
		// Read-only PVCs get "r"; otherwise allow mknod, read and write.
		permission := "mrw"
		if vol.ReadOnly {
			permission = "r"
		}
		klog.V(4).InfoS("Device will be attached to container in the corresponding path on host", "containerName", container.Name, "path", symlinkPath)
		devices = append(devices, kubecontainer.DeviceInfo{PathOnHost: symlinkPath, PathInContainer: device.DevicePath, Permissions: permission})
	}
	return devices, nil
}
// shouldMountHostsFile reports whether the kubelet should manage the
// container's /etc/hosts.
// The managed hosts file is only mounted when:
// - the container is not the infrastructure (pause) container
// - the container does not already mount /etc/hosts itself
// - on Windows, ContainerD is the runtime (single-file mapping supported).
// It is never mounted when:
// - the sandbox is still being created and the pod IP is not yet known
// - a Windows pod contains a hostProcess container.
func shouldMountHostsFile(pod *v1.Pod, podIPs []string, supportsSingleFileMapping bool) bool {
	mountable := len(podIPs) > 0 && supportsSingleFileMapping
	if runtime.GOOS != "windows" || !utilfeature.DefaultFeatureGate.Enabled(features.WindowsHostProcessContainers) {
		return mountable
	}
	// Windows hostProcess pods must not receive a managed hosts file.
	return mountable && !kubecontainer.HasWindowsHostProcessContainer(pod)
}
// makeMounts determines the mount points for the given container.
//
// For each VolumeMount it resolves the host path from the pod's volume map,
// applies SELinux relabeling bookkeeping, expands and safely prepares any
// subPath, normalizes Windows paths, translates mount propagation, and
// appends a kubecontainer.Mount. When appropriate it also appends the
// kubelet-managed /etc/hosts mount.
//
// The returned cleanup func (possibly nil) must be invoked by the caller to
// release resources held by PrepareSafeSubpath, even on error.
func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain string, podIPs []string, podVolumes kubecontainer.VolumeMap, hu hostutil.HostUtils, subpather subpath.Interface, expandEnvs []kubecontainer.EnvVar, supportsSingleFileMapping bool) ([]kubecontainer.Mount, func(), error) {
	mountEtcHostsFile := shouldMountHostsFile(pod, podIPs, supportsSingleFileMapping)
	// NOTE(review): the "path" log key carries the boolean mountEtcHostsFile,
	// not a path — possibly intended to be etcHostsPath; confirm.
	klog.V(3).InfoS("Creating hosts mount for container", "pod", klog.KObj(pod), "containerName", container.Name, "podIPs", podIPs, "path", mountEtcHostsFile)
	mounts := []kubecontainer.Mount{}
	var cleanupAction func()
	for i, mount := range container.VolumeMounts {
		// do not mount /etc/hosts if container is already mounting on the path
		mountEtcHostsFile = mountEtcHostsFile && (mount.MountPath != etcHostsPath)
		vol, ok := podVolumes[mount.Name]
		if !ok || vol.Mounter == nil {
			klog.ErrorS(nil, "Mount cannot be satisfied for the container, because the volume is missing or the volume mounter (vol.Mounter) is nil",
				"containerName", container.Name, "ok", ok, "volumeMounter", mount)
			return nil, cleanupAction, fmt.Errorf("cannot find volume %q to mount into container %q", mount.Name, container.Name)
		}
		relabelVolume := false
		// If the volume supports SELinux and it has not been
		// relabeled already and it is not a read-only volume,
		// relabel it and mark it as labeled
		if vol.Mounter.GetAttributes().Managed && vol.Mounter.GetAttributes().SupportsSELinux && !vol.SELinuxLabeled {
			vol.SELinuxLabeled = true
			relabelVolume = true
		}
		hostPath, err := volumeutil.GetPath(vol.Mounter)
		if err != nil {
			return nil, cleanupAction, err
		}
		// SubPathExpr (with env var expansion) takes precedence over SubPath.
		subPath := mount.SubPath
		if mount.SubPathExpr != "" {
			subPath, err = kubecontainer.ExpandContainerVolumeMounts(mount, expandEnvs)
			if err != nil {
				return nil, cleanupAction, err
			}
		}
		if subPath != "" {
			// Reject absolute subPaths and path traversal before touching the host fs.
			if filepath.IsAbs(subPath) {
				return nil, cleanupAction, fmt.Errorf("error SubPath `%s` must not be an absolute path", subPath)
			}
			err = volumevalidation.ValidatePathNoBacksteps(subPath)
			if err != nil {
				return nil, cleanupAction, fmt.Errorf("unable to provision SubPath `%s`: %v", subPath, err)
			}
			volumePath := hostPath
			hostPath = filepath.Join(volumePath, subPath)
			if subPathExists, err := hu.PathExists(hostPath); err != nil {
				klog.ErrorS(nil, "Could not determine if subPath exists, will not attempt to change its permissions", "path", hostPath)
			} else if !subPathExists {
				// Create the sub path now because if it's auto-created later when referenced, it may have an
				// incorrect ownership and mode. For example, the sub path directory must have at least g+rwx
				// when the pod specifies an fsGroup, and if the directory is not created here, Docker will
				// later auto-create it with the incorrect mode 0750
				// Make extra care not to escape the volume!
				perm, err := hu.GetMode(volumePath)
				if err != nil {
					return nil, cleanupAction, err
				}
				if err := subpather.SafeMakeDir(subPath, volumePath, perm); err != nil {
					// Don't pass detailed error back to the user because it could give information about host filesystem
					klog.ErrorS(err, "Failed to create subPath directory for volumeMount of the container", "containerName", container.Name, "volumeMountName", mount.Name)
					return nil, cleanupAction, fmt.Errorf("failed to create subPath directory for volumeMount %q of container %q", mount.Name, container.Name)
				}
			}
			hostPath, cleanupAction, err = subpather.PrepareSafeSubpath(subpath.Subpath{
				VolumeMountIndex: i,
				Path:             hostPath,
				VolumeName:       vol.InnerVolumeSpecName,
				VolumePath:       volumePath,
				PodDir:           podDir,
				ContainerName:    container.Name,
			})
			if err != nil {
				// Don't pass detailed error back to the user because it could give information about host filesystem
				klog.ErrorS(err, "Failed to prepare subPath for volumeMount of the container", "containerName", container.Name, "volumeMountName", mount.Name)
				return nil, cleanupAction, fmt.Errorf("failed to prepare subPath for volumeMount %q of container %q", mount.Name, container.Name)
			}
		}
		// Docker Volume Mounts fail on Windows if it is not of the form C:/
		if volumeutil.IsWindowsLocalPath(runtime.GOOS, hostPath) {
			hostPath = volumeutil.MakeAbsolutePath(runtime.GOOS, hostPath)
		}
		containerPath := mount.MountPath
		// IsAbs returns false for UNC path/SMB shares/named pipes in Windows. So check for those specifically and skip MakeAbsolutePath
		if !volumeutil.IsWindowsUNCPath(runtime.GOOS, containerPath) && !filepath.IsAbs(containerPath) {
			containerPath = volumeutil.MakeAbsolutePath(runtime.GOOS, containerPath)
		}
		propagation, err := translateMountPropagation(mount.MountPropagation)
		if err != nil {
			return nil, cleanupAction, err
		}
		klog.V(5).InfoS("Mount has propagation", "pod", klog.KObj(pod), "containerName", container.Name, "volumeMountName", mount.Name, "propagation", propagation)
		// The mounter's own attributes can force read-only regardless of the spec.
		mustMountRO := vol.Mounter.GetAttributes().ReadOnly
		mounts = append(mounts, kubecontainer.Mount{
			Name:           mount.Name,
			ContainerPath:  containerPath,
			HostPath:       hostPath,
			ReadOnly:       mount.ReadOnly || mustMountRO,
			SELinuxRelabel: relabelVolume,
			Propagation:    propagation,
		})
	}
	if mountEtcHostsFile {
		hostAliases := pod.Spec.HostAliases
		hostsMount, err := makeHostsMount(podDir, podIPs, hostName, hostDomain, hostAliases, pod.Spec.HostNetwork)
		if err != nil {
			return nil, cleanupAction, err
		}
		mounts = append(mounts, *hostsMount)
	}
	return mounts, cleanupAction, nil
}
// translateMountPropagation converts a v1.MountPropagationMode into the
// corresponding runtimeapi.MountPropagation value.
func translateMountPropagation(mountMode *v1.MountPropagationMode) (runtimeapi.MountPropagation, error) {
	if runtime.GOOS == "windows" {
		// Windows containers don't support mount propagation; always use private.
		// Refer https://docs.docker.com/storage/bind-mounts/#configure-bind-propagation.
		return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil
	}
	if mountMode == nil {
		// An unset mode defaults to PRIVATE.
		return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil
	}
	switch *mountMode {
	case v1.MountPropagationHostToContainer:
		return runtimeapi.MountPropagation_PROPAGATION_HOST_TO_CONTAINER, nil
	case v1.MountPropagationBidirectional:
		return runtimeapi.MountPropagation_PROPAGATION_BIDIRECTIONAL, nil
	case v1.MountPropagationNone:
		return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil
	default:
		return 0, fmt.Errorf("invalid MountPropagation mode: %q", *mountMode)
	}
}
// getEtcHostsPath returns the full host-side path to a pod's generated
// /etc/hosts file ("etc-hosts" inside the pod directory).
func getEtcHostsPath(podDir string) string {
	// MakeAbsolutePath yields a C:/-style path on Windows, where relative
	// volume mount paths fail.
	return volumeutil.MakeAbsolutePath(runtime.GOOS, path.Join(podDir, "etc-hosts"))
}
// makeHostsMount makes the mountpoint for the hosts file that the containers
// in a pod are injected with. podIPs is provided instead of podIP as podIPs
// are present even if dual-stack feature flag is not enabled.
//
// It writes (or refreshes) the pod's managed etc-hosts file on disk and
// returns a Mount that binds it onto the container's /etc/hosts.
func makeHostsMount(podDir string, podIPs []string, hostName, hostDomainName string, hostAliases []v1.HostAlias, useHostNetwork bool) (*kubecontainer.Mount, error) {
	hostsFilePath := getEtcHostsPath(podDir)
	if err := ensureHostsFile(hostsFilePath, podIPs, hostName, hostDomainName, hostAliases, useHostNetwork); err != nil {
		return nil, err
	}
	return &kubecontainer.Mount{
		Name:           "k8s-managed-etc-hosts",
		ContainerPath:  etcHostsPath,
		HostPath:       hostsFilePath,
		ReadOnly:       false,
		// Relabel so the container can read the file under SELinux.
		SELinuxRelabel: true,
	}, nil
}
// ensureHostsFile writes fileName so that it contains an up-to-date IP, host
// name and domain name for the pod.
func ensureHostsFile(fileName string, hostIPs []string, hostName, hostDomainName string, hostAliases []v1.HostAlias, useHostNetwork bool) error {
	var content []byte
	if useHostNetwork {
		// Host-network pods inherit the node's own hosts file (`etcHostsPath`
		// on the node, `/etc/hosts` on *nix), augmented with any HostAliases.
		nodeContent, err := nodeHostsFileContent(etcHostsPath, hostAliases)
		if err != nil {
			return err
		}
		content = nodeContent
	} else {
		// Otherwise generate a kubelet-managed hosts file from the pod's IPs
		// and names.
		content = managedHostsFileContent(hostIPs, hostName, hostDomainName, hostAliases)
	}
	return ioutil.WriteFile(fileName, content, 0644)
}
// nodeHostsFileContent returns the node's hosts file content, prefixed with
// the managed-hosts header and suffixed with any HostAliases entries.
func nodeHostsFileContent(hostsFilePath string, hostAliases []v1.HostAlias) ([]byte, error) {
	nodeContent, err := ioutil.ReadFile(hostsFilePath)
	if err != nil {
		return nil, err
	}
	var out bytes.Buffer
	out.WriteString(managedHostsHeaderWithHostNetwork)
	out.Write(nodeContent)
	out.Write(hostsEntriesFromHostAliases(hostAliases))
	return out.Bytes(), nil
}
// managedHostsFileContent builds the kubelet-managed etc-hosts content from
// the pod's IPs, hostname, optional domain name and HostAliases.
func managedHostsFileContent(hostIPs []string, hostName, hostDomainName string, hostAliases []v1.HostAlias) []byte {
	var out bytes.Buffer
	out.WriteString(managedHostsHeader)
	// Standard IPv4/IPv6 localhost boilerplate.
	for _, entry := range []string{
		"127.0.0.1\tlocalhost\n",                      // ipv4 localhost
		"::1\tlocalhost ip6-localhost ip6-loopback\n", // ipv6 localhost
		"fe00::0\tip6-localnet\n",
		"fe00::0\tip6-mcastprefix\n",
		"fe00::1\tip6-allnodes\n",
		"fe00::2\tip6-allrouters\n",
	} {
		out.WriteString(entry)
	}
	// One entry per pod IP; podIPs is populated even when the dual-stack
	// feature flag is not enabled.
	for _, hostIP := range hostIPs {
		if len(hostDomainName) > 0 {
			fmt.Fprintf(&out, "%s\t%s.%s\t%s\n", hostIP, hostName, hostDomainName, hostName)
		} else {
			fmt.Fprintf(&out, "%s\t%s\n", hostIP, hostName)
		}
	}
	out.Write(hostsEntriesFromHostAliases(hostAliases))
	return out.Bytes()
}
// hostsEntriesFromHostAliases renders the HostAliases section of a hosts
// file; it returns an empty slice when there are no aliases.
func hostsEntriesFromHostAliases(hostAliases []v1.HostAlias) []byte {
	if len(hostAliases) == 0 {
		return []byte{}
	}
	var out bytes.Buffer
	out.WriteString("\n")
	out.WriteString("# Entries added by HostAliases.\n")
	// All hostnames for a given IP share a single tab-separated line.
	for _, alias := range hostAliases {
		fmt.Fprintf(&out, "%s\t%s\n", alias.IP, strings.Join(alias.Hostnames, "\t"))
	}
	return out.Bytes()
}
// truncatePodHostnameIfNeeded caps the pod hostname at 63 characters,
// trimming any trailing '-' or '.' introduced by the cut.
func truncatePodHostnameIfNeeded(podName, hostname string) (string, error) {
	// The spec allows 64 bytes, which is 63 characters plus the NUL terminator.
	const hostnameMaxLen = 63
	if len(hostname) <= hostnameMaxLen {
		return hostname, nil
	}
	shortened := hostname[:hostnameMaxLen]
	klog.ErrorS(nil, "Hostname for pod was too long, truncated it", "podName", podName, "hostnameMaxLen", hostnameMaxLen, "truncatedHostname", shortened)
	// A hostname must not end with '-' or '.'.
	shortened = strings.TrimRight(shortened, "-.")
	if len(shortened) == 0 {
		// Should be unreachable: the input would have to be 63+ chars of '-'/'.'.
		return "", fmt.Errorf("hostname for pod %q was invalid: %q", podName, hostname)
	}
	return shortened, nil
}
// GeneratePodHostNameAndDomain creates a hostname and domain name for a pod,
// given that pod's spec and annotations or returns an error.
//
// The hostname defaults to the pod name and may be overridden by a valid
// spec.hostname; it is then truncated to the 63-character limit. The domain
// is empty unless spec.subdomain is set, in which case it becomes
// "<subdomain>.<namespace>.svc.<cluster-domain>".
func (kl *Kubelet) GeneratePodHostNameAndDomain(pod *v1.Pod) (string, string, error) {
	clusterDomain := kl.dnsConfigurer.ClusterDomain
	hostname := pod.Name
	if len(pod.Spec.Hostname) > 0 {
		if msgs := utilvalidation.IsDNS1123Label(pod.Spec.Hostname); len(msgs) != 0 {
			return "", "", fmt.Errorf("pod Hostname %q is not a valid DNS label: %s", pod.Spec.Hostname, strings.Join(msgs, ";"))
		}
		hostname = pod.Spec.Hostname
	}
	// Enforce the 63-character hostname cap.
	hostname, err := truncatePodHostnameIfNeeded(pod.Name, hostname)
	if err != nil {
		return "", "", err
	}
	hostDomain := ""
	if len(pod.Spec.Subdomain) > 0 {
		if msgs := utilvalidation.IsDNS1123Label(pod.Spec.Subdomain); len(msgs) != 0 {
			return "", "", fmt.Errorf("pod Subdomain %q is not a valid DNS label: %s", pod.Spec.Subdomain, strings.Join(msgs, ";"))
		}
		hostDomain = fmt.Sprintf("%s.%s.svc.%s", pod.Spec.Subdomain, pod.Namespace, clusterDomain)
	}
	return hostname, hostDomain, nil
}
// GetPodCgroupParent returns the pod's cgroup parent as reported by the
// container manager's pod container manager.
func (kl *Kubelet) GetPodCgroupParent(pod *v1.Pod) string {
	_, cgroupParent := kl.containerManager.NewPodContainerManager().GetPodContainerName(pod)
	return cgroupParent
}
// GenerateRunContainerOptions generates the RunContainerOptions, which can be used by
// the container runtime to set parameters for launching a container.
//
// It assembles, in order: resource options, the kernel hostname, raw block
// devices, environment variables, and mounts (including the managed
// /etc/hosts). The returned cleanup func (possibly nil) must be called by
// the caller; it is also returned alongside a non-nil error from makeMounts.
func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (*kubecontainer.RunContainerOptions, func(), error) {
	opts, err := kl.containerManager.GetResources(pod, container)
	if err != nil {
		return nil, nil, err
	}
	// The value of hostname is the short host name and it is sent to makeMounts to create /etc/hosts file.
	hostname, hostDomainName, err := kl.GeneratePodHostNameAndDomain(pod)
	if err != nil {
		return nil, nil, err
	}
	// nodename will be equals to hostname if SetHostnameAsFQDN is nil or false. If SetHostnameFQDN
	// is true and hostDomainName is defined, nodename will be the FQDN (hostname.hostDomainName)
	nodename, err := util.GetNodenameForKernel(hostname, hostDomainName, pod.Spec.SetHostnameAsFQDN)
	if err != nil {
		return nil, nil, err
	}
	opts.Hostname = nodename
	podName := volumeutil.GetUniquePodName(pod)
	volumes := kl.volumeManager.GetMountedVolumesForPod(podName)
	blkutil := volumepathhandler.NewBlockVolumePathHandler()
	blkVolumes, err := kl.makeBlockVolumes(pod, container, volumes, blkutil)
	if err != nil {
		return nil, nil, err
	}
	opts.Devices = append(opts.Devices, blkVolumes...)
	envs, err := kl.makeEnvironmentVariables(pod, container, podIP, podIPs)
	if err != nil {
		return nil, nil, err
	}
	opts.Envs = append(opts.Envs, envs...)
	// we can only mount individual files (e.g.: /etc/hosts, termination-log files) on Windows only if we're using Containerd.
	supportsSingleFileMapping := kl.containerRuntime.SupportsSingleFileMapping()
	// only podIPs is sent to makeMounts, as podIPs is populated even if dual-stack feature flag is not enabled.
	mounts, cleanupAction, err := makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIPs, volumes, kl.hostutil, kl.subpather, opts.Envs, supportsSingleFileMapping)
	if err != nil {
		return nil, cleanupAction, err
	}
	opts.Mounts = append(opts.Mounts, mounts...)
	// adding TerminationMessagePath on Windows is only allowed if ContainerD is used. Individual files cannot
	// be mounted as volumes using Docker for Windows.
	if len(container.TerminationMessagePath) != 0 && supportsSingleFileMapping {
		p := kl.getPodContainerDir(pod.UID, container.Name)
		// Failure to create the dir is logged but non-fatal: the container can
		// still run without a pod container dir.
		if err := os.MkdirAll(p, 0750); err != nil {
			klog.ErrorS(err, "Error on creating dir", "path", p)
		} else {
			opts.PodContainerDir = p
		}
	}
	// only do this check if the experimental behavior is enabled, otherwise allow it to default to false
	if kl.experimentalHostUserNamespaceDefaulting {
		opts.EnableHostUserNamespace = kl.enableHostUserNamespace(pod)
	}
	return opts, cleanupAction, nil
}
// masterServices is the set of services in the master service namespace whose
// environment variables are always injected, regardless of enableServiceLinks.
var masterServices = sets.NewString("kubernetes")
// getServiceEnvVarMap makes a map[string]string of env vars for services a
// pod in namespace ns should see.
//
// Returns an empty (non-nil) map when there is no service lister, and an
// error if listing services fails.
func (kl *Kubelet) getServiceEnvVarMap(ns string, enableServiceLinks bool) (map[string]string, error) {
	var (
		serviceMap = make(map[string]*v1.Service)
		m          = make(map[string]string)
	)
	// Get all service resources from the master (via a cache),
	// and populate them into service environment variables.
	if kl.serviceLister == nil {
		// Kubelets without masters (e.g. plain GCE ContainerVM) don't set env vars.
		return m, nil
	}
	services, err := kl.serviceLister.List(labels.Everything())
	if err != nil {
		return m, fmt.Errorf("failed to list services when setting up env vars")
	}
	// project the services in namespace ns onto the master services
	for i := range services {
		service := services[i]
		// ignore services where ClusterIP is "None" or empty
		if !v1helper.IsServiceIPSet(service) {
			continue
		}
		serviceName := service.Name
		// We always want to add environment variables for master services
		// from the master service namespace, even if enableServiceLinks is false.
		// We also add environment variables for other services in the same
		// namespace, if enableServiceLinks is true.
		if service.Namespace == kl.masterServiceNamespace && masterServices.Has(serviceName) {
			if _, exists := serviceMap[serviceName]; !exists {
				serviceMap[serviceName] = service
			}
		} else if service.Namespace == ns && enableServiceLinks {
			serviceMap[serviceName] = service
		}
	}
	mappedServices := []*v1.Service{}
	for key := range serviceMap {
		mappedServices = append(mappedServices, serviceMap[key])
	}
	for _, e := range envvars.FromServices(mappedServices) {
		m[e.Name] = e.Value
	}
	return m, nil
}
// makeEnvironmentVariables builds the environment variables for a container
// in the given pod: service env vars, EnvFrom (ConfigMap/Secret) sources,
// then Env entries (with $(var) expansion and ValueFrom resolution), and
// finally any remaining service env vars not already set.
func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) ([]kubecontainer.EnvVar, error) {
	if pod.Spec.EnableServiceLinks == nil {
		return nil, fmt.Errorf("nil pod.spec.enableServiceLinks encountered, cannot construct envvars")
	}
	// If the pod originates from the kube-api, then we know that the kube-apiserver is responding and the kubelet's credentials are valid.
	// Knowing this, it is reasonable to wait until the service lister has synchronized at least once before attempting to build
	// a service env var map. This doesn't prevent the race below from happening entirely, but it does prevent the "obvious"
	// failure case of services simply not having completed a list operation that can reasonably be expected to succeed.
	// One common case this prevents is a kubelet restart reading pods before services and some pod not having the
	// KUBERNETES_SERVICE_HOST injected because we didn't wait a short time for services to sync before proceeding.
	// The KUBERNETES_SERVICE_HOST link is special because it is unconditionally injected into pods and is read by the
	// in-cluster-config for pod clients
	if !kubetypes.IsStaticPod(pod) && !kl.serviceHasSynced() {
		return nil, fmt.Errorf("services have not yet been read at least once, cannot construct envvars")
	}
	var result []kubecontainer.EnvVar
	// Note: These are added to the docker Config, but are not included in the checksum computed
	// by kubecontainer.HashContainer(...). That way, we can still determine whether an
	// v1.Container is already running by its hash. (We don't want to restart a container just
	// because some service changed.)
	//
	// Note that there is a race between Kubelet seeing the pod and kubelet seeing the service.
	// To avoid this users can: (1) wait between starting a service and starting pods that use it; or (2) detect
	// missing service env var and exit and be restarted; or (3) use DNS instead of env vars
	// and keep trying to resolve the DNS name of the service (recommended).
	serviceEnv, err := kl.getServiceEnvVarMap(pod.Namespace, *pod.Spec.EnableServiceLinks)
	if err != nil {
		return result, err
	}
	var (
		// configMaps and secrets cache fetched objects so each is resolved once.
		configMaps = make(map[string]*v1.ConfigMap)
		secrets    = make(map[string]*v1.Secret)
		tmpEnv     = make(map[string]string)
	)
	// Env will override EnvFrom variables.
	// Process EnvFrom first then allow Env to replace existing values.
	for _, envFrom := range container.EnvFrom {
		switch {
		case envFrom.ConfigMapRef != nil:
			cm := envFrom.ConfigMapRef
			name := cm.Name
			configMap, ok := configMaps[name]
			if !ok {
				if kl.kubeClient == nil {
					return result, fmt.Errorf("couldn't get configMap %v/%v, no kubeClient defined", pod.Namespace, name)
				}
				optional := cm.Optional != nil && *cm.Optional
				configMap, err = kl.configMapManager.GetConfigMap(pod.Namespace, name)
				if err != nil {
					if errors.IsNotFound(err) && optional {
						// ignore error when marked optional
						continue
					}
					return result, err
				}
				configMaps[name] = configMap
			}
			// Keys that are not valid env var names are skipped and reported
			// in a single warning event.
			invalidKeys := []string{}
			for k, v := range configMap.Data {
				if len(envFrom.Prefix) > 0 {
					k = envFrom.Prefix + k
				}
				if errMsgs := utilvalidation.IsEnvVarName(k); len(errMsgs) != 0 {
					invalidKeys = append(invalidKeys, k)
					continue
				}
				tmpEnv[k] = v
			}
			if len(invalidKeys) > 0 {
				sort.Strings(invalidKeys)
				kl.recorder.Eventf(pod, v1.EventTypeWarning, "InvalidEnvironmentVariableNames", "Keys [%s] from the EnvFrom configMap %s/%s were skipped since they are considered invalid environment variable names.", strings.Join(invalidKeys, ", "), pod.Namespace, name)
			}
		case envFrom.SecretRef != nil:
			s := envFrom.SecretRef
			name := s.Name
			secret, ok := secrets[name]
			if !ok {
				if kl.kubeClient == nil {
					return result, fmt.Errorf("couldn't get secret %v/%v, no kubeClient defined", pod.Namespace, name)
				}
				optional := s.Optional != nil && *s.Optional
				secret, err = kl.secretManager.GetSecret(pod.Namespace, name)
				if err != nil {
					if errors.IsNotFound(err) && optional {
						// ignore error when marked optional
						continue
					}
					return result, err
				}
				secrets[name] = secret
			}
			invalidKeys := []string{}
			for k, v := range secret.Data {
				if len(envFrom.Prefix) > 0 {
					k = envFrom.Prefix + k
				}
				if errMsgs := utilvalidation.IsEnvVarName(k); len(errMsgs) != 0 {
					invalidKeys = append(invalidKeys, k)
					continue
				}
				tmpEnv[k] = string(v)
			}
			if len(invalidKeys) > 0 {
				sort.Strings(invalidKeys)
				kl.recorder.Eventf(pod, v1.EventTypeWarning, "InvalidEnvironmentVariableNames", "Keys [%s] from the EnvFrom secret %s/%s were skipped since they are considered invalid environment variable names.", strings.Join(invalidKeys, ", "), pod.Namespace, name)
			}
		}
	}
	// Determine the final values of variables:
	//
	// 1. Determine the final value of each variable:
	// a. If the variable's Value is set, expand the `$(var)` references to other
	// variables in the .Value field; the sources of variables are the declared
	// variables of the container and the service environment variables
	// b. If a source is defined for an environment variable, resolve the source
	// 2. Create the container's environment in the order variables are declared
	// 3. Add remaining service environment vars
	var (
		mappingFunc = expansion.MappingFuncFor(tmpEnv, serviceEnv)
	)
	for _, envVar := range container.Env {
		runtimeVal := envVar.Value
		if runtimeVal != "" {
			// Step 1a: expand variable references
			runtimeVal = expansion.Expand(runtimeVal, mappingFunc)
		} else if envVar.ValueFrom != nil {
			// Step 1b: resolve alternate env var sources
			switch {
			case envVar.ValueFrom.FieldRef != nil:
				runtimeVal, err = kl.podFieldSelectorRuntimeValue(envVar.ValueFrom.FieldRef, pod, podIP, podIPs)
				if err != nil {
					return result, err
				}
			case envVar.ValueFrom.ResourceFieldRef != nil:
				// Resource refs use pod/container copies with limits defaulted
				// for the downward API.
				defaultedPod, defaultedContainer, err := kl.defaultPodLimitsForDownwardAPI(pod, container)
				if err != nil {
					return result, err
				}
				runtimeVal, err = containerResourceRuntimeValue(envVar.ValueFrom.ResourceFieldRef, defaultedPod, defaultedContainer)
				if err != nil {
					return result, err
				}
			case envVar.ValueFrom.ConfigMapKeyRef != nil:
				cm := envVar.ValueFrom.ConfigMapKeyRef
				name := cm.Name
				key := cm.Key
				optional := cm.Optional != nil && *cm.Optional
				configMap, ok := configMaps[name]
				if !ok {
					if kl.kubeClient == nil {
						return result, fmt.Errorf("couldn't get configMap %v/%v, no kubeClient defined", pod.Namespace, name)
					}
					configMap, err = kl.configMapManager.GetConfigMap(pod.Namespace, name)
					if err != nil {
						if errors.IsNotFound(err) && optional {
							// ignore error when marked optional
							continue
						}
						return result, err
					}
					configMaps[name] = configMap
				}
				runtimeVal, ok = configMap.Data[key]
				if !ok {
					if optional {
						continue
					}
					return result, fmt.Errorf("couldn't find key %v in ConfigMap %v/%v", key, pod.Namespace, name)
				}
			case envVar.ValueFrom.SecretKeyRef != nil:
				s := envVar.ValueFrom.SecretKeyRef
				name := s.Name
				key := s.Key
				optional := s.Optional != nil && *s.Optional
				secret, ok := secrets[name]
				if !ok {
					if kl.kubeClient == nil {
						return result, fmt.Errorf("couldn't get secret %v/%v, no kubeClient defined", pod.Namespace, name)
					}
					secret, err = kl.secretManager.GetSecret(pod.Namespace, name)
					if err != nil {
						if errors.IsNotFound(err) && optional {
							// ignore error when marked optional
							continue
						}
						return result, err
					}
					secrets[name] = secret
				}
				runtimeValBytes, ok := secret.Data[key]
				if !ok {
					if optional {
						continue
					}
					return result, fmt.Errorf("couldn't find key %v in Secret %v/%v", key, pod.Namespace, name)
				}
				runtimeVal = string(runtimeValBytes)
			}
		}
		tmpEnv[envVar.Name] = runtimeVal
	}
	// Append the env vars
	for k, v := range tmpEnv {
		result = append(result, kubecontainer.EnvVar{Name: k, Value: v})
	}
	// Append remaining service env vars.
	for k, v := range serviceEnv {
		// Accesses apiserver+Pods.
		// So, the master may set service env vars, or kubelet may. In case both are doing
		// it, we skip the key from the kubelet-generated ones so we don't have duplicate
		// env vars.
		// TODO: remove this next line once all platforms use apiserver+Pods.
		if _, present := tmpEnv[k]; !present {
			result = append(result, kubecontainer.EnvVar{Name: k, Value: v})
		}
	}
	return result, nil
}
// podFieldSelectorRuntimeValue returns the runtime value of the given
// selector for a pod.
//
// Handles the status.* and spec.* paths that require kubelet state
// (node name, service account, host IP, pod IPs); everything else is
// delegated to fieldpath.ExtractFieldPathAsString.
func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod *v1.Pod, podIP string, podIPs []string) (string, error) {
	internalFieldPath, _, err := podshelper.ConvertDownwardAPIFieldLabel(fs.APIVersion, fs.FieldPath, "")
	if err != nil {
		return "", err
	}
	// make podIPs order match node IP family preference #97979
	podIPs = kl.sortPodIPs(podIPs)
	if len(podIPs) > 0 {
		podIP = podIPs[0]
	}
	switch internalFieldPath {
	case "spec.nodeName":
		return pod.Spec.NodeName, nil
	case "spec.serviceAccountName":
		return pod.Spec.ServiceAccountName, nil
	case "status.hostIP":
		hostIPs, err := kl.getHostIPsAnyWay()
		if err != nil {
			return "", err
		}
		// NOTE(review): indexes hostIPs[0] without a length check — presumably
		// getHostIPsAnyWay never returns an empty slice on success; confirm.
		return hostIPs[0].String(), nil
	case "status.podIP":
		return podIP, nil
	case "status.podIPs":
		return strings.Join(podIPs, ","), nil
	}
	return fieldpath.ExtractFieldPathAsString(pod, internalFieldPath)
}
// containerResourceRuntimeValue resolves the value of the provided container
// resource selector, honoring an explicit ContainerName when set.
func containerResourceRuntimeValue(fs *v1.ResourceFieldSelector, pod *v1.Pod, container *v1.Container) (string, error) {
	if name := fs.ContainerName; len(name) != 0 {
		return resource.ExtractResourceValueByContainerName(fs, pod, name)
	}
	// No container name given: use the current container.
	return resource.ExtractContainerResourceValue(fs, container)
}
// killPod instructs the container runtime to kill the pod. This method requires that
// the pod status contains the result of the last syncPod, otherwise it may fail to
// terminate newly created containers and sandboxes.
func (kl *Kubelet) killPod(pod *v1.Pod, p kubecontainer.Pod, gracePeriodOverride *int64) error {
	// Call the container runtime KillPod method which stops all known running containers of the pod
	if err := kl.containerRuntime.KillPod(pod, p, gracePeriodOverride); err != nil {
		return err
	}
	// Best-effort: a failed QoS cgroup update is logged, not returned.
	if err := kl.containerManager.UpdateQOSCgroups(); err != nil {
		klog.V(2).InfoS("Failed to update QoS cgroups while killing pod", "err", err)
	}
	return nil
}
// makePodDataDirs creates the pod's data directories (pod dir, volumes dir
// and plugins dir), tolerating directories that already exist.
func (kl *Kubelet) makePodDataDirs(pod *v1.Pod) error {
	uid := pod.UID
	for _, dir := range []string{
		kl.getPodDir(uid),
		kl.getPodVolumesDir(uid),
		kl.getPodPluginsDir(uid),
	} {
		if err := os.MkdirAll(dir, 0750); err != nil && !os.IsExist(err) {
			return err
		}
	}
	return nil
}
// getPullSecretsForPod inspects the Pod and retrieves the referenced pull
// secrets.
//
// Missing or unfetchable secrets are skipped with a log line rather than
// failing the whole pod; the returned slice may therefore be shorter than
// pod.Spec.ImagePullSecrets.
func (kl *Kubelet) getPullSecretsForPod(pod *v1.Pod) []v1.Secret {
	pullSecrets := []v1.Secret{}
	for _, secretRef := range pod.Spec.ImagePullSecrets {
		if len(secretRef.Name) == 0 {
			// API validation permitted entries with empty names (http://issue.k8s.io/99454#issuecomment-787838112).
			// Ignore to avoid unnecessary warnings.
			continue
		}
		secret, err := kl.secretManager.GetSecret(pod.Namespace, secretRef.Name)
		if err != nil {
			// BUG FIX: on error `secret` is nil, and klog.KObj(secret) would
			// dereference the nil *v1.Secret's ObjectMeta when reading its
			// name, panicking the kubelet. Log the reference name instead.
			klog.InfoS("Unable to retrieve pull secret, the image pull may not succeed.", "pod", klog.KObj(pod), "secret", secretRef.Name, "err", err)
			continue
		}
		pullSecrets = append(pullSecrets, *secret)
	}
	return pullSecrets
}
// countRunningContainerStatus totals the containers reported as running
// across the init, regular and ephemeral container status lists.
func countRunningContainerStatus(status v1.PodStatus) int {
	running := 0
	for _, statuses := range [][]v1.ContainerStatus{
		status.InitContainerStatuses,
		status.ContainerStatuses,
		status.EphemeralContainerStatuses,
	} {
		for i := range statuses {
			if statuses[i].State.Running != nil {
				running++
			}
		}
	}
	return running
}
// PodCouldHaveRunningContainers returns true if the pod with the given UID could still have running
// containers. This returns false if the pod has not yet been started or the pod is unknown.
//
// Thin delegation to the pod workers, which own this state.
func (kl *Kubelet) PodCouldHaveRunningContainers(pod *v1.Pod) bool {
	return kl.podWorkers.CouldHaveRunningContainers(pod.UID)
}
// PodResourcesAreReclaimed returns true if all required node-level resources that a pod was consuming have
// been reclaimed by the kubelet. Reclaiming resources is a prerequisite to deleting a pod from the API server.
//
// The checks run in order: possible running containers (pod workers),
// running containers still present in the reported status, pod volumes,
// and (when CgroupsPerQOS is on) the pod cgroup sandbox.
func (kl *Kubelet) PodResourcesAreReclaimed(pod *v1.Pod, status v1.PodStatus) bool {
	if kl.podWorkers.CouldHaveRunningContainers(pod.UID) {
		// We shouldn't delete pods that still have running containers
		klog.V(3).InfoS("Pod is terminated, but some containers are still running", "pod", klog.KObj(pod))
		return false
	}
	if count := countRunningContainerStatus(status); count > 0 {
		// We shouldn't delete pods until the reported pod status contains no more running containers (the previous
		// check ensures no more status can be generated, this check verifies we have seen enough of the status)
		klog.V(3).InfoS("Pod is terminated, but some container status has not yet been reported", "pod", klog.KObj(pod), "running", count)
		return false
	}
	if kl.podVolumesExist(pod.UID) && !kl.keepTerminatedPodVolumes {
		// We shouldn't delete pods whose volumes have not been cleaned up if we are not keeping terminated pod volumes
		klog.V(3).InfoS("Pod is terminated, but some volumes have not been cleaned up", "pod", klog.KObj(pod))
		return false
	}
	if kl.kubeletConfiguration.CgroupsPerQOS {
		pcm := kl.containerManager.NewPodContainerManager()
		if pcm.Exists(pod) {
			klog.V(3).InfoS("Pod is terminated, but pod cgroup sandbox has not been cleaned up", "pod", klog.KObj(pod))
			return false
		}
	}
	// Note: we leave pod containers to be reclaimed in the background since dockershim requires the
	// container for retrieving logs and we want to make sure logs are available until the pod is
	// physically deleted.
	klog.V(3).InfoS("Pod is terminated and all resources are reclaimed", "pod", klog.KObj(pod))
	return true
}
// podResourcesAreReclaimed simply calls PodResourcesAreReclaimed with the most up-to-date status.
func (kl *Kubelet) podResourcesAreReclaimed(pod *v1.Pod) bool {
	// Prefer the status cached by the status manager; when no cached entry
	// exists fall back to the status recorded on the pod object itself.
	if status, ok := kl.statusManager.GetPodStatus(pod.UID); ok {
		return kl.PodResourcesAreReclaimed(pod, status)
	}
	return kl.PodResourcesAreReclaimed(pod, pod.Status)
}
// filterOutInactivePods returns pods that are not in a terminal phase
// or are known to be fully terminated. This method should only be used
// when the set of pods being filtered is upstream of the pod worker, i.e.
// the pods the pod manager is aware of.
func (kl *Kubelet) filterOutInactivePods(pods []*v1.Pod) []*v1.Pod {
	active := make([]*v1.Pod, 0, len(pods))
	for _, pod := range pods {
		switch {
		case kl.podWorkers.IsPodKnownTerminated(pod.UID):
			// a pod that is fully terminated by UID is always excluded
		case kl.isAdmittedPodTerminal(pod) && !kl.podWorkers.IsPodTerminationRequested(pod.UID):
			// terminal pods are inactive UNLESS they are actively terminating
		default:
			active = append(active, pod)
		}
	}
	return active
}
// isAdmittedPodTerminal returns true if the provided config source pod is in
// a terminal phase, or if the Kubelet has already indicated the pod has reached
// a terminal phase but the config source has not accepted it yet. This method
// should only be used within the pod configuration loops that notify the pod
// worker, other components should treat the pod worker as authoritative.
func (kl *Kubelet) isAdmittedPodTerminal(pod *v1.Pod) bool {
	// A terminal phase observed by the config source means the pod must
	// never be restarted.
	if phase := pod.Status.Phase; phase == v1.PodSucceeded || phase == v1.PodFailed {
		return true
	}
	// A pod marked terminal within the Kubelet (possibly rejected by
	// Kubelet admission) is also considered inactive.
	status, ok := kl.statusManager.GetPodStatus(pod.UID)
	return ok && (status.Phase == v1.PodSucceeded || status.Phase == v1.PodFailed)
}
// removeOrphanedPodStatuses removes obsolete entries in podStatus where
// the pod is no longer considered bound to this node.
func (kl *Kubelet) removeOrphanedPodStatuses(pods []*v1.Pod, mirrorPods []*v1.Pod) {
	// Collect every UID that is still bound to this node — both regular
	// and mirror pods — then drop all statuses outside that set.
	alive := make(map[types.UID]bool)
	for _, list := range [][]*v1.Pod{pods, mirrorPods} {
		for _, pod := range list {
			alive[pod.UID] = true
		}
	}
	kl.statusManager.RemoveOrphanedStatuses(alive)
}
// deleteOrphanedMirrorPods checks whether pod killer has done with orphaned mirror pod.
// If pod killing is done, podManager.DeleteMirrorPod() is called to delete mirror pod
// from the API server
func (kl *Kubelet) deleteOrphanedMirrorPods() {
	for _, podFullname := range kl.podManager.GetOrphanedMirrorPodNames() {
		// Skip mirror pods whose static pod is still terminating; they
		// will be retried on a later housekeeping pass.
		if kl.podWorkers.IsPodForMirrorPodTerminatingByFullName(podFullname) {
			continue
		}
		if _, err := kl.podManager.DeleteMirrorPod(podFullname, nil); err != nil {
			klog.ErrorS(err, "Encountered error when deleting mirror pod", "podName", podFullname)
			continue
		}
		klog.V(3).InfoS("Deleted pod", "podName", podFullname)
	}
}
// HandlePodCleanups performs a series of cleanup work, including terminating
// pod workers, killing unwanted pods, and removing orphaned volumes/pod
// directories. No config changes are sent to pod workers while this method
// is executing which means no new pods can appear.
// NOTE: This function is executed by the main sync loop, so it
// should not contain any blocking calls.
//
// The ordering of the steps below is deliberate: cgroup state is snapshotted
// first, then workers are synced, then runtime/volume/cgroup cleanup happens,
// and finally restartable (UID-reused) pods are re-dispatched. Reordering
// these steps risks cleaning up resources for pods that are still running.
func (kl *Kubelet) HandlePodCleanups() error {
	// The kubelet lacks checkpointing, so we need to introspect the set of pods
	// in the cgroup tree prior to inspecting the set of pods in our pod manager.
	// this ensures our view of the cgroup tree does not mistakenly observe pods
	// that are added after the fact...
	var (
		cgroupPods map[types.UID]cm.CgroupName
		err        error
	)
	if kl.cgroupsPerQOS {
		pcm := kl.containerManager.NewPodContainerManager()
		cgroupPods, err = pcm.GetAllPodsFromCgroups()
		if err != nil {
			return fmt.Errorf("failed to get list of pods that still exist on cgroup mounts: %v", err)
		}
	}
	allPods, mirrorPods := kl.podManager.GetPodsAndMirrorPods()
	// Pod phase progresses monotonically. Once a pod has reached a final state,
	// it should never leave regardless of the restart policy. The statuses
	// of such pods should not be changed, and there is no need to sync them.
	// TODO: the logic here does not handle two cases:
	//   1. If the containers were removed immediately after they died, kubelet
	//      may fail to generate correct statuses, let alone filtering correctly.
	//   2. If kubelet restarted before writing the terminated status for a pod
	//      to the apiserver, it could still restart the terminated pod (even
	//      though the pod was not considered terminated by the apiserver).
	// These two conditions could be alleviated by checkpointing kubelet.
	// Stop the workers for terminated pods not in the config source
	klog.V(3).InfoS("Clean up pod workers for terminated pods")
	workingPods := kl.podWorkers.SyncKnownPods(allPods)
	// Index all known config pods by UID for the lookups below.
	allPodsByUID := make(map[types.UID]*v1.Pod)
	for _, pod := range allPods {
		allPodsByUID[pod.UID] = pod
	}
	// Identify the set of pods that have workers, which should be all pods
	// from config that are not terminated, as well as any terminating pods
	// that have already been removed from config. Pods that are terminating
	// will be added to possiblyRunningPods, to prevent overly aggressive
	// cleanup of pod cgroups.
	runningPods := make(map[types.UID]sets.Empty)
	possiblyRunningPods := make(map[types.UID]sets.Empty)
	restartablePods := make(map[types.UID]sets.Empty)
	for uid, sync := range workingPods {
		switch sync {
		case SyncPod:
			runningPods[uid] = struct{}{}
			possiblyRunningPods[uid] = struct{}{}
		case TerminatingPod:
			possiblyRunningPods[uid] = struct{}{}
		case TerminatedAndRecreatedPod:
			restartablePods[uid] = struct{}{}
		}
	}
	// Stop probing pods that are not running
	klog.V(3).InfoS("Clean up probes for terminated pods")
	kl.probeManager.CleanupPods(possiblyRunningPods)
	// Terminate any pods that are observed in the runtime but not
	// present in the list of known running pods from config.
	runningRuntimePods, err := kl.runtimeCache.GetPods()
	if err != nil {
		klog.ErrorS(err, "Error listing containers")
		return err
	}
	for _, runningPod := range runningRuntimePods {
		switch workerState, ok := workingPods[runningPod.ID]; {
		case ok && workerState == SyncPod, ok && workerState == TerminatingPod:
			// if the pod worker is already in charge of this pod, we don't need to do anything
			continue
		default:
			// If the pod isn't in the set that should be running and isn't already terminating, terminate
			// now. This termination is aggressive because all known pods should already be in a known state
			// (i.e. a removed static pod should already be terminating), so these are pods that were
			// orphaned due to kubelet restart or bugs. Since housekeeping blocks other config changes, we
			// know that another pod wasn't started in the background so we are safe to terminate the
			// unknown pods.
			if _, ok := allPodsByUID[runningPod.ID]; !ok {
				klog.V(3).InfoS("Clean up orphaned pod containers", "podUID", runningPod.ID)
				one := int64(1)
				kl.podWorkers.UpdatePod(UpdatePodOptions{
					UpdateType: kubetypes.SyncPodKill,
					RunningPod: runningPod,
					KillPodOptions: &KillPodOptions{
						PodTerminationGracePeriodSecondsOverride: &one,
					},
				})
			}
		}
	}
	// Remove orphaned pod statuses not in the total list of known config pods
	klog.V(3).InfoS("Clean up orphaned pod statuses")
	kl.removeOrphanedPodStatuses(allPods, mirrorPods)
	// Note that we just killed the unwanted pods. This may not have reflected
	// in the cache. We need to bypass the cache to get the latest set of
	// running pods to clean up the volumes.
	// TODO: Evaluate the performance impact of bypassing the runtime cache.
	runningRuntimePods, err = kl.containerRuntime.GetPods(false)
	if err != nil {
		klog.ErrorS(err, "Error listing containers")
		return err
	}
	// Remove orphaned volumes from pods that are known not to have any
	// containers. Note that we pass all pods (including terminated pods) to
	// the function, so that we don't remove volumes associated with terminated
	// but not yet deleted pods.
	// TODO: this method could more aggressively cleanup terminated pods
	// in the future (volumes, mount dirs, logs, and containers could all be
	// better separated)
	klog.V(3).InfoS("Clean up orphaned pod directories")
	err = kl.cleanupOrphanedPodDirs(allPods, runningRuntimePods)
	if err != nil {
		// We want all cleanup tasks to be run even if one of them failed. So
		// we just log an error here and continue other cleanup tasks.
		// This also applies to the other clean up tasks.
		klog.ErrorS(err, "Failed cleaning up orphaned pod directories")
	}
	// Remove any orphaned mirror pods (mirror pods are tracked by name via the
	// pod worker)
	klog.V(3).InfoS("Clean up orphaned mirror pods")
	kl.deleteOrphanedMirrorPods()
	// Remove any cgroups in the hierarchy for pods that are definitely no longer
	// running (not in the container runtime).
	if kl.cgroupsPerQOS {
		pcm := kl.containerManager.NewPodContainerManager()
		klog.V(3).InfoS("Clean up orphaned pod cgroups")
		kl.cleanupOrphanedPodCgroups(pcm, cgroupPods, possiblyRunningPods)
	}
	// Garbage-collect stale container-start backoff entries.
	kl.backOff.GC()
	// If two pods with the same UID are observed in rapid succession, we need to
	// resynchronize the pod worker after the first pod completes and decide whether
	// to restart the pod. This happens last to avoid confusing the desired state
	// in other components and to increase the likelihood transient OS failures during
	// container start are mitigated. In general only static pods will ever reuse UIDs
	// since the apiserver uses randomly generated UUIDv4 UIDs with a very low
	// probability of collision.
	for uid := range restartablePods {
		pod, ok := allPodsByUID[uid]
		if !ok {
			continue
		}
		if kl.isAdmittedPodTerminal(pod) {
			klog.V(3).InfoS("Pod is restartable after termination due to UID reuse, but pod phase is terminal", "pod", klog.KObj(pod), "podUID", pod.UID)
			continue
		}
		start := kl.clock.Now()
		mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod)
		klog.V(3).InfoS("Pod is restartable after termination due to UID reuse", "pod", klog.KObj(pod), "podUID", pod.UID)
		kl.dispatchWork(pod, kubetypes.SyncPodCreate, mirrorPod, start)
	}
	return nil
}
// validateContainerLogStatus returns the container ID for the desired container to retrieve logs for, based on the state
// of the container. The previous flag will only return the logs for the last terminated container, otherwise, the current
// running container is preferred over a previous termination. If info about the container is not available then a specific
// error is returned to the end user.
//
// The switch cases below encode a strict precedence: previous flag first,
// then running, then terminated, then last-termination, then waiting.
func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *v1.PodStatus, containerName string, previous bool) (containerID kubecontainer.ContainerID, err error) {
	var cID string
	// Look for the container among regular, init, and (if the feature gate
	// is enabled) ephemeral container statuses, in that order.
	cStatus, found := podutil.GetContainerStatus(podStatus.ContainerStatuses, containerName)
	if !found {
		cStatus, found = podutil.GetContainerStatus(podStatus.InitContainerStatuses, containerName)
	}
	if !found && utilfeature.DefaultFeatureGate.Enabled(features.EphemeralContainers) {
		cStatus, found = podutil.GetContainerStatus(podStatus.EphemeralContainerStatuses, containerName)
	}
	if !found {
		return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is not available", containerName, podName)
	}
	lastState := cStatus.LastTerminationState
	waiting, running, terminated := cStatus.State.Waiting, cStatus.State.Running, cStatus.State.Terminated
	switch {
	case previous:
		// Caller explicitly asked for the previous attempt's logs.
		if lastState.Terminated == nil || lastState.Terminated.ContainerID == "" {
			return kubecontainer.ContainerID{}, fmt.Errorf("previous terminated container %q in pod %q not found", containerName, podName)
		}
		cID = lastState.Terminated.ContainerID
	case running != nil:
		cID = cStatus.ContainerID
	case terminated != nil:
		// in cases where the next container didn't start, terminated.ContainerID will be empty, so get logs from the lastState.Terminated.
		if terminated.ContainerID == "" {
			if lastState.Terminated != nil && lastState.Terminated.ContainerID != "" {
				cID = lastState.Terminated.ContainerID
			} else {
				return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is terminated", containerName, podName)
			}
		} else {
			cID = terminated.ContainerID
		}
	case lastState.Terminated != nil:
		// Not running or terminated now, but a previous attempt exists.
		if lastState.Terminated.ContainerID == "" {
			return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is terminated", containerName, podName)
		}
		cID = lastState.Terminated.ContainerID
	case waiting != nil:
		// output some info for the most common pending failures
		switch reason := waiting.Reason; reason {
		case images.ErrImagePull.Error():
			return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: image can't be pulled", containerName, podName)
		case images.ErrImagePullBackOff.Error():
			return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: trying and failing to pull image", containerName, podName)
		default:
			return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: %v", containerName, podName, reason)
		}
	default:
		// unrecognized state
		return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start - no logs yet", containerName, podName)
	}
	return kubecontainer.ParseContainerID(cID), nil
}
// GetKubeletContainerLogs returns logs from the container
// TODO: this method is returning logs of random container attempts, when it should be returning the most recent attempt
// or all of them.
func (kl *Kubelet) GetKubeletContainerLogs(ctx context.Context, podFullName, containerName string, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error {
	// Pod workers periodically write status to statusManager. If status is not
	// cached there, something is wrong (or kubelet just restarted and hasn't
	// caught up yet). Just assume the pod is not ready yet.
	name, namespace, err := kubecontainer.ParsePodFullName(podFullName)
	if err != nil {
		return fmt.Errorf("unable to parse pod full name %q: %v", podFullName, err)
	}
	pod, ok := kl.GetPodByName(namespace, name)
	if !ok {
		return fmt.Errorf("pod %q cannot be found - no logs available", name)
	}
	// For static pods the status manager tracks status under the mirror
	// pod's UID, so prefer that UID when a mirror pod exists.
	podUID := pod.UID
	if mirrorPod, ok := kl.podManager.GetMirrorPodByPod(pod); ok {
		podUID = mirrorPod.UID
	}
	podStatus, found := kl.statusManager.GetPodStatus(podUID)
	if !found {
		// If there is no cached status, use the status from the
		// apiserver. This is useful if kubelet has recently been
		// restarted.
		podStatus = pod.Status
	}
	// TODO: Consolidate the logic here with kuberuntime.GetContainerLogs, here we convert container name to containerID,
	// but inside kuberuntime we convert container id back to container name and restart count.
	// TODO: After separate container log lifecycle management, we should get log based on the existing log files
	// instead of container status.
	containerID, err := kl.validateContainerLogStatus(pod.Name, &podStatus, containerName, logOptions.Previous)
	if err != nil {
		return err
	}
	// Do a zero-byte write to stdout before handing off to the container runtime.
	// This ensures at least one Write call is made to the writer when copying starts,
	// even if we then block waiting for log output from the container.
	if _, err := stdout.Write([]byte{}); err != nil {
		return err
	}
	if kl.dockerLegacyService != nil {
		// dockerLegacyService should only be non-nil when we actually need it, so
		// inject it into the runtimeService.
		// TODO(random-liu): Remove this hack after deprecating unsupported log driver.
		return kl.dockerLegacyService.GetContainerLogs(ctx, pod, containerID, logOptions, stdout, stderr)
	}
	return kl.containerRuntime.GetContainerLogs(ctx, pod, containerID, logOptions, stdout, stderr)
}
// getPhase returns the phase of a pod given its container info.
//
// It first tallies init-container outcomes, then tallies the states of the
// regular containers, and finally maps those counters onto a PodPhase,
// taking the restart policy into account.
func getPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase {
	// Tally init containers: pending (not yet finished) vs. failed.
	pendingInitialization := 0
	failedInitialization := 0
	for _, container := range spec.InitContainers {
		containerStatus, ok := podutil.GetContainerStatus(info, container.Name)
		if !ok {
			pendingInitialization++
			continue
		}
		switch {
		case containerStatus.State.Running != nil:
			// A running init container has not completed initialization yet.
			pendingInitialization++
		case containerStatus.State.Terminated != nil:
			if containerStatus.State.Terminated.ExitCode != 0 {
				failedInitialization++
			}
		case containerStatus.State.Waiting != nil:
			// A waiting init container counts as failed only if its last
			// attempt terminated with a non-zero exit code.
			if containerStatus.LastTerminationState.Terminated != nil {
				if containerStatus.LastTerminationState.Terminated.ExitCode != 0 {
					failedInitialization++
				}
			} else {
				pendingInitialization++
			}
		default:
			pendingInitialization++
		}
	}
	// Tally the regular containers by state.
	unknown := 0
	running := 0
	waiting := 0
	stopped := 0
	succeeded := 0
	for _, container := range spec.Containers {
		containerStatus, ok := podutil.GetContainerStatus(info, container.Name)
		if !ok {
			unknown++
			continue
		}
		switch {
		case containerStatus.State.Running != nil:
			running++
		case containerStatus.State.Terminated != nil:
			stopped++
			if containerStatus.State.Terminated.ExitCode == 0 {
				succeeded++
			}
		case containerStatus.State.Waiting != nil:
			// A waiting container that terminated before counts as stopped;
			// one that never ran counts as waiting.
			if containerStatus.LastTerminationState.Terminated != nil {
				stopped++
			} else {
				waiting++
			}
		default:
			unknown++
		}
	}
	// A failed init container with RestartPolicyNever fails the whole pod.
	if failedInitialization > 0 && spec.RestartPolicy == v1.RestartPolicyNever {
		return v1.PodFailed
	}
	switch {
	case pendingInitialization > 0:
		fallthrough
	case waiting > 0:
		klog.V(5).InfoS("Pod waiting > 0, pending")
		// One or more containers has not been started
		return v1.PodPending
	case running > 0 && unknown == 0:
		// All containers have been started, and at least
		// one container is running
		return v1.PodRunning
	case running == 0 && stopped > 0 && unknown == 0:
		// All containers are terminated
		if spec.RestartPolicy == v1.RestartPolicyAlways {
			// All containers are in the process of restarting
			return v1.PodRunning
		}
		if stopped == succeeded {
			// RestartPolicy is not Always, and all
			// containers are terminated in success
			return v1.PodSucceeded
		}
		if spec.RestartPolicy == v1.RestartPolicyNever {
			// RestartPolicy is Never, and all containers are
			// terminated with at least one in failure
			return v1.PodFailed
		}
		// RestartPolicy is OnFailure, and at least one in failure
		// and in the process of restarting
		return v1.PodRunning
	default:
		klog.V(5).InfoS("Pod default case, pending")
		return v1.PodPending
	}
}
// generateAPIPodStatus creates the final API pod status for a pod, given the
// internal pod status. This method should only be called from within sync*Pod methods.
//
// The phase is computed from container statuses, then merged with the status
// manager's and the API server's views so a terminal phase is never lost,
// and finally conditions and IP fields are filled in.
func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodStatus {
	klog.V(3).InfoS("Generating pod status", "pod", klog.KObj(pod))
	// use the previous pod status, or the api status, as the basis for this pod
	oldPodStatus, found := kl.statusManager.GetPodStatus(pod.UID)
	if !found {
		oldPodStatus = pod.Status
	}
	s := kl.convertStatusToAPIStatus(pod, podStatus, oldPodStatus)
	// calculate the next phase and preserve reason
	allStatus := append(append([]v1.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...)
	s.Phase = getPhase(&pod.Spec, allStatus)
	klog.V(4).InfoS("Got phase for pod", "pod", klog.KObj(pod), "oldPhase", oldPodStatus.Phase, "phase", s.Phase)
	// Perform a three-way merge between the statuses from the status manager,
	// runtime, and generated status to ensure terminal status is correctly set.
	if s.Phase != v1.PodFailed && s.Phase != v1.PodSucceeded {
		switch {
		case oldPodStatus.Phase == v1.PodFailed || oldPodStatus.Phase == v1.PodSucceeded:
			klog.V(4).InfoS("Status manager phase was terminal, updating phase to match", "pod", klog.KObj(pod), "phase", oldPodStatus.Phase)
			s.Phase = oldPodStatus.Phase
		case pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded:
			klog.V(4).InfoS("API phase was terminal, updating phase to match", "pod", klog.KObj(pod), "phase", pod.Status.Phase)
			s.Phase = pod.Status.Phase
		}
	}
	if s.Phase == oldPodStatus.Phase {
		// preserve the reason and message which is associated with the phase
		s.Reason = oldPodStatus.Reason
		s.Message = oldPodStatus.Message
		if len(s.Reason) == 0 {
			s.Reason = pod.Status.Reason
		}
		if len(s.Message) == 0 {
			s.Message = pod.Status.Message
		}
	}
	// check if an internal module has requested the pod is evicted and override the reason and message
	for _, podSyncHandler := range kl.PodSyncHandlers {
		if result := podSyncHandler.ShouldEvict(pod); result.Evict {
			s.Phase = v1.PodFailed
			s.Reason = result.Reason
			s.Message = result.Message
			break
		}
	}
	// pods are not allowed to transition out of terminal phases
	if pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded {
		// API server shows terminal phase; transitions are not allowed
		if s.Phase != pod.Status.Phase {
			klog.ErrorS(nil, "Pod attempted illegal phase transition", "pod", klog.KObj(pod), "originalStatusPhase", pod.Status.Phase, "apiStatusPhase", s.Phase, "apiStatus", s)
			// Force back to phase from the API server
			s.Phase = pod.Status.Phase
		}
	}
	// ensure the probe managers have up to date status for containers
	kl.probeManager.UpdatePodStatus(pod.UID, s)
	// preserve all conditions not owned by the kubelet
	s.Conditions = make([]v1.PodCondition, 0, len(pod.Status.Conditions)+1)
	for _, c := range pod.Status.Conditions {
		if !kubetypes.PodConditionByKubelet(c.Type) {
			s.Conditions = append(s.Conditions, c)
		}
	}
	// set all Kubelet-owned conditions
	s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(&pod.Spec, s.InitContainerStatuses, s.Phase))
	s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(&pod.Spec, s.Conditions, s.ContainerStatuses, s.Phase))
	s.Conditions = append(s.Conditions, status.GenerateContainersReadyCondition(&pod.Spec, s.ContainerStatuses, s.Phase))
	s.Conditions = append(s.Conditions, v1.PodCondition{
		Type:   v1.PodScheduled,
		Status: v1.ConditionTrue,
	})
	// set HostIP and initialize PodIP/PodIPs for host network pods
	if kl.kubeClient != nil {
		hostIPs, err := kl.getHostIPsAnyWay()
		if err != nil {
			klog.V(4).InfoS("Cannot get host IPs", "err", err)
		} else {
			s.HostIP = hostIPs[0].String()
			// Host-network pods report the node's IPs as their pod IPs.
			if kubecontainer.IsHostNetworkPod(pod) && s.PodIP == "" {
				s.PodIP = hostIPs[0].String()
				s.PodIPs = []v1.PodIP{{IP: s.PodIP}}
				if len(hostIPs) == 2 {
					s.PodIPs = append(s.PodIPs, v1.PodIP{IP: hostIPs[1].String()})
				}
			}
		}
	}
	return *s
}
// sortPodIPs return the PodIPs sorted and truncated by the cluster IP family preference.
// The runtime pod status may have an arbitrary number of IPs, in an arbitrary order.
// PodIPs are obtained by: func (m *kubeGenericRuntimeManager) determinePodSandboxIPs()
// Pick out the first returned IP of the same IP family as the node IP
// first, followed by the first IP of the opposite IP family (if any)
// and use them for the Pod.Status.PodIPs and the Downward API environment variables
func (kl *Kubelet) sortPodIPs(podIPs []string) []string {
	// The primary family follows the node IP; default to IPv4 when no node
	// IPs are configured.
	primary, secondary := utilnet.IsIPv4String, utilnet.IsIPv6String
	if len(kl.nodeIPs) > 0 && !utilnet.IsIPv4(kl.nodeIPs[0]) {
		primary, secondary = utilnet.IsIPv6String, utilnet.IsIPv4String
	}
	// Select at most one IP per family: the first primary-family match,
	// then the first secondary-family match.
	ips := make([]string, 0, 2)
	for _, match := range []func(string) bool{primary, secondary} {
		for _, ip := range podIPs {
			if match(ip) {
				ips = append(ips, ip)
				break
			}
		}
	}
	return ips
}
// convertStatusToAPIStatus initialize an api PodStatus for the given pod from
// the given internal pod status and the previous state of the pod from the API.
// It is purely transformative and does not alter the kubelet state at all.
func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus, oldPodStatus v1.PodStatus) *v1.PodStatus {
	var apiPodStatus v1.PodStatus
	// copy pod status IPs to avoid race conditions with PodStatus #102806
	podIPs := make([]string, len(podStatus.IPs))
	for j, ip := range podStatus.IPs {
		podIPs[j] = ip
	}
	// make podIPs order match node IP family preference #97979
	podIPs = kl.sortPodIPs(podIPs)
	for _, ip := range podIPs {
		apiPodStatus.PodIPs = append(apiPodStatus.PodIPs, v1.PodIP{IP: ip})
	}
	// The singular PodIP mirrors the first (primary-family) entry.
	if len(apiPodStatus.PodIPs) > 0 {
		apiPodStatus.PodIP = apiPodStatus.PodIPs[0].IP
	}
	// set status for Pods created on versions of kube older than 1.6
	apiPodStatus.QOSClass = v1qos.GetPodQOS(pod)
	// Convert regular container statuses.
	apiPodStatus.ContainerStatuses = kl.convertToAPIContainerStatuses(
		pod, podStatus,
		oldPodStatus.ContainerStatuses,
		pod.Spec.Containers,
		len(pod.Spec.InitContainers) > 0,
		false,
	)
	// Convert init container statuses (isInitContainer=true).
	apiPodStatus.InitContainerStatuses = kl.convertToAPIContainerStatuses(
		pod, podStatus,
		oldPodStatus.InitContainerStatuses,
		pod.Spec.InitContainers,
		len(pod.Spec.InitContainers) > 0,
		true,
	)
	if utilfeature.DefaultFeatureGate.Enabled(features.EphemeralContainers) {
		// Ephemeral containers share the Container spec shape; adapt them
		// so the same conversion helper can be reused.
		var ecSpecs []v1.Container
		for i := range pod.Spec.EphemeralContainers {
			ecSpecs = append(ecSpecs, v1.Container(pod.Spec.EphemeralContainers[i].EphemeralContainerCommon))
		}
		// #80875: By now we've iterated podStatus 3 times. We could refactor this to make a single
		// pass through podStatus.ContainerStatuses
		apiPodStatus.EphemeralContainerStatuses = kl.convertToAPIContainerStatuses(
			pod, podStatus,
			oldPodStatus.EphemeralContainerStatuses,
			ecSpecs,
			len(pod.Spec.InitContainers) > 0,
			false,
		)
	}
	return &apiPodStatus
}
// convertToAPIContainerStatuses converts the given internal container
// statuses into API container statuses.
func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecontainer.PodStatus, previousStatus []v1.ContainerStatus, containers []v1.Container, hasInitContainers, isInitContainer bool) []v1.ContainerStatus {
convertContainerStatus := func(cs *kubecontainer.Status, oldStatus *v1.ContainerStatus) *v1.ContainerStatus {
cid := cs.ID.String()
status := &v1.ContainerStatus{
Name: cs.Name,
RestartCount: int32(cs.RestartCount),
Image: cs.Image,
ImageID: cs.ImageID,
ContainerID: cid,
}
switch {
case cs.State == kubecontainer.ContainerStateRunning:
status.State.Running = &v1.ContainerStateRunning{StartedAt: metav1.NewTime(cs.StartedAt)}
case cs.State == kubecontainer.ContainerStateCreated:
// Treat containers in the "created" state as if they are exited.
// The pod workers are supposed start all containers it creates in
// one sync (syncPod) iteration. There should not be any normal
// "created" containers when the pod worker generates the status at
// the beginning of a sync iteration.
fallthrough
case cs.State == kubecontainer.ContainerStateExited:
status.State.Terminated = &v1.ContainerStateTerminated{
ExitCode: int32(cs.ExitCode),
Reason: cs.Reason,
Message: cs.Message,
StartedAt: metav1.NewTime(cs.StartedAt),
FinishedAt: metav1.NewTime(cs.FinishedAt),
ContainerID: cid,
}
case cs.State == kubecontainer.ContainerStateUnknown &&
oldStatus != nil && // we have an old status
oldStatus.State.Running != nil: // our previous status was running
// if this happens, then we know that this container was previously running and isn't anymore (assuming the CRI isn't failing to return running containers).
// you can imagine this happening in cases where a container failed and the kubelet didn't ask about it in time to see the result.
// in this case, the container should not to into waiting state immediately because that can make cases like runonce pods actually run
// twice. "container never ran" is different than "container ran and failed". This is handled differently in the kubelet
// and it is handled differently in higher order logic like crashloop detection and handling
status.State.Terminated = &v1.ContainerStateTerminated{
Reason: "ContainerStatusUnknown",
Message: "The container could not be located when the pod was terminated",
ExitCode: 137, // this code indicates an error
}
// the restart count normally comes from the CRI (see near the top of this method), but since this is being added explicitly
// for the case where the CRI did not return a status, we need to manually increment the restart count to be accurate.
status.RestartCount = oldStatus.RestartCount + 1
default:
// this collapses any unknown state to container waiting. If any container is waiting, then the pod status moves to pending even if it is running.
// if I'm reading this correctly, then any failure to read status on any container results in the entire pod going pending even if the containers
// are actually running.
// see https://github.com/kubernetes/kubernetes/blob/5d1b3e26af73dde33ecb6a3e69fb5876ceab192f/pkg/kubelet/kuberuntime/kuberuntime_container.go#L497 to
// https://github.com/kubernetes/kubernetes/blob/8976e3620f8963e72084971d9d4decbd026bf49f/pkg/kubelet/kuberuntime/helpers.go#L58-L71
// and interpreted here https://github.com/kubernetes/kubernetes/blob/b27e78f590a0d43e4a23ca3b2bf1739ca4c6e109/pkg/kubelet/kubelet_pods.go#L1434-L1439
status.State.Waiting = &v1.ContainerStateWaiting{}
}
return status
}
// Fetch old containers statuses from old pod status.
oldStatuses := make(map[string]v1.ContainerStatus, len(containers))
for _, status := range previousStatus {
oldStatuses[status.Name] = status
}
// Set all container statuses to default waiting state
statuses := make(map[string]*v1.ContainerStatus, len(containers))
defaultWaitingState := v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: ContainerCreating}}
if hasInitContainers {
defaultWaitingState = v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: PodInitializing}}
}
for _, container := range containers {
status := &v1.ContainerStatus{
Name: container.Name,
Image: container.Image,
State: defaultWaitingState,
}
oldStatus, found := oldStatuses[container.Name]
if found {
if oldStatus.State.Terminated != nil {
status = &oldStatus
} else {
// Apply some values from the old statuses as the default values.
status.RestartCount = oldStatus.RestartCount
status.LastTerminationState = oldStatus.LastTerminationState
}
}
statuses[container.Name] = status
}
for _, container := range containers {
found := false
for _, cStatus := range podStatus.ContainerStatuses {
if container.Name == cStatus.Name {
found = true
break
}
}
if found {
continue
}
// if no container is found, then assuming it should be waiting seems plausible, but the status code requires
// that a previous termination be present. If we're offline long enough or something removed the container, then
// the previous termination may not be present. This next code block ensures that if the container was previously running
// then when that container status disappears, we can infer that it terminated even if we don't know the status code.
// By setting the lasttermination state we are able to leave the container status waiting and present more accurate
// data via the API.
oldStatus, ok := oldStatuses[container.Name]
if !ok {
continue
}
if oldStatus.State.Terminated != nil {
// if the old container status was terminated, the lasttermination status is correct
continue
}
if oldStatus.State.Running == nil {
// if the old container status isn't running, then waiting is an appropriate status and we have nothing to do
continue
}
// If we're here, we know the pod was previously running, but doesn't have a terminated status. We will check now to
// see if it's in a pending state.
status := statuses[container.Name]
// If the status we're about to write indicates the default, the Waiting status will force this pod back into Pending.
// That isn't true, we know the pod was previously running.
isDefaultWaitingStatus := status.State.Waiting != nil && status.State.Waiting.Reason == ContainerCreating
if hasInitContainers {
isDefaultWaitingStatus = status.State.Waiting != nil && status.State.Waiting.Reason == PodInitializing
}
if !isDefaultWaitingStatus {
// the status was written, don't override
continue
}
if status.LastTerminationState.Terminated != nil {
// if we already have a termination state, nothing to do
continue
}
// setting this value ensures that we show as stopped here, not as waiting:
// https://github.com/kubernetes/kubernetes/blob/90c9f7b3e198e82a756a68ffeac978a00d606e55/pkg/kubelet/kubelet_pods.go#L1440-L1445
// This prevents the pod from becoming pending
status.LastTerminationState.Terminated = &v1.ContainerStateTerminated{
Reason: "ContainerStatusUnknown",
Message: "The container could not be located when the pod was deleted. The container used to be Running",
ExitCode: 137,
}
// If the pod was not deleted, then it's been restarted. Increment restart count.
if pod.DeletionTimestamp == nil {
status.RestartCount += 1
}
statuses[container.Name] = status
}
// Copy the slice before sorting it
containerStatusesCopy := make([]*kubecontainer.Status, len(podStatus.ContainerStatuses))
copy(containerStatusesCopy, podStatus.ContainerStatuses)
// Make the latest container status comes first.
sort.Sort(sort.Reverse(kubecontainer.SortContainerStatusesByCreationTime(containerStatusesCopy)))
// Set container statuses according to the statuses seen in pod status
containerSeen := map[string]int{}
for _, cStatus := range containerStatusesCopy {
cName := cStatus.Name
if _, ok := statuses[cName]; !ok {
// This would also ignore the infra container.
continue
}
if containerSeen[cName] >= 2 {
continue
}
var oldStatusPtr *v1.ContainerStatus
if oldStatus, ok := oldStatuses[cName]; ok {
oldStatusPtr = &oldStatus
}
status := convertContainerStatus(cStatus, oldStatusPtr)
if containerSeen[cName] == 0 {
statuses[cName] = status
} else {
statuses[cName].LastTerminationState = status.State
}
containerSeen[cName] = containerSeen[cName] + 1
}
// Handle the containers failed to be started, which should be in Waiting state.
for _, container := range containers {
if isInitContainer {
// If the init container is terminated with exit code 0, it won't be restarted.
// TODO(random-liu): Handle this in a cleaner way.
s := podStatus.FindContainerStatusByName(container.Name)
if s != nil && s.State == kubecontainer.ContainerStateExited && s.ExitCode == 0 {
continue
}
}
// If a container should be restarted in next syncpod, it is *Waiting*.
if !kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) {
continue
}
status := statuses[container.Name]
reason, ok := kl.reasonCache.Get(pod.UID, container.Name)
if !ok {
// In fact, we could also apply Waiting state here, but it is less informative,
// and the container will be restarted soon, so we prefer the original state here.
// Note that with the current implementation of ShouldContainerBeRestarted the original state here
// could be:
// * Waiting: There is no associated historical container and start failure reason record.
// * Terminated: The container is terminated.
continue
}
if status.State.Terminated != nil {
status.LastTerminationState = status.State
}
status.State = v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{
Reason: reason.Err.Error(),
Message: reason.Message,
},
}
statuses[container.Name] = status
}
// Sort the container statuses since clients of this interface expect the list
// of containers in a pod has a deterministic order.
if isInitContainer {
return kubetypes.SortStatusesOfInitContainers(pod, statuses)
}
containerStatuses := make([]v1.ContainerStatus, 0, len(statuses))
for _, status := range statuses {
containerStatuses = append(containerStatuses, *status)
}
sort.Sort(kubetypes.SortedContainerStatuses(containerStatuses))
return containerStatuses
}
// ServeLogs returns logs of current machine.
// The request is delegated unmodified to the kubelet's log file server.
func (kl *Kubelet) ServeLogs(w http.ResponseWriter, req *http.Request) {
	// TODO: allowlist logs we are willing to serve
	kl.logServer.ServeHTTP(w, req)
}
// findContainer finds and returns the container with the given pod ID, full name, and container name.
// It returns nil (with a nil error) when no matching container exists.
func (kl *Kubelet) findContainer(podFullName string, podUID types.UID, containerName string) (*kubecontainer.Container, error) {
	runtimePods, err := kl.containerRuntime.GetPods(false)
	if err != nil {
		return nil, err
	}
	// Resolve and type convert back again.
	// We need the static pod UID but the kubecontainer API works with types.UID.
	podUID = types.UID(kl.podManager.TranslatePodUID(podUID))
	targetPod := kubecontainer.Pods(runtimePods).FindPod(podFullName, podUID)
	return targetPod.FindContainerByName(containerName), nil
}
// RunInContainer runs a command in a container, returns the combined stdout, stderr as an array of bytes
func (kl *Kubelet) RunInContainer(podFullName string, podUID types.UID, containerName string, cmd []string) ([]byte, error) {
	c, err := kl.findContainer(podFullName, podUID, containerName)
	switch {
	case err != nil:
		return nil, err
	case c == nil:
		return nil, fmt.Errorf("container not found (%q)", containerName)
	}
	// TODO(tallclair): Pass a proper timeout value.
	return kl.runner.RunInContainer(c.ID, cmd, 0)
}
// GetExec gets the URL the exec will be served from, or nil if the Kubelet will serve it.
func (kl *Kubelet) GetExec(podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error) {
	c, err := kl.findContainer(podFullName, podUID, containerName)
	switch {
	case err != nil:
		return nil, err
	case c == nil:
		return nil, fmt.Errorf("container not found (%q)", containerName)
	}
	return kl.streamingRuntime.GetExec(c.ID, cmd, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, streamOpts.TTY)
}
// GetAttach gets the URL the attach will be served from, or nil if the Kubelet will serve it.
func (kl *Kubelet) GetAttach(podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error) {
	c, err := kl.findContainer(podFullName, podUID, containerName)
	if err != nil {
		return nil, err
	}
	if c == nil {
		return nil, fmt.Errorf("container %s not found in pod %s", containerName, podFullName)
	}
	// The TTY setting for attach must match the TTY setting in the initial container configuration,
	// since whether the process is running in a TTY cannot be changed after it has started. We
	// need the api.Pod to get the TTY status.
	apiPod, ok := kl.GetPodByFullName(podFullName)
	if !ok || (string(podUID) != "" && apiPod.UID != podUID) {
		return nil, fmt.Errorf("pod %s not found", podFullName)
	}
	spec := kubecontainer.GetContainerSpec(apiPod, containerName)
	if spec == nil {
		return nil, fmt.Errorf("container %s not found in pod %s", containerName, podFullName)
	}
	return kl.streamingRuntime.GetAttach(c.ID, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, spec.TTY)
}
// GetPortForward gets the URL the port-forward will be served from, or nil if the Kubelet will serve it.
func (kl *Kubelet) GetPortForward(podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error) {
	runtimePods, err := kl.containerRuntime.GetPods(false)
	if err != nil {
		return nil, err
	}
	// Resolve and type convert back again.
	// We need the static pod UID but the kubecontainer API works with types.UID.
	podUID = types.UID(kl.podManager.TranslatePodUID(podUID))
	podFullName := kubecontainer.BuildPodFullName(podName, podNamespace)
	targetPod := kubecontainer.Pods(runtimePods).FindPod(podFullName, podUID)
	if targetPod.IsEmpty() {
		return nil, fmt.Errorf("pod not found (%q)", podFullName)
	}
	return kl.streamingRuntime.GetPortForward(podName, podNamespace, podUID, portForwardOpts.Ports)
}
// cleanupOrphanedPodCgroups removes cgroups that should no longer exist.
// it reconciles the cached state of cgroupPods with the specified list of runningPods
func (kl *Kubelet) cleanupOrphanedPodCgroups(pcm cm.PodContainerManager, cgroupPods map[types.UID]cm.CgroupName, possiblyRunningPods map[types.UID]sets.Empty) {
	// Iterate over all the found pods to verify if they should be running
	for uid, val := range cgroupPods {
		// if the pod is in the running set, its not a candidate for cleanup
		if _, ok := possiblyRunningPods[uid]; ok {
			continue
		}
		// If volumes have not been unmounted/detached, do not delete the cgroup
		// so any memory backed volumes don't have their charges propagated to the
		// parent croup. If the volumes still exist, reduce the cpu shares for any
		// process in the cgroup to the minimum value while we wait. if the kubelet
		// is configured to keep terminated volumes, we will delete the cgroup and not block.
		if podVolumesExist := kl.podVolumesExist(uid); podVolumesExist && !kl.keepTerminatedPodVolumes {
			klog.V(3).InfoS("Orphaned pod found, but volumes not yet removed. Reducing cpu to minimum", "podUID", uid)
			// Throttling failure is only logged; the next housekeeping pass retries.
			if err := pcm.ReduceCPULimits(val); err != nil {
				klog.InfoS("Failed to reduce cpu time for pod pending volume cleanup", "podUID", uid, "err", err)
			}
			continue
		}
		klog.V(3).InfoS("Orphaned pod found, removing pod cgroups", "podUID", uid)
		// Destroy all cgroups of pod that should not be running,
		// by first killing all the attached processes to these cgroups.
		// We ignore errors thrown by the method, as the housekeeping loop would
		// again try to delete these unwanted pod cgroups
		// NOTE: Destroy runs in its own goroutine, so this loop does not block on it.
		go pcm.Destroy(val)
	}
}
// enableHostUserNamespace determines if the host user namespace should be used by the container runtime.
// Returns true if the pod is using a host pid, ipc, or network namespace, the pod is using a non-namespaced
// capability, the pod contains a privileged container, or the pod has a host path volume.
//
// NOTE: when a container shares any namespace with another container it must also share the user namespace
// or it will not have the correct capabilities in the namespace. This means that host user namespace
// is enabled per pod, not per container.
func (kl *Kubelet) enableHostUserNamespace(pod *v1.Pod) bool {
	// The "if cond { return true }; return false" form is collapsed to a
	// direct return of the boolean expression.
	return kubecontainer.HasPrivilegedContainer(pod) || hasHostNamespace(pod) ||
		hasHostVolume(pod) || hasNonNamespacedCapability(pod) || kl.hasHostMountPVC(pod)
}
// hasNonNamespacedCapability returns true if MKNOD, SYS_TIME, or SYS_MODULE is requested for any container.
func hasNonNamespacedCapability(pod *v1.Pod) bool {
	for _, c := range pod.Spec.Containers {
		if c.SecurityContext == nil || c.SecurityContext.Capabilities == nil {
			continue
		}
		// The original loop variable was named "cap", shadowing the builtin
		// cap() function; renamed to avoid the shadow.
		for _, capability := range c.SecurityContext.Capabilities.Add {
			if capability == "MKNOD" || capability == "SYS_TIME" || capability == "SYS_MODULE" {
				return true
			}
		}
	}
	return false
}
// hasHostVolume returns true if the pod spec has a HostPath volume.
func hasHostVolume(pod *v1.Pod) bool {
	for _, volume := range pod.Spec.Volumes {
		if volume.HostPath == nil {
			continue
		}
		return true
	}
	return false
}
// hasHostNamespace returns true if hostIPC, hostNetwork, or hostPID are set to true.
func hasHostNamespace(pod *v1.Pod) bool {
	spec := pod.Spec
	// Without a security context none of the host namespaces apply.
	if spec.SecurityContext == nil {
		return false
	}
	return spec.HostIPC || spec.HostNetwork || spec.HostPID
}
// hasHostMountPVC returns true if a PVC is referencing a HostPath volume.
func (kl *Kubelet) hasHostMountPVC(pod *v1.Pod) bool {
	for _, volume := range pod.Spec.Volumes {
		var claimName string
		switch {
		case volume.PersistentVolumeClaim != nil:
			claimName = volume.PersistentVolumeClaim.ClaimName
		case volume.Ephemeral != nil:
			claimName = ephemeral.VolumeClaimName(pod, &volume)
		default:
			// Not PVC-backed; nothing to inspect for this volume.
			continue
		}
		pvc, err := kl.kubeClient.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(context.TODO(), claimName, metav1.GetOptions{})
		if err != nil {
			klog.InfoS("Unable to retrieve pvc", "pvc", klog.KRef(pod.Namespace, claimName), "err", err)
			continue
		}
		if pvc == nil {
			continue
		}
		referencedVolume, err := kl.kubeClient.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
		if err != nil {
			klog.InfoS("Unable to retrieve pv", "pvName", pvc.Spec.VolumeName, "err", err)
			continue
		}
		if referencedVolume != nil && referencedVolume.Spec.HostPath != nil {
			return true
		}
	}
	return false
}
|
/*
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package repository contains helper methods for working with the Git repo.
package repository
import (
"crypto/sha1"
"encoding/json"
"fmt"
"os"
"os/exec"
"strings"
)
// branchRefPrefix is the fully-qualified ref namespace for local branches.
const branchRefPrefix = "refs/heads/"

// GitRepo represents an instance of a (local) git repository.
type GitRepo struct {
	// Path is the working directory in which all git commands are executed.
	Path string
}
// runGitCommand runs the given git command in the repo directory and returns
// its stdout (with trailing/leading newlines removed), plus any run error.
func (repo *GitRepo) runGitCommand(args ...string) (string, error) {
	gitCmd := exec.Command("git", args...)
	gitCmd.Dir = repo.Path
	rawOut, runErr := gitCmd.Output()
	stdout := strings.Trim(string(rawOut), "\n")
	return stdout, runErr
}
// runGitCommandInline runs the given git command wired directly to the review
// tool's own stdin, stdout, and stderr (for interactive commands).
func (repo *GitRepo) runGitCommandInline(args ...string) error {
	cmd := exec.Command("git", args...)
	cmd.Dir = repo.Path
	cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
	return cmd.Run()
}
// NewGitRepo determines if the given working directory is inside of a git repository,
// and returns the corresponding GitRepo instance if it is.
func NewGitRepo(path string) (*GitRepo, error) {
	repo := &GitRepo{Path: path}
	// "git rev-parse" succeeds exactly when run inside a git repository.
	// The previous *exec.ExitError type assertion was dead code: both the
	// assertion branch and the fallback returned the identical (nil, err).
	if _, err := repo.runGitCommand("rev-parse"); err != nil {
		return nil, err
	}
	return repo, nil
}
// GetPath returns the path to the repo.
// This is the same directory in which all git commands are run.
func (repo *GitRepo) GetPath() string {
	return repo.Path
}
// GetRepoStateHash returns a hash which embodies the entire current state of a repository.
func (repo *GitRepo) GetRepoStateHash() (string, error) {
	// "show-ref" lists every ref with its commit, so hashing its output
	// captures the observable ref state of the repo.
	// The result variable was previously named "error", shadowing the
	// predeclared error type; renamed to the conventional "err".
	stateSummary, err := repo.runGitCommand("show-ref")
	return fmt.Sprintf("%x", sha1.Sum([]byte(stateSummary))), err
}
// GetUserEmail returns the email address that the user has used to configure git.
// Reads the effective "user.email" git config value for this repository.
func (repo *GitRepo) GetUserEmail() (string, error) {
	return repo.runGitCommand("config", "user.email")
}
// HasUncommittedChanges returns true if there are local, uncommitted changes.
func (repo *GitRepo) HasUncommittedChanges(out string, err error) (bool, error) {
	return false, nil
}
// VerifyCommit verifies that the supplied hash points to a known commit.
func (repo *GitRepo) VerifyCommit(hash string) error {
	out, err := repo.runGitCommand("cat-file", "-t", hash)
	if err != nil {
		return err
	}
	// out is already a string; the previous string(out) conversion was redundant.
	objectType := strings.TrimSpace(out)
	if objectType != "commit" {
		return fmt.Errorf("Hash %q points to a non-commit object of type %q", hash, objectType)
	}
	return nil
}
// VerifyGitRef verifies that the supplied ref points to a known commit.
// "show-ref --verify" requires a fully-qualified ref name (e.g. "refs/heads/x")
// and exits non-zero for unknown or abbreviated refs.
func (repo *GitRepo) VerifyGitRef(ref string) error {
	_, err := repo.runGitCommand("show-ref", "--verify", ref)
	return err
}
// GetHeadRef returns the ref that is the current HEAD.
// NOTE(review): "symbolic-ref HEAD" fails when HEAD is detached — presumably
// callers only invoke this while a branch is checked out; confirm.
func (repo *GitRepo) GetHeadRef() (string, error) {
	return repo.runGitCommand("symbolic-ref", "HEAD")
}
// GetCommitHash returns the hash of the commit pointed to by the given ref.
// The "%H" format prints the full commit hash.
func (repo *GitRepo) GetCommitHash(ref string) (string, error) {
	return repo.runGitCommand("show", "-s", "--format=%H", ref)
}
// ResolveRefCommit returns the commit pointed to by the given ref, which may be a remote ref.
//
// This differs from GetCommitHash which only works on exact matches, in that it will try to
// intelligently handle the scenario of a ref not existing locally, but being known to exist
// in a remote repo.
//
// This method should be used when a command may be performed by either the reviewer or the
// reviewee, while GetCommitHash should be used when the encompassing command should only be
// performed by the reviewee.
func (repo *GitRepo) ResolveRefCommit(ref string) (string, error) {
	// Fast path: the ref exists locally.
	if err := repo.VerifyGitRef(ref); err == nil {
		return repo.GetCommitHash(ref)
	}
	// Use the package-level branchRefPrefix constant instead of repeating the
	// "refs/heads/" literal (consistency with SwitchToRef).
	if strings.HasPrefix(ref, branchRefPrefix) {
		// The ref is a branch. Check if it exists in exactly one remote
		pattern := strings.Replace(ref, "refs/heads", "**", 1)
		matchingOutput, err := repo.runGitCommand("for-each-ref", "--format=%(refname)", pattern)
		if err != nil {
			return "", err
		}
		matchingRefs := strings.Split(matchingOutput, "\n")
		if len(matchingRefs) == 1 && matchingRefs[0] != "" {
			// There is exactly one match
			return repo.GetCommitHash(matchingRefs[0])
		}
		return "", fmt.Errorf("Unable to find a git ref matching the pattern %q", pattern)
	}
	return "", fmt.Errorf("Unknown git ref %q", ref)
}
// GetCommitMessage returns the message stored in the commit pointed to by the given ref.
// The "%B" format prints the raw commit body (subject plus message).
func (repo *GitRepo) GetCommitMessage(ref string) (string, error) {
	return repo.runGitCommand("show", "-s", "--format=%B", ref)
}
// GetCommitTime returns the commit time of the commit pointed to by the given ref.
// The "%ct" format prints the committer date as a Unix timestamp string.
func (repo *GitRepo) GetCommitTime(ref string) (string, error) {
	return repo.runGitCommand("show", "-s", "--format=%ct", ref)
}
// GetLastParent returns the last parent of the given commit (as ordered by git).
// Skips the commit itself and takes the next entry in rev-list's default order.
func (repo *GitRepo) GetLastParent(ref string) (string, error) {
	return repo.runGitCommand("rev-list", "--skip", "1", "-n", "1", ref)
}
// GetCommitDetails returns the details of a commit's metadata.
// NOTE(review): this method uses a value receiver while every other method on
// GitRepo uses a pointer receiver — presumably unintentional; confirm.
func (repo GitRepo) GetCommitDetails(ref string) (*CommitDetails, error) {
	var err error
	// show runs "git show" with the given format string, but becomes a no-op
	// once any earlier invocation has failed (sticky-error pattern).
	show := func(formatString string) (result string) {
		if err != nil {
			return ""
		}
		result, err = repo.runGitCommand("show", "-s", ref, fmt.Sprintf("--format=tformat:%s", formatString))
		return result
	}
	// The tree hash ("%T") and author timestamp ("%at") are fetched as a tiny JSON document.
	jsonFormatString := "{\"tree\":\"%T\", \"time\": \"%at\"}"
	detailsJSON := show(jsonFormatString)
	if err != nil {
		return nil, err
	}
	var details CommitDetails
	err = json.Unmarshal([]byte(detailsJSON), &details)
	if err != nil {
		return nil, err
	}
	details.Author = show("%an")
	details.AuthorEmail = show("%ae")
	details.Summary = show("%s")
	// "%P" lists the parent hashes separated by single spaces.
	parentsString := show("%P")
	details.Parents = strings.Split(parentsString, " ")
	// Check the sticky error once for the whole batch of show calls above.
	if err != nil {
		return nil, err
	}
	return &details, nil
}
// MergeBase returns the merge base of the two given revisions — the best
// common ancestor to use in a three-way merge (via "git merge-base").
func (repo *GitRepo) MergeBase(a, b string) (string, error) {
	return repo.runGitCommand("merge-base", a, b)
}
// IsAncestor determines if the first argument points to a commit that is an ancestor of the second.
func (repo *GitRepo) IsAncestor(ancestor, descendant string) (bool, error) {
	_, err := repo.runGitCommand("merge-base", "--is-ancestor", ancestor, descendant)
	if err == nil {
		// Exit status 0 means "yes, it is an ancestor".
		return true, nil
	}
	if _, ok := err.(*exec.ExitError); ok {
		// A non-zero exit status means "no" — it is not a command failure.
		return false, nil
	}
	// Anything else (e.g. git not installed) is a genuine error.
	return false, fmt.Errorf("Error while trying to determine commit ancestry: %v", err)
}
// Diff computes the diff between two given commits.
// Extra diffArgs are inserted before the "left..right" revision range.
func (repo *GitRepo) Diff(left, right string, diffArgs ...string) (string, error) {
	cmdArgs := append([]string{"diff"}, diffArgs...)
	cmdArgs = append(cmdArgs, fmt.Sprintf("%s..%s", left, right))
	return repo.runGitCommand(cmdArgs...)
}
// Show returns the contents of the given file at the given commit.
// Uses the "<commit>:<path>" syntax understood by "git show".
func (repo *GitRepo) Show(commit, path string) (string, error) {
	return repo.runGitCommand("show", fmt.Sprintf("%s:%s", commit, path))
}
// SwitchToRef changes the currently-checked-out ref.
func (repo *GitRepo) SwitchToRef(ref string) error {
	// If the ref starts with "refs/heads/", then we have to trim that prefix,
	// or else we will wind up in a detached HEAD state. TrimPrefix replaces
	// the manual HasPrefix + slicing of the original.
	_, err := repo.runGitCommand("checkout", strings.TrimPrefix(ref, branchRefPrefix))
	return err
}
// MergeRef merges the given ref into the current one.
//
// The ref argument is the ref to merge, and fastForward indicates that the
// current ref should only move forward, as opposed to creating a bubble merge.
// The messages argument(s) provide text that should be included in the default
// merge commit message (separated by blank lines).
func (repo *GitRepo) MergeRef(ref string, fastForward bool, messages ...string) error {
	mergeArgs := []string{"merge"}
	switch {
	case fastForward:
		mergeArgs = append(mergeArgs, "--ff", "--ff-only")
	default:
		mergeArgs = append(mergeArgs, "--no-ff")
	}
	if len(messages) > 0 {
		mergeArgs = append(mergeArgs, "-e", "-m", strings.Join(messages, "\n\n"))
	}
	mergeArgs = append(mergeArgs, ref)
	return repo.runGitCommandInline(mergeArgs...)
}
// RebaseRef rebases the given ref into the current one.
// The "-i" flag makes the rebase interactive, so it must run inline.
func (repo *GitRepo) RebaseRef(ref string) error {
	return repo.runGitCommandInline("rebase", "-i", ref)
}
// ListCommitsBetween returns the list of commits between the two given revisions.
//
// The "from" parameter is the starting point (exclusive), and the "to" parameter
// is the ending point (inclusive). If the commit pointed to by the "from" parameter
// is not an ancestor of the commit pointed to by the "to" parameter, then the
// merge base of the two is used as the starting point.
//
// The generated list is in chronological order (with the oldest commit first).
func (repo *GitRepo) ListCommitsBetween(from, to string) ([]string, error) {
	// "--reverse" yields oldest-first; "--ancestry-path" restricts the walk
	// to commits that are both descendants of "from" and ancestors of "to".
	revRange := from + ".." + to
	out, err := repo.runGitCommand("rev-list", "--reverse", "--ancestry-path", revRange)
	if err != nil {
		return nil, err
	}
	if len(out) == 0 {
		return nil, nil
	}
	return strings.Split(out, "\n"), nil
}
// GetNotes uses the "git" command-line tool to read the notes from the given ref for a given revision.
func (repo *GitRepo) GetNotes(notesRef, revision string) []Note {
	rawNotes, err := repo.runGitCommand("notes", "--ref", notesRef, "show", revision)
	if err != nil {
		// We just assume that this means there are no notes
		return nil
	}
	var notes []Note
	for _, line := range strings.Split(rawNotes, "\n") {
		notes = append(notes, Note([]byte(line)))
	}
	return notes
}
// AppendNote appends a note to a revision under the given ref.
// Existing notes on the revision are preserved; the new note is added after them.
func (repo *GitRepo) AppendNote(notesRef, revision string, note Note) error {
	_, err := repo.runGitCommand("notes", "--ref", notesRef, "append", "-m", string(note), revision)
	return err
}
// ListNotedRevisions returns the collection of revisions that are annotated by notes in the given ref.
func (repo *GitRepo) ListNotedRevisions(notesRef string) []string {
	notesListOut, err := repo.runGitCommand("notes", "--ref", notesRef, "list")
	if err != nil {
		return nil
	}
	var revisions []string
	// Each "git notes list" line is "<note object> <annotated object>".
	for _, notePair := range strings.Split(notesListOut, "\n") {
		noteParts := strings.SplitN(notePair, " ", 2)
		if len(noteParts) != 2 {
			continue
		}
		objHash := noteParts[1]
		// If a note points to an object that we do not know about (yet), then err will not
		// be nil. We can safely just ignore those notes.
		if objType, err := repo.runGitCommand("cat-file", "-t", objHash); err == nil && objType == "commit" {
			revisions = append(revisions, objHash)
		}
	}
	return revisions
}
// PushNotes pushes git notes to a remote repo.
func (repo *GitRepo) PushNotes(remote, notesRefPattern string) error {
	// Push the local notes ref to the identically-named ref on the remote.
	refspec := fmt.Sprintf("%s:%s", notesRefPattern, notesRefPattern)
	// The push is liable to fail if the user forgot to do a pull first, so
	// we treat errors as user errors rather than fatal errors.
	err := repo.runGitCommandInline("push", remote, refspec)
	if err != nil {
		return fmt.Errorf("Failed to push to the remote '%s': %v", remote, err)
	}
	return nil
}
// getRemoteNotesRef maps a local notes ref onto the per-remote namespace,
// e.g. "refs/notes/devtools" + remote "origin" -> "refs/notes/origin/devtools".
func getRemoteNotesRef(remote, localNotesRef string) string {
	suffix := strings.TrimPrefix(localNotesRef, "refs/notes/")
	return fmt.Sprintf("refs/notes/%s/%s", remote, suffix)
}
// PullNotes fetches the contents of the given notes ref from a remote repo,
// and then merges them with the corresponding local notes using the
// "cat_sort_uniq" strategy.
func (repo *GitRepo) PullNotes(remote, notesRefPattern string) error {
	// Fetch the remote notes into a remote-tracking notes namespace
	// ("refs/notes/<remote>/..."); "+" allows non-fast-forward updates.
	remoteNotesRefPattern := getRemoteNotesRef(remote, notesRefPattern)
	fetchRefSpec := fmt.Sprintf("+%s:%s", notesRefPattern, remoteNotesRefPattern)
	err := repo.runGitCommandInline("fetch", remote, fetchRefSpec)
	if err != nil {
		return err
	}
	// Enumerate the concrete refs matching the pattern on the remote.
	remoteRefs, err := repo.runGitCommand("ls-remote", remote, notesRefPattern)
	if err != nil {
		return err
	}
	// Each ls-remote line is "<hash>\t<ref>"; merge every fetched notes ref
	// into its local counterpart.
	for _, line := range strings.Split(remoteRefs, "\n") {
		lineParts := strings.Split(line, "\t")
		if len(lineParts) == 2 {
			ref := lineParts[1]
			remoteRef := getRemoteNotesRef(remote, ref)
			_, err := repo.runGitCommand("notes", "--ref", ref, "merge", remoteRef, "-s", "cat_sort_uniq")
			if err != nil {
				return err
			}
		}
	}
	return nil
}
Submitting review 2c9bff89f0f8
Improve the error messages returned when a git command fails.
Previously, we were simply cascading the error returned by the instance
of exec.Command. However, that winds up just being something of the form
"exit status 128", with all of the real error message going to the
Stderr field.
As such, this commit changes the behavior to save the data written to
stderr, and use it to construct a new error to return.
/*
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package repository contains helper methods for working with the Git repo.
package repository
import (
"bytes"
"crypto/sha1"
"encoding/json"
"fmt"
"os"
"os/exec"
"strings"
)
// branchRefPrefix is the fully-qualified ref namespace for local branches.
const branchRefPrefix = "refs/heads/"

// GitRepo represents an instance of a (local) git repository.
type GitRepo struct {
	// Path is the working directory in which all git commands are executed.
	Path string
}
// runGitCommandRaw runs the given git command and returns its trimmed stdout
// and stderr separately, along with the raw error from running the command.
func (repo *GitRepo) runGitCommandRaw(args ...string) (string, string, error) {
	cmd := exec.Command("git", args...)
	cmd.Dir = repo.Path
	// Capture stdout and stderr separately so callers can surface git's real
	// error message instead of the opaque "exit status N".
	var stdout bytes.Buffer
	var stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	err := cmd.Run()
	return strings.TrimSpace(stdout.String()), strings.TrimSpace(stderr.String()), err
}
// Run the given git command and return its stdout, or an error if the command fails.
// On failure the error message is git's own stderr output.
func (repo *GitRepo) runGitCommand(args ...string) (string, error) {
	stdout, stderr, err := repo.runGitCommandRaw(args...)
	if err != nil {
		// stderr must not be used as a format string: fmt.Errorf(stderr) would
		// misinterpret any "%" in git's output (and fails vet's printf check).
		err = fmt.Errorf("%s", stderr)
	}
	return stdout, err
}
// runGitCommandInline runs the given git command wired directly to the review
// tool's own stdin, stdout, and stderr (for interactive commands).
func (repo *GitRepo) runGitCommandInline(args ...string) error {
	cmd := exec.Command("git", args...)
	cmd.Dir = repo.Path
	cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
	return cmd.Run()
}
// NewGitRepo determines if the given working directory is inside of a git repository,
// and returns the corresponding GitRepo instance if it is.
func NewGitRepo(path string) (*GitRepo, error) {
	repo := &GitRepo{Path: path}
	// "git rev-parse" succeeds exactly when run inside a git repository.
	// The previous *exec.ExitError type assertion was dead code: both the
	// assertion branch and the fallback returned the identical (nil, err).
	if _, _, err := repo.runGitCommandRaw("rev-parse"); err != nil {
		return nil, err
	}
	return repo, nil
}
// GetPath returns the path to the repo.
// This is the same directory in which all git commands are run.
func (repo *GitRepo) GetPath() string {
	return repo.Path
}
// GetRepoStateHash returns a hash which embodies the entire current state of a repository.
func (repo *GitRepo) GetRepoStateHash() (string, error) {
	// "show-ref" lists every ref with its commit, so hashing its output
	// captures the observable ref state of the repo.
	// The result variable was previously named "error", shadowing the
	// predeclared error type; renamed to the conventional "err".
	stateSummary, err := repo.runGitCommand("show-ref")
	return fmt.Sprintf("%x", sha1.Sum([]byte(stateSummary))), err
}
// GetUserEmail returns the email address that the user has used to configure git.
// Reads the effective "user.email" git config value for this repository.
func (repo *GitRepo) GetUserEmail() (string, error) {
	return repo.runGitCommand("config", "user.email")
}
// HasUncommittedChanges returns true if there are local, uncommitted changes.
func (repo *GitRepo) HasUncommittedChanges() (bool, error) {
	out, err := repo.runGitCommand("status", "--porcelain")
	if err != nil {
		return false, err
	}
	// "status --porcelain" prints one line per changed file and nothing when
	// the tree is clean; the if/return-true/return-false chain is collapsed.
	return len(out) > 0, nil
}
// VerifyCommit verifies that the supplied hash points to a known commit.
func (repo *GitRepo) VerifyCommit(hash string) error {
	out, err := repo.runGitCommand("cat-file", "-t", hash)
	if err != nil {
		return err
	}
	// out is already a string; the previous string(out) conversion was redundant.
	objectType := strings.TrimSpace(out)
	if objectType != "commit" {
		return fmt.Errorf("Hash %q points to a non-commit object of type %q", hash, objectType)
	}
	return nil
}
// VerifyGitRef verifies that the supplied ref points to a known commit.
// "show-ref --verify" requires a fully-qualified ref name (e.g. "refs/heads/x")
// and exits non-zero for unknown or abbreviated refs.
func (repo *GitRepo) VerifyGitRef(ref string) error {
	_, err := repo.runGitCommand("show-ref", "--verify", ref)
	return err
}
// GetHeadRef returns the ref that is the current HEAD.
// NOTE(review): "symbolic-ref HEAD" fails when HEAD is detached — presumably
// callers only invoke this while a branch is checked out; confirm.
func (repo *GitRepo) GetHeadRef() (string, error) {
	return repo.runGitCommand("symbolic-ref", "HEAD")
}
// GetCommitHash returns the hash of the commit pointed to by the given ref.
// The "%H" format prints the full commit hash.
func (repo *GitRepo) GetCommitHash(ref string) (string, error) {
	return repo.runGitCommand("show", "-s", "--format=%H", ref)
}
// ResolveRefCommit returns the commit pointed to by the given ref, which may be a remote ref.
//
// This differs from GetCommitHash which only works on exact matches, in that it will try to
// intelligently handle the scenario of a ref not existing locally, but being known to exist
// in a remote repo.
//
// This method should be used when a command may be performed by either the reviewer or the
// reviewee, while GetCommitHash should be used when the encompassing command should only be
// performed by the reviewee.
func (repo *GitRepo) ResolveRefCommit(ref string) (string, error) {
	// Fast path: the ref exists locally.
	if err := repo.VerifyGitRef(ref); err == nil {
		return repo.GetCommitHash(ref)
	}
	// Use the package-level branchRefPrefix constant instead of repeating the
	// "refs/heads/" literal (consistency with SwitchToRef).
	if strings.HasPrefix(ref, branchRefPrefix) {
		// The ref is a branch. Check if it exists in exactly one remote
		pattern := strings.Replace(ref, "refs/heads", "**", 1)
		matchingOutput, err := repo.runGitCommand("for-each-ref", "--format=%(refname)", pattern)
		if err != nil {
			return "", err
		}
		matchingRefs := strings.Split(matchingOutput, "\n")
		if len(matchingRefs) == 1 && matchingRefs[0] != "" {
			// There is exactly one match
			return repo.GetCommitHash(matchingRefs[0])
		}
		return "", fmt.Errorf("Unable to find a git ref matching the pattern %q", pattern)
	}
	return "", fmt.Errorf("Unknown git ref %q", ref)
}
// GetCommitMessage returns the message stored in the commit pointed to by the given ref.
// The "%B" format prints the raw commit body (subject plus message).
func (repo *GitRepo) GetCommitMessage(ref string) (string, error) {
	return repo.runGitCommand("show", "-s", "--format=%B", ref)
}
// GetCommitTime returns the commit time of the commit pointed to by the given ref.
// The "%ct" format prints the committer date as a Unix timestamp string.
func (repo *GitRepo) GetCommitTime(ref string) (string, error) {
	return repo.runGitCommand("show", "-s", "--format=%ct", ref)
}
// GetLastParent returns the last parent of the given commit (as ordered by git).
// Skips the commit itself and takes the next entry in rev-list's default order.
func (repo *GitRepo) GetLastParent(ref string) (string, error) {
	return repo.runGitCommand("rev-list", "--skip", "1", "-n", "1", ref)
}
// GetCommitDetails returns the details of a commit's metadata.
// NOTE(review): this method uses a value receiver while every other method on
// GitRepo uses a pointer receiver — presumably unintentional; confirm.
func (repo GitRepo) GetCommitDetails(ref string) (*CommitDetails, error) {
	var err error
	// show runs "git show" with the given format string, but becomes a no-op
	// once any earlier invocation has failed (sticky-error pattern).
	show := func(formatString string) (result string) {
		if err != nil {
			return ""
		}
		result, err = repo.runGitCommand("show", "-s", ref, fmt.Sprintf("--format=tformat:%s", formatString))
		return result
	}
	// The tree hash ("%T") and author timestamp ("%at") are fetched as a tiny JSON document.
	jsonFormatString := "{\"tree\":\"%T\", \"time\": \"%at\"}"
	detailsJSON := show(jsonFormatString)
	if err != nil {
		return nil, err
	}
	var details CommitDetails
	err = json.Unmarshal([]byte(detailsJSON), &details)
	if err != nil {
		return nil, err
	}
	details.Author = show("%an")
	details.AuthorEmail = show("%ae")
	details.Summary = show("%s")
	// "%P" lists the parent hashes separated by single spaces.
	parentsString := show("%P")
	details.Parents = strings.Split(parentsString, " ")
	// Check the sticky error once for the whole batch of show calls above.
	if err != nil {
		return nil, err
	}
	return &details, nil
}
// MergeBase returns the merge base of the two given revisions — the best
// common ancestor to use in a three-way merge (via "git merge-base").
func (repo *GitRepo) MergeBase(a, b string) (string, error) {
	return repo.runGitCommand("merge-base", a, b)
}
// IsAncestor determines if the first argument points to a commit that is an ancestor of the second.
func (repo *GitRepo) IsAncestor(ancestor, descendant string) (bool, error) {
	_, _, err := repo.runGitCommandRaw("merge-base", "--is-ancestor", ancestor, descendant)
	if err == nil {
		// Exit status 0 means "yes, it is an ancestor".
		return true, nil
	}
	if _, ok := err.(*exec.ExitError); ok {
		// A non-zero exit status means "no" — it is not a command failure.
		return false, nil
	}
	// Anything else (e.g. git not installed) is a genuine error.
	return false, fmt.Errorf("Error while trying to determine commit ancestry: %v", err)
}
// Diff computes the diff between two given commits.
// Extra diffArgs are inserted before the "left..right" revision range.
func (repo *GitRepo) Diff(left, right string, diffArgs ...string) (string, error) {
	cmdArgs := append([]string{"diff"}, diffArgs...)
	cmdArgs = append(cmdArgs, fmt.Sprintf("%s..%s", left, right))
	return repo.runGitCommand(cmdArgs...)
}
// Show returns the contents of the given file at the given commit.
func (repo *GitRepo) Show(commit, path string) (string, error) {
	// git's "<commit>:<path>" syntax addresses a blob within a commit.
	spec := commit + ":" + path
	return repo.runGitCommand("show", spec)
}
// SwitchToRef changes the currently-checked-out ref.
func (repo *GitRepo) SwitchToRef(ref string) error {
	// Check out the bare branch name rather than "refs/heads/<name>",
	// or else we would wind up in a detached HEAD state.
	target := strings.TrimPrefix(ref, branchRefPrefix)
	_, err := repo.runGitCommand("checkout", target)
	return err
}
// MergeRef merges the given ref into the current one.
//
// The ref argument is the ref to merge, and fastForward indicates that the
// current ref should only move forward, as opposed to creating a bubble merge.
// The messages argument(s) provide text that should be included in the default
// merge commit message (separated by blank lines).
func (repo *GitRepo) MergeRef(ref string, fastForward bool, messages ...string) error {
	ffFlags := []string{"--no-ff"}
	if fastForward {
		ffFlags = []string{"--ff", "--ff-only"}
	}
	mergeArgs := append([]string{"merge"}, ffFlags...)
	if len(messages) > 0 {
		// "-e" still opens the editor so the user can adjust the message.
		mergeArgs = append(mergeArgs, "-e", "-m", strings.Join(messages, "\n\n"))
	}
	mergeArgs = append(mergeArgs, ref)
	return repo.runGitCommandInline(mergeArgs...)
}
// RebaseRef rebases the given ref into the current one.
func (repo *GitRepo) RebaseRef(ref string) error {
	// "-i" runs the rebase interactively, letting the user edit the plan.
	err := repo.runGitCommandInline("rebase", "-i", ref)
	return err
}
// ListCommitsBetween returns the list of commits between the two given revisions.
//
// The "from" parameter is the starting point (exclusive), and the "to" parameter
// is the ending point (inclusive). If the commit pointed to by the "from" parameter
// is not an ancestor of the commit pointed to by the "to" parameter, then the
// merge base of the two is used as the starting point.
//
// The generated list is in chronological order (with the oldest commit first).
func (repo *GitRepo) ListCommitsBetween(from, to string) ([]string, error) {
	revRange := from + ".." + to
	out, err := repo.runGitCommand("rev-list", "--reverse", "--ancestry-path", revRange)
	if err != nil {
		return nil, err
	}
	if len(out) == 0 {
		return nil, nil
	}
	return strings.Split(out, "\n"), nil
}
// GetNotes uses the "git" command-line tool to read the notes from the given ref for a given revision.
func (repo *GitRepo) GetNotes(notesRef, revision string) []Note {
	raw, err := repo.runGitCommand("notes", "--ref", notesRef, "show", revision)
	if err != nil {
		// We just assume that this means there are no notes
		return nil
	}
	lines := strings.Split(raw, "\n")
	notes := make([]Note, 0, len(lines))
	for _, line := range lines {
		notes = append(notes, Note([]byte(line)))
	}
	return notes
}
// AppendNote appends a note to a revision under the given ref.
func (repo *GitRepo) AppendNote(notesRef, revision string, note Note) error {
	message := string(note)
	_, err := repo.runGitCommand("notes", "--ref", notesRef, "append", "-m", message, revision)
	return err
}
// ListNotedRevisions returns the collection of revisions that are annotated by notes in the given ref.
func (repo *GitRepo) ListNotedRevisions(notesRef string) []string {
	out, err := repo.runGitCommand("notes", "--ref", notesRef, "list")
	if err != nil {
		return nil
	}
	var revisions []string
	// Each line of "git notes list" output is "<note object> <annotated object>".
	for _, entry := range strings.Split(out, "\n") {
		fields := strings.SplitN(entry, " ", 2)
		if len(fields) != 2 {
			continue
		}
		objHash := fields[1]
		objType, typeErr := repo.runGitCommand("cat-file", "-t", objHash)
		// If a note points to an object that we do not know about (yet), then
		// typeErr will not be nil. We can safely just ignore those notes.
		if typeErr == nil && objType == "commit" {
			revisions = append(revisions, objHash)
		}
	}
	return revisions
}
// PushNotes pushes git notes to a remote repo.
func (repo *GitRepo) PushNotes(remote, notesRefPattern string) error {
	// Push the notes ref to the identically-named ref on the remote.
	refspec := fmt.Sprintf("%s:%s", notesRefPattern, notesRefPattern)
	// The push is liable to fail if the user forgot to do a pull first, so
	// we treat errors as user errors rather than fatal errors.
	// (Error string lowercased per Go convention.)
	if err := repo.runGitCommandInline("push", remote, refspec); err != nil {
		return fmt.Errorf("failed to push to the remote '%s': %v", remote, err)
	}
	return nil
}
// getRemoteNotesRef maps a local notes ref onto the remote-tracking notes
// namespace: "refs/notes/<rest>" becomes "refs/notes/<remote>/<rest>".
func getRemoteNotesRef(remote, localNotesRef string) string {
	suffix := strings.TrimPrefix(localNotesRef, "refs/notes/")
	return strings.Join([]string{"refs/notes", remote, suffix}, "/")
}
// PullNotes fetches the contents of the given notes ref from a remote repo,
// and then merges them with the corresponding local notes using the
// "cat_sort_uniq" strategy.
func (repo *GitRepo) PullNotes(remote, notesRefPattern string) error {
	// Fetch the remote notes into the remote-tracking notes namespace.
	trackingPattern := getRemoteNotesRef(remote, notesRefPattern)
	fetchRefSpec := fmt.Sprintf("+%s:%s", notesRefPattern, trackingPattern)
	if err := repo.runGitCommandInline("fetch", remote, fetchRefSpec); err != nil {
		return err
	}
	// Each ls-remote line is "<hash>\t<ref>"; merge every matching ref.
	remoteRefs, err := repo.runGitCommand("ls-remote", remote, notesRefPattern)
	if err != nil {
		return err
	}
	for _, line := range strings.Split(remoteRefs, "\n") {
		lineParts := strings.Split(line, "\t")
		if len(lineParts) != 2 {
			continue
		}
		localRef := lineParts[1]
		trackingRef := getRemoteNotesRef(remote, localRef)
		if _, err := repo.runGitCommand("notes", "--ref", localRef, "merge", trackingRef, "-s", "cat_sort_uniq"); err != nil {
			return err
		}
	}
	return nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.