CombinedText stringlengths 4 3.42M |
|---|
package api
import (
"bytes"
"code.google.com/p/goauth2/oauth"
"fmt"
"github.com/dgrijalva/jwt-go"
"github.com/tsuru/config"
"launchpad.net/gocheck"
"net/http"
"net/http/httptest"
"os"
)
// GetGoogleOAuthCode returns the OAuth authorization code taken from the
// GOOGLE_OAUTH_CODE environment variable. When the variable is unset it
// returns an error whose message contains the URL the user must visit to
// obtain a code.
func GetGoogleOAuthCode(oauthConfig *oauth.Config) (string, error) {
	if code := os.Getenv("GOOGLE_OAUTH_CODE"); code != "" {
		return code, nil
	}
	url := oauthConfig.AuthCodeURL("")
	return "", fmt.Errorf("Visit this URL (%s) to get a code, then put it in an environment variable called GOOGLE_OAUTH_CODE.\n", url)
}
// TestAuthenticateWithGoogle exercises the /authenticate/google endpoint
// end-to-end against Google's real OAuth service. The test is skipped
// unless GOOGLE_CLIENT_ID, GOOGLE_CLIENT_SECRET and GOOGLE_OAUTH_CODE are
// provided in the environment. On success it verifies that the handler
// returns 200 and an X-Auth-Token header containing a JWT signed with
// "my-security-key" and carrying the expected claims.
func (s *Suite) TestAuthenticateWithGoogle(c *gocheck.C) {
	loadConfig("../testdata/etc/featness-api1.conf")
	clientId := os.Getenv("GOOGLE_CLIENT_ID")
	if clientId == "" {
		c.Skip("Please put your google oauth app client id in an environment variable called GOOGLE_CLIENT_ID.\n")
	}
	secret := os.Getenv("GOOGLE_CLIENT_SECRET")
	if secret == "" {
		c.Skip("Please put your google oauth app client secret in an environment variable called GOOGLE_CLIENT_SECRET.\n")
	}
	// Inject the credentials into the application config used by the handler.
	config.Set("google_client_id", clientId)
	config.Set("google_client_secret", secret)
	config.Set("google_token_cache_path", "/tmp/cache.json")
	oauthConfig, err := GetGoogleOAuthConfig()
	if err != nil {
		c.Skip(err.Error())
	}
	code, err := GetGoogleOAuthCode(oauthConfig)
	if err != nil {
		c.Skip(err.Error())
		return
	}
	recorder := httptest.NewRecorder()
	request, err := http.NewRequest("GET", "/authenticate/google", nil)
	c.Assert(err, gocheck.IsNil)
	// The handler reads "<email>;<oauth code>" from the X-Auth-Data header.
	request.Header.Add("X-Auth-Data", fmt.Sprintf("heynemann@gmail.com;%s", code))
	AuthenticateWithGoogle(recorder, request)
	c.Assert(recorder.Code, gocheck.Equals, http.StatusOK)
	header, ok := recorder.HeaderMap["X-Auth-Token"]
	c.Assert(ok, gocheck.Equals, true)
	c.Assert(header, gocheck.NotNil)
	// Parse the returned JWT with the shared signing key and check claims.
	buf := new(bytes.Buffer)
	buf.Write([]byte("my-security-key"))
	key := buf.Bytes()
	token, err := jwt.Parse(header[0], func(t *jwt.Token) ([]byte, error) { return key, nil })
	c.Assert(token, gocheck.NotNil)
	c.Assert(token.Valid, gocheck.Equals, true)
	c.Assert(token.Claims["token"], gocheck.NotNil)
	c.Assert(token.Claims["sub"], gocheck.Equals, "heynemann@gmail.com")
	c.Assert(token.Claims["iss"], gocheck.Equals, "Google")
	c.Assert(token.Claims["iat"], gocheck.NotNil)
	c.Assert(token.Claims["exp"], gocheck.NotNil)
}
Changed gocheck to ginkgo
package api
import (
"bytes"
"code.google.com/p/goauth2/oauth"
"fmt"
"github.com/dgrijalva/jwt-go"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/tsuru/config"
"net/http"
"net/http/httptest"
"os"
)
// GetGoogleOAuthCode reads the OAuth authorization code from the
// GOOGLE_OAUTH_CODE environment variable; when it is missing, an error is
// returned telling the user which URL to visit to obtain one.
func GetGoogleOAuthCode(oauthConfig *oauth.Config) (string, error) {
	code := os.Getenv("GOOGLE_OAUTH_CODE")
	if code != "" {
		return code, nil
	}
	return "", fmt.Errorf("Visit this URL (%s) to get a code, then put it in an environment variable called GOOGLE_OAUTH_CODE.\n", oauthConfig.AuthCodeURL(""))
}
// End-to-end spec for the Google authentication endpoint. The spec is
// skipped (instead of failing) when the required GOOGLE_CLIENT_ID /
// GOOGLE_CLIENT_SECRET / GOOGLE_OAUTH_CODE environment variables are not
// available, mirroring the behavior of the original gocheck test.
var _ = Describe("API google authenticate Module", func() {
	Context("when GoogleAuthenticationProvider is called", func() {
		It("should have generate token", func() {
			loadConfig("../testdata/etc/featness-api1.conf")
			clientId := os.Getenv("GOOGLE_CLIENT_ID")
			if clientId == "" {
				// Skip rather than print: printing let the spec continue
				// with an incomplete configuration and fail confusingly.
				Skip("Please put your google oauth app client id in an environment variable called GOOGLE_CLIENT_ID.\n")
				return
			}
			secret := os.Getenv("GOOGLE_CLIENT_SECRET")
			if secret == "" {
				Skip("Please put your google oauth app client secret in an environment variable called GOOGLE_CLIENT_SECRET.\n")
				return
			}
			config.Set("google_client_id", clientId)
			config.Set("google_client_secret", secret)
			config.Set("google_token_cache_path", "/tmp/cache.json")
			oauthConfig, err := GetGoogleOAuthConfig()
			if err != nil {
				// Previously execution fell through here with a nil
				// oauthConfig, which GetGoogleOAuthCode then dereferenced.
				Skip(err.Error())
				return
			}
			code, err := GetGoogleOAuthCode(oauthConfig)
			if err != nil {
				Skip(err.Error())
				return
			}
			recorder := httptest.NewRecorder()
			request, err := http.NewRequest("GET", "/authenticate/google", nil)
			Expect(err).Should(BeNil())
			// The handler expects "<email>;<oauth code>" in X-Auth-Data.
			request.Header.Add("X-Auth-Data", fmt.Sprintf("heynemann@gmail.com;%s", code))
			AuthenticateWithGoogle(recorder, request)
			Expect(recorder.Code).Should(Equal(200))
			header, ok := recorder.HeaderMap["X-Auth-Token"]
			Expect(ok).Should(Equal(true))
			Expect(header).ShouldNot(BeNil())
			// Verify the JWT is signed with the shared key and carries the
			// expected claims.
			buf := new(bytes.Buffer)
			buf.Write([]byte("my-security-key"))
			key := buf.Bytes()
			token, err := jwt.Parse(header[0], func(t *jwt.Token) ([]byte, error) { return key, nil })
			Expect(token).ShouldNot(BeNil())
			Expect(token.Valid).Should(Equal(true))
			Expect(token.Claims["token"]).ShouldNot(BeNil())
			Expect(token.Claims["sub"]).Should(Equal("heynemann@gmail.com"))
			Expect(token.Claims["iss"]).Should(Equal("Google"))
			Expect(token.Claims["iat"]).ShouldNot(BeNil())
			Expect(token.Claims["exp"]).ShouldNot(BeNil())
		})
	})
})
|
// Copyright © 2017 The Things Network
// Use of this source code is governed by the MIT license that can be found in the LICENSE file.
package monitor
import (
"context"
"io"
"sync/atomic"
"github.com/TheThingsNetwork/go-utils/log"
"github.com/TheThingsNetwork/ttn/api"
"github.com/TheThingsNetwork/ttn/api/broker"
"github.com/TheThingsNetwork/ttn/api/fields"
"github.com/TheThingsNetwork/ttn/api/gateway"
"github.com/TheThingsNetwork/ttn/api/router"
"github.com/TheThingsNetwork/ttn/utils/errors"
"github.com/golang/protobuf/ptypes/empty"
)
// NewReferenceMonitorServer creates a new reference monitor server.
//
// It allocates one buffered channel per message type (each with capacity
// bufferSize) and starts bufferSize background goroutines that drain all
// channels and increment the corresponding atomic metrics counters.
//
// NOTE(review): the drain goroutines have no stop signal and run for the
// lifetime of the process — acceptable for a reference server, but confirm
// this is intended before reusing the pattern.
func NewReferenceMonitorServer(bufferSize int) *ReferenceMonitorServer {
	fields.Debug = true
	s := &ReferenceMonitorServer{
		ctx:                     log.Get(),
		gatewayStatuses:         make(chan *gateway.Status, bufferSize),
		uplinkMessages:          make(chan *router.UplinkMessage, bufferSize),
		downlinkMessages:        make(chan *router.DownlinkMessage, bufferSize),
		brokerUplinkMessages:    make(chan *broker.DeduplicatedUplinkMessage, bufferSize),
		brokerDownlinkMessages:  make(chan *broker.DownlinkMessage, bufferSize),
		handlerUplinkMessages:   make(chan *broker.DeduplicatedUplinkMessage, bufferSize),
		handlerDownlinkMessages: make(chan *broker.DownlinkMessage, bufferSize),
	}
	for i := 0; i < bufferSize; i++ {
		go func() {
			for {
				// Consume whichever channel has a message and count it.
				select {
				case <-s.gatewayStatuses:
					atomic.AddUint64(&s.metrics.gatewayStatuses, 1)
				case <-s.uplinkMessages:
					atomic.AddUint64(&s.metrics.uplinkMessages, 1)
				case <-s.downlinkMessages:
					atomic.AddUint64(&s.metrics.downlinkMessages, 1)
				case <-s.brokerUplinkMessages:
					atomic.AddUint64(&s.metrics.brokerUplinkMessages, 1)
				case <-s.brokerDownlinkMessages:
					atomic.AddUint64(&s.metrics.brokerDownlinkMessages, 1)
				case <-s.handlerUplinkMessages:
					atomic.AddUint64(&s.metrics.handlerUplinkMessages, 1)
				case <-s.handlerDownlinkMessages:
					atomic.AddUint64(&s.metrics.handlerDownlinkMessages, 1)
				}
			}
		}()
	}
	return s
}
// metrics holds per-message-type counters. All fields are updated with
// atomic.AddUint64 by the drain goroutines, so reads should use the atomic
// package as well.
type metrics struct {
	gatewayStatuses         uint64
	uplinkMessages          uint64
	downlinkMessages        uint64
	brokerUplinkMessages    uint64
	brokerDownlinkMessages  uint64
	handlerUplinkMessages   uint64
	handlerDownlinkMessages uint64
}
// ReferenceMonitorServer is a reference implementation of the monitor
// server. Incoming messages are offered to per-type buffered channels
// (non-blocking; excess messages are dropped) and counted in metrics by
// background goroutines started in NewReferenceMonitorServer.
type ReferenceMonitorServer struct {
	ctx log.Interface // logger used for all stream/auth events

	gatewayStatuses         chan *gateway.Status
	uplinkMessages          chan *router.UplinkMessage
	downlinkMessages        chan *router.DownlinkMessage
	brokerUplinkMessages    chan *broker.DeduplicatedUplinkMessage
	brokerDownlinkMessages  chan *broker.DownlinkMessage
	handlerUplinkMessages   chan *broker.DeduplicatedUplinkMessage
	handlerDownlinkMessages chan *broker.DownlinkMessage

	metrics metrics
}
// getAndAuthGateway extracts the gateway ID and token from the gRPC
// metadata in ctx and returns the ID. The token is only logged — no real
// validation is performed (this is a reference implementation).
func (s *ReferenceMonitorServer) getAndAuthGateway(ctx context.Context) (string, error) {
	id, err := api.IDFromContext(ctx)
	if err != nil {
		return "", err
	}
	token, err := api.TokenFromContext(ctx)
	if err != nil {
		return "", err
	}
	// Actually validate token here, if failed: return nil, grpc.Errorf(codes.Unauthenticated, "Gateway Authentication Failed")
	s.ctx.WithFields(log.Fields{"ID": id, "Token": token}).Info("Gateway Authenticated")
	return id, nil
}
// GatewayStatus RPC.
//
// Consumes a client-side stream of gateway status messages. Each message
// is logged and offered to the buffered gatewayStatuses channel; when the
// buffer is full the message is dropped. The stream ends on io.EOF, on a
// Recv error, or when the stream context is cancelled.
func (s *ReferenceMonitorServer) GatewayStatus(stream Monitor_GatewayStatusServer) (err error) {
	gatewayID, err := s.getAndAuthGateway(stream.Context())
	if err != nil {
		return errors.NewErrPermissionDenied(err.Error())
	}
	ctx := s.ctx.WithField("GatewayID", gatewayID)
	ctx.Info("GatewayStatus stream started")
	// err is the named return, so this defer observes the actual
	// termination error set by any return below.
	defer func() {
		if err != nil {
			ctx.WithError(err).Info("GatewayStatus stream ended")
		} else {
			ctx.Info("GatewayStatus stream ended")
		}
	}()
	// Watch for context cancellation in the background; the loop polls
	// streamErr before each Recv.
	var streamErr atomic.Value
	go func() {
		<-stream.Context().Done()
		streamErr.Store(stream.Context().Err())
	}()
	for {
		// Shadows the atomic.Value with its loaded value for this iteration.
		streamErr := streamErr.Load()
		if streamErr != nil {
			return streamErr.(error)
		}
		msg, err := stream.Recv()
		if err == io.EOF {
			return stream.SendAndClose(&empty.Empty{})
		}
		if err != nil {
			return err
		}
		ctx.WithFields(fields.Get(msg)).Info("Received GatewayStatus")
		// Non-blocking send: drop rather than stall the stream.
		select {
		case s.gatewayStatuses <- msg:
		default:
			ctx.Warn("Dropping Status")
		}
	}
}
// GatewayUplink RPC.
//
// Consumes a client-side stream of gateway uplink messages. Each message
// is logged and offered to the buffered uplinkMessages channel; when the
// buffer is full the message is dropped.
//
// The return is named so that the deferred logging observes the actual
// termination error (previously the defer captured a local err that the
// loop's shadowed `msg, err :=` never updated, so stream errors were
// logged as clean ends). This matches the GatewayStatus signature.
func (s *ReferenceMonitorServer) GatewayUplink(stream Monitor_GatewayUplinkServer) (err error) {
	gatewayID, err := s.getAndAuthGateway(stream.Context())
	if err != nil {
		return errors.NewErrPermissionDenied(err.Error())
	}
	ctx := s.ctx.WithField("GatewayID", gatewayID)
	ctx.Info("GatewayUplink stream started")
	defer func() {
		if err != nil {
			ctx.WithError(err).Info("GatewayUplink stream ended")
		} else {
			ctx.Info("GatewayUplink stream ended")
		}
	}()
	// Background watcher records context cancellation for the loop to pick up.
	var streamErr atomic.Value
	go func() {
		<-stream.Context().Done()
		streamErr.Store(stream.Context().Err())
	}()
	for {
		streamErr := streamErr.Load()
		if streamErr != nil {
			return streamErr.(error)
		}
		msg, err := stream.Recv()
		if err == io.EOF {
			return stream.SendAndClose(&empty.Empty{})
		}
		if err != nil {
			return err
		}
		ctx.WithFields(fields.Get(msg)).Info("Received UplinkMessage")
		// Non-blocking send: drop rather than stall the stream.
		select {
		case s.uplinkMessages <- msg:
		default:
			ctx.Warn("Dropping UplinkMessage")
		}
	}
}
// GatewayDownlink RPC.
//
// Consumes a client-side stream of gateway downlink messages. Each message
// is logged and offered to the buffered downlinkMessages channel; when the
// buffer is full the message is dropped.
//
// The return is named so the deferred logging sees the real termination
// error (the loop's `msg, err :=` shadowed the variable the defer captured),
// consistent with GatewayStatus.
func (s *ReferenceMonitorServer) GatewayDownlink(stream Monitor_GatewayDownlinkServer) (err error) {
	gatewayID, err := s.getAndAuthGateway(stream.Context())
	if err != nil {
		return errors.NewErrPermissionDenied(err.Error())
	}
	ctx := s.ctx.WithField("GatewayID", gatewayID)
	ctx.Info("GatewayDownlink stream started")
	defer func() {
		if err != nil {
			ctx.WithError(err).Info("GatewayDownlink stream ended")
		} else {
			ctx.Info("GatewayDownlink stream ended")
		}
	}()
	// Background watcher records context cancellation for the loop to pick up.
	var streamErr atomic.Value
	go func() {
		<-stream.Context().Done()
		streamErr.Store(stream.Context().Err())
	}()
	for {
		streamErr := streamErr.Load()
		if streamErr != nil {
			return streamErr.(error)
		}
		msg, err := stream.Recv()
		if err == io.EOF {
			return stream.SendAndClose(&empty.Empty{})
		}
		if err != nil {
			return err
		}
		ctx.WithFields(fields.Get(msg)).Info("Received DownlinkMessage")
		// Non-blocking send: drop rather than stall the stream.
		select {
		case s.downlinkMessages <- msg:
		default:
			ctx.Warn("Dropping DownlinkMessage")
		}
	}
}
// getAndAuthBroker extracts the broker ID and token from the gRPC metadata
// in ctx and returns the ID. The token is only logged — no real validation
// is performed (this is a reference implementation).
func (s *ReferenceMonitorServer) getAndAuthBroker(ctx context.Context) (string, error) {
	id, err := api.IDFromContext(ctx)
	if err != nil {
		return "", err
	}
	token, err := api.TokenFromContext(ctx)
	if err != nil {
		return "", err
	}
	// Actually validate token here, if failed: return nil, grpc.Errorf(codes.Unauthenticated, "Broker Authentication Failed")
	s.ctx.WithFields(log.Fields{"ID": id, "Token": token}).Info("Broker Authenticated")
	return id, nil
}
// BrokerUplink RPC.
//
// Consumes a client-side stream of deduplicated uplink messages from a
// broker, offering each to the buffered brokerUplinkMessages channel
// (dropped when full).
//
// The return is named so the deferred logging sees the real termination
// error (the loop's `msg, err :=` shadowed the variable the defer captured),
// consistent with GatewayStatus.
func (s *ReferenceMonitorServer) BrokerUplink(stream Monitor_BrokerUplinkServer) (err error) {
	brokerID, err := s.getAndAuthBroker(stream.Context())
	if err != nil {
		return errors.NewErrPermissionDenied(err.Error())
	}
	ctx := s.ctx.WithField("BrokerID", brokerID)
	ctx.Info("BrokerUplink stream started")
	defer func() {
		if err != nil {
			ctx.WithError(err).Info("BrokerUplink stream ended")
		} else {
			ctx.Info("BrokerUplink stream ended")
		}
	}()
	// Background watcher records context cancellation for the loop to pick up.
	var streamErr atomic.Value
	go func() {
		<-stream.Context().Done()
		streamErr.Store(stream.Context().Err())
	}()
	for {
		streamErr := streamErr.Load()
		if streamErr != nil {
			return streamErr.(error)
		}
		msg, err := stream.Recv()
		if err == io.EOF {
			return stream.SendAndClose(&empty.Empty{})
		}
		if err != nil {
			return err
		}
		ctx.WithFields(fields.Get(msg)).Info("Received DeduplicatedUplinkMessage")
		// Non-blocking send: drop rather than stall the stream.
		select {
		case s.brokerUplinkMessages <- msg:
		default:
			ctx.Warn("Dropping DeduplicatedUplinkMessage")
		}
	}
}
// BrokerDownlink RPC.
//
// Consumes a client-side stream of downlink messages from a broker,
// offering each to the buffered brokerDownlinkMessages channel (dropped
// when full).
//
// Fixes: the start/end log messages previously said "BrokerUplink"
// (copy-paste from BrokerUplink); the return is now named so the deferred
// logging sees the real termination error, consistent with GatewayStatus.
func (s *ReferenceMonitorServer) BrokerDownlink(stream Monitor_BrokerDownlinkServer) (err error) {
	brokerID, err := s.getAndAuthBroker(stream.Context())
	if err != nil {
		return errors.NewErrPermissionDenied(err.Error())
	}
	ctx := s.ctx.WithField("BrokerID", brokerID)
	ctx.Info("BrokerDownlink stream started")
	defer func() {
		if err != nil {
			ctx.WithError(err).Info("BrokerDownlink stream ended")
		} else {
			ctx.Info("BrokerDownlink stream ended")
		}
	}()
	// Background watcher records context cancellation for the loop to pick up.
	var streamErr atomic.Value
	go func() {
		<-stream.Context().Done()
		streamErr.Store(stream.Context().Err())
	}()
	for {
		streamErr := streamErr.Load()
		if streamErr != nil {
			return streamErr.(error)
		}
		msg, err := stream.Recv()
		if err == io.EOF {
			return stream.SendAndClose(&empty.Empty{})
		}
		if err != nil {
			return err
		}
		ctx.WithFields(fields.Get(msg)).Info("Received DownlinkMessage")
		// Non-blocking send: drop rather than stall the stream.
		select {
		case s.brokerDownlinkMessages <- msg:
		default:
			ctx.Warn("Dropping DownlinkMessage")
		}
	}
}
// getAndAuthHandler extracts the handler ID and token from the gRPC
// metadata in ctx and returns the ID. The token is only logged — no real
// validation is performed (this is a reference implementation).
func (s *ReferenceMonitorServer) getAndAuthHandler(ctx context.Context) (string, error) {
	id, err := api.IDFromContext(ctx)
	if err != nil {
		return "", err
	}
	token, err := api.TokenFromContext(ctx)
	if err != nil {
		return "", err
	}
	// Actually validate token here, if failed: return nil, grpc.Errorf(codes.Unauthenticated, "Handler Authentication Failed")
	s.ctx.WithFields(log.Fields{"ID": id, "Token": token}).Info("Handler Authenticated")
	return id, nil
}
// HandlerUplink RPC.
//
// Consumes a client-side stream of deduplicated uplink messages from a
// handler, offering each to the buffered handlerUplinkMessages channel
// (dropped when full).
//
// The return is named so the deferred logging sees the real termination
// error (the loop's `msg, err :=` shadowed the variable the defer captured),
// consistent with GatewayStatus.
func (s *ReferenceMonitorServer) HandlerUplink(stream Monitor_HandlerUplinkServer) (err error) {
	handlerID, err := s.getAndAuthHandler(stream.Context())
	if err != nil {
		return errors.NewErrPermissionDenied(err.Error())
	}
	ctx := s.ctx.WithField("HandlerID", handlerID)
	ctx.Info("HandlerUplink stream started")
	defer func() {
		if err != nil {
			ctx.WithError(err).Info("HandlerUplink stream ended")
		} else {
			ctx.Info("HandlerUplink stream ended")
		}
	}()
	// Background watcher records context cancellation for the loop to pick up.
	var streamErr atomic.Value
	go func() {
		<-stream.Context().Done()
		streamErr.Store(stream.Context().Err())
	}()
	for {
		streamErr := streamErr.Load()
		if streamErr != nil {
			return streamErr.(error)
		}
		msg, err := stream.Recv()
		if err == io.EOF {
			return stream.SendAndClose(&empty.Empty{})
		}
		if err != nil {
			return err
		}
		ctx.WithFields(fields.Get(msg)).Info("Received DeduplicatedUplinkMessage")
		// Non-blocking send: drop rather than stall the stream.
		select {
		case s.handlerUplinkMessages <- msg:
		default:
			ctx.Warn("Dropping DeduplicatedUplinkMessage")
		}
	}
}
// HandlerDownlink RPC.
//
// Consumes a client-side stream of downlink messages from a handler,
// offering each to the buffered handlerDownlinkMessages channel (dropped
// when full).
//
// Fixes: the start/end log messages previously said "HandlerUplink"
// (copy-paste from HandlerUplink); the return is now named so the deferred
// logging sees the real termination error, consistent with GatewayStatus.
func (s *ReferenceMonitorServer) HandlerDownlink(stream Monitor_HandlerDownlinkServer) (err error) {
	handlerID, err := s.getAndAuthHandler(stream.Context())
	if err != nil {
		return errors.NewErrPermissionDenied(err.Error())
	}
	ctx := s.ctx.WithField("HandlerID", handlerID)
	ctx.Info("HandlerDownlink stream started")
	defer func() {
		if err != nil {
			ctx.WithError(err).Info("HandlerDownlink stream ended")
		} else {
			ctx.Info("HandlerDownlink stream ended")
		}
	}()
	// Background watcher records context cancellation for the loop to pick up.
	var streamErr atomic.Value
	go func() {
		<-stream.Context().Done()
		streamErr.Store(stream.Context().Err())
	}()
	for {
		streamErr := streamErr.Load()
		if streamErr != nil {
			return streamErr.(error)
		}
		msg, err := stream.Recv()
		if err == io.EOF {
			return stream.SendAndClose(&empty.Empty{})
		}
		if err != nil {
			return err
		}
		ctx.WithFields(fields.Get(msg)).Info("Received DownlinkMessage")
		// Non-blocking send: drop rather than stall the stream.
		select {
		case s.handlerDownlinkMessages <- msg:
		default:
			ctx.Warn("Dropping DownlinkMessage")
		}
	}
}
Distinguish join and regular message in ref monitor
// Copyright © 2017 The Things Network
// Use of this source code is governed by the MIT license that can be found in the LICENSE file.
package monitor
import (
"context"
"io"
"sync/atomic"
"github.com/TheThingsNetwork/go-utils/log"
"github.com/TheThingsNetwork/ttn/api"
"github.com/TheThingsNetwork/ttn/api/broker"
"github.com/TheThingsNetwork/ttn/api/fields"
"github.com/TheThingsNetwork/ttn/api/gateway"
"github.com/TheThingsNetwork/ttn/api/router"
"github.com/TheThingsNetwork/ttn/utils/errors"
"github.com/golang/protobuf/ptypes/empty"
)
// NewReferenceMonitorServer creates a new reference monitor server.
//
// It allocates one buffered channel per message type (each with capacity
// bufferSize) and starts bufferSize background goroutines that drain all
// channels and increment the corresponding atomic metrics counters.
//
// NOTE(review): the drain goroutines have no stop signal and run for the
// lifetime of the process — acceptable for a reference server, but confirm
// this is intended before reusing the pattern.
func NewReferenceMonitorServer(bufferSize int) *ReferenceMonitorServer {
	fields.Debug = true
	s := &ReferenceMonitorServer{
		ctx:                     log.Get(),
		gatewayStatuses:         make(chan *gateway.Status, bufferSize),
		uplinkMessages:          make(chan *router.UplinkMessage, bufferSize),
		downlinkMessages:        make(chan *router.DownlinkMessage, bufferSize),
		brokerUplinkMessages:    make(chan *broker.DeduplicatedUplinkMessage, bufferSize),
		brokerDownlinkMessages:  make(chan *broker.DownlinkMessage, bufferSize),
		handlerUplinkMessages:   make(chan *broker.DeduplicatedUplinkMessage, bufferSize),
		handlerDownlinkMessages: make(chan *broker.DownlinkMessage, bufferSize),
	}
	for i := 0; i < bufferSize; i++ {
		go func() {
			for {
				// Consume whichever channel has a message and count it.
				select {
				case <-s.gatewayStatuses:
					atomic.AddUint64(&s.metrics.gatewayStatuses, 1)
				case <-s.uplinkMessages:
					atomic.AddUint64(&s.metrics.uplinkMessages, 1)
				case <-s.downlinkMessages:
					atomic.AddUint64(&s.metrics.downlinkMessages, 1)
				case <-s.brokerUplinkMessages:
					atomic.AddUint64(&s.metrics.brokerUplinkMessages, 1)
				case <-s.brokerDownlinkMessages:
					atomic.AddUint64(&s.metrics.brokerDownlinkMessages, 1)
				case <-s.handlerUplinkMessages:
					atomic.AddUint64(&s.metrics.handlerUplinkMessages, 1)
				case <-s.handlerDownlinkMessages:
					atomic.AddUint64(&s.metrics.handlerDownlinkMessages, 1)
				}
			}
		}()
	}
	return s
}
// metrics holds per-message-type counters. All fields are updated with
// atomic.AddUint64 by the drain goroutines, so reads should use the atomic
// package as well.
type metrics struct {
	gatewayStatuses         uint64
	uplinkMessages          uint64
	downlinkMessages        uint64
	brokerUplinkMessages    uint64
	brokerDownlinkMessages  uint64
	handlerUplinkMessages   uint64
	handlerDownlinkMessages uint64
}
// ReferenceMonitorServer is a reference implementation of the monitor
// server. Incoming messages are offered to per-type buffered channels
// (non-blocking; excess messages are dropped) and counted in metrics by
// background goroutines started in NewReferenceMonitorServer.
type ReferenceMonitorServer struct {
	ctx log.Interface // logger used for all stream/auth events

	gatewayStatuses         chan *gateway.Status
	uplinkMessages          chan *router.UplinkMessage
	downlinkMessages        chan *router.DownlinkMessage
	brokerUplinkMessages    chan *broker.DeduplicatedUplinkMessage
	brokerDownlinkMessages  chan *broker.DownlinkMessage
	handlerUplinkMessages   chan *broker.DeduplicatedUplinkMessage
	handlerDownlinkMessages chan *broker.DownlinkMessage

	metrics metrics
}
// getAndAuthGateway extracts the gateway ID and token from the gRPC
// metadata in ctx and returns the ID. The token is only logged — no real
// validation is performed (this is a reference implementation).
func (s *ReferenceMonitorServer) getAndAuthGateway(ctx context.Context) (string, error) {
	id, err := api.IDFromContext(ctx)
	if err != nil {
		return "", err
	}
	token, err := api.TokenFromContext(ctx)
	if err != nil {
		return "", err
	}
	// Actually validate token here, if failed: return nil, grpc.Errorf(codes.Unauthenticated, "Gateway Authentication Failed")
	s.ctx.WithFields(log.Fields{"ID": id, "Token": token}).Info("Gateway Authenticated")
	return id, nil
}
// GatewayStatus RPC.
//
// Consumes a client-side stream of gateway status messages. Each message
// is logged and offered to the buffered gatewayStatuses channel; when the
// buffer is full the message is dropped. The stream ends on io.EOF, on a
// Recv error, or when the stream context is cancelled.
func (s *ReferenceMonitorServer) GatewayStatus(stream Monitor_GatewayStatusServer) (err error) {
	gatewayID, err := s.getAndAuthGateway(stream.Context())
	if err != nil {
		return errors.NewErrPermissionDenied(err.Error())
	}
	ctx := s.ctx.WithField("GatewayID", gatewayID)
	ctx.Info("GatewayStatus stream started")
	// err is the named return, so this defer observes the actual
	// termination error set by any return below.
	defer func() {
		if err != nil {
			ctx.WithError(err).Info("GatewayStatus stream ended")
		} else {
			ctx.Info("GatewayStatus stream ended")
		}
	}()
	// Watch for context cancellation in the background; the loop polls
	// streamErr before each Recv.
	var streamErr atomic.Value
	go func() {
		<-stream.Context().Done()
		streamErr.Store(stream.Context().Err())
	}()
	for {
		// Shadows the atomic.Value with its loaded value for this iteration.
		streamErr := streamErr.Load()
		if streamErr != nil {
			return streamErr.(error)
		}
		msg, err := stream.Recv()
		if err == io.EOF {
			return stream.SendAndClose(&empty.Empty{})
		}
		if err != nil {
			return err
		}
		ctx.WithFields(fields.Get(msg)).Info("Received GatewayStatus")
		// Non-blocking send: drop rather than stall the stream.
		select {
		case s.gatewayStatuses <- msg:
		default:
			ctx.Warn("Dropping Status")
		}
	}
}
// GatewayUplink RPC.
//
// Consumes a client-side stream of gateway uplink messages, logging join
// requests as ActivationRequest and everything else as UplinkMessage, and
// offering each message to the buffered uplinkMessages channel (dropped
// when full).
//
// The return is named so the deferred logging sees the real termination
// error (the loop's `msg, err :=` shadowed the variable the defer captured),
// consistent with GatewayStatus.
func (s *ReferenceMonitorServer) GatewayUplink(stream Monitor_GatewayUplinkServer) (err error) {
	gatewayID, err := s.getAndAuthGateway(stream.Context())
	if err != nil {
		return errors.NewErrPermissionDenied(err.Error())
	}
	ctx := s.ctx.WithField("GatewayID", gatewayID)
	ctx.Info("GatewayUplink stream started")
	defer func() {
		if err != nil {
			ctx.WithError(err).Info("GatewayUplink stream ended")
		} else {
			ctx.Info("GatewayUplink stream ended")
		}
	}()
	// Background watcher records context cancellation for the loop to pick up.
	var streamErr atomic.Value
	go func() {
		<-stream.Context().Done()
		streamErr.Store(stream.Context().Err())
	}()
	for {
		streamErr := streamErr.Load()
		if streamErr != nil {
			return streamErr.(error)
		}
		msg, err := stream.Recv()
		if err == io.EOF {
			return stream.SendAndClose(&empty.Empty{})
		}
		if err != nil {
			return err
		}
		// Decode the LoRaWAN payload so join requests can be distinguished.
		// NOTE(review): any unmarshalling error is ignored here — confirm
		// that is acceptable for a reference server.
		msg.UnmarshalPayload()
		if msg.GetMessage().GetLorawan().GetJoinRequestPayload() != nil {
			ctx.WithFields(fields.Get(msg)).Info("Received ActivationRequest")
		} else {
			ctx.WithFields(fields.Get(msg)).Info("Received UplinkMessage")
		}
		// Non-blocking send: drop rather than stall the stream.
		select {
		case s.uplinkMessages <- msg:
		default:
			ctx.Warn("Dropping UplinkMessage")
		}
	}
}
// GatewayDownlink RPC.
//
// Consumes a client-side stream of gateway downlink messages, logging join
// accepts as ActivationResponse and everything else as DownlinkMessage, and
// offering each message to the buffered downlinkMessages channel (dropped
// when full).
//
// The return is named so the deferred logging sees the real termination
// error (the loop's `msg, err :=` shadowed the variable the defer captured),
// consistent with GatewayStatus.
func (s *ReferenceMonitorServer) GatewayDownlink(stream Monitor_GatewayDownlinkServer) (err error) {
	gatewayID, err := s.getAndAuthGateway(stream.Context())
	if err != nil {
		return errors.NewErrPermissionDenied(err.Error())
	}
	ctx := s.ctx.WithField("GatewayID", gatewayID)
	ctx.Info("GatewayDownlink stream started")
	defer func() {
		if err != nil {
			ctx.WithError(err).Info("GatewayDownlink stream ended")
		} else {
			ctx.Info("GatewayDownlink stream ended")
		}
	}()
	// Background watcher records context cancellation for the loop to pick up.
	var streamErr atomic.Value
	go func() {
		<-stream.Context().Done()
		streamErr.Store(stream.Context().Err())
	}()
	for {
		streamErr := streamErr.Load()
		if streamErr != nil {
			return streamErr.(error)
		}
		msg, err := stream.Recv()
		if err == io.EOF {
			return stream.SendAndClose(&empty.Empty{})
		}
		if err != nil {
			return err
		}
		// Decode the LoRaWAN payload so join accepts can be distinguished.
		msg.UnmarshalPayload()
		if msg.GetMessage().GetLorawan().GetJoinAcceptPayload() != nil {
			ctx.WithFields(fields.Get(msg)).Info("Received ActivationResponse")
		} else {
			ctx.WithFields(fields.Get(msg)).Info("Received DownlinkMessage")
		}
		// Non-blocking send: drop rather than stall the stream.
		select {
		case s.downlinkMessages <- msg:
		default:
			ctx.Warn("Dropping DownlinkMessage")
		}
	}
}
// getAndAuthBroker extracts the broker ID and token from the gRPC metadata
// in ctx and returns the ID. The token is only logged — no real validation
// is performed (this is a reference implementation).
func (s *ReferenceMonitorServer) getAndAuthBroker(ctx context.Context) (string, error) {
	id, err := api.IDFromContext(ctx)
	if err != nil {
		return "", err
	}
	token, err := api.TokenFromContext(ctx)
	if err != nil {
		return "", err
	}
	// Actually validate token here, if failed: return nil, grpc.Errorf(codes.Unauthenticated, "Broker Authentication Failed")
	s.ctx.WithFields(log.Fields{"ID": id, "Token": token}).Info("Broker Authenticated")
	return id, nil
}
// BrokerUplink RPC.
//
// Consumes a client-side stream of deduplicated uplink messages from a
// broker, logging join requests as DeduplicatedActivationRequest and the
// rest as DeduplicatedUplinkMessage, and offering each to the buffered
// brokerUplinkMessages channel (dropped when full).
//
// The return is named so the deferred logging sees the real termination
// error (the loop's `msg, err :=` shadowed the variable the defer captured),
// consistent with GatewayStatus.
func (s *ReferenceMonitorServer) BrokerUplink(stream Monitor_BrokerUplinkServer) (err error) {
	brokerID, err := s.getAndAuthBroker(stream.Context())
	if err != nil {
		return errors.NewErrPermissionDenied(err.Error())
	}
	ctx := s.ctx.WithField("BrokerID", brokerID)
	ctx.Info("BrokerUplink stream started")
	defer func() {
		if err != nil {
			ctx.WithError(err).Info("BrokerUplink stream ended")
		} else {
			ctx.Info("BrokerUplink stream ended")
		}
	}()
	// Background watcher records context cancellation for the loop to pick up.
	var streamErr atomic.Value
	go func() {
		<-stream.Context().Done()
		streamErr.Store(stream.Context().Err())
	}()
	for {
		streamErr := streamErr.Load()
		if streamErr != nil {
			return streamErr.(error)
		}
		msg, err := stream.Recv()
		if err == io.EOF {
			return stream.SendAndClose(&empty.Empty{})
		}
		if err != nil {
			return err
		}
		// Decode the LoRaWAN payload so join requests can be distinguished.
		msg.UnmarshalPayload()
		if msg.GetMessage().GetLorawan().GetJoinRequestPayload() != nil {
			ctx.WithFields(fields.Get(msg)).Info("Received DeduplicatedActivationRequest")
		} else {
			ctx.WithFields(fields.Get(msg)).Info("Received DeduplicatedUplinkMessage")
		}
		// Non-blocking send: drop rather than stall the stream.
		select {
		case s.brokerUplinkMessages <- msg:
		default:
			ctx.Warn("Dropping DeduplicatedUplinkMessage")
		}
	}
}
// BrokerDownlink RPC.
//
// Consumes a client-side stream of downlink messages from a broker,
// logging join accepts as ActivationResponse and the rest as
// DownlinkMessage, and offering each to the buffered
// brokerDownlinkMessages channel (dropped when full).
//
// Fixes: the start/end log messages previously said "BrokerUplink"
// (copy-paste from BrokerUplink); the return is now named so the deferred
// logging sees the real termination error, consistent with GatewayStatus.
func (s *ReferenceMonitorServer) BrokerDownlink(stream Monitor_BrokerDownlinkServer) (err error) {
	brokerID, err := s.getAndAuthBroker(stream.Context())
	if err != nil {
		return errors.NewErrPermissionDenied(err.Error())
	}
	ctx := s.ctx.WithField("BrokerID", brokerID)
	ctx.Info("BrokerDownlink stream started")
	defer func() {
		if err != nil {
			ctx.WithError(err).Info("BrokerDownlink stream ended")
		} else {
			ctx.Info("BrokerDownlink stream ended")
		}
	}()
	// Background watcher records context cancellation for the loop to pick up.
	var streamErr atomic.Value
	go func() {
		<-stream.Context().Done()
		streamErr.Store(stream.Context().Err())
	}()
	for {
		streamErr := streamErr.Load()
		if streamErr != nil {
			return streamErr.(error)
		}
		msg, err := stream.Recv()
		if err == io.EOF {
			return stream.SendAndClose(&empty.Empty{})
		}
		if err != nil {
			return err
		}
		// Decode the LoRaWAN payload so join accepts can be distinguished.
		msg.UnmarshalPayload()
		if msg.GetMessage().GetLorawan().GetJoinAcceptPayload() != nil {
			ctx.WithFields(fields.Get(msg)).Info("Received ActivationResponse")
		} else {
			ctx.WithFields(fields.Get(msg)).Info("Received DownlinkMessage")
		}
		// Non-blocking send: drop rather than stall the stream.
		select {
		case s.brokerDownlinkMessages <- msg:
		default:
			ctx.Warn("Dropping DownlinkMessage")
		}
	}
}
// getAndAuthHandler extracts the handler ID and token from the gRPC
// metadata in ctx and returns the ID. The token is only logged — no real
// validation is performed (this is a reference implementation).
func (s *ReferenceMonitorServer) getAndAuthHandler(ctx context.Context) (string, error) {
	id, err := api.IDFromContext(ctx)
	if err != nil {
		return "", err
	}
	token, err := api.TokenFromContext(ctx)
	if err != nil {
		return "", err
	}
	// Actually validate token here, if failed: return nil, grpc.Errorf(codes.Unauthenticated, "Handler Authentication Failed")
	s.ctx.WithFields(log.Fields{"ID": id, "Token": token}).Info("Handler Authenticated")
	return id, nil
}
// HandlerUplink RPC.
//
// Consumes a client-side stream of deduplicated uplink messages from a
// handler, logging join requests as DeduplicatedActivationRequest and the
// rest as DeduplicatedUplinkMessage, and offering each to the buffered
// handlerUplinkMessages channel (dropped when full).
//
// The return is named so the deferred logging sees the real termination
// error (the loop's `msg, err :=` shadowed the variable the defer captured),
// consistent with GatewayStatus.
func (s *ReferenceMonitorServer) HandlerUplink(stream Monitor_HandlerUplinkServer) (err error) {
	handlerID, err := s.getAndAuthHandler(stream.Context())
	if err != nil {
		return errors.NewErrPermissionDenied(err.Error())
	}
	ctx := s.ctx.WithField("HandlerID", handlerID)
	ctx.Info("HandlerUplink stream started")
	defer func() {
		if err != nil {
			ctx.WithError(err).Info("HandlerUplink stream ended")
		} else {
			ctx.Info("HandlerUplink stream ended")
		}
	}()
	// Background watcher records context cancellation for the loop to pick up.
	var streamErr atomic.Value
	go func() {
		<-stream.Context().Done()
		streamErr.Store(stream.Context().Err())
	}()
	for {
		streamErr := streamErr.Load()
		if streamErr != nil {
			return streamErr.(error)
		}
		msg, err := stream.Recv()
		if err == io.EOF {
			return stream.SendAndClose(&empty.Empty{})
		}
		if err != nil {
			return err
		}
		// Decode the LoRaWAN payload so join requests can be distinguished.
		msg.UnmarshalPayload()
		if msg.GetMessage().GetLorawan().GetJoinRequestPayload() != nil {
			ctx.WithFields(fields.Get(msg)).Info("Received DeduplicatedActivationRequest")
		} else {
			ctx.WithFields(fields.Get(msg)).Info("Received DeduplicatedUplinkMessage")
		}
		// Non-blocking send: drop rather than stall the stream.
		select {
		case s.handlerUplinkMessages <- msg:
		default:
			ctx.Warn("Dropping DeduplicatedUplinkMessage")
		}
	}
}
// HandlerDownlink RPC.
//
// Consumes a client-side stream of downlink messages from a handler,
// logging join accepts as ActivationResponse and the rest as
// DownlinkMessage, and offering each to the buffered
// handlerDownlinkMessages channel (dropped when full).
//
// Fixes: the start/end log messages previously said "HandlerUplink"
// (copy-paste from HandlerUplink); the return is now named so the deferred
// logging sees the real termination error, consistent with GatewayStatus.
func (s *ReferenceMonitorServer) HandlerDownlink(stream Monitor_HandlerDownlinkServer) (err error) {
	handlerID, err := s.getAndAuthHandler(stream.Context())
	if err != nil {
		return errors.NewErrPermissionDenied(err.Error())
	}
	ctx := s.ctx.WithField("HandlerID", handlerID)
	ctx.Info("HandlerDownlink stream started")
	defer func() {
		if err != nil {
			ctx.WithError(err).Info("HandlerDownlink stream ended")
		} else {
			ctx.Info("HandlerDownlink stream ended")
		}
	}()
	// Background watcher records context cancellation for the loop to pick up.
	var streamErr atomic.Value
	go func() {
		<-stream.Context().Done()
		streamErr.Store(stream.Context().Err())
	}()
	for {
		streamErr := streamErr.Load()
		if streamErr != nil {
			return streamErr.(error)
		}
		msg, err := stream.Recv()
		if err == io.EOF {
			return stream.SendAndClose(&empty.Empty{})
		}
		if err != nil {
			return err
		}
		// Decode the LoRaWAN payload so join accepts can be distinguished.
		msg.UnmarshalPayload()
		if msg.GetMessage().GetLorawan().GetJoinAcceptPayload() != nil {
			ctx.WithFields(fields.Get(msg)).Info("Received ActivationResponse")
		} else {
			ctx.WithFields(fields.Get(msg)).Info("Received DownlinkMessage")
		}
		// Non-blocking send: drop rather than stall the stream.
		select {
		case s.handlerDownlinkMessages <- msg:
		default:
			ctx.Warn("Dropping DownlinkMessage")
		}
	}
}
|
package bootstrap
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"time"
"github.com/buildkite/agent/agent/plugin"
"github.com/buildkite/agent/bootstrap/shell"
"github.com/buildkite/agent/env"
"github.com/buildkite/agent/experiments"
"github.com/buildkite/agent/process"
"github.com/buildkite/agent/retry"
"github.com/buildkite/shellwords"
"github.com/pkg/errors"
)
// Bootstrap represents the phases of execution in a Buildkite Job. It's run
// as a sub-process of the buildkite-agent and finishes at the conclusion of a job.
// Historically (prior to v3) the bootstrap was a shell script, but was ported to
// Golang for portability and testability.
type Bootstrap struct {
	// Config provides the bootstrap configuration
	Config

	// Shell is the shell environment for the bootstrap
	shell *shell.Shell

	// Plugins to use
	plugins []*plugin.Plugin

	// Plugin checkouts from the plugin phases
	pluginCheckouts []*pluginCheckout

	// Directories to clean up at end of bootstrap
	cleanupDirs []string

	// A channel to track cancellation; Cancel sends on it, Run listens.
	cancelCh chan struct{}
}
// New returns a new Bootstrap instance configured with conf and an
// unbuffered cancellation channel.
func New(conf Config) *Bootstrap {
	b := &Bootstrap{Config: conf}
	b.cancelCh = make(chan struct{})
	return b
}
// Run executes the bootstrap phases in order and returns the exit code.
// It sets up the shell, listens for cancellation, runs the plugin,
// checkout and command phases (filtered by b.Phases when non-empty), and
// always tears down the environment before returning.
func (b *Bootstrap) Run(ctx context.Context) (exitCode int) {
	// Check if not nil to allow for tests to overwrite shell
	if b.shell == nil {
		var err error
		b.shell, err = shell.NewWithContext(ctx)
		if err != nil {
			fmt.Printf("Error creating shell: %v", err)
			return 1
		}
		b.shell.PTY = b.Config.RunInPty
		b.shell.Debug = b.Config.Debug
	}
	// Listen for cancellation
	go func() {
		select {
		case <-ctx.Done():
			return
		case <-b.cancelCh:
			b.shell.Commentf("Received cancellation signal, interrupting")
			b.shell.Interrupt()
		}
	}()
	// Tear down the environment (and fire pre-exit hook) before we exit
	defer func() {
		if err := b.tearDown(); err != nil {
			b.shell.Errorf("Error tearing down bootstrap: %v", err)
			// this gets passed back via the named return
			exitCode = shell.GetExitCode(err)
		}
	}()
	// Initialize the environment, a failure here will still call the tearDown
	if err := b.setUp(); err != nil {
		b.shell.Errorf("Error setting up bootstrap: %v", err)
		return shell.GetExitCode(err)
	}
	// includePhase reports whether a phase should run: all phases run when
	// b.Phases is empty, otherwise only the listed ones.
	var includePhase = func(phase string) bool {
		if len(b.Phases) == 0 {
			return true
		}
		for _, include := range b.Phases {
			if include == phase {
				return true
			}
		}
		return false
	}
	// Execute the bootstrap phases in order
	var phaseErr error
	if includePhase(`plugin`) {
		phaseErr = b.PluginPhase()
	}
	if phaseErr == nil && includePhase(`checkout`) {
		phaseErr = b.CheckoutPhase()
	} else {
		// Checkout was skipped (or a previous phase failed); still chdir to
		// an existing checkout dir if one is configured.
		checkoutDir, exists := b.shell.Env.Get(`BUILDKITE_BUILD_CHECKOUT_PATH`)
		if exists {
			_ = b.shell.Chdir(checkoutDir)
		}
	}
	if phaseErr == nil && includePhase(`plugin`) {
		phaseErr = b.VendoredPluginPhase()
	}
	if phaseErr == nil && includePhase(`command`) {
		phaseErr = b.CommandPhase()

		// Only upload artifacts as part of the command phase
		if err := b.uploadArtifacts(); err != nil {
			b.shell.Errorf("%v", err)
			return shell.GetExitCode(err)
		}
	}
	// Phase errors are where something of ours broke that merits a big red error
	// this won't include command failures, as we view that as more in the user space
	if phaseErr != nil {
		b.shell.Errorf("%v", phaseErr)
		return shell.GetExitCode(phaseErr)
	}
	// Use the exit code from the command phase
	exitStatus, _ := b.shell.Env.Get(`BUILDKITE_COMMAND_EXIT_STATUS`)
	exitStatusCode, _ := strconv.Atoi(exitStatus)
	return exitStatusCode
}
// Cancel interrupts any running shell processes and causes the bootstrap to stop.
//
// NOTE(review): cancelCh is unbuffered, so this send blocks until Run's
// cancellation goroutine receives it — confirm callers expect that.
func (b *Bootstrap) Cancel() error {
	b.cancelCh <- struct{}{}
	return nil
}
// executeHook runs a hook script with the hookRunner.
//
// It wraps the hook in a generated script so that environment changes made
// by the hook can be captured and applied back to the bootstrap shell. A
// missing hook file is not an error. On a hook failure the exit status is
// recorded in BUILDKITE_LAST_HOOK_EXIT_STATUS and an error is returned.
func (b *Bootstrap) executeHook(name string, hookPath string, extraEnviron *env.Environment) error {
	if !fileExists(hookPath) {
		if b.Debug {
			b.shell.Commentf("Skipping %s hook, no script at \"%s\"", name, hookPath)
		}
		return nil
	}
	b.shell.Headerf("Running %s hook", name)
	// We need a script to wrap the hook script so that we can snaffle the changed
	// environment variables
	script, err := newHookScriptWrapper(hookPath)
	if err != nil {
		b.shell.Errorf("Error creating hook script: %v", err)
		return err
	}
	defer script.Close()
	cleanHookPath := hookPath
	// Show a relative path if we can
	if strings.HasPrefix(hookPath, b.shell.Getwd()) {
		var err error
		if cleanHookPath, err = filepath.Rel(b.shell.Getwd(), hookPath); err != nil {
			cleanHookPath = hookPath
		}
	}
	// Show the hook runner in debug, but the thing being run otherwise 💅🏻
	if b.Debug {
		b.shell.Commentf("A hook runner was written to \"%s\" with the following:", script.Path())
		b.shell.Promptf("%s", process.FormatCommand(script.Path(), nil))
	} else {
		b.shell.Promptf("%s", process.FormatCommand(cleanHookPath, []string{}))
	}
	// Run the wrapper script
	if err := b.shell.RunScript(script.Path(), extraEnviron); err != nil {
		exitCode := shell.GetExitCode(err)
		b.shell.Env.Set("BUILDKITE_LAST_HOOK_EXIT_STATUS", fmt.Sprintf("%d", exitCode))
		// Give a simpler error if it's just a shell exit error
		if shell.IsExitError(err) {
			return &shell.ExitError{
				Code:    exitCode,
				Message: fmt.Sprintf("The %s hook exited with status %d", name, exitCode),
			}
		}
		return err
	}
	// Store the last hook exit code for subsequent steps
	b.shell.Env.Set("BUILDKITE_LAST_HOOK_EXIT_STATUS", "0")
	// Get changed environment
	changes, err := script.Changes()
	if err != nil {
		return errors.Wrapf(err, "Failed to get environment")
	}
	// Finally, apply changes to the current shell and config
	b.applyEnvironmentChanges(changes.Env, changes.Dir)
	return nil
}
// applyEnvironmentChanges folds a hook's captured environment and working
// directory changes back into the current shell. Values for variables we
// don't control are never printed, since they may be sensitive.
func (b *Bootstrap) applyEnvironmentChanges(environ *env.Environment, dir string) {
	if dir != b.shell.Getwd() {
		// Best-effort: a failed chdir is deliberately ignored here
		_ = b.shell.Chdir(dir)
	}
	// Nothing to apply?
	if environ == nil || environ.Length() == 0 {
		return
	}
	// Some variables are "special": they reconfigure the bootstrap itself at
	// run time. Those are safe to echo with their new values; everything else
	// could hold secrets (i.e. THIRD_PARTY_API_KEY), so we only report that
	// it changed.
	specialChanges := b.Config.ReadFromEnvironment(environ)
	for varName, varValue := range environ.ToMap() {
		if _, isSpecial := specialChanges[varName]; isSpecial {
			b.shell.Commentf("%s is now %q", varName, varValue)
		} else {
			b.shell.Commentf("%s changed", varName)
		}
	}
	// With reporting done, merge the new values into the shell environment.
	b.shell.Env = b.shell.Env.Merge(environ)
}
// Returns the absolute path to the best matching hook file in a path, or os.ErrNotExist if none is found
//
// On Windows, batch-style hooks (.BAT/.CMD) take precedence over a bare
// script of the same name.
func (b *Bootstrap) findHookFile(hookDir string, name string) (string, error) {
	if runtime.GOOS == "windows" {
		// check for windows types first
		if p, err := shell.LookPath(name, hookDir, ".BAT;.CMD"); err == nil {
			return p, nil
		}
	}
	// otherwise check for the default shell script
	if p := filepath.Join(hookDir, name); fileExists(p) {
		return p, nil
	}
	return "", os.ErrNotExist
}
// hasGlobalHook reports whether a global (agent-level) hook with this name exists.
func (b *Bootstrap) hasGlobalHook(name string) bool {
	_, err := b.globalHookPath(name)
	return err == nil
}
// Returns the absolute path to a global hook, or os.ErrNotExist if none is found
func (b *Bootstrap) globalHookPath(name string) (string, error) {
	return b.findHookFile(b.HooksPath, name)
}
// Executes a global hook if one exists
//
// A missing hook is not an error — the bootstrap simply continues. The hook
// path is resolved exactly once; the previous exists-check followed by a
// second lookup did redundant filesystem work and left a window where the
// file could vanish between the two calls.
func (b *Bootstrap) executeGlobalHook(name string) error {
	p, err := b.globalHookPath(name)
	if err != nil {
		// os.ErrNotExist: no hook of this name is installed
		return nil
	}
	return b.executeHook("global "+name, p, nil)
}
// Returns the absolute path to a local hook, or os.ErrNotExist if none is found
//
// Local hooks live under .buildkite/hooks in the checked-out repository.
func (b *Bootstrap) localHookPath(name string) (string, error) {
	return b.findHookFile(filepath.Join(b.shell.Getwd(), ".buildkite", "hooks"), name)
}
// hasLocalHook reports whether the checked-out repository provides a hook with this name.
func (b *Bootstrap) hasLocalHook(name string) bool {
	_, err := b.localHookPath(name)
	return err == nil
}
// Executes a local hook
//
// A missing hook is not an error. The hook path is resolved exactly once:
// the previous hasLocalHook check followed by a second localHookPath call
// did redundant filesystem work, and its error branch silently returned nil,
// hiding the distinction between "no hook" and "lookup raced away".
func (b *Bootstrap) executeLocalHook(name string) error {
	localHookPath, err := b.localHookPath(name)
	if err != nil {
		// os.ErrNotExist: the repository doesn't provide this hook
		return nil
	}
	// For high-security configs, we allow the disabling of local hooks.
	localHooksEnabled := b.Config.LocalHooksEnabled
	// Allow hooks to disable local hooks by setting BUILDKITE_NO_LOCAL_HOOKS=true
	noLocalHooks, _ := b.shell.Env.Get(`BUILDKITE_NO_LOCAL_HOOKS`)
	if noLocalHooks == "true" || noLocalHooks == "1" {
		localHooksEnabled = false
	}
	if !localHooksEnabled {
		return fmt.Errorf("Refusing to run %s, local hooks are disabled", localHookPath)
	}
	return b.executeHook("local "+name, localHookPath, nil)
}
// Returns whether or not a file exists on the filesystem. We consider any
// error returned by os.Stat to indicate that the file doesn't exist. We could
// be specific and use os.IsNotExist(err), but most other errors also indicate
// that the file isn't there (or isn't available) so we'll just catch them all.
func fileExists(filename string) bool {
	if _, err := os.Stat(filename); err != nil {
		return false
	}
	return true
}
// agentNameDirPattern matches every character that is not ASCII-alphanumeric.
// Compiled once at package init rather than on every call, since
// regexp.MustCompile in the function body recompiled the pattern each time.
var agentNameDirPattern = regexp.MustCompile("[[:^alnum:]]")

// dirForAgentName converts an agent name into a filesystem-safe directory
// name by replacing every non-alphanumeric character with "-".
func dirForAgentName(agentName string) string {
	return agentNameDirPattern.ReplaceAllString(agentName, "-")
}
// repositoryDirPattern matches every character that is not ASCII-alphanumeric.
// Compiled once at package init rather than on every call, since
// regexp.MustCompile in the function body recompiled the pattern each time.
var repositoryDirPattern = regexp.MustCompile("[[:^alnum:]]")

// dirForRepository converts a repository URL into a filesystem-safe directory
// name by replacing every non-alphanumeric character with "-".
func dirForRepository(repository string) string {
	return repositoryDirPattern.ReplaceAllString(repository, "-")
}
// Given a repository, it will add the host to the set of SSH known_hosts on the machine
//
// Failures are reported as warnings rather than errors: the clone may still
// succeed (or fail with a clearer message) without the known_hosts entry.
func addRepositoryHostToSSHKnownHosts(sh *shell.Shell, repository string) {
	// A repository that is a local path has no host to scan.
	if fileExists(repository) {
		return
	}
	kh, err := findKnownHosts(sh)
	if err != nil {
		sh.Warningf("Failed to find SSH known_hosts file: %v", err)
		return
	}
	if err := kh.AddFromRepository(repository); err != nil {
		sh.Warningf("Error adding to known_hosts: %v", err)
	}
}
// Makes sure a file is executable
//
// If the owner-execute bit is already set the file is left untouched;
// otherwise it is added via chmod.
func addExecutePermissionToFile(filename string) error {
	info, err := os.Stat(filename)
	if err != nil {
		return fmt.Errorf("Failed to retrieve file information of \"%s\" (%s)", filename, err)
	}
	mode := info.Mode()
	if mode&0100 != 0 {
		// Already executable by the owner — nothing to do.
		return nil
	}
	if err := os.Chmod(filename, mode|0100); err != nil {
		return fmt.Errorf("Failed to mark \"%s\" as executable (%s)", filename, err)
	}
	return nil
}
// setUp is run before all the phases run. It's responsible for initializing the
// bootstrap environment
//
// It snapshots the process environment into the shell, ensures a checkout
// path is set, surfaces any protected variables the job runner ignored,
// disables interactive git prompting, and finally runs the global
// "environment" hook.
func (b *Bootstrap) setUp() error {
	// Create an empty env for us to keep track of our env changes in
	b.shell.Env = env.FromSlice(os.Environ())
	// Add the $BUILDKITE_BIN_PATH to the $PATH if we've been given one
	if b.BinPath != "" {
		path, _ := b.shell.Env.Get("PATH")
		// Prepend, so agent-shipped binaries win over system ones
		b.shell.Env.Set("PATH", fmt.Sprintf("%s%s%s", b.BinPath, string(os.PathListSeparator), path))
	}
	// Set a BUILDKITE_BUILD_CHECKOUT_PATH unless one exists already. We do this here
	// so that the environment will have a checkout path to work with
	if _, exists := b.shell.Env.Get("BUILDKITE_BUILD_CHECKOUT_PATH"); !exists {
		if b.BuildPath == "" {
			return fmt.Errorf("Must set either a BUILDKITE_BUILD_PATH or a BUILDKITE_BUILD_CHECKOUT_PATH")
		}
		b.shell.Env.Set("BUILDKITE_BUILD_CHECKOUT_PATH",
			filepath.Join(b.BuildPath, dirForAgentName(b.AgentName), b.OrganizationSlug, b.PipelineSlug))
	}
	// The job runner sets BUILDKITE_IGNORED_ENV with any keys that were ignored
	// or overwritten. This shows a warning to the user so they don't get confused
	// when their environment changes don't seem to do anything
	if ignored, exists := b.shell.Env.Get("BUILDKITE_IGNORED_ENV"); exists {
		b.shell.Headerf("Detected protected environment variables")
		b.shell.Commentf("Your pipeline environment has protected environment variables set. " +
			"These can only be set via hooks, plugins or the agent configuration.")
		for _, env := range strings.Split(ignored, ",") {
			b.shell.Warningf("Ignored %s", env)
		}
		b.shell.Printf("^^^ +++")
	}
	if b.Debug {
		b.shell.Headerf("Buildkite environment variables")
		for _, e := range b.shell.Env.ToSlice() {
			// Redact the agent token; only echo BUILDKITE/CI/PATH-prefixed vars
			if strings.HasPrefix(e, "BUILDKITE_AGENT_ACCESS_TOKEN=") {
				b.shell.Printf("BUILDKITE_AGENT_ACCESS_TOKEN=******************")
			} else if strings.HasPrefix(e, "BUILDKITE") || strings.HasPrefix(e, "CI") || strings.HasPrefix(e, "PATH") {
				b.shell.Printf("%s", strings.Replace(e, "\n", "\\n", -1))
			}
		}
	}
	// Disable any interactive Git/SSH prompting
	b.shell.Env.Set("GIT_TERMINAL_PROMPT", "0")
	// It's important to do this before checking out plugins, in case you want
	// to use the global environment hook to whitelist the plugins that are
	// allowed to be used.
	return b.executeGlobalHook("environment")
}
// tearDown is called before the bootstrap exits, even on error
//
// Runs the pre-exit hooks (global, local, plugin — in that order), then any
// deprecated docker teardown, then removes directories registered for cleanup.
func (b *Bootstrap) tearDown() error {
	if err := b.executeGlobalHook("pre-exit"); err != nil {
		return err
	}
	if err := b.executeLocalHook("pre-exit"); err != nil {
		return err
	}
	if err := b.executePluginHook("pre-exit", b.pluginCheckouts); err != nil {
		return err
	}
	// Support deprecated BUILDKITE_DOCKER* env vars
	// NOTE(review): this returns early, so cleanupDirs below is never removed
	// on the deprecated-docker path — confirm whether that is intentional.
	if hasDeprecatedDockerIntegration(b.shell) {
		return tearDownDeprecatedDockerIntegration(b.shell)
	}
	for _, dir := range b.cleanupDirs {
		if err := os.RemoveAll(dir); err != nil {
			// Best-effort cleanup: warn and keep going
			b.shell.Warningf("Failed to remove dir %s: %v", dir, err)
		}
	}
	return nil
}
// hasPlugins reports whether any plugins were supplied in the job
// configuration (Config.Plugins is the raw plugin JSON string).
func (b *Bootstrap) hasPlugins() bool {
	// Return the comparison directly instead of an if/return-bool chain.
	return b.Config.Plugins != ""
}
// loadPlugins parses Config.Plugins (JSON) into plugin definitions, caching
// the result on the Bootstrap so repeated calls don't re-parse. It refuses
// to load anything when plugins are disabled, with an error message naming
// the specific agent flag responsible.
func (b *Bootstrap) loadPlugins() ([]*plugin.Plugin, error) {
	// Already parsed on a previous call
	if b.plugins != nil {
		return b.plugins, nil
	}
	// Check if we can run plugins (disabled via --no-plugins)
	if !b.Config.PluginsEnabled {
		if !b.Config.LocalHooksEnabled {
			return nil, fmt.Errorf("Plugins have been disabled on this agent with `--no-local-hooks`")
		} else if !b.Config.CommandEval {
			return nil, fmt.Errorf("Plugins have been disabled on this agent with `--no-command-eval`")
		} else {
			return nil, fmt.Errorf("Plugins have been disabled on this agent with `--no-plugins`")
		}
	}
	var err error
	b.plugins, err = plugin.CreateFromJSON(b.Config.Plugins)
	if err != nil {
		return nil, errors.Wrap(err, "Failed to parse a plugin definition")
	}
	return b.plugins, nil
}
// validatePluginCheckout checks a checked-out plugin's configuration against
// its plugin.yml definition. Validation is opt-in (plugin-validation agent
// setting); a plugin with no definition file only produces a warning.
func (b *Bootstrap) validatePluginCheckout(checkout *pluginCheckout) error {
	if !b.Config.PluginValidation {
		return nil
	}
	// Lazily load the definition from the checkout directory if we don't
	// already have one.
	if checkout.Definition == nil {
		if b.Debug {
			b.shell.Commentf("Parsing plugin definition for %s from %s", checkout.Plugin.Name(), checkout.CheckoutDir)
		}
		def, err := plugin.LoadDefinitionFromDir(checkout.CheckoutDir)
		switch {
		case err == plugin.ErrDefinitionNotFound:
			// Nothing to validate against — warn and treat as valid
			b.shell.Warningf("Failed to find plugin definition for plugin %s", checkout.Plugin.Name())
			return nil
		case err != nil:
			return err
		}
		checkout.Definition = def
	}
	validator := &plugin.Validator{}
	result := validator.Validate(checkout.Definition, checkout.Plugin.Configuration)
	if !result.Valid() {
		b.shell.Headerf("Plugin validation failed for %q", checkout.Plugin.Name())
		// Named configJSON to avoid shadowing the json package
		configJSON, _ := json.Marshal(checkout.Plugin.Configuration)
		b.shell.Commentf("Plugin configuration JSON is %s", configJSON)
		return result
	}
	b.shell.Commentf("Valid plugin configuration for %q", checkout.Plugin.Name())
	return nil
}
// PluginPhase is where plugins that weren't filtered in the Environment phase are
// checked out and made available to later phases
//
// Vendored plugins are skipped here — they are handled after checkout by
// VendoredPluginPhase, since their files live inside the repository.
func (b *Bootstrap) PluginPhase() error {
	if !b.hasPlugins() {
		return nil
	}
	b.shell.Headerf("Setting up plugins")
	if b.Debug {
		b.shell.Commentf("Plugin JSON is %s", b.Plugins)
	}
	plugins, err := b.loadPlugins()
	if err != nil {
		return err
	}
	checkouts := []*pluginCheckout{}
	// Checkout and validate plugins that aren't vendored
	for _, p := range plugins {
		if p.Vendored {
			if b.Debug {
				b.shell.Commentf("Skipping vendored plugin %s", p.Name())
			}
			continue
		}
		checkout, err := b.checkoutPlugin(p)
		if err != nil {
			return errors.Wrapf(err, "Failed to checkout plugin %s", p.Name())
		}
		err = b.validatePluginCheckout(checkout)
		if err != nil {
			return err
		}
		checkouts = append(checkouts, checkout)
	}
	// Store the checkouts for future use
	b.pluginCheckouts = checkouts
	// Now we can run plugin environment hooks too
	return b.executePluginHook("environment", checkouts)
}
// VendoredPluginPhase is where plugins that are included in the checked out code are added
//
// Runs after the checkout phase (vendored plugin files only exist once the
// repository is on disk). Paths are resolved against
// BUILDKITE_BUILD_CHECKOUT_PATH and must stay inside the checkout.
func (b *Bootstrap) VendoredPluginPhase() error {
	if !b.hasPlugins() {
		return nil
	}
	b.shell.Headerf("Setting up vendored plugins")
	plugins, err := b.loadPlugins()
	if err != nil {
		return err
	}
	vendoredCheckouts := []*pluginCheckout{}
	// Validate vendored plugins
	for _, p := range plugins {
		if !p.Vendored {
			continue
		}
		checkoutPath, _ := b.shell.Env.Get("BUILDKITE_BUILD_CHECKOUT_PATH")
		pluginLocation, err := filepath.Abs(filepath.Join(checkoutPath, p.Location))
		if err != nil {
			return errors.Wrapf(err, "Failed to resolve vendored plugin path for plugin %s", p.Name())
		}
		if !fileExists(pluginLocation) {
			return fmt.Errorf("Vendored plugin path %s doesn't exist", p.Location)
		}
		checkout := &pluginCheckout{
			Plugin:      p,
			CheckoutDir: pluginLocation,
			HooksDir:    filepath.Join(pluginLocation, "hooks"),
		}
		// Also make sure that plugin is within this repository
		// checkout and isn't elsewhere on the system.
		if !strings.HasPrefix(pluginLocation, checkoutPath+string(os.PathSeparator)) {
			return fmt.Errorf("Vendored plugin paths must be within the checked-out repository")
		}
		err = b.validatePluginCheckout(checkout)
		if err != nil {
			return err
		}
		vendoredCheckouts = append(vendoredCheckouts, checkout)
	}
	// Finally append our vendored checkouts to the rest for subsequent hooks
	b.pluginCheckouts = append(b.pluginCheckouts, vendoredCheckouts...)
	// Now we can run plugin environment hooks too
	return b.executePluginHook("environment", vendoredCheckouts)
}
// Executes a named hook on plugins that have it
//
// Plugins without this hook are skipped; the first plugin whose hook fails
// aborts the rest.
func (b *Bootstrap) executePluginHook(name string, checkouts []*pluginCheckout) error {
	for _, p := range checkouts {
		hookPath, err := b.findHookFile(p.HooksDir, name)
		if err != nil {
			// This plugin doesn't implement the hook — try the next one
			continue
		}
		// NOTE(review): the error from ConfigurationToEnvironment is discarded;
		// confirm a partially-converted configuration is acceptable here.
		env, _ := p.ConfigurationToEnvironment()
		if err := b.executeHook("plugin "+p.Plugin.Name()+" "+name, hookPath, env); err != nil {
			return err
		}
	}
	return nil
}
// If any plugin has a hook by this name
func (b *Bootstrap) hasPluginHook(name string) bool {
	for _, p := range b.pluginCheckouts {
		if _, err := b.findHookFile(p.HooksDir, name); err == nil {
			return true
		}
	}
	return false
}
// Checkout a given plugin to the plugins directory and return that directory
//
// A per-plugin lock file (outside the clone directory) guards against
// concurrent checkouts by other agents on the same host. An existing .git
// directory means the plugin was already checked out and the clone is
// skipped. The clone is done from inside the target directory, so the
// working directory is switched and restored via defer.
func (b *Bootstrap) checkoutPlugin(p *plugin.Plugin) (*pluginCheckout, error) {
	// Make sure we have a plugin path before trying to do anything
	if b.PluginsPath == "" {
		return nil, fmt.Errorf("Can't checkout plugin without a `plugins-path`")
	}
	// Get the identifier for the plugin
	id, err := p.Identifier()
	if err != nil {
		return nil, err
	}
	// Ensure the plugin directory exists, otherwise we can't create the lock
	err = os.MkdirAll(b.PluginsPath, 0777)
	if err != nil {
		return nil, err
	}
	// Try and lock this particular plugin while we check it out (we create
	// the file outside of the plugin directory so git clone doesn't have
	// a cry about the directory not being empty)
	pluginCheckoutHook, err := b.shell.LockFile(filepath.Join(b.PluginsPath, id+".lock"), time.Minute*5)
	if err != nil {
		return nil, err
	}
	defer pluginCheckoutHook.Unlock()
	// Create a path to the plugin
	directory := filepath.Join(b.PluginsPath, id)
	pluginGitDirectory := filepath.Join(directory, ".git")
	checkout := &pluginCheckout{
		Plugin:      p,
		CheckoutDir: directory,
		HooksDir:    filepath.Join(directory, "hooks"),
	}
	// Has it already been checked out?
	if fileExists(pluginGitDirectory) {
		// It'd be nice to show the current commit of the plugin, so
		// let's figure that out.
		headCommit, err := gitRevParseInWorkingDirectory(b.shell, directory, "--short=7", "HEAD")
		if err != nil {
			b.shell.Commentf("Plugin %q already checked out (can't `git rev-parse HEAD` plugin git directory)", p.Label())
		} else {
			b.shell.Commentf("Plugin %q already checked out (%s)", p.Label(), strings.TrimSpace(headCommit))
		}
		return checkout, nil
	}
	// Make the directory
	err = os.MkdirAll(directory, 0777)
	if err != nil {
		return nil, err
	}
	// Once we've got the lock, we need to make sure another process didn't already
	// checkout the plugin
	// NOTE(review): the lock is already held at the earlier fileExists check
	// above, so this second check looks redundant — confirm before removing.
	if fileExists(pluginGitDirectory) {
		b.shell.Commentf("Plugin \"%s\" already checked out", p.Label())
		return checkout, nil
	}
	repo, err := p.Repository()
	if err != nil {
		return nil, err
	}
	b.shell.Commentf("Plugin \"%s\" will be checked out to \"%s\"", p.Location, directory)
	if b.Debug {
		b.shell.Commentf("Checking if \"%s\" is a local repository", repo)
	}
	// Switch to the plugin directory
	previousWd := b.shell.Getwd()
	if err = b.shell.Chdir(directory); err != nil {
		return nil, err
	}
	// Switch back to the previous working directory
	defer b.shell.Chdir(previousWd)
	b.shell.Commentf("Switching to the plugin directory")
	if b.SSHKeyscan {
		addRepositoryHostToSSHKnownHosts(b.shell, repo)
	}
	// Plugin clones shouldn't use custom GitCloneFlags
	if err = b.shell.Run("git", "clone", "-v", "--", repo, "."); err != nil {
		return nil, err
	}
	// Switch to the version if we need to
	if p.Version != "" {
		b.shell.Commentf("Checking out `%s`", p.Version)
		if err = b.shell.Run("git", "checkout", "-f", p.Version); err != nil {
			return nil, err
		}
	}
	return checkout, nil
}
// removeCheckoutDir deletes BUILDKITE_BUILD_CHECKOUT_PATH, retrying up to 10
// times with a 10 second pause between attempts.
func (b *Bootstrap) removeCheckoutDir() error {
	checkoutPath, _ := b.shell.Env.Get("BUILDKITE_BUILD_CHECKOUT_PATH")
	// on windows, sometimes removing large dirs can fail for various reasons
	// for instance having files open
	// see https://github.com/golang/go/issues/20841
	for i := 0; i < 10; i++ {
		b.shell.Commentf("Removing %s", checkoutPath)
		if err := os.RemoveAll(checkoutPath); err != nil {
			b.shell.Errorf("Failed to remove \"%s\" (%s)", checkoutPath, err)
		} else {
			// RemoveAll reported success; double-check the path is really gone
			if _, err := os.Stat(checkoutPath); os.IsNotExist(err) {
				return nil
			} else {
				b.shell.Errorf("Failed to remove %s", checkoutPath)
			}
		}
		b.shell.Commentf("Waiting 10 seconds")
		<-time.After(time.Second * 10)
	}
	return fmt.Errorf("Failed to remove %s", checkoutPath)
}
// createCheckoutDir makes sure BUILDKITE_BUILD_CHECKOUT_PATH exists on disk
// and that the shell's working directory is set to it.
func (b *Bootstrap) createCheckoutDir() error {
	checkoutPath, _ := b.shell.Env.Get("BUILDKITE_BUILD_CHECKOUT_PATH")
	if !fileExists(checkoutPath) {
		b.shell.Commentf("Creating \"%s\"", checkoutPath)
		if err := os.MkdirAll(checkoutPath, 0777); err != nil {
			return err
		}
	}
	// Only chdir when we're not already there
	if b.shell.Getwd() != checkoutPath {
		return b.shell.Chdir(checkoutPath)
	}
	return nil
}
// CheckoutPhase creates the build directory and makes sure we're running the
// build at the right commit.
//
// Order of operations: pre-checkout hooks, optional clean-checkout wipe,
// build dir creation, then exactly one checkout implementation (plugin hook,
// global hook, or the built-in default with retries), followed by the
// post-checkout hooks. If a post-checkout hook changes the checkout path,
// the shell is switched to the new directory.
func (b *Bootstrap) CheckoutPhase() error {
	if err := b.executeGlobalHook("pre-checkout"); err != nil {
		return err
	}
	if err := b.executePluginHook("pre-checkout", b.pluginCheckouts); err != nil {
		return err
	}
	// Remove the checkout directory if BUILDKITE_CLEAN_CHECKOUT is present
	if b.CleanCheckout {
		b.shell.Headerf("Cleaning pipeline checkout")
		if err := b.removeCheckoutDir(); err != nil {
			return err
		}
	}
	b.shell.Headerf("Preparing working directory")
	// If we have a blank repository then use a temp dir for builds
	if b.Config.Repository == "" {
		buildDir, err := ioutil.TempDir("", "buildkite-job-"+b.Config.JobID)
		if err != nil {
			return err
		}
		b.shell.Env.Set(`BUILDKITE_BUILD_CHECKOUT_PATH`, buildDir)
		// Track the directory so we can remove it at the end of the bootstrap
		b.cleanupDirs = append(b.cleanupDirs, buildDir)
	}
	// Make sure the build directory exists
	if err := b.createCheckoutDir(); err != nil {
		return err
	}
	// There can only be one checkout hook, either plugin or global, in that order
	switch {
	case b.hasPluginHook("checkout"):
		if err := b.executePluginHook("checkout", b.pluginCheckouts); err != nil {
			return err
		}
	case b.hasGlobalHook("checkout"):
		if err := b.executeGlobalHook("checkout"); err != nil {
			return err
		}
	default:
		if b.Config.Repository != "" {
			// Retry the default checkout a few times, but stop retrying when
			// it was interrupted or cancelled
			err := retry.Do(func(s *retry.Stats) error {
				err := b.defaultCheckoutPhase()
				if err == nil {
					return nil
				}
				switch {
				case shell.IsExitError(err) && shell.GetExitCode(err) == -1:
					b.shell.Warningf("Checkout was interrupted by a signal")
					s.Break()
				case errors.Cause(err) == context.Canceled:
					b.shell.Warningf("Checkout was cancelled")
					s.Break()
				default:
					b.shell.Warningf("Checkout failed! %s (%s)", err, s)
					// Checkout can fail because of corrupted files in the checkout
					// which can leave the agent in a state where it keeps failing
					// This removes the checkout dir, which means the next checkout
					// will be a lot slower (clone vs fetch), but hopefully will
					// allow the agent to self-heal
					_ = b.removeCheckoutDir()
				}
				return err
			}, &retry.Config{Maximum: 3, Interval: 2 * time.Second})
			if err != nil {
				return err
			}
		} else {
			b.shell.Commentf("Skipping checkout, BUILDKITE_REPO is empty")
		}
	}
	// Store the current value of BUILDKITE_BUILD_CHECKOUT_PATH, so we can detect if
	// one of the post-checkout hooks changed it.
	previousCheckoutPath, _ := b.shell.Env.Get("BUILDKITE_BUILD_CHECKOUT_PATH")
	// Run post-checkout hooks
	if err := b.executeGlobalHook("post-checkout"); err != nil {
		return err
	}
	if err := b.executeLocalHook("post-checkout"); err != nil {
		return err
	}
	if err := b.executePluginHook("post-checkout", b.pluginCheckouts); err != nil {
		return err
	}
	// Capture the new checkout path so we can see if it's changed.
	newCheckoutPath, _ := b.shell.Env.Get("BUILDKITE_BUILD_CHECKOUT_PATH")
	// If the working directory has been changed by a hook, log and switch to it
	if previousCheckoutPath != "" && previousCheckoutPath != newCheckoutPath {
		b.shell.Headerf("A post-checkout hook has changed the working directory to \"%s\"", newCheckoutPath)
		if err := b.shell.Chdir(newCheckoutPath); err != nil {
			return err
		}
	}
	return nil
}
// hasGitSubmodules reports whether the current working directory contains a
// .gitmodules file.
func hasGitSubmodules(sh *shell.Shell) bool {
	return fileExists(filepath.Join(sh.Getwd(), ".gitmodules"))
}
// gitMirrorRepository either creates or update the git mirror repository used as a reference later
//
// The mirror lives under Config.GitMirrorsPath, keyed by a sanitized form of
// the repository URL, and is protected by a lock file against concurrent
// clones. Returns the mirror path.
func (b *Bootstrap) gitMirrorRepository() (string, error) {
	path := filepath.Join(b.Config.GitMirrorsPath, dirForRepository(b.Repository))
	// Create the base dir if it doesn't exist
	if baseDir := filepath.Dir(path); !fileExists(baseDir) {
		b.shell.Commentf("Creating \"%s\"", baseDir)
		if err := os.MkdirAll(baseDir, 0777); err != nil {
			return "", err
		}
	}
	// Try and lock the repository dir to prevent concurrent clones
	repoDirLock, err := b.shell.LockFile(path+".lock", time.Minute*5)
	if err != nil {
		return "", err
	}
	defer repoDirLock.Unlock()
	if !fileExists(path) {
		b.shell.Commentf("Cloning a mirror of the repository to %s", path)
		if err := gitCloneMirror(b.shell, b.GitCloneFlags, b.Repository, path); err != nil {
			return "", err
		}
	} else {
		b.shell.Commentf("Updating existing repository mirror")
		// Update the origin of the repository so we can gracefully handle repository renames
		if err := b.shell.Run("git", "--git-dir", path, "remote", "set-url", "origin", b.Repository); err != nil {
			return "", err
		}
		// Update our mirror
		if err := b.shell.Run("git", "--git-dir", path, "remote", "update", "--prune"); err != nil {
			return "", err
		}
	}
	return path, nil
}
// defaultCheckoutPhase is called by the CheckoutPhase if no global or plugin checkout
// hook exists. It performs the default checkout on the Repository provided in the config
//
// High-level flow: ensure the build dir, optionally prepare a git mirror,
// clone or re-point the existing clone, clean, fetch and checkout the right
// ref (custom refspec / GitHub PR head / branch HEAD / direct commit),
// handle submodules, clean again, then report commit metadata back to
// Buildkite unless another agent already has.
func (b *Bootstrap) defaultCheckoutPhase() error {
	var mirrorDir string
	// Make sure the build directory exists and that we change directory into it
	if err := b.createCheckoutDir(); err != nil {
		return err
	}
	// If we can, get a mirror of the git repository to use for reference later
	if experiments.IsEnabled(`git-mirrors`) && b.Config.GitMirrorsPath != "" && b.Config.Repository != "" {
		var err error
		mirrorDir, err = b.gitMirrorRepository()
		if err != nil {
			return err
		}
	}
	if b.SSHKeyscan {
		addRepositoryHostToSSHKnownHosts(b.shell, b.Repository)
	}
	gitCloneFlags := b.GitCloneFlags
	if mirrorDir != "" {
		// Use the local mirror as a reference repo to speed up the clone
		gitCloneFlags += fmt.Sprintf(" --reference %q", mirrorDir)
	}
	// Does the git directory exist?
	existingGitDir := filepath.Join(b.shell.Getwd(), ".git")
	if fileExists(existingGitDir) {
		// Update the origin of the repository so we can gracefully handle repository renames
		if err := b.shell.Run("git", "remote", "set-url", "origin", b.Repository); err != nil {
			return err
		}
	} else {
		if err := gitClone(b.shell, gitCloneFlags, b.Repository, "."); err != nil {
			return err
		}
	}
	// Git clean prior to checkout
	if hasGitSubmodules(b.shell) {
		if err := gitCleanSubmodules(b.shell, b.GitCleanFlags); err != nil {
			return err
		}
	}
	if err := gitClean(b.shell, b.GitCleanFlags); err != nil {
		return err
	}
	// If a refspec is provided then use it instead.
	// i.e. `refs/not/a/head`
	if b.RefSpec != "" {
		b.shell.Commentf("Fetch and checkout custom refspec")
		if err := gitFetch(b.shell, "-v --prune", "origin", b.RefSpec); err != nil {
			return err
		}
		if err := b.shell.Run("git", "checkout", "-f", b.Commit); err != nil {
			return err
		}
		// GitHub has a special ref which lets us fetch a pull request head, whether
		// or not there is a current head in this repository or another which
		// references the commit. We presume a commit sha is provided. See:
		// https://help.github.com/articles/checking-out-pull-requests-locally/#modifying-an-inactive-pull-request-locally
	} else if b.PullRequest != "false" && strings.Contains(b.PipelineProvider, "github") {
		b.shell.Commentf("Fetch and checkout pull request head from GitHub")
		refspec := fmt.Sprintf("refs/pull/%s/head", b.PullRequest)
		if err := gitFetch(b.shell, "-v", "origin", refspec); err != nil {
			return err
		}
		gitFetchHead, _ := b.shell.RunAndCapture("git", "rev-parse", "FETCH_HEAD")
		b.shell.Commentf("FETCH_HEAD is now `%s`", gitFetchHead)
		if err := b.shell.Run("git", "checkout", "-f", b.Commit); err != nil {
			return err
		}
		// If the commit is "HEAD" then we can't do a commit-specific fetch and will
		// need to fetch the remote head and checkout the fetched head explicitly.
	} else if b.Commit == "HEAD" {
		b.shell.Commentf("Fetch and checkout remote branch HEAD commit")
		if err := gitFetch(b.shell, "-v --prune", "origin", b.Branch); err != nil {
			return err
		}
		if err := b.shell.Run("git", "checkout", "-f", "FETCH_HEAD"); err != nil {
			return err
		}
		// Otherwise fetch and checkout the commit directly. Some repositories don't
		// support fetching a specific commit so we fall back to fetching all heads
		// and tags, hoping that the commit is included.
	} else {
		if err := gitFetch(b.shell, "-v", "origin", b.Commit); err != nil {
			// By default `git fetch origin` will only fetch tags which are
			// reachable from a fetches branch. git 1.9.0+ changed `--tags` to
			// fetch all tags in addition to the default refspec, but pre 1.9.0 it
			// excludes the default refspec.
			gitFetchRefspec, _ := b.shell.RunAndCapture("git", "config", "remote.origin.fetch")
			if err := gitFetch(b.shell, "-v --prune", "origin", gitFetchRefspec, "+refs/tags/*:refs/tags/*"); err != nil {
				return err
			}
		}
		if err := b.shell.Run("git", "checkout", "-f", b.Commit); err != nil {
			return err
		}
	}
	var gitSubmodules bool
	if !b.GitSubmodules && hasGitSubmodules(b.shell) {
		b.shell.Warningf("This repository has submodules, but submodules are disabled at an agent level")
	} else if b.GitSubmodules && hasGitSubmodules(b.shell) {
		b.shell.Commentf("Git submodules detected")
		gitSubmodules = true
	}
	if gitSubmodules {
		// `submodule sync` will ensure the .git/config
		// matches the .gitmodules file. The command
		// is only available in git version 1.8.1, so
		// if the call fails, continue the bootstrap
		// script, and show an informative error.
		if err := b.shell.Run("git", "submodule", "sync", "--recursive"); err != nil {
			gitVersionOutput, _ := b.shell.RunAndCapture("git", "--version")
			b.shell.Warningf("Failed to recursively sync git submodules. This is most likely because you have an older version of git installed (" + gitVersionOutput + ") and you need version 1.8.1 and above. If you're using submodules, it's highly recommended you upgrade if you can.")
		}
		// Checking for submodule repositories
		submoduleRepos, err := gitEnumerateSubmoduleURLs(b.shell)
		if err != nil {
			b.shell.Warningf("Failed to enumerate git submodules: %v", err)
		} else {
			for idx, repository := range submoduleRepos {
				// submodules might need their fingerprints verified too
				if b.SSHKeyscan {
					addRepositoryHostToSSHKnownHosts(b.shell, repository)
				}
				// if we have a git mirror, add the submodule to it
				if mirrorDir != "" {
					name := fmt.Sprintf("submodule%d", idx+1)
					if err := b.shell.Run("git", "--git-dir", mirrorDir, "remote", "add", name, repository); err != nil {
						return err
					}
				}
			}
		}
		if mirrorDir != "" {
			if err := b.shell.Run("git", "submodule", "update", "--init", "--recursive", "--force", "--reference", mirrorDir); err != nil {
				return err
			}
		} else {
			if err := b.shell.Run("git", "submodule", "update", "--init", "--recursive", "--force"); err != nil {
				return err
			}
		}
		if err := b.shell.Run("git", "submodule", "foreach", "--recursive", "git", "reset", "--hard"); err != nil {
			return err
		}
	}
	// Git clean after checkout. We need to do this because submodules could have
	// changed in between the last checkout and this one. A double clean is the only
	// good solution to this problem that we've found
	b.shell.Commentf("Cleaning again to catch any post-checkout changes")
	if err := gitClean(b.shell, b.GitCleanFlags); err != nil {
		return err
	}
	if gitSubmodules {
		if err := gitCleanSubmodules(b.shell, b.GitCleanFlags); err != nil {
			return err
		}
	}
	if _, hasToken := b.shell.Env.Get("BUILDKITE_AGENT_ACCESS_TOKEN"); !hasToken {
		b.shell.Warningf("Skipping sending Git information to Buildkite as $BUILDKITE_AGENT_ACCESS_TOKEN is missing")
		return nil
	}
	// Grab author and commit information and send
	// it back to Buildkite. But before we do,
	// we'll check to see if someone else has done
	// it first.
	b.shell.Commentf("Checking to see if Git data needs to be sent to Buildkite")
	if err := b.shell.Run("buildkite-agent", "meta-data", "exists", "buildkite:git:commit"); err != nil {
		b.shell.Commentf("Sending Git commit information back to Buildkite")
		gitCommitOutput, err := b.shell.RunAndCapture("git", "--no-pager", "show", "HEAD", "-s", "--format=fuller", "--no-color")
		if err != nil {
			return err
		}
		if err = b.shell.Run("buildkite-agent", "meta-data", "set", "buildkite:git:commit", gitCommitOutput); err != nil {
			return err
		}
	}
	return nil
}
// CommandPhase determines how to run the build, and then runs it
//
// The command itself comes from exactly one source, checked in priority
// order: plugin hook, local hook, global hook, then the built-in default.
// The command's exit status is stored in BUILDKITE_COMMAND_EXIT_STATUS for
// later hooks, and a command failure does not abort the phase — post-command
// hooks still run.
func (b *Bootstrap) CommandPhase() error {
	if err := b.executeGlobalHook("pre-command"); err != nil {
		return err
	}
	if err := b.executeLocalHook("pre-command"); err != nil {
		return err
	}
	if err := b.executePluginHook("pre-command", b.pluginCheckouts); err != nil {
		return err
	}
	var commandExitError error
	// There can only be one command hook, so we check them in order of plugin, local
	switch {
	case b.hasPluginHook("command"):
		commandExitError = b.executePluginHook("command", b.pluginCheckouts)
	case b.hasLocalHook("command"):
		commandExitError = b.executeLocalHook("command")
	case b.hasGlobalHook("command"):
		commandExitError = b.executeGlobalHook("command")
	default:
		commandExitError = b.defaultCommandPhase()
	}
	// If the command returned an exit that wasn't a `exec.ExitError`
	// (which is returned when the command is actually run, but fails),
	// then we'll show it in the log.
	if shell.IsExitError(commandExitError) {
		if shell.IsExitSignaled(commandExitError) {
			b.shell.Errorf("The command was interrupted by a signal")
		} else {
			b.shell.Errorf("The command exited with status %d", shell.GetExitCode(commandExitError))
		}
	} else if commandExitError != nil {
		b.shell.Errorf(commandExitError.Error())
	}
	// Expand the command header if the command fails for any reason
	if commandExitError != nil {
		b.shell.Printf("^^^ +++")
	}
	// Save the command exit status to the env so hooks + plugins can access it. If there is no error
	// this will be zero. It's used to set the exit code later, so it's important
	b.shell.Env.Set("BUILDKITE_COMMAND_EXIT_STATUS", fmt.Sprintf("%d", shell.GetExitCode(commandExitError)))
	// Run post-command hooks
	if err := b.executeGlobalHook("post-command"); err != nil {
		return err
	}
	if err := b.executeLocalHook("post-command"); err != nil {
		return err
	}
	if err := b.executePluginHook("post-command", b.pluginCheckouts); err != nil {
		return err
	}
	return nil
}
// defaultCommandPhase is executed if there is no global or plugin command hook.
// It decides whether b.Command refers to a script file inside the checkout or a
// string of commands to eval, enforces the --no-command-eval restrictions, and
// then runs the command via the configured shell. Returns the execution error,
// if any, so the caller can derive the exit status.
func (b *Bootstrap) defaultCommandPhase() error {
	// Make sure we actually have a command to run
	if strings.TrimSpace(b.Command) == "" {
		return fmt.Errorf("No command has been provided")
	}
	// Treat the command as a candidate script path. Newlines are stripped so a
	// multi-line command can never accidentally resolve to a file on disk.
	scriptFileName := strings.Replace(b.Command, "\n", "", -1)
	pathToCommand, err := filepath.Abs(filepath.Join(b.shell.Getwd(), scriptFileName))
	commandIsScript := err == nil && fileExists(pathToCommand)
	// If the command isn't a script, then it's something we need
	// to eval. But before we even try running it, we should double
	// check that the agent is allowed to eval commands.
	if !commandIsScript && !b.CommandEval {
		b.shell.Commentf("No such file: \"%s\"", scriptFileName)
		return fmt.Errorf("This agent is not allowed to evaluate console commands. To allow this, re-run this agent without the `--no-command-eval` option, or specify a script within your repository to run instead (such as scripts/test.sh).")
	}
	// Also make sure that the script we've resolved is definitely within this
	// repository checkout and isn't elsewhere on the system.
	if commandIsScript && !b.CommandEval && !strings.HasPrefix(pathToCommand, b.shell.Getwd()+string(os.PathSeparator)) {
		b.shell.Commentf("No such file: \"%s\"", scriptFileName)
		return fmt.Errorf("This agent is only allowed to run scripts within your repository. To allow this, re-run this agent without the `--no-command-eval` option, or specify a script within your repository to run instead (such as scripts/test.sh).")
	}
	var cmdToExec string
	// The shell gets parsed based on the operating system
	shell, err := shellwords.Split(b.Shell)
	if err != nil {
		return fmt.Errorf("Failed to split shell (%q) into tokens: %v", b.Shell, err)
	}
	if len(shell) == 0 {
		return fmt.Errorf("No shell set for bootstrap")
	}
	// Windows CMD.EXE is horrible and can't handle newline delimited commands. We write
	// a batch script so that it works, but we don't like it
	if strings.ToUpper(filepath.Base(shell[0])) == `CMD.EXE` {
		batchScript, err := b.writeBatchScript(b.Command)
		if err != nil {
			return err
		}
		// Clean up the temp batch file once the command has finished
		defer os.Remove(batchScript)
		b.shell.Headerf("Running batch script")
		if b.Debug {
			contents, err := ioutil.ReadFile(batchScript)
			if err != nil {
				return err
			}
			b.shell.Commentf("Wrote batch script %s\n%s", batchScript, contents)
		}
		cmdToExec = batchScript
	} else if commandIsScript {
		// Make script executable
		if err = addExecutePermissionToFile(pathToCommand); err != nil {
			b.shell.Warningf("Error marking script %q as executable: %v", pathToCommand, err)
			return err
		}
		// Make the path relative to the shell working dir
		scriptPath, err := filepath.Rel(b.shell.Getwd(), pathToCommand)
		if err != nil {
			return err
		}
		b.shell.Headerf("Running script")
		// Prefix with "./" (or ".\" on Windows) so the shell resolves it
		// relative to the checkout rather than searching PATH
		cmdToExec = fmt.Sprintf(".%c%s", os.PathSeparator, scriptPath)
	} else {
		b.shell.Headerf("Running commands")
		cmdToExec = b.Command
	}
	// Support deprecated BUILDKITE_DOCKER* env vars
	if hasDeprecatedDockerIntegration(b.shell) {
		if b.Debug {
			b.shell.Commentf("Detected deprecated docker environment variables")
		}
		return runDeprecatedDockerIntegration(b.shell, []string{cmdToExec})
	}
	var cmd []string
	cmd = append(cmd, shell...)
	cmd = append(cmd, cmdToExec)
	// Show the fully expanded command line in debug, the short form otherwise
	if b.Debug {
		b.shell.Promptf("%s", process.FormatCommand(cmd[0], cmd[1:]))
	} else {
		b.shell.Promptf("%s", cmdToExec)
	}
	return b.shell.RunWithoutPrompt(cmd[0], cmd[1:]...)
}
// writeBatchScript writes cmd out as a Windows batch file and returns its path.
// Every non-empty line is followed by an errorlevel check so the script aborts
// on the first failing command, mimicking "set -e" behaviour for CMD.EXE.
func (b *Bootstrap) writeBatchScript(cmd string) (string, error) {
	scriptFile, err := shell.TempFileWithExtension(
		`buildkite-script.bat`,
	)
	if err != nil {
		return "", err
	}
	defer scriptFile.Close()

	var builder strings.Builder
	builder.WriteString("@echo off\n")
	for _, line := range strings.Split(cmd, "\n") {
		if line == "" {
			continue
		}
		builder.WriteString(line)
		builder.WriteString("\n")
		builder.WriteString("if %errorlevel% neq 0 exit /b %errorlevel%\n")
	}

	if _, err := io.WriteString(scriptFile, builder.String()); err != nil {
		return "", err
	}

	return scriptFile.Name(), nil
}
// uploadArtifacts runs the pre-artifact hooks, uploads any artifacts matching
// the configured paths via `buildkite-agent artifact upload`, and then runs the
// post-artifact hooks. It's a no-op when no artifact paths are configured.
func (b *Bootstrap) uploadArtifacts() error {
	if b.AutomaticArtifactUploadPaths == "" {
		return nil
	}
	// Run pre-artifact hooks
	if err := b.executeGlobalHook("pre-artifact"); err != nil {
		return err
	}
	if err := b.executeLocalHook("pre-artifact"); err != nil {
		return err
	}
	if err := b.executePluginHook("pre-artifact", b.pluginCheckouts); err != nil {
		return err
	}
	// Run the artifact upload command
	b.shell.Headerf("Uploading artifacts")
	args := []string{"artifact", "upload", b.AutomaticArtifactUploadPaths}
	// If blank, the upload destination is buildkite
	if b.ArtifactUploadDestination != "" {
		// Bug fix: the previous message claimed the *default* destination was
		// in use, but this branch only runs when a custom one is configured
		b.shell.Commentf("Using custom artifact upload destination %q", b.ArtifactUploadDestination)
		args = append(args, b.ArtifactUploadDestination)
	}
	if err := b.shell.Run("buildkite-agent", args...); err != nil {
		return err
	}
	// Run post-artifact hooks
	if err := b.executeGlobalHook("post-artifact"); err != nil {
		return err
	}
	if err := b.executeLocalHook("post-artifact"); err != nil {
		return err
	}
	if err := b.executePluginHook("post-artifact", b.pluginCheckouts); err != nil {
		return err
	}
	return nil
}
// Check for ignored env variables from the job runner. Some
// env (e.g BUILDKITE_BUILD_PATH) can only be set from config or by hooks.
// If these env are set at a pipeline level, we rewrite them to BUILDKITE_X_BUILD_PATH
// and warn on them here so that users know what is going on
func (b *Bootstrap) ignoredEnv() []string {
	var ignored []string
	for _, entry := range os.Environ() {
		if !strings.HasPrefix(entry, `BUILDKITE_X_`) {
			continue
		}
		// Map the rewritten BUILDKITE_X_* entry back to its original name
		ignored = append(ignored, "BUILDKITE_"+strings.TrimPrefix(entry, `BUILDKITE_X_`))
	}
	return ignored
}
// pluginCheckout tracks a checked-out (or vendored) plugin together with where
// its files and hook scripts live on disk
type pluginCheckout struct {
	*plugin.Plugin
	*plugin.Definition
	CheckoutDir string // directory the plugin was checked out to (or its vendored path)
	HooksDir    string // directory containing the plugin's hook scripts
}
Re-arrange output when using git-mirrors
package bootstrap
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"time"
"github.com/buildkite/agent/agent/plugin"
"github.com/buildkite/agent/bootstrap/shell"
"github.com/buildkite/agent/env"
"github.com/buildkite/agent/experiments"
"github.com/buildkite/agent/process"
"github.com/buildkite/agent/retry"
"github.com/buildkite/shellwords"
"github.com/pkg/errors"
)
// Bootstrap represents the phases of execution in a Buildkite Job. It's run
// as a sub-process of the buildkite-agent and finishes at the conclusion of a job.
// Historically (prior to v3) the bootstrap was a shell script, but was ported to
// Golang for portability and testability
type Bootstrap struct {
	// Config provides the bootstrap configuration
	Config
	// Shell is the shell environment for the bootstrap
	shell *shell.Shell
	// Plugins to use (parsed lazily from Config.Plugins by loadPlugins)
	plugins []*plugin.Plugin
	// Plugin checkouts from the plugin phases
	pluginCheckouts []*pluginCheckout
	// Directories to clean up at end of bootstrap (removed in tearDown)
	cleanupDirs []string
	// A channel to track cancellation; Cancel sends on it and the goroutine
	// started in Run receives and interrupts the shell
	cancelCh chan struct{}
}
// New returns a new Bootstrap instance configured with conf
func New(conf Config) *Bootstrap {
	b := &Bootstrap{
		Config:   conf,
		cancelCh: make(chan struct{}),
	}
	return b
}
// Run runs the bootstrap phases in order and returns the exit code: either the
// command's exit status (BUILDKITE_COMMAND_EXIT_STATUS), or the exit code of
// whichever phase (or teardown) failed.
func (b *Bootstrap) Run(ctx context.Context) (exitCode int) {
	// Check if not nil to allow for tests to overwrite shell
	if b.shell == nil {
		var err error
		b.shell, err = shell.NewWithContext(ctx)
		if err != nil {
			fmt.Printf("Error creating shell: %v", err)
			return 1
		}
		b.shell.PTY = b.Config.RunInPty
		b.shell.Debug = b.Config.Debug
	}
	// Listen for cancellation
	go func() {
		select {
		case <-ctx.Done():
			return
		case <-b.cancelCh:
			b.shell.Commentf("Received cancellation signal, interrupting")
			b.shell.Interrupt()
		}
	}()
	// Tear down the environment (and fire pre-exit hook) before we exit
	defer func() {
		if err := b.tearDown(); err != nil {
			b.shell.Errorf("Error tearing down bootstrap: %v", err)
			// this gets passed back via the named return
			exitCode = shell.GetExitCode(err)
		}
	}()
	// Initialize the environment, a failure here will still call the tearDown
	if err := b.setUp(); err != nil {
		b.shell.Errorf("Error setting up bootstrap: %v", err)
		return shell.GetExitCode(err)
	}
	// includePhase reports whether a phase should run; an empty Phases list
	// means every phase runs
	var includePhase = func(phase string) bool {
		if len(b.Phases) == 0 {
			return true
		}
		for _, include := range b.Phases {
			if include == phase {
				return true
			}
		}
		return false
	}
	// Execute the bootstrap phases in order
	var phaseErr error
	if includePhase(`plugin`) {
		phaseErr = b.PluginPhase()
	}
	if phaseErr == nil && includePhase(`checkout`) {
		phaseErr = b.CheckoutPhase()
	} else {
		// When the checkout phase is skipped (or the plugin phase failed),
		// still change into the checkout directory if one is known so later
		// work runs from the right place
		checkoutDir, exists := b.shell.Env.Get(`BUILDKITE_BUILD_CHECKOUT_PATH`)
		if exists {
			_ = b.shell.Chdir(checkoutDir)
		}
	}
	if phaseErr == nil && includePhase(`plugin`) {
		phaseErr = b.VendoredPluginPhase()
	}
	if phaseErr == nil && includePhase(`command`) {
		phaseErr = b.CommandPhase()
		// Only upload artifacts as part of the command phase
		if err := b.uploadArtifacts(); err != nil {
			b.shell.Errorf("%v", err)
			return shell.GetExitCode(err)
		}
	}
	// Phase errors are where something of ours broke that merits a big red error
	// this won't include command failures, as we view that as more in the user space
	if phaseErr != nil {
		b.shell.Errorf("%v", phaseErr)
		return shell.GetExitCode(phaseErr)
	}
	// Use the exit code from the command phase
	exitStatus, _ := b.shell.Env.Get(`BUILDKITE_COMMAND_EXIT_STATUS`)
	exitStatusCode, _ := strconv.Atoi(exitStatus)
	return exitStatusCode
}
// Cancel interrupts any running shell processes and causes the bootstrap to stop.
// Note: cancelCh is unbuffered, so this blocks until the goroutine started in
// Run receives the signal.
func (b *Bootstrap) Cancel() error {
	b.cancelCh <- struct{}{}
	return nil
}
// executeHook runs a hook script with the hookRunner. The hook is executed via
// a wrapper script so any environment and working-directory changes it makes
// can be captured and applied back to the bootstrap. extraEnviron is merged
// into the hook's environment and may be nil. A missing hook file is not an
// error; the hook is simply skipped.
func (b *Bootstrap) executeHook(name string, hookPath string, extraEnviron *env.Environment) error {
	if !fileExists(hookPath) {
		if b.Debug {
			b.shell.Commentf("Skipping %s hook, no script at \"%s\"", name, hookPath)
		}
		return nil
	}
	b.shell.Headerf("Running %s hook", name)
	// We need a script to wrap the hook script so that we can snaffle the changed
	// environment variables
	script, err := newHookScriptWrapper(hookPath)
	if err != nil {
		b.shell.Errorf("Error creating hook script: %v", err)
		return err
	}
	defer script.Close()
	cleanHookPath := hookPath
	// Show a relative path if we can
	if strings.HasPrefix(hookPath, b.shell.Getwd()) {
		var err error
		if cleanHookPath, err = filepath.Rel(b.shell.Getwd(), hookPath); err != nil {
			cleanHookPath = hookPath
		}
	}
	// Show the hook runner in debug, but the thing being run otherwise 💅🏻
	if b.Debug {
		b.shell.Commentf("A hook runner was written to \"%s\" with the following:", script.Path())
		b.shell.Promptf("%s", process.FormatCommand(script.Path(), nil))
	} else {
		b.shell.Promptf("%s", process.FormatCommand(cleanHookPath, []string{}))
	}
	// Run the wrapper script
	if err := b.shell.RunScript(script.Path(), extraEnviron); err != nil {
		exitCode := shell.GetExitCode(err)
		// Expose the failure status to subsequent hooks and plugins
		b.shell.Env.Set("BUILDKITE_LAST_HOOK_EXIT_STATUS", fmt.Sprintf("%d", exitCode))
		// Give a simpler error if it's just a shell exit error
		if shell.IsExitError(err) {
			return &shell.ExitError{
				Code:    exitCode,
				Message: fmt.Sprintf("The %s hook exited with status %d", name, exitCode),
			}
		}
		return err
	}
	// Store the last hook exit code for subsequent steps
	b.shell.Env.Set("BUILDKITE_LAST_HOOK_EXIT_STATUS", "0")
	// Get changed environment
	changes, err := script.Changes()
	if err != nil {
		return errors.Wrapf(err, "Failed to get environment")
	}
	// Finally, apply changes to the current shell and config
	b.applyEnvironmentChanges(changes.Env, changes.Dir)
	return nil
}
// applyEnvironmentChanges applies environment and working-directory changes
// captured from a hook back onto the bootstrap shell, logging which variables
// changed. Values are only echoed for variables that map to bootstrap
// configuration, since arbitrary env vars may contain secrets.
func (b *Bootstrap) applyEnvironmentChanges(environ *env.Environment, dir string) {
	if dir != b.shell.Getwd() {
		_ = b.shell.Chdir(dir)
	}

	// Nothing further to do without environment changes
	if environ == nil || environ.Length() == 0 {
		return
	}

	// Work out which of the changed variables alter the bootstrap
	// configuration at run time
	bootstrapConfigEnvChanges := b.Config.ReadFromEnvironment(environ)

	// Announce each change. Config-controlled variables show their new value;
	// everything else (potentially sensitive, e.g. THIRD_PARTY_API_KEY) is
	// only reported as changed.
	for key, value := range environ.ToMap() {
		if _, isConfig := bootstrapConfigEnvChanges[key]; isConfig {
			b.shell.Commentf("%s is now %q", key, value)
		} else {
			b.shell.Commentf("%s changed", key)
		}
	}

	// Fold the new values into the current shell environment
	b.shell.Env = b.shell.Env.Merge(environ)
}
// Returns the absolute path to the best matching hook file in a path, or os.ErrNotExist if none is found
func (b *Bootstrap) findHookFile(hookDir string, name string) (string, error) {
	if runtime.GOOS == "windows" {
		// Windows hooks may be .BAT or .CMD files, which take precedence
		// over an extension-less script
		windowsPath, lookErr := shell.LookPath(name, hookDir, ".BAT;.CMD")
		if lookErr == nil {
			return windowsPath, nil
		}
	}

	// Fall back to the default (extension-less) shell script
	defaultPath := filepath.Join(hookDir, name)
	if fileExists(defaultPath) {
		return defaultPath, nil
	}

	return "", os.ErrNotExist
}
// hasGlobalHook reports whether a global hook script with the given name exists
func (b *Bootstrap) hasGlobalHook(name string) bool {
	if _, err := b.globalHookPath(name); err != nil {
		return false
	}
	return true
}
// Returns the absolute path to a global hook, or os.ErrNotExist if none is found.
// Global hooks live in the agent's configured hooks-path directory (b.HooksPath).
func (b *Bootstrap) globalHookPath(name string) (string, error) {
	return b.findHookFile(b.HooksPath, name)
}
// Executes a global hook if one exists. A missing hook is not an error — the
// phase simply proceeds without it.
func (b *Bootstrap) executeGlobalHook(name string) error {
	// A single lookup replaces the previous hasGlobalHook + globalHookPath
	// pair, which performed the same directory probe twice
	p, err := b.globalHookPath(name)
	if err != nil {
		// No hook found; nothing to run
		return nil
	}
	return b.executeHook("global "+name, p, nil)
}
// Returns the absolute path to a local hook, or os.ErrNotExist if none is found.
// Local hooks live in .buildkite/hooks inside the checked-out repository.
func (b *Bootstrap) localHookPath(name string) (string, error) {
	return b.findHookFile(filepath.Join(b.shell.Getwd(), ".buildkite", "hooks"), name)
}
// hasLocalHook reports whether the checkout provides a local hook with this name
func (b *Bootstrap) hasLocalHook(name string) bool {
	if _, err := b.localHookPath(name); err != nil {
		return false
	}
	return true
}
// Executes a local hook if one exists, honouring the agent and environment
// switches that can disable local hooks entirely. A missing hook is not an
// error; running one while local hooks are disabled is.
func (b *Bootstrap) executeLocalHook(name string) error {
	// A single lookup replaces the previous hasLocalHook + localHookPath pair,
	// which probed the hook directory twice (and then discarded the error)
	localHookPath, err := b.localHookPath(name)
	if err != nil {
		// No hook found; nothing to run
		return nil
	}

	// For high-security configs, we allow the disabling of local hooks.
	localHooksEnabled := b.Config.LocalHooksEnabled

	// Allow hooks to disable local hooks by setting BUILDKITE_NO_LOCAL_HOOKS=true
	noLocalHooks, _ := b.shell.Env.Get(`BUILDKITE_NO_LOCAL_HOOKS`)
	if noLocalHooks == "true" || noLocalHooks == "1" {
		localHooksEnabled = false
	}

	if !localHooksEnabled {
		return fmt.Errorf("Refusing to run %s, local hooks are disabled", localHookPath)
	}

	return b.executeHook("local "+name, localHookPath, nil)
}
// fileExists reports whether a path exists on the filesystem. Any error from
// os.Stat is treated as "doesn't exist" — we could be specific with
// os.IsNotExist(err), but most other errors also mean the file isn't there
// (or isn't available), so we catch them all.
func fileExists(filename string) bool {
	if _, err := os.Stat(filename); err != nil {
		return false
	}
	return true
}
// agentNameDirBadChars matches anything that isn't alphanumeric. Compiled once
// at package scope instead of on every call.
var agentNameDirBadChars = regexp.MustCompile("[[:^alnum:]]")

// dirForAgentName turns an agent name into a filesystem-safe directory name by
// replacing every non-alphanumeric character with a dash
func dirForAgentName(agentName string) string {
	return agentNameDirBadChars.ReplaceAllString(agentName, "-")
}
// repositoryDirBadChars matches anything that isn't alphanumeric. Compiled once
// at package scope instead of on every call.
var repositoryDirBadChars = regexp.MustCompile("[[:^alnum:]]")

// dirForRepository turns a repository URL into a filesystem-safe directory name
// by replacing every non-alphanumeric character with a dash
func dirForRepository(repository string) string {
	return repositoryDirBadChars.ReplaceAllString(repository, "-")
}
// Given a repository, it will add the host to the set of SSH known_hosts on the machine
func addRepositoryHostToSSHKnownHosts(sh *shell.Shell, repository string) {
	// A repository that exists as a local path isn't an SSH remote,
	// so there's no host to add
	if fileExists(repository) {
		return
	}

	knownHosts, err := findKnownHosts(sh)
	if err != nil {
		sh.Warningf("Failed to find SSH known_hosts file: %v", err)
		return
	}

	if err := knownHosts.AddFromRepository(repository); err != nil {
		sh.Warningf("Error adding to known_hosts: %v", err)
	}
}
// addExecutePermissionToFile makes sure a file is executable by its owner,
// adding the owner-execute bit (0100) only when it's missing
func addExecutePermissionToFile(filename string) error {
	info, err := os.Stat(filename)
	if err != nil {
		return fmt.Errorf("Failed to retrieve file information of \"%s\" (%s)", filename, err)
	}

	// Nothing to do if the owner can already execute it
	if info.Mode()&0100 != 0 {
		return nil
	}

	if err := os.Chmod(filename, info.Mode()|0100); err != nil {
		return fmt.Errorf("Failed to mark \"%s\" as executable (%s)", filename, err)
	}

	return nil
}
// setUp is run before all the phases run. It's responsible for initializing the
// bootstrap environment: seeding the shell env from the process env, wiring up
// PATH and the checkout path, surfacing protected env vars, and running the
// global "environment" hook.
func (b *Bootstrap) setUp() error {
	// Create an empty env for us to keep track of our env changes in
	b.shell.Env = env.FromSlice(os.Environ())
	// Add the $BUILDKITE_BIN_PATH to the $PATH if we've been given one
	if b.BinPath != "" {
		path, _ := b.shell.Env.Get("PATH")
		// Prepended so the agent's own binaries win over system ones
		b.shell.Env.Set("PATH", fmt.Sprintf("%s%s%s", b.BinPath, string(os.PathListSeparator), path))
	}
	// Set a BUILDKITE_BUILD_CHECKOUT_PATH unless one exists already. We do this here
	// so that the environment will have a checkout path to work with
	if _, exists := b.shell.Env.Get("BUILDKITE_BUILD_CHECKOUT_PATH"); !exists {
		if b.BuildPath == "" {
			return fmt.Errorf("Must set either a BUILDKITE_BUILD_PATH or a BUILDKITE_BUILD_CHECKOUT_PATH")
		}
		b.shell.Env.Set("BUILDKITE_BUILD_CHECKOUT_PATH",
			filepath.Join(b.BuildPath, dirForAgentName(b.AgentName), b.OrganizationSlug, b.PipelineSlug))
	}
	// The job runner sets BUILDKITE_IGNORED_ENV with any keys that were ignored
	// or overwritten. This shows a warning to the user so they don't get confused
	// when their environment changes don't seem to do anything
	if ignored, exists := b.shell.Env.Get("BUILDKITE_IGNORED_ENV"); exists {
		b.shell.Headerf("Detected protected environment variables")
		b.shell.Commentf("Your pipeline environment has protected environment variables set. " +
			"These can only be set via hooks, plugins or the agent configuration.")
		for _, env := range strings.Split(ignored, ",") {
			b.shell.Warningf("Ignored %s", env)
		}
		b.shell.Printf("^^^ +++")
	}
	if b.Debug {
		b.shell.Headerf("Buildkite environment variables")
		for _, e := range b.shell.Env.ToSlice() {
			// Redact the agent access token; only print BUILDKITE/CI/PATH-prefixed vars
			if strings.HasPrefix(e, "BUILDKITE_AGENT_ACCESS_TOKEN=") {
				b.shell.Printf("BUILDKITE_AGENT_ACCESS_TOKEN=******************")
			} else if strings.HasPrefix(e, "BUILDKITE") || strings.HasPrefix(e, "CI") || strings.HasPrefix(e, "PATH") {
				b.shell.Printf("%s", strings.Replace(e, "\n", "\\n", -1))
			}
		}
	}
	// Disable any interactive Git/SSH prompting
	b.shell.Env.Set("GIT_TERMINAL_PROMPT", "0")
	// It's important to do this before checking out plugins, in case you want
	// to use the global environment hook to whitelist the plugins that are
	// allowed to be used.
	return b.executeGlobalHook("environment")
}
// tearDown is called before the bootstrap exits, even on error. It fires the
// pre-exit hooks, tears down any deprecated docker integration, and removes
// the temporary directories accumulated during the run.
func (b *Bootstrap) tearDown() error {
	// pre-exit hooks run in global, local, plugin order
	if err := b.executeGlobalHook("pre-exit"); err != nil {
		return err
	}

	if err := b.executeLocalHook("pre-exit"); err != nil {
		return err
	}

	if err := b.executePluginHook("pre-exit", b.pluginCheckouts); err != nil {
		return err
	}

	// Support deprecated BUILDKITE_DOCKER* env vars
	if hasDeprecatedDockerIntegration(b.shell) {
		return tearDownDeprecatedDockerIntegration(b.shell)
	}

	// Best-effort cleanup; failures are logged but don't fail the bootstrap
	for _, cleanupDir := range b.cleanupDirs {
		if err := os.RemoveAll(cleanupDir); err != nil {
			b.shell.Warningf("Failed to remove dir %s: %v", cleanupDir, err)
		}
	}

	return nil
}
// hasPlugins reports whether any plugin JSON was provided in the config
func (b *Bootstrap) hasPlugins() bool {
	// Idiomatic form of the previous if/return-true/return-false chain
	return b.Config.Plugins != ""
}
// loadPlugins parses the plugin JSON from the config into plugin structs,
// caching the result so subsequent calls are free. It returns an error when
// plugins are disabled on this agent or when the JSON cannot be parsed.
func (b *Bootstrap) loadPlugins() ([]*plugin.Plugin, error) {
	// Serve the cached parse if we've already done this
	if b.plugins != nil {
		return b.plugins, nil
	}

	// Check if we can run plugins (disabled via --no-plugins); report the
	// most specific reason for the restriction
	if !b.Config.PluginsEnabled {
		switch {
		case !b.Config.LocalHooksEnabled:
			return nil, fmt.Errorf("Plugins have been disabled on this agent with `--no-local-hooks`")
		case !b.Config.CommandEval:
			return nil, fmt.Errorf("Plugins have been disabled on this agent with `--no-command-eval`")
		default:
			return nil, fmt.Errorf("Plugins have been disabled on this agent with `--no-plugins`")
		}
	}

	parsed, err := plugin.CreateFromJSON(b.Config.Plugins)
	if err != nil {
		return nil, errors.Wrap(err, "Failed to parse a plugin definition")
	}
	b.plugins = parsed

	return b.plugins, nil
}
// validatePluginCheckout validates a plugin's configuration against its
// definition, loading the definition from the checkout directory if it wasn't
// already parsed. Validation is skipped entirely unless plugin-validation is
// enabled in the config; a missing definition only produces a warning.
func (b *Bootstrap) validatePluginCheckout(checkout *pluginCheckout) error {
	if !b.Config.PluginValidation {
		return nil
	}
	if checkout.Definition == nil {
		if b.Debug {
			b.shell.Commentf("Parsing plugin definition for %s from %s", checkout.Plugin.Name(), checkout.CheckoutDir)
		}
		// parse the plugin definition from the plugin checkout dir
		var err error
		checkout.Definition, err = plugin.LoadDefinitionFromDir(checkout.CheckoutDir)
		if err == plugin.ErrDefinitionNotFound {
			b.shell.Warningf("Failed to find plugin definition for plugin %s", checkout.Plugin.Name())
			return nil
		} else if err != nil {
			return err
		}
	}
	val := &plugin.Validator{}
	result := val.Validate(checkout.Definition, checkout.Plugin.Configuration)
	if !result.Valid() {
		b.shell.Headerf("Plugin validation failed for %q", checkout.Plugin.Name())
		// Renamed from `json`, which shadowed the encoding/json package
		configJSON, _ := json.Marshal(checkout.Plugin.Configuration)
		b.shell.Commentf("Plugin configuration JSON is %s", configJSON)
		return result
	}
	b.shell.Commentf("Valid plugin configuration for %q", checkout.Plugin.Name())
	return nil
}
// PluginPhase is where plugins that weren't filtered in the Environment phase are
// checked out and made available to later phases. Vendored plugins are skipped
// here; they're handled by VendoredPluginPhase after checkout.
func (b *Bootstrap) PluginPhase() error {
	if !b.hasPlugins() {
		return nil
	}
	b.shell.Headerf("Setting up plugins")
	if b.Debug {
		b.shell.Commentf("Plugin JSON is %s", b.Plugins)
	}
	plugins, err := b.loadPlugins()
	if err != nil {
		return err
	}
	checkouts := []*pluginCheckout{}
	// Checkout and validate plugins that aren't vendored
	for _, p := range plugins {
		if p.Vendored {
			if b.Debug {
				b.shell.Commentf("Skipping vendored plugin %s", p.Name())
			}
			continue
		}
		checkout, err := b.checkoutPlugin(p)
		if err != nil {
			return errors.Wrapf(err, "Failed to checkout plugin %s", p.Name())
		}
		err = b.validatePluginCheckout(checkout)
		if err != nil {
			return err
		}
		checkouts = append(checkouts, checkout)
	}
	// Store the checkouts for future use
	b.pluginCheckouts = checkouts
	// Now we can run plugin environment hooks too
	return b.executePluginHook("environment", checkouts)
}
// VendoredPluginPhase is where plugins that are included in the checked out code
// are added. It runs after checkout so the vendored paths exist, verifies each
// path stays inside the repository, then fires those plugins' environment hooks.
func (b *Bootstrap) VendoredPluginPhase() error {
	if !b.hasPlugins() {
		return nil
	}
	b.shell.Headerf("Setting up vendored plugins")
	plugins, err := b.loadPlugins()
	if err != nil {
		return err
	}
	vendoredCheckouts := []*pluginCheckout{}
	// Validate vendored plugins
	for _, p := range plugins {
		if !p.Vendored {
			continue
		}
		checkoutPath, _ := b.shell.Env.Get("BUILDKITE_BUILD_CHECKOUT_PATH")
		pluginLocation, err := filepath.Abs(filepath.Join(checkoutPath, p.Location))
		if err != nil {
			return errors.Wrapf(err, "Failed to resolve vendored plugin path for plugin %s", p.Name())
		}
		if !fileExists(pluginLocation) {
			return fmt.Errorf("Vendored plugin path %s doesn't exist", p.Location)
		}
		checkout := &pluginCheckout{
			Plugin:      p,
			CheckoutDir: pluginLocation,
			HooksDir:    filepath.Join(pluginLocation, "hooks"),
		}
		// Also make sure that the plugin is within this repository
		// checkout and isn't elsewhere on the system.
		if !strings.HasPrefix(pluginLocation, checkoutPath+string(os.PathSeparator)) {
			return fmt.Errorf("Vendored plugin paths must be within the checked-out repository")
		}
		err = b.validatePluginCheckout(checkout)
		if err != nil {
			return err
		}
		vendoredCheckouts = append(vendoredCheckouts, checkout)
	}
	// Finally append our vendored checkouts to the rest for subsequent hooks
	b.pluginCheckouts = append(b.pluginCheckouts, vendoredCheckouts...)
	// Now we can run plugin environment hooks too
	return b.executePluginHook("environment", vendoredCheckouts)
}
// Executes a named hook on every plugin checkout that provides it, stopping
// at the first hook that fails
func (b *Bootstrap) executePluginHook(name string, checkouts []*pluginCheckout) error {
	for _, checkout := range checkouts {
		hookPath, err := b.findHookFile(checkout.HooksDir, name)
		if err != nil {
			// This plugin doesn't provide the hook; try the next one
			continue
		}

		// Expose the plugin's configuration to the hook as env vars
		// (errors deliberately ignored, matching existing behaviour)
		env, _ := checkout.ConfigurationToEnvironment()
		if err := b.executeHook("plugin "+checkout.Plugin.Name()+" "+name, hookPath, env); err != nil {
			return err
		}
	}
	return nil
}
// hasPluginHook reports whether any checked-out plugin provides a hook with
// this name
func (b *Bootstrap) hasPluginHook(name string) bool {
	for _, checkout := range b.pluginCheckouts {
		_, err := b.findHookFile(checkout.HooksDir, name)
		if err == nil {
			return true
		}
	}
	return false
}
// Checkout a given plugin to the plugins directory and return that directory.
// Concurrent checkouts of the same plugin are serialized via a lock file, and
// an existing checkout is reused as-is.
func (b *Bootstrap) checkoutPlugin(p *plugin.Plugin) (*pluginCheckout, error) {
	// Make sure we have a plugin path before trying to do anything
	if b.PluginsPath == "" {
		return nil, fmt.Errorf("Can't checkout plugin without a `plugins-path`")
	}
	// Get the identifier for the plugin
	id, err := p.Identifier()
	if err != nil {
		return nil, err
	}
	// Ensure the plugin directory exists, otherwise we can't create the lock
	err = os.MkdirAll(b.PluginsPath, 0777)
	if err != nil {
		return nil, err
	}
	// Try and lock this particular plugin while we check it out (we create
	// the file outside of the plugin directory so git clone doesn't have
	// a cry about the directory not being empty)
	pluginCheckoutHook, err := b.shell.LockFile(filepath.Join(b.PluginsPath, id+".lock"), time.Minute*5)
	if err != nil {
		return nil, err
	}
	defer pluginCheckoutHook.Unlock()
	// Create a path to the plugin
	directory := filepath.Join(b.PluginsPath, id)
	pluginGitDirectory := filepath.Join(directory, ".git")
	checkout := &pluginCheckout{
		Plugin:      p,
		CheckoutDir: directory,
		HooksDir:    filepath.Join(directory, "hooks"),
	}
	// Has it already been checked out?
	if fileExists(pluginGitDirectory) {
		// It'd be nice to show the current commit of the plugin, so
		// let's figure that out.
		headCommit, err := gitRevParseInWorkingDirectory(b.shell, directory, "--short=7", "HEAD")
		if err != nil {
			b.shell.Commentf("Plugin %q already checked out (can't `git rev-parse HEAD` plugin git directory)", p.Label())
		} else {
			b.shell.Commentf("Plugin %q already checked out (%s)", p.Label(), strings.TrimSpace(headCommit))
		}
		return checkout, nil
	}
	// Make the directory
	err = os.MkdirAll(directory, 0777)
	if err != nil {
		return nil, err
	}
	// Once we've got the lock, we need to make sure another process didn't already
	// checkout the plugin
	if fileExists(pluginGitDirectory) {
		b.shell.Commentf("Plugin \"%s\" already checked out", p.Label())
		return checkout, nil
	}
	repo, err := p.Repository()
	if err != nil {
		return nil, err
	}
	b.shell.Commentf("Plugin \"%s\" will be checked out to \"%s\"", p.Location, directory)
	if b.Debug {
		b.shell.Commentf("Checking if \"%s\" is a local repository", repo)
	}
	// Switch to the plugin directory
	previousWd := b.shell.Getwd()
	if err = b.shell.Chdir(directory); err != nil {
		return nil, err
	}
	// Switch back to the previous working directory when we're done
	// (error deliberately ignored; this is best-effort)
	defer b.shell.Chdir(previousWd)
	b.shell.Commentf("Switching to the plugin directory")
	if b.SSHKeyscan {
		addRepositoryHostToSSHKnownHosts(b.shell, repo)
	}
	// Plugin clones shouldn't use custom GitCloneFlags
	if err = b.shell.Run("git", "clone", "-v", "--", repo, "."); err != nil {
		return nil, err
	}
	// Switch to the version if we need to
	if p.Version != "" {
		b.shell.Commentf("Checking out `%s`", p.Version)
		if err = b.shell.Run("git", "checkout", "-f", p.Version); err != nil {
			return nil, err
		}
	}
	return checkout, nil
}
// removeCheckoutDir removes the build checkout directory, retrying up to ten
// times with a ten second pause between attempts. On Windows, removing large
// directories can fail transiently — e.g. due to open file handles, see
// https://github.com/golang/go/issues/20841
func (b *Bootstrap) removeCheckoutDir() error {
	checkoutPath, _ := b.shell.Env.Get("BUILDKITE_BUILD_CHECKOUT_PATH")

	const maxAttempts = 10
	for attempt := 0; attempt < maxAttempts; attempt++ {
		b.shell.Commentf("Removing %s", checkoutPath)
		if err := os.RemoveAll(checkoutPath); err != nil {
			b.shell.Errorf("Failed to remove \"%s\" (%s)", checkoutPath, err)
		} else if _, err := os.Stat(checkoutPath); os.IsNotExist(err) {
			// RemoveAll reported success and the path really is gone
			return nil
		} else {
			b.shell.Errorf("Failed to remove %s", checkoutPath)
		}

		b.shell.Commentf("Waiting 10 seconds")
		<-time.After(time.Second * 10)
	}

	return fmt.Errorf("Failed to remove %s", checkoutPath)
}
// createCheckoutDir makes sure the build checkout directory exists and that
// the shell's working directory points at it
func (b *Bootstrap) createCheckoutDir() error {
	checkoutPath, _ := b.shell.Env.Get("BUILDKITE_BUILD_CHECKOUT_PATH")

	// Create the directory if it isn't already there
	if !fileExists(checkoutPath) {
		b.shell.Commentf("Creating \"%s\"", checkoutPath)
		if err := os.MkdirAll(checkoutPath, 0777); err != nil {
			return err
		}
	}

	// Nothing more to do if we're already inside it
	if b.shell.Getwd() == checkoutPath {
		return nil
	}

	return b.shell.Chdir(checkoutPath)
}
// CheckoutPhase creates the build directory and makes sure we're running the
// build at the right commit. The checkout itself can be replaced by a plugin
// or global "checkout" hook (in that order of precedence); otherwise the
// default git checkout runs with retries.
func (b *Bootstrap) CheckoutPhase() error {
	if err := b.executeGlobalHook("pre-checkout"); err != nil {
		return err
	}
	if err := b.executePluginHook("pre-checkout", b.pluginCheckouts); err != nil {
		return err
	}
	// Remove the checkout directory if BUILDKITE_CLEAN_CHECKOUT is present
	if b.CleanCheckout {
		b.shell.Headerf("Cleaning pipeline checkout")
		if err := b.removeCheckoutDir(); err != nil {
			return err
		}
	}
	b.shell.Headerf("Preparing working directory")
	// If we have a blank repository then use a temp dir for builds
	if b.Config.Repository == "" {
		buildDir, err := ioutil.TempDir("", "buildkite-job-"+b.Config.JobID)
		if err != nil {
			return err
		}
		b.shell.Env.Set(`BUILDKITE_BUILD_CHECKOUT_PATH`, buildDir)
		// Track the directory so we can remove it at the end of the bootstrap
		b.cleanupDirs = append(b.cleanupDirs, buildDir)
	}
	// Make sure the build directory exists
	if err := b.createCheckoutDir(); err != nil {
		return err
	}
	// There can only be one checkout hook, either plugin or global, in that order
	switch {
	case b.hasPluginHook("checkout"):
		if err := b.executePluginHook("checkout", b.pluginCheckouts); err != nil {
			return err
		}
	case b.hasGlobalHook("checkout"):
		if err := b.executeGlobalHook("checkout"); err != nil {
			return err
		}
	default:
		if b.Config.Repository != "" {
			// Retry the default checkout up to 3 times, 2 seconds apart
			err := retry.Do(func(s *retry.Stats) error {
				err := b.defaultCheckoutPhase()
				if err == nil {
					return nil
				}
				switch {
				case shell.IsExitError(err) && shell.GetExitCode(err) == -1:
					b.shell.Warningf("Checkout was interrupted by a signal")
					s.Break()
				case errors.Cause(err) == context.Canceled:
					b.shell.Warningf("Checkout was cancelled")
					s.Break()
				default:
					b.shell.Warningf("Checkout failed! %s (%s)", err, s)
					// Checkout can fail because of corrupted files in the checkout
					// which can leave the agent in a state where it keeps failing
					// This removes the checkout dir, which means the next checkout
					// will be a lot slower (clone vs fetch), but hopefully will
					// allow the agent to self-heal
					_ = b.removeCheckoutDir()
				}
				return err
			}, &retry.Config{Maximum: 3, Interval: 2 * time.Second})
			if err != nil {
				return err
			}
		} else {
			b.shell.Commentf("Skipping checkout, BUILDKITE_REPO is empty")
		}
	}
	// Store the current value of BUILDKITE_BUILD_CHECKOUT_PATH, so we can detect if
	// one of the post-checkout hooks changed it.
	previousCheckoutPath, _ := b.shell.Env.Get("BUILDKITE_BUILD_CHECKOUT_PATH")
	// Run post-checkout hooks
	if err := b.executeGlobalHook("post-checkout"); err != nil {
		return err
	}
	if err := b.executeLocalHook("post-checkout"); err != nil {
		return err
	}
	if err := b.executePluginHook("post-checkout", b.pluginCheckouts); err != nil {
		return err
	}
	// Capture the new checkout path so we can see if it's changed.
	newCheckoutPath, _ := b.shell.Env.Get("BUILDKITE_BUILD_CHECKOUT_PATH")
	// If the working directory has been changed by a hook, log and switch to it
	if previousCheckoutPath != "" && previousCheckoutPath != newCheckoutPath {
		b.shell.Headerf("A post-checkout hook has changed the working directory to \"%s\"", newCheckoutPath)
		if err := b.shell.Chdir(newCheckoutPath); err != nil {
			return err
		}
	}
	return nil
}
// hasGitSubmodules reports whether the shell's current working directory
// contains a .gitmodules file
func hasGitSubmodules(sh *shell.Shell) bool {
	gitModulesPath := filepath.Join(sh.Getwd(), ".gitmodules")
	return fileExists(gitModulesPath)
}
// gitMirrorRepository either creates or updates the git mirror repository used
// as a reference later. The mirror directory is lock-file protected so
// concurrent jobs don't race on creating or updating it.
func (b *Bootstrap) gitMirrorRepository() (string, error) {
	path := filepath.Join(b.Config.GitMirrorsPath, dirForRepository(b.Repository))
	// Create the base dir if it doesn't exist
	if baseDir := filepath.Dir(path); !fileExists(baseDir) {
		b.shell.Commentf("Creating \"%s\"", baseDir)
		if err := os.MkdirAll(baseDir, 0777); err != nil {
			return "", err
		}
	}
	// Try and lock the repository dir to prevent concurrent clones
	repoDirLock, err := b.shell.LockFile(path+".lock", time.Minute*5)
	if err != nil {
		return "", err
	}
	defer repoDirLock.Unlock()
	if !fileExists(path) {
		b.shell.Commentf("Cloning a mirror of the repository to %s", path)
		if err := gitCloneMirror(b.shell, b.GitCloneFlags, b.Repository, path); err != nil {
			return "", err
		}
	} else {
		b.shell.Commentf("Updating existing repository mirror")
		// Update the origin of the repository so we can gracefully handle repository renames
		if err := b.shell.Run("git", "--git-dir", path, "remote", "set-url", "origin", b.Repository); err != nil {
			return "", err
		}
		// Update our mirror
		if err := b.shell.Run("git", "--git-dir", path, "remote", "update", "--prune"); err != nil {
			return "", err
		}
	}
	return path, nil
}
// defaultCheckoutPhase is called by the CheckoutPhase if no global or plugin checkout
// hook exists. It performs the default checkout on the Repository provided in the
// config: clone or update the repository, check out the requested ref/commit,
// initialise submodules when enabled, clean the working tree, and finally send
// commit metadata back to Buildkite.
func (b *Bootstrap) defaultCheckoutPhase() error {
	var mirrorDir string

	// If we can, get a mirror of the git repository to use for reference later
	if experiments.IsEnabled(`git-mirrors`) && b.Config.GitMirrorsPath != "" && b.Config.Repository != "" {
		b.shell.Commentf("Using git-mirrors experiment 🧪")
		b.shell.Chdir(b.Config.GitMirrorsPath)
		var err error
		mirrorDir, err = b.gitMirrorRepository()
		if err != nil {
			return err
		}
	}

	// Make sure the build directory exists and that we change directory into it
	if err := b.createCheckoutDir(); err != nil {
		return err
	}

	// Add the repository host's key to known_hosts up front so git doesn't stall on a prompt
	if b.SSHKeyscan {
		addRepositoryHostToSSHKnownHosts(b.shell, b.Repository)
	}

	gitCloneFlags := b.GitCloneFlags
	if mirrorDir != "" {
		// Reference the local mirror so the clone avoids re-downloading objects
		gitCloneFlags += fmt.Sprintf(" --reference %q", mirrorDir)
	}

	// Does the git directory exist?
	existingGitDir := filepath.Join(b.shell.Getwd(), ".git")
	if fileExists(existingGitDir) {
		// Update the origin of the repository so we can gracefully handle repository renames
		if err := b.shell.Run("git", "remote", "set-url", "origin", b.Repository); err != nil {
			return err
		}
	} else {
		if err := gitClone(b.shell, gitCloneFlags, b.Repository, "."); err != nil {
			return err
		}
	}

	// Git clean prior to checkout
	if hasGitSubmodules(b.shell) {
		if err := gitCleanSubmodules(b.shell, b.GitCleanFlags); err != nil {
			return err
		}
	}

	if err := gitClean(b.shell, b.GitCleanFlags); err != nil {
		return err
	}

	// If a refspec is provided then use it instead.
	// i.e. `refs/not/a/head`
	if b.RefSpec != "" {
		b.shell.Commentf("Fetch and checkout custom refspec")
		if err := gitFetch(b.shell, "-v --prune", "origin", b.RefSpec); err != nil {
			return err
		}
		if err := b.shell.Run("git", "checkout", "-f", b.Commit); err != nil {
			return err
		}

		// GitHub has a special ref which lets us fetch a pull request head, whether
		// or not there is a current head in this repository or another which
		// references the commit. We presume a commit sha is provided. See:
		// https://help.github.com/articles/checking-out-pull-requests-locally/#modifying-an-inactive-pull-request-locally
	} else if b.PullRequest != "false" && strings.Contains(b.PipelineProvider, "github") {
		b.shell.Commentf("Fetch and checkout pull request head from GitHub")
		refspec := fmt.Sprintf("refs/pull/%s/head", b.PullRequest)

		if err := gitFetch(b.shell, "-v", "origin", refspec); err != nil {
			return err
		}

		gitFetchHead, _ := b.shell.RunAndCapture("git", "rev-parse", "FETCH_HEAD")
		b.shell.Commentf("FETCH_HEAD is now `%s`", gitFetchHead)

		if err := b.shell.Run("git", "checkout", "-f", b.Commit); err != nil {
			return err
		}

		// If the commit is "HEAD" then we can't do a commit-specific fetch and will
		// need to fetch the remote head and checkout the fetched head explicitly.
	} else if b.Commit == "HEAD" {
		b.shell.Commentf("Fetch and checkout remote branch HEAD commit")
		if err := gitFetch(b.shell, "-v --prune", "origin", b.Branch); err != nil {
			return err
		}
		if err := b.shell.Run("git", "checkout", "-f", "FETCH_HEAD"); err != nil {
			return err
		}

		// Otherwise fetch and checkout the commit directly. Some repositories don't
		// support fetching a specific commit so we fall back to fetching all heads
		// and tags, hoping that the commit is included.
	} else {
		if err := gitFetch(b.shell, "-v", "origin", b.Commit); err != nil {
			// By default `git fetch origin` will only fetch tags which are
			// reachable from a fetched branch. git 1.9.0+ changed `--tags` to
			// fetch all tags in addition to the default refspec, but pre 1.9.0 it
			// excludes the default refspec.
			gitFetchRefspec, _ := b.shell.RunAndCapture("git", "config", "remote.origin.fetch")
			if err := gitFetch(b.shell, "-v --prune", "origin", gitFetchRefspec, "+refs/tags/*:refs/tags/*"); err != nil {
				return err
			}
		}
		if err := b.shell.Run("git", "checkout", "-f", b.Commit); err != nil {
			return err
		}
	}

	// Only initialise submodules when the repo has them AND the agent allows it
	var gitSubmodules bool
	if !b.GitSubmodules && hasGitSubmodules(b.shell) {
		b.shell.Warningf("This repository has submodules, but submodules are disabled at an agent level")
	} else if b.GitSubmodules && hasGitSubmodules(b.shell) {
		b.shell.Commentf("Git submodules detected")
		gitSubmodules = true
	}

	if gitSubmodules {
		// `submodule sync` will ensure the .git/config
		// matches the .gitmodules file. The command
		// is only available in git version 1.8.1, so
		// if the call fails, continue the bootstrap
		// script, and show an informative error.
		if err := b.shell.Run("git", "submodule", "sync", "--recursive"); err != nil {
			gitVersionOutput, _ := b.shell.RunAndCapture("git", "--version")
			b.shell.Warningf("Failed to recursively sync git submodules. This is most likely because you have an older version of git installed (" + gitVersionOutput + ") and you need version 1.8.1 and above. If you're using submodules, it's highly recommended you upgrade if you can.")
		}

		// Checking for submodule repositories
		submoduleRepos, err := gitEnumerateSubmoduleURLs(b.shell)
		if err != nil {
			b.shell.Warningf("Failed to enumerate git submodules: %v", err)
		} else {
			for idx, repository := range submoduleRepos {
				// submodules might need their fingerprints verified too
				if b.SSHKeyscan {
					addRepositoryHostToSSHKnownHosts(b.shell, repository)
				}
				// if we have a git mirror, add the submodule to it
				if mirrorDir != "" {
					name := fmt.Sprintf("submodule%d", idx+1)
					if err := b.shell.Run("git", "--git-dir", mirrorDir, "remote", "add", name, repository); err != nil {
						return err
					}
				}
			}
		}

		if mirrorDir != "" {
			if err := b.shell.Run("git", "submodule", "update", "--init", "--recursive", "--force", "--reference", mirrorDir); err != nil {
				return err
			}
		} else {
			if err := b.shell.Run("git", "submodule", "update", "--init", "--recursive", "--force"); err != nil {
				return err
			}
		}

		if err := b.shell.Run("git", "submodule", "foreach", "--recursive", "git", "reset", "--hard"); err != nil {
			return err
		}
	}

	// Git clean after checkout. We need to do this because submodules could have
	// changed in between the last checkout and this one. A double clean is the only
	// good solution to this problem that we've found
	b.shell.Commentf("Cleaning again to catch any post-checkout changes")

	if err := gitClean(b.shell, b.GitCleanFlags); err != nil {
		return err
	}

	if gitSubmodules {
		if err := gitCleanSubmodules(b.shell, b.GitCleanFlags); err != nil {
			return err
		}
	}

	// Without an agent access token we can't call buildkite-agent meta-data below
	if _, hasToken := b.shell.Env.Get("BUILDKITE_AGENT_ACCESS_TOKEN"); !hasToken {
		b.shell.Warningf("Skipping sending Git information to Buildkite as $BUILDKITE_AGENT_ACCESS_TOKEN is missing")
		return nil
	}

	// Grab author and commit information and send
	// it back to Buildkite. But before we do,
	// we'll check to see if someone else has done
	// it first.
	b.shell.Commentf("Checking to see if Git data needs to be sent to Buildkite")
	if err := b.shell.Run("buildkite-agent", "meta-data", "exists", "buildkite:git:commit"); err != nil {
		b.shell.Commentf("Sending Git commit information back to Buildkite")

		gitCommitOutput, err := b.shell.RunAndCapture("git", "--no-pager", "show", "HEAD", "-s", "--format=fuller", "--no-color")
		if err != nil {
			return err
		}

		if err = b.shell.Run("buildkite-agent", "meta-data", "set", "buildkite:git:commit", gitCommitOutput); err != nil {
			return err
		}
	}
	return nil
}
// CommandPhase determines how to run the build, and then runs it. The
// pre-command and post-command hooks always run; the command itself comes
// from exactly one source, checked in priority order: plugin hook, local
// hook, global hook, then the built-in default command phase. Note that a
// failing command does not make CommandPhase return an error — the exit
// status is propagated via BUILDKITE_COMMAND_EXIT_STATUS instead.
func (b *Bootstrap) CommandPhase() error {
	if err := b.executeGlobalHook("pre-command"); err != nil {
		return err
	}
	if err := b.executeLocalHook("pre-command"); err != nil {
		return err
	}
	if err := b.executePluginHook("pre-command", b.pluginCheckouts); err != nil {
		return err
	}

	var commandExitError error

	// There can only be one command hook, so we check them in order of plugin, local, global
	switch {
	case b.hasPluginHook("command"):
		commandExitError = b.executePluginHook("command", b.pluginCheckouts)
	case b.hasLocalHook("command"):
		commandExitError = b.executeLocalHook("command")
	case b.hasGlobalHook("command"):
		commandExitError = b.executeGlobalHook("command")
	default:
		commandExitError = b.defaultCommandPhase()
	}

	// If the command returned an exit that wasn't a `exec.ExitError`
	// (which is returned when the command is actually run, but fails),
	// then we'll show it in the log.
	if shell.IsExitError(commandExitError) {
		if shell.IsExitSignaled(commandExitError) {
			b.shell.Errorf("The command was interrupted by a signal")
		} else {
			b.shell.Errorf("The command exited with status %d", shell.GetExitCode(commandExitError))
		}
	} else if commandExitError != nil {
		b.shell.Errorf(commandExitError.Error())
	}

	// Expand the command header if the command fails for any reason
	if commandExitError != nil {
		b.shell.Printf("^^^ +++")
	}

	// Save the command exit status to the env so hooks + plugins can access it. If there is no error
	// this will be zero. It's used to set the exit code later, so it's important
	b.shell.Env.Set("BUILDKITE_COMMAND_EXIT_STATUS", fmt.Sprintf("%d", shell.GetExitCode(commandExitError)))

	// Run post-command hooks — these run even when the command itself failed
	if err := b.executeGlobalHook("post-command"); err != nil {
		return err
	}
	if err := b.executeLocalHook("post-command"); err != nil {
		return err
	}
	if err := b.executePluginHook("post-command", b.pluginCheckouts); err != nil {
		return err
	}

	return nil
}
// defaultCommandPhase is executed if there is no global or plugin command hook.
// It resolves b.Command to either a script inside the checkout or a command
// line to eval, enforces the agent's command-eval safety checks, and runs the
// result through the configured shell.
func (b *Bootstrap) defaultCommandPhase() error {
	// Make sure we actually have a command to run
	if strings.TrimSpace(b.Command) == "" {
		return fmt.Errorf("No command has been provided")
	}

	scriptFileName := strings.Replace(b.Command, "\n", "", -1)
	pathToCommand, err := filepath.Abs(filepath.Join(b.shell.Getwd(), scriptFileName))
	commandIsScript := err == nil && fileExists(pathToCommand)

	// If the command isn't a script, then it's something we need
	// to eval. But before we even try running it, we should double
	// check that the agent is allowed to eval commands.
	if !commandIsScript && !b.CommandEval {
		b.shell.Commentf("No such file: \"%s\"", scriptFileName)
		return fmt.Errorf("This agent is not allowed to evaluate console commands. To allow this, re-run this agent without the `--no-command-eval` option, or specify a script within your repository to run instead (such as scripts/test.sh).")
	}

	// Also make sure that the script we've resolved is definitely within this
	// repository checkout and isn't elsewhere on the system.
	if commandIsScript && !b.CommandEval && !strings.HasPrefix(pathToCommand, b.shell.Getwd()+string(os.PathSeparator)) {
		b.shell.Commentf("No such file: \"%s\"", scriptFileName)
		return fmt.Errorf("This agent is only allowed to run scripts within your repository. To allow this, re-run this agent without the `--no-command-eval` option, or specify a script within your repository to run instead (such as scripts/test.sh).")
	}

	var cmdToExec string

	// The shell gets parsed based on the operating system. The local is named
	// shellTokens (not shell) to avoid shadowing the imported shell package.
	shellTokens, err := shellwords.Split(b.Shell)
	if err != nil {
		return fmt.Errorf("Failed to split shell (%q) into tokens: %v", b.Shell, err)
	}

	if len(shellTokens) == 0 {
		return fmt.Errorf("No shell set for bootstrap")
	}

	// Windows CMD.EXE is horrible and can't handle newline delimited commands. We write
	// a batch script so that it works, but we don't like it
	if strings.ToUpper(filepath.Base(shellTokens[0])) == `CMD.EXE` {
		batchScript, err := b.writeBatchScript(b.Command)
		if err != nil {
			return err
		}
		defer os.Remove(batchScript)

		b.shell.Headerf("Running batch script")
		if b.Debug {
			contents, err := ioutil.ReadFile(batchScript)
			if err != nil {
				return err
			}
			b.shell.Commentf("Wrote batch script %s\n%s", batchScript, contents)
		}

		cmdToExec = batchScript
	} else if commandIsScript {
		// Make script executable
		if err = addExecutePermissionToFile(pathToCommand); err != nil {
			b.shell.Warningf("Error marking script %q as executable: %v", pathToCommand, err)
			return err
		}

		// Make the path relative to the shell working dir
		scriptPath, err := filepath.Rel(b.shell.Getwd(), pathToCommand)
		if err != nil {
			return err
		}

		b.shell.Headerf("Running script")
		cmdToExec = fmt.Sprintf(".%c%s", os.PathSeparator, scriptPath)
	} else {
		b.shell.Headerf("Running commands")
		cmdToExec = b.Command
	}

	// Support deprecated BUILDKITE_DOCKER* env vars
	if hasDeprecatedDockerIntegration(b.shell) {
		if b.Debug {
			b.shell.Commentf("Detected deprecated docker environment variables")
		}
		return runDeprecatedDockerIntegration(b.shell, []string{cmdToExec})
	}

	var cmd []string
	cmd = append(cmd, shellTokens...)
	cmd = append(cmd, cmdToExec)

	if b.Debug {
		b.shell.Promptf("%s", process.FormatCommand(cmd[0], cmd[1:]))
	} else {
		b.shell.Promptf("%s", cmdToExec)
	}

	return b.shell.RunWithoutPrompt(cmd[0], cmd[1:]...)
}
// writeBatchScript writes cmd to a temporary Windows batch file, one line per
// command with an errorlevel check after each so the first failure aborts the
// script, and returns the file's path.
func (b *Bootstrap) writeBatchScript(cmd string) (string, error) {
	scriptFile, err := shell.TempFileWithExtension(
		`buildkite-script.bat`,
	)
	if err != nil {
		return "", err
	}
	defer scriptFile.Close()

	var sb strings.Builder
	sb.WriteString("@echo off\n")
	for _, line := range strings.Split(cmd, "\n") {
		if line == "" {
			continue
		}
		sb.WriteString(line)
		sb.WriteString("\n")
		sb.WriteString("if %errorlevel% neq 0 exit /b %errorlevel%\n")
	}

	if _, err = io.WriteString(scriptFile, sb.String()); err != nil {
		return "", err
	}
	return scriptFile.Name(), nil
}
// uploadArtifacts runs the `buildkite-agent artifact upload` command (wrapped
// by the pre-artifact and post-artifact hooks) when automatic artifact upload
// paths are configured. It is a no-op when no paths are set.
func (b *Bootstrap) uploadArtifacts() error {
	if b.AutomaticArtifactUploadPaths == "" {
		return nil
	}

	// Run pre-artifact hooks
	if err := b.executeGlobalHook("pre-artifact"); err != nil {
		return err
	}
	if err := b.executeLocalHook("pre-artifact"); err != nil {
		return err
	}
	if err := b.executePluginHook("pre-artifact", b.pluginCheckouts); err != nil {
		return err
	}

	// Run the artifact upload command
	b.shell.Headerf("Uploading artifacts")
	args := []string{"artifact", "upload", b.AutomaticArtifactUploadPaths}

	// If blank, the upload destination is buildkite
	if b.ArtifactUploadDestination != "" {
		// This branch runs only when a custom destination IS set, so the log
		// line must say "custom" (previously it incorrectly said "default").
		b.shell.Commentf("Using custom artifact upload destination")
		args = append(args, b.ArtifactUploadDestination)
	}

	if err := b.shell.Run("buildkite-agent", args...); err != nil {
		return err
	}

	// Run post-artifact hooks
	if err := b.executeGlobalHook("post-artifact"); err != nil {
		return err
	}
	if err := b.executeLocalHook("post-artifact"); err != nil {
		return err
	}
	if err := b.executePluginHook("post-artifact", b.pluginCheckouts); err != nil {
		return err
	}

	return nil
}
// Check for ignored env variables from the job runner. Some
// env (e.g BUILDKITE_BUILD_PATH) can only be set from config or by hooks.
// If these env are set at a pipeline level, we rewrite them to BUILDKITE_X_BUILD_PATH
// and warn on them here so that users know what is going on.
// ignoredEnv maps each BUILDKITE_X_<NAME> variable in the environment back to
// its original BUILDKITE_<NAME> form and returns the resulting list.
func (b *Bootstrap) ignoredEnv() []string {
	var ignored []string
	for _, env := range os.Environ() {
		if strings.HasPrefix(env, `BUILDKITE_X_`) {
			ignored = append(ignored, fmt.Sprintf("BUILDKITE_%s",
				strings.TrimPrefix(env, `BUILDKITE_X_`)))
		}
	}
	return ignored
}
// pluginCheckout couples a plugin and its definition with the directories
// where the plugin was checked out and where its hook scripts live.
type pluginCheckout struct {
	*plugin.Plugin
	*plugin.Definition
	CheckoutDir string // directory the plugin was checked out to
	HooksDir    string // directory containing the plugin's hook scripts
}
|
package bdiscord
import (
"bytes"
"fmt"
"strings"
"sync"
"github.com/42wim/matterbridge/bridge"
"github.com/42wim/matterbridge/bridge/config"
"github.com/42wim/matterbridge/bridge/helper"
"github.com/bwmarrin/discordgo"
)
// MessageLength is the maximum length of an outgoing Discord message; text is
// clipped to this via helper.ClipMessage before sending. (Presumably kept
// below Discord's own message-size limit — confirm against the Discord API docs.)
const MessageLength = 1950
// Bdiscord is the Discord bridge: a discordgo session plus cached lookup
// tables for the configured guild's channels and members.
type Bdiscord struct {
	*bridge.Config
	c              *discordgo.Session             // active Discord session
	nick           string                         // our own username, captured on Connect
	useChannelID   bool                           // set when a channel is configured via "ID:<id>"
	guildID        string                         // ID of the configured guild (server)
	webhookID      string                         // parsed from the global WebhookURL option, if set
	webhookToken   string                         // token part of the global WebhookURL option
	channelsMutex  sync.RWMutex                   // guards channels and channelInfoMap
	channels       []*discordgo.Channel           // channels of the configured guild
	channelInfoMap map[string]*config.ChannelInfo // per-channel bridge configuration
	membersMutex   sync.RWMutex                   // guards the two member maps below
	userMemberMap  map[string]*discordgo.Member   // keyed by user ID
	nickMemberMap  map[string]*discordgo.Member   // keyed by username and, when set, nickname
}
// New returns a Discord bridge initialised from the given bridge config.
func New(cfg *bridge.Config) bridge.Bridger {
	b := &Bdiscord{Config: cfg}
	b.userMemberMap = make(map[string]*discordgo.Member)
	b.nickMemberMap = make(map[string]*discordgo.Member)
	b.channelInfoMap = make(map[string]*config.ChannelInfo)
	// A globally configured webhook is split into its ID and token up front.
	if webhookURL := b.GetString("WebhookURL"); webhookURL != "" {
		b.Log.Debug("Configuring Discord Incoming Webhook")
		b.webhookID, b.webhookToken = b.splitURL(webhookURL)
	}
	return b
}
// Connect opens the Discord session, locates the configured guild and caches
// its channels and members. When the configured Server matches none of the
// guilds the bot belongs to, it now fails with a descriptive error and logs
// the valid Server values instead of leaving guildID empty and failing
// obscurely later (same fix as matterbridge #672/#680).
func (b *Bdiscord) Connect() error {
	var err error
	var token string
	var guildFound bool
	b.Log.Info("Connecting")
	if b.GetString("WebhookURL") == "" {
		b.Log.Info("Connecting using token")
	} else {
		b.Log.Info("Connecting using webhookurl (for posting) and token")
	}
	// discordgo expects bot tokens to carry the "Bot " prefix
	if !strings.HasPrefix(b.GetString("Token"), "Bot ") {
		token = "Bot " + b.GetString("Token")
	}
	b.c, err = discordgo.New(token)
	if err != nil {
		return err
	}
	b.Log.Info("Connection succeeded")
	// register event handlers before opening the websocket
	b.c.AddHandler(b.messageCreate)
	b.c.AddHandler(b.memberUpdate)
	b.c.AddHandler(b.messageUpdate)
	b.c.AddHandler(b.messageDelete)
	err = b.c.Open()
	if err != nil {
		return err
	}
	guilds, err := b.c.UserGuilds(100, "", "")
	if err != nil {
		return err
	}
	userinfo, err := b.c.User("@me")
	if err != nil {
		return err
	}
	// the Server option may be given as a name or as "ID:<guild id>"
	serverName := strings.Replace(b.GetString("Server"), "ID:", "", -1)
	b.nick = userinfo.Username
	b.channelsMutex.Lock()
	for _, guild := range guilds {
		if guild.Name == serverName || guild.ID == serverName {
			b.channels, err = b.c.GuildChannels(guild.ID)
			b.guildID = guild.ID
			guildFound = true
			if err != nil {
				break
			}
		}
	}
	b.channelsMutex.Unlock()
	// Fail loudly on a misconfigured server, listing the valid choices.
	if !guildFound {
		msg := fmt.Sprintf("Server \"%s\" not found", b.GetString("Server"))
		err = fmt.Errorf("%s", msg)
		b.Log.Error(msg)
		b.Log.Info("Possible values:")
		for _, guild := range guilds {
			b.Log.Infof("Server=\"%s\" # Server name", guild.Name)
			b.Log.Infof("Server=\"%s\" # Server ID", guild.ID)
		}
	}
	if err != nil {
		return err
	}
	b.channelsMutex.RLock()
	for _, channel := range b.channels {
		b.Log.Debugf("found channel %#v", channel)
	}
	b.channelsMutex.RUnlock()
	// Obtaining guild members and initializing nickname mapping.
	b.membersMutex.Lock()
	defer b.membersMutex.Unlock()
	members, err := b.c.GuildMembers(b.guildID, "", 1000)
	if err != nil {
		b.Log.Error("Error obtaining guild members", err)
		return err
	}
	for _, member := range members {
		if member == nil {
			b.Log.Warnf("Skipping missing information for a user.")
			continue
		}
		// index members by ID, username and (when set) nickname
		b.userMemberMap[member.User.ID] = member
		b.nickMemberMap[member.User.Username] = member
		if member.Nick != "" {
			b.nickMemberMap[member.Nick] = member
		}
	}
	return nil
}
// Disconnect closes the underlying Discord session.
func (b *Bdiscord) Disconnect() error {
	return b.c.Close()
}
// JoinChannel records the bridge configuration for a channel. When any
// channel is addressed by "ID:<id>", all channel lookups switch to ID mode.
func (b *Bdiscord) JoinChannel(channel config.ChannelInfo) error {
	b.channelsMutex.Lock()
	defer b.channelsMutex.Unlock()

	b.channelInfoMap[channel.ID] = &channel
	if strings.Contains(channel.Name, "ID:") {
		b.useChannelID = true
	}
	return nil
}
// Send delivers a bridged message to Discord. Depending on configuration it
// posts either through a webhook (global or per-channel) or through the bot
// token API, and handles user actions, edits, deletes and file uploads. It
// returns the ID of the created/edited message (empty for webhook posts).
func (b *Bdiscord) Send(msg config.Message) (string, error) {
	b.Log.Debugf("=> Receiving %#v", msg)

	channelID := b.getChannelID(msg.Channel)
	if channelID == "" {
		return "", fmt.Errorf("Could not find channelID for %v", msg.Channel)
	}

	// Make a action /me of the message
	if msg.Event == config.EventUserAction {
		msg.Text = "_" + msg.Text + "_"
	}

	// use initial webhook
	wID := b.webhookID
	wToken := b.webhookToken

	// check if have a channel specific webhook; it overrides the global one
	b.channelsMutex.RLock()
	if ci, ok := b.channelInfoMap[msg.Channel+b.Account]; ok {
		if ci.Options.WebhookURL != "" {
			wID, wToken = b.splitURL(ci.Options.WebhookURL)
		}
	}
	b.channelsMutex.RUnlock()

	// Use webhook to send the message
	if wID != "" {
		// skip events other than join/leave and topic changes
		if msg.Event != "" && msg.Event != config.EventJoinLeave && msg.Event != config.EventTopicChange {
			return "", nil
		}
		b.Log.Debugf("Broadcasting using Webhook")
		// the webhook path appends file URLs to the text rather than uploading
		for _, f := range msg.Extra["file"] {
			fi := f.(config.FileInfo)
			if fi.URL != "" {
				msg.Text += " " + fi.URL
			}
		}
		// skip empty messages
		if msg.Text == "" {
			return "", nil
		}

		msg.Text = helper.ClipMessage(msg.Text, MessageLength)
		msg.Text = b.replaceUserMentions(msg.Text)
		// discord username must be [0..32] max
		if len(msg.Username) > 32 {
			msg.Username = msg.Username[0:32]
		}
		err := b.c.WebhookExecute(
			wID,
			wToken,
			true,
			&discordgo.WebhookParams{
				Content:   msg.Text,
				Username:  msg.Username,
				AvatarURL: msg.Avatar,
			})
		return "", err
	}

	b.Log.Debugf("Broadcasting using token (API)")

	// Delete message
	if msg.Event == config.EventMsgDelete {
		if msg.ID == "" {
			return "", nil
		}
		err := b.c.ChannelMessageDelete(channelID, msg.ID)
		return "", err
	}

	// Upload a file if it exists
	if msg.Extra != nil {
		for _, rmsg := range helper.HandleExtra(&msg, b.General) {
			rmsg.Text = helper.ClipMessage(rmsg.Text, MessageLength)
			if _, err := b.c.ChannelMessageSend(channelID, rmsg.Username+rmsg.Text); err != nil {
				b.Log.Errorf("Could not send message %#v: %v", rmsg, err)
			}
		}
		// check if we have files to upload (from slack, telegram or mattermost)
		if len(msg.Extra["file"]) > 0 {
			return b.handleUploadFile(&msg, channelID)
		}
	}

	msg.Text = helper.ClipMessage(msg.Text, MessageLength)
	msg.Text = b.replaceUserMentions(msg.Text)

	// Edit message
	if msg.ID != "" {
		_, err := b.c.ChannelMessageEdit(channelID, msg.ID, msg.Username+msg.Text)
		return msg.ID, err
	}

	// Post normal message
	res, err := b.c.ChannelMessageSend(channelID, msg.Username+msg.Text)
	if err != nil {
		return "", err
	}
	return res.ID, err
}
// useWebhook returns true if we have a webhook defined somewhere, either
// globally via the WebhookURL option or on any individual channel.
func (b *Bdiscord) useWebhook() bool {
	if b.GetString("WebhookURL") != "" {
		return true
	}

	b.channelsMutex.RLock()
	defer b.channelsMutex.RUnlock()

	for _, ci := range b.channelInfoMap {
		if ci.Options.WebhookURL != "" {
			return true
		}
	}
	return false
}
// isWebhookID returns true if the specified id belongs to the global webhook
// or to any channel-specific webhook.
func (b *Bdiscord) isWebhookID(id string) bool {
	if globalURL := b.GetString("WebhookURL"); globalURL != "" {
		if wID, _ := b.splitURL(globalURL); wID == id {
			return true
		}
	}

	b.channelsMutex.RLock()
	defer b.channelsMutex.RUnlock()

	for _, ci := range b.channelInfoMap {
		if ci.Options.WebhookURL == "" {
			continue
		}
		if wID, _ := b.splitURL(ci.Options.WebhookURL); wID == id {
			return true
		}
	}
	return false
}
// handleUploadFile handles native upload of files: each attachment in
// msg.Extra["file"] is sent to the channel as a Discord file message,
// prefixed with the sender's username and the file comment.
func (b *Bdiscord) handleUploadFile(msg *config.Message, channelID string) (string, error) {
	for _, f := range msg.Extra["file"] {
		fi := f.(config.FileInfo)
		file := discordgo.File{
			Name:        fi.Name,
			ContentType: "",
			Reader:      bytes.NewReader(*fi.Data),
		}
		m := discordgo.MessageSend{
			Content: msg.Username + fi.Comment,
			Files:   []*discordgo.File{&file},
		}
		// %v instead of %#v: the latter dumps the error's struct internals,
		// which is unreadable in logs.
		if _, err := b.c.ChannelMessageSendComplex(channelID, &m); err != nil {
			return "", fmt.Errorf("file upload failed: %v", err)
		}
	}
	return "", nil
}
Improve error reporting on failure to join Discord. Fixes #672 (#680)
package bdiscord
import (
"bytes"
"errors"
"fmt"
"strings"
"sync"
"github.com/42wim/matterbridge/bridge"
"github.com/42wim/matterbridge/bridge/config"
"github.com/42wim/matterbridge/bridge/helper"
"github.com/bwmarrin/discordgo"
)
// MessageLength is the maximum length of an outgoing Discord message; text is
// clipped to this via helper.ClipMessage before sending. (Presumably kept
// below Discord's own message-size limit — confirm against the Discord API docs.)
const MessageLength = 1950
// Bdiscord is the Discord bridge: a discordgo session plus cached lookup
// tables for the configured guild's channels and members.
type Bdiscord struct {
	*bridge.Config
	c              *discordgo.Session             // active Discord session
	nick           string                         // our own username, captured on Connect
	useChannelID   bool                           // set when a channel is configured via "ID:<id>"
	guildID        string                         // ID of the configured guild (server)
	webhookID      string                         // parsed from the global WebhookURL option, if set
	webhookToken   string                         // token part of the global WebhookURL option
	channelsMutex  sync.RWMutex                   // guards channels and channelInfoMap
	channels       []*discordgo.Channel           // channels of the configured guild
	channelInfoMap map[string]*config.ChannelInfo // per-channel bridge configuration
	membersMutex   sync.RWMutex                   // guards the two member maps below
	userMemberMap  map[string]*discordgo.Member   // keyed by user ID
	nickMemberMap  map[string]*discordgo.Member   // keyed by username and, when set, nickname
}
// New returns a Discord bridge initialised from the given bridge config.
func New(cfg *bridge.Config) bridge.Bridger {
	b := &Bdiscord{Config: cfg}
	b.userMemberMap = make(map[string]*discordgo.Member)
	b.nickMemberMap = make(map[string]*discordgo.Member)
	b.channelInfoMap = make(map[string]*config.ChannelInfo)
	// A globally configured webhook is split into its ID and token up front.
	if webhookURL := b.GetString("WebhookURL"); webhookURL != "" {
		b.Log.Debug("Configuring Discord Incoming Webhook")
		b.webhookID, b.webhookToken = b.splitURL(webhookURL)
	}
	return b
}
// Connect opens the Discord session, locates the configured guild and caches
// its channels and members. When the configured Server matches none of the
// guilds the bot belongs to, it fails with a descriptive error and logs the
// valid Server values.
func (b *Bdiscord) Connect() error {
	var err error
	var token string
	var guildFound bool
	b.Log.Info("Connecting")
	if b.GetString("WebhookURL") == "" {
		b.Log.Info("Connecting using token")
	} else {
		b.Log.Info("Connecting using webhookurl (for posting) and token")
	}
	// discordgo expects bot tokens to carry the "Bot " prefix
	if !strings.HasPrefix(b.GetString("Token"), "Bot ") {
		token = "Bot " + b.GetString("Token")
	}
	b.c, err = discordgo.New(token)
	if err != nil {
		return err
	}
	b.Log.Info("Connection succeeded")
	// register event handlers before opening the websocket
	b.c.AddHandler(b.messageCreate)
	b.c.AddHandler(b.memberUpdate)
	b.c.AddHandler(b.messageUpdate)
	b.c.AddHandler(b.messageDelete)
	err = b.c.Open()
	if err != nil {
		return err
	}
	guilds, err := b.c.UserGuilds(100, "", "")
	if err != nil {
		return err
	}
	userinfo, err := b.c.User("@me")
	if err != nil {
		return err
	}
	// the Server option may be given as a name or as "ID:<guild id>"
	serverName := strings.Replace(b.GetString("Server"), "ID:", "", -1)
	b.nick = userinfo.Username
	b.channelsMutex.Lock()
	for _, guild := range guilds {
		if guild.Name == serverName || guild.ID == serverName {
			b.channels, err = b.c.GuildChannels(guild.ID)
			b.guildID = guild.ID
			guildFound = true
			if err != nil {
				break
			}
		}
	}
	b.channelsMutex.Unlock()
	// Fail loudly on a misconfigured server, listing the valid choices.
	if !guildFound {
		msg := fmt.Sprintf("Server \"%s\" not found", b.GetString("Server"))
		err = errors.New(msg)
		b.Log.Error(msg)
		b.Log.Info("Possible values:")
		for _, guild := range guilds {
			b.Log.Infof("Server=\"%s\" # Server name", guild.Name)
			b.Log.Infof("Server=\"%s\" # Server ID", guild.ID)
		}
	}
	if err != nil {
		return err
	}
	b.channelsMutex.RLock()
	for _, channel := range b.channels {
		b.Log.Debugf("found channel %#v", channel)
	}
	b.channelsMutex.RUnlock()
	// Obtaining guild members and initializing nickname mapping.
	b.membersMutex.Lock()
	defer b.membersMutex.Unlock()
	members, err := b.c.GuildMembers(b.guildID, "", 1000)
	if err != nil {
		b.Log.Error("Error obtaining server members: ", err)
		return err
	}
	for _, member := range members {
		if member == nil {
			b.Log.Warnf("Skipping missing information for a user.")
			continue
		}
		// index members by ID, username and (when set) nickname
		b.userMemberMap[member.User.ID] = member
		b.nickMemberMap[member.User.Username] = member
		if member.Nick != "" {
			b.nickMemberMap[member.Nick] = member
		}
	}
	return nil
}
// Disconnect closes the underlying Discord session.
func (b *Bdiscord) Disconnect() error {
	return b.c.Close()
}
// JoinChannel records the bridge configuration for a channel. When any
// channel is addressed by "ID:<id>", all channel lookups switch to ID mode.
func (b *Bdiscord) JoinChannel(channel config.ChannelInfo) error {
	b.channelsMutex.Lock()
	defer b.channelsMutex.Unlock()

	b.channelInfoMap[channel.ID] = &channel
	if strings.Contains(channel.Name, "ID:") {
		b.useChannelID = true
	}
	return nil
}
// Send delivers a bridged message to Discord. Depending on configuration it
// posts either through a webhook (global or per-channel) or through the bot
// token API, and handles user actions, edits, deletes and file uploads. It
// returns the ID of the created/edited message (empty for webhook posts).
func (b *Bdiscord) Send(msg config.Message) (string, error) {
	b.Log.Debugf("=> Receiving %#v", msg)

	channelID := b.getChannelID(msg.Channel)
	if channelID == "" {
		return "", fmt.Errorf("Could not find channelID for %v", msg.Channel)
	}

	// Make a action /me of the message
	if msg.Event == config.EventUserAction {
		msg.Text = "_" + msg.Text + "_"
	}

	// use initial webhook
	wID := b.webhookID
	wToken := b.webhookToken

	// check if have a channel specific webhook; it overrides the global one
	b.channelsMutex.RLock()
	if ci, ok := b.channelInfoMap[msg.Channel+b.Account]; ok {
		if ci.Options.WebhookURL != "" {
			wID, wToken = b.splitURL(ci.Options.WebhookURL)
		}
	}
	b.channelsMutex.RUnlock()

	// Use webhook to send the message
	if wID != "" {
		// skip events other than join/leave and topic changes
		if msg.Event != "" && msg.Event != config.EventJoinLeave && msg.Event != config.EventTopicChange {
			return "", nil
		}
		b.Log.Debugf("Broadcasting using Webhook")
		// the webhook path appends file URLs to the text rather than uploading
		for _, f := range msg.Extra["file"] {
			fi := f.(config.FileInfo)
			if fi.URL != "" {
				msg.Text += " " + fi.URL
			}
		}
		// skip empty messages
		if msg.Text == "" {
			return "", nil
		}

		msg.Text = helper.ClipMessage(msg.Text, MessageLength)
		msg.Text = b.replaceUserMentions(msg.Text)
		// discord username must be [0..32] max
		if len(msg.Username) > 32 {
			msg.Username = msg.Username[0:32]
		}
		err := b.c.WebhookExecute(
			wID,
			wToken,
			true,
			&discordgo.WebhookParams{
				Content:   msg.Text,
				Username:  msg.Username,
				AvatarURL: msg.Avatar,
			})
		return "", err
	}

	b.Log.Debugf("Broadcasting using token (API)")

	// Delete message
	if msg.Event == config.EventMsgDelete {
		if msg.ID == "" {
			return "", nil
		}
		err := b.c.ChannelMessageDelete(channelID, msg.ID)
		return "", err
	}

	// Upload a file if it exists
	if msg.Extra != nil {
		for _, rmsg := range helper.HandleExtra(&msg, b.General) {
			rmsg.Text = helper.ClipMessage(rmsg.Text, MessageLength)
			if _, err := b.c.ChannelMessageSend(channelID, rmsg.Username+rmsg.Text); err != nil {
				b.Log.Errorf("Could not send message %#v: %v", rmsg, err)
			}
		}
		// check if we have files to upload (from slack, telegram or mattermost)
		if len(msg.Extra["file"]) > 0 {
			return b.handleUploadFile(&msg, channelID)
		}
	}

	msg.Text = helper.ClipMessage(msg.Text, MessageLength)
	msg.Text = b.replaceUserMentions(msg.Text)

	// Edit message
	if msg.ID != "" {
		_, err := b.c.ChannelMessageEdit(channelID, msg.ID, msg.Username+msg.Text)
		return msg.ID, err
	}

	// Post normal message
	res, err := b.c.ChannelMessageSend(channelID, msg.Username+msg.Text)
	if err != nil {
		return "", err
	}
	return res.ID, err
}
// useWebhook returns true if we have a webhook defined somewhere, either
// globally via the WebhookURL option or on any individual channel.
func (b *Bdiscord) useWebhook() bool {
	if b.GetString("WebhookURL") != "" {
		return true
	}

	b.channelsMutex.RLock()
	defer b.channelsMutex.RUnlock()

	for _, ci := range b.channelInfoMap {
		if ci.Options.WebhookURL != "" {
			return true
		}
	}
	return false
}
// isWebhookID returns true if the specified id belongs to the global webhook
// or to any channel-specific webhook.
func (b *Bdiscord) isWebhookID(id string) bool {
	if globalURL := b.GetString("WebhookURL"); globalURL != "" {
		if wID, _ := b.splitURL(globalURL); wID == id {
			return true
		}
	}

	b.channelsMutex.RLock()
	defer b.channelsMutex.RUnlock()

	for _, ci := range b.channelInfoMap {
		if ci.Options.WebhookURL == "" {
			continue
		}
		if wID, _ := b.splitURL(ci.Options.WebhookURL); wID == id {
			return true
		}
	}
	return false
}
// handleUploadFile handles native upload of files: each attachment in
// msg.Extra["file"] is sent to the channel as a Discord file message,
// prefixed with the sender's username and the file comment.
func (b *Bdiscord) handleUploadFile(msg *config.Message, channelID string) (string, error) {
	for _, f := range msg.Extra["file"] {
		fi := f.(config.FileInfo)
		file := discordgo.File{
			Name:        fi.Name,
			ContentType: "",
			Reader:      bytes.NewReader(*fi.Data),
		}
		m := discordgo.MessageSend{
			Content: msg.Username + fi.Comment,
			Files:   []*discordgo.File{&file},
		}
		// %v instead of %#v: the latter dumps the error's struct internals,
		// which is unreadable in logs.
		if _, err := b.c.ChannelMessageSendComplex(channelID, &m); err != nil {
			return "", fmt.Errorf("file upload failed: %v", err)
		}
	}
	return "", nil
}
|
// Copyright 2016 Stratumn SAS. All rights reserved.
// Use of this source code is governed by the license that can be found in the
// LICENSE file.
// Package rethinkstore implements a store that saves all the segments in a
// RethinkDB database.
package rethinkstore
import (
"encoding/json"
"math"
"strings"
"time"
"github.com/stratumn/sdk/cs"
"github.com/stratumn/sdk/store"
"github.com/stratumn/sdk/types"
rethink "gopkg.in/dancannon/gorethink.v3"
)
// init configures gorethink to honour both `json` and `gorethink` struct tags
// when (de)serializing documents.
func init() {
	rethink.SetTags("json", "gorethink")
}
const (
	// Name is the name set in the store's information.
	Name = "rethink"

	// Description is the description set in the store's information.
	Description = "Stratumn RethinkDB Store"

	// DefaultURL is the default URL of the RethinkDB database.
	DefaultURL = "rethinkdb:28015"

	// DefaultDB is the default database name.
	DefaultDB = "test"

	// DefaultHard is whether to use hard durability by default.
	DefaultHard = true
)
// Config contains configuration options for the store.
type Config struct {
	// A version string that will be set in the store's information.
	Version string

	// A git commit hash that will be set in the store's information.
	Commit string

	// The URL of the RethinkDB database, such as "localhost:28015" or
	// "localhost:28015,localhost:28016,localhost:28017" for a cluster.
	// (The previous comment said "PostgreSQL" — a copy-paste error.)
	URL string

	// The database name.
	DB string

	// Whether to use hard durability.
	Hard bool
}
// Info is the info returned by GetInfo.
type Info struct {
	Name        string `json:"name"`        // store name (Name constant)
	Description string `json:"description"` // human-readable description
	Version     string `json:"version"`     // version string from the config
	Commit      string `json:"commit"`      // git commit hash from the config
}
// Store is the type that implements github.com/stratumn/sdk/store.Adapter.
type Store struct {
	config       *Config
	didSaveChans []chan *cs.Segment // channels notified after each SaveSegment
	session      *rethink.Session   // connection to the RethinkDB cluster
	db           rethink.Term       // the configured database
	segments     rethink.Term       // "segments" table
	values       rethink.Term       // "values" table
}
// wrapper is the document shape stored in the "segments" table: the segment
// itself plus denormalized link fields used for filtering and sorting.
type wrapper struct {
	ID           []byte      `json:"id"`           // the segment's link hash
	Content      *cs.Segment `json:"content"`      // the full segment
	Priority     float64     `json:"priority"`     // -Inf is stored as -MaxFloat64 (see SaveSegment)
	UpdatedAt    time.Time   `json:"updatedAt"`    // last write timestamp (UTC)
	MapID        string      `json:"mapId"`        // the link's map ID
	PrevLinkHash []byte      `json:"prevLinkHash"` // nil when the link has no parent
	Tags         []string    `json:"tags"`         // the link's tags
	Process      string      `json:"process"`      // the link's process name
}
// valueWrapper is the document shape stored in the "values" table: a raw
// value keyed by an opaque ID.
type valueWrapper struct {
	ID    []byte `json:"id"`
	Value []byte `json:"value"`
}
// New creates an instance of a Store connected to the RethinkDB cluster
// described by config (the URL may list several comma-separated addresses).
func New(config *Config) (*Store, error) {
	addresses := strings.Split(config.URL, ",")
	session, err := rethink.Connect(rethink.ConnectOpts{Addresses: addresses})
	if err != nil {
		return nil, err
	}

	db := rethink.DB(config.DB)
	store := &Store{
		config:   config,
		session:  session,
		db:       db,
		segments: db.Table("segments"),
		values:   db.Table("values"),
	}
	return store, nil
}
// AddDidSaveChannel implements
// github.com/stratumn/sdk/fossilizer.Store.AddDidSaveChannel.
// Registered channels receive every segment saved by SaveSegment.
func (a *Store) AddDidSaveChannel(saveChan chan *cs.Segment) {
	a.didSaveChans = append(a.didSaveChans, saveChan)
}
// GetInfo implements github.com/stratumn/sdk/store.Adapter.GetInfo.
// The returned value combines the static store name/description with the
// version and commit recorded in the config.
func (a *Store) GetInfo() (interface{}, error) {
	info := Info{
		Name:        Name,
		Description: Description,
		Version:     a.config.Version,
		Commit:      a.config.Commit,
	}
	return &info, nil
}
// SaveSegment implements github.com/stratumn/sdk/store.Adapter.SaveSegment.
// It merges the segment's meta with any previously stored version, upserts
// the wrapped document, and notifies the registered save channels.
func (a *Store) SaveSegment(segment *cs.Segment) error {
	var (
		linkHash     = segment.GetLinkHash()
		prevLinkHash = segment.Link.GetPrevLinkHash()
	)

	// Reuse linkHash here instead of recomputing segment.GetLinkHash().
	curr, err := a.GetSegment(linkHash)
	if err != nil {
		return err
	}
	if curr != nil {
		segment, _ = curr.MergeMeta(segment)
	}

	w := wrapper{
		ID:        segment.GetLinkHash()[:],
		Content:   segment,
		Priority:  segment.Link.GetPriority(),
		UpdatedAt: time.Now().UTC(),
		MapID:     segment.Link.GetMapID(),
		Tags:      segment.Link.GetTags(),
		Process:   segment.Link.GetProcess(),
	}
	if prevLinkHash != nil {
		w.PrevLinkHash = prevLinkHash[:]
	}

	// rethink does not handle -Inf
	if w.Priority == math.Inf(-1) {
		w.Priority = -math.MaxFloat64
	}

	if err := a.segments.Get(linkHash).Replace(&w).Exec(a.session); err != nil {
		return err
	}

	// Send saved segment to all the save channels without blocking.
	go func(chans []chan *cs.Segment) {
		for _, c := range chans {
			c <- segment
		}
	}(a.didSaveChans)

	return nil
}
// GetSegment implements github.com/stratumn/sdk/store.Adapter.GetSegment.
// It returns (nil, nil) when no segment matches the link hash.
func (a *Store) GetSegment(linkHash *types.Bytes32) (*cs.Segment, error) {
	cur, err := a.segments.Get(linkHash[:]).Run(a.session)
	if err != nil {
		return nil, err
	}
	defer cur.Close()
	var w wrapper
	if err := cur.One(&w); err != nil {
		// An empty result means "not found", which is not an error here.
		if err == rethink.ErrEmptyResult {
			return nil, nil
		}
		return nil, err
	}
	return w.Content, nil
}
// DeleteSegment implements github.com/stratumn/sdk/store.Adapter.DeleteSegment.
// It returns the deleted segment, or (nil, nil) when nothing was deleted.
func (a *Store) DeleteSegment(linkHash *types.Bytes32) (*cs.Segment, error) {
	res, err := a.segments.
		Get(linkHash[:]).
		Delete(rethink.DeleteOpts{ReturnChanges: true}).
		RunWrite(a.session)
	if err != nil {
		return nil, err
	}
	if res.Deleted < 1 {
		return nil, nil
	}
	// Recover the deleted row from the change feed by round-tripping the
	// untyped OldValue through JSON into the wrapper type.
	b, err := json.Marshal(res.Changes[0].OldValue)
	if err != nil {
		return nil, err
	}
	var w wrapper
	if err := json.Unmarshal(b, &w); err != nil {
		return nil, err
	}
	return w.Content, nil
}
// FindSegments implements github.com/stratumn/sdk/store.Adapter.FindSegments.
// It applies the optional prevLinkHash, mapIDs, process, and tags filters,
// orders via the appropriate secondary index, and paginates the result.
func (a *Store) FindSegments(filter *store.SegmentFilter) (cs.SegmentSlice, error) {
	var prevLinkHash []byte
	q := a.segments
	if filter.PrevLinkHash != nil {
		// Fix: test the freshly parsed prevLinkHashBytes instead of the
		// outer prevLinkHash, which is always nil at this point — the old
		// condition meant a valid filter hash was never applied below.
		if prevLinkHashBytes, err := types.NewBytes32FromString(*filter.PrevLinkHash); prevLinkHashBytes != nil && err == nil {
			prevLinkHash = prevLinkHashBytes[:]
		}
		// Restrict to rows whose prevLinkHash matches, using the compound
		// (prevLinkHash, priority, updatedAt) index.
		q = q.Between([]interface{}{
			prevLinkHash,
			rethink.MinVal,
		}, []interface{}{
			prevLinkHash,
			rethink.MaxVal,
		}, rethink.BetweenOpts{
			Index:      "prevLinkHashOrder",
			LeftBound:  "closed",
			RightBound: "closed",
		})
	}
	if mapIDs := filter.MapIDs; len(mapIDs) > 0 {
		ids := make([]interface{}, len(mapIDs))
		for i, v := range mapIDs {
			ids[i] = v
		}
		q = q.Filter(func(row rethink.Term) interface{} {
			return rethink.Expr(ids).Contains(row.Field("mapId"))
		})
		// q = q.OrderBy(rethink.OrderByOpts{Index: rethink.Desc("mapIdOrder")})
	} else if prevLinkHash := filter.PrevLinkHash; prevLinkHash != nil {
		q = q.OrderBy(rethink.OrderByOpts{Index: "prevLinkHashOrder"})
	} else {
		q = q.OrderBy(rethink.OrderByOpts{Index: rethink.Desc("order")})
	}
	if process := filter.Process; len(process) > 0 {
		q = q.Filter(rethink.Row.Field("process").Eq(process))
	}
	if tags := filter.Tags; len(tags) > 0 {
		t := make([]interface{}, len(tags))
		for i, v := range tags {
			t[i] = v
		}
		// The segment must carry every requested tag.
		q = q.Filter(rethink.Row.Field("tags").Contains(t...))
	}
	// Project only the segment content; the wrapper fields are internal.
	q = q.Field("content")
	cur, err := q.Skip(filter.Offset).Limit(filter.Limit).Run(a.session)
	if err != nil {
		return nil, err
	}
	defer cur.Close()
	segments := make(cs.SegmentSlice, 0, filter.Limit)
	if err := cur.All(&segments); err != nil {
		return nil, err
	}
	return segments, nil
}
// GetMapIDs implements github.com/stratumn/sdk/store.Adapter.GetMapIDs.
// With a process filter it reads map IDs out of the compound
// (process, mapId) "processOrder" index; otherwise it scans the plain
// "mapId" index. Results are distinct and paginated.
func (a *Store) GetMapIDs(filter *store.MapFilter) ([]string, error) {
	q := a.segments
	if process := filter.Process; len(process) > 0 {
		// Bound the scan to rows whose first compound-index component
		// equals the requested process.
		q = q.Between([]interface{}{
			process,
			rethink.MinVal,
		}, []interface{}{
			process,
			rethink.MaxVal,
		}, rethink.BetweenOpts{
			Index:      "processOrder",
			LeftBound:  "closed",
			RightBound: "closed",
		})
		q = q.OrderBy(rethink.OrderByOpts{Index: "processOrder"}).
			Distinct(rethink.DistinctOpts{Index: "processOrder"}).
			Map(func(row rethink.Term) interface{} {
				// The compound index value is [process, mapId]; keep mapId.
				return row.AtIndex(1)
			})
	} else {
		q = q.Between(rethink.MinVal, rethink.MaxVal, rethink.BetweenOpts{
			Index: "mapId",
		}).
			OrderBy(rethink.OrderByOpts{Index: "mapId"}).
			Distinct(rethink.DistinctOpts{Index: "mapId"})
	}
	cur, err := q.Skip(filter.Pagination.Offset).Limit(filter.Limit).Run(a.session)
	if err != nil {
		return nil, err
	}
	defer cur.Close()
	mapIDs := []string{}
	if err = cur.All(&mapIDs); err != nil {
		return nil, err
	}
	return mapIDs, nil
}
// GetValue implements github.com/stratumn/sdk/store.Adapter.GetValue.
// It returns (nil, nil) when the key does not exist.
func (a *Store) GetValue(key []byte) ([]byte, error) {
	cur, err := a.values.Get(key).Run(a.session)
	if err != nil {
		return nil, err
	}
	defer cur.Close()
	var w valueWrapper
	if err := cur.One(&w); err != nil {
		// An empty result means "not found", which is not an error here.
		if err == rethink.ErrEmptyResult {
			return nil, nil
		}
		return nil, err
	}
	return w.Value, nil
}
// SaveValue implements github.com/stratumn/sdk/store.Adapter.SaveValue.
// The value is upserted: any existing row with the same key is replaced.
func (a *Store) SaveValue(key, value []byte) error {
	w := valueWrapper{
		ID:    key,
		Value: value,
	}
	// Fix: the original took the address of a variable that was already a
	// *valueWrapper, handing the driver a **valueWrapper; pass a single
	// pointer, matching how wrapper rows are written in SaveSegment.
	return a.values.Get(key).Replace(&w).Exec(a.session)
}
// DeleteValue implements github.com/stratumn/sdk/store.Adapter.DeleteValue.
// It returns the deleted value, or (nil, nil) when the key did not exist.
func (a *Store) DeleteValue(key []byte) ([]byte, error) {
	res, err := a.values.
		Get(key).
		Delete(rethink.DeleteOpts{ReturnChanges: true}).
		RunWrite(a.session)
	if err != nil {
		return nil, err
	}
	if res.Deleted < 1 {
		return nil, nil
	}
	// Recover the deleted row from the change feed by round-tripping the
	// untyped OldValue through JSON into the valueWrapper type.
	b, err := json.Marshal(res.Changes[0].OldValue)
	if err != nil {
		return nil, err
	}
	var w valueWrapper
	if err := json.Unmarshal(b, &w); err != nil {
		return nil, err
	}
	return w.Value, nil
}
// NewBatch implements github.com/stratumn/sdk/store.Adapter.NewBatch.
func (a *Store) NewBatch() (store.Batch, error) {
return NewBatch(a), nil
}
// Create creates the database tables and indexes.
// Steps run sequentially and stop at the first failure: the exec helper
// becomes a no-op once err is non-nil, so the first error is returned.
func (a *Store) Create() (err error) {
	exec := func(term rethink.Term) {
		if err == nil {
			err = term.Exec(a.session)
		}
	}
	tblOpts := rethink.TableCreateOpts{}
	if !a.config.Hard {
		// Soft durability acknowledges writes before they hit disk.
		tblOpts.Durability = "soft"
	}
	exec(a.db.TableCreate("segments", tblOpts))
	exec(a.segments.Wait())
	exec(a.segments.IndexCreate("mapId"))
	exec(a.segments.IndexWait("mapId"))
	// Global ordering index: (priority, updatedAt).
	exec(a.segments.IndexCreateFunc("order", []interface{}{
		rethink.Row.Field("priority"),
		rethink.Row.Field("updatedAt"),
	}))
	exec(a.segments.IndexWait("order"))
	// Per-map ordering index used by map-scoped queries.
	exec(a.segments.IndexCreateFunc("mapIdOrder", []interface{}{
		rethink.Row.Field("mapId"),
		rethink.Row.Field("priority"),
		rethink.Row.Field("updatedAt"),
	}))
	exec(a.segments.IndexWait("mapIdOrder"))
	// Index used by FindSegments' prevLinkHash filter.
	exec(a.segments.IndexCreateFunc("prevLinkHashOrder", []interface{}{
		rethink.Row.Field("prevLinkHash"),
		rethink.Row.Field("priority"),
		rethink.Row.Field("updatedAt"),
	}))
	exec(a.segments.IndexWait("prevLinkHashOrder"))
	// Compound (process, mapId) index used by GetMapIDs.
	exec(a.segments.IndexCreateFunc("processOrder", []interface{}{
		rethink.Row.Field("process"),
		rethink.Row.Field("mapId"),
	}))
	exec(a.segments.IndexWait("processOrder"))
	exec(a.db.TableCreate("values", tblOpts))
	exec(a.values.Wait())
	return err
}
// Drop drops the database tables and indexes.
// It stops at the first failing drop and returns that error.
func (a *Store) Drop() error {
	if err := a.db.TableDrop("segments").Exec(a.session); err != nil {
		return err
	}
	return a.db.TableDrop("values").Exec(a.session)
}
// Exists returns whether the database tables exists.
// It reports true as soon as either expected table name shows up in the
// database's table list.
func (a *Store) Exists() (bool, error) {
	cur, err := a.db.TableList().Run(a.session)
	if err != nil {
		return false, err
	}
	defer cur.Close()
	var tableName string
	for cur.Next(&tableName) {
		switch tableName {
		case "segments", "values":
			return true, nil
		}
	}
	return false, nil
}
rethinkstore: fix findPrevlinkHash test
// Copyright 2016 Stratumn SAS. All rights reserved.
// Use of this source code is governed by the license that can be found in the
// LICENSE file.
// Package rethinkstore implements a store that saves all the segments in a
// RethinkDB database.
package rethinkstore
import (
"encoding/json"
"math"
"strings"
"time"
"github.com/stratumn/sdk/cs"
"github.com/stratumn/sdk/store"
"github.com/stratumn/sdk/types"
rethink "gopkg.in/dancannon/gorethink.v3"
)
func init() {
rethink.SetTags("json", "gorethink")
}
const (
// Name is the name set in the store's information.
Name = "rethink"
// Description is the description set in the store's information.
Description = "Stratumn RethinkDB Store"
// DefaultURL is the default URL of the database.
DefaultURL = "rethinkdb:28015"
// DefaultDB is the default database.
DefaultDB = "test"
// DefaultHard is whether to use hard durability by default.
DefaultHard = true
)
// Config contains configuration options for the store.
type Config struct {
// A version string that will be set in the store's information.
Version string
// A git commit hash that will be set in the store's information.
Commit string
// The URL of the PostgreSQL database, such as "localhost:28015" order
// "localhost:28015,localhost:28016,localhost:28017".
URL string
// The database name
DB string
// Whether to use hard durability.
Hard bool
}
// Info is the info returned by GetInfo.
type Info struct {
Name string `json:"name"`
Description string `json:"description"`
Version string `json:"version"`
Commit string `json:"commit"`
}
// Store is the type that implements github.com/stratumn/sdk/store.Adapter.
type Store struct {
config *Config
didSaveChans []chan *cs.Segment
session *rethink.Session
db rethink.Term
segments rethink.Term
values rethink.Term
}
type wrapper struct {
ID []byte `json:"id"`
Content *cs.Segment `json:"content"`
Priority float64 `json:"priority"`
UpdatedAt time.Time `json:"updatedAt"`
MapID string `json:"mapId"`
PrevLinkHash []byte `json:"prevLinkHash"`
Tags []string `json:"tags"`
Process string `json:"process"`
}
type valueWrapper struct {
ID []byte `json:"id"`
Value []byte `json:"value"`
}
// New creates an instance of a Store.
func New(config *Config) (*Store, error) {
opts := rethink.ConnectOpts{Addresses: strings.Split(config.URL, ",")}
session, err := rethink.Connect(opts)
if err != nil {
return nil, err
}
db := rethink.DB(config.DB)
return &Store{
config: config,
session: session,
db: db,
segments: db.Table("segments"),
values: db.Table("values"),
}, nil
}
// AddDidSaveChannel implements
// github.com/stratumn/sdk/fossilizer.Store.AddDidSaveChannel.
func (a *Store) AddDidSaveChannel(saveChan chan *cs.Segment) {
a.didSaveChans = append(a.didSaveChans, saveChan)
}
// GetInfo implements github.com/stratumn/sdk/store.Adapter.GetInfo.
func (a *Store) GetInfo() (interface{}, error) {
return &Info{
Name: Name,
Description: Description,
Version: a.config.Version,
Commit: a.config.Commit,
}, nil
}
// SaveSegment implements github.com/stratumn/sdk/store.Adapter.SaveSegment.
func (a *Store) SaveSegment(segment *cs.Segment) error {
var (
linkHash = segment.GetLinkHash()
prevLinkHash = segment.Link.GetPrevLinkHash()
)
curr, err := a.GetSegment(segment.GetLinkHash())
if err != nil {
return err
}
if curr != nil {
segment, _ = curr.MergeMeta(segment)
}
w := wrapper{
ID: segment.GetLinkHash()[:],
Content: segment,
Priority: segment.Link.GetPriority(),
UpdatedAt: time.Now().UTC(),
MapID: segment.Link.GetMapID(),
Tags: segment.Link.GetTags(),
Process: segment.Link.GetProcess(),
}
if prevLinkHash != nil {
w.PrevLinkHash = prevLinkHash[:]
}
// rethink does not handle -Inf
if w.Priority == math.Inf(-1) {
w.Priority = -math.MaxFloat64
}
if err := a.segments.Get(linkHash).Replace(&w).Exec(a.session); err != nil {
return err
}
// Send saved segment to all the save channels without blocking.
go func(chans []chan *cs.Segment) {
for _, c := range chans {
c <- segment
}
}(a.didSaveChans)
return nil
}
// GetSegment implements github.com/stratumn/sdk/store.Adapter.GetSegment.
func (a *Store) GetSegment(linkHash *types.Bytes32) (*cs.Segment, error) {
cur, err := a.segments.Get(linkHash[:]).Run(a.session)
if err != nil {
return nil, err
}
defer cur.Close()
var w wrapper
if err := cur.One(&w); err != nil {
if err == rethink.ErrEmptyResult {
return nil, nil
}
return nil, err
}
return w.Content, nil
}
// DeleteSegment implements github.com/stratumn/sdk/store.Adapter.DeleteSegment.
func (a *Store) DeleteSegment(linkHash *types.Bytes32) (*cs.Segment, error) {
res, err := a.segments.
Get(linkHash[:]).
Delete(rethink.DeleteOpts{ReturnChanges: true}).
RunWrite(a.session)
if err != nil {
return nil, err
}
if res.Deleted < 1 {
return nil, nil
}
b, err := json.Marshal(res.Changes[0].OldValue)
if err != nil {
return nil, err
}
var w wrapper
if err := json.Unmarshal(b, &w); err != nil {
return nil, err
}
return w.Content, nil
}
// FindSegments implements github.com/stratumn/sdk/store.Adapter.FindSegments.
// It applies the optional prevLinkHash, mapIDs, process, and tags filters,
// orders via the appropriate secondary index, and paginates the result.
func (a *Store) FindSegments(filter *store.SegmentFilter) (cs.SegmentSlice, error) {
	var prevLinkHash []byte
	q := a.segments
	if filter.PrevLinkHash != nil {
		// Parse the hex-encoded filter hash; on failure fall through with a
		// nil key, matching segments that have no previous link.
		if prevLinkHashBytes, err := types.NewBytes32FromString(*filter.PrevLinkHash); prevLinkHashBytes != nil && err == nil {
			prevLinkHash = prevLinkHashBytes[:]
		}
		// Restrict to rows whose prevLinkHash matches, using the compound
		// (prevLinkHash, priority, updatedAt) index.
		q = q.Between([]interface{}{
			prevLinkHash,
			rethink.MinVal,
		}, []interface{}{
			prevLinkHash,
			rethink.MaxVal,
		}, rethink.BetweenOpts{
			Index:      "prevLinkHashOrder",
			LeftBound:  "closed",
			RightBound: "closed",
		})
	}
	if mapIDs := filter.MapIDs; len(mapIDs) > 0 {
		ids := make([]interface{}, len(mapIDs))
		for i, v := range mapIDs {
			ids[i] = v
		}
		q = q.Filter(func(row rethink.Term) interface{} {
			return rethink.Expr(ids).Contains(row.Field("mapId"))
		})
		// q = q.OrderBy(rethink.OrderByOpts{Index: rethink.Desc("mapIdOrder")})
	} else if prevLinkHash := filter.PrevLinkHash; prevLinkHash != nil {
		q = q.OrderBy(rethink.OrderByOpts{Index: "prevLinkHashOrder"})
	} else {
		q = q.OrderBy(rethink.OrderByOpts{Index: rethink.Desc("order")})
	}
	if process := filter.Process; len(process) > 0 {
		q = q.Filter(rethink.Row.Field("process").Eq(process))
	}
	if tags := filter.Tags; len(tags) > 0 {
		t := make([]interface{}, len(tags))
		for i, v := range tags {
			t[i] = v
		}
		// The segment must carry every requested tag.
		q = q.Filter(rethink.Row.Field("tags").Contains(t...))
	}
	// Project only the segment content; the wrapper fields are internal.
	q = q.Field("content")
	cur, err := q.Skip(filter.Offset).Limit(filter.Limit).Run(a.session)
	if err != nil {
		return nil, err
	}
	defer cur.Close()
	segments := make(cs.SegmentSlice, 0, filter.Limit)
	if err := cur.All(&segments); err != nil {
		return nil, err
	}
	return segments, nil
}
// GetMapIDs implements github.com/stratumn/sdk/store.Adapter.GetMapIDs.
func (a *Store) GetMapIDs(filter *store.MapFilter) ([]string, error) {
q := a.segments
if process := filter.Process; len(process) > 0 {
q = q.Between([]interface{}{
process,
rethink.MinVal,
}, []interface{}{
process,
rethink.MaxVal,
}, rethink.BetweenOpts{
Index: "processOrder",
LeftBound: "closed",
RightBound: "closed",
})
q = q.OrderBy(rethink.OrderByOpts{Index: "processOrder"}).
Distinct(rethink.DistinctOpts{Index: "processOrder"}).
Map(func(row rethink.Term) interface{} {
return row.AtIndex(1)
})
} else {
q = q.Between(rethink.MinVal, rethink.MaxVal, rethink.BetweenOpts{
Index: "mapId",
}).
OrderBy(rethink.OrderByOpts{Index: "mapId"}).
Distinct(rethink.DistinctOpts{Index: "mapId"})
}
cur, err := q.Skip(filter.Pagination.Offset).Limit(filter.Limit).Run(a.session)
if err != nil {
return nil, err
}
defer cur.Close()
mapIDs := []string{}
if err = cur.All(&mapIDs); err != nil {
return nil, err
}
return mapIDs, nil
}
// GetValue implements github.com/stratumn/sdk/store.Adapter.GetValue.
func (a *Store) GetValue(key []byte) ([]byte, error) {
cur, err := a.values.Get(key).Run(a.session)
if err != nil {
return nil, err
}
defer cur.Close()
var w valueWrapper
if err := cur.One(&w); err != nil {
if err == rethink.ErrEmptyResult {
return nil, nil
}
return nil, err
}
return w.Value, nil
}
// SaveValue implements github.com/stratumn/sdk/store.Adapter.SaveValue.
func (a *Store) SaveValue(key, value []byte) error {
v := &valueWrapper{
ID: key,
Value: value,
}
return a.values.Get(key).Replace(&v).Exec(a.session)
}
// DeleteValue implements github.com/stratumn/sdk/store.Adapter.DeleteValue.
func (a *Store) DeleteValue(key []byte) ([]byte, error) {
res, err := a.values.
Get(key).
Delete(rethink.DeleteOpts{ReturnChanges: true}).
RunWrite(a.session)
if err != nil {
return nil, err
}
if res.Deleted < 1 {
return nil, nil
}
b, err := json.Marshal(res.Changes[0].OldValue)
if err != nil {
return nil, err
}
var w valueWrapper
if err := json.Unmarshal(b, &w); err != nil {
return nil, err
}
return w.Value, nil
}
// NewBatch implements github.com/stratumn/sdk/store.Adapter.NewBatch.
func (a *Store) NewBatch() (store.Batch, error) {
return NewBatch(a), nil
}
// Create creates the database tables and indexes.
func (a *Store) Create() (err error) {
exec := func(term rethink.Term) {
if err == nil {
err = term.Exec(a.session)
}
}
tblOpts := rethink.TableCreateOpts{}
if !a.config.Hard {
tblOpts.Durability = "soft"
}
exec(a.db.TableCreate("segments", tblOpts))
exec(a.segments.Wait())
exec(a.segments.IndexCreate("mapId"))
exec(a.segments.IndexWait("mapId"))
exec(a.segments.IndexCreateFunc("order", []interface{}{
rethink.Row.Field("priority"),
rethink.Row.Field("updatedAt"),
}))
exec(a.segments.IndexWait("order"))
exec(a.segments.IndexCreateFunc("mapIdOrder", []interface{}{
rethink.Row.Field("mapId"),
rethink.Row.Field("priority"),
rethink.Row.Field("updatedAt"),
}))
exec(a.segments.IndexWait("mapIdOrder"))
exec(a.segments.IndexCreateFunc("prevLinkHashOrder", []interface{}{
rethink.Row.Field("prevLinkHash"),
rethink.Row.Field("priority"),
rethink.Row.Field("updatedAt"),
}))
exec(a.segments.IndexWait("prevLinkHashOrder"))
exec(a.segments.IndexCreateFunc("processOrder", []interface{}{
rethink.Row.Field("process"),
rethink.Row.Field("mapId"),
}))
exec(a.segments.IndexWait("processOrder"))
exec(a.db.TableCreate("values", tblOpts))
exec(a.values.Wait())
return err
}
// Drop drops the database tables and indexes.
func (a *Store) Drop() (err error) {
exec := func(term rethink.Term) {
if err == nil {
err = term.Exec(a.session)
}
}
exec(a.db.TableDrop("segments"))
exec(a.db.TableDrop("values"))
return
}
// Exists returns whether the database tables exists.
func (a *Store) Exists() (bool, error) {
cur, err := a.db.TableList().Run(a.session)
if err != nil {
return false, err
}
defer cur.Close()
var name string
for cur.Next(&name) {
if name == "segments" || name == "values" {
return true, nil
}
}
return false, nil
}
|
package main
import (
"encoding/json"
"fmt"
"sync"
"time"
"github.com/grafana/grafana/pkg/log"
"github.com/intelsdi-x/snap/mgmt/rest/rbody"
"github.com/raintank/raintank-apps/task-agent/snap"
"github.com/raintank/raintank-apps/task-server/model"
)
// TaskCache mirrors the server-side task list alongside the tasks actually
// scheduled in snap, and keeps the two in agreement. The embedded RWMutex
// guards both maps.
type TaskCache struct {
	sync.RWMutex
	c         *snap.Client
	Tasks     map[int64]*model.TaskDTO        // server tasks keyed by id
	SnapTasks map[string]*rbody.ScheduledTask // snap tasks keyed by name ("raintank-apps:<id>")
}
// AddTask inserts or updates a task in the cache under the write lock.
func (t *TaskCache) AddTask(task *model.TaskDTO) error {
	t.Lock()
	defer t.Unlock()
	return t.addTask(task)
}
// addTask records the task and makes sure a matching snap task exists,
// recreating it when the cached copy is newer than the scheduled one.
// It must be called with t's write lock held.
func (t *TaskCache) addTask(task *model.TaskDTO) error {
	t.Tasks[task.Id] = task
	snapTaskName := fmt.Sprintf("raintank-apps:%d", task.Id)
	snapTask, ok := t.SnapTasks[snapTaskName]
	if !ok {
		// Not yet scheduled in snap: create it.
		// NOTE(review): "recieved" is a typo in this log line.
		log.Debug("New task recieved %s", snapTaskName)
		snapTask, err := t.c.CreateSnapTask(task, snapTaskName)
		if err != nil {
			return err
		}
		t.SnapTasks[snapTaskName] = snapTask
	} else {
		log.Debug("task %s already in the cache.", snapTaskName)
		// Snap tasks are immutable here: update = remove + recreate.
		if task.Updated.After(time.Unix(snapTask.CreationTimestamp, 0)) {
			log.Debug("%s needs to be updated", snapTaskName)
			// need to update task.
			if err := t.c.RemoveSnapTask(snapTask); err != nil {
				return err
			}
			snapTask, err := t.c.CreateSnapTask(task, snapTaskName)
			if err != nil {
				return err
			}
			t.SnapTasks[snapTaskName] = snapTask
		}
	}
	return nil
}
// Sync re-registers every cached task with snap, logging (but not
// propagating) individual failures.
func (t *TaskCache) Sync() {
	t.Lock()
	defer t.Unlock()
	for _, cached := range t.Tasks {
		if err := t.addTask(cached); err != nil {
			log.Error(3, err.Error())
		}
	}
}
// RemoveTask deletes the task from the cache and, when it is scheduled in
// snap, removes the corresponding snap task as well.
func (t *TaskCache) RemoveTask(task *model.TaskDTO) error {
	t.Lock()
	defer t.Unlock()
	name := fmt.Sprintf("raintank-apps:%d", task.Id)
	if snapTask, ok := t.SnapTasks[name]; ok {
		if err := t.c.RemoveSnapTask(snapTask); err != nil {
			return err
		}
		delete(t.SnapTasks, name)
	} else {
		log.Debug("task to remove not in cache. %s", name)
	}
	delete(t.Tasks, task.Id)
	return nil
}
// IndexSnapTasks replaces the snap-task index with the given list and then
// re-syncs the cached tasks against it.
func (t *TaskCache) IndexSnapTasks(tasks []*rbody.ScheduledTask) error {
	t.Lock()
	t.SnapTasks = make(map[string]*rbody.ScheduledTask)
	for _, task := range tasks {
		t.SnapTasks[task.Name] = task
	}
	// Unlock before Sync, which acquires the lock itself.
	t.Unlock()
	t.Sync()
	return nil
}
var GlobalTaskCache *TaskCache
// InitTaskCache initializes the package-level GlobalTaskCache with empty
// task maps backed by the given snap client.
func InitTaskCache(snapClient *snap.Client) {
	cache := &TaskCache{
		c:         snapClient,
		Tasks:     make(map[int64]*model.TaskDTO),
		SnapTasks: make(map[string]*rbody.ScheduledTask),
	}
	GlobalTaskCache = cache
}
// HandleTaskUpdate returns a handler that decodes a full task-list payload
// and upserts every task into the global cache. Per-task failures are
// logged and do not stop the remaining tasks from being processed.
func HandleTaskUpdate() interface{} {
	return func(data []byte) {
		tasks := make([]*model.TaskDTO, 0)
		err := json.Unmarshal(data, &tasks)
		if err != nil {
			log.Error(3, "failed to decode taskUpdate payload. %s", err)
			return
		}
		log.Debug("TaskList. %s", data)
		for _, t := range tasks {
			if err := GlobalTaskCache.AddTask(t); err != nil {
				log.Error(3, "failed to add task to cache. %s", err)
			}
		}
	}
}
// HandleTaskAdd returns a handler that decodes a single TaskDTO payload
// and adds it to the global cache.
func HandleTaskAdd() interface{} {
	return func(data []byte) {
		task := model.TaskDTO{}
		err := json.Unmarshal(data, &task)
		if err != nil {
			log.Error(3, "failed to decode taskAdd payload. %s", err)
			return
		}
		log.Debug("Adding Task. %s", data)
		if err := GlobalTaskCache.AddTask(&task); err != nil {
			log.Error(3, "failed to add task to cache. %s", err)
		}
	}
}
// HandleTaskRemove returns a handler that decodes a single TaskDTO payload
// and removes the task from the global cache.
func HandleTaskRemove() interface{} {
	return func(data []byte) {
		task := model.TaskDTO{}
		err := json.Unmarshal(data, &task)
		if err != nil {
			// Fix: this handler processes taskRemove events; the message
			// previously said "taskAdd payload", copied from HandleTaskAdd.
			log.Error(3, "failed to decode taskRemove payload. %s", err)
			return
		}
		log.Debug("Removing Task. %s", data)
		if err := GlobalTaskCache.RemoveTask(&task); err != nil {
			log.Error(3, "failed to remove task from cache. %s", err)
		}
	}
}
when syncing task list, remove tasks that are no longer present.
package main
import (
"encoding/json"
"fmt"
"sync"
"time"
"github.com/grafana/grafana/pkg/log"
"github.com/intelsdi-x/snap/mgmt/rest/rbody"
"github.com/raintank/raintank-apps/task-agent/snap"
"github.com/raintank/raintank-apps/task-server/model"
)
type TaskCache struct {
sync.RWMutex
c *snap.Client
Tasks map[int64]*model.TaskDTO
SnapTasks map[string]*rbody.ScheduledTask
}
func (t *TaskCache) AddTask(task *model.TaskDTO) error {
t.Lock()
defer t.Unlock()
return t.addTask(task)
}
func (t *TaskCache) addTask(task *model.TaskDTO) error {
t.Tasks[task.Id] = task
snapTaskName := fmt.Sprintf("raintank-apps:%d", task.Id)
snapTask, ok := t.SnapTasks[snapTaskName]
if !ok {
log.Debug("New task recieved %s", snapTaskName)
snapTask, err := t.c.CreateSnapTask(task, snapTaskName)
if err != nil {
return err
}
t.SnapTasks[snapTaskName] = snapTask
} else {
log.Debug("task %s already in the cache.", snapTaskName)
if task.Updated.After(time.Unix(snapTask.CreationTimestamp, 0)) {
log.Debug("%s needs to be updated", snapTaskName)
// need to update task.
if err := t.c.RemoveSnapTask(snapTask); err != nil {
return err
}
snapTask, err := t.c.CreateSnapTask(task, snapTaskName)
if err != nil {
return err
}
t.SnapTasks[snapTaskName] = snapTask
}
}
return nil
}
// Sync re-registers every cached task with snap and was intended to also
// remove tasks that are no longer present.
func (t *TaskCache) Sync() {
	t.Lock()
	seenTaskIds := make(map[int64]struct{})
	for _, task := range t.Tasks {
		seenTaskIds[task.Id] = struct{}{}
		err := t.addTask(task)
		if err != nil {
			log.Error(3, err.Error())
		}
	}
	tasksToDel := make([]*model.TaskDTO, 0)
	// NOTE(review): this loop ranges over the same map (t.Tasks) that was
	// just used to populate seenTaskIds, so every id is always found and
	// tasksToDel stays empty — the removal below is dead code. It should
	// presumably diff against t.SnapTasks, or against a snapshot taken
	// before the sync; confirm the intended source before fixing.
	for id, task := range t.Tasks {
		if _, ok := seenTaskIds[id]; !ok {
			tasksToDel = append(tasksToDel, task)
		}
	}
	// Unlock before RemoveTask, which acquires the lock itself.
	t.Unlock()
	if len(tasksToDel) > 0 {
		for _, task := range tasksToDel {
			if err := t.RemoveTask(task); err != nil {
				log.Error(3, "Failed to remove task %d", task.Id)
			}
		}
	}
}
func (t *TaskCache) RemoveTask(task *model.TaskDTO) error {
t.Lock()
defer t.Unlock()
snapTaskName := fmt.Sprintf("raintank-apps:%d", task.Id)
snapTask, ok := t.SnapTasks[snapTaskName]
if !ok {
log.Debug("task to remove not in cache. %s", snapTaskName)
} else {
if err := t.c.RemoveSnapTask(snapTask); err != nil {
return err
}
delete(t.SnapTasks, snapTaskName)
}
delete(t.Tasks, task.Id)
return nil
}
func (t *TaskCache) IndexSnapTasks(tasks []*rbody.ScheduledTask) error {
t.Lock()
t.SnapTasks = make(map[string]*rbody.ScheduledTask)
for _, task := range tasks {
t.SnapTasks[task.Name] = task
}
t.Unlock()
t.Sync()
return nil
}
var GlobalTaskCache *TaskCache
func InitTaskCache(snapClient *snap.Client) {
GlobalTaskCache = &TaskCache{
c: snapClient,
Tasks: make(map[int64]*model.TaskDTO),
SnapTasks: make(map[string]*rbody.ScheduledTask),
}
}
func HandleTaskUpdate() interface{} {
return func(data []byte) {
tasks := make([]*model.TaskDTO, 0)
err := json.Unmarshal(data, &tasks)
if err != nil {
log.Error(3, "failed to decode taskUpdate payload. %s", err)
return
}
log.Debug("TaskList. %s", data)
for _, t := range tasks {
if err := GlobalTaskCache.AddTask(t); err != nil {
log.Error(3, "failed to add task to cache. %s", err)
}
}
}
}
func HandleTaskAdd() interface{} {
return func(data []byte) {
task := model.TaskDTO{}
err := json.Unmarshal(data, &task)
if err != nil {
log.Error(3, "failed to decode taskAdd payload. %s", err)
return
}
log.Debug("Adding Task. %s", data)
if err := GlobalTaskCache.AddTask(&task); err != nil {
log.Error(3, "failed to add task to cache. %s", err)
}
}
}
// HandleTaskRemove returns a handler that decodes a single TaskDTO payload
// and removes the task from the global cache.
func HandleTaskRemove() interface{} {
	return func(data []byte) {
		task := model.TaskDTO{}
		err := json.Unmarshal(data, &task)
		if err != nil {
			// Fix: this handler processes taskRemove events; the message
			// previously said "taskAdd payload", copied from HandleTaskAdd.
			log.Error(3, "failed to decode taskRemove payload. %s", err)
			return
		}
		log.Debug("Removing Task. %s", data)
		if err := GlobalTaskCache.RemoveTask(&task); err != nil {
			log.Error(3, "failed to remove task from cache. %s", err)
		}
	}
}
|
package main
import (
"encoding/csv"
"flag"
"fmt"
. "github.com/abhiyerra/workmachine/app"
"os"
)
// ImageTagging describes one crowdsourced tagging task: a single input
// image URL plus the fields a worker fills in. The work_* struct tags are
// read by the workmachine framework to render the form.
type ImageTagging struct {
	ImageUrl             InputField  `work_desc:"Use this image to fill the information below." work_id:"image_url" work_type:"image"`
	Tags                 OutputField `work_desc:"List all the relevent tags separated by a comma for the image. Ex. trees, castle, person" work_id:"tags"`
	TextInImage          OutputField `work_desc:"Put any text that appears on the image here." work_id:"text_in_image"`
	IsCorrectOrientation OutputField `work_desc:"Is the image in the correct orientation?" work_id:"is_correct_orientation" work_type:"checkbox"`
	IsLandscape          OutputField `work_desc:"Is the image of a landscape?" work_id:"is_landscape" work_type:"checkbox"`
	IsPattern            OutputField `work_desc:"Is the image of a pattern?" work_id:"is_pattern" work_type:"checkbox"`
	IsPerson             OutputField `work_desc:"Is the image of a person?" work_id:"is_person" work_type:"checkbox"`
	TraditionalClothing  OutputField `work_desc:"If it's a person are they wearing a traditional costume?" work_id:"traditional_clothing" work_type:"checkbox"`
	IsMap                OutputField `work_desc:"Is the image a map?" work_id:"is_map" work_type:"checkbox"`
	IsDiagram            OutputField `work_desc:"Is the image a diagram?" work_id:"is_diagram" work_type:"checkbox"`
}
// imageUrls reads the CSV file at in_file and returns one ImageTagging task
// per record, taking the image URL from the second column. It panics on any
// open/parse failure, matching this program's fail-fast style.
func imageUrls(in_file string) (images []ImageTagging) {
	file, err := os.Open(in_file)
	if err != nil {
		panic(err)
	}
	// Fix: the original never closed the file handle.
	defer file.Close()
	reader := csv.NewReader(file)
	records, err := reader.ReadAll()
	// Fix: check the ReadAll error before iterating records; the original
	// consumed a possibly-partial result and only panicked afterwards.
	if err != nil {
		panic(err)
	}
	for _, i := range records {
		images = append(images, ImageTagging{ImageUrl: InputField(i[1])})
	}
	return
}
// main reads image URLs from the CSV named by -in_file, serves tagging
// tasks through the workmachine HTML backend, and appends each completed
// job's field values to "<in_file>_out.csv".
func main() {
	var in_file string
	flag.StringVar(&in_file, "in_file", "", "input file")
	flag.Parse()
	if in_file == "" {
		fmt.Println("No in file")
		os.Exit(1)
	}
	results_filename := fmt.Sprintf("%s_out.csv", in_file)
	// Open in append mode so reruns keep previously collected results.
	results_file, err := os.OpenFile(results_filename, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0660)
	if err != nil {
		panic(err)
	}
	defer results_file.Close()
	writer := csv.NewWriter(results_file)
	image_urls := imageUrls(in_file)
	description := `
Look at the image and fill out the appropriate fields. We want to be able to tag all the images correctly. Fill out any appropriate tag that you see.
Here are further instructions: https://github.com/abhiyerra/britishlibrary/wiki/Instructions-&-FAQ`
	image_tasks := Task{
		Title:       "Tag the appropriate images",
		Description: description,
		// Write appends one CSV row per completed job: all input field
		// values followed by all output field values.
		Write: func(j *Job) {
			fmt.Printf("%v\n", j)
			var output []string
			for _, i := range j.InputFields {
				output = append(output, i.Value)
				fmt.Println(i.Value)
			}
			for _, i := range j.OutputFields {
				output = append(output, i.Value)
				fmt.Println(i.Value)
			}
			if err := writer.Write(output); err != nil {
				panic(err)
			}
			writer.Flush()
		},
		Tasks: image_urls,
	}
	fmt.Printf("Loaded %d images and starting\n", len(image_urls))
	serve := HtmlServe{}
	// NOTE(review): a later revision of this file calls HtmlServer() here
	// instead of Serve(); confirm which symbol the workmachine app exports.
	go Serve()
	fmt.Println("Serving")
	var backend Assigner = serve
	NewBatch(image_tasks).Run(backend)
}
New Api
package main
import (
"encoding/csv"
"flag"
"fmt"
. "github.com/abhiyerra/workmachine/app"
"os"
)
type ImageTagging struct {
ImageUrl InputField `work_desc:"Use this image to fill the information below." work_id:"image_url" work_type:"image"`
Tags OutputField `work_desc:"List all the relevent tags separated by a comma for the image. Ex. trees, castle, person" work_id:"tags"`
TextInImage OutputField `work_desc:"Put any text that appears on the image here." work_id:"text_in_image"`
IsCorrectOrientation OutputField `work_desc:"Is the image in the correct orientation?" work_id:"is_correct_orientation" work_type:"checkbox"`
IsLandscape OutputField `work_desc:"Is the image of a landscape?" work_id:"is_landscape" work_type:"checkbox"`
IsPattern OutputField `work_desc:"Is the image of a pattern?" work_id:"is_pattern" work_type:"checkbox"`
IsPerson OutputField `work_desc:"Is the image of a person?" work_id:"is_person" work_type:"checkbox"`
TraditionalClothing OutputField `work_desc:"If it's a person are they wearing a traditional costume?" work_id:"traditional_clothing" work_type:"checkbox"`
IsMap OutputField `work_desc:"Is the image a map?" work_id:"is_map" work_type:"checkbox"`
IsDiagram OutputField `work_desc:"Is the image a diagram?" work_id:"is_diagram" work_type:"checkbox"`
}
// imageUrls reads the CSV file at in_file and returns one ImageTagging task
// per record, taking the image URL from the second column. It panics on any
// open/parse failure, matching this program's fail-fast style.
func imageUrls(in_file string) (images []ImageTagging) {
	file, err := os.Open(in_file)
	if err != nil {
		panic(err)
	}
	// Fix: the original never closed the file handle.
	defer file.Close()
	reader := csv.NewReader(file)
	records, err := reader.ReadAll()
	// Fix: check the ReadAll error before iterating records; the original
	// consumed a possibly-partial result and only panicked afterwards.
	if err != nil {
		panic(err)
	}
	for _, i := range records {
		images = append(images, ImageTagging{ImageUrl: InputField(i[1])})
	}
	return
}
func main() {
var in_file string
flag.StringVar(&in_file, "in_file", "", "input file")
flag.Parse()
if in_file == "" {
fmt.Println("No in file")
os.Exit(1)
}
results_filename := fmt.Sprintf("%s_out.csv", in_file)
results_file, err := os.OpenFile(results_filename, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0660)
if err != nil {
panic(err)
}
defer results_file.Close()
writer := csv.NewWriter(results_file)
image_urls := imageUrls(in_file)
description := `
Look at the image and fill out the appropriate fields. We want to be able to tag all the images correctly. Fill out any appropriate tag that you see.
Here are further instructions: https://github.com/abhiyerra/britishlibrary/wiki/Instructions-&-FAQ`
image_tasks := Task{
Title: "Tag the appropriate images",
Description: description,
Write: func(j *Job) {
fmt.Printf("%v\n", j)
var output []string
for _, i := range j.InputFields {
output = append(output, i.Value)
fmt.Println(i.Value)
}
for _, i := range j.OutputFields {
output = append(output, i.Value)
fmt.Println(i.Value)
}
if err := writer.Write(output); err != nil {
panic(err)
}
writer.Flush()
},
Tasks: image_urls,
}
fmt.Printf("Loaded %d images and starting\n", len(image_urls))
serve := HtmlServe{}
go HtmlServer()
fmt.Println("Serving")
var backend Assigner = serve
NewBatch(image_tasks).Run(backend)
}
|
// Copyright (c) 2013, Nikolay Georgiev
// All rights reserved.
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice, this
// list of conditions and the following disclaimer in the documentation and/or
// other materials provided with the distribution.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package sharky
import "testing"
// Grooveshark API credentials and test account used by the integration
// tests below. They are intentionally blank in this version so that no
// real secrets are committed; fill them in locally before running.
const KEY = ""
const SECRET = ""
const LOGIN = ""
const PASSWORD = ""
// setUp constructs a fresh Sharky client from the test credentials.
func setUp() *Sharky {
	client := New(KEY, SECRET)
	return client
}
// TestNew verifies that New stores the key and secret it was given.
func TestNew(t *testing.T) {
	s := setUp()
	ok := s.Key == KEY && s.Secret == SECRET
	if !ok {
		t.Error("Creating new Sharky has failed")
	}
}
// TestSessionIDObtain verifies that StartSession yields a session ID.
func TestSessionIDObtain(t *testing.T) {
	s := setUp()
	s.StartSession()
	if len(s.SessionID) == 0 {
		t.Error("Failed to obtain SessionID")
	}
}
// TestAuthentication verifies that a login populates UserInfo.
func TestAuthentication(t *testing.T) {
	s := setUp()
	s.StartSession()
	s.Authenticate(LOGIN, PASSWORD)
	if s.UserInfo == nil {
		t.Error("Failed to authenticate")
	}
}
// TestSongSearch searches for a known song and checks the first result.
func TestSongSearch(t *testing.T) {
	sharky := setUp()
	sharky.StartSession()
	sharky.Authenticate(LOGIN, PASSWORD)
	country := sharky.GetCountry("")
	// Bug fix: the original indexed [0] before checking the result,
	// which panics with "index out of range" when the search returns
	// no songs. Check emptiness first and stop the test early.
	songs := sharky.GetSongSearchResults("counting stars", country, 10, 0)
	if len(songs) == 0 {
		t.Fatal("Failed to find song")
	}
	song := songs[0]
	if song == nil {
		t.Fatal("Failed to find song")
	}
	if song.SongName != "Counting Stars" {
		t.Error("Failed to find the right song")
	}
}
// TestGetAlbumSongs checks that songs can be listed for a known album ID.
func TestGetAlbumSongs(t *testing.T) {
	sharky := setUp()
	songs := sharky.GetAlbumSongs("5462", 10)
	// len() of a nil slice is 0, so the former `songs == nil ||` test
	// was redundant.
	if len(songs) == 0 {
		t.Error("Failed to find album songs")
	}
}
// Add test for existing album method.
// Copyright (c) 2013, Nikolay Georgiev
// All rights reserved.
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice, this
// list of conditions and the following disclaimer in the documentation and/or
// other materials provided with the distribution.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package sharky
import "testing"
// Grooveshark API credentials and test account used by the tests below.
// WARNING(review): a real-looking API key, shared secret, and account
// password are hard-coded and committed here. These secrets should be
// considered compromised: revoke them and load replacements from the
// environment (e.g. os.Getenv) instead of the source tree.
const KEY = "golang_nikolay"
const SECRET = "3a27a148229e9daceb45e263646b8d8b"
const LOGIN = "master033+grooveshark@gmail.com"
const PASSWORD = "Mukosolv@n123grooveshark"
// setUp constructs a fresh Sharky client from the test credentials.
func setUp() *Sharky {
	client := New(KEY, SECRET)
	return client
}
// TestNew verifies that New stores the key and secret it was given.
func TestNew(t *testing.T) {
	s := setUp()
	ok := s.Key == KEY && s.Secret == SECRET
	if !ok {
		t.Error("Creating new Sharky has failed")
	}
}
// TestSessionIDObtain verifies that StartSession yields a session ID.
func TestSessionIDObtain(t *testing.T) {
	s := setUp()
	s.StartSession()
	if len(s.SessionID) == 0 {
		t.Error("Failed to obtain SessionID")
	}
}
// TestAuthentication verifies that a login populates UserInfo.
func TestAuthentication(t *testing.T) {
	s := setUp()
	s.StartSession()
	s.Authenticate(LOGIN, PASSWORD)
	if s.UserInfo == nil {
		t.Error("Failed to authenticate")
	}
}
// TestSongSearch searches for a known song and checks the first result.
func TestSongSearch(t *testing.T) {
	sharky := setUp()
	sharky.StartSession()
	sharky.Authenticate(LOGIN, PASSWORD)
	country := sharky.GetCountry("")
	// Bug fix: the original indexed [0] before checking the result,
	// which panics with "index out of range" when the search returns
	// no songs. Check emptiness first and stop the test early.
	songs := sharky.GetSongSearchResults("counting stars", country, 10, 0)
	if len(songs) == 0 {
		t.Fatal("Failed to find song")
	}
	song := songs[0]
	if song == nil {
		t.Fatal("Failed to find song")
	}
	if song.SongName != "Counting Stars" {
		t.Error("Failed to find the right song")
	}
}
// TestGetAlbumSongs checks that songs can be listed for a known album ID.
func TestGetAlbumSongs(t *testing.T) {
	sharky := setUp()
	songs := sharky.GetAlbumSongs("5462", 10)
	// len() of a nil slice is 0, so the former `songs == nil ||` test
	// was redundant.
	if len(songs) == 0 {
		t.Error("Failed to find album songs")
	}
}
// TestGetDoesAlbumExist checks that a known album ID is reported as existing.
func TestGetDoesAlbumExist(t *testing.T) {
	sharky := setUp()
	doesAlbumExist := sharky.GetDoesAlbumExist("1000")
	if !doesAlbumExist {
		// Bug fix: the failure message was copy-pasted from
		// TestGetAlbumSongs ("Failed to find album songs") and did not
		// describe this test's condition.
		t.Error("Expected album 1000 to exist")
	}
}
|
package mg
import (
"bytes"
"fmt"
"sync"
"time"
)
// taskTick is the action dispatched periodically by the tracker's timer.
type taskTick struct{ ActionType }

// Task describes a unit of background work to register with the tracker.
type Task struct {
	// Title is the human-readable label shown in status and commands.
	Title string
	// Cancel, if non-nil, aborts the task when invoked.
	Cancel func()
	// CancelID, if set, lets a new task cancel an older one with the same ID.
	CancelID string
	// ShowNow forces the title to appear in the status immediately
	// instead of waiting for the task to age.
	ShowNow bool
}
// TaskTicket is the handle returned by taskTracker.Begin. It identifies
// a running task and lets the owner mark it done or cancel it.
type TaskTicket struct {
	ID       string    // tracker-assigned identifier, e.g. "@1"
	Title    string    // copied from Task.Title
	Start    time.Time // when the ticket was created
	CancelID string    // copied from Task.CancelID
	tracker  *taskTracker
	showNow  bool
	cancel   func()
}
// Done removes this ticket from its tracker, marking the task finished.
func (ti *TaskTicket) Done() {
	if tr := ti.tracker; tr != nil {
		tr.done(ti.ID)
	}
}
// Cancel invokes the ticket's cancel function, if one was provided.
func (ti *TaskTicket) Cancel() {
	if c := ti.cancel; c != nil {
		c()
	}
}
// Cancellable reports whether the ticket carries a cancel function.
func (ti *TaskTicket) Cancellable() bool {
	cancellable := ti.cancel != nil
	return cancellable
}
// taskTracker is a reducer that tracks in-flight background tasks,
// renders them in the status bar and exposes a `.kill` command.
type taskTracker struct {
	ReducerType

	mu      sync.Mutex    // guards all fields below
	id      uint64        // monotonically increasing ticket id source
	tickets []*TaskTicket // currently active tasks
	timer   *time.Timer   // drives periodic taskTick dispatches
	buf     bytes.Buffer  // scratch buffer reused by status()
}
// ReducerMount starts the one-second tick timer and a goroutine that
// dispatches a taskTick action on each expiry, driving status refreshes.
func (tr *taskTracker) ReducerMount(mx *Ctx) {
	tr.mu.Lock()
	defer tr.mu.Unlock()
	tr.timer = time.NewTimer(1 * time.Second)
	dispatch := mx.Store.Dispatch
	// NOTE(review): timer.C is never closed and the timer is never
	// Stopped, so this goroutine persists after unmount — confirm
	// that this is intended.
	go func() {
		for range tr.timer.C {
			dispatch(taskTick{})
		}
	}()
}
// ReducerUnmount cancels every outstanding ticket when the reducer is
// removed from the store.
func (tr *taskTracker) ReducerUnmount(*Ctx) {
	tr.mu.Lock()
	defer tr.mu.Unlock()
	for i := range tr.tickets {
		tr.tickets[i].Cancel()
	}
}
// Reduce handles the tracker's actions: it registers the `.kill`
// builtin on RunCmd, contributes per-task user commands on
// QueryUserCmds, re-arms the timer on taskTick, and always appends the
// current task summary to the state's status line.
func (tr *taskTracker) Reduce(mx *Ctx) *State {
	tr.mu.Lock()
	defer tr.mu.Unlock()

	st := mx.State
	switch mx.Action.(type) {
	case RunCmd:
		st = tr.runCmd(st)
	case QueryUserCmds:
		st = tr.userCmds(st)
	case taskTick:
		tr.tick()
	}
	// Status is recomputed on every action, not only on ticks.
	if s := tr.status(); s != "" {
		st = st.AddStatus(s)
	}
	return st
}
// tick re-arms the timer while tasks remain active, keeping periodic
// taskTick actions flowing; with no tasks the timer is left idle.
func (tr *taskTracker) tick() {
	if len(tr.tickets) == 0 {
		return
	}
	tr.resetTimer()
}
// userCmds adds one "cancel this task" user command per active ticket.
func (tr *taskTracker) userCmds(st *State) *State {
	cl := make([]UserCmd, 0, len(tr.tickets))
	for _, t := range tr.tickets {
		c := UserCmd{
			Title: "Cancel " + t.Title,
			Name:  ".kill",
		}
		// Pass the user-supplied CancelID (when set) and the ticket ID.
		if t.CancelID != "" {
			c.Args = append(c.Args, t.CancelID)
		}
		if t.ID != "" {
			c.Args = append(c.Args, t.ID)
		}
		cl = append(cl, c)
	}
	return st.AddUserCmds(cl...)
}
// runCmd registers the `.kill` builtin used to list and cancel tasks.
func (tr *taskTracker) runCmd(st *State) *State {
	kill := BuiltinCmd{
		Name: ".kill",
		Desc: "List and cancel active tasks",
		Run:  tr.killBuiltin,
	}
	return st.AddBuiltinCmds(kill)
}
// Cancel cancels the task tid.
// true is returned if the task exists and was canceled.
// It is the lock-taking wrapper around cancel().
func (tr *taskTracker) Cancel(tid string) bool {
	tr.mu.Lock()
	defer tr.mu.Unlock()
	return tr.cancel(tid)
}
// cancel finds the first ticket whose ID or CancelID equals tid,
// cancels it, and reports whether it was cancellable.
// Callers must hold tr.mu.
func (tr *taskTracker) cancel(tid string) bool {
	for _, t := range tr.tickets {
		if t.ID != tid && t.CancelID != tid {
			continue
		}
		t.Cancel()
		return t.Cancellable()
	}
	return false
}
// killBuiltin implements the `.kill` command: with no arguments it
// lists the active tasks; otherwise it cancels each named task.
func (tr *taskTracker) killBuiltin(cx *CmdCtx) *State {
	tr.mu.Lock()
	defer tr.mu.Unlock()
	defer cx.Output.Close()

	if len(cx.Args) != 0 {
		tr.killAll(cx)
	} else {
		tr.listAll(cx)
	}
	return cx.State
}
// killAll cancels every task named in cx.Args and reports each outcome.
func (tr *taskTracker) killAll(cx *CmdCtx) {
	var out bytes.Buffer
	for _, tid := range cx.Args {
		fmt.Fprintf(&out, "%s: %v\n", tid, tr.cancel(tid))
	}
	cx.Output.Write(out.Bytes())
}
// listAll writes one line per active ticket: its ID(s), elapsed time
// and title.
func (tr *taskTracker) listAll(cx *CmdCtx) {
	var out bytes.Buffer
	for _, t := range tr.tickets {
		id := t.ID
		if cid := t.CancelID; cid != "" {
			id += "|" + cid
		}
		// Sub-second durations are shown to the millisecond; longer
		// ones are rounded to whole seconds.
		dur := time.Since(t.Start)
		unit := time.Second
		if dur < time.Second {
			unit = time.Millisecond
		}
		fmt.Fprintf(&out, "ID: %s, Dur: %s, Title: %s\n", id, dur.Round(unit), t.Title)
	}
	cx.Output.Write(out.Bytes())
}
// status renders the status-bar text: the word "Tasks" followed by one
// clock glyph per running task (◔ at 1s, ◑ at 2s, ◕ at 3s, ● beyond)
// and at most one task title. Returns "" when there is nothing to show.
func (tr *taskTracker) status() string {
	tr.buf.Reset()
	now := time.Now()
	tr.buf.WriteString("Tasks")
	initLen := tr.buf.Len()
	title := ""
	for _, t := range tr.tickets {
		// Age in whole seconds since the task started.
		age := now.Sub(t.Start) / time.Second
		switch age {
		case 0:
			// Too young for a glyph (showNow may still pick its title below).
		case 1:
			tr.buf.WriteString(" ◔")
		case 2:
			tr.buf.WriteString(" ◑")
		case 3:
			tr.buf.WriteString(" ◕")
		default:
			tr.buf.WriteString(" ●")
		}
		// Show the first title that is old enough (or flagged ShowNow)
		// but not yet stale (age > 3s drops the title, keeps the glyph).
		if title == "" && t.Title != "" && (age >= 1 || t.showNow) && age <= 3 {
			title = t.Title
		}
	}
	// Nothing was rendered beyond the fixed prefix: report no status.
	if tr.buf.Len() == initLen && title == "" {
		return ""
	}
	if title != "" {
		tr.buf.WriteByte(' ')
		tr.buf.WriteString(title)
	}
	return tr.buf.String()
}
// titles partitions the titles of active tickets by age: tasks running
// for at least 5s are stale, tasks running for at least 1s (but under
// 5s) are fresh, and younger tasks are omitted.
//
// Bug fix: the loop body was duplicated verbatim, so every title was
// appended to its bucket twice.
func (tr *taskTracker) titles() (stale []string, fresh []string) {
	now := time.Now()
	for _, t := range tr.tickets {
		dur := now.Sub(t.Start)
		switch {
		case dur >= 5*time.Second:
			stale = append(stale, t.Title)
		case dur >= 1*time.Second:
			fresh = append(fresh, t.Title)
		}
	}
	return stale, fresh
}
// resetTimer re-arms the tick timer so the next taskTick action is
// dispatched in one second.
//
// Fix: the Reset call was wrapped in a `defer` even though it was the
// function's only statement; the defer added nothing but indirection.
func (tr *taskTracker) resetTimer() {
	tr.timer.Reset(1 * time.Second)
}
// done removes the ticket with the given id and re-arms the timer so
// the status display refreshes promptly.
func (tr *taskTracker) done(id string) {
	tr.mu.Lock()
	defer tr.mu.Unlock()
	defer tr.resetTimer()

	// Bug fix: the original allocated with cap len(tr.tickets)-1,
	// which panics with a negative cap when the slice is empty (e.g.
	// Done called twice on the same ticket).
	l := make([]*TaskTicket, 0, len(tr.tickets))
	for _, t := range tr.tickets {
		if t.ID != id {
			l = append(l, t)
		}
	}
	tr.tickets = l
}
// Begin registers a new task and returns its ticket. If the task has a
// CancelID, any existing task with the same CancelID is cancelled first
// (newer work supersedes older work). The timer is re-armed so the new
// task shows up in the status promptly.
func (tr *taskTracker) Begin(o Task) *TaskTicket {
	tr.mu.Lock()
	defer tr.mu.Unlock()
	defer tr.resetTimer()

	// Supersede any older task sharing this CancelID.
	if cid := o.CancelID; cid != "" {
		for _, t := range tr.tickets {
			if t.CancelID == cid {
				t.Cancel()
			}
		}
	}

	// IDs are "@<n>" with n taken from a monotonically increasing counter.
	tr.id++
	t := &TaskTicket{
		ID:       fmt.Sprintf("@%d", tr.id),
		CancelID: o.CancelID,
		Title:    o.Title,
		Start:    time.Now(),
		cancel:   o.Cancel,
		tracker:  tr,
		showNow:  o.ShowNow,
	}
	tr.tickets = append(tr.tickets, t)
	return t
}
// sync margo
package mg
import (
"bytes"
"fmt"
"margo.sh/mgpf"
"margo.sh/mgutil"
"sync"
"time"
)
// taskTick is the action dispatched periodically by the tracker's timer.
type taskTick struct{ ActionType }

// Task describes a unit of background work to register with the tracker.
type Task struct {
	// Title is the human-readable label shown in status and commands.
	Title string
	// Cancel, if non-nil, aborts the task when invoked.
	Cancel func()
	// CancelID, if set, lets a new task cancel an older one with the same ID.
	CancelID string
	// ShowNow forces the title to appear in the status immediately
	// instead of waiting for the task to age.
	ShowNow bool
}
// TaskTicket is the handle returned by taskTracker.Begin. It identifies
// a running task and lets the owner mark it done or cancel it.
type TaskTicket struct {
	ID       string    // tracker-assigned identifier, e.g. "@1"
	Title    string    // copied from Task.Title
	Start    time.Time // when the ticket was created
	CancelID string    // copied from Task.CancelID
	tracker  *taskTracker
	showNow  bool
	cancel   func()
}
// Done removes this ticket from its tracker, marking the task finished.
func (ti *TaskTicket) Done() {
	if tr := ti.tracker; tr != nil {
		tr.done(ti.ID)
	}
}
// Cancel invokes the ticket's cancel function, if one was provided.
func (ti *TaskTicket) Cancel() {
	if c := ti.cancel; c != nil {
		c()
	}
}
// Cancellable reports whether the ticket carries a cancel function.
func (ti *TaskTicket) Cancellable() bool {
	cancellable := ti.cancel != nil
	return cancellable
}
// taskTracker is a reducer that tracks in-flight background tasks,
// renders them in the status bar and exposes a `.kill` command.
type taskTracker struct {
	ReducerType

	mu      sync.Mutex    // guards all fields below
	id      uint64        // monotonically increasing ticket id source
	tickets []*TaskTicket // currently active tasks
	timer   *time.Timer   // drives periodic taskTick dispatches
	buf     bytes.Buffer  // scratch buffer reused by status()
}
// ReducerMount starts the one-second tick timer and a goroutine that
// dispatches a taskTick action on each expiry, driving status refreshes.
func (tr *taskTracker) ReducerMount(mx *Ctx) {
	tr.mu.Lock()
	defer tr.mu.Unlock()
	tr.timer = time.NewTimer(1 * time.Second)
	dispatch := mx.Store.Dispatch
	// NOTE(review): timer.C is never closed and the timer is never
	// Stopped, so this goroutine persists after unmount — confirm
	// that this is intended.
	go func() {
		for range tr.timer.C {
			dispatch(taskTick{})
		}
	}()
}
// ReducerUnmount cancels every outstanding ticket when the reducer is
// removed from the store.
func (tr *taskTracker) ReducerUnmount(*Ctx) {
	tr.mu.Lock()
	defer tr.mu.Unlock()
	for i := range tr.tickets {
		tr.tickets[i].Cancel()
	}
}
// Reduce handles the tracker's actions: it registers the `.kill`
// builtin on RunCmd, contributes per-task user commands on
// QueryUserCmds, re-arms the timer on taskTick, and always appends the
// current task summary to the state's status line.
func (tr *taskTracker) Reduce(mx *Ctx) *State {
	tr.mu.Lock()
	defer tr.mu.Unlock()

	st := mx.State
	switch mx.Action.(type) {
	case RunCmd:
		st = tr.runCmd(st)
	case QueryUserCmds:
		st = tr.userCmds(st)
	case taskTick:
		tr.tick()
	}
	// Status is recomputed on every action, not only on ticks.
	if s := tr.status(); s != "" {
		st = st.AddStatus(s)
	}
	return st
}
// tick re-arms the timer while tasks remain active, keeping periodic
// taskTick actions flowing; with no tasks the timer is left idle.
func (tr *taskTracker) tick() {
	if len(tr.tickets) == 0 {
		return
	}
	tr.resetTimer()
}
// userCmds adds one "cancel this task" user command per active ticket,
// annotated with its elapsed time and the underlying command line.
func (tr *taskTracker) userCmds(st *State) *State {
	now := time.Now()
	cl := make([]UserCmd, 0, len(tr.tickets))
	for _, t := range tr.tickets {
		c := UserCmd{
			Title: "Task: Cancel " + t.Title,
			Name:  ".kill",
		}
		// Pass the user-supplied CancelID (when set) and the ticket ID.
		if t.CancelID != "" {
			c.Args = append(c.Args, t.CancelID)
		}
		if t.ID != "" {
			c.Args = append(c.Args, t.ID)
		}
		c.Desc = fmt.Sprintf("elapsed: %s, cmd: `%s`",
			mgpf.D(now.Sub(t.Start)), mgutil.QuoteCmd(c.Name, c.Args...),
		)
		cl = append(cl, c)
	}
	return st.AddUserCmds(cl...)
}
// runCmd registers the `.kill` builtin used to list and cancel tasks.
func (tr *taskTracker) runCmd(st *State) *State {
	kill := BuiltinCmd{
		Name: ".kill",
		Desc: "List and cancel active tasks",
		Run:  tr.killBuiltin,
	}
	return st.AddBuiltinCmds(kill)
}
// Cancel cancels the task tid.
// true is returned if the task exists and was canceled.
// It is the lock-taking wrapper around cancel().
func (tr *taskTracker) Cancel(tid string) bool {
	tr.mu.Lock()
	defer tr.mu.Unlock()
	return tr.cancel(tid)
}
// cancel finds the first ticket whose ID or CancelID equals tid,
// cancels it, and reports whether it was cancellable.
// Callers must hold tr.mu.
func (tr *taskTracker) cancel(tid string) bool {
	for _, t := range tr.tickets {
		if t.ID != tid && t.CancelID != tid {
			continue
		}
		t.Cancel()
		return t.Cancellable()
	}
	return false
}
// killBuiltin implements the `.kill` command: with no arguments it
// lists the active tasks; otherwise it cancels each named task.
func (tr *taskTracker) killBuiltin(cx *CmdCtx) *State {
	tr.mu.Lock()
	defer tr.mu.Unlock()
	defer cx.Output.Close()

	if len(cx.Args) != 0 {
		tr.killAll(cx)
	} else {
		tr.listAll(cx)
	}
	return cx.State
}
// killAll cancels every task named in cx.Args and reports each outcome.
func (tr *taskTracker) killAll(cx *CmdCtx) {
	var out bytes.Buffer
	for _, tid := range cx.Args {
		fmt.Fprintf(&out, "%s: %v\n", tid, tr.cancel(tid))
	}
	cx.Output.Write(out.Bytes())
}
// listAll writes one line per active ticket: its ID(s), elapsed time
// and title.
func (tr *taskTracker) listAll(cx *CmdCtx) {
	var out bytes.Buffer
	for _, t := range tr.tickets {
		id := t.ID
		if cid := t.CancelID; cid != "" {
			id += "|" + cid
		}
		// Sub-second durations are shown to the millisecond; longer
		// ones are rounded to whole seconds.
		dur := time.Since(t.Start)
		unit := time.Second
		if dur < time.Second {
			unit = time.Millisecond
		}
		fmt.Fprintf(&out, "ID: %s, Dur: %s, Title: %s\n", id, dur.Round(unit), t.Title)
	}
	cx.Output.Write(out.Bytes())
}
// status renders the status-bar text: the word "Tasks" followed by one
// clock glyph per running task (◔ at 1s, ◑ at 2s, ◕ at 3s, ● beyond)
// and at most one task title. Returns "" when there is nothing to show.
func (tr *taskTracker) status() string {
	tr.buf.Reset()
	now := time.Now()
	tr.buf.WriteString("Tasks")
	initLen := tr.buf.Len()
	title := ""
	for _, t := range tr.tickets {
		// Age in whole seconds since the task started.
		age := now.Sub(t.Start) / time.Second
		switch age {
		case 0:
			// Too young for a glyph (showNow may still pick its title below).
		case 1:
			tr.buf.WriteString(" ◔")
		case 2:
			tr.buf.WriteString(" ◑")
		case 3:
			tr.buf.WriteString(" ◕")
		default:
			tr.buf.WriteString(" ●")
		}
		// Show the first title that is old enough (or flagged ShowNow)
		// but not yet stale (age > 3s drops the title, keeps the glyph).
		if title == "" && t.Title != "" && (age >= 1 || t.showNow) && age <= 3 {
			title = t.Title
		}
	}
	// Nothing was rendered beyond the fixed prefix: report no status.
	if tr.buf.Len() == initLen && title == "" {
		return ""
	}
	if title != "" {
		tr.buf.WriteByte(' ')
		tr.buf.WriteString(title)
	}
	return tr.buf.String()
}
// titles partitions the titles of active tickets by age: tasks running
// for at least 5s are stale, tasks running for at least 1s (but under
// 5s) are fresh, and younger tasks are omitted.
//
// Bug fix: the loop body was duplicated verbatim, so every title was
// appended to its bucket twice.
func (tr *taskTracker) titles() (stale []string, fresh []string) {
	now := time.Now()
	for _, t := range tr.tickets {
		dur := now.Sub(t.Start)
		switch {
		case dur >= 5*time.Second:
			stale = append(stale, t.Title)
		case dur >= 1*time.Second:
			fresh = append(fresh, t.Title)
		}
	}
	return stale, fresh
}
// resetTimer re-arms the tick timer so the next taskTick action is
// dispatched in one second.
//
// Fix: the Reset call was wrapped in a `defer` even though it was the
// function's only statement; the defer added nothing but indirection.
func (tr *taskTracker) resetTimer() {
	tr.timer.Reset(1 * time.Second)
}
// done removes the ticket with the given id and re-arms the timer so
// the status display refreshes promptly.
func (tr *taskTracker) done(id string) {
	tr.mu.Lock()
	defer tr.mu.Unlock()
	defer tr.resetTimer()

	// Bug fix: the original allocated with cap len(tr.tickets)-1,
	// which panics with a negative cap when the slice is empty (e.g.
	// Done called twice on the same ticket).
	l := make([]*TaskTicket, 0, len(tr.tickets))
	for _, t := range tr.tickets {
		if t.ID != id {
			l = append(l, t)
		}
	}
	tr.tickets = l
}
// Begin registers a new task and returns its ticket. If the task has a
// CancelID, any existing task with the same CancelID is cancelled first
// (newer work supersedes older work). The timer is re-armed so the new
// task shows up in the status promptly.
func (tr *taskTracker) Begin(o Task) *TaskTicket {
	tr.mu.Lock()
	defer tr.mu.Unlock()
	defer tr.resetTimer()

	// Supersede any older task sharing this CancelID.
	if cid := o.CancelID; cid != "" {
		for _, t := range tr.tickets {
			if t.CancelID == cid {
				t.Cancel()
			}
		}
	}

	// IDs are "@<n>" with n taken from a monotonically increasing counter.
	tr.id++
	t := &TaskTicket{
		ID:       fmt.Sprintf("@%d", tr.id),
		CancelID: o.CancelID,
		Title:    o.Title,
		Start:    time.Now(),
		cancel:   o.Cancel,
		tracker:  tr,
		showNow:  o.ShowNow,
	}
	tr.tickets = append(tr.tickets, t)
	return t
}
|
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package handlers
import (
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
"strings"
"time"
"github.com/emicklei/go-restful"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/mergepatch"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/rest"
utiltrace "k8s.io/apiserver/pkg/util/trace"
)
// ContextFunc returns a Context given a request - a context must be returned
type ContextFunc func(req *restful.Request) request.Context
// ScopeNamer handles accessing names from requests and objects
type ScopeNamer interface {
	// Namespace returns the appropriate namespace value from the request (may be empty) or an
	// error.
	Namespace(req *restful.Request) (namespace string, err error)
	// Name returns the name from the request, and an optional namespace value if this is a namespace
	// scoped call. An error is returned if the name is not available.
	Name(req *restful.Request) (namespace, name string, err error)
	// ObjectName returns the namespace and name from an object if they exist, or an error if the object
	// does not support names.
	ObjectName(obj runtime.Object) (namespace, name string, err error)
	// SetSelfLink sets the provided URL onto the object. The method should return nil if the object
	// does not support selfLinks.
	SetSelfLink(obj runtime.Object, url string) error
	// GenerateLink creates an encoded URI for a given runtime object that represents the canonical path
	// and query.
	GenerateLink(req *restful.Request, obj runtime.Object) (uri string, err error)
	// GenerateListLink creates an encoded URI for a list that represents the canonical path and query.
	GenerateListLink(req *restful.Request) (uri string, err error)
}
// RequestScope encapsulates common fields across all RESTful handler methods.
type RequestScope struct {
	// Namer resolves namespaces/names and builds self-links for this scope.
	Namer ScopeNamer
	ContextFunc

	// Serializer negotiates and encodes/decodes request and response bodies.
	Serializer runtime.NegotiatedSerializer
	runtime.ParameterCodec
	Creater   runtime.ObjectCreater
	Convertor runtime.ObjectConvertor
	Copier    runtime.ObjectCopier

	// Resource/Kind/Subresource identify what this scope serves.
	Resource    schema.GroupVersionResource
	Kind        schema.GroupVersionKind
	Subresource string

	// MetaGroupVersion is the group/version used to decode common
	// list/get option parameters.
	MetaGroupVersion schema.GroupVersion
}
// err writes a content-negotiated error response using this scope's
// serializer and group/version.
func (scope *RequestScope) err(err error, w http.ResponseWriter, req *http.Request) {
	responsewriters.ErrorNegotiated(err, scope.Serializer, scope.Kind.GroupVersion(), w, req)
}
// getterFunc performs a get request with the given context and object name. The request
// may be used to deserialize an options object to pass to the getter.
type getterFunc func(ctx request.Context, name string, req *restful.Request) (runtime.Object, error)
// maxRetryWhenPatchConflicts is the maximum number of conflicts retry during a patch operation before returning failure
const maxRetryWhenPatchConflicts = 5
// getResourceHandler is an HTTP handler function for get requests. It delegates to the
// passed-in getterFunc to perform the actual get.
func getResourceHandler(scope RequestScope, getter getterFunc) restful.RouteFunction {
	return func(req *restful.Request, res *restful.Response) {
		w := res.ResponseWriter
		namespace, name, err := scope.Namer.Name(req)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		ctx := scope.ContextFunc(req)
		ctx = request.WithNamespace(ctx, namespace)

		// Delegate the actual retrieval to the supplied getter.
		result, err := getter(ctx, name, req)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		// Populate the object's selfLink before writing it out.
		if err := setSelfLink(result, req, scope.Namer); err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		responsewriters.WriteObject(http.StatusOK, scope.Kind.GroupVersion(), scope.Serializer, result, w, req.Request)
	}
}
// GetResource returns a function that handles retrieving a single resource from a rest.Storage object.
func GetResource(r rest.Getter, e rest.Exporter, scope RequestScope) restful.RouteFunction {
	return getResourceHandler(scope,
		func(ctx request.Context, name string, req *restful.Request) (runtime.Object, error) {
			// For performance tracking purposes.
			trace := utiltrace.New("Get " + req.Request.URL.Path)
			defer trace.LogIfLong(500 * time.Millisecond)

			// check for export
			options := metav1.GetOptions{}
			if values := req.Request.URL.Query(); len(values) > 0 {
				// ?export=... takes a separate path via the Exporter,
				// when one is configured for this resource.
				exports := metav1.ExportOptions{}
				if err := metainternalversion.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, &exports); err != nil {
					return nil, err
				}
				if exports.Export {
					if e == nil {
						return nil, errors.NewBadRequest(fmt.Sprintf("export of %q is not supported", scope.Resource.Resource))
					}
					return e.Export(ctx, name, exports)
				}
				if err := metainternalversion.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, &options); err != nil {
					return nil, err
				}
			}
			return r.Get(ctx, name, &options)
		})
}
// GetResourceWithOptions returns a function that handles retrieving a single resource from a rest.Storage object.
// Unlike GetResource, the storage supplies its own options object which
// is decoded from the request's query (and optional subpath) parameters.
func GetResourceWithOptions(r rest.GetterWithOptions, scope RequestScope) restful.RouteFunction {
	return getResourceHandler(scope,
		func(ctx request.Context, name string, req *restful.Request) (runtime.Object, error) {
			opts, subpath, subpathKey := r.NewGetOptions()
			if err := getRequestOptions(req, scope, opts, subpath, subpathKey); err != nil {
				return nil, err
			}
			return r.Get(ctx, name, opts)
		})
}
// getRequestOptions decodes the request's query parameters into `into`.
// When subpath is true, the request's "path" path-parameter is injected
// into the query under subpathKey before decoding.
func getRequestOptions(req *restful.Request, scope RequestScope, into runtime.Object, subpath bool, subpathKey string) error {
	if into == nil {
		return nil
	}

	query := req.Request.URL.Query()
	if subpath {
		// Copy before mutating so the request's own query map stays intact.
		merged := make(url.Values, len(query)+1)
		for k, v := range query {
			merged[k] = v
		}
		merged[subpathKey] = []string{req.PathParameter("path")}
		query = merged
	}
	return scope.ParameterCodec.DecodeParameters(query, scope.Kind.GroupVersion(), into)
}
// ConnectResource returns a function that handles a connect request on a rest.Storage object.
func ConnectResource(connecter rest.Connecter, scope RequestScope, admit admission.Interface, restPath string) restful.RouteFunction {
	return func(req *restful.Request, res *restful.Response) {
		w := res.ResponseWriter
		namespace, name, err := scope.Namer.Name(req)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		ctx := scope.ContextFunc(req)
		ctx = request.WithNamespace(ctx, namespace)
		// Decode connect options from the query/subpath parameters.
		opts, subpath, subpathKey := connecter.NewConnectOptions()
		if err := getRequestOptions(req, scope, opts, subpath, subpathKey); err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		// Run admission control for CONNECT, when the plugin handles it.
		if admit.Handles(admission.Connect) {
			connectRequest := &rest.ConnectRequest{
				Name:         name,
				Options:      opts,
				ResourcePath: restPath,
			}
			userInfo, _ := request.UserFrom(ctx)

			err = admit.Admit(admission.NewAttributesRecord(connectRequest, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Connect, userInfo))
			if err != nil {
				scope.err(err, res.ResponseWriter, req.Request)
				return
			}
		}
		// The connecter produces an http.Handler that serves the request
		// (e.g. proxy/exec-style traffic); errors go through the responder.
		handler, err := connecter.Connect(ctx, name, opts, &responder{scope: scope, req: req, res: res})
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		handler.ServeHTTP(w, req.Request)
	}
}
// responder implements rest.Responder for assisting a connector in writing objects or errors.
type responder struct {
	scope RequestScope
	req   *restful.Request
	res   *restful.Response
}

// Object writes obj with the given HTTP status code, using the scope's
// serializer and group/version.
func (r *responder) Object(statusCode int, obj runtime.Object) {
	responsewriters.WriteObject(statusCode, r.scope.Kind.GroupVersion(), r.scope.Serializer, obj, r.res.ResponseWriter, r.req.Request)
}

// Error writes err as a content-negotiated error response.
func (r *responder) Error(err error) {
	r.scope.err(err, r.res.ResponseWriter, r.req.Request)
}
// ListResource returns a function that handles retrieving a list of resources from a rest.Storage object.
// When the request asks for a watch (or forceWatch is set) and a Watcher
// is available, the request is handed off to serveWatch instead.
func ListResource(r rest.Lister, rw rest.Watcher, scope RequestScope, forceWatch bool, minRequestTimeout time.Duration) restful.RouteFunction {
	return func(req *restful.Request, res *restful.Response) {
		// For performance tracking purposes.
		trace := utiltrace.New("List " + req.Request.URL.Path)

		w := res.ResponseWriter

		namespace, err := scope.Namer.Namespace(req)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}

		// Watches for single objects are routed to this function.
		// Treat a /name parameter the same as a field selector entry.
		hasName := true
		_, name, err := scope.Namer.Name(req)
		if err != nil {
			hasName = false
		}

		ctx := scope.ContextFunc(req)
		ctx = request.WithNamespace(ctx, namespace)

		opts := metainternalversion.ListOptions{}
		if err := metainternalversion.ParameterCodec.DecodeParameters(req.Request.URL.Query(), scope.MetaGroupVersion, &opts); err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}

		// transform fields
		// TODO: DecodeParametersInto should do this.
		if opts.FieldSelector != nil {
			fn := func(label, value string) (newLabel, newValue string, err error) {
				return scope.Convertor.ConvertFieldLabel(scope.Kind.GroupVersion().String(), scope.Kind.Kind, label, value)
			}
			if opts.FieldSelector, err = opts.FieldSelector.Transform(fn); err != nil {
				// TODO: allow bad request to set field causes based on query parameters
				err = errors.NewBadRequest(err.Error())
				scope.err(err, res.ResponseWriter, req.Request)
				return
			}
		}

		if hasName {
			// metadata.name is the canonical internal name.
			// SelectionPredicate will notice that this is
			// a request for a single object and optimize the
			// storage query accordingly.
			nameSelector := fields.OneTermEqualSelector("metadata.name", name)
			if opts.FieldSelector != nil && !opts.FieldSelector.Empty() {
				// It doesn't make sense to ask for both a name
				// and a field selector, since just the name is
				// sufficient to narrow down the request to a
				// single object.
				scope.err(errors.NewBadRequest("both a name and a field selector provided; please provide one or the other."), res.ResponseWriter, req.Request)
				return
			}
			opts.FieldSelector = nameSelector
		}

		if (opts.Watch || forceWatch) && rw != nil {
			glog.Infof("Started to log from %v for %v", ctx, req.Request.URL.RequestURI())
			watcher, err := rw.Watch(ctx, &opts)
			if err != nil {
				scope.err(err, res.ResponseWriter, req.Request)
				return
			}
			// TODO: Currently we explicitly ignore ?timeout= and use only ?timeoutSeconds=.
			timeout := time.Duration(0)
			if opts.TimeoutSeconds != nil {
				timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
			}
			if timeout == 0 && minRequestTimeout > 0 {
				// Randomize the timeout so watch reconnects from many
				// clients spread out rather than arriving in a burst.
				timeout = time.Duration(float64(minRequestTimeout) * (rand.Float64() + 1.0))
			}
			serveWatch(watcher, scope, req, res, timeout)
			return
		}

		// Log only long List requests (ignore Watch).
		defer trace.LogIfLong(500 * time.Millisecond)
		trace.Step("About to List from storage")
		result, err := r.List(ctx, &opts)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		trace.Step("Listing from storage done")
		numberOfItems, err := setListSelfLink(result, req, scope.Namer)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		trace.Step("Self-linking done")
		// Ensure empty lists return a non-nil items slice
		if numberOfItems == 0 && meta.IsListType(result) {
			if err := meta.SetList(result, []runtime.Object{}); err != nil {
				scope.err(err, res.ResponseWriter, req.Request)
				return
			}
		}
		responsewriters.WriteObject(http.StatusOK, scope.Kind.GroupVersion(), scope.Serializer, result, w, req.Request)
		trace.Step(fmt.Sprintf("Writing http response done (%d items)", numberOfItems))
	}
}
// createHandler builds the route function shared by CreateResource and
// CreateNamedResource: it decodes the request body into the expected
// version, runs admission, stores the object and writes 201 Created.
// includeName selects whether the name comes from the URL or the body.
func createHandler(r rest.NamedCreater, scope RequestScope, typer runtime.ObjectTyper, admit admission.Interface, includeName bool) restful.RouteFunction {
	return func(req *restful.Request, res *restful.Response) {
		// For performance tracking purposes.
		trace := utiltrace.New("Create " + req.Request.URL.Path)
		defer trace.LogIfLong(500 * time.Millisecond)

		w := res.ResponseWriter

		// TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer)
		timeout := parseTimeout(req.Request.URL.Query().Get("timeout"))

		var (
			namespace, name string
			err             error
		)
		if includeName {
			namespace, name, err = scope.Namer.Name(req)
		} else {
			namespace, err = scope.Namer.Namespace(req)
		}
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}

		ctx := scope.ContextFunc(req)
		ctx = request.WithNamespace(ctx, namespace)

		// Negotiate the body's serializer and decode into the internal version.
		gv := scope.Kind.GroupVersion()
		s, err := negotiation.NegotiateInputSerializer(req.Request, scope.Serializer)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		decoder := scope.Serializer.DecoderToVersion(s.Serializer, schema.GroupVersion{Group: gv.Group, Version: runtime.APIVersionInternal})

		body, err := readBody(req.Request)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}

		defaultGVK := scope.Kind
		original := r.New()
		trace.Step("About to convert to expected version")
		obj, gvk, err := decoder.Decode(body, &defaultGVK, original)
		if err != nil {
			err = transformDecodeError(typer, err, original, gvk, body)
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		// The body's declared apiVersion must match this endpoint's version.
		if gvk.GroupVersion() != gv {
			err = errors.NewBadRequest(fmt.Sprintf("the API version in the data (%s) does not match the expected API version (%v)", gvk.GroupVersion().String(), gv.String()))
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		trace.Step("Conversion done")

		if admit != nil && admit.Handles(admission.Create) {
			userInfo, _ := request.UserFrom(ctx)

			err = admit.Admit(admission.NewAttributesRecord(obj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, userInfo))
			if err != nil {
				scope.err(err, res.ResponseWriter, req.Request)
				return
			}
		}

		trace.Step("About to store object in database")
		result, err := finishRequest(timeout, func() (runtime.Object, error) {
			out, err := r.Create(ctx, name, obj)
			// A Status with no explicit code defaults to 201 for creates.
			if status, ok := out.(*metav1.Status); ok && err == nil && status.Code == 0 {
				status.Code = http.StatusCreated
			}
			return out, err
		})
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		trace.Step("Object stored in database")

		if err := setSelfLink(result, req, scope.Namer); err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		trace.Step("Self-link added")

		responsewriters.WriteObject(http.StatusCreated, scope.Kind.GroupVersion(), scope.Serializer, result, w, req.Request)
	}
}
// CreateNamedResource returns a function that will handle a resource creation with name.
// The object's name is taken from the request URL (includeName == true).
func CreateNamedResource(r rest.NamedCreater, scope RequestScope, typer runtime.ObjectTyper, admit admission.Interface) restful.RouteFunction {
	return createHandler(r, scope, typer, admit, true)
}
// CreateResource returns a function that will handle a resource creation.
// The plain rest.Creater is adapted to the NamedCreater interface; the
// name is ignored (includeName == false).
func CreateResource(r rest.Creater, scope RequestScope, typer runtime.ObjectTyper, admit admission.Interface) restful.RouteFunction {
	return createHandler(&namedCreaterAdapter{r}, scope, typer, admit, false)
}
// namedCreaterAdapter adapts a rest.Creater to the rest.NamedCreater
// interface by discarding the name argument.
type namedCreaterAdapter struct {
	rest.Creater
}

// Create delegates to the wrapped Creater, ignoring name.
func (c *namedCreaterAdapter) Create(ctx request.Context, name string, obj runtime.Object) (runtime.Object, error) {
	return c.Creater.Create(ctx, obj)
}
// PatchResource returns a function that will handle a resource patch
// TODO: Eventually PatchResource should just use GuaranteedUpdate and this routine should be a bit cleaner
func PatchResource(r rest.Patcher, scope RequestScope, admit admission.Interface, converter runtime.ObjectConvertor) restful.RouteFunction {
	return func(req *restful.Request, res *restful.Response) {
		w := res.ResponseWriter

		// TODO: we either want to remove timeout or document it (if we
		// document, move timeout out of this function and declare it in
		// api_installer)
		timeout := parseTimeout(req.Request.URL.Query().Get("timeout"))

		namespace, name, err := scope.Namer.Name(req)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}

		ctx := scope.ContextFunc(req)
		ctx = request.WithNamespace(ctx, namespace)

		versionedObj, err := converter.ConvertToVersion(r.New(), scope.Kind.GroupVersion())
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}

		// TODO: handle this in negotiation
		contentType := req.HeaderParameter("Content-Type")
		// Remove "; charset=" if included in header.
		if idx := strings.Index(contentType, ";"); idx > 0 {
			contentType = contentType[:idx]
		}
		// The media type doubles as the patch type (json/merge/strategic).
		patchType := types.PatchType(contentType)

		patchJS, err := readBody(req.Request)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}

		// Patches are always processed as JSON, regardless of the
		// serializers negotiated for the response.
		s, ok := runtime.SerializerInfoForMediaType(scope.Serializer.SupportedMediaTypes(), runtime.ContentTypeJSON)
		if !ok {
			scope.err(fmt.Errorf("no serializer defined for JSON"), res.ResponseWriter, req.Request)
			return
		}
		gv := scope.Kind.GroupVersion()
		codec := runtime.NewCodec(
			scope.Serializer.EncoderForVersion(s.Serializer, gv),
			scope.Serializer.DecoderToVersion(s.Serializer, schema.GroupVersion{Group: gv.Group, Version: runtime.APIVersionInternal}),
		)

		// Admission runs inside the retry loop so it sees each updated object.
		updateAdmit := func(updatedObject runtime.Object, currentObject runtime.Object) error {
			if admit != nil && admit.Handles(admission.Update) {
				userInfo, _ := request.UserFrom(ctx)
				return admit.Admit(admission.NewAttributesRecord(updatedObject, currentObject, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, userInfo))
			}

			return nil
		}

		result, err := patchResource(ctx, updateAdmit, timeout, versionedObj, r, name, patchType, patchJS, scope.Namer, scope.Copier, scope.Resource, codec)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}

		if err := setSelfLink(result, req, scope.Namer); err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}

		responsewriters.WriteObject(http.StatusOK, scope.Kind.GroupVersion(), scope.Serializer, result, w, req.Request)
	}
}
// updateAdmissionFunc runs update admission for an object being persisted,
// given the proposed updated object and the currently stored object.
type updateAdmissionFunc func(updatedObject runtime.Object, currentObject runtime.Object) error
// patchResource divides PatchResource for easier unit testing
func patchResource(
ctx request.Context,
admit updateAdmissionFunc,
timeout time.Duration,
versionedObj runtime.Object,
patcher rest.Patcher,
name string,
patchType types.PatchType,
patchJS []byte,
namer ScopeNamer,
copier runtime.ObjectCopier,
resource schema.GroupVersionResource,
codec runtime.Codec,
) (runtime.Object, error) {
namespace := request.NamespaceValue(ctx)
var (
originalObjJS []byte
originalPatchedObjJS []byte
originalObjMap map[string]interface{}
originalPatchMap map[string]interface{}
lastConflictErr error
)
// applyPatch is called every time GuaranteedUpdate asks for the updated object,
// and is given the currently persisted object as input.
applyPatch := func(_ request.Context, _, currentObject runtime.Object) (runtime.Object, error) {
// Make sure we actually have a persisted currentObject
if hasUID, err := hasUID(currentObject); err != nil {
return nil, err
} else if !hasUID {
return nil, errors.NewNotFound(resource.GroupResource(), name)
}
switch {
case originalObjJS == nil && originalObjMap == nil:
// first time through,
// 1. apply the patch
// 2. save the original and patched to detect whether there were conflicting changes on retries
objToUpdate := patcher.New()
// For performance reasons, in case of strategicpatch, we avoid json
// marshaling and unmarshaling and operate just on map[string]interface{}.
// In case of other patch types, we still have to operate on JSON
// representations.
switch patchType {
case types.JSONPatchType, types.MergePatchType:
originalJS, patchedJS, err := patchObjectJSON(patchType, codec, currentObject, patchJS, objToUpdate, versionedObj)
if err != nil {
return nil, err
}
originalObjJS, originalPatchedObjJS = originalJS, patchedJS
case types.StrategicMergePatchType:
originalMap, patchMap, err := strategicPatchObject(codec, currentObject, patchJS, objToUpdate, versionedObj)
if err != nil {
return nil, err
}
originalObjMap, originalPatchMap = originalMap, patchMap
}
if err := checkName(objToUpdate, name, namespace, namer); err != nil {
return nil, err
}
return objToUpdate, nil
default:
// on a conflict,
// 1. build a strategic merge patch from originalJS and the patchedJS. Different patch types can
// be specified, but a strategic merge patch should be expressive enough handle them. Build the
// patch with this type to handle those cases.
// 2. build a strategic merge patch from originalJS and the currentJS
// 3. ensure no conflicts between the two patches
// 4. apply the #1 patch to the currentJS object
// TODO: This should be one-step conversion that doesn't require
// json marshaling and unmarshaling once #39017 is fixed.
data, err := runtime.Encode(codec, currentObject)
if err != nil {
return nil, err
}
currentObjMap := make(map[string]interface{})
if err := json.Unmarshal(data, ¤tObjMap); err != nil {
return nil, err
}
var currentPatchMap map[string]interface{}
if originalObjMap != nil {
var err error
currentPatchMap, err = strategicpatch.CreateTwoWayMergeMapPatch(originalObjMap, currentObjMap, versionedObj)
if err != nil {
return nil, err
}
} else {
if originalPatchMap == nil {
// Compute original patch, if we already didn't do this in previous retries.
originalPatch, err := strategicpatch.CreateTwoWayMergePatch(originalObjJS, originalPatchedObjJS, versionedObj)
if err != nil {
return nil, err
}
originalPatchMap = make(map[string]interface{})
if err := json.Unmarshal(originalPatch, &originalPatchMap); err != nil {
return nil, err
}
}
// Compute current patch.
currentObjJS, err := runtime.Encode(codec, currentObject)
if err != nil {
return nil, err
}
currentPatch, err := strategicpatch.CreateTwoWayMergePatch(originalObjJS, currentObjJS, versionedObj)
if err != nil {
return nil, err
}
currentPatchMap = make(map[string]interface{})
if err := json.Unmarshal(currentPatch, ¤tPatchMap); err != nil {
return nil, err
}
}
hasConflicts, err := mergepatch.HasConflicts(originalPatchMap, currentPatchMap)
if err != nil {
return nil, err
}
if hasConflicts {
diff1, _ := json.Marshal(currentPatchMap)
diff2, _ := json.Marshal(originalPatchMap)
patchDiffErr := fmt.Errorf("there is a meaningful conflict:\n diff1=%v\n, diff2=%v\n", diff1, diff2)
glog.V(4).Infof("patchResource failed for resource %s, because there is a meaningful conflict.\n diff1=%v\n, diff2=%v\n", name, diff1, diff2)
// Return the last conflict error we got if we have one
if lastConflictErr != nil {
return nil, lastConflictErr
}
// Otherwise manufacture one of our own
return nil, errors.NewConflict(resource.GroupResource(), name, patchDiffErr)
}
objToUpdate := patcher.New()
if err := applyPatchToObject(codec, currentObjMap, originalPatchMap, objToUpdate, versionedObj); err != nil {
return nil, err
}
return objToUpdate, nil
}
}
// applyAdmission is called every time GuaranteedUpdate asks for the updated object,
// and is given the currently persisted object and the patched object as input.
applyAdmission := func(ctx request.Context, patchedObject runtime.Object, currentObject runtime.Object) (runtime.Object, error) {
return patchedObject, admit(patchedObject, currentObject)
}
updatedObjectInfo := rest.DefaultUpdatedObjectInfo(nil, copier, applyPatch, applyAdmission)
return finishRequest(timeout, func() (runtime.Object, error) {
updateObject, _, updateErr := patcher.Update(ctx, name, updatedObjectInfo)
for i := 0; i < maxRetryWhenPatchConflicts && (errors.IsConflict(updateErr)); i++ {
lastConflictErr = updateErr
updateObject, _, updateErr = patcher.Update(ctx, name, updatedObjectInfo)
}
return updateObject, updateErr
})
}
// UpdateResource returns a function that will handle a resource update
// (HTTP PUT): decode the body, validate its group/version and name, run
// update admission, persist, and write the result.
func UpdateResource(r rest.Updater, scope RequestScope, typer runtime.ObjectTyper, admit admission.Interface) restful.RouteFunction {
	return func(req *restful.Request, res *restful.Response) {
		// For performance tracking purposes.
		trace := utiltrace.New("Update " + req.Request.URL.Path)
		defer trace.LogIfLong(500 * time.Millisecond)
		w := res.ResponseWriter
		// TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer)
		timeout := parseTimeout(req.Request.URL.Query().Get("timeout"))
		// Resolve namespace/name from the request path.
		namespace, name, err := scope.Namer.Name(req)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		ctx := scope.ContextFunc(req)
		ctx = request.WithNamespace(ctx, namespace)
		body, err := readBody(req.Request)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		// Pick a deserializer based on the request's Content-Type.
		s, err := negotiation.NegotiateInputSerializer(req.Request, scope.Serializer)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		defaultGVK := scope.Kind
		original := r.New()
		trace.Step("About to convert to expected version")
		obj, gvk, err := scope.Serializer.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, original)
		if err != nil {
			err = transformDecodeError(typer, err, original, gvk, body)
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		// The body must carry the same group/version this endpoint serves.
		if gvk.GroupVersion() != defaultGVK.GroupVersion() {
			err = errors.NewBadRequest(fmt.Sprintf("the API version in the data (%s) does not match the expected API version (%s)", gvk.GroupVersion(), defaultGVK.GroupVersion()))
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		trace.Step("Conversion done")
		// Ensure the object's own name/namespace agree with the URL.
		if err := checkName(obj, name, namespace, scope.Namer); err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		// Run update admission (if registered) as a transform applied during the update.
		var transformers []rest.TransformFunc
		if admit != nil && admit.Handles(admission.Update) {
			transformers = append(transformers, func(ctx request.Context, newObj, oldObj runtime.Object) (runtime.Object, error) {
				userInfo, _ := request.UserFrom(ctx)
				return newObj, admit.Admit(admission.NewAttributesRecord(newObj, oldObj, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, userInfo))
			})
		}
		trace.Step("About to store object in database")
		wasCreated := false
		result, err := finishRequest(timeout, func() (runtime.Object, error) {
			obj, created, err := r.Update(ctx, name, rest.DefaultUpdatedObjectInfo(obj, scope.Copier, transformers...))
			wasCreated = created
			return obj, err
		})
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		trace.Step("Object stored in database")
		if err := setSelfLink(result, req, scope.Namer); err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		trace.Step("Self-link added")
		// 201 when the update created the object, 200 otherwise.
		status := http.StatusOK
		if wasCreated {
			status = http.StatusCreated
		}
		responsewriters.WriteObject(status, scope.Kind.GroupVersion(), scope.Serializer, result, w, req.Request)
	}
}
// DeleteResource returns a function that will handle a resource deletion:
// decode optional DeleteOptions (body or query), run delete admission,
// perform the delete, and write either the deleter's result or a Status.
func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope RequestScope, admit admission.Interface) restful.RouteFunction {
	return func(req *restful.Request, res *restful.Response) {
		// For performance tracking purposes.
		trace := utiltrace.New("Delete " + req.Request.URL.Path)
		defer trace.LogIfLong(500 * time.Millisecond)
		w := res.ResponseWriter
		// TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer)
		timeout := parseTimeout(req.Request.URL.Query().Get("timeout"))
		namespace, name, err := scope.Namer.Name(req)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		ctx := scope.ContextFunc(req)
		ctx = request.WithNamespace(ctx, namespace)
		options := &metav1.DeleteOptions{}
		if allowsOptions {
			body, err := readBody(req.Request)
			if err != nil {
				scope.err(err, res.ResponseWriter, req.Request)
				return
			}
			if len(body) > 0 {
				s, err := negotiation.NegotiateInputSerializer(req.Request, metainternalversion.Codecs)
				if err != nil {
					scope.err(err, res.ResponseWriter, req.Request)
					return
				}
				// For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions
				// It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions
				defaultGVK := scope.MetaGroupVersion.WithKind("DeleteOptions")
				obj, _, err := metainternalversion.Codecs.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options)
				if err != nil {
					scope.err(err, res.ResponseWriter, req.Request)
					return
				}
				// Decode must have filled options in place; any other object
				// means the body was not a DeleteOptions.
				if obj != options {
					scope.err(fmt.Errorf("decoded object cannot be converted to DeleteOptions"), res.ResponseWriter, req.Request)
					return
				}
			} else {
				// Empty body: fall back to delete options from query parameters.
				if values := req.Request.URL.Query(); len(values) > 0 {
					if err := metainternalversion.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, options); err != nil {
						scope.err(err, res.ResponseWriter, req.Request)
						return
					}
				}
			}
		}
		// Run delete admission, if registered.
		if admit != nil && admit.Handles(admission.Delete) {
			userInfo, _ := request.UserFrom(ctx)
			err = admit.Admit(admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Delete, userInfo))
			if err != nil {
				scope.err(err, res.ResponseWriter, req.Request)
				return
			}
		}
		trace.Step("About do delete object from database")
		result, err := finishRequest(timeout, func() (runtime.Object, error) {
			obj, _, err := r.Delete(ctx, name, options)
			return obj, err
		})
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		trace.Step("Object deleted from database")
		// if the rest.Deleter returns a nil object, fill out a status. Callers may return a valid
		// object with the response.
		if result == nil {
			result = &metav1.Status{
				Status: metav1.StatusSuccess,
				Code:   http.StatusOK,
				Details: &metav1.StatusDetails{
					Name: name,
					Kind: scope.Kind.Kind,
				},
			}
		} else {
			// when a non-status response is returned, set the self link
			if _, ok := result.(*metav1.Status); !ok {
				if err := setSelfLink(result, req, scope.Namer); err != nil {
					scope.err(err, res.ResponseWriter, req.Request)
					return
				}
			}
		}
		responsewriters.WriteObject(http.StatusOK, scope.Kind.GroupVersion(), scope.Serializer, result, w, req.Request)
	}
}
// DeleteCollection returns a function that will handle a collection deletion:
// run delete admission, decode list filters and optional DeleteOptions, then
// delegate to the CollectionDeleter and write the result (or a Status).
func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope RequestScope, admit admission.Interface) restful.RouteFunction {
	return func(req *restful.Request, res *restful.Response) {
		w := res.ResponseWriter
		// TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer)
		timeout := parseTimeout(req.Request.URL.Query().Get("timeout"))
		namespace, err := scope.Namer.Namespace(req)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		ctx := scope.ContextFunc(req)
		ctx = request.WithNamespace(ctx, namespace)
		// Run delete admission, if registered; collection deletes carry no object name.
		if admit != nil && admit.Handles(admission.Delete) {
			userInfo, _ := request.UserFrom(ctx)
			err = admit.Admit(admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, "", scope.Resource, scope.Subresource, admission.Delete, userInfo))
			if err != nil {
				scope.err(err, res.ResponseWriter, req.Request)
				return
			}
		}
		// List options select which items of the collection are deleted.
		listOptions := metainternalversion.ListOptions{}
		if err := metainternalversion.ParameterCodec.DecodeParameters(req.Request.URL.Query(), scope.MetaGroupVersion, &listOptions); err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		// transform fields
		// TODO: DecodeParametersInto should do this.
		if listOptions.FieldSelector != nil {
			fn := func(label, value string) (newLabel, newValue string, err error) {
				return scope.Convertor.ConvertFieldLabel(scope.Kind.GroupVersion().String(), scope.Kind.Kind, label, value)
			}
			if listOptions.FieldSelector, err = listOptions.FieldSelector.Transform(fn); err != nil {
				// TODO: allow bad request to set field causes based on query parameters
				err = errors.NewBadRequest(err.Error())
				scope.err(err, res.ResponseWriter, req.Request)
				return
			}
		}
		options := &metav1.DeleteOptions{}
		if checkBody {
			body, err := readBody(req.Request)
			if err != nil {
				scope.err(err, res.ResponseWriter, req.Request)
				return
			}
			if len(body) > 0 {
				s, err := negotiation.NegotiateInputSerializer(req.Request, scope.Serializer)
				if err != nil {
					scope.err(err, res.ResponseWriter, req.Request)
					return
				}
				defaultGVK := scope.Kind.GroupVersion().WithKind("DeleteOptions")
				obj, _, err := scope.Serializer.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options)
				if err != nil {
					scope.err(err, res.ResponseWriter, req.Request)
					return
				}
				// Decode must have filled options in place; anything else means
				// the body was not a DeleteOptions.
				if obj != options {
					scope.err(fmt.Errorf("decoded object cannot be converted to DeleteOptions"), res.ResponseWriter, req.Request)
					return
				}
			}
		}
		result, err := finishRequest(timeout, func() (runtime.Object, error) {
			return r.DeleteCollection(ctx, options, &listOptions)
		})
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		// if the rest.Deleter returns a nil object, fill out a status. Callers may return a valid
		// object with the response.
		if result == nil {
			result = &metav1.Status{
				Status: metav1.StatusSuccess,
				Code:   http.StatusOK,
				Details: &metav1.StatusDetails{
					Kind: scope.Kind.Kind,
				},
			}
		} else {
			// when a non-status response is returned, set the self link
			if _, ok := result.(*metav1.Status); !ok {
				if _, err := setListSelfLink(result, req, scope.Namer); err != nil {
					scope.err(err, res.ResponseWriter, req.Request)
					return
				}
			}
		}
		responsewriters.WriteObjectNegotiated(scope.Serializer, scope.Kind.GroupVersion(), w, req.Request, http.StatusOK, result)
	}
}
// resultFunc is a function that returns a rest result and can be run in a goroutine
// (see finishRequest, which executes it with a timeout and panic propagation).
type resultFunc func() (runtime.Object, error)
// finishRequest makes a given resultFunc asynchronous and handles errors returned by the response.
// Any api.Status object returned is considered an "error", which interrupts the normal response flow.
// If fn does not complete within timeout, a Timeout error is returned; the
// goroutine running fn is not cancelled and may still complete in the background.
func finishRequest(timeout time.Duration, fn resultFunc) (result runtime.Object, err error) {
	// these channels need to be buffered to prevent the goroutine below from hanging indefinitely
	// when the select statement reads something other than the one the goroutine sends on.
	ch := make(chan runtime.Object, 1)
	errCh := make(chan error, 1)
	panicCh := make(chan interface{}, 1)
	go func() {
		// panics don't cross goroutine boundaries, so we have to handle ourselves
		defer utilruntime.HandleCrash(func(panicReason interface{}) {
			// Propagate to parent goroutine
			panicCh <- panicReason
		})
		if result, err := fn(); err != nil {
			errCh <- err
		} else {
			ch <- result
		}
	}()
	select {
	case result = <-ch:
		// A Status result is treated as an error to interrupt the normal flow.
		if status, ok := result.(*metav1.Status); ok {
			return nil, errors.FromObject(status)
		}
		return result, nil
	case err = <-errCh:
		return nil, err
	case p := <-panicCh:
		// Re-raise the worker's panic on the request-handling goroutine.
		panic(p)
	case <-time.After(timeout):
		return nil, errors.NewTimeoutError("request did not complete within allowed duration", 0)
	}
}
// transformDecodeError adds additional information when a decode fails,
// turning baseErr into a BadRequest that names the expected kind and, when
// known, the kind/version found in the request body.
func transformDecodeError(typer runtime.ObjectTyper, baseErr error, into runtime.Object, gvk *schema.GroupVersionKind, body []byte) error {
	objGVKs, _, err := typer.ObjectKinds(into)
	if err != nil {
		return err
	}
	// Defensive: ObjectKinds should report at least one kind on success, but
	// guard against an empty slice rather than panicking on objGVKs[0].
	if len(objGVKs) == 0 {
		return errors.NewBadRequest(fmt.Sprintf("unable to decode object: %v", baseErr))
	}
	objGVK := objGVKs[0]
	if gvk != nil && len(gvk.Kind) > 0 {
		return errors.NewBadRequest(fmt.Sprintf("%s in version %q cannot be handled as a %s: %v", gvk.Kind, gvk.Version, objGVK.Kind, baseErr))
	}
	summary := summarizeData(body, 30)
	return errors.NewBadRequest(fmt.Sprintf("the object provided is unrecognized (must be of type %s): %v (%s)", objGVK.Kind, baseErr, summary))
}
// setSelfLink sets the self link of an object (or the child items in a list) to the base URL of the request
// plus the path and query generated by the provided linkFunc
func setSelfLink(obj runtime.Object, req *restful.Request, namer ScopeNamer) error {
	// TODO: SelfLink generation should return a full URL?
	uri, err := namer.GenerateLink(req, obj)
	if err != nil {
		// NOTE(review): link-generation failures are deliberately swallowed
		// (returns nil, not err) — presumably best-effort; confirm before changing.
		return nil
	}
	return namer.SetSelfLink(obj, uri)
}
// hasUID reports whether obj carries a non-empty UID. A nil object has no
// UID; an object whose metadata cannot be accessed yields an internal error.
func hasUID(obj runtime.Object) (bool, error) {
	if obj == nil {
		return false, nil
	}
	accessor, accessErr := meta.Accessor(obj)
	if accessErr != nil {
		return false, errors.NewInternalError(accessErr)
	}
	return len(accessor.GetUID()) != 0, nil
}
// checkName checks the provided name against the request: the object's own
// name must match the URL name, and (for namespaced requests) the object's
// namespace, when set, must match the request namespace.
func checkName(obj runtime.Object, name, namespace string, namer ScopeNamer) error {
	objNamespace, objName, err := namer.ObjectName(obj)
	if err != nil {
		// The object does not report a name; nothing to validate. This keeps
		// the previous behavior, which ignored ObjectName errors (the original
		// had an unreachable `if err != nil` inside an `err == nil` branch).
		return nil
	}
	if objName != name {
		return errors.NewBadRequest(fmt.Sprintf(
			"the name of the object (%s) does not match the name on the URL (%s)", objName, name))
	}
	if len(namespace) > 0 {
		if len(objNamespace) > 0 && objNamespace != namespace {
			return errors.NewBadRequest(fmt.Sprintf(
				"the namespace of the object (%s) does not match the namespace on the request (%s)", objNamespace, namespace))
		}
	}
	return nil
}
// setListSelfLink sets the self link of a list to the base URL, then sets the self links
// on all child objects returned. Returns the number of items in the list.
// Non-list objects are a no-op. A failure to set the list's own link is only
// logged; per-item link errors abort the walk and are returned.
func setListSelfLink(obj runtime.Object, req *restful.Request, namer ScopeNamer) (int, error) {
	if !meta.IsListType(obj) {
		return 0, nil
	}
	uri, err := namer.GenerateListLink(req)
	if err != nil {
		return 0, err
	}
	if linkErr := namer.SetSelfLink(obj, uri); linkErr != nil {
		glog.V(4).Infof("Unable to set self link on object: %v", linkErr)
	}
	itemCount := 0
	walkErr := meta.EachListItem(obj, func(item runtime.Object) error {
		itemCount++
		return setSelfLink(item, req, namer)
	})
	return itemCount, walkErr
}
// summarizeData renders a short, loggable preview of data. Payloads that look
// like JSON (leading '{') are shown as text; everything else is hex encoded.
// Output longer than maxLength is truncated and suffixed with " ...".
// Empty input yields the literal "<empty>".
func summarizeData(data []byte, maxLength int) string {
	if len(data) == 0 {
		return "<empty>"
	}
	if data[0] == '{' {
		if len(data) <= maxLength {
			return string(data)
		}
		return string(data[:maxLength]) + " ..."
	}
	if len(data) <= maxLength {
		return hex.EncodeToString(data)
	}
	return hex.EncodeToString(data[:maxLength]) + " ..."
}
func readBody(req *http.Request) ([]byte, error) {
defer req.Body.Close()
return ioutil.ReadAll(req.Body)
}
// parseTimeout converts a duration string (e.g. "30s") into a time.Duration.
// An empty or unparseable value falls back to the 30-second default; parse
// failures are logged.
func parseTimeout(str string) time.Duration {
	const defaultTimeout = 30 * time.Second
	if str == "" {
		return defaultTimeout
	}
	timeout, err := time.ParseDuration(str)
	if err != nil {
		glog.Errorf("Failed to parse %q: %v", str, err)
		return defaultTimeout
	}
	return timeout
}
Update REST Handler to return 202 for cascading deletion
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package handlers
import (
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
"strings"
"time"
"github.com/emicklei/go-restful"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/mergepatch"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/rest"
utiltrace "k8s.io/apiserver/pkg/util/trace"
)
// ContextFunc returns a Context given a request - a context must be returned
// (implementations must not return a nil context).
type ContextFunc func(req *restful.Request) request.Context
// ScopeNamer handles accessing names from requests and objects
type ScopeNamer interface {
	// Namespace returns the appropriate namespace value from the request (may be empty) or an
	// error.
	Namespace(req *restful.Request) (namespace string, err error)
	// Name returns the name from the request, and an optional namespace value if this is a namespace
	// scoped call. An error is returned if the name is not available.
	Name(req *restful.Request) (namespace, name string, err error)
	// ObjectName returns the namespace and name from an object if they exist, or an error if the object
	// does not support names.
	ObjectName(obj runtime.Object) (namespace, name string, err error)
	// SetSelfLink sets the provided URL onto the object. The method should return nil if the object
	// does not support selfLinks.
	SetSelfLink(obj runtime.Object, url string) error
	// GenerateLink creates an encoded URI for a given runtime object that represents the canonical path
	// and query.
	GenerateLink(req *restful.Request, obj runtime.Object) (uri string, err error)
	// GenerateListLink creates an encoded URI for a list that represents the canonical path and query.
	GenerateListLink(req *restful.Request) (uri string, err error)
}
// RequestScope encapsulates common fields across all RESTful handler methods.
type RequestScope struct {
	// Namer resolves names/namespaces from requests and objects.
	Namer ScopeNamer
	ContextFunc
	// Serializer negotiates wire formats for requests and responses.
	Serializer runtime.NegotiatedSerializer
	runtime.ParameterCodec
	Creater   runtime.ObjectCreater
	Convertor runtime.ObjectConvertor
	Copier    runtime.ObjectCopier
	// Resource/Kind identify what this scope serves; Subresource is set for
	// subresource endpoints (e.g. "status").
	Resource    schema.GroupVersionResource
	Kind        schema.GroupVersionKind
	Subresource string
	// MetaGroupVersion is the group/version used for meta types (e.g. DeleteOptions).
	MetaGroupVersion schema.GroupVersion
}
// err writes the given error to the response, negotiating an output format
// for the scope's group/version.
func (scope *RequestScope) err(err error, w http.ResponseWriter, req *http.Request) {
	responsewriters.ErrorNegotiated(err, scope.Serializer, scope.Kind.GroupVersion(), w, req)
}
// getterFunc performs a get request with the given context and object name. The request
// may be used to deserialize an options object to pass to the getter.
type getterFunc func(ctx request.Context, name string, req *restful.Request) (runtime.Object, error)

// maxRetryWhenPatchConflicts is the maximum number of conflicts retry during a patch operation before returning failure
const maxRetryWhenPatchConflicts = 5
// getResourceHandler is an HTTP handler function for get requests. It delegates to the
// passed-in getterFunc to perform the actual get.
func getResourceHandler(scope RequestScope, getter getterFunc) restful.RouteFunction {
	return func(req *restful.Request, res *restful.Response) {
		w := res.ResponseWriter
		// Resolve namespace/name from the request path.
		namespace, name, err := scope.Namer.Name(req)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		ctx := scope.ContextFunc(req)
		ctx = request.WithNamespace(ctx, namespace)
		result, err := getter(ctx, name, req)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		if err := setSelfLink(result, req, scope.Namer); err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		responsewriters.WriteObject(http.StatusOK, scope.Kind.GroupVersion(), scope.Serializer, result, w, req.Request)
	}
}
// GetResource returns a function that handles retrieving a single resource from a rest.Storage object.
// When the request carries export options and an Exporter is available, the
// export path is taken instead of a plain get.
func GetResource(r rest.Getter, e rest.Exporter, scope RequestScope) restful.RouteFunction {
	return getResourceHandler(scope,
		func(ctx request.Context, name string, req *restful.Request) (runtime.Object, error) {
			// For performance tracking purposes.
			trace := utiltrace.New("Get " + req.Request.URL.Path)
			defer trace.LogIfLong(500 * time.Millisecond)
			// check for export
			options := metav1.GetOptions{}
			if values := req.Request.URL.Query(); len(values) > 0 {
				exports := metav1.ExportOptions{}
				if err := metainternalversion.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, &exports); err != nil {
					return nil, err
				}
				if exports.Export {
					// Export requested but unsupported by this resource.
					if e == nil {
						return nil, errors.NewBadRequest(fmt.Sprintf("export of %q is not supported", scope.Resource.Resource))
					}
					return e.Export(ctx, name, exports)
				}
				if err := metainternalversion.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, &options); err != nil {
					return nil, err
				}
			}
			return r.Get(ctx, name, &options)
		})
}
// GetResourceWithOptions returns a function that handles retrieving a single resource from a rest.Storage object.
// Unlike GetResource, the storage supplies its own options type, which is
// decoded from the request query (and optionally the subpath).
func GetResourceWithOptions(r rest.GetterWithOptions, scope RequestScope) restful.RouteFunction {
	return getResourceHandler(scope,
		func(ctx request.Context, name string, req *restful.Request) (runtime.Object, error) {
			opts, subpath, subpathKey := r.NewGetOptions()
			if err := getRequestOptions(req, scope, opts, subpath, subpathKey); err != nil {
				return nil, err
			}
			return r.Get(ctx, name, opts)
		})
}
// getRequestOptions decodes the request's query parameters into the provided
// options object. When subpath is true, the request's "path" path-parameter
// is injected into the query under subpathKey before decoding. A nil options
// object is a no-op.
func getRequestOptions(req *restful.Request, scope RequestScope, into runtime.Object, subpath bool, subpathKey string) error {
	if into == nil {
		return nil
	}
	query := req.Request.URL.Query()
	if subpath {
		// Copy so the request's own query values are not mutated.
		merged := make(url.Values, len(query)+1)
		for key, values := range query {
			merged[key] = values
		}
		merged[subpathKey] = []string{req.PathParameter("path")}
		query = merged
	}
	return scope.ParameterCodec.DecodeParameters(query, scope.Kind.GroupVersion(), into)
}
// ConnectResource returns a function that handles a connect request on a rest.Storage object.
// It decodes connect options, runs Connect admission, then delegates the
// request/response pair to the handler produced by the connecter.
func ConnectResource(connecter rest.Connecter, scope RequestScope, admit admission.Interface, restPath string) restful.RouteFunction {
	return func(req *restful.Request, res *restful.Response) {
		w := res.ResponseWriter
		namespace, name, err := scope.Namer.Name(req)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		ctx := scope.ContextFunc(req)
		ctx = request.WithNamespace(ctx, namespace)
		opts, subpath, subpathKey := connecter.NewConnectOptions()
		if err := getRequestOptions(req, scope, opts, subpath, subpathKey); err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		// Guard against a nil admission interface, matching every other
		// handler in this file (the original dereferenced admit unconditionally).
		if admit != nil && admit.Handles(admission.Connect) {
			connectRequest := &rest.ConnectRequest{
				Name:         name,
				Options:      opts,
				ResourcePath: restPath,
			}
			userInfo, _ := request.UserFrom(ctx)
			err = admit.Admit(admission.NewAttributesRecord(connectRequest, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Connect, userInfo))
			if err != nil {
				scope.err(err, res.ResponseWriter, req.Request)
				return
			}
		}
		handler, err := connecter.Connect(ctx, name, opts, &responder{scope: scope, req: req, res: res})
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		handler.ServeHTTP(w, req.Request)
	}
}
// responder implements rest.Responder for assisting a connector in writing objects or errors.
type responder struct {
	scope RequestScope
	req   *restful.Request
	res   *restful.Response
}

// Object writes obj to the response with the given HTTP status code.
func (r *responder) Object(statusCode int, obj runtime.Object) {
	responsewriters.WriteObject(statusCode, r.scope.Kind.GroupVersion(), r.scope.Serializer, obj, r.res.ResponseWriter, r.req.Request)
}

// Error writes err to the response using the scope's negotiated error format.
func (r *responder) Error(err error) {
	r.scope.err(err, r.res.ResponseWriter, r.req.Request)
}
// ListResource returns a function that handles retrieving a list of resources from a rest.Storage object.
// The same handler serves watches (including single-object watches routed here
// with a /name parameter) when the request asks for one and rw is non-nil.
func ListResource(r rest.Lister, rw rest.Watcher, scope RequestScope, forceWatch bool, minRequestTimeout time.Duration) restful.RouteFunction {
	return func(req *restful.Request, res *restful.Response) {
		// For performance tracking purposes.
		trace := utiltrace.New("List " + req.Request.URL.Path)
		w := res.ResponseWriter
		namespace, err := scope.Namer.Namespace(req)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		// Watches for single objects are routed to this function.
		// Treat a /name parameter the same as a field selector entry.
		hasName := true
		_, name, err := scope.Namer.Name(req)
		if err != nil {
			hasName = false
		}
		ctx := scope.ContextFunc(req)
		ctx = request.WithNamespace(ctx, namespace)
		opts := metainternalversion.ListOptions{}
		if err := metainternalversion.ParameterCodec.DecodeParameters(req.Request.URL.Query(), scope.MetaGroupVersion, &opts); err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		// transform fields
		// TODO: DecodeParametersInto should do this.
		if opts.FieldSelector != nil {
			fn := func(label, value string) (newLabel, newValue string, err error) {
				return scope.Convertor.ConvertFieldLabel(scope.Kind.GroupVersion().String(), scope.Kind.Kind, label, value)
			}
			if opts.FieldSelector, err = opts.FieldSelector.Transform(fn); err != nil {
				// TODO: allow bad request to set field causes based on query parameters
				err = errors.NewBadRequest(err.Error())
				scope.err(err, res.ResponseWriter, req.Request)
				return
			}
		}
		if hasName {
			// metadata.name is the canonical internal name.
			// SelectionPredicate will notice that this is
			// a request for a single object and optimize the
			// storage query accordingly.
			nameSelector := fields.OneTermEqualSelector("metadata.name", name)
			if opts.FieldSelector != nil && !opts.FieldSelector.Empty() {
				// It doesn't make sense to ask for both a name
				// and a field selector, since just the name is
				// sufficient to narrow down the request to a
				// single object.
				scope.err(errors.NewBadRequest("both a name and a field selector provided; please provide one or the other."), res.ResponseWriter, req.Request)
				return
			}
			opts.FieldSelector = nameSelector
		}
		// Watch path: hand the connection off to serveWatch and return.
		if (opts.Watch || forceWatch) && rw != nil {
			glog.Infof("Started to log from %v for %v", ctx, req.Request.URL.RequestURI())
			watcher, err := rw.Watch(ctx, &opts)
			if err != nil {
				scope.err(err, res.ResponseWriter, req.Request)
				return
			}
			// TODO: Currently we explicitly ignore ?timeout= and use only ?timeoutSeconds=.
			timeout := time.Duration(0)
			if opts.TimeoutSeconds != nil {
				timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
			}
			// Jitter the default timeout between 1x and 2x minRequestTimeout
			// so watches don't all expire at once.
			if timeout == 0 && minRequestTimeout > 0 {
				timeout = time.Duration(float64(minRequestTimeout) * (rand.Float64() + 1.0))
			}
			serveWatch(watcher, scope, req, res, timeout)
			return
		}
		// Log only long List requests (ignore Watch).
		defer trace.LogIfLong(500 * time.Millisecond)
		trace.Step("About to List from storage")
		result, err := r.List(ctx, &opts)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		trace.Step("Listing from storage done")
		numberOfItems, err := setListSelfLink(result, req, scope.Namer)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		trace.Step("Self-linking done")
		// Ensure empty lists return a non-nil items slice
		if numberOfItems == 0 && meta.IsListType(result) {
			if err := meta.SetList(result, []runtime.Object{}); err != nil {
				scope.err(err, res.ResponseWriter, req.Request)
				return
			}
		}
		responsewriters.WriteObject(http.StatusOK, scope.Kind.GroupVersion(), scope.Serializer, result, w, req.Request)
		trace.Step(fmt.Sprintf("Writing http response done (%d items)", numberOfItems))
	}
}
// createHandler returns a restful.RouteFunction that decodes a request body,
// runs admission (if configured), stores the new object via the provided
// rest.NamedCreater, and writes the created object back with 201 Created.
// When includeName is true the object name is taken from the request path;
// otherwise only the namespace is resolved and name is left empty.
func createHandler(r rest.NamedCreater, scope RequestScope, typer runtime.ObjectTyper, admit admission.Interface, includeName bool) restful.RouteFunction {
	return func(req *restful.Request, res *restful.Response) {
		// For performance tracking purposes.
		trace := utiltrace.New("Create " + req.Request.URL.Path)
		defer trace.LogIfLong(500 * time.Millisecond)
		w := res.ResponseWriter
		// TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer)
		timeout := parseTimeout(req.Request.URL.Query().Get("timeout"))
		var (
			namespace, name string
			err             error
		)
		if includeName {
			namespace, name, err = scope.Namer.Name(req)
		} else {
			namespace, err = scope.Namer.Namespace(req)
		}
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		ctx := scope.ContextFunc(req)
		ctx = request.WithNamespace(ctx, namespace)
		gv := scope.Kind.GroupVersion()
		// Pick a serializer matching the request's Content-Type.
		s, err := negotiation.NegotiateInputSerializer(req.Request, scope.Serializer)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		// Decode into the internal version so storage and admission see the hub type.
		decoder := scope.Serializer.DecoderToVersion(s.Serializer, schema.GroupVersion{Group: gv.Group, Version: runtime.APIVersionInternal})
		body, err := readBody(req.Request)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		defaultGVK := scope.Kind
		original := r.New()
		trace.Step("About to convert to expected version")
		obj, gvk, err := decoder.Decode(body, &defaultGVK, original)
		if err != nil {
			// Enrich the decode failure with kind/version details before reporting.
			err = transformDecodeError(typer, err, original, gvk, body)
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		// Reject payloads whose declared apiVersion differs from the endpoint's.
		if gvk.GroupVersion() != gv {
			err = errors.NewBadRequest(fmt.Sprintf("the API version in the data (%s) does not match the expected API version (%v)", gvk.GroupVersion().String(), gv.String()))
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		trace.Step("Conversion done")
		if admit != nil && admit.Handles(admission.Create) {
			userInfo, _ := request.UserFrom(ctx)
			err = admit.Admit(admission.NewAttributesRecord(obj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, userInfo))
			if err != nil {
				scope.err(err, res.ResponseWriter, req.Request)
				return
			}
		}
		trace.Step("About to store object in database")
		result, err := finishRequest(timeout, func() (runtime.Object, error) {
			out, err := r.Create(ctx, name, obj)
			// A Status result with no code set defaults to 201 Created.
			if status, ok := out.(*metav1.Status); ok && err == nil && status.Code == 0 {
				status.Code = http.StatusCreated
			}
			return out, err
		})
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		trace.Step("Object stored in database")
		if err := setSelfLink(result, req, scope.Namer); err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		trace.Step("Self-link added")
		responsewriters.WriteObject(http.StatusCreated, scope.Kind.GroupVersion(), scope.Serializer, result, w, req.Request)
	}
}
// CreateNamedResource returns a function that will handle a resource creation with name.
// The name is extracted from the request path (includeName=true).
func CreateNamedResource(r rest.NamedCreater, scope RequestScope, typer runtime.ObjectTyper, admit admission.Interface) restful.RouteFunction {
	return createHandler(r, scope, typer, admit, true)
}
// CreateResource returns a function that will handle a resource creation.
// The plain rest.Creater is adapted to a NamedCreater; the name argument is
// ignored and derived from the object itself (includeName=false).
func CreateResource(r rest.Creater, scope RequestScope, typer runtime.ObjectTyper, admit admission.Interface) restful.RouteFunction {
	return createHandler(&namedCreaterAdapter{r}, scope, typer, admit, false)
}
// namedCreaterAdapter adapts a rest.Creater (which derives the name from the
// object) to the rest.NamedCreater interface expected by createHandler.
type namedCreaterAdapter struct {
	rest.Creater
}

// Create ignores the provided name and delegates to the embedded Creater.
func (c *namedCreaterAdapter) Create(ctx request.Context, name string, obj runtime.Object) (runtime.Object, error) {
	return c.Creater.Create(ctx, obj)
}
// PatchResource returns a function that will handle a resource patch
// TODO: Eventually PatchResource should just use GuaranteedUpdate and this routine should be a bit cleaner
func PatchResource(r rest.Patcher, scope RequestScope, admit admission.Interface, converter runtime.ObjectConvertor) restful.RouteFunction {
	return func(req *restful.Request, res *restful.Response) {
		w := res.ResponseWriter
		// TODO: we either want to remove timeout or document it (if we
		// document, move timeout out of this function and declare it in
		// api_installer)
		timeout := parseTimeout(req.Request.URL.Query().Get("timeout"))
		namespace, name, err := scope.Namer.Name(req)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		ctx := scope.ContextFunc(req)
		ctx = request.WithNamespace(ctx, namespace)
		// A versioned (external) empty object is needed by the strategic-merge
		// machinery to look up patch metadata tags.
		versionedObj, err := converter.ConvertToVersion(r.New(), scope.Kind.GroupVersion())
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		// TODO: handle this in negotiation
		contentType := req.HeaderParameter("Content-Type")
		// Remove "; charset=" if included in header.
		if idx := strings.Index(contentType, ";"); idx > 0 {
			contentType = contentType[:idx]
		}
		// The Content-Type doubles as the patch type (json/merge/strategic-merge).
		patchType := types.PatchType(contentType)
		patchJS, err := readBody(req.Request)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		// Patching always happens through a JSON codec regardless of the
		// endpoint's negotiated output format.
		s, ok := runtime.SerializerInfoForMediaType(scope.Serializer.SupportedMediaTypes(), runtime.ContentTypeJSON)
		if !ok {
			scope.err(fmt.Errorf("no serializer defined for JSON"), res.ResponseWriter, req.Request)
			return
		}
		gv := scope.Kind.GroupVersion()
		codec := runtime.NewCodec(
			scope.Serializer.EncoderForVersion(s.Serializer, gv),
			scope.Serializer.DecoderToVersion(s.Serializer, schema.GroupVersion{Group: gv.Group, Version: runtime.APIVersionInternal}),
		)
		// updateAdmit runs update admission on the patched object, if enabled.
		updateAdmit := func(updatedObject runtime.Object, currentObject runtime.Object) error {
			if admit != nil && admit.Handles(admission.Update) {
				userInfo, _ := request.UserFrom(ctx)
				return admit.Admit(admission.NewAttributesRecord(updatedObject, currentObject, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, userInfo))
			}
			return nil
		}
		result, err := patchResource(ctx, updateAdmit, timeout, versionedObj, r, name, patchType, patchJS, scope.Namer, scope.Copier, scope.Resource, codec)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		if err := setSelfLink(result, req, scope.Namer); err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		responsewriters.WriteObject(http.StatusOK, scope.Kind.GroupVersion(), scope.Serializer, result, w, req.Request)
	}
}
type updateAdmissionFunc func(updatedObject runtime.Object, currentObject runtime.Object) error
// patchResource divides PatchResource for easier unit testing
func patchResource(
ctx request.Context,
admit updateAdmissionFunc,
timeout time.Duration,
versionedObj runtime.Object,
patcher rest.Patcher,
name string,
patchType types.PatchType,
patchJS []byte,
namer ScopeNamer,
copier runtime.ObjectCopier,
resource schema.GroupVersionResource,
codec runtime.Codec,
) (runtime.Object, error) {
namespace := request.NamespaceValue(ctx)
var (
originalObjJS []byte
originalPatchedObjJS []byte
originalObjMap map[string]interface{}
originalPatchMap map[string]interface{}
lastConflictErr error
)
// applyPatch is called every time GuaranteedUpdate asks for the updated object,
// and is given the currently persisted object as input.
applyPatch := func(_ request.Context, _, currentObject runtime.Object) (runtime.Object, error) {
// Make sure we actually have a persisted currentObject
if hasUID, err := hasUID(currentObject); err != nil {
return nil, err
} else if !hasUID {
return nil, errors.NewNotFound(resource.GroupResource(), name)
}
switch {
case originalObjJS == nil && originalObjMap == nil:
// first time through,
// 1. apply the patch
// 2. save the original and patched to detect whether there were conflicting changes on retries
objToUpdate := patcher.New()
// For performance reasons, in case of strategicpatch, we avoid json
// marshaling and unmarshaling and operate just on map[string]interface{}.
// In case of other patch types, we still have to operate on JSON
// representations.
switch patchType {
case types.JSONPatchType, types.MergePatchType:
originalJS, patchedJS, err := patchObjectJSON(patchType, codec, currentObject, patchJS, objToUpdate, versionedObj)
if err != nil {
return nil, err
}
originalObjJS, originalPatchedObjJS = originalJS, patchedJS
case types.StrategicMergePatchType:
originalMap, patchMap, err := strategicPatchObject(codec, currentObject, patchJS, objToUpdate, versionedObj)
if err != nil {
return nil, err
}
originalObjMap, originalPatchMap = originalMap, patchMap
}
if err := checkName(objToUpdate, name, namespace, namer); err != nil {
return nil, err
}
return objToUpdate, nil
default:
// on a conflict,
// 1. build a strategic merge patch from originalJS and the patchedJS. Different patch types can
// be specified, but a strategic merge patch should be expressive enough handle them. Build the
// patch with this type to handle those cases.
// 2. build a strategic merge patch from originalJS and the currentJS
// 3. ensure no conflicts between the two patches
// 4. apply the #1 patch to the currentJS object
// TODO: This should be one-step conversion that doesn't require
// json marshaling and unmarshaling once #39017 is fixed.
data, err := runtime.Encode(codec, currentObject)
if err != nil {
return nil, err
}
currentObjMap := make(map[string]interface{})
if err := json.Unmarshal(data, ¤tObjMap); err != nil {
return nil, err
}
var currentPatchMap map[string]interface{}
if originalObjMap != nil {
var err error
currentPatchMap, err = strategicpatch.CreateTwoWayMergeMapPatch(originalObjMap, currentObjMap, versionedObj)
if err != nil {
return nil, err
}
} else {
if originalPatchMap == nil {
// Compute original patch, if we already didn't do this in previous retries.
originalPatch, err := strategicpatch.CreateTwoWayMergePatch(originalObjJS, originalPatchedObjJS, versionedObj)
if err != nil {
return nil, err
}
originalPatchMap = make(map[string]interface{})
if err := json.Unmarshal(originalPatch, &originalPatchMap); err != nil {
return nil, err
}
}
// Compute current patch.
currentObjJS, err := runtime.Encode(codec, currentObject)
if err != nil {
return nil, err
}
currentPatch, err := strategicpatch.CreateTwoWayMergePatch(originalObjJS, currentObjJS, versionedObj)
if err != nil {
return nil, err
}
currentPatchMap = make(map[string]interface{})
if err := json.Unmarshal(currentPatch, ¤tPatchMap); err != nil {
return nil, err
}
}
hasConflicts, err := mergepatch.HasConflicts(originalPatchMap, currentPatchMap)
if err != nil {
return nil, err
}
if hasConflicts {
diff1, _ := json.Marshal(currentPatchMap)
diff2, _ := json.Marshal(originalPatchMap)
patchDiffErr := fmt.Errorf("there is a meaningful conflict:\n diff1=%v\n, diff2=%v\n", diff1, diff2)
glog.V(4).Infof("patchResource failed for resource %s, because there is a meaningful conflict.\n diff1=%v\n, diff2=%v\n", name, diff1, diff2)
// Return the last conflict error we got if we have one
if lastConflictErr != nil {
return nil, lastConflictErr
}
// Otherwise manufacture one of our own
return nil, errors.NewConflict(resource.GroupResource(), name, patchDiffErr)
}
objToUpdate := patcher.New()
if err := applyPatchToObject(codec, currentObjMap, originalPatchMap, objToUpdate, versionedObj); err != nil {
return nil, err
}
return objToUpdate, nil
}
}
// applyAdmission is called every time GuaranteedUpdate asks for the updated object,
// and is given the currently persisted object and the patched object as input.
applyAdmission := func(ctx request.Context, patchedObject runtime.Object, currentObject runtime.Object) (runtime.Object, error) {
return patchedObject, admit(patchedObject, currentObject)
}
updatedObjectInfo := rest.DefaultUpdatedObjectInfo(nil, copier, applyPatch, applyAdmission)
return finishRequest(timeout, func() (runtime.Object, error) {
updateObject, _, updateErr := patcher.Update(ctx, name, updatedObjectInfo)
for i := 0; i < maxRetryWhenPatchConflicts && (errors.IsConflict(updateErr)); i++ {
lastConflictErr = updateErr
updateObject, _, updateErr = patcher.Update(ctx, name, updatedObjectInfo)
}
return updateObject, updateErr
})
}
// UpdateResource returns a function that will handle a resource update:
// decode the body, verify its apiVersion and name/namespace, run update
// admission, persist via r.Update, and write the stored object back with
// 200 OK (or 201 Created when the update created the object).
func UpdateResource(r rest.Updater, scope RequestScope, typer runtime.ObjectTyper, admit admission.Interface) restful.RouteFunction {
	return func(req *restful.Request, res *restful.Response) {
		// For performance tracking purposes.
		trace := utiltrace.New("Update " + req.Request.URL.Path)
		defer trace.LogIfLong(500 * time.Millisecond)
		w := res.ResponseWriter
		// TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer)
		timeout := parseTimeout(req.Request.URL.Query().Get("timeout"))
		namespace, name, err := scope.Namer.Name(req)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		ctx := scope.ContextFunc(req)
		ctx = request.WithNamespace(ctx, namespace)
		body, err := readBody(req.Request)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		// Pick a serializer matching the request's Content-Type.
		s, err := negotiation.NegotiateInputSerializer(req.Request, scope.Serializer)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		defaultGVK := scope.Kind
		original := r.New()
		trace.Step("About to convert to expected version")
		obj, gvk, err := scope.Serializer.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, original)
		if err != nil {
			err = transformDecodeError(typer, err, original, gvk, body)
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		// Reject payloads whose declared apiVersion differs from the endpoint's.
		if gvk.GroupVersion() != defaultGVK.GroupVersion() {
			err = errors.NewBadRequest(fmt.Sprintf("the API version in the data (%s) does not match the expected API version (%s)", gvk.GroupVersion(), defaultGVK.GroupVersion()))
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		trace.Step("Conversion done")
		// The body's name/namespace must agree with the URL.
		if err := checkName(obj, name, namespace, scope.Namer); err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		// Admission runs as a transform so it sees the old object too.
		var transformers []rest.TransformFunc
		if admit != nil && admit.Handles(admission.Update) {
			transformers = append(transformers, func(ctx request.Context, newObj, oldObj runtime.Object) (runtime.Object, error) {
				userInfo, _ := request.UserFrom(ctx)
				return newObj, admit.Admit(admission.NewAttributesRecord(newObj, oldObj, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, userInfo))
			})
		}
		trace.Step("About to store object in database")
		wasCreated := false
		result, err := finishRequest(timeout, func() (runtime.Object, error) {
			obj, created, err := r.Update(ctx, name, rest.DefaultUpdatedObjectInfo(obj, scope.Copier, transformers...))
			wasCreated = created
			return obj, err
		})
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		trace.Step("Object stored in database")
		if err := setSelfLink(result, req, scope.Namer); err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		trace.Step("Self-link added")
		status := http.StatusOK
		if wasCreated {
			status = http.StatusCreated
		}
		responsewriters.WriteObject(status, scope.Kind.GroupVersion(), scope.Serializer, result, w, req.Request)
	}
}
// DeleteResource returns a function that will handle a resource deletion.
// When allowsOptions is true, DeleteOptions may be supplied either in the
// request body or as query parameters (body wins when non-empty).
func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope RequestScope, admit admission.Interface) restful.RouteFunction {
	return func(req *restful.Request, res *restful.Response) {
		// For performance tracking purposes.
		trace := utiltrace.New("Delete " + req.Request.URL.Path)
		defer trace.LogIfLong(500 * time.Millisecond)
		w := res.ResponseWriter
		// TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer)
		timeout := parseTimeout(req.Request.URL.Query().Get("timeout"))
		namespace, name, err := scope.Namer.Name(req)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		ctx := scope.ContextFunc(req)
		ctx = request.WithNamespace(ctx, namespace)
		options := &metav1.DeleteOptions{}
		if allowsOptions {
			body, err := readBody(req.Request)
			if err != nil {
				scope.err(err, res.ResponseWriter, req.Request)
				return
			}
			if len(body) > 0 {
				s, err := negotiation.NegotiateInputSerializer(req.Request, metainternalversion.Codecs)
				if err != nil {
					scope.err(err, res.ResponseWriter, req.Request)
					return
				}
				// For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions
				// It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions
				defaultGVK := scope.MetaGroupVersion.WithKind("DeleteOptions")
				obj, _, err := metainternalversion.Codecs.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options)
				if err != nil {
					scope.err(err, res.ResponseWriter, req.Request)
					return
				}
				// The decoder must have decoded into the options object we passed in.
				if obj != options {
					scope.err(fmt.Errorf("decoded object cannot be converted to DeleteOptions"), res.ResponseWriter, req.Request)
					return
				}
			} else {
				// No body: fall back to query-parameter-encoded DeleteOptions.
				if values := req.Request.URL.Query(); len(values) > 0 {
					if err := metainternalversion.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, options); err != nil {
						scope.err(err, res.ResponseWriter, req.Request)
						return
					}
				}
			}
		}
		if admit != nil && admit.Handles(admission.Delete) {
			userInfo, _ := request.UserFrom(ctx)
			err = admit.Admit(admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Delete, userInfo))
			if err != nil {
				scope.err(err, res.ResponseWriter, req.Request)
				return
			}
		}
		// About to delete object from database.
		trace.Step("About do delete object from database")
		wasDeleted := true
		result, err := finishRequest(timeout, func() (runtime.Object, error) {
			obj, deleted, err := r.Delete(ctx, name, options)
			wasDeleted = deleted
			return obj, err
		})
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		trace.Step("Object deleted from database")
		status := http.StatusOK
		// Return http.StatusAccepted if the resource was not deleted immediately and
		// user requested cascading deletion by setting OrphanDependents=false.
		// Note: We want to do this always if resource was not deleted immediately, but
		// that will break existing clients.
		// Other cases where resource is not instantly deleted are: namespace deletion
		// and pod graceful deletion.
		if !wasDeleted && options.OrphanDependents != nil && *options.OrphanDependents == false {
			status = http.StatusAccepted
		}
		// if the rest.Deleter returns a nil object, fill out a status. Callers may return a valid
		// object with the response.
		if result == nil {
			result = &metav1.Status{
				Status: metav1.StatusSuccess,
				Code:   int32(status),
				Details: &metav1.StatusDetails{
					Name: name,
					Kind: scope.Kind.Kind,
				},
			}
		} else {
			// when a non-status response is returned, set the self link
			if _, ok := result.(*metav1.Status); !ok {
				if err := setSelfLink(result, req, scope.Namer); err != nil {
					scope.err(err, res.ResponseWriter, req.Request)
					return
				}
			}
		}
		responsewriters.WriteObject(status, scope.Kind.GroupVersion(), scope.Serializer, result, w, req.Request)
	}
}
// DeleteCollection returns a function that will handle a collection deletion.
// List selection options come from query parameters; DeleteOptions may be
// supplied in the body when checkBody is true.
func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope RequestScope, admit admission.Interface) restful.RouteFunction {
	return func(req *restful.Request, res *restful.Response) {
		w := res.ResponseWriter
		// TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer)
		timeout := parseTimeout(req.Request.URL.Query().Get("timeout"))
		namespace, err := scope.Namer.Namespace(req)
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		ctx := scope.ContextFunc(req)
		ctx = request.WithNamespace(ctx, namespace)
		if admit != nil && admit.Handles(admission.Delete) {
			userInfo, _ := request.UserFrom(ctx)
			// Collection delete: no single object name, so name is empty.
			err = admit.Admit(admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, "", scope.Resource, scope.Subresource, admission.Delete, userInfo))
			if err != nil {
				scope.err(err, res.ResponseWriter, req.Request)
				return
			}
		}
		listOptions := metainternalversion.ListOptions{}
		if err := metainternalversion.ParameterCodec.DecodeParameters(req.Request.URL.Query(), scope.MetaGroupVersion, &listOptions); err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		// transform fields
		// TODO: DecodeParametersInto should do this.
		if listOptions.FieldSelector != nil {
			fn := func(label, value string) (newLabel, newValue string, err error) {
				return scope.Convertor.ConvertFieldLabel(scope.Kind.GroupVersion().String(), scope.Kind.Kind, label, value)
			}
			if listOptions.FieldSelector, err = listOptions.FieldSelector.Transform(fn); err != nil {
				// TODO: allow bad request to set field causes based on query parameters
				err = errors.NewBadRequest(err.Error())
				scope.err(err, res.ResponseWriter, req.Request)
				return
			}
		}
		options := &metav1.DeleteOptions{}
		if checkBody {
			body, err := readBody(req.Request)
			if err != nil {
				scope.err(err, res.ResponseWriter, req.Request)
				return
			}
			if len(body) > 0 {
				s, err := negotiation.NegotiateInputSerializer(req.Request, scope.Serializer)
				if err != nil {
					scope.err(err, res.ResponseWriter, req.Request)
					return
				}
				defaultGVK := scope.Kind.GroupVersion().WithKind("DeleteOptions")
				obj, _, err := scope.Serializer.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options)
				if err != nil {
					scope.err(err, res.ResponseWriter, req.Request)
					return
				}
				// The decoder must have decoded into the options object we passed in.
				if obj != options {
					scope.err(fmt.Errorf("decoded object cannot be converted to DeleteOptions"), res.ResponseWriter, req.Request)
					return
				}
			}
		}
		result, err := finishRequest(timeout, func() (runtime.Object, error) {
			return r.DeleteCollection(ctx, options, &listOptions)
		})
		if err != nil {
			scope.err(err, res.ResponseWriter, req.Request)
			return
		}
		// if the rest.Deleter returns a nil object, fill out a status. Callers may return a valid
		// object with the response.
		if result == nil {
			result = &metav1.Status{
				Status: metav1.StatusSuccess,
				Code:   http.StatusOK,
				Details: &metav1.StatusDetails{
					Kind: scope.Kind.Kind,
				},
			}
		} else {
			// when a non-status response is returned, set the self link
			if _, ok := result.(*metav1.Status); !ok {
				if _, err := setListSelfLink(result, req, scope.Namer); err != nil {
					scope.err(err, res.ResponseWriter, req.Request)
					return
				}
			}
		}
		responsewriters.WriteObjectNegotiated(scope.Serializer, scope.Kind.GroupVersion(), w, req.Request, http.StatusOK, result)
	}
}
// resultFunc is a function that returns a rest result and can be run in a goroutine
type resultFunc func() (runtime.Object, error)

// finishRequest makes a given resultFunc asynchronous and handles errors returned by the response.
// Any api.Status object returned is considered an "error", which interrupts the normal response flow.
// The call is bounded by timeout; on expiry a Timeout error is returned while
// the worker goroutine is left to finish into its buffered channels.
func finishRequest(timeout time.Duration, fn resultFunc) (result runtime.Object, err error) {
	// these channels need to be buffered to prevent the goroutine below from hanging indefinitely
	// when the select statement reads something other than the one the goroutine sends on.
	ch := make(chan runtime.Object, 1)
	errCh := make(chan error, 1)
	panicCh := make(chan interface{}, 1)
	go func() {
		// panics don't cross goroutine boundaries, so we have to handle ourselves
		defer utilruntime.HandleCrash(func(panicReason interface{}) {
			// Propagate to parent goroutine
			panicCh <- panicReason
		})
		// note: result/err here deliberately shadow the named returns.
		if result, err := fn(); err != nil {
			errCh <- err
		} else {
			ch <- result
		}
	}()
	select {
	case result = <-ch:
		// A *metav1.Status success object still short-circuits as an "error"
		// so callers surface it directly.
		if status, ok := result.(*metav1.Status); ok {
			return nil, errors.FromObject(status)
		}
		return result, nil
	case err = <-errCh:
		return nil, err
	case p := <-panicCh:
		// Re-raise the worker's panic on the request goroutine.
		panic(p)
	case <-time.After(timeout):
		return nil, errors.NewTimeoutError("request did not complete within allowed duration", 0)
	}
}
// transformDecodeError enriches a decode failure with the kind and version
// information that was (or could not be) determined from the request body.
func transformDecodeError(typer runtime.ObjectTyper, baseErr error, into runtime.Object, gvk *schema.GroupVersionKind, body []byte) error {
	objGVKs, _, err := typer.ObjectKinds(into)
	if err != nil {
		return err
	}
	expected := objGVKs[0]
	if gvk == nil || len(gvk.Kind) == 0 {
		// The payload carried no recognizable kind; include a short summary
		// of the raw bytes to help debugging.
		summary := summarizeData(body, 30)
		return errors.NewBadRequest(fmt.Sprintf("the object provided is unrecognized (must be of type %s): %v (%s)", expected.Kind, baseErr, summary))
	}
	return errors.NewBadRequest(fmt.Sprintf("%s in version %q cannot be handled as a %s: %v", gvk.Kind, gvk.Version, expected.Kind, baseErr))
}
// setSelfLink sets the self link of an object (or the child items in a list) to the base URL of the request
// plus the path and query generated by the provided linkFunc
func setSelfLink(obj runtime.Object, req *restful.Request, namer ScopeNamer) error {
	// TODO: SelfLink generation should return a full URL?
	uri, err := namer.GenerateLink(req, obj)
	if err != nil {
		// NOTE(review): link-generation failures are deliberately swallowed —
		// the object is simply left without a self link. Presumably this
		// covers objects without a name; confirm before changing to `return err`.
		return nil
	}
	return namer.SetSelfLink(obj, uri)
}
// hasUID reports whether obj appears to be a persisted object, i.e. its
// metadata carries a non-empty UID. A nil obj is simply "not persisted".
func hasUID(obj runtime.Object) (bool, error) {
	if obj == nil {
		return false, nil
	}
	accessor, err := meta.Accessor(obj)
	if err != nil {
		// Objects without standard metadata are an internal error here.
		return false, errors.NewInternalError(err)
	}
	return len(accessor.GetUID()) != 0, nil
}
// checkName checks the provided name against the request: the object's own
// name must equal the URL name, and, when a request namespace is set and the
// object declares one, the namespaces must match too.
//
// FIX: removed a dead `if err != nil { return err }` branch that sat inside
// the `err == nil` arm and could never execute.
//
// NOTE(review): errors from namer.ObjectName are deliberately ignored — an
// object whose name cannot be extracted passes validation unchecked. This
// preserves the original behavior; confirm before tightening.
func checkName(obj runtime.Object, name, namespace string, namer ScopeNamer) error {
	objNamespace, objName, err := namer.ObjectName(obj)
	if err != nil {
		return nil
	}
	if objName != name {
		return errors.NewBadRequest(fmt.Sprintf(
			"the name of the object (%s) does not match the name on the URL (%s)", objName, name))
	}
	if len(namespace) > 0 {
		if len(objNamespace) > 0 && objNamespace != namespace {
			return errors.NewBadRequest(fmt.Sprintf(
				"the namespace of the object (%s) does not match the namespace on the request (%s)", objNamespace, namespace))
		}
	}
	return nil
}
// setListSelfLink sets the self link of a list to the base URL, then sets the
// self links on all child objects. It returns the number of items in the list.
// Non-list objects are a no-op returning (0, nil).
func setListSelfLink(obj runtime.Object, req *restful.Request, namer ScopeNamer) (int, error) {
	if !meta.IsListType(obj) {
		return 0, nil
	}
	uri, err := namer.GenerateListLink(req)
	if err != nil {
		return 0, err
	}
	// Failure to set the list's own link is logged but not fatal.
	if linkErr := namer.SetSelfLink(obj, uri); linkErr != nil {
		glog.V(4).Infof("Unable to set self link on object: %v", linkErr)
	}
	items := 0
	walkErr := meta.EachListItem(obj, func(item runtime.Object) error {
		items++
		return setSelfLink(item, req, namer)
	})
	return items, walkErr
}
// summarizeData renders data for inclusion in an error message: JSON-looking
// payloads (leading '{') are shown verbatim, anything else is hex-encoded;
// both forms are truncated to maxLength bytes with a trailing " ..." marker.
func summarizeData(data []byte, maxLength int) string {
	if len(data) == 0 {
		return "<empty>"
	}
	// Choose the renderer based on the first byte of the full payload.
	render := hex.EncodeToString
	if data[0] == '{' {
		render = func(b []byte) string { return string(b) }
	}
	if len(data) > maxLength {
		return render(data[:maxLength]) + " ..."
	}
	return render(data)
}
func readBody(req *http.Request) ([]byte, error) {
defer req.Body.Close()
return ioutil.ReadAll(req.Body)
}
// parseTimeout converts the ?timeout= query value into a duration, falling
// back to a 30 second default when the value is absent or unparsable
// (parse failures are logged, not propagated).
func parseTimeout(str string) time.Duration {
	const fallback = 30 * time.Second
	if str == "" {
		return fallback
	}
	timeout, err := time.ParseDuration(str)
	if err != nil {
		glog.Errorf("Failed to parse %q: %v", str, err)
		return fallback
	}
	return timeout
}
|
/*
Copyright (c) 2016, Percona LLC and/or its affiliates. All rights reserved.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
*/
package pmm
import (
"errors"
"fmt"
"time"
)
const (
	// qanAPIBasePath is the URL path prefix for the QAN API.
	qanAPIBasePath = "qan-api"
	// noMonitoring is the marker substring looked for in API responses.
	noMonitoring = "No monitoring registered for this node identified as"
	// apiTimeout bounds calls to the PMM server API.
	apiTimeout = 10 * time.Second
	// NameRegex constrains instance names: 2-60 word chars, '-', ':' or '.'.
	NameRegex = `^[-\w:\.]{2,60}$`
)
var (
	// you can use `-ldflags -X github.com/percona/pmm-client/pmm.Version=`
	// to set build version number
	Version = "1.13.0"
	// you can use `-ldflags -X github.com/percona/pmm-client/pmm.RootDir=`
	// to set root filesystem for pmm-admin
	RootDir = ""
	// Filesystem layout of the client and agent installs, all rooted at RootDir.
	PMMBaseDir   = RootDir + "/usr/local/percona/pmm-client"
	AgentBaseDir = RootDir + "/usr/local/percona/qan-agent"
	ConfigFile   = fmt.Sprintf("%s/pmm.yml", PMMBaseDir)
	SSLCertFile  = fmt.Sprintf("%s/server.crt", PMMBaseDir)
	SSLKeyFile   = fmt.Sprintf("%s/server.key", PMMBaseDir)
	// NOTE(review): Go convention wants error strings lowercase without trailing
	// punctuation (staticcheck ST1005); left unchanged since callers may show
	// these messages verbatim.
	ErrDuplicate  = errors.New("there is already one instance with this name under monitoring.")
	ErrNoService  = errors.New("no service found.")
	errNoInstance = errors.New("no instance found on QAN API.")
)
// Errors aggregates multiple errors into a single error value.
type Errors []error

// Error implements the error interface by joining all collected errors with ", ".
func (e Errors) Error() string {
	return join(e, ", ")
}
// join concatenates the elements of a to create a single string. The separator string
// sep is placed between elements in the resulting string.
func join(a []error, sep string) string {
if len(a) == 0 {
return ""
}
if len(a) == 1 {
return a[0].Error()
}
nilErr := fmt.Sprintf("%v", error(nil))
n := len(sep) * (len(a) - 1)
for i := 0; i < len(a); i++ {
if a[i] == nil {
n += len(nilErr)
} else {
n += len(a[i].Error())
}
}
b := make([]byte, n)
bp := copy(b, a[0].Error())
for _, s := range a[1:] {
bp += copy(b[bp:], sep)
if s == nil {
bp += copy(b[bp:], nilErr)
} else {
bp += copy(b[bp:], s.Error())
}
}
return string(b)
}
PMM-2842 Update versions to 1.14.0
/*
Copyright (c) 2016, Percona LLC and/or its affiliates. All rights reserved.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
*/
package pmm
import (
"errors"
"fmt"
"time"
)
const (
	// qanAPIBasePath is the URL path prefix for the QAN API.
	qanAPIBasePath = "qan-api"
	// noMonitoring is the marker substring looked for in API responses.
	noMonitoring = "No monitoring registered for this node identified as"
	// apiTimeout bounds calls to the PMM server API.
	apiTimeout = 10 * time.Second
	// NameRegex constrains instance names: 2-60 word chars, '-', ':' or '.'.
	NameRegex = `^[-\w:\.]{2,60}$`
)
var (
	// you can use `-ldflags -X github.com/percona/pmm-client/pmm.Version=`
	// to set build version number
	Version = "1.14.0"
	// you can use `-ldflags -X github.com/percona/pmm-client/pmm.RootDir=`
	// to set root filesystem for pmm-admin
	RootDir = ""
	// Filesystem layout of the client and agent installs, all rooted at RootDir.
	PMMBaseDir   = RootDir + "/usr/local/percona/pmm-client"
	AgentBaseDir = RootDir + "/usr/local/percona/qan-agent"
	ConfigFile   = fmt.Sprintf("%s/pmm.yml", PMMBaseDir)
	SSLCertFile  = fmt.Sprintf("%s/server.crt", PMMBaseDir)
	SSLKeyFile   = fmt.Sprintf("%s/server.key", PMMBaseDir)
	// NOTE(review): Go convention wants error strings lowercase without trailing
	// punctuation (staticcheck ST1005); left unchanged since callers may show
	// these messages verbatim.
	ErrDuplicate  = errors.New("there is already one instance with this name under monitoring.")
	ErrNoService  = errors.New("no service found.")
	errNoInstance = errors.New("no instance found on QAN API.")
)
// Errors aggregates multiple errors into a single error value.
type Errors []error

// Error implements the error interface by joining all collected errors with ", ".
func (e Errors) Error() string {
	return join(e, ", ")
}
// join concatenates the elements of a to create a single string. The separator string
// sep is placed between elements in the resulting string.
func join(a []error, sep string) string {
if len(a) == 0 {
return ""
}
if len(a) == 1 {
return a[0].Error()
}
nilErr := fmt.Sprintf("%v", error(nil))
n := len(sep) * (len(a) - 1)
for i := 0; i < len(a); i++ {
if a[i] == nil {
n += len(nilErr)
} else {
n += len(a[i].Error())
}
}
b := make([]byte, n)
bp := copy(b, a[0].Error())
for _, s := range a[1:] {
bp += copy(b[bp:], sep)
if s == nil {
bp += copy(b[bp:], nilErr)
} else {
bp += copy(b[bp:], s.Error())
}
}
return string(b)
}
|
package updateBloomData
import (
"Inf191BloomFilter/src/databaseAccessObj"
"strconv"
"github.com/willf/bloom"
)
const bitArraySize = 10000
const numberOfHashFunction = 5
// BloomFilter struct holds the pointer to the bloomFilter object
type BloomFilter struct {
bloomFilter *bloom.BloomFilter
}
// New is called to instantiate a new BloomFilter object
func New() *BloomFilter {
bloomFilter := bloom.New(bitArraySize, numberOfHashFunction)
return &BloomFilter{bloomFilter}
}
// UpdateBloomFilter is used when more unsubscribed emails have been added to the database
func (bf *BloomFilter) UpdateBloomFilter() {
}
// RepopulateBloomFilter will be called if unsubscribed emails are removed from the
// database (customers resubscribe to emails)
func (bf *BloomFilter) RepopulateBloomFilter() {
newBloomFilter := bloom.New(bitArraySize, numberOfHashFunction)
var arrayOfUserIDEmail []string
arrayOfUserIDEmail = getArrayOfUserIDEmail()
for i := range arrayOfUserIDEmail {
newBloomFilter.AddString(arrayOfUserIDEmail[i])
}
bf.bloomFilter = newBloomFilter.Copy()
}
// getArrayOfUserIDEmail retrieves all records in the database and returns an array
// of strings in the form of userid_email
func getArrayOfUserIDEmail() []string {
var arrayOfUserIDEmail []string
dao := databaseAccessObj.New("bloom:test@/unsubscribed")
databaseResultMap := dao.SelectAll()
for key, value := range databaseResultMap {
for i := range value {
arrayOfUserIDEmail = append(arrayOfUserIDEmail, strconv.Itoa(int(key))+"_"+value[i])
}
}
dao.CloseConnection()
return arrayOfUserIDEmail
}
Created GetArrayOfUnsubscribedEmails and UpdateBloomFilter
package updateBloomData
import (
"Inf191BloomFilter/src/databaseAccessObj"
"strconv"
"github.com/willf/bloom"
)
const bitArraySize = 10000
const numberOfHashFunction = 5
// BloomFilter struct holds the pointer to the bloomFilter object
type BloomFilter struct {
bloomFilter *bloom.BloomFilter
}
// New is called to instantiate a new BloomFilter object
func New() *BloomFilter {
bloomFilter := bloom.New(bitArraySize, numberOfHashFunction)
return &BloomFilter{bloomFilter}
}
// UpdateBloomFilter is used when more unsubscribed emails have been added to the database
func (bf *BloomFilter) UpdateBloomFilter() {
var arrayOfUserIDEmail = getArrayOfUserIDEmail()
for i := range arrayOfUserIDEmail {
bf.bloomFilter.AddString(arrayOfUserIDEmail[i])
}
}
// RepopulateBloomFilter will be called if unsubscribed emails are removed from the
// database (customers resubscribe to emails)
func (bf *BloomFilter) RepopulateBloomFilter() {
newBloomFilter := bloom.New(bitArraySize, numberOfHashFunction)
var arrayOfUserIDEmail []string
arrayOfUserIDEmail = getArrayOfUserIDEmail()
for i := range arrayOfUserIDEmail {
newBloomFilter.AddString(arrayOfUserIDEmail[i])
}
bf.bloomFilter = newBloomFilter.Copy()
}
// getArrayOfUserIDEmail retrieves all records in the database and returns an array
// of strings in the form of userid_email
func getArrayOfUserIDEmail() []string {
var arrayOfUserIDEmail []string
dao := databaseAccessObj.New("bloom:test@/unsubscribed")
databaseResultMap := dao.SelectAll()
for key, value := range databaseResultMap {
for i := range value {
arrayOfUserIDEmail = append(arrayOfUserIDEmail, strconv.Itoa(int(key))+"_"+value[i])
}
}
dao.CloseConnection()
return arrayOfUserIDEmail
}
//GetArrayOfUnsubscribedEmails given a list of strings will return a list of those
//that exist in the bloom filter
func (bf *BloomFilter) GetArrayOfUnsubscribedEmails(arrayOfEmails []string) []string {
var arrayOfUnsubscribedEmails []string
for i := range arrayOfEmails {
if bf.bloomFilter.TestString(arrayOfEmails[i]) {
arrayOfUnsubscribedEmails = append(arrayOfUnsubscribedEmails, arrayOfEmails[i])
}
}
return arrayOfUnsubscribedEmails
}
|
package sqls
import (
_ "github.com/go-sql-driver/mysql"
"fmt"
"os"
"testing"
)
var (
db *SqlWrap
username string = "root" // your mysql username for test
passwd string = "123456" // your mysql password for test
insertNum int64 = 100
)
func init() {
var err error
db, err = Open("mysql", fmt.Sprintf("%s:%s@tcp(127.0.0.1:3306)/golib_test", username, passwd))
if err != nil {
fmt.Printf("connect to mysql error\n")
os.Exit(1)
}
// you can also use sql.Open and then assign to SqlWrap
/*
mysqlConn, err := sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(127.0.0.1:3306)/golib_test", username, passwd))
if err != nil {
fmt.Printf("connect to mysql error\n")
os.Exit(1)
}
db = &SqlWrap{db: mysqlConn}
*/
}
func TestCreateTable(t *testing.T) {
sql := "CREATE TABLE IF NOT EXISTS `users` (`id` bigint unsigned NOT NULL AUTO_INCREMENT, `name` varchar(255) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8"
_, err := db.Exec(sql)
if err != nil {
t.Fatalf("create table error, %v", err)
}
}
func TestInsertOne(t *testing.T) {
sql := "INSERT INTO users (name) VALUES ('test')"
_, err := db.Exec(sql)
if err != nil {
t.Fatalf("insert into db error, %v", err)
}
}
func TestInsertTx(t *testing.T) {
sql := "INSERT INTO users (name) VALUES (?)"
tx, err := db.Begin()
if err != nil {
t.Fatalf("start tx error: %v", err)
}
for i := int64(0); i < insertNum-1; i++ {
_, err := db.ExecTx(tx, sql, "test")
if err != nil {
db.Rollback(tx)
t.Fatalf("tx insert into db error, %v", err)
}
}
db.Commit(tx)
}
func TestQuery(t *testing.T) {
sql := "SELECT id, name FROM users WHERE name = ?"
rows, err := db.Query(sql, "test")
if err != nil {
t.Fatalf("query db error, %v", err)
}
defer rows.Close()
var id int64
var name string
var count int64 = 0
for rows.Next() {
count++
rows.Scan(
&id,
&name,
)
}
err = rows.Err()
if err != nil {
t.Fatalf("query db error, %v", err)
}
if count != insertNum {
t.Fatalf("query db error, %d rows inserted and %d get", insertNum, count)
}
}
func TestDropTable(t *testing.T) {
sql := "DROP TABLE users"
_, err := db.Exec(sql)
if err != nil {
t.Fatalf("drop table error, %v", err)
}
}
sqls: typo
package sqls
import (
_ "github.com/go-sql-driver/mysql"
"fmt"
"os"
"testing"
)
var (
db *SqlWrap
username string = "root" // your mysql username for test
passwd string = "" // your mysql password for test
insertNum int64 = 100
)
func init() {
var err error
db, err = Open("mysql", fmt.Sprintf("%s:%s@tcp(127.0.0.1:3306)/golib_test", username, passwd))
if err != nil {
fmt.Printf("connect to mysql error\n")
os.Exit(1)
}
// you can also use sql.Open and then assign to SqlWrap
/*
mysqlConn, err := sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(127.0.0.1:3306)/golib_test", username, passwd))
if err != nil {
fmt.Printf("connect to mysql error\n")
os.Exit(1)
}
db = &SqlWrap{db: mysqlConn}
*/
}
func TestCreateTable(t *testing.T) {
sql := "CREATE TABLE IF NOT EXISTS `users` (`id` bigint unsigned NOT NULL AUTO_INCREMENT, `name` varchar(255) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8"
_, err := db.Exec(sql)
if err != nil {
t.Fatalf("create table error, %v", err)
}
}
func TestInsertOne(t *testing.T) {
sql := "INSERT INTO users (name) VALUES ('test')"
_, err := db.Exec(sql)
if err != nil {
t.Fatalf("insert into db error, %v", err)
}
}
func TestInsertTx(t *testing.T) {
sql := "INSERT INTO users (name) VALUES (?)"
tx, err := db.Begin()
if err != nil {
t.Fatalf("start tx error: %v", err)
}
for i := int64(0); i < insertNum-1; i++ {
_, err := db.ExecTx(tx, sql, "test")
if err != nil {
db.Rollback(tx)
t.Fatalf("tx insert into db error, %v", err)
}
}
db.Commit(tx)
}
func TestQuery(t *testing.T) {
sql := "SELECT id, name FROM users WHERE name = ?"
rows, err := db.Query(sql, "test")
if err != nil {
t.Fatalf("query db error, %v", err)
}
defer rows.Close()
var id int64
var name string
var count int64 = 0
for rows.Next() {
count++
rows.Scan(
&id,
&name,
)
}
err = rows.Err()
if err != nil {
t.Fatalf("query db error, %v", err)
}
if count != insertNum {
t.Fatalf("query db error, %d rows inserted and %d get", insertNum, count)
}
}
func TestDropTable(t *testing.T) {
sql := "DROP TABLE users"
_, err := db.Exec(sql)
if err != nil {
t.Fatalf("drop table error, %v", err)
}
}
|
package templates
import (
"errors"
"fmt"
"time"
g "github.com/onsi/ginkgo"
o "github.com/onsi/gomega"
kapiv1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
kapi "k8s.io/kubernetes/pkg/apis/core"
templateapi "github.com/openshift/origin/pkg/template/apis/template"
exutil "github.com/openshift/origin/test/extended/util"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/api/legacyscheme"
)
var _ = g.Describe("[Conformance][templates] templateinstance cross-namespace test", func() {
defer g.GinkgoRecover()
var (
cli = exutil.NewCLI("templates", exutil.KubeConfigPath())
cli2 = exutil.NewCLI("templates2", exutil.KubeConfigPath())
)
g.It("should create and delete objects across namespaces", func() {
err := cli2.AsAdmin().Run("adm").Args("policy", "add-role-to-user", "admin", cli.Username()).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// parameters for templateinstance
_, err = cli.KubeClient().CoreV1().Secrets(cli.Namespace()).Create(&kapiv1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "secret",
},
Data: map[string][]byte{
"NAMESPACE": []byte(cli2.Namespace()),
},
})
o.Expect(err).NotTo(o.HaveOccurred())
templateinstance := &templateapi.TemplateInstance{
ObjectMeta: metav1.ObjectMeta{
Name: "templateinstance",
},
Spec: templateapi.TemplateInstanceSpec{
Template: templateapi.Template{
ObjectMeta: metav1.ObjectMeta{
Name: "template",
Namespace: cli.Namespace(),
},
Parameters: []templateapi.Parameter{
{
Name: "NAMESPACE",
},
},
},
Secret: &kapi.LocalObjectReference{
Name: "secret",
},
},
}
err = addObjectsToTemplate(&templateinstance.Spec.Template, []runtime.Object{
// secret in the same namespace
&kapi.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "secret1",
},
},
// secret in a different namespace
&kapi.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "secret2",
Namespace: "${NAMESPACE}",
},
},
}, legacyscheme.Scheme.PrioritizedVersionsAllGroups()...)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("creating the templateinstance")
_, err = cli.TemplateClient().Template().TemplateInstances(cli.Namespace()).Create(templateinstance)
o.Expect(err).NotTo(o.HaveOccurred())
// wait for templateinstance controller to do its thing
err = wait.Poll(time.Second, time.Minute, func() (bool, error) {
templateinstance, err = cli.TemplateClient().Template().TemplateInstances(cli.Namespace()).Get(templateinstance.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
for _, c := range templateinstance.Status.Conditions {
if c.Reason == "Failed" && c.Status == kapi.ConditionTrue {
return false, fmt.Errorf("failed condition: %s", c.Message)
}
if c.Reason == "Created" && c.Status == kapi.ConditionTrue {
return true, nil
}
}
return false, nil
})
o.Expect(err).NotTo(o.HaveOccurred())
_, err = cli.KubeClient().CoreV1().Secrets(cli.Namespace()).Get("secret1", metav1.GetOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
_, err = cli.KubeClient().CoreV1().Secrets(cli2.Namespace()).Get("secret2", metav1.GetOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
g.By("deleting the templateinstance")
foreground := metav1.DeletePropagationForeground
err = cli.TemplateClient().Template().TemplateInstances(cli.Namespace()).Delete(templateinstance.Name, &metav1.DeleteOptions{PropagationPolicy: &foreground})
o.Expect(err).NotTo(o.HaveOccurred())
// wait for garbage collector to do its thing
err = wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
_, err = cli.TemplateClient().Template().TemplateInstances(cli.Namespace()).Get(templateinstance.Name, metav1.GetOptions{})
if kerrors.IsNotFound(err) {
return true, nil
}
return false, err
})
o.Expect(err).NotTo(o.HaveOccurred())
_, err = cli.KubeClient().CoreV1().Secrets(cli.Namespace()).Get("secret1", metav1.GetOptions{})
o.Expect(kerrors.IsNotFound(err)).To(o.BeTrue())
_, err = cli.KubeClient().CoreV1().Secrets(cli2.Namespace()).Get("secret2", metav1.GetOptions{})
o.Expect(kerrors.IsNotFound(err)).To(o.BeTrue())
})
})
// AddObjectsToTemplate adds the objects to the template using the target versions to choose the conversion destination
func addObjectsToTemplate(template *templateapi.Template, objects []runtime.Object, targetVersions ...schema.GroupVersion) error {
for i := range objects {
obj := objects[i]
if obj == nil {
return errors.New("cannot add a nil object to a template")
}
// We currently add legacy types first to the scheme, followed by the types in the new api
// groups. We have to check all ObjectKinds and not just use the first one returned by
// ObjectKind().
gvks, _, err := legacyscheme.Scheme.ObjectKinds(obj)
if err != nil {
return err
}
var targetVersion *schema.GroupVersion
outerLoop:
for j := range targetVersions {
possibleVersion := targetVersions[j]
for _, kind := range gvks {
if kind.Group == possibleVersion.Group {
targetVersion = &possibleVersion
break outerLoop
}
}
}
if targetVersion == nil {
return fmt.Errorf("no target version found for object[%d], gvks %v in %v", i, gvks, targetVersions)
}
wrappedObject := runtime.NewEncodable(legacyscheme.Codecs.LegacyCodec(*targetVersion), obj)
template.Objects = append(template.Objects, wrappedObject)
}
return nil
}
Add debugging to templateinstance gc test
package templates
import (
"errors"
"fmt"
"time"
g "github.com/onsi/ginkgo"
o "github.com/onsi/gomega"
kapiv1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api/legacyscheme"
kapi "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
templateapi "github.com/openshift/origin/pkg/template/apis/template"
exutil "github.com/openshift/origin/test/extended/util"
)
var _ = g.Describe("[Conformance][templates] templateinstance cross-namespace test", func() {
defer g.GinkgoRecover()
var (
cli = exutil.NewCLI("templates", exutil.KubeConfigPath())
cli2 = exutil.NewCLI("templates2", exutil.KubeConfigPath())
)
g.It("should create and delete objects across namespaces", func() {
err := cli2.AsAdmin().Run("adm").Args("policy", "add-role-to-user", "admin", cli.Username()).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// parameters for templateinstance
_, err = cli.KubeClient().CoreV1().Secrets(cli.Namespace()).Create(&kapiv1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "secret",
},
Data: map[string][]byte{
"NAMESPACE": []byte(cli2.Namespace()),
},
})
o.Expect(err).NotTo(o.HaveOccurred())
templateinstance := &templateapi.TemplateInstance{
ObjectMeta: metav1.ObjectMeta{
Name: "templateinstance",
},
Spec: templateapi.TemplateInstanceSpec{
Template: templateapi.Template{
ObjectMeta: metav1.ObjectMeta{
Name: "template",
Namespace: cli.Namespace(),
},
Parameters: []templateapi.Parameter{
{
Name: "NAMESPACE",
},
},
},
Secret: &kapi.LocalObjectReference{
Name: "secret",
},
},
}
err = addObjectsToTemplate(&templateinstance.Spec.Template, []runtime.Object{
// secret in the same namespace
&kapi.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "secret1",
},
},
// secret in a different namespace
&kapi.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "secret2",
Namespace: "${NAMESPACE}",
},
},
}, legacyscheme.Scheme.PrioritizedVersionsAllGroups()...)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("creating the templateinstance")
_, err = cli.TemplateClient().Template().TemplateInstances(cli.Namespace()).Create(templateinstance)
o.Expect(err).NotTo(o.HaveOccurred())
// wait for templateinstance controller to do its thing
err = wait.Poll(time.Second, time.Minute, func() (bool, error) {
templateinstance, err = cli.TemplateClient().Template().TemplateInstances(cli.Namespace()).Get(templateinstance.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
for _, c := range templateinstance.Status.Conditions {
if c.Reason == "Failed" && c.Status == kapi.ConditionTrue {
return false, fmt.Errorf("failed condition: %s", c.Message)
}
if c.Reason == "Created" && c.Status == kapi.ConditionTrue {
return true, nil
}
}
return false, nil
})
o.Expect(err).NotTo(o.HaveOccurred())
framework.Logf("Template Instance object: %#v", templateinstance)
_, err = cli.KubeClient().CoreV1().Secrets(cli.Namespace()).Get("secret1", metav1.GetOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
_, err = cli.KubeClient().CoreV1().Secrets(cli2.Namespace()).Get("secret2", metav1.GetOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
g.By("deleting the templateinstance")
foreground := metav1.DeletePropagationForeground
err = cli.TemplateClient().Template().TemplateInstances(cli.Namespace()).Delete(templateinstance.Name, &metav1.DeleteOptions{PropagationPolicy: &foreground})
o.Expect(err).NotTo(o.HaveOccurred())
// wait for garbage collector to do its thing
err = wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
_, err = cli.TemplateClient().Template().TemplateInstances(cli.Namespace()).Get(templateinstance.Name, metav1.GetOptions{})
if kerrors.IsNotFound(err) {
return true, nil
}
return false, err
})
o.Expect(err).NotTo(o.HaveOccurred())
_, err = cli.KubeClient().CoreV1().Secrets(cli.Namespace()).Get("secret1", metav1.GetOptions{})
o.Expect(kerrors.IsNotFound(err)).To(o.BeTrue())
_, err = cli.KubeClient().CoreV1().Secrets(cli2.Namespace()).Get("secret2", metav1.GetOptions{})
o.Expect(kerrors.IsNotFound(err)).To(o.BeTrue())
})
})
// AddObjectsToTemplate adds the objects to the template using the target versions to choose the conversion destination
func addObjectsToTemplate(template *templateapi.Template, objects []runtime.Object, targetVersions ...schema.GroupVersion) error {
for i := range objects {
obj := objects[i]
if obj == nil {
return errors.New("cannot add a nil object to a template")
}
// We currently add legacy types first to the scheme, followed by the types in the new api
// groups. We have to check all ObjectKinds and not just use the first one returned by
// ObjectKind().
gvks, _, err := legacyscheme.Scheme.ObjectKinds(obj)
if err != nil {
return err
}
var targetVersion *schema.GroupVersion
outerLoop:
for j := range targetVersions {
possibleVersion := targetVersions[j]
for _, kind := range gvks {
if kind.Group == possibleVersion.Group {
targetVersion = &possibleVersion
break outerLoop
}
}
}
if targetVersion == nil {
return fmt.Errorf("no target version found for object[%d], gvks %v in %v", i, gvks, targetVersions)
}
wrappedObject := runtime.NewEncodable(legacyscheme.Codecs.LegacyCodec(*targetVersion), obj)
template.Objects = append(template.Objects, wrappedObject)
}
return nil
}
|
package application
import (
"net/http"
"os"
"path/filepath"
"time"
"github.com/didip/stopwatch"
"github.com/didip/tollbooth"
"github.com/pressly/chi"
"gopkg.in/tylerb/graceful.v1"
"github.com/resourced/resourced-master/handlers"
"github.com/resourced/resourced-master/middlewares"
)
func (app *Application) newHandlerInstruments() map[string]chan int64 {
instruments := make(map[string]chan int64)
for _, key := range []string{"GetHosts", "GetLogs", "GetLogsExecutors"} {
instruments[key] = make(chan int64)
}
return instruments
}
func (app *Application) getHandlerInstrument(key string) chan int64 {
var instrument chan int64
app.RLock()
instrument = app.HandlerInstruments[key]
app.RUnlock()
return instrument
}
func (app *Application) mux() *chi.Mux {
generalAPILimiter := tollbooth.NewLimiter(int64(app.GeneralConfig.RateLimiters.GeneralAPI), time.Second)
signupLimiter := tollbooth.NewLimiter(int64(app.GeneralConfig.RateLimiters.PostSignup), time.Second)
useHTTPS := app.GeneralConfig.HTTPS.CertFile != "" && app.GeneralConfig.HTTPS.KeyFile != ""
CSRF := middlewares.CSRFMiddleware(useHTTPS, app.GeneralConfig.CookieSecret)
r := chi.NewRouter()
// Set middlewares which impact every request.
r.Use(middlewares.SetAddr(app.GeneralConfig.Addr))
r.Use(middlewares.SetVIPAddr(app.GeneralConfig.VIPAddr))
r.Use(middlewares.SetVIPProtocol(app.GeneralConfig.VIPProtocol))
r.Use(middlewares.SetDBs(app.DBConfig))
r.Use(middlewares.SetCookieStore(app.cookieStore))
r.Use(middlewares.SetMailers(app.Mailers))
r.Use(middlewares.SetMessageBus(app.MessageBus))
r.Use(middlewares.SetLogger("outLogger", app.OutLogger))
r.Use(middlewares.SetLogger("errLogger", app.ErrLogger))
r.Get("/signup", handlers.GetSignup)
r.Post("/signup", tollbooth.LimitFuncHandler(signupLimiter, handlers.PostSignup).(http.HandlerFunc))
r.Get("/login", handlers.GetLogin)
r.Post("/login", handlers.PostLogin)
r.Route("/", func(r chi.Router) {
r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember, middlewares.SetAccessTokens)
r.Get("/", stopwatch.LatencyFuncHandler(app.getHandlerInstrument("GetHosts"), []string{"GET"}, handlers.GetHosts).(http.HandlerFunc))
})
r.Route("/saved-queries", func(r chi.Router) {
r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember)
r.Post("/", handlers.PostSavedQueries)
r.Route("/:id", func(r chi.Router) {
r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember)
r.Post("/", handlers.PostPutDeleteSavedQueriesID)
r.Delete("/", handlers.PostPutDeleteSavedQueriesID)
})
})
r.Route("/graphs", func(r chi.Router) {
r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember, middlewares.SetAccessTokens)
r.Get("/", handlers.GetGraphs)
r.Post("/", handlers.PostGraphs)
r.Route("/:id", func(r chi.Router) {
r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember, middlewares.SetAccessTokens)
r.Get("/", handlers.GetPostPutDeleteGraphsID)
r.Post("/", handlers.GetPostPutDeleteGraphsID)
r.Put("/", handlers.GetPostPutDeleteGraphsID)
r.Delete("/", handlers.GetPostPutDeleteGraphsID)
})
})
r.Route("/logs", func(r chi.Router) {
r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember, middlewares.SetAccessTokens)
r.Get("/", stopwatch.LatencyFuncHandler(app.getHandlerInstrument("GetLogs"), []string{"GET"}, handlers.GetLogs).(http.HandlerFunc))
r.Get("/executors", stopwatch.LatencyFuncHandler(app.getHandlerInstrument("GetLogsExecutors"), []string{"GET"}, handlers.GetLogsExecutors).(http.HandlerFunc))
})
r.Route("/checks", func(r chi.Router) {
r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember, middlewares.SetAccessTokens)
r.Get("/", handlers.GetChecks)
r.Post("/", handlers.PostChecks)
r.Route("/:checkID", func(r chi.Router) {
r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember)
r.Post("/", handlers.PostPutDeleteCheckID)
r.Put("/", handlers.PostPutDeleteCheckID)
r.Delete("/", handlers.PostPutDeleteCheckID)
r.Post("/silence", handlers.PostCheckIDSilence)
r.Route("/triggers", func(r chi.Router) {
r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember)
r.Post("/", handlers.PostChecksTriggers)
r.Route("/:triggerID", func(r chi.Router) {
r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember)
r.Post("/", handlers.PostPutDeleteCheckTriggerID)
r.Put("/", handlers.PostPutDeleteCheckTriggerID)
r.Delete("/", handlers.PostPutDeleteCheckTriggerID)
})
})
})
})
r.Route("/users", func(r chi.Router) {
r.Route("/:id", func(r chi.Router) {
r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember)
r.Post("/", handlers.PostPutDeleteUsersID)
r.Put("/", handlers.PostPutDeleteUsersID)
r.Delete("/", handlers.PostPutDeleteUsersID)
})
r.Get("/email-verification/:token", handlers.GetUsersEmailVerificationToken)
})
r.Route("/clusters", func(r chi.Router) {
r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember)
r.Get("/", handlers.GetClusters)
r.Post("/", handlers.PostClusters)
r.Route("/:clusterID", func(r chi.Router) {
r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember)
r.Post("/", handlers.PostPutDeleteClusterID)
r.Put("/", handlers.PostPutDeleteClusterID)
r.Delete("/", handlers.PostPutDeleteClusterID)
r.Route("/current", func(r chi.Router) {
r.Post("/", handlers.PostClusterIDCurrent)
})
r.Post("/access-tokens", handlers.PostAccessTokens)
r.Post("/users", handlers.PostPutDeleteClusterIDUsers)
r.Put("/users", handlers.PostPutDeleteClusterIDUsers)
r.Delete("/users", handlers.PostPutDeleteClusterIDUsers)
r.Route("/metrics", func(r chi.Router) {
r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember)
r.Post("/", handlers.PostMetrics)
r.Route("/:metricID", func(r chi.Router) {
r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember)
r.Post("/", handlers.PostPutDeleteMetricID)
r.Put("/", handlers.PostPutDeleteMetricID)
r.Delete("/", handlers.PostPutDeleteMetricID)
})
})
})
})
r.Route("/access-tokens/:id", func(r chi.Router) {
r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember)
r.Post("/level", handlers.PostAccessTokensLevel)
r.Post("/enabled", handlers.PostAccessTokensEnabled)
r.Post("/delete", handlers.PostAccessTokensDelete)
})
r.Route("/api", func(r chi.Router) {
r.Route("/hosts", func(r chi.Router) {
r.Use(middlewares.MustLoginApi)
r.Get("/", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.GetApiHosts).(http.HandlerFunc))
r.Post("/", handlers.PostApiHosts)
})
r.Route("/graphs/:id", func(r chi.Router) {
r.Use(middlewares.MustLoginApi)
r.Put("/metrics", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.PutApiGraphsIDMetrics).(http.HandlerFunc))
})
r.Route("/metrics", func(r chi.Router) {
r.Route("/streams", func(r chi.Router) {
r.Use(middlewares.MustLoginApiStream)
r.Handle("/", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.ApiMetricStreams))
})
r.Route("/:id/streams", func(r chi.Router) {
r.Use(middlewares.MustLoginApiStream)
r.Handle("/", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.ApiMetricIDStreams))
})
r.Route("/:id/hosts/:host/streams", func(r chi.Router) {
r.Use(middlewares.MustLoginApiStream)
r.Handle("/", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.ApiMetricIDStreams))
})
r.Route("/:id", func(r chi.Router) {
r.Use(middlewares.MustLoginApi)
r.Get("/", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.GetApiTSMetrics).(http.HandlerFunc))
r.Get("/15min", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.GetApiTSMetrics15Min).(http.HandlerFunc))
r.Get("/hosts/:host", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.GetApiTSMetricsByHost).(http.HandlerFunc))
r.Get("/hosts/:host/15min", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.GetApiTSMetricsByHost15Min).(http.HandlerFunc))
})
})
r.Route("/events", func(r chi.Router) {
r.Use(middlewares.MustLoginApi)
r.Post("/", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.PostApiEvents).(http.HandlerFunc))
r.Get("/line", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.GetApiEventsLine).(http.HandlerFunc))
r.Get("/band", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.GetApiEventsBand).(http.HandlerFunc))
r.Delete("/:id", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.DeleteApiEventsID).(http.HandlerFunc))
})
r.Route("/logs", func(r chi.Router) {
r.Use(middlewares.MustLoginApi)
r.Get("/", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.GetApiLogs).(http.HandlerFunc))
r.Post("/", handlers.PostApiLogs)
r.Get("/executors", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.GetApiLogsExecutors).(http.HandlerFunc))
})
r.Route("/executors", func(r chi.Router) {
r.Use(middlewares.MustLoginApi)
r.Post("/", handlers.PostApiExecutors)
})
r.Route("/checks/:id", func(r chi.Router) {
r.Use(middlewares.MustLoginApi)
r.Get("/results", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.GetApiCheckIDResults).(http.HandlerFunc))
})
r.Route("/metadata", func(r chi.Router) {
r.Use(middlewares.MustLoginApi)
r.Get("/", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.GetApiMetadata).(http.HandlerFunc))
r.Route("/:key", func(r chi.Router) {
r.Use(middlewares.MustLoginApi)
r.Get("/", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.GetApiMetadataKey).(http.HandlerFunc))
r.Post("/", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.PostApiMetadataKey).(http.HandlerFunc))
r.Delete("/", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.DeleteApiMetadataKey).(http.HandlerFunc))
})
})
})
// Path to /static files
workDir, _ := os.Getwd()
r.FileServer("/static", http.Dir(filepath.Join(workDir, "static")))
return r
}
// NewHTTPServer returns an instance of HTTP server.
func (app *Application) NewHTTPServer() (*graceful.Server, error) {
requestTimeout, err := time.ParseDuration(app.GeneralConfig.RequestShutdownTimeout)
if err != nil {
return nil, err
}
// Create HTTP server
srv := &graceful.Server{
Timeout: requestTimeout,
Server: &http.Server{Addr: app.GeneralConfig.Addr, Handler: app.mux()},
}
return srv, nil
}
Expose Mux as public method.
package application
import (
"net/http"
"os"
"path/filepath"
"time"
"github.com/didip/stopwatch"
"github.com/didip/tollbooth"
"github.com/pressly/chi"
"gopkg.in/tylerb/graceful.v1"
"github.com/resourced/resourced-master/handlers"
"github.com/resourced/resourced-master/middlewares"
)
func (app *Application) newHandlerInstruments() map[string]chan int64 {
instruments := make(map[string]chan int64)
for _, key := range []string{"GetHosts", "GetLogs", "GetLogsExecutors"} {
instruments[key] = make(chan int64)
}
return instruments
}
func (app *Application) getHandlerInstrument(key string) chan int64 {
var instrument chan int64
app.RLock()
instrument = app.HandlerInstruments[key]
app.RUnlock()
return instrument
}
// Mux routes HTTP requests to their appropriate handlers.
// Browser-facing routes are protected by CSRF + session middlewares;
// the /api subtree authenticates with access tokens (MustLoginApi) and
// is throttled by generalAPILimiter.
func (app *Application) Mux() *chi.Mux {
	// Rate limiters: a shared one for /api endpoints, a stricter one for signup.
	generalAPILimiter := tollbooth.NewLimiter(int64(app.GeneralConfig.RateLimiters.GeneralAPI), time.Second)
	signupLimiter := tollbooth.NewLimiter(int64(app.GeneralConfig.RateLimiters.PostSignup), time.Second)
	// Secure cookies only when both TLS cert and key are configured.
	useHTTPS := app.GeneralConfig.HTTPS.CertFile != "" && app.GeneralConfig.HTTPS.KeyFile != ""
	CSRF := middlewares.CSRFMiddleware(useHTTPS, app.GeneralConfig.CookieSecret)
	r := chi.NewRouter()
	// Set middlewares which impact every request.
	r.Use(middlewares.SetAddr(app.GeneralConfig.Addr))
	r.Use(middlewares.SetVIPAddr(app.GeneralConfig.VIPAddr))
	r.Use(middlewares.SetVIPProtocol(app.GeneralConfig.VIPProtocol))
	r.Use(middlewares.SetDBs(app.DBConfig))
	r.Use(middlewares.SetCookieStore(app.cookieStore))
	r.Use(middlewares.SetMailers(app.Mailers))
	r.Use(middlewares.SetMessageBus(app.MessageBus))
	r.Use(middlewares.SetLogger("outLogger", app.OutLogger))
	r.Use(middlewares.SetLogger("errLogger", app.ErrLogger))
	// Public pages: signup (rate limited) and login.
	r.Get("/signup", handlers.GetSignup)
	r.Post("/signup", tollbooth.LimitFuncHandler(signupLimiter, handlers.PostSignup).(http.HandlerFunc))
	r.Get("/login", handlers.GetLogin)
	r.Post("/login", handlers.PostLogin)
	// Dashboard root; GetHosts latency is instrumented via stopwatch.
	r.Route("/", func(r chi.Router) {
		r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember, middlewares.SetAccessTokens)
		r.Get("/", stopwatch.LatencyFuncHandler(app.getHandlerInstrument("GetHosts"), []string{"GET"}, handlers.GetHosts).(http.HandlerFunc))
	})
	r.Route("/saved-queries", func(r chi.Router) {
		r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember)
		r.Post("/", handlers.PostSavedQueries)
		r.Route("/:id", func(r chi.Router) {
			r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember)
			r.Post("/", handlers.PostPutDeleteSavedQueriesID)
			r.Delete("/", handlers.PostPutDeleteSavedQueriesID)
		})
	})
	r.Route("/graphs", func(r chi.Router) {
		r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember, middlewares.SetAccessTokens)
		r.Get("/", handlers.GetGraphs)
		r.Post("/", handlers.PostGraphs)
		r.Route("/:id", func(r chi.Router) {
			r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember, middlewares.SetAccessTokens)
			r.Get("/", handlers.GetPostPutDeleteGraphsID)
			r.Post("/", handlers.GetPostPutDeleteGraphsID)
			r.Put("/", handlers.GetPostPutDeleteGraphsID)
			r.Delete("/", handlers.GetPostPutDeleteGraphsID)
		})
	})
	r.Route("/logs", func(r chi.Router) {
		r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember, middlewares.SetAccessTokens)
		r.Get("/", stopwatch.LatencyFuncHandler(app.getHandlerInstrument("GetLogs"), []string{"GET"}, handlers.GetLogs).(http.HandlerFunc))
		r.Get("/executors", stopwatch.LatencyFuncHandler(app.getHandlerInstrument("GetLogsExecutors"), []string{"GET"}, handlers.GetLogsExecutors).(http.HandlerFunc))
	})
	r.Route("/checks", func(r chi.Router) {
		r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember, middlewares.SetAccessTokens)
		r.Get("/", handlers.GetChecks)
		r.Post("/", handlers.PostChecks)
		r.Route("/:checkID", func(r chi.Router) {
			r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember)
			r.Post("/", handlers.PostPutDeleteCheckID)
			r.Put("/", handlers.PostPutDeleteCheckID)
			r.Delete("/", handlers.PostPutDeleteCheckID)
			r.Post("/silence", handlers.PostCheckIDSilence)
			r.Route("/triggers", func(r chi.Router) {
				r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember)
				r.Post("/", handlers.PostChecksTriggers)
				r.Route("/:triggerID", func(r chi.Router) {
					r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember)
					r.Post("/", handlers.PostPutDeleteCheckTriggerID)
					r.Put("/", handlers.PostPutDeleteCheckTriggerID)
					r.Delete("/", handlers.PostPutDeleteCheckTriggerID)
				})
			})
		})
	})
	r.Route("/users", func(r chi.Router) {
		r.Route("/:id", func(r chi.Router) {
			r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember)
			r.Post("/", handlers.PostPutDeleteUsersID)
			r.Put("/", handlers.PostPutDeleteUsersID)
			r.Delete("/", handlers.PostPutDeleteUsersID)
		})
		// Email verification is reachable without login (token in URL).
		r.Get("/email-verification/:token", handlers.GetUsersEmailVerificationToken)
	})
	r.Route("/clusters", func(r chi.Router) {
		r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember)
		r.Get("/", handlers.GetClusters)
		r.Post("/", handlers.PostClusters)
		r.Route("/:clusterID", func(r chi.Router) {
			r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember)
			r.Post("/", handlers.PostPutDeleteClusterID)
			r.Put("/", handlers.PostPutDeleteClusterID)
			r.Delete("/", handlers.PostPutDeleteClusterID)
			r.Route("/current", func(r chi.Router) {
				r.Post("/", handlers.PostClusterIDCurrent)
			})
			r.Post("/access-tokens", handlers.PostAccessTokens)
			r.Post("/users", handlers.PostPutDeleteClusterIDUsers)
			r.Put("/users", handlers.PostPutDeleteClusterIDUsers)
			r.Delete("/users", handlers.PostPutDeleteClusterIDUsers)
			r.Route("/metrics", func(r chi.Router) {
				r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember)
				r.Post("/", handlers.PostMetrics)
				r.Route("/:metricID", func(r chi.Router) {
					r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember)
					r.Post("/", handlers.PostPutDeleteMetricID)
					r.Put("/", handlers.PostPutDeleteMetricID)
					r.Delete("/", handlers.PostPutDeleteMetricID)
				})
			})
		})
	})
	r.Route("/access-tokens/:id", func(r chi.Router) {
		r.Use(CSRF, middlewares.MustLogin, middlewares.SetClusters, middlewares.MustBeMember)
		r.Post("/level", handlers.PostAccessTokensLevel)
		r.Post("/enabled", handlers.PostAccessTokensEnabled)
		r.Post("/delete", handlers.PostAccessTokensDelete)
	})
	// JSON API; token-authenticated and rate limited per endpoint.
	r.Route("/api", func(r chi.Router) {
		r.Route("/hosts", func(r chi.Router) {
			r.Use(middlewares.MustLoginApi)
			r.Get("/", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.GetApiHosts).(http.HandlerFunc))
			r.Post("/", handlers.PostApiHosts)
		})
		r.Route("/graphs/:id", func(r chi.Router) {
			r.Use(middlewares.MustLoginApi)
			r.Put("/metrics", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.PutApiGraphsIDMetrics).(http.HandlerFunc))
		})
		r.Route("/metrics", func(r chi.Router) {
			// Streaming endpoints use a dedicated auth middleware and
			// r.Handle (any method) rather than r.Get.
			r.Route("/streams", func(r chi.Router) {
				r.Use(middlewares.MustLoginApiStream)
				r.Handle("/", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.ApiMetricStreams))
			})
			r.Route("/:id/streams", func(r chi.Router) {
				r.Use(middlewares.MustLoginApiStream)
				r.Handle("/", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.ApiMetricIDStreams))
			})
			r.Route("/:id/hosts/:host/streams", func(r chi.Router) {
				r.Use(middlewares.MustLoginApiStream)
				r.Handle("/", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.ApiMetricIDStreams))
			})
			r.Route("/:id", func(r chi.Router) {
				r.Use(middlewares.MustLoginApi)
				r.Get("/", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.GetApiTSMetrics).(http.HandlerFunc))
				r.Get("/15min", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.GetApiTSMetrics15Min).(http.HandlerFunc))
				r.Get("/hosts/:host", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.GetApiTSMetricsByHost).(http.HandlerFunc))
				r.Get("/hosts/:host/15min", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.GetApiTSMetricsByHost15Min).(http.HandlerFunc))
			})
		})
		r.Route("/events", func(r chi.Router) {
			r.Use(middlewares.MustLoginApi)
			r.Post("/", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.PostApiEvents).(http.HandlerFunc))
			r.Get("/line", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.GetApiEventsLine).(http.HandlerFunc))
			r.Get("/band", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.GetApiEventsBand).(http.HandlerFunc))
			r.Delete("/:id", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.DeleteApiEventsID).(http.HandlerFunc))
		})
		r.Route("/logs", func(r chi.Router) {
			r.Use(middlewares.MustLoginApi)
			r.Get("/", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.GetApiLogs).(http.HandlerFunc))
			r.Post("/", handlers.PostApiLogs)
			r.Get("/executors", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.GetApiLogsExecutors).(http.HandlerFunc))
		})
		r.Route("/executors", func(r chi.Router) {
			r.Use(middlewares.MustLoginApi)
			r.Post("/", handlers.PostApiExecutors)
		})
		r.Route("/checks/:id", func(r chi.Router) {
			r.Use(middlewares.MustLoginApi)
			r.Get("/results", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.GetApiCheckIDResults).(http.HandlerFunc))
		})
		r.Route("/metadata", func(r chi.Router) {
			r.Use(middlewares.MustLoginApi)
			r.Get("/", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.GetApiMetadata).(http.HandlerFunc))
			r.Route("/:key", func(r chi.Router) {
				r.Use(middlewares.MustLoginApi)
				r.Get("/", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.GetApiMetadataKey).(http.HandlerFunc))
				r.Post("/", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.PostApiMetadataKey).(http.HandlerFunc))
				r.Delete("/", tollbooth.LimitFuncHandler(generalAPILimiter, handlers.DeleteApiMetadataKey).(http.HandlerFunc))
			})
		})
	})
	// Path to /static files
	workDir, _ := os.Getwd()
	r.FileServer("/static", http.Dir(filepath.Join(workDir, "static")))
	return r
}
// NewHTTPServer returns an instance of HTTP server.
// The graceful server waits up to RequestShutdownTimeout for in-flight
// requests to finish before shutting down.
func (app *Application) NewHTTPServer() (*graceful.Server, error) {
	requestTimeout, err := time.ParseDuration(app.GeneralConfig.RequestShutdownTimeout)
	if err != nil {
		return nil, err
	}
	httpServer := &http.Server{Addr: app.GeneralConfig.Addr, Handler: app.Mux()}
	return &graceful.Server{
		Timeout: requestTimeout,
		Server:  httpServer,
	}, nil
}
|
package connector
import (
"fmt"
"net"
"os"
"os/exec"
"path"
"time"
"strings"
humanize "github.com/dustin/go-humanize"
"github.com/ellcrys/crypto"
"github.com/ellcrys/util"
cutil "github.com/ncodes/cocoon-util"
"github.com/ncodes/cocoon/core/config"
"github.com/ncodes/cocoon/core/connector/monitor"
docker "github.com/ncodes/go-dockerclient"
logging "github.com/op/go-logging"
)
// Package-level loggers, one per lifecycle phase of a cocoon code
// (build, run, firewall config) so output can be filtered per phase.
var log = logging.MustGetLogger("connector")
var buildLog = logging.MustGetLogger("ccode.build")
var runLog = logging.MustGetLogger("ccode.run")
var configLog = logging.MustGetLogger("ccode.config")
var ccodeLog = logging.MustGetLogger("ccode")

// dckClient is the shared docker client; assigned once in Launch.
var dckClient *docker.Client

func init() {
	// Strip log decorations from the cocoon code's run output so the
	// program's own output appears verbatim.
	runLog.SetBackend(config.MessageOnlyBackend)
}
// Connector defines a structure for starting and managing a cocoon (coode)
type Connector struct {
	waitCh            chan bool         // receives the final failed/ok status in Stop
	req               *Request          // the launch request being serviced
	connectorRPCAddr  string            // connector RPC address, set via SetAddrs
	cocoonCodeRPCAddr string            // cocoon code RPC address, set via SetAddrs
	languages         []Language        // supported languages, registered via AddLanguage
	container         *docker.Container // container running the cocoon code
	containerRunning  bool              // tracks whether the container is believed to be running
	monitor           *monitor.Monitor  // resource monitor for the container
	healthCheck       *HealthChecker    // pings the cocoon code; triggers cocoonUnresponsive
}
// NewConnector creates a new connector for the given launch request.
// The waitCh receives the final status when the connector stops.
func NewConnector(req *Request, waitCh chan bool) *Connector {
	cn := new(Connector)
	cn.req = req
	cn.waitCh = waitCh
	cn.monitor = monitor.NewMonitor()
	return cn
}
// Launch starts a cocoon code: it connects to the local docker daemon,
// prepares the cocoon source in a new container and runs it. On any
// failure the connector is stopped with a failed status.
//
// NOTE(review): the connectorRPCAddr and cocoonCodeRPCAddr parameters are
// never read here; addresses come from the struct fields set via
// SetAddrs — confirm callers always call SetAddrs before Launch.
func (cn *Connector) Launch(connectorRPCAddr, cocoonCodeRPCAddr string) {
	endpoint := "unix:///var/run/docker.sock"
	client, err := docker.NewClient(endpoint)
	if err != nil {
		log.Errorf("failed to create docker client. Is dockerd running locally?. %s", err)
		cn.Stop(true)
		return
	}
	dckClient = client
	cn.monitor.SetDockerClient(dckClient)
	// assumes cn.cocoonCodeRPCAddr has the form ":port" — TODO confirm;
	// otherwise the concatenation below yields an invalid host address.
	cn.healthCheck = NewHealthChecker("127.0.0.1"+cn.cocoonCodeRPCAddr, cn.cocoonUnresponsive)
	// No need downloading, building and starting a cocoon code
	// if DEV_COCOON_RPC_ADDR has been specified. This means a dev cocoon code
	// is running at that address. Just start the connector's client.
	if devCocoonCodeRPCAddr := os.Getenv("DEV_COCOON_RPC_ADDR"); len(devCocoonCodeRPCAddr) > 0 {
		cn.cocoonCodeRPCAddr = devCocoonCodeRPCAddr
		log.Infof("[Dev] Will interact with cocoon code at %s", devCocoonCodeRPCAddr)
		cn.healthCheck.Start()
		return
	}
	log.Info("Ready to install cocoon code")
	log.Debugf("Found ccode url=%s and lang=%s", cn.req.URL, cn.req.Lang)
	lang := cn.GetLanguage(cn.req.Lang)
	if lang == nil {
		log.Errorf("cocoon code language (%s) not supported", cn.req.Lang)
		cn.Stop(true)
		return
	}
	// Fetch, build and firewall the cocoon code in a fresh container.
	newContainer, err := cn.prepareContainer(cn.req, lang)
	if err != nil {
		log.Error(err.Error())
		cn.Stop(true)
		return
	}
	// Environment the cocoon code sees when its run script executes.
	lang.SetRunEnv(map[string]string{
		"COCOON_ID":          cn.req.ID,
		"CONNECTOR_RPC_ADDR": cn.connectorRPCAddr,
		"COCOON_RPC_ADDR":    cn.cocoonCodeRPCAddr, // cocoon code server will bind to the port of this address
		"COCOON_LINK":        cn.req.Link,          // the cocoon code id to link to natively
	})
	go cn.monitor.Monitor()
	// run blocks until the cocoon code exits or the container stops.
	if err = cn.run(newContainer, lang); err != nil {
		log.Error(err.Error())
		cn.Stop(true)
		return
	}
}
// cocoonUnresponsive is the HealthChecker's failure callback: when the
// cocoon code stops answering health checks, the connector is stopped
// with a failed status.
func (cn *Connector) cocoonUnresponsive() {
	log.Info("Cocoon code has failed health check. Stopping cocoon code.")
	cn.Stop(true)
}
// SetAddrs sets the address of the connector and cocoon code RPC servers.
// Must be called before Launch, which reads these fields.
func (cn *Connector) SetAddrs(connectorRPCAddr, cocoonCodeRPCAddr string) {
	cn.connectorRPCAddr = connectorRPCAddr
	cn.cocoonCodeRPCAddr = cocoonCodeRPCAddr
}
// GetRequest returns the current cocoon launch request.
func (cn *Connector) GetRequest() *Request {
	return cn.req
}
// GetCocoonCodeRPCAddr returns the RPC address of the cocoon code.
func (cn *Connector) GetCocoonCodeRPCAddr() string {
	return cn.cocoonCodeRPCAddr
}
// prepareContainer fetches the cocoon code source, creates a container,
// moves the source in to it, builds the source within the container (if
// the language requires a build step) and configures the default firewall.
// Returns the newly created container.
func (cn *Connector) prepareContainer(req *Request, lang Language) (*docker.Container, error) {
	_, err := cn.fetchSource(req, lang)
	if err != nil {
		return nil, err
	}
	// ensure cocoon code isn't already launched on a container
	c, err := cn.getContainer(req.ID)
	if err != nil {
		return nil, fmt.Errorf("failed to check whether cocoon code is already active. %s ", err.Error())
	} else if c != nil {
		return nil, fmt.Errorf("cocoon code already exists on a container")
	}
	newContainer, err := cn.createContainer(
		req.ID,
		lang,
		nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create new container to run cocoon code. %s ", err.Error())
	}
	cn.container = newContainer
	cn.monitor.SetContainerID(cn.container.ID)
	cn.HookToMonitor(req)
	if lang.RequiresBuild() {
		var buildParams map[string]interface{}
		if len(req.BuildParams) > 0 {
			// Build params arrive base64-encoded; decode then parse as JSON.
			req.BuildParams, err = crypto.FromBase64(req.BuildParams)
			if err != nil {
				return nil, fmt.Errorf("failed to decode build parameter. Expects a base 64 encoded string. %s", err)
			}
			if err = util.FromJSON([]byte(req.BuildParams), &buildParams); err != nil {
				return nil, fmt.Errorf("failed to parse build parameter. Expects valid json string. %s", err)
			}
		}
		if err = lang.SetBuildParams(buildParams); err != nil {
			return nil, fmt.Errorf("failed to set and validate build parameter. %s", err)
		}
		// BUGFIX: previously wrapped as fmt.Errorf(err.Error()), which uses
		// the error text as a format string (flagged by go vet) and discards
		// the original error value. Return the error unchanged instead.
		if err = cn.build(newContainer, lang); err != nil {
			return nil, err
		}
	} else {
		log.Info("Cocoon code does not require a build processing. Skipped.")
	}
	if err = cn.configFirewall(newContainer, req); err != nil {
		return nil, err
	}
	return newContainer, nil
}
// HookToMonitor is where all listeners to the monitor are attached.
// It consumes "monitor.report" events in a background goroutine and
// stops listening once a disk-limit restart has been triggered.
func (cn *Connector) HookToMonitor(req *Request) {
	go func() {
		for evt := range cn.monitor.GetEmitter().On("monitor.report") {
			if cn.RestartIfDiskAllocExceeded(req, evt.Args[0].(monitor.Report).DiskUsage) {
				break
			}
		}
	}()
}
// RestartIfDiskAllocExceeded restarts the cocoon code if its disk usage
// has exceeded the limit in the request. Returns true only when a
// restart was actually triggered and succeeded.
func (cn *Connector) RestartIfDiskAllocExceeded(req *Request, curDiskSize int64) bool {
	if curDiskSize <= req.DiskLimit {
		return false
	}
	log.Errorf("cocoon code has used more than its allocated disk space (%s of %s)",
		humanize.Bytes(uint64(curDiskSize)),
		humanize.Bytes(uint64(req.DiskLimit)))
	if err := cn.restart(); err != nil {
		log.Error(err.Error())
		return false
	}
	return true
}
// Stop closes the client, stops the container if it is still running
// and deletes the container. This will effectively bring the launcher
// to a halt. Set failed parameter to true to set a positive exit code or
// false for 0 exit code.
func (cn *Connector) Stop(failed bool) error {
	// Always signal the waiter, even when there is nothing to clean up.
	defer func() {
		cn.waitCh <- failed
	}()
	if dckClient == nil || cn.container == nil {
		return nil
	}
	if cn.monitor != nil {
		cn.monitor.Stop()
	}
	cn.containerRunning = false
	// Force-remove the container together with its volumes.
	err := dckClient.RemoveContainer(docker.RemoveContainerOptions{
		ID:            cn.container.ID,
		RemoveVolumes: true,
		Force:         true,
	})
	if err != nil {
		return fmt.Errorf("failed to remove container. %s", err)
	}
	return nil
}
// restart restarts the cocoon code. The running cocoon code is stopped
// (its container force-removed) and relaunched in a fresh container.
func (cn *Connector) restart() error {
	if dckClient == nil || cn.container == nil {
		return nil
	}
	if cn.monitor != nil {
		cn.monitor.Reset()
	}
	log.Info("Restarting cocoon code")
	cn.containerRunning = false
	err := dckClient.RemoveContainer(docker.RemoveContainerOptions{
		ID:            cn.container.ID,
		RemoveVolumes: true,
		Force:         true,
	})
	if err != nil {
		return fmt.Errorf("failed to remove container. %s", err)
	}
	newContainer, err := cn.prepareContainer(cn.req, cn.GetLanguage(cn.req.Lang))
	if err != nil {
		return fmt.Errorf("restart: %s", err)
	}
	go cn.monitor.Monitor()
	// NOTE(review): the goroutine below assigns the enclosing err variable;
	// since restart returns immediately, that assignment is only ever seen
	// by the goroutine itself — consider a goroutine-local err.
	go func() {
		if err = cn.run(newContainer, cn.GetLanguage(cn.req.Lang)); err != nil {
			log.Info(fmt.Errorf("restart: %s", err))
		}
	}()
	return nil
}
// AddLanguage registers a language with the launcher. An error is
// returned if a language with the same name was already registered.
func (cn *Connector) AddLanguage(lang Language) error {
	if existing := cn.GetLanguage(lang.GetName()); existing != nil {
		return fmt.Errorf("language already exist")
	}
	cn.languages = append(cn.languages, lang)
	return nil
}
// GetLanguage returns the registered language with the given name,
// or nil when no such language has been added.
func (cn *Connector) GetLanguage(name string) Language {
	for i := range cn.languages {
		if cn.languages[i].GetName() == name {
			return cn.languages[i]
		}
	}
	return nil
}
// GetLanguages returns all languages added to the launcher.
func (cn *Connector) GetLanguages() []Language {
	return cn.languages
}
// fetchSource fetches the cocoon code source from a remote address.
// Only public github repositories are currently supported.
func (cn *Connector) fetchSource(req *Request, lang Language) (string, error) {
	if cutil.IsGithubRepoURL(req.URL) {
		return cn.fetchFromGit(req, lang)
	}
	return "", fmt.Errorf("only public source code hosted on github is supported") // TODO: support zip files
}
// findLaunch looks for a previous stored launch/Redeployment by id.
// Currently a stub that always reports "not found".
// TODO: needs implementation
func (cn *Connector) findLaunch(id string) interface{} {
	return nil
}
// fetchFromGit fetches cocoon code from a github repo release tarball
// and returns the directory it was unpacked into. The language decides
// the download destination; any existing content there is wiped first.
func (cn *Connector) fetchFromGit(req *Request, lang Language) (string, error) {
	var repoTarURL, downloadDst string
	var err error
	// checks if job was previously deployed. find a job by the job name.
	if cn.findLaunch(req.ID) != nil {
		return "", fmt.Errorf("cocoon code was previously launched") // TODO: fetch last launch tag and use it
	}
	repoTarURL, err = cutil.GetGithubRepoRelease(req.URL, req.Tag)
	if err != nil {
		return "", fmt.Errorf("Failed to fetch release from github repo. %s", err)
	}
	// set tag to latest if not provided (tagStr is only used for logging).
	tagStr := req.Tag
	if tagStr == "" {
		tagStr = "latest"
	}
	// determine download directory
	downloadDst = lang.GetDownloadDestination()
	// delete download directory if it exists
	if _, err := os.Stat(downloadDst); err == nil {
		log.Info("Download destination is not empty. Deleting content")
		if err = os.RemoveAll(downloadDst); err != nil {
			return "", fmt.Errorf("failed to delete contents of download directory")
		}
		log.Info("Download directory has been deleted")
	}
	// create the download directory
	if err = os.MkdirAll(downloadDst, os.ModePerm); err != nil {
		return "", fmt.Errorf("Failed to create download directory. %s", err)
	}
	log.Infof("Downloading cocoon repository with tag=%s, dst=%s", tagStr, downloadDst)
	filePath := path.Join(downloadDst, fmt.Sprintf("%s.tar.gz", req.ID))
	err = cutil.DownloadFile(repoTarURL, filePath, func(buf []byte) {})
	if err != nil {
		return "", err
	}
	log.Info("Successfully downloaded cocoon code")
	log.Debugf("Unpacking cocoon code to %s", filePath)
	// unpack tarball in place, stripping the repo's top-level directory.
	cmd := "tar"
	args := []string{"-xf", filePath, "-C", downloadDst, "--strip-components", "1"}
	if err = exec.Command(cmd, args...).Run(); err != nil {
		return "", fmt.Errorf("Failed to unpack cocoon code repo tarball. %s", err)
	}
	log.Infof("Successfully unpacked cocoon code to %s", downloadDst)
	// best-effort cleanup of the tarball; failure is not fatal.
	os.Remove(filePath)
	log.Info("Deleted the cocoon code tarball")
	return downloadDst, nil
}
// getContainer looks up a container (running or not) whose name matches
// the given name. Returns nil with no error when there is no match.
func (cn *Connector) getContainer(name string) (*docker.APIContainers, error) {
	apiContainers, err := dckClient.ListContainers(docker.ListContainersOptions{All: true})
	if err != nil {
		return nil, err
	}
	// Docker prefixes container names with a slash.
	target := "/" + name
	for i := range apiContainers {
		c := apiContainers[i]
		if util.InStringSlice(c.Names, target) {
			return &c, nil
		}
	}
	return nil, nil
}
// createContainer creates a brand new container for the cocoon code,
// publishing the cocoon code RPC port on 127.0.0.1 only, and copies the
// downloaded source into the container via `docker cp`.
func (cn *Connector) createContainer(name string, lang Language, env []string) (*docker.Container, error) {
	// split errors are ignored; assumes cocoonCodeRPCAddr is "host:port" — TODO confirm
	_, cocoonCodePort, _ := net.SplitHostPort(cn.cocoonCodeRPCAddr)
	container, err := dckClient.CreateContainer(docker.CreateContainerOptions{
		Name: name,
		Config: &docker.Config{
			Image:      lang.GetImage(),
			Labels:     map[string]string{"name": name, "type": "cocoon_code"},
			WorkingDir: lang.GetSourceRootDir(),
			// Tty + "bash" keeps the container alive so commands can be exec'd later.
			Tty: true,
			ExposedPorts: map[docker.Port]struct{}{
				docker.Port(fmt.Sprintf("%s/tcp", cocoonCodePort)): struct{}{},
			},
			Cmd: []string{"bash"},
			Env: env,
		},
		HostConfig: &docker.HostConfig{
			// Bind only on loopback; the connector fronts all external traffic.
			PortBindings: map[docker.Port][]docker.PortBinding{
				docker.Port(fmt.Sprintf("%s/tcp", cocoonCodePort)): []docker.PortBinding{
					docker.PortBinding{HostIP: "127.0.0.1", HostPort: cocoonCodePort},
				},
			},
		},
	})
	if err != nil {
		return nil, err
	}
	// copy source directory to the container's source directory
	cmd := "docker"
	args := []string{"cp", lang.GetDownloadDestination(), fmt.Sprintf("%s:%s", container.ID, lang.GetCopyDestination())}
	if err = exec.Command(cmd, args...).Run(); err != nil {
		return nil, fmt.Errorf("failed to copy cocoon code source to cocoon. %s", err)
	}
	log.Info("Copied cocoon code source to cocoon")
	return container, nil
}
// stopContainer stops the container with the given id, killing it if it
// has not exited after 5 seconds, and clears the running flag.
func (cn *Connector) stopContainer(id string) error {
	timeout := uint((5 * time.Second).Seconds())
	if err := dckClient.StopContainer(id, timeout); err != nil {
		return err
	}
	cn.containerRunning = false
	return nil
}
// execInContainer is a general purpose function to execute a command in a
// running container; if the container is not running, it starts it.
// It accepts the container, a unique name for the execution and a callback
// that is passed a lifecycle status and a value. If priviledged is true,
// the command attains root powers. Supported statuses are "before" (before
// the command is executed), "after" (after the command is started) and
// "end" (when the command exits; the value is the int exit code).
func (cn *Connector) execInContainer(container *docker.Container, name string, command []string, priviledged bool, logger *logging.Logger, cb func(string, interface{}) error) error {
	containerStatus, err := dckClient.InspectContainer(container.ID)
	if err != nil {
		return fmt.Errorf("failed to inspect container before executing command [%s]. %s", name, err)
	}
	// Start the container first if it is not already running.
	if !containerStatus.State.Running {
		err := dckClient.StartContainer(container.ID, nil)
		if err != nil {
			return fmt.Errorf("failed start container for exec [%s]. %s", name, err.Error())
		}
		cn.containerRunning = true
	}
	// IDIOM FIX: renamed local from `exec` to `execObj` — the old name
	// shadowed the imported os/exec package inside this function.
	execObj, err := dckClient.CreateExec(docker.CreateExecOptions{
		Container:    container.ID,
		AttachStderr: true,
		AttachStdout: true,
		Cmd:          command,
		Privileged:   priviledged,
	})
	if err != nil {
		return fmt.Errorf("failed to create exec [%s] object. %s", name, err)
	}
	if err = cb("before", nil); err != nil {
		return err
	}
	// Stream the exec's stdout/stderr into the provided logger.
	outStream := NewLogStreamer()
	outStream.SetLogger(logger)
	go func() {
		// BUGFIX: use a goroutine-local error instead of assigning the
		// enclosing err, which raced with the main flow's later use of err.
		if err := dckClient.StartExec(execObj.ID, docker.StartExecOptions{
			OutputStream: outStream.GetWriter(),
			ErrorStream:  outStream.GetWriter(),
		}); err != nil {
			log.Infof("failed to start exec [%s] command. %s", name, err)
		}
	}()
	go func() {
		if err := outStream.Start(); err != nil {
			log.Errorf("failed to start exec [%s] output stream logger. %s", name, err)
		}
	}()
	execExitCode := 0
	// Give the exec a moment to actually start before signalling "after".
	time.Sleep(1 * time.Second)
	if err = cb("after", nil); err != nil {
		outStream.Stop()
		return err
	}
	// Poll the exec until it finishes or the container is stopped elsewhere.
	for cn.containerRunning {
		execIns, err := dckClient.InspectExec(execObj.ID)
		if err != nil {
			outStream.Stop()
			return err
		}
		if execIns.Running {
			time.Sleep(500 * time.Millisecond)
			continue
		}
		execExitCode = execIns.ExitCode
		break
	}
	outStream.Stop()
	if err = cb("end", execExitCode); err != nil {
		return err
	}
	if execExitCode != 0 {
		return fmt.Errorf("Exec [%s] exited with code=%d", name, execExitCode)
	}
	return nil
}
// build starts up the container and builds the cocoon code using the
// build script provided by the language. A non-zero exit code from the
// build command is surfaced as an error.
func (cn *Connector) build(container *docker.Container, lang Language) error {
	cmd := []string{"bash", "-c", lang.GetBuildScript()}
	return cn.execInContainer(container, "BUILD", cmd, false, buildLog, func(state string, val interface{}) error {
		if state == "before" {
			log.Info("Building cocoon code...")
			return nil
		}
		if state == "end" {
			if code := val.(int); code != 0 {
				return fmt.Errorf("Build has failed with exit code=%d", code)
			}
			log.Info("Build succeeded!")
		}
		return nil
	})
}
// run executes the cocoon code's run script inside the container via
// execInContainer. The health checker is started once the command is
// launched ("after" phase); a zero exit code is logged as a clean stop.
func (cn *Connector) run(container *docker.Container, lang Language) error {
	return cn.execInContainer(container, "RUN", lang.GetRunScript(), false, runLog, func(state string, val interface{}) error {
		switch state {
		case "before":
			log.Info("Starting cocoon code")
		case "after":
			cn.healthCheck.Start()
			return nil
		case "end":
			if val.(int) == 0 {
				// BUGFIX: message previously read "successfully stop".
				log.Info("Cocoon code successfully stopped")
				return nil
			}
		}
		return nil
	})
}
// getDefaultFirewall returns the default firewall rules for a cocoon
// container as a shell script: default-deny on all chains, then allow
// established traffic, outbound to the connector RPC endpoint, inbound
// replies from the cocoon code RPC port, and DNS to the resolvers found
// in /etc/resolv.conf.
// NOTE(review): `cut -c12-` assumes resolv.conf lines are exactly
// "nameserver <ip>" with a single space — confirm against target images.
func (cn *Connector) getDefaultFirewall() string {
	// split errors are ignored; assumes both addrs are valid "host:port".
	_, cocoonCodeRPCPort, _ := net.SplitHostPort(cn.cocoonCodeRPCAddr)
	connectorRPCIP, connectorRPCPort, _ := net.SplitHostPort(cn.connectorRPCAddr)
	return strings.TrimSpace(`iptables -F &&
iptables -P INPUT DROP &&
iptables -P FORWARD DROP &&
iptables -P OUTPUT DROP &&
iptables -A OUTPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT &&
iptables -A OUTPUT -p tcp -d ` + connectorRPCIP + ` --dport ` + connectorRPCPort + ` -j ACCEPT
iptables -A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT &&
iptables -A INPUT -p tcp --sport ` + cocoonCodeRPCPort + ` -j ACCEPT
dnsIPs="$(cat /etc/resolv.conf | grep 'nameserver' | cut -c12-)" &&
for ip in $dnsIPs;
do
iptables -A OUTPUT -m state --state NEW,ESTABLISHED -d ${ip} -p udp --dport 53 -j ACCEPT;
iptables -A OUTPUT -m state --state ESTABLISHED -p udp -s ${ip} --sport 53 -j ACCEPT;
iptables -A OUTPUT -m state --state NEW,ESTABLISHED -d ${ip} -p tcp --dport 53 -j ACCEPT;
iptables -A OUTPUT -m state --state ESTABLISHED -p tcp -s ${ip} --sport 53 -j ACCEPT;
done`)
}
// configFirewall applies the default firewall rules inside the cocoon
// container. The command runs privileged since iptables requires root.
func (cn *Connector) configFirewall(container *docker.Container, req *Request) error {
	cmd := []string{"bash", "-c", cn.getDefaultFirewall()}
	return cn.execInContainer(container, "CONFIG-FIREWALL", cmd, true, configLog, func(state string, val interface{}) error {
		if state == "before" {
			log.Info("Configuring firewall for cocoon")
		} else if state == "end" && val.(int) == 0 {
			log.Info("Firewall configured for cocoon")
		}
		return nil
	})
}
Stops the health checker.
package connector
import (
"fmt"
"net"
"os"
"os/exec"
"path"
"time"
"strings"
humanize "github.com/dustin/go-humanize"
"github.com/ellcrys/crypto"
"github.com/ellcrys/util"
cutil "github.com/ncodes/cocoon-util"
"github.com/ncodes/cocoon/core/config"
"github.com/ncodes/cocoon/core/connector/monitor"
docker "github.com/ncodes/go-dockerclient"
logging "github.com/op/go-logging"
)
// Package-level loggers, one per lifecycle phase of a cocoon code
// (build, run, firewall config) so output can be filtered per phase.
var log = logging.MustGetLogger("connector")
var buildLog = logging.MustGetLogger("ccode.build")
var runLog = logging.MustGetLogger("ccode.run")
var configLog = logging.MustGetLogger("ccode.config")
var ccodeLog = logging.MustGetLogger("ccode")

// dckClient is the shared docker client; assigned once in Launch.
var dckClient *docker.Client

func init() {
	// Strip log decorations so the cocoon code's run output appears verbatim.
	runLog.SetBackend(config.MessageOnlyBackend)
}
// Connector defines a structure for starting and managing a cocoon (coode)
type Connector struct {
	waitCh            chan bool         // receives the final failed/ok status in Stop
	req               *Request          // the launch request being serviced
	connectorRPCAddr  string            // connector RPC address, set via SetAddrs
	cocoonCodeRPCAddr string            // cocoon code RPC address, set via SetAddrs
	languages         []Language        // supported languages, registered via AddLanguage
	container         *docker.Container // container running the cocoon code
	containerRunning  bool              // tracks whether the container is believed to be running
	monitor           *monitor.Monitor  // resource monitor for the container
	healthCheck       *HealthChecker    // pings the cocoon code; triggers cocoonUnresponsive
}
// NewConnector creates a new connector for the given launch request.
// The waitCh receives the final status when the connector stops.
func NewConnector(req *Request, waitCh chan bool) *Connector {
	cn := new(Connector)
	cn.req = req
	cn.waitCh = waitCh
	cn.monitor = monitor.NewMonitor()
	return cn
}
// Launch starts a cocoon code: it connects to the local docker daemon,
// prepares the cocoon source in a new container and runs it. On any
// failure the connector is stopped with a failed status.
//
// NOTE(review): the connectorRPCAddr and cocoonCodeRPCAddr parameters are
// never read here; addresses come from fields set via SetAddrs — confirm
// callers always call SetAddrs before Launch.
func (cn *Connector) Launch(connectorRPCAddr, cocoonCodeRPCAddr string) {
	endpoint := "unix:///var/run/docker.sock"
	client, err := docker.NewClient(endpoint)
	if err != nil {
		log.Errorf("failed to create docker client. Is dockerd running locally?. %s", err)
		cn.Stop(true)
		return
	}
	dckClient = client
	cn.monitor.SetDockerClient(dckClient)
	// assumes cn.cocoonCodeRPCAddr has the form ":port" — TODO confirm.
	cn.healthCheck = NewHealthChecker("127.0.0.1"+cn.cocoonCodeRPCAddr, cn.cocoonUnresponsive)
	// No need downloading, building and starting a cocoon code
	// if DEV_COCOON_RPC_ADDR has been specified. This means a dev cocoon code
	// is running at that address. Just start the connector's client.
	if devCocoonCodeRPCAddr := os.Getenv("DEV_COCOON_RPC_ADDR"); len(devCocoonCodeRPCAddr) > 0 {
		cn.cocoonCodeRPCAddr = devCocoonCodeRPCAddr
		log.Infof("[Dev] Will interact with cocoon code at %s", devCocoonCodeRPCAddr)
		cn.healthCheck.Start()
		return
	}
	log.Info("Ready to install cocoon code")
	log.Debugf("Found ccode url=%s and lang=%s", cn.req.URL, cn.req.Lang)
	lang := cn.GetLanguage(cn.req.Lang)
	if lang == nil {
		log.Errorf("cocoon code language (%s) not supported", cn.req.Lang)
		cn.Stop(true)
		return
	}
	// Fetch, build and firewall the cocoon code in a fresh container.
	newContainer, err := cn.prepareContainer(cn.req, lang)
	if err != nil {
		log.Error(err.Error())
		cn.Stop(true)
		return
	}
	// Environment the cocoon code sees when its run script executes.
	lang.SetRunEnv(map[string]string{
		"COCOON_ID":          cn.req.ID,
		"CONNECTOR_RPC_ADDR": cn.connectorRPCAddr,
		"COCOON_RPC_ADDR":    cn.cocoonCodeRPCAddr, // cocoon code server will bind to the port of this address
		"COCOON_LINK":        cn.req.Link,          // the cocoon code id to link to natively
	})
	go cn.monitor.Monitor()
	// run blocks until the cocoon code exits or the container stops.
	if err = cn.run(newContainer, lang); err != nil {
		log.Error(err.Error())
		cn.Stop(true)
		return
	}
}
// cocoonUnresponsive is the HealthChecker's failure callback: when the
// cocoon code stops answering health checks, the connector is stopped
// with a failed status.
func (cn *Connector) cocoonUnresponsive() {
	log.Info("Cocoon code has failed health check. Stopping cocoon code.")
	cn.Stop(true)
}
// SetAddrs sets the address of the connector and cocoon code RPC servers.
// Must be called before Launch, which reads these fields.
func (cn *Connector) SetAddrs(connectorRPCAddr, cocoonCodeRPCAddr string) {
	cn.connectorRPCAddr = connectorRPCAddr
	cn.cocoonCodeRPCAddr = cocoonCodeRPCAddr
}
// GetRequest returns the current cocoon launch request.
func (cn *Connector) GetRequest() *Request {
	return cn.req
}
// GetCocoonCodeRPCAddr returns the RPC address of the cocoon code.
func (cn *Connector) GetCocoonCodeRPCAddr() string {
	return cn.cocoonCodeRPCAddr
}
// prepareContainer fetches the cocoon code source, creates a container,
// moves the source in to it, builds the source within the container (if
// the language requires a build step) and configures the default firewall.
// Returns the newly created container.
func (cn *Connector) prepareContainer(req *Request, lang Language) (*docker.Container, error) {
	_, err := cn.fetchSource(req, lang)
	if err != nil {
		return nil, err
	}
	// ensure cocoon code isn't already launched on a container
	c, err := cn.getContainer(req.ID)
	if err != nil {
		return nil, fmt.Errorf("failed to check whether cocoon code is already active. %s ", err.Error())
	} else if c != nil {
		return nil, fmt.Errorf("cocoon code already exists on a container")
	}
	newContainer, err := cn.createContainer(
		req.ID,
		lang,
		nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create new container to run cocoon code. %s ", err.Error())
	}
	cn.container = newContainer
	cn.monitor.SetContainerID(cn.container.ID)
	cn.HookToMonitor(req)
	if lang.RequiresBuild() {
		var buildParams map[string]interface{}
		if len(req.BuildParams) > 0 {
			// Build params arrive base64-encoded; decode then parse as JSON.
			req.BuildParams, err = crypto.FromBase64(req.BuildParams)
			if err != nil {
				return nil, fmt.Errorf("failed to decode build parameter. Expects a base 64 encoded string. %s", err)
			}
			if err = util.FromJSON([]byte(req.BuildParams), &buildParams); err != nil {
				return nil, fmt.Errorf("failed to parse build parameter. Expects valid json string. %s", err)
			}
		}
		if err = lang.SetBuildParams(buildParams); err != nil {
			return nil, fmt.Errorf("failed to set and validate build parameter. %s", err)
		}
		// BUGFIX: previously wrapped as fmt.Errorf(err.Error()), which uses
		// the error text as a format string (flagged by go vet) and discards
		// the original error value. Return the error unchanged instead.
		if err = cn.build(newContainer, lang); err != nil {
			return nil, err
		}
	} else {
		log.Info("Cocoon code does not require a build processing. Skipped.")
	}
	if err = cn.configFirewall(newContainer, req); err != nil {
		return nil, err
	}
	return newContainer, nil
}
// HookToMonitor is where all listeners to the monitor
// are attached.
//
// Currently a single listener consumes "monitor.report" events and
// restarts the cocoon code once its disk allocation is exceeded; the
// goroutine ends when a restart is triggered or the emitter channel closes.
func (cn *Connector) HookToMonitor(req *Request) {
	go func() {
		for evt := range cn.monitor.GetEmitter().On("monitor.report") {
			if cn.RestartIfDiskAllocExceeded(req, evt.Args[0].(monitor.Report).DiskUsage) {
				break
			}
		}
	}()
}
// RestartIfDiskAllocExceeded restarts the cocoon code when its current
// disk usage is above the limit set in the request. It returns true when
// a restart was performed and false otherwise (including restart failure).
func (cn *Connector) RestartIfDiskAllocExceeded(req *Request, curDiskSize int64) bool {
	if curDiskSize <= req.DiskLimit {
		return false
	}
	log.Errorf("cocoon code has used more than its allocated disk space (%s of %s)",
		humanize.Bytes(uint64(curDiskSize)),
		humanize.Bytes(uint64(req.DiskLimit)))
	if err := cn.restart(); err != nil {
		log.Error(err.Error())
		return false
	}
	return true
}
// Stop closes the client, stops the container if it is still running
// and deletes the container. This will effectively bring the launcher
// to a halt. Set failed parameter to true to set a positve exit code or
// false for 0 exit code.
func (cn *Connector) Stop(failed bool) error {
	// always signal the waiter, even on the early-return paths below
	defer func() {
		cn.waitCh <- failed
	}()
	// nothing to tear down if we never got a docker client or container
	if dckClient == nil || cn.container == nil {
		return nil
	}
	if cn.monitor != nil {
		cn.monitor.Stop()
	}
	if cn.healthCheck != nil {
		cn.healthCheck.Stop()
	}
	cn.containerRunning = false
	// force-remove the container along with its volumes
	err := dckClient.RemoveContainer(docker.RemoveContainerOptions{
		ID:            cn.container.ID,
		RemoveVolumes: true,
		Force:         true,
	})
	if err != nil {
		return fmt.Errorf("failed to remove container. %s", err)
	}
	return nil
}
// restart restarts the cocoon code. The running cocoon code is stopped
// and relaunched: the current container is force-removed, a new one is
// prepared from the original request, and monitoring plus the run script
// are started again.
//
// Fixes: the relaunch goroutine previously wrote to the enclosing err
// variable (an unsynchronized write racing with the caller) and reported
// the failure via log.Info wrapped in fmt.Errorf; it now uses a
// goroutine-local error and logs at error level.
func (cn *Connector) restart() error {
	if dckClient == nil || cn.container == nil {
		return nil
	}
	if cn.monitor != nil {
		cn.monitor.Reset()
	}
	log.Info("Restarting cocoon code")
	cn.containerRunning = false
	err := dckClient.RemoveContainer(docker.RemoveContainerOptions{
		ID:            cn.container.ID,
		RemoveVolumes: true,
		Force:         true,
	})
	if err != nil {
		return fmt.Errorf("failed to remove container. %s", err)
	}
	newContainer, err := cn.prepareContainer(cn.req, cn.GetLanguage(cn.req.Lang))
	if err != nil {
		return fmt.Errorf("restart: %s", err)
	}
	go cn.monitor.Monitor()
	go func() {
		// goroutine-local err: do not race on the outer variable
		if err := cn.run(newContainer, cn.GetLanguage(cn.req.Lang)); err != nil {
			log.Errorf("restart: %s", err)
		}
	}()
	return nil
}
// AddLanguage registers a language with the launcher. It fails when a
// language with the same name has already been registered.
func (cn *Connector) AddLanguage(lang Language) error {
	if cn.GetLanguage(lang.GetName()) != nil {
		return fmt.Errorf("language already exist")
	}
	cn.languages = append(cn.languages, lang)
	return nil
}

// GetLanguage looks up a registered language by name, returning nil when
// no match exists.
func (cn *Connector) GetLanguage(name string) Language {
	for i := range cn.languages {
		if cn.languages[i].GetName() == name {
			return cn.languages[i]
		}
	}
	return nil
}

// GetLanguages returns every language registered with the launcher.
func (cn *Connector) GetLanguages() []Language {
	return cn.languages
}
// fetchSource retrieves the cocoon code source from a remote address.
// Only public repositories hosted on github are currently accepted.
func (cn *Connector) fetchSource(req *Request, lang Language) (string, error) {
	if cutil.IsGithubRepoURL(req.URL) {
		return cn.fetchFromGit(req, lang)
	}
	return "", fmt.Errorf("only public source code hosted on github is supported") // TODO: support zip files
}

// findLaunch looks for a previous stored launch/Redeployment by id.
// TODO: needs implementation
func (cn *Connector) findLaunch(id string) interface{} {
	return nil
}
// fetchFromGit fetchs cocoon code from git repo.
// and returns the download directory.
//
// The release tarball for req.Tag (or the latest release when no tag is
// given) is downloaded into the language's download destination, unpacked
// with tar, and the tarball itself is removed afterwards.
func (cn *Connector) fetchFromGit(req *Request, lang Language) (string, error) {
	var repoTarURL, downloadDst string
	var err error
	// checks if job was previously deployed. find a job by the job name.
	if cn.findLaunch(req.ID) != nil {
		return "", fmt.Errorf("cocoon code was previously launched") // TODO: fetch last launch tag and use it
	}
	repoTarURL, err = cutil.GetGithubRepoRelease(req.URL, req.Tag)
	if err != nil {
		return "", fmt.Errorf("Failed to fetch release from github repo. %s", err)
	}
	// set tag to latest if not provided (tagStr is only used for logging here)
	tagStr := req.Tag
	if tagStr == "" {
		tagStr = "latest"
	}
	// determine download directory
	downloadDst = lang.GetDownloadDestination()
	// delete download directory if it exists
	if _, err := os.Stat(downloadDst); err == nil {
		log.Info("Download destination is not empty. Deleting content")
		if err = os.RemoveAll(downloadDst); err != nil {
			return "", fmt.Errorf("failed to delete contents of download directory")
		}
		log.Info("Download directory has been deleted")
	}
	// create the download directory
	if err = os.MkdirAll(downloadDst, os.ModePerm); err != nil {
		return "", fmt.Errorf("Failed to create download directory. %s", err)
	}
	log.Infof("Downloading cocoon repository with tag=%s, dst=%s", tagStr, downloadDst)
	filePath := path.Join(downloadDst, fmt.Sprintf("%s.tar.gz", req.ID))
	err = cutil.DownloadFile(repoTarURL, filePath, func(buf []byte) {})
	if err != nil {
		return "", err
	}
	log.Info("Successfully downloaded cocoon code")
	log.Debugf("Unpacking cocoon code to %s", filePath)
	// unpack tarball; --strip-components 1 drops the top-level repo folder
	cmd := "tar"
	args := []string{"-xf", filePath, "-C", downloadDst, "--strip-components", "1"}
	if err = exec.Command(cmd, args...).Run(); err != nil {
		return "", fmt.Errorf("Failed to unpack cocoon code repo tarball. %s", err)
	}
	log.Infof("Successfully unpacked cocoon code to %s", downloadDst)
	// best-effort cleanup of the tarball; failure is not fatal
	os.Remove(filePath)
	log.Info("Deleted the cocoon code tarball")
	return downloadDst, nil
}
// getContainer scans every container (running or not) and returns the one
// whose name matches, or nil when none does.
func (cn *Connector) getContainer(name string) (*docker.APIContainers, error) {
	apiContainers, err := dckClient.ListContainers(docker.ListContainersOptions{All: true})
	if err != nil {
		return nil, err
	}
	// docker prefixes container names with a slash
	target := "/" + name
	for i := range apiContainers {
		if util.InStringSlice(apiContainers[i].Names, target) {
			return &apiContainers[i], nil
		}
	}
	return nil, nil
}
// createContainer creates a brand new container,
// and copies the cocoon source code to it.
//
// The container exposes the cocoon code RPC port, bound to the host
// loopback interface only, and is left idle on a bash shell until a
// script is executed in it.
func (cn *Connector) createContainer(name string, lang Language, env []string) (*docker.Container, error) {
	// only the port component of the cocoon code RPC address is needed
	_, cocoonCodePort, _ := net.SplitHostPort(cn.cocoonCodeRPCAddr)
	container, err := dckClient.CreateContainer(docker.CreateContainerOptions{
		Name: name,
		Config: &docker.Config{
			Image:      lang.GetImage(),
			Labels:     map[string]string{"name": name, "type": "cocoon_code"},
			WorkingDir: lang.GetSourceRootDir(),
			Tty:        true,
			ExposedPorts: map[docker.Port]struct{}{
				docker.Port(fmt.Sprintf("%s/tcp", cocoonCodePort)): struct{}{},
			},
			Cmd: []string{"bash"},
			Env: env,
		},
		HostConfig: &docker.HostConfig{
			// bind the cocoon port to 127.0.0.1 so it is not reachable
			// from outside the host
			PortBindings: map[docker.Port][]docker.PortBinding{
				docker.Port(fmt.Sprintf("%s/tcp", cocoonCodePort)): []docker.PortBinding{
					docker.PortBinding{HostIP: "127.0.0.1", HostPort: cocoonCodePort},
				},
			},
		},
	})
	if err != nil {
		return nil, err
	}
	// copy source directory to the container's source directory using the
	// docker CLI ("docker cp" has no direct equivalent on this client)
	cmd := "docker"
	args := []string{"cp", lang.GetDownloadDestination(), fmt.Sprintf("%s:%s", container.ID, lang.GetCopyDestination())}
	if err = exec.Command(cmd, args...).Run(); err != nil {
		return nil, fmt.Errorf("failed to copy cocoon code source to cocoon. %s", err)
	}
	log.Info("Copied cocoon code source to cocoon")
	return container, nil
}
// stopContainer stops the container with the given id, killing it if it
// has not exited after a five second grace period.
func (cn *Connector) stopContainer(id string) error {
	const gracePeriodSecs = 5
	if err := dckClient.StopContainer(id, uint(gracePeriodSecs)); err != nil {
		return err
	}
	cn.containerRunning = false
	return nil
}
// execInContainer is a general purpose function
// to execute a command in a running container. If container is not running, it starts it.
// It accepts the container, a unique name for the execution
// and a callback function that is passed a lifecycle status and a value.
// If priviledged is set to true, command will attain root powers.
// Supported statuses are before (before command is executed), after (after command is executed)
// and end (when command exits).
//
// The callback may abort the execution by returning a non-nil error at
// the "before" or "after" stage. At "end" the value passed to the
// callback is the command's exit code (int).
func (cn *Connector) execInContainer(container *docker.Container, name string, command []string, priviledged bool, logger *logging.Logger, cb func(string, interface{}) error) error {
	containerStatus, err := dckClient.InspectContainer(container.ID)
	if err != nil {
		return fmt.Errorf("failed to inspect container before executing command [%s]. %s", name, err)
	}
	// start the container first if it is not already running
	if !containerStatus.State.Running {
		err := dckClient.StartContainer(container.ID, nil)
		if err != nil {
			return fmt.Errorf("failed start container for exec [%s]. %s", name, err.Error())
		}
		cn.containerRunning = true
	}
	exec, err := dckClient.CreateExec(docker.CreateExecOptions{
		Container:    container.ID,
		AttachStderr: true,
		AttachStdout: true,
		Cmd:          command,
		Privileged:   priviledged,
	})
	if err != nil {
		return fmt.Errorf("failed to create exec [%s] object. %s", name, err)
	}
	if err = cb("before", nil); err != nil {
		return err
	}
	// stream the command's stdout/stderr into the supplied logger
	outStream := NewLogStreamer()
	outStream.SetLogger(logger)
	go func() {
		err = dckClient.StartExec(exec.ID, docker.StartExecOptions{
			OutputStream: outStream.GetWriter(),
			ErrorStream:  outStream.GetWriter(),
		})
		if err != nil {
			log.Infof("failed to start exec [%s] command. %s", name, err)
		}
	}()
	go func() {
		err := outStream.Start()
		if err != nil {
			log.Errorf("failed to start exec [%s] output stream logger. %s", name, err)
		}
	}()
	execExitCode := 0
	// give the exec a moment to start before reporting "after"
	time.Sleep(1 * time.Second)
	if err = cb("after", nil); err != nil {
		outStream.Stop()
		return err
	}
	// poll the exec until it finishes (or the container stops)
	for cn.containerRunning {
		execIns, err := dckClient.InspectExec(exec.ID)
		if err != nil {
			outStream.Stop()
			return err
		}
		if execIns.Running {
			time.Sleep(500 * time.Millisecond)
			continue
		}
		execExitCode = execIns.ExitCode
		break
	}
	outStream.Stop()
	if err = cb("end", execExitCode); err != nil {
		return err
	}
	if execExitCode != 0 {
		return fmt.Errorf("Exec [%s] exited with code=%d", name, execExitCode)
	}
	return nil
}
// build runs the language's build script inside the container, logging
// progress to the build logger. A non-zero exit code is reported as an
// error.
func (cn *Connector) build(container *docker.Container, lang Language) error {
	cmd := []string{"bash", "-c", lang.GetBuildScript()}
	return cn.execInContainer(container, "BUILD", cmd, false, buildLog, func(state string, val interface{}) error {
		if state == "before" {
			log.Info("Building cocoon code...")
			return nil
		}
		if state == "end" {
			if code := val.(int); code != 0 {
				return fmt.Errorf("Build has failed with exit code=%d", code)
			}
			log.Info("Build succeeded!")
		}
		return nil
	})
}
// run executes the cocoon code's run script inside the container and
// kicks off the health checker once the command has been issued.
func (cn *Connector) run(container *docker.Container, lang Language) error {
	return cn.execInContainer(container, "RUN", lang.GetRunScript(), false, runLog, func(state string, val interface{}) error {
		switch state {
		case "before":
			log.Info("Starting cocoon code")
		case "after":
			// the cocoon code is (presumably) up; begin health checking
			cn.healthCheck.Start()
		case "end":
			if val.(int) == 0 {
				log.Info("Cocoon code successfully stop")
			}
		}
		return nil
	})
}
// getDefaultFirewall returns the default firewall rules
// for a cocoon container.
//
// The script drops all traffic by default, then allows: established
// connections, outbound TCP to the connector RPC endpoint, inbound TCP
// from the cocoon code RPC port, and DNS (udp/tcp 53) to the resolvers
// listed in /etc/resolv.conf.
func (cn *Connector) getDefaultFirewall() string {
	// split the addresses into host/port parts for use in the rules below
	_, cocoonCodeRPCPort, _ := net.SplitHostPort(cn.cocoonCodeRPCAddr)
	connectorRPCIP, connectorRPCPort, _ := net.SplitHostPort(cn.connectorRPCAddr)
	return strings.TrimSpace(`iptables -F &&
			iptables -P INPUT DROP &&
			iptables -P FORWARD DROP &&
			iptables -P OUTPUT DROP &&
			iptables -A OUTPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT &&
			iptables -A OUTPUT -p tcp -d ` + connectorRPCIP + ` --dport ` + connectorRPCPort + ` -j ACCEPT
			iptables -A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT &&
			iptables -A INPUT -p tcp --sport ` + cocoonCodeRPCPort + ` -j ACCEPT
			dnsIPs="$(cat /etc/resolv.conf | grep 'nameserver' | cut -c12-)" &&
			for ip in $dnsIPs;
			do
				iptables -A OUTPUT -m state --state NEW,ESTABLISHED -d ${ip} -p udp --dport 53 -j ACCEPT;
				iptables -A OUTPUT -m state --state ESTABLISHED -p udp -s ${ip} --sport 53 -j ACCEPT;
				iptables -A OUTPUT -m state --state NEW,ESTABLISHED -d ${ip} -p tcp --dport 53 -j ACCEPT;
				iptables -A OUTPUT -m state --state ESTABLISHED -p tcp -s ${ip} --sport 53 -j ACCEPT;
			done`)
}
// configFirewall applies the default firewall rules inside the cocoon
// container. The rules are executed with root privileges.
func (cn *Connector) configFirewall(container *docker.Container, req *Request) error {
	script := []string{"bash", "-c", cn.getDefaultFirewall()}
	return cn.execInContainer(container, "CONFIG-FIREWALL", script, true, configLog, func(state string, val interface{}) error {
		if state == "before" {
			log.Info("Configuring firewall for cocoon")
		} else if state == "end" && val.(int) == 0 {
			log.Info("Firewall configured for cocoon")
		}
		return nil
	})
}
|
package eval
import (
"encoding/json"
"monkey/ast"
"regexp"
)
// RegEx wraps a compiled Go regular expression together with the source
// pattern text it was created from.
type RegEx struct {
	RegExp *regexp.Regexp // compiled expression
	Value  string         // original pattern text
}

// Inspect returns the source pattern text of the regex.
func (re *RegEx) Inspect() string { return re.Value }

// Type returns the object type tag for regex values.
func (re *RegEx) Type() ObjectType { return REGEX_OBJ }
// CallMethod dispatches an interpreter method call on a regex value to
// the corresponding implementation. Unknown method names raise a
// NOMETHODERROR via panic.
func (re *RegEx) CallMethod(line string, scope *Scope, method string, args ...Object) Object {
	switch method {
	case "match", "matchString":
		return re.Match(line, args...)
	case "replace", "replaceAllString":
		return re.Replace(line, args...)
	case "split":
		return re.Split(line, args...)
	case "findAllString":
		return re.FindAllString(line, args...)
	case "findAllStringIndex":
		return re.FindAllStringIndex(line, args...)
	case "findAllStringSubmatch":
		return re.FindAllStringSubmatch(line, args...)
	case "findAllStringSubmatchIndex":
		return re.FindAllStringSubmatchIndex(line, args...)
	case "findString":
		return re.FindString(line, args...)
	case "findStringIndex":
		return re.FindStringIndex(line, args...)
	case "findStringSubmatch":
		return re.FindStringSubmatch(line, args...)
	case "findStringSubmatchIndex":
		return re.FindStringSubmatchIndex(line, args...)
	case "numSubexp":
		return re.NumSubexp(line, args...)
	case "replaceAllLiteralString":
		return re.ReplaceAllLiteralString(line, args...)
	case "replaceAllStringFunc":
		// needs the scope so the user-supplied callback can be evaluated
		return re.ReplaceAllStringFunc(line, scope, args...)
	case "string":
		return re.String(line, args...)
	case "subexpNames":
		return re.SubexpNames(line, args...)
	}
	panic(NewError(line, NOMETHODERROR, method, re.Type()))
}
// Match reports whether the pattern matches the given string, returning
// the interpreter's TRUE or FALSE object.
func (re *RegEx) Match(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	src, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "match", "*String", args[0].Type()))
	}
	if re.RegExp.MatchString(src.String) {
		return TRUE
	}
	return FALSE
}
// Replace substitutes every match of the pattern in the first argument
// with the second argument and returns the result as a new String.
func (re *RegEx) Replace(line string, args ...Object) Object {
	if len(args) != 2 {
		panic(NewError(line, ARGUMENTERROR, "2", len(args)))
	}
	src, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "replace", "*String", args[0].Type()))
	}
	repl, ok := args[1].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "second", "replace", "*String", args[1].Type()))
	}
	return NewString(re.RegExp.ReplaceAllString(src.String, repl.String))
}
// Split divides the argument string around every match of the pattern
// and returns the pieces as an Array of Strings.
func (re *RegEx) Split(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	src, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "split", "*String", args[0].Type()))
	}
	out := &Array{}
	for _, part := range re.RegExp.Split(src.String, -1) {
		out.Members = append(out.Members, NewString(part))
	}
	return out
}
// FindAllString returns up to n (second argument) successive matches of
// the pattern in the string, as an Array of Strings.
func (re *RegEx) FindAllString(line string, args ...Object) Object {
	if len(args) != 2 {
		panic(NewError(line, ARGUMENTERROR, "2", len(args)))
	}
	src, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "findAllString", "*String", args[0].Type()))
	}
	n, ok := args[1].(*Integer)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "second", "findAllString", "*Integer", args[1].Type()))
	}
	out := &Array{}
	for _, match := range re.RegExp.FindAllString(src.String, int(n.Int64)) {
		out.Members = append(out.Members, NewString(match))
	}
	return out
}
// FindAllStringIndex returns the [start, end) index pairs of up to n
// matches, as an Array of Arrays of Integers.
func (re *RegEx) FindAllStringIndex(line string, args ...Object) Object {
	if len(args) != 2 {
		panic(NewError(line, ARGUMENTERROR, "2", len(args)))
	}
	src, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "findAllStringIndex", "*String", args[0].Type()))
	}
	n, ok := args[1].(*Integer)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "second", "findAllStringIndex", "*Integer", args[1].Type()))
	}
	out := &Array{}
	for _, pair := range re.RegExp.FindAllStringIndex(src.String, int(n.Int64)) {
		row := &Array{}
		for _, idx := range pair {
			row.Members = append(row.Members, NewInteger(int64(idx)))
		}
		out.Members = append(out.Members, row)
	}
	return out
}
// FindAllStringSubmatch returns up to n matches together with their
// submatches, as an Array of Arrays of Strings.
func (re *RegEx) FindAllStringSubmatch(line string, args ...Object) Object {
	if len(args) != 2 {
		panic(NewError(line, ARGUMENTERROR, "2", len(args)))
	}
	src, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "findAllStringSubmatch", "*String", args[0].Type()))
	}
	n, ok := args[1].(*Integer)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "second", "findAllStringSubmatch", "*Integer", args[1].Type()))
	}
	out := &Array{}
	for _, group := range re.RegExp.FindAllStringSubmatch(src.String, int(n.Int64)) {
		row := &Array{}
		for _, sub := range group {
			row.Members = append(row.Members, NewString(sub))
		}
		out.Members = append(out.Members, row)
	}
	return out
}
// FindAllStringSubmatchIndex returns the index pairs of up to n matches
// and their submatches, as an Array of Arrays of Integers.
func (re *RegEx) FindAllStringSubmatchIndex(line string, args ...Object) Object {
	if len(args) != 2 {
		panic(NewError(line, ARGUMENTERROR, "2", len(args)))
	}
	src, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "findAllStringSubmatchIndex", "*String", args[0].Type()))
	}
	n, ok := args[1].(*Integer)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "second", "findAllStringSubmatchIndex", "*Integer", args[1].Type()))
	}
	out := &Array{}
	for _, group := range re.RegExp.FindAllStringSubmatchIndex(src.String, int(n.Int64)) {
		row := &Array{}
		for _, idx := range group {
			row.Members = append(row.Members, NewInteger(int64(idx)))
		}
		out.Members = append(out.Members, row)
	}
	return out
}
// FindString returns the leftmost match of the pattern in the argument
// string (empty string when there is no match).
func (re *RegEx) FindString(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	src, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "findString", "*String", args[0].Type()))
	}
	return NewString(re.RegExp.FindString(src.String))
}
// FindStringIndex returns the [start, end) indices of the leftmost
// match as an Array of Integers (empty when there is no match).
func (re *RegEx) FindStringIndex(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	src, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "findStringIndex", "*String", args[0].Type()))
	}
	out := &Array{}
	for _, idx := range re.RegExp.FindStringIndex(src.String) {
		out.Members = append(out.Members, NewInteger(int64(idx)))
	}
	return out
}
// FindStringSubmatch returns the leftmost match and its submatches as
// an Array of Strings.
func (re *RegEx) FindStringSubmatch(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	src, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "findStringSubmatch", "*String", args[0].Type()))
	}
	out := &Array{}
	for _, sub := range re.RegExp.FindStringSubmatch(src.String) {
		out.Members = append(out.Members, NewString(sub))
	}
	return out
}
// FindStringSubmatchIndex returns the index pairs of the leftmost match
// and its submatches as an Array of Integers.
func (re *RegEx) FindStringSubmatchIndex(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	src, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "findStringSubmatchIndex", "*String", args[0].Type()))
	}
	out := &Array{}
	for _, idx := range re.RegExp.FindStringSubmatchIndex(src.String) {
		out.Members = append(out.Members, NewInteger(int64(idx)))
	}
	return out
}
// NumSubexp returns the number of parenthesized subexpressions in the
// pattern as an Integer.
func (re *RegEx) NumSubexp(line string, args ...Object) Object {
	if len(args) != 0 {
		panic(NewError(line, ARGUMENTERROR, "0", len(args)))
	}
	return NewInteger(int64(re.RegExp.NumSubexp()))
}
// ReplaceAllLiteralString replaces every match of the pattern with the
// second argument taken literally (no $-expansion).
func (re *RegEx) ReplaceAllLiteralString(line string, args ...Object) Object {
	if len(args) != 2 {
		panic(NewError(line, ARGUMENTERROR, "2", len(args)))
	}
	src, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "replaceAllLiteralString", "*String", args[0].Type()))
	}
	repl, ok := args[1].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "second", "replaceAllLiteralString", "*String", args[1].Type()))
	}
	return NewString(re.RegExp.ReplaceAllLiteralString(src.String, repl.String))
}
// ReplaceAllStringFunc replaces every match with the result of calling
// the supplied single-parameter interpreter function on the match text.
func (re *RegEx) ReplaceAllStringFunc(line string, scope *Scope, args ...Object) Object {
	if len(args) != 2 {
		panic(NewError(line, ARGUMENTERROR, "2", len(args)))
	}
	src, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "replaceAllStringFunc", "*String", args[0].Type()))
	}
	fn, ok := args[1].(*Function)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "second", "replaceAllStringFunc", "*Function", args[1].Type()))
	}
	// the callback must accept exactly one parameter (the matched text)
	if np := len(fn.Literal.Parameters); np != 1 {
		panic(NewError(line, FUNCCALLBACKERROR, 1, np))
	}
	result := re.RegExp.ReplaceAllStringFunc(src.String, func(match string) string {
		return replFunc(scope, fn, match)
	})
	return NewString(result)
}
// String returns the source text of the compiled pattern as a String.
func (re *RegEx) String(line string, args ...Object) Object {
	if len(args) != 0 {
		panic(NewError(line, ARGUMENTERROR, "0", len(args)))
	}
	return NewString(re.RegExp.String())
}
// SubexpNames returns the names of the pattern's subexpressions as an
// Array of Strings (the first entry is always the empty string).
func (re *RegEx) SubexpNames(line string, args ...Object) Object {
	if len(args) != 0 {
		panic(NewError(line, ARGUMENTERROR, "0", len(args)))
	}
	out := &Array{}
	for _, name := range re.RegExp.SubexpNames() {
		out.Members = append(out.Members, NewString(name))
	}
	return out
}
// MarshalJSON serializes the regex as its JSON-quoted pattern string.
func (re *RegEx) MarshalJSON() ([]byte, error) {
	pattern := re.RegExp.String()
	return json.Marshal(pattern)
}
// UnmarshalJSON rebuilds the regex from its JSON string form.
//
// Fixes: the json.Unmarshal error was previously discarded, so a
// malformed payload silently compiled an empty/garbage pattern; the
// decode error is now propagated to the caller.
func (re *RegEx) UnmarshalJSON(b []byte) error {
	var str string
	if err := json.Unmarshal(b, &str); err != nil {
		return err
	}
	reg, err := regexp.Compile(str)
	if err != nil {
		return err
	}
	re.RegExp = reg
	return nil
}
/* REGEXP OBJECT */

// Object-type tag and the global name under which the regexp module
// object is registered.
const (
	REGEXP_OBJ  = "REGEXP_OBJ"
	regexp_name = "regexp"
)

// RegExpObj is the built-in "regexp" module object. Its RegExp field is
// nil until one of the compile methods succeeds.
type RegExpObj struct {
	RegExp *regexp.Regexp
}
// NewRegExpObj creates the regexp module object and registers it under
// the global name "regexp".
func NewRegExpObj() Object {
	obj := &RegExpObj{}
	SetGlobalObj(regexp_name, obj)
	return obj
}

// Inspect renders the currently compiled pattern, or a notice when no
// pattern has been compiled yet.
func (rex *RegExpObj) Inspect() string {
	if rex.RegExp == nil {
		return "Invalid RegExpObj!"
	}
	return rex.RegExp.String()
}

// Type returns the object type tag of the regexp module object.
func (rex *RegExpObj) Type() ObjectType { return REGEXP_OBJ }
// CallMethod dispatches an interpreter method call on the regexp module
// object to the corresponding implementation. Unknown method names raise
// a NOMETHODERROR via panic.
func (rex *RegExpObj) CallMethod(line string, scope *Scope, method string, args ...Object) Object {
	switch method {
	case "compile":
		return rex.Compile(line, args...)
	case "compilePOSIX":
		return rex.CompilePOSIX(line, args...)
	case "mustCompile":
		return rex.MustCompile(line, args...)
	case "mustCompilePOSIX":
		return rex.MustCompilePOSIX(line, args...)
	case "findAllString":
		return rex.FindAllString(line, args...)
	case "findAllStringIndex":
		return rex.FindAllStringIndex(line, args...)
	case "findAllStringSubmatch":
		return rex.FindAllStringSubmatch(line, args...)
	case "findAllStringSubmatchIndex":
		return rex.FindAllStringSubmatchIndex(line, args...)
	case "findString":
		return rex.FindString(line, args...)
	case "findStringIndex":
		return rex.FindStringIndex(line, args...)
	case "findStringSubmatch":
		return rex.FindStringSubmatch(line, args...)
	case "findStringSubmatchIndex":
		return rex.FindStringSubmatchIndex(line, args...)
	case "matchString", "match":
		return rex.MatchString(line, args...)
	case "numSubexp":
		return rex.NumSubexp(line, args...)
	case "replaceAllLiteralString":
		return rex.ReplaceAllLiteralString(line, args...)
	case "replaceAllString", "replace":
		return rex.ReplaceAllString(line, args...)
	case "replaceAllStringFunc":
		// needs the scope so the user-supplied callback can be evaluated
		return rex.ReplaceAllStringFunc(line, scope, args...)
	case "split":
		return rex.Split(line, args...)
	case "string":
		return rex.String(line, args...)
	case "subexpNames":
		return rex.SubexpNames(line, args...)
	}
	panic(NewError(line, NOMETHODERROR, method, rex.Type()))
}
// Compile compiles the given pattern and stores it on the object. On a
// bad pattern the stored regexp is overwritten with nil and a Nil value
// carrying the error text is returned; otherwise the object itself is
// returned so calls can be chained.
func (rex *RegExpObj) Compile(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	pat, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "compile", "*String", args[0].Type()))
	}
	var err error
	rex.RegExp, err = regexp.Compile(pat.String)
	if err != nil {
		return NewNil(err.Error())
	}
	return rex
}
// CompilePOSIX compiles the given pattern using POSIX leftmost-longest
// semantics and stores it on the object. On a bad pattern the stored
// regexp is overwritten with nil and a Nil value carrying the error text
// is returned; otherwise the object itself is returned.
//
// Fixes: the parameter-type error previously reported the method name as
// "compile"; it now correctly reports "compilePOSIX".
func (rex *RegExpObj) CompilePOSIX(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	strObj, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "compilePOSIX", "*String", args[0].Type()))
	}
	var err error
	rex.RegExp, err = regexp.CompilePOSIX(strObj.String)
	if err != nil {
		return NewNil(err.Error())
	}
	return rex
}
// MustCompile compiles the given pattern, storing nil on the object when
// compilation panics (the panic from regexp.MustCompile is recovered).
// The object itself is always returned.
func (rex *RegExpObj) MustCompile(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	pat, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "mustCompile", "*String", args[0].Type()))
	}
	// recover regexp.MustCompile's panic and record the failure as a nil
	// RegExp instead of crashing the interpreter
	defer func() {
		if recover() != nil {
			rex.RegExp = nil
		}
	}()
	rex.RegExp = regexp.MustCompile(pat.String)
	return rex
}
// MustCompilePOSIX compiles the given pattern with POSIX semantics,
// storing nil on the object when compilation panics (the panic from
// regexp.MustCompilePOSIX is recovered). The object itself is returned.
func (rex *RegExpObj) MustCompilePOSIX(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	pat, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "mustCompilePOSIX", "*String", args[0].Type()))
	}
	// recover regexp.MustCompilePOSIX's panic and record the failure as a
	// nil RegExp instead of crashing the interpreter
	defer func() {
		if recover() != nil {
			rex.RegExp = nil
		}
	}()
	rex.RegExp = regexp.MustCompilePOSIX(pat.String)
	return rex
}
// FindAllString returns up to n (second argument) successive matches of
// the compiled pattern, as an Array of Strings. A Nil value is returned
// when no pattern has been compiled yet.
func (rex *RegExpObj) FindAllString(line string, args ...Object) Object {
	if len(args) != 2 {
		panic(NewError(line, ARGUMENTERROR, "2", len(args)))
	}
	src, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "findAllString", "*String", args[0].Type()))
	}
	n, ok := args[1].(*Integer)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "second", "findAllString", "*Integer", args[1].Type()))
	}
	if rex.RegExp == nil {
		return NewNil("Before calling findAllString, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
	}
	out := &Array{}
	for _, match := range rex.RegExp.FindAllString(src.String, int(n.Int64)) {
		out.Members = append(out.Members, NewString(match))
	}
	return out
}
// FindAllStringIndex returns the [start, end) index pairs of up to n
// matches, as an Array of Arrays of Integers. A Nil value is returned
// when no pattern has been compiled yet.
func (rex *RegExpObj) FindAllStringIndex(line string, args ...Object) Object {
	if len(args) != 2 {
		panic(NewError(line, ARGUMENTERROR, "2", len(args)))
	}
	src, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "findAllStringIndex", "*String", args[0].Type()))
	}
	n, ok := args[1].(*Integer)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "second", "findAllStringIndex", "*Integer", args[1].Type()))
	}
	if rex.RegExp == nil {
		return NewNil("Before calling findAllStringIndex, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
	}
	out := &Array{}
	for _, pair := range rex.RegExp.FindAllStringIndex(src.String, int(n.Int64)) {
		row := &Array{}
		for _, idx := range pair {
			row.Members = append(row.Members, NewInteger(int64(idx)))
		}
		out.Members = append(out.Members, row)
	}
	return out
}
// FindAllStringSubmatch returns up to n matches and their submatches, as
// an Array of Arrays of Strings. A Nil value is returned when no pattern
// has been compiled yet.
func (rex *RegExpObj) FindAllStringSubmatch(line string, args ...Object) Object {
	if len(args) != 2 {
		panic(NewError(line, ARGUMENTERROR, "2", len(args)))
	}
	src, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "findAllStringSubmatch", "*String", args[0].Type()))
	}
	n, ok := args[1].(*Integer)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "second", "findAllStringSubmatch", "*Integer", args[1].Type()))
	}
	if rex.RegExp == nil {
		return NewNil("Before calling findAllStringSubmatch, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
	}
	out := &Array{}
	for _, group := range rex.RegExp.FindAllStringSubmatch(src.String, int(n.Int64)) {
		row := &Array{}
		for _, sub := range group {
			row.Members = append(row.Members, NewString(sub))
		}
		out.Members = append(out.Members, row)
	}
	return out
}
// FindAllStringSubmatchIndex returns the index pairs of up to n matches
// and their submatches, as an Array of Arrays of Integers. A Nil value
// is returned when no pattern has been compiled yet.
func (rex *RegExpObj) FindAllStringSubmatchIndex(line string, args ...Object) Object {
	if len(args) != 2 {
		panic(NewError(line, ARGUMENTERROR, "2", len(args)))
	}
	src, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "findAllStringSubmatchIndex", "*String", args[0].Type()))
	}
	n, ok := args[1].(*Integer)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "second", "findAllStringSubmatchIndex", "*Integer", args[1].Type()))
	}
	if rex.RegExp == nil {
		return NewNil("Before calling findAllStringSubmatchIndex, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
	}
	out := &Array{}
	for _, group := range rex.RegExp.FindAllStringSubmatchIndex(src.String, int(n.Int64)) {
		row := &Array{}
		for _, idx := range group {
			row.Members = append(row.Members, NewInteger(int64(idx)))
		}
		out.Members = append(out.Members, row)
	}
	return out
}
// FindString returns the leftmost match of the compiled pattern (empty
// string when there is none). A Nil value is returned when no pattern
// has been compiled yet.
func (rex *RegExpObj) FindString(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	src, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "findString", "*String", args[0].Type()))
	}
	if rex.RegExp == nil {
		return NewNil("Before calling findString, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
	}
	return NewString(rex.RegExp.FindString(src.String))
}
// FindStringIndex returns the [start, end) indices of the leftmost match
// as an Array of Integers. A Nil value is returned when no pattern has
// been compiled yet.
func (rex *RegExpObj) FindStringIndex(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	src, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "findStringIndex", "*String", args[0].Type()))
	}
	if rex.RegExp == nil {
		return NewNil("Before calling findStringIndex, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
	}
	out := &Array{}
	for _, idx := range rex.RegExp.FindStringIndex(src.String) {
		out.Members = append(out.Members, NewInteger(int64(idx)))
	}
	return out
}
// FindStringSubmatch returns the leftmost match and its submatches as an
// Array of Strings. A Nil value is returned when no pattern has been
// compiled yet.
func (rex *RegExpObj) FindStringSubmatch(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	src, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "findStringSubmatch", "*String", args[0].Type()))
	}
	if rex.RegExp == nil {
		return NewNil("Before calling findStringSubmatch, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
	}
	out := &Array{}
	for _, sub := range rex.RegExp.FindStringSubmatch(src.String) {
		out.Members = append(out.Members, NewString(sub))
	}
	return out
}
//FindStringSubmatchIndex implements 'findStringSubmatchIndex(str)': wraps
//regexp.(*Regexp).FindStringSubmatchIndex, returning the flat list of byte
//offset pairs (match, then each group) as an *Array of *Integer objects.
func (rex *RegExpObj) FindStringSubmatchIndex(line string, args ...Object) Object {
if len(args) != 1 {
panic(NewError(line, ARGUMENTERROR, "1", len(args)))
}
strObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "findStringSubmatchIndex", "*String", args[0].Type()))
}
if rex.RegExp == nil {
return NewNil("Before calling findStringSubmatchIndex, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
}
ret := &Array{}
intArr := rex.RegExp.FindStringSubmatchIndex(strObj.String)
for _, v := range intArr {
ret.Members = append(ret.Members, NewInteger(int64(v)))
}
return ret
}
//MatchString implements 'matchString(str)' / 'match(str)': reports whether
//the compiled pattern matches the *String argument, returning TRUE or FALSE.
func (rex *RegExpObj) MatchString(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	strObj, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "matchString", "*String", args[0].Type()))
	}
	//Fix: every sibling method guards against an uncompiled pattern; without
	//this check MatchString dereferences a nil *regexp.Regexp and panics when
	//called before compile|compilePOSIX|mustCompile|mustCompilePOSIX.
	if rex.RegExp == nil {
		return NewNil("Before calling matchString, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
	}
	if rex.RegExp.MatchString(strObj.String) {
		return TRUE
	}
	return FALSE
}
//NumSubexp implements 'numSubexp()': returns the number of parenthesized
//capture groups in the compiled pattern as an *Integer. Takes no arguments.
func (rex *RegExpObj) NumSubexp(line string, args ...Object) Object {
if len(args) != 0 {
panic(NewError(line, ARGUMENTERROR, "0", len(args)))
}
if rex.RegExp == nil {
return NewNil("Before calling NumSubexp, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
}
i := rex.RegExp.NumSubexp()
return NewInteger(int64(i))
}
//ReplaceAllLiteralString implements 'replaceAllLiteralString(src, repl)':
//wraps regexp.(*Regexp).ReplaceAllLiteralString — the replacement is taken
//literally ($-expansion is NOT performed). Returns the result as a *String.
func (rex *RegExpObj) ReplaceAllLiteralString(line string, args ...Object) Object {
if len(args) != 2 {
panic(NewError(line, ARGUMENTERROR, "2", len(args)))
}
srcObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "replaceAllLiteralString", "*String", args[0].Type()))
}
replObj, ok := args[1].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "second", "replaceAllLiteralString", "*String", args[1].Type()))
}
if rex.RegExp == nil {
return NewNil("Before calling replaceAllLiteralString, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
}
s := rex.RegExp.ReplaceAllLiteralString(srcObj.String, replObj.String)
return NewString(s)
}
//ReplaceAllString implements 'replaceAllString(src, repl)' / 'replace':
//wraps regexp.(*Regexp).ReplaceAllString ($N group references in repl are
//expanded). Returns the result as a *String.
func (rex *RegExpObj) ReplaceAllString(line string, args ...Object) Object {
if len(args) != 2 {
panic(NewError(line, ARGUMENTERROR, "2", len(args)))
}
srcObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "replaceAllString", "*String", args[0].Type()))
}
replObj, ok := args[1].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "second", "replaceAllString", "*String", args[1].Type()))
}
if rex.RegExp == nil {
return NewNil("Before calling replaceAllString, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
}
s := rex.RegExp.ReplaceAllString(srcObj.String, replObj.String)
return NewString(s)
}
//ReplaceAllStringFunc implements 'replaceAllStringFunc(src, fn)': each match
//is passed to the script-level callback 'fn' (which must take exactly one
//parameter) and replaced by the callback's string result via replFunc.
func (rex *RegExpObj) ReplaceAllStringFunc(line string, scope *Scope, args ...Object) Object {
if len(args) != 2 {
panic(NewError(line, ARGUMENTERROR, "2", len(args)))
}
srcObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "replaceAllStringFunc", "*String", args[0].Type()))
}
block, ok := args[1].(*Function)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "second", "replaceAllStringFunc", "*Function", args[1].Type()))
}
if rex.RegExp == nil {
return NewNil("Before calling replaceAllStringFunc, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
}
//the callback must accept exactly one argument (the matched text)
paramCount := len(block.Literal.Parameters)
if paramCount != 1 {
panic(NewError(line, FUNCCALLBACKERROR, 1, paramCount))
}
ret := rex.RegExp.ReplaceAllStringFunc(srcObj.String, func(str string) string {
return replFunc(scope, block, str)
})
return NewString(ret)
}
//Split implements 'split(str, n)': wraps regexp.(*Regexp).Split, splitting
//'str' around matches into at most n pieces (n < 0 means all pieces), and
//returns them as an *Array of *String objects.
func (rex *RegExpObj) Split(line string, args ...Object) Object {
if len(args) != 2 {
panic(NewError(line, ARGUMENTERROR, "2", len(args)))
}
strObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "split", "*String", args[0].Type()))
}
intObj, ok := args[1].(*Integer)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "second", "split", "*Integer", args[1].Type()))
}
if rex.RegExp == nil {
return NewNil("Before calling split, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
}
ret := &Array{}
strArr := rex.RegExp.Split(strObj.String, int(intObj.Int64))
for _, v := range strArr {
ret.Members = append(ret.Members, NewString(v))
}
return ret
}
//String implements 'string()': returns the source text of the compiled
//regular expression as a *String. Takes no arguments.
func (rex *RegExpObj) String(line string, args ...Object) Object {
	if len(args) != 0 {
		panic(NewError(line, ARGUMENTERROR, "0", len(args)))
	}
	if rex.RegExp == nil {
		return NewNil("Before calling string, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
	}
	return NewString(rex.RegExp.String())
}
//SubexpNames implements 'subexpNames()': wraps
//regexp.(*Regexp).SubexpNames, returning the names of the capture groups
//(index 0 is always the empty string) as an *Array of *String objects.
func (rex *RegExpObj) SubexpNames(line string, args ...Object) Object {
if len(args) != 0 {
panic(NewError(line, ARGUMENTERROR, "0", len(args)))
}
if rex.RegExp == nil {
return NewNil("Before calling subexpNames, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
}
ret := &Array{}
strArr := rex.RegExp.SubexpNames()
for _, v := range strArr {
ret.Members = append(ret.Members, NewString(v))
}
return ret
}
//ReplaceAllStringFunc()'s callback function.
//Evaluates the user-supplied script function 'f' in a child scope with the
//matched text bound to its single parameter, and returns the *String result
//(or "" if the callback did not return a *String).
func replFunc(scope *Scope, f *Function, str string) string {
s := NewScope(scope)
//Store to `Scope`,so below `Eval()` could use them
s.Set(f.Literal.Parameters[0].(*ast.Identifier).Value, NewString(str))
r := Eval(f.Literal.Body, s)
//unwrap an explicit 'return' statement value
if obj, ok := r.(*ReturnValue); ok {
r = obj.Value
}
//check for return value, must be a '*String' type
ret, ok := r.(*String)
if ok {
return ret.String
}
return ""
}
Add replaceFirstString() to regexp module
package eval
import (
"encoding/json"
"monkey/ast"
"regexp"
)
//RegEx is the script-level regular-expression literal object: a compiled Go
//regexp plus the original pattern text.
type RegEx struct {
RegExp *regexp.Regexp //the compiled pattern
Value string //original pattern text (what Inspect() returns)
}
//Inspect returns the original pattern text of the regex literal.
func (re *RegEx) Inspect() string { return re.Value }
//Type returns the object type tag for a RegEx value.
func (re *RegEx) Type() ObjectType { return REGEX_OBJ }
//CallMethod dispatches a script-level method call on a RegEx object to the
//corresponding Go implementation; an unknown method name raises
//NOMETHODERROR. Several methods have aliases (e.g. 'sub', 'gsub').
func (re *RegEx) CallMethod(line string, scope *Scope, method string, args ...Object) Object {
switch method {
case "match", "matchString":
return re.Match(line, args...)
case "replace", "replaceAllString":
return re.Replace(line, args...)
case "split":
return re.Split(line, args...)
case "findAllString":
return re.FindAllString(line, args...)
case "findAllStringIndex":
return re.FindAllStringIndex(line, args...)
case "findAllStringSubmatch":
return re.FindAllStringSubmatch(line, args...)
case "findAllStringSubmatchIndex":
return re.FindAllStringSubmatchIndex(line, args...)
case "findString":
return re.FindString(line, args...)
case "findStringIndex":
return re.FindStringIndex(line, args...)
case "findStringSubmatch":
return re.FindStringSubmatch(line, args...)
case "findStringSubmatchIndex":
return re.FindStringSubmatchIndex(line, args...)
case "numSubexp":
return re.NumSubexp(line, args...)
case "replaceFirstString", "sub":
return re.ReplaceFirstString(line, args...)
case "replaceAllLiteralString", "gsub":
return re.ReplaceAllLiteralString(line, args...)
case "replaceAllStringFunc":
return re.ReplaceAllStringFunc(line, scope, args...)
case "string":
return re.String(line, args...)
case "subexpNames":
return re.SubexpNames(line, args...)
}
panic(NewError(line, NOMETHODERROR, method, re.Type()))
}
//Match implements 'match(str)' / 'matchString(str)': reports whether the
//regular expression matches the *String argument, returning TRUE or FALSE.
func (re *RegEx) Match(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	if args[0].Type() != STRING_OBJ {
		panic(NewError(line, PARAMTYPEERROR, "first", "match", "*String", args[0].Type()))
	}
	target := args[0].(*String)
	if re.RegExp.MatchString(target.String) {
		return TRUE
	}
	return FALSE
}
//Replace implements 'replace(src, repl)' / 'replaceAllString': wraps
//regexp.(*Regexp).ReplaceAllString ($N group references in repl are
//expanded). Returns the result as a *String.
func (re *RegEx) Replace(line string, args ...Object) Object {
if len(args) != 2 {
panic(NewError(line, ARGUMENTERROR, "2", len(args)))
}
if args[0].Type() != STRING_OBJ {
panic(NewError(line, PARAMTYPEERROR, "first", "replace", "*String", args[0].Type()))
}
if args[1].Type() != STRING_OBJ {
panic(NewError(line, PARAMTYPEERROR, "second", "replace", "*String", args[1].Type()))
}
str := args[0].(*String)
repl := args[1].(*String)
result := re.RegExp.ReplaceAllString(str.String, repl.String)
return NewString(result)
}
//Split implements 'split(str)': splits the argument around all matches of
//the regular expression (no piece limit) and returns the pieces as an
//*Array of *String objects.
func (re *RegEx) Split(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	if args[0].Type() != STRING_OBJ {
		panic(NewError(line, PARAMTYPEERROR, "first", "split", "*String", args[0].Type()))
	}
	target := args[0].(*String)
	result := &Array{}
	for _, piece := range re.RegExp.Split(target.String, -1) {
		result.Members = append(result.Members, NewString(piece))
	}
	return result
}
//FindAllString implements 'findAllString(str, n)': wraps
//regexp.(*Regexp).FindAllString, returning up to n successive matches
//(n < 0 means all) as an *Array of *String objects.
func (re *RegEx) FindAllString(line string, args ...Object) Object {
if len(args) != 2 {
panic(NewError(line, ARGUMENTERROR, "2", len(args)))
}
strObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "findAllString", "*String", args[0].Type()))
}
intObj, ok := args[1].(*Integer)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "second", "findAllString", "*Integer", args[1].Type()))
}
ret := &Array{}
strArr := re.RegExp.FindAllString(strObj.String, int(intObj.Int64))
for _, v := range strArr {
ret.Members = append(ret.Members, NewString(v))
}
return ret
}
//FindAllStringIndex implements 'findAllStringIndex(str, n)': wraps
//regexp.(*Regexp).FindAllStringIndex, returning an *Array of [start, end)
//integer-pair *Arrays, one per match (up to n matches; n < 0 means all).
func (re *RegEx) FindAllStringIndex(line string, args ...Object) Object {
if len(args) != 2 {
panic(NewError(line, ARGUMENTERROR, "2", len(args)))
}
strObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "findAllStringIndex", "*String", args[0].Type()))
}
intObj, ok := args[1].(*Integer)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "second", "findAllStringIndex", "*Integer", args[1].Type()))
}
ret := &Array{}
intArr2D := re.RegExp.FindAllStringIndex(strObj.String, int(intObj.Int64))
for _, v1 := range intArr2D {
//one inner array of offsets per match
tmpArr := &Array{}
for _, v2 := range v1 {
tmpArr.Members = append(tmpArr.Members, NewInteger(int64(v2)))
}
ret.Members = append(ret.Members, tmpArr)
}
return ret
}
//FindAllStringSubmatch implements 'findAllStringSubmatch(str, n)': wraps
//regexp.(*Regexp).FindAllStringSubmatch, returning an *Array of per-match
//*Arrays (whole match followed by each capture group, as *String objects).
func (re *RegEx) FindAllStringSubmatch(line string, args ...Object) Object {
if len(args) != 2 {
panic(NewError(line, ARGUMENTERROR, "2", len(args)))
}
strObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "findAllStringSubmatch", "*String", args[0].Type()))
}
intObj, ok := args[1].(*Integer)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "second", "findAllStringSubmatch", "*Integer", args[1].Type()))
}
ret := &Array{}
strArr2D := re.RegExp.FindAllStringSubmatch(strObj.String, int(intObj.Int64))
for _, v1 := range strArr2D {
//one inner array per match
tmpArr := &Array{}
for _, v2 := range v1 {
tmpArr.Members = append(tmpArr.Members, NewString(v2))
}
ret.Members = append(ret.Members, tmpArr)
}
return ret
}
//FindAllStringSubmatchIndex implements 'findAllStringSubmatchIndex(str, n)':
//wraps regexp.(*Regexp).FindAllStringSubmatchIndex, returning an *Array of
//per-match *Arrays of byte-offset pairs (match first, then each group).
func (re *RegEx) FindAllStringSubmatchIndex(line string, args ...Object) Object {
if len(args) != 2 {
panic(NewError(line, ARGUMENTERROR, "2", len(args)))
}
strObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "findAllStringSubmatchIndex", "*String", args[0].Type()))
}
intObj, ok := args[1].(*Integer)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "second", "findAllStringSubmatchIndex", "*Integer", args[1].Type()))
}
ret := &Array{}
intArr2D := re.RegExp.FindAllStringSubmatchIndex(strObj.String, int(intObj.Int64))
for _, v1 := range intArr2D {
//one inner array of offsets per match
tmpArr := &Array{}
for _, v2 := range v1 {
tmpArr.Members = append(tmpArr.Members, NewInteger(int64(v2)))
}
ret.Members = append(ret.Members, tmpArr)
}
return ret
}
//FindString implements 'findString(str)': wraps
//regexp.(*Regexp).FindString, returning the leftmost match as a *String.
func (re *RegEx) FindString(line string, args ...Object) Object {
if len(args) != 1 {
panic(NewError(line, ARGUMENTERROR, "1", len(args)))
}
strObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "findString", "*String", args[0].Type()))
}
ret := re.RegExp.FindString(strObj.String)
return NewString(ret)
}
//FindStringIndex implements 'findStringIndex(str)': wraps
//regexp.(*Regexp).FindStringIndex, returning the [start, end) byte offsets
//of the leftmost match as an *Array of *Integer (empty array if no match).
func (re *RegEx) FindStringIndex(line string, args ...Object) Object {
if len(args) != 1 {
panic(NewError(line, ARGUMENTERROR, "1", len(args)))
}
strObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "findStringIndex", "*String", args[0].Type()))
}
ret := &Array{}
intArr := re.RegExp.FindStringIndex(strObj.String)
for _, v := range intArr {
ret.Members = append(ret.Members, NewInteger(int64(v)))
}
return ret
}
//FindStringSubmatch implements 'findStringSubmatch(str)': wraps
//regexp.(*Regexp).FindStringSubmatch, returning the whole match followed by
//each capture-group match as an *Array of *String objects.
func (re *RegEx) FindStringSubmatch(line string, args ...Object) Object {
if len(args) != 1 {
panic(NewError(line, ARGUMENTERROR, "1", len(args)))
}
strObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "findStringSubmatch", "*String", args[0].Type()))
}
ret := &Array{}
strArr := re.RegExp.FindStringSubmatch(strObj.String)
for _, v := range strArr {
ret.Members = append(ret.Members, NewString(v))
}
return ret
}
//FindStringSubmatchIndex implements 'findStringSubmatchIndex(str)': wraps
//regexp.(*Regexp).FindStringSubmatchIndex, returning the flat list of byte
//offset pairs (match, then each group) as an *Array of *Integer objects.
func (re *RegEx) FindStringSubmatchIndex(line string, args ...Object) Object {
if len(args) != 1 {
panic(NewError(line, ARGUMENTERROR, "1", len(args)))
}
strObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "findStringSubmatchIndex", "*String", args[0].Type()))
}
ret := &Array{}
intArr := re.RegExp.FindStringSubmatchIndex(strObj.String)
for _, v := range intArr {
ret.Members = append(ret.Members, NewInteger(int64(v)))
}
return ret
}
//NumSubexp implements 'numSubexp()': returns the number of parenthesized
//capture groups in the pattern as an *Integer. Takes no arguments.
func (re *RegEx) NumSubexp(line string, args ...Object) Object {
if len(args) != 0 {
panic(NewError(line, ARGUMENTERROR, "0", len(args)))
}
i := re.RegExp.NumSubexp()
return NewInteger(int64(i))
}
//ReplaceAllLiteralString implements 'replaceAllLiteralString(src, repl)' /
//'gsub': wraps regexp.(*Regexp).ReplaceAllLiteralString — the replacement
//is taken literally ($-expansion is NOT performed). Returns a *String.
func (re *RegEx) ReplaceAllLiteralString(line string, args ...Object) Object {
if len(args) != 2 {
panic(NewError(line, ARGUMENTERROR, "2", len(args)))
}
srcObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "replaceAllLiteralString", "*String", args[0].Type()))
}
replObj, ok := args[1].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "second", "replaceAllLiteralString", "*String", args[1].Type()))
}
s := re.RegExp.ReplaceAllLiteralString(srcObj.String, replObj.String)
return NewString(s)
}
//ReplaceFirstString implements 'replaceFirstString(src, repl)' / 'sub':
//replaces only the FIRST match in 'src' with 'repl' (with $-expansion),
//delegating to the package-level replaceFirstString helper. Returns a
//*String; 'src' is returned unchanged when there is no match.
func (re *RegEx) ReplaceFirstString(line string, args ...Object) Object {
if len(args) != 2 {
panic(NewError(line, ARGUMENTERROR, "2", len(args)))
}
srcObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "replaceFirstString", "*String", args[0].Type()))
}
replObj, ok := args[1].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "second", "replaceFirstString", "*String", args[1].Type()))
}
s := replaceFirstString(re.RegExp, srcObj.String, replObj.String)
return NewString(s)
}
//ReplaceAllStringFunc implements 'replaceAllStringFunc(src, fn)': each match
//is passed to the script-level callback 'fn' (which must take exactly one
//parameter) and replaced by the callback's string result via replFunc.
func (re *RegEx) ReplaceAllStringFunc(line string, scope *Scope, args ...Object) Object {
if len(args) != 2 {
panic(NewError(line, ARGUMENTERROR, "2", len(args)))
}
srcObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "replaceAllStringFunc", "*String", args[0].Type()))
}
block, ok := args[1].(*Function)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "second", "replaceAllStringFunc", "*Function", args[1].Type()))
}
//the callback must accept exactly one argument (the matched text)
paramCount := len(block.Literal.Parameters)
if paramCount != 1 {
panic(NewError(line, FUNCCALLBACKERROR, 1, paramCount))
}
ret := re.RegExp.ReplaceAllStringFunc(srcObj.String, func(str string) string {
return replFunc(scope, block, str)
})
return NewString(ret)
}
//String implements 'string()': returns the source text of the compiled
//regular expression as a *String. Takes no arguments.
func (re *RegEx) String(line string, args ...Object) Object {
if len(args) != 0 {
panic(NewError(line, ARGUMENTERROR, "0", len(args)))
}
s := re.RegExp.String()
return NewString(s)
}
//SubexpNames implements 'subexpNames()': wraps
//regexp.(*Regexp).SubexpNames, returning the names of the capture groups
//(index 0 is always the empty string) as an *Array of *String objects.
func (re *RegEx) SubexpNames(line string, args ...Object) Object {
if len(args) != 0 {
panic(NewError(line, ARGUMENTERROR, "0", len(args)))
}
ret := &Array{}
strArr := re.RegExp.SubexpNames()
for _, v := range strArr {
ret.Members = append(ret.Members, NewString(v))
}
return ret
}
//MarshalJSON turns regex into string/json: the object serializes as its
//pattern source text (a JSON string).
func (re *RegEx) MarshalJSON() ([]byte, error) {
return json.Marshal(re.RegExp.String())
}
//UnmarshalJSON turns a JSON string back into a compiled regex: it decodes
//the pattern text and compiles it, storing the result in re.RegExp.
//Returns a non-nil error if either decoding or compilation fails.
func (re *RegEx) UnmarshalJSON(b []byte) error {
	var str string
	//Fix: the json.Unmarshal error was previously discarded, so malformed
	//input silently fell through to compiling an empty pattern.
	if err := json.Unmarshal(b, &str); err != nil {
		return err
	}
	reg, err := regexp.Compile(str)
	if err != nil {
		return err
	}
	re.RegExp = reg
	return nil
}
/* REGEXP OBJECT */
const (
//type tag returned by (*RegExpObj).Type()
REGEXP_OBJ = "REGEXP_OBJ"
//global-scope name under which the module singleton is registered
regexp_name = "regexp"
)
//RegExpObj is the script-level 'regexp' module object. RegExp is nil until
//one of compile/compilePOSIX/mustCompile/mustCompilePOSIX succeeds.
type RegExpObj struct{
RegExp *regexp.Regexp
}
//NewRegExpObj creates the regexp module object and registers it in the
//global scope under the name "regexp".
func NewRegExpObj() Object {
ret := &RegExpObj{}
SetGlobalObj(regexp_name, ret)
return ret
}
//Inspect returns the source text of the compiled pattern, or a diagnostic
//message when no pattern has been compiled yet.
func (rex *RegExpObj) Inspect() string {
	if rex.RegExp != nil {
		return rex.RegExp.String()
	}
	return "Invalid RegExpObj!"
}
//Type returns the object type tag for a RegExpObj value.
func (rex *RegExpObj) Type() ObjectType { return REGEXP_OBJ }
//CallMethod dispatches a script-level method call on the regexp module
//object to the corresponding Go implementation; an unknown method name
//raises NOMETHODERROR. Note some methods have aliases ('match', 'replace').
func (rex *RegExpObj) CallMethod(line string, scope *Scope, method string, args ...Object) Object {
switch method {
case "compile":
return rex.Compile(line, args...)
case "compilePOSIX":
return rex.CompilePOSIX(line, args...)
case "mustCompile":
return rex.MustCompile(line, args...)
case "mustCompilePOSIX":
return rex.MustCompilePOSIX(line, args...)
case "findAllString":
return rex.FindAllString(line, args...)
case "findAllStringIndex":
return rex.FindAllStringIndex(line, args...)
case "findAllStringSubmatch":
return rex.FindAllStringSubmatch(line, args...)
case "findAllStringSubmatchIndex":
return rex.FindAllStringSubmatchIndex(line, args...)
case "findString":
return rex.FindString(line, args...)
case "findStringIndex":
return rex.FindStringIndex(line, args...)
case "findStringSubmatch":
return rex.FindStringSubmatch(line, args...)
case "findStringSubmatchIndex":
return rex.FindStringSubmatchIndex(line, args...)
case "matchString", "match":
return rex.MatchString(line, args...)
case "numSubexp":
return rex.NumSubexp(line, args...)
case "replaceAllLiteralString":
return rex.ReplaceAllLiteralString(line, args...)
case "replaceAllString", "replace":
return rex.ReplaceAllString(line, args...)
case "replaceAllStringFunc":
return rex.ReplaceAllStringFunc(line, scope, args...)
case "split":
return rex.Split(line, args...)
case "string":
return rex.String(line, args...)
case "subexpNames":
return rex.SubexpNames(line, args...)
}
panic(NewError(line, NOMETHODERROR, method, rex.Type()))
}
//Compile implements 'compile(pattern)': compiles the pattern with
//regexp.Compile and stores it on the module object. Returns the module
//object on success, or a Nil carrying the compile error message on failure.
func (rex *RegExpObj) Compile(line string, args ...Object) Object {
if len(args) != 1 {
panic(NewError(line, ARGUMENTERROR, "1", len(args)))
}
strObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "compile", "*String", args[0].Type()))
}
var err error = nil
rex.RegExp, err = regexp.Compile(strObj.String)
if err != nil {
return NewNil(err.Error())
}
return rex
}
//CompilePOSIX implements 'compilePOSIX(pattern)': like Compile but uses
//regexp.CompilePOSIX (POSIX ERE, leftmost-longest matching). Returns the
//module object on success, or a Nil carrying the error message on failure.
func (rex *RegExpObj) CompilePOSIX(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	strObj, ok := args[0].(*String)
	if !ok {
		//Fix: the error message previously named "compile" (copy-paste from
		//Compile), misreporting which method the user actually called.
		panic(NewError(line, PARAMTYPEERROR, "first", "compilePOSIX", "*String", args[0].Type()))
	}
	var err error = nil
	rex.RegExp, err = regexp.CompilePOSIX(strObj.String)
	if err != nil {
		return NewNil(err.Error())
	}
	return rex
}
//MustCompile implements 'mustCompile(pattern)': compiles with
//regexp.MustCompile; a bad pattern's panic is recovered and recorded by
//leaving rex.RegExp nil instead of crashing the interpreter.
func (rex *RegExpObj) MustCompile(line string, args ...Object) Object {
if len(args) != 1 {
panic(NewError(line, ARGUMENTERROR, "1", len(args)))
}
strObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "mustCompile", "*String", args[0].Type()))
}
//if regexp.MustCompile() panic, we capture it, and set 'reg.RegExp' to nil.
defer func() {
if r := recover(); r != nil {
rex.RegExp = nil
}
}()
rex.RegExp = regexp.MustCompile(strObj.String)
return rex
}
//MustCompilePOSIX implements 'mustCompilePOSIX(pattern)': compiles with
//regexp.MustCompilePOSIX; a bad pattern's panic is recovered and recorded
//by leaving rex.RegExp nil instead of crashing the interpreter.
func (rex *RegExpObj) MustCompilePOSIX(line string, args ...Object) Object {
if len(args) != 1 {
panic(NewError(line, ARGUMENTERROR, "1", len(args)))
}
strObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "mustCompilePOSIX", "*String", args[0].Type()))
}
//if regexp.MustCompilePOSIX() panic, we capture it, and set 'reg.RegExp' to nil.
defer func() {
if r := recover(); r != nil {
rex.RegExp = nil
}
}()
rex.RegExp = regexp.MustCompilePOSIX(strObj.String)
return rex
}
//FindAllString implements 'findAllString(str, n)': wraps
//regexp.(*Regexp).FindAllString, returning up to n successive matches
//(n < 0 means all) as an *Array of *String objects.
func (rex *RegExpObj) FindAllString(line string, args ...Object) Object {
if len(args) != 2 {
panic(NewError(line, ARGUMENTERROR, "2", len(args)))
}
strObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "findAllString", "*String", args[0].Type()))
}
intObj, ok := args[1].(*Integer)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "second", "findAllString", "*Integer", args[1].Type()))
}
if rex.RegExp == nil {
return NewNil("Before calling findAllString, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
}
ret := &Array{}
strArr := rex.RegExp.FindAllString(strObj.String, int(intObj.Int64))
for _, v := range strArr {
ret.Members = append(ret.Members, NewString(v))
}
return ret
}
//FindAllStringIndex implements 'findAllStringIndex(str, n)': wraps
//regexp.(*Regexp).FindAllStringIndex, returning an *Array of [start, end)
//integer-pair *Arrays, one per match (up to n matches; n < 0 means all).
func (rex *RegExpObj) FindAllStringIndex(line string, args ...Object) Object {
if len(args) != 2 {
panic(NewError(line, ARGUMENTERROR, "2", len(args)))
}
strObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "findAllStringIndex", "*String", args[0].Type()))
}
intObj, ok := args[1].(*Integer)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "second", "findAllStringIndex", "*Integer", args[1].Type()))
}
if rex.RegExp == nil {
return NewNil("Before calling findAllStringIndex, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
}
ret := &Array{}
intArr2D := rex.RegExp.FindAllStringIndex(strObj.String, int(intObj.Int64))
for _, v1 := range intArr2D {
//one inner array of offsets per match
tmpArr := &Array{}
for _, v2 := range v1 {
tmpArr.Members = append(tmpArr.Members, NewInteger(int64(v2)))
}
ret.Members = append(ret.Members, tmpArr)
}
return ret
}
//FindAllStringSubmatch implements 'findAllStringSubmatch(str, n)': wraps
//regexp.(*Regexp).FindAllStringSubmatch, returning an *Array of per-match
//*Arrays (whole match followed by each capture group, as *String objects).
func (rex *RegExpObj) FindAllStringSubmatch(line string, args ...Object) Object {
if len(args) != 2 {
panic(NewError(line, ARGUMENTERROR, "2", len(args)))
}
strObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "findAllStringSubmatch", "*String", args[0].Type()))
}
intObj, ok := args[1].(*Integer)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "second", "findAllStringSubmatch", "*Integer", args[1].Type()))
}
if rex.RegExp == nil {
return NewNil("Before calling findAllStringSubmatch, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
}
ret := &Array{}
strArr2D := rex.RegExp.FindAllStringSubmatch(strObj.String, int(intObj.Int64))
for _, v1 := range strArr2D {
//one inner array per match
tmpArr := &Array{}
for _, v2 := range v1 {
tmpArr.Members = append(tmpArr.Members, NewString(v2))
}
ret.Members = append(ret.Members, tmpArr)
}
return ret
}
//FindAllStringSubmatchIndex implements 'findAllStringSubmatchIndex(str, n)':
//wraps regexp.(*Regexp).FindAllStringSubmatchIndex, returning an *Array of
//per-match *Arrays of byte-offset pairs (match first, then each group).
func (rex *RegExpObj) FindAllStringSubmatchIndex(line string, args ...Object) Object {
if len(args) != 2 {
panic(NewError(line, ARGUMENTERROR, "2", len(args)))
}
strObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "findAllStringSubmatchIndex", "*String", args[0].Type()))
}
intObj, ok := args[1].(*Integer)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "second", "findAllStringSubmatchIndex", "*Integer", args[1].Type()))
}
if rex.RegExp == nil {
return NewNil("Before calling findAllStringSubmatchIndex, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
}
ret := &Array{}
intArr2D := rex.RegExp.FindAllStringSubmatchIndex(strObj.String, int(intObj.Int64))
for _, v1 := range intArr2D {
//one inner array of offsets per match
tmpArr := &Array{}
for _, v2 := range v1 {
tmpArr.Members = append(tmpArr.Members, NewInteger(int64(v2)))
}
ret.Members = append(ret.Members, tmpArr)
}
return ret
}
//FindString implements 'findString(str)': wraps
//regexp.(*Regexp).FindString, returning the leftmost match as a *String.
func (rex *RegExpObj) FindString(line string, args ...Object) Object {
if len(args) != 1 {
panic(NewError(line, ARGUMENTERROR, "1", len(args)))
}
strObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "findString", "*String", args[0].Type()))
}
if rex.RegExp == nil {
return NewNil("Before calling findString, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
}
ret := rex.RegExp.FindString(strObj.String)
return NewString(ret)
}
//FindStringIndex implements 'findStringIndex(str)': wraps
//regexp.(*Regexp).FindStringIndex, returning the [start, end) byte offsets
//of the leftmost match as an *Array of *Integer (empty array if no match).
func (rex *RegExpObj) FindStringIndex(line string, args ...Object) Object {
if len(args) != 1 {
panic(NewError(line, ARGUMENTERROR, "1", len(args)))
}
strObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "findStringIndex", "*String", args[0].Type()))
}
if rex.RegExp == nil {
return NewNil("Before calling findStringIndex, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
}
ret := &Array{}
intArr := rex.RegExp.FindStringIndex(strObj.String)
for _, v := range intArr {
ret.Members = append(ret.Members, NewInteger(int64(v)))
}
return ret
}
//FindStringSubmatch implements 'findStringSubmatch(str)': wraps
//regexp.(*Regexp).FindStringSubmatch, returning the whole match followed by
//each capture-group match as an *Array of *String objects.
func (rex *RegExpObj) FindStringSubmatch(line string, args ...Object) Object {
if len(args) != 1 {
panic(NewError(line, ARGUMENTERROR, "1", len(args)))
}
strObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "findStringSubmatch", "*String", args[0].Type()))
}
if rex.RegExp == nil {
return NewNil("Before calling findStringSubmatch, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
}
ret := &Array{}
strArr := rex.RegExp.FindStringSubmatch(strObj.String)
for _, v := range strArr {
ret.Members = append(ret.Members, NewString(v))
}
return ret
}
//FindStringSubmatchIndex implements 'findStringSubmatchIndex(str)': wraps
//regexp.(*Regexp).FindStringSubmatchIndex, returning the flat list of byte
//offset pairs (match, then each group) as an *Array of *Integer objects.
func (rex *RegExpObj) FindStringSubmatchIndex(line string, args ...Object) Object {
if len(args) != 1 {
panic(NewError(line, ARGUMENTERROR, "1", len(args)))
}
strObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "findStringSubmatchIndex", "*String", args[0].Type()))
}
if rex.RegExp == nil {
return NewNil("Before calling findStringSubmatchIndex, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
}
ret := &Array{}
intArr := rex.RegExp.FindStringSubmatchIndex(strObj.String)
for _, v := range intArr {
ret.Members = append(ret.Members, NewInteger(int64(v)))
}
return ret
}
//MatchString implements 'matchString(str)' / 'match(str)': reports whether
//the compiled pattern matches the *String argument, returning TRUE or FALSE.
func (rex *RegExpObj) MatchString(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	strObj, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "matchString", "*String", args[0].Type()))
	}
	//Fix: every sibling method guards against an uncompiled pattern; without
	//this check MatchString dereferences a nil *regexp.Regexp and panics when
	//called before compile|compilePOSIX|mustCompile|mustCompilePOSIX.
	if rex.RegExp == nil {
		return NewNil("Before calling matchString, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
	}
	if rex.RegExp.MatchString(strObj.String) {
		return TRUE
	}
	return FALSE
}
//NumSubexp implements 'numSubexp()': returns the number of parenthesized
//capture groups in the compiled pattern as an *Integer. Takes no arguments.
func (rex *RegExpObj) NumSubexp(line string, args ...Object) Object {
if len(args) != 0 {
panic(NewError(line, ARGUMENTERROR, "0", len(args)))
}
if rex.RegExp == nil {
return NewNil("Before calling NumSubexp, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
}
i := rex.RegExp.NumSubexp()
return NewInteger(int64(i))
}
//ReplaceAllLiteralString implements 'replaceAllLiteralString(src, repl)':
//wraps regexp.(*Regexp).ReplaceAllLiteralString — the replacement is taken
//literally ($-expansion is NOT performed). Returns the result as a *String.
func (rex *RegExpObj) ReplaceAllLiteralString(line string, args ...Object) Object {
if len(args) != 2 {
panic(NewError(line, ARGUMENTERROR, "2", len(args)))
}
srcObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "replaceAllLiteralString", "*String", args[0].Type()))
}
replObj, ok := args[1].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "second", "replaceAllLiteralString", "*String", args[1].Type()))
}
if rex.RegExp == nil {
return NewNil("Before calling replaceAllLiteralString, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
}
s := rex.RegExp.ReplaceAllLiteralString(srcObj.String, replObj.String)
return NewString(s)
}
//ReplaceAllString implements 'replaceAllString(src, repl)' / 'replace':
//wraps regexp.(*Regexp).ReplaceAllString ($N group references in repl are
//expanded). Returns the result as a *String.
func (rex *RegExpObj) ReplaceAllString(line string, args ...Object) Object {
if len(args) != 2 {
panic(NewError(line, ARGUMENTERROR, "2", len(args)))
}
srcObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "replaceAllString", "*String", args[0].Type()))
}
replObj, ok := args[1].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "second", "replaceAllString", "*String", args[1].Type()))
}
if rex.RegExp == nil {
return NewNil("Before calling replaceAllString, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
}
s := rex.RegExp.ReplaceAllString(srcObj.String, replObj.String)
return NewString(s)
}
//ReplaceAllStringFunc implements 'replaceAllStringFunc(src, fn)': each match
//is passed to the script-level callback 'fn' (which must take exactly one
//parameter) and replaced by the callback's string result via replFunc.
func (rex *RegExpObj) ReplaceAllStringFunc(line string, scope *Scope, args ...Object) Object {
if len(args) != 2 {
panic(NewError(line, ARGUMENTERROR, "2", len(args)))
}
srcObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "replaceAllStringFunc", "*String", args[0].Type()))
}
block, ok := args[1].(*Function)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "second", "replaceAllStringFunc", "*Function", args[1].Type()))
}
if rex.RegExp == nil {
return NewNil("Before calling replaceAllStringFunc, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
}
//the callback must accept exactly one argument (the matched text)
paramCount := len(block.Literal.Parameters)
if paramCount != 1 {
panic(NewError(line, FUNCCALLBACKERROR, 1, paramCount))
}
ret := rex.RegExp.ReplaceAllStringFunc(srcObj.String, func(str string) string {
return replFunc(scope, block, str)
})
return NewString(ret)
}
//Split implements 'split(str, n)': wraps regexp.(*Regexp).Split, splitting
//'str' around matches into at most n pieces (n < 0 means all pieces), and
//returns them as an *Array of *String objects.
func (rex *RegExpObj) Split(line string, args ...Object) Object {
if len(args) != 2 {
panic(NewError(line, ARGUMENTERROR, "2", len(args)))
}
strObj, ok := args[0].(*String)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "first", "split", "*String", args[0].Type()))
}
intObj, ok := args[1].(*Integer)
if !ok {
panic(NewError(line, PARAMTYPEERROR, "second", "split", "*Integer", args[1].Type()))
}
if rex.RegExp == nil {
return NewNil("Before calling split, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
}
ret := &Array{}
strArr := rex.RegExp.Split(strObj.String, int(intObj.Int64))
for _, v := range strArr {
ret.Members = append(ret.Members, NewString(v))
}
return ret
}
//String implements 'string()': returns the source text of the compiled
//regular expression as a *String. Takes no arguments.
func (rex *RegExpObj) String(line string, args ...Object) Object {
if len(args) != 0 {
panic(NewError(line, ARGUMENTERROR, "0", len(args)))
}
if rex.RegExp == nil {
return NewNil("Before calling string, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
}
s := rex.RegExp.String()
return NewString(s)
}
//SubexpNames implements 'subexpNames()': wraps
//regexp.(*Regexp).SubexpNames, returning the names of the capture groups
//(index 0 is always the empty string) as an *Array of *String objects.
func (rex *RegExpObj) SubexpNames(line string, args ...Object) Object {
if len(args) != 0 {
panic(NewError(line, ARGUMENTERROR, "0", len(args)))
}
if rex.RegExp == nil {
return NewNil("Before calling subexpNames, you should first call 'compile|compilePOSIX|mustCompile|mustCompilePOSIX'")
}
ret := &Array{}
strArr := rex.RegExp.SubexpNames()
for _, v := range strArr {
ret.Members = append(ret.Members, NewString(v))
}
return ret
}
//ReplaceAllStringFunc()'s callback function.
//Evaluates the user-supplied script function 'f' in a child scope with the
//matched text bound to its single parameter, and returns the *String result
//(or "" if the callback did not return a *String).
func replFunc(scope *Scope, f *Function, str string) string {
s := NewScope(scope)
//Store to `Scope`,so below `Eval()` could use them
s.Set(f.Literal.Parameters[0].(*ast.Identifier).Value, NewString(str))
r := Eval(f.Literal.Body, s)
//unwrap an explicit 'return' statement value
if obj, ok := r.(*ReturnValue); ok {
r = obj.Value
}
//check for return value, must be a '*String' type
ret, ok := r.(*String)
if ok {
return ret.String
}
return ""
}
func replaceFirstString(re *regexp.Regexp, srcStr, replStr string) string {
src := []byte(srcStr)
repl := []byte(replStr)
if m := re.FindSubmatchIndex(src); m != nil {
out := make([]byte, m[0])
copy(out, src[0:m[0]])
out = re.Expand(out, repl, src, m)
if m[1] < len(src) {
out = append(out, src[m[1]:]...)
}
return string(out)
}
out := make([]byte, len(src))
copy(out, src)
return string(out)
}
|
package db
import (
"sync"
"time"
"github.com/couchbaselabs/sync_gateway/base"
"github.com/couchbaselabs/sync_gateway/channels"
)
// Tunable cache parameters (package-level so tests/config can adjust them).
var ChannelCacheMinLength = 50 // Keep at least this many entries in cache
var ChannelCacheMaxLength = 500 // Don't put more than this many entries in cache
var ChannelCacheAge = 60 * time.Second // Keep entries at least this long
// NoSeq is a sentinel sequence value (max int64) — presumably means
// "no/unknown sequence"; TODO confirm against call sites outside this chunk.
const NoSeq = uint64(0x7FFFFFFFFFFFFFFF)
// channelCache caches the most recent log entries of a single channel, in
// sequence order, so change feeds can usually be served without a view query.
type channelCache struct {
channelName string // The channel name, duh
context *DatabaseContext // Database connection (used for view queries)
logs LogEntries // Log entries in sequence order
validFrom uint64 // First sequence that logs is valid for
lock sync.RWMutex // Controls access to logs, validFrom
viewLock sync.Mutex // Ensures only one view query is made at a time
}
// newChannelCache creates an empty cache for the given channel, valid for
// sequences starting at validFrom.
func newChannelCache(context *DatabaseContext, channelName string, validFrom uint64) *channelCache {
return &channelCache{context: context, channelName: channelName, validFrom: validFrom}
}
// Low-level method to add a LogEntry to a single channel's cache.
// If isRemoval is true, a copy of the entry is appended with the
// channels.Removed flag set (the caller's entry is not mutated).
// Also prunes stale entries while the write lock is held.
func (c *channelCache) addToCache(change *LogEntry, isRemoval bool) {
c.lock.Lock()
defer c.lock.Unlock()
if !isRemoval {
c._appendChange(change)
} else {
//copy so the Removed flag doesn't leak into the shared entry
removalChange := *change
removalChange.Flags |= channels.Removed
c._appendChange(&removalChange)
}
c._pruneCache()
base.LogTo("Cache", " #%d ==> channel %q", change.Sequence, c.channelName)
}
// Internal helper that prunes a single channel's cache. Caller MUST be holding the lock.
// Drops entries older than ChannelCacheAge from the front, but never shrinks
// below ChannelCacheMinLength entries; validFrom advances past each dropped
// entry. (ChannelCacheMaxLength is not enforced here.)
func (c *channelCache) _pruneCache() {
pruned := 0
for len(c.logs) > ChannelCacheMinLength && time.Since(c.logs[0].TimeReceived) > ChannelCacheAge {
c.validFrom = c.logs[0].Sequence + 1
c.logs = c.logs[1:]
pruned++
}
if pruned > 0 {
base.LogTo("Cache", "Pruned %d old entries from channel %q", pruned, c.channelName)
}
}
// pruneCache is the locking wrapper around _pruneCache.
func (c *channelCache) pruneCache() {
	c.lock.Lock()
	defer c.lock.Unlock()
	c._pruneCache()
}
// getCachedChanges is the locking wrapper around _getCachedChanges: it
// returns the cached entries with sequences greater than options.Since, in
// increasing-sequence order, plus the earliest sequence the result is valid
// from.
func (c *channelCache) getCachedChanges(options ChangesOptions) (uint64, []*LogEntry) {
	c.lock.RLock()
	validFrom, result := c._getCachedChanges(options)
	c.lock.RUnlock()
	return validFrom, result
}
// _getCachedChanges returns the cached entries with sequences greater than
// options.Since, in ascending order, capped at options.Limit entries when
// the limit is positive. validFrom is the earliest sequence the result is
// authoritative for. Caller MUST hold the lock (at least for reading).
func (c *channelCache) _getCachedChanges(options ChangesOptions) (validFrom uint64, result []*LogEntry) {
	// Find the first entry in the log to return:
	log := c.logs
	if len(log) == 0 {
		validFrom = c.validFrom
		return // Return nil if nothing is cached
	}
	var start int
	for start = len(log) - 1; start >= 0 && log[start].Sequence > options.Since; start-- {
	}
	start++
	if start > 0 {
		validFrom = log[start-1].Sequence + 1
	} else {
		validFrom = c.validFrom
	}
	n := len(log) - start
	// BUG FIX: the comparison was inverted ("n < options.Limit"), which
	// *raised* n to the limit when fewer entries were available, allocating
	// a result with trailing nil entries and crashing callers. Only cap n.
	if options.Limit > 0 && n > options.Limit {
		n = options.Limit
	}
	result = make([]*LogEntry, n)
	copy(result[0:], log[start:])
	return
}
// GetChanges is the top-level method to get all the changes in a channel with
// sequences greater than options.Since. If the cache doesn't go back far
// enough, the view is queried for the older changes; view query results may
// be fed back into the cache if there's room.
func (c *channelCache) GetChanges(options ChangesOptions) ([]*LogEntry, error) {
	// Use the cache, and return if it fulfilled the entire request:
	cacheValidFrom, resultFromCache := c.getCachedChanges(options)
	numFromCache := len(resultFromCache)
	// FIX: the first condition previously read "resultFromCache == nil",
	// which made the "nothing cached" branch unreachable. Log the count for
	// a non-nil (possibly empty) result; otherwise log the cache miss.
	if numFromCache > 0 || resultFromCache != nil {
		base.LogTo("Cache", "getCachedChanges(%q, %d) --> %d changes valid from #%d",
			c.channelName, options.Since, numFromCache, cacheValidFrom)
	} else {
		base.LogTo("Cache", "getCachedChanges(%q, %d) --> nothing cached",
			c.channelName, options.Since)
	}
	if cacheValidFrom <= options.Since+1 {
		return resultFromCache, nil
	}
	// Nope, we're going to have to backfill from the view.
	//** First acquire the _view_ lock (not the regular lock!)
	c.viewLock.Lock()
	defer c.viewLock.Unlock()
	// Another goroutine might have gotten the lock first and already queried the view and updated
	// the cache, so repeat the above:
	// NOTE(review): this re-check reads c.logs via _getCachedChanges while
	// holding only viewLock, not c.lock — confirm a concurrent addToCache
	// cannot race with this read.
	cacheValidFrom, resultFromCache = c._getCachedChanges(options)
	if len(resultFromCache) > numFromCache {
		base.LogTo("Cache", "2nd getCachedChanges(%q, %d) got %d more, valid from #%d!",
			c.channelName, options.Since, len(resultFromCache)-numFromCache, cacheValidFrom)
	}
	if cacheValidFrom <= options.Since+1 {
		return resultFromCache, nil
	}
	// Now query the view. We set the max sequence equal to cacheValidFrom, so we'll get one
	// overlap, which helps confirm that we've got everything.
	resultFromView, err := c.context.getChangesInChannelFromView(c.channelName, cacheValidFrom,
		options)
	if err != nil {
		return nil, err
	}
	// Cache some of the view results, if there's room in the cache:
	if len(resultFromCache) < ChannelCacheMaxLength {
		c.prependChanges(resultFromView, options.Since+1, options.Limit == 0)
	}
	// Concatenate the view & cache results:
	result := resultFromView
	if (options.Limit == 0 || len(result) < options.Limit) && len(resultFromCache) > 0 {
		// Drop the overlap: the view query was capped at cacheValidFrom, so
		// its last row may duplicate the cache's first.
		// FIX: guard against an empty view result before indexing its tail.
		if len(result) > 0 && resultFromCache[0].Sequence == result[len(result)-1].Sequence {
			resultFromCache = resultFromCache[1:]
		}
		n := len(resultFromCache)
		// FIX: only *reduce* n to honor the limit. Previously n was set to
		// (Limit - len(result)) unconditionally, which could exceed
		// len(resultFromCache) and panic on the slice expression below.
		if options.Limit > 0 && options.Limit-len(result) < n {
			n = options.Limit - len(result)
		}
		result = append(result, resultFromCache[0:n]...)
	}
	base.LogTo("Cache", "GetChangesInChannel(%q) --> %d rows", c.channelName, len(result))
	return result, nil
}
//////// LOGENTRIES:

// _adjustFirstSeq lowers validFrom so the cache is considered authoritative
// starting at change.Sequence (used when the first entry lands in an empty
// log). Caller MUST hold the lock.
func (c *channelCache) _adjustFirstSeq(change *LogEntry) {
	if change.Sequence < c.validFrom {
		c.validFrom = change.Sequence
	}
}
// _appendChange adds an entry to the end of the cached log. Any existing
// entry with the same DocID is removed (in place, preserving order of the
// rest). Out-of-order sequences are warned about but still appended.
// Caller MUST hold the lock.
func (c *channelCache) _appendChange(change *LogEntry) {
	log := c.logs
	end := len(log) - 1
	if end >= 0 {
		// Non-empty log: sanity-check ordering, then look for an older
		// entry for the same document to displace.
		if change.Sequence <= log[end].Sequence {
			base.Warn("LogEntries.appendChange: out-of-order sequence #%d (last is #%d)",
				change.Sequence, log[end].Sequence)
		}
		for i := end; i >= 0; i-- {
			if log[i].DocID == change.DocID {
				// Shift later entries down over the stale one and place the
				// new entry at the end; the log length is unchanged.
				copy(log[i:], log[i+1:])
				log[end] = change
				return
			}
		}
	} else {
		// Empty log: the cache now covers at least this sequence.
		c._adjustFirstSeq(change)
	}
	c.logs = append(log, change)
}
// prependChanges merges an older array of entries (typically view-query
// results, in ascending sequence order) onto the front of the cached log,
// skipping entries already present. To guarantee no gap, the new array must
// overlap with the current log, i.e. must contain the same sequence as
// c.logs[0]; otherwise nothing is added because the method can't confirm
// that there are no missing sequences in between. changesValidFrom is the
// sequence the new array is authoritative from.
// Returns the number of entries actually prepended.
func (c *channelCache) prependChanges(changes LogEntries, changesValidFrom uint64, openEnded bool) int {
	c.lock.Lock()
	defer c.lock.Unlock()
	log := c.logs
	if len(log) == 0 {
		// If my cache is empty, just copy the new changes:
		if len(changes) > 0 {
			if !openEnded && changes[len(changes)-1].Sequence < c.validFrom {
				return 0 // changes might not go all the way to the current time
			}
			// Trim the oldest entries if there are too many to fit:
			if excess := len(changes) - ChannelCacheMaxLength; excess > 0 {
				changes = changes[excess:]
				changesValidFrom = changes[0].Sequence
			}
			c.logs = make(LogEntries, len(changes))
			copy(c.logs, changes)
			base.LogTo("Cache", " Initialized cache of %q with %d entries from view (#%d--#%d)",
				c.channelName, len(changes), changes[0].Sequence, changes[len(changes)-1].Sequence)
		}
		c.validFrom = changesValidFrom
		return len(changes)
	}
	// FIX: with a non-empty log, an empty changes array used to fall through
	// to changes[0] below and panic. There is nothing to prepend and no
	// overlap to verify, so return 0 (leaving validFrom untouched).
	if len(changes) == 0 {
		return 0
	}
	// Look for an overlap, and prepend everything up to that point:
	firstSequence := log[0].Sequence
	if changes[0].Sequence <= firstSequence {
		for i := len(changes) - 1; i >= 0; i-- {
			if changes[i].Sequence == firstSequence {
				// changes[0:i] are the entries older than anything cached.
				// Trim from the front if prepending them all would exceed
				// the maximum cache length:
				if excess := i + len(log) - ChannelCacheMaxLength; excess > 0 {
					changes = changes[excess:]
					changesValidFrom = changes[0].Sequence
					i -= excess
				}
				if i > 0 {
					newLog := make(LogEntries, 0, i+len(log))
					newLog = append(newLog, changes[0:i]...)
					newLog = append(newLog, log...)
					c.logs = newLog
					base.LogTo("Cache", " Added %d entries from view (#%d--#%d) to cache of %q",
						i, changes[0].Sequence, changes[i-1].Sequence, c.channelName)
				}
				c.validFrom = changesValidFrom
				return i
			}
		}
	}
	return 0
}
Fixed crash caused by an inverted limit comparison in channelCache._getCachedChanges (`n < options.Limit` instead of `n > options.Limit`), which padded the result slice with nil entries.

Fixes #288
package db
import (
"sync"
"time"
"github.com/couchbaselabs/sync_gateway/base"
"github.com/couchbaselabs/sync_gateway/channels"
)
// Tuning knobs for the per-channel in-memory cache of change-log entries.
var ChannelCacheMinLength = 50          // Keep at least this many entries in cache
var ChannelCacheMaxLength = 500         // Don't put more than this many entries in cache
var ChannelCacheAge = 60 * time.Second  // Keep entries at least this long

// NoSeq is a sentinel meaning "no sequence" (the maximum positive int64).
const NoSeq = uint64(0x7FFFFFFFFFFFFFFF)
// channelCache holds the most recent LogEntries (changes) for a single
// channel, in ascending sequence order. History older than validFrom is not
// in memory and must be backfilled from a view query.
type channelCache struct {
	channelName string           // The channel name, duh
	context     *DatabaseContext // Database connection (used for view queries)
	logs        LogEntries       // Log entries in sequence order
	validFrom   uint64           // First sequence that logs is valid for
	lock        sync.RWMutex     // Controls access to logs, validFrom
	viewLock    sync.Mutex       // Ensures only one view query is made at a time
}
// newChannelCache creates an empty cache for the named channel. validFrom is
// the earliest sequence the (empty) cache is considered authoritative for.
func newChannelCache(context *DatabaseContext, channelName string, validFrom uint64) *channelCache {
	cache := &channelCache{
		context:     context,
		channelName: channelName,
		validFrom:   validFrom,
	}
	return cache
}
// addToCache appends a LogEntry to this channel's cache, then prunes the
// cache. If isRemoval is true, a copy of the entry flagged with
// channels.Removed is cached instead, leaving the caller's entry untouched.
func (c *channelCache) addToCache(change *LogEntry, isRemoval bool) {
	c.lock.Lock()
	defer c.lock.Unlock()

	entry := change
	if isRemoval {
		// Cache a flagged copy so the caller's entry is not mutated.
		removal := *change
		removal.Flags |= channels.Removed
		entry = &removal
	}
	c._appendChange(entry)
	c._pruneCache()
	base.LogTo("Cache", " #%d ==> channel %q", change.Sequence, c.channelName)
}
// _pruneCache evicts entries from the front of the log that are both older
// than ChannelCacheAge and in excess of ChannelCacheMinLength, advancing
// validFrom past each evicted sequence. Caller MUST be holding the lock.
func (c *channelCache) _pruneCache() {
	evicted := 0
	for {
		if len(c.logs) <= ChannelCacheMinLength {
			break
		}
		oldest := c.logs[0]
		if time.Since(oldest.TimeReceived) <= ChannelCacheAge {
			break
		}
		c.validFrom = oldest.Sequence + 1
		c.logs = c.logs[1:]
		evicted++
	}
	if evicted > 0 {
		base.LogTo("Cache", "Pruned %d old entries from channel %q", evicted, c.channelName)
	}
}
// pruneCache is the locking wrapper around _pruneCache.
func (c *channelCache) pruneCache() {
	c.lock.Lock()
	defer c.lock.Unlock()
	c._pruneCache()
}
// getCachedChanges is the locking wrapper around _getCachedChanges: it
// returns the cached entries with sequences greater than options.Since, in
// increasing-sequence order, plus the earliest sequence the result is valid
// from.
func (c *channelCache) getCachedChanges(options ChangesOptions) (uint64, []*LogEntry) {
	c.lock.RLock()
	validFrom, result := c._getCachedChanges(options)
	c.lock.RUnlock()
	return validFrom, result
}
// _getCachedChanges returns the cached entries with sequences greater than
// options.Since, in ascending order, capped at options.Limit entries when
// the limit is positive. validFrom is the earliest sequence the result is
// authoritative for. Caller MUST hold the lock (at least for reading).
func (c *channelCache) _getCachedChanges(options ChangesOptions) (validFrom uint64, result []*LogEntry) {
	// Find the first entry in the log to return:
	log := c.logs
	if len(log) == 0 {
		validFrom = c.validFrom
		return // Return nil if nothing is cached
	}
	// Scan backwards for the last entry with Sequence <= Since; 'start'
	// ends up indexing the first entry to return.
	var start int
	for start = len(log) - 1; start >= 0 && log[start].Sequence > options.Since; start-- {
	}
	start++
	if start > 0 {
		// The entry just before 'start' proves the cache covers every
		// sequence after it.
		validFrom = log[start-1].Sequence + 1
	} else {
		validFrom = c.validFrom
	}
	// Cap the result length at the requested limit, if one was given.
	n := len(log) - start
	if options.Limit > 0 && n > options.Limit {
		n = options.Limit
	}
	result = make([]*LogEntry, n)
	copy(result[0:], log[start:])
	return
}
// GetChanges is the top-level method to get all the changes in a channel with
// sequences greater than options.Since. If the cache doesn't go back far
// enough, the view is queried for the older changes; view query results may
// be fed back into the cache if there's room.
func (c *channelCache) GetChanges(options ChangesOptions) ([]*LogEntry, error) {
	// Use the cache, and return if it fulfilled the entire request:
	cacheValidFrom, resultFromCache := c.getCachedChanges(options)
	numFromCache := len(resultFromCache)
	// FIX: the first condition previously read "resultFromCache == nil",
	// which made the "nothing cached" branch unreachable. Log the count for
	// a non-nil (possibly empty) result; otherwise log the cache miss.
	if numFromCache > 0 || resultFromCache != nil {
		base.LogTo("Cache", "getCachedChanges(%q, %d) --> %d changes valid from #%d",
			c.channelName, options.Since, numFromCache, cacheValidFrom)
	} else {
		base.LogTo("Cache", "getCachedChanges(%q, %d) --> nothing cached",
			c.channelName, options.Since)
	}
	if cacheValidFrom <= options.Since+1 {
		return resultFromCache, nil
	}
	// Nope, we're going to have to backfill from the view.
	//** First acquire the _view_ lock (not the regular lock!)
	c.viewLock.Lock()
	defer c.viewLock.Unlock()
	// Another goroutine might have gotten the lock first and already queried the view and updated
	// the cache, so repeat the above:
	// NOTE(review): this re-check reads c.logs via _getCachedChanges while
	// holding only viewLock, not c.lock — confirm a concurrent addToCache
	// cannot race with this read.
	cacheValidFrom, resultFromCache = c._getCachedChanges(options)
	if len(resultFromCache) > numFromCache {
		base.LogTo("Cache", "2nd getCachedChanges(%q, %d) got %d more, valid from #%d!",
			c.channelName, options.Since, len(resultFromCache)-numFromCache, cacheValidFrom)
	}
	if cacheValidFrom <= options.Since+1 {
		return resultFromCache, nil
	}
	// Now query the view. We set the max sequence equal to cacheValidFrom, so we'll get one
	// overlap, which helps confirm that we've got everything.
	resultFromView, err := c.context.getChangesInChannelFromView(c.channelName, cacheValidFrom,
		options)
	if err != nil {
		return nil, err
	}
	// Cache some of the view results, if there's room in the cache:
	if len(resultFromCache) < ChannelCacheMaxLength {
		c.prependChanges(resultFromView, options.Since+1, options.Limit == 0)
	}
	// Concatenate the view & cache results:
	result := resultFromView
	if (options.Limit == 0 || len(result) < options.Limit) && len(resultFromCache) > 0 {
		// Drop the overlap: the view query was capped at cacheValidFrom, so
		// its last row may duplicate the cache's first.
		// FIX: guard against an empty view result before indexing its tail.
		if len(result) > 0 && resultFromCache[0].Sequence == result[len(result)-1].Sequence {
			resultFromCache = resultFromCache[1:]
		}
		n := len(resultFromCache)
		// FIX: only *reduce* n to honor the limit. Previously n was set to
		// (Limit - len(result)) unconditionally, which could exceed
		// len(resultFromCache) and panic on the slice expression below.
		if options.Limit > 0 && options.Limit-len(result) < n {
			n = options.Limit - len(result)
		}
		result = append(result, resultFromCache[0:n]...)
	}
	base.LogTo("Cache", "GetChangesInChannel(%q) --> %d rows", c.channelName, len(result))
	return result, nil
}
//////// LOGENTRIES:

// _adjustFirstSeq lowers validFrom so the cache is considered authoritative
// starting at change.Sequence (used when the first entry lands in an empty
// log). Caller MUST hold the lock.
func (c *channelCache) _adjustFirstSeq(change *LogEntry) {
	if change.Sequence < c.validFrom {
		c.validFrom = change.Sequence
	}
}
// _appendChange adds an entry to the end of the cached log. Any existing
// entry with the same DocID is removed (in place, preserving order of the
// rest). Out-of-order sequences are warned about but still appended.
// Caller MUST hold the lock.
func (c *channelCache) _appendChange(change *LogEntry) {
	log := c.logs
	end := len(log) - 1
	if end >= 0 {
		// Non-empty log: sanity-check ordering, then look for an older
		// entry for the same document to displace.
		if change.Sequence <= log[end].Sequence {
			base.Warn("LogEntries.appendChange: out-of-order sequence #%d (last is #%d)",
				change.Sequence, log[end].Sequence)
		}
		for i := end; i >= 0; i-- {
			if log[i].DocID == change.DocID {
				// Shift later entries down over the stale one and place the
				// new entry at the end; the log length is unchanged.
				copy(log[i:], log[i+1:])
				log[end] = change
				return
			}
		}
	} else {
		// Empty log: the cache now covers at least this sequence.
		c._adjustFirstSeq(change)
	}
	c.logs = append(log, change)
}
// prependChanges merges an older array of entries (typically view-query
// results, in ascending sequence order) onto the front of the cached log,
// skipping entries already present. To guarantee no gap, the new array must
// overlap with the current log, i.e. must contain the same sequence as
// c.logs[0]; otherwise nothing is added because the method can't confirm
// that there are no missing sequences in between. changesValidFrom is the
// sequence the new array is authoritative from.
// Returns the number of entries actually prepended.
func (c *channelCache) prependChanges(changes LogEntries, changesValidFrom uint64, openEnded bool) int {
	c.lock.Lock()
	defer c.lock.Unlock()
	log := c.logs
	if len(log) == 0 {
		// If my cache is empty, just copy the new changes:
		if len(changes) > 0 {
			if !openEnded && changes[len(changes)-1].Sequence < c.validFrom {
				return 0 // changes might not go all the way to the current time
			}
			// Trim the oldest entries if there are too many to fit:
			if excess := len(changes) - ChannelCacheMaxLength; excess > 0 {
				changes = changes[excess:]
				changesValidFrom = changes[0].Sequence
			}
			c.logs = make(LogEntries, len(changes))
			copy(c.logs, changes)
			base.LogTo("Cache", " Initialized cache of %q with %d entries from view (#%d--#%d)",
				c.channelName, len(changes), changes[0].Sequence, changes[len(changes)-1].Sequence)
		}
		c.validFrom = changesValidFrom
		return len(changes)
	}
	// FIX: with a non-empty log, an empty changes array used to fall through
	// to changes[0] below and panic. There is nothing to prepend and no
	// overlap to verify, so return 0 (leaving validFrom untouched).
	if len(changes) == 0 {
		return 0
	}
	// Look for an overlap, and prepend everything up to that point:
	firstSequence := log[0].Sequence
	if changes[0].Sequence <= firstSequence {
		for i := len(changes) - 1; i >= 0; i-- {
			if changes[i].Sequence == firstSequence {
				// changes[0:i] are the entries older than anything cached.
				// Trim from the front if prepending them all would exceed
				// the maximum cache length:
				if excess := i + len(log) - ChannelCacheMaxLength; excess > 0 {
					changes = changes[excess:]
					changesValidFrom = changes[0].Sequence
					i -= excess
				}
				if i > 0 {
					newLog := make(LogEntries, 0, i+len(log))
					newLog = append(newLog, changes[0:i]...)
					newLog = append(newLog, log...)
					c.logs = newLog
					base.LogTo("Cache", " Added %d entries from view (#%d--#%d) to cache of %q",
						i, changes[0].Sequence, changes[i-1].Sequence, c.channelName)
				}
				c.validFrom = changesValidFrom
				return i
			}
		}
	}
	return 0
}
|
package check_test
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
"github.com/onsi/gomega/gexec"
"github.com/idahobean/npm-resource"
"github.com/idahobean/npm-resource/check"
)
// Integration test for the check command: publishes sample-node to a local
// npm registry, runs the check binary, and verifies its version output.
var _ = Describe("Check", func() {
	var (
		tmpDir  string
		cmd     *exec.Cmd
		request check.Request
	)
	loginArgs := []string{"-u", "abc", "-p", "def", "-e", "ghi@jkl.mno", "-r", "http://localhost:8080"}
	BeforeEach(func() {
		var err error
		tmpDir, err = ioutil.TempDir("", "npm_resource_check")
		Ω(err).ShouldNot(HaveOccurred())
		packagePath, err := filepath.Abs("../sample-node")
		Ω(err).ShouldNot(HaveOccurred())
		request = check.Request{
			Source: resource.Source{
				PackageName: "sample-node",
				Registry:    "http://localhost:8080",
			},
		}
		err = exec.Command("npm-cli-login", loginArgs...).Run()
		Ω(err).ShouldNot(HaveOccurred())
		// FIX: publish with --force so re-publishing sample-node@0.0.1 after
		// a previous (possibly unclean) run doesn't fail the test setup.
		args := []string{"publish", packagePath, "--registry", "http://localhost:8080", "--force"}
		err = exec.Command("npm", args...).Run()
		Ω(err).ShouldNot(HaveOccurred())
	})
	JustBeforeEach(func() {
		stdin := &bytes.Buffer{}
		err := json.NewEncoder(stdin).Encode(request)
		Ω(err).ShouldNot(HaveOccurred())
		cmd = exec.Command(binPath, tmpDir) // built by the test suite
		cmd.Stdin = stdin
		cmd.Dir = tmpDir
	})
	AfterEach(func() {
		err := os.RemoveAll(tmpDir)
		Ω(err).ShouldNot(HaveOccurred())
		args := []string{"unpublish", "sample-node", "--registry", "http://localhost:8080", "--force"}
		err = exec.Command("npm", args...).Run()
		Ω(err).ShouldNot(HaveOccurred())
	})
	Context("when command terminates correctly", func() {
		Context("packagename is fullfilled", func() {
			It("returns npm version", func() {
				session, err := gexec.Start(
					cmd,
					GinkgoWriter,
					GinkgoWriter,
				)
				Ω(err).ShouldNot(HaveOccurred())
				Eventually(session, "15s").Should(gexec.Exit(0))
				var response check.Response
				err = json.Unmarshal(session.Out.Contents(), &response)
				Ω(err).ShouldNot(HaveOccurred())
				Ω(response).Should(Equal(check.Response{
					Version: resource.Version{
						Version: "0.0.1",
					},
					Metadata: []resource.MetadataPair{
						{
							Name:  "name",
							Value: "sample-node",
						},
						{
							Name:  "homepage",
							Value: "https://github.com/idahobean/sample-node#readme"},
					},
				}))
			})
		})
	})
	Context("when required option is empty", func() {
		Context("packagename is empty", func() {
			BeforeEach(func() {
				request.Source.PackageName = ""
			})
			It("returns an error", func() {
				session, err := gexec.Start(
					cmd,
					GinkgoWriter,
					GinkgoWriter,
				)
				Ω(err).ShouldNot(HaveOccurred())
				Eventually(session).Should(gexec.Exit(1))
				errMsg := fmt.Sprintf("error parameter required: package_name")
				Ω(session.Err).Should(gbytes.Say(errMsg))
			})
		})
	})
})
Add the --force option to `npm publish` when setting up the npm package test environment, so re-publishing the same package version does not fail test setup.
package check_test
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
"github.com/onsi/gomega/gexec"
"github.com/idahobean/npm-resource"
"github.com/idahobean/npm-resource/check"
)
// Integration test for the check command: publishes sample-node to a local
// npm registry, runs the check binary, and verifies its version output.
var _ = Describe("Check", func() {
	var (
		tmpDir  string
		cmd     *exec.Cmd
		request check.Request
	)
	loginArgs := []string{"-u", "abc", "-p", "def", "-e", "ghi@jkl.mno", "-r", "http://localhost:8080"}
	BeforeEach(func() {
		var err error
		tmpDir, err = ioutil.TempDir("", "npm_resource_check")
		Ω(err).ShouldNot(HaveOccurred())
		packagePath, err := filepath.Abs("../sample-node")
		Ω(err).ShouldNot(HaveOccurred())
		request = check.Request{
			Source: resource.Source{
				PackageName: "sample-node",
				Registry:    "http://localhost:8080",
			},
		}
		err = exec.Command("npm-cli-login", loginArgs...).Run()
		Ω(err).ShouldNot(HaveOccurred())
		// Publish with --force so re-publishing the same version after an
		// earlier (possibly unclean) run doesn't fail the setup.
		args := []string{"publish", packagePath, "--registry", "http://localhost:8080", "--force"}
		err = exec.Command("npm", args...).Run()
		Ω(err).ShouldNot(HaveOccurred())
	})
	JustBeforeEach(func() {
		// Feed the encoded request to the check binary over stdin.
		stdin := &bytes.Buffer{}
		err := json.NewEncoder(stdin).Encode(request)
		Ω(err).ShouldNot(HaveOccurred())
		cmd = exec.Command(binPath, tmpDir) // built by the test suite
		cmd.Stdin = stdin
		cmd.Dir = tmpDir
	})
	AfterEach(func() {
		// Clean up the temp dir and unpublish the sample package.
		err := os.RemoveAll(tmpDir)
		Ω(err).ShouldNot(HaveOccurred())
		args := []string{"unpublish", "sample-node", "--registry", "http://localhost:8080", "--force"}
		err = exec.Command("npm", args...).Run()
		Ω(err).ShouldNot(HaveOccurred())
	})
	Context("when command terminates correctly", func() {
		Context("packagename is fullfilled", func() {
			It("returns npm version", func() {
				session, err := gexec.Start(
					cmd,
					GinkgoWriter,
					GinkgoWriter,
				)
				Ω(err).ShouldNot(HaveOccurred())
				Eventually(session, "15s").Should(gexec.Exit(0))
				var response check.Response
				err = json.Unmarshal(session.Out.Contents(), &response)
				Ω(err).ShouldNot(HaveOccurred())
				Ω(response).Should(Equal(check.Response{
					Version: resource.Version{
						Version: "0.0.1",
					},
					Metadata: []resource.MetadataPair{
						{
							Name:  "name",
							Value: "sample-node",
						},
						{
							Name:  "homepage",
							Value: "https://github.com/idahobean/sample-node#readme"},
					},
				}))
			})
		})
	})
	Context("when required option is empty", func() {
		Context("packagename is empty", func() {
			BeforeEach(func() {
				request.Source.PackageName = ""
			})
			It("returns an error", func() {
				session, err := gexec.Start(
					cmd,
					GinkgoWriter,
					GinkgoWriter,
				)
				Ω(err).ShouldNot(HaveOccurred())
				Eventually(session).Should(gexec.Exit(1))
				errMsg := fmt.Sprintf("error parameter required: package_name")
				Ω(session.Err).Should(gbytes.Say(errMsg))
			})
		})
	})
})
|
package clicommand
import (
"bufio"
"context"
"fmt"
"io"
"net/http"
"os"
"os/signal"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/buildkite/agent/v3/agent"
"github.com/buildkite/agent/v3/api"
"github.com/buildkite/agent/v3/bootstrap/shell"
"github.com/buildkite/agent/v3/cliconfig"
"github.com/buildkite/agent/v3/experiments"
"github.com/buildkite/agent/v3/hook"
"github.com/buildkite/agent/v3/logger"
"github.com/buildkite/agent/v3/metrics"
"github.com/buildkite/agent/v3/process"
"github.com/buildkite/agent/v3/tracetools"
"github.com/buildkite/agent/v3/utils"
"github.com/buildkite/shellwords"
"github.com/urfave/cli"
"golang.org/x/exp/maps"
)
// StartDescription is the long-form help text shown for `buildkite-agent start`.
var StartDescription = `Usage:
  buildkite-agent start [options...]
Description:
   When a job is ready to run it will call the "bootstrap-script"
   and pass it all the environment variables required for the job to run.
   This script is responsible for checking out the code, and running the
   actual build script defined in the pipeline.
   The agent will run any jobs within a PTY (pseudo terminal) if available.
Example:
   $ buildkite-agent start --token xxx`
// Adding config requires changes in a few different spots
// - The AgentStartConfig struct with a cli parameter
// - As a flag in the AgentStartCommand (with matching env)
// - Into an env to be passed to the bootstrap in agent/job_runner.go, createEnvironment()
// - Into clicommand/bootstrap.go to read it from the env into the bootstrap config

// AgentStartConfig holds every option accepted by `buildkite-agent start`,
// populated from CLI flags / environment variables via the `cli` struct tags.
// The `normalize`, `validate`, and `deprecated*` tags drive post-processing
// of the parsed values.
type AgentStartConfig struct {
	Config                     string `cli:"config"`
	Name                       string `cli:"name"`
	Priority                   string `cli:"priority"`
	AcquireJob                 string `cli:"acquire-job"`
	DisconnectAfterJob         bool   `cli:"disconnect-after-job"`
	DisconnectAfterIdleTimeout int    `cli:"disconnect-after-idle-timeout"`
	BootstrapScript            string `cli:"bootstrap-script" normalize:"commandpath"`
	CancelGracePeriod          int    `cli:"cancel-grace-period"`
	EnableJobLogTmpfile        bool   `cli:"enable-job-log-tmpfile"`
	BuildPath                  string `cli:"build-path" normalize:"filepath" validate:"required"`
	HooksPath                  string `cli:"hooks-path" normalize:"filepath"`
	PluginsPath                string `cli:"plugins-path" normalize:"filepath"`
	Shell                      string `cli:"shell"`
	Tags                       []string `cli:"tags" normalize:"list"`
	TagsFromEC2MetaData        bool     `cli:"tags-from-ec2-meta-data"`
	TagsFromEC2MetaDataPaths   []string `cli:"tags-from-ec2-meta-data-paths" normalize:"list"`
	TagsFromEC2Tags            bool     `cli:"tags-from-ec2-tags"`
	TagsFromGCPMetaData        bool     `cli:"tags-from-gcp-meta-data"`
	TagsFromGCPMetaDataPaths   []string `cli:"tags-from-gcp-meta-data-paths" normalize:"list"`
	TagsFromGCPLabels          bool     `cli:"tags-from-gcp-labels"`
	TagsFromHost               bool     `cli:"tags-from-host"`
	WaitForEC2TagsTimeout      string   `cli:"wait-for-ec2-tags-timeout"`
	WaitForEC2MetaDataTimeout  string   `cli:"wait-for-ec2-meta-data-timeout"`
	WaitForGCPLabelsTimeout    string   `cli:"wait-for-gcp-labels-timeout"`
	GitCloneFlags              string   `cli:"git-clone-flags"`
	GitCloneMirrorFlags        string   `cli:"git-clone-mirror-flags"`
	GitCleanFlags              string   `cli:"git-clean-flags"`
	GitFetchFlags              string   `cli:"git-fetch-flags"`
	GitMirrorsPath             string   `cli:"git-mirrors-path" normalize:"filepath"`
	GitMirrorsLockTimeout      int      `cli:"git-mirrors-lock-timeout"`
	GitMirrorsSkipUpdate       bool     `cli:"git-mirrors-skip-update"`
	NoGitSubmodules            bool     `cli:"no-git-submodules"`
	NoSSHKeyscan               bool     `cli:"no-ssh-keyscan"`
	NoCommandEval              bool     `cli:"no-command-eval"`
	NoLocalHooks               bool     `cli:"no-local-hooks"`
	NoPlugins                  bool     `cli:"no-plugins"`
	NoPluginValidation         bool     `cli:"no-plugin-validation"`
	NoPTY                      bool     `cli:"no-pty"`
	NoFeatureReporting         bool     `cli:"no-feature-reporting"`
	TimestampLines             bool     `cli:"timestamp-lines"`
	HealthCheckAddr            string   `cli:"health-check-addr"`
	MetricsDatadog             bool     `cli:"metrics-datadog"`
	MetricsDatadogHost         string   `cli:"metrics-datadog-host"`
	MetricsDatadogDistributions bool    `cli:"metrics-datadog-distributions"`
	TracingBackend             string   `cli:"tracing-backend"`
	Spawn                      int      `cli:"spawn"`
	SpawnWithPriority          bool     `cli:"spawn-with-priority"`
	LogFormat                  string   `cli:"log-format"`
	CancelSignal               string   `cli:"cancel-signal"`
	RedactedVars               []string `cli:"redacted-vars" normalize:"list"`
	// Global flags
	Debug       bool     `cli:"debug"`
	LogLevel    string   `cli:"log-level"`
	NoColor     bool     `cli:"no-color"`
	Experiments []string `cli:"experiment" normalize:"list"`
	Profile     string   `cli:"profile"`
	// API config
	DebugHTTP bool   `cli:"debug-http"`
	Token     string `cli:"token" validate:"required"`
	Endpoint  string `cli:"endpoint" validate:"required"`
	NoHTTP2   bool   `cli:"no-http2"`
	// Deprecated
	NoSSHFingerprintVerification bool     `cli:"no-automatic-ssh-fingerprint-verification" deprecated-and-renamed-to:"NoSSHKeyscan"`
	MetaData                     []string `cli:"meta-data" deprecated-and-renamed-to:"Tags"`
	MetaDataEC2                  bool     `cli:"meta-data-ec2" deprecated-and-renamed-to:"TagsFromEC2"`
	MetaDataEC2Tags              bool     `cli:"meta-data-ec2-tags" deprecated-and-renamed-to:"TagsFromEC2Tags"`
	MetaDataGCP                  bool     `cli:"meta-data-gcp" deprecated-and-renamed-to:"TagsFromGCP"`
	TagsFromEC2                  bool     `cli:"tags-from-ec2" deprecated-and-renamed-to:"TagsFromEC2MetaData"`
	TagsFromGCP                  bool     `cli:"tags-from-gcp" deprecated-and-renamed-to:"TagsFromGCPMetaData"`
	DisconnectAfterJobTimeout    int      `cli:"disconnect-after-job-timeout" deprecated:"Use disconnect-after-idle-timeout instead"`
}
// Features returns the list of agent feature names in use, derived from this
// configuration plus any enabled experiments. It returns an empty (non-nil)
// slice when feature reporting is disabled via NoFeatureReporting.
func (asc AgentStartConfig) Features() []string {
	if asc.NoFeatureReporting {
		return []string{}
	}

	features := make([]string, 0, 8)
	if asc.GitMirrorsPath != "" {
		features = append(features, "git-mirrors")
	}
	if asc.AcquireJob != "" {
		features = append(features, "acquire-job")
	}
	// A single tracing backend is configured at a time, so the pair of
	// equality checks maps directly onto a switch.
	switch asc.TracingBackend {
	case tracetools.BackendDatadog:
		features = append(features, "datadog-tracing")
	case tracetools.BackendOpenTelemetry:
		features = append(features, "opentelemetry-tracing")
	}
	if asc.DisconnectAfterJob {
		features = append(features, "disconnect-after-job")
	}
	if asc.DisconnectAfterIdleTimeout != 0 {
		features = append(features, "disconnect-after-idle")
	}
	if asc.NoPlugins {
		features = append(features, "no-plugins")
	}
	if asc.NoCommandEval {
		features = append(features, "no-script-eval")
	}
	for _, exp := range experiments.Enabled() {
		features = append(features, fmt.Sprintf("experiment-%s", exp))
	}
	return features
}
// DefaultShell returns the platform-appropriate shell command line used to
// interpret build commands when none is configured.
func DefaultShell() string {
	// https://github.com/golang/go/blob/master/src/go/build/syslist.go#L7
	switch runtime.GOOS {
	case "windows":
		return `C:\Windows\System32\CMD.exe /S /C`
	case "freebsd", "openbsd":
		return `/usr/local/bin/bash -e -c`
	case "netbsd":
		return `/usr/pkg/bin/bash -e -c`
	}
	return `/bin/bash -e -c`
}
// DefaultConfigFilePaths returns the candidate locations of the agent config
// file, in the order they should be checked. A buildkite-agent.cfg next to
// the running binary (when the executable path is resolvable) is checked
// before any of the platform defaults.
func DefaultConfigFilePaths() []string {
	var paths []string
	// Toggle beetwen windows and *nix paths
	if runtime.GOOS == "windows" {
		paths = []string{
			"C:\\buildkite-agent\\buildkite-agent.cfg",
			"$USERPROFILE\\AppData\\Local\\buildkite-agent\\buildkite-agent.cfg",
			"$USERPROFILE\\AppData\\Local\\BuildkiteAgent\\buildkite-agent.cfg",
		}
	} else {
		paths = []string{"$HOME/.buildkite-agent/buildkite-agent.cfg"}
		// For Apple Silicon Macs, prioritise the `/opt/homebrew` path over `/usr/local`
		if runtime.GOOS == "darwin" && runtime.GOARCH == "arm64" {
			paths = append(paths, "/opt/homebrew/etc/buildkite-agent/buildkite-agent.cfg")
		}
		paths = append(paths,
			"/usr/local/etc/buildkite-agent/buildkite-agent.cfg",
			"/etc/buildkite-agent/buildkite-agent.cfg",
		)
	}
	// Also check to see if there's a buildkite-agent.cfg in the folder
	// that the binary is running in.
	if exePath, err := os.Executable(); err == nil {
		if binDir, err := filepath.Abs(filepath.Dir(exePath)); err == nil {
			relativeConfig := filepath.Join(binDir, "buildkite-agent.cfg")
			paths = append([]string{relativeConfig}, paths...)
		}
	}
	return paths
}
// AgentStartCommand is the CLI definition for `buildkite-agent start`: the
// full flag set, plus an Action that loads configuration, registers one or
// more agents with the Buildkite API, and runs them in an agent pool.
var AgentStartCommand = cli.Command{
	Name:        "start",
	Usage:       "Starts a Buildkite agent",
	Description: StartDescription,
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:   "config",
			Value:  "",
			Usage:  "Path to a configuration file",
			EnvVar: "BUILDKITE_AGENT_CONFIG",
		},
		cli.StringFlag{
			Name:   "name",
			Value:  "",
			Usage:  "The name of the agent",
			EnvVar: "BUILDKITE_AGENT_NAME",
		},
		cli.StringFlag{
			Name:   "priority",
			Value:  "",
			Usage:  "The priority of the agent (higher priorities are assigned work first)",
			EnvVar: "BUILDKITE_AGENT_PRIORITY",
		},
		cli.StringFlag{
			Name:   "acquire-job",
			Value:  "",
			Usage:  "Start this agent and only run the specified job, disconnecting after it's finished",
			EnvVar: "BUILDKITE_AGENT_ACQUIRE_JOB",
		},
		cli.BoolFlag{
			Name:   "disconnect-after-job",
			Usage:  "Disconnect the agent after running exactly one job. When used in conjunction with the ′--spawn′ flag, each worker booted will run exactly one job",
			EnvVar: "BUILDKITE_AGENT_DISCONNECT_AFTER_JOB",
		},
		cli.IntFlag{
			Name:   "disconnect-after-idle-timeout",
			Value:  0,
			Usage:  "The maximum idle time in seconds to wait for a job before disconnecting. The default of 0 means no timeout",
			EnvVar: "BUILDKITE_AGENT_DISCONNECT_AFTER_IDLE_TIMEOUT",
		},
		cli.IntFlag{
			Name:   "cancel-grace-period",
			Value:  10,
			Usage:  "The number of seconds a canceled or timed out job is given to gracefully terminate and upload its artifacts",
			EnvVar: "BUILDKITE_CANCEL_GRACE_PERIOD",
		},
		cli.BoolFlag{
			Name:   "enable-job-log-tmpfile",
			Usage:  "Store the job logs in a temporary file ′BUILDKITE_JOB_LOG_TMPFILE′ that is accessible during the job and removed at the end of the job",
			EnvVar: "BUILDKITE_ENABLE_JOB_LOG_TMPFILE",
		},
		cli.StringFlag{
			Name:   "shell",
			Value:  DefaultShell(),
			Usage:  "The shell command used to interpret build commands, e.g /bin/bash -e -c",
			EnvVar: "BUILDKITE_SHELL",
		},
		cli.StringSliceFlag{
			Name:   "tags",
			Value:  &cli.StringSlice{},
			Usage:  "A comma-separated list of tags for the agent (for example, \"linux\" or \"mac,xcode=8\")",
			EnvVar: "BUILDKITE_AGENT_TAGS",
		},
		cli.BoolFlag{
			Name:   "tags-from-host",
			Usage:  "Include tags from the host (hostname, machine-id, os)",
			EnvVar: "BUILDKITE_AGENT_TAGS_FROM_HOST",
		},
		cli.StringSliceFlag{
			Name:   "tags-from-ec2-meta-data",
			Value:  &cli.StringSlice{},
			Usage:  "Include the default set of host EC2 meta-data as tags (instance-id, instance-type, ami-id, and instance-life-cycle)",
			EnvVar: "BUILDKITE_AGENT_TAGS_FROM_EC2_META_DATA",
		},
		cli.StringSliceFlag{
			Name:   "tags-from-ec2-meta-data-paths",
			Value:  &cli.StringSlice{},
			Usage:  "Include additional tags fetched from EC2 meta-data using tag & path suffix pairs, e.g \"tag_name=path/to/value\"",
			EnvVar: "BUILDKITE_AGENT_TAGS_FROM_EC2_META_DATA_PATHS",
		},
		cli.BoolFlag{
			Name:   "tags-from-ec2-tags",
			Usage:  "Include the host's EC2 tags as tags",
			EnvVar: "BUILDKITE_AGENT_TAGS_FROM_EC2_TAGS",
		},
		cli.StringSliceFlag{
			Name:   "tags-from-gcp-meta-data",
			Value:  &cli.StringSlice{},
			Usage:  "Include the default set of host Google Cloud instance meta-data as tags (instance-id, machine-type, preemptible, project-id, region, and zone)",
			EnvVar: "BUILDKITE_AGENT_TAGS_FROM_GCP_META_DATA",
		},
		cli.StringSliceFlag{
			Name:   "tags-from-gcp-meta-data-paths",
			Value:  &cli.StringSlice{},
			Usage:  "Include additional tags fetched from Google Cloud instance meta-data using tag & path suffix pairs, e.g \"tag_name=path/to/value\"",
			EnvVar: "BUILDKITE_AGENT_TAGS_FROM_GCP_META_DATA_PATHS",
		},
		cli.BoolFlag{
			Name:   "tags-from-gcp-labels",
			Usage:  "Include the host's Google Cloud instance labels as tags",
			EnvVar: "BUILDKITE_AGENT_TAGS_FROM_GCP_LABELS",
		},
		cli.DurationFlag{
			Name:   "wait-for-ec2-tags-timeout",
			Usage:  "The amount of time to wait for tags from EC2 before proceeding",
			EnvVar: "BUILDKITE_AGENT_WAIT_FOR_EC2_TAGS_TIMEOUT",
			Value:  time.Second * 10,
		},
		cli.DurationFlag{
			Name:   "wait-for-ec2-meta-data-timeout",
			Usage:  "The amount of time to wait for meta-data from EC2 before proceeding",
			EnvVar: "BUILDKITE_AGENT_WAIT_FOR_EC2_META_DATA_TIMEOUT",
			Value:  time.Second * 10,
		},
		cli.DurationFlag{
			Name:   "wait-for-gcp-labels-timeout",
			Usage:  "The amount of time to wait for labels from GCP before proceeding",
			EnvVar: "BUILDKITE_AGENT_WAIT_FOR_GCP_LABELS_TIMEOUT",
			Value:  time.Second * 10,
		},
		cli.StringFlag{
			Name:   "git-clone-flags",
			Value:  "-v",
			Usage:  "Flags to pass to the \"git clone\" command",
			EnvVar: "BUILDKITE_GIT_CLONE_FLAGS",
		},
		cli.StringFlag{
			Name:   "git-clean-flags",
			Value:  "-ffxdq",
			Usage:  "Flags to pass to \"git clean\" command",
			EnvVar: "BUILDKITE_GIT_CLEAN_FLAGS",
		},
		cli.StringFlag{
			Name:   "git-fetch-flags",
			Value:  "-v --prune",
			Usage:  "Flags to pass to \"git fetch\" command",
			EnvVar: "BUILDKITE_GIT_FETCH_FLAGS",
		},
		cli.StringFlag{
			Name:   "git-clone-mirror-flags",
			Value:  "-v",
			Usage:  "Flags to pass to the \"git clone\" command when used for mirroring",
			EnvVar: "BUILDKITE_GIT_CLONE_MIRROR_FLAGS",
		},
		cli.StringFlag{
			Name:   "git-mirrors-path",
			Value:  "",
			Usage:  "Path to where mirrors of git repositories are stored",
			EnvVar: "BUILDKITE_GIT_MIRRORS_PATH",
		},
		cli.IntFlag{
			Name:   "git-mirrors-lock-timeout",
			Value:  300,
			Usage:  "Seconds to lock a git mirror during clone, should exceed your longest checkout",
			EnvVar: "BUILDKITE_GIT_MIRRORS_LOCK_TIMEOUT",
		},
		cli.BoolFlag{
			Name:   "git-mirrors-skip-update",
			Usage:  "Skip updating the Git mirror",
			EnvVar: "BUILDKITE_GIT_MIRRORS_SKIP_UPDATE",
		},
		cli.StringFlag{
			Name:   "bootstrap-script",
			Value:  "",
			Usage:  "The command that is executed for bootstrapping a job, defaults to the bootstrap sub-command of this binary",
			EnvVar: "BUILDKITE_BOOTSTRAP_SCRIPT_PATH",
		},
		cli.StringFlag{
			Name:   "build-path",
			Value:  "",
			Usage:  "Path to where the builds will run from",
			EnvVar: "BUILDKITE_BUILD_PATH",
		},
		cli.StringFlag{
			Name:   "hooks-path",
			Value:  "",
			Usage:  "Directory where the hook scripts are found",
			EnvVar: "BUILDKITE_HOOKS_PATH",
		},
		cli.StringFlag{
			Name:   "plugins-path",
			Value:  "",
			Usage:  "Directory where the plugins are saved to",
			EnvVar: "BUILDKITE_PLUGINS_PATH",
		},
		cli.BoolFlag{
			Name:   "timestamp-lines",
			Usage:  "Prepend timestamps on each line of output.",
			EnvVar: "BUILDKITE_TIMESTAMP_LINES",
		},
		cli.StringFlag{
			Name:   "health-check-addr",
			Usage:  "Start an HTTP server on this addr:port that returns whether the agent is healthy, disabled by default",
			EnvVar: "BUILDKITE_AGENT_HEALTH_CHECK_ADDR",
		},
		cli.BoolFlag{
			Name:   "no-pty",
			Usage:  "Do not run jobs within a pseudo terminal",
			EnvVar: "BUILDKITE_NO_PTY",
		},
		cli.BoolFlag{
			Name:   "no-ssh-keyscan",
			Usage:  "Don't automatically run ssh-keyscan before checkout",
			EnvVar: "BUILDKITE_NO_SSH_KEYSCAN",
		},
		cli.BoolFlag{
			Name:   "no-command-eval",
			Usage:  "Don't allow this agent to run arbitrary console commands, including plugins",
			EnvVar: "BUILDKITE_NO_COMMAND_EVAL",
		},
		cli.BoolFlag{
			Name:   "no-plugins",
			Usage:  "Don't allow this agent to load plugins",
			EnvVar: "BUILDKITE_NO_PLUGINS",
		},
		// BoolTFlag defaults to true, so plugin validation is opt-out.
		cli.BoolTFlag{
			Name:   "no-plugin-validation",
			Usage:  "Don't validate plugin configuration and requirements",
			EnvVar: "BUILDKITE_NO_PLUGIN_VALIDATION",
		},
		cli.BoolFlag{
			Name:   "no-local-hooks",
			Usage:  "Don't allow local hooks to be run from checked out repositories",
			EnvVar: "BUILDKITE_NO_LOCAL_HOOKS",
		},
		cli.BoolFlag{
			Name:   "no-git-submodules",
			Usage:  "Don't automatically checkout git submodules",
			EnvVar: "BUILDKITE_NO_GIT_SUBMODULES,BUILDKITE_DISABLE_GIT_SUBMODULES",
		},
		cli.BoolFlag{
			Name:   "metrics-datadog",
			Usage:  "Send metrics to DogStatsD for Datadog",
			EnvVar: "BUILDKITE_METRICS_DATADOG",
		},
		cli.BoolFlag{
			Name:   "no-feature-reporting",
			Usage:  "Disables sending a list of enabled features back to the Buildkite mothership. We use this information to measure feature usage, but if you're not comfortable sharing that information then that's totally okay :)",
			EnvVar: "BUILDKITE_AGENT_NO_FEATURE_REPORTING",
		},
		cli.StringFlag{
			Name:   "metrics-datadog-host",
			Usage:  "The dogstatsd instance to send metrics to using udp",
			EnvVar: "BUILDKITE_METRICS_DATADOG_HOST",
			Value:  "127.0.0.1:8125",
		},
		cli.BoolFlag{
			Name:   "metrics-datadog-distributions",
			Usage:  "Use Datadog Distributions for Timing metrics",
			EnvVar: "BUILDKITE_METRICS_DATADOG_DISTRIBUTIONS",
		},
		cli.StringFlag{
			Name:   "log-format",
			Usage:  "The format to use for the logger output",
			EnvVar: "BUILDKITE_LOG_FORMAT",
			Value:  "text",
		},
		cli.IntFlag{
			Name:   "spawn",
			Usage:  "The number of agents to spawn in parallel",
			Value:  1,
			EnvVar: "BUILDKITE_AGENT_SPAWN",
		},
		cli.BoolFlag{
			Name:   "spawn-with-priority",
			Usage:  "Assign priorities to every spawned agent (when using --spawn) equal to the agent's index",
			EnvVar: "BUILDKITE_AGENT_SPAWN_WITH_PRIORITY",
		},
		cli.StringFlag{
			Name:   "cancel-signal",
			Usage:  "The signal to use for cancellation",
			EnvVar: "BUILDKITE_CANCEL_SIGNAL",
			Value:  "SIGTERM",
		},
		cli.StringFlag{
			Name:   "tracing-backend",
			Usage:  `Enable tracing for build jobs by specifying a backend, "datadog" or "opentelemetry"`,
			EnvVar: "BUILDKITE_TRACING_BACKEND",
			Value:  "",
		},

		// API Flags
		AgentRegisterTokenFlag,
		EndpointFlag,
		NoHTTP2Flag,
		DebugHTTPFlag,

		// Global flags
		NoColorFlag,
		DebugFlag,
		LogLevelFlag,
		ExperimentsFlag,
		ProfileFlag,
		RedactedVars,

		// Deprecated flags which will be removed in v4
		cli.StringSliceFlag{
			Name:   "meta-data",
			Value:  &cli.StringSlice{},
			Hidden: true,
			EnvVar: "BUILDKITE_AGENT_META_DATA",
		},
		cli.BoolFlag{
			Name:   "meta-data-ec2",
			Hidden: true,
			EnvVar: "BUILDKITE_AGENT_META_DATA_EC2",
		},
		cli.BoolFlag{
			Name:   "meta-data-ec2-tags",
			Hidden: true,
			EnvVar: "BUILDKITE_AGENT_TAGS_FROM_EC2_TAGS",
		},
		cli.BoolFlag{
			Name:   "meta-data-gcp",
			Hidden: true,
			EnvVar: "BUILDKITE_AGENT_META_DATA_GCP",
		},
		cli.BoolFlag{
			Name:   "no-automatic-ssh-fingerprint-verification",
			Hidden: true,
			EnvVar: "BUILDKITE_NO_AUTOMATIC_SSH_FINGERPRINT_VERIFICATION",
		},
		cli.BoolFlag{
			Name:   "tags-from-ec2",
			Usage:  "Include the host's EC2 meta-data as tags (instance-id, instance-type, and ami-id)",
			EnvVar: "BUILDKITE_AGENT_TAGS_FROM_EC2",
		},
		cli.BoolFlag{
			Name:   "tags-from-gcp",
			Usage:  "Include the host's Google Cloud instance meta-data as tags (instance-id, machine-type, preemptible, project-id, region, and zone)",
			EnvVar: "BUILDKITE_AGENT_TAGS_FROM_GCP",
		},
		cli.IntFlag{
			Name:   "disconnect-after-job-timeout",
			Hidden: true,
			Usage:  "When --disconnect-after-job is specified, the number of seconds to wait for a job before shutting down",
			EnvVar: "BUILDKITE_AGENT_DISCONNECT_AFTER_JOB_TIMEOUT",
		},
	},
	Action: func(c *cli.Context) {
		// The configuration will be loaded into this struct
		cfg := AgentStartConfig{}

		// Setup the config loader. You'll see that we also pass paths to
		// potential config files. The loader will use the first one it finds.
		loader := cliconfig.Loader{
			CLI:                    c,
			Config:                 &cfg,
			DefaultConfigFilePaths: DefaultConfigFilePaths(),
		}

		// Load the configuration
		warnings, err := loader.Load()
		if err != nil {
			fmt.Printf("%s", err)
			os.Exit(1)
		}

		l := CreateLogger(cfg)

		// Show warnings now we have a logger
		for _, warning := range warnings {
			l.Warn("%s", warning)
		}

		// Setup any global configuration options
		done := HandleGlobalFlags(l, cfg)
		defer done()

		// Remove any config env from the environment to prevent them propagating to bootstrap
		err = UnsetConfigFromEnvironment(c)
		if err != nil {
			fmt.Printf("%s", err)
			os.Exit(1)
		}

		// Check if git-mirrors are enabled
		if experiments.IsEnabled(`git-mirrors`) {
			if cfg.GitMirrorsPath == `` {
				l.Fatal("Must provide a git-mirrors-path in your configuration for git-mirrors experiment")
			}
		}

		// Force some settings if on Windows (these aren't supported yet)
		if runtime.GOOS == "windows" {
			cfg.NoPTY = true
		}

		// Set a useful default for the bootstrap script
		if cfg.BootstrapScript == "" {
			exePath, err := os.Executable()
			if err != nil {
				l.Fatal("Unable to find executable path for bootstrap")
			}
			cfg.BootstrapScript = fmt.Sprintf("%s bootstrap", shellwords.Quote(exePath))
		}

		// Whether no-plugins was set explicitly (via flag/env or config
		// file), as opposed to merely defaulting to false.
		isSetNoPlugins := c.IsSet("no-plugins")
		if loader.File != nil {
			if _, exists := loader.File.Config["no-plugins"]; exists {
				isSetNoPlugins = true
			}
		}

		// Show a warning if plugins are enabled by no-command-eval or no-local-hooks is set
		if isSetNoPlugins && cfg.NoPlugins == false {
			msg := `Plugins have been specifically enabled, despite %s being enabled. ` +
				`Plugins can execute arbitrary hooks and commands, make sure you are ` +
				`whitelisting your plugins in ` +
				`your environment hook.`

			switch {
			case cfg.NoCommandEval:
				l.Warn(msg, `no-command-eval`)
			case cfg.NoLocalHooks:
				l.Warn(msg, `no-local-hooks`)
			}
		}

		// Turning off command eval or local hooks will also turn off plugins unless
		// `--no-plugins=false` is provided specifically
		if (cfg.NoCommandEval || cfg.NoLocalHooks) && !isSetNoPlugins {
			cfg.NoPlugins = true
		}

		// Guess the shell if none is provided
		if cfg.Shell == "" {
			cfg.Shell = DefaultShell()
		}

		// Handle deprecated DisconnectAfterJobTimeout
		if cfg.DisconnectAfterJobTimeout > 0 {
			cfg.DisconnectAfterIdleTimeout = cfg.DisconnectAfterJobTimeout
		}

		// The three cloud metadata timeouts arrive as strings and are parsed
		// into durations here; an empty string means the zero duration.
		var ec2TagTimeout time.Duration
		if t := cfg.WaitForEC2TagsTimeout; t != "" {
			var err error
			ec2TagTimeout, err = time.ParseDuration(t)
			if err != nil {
				l.Fatal("Failed to parse ec2 tag timeout: %v", err)
			}
		}

		var ec2MetaDataTimeout time.Duration
		if t := cfg.WaitForEC2MetaDataTimeout; t != "" {
			var err error
			ec2MetaDataTimeout, err = time.ParseDuration(t)
			if err != nil {
				l.Fatal("Failed to parse ec2 meta-data timeout: %v", err)
			}
		}

		var gcpLabelsTimeout time.Duration
		if t := cfg.WaitForGCPLabelsTimeout; t != "" {
			var err error
			gcpLabelsTimeout, err = time.ParseDuration(t)
			if err != nil {
				l.Fatal("Failed to parse gcp labels timeout: %v", err)
			}
		}

		mc := metrics.NewCollector(l, metrics.CollectorConfig{
			Datadog:              cfg.MetricsDatadog,
			DatadogHost:          cfg.MetricsDatadogHost,
			DatadogDistributions: cfg.MetricsDatadogDistributions,
		})

		// Sense check supported tracing backends, we don't want bootstrapped jobs to silently have no tracing
		if _, has := tracetools.ValidTracingBackends[cfg.TracingBackend]; !has {
			l.Fatal("The given tracing backend %q is not supported. Valid backends are: %q", cfg.TracingBackend, maps.Keys(tracetools.ValidTracingBackends))
		}

		// AgentConfiguration is the runtime configuration for an agent
		agentConf := agent.AgentConfiguration{
			BootstrapScript:            cfg.BootstrapScript,
			BuildPath:                  cfg.BuildPath,
			GitMirrorsPath:             cfg.GitMirrorsPath,
			GitMirrorsLockTimeout:      cfg.GitMirrorsLockTimeout,
			GitMirrorsSkipUpdate:       cfg.GitMirrorsSkipUpdate,
			HooksPath:                  cfg.HooksPath,
			PluginsPath:                cfg.PluginsPath,
			GitCloneFlags:              cfg.GitCloneFlags,
			GitCloneMirrorFlags:        cfg.GitCloneMirrorFlags,
			GitCleanFlags:              cfg.GitCleanFlags,
			GitFetchFlags:              cfg.GitFetchFlags,
			GitSubmodules:              !cfg.NoGitSubmodules,
			SSHKeyscan:                 !cfg.NoSSHKeyscan,
			CommandEval:                !cfg.NoCommandEval,
			PluginsEnabled:             !cfg.NoPlugins,
			PluginValidation:           !cfg.NoPluginValidation,
			LocalHooksEnabled:          !cfg.NoLocalHooks,
			RunInPty:                   !cfg.NoPTY,
			TimestampLines:             cfg.TimestampLines,
			DisconnectAfterJob:         cfg.DisconnectAfterJob,
			DisconnectAfterIdleTimeout: cfg.DisconnectAfterIdleTimeout,
			CancelGracePeriod:          cfg.CancelGracePeriod,
			EnableJobLogTmpfile:        cfg.EnableJobLogTmpfile,
			Shell:                      cfg.Shell,
			RedactedVars:               cfg.RedactedVars,
			AcquireJob:                 cfg.AcquireJob,
			TracingBackend:             cfg.TracingBackend,
		}

		if loader.File != nil {
			agentConf.ConfigPath = loader.File.Path
		}

		if cfg.LogFormat == `text` {
			welcomeMessage :=
				"\n" +
					"%s  _           _ _     _ _    _ _                                _\n" +
					" | |         (_) |   | | |  (_) |                              | |\n" +
					" | |__  _   _ _| | __| | | ___| |_ ___    __ _  __ _  ___ _ __ | |_\n" +
					" | '_ \\| | | | | |/ _` | |/ / | __/ _ \\  / _` |/ _` |/ _ \\ '_ \\| __|\n" +
					" | |_) | |_| | | | (_| |   <| | ||  __/ | (_| | (_| |  __/ | | | |_\n" +
					" |_.__/ \\__,_|_|_|\\__,_|_|\\_\\_|\\__\\___|  \\__,_|\\__, |\\___|_| |_|\\__|\n" +
					"                                                __/ |\n" +
					" https://buildkite.com/agent                   |___/\n%s\n"

			if !cfg.NoColor {
				fmt.Fprintf(os.Stderr, welcomeMessage, "\x1b[38;5;48m", "\x1b[0m")
			} else {
				fmt.Fprintf(os.Stderr, welcomeMessage, "", "")
			}
		}

		l.Notice("Starting buildkite-agent v%s with PID: %s", agent.Version(), fmt.Sprintf("%d", os.Getpid()))
		l.Notice("The agent source code can be found here: https://github.com/buildkite/agent")
		l.Notice("For questions and support, email us at: hello@buildkite.com")

		if agentConf.ConfigPath != "" {
			l.WithFields(logger.StringField(`path`, agentConf.ConfigPath)).Info("Configuration loaded")
		}

		l.Debug("Bootstrap command: %s", agentConf.BootstrapScript)
		l.Debug("Build path: %s", agentConf.BuildPath)
		l.Debug("Hooks directory: %s", agentConf.HooksPath)
		l.Debug("Plugins directory: %s", agentConf.PluginsPath)

		if !agentConf.SSHKeyscan {
			l.Info("Automatic ssh-keyscan has been disabled")
		}

		if !agentConf.CommandEval {
			l.Info("Evaluating console commands has been disabled")
		}

		if !agentConf.PluginsEnabled {
			l.Info("Plugins have been disabled")
		}

		if !agentConf.RunInPty {
			l.Info("Running builds within a pseudoterminal (PTY) has been disabled")
		}

		if agentConf.DisconnectAfterJob {
			l.Info("Agents will disconnect after a job run has completed")
		}

		if agentConf.DisconnectAfterIdleTimeout > 0 {
			l.Info("Agents will disconnect after %d seconds of inactivity", agentConf.DisconnectAfterIdleTimeout)
		}

		cancelSig, err := process.ParseSignal(cfg.CancelSignal)
		if err != nil {
			l.Fatal("Failed to parse cancel-signal: %v", err)
		}

		// confirm the BuildPath exists. The bootstrap is going to write to it when a job executes,
		// so we may as well check that'll work now and fail early if it's a problem
		if !utils.FileExists(agentConf.BuildPath) {
			l.Info("Build Path doesn't exist, creating it (%s)", agentConf.BuildPath)
			// Actual file permissions will be reduced by umask, and won't be 0777 unless the user has manually changed the umask to 000
			if err := os.MkdirAll(agentConf.BuildPath, 0777); err != nil {
				l.Fatal("Failed to create builds path: %v", err)
			}
		}

		// Create the API client
		client := api.NewClient(l, loadAPIClientConfig(cfg, `Token`))

		// The registration request for all agents
		registerReq := api.AgentRegisterRequest{
			Name:              cfg.Name,
			Priority:          cfg.Priority,
			ScriptEvalEnabled: !cfg.NoCommandEval,
			Tags: agent.FetchTags(l, agent.FetchTagsConfig{
				Tags:                      cfg.Tags,
				TagsFromEC2MetaData:       (cfg.TagsFromEC2MetaData || cfg.TagsFromEC2),
				TagsFromEC2MetaDataPaths:  cfg.TagsFromEC2MetaDataPaths,
				TagsFromEC2Tags:           cfg.TagsFromEC2Tags,
				TagsFromGCPMetaData:       (cfg.TagsFromGCPMetaData || cfg.TagsFromGCP),
				TagsFromGCPMetaDataPaths:  cfg.TagsFromGCPMetaDataPaths,
				TagsFromGCPLabels:         cfg.TagsFromGCPLabels,
				TagsFromHost:              cfg.TagsFromHost,
				WaitForEC2TagsTimeout:     ec2TagTimeout,
				WaitForEC2MetaDataTimeout: ec2MetaDataTimeout,
				WaitForGCPLabelsTimeout:   gcpLabelsTimeout,
			}),
			// We only want this agent to be ignored in Buildkite
			// dispatches if it's being booted to acquire a
			// specific job.
			IgnoreInDispatches: cfg.AcquireJob != "",
			Features:           cfg.Features(),
		}

		// Spawning multiple agents doesn't work if the agent is being
		// booted in acquisition mode
		if cfg.Spawn > 1 && cfg.AcquireJob != "" {
			l.Fatal("You can't spawn multiple agents and acquire a job at the same time")
		}

		var workers []*agent.AgentWorker

		for i := 1; i <= cfg.Spawn; i++ {
			if cfg.Spawn == 1 {
				l.Info("Registering agent with Buildkite...")
			} else {
				l.Info("Registering agent %d of %d with Buildkite...", i, cfg.Spawn)
			}

			// Handle per-spawn name interpolation, replacing %spawn with the spawn index
			registerReq.Name = strings.ReplaceAll(cfg.Name, "%spawn", strconv.Itoa(i))

			if cfg.SpawnWithPriority {
				l.Info("Assigning priority %s for agent %d", strconv.Itoa(i), i)
				registerReq.Priority = strconv.Itoa(i)
			}

			// Register the agent with the buildkite API
			ag, err := agent.Register(l, client, registerReq)
			if err != nil {
				l.Fatal("%s", err)
			}

			// Create an agent worker to run the agent
			workers = append(workers,
				agent.NewAgentWorker(
					l.WithFields(logger.StringField(`agent`, ag.Name)), ag, mc, client, agent.AgentWorkerConfig{
						AgentConfiguration: agentConf,
						CancelSignal:       cancelSig,
						Debug:              cfg.Debug,
						DebugHTTP:          cfg.DebugHTTP,
						SpawnIndex:         i,
					}))
		}

		// Setup the agent pool that spawns agent workers
		pool := agent.NewAgentPool(workers)

		// Agent-wide shutdown hook. Once per agent, for all workers on the agent.
		defer agentShutdownHook(l, cfg)

		// Handle process signals
		signals := handlePoolSignals(l, pool)
		defer close(signals)

		l.Info("Starting %d Agent(s)", cfg.Spawn)
		l.Info("You can press Ctrl-C to stop the agents")

		// Determine the health check listening address and port for this agent
		if cfg.HealthCheckAddr != "" {
			http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
				if r.URL.Path != "/" {
					http.NotFound(w, r)
				} else {
					fmt.Fprintf(w, "OK: Buildkite agent is running")
				}
			})

			// NOTE(review): the health check server has no way to be shut
			// down and any listen error is logged but not fatal — confirm
			// this is intentional.
			go func() {
				l.Notice("Starting HTTP health check server on %v", cfg.HealthCheckAddr)
				err := http.ListenAndServe(cfg.HealthCheckAddr, nil)
				if err != nil {
					l.Error("Could not start health check server: %v", err)
				}
			}()
		}

		// Start the agent pool
		if err := pool.Start(); err != nil {
			l.Fatal("%s", err)
		}
	},
}
// handlePoolSignals installs OS signal handling for the agent pool and
// returns the channel signals are delivered on. Closing the returned
// channel stops the handler goroutine.
//
// Semantics:
//   - SIGQUIT: stop the pool without graceful shutdown.
//   - SIGTERM / SIGINT: graceful stop on the first signal; a repeat signal
//     forcefully stops running jobs.
//   - Any other notified signal (e.g. SIGHUP) is ignored.
func handlePoolSignals(l logger.Logger, pool *agent.AgentPool) chan os.Signal {
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt,
		syscall.SIGHUP,
		syscall.SIGTERM,
		syscall.SIGINT,
		syscall.SIGQUIT)

	go func() {
		var interruptCount int

		for sig := range signals {
			// Log once per signal here; previously the handled cases also
			// logged the same signal a second time inside the switch.
			l.Debug("Received signal `%v`", sig)

			switch sig {
			case syscall.SIGQUIT:
				pool.Stop(false)
			case syscall.SIGTERM, syscall.SIGINT:
				if interruptCount == 0 {
					interruptCount++
					l.Info("Received CTRL-C, send again to forcefully kill the agent(s)")
					pool.Stop(true)
				} else {
					l.Info("Forcefully stopping running jobs and stopping the agent(s)")
					pool.Stop(false)
				}
			default:
				l.Debug("Ignoring signal `%s`", sig.String())
			}
		}
	}()

	return signals
}
// agentShutdownHook looks for an agent-shutdown hook script in the hooks path
// and executes it if found. Output (stdout + stderr) is streamed into the main
// agent logger. Exit status failure is logged but ignored.
func agentShutdownHook(log logger.Logger, cfg AgentStartConfig) {
	// search for agent-shutdown hook (including .bat & .ps1 files on Windows)
	p, err := hook.Find(cfg.HooksPath, "agent-shutdown")
	if err != nil {
		// A missing hook file is the normal case; only unexpected lookup
		// errors are worth reporting.
		if !os.IsNotExist(err) {
			log.Error("Error finding agent-shutdown hook: %v", err)
		}
		return
	}
	sh, err := shell.New()
	if err != nil {
		log.Error("creating shell for agent-shutdown hook: %v", err)
		return
	}

	// pipe from hook output to logger
	r, w := io.Pipe()
	sh.Logger = &shell.WriterLogger{Writer: w, Ansi: !cfg.NoColor} // for Promptf
	sh.Writer = w                                                 // for stdout+stderr

	// Relay each line of hook output to the agent logger on a separate
	// goroutine; it exits once the write end of the pipe is closed below.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		scan := bufio.NewScanner(r) // log each line separately
		log = log.WithFields(logger.StringField("hook", "agent-shutdown"))
		for scan.Scan() {
			log.Info(scan.Text())
		}
	}()

	// run agent-shutdown hook
	sh.Promptf("%s", p)
	if err = sh.RunScript(context.Background(), p, nil); err != nil {
		log.Error("agent-shutdown hook: %v", err)
	}
	w.Close() // goroutine scans until pipe is closed
	// wait for hook to finish and output to flush to logger
	wg.Wait()
}
// The default git-clean flags are -ffxdq: -ff forces removal (a second -f
// also removes nested git repositories), -x removes ignored files, -d
// removes untracked directories, and -q keeps the output quiet.
package clicommand
import (
"bufio"
"context"
"fmt"
"io"
"net/http"
"os"
"os/signal"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/buildkite/agent/v3/agent"
"github.com/buildkite/agent/v3/api"
"github.com/buildkite/agent/v3/bootstrap/shell"
"github.com/buildkite/agent/v3/cliconfig"
"github.com/buildkite/agent/v3/experiments"
"github.com/buildkite/agent/v3/hook"
"github.com/buildkite/agent/v3/logger"
"github.com/buildkite/agent/v3/metrics"
"github.com/buildkite/agent/v3/process"
"github.com/buildkite/agent/v3/tracetools"
"github.com/buildkite/agent/v3/utils"
"github.com/buildkite/shellwords"
"github.com/urfave/cli"
"golang.org/x/exp/maps"
)
// StartDescription is the long-form help text shown for
// `buildkite-agent start`.
var StartDescription = `Usage:

   buildkite-agent start [options...]

Description:

   When a job is ready to run it will call the "bootstrap-script"
   and pass it all the environment variables required for the job to run.
   This script is responsible for checking out the code, and running the
   actual build script defined in the pipeline.

   The agent will run any jobs within a PTY (pseudo terminal) if available.

Example:

   $ buildkite-agent start --token xxx`
// Adding config requires changes in a few different spots
// - The AgentStartConfig struct with a cli parameter
// - As a flag in the AgentStartCommand (with matching env)
// - Into an env to be passed to the bootstrap in agent/job_runner.go, createEnvironment()
// - Into clicommand/bootstrap.go to read it from the env into the bootstrap config
// AgentStartConfig is the mapped configuration for `buildkite-agent start`,
// populated by cliconfig.Loader from CLI flags, environment variables and
// config files (the `cli` struct tags name the corresponding flags).
type AgentStartConfig struct {
	Config                      string   `cli:"config"`
	Name                        string   `cli:"name"`
	Priority                    string   `cli:"priority"`
	AcquireJob                  string   `cli:"acquire-job"`
	DisconnectAfterJob          bool     `cli:"disconnect-after-job"`
	DisconnectAfterIdleTimeout  int      `cli:"disconnect-after-idle-timeout"`
	BootstrapScript             string   `cli:"bootstrap-script" normalize:"commandpath"`
	CancelGracePeriod           int      `cli:"cancel-grace-period"`
	EnableJobLogTmpfile         bool     `cli:"enable-job-log-tmpfile"`
	BuildPath                   string   `cli:"build-path" normalize:"filepath" validate:"required"`
	HooksPath                   string   `cli:"hooks-path" normalize:"filepath"`
	PluginsPath                 string   `cli:"plugins-path" normalize:"filepath"`
	Shell                       string   `cli:"shell"`
	Tags                        []string `cli:"tags" normalize:"list"`
	TagsFromEC2MetaData         bool     `cli:"tags-from-ec2-meta-data"`
	TagsFromEC2MetaDataPaths    []string `cli:"tags-from-ec2-meta-data-paths" normalize:"list"`
	TagsFromEC2Tags             bool     `cli:"tags-from-ec2-tags"`
	TagsFromGCPMetaData         bool     `cli:"tags-from-gcp-meta-data"`
	TagsFromGCPMetaDataPaths    []string `cli:"tags-from-gcp-meta-data-paths" normalize:"list"`
	TagsFromGCPLabels           bool     `cli:"tags-from-gcp-labels"`
	TagsFromHost                bool     `cli:"tags-from-host"`
	WaitForEC2TagsTimeout       string   `cli:"wait-for-ec2-tags-timeout"`
	WaitForEC2MetaDataTimeout   string   `cli:"wait-for-ec2-meta-data-timeout"`
	WaitForGCPLabelsTimeout     string   `cli:"wait-for-gcp-labels-timeout"`
	GitCloneFlags               string   `cli:"git-clone-flags"`
	GitCloneMirrorFlags         string   `cli:"git-clone-mirror-flags"`
	GitCleanFlags               string   `cli:"git-clean-flags"`
	GitFetchFlags               string   `cli:"git-fetch-flags"`
	GitMirrorsPath              string   `cli:"git-mirrors-path" normalize:"filepath"`
	GitMirrorsLockTimeout       int      `cli:"git-mirrors-lock-timeout"`
	GitMirrorsSkipUpdate        bool     `cli:"git-mirrors-skip-update"`
	NoGitSubmodules             bool     `cli:"no-git-submodules"`
	NoSSHKeyscan                bool     `cli:"no-ssh-keyscan"`
	NoCommandEval               bool     `cli:"no-command-eval"`
	NoLocalHooks                bool     `cli:"no-local-hooks"`
	NoPlugins                   bool     `cli:"no-plugins"`
	NoPluginValidation          bool     `cli:"no-plugin-validation"`
	NoPTY                       bool     `cli:"no-pty"`
	NoFeatureReporting          bool     `cli:"no-feature-reporting"`
	TimestampLines              bool     `cli:"timestamp-lines"`
	HealthCheckAddr             string   `cli:"health-check-addr"`
	MetricsDatadog              bool     `cli:"metrics-datadog"`
	MetricsDatadogHost          string   `cli:"metrics-datadog-host"`
	MetricsDatadogDistributions bool     `cli:"metrics-datadog-distributions"`
	TracingBackend              string   `cli:"tracing-backend"`
	Spawn                       int      `cli:"spawn"`
	SpawnWithPriority           bool     `cli:"spawn-with-priority"`
	LogFormat                   string   `cli:"log-format"`
	CancelSignal                string   `cli:"cancel-signal"`
	RedactedVars                []string `cli:"redacted-vars" normalize:"list"`

	// Global flags
	Debug       bool     `cli:"debug"`
	LogLevel    string   `cli:"log-level"`
	NoColor     bool     `cli:"no-color"`
	Experiments []string `cli:"experiment" normalize:"list"`
	Profile     string   `cli:"profile"`

	// API config
	DebugHTTP bool   `cli:"debug-http"`
	Token     string `cli:"token" validate:"required"`
	Endpoint  string `cli:"endpoint" validate:"required"`
	NoHTTP2   bool   `cli:"no-http2"`

	// Deprecated
	NoSSHFingerprintVerification bool     `cli:"no-automatic-ssh-fingerprint-verification" deprecated-and-renamed-to:"NoSSHKeyscan"`
	MetaData                     []string `cli:"meta-data" deprecated-and-renamed-to:"Tags"`
	MetaDataEC2                  bool     `cli:"meta-data-ec2" deprecated-and-renamed-to:"TagsFromEC2"`
	MetaDataEC2Tags              bool     `cli:"meta-data-ec2-tags" deprecated-and-renamed-to:"TagsFromEC2Tags"`
	MetaDataGCP                  bool     `cli:"meta-data-gcp" deprecated-and-renamed-to:"TagsFromGCP"`
	TagsFromEC2                  bool     `cli:"tags-from-ec2" deprecated-and-renamed-to:"TagsFromEC2MetaData"`
	TagsFromGCP                  bool     `cli:"tags-from-gcp" deprecated-and-renamed-to:"TagsFromGCPMetaData"`
	DisconnectAfterJobTimeout    int      `cli:"disconnect-after-job-timeout" deprecated:"Use disconnect-after-idle-timeout instead"`
}
// Features returns the list of enabled-feature names reported to Buildkite
// at registration time, or an empty list when feature reporting is opted
// out of via no-feature-reporting.
func (asc AgentStartConfig) Features() []string {
	if asc.NoFeatureReporting {
		return []string{}
	}

	features := make([]string, 0, 8)

	// Table of feature names and whether the config enables each one.
	for _, f := range []struct {
		enabled bool
		name    string
	}{
		{asc.GitMirrorsPath != "", "git-mirrors"},
		{asc.AcquireJob != "", "acquire-job"},
		{asc.TracingBackend == tracetools.BackendDatadog, "datadog-tracing"},
		{asc.TracingBackend == tracetools.BackendOpenTelemetry, "opentelemetry-tracing"},
		{asc.DisconnectAfterJob, "disconnect-after-job"},
		{asc.DisconnectAfterIdleTimeout != 0, "disconnect-after-idle"},
		{asc.NoPlugins, "no-plugins"},
		{asc.NoCommandEval, "no-script-eval"},
	} {
		if f.enabled {
			features = append(features, f.name)
		}
	}

	// Every enabled experiment is reported with an "experiment-" prefix.
	for _, exp := range experiments.Enabled() {
		features = append(features, fmt.Sprintf("experiment-%s", exp))
	}

	return features
}
// DefaultShell returns the default command used to interpret build
// commands on the current operating system.
// https://github.com/golang/go/blob/master/src/go/build/syslist.go#L7
func DefaultShell() string {
	if runtime.GOOS == "windows" {
		return `C:\Windows\System32\CMD.exe /S /C`
	}
	// Every other platform gets bash; only the install prefix differs.
	shellPath := `/bin/bash`
	switch runtime.GOOS {
	case "freebsd", "openbsd":
		shellPath = `/usr/local/bin/bash`
	case "netbsd":
		shellPath = `/usr/pkg/bin/bash`
	}
	return shellPath + ` -e -c`
}
// DefaultConfigFilePaths returns the candidate locations for
// buildkite-agent.cfg, highest priority first. A config file sitting next
// to the running binary outranks every OS-specific location.
func DefaultConfigFilePaths() (paths []string) {
	// Choose between Windows and *nix locations
	if runtime.GOOS == "windows" {
		paths = []string{
			"C:\\buildkite-agent\\buildkite-agent.cfg",
			"$USERPROFILE\\AppData\\Local\\buildkite-agent\\buildkite-agent.cfg",
			"$USERPROFILE\\AppData\\Local\\BuildkiteAgent\\buildkite-agent.cfg",
		}
	} else {
		paths = []string{"$HOME/.buildkite-agent/buildkite-agent.cfg"}

		// For Apple Silicon Macs, prioritise the `/opt/homebrew` path over `/usr/local`
		if runtime.GOOS == "darwin" && runtime.GOARCH == "arm64" {
			paths = append(paths, "/opt/homebrew/etc/buildkite-agent/buildkite-agent.cfg")
		}

		paths = append(paths,
			"/usr/local/etc/buildkite-agent/buildkite-agent.cfg",
			"/etc/buildkite-agent/buildkite-agent.cfg",
		)
	}

	// Also check to see if there's a buildkite-agent.cfg in the folder
	// that the binary is running in.
	if exePath, err := os.Executable(); err == nil {
		if binDir, err := filepath.Abs(filepath.Dir(exePath)); err == nil {
			relativeConfig := filepath.Join(binDir, "buildkite-agent.cfg")
			paths = append([]string{relativeConfig}, paths...)
		}
	}

	return paths
}
// AgentStartCommand is the CLI definition for `buildkite-agent start`.
// It loads configuration (flags, env vars, config file), registers one or
// more agent workers with the Buildkite API, and runs them in a pool until
// the process is stopped.
var AgentStartCommand = cli.Command{
	Name:        "start",
	Usage:       "Starts a Buildkite agent",
	Description: StartDescription,
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:   "config",
			Value:  "",
			Usage:  "Path to a configuration file",
			EnvVar: "BUILDKITE_AGENT_CONFIG",
		},
		cli.StringFlag{
			Name:   "name",
			Value:  "",
			Usage:  "The name of the agent",
			EnvVar: "BUILDKITE_AGENT_NAME",
		},
		cli.StringFlag{
			Name:   "priority",
			Value:  "",
			Usage:  "The priority of the agent (higher priorities are assigned work first)",
			EnvVar: "BUILDKITE_AGENT_PRIORITY",
		},
		cli.StringFlag{
			Name:   "acquire-job",
			Value:  "",
			Usage:  "Start this agent and only run the specified job, disconnecting after it's finished",
			EnvVar: "BUILDKITE_AGENT_ACQUIRE_JOB",
		},
		cli.BoolFlag{
			Name:   "disconnect-after-job",
			Usage:  "Disconnect the agent after running exactly one job. When used in conjunction with the ′--spawn′ flag, each worker booted will run exactly one job",
			EnvVar: "BUILDKITE_AGENT_DISCONNECT_AFTER_JOB",
		},
		cli.IntFlag{
			Name:   "disconnect-after-idle-timeout",
			Value:  0,
			Usage:  "The maximum idle time in seconds to wait for a job before disconnecting. The default of 0 means no timeout",
			EnvVar: "BUILDKITE_AGENT_DISCONNECT_AFTER_IDLE_TIMEOUT",
		},
		cli.IntFlag{
			Name:   "cancel-grace-period",
			Value:  10,
			Usage:  "The number of seconds a canceled or timed out job is given to gracefully terminate and upload its artifacts",
			EnvVar: "BUILDKITE_CANCEL_GRACE_PERIOD",
		},
		cli.BoolFlag{
			Name:   "enable-job-log-tmpfile",
			Usage:  "Store the job logs in a temporary file ′BUILDKITE_JOB_LOG_TMPFILE′ that is accessible during the job and removed at the end of the job",
			EnvVar: "BUILDKITE_ENABLE_JOB_LOG_TMPFILE",
		},
		cli.StringFlag{
			Name:   "shell",
			Value:  DefaultShell(),
			Usage:  "The shell command used to interpret build commands, e.g /bin/bash -e -c",
			EnvVar: "BUILDKITE_SHELL",
		},
		cli.StringSliceFlag{
			Name:   "tags",
			Value:  &cli.StringSlice{},
			Usage:  "A comma-separated list of tags for the agent (for example, \"linux\" or \"mac,xcode=8\")",
			EnvVar: "BUILDKITE_AGENT_TAGS",
		},
		cli.BoolFlag{
			Name:   "tags-from-host",
			Usage:  "Include tags from the host (hostname, machine-id, os)",
			EnvVar: "BUILDKITE_AGENT_TAGS_FROM_HOST",
		},
		cli.StringSliceFlag{
			Name:   "tags-from-ec2-meta-data",
			Value:  &cli.StringSlice{},
			Usage:  "Include the default set of host EC2 meta-data as tags (instance-id, instance-type, ami-id, and instance-life-cycle)",
			EnvVar: "BUILDKITE_AGENT_TAGS_FROM_EC2_META_DATA",
		},
		cli.StringSliceFlag{
			Name:   "tags-from-ec2-meta-data-paths",
			Value:  &cli.StringSlice{},
			Usage:  "Include additional tags fetched from EC2 meta-data using tag & path suffix pairs, e.g \"tag_name=path/to/value\"",
			EnvVar: "BUILDKITE_AGENT_TAGS_FROM_EC2_META_DATA_PATHS",
		},
		cli.BoolFlag{
			Name:   "tags-from-ec2-tags",
			Usage:  "Include the host's EC2 tags as tags",
			EnvVar: "BUILDKITE_AGENT_TAGS_FROM_EC2_TAGS",
		},
		cli.StringSliceFlag{
			Name:   "tags-from-gcp-meta-data",
			Value:  &cli.StringSlice{},
			Usage:  "Include the default set of host Google Cloud instance meta-data as tags (instance-id, machine-type, preemptible, project-id, region, and zone)",
			EnvVar: "BUILDKITE_AGENT_TAGS_FROM_GCP_META_DATA",
		},
		cli.StringSliceFlag{
			Name:   "tags-from-gcp-meta-data-paths",
			Value:  &cli.StringSlice{},
			Usage:  "Include additional tags fetched from Google Cloud instance meta-data using tag & path suffix pairs, e.g \"tag_name=path/to/value\"",
			EnvVar: "BUILDKITE_AGENT_TAGS_FROM_GCP_META_DATA_PATHS",
		},
		cli.BoolFlag{
			Name:   "tags-from-gcp-labels",
			Usage:  "Include the host's Google Cloud instance labels as tags",
			EnvVar: "BUILDKITE_AGENT_TAGS_FROM_GCP_LABELS",
		},
		cli.DurationFlag{
			Name:   "wait-for-ec2-tags-timeout",
			Usage:  "The amount of time to wait for tags from EC2 before proceeding",
			EnvVar: "BUILDKITE_AGENT_WAIT_FOR_EC2_TAGS_TIMEOUT",
			Value:  time.Second * 10,
		},
		cli.DurationFlag{
			Name:   "wait-for-ec2-meta-data-timeout",
			Usage:  "The amount of time to wait for meta-data from EC2 before proceeding",
			EnvVar: "BUILDKITE_AGENT_WAIT_FOR_EC2_META_DATA_TIMEOUT",
			Value:  time.Second * 10,
		},
		cli.DurationFlag{
			Name:   "wait-for-gcp-labels-timeout",
			Usage:  "The amount of time to wait for labels from GCP before proceeding",
			EnvVar: "BUILDKITE_AGENT_WAIT_FOR_GCP_LABELS_TIMEOUT",
			Value:  time.Second * 10,
		},
		cli.StringFlag{
			Name:   "git-clone-flags",
			Value:  "-v",
			Usage:  "Flags to pass to the \"git clone\" command",
			EnvVar: "BUILDKITE_GIT_CLONE_FLAGS",
		},
		cli.StringFlag{
			Name:   "git-clean-flags",
			Value:  "-ffxdq",
			Usage:  "Flags to pass to \"git clean\" command",
			EnvVar: "BUILDKITE_GIT_CLEAN_FLAGS",
			// -ff: delete files and directories, including untracked nested git repositories
			// -x: don't use .gitignore rules
			// -d: recurse into untracked directories
			// -q: quiet, only report errors
		},
		cli.StringFlag{
			Name:   "git-fetch-flags",
			Value:  "-v --prune",
			Usage:  "Flags to pass to \"git fetch\" command",
			EnvVar: "BUILDKITE_GIT_FETCH_FLAGS",
		},
		cli.StringFlag{
			Name:   "git-clone-mirror-flags",
			Value:  "-v",
			Usage:  "Flags to pass to the \"git clone\" command when used for mirroring",
			EnvVar: "BUILDKITE_GIT_CLONE_MIRROR_FLAGS",
		},
		cli.StringFlag{
			Name:   "git-mirrors-path",
			Value:  "",
			Usage:  "Path to where mirrors of git repositories are stored",
			EnvVar: "BUILDKITE_GIT_MIRRORS_PATH",
		},
		cli.IntFlag{
			Name:   "git-mirrors-lock-timeout",
			Value:  300,
			Usage:  "Seconds to lock a git mirror during clone, should exceed your longest checkout",
			EnvVar: "BUILDKITE_GIT_MIRRORS_LOCK_TIMEOUT",
		},
		cli.BoolFlag{
			Name:   "git-mirrors-skip-update",
			Usage:  "Skip updating the Git mirror",
			EnvVar: "BUILDKITE_GIT_MIRRORS_SKIP_UPDATE",
		},
		cli.StringFlag{
			Name:   "bootstrap-script",
			Value:  "",
			Usage:  "The command that is executed for bootstrapping a job, defaults to the bootstrap sub-command of this binary",
			EnvVar: "BUILDKITE_BOOTSTRAP_SCRIPT_PATH",
		},
		cli.StringFlag{
			Name:   "build-path",
			Value:  "",
			Usage:  "Path to where the builds will run from",
			EnvVar: "BUILDKITE_BUILD_PATH",
		},
		cli.StringFlag{
			Name:   "hooks-path",
			Value:  "",
			Usage:  "Directory where the hook scripts are found",
			EnvVar: "BUILDKITE_HOOKS_PATH",
		},
		cli.StringFlag{
			Name:   "plugins-path",
			Value:  "",
			Usage:  "Directory where the plugins are saved to",
			EnvVar: "BUILDKITE_PLUGINS_PATH",
		},
		cli.BoolFlag{
			Name:   "timestamp-lines",
			Usage:  "Prepend timestamps on each line of output.",
			EnvVar: "BUILDKITE_TIMESTAMP_LINES",
		},
		cli.StringFlag{
			Name:   "health-check-addr",
			Usage:  "Start an HTTP server on this addr:port that returns whether the agent is healthy, disabled by default",
			EnvVar: "BUILDKITE_AGENT_HEALTH_CHECK_ADDR",
		},
		cli.BoolFlag{
			Name:   "no-pty",
			Usage:  "Do not run jobs within a pseudo terminal",
			EnvVar: "BUILDKITE_NO_PTY",
		},
		cli.BoolFlag{
			Name:   "no-ssh-keyscan",
			Usage:  "Don't automatically run ssh-keyscan before checkout",
			EnvVar: "BUILDKITE_NO_SSH_KEYSCAN",
		},
		cli.BoolFlag{
			Name:   "no-command-eval",
			Usage:  "Don't allow this agent to run arbitrary console commands, including plugins",
			EnvVar: "BUILDKITE_NO_COMMAND_EVAL",
		},
		cli.BoolFlag{
			Name:   "no-plugins",
			Usage:  "Don't allow this agent to load plugins",
			EnvVar: "BUILDKITE_NO_PLUGINS",
		},
		// BoolTFlag: defaults to true, so plugin validation is off by default
		// unless explicitly enabled.
		cli.BoolTFlag{
			Name:   "no-plugin-validation",
			Usage:  "Don't validate plugin configuration and requirements",
			EnvVar: "BUILDKITE_NO_PLUGIN_VALIDATION",
		},
		cli.BoolFlag{
			Name:   "no-local-hooks",
			Usage:  "Don't allow local hooks to be run from checked out repositories",
			EnvVar: "BUILDKITE_NO_LOCAL_HOOKS",
		},
		cli.BoolFlag{
			Name:   "no-git-submodules",
			Usage:  "Don't automatically checkout git submodules",
			EnvVar: "BUILDKITE_NO_GIT_SUBMODULES,BUILDKITE_DISABLE_GIT_SUBMODULES",
		},
		cli.BoolFlag{
			Name:   "metrics-datadog",
			Usage:  "Send metrics to DogStatsD for Datadog",
			EnvVar: "BUILDKITE_METRICS_DATADOG",
		},
		cli.BoolFlag{
			Name:   "no-feature-reporting",
			Usage:  "Disables sending a list of enabled features back to the Buildkite mothership. We use this information to measure feature usage, but if you're not comfortable sharing that information then that's totally okay :)",
			EnvVar: "BUILDKITE_AGENT_NO_FEATURE_REPORTING",
		},
		cli.StringFlag{
			Name:   "metrics-datadog-host",
			Usage:  "The dogstatsd instance to send metrics to using udp",
			EnvVar: "BUILDKITE_METRICS_DATADOG_HOST",
			Value:  "127.0.0.1:8125",
		},
		cli.BoolFlag{
			Name:   "metrics-datadog-distributions",
			Usage:  "Use Datadog Distributions for Timing metrics",
			EnvVar: "BUILDKITE_METRICS_DATADOG_DISTRIBUTIONS",
		},
		cli.StringFlag{
			Name:   "log-format",
			Usage:  "The format to use for the logger output",
			EnvVar: "BUILDKITE_LOG_FORMAT",
			Value:  "text",
		},
		cli.IntFlag{
			Name:   "spawn",
			Usage:  "The number of agents to spawn in parallel",
			Value:  1,
			EnvVar: "BUILDKITE_AGENT_SPAWN",
		},
		cli.BoolFlag{
			Name:   "spawn-with-priority",
			Usage:  "Assign priorities to every spawned agent (when using --spawn) equal to the agent's index",
			EnvVar: "BUILDKITE_AGENT_SPAWN_WITH_PRIORITY",
		},
		cli.StringFlag{
			Name:   "cancel-signal",
			Usage:  "The signal to use for cancellation",
			EnvVar: "BUILDKITE_CANCEL_SIGNAL",
			Value:  "SIGTERM",
		},
		cli.StringFlag{
			Name:   "tracing-backend",
			Usage:  `Enable tracing for build jobs by specifying a backend, "datadog" or "opentelemetry"`,
			EnvVar: "BUILDKITE_TRACING_BACKEND",
			Value:  "",
		},
		// API Flags
		AgentRegisterTokenFlag,
		EndpointFlag,
		NoHTTP2Flag,
		DebugHTTPFlag,
		// Global flags
		NoColorFlag,
		DebugFlag,
		LogLevelFlag,
		ExperimentsFlag,
		ProfileFlag,
		RedactedVars,
		// Deprecated flags which will be removed in v4
		cli.StringSliceFlag{
			Name:   "meta-data",
			Value:  &cli.StringSlice{},
			Hidden: true,
			EnvVar: "BUILDKITE_AGENT_META_DATA",
		},
		cli.BoolFlag{
			Name:   "meta-data-ec2",
			Hidden: true,
			EnvVar: "BUILDKITE_AGENT_META_DATA_EC2",
		},
		cli.BoolFlag{
			Name:   "meta-data-ec2-tags",
			Hidden: true,
			EnvVar: "BUILDKITE_AGENT_TAGS_FROM_EC2_TAGS",
		},
		cli.BoolFlag{
			Name:   "meta-data-gcp",
			Hidden: true,
			EnvVar: "BUILDKITE_AGENT_META_DATA_GCP",
		},
		cli.BoolFlag{
			Name:   "no-automatic-ssh-fingerprint-verification",
			Hidden: true,
			EnvVar: "BUILDKITE_NO_AUTOMATIC_SSH_FINGERPRINT_VERIFICATION",
		},
		cli.BoolFlag{
			Name:   "tags-from-ec2",
			Usage:  "Include the host's EC2 meta-data as tags (instance-id, instance-type, and ami-id)",
			EnvVar: "BUILDKITE_AGENT_TAGS_FROM_EC2",
		},
		cli.BoolFlag{
			Name:   "tags-from-gcp",
			Usage:  "Include the host's Google Cloud instance meta-data as tags (instance-id, machine-type, preemptible, project-id, region, and zone)",
			EnvVar: "BUILDKITE_AGENT_TAGS_FROM_GCP",
		},
		cli.IntFlag{
			Name:   "disconnect-after-job-timeout",
			Hidden: true,
			Usage:  "When --disconnect-after-job is specified, the number of seconds to wait for a job before shutting down",
			EnvVar: "BUILDKITE_AGENT_DISCONNECT_AFTER_JOB_TIMEOUT",
		},
	},
	Action: func(c *cli.Context) {
		// The configuration will be loaded into this struct
		cfg := AgentStartConfig{}
		// Setup the config loader. You'll see that we also pass paths to
		// potential config files. The loader will use the first one it finds.
		loader := cliconfig.Loader{
			CLI:                    c,
			Config:                 &cfg,
			DefaultConfigFilePaths: DefaultConfigFilePaths(),
		}
		// Load the configuration
		warnings, err := loader.Load()
		if err != nil {
			fmt.Printf("%s", err)
			os.Exit(1)
		}
		l := CreateLogger(cfg)
		// Show warnings now we have a logger
		for _, warning := range warnings {
			l.Warn("%s", warning)
		}
		// Setup any global configuration options
		done := HandleGlobalFlags(l, cfg)
		defer done()
		// Remove any config env from the environment to prevent them propagating to bootstrap
		err = UnsetConfigFromEnvironment(c)
		if err != nil {
			fmt.Printf("%s", err)
			os.Exit(1)
		}
		// Check if git-mirrors are enabled
		if experiments.IsEnabled(`git-mirrors`) {
			if cfg.GitMirrorsPath == `` {
				l.Fatal("Must provide a git-mirrors-path in your configuration for git-mirrors experiment")
			}
		}
		// Force some settings if on Windows (these aren't supported yet)
		if runtime.GOOS == "windows" {
			cfg.NoPTY = true
		}
		// Set a useful default for the bootstrap script
		if cfg.BootstrapScript == "" {
			exePath, err := os.Executable()
			if err != nil {
				l.Fatal("Unable to find executable path for bootstrap")
			}
			cfg.BootstrapScript = fmt.Sprintf("%s bootstrap", shellwords.Quote(exePath))
		}
		// no-plugins counts as "set" whether it came from the CLI or the
		// config file; needed to honour an explicit `--no-plugins=false`.
		isSetNoPlugins := c.IsSet("no-plugins")
		if loader.File != nil {
			if _, exists := loader.File.Config["no-plugins"]; exists {
				isSetNoPlugins = true
			}
		}
		// Show a warning if plugins are enabled by no-command-eval or no-local-hooks is set
		if isSetNoPlugins && cfg.NoPlugins == false {
			msg := `Plugins have been specifically enabled, despite %s being enabled. ` +
				`Plugins can execute arbitrary hooks and commands, make sure you are ` +
				`whitelisting your plugins in ` +
				`your environment hook.`
			switch {
			case cfg.NoCommandEval:
				l.Warn(msg, `no-command-eval`)
			case cfg.NoLocalHooks:
				l.Warn(msg, `no-local-hooks`)
			}
		}
		// Turning off command eval or local hooks will also turn off plugins unless
		// `--no-plugins=false` is provided specifically
		if (cfg.NoCommandEval || cfg.NoLocalHooks) && !isSetNoPlugins {
			cfg.NoPlugins = true
		}
		// Guess the shell if none is provided
		if cfg.Shell == "" {
			cfg.Shell = DefaultShell()
		}
		// Handle deprecated DisconnectAfterJobTimeout
		if cfg.DisconnectAfterJobTimeout > 0 {
			cfg.DisconnectAfterIdleTimeout = cfg.DisconnectAfterJobTimeout
		}
		var ec2TagTimeout time.Duration
		if t := cfg.WaitForEC2TagsTimeout; t != "" {
			var err error
			ec2TagTimeout, err = time.ParseDuration(t)
			if err != nil {
				l.Fatal("Failed to parse ec2 tag timeout: %v", err)
			}
		}
		var ec2MetaDataTimeout time.Duration
		if t := cfg.WaitForEC2MetaDataTimeout; t != "" {
			var err error
			ec2MetaDataTimeout, err = time.ParseDuration(t)
			if err != nil {
				l.Fatal("Failed to parse ec2 meta-data timeout: %v", err)
			}
		}
		var gcpLabelsTimeout time.Duration
		if t := cfg.WaitForGCPLabelsTimeout; t != "" {
			var err error
			gcpLabelsTimeout, err = time.ParseDuration(t)
			if err != nil {
				l.Fatal("Failed to parse gcp labels timeout: %v", err)
			}
		}
		mc := metrics.NewCollector(l, metrics.CollectorConfig{
			Datadog:              cfg.MetricsDatadog,
			DatadogHost:          cfg.MetricsDatadogHost,
			DatadogDistributions: cfg.MetricsDatadogDistributions,
		})
		// Sense check supported tracing backends, we don't want bootstrapped jobs to silently have no tracing
		if _, has := tracetools.ValidTracingBackends[cfg.TracingBackend]; !has {
			l.Fatal("The given tracing backend %q is not supported. Valid backends are: %q", cfg.TracingBackend, maps.Keys(tracetools.ValidTracingBackends))
		}
		// AgentConfiguration is the runtime configuration for an agent
		agentConf := agent.AgentConfiguration{
			BootstrapScript:            cfg.BootstrapScript,
			BuildPath:                  cfg.BuildPath,
			GitMirrorsPath:             cfg.GitMirrorsPath,
			GitMirrorsLockTimeout:      cfg.GitMirrorsLockTimeout,
			GitMirrorsSkipUpdate:       cfg.GitMirrorsSkipUpdate,
			HooksPath:                  cfg.HooksPath,
			PluginsPath:                cfg.PluginsPath,
			GitCloneFlags:              cfg.GitCloneFlags,
			GitCloneMirrorFlags:        cfg.GitCloneMirrorFlags,
			GitCleanFlags:              cfg.GitCleanFlags,
			GitFetchFlags:              cfg.GitFetchFlags,
			GitSubmodules:              !cfg.NoGitSubmodules,
			SSHKeyscan:                 !cfg.NoSSHKeyscan,
			CommandEval:                !cfg.NoCommandEval,
			PluginsEnabled:             !cfg.NoPlugins,
			PluginValidation:           !cfg.NoPluginValidation,
			LocalHooksEnabled:          !cfg.NoLocalHooks,
			RunInPty:                   !cfg.NoPTY,
			TimestampLines:             cfg.TimestampLines,
			DisconnectAfterJob:         cfg.DisconnectAfterJob,
			DisconnectAfterIdleTimeout: cfg.DisconnectAfterIdleTimeout,
			CancelGracePeriod:          cfg.CancelGracePeriod,
			EnableJobLogTmpfile:        cfg.EnableJobLogTmpfile,
			Shell:                      cfg.Shell,
			RedactedVars:               cfg.RedactedVars,
			AcquireJob:                 cfg.AcquireJob,
			TracingBackend:             cfg.TracingBackend,
		}
		if loader.File != nil {
			agentConf.ConfigPath = loader.File.Path
		}
		if cfg.LogFormat == `text` {
			welcomeMessage :=
				"\n" +
					"%s  _           _ _     _ _    _ _                                _\n" +
					" | |         (_) |   | | |  (_) |                              | |\n" +
					" | |__  _   _ _| | __| | | ___| |_ ___    __ _  __ _  ___ _ __ | |_\n" +
					" | '_ \\| | | | | |/ _` | |/ / | __/ _ \\  / _` |/ _` |/ _ \\ '_ \\| __|\n" +
					" | |_) | |_| | | | (_| |   <| | ||  __/ | (_| | (_| |  __/ | | | |_\n" +
					" |_.__/ \\__,_|_|_|\\__,_|_|\\_\\_|\\__\\___|  \\__,_|\\__, |\\___|_| |_|\\__|\n" +
					"                                                __/ |\n" +
					" https://buildkite.com/agent                   |___/\n%s\n"
			if !cfg.NoColor {
				fmt.Fprintf(os.Stderr, welcomeMessage, "\x1b[38;5;48m", "\x1b[0m")
			} else {
				fmt.Fprintf(os.Stderr, welcomeMessage, "", "")
			}
		}
		l.Notice("Starting buildkite-agent v%s with PID: %s", agent.Version(), fmt.Sprintf("%d", os.Getpid()))
		l.Notice("The agent source code can be found here: https://github.com/buildkite/agent")
		l.Notice("For questions and support, email us at: hello@buildkite.com")
		if agentConf.ConfigPath != "" {
			l.WithFields(logger.StringField(`path`, agentConf.ConfigPath)).Info("Configuration loaded")
		}
		l.Debug("Bootstrap command: %s", agentConf.BootstrapScript)
		l.Debug("Build path: %s", agentConf.BuildPath)
		l.Debug("Hooks directory: %s", agentConf.HooksPath)
		l.Debug("Plugins directory: %s", agentConf.PluginsPath)
		if !agentConf.SSHKeyscan {
			l.Info("Automatic ssh-keyscan has been disabled")
		}
		if !agentConf.CommandEval {
			l.Info("Evaluating console commands has been disabled")
		}
		if !agentConf.PluginsEnabled {
			l.Info("Plugins have been disabled")
		}
		if !agentConf.RunInPty {
			l.Info("Running builds within a pseudoterminal (PTY) has been disabled")
		}
		if agentConf.DisconnectAfterJob {
			l.Info("Agents will disconnect after a job run has completed")
		}
		if agentConf.DisconnectAfterIdleTimeout > 0 {
			l.Info("Agents will disconnect after %d seconds of inactivity", agentConf.DisconnectAfterIdleTimeout)
		}
		cancelSig, err := process.ParseSignal(cfg.CancelSignal)
		if err != nil {
			l.Fatal("Failed to parse cancel-signal: %v", err)
		}
		// confirm the BuildPath exists. The bootstrap is going to write to it when a job executes,
		// so we may as well check that'll work now and fail early if it's a problem
		if !utils.FileExists(agentConf.BuildPath) {
			l.Info("Build Path doesn't exist, creating it (%s)", agentConf.BuildPath)
			// Actual file permissions will be reduced by umask, and won't be 0777 unless the user has manually changed the umask to 000
			if err := os.MkdirAll(agentConf.BuildPath, 0777); err != nil {
				l.Fatal("Failed to create builds path: %v", err)
			}
		}
		// Create the API client
		client := api.NewClient(l, loadAPIClientConfig(cfg, `Token`))
		// The registration request for all agents
		registerReq := api.AgentRegisterRequest{
			Name:              cfg.Name,
			Priority:          cfg.Priority,
			ScriptEvalEnabled: !cfg.NoCommandEval,
			Tags: agent.FetchTags(l, agent.FetchTagsConfig{
				Tags:                      cfg.Tags,
				TagsFromEC2MetaData:       (cfg.TagsFromEC2MetaData || cfg.TagsFromEC2),
				TagsFromEC2MetaDataPaths:  cfg.TagsFromEC2MetaDataPaths,
				TagsFromEC2Tags:           cfg.TagsFromEC2Tags,
				TagsFromGCPMetaData:       (cfg.TagsFromGCPMetaData || cfg.TagsFromGCP),
				TagsFromGCPMetaDataPaths:  cfg.TagsFromGCPMetaDataPaths,
				TagsFromGCPLabels:         cfg.TagsFromGCPLabels,
				TagsFromHost:              cfg.TagsFromHost,
				WaitForEC2TagsTimeout:     ec2TagTimeout,
				WaitForEC2MetaDataTimeout: ec2MetaDataTimeout,
				WaitForGCPLabelsTimeout:   gcpLabelsTimeout,
			}),
			// We only want this agent to be ignored in Buildkite
			// dispatches if it's being booted to acquire a
			// specific job.
			IgnoreInDispatches: cfg.AcquireJob != "",
			Features:           cfg.Features(),
		}
		// Spawning multiple agents doesn't work if the agent is being
		// booted in acquisition mode
		if cfg.Spawn > 1 && cfg.AcquireJob != "" {
			l.Fatal("You can't spawn multiple agents and acquire a job at the same time")
		}
		var workers []*agent.AgentWorker
		for i := 1; i <= cfg.Spawn; i++ {
			if cfg.Spawn == 1 {
				l.Info("Registering agent with Buildkite...")
			} else {
				l.Info("Registering agent %d of %d with Buildkite...", i, cfg.Spawn)
			}
			// Handle per-spawn name interpolation, replacing %spawn with the spawn index
			registerReq.Name = strings.ReplaceAll(cfg.Name, "%spawn", strconv.Itoa(i))
			if cfg.SpawnWithPriority {
				l.Info("Assigning priority %s for agent %d", strconv.Itoa(i), i)
				registerReq.Priority = strconv.Itoa(i)
			}
			// Register the agent with the buildkite API
			ag, err := agent.Register(l, client, registerReq)
			if err != nil {
				l.Fatal("%s", err)
			}
			// Create an agent worker to run the agent
			workers = append(workers,
				agent.NewAgentWorker(
					l.WithFields(logger.StringField(`agent`, ag.Name)), ag, mc, client, agent.AgentWorkerConfig{
						AgentConfiguration: agentConf,
						CancelSignal:       cancelSig,
						Debug:              cfg.Debug,
						DebugHTTP:          cfg.DebugHTTP,
						SpawnIndex:         i,
					}))
		}
		// Setup the agent pool that spawns agent workers
		pool := agent.NewAgentPool(workers)
		// Agent-wide shutdown hook. Once per agent, for all workers on the agent.
		defer agentShutdownHook(l, cfg)
		// Handle process signals
		signals := handlePoolSignals(l, pool)
		defer close(signals)
		l.Info("Starting %d Agent(s)", cfg.Spawn)
		l.Info("You can press Ctrl-C to stop the agents")
		// Determine the health check listening address and port for this agent
		if cfg.HealthCheckAddr != "" {
			http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
				if r.URL.Path != "/" {
					http.NotFound(w, r)
				} else {
					fmt.Fprintf(w, "OK: Buildkite agent is running")
				}
			})
			go func() {
				l.Notice("Starting HTTP health check server on %v", cfg.HealthCheckAddr)
				err := http.ListenAndServe(cfg.HealthCheckAddr, nil)
				if err != nil {
					l.Error("Could not start health check server: %v", err)
				}
			}()
		}
		// Start the agent pool
		if err := pool.Start(); err != nil {
			l.Fatal("%s", err)
		}
	},
}
// handlePoolSignals installs OS signal handling for the agent pool.
// SIGQUIT stops the pool immediately; the first SIGTERM/SIGINT requests a
// graceful stop, a second one forces it. The returned channel should be
// closed by the caller to tear down the handler goroutine.
func handlePoolSignals(l logger.Logger, pool *agent.AgentPool) chan os.Signal {
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, os.Interrupt,
		syscall.SIGHUP,
		syscall.SIGTERM,
		syscall.SIGINT,
		syscall.SIGQUIT)
	go func() {
		interrupts := 0
		for received := range sigs {
			l.Debug("Received signal `%v`", received)
			switch received {
			case syscall.SIGQUIT:
				l.Debug("Received signal `%s`", received.String())
				pool.Stop(false)
			case syscall.SIGTERM, syscall.SIGINT:
				l.Debug("Received signal `%s`", received.String())
				if interrupts > 0 {
					l.Info("Forcefully stopping running jobs and stopping the agent(s)")
					pool.Stop(false)
				} else {
					interrupts++
					l.Info("Received CTRL-C, send again to forcefully kill the agent(s)")
					pool.Stop(true)
				}
			default:
				l.Debug("Ignoring signal `%s`", received.String())
			}
		}
	}()
	return sigs
}
// agentShutdownHook looks for an agent-shutdown hook script in the hooks path
// and executes it if found. Output (stdout + stderr) is streamed into the main
// agent logger. Exit status failure is logged but ignored.
func agentShutdownHook(log logger.Logger, cfg AgentStartConfig) {
	// search for agent-shutdown hook (including .bat & .ps1 files on Windows)
	p, err := hook.Find(cfg.HooksPath, "agent-shutdown")
	if err != nil {
		if !os.IsNotExist(err) {
			log.Error("Error finding agent-shutdown hook: %v", err)
		}
		// A missing hook is the normal case and intentionally silent.
		return
	}
	sh, err := shell.New()
	if err != nil {
		log.Error("creating shell for agent-shutdown hook: %v", err)
		return
	}
	// pipe from hook output to logger
	r, w := io.Pipe()
	sh.Logger = &shell.WriterLogger{Writer: w, Ansi: !cfg.NoColor} // for Promptf
	sh.Writer = w                                                  // for stdout+stderr
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		scan := bufio.NewScanner(r) // log each line separately
		log = log.WithFields(logger.StringField("hook", "agent-shutdown"))
		for scan.Scan() {
			log.Info(scan.Text())
		}
	}()
	// run agent-shutdown hook
	sh.Promptf("%s", p)
	if err = sh.RunScript(context.Background(), p, nil); err != nil {
		log.Error("agent-shutdown hook: %v", err)
	}
	w.Close() // goroutine scans until pipe is closed
	// wait for hook to finish and output to flush to logger
	wg.Wait()
}
|
package wifi
import (
"context"
"fmt"
"net/http"
"os"
"strings"
"time"
log "github.com/Sirupsen/logrus"
"github.com/grandcat/zeroconf"
"github.com/parnurzeal/gorequest"
"github.com/resin-io/edge-node-manager/config"
)
var (
	initialised  bool          // set once Initialise has successfully created the hotspot
	avahiTimeout time.Duration // how long scan() browses mDNS for; loaded from config in init()
)
// Host is a device discovered on the hotspot network via mDNS/zeroconf.
// Fields are parsed in scan() from the service instance name, expected to be
// "<deviceType>_<applicationUUID>_<id>", plus the advertised IPv4 address.
type Host struct {
	ip              string // first advertised IPv4 address of the device
	deviceType      string // first "_"-separated segment of the instance name
	applicationUUID string // second segment; matched against the id passed to Scan
	id              string // third segment; unique device identifier
}
// Initialise brings up the wifi hotspot used for device provisioning.
// It is idempotent: once the hotspot has been created, subsequent calls
// return nil immediately.
func Initialise() error {
	if initialised {
		return nil
	}
	log.Info("Initialising wifi hotspot")
	// Point D-Bus clients at the host's system bus socket (bind-mounted at /host)
	// so NetworkManager can be driven from inside the container.
	os.Setenv("DBUS_SYSTEM_BUS_ADDRESS", "unix:path=/host/run/dbus/system_bus_socket")
	ssid := config.GetHotspotSSID()
	password := config.GetHotspotPassword()
	// Drop any stale hotspot connections with this SSID from a previous run.
	if err := removeHotspotConnections(ssid); err != nil {
		return err
	}
	// If ethernet is connected, create the hotspot on the first wifi interface found
	// If ethernet is not connected, create the hotspot on the first FREE wifi interface found
	var device NmDevice
	if ethernet, err := isEthernetConnected(); err != nil {
		return err
	} else if ethernet {
		if device, err = getWifiDevice(); err != nil {
			return err
		}
	} else {
		if device, err = getFreeWifiDevice(); err != nil {
			return err
		}
	}
	if err := createHotspotConnection(device, ssid, password); err != nil {
		return err
	}
	log.WithFields(log.Fields{
		"SSID":     ssid,
		"Password": password,
		"Device":   device,
	}).Info("Initialised wifi hotspot")
	initialised = true
	return nil
}
// Cleanup is deliberately a no-op: the hotspot is left enabled on shutdown.
func Cleanup() error {
	// Return as we do not want to disable the hotspot
	return nil
}
// Scan returns the set of device IDs currently online that belong to the
// application identified by id.
func Scan(id string) (map[string]struct{}, error) {
	hosts, err := scan()
	if err != nil {
		return nil, err
	}
	online := make(map[string]struct{})
	for _, h := range hosts {
		if h.applicationUUID != id {
			continue
		}
		online[h.id] = struct{}{}
	}
	return online, nil
}
// Online reports whether the device with the given ID was seen in a scan.
func Online(id string) (bool, error) {
	hosts, err := scan()
	if err != nil {
		return false, err
	}
	for _, h := range hosts {
		if h.id == id {
			return true, nil
		}
	}
	return false, nil
}
// GetIP returns the IPv4 address of the device with the given ID, or an
// error if the device was not found in a scan.
func GetIP(id string) (string, error) {
	hosts, err := scan()
	if err != nil {
		return "", err
	}
	for _, h := range hosts {
		if h.id != id {
			continue
		}
		return h.ip, nil
	}
	return "", fmt.Errorf("Device offline")
}
// PostForm uploads the file at filePath to url as a multipart form field
// named "image" (filename "firmware.bin") and validates the response.
func PostForm(url, filePath string) error {
	request := gorequest.New()
	request.Post(url)
	request.Type("multipart")
	request.SendFile(filePath, "firmware.bin", "image")
	log.WithFields(log.Fields{
		"URL":    request.Url,
		"Method": request.Method,
	}).Info("Posting form")
	// Errors from End are handed to handleResp together with the response.
	resp, _, errs := request.End()
	return handleResp(resp, errs, http.StatusOK)
}
// init wires up package logging and loads the Avahi browse timeout from
// config before any scan can run. An unparseable timeout is fatal.
func init() {
	log.SetLevel(config.GetLogLevel())
	var err error
	if avahiTimeout, err = config.GetAvahiTimeout(); err != nil {
		log.WithFields(log.Fields{
			"Error": err,
		}).Fatal("Unable to load Avahi timeout")
	}
	log.Debug("Initialised wifi")
}
// scan browses mDNS ("_http._tcp" in domain "local") for avahiTimeout and
// returns the hosts discovered. Malformed entries are skipped rather than
// crashing the collector goroutine.
func scan() ([]Host, error) {
	ctx, cancel := context.WithTimeout(context.Background(), avahiTimeout)
	defer cancel()
	resolver, err := zeroconf.NewResolver(nil)
	if err != nil {
		return nil, err
	}
	entries := make(chan *zeroconf.ServiceEntry)
	var hosts []Host
	go func(entries <-chan *zeroconf.ServiceEntry, hosts *[]Host) {
		for entry := range entries {
			parts := strings.Split(entry.ServiceRecord.Instance, "_")
			// Guard against index out of range: an entry may have no IPv4
			// address, and the instance name may not follow the expected
			// "<deviceType>_<applicationUUID>_<id>" format.
			if len(entry.AddrIPv4) < 1 || len(parts) < 3 {
				continue
			}
			host := Host{
				ip:              entry.AddrIPv4[0].String(),
				deviceType:      parts[0],
				applicationUUID: parts[1],
				id:              parts[2],
			}
			*hosts = append(*hosts, host)
		}
	}(entries, &hosts)
	err = resolver.Browse(ctx, "_http._tcp", "local", entries)
	if err != nil {
		log.WithFields(log.Fields{
			"Error": err,
		}).Error("Unable to scan")
		return nil, err
	}
	// Wait for the browse timeout to elapse before returning results.
	// NOTE(review): hosts is read here while the collector goroutine may
	// still be draining entries — confirm the resolver closes the channel
	// before the context expires, or add synchronization.
	<-ctx.Done()
	return hosts, nil
}
// handleResp validates a gorequest result: any transport error wins, then
// the HTTP status must match the expected code exactly.
func handleResp(resp gorequest.Response, errs []error, expectedStatus int) error {
	if errs != nil {
		return errs[0]
	}
	if resp.StatusCode != expectedStatus {
		return fmt.Errorf("Invalid response received: %s", resp.Status)
	}
	log.WithFields(log.Fields{"Response": resp.Status}).Debug("Valid response received")
	return nil
}
Guard against index-out-of-range when parsing zeroconf scan entries
Change-type: patch
package wifi
import (
"context"
"fmt"
"net/http"
"os"
"strings"
"time"
log "github.com/Sirupsen/logrus"
"github.com/grandcat/zeroconf"
"github.com/parnurzeal/gorequest"
"github.com/resin-io/edge-node-manager/config"
)
var (
	initialised  bool          // set once Initialise has successfully created the hotspot
	avahiTimeout time.Duration // how long scan() browses mDNS for; loaded from config in init()
)
// Host is a device discovered on the hotspot network via mDNS/zeroconf.
// Fields are parsed in scan() from the service instance name, expected to be
// "<deviceType>_<applicationUUID>_<id>", plus the advertised IPv4 address.
type Host struct {
	ip              string // first advertised IPv4 address of the device
	deviceType      string // first "_"-separated segment of the instance name
	applicationUUID string // second segment; matched against the id passed to Scan
	id              string // third segment; unique device identifier
}
// Initialise brings up the wifi hotspot used for device provisioning.
// It is idempotent: once the hotspot has been created, subsequent calls
// return nil immediately.
func Initialise() error {
	if initialised {
		return nil
	}
	log.Info("Initialising wifi hotspot")
	// Point D-Bus clients at the host's system bus socket (bind-mounted at /host)
	// so NetworkManager can be driven from inside the container.
	os.Setenv("DBUS_SYSTEM_BUS_ADDRESS", "unix:path=/host/run/dbus/system_bus_socket")
	ssid := config.GetHotspotSSID()
	password := config.GetHotspotPassword()
	// Drop any stale hotspot connections with this SSID from a previous run.
	if err := removeHotspotConnections(ssid); err != nil {
		return err
	}
	// If ethernet is connected, create the hotspot on the first wifi interface found
	// If ethernet is not connected, create the hotspot on the first FREE wifi interface found
	var device NmDevice
	if ethernet, err := isEthernetConnected(); err != nil {
		return err
	} else if ethernet {
		if device, err = getWifiDevice(); err != nil {
			return err
		}
	} else {
		if device, err = getFreeWifiDevice(); err != nil {
			return err
		}
	}
	if err := createHotspotConnection(device, ssid, password); err != nil {
		return err
	}
	log.WithFields(log.Fields{
		"SSID":     ssid,
		"Password": password,
		"Device":   device,
	}).Info("Initialised wifi hotspot")
	initialised = true
	return nil
}
// Cleanup is deliberately a no-op: the hotspot is left enabled on shutdown.
func Cleanup() error {
	// Return as we do not want to disable the hotspot
	return nil
}
// Scan returns the set of device IDs currently online that belong to the
// application identified by id.
func Scan(id string) (map[string]struct{}, error) {
	discovered, err := scan()
	if err != nil {
		return nil, err
	}
	online := map[string]struct{}{}
	for _, device := range discovered {
		if device.applicationUUID == id {
			online[device.id] = struct{}{}
		}
	}
	return online, nil
}
// Online reports whether the device with the given ID was seen in a scan.
func Online(id string) (bool, error) {
	discovered, err := scan()
	if err != nil {
		return false, err
	}
	for _, device := range discovered {
		if device.id != id {
			continue
		}
		return true, nil
	}
	return false, nil
}
// GetIP returns the IPv4 address of the device with the given ID, or an
// error if the device was not found in a scan.
func GetIP(id string) (string, error) {
	discovered, err := scan()
	if err != nil {
		return "", err
	}
	for _, device := range discovered {
		if device.id == id {
			return device.ip, nil
		}
	}
	return "", fmt.Errorf("Device offline")
}
// PostForm uploads the file at filePath to url as a multipart form field
// named "image" (filename "firmware.bin") and validates the response.
func PostForm(url, filePath string) error {
	request := gorequest.New()
	request.Post(url)
	request.Type("multipart")
	request.SendFile(filePath, "firmware.bin", "image")
	log.WithFields(log.Fields{
		"URL":    request.Url,
		"Method": request.Method,
	}).Info("Posting form")
	// Errors from End are handed to handleResp together with the response.
	resp, _, errs := request.End()
	return handleResp(resp, errs, http.StatusOK)
}
// init wires up package logging and loads the Avahi browse timeout from
// config before any scan can run. An unparseable timeout is fatal.
func init() {
	log.SetLevel(config.GetLogLevel())
	var err error
	if avahiTimeout, err = config.GetAvahiTimeout(); err != nil {
		log.WithFields(log.Fields{
			"Error": err,
		}).Fatal("Unable to load Avahi timeout")
	}
	log.Debug("Initialised wifi")
}
// scan browses mDNS ("_http._tcp" in domain "local") for avahiTimeout and
// returns the hosts discovered. Malformed entries are skipped.
func scan() ([]Host, error) {
	ctx, cancel := context.WithTimeout(context.Background(), avahiTimeout)
	defer cancel()
	resolver, err := zeroconf.NewResolver(nil)
	if err != nil {
		return nil, err
	}
	entries := make(chan *zeroconf.ServiceEntry)
	var hosts []Host
	// Collect entries as they arrive; the loop ends when the channel closes.
	go func(entries <-chan *zeroconf.ServiceEntry, hosts *[]Host) {
		for entry := range entries {
			parts := strings.Split(entry.ServiceRecord.Instance, "_")
			// Skip entries with no IPv4 address or an instance name that
			// doesn't match "<deviceType>_<applicationUUID>_<id>".
			if len(entry.AddrIPv4) < 1 || len(parts) < 3 {
				continue
			}
			host := Host{
				ip:              entry.AddrIPv4[0].String(),
				deviceType:      parts[0],
				applicationUUID: parts[1],
				id:              parts[2],
			}
			*hosts = append(*hosts, host)
		}
	}(entries, &hosts)
	err = resolver.Browse(ctx, "_http._tcp", "local", entries)
	if err != nil {
		log.WithFields(log.Fields{
			"Error": err,
		}).Error("Unable to scan")
		return nil, err
	}
	// Wait for the browse timeout to elapse before returning results.
	// NOTE(review): hosts is read here while the collector goroutine may
	// still be draining entries — confirm the resolver closes the channel
	// before the context expires, or add synchronization.
	<-ctx.Done()
	return hosts, nil
}
// handleResp validates a gorequest result: any transport error wins, then
// the HTTP status must match the expected code exactly.
func handleResp(resp gorequest.Response, errs []error, expectedStatus int) error {
	if errs != nil {
		return errs[0]
	}
	if resp.StatusCode != expectedStatus {
		return fmt.Errorf("Invalid response received: %s", resp.Status)
	}
	log.WithFields(log.Fields{"Response": resp.Status}).Debug("Valid response received")
	return nil
}
|
// Copyright (c) 2016 Pani Networks
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package policy
import (
"fmt"
"github.com/romana/core/common"
"github.com/romana/core/tenant"
"log"
"strconv"
"strings"
"crypto/sha1"
"encoding/hex"
)
// PolicySvc implements the Romana policy service.
type PolicySvc struct {
	client *common.RestClient   // REST client for calling sibling services (e.g. tenant)
	config common.ServiceConfig // service configuration
	store  policyStore          // backing store for policies
}
// URL path components and query variable names used by the policy routes.
const (
	infoListPath       = "/info"
	findPath           = "/find"
	policiesPath       = "/policies"
	policyNameQueryVar = "policyName"
)
// Routes returns the REST routes exposed by the policy service:
// CRUD on /policies plus lookup by name under /find/policies.
func (policy *PolicySvc) Routes() common.Routes {
routes := common.Routes{
// POST /policies: add a new policy (body decodes into common.Policy).
common.Route{
Method: "POST",
Pattern: policiesPath,
Handler: policy.addPolicy,
MakeMessage: func() interface{} { return &common.Policy{} },
UseRequestToken: false,
},
// DELETE /policies: delete the policy identified by the request body.
common.Route{
Method: "DELETE",
Pattern: policiesPath,
Handler: policy.deletePolicyHandler,
MakeMessage: func() interface{} { return &common.Policy{} },
UseRequestToken: false,
},
// DELETE /policies/{policyID}: delete a policy by numeric ID.
common.Route{
Method: "DELETE",
Pattern: policiesPath + "/{policyID}",
Handler: policy.deletePolicyHandler,
MakeMessage: func() interface{} { return &common.Policy{} },
UseRequestToken: false,
},
// GET /policies: list all policies.
common.Route{
Method: "GET",
Pattern: policiesPath,
Handler: policy.listPolicies,
MakeMessage: nil,
UseRequestToken: false,
},
// GET /policies/{policyID}: fetch one policy by numeric ID.
common.Route{
Method: "GET",
Pattern: policiesPath + "/{policyID}",
Handler: policy.getPolicy,
MakeMessage: nil,
UseRequestToken: false,
},
// GET /find/policies/{policyName}: look a policy up by name.
common.Route{
Method: "GET",
Pattern: findPath + policiesPath + "/{policyName}",
Handler: policy.findPolicyByName,
},
}
return routes
}
// augmentEndpoint augments the endpoint provided with appropriate information
// by looking it up in the appropriate service: the tenant's and segment's
// network IDs are resolved from the tenant service when not already set.
func (policy *PolicySvc) augmentEndpoint(endpoint *common.Endpoint) error {
	tenantSvcUrl, err := policy.client.GetServiceUrl("tenant")
	if err != nil {
		return err
	}
	if endpoint.Peer == common.Wildcard {
		// If a wildcard is specified, there is nothing to augment
		return nil
	}
	log.Printf("Policy: Augmenting %#v", endpoint)
	// Code below tries to resolve tenant name into tenant_network_id if possible.
	//
	// TODO this will have to be changed once we implement
	// https://paninetworks.kanbanize.com/ctrl_board/3/cards/319/details
	ten := &tenant.Tenant{}
	if endpoint.TenantNetworkID == nil {
		if endpoint.TenantID != 0 {
			// A numeric tenant ID takes precedence: look the tenant up directly.
			tenantIDToUse := strconv.FormatUint(endpoint.TenantID, 10)
			tenantsUrl := fmt.Sprintf("%s/tenants/%s", tenantSvcUrl, tenantIDToUse)
			log.Printf("Policy: Looking tenant up at %s", tenantsUrl)
			err = policy.client.Get(tenantsUrl, ten)
			if err != nil {
				return err
			}
			endpoint.TenantNetworkID = &ten.NetworkID
		} else if endpoint.TenantExternalID != "" || endpoint.TenantName != "" {
			// Otherwise find the tenant by external ID and/or name.
			if endpoint.TenantExternalID != "" {
				ten.ExternalID = endpoint.TenantExternalID
			}
			if endpoint.TenantName != "" {
				ten.Name = endpoint.TenantName
			}
			err = policy.client.Find(ten, common.FindLast)
			if err != nil {
				return err
			}
			endpoint.TenantNetworkID = &ten.NetworkID
		}
	}
	if endpoint.SegmentNetworkID == nil {
		if ten == nil && (endpoint.SegmentID != 0 || endpoint.SegmentExternalID != "" || endpoint.SegmentName != "") {
			return common.NewError400("No tenant information specified, cannot look up segment.")
		}
		segment := &tenant.Segment{}
		if endpoint.SegmentID != 0 {
			segmentIDToUse := strconv.FormatUint(endpoint.SegmentID, 10)
			segmentsUrl := fmt.Sprintf("%s/tenants/%d/segments/%s", tenantSvcUrl, ten.ID, segmentIDToUse)
			log.Printf("Policy: Looking segment up at %s for %#v", segmentsUrl, endpoint)
			err = policy.client.Get(segmentsUrl, &segment)
			if err != nil {
				return err
			}
			endpoint.SegmentNetworkID = &segment.NetworkID
		} else if endpoint.SegmentExternalID != "" || endpoint.SegmentName != "" {
			segmentsUrl := fmt.Sprintf("%s/findLast/segments?tenant_id=%d&", tenantSvcUrl, ten.ID)
			if endpoint.SegmentExternalID != "" {
				// BUG FIX: this previously appended endpoint.TenantExternalID;
				// the segment query must filter on the segment's own external ID.
				segmentsUrl += "external_id=" + endpoint.SegmentExternalID + "&"
			}
			if endpoint.SegmentName != "" {
				segmentsUrl += "name=" + endpoint.SegmentName
			}
			log.Printf("Policy: Finding segments at %s for %#v (Tenant %#v %t)", segmentsUrl, endpoint, ten, ten == nil)
			err = policy.client.Get(segmentsUrl, &segment)
			if err != nil {
				return err
			}
			endpoint.SegmentNetworkID = &segment.NetworkID
		}
	}
	return nil
}
// augmentPolicy augments the provided policy with information gotten from
// various services: an internally generated external ID, datacenter
// information from the topology service, upper-cased rule protocols, and
// per-endpoint network IDs via augmentEndpoint.
func (policy *PolicySvc) augmentPolicy(policyDoc *common.Policy) error {
	// Get info from topology service
	log.Printf("Augmenting policy %s", policyDoc.Name)
	// TODO
	// Important! This should really be done in policy agent.
	// Only done here as temporary measure.
	externalId := makeId(policyDoc.AppliedTo, policyDoc.Name)
	log.Printf("Constructing internal policy name = %s", externalId)
	policyDoc.ExternalID = externalId
	topoUrl, err := policy.client.GetServiceUrl("topology")
	if err != nil {
		return err
	}
	// Query topology for data center information
	// TODO move this to root
	index := common.IndexResponse{}
	err = policy.client.Get(topoUrl, &index)
	if err != nil {
		return err
	}
	dcURL := index.Links.FindByRel("datacenter")
	dc := &common.Datacenter{}
	err = policy.client.Get(dcURL, dc)
	if err != nil {
		return err
	}
	log.Printf("Policy server received datacenter information from topology service: %+v\n", dc)
	policyDoc.Datacenter = dc
	// Normalize rule protocols in place; iterate by index so updates stick.
	// (Dropped the redundant blank identifier from each range clause.)
	for i := range policyDoc.Rules {
		rule := &policyDoc.Rules[i]
		rule.Protocol = strings.ToUpper(rule.Protocol)
	}
	for i := range policyDoc.AppliedTo {
		endpoint := &policyDoc.AppliedTo[i]
		err = policy.augmentEndpoint(endpoint)
		if err != nil {
			return err
		}
	}
	for i := range policyDoc.Peers {
		endpoint := &policyDoc.Peers[i]
		err = policy.augmentEndpoint(endpoint)
		if err != nil {
			return err
		}
	}
	return nil
}
// distributePolicy distributes policy to all agents.
// TODO how should error handling work here really?
//
// The policy is POSTed to the agent on every host returned by the root
// service; delivery continues past per-host failures, and all failures are
// aggregated into one 500 error at the end.
func (policy *PolicySvc) distributePolicy(policyDoc *common.Policy) error {
hosts, err := policy.client.ListHosts()
if err != nil {
return err
}
errStr := make([]string, 0)
for _, host := range hosts {
// TODO make schema configurable
url := fmt.Sprintf("http://%s:%d/policies", host.Ip, host.AgentPort)
log.Printf("Sending policy %s to agent at %s", policyDoc.Name, url)
result := make(map[string]interface{})
err = policy.client.Post(url, policyDoc, &result)
log.Printf("Agent at %s returned %v", host.Ip, result)
if err != nil {
// Collect the failure but keep distributing to the remaining hosts.
errStr = append(errStr, fmt.Sprintf("Error applying policy %d to host %s: %v. ", policyDoc.ID, host.Ip, err))
}
}
if len(errStr) > 0 {
return common.NewError500(errStr)
}
return nil
}
// getPolicy handles GET /policies/{policyID}, fetching one active policy by
// its numeric ID; a malformed ID yields a 404.
func (policy *PolicySvc) getPolicy(input interface{}, ctx common.RestContext) (interface{}, error) {
	idStr := ctx.PathVariables["policyID"]
	id, parseErr := strconv.ParseUint(idStr, 10, 64)
	if parseErr != nil {
		return nil, common.NewError404("policy", idStr)
	}
	// Second argument false: only active (non-deleted) policies are eligible.
	policyDoc, err := policy.store.getPolicy(id, false)
	log.Printf("Found policy for ID %d: %s (%v)", id, policyDoc, err)
	return policyDoc, err
}
// deletePolicyHandler handles DELETE on /policies and /policies/{policyID}.
// Exactly one of the path variable or the request body must identify the
// policy; it is resolved to an internal ID and deleted via deletePolicy.
func (policy *PolicySvc) deletePolicyHandler(input interface{}, ctx common.RestContext) (interface{}, error) {
	idStr := strings.TrimSpace(ctx.PathVariables["policyID"])
	if idStr == "" {
		if input == nil {
			return nil, common.NewError400("Request must either be to /policies/{policyID} or have a body.")
		}
		policyDoc := input.(*common.Policy)
		err := policyDoc.Validate()
		if err != nil {
			return nil, err
		}
		log.Printf("IN deletePolicyHandler with %v", policyDoc)
		id, err := policy.store.lookupPolicy(policyDoc.ExternalID)
		if err != nil {
			// TODO
			// Important! This should really be done in policy agent.
			// Only done here as temporary measure.
			externalId := makeId(policyDoc.AppliedTo, policyDoc.Name)
			log.Printf("Constructing internal policy name = %s", externalId)
			policyDoc.ExternalID = externalId
			id, err = policy.store.lookupPolicy(policyDoc.ExternalID)
		}
		log.Printf("Found %d / %v (%T) from external ID %s", id, err, err, policyDoc.ExternalID)
		if err != nil {
			return nil, err
		}
		return policy.deletePolicy(id)
	}
	if input != nil {
		// BUG FIX: the 400 error was previously constructed but never
		// returned, silently accepting a request with both an ID and a body.
		return nil, common.NewError400("Request must either be to /policies/{policyID} or have a body.")
	}
	id, err := strconv.ParseUint(idStr, 10, 64)
	if err != nil {
		return nil, common.NewError404("policy", idStr)
	}
	return policy.deletePolicy(id)
}
// deletePolicy deletes policy based the following algorithm:
//1. Mark the policy as "deleted" in the backend store.
//
// Full sequence: mark the policy inactive in the store, send a DELETE to the
// agent on every host (per-host failures are aggregated into one 500 error),
// and only when all agents succeed is the row actually removed from the store.
func (policy *PolicySvc) deletePolicy(id uint64) (interface{}, error) {
// TODO do we need this to be transactional or not ... case can be made for either.
err := policy.store.inactivatePolicy(id)
if err != nil {
return nil, err
}
// Second argument true: fetch the policy even though it is now inactive.
policyDoc, err := policy.store.getPolicy(id, true)
log.Printf("Found policy for ID %d: %s (%v)", id, policyDoc, err)
if err != nil {
return nil, err
}
hosts, err := policy.client.ListHosts()
if err != nil {
return nil, err
}
if policyDoc.ExternalID == "" {
// TODO
// Important! This should really be done in policy agent.
// Only done here as temporary measure.
externalId := makeId(policyDoc.AppliedTo, policyDoc.Name)
log.Printf("Constructing internal policy name = %s", externalId)
policyDoc.ExternalID = externalId
}
errStr := make([]string, 0)
for _, host := range hosts {
// TODO make schema configurable
url := fmt.Sprintf("http://%s:%d/policies", host.Ip, host.AgentPort)
result := make(map[string]interface{})
err = policy.client.Delete(url, policyDoc, result)
log.Printf("Agent at %s returned %v", host.Ip, result)
if err != nil {
// Collect the failure but keep deleting from the remaining hosts.
errStr = append(errStr, fmt.Sprintf("Error deleting policy %d (%s) from host %s: %v. ", id, policyDoc.Name, host.Ip, err))
}
}
if len(errStr) > 0 {
return nil, common.NewError500(errStr)
}
err = policy.store.deletePolicy(id)
if err != nil {
return nil, err
}
// Datacenter info is internal; strip it from the response payload.
policyDoc.Datacenter = nil
return policyDoc, nil
}
// listPolicies lists all policices.
func (policy *PolicySvc) listPolicies(input interface{}, ctx common.RestContext) (interface{}, error) {
	all, err := policy.store.listPolicies()
	if err != nil {
		return nil, err
	}
	// Strip internal datacenter details before returning the collection.
	for i := range all {
		all[i].Datacenter = nil
	}
	return all, nil
}
// findPolicyByName returns the first policy found corresponding
// to the given policy name. Policy names are not unique unlike
// policy ID's.
func (policy *PolicySvc) findPolicyByName(input interface{}, ctx common.RestContext) (interface{}, error) {
	policyName := ctx.PathVariables["policyName"]
	log.Printf("In findPolicy(%s)\n", policyName)
	if policyName == "" {
		return nil, common.NewError500(fmt.Sprintf("Expected policy name, got %s", policyName))
	}
	found, err := policy.store.findPolicyByName(policyName)
	if err != nil {
		return nil, err
	}
	// Datacenter details are internal; do not expose them in the response.
	found.Datacenter = nil
	return found, nil
}
// addPolicy stores the new policy and sends it to all agents.
//
// Flow: validate -> augment (external ID, datacenter, endpoint lookups) ->
// persist in the store -> distribute to every host's agent. NOTE(review):
// if distribution fails the policy remains stored — confirm whether that is
// intended or whether a rollback is needed.
func (policy *PolicySvc) addPolicy(input interface{}, ctx common.RestContext) (interface{}, error) {
policyDoc := input.(*common.Policy)
log.Printf("addPolicy(): Request for a new policy to be added: %s", policyDoc.Name)
err := policyDoc.Validate()
if err != nil {
log.Printf("addPolicy(): Error validating: %v", err)
return nil, err
}
log.Printf("addPolicy(): Request for a new policy to be added: %v", policyDoc)
err = policy.augmentPolicy(policyDoc)
if err != nil {
log.Printf("addPolicy(): Error augmenting: %v", err)
return nil, err
}
// Save it
err = policy.store.addPolicy(policyDoc)
if err != nil {
log.Printf("addPolicy(): Error storing: %v", err)
return nil, err
}
log.Printf("addPolicy(): Stored policy %s", policyDoc.Name)
err = policy.distributePolicy(policyDoc)
if err != nil {
log.Printf("addPolicy(): Error distributing: %v", err)
return nil, err
}
// Datacenter info is internal; strip it from the response payload.
policyDoc.Datacenter = nil
return policyDoc, nil
}
// Name provides name of this service.
func (policy *PolicySvc) Name() string {
	const serviceName = "policy"
	return serviceName
}
// SetConfig implements SetConfig function of the Service interface.
// Returns an error if cannot connect to the data store
func (policy *PolicySvc) SetConfig(config common.ServiceConfig) error {
// TODO this is a copy-paste of topology service, to refactor
log.Println(config)
policy.config = config
// storeConfig := config.ServiceSpecific["store"].(map[string]interface{})
log.Printf("Policy port: %d", config.Common.Api.Port)
policy.store = policyStore{}
// NOTE(review): this type assertion panics if the "store" section is
// missing or not a map — confirm the config is validated upstream.
storeConfig := config.ServiceSpecific["store"].(map[string]interface{})
policy.store.ServiceStore = &policy.store
return policy.store.SetConfig(storeConfig)
}
// createSchema is a no-op placeholder; schema creation is performed by the
// package-level CreateSchema function instead.
func (policy *PolicySvc) createSchema(overwrite bool) error {
return nil
}
// Run mainly runs Policy service.
//
// It builds a REST client against the root service at rootServiceUrl using
// the given credential, fetches this service's configuration, and starts
// the service via common.InitializeService.
func Run(rootServiceUrl string, cred *common.Credential) (*common.RestServiceInfo, error) {
clientConfig := common.GetDefaultRestClientConfig(rootServiceUrl)
clientConfig.Credential = cred
client, err := common.NewRestClient(clientConfig)
if err != nil {
return nil, err
}
policy := &PolicySvc{client: client}
config, err := client.GetServiceConfig(policy.Name())
if err != nil {
return nil, err
}
return common.InitializeService(policy, *config)
}
// Initialize connects the policy service to its backing store.
func (policy *PolicySvc) Initialize() error {
	log.Println("Entering policy.Initialize()")
	// Connect's result (nil on success) is the outcome of initialization.
	return policy.store.Connect()
}
// makeId generates a unique ID from the applied-to endpoints and the policy
// name: the inputs are newline-joined, hashed with SHA-1, and the first
// 6 bytes of the digest (12 hex characters) are returned.
func makeId(allowedTo []common.Endpoint, name string) string {
	data := name
	for _, e := range allowedTo {
		if data == "" {
			data = fmt.Sprintf("%s", e)
		} else {
			data = fmt.Sprintf("%s\n%s", data, e)
		}
	}
	hasher := sha1.New()
	hasher.Write([]byte(data))
	sum := hasher.Sum(nil)
	// Taking 6 bytes of the hash, which is 12 chars long in hex.
	// (Dropped the redundant fmt.Sprint around an already-string value.)
	return hex.EncodeToString(sum[:6])
}
// CreateSchema creates schema for Policy service.
//
// It fetches the service configuration from the root service, applies it to
// a fresh PolicySvc, and delegates to the store's CreateSchema; overwrite
// is passed through to the store.
func CreateSchema(rootServiceURL string, overwrite bool) error {
log.Println("In CreateSchema(", rootServiceURL, ",", overwrite, ")")
client, err := common.NewRestClient(common.GetDefaultRestClientConfig(rootServiceURL))
if err != nil {
return err
}
policySvc := &PolicySvc{}
config, err := client.GetServiceConfig(policySvc.Name())
if err != nil {
return err
}
err = policySvc.SetConfig(*config)
if err != nil {
return err
}
return policySvc.store.CreateSchema(overwrite)
}
Go fmt policy
// Copyright (c) 2016 Pani Networks
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package policy
import (
"crypto/sha1"
"encoding/hex"
"fmt"
"github.com/romana/core/common"
"github.com/romana/core/tenant"
"log"
"strconv"
"strings"
)
// Policy provides Policy service.
// PolicySvc stores network policies and pushes them out to the agent
// running on every host.
type PolicySvc struct {
// client talks to the other Romana services (root, tenant, topology, agents).
client *common.RestClient
config common.ServiceConfig
store policyStore
}
// REST path fragments and query-variable names used by Routes().
const (
infoListPath = "/info"
findPath = "/find"
policiesPath = "/policies"
policyNameQueryVar = "policyName"
)
// Routes returns the REST routes exposed by the policy service:
// CRUD on /policies plus lookup by name under /find/policies.
func (policy *PolicySvc) Routes() common.Routes {
routes := common.Routes{
// POST /policies: add a new policy (body decodes into common.Policy).
common.Route{
Method: "POST",
Pattern: policiesPath,
Handler: policy.addPolicy,
MakeMessage: func() interface{} { return &common.Policy{} },
UseRequestToken: false,
},
// DELETE /policies: delete the policy identified by the request body.
common.Route{
Method: "DELETE",
Pattern: policiesPath,
Handler: policy.deletePolicyHandler,
MakeMessage: func() interface{} { return &common.Policy{} },
UseRequestToken: false,
},
// DELETE /policies/{policyID}: delete a policy by numeric ID.
common.Route{
Method: "DELETE",
Pattern: policiesPath + "/{policyID}",
Handler: policy.deletePolicyHandler,
MakeMessage: func() interface{} { return &common.Policy{} },
UseRequestToken: false,
},
// GET /policies: list all policies.
common.Route{
Method: "GET",
Pattern: policiesPath,
Handler: policy.listPolicies,
MakeMessage: nil,
UseRequestToken: false,
},
// GET /policies/{policyID}: fetch one policy by numeric ID.
common.Route{
Method: "GET",
Pattern: policiesPath + "/{policyID}",
Handler: policy.getPolicy,
MakeMessage: nil,
UseRequestToken: false,
},
// GET /find/policies/{policyName}: look a policy up by name.
common.Route{
Method: "GET",
Pattern: findPath + policiesPath + "/{policyName}",
Handler: policy.findPolicyByName,
},
}
return routes
}
// augmentEndpoint augments the endpoint provided with appropriate information
// by looking it up in the appropriate service: the tenant's and segment's
// network IDs are resolved from the tenant service when not already set.
func (policy *PolicySvc) augmentEndpoint(endpoint *common.Endpoint) error {
	tenantSvcUrl, err := policy.client.GetServiceUrl("tenant")
	if err != nil {
		return err
	}
	if endpoint.Peer == common.Wildcard {
		// If a wildcard is specified, there is nothing to augment
		return nil
	}
	log.Printf("Policy: Augmenting %#v", endpoint)
	// Code below tries to resolve tenant name into tenant_network_id if possible.
	//
	// TODO this will have to be changed once we implement
	// https://paninetworks.kanbanize.com/ctrl_board/3/cards/319/details
	ten := &tenant.Tenant{}
	if endpoint.TenantNetworkID == nil {
		if endpoint.TenantID != 0 {
			// A numeric tenant ID takes precedence: look the tenant up directly.
			tenantIDToUse := strconv.FormatUint(endpoint.TenantID, 10)
			tenantsUrl := fmt.Sprintf("%s/tenants/%s", tenantSvcUrl, tenantIDToUse)
			log.Printf("Policy: Looking tenant up at %s", tenantsUrl)
			err = policy.client.Get(tenantsUrl, ten)
			if err != nil {
				return err
			}
			endpoint.TenantNetworkID = &ten.NetworkID
		} else if endpoint.TenantExternalID != "" || endpoint.TenantName != "" {
			// Otherwise find the tenant by external ID and/or name.
			if endpoint.TenantExternalID != "" {
				ten.ExternalID = endpoint.TenantExternalID
			}
			if endpoint.TenantName != "" {
				ten.Name = endpoint.TenantName
			}
			err = policy.client.Find(ten, common.FindLast)
			if err != nil {
				return err
			}
			endpoint.TenantNetworkID = &ten.NetworkID
		}
	}
	if endpoint.SegmentNetworkID == nil {
		if ten == nil && (endpoint.SegmentID != 0 || endpoint.SegmentExternalID != "" || endpoint.SegmentName != "") {
			return common.NewError400("No tenant information specified, cannot look up segment.")
		}
		segment := &tenant.Segment{}
		if endpoint.SegmentID != 0 {
			segmentIDToUse := strconv.FormatUint(endpoint.SegmentID, 10)
			segmentsUrl := fmt.Sprintf("%s/tenants/%d/segments/%s", tenantSvcUrl, ten.ID, segmentIDToUse)
			log.Printf("Policy: Looking segment up at %s for %#v", segmentsUrl, endpoint)
			err = policy.client.Get(segmentsUrl, &segment)
			if err != nil {
				return err
			}
			endpoint.SegmentNetworkID = &segment.NetworkID
		} else if endpoint.SegmentExternalID != "" || endpoint.SegmentName != "" {
			segmentsUrl := fmt.Sprintf("%s/findLast/segments?tenant_id=%d&", tenantSvcUrl, ten.ID)
			if endpoint.SegmentExternalID != "" {
				// BUG FIX: this previously appended endpoint.TenantExternalID;
				// the segment query must filter on the segment's own external ID.
				segmentsUrl += "external_id=" + endpoint.SegmentExternalID + "&"
			}
			if endpoint.SegmentName != "" {
				segmentsUrl += "name=" + endpoint.SegmentName
			}
			log.Printf("Policy: Finding segments at %s for %#v (Tenant %#v %t)", segmentsUrl, endpoint, ten, ten == nil)
			err = policy.client.Get(segmentsUrl, &segment)
			if err != nil {
				return err
			}
			endpoint.SegmentNetworkID = &segment.NetworkID
		}
	}
	return nil
}
// augmentPolicy augments the provided policy with information gotten from
// various services: an internally generated external ID, datacenter
// information from the topology service, upper-cased rule protocols, and
// per-endpoint network IDs via augmentEndpoint.
func (policy *PolicySvc) augmentPolicy(policyDoc *common.Policy) error {
	// Get info from topology service
	log.Printf("Augmenting policy %s", policyDoc.Name)
	// TODO
	// Important! This should really be done in policy agent.
	// Only done here as temporary measure.
	externalId := makeId(policyDoc.AppliedTo, policyDoc.Name)
	log.Printf("Constructing internal policy name = %s", externalId)
	policyDoc.ExternalID = externalId
	topoUrl, err := policy.client.GetServiceUrl("topology")
	if err != nil {
		return err
	}
	// Query topology for data center information
	// TODO move this to root
	index := common.IndexResponse{}
	err = policy.client.Get(topoUrl, &index)
	if err != nil {
		return err
	}
	dcURL := index.Links.FindByRel("datacenter")
	dc := &common.Datacenter{}
	err = policy.client.Get(dcURL, dc)
	if err != nil {
		return err
	}
	log.Printf("Policy server received datacenter information from topology service: %+v\n", dc)
	policyDoc.Datacenter = dc
	// Normalize rule protocols in place; iterate by index so updates stick.
	// (Dropped the redundant blank identifier from each range clause.)
	for i := range policyDoc.Rules {
		rule := &policyDoc.Rules[i]
		rule.Protocol = strings.ToUpper(rule.Protocol)
	}
	for i := range policyDoc.AppliedTo {
		endpoint := &policyDoc.AppliedTo[i]
		err = policy.augmentEndpoint(endpoint)
		if err != nil {
			return err
		}
	}
	for i := range policyDoc.Peers {
		endpoint := &policyDoc.Peers[i]
		err = policy.augmentEndpoint(endpoint)
		if err != nil {
			return err
		}
	}
	return nil
}
// distributePolicy distributes policy to all agents.
// TODO how should error handling work here really?
//
// The policy is POSTed to the agent on every host returned by the root
// service; delivery continues past per-host failures, and all failures are
// aggregated into one 500 error at the end.
func (policy *PolicySvc) distributePolicy(policyDoc *common.Policy) error {
hosts, err := policy.client.ListHosts()
if err != nil {
return err
}
errStr := make([]string, 0)
for _, host := range hosts {
// TODO make schema configurable
url := fmt.Sprintf("http://%s:%d/policies", host.Ip, host.AgentPort)
log.Printf("Sending policy %s to agent at %s", policyDoc.Name, url)
result := make(map[string]interface{})
err = policy.client.Post(url, policyDoc, &result)
log.Printf("Agent at %s returned %v", host.Ip, result)
if err != nil {
// Collect the failure but keep distributing to the remaining hosts.
errStr = append(errStr, fmt.Sprintf("Error applying policy %d to host %s: %v. ", policyDoc.ID, host.Ip, err))
}
}
if len(errStr) > 0 {
return common.NewError500(errStr)
}
return nil
}
// getPolicy handles GET /policies/{policyID}, fetching one active policy by
// its numeric ID; a malformed ID yields a 404.
func (policy *PolicySvc) getPolicy(input interface{}, ctx common.RestContext) (interface{}, error) {
	idStr := ctx.PathVariables["policyID"]
	id, parseErr := strconv.ParseUint(idStr, 10, 64)
	if parseErr != nil {
		return nil, common.NewError404("policy", idStr)
	}
	// Second argument false: only active (non-deleted) policies are eligible.
	policyDoc, err := policy.store.getPolicy(id, false)
	log.Printf("Found policy for ID %d: %s (%v)", id, policyDoc, err)
	return policyDoc, err
}
// deletePolicyHandler handles DELETE on /policies and /policies/{policyID}.
// Exactly one of the path variable or the request body must identify the
// policy; it is resolved to an internal ID and deleted via deletePolicy.
func (policy *PolicySvc) deletePolicyHandler(input interface{}, ctx common.RestContext) (interface{}, error) {
	idStr := strings.TrimSpace(ctx.PathVariables["policyID"])
	if idStr == "" {
		if input == nil {
			return nil, common.NewError400("Request must either be to /policies/{policyID} or have a body.")
		}
		policyDoc := input.(*common.Policy)
		err := policyDoc.Validate()
		if err != nil {
			return nil, err
		}
		log.Printf("IN deletePolicyHandler with %v", policyDoc)
		id, err := policy.store.lookupPolicy(policyDoc.ExternalID)
		if err != nil {
			// TODO
			// Important! This should really be done in policy agent.
			// Only done here as temporary measure.
			externalId := makeId(policyDoc.AppliedTo, policyDoc.Name)
			log.Printf("Constructing internal policy name = %s", externalId)
			policyDoc.ExternalID = externalId
			id, err = policy.store.lookupPolicy(policyDoc.ExternalID)
		}
		log.Printf("Found %d / %v (%T) from external ID %s", id, err, err, policyDoc.ExternalID)
		if err != nil {
			return nil, err
		}
		return policy.deletePolicy(id)
	}
	if input != nil {
		// BUG FIX: the 400 error was previously constructed but never
		// returned, silently accepting a request with both an ID and a body.
		return nil, common.NewError400("Request must either be to /policies/{policyID} or have a body.")
	}
	id, err := strconv.ParseUint(idStr, 10, 64)
	if err != nil {
		return nil, common.NewError404("policy", idStr)
	}
	return policy.deletePolicy(id)
}
// deletePolicy deletes policy based the following algorithm:
//1. Mark the policy as "deleted" in the backend store.
//
// Full sequence: mark the policy inactive in the store, send a DELETE to the
// agent on every host (per-host failures are aggregated into one 500 error),
// and only when all agents succeed is the row actually removed from the store.
func (policy *PolicySvc) deletePolicy(id uint64) (interface{}, error) {
// TODO do we need this to be transactional or not ... case can be made for either.
err := policy.store.inactivatePolicy(id)
if err != nil {
return nil, err
}
// Second argument true: fetch the policy even though it is now inactive.
policyDoc, err := policy.store.getPolicy(id, true)
log.Printf("Found policy for ID %d: %s (%v)", id, policyDoc, err)
if err != nil {
return nil, err
}
hosts, err := policy.client.ListHosts()
if err != nil {
return nil, err
}
if policyDoc.ExternalID == "" {
// TODO
// Important! This should really be done in policy agent.
// Only done here as temporary measure.
externalId := makeId(policyDoc.AppliedTo, policyDoc.Name)
log.Printf("Constructing internal policy name = %s", externalId)
policyDoc.ExternalID = externalId
}
errStr := make([]string, 0)
for _, host := range hosts {
// TODO make schema configurable
url := fmt.Sprintf("http://%s:%d/policies", host.Ip, host.AgentPort)
result := make(map[string]interface{})
err = policy.client.Delete(url, policyDoc, result)
log.Printf("Agent at %s returned %v", host.Ip, result)
if err != nil {
// Collect the failure but keep deleting from the remaining hosts.
errStr = append(errStr, fmt.Sprintf("Error deleting policy %d (%s) from host %s: %v. ", id, policyDoc.Name, host.Ip, err))
}
}
if len(errStr) > 0 {
return nil, common.NewError500(errStr)
}
err = policy.store.deletePolicy(id)
if err != nil {
return nil, err
}
// Datacenter info is internal; strip it from the response payload.
policyDoc.Datacenter = nil
return policyDoc, nil
}
// listPolicies lists all policices.
func (policy *PolicySvc) listPolicies(input interface{}, ctx common.RestContext) (interface{}, error) {
	all, err := policy.store.listPolicies()
	if err != nil {
		return nil, err
	}
	// Strip internal datacenter details before returning the collection.
	for i := range all {
		all[i].Datacenter = nil
	}
	return all, nil
}
// findPolicyByName returns the first policy found corresponding
// to the given policy name. Policy names are not unique unlike
// policy ID's.
func (policy *PolicySvc) findPolicyByName(input interface{}, ctx common.RestContext) (interface{}, error) {
	policyName := ctx.PathVariables["policyName"]
	log.Printf("In findPolicy(%s)\n", policyName)
	if policyName == "" {
		return nil, common.NewError500(fmt.Sprintf("Expected policy name, got %s", policyName))
	}
	found, err := policy.store.findPolicyByName(policyName)
	if err != nil {
		return nil, err
	}
	// Datacenter details are internal; do not expose them in the response.
	found.Datacenter = nil
	return found, nil
}
// addPolicy stores the new policy and sends it to all agents.
//
// Flow: validate -> augment (external ID, datacenter, endpoint lookups) ->
// persist in the store -> distribute to every host's agent. NOTE(review):
// if distribution fails the policy remains stored — confirm whether that is
// intended or whether a rollback is needed.
func (policy *PolicySvc) addPolicy(input interface{}, ctx common.RestContext) (interface{}, error) {
policyDoc := input.(*common.Policy)
log.Printf("addPolicy(): Request for a new policy to be added: %s", policyDoc.Name)
err := policyDoc.Validate()
if err != nil {
log.Printf("addPolicy(): Error validating: %v", err)
return nil, err
}
log.Printf("addPolicy(): Request for a new policy to be added: %v", policyDoc)
err = policy.augmentPolicy(policyDoc)
if err != nil {
log.Printf("addPolicy(): Error augmenting: %v", err)
return nil, err
}
// Save it
err = policy.store.addPolicy(policyDoc)
if err != nil {
log.Printf("addPolicy(): Error storing: %v", err)
return nil, err
}
log.Printf("addPolicy(): Stored policy %s", policyDoc.Name)
err = policy.distributePolicy(policyDoc)
if err != nil {
log.Printf("addPolicy(): Error distributing: %v", err)
return nil, err
}
// Datacenter info is internal; strip it from the response payload.
policyDoc.Datacenter = nil
return policyDoc, nil
}
// Name provides name of this service.
func (policy *PolicySvc) Name() string {
	const serviceName = "policy"
	return serviceName
}
// SetConfig implements SetConfig function of the Service interface.
// Returns an error if cannot connect to the data store
func (policy *PolicySvc) SetConfig(config common.ServiceConfig) error {
// TODO this is a copy-paste of topology service, to refactor
log.Println(config)
policy.config = config
// storeConfig := config.ServiceSpecific["store"].(map[string]interface{})
log.Printf("Policy port: %d", config.Common.Api.Port)
policy.store = policyStore{}
// NOTE(review): this type assertion panics if the "store" section is
// missing or not a map — confirm the config is validated upstream.
storeConfig := config.ServiceSpecific["store"].(map[string]interface{})
policy.store.ServiceStore = &policy.store
return policy.store.SetConfig(storeConfig)
}
// createSchema is a no-op placeholder; schema creation is performed by the
// package-level CreateSchema function instead.
func (policy *PolicySvc) createSchema(overwrite bool) error {
return nil
}
// Run mainly runs Policy service.
//
// It builds a REST client against the root service at rootServiceUrl using
// the given credential, fetches this service's configuration, and starts
// the service via common.InitializeService.
func Run(rootServiceUrl string, cred *common.Credential) (*common.RestServiceInfo, error) {
clientConfig := common.GetDefaultRestClientConfig(rootServiceUrl)
clientConfig.Credential = cred
client, err := common.NewRestClient(clientConfig)
if err != nil {
return nil, err
}
policy := &PolicySvc{client: client}
config, err := client.GetServiceConfig(policy.Name())
if err != nil {
return nil, err
}
return common.InitializeService(policy, *config)
}
// Initialize connects the policy service to its backing store.
func (policy *PolicySvc) Initialize() error {
	log.Println("Entering policy.Initialize()")
	// Connect's result (nil on success) is the outcome of initialization.
	return policy.store.Connect()
}
// makeId generates a unique ID from the applied-to endpoints and the policy
// name: the inputs are newline-joined, hashed with SHA-1, and the first
// 6 bytes of the digest (12 hex characters) are returned.
func makeId(allowedTo []common.Endpoint, name string) string {
	data := name
	for _, e := range allowedTo {
		if data == "" {
			data = fmt.Sprintf("%s", e)
		} else {
			data = fmt.Sprintf("%s\n%s", data, e)
		}
	}
	hasher := sha1.New()
	hasher.Write([]byte(data))
	sum := hasher.Sum(nil)
	// Taking 6 bytes of the hash, which is 12 chars long in hex.
	// (Dropped the redundant fmt.Sprint around an already-string value.)
	return hex.EncodeToString(sum[:6])
}
// CreateSchema creates schema for Policy service.
//
// It fetches the service configuration from the root service, applies it to
// a fresh PolicySvc, and delegates to the store's CreateSchema; overwrite
// is passed through to the store.
func CreateSchema(rootServiceURL string, overwrite bool) error {
log.Println("In CreateSchema(", rootServiceURL, ",", overwrite, ")")
client, err := common.NewRestClient(common.GetDefaultRestClientConfig(rootServiceURL))
if err != nil {
return err
}
policySvc := &PolicySvc{}
config, err := client.GetServiceConfig(policySvc.Name())
if err != nil {
return err
}
err = policySvc.SetConfig(*config)
if err != nil {
return err
}
return policySvc.store.CreateSchema(overwrite)
}
|
package shorty
import (
"crypto/rand"
"encoding/json"
"fmt"
"github.com/golang/glog"
"github.com/gorilla/mux"
omni_http "github.com/qorio/omni/http"
"io"
"io/ioutil"
"math"
"net/http"
"net/url"
"strings"
"time"
)
// ShortyAddRequest is the JSON body of POST /api/v1/url: the long URL to
// shorten plus optional platform routing rules.
type ShortyAddRequest struct {
LongUrl string `json:"longUrl"`
Rules []RoutingRule `json:"rules"`
}
// ShortyEndPointSettings configures an endpoint.
type ShortyEndPointSettings struct {
// Redirect404 is where unknown short codes are sent; empty means render an error page.
Redirect404 string
// GeoIpDbFilePath is the GeoIP database used by the request parser.
GeoIpDbFilePath string
}
// ShortyEndPoint ties the mux router, request parser and Shorty service together.
type ShortyEndPoint struct {
settings ShortyEndPointSettings
router *mux.Router
requestParser *omni_http.RequestParser
service Shorty
}
// secureCookie signs/verifies visitor cookies (uuid, last, per-short-code visit counts).
var secureCookie *omni_http.SecureCookie
// init sets up the package-wide secure cookie codec; startup fails hard if it
// cannot be created. NOTE(review): the key is an empty byte slice — confirm
// whether a real secret should be injected here.
func init() {
var err error
secureCookie, err = omni_http.NewSecureCookie([]byte(""), nil)
if err != nil {
glog.Warningln("Cannot initialize secure cookie!")
panic(err)
}
}
// NewApiEndPoint builds the full API endpoint: redirect, add-url, stats and
// install-event routes are registered on a fresh mux router. Construction
// fails only if the GeoIP request parser cannot be created.
func NewApiEndPoint(settings ShortyEndPointSettings, service Shorty) (api *ShortyEndPoint, err error) {
	var requestParser *omni_http.RequestParser
	requestParser, err = omni_http.NewRequestParser(settings.GeoIpDbFilePath)
	if err != nil {
		return nil, err
	}
	api = &ShortyEndPoint{
		settings:      settings,
		router:        mux.NewRouter(),
		requestParser: requestParser,
		service:       service,
	}
	// Short codes are alphanumeric with the length the service is configured for.
	regex := fmt.Sprintf("[A-Za-z0-9]{%d}", service.UrlLength())
	api.router.HandleFunc("/{id:"+regex+"}", api.RedirectHandler).Name("redirect")
	api.router.HandleFunc("/api/v1/url", api.ApiAddHandler).Methods("POST").Name("add")
	api.router.HandleFunc("/api/v1/stats/{id:"+regex+"}", api.StatsHandler).Methods("GET").Name("stats")
	api.router.HandleFunc("/api/v1/events/install/{scheme}/{app_uuid}",
		api.ReportInstallHandler).Methods("GET").Name("app_install")
	return api, nil
}
// NewRedirector builds a minimal endpoint serving only the redirect route
// for short codes of the service's configured length.
func NewRedirector(settings ShortyEndPointSettings, service Shorty) (api *ShortyEndPoint, err error) {
	var requestParser *omni_http.RequestParser
	requestParser, err = omni_http.NewRequestParser(settings.GeoIpDbFilePath)
	if err != nil {
		return nil, err
	}
	api = &ShortyEndPoint{
		settings:      settings,
		router:        mux.NewRouter(),
		requestParser: requestParser,
		service:       service,
	}
	regex := fmt.Sprintf("[A-Za-z0-9]{%d}", service.UrlLength())
	api.router.HandleFunc("/{id:"+regex+"}", api.RedirectHandler).Name("redirect")
	return api, nil
}
// ServeHTTP makes ShortyEndPoint an http.Handler by delegating to its router.
func (this *ShortyEndPoint) ServeHTTP(resp http.ResponseWriter, request *http.Request) {
this.router.ServeHTTP(resp, request)
}
// ApiAddHandler handles POST /api/v1/url: decodes a ShortyAddRequest from the
// body, creates a short URL via the service, and writes the result as JSON.
func (this *ShortyEndPoint) ApiAddHandler(resp http.ResponseWriter, req *http.Request) {
omni_http.SetCORSHeaders(resp)
body, err := ioutil.ReadAll(req.Body)
if err != nil {
renderJsonError(resp, req, err.Error(), http.StatusInternalServerError)
return
}
var message ShortyAddRequest
dec := json.NewDecoder(strings.NewReader(string(body)))
// Decode until EOF; a malformed document anywhere in the body is a 400.
for {
if err := dec.Decode(&message); err == io.EOF {
break
} else if err != nil {
renderJsonError(resp, req, err.Error(), http.StatusBadRequest)
return
}
}
if message.LongUrl == "" {
renderJsonError(resp, req, "No URL to shorten", http.StatusBadRequest)
return
}
shortUrl, err := this.service.ShortUrl(message.LongUrl, message.Rules)
if err != nil {
renderJsonError(resp, req, err.Error(), http.StatusBadRequest)
return
}
// Sanity-check that the new ID produces a valid redirect route.
if _, err := this.router.Get("redirect").URL("id", shortUrl.Id); err != nil {
renderJsonError(resp, req, err.Error(), http.StatusBadRequest)
return
}
buff, err := json.Marshal(shortUrl)
if err != nil {
renderJsonError(resp, req, "Malformed short url rule", http.StatusInternalServerError)
return
}
resp.Write(buff)
}
// processCookies reads the visitor's uuid, last-visited short code and
// per-short-code visit count from signed cookies, increments the visit
// count, and writes the updated cookies back. It returns the new visit
// count, whether all cookie writes succeeded, the previous short code
// visited, and the visitor's uuid (minting one if absent).
func processCookies(resp http.ResponseWriter, req *http.Request, shortUrl *ShortUrl) (visits int, cookied bool, last, uuid string) {
	secureCookie.ReadCookie(req, "uuid", &uuid)
	secureCookie.ReadCookie(req, "last", &last)
	secureCookie.ReadCookie(req, shortUrl.Id, &visits)
	var cookieError error
	if uuid == "" {
		if uuid, _ = newUUID(); uuid != "" {
			cookieError = secureCookie.SetCookie(resp, "uuid", uuid)
		}
	}
	visits++
	// BUG FIX: previously each SetCookie result overwrote cookieError, so a
	// failure in an earlier call was masked by a later success and `cookied`
	// reflected only the final write. Keep the first error instead.
	if err := secureCookie.SetCookie(resp, "last", shortUrl.Id); cookieError == nil {
		cookieError = err
	}
	if err := secureCookie.SetCookie(resp, shortUrl.Id, visits); cookieError == nil {
		cookieError = err
	}
	cookied = cookieError == nil
	return
}
func (this *ShortyEndPoint) RedirectHandler(resp http.ResponseWriter, req *http.Request) {
vars := mux.Vars(req)
shortUrl, err := this.service.Find(vars["id"])
if err != nil {
renderError(resp, req, err.Error(), http.StatusInternalServerError)
return
} else if shortUrl == nil {
if this.settings.Redirect404 != "" {
originalUrl, err := this.router.Get("redirect").URL("id", vars["id"])
if err != nil {
renderError(resp, req, err.Error(), http.StatusInternalServerError)
return
}
url404 := strings.Replace(this.settings.Redirect404,
"$origURL", url.QueryEscape(fmt.Sprintf("http://%s%s", req.Host, originalUrl.String())), 1)
http.Redirect(resp, req, url404, http.StatusTemporaryRedirect)
return
}
renderError(resp, req, "No URL was found with that shorty code", http.StatusNotFound)
return
}
var destination string = shortUrl.Destination
// If there are platform-dependent routing
if len(shortUrl.Rules) > 0 {
userAgent := omni_http.ParseUserAgent(req)
for _, rule := range shortUrl.Rules {
if dest, match := rule.Match(userAgent); match {
destination = dest // default
// check to see if the rule specifies app url scheme
// if yes, then check cookie by the same key exists
if rule.AppUrlScheme != "" {
timestamp := int64(0)
secureCookie.ReadCookie(req, rule.AppUrlScheme, ×tamp)
if timestamp == 0 {
destination = rule.AppStoreUrl
}
}
break
}
}
}
omni_http.SetNoCachingHeaders(resp)
visits, cookied, last, userId := processCookies(resp, req, shortUrl)
http.Redirect(resp, req, destination, http.StatusMovedPermanently)
// Record stats asynchronously
go func() {
origin, geoParseErr := this.requestParser.Parse(req)
origin.Cookied = cookied
origin.Visits = visits
origin.LastVisit = last
origin.Destination = destination
origin.ShortCode = shortUrl.Id
if geoParseErr != nil {
glog.Warningln("can-not-determine-location", geoParseErr)
}
glog.Infoln(
"uuid:", userId, "url:", shortUrl.Id, "send-to:", destination,
"ip:", origin.Ip, "mobile:", origin.UserAgent.Mobile,
"platform:", origin.UserAgent.Platform, "os:", origin.UserAgent.OS, "make:", origin.UserAgent.Make,
"browser:", origin.UserAgent.Browser, "version:", origin.UserAgent.BrowserVersion,
"location:", *origin.Location,
"useragent:", origin.UserAgent.Header,
"cookied", cookied)
this.service.PublishDecode(&DecodeEvent{
Origin: origin,
Destination: destination,
ShortyUUID: userId,
})
shortUrl.Record(origin, visits > 1)
}()
}
// addQueryParam appends key=value to url, choosing '?' or '&' depending on
// whether the url already carries a query string.
func addQueryParam(url, key, value string) string {
	sep := "?"
	if strings.ContainsRune(url, '?') {
		sep = "&"
	}
	return url + sep + key + "=" + value
}
// ReportInstallHandler handles the app-install callback. Keyed by the app's
// custom url scheme and an app-supplied uuid, it sets a cookie marking the
// app as installed on this device and redirects back into the app: to the
// destination of the last short link the user viewed, or to <scheme>://404
// when there is none. An install event is published asynchronously.
func (this *ShortyEndPoint) ReportInstallHandler(resp http.ResponseWriter, req *http.Request) {
	omni_http.SetNoCachingHeaders(resp)
	vars := mux.Vars(req)
	// Two parameters
	// 1. app custom url scheme -> this allows us to key by mobile app per user
	// 2. some uuid for the app -> this tracks a user. on ios, idfa uuid is used.
	customUrlScheme := vars["scheme"]
	if customUrlScheme == "" {
		renderError(resp, req, "No app customer url scheme", http.StatusBadRequest)
		return
	}
	appUuid := vars["app_uuid"]
	if appUuid == "" {
		renderError(resp, req, "No uuid", http.StatusBadRequest)
		return
	}
	// user key for tracking in our world (currently only logged)
	userKey := fmt.Sprintf("%s/%s", appUuid, customUrlScheme)
	glog.Infoln("User key = ", userKey)
	// read the cookies that have been set before when user clicked a short link
	// this allows us to send a redirect as appropriate; otherwise, send a app url with 404
	// a unique user identifier -- generated by us and the lastViewed short code
	userId, lastViewed := "", ""
	secureCookie.ReadCookie(req, "uuid", &userId)
	secureCookie.ReadCookie(req, "last", &lastViewed)
	// set a cookie to note that we know the app has been installed on the device
	secureCookie.SetCookie(resp, customUrlScheme, time.Now().Unix())
	var shortUrl *ShortUrl
	var err error
	var destination string = customUrlScheme + "://404"
	// No previously viewed short link: deep-link to the app's 404 screen.
	if lastViewed == "" {
		destination = addQueryParam(destination, "cookie", userId)
		http.Redirect(resp, req, destination, http.StatusMovedPermanently)
		goto stat
	}
	shortUrl, err = this.service.Find(lastViewed)
	if err != nil {
		renderError(resp, req, err.Error(), http.StatusInternalServerError)
		return
	} else if shortUrl == nil {
		// Stale cookie: the last-viewed code no longer resolves.
		destination = addQueryParam(destination, "cookie", userId)
		http.Redirect(resp, req, destination, http.StatusMovedPermanently)
		goto stat
	}
	// If there are platform-dependent routing rules, first match wins.
	if len(shortUrl.Rules) > 0 {
		userAgent := omni_http.ParseUserAgent(req)
		for _, rule := range shortUrl.Rules {
			if dest, match := rule.Match(userAgent); match {
				destination = dest
				break
			}
		}
	}
	destination = addQueryParam(destination, "cookie", userId)
	http.Redirect(resp, req, destination, http.StatusMovedPermanently)
stat: // Record stats asynchronously
	go func() {
		origin, geoParseErr := this.requestParser.Parse(req)
		origin.Destination = destination
		if shortUrl != nil {
			origin.ShortCode = shortUrl.Id
		}
		if geoParseErr != nil {
			glog.Warningln("can-not-determine-location", geoParseErr)
		}
		glog.Infoln("send-to:", destination,
			"ip:", origin.Ip, "mobile:", origin.UserAgent.Mobile,
			"platform:", origin.UserAgent.Platform, "os:", origin.UserAgent.OS, "make:", origin.UserAgent.Make,
			"browser:", origin.UserAgent.Browser, "version:", origin.UserAgent.BrowserVersion,
			"location:", *origin.Location,
			"useragent:", origin.UserAgent.Header)
		this.service.PublishInstall(&InstallEvent{
			Origin:       origin,
			Destination:  destination,
			AppUrlScheme: customUrlScheme,
			AppUUID:      appUuid,
			ShortyUUID:   userId,
		})
	}()
}
// StatsSummary is the JSON payload returned by StatsHandler for a short url.
type StatsSummary struct {
	Id      string      `json:"id"`      // short code
	Created string      `json:"when"`    // human-readable age (see relativeTime)
	Hits    int         `json:"hits"`    // total visit count
	Uniques int         `json:"uniques"` // unique visitor count
	Summary OriginStats `json:"summary"` // per-origin breakdown
	Config  ShortUrl    `json:"config"`  // the short url record itself
}
// StatsHandler handles GET stats requests: it looks up the short url by its
// code and writes a StatsSummary (hits, uniques, per-origin breakdown) as
// JSON with CORS headers set.
func (this *ShortyEndPoint) StatsHandler(resp http.ResponseWriter, req *http.Request) {
	omni_http.SetCORSHeaders(resp)
	vars := mux.Vars(req)
	shortyUrl, err := this.service.Find(vars["id"])
	if err != nil {
		renderError(resp, req, err.Error(), http.StatusInternalServerError)
		return
	} else if shortyUrl == nil {
		renderError(resp, req, "No URL was found with short code", http.StatusNotFound)
		return
	}
	hits, err := shortyUrl.Hits()
	if err != nil {
		renderError(resp, req, err.Error(), http.StatusInternalServerError)
		return
	}
	uniques, err := shortyUrl.Uniques()
	if err != nil {
		renderError(resp, req, err.Error(), http.StatusInternalServerError)
		return
	}
	originStats, err := shortyUrl.Sources(true)
	if err != nil {
		renderError(resp, req, err.Error(), http.StatusInternalServerError)
		return
	}
	summary := StatsSummary{
		Id:      shortyUrl.Id,
		Created: relativeTime(time.Now().Sub(shortyUrl.Created)),
		Hits:    hits,
		Uniques: uniques,
		Summary: originStats,
		Config:  *shortyUrl,
	}
	buff, err := json.Marshal(summary)
	if err != nil {
		renderJsonError(resp, req, "Malformed summary", http.StatusInternalServerError)
		return
	}
	resp.Write(buff)
}
func relativeTime(duration time.Duration) string {
hours := int64(math.Abs(duration.Hours()))
minutes := int64(math.Abs(duration.Minutes()))
when := ""
switch {
case hours >= (365 * 24):
when = "Over an year ago"
case hours > (30 * 24):
when = fmt.Sprintf("%d months ago", int64(hours/(30*24)))
case hours == (30 * 24):
when = "a month ago"
case hours > 24:
when = fmt.Sprintf("%d days ago", int64(hours/24))
case hours == 24:
when = "yesterday"
case hours >= 2:
when = fmt.Sprintf("%d hours ago", hours)
case hours > 1:
when = "over an hour ago"
case hours == 1:
when = "an hour ago"
case minutes >= 2:
when = fmt.Sprintf("%d minutes ago", minutes)
case minutes > 1:
when = "a minute ago"
default:
when = "just now"
}
return when
}
func renderJsonError(resp http.ResponseWriter, req *http.Request, message string, code int) (err error) {
resp.WriteHeader(code)
resp.Write([]byte(fmt.Sprintf("{\"error\":\"%s\"}", message)))
return
}
// renderError is intended to render a templated HTML error page; the
// implementation is currently disabled, so it is a no-op that always returns
// nil. NOTE(review): callers on error paths therefore emit no status code or
// body at all — confirm this is intentional.
func renderError(resp http.ResponseWriter, req *http.Request, message string, code int) (err error) {
	/*
		body, err := render(req, "layout", "error", map[string]string{"Error": message})
		if err != nil {
			http.Error(resp, err.Error(), http.StatusInternalServerError)
			return
		}
		resp.WriteHeader(code)
		resp.Write(body)
	*/
	return nil
}
// newUUID generates a random version-4 UUID string per RFC 4122.
func newUUID() (string, error) {
	var b [16]byte
	if _, err := io.ReadFull(rand.Reader, b[:]); err != nil {
		return "", err
	}
	b[8] = (b[8] &^ 0xc0) | 0x80 // variant bits; see RFC 4122 section 4.1.1
	b[6] = (b[6] &^ 0xf0) | 0x40 // version 4 (pseudo-random); see section 4.1.3
	return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]), nil
}
Cookie changes
package shorty
import (
"crypto/rand"
"encoding/json"
"fmt"
"github.com/golang/glog"
"github.com/gorilla/mux"
omni_http "github.com/qorio/omni/http"
"io"
"io/ioutil"
"math"
"net/http"
"net/url"
"strings"
"time"
)
// ShortyAddRequest is the JSON payload for creating a short url.
type ShortyAddRequest struct {
	LongUrl string        `json:"longUrl"` // destination url to shorten
	Rules   []RoutingRule `json:"rules"`   // optional platform routing rules
}

// ShortyEndPointSettings configures an endpoint instance.
type ShortyEndPointSettings struct {
	Redirect404     string // optional url for unknown codes; may contain $origURL
	GeoIpDbFilePath string // path to the GeoIP database used by the request parser
}

// ShortyEndPoint wires the mux router, request parser and Shorty service
// into an http.Handler.
type ShortyEndPoint struct {
	settings      ShortyEndPointSettings
	router        *mux.Router
	requestParser *omni_http.RequestParser
	service       Shorty
}
// secureCookie signs the tracking cookies read/written by the handlers.
var secureCookie *omni_http.SecureCookie

// init constructs the package-wide SecureCookie; failure to do so is fatal.
// NOTE(review): the key is empty ([]byte("")) — confirm a real secret is
// supplied somewhere, otherwise cookies may be forgeable.
func init() {
	var err error
	secureCookie, err = omni_http.NewSecureCookie([]byte(""), nil)
	if err != nil {
		glog.Warningln("Cannot initialize secure cookie!")
		panic(err)
	}
}
// NewApiEndPoint builds the full API endpoint: redirect, add-url, stats and
// app-install routes. Short codes are matched by an alphanumeric regex whose
// length is dictated by the service.
func NewApiEndPoint(settings ShortyEndPointSettings, service Shorty) (api *ShortyEndPoint, err error) {
	if requestParser, err := omni_http.NewRequestParser(settings.GeoIpDbFilePath); err == nil {
		api = &ShortyEndPoint{
			settings:      settings,
			router:        mux.NewRouter(),
			requestParser: requestParser,
			service:       service,
		}
		regex := fmt.Sprintf("[A-Za-z0-9]{%d}", service.UrlLength())
		api.router.HandleFunc("/{id:"+regex+"}", api.RedirectHandler).Name("redirect")
		api.router.HandleFunc("/api/v1/url", api.ApiAddHandler).Methods("POST").Name("add")
		api.router.HandleFunc("/api/v1/stats/{id:"+regex+"}", api.StatsHandler).Methods("GET").Name("stats")
		api.router.HandleFunc("/api/v1/events/install/{scheme}/{app_uuid}",
			api.ReportInstallHandler).Methods("GET").Name("app_install")
		return api, nil
	} else {
		return nil, err
	}
}
// NewRedirector builds a minimal endpoint exposing only the redirect route —
// for deployments that serve short links but not the management API.
func NewRedirector(settings ShortyEndPointSettings, service Shorty) (api *ShortyEndPoint, err error) {
	if requestParser, err := omni_http.NewRequestParser(settings.GeoIpDbFilePath); err == nil {
		api = &ShortyEndPoint{
			settings:      settings,
			router:        mux.NewRouter(),
			requestParser: requestParser,
			service:       service,
		}
		regex := fmt.Sprintf("[A-Za-z0-9]{%d}", service.UrlLength())
		api.router.HandleFunc("/{id:"+regex+"}", api.RedirectHandler).Name("redirect")
		return api, nil
	} else {
		return nil, err
	}
}
// ServeHTTP implements http.Handler by delegating to the mux router.
func (this *ShortyEndPoint) ServeHTTP(resp http.ResponseWriter, request *http.Request) {
	this.router.ServeHTTP(resp, request)
}
// ApiAddHandler handles the add-url POST: it decodes a ShortyAddRequest from
// the request body, creates a short url via the service, and writes the new
// record back as JSON.
func (this *ShortyEndPoint) ApiAddHandler(resp http.ResponseWriter, req *http.Request) {
	omni_http.SetCORSHeaders(resp)
	body, err := ioutil.ReadAll(req.Body)
	if err != nil {
		renderJsonError(resp, req, err.Error(), http.StatusInternalServerError)
		return
	}
	var message ShortyAddRequest
	// Decode until EOF; if the body carries several JSON documents the last
	// one's fields win.
	dec := json.NewDecoder(strings.NewReader(string(body)))
	for {
		if err := dec.Decode(&message); err == io.EOF {
			break
		} else if err != nil {
			renderJsonError(resp, req, err.Error(), http.StatusBadRequest)
			return
		}
	}
	if message.LongUrl == "" {
		renderJsonError(resp, req, "No URL to shorten", http.StatusBadRequest)
		return
	}
	shortUrl, err := this.service.ShortUrl(message.LongUrl, message.Rules)
	if err != nil {
		renderJsonError(resp, req, err.Error(), http.StatusBadRequest)
		return
	}
	// Sanity check: the redirect route must be able to produce a url for the
	// new id (the produced url itself is discarded).
	if _, err := this.router.Get("redirect").URL("id", shortUrl.Id); err != nil {
		renderJsonError(resp, req, err.Error(), http.StatusBadRequest)
		return
	}
	buff, err := json.Marshal(shortUrl)
	if err != nil {
		renderJsonError(resp, req, "Malformed short url rule", http.StatusInternalServerError)
		return
	}
	resp.Write(buff)
}
// processCookies reads the visitor-tracking cookies from the request and
// refreshes them on the response.
//
// It returns the updated visit count for this short url, whether all cookie
// writes succeeded (cookied), the previously viewed short code (last), and
// the visitor uuid (minted here when absent).
func processCookies(resp http.ResponseWriter, req *http.Request, shortUrl *ShortUrl) (visits int, cookied bool, last, uuid string) {
	// Best-effort reads: a missing or invalid cookie leaves the zero value.
	secureCookie.ReadCookie(req, "uuid", &uuid)
	secureCookie.ReadCookie(req, "last", &last)
	secureCookie.ReadCookie(req, shortUrl.Id, &visits)
	var cookieError error
	if uuid == "" {
		// First visit from this client: mint a uuid and persist it.
		if uuid, _ = newUUID(); uuid != "" {
			cookieError = secureCookie.SetCookie(resp, "uuid", uuid)
		}
	}
	visits++
	// BUG FIX: previously each assignment clobbered cookieError, so only the
	// final SetCookie result determined `cookied`. Keep the first failure from
	// any of the writes instead.
	if err := secureCookie.SetCookie(resp, "last", shortUrl.Id); err != nil {
		cookieError = err
	}
	if err := secureCookie.SetCookie(resp, shortUrl.Id, visits); err != nil {
		cookieError = err
	}
	cookied = cookieError == nil
	return
}
func (this *ShortyEndPoint) RedirectHandler(resp http.ResponseWriter, req *http.Request) {
vars := mux.Vars(req)
shortUrl, err := this.service.Find(vars["id"])
if err != nil {
renderError(resp, req, err.Error(), http.StatusInternalServerError)
return
} else if shortUrl == nil {
if this.settings.Redirect404 != "" {
originalUrl, err := this.router.Get("redirect").URL("id", vars["id"])
if err != nil {
renderError(resp, req, err.Error(), http.StatusInternalServerError)
return
}
url404 := strings.Replace(this.settings.Redirect404,
"$origURL", url.QueryEscape(fmt.Sprintf("http://%s%s", req.Host, originalUrl.String())), 1)
http.Redirect(resp, req, url404, http.StatusTemporaryRedirect)
return
}
renderError(resp, req, "No URL was found with that shorty code", http.StatusNotFound)
return
}
var destination string = shortUrl.Destination
// If there are platform-dependent routing
if len(shortUrl.Rules) > 0 {
userAgent := omni_http.ParseUserAgent(req)
for _, rule := range shortUrl.Rules {
if dest, match := rule.Match(userAgent); match {
destination = dest // default
// check to see if the rule specifies app url scheme
// if yes, then check cookie by the same key exists
glog.Infoln(">>>> APP URL SCHEME", rule.AppUrlScheme)
if rule.AppUrlScheme != "" {
timestamp := int64(0)
secureCookie.ReadCookie(req, rule.AppUrlScheme, ×tamp)
glog.Infoln(">>>>> READ TIMESTAMP COOKIE", timestamp)
if timestamp == 0 {
destination = rule.AppStoreUrl
}
}
break
}
}
}
omni_http.SetNoCachingHeaders(resp)
visits, cookied, last, userId := processCookies(resp, req, shortUrl)
http.Redirect(resp, req, destination, http.StatusMovedPermanently)
// Record stats asynchronously
go func() {
origin, geoParseErr := this.requestParser.Parse(req)
origin.Cookied = cookied
origin.Visits = visits
origin.LastVisit = last
origin.Destination = destination
origin.ShortCode = shortUrl.Id
if geoParseErr != nil {
glog.Warningln("can-not-determine-location", geoParseErr)
}
glog.Infoln(
"uuid:", userId, "url:", shortUrl.Id, "send-to:", destination,
"ip:", origin.Ip, "mobile:", origin.UserAgent.Mobile,
"platform:", origin.UserAgent.Platform, "os:", origin.UserAgent.OS, "make:", origin.UserAgent.Make,
"browser:", origin.UserAgent.Browser, "version:", origin.UserAgent.BrowserVersion,
"location:", *origin.Location,
"useragent:", origin.UserAgent.Header,
"cookied", cookied)
this.service.PublishDecode(&DecodeEvent{
Origin: origin,
Destination: destination,
ShortyUUID: userId,
})
shortUrl.Record(origin, visits > 1)
}()
}
// addQueryParam returns url with key=value appended, using '&' when a query
// string is already present and '?' otherwise.
func addQueryParam(url, key, value string) string {
	if !strings.ContainsRune(url, '?') {
		return url + "?" + key + "=" + value
	}
	return url + "&" + key + "=" + value
}
// ReportInstallHandler handles the app-install callback. Keyed by the app's
// custom url scheme and an app-supplied uuid, it sets a timestamp cookie
// marking the app as installed on this device and redirects back into the
// app: to the destination of the last short link the user viewed, or to
// <scheme>://404 when there is none. An install event is published
// asynchronously.
func (this *ShortyEndPoint) ReportInstallHandler(resp http.ResponseWriter, req *http.Request) {
	omni_http.SetNoCachingHeaders(resp)
	vars := mux.Vars(req)
	// Two parameters
	// 1. app custom url scheme -> this allows us to key by mobile app per user
	// 2. some uuid for the app -> this tracks a user. on ios, idfa uuid is used.
	customUrlScheme := vars["scheme"]
	if customUrlScheme == "" {
		renderError(resp, req, "No app customer url scheme", http.StatusBadRequest)
		return
	}
	appUuid := vars["app_uuid"]
	if appUuid == "" {
		renderError(resp, req, "No uuid", http.StatusBadRequest)
		return
	}
	// read the cookies that have been set before when user clicked a short link
	// this allows us to send a redirect as appropriate; otherwise, send a app url with 404
	// a unique user identifier -- generated by us and the lastViewed short code
	userId, lastViewed := "", ""
	secureCookie.ReadCookie(req, "uuid", &userId)
	secureCookie.ReadCookie(req, "last", &lastViewed)
	// set a cookie to note that we know the app has been installed on the device
	timestamp := time.Now().Unix()
	setCookieErr := secureCookie.SetCookie(resp, customUrlScheme, timestamp)
	glog.Infoln(">>>>> cookied ", customUrlScheme, timestamp, setCookieErr)
	var shortUrl *ShortUrl
	var err error
	var destination string = customUrlScheme + "://404"
	// No previously viewed short link: deep-link to the app's 404 screen.
	if lastViewed == "" {
		destination = addQueryParam(destination, "cookie", userId)
		http.Redirect(resp, req, destination, http.StatusMovedPermanently)
		goto stat
	}
	shortUrl, err = this.service.Find(lastViewed)
	if err != nil {
		renderError(resp, req, err.Error(), http.StatusInternalServerError)
		return
	} else if shortUrl == nil {
		// Stale cookie: the last-viewed code no longer resolves.
		destination = addQueryParam(destination, "cookie", userId)
		http.Redirect(resp, req, destination, http.StatusMovedPermanently)
		goto stat
	}
	// If there are platform-dependent routing rules, first match wins.
	if len(shortUrl.Rules) > 0 {
		userAgent := omni_http.ParseUserAgent(req)
		for _, rule := range shortUrl.Rules {
			if dest, match := rule.Match(userAgent); match {
				destination = dest
				break
			}
		}
	}
	destination = addQueryParam(destination, "cookie", userId)
	http.Redirect(resp, req, destination, http.StatusMovedPermanently)
stat: // Record stats asynchronously
	go func() {
		origin, geoParseErr := this.requestParser.Parse(req)
		origin.Destination = destination
		if shortUrl != nil {
			origin.ShortCode = shortUrl.Id
		}
		if geoParseErr != nil {
			glog.Warningln("can-not-determine-location", geoParseErr)
		}
		glog.Infoln("send-to:", destination,
			"ip:", origin.Ip, "mobile:", origin.UserAgent.Mobile,
			"platform:", origin.UserAgent.Platform, "os:", origin.UserAgent.OS, "make:", origin.UserAgent.Make,
			"browser:", origin.UserAgent.Browser, "version:", origin.UserAgent.BrowserVersion,
			"location:", *origin.Location,
			"useragent:", origin.UserAgent.Header)
		this.service.PublishInstall(&InstallEvent{
			Origin:       origin,
			Destination:  destination,
			AppUrlScheme: customUrlScheme,
			AppUUID:      appUuid,
			ShortyUUID:   userId,
		})
	}()
}
// StatsSummary is the JSON payload returned by StatsHandler for a short url.
type StatsSummary struct {
	Id      string      `json:"id"`      // short code
	Created string      `json:"when"`    // human-readable age (see relativeTime)
	Hits    int         `json:"hits"`    // total visit count
	Uniques int         `json:"uniques"` // unique visitor count
	Summary OriginStats `json:"summary"` // per-origin breakdown
	Config  ShortUrl    `json:"config"`  // the short url record itself
}
// StatsHandler handles GET stats requests: it looks up the short url by its
// code and writes a StatsSummary (hits, uniques, per-origin breakdown) as
// JSON with CORS headers set.
func (this *ShortyEndPoint) StatsHandler(resp http.ResponseWriter, req *http.Request) {
	omni_http.SetCORSHeaders(resp)
	vars := mux.Vars(req)
	shortyUrl, err := this.service.Find(vars["id"])
	if err != nil {
		renderError(resp, req, err.Error(), http.StatusInternalServerError)
		return
	} else if shortyUrl == nil {
		renderError(resp, req, "No URL was found with short code", http.StatusNotFound)
		return
	}
	hits, err := shortyUrl.Hits()
	if err != nil {
		renderError(resp, req, err.Error(), http.StatusInternalServerError)
		return
	}
	uniques, err := shortyUrl.Uniques()
	if err != nil {
		renderError(resp, req, err.Error(), http.StatusInternalServerError)
		return
	}
	originStats, err := shortyUrl.Sources(true)
	if err != nil {
		renderError(resp, req, err.Error(), http.StatusInternalServerError)
		return
	}
	summary := StatsSummary{
		Id:      shortyUrl.Id,
		Created: relativeTime(time.Now().Sub(shortyUrl.Created)),
		Hits:    hits,
		Uniques: uniques,
		Summary: originStats,
		Config:  *shortyUrl,
	}
	buff, err := json.Marshal(summary)
	if err != nil {
		renderJsonError(resp, req, "Malformed summary", http.StatusInternalServerError)
		return
	}
	resp.Write(buff)
}
func relativeTime(duration time.Duration) string {
hours := int64(math.Abs(duration.Hours()))
minutes := int64(math.Abs(duration.Minutes()))
when := ""
switch {
case hours >= (365 * 24):
when = "Over an year ago"
case hours > (30 * 24):
when = fmt.Sprintf("%d months ago", int64(hours/(30*24)))
case hours == (30 * 24):
when = "a month ago"
case hours > 24:
when = fmt.Sprintf("%d days ago", int64(hours/24))
case hours == 24:
when = "yesterday"
case hours >= 2:
when = fmt.Sprintf("%d hours ago", hours)
case hours > 1:
when = "over an hour ago"
case hours == 1:
when = "an hour ago"
case minutes >= 2:
when = fmt.Sprintf("%d minutes ago", minutes)
case minutes > 1:
when = "a minute ago"
default:
when = "just now"
}
return when
}
func renderJsonError(resp http.ResponseWriter, req *http.Request, message string, code int) (err error) {
resp.WriteHeader(code)
resp.Write([]byte(fmt.Sprintf("{\"error\":\"%s\"}", message)))
return
}
// renderError is intended to render a templated HTML error page; the
// implementation is currently disabled, so it is a no-op that always returns
// nil. NOTE(review): callers on error paths therefore emit no status code or
// body at all — confirm this is intentional.
func renderError(resp http.ResponseWriter, req *http.Request, message string, code int) (err error) {
	/*
		body, err := render(req, "layout", "error", map[string]string{"Error": message})
		if err != nil {
			http.Error(resp, err.Error(), http.StatusInternalServerError)
			return
		}
		resp.WriteHeader(code)
		resp.Write(body)
	*/
	return nil
}
// newUUID generates a random version-4 UUID string per RFC 4122.
func newUUID() (string, error) {
	var b [16]byte
	if _, err := io.ReadFull(rand.Reader, b[:]); err != nil {
		return "", err
	}
	b[8] = (b[8] &^ 0xc0) | 0x80 // variant bits; see RFC 4122 section 4.1.1
	b[6] = (b[6] &^ 0xf0) | 0x40 // version 4 (pseudo-random); see section 4.1.3
	return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]), nil
}
|
package pool
import (
"fmt"
"github.com/DVI-GI-2017/Jira__backend/db"
"github.com/DVI-GI-2017/Jira__backend/models"
"github.com/DVI-GI-2017/Jira__backend/services"
)
// init registers projectsResolver under the "Project" key so pool lookups
// can dispatch project-related actions.
func init() {
	resolvers["Project"] = projectsResolver
}

// Actions handled by projectsResolver.
const (
	ProjectCreate     = Action("ProjectCreate")
	ProjectsAll       = Action("ProjectsAll")
	ProjectFindById   = Action("ProjectFindById")
	ProjectAllUsers   = Action("ProjectAllUsers")
	ProjectAllTasks   = Action("ProjectAllTasks")
	ProjectAddUser    = Action("ProjectAddUser")
	ProjectDeleteUser = Action("ProjectDeleteUser")
)
// projectsResolver maps a project Action to the ServiceFunc implementing it.
// Each ServiceFunc safely casts its generic payload to the concrete model
// type before delegating to the services layer. Unknown actions yield an
// error.
func projectsResolver(action Action) (service ServiceFunc, err error) {
	switch action {
	case ProjectCreate:
		service = func(source db.DataSource, data interface{}) (interface{}, error) {
			project, err := models.SafeCastToProject(data)
			if err != nil {
				return models.Project{}, err
			}
			return services.CreateProject(source, project)
		}
		return
	case ProjectsAll:
		service = func(source db.DataSource, _ interface{}) (interface{}, error) {
			return services.AllProjects(source)
		}
		return
	case ProjectFindById:
		service = func(source db.DataSource, data interface{}) (interface{}, error) {
			id, err := models.SafeCastToRequiredId(data)
			if err != nil {
				return models.Project{}, err
			}
			return services.FindProjectById(source, id)
		}
		return
	case ProjectAllUsers:
		service = func(source db.DataSource, data interface{}) (result interface{}, err error) {
			id, err := models.SafeCastToRequiredId(data)
			// BUG FIX: the branches were inverted — on a cast error the users
			// were fetched and returned, while a valid id produced an empty
			// list. Propagate the error and query on the happy path instead.
			if err != nil {
				return models.UsersList{}, err
			}
			return services.AllUsersInProject(source, id), nil
		}
		return
	case ProjectAllTasks:
		service = func(source db.DataSource, data interface{}) (result interface{}, err error) {
			id, err := models.SafeCastToRequiredId(data)
			if err != nil {
				return models.TasksList{}, err
			}
			return services.AllTasksInProject(source, id)
		}
		return
	case ProjectAddUser:
		service = func(source db.DataSource, data interface{}) (result interface{}, err error) {
			ids, err := models.SafeCastToProjectUser(data)
			if err != nil {
				return models.UsersList{}, err
			}
			return services.AddUserToProject(source, ids.ProjectId, ids.UserId)
		}
		return
	case ProjectDeleteUser:
		service = func(source db.DataSource, data interface{}) (result interface{}, err error) {
			ids, err := models.SafeCastToProjectUser(data)
			if err != nil {
				return models.UsersList{}, err
			}
			return services.DeleteUserFromProject(source, ids.ProjectId, ids.UserId)
		}
		return
	}
	return nil, fmt.Errorf("can not find resolver with action: %v, in projects resolvers", action)
}
Fix users from projects.
package pool
import (
"fmt"
"github.com/DVI-GI-2017/Jira__backend/db"
"github.com/DVI-GI-2017/Jira__backend/models"
"github.com/DVI-GI-2017/Jira__backend/services"
)
// init registers projectsResolver under the "Project" key so pool lookups
// can dispatch project-related actions.
func init() {
	resolvers["Project"] = projectsResolver
}

// Actions handled by projectsResolver.
const (
	ProjectCreate     = Action("ProjectCreate")
	ProjectsAll       = Action("ProjectsAll")
	ProjectFindById   = Action("ProjectFindById")
	ProjectAllUsers   = Action("ProjectAllUsers")
	ProjectAllTasks   = Action("ProjectAllTasks")
	ProjectAddUser    = Action("ProjectAddUser")
	ProjectDeleteUser = Action("ProjectDeleteUser")
)
// projectsResolver maps a project Action to the ServiceFunc implementing it.
// Each ServiceFunc safely casts its generic payload to the concrete model
// type before delegating to the services layer. Unknown actions yield an
// error.
func projectsResolver(action Action) (service ServiceFunc, err error) {
	switch action {
	case ProjectCreate:
		service = func(source db.DataSource, data interface{}) (interface{}, error) {
			project, err := models.SafeCastToProject(data)
			if err != nil {
				return models.Project{}, err
			}
			return services.CreateProject(source, project)
		}
		return
	case ProjectsAll:
		service = func(source db.DataSource, _ interface{}) (interface{}, error) {
			return services.AllProjects(source)
		}
		return
	case ProjectFindById:
		service = func(source db.DataSource, data interface{}) (interface{}, error) {
			id, err := models.SafeCastToRequiredId(data)
			if err != nil {
				return models.Project{}, err
			}
			return services.FindProjectById(source, id)
		}
		return
	case ProjectAllUsers:
		service = func(source db.DataSource, data interface{}) (result interface{}, err error) {
			id, err := models.SafeCastToRequiredId(data)
			if err != nil {
				return models.UsersList{}, err
			}
			return services.AllUsersInProject(source, id)
		}
		return
	case ProjectAllTasks:
		service = func(source db.DataSource, data interface{}) (result interface{}, err error) {
			id, err := models.SafeCastToRequiredId(data)
			if err != nil {
				return models.TasksList{}, err
			}
			return services.AllTasksInProject(source, id)
		}
		return
	case ProjectAddUser:
		service = func(source db.DataSource, data interface{}) (result interface{}, err error) {
			ids, err := models.SafeCastToProjectUser(data)
			if err != nil {
				return models.UsersList{}, err
			}
			return services.AddUserToProject(source, ids.ProjectId, ids.UserId)
		}
		return
	case ProjectDeleteUser:
		service = func(source db.DataSource, data interface{}) (result interface{}, err error) {
			ids, err := models.SafeCastToProjectUser(data)
			if err != nil {
				return models.UsersList{}, err
			}
			return services.DeleteUserFromProject(source, ids.ProjectId, ids.UserId)
		}
		return
	}
	return nil, fmt.Errorf("can not find resolver with action: %v, in projects resolvers", action)
}
|
package sereno_test
import (
"fmt"
"sync"
"testing"
"time"
"github.com/lytics/sereno"
"github.com/lytics/sereno/embeddedetcd"
"golang.org/x/net/context"
)
// TestSignal exercises pub/sub over an embedded etcd cluster: a background
// subscriber consumes msgcount messages published on "topic42", then the test
// waits for it to finish.
func TestSignal(t *testing.T) {
	cluster := embeddedetcd.TestClusterOf1()
	cluster.Launch()
	defer func() {
		t.Log("terminating etcd cluster")
		cluster.Terminate(wipe_data_onterm)
	}()
	testtimeout := NewTestCaseTimeout(t, 10*time.Second, time.Microsecond)
	defer testtimeout.End()
	const msgcount = 100
	// Barrier so the publisher does not start before the subscription exists.
	ready1barrier := &sync.WaitGroup{}
	ready1barrier.Add(1)
	done := &sync.WaitGroup{}
	done.Add(1)
	kapi := KeyClientFromCluster(cluster)
	ctx := context.Background()
	pub, err := sereno.NewPubSubTopic(ctx, "topic42", kapi)
	AssertT(t, err == nil, "err should be nil, got:%v", err)
	go func() {
		defer done.Done()
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		defer cancel()
		sub, err := sereno.NewPubSubTopic(ctx, "topic42", kapi)
		AssertT(t, err == nil, "err should be nil, got:%v", err)
		subchan, err := sub.Subscribe()
		AssertT(t, err == nil, "err should be nil, got:%v", err)
		ready1barrier.Done()
		cnt := 0
		st := time.Now()
		defer func() {
			secs := time.Now().Sub(st).Seconds()
			rate := float64(cnt) / secs
			t.Logf("Background Subscriber: %v msgs @ rate: %0.0f msg/s", cnt, rate)
		}()
		for msgout := range subchan {
			if msgout.Err != nil {
				err := msgout.Err
				if err == context.Canceled {
					return
				} else if err == context.DeadlineExceeded {
					return
				}
				// BUG FIX: t.Fatalf must only be called from the goroutine
				// running the test (see testing package docs); use Errorf and
				// bail out of this background goroutine instead.
				t.Errorf("error: %v", msgout.Err)
				return
			}
			//t.Logf("msg: %v", string(msgout.Msg))
			cnt++
			if cnt == msgcount {
				return
			}
		}
	}()
	ready1barrier.Wait()
	for i := 0; i < msgcount; i++ {
		m := fmt.Sprintf("msgid:%d", i)
		err := pub.Publish([]byte(m))
		AssertT(t, err == nil, "err should be nil, got:%v", err)
	}
	done.Wait()
	t.Log("testing done...")
}
fixing tests
package sereno_test
import (
"fmt"
"sync"
"testing"
"time"
"github.com/lytics/sereno"
"github.com/lytics/sereno/embeddedetcd"
"golang.org/x/net/context"
)
// TestSignal exercises pub/sub over an embedded etcd cluster: a background
// subscriber consumes msgcount messages published on "topic42", then the test
// waits for it to finish.
func TestSignal(t *testing.T) {
	cluster := embeddedetcd.TestClusterOf1()
	cluster.Launch()
	defer func() {
		t.Log("terminating etcd cluster")
		cluster.Terminate(wipe_data_onterm)
	}()
	testtimeout := NewTestCaseTimeout(t, 10*time.Second, time.Microsecond)
	defer testtimeout.End()
	const msgcount = 9
	// Barrier so the publisher does not start before the subscription exists.
	ready1barrier := &sync.WaitGroup{}
	ready1barrier.Add(1)
	done := &sync.WaitGroup{}
	done.Add(1)
	kapi := KeyClientFromCluster(cluster)
	ctx := context.Background()
	pub, err := sereno.NewPubSubTopic(ctx, "topic42", kapi)
	AssertT(t, err == nil, "err should be nil, got:%v", err)
	go func() {
		defer done.Done()
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		defer cancel()
		sub, err := sereno.NewPubSubTopic(ctx, "topic42", kapi)
		AssertT(t, err == nil, "err should be nil, got:%v", err)
		subchan, err := sub.Subscribe()
		AssertT(t, err == nil, "err should be nil, got:%v", err)
		ready1barrier.Done()
		cnt := 0
		st := time.Now()
		defer func() {
			secs := time.Now().Sub(st).Seconds()
			rate := float64(cnt) / secs
			t.Logf("Background Subscriber: %v msgs @ rate: %0.0f msg/s", cnt, rate)
		}()
		for msgout := range subchan {
			if msgout.Err != nil {
				err := msgout.Err
				if err == context.Canceled {
					return
				} else if err == context.DeadlineExceeded {
					return
				}
				// BUG FIX: t.Fatalf must only be called from the goroutine
				// running the test (see testing package docs); use Errorf and
				// bail out of this background goroutine instead.
				t.Errorf("error: %v", msgout.Err)
				return
			}
			//t.Logf("msg: %v", string(msgout.Msg))
			cnt++
			if cnt == msgcount {
				return
			}
		}
	}()
	ready1barrier.Wait()
	for i := 0; i < msgcount; i++ {
		m := fmt.Sprintf("msgid:%d", i)
		err := pub.Publish([]byte(m))
		AssertT(t, err == nil, "err should be nil, got:%v", err)
	}
	done.Wait()
	t.Log("testing done...")
}
|
package main
import (
"context"
"flag"
"fmt"
"math/rand"
"net/http"
_ "net/http/pprof"
"os"
"sync"
"time"
"golang.org/x/net/trace"
"github.com/dgraph-io/badger/badger"
"github.com/dgraph-io/badger/y"
"github.com/dgraph-io/dgraph/store"
"github.com/pkg/profile"
)
const mil float64 = 1000000
var (
which = flag.String("kv", "both", "Which KV store to use. Options: both, badger, rocksdb")
numKeys = flag.Float64("keys_mil", 10.0, "How many million keys to write.")
valueSize = flag.Int("valsz", 128, "Value size in bytes.")
dir = flag.String("dir", "/mnt/data", "Base dir for writes.")
)
// fillEntry populates e with a random key from the keyspace of numKeys*mil
// entries and fresh random value bytes, reusing e's buffers when possible.
func fillEntry(e *badger.Entry) {
	k := rand.Int() % int(*numKeys*mil)
	key := fmt.Sprintf("vsz=%05d-k=%010d", *valueSize, k) // 22 bytes.
	if cap(e.Key) < len(key) {
		// Over-allocate so later fills rarely need to reallocate.
		e.Key = make([]byte, 2*len(key))
	}
	e.Key = e.Key[:len(key)]
	copy(e.Key, key)
	// The value buffer is reused as-is; only its contents are randomized.
	rand.Read(e.Value)
	e.Meta = 0
	e.Offset = 0
}
var ctx = context.Background()
var bdb *badger.KV
var rdb *store.Store
// writeBatch refills entries with random data and writes them to whichever
// stores are enabled (bdb and/or rdb). Returns the number of entries written.
func writeBatch(entries []*badger.Entry) int {
	for _, e := range entries {
		fillEntry(e)
	}
	if bdb != nil {
		y.Check(bdb.Write(ctx, entries))
	}
	// BUG FIX: the rocksdb write batch was created and populated
	// unconditionally, dereferencing a nil rdb when running with -kv=badger.
	// Only touch the batch when rocksdb is actually enabled.
	if rdb != nil {
		rb := rdb.NewWriteBatch()
		defer rb.Destroy()
		for _, e := range entries {
			rb.Put(e.Key, e.Value)
		}
		y.Check(rdb.WriteBatch(rb))
	}
	return len(entries)
}
// main benchmarks bulk random writes against badger and/or rocksdb (selected
// by -kv), spreading the load across 12 writer goroutines and exposing
// pprof/trace handlers on :8080.
func main() {
	mode := flag.String("profile.mode", "", "enable profiling mode, one of [cpu, mem, mutex, block]")
	flag.Parse()
	switch *mode {
	case "cpu":
		defer profile.Start(profile.CPUProfile).Stop()
	case "mem":
		defer profile.Start(profile.MemProfile).Stop()
	case "mutex":
		defer profile.Start(profile.MutexProfile).Stop()
	case "block":
		defer profile.Start(profile.BlockProfile).Stop()
	default:
		// do nothing
	}
	// Allow unauthenticated access to the /debug/requests trace pages.
	trace.AuthRequest = func(req *http.Request) (any, sensitive bool) {
		return true, true
	}
	// Total number of key-value pairs to write across all workers.
	nw := *numKeys * mil
	opt := badger.DefaultOptions
	// opt.MapTablesTo = table.Nothing
	opt.Verbose = true
	opt.Dir = *dir + "/badger"
	opt.SyncWrites = false
	var err error
	// Wipe and (re)create the store directories for a clean benchmark run.
	if *which == "badger" || *which == "both" {
		fmt.Println("Init Badger")
		y.Check(os.RemoveAll(*dir + "/badger"))
		os.MkdirAll(*dir+"/badger", 0777)
		bdb = badger.NewKV(&opt)
	}
	if *which == "rocksdb" || *which == "both" {
		fmt.Println("Init Rocks")
		os.RemoveAll(*dir + "/rocks")
		os.MkdirAll(*dir+"/rocks", 0777)
		rdb, err = store.NewStore(*dir + "/rocks")
		y.Check(err)
	}
	// Serve pprof (and trace) endpoints for the duration of the run.
	go http.ListenAndServe("0.0.0.0:8080", nil)
	N := 12 // number of concurrent writer goroutines
	var wg sync.WaitGroup
	for i := 0; i < N; i++ {
		wg.Add(1)
		go func(proc int) {
			// Each writer reuses one batch of 1000 entries, refilled with
			// fresh random data on every writeBatch call.
			entries := make([]*badger.Entry, 1000)
			for i := 0; i < len(entries); i++ {
				e := new(badger.Entry)
				e.Key = make([]byte, 22)
				e.Value = make([]byte, *valueSize)
				entries[i] = e
			}
			var written float64
			for written < nw/float64(N) {
				written += float64(writeBatch(entries))
				if int(written)%100000 == 0 {
					fmt.Printf("[%d] Written %5.2fM key-val pairs\n", proc, written/mil)
				}
			}
			fmt.Printf("[%d] Written %5.2fM key-val pairs\n", proc, written/mil)
			wg.Done()
		}(i)
	}
	// wg.Add(1) // Block
	wg.Wait()
	if bdb != nil {
		fmt.Println("closing badger")
		bdb.Close()
	}
	if rdb != nil {
		fmt.Println("closing rocks")
		rdb.Close()
	}
	// Grace period before exit.
	// NOTE(review): presumably so profiling endpoints can be scraped and
	// background work can settle — confirm.
	time.Sleep(10 * time.Second)
}
Output write rate
package main
import (
"context"
"flag"
"fmt"
"math/rand"
"net/http"
_ "net/http/pprof"
"os"
"sync"
"sync/atomic"
"time"
"golang.org/x/net/trace"
"github.com/dgraph-io/badger/badger"
"github.com/dgraph-io/badger/y"
"github.com/dgraph-io/dgraph/store"
"github.com/paulbellamy/ratecounter"
"github.com/pkg/profile"
)
// mil is declared as float64 so key-count arithmetic below stays in
// floating point (flags express key counts in millions).
const mil float64 = 1000000

// Command-line knobs for the write benchmark.
var (
	which     = flag.String("kv", "both", "Which KV store to use. Options: both, badger, rocksdb")
	numKeys   = flag.Float64("keys_mil", 10.0, "How many million keys to write.")
	valueSize = flag.Int("valsz", 128, "Value size in bytes.")
	dir       = flag.String("dir", "/mnt/data", "Base dir for writes.")
)
// fillEntry populates e in place with a pseudo-random key of the fixed
// "vsz=%05d-k=%010d" form (22 bytes) and random value bytes, reusing e's
// existing buffers whenever their capacity allows.
func fillEntry(e *badger.Entry) {
	id := rand.Int() % int(*numKeys * mil)
	formatted := fmt.Sprintf("vsz=%05d-k=%010d", *valueSize, id) // 22 bytes.
	need := len(formatted)
	if cap(e.Key) < need {
		e.Key = make([]byte, 2*need)
	}
	e.Key = e.Key[:need]
	copy(e.Key, formatted)
	rand.Read(e.Value)
	e.Meta = 0
	e.Offset = 0
}
// Shared context and store handles. bdb/rdb stay nil unless the matching
// store is selected via the -kv flag; writeBatch checks for nil.
var ctx = context.Background()
var bdb *badger.KV
var rdb *store.Store
// writeBatch refills entries with fresh random key/value pairs and writes
// them to whichever stores are open (bdb and/or rdb). It returns the number
// of entries written per store.
func writeBatch(entries []*badger.Entry) int {
	for _, e := range entries {
		fillEntry(e)
	}
	if bdb != nil {
		y.Check(bdb.Write(ctx, entries))
	}
	// Only build the RocksDB write batch when rdb is actually open. The
	// original called rdb.NewWriteBatch() unconditionally, using a nil
	// *store.Store when running with -kv=badger.
	if rdb != nil {
		rb := rdb.NewWriteBatch()
		defer rb.Destroy()
		for _, e := range entries {
			rb.Put(e.Key, e.Value)
		}
		y.Check(rdb.WriteBatch(rb))
	}
	return len(entries)
}
// humanize renders n with an M (millions) or K (thousands) suffix, falling
// back to a plain fixed-point rendering for small values.
func humanize(n int64) string {
	const (
		million  = 1000000
		thousand = 1000
	)
	v := float64(n)
	switch {
	case n >= million:
		return fmt.Sprintf("%6.2fM", v/1000000.0)
	case n >= thousand:
		return fmt.Sprintf("%6.2fK", v/1000.0)
	default:
		return fmt.Sprintf("%5.2f", v)
	}
}
// main drives the benchmark: it optionally enables one pprof profile,
// initializes the selected store(s), spawns N writer goroutines, reports the
// rolling write rate once per second, and prints the final key count.
func main() {
	mode := flag.String("profile.mode", "", "enable profiling mode, one of [cpu, mem, mutex, block]")
	flag.Parse()
	switch *mode {
	case "cpu":
		defer profile.Start(profile.CPUProfile).Stop()
	case "mem":
		defer profile.Start(profile.MemProfile).Stop()
	case "mutex":
		defer profile.Start(profile.MutexProfile).Stop()
	case "block":
		defer profile.Start(profile.BlockProfile).Stop()
	default:
		// do nothing
	}
	// Allow /debug/requests tracing from any client, including sensitive data
	// (this is a local benchmark tool, not a service).
	trace.AuthRequest = func(req *http.Request) (any, sensitive bool) {
		return true, true
	}
	nw := *numKeys * mil
	fmt.Printf("TOTAL KEYS TO WRITE: %s\n", humanize(int64(nw)))
	opt := badger.DefaultOptions
	// opt.MapTablesTo = table.Nothing
	opt.Verbose = true
	opt.Dir = *dir + "/badger"
	opt.SyncWrites = false
	var err error
	if *which == "badger" || *which == "both" {
		fmt.Println("Init Badger")
		y.Check(os.RemoveAll(*dir + "/badger"))
		// Fail fast if the data dir cannot be created (error was ignored).
		y.Check(os.MkdirAll(*dir+"/badger", 0777))
		bdb = badger.NewKV(&opt)
	}
	if *which == "rocksdb" || *which == "both" {
		fmt.Println("Init Rocks")
		y.Check(os.RemoveAll(*dir + "/rocks"))
		y.Check(os.MkdirAll(*dir+"/rocks", 0777))
		rdb, err = store.NewStore(*dir + "/rocks")
		y.Check(err)
	}
	// Reporter goroutine: print the rolling per-minute rate every second
	// until cancelled after all writers finish.
	rc := ratecounter.NewRateCounter(time.Minute)
	var counter int64
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		t := time.NewTicker(time.Second)
		defer t.Stop() // ticker was leaked in the original
		for {
			select {
			case <-t.C:
				fmt.Printf("Write key rate per minute: %s. Total: %s\n",
					humanize(rc.Rate()),
					humanize(atomic.LoadInt64(&counter)))
			case <-ctx.Done():
				return
			}
		}
	}()
	go http.ListenAndServe("0.0.0.0:8080", nil)
	N := 12
	var wg sync.WaitGroup
	for i := 0; i < N; i++ {
		wg.Add(1)
		go func(proc int) {
			// Each worker reuses one slab of 1000 entries for every batch.
			entries := make([]*badger.Entry, 1000)
			for i := 0; i < len(entries); i++ {
				e := new(badger.Entry)
				e.Key = make([]byte, 22)
				e.Value = make([]byte, *valueSize)
				entries[i] = e
			}
			var written float64
			for written < nw/float64(N) {
				wrote := float64(writeBatch(entries))
				wi := int64(wrote)
				atomic.AddInt64(&counter, wi)
				rc.Incr(wi)
				written += wrote
			}
			wg.Done()
		}(i)
	}
	// wg.Add(1) // Block
	wg.Wait()
	cancel()
	if bdb != nil {
		fmt.Println("closing badger")
		bdb.Close()
	}
	if rdb != nil {
		fmt.Println("closing rocks")
		rdb.Close()
	}
	fmt.Printf("\nWROTE %d KEYS\n", atomic.LoadInt64(&counter))
}
|
// since.go
/* REVISION HISTORY
21 Oct 2018 -- First started playing w/ MichaelTJones' code. I added a help flag
*/
package main
import (
"flag"
"fmt"
"log"
"os"
"sort"
"sync"
"time"
"github.com/MichaelTJones/walk"
)
// Command-line flags controlling the cutoff and output.
var duration = flag.String("d", "", "find files modified within DURATION")
var format = flag.String("f", "2006-01-02 03:04:05", "time format")
var instant = flag.String("t", "", "find files modified since TIME")
var quiet = flag.Bool("q", false, "do not print filenames")
var verbose = flag.Bool("v", false, "print summary statistics")
var help = flag.Bool("h", false, "print help message")

// LastAlteredDate records the most recent edit to this program.
var LastAlteredDate = "Oct 21, 2018"
// main parses the time/duration flags, walks the given root directories in
// parallel, and prints the files modified after the cutoff in sorted order,
// plus an optional verbose summary.
func main() {
	fmt.Println(" since written in Go. LastAltered", LastAlteredDate)
	flag.Parse()
	if *help {
		fmt.Println()
		fmt.Println()
		fmt.Println(" Usage: since <options> <start-dir>")
		fmt.Println(" Valid time units for duration are ns, us, ms, s, m, h.")
		fmt.Println()
		flag.PrintDefaults()
		os.Exit(0)
	}
	// Cutoff instant: -t (absolute time) takes precedence over -d (duration
	// back from now). With neither, "when" stays equal to now.
	now := time.Now()
	when := now
	switch {
	case *instant != "":
		t, err := time.Parse(*format, *instant)
		if err != nil {
			fmt.Printf("error parsing time %q, %s\n", *instant, err)
			os.Exit(1)
		}
		when = t
	case *duration != "":
		d, err := time.ParseDuration(*duration)
		if err != nil {
			fmt.Printf("error parsing duration %q, %s\n", *duration, err)
			os.Exit(2)
		}
		when = now.Add(-d)
	}
	// goroutine to collect names of recently-modified files
	var result []string
	done := make(chan bool)
	results := make(chan string, 1024)
	go func() {
		for r := range results {
			result = append(result, r)
		}
		sort.Strings(result) // simulate ordered traversal
		done <- true
	}()
	// parallel walker and walk to find recently-modified files
	var lock sync.Mutex    // guards the counters; visitors run concurrently
	var tFiles, tBytes int // total files and bytes
	var rFiles, rBytes int // recent files and bytes
	sizeVisitor := func(path string, info os.FileInfo, err error) error {
		if err == nil {
			lock.Lock()
			tFiles += 1
			tBytes += int(info.Size())
			lock.Unlock()
			if info.ModTime().After(when) {
				lock.Lock()
				rFiles += 1
				rBytes += int(info.Size())
				lock.Unlock()
				if !*quiet {
					// fmt.Printf("%s %s\n", info.ModTime(), path) // simple
					results <- path // allows sorting into "normal" order
				}
			}
		}
		return nil
	}
	// Fix: with no start-dir arguments the original walked nothing and
	// silently printed an empty result; default to the working directory.
	if len(flag.Args()) < 1 {
		cwd, err := os.Getwd()
		if err != nil {
			log.Fatalln(" error from Getwd is", err)
		}
		walk.Walk(cwd, sizeVisitor)
	} else {
		for _, root := range flag.Args() {
			walk.Walk(root, sizeVisitor)
		}
	}
	// wait for traversal results and print
	close(results) // no more results
	<-done         // wait for final results and sorting
	𝛥t := float64(time.Since(now)) / 1e9
	for _, r := range result {
		fmt.Printf("%s\n", r)
	}
	// print optional verbose summary report
	if *verbose {
		log.Printf(" total: %8d files (%7.2f%%), %13d bytes (%7.2f%%)\n",
			tFiles, 100.0, tBytes, 100.0)
		rfp := 100 * float64(rFiles) / float64(tFiles)
		rbp := 100 * float64(rBytes) / float64(tBytes)
		log.Printf(" recent: %8d files (%7.2f%%), %13d bytes (%7.2f%%) in %.4f seconds\n",
			rFiles, rfp, rBytes, rbp, 𝛥t)
	}
}
modified: since/since.go
10/21/2018 02:53:52 PM
// since.go
/* REVISION HISTORY
21 Oct 2018 -- First started playing w/ MichaelTJones' code. I added a help flag
*/
package main
import (
"flag"
"fmt"
"log"
"os"
"sort"
"sync"
"time"
"github.com/MichaelTJones/walk"
)
// Command-line flags controlling the cutoff and output.
var duration = flag.String("d", "", "find files modified within DURATION")
var format = flag.String("f", "2006-01-02 03:04:05", "time format")
var instant = flag.String("t", "", "find files modified since TIME")
var quiet = flag.Bool("q", false, "do not print filenames")
var verbose = flag.Bool("v", false, "print summary statistics")
var help = flag.Bool("h", false, "print help message")

// LastAlteredDate records the most recent edit to this program.
var LastAlteredDate = "Oct 21, 2018"
// main parses the cutoff flags, walks the requested directories (or the
// current directory when none are given) in parallel, and prints the files
// modified after the cutoff in sorted order, plus an optional summary.
func main() {
	fmt.Println(" since written in Go. LastAltered", LastAlteredDate)
	flag.Parse()
	if *help {
		fmt.Println()
		fmt.Println()
		fmt.Println(" Usage: since <options> <start-dir>")
		fmt.Println(" Valid time units for duration are ns, us, ms, s, m, h.")
		fmt.Println()
		flag.PrintDefaults()
		os.Exit(0)
	}
	// Cutoff instant: -t (absolute time) takes precedence over -d (duration
	// back from now). With neither flag, "when" stays equal to now.
	now := time.Now()
	when := now
	switch {
	case *instant != "":
		t, err := time.Parse(*format, *instant)
		if err != nil {
			fmt.Printf("error parsing time %q, %s\n", *instant, err)
			os.Exit(1)
		}
		when = t
	case *duration != "":
		d, err := time.ParseDuration(*duration)
		if err != nil {
			fmt.Printf("error parsing duration %q, %s\n", *duration, err)
			os.Exit(2)
		}
		when = now.Add(-d)
	}
	// goroutine to collect names of recently-modified files
	var result []string
	done := make(chan bool)
	results := make(chan string, 1024)
	go func() {
		for r := range results {
			result = append(result, r)
		}
		sort.Strings(result) // simulate ordered traversal
		done <- true
	}()
	// parallel walker and walk to find recently-modified files
	var lock sync.Mutex    // guards the four counters; visitors run concurrently
	var tFiles, tBytes int // total files and bytes
	var rFiles, rBytes int // recent files and bytes
	sizeVisitor := func(path string, info os.FileInfo, err error) error {
		if err == nil {
			lock.Lock()
			tFiles += 1
			tBytes += int(info.Size())
			lock.Unlock()
			if info.ModTime().After(when) {
				lock.Lock()
				rFiles += 1
				rBytes += int(info.Size())
				lock.Unlock()
				if !*quiet {
					// fmt.Printf("%s %s\n", info.ModTime(), path) // simple
					results <- path // allows sorting into "normal" order
				}
			}
		}
		// Walk errors are deliberately ignored so one unreadable entry does
		// not stop the traversal.
		return nil
	}
	// Default to the current working directory when no roots were given.
	if len(flag.Args()) < 1 {
		dir, err := os.Getwd()
		if err != nil {
			log.Fatalln(" error from Getwd is", err)
		}
		walk.Walk(dir, sizeVisitor)
	} else {
		for _, root := range flag.Args() {
			walk.Walk(root, sizeVisitor)
		}
	}
	// wait for traversal results and print
	close(results) // no more results
	<-done         // wait for final results and sorting
	𝛥t := float64(time.Since(now)) / 1e9
	for _, r := range result {
		fmt.Printf("%s\n", r)
	}
	// print optional verbose summary report
	if *verbose {
		log.Printf(" total: %8d files (%7.2f%%), %13d bytes (%7.2f%%)\n",
			tFiles, 100.0, tBytes, 100.0)
		rfp := 100 * float64(rFiles) / float64(tFiles)
		rbp := 100 * float64(rBytes) / float64(tBytes)
		log.Printf(" recent: %8d files (%7.2f%%), %13d bytes (%7.2f%%) in %.4f seconds\n",
			rFiles, rfp, rBytes, rbp, 𝛥t)
	}
}
|
package compress
import (
"archive/tar"
"compress/gzip"
"io"
"log"
"os"
"github.com/goreleaser/releaser/config"
"github.com/goreleaser/releaser/uname"
"golang.org/x/sync/errgroup"
)
// Pipe is the release-pipeline step that tars and gzips the built binaries.
type Pipe struct{}

// Name of the pipe, as shown in pipeline output.
func (Pipe) Name() string {
	return "Compress"
}
// Run builds one compressed archive per OS/arch pair concurrently and
// returns the first error encountered, if any.
func (Pipe) Run(config config.ProjectConfig) error {
	var group errgroup.Group
	for _, goos := range config.Build.Oses {
		for _, goarch := range config.Build.Arches {
			// Capture per-iteration copies for the closure below.
			goos, goarch := goos, goarch
			group.Go(func() error {
				return create(goos, goarch, config)
			})
		}
	}
	return group.Wait()
}
// create writes dist/<name>.tar.gz containing the configured extra files
// and the built binary for the given OS/arch pair.
func create(system, arch string, config config.ProjectConfig) error {
	file, err := os.Create("dist/" + nameFor(system, arch, config.BinaryName) + ".tar.gz")
	if err != nil {
		// Checked before use: the original logged file.Name() first,
		// dereferencing a nil *os.File when Create failed.
		return err
	}
	log.Println("Creating", file.Name(), "...")
	gw := gzip.NewWriter(file)
	tw := tar.NewWriter(gw)
	// Close innermost-first so the tar and gzip trailers are flushed before
	// the file is closed; the original's reverse order truncated the archive.
	defer func() {
		_ = tw.Close()
		_ = gw.Close()
		_ = file.Close()
	}()
	for _, f := range config.Files {
		if err := addFile(tw, f, f); err != nil {
			return err
		}
	}
	return addFile(tw, config.BinaryName+ext(system), binaryPath(system, arch, config.BinaryName))
}
func addFile(tw *tar.Writer, name, path string) (err error) {
file, err := os.Open(path)
if err != nil {
return
}
defer func() {
_ = file.Close()
}()
stat, err := file.Stat()
if err != nil {
return
}
header := new(tar.Header)
header.Name = name
header.Size = stat.Size()
header.Mode = int64(stat.Mode())
header.ModTime = stat.ModTime()
if err := tw.WriteHeader(header); err != nil {
return err
}
if _, err := io.Copy(tw, file); err != nil {
return err
}
return
}
// nameFor returns the archive base name "<binary>_<uname-os>_<uname-arch>".
func nameFor(system, arch, binary string) string {
	osName := uname.FromGo(system)
	archName := uname.FromGo(arch)
	return binary + "_" + osName + "_" + archName
}
// binaryPath returns where the built binary for this OS/arch pair lives.
func binaryPath(system, arch, binary string) string {
	folder := nameFor(system, arch, binary)
	return "dist/" + folder + "/" + binary
}
// ext returns the executable suffix for the target OS (".exe" on Windows,
// empty otherwise).
func ext(system string) string {
	switch system {
	case "windows":
		return ".exe"
	default:
		return ""
	}
}
Reorder `defer`'d closers
Commit https://github.com/goreleaser/releaser/commit/40de5c5c644d69c3d33a6d0cf16e539a0f40423b introduces a regression (see comment) whereby
the tarball is closed before data is completely written, thus breaking the release package
package compress
import (
"archive/tar"
"compress/gzip"
"io"
"log"
"os"
"github.com/goreleaser/releaser/config"
"github.com/goreleaser/releaser/uname"
"golang.org/x/sync/errgroup"
)
// Pipe is the release-pipeline step that tars and gzips the built binaries.
type Pipe struct{}

// Name of the pipe, as shown in pipeline output.
func (Pipe) Name() string {
	return "Compress"
}
// Run compresses each OS/arch build in parallel via an errgroup; the first
// error, if any, is returned from Wait.
func (Pipe) Run(config config.ProjectConfig) error {
	var g errgroup.Group
	for _, system := range config.Build.Oses {
		for _, arch := range config.Build.Arches {
			// Re-declare to capture per-iteration copies in the closure
			// (required before Go 1.22).
			system := system
			arch := arch
			g.Go(func() error {
				return create(system, arch, config)
			})
		}
	}
	return g.Wait()
}
// create writes dist/<name>.tar.gz containing the configured extra files
// and the built binary for the given OS/arch pair.
func create(system, arch string, config config.ProjectConfig) error {
	file, err := os.Create("dist/" + nameFor(system, arch, config.BinaryName) + ".tar.gz")
	if err != nil {
		return err
	}
	// Log only after the error check: file is nil when Create fails and
	// file.Name() would panic (the original logged before checking err).
	log.Println("Creating", file.Name(), "...")
	gw := gzip.NewWriter(file)
	tw := tar.NewWriter(gw)
	// Close innermost-first so the tar and gzip trailers are flushed before
	// the underlying file closes.
	defer func() {
		_ = tw.Close()
		_ = gw.Close()
		_ = file.Close()
	}()
	for _, f := range config.Files {
		if err := addFile(tw, f, f); err != nil {
			return err
		}
	}
	return addFile(tw, config.BinaryName+ext(system), binaryPath(system, arch, config.BinaryName))
}
func addFile(tw *tar.Writer, name, path string) (err error) {
file, err := os.Open(path)
if err != nil {
return
}
defer func() {
_ = file.Close()
}()
stat, err := file.Stat()
if err != nil {
return
}
header := new(tar.Header)
header.Name = name
header.Size = stat.Size()
header.Mode = int64(stat.Mode())
header.ModTime = stat.ModTime()
if err := tw.WriteHeader(header); err != nil {
return err
}
if _, err := io.Copy(tw, file); err != nil {
return err
}
return
}
// nameFor returns the archive base name "<binary>_<uname-os>_<uname-arch>".
func nameFor(system, arch, binary string) string {
	osName := uname.FromGo(system)
	archName := uname.FromGo(arch)
	return binary + "_" + osName + "_" + archName
}
// binaryPath returns where the built binary for this OS/arch pair lives.
func binaryPath(system, arch, binary string) string {
	folder := nameFor(system, arch, binary)
	return "dist/" + folder + "/" + binary
}
// ext returns the executable suffix for the target OS (".exe" on Windows,
// empty otherwise).
func ext(system string) string {
	switch system {
	case "windows":
		return ".exe"
	default:
		return ""
	}
}
|
package api
import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"time"

	"github.com/grafana/grafana/pkg/api/dtos"
	"github.com/grafana/grafana/pkg/api/response"
	"github.com/grafana/grafana/pkg/components/simplejson"
	"github.com/grafana/grafana/pkg/infra/metrics"
	"github.com/grafana/grafana/pkg/models"
	"github.com/grafana/grafana/pkg/services/guardian"
	"github.com/grafana/grafana/pkg/setting"
	"github.com/grafana/grafana/pkg/util"
	"github.com/grafana/grafana/pkg/web"
)
// client is the shared HTTP client for all external snapshot-server calls;
// the 5s timeout keeps handlers from blocking on a slow remote server.
var client = &http.Client{
	Timeout:   time.Second * 5,
	Transport: &http.Transport{Proxy: http.ProxyFromEnvironment},
}

// GetSharingOptions returns the instance's snapshot-sharing configuration.
func GetSharingOptions(c *models.ReqContext) {
	c.JSON(http.StatusOK, util.DynMap{
		"externalSnapshotURL":  setting.ExternalSnapshotUrl,
		"externalSnapshotName": setting.ExternalSnapshotName,
		"externalEnabled":      setting.ExternalEnabled,
	})
}

// CreateExternalSnapshotResponse is the payload returned by an external
// snapshot server when a snapshot is created there.
type CreateExternalSnapshotResponse struct {
	Key       string `json:"key"`
	DeleteKey string `json:"deleteKey"`
	Url       string `json:"url"`
	DeleteUrl string `json:"deleteUrl"`
}
// createExternalDashboardSnapshot posts the snapshot payload to the
// configured external snapshot server and decodes its response, which
// carries the keys and URLs the caller stores locally.
func createExternalDashboardSnapshot(cmd models.CreateDashboardSnapshotCommand) (*CreateExternalSnapshotResponse, error) {
	var createSnapshotResponse CreateExternalSnapshotResponse
	message := map[string]interface{}{
		"name":      cmd.Name,
		"expires":   cmd.Expires,
		"dashboard": cmd.Dashboard,
		"key":       cmd.Key,
		"deleteKey": cmd.DeleteKey,
	}
	messageBytes, err := simplejson.NewFromAny(message).Encode()
	if err != nil {
		return nil, err
	}
	response, err := client.Post(setting.ExternalSnapshotUrl+"/api/snapshots", "application/json", bytes.NewBuffer(messageBytes))
	if err != nil {
		return nil, err
	}
	// Close errors are only logged; there is nothing useful to do with them.
	defer func() {
		if err := response.Body.Close(); err != nil {
			plog.Warn("Failed to close response body", "err", err)
		}
	}()
	if response.StatusCode != 200 {
		return nil, fmt.Errorf("create external snapshot response status code %d", response.StatusCode)
	}
	if err := json.NewDecoder(response.Body).Decode(&createSnapshotResponse); err != nil {
		return nil, err
	}
	return &createSnapshotResponse, nil
}
// POST /api/snapshots
// CreateDashboardSnapshot creates a snapshot either locally or, when
// cmd.External is set and external sharing is enabled, on the configured
// external snapshot server.
func (hs *HTTPServer) CreateDashboardSnapshot(c *models.ReqContext) response.Response {
	cmd := models.CreateDashboardSnapshotCommand{}
	if err := web.Bind(c.Req, &cmd); err != nil {
		return response.Error(http.StatusBadRequest, "bad request data", err)
	}
	if cmd.Name == "" {
		cmd.Name = "Unnamed snapshot"
	}
	var url string
	cmd.ExternalUrl = ""
	cmd.OrgId = c.OrgId
	cmd.UserId = c.UserId
	if cmd.External {
		if !setting.ExternalEnabled {
			c.JsonApiErr(403, "External dashboard creation is disabled", nil)
			return nil
		}
		response, err := createExternalDashboardSnapshot(cmd)
		if err != nil {
			c.JsonApiErr(500, "Failed to create external snapshot", err)
			return nil
		}
		url = response.Url
		cmd.Key = response.Key
		cmd.DeleteKey = response.DeleteKey
		cmd.ExternalUrl = response.Url
		cmd.ExternalDeleteUrl = response.DeleteUrl
		// The dashboard body lives on the external server only; store an
		// empty JSON object locally.
		cmd.Dashboard = simplejson.New()
		metrics.MApiDashboardSnapshotExternal.Inc()
	} else {
		// Local snapshot: generate access and delete keys when the client
		// did not supply them.
		if cmd.Key == "" {
			var err error
			cmd.Key, err = util.GetRandomString(32)
			if err != nil {
				c.JsonApiErr(500, "Could not generate random string", err)
				return nil
			}
		}
		if cmd.DeleteKey == "" {
			var err error
			cmd.DeleteKey, err = util.GetRandomString(32)
			if err != nil {
				c.JsonApiErr(500, "Could not generate random string", err)
				return nil
			}
		}
		url = setting.ToAbsUrl("dashboard/snapshot/" + cmd.Key)
		metrics.MApiDashboardSnapshotCreate.Inc()
	}
	if err := hs.dashboardsnapshotsService.CreateDashboardSnapshot(c.Req.Context(), &cmd); err != nil {
		c.JsonApiErr(500, "Failed to create snapshot", err)
		return nil
	}
	c.JSON(http.StatusOK, util.DynMap{
		"key":       cmd.Key,
		"deleteKey": cmd.DeleteKey,
		"url":       url,
		"deleteUrl": setting.ToAbsUrl("api/snapshots-delete/" + cmd.DeleteKey),
		"id":        cmd.Result.Id,
	})
	return nil
}
// GET /api/snapshots/:key
// GetDashboardSnapshot returns the stored snapshot as a dashboard DTO; the
// response is cacheable for an hour.
func (hs *HTTPServer) GetDashboardSnapshot(c *models.ReqContext) response.Response {
	key := web.Params(c.Req)[":key"]
	if len(key) == 0 {
		return response.Error(404, "Snapshot not found", nil)
	}
	query := &models.GetDashboardSnapshotQuery{Key: key}
	err := hs.dashboardsnapshotsService.GetDashboardSnapshot(c.Req.Context(), query)
	if err != nil {
		return response.Error(500, "Failed to get dashboard snapshot", err)
	}
	snapshot := query.Result
	// expired snapshots should also be removed from db
	if snapshot.Expires.Before(time.Now()) {
		return response.Error(404, "Dashboard snapshot not found", err)
	}
	dto := dtos.DashboardFullWithMeta{
		Dashboard: snapshot.Dashboard,
		Meta: dtos.DashboardMeta{
			Type:       models.DashTypeSnapshot,
			IsSnapshot: true,
			Created:    snapshot.Created,
			Expires:    snapshot.Expires,
		},
	}
	metrics.MApiDashboardSnapshotGet.Inc()
	return response.JSON(http.StatusOK, dto).SetHeader("Cache-Control", "public, max-age=3600")
}
// deleteExternalDashboardSnapshot asks the external snapshot server to
// delete a snapshot by calling its delete URL. A "snapshot not found" reply
// is treated as success, since the snapshot may already be gone.
func deleteExternalDashboardSnapshot(externalUrl string) error {
	response, err := client.Get(externalUrl)
	if err != nil {
		return err
	}
	defer func() {
		if err := response.Body.Close(); err != nil {
			plog.Warn("Failed to close response body", "err", err)
		}
	}()
	if response.StatusCode == 200 {
		return nil
	}
	// Gracefully ignore "snapshot not found" errors as they could have already
	// been removed either via the cleanup script or by request.
	if response.StatusCode == 500 {
		var respJson map[string]interface{}
		if err := json.NewDecoder(response.Body).Decode(&respJson); err != nil {
			return err
		}
		if respJson["message"] == "Failed to get dashboard snapshot" {
			return nil
		}
	}
	return fmt.Errorf("unexpected response when deleting external snapshot, status code: %d", response.StatusCode)
}
// GET /api/snapshots-delete/:deleteKey
// DeleteDashboardSnapshotByDeleteKey deletes a snapshot addressed by its
// secret delete key; knowledge of the key is the only authorization needed.
func (hs *HTTPServer) DeleteDashboardSnapshotByDeleteKey(c *models.ReqContext) response.Response {
	key := web.Params(c.Req)[":deleteKey"]
	if len(key) == 0 {
		return response.Error(404, "Snapshot not found", nil)
	}
	query := &models.GetDashboardSnapshotQuery{DeleteKey: key}
	err := hs.dashboardsnapshotsService.GetDashboardSnapshot(c.Req.Context(), query)
	if err != nil {
		return response.Error(500, "Failed to get dashboard snapshot", err)
	}
	// Guard against a nil result before dereferencing it below; the sibling
	// DeleteDashboardSnapshot handler performs the same check.
	if query.Result == nil {
		return response.Error(404, "Failed to get dashboard snapshot", nil)
	}
	if query.Result.External {
		err := deleteExternalDashboardSnapshot(query.Result.ExternalDeleteUrl)
		if err != nil {
			return response.Error(500, "Failed to delete external dashboard", err)
		}
	}
	cmd := &models.DeleteDashboardSnapshotCommand{DeleteKey: query.Result.DeleteKey}
	if err := hs.dashboardsnapshotsService.DeleteDashboardSnapshot(c.Req.Context(), cmd); err != nil {
		return response.Error(500, "Failed to delete dashboard snapshot", err)
	}
	return response.JSON(http.StatusOK, util.DynMap{
		"message": "Snapshot deleted. It might take an hour before it's cleared from any CDN caches.",
		"id":      query.Result.Id,
	})
}
// DELETE /api/snapshots/:key
// DeleteDashboardSnapshot deletes a snapshot after checking that the caller
// may edit the underlying dashboard or owns the snapshot. Snapshots whose
// dashboard has since been deleted must remain deletable, so a
// dashboard-not-found error from the permission check is not fatal.
func (hs *HTTPServer) DeleteDashboardSnapshot(c *models.ReqContext) response.Response {
	key := web.Params(c.Req)[":key"]
	if len(key) == 0 {
		return response.Error(404, "Snapshot not found", nil)
	}
	query := &models.GetDashboardSnapshotQuery{Key: key}
	err := hs.dashboardsnapshotsService.GetDashboardSnapshot(c.Req.Context(), query)
	if err != nil {
		return response.Error(500, "Failed to get dashboard snapshot", err)
	}
	if query.Result == nil {
		return response.Error(404, "Failed to get dashboard snapshot", nil)
	}
	dashboardID := query.Result.Dashboard.Get("id").MustInt64()
	guardian := guardian.New(c.Req.Context(), dashboardID, c.OrgId, c.SignedInUser)
	canEdit, err := guardian.CanEdit()
	// Check permissions only when the backing dashboard still exists; the
	// original returned 500/403 here and made snapshots of deleted
	// dashboards impossible to remove.
	if err != nil && !errors.Is(err, models.ErrDashboardNotFound) {
		return response.Error(500, "Error while checking permissions for snapshot", err)
	}
	if !canEdit && query.Result.UserId != c.SignedInUser.UserId && !errors.Is(err, models.ErrDashboardNotFound) {
		return response.Error(403, "Access denied to this snapshot", nil)
	}
	if query.Result.External {
		err := deleteExternalDashboardSnapshot(query.Result.ExternalDeleteUrl)
		if err != nil {
			return response.Error(500, "Failed to delete external dashboard", err)
		}
	}
	cmd := &models.DeleteDashboardSnapshotCommand{DeleteKey: query.Result.DeleteKey}
	if err := hs.dashboardsnapshotsService.DeleteDashboardSnapshot(c.Req.Context(), cmd); err != nil {
		return response.Error(500, "Failed to delete dashboard snapshot", err)
	}
	return response.JSON(http.StatusOK, util.DynMap{
		"message": "Snapshot deleted. It might take an hour before it's cleared from any CDN caches.",
		"id":      query.Result.Id,
	})
}
// GET /api/dashboard/snapshots
// SearchDashboardSnapshots lists snapshots visible to the signed-in user,
// optionally filtered by name and capped at "limit" results (default 1000).
func (hs *HTTPServer) SearchDashboardSnapshots(c *models.ReqContext) response.Response {
	query := c.Query("query")
	limit := c.QueryInt("limit")
	if limit == 0 {
		limit = 1000
	}
	searchQuery := models.GetDashboardSnapshotsQuery{
		Name:         query,
		Limit:        limit,
		OrgId:        c.OrgId,
		SignedInUser: c.SignedInUser,
	}
	err := hs.dashboardsnapshotsService.SearchDashboardSnapshots(c.Req.Context(), &searchQuery)
	if err != nil {
		return response.Error(500, "Search failed", err)
	}
	// Map storage records to the API DTO shape.
	dtos := make([]*models.DashboardSnapshotDTO, len(searchQuery.Result))
	for i, snapshot := range searchQuery.Result {
		dtos[i] = &models.DashboardSnapshotDTO{
			Id:          snapshot.Id,
			Name:        snapshot.Name,
			Key:         snapshot.Key,
			OrgId:       snapshot.OrgId,
			UserId:      snapshot.UserId,
			External:    snapshot.External,
			ExternalUrl: snapshot.ExternalUrl,
			Expires:     snapshot.Expires,
			Created:     snapshot.Created,
			Updated:     snapshot.Updated,
		}
	}
	return response.JSON(http.StatusOK, dtos)
}
Bug: Fix delete dashboard snapshot for deleted dashboards (#50919)
* Bug: Fix delete dashboard snapshot for deleted dashboards
* Fix lint and make it work for all the errors
* Fix lint
package api
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"net/http"
"time"
"github.com/grafana/grafana/pkg/api/dtos"
"github.com/grafana/grafana/pkg/api/response"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/infra/metrics"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/services/guardian"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/util"
"github.com/grafana/grafana/pkg/web"
)
// client is the shared HTTP client for all external snapshot-server calls;
// the 5s timeout keeps handlers from blocking on a slow remote server.
var client = &http.Client{
	Timeout:   time.Second * 5,
	Transport: &http.Transport{Proxy: http.ProxyFromEnvironment},
}

// GetSharingOptions returns the instance's snapshot-sharing configuration.
func GetSharingOptions(c *models.ReqContext) {
	c.JSON(http.StatusOK, util.DynMap{
		"externalSnapshotURL":  setting.ExternalSnapshotUrl,
		"externalSnapshotName": setting.ExternalSnapshotName,
		"externalEnabled":      setting.ExternalEnabled,
	})
}

// CreateExternalSnapshotResponse is the payload returned by an external
// snapshot server when a snapshot is created there.
type CreateExternalSnapshotResponse struct {
	Key       string `json:"key"`
	DeleteKey string `json:"deleteKey"`
	Url       string `json:"url"`
	DeleteUrl string `json:"deleteUrl"`
}
// createExternalDashboardSnapshot posts the snapshot payload to the
// configured external snapshot server and decodes its response, which
// carries the keys and URLs the caller stores locally.
func createExternalDashboardSnapshot(cmd models.CreateDashboardSnapshotCommand) (*CreateExternalSnapshotResponse, error) {
	var createSnapshotResponse CreateExternalSnapshotResponse
	message := map[string]interface{}{
		"name":      cmd.Name,
		"expires":   cmd.Expires,
		"dashboard": cmd.Dashboard,
		"key":       cmd.Key,
		"deleteKey": cmd.DeleteKey,
	}
	messageBytes, err := simplejson.NewFromAny(message).Encode()
	if err != nil {
		return nil, err
	}
	response, err := client.Post(setting.ExternalSnapshotUrl+"/api/snapshots", "application/json", bytes.NewBuffer(messageBytes))
	if err != nil {
		return nil, err
	}
	// Close errors are only logged; there is nothing useful to do with them.
	defer func() {
		if err := response.Body.Close(); err != nil {
			plog.Warn("Failed to close response body", "err", err)
		}
	}()
	if response.StatusCode != 200 {
		return nil, fmt.Errorf("create external snapshot response status code %d", response.StatusCode)
	}
	if err := json.NewDecoder(response.Body).Decode(&createSnapshotResponse); err != nil {
		return nil, err
	}
	return &createSnapshotResponse, nil
}
// POST /api/snapshots
// CreateDashboardSnapshot creates a snapshot either locally or, when
// cmd.External is set and external sharing is enabled, on the configured
// external snapshot server.
func (hs *HTTPServer) CreateDashboardSnapshot(c *models.ReqContext) response.Response {
	cmd := models.CreateDashboardSnapshotCommand{}
	if err := web.Bind(c.Req, &cmd); err != nil {
		return response.Error(http.StatusBadRequest, "bad request data", err)
	}
	if cmd.Name == "" {
		cmd.Name = "Unnamed snapshot"
	}
	var url string
	cmd.ExternalUrl = ""
	cmd.OrgId = c.OrgId
	cmd.UserId = c.UserId
	if cmd.External {
		if !setting.ExternalEnabled {
			c.JsonApiErr(403, "External dashboard creation is disabled", nil)
			return nil
		}
		response, err := createExternalDashboardSnapshot(cmd)
		if err != nil {
			c.JsonApiErr(500, "Failed to create external snapshot", err)
			return nil
		}
		url = response.Url
		cmd.Key = response.Key
		cmd.DeleteKey = response.DeleteKey
		cmd.ExternalUrl = response.Url
		cmd.ExternalDeleteUrl = response.DeleteUrl
		// The dashboard body lives on the external server only; store an
		// empty JSON object locally.
		cmd.Dashboard = simplejson.New()
		metrics.MApiDashboardSnapshotExternal.Inc()
	} else {
		// Local snapshot: generate access and delete keys when the client
		// did not supply them.
		if cmd.Key == "" {
			var err error
			cmd.Key, err = util.GetRandomString(32)
			if err != nil {
				c.JsonApiErr(500, "Could not generate random string", err)
				return nil
			}
		}
		if cmd.DeleteKey == "" {
			var err error
			cmd.DeleteKey, err = util.GetRandomString(32)
			if err != nil {
				c.JsonApiErr(500, "Could not generate random string", err)
				return nil
			}
		}
		url = setting.ToAbsUrl("dashboard/snapshot/" + cmd.Key)
		metrics.MApiDashboardSnapshotCreate.Inc()
	}
	if err := hs.dashboardsnapshotsService.CreateDashboardSnapshot(c.Req.Context(), &cmd); err != nil {
		c.JsonApiErr(500, "Failed to create snapshot", err)
		return nil
	}
	c.JSON(http.StatusOK, util.DynMap{
		"key":       cmd.Key,
		"deleteKey": cmd.DeleteKey,
		"url":       url,
		"deleteUrl": setting.ToAbsUrl("api/snapshots-delete/" + cmd.DeleteKey),
		"id":        cmd.Result.Id,
	})
	return nil
}
// GET /api/snapshots/:key
// GetDashboardSnapshot returns the stored snapshot as a dashboard DTO; the
// response is cacheable for an hour.
func (hs *HTTPServer) GetDashboardSnapshot(c *models.ReqContext) response.Response {
	key := web.Params(c.Req)[":key"]
	if len(key) == 0 {
		return response.Error(404, "Snapshot not found", nil)
	}
	query := &models.GetDashboardSnapshotQuery{Key: key}
	err := hs.dashboardsnapshotsService.GetDashboardSnapshot(c.Req.Context(), query)
	if err != nil {
		return response.Error(500, "Failed to get dashboard snapshot", err)
	}
	snapshot := query.Result
	// expired snapshots should also be removed from db
	if snapshot.Expires.Before(time.Now()) {
		return response.Error(404, "Dashboard snapshot not found", err)
	}
	dto := dtos.DashboardFullWithMeta{
		Dashboard: snapshot.Dashboard,
		Meta: dtos.DashboardMeta{
			Type:       models.DashTypeSnapshot,
			IsSnapshot: true,
			Created:    snapshot.Created,
			Expires:    snapshot.Expires,
		},
	}
	metrics.MApiDashboardSnapshotGet.Inc()
	return response.JSON(http.StatusOK, dto).SetHeader("Cache-Control", "public, max-age=3600")
}
// deleteExternalDashboardSnapshot asks the external snapshot server to
// delete a snapshot by calling its delete URL. A "snapshot not found" reply
// is treated as success, since the snapshot may already be gone.
func deleteExternalDashboardSnapshot(externalUrl string) error {
	response, err := client.Get(externalUrl)
	if err != nil {
		return err
	}
	defer func() {
		if err := response.Body.Close(); err != nil {
			plog.Warn("Failed to close response body", "err", err)
		}
	}()
	if response.StatusCode == 200 {
		return nil
	}
	// Gracefully ignore "snapshot not found" errors as they could have already
	// been removed either via the cleanup script or by request.
	if response.StatusCode == 500 {
		var respJson map[string]interface{}
		if err := json.NewDecoder(response.Body).Decode(&respJson); err != nil {
			return err
		}
		if respJson["message"] == "Failed to get dashboard snapshot" {
			return nil
		}
	}
	return fmt.Errorf("unexpected response when deleting external snapshot, status code: %d", response.StatusCode)
}
// GET /api/snapshots-delete/:deleteKey
// DeleteDashboardSnapshotByDeleteKey deletes a snapshot addressed by its
// secret delete key; knowledge of the key is the only authorization needed.
func (hs *HTTPServer) DeleteDashboardSnapshotByDeleteKey(c *models.ReqContext) response.Response {
	key := web.Params(c.Req)[":deleteKey"]
	if len(key) == 0 {
		return response.Error(404, "Snapshot not found", nil)
	}
	query := &models.GetDashboardSnapshotQuery{DeleteKey: key}
	err := hs.dashboardsnapshotsService.GetDashboardSnapshot(c.Req.Context(), query)
	if err != nil {
		return response.Error(500, "Failed to get dashboard snapshot", err)
	}
	// Guard against a nil result before dereferencing it below; the sibling
	// DeleteDashboardSnapshot handler performs the same check.
	if query.Result == nil {
		return response.Error(404, "Failed to get dashboard snapshot", nil)
	}
	if query.Result.External {
		err := deleteExternalDashboardSnapshot(query.Result.ExternalDeleteUrl)
		if err != nil {
			return response.Error(500, "Failed to delete external dashboard", err)
		}
	}
	cmd := &models.DeleteDashboardSnapshotCommand{DeleteKey: query.Result.DeleteKey}
	if err := hs.dashboardsnapshotsService.DeleteDashboardSnapshot(c.Req.Context(), cmd); err != nil {
		return response.Error(500, "Failed to delete dashboard snapshot", err)
	}
	return response.JSON(http.StatusOK, util.DynMap{
		"message": "Snapshot deleted. It might take an hour before it's cleared from any CDN caches.",
		"id":      query.Result.Id,
	})
}
// DELETE /api/snapshots/:key
// DeleteDashboardSnapshot deletes a snapshot after checking that the caller
// may edit the underlying dashboard or owns the snapshot; snapshots whose
// dashboard has since been deleted remain deletable.
func (hs *HTTPServer) DeleteDashboardSnapshot(c *models.ReqContext) response.Response {
	key := web.Params(c.Req)[":key"]
	if len(key) == 0 {
		return response.Error(404, "Snapshot not found", nil)
	}
	query := &models.GetDashboardSnapshotQuery{Key: key}
	err := hs.dashboardsnapshotsService.GetDashboardSnapshot(c.Req.Context(), query)
	if err != nil {
		return response.Error(500, "Failed to get dashboard snapshot", err)
	}
	if query.Result == nil {
		return response.Error(404, "Failed to get dashboard snapshot", nil)
	}
	dashboardID := query.Result.Dashboard.Get("id").MustInt64()
	guardian := guardian.New(c.Req.Context(), dashboardID, c.OrgId, c.SignedInUser)
	canEdit, err := guardian.CanEdit()
	// check for permissions only if the dashboard is found
	if err != nil && !errors.Is(err, models.ErrDashboardNotFound) {
		return response.Error(500, "Error while checking permissions for snapshot", err)
	}
	if !canEdit && query.Result.UserId != c.SignedInUser.UserId && !errors.Is(err, models.ErrDashboardNotFound) {
		return response.Error(403, "Access denied to this snapshot", nil)
	}
	if query.Result.External {
		err := deleteExternalDashboardSnapshot(query.Result.ExternalDeleteUrl)
		if err != nil {
			return response.Error(500, "Failed to delete external dashboard", err)
		}
	}
	cmd := &models.DeleteDashboardSnapshotCommand{DeleteKey: query.Result.DeleteKey}
	if err := hs.dashboardsnapshotsService.DeleteDashboardSnapshot(c.Req.Context(), cmd); err != nil {
		return response.Error(500, "Failed to delete dashboard snapshot", err)
	}
	return response.JSON(http.StatusOK, util.DynMap{
		"message": "Snapshot deleted. It might take an hour before it's cleared from any CDN caches.",
		"id":      query.Result.Id,
	})
}
// GET /api/dashboard/snapshots
// SearchDashboardSnapshots lists snapshots visible to the signed-in user,
// optionally filtered by name and capped at "limit" results (default 1000).
func (hs *HTTPServer) SearchDashboardSnapshots(c *models.ReqContext) response.Response {
	query := c.Query("query")
	limit := c.QueryInt("limit")
	if limit == 0 {
		limit = 1000
	}
	searchQuery := models.GetDashboardSnapshotsQuery{
		Name:         query,
		Limit:        limit,
		OrgId:        c.OrgId,
		SignedInUser: c.SignedInUser,
	}
	err := hs.dashboardsnapshotsService.SearchDashboardSnapshots(c.Req.Context(), &searchQuery)
	if err != nil {
		return response.Error(500, "Search failed", err)
	}
	// Map storage records to the API DTO shape.
	dtos := make([]*models.DashboardSnapshotDTO, len(searchQuery.Result))
	for i, snapshot := range searchQuery.Result {
		dtos[i] = &models.DashboardSnapshotDTO{
			Id:          snapshot.Id,
			Name:        snapshot.Name,
			Key:         snapshot.Key,
			OrgId:       snapshot.OrgId,
			UserId:      snapshot.UserId,
			External:    snapshot.External,
			ExternalUrl: snapshot.ExternalUrl,
			Expires:     snapshot.Expires,
			Created:     snapshot.Created,
			Updated:     snapshot.Updated,
		}
	}
	return response.JSON(http.StatusOK, dtos)
}
|
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api_test
import (
"encoding/json"
"flag"
"math/rand"
"reflect"
"strconv"
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta2"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
docker "github.com/fsouza/go-dockerclient"
fuzz "github.com/google/gofuzz"
"speter.net/go/exp/math/dec/inf"
)
// fuzzIters controls how many fuzz-and-round-trip iterations each known
// type gets in TestRoundTripTypes.
var fuzzIters = flag.Int("fuzz_iters", 40, "How many fuzzing iterations to do.")

// apiObjectFuzzer can randomly populate api objects.
// Custom Funcs below constrain fields that would otherwise break encode/
// decode round trips (blank type meta, nil-vs-empty collections, etc.).
var apiObjectFuzzer = fuzz.New().NilChance(.5).NumElements(1, 1).Funcs(
	func(j *runtime.PluginBase, c fuzz.Continue) {
		// Do nothing; this struct has only a Kind field and it must stay blank in memory.
	},
	func(j *runtime.TypeMeta, c fuzz.Continue) {
		// We have to customize the randomization of TypeMetas because their
		// APIVersion and Kind must remain blank in memory.
		j.APIVersion = ""
		j.Kind = ""
	},
	func(j *api.TypeMeta, c fuzz.Continue) {
		// We have to customize the randomization of TypeMetas because their
		// APIVersion and Kind must remain blank in memory.
		j.APIVersion = ""
		j.Kind = ""
	},
	func(j *api.ObjectMeta, c fuzz.Continue) {
		j.Name = c.RandString()
		j.ResourceVersion = strconv.FormatUint(c.RandUint64(), 10)
		j.SelfLink = c.RandString()

		// Rfc3339Copy truncates sub-second precision so the timestamp
		// survives serialization unchanged.
		var sec, nsec int64
		c.Fuzz(&sec)
		c.Fuzz(&nsec)
		j.CreationTimestamp = util.Unix(sec, nsec).Rfc3339Copy()
	},
	func(j *api.ListMeta, c fuzz.Continue) {
		j.ResourceVersion = strconv.FormatUint(c.RandUint64(), 10)
		j.SelfLink = c.RandString()
	},
	func(j *api.PodPhase, c fuzz.Continue) {
		// Restrict to valid phase values; a random string would not decode.
		statuses := []api.PodPhase{api.PodPending, api.PodRunning, api.PodFailed, api.PodUnknown}
		*j = statuses[c.Rand.Intn(len(statuses))]
	},
	func(j *api.ReplicationControllerSpec, c fuzz.Continue) {
		// TemplateRef must be nil for round trip
		c.Fuzz(&j.Template)
		if j.Template == nil {
			// TODO: v1beta1/2 can't round trip a nil template correctly, fix by having v1beta1/2
			// conversion compare converted object to nil via DeepEqual
			j.Template = &api.PodTemplateSpec{}
		}
		// Only labels survive conversion on the template's ObjectMeta.
		j.Template.ObjectMeta = api.ObjectMeta{Labels: j.Template.ObjectMeta.Labels}
		j.Template.Spec.NodeSelector = nil
		c.Fuzz(&j.Selector)
		j.Replicas = int(c.RandUint64())
	},
	func(j *api.ReplicationControllerStatus, c fuzz.Continue) {
		// only replicas round trips
		j.Replicas = int(c.RandUint64())
	},
	func(j *api.List, c fuzz.Continue) {
		c.Fuzz(&j.ListMeta)
		c.Fuzz(&j.Items)
		// nil decodes back as an empty slice, so normalize up front.
		if j.Items == nil {
			j.Items = []runtime.Object{}
		}
	},
	func(j *runtime.Object, c fuzz.Continue) {
		// Half the time produce an opaque Unknown payload, half the time a
		// concrete fuzzed object.
		if c.RandBool() {
			*j = &runtime.Unknown{
				TypeMeta: runtime.TypeMeta{Kind: "Something", APIVersion: "unknown"},
				RawJSON:  []byte(`{"apiVersion":"unknown","kind":"Something","someKey":"someValue"}`),
			}
		} else {
			types := []runtime.Object{&api.Pod{}, &api.ReplicationController{}}
			t := types[c.Rand.Intn(len(types))]
			c.Fuzz(t)
			*j = t
		}
	},
	func(intstr *util.IntOrString, c fuzz.Continue) {
		// util.IntOrString will panic if its kind is set wrong.
		if c.RandBool() {
			intstr.Kind = util.IntstrInt
			intstr.IntVal = int(c.RandUint64())
			intstr.StrVal = ""
		} else {
			intstr.Kind = util.IntstrString
			intstr.IntVal = 0
			intstr.StrVal = c.RandString()
		}
	},
	func(pb map[docker.Port][]docker.PortBinding, c fuzz.Continue) {
		// This is necessary because keys with nil values get omitted.
		// TODO: Is this a bug?
		pb[docker.Port(c.RandString())] = []docker.PortBinding{
			{c.RandString(), c.RandString()},
			{c.RandString(), c.RandString()},
		}
	},
	func(pm map[string]docker.PortMapping, c fuzz.Continue) {
		// This is necessary because keys with nil values get omitted.
		// TODO: Is this a bug?
		pm[c.RandString()] = docker.PortMapping{
			c.RandString(): c.RandString(),
		}
	},
	func(q *resource.Quantity, c fuzz.Continue) {
		// Real Quantity fuzz testing is done elsewhere;
		// this limited subset of functionality survives
		// round-tripping to v1beta1/2.
		q.Amount = &inf.Dec{}
		q.Format = resource.DecimalExponent
		//q.Amount.SetScale(inf.Scale(-c.Intn(12)))
		q.Amount.SetUnscaled(c.Int63n(1000))
	},
)
// runTest fuzzes source, then verifies it survives an encode/decode round
// trip through codec: Encode -> Decode must yield a semantically equal
// object, and DecodeInto a fresh instance must as well. Error messages are
// prefixed 0-3 to identify which stage failed.
func runTest(t *testing.T, codec runtime.Codec, source runtime.Object) {
	name := reflect.TypeOf(source).Elem().Name()
	apiObjectFuzzer.Fuzz(source)

	j, err := meta.Accessor(source)
	if err != nil {
		t.Fatalf("Unexpected error %v for %#v", err, source)
	}
	// Kind and APIVersion must stay blank in memory (see the fuzzer funcs);
	// the codec fills them in on the wire form.
	j.SetKind("")
	j.SetAPIVersion("")

	data, err := codec.Encode(source)
	if err != nil {
		t.Errorf("%v: %v (%#v)", name, err, source)
		return
	}
	obj2, err := codec.Decode(data)
	if err != nil {
		t.Errorf("0: %v: %v\nCodec: %v\nData: %s\nSource: %#v", name, err, codec, string(data), source)
		return
	}
	if !api.Semantic.DeepEqual(source, obj2) {
		t.Errorf("1: %v: diff: %v\nCodec: %v\nData: %s\nSource: %#v", name, util.ObjectGoPrintDiff(source, obj2), codec, string(data), source)
		return
	}
	// DecodeInto path: decode into a zero value of the same concrete type.
	obj3 := reflect.New(reflect.TypeOf(source).Elem()).Interface().(runtime.Object)
	err = codec.DecodeInto(data, obj3)
	if err != nil {
		t.Errorf("2: %v: %v", name, err)
		return
	}
	if !api.Semantic.DeepEqual(source, obj3) {
		t.Errorf("3: %v: diff: %v\nCodec: %v", name, util.ObjectDiff(source, obj3), codec)
		return
	}
}
// For debugging problems
// TestSpecificKind round-trips one hard-coded kind with scheme logging
// enabled, which is useful when isolating a conversion failure to a
// single type.
func TestSpecificKind(t *testing.T) {
	api.Scheme.Log(t)
	// Always detach the logger again. The original called Log(nil) only on
	// the success path, so an early return left the scheme logging through
	// a completed test's *testing.T (the later revision of this test fixes
	// it the same way).
	defer api.Scheme.Log(nil)
	kind := "PodList"
	item, err := api.Scheme.New("", kind)
	if err != nil {
		t.Errorf("Couldn't make a %v? %v", kind, err)
		return
	}
	runTest(t, v1beta1.Codec, item)
	runTest(t, v1beta2.Codec, item)
}
// TestList round-trips the List kind with scheme logging enabled, for
// debugging heterogeneous-list conversion.
func TestList(t *testing.T) {
	api.Scheme.Log(t)
	// Always detach the logger: the original skipped Log(nil) when New
	// failed, leaving logging attached to a finished test's *testing.T.
	defer api.Scheme.Log(nil)
	kind := "List"
	item, err := api.Scheme.New("", kind)
	if err != nil {
		t.Errorf("Couldn't make a %v? %v", kind, err)
		return
	}
	runTest(t, v1beta1.Codec, item)
	runTest(t, v1beta2.Codec, item)
}
// nonRoundTrippableTypes cannot survive any encode/decode round trip;
// nonInternalRoundTrippableTypes round trip through versioned codecs but
// not through the internal api.Codec.
var nonRoundTrippableTypes = util.NewStringSet("ContainerManifest")
var nonInternalRoundTrippableTypes = util.NewStringSet("List")

// TestRoundTripTypes fuzzes every kind registered with the scheme and
// verifies it round trips through each supported codec.
func TestRoundTripTypes(t *testing.T) {
	for kind := range api.Scheme.KnownTypes("") {
		if nonRoundTrippableTypes.Has(kind) {
			continue
		}
		// Try a few times, since runTest uses random values.
		for i := 0; i < *fuzzIters; i++ {
			item, err := api.Scheme.New("", kind)
			if err != nil {
				t.Fatalf("Couldn't make a %v? %v", kind, err)
			}
			if _, err := meta.Accessor(item); err != nil {
				t.Fatalf("%q is not a TypeMeta and cannot be tested - add it to nonRoundTrippableTypes: %v", kind, err)
			}
			runTest(t, v1beta1.Codec, item)
			runTest(t, v1beta2.Codec, item)
			if !nonInternalRoundTrippableTypes.Has(kind) {
				runTest(t, api.Codec, item)
			}
		}
	}
}
// TestEncode_Ptr verifies that encoding a pointer to an object and decoding
// it back yields a semantically equal object of the same pointer type.
func TestEncode_Ptr(t *testing.T) {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Labels: map[string]string{"name": "foo"},
		},
	}
	obj := runtime.Object(pod)
	data, err := latest.Codec.Encode(obj)
	obj2, err2 := latest.Codec.Decode(data)
	if err != nil || err2 != nil {
		t.Fatalf("Failure: '%v' '%v'", err, err2)
	}
	if _, ok := obj2.(*api.Pod); !ok {
		t.Fatalf("Got wrong type")
	}
	if !api.Semantic.DeepEqual(obj2, pod) {
		// pod is already a *api.Pod; the original passed &pod (a **api.Pod),
		// so %#v printed a useless pointer-to-pointer instead of the struct.
		t.Errorf("Expected:\n %#v,\n Got:\n %#v", pod, obj2)
	}
}
// TestBadJSONRejection checks that the codec refuses payloads that are
// missing a kind or name an unknown kind.
func TestBadJSONRejection(t *testing.T) {
	missingKind := []byte(`{ }`)
	if _, err := latest.Codec.Decode(missingKind); err == nil {
		t.Errorf("Did not reject despite lack of kind field: %s", missingKind)
	}

	unknownType := []byte(`{"kind": "bar"}`)
	if _, err := latest.Codec.Decode(unknownType); err == nil {
		t.Errorf("Did not reject despite use of unknown type: %s", unknownType)
	}

	/*badJSONKindMismatch := []byte(`{"kind": "Pod"}`)
	if err2 := DecodeInto(badJSONKindMismatch, &Minion{}); err2 == nil {
		t.Errorf("Kind is set but doesn't match the object type: %s", badJSONKindMismatch)
	}*/
}
// benchmarkSeed keeps benchmark inputs reproducible across runs.
const benchmarkSeed = 100

// BenchmarkEncode measures versioned encoding of a deterministically
// fuzzed Pod.
func BenchmarkEncode(b *testing.B) {
	var pod api.Pod
	apiObjectFuzzer.RandSource(rand.NewSource(benchmarkSeed))
	apiObjectFuzzer.Fuzz(&pod)
	for n := 0; n < b.N; n++ {
		latest.Codec.Encode(&pod)
	}
}
// BenchmarkEncodeJSON provides a baseline for regular JSON encode
// performance, for comparison against BenchmarkEncode.
func BenchmarkEncodeJSON(b *testing.B) {
	var pod api.Pod
	apiObjectFuzzer.RandSource(rand.NewSource(benchmarkSeed))
	apiObjectFuzzer.Fuzz(&pod)
	for n := 0; n < b.N; n++ {
		json.Marshal(&pod)
	}
}
// BenchmarkDecode measures versioned decoding of a pre-encoded fuzzed Pod.
func BenchmarkDecode(b *testing.B) {
	var pod api.Pod
	apiObjectFuzzer.RandSource(rand.NewSource(benchmarkSeed))
	apiObjectFuzzer.Fuzz(&pod)
	data, _ := latest.Codec.Encode(&pod)
	for n := 0; n < b.N; n++ {
		latest.Codec.Decode(data)
	}
}
// BenchmarkDecodeInto measures decoding into a caller-supplied Pod, which
// skips the type lookup that Decode performs.
func BenchmarkDecodeInto(b *testing.B) {
	var pod api.Pod
	apiObjectFuzzer.RandSource(rand.NewSource(benchmarkSeed))
	apiObjectFuzzer.Fuzz(&pod)
	data, _ := latest.Codec.Encode(&pod)
	for n := 0; n < b.N; n++ {
		var target api.Pod
		latest.Codec.DecodeInto(data, &target)
	}
}
// BenchmarkDecodeJSON provides a baseline for regular JSON decode
// performance, for comparison against BenchmarkDecode/BenchmarkDecodeInto.
func BenchmarkDecodeJSON(b *testing.B) {
	var pod api.Pod
	apiObjectFuzzer.RandSource(rand.NewSource(benchmarkSeed))
	apiObjectFuzzer.Fuzz(&pod)
	data, _ := latest.Codec.Encode(&pod)
	for n := 0; n < b.N; n++ {
		var target api.Pod
		json.Unmarshal(data, &target)
	}
}
Improve serialization round trip test and add v1beta3
Kubernetes-commit: 8262c30c977e96d01f7a84b5d1c7f19cced365f5
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api_test
import (
"encoding/json"
"flag"
"math/rand"
"reflect"
"strconv"
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta2"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta3"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
docker "github.com/fsouza/go-dockerclient"
fuzz "github.com/google/gofuzz"
"speter.net/go/exp/math/dec/inf"
)
// fuzzIters controls how many fuzz-and-round-trip iterations each known
// type gets in TestRoundTripTypes.
var fuzzIters = flag.Int("fuzz_iters", 20, "How many fuzzing iterations to do.")

// fuzzerFor can randomly populate api objects that are destined for version.
// NOTE(review): the version parameter is currently unused in the body —
// presumably a hook for version-specific fuzz rules; confirm before relying
// on it. A nil src leaves the fuzzer with its default random source.
func fuzzerFor(t *testing.T, version string, src rand.Source) *fuzz.Fuzzer {
	f := fuzz.New().NilChance(.5).NumElements(1, 1)
	if src != nil {
		f.RandSource(src)
	}
	// Custom Funcs constrain fields that would otherwise break encode/
	// decode round trips (blank type meta, nil-vs-empty collections, etc.).
	f.Funcs(
		func(j *runtime.PluginBase, c fuzz.Continue) {
			// Do nothing; this struct has only a Kind field and it must stay blank in memory.
		},
		func(j *runtime.TypeMeta, c fuzz.Continue) {
			// We have to customize the randomization of TypeMetas because their
			// APIVersion and Kind must remain blank in memory.
			j.APIVersion = ""
			j.Kind = ""
		},
		func(j *api.TypeMeta, c fuzz.Continue) {
			// We have to customize the randomization of TypeMetas because their
			// APIVersion and Kind must remain blank in memory.
			j.APIVersion = ""
			j.Kind = ""
		},
		func(j *api.ObjectMeta, c fuzz.Continue) {
			j.Name = c.RandString()
			j.ResourceVersion = strconv.FormatUint(c.RandUint64(), 10)
			j.SelfLink = c.RandString()

			// Rfc3339Copy truncates sub-second precision so the timestamp
			// survives serialization unchanged.
			var sec, nsec int64
			c.Fuzz(&sec)
			c.Fuzz(&nsec)
			j.CreationTimestamp = util.Unix(sec, nsec).Rfc3339Copy()
		},
		func(j *api.ListMeta, c fuzz.Continue) {
			j.ResourceVersion = strconv.FormatUint(c.RandUint64(), 10)
			j.SelfLink = c.RandString()
		},
		func(j *api.PodPhase, c fuzz.Continue) {
			// Restrict to valid phase values; a random string would not decode.
			statuses := []api.PodPhase{api.PodPending, api.PodRunning, api.PodFailed, api.PodUnknown}
			*j = statuses[c.Rand.Intn(len(statuses))]
		},
		func(j *api.ReplicationControllerSpec, c fuzz.Continue) {
			// TemplateRef must be nil for round trip
			c.Fuzz(&j.Template)
			if j.Template == nil {
				// TODO: v1beta1/2 can't round trip a nil template correctly, fix by having v1beta1/2
				// conversion compare converted object to nil via DeepEqual
				j.Template = &api.PodTemplateSpec{}
			}
			// Only labels survive conversion on the template's ObjectMeta.
			j.Template.ObjectMeta = api.ObjectMeta{Labels: j.Template.ObjectMeta.Labels}
			j.Template.Spec.NodeSelector = nil
			c.Fuzz(&j.Selector)
			j.Replicas = int(c.RandUint64())
		},
		func(j *api.ReplicationControllerStatus, c fuzz.Continue) {
			// only replicas round trips
			j.Replicas = int(c.RandUint64())
		},
		func(j *api.List, c fuzz.Continue) {
			c.Fuzz(&j.ListMeta)
			c.Fuzz(&j.Items)
			// nil decodes back as an empty slice, so normalize up front.
			if j.Items == nil {
				j.Items = []runtime.Object{}
			}
		},
		func(j *runtime.Object, c fuzz.Continue) {
			// Half the time produce an opaque Unknown payload, half the time
			// a concrete fuzzed object.
			if c.RandBool() {
				*j = &runtime.Unknown{
					TypeMeta: runtime.TypeMeta{Kind: "Something", APIVersion: "unknown"},
					RawJSON:  []byte(`{"apiVersion":"unknown","kind":"Something","someKey":"someValue"}`),
				}
			} else {
				types := []runtime.Object{&api.Pod{}, &api.ReplicationController{}}
				t := types[c.Rand.Intn(len(types))]
				c.Fuzz(t)
				*j = t
			}
		},
		func(intstr *util.IntOrString, c fuzz.Continue) {
			// util.IntOrString will panic if its kind is set wrong.
			if c.RandBool() {
				intstr.Kind = util.IntstrInt
				intstr.IntVal = int(c.RandUint64())
				intstr.StrVal = ""
			} else {
				intstr.Kind = util.IntstrString
				intstr.IntVal = 0
				intstr.StrVal = c.RandString()
			}
		},
		func(pb map[docker.Port][]docker.PortBinding, c fuzz.Continue) {
			// This is necessary because keys with nil values get omitted.
			// TODO: Is this a bug?
			pb[docker.Port(c.RandString())] = []docker.PortBinding{
				{c.RandString(), c.RandString()},
				{c.RandString(), c.RandString()},
			}
		},
		func(pm map[string]docker.PortMapping, c fuzz.Continue) {
			// This is necessary because keys with nil values get omitted.
			// TODO: Is this a bug?
			pm[c.RandString()] = docker.PortMapping{
				c.RandString(): c.RandString(),
			}
		},
		func(q *resource.Quantity, c fuzz.Continue) {
			// Real Quantity fuzz testing is done elsewhere;
			// this limited subset of functionality survives
			// round-tripping to v1beta1/2.
			q.Amount = &inf.Dec{}
			q.Format = resource.DecimalExponent
			//q.Amount.SetScale(inf.Scale(-c.Intn(12)))
			q.Amount.SetUnscaled(c.Int63n(1000))
		},
	)
	return f
}
// fuzzInternalObject deterministically fuzzes item from seed using the
// fuzzer for forVersion, then blanks Kind/APIVersion (which must remain
// empty on in-memory objects — see the fuzzer funcs). Returns item for
// chaining into roundTrip.
func fuzzInternalObject(t *testing.T, forVersion string, item runtime.Object, seed int64) runtime.Object {
	fuzzerFor(t, forVersion, rand.NewSource(seed)).Fuzz(item)

	j, err := meta.Accessor(item)
	if err != nil {
		t.Fatalf("Unexpected error %v for %#v", err, item)
	}
	j.SetKind("")
	j.SetAPIVersion("")

	return item
}
// roundTrip verifies item survives an encode/decode round trip through
// codec: Encode -> Decode must yield a semantically equal object, and
// DecodeInto a fresh instance must as well. Error messages are prefixed
// 0-3 to identify which stage failed.
func roundTrip(t *testing.T, codec runtime.Codec, item runtime.Object) {
	name := reflect.TypeOf(item).Elem().Name()
	data, err := codec.Encode(item)
	if err != nil {
		t.Errorf("%v: %v (%#v)", name, err, item)
		return
	}

	obj2, err := codec.Decode(data)
	if err != nil {
		t.Errorf("0: %v: %v\nCodec: %v\nData: %s\nSource: %#v", name, err, codec, string(data), item)
		return
	}
	if !api.Semantic.DeepEqual(item, obj2) {
		t.Errorf("1: %v: diff: %v\nCodec: %v\nData: %s\nSource: %#v\nFinal: %#v", name, util.ObjectGoPrintDiff(item, obj2), codec, string(data), item, obj2)
		return
	}

	// DecodeInto path: decode into a zero value of the same concrete type.
	obj3 := reflect.New(reflect.TypeOf(item).Elem()).Interface().(runtime.Object)
	err = codec.DecodeInto(data, obj3)
	if err != nil {
		t.Errorf("2: %v: %v", name, err)
		return
	}
	if !api.Semantic.DeepEqual(item, obj3) {
		t.Errorf("3: %v: diff: %v\nCodec: %v", name, util.ObjectDiff(item, obj3), codec)
		return
	}
}
// roundTripSame verifies the same source object is tested in all API versions.
// The identical seed is reused for the v1beta3 fuzz pass so every codec sees
// the same underlying object.
func roundTripSame(t *testing.T, item runtime.Object) {
	seed := rand.Int63()

	fuzzInternalObject(t, "", item, seed)
	for _, codec := range []runtime.Codec{v1beta1.Codec, v1beta2.Codec} {
		roundTrip(t, codec, item)
	}

	fuzzInternalObject(t, "v1beta3", item, seed)
	roundTrip(t, v1beta3.Codec, item)
}
// roundTripAll fuzzes item once per API version (same seed each time) and
// round trips each result through that version's codec.
func roundTripAll(t *testing.T, item runtime.Object) {
	seed := rand.Int63()
	for _, v := range []struct {
		name  string
		codec runtime.Codec
	}{
		{"v1beta1", v1beta1.Codec},
		{"v1beta2", v1beta2.Codec},
		{"v1beta3", v1beta3.Codec},
	} {
		roundTrip(t, v.codec, fuzzInternalObject(t, v.name, item, seed))
	}
}
// For debugging problems
// TestSpecificKind round-trips one hard-coded kind with scheme logging
// enabled, useful when isolating a conversion failure to a single type.
func TestSpecificKind(t *testing.T) {
	api.Scheme.Log(t)
	defer api.Scheme.Log(nil)

	const kind = "PodList"
	item, err := api.Scheme.New("", kind)
	if err != nil {
		t.Errorf("Couldn't make a %v? %v", kind, err)
		return
	}
	roundTripSame(t, item)
}
// TestList round-trips the List kind with scheme logging enabled, for
// debugging heterogeneous-list conversion.
func TestList(t *testing.T) {
	api.Scheme.Log(t)
	defer api.Scheme.Log(nil)

	const kind = "List"
	item, err := api.Scheme.New("", kind)
	if err != nil {
		t.Errorf("Couldn't make a %v? %v", kind, err)
		return
	}
	roundTripSame(t, item)
}
// nonRoundTrippableTypes cannot survive any encode/decode round trip;
// nonInternalRoundTrippableTypes round trip through versioned codecs but
// not through the internal api.Codec.
var nonRoundTrippableTypes = util.NewStringSet("ContainerManifest", "ContainerManifestList")
var nonInternalRoundTrippableTypes = util.NewStringSet("List")

// TestRoundTripTypes fuzzes every kind registered with the scheme and
// verifies it round trips through each supported codec.
func TestRoundTripTypes(t *testing.T) {
	// Uncomment to trace scheme conversions while debugging:
	// api.Scheme.Log(t)
	// defer api.Scheme.Log(nil)
	for kind := range api.Scheme.KnownTypes("") {
		if nonRoundTrippableTypes.Has(kind) {
			continue
		}
		// Try a few times, since runTest uses random values.
		for i := 0; i < *fuzzIters; i++ {
			item, err := api.Scheme.New("", kind)
			if err != nil {
				t.Fatalf("Couldn't make a %v? %v", kind, err)
			}
			if _, err := meta.Accessor(item); err != nil {
				t.Fatalf("%q is not a TypeMeta and cannot be tested - add it to nonRoundTrippableTypes: %v", kind, err)
			}
			roundTripSame(t, item)
			if !nonInternalRoundTrippableTypes.Has(kind) {
				roundTrip(t, api.Codec, fuzzInternalObject(t, "", item, rand.Int63()))
			}
		}
	}
}
// TestEncode_Ptr verifies that encoding a pointer to an object and decoding
// it back yields a semantically equal object of the same pointer type.
func TestEncode_Ptr(t *testing.T) {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Labels: map[string]string{"name": "foo"},
		},
	}
	obj := runtime.Object(pod)
	data, err := latest.Codec.Encode(obj)
	obj2, err2 := latest.Codec.Decode(data)
	if err != nil || err2 != nil {
		t.Fatalf("Failure: '%v' '%v'", err, err2)
	}
	if _, ok := obj2.(*api.Pod); !ok {
		t.Fatalf("Got wrong type")
	}
	if !api.Semantic.DeepEqual(obj2, pod) {
		// pod is already a *api.Pod; the original passed &pod (a **api.Pod),
		// so %#v printed a useless pointer-to-pointer instead of the struct.
		t.Errorf("Expected:\n %#v,\n Got:\n %#v", pod, obj2)
	}
}
// TestBadJSONRejection checks that the codec refuses payloads that are
// missing a kind or name an unknown kind.
func TestBadJSONRejection(t *testing.T) {
	missingKind := []byte(`{ }`)
	if _, err := latest.Codec.Decode(missingKind); err == nil {
		t.Errorf("Did not reject despite lack of kind field: %s", missingKind)
	}

	unknownType := []byte(`{"kind": "bar"}`)
	if _, err := latest.Codec.Decode(unknownType); err == nil {
		t.Errorf("Did not reject despite use of unknown type: %s", unknownType)
	}

	/*badJSONKindMismatch := []byte(`{"kind": "Pod"}`)
	if err2 := DecodeInto(badJSONKindMismatch, &Minion{}); err2 == nil {
		t.Errorf("Kind is set but doesn't match the object type: %s", badJSONKindMismatch)
	}*/
}
// benchmarkSeed keeps benchmark inputs reproducible across runs.
const benchmarkSeed = 100

// BenchmarkEncode measures versioned encoding of a deterministically
// fuzzed Pod.
func BenchmarkEncode(b *testing.B) {
	var pod api.Pod
	fuzzerFor(nil, "", rand.NewSource(benchmarkSeed)).Fuzz(&pod)
	for n := 0; n < b.N; n++ {
		latest.Codec.Encode(&pod)
	}
}
// BenchmarkEncodeJSON provides a baseline for regular JSON encode
// performance, for comparison against BenchmarkEncode.
func BenchmarkEncodeJSON(b *testing.B) {
	var pod api.Pod
	fuzzerFor(nil, "", rand.NewSource(benchmarkSeed)).Fuzz(&pod)
	for n := 0; n < b.N; n++ {
		json.Marshal(&pod)
	}
}
// BenchmarkDecode measures versioned decoding of a pre-encoded fuzzed Pod.
func BenchmarkDecode(b *testing.B) {
	var pod api.Pod
	fuzzerFor(nil, "", rand.NewSource(benchmarkSeed)).Fuzz(&pod)
	data, _ := latest.Codec.Encode(&pod)
	for n := 0; n < b.N; n++ {
		latest.Codec.Decode(data)
	}
}
// BenchmarkDecodeInto measures decoding into a caller-supplied Pod, which
// skips the type lookup that Decode performs.
func BenchmarkDecodeInto(b *testing.B) {
	var pod api.Pod
	fuzzerFor(nil, "", rand.NewSource(benchmarkSeed)).Fuzz(&pod)
	data, _ := latest.Codec.Encode(&pod)
	for n := 0; n < b.N; n++ {
		var target api.Pod
		latest.Codec.DecodeInto(data, &target)
	}
}
// BenchmarkDecodeJSON provides a baseline for regular JSON decode
// performance, for comparison against BenchmarkDecode/BenchmarkDecodeInto.
func BenchmarkDecodeJSON(b *testing.B) {
	var pod api.Pod
	fuzzerFor(nil, "", rand.NewSource(benchmarkSeed)).Fuzz(&pod)
	data, _ := latest.Codec.Encode(&pod)
	for n := 0; n < b.N; n++ {
		var target api.Pod
		json.Unmarshal(data, &target)
	}
}
|
/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package loader
import (
"archive/tar"
"bytes"
"compress/gzip"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
"helm.sh/helm/v3/pkg/chart"
)
// TestLoadDir loads the frobnitz fixture from an unpacked chart directory
// and verifies its metadata, files, and dependencies.
func TestLoadDir(t *testing.T) {
	ld, err := Loader("testdata/frobnitz")
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	chrt, err := ld.Load()
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	verifyFrobnitz(t, chrt)
	verifyChart(t, chrt)
	verifyDependencies(t, chrt)
	verifyDependenciesLock(t, chrt)
}
// TestLoadDirWithDevNull verifies that a chart directory containing an
// irregular file (/dev/null) is rejected.
func TestLoadDirWithDevNull(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("test only works on unix systems with /dev/null present")
	}
	ld, err := Loader("testdata/frobnitz_with_dev_null")
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	_, err = ld.Load()
	if err == nil {
		t.Errorf("packages with an irregular file (/dev/null) should not load")
	}
}
// TestLoadDirWithSymlink creates a symlink pointing outside the chart
// directory and verifies the chart still loads correctly.
func TestLoadDirWithSymlink(t *testing.T) {
	target := filepath.Join("..", "LICENSE")
	link := filepath.Join("testdata", "frobnitz_with_symlink", "LICENSE")
	if err := os.Symlink(target, link); err != nil {
		t.Fatal(err)
	}
	defer os.Remove(link)

	ld, err := Loader("testdata/frobnitz_with_symlink")
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	chrt, err := ld.Load()
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	verifyFrobnitz(t, chrt)
	verifyChart(t, chrt)
	verifyDependencies(t, chrt)
	verifyDependenciesLock(t, chrt)
}
// TestBomTestData is a fixture sanity check: it asserts that the
// BOM-related test files really do start with a UTF-8 BOM, both in the
// unpacked testdata directory and inside the frobnitz_with_bom.tgz archive.
func TestBomTestData(t *testing.T) {
	testFiles := []string{"frobnitz_with_bom/.helmignore", "frobnitz_with_bom/templates/template.tpl", "frobnitz_with_bom/Chart.yaml"}
	for _, file := range testFiles {
		data, err := ioutil.ReadFile("testdata/" + file)
		if err != nil || !bytes.HasPrefix(data, utf8bom) {
			t.Errorf("Test file has no BOM or is invalid: testdata/%s", file)
		}
	}

	archive, err := ioutil.ReadFile("testdata/frobnitz_with_bom.tgz")
	if err != nil {
		t.Fatalf("Error reading archive frobnitz_with_bom.tgz: %s", err)
	}
	unzipped, err := gzip.NewReader(bytes.NewReader(archive))
	if err != nil {
		t.Fatalf("Error reading archive frobnitz_with_bom.tgz: %s", err)
	}
	defer unzipped.Close()

	for _, testFile := range testFiles {
		// Rewind: Reset the gzip reader to the start of the archive so each
		// file is searched from the beginning.
		data := make([]byte, 3)
		err := unzipped.Reset(bytes.NewReader(archive))
		if err != nil {
			t.Fatalf("Error reading archive frobnitz_with_bom.tgz: %s", err)
		}
		tr := tar.NewReader(unzipped)
		for {
			file, err := tr.Next()
			if err == io.EOF {
				break
			}
			if err != nil {
				t.Fatalf("Error reading archive frobnitz_with_bom.tgz: %s", err)
			}
			if file != nil && strings.EqualFold(file.Name, testFile) {
				// NOTE(review): a single Read may legally return fewer than 3
				// bytes; presumably fine for these fixtures, but io.ReadFull
				// would be stricter — confirm.
				_, err := tr.Read(data)
				if err != nil {
					t.Fatalf("Error reading archive frobnitz_with_bom.tgz: %s", err)
				} else {
					break
				}
			}
		}
		if !bytes.Equal(data, utf8bom) {
			t.Fatalf("Test file has no BOM or is invalid: frobnitz_with_bom.tgz/%s", testFile)
		}
	}
}
// TestLoadDirWithUTFBOM loads an unpacked chart whose files carry a UTF-8
// BOM and verifies the BOM is stripped during loading.
func TestLoadDirWithUTFBOM(t *testing.T) {
	ld, err := Loader("testdata/frobnitz_with_bom")
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	chrt, err := ld.Load()
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	verifyFrobnitz(t, chrt)
	verifyChart(t, chrt)
	verifyDependencies(t, chrt)
	verifyDependenciesLock(t, chrt)
	verifyBomStripped(t, chrt.Files)
}
// TestLoadArchiveWithUTFBOM loads a packed chart whose files carry a UTF-8
// BOM and verifies the BOM is stripped during loading.
func TestLoadArchiveWithUTFBOM(t *testing.T) {
	ld, err := Loader("testdata/frobnitz_with_bom.tgz")
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	chrt, err := ld.Load()
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	verifyFrobnitz(t, chrt)
	verifyChart(t, chrt)
	verifyDependencies(t, chrt)
	verifyDependenciesLock(t, chrt)
	verifyBomStripped(t, chrt.Files)
}
// TestLoadV1 loads an apiVersion v1 chart directory and verifies its
// dependencies and lock are populated.
func TestLoadV1(t *testing.T) {
	ld, err := Loader("testdata/frobnitz.v1")
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	chrt, err := ld.Load()
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	verifyDependencies(t, chrt)
	verifyDependenciesLock(t, chrt)
}
// TestLoadFileV1 loads a packed apiVersion v1 chart and verifies its
// dependencies and lock are populated.
func TestLoadFileV1(t *testing.T) {
	ld, err := Loader("testdata/frobnitz.v1.tgz")
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	chrt, err := ld.Load()
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	verifyDependencies(t, chrt)
	verifyDependenciesLock(t, chrt)
}
// TestLoadFile loads the packed frobnitz fixture and verifies its
// metadata, files, and dependencies.
func TestLoadFile(t *testing.T) {
	ld, err := Loader("testdata/frobnitz-1.2.3.tgz")
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	chrt, err := ld.Load()
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	verifyFrobnitz(t, chrt)
	verifyChart(t, chrt)
	verifyDependencies(t, chrt)
}
// TestLoadFiles builds a chart from in-memory BufferedFiles and verifies
// that metadata, values, schema, and templates are all populated, and that
// an empty file set is rejected with a metadata-validation error.
func TestLoadFiles(t *testing.T) {
	goodFiles := []*BufferedFile{
		{
			// Minimal but complete Chart.yaml for the frobnitz fixture.
			Name: "Chart.yaml",
			Data: []byte(`apiVersion: v1
name: frobnitz
description: This is a frobnitz.
version: "1.2.3"
keywords:
  - frobnitz
  - sprocket
  - dodad
maintainers:
  - name: The Helm Team
    email: helm@example.com
  - name: Someone Else
    email: nobody@example.com
sources:
  - https://example.com/foo/bar
home: http://example.com
icon: https://example.com/64x64.png
`),
		},
		{
			Name: "values.yaml",
			Data: []byte("var: some values"),
		},
		{
			Name: "values.schema.json",
			Data: []byte("type: Values"),
		},
		{
			Name: "templates/deployment.yaml",
			Data: []byte("some deployment"),
		},
		{
			Name: "templates/service.yaml",
			Data: []byte("some service"),
		},
	}

	c, err := LoadFiles(goodFiles)
	if err != nil {
		t.Errorf("Expected good files to be loaded, got %v", err)
	}

	if c.Name() != "frobnitz" {
		t.Errorf("Expected chart name to be 'frobnitz', got %s", c.Name())
	}
	if c.Values["var"] != "some values" {
		t.Error("Expected chart values to be populated with default values")
	}
	if len(c.Raw) != 5 {
		t.Errorf("Expected %d files, got %d", 5, len(c.Raw))
	}
	if !bytes.Equal(c.Schema, []byte("type: Values")) {
		t.Error("Expected chart schema to be populated with default values")
	}
	if len(c.Templates) != 2 {
		t.Errorf("Expected number of templates == 2, got %d", len(c.Templates))
	}

	// An empty file list must fail metadata validation.
	if _, err = LoadFiles([]*BufferedFile{}); err == nil {
		t.Fatal("Expected err to be non-nil")
	}
	if err.Error() != "validation: chart.metadata is required" {
		t.Errorf("Expected chart metadata missing error, got '%s'", err.Error())
	}
}
// Packaging the chart on a Windows machine will produce an
// archive that has \\ as delimiters. Test that we support these archives
func TestLoadFileBackslash(t *testing.T) {
	chrt, err := Load("testdata/frobnitz_backslash-1.2.3.tgz")
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	verifyChartFileAndTemplate(t, chrt, "frobnitz_backslash")
	verifyChart(t, chrt)
	verifyDependencies(t, chrt)
}
// TestLoadV2WithReqs loads an apiVersion v2 chart that still carries
// requirements files and verifies its dependencies and lock.
func TestLoadV2WithReqs(t *testing.T) {
	ld, err := Loader("testdata/frobnitz.v2.reqs")
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	chrt, err := ld.Load()
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	verifyDependencies(t, chrt)
	verifyDependenciesLock(t, chrt)
}
// TestLoadInvalidArchive builds a series of malicious or malformed chart
// archives on the fly and verifies each is rejected with the expected
// error (path traversal, absolute paths, illegal entry names, ...).
func TestLoadInvalidArchive(t *testing.T) {
	tmpdir, err := ioutil.TempDir("", "helm-test-")
	if err != nil {
		t.Fatal(err)
	}
	// The directory is populated below; os.Remove fails on a non-empty
	// directory and silently leaked the fixtures — remove recursively.
	defer os.RemoveAll(tmpdir)

	// writeTar writes a gzipped tar at filename containing a single entry
	// named internalPath with the given body.
	writeTar := func(filename, internalPath string, body []byte) {
		dest, err := os.Create(filename)
		if err != nil {
			t.Fatal(err)
		}
		zipper := gzip.NewWriter(dest)
		tw := tar.NewWriter(zipper)

		h := &tar.Header{
			Name:    internalPath,
			Mode:    0755,
			Size:    int64(len(body)),
			ModTime: time.Now(),
		}
		if err := tw.WriteHeader(h); err != nil {
			t.Fatal(err)
		}
		if _, err := tw.Write(body); err != nil {
			t.Fatal(err)
		}
		tw.Close()
		zipper.Close()
		dest.Close()
	}

	for _, tt := range []struct {
		chartname   string
		internal    string
		expectError string
	}{
		{"illegal-dots.tgz", "../../malformed-helm-test", "chart illegally references parent directory"},
		{"illegal-dots2.tgz", "/foo/../../malformed-helm-test", "chart illegally references parent directory"},
		{"illegal-dots3.tgz", "/../../malformed-helm-test", "chart illegally references parent directory"},
		{"illegal-dots4.tgz", "./../../malformed-helm-test", "chart illegally references parent directory"},
		{"illegal-name.tgz", "./.", "chart illegally contains content outside the base directory"},
		{"illegal-name2.tgz", "/./.", "chart illegally contains content outside the base directory"},
		{"illegal-name3.tgz", "missing-leading-slash", "chart illegally contains content outside the base directory"},
		{"illegal-name4.tgz", "/missing-leading-slash", "validation: chart.metadata is required"},
		{"illegal-abspath.tgz", "//foo", "chart illegally contains absolute paths"},
		{"illegal-abspath2.tgz", "///foo", "chart illegally contains absolute paths"},
		{"illegal-abspath3.tgz", "\\\\foo", "chart illegally contains absolute paths"},
		// NOTE(review): reuses the illegal-abspath3.tgz filename above.
		// Harmless because each archive is written and loaded immediately,
		// but a distinct name would be clearer.
		{"illegal-abspath3.tgz", "\\..\\..\\foo", "chart illegally references parent directory"},

		// Under special circumstances, this can get normalized to things that look like absolute Windows paths
		{"illegal-abspath4.tgz", "\\.\\c:\\\\foo", "chart contains illegally named files"},
		{"illegal-abspath5.tgz", "/./c://foo", "chart contains illegally named files"},
		{"illegal-abspath6.tgz", "\\\\?\\Some\\windows\\magic", "chart illegally contains absolute paths"},
	} {
		illegalChart := filepath.Join(tmpdir, tt.chartname)
		writeTar(illegalChart, tt.internal, []byte("hello: world"))
		_, err = Load(illegalChart)
		if err == nil {
			t.Fatal("expected error when unpacking illegal files")
		}
		if !strings.Contains(err.Error(), tt.expectError) {
			t.Errorf("Expected error to contain %q, got %q for %s", tt.expectError, err.Error(), tt.chartname)
		}
	}

	// The three checks below guard err == nil before calling err.Error();
	// the original would nil-panic if Load ever unexpectedly succeeded.

	// Make sure that absolute path gets interpreted as relative
	illegalChart := filepath.Join(tmpdir, "abs-path.tgz")
	writeTar(illegalChart, "/Chart.yaml", []byte("hello: world"))
	_, err = Load(illegalChart)
	if err == nil {
		t.Error("expected validation error for abs-path.tgz, got nil")
	} else if err.Error() != "validation: chart.metadata.name is required" {
		t.Error(err)
	}

	// And just to validate that the above was not spurious
	illegalChart = filepath.Join(tmpdir, "abs-path2.tgz")
	writeTar(illegalChart, "files/whatever.yaml", []byte("hello: world"))
	_, err = Load(illegalChart)
	if err == nil {
		t.Error("expected validation error for abs-path2.tgz, got nil")
	} else if err.Error() != "validation: chart.metadata is required" {
		t.Error(err)
	}

	// Finally, test that drive letter gets stripped off on Windows
	illegalChart = filepath.Join(tmpdir, "abs-winpath.tgz")
	writeTar(illegalChart, "c:\\Chart.yaml", []byte("hello: world"))
	_, err = Load(illegalChart)
	if err == nil {
		t.Error("expected validation error for abs-winpath.tgz, got nil")
	} else if err.Error() != "validation: chart.metadata.name is required" {
		t.Error(err)
	}
}
// verifyChart asserts general structural expectations shared by the
// frobnitz-family fixtures: template count, extra-file count, and the two
// subchart dependencies (alpine 0.1.0 and mariner 4.3.2).
func verifyChart(t *testing.T, c *chart.Chart) {
	t.Helper()
	if c.Name() == "" {
		t.Fatalf("No chart metadata found on %v", c)
	}
	t.Logf("Verifying chart %s", c.Name())
	if len(c.Templates) != 1 {
		t.Errorf("Expected 1 template, got %d", len(c.Templates))
	}

	numfiles := 6
	if len(c.Files) != numfiles {
		t.Errorf("Expected %d extra files, got %d", numfiles, len(c.Files))
		// List what we actually got, to make fixture drift easy to spot.
		for _, n := range c.Files {
			t.Logf("\t%s", n.Name)
		}
	}

	if len(c.Dependencies()) != 2 {
		t.Errorf("Expected 2 dependencies, got %d (%v)", len(c.Dependencies()), c.Dependencies())
		for _, d := range c.Dependencies() {
			t.Logf("\tSubchart: %s\n", d.Name())
		}
	}

	// Expected subchart versions keyed by subchart name.
	expect := map[string]map[string]string{
		"alpine": {
			"version": "0.1.0",
		},
		"mariner": {
			"version": "4.3.2",
		},
	}
	for _, dep := range c.Dependencies() {
		if dep.Metadata == nil {
			t.Fatalf("expected metadata on dependency: %v", dep)
		}
		exp, ok := expect[dep.Name()]
		if !ok {
			t.Fatalf("Unknown dependency %s", dep.Name())
		}
		if exp["version"] != dep.Metadata.Version {
			t.Errorf("Expected %s version %s, got %s", dep.Name(), exp["version"], dep.Metadata.Version)
		}
	}
}
// verifyDependencies checks the dependencies declared in the chart's
// metadata (Chart.yaml / requirements.yaml) against the expected fixture
// entries.
func verifyDependencies(t *testing.T, c *chart.Chart) {
	// Mark as a helper so failures are attributed to the calling test,
	// consistent with verifyChart (the original omitted this).
	t.Helper()
	if len(c.Metadata.Dependencies) != 2 {
		t.Errorf("Expected 2 dependencies, got %d", len(c.Metadata.Dependencies))
	}
	tests := []*chart.Dependency{
		{Name: "alpine", Version: "0.1.0", Repository: "https://example.com/charts"},
		{Name: "mariner", Version: "4.3.2", Repository: "https://example.com/charts"},
	}
	for i, tt := range tests {
		d := c.Metadata.Dependencies[i]
		if d.Name != tt.Name {
			t.Errorf("Expected dependency named %q, got %q", tt.Name, d.Name)
		}
		if d.Version != tt.Version {
			t.Errorf("Expected dependency named %q to have version %q, got %q", tt.Name, tt.Version, d.Version)
		}
		if d.Repository != tt.Repository {
			t.Errorf("Expected dependency named %q to have repository %q, got %q", tt.Name, tt.Repository, d.Repository)
		}
	}
}
// verifyDependenciesLock asserts that the chart's dependency lock
// (Chart.lock / requirements.lock) records exactly the alpine and
// mariner dependencies with the expected version and repository.
//
// NOTE(review): this previously inspected c.Metadata.Dependencies,
// duplicating verifyDependencies; the lock itself was never checked.
func verifyDependenciesLock(t *testing.T, c *chart.Chart) {
	if c.Lock == nil {
		t.Fatal("chart has no dependency lock")
	}
	if len(c.Lock.Dependencies) != 2 {
		t.Errorf("Expected 2 dependencies, got %d", len(c.Lock.Dependencies))
	}
	tests := []*chart.Dependency{
		{Name: "alpine", Version: "0.1.0", Repository: "https://example.com/charts"},
		{Name: "mariner", Version: "4.3.2", Repository: "https://example.com/charts"},
	}
	for i, tt := range tests {
		d := c.Lock.Dependencies[i]
		if d.Name != tt.Name {
			t.Errorf("Expected dependency named %q, got %q", tt.Name, d.Name)
		}
		if d.Version != tt.Version {
			t.Errorf("Expected dependency named %q to have version %q, got %q", tt.Name, tt.Version, d.Version)
		}
		if d.Repository != tt.Repository {
			t.Errorf("Expected dependency named %q to have repository %q, got %q", tt.Name, tt.Repository, d.Repository)
		}
	}
}
// verifyFrobnitz checks that c matches the standard "frobnitz" fixture chart.
func verifyFrobnitz(t *testing.T, c *chart.Chart) {
	verifyChartFileAndTemplate(t, c, "frobnitz")
}
// verifyChartFileAndTemplate checks the metadata, templates, files,
// dependencies, and lock entries of a loaded fixture chart against the
// layout shared by the frobnitz test charts. name is the expected chart name.
func verifyChartFileAndTemplate(t *testing.T, c *chart.Chart, name string) {
	if c.Metadata == nil {
		t.Fatal("Metadata is nil")
	}
	if c.Name() != name {
		t.Errorf("Expected %s, got %s", name, c.Name())
	}
	if len(c.Templates) != 1 {
		t.Fatalf("Expected 1 template, got %d", len(c.Templates))
	}
	if c.Templates[0].Name != "templates/template.tpl" {
		t.Errorf("Unexpected template: %s", c.Templates[0].Name)
	}
	if len(c.Templates[0].Data) == 0 {
		t.Error("No template data.")
	}
	if len(c.Files) != 6 {
		t.Fatalf("Expected 6 Files, got %d", len(c.Files))
	}
	if len(c.Dependencies()) != 2 {
		t.Fatalf("Expected 2 Dependency, got %d", len(c.Dependencies()))
	}
	if len(c.Metadata.Dependencies) != 2 {
		t.Fatalf("Expected 2 Dependencies.Dependency, got %d", len(c.Metadata.Dependencies))
	}
	if len(c.Lock.Dependencies) != 2 {
		t.Fatalf("Expected 2 Lock.Dependency, got %d", len(c.Lock.Dependencies))
	}
	for _, dep := range c.Dependencies() {
		switch dep.Name() {
		case "mariner":
			// No per-file checks for mariner; Go switch cases do not fall
			// through, so this case is intentionally empty.
		case "alpine":
			if len(dep.Templates) != 1 {
				t.Fatalf("Expected 1 template, got %d", len(dep.Templates))
			}
			if dep.Templates[0].Name != "templates/alpine-pod.yaml" {
				t.Errorf("Unexpected template: %s", dep.Templates[0].Name)
			}
			if len(dep.Templates[0].Data) == 0 {
				t.Error("No template data.")
			}
			if len(dep.Files) != 1 {
				t.Fatalf("Expected 1 Files, got %d", len(dep.Files))
			}
			if len(dep.Dependencies()) != 2 {
				t.Fatalf("Expected 2 Dependency, got %d", len(dep.Dependencies()))
			}
		default:
			t.Errorf("Unexpected dependency %s", dep.Name())
		}
	}
}
// verifyBomStripped asserts that no file still begins with the UTF-8
// byte order mark after loading.
func verifyBomStripped(t *testing.T, files []*chart.File) {
	for _, f := range files {
		if !bytes.HasPrefix(f.Data, utf8bom) {
			continue
		}
		t.Errorf("Byte Order Mark still present in processed file %s", f.Name)
	}
}
Clean up temp files for load_test
Signed-off-by: Zhou Hao <f577ade201aa03d612c19915a089dc6084d45cd9@cn.fujitsu.com>
/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package loader
import (
"archive/tar"
"bytes"
"compress/gzip"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
"helm.sh/helm/v3/pkg/chart"
)
// TestLoadDir loads the frobnitz fixture chart from an unpacked
// directory and verifies its full contents.
func TestLoadDir(t *testing.T) {
	loader, err := Loader("testdata/frobnitz")
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	chrt, err := loader.Load()
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	verifyFrobnitz(t, chrt)
	verifyChart(t, chrt)
	verifyDependencies(t, chrt)
	verifyDependenciesLock(t, chrt)
}
// TestLoadDirWithDevNull ensures a chart directory containing an
// irregular file (/dev/null) is rejected. Skipped on Windows, which
// has no /dev/null.
func TestLoadDirWithDevNull(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("test only works on unix systems with /dev/null present")
	}
	loader, err := Loader("testdata/frobnitz_with_dev_null")
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	_, err = loader.Load()
	if err == nil {
		t.Errorf("packages with an irregular file (/dev/null) should not load")
	}
}
// TestLoadDirWithSymlink creates a symlink (LICENSE -> ../LICENSE)
// inside a fixture chart directory, loads it, and verifies the chart
// still loads like the plain frobnitz fixture.
func TestLoadDirWithSymlink(t *testing.T) {
	target := filepath.Join("..", "LICENSE")
	link := filepath.Join("testdata", "frobnitz_with_symlink", "LICENSE")
	if err := os.Symlink(target, link); err != nil {
		t.Fatal(err)
	}
	// Remove the symlink again so the testdata tree stays clean.
	defer os.Remove(link)
	loader, err := Loader("testdata/frobnitz_with_symlink")
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	chrt, err := loader.Load()
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	verifyFrobnitz(t, chrt)
	verifyChart(t, chrt)
	verifyDependencies(t, chrt)
	verifyDependenciesLock(t, chrt)
}
// TestBomTestData sanity-checks the BOM fixtures themselves: every
// listed file on disk, and the matching entries inside
// frobnitz_with_bom.tgz, must begin with the UTF-8 byte order mark.
// Other tests rely on these fixtures actually carrying a BOM.
func TestBomTestData(t *testing.T) {
	testFiles := []string{"frobnitz_with_bom/.helmignore", "frobnitz_with_bom/templates/template.tpl", "frobnitz_with_bom/Chart.yaml"}
	for _, file := range testFiles {
		data, err := ioutil.ReadFile("testdata/" + file)
		if err != nil || !bytes.HasPrefix(data, utf8bom) {
			t.Errorf("Test file has no BOM or is invalid: testdata/%s", file)
		}
	}
	archive, err := ioutil.ReadFile("testdata/frobnitz_with_bom.tgz")
	if err != nil {
		t.Fatalf("Error reading archive frobnitz_with_bom.tgz: %s", err)
	}
	unzipped, err := gzip.NewReader(bytes.NewReader(archive))
	if err != nil {
		t.Fatalf("Error reading archive frobnitz_with_bom.tgz: %s", err)
	}
	defer unzipped.Close()
	for _, testFile := range testFiles {
		// Only the first three bytes (the BOM) of each entry are read.
		data := make([]byte, 3)
		// Rewind the gzip stream so each file is searched from the start
		// of the archive.
		err := unzipped.Reset(bytes.NewReader(archive))
		if err != nil {
			t.Fatalf("Error reading archive frobnitz_with_bom.tgz: %s", err)
		}
		tr := tar.NewReader(unzipped)
		for {
			file, err := tr.Next()
			if err == io.EOF {
				// Entry not found: data keeps its zero value and the
				// bytes.Equal check below fails the test.
				break
			}
			if err != nil {
				t.Fatalf("Error reading archive frobnitz_with_bom.tgz: %s", err)
			}
			if file != nil && strings.EqualFold(file.Name, testFile) {
				_, err := tr.Read(data)
				if err != nil {
					t.Fatalf("Error reading archive frobnitz_with_bom.tgz: %s", err)
				} else {
					break
				}
			}
		}
		if !bytes.Equal(data, utf8bom) {
			t.Fatalf("Test file has no BOM or is invalid: frobnitz_with_bom.tgz/%s", testFile)
		}
	}
}
// TestLoadDirWithUTFBOM loads a chart directory whose files carry a
// UTF-8 byte order mark and verifies the BOM is stripped during loading.
func TestLoadDirWithUTFBOM(t *testing.T) {
	loader, err := Loader("testdata/frobnitz_with_bom")
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	chrt, err := loader.Load()
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	verifyFrobnitz(t, chrt)
	verifyChart(t, chrt)
	verifyDependencies(t, chrt)
	verifyDependenciesLock(t, chrt)
	verifyBomStripped(t, chrt.Files)
}
// TestLoadArchiveWithUTFBOM loads a packaged chart whose files carry a
// UTF-8 byte order mark and verifies the BOM is stripped during loading.
func TestLoadArchiveWithUTFBOM(t *testing.T) {
	loader, err := Loader("testdata/frobnitz_with_bom.tgz")
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	chrt, err := loader.Load()
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	verifyFrobnitz(t, chrt)
	verifyChart(t, chrt)
	verifyDependencies(t, chrt)
	verifyDependenciesLock(t, chrt)
	verifyBomStripped(t, chrt.Files)
}
// TestLoadV1 loads an apiVersion v1 chart directory and verifies its
// dependency metadata and lock.
func TestLoadV1(t *testing.T) {
	loader, err := Loader("testdata/frobnitz.v1")
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	chrt, err := loader.Load()
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	verifyDependencies(t, chrt)
	verifyDependenciesLock(t, chrt)
}
// TestLoadFileV1 loads a packaged apiVersion v1 chart and verifies its
// dependency metadata and lock.
func TestLoadFileV1(t *testing.T) {
	loader, err := Loader("testdata/frobnitz.v1.tgz")
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	chrt, err := loader.Load()
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	verifyDependencies(t, chrt)
	verifyDependenciesLock(t, chrt)
}
// TestLoadFile loads the packaged frobnitz fixture chart and verifies
// its contents and dependencies.
func TestLoadFile(t *testing.T) {
	loader, err := Loader("testdata/frobnitz-1.2.3.tgz")
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	chrt, err := loader.Load()
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	verifyFrobnitz(t, chrt)
	verifyChart(t, chrt)
	verifyDependencies(t, chrt)
}
// TestLoadFiles builds a chart directly from in-memory BufferedFiles
// and checks that metadata, values, schema, and templates are all
// populated. It also verifies that loading an empty file set fails with
// a chart.metadata validation error.
func TestLoadFiles(t *testing.T) {
	goodFiles := []*BufferedFile{
		{
			Name: "Chart.yaml",
			Data: []byte(`apiVersion: v1
name: frobnitz
description: This is a frobnitz.
version: "1.2.3"
keywords:
- frobnitz
- sprocket
- dodad
maintainers:
- name: The Helm Team
email: helm@example.com
- name: Someone Else
email: nobody@example.com
sources:
- https://example.com/foo/bar
home: http://example.com
icon: https://example.com/64x64.png
`),
		},
		{
			Name: "values.yaml",
			Data: []byte("var: some values"),
		},
		{
			Name: "values.schema.json",
			Data: []byte("type: Values"),
		},
		{
			Name: "templates/deployment.yaml",
			Data: []byte("some deployment"),
		},
		{
			Name: "templates/service.yaml",
			Data: []byte("some service"),
		},
	}
	c, err := LoadFiles(goodFiles)
	if err != nil {
		t.Errorf("Expected good files to be loaded, got %v", err)
	}
	if c.Name() != "frobnitz" {
		t.Errorf("Expected chart name to be 'frobnitz', got %s", c.Name())
	}
	if c.Values["var"] != "some values" {
		t.Error("Expected chart values to be populated with default values")
	}
	// Raw holds every input file verbatim.
	if len(c.Raw) != 5 {
		t.Errorf("Expected %d files, got %d", 5, len(c.Raw))
	}
	if !bytes.Equal(c.Schema, []byte("type: Values")) {
		t.Error("Expected chart schema to be populated with default values")
	}
	if len(c.Templates) != 2 {
		t.Errorf("Expected number of templates == 2, got %d", len(c.Templates))
	}
	// An empty file set must fail validation because Chart.yaml is missing.
	if _, err = LoadFiles([]*BufferedFile{}); err == nil {
		t.Fatal("Expected err to be non-nil")
	}
	if err.Error() != "validation: chart.metadata is required" {
		t.Errorf("Expected chart metadata missing error, got '%s'", err.Error())
	}
}
// TestLoadFileBackslash verifies archives packaged on Windows, where
// entries are delimited with \\ rather than /, still load correctly.
func TestLoadFileBackslash(t *testing.T) {
	chrt, err := Load("testdata/frobnitz_backslash-1.2.3.tgz")
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	verifyChartFileAndTemplate(t, chrt, "frobnitz_backslash")
	verifyChart(t, chrt)
	verifyDependencies(t, chrt)
}
// TestLoadV2WithReqs loads an apiVersion v2 chart that still ships
// requirements files and verifies its dependency metadata and lock.
func TestLoadV2WithReqs(t *testing.T) {
	loader, err := Loader("testdata/frobnitz.v2.reqs")
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	chrt, err := loader.Load()
	if err != nil {
		t.Fatalf("Failed to load testdata: %s", err)
	}
	verifyDependencies(t, chrt)
	verifyDependenciesLock(t, chrt)
}
// TestLoadInvalidArchive builds small single-entry tgz archives with
// illegal internal paths (parent-directory escapes, absolute paths,
// Windows drive letters) and asserts Load rejects each with the
// expected error, then confirms that legal-but-unusual paths still load
// far enough to fail on ordinary metadata validation.
func TestLoadInvalidArchive(t *testing.T) {
	tmpdir, err := ioutil.TempDir("", "helm-test-")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpdir)
	// writeTar writes a gzipped tar at filename containing a single file
	// entry named internalPath with the given body.
	writeTar := func(filename, internalPath string, body []byte) {
		dest, err := os.Create(filename)
		if err != nil {
			t.Fatal(err)
		}
		zipper := gzip.NewWriter(dest)
		tw := tar.NewWriter(zipper)
		h := &tar.Header{
			Name: internalPath,
			Mode: 0755,
			Size: int64(len(body)),
			ModTime: time.Now(),
		}
		if err := tw.WriteHeader(h); err != nil {
			t.Fatal(err)
		}
		if _, err := tw.Write(body); err != nil {
			t.Fatal(err)
		}
		tw.Close()
		zipper.Close()
		dest.Close()
	}
	for _, tt := range []struct {
		chartname string
		internal string
		expectError string
	}{
		{"illegal-dots.tgz", "../../malformed-helm-test", "chart illegally references parent directory"},
		{"illegal-dots2.tgz", "/foo/../../malformed-helm-test", "chart illegally references parent directory"},
		{"illegal-dots3.tgz", "/../../malformed-helm-test", "chart illegally references parent directory"},
		{"illegal-dots4.tgz", "./../../malformed-helm-test", "chart illegally references parent directory"},
		{"illegal-name.tgz", "./.", "chart illegally contains content outside the base directory"},
		{"illegal-name2.tgz", "/./.", "chart illegally contains content outside the base directory"},
		{"illegal-name3.tgz", "missing-leading-slash", "chart illegally contains content outside the base directory"},
		{"illegal-name4.tgz", "/missing-leading-slash", "validation: chart.metadata is required"},
		{"illegal-abspath.tgz", "//foo", "chart illegally contains absolute paths"},
		{"illegal-abspath2.tgz", "///foo", "chart illegally contains absolute paths"},
		{"illegal-abspath3.tgz", "\\\\foo", "chart illegally contains absolute paths"},
		{"illegal-abspath3.tgz", "\\..\\..\\foo", "chart illegally references parent directory"},
		// Under special circumstances, this can get normalized to things that look like absolute Windows paths
		{"illegal-abspath4.tgz", "\\.\\c:\\\\foo", "chart contains illegally named files"},
		{"illegal-abspath5.tgz", "/./c://foo", "chart contains illegally named files"},
		{"illegal-abspath6.tgz", "\\\\?\\Some\\windows\\magic", "chart illegally contains absolute paths"},
	} {
		illegalChart := filepath.Join(tmpdir, tt.chartname)
		writeTar(illegalChart, tt.internal, []byte("hello: world"))
		_, err = Load(illegalChart)
		if err == nil {
			t.Fatal("expected error when unpacking illegal files")
		}
		if !strings.Contains(err.Error(), tt.expectError) {
			t.Errorf("Expected error to contain %q, got %q for %s", tt.expectError, err.Error(), tt.chartname)
		}
	}
	// Make sure that absolute path gets interpreted as relative
	illegalChart := filepath.Join(tmpdir, "abs-path.tgz")
	writeTar(illegalChart, "/Chart.yaml", []byte("hello: world"))
	_, err = Load(illegalChart)
	if err.Error() != "validation: chart.metadata.name is required" {
		t.Error(err)
	}
	// And just to validate that the above was not spurious
	illegalChart = filepath.Join(tmpdir, "abs-path2.tgz")
	writeTar(illegalChart, "files/whatever.yaml", []byte("hello: world"))
	_, err = Load(illegalChart)
	if err.Error() != "validation: chart.metadata is required" {
		t.Error(err)
	}
	// Finally, test that drive letter gets stripped off on Windows
	illegalChart = filepath.Join(tmpdir, "abs-winpath.tgz")
	writeTar(illegalChart, "c:\\Chart.yaml", []byte("hello: world"))
	_, err = Load(illegalChart)
	if err.Error() != "validation: chart.metadata.name is required" {
		t.Error(err)
	}
}
// verifyChart checks that a loaded fixture chart has the expected name,
// template count, extra-file count, and exactly the two known
// subcharts (alpine, mariner) at their expected versions.
func verifyChart(t *testing.T, c *chart.Chart) {
	t.Helper()
	if c.Name() == "" {
		t.Fatalf("No chart metadata found on %v", c)
	}
	t.Logf("Verifying chart %s", c.Name())
	if got := len(c.Templates); got != 1 {
		t.Errorf("Expected 1 template, got %d", got)
	}
	numfiles := 6
	if got := len(c.Files); got != numfiles {
		t.Errorf("Expected %d extra files, got %d", numfiles, got)
		for _, f := range c.Files {
			t.Logf("\t%s", f.Name)
		}
	}
	deps := c.Dependencies()
	if len(deps) != 2 {
		t.Errorf("Expected 2 dependencies, got %d (%v)", len(deps), deps)
		for _, d := range deps {
			t.Logf("\tSubchart: %s\n", d.Name())
		}
	}
	// Expected subchart versions, keyed by subchart name.
	expect := map[string]map[string]string{
		"alpine": {
			"version": "0.1.0",
		},
		"mariner": {
			"version": "4.3.2",
		},
	}
	for _, dep := range deps {
		if dep.Metadata == nil {
			t.Fatalf("expected metadata on dependency: %v", dep)
		}
		exp, ok := expect[dep.Name()]
		if !ok {
			t.Fatalf("Unknown dependency %s", dep.Name())
		}
		if want := exp["version"]; want != dep.Metadata.Version {
			t.Errorf("Expected %s version %s, got %s", dep.Name(), want, dep.Metadata.Version)
		}
	}
}
// verifyDependencies asserts that the chart metadata lists exactly the
// alpine and mariner dependencies, in order, with the expected version
// and repository.
func verifyDependencies(t *testing.T, c *chart.Chart) {
	if got := len(c.Metadata.Dependencies); got != 2 {
		t.Errorf("Expected 2 dependencies, got %d", got)
	}
	want := []*chart.Dependency{
		{Name: "alpine", Version: "0.1.0", Repository: "https://example.com/charts"},
		{Name: "mariner", Version: "4.3.2", Repository: "https://example.com/charts"},
	}
	for i, w := range want {
		got := c.Metadata.Dependencies[i]
		if got.Name != w.Name {
			t.Errorf("Expected dependency named %q, got %q", w.Name, got.Name)
		}
		if got.Version != w.Version {
			t.Errorf("Expected dependency named %q to have version %q, got %q", w.Name, w.Version, got.Version)
		}
		if got.Repository != w.Repository {
			t.Errorf("Expected dependency named %q to have repository %q, got %q", w.Name, w.Repository, got.Repository)
		}
	}
}
// verifyDependenciesLock asserts that the chart's dependency lock
// (Chart.lock / requirements.lock) records exactly the alpine and
// mariner dependencies with the expected version and repository.
//
// NOTE(review): this previously inspected c.Metadata.Dependencies,
// duplicating verifyDependencies; the lock itself was never checked.
func verifyDependenciesLock(t *testing.T, c *chart.Chart) {
	if c.Lock == nil {
		t.Fatal("chart has no dependency lock")
	}
	if len(c.Lock.Dependencies) != 2 {
		t.Errorf("Expected 2 dependencies, got %d", len(c.Lock.Dependencies))
	}
	tests := []*chart.Dependency{
		{Name: "alpine", Version: "0.1.0", Repository: "https://example.com/charts"},
		{Name: "mariner", Version: "4.3.2", Repository: "https://example.com/charts"},
	}
	for i, tt := range tests {
		d := c.Lock.Dependencies[i]
		if d.Name != tt.Name {
			t.Errorf("Expected dependency named %q, got %q", tt.Name, d.Name)
		}
		if d.Version != tt.Version {
			t.Errorf("Expected dependency named %q to have version %q, got %q", tt.Name, tt.Version, d.Version)
		}
		if d.Repository != tt.Repository {
			t.Errorf("Expected dependency named %q to have repository %q, got %q", tt.Name, tt.Repository, d.Repository)
		}
	}
}
// verifyFrobnitz checks that c matches the standard "frobnitz" fixture chart.
func verifyFrobnitz(t *testing.T, c *chart.Chart) {
	verifyChartFileAndTemplate(t, c, "frobnitz")
}
// verifyChartFileAndTemplate checks the metadata, templates, files,
// dependencies, and lock entries of a loaded fixture chart against the
// layout shared by the frobnitz test charts. name is the expected chart name.
func verifyChartFileAndTemplate(t *testing.T, c *chart.Chart, name string) {
	if c.Metadata == nil {
		t.Fatal("Metadata is nil")
	}
	if c.Name() != name {
		t.Errorf("Expected %s, got %s", name, c.Name())
	}
	if len(c.Templates) != 1 {
		t.Fatalf("Expected 1 template, got %d", len(c.Templates))
	}
	if c.Templates[0].Name != "templates/template.tpl" {
		t.Errorf("Unexpected template: %s", c.Templates[0].Name)
	}
	if len(c.Templates[0].Data) == 0 {
		t.Error("No template data.")
	}
	if len(c.Files) != 6 {
		t.Fatalf("Expected 6 Files, got %d", len(c.Files))
	}
	if len(c.Dependencies()) != 2 {
		t.Fatalf("Expected 2 Dependency, got %d", len(c.Dependencies()))
	}
	if len(c.Metadata.Dependencies) != 2 {
		t.Fatalf("Expected 2 Dependencies.Dependency, got %d", len(c.Metadata.Dependencies))
	}
	if len(c.Lock.Dependencies) != 2 {
		t.Fatalf("Expected 2 Lock.Dependency, got %d", len(c.Lock.Dependencies))
	}
	for _, dep := range c.Dependencies() {
		switch dep.Name() {
		case "mariner":
			// No per-file checks for mariner; Go switch cases do not fall
			// through, so this case is intentionally empty.
		case "alpine":
			if len(dep.Templates) != 1 {
				t.Fatalf("Expected 1 template, got %d", len(dep.Templates))
			}
			if dep.Templates[0].Name != "templates/alpine-pod.yaml" {
				t.Errorf("Unexpected template: %s", dep.Templates[0].Name)
			}
			if len(dep.Templates[0].Data) == 0 {
				t.Error("No template data.")
			}
			if len(dep.Files) != 1 {
				t.Fatalf("Expected 1 Files, got %d", len(dep.Files))
			}
			if len(dep.Dependencies()) != 2 {
				t.Fatalf("Expected 2 Dependency, got %d", len(dep.Dependencies()))
			}
		default:
			t.Errorf("Unexpected dependency %s", dep.Name())
		}
	}
}
// verifyBomStripped asserts that no file still begins with the UTF-8
// byte order mark after loading.
func verifyBomStripped(t *testing.T, files []*chart.File) {
	for _, f := range files {
		if !bytes.HasPrefix(f.Data, utf8bom) {
			continue
		}
		t.Errorf("Byte Order Mark still present in processed file %s", f.Name)
	}
}
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugin
import (
"fmt"
"io/ioutil"
"os"
"strings"
"testing"
"k8s.io/cli-runtime/pkg/genericclioptions"
)
// TestPluginPathsAreUnaltered runs the plugin list command over two
// temporary plugin directories and checks that the PluginPaths slice
// reaches the verifier in its original order and form.
func TestPluginPathsAreUnaltered(t *testing.T) {
	tempDir, err := ioutil.TempDir(os.TempDir(), "test-cmd-plugins")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	tempDir2, err := ioutil.TempDir(os.TempDir(), "test-cmd-plugins2")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// cleanup
	defer func() {
		if err := os.RemoveAll(tempDir); err != nil {
			panic(fmt.Errorf("unexpected cleanup error: %v", err))
		}
		if err := os.RemoveAll(tempDir2); err != nil {
			panic(fmt.Errorf("unexpected cleanup error: %v", err))
		}
	}()
	ioStreams, _, _, errOut := genericclioptions.NewTestIOStreams()
	verifier := newFakePluginPathVerifier()
	pluginPaths := []string{tempDir, tempDir2}
	o := &PluginListOptions{
		Verifier: verifier,
		IOStreams: ioStreams,
		PluginPaths: pluginPaths,
	}
	// write at least one valid plugin file
	if _, err := ioutil.TempFile(tempDir, "kubectl-"); err != nil {
		t.Fatalf("unexpected error %v", err)
	}
	if _, err := ioutil.TempFile(tempDir2, "kubectl-"); err != nil {
		t.Fatalf("unexpected error %v", err)
	}
	if err := o.Run(); err != nil {
		t.Fatalf("unexpected error %v - %v", err, errOut.String())
	}
	// ensure original paths remain unaltered
	if len(verifier.seenUnsorted) != len(pluginPaths) {
		t.Fatalf("saw unexpected plugin paths. Expecting %v, got %v", pluginPaths, verifier.seenUnsorted)
	}
	// seenUnsorted preserves verification order, so index i must still
	// correspond to pluginPaths[i].
	for actual := range verifier.seenUnsorted {
		if !strings.HasPrefix(verifier.seenUnsorted[actual], pluginPaths[actual]) {
			t.Fatalf("expected PATH slice to be unaltered. Expecting %v, but got %v", pluginPaths[actual], verifier.seenUnsorted[actual])
		}
	}
}
// TestPluginPathsAreValid exercises PluginListOptions.Run over a
// temporary plugin directory with a table of cases covering missing
// plugins, duplicated plugin paths, and blank path entries.
func TestPluginPathsAreValid(t *testing.T) {
	tempDir, err := ioutil.TempDir(os.TempDir(), "test-cmd-plugins")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// cleanup
	defer func() {
		if err := os.RemoveAll(tempDir); err != nil {
			panic(fmt.Errorf("unexpected cleanup error: %v", err))
		}
	}()
	tc := []struct {
		name string
		pluginPaths []string
		pluginFile func() (*os.File, error)
		verifier *fakePluginPathVerifier
		expectVerifyErrors []error
		expectErr string
		expectErrOut string
		expectOut string
	}{
		{
			name: "ensure no plugins found if no files begin with kubectl- prefix",
			pluginPaths: []string{tempDir},
			verifier: newFakePluginPathVerifier(),
			pluginFile: func() (*os.File, error) {
				return ioutil.TempFile(tempDir, "notkubectl-")
			},
			expectErr: "unable to find any kubectl plugins in your PATH",
		},
		{
			name: "ensure de-duplicated plugin-paths slice",
			pluginPaths: []string{tempDir, tempDir},
			verifier: newFakePluginPathVerifier(),
			pluginFile: func() (*os.File, error) {
				return ioutil.TempFile(tempDir, "kubectl-")
			},
			expectOut: "The following compatible plugins are available:",
		},
		{
			name: "ensure no errors when empty string or blank path are specified",
			pluginPaths: []string{tempDir, "", " "},
			verifier: newFakePluginPathVerifier(),
			pluginFile: func() (*os.File, error) {
				return ioutil.TempFile(tempDir, "kubectl-")
			},
			expectOut: "The following compatible plugins are available:",
		},
	}
	for _, test := range tc {
		t.Run(test.name, func(t *testing.T) {
			ioStreams, _, out, errOut := genericclioptions.NewTestIOStreams()
			o := &PluginListOptions{
				Verifier: test.verifier,
				IOStreams: ioStreams,
				PluginPaths: test.pluginPaths,
			}
			o.Out = out
			o.ErrOut = errOut
			// create files
			if test.pluginFile != nil {
				if _, err := test.pluginFile(); err != nil {
					t.Fatalf("unexpected error creating plugin file: %v", err)
				}
			}
			for _, expected := range test.expectVerifyErrors {
				for _, actual := range test.verifier.errors {
					if expected != actual {
						t.Fatalf("unexpected error: expected %v, but got %v", expected, actual)
					}
				}
			}
			err := o.Run()
			if err == nil && len(test.expectErr) > 0 {
				t.Fatalf("unexpected non-error: expecting %v", test.expectErr)
			}
			if err != nil && len(test.expectErr) == 0 {
				t.Fatalf("unexpected error: %v - %v", err, errOut.String())
			}
			// Fix: previously only the presence/absence of an error was
			// checked; also verify the error content matches expectErr.
			if err != nil && !strings.Contains(err.Error(), test.expectErr) {
				t.Fatalf("unexpected error: expected to contain %v, but got %v", test.expectErr, err.Error())
			}
			if len(test.expectErrOut) == 0 && errOut.Len() > 0 {
				t.Fatalf("unexpected error output: expected nothing, but got %v", errOut.String())
			} else if len(test.expectErrOut) > 0 && !strings.Contains(errOut.String(), test.expectErrOut) {
				t.Fatalf("unexpected error output: expected to contain %v, but got %v", test.expectErrOut, errOut.String())
			}
			if len(test.expectOut) == 0 && out.Len() > 0 {
				t.Fatalf("unexpected output: expected nothing, but got %v", out.String())
			} else if len(test.expectOut) > 0 && !strings.Contains(out.String(), test.expectOut) {
				t.Fatalf("unexpected output: expected to contain %v, but got %v", test.expectOut, out.String())
			}
		})
	}
}
// duplicatePathError signals that a plugin path was verified more than once.
type duplicatePathError struct {
	path string
}

// Error implements the error interface.
func (e *duplicatePathError) Error() string {
	return fmt.Sprintf("path %q already visited", e.path)
}

// fakePluginPathVerifier is a test double that records every path it is
// asked to verify, in order, and reports an error for repeated paths.
type fakePluginPathVerifier struct {
	errors       []error
	seen         map[string]bool
	seenUnsorted []string
}

// Verify records path and returns a single duplicatePathError when the
// same path has been verified before; otherwise it returns nil.
func (f *fakePluginPathVerifier) Verify(path string) []error {
	if !f.seen[path] {
		f.seen[path] = true
		f.seenUnsorted = append(f.seenUnsorted, path)
		return nil
	}
	err := &duplicatePathError{path}
	f.errors = append(f.errors, err)
	return []error{err}
}

// newFakePluginPathVerifier returns a ready-to-use empty verifier.
func newFakePluginPathVerifier() *fakePluginPathVerifier {
	return &fakePluginPathVerifier{seen: make(map[string]bool)}
}
Fixed problem in unit test where error expected/actual comparison was not being performed
Kubernetes-commit: 97185e97529ef7c006f26bb3190805ad28f15ffe
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugin
import (
"fmt"
"io/ioutil"
"os"
"strings"
"testing"
"k8s.io/cli-runtime/pkg/genericclioptions"
)
// TestPluginPathsAreUnaltered runs the plugin list command over two
// temporary plugin directories and checks that the PluginPaths slice
// reaches the verifier in its original order and form.
func TestPluginPathsAreUnaltered(t *testing.T) {
	tempDir, err := ioutil.TempDir(os.TempDir(), "test-cmd-plugins")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	tempDir2, err := ioutil.TempDir(os.TempDir(), "test-cmd-plugins2")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// cleanup
	defer func() {
		if err := os.RemoveAll(tempDir); err != nil {
			panic(fmt.Errorf("unexpected cleanup error: %v", err))
		}
		if err := os.RemoveAll(tempDir2); err != nil {
			panic(fmt.Errorf("unexpected cleanup error: %v", err))
		}
	}()
	ioStreams, _, _, errOut := genericclioptions.NewTestIOStreams()
	verifier := newFakePluginPathVerifier()
	pluginPaths := []string{tempDir, tempDir2}
	o := &PluginListOptions{
		Verifier: verifier,
		IOStreams: ioStreams,
		PluginPaths: pluginPaths,
	}
	// write at least one valid plugin file
	if _, err := ioutil.TempFile(tempDir, "kubectl-"); err != nil {
		t.Fatalf("unexpected error %v", err)
	}
	if _, err := ioutil.TempFile(tempDir2, "kubectl-"); err != nil {
		t.Fatalf("unexpected error %v", err)
	}
	if err := o.Run(); err != nil {
		t.Fatalf("unexpected error %v - %v", err, errOut.String())
	}
	// ensure original paths remain unaltered
	if len(verifier.seenUnsorted) != len(pluginPaths) {
		t.Fatalf("saw unexpected plugin paths. Expecting %v, got %v", pluginPaths, verifier.seenUnsorted)
	}
	// seenUnsorted preserves verification order, so index i must still
	// correspond to pluginPaths[i].
	for actual := range verifier.seenUnsorted {
		if !strings.HasPrefix(verifier.seenUnsorted[actual], pluginPaths[actual]) {
			t.Fatalf("expected PATH slice to be unaltered. Expecting %v, but got %v", pluginPaths[actual], verifier.seenUnsorted[actual])
		}
	}
}
// TestPluginPathsAreValid exercises PluginListOptions.Run over a
// temporary plugin directory with a table of cases covering missing
// plugins, duplicated plugin paths, and blank path entries. Both the
// presence and the exact content of any returned error are verified.
func TestPluginPathsAreValid(t *testing.T) {
	tempDir, err := ioutil.TempDir(os.TempDir(), "test-cmd-plugins")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// cleanup
	defer func() {
		if err := os.RemoveAll(tempDir); err != nil {
			panic(fmt.Errorf("unexpected cleanup error: %v", err))
		}
	}()
	tc := []struct {
		name string
		pluginPaths []string
		pluginFile func() (*os.File, error)
		verifier *fakePluginPathVerifier
		expectVerifyErrors []error
		expectErr string
		expectErrOut string
		expectOut string
	}{
		{
			name: "ensure no plugins found if no files begin with kubectl- prefix",
			pluginPaths: []string{tempDir},
			verifier: newFakePluginPathVerifier(),
			pluginFile: func() (*os.File, error) {
				return ioutil.TempFile(tempDir, "notkubectl-")
			},
			expectErr: "error: unable to find any kubectl plugins in your PATH\n",
		},
		{
			name: "ensure de-duplicated plugin-paths slice",
			pluginPaths: []string{tempDir, tempDir},
			verifier: newFakePluginPathVerifier(),
			pluginFile: func() (*os.File, error) {
				return ioutil.TempFile(tempDir, "kubectl-")
			},
			expectOut: "The following compatible plugins are available:",
		},
		{
			name: "ensure no errors when empty string or blank path are specified",
			pluginPaths: []string{tempDir, "", " "},
			verifier: newFakePluginPathVerifier(),
			pluginFile: func() (*os.File, error) {
				return ioutil.TempFile(tempDir, "kubectl-")
			},
			expectOut: "The following compatible plugins are available:",
		},
	}
	for _, test := range tc {
		t.Run(test.name, func(t *testing.T) {
			ioStreams, _, out, errOut := genericclioptions.NewTestIOStreams()
			o := &PluginListOptions{
				Verifier: test.verifier,
				IOStreams: ioStreams,
				PluginPaths: test.pluginPaths,
			}
			// create files
			if test.pluginFile != nil {
				if _, err := test.pluginFile(); err != nil {
					t.Fatalf("unexpected error creating plugin file: %v", err)
				}
			}
			for _, expected := range test.expectVerifyErrors {
				for _, actual := range test.verifier.errors {
					if expected != actual {
						t.Fatalf("unexpected error: expected %v, but got %v", expected, actual)
					}
				}
			}
			// Check error presence AND content against expectErr.
			err := o.Run()
			if err == nil && len(test.expectErr) > 0 {
				t.Fatalf("unexpected non-error: expected %v, but got nothing", test.expectErr)
			} else if err != nil && len(test.expectErr) == 0 {
				t.Fatalf("unexpected error: expected nothing, but got %v", err.Error())
			} else if err != nil && err.Error() != test.expectErr {
				t.Fatalf("unexpected error: expected %v, but got %v", test.expectErr, err.Error())
			}
			if len(test.expectErrOut) == 0 && errOut.Len() > 0 {
				t.Fatalf("unexpected error output: expected nothing, but got %v", errOut.String())
			} else if len(test.expectErrOut) > 0 && !strings.Contains(errOut.String(), test.expectErrOut) {
				t.Fatalf("unexpected error output: expected to contain %v, but got %v", test.expectErrOut, errOut.String())
			}
			if len(test.expectOut) == 0 && out.Len() > 0 {
				t.Fatalf("unexpected output: expected nothing, but got %v", out.String())
			} else if len(test.expectOut) > 0 && !strings.Contains(out.String(), test.expectOut) {
				t.Fatalf("unexpected output: expected to contain %v, but got %v", test.expectOut, out.String())
			}
		})
	}
}
// duplicatePathError signals that a plugin path was verified more than once.
type duplicatePathError struct {
	path string
}

// Error implements the error interface.
func (e *duplicatePathError) Error() string {
	return fmt.Sprintf("path %q already visited", e.path)
}

// fakePluginPathVerifier is a test double that records every path it is
// asked to verify, in order, and reports an error for repeated paths.
type fakePluginPathVerifier struct {
	errors       []error
	seen         map[string]bool
	seenUnsorted []string
}

// Verify records path and returns a single duplicatePathError when the
// same path has been verified before; otherwise it returns nil.
func (f *fakePluginPathVerifier) Verify(path string) []error {
	if !f.seen[path] {
		f.seen[path] = true
		f.seenUnsorted = append(f.seenUnsorted, path)
		return nil
	}
	err := &duplicatePathError{path}
	f.errors = append(f.errors, err)
	return []error{err}
}

// newFakePluginPathVerifier returns a ready-to-use empty verifier.
func newFakePluginPathVerifier() *fakePluginPathVerifier {
	return &fakePluginPathVerifier{seen: make(map[string]bool)}
}
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package features
import (
"k8s.io/apimachinery/pkg/util/runtime"
genericfeatures "k8s.io/apiserver/pkg/features"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-base/featuregate"
)
const (
// Every feature gate should add an entry here following this template:
//
// // owner: @username
// // alpha: v1.X
// MyFeature featuregate.Feature = "MyFeature"
// owner: @tallclair
// beta: v1.4
AppArmor featuregate.Feature = "AppArmor"
// owner: @mtaufen
// alpha: v1.4
// beta: v1.11
DynamicKubeletConfig featuregate.Feature = "DynamicKubeletConfig"
// owner: @pweil-
// alpha: v1.5
//
// Default userns=host for containers that are using other host namespaces, host mounts, the pod
// contains a privileged container, or specific non-namespaced capabilities (MKNOD, SYS_MODULE,
// SYS_TIME). This should only be enabled if user namespace remapping is enabled in the docker daemon.
ExperimentalHostUserNamespaceDefaultingGate featuregate.Feature = "ExperimentalHostUserNamespaceDefaulting"
// owner: @jiayingz
// beta: v1.10
//
// Enables support for Device Plugins
DevicePlugins featuregate.Feature = "DevicePlugins"
// owner: @dxist
// alpha: v1.16
//
// Enables support of HPA scaling to zero pods when an object or custom metric is configured.
HPAScaleToZero featuregate.Feature = "HPAScaleToZero"
// owner: @Huang-Wei
// beta: v1.13
// ga: v1.18
//
// Changes the logic behind evicting Pods from not ready Nodes
// to take advantage of NoExecute Taints and Tolerations.
TaintBasedEvictions featuregate.Feature = "TaintBasedEvictions"
// owner: @mikedanese
// alpha: v1.7
// beta: v1.12
//
// Gets a server certificate for the kubelet from the Certificate Signing
// Request API instead of generating one self signed and auto rotates the
// certificate as expiration approaches.
RotateKubeletServerCertificate featuregate.Feature = "RotateKubeletServerCertificate"
// owner: @mikedanese
// beta: v1.8
// ga: v1.19
//
// Automatically renews the client certificate used for communicating with
// the API server as the certificate approaches expiration.
RotateKubeletClientCertificate featuregate.Feature = "RotateKubeletClientCertificate"
// owner: @jinxu
// beta: v1.10
//
// New local storage types to support local storage capacity isolation
LocalStorageCapacityIsolation featuregate.Feature = "LocalStorageCapacityIsolation"
// owner: @gnufied
// beta: v1.11
// Ability to Expand persistent volumes
ExpandPersistentVolumes featuregate.Feature = "ExpandPersistentVolumes"
// owner: @mlmhl
// beta: v1.15
// Ability to expand persistent volumes' file system without unmounting volumes.
ExpandInUsePersistentVolumes featuregate.Feature = "ExpandInUsePersistentVolumes"
// owner: @gnufied
// alpha: v1.14
// beta: v1.16
// Ability to expand CSI volumes
ExpandCSIVolumes featuregate.Feature = "ExpandCSIVolumes"
// owner: @verb
// alpha: v1.16
//
// Allows running an ephemeral container in pod namespaces to troubleshoot a running pod.
EphemeralContainers featuregate.Feature = "EphemeralContainers"
// owner: @sjenning
// alpha: v1.11
//
// Allows resource reservations at the QoS level preventing pods at lower QoS levels from
// bursting into resources requested at higher QoS levels (memory only for now)
QOSReserved featuregate.Feature = "QOSReserved"
// owner: @ConnorDoyle
// alpha: v1.8
// beta: v1.10
//
// Alternative container-level CPU affinity policies.
CPUManager featuregate.Feature = "CPUManager"
// owner: @szuecs
// alpha: v1.12
//
// Enable nodes to change CPUCFSQuotaPeriod
CPUCFSQuotaPeriod featuregate.Feature = "CustomCPUCFSQuotaPeriod"
// owner: @lmdaly
// alpha: v1.16
// beta: v1.18
//
// Enable resource managers to make NUMA aligned decisions
TopologyManager featuregate.Feature = "TopologyManager"
// owner: @sjenning
// beta: v1.11
//
// Enable pods to set sysctls on a pod
Sysctls featuregate.Feature = "Sysctls"
// owner: @smarterclayton
// alpha: v1.16
// beta: v1.19
//
// Enable legacy behavior to vary cluster functionality on the node-role.kubernetes.io labels. On by default (legacy), will be turned off in 1.18.
LegacyNodeRoleBehavior featuregate.Feature = "LegacyNodeRoleBehavior"
// owner: @brendandburns
// alpha: v1.9
// beta: v1.19
//
// Enable nodes to exclude themselves from service load balancers
ServiceNodeExclusion featuregate.Feature = "ServiceNodeExclusion"
// owner: @smarterclayton
// alpha: v1.16
// beta: v1.19
//
// Enable nodes to exclude themselves from network disruption checks
NodeDisruptionExclusion featuregate.Feature = "NodeDisruptionExclusion"
// owner: @saad-ali
// alpha: v1.12
// beta: v1.14
// GA: v1.18
// Enable all logic related to the CSIDriver API object in storage.k8s.io
CSIDriverRegistry featuregate.Feature = "CSIDriverRegistry"
// owner: @verult
// alpha: v1.12
// beta: v1.14
// ga: v1.17
// Enable all logic related to the CSINode API object in storage.k8s.io
CSINodeInfo featuregate.Feature = "CSINodeInfo"
// owner: @screeley44
// alpha: v1.9
// beta: v1.13
// ga: v1.18
//
// Enable Block volume support in containers.
BlockVolume featuregate.Feature = "BlockVolume"
// owner: @pospispa
// GA: v1.11
//
// Postpone deletion of a PV or a PVC when they are being used
StorageObjectInUseProtection featuregate.Feature = "StorageObjectInUseProtection"
// owner: @dims, @derekwaynecarr
// alpha: v1.10
// beta: v1.14
// GA: v1.20
//
// Implement support for limiting pids in pods
SupportPodPidsLimit featuregate.Feature = "SupportPodPidsLimit"
// owner: @feiskyer
// alpha: v1.10
//
// Enable Hyper-V containers on Windows
// Deprecated in 1.20 and removed in 1.21
HyperVContainer featuregate.Feature = "HyperVContainer"
// owner: @mikedanese
// beta: v1.12
// ga: v1.20
//
// Implement TokenRequest endpoint on service account resources.
TokenRequest featuregate.Feature = "TokenRequest"
// owner: @mikedanese
// beta: v1.12
// ga: v1.20
//
// Enable ServiceAccountTokenVolumeProjection support in ProjectedVolumes.
TokenRequestProjection featuregate.Feature = "TokenRequestProjection"
// owner: @mikedanese
// alpha: v1.13
//
// Migrate ServiceAccount volumes to use a projected volume consisting of a
// ServiceAccountTokenVolumeProjection. This feature adds new required flags
// to the API server.
BoundServiceAccountTokenVolume featuregate.Feature = "BoundServiceAccountTokenVolume"
// owner: @mtaufen
// alpha: v1.18
// beta: v1.20
//
// Enable OIDC discovery endpoints (issuer and JWKS URLs) for the service
// account issuer in the API server.
// Note these endpoints serve minimally-compliant discovery docs that are
// intended to be used for service account token verification.
ServiceAccountIssuerDiscovery featuregate.Feature = "ServiceAccountIssuerDiscovery"
// owner: @Random-Liu
// beta: v1.11
//
// Enable container log rotation for cri container runtime
CRIContainerLogRotation featuregate.Feature = "CRIContainerLogRotation"
// owner: @krmayankk
// beta: v1.14
//
// Enables control over the primary group ID of containers' init processes.
RunAsGroup featuregate.Feature = "RunAsGroup"
// owner: @saad-ali
// ga
//
// Allow mounting a subpath of a volume in a container
// Do not remove this feature gate even though it's GA
VolumeSubpath featuregate.Feature = "VolumeSubpath"
// owner: @gnufied
// beta : v1.12
// GA : v1.17
//
// Add support for volume plugins to report node specific
// volume limits
AttachVolumeLimit featuregate.Feature = "AttachVolumeLimit"
// owner: @ravig
// alpha: v1.11
//
// Include volume count on node to be considered for balanced resource allocation while scheduling.
// A node which has closer cpu,memory utilization and volume count is favoured by scheduler
// while making decisions.
BalanceAttachedNodeVolumes featuregate.Feature = "BalanceAttachedNodeVolumes"
// owner: @vladimirvivien
// alpha: v1.11
// beta: v1.14
// ga: v1.18
//
// Enables CSI to use raw block storage volumes
CSIBlockVolume featuregate.Feature = "CSIBlockVolume"
// owner: @pohly
// alpha: v1.14
// beta: v1.16
//
// Enables CSI Inline volumes support for pods
CSIInlineVolume featuregate.Feature = "CSIInlineVolume"
// owner: @pohly
// alpha: v1.19
//
// Enables tracking of available storage capacity that CSI drivers provide.
CSIStorageCapacity featuregate.Feature = "CSIStorageCapacity"
// owner: @alculquicondor
// beta: v1.20
//
// Enables the use of PodTopologySpread scheduling plugin to do default
// spreading and disables legacy SelectorSpread plugin.
DefaultPodTopologySpread featuregate.Feature = "DefaultPodTopologySpread"
// owner: @pohly
// alpha: v1.19
//
// Enables generic ephemeral inline volume support for pods
GenericEphemeralVolume featuregate.Feature = "GenericEphemeralVolume"
// owner: @tallclair
// alpha: v1.12
// beta: v1.14
//
// Enables RuntimeClass, for selecting between multiple runtimes to run a pod.
RuntimeClass featuregate.Feature = "RuntimeClass"
// owner: @mtaufen
// alpha: v1.12
// beta: v1.14
// GA: v1.17
//
// Kubelet uses the new Lease API to report node heartbeats,
// (Kube) Node Lifecycle Controller uses these heartbeats as a node health signal.
NodeLease featuregate.Feature = "NodeLease"
// owner: @janosi
// alpha: v1.12
// beta: v1.18
// GA: v1.20
//
// Enables SCTP as new protocol for Service ports, NetworkPolicy, and ContainerPort in Pod/Containers definition
SCTPSupport featuregate.Feature = "SCTPSupport"
// owner: @xing-yang
// alpha: v1.12
// beta: v1.17
//
// Enable volume snapshot data source support.
VolumeSnapshotDataSource featuregate.Feature = "VolumeSnapshotDataSource"
// owner: @jessfraz
// alpha: v1.12
//
// Enables control over ProcMountType for containers.
ProcMountType featuregate.Feature = "ProcMountType"
// owner: @janetkuo
// alpha: v1.12
//
// Allow TTL controller to clean up Pods and Jobs after they finish.
TTLAfterFinished featuregate.Feature = "TTLAfterFinished"
// owner: @dashpole
// alpha: v1.13
// beta: v1.15
//
// Enables the kubelet's pod resources grpc endpoint
KubeletPodResources featuregate.Feature = "KubeletPodResources"
// owner: @davidz627
// alpha: v1.14
// beta: v1.17
//
// Enables the in-tree storage to CSI Plugin migration feature.
CSIMigration featuregate.Feature = "CSIMigration"
// owner: @davidz627
// alpha: v1.14
// beta: v1.17
//
// Enables the GCE PD in-tree driver to GCE CSI Driver migration feature.
CSIMigrationGCE featuregate.Feature = "CSIMigrationGCE"
// owner: @davidz627
// alpha: v1.17
//
// Disables the GCE PD in-tree driver.
// Expects GCE PD CSI Driver to be installed and configured on all nodes.
CSIMigrationGCEComplete featuregate.Feature = "CSIMigrationGCEComplete"
// owner: @leakingtapan
// alpha: v1.14
// beta: v1.17
//
// Enables the AWS EBS in-tree driver to AWS EBS CSI Driver migration feature.
CSIMigrationAWS featuregate.Feature = "CSIMigrationAWS"
// owner: @leakingtapan
// alpha: v1.17
//
// Disables the AWS EBS in-tree driver.
// Expects AWS EBS CSI Driver to be installed and configured on all nodes.
CSIMigrationAWSComplete featuregate.Feature = "CSIMigrationAWSComplete"
// owner: @andyzhangx
// alpha: v1.15
// beta: v1.19
//
// Enables the Azure Disk in-tree driver to Azure Disk Driver migration feature.
CSIMigrationAzureDisk featuregate.Feature = "CSIMigrationAzureDisk"
// owner: @andyzhangx
// alpha: v1.17
//
// Disables the Azure Disk in-tree driver.
// Expects Azure Disk CSI Driver to be installed and configured on all nodes.
CSIMigrationAzureDiskComplete featuregate.Feature = "CSIMigrationAzureDiskComplete"
// owner: @andyzhangx
// alpha: v1.15
//
// Enables the Azure File in-tree driver to Azure File Driver migration feature.
CSIMigrationAzureFile featuregate.Feature = "CSIMigrationAzureFile"
// owner: @andyzhangx
// alpha: v1.17
//
// Disables the Azure File in-tree driver.
// Expects Azure File CSI Driver to be installed and configured on all nodes.
CSIMigrationAzureFileComplete featuregate.Feature = "CSIMigrationAzureFileComplete"
// owner: @divyenpatel
// beta: v1.19 (requires: vSphere vCenter/ESXi Version: 7.0u1, HW Version: VM version 15)
//
// Enables the vSphere in-tree driver to vSphere CSI Driver migration feature.
CSIMigrationvSphere featuregate.Feature = "CSIMigrationvSphere"
// owner: @divyenpatel
// beta: v1.19 (requires: vSphere vCenter/ESXi Version: 7.0u1, HW Version: VM version 15)
//
// Disables the vSphere in-tree driver.
// Expects vSphere CSI Driver to be installed and configured on all nodes.
CSIMigrationvSphereComplete featuregate.Feature = "CSIMigrationvSphereComplete"
// owner: @huffmanca
// alpha: v1.19
//
// Determines if a CSI Driver supports applying fsGroup.
CSIVolumeFSGroupPolicy featuregate.Feature = "CSIVolumeFSGroupPolicy"
// owner: @gnufied
// alpha: v1.18
// Allows user to configure volume permission change policy for fsGroups when mounting
// a volume in a Pod.
ConfigurableFSGroupPolicy featuregate.Feature = "ConfigurableFSGroupPolicy"
// owner: @RobertKrawitz, @derekwaynecarr
// beta: v1.15
// GA: v1.20
//
// Implement support for limiting pids in nodes
SupportNodePidsLimit featuregate.Feature = "SupportNodePidsLimit"
// owner: @wk8
// alpha: v1.14
// beta: v1.16
//
// Enables GMSA support for Windows workloads.
WindowsGMSA featuregate.Feature = "WindowsGMSA"
// owner: @bclau
// alpha: v1.16
// beta: v1.17
// GA: v1.18
//
// Enables support for running container entrypoints as different usernames than their default ones.
WindowsRunAsUserName featuregate.Feature = "WindowsRunAsUserName"
// owner: @adisky
// alpha: v1.14
// beta: v1.18
//
// Enables the OpenStack Cinder in-tree driver to OpenStack Cinder CSI Driver migration feature.
CSIMigrationOpenStack featuregate.Feature = "CSIMigrationOpenStack"
// owner: @adisky
// alpha: v1.17
//
// Disables the OpenStack Cinder in-tree driver.
// Expects the OpenStack Cinder CSI Driver to be installed and configured on all nodes.
CSIMigrationOpenStackComplete featuregate.Feature = "CSIMigrationOpenStackComplete"
// owner: @RobertKrawitz
// alpha: v1.15
//
// Allow use of filesystems for ephemeral storage monitoring.
// Only applies if LocalStorageCapacityIsolation is set.
LocalStorageCapacityIsolationFSQuotaMonitoring featuregate.Feature = "LocalStorageCapacityIsolationFSQuotaMonitoring"
// owner: @denkensk
// alpha: v1.15
// beta: v1.19
//
// Enables NonPreempting option for priorityClass and pod.
NonPreemptingPriority featuregate.Feature = "NonPreemptingPriority"
// owner: @j-griffith
// alpha: v1.15
// beta: v1.16
// GA: v1.18
//
// Enable support for specifying an existing PVC as a DataSource
VolumePVCDataSource featuregate.Feature = "VolumePVCDataSource"
// owner: @egernst
// alpha: v1.16
// beta: v1.18
//
// Enables PodOverhead, for accounting pod overheads which are specific to a given RuntimeClass
PodOverhead featuregate.Feature = "PodOverhead"
// owner: @khenidak
// alpha: v1.15
//
// Enables ipv6 dual stack
IPv6DualStack featuregate.Feature = "IPv6DualStack"
// owner: @robscott @freehan
// alpha: v1.16
//
// Enable Endpoint Slices for more scalable Service endpoints.
EndpointSlice featuregate.Feature = "EndpointSlice"
// owner: @robscott @freehan
// alpha: v1.18
// beta: v1.19
//
// Enable Endpoint Slice consumption by kube-proxy for improved scalability.
EndpointSliceProxying featuregate.Feature = "EndpointSliceProxying"
// owner: @robscott @kumarvin123
// alpha: v1.19
//
// Enable Endpoint Slice consumption by kube-proxy in Windows for improved scalability.
WindowsEndpointSliceProxying featuregate.Feature = "WindowsEndpointSliceProxying"
// owner: @Huang-Wei
// alpha: v1.16
// beta: v1.18
// GA: v1.19
//
// Schedule pods evenly across available topology domains.
EvenPodsSpread featuregate.Feature = "EvenPodsSpread"
// owner: @matthyx
// alpha: v1.16
// beta: v1.18
// GA: v1.20
//
// Enables the startupProbe in kubelet worker.
StartupProbe featuregate.Feature = "StartupProbe"
// owner: @deads2k
// beta: v1.17
//
// Enables the users to skip TLS verification of kubelets on pod logs requests
AllowInsecureBackendProxy featuregate.Feature = "AllowInsecureBackendProxy"
// owner: @mortent
// alpha: v1.3
// beta: v1.5
//
// Enable all logic related to the PodDisruptionBudget API object in policy
PodDisruptionBudget featuregate.Feature = "PodDisruptionBudget"
// owner: @m1093782566
// alpha: v1.17
//
// Enables topology aware service routing
ServiceTopology featuregate.Feature = "ServiceTopology"
// owner: @robscott
// alpha: v1.18
// beta: v1.19
//
// Enables AppProtocol field for Services and Endpoints.
ServiceAppProtocol featuregate.Feature = "ServiceAppProtocol"
// owner: @wojtek-t
// alpha: v1.18
// beta: v1.19
//
// Enables a feature to make secrets and configmaps data immutable.
ImmutableEphemeralVolumes featuregate.Feature = "ImmutableEphemeralVolumes"
// owner: @bart0sh
// alpha: v1.18
// beta: v1.19
//
// Enables usage of HugePages-<size> in a volume medium,
// e.g. emptyDir:
// medium: HugePages-1Gi
HugePageStorageMediumSize featuregate.Feature = "HugePageStorageMediumSize"
// owner: @freehan
// GA: v1.18
//
// Enable ExternalTrafficPolicy for Service ExternalIPs.
// This is for bug fix #69811
ExternalPolicyForExternalIP featuregate.Feature = "ExternalPolicyForExternalIP"
// owner: @bswartz
// alpha: v1.18
//
// Enables usage of any object for volume data source in PVCs
AnyVolumeDataSource featuregate.Feature = "AnyVolumeDataSource"
// owner: @javidiaz
// alpha: v1.19
// beta: v1.20
//
// Allow setting the Fully Qualified Domain Name (FQDN) in the hostname of a Pod. If a Pod does not
// have FQDN, this feature has no effect.
SetHostnameAsFQDN featuregate.Feature = "SetHostnameAsFQDN"
// owner: @ksubrmnn
// alpha: v1.14
// beta: v1.20
//
// Allows kube-proxy to run in Overlay mode for Windows
WinOverlay featuregate.Feature = "WinOverlay"
// owner: @ksubrmnn
// alpha: v1.14
//
// Allows kube-proxy to create DSR loadbalancers for Windows
WinDSR featuregate.Feature = "WinDSR"
// owner: @RenaudWasTaken @dashpole
// alpha: v1.19
// beta: v1.20
//
// Disables Accelerator Metrics Collected by Kubelet
DisableAcceleratorUsageMetrics featuregate.Feature = "DisableAcceleratorUsageMetrics"
// owner: @arjunrn @mwielgus @josephburnett
// alpha: v1.20
//
// Add support for the HPA to scale based on metrics from individual containers
// in target pods
HPAContainerMetrics featuregate.Feature = "HPAContainerMetrics"
// owner: @zshihang
// alpha: v1.13
// beta: v1.20
//
// Allows kube-controller-manager to publish kube-root-ca.crt configmap to
// every namespace. This feature is a prerequisite of BoundServiceAccountTokenVolume.
RootCAConfigMap featuregate.Feature = "RootCAConfigMap"
// owner: @andrewsykim
// alpha: v1.20
//
// Enable Terminating condition in Endpoint Slices.
EndpointSliceTerminatingCondition featuregate.Feature = "EndpointSliceTerminatingCondition"
// owner: @derekwaynecarr
// alpha: v1.20
//
// Enables kubelet support to size memory backed volumes
SizeMemoryBackedVolumes featuregate.Feature = "SizeMemoryBackedVolumes"
// owner: @Sh4d1
// alpha: v1.21
// LoadBalancerIPMode enables the IPMode field in the LoadBalancerIngress status of a Service
LoadBalancerIPMode featuregate.Feature = "LoadBalancerIPMode"
)
func init() {
runtime.Must(utilfeature.DefaultMutableFeatureGate.Add(defaultKubernetesFeatureGates))
}
// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys.
// To add a new feature, define a key for it above and add it here. The features will be
// available throughout Kubernetes binaries.
var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
AppArmor: {Default: true, PreRelease: featuregate.Beta},
DynamicKubeletConfig: {Default: true, PreRelease: featuregate.Beta},
ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: featuregate.Beta},
DevicePlugins: {Default: true, PreRelease: featuregate.Beta},
TaintBasedEvictions: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.19
RotateKubeletServerCertificate: {Default: true, PreRelease: featuregate.Beta},
RotateKubeletClientCertificate: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.20
LocalStorageCapacityIsolation: {Default: true, PreRelease: featuregate.Beta},
Sysctls: {Default: true, PreRelease: featuregate.Beta},
EphemeralContainers: {Default: false, PreRelease: featuregate.Alpha},
QOSReserved: {Default: false, PreRelease: featuregate.Alpha},
ExpandPersistentVolumes: {Default: true, PreRelease: featuregate.Beta},
ExpandInUsePersistentVolumes: {Default: true, PreRelease: featuregate.Beta},
ExpandCSIVolumes: {Default: true, PreRelease: featuregate.Beta},
AttachVolumeLimit: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.19
CPUManager: {Default: true, PreRelease: featuregate.Beta},
CPUCFSQuotaPeriod: {Default: false, PreRelease: featuregate.Alpha},
TopologyManager: {Default: true, PreRelease: featuregate.Beta},
ServiceNodeExclusion: {Default: true, PreRelease: featuregate.Beta},
NodeDisruptionExclusion: {Default: true, PreRelease: featuregate.Beta},
CSIDriverRegistry: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.20
CSINodeInfo: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.19
BlockVolume: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.20
StorageObjectInUseProtection: {Default: true, PreRelease: featuregate.GA},
SupportPodPidsLimit: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.23
SupportNodePidsLimit: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.23
HyperVContainer: {Default: false, PreRelease: featuregate.Deprecated},
TokenRequest: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.21
TokenRequestProjection: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.21
BoundServiceAccountTokenVolume: {Default: false, PreRelease: featuregate.Alpha},
ServiceAccountIssuerDiscovery: {Default: true, PreRelease: featuregate.Beta},
CRIContainerLogRotation: {Default: true, PreRelease: featuregate.Beta},
CSIMigration: {Default: true, PreRelease: featuregate.Beta},
CSIMigrationGCE: {Default: false, PreRelease: featuregate.Beta}, // Off by default (requires GCE PD CSI Driver)
CSIMigrationGCEComplete: {Default: false, PreRelease: featuregate.Alpha},
CSIMigrationAWS: {Default: false, PreRelease: featuregate.Beta}, // Off by default (requires AWS EBS CSI driver)
CSIMigrationAWSComplete: {Default: false, PreRelease: featuregate.Alpha},
CSIMigrationAzureDisk: {Default: false, PreRelease: featuregate.Beta}, // Off by default (requires Azure Disk CSI driver)
CSIMigrationAzureDiskComplete: {Default: false, PreRelease: featuregate.Alpha},
CSIMigrationAzureFile: {Default: false, PreRelease: featuregate.Alpha},
CSIMigrationAzureFileComplete: {Default: false, PreRelease: featuregate.Alpha},
CSIMigrationvSphere: {Default: false, PreRelease: featuregate.Beta},
CSIMigrationvSphereComplete: {Default: false, PreRelease: featuregate.Beta},
RunAsGroup: {Default: true, PreRelease: featuregate.Beta},
CSIMigrationOpenStack: {Default: false, PreRelease: featuregate.Beta}, // Off by default (requires OpenStack Cinder CSI driver)
CSIMigrationOpenStackComplete: {Default: false, PreRelease: featuregate.Alpha},
VolumeSubpath: {Default: true, PreRelease: featuregate.GA},
ConfigurableFSGroupPolicy: {Default: false, PreRelease: featuregate.Alpha},
BalanceAttachedNodeVolumes: {Default: false, PreRelease: featuregate.Alpha},
CSIBlockVolume: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.20
CSIInlineVolume: {Default: true, PreRelease: featuregate.Beta},
CSIStorageCapacity: {Default: false, PreRelease: featuregate.Alpha},
GenericEphemeralVolume: {Default: false, PreRelease: featuregate.Alpha},
CSIVolumeFSGroupPolicy: {Default: false, PreRelease: featuregate.Alpha},
RuntimeClass: {Default: true, PreRelease: featuregate.Beta},
NodeLease: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
SCTPSupport: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.22
VolumeSnapshotDataSource: {Default: true, PreRelease: featuregate.Beta},
ProcMountType: {Default: false, PreRelease: featuregate.Alpha},
TTLAfterFinished: {Default: false, PreRelease: featuregate.Alpha},
KubeletPodResources: {Default: true, PreRelease: featuregate.Beta},
WindowsGMSA: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.20
WindowsRunAsUserName: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.20
LocalStorageCapacityIsolationFSQuotaMonitoring: {Default: false, PreRelease: featuregate.Alpha},
NonPreemptingPriority: {Default: true, PreRelease: featuregate.Beta},
VolumePVCDataSource: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.20
PodOverhead: {Default: true, PreRelease: featuregate.Beta},
IPv6DualStack: {Default: false, PreRelease: featuregate.Alpha},
EndpointSlice: {Default: true, PreRelease: featuregate.Beta},
EndpointSliceProxying: {Default: true, PreRelease: featuregate.Beta},
EndpointSliceTerminatingCondition: {Default: false, PreRelease: featuregate.Alpha},
WindowsEndpointSliceProxying: {Default: false, PreRelease: featuregate.Alpha},
EvenPodsSpread: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.21
StartupProbe: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.23
AllowInsecureBackendProxy: {Default: true, PreRelease: featuregate.Beta},
PodDisruptionBudget: {Default: true, PreRelease: featuregate.Beta},
ServiceTopology: {Default: false, PreRelease: featuregate.Alpha},
ServiceAppProtocol: {Default: true, PreRelease: featuregate.Beta},
ImmutableEphemeralVolumes: {Default: true, PreRelease: featuregate.Beta},
HugePageStorageMediumSize: {Default: true, PreRelease: featuregate.Beta},
ExternalPolicyForExternalIP: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.22
AnyVolumeDataSource: {Default: false, PreRelease: featuregate.Alpha},
DefaultPodTopologySpread: {Default: true, PreRelease: featuregate.Beta},
SetHostnameAsFQDN: {Default: true, PreRelease: featuregate.Beta},
WinOverlay: {Default: true, PreRelease: featuregate.Beta},
WinDSR: {Default: false, PreRelease: featuregate.Alpha},
DisableAcceleratorUsageMetrics: {Default: true, PreRelease: featuregate.Beta},
HPAContainerMetrics: {Default: false, PreRelease: featuregate.Alpha},
RootCAConfigMap: {Default: true, PreRelease: featuregate.Beta},
SizeMemoryBackedVolumes: {Default: false, PreRelease: featuregate.Alpha},
LoadBalancerIPMode: {Default: false, PreRelease: featuregate.Alpha},
// inherited features from generic apiserver, relisted here to get a conflict if it is changed
// unintentionally on either side:
genericfeatures.StreamingProxyRedirects: {Default: true, PreRelease: featuregate.Deprecated},
genericfeatures.ValidateProxyRedirects: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.AdvancedAuditing: {Default: true, PreRelease: featuregate.GA},
genericfeatures.APIResponseCompression: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.APIListChunking: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.DryRun: {Default: true, PreRelease: featuregate.GA},
genericfeatures.ServerSideApply: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.APIPriorityAndFairness: {Default: false, PreRelease: featuregate.Alpha},
genericfeatures.WarningHeaders: {Default: true, PreRelease: featuregate.Beta},
// features that enable backwards compatibility but are scheduled to be removed
// (see the per-gate comments in the const block above for their deprecation timelines)
HPAScaleToZero: {Default: false, PreRelease: featuregate.Alpha},
LegacyNodeRoleBehavior: {Default: true, PreRelease: featuregate.Beta},
}
Enable ConfigurableFSGroupPolicy feature gate
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package features
import (
"k8s.io/apimachinery/pkg/util/runtime"
genericfeatures "k8s.io/apiserver/pkg/features"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-base/featuregate"
)
const (
// Every feature gate should add method here following this template:
//
// // owner: @username
// // alpha: v1.X
// MyFeature featuregate.Feature = "MyFeature"
// owner: @tallclair
// beta: v1.4
AppArmor featuregate.Feature = "AppArmor"
// owner: @mtaufen
// alpha: v1.4
// beta: v1.11
DynamicKubeletConfig featuregate.Feature = "DynamicKubeletConfig"
// owner: @pweil-
// alpha: v1.5
//
// Default userns=host for containers that are using other host namespaces, host mounts, the pod
// contains a privileged container, or specific non-namespaced capabilities (MKNOD, SYS_MODULE,
// SYS_TIME). This should only be enabled if user namespace remapping is enabled in the docker daemon.
ExperimentalHostUserNamespaceDefaultingGate featuregate.Feature = "ExperimentalHostUserNamespaceDefaulting"
// owner: @jiayingz
// beta: v1.10
//
// Enables support for Device Plugins
DevicePlugins featuregate.Feature = "DevicePlugins"
// owner: @dxist
// alpha: v1.16
//
// Enables support of HPA scaling to zero pods when an object or custom metric is configured.
HPAScaleToZero featuregate.Feature = "HPAScaleToZero"
// owner: @Huang-Wei
// beta: v1.13
// ga: v1.18
//
// Changes the logic behind evicting Pods from not ready Nodes
// to take advantage of NoExecute Taints and Tolerations.
TaintBasedEvictions featuregate.Feature = "TaintBasedEvictions"
// owner: @mikedanese
// alpha: v1.7
// beta: v1.12
//
// Gets a server certificate for the kubelet from the Certificate Signing
// Request API instead of generating one self signed and auto rotates the
// certificate as expiration approaches.
RotateKubeletServerCertificate featuregate.Feature = "RotateKubeletServerCertificate"
// owner: @mikedanese
// beta: v1.8
// ga: v1.19
//
// Automatically renews the client certificate used for communicating with
// the API server as the certificate approaches expiration.
RotateKubeletClientCertificate featuregate.Feature = "RotateKubeletClientCertificate"
// owner: @jinxu
// beta: v1.10
//
// New local storage types to support local storage capacity isolation
LocalStorageCapacityIsolation featuregate.Feature = "LocalStorageCapacityIsolation"
// owner: @gnufied
// beta: v1.11
// Ability to Expand persistent volumes
ExpandPersistentVolumes featuregate.Feature = "ExpandPersistentVolumes"
// owner: @mlmhl
// beta: v1.15
// Ability to expand persistent volumes' file system without unmounting volumes.
ExpandInUsePersistentVolumes featuregate.Feature = "ExpandInUsePersistentVolumes"
// owner: @gnufied
// alpha: v1.14
// beta: v1.16
// Ability to expand CSI volumes
ExpandCSIVolumes featuregate.Feature = "ExpandCSIVolumes"
// owner: @verb
// alpha: v1.16
//
// Allows running an ephemeral container in pod namespaces to troubleshoot a running pod.
EphemeralContainers featuregate.Feature = "EphemeralContainers"
// owner: @sjenning
// alpha: v1.11
//
// Allows resource reservations at the QoS level preventing pods at lower QoS levels from
// bursting into resources requested at higher QoS levels (memory only for now)
QOSReserved featuregate.Feature = "QOSReserved"
// owner: @ConnorDoyle
// alpha: v1.8
// beta: v1.10
//
// Alternative container-level CPU affinity policies.
CPUManager featuregate.Feature = "CPUManager"
// owner: @szuecs
// alpha: v1.12
//
// Enable nodes to change CPUCFSQuotaPeriod
CPUCFSQuotaPeriod featuregate.Feature = "CustomCPUCFSQuotaPeriod"
// owner: @lmdaly
// alpha: v1.16
// beta: v1.18
//
// Enable resource managers to make NUMA aligned decisions
TopologyManager featuregate.Feature = "TopologyManager"
// owner: @sjenning
// beta: v1.11
//
// Enable pods to set sysctls on a pod
Sysctls featuregate.Feature = "Sysctls"
// owner @smarterclayton
// alpha: v1.16
// beta: v1.19
//
// Enable legacy behavior to vary cluster functionality on the node-role.kubernetes.io labels. On by default (legacy), will be turned off in 1.18.
LegacyNodeRoleBehavior featuregate.Feature = "LegacyNodeRoleBehavior"
// owner @brendandburns
// alpha: v1.9
// beta: v1.19
//
// Enable nodes to exclude themselves from service load balancers
ServiceNodeExclusion featuregate.Feature = "ServiceNodeExclusion"
// owner @smarterclayton
// alpha: v1.16
// beta: v1.19
//
// Enable nodes to exclude themselves from network disruption checks
NodeDisruptionExclusion featuregate.Feature = "NodeDisruptionExclusion"
// owner: @saad-ali
// alpha: v1.12
// beta: v1.14
// GA: v1.18
// Enable all logic related to the CSIDriver API object in storage.k8s.io
CSIDriverRegistry featuregate.Feature = "CSIDriverRegistry"
// owner: @verult
// alpha: v1.12
// beta: v1.14
// ga: v1.17
// Enable all logic related to the CSINode API object in storage.k8s.io
CSINodeInfo featuregate.Feature = "CSINodeInfo"
// owner: @screeley44
// alpha: v1.9
// beta: v1.13
// ga: v1.18
//
// Enable Block volume support in containers.
BlockVolume featuregate.Feature = "BlockVolume"
// owner: @pospispa
// GA: v1.11
//
// Postpone deletion of a PV or a PVC when they are being used
StorageObjectInUseProtection featuregate.Feature = "StorageObjectInUseProtection"
// owner: @dims, @derekwaynecarr
// alpha: v1.10
// beta: v1.14
// GA: v1.20
//
// Implement support for limiting pids in pods
SupportPodPidsLimit featuregate.Feature = "SupportPodPidsLimit"
// owner: @feiskyer
// alpha: v1.10
//
// Enable Hyper-V containers on Windows
// Deprecated in 1.20 and removed in 1.21
HyperVContainer featuregate.Feature = "HyperVContainer"
// owner: @mikedanese
// beta: v1.12
// ga: v1.20
//
// Implement TokenRequest endpoint on service account resources.
TokenRequest featuregate.Feature = "TokenRequest"
// owner: @mikedanese
// beta: v1.12
// ga: v1.20
//
// Enable ServiceAccountTokenVolumeProjection support in ProjectedVolumes.
TokenRequestProjection featuregate.Feature = "TokenRequestProjection"
// owner: @mikedanese
// alpha: v1.13
//
// Migrate ServiceAccount volumes to use a projected volume consisting of a
// ServiceAccountTokenVolumeProjection. This feature adds new required flags
// to the API server.
BoundServiceAccountTokenVolume featuregate.Feature = "BoundServiceAccountTokenVolume"
// owner: @mtaufen
// alpha: v1.18
// beta: v1.20
//
// Enable OIDC discovery endpoints (issuer and JWKS URLs) for the service
// account issuer in the API server.
// Note these endpoints serve minimally-compliant discovery docs that are
// intended to be used for service account token verification.
ServiceAccountIssuerDiscovery featuregate.Feature = "ServiceAccountIssuerDiscovery"
// owner: @Random-Liu
// beta: v1.11
//
// Enable container log rotation for cri container runtime
CRIContainerLogRotation featuregate.Feature = "CRIContainerLogRotation"
// owner: @krmayankk
// beta: v1.14
//
// Enables control over the primary group ID of containers' init processes.
RunAsGroup featuregate.Feature = "RunAsGroup"
// owner: @saad-ali
// ga
//
// Allow mounting a subpath of a volume in a container
// Do not remove this feature gate even though it's GA
VolumeSubpath featuregate.Feature = "VolumeSubpath"
// owner: @gnufied
// beta : v1.12
// GA : v1.17
//
// Add support for volume plugins to report node specific
// volume limits
AttachVolumeLimit featuregate.Feature = "AttachVolumeLimit"
// owner: @ravig
// alpha: v1.11
//
// Include volume count on node to be considered for balanced resource allocation while scheduling.
// A node which has closer cpu,memory utilization and volume count is favoured by scheduler
// while making decisions.
BalanceAttachedNodeVolumes featuregate.Feature = "BalanceAttachedNodeVolumes"
// owner: @vladimirvivien
// alpha: v1.11
// beta: v1.14
// ga: v1.18
//
// Enables CSI to use raw block storage volumes
CSIBlockVolume featuregate.Feature = "CSIBlockVolume"
// owner: @pohly
// alpha: v1.14
// beta: v1.16
//
// Enables CSI Inline volumes support for pods
CSIInlineVolume featuregate.Feature = "CSIInlineVolume"
// owner: @pohly
// alpha: v1.19
//
// Enables tracking of available storage capacity that CSI drivers provide.
CSIStorageCapacity featuregate.Feature = "CSIStorageCapacity"
// owner: @alculquicondor
// beta: v1.20
//
// Enables the use of PodTopologySpread scheduling plugin to do default
// spreading and disables legacy SelectorSpread plugin.
DefaultPodTopologySpread featuregate.Feature = "DefaultPodTopologySpread"
// owner: @pohly
// alpha: v1.19
//
// Enables generic ephemeral inline volume support for pods
GenericEphemeralVolume featuregate.Feature = "GenericEphemeralVolume"
// owner: @tallclair
// alpha: v1.12
// beta: v1.14
//
// Enables RuntimeClass, for selecting between multiple runtimes to run a pod.
RuntimeClass featuregate.Feature = "RuntimeClass"
// owner: @mtaufen
// alpha: v1.12
// beta: v1.14
// GA: v1.17
//
// Kubelet uses the new Lease API to report node heartbeats,
// (Kube) Node Lifecycle Controller uses these heartbeats as a node health signal.
NodeLease featuregate.Feature = "NodeLease"
// owner: @janosi
// alpha: v1.12
// beta: v1.18
// GA: v1.20
//
// Enables SCTP as new protocol for Service ports, NetworkPolicy, and ContainerPort in Pod/Containers definition
SCTPSupport featuregate.Feature = "SCTPSupport"
// owner: @xing-yang
// alpha: v1.12
// beta: v1.17
//
// Enable volume snapshot data source support.
VolumeSnapshotDataSource featuregate.Feature = "VolumeSnapshotDataSource"
// owner: @jessfraz
// alpha: v1.12
//
// Enables control over ProcMountType for containers.
ProcMountType featuregate.Feature = "ProcMountType"
// owner: @janetkuo
// alpha: v1.12
//
// Allow TTL controller to clean up Pods and Jobs after they finish.
TTLAfterFinished featuregate.Feature = "TTLAfterFinished"
// owner: @dashpole
// alpha: v1.13
// beta: v1.15
//
// Enables the kubelet's pod resources grpc endpoint
KubeletPodResources featuregate.Feature = "KubeletPodResources"
// owner: @davidz627
// alpha: v1.14
// beta: v1.17
//
// Enables the in-tree storage to CSI Plugin migration feature.
CSIMigration featuregate.Feature = "CSIMigration"
// owner: @davidz627
// alpha: v1.14
// beta: v1.17
//
// Enables the GCE PD in-tree driver to GCE CSI Driver migration feature.
CSIMigrationGCE featuregate.Feature = "CSIMigrationGCE"
// owner: @davidz627
// alpha: v1.17
//
// Disables the GCE PD in-tree driver.
// Expects GCE PD CSI Driver to be installed and configured on all nodes.
CSIMigrationGCEComplete featuregate.Feature = "CSIMigrationGCEComplete"
// owner: @leakingtapan
// alpha: v1.14
// beta: v1.17
//
// Enables the AWS EBS in-tree driver to AWS EBS CSI Driver migration feature.
CSIMigrationAWS featuregate.Feature = "CSIMigrationAWS"
// owner: @leakingtapan
// alpha: v1.17
//
// Disables the AWS EBS in-tree driver.
// Expects AWS EBS CSI Driver to be installed and configured on all nodes.
CSIMigrationAWSComplete featuregate.Feature = "CSIMigrationAWSComplete"
// owner: @andyzhangx
// alpha: v1.15
// beta: v1.19
//
// Enables the Azure Disk in-tree driver to Azure Disk Driver migration feature.
CSIMigrationAzureDisk featuregate.Feature = "CSIMigrationAzureDisk"
// owner: @andyzhangx
// alpha: v1.17
//
// Disables the Azure Disk in-tree driver.
// Expects Azure Disk CSI Driver to be installed and configured on all nodes.
CSIMigrationAzureDiskComplete featuregate.Feature = "CSIMigrationAzureDiskComplete"
// owner: @andyzhangx
// alpha: v1.15
//
// Enables the Azure File in-tree driver to Azure File Driver migration feature.
CSIMigrationAzureFile featuregate.Feature = "CSIMigrationAzureFile"
// owner: @andyzhangx
// alpha: v1.17
//
// Disables the Azure File in-tree driver.
// Expects Azure File CSI Driver to be installed and configured on all nodes.
CSIMigrationAzureFileComplete featuregate.Feature = "CSIMigrationAzureFileComplete"
// owner: @divyenpatel
// beta: v1.19 (requires: vSphere vCenter/ESXi Version: 7.0u1, HW Version: VM version 15)
//
// Enables the vSphere in-tree driver to vSphere CSI Driver migration feature.
CSIMigrationvSphere featuregate.Feature = "CSIMigrationvSphere"
// owner: @divyenpatel
// beta: v1.19 (requires: vSphere vCenter/ESXi Version: 7.0u1, HW Version: VM version 15)
//
// Disables the vSphere in-tree driver.
// Expects vSphere CSI Driver to be installed and configured on all nodes.
CSIMigrationvSphereComplete featuregate.Feature = "CSIMigrationvSphereComplete"
// owner: @huffmanca
// alpha: v1.19
//
// Determines if a CSI Driver supports applying fsGroup.
CSIVolumeFSGroupPolicy featuregate.Feature = "CSIVolumeFSGroupPolicy"
// owner: @gnufied
// alpha: v1.18
// Allows user to configure volume permission change policy for fsGroups when mounting
// a volume in a Pod.
ConfigurableFSGroupPolicy featuregate.Feature = "ConfigurableFSGroupPolicy"
// owner: @RobertKrawitz, @derekwaynecarr
// beta: v1.15
// GA: v1.20
//
// Implement support for limiting pids in nodes
SupportNodePidsLimit featuregate.Feature = "SupportNodePidsLimit"
// owner: @wk8
// alpha: v1.14
// beta: v1.16
//
// Enables GMSA support for Windows workloads.
WindowsGMSA featuregate.Feature = "WindowsGMSA"
// owner: @bclau
// alpha: v1.16
// beta: v1.17
// GA: v1.18
//
// Enables support for running container entrypoints as different usernames than their default ones.
WindowsRunAsUserName featuregate.Feature = "WindowsRunAsUserName"
// owner: @adisky
// alpha: v1.14
// beta: v1.18
//
// Enables the OpenStack Cinder in-tree driver to OpenStack Cinder CSI Driver migration feature.
CSIMigrationOpenStack featuregate.Feature = "CSIMigrationOpenStack"
// owner: @adisky
// alpha: v1.17
//
// Disables the OpenStack Cinder in-tree driver.
// Expects the OpenStack Cinder CSI Driver to be installed and configured on all nodes.
CSIMigrationOpenStackComplete featuregate.Feature = "CSIMigrationOpenStackComplete"
// owner: @RobertKrawitz
// alpha: v1.15
//
// Allow use of filesystems for ephemeral storage monitoring.
// Only applies if LocalStorageCapacityIsolation is set.
LocalStorageCapacityIsolationFSQuotaMonitoring featuregate.Feature = "LocalStorageCapacityIsolationFSQuotaMonitoring"
// owner: @denkensk
// alpha: v1.15
// beta: v1.19
//
// Enables NonPreempting option for priorityClass and pod.
NonPreemptingPriority featuregate.Feature = "NonPreemptingPriority"
// owner: @j-griffith
// alpha: v1.15
// beta: v1.16
// GA: v1.18
//
// Enable support for specifying an existing PVC as a DataSource
VolumePVCDataSource featuregate.Feature = "VolumePVCDataSource"
// owner: @egernst
// alpha: v1.16
// beta: v1.18
//
// Enables PodOverhead, for accounting pod overheads which are specific to a given RuntimeClass
PodOverhead featuregate.Feature = "PodOverhead"
// owner: @khenidak
// alpha: v1.15
//
// Enables ipv6 dual stack
IPv6DualStack featuregate.Feature = "IPv6DualStack"
// owner: @robscott @freehan
// alpha: v1.16
//
// Enable Endpoint Slices for more scalable Service endpoints.
EndpointSlice featuregate.Feature = "EndpointSlice"
// owner: @robscott @freehan
// alpha: v1.18
// beta: v1.19
//
// Enable Endpoint Slice consumption by kube-proxy for improved scalability.
EndpointSliceProxying featuregate.Feature = "EndpointSliceProxying"
// owner: @robscott @kumarvin123
// alpha: v1.19
//
// Enable Endpoint Slice consumption by kube-proxy in Windows for improved scalability.
WindowsEndpointSliceProxying featuregate.Feature = "WindowsEndpointSliceProxying"
// owner: @Huang-Wei
// alpha: v1.16
// beta: v1.18
// GA: v1.19
//
// Schedule pods evenly across available topology domains.
EvenPodsSpread featuregate.Feature = "EvenPodsSpread"
// owner: @matthyx
// alpha: v1.16
// beta: v1.18
// GA: v1.20
//
// Enables the startupProbe in kubelet worker.
StartupProbe featuregate.Feature = "StartupProbe"
// owner: @deads2k
// beta: v1.17
//
// Enables the users to skip TLS verification of kubelets on pod logs requests
AllowInsecureBackendProxy featuregate.Feature = "AllowInsecureBackendProxy"
// owner: @mortent
// alpha: v1.3
// beta: v1.5
//
// Enable all logic related to the PodDisruptionBudget API object in policy
PodDisruptionBudget featuregate.Feature = "PodDisruptionBudget"
// owner: @m1093782566
// alpha: v1.17
//
// Enables topology aware service routing
ServiceTopology featuregate.Feature = "ServiceTopology"
// owner: @robscott
// alpha: v1.18
// beta: v1.19
//
// Enables AppProtocol field for Services and Endpoints.
ServiceAppProtocol featuregate.Feature = "ServiceAppProtocol"
// owner: @wojtek-t
// alpha: v1.18
// beta: v1.19
//
// Enables a feature to make secrets and configmaps data immutable.
ImmutableEphemeralVolumes featuregate.Feature = "ImmutableEphemeralVolumes"
// owner: @bart0sh
// alpha: v1.18
// beta: v1.19
//
// Enables usage of HugePages-<size> in a volume medium,
// e.g. emptyDir:
// medium: HugePages-1Gi
HugePageStorageMediumSize featuregate.Feature = "HugePageStorageMediumSize"
// owner: @freehan
// GA: v1.18
//
// Enable ExternalTrafficPolicy for Service ExternalIPs.
// This is for bug fix #69811
ExternalPolicyForExternalIP featuregate.Feature = "ExternalPolicyForExternalIP"
// owner: @bswartz
// alpha: v1.18
//
// Enables usage of any object for volume data source in PVCs
AnyVolumeDataSource featuregate.Feature = "AnyVolumeDataSource"
// owner: @javidiaz
// alpha: v1.19
// beta: v1.20
//
// Allow setting the Fully Qualified Domain Name (FQDN) in the hostname of a Pod. If a Pod does not
// have FQDN, this feature has no effect.
SetHostnameAsFQDN featuregate.Feature = "SetHostnameAsFQDN"
// owner: @ksubrmnn
// alpha: v1.14
// beta: v1.20
//
// Allows kube-proxy to run in Overlay mode for Windows
WinOverlay featuregate.Feature = "WinOverlay"
// owner: @ksubrmnn
// alpha: v1.14
//
// Allows kube-proxy to create DSR loadbalancers for Windows
WinDSR featuregate.Feature = "WinDSR"
// owner: @RenaudWasTaken @dashpole
// alpha: v1.19
// beta: v1.20
//
// Disables Accelerator Metrics Collected by Kubelet
DisableAcceleratorUsageMetrics featuregate.Feature = "DisableAcceleratorUsageMetrics"
// owner: @arjunrn @mwielgus @josephburnett
// alpha: v1.20
//
// Add support for the HPA to scale based on metrics from individual containers
// in target pods
HPAContainerMetrics featuregate.Feature = "HPAContainerMetrics"
// owner: @zshihang
// alpha: v1.13
// beta: v1.20
//
// Allows kube-controller-manager to publish kube-root-ca.crt configmap to
// every namespace. This feature is a prerequisite of BoundServiceAccountTokenVolume.
RootCAConfigMap featuregate.Feature = "RootCAConfigMap"
// owner: @andrewsykim
// alpha: v1.20
//
// Enable Terminating condition in Endpoint Slices.
EndpointSliceTerminatingCondition featuregate.Feature = "EndpointSliceTerminatingCondition"
// owner: @derekwaynecarr
// alpha: v1.20
//
// Enables kubelet support to size memory backed volumes
SizeMemoryBackedVolumes featuregate.Feature = "SizeMemoryBackedVolumes"
// owner: @Sh4d1
// alpha: v1.21
// LoadBalancerIPMode enables the IPMode field in the LoadBalancerIngress status of a Service
LoadBalancerIPMode featuregate.Feature = "LoadBalancerIPMode"
)
// init registers every default Kubernetes feature gate with the process-wide
// mutable feature gate. A registration failure (e.g. a duplicate feature key)
// is a programmer error, so it panics via runtime.Must.
func init() {
runtime.Must(utilfeature.DefaultMutableFeatureGate.Add(defaultKubernetesFeatureGates))
}
// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys.
// To add a new feature, define a key for it above and add it here. The features will be
// available throughout Kubernetes binaries.
// Each entry maps a feature key to its default value and maturity level;
// entries with LockToDefault set can no longer be overridden by users and are
// kept only until their scheduled removal release (see trailing comments).
var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
AppArmor: {Default: true, PreRelease: featuregate.Beta},
DynamicKubeletConfig: {Default: true, PreRelease: featuregate.Beta},
ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: featuregate.Beta},
DevicePlugins: {Default: true, PreRelease: featuregate.Beta},
TaintBasedEvictions: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.19
RotateKubeletServerCertificate: {Default: true, PreRelease: featuregate.Beta},
RotateKubeletClientCertificate: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.20
LocalStorageCapacityIsolation: {Default: true, PreRelease: featuregate.Beta},
Sysctls: {Default: true, PreRelease: featuregate.Beta},
EphemeralContainers: {Default: false, PreRelease: featuregate.Alpha},
QOSReserved: {Default: false, PreRelease: featuregate.Alpha},
ExpandPersistentVolumes: {Default: true, PreRelease: featuregate.Beta},
ExpandInUsePersistentVolumes: {Default: true, PreRelease: featuregate.Beta},
ExpandCSIVolumes: {Default: true, PreRelease: featuregate.Beta},
AttachVolumeLimit: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.19
CPUManager: {Default: true, PreRelease: featuregate.Beta},
CPUCFSQuotaPeriod: {Default: false, PreRelease: featuregate.Alpha},
TopologyManager: {Default: true, PreRelease: featuregate.Beta},
ServiceNodeExclusion: {Default: true, PreRelease: featuregate.Beta},
NodeDisruptionExclusion: {Default: true, PreRelease: featuregate.Beta},
CSIDriverRegistry: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.20
CSINodeInfo: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.19
BlockVolume: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.20
StorageObjectInUseProtection: {Default: true, PreRelease: featuregate.GA},
SupportPodPidsLimit: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.23
SupportNodePidsLimit: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.23
HyperVContainer: {Default: false, PreRelease: featuregate.Deprecated},
TokenRequest: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.21
TokenRequestProjection: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.21
BoundServiceAccountTokenVolume: {Default: false, PreRelease: featuregate.Alpha},
ServiceAccountIssuerDiscovery: {Default: true, PreRelease: featuregate.Beta},
CRIContainerLogRotation: {Default: true, PreRelease: featuregate.Beta},
CSIMigration: {Default: true, PreRelease: featuregate.Beta},
CSIMigrationGCE: {Default: false, PreRelease: featuregate.Beta}, // Off by default (requires GCE PD CSI Driver)
CSIMigrationGCEComplete: {Default: false, PreRelease: featuregate.Alpha},
CSIMigrationAWS: {Default: false, PreRelease: featuregate.Beta}, // Off by default (requires AWS EBS CSI driver)
CSIMigrationAWSComplete: {Default: false, PreRelease: featuregate.Alpha},
CSIMigrationAzureDisk: {Default: false, PreRelease: featuregate.Beta}, // Off by default (requires Azure Disk CSI driver)
CSIMigrationAzureDiskComplete: {Default: false, PreRelease: featuregate.Alpha},
CSIMigrationAzureFile: {Default: false, PreRelease: featuregate.Alpha},
CSIMigrationAzureFileComplete: {Default: false, PreRelease: featuregate.Alpha},
CSIMigrationvSphere: {Default: false, PreRelease: featuregate.Beta},
CSIMigrationvSphereComplete: {Default: false, PreRelease: featuregate.Beta},
RunAsGroup: {Default: true, PreRelease: featuregate.Beta},
CSIMigrationOpenStack: {Default: false, PreRelease: featuregate.Beta}, // Off by default (requires OpenStack Cinder CSI driver)
CSIMigrationOpenStackComplete: {Default: false, PreRelease: featuregate.Alpha},
VolumeSubpath: {Default: true, PreRelease: featuregate.GA},
ConfigurableFSGroupPolicy: {Default: true, PreRelease: featuregate.Beta},
BalanceAttachedNodeVolumes: {Default: false, PreRelease: featuregate.Alpha},
CSIBlockVolume: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.20
CSIInlineVolume: {Default: true, PreRelease: featuregate.Beta},
CSIStorageCapacity: {Default: false, PreRelease: featuregate.Alpha},
GenericEphemeralVolume: {Default: false, PreRelease: featuregate.Alpha},
CSIVolumeFSGroupPolicy: {Default: false, PreRelease: featuregate.Alpha},
RuntimeClass: {Default: true, PreRelease: featuregate.Beta},
NodeLease: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
SCTPSupport: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.22
VolumeSnapshotDataSource: {Default: true, PreRelease: featuregate.Beta},
ProcMountType: {Default: false, PreRelease: featuregate.Alpha},
TTLAfterFinished: {Default: false, PreRelease: featuregate.Alpha},
KubeletPodResources: {Default: true, PreRelease: featuregate.Beta},
WindowsGMSA: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.20
WindowsRunAsUserName: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.20
LocalStorageCapacityIsolationFSQuotaMonitoring: {Default: false, PreRelease: featuregate.Alpha},
NonPreemptingPriority: {Default: true, PreRelease: featuregate.Beta},
VolumePVCDataSource: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.20
PodOverhead: {Default: true, PreRelease: featuregate.Beta},
IPv6DualStack: {Default: false, PreRelease: featuregate.Alpha},
EndpointSlice: {Default: true, PreRelease: featuregate.Beta},
EndpointSliceProxying: {Default: true, PreRelease: featuregate.Beta},
EndpointSliceTerminatingCondition: {Default: false, PreRelease: featuregate.Alpha},
WindowsEndpointSliceProxying: {Default: false, PreRelease: featuregate.Alpha},
EvenPodsSpread: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.21
StartupProbe: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.23
AllowInsecureBackendProxy: {Default: true, PreRelease: featuregate.Beta},
PodDisruptionBudget: {Default: true, PreRelease: featuregate.Beta},
ServiceTopology: {Default: false, PreRelease: featuregate.Alpha},
ServiceAppProtocol: {Default: true, PreRelease: featuregate.Beta},
ImmutableEphemeralVolumes: {Default: true, PreRelease: featuregate.Beta},
HugePageStorageMediumSize: {Default: true, PreRelease: featuregate.Beta},
ExternalPolicyForExternalIP: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.22
AnyVolumeDataSource: {Default: false, PreRelease: featuregate.Alpha},
DefaultPodTopologySpread: {Default: true, PreRelease: featuregate.Beta},
SetHostnameAsFQDN: {Default: true, PreRelease: featuregate.Beta},
WinOverlay: {Default: true, PreRelease: featuregate.Beta},
WinDSR: {Default: false, PreRelease: featuregate.Alpha},
DisableAcceleratorUsageMetrics: {Default: true, PreRelease: featuregate.Beta},
HPAContainerMetrics: {Default: false, PreRelease: featuregate.Alpha},
RootCAConfigMap: {Default: true, PreRelease: featuregate.Beta},
SizeMemoryBackedVolumes: {Default: false, PreRelease: featuregate.Alpha},
LoadBalancerIPMode: {Default: false, PreRelease: featuregate.Alpha},
// inherited features from generic apiserver, relisted here to get a conflict if it is changed
// unintentionally on either side:
genericfeatures.StreamingProxyRedirects: {Default: true, PreRelease: featuregate.Deprecated},
genericfeatures.ValidateProxyRedirects: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.AdvancedAuditing: {Default: true, PreRelease: featuregate.GA},
genericfeatures.APIResponseCompression: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.APIListChunking: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.DryRun: {Default: true, PreRelease: featuregate.GA},
genericfeatures.ServerSideApply: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.APIPriorityAndFairness: {Default: false, PreRelease: featuregate.Alpha},
genericfeatures.WarningHeaders: {Default: true, PreRelease: featuregate.Beta},
// features that enable backwards compatibility but are scheduled to be removed
// ...
HPAScaleToZero: {Default: false, PreRelease: featuregate.Alpha},
LegacyNodeRoleBehavior: {Default: true, PreRelease: featuregate.Beta},
}
|
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package features
import (
"k8s.io/component-base/featuregate"
)
const (
// Every feature gate should add a constant here following this template:
//
// // owner: @username
// // alpha: v1.4
// MyFeature featuregate.Feature = "MyFeature"

// owner: @smarterclayton
// alpha: v1.16
// beta: v1.19
//
// Enable legacy behavior to vary cluster functionality on the node-role.kubernetes.io labels. On by default (legacy), will be turned off in 1.18.
// Original copy from k8s.io/kubernetes/pkg/features/kube_features.go
LegacyNodeRoleBehavior featuregate.Feature = "LegacyNodeRoleBehavior"
// owner: @brendandburns
// alpha: v1.9
// beta: v1.19
// ga: v1.21
//
// Enable nodes to exclude themselves from service load balancers
// Original copy from k8s.io/kubernetes/pkg/features/kube_features.go
ServiceNodeExclusion featuregate.Feature = "ServiceNodeExclusion"
// owner: @khenidak
// alpha: v1.15
//
// Enables ipv6 dual stack
// Original copy from k8s.io/kubernetes/pkg/features/kube_features.go
IPv6DualStack featuregate.Feature = "IPv6DualStack"
)
// SetupCurrentKubernetesSpecificFeatureGates registers the cloud-specific
// feature gates (cloudPublicFeatureGates) with the supplied mutable feature
// gate registry. It returns any registration error, e.g. a duplicate key.
func SetupCurrentKubernetesSpecificFeatureGates(featuregates featuregate.MutableFeatureGate) error {
	if err := featuregates.Add(cloudPublicFeatureGates); err != nil {
		return err
	}
	return nil
}
// cloudPublicFeatureGates consists of cloud-specific feature keys.
// To add a new feature, define a key for it at k8s.io/api/pkg/features and add it here.
// Entries with LockToDefault set can no longer be overridden by users.
var cloudPublicFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
LegacyNodeRoleBehavior: {Default: false, PreRelease: featuregate.GA, LockToDefault: true},
ServiceNodeExclusion: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
IPv6DualStack: {Default: false, PreRelease: featuregate.Alpha},
}
Upgrade the IPv6DualStack feature to beta and turn it on by default.
Kubernetes-commit: 3e56ddae67695c08aa64a546e226b6126e1fd09e
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package features
import (
"k8s.io/component-base/featuregate"
)
const (
// Every feature gate should add a constant here following this template:
//
// // owner: @username
// // alpha: v1.4
// MyFeature featuregate.Feature = "MyFeature"

// owner: @smarterclayton
// alpha: v1.16
// beta: v1.19
//
// Enable legacy behavior to vary cluster functionality on the node-role.kubernetes.io labels. On by default (legacy), will be turned off in 1.18.
// Original copy from k8s.io/kubernetes/pkg/features/kube_features.go
LegacyNodeRoleBehavior featuregate.Feature = "LegacyNodeRoleBehavior"
// owner: @brendandburns
// alpha: v1.9
// beta: v1.19
// ga: v1.21
//
// Enable nodes to exclude themselves from service load balancers
// Original copy from k8s.io/kubernetes/pkg/features/kube_features.go
ServiceNodeExclusion featuregate.Feature = "ServiceNodeExclusion"
// owner: @khenidak
// alpha: v1.15
//
// Enables ipv6 dual stack
// Original copy from k8s.io/kubernetes/pkg/features/kube_features.go
IPv6DualStack featuregate.Feature = "IPv6DualStack"
)
// SetupCurrentKubernetesSpecificFeatureGates adds every cloud-specific
// feature gate from cloudPublicFeatureGates to the given registry and
// reports any registration failure.
func SetupCurrentKubernetesSpecificFeatureGates(featuregates featuregate.MutableFeatureGate) error {
	err := featuregates.Add(cloudPublicFeatureGates)
	return err
}
// cloudPublicFeatureGates consists of cloud-specific feature keys.
// To add a new feature, define a key for it at k8s.io/api/pkg/features and add it here.
// Entries with LockToDefault set can no longer be overridden by users.
var cloudPublicFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
LegacyNodeRoleBehavior: {Default: false, PreRelease: featuregate.GA, LockToDefault: true},
ServiceNodeExclusion: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
IPv6DualStack: {Default: true, PreRelease: featuregate.Beta},
}
|
package status
import (
"context"
"fmt"
"path"
"path/filepath"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/werf/logboek"
"github.com/werf/werf/pkg/path_matcher"
"github.com/werf/werf/pkg/util"
)
// Result holds the git status of one repository: the per-file status list
// plus a recursively collected Result for each of its submodules.
type Result struct {
repositoryFullFilepath string // path relative to main repository
fileStatusList git.Status // per-file worktree/staging status, keyed by slash-separated path
submoduleResults []*SubmoduleResult // one entry per submodule, recursive
}
// NewResult constructs a Result for the repository located at
// repositoryFullFilepath (relative to the main repository) with the given
// file statuses and submodule results.
func NewResult(repositoryFullFilepath string, fileStatusList git.Status, submoduleResults []*SubmoduleResult) *Result {
	res := new(Result)
	res.repositoryFullFilepath = repositoryFullFilepath
	res.fileStatusList = fileStatusList
	res.submoduleResults = submoduleResults
	return res
}
// SubmoduleResult wraps the status Result of a single submodule together
// with the submodule's identity and go-git status record.
type SubmoduleResult struct {
*Result
submoduleName string // submodule name as declared in .gitmodules
submodulePath string // submodule path relative to its parent repository
submoduleStatus *git.SubmoduleStatus // expected (staged) vs current commit info
}
// NewSubmoduleResult wraps result with the identifying information of the
// submodule it was produced for.
func NewSubmoduleResult(submoduleName, submodulePath string, submoduleStatus *git.SubmoduleStatus, result *Result) *SubmoduleResult {
	sr := &SubmoduleResult{Result: result}
	sr.submoduleName = submoduleName
	sr.submodulePath = submodulePath
	sr.submoduleStatus = submoduleStatus
	return sr
}
// FilterOptions controls which file statuses are considered when listing
// modified files.
type FilterOptions struct {
StagingOnly bool // only count files whose staging (index) status is changed
WorktreeOnly bool // only count files whose worktree status is changed
IgnoreSubmodules bool // do not descend into submodule results
}
// Status filters this Result by pathMatcher, returning a new Result that
// keeps only the file statuses whose full path (relative to the main
// repository) matches, plus recursively filtered results for every submodule
// the matcher selects or may need to descend into.
func (r *Result) Status(ctx context.Context, pathMatcher path_matcher.PathMatcher) (*Result, error) {
res := NewResult(r.repositoryFullFilepath, git.Status{}, []*SubmoduleResult{})
for fileStatusPath, fileStatus := range r.fileStatusList {
// git reports slash-separated paths; convert to the host OS separator
// before joining with the repository path.
fileStatusFilepath := filepath.FromSlash(fileStatusPath)
fileStatusFullFilepath := filepath.Join(r.repositoryFullFilepath, fileStatusFilepath)
if pathMatcher.MatchPath(fileStatusFullFilepath) {
res.fileStatusList[fileStatusPath] = fileStatus
if debugProcess() {
logboek.Context(ctx).Debug().LogF(
"File was added: %s (worktree: %s, staging: %s)\n",
fileStatusFullFilepath,
fileStatusMapping[fileStatus.Worktree],
fileStatusMapping[fileStatus.Staging],
)
}
}
}
for _, submoduleResult := range r.submoduleResults {
// shouldGoThrough means the submodule path itself is not matched but a
// nested path might be, so the submodule must still be processed.
isMatched, shouldGoThrough := pathMatcher.ProcessDirOrSubmodulePath(submoduleResult.repositoryFullFilepath)
if isMatched || shouldGoThrough {
if debugProcess() {
logboek.Context(ctx).Debug().LogF("Submodule was checking: %s\n", submoduleResult.repositoryFullFilepath)
}
newResult, err := submoduleResult.Status(ctx, pathMatcher)
if err != nil {
return nil, err
}
newSubmoduleResult := NewSubmoduleResult(submoduleResult.submoduleName, submoduleResult.submodulePath, submoduleResult.submoduleStatus, newResult)
res.submoduleResults = append(res.submoduleResults, newSubmoduleResult)
}
}
return res, nil
}
// FilePathList returns the accepted file paths relative to the main
// repository, descending into submodules unless options.IgnoreSubmodules
// is set.
func (r *Result) FilePathList(options FilterOptions) []string {
	var paths []string
	for _, relPath := range r.filteredFilePathList(options) {
		paths = append(paths, filepath.Join(r.repositoryFullFilepath, relPath))
	}
	if options.IgnoreSubmodules {
		return paths
	}
	for _, sub := range r.submoduleResults {
		paths = append(paths, sub.FilePathList(options)...)
	}
	return paths
}
// filteredFilePathList returns the file paths of this repository (submodules
// excluded, paths relative to the repository) that pass the staging/worktree
// filters in options.
func (r *Result) filteredFilePathList(options FilterOptions) []string {
	var accepted []string
	for statusPath, status := range r.fileStatusList {
		if !isFileStatusAccepted(status, options) {
			continue
		}
		accepted = append(accepted, statusPath)
	}
	return accepted
}
// UncleanSubmoduleError reports a submodule whose commit referenced by HEAD,
// staged (expected) commit, and checked-out (current) commit do not all agree.
type UncleanSubmoduleError struct {
SubmodulePath string // submodule path relative to the main repository
ExpectedCommit string // commit recorded in the index
CurrentCommit string // commit actually checked out
HeadCommitCurrentCommit string // submodule commit referenced by the head commit tree
error
}
// SubmoduleHasUncommittedChangesError reports a submodule whose worktree
// contains untracked or modified files.
type SubmoduleHasUncommittedChangesError struct {
SubmodulePath string // submodule path relative to the main repository
FilePathList []string // offending files, relative to the submodule
error
}
// ValidateSubmodules verifies that every checked-out submodule recorded in
// this Result is clean with respect to headCommit of the given repository.
//
// For each submodule it checks, in order:
//   - the submodule commit referenced by the head commit tree exists and
//     matches the commit recorded in the index (Expected);
//   - the Expected commit matches the actually checked-out commit (Current);
//     a zero Current hash (invalid/unset submodule state) is skipped;
//   - the submodule worktree has no untracked or modified files;
//   - the same conditions hold recursively for nested submodules.
//
// It returns an UncleanSubmoduleError or SubmoduleHasUncommittedChangesError
// describing the first offending submodule, or any underlying git error.
//
// Fix: the previous version referenced sr.Expected/sr.Current, fields that do
// not exist on SubmoduleResult (the status lives on sr.submoduleStatus), and
// its combined head/expected/current condition made the ZeroHash skip
// unreachable; the checks are now split so the skip takes effect.
func (r *Result) ValidateSubmodules(repository *git.Repository, headCommit string) error {
	if len(r.submoduleResults) == 0 {
		return nil
	}

	c, err := repository.CommitObject(plumbing.NewHash(headCommit))
	if err != nil {
		return err
	}

	cTree, err := c.Tree()
	if err != nil {
		return err
	}

	for _, sr := range r.submoduleResults {
		dotGitExist, err := util.FileExists(filepath.Join(sr.repositoryFullFilepath, ".git"))
		if err != nil {
			return err
		}

		// The submodule is not checked out, so it cannot be modified.
		if !dotGitExist {
			continue
		}

		e, err := cTree.FindEntry(sr.submodulePath)
		if err != nil {
			// The submodule exists locally but is not committed yet.
			if err == object.ErrEntryNotFound {
				return UncleanSubmoduleError{
					SubmodulePath:           sr.repositoryFullFilepath,
					HeadCommitCurrentCommit: plumbing.ZeroHash.String(),
					ExpectedCommit:          sr.submoduleStatus.Expected.String(),
					CurrentCommit:           sr.submoduleStatus.Current.String(),
					error:                   fmt.Errorf("submodule is not clean"),
				}
			}
			return err
		}
		headCommitSubmoduleCommit := e.Hash

		// The submodule commit was changed (staged) but not committed yet.
		if headCommitSubmoduleCommit != sr.submoduleStatus.Expected {
			return UncleanSubmoduleError{
				SubmodulePath:           sr.repositoryFullFilepath,
				HeadCommitCurrentCommit: headCommitSubmoduleCommit.String(),
				ExpectedCommit:          sr.submoduleStatus.Expected.String(),
				CurrentCommit:           sr.submoduleStatus.Current.String(),
				error:                   fmt.Errorf("submodule is not clean"),
			}
		}

		// The submodule is switched to a commit that differs from the staged one.
		if sr.submoduleStatus.Expected != sr.submoduleStatus.Current {
			// Skip an invalid submodule state (no commit checked out).
			if sr.submoduleStatus.Current == plumbing.ZeroHash {
				continue
			}

			return UncleanSubmoduleError{
				SubmodulePath:           sr.repositoryFullFilepath,
				HeadCommitCurrentCommit: headCommitSubmoduleCommit.String(),
				ExpectedCommit:          sr.submoduleStatus.Expected.String(),
				CurrentCommit:           sr.submoduleStatus.Current.String(),
				error:                   fmt.Errorf("submodule is not clean"),
			}
		}

		// The submodule worktree has untracked or modified files.
		if len(sr.fileStatusList) != 0 {
			return SubmoduleHasUncommittedChangesError{
				SubmodulePath: sr.repositoryFullFilepath,
				FilePathList:  sr.filteredFilePathList(FilterOptions{IgnoreSubmodules: true}),
				error:         fmt.Errorf("submodule has uncommitted changes"),
			}
		}

		w, err := repository.Worktree()
		if err != nil {
			return err
		}

		s, err := w.Submodule(sr.submoduleName)
		if err != nil {
			return err
		}

		srRepository, err := s.Repository()
		if err != nil {
			return err
		}

		// Recurse into the submodule's own submodules.
		if err := sr.ValidateSubmodules(srRepository, sr.submoduleStatus.Current.String()); err != nil {
			return err
		}
	}

	return nil
}
// IsFileModified reports whether relPath (relative to the main repository) is
// changed according to the filtered status. Unless options.IgnoreSubmodules is
// set, submodule results are searched recursively.
func (r *Result) IsFileModified(relPath string, options FilterOptions) bool {
	for _, filePath := range r.filteredFilePathList(options) {
		// Status paths are slash-separated; compare in slash form.
		if path.Join(r.repositoryFullFilepath, filePath) == filepath.ToSlash(relPath) {
			return true
		}
	}
	if !options.IgnoreSubmodules {
		for _, sr := range r.submoduleResults {
			if util.IsSubpathOfBasePath(filepath.ToSlash(sr.repositoryFullFilepath), filepath.ToSlash(relPath)) {
				// A submodule whose checked-out commit differs from the staged
				// (expected) one counts as modified for every path inside it.
				if sr.submoduleStatus.Current != sr.submoduleStatus.Expected {
					return true
				}
				return sr.IsFileModified(relPath, options)
			}
		}
	}
	return false
}
// isFileStatusAccepted reports whether a file status passes the StagingOnly /
// WorktreeOnly filters carried in options.
func isFileStatusAccepted(fileStatus *git.FileStatus, options FilterOptions) bool {
	switch {
	case options.StagingOnly && !isFileStatusCodeExpected(fileStatus.Staging):
		return false
	case options.WorktreeOnly && !isFileStatusCodeExpected(fileStatus.Worktree):
		return false
	default:
		return true
	}
}
// isFileStatusCodeExpected reports whether the status code denotes a change,
// i.e. anything other than git.Unmodified.
func isFileStatusCodeExpected(code git.StatusCode) bool {
	if code == git.Unmodified {
		return false
	}
	return true
}
Fix merge conflicts
package status
import (
"context"
"fmt"
"path"
"path/filepath"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/werf/logboek"
"github.com/werf/werf/pkg/path_matcher"
"github.com/werf/werf/pkg/util"
)
// Result holds the (possibly filtered) git status of one repository: its path
// relative to the main repository, the per-file statuses, and a result for
// each inspected submodule.
type Result struct {
	repositoryFullFilepath string // path relative to main repository
	fileStatusList         git.Status
	submoduleResults       []*SubmoduleResult
}
// NewResult constructs a Result for the repository located at
// repositoryFullFilepath (relative to the main repository).
func NewResult(repositoryFullFilepath string, fileStatusList git.Status, submoduleResults []*SubmoduleResult) *Result {
	res := new(Result)
	res.repositoryFullFilepath = repositoryFullFilepath
	res.fileStatusList = fileStatusList
	res.submoduleResults = submoduleResults
	return res
}
// SubmoduleResult decorates a Result with submodule metadata: the submodule's
// name, its path relative to the parent repository, and its go-git status.
type SubmoduleResult struct {
	*Result
	submoduleName   string
	submodulePath   string
	submoduleStatus *git.SubmoduleStatus
}
// NewSubmoduleResult wraps result with the submodule's name, path and status.
func NewSubmoduleResult(submoduleName, submodulePath string, submoduleStatus *git.SubmoduleStatus, result *Result) *SubmoduleResult {
	sr := &SubmoduleResult{Result: result}
	sr.submoduleName = submoduleName
	sr.submodulePath = submodulePath
	sr.submoduleStatus = submoduleStatus
	return sr
}
// FilterOptions controls which file statuses are reported.
type FilterOptions struct {
	StagingOnly      bool // keep only files whose index (staging) status changed
	WorktreeOnly     bool // keep only files whose worktree status changed
	IgnoreSubmodules bool // do not descend into submodule results
}
// Status returns a copy of r filtered by pathMatcher: only file statuses whose
// full path matches are kept, and only submodules that match (or may contain
// matching paths) are recursed into.
func (r *Result) Status(ctx context.Context, pathMatcher path_matcher.PathMatcher) (*Result, error) {
	res := NewResult(r.repositoryFullFilepath, git.Status{}, []*SubmoduleResult{})
	for fileStatusPath, fileStatus := range r.fileStatusList {
		// Status keys are slash-separated; matching is done on OS-native paths.
		fileStatusFilepath := filepath.FromSlash(fileStatusPath)
		fileStatusFullFilepath := filepath.Join(r.repositoryFullFilepath, fileStatusFilepath)
		if pathMatcher.MatchPath(fileStatusFullFilepath) {
			res.fileStatusList[fileStatusPath] = fileStatus
			if debugProcess() {
				logboek.Context(ctx).Debug().LogF(
					"File was added: %s (worktree: %s, staging: %s)\n",
					fileStatusFullFilepath,
					fileStatusMapping[fileStatus.Worktree],
					fileStatusMapping[fileStatus.Staging],
				)
			}
		}
	}
	for _, submoduleResult := range r.submoduleResults {
		// shouldGoThrough means the matcher could match paths deeper inside the
		// submodule even if the submodule path itself is not a direct match.
		isMatched, shouldGoThrough := pathMatcher.ProcessDirOrSubmodulePath(submoduleResult.repositoryFullFilepath)
		if isMatched || shouldGoThrough {
			if debugProcess() {
				logboek.Context(ctx).Debug().LogF("Submodule was checking: %s\n", submoduleResult.repositoryFullFilepath)
			}
			// Recurse with the same matcher and keep the filtered sub-result.
			newResult, err := submoduleResult.Status(ctx, pathMatcher)
			if err != nil {
				return nil, err
			}
			newSubmoduleResult := NewSubmoduleResult(submoduleResult.submoduleName, submoduleResult.submodulePath, submoduleResult.submoduleStatus, newResult)
			res.submoduleResults = append(res.submoduleResults, newSubmoduleResult)
		}
	}
	return res, nil
}
// FilePathList returns the accepted file paths, each made relative to the main
// repository; submodule results are included unless options.IgnoreSubmodules.
func (r *Result) FilePathList(options FilterOptions) []string {
	var paths []string
	for _, p := range r.filteredFilePathList(options) {
		paths = append(paths, filepath.Join(r.repositoryFullFilepath, p))
	}
	if options.IgnoreSubmodules {
		return paths
	}
	for _, sub := range r.submoduleResults {
		paths = append(paths, sub.FilePathList(options)...)
	}
	return paths
}
// filteredFilePathList returns the file paths (relative to this repository,
// submodules excluded) whose status passes the filters in options.
func (r *Result) filteredFilePathList(options FilterOptions) []string {
	var accepted []string
	for statusPath, status := range r.fileStatusList {
		if !isFileStatusAccepted(status, options) {
			continue
		}
		accepted = append(accepted, statusPath)
	}
	return accepted
}
// UncleanSubmoduleError is returned by ValidateSubmodules when a submodule's
// three commits disagree: the one recorded in the HEAD tree, the staged
// (expected) one, and the currently checked-out one.
type UncleanSubmoduleError struct {
	SubmodulePath           string
	ExpectedCommit          string
	CurrentCommit           string
	HeadCommitCurrentCommit string
	error
}
// SubmoduleHasUncommittedChangesError is returned by ValidateSubmodules when a
// submodule worktree contains modified or untracked files.
type SubmoduleHasUncommittedChangesError struct {
	SubmodulePath string
	FilePathList  []string
	error
}
// ValidateSubmodules verifies that every submodule result agrees with the
// repository commit headCommit. It returns an UncleanSubmoduleError when the
// submodule's HEAD-tree, staged (expected) and checked-out (current) commits
// disagree, a SubmoduleHasUncommittedChangesError when the submodule worktree
// is dirty, and recurses into nested submodules otherwise.
func (r *Result) ValidateSubmodules(repository *git.Repository, headCommit string) error {
	if len(r.submoduleResults) == 0 {
		return nil
	}
	c, err := repository.CommitObject(plumbing.NewHash(headCommit))
	if err != nil {
		return err
	}
	cTree, err := c.Tree()
	if err != nil {
		return err
	}
	for _, sr := range r.submoduleResults {
		dotGitExist, err := util.FileExists(filepath.Join(sr.repositoryFullFilepath, ".git"))
		if err != nil {
			return err
		}
		/* The submodule is not checked out, so it is not modified */
		if !dotGitExist {
			continue
		}
		// Look up the gitlink entry for this submodule in the HEAD tree.
		e, err := cTree.FindEntry(sr.submodulePath)
		if err != nil {
			/* The submodule exists locally but it is not committed yet */
			if err == object.ErrEntryNotFound {
				return UncleanSubmoduleError{
					SubmodulePath: sr.repositoryFullFilepath,
					HeadCommitCurrentCommit: plumbing.ZeroHash.String(),
					ExpectedCommit: sr.submoduleStatus.Expected.String(),
					CurrentCommit: sr.submoduleStatus.Current.String(),
					error: fmt.Errorf("submodule is not clean"),
				}
			}
			return err
		}
		headCommitSubmoduleCommit := e.Hash
		/* The submodule is switched to another commit and not committed yet */
		if headCommitSubmoduleCommit != sr.submoduleStatus.Expected || sr.submoduleStatus.Expected != sr.submoduleStatus.Current {
			return UncleanSubmoduleError{
				SubmodulePath: sr.repositoryFullFilepath,
				HeadCommitCurrentCommit: headCommitSubmoduleCommit.String(),
				ExpectedCommit: sr.submoduleStatus.Expected.String(),
				CurrentCommit: sr.submoduleStatus.Current.String(),
				error: fmt.Errorf("submodule is not clean"),
			}
		}
		/* The submodule expected commit (from stage) differs from the current commit */
		if sr.submoduleStatus.Expected != sr.submoduleStatus.Current {
			/* skip invalid submodule state */
			if sr.submoduleStatus.Current == plumbing.ZeroHash {
				continue
			}
			return UncleanSubmoduleError{
				SubmodulePath: sr.repositoryFullFilepath,
				HeadCommitCurrentCommit: headCommitSubmoduleCommit.String(),
				ExpectedCommit: sr.submoduleStatus.Expected.String(),
				CurrentCommit: sr.submoduleStatus.Current.String(),
				error: fmt.Errorf("submodule is not clean"),
			}
		}
		/* The submodule has untracked/modified files */
		if len(sr.fileStatusList) != 0 {
			return SubmoduleHasUncommittedChangesError{
				SubmodulePath: sr.repositoryFullFilepath,
				FilePathList: sr.filteredFilePathList(FilterOptions{IgnoreSubmodules: true}),
				error: fmt.Errorf("submodule has uncommitted changes"),
			}
		}
		// Open the submodule's own repository and validate its submodules
		// against the submodule's current commit.
		w, err := repository.Worktree()
		if err != nil {
			return err
		}
		s, err := w.Submodule(sr.submoduleName)
		if err != nil {
			return err
		}
		srRepository, err := s.Repository()
		if err != nil {
			return err
		}
		if err := sr.ValidateSubmodules(srRepository, sr.submoduleStatus.Current.String()); err != nil {
			return err
		}
	}
	return nil
}
// IsFileModified reports whether relPath (relative to the main repository) is
// changed according to the filtered status; submodule results are consulted
// recursively unless options.IgnoreSubmodules is set.
func (r *Result) IsFileModified(relPath string, options FilterOptions) bool {
	slashRelPath := filepath.ToSlash(relPath)
	for _, fp := range r.filteredFilePathList(options) {
		if path.Join(r.repositoryFullFilepath, fp) == slashRelPath {
			return true
		}
	}
	if options.IgnoreSubmodules {
		return false
	}
	for _, sub := range r.submoduleResults {
		if !util.IsSubpathOfBasePath(filepath.ToSlash(sub.repositoryFullFilepath), slashRelPath) {
			continue
		}
		// A submodule checked out at a commit other than the staged one makes
		// every path inside it count as modified.
		if sub.submoduleStatus.Current != sub.submoduleStatus.Expected {
			return true
		}
		return sub.IsFileModified(relPath, options)
	}
	return false
}
// isFileStatusAccepted reports whether fileStatus passes the StagingOnly /
// WorktreeOnly filters carried in options.
func isFileStatusAccepted(fileStatus *git.FileStatus, options FilterOptions) bool {
	if (options.StagingOnly && !isFileStatusCodeExpected(fileStatus.Staging)) || (options.WorktreeOnly && !isFileStatusCodeExpected(fileStatus.Worktree)) {
		return false
	}
	return true
}
// isFileStatusCodeExpected reports whether code denotes a change, i.e.
// anything other than git.Unmodified.
func isFileStatusCodeExpected(code git.StatusCode) bool {
	return code != git.Unmodified
}
|
package paxos
import (
"borg/assert"
"testing"
"fmt"
"os"
)
var (
	// IdOutOfRange is the panic value used when a node id exceeds the cluster
	// size. (os.NewError is pre-Go1 API; this file predates the errors package.)
	IdOutOfRange = os.NewError("Id Out of Range")
)
// coordinator runs the proposer role of a Paxos round.
//
// me is this node's id (it doubles as the initial round number), nNodes the
// cluster size, and v the value to propose. Messages arrive on ins and are
// sent on outs; a tick on clock aborts the current round and starts a new one
// with a higher round number. This loop never returns.
func coordinator(me, nNodes uint64, v string, ins, outs chan msg, clock chan int) {
	if me > nNodes {
		panic(IdOutOfRange)
	}
	// Round numbers are kept unique per node by starting at me and stepping
	// by nNodes on each restart.
	var crnd uint64 = me
Start:
	// Phase 1a: invite all acceptors to join round crnd.
	start := msg{
		cmd: "INVITE",
		to: 0, // send to all acceptors
		from: me,
		body: fmt.Sprintf("%d", crnd),
	}
	outs <- start
	var rsvps uint64
	quorum := nNodes/2 + 1 // a simple majority
	for {
		select {
		case in := <-ins:
			switch in.cmd {
			case "RSVP":
				rsvps++
				// Phase 2a: once a majority has answered, nominate the value.
				if rsvps >= quorum {
					choosen := msg{
						cmd: "NOMINATE",
						to: 0, // send to all acceptors
						from: me,
						body: fmt.Sprintf("%d:%s", crnd, v),
					}
					// Send asynchronously so this loop keeps draining ins.
					go func() { outs <- choosen }()
				}
			}
		case <-clock:
			// Timeout: bump the round number (still unique to me) and restart.
			crnd += nNodes
			goto Start
		}
	}
}
// Testing
// This is here mainly for triangulation. It ensures we're not
// hardcoding crnd.
func TestStartsRoundAtMe(t *testing.T) {
	ins := make(chan msg)
	outs := make(chan msg)
	clock := make(chan int)
	nNodes := uint64(10) // this is arbitrary
	res := make([]msg, 2)
	// Each coordinator's first INVITE must carry a round number equal to its id.
	go coordinator(1, nNodes, "foo", ins, outs, clock)
	res[0] = <-outs
	go coordinator(2, nNodes, "foo", ins, outs, clock)
	res[1] = <-outs
	exp := msgs("1:*:INVITE:1", "2:*:INVITE:2")
	assert.Equal(t, exp, res, "")
}
// TestPanicWhenMeIsOutOfRange verifies that a node id greater than the cluster
// size (11 > 10) makes coordinator panic with IdOutOfRange.
func TestPanicWhenMeIsOutOfRange(t *testing.T) {
	ins := make(chan msg)
	outs := make(chan msg)
	clock := make(chan int)
	nNodes := uint64(10) // this is arbitrary
	assert.Panic(t, IdOutOfRange, func() {
		coordinator(11, nNodes, "foo", ins, outs, clock)
	})
}
// TestPhase2aTimeoutStartsNewRound feeds five RSVPs — one short of the quorum
// of six for ten nodes — then ticks the clock and expects a fresh INVITE with
// the round bumped by nNodes (1 + 10 = 11).
func TestPhase2aTimeoutStartsNewRound(t *testing.T) {
	ins := make(chan msg)
	outs := make(chan msg)
	clock := make(chan int)
	nNodes := uint64(10) // this is arbitrary
	go coordinator(1, nNodes, "foo", ins, outs, clock)
	<-outs //discard INVITE
	// never reach majority (force timeout)
	ins <- m("2:1:RSVP:1:0:")
	ins <- m("3:1:RSVP:1:0:")
	ins <- m("4:1:RSVP:1:0:")
	ins <- m("5:1:RSVP:1:0:")
	ins <- m("6:1:RSVP:1:0:")
	clock <- 1
	exp := m("1:*:INVITE:11")
	assert.Equal(t, exp, <-outs, "")
}
paxos: test coordinator shuts down properly
package paxos
import (
"borg/assert"
"testing"
"fmt"
"os"
)
var (
	// IdOutOfRange is the panic value used when a node id exceeds the cluster
	// size. (os.NewError is pre-Go1 API; this file predates the errors package.)
	IdOutOfRange = os.NewError("Id Out of Range")
)
// coordinator runs the proposer role of a Paxos round.
//
// me is this node's id (it doubles as the initial round number), nNodes the
// cluster size, and v the value to propose. Messages arrive on ins and are
// sent on outs; a tick on clock aborts the current round and starts a new one
// with a higher round number. Closing ins shuts the coordinator down: it
// closes outs and returns.
func coordinator(me, nNodes uint64, v string, ins, outs chan msg, clock chan int) {
	if me > nNodes {
		panic(IdOutOfRange)
	}
	// Round numbers are kept unique per node by starting at me and stepping
	// by nNodes on each restart.
	var crnd uint64 = me
Start:
	// Phase 1a: invite all acceptors to join round crnd.
	start := msg{
		cmd: "INVITE",
		to: 0, // send to all acceptors
		from: me,
		body: fmt.Sprintf("%d", crnd),
	}
	outs <- start
	var rsvps uint64
	quorum := nNodes/2 + 1 // a simple majority
	for {
		select {
		case in := <-ins:
			// closed() is pre-Go1 API: a closed ins is the shutdown signal,
			// propagated by closing outs before returning.
			if closed(ins) {
				close(outs)
				return
			}
			switch in.cmd {
			case "RSVP":
				rsvps++
				// Phase 2a: once a majority has answered, nominate the value.
				if rsvps >= quorum {
					choosen := msg{
						cmd: "NOMINATE",
						to: 0, // send to all acceptors
						from: me,
						body: fmt.Sprintf("%d:%s", crnd, v),
					}
					// Send asynchronously so this loop keeps draining ins.
					go func() { outs <- choosen }()
				}
			}
		case <-clock:
			// Timeout: bump the round number (still unique to me) and restart.
			crnd += nNodes
			goto Start
		}
	}
}
// Testing
// This is here mainly for triangulation. It ensures we're not
// hardcoding crnd.
func TestStartsRoundAtMe(t *testing.T) {
	ins := make(chan msg)
	outs := make(chan msg)
	clock := make(chan int)
	nNodes := uint64(10) // this is arbitrary
	res := make([]msg, 2)
	// Each coordinator's first INVITE must carry a round number equal to its id.
	go coordinator(1, nNodes, "foo", ins, outs, clock)
	res[0] = <-outs
	go coordinator(2, nNodes, "foo", ins, outs, clock)
	res[1] = <-outs
	exp := msgs("1:*:INVITE:1", "2:*:INVITE:2")
	assert.Equal(t, exp, res, "")
}
// TestPanicWhenMeIsOutOfRange verifies that a node id greater than the cluster
// size (11 > 10) makes coordinator panic with IdOutOfRange.
func TestPanicWhenMeIsOutOfRange(t *testing.T) {
	ins := make(chan msg)
	outs := make(chan msg)
	clock := make(chan int)
	nNodes := uint64(10) // this is arbitrary
	assert.Panic(t, IdOutOfRange, func() {
		coordinator(11, nNodes, "foo", ins, outs, clock)
	})
}
// TestPhase2aTimeoutStartsNewRound feeds five RSVPs — one short of the quorum
// of six for ten nodes — then ticks the clock and expects a fresh INVITE with
// the round bumped by nNodes (1 + 10 = 11).
func TestPhase2aTimeoutStartsNewRound(t *testing.T) {
	ins := make(chan msg)
	outs := make(chan msg)
	clock := make(chan int)
	nNodes := uint64(10) // this is arbitrary
	go coordinator(1, nNodes, "foo", ins, outs, clock)
	<-outs //discard INVITE
	// never reach majority (force timeout)
	ins <- m("2:1:RSVP:1:0:")
	ins <- m("3:1:RSVP:1:0:")
	ins <- m("4:1:RSVP:1:0:")
	ins <- m("5:1:RSVP:1:0:")
	ins <- m("6:1:RSVP:1:0:")
	clock <- 1
	exp := m("1:*:INVITE:11")
	assert.Equal(t, exp, <-outs, "")
}
// TestShutdown closes ins and expects the coordinator to close outs and exit,
// so draining outs yields only the initial INVITE.
func TestShutdown(t *testing.T) {
	ins := make(chan msg)
	outs := make(chan msg)
	clock := make(chan int)
	nNodes := uint64(10) // this is arbitrary
	go coordinator(1, nNodes, "foo", ins, outs, clock)
	close(ins)
	exp := msgs("1:*:INVITE:1")
	assert.Equal(t, exp, gather(outs), "")
}
|
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package iptables
//
// NOTE: this needs to be tested in e2e since it uses iptables for everything.
//
import (
"bytes"
"crypto/sha256"
"encoding/base32"
"fmt"
"net"
"reflect"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientv1 "k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/helper"
apiservice "k8s.io/kubernetes/pkg/api/service"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/proxy"
"k8s.io/kubernetes/pkg/proxy/healthcheck"
utilproxy "k8s.io/kubernetes/pkg/proxy/util"
"k8s.io/kubernetes/pkg/util/async"
utilexec "k8s.io/kubernetes/pkg/util/exec"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
utilversion "k8s.io/kubernetes/pkg/util/version"
)
const (
	// iptablesMinVersion is the minimum version of iptables for which we will use the Proxier
	// from this package instead of the userspace Proxier. While most of the
	// features we need were available earlier, the '-C' flag was added more
	// recently. We use that indirectly in Ensure* functions, and if we don't
	// have it, we have to be extra careful about the exact args we feed in being
	// the same as the args we read back (iptables itself normalizes some args).
	// This is the "new" Proxier, so we require "new" versions of tools.
	iptablesMinVersion = utiliptables.MinCheckVersion
	// the services chain
	kubeServicesChain utiliptables.Chain = "KUBE-SERVICES"
	// the nodeports chain
	kubeNodePortsChain utiliptables.Chain = "KUBE-NODEPORTS"
	// the kubernetes postrouting chain
	kubePostroutingChain utiliptables.Chain = "KUBE-POSTROUTING"
	// the mark-for-masquerade chain (exported: shared with other proxy components)
	KubeMarkMasqChain utiliptables.Chain = "KUBE-MARK-MASQ"
	// the mark-for-drop chain (exported: shared with other proxy components)
	KubeMarkDropChain utiliptables.Chain = "KUBE-MARK-DROP"
)
// IPTablesVersioner can query the current iptables version.
type IPTablesVersioner interface {
	// GetVersion returns the iptables version string, formatted "X.Y.Z".
	GetVersion() (string, error)
}
// KernelCompatTester tests whether the required kernel capabilities are
// present to run the iptables proxier.
type KernelCompatTester interface {
	// IsCompatible returns nil when the kernel supports what the proxier needs.
	IsCompatible() error
}
// CanUseIPTablesProxier returns true if we should use the iptables Proxier
// instead of the "classic" userspace Proxier: the installed iptables must be
// at least iptablesMinVersion and the kernel must pass the compatibility
// check. When any probe fails, false is returned together with the error.
func CanUseIPTablesProxier(iptver IPTablesVersioner, kcompat KernelCompatTester) (bool, error) {
	required, err := utilversion.ParseGeneric(iptablesMinVersion)
	if err != nil {
		return false, err
	}
	installedString, err := iptver.GetVersion()
	if err != nil {
		return false, err
	}
	installed, err := utilversion.ParseGeneric(installedString)
	if err != nil {
		return false, err
	}
	if installed.LessThan(required) {
		return false, nil
	}
	// iptables is new enough; now make sure the kernel cooperates.
	if err := kcompat.IsCompatible(); err != nil {
		return false, err
	}
	return true, nil
}
// LinuxKernelCompatTester implements KernelCompatTester by probing sysctls.
type LinuxKernelCompatTester struct{}
// IsCompatible probes the sysctls the iptables proxier relies on.
func (lkct LinuxKernelCompatTester) IsCompatible() error {
	// Check for the required sysctls. We don't care about the value, just
	// that it exists. If this Proxier is chosen, we'll initialize it as we
	// need.
	_, err := utilsysctl.New().GetSysctl(sysctlRouteLocalnet)
	return err
}
// sysctlRouteLocalnet is set to 1 by NewProxier; presumably needed so traffic
// addressed to 127.0.0.1 can be proxied — confirm against kernel docs.
const sysctlRouteLocalnet = "net/ipv4/conf/all/route_localnet"
// sysctlBridgeCallIPTables controls whether bridged traffic traverses iptables;
// NewProxier only warns when it is not 1.
const sysctlBridgeCallIPTables = "net/bridge/bridge-nf-call-iptables"
// internal struct for string service information
type serviceInfo struct {
	clusterIP net.IP
	port int
	protocol api.Protocol
	nodePort int
	loadBalancerStatus api.LoadBalancerStatus
	sessionAffinityType api.ServiceAffinity
	stickyMaxAgeMinutes int
	externalIPs []string
	loadBalancerSourceRanges []string
	onlyNodeLocalEndpoints bool
	// healthCheckNodePort is non-zero only for services that need health checks.
	healthCheckNodePort int
	// The following fields are computed and stored for performance reasons.
	serviceNameString string
	servicePortChainName utiliptables.Chain
	serviceFirewallChainName utiliptables.Chain
	serviceLBChainName utiliptables.Chain
}
// internal struct for endpoints information
type endpointsInfo struct {
	endpoint string // TODO: should be an endpointString type
	isLocal bool // true when the endpoint runs on this proxier's host
	// The following fields we lazily compute and store here for performance
	// reasons. If the protocol is the same as you expect it to be, then the
	// chainName can be reused, otherwise it should be recomputed.
	protocol string
	chainName utiliptables.Chain
}
// IPPart returns the IP portion of an "ip:port" endpoint string; when no colon
// is present the endpoint is returned unchanged.
// NOTE(review): this cuts at the FIRST colon, so bracketed IPv6 endpoints
// would not be handled — confirm endpoints here are always IPv4.
func (e *endpointsInfo) IPPart() string {
	index := strings.Index(e.endpoint, ":")
	if index == -1 {
		return e.endpoint
	}
	return e.endpoint[:index]
}
// Returns the endpoint chain name for a given endpointsInfo.
// The chain name is cached; it is recomputed only when the requested protocol
// differs from the cached one.
func (e *endpointsInfo) endpointChain(svcNameString, protocol string) utiliptables.Chain {
	if e.protocol != protocol {
		e.protocol = protocol
		e.chainName = servicePortEndpointChainName(svcNameString, protocol, e.endpoint)
	}
	return e.chainName
}
// String renders the endpointsInfo with fmt's default struct formatting.
func (e *endpointsInfo) String() string {
	return fmt.Sprintf("%v", *e)
}
// returns a new serviceInfo struct populated from the given Service/ServicePort,
// with the derived iptables chain names precomputed.
func newServiceInfo(svcPortName proxy.ServicePortName, port *api.ServicePort, service *api.Service) *serviceInfo {
	onlyNodeLocalEndpoints := false
	if utilfeature.DefaultFeatureGate.Enabled(features.ExternalTrafficLocalOnly) &&
		apiservice.RequestsOnlyLocalTraffic(service) {
		onlyNodeLocalEndpoints = true
	}
	info := &serviceInfo{
		clusterIP: net.ParseIP(service.Spec.ClusterIP),
		port: int(port.Port),
		protocol: port.Protocol,
		nodePort: int(port.NodePort),
		// Deep-copy in case the service instance changes
		loadBalancerStatus: *helper.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer),
		sessionAffinityType: service.Spec.SessionAffinity,
		stickyMaxAgeMinutes: 180, // TODO: paramaterize this in the API.
		externalIPs: make([]string, len(service.Spec.ExternalIPs)),
		loadBalancerSourceRanges: make([]string, len(service.Spec.LoadBalancerSourceRanges)),
		onlyNodeLocalEndpoints: onlyNodeLocalEndpoints,
	}
	// Copy the slices so later mutations of the Service object cannot alias us.
	copy(info.loadBalancerSourceRanges, service.Spec.LoadBalancerSourceRanges)
	copy(info.externalIPs, service.Spec.ExternalIPs)
	if apiservice.NeedsHealthCheck(service) {
		p := apiservice.GetServiceHealthCheckNodePort(service)
		if p == 0 {
			glog.Errorf("Service %q has no healthcheck nodeport", svcPortName.NamespacedName.String())
		} else {
			info.healthCheckNodePort = int(p)
		}
	}
	// Store the following for performance reasons.
	protocol := strings.ToLower(string(info.protocol))
	info.serviceNameString = svcPortName.String()
	info.servicePortChainName = servicePortChainName(info.serviceNameString, protocol)
	info.serviceFirewallChainName = serviceFirewallChainName(info.serviceNameString, protocol)
	info.serviceLBChainName = serviceLBChainName(info.serviceNameString, protocol)
	return info
}
// endpointsChange records an Endpoints object's state before the first pending
// update (previous) and after the latest one (current).
type endpointsChange struct {
	previous proxyEndpointsMap
	current proxyEndpointsMap
}
// endpointsChangeMap accumulates pending endpoints changes keyed by object
// name; lock guards items.
type endpointsChangeMap struct {
	lock sync.Mutex
	hostname string
	items map[types.NamespacedName]*endpointsChange
}
// serviceChange records a Service object's state before the first pending
// update (previous) and after the latest one (current).
type serviceChange struct {
	previous proxyServiceMap
	current proxyServiceMap
}
// serviceChangeMap accumulates pending service changes keyed by object name;
// lock guards items.
type serviceChangeMap struct {
	lock sync.Mutex
	items map[types.NamespacedName]*serviceChange
}
// proxyServiceMap indexes service info by service port name.
type proxyServiceMap map[proxy.ServicePortName]*serviceInfo
// proxyEndpointsMap indexes endpoint lists by service port name.
type proxyEndpointsMap map[proxy.ServicePortName][]*endpointsInfo
// newEndpointsChangeMap returns an empty change tracker bound to hostname.
func newEndpointsChangeMap(hostname string) endpointsChangeMap {
	ecm := endpointsChangeMap{hostname: hostname}
	ecm.items = map[types.NamespacedName]*endpointsChange{}
	return ecm
}
// update records a previous->current transition for one Endpoints object and
// reports whether any changes remain pending. A transition that nets out to
// no change (current equals the originally recorded previous) is dropped.
func (ecm *endpointsChangeMap) update(namespacedName *types.NamespacedName, previous, current *api.Endpoints) bool {
	ecm.lock.Lock()
	defer ecm.lock.Unlock()
	change, exists := ecm.items[*namespacedName]
	if !exists {
		// First change for this object: remember its starting state.
		change = &endpointsChange{}
		change.previous = endpointsToEndpointsMap(previous, ecm.hostname)
		ecm.items[*namespacedName] = change
	}
	change.current = endpointsToEndpointsMap(current, ecm.hostname)
	if reflect.DeepEqual(change.previous, change.current) {
		delete(ecm.items, *namespacedName)
	}
	return len(ecm.items) > 0
}
// newServiceChangeMap returns an empty service change tracker.
func newServiceChangeMap() serviceChangeMap {
	var scm serviceChangeMap
	scm.items = map[types.NamespacedName]*serviceChange{}
	return scm
}
// update records a previous->current transition for one Service object and
// reports whether any changes remain pending. A transition that nets out to
// no change (current equals the originally recorded previous) is dropped.
func (scm *serviceChangeMap) update(namespacedName *types.NamespacedName, previous, current *api.Service) bool {
	scm.lock.Lock()
	defer scm.lock.Unlock()
	change, exists := scm.items[*namespacedName]
	if !exists {
		// First change for this object: remember its starting state.
		change = &serviceChange{}
		change.previous = serviceToServiceMap(previous)
		scm.items[*namespacedName] = change
	}
	change.current = serviceToServiceMap(current)
	if reflect.DeepEqual(change.previous, change.current) {
		delete(scm.items, *namespacedName)
	}
	return len(scm.items) > 0
}
// merge copies every service port from other into sm (overwriting existing
// entries) and returns the set of merged port names, which unmerge later uses
// to avoid removing ports that were just (re)added.
func (sm *proxyServiceMap) merge(other proxyServiceMap) sets.String {
	existingPorts := sets.NewString()
	for svcPortName, info := range other {
		existingPorts.Insert(svcPortName.Port)
		_, exists := (*sm)[svcPortName]
		if !exists {
			glog.V(1).Infof("Adding new service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol)
		} else {
			glog.V(1).Infof("Updating existing service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol)
		}
		(*sm)[svcPortName] = info
	}
	return existingPorts
}
// unmerge removes from sm every service port present in other, except those
// whose port name appears in existingPorts (they were re-added by a preceding
// merge). Cluster IPs of removed UDP services are collected into staleServices
// so the caller can clean up their stale state.
func (sm *proxyServiceMap) unmerge(other proxyServiceMap, existingPorts, staleServices sets.String) {
	for svcPortName := range other {
		if existingPorts.Has(svcPortName.Port) {
			continue
		}
		info, exists := (*sm)[svcPortName]
		if exists {
			glog.V(1).Infof("Removing service port %q", svcPortName)
			// Only UDP services leave stale conntrack-style state behind.
			if info.protocol == api.ProtocolUDP {
				staleServices.Insert(info.clusterIP.String())
			}
			delete(*sm, svcPortName)
		} else {
			// Fixed log message grammar ("doesn't exists" -> "doesn't exist").
			glog.Errorf("Service port %q removed, but doesn't exist", svcPortName)
		}
	}
}
// merge copies every endpoint list from other into em, overwriting any
// existing entry for the same service port.
func (em proxyEndpointsMap) merge(other proxyEndpointsMap) {
	for svcPortName, endpoints := range other {
		em[svcPortName] = endpoints
	}
}
// unmerge deletes from em every service port that appears in other.
func (em proxyEndpointsMap) unmerge(other proxyEndpointsMap) {
	for svcPortName := range other {
		delete(em, svcPortName)
	}
}
// Proxier is an iptables based proxy for connections between a localhost:lport
// and services that provide the actual backends.
type Proxier struct {
	// endpointsChanges and serviceChanges contains all changes to endpoints and
	// services that happened since iptables was synced. For a single object,
	// changes are accumulated, i.e. previous is state from before all of them,
	// current is state after applying all of those.
	endpointsChanges endpointsChangeMap
	serviceChanges serviceChangeMap
	mu sync.Mutex // protects the following fields
	serviceMap proxyServiceMap
	endpointsMap proxyEndpointsMap
	portsMap map[localPort]closeable
	// endpointsSynced and servicesSynced are set to true when corresponding
	// objects are synced after startup. This is used to avoid updating iptables
	// with some partial data after kube-proxy restart.
	endpointsSynced bool
	servicesSynced bool
	initialized int32 // accessed atomically; non-zero once both syncs are done
	syncRunner *async.BoundedFrequencyRunner // governs calls to syncProxyRules
	// These are effectively const and do not need the mutex to be held.
	iptables utiliptables.Interface
	masqueradeAll bool
	masqueradeMark string
	exec utilexec.Interface
	clusterCIDR string
	hostname string
	nodeIP net.IP
	portMapper portOpener
	recorder record.EventRecorder
	healthChecker healthcheck.Server
	healthzServer healthcheck.HealthzUpdater
	// Since converting probabilities (floats) to strings is expensive
	// and we are using only probabilities in the format of 1/n, we are
	// precomputing some number of those and cache for future reuse.
	precomputedProbabilities []string
	// The following buffers are used to reuse memory and avoid allocations
	// that are significantly impacting performance.
	iptablesData *bytes.Buffer
	filterChains *bytes.Buffer
	filterRules *bytes.Buffer
	natChains *bytes.Buffer
	natRules *bytes.Buffer
}
// localPort identifies a port held open on the local node.
type localPort struct {
	desc string // human-readable description, used in logs
	ip string
	port int
	protocol string
}
// String renders the port as `"desc" (ip:port/protocol)` for logging.
func (lp *localPort) String() string {
	return fmt.Sprintf("%q (%s:%d/%s)", lp.desc, lp.ip, lp.port, lp.protocol)
}
// closeable is anything holding a resource that can be released with Close.
type closeable interface {
	Close() error
}
// portOpener is an interface around port opening/closing.
// Abstracted out for testing.
type portOpener interface {
	OpenLocalPort(lp *localPort) (closeable, error)
}
// listenPortOpener opens ports by calling bind() and listen().
type listenPortOpener struct{}
// OpenLocalPort holds the given local port open.
func (l *listenPortOpener) OpenLocalPort(lp *localPort) (closeable, error) {
	return openLocalPort(lp)
}
// Proxier implements ProxyProvider (compile-time interface check).
var _ proxy.ProxyProvider = &Proxier{}
// NewProxier returns a new Proxier given an iptables Interface instance.
// Because of the iptables logic, it is assumed that there is only a single Proxier active on a machine.
// An error will be returned if iptables fails to update or acquire the initial lock.
// Once a proxier is created, it will keep iptables up to date in the background and
// will not terminate if a particular iptables call fails.
func NewProxier(ipt utiliptables.Interface,
	sysctl utilsysctl.Interface,
	exec utilexec.Interface,
	syncPeriod time.Duration,
	minSyncPeriod time.Duration,
	masqueradeAll bool,
	masqueradeBit int,
	clusterCIDR string,
	hostname string,
	nodeIP net.IP,
	recorder record.EventRecorder,
	healthzServer healthcheck.HealthzUpdater,
) (*Proxier, error) {
	// check valid user input
	if minSyncPeriod > syncPeriod {
		return nil, fmt.Errorf("minSyncPeriod (%v) must be <= syncPeriod (%v)", minSyncPeriod, syncPeriod)
	}
	// Set the route_localnet sysctl we need for
	if err := sysctl.SetSysctl(sysctlRouteLocalnet, 1); err != nil {
		return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlRouteLocalnet, err)
	}
	// Proxy needs br_netfilter and bridge-nf-call-iptables=1 when containers
	// are connected to a Linux bridge (but not SDN bridges). Until most
	// plugins handle this, log when config is missing
	if val, err := sysctl.GetSysctl(sysctlBridgeCallIPTables); err == nil && val != 1 {
		glog.Warningf("missing br-netfilter module or unset sysctl br-nf-call-iptables; proxy may not work as intended")
	}
	// Generate the masquerade mark to use for SNAT rules.
	if masqueradeBit < 0 || masqueradeBit > 31 {
		return nil, fmt.Errorf("invalid iptables-masquerade-bit %v not in [0, 31]", masqueradeBit)
	}
	// The mark is a single bit; render it as "value/mask" for iptables.
	masqueradeValue := 1 << uint(masqueradeBit)
	masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue)
	if nodeIP == nil {
		glog.Warningf("invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP")
		nodeIP = net.ParseIP("127.0.0.1")
	}
	if len(clusterCIDR) == 0 {
		glog.Warningf("clusterCIDR not specified, unable to distinguish between internal and external traffic")
	}
	healthChecker := healthcheck.NewServer(hostname, recorder, nil, nil) // use default implementations of deps
	proxier := &Proxier{
		portsMap: make(map[localPort]closeable),
		serviceMap: make(proxyServiceMap),
		serviceChanges: newServiceChangeMap(),
		endpointsMap: make(proxyEndpointsMap),
		endpointsChanges: newEndpointsChangeMap(hostname),
		iptables: ipt,
		masqueradeAll: masqueradeAll,
		masqueradeMark: masqueradeMark,
		exec: exec,
		clusterCIDR: clusterCIDR,
		hostname: hostname,
		nodeIP: nodeIP,
		portMapper: &listenPortOpener{},
		recorder: recorder,
		healthChecker: healthChecker,
		healthzServer: healthzServer,
		precomputedProbabilities: make([]string, 0, 1001),
		iptablesData: bytes.NewBuffer(nil),
		filterChains: bytes.NewBuffer(nil),
		filterRules: bytes.NewBuffer(nil),
		natChains: bytes.NewBuffer(nil),
		natRules: bytes.NewBuffer(nil),
	}
	burstSyncs := 2
	glog.V(3).Infof("minSyncPeriod: %v, syncPeriod: %v, burstSyncs: %d", minSyncPeriod, syncPeriod, burstSyncs)
	proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, syncPeriod, burstSyncs)
	return proxier, nil
}
// CleanupLeftovers removes all iptables rules and chains created by the Proxier
// It returns true if an error was encountered. Errors are logged.
func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) {
	// Unlink the services chain.
	args := []string{
		"-m", "comment", "--comment", "kubernetes service portals",
		"-j", string(kubeServicesChain),
	}
	tableChainsWithJumpServices := []struct {
		table utiliptables.Table
		chain utiliptables.Chain
	}{
		{utiliptables.TableFilter, utiliptables.ChainInput},
		{utiliptables.TableFilter, utiliptables.ChainOutput},
		{utiliptables.TableNAT, utiliptables.ChainOutput},
		{utiliptables.TableNAT, utiliptables.ChainPrerouting},
	}
	for _, tc := range tableChainsWithJumpServices {
		if err := ipt.DeleteRule(tc.table, tc.chain, args...); err != nil {
			// Rule already gone is fine; anything else is a real failure.
			if !utiliptables.IsNotFoundError(err) {
				glog.Errorf("Error removing pure-iptables proxy rule: %v", err)
				encounteredError = true
			}
		}
	}
	// Unlink the postrouting chain.
	args = []string{
		"-m", "comment", "--comment", "kubernetes postrouting rules",
		"-j", string(kubePostroutingChain),
	}
	if err := ipt.DeleteRule(utiliptables.TableNAT, utiliptables.ChainPostrouting, args...); err != nil {
		if !utiliptables.IsNotFoundError(err) {
			glog.Errorf("Error removing pure-iptables proxy rule: %v", err)
			encounteredError = true
		}
	}
	// Flush and remove all of our chains.
	iptablesData := bytes.NewBuffer(nil)
	if err := ipt.SaveInto(utiliptables.TableNAT, iptablesData); err != nil {
		glog.Errorf("Failed to execute iptables-save for %s: %v", utiliptables.TableNAT, err)
		encounteredError = true
	} else {
		existingNATChains := utiliptables.GetChainLines(utiliptables.TableNAT, iptablesData.Bytes())
		natChains := bytes.NewBuffer(nil)
		natRules := bytes.NewBuffer(nil)
		writeLine(natChains, "*nat")
		// Start with chains we know we need to remove.
		for _, chain := range []utiliptables.Chain{kubeServicesChain, kubeNodePortsChain, kubePostroutingChain, KubeMarkMasqChain} {
			if _, found := existingNATChains[chain]; found {
				chainString := string(chain)
				writeLine(natChains, existingNATChains[chain]) // flush
				writeLine(natRules, "-X", chainString) // delete
			}
		}
		// Hunt for service and endpoint chains.
		for chain := range existingNATChains {
			chainString := string(chain)
			if strings.HasPrefix(chainString, "KUBE-SVC-") || strings.HasPrefix(chainString, "KUBE-SEP-") || strings.HasPrefix(chainString, "KUBE-FW-") || strings.HasPrefix(chainString, "KUBE-XLB-") {
				writeLine(natChains, existingNATChains[chain]) // flush
				writeLine(natRules, "-X", chainString) // delete
			}
		}
		writeLine(natRules, "COMMIT")
		natLines := append(natChains.Bytes(), natRules.Bytes()...)
		// Write it.
		err = ipt.Restore(utiliptables.TableNAT, natLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters)
		if err != nil {
			glog.Errorf("Failed to execute iptables-restore for %s: %v", utiliptables.TableNAT, err)
			encounteredError = true
		}
	}
	{
		// The filter table only carries our services chain; flush and delete it.
		filterBuf := bytes.NewBuffer(nil)
		writeLine(filterBuf, "*filter")
		writeLine(filterBuf, fmt.Sprintf(":%s - [0:0]", kubeServicesChain))
		writeLine(filterBuf, fmt.Sprintf("-X %s", kubeServicesChain))
		writeLine(filterBuf, "COMMIT")
		// Write it.
		if err := ipt.Restore(utiliptables.TableFilter, filterBuf.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters); err != nil {
			glog.Errorf("Failed to execute iptables-restore for %s: %v", utiliptables.TableFilter, err)
			encounteredError = true
		}
	}
	return encounteredError
}
// computeProbability renders 1/n as a fixed five-decimal string, the format
// consumed by iptables' statistic-module probability argument.
func computeProbability(n int) string {
	p := 1.0 / float64(n)
	return fmt.Sprintf("%0.5f", p)
}
// precomputeProbabilities grows the cache of formatted probability strings so
// that indexes 1..numberOfPrecomputed are populated. Index 0 is filled with a
// sentinel because a divisor of zero is never valid.
// This assumes proxier.mu is held
func (proxier *Proxier) precomputeProbabilities(numberOfPrecomputed int) {
	cached := proxier.precomputedProbabilities
	if len(cached) == 0 {
		cached = append(cached, "<bad value>")
	}
	for next := len(cached); next <= numberOfPrecomputed; next++ {
		cached = append(cached, computeProbability(next))
	}
	proxier.precomputedProbabilities = cached
}
// probability returns the cached "--probability" string for 1/n, extending
// the cache on demand when n is beyond what has been precomputed.
// This assumes proxier.mu is held
func (proxier *Proxier) probability(n int) string {
	if n < len(proxier.precomputedProbabilities) {
		return proxier.precomputedProbabilities[n]
	}
	proxier.precomputeProbabilities(n)
	return proxier.precomputedProbabilities[n]
}
// Sync is called to synchronize the proxier state to iptables as soon as possible.
// It delegates to syncRunner; presumably the runner coalesces/rate-limits the
// actual rule writes — confirm against the runner's declaration.
func (proxier *Proxier) Sync() {
	proxier.syncRunner.Run()
}
// SyncLoop runs periodic work. This is expected to run as a goroutine or as the main loop of the app. It does not return.
func (proxier *Proxier) SyncLoop() {
	// Update healthz timestamp at beginning in case Sync() never succeeds.
	// This must happen before entering the (non-returning) runner loop.
	if proxier.healthzServer != nil {
		proxier.healthzServer.UpdateTimestamp()
	}
	// wait.NeverStop: the loop is never cancelled for the process lifetime.
	proxier.syncRunner.Loop(wait.NeverStop)
}
// setInitialized records (atomically, so readers need not hold proxier.mu)
// whether the proxier has received its initial state; see isInitialized.
func (proxier *Proxier) setInitialized(value bool) {
	if value {
		atomic.StoreInt32(&proxier.initialized, 1)
		return
	}
	atomic.StoreInt32(&proxier.initialized, 0)
}
// isInitialized reports whether setInitialized has recorded that both the
// service and endpoints caches have been synced (see OnServiceSynced /
// OnEndpointsSynced). Safe to call without holding proxier.mu.
func (proxier *Proxier) isInitialized() bool {
	return atomic.LoadInt32(&proxier.initialized) > 0
}
// OnServiceAdd records a newly observed service and schedules a sync if the
// change map reports a pending change and initial state has been received.
func (proxier *Proxier) OnServiceAdd(service *api.Service) {
	name := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
	changed := proxier.serviceChanges.update(&name, nil, service)
	if changed && proxier.isInitialized() {
		proxier.syncRunner.Run()
	}
}
// OnServiceUpdate records an observed service modification and schedules a
// sync if the change map reports a pending change and the proxier is initialized.
func (proxier *Proxier) OnServiceUpdate(oldService, service *api.Service) {
	name := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
	changed := proxier.serviceChanges.update(&name, oldService, service)
	if changed && proxier.isInitialized() {
		proxier.syncRunner.Run()
	}
}
// OnServiceDelete records an observed service deletion (previous = service,
// current = nil) and schedules a sync when appropriate.
func (proxier *Proxier) OnServiceDelete(service *api.Service) {
	name := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
	changed := proxier.serviceChanges.update(&name, service, nil)
	if changed && proxier.isInitialized() {
		proxier.syncRunner.Run()
	}
}
// OnServiceSynced marks the service cache as having received its initial
// state, then forces one unconditional rule sync.
func (proxier *Proxier) OnServiceSynced() {
	// Scope the lock to the state update; syncProxyRules takes mu itself.
	func() {
		proxier.mu.Lock()
		defer proxier.mu.Unlock()
		proxier.servicesSynced = true
		proxier.setInitialized(proxier.servicesSynced && proxier.endpointsSynced)
	}()
	// Sync unconditionally - this is called once per lifetime.
	proxier.syncProxyRules()
}
// shouldSkipService reports whether the given service must not be proxied:
// headless services (no usable ClusterIP) and ExternalName services.
func shouldSkipService(svcName types.NamespacedName, service *api.Service) bool {
	switch {
	case !helper.IsServiceIPSet(service):
		// ClusterIP is "None" or empty: headless, nothing to proxy.
		glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
		return true
	case service.Spec.Type == api.ServiceTypeExternalName:
		// Even with a ClusterIP set, ExternalName services are not proxied.
		glog.V(3).Infof("Skipping service %s due to Type=ExternalName", svcName)
		return true
	default:
		return false
	}
}
// updateServiceMap applies all pending entries in <changes> to <serviceMap>,
// returning the services that need health checks and the set of stale
// services collected by unmerge (used by the caller to flush UDP conntrack
// state — see syncProxyRules).
//
// <serviceMap> is updated by this function (based on the given changes).
// <changes> map is cleared after applying them.
func updateServiceMap(
	serviceMap proxyServiceMap,
	changes *serviceChangeMap) (hcServices map[types.NamespacedName]uint16, staleServices sets.String) {
	staleServices = sets.NewString()
	// The anonymous function scopes the deferred Unlock to just the
	// change-application critical section.
	func() {
		changes.lock.Lock()
		defer changes.lock.Unlock()
		for _, change := range changes.items {
			// merge returns the ports that already existed, so unmerge can
			// avoid removing entries re-added by the current state.
			existingPorts := serviceMap.merge(change.current)
			serviceMap.unmerge(change.previous, existingPorts, staleServices)
		}
		// All pending changes consumed; reset the pending set.
		changes.items = make(map[types.NamespacedName]*serviceChange)
	}()
	// TODO: If this will appear to be computationally expensive, consider
	// computing this incrementally similarly to serviceMap.
	hcServices = make(map[types.NamespacedName]uint16)
	for svcPortName, info := range serviceMap {
		if info.healthCheckNodePort != 0 {
			hcServices[svcPortName.NamespacedName] = uint16(info.healthCheckNodePort)
		}
	}
	return hcServices, staleServices
}
// OnEndpointsAdd records a newly observed endpoints object and schedules a
// sync if the change map reports a pending change and the proxier is initialized.
func (proxier *Proxier) OnEndpointsAdd(endpoints *api.Endpoints) {
	name := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}
	changed := proxier.endpointsChanges.update(&name, nil, endpoints)
	if changed && proxier.isInitialized() {
		proxier.syncRunner.Run()
	}
}
// OnEndpointsUpdate records an observed endpoints modification and schedules
// a sync when appropriate.
func (proxier *Proxier) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoints) {
	name := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}
	changed := proxier.endpointsChanges.update(&name, oldEndpoints, endpoints)
	if changed && proxier.isInitialized() {
		proxier.syncRunner.Run()
	}
}
// OnEndpointsDelete records an observed endpoints deletion (previous =
// endpoints, current = nil) and schedules a sync when appropriate.
func (proxier *Proxier) OnEndpointsDelete(endpoints *api.Endpoints) {
	name := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}
	changed := proxier.endpointsChanges.update(&name, endpoints, nil)
	if changed && proxier.isInitialized() {
		proxier.syncRunner.Run()
	}
}
// OnEndpointsSynced marks the endpoints cache as having received its initial
// state, then forces one unconditional rule sync.
func (proxier *Proxier) OnEndpointsSynced() {
	// Scope the lock to the state update; syncProxyRules takes mu itself.
	func() {
		proxier.mu.Lock()
		defer proxier.mu.Unlock()
		proxier.endpointsSynced = true
		proxier.setInitialized(proxier.servicesSynced && proxier.endpointsSynced)
	}()
	// Sync unconditionally - this is called once per lifetime.
	proxier.syncProxyRules()
}
// updateEndpointsMap applies all pending entries in <changes> to
// <endpointsMap>, returning per-service local-endpoint counts for health
// checking plus the stale endpoints / newly-populated services detected by
// detectStaleConnections.
//
// NOTE: the <hostname> parameter is currently unused in this body; it is kept
// for interface compatibility with callers.
//
// <endpointsMap> is updated by this function (based on the given changes).
// <changes> map is cleared after applying them.
func updateEndpointsMap(
	endpointsMap proxyEndpointsMap,
	changes *endpointsChangeMap,
	hostname string) (hcEndpoints map[types.NamespacedName]int, staleEndpoints map[endpointServicePair]bool, staleServiceNames map[proxy.ServicePortName]bool) {
	staleEndpoints = make(map[endpointServicePair]bool)
	staleServiceNames = make(map[proxy.ServicePortName]bool)
	// The anonymous function scopes the deferred Unlock to just the
	// change-application critical section.
	func() {
		changes.lock.Lock()
		defer changes.lock.Unlock()
		for _, change := range changes.items {
			// Remove the previous state first, then merge the current one.
			endpointsMap.unmerge(change.previous)
			endpointsMap.merge(change.current)
			detectStaleConnections(change.previous, change.current, staleEndpoints, staleServiceNames)
		}
		changes.items = make(map[types.NamespacedName]*endpointsChange)
	}()
	if !utilfeature.DefaultFeatureGate.Enabled(features.ExternalTrafficLocalOnly) {
		// Naked return: hcEndpoints is deliberately left nil when the
		// feature gate is off.
		return
	}
	// TODO: If this will appear to be computationally expensive, consider
	// computing this incrementally similarly to endpointsMap.
	hcEndpoints = make(map[types.NamespacedName]int)
	localIPs := getLocalIPs(endpointsMap)
	for nsn, ips := range localIPs {
		hcEndpoints[nsn] = len(ips)
	}
	return hcEndpoints, staleEndpoints, staleServiceNames
}
// detectStaleConnections compares old and new endpoint state for stale
// connections.
// <staleEndpoints> and <staleServices> are modified by this function with detected stale connections.
func detectStaleConnections(oldEndpointsMap, newEndpointsMap proxyEndpointsMap, staleEndpoints map[endpointServicePair]bool, staleServiceNames map[proxy.ServicePortName]bool) {
	// Any old endpoint no longer present in the new state is stale.
	for svcPortName, epList := range oldEndpointsMap {
		current := newEndpointsMap[svcPortName]
		for _, ep := range epList {
			found := false
			for _, cur := range current {
				if *cur == *ep {
					found = true
					break
				}
			}
			if found {
				continue
			}
			glog.V(4).Infof("Stale endpoint %v -> %v", svcPortName, ep.endpoint)
			staleEndpoints[endpointServicePair{endpoint: ep.endpoint, servicePortName: svcPortName}] = true
		}
	}
	for svcPortName, epList := range newEndpointsMap {
		// For udp service, if its backend changes from 0 to non-0. There may exist a conntrack entry that could blackhole traffic to the service.
		if len(epList) > 0 && len(oldEndpointsMap[svcPortName]) == 0 {
			staleServiceNames[svcPortName] = true
		}
	}
}
// getLocalIPs collects, per service, the set of endpoint IPs that are local
// to this node (endpoints whose isLocal flag is set).
func getLocalIPs(endpointsMap proxyEndpointsMap) map[types.NamespacedName]sets.String {
	localIPs := make(map[types.NamespacedName]sets.String)
	for svcPortName, epList := range endpointsMap {
		for _, ep := range epList {
			if !ep.isLocal {
				continue
			}
			nsn := svcPortName.NamespacedName
			ips, ok := localIPs[nsn]
			if !ok {
				ips = sets.NewString()
				localIPs[nsn] = ips
			}
			ips.Insert(ep.IPPart()) // just the IP part
		}
	}
	return localIPs
}
// Translates single Endpoints object to proxyEndpointsMap.
// This function is used for incremental updated of endpointsMap.
//
// NOTE: endpoints object should NOT be modified.
func endpointsToEndpointsMap(endpoints *api.Endpoints, hostname string) proxyEndpointsMap {
	if endpoints == nil {
		return nil
	}
	endpointsMap := make(proxyEndpointsMap)
	// Build a map of portname -> all ip:ports for that portname by exploding
	// Endpoints.Subsets[*]. Distinct index names avoid shadowing across the
	// three nested loops.
	for si := range endpoints.Subsets {
		ss := &endpoints.Subsets[si]
		for pi := range ss.Ports {
			port := &ss.Ports[pi]
			if port.Port == 0 {
				glog.Warningf("ignoring invalid endpoint port %s", port.Name)
				continue
			}
			svcPortName := proxy.ServicePortName{
				NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name},
				Port:           port.Name,
			}
			for ai := range ss.Addresses {
				addr := &ss.Addresses[ai]
				if addr.IP == "" {
					glog.Warningf("ignoring invalid endpoint port %s with empty host", port.Name)
					continue
				}
				epInfo := &endpointsInfo{
					endpoint: net.JoinHostPort(addr.IP, strconv.Itoa(int(port.Port))),
					isLocal:  addr.NodeName != nil && *addr.NodeName == hostname,
				}
				endpointsMap[svcPortName] = append(endpointsMap[svcPortName], epInfo)
			}
			if glog.V(3) {
				newEPList := []string{}
				for _, ep := range endpointsMap[svcPortName] {
					newEPList = append(newEPList, ep.endpoint)
				}
				glog.Infof("Setting endpoints for %q to %+v", svcPortName, newEPList)
			}
		}
	}
	return endpointsMap
}
// Translates single Service object to proxyServiceMap.
//
// NOTE: service object should NOT be modified.
func serviceToServiceMap(service *api.Service) proxyServiceMap {
	if service == nil {
		return nil
	}
	svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
	if shouldSkipService(svcName, service) {
		return nil
	}
	// One entry per declared service port.
	result := make(proxyServiceMap)
	for i := range service.Spec.Ports {
		sp := &service.Spec.Ports[i]
		portName := proxy.ServicePortName{NamespacedName: svcName, Port: sp.Name}
		result[portName] = newServiceInfo(portName, sp, service)
	}
	return result
}
// portProtoHash takes the ServicePortName and protocol for a service and
// returns the associated 16 character hash: base32(sha256(name+protocol))
// truncated to 16 chars. IPTables chain names must be <= 28 chars long, and
// the longer they are the harder they are to read.
func portProtoHash(servicePortName string, protocol string) string {
	sum := sha256.Sum256([]byte(servicePortName + protocol))
	return base32.StdEncoding.EncodeToString(sum[:])[:16]
}
// servicePortChainName takes the ServicePortName for a service and returns
// the associated iptables chain: "KUBE-SVC-" plus the 16-char
// base32(sha256) hash of name+protocol.
func servicePortChainName(servicePortName string, protocol string) utiliptables.Chain {
	name := "KUBE-SVC-" + portProtoHash(servicePortName, protocol)
	return utiliptables.Chain(name)
}
// serviceFirewallChainName takes the ServicePortName for a service and
// returns the associated iptables chain: "KUBE-FW-" plus the 16-char
// base32(sha256) hash of name+protocol.
func serviceFirewallChainName(servicePortName string, protocol string) utiliptables.Chain {
	name := "KUBE-FW-" + portProtoHash(servicePortName, protocol)
	return utiliptables.Chain(name)
}
// serviceLBChainName takes the ServicePortName for a service and returns the
// associated iptables chain: "KUBE-XLB-" plus the 16-char base32(sha256)
// hash of name+protocol. We hash because IPTables chain names must be <= 28
// chars long, and the longer they are the harder they are to read.
func serviceLBChainName(servicePortName string, protocol string) utiliptables.Chain {
	name := "KUBE-XLB-" + portProtoHash(servicePortName, protocol)
	return utiliptables.Chain(name)
}
// servicePortEndpointChainName is the same as servicePortChainName but with
// the endpoint included in the hash, prefixed "KUBE-SEP-".
func servicePortEndpointChainName(servicePortName string, protocol string, endpoint string) utiliptables.Chain {
	sum := sha256.Sum256([]byte(servicePortName + protocol + endpoint))
	encoded := base32.StdEncoding.EncodeToString(sum[:])
	return utiliptables.Chain("KUBE-SEP-" + encoded[:16])
}
// endpointServicePair identifies one endpoint ("host:port" string) of one
// service port; used as a set key when tracking stale UDP connections.
type endpointServicePair struct {
	endpoint        string
	servicePortName proxy.ServicePortName
}
// IPPart returns the IP component of the pair's "host:port" endpoint string,
// or the whole string when it does not parse as host:port.
//
// net.SplitHostPort correctly handles IPv6 endpoints produced by
// net.JoinHostPort (e.g. "[::1]:80"), whereas slicing at the first ':' would
// wrongly return "[" for them.
func (esp *endpointServicePair) IPPart() string {
	if ip, _, err := net.SplitHostPort(esp.endpoint); err == nil {
		return ip
	}
	return esp.endpoint
}
// noConnectionToDelete is the conntrack tool's output when no flow entries
// matched the deletion filter; deleteEndpointConnections treats it as success.
const noConnectionToDelete = "0 flow entries have been deleted"
// deleteEndpointConnections flushes conntrack state for removed UDP endpoints.
// After a UDP endpoint has been removed, we must flush any pending conntrack entries to it, or else we
// risk sending more traffic to it, all of which will be lost (because UDP).
// This assumes the proxier mutex is held
func (proxier *Proxier) deleteEndpointConnections(connectionMap map[endpointServicePair]bool) {
	for epSvcPair := range connectionMap {
		svcInfo, ok := proxier.serviceMap[epSvcPair.servicePortName]
		if !ok || svcInfo.protocol != api.ProtocolUDP {
			continue
		}
		// Use IPPart() instead of slicing at the first ':': the manual slice
		// panics when the endpoint has no colon and returns "[" for IPv6
		// endpoints such as "[::1]:80".
		endpointIP := epSvcPair.IPPart()
		glog.V(2).Infof("Deleting connection tracking state for service IP %s, endpoint IP %s", svcInfo.clusterIP.String(), endpointIP)
		err := utilproxy.ExecConntrackTool(proxier.exec, "-D", "--orig-dst", svcInfo.clusterIP.String(), "--dst-nat", endpointIP, "-p", "udp")
		if err != nil && !strings.Contains(err.Error(), noConnectionToDelete) {
			// TODO: Better handling for deletion failure. When failure occur, stale udp connection may not get flushed.
			// These stale udp connection will keep black hole traffic. Making this a best effort operation for now, since it
			// is expensive to baby sit all udp connections to kubernetes services.
			glog.Errorf("conntrack return with error: %v", err)
		}
	}
}
// This is where all of the iptables-save/restore calls happen.
// The only other iptables rules are those that are setup in iptablesInit()
// This assumes proxier.mu is NOT held
func (proxier *Proxier) syncProxyRules() {
proxier.mu.Lock()
defer proxier.mu.Unlock()
start := time.Now()
defer func() {
SyncProxyRulesLatency.Observe(sinceInMicroseconds(start))
glog.V(4).Infof("syncProxyRules took %v", time.Since(start))
}()
// don't sync rules till we've received services and endpoints
if !proxier.endpointsSynced || !proxier.servicesSynced {
glog.V(2).Info("Not syncing iptables until Services and Endpoints have been received from master")
return
}
var staleServices sets.String
// We assume that if this was called, we really want to sync them,
// even if nothing changed in the meantime. In other words, callers are
// responsible for detecting no-op changes and not calling this function.
hcServices, staleServices := updateServiceMap(
proxier.serviceMap, &proxier.serviceChanges)
hcEndpoints, staleEndpoints, staleServiceNames := updateEndpointsMap(
proxier.endpointsMap, &proxier.endpointsChanges, proxier.hostname)
// merge stale services gathered from updateEndpointsMap
for svcPortName := range staleServiceNames {
if svcInfo, ok := proxier.serviceMap[svcPortName]; ok && svcInfo != nil && svcInfo.protocol == api.ProtocolUDP {
glog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.clusterIP.String())
staleServices.Insert(svcInfo.clusterIP.String())
}
}
glog.V(3).Infof("Syncing iptables rules")
// Create and link the kube services chain.
{
tablesNeedServicesChain := []utiliptables.Table{utiliptables.TableFilter, utiliptables.TableNAT}
for _, table := range tablesNeedServicesChain {
if _, err := proxier.iptables.EnsureChain(table, kubeServicesChain); err != nil {
glog.Errorf("Failed to ensure that %s chain %s exists: %v", table, kubeServicesChain, err)
return
}
}
tableChainsNeedJumpServices := []struct {
table utiliptables.Table
chain utiliptables.Chain
}{
{utiliptables.TableFilter, utiliptables.ChainInput},
{utiliptables.TableFilter, utiliptables.ChainOutput},
{utiliptables.TableNAT, utiliptables.ChainOutput},
{utiliptables.TableNAT, utiliptables.ChainPrerouting},
}
comment := "kubernetes service portals"
args := []string{"-m", "comment", "--comment", comment, "-j", string(kubeServicesChain)}
for _, tc := range tableChainsNeedJumpServices {
if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, tc.table, tc.chain, args...); err != nil {
glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", tc.table, tc.chain, kubeServicesChain, err)
return
}
}
}
// Create and link the kube postrouting chain.
{
if _, err := proxier.iptables.EnsureChain(utiliptables.TableNAT, kubePostroutingChain); err != nil {
glog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, kubePostroutingChain, err)
return
}
comment := "kubernetes postrouting rules"
args := []string{"-m", "comment", "--comment", comment, "-j", string(kubePostroutingChain)}
if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, utiliptables.TableNAT, utiliptables.ChainPostrouting, args...); err != nil {
glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, kubePostroutingChain, err)
return
}
}
//
// Below this point we will not return until we try to write the iptables rules.
//
// Get iptables-save output so we can check for existing chains and rules.
// This will be a map of chain name to chain with rules as stored in iptables-save/iptables-restore
existingFilterChains := make(map[utiliptables.Chain]string)
proxier.iptablesData.Reset()
err := proxier.iptables.SaveInto(utiliptables.TableFilter, proxier.iptablesData)
if err != nil { // if we failed to get any rules
glog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err)
} else { // otherwise parse the output
existingFilterChains = utiliptables.GetChainLines(utiliptables.TableFilter, proxier.iptablesData.Bytes())
}
existingNATChains := make(map[utiliptables.Chain]string)
proxier.iptablesData.Reset()
err = proxier.iptables.SaveInto(utiliptables.TableNAT, proxier.iptablesData)
if err != nil { // if we failed to get any rules
glog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err)
} else { // otherwise parse the output
existingNATChains = utiliptables.GetChainLines(utiliptables.TableNAT, proxier.iptablesData.Bytes())
}
// Reset all buffers used later.
// This is to avoid memory reallocations and thus improve performance.
proxier.filterChains.Reset()
proxier.filterRules.Reset()
proxier.natChains.Reset()
proxier.natRules.Reset()
// Write table headers.
writeLine(proxier.filterChains, "*filter")
writeLine(proxier.natChains, "*nat")
// Make sure we keep stats for the top-level chains, if they existed
// (which most should have because we created them above).
if chain, ok := existingFilterChains[kubeServicesChain]; ok {
writeLine(proxier.filterChains, chain)
} else {
writeLine(proxier.filterChains, utiliptables.MakeChainLine(kubeServicesChain))
}
if chain, ok := existingNATChains[kubeServicesChain]; ok {
writeLine(proxier.natChains, chain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(kubeServicesChain))
}
if chain, ok := existingNATChains[kubeNodePortsChain]; ok {
writeLine(proxier.natChains, chain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(kubeNodePortsChain))
}
if chain, ok := existingNATChains[kubePostroutingChain]; ok {
writeLine(proxier.natChains, chain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(kubePostroutingChain))
}
if chain, ok := existingNATChains[KubeMarkMasqChain]; ok {
writeLine(proxier.natChains, chain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(KubeMarkMasqChain))
}
// Install the kubernetes-specific postrouting rules. We use a whole chain for
// this so that it is easier to flush and change, for example if the mark
// value should ever change.
writeLine(proxier.natRules, []string{
"-A", string(kubePostroutingChain),
"-m", "comment", "--comment", `"kubernetes service traffic requiring SNAT"`,
"-m", "mark", "--mark", proxier.masqueradeMark,
"-j", "MASQUERADE",
}...)
// Install the kubernetes-specific masquerade mark rule. We use a whole chain for
// this so that it is easier to flush and change, for example if the mark
// value should ever change.
writeLine(proxier.natRules, []string{
"-A", string(KubeMarkMasqChain),
"-j", "MARK", "--set-xmark", proxier.masqueradeMark,
}...)
// Accumulate NAT chains to keep.
activeNATChains := map[utiliptables.Chain]bool{} // use a map as a set
// Accumulate the set of local ports that we will be holding open once this update is complete
replacementPortsMap := map[localPort]closeable{}
// We are creating those slices ones here to avoid memory reallocations
// in every loop. Note that reuse the memory, instead of doing:
// slice = <some new slice>
// you should always do one of the below:
// slice = slice[:0] // and then append to it
// slice = append(slice[:0], ...)
endpoints := make([]*endpointsInfo, 0)
endpointChains := make([]utiliptables.Chain, 0)
// To avoid growing this slice, we arbitrarily set its size to 64,
// there is never more than that many arguments for a single line.
// Note that even if we go over 64, it will still be correct - it
// is just for efficiency, not correctness.
args := make([]string, 64)
// Build rules for each service.
var svcNameString string
for svcName, svcInfo := range proxier.serviceMap {
protocol := strings.ToLower(string(svcInfo.protocol))
svcNameString = svcInfo.serviceNameString
// Create the per-service chain, retaining counters if possible.
svcChain := svcInfo.servicePortChainName
if chain, ok := existingNATChains[svcChain]; ok {
writeLine(proxier.natChains, chain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(svcChain))
}
activeNATChains[svcChain] = true
svcXlbChain := svcInfo.serviceLBChainName
if svcInfo.onlyNodeLocalEndpoints {
// Only for services request OnlyLocal traffic
// create the per-service LB chain, retaining counters if possible.
if lbChain, ok := existingNATChains[svcXlbChain]; ok {
writeLine(proxier.natChains, lbChain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(svcXlbChain))
}
activeNATChains[svcXlbChain] = true
} else if activeNATChains[svcXlbChain] {
// Cleanup the previously created XLB chain for this service
delete(activeNATChains, svcXlbChain)
}
// Capture the clusterIP.
args = append(args[:0],
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s cluster IP"`, svcNameString),
"-m", protocol, "-p", protocol,
"-d", fmt.Sprintf("%s/32", svcInfo.clusterIP.String()),
"--dport", strconv.Itoa(svcInfo.port),
)
if proxier.masqueradeAll {
writeLine(proxier.natRules, append(args, "-j", string(KubeMarkMasqChain))...)
} else if len(proxier.clusterCIDR) > 0 {
// This masquerades off-cluster traffic to a service VIP. The idea
// is that you can establish a static route for your Service range,
// routing to any node, and that node will bridge into the Service
// for you. Since that might bounce off-node, we masquerade here.
// If/when we support "Local" policy for VIPs, we should update this.
writeLine(proxier.natRules, append(args, "! -s", proxier.clusterCIDR, "-j", string(KubeMarkMasqChain))...)
}
writeLine(proxier.natRules, append(args, "-j", string(svcChain))...)
// Capture externalIPs.
for _, externalIP := range svcInfo.externalIPs {
// If the "external" IP happens to be an IP that is local to this
// machine, hold the local port open so no other process can open it
// (because the socket might open but it would never work).
if local, err := isLocalIP(externalIP); err != nil {
glog.Errorf("can't determine if IP is local, assuming not: %v", err)
} else if local {
lp := localPort{
desc: "externalIP for " + svcNameString,
ip: externalIP,
port: svcInfo.port,
protocol: protocol,
}
if proxier.portsMap[lp] != nil {
glog.V(4).Infof("Port %s was open before and is still needed", lp.String())
replacementPortsMap[lp] = proxier.portsMap[lp]
} else {
socket, err := proxier.portMapper.OpenLocalPort(&lp)
if err != nil {
msg := fmt.Sprintf("can't open %s, skipping this externalIP: %v", lp.String(), err)
proxier.recorder.Eventf(
&clientv1.ObjectReference{
Kind: "Node",
Name: proxier.hostname,
UID: types.UID(proxier.hostname),
Namespace: "",
}, api.EventTypeWarning, err.Error(), msg)
glog.Error(msg)
continue
}
replacementPortsMap[lp] = socket
}
} // We're holding the port, so it's OK to install iptables rules.
args = append(args[:0],
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s external IP"`, svcNameString),
"-m", protocol, "-p", protocol,
"-d", fmt.Sprintf("%s/32", externalIP),
"--dport", strconv.Itoa(svcInfo.port),
)
// We have to SNAT packets to external IPs.
writeLine(proxier.natRules, append(args, "-j", string(KubeMarkMasqChain))...)
// Allow traffic for external IPs that does not come from a bridge (i.e. not from a container)
// nor from a local process to be forwarded to the service.
// This rule roughly translates to "all traffic from off-machine".
// This is imperfect in the face of network plugins that might not use a bridge, but we can revisit that later.
externalTrafficOnlyArgs := append(args,
"-m", "physdev", "!", "--physdev-is-in",
"-m", "addrtype", "!", "--src-type", "LOCAL")
writeLine(proxier.natRules, append(externalTrafficOnlyArgs, "-j", string(svcChain))...)
dstLocalOnlyArgs := append(args, "-m", "addrtype", "--dst-type", "LOCAL")
// Allow traffic bound for external IPs that happen to be recognized as local IPs to stay local.
// This covers cases like GCE load-balancers which get added to the local routing table.
writeLine(proxier.natRules, append(dstLocalOnlyArgs, "-j", string(svcChain))...)
// If the service has no endpoints then reject packets coming via externalIP
// Install ICMP Reject rule in filter table for destination=externalIP and dport=svcport
if len(proxier.endpointsMap[svcName]) == 0 {
writeLine(proxier.filterRules,
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString),
"-m", protocol, "-p", protocol,
"-d", fmt.Sprintf("%s/32", externalIP),
"--dport", strconv.Itoa(svcInfo.port),
"-j", "REJECT",
)
}
}
// Capture load-balancer ingress.
fwChain := svcInfo.serviceFirewallChainName
for _, ingress := range svcInfo.loadBalancerStatus.Ingress {
if ingress.IP != "" {
// create service firewall chain
if chain, ok := existingNATChains[fwChain]; ok {
writeLine(proxier.natChains, chain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(fwChain))
}
activeNATChains[fwChain] = true
// The service firewall rules are created based on ServiceSpec.loadBalancerSourceRanges field.
// This currently works for loadbalancers that preserves source ips.
// For loadbalancers which direct traffic to service NodePort, the firewall rules will not apply.
args = append(args[:0],
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcNameString),
"-m", protocol, "-p", protocol,
"-d", fmt.Sprintf("%s/32", ingress.IP),
"--dport", strconv.Itoa(svcInfo.port),
)
// jump to service firewall chain
writeLine(proxier.natRules, append(args, "-j", string(fwChain))...)
args = append(args[:0],
"-A", string(fwChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcNameString),
)
// Each source match rule in the FW chain may jump to either the SVC or the XLB chain
chosenChain := svcXlbChain
// If we are proxying globally, we need to masquerade in case we cross nodes.
// If we are proxying only locally, we can retain the source IP.
if !svcInfo.onlyNodeLocalEndpoints {
writeLine(proxier.natRules, append(args, "-j", string(KubeMarkMasqChain))...)
chosenChain = svcChain
}
if len(svcInfo.loadBalancerSourceRanges) == 0 {
// allow all sources, so jump directly to the KUBE-SVC or KUBE-XLB chain
writeLine(proxier.natRules, append(args, "-j", string(chosenChain))...)
} else {
// firewall filter based on each source range
allowFromNode := false
for _, src := range svcInfo.loadBalancerSourceRanges {
writeLine(proxier.natRules, append(args, "-s", src, "-j", string(chosenChain))...)
// ignore error because it has been validated
_, cidr, _ := net.ParseCIDR(src)
if cidr.Contains(proxier.nodeIP) {
allowFromNode = true
}
}
// generally, ip route rule was added to intercept request to loadbalancer vip from the
// loadbalancer's backend hosts. In this case, request will not hit the loadbalancer but loop back directly.
// Need to add the following rule to allow request on host.
if allowFromNode {
writeLine(proxier.natRules, append(args, "-s", fmt.Sprintf("%s/32", ingress.IP), "-j", string(chosenChain))...)
}
}
// If the packet was able to reach the end of firewall chain, then it did not get DNATed.
// It means the packet cannot go thru the firewall, then mark it for DROP
writeLine(proxier.natRules, append(args, "-j", string(KubeMarkDropChain))...)
}
}
// Capture nodeports. If we had more than 2 rules it might be
// worthwhile to make a new per-service chain for nodeport rules, but
// with just 2 rules it ends up being a waste and a cognitive burden.
if svcInfo.nodePort != 0 {
// Hold the local port open so no other process can open it
// (because the socket might open but it would never work).
lp := localPort{
desc: "nodePort for " + svcNameString,
ip: "",
port: svcInfo.nodePort,
protocol: protocol,
}
if proxier.portsMap[lp] != nil {
glog.V(4).Infof("Port %s was open before and is still needed", lp.String())
replacementPortsMap[lp] = proxier.portsMap[lp]
} else {
socket, err := proxier.portMapper.OpenLocalPort(&lp)
if err != nil {
glog.Errorf("can't open %s, skipping this nodePort: %v", lp.String(), err)
continue
}
if lp.protocol == "udp" {
proxier.clearUDPConntrackForPort(lp.port)
}
replacementPortsMap[lp] = socket
} // We're holding the port, so it's OK to install iptables rules.
args = append(args[:0],
"-A", string(kubeNodePortsChain),
"-m", "comment", "--comment", svcNameString,
"-m", protocol, "-p", protocol,
"--dport", strconv.Itoa(svcInfo.nodePort),
)
if !svcInfo.onlyNodeLocalEndpoints {
// Nodeports need SNAT, unless they're local.
writeLine(proxier.natRules, append(args, "-j", string(KubeMarkMasqChain))...)
// Jump to the service chain.
writeLine(proxier.natRules, append(args, "-j", string(svcChain))...)
} else {
// TODO: Make all nodePorts jump to the firewall chain.
// Currently we only create it for loadbalancers (#33586).
writeLine(proxier.natRules, append(args, "-j", string(svcXlbChain))...)
}
// If the service has no endpoints then reject packets. The filter
// table doesn't currently have the same per-service structure that
// the nat table does, so we just stick this into the kube-services
// chain.
if len(proxier.endpointsMap[svcName]) == 0 {
writeLine(proxier.filterRules,
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString),
"-m", "addrtype", "--dst-type", "LOCAL",
"-m", protocol, "-p", protocol,
"--dport", strconv.Itoa(svcInfo.nodePort),
"-j", "REJECT",
)
}
}
// If the service has no endpoints then reject packets.
if len(proxier.endpointsMap[svcName]) == 0 {
writeLine(proxier.filterRules,
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString),
"-m", protocol, "-p", protocol,
"-d", fmt.Sprintf("%s/32", svcInfo.clusterIP.String()),
"--dport", strconv.Itoa(svcInfo.port),
"-j", "REJECT",
)
continue
}
// From here on, we assume there are active endpoints.
// Generate the per-endpoint chains. We do this in multiple passes so we
// can group rules together.
// These two slices parallel each other - keep in sync
endpoints = endpoints[:0]
endpointChains = endpointChains[:0]
var endpointChain utiliptables.Chain
for _, ep := range proxier.endpointsMap[svcName] {
endpoints = append(endpoints, ep)
endpointChain = ep.endpointChain(svcNameString, protocol)
endpointChains = append(endpointChains, endpointChain)
// Create the endpoint chain, retaining counters if possible.
if chain, ok := existingNATChains[utiliptables.Chain(endpointChain)]; ok {
writeLine(proxier.natChains, chain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(endpointChain))
}
activeNATChains[endpointChain] = true
}
// First write session affinity rules, if applicable.
if svcInfo.sessionAffinityType == api.ServiceAffinityClientIP {
for _, endpointChain := range endpointChains {
writeLine(proxier.natRules,
"-A", string(svcChain),
"-m", "comment", "--comment", svcNameString,
"-m", "recent", "--name", string(endpointChain),
"--rcheck", "--seconds", strconv.Itoa(svcInfo.stickyMaxAgeMinutes*60), "--reap",
"-j", string(endpointChain))
}
}
// Now write loadbalancing & DNAT rules.
n := len(endpointChains)
for i, endpointChain := range endpointChains {
// Balancing rules in the per-service chain.
args = append(args[:0], []string{
"-A", string(svcChain),
"-m", "comment", "--comment", svcNameString,
}...)
if i < (n - 1) {
// Each rule is a probabilistic match.
args = append(args,
"-m", "statistic",
"--mode", "random",
"--probability", proxier.probability(n-i))
}
// The final (or only if n == 1) rule is a guaranteed match.
args = append(args, "-j", string(endpointChain))
writeLine(proxier.natRules, args...)
// Rules in the per-endpoint chain.
args = append(args[:0],
"-A", string(endpointChain),
"-m", "comment", "--comment", svcNameString,
)
// Handle traffic that loops back to the originator with SNAT.
writeLine(proxier.natRules, append(args,
"-s", fmt.Sprintf("%s/32", endpoints[i].IPPart()),
"-j", string(KubeMarkMasqChain))...)
// Update client-affinity lists.
if svcInfo.sessionAffinityType == api.ServiceAffinityClientIP {
args = append(args, "-m", "recent", "--name", string(endpointChain), "--set")
}
// DNAT to final destination.
args = append(args, "-m", protocol, "-p", protocol, "-j", "DNAT", "--to-destination", endpoints[i].endpoint)
writeLine(proxier.natRules, args...)
}
// The logic below this applies only if this service is marked as OnlyLocal
if !svcInfo.onlyNodeLocalEndpoints {
continue
}
// Now write ingress loadbalancing & DNAT rules only for services that request OnlyLocal traffic.
// TODO - This logic may be combinable with the block above that creates the svc balancer chain
localEndpoints := make([]*endpointsInfo, 0)
localEndpointChains := make([]utiliptables.Chain, 0)
for i := range endpointChains {
if endpoints[i].isLocal {
// These slices parallel each other; must be kept in sync
localEndpoints = append(localEndpoints, endpoints[i])
localEndpointChains = append(localEndpointChains, endpointChains[i])
}
}
// First rule in the chain redirects all pod -> external VIP traffic to the
// Service's ClusterIP instead. This happens whether or not we have local
// endpoints; only if clusterCIDR is specified
if len(proxier.clusterCIDR) > 0 {
args = append(args[:0],
"-A", string(svcXlbChain),
"-m", "comment", "--comment",
`"Redirect pods trying to reach external loadbalancer VIP to clusterIP"`,
"-s", proxier.clusterCIDR,
"-j", string(svcChain),
)
writeLine(proxier.natRules, args...)
}
numLocalEndpoints := len(localEndpointChains)
if numLocalEndpoints == 0 {
// Blackhole all traffic since there are no local endpoints
args = append(args[:0],
"-A", string(svcXlbChain),
"-m", "comment", "--comment",
fmt.Sprintf(`"%s has no local endpoints"`, svcNameString),
"-j",
string(KubeMarkDropChain),
)
writeLine(proxier.natRules, args...)
} else {
// Setup probability filter rules only over local endpoints
for i, endpointChain := range localEndpointChains {
// Balancing rules in the per-service chain.
args = append(args[:0],
"-A", string(svcXlbChain),
"-m", "comment", "--comment",
fmt.Sprintf(`"Balancing rule %d for %s"`, i, svcNameString),
)
if i < (numLocalEndpoints - 1) {
// Each rule is a probabilistic match.
args = append(args,
"-m", "statistic",
"--mode", "random",
"--probability", proxier.probability(numLocalEndpoints-i))
}
// The final (or only if n == 1) rule is a guaranteed match.
args = append(args, "-j", string(endpointChain))
writeLine(proxier.natRules, args...)
}
}
}
// Delete chains no longer in use.
for chain := range existingNATChains {
if !activeNATChains[chain] {
chainString := string(chain)
if !strings.HasPrefix(chainString, "KUBE-SVC-") && !strings.HasPrefix(chainString, "KUBE-SEP-") && !strings.HasPrefix(chainString, "KUBE-FW-") && !strings.HasPrefix(chainString, "KUBE-XLB-") {
// Ignore chains that aren't ours.
continue
}
// We must (as per iptables) write a chain-line for it, which has
// the nice effect of flushing the chain. Then we can remove the
// chain.
writeLine(proxier.natChains, existingNATChains[chain])
writeLine(proxier.natRules, "-X", chainString)
}
}
// Finally, tail-call to the nodeports chain. This needs to be after all
// other service portal rules.
writeLine(proxier.natRules,
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", `"kubernetes service nodeports; NOTE: this must be the last rule in this chain"`,
"-m", "addrtype", "--dst-type", "LOCAL",
"-j", string(kubeNodePortsChain))
// Write the end-of-table markers.
writeLine(proxier.filterRules, "COMMIT")
writeLine(proxier.natRules, "COMMIT")
// Sync rules.
// NOTE: NoFlushTables is used so we don't flush non-kubernetes chains in the table
proxier.iptablesData.Reset()
proxier.iptablesData.Write(proxier.filterChains.Bytes())
proxier.iptablesData.Write(proxier.filterRules.Bytes())
proxier.iptablesData.Write(proxier.natChains.Bytes())
proxier.iptablesData.Write(proxier.natRules.Bytes())
glog.V(5).Infof("Restoring iptables rules: %s", proxier.iptablesData.Bytes())
err = proxier.iptables.RestoreAll(proxier.iptablesData.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters)
if err != nil {
glog.Errorf("Failed to execute iptables-restore: %v", err)
glog.V(2).Infof("Rules:\n%s", proxier.iptablesData.Bytes())
// Revert new local ports.
revertPorts(replacementPortsMap, proxier.portsMap)
return
}
// Close old local ports and save new ones.
for k, v := range proxier.portsMap {
if replacementPortsMap[k] == nil {
v.Close()
}
}
proxier.portsMap = replacementPortsMap
// Update healthz timestamp.
if proxier.healthzServer != nil {
proxier.healthzServer.UpdateTimestamp()
}
// Update healthchecks. The endpoints list might include services that are
// not "OnlyLocal", but the services list will not, and the healthChecker
// will just drop those endpoints.
if err := proxier.healthChecker.SyncServices(hcServices); err != nil {
glog.Errorf("Error syncing healtcheck services: %v", err)
}
if err := proxier.healthChecker.SyncEndpoints(hcEndpoints); err != nil {
glog.Errorf("Error syncing healthcheck endoints: %v", err)
}
// Finish housekeeping.
// TODO: these and clearUDPConntrackForPort() could be made more consistent.
utilproxy.DeleteServiceConnections(proxier.exec, staleServices.List())
proxier.deleteEndpointConnections(staleEndpoints)
}
// clearUDPConntrackForPort deletes UDP conntrack entries for the given
// destination port. A non-positive port is rejected and only logged as an
// error; no entries are deleted in that case.
//
// Background: once a UDP "connection" has a conntrack entry, subsequent
// packets do not traverse the NAT table again, so stale entries can keep
// steering traffic to dead endpoints; clearing conntrack forces
// re-evaluation. Known issues:
// https://github.com/docker/docker/issues/8795
// https://github.com/kubernetes/kubernetes/issues/31983
func (proxier *Proxier) clearUDPConntrackForPort(port int) {
	glog.V(2).Infof("Deleting conntrack entries for udp connections")
	if port > 0 {
		err := utilproxy.ExecConntrackTool(proxier.exec, "-D", "-p", "udp", "--dport", strconv.Itoa(port))
		// conntrack exits non-zero when there is nothing to delete; that is
		// not an error from our point of view.
		if err != nil && !strings.Contains(err.Error(), noConnectionToDelete) {
			glog.Errorf("conntrack return with error: %v", err)
		}
	} else {
		glog.Errorf("Wrong port number. The port number must be greater than zero")
	}
}
// Join all words with spaces, terminate with newline and write to buf.
func writeLine(buf *bytes.Buffer, words ...string) {
// We avoid strings.Join for performance reasons.
for i := range words {
buf.WriteString(words[i])
if i < len(words)-1 {
buf.WriteByte(' ')
} else {
buf.WriteByte('\n')
}
}
}
// isLocalIP reports whether ip is assigned to one of this host's network
// interfaces. A malformed ip never matches any interface and yields
// (false, nil); errors come only from interface enumeration/parsing.
func isLocalIP(ip string) (bool, error) {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return false, err
	}
	// Parse once, outside the loop (the original re-parsed per interface).
	// net.IP.Equal on a nil (unparseable) IP simply never matches, which
	// preserves the original behavior for malformed input.
	parsedIP := net.ParseIP(ip)
	for i := range addrs {
		intf, _, err := net.ParseCIDR(addrs[i].String())
		if err != nil {
			return false, err
		}
		if parsedIP.Equal(intf) {
			return true, nil
		}
	}
	return false, nil
}
// openLocalPort opens and holds the given local port (TCP listen or UDP
// bind) and returns the socket so the caller can close it when the port is
// no longer needed. Unknown protocols yield an error.
func openLocalPort(lp *localPort) (closeable, error) {
	// For ports on node IPs, open the actual port and hold it, even though we
	// use iptables to redirect traffic.
	// This ensures a) that it's safe to use that port and b) that (a) stays
	// true. The risk is that some process on the node (e.g. sshd or kubelet)
	// is using a port and we give that same port out to a Service. That would
	// be bad because iptables would silently claim the traffic but the process
	// would never know.
	// NOTE: We should not need to have a real listen()ing socket - bind()
	// should be enough, but I can't figure out a way to e2e test without
	// it. Tools like 'ss' and 'netstat' do not show sockets that are
	// bind()ed but not listen()ed, and at least the default debian netcat
	// has no way to avoid about 10 seconds of retries.
	var socket closeable
	switch lp.protocol {
	case "tcp":
		listener, err := net.Listen("tcp", net.JoinHostPort(lp.ip, strconv.Itoa(lp.port)))
		if err != nil {
			return nil, err
		}
		socket = listener
	case "udp":
		addr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(lp.ip, strconv.Itoa(lp.port)))
		if err != nil {
			return nil, err
		}
		conn, err := net.ListenUDP("udp", addr)
		if err != nil {
			return nil, err
		}
		socket = conn
	default:
		return nil, fmt.Errorf("unknown protocol %q", lp.protocol)
	}
	glog.V(2).Infof("Opened local port %s", lp.String())
	return socket, nil
}
// revertPorts closes every port present in replacementPortsMap but absent
// from originalPortsMap — in other words, only the ports that were opened
// during the current (failed) sync attempt.
func revertPorts(replacementPortsMap, originalPortsMap map[localPort]closeable) {
	for port, socket := range replacementPortsMap {
		// Ports that were already held before this update must stay open.
		if originalPortsMap[port] == nil {
			glog.V(2).Infof("Closing local port %s after iptables-restore failure", port.String())
			socket.Close()
		}
	}
}
// NOTE: stray VCS commit-message line from file concatenation:
// refactor updateEndpointMap and updateServiceMap results
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package iptables
//
// NOTE: this needs to be tested in e2e since it uses iptables for everything.
//
import (
"bytes"
"crypto/sha256"
"encoding/base32"
"fmt"
"net"
"reflect"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientv1 "k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/helper"
apiservice "k8s.io/kubernetes/pkg/api/service"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/proxy"
"k8s.io/kubernetes/pkg/proxy/healthcheck"
utilproxy "k8s.io/kubernetes/pkg/proxy/util"
"k8s.io/kubernetes/pkg/util/async"
utilexec "k8s.io/kubernetes/pkg/util/exec"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
utilversion "k8s.io/kubernetes/pkg/util/version"
)
const (
	// iptablesMinVersion is the minimum version of iptables for which we will use the Proxier
	// from this package instead of the userspace Proxier. While most of the
	// features we need were available earlier, the '-C' flag was added more
	// recently. We use that indirectly in Ensure* functions, and if we don't
	// have it, we have to be extra careful about the exact args we feed in being
	// the same as the args we read back (iptables itself normalizes some args).
	// This is the "new" Proxier, so we require "new" versions of tools.
	iptablesMinVersion = utiliptables.MinCheckVersion

	// kubeServicesChain is the top-level services chain that per-service rules hang off.
	kubeServicesChain utiliptables.Chain = "KUBE-SERVICES"

	// kubeNodePortsChain holds the nodePort rules; jumped to last from kubeServicesChain.
	kubeNodePortsChain utiliptables.Chain = "KUBE-POSTROUTING" == "" && false // placeholder
)
// IPTablesVersioner can query the current iptables version.
type IPTablesVersioner interface {
	// GetVersion returns the iptables version as an "X.Y.Z" string.
	GetVersion() (string, error)
}
// KernelCompatTester tests whether the required kernel capabilities are
// present to run the iptables proxier.
type KernelCompatTester interface {
	// IsCompatible returns nil when the kernel supports what we need.
	IsCompatible() error
}
// CanUseIPTablesProxier returns true if we should use the iptables Proxier
// instead of the "classic" userspace Proxier. This is determined by checking
// the iptables version and for the existence of kernel features. It may return
// an error if it fails to get the iptables version without error, in which
// case it will also return false.
func CanUseIPTablesProxier(iptver IPTablesVersioner, kcompat KernelCompatTester) (bool, error) {
	required, err := utilversion.ParseGeneric(iptablesMinVersion)
	if err != nil {
		return false, err
	}
	installedStr, err := iptver.GetVersion()
	if err != nil {
		return false, err
	}
	installed, err := utilversion.ParseGeneric(installedStr)
	if err != nil {
		return false, err
	}
	if installed.LessThan(required) {
		// Too old: fall back to the userspace proxier, but not an error.
		return false, nil
	}
	// The iptables binary is new enough; make sure the kernel also has the
	// features we depend on.
	if err := kcompat.IsCompatible(); err != nil {
		return false, err
	}
	return true, nil
}
// LinuxKernelCompatTester probes sysctls to verify the running kernel has
// the features the iptables proxier needs.
type LinuxKernelCompatTester struct{}

// IsCompatible returns nil when the required sysctl is present.
func (lkct LinuxKernelCompatTester) IsCompatible() error {
	// Check for the required sysctls. We don't care about the value, just
	// that it exists. If this Proxier is chosen, we'll initialize it as we
	// need.
	_, err := utilsysctl.New().GetSysctl(sysctlRouteLocalnet)
	return err
}
// Sysctl keys used by this proxier (set/checked in NewProxier and
// LinuxKernelCompatTester.IsCompatible).
const sysctlRouteLocalnet = "net/ipv4/conf/all/route_localnet"
const sysctlBridgeCallIPTables = "net/bridge/bridge-nf-call-iptables"
// serviceInfo is the proxier's internal, per-service-port view of an
// api.Service, flattened for rule generation.
type serviceInfo struct {
	clusterIP                net.IP
	port                     int
	protocol                 api.Protocol
	nodePort                 int
	loadBalancerStatus       api.LoadBalancerStatus
	sessionAffinityType      api.ServiceAffinity
	stickyMaxAgeMinutes      int
	externalIPs              []string
	loadBalancerSourceRanges []string
	onlyNodeLocalEndpoints   bool
	healthCheckNodePort      int
	// The following fields are computed and stored for performance reasons
	// (see newServiceInfo).
	serviceNameString        string
	servicePortChainName     utiliptables.Chain
	serviceFirewallChainName utiliptables.Chain
	serviceLBChainName       utiliptables.Chain
}
// endpointsInfo is the proxier's internal view of a single endpoint
// ("ip:port") backing a service port.
type endpointsInfo struct {
	endpoint string // TODO: should be an endpointString type
	isLocal  bool   // true when the endpoint lives on this node
	// The following fields we lazily compute and store here for performance
	// reasons. If the protocol is the same as you expect it to be, then the
	// chainName can be reused, otherwise it should be recomputed.
	protocol  string
	chainName utiliptables.Chain
}
// IPPart returns just the IP portion of the "ip:port" endpoint string; if
// there is no colon the whole string is returned unchanged.
func (e *endpointsInfo) IPPart() string {
	if i := strings.Index(e.endpoint, ":"); i >= 0 {
		return e.endpoint[:i]
	}
	return e.endpoint
}
// endpointChain returns the iptables chain name for this endpoint under the
// given service/protocol, recomputing and caching it only when the protocol
// differs from the cached one.
func (e *endpointsInfo) endpointChain(svcNameString, protocol string) utiliptables.Chain {
	if e.protocol != protocol {
		e.protocol = protocol
		e.chainName = servicePortEndpointChainName(svcNameString, protocol, e.endpoint)
	}
	return e.chainName
}
// String returns a printable representation of the endpoint for logging.
func (e *endpointsInfo) String() string {
	return fmt.Sprintf("%v", *e)
}
// newServiceInfo builds the internal representation of one service port from
// the api objects, precomputing derived values (name string, chain names)
// for performance.
func newServiceInfo(svcPortName proxy.ServicePortName, port *api.ServicePort, service *api.Service) *serviceInfo {
	onlyNodeLocalEndpoints := false
	if utilfeature.DefaultFeatureGate.Enabled(features.ExternalTrafficLocalOnly) &&
		apiservice.RequestsOnlyLocalTraffic(service) {
		onlyNodeLocalEndpoints = true
	}
	info := &serviceInfo{
		clusterIP: net.ParseIP(service.Spec.ClusterIP),
		port:      int(port.Port),
		protocol:  port.Protocol,
		nodePort:  int(port.NodePort),
		// Deep-copy in case the service instance changes
		loadBalancerStatus:       *helper.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer),
		sessionAffinityType:      service.Spec.SessionAffinity,
		stickyMaxAgeMinutes:      180, // TODO: paramaterize this in the API.
		externalIPs:              make([]string, len(service.Spec.ExternalIPs)),
		loadBalancerSourceRanges: make([]string, len(service.Spec.LoadBalancerSourceRanges)),
		onlyNodeLocalEndpoints:   onlyNodeLocalEndpoints,
	}
	// Copy the slices so later mutation of the Service object cannot alias
	// into our stored state.
	copy(info.loadBalancerSourceRanges, service.Spec.LoadBalancerSourceRanges)
	copy(info.externalIPs, service.Spec.ExternalIPs)
	if apiservice.NeedsHealthCheck(service) {
		p := apiservice.GetServiceHealthCheckNodePort(service)
		if p == 0 {
			glog.Errorf("Service %q has no healthcheck nodeport", svcPortName.NamespacedName.String())
		} else {
			info.healthCheckNodePort = int(p)
		}
	}
	// Store the following for performance reasons.
	protocol := strings.ToLower(string(info.protocol))
	info.serviceNameString = svcPortName.String()
	info.servicePortChainName = servicePortChainName(info.serviceNameString, protocol)
	info.serviceFirewallChainName = serviceFirewallChainName(info.serviceNameString, protocol)
	info.serviceLBChainName = serviceLBChainName(info.serviceNameString, protocol)
	return info
}
// endpointsChange holds the state of an Endpoints object before the first
// unsynced change (previous) and after the most recent one (current).
type endpointsChange struct {
	previous proxyEndpointsMap
	current  proxyEndpointsMap
}

// endpointsChangeMap tracks pending endpoints changes keyed by namespaced
// name; lock guards items.
type endpointsChangeMap struct {
	lock     sync.Mutex
	hostname string
	items    map[types.NamespacedName]*endpointsChange
}
// serviceChange holds the state of a Service object before the first
// unsynced change (previous) and after the most recent one (current).
type serviceChange struct {
	previous proxyServiceMap
	current  proxyServiceMap
}

// serviceChangeMap tracks pending service changes keyed by namespaced name;
// lock guards items.
type serviceChangeMap struct {
	lock  sync.Mutex
	items map[types.NamespacedName]*serviceChange
}
// updateEndpointMapResult is the outcome of applying pending endpoints
// changes: healthcheck endpoint counts plus stale entries that the caller
// cleans up (conntrack).
type updateEndpointMapResult struct {
	hcEndpoints       map[types.NamespacedName]int
	staleEndpoints    map[endpointServicePair]bool
	staleServiceNames map[proxy.ServicePortName]bool
}

// updateServiceMapResult is the outcome of applying pending service changes:
// healthcheck node ports and cluster IPs of removed UDP services.
type updateServiceMapResult struct {
	hcServices    map[types.NamespacedName]uint16
	staleServices sets.String
}

// Internal map types keyed by service port name.
type proxyServiceMap map[proxy.ServicePortName]*serviceInfo
type proxyEndpointsMap map[proxy.ServicePortName][]*endpointsInfo
// newEndpointsChangeMap returns an empty endpoints change tracker bound to
// the given host name.
func newEndpointsChangeMap(hostname string) endpointsChangeMap {
	ecm := endpointsChangeMap{
		hostname: hostname,
		items:    make(map[types.NamespacedName]*endpointsChange),
	}
	return ecm
}
// update records a transition of the named Endpoints object from previous to
// current (either may be nil for add/delete). The first unsynced change
// captures the baseline; if later updates restore that baseline the pending
// entry is dropped. Returns true while any changes remain pending.
func (ecm *endpointsChangeMap) update(namespacedName *types.NamespacedName, previous, current *api.Endpoints) bool {
	ecm.lock.Lock()
	defer ecm.lock.Unlock()
	change, exists := ecm.items[*namespacedName]
	if !exists {
		change = &endpointsChange{}
		// Remember the state before the first of the accumulated changes.
		change.previous = endpointsToEndpointsMap(previous, ecm.hostname)
		ecm.items[*namespacedName] = change
	}
	change.current = endpointsToEndpointsMap(current, ecm.hostname)
	// A no-op net change needs no resync; forget it.
	if reflect.DeepEqual(change.previous, change.current) {
		delete(ecm.items, *namespacedName)
	}
	return len(ecm.items) > 0
}
// newServiceChangeMap returns an empty service change tracker.
func newServiceChangeMap() serviceChangeMap {
	scm := serviceChangeMap{
		items: make(map[types.NamespacedName]*serviceChange),
	}
	return scm
}
// update records a transition of the named Service object from previous to
// current (either may be nil for add/delete). The first unsynced change
// captures the baseline; if later updates restore that baseline the pending
// entry is dropped. Returns true while any changes remain pending.
func (scm *serviceChangeMap) update(namespacedName *types.NamespacedName, previous, current *api.Service) bool {
	scm.lock.Lock()
	defer scm.lock.Unlock()
	change, exists := scm.items[*namespacedName]
	if !exists {
		change = &serviceChange{}
		// Remember the state before the first of the accumulated changes.
		change.previous = serviceToServiceMap(previous)
		scm.items[*namespacedName] = change
	}
	change.current = serviceToServiceMap(current)
	// A no-op net change needs no resync; forget it.
	if reflect.DeepEqual(change.previous, change.current) {
		delete(scm.items, *namespacedName)
	}
	return len(scm.items) > 0
}
// merge copies every entry of other into sm (overwriting duplicates) and
// returns the set of port names seen in other.
func (sm *proxyServiceMap) merge(other proxyServiceMap) sets.String {
	existingPorts := sets.NewString()
	for svcPortName, info := range other {
		existingPorts.Insert(svcPortName.Port)
		if _, exists := (*sm)[svcPortName]; exists {
			glog.V(1).Infof("Updating existing service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol)
		} else {
			glog.V(1).Infof("Adding new service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol)
		}
		(*sm)[svcPortName] = info
	}
	return existingPorts
}
// unmerge deletes from sm every service port of other whose port name is not
// in existingPorts. Cluster IPs of removed UDP services are added to
// staleServices so the caller can flush their conntrack entries.
func (sm *proxyServiceMap) unmerge(other proxyServiceMap, existingPorts, staleServices sets.String) {
	for svcPortName := range other {
		if existingPorts.Has(svcPortName.Port) {
			// Still present in the merged (current) state; keep it.
			continue
		}
		info, exists := (*sm)[svcPortName]
		if exists {
			glog.V(1).Infof("Removing service port %q", svcPortName)
			// Only UDP services need conntrack cleanup on removal.
			if info.protocol == api.ProtocolUDP {
				staleServices.Insert(info.clusterIP.String())
			}
			delete(*sm, svcPortName)
		} else {
			glog.Errorf("Service port %q removed, but doesn't exists", svcPortName)
		}
	}
}
// merge copies all endpoint lists from other into em, overwriting any
// existing entries for the same service port.
func (em proxyEndpointsMap) merge(other proxyEndpointsMap) {
	for svcPortName, eps := range other {
		em[svcPortName] = eps
	}
}
// unmerge removes from em every service port present in other.
func (em proxyEndpointsMap) unmerge(other proxyEndpointsMap) {
	for svcPortName := range other {
		delete(em, svcPortName)
	}
}
// Proxier is an iptables based proxy for connections between a localhost:lport
// and services that provide the actual backends.
type Proxier struct {
	// endpointsChanges and serviceChanges contains all changes to endpoints and
	// services that happened since iptables was synced. For a single object,
	// changes are accumulated, i.e. previous is state from before all of them,
	// current is state after applying all of those.
	endpointsChanges endpointsChangeMap
	serviceChanges   serviceChangeMap

	mu           sync.Mutex // protects the following fields
	serviceMap   proxyServiceMap
	endpointsMap proxyEndpointsMap
	portsMap     map[localPort]closeable
	// endpointsSynced and servicesSynced are set to true when corresponding
	// objects are synced after startup. This is used to avoid updating iptables
	// with some partial data after kube-proxy restart.
	endpointsSynced bool
	servicesSynced  bool
	initialized     int32                         // accessed atomically; see setInitialized/isInitialized
	syncRunner      *async.BoundedFrequencyRunner // governs calls to syncProxyRules

	// These are effectively const and do not need the mutex to be held.
	iptables       utiliptables.Interface
	masqueradeAll  bool
	masqueradeMark string
	exec           utilexec.Interface
	clusterCIDR    string
	hostname       string
	nodeIP         net.IP
	portMapper     portOpener
	recorder       record.EventRecorder
	healthChecker  healthcheck.Server
	healthzServer  healthcheck.HealthzUpdater

	// Since converting probabilities (floats) to strings is expensive
	// and we are using only probabilities in the format of 1/n, we are
	// precomputing some number of those and cache for future reuse.
	precomputedProbabilities []string

	// The following buffers are used to reuse memory and avoid allocations
	// that are significantly impacting performance.
	iptablesData *bytes.Buffer
	filterChains *bytes.Buffer
	filterRules  *bytes.Buffer
	natChains    *bytes.Buffer
	natRules     *bytes.Buffer
}
// localPort identifies a port held open on a node IP; it is also used as a
// map key, so it contains only comparable fields.
type localPort struct {
	desc     string
	ip       string
	port     int
	protocol string
}

// String returns a printable representation for logging.
func (lp *localPort) String() string {
	return fmt.Sprintf("%q (%s:%d/%s)", lp.desc, lp.ip, lp.port, lp.protocol)
}

// closeable is the subset of a socket we need: the ability to close it.
type closeable interface {
	Close() error
}

// portOpener is an interface around port opening/closing.
// Abstracted out for testing.
type portOpener interface {
	OpenLocalPort(lp *localPort) (closeable, error)
}

// listenPortOpener opens ports by calling bind() and listen().
type listenPortOpener struct{}

// OpenLocalPort holds the given local port open.
func (l *listenPortOpener) OpenLocalPort(lp *localPort) (closeable, error) {
	return openLocalPort(lp)
}

// Proxier implements ProxyProvider
var _ proxy.ProxyProvider = &Proxier{}
// NewProxier returns a new Proxier given an iptables Interface instance.
// Because of the iptables logic, it is assumed that there is only a single Proxier active on a machine.
// An error will be returned if iptables fails to update or acquire the initial lock.
// Once a proxier is created, it will keep iptables up to date in the background and
// will not terminate if a particular iptables call fails.
func NewProxier(ipt utiliptables.Interface,
	sysctl utilsysctl.Interface,
	exec utilexec.Interface,
	syncPeriod time.Duration,
	minSyncPeriod time.Duration,
	masqueradeAll bool,
	masqueradeBit int,
	clusterCIDR string,
	hostname string,
	nodeIP net.IP,
	recorder record.EventRecorder,
	healthzServer healthcheck.HealthzUpdater,
) (*Proxier, error) {
	// check valid user input
	if minSyncPeriod > syncPeriod {
		return nil, fmt.Errorf("minSyncPeriod (%v) must be <= syncPeriod (%v)", minSyncPeriod, syncPeriod)
	}
	// Set the route_localnet sysctl we need for the proxier to work.
	// NOTE(review): the original comment here was truncated mid-sentence;
	// presumably route_localnet is needed for redirecting traffic aimed at
	// localhost addresses — confirm intent.
	if err := sysctl.SetSysctl(sysctlRouteLocalnet, 1); err != nil {
		return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlRouteLocalnet, err)
	}
	// Proxy needs br_netfilter and bridge-nf-call-iptables=1 when containers
	// are connected to a Linux bridge (but not SDN bridges). Until most
	// plugins handle this, log when config is missing
	if val, err := sysctl.GetSysctl(sysctlBridgeCallIPTables); err == nil && val != 1 {
		glog.Warningf("missing br-netfilter module or unset sysctl br-nf-call-iptables; proxy may not work as intended")
	}
	// Generate the masquerade mark to use for SNAT rules.
	if masqueradeBit < 0 || masqueradeBit > 31 {
		return nil, fmt.Errorf("invalid iptables-masquerade-bit %v not in [0, 31]", masqueradeBit)
	}
	masqueradeValue := 1 << uint(masqueradeBit)
	masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue)
	if nodeIP == nil {
		glog.Warningf("invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP")
		nodeIP = net.ParseIP("127.0.0.1")
	}
	if len(clusterCIDR) == 0 {
		glog.Warningf("clusterCIDR not specified, unable to distinguish between internal and external traffic")
	}
	healthChecker := healthcheck.NewServer(hostname, recorder, nil, nil) // use default implementations of deps
	proxier := &Proxier{
		portsMap:                 make(map[localPort]closeable),
		serviceMap:               make(proxyServiceMap),
		serviceChanges:           newServiceChangeMap(),
		endpointsMap:             make(proxyEndpointsMap),
		endpointsChanges:         newEndpointsChangeMap(hostname),
		iptables:                 ipt,
		masqueradeAll:            masqueradeAll,
		masqueradeMark:           masqueradeMark,
		exec:                     exec,
		clusterCIDR:              clusterCIDR,
		hostname:                 hostname,
		nodeIP:                   nodeIP,
		portMapper:               &listenPortOpener{},
		recorder:                 recorder,
		healthChecker:            healthChecker,
		healthzServer:            healthzServer,
		precomputedProbabilities: make([]string, 0, 1001),
		iptablesData:             bytes.NewBuffer(nil),
		filterChains:             bytes.NewBuffer(nil),
		filterRules:              bytes.NewBuffer(nil),
		natChains:                bytes.NewBuffer(nil),
		natRules:                 bytes.NewBuffer(nil),
	}
	burstSyncs := 2
	glog.V(3).Infof("minSyncPeriod: %v, syncPeriod: %v, burstSyncs: %d", minSyncPeriod, syncPeriod, burstSyncs)
	// The runner coalesces bursts of change events into bounded-frequency
	// calls to syncProxyRules.
	proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, syncPeriod, burstSyncs)
	return proxier, nil
}
// CleanupLeftovers removes all iptables rules and chains created by the Proxier
// It returns true if an error was encountered. Errors are logged.
func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) {
	// Unlink the services chain.
	args := []string{
		"-m", "comment", "--comment", "kubernetes service portals",
		"-j", string(kubeServicesChain),
	}
	tableChainsWithJumpServices := []struct {
		table utiliptables.Table
		chain utiliptables.Chain
	}{
		{utiliptables.TableFilter, utiliptables.ChainInput},
		{utiliptables.TableFilter, utiliptables.ChainOutput},
		{utiliptables.TableNAT, utiliptables.ChainOutput},
		{utiliptables.TableNAT, utiliptables.ChainPrerouting},
	}
	for _, tc := range tableChainsWithJumpServices {
		if err := ipt.DeleteRule(tc.table, tc.chain, args...); err != nil {
			// A missing rule is fine — there is simply nothing to unlink.
			if !utiliptables.IsNotFoundError(err) {
				glog.Errorf("Error removing pure-iptables proxy rule: %v", err)
				encounteredError = true
			}
		}
	}
	// Unlink the postrouting chain.
	args = []string{
		"-m", "comment", "--comment", "kubernetes postrouting rules",
		"-j", string(kubePostroutingChain),
	}
	if err := ipt.DeleteRule(utiliptables.TableNAT, utiliptables.ChainPostrouting, args...); err != nil {
		if !utiliptables.IsNotFoundError(err) {
			glog.Errorf("Error removing pure-iptables proxy rule: %v", err)
			encounteredError = true
		}
	}
	// Flush and remove all of our chains.
	iptablesData := bytes.NewBuffer(nil)
	if err := ipt.SaveInto(utiliptables.TableNAT, iptablesData); err != nil {
		glog.Errorf("Failed to execute iptables-save for %s: %v", utiliptables.TableNAT, err)
		encounteredError = true
	} else {
		existingNATChains := utiliptables.GetChainLines(utiliptables.TableNAT, iptablesData.Bytes())
		natChains := bytes.NewBuffer(nil)
		natRules := bytes.NewBuffer(nil)
		writeLine(natChains, "*nat")
		// Start with chains we know we need to remove.
		for _, chain := range []utiliptables.Chain{kubeServicesChain, kubeNodePortsChain, kubePostroutingChain, KubeMarkMasqChain} {
			if _, found := existingNATChains[chain]; found {
				chainString := string(chain)
				writeLine(natChains, existingNATChains[chain]) // flush
				writeLine(natRules, "-X", chainString)         // delete
			}
		}
		// Hunt for service and endpoint chains.
		for chain := range existingNATChains {
			chainString := string(chain)
			if strings.HasPrefix(chainString, "KUBE-SVC-") || strings.HasPrefix(chainString, "KUBE-SEP-") || strings.HasPrefix(chainString, "KUBE-FW-") || strings.HasPrefix(chainString, "KUBE-XLB-") {
				writeLine(natChains, existingNATChains[chain]) // flush
				writeLine(natRules, "-X", chainString)         // delete
			}
		}
		writeLine(natRules, "COMMIT")
		natLines := append(natChains.Bytes(), natRules.Bytes()...)
		// Write it.
		err = ipt.Restore(utiliptables.TableNAT, natLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters)
		if err != nil {
			glog.Errorf("Failed to execute iptables-restore for %s: %v", utiliptables.TableNAT, err)
			encounteredError = true
		}
	}
	{
		// The filter table only holds our kube-services chain; flush and
		// delete it via a minimal restore payload.
		filterBuf := bytes.NewBuffer(nil)
		writeLine(filterBuf, "*filter")
		writeLine(filterBuf, fmt.Sprintf(":%s - [0:0]", kubeServicesChain))
		writeLine(filterBuf, fmt.Sprintf("-X %s", kubeServicesChain))
		writeLine(filterBuf, "COMMIT")
		// Write it.
		if err := ipt.Restore(utiliptables.TableFilter, filterBuf.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters); err != nil {
			glog.Errorf("Failed to execute iptables-restore for %s: %v", utiliptables.TableFilter, err)
			encounteredError = true
		}
	}
	return encounteredError
}
// computeProbability renders 1/n as a fixed-point decimal with five
// fractional digits, the form used for iptables "--probability" arguments.
func computeProbability(n int) string {
	return strconv.FormatFloat(1.0/float64(n), 'f', 5, 64)
}
// precomputeProbabilities extends the probability-string cache up to index
// numberOfPrecomputed. Index 0 is a "<bad value>" sentinel so that index i
// holds the string for probability 1/i.
// This assumes proxier.mu is held.
func (proxier *Proxier) precomputeProbabilities(numberOfPrecomputed int) {
	if len(proxier.precomputedProbabilities) == 0 {
		proxier.precomputedProbabilities = append(proxier.precomputedProbabilities, "<bad value>")
	}
	for i := len(proxier.precomputedProbabilities); i <= numberOfPrecomputed; i++ {
		proxier.precomputedProbabilities = append(proxier.precomputedProbabilities, computeProbability(i))
	}
}
// probability returns the cached string form of 1/n, extending the cache on
// demand. This assumes proxier.mu is held.
func (proxier *Proxier) probability(n int) string {
	if n >= len(proxier.precomputedProbabilities) {
		proxier.precomputeProbabilities(n)
	}
	return proxier.precomputedProbabilities[n]
}
// Sync is called to synchronize the proxier state to iptables as soon as possible.
func (proxier *Proxier) Sync() {
	// The bounded-frequency runner coalesces bursts of calls.
	proxier.syncRunner.Run()
}
// SyncLoop runs periodic work. This is expected to run as a goroutine or as the main loop of the app. It does not return.
func (proxier *Proxier) SyncLoop() {
	// Update healthz timestamp at beginning in case Sync() never succeeds.
	if proxier.healthzServer != nil {
		proxier.healthzServer.UpdateTimestamp()
	}
	proxier.syncRunner.Loop(wait.NeverStop)
}
// setInitialized atomically records whether the proxier has received its
// initial state (both services and endpoints synced).
func (proxier *Proxier) setInitialized(value bool) {
	var flag int32
	if value {
		flag = 1
	}
	atomic.StoreInt32(&proxier.initialized, flag)
}
// isInitialized atomically reports whether setInitialized(true) has been called.
func (proxier *Proxier) isInitialized() bool {
	return atomic.LoadInt32(&proxier.initialized) > 0
}
// OnServiceAdd records a newly observed Service and schedules a sync if the
// proxier has received its initial state.
func (proxier *Proxier) OnServiceAdd(service *api.Service) {
	namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
	if proxier.serviceChanges.update(&namespacedName, nil, service) && proxier.isInitialized() {
		proxier.syncRunner.Run()
	}
}
// OnServiceUpdate records an observed Service modification and schedules a
// sync if the proxier has received its initial state.
func (proxier *Proxier) OnServiceUpdate(oldService, service *api.Service) {
	namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
	if proxier.serviceChanges.update(&namespacedName, oldService, service) && proxier.isInitialized() {
		proxier.syncRunner.Run()
	}
}
// OnServiceDelete records a deleted service (nil new state) in serviceChanges
// and, if the proxier is initialized, requests a sync. The change is recorded
// unconditionally so pre-initialization events survive.
func (proxier *Proxier) OnServiceDelete(service *api.Service) {
	nn := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
	changed := proxier.serviceChanges.update(&nn, service, nil)
	if changed && proxier.isInitialized() {
		proxier.syncRunner.Run()
	}
}
// OnServiceSynced marks the service cache as having completed its initial
// list, possibly flipping the proxier to initialized, and then syncs once.
func (proxier *Proxier) OnServiceSynced() {
	func() {
		proxier.mu.Lock()
		defer proxier.mu.Unlock()
		proxier.servicesSynced = true
		proxier.setInitialized(proxier.servicesSynced && proxier.endpointsSynced)
	}()
	// Sync unconditionally - this is called once per lifetime.
	proxier.syncProxyRules()
}
// shouldSkipService reports whether a service must not be proxied: either it
// has no usable ClusterIP ("None" or empty, i.e. headless) or it is of type
// ExternalName, which is handled purely by DNS.
func shouldSkipService(svcName types.NamespacedName, service *api.Service) bool {
	switch {
	case !helper.IsServiceIPSet(service):
		// if ClusterIP is "None" or empty, skip proxying
		glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
		return true
	case service.Spec.Type == api.ServiceTypeExternalName:
		// Even if ClusterIP is set, ServiceTypeExternalName services don't get proxied
		glog.V(3).Infof("Skipping service %s due to Type=ExternalName", svcName)
		return true
	default:
		return false
	}
}
// updateServiceMap applies all pending service changes to <serviceMap> and
// reports stale services plus the health-check node ports to expose.
// <serviceMap> is updated by this function (based on the given changes).
// <changes> map is cleared after applying them.
func updateServiceMap(
	serviceMap proxyServiceMap,
	changes *serviceChangeMap) (result updateServiceMapResult) {
	result.staleServices = sets.NewString()
	// Drain the pending changes under the change map's lock, then release it
	// before the (potentially larger) health-check scan below.
	applyChanges := func() {
		changes.lock.Lock()
		defer changes.lock.Unlock()
		for _, ch := range changes.items {
			existingPorts := serviceMap.merge(ch.current)
			serviceMap.unmerge(ch.previous, existingPorts, result.staleServices)
		}
		changes.items = make(map[types.NamespacedName]*serviceChange)
	}
	applyChanges()
	// TODO: If this will appear to be computationally expensive, consider
	// computing this incrementally similarly to serviceMap.
	result.hcServices = make(map[types.NamespacedName]uint16)
	for svcPortName, info := range serviceMap {
		if info.healthCheckNodePort != 0 {
			result.hcServices[svcPortName.NamespacedName] = uint16(info.healthCheckNodePort)
		}
	}
	return result
}
// OnEndpointsAdd records a newly observed Endpoints object in
// endpointsChanges and, if the proxier is initialized, requests a sync. The
// change is recorded unconditionally so pre-initialization events survive.
func (proxier *Proxier) OnEndpointsAdd(endpoints *api.Endpoints) {
	nn := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}
	changed := proxier.endpointsChanges.update(&nn, nil, endpoints)
	if changed && proxier.isInitialized() {
		proxier.syncRunner.Run()
	}
}
// OnEndpointsUpdate records a modified Endpoints object (old and new) in
// endpointsChanges and, if the proxier is initialized, requests a sync. The
// change is recorded unconditionally so pre-initialization events survive.
func (proxier *Proxier) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoints) {
	nn := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}
	changed := proxier.endpointsChanges.update(&nn, oldEndpoints, endpoints)
	if changed && proxier.isInitialized() {
		proxier.syncRunner.Run()
	}
}
// OnEndpointsDelete records a deleted Endpoints object (nil new state) in
// endpointsChanges and, if the proxier is initialized, requests a sync. The
// change is recorded unconditionally so pre-initialization events survive.
func (proxier *Proxier) OnEndpointsDelete(endpoints *api.Endpoints) {
	nn := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}
	changed := proxier.endpointsChanges.update(&nn, endpoints, nil)
	if changed && proxier.isInitialized() {
		proxier.syncRunner.Run()
	}
}
// OnEndpointsSynced marks the endpoints cache as having completed its initial
// list, possibly flipping the proxier to initialized, and then syncs once.
func (proxier *Proxier) OnEndpointsSynced() {
	func() {
		proxier.mu.Lock()
		defer proxier.mu.Unlock()
		proxier.endpointsSynced = true
		proxier.setInitialized(proxier.servicesSynced && proxier.endpointsSynced)
	}()
	// Sync unconditionally - this is called once per lifetime.
	proxier.syncProxyRules()
}
// updateEndpointsMap applies all pending endpoints changes to <endpointsMap>
// and reports stale endpoints/services plus local endpoint counts for
// health checking (when the ExternalTrafficLocalOnly feature gate is on).
// <endpointsMap> is updated by this function (based on the given changes).
// <changes> map is cleared after applying them.
func updateEndpointsMap(
	endpointsMap proxyEndpointsMap,
	changes *endpointsChangeMap,
	hostname string) (result updateEndpointMapResult) {
	result.staleEndpoints = make(map[endpointServicePair]bool)
	result.staleServiceNames = make(map[proxy.ServicePortName]bool)
	// Drain the pending changes under the change map's lock, then release it
	// before the local-IP scan below.
	applyChanges := func() {
		changes.lock.Lock()
		defer changes.lock.Unlock()
		for _, ch := range changes.items {
			endpointsMap.unmerge(ch.previous)
			endpointsMap.merge(ch.current)
			detectStaleConnections(ch.previous, ch.current, result.staleEndpoints, result.staleServiceNames)
		}
		changes.items = make(map[types.NamespacedName]*endpointsChange)
	}
	applyChanges()
	// hcEndpoints is only needed (and only computed) when local-traffic
	// health checking is enabled.
	if !utilfeature.DefaultFeatureGate.Enabled(features.ExternalTrafficLocalOnly) {
		return
	}
	// TODO: If this will appear to be computationally expensive, consider
	// computing this incrementally similarly to endpointsMap.
	result.hcEndpoints = make(map[types.NamespacedName]int)
	for nsn, ips := range getLocalIPs(endpointsMap) {
		result.hcEndpoints[nsn] = len(ips)
	}
	return result
}
// detectStaleConnections compares old and new endpoint maps.
// <staleEndpoints> and <staleServiceNames> are modified by this function with detected stale connections:
// an endpoint present in the old map but absent from the new one is stale,
// and a service port that went from zero endpoints to some is flagged so UDP
// conntrack blackhole entries can be flushed.
func detectStaleConnections(oldEndpointsMap, newEndpointsMap proxyEndpointsMap, staleEndpoints map[endpointServicePair]bool, staleServiceNames map[proxy.ServicePortName]bool) {
	for svcPortName, oldEps := range oldEndpointsMap {
		newEps := newEndpointsMap[svcPortName]
		for _, old := range oldEps {
			found := false
			for _, cur := range newEps {
				if *cur == *old {
					found = true
					break
				}
			}
			if !found {
				glog.V(4).Infof("Stale endpoint %v -> %v", svcPortName, old.endpoint)
				staleEndpoints[endpointServicePair{endpoint: old.endpoint, servicePortName: svcPortName}] = true
			}
		}
	}
	for svcPortName, epList := range newEndpointsMap {
		// For udp service, if its backend changes from 0 to non-0. There may exist a conntrack entry that could blackhole traffic to the service.
		if len(epList) > 0 && len(oldEndpointsMap[svcPortName]) == 0 {
			staleServiceNames[svcPortName] = true
		}
	}
}
// getLocalIPs collects, per service, the set of endpoint IPs that live on
// this node (isLocal). Only the IP part of each "ip:port" endpoint is kept.
func getLocalIPs(endpointsMap proxyEndpointsMap) map[types.NamespacedName]sets.String {
	localIPs := make(map[types.NamespacedName]sets.String)
	for svcPortName, epList := range endpointsMap {
		for _, ep := range epList {
			if !ep.isLocal {
				continue
			}
			nsn := svcPortName.NamespacedName
			ips, ok := localIPs[nsn]
			if !ok {
				ips = sets.NewString()
				localIPs[nsn] = ips
			}
			ips.Insert(ep.IPPart()) // just the IP part
		}
	}
	return localIPs
}
// Translates single Endpoints object to proxyEndpointsMap.
// This function is used for incremental updated of endpointsMap.
//
// NOTE: endpoints object should NOT be modified.
func endpointsToEndpointsMap(endpoints *api.Endpoints, hostname string) proxyEndpointsMap {
	if endpoints == nil {
		return nil
	}
	endpointsMap := make(proxyEndpointsMap)
	// We need to build a map of portname -> all ip:ports for that
	// portname. Explode Endpoints.Subsets[*] into this structure.
	// Distinct index names (si/pi/ai) avoid the shadowing of a reused `i`.
	for si := range endpoints.Subsets {
		ss := &endpoints.Subsets[si]
		for pi := range ss.Ports {
			port := &ss.Ports[pi]
			if port.Port == 0 {
				glog.Warningf("ignoring invalid endpoint port %s", port.Name)
				continue
			}
			svcPortName := proxy.ServicePortName{
				NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name},
				Port:           port.Name,
			}
			for ai := range ss.Addresses {
				addr := &ss.Addresses[ai]
				if addr.IP == "" {
					glog.Warningf("ignoring invalid endpoint port %s with empty host", port.Name)
					continue
				}
				epInfo := &endpointsInfo{
					endpoint: net.JoinHostPort(addr.IP, strconv.Itoa(int(port.Port))),
					// Local means the endpoint's node name matches this proxier's hostname.
					isLocal: addr.NodeName != nil && *addr.NodeName == hostname,
				}
				endpointsMap[svcPortName] = append(endpointsMap[svcPortName], epInfo)
			}
			if glog.V(3) {
				epStrings := []string{}
				for _, ep := range endpointsMap[svcPortName] {
					epStrings = append(epStrings, ep.endpoint)
				}
				glog.Infof("Setting endpoints for %q to %+v", svcPortName, epStrings)
			}
		}
	}
	return endpointsMap
}
// Translates single Service object to proxyServiceMap.
// Headless and ExternalName services (see shouldSkipService) yield nil.
//
// NOTE: service object should NOT be modified.
func serviceToServiceMap(service *api.Service) proxyServiceMap {
	if service == nil {
		return nil
	}
	svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
	if shouldSkipService(svcName, service) {
		return nil
	}
	result := make(proxyServiceMap)
	for i := range service.Spec.Ports {
		port := &service.Spec.Ports[i]
		name := proxy.ServicePortName{NamespacedName: svcName, Port: port.Name}
		result[name] = newServiceInfo(name, port, service)
	}
	return result
}
// portProtoHash takes the ServicePortName and protocol for a service
// returns the associated 16 character hash. This is computed by hashing (sha256)
// then encoding to base32 and truncating to 16 chars. We do this because IPTables
// Chain Names must be <= 28 chars long, and the longer they are the harder they are to read.
func portProtoHash(servicePortName string, protocol string) string {
	sum := sha256.Sum256([]byte(servicePortName + protocol))
	return base32.StdEncoding.EncodeToString(sum[:])[:16]
}
// servicePortChainName takes the ServicePortName for a service and
// returns the associated iptables chain. This is computed by hashing (sha256)
// then encoding to base32 and truncating with the prefix "KUBE-SVC-".
func servicePortChainName(servicePortName string, protocol string) utiliptables.Chain {
	name := "KUBE-SVC-" + portProtoHash(servicePortName, protocol)
	return utiliptables.Chain(name)
}
// serviceFirewallChainName takes the ServicePortName for a service and
// returns the associated iptables chain. This is computed by hashing (sha256)
// then encoding to base32 and truncating with the prefix "KUBE-FW-".
func serviceFirewallChainName(servicePortName string, protocol string) utiliptables.Chain {
	name := "KUBE-FW-" + portProtoHash(servicePortName, protocol)
	return utiliptables.Chain(name)
}
// serviceLBChainName takes the ServicePortName for a service and
// returns the associated iptables chain. This is computed by hashing (sha256)
// then encoding to base32 and truncating with the prefix "KUBE-XLB-". We do
// this because IPTables Chain Names must be <= 28 chars long, and the longer
// they are the harder they are to read.
func serviceLBChainName(servicePortName string, protocol string) utiliptables.Chain {
	name := "KUBE-XLB-" + portProtoHash(servicePortName, protocol)
	return utiliptables.Chain(name)
}
// This is the same as servicePortChainName but with the endpoint included,
// producing a per-endpoint "KUBE-SEP-" chain name.
func servicePortEndpointChainName(servicePortName string, protocol string, endpoint string) utiliptables.Chain {
	sum := sha256.Sum256([]byte(servicePortName + protocol + endpoint))
	name := "KUBE-SEP-" + base32.StdEncoding.EncodeToString(sum[:])[:16]
	return utiliptables.Chain(name)
}
// endpointServicePair identifies one endpoint ("ip:port" string) of one
// service port; it is used as a set key when tracking stale connections.
type endpointServicePair struct {
	endpoint string
	servicePortName proxy.ServicePortName
}
// IPPart returns the IP component of the pair's "ip:port" endpoint string.
// net.SplitHostPort is used so that bracketed IPv6 endpoints such as
// "[fe80::1]:80" yield "fe80::1"; a naive search for the first colon would
// return "[fe80". If the string cannot be split (e.g. it carries no port),
// it is returned unchanged, matching the previous no-colon behavior.
func (esp *endpointServicePair) IPPart() string {
	if ip, _, err := net.SplitHostPort(esp.endpoint); err == nil {
		return ip
	}
	return esp.endpoint
}
// noConnectionToDelete is the conntrack CLI output indicating that no flow
// entries matched the deletion filter; callers treat it as a non-error.
const noConnectionToDelete = "0 flow entries have been deleted"
// After a UDP endpoint has been removed, we must flush any pending conntrack entries to it, or else we
// risk sending more traffic to it, all of which will be lost (because UDP).
// This assumes the proxier mutex is held
func (proxier *Proxier) deleteEndpointConnections(connectionMap map[endpointServicePair]bool) {
	for epSvcPair := range connectionMap {
		// Only UDP services need conntrack flushing; TCP tears down on its own.
		if svcInfo, ok := proxier.serviceMap[epSvcPair.servicePortName]; ok && svcInfo.protocol == api.ProtocolUDP {
			// Use IPPart rather than slicing at the first colon: it shares the
			// parsing logic used elsewhere and does not panic when the endpoint
			// string unexpectedly lacks a ":port" suffix (strings.Index would
			// return -1 and the slice expression would panic).
			endpointIP := epSvcPair.IPPart()
			glog.V(2).Infof("Deleting connection tracking state for service IP %s, endpoint IP %s", svcInfo.clusterIP.String(), endpointIP)
			err := utilproxy.ExecConntrackTool(proxier.exec, "-D", "--orig-dst", svcInfo.clusterIP.String(), "--dst-nat", endpointIP, "-p", "udp")
			if err != nil && !strings.Contains(err.Error(), noConnectionToDelete) {
				// TODO: Better handling for deletion failure. When failure occur, stale udp connection may not get flushed.
				// These stale udp connection will keep black hole traffic. Making this a best effort operation for now, since it
				// is expensive to baby sit all udp connections to kubernetes services.
				glog.Errorf("conntrack return with error: %v", err)
			}
		}
	}
}
// This is where all of the iptables-save/restore calls happen.
// The only other iptables rules are those that are setup in iptablesInit()
// This assumes proxier.mu is NOT held
func (proxier *Proxier) syncProxyRules() {
proxier.mu.Lock()
defer proxier.mu.Unlock()
start := time.Now()
defer func() {
SyncProxyRulesLatency.Observe(sinceInMicroseconds(start))
glog.V(4).Infof("syncProxyRules took %v", time.Since(start))
}()
// don't sync rules till we've received services and endpoints
if !proxier.endpointsSynced || !proxier.servicesSynced {
glog.V(2).Info("Not syncing iptables until Services and Endpoints have been received from master")
return
}
// We assume that if this was called, we really want to sync them,
// even if nothing changed in the meantime. In other words, callers are
// responsible for detecting no-op changes and not calling this function.
serviceUpdateResult := updateServiceMap(
proxier.serviceMap, &proxier.serviceChanges)
endpointUpdateResult := updateEndpointsMap(
proxier.endpointsMap, &proxier.endpointsChanges, proxier.hostname)
staleServices := serviceUpdateResult.staleServices
// merge stale services gathered from updateEndpointsMap
for svcPortName := range endpointUpdateResult.staleServiceNames {
if svcInfo, ok := proxier.serviceMap[svcPortName]; ok && svcInfo != nil && svcInfo.protocol == api.ProtocolUDP {
glog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.clusterIP.String())
staleServices.Insert(svcInfo.clusterIP.String())
}
}
glog.V(3).Infof("Syncing iptables rules")
// Create and link the kube services chain.
{
tablesNeedServicesChain := []utiliptables.Table{utiliptables.TableFilter, utiliptables.TableNAT}
for _, table := range tablesNeedServicesChain {
if _, err := proxier.iptables.EnsureChain(table, kubeServicesChain); err != nil {
glog.Errorf("Failed to ensure that %s chain %s exists: %v", table, kubeServicesChain, err)
return
}
}
tableChainsNeedJumpServices := []struct {
table utiliptables.Table
chain utiliptables.Chain
}{
{utiliptables.TableFilter, utiliptables.ChainInput},
{utiliptables.TableFilter, utiliptables.ChainOutput},
{utiliptables.TableNAT, utiliptables.ChainOutput},
{utiliptables.TableNAT, utiliptables.ChainPrerouting},
}
comment := "kubernetes service portals"
args := []string{"-m", "comment", "--comment", comment, "-j", string(kubeServicesChain)}
for _, tc := range tableChainsNeedJumpServices {
if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, tc.table, tc.chain, args...); err != nil {
glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", tc.table, tc.chain, kubeServicesChain, err)
return
}
}
}
// Create and link the kube postrouting chain.
{
if _, err := proxier.iptables.EnsureChain(utiliptables.TableNAT, kubePostroutingChain); err != nil {
glog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, kubePostroutingChain, err)
return
}
comment := "kubernetes postrouting rules"
args := []string{"-m", "comment", "--comment", comment, "-j", string(kubePostroutingChain)}
if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, utiliptables.TableNAT, utiliptables.ChainPostrouting, args...); err != nil {
glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, kubePostroutingChain, err)
return
}
}
//
// Below this point we will not return until we try to write the iptables rules.
//
// Get iptables-save output so we can check for existing chains and rules.
// This will be a map of chain name to chain with rules as stored in iptables-save/iptables-restore
existingFilterChains := make(map[utiliptables.Chain]string)
proxier.iptablesData.Reset()
err := proxier.iptables.SaveInto(utiliptables.TableFilter, proxier.iptablesData)
if err != nil { // if we failed to get any rules
glog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err)
} else { // otherwise parse the output
existingFilterChains = utiliptables.GetChainLines(utiliptables.TableFilter, proxier.iptablesData.Bytes())
}
existingNATChains := make(map[utiliptables.Chain]string)
proxier.iptablesData.Reset()
err = proxier.iptables.SaveInto(utiliptables.TableNAT, proxier.iptablesData)
if err != nil { // if we failed to get any rules
glog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err)
} else { // otherwise parse the output
existingNATChains = utiliptables.GetChainLines(utiliptables.TableNAT, proxier.iptablesData.Bytes())
}
// Reset all buffers used later.
// This is to avoid memory reallocations and thus improve performance.
proxier.filterChains.Reset()
proxier.filterRules.Reset()
proxier.natChains.Reset()
proxier.natRules.Reset()
// Write table headers.
writeLine(proxier.filterChains, "*filter")
writeLine(proxier.natChains, "*nat")
// Make sure we keep stats for the top-level chains, if they existed
// (which most should have because we created them above).
if chain, ok := existingFilterChains[kubeServicesChain]; ok {
writeLine(proxier.filterChains, chain)
} else {
writeLine(proxier.filterChains, utiliptables.MakeChainLine(kubeServicesChain))
}
if chain, ok := existingNATChains[kubeServicesChain]; ok {
writeLine(proxier.natChains, chain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(kubeServicesChain))
}
if chain, ok := existingNATChains[kubeNodePortsChain]; ok {
writeLine(proxier.natChains, chain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(kubeNodePortsChain))
}
if chain, ok := existingNATChains[kubePostroutingChain]; ok {
writeLine(proxier.natChains, chain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(kubePostroutingChain))
}
if chain, ok := existingNATChains[KubeMarkMasqChain]; ok {
writeLine(proxier.natChains, chain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(KubeMarkMasqChain))
}
// Install the kubernetes-specific postrouting rules. We use a whole chain for
// this so that it is easier to flush and change, for example if the mark
// value should ever change.
writeLine(proxier.natRules, []string{
"-A", string(kubePostroutingChain),
"-m", "comment", "--comment", `"kubernetes service traffic requiring SNAT"`,
"-m", "mark", "--mark", proxier.masqueradeMark,
"-j", "MASQUERADE",
}...)
// Install the kubernetes-specific masquerade mark rule. We use a whole chain for
// this so that it is easier to flush and change, for example if the mark
// value should ever change.
writeLine(proxier.natRules, []string{
"-A", string(KubeMarkMasqChain),
"-j", "MARK", "--set-xmark", proxier.masqueradeMark,
}...)
// Accumulate NAT chains to keep.
activeNATChains := map[utiliptables.Chain]bool{} // use a map as a set
// Accumulate the set of local ports that we will be holding open once this update is complete
replacementPortsMap := map[localPort]closeable{}
// We are creating those slices ones here to avoid memory reallocations
// in every loop. Note that reuse the memory, instead of doing:
// slice = <some new slice>
// you should always do one of the below:
// slice = slice[:0] // and then append to it
// slice = append(slice[:0], ...)
endpoints := make([]*endpointsInfo, 0)
endpointChains := make([]utiliptables.Chain, 0)
// To avoid growing this slice, we arbitrarily set its size to 64,
// there is never more than that many arguments for a single line.
// Note that even if we go over 64, it will still be correct - it
// is just for efficiency, not correctness.
args := make([]string, 64)
// Build rules for each service.
var svcNameString string
for svcName, svcInfo := range proxier.serviceMap {
protocol := strings.ToLower(string(svcInfo.protocol))
svcNameString = svcInfo.serviceNameString
// Create the per-service chain, retaining counters if possible.
svcChain := svcInfo.servicePortChainName
if chain, ok := existingNATChains[svcChain]; ok {
writeLine(proxier.natChains, chain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(svcChain))
}
activeNATChains[svcChain] = true
svcXlbChain := svcInfo.serviceLBChainName
if svcInfo.onlyNodeLocalEndpoints {
// Only for services request OnlyLocal traffic
// create the per-service LB chain, retaining counters if possible.
if lbChain, ok := existingNATChains[svcXlbChain]; ok {
writeLine(proxier.natChains, lbChain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(svcXlbChain))
}
activeNATChains[svcXlbChain] = true
} else if activeNATChains[svcXlbChain] {
// Cleanup the previously created XLB chain for this service
delete(activeNATChains, svcXlbChain)
}
// Capture the clusterIP.
args = append(args[:0],
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s cluster IP"`, svcNameString),
"-m", protocol, "-p", protocol,
"-d", fmt.Sprintf("%s/32", svcInfo.clusterIP.String()),
"--dport", strconv.Itoa(svcInfo.port),
)
if proxier.masqueradeAll {
writeLine(proxier.natRules, append(args, "-j", string(KubeMarkMasqChain))...)
} else if len(proxier.clusterCIDR) > 0 {
// This masquerades off-cluster traffic to a service VIP. The idea
// is that you can establish a static route for your Service range,
// routing to any node, and that node will bridge into the Service
// for you. Since that might bounce off-node, we masquerade here.
// If/when we support "Local" policy for VIPs, we should update this.
writeLine(proxier.natRules, append(args, "! -s", proxier.clusterCIDR, "-j", string(KubeMarkMasqChain))...)
}
writeLine(proxier.natRules, append(args, "-j", string(svcChain))...)
// Capture externalIPs.
for _, externalIP := range svcInfo.externalIPs {
// If the "external" IP happens to be an IP that is local to this
// machine, hold the local port open so no other process can open it
// (because the socket might open but it would never work).
if local, err := isLocalIP(externalIP); err != nil {
glog.Errorf("can't determine if IP is local, assuming not: %v", err)
} else if local {
lp := localPort{
desc: "externalIP for " + svcNameString,
ip: externalIP,
port: svcInfo.port,
protocol: protocol,
}
if proxier.portsMap[lp] != nil {
glog.V(4).Infof("Port %s was open before and is still needed", lp.String())
replacementPortsMap[lp] = proxier.portsMap[lp]
} else {
socket, err := proxier.portMapper.OpenLocalPort(&lp)
if err != nil {
msg := fmt.Sprintf("can't open %s, skipping this externalIP: %v", lp.String(), err)
proxier.recorder.Eventf(
&clientv1.ObjectReference{
Kind: "Node",
Name: proxier.hostname,
UID: types.UID(proxier.hostname),
Namespace: "",
}, api.EventTypeWarning, err.Error(), msg)
glog.Error(msg)
continue
}
replacementPortsMap[lp] = socket
}
} // We're holding the port, so it's OK to install iptables rules.
args = append(args[:0],
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s external IP"`, svcNameString),
"-m", protocol, "-p", protocol,
"-d", fmt.Sprintf("%s/32", externalIP),
"--dport", strconv.Itoa(svcInfo.port),
)
// We have to SNAT packets to external IPs.
writeLine(proxier.natRules, append(args, "-j", string(KubeMarkMasqChain))...)
// Allow traffic for external IPs that does not come from a bridge (i.e. not from a container)
// nor from a local process to be forwarded to the service.
// This rule roughly translates to "all traffic from off-machine".
// This is imperfect in the face of network plugins that might not use a bridge, but we can revisit that later.
externalTrafficOnlyArgs := append(args,
"-m", "physdev", "!", "--physdev-is-in",
"-m", "addrtype", "!", "--src-type", "LOCAL")
writeLine(proxier.natRules, append(externalTrafficOnlyArgs, "-j", string(svcChain))...)
dstLocalOnlyArgs := append(args, "-m", "addrtype", "--dst-type", "LOCAL")
// Allow traffic bound for external IPs that happen to be recognized as local IPs to stay local.
// This covers cases like GCE load-balancers which get added to the local routing table.
writeLine(proxier.natRules, append(dstLocalOnlyArgs, "-j", string(svcChain))...)
// If the service has no endpoints then reject packets coming via externalIP
// Install ICMP Reject rule in filter table for destination=externalIP and dport=svcport
if len(proxier.endpointsMap[svcName]) == 0 {
writeLine(proxier.filterRules,
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString),
"-m", protocol, "-p", protocol,
"-d", fmt.Sprintf("%s/32", externalIP),
"--dport", strconv.Itoa(svcInfo.port),
"-j", "REJECT",
)
}
}
// Capture load-balancer ingress.
fwChain := svcInfo.serviceFirewallChainName
for _, ingress := range svcInfo.loadBalancerStatus.Ingress {
if ingress.IP != "" {
// create service firewall chain
if chain, ok := existingNATChains[fwChain]; ok {
writeLine(proxier.natChains, chain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(fwChain))
}
activeNATChains[fwChain] = true
// The service firewall rules are created based on ServiceSpec.loadBalancerSourceRanges field.
// This currently works for loadbalancers that preserves source ips.
// For loadbalancers which direct traffic to service NodePort, the firewall rules will not apply.
args = append(args[:0],
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcNameString),
"-m", protocol, "-p", protocol,
"-d", fmt.Sprintf("%s/32", ingress.IP),
"--dport", strconv.Itoa(svcInfo.port),
)
// jump to service firewall chain
writeLine(proxier.natRules, append(args, "-j", string(fwChain))...)
args = append(args[:0],
"-A", string(fwChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcNameString),
)
// Each source match rule in the FW chain may jump to either the SVC or the XLB chain
chosenChain := svcXlbChain
// If we are proxying globally, we need to masquerade in case we cross nodes.
// If we are proxying only locally, we can retain the source IP.
if !svcInfo.onlyNodeLocalEndpoints {
writeLine(proxier.natRules, append(args, "-j", string(KubeMarkMasqChain))...)
chosenChain = svcChain
}
if len(svcInfo.loadBalancerSourceRanges) == 0 {
// allow all sources, so jump directly to the KUBE-SVC or KUBE-XLB chain
writeLine(proxier.natRules, append(args, "-j", string(chosenChain))...)
} else {
// firewall filter based on each source range
allowFromNode := false
for _, src := range svcInfo.loadBalancerSourceRanges {
writeLine(proxier.natRules, append(args, "-s", src, "-j", string(chosenChain))...)
// ignore error because it has been validated
_, cidr, _ := net.ParseCIDR(src)
if cidr.Contains(proxier.nodeIP) {
allowFromNode = true
}
}
// generally, ip route rule was added to intercept request to loadbalancer vip from the
// loadbalancer's backend hosts. In this case, request will not hit the loadbalancer but loop back directly.
// Need to add the following rule to allow request on host.
if allowFromNode {
writeLine(proxier.natRules, append(args, "-s", fmt.Sprintf("%s/32", ingress.IP), "-j", string(chosenChain))...)
}
}
// If the packet was able to reach the end of firewall chain, then it did not get DNATed.
// It means the packet cannot go thru the firewall, then mark it for DROP
writeLine(proxier.natRules, append(args, "-j", string(KubeMarkDropChain))...)
}
}
// Capture nodeports. If we had more than 2 rules it might be
// worthwhile to make a new per-service chain for nodeport rules, but
// with just 2 rules it ends up being a waste and a cognitive burden.
if svcInfo.nodePort != 0 {
// Hold the local port open so no other process can open it
// (because the socket might open but it would never work).
lp := localPort{
desc: "nodePort for " + svcNameString,
ip: "",
port: svcInfo.nodePort,
protocol: protocol,
}
if proxier.portsMap[lp] != nil {
glog.V(4).Infof("Port %s was open before and is still needed", lp.String())
replacementPortsMap[lp] = proxier.portsMap[lp]
} else {
socket, err := proxier.portMapper.OpenLocalPort(&lp)
if err != nil {
glog.Errorf("can't open %s, skipping this nodePort: %v", lp.String(), err)
continue
}
if lp.protocol == "udp" {
proxier.clearUDPConntrackForPort(lp.port)
}
replacementPortsMap[lp] = socket
} // We're holding the port, so it's OK to install iptables rules.
args = append(args[:0],
"-A", string(kubeNodePortsChain),
"-m", "comment", "--comment", svcNameString,
"-m", protocol, "-p", protocol,
"--dport", strconv.Itoa(svcInfo.nodePort),
)
if !svcInfo.onlyNodeLocalEndpoints {
// Nodeports need SNAT, unless they're local.
writeLine(proxier.natRules, append(args, "-j", string(KubeMarkMasqChain))...)
// Jump to the service chain.
writeLine(proxier.natRules, append(args, "-j", string(svcChain))...)
} else {
// TODO: Make all nodePorts jump to the firewall chain.
// Currently we only create it for loadbalancers (#33586).
writeLine(proxier.natRules, append(args, "-j", string(svcXlbChain))...)
}
// If the service has no endpoints then reject packets. The filter
// table doesn't currently have the same per-service structure that
// the nat table does, so we just stick this into the kube-services
// chain.
if len(proxier.endpointsMap[svcName]) == 0 {
writeLine(proxier.filterRules,
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString),
"-m", "addrtype", "--dst-type", "LOCAL",
"-m", protocol, "-p", protocol,
"--dport", strconv.Itoa(svcInfo.nodePort),
"-j", "REJECT",
)
}
}
// If the service has no endpoints then reject packets.
if len(proxier.endpointsMap[svcName]) == 0 {
writeLine(proxier.filterRules,
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString),
"-m", protocol, "-p", protocol,
"-d", fmt.Sprintf("%s/32", svcInfo.clusterIP.String()),
"--dport", strconv.Itoa(svcInfo.port),
"-j", "REJECT",
)
continue
}
// From here on, we assume there are active endpoints.
// Generate the per-endpoint chains. We do this in multiple passes so we
// can group rules together.
// These two slices parallel each other - keep in sync
endpoints = endpoints[:0]
endpointChains = endpointChains[:0]
var endpointChain utiliptables.Chain
for _, ep := range proxier.endpointsMap[svcName] {
endpoints = append(endpoints, ep)
endpointChain = ep.endpointChain(svcNameString, protocol)
endpointChains = append(endpointChains, endpointChain)
// Create the endpoint chain, retaining counters if possible.
if chain, ok := existingNATChains[utiliptables.Chain(endpointChain)]; ok {
writeLine(proxier.natChains, chain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(endpointChain))
}
activeNATChains[endpointChain] = true
}
// First write session affinity rules, if applicable.
if svcInfo.sessionAffinityType == api.ServiceAffinityClientIP {
for _, endpointChain := range endpointChains {
writeLine(proxier.natRules,
"-A", string(svcChain),
"-m", "comment", "--comment", svcNameString,
"-m", "recent", "--name", string(endpointChain),
"--rcheck", "--seconds", strconv.Itoa(svcInfo.stickyMaxAgeMinutes*60), "--reap",
"-j", string(endpointChain))
}
}
// Now write loadbalancing & DNAT rules.
n := len(endpointChains)
for i, endpointChain := range endpointChains {
// Balancing rules in the per-service chain.
args = append(args[:0], []string{
"-A", string(svcChain),
"-m", "comment", "--comment", svcNameString,
}...)
if i < (n - 1) {
// Each rule is a probabilistic match.
args = append(args,
"-m", "statistic",
"--mode", "random",
"--probability", proxier.probability(n-i))
}
// The final (or only if n == 1) rule is a guaranteed match.
args = append(args, "-j", string(endpointChain))
writeLine(proxier.natRules, args...)
// Rules in the per-endpoint chain.
args = append(args[:0],
"-A", string(endpointChain),
"-m", "comment", "--comment", svcNameString,
)
// Handle traffic that loops back to the originator with SNAT.
writeLine(proxier.natRules, append(args,
"-s", fmt.Sprintf("%s/32", endpoints[i].IPPart()),
"-j", string(KubeMarkMasqChain))...)
// Update client-affinity lists.
if svcInfo.sessionAffinityType == api.ServiceAffinityClientIP {
args = append(args, "-m", "recent", "--name", string(endpointChain), "--set")
}
// DNAT to final destination.
args = append(args, "-m", protocol, "-p", protocol, "-j", "DNAT", "--to-destination", endpoints[i].endpoint)
writeLine(proxier.natRules, args...)
}
// The logic below this applies only if this service is marked as OnlyLocal
if !svcInfo.onlyNodeLocalEndpoints {
continue
}
// Now write ingress loadbalancing & DNAT rules only for services that request OnlyLocal traffic.
// TODO - This logic may be combinable with the block above that creates the svc balancer chain
localEndpoints := make([]*endpointsInfo, 0)
localEndpointChains := make([]utiliptables.Chain, 0)
for i := range endpointChains {
if endpoints[i].isLocal {
// These slices parallel each other; must be kept in sync
localEndpoints = append(localEndpoints, endpoints[i])
localEndpointChains = append(localEndpointChains, endpointChains[i])
}
}
// First rule in the chain redirects all pod -> external VIP traffic to the
// Service's ClusterIP instead. This happens whether or not we have local
// endpoints; only if clusterCIDR is specified
if len(proxier.clusterCIDR) > 0 {
args = append(args[:0],
"-A", string(svcXlbChain),
"-m", "comment", "--comment",
`"Redirect pods trying to reach external loadbalancer VIP to clusterIP"`,
"-s", proxier.clusterCIDR,
"-j", string(svcChain),
)
writeLine(proxier.natRules, args...)
}
numLocalEndpoints := len(localEndpointChains)
if numLocalEndpoints == 0 {
// Blackhole all traffic since there are no local endpoints
args = append(args[:0],
"-A", string(svcXlbChain),
"-m", "comment", "--comment",
fmt.Sprintf(`"%s has no local endpoints"`, svcNameString),
"-j",
string(KubeMarkDropChain),
)
writeLine(proxier.natRules, args...)
} else {
// Setup probability filter rules only over local endpoints
for i, endpointChain := range localEndpointChains {
// Balancing rules in the per-service chain.
args = append(args[:0],
"-A", string(svcXlbChain),
"-m", "comment", "--comment",
fmt.Sprintf(`"Balancing rule %d for %s"`, i, svcNameString),
)
if i < (numLocalEndpoints - 1) {
// Each rule is a probabilistic match.
args = append(args,
"-m", "statistic",
"--mode", "random",
"--probability", proxier.probability(numLocalEndpoints-i))
}
// The final (or only if n == 1) rule is a guaranteed match.
args = append(args, "-j", string(endpointChain))
writeLine(proxier.natRules, args...)
}
}
}
// Delete chains no longer in use.
for chain := range existingNATChains {
if !activeNATChains[chain] {
chainString := string(chain)
if !strings.HasPrefix(chainString, "KUBE-SVC-") && !strings.HasPrefix(chainString, "KUBE-SEP-") && !strings.HasPrefix(chainString, "KUBE-FW-") && !strings.HasPrefix(chainString, "KUBE-XLB-") {
// Ignore chains that aren't ours.
continue
}
// We must (as per iptables) write a chain-line for it, which has
// the nice effect of flushing the chain. Then we can remove the
// chain.
writeLine(proxier.natChains, existingNATChains[chain])
writeLine(proxier.natRules, "-X", chainString)
}
}
// Finally, tail-call to the nodeports chain. This needs to be after all
// other service portal rules.
writeLine(proxier.natRules,
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", `"kubernetes service nodeports; NOTE: this must be the last rule in this chain"`,
"-m", "addrtype", "--dst-type", "LOCAL",
"-j", string(kubeNodePortsChain))
// Write the end-of-table markers.
writeLine(proxier.filterRules, "COMMIT")
writeLine(proxier.natRules, "COMMIT")
// Sync rules.
// NOTE: NoFlushTables is used so we don't flush non-kubernetes chains in the table
proxier.iptablesData.Reset()
proxier.iptablesData.Write(proxier.filterChains.Bytes())
proxier.iptablesData.Write(proxier.filterRules.Bytes())
proxier.iptablesData.Write(proxier.natChains.Bytes())
proxier.iptablesData.Write(proxier.natRules.Bytes())
glog.V(5).Infof("Restoring iptables rules: %s", proxier.iptablesData.Bytes())
err = proxier.iptables.RestoreAll(proxier.iptablesData.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters)
if err != nil {
glog.Errorf("Failed to execute iptables-restore: %v", err)
glog.V(2).Infof("Rules:\n%s", proxier.iptablesData.Bytes())
// Revert new local ports.
revertPorts(replacementPortsMap, proxier.portsMap)
return
}
// Close old local ports and save new ones.
for k, v := range proxier.portsMap {
if replacementPortsMap[k] == nil {
v.Close()
}
}
proxier.portsMap = replacementPortsMap
// Update healthz timestamp.
if proxier.healthzServer != nil {
proxier.healthzServer.UpdateTimestamp()
}
// Update healthchecks. The endpoints list might include services that are
// not "OnlyLocal", but the services list will not, and the healthChecker
// will just drop those endpoints.
if err := proxier.healthChecker.SyncServices(serviceUpdateResult.hcServices); err != nil {
glog.Errorf("Error syncing healtcheck services: %v", err)
}
if err := proxier.healthChecker.SyncEndpoints(endpointUpdateResult.hcEndpoints); err != nil {
glog.Errorf("Error syncing healthcheck endoints: %v", err)
}
// Finish housekeeping.
// TODO: these and clearUDPConntrackForPort() could be made more consistent.
utilproxy.DeleteServiceConnections(proxier.exec, staleServices.List())
proxier.deleteEndpointConnections(endpointUpdateResult.staleEndpoints)
}
// clearUDPConntrackForPort flushes conntrack entries for UDP flows whose
// destination is the given port. When a packet arrives, it will not go
// through the NAT table again because it is not "the first" packet of its
// flow, so stale conntrack entries keep steering traffic to old endpoints.
// The solution is clearing the conntrack state. Known issues:
// https://github.com/docker/docker/issues/8795
// https://github.com/kubernetes/kubernetes/issues/31983
// NOTE(review): despite the historical wording ("or all entries when port
// equal zero"), port == 0 is rejected below, not treated as "delete all".
func (proxier *Proxier) clearUDPConntrackForPort(port int) {
	glog.V(2).Infof("Deleting conntrack entries for udp connections")
	if port > 0 {
		// Runs "conntrack -D -p udp --dport <port>". "No connection to
		// delete" is an expected outcome, not an error.
		err := utilproxy.ExecConntrackTool(proxier.exec, "-D", "-p", "udp", "--dport", strconv.Itoa(port))
		if err != nil && !strings.Contains(err.Error(), noConnectionToDelete) {
			glog.Errorf("conntrack return with error: %v", err)
		}
	} else {
		glog.Errorf("Wrong port number. The port number must be greater than zero")
	}
}
// Join all words with spaces, terminate with newline and write to buf.
func writeLine(buf *bytes.Buffer, words ...string) {
// We avoid strings.Join for performance reasons.
for i := range words {
buf.WriteString(words[i])
if i < len(words)-1 {
buf.WriteByte(' ')
} else {
buf.WriteByte('\n')
}
}
}
// isLocalIP reports whether ip is assigned to one of this node's network
// interfaces. A non-nil error is returned if the interface addresses cannot
// be listed or parsed; an unparseable ip simply never matches.
func isLocalIP(ip string) (bool, error) {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return false, err
	}
	// Parse the candidate once; the original re-parsed it on every loop
	// iteration. A nil (invalid) IP compares unequal to everything, which
	// preserves the original behavior for bad input.
	parsed := net.ParseIP(ip)
	for i := range addrs {
		intf, _, err := net.ParseCIDR(addrs[i].String())
		if err != nil {
			return false, err
		}
		if parsed.Equal(intf) {
			return true, nil
		}
	}
	return false, nil
}
// openLocalPort binds and listens on the address described by lp and
// returns the held socket. For ports on node IPs we claim the real port
// even though iptables redirects the traffic: that (a) proves the port is
// safe to hand out and (b) keeps it that way. Otherwise some process on
// the node (e.g. sshd or kubelet) could already be using the port and
// iptables would silently steal its traffic.
// NOTE: a bind() without listen() ought to be enough, but tools like 'ss'
// and 'netstat' do not show bound-only sockets, and the default debian
// netcat retries for ~10 seconds, which makes e2e testing impractical —
// so we listen() as well.
func openLocalPort(lp *localPort) (closeable, error) {
	hostPort := net.JoinHostPort(lp.ip, strconv.Itoa(lp.port))
	var socket closeable
	switch lp.protocol {
	case "tcp":
		l, err := net.Listen("tcp", hostPort)
		if err != nil {
			return nil, err
		}
		socket = l
	case "udp":
		addr, err := net.ResolveUDPAddr("udp", hostPort)
		if err != nil {
			return nil, err
		}
		c, err := net.ListenUDP("udp", addr)
		if err != nil {
			return nil, err
		}
		socket = c
	default:
		return nil, fmt.Errorf("unknown protocol %q", lp.protocol)
	}
	glog.V(2).Infof("Opened local port %s", lp.String())
	return socket, nil
}
// revertPorts closes every port present in replacementPortsMap but absent
// from originalPortsMap — i.e. only the ports opened during this sync —
// leaving ports that were already open before the update untouched.
func revertPorts(replacementPortsMap, originalPortsMap map[localPort]closeable) {
	for port, sock := range replacementPortsMap {
		if originalPortsMap[port] != nil {
			// Was open before this update; keep it.
			continue
		}
		glog.V(2).Infof("Closing local port %s after iptables-restore failure", port.String())
		sock.Close()
	}
}
|
package realtime
import (
"sync"
"testing"
"time"
"github.com/go-redis/redis"
"github.com/stretchr/testify/assert"
)
// testDoc is a minimal document implementation used as the payload of the
// events published by these tests; it carries only an id and a doctype.
type testDoc struct {
	id      string
	doctype string
}

// ID returns the document identifier.
func (t *testDoc) ID() string { return t.id }

// DocType returns the document type.
func (t *testDoc) DocType() string { return t.doctype }

// MarshalJSON encodes the doc with CouchDB-like _id and _type fields.
func (t *testDoc) MarshalJSON() ([]byte, error) {
	return []byte(`{"_id":"` + t.id + `", "_type":"` + t.doctype + `"}`), nil
}
// TestMemRealtime exercises the in-memory hub: one published event must fan
// out to every matching subscriber (including the firehose returned by
// SubscribeLocalAll), Close must fail on a second call, and publishing with
// no remaining listeners must not block or panic.
func TestMemRealtime(t *testing.T) {
	h := newMemHub()
	c1 := h.Subscriber("testing")
	c2 := h.Subscriber("testing")
	c3 := h.SubscribeLocalAll()
	wg := sync.WaitGroup{}
	err := c1.Subscribe("io.cozy.testobject")
	assert.NoError(t, err)
	err = c2.Subscribe("io.cozy.testobject")
	assert.NoError(t, err)
	// Each reader consumes a single event, then stops.
	wg.Add(1)
	go func() {
		for e := range c1.Channel {
			assert.Equal(t, "foo", e.Doc.ID())
			break
		}
		wg.Done()
	}()
	wg.Add(1)
	go func() {
		for e := range c2.Channel {
			assert.Equal(t, "foo", e.Doc.ID())
			break
		}
		wg.Done()
	}()
	wg.Add(1)
	go func() {
		// The firehose subscriber also sees the event's domain.
		for e := range c3.Channel {
			assert.Equal(t, "testing", e.Domain)
			assert.Equal(t, "foo", e.Doc.ID())
			break
		}
		wg.Done()
	}()
	// Publish slightly later so the readers above are already blocked on
	// their channels.
	time.AfterFunc(1*time.Millisecond, func() {
		h.Publish(&Event{
			Domain: "testing",
			Doc: &testDoc{
				doctype: "io.cozy.testobject",
				id:      "foo",
			},
		})
	})
	wg.Wait()
	err = c1.Close()
	assert.NoError(t, err)
	err = c2.Close()
	assert.NoError(t, err)
	err = c3.Close()
	assert.NoError(t, err)
	// Closing an already-closed subscriber must report an error.
	err = c1.Close()
	assert.Error(t, err)
	// Every subscriber is closed: these publishes must be dropped silently.
	h.Publish(&Event{
		Domain: "testing",
		Doc: &testDoc{
			doctype: "io.cozy.testobject",
			id:      "nobodywillseeme",
		},
	})
	h.Publish(&Event{
		Domain: "testing",
		Doc: &testDoc{
			doctype: "io.cozy.testobject",
			id:      "meneither",
		},
	})
	time.Sleep(1 * time.Millisecond)
	// A fresh subscriber on two doctypes must receive the event published
	// on each of them, in publish order.
	c4 := h.Subscriber("testing")
	err = c4.Subscribe("io.cozy.testobject")
	assert.NoError(t, err)
	err = c4.Subscribe("io.cozy.testobject2")
	assert.NoError(t, err)
	wg.Add(2)
	go func() {
		expected := "bar"
		for e := range c4.Channel {
			assert.Equal(t, expected, e.Doc.ID())
			wg.Done()
			if expected == "baz" {
				break
			}
			expected = "baz"
		}
	}()
	time.AfterFunc(1*time.Millisecond, func() {
		h.Publish(&Event{
			Domain: "testing",
			Doc: &testDoc{
				doctype: "io.cozy.testobject",
				id:      "bar",
			},
		})
	})
	time.AfterFunc(2*time.Millisecond, func() {
		h.Publish(&Event{
			Domain: "testing",
			Doc: &testDoc{
				doctype: "io.cozy.testobject2",
				id:      "baz",
			},
		})
	})
	wg.Wait()
}
// TestRedisRealtime runs the same scenario as TestMemRealtime against the
// Redis-backed hub. It requires a reachable redis server at localhost:6379
// (database 6).
func TestRedisRealtime(t *testing.T) {
	opt, err := redis.ParseURL("redis://localhost:6379/6")
	assert.NoError(t, err)
	client := redis.NewClient(opt)
	h := newRedisHub(client)
	c1 := h.Subscriber("testing")
	c2 := h.Subscriber("testing")
	c3 := h.SubscribeLocalAll()
	wg := sync.WaitGroup{}
	err = c1.Subscribe("io.cozy.testobject")
	assert.NoError(t, err)
	err = c2.Subscribe("io.cozy.testobject")
	assert.NoError(t, err)
	// Each reader consumes a single event, then stops.
	wg.Add(1)
	go func() {
		for e := range c1.Channel {
			assert.Equal(t, "foo", e.Doc.ID())
			break
		}
		wg.Done()
	}()
	wg.Add(1)
	go func() {
		for e := range c2.Channel {
			assert.Equal(t, "foo", e.Doc.ID())
			break
		}
		wg.Done()
	}()
	wg.Add(1)
	go func() {
		// The firehose subscriber also sees the event's domain.
		for e := range c3.Channel {
			assert.Equal(t, "testing", e.Domain)
			assert.Equal(t, "foo", e.Doc.ID())
			break
		}
		wg.Done()
	}()
	// Publish slightly later so the readers are already blocked.
	time.AfterFunc(1*time.Millisecond, func() {
		h.Publish(&Event{
			Domain: "testing",
			Doc: &testDoc{
				doctype: "io.cozy.testobject",
				id:      "foo",
			},
		})
	})
	wg.Wait()
	err = c1.Close()
	assert.NoError(t, err)
	err = c2.Close()
	assert.NoError(t, err)
	err = c3.Close()
	assert.NoError(t, err)
	// A second Close on the same subscriber must fail.
	err = c1.Close()
	assert.Error(t, err)
	// Nobody is listening anymore: these publishes must be dropped.
	h.Publish(&Event{
		Domain: "testing",
		Doc: &testDoc{
			doctype: "io.cozy.testobject",
			id:      "nobodywillseeme",
		},
	})
	h.Publish(&Event{
		Domain: "testing",
		Doc: &testDoc{
			doctype: "io.cozy.testobject",
			id:      "meneither",
		},
	})
	// Redis pub/sub is asynchronous; give it time to settle before the
	// next subscription round.
	time.Sleep(100 * time.Millisecond)
	c4 := h.Subscriber("testing")
	err = c4.Subscribe("io.cozy.testobject")
	assert.NoError(t, err)
	err = c4.Subscribe("io.cozy.testobject2")
	assert.NoError(t, err)
	wg.Add(2)
	go func() {
		expected := "bar"
		for e := range c4.Channel {
			assert.Equal(t, expected, e.Doc.ID())
			wg.Done()
			if expected == "baz" {
				break
			}
			expected = "baz"
		}
	}()
	time.AfterFunc(1*time.Millisecond, func() {
		h.Publish(&Event{
			Domain: "testing",
			Doc: &testDoc{
				doctype: "io.cozy.testobject",
				id:      "bar",
			},
		})
	})
	time.AfterFunc(2*time.Millisecond, func() {
		h.Publish(&Event{
			Domain: "testing",
			Doc: &testDoc{
				doctype: "io.cozy.testobject2",
				id:      "baz",
			},
		})
	})
	wg.Wait()
}
Add some tests for realtime.Watch
package realtime
import (
"sync"
"testing"
"time"
"github.com/go-redis/redis"
"github.com/stretchr/testify/assert"
)
// testDoc is a minimal document implementation used as the payload of the
// events published by these tests; it carries only an id and a doctype.
type testDoc struct {
	id      string
	doctype string
}

// ID returns the document identifier.
func (t *testDoc) ID() string { return t.id }

// DocType returns the document type.
func (t *testDoc) DocType() string { return t.doctype }

// MarshalJSON encodes the doc with CouchDB-like _id and _type fields.
func (t *testDoc) MarshalJSON() ([]byte, error) {
	j := `{"_id":"` + t.id + `", "_type":"` + t.doctype + `"}`
	return []byte(j), nil
}
// TestMemRealtime exercises the in-memory hub: one published event must fan
// out to every matching subscriber (including the firehose returned by
// SubscribeLocalAll), Close must fail on a second call, and publishing with
// no remaining listeners must not block or panic.
func TestMemRealtime(t *testing.T) {
	h := newMemHub()
	c1 := h.Subscriber("testing")
	c2 := h.Subscriber("testing")
	c3 := h.SubscribeLocalAll()
	wg := sync.WaitGroup{}
	err := c1.Subscribe("io.cozy.testobject")
	assert.NoError(t, err)
	err = c2.Subscribe("io.cozy.testobject")
	assert.NoError(t, err)
	// Each reader consumes a single event, then stops.
	wg.Add(1)
	go func() {
		for e := range c1.Channel {
			assert.Equal(t, "foo", e.Doc.ID())
			break
		}
		wg.Done()
	}()
	wg.Add(1)
	go func() {
		for e := range c2.Channel {
			assert.Equal(t, "foo", e.Doc.ID())
			break
		}
		wg.Done()
	}()
	wg.Add(1)
	go func() {
		// The firehose subscriber also sees the event's domain.
		for e := range c3.Channel {
			assert.Equal(t, "testing", e.Domain)
			assert.Equal(t, "foo", e.Doc.ID())
			break
		}
		wg.Done()
	}()
	// Publish slightly later so the readers above are already blocked on
	// their channels.
	time.AfterFunc(1*time.Millisecond, func() {
		h.Publish(&Event{
			Domain: "testing",
			Doc: &testDoc{
				doctype: "io.cozy.testobject",
				id:      "foo",
			},
		})
	})
	wg.Wait()
	err = c1.Close()
	assert.NoError(t, err)
	err = c2.Close()
	assert.NoError(t, err)
	err = c3.Close()
	assert.NoError(t, err)
	// Closing an already-closed subscriber must report an error.
	err = c1.Close()
	assert.Error(t, err)
	// Every subscriber is closed: these publishes must be dropped silently.
	h.Publish(&Event{
		Domain: "testing",
		Doc: &testDoc{
			doctype: "io.cozy.testobject",
			id:      "nobodywillseeme",
		},
	})
	h.Publish(&Event{
		Domain: "testing",
		Doc: &testDoc{
			doctype: "io.cozy.testobject",
			id:      "meneither",
		},
	})
	time.Sleep(1 * time.Millisecond)
	// A fresh subscriber on two doctypes must receive the event published
	// on each of them, in publish order.
	c4 := h.Subscriber("testing")
	err = c4.Subscribe("io.cozy.testobject")
	assert.NoError(t, err)
	err = c4.Subscribe("io.cozy.testobject2")
	assert.NoError(t, err)
	wg.Add(2)
	go func() {
		expected := "bar"
		for e := range c4.Channel {
			assert.Equal(t, expected, e.Doc.ID())
			wg.Done()
			if expected == "baz" {
				break
			}
			expected = "baz"
		}
	}()
	time.AfterFunc(1*time.Millisecond, func() {
		h.Publish(&Event{
			Domain: "testing",
			Doc: &testDoc{
				doctype: "io.cozy.testobject",
				id:      "bar",
			},
		})
	})
	time.AfterFunc(2*time.Millisecond, func() {
		h.Publish(&Event{
			Domain: "testing",
			Doc: &testDoc{
				doctype: "io.cozy.testobject2",
				id:      "baz",
			},
		})
	})
	wg.Wait()
}
// TestWatch checks id-level filtering on the in-memory hub: a subscriber
// watching specific document ids only receives events for those ids, and a
// later Subscribe on the whole doctype widens the stream to every id.
func TestWatch(t *testing.T) {
	h := newMemHub()
	c1 := h.Subscriber("testing")
	wg := sync.WaitGroup{}
	err := c1.Watch("io.cozy.testobject", "id1")
	assert.NoError(t, err)
	err = c1.Watch("io.cozy.testobject", "id2")
	assert.NoError(t, err)
	wg.Add(1)
	go func() {
		// Only id1 then id2 should come through; the unrelated id
		// published first must be filtered out by Watch.
		for e := range c1.Channel {
			assert.Equal(t, "id1", e.Doc.ID())
			break
		}
		for e := range c1.Channel {
			assert.Equal(t, "id2", e.Doc.ID())
			break
		}
		wg.Done()
	}()
	time.Sleep(1 * time.Millisecond)
	h.Publish(&Event{
		Domain: "testing",
		Doc: &testDoc{
			doctype: "io.cozy.testobject",
			id:      "not-id1-and-not-id2",
		},
	})
	time.Sleep(1 * time.Millisecond)
	h.Publish(&Event{
		Domain: "testing",
		Doc: &testDoc{
			doctype: "io.cozy.testobject",
			id:      "id1",
		},
	})
	time.Sleep(1 * time.Millisecond)
	h.Publish(&Event{
		Domain: "testing",
		Doc: &testDoc{
			doctype: "io.cozy.testobject",
			id:      "id2",
		},
	})
	wg.Wait()
	// Subscribing to the whole doctype must now deliver every id,
	// including ones never passed to Watch.
	err = c1.Subscribe("io.cozy.testobject")
	assert.NoError(t, err)
	wg.Add(1)
	go func() {
		for e := range c1.Channel {
			assert.Equal(t, "id1", e.Doc.ID())
			break
		}
		for e := range c1.Channel {
			assert.Equal(t, "id2", e.Doc.ID())
			break
		}
		for e := range c1.Channel {
			assert.Equal(t, "id3", e.Doc.ID())
			break
		}
		wg.Done()
	}()
	time.Sleep(1 * time.Millisecond)
	h.Publish(&Event{
		Domain: "testing",
		Doc: &testDoc{
			doctype: "io.cozy.testobject",
			id:      "id1",
		},
	})
	time.Sleep(1 * time.Millisecond)
	h.Publish(&Event{
		Domain: "testing",
		Doc: &testDoc{
			doctype: "io.cozy.testobject",
			id:      "id2",
		},
	})
	time.Sleep(1 * time.Millisecond)
	h.Publish(&Event{
		Domain: "testing",
		Doc: &testDoc{
			doctype: "io.cozy.testobject",
			id:      "id3",
		},
	})
	wg.Wait()
	err = c1.Close()
	assert.NoError(t, err)
}
// TestRedisRealtime runs the same scenario as TestMemRealtime against the
// Redis-backed hub. It requires a reachable redis server at localhost:6379
// (database 6).
func TestRedisRealtime(t *testing.T) {
	opt, err := redis.ParseURL("redis://localhost:6379/6")
	assert.NoError(t, err)
	client := redis.NewClient(opt)
	h := newRedisHub(client)
	c1 := h.Subscriber("testing")
	c2 := h.Subscriber("testing")
	c3 := h.SubscribeLocalAll()
	wg := sync.WaitGroup{}
	err = c1.Subscribe("io.cozy.testobject")
	assert.NoError(t, err)
	err = c2.Subscribe("io.cozy.testobject")
	assert.NoError(t, err)
	// Each reader consumes a single event, then stops.
	wg.Add(1)
	go func() {
		for e := range c1.Channel {
			assert.Equal(t, "foo", e.Doc.ID())
			break
		}
		wg.Done()
	}()
	wg.Add(1)
	go func() {
		for e := range c2.Channel {
			assert.Equal(t, "foo", e.Doc.ID())
			break
		}
		wg.Done()
	}()
	wg.Add(1)
	go func() {
		// The firehose subscriber also sees the event's domain.
		for e := range c3.Channel {
			assert.Equal(t, "testing", e.Domain)
			assert.Equal(t, "foo", e.Doc.ID())
			break
		}
		wg.Done()
	}()
	// Publish slightly later so the readers are already blocked.
	time.AfterFunc(1*time.Millisecond, func() {
		h.Publish(&Event{
			Domain: "testing",
			Doc: &testDoc{
				doctype: "io.cozy.testobject",
				id:      "foo",
			},
		})
	})
	wg.Wait()
	err = c1.Close()
	assert.NoError(t, err)
	err = c2.Close()
	assert.NoError(t, err)
	err = c3.Close()
	assert.NoError(t, err)
	// A second Close on the same subscriber must fail.
	err = c1.Close()
	assert.Error(t, err)
	// Nobody is listening anymore: these publishes must be dropped.
	h.Publish(&Event{
		Domain: "testing",
		Doc: &testDoc{
			doctype: "io.cozy.testobject",
			id:      "nobodywillseeme",
		},
	})
	h.Publish(&Event{
		Domain: "testing",
		Doc: &testDoc{
			doctype: "io.cozy.testobject",
			id:      "meneither",
		},
	})
	// Redis pub/sub is asynchronous; give it time to settle before the
	// next subscription round.
	time.Sleep(100 * time.Millisecond)
	c4 := h.Subscriber("testing")
	err = c4.Subscribe("io.cozy.testobject")
	assert.NoError(t, err)
	err = c4.Subscribe("io.cozy.testobject2")
	assert.NoError(t, err)
	wg.Add(2)
	go func() {
		expected := "bar"
		for e := range c4.Channel {
			assert.Equal(t, expected, e.Doc.ID())
			wg.Done()
			if expected == "baz" {
				break
			}
			expected = "baz"
		}
	}()
	time.AfterFunc(1*time.Millisecond, func() {
		h.Publish(&Event{
			Domain: "testing",
			Doc: &testDoc{
				doctype: "io.cozy.testobject",
				id:      "bar",
			},
		})
	})
	time.AfterFunc(2*time.Millisecond, func() {
		h.Publish(&Event{
			Domain: "testing",
			Doc: &testDoc{
				doctype: "io.cozy.testobject2",
				id:      "baz",
			},
		})
	})
	wg.Wait()
}
|
/*
Copyright 2012 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package serverconfig
import (
"errors"
"fmt"
"net/url"
"os"
"path/filepath"
"strings"
"camlistore.org/pkg/blob"
"camlistore.org/pkg/jsonconfig"
"camlistore.org/pkg/jsonsign"
"camlistore.org/pkg/osutil"
)
// Default paths of the self-generated TLS certificate and key, used when
// HTTPS is enabled without explicit cert/key files.
const (
	DefaultTLSCert = "config/selfgen_pem.crt"
	DefaultTLSKey  = "config/selfgen_pem.key"
)
// configPrefixesParams holds the various parameters derived from the
// high-level user config and needed to set up the low-level config.
type configPrefixesParams struct {
	secretRing       string   // path of the GPG secret keyring used for signing
	keyId            string   // GPG key id handed to the jsonsign handler
	indexerPath      string   // indexer handler prefix (e.g. "/index-kv/"); empty when indexing is off
	blobPath         string   // local disk path of the primary blob store; empty when remote (s3, gcs, drive)
	packBlobs        bool     // use diskpacked storage instead of plain filesystem
	searchOwner      blob.Ref // owner blobref passed to the search handler
	shareHandlerPath string   // share handler prefix; empty disables the share handler
	flickr           string   // flickr importer API key; empty disables the importer
}
var (
	tempDir = os.TempDir // indirection so tests can substitute a predictable temp dir
	noMkdir bool         // for tests to not call os.Mkdir
)
// addPublishedConfig converts each entry of the high-level "publish" object
// into a low-level "publish" handler added to prefixes, and returns the
// list of prefixes that were added. Each entry must provide at least
// "rootPermanode" and "goTemplate"; "style" and "js" are optional.
func addPublishedConfig(prefixes jsonconfig.Obj,
	published jsonconfig.Obj,
	sourceRoot string) ([]interface{}, error) {
	pubPrefixes := []interface{}{}
	for k, v := range published {
		p, ok := v.(map[string]interface{})
		if !ok {
			return nil, fmt.Errorf("Wrong type for %s; was expecting map[string]interface{}, got %T", k, v)
		}
		// e.g. "/blog/" -> "blogRoot"
		rootName := strings.Replace(k, "/", "", -1) + "Root"
		rootPermanode, goTemplate, style, js := "", "", "", ""
		for pk, pv := range p {
			val, ok := pv.(string)
			if !ok {
				return nil, fmt.Errorf("Was expecting type string for %s, got %T", pk, pv)
			}
			switch pk {
			case "rootPermanode":
				rootPermanode = val
			case "goTemplate":
				goTemplate = val
			case "style":
				style = val
			case "js":
				js = val
			default:
				// Any unknown key is a configuration error.
				return nil, fmt.Errorf("Unexpected key %q in config for %s", pk, k)
			}
		}
		if rootPermanode == "" || goTemplate == "" {
			return nil, fmt.Errorf("Missing key in configuration for %s, need \"rootPermanode\" and \"goTemplate\"", k)
		}
		ob := map[string]interface{}{}
		ob["handler"] = "publish"
		handlerArgs := map[string]interface{}{
			"rootName":      rootName,
			"blobRoot":      "/bs-and-maybe-also-index/",
			"searchRoot":    "/my-search/",
			"cache":         "/cache/",
			"rootPermanode": []interface{}{"/sighelper/", rootPermanode},
		}
		if sourceRoot != "" {
			handlerArgs["sourceRoot"] = sourceRoot
		}
		handlerArgs["goTemplate"] = goTemplate
		if style != "" {
			handlerArgs["css"] = []interface{}{style}
		}
		if js != "" {
			handlerArgs["js"] = []interface{}{js}
		}
		handlerArgs["scaledImage"] = "lrucache"
		ob["handlerArgs"] = handlerArgs
		prefixes[k] = ob
		pubPrefixes = append(pubPrefixes, k)
	}
	return pubPrefixes, nil
}
// addUIConfig registers the "ui" handler at uiPrefix, wiring it to the
// publish roots and the optional source tree override.
func addUIConfig(prefixes jsonconfig.Obj,
	uiPrefix string,
	published []interface{},
	sourceRoot string) {
	args := map[string]interface{}{
		"jsonSignRoot": "/sighelper/",
		"cache":        "/cache/",
		"scaledImage":  "lrucache",
	}
	if len(published) > 0 {
		args["publishRoots"] = published
	}
	if sourceRoot != "" {
		args["sourceRoot"] = sourceRoot
	}
	prefixes[uiPrefix] = map[string]interface{}{
		"handler":     "ui",
		"handlerArgs": args,
	}
}
// addMongoConfig registers the mongodb indexer at /index-mongo/ from a
// "user:password@host" connection string. Malformed input aborts the
// program via exitFailure.
func addMongoConfig(prefixes jsonconfig.Obj, dbname string, dbinfo string) {
	fields := strings.Split(dbinfo, "@")
	if len(fields) != 2 {
		exitFailure("Malformed mongo config string. Got \"%v\", want: \"user:password@host\"", dbinfo)
	}
	host := fields[1]
	fields = strings.Split(fields[0], ":")
	if len(fields) != 2 {
		exitFailure("Malformed mongo config string. Got \"%v\", want: \"user:password\"", fields[0])
	}
	prefixes["/index-mongo/"] = map[string]interface{}{
		"enabled": true,
		"handler": "storage-mongodbindexer",
		"handlerArgs": map[string]interface{}{
			"host":       host,
			"user":       fields[0],
			"password":   fields[1],
			"database":   dbname,
			"blobSource": "/bs/",
		},
	}
}
// addSQLConfig registers a SQL-backed indexer (mysql or postgres) at
// /index-<rdbms>/ from a "user@host:password" connection string.
// Malformed input aborts the program via exitFailure.
func addSQLConfig(rdbms string, prefixes jsonconfig.Obj, dbname string, dbinfo string) {
	badFormat := "Malformed " + rdbms + " config string. Want: \"user@host:password\""
	fields := strings.Split(dbinfo, "@")
	if len(fields) != 2 {
		exitFailure(badFormat)
	}
	user := fields[0]
	fields = strings.Split(fields[1], ":")
	if len(fields) != 2 {
		exitFailure(badFormat)
	}
	prefixes["/index-"+rdbms+"/"] = map[string]interface{}{
		"enabled": true,
		"handler": "storage-" + rdbms + "indexer",
		"handlerArgs": map[string]interface{}{
			"host":       fields[0],
			"user":       user,
			"password":   fields[1],
			"database":   dbname,
			"blobSource": "/bs/",
		},
	}
}
// addPostgresConfig registers a PostgreSQL indexer; dbinfo has the form
// "user@host:password".
func addPostgresConfig(prefixes jsonconfig.Obj, dbname string, dbinfo string) {
	addSQLConfig("postgres", prefixes, dbname, dbinfo)
}
// addMySQLConfig registers a MySQL indexer; dbinfo has the form
// "user@host:password".
func addMySQLConfig(prefixes jsonconfig.Obj, dbname string, dbinfo string) {
	addSQLConfig("mysql", prefixes, dbname, dbinfo)
}
// addMemindexConfig registers the development-only in-memory indexer at
// /index-mem/, fed from the /bs/ blob store.
func addMemindexConfig(prefixes jsonconfig.Obj) {
	prefixes["/index-mem/"] = map[string]interface{}{
		"handler": "storage-memory-only-dev-indexer",
		"handlerArgs": map[string]interface{}{
			"blobSource": "/bs/",
		},
	}
}
// addSQLiteConfig registers the SQLite indexer at /index-sqlite/, backed by
// the given database file and fed from /bs/.
func addSQLiteConfig(prefixes jsonconfig.Obj, file string) {
	prefixes["/index-sqlite/"] = map[string]interface{}{
		"handler": "storage-sqliteindexer",
		"handlerArgs": map[string]interface{}{
			"blobSource": "/bs/",
			"file":       file,
		},
	}
}
// addKVConfig registers the kvfile indexer at /index-kv/, backed by the
// given file and fed from /bs/.
func addKVConfig(prefixes jsonconfig.Obj, file string) {
	ob := map[string]interface{}{}
	ob["handler"] = "storage-kvfileindexer"
	ob["handlerArgs"] = map[string]interface{}{
		"blobSource": "/bs/",
		"file":       file,
	}
	prefixes["/index-kv/"] = ob
}
// addS3Config configures an S3 blob store from the high-level "s3" string,
// of the form "access_key_id:secret_access_key:bucket[:hostname]". If no
// /bs/ handler exists yet, S3 becomes the primary blob store (with a local
// disk /cache/); otherwise it is mounted at /sto-s3/ with a /sync-to-s3/
// handler mirroring /bs/ into it through a persistent kv queue.
func addS3Config(params *configPrefixesParams, prefixes jsonconfig.Obj, s3 string) error {
	f := strings.SplitN(s3, ":", 4)
	if len(f) < 3 {
		return errors.New(`genconfig: expected "s3" field to be of form "access_key_id:secret_access_key:bucket"`)
	}
	accessKey, secret, bucket := f[0], f[1], f[2]
	// The 4th component (custom endpoint hostname) is optional.
	var hostname string
	if len(f) == 4 {
		hostname = f[3]
	}
	// S3 is primary iff nothing has claimed /bs/ before us.
	isPrimary := false
	if _, ok := prefixes["/bs/"]; !ok {
		isPrimary = true
	}
	s3Prefix := ""
	if isPrimary {
		s3Prefix = "/bs/"
	} else {
		s3Prefix = "/sto-s3/"
	}
	args := map[string]interface{}{
		"aws_access_key":        accessKey,
		"aws_secret_access_key": secret,
		"bucket":                bucket,
	}
	if hostname != "" {
		args["hostname"] = hostname
	}
	prefixes[s3Prefix] = map[string]interface{}{
		"handler":     "storage-s3",
		"handlerArgs": args,
	}
	if isPrimary {
		// TODO(mpl): s3CacheBucket
		// See http://code.google.com/p/camlistore/issues/detail?id=85
		prefixes["/cache/"] = map[string]interface{}{
			"handler": "storage-filesystem",
			"handlerArgs": map[string]interface{}{
				"path": filepath.Join(tempDir(), "camli-cache"),
			},
		}
	} else {
		if params.blobPath == "" {
			panic("unexpected empty blobpath with sync-to-s3")
		}
		prefixes["/sync-to-s3/"] = map[string]interface{}{
			"handler": "sync",
			"handlerArgs": map[string]interface{}{
				"from": "/bs/",
				"to":   s3Prefix,
				"queue": map[string]interface{}{
					"type": "kv",
					"file": filepath.Join(params.blobPath, "sync-to-s3-queue.kv"),
				},
			},
		}
	}
	return nil
}
// addGoogleDriveConfig configures a Google Drive blob store from the
// high-level "googledrive" string, of the form
// "client_id:client_secret:refresh_token:parent_id". It becomes the
// primary blob store when no /bs/ handler exists yet; otherwise it is
// mounted at /sto-googledrive/ with a /sync-to-googledrive/ handler
// (note: no persistent queue here, unlike the s3 sync handler).
func addGoogleDriveConfig(prefixes jsonconfig.Obj, highCfg string) error {
	f := strings.SplitN(highCfg, ":", 4)
	if len(f) != 4 {
		return errors.New(`genconfig: expected "googledrive" field to be of form "client_id:client_secret:refresh_token:parent_id"`)
	}
	clientId, secret, refreshToken, parentId := f[0], f[1], f[2], f[3]
	isPrimary := false
	if _, ok := prefixes["/bs/"]; !ok {
		isPrimary = true
	}
	prefix := ""
	if isPrimary {
		prefix = "/bs/"
	} else {
		prefix = "/sto-googledrive/"
	}
	prefixes[prefix] = map[string]interface{}{
		"handler": "storage-googledrive",
		"handlerArgs": map[string]interface{}{
			"parent_id": parentId,
			"auth": map[string]interface{}{
				"client_id":     clientId,
				"client_secret": secret,
				"refresh_token": refreshToken,
			},
		},
	}
	if isPrimary {
		// Primary remote store still needs a local disk cache.
		prefixes["/cache/"] = map[string]interface{}{
			"handler": "storage-filesystem",
			"handlerArgs": map[string]interface{}{
				"path": filepath.Join(tempDir(), "camli-cache"),
			},
		}
	} else {
		prefixes["/sync-to-googledrive/"] = map[string]interface{}{
			"handler": "sync",
			"handlerArgs": map[string]interface{}{
				"from": "/bs/",
				"to":   prefix,
			},
		}
	}
	return nil
}
// addGoogleCloudStorageConfig configures a Google Cloud Storage blob store
// from the high-level "googlecloudstorage" string, of the form
// "client_id:client_secret:refresh_token:bucket". It becomes the primary
// blob store when no /bs/ handler exists yet; otherwise it is mounted at
// /sto-googlecloudstorage/ with a /sync-to-googlecloudstorage/ handler
// mirroring /bs/ into it.
func addGoogleCloudStorageConfig(prefixes jsonconfig.Obj, highCfg string) error {
	f := strings.SplitN(highCfg, ":", 4)
	if len(f) != 4 {
		return errors.New(`genconfig: expected "googlecloudstorage" field to be of form "client_id:client_secret:refresh_token:bucket"`)
	}
	clientId, secret, refreshToken, bucket := f[0], f[1], f[2], f[3]
	isPrimary := false
	if _, ok := prefixes["/bs/"]; !ok {
		isPrimary = true
	}
	gsPrefix := ""
	if isPrimary {
		gsPrefix = "/bs/"
	} else {
		gsPrefix = "/sto-googlecloudstorage/"
	}
	prefixes[gsPrefix] = map[string]interface{}{
		"handler": "storage-googlecloudstorage",
		"handlerArgs": map[string]interface{}{
			"bucket": bucket,
			"auth": map[string]interface{}{
				"client_id":     clientId,
				"client_secret": secret,
				"refresh_token": refreshToken,
				// If high-level config is for the common user then fullSyncOnStart = true
				// Then the default just works.
				//"fullSyncOnStart": true,
				//"blockingFullSyncOnStart": false
			},
		},
	}
	if isPrimary {
		// TODO: cacheBucket like s3CacheBucket?
		prefixes["/cache/"] = map[string]interface{}{
			"handler": "storage-filesystem",
			"handlerArgs": map[string]interface{}{
				"path": filepath.Join(tempDir(), "camli-cache"),
			},
		}
	} else {
		prefixes["/sync-to-googlecloudstorage/"] = map[string]interface{}{
			"handler": "sync",
			"handlerArgs": map[string]interface{}{
				"from": "/bs/",
				"to":   gsPrefix,
			},
		}
	}
	return nil
}
// genLowLevelPrefixes builds the map of low-level handler configs (keyed by
// URL prefix) common to every storage layout: root, setup, status, share,
// jsonsign, blob store, cache, importer, and — when an indexer is
// configured — the sync/replica/cond/search handlers around it.
func genLowLevelPrefixes(params *configPrefixesParams, ownerName string) (m jsonconfig.Obj) {
	m = make(jsonconfig.Obj)
	haveIndex := params.indexerPath != ""
	root := "/bs/"
	pubKeyDest := root
	if haveIndex {
		// With an index, writes are teed to the indexer for schema blobs
		// (see /bs-and-maybe-also-index/ below).
		root = "/bs-and-maybe-also-index/"
		pubKeyDest = "/bs-and-index/"
	}
	rootArgs := map[string]interface{}{
		"stealth":    false,
		"blobRoot":   root,
		"statusRoot": "/status/",
	}
	if ownerName != "" {
		rootArgs["ownerName"] = ownerName
	}
	m["/"] = map[string]interface{}{
		"handler":     "root",
		"handlerArgs": rootArgs,
	}
	if haveIndex {
		setMap(m, "/", "handlerArgs", "searchRoot", "/my-search/")
	}
	m["/setup/"] = map[string]interface{}{
		"handler": "setup",
	}
	m["/status/"] = map[string]interface{}{
		"handler": "status",
	}
	if params.shareHandlerPath != "" {
		m[params.shareHandlerPath] = map[string]interface{}{
			"handler": "share",
			"handlerArgs": map[string]interface{}{
				"blobRoot": "/bs/",
			},
		}
	}
	m["/sighelper/"] = map[string]interface{}{
		"handler": "jsonsign",
		"handlerArgs": map[string]interface{}{
			"secretRing":    params.secretRing,
			"keyId":         params.keyId,
			"publicKeyDest": pubKeyDest,
		},
	}
	storageType := "filesystem"
	if params.packBlobs {
		storageType = "diskpacked"
	}
	// Local disk blob store and cache, only when a blobPath is configured.
	if params.blobPath != "" {
		m["/bs/"] = map[string]interface{}{
			"handler": "storage-" + storageType,
			"handlerArgs": map[string]interface{}{
				"path": params.blobPath,
			},
		}
		m["/cache/"] = map[string]interface{}{
			"handler": "storage-filesystem",
			"handlerArgs": map[string]interface{}{
				"path": filepath.Join(params.blobPath, "/cache"),
			},
		}
	}
	if params.flickr != "" {
		m["/importer-flickr/"] = map[string]interface{}{
			"apiKey": params.flickr,
		}
	}
	if haveIndex {
		syncArgs := map[string]interface{}{
			"from": "/bs/",
			"to":   params.indexerPath,
		}
		// TODO(mpl): Brad says the cond should be dest == /index-*.
		// But what about when dest is index-mem and we have a local disk;
		// don't we want to have an active synchandler to do the fullSyncOnStart?
		// Anyway, that condition works for now.
		if params.blobPath == "" {
			// When our primary blob store is remote (s3 or google cloud),
			// i.e not an efficient replication source, we do not want the
			// synchandler to mirror to the indexer. But we still want a
			// synchandler to provide the discovery for e.g tools like
			// camtool sync. See http://camlistore.org/issue/201
			syncArgs["idle"] = true
		} else {
			syncArgs["queue"] = map[string]interface{}{
				"type": "kv",
				"file": filepath.Join(params.blobPath, "sync-to-index-queue.kv"),
			}
		}
		m["/sync/"] = map[string]interface{}{
			"handler":     "sync",
			"handlerArgs": syncArgs,
		}
		m["/bs-and-index/"] = map[string]interface{}{
			"handler": "storage-replica",
			"handlerArgs": map[string]interface{}{
				"backends": []interface{}{"/bs/", params.indexerPath},
			},
		}
		// Schema blobs are written to both blob store and index; everything
		// else goes to the blob store only. Reads always come from /bs/.
		m["/bs-and-maybe-also-index/"] = map[string]interface{}{
			"handler": "storage-cond",
			"handlerArgs": map[string]interface{}{
				"write": map[string]interface{}{
					"if":   "isSchema",
					"then": "/bs-and-index/",
					"else": "/bs/",
				},
				"read": "/bs/",
			},
		}
		m["/my-search/"] = map[string]interface{}{
			"handler": "search",
			"handlerArgs": map[string]interface{}{
				"index": params.indexerPath,
				"owner": params.searchOwner.String(),
			},
		}
	}
	return
}
// genLowLevelConfig returns a low-level config from a high-level config.
//
// It reads every supported high-level option, validates the combination
// (index choice, blob storage choice, TLS files), derives the signing
// public key from the identity secret ring, creates the blob cache
// directory, and then assembles the low-level "prefixes" handler map via
// genLowLevelPrefixes plus the add*Config helpers. The returned Config
// shares the input's configPath.
func genLowLevelConfig(conf *Config) (lowLevelConf *Config, err error) {
	var (
		baseURL    = conf.OptionalString("baseURL", "")
		listen     = conf.OptionalString("listen", "")
		auth       = conf.RequiredString("auth")
		keyId      = conf.RequiredString("identity")
		secretRing = conf.RequiredString("identitySecretRing")
		tlsOn      = conf.OptionalBool("https", false)
		tlsCert    = conf.OptionalString("HTTPSCertFile", "")
		tlsKey     = conf.OptionalString("HTTPSKeyFile", "")
		// Blob storage options
		blobPath           = conf.OptionalString("blobPath", "")
		packBlobs          = conf.OptionalBool("packBlobs", false)         // use diskpacked instead of the default filestorage
		s3                 = conf.OptionalString("s3", "")                 // "access_key_id:secret_access_key:bucket[:hostname]"
		googlecloudstorage = conf.OptionalString("googlecloudstorage", "") // "clientId:clientSecret:refreshToken:bucket"
		googledrive        = conf.OptionalString("googledrive", "")        // "clientId:clientSecret:refreshToken:parentId"
		// Enable the share handler. If true, and shareHandlerPath is empty,
		// then shareHandlerPath defaults to "/share/".
		shareHandler = conf.OptionalBool("shareHandler", false)
		// URL prefix for the share handler. If set, overrides shareHandler.
		shareHandlerPath = conf.OptionalString("shareHandlerPath", "")
		// Index options
		runIndex   = conf.OptionalBool("runIndex", true) // if false: no search, no UI, etc.
		dbname     = conf.OptionalString("dbname", "")   // for mysql, postgres, mongo
		mysql      = conf.OptionalString("mysql", "")
		postgres   = conf.OptionalString("postgres", "")
		memIndex   = conf.OptionalBool("memIndex", false)
		mongo      = conf.OptionalString("mongo", "")
		sqliteFile = conf.OptionalString("sqlite", "")
		kvFile     = conf.OptionalString("kvIndexFile", "")
		// Importer options
		flickr  = conf.OptionalString("flickr", "")
		_       = conf.OptionalList("replicateTo")
		publish = conf.OptionalObject("publish")
		// alternative source tree, to override the embedded ui and/or closure resources.
		// If non empty, the ui files will be expected at
		// sourceRoot + "/server/camlistored/ui" and the closure library at
		// sourceRoot + "/third_party/closure/lib"
		// Also used by the publish handler.
		sourceRoot = conf.OptionalString("sourceRoot", "")
		ownerName  = conf.OptionalString("ownerName", "")
	)
	if err := conf.Validate(); err != nil {
		return nil, err
	}
	obj := jsonconfig.Obj{}
	// TLS: either both cert and key files are given, or neither (in which
	// case a self-signed pair is generated at the default locations).
	if tlsOn {
		if (tlsCert != "") != (tlsKey != "") {
			return nil, errors.New("Must set both TLSCertFile and TLSKeyFile (or neither to generate a self-signed cert)")
		}
		if tlsCert != "" {
			obj["TLSCertFile"] = tlsCert
			obj["TLSKeyFile"] = tlsKey
		} else {
			obj["TLSCertFile"] = DefaultTLSCert
			obj["TLSKeyFile"] = DefaultTLSKey
		}
	}
	// baseURL may only carry scheme://host[:port]; any path is rejected.
	if baseURL != "" {
		u, err := url.Parse(baseURL)
		if err != nil {
			return nil, fmt.Errorf("Error parsing baseURL %q as a URL: %v", baseURL, err)
		}
		if u.Path != "" && u.Path != "/" {
			return nil, fmt.Errorf("baseURL can't have a path, only a scheme, host, and optional port.")
		}
		u.Path = ""
		obj["baseURL"] = u.String()
	}
	if listen != "" {
		obj["listen"] = listen
	}
	obj["https"] = tlsOn
	obj["auth"] = auth
	// Default dbname to "camli<username>" when not configured.
	username := ""
	if dbname == "" {
		username = osutil.Username()
		if username == "" {
			return nil, fmt.Errorf("USER (USERNAME on windows) env var not set; needed to define dbname")
		}
		dbname = "camli" + username
	}
	// Exactly one indexer must be selected when runIndex is true; none when
	// it is false. The winning option determines the indexer prefix.
	var indexerPath string
	numIndexers := numSet(mongo, mysql, postgres, sqliteFile, memIndex, kvFile)
	switch {
	case runIndex && numIndexers == 0:
		return nil, fmt.Errorf("Unless runIndex is set to false, you must specify an index option (kvIndexFile, mongo, mysql, postgres, sqlite, memIndex).")
	case runIndex && numIndexers != 1:
		return nil, fmt.Errorf("With runIndex set true, you can only pick exactly one indexer (mongo, mysql, postgres, sqlite, memIndex).")
	case !runIndex && numIndexers != 0:
		return nil, fmt.Errorf("With runIndex disabled, you can't specify any of mongo, mysql, postgres, sqlite, memIndex.")
	case mysql != "":
		indexerPath = "/index-mysql/"
	case postgres != "":
		indexerPath = "/index-postgres/"
	case mongo != "":
		indexerPath = "/index-mongo/"
	case sqliteFile != "":
		indexerPath = "/index-sqlite/"
	case kvFile != "":
		indexerPath = "/index-kv/"
	case memIndex:
		indexerPath = "/index-mem/"
	}
	// Derive the armored public key from the secret ring; its SHA-1 becomes
	// the search owner ref below.
	entity, err := jsonsign.EntityFromSecring(keyId, secretRing)
	if err != nil {
		return nil, err
	}
	armoredPublicKey, err := jsonsign.ArmoredPublicKey(entity)
	if err != nil {
		return nil, err
	}
	// Without a local blob path, exactly one remote primary store is required.
	nolocaldisk := blobPath == ""
	if nolocaldisk {
		if s3 == "" && googlecloudstorage == "" {
			return nil, errors.New("You need at least one of blobPath (for localdisk) or s3 or googlecloudstorage configured for a blobserver.")
		}
		if s3 != "" && googlecloudstorage != "" {
			return nil, errors.New("Using S3 as a primary storage and Google Cloud Storage as a mirror is not supported for now.")
		}
	}
	if shareHandler && shareHandlerPath == "" {
		shareHandlerPath = "/share/"
	}
	prefixesParams := &configPrefixesParams{
		secretRing:       secretRing,
		keyId:            keyId,
		indexerPath:      indexerPath,
		blobPath:         blobPath,
		packBlobs:        packBlobs,
		searchOwner:      blob.SHA1FromString(armoredPublicKey),
		shareHandlerPath: shareHandlerPath,
		flickr:           flickr,
	}
	prefixes := genLowLevelPrefixes(prefixesParams, ownerName)
	var cacheDir string
	if nolocaldisk {
		// Whether camlistored is run from EC2 or not, we use
		// a temp dir as the cache when primary storage is S3.
		// TODO(mpl): s3CacheBucket
		// See http://code.google.com/p/camlistore/issues/detail?id=85
		cacheDir = filepath.Join(tempDir(), "camli-cache")
	} else {
		cacheDir = filepath.Join(blobPath, "cache")
	}
	if !noMkdir {
		if err := os.MkdirAll(cacheDir, 0700); err != nil {
			return nil, fmt.Errorf("Could not create blobs cache dir %s: %v", cacheDir, err)
		}
	}
	// Publishing requires an index; addPublishedConfig registers one publish
	// handler per configured root and returns their prefixes for the UI.
	published := []interface{}{}
	if len(publish) > 0 {
		if !runIndex {
			return nil, fmt.Errorf("publishing requires an index")
		}
		published, err = addPublishedConfig(prefixes, publish, sourceRoot)
		if err != nil {
			return nil, fmt.Errorf("Could not generate config for published: %v", err)
		}
	}
	if runIndex {
		addUIConfig(prefixes, "/ui/", published, sourceRoot)
	}
	if mysql != "" {
		addMySQLConfig(prefixes, dbname, mysql)
	}
	if postgres != "" {
		addPostgresConfig(prefixes, dbname, postgres)
	}
	if mongo != "" {
		addMongoConfig(prefixes, dbname, mongo)
	}
	if sqliteFile != "" {
		addSQLiteConfig(prefixes, sqliteFile)
	}
	if kvFile != "" {
		addKVConfig(prefixes, kvFile)
	}
	if s3 != "" {
		if err := addS3Config(prefixesParams, prefixes, s3); err != nil {
			return nil, err
		}
	}
	if googledrive != "" {
		if err := addGoogleDriveConfig(prefixes, googledrive); err != nil {
			return nil, err
		}
	}
	if googlecloudstorage != "" {
		if err := addGoogleCloudStorageConfig(prefixes, googlecloudstorage); err != nil {
			return nil, err
		}
	}
	if indexerPath == "/index-mem/" {
		addMemindexConfig(prefixes)
	}
	obj["prefixes"] = (map[string]interface{})(prefixes)
	lowLevelConf = &Config{
		Obj:        obj,
		configPath: conf.configPath,
	}
	return lowLevelConf, nil
}
// numSet reports how many of the given values are "set": a non-empty
// string or a true bool. Passing any other type is a programmer error
// and panics.
func numSet(vv ...interface{}) (num int) {
	for _, vi := range vv {
		if s, ok := vi.(string); ok {
			if s != "" {
				num++
			}
			continue
		}
		if b, ok := vi.(bool); ok {
			if b {
				num++
			}
			continue
		}
		panic("unknown type")
	}
	return num
}
// setMap sets a (possibly nested) key in m. The variadic args are a path
// of string keys followed by the final value, e.g.
// setMap(m, "a", "b", v) performs m["a"]["b"] = v. Intermediate values
// must already be map[string]interface{}; fewer than two args panics.
func setMap(m map[string]interface{}, v ...interface{}) {
	if len(v) < 2 {
		panic("too few args")
	}
	cur := m
	// Walk down all but the last key/value pair.
	for len(v) > 2 {
		cur = cur[v[0].(string)].(map[string]interface{})
		v = v[1:]
	}
	cur[v[0].(string)] = v[1]
}
gofmt
Change-Id: Ibec8c4b1a78bd93b78e26ff73de7a386699cc6c8
/*
Copyright 2012 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package serverconfig
import (
"errors"
"fmt"
"net/url"
"os"
"path/filepath"
"strings"
"camlistore.org/pkg/blob"
"camlistore.org/pkg/jsonconfig"
"camlistore.org/pkg/jsonsign"
"camlistore.org/pkg/osutil"
)
// Default locations for the self-signed certificate/key pair that is
// generated when "https" is enabled without explicit cert/key files.
const (
	DefaultTLSCert = "config/selfgen_pem.crt"
	DefaultTLSKey  = "config/selfgen_pem.key"
)
// various parameters derived from the high-level user config
// and needed to set up the low-level config.
type configPrefixesParams struct {
	secretRing       string   // path to the GPG secret ring holding the identity
	keyId            string   // GPG key id used for signing
	indexerPath      string   // e.g. "/index-mysql/"; empty when no index runs
	blobPath         string   // local blobserver directory; empty when remote-only
	packBlobs        bool     // use diskpacked instead of plain filestorage
	searchOwner      blob.Ref // SHA-1 of the armored public key; the search owner
	shareHandlerPath string   // e.g. "/share/"; empty disables the share handler
	flickr           string   // flickr importer API key; empty disables the importer
}
// Test seams: both are swapped out by tests to avoid touching the
// real filesystem.
var (
	tempDir = os.TempDir
	noMkdir bool // for tests to not call os.Mkdir
)
// addPublishedConfig registers one "publish" handler per entry of the
// high-level "publish" object and returns the list of their prefixes
// (used as the UI handler's publishRoots).
//
// Each entry must provide "rootPermanode" and "goTemplate"; "style" and
// "js" are optional. Any other key is an error.
func addPublishedConfig(prefixes jsonconfig.Obj,
	published jsonconfig.Obj,
	sourceRoot string) ([]interface{}, error) {
	pubPrefixes := []interface{}{}
	for k, v := range published {
		p, ok := v.(map[string]interface{})
		if !ok {
			return nil, fmt.Errorf("Wrong type for %s; was expecting map[string]interface{}, got %T", k, v)
		}
		// e.g. prefix "/blog/" gets the root name "blogRoot".
		rootName := strings.Replace(k, "/", "", -1) + "Root"
		rootPermanode, goTemplate, style, js := "", "", "", ""
		for pk, pv := range p {
			val, ok := pv.(string)
			if !ok {
				return nil, fmt.Errorf("Was expecting type string for %s, got %T", pk, pv)
			}
			switch pk {
			case "rootPermanode":
				rootPermanode = val
			case "goTemplate":
				goTemplate = val
			case "style":
				style = val
			case "js":
				js = val
			default:
				return nil, fmt.Errorf("Unexpected key %q in config for %s", pk, k)
			}
		}
		if rootPermanode == "" || goTemplate == "" {
			return nil, fmt.Errorf("Missing key in configuration for %s, need \"rootPermanode\" and \"goTemplate\"", k)
		}
		ob := map[string]interface{}{}
		ob["handler"] = "publish"
		handlerArgs := map[string]interface{}{
			"rootName":      rootName,
			"blobRoot":      "/bs-and-maybe-also-index/",
			"searchRoot":    "/my-search/",
			"cache":         "/cache/",
			"rootPermanode": []interface{}{"/sighelper/", rootPermanode},
		}
		if sourceRoot != "" {
			handlerArgs["sourceRoot"] = sourceRoot
		}
		handlerArgs["goTemplate"] = goTemplate
		if style != "" {
			handlerArgs["css"] = []interface{}{style}
		}
		if js != "" {
			handlerArgs["js"] = []interface{}{js}
		}
		handlerArgs["scaledImage"] = "lrucache"
		ob["handlerArgs"] = handlerArgs
		prefixes[k] = ob
		pubPrefixes = append(pubPrefixes, k)
	}
	return pubPrefixes, nil
}
// addUIConfig mounts the web UI handler at uiPrefix, pointing it at the
// signing helper and cache, plus any publish roots and an alternative
// source tree when provided.
func addUIConfig(prefixes jsonconfig.Obj,
	uiPrefix string,
	published []interface{},
	sourceRoot string) {
	args := map[string]interface{}{
		"jsonSignRoot": "/sighelper/",
		"cache":        "/cache/",
		"scaledImage":  "lrucache",
	}
	if len(published) > 0 {
		args["publishRoots"] = published
	}
	if sourceRoot != "" {
		args["sourceRoot"] = sourceRoot
	}
	prefixes[uiPrefix] = map[string]interface{}{
		"handler":     "ui",
		"handlerArgs": args,
	}
}
// addMongoConfig registers the MongoDB indexer at "/index-mongo/".
// dbinfo must be of the form "user:password@host"; anything else aborts
// the process via exitFailure.
func addMongoConfig(prefixes jsonconfig.Obj, dbname string, dbinfo string) {
	parts := strings.Split(dbinfo, "@")
	if len(parts) != 2 {
		exitFailure("Malformed mongo config string. Got \"%v\", want: \"user:password@host\"", dbinfo)
	}
	host := parts[1]
	creds := strings.Split(parts[0], ":")
	if len(creds) != 2 {
		exitFailure("Malformed mongo config string. Got \"%v\", want: \"user:password\"", parts[0])
	}
	prefixes["/index-mongo/"] = map[string]interface{}{
		"enabled": true,
		"handler": "storage-mongodbindexer",
		"handlerArgs": map[string]interface{}{
			"host":       host,
			"user":       creds[0],
			"password":   creds[1],
			"database":   dbname,
			"blobSource": "/bs/",
		},
	}
}
// addSQLConfig registers a SQL-backed indexer ("mysql" or "postgres") at
// "/index-<rdbms>/". dbinfo must be of the form "user@host:password";
// anything else aborts the process via exitFailure.
func addSQLConfig(rdbms string, prefixes jsonconfig.Obj, dbname string, dbinfo string) {
	parts := strings.Split(dbinfo, "@")
	if len(parts) != 2 {
		exitFailure("Malformed " + rdbms + " config string. Want: \"user@host:password\"")
	}
	user := parts[0]
	hostPass := strings.Split(parts[1], ":")
	if len(hostPass) != 2 {
		exitFailure("Malformed " + rdbms + " config string. Want: \"user@host:password\"")
	}
	prefixes["/index-"+rdbms+"/"] = map[string]interface{}{
		"enabled": true,
		"handler": "storage-" + rdbms + "indexer",
		"handlerArgs": map[string]interface{}{
			"host":       hostPass[0],
			"user":       user,
			"password":   hostPass[1],
			"database":   dbname,
			"blobSource": "/bs/",
		},
	}
}
// addPostgresConfig registers the PostgreSQL indexer at "/index-postgres/".
func addPostgresConfig(prefixes jsonconfig.Obj, dbname string, dbinfo string) {
	addSQLConfig("postgres", prefixes, dbname, dbinfo)
}
// addMySQLConfig registers the MySQL indexer at "/index-mysql/".
func addMySQLConfig(prefixes jsonconfig.Obj, dbname string, dbinfo string) {
	addSQLConfig("mysql", prefixes, dbname, dbinfo)
}
// addMemindexConfig registers the development-only in-memory indexer at
// "/index-mem/", fed from the local blobserver.
func addMemindexConfig(prefixes jsonconfig.Obj) {
	prefixes["/index-mem/"] = map[string]interface{}{
		"handler": "storage-memory-only-dev-indexer",
		"handlerArgs": map[string]interface{}{
			"blobSource": "/bs/",
		},
	}
}
// addSQLiteConfig registers the SQLite indexer at "/index-sqlite/",
// backed by the given database file and fed from the local blobserver.
func addSQLiteConfig(prefixes jsonconfig.Obj, file string) {
	prefixes["/index-sqlite/"] = map[string]interface{}{
		"handler": "storage-sqliteindexer",
		"handlerArgs": map[string]interface{}{
			"blobSource": "/bs/",
			"file":       file,
		},
	}
}
// addKVConfig registers the kv-file indexer at "/index-kv/", backed by
// the given file and fed from the local blobserver.
func addKVConfig(prefixes jsonconfig.Obj, file string) {
	args := map[string]interface{}{}
	args["blobSource"] = "/bs/"
	args["file"] = file
	ob := map[string]interface{}{}
	ob["handler"] = "storage-kvfileindexer"
	ob["handlerArgs"] = args
	prefixes["/index-kv/"] = ob
}
// addS3Config wires up S3 blob storage from the "s3" high-level option,
// formatted as "access_key_id:secret_access_key:bucket[:hostname]".
//
// If no "/bs/" handler exists yet, S3 becomes the primary blob store
// (with a temp-dir cache); otherwise it is mounted at "/sto-s3/" and a
// "/sync-to-s3/" handler mirrors the local blobserver to it.
func addS3Config(params *configPrefixesParams, prefixes jsonconfig.Obj, s3 string) error {
	f := strings.SplitN(s3, ":", 4)
	if len(f) < 3 {
		return errors.New(`genconfig: expected "s3" field to be of form "access_key_id:secret_access_key:bucket"`)
	}
	accessKey, secret, bucket := f[0], f[1], f[2]
	var hostname string
	if len(f) == 4 {
		hostname = f[3]
	}
	// S3 is primary iff nothing has claimed "/bs/" yet (i.e. no blobPath).
	isPrimary := false
	if _, ok := prefixes["/bs/"]; !ok {
		isPrimary = true
	}
	s3Prefix := ""
	if isPrimary {
		s3Prefix = "/bs/"
	} else {
		s3Prefix = "/sto-s3/"
	}
	args := map[string]interface{}{
		"aws_access_key":        accessKey,
		"aws_secret_access_key": secret,
		"bucket":                bucket,
	}
	if hostname != "" {
		args["hostname"] = hostname
	}
	prefixes[s3Prefix] = map[string]interface{}{
		"handler":     "storage-s3",
		"handlerArgs": args,
	}
	if isPrimary {
		// TODO(mpl): s3CacheBucket
		// See http://code.google.com/p/camlistore/issues/detail?id=85
		prefixes["/cache/"] = map[string]interface{}{
			"handler": "storage-filesystem",
			"handlerArgs": map[string]interface{}{
				"path": filepath.Join(tempDir(), "camli-cache"),
			},
		}
	} else {
		if params.blobPath == "" {
			panic("unexpected empty blobpath with sync-to-s3")
		}
		prefixes["/sync-to-s3/"] = map[string]interface{}{
			"handler": "sync",
			"handlerArgs": map[string]interface{}{
				"from": "/bs/",
				"to":   s3Prefix,
				"queue": map[string]interface{}{
					"type": "kv",
					"file": filepath.Join(params.blobPath, "sync-to-s3-queue.kv"),
				},
			},
		}
	}
	return nil
}
// addGoogleDriveConfig wires up Google Drive blob storage from the
// "googledrive" option, formatted as
// "client_id:client_secret:refresh_token:parent_id".
//
// If no "/bs/" handler exists yet, Drive becomes the primary blob store
// (with a temp-dir cache); otherwise it is mounted at "/sto-googledrive/"
// and "/sync-to-googledrive/" mirrors the local blobserver to it.
func addGoogleDriveConfig(prefixes jsonconfig.Obj, highCfg string) error {
	f := strings.SplitN(highCfg, ":", 4)
	if len(f) != 4 {
		return errors.New(`genconfig: expected "googledrive" field to be of form "client_id:client_secret:refresh_token:parent_id"`)
	}
	clientId, secret, refreshToken, parentId := f[0], f[1], f[2], f[3]
	isPrimary := false
	if _, ok := prefixes["/bs/"]; !ok {
		isPrimary = true
	}
	prefix := ""
	if isPrimary {
		prefix = "/bs/"
	} else {
		prefix = "/sto-googledrive/"
	}
	prefixes[prefix] = map[string]interface{}{
		"handler": "storage-googledrive",
		"handlerArgs": map[string]interface{}{
			"parent_id": parentId,
			"auth": map[string]interface{}{
				"client_id":     clientId,
				"client_secret": secret,
				"refresh_token": refreshToken,
			},
		},
	}
	if isPrimary {
		prefixes["/cache/"] = map[string]interface{}{
			"handler": "storage-filesystem",
			"handlerArgs": map[string]interface{}{
				"path": filepath.Join(tempDir(), "camli-cache"),
			},
		}
	} else {
		prefixes["/sync-to-googledrive/"] = map[string]interface{}{
			"handler": "sync",
			"handlerArgs": map[string]interface{}{
				"from": "/bs/",
				"to":   prefix,
			},
		}
	}
	return nil
}
// addGoogleCloudStorageConfig wires up Google Cloud Storage from the
// "googlecloudstorage" option, formatted as
// "client_id:client_secret:refresh_token:bucket". Primary-vs-mirror
// selection works like addS3Config.
func addGoogleCloudStorageConfig(prefixes jsonconfig.Obj, highCfg string) error {
	f := strings.SplitN(highCfg, ":", 4)
	if len(f) != 4 {
		return errors.New(`genconfig: expected "googlecloudstorage" field to be of form "client_id:client_secret:refresh_token:bucket"`)
	}
	clientId, secret, refreshToken, bucket := f[0], f[1], f[2], f[3]
	isPrimary := false
	if _, ok := prefixes["/bs/"]; !ok {
		isPrimary = true
	}
	gsPrefix := ""
	if isPrimary {
		gsPrefix = "/bs/"
	} else {
		gsPrefix = "/sto-googlecloudstorage/"
	}
	prefixes[gsPrefix] = map[string]interface{}{
		"handler": "storage-googlecloudstorage",
		"handlerArgs": map[string]interface{}{
			"bucket": bucket,
			"auth": map[string]interface{}{
				"client_id":     clientId,
				"client_secret": secret,
				"refresh_token": refreshToken,
				// If high-level config is for the common user then fullSyncOnStart = true
				// Then the default just works.
				//"fullSyncOnStart": true,
				//"blockingFullSyncOnStart": false
			},
		},
	}
	if isPrimary {
		// TODO: cacheBucket like s3CacheBucket?
		prefixes["/cache/"] = map[string]interface{}{
			"handler": "storage-filesystem",
			"handlerArgs": map[string]interface{}{
				"path": filepath.Join(tempDir(), "camli-cache"),
			},
		}
	} else {
		prefixes["/sync-to-googlecloudstorage/"] = map[string]interface{}{
			"handler": "sync",
			"handlerArgs": map[string]interface{}{
				"from": "/bs/",
				"to":   gsPrefix,
			},
		}
	}
	return nil
}
// genLowLevelPrefixes builds the core low-level handler map ("prefixes"):
// root, setup, status, the optional share handler, the JSON-signing
// helper, the local blobserver and cache (when params.blobPath is set),
// the optional flickr importer, and — when an indexer is configured —
// the sync handler, the replica/conditional blob roots, and the search
// handler.
func genLowLevelPrefixes(params *configPrefixesParams, ownerName string) (m jsonconfig.Obj) {
	m = make(jsonconfig.Obj)
	haveIndex := params.indexerPath != ""
	// With an index, writes go through the conditional root (which tees
	// schema blobs to the indexer) and the public key is stored via the
	// replicated root so the indexer sees it too.
	root := "/bs/"
	pubKeyDest := root
	if haveIndex {
		root = "/bs-and-maybe-also-index/"
		pubKeyDest = "/bs-and-index/"
	}
	rootArgs := map[string]interface{}{
		"stealth":    false,
		"blobRoot":   root,
		"statusRoot": "/status/",
	}
	if ownerName != "" {
		rootArgs["ownerName"] = ownerName
	}
	m["/"] = map[string]interface{}{
		"handler":     "root",
		"handlerArgs": rootArgs,
	}
	if haveIndex {
		setMap(m, "/", "handlerArgs", "searchRoot", "/my-search/")
	}
	m["/setup/"] = map[string]interface{}{
		"handler": "setup",
	}
	m["/status/"] = map[string]interface{}{
		"handler": "status",
	}
	if params.shareHandlerPath != "" {
		m[params.shareHandlerPath] = map[string]interface{}{
			"handler": "share",
			"handlerArgs": map[string]interface{}{
				"blobRoot": "/bs/",
			},
		}
	}
	m["/sighelper/"] = map[string]interface{}{
		"handler": "jsonsign",
		"handlerArgs": map[string]interface{}{
			"secretRing":    params.secretRing,
			"keyId":         params.keyId,
			"publicKeyDest": pubKeyDest,
		},
	}
	// Local blobserver (plain filestorage or diskpacked) plus its cache,
	// only when a local blob path is configured.
	storageType := "filesystem"
	if params.packBlobs {
		storageType = "diskpacked"
	}
	if params.blobPath != "" {
		m["/bs/"] = map[string]interface{}{
			"handler": "storage-" + storageType,
			"handlerArgs": map[string]interface{}{
				"path": params.blobPath,
			},
		}
		m["/cache/"] = map[string]interface{}{
			"handler": "storage-filesystem",
			"handlerArgs": map[string]interface{}{
				"path": filepath.Join(params.blobPath, "/cache"),
			},
		}
	}
	if params.flickr != "" {
		m["/importer-flickr/"] = map[string]interface{}{
			"apiKey": params.flickr,
		}
	}
	if haveIndex {
		syncArgs := map[string]interface{}{
			"from": "/bs/",
			"to":   params.indexerPath,
		}
		// TODO(mpl): Brad says the cond should be dest == /index-*.
		// But what about when dest is index-mem and we have a local disk;
		// don't we want to have an active synchandler to do the fullSyncOnStart?
		// Anyway, that condition works for now.
		if params.blobPath == "" {
			// When our primary blob store is remote (s3 or google cloud),
			// i.e not an efficient replication source, we do not want the
			// synchandler to mirror to the indexer. But we still want a
			// synchandler to provide the discovery for e.g tools like
			// camtool sync. See http://camlistore.org/issue/201
			syncArgs["idle"] = true
		} else {
			syncArgs["queue"] = map[string]interface{}{
				"type": "kv",
				"file": filepath.Join(params.blobPath, "sync-to-index-queue.kv"),
			}
		}
		m["/sync/"] = map[string]interface{}{
			"handler":     "sync",
			"handlerArgs": syncArgs,
		}
		m["/bs-and-index/"] = map[string]interface{}{
			"handler": "storage-replica",
			"handlerArgs": map[string]interface{}{
				"backends": []interface{}{"/bs/", params.indexerPath},
			},
		}
		m["/bs-and-maybe-also-index/"] = map[string]interface{}{
			"handler": "storage-cond",
			"handlerArgs": map[string]interface{}{
				"write": map[string]interface{}{
					"if":   "isSchema",
					"then": "/bs-and-index/",
					"else": "/bs/",
				},
				"read": "/bs/",
			},
		}
		m["/my-search/"] = map[string]interface{}{
			"handler": "search",
			"handlerArgs": map[string]interface{}{
				"index": params.indexerPath,
				"owner": params.searchOwner.String(),
			},
		}
	}
	return
}
// genLowLevelConfig returns a low-level config from a high-level config.
//
// It reads every supported high-level option, validates the combination
// (index choice, blob storage choice, TLS files), derives the signing
// public key from the identity secret ring, creates the blob cache
// directory, and then assembles the low-level "prefixes" handler map via
// genLowLevelPrefixes plus the add*Config helpers. The returned Config
// shares the input's configPath.
func genLowLevelConfig(conf *Config) (lowLevelConf *Config, err error) {
	var (
		baseURL    = conf.OptionalString("baseURL", "")
		listen     = conf.OptionalString("listen", "")
		auth       = conf.RequiredString("auth")
		keyId      = conf.RequiredString("identity")
		secretRing = conf.RequiredString("identitySecretRing")
		tlsOn      = conf.OptionalBool("https", false)
		tlsCert    = conf.OptionalString("HTTPSCertFile", "")
		tlsKey     = conf.OptionalString("HTTPSKeyFile", "")
		// Blob storage options
		blobPath           = conf.OptionalString("blobPath", "")
		packBlobs          = conf.OptionalBool("packBlobs", false)         // use diskpacked instead of the default filestorage
		s3                 = conf.OptionalString("s3", "")                 // "access_key_id:secret_access_key:bucket[:hostname]"
		googlecloudstorage = conf.OptionalString("googlecloudstorage", "") // "clientId:clientSecret:refreshToken:bucket"
		googledrive        = conf.OptionalString("googledrive", "")        // "clientId:clientSecret:refreshToken:parentId"
		// Enable the share handler. If true, and shareHandlerPath is empty,
		// then shareHandlerPath defaults to "/share/".
		shareHandler = conf.OptionalBool("shareHandler", false)
		// URL prefix for the share handler. If set, overrides shareHandler.
		shareHandlerPath = conf.OptionalString("shareHandlerPath", "")
		// Index options
		runIndex   = conf.OptionalBool("runIndex", true) // if false: no search, no UI, etc.
		dbname     = conf.OptionalString("dbname", "")   // for mysql, postgres, mongo
		mysql      = conf.OptionalString("mysql", "")
		postgres   = conf.OptionalString("postgres", "")
		memIndex   = conf.OptionalBool("memIndex", false)
		mongo      = conf.OptionalString("mongo", "")
		sqliteFile = conf.OptionalString("sqlite", "")
		kvFile     = conf.OptionalString("kvIndexFile", "")
		// Importer options
		flickr  = conf.OptionalString("flickr", "")
		_       = conf.OptionalList("replicateTo")
		publish = conf.OptionalObject("publish")
		// alternative source tree, to override the embedded ui and/or closure resources.
		// If non empty, the ui files will be expected at
		// sourceRoot + "/server/camlistored/ui" and the closure library at
		// sourceRoot + "/third_party/closure/lib"
		// Also used by the publish handler.
		sourceRoot = conf.OptionalString("sourceRoot", "")
		ownerName  = conf.OptionalString("ownerName", "")
	)
	if err := conf.Validate(); err != nil {
		return nil, err
	}
	obj := jsonconfig.Obj{}
	// TLS: either both cert and key files are given, or neither (in which
	// case a self-signed pair is generated at the default locations).
	if tlsOn {
		if (tlsCert != "") != (tlsKey != "") {
			return nil, errors.New("Must set both TLSCertFile and TLSKeyFile (or neither to generate a self-signed cert)")
		}
		if tlsCert != "" {
			obj["TLSCertFile"] = tlsCert
			obj["TLSKeyFile"] = tlsKey
		} else {
			obj["TLSCertFile"] = DefaultTLSCert
			obj["TLSKeyFile"] = DefaultTLSKey
		}
	}
	// baseURL may only carry scheme://host[:port]; any path is rejected.
	if baseURL != "" {
		u, err := url.Parse(baseURL)
		if err != nil {
			return nil, fmt.Errorf("Error parsing baseURL %q as a URL: %v", baseURL, err)
		}
		if u.Path != "" && u.Path != "/" {
			return nil, fmt.Errorf("baseURL can't have a path, only a scheme, host, and optional port.")
		}
		u.Path = ""
		obj["baseURL"] = u.String()
	}
	if listen != "" {
		obj["listen"] = listen
	}
	obj["https"] = tlsOn
	obj["auth"] = auth
	// Default dbname to "camli<username>" when not configured.
	username := ""
	if dbname == "" {
		username = osutil.Username()
		if username == "" {
			return nil, fmt.Errorf("USER (USERNAME on windows) env var not set; needed to define dbname")
		}
		dbname = "camli" + username
	}
	// Exactly one indexer must be selected when runIndex is true; none when
	// it is false. The winning option determines the indexer prefix.
	var indexerPath string
	numIndexers := numSet(mongo, mysql, postgres, sqliteFile, memIndex, kvFile)
	switch {
	case runIndex && numIndexers == 0:
		return nil, fmt.Errorf("Unless runIndex is set to false, you must specify an index option (kvIndexFile, mongo, mysql, postgres, sqlite, memIndex).")
	case runIndex && numIndexers != 1:
		return nil, fmt.Errorf("With runIndex set true, you can only pick exactly one indexer (mongo, mysql, postgres, sqlite, memIndex).")
	case !runIndex && numIndexers != 0:
		return nil, fmt.Errorf("With runIndex disabled, you can't specify any of mongo, mysql, postgres, sqlite, memIndex.")
	case mysql != "":
		indexerPath = "/index-mysql/"
	case postgres != "":
		indexerPath = "/index-postgres/"
	case mongo != "":
		indexerPath = "/index-mongo/"
	case sqliteFile != "":
		indexerPath = "/index-sqlite/"
	case kvFile != "":
		indexerPath = "/index-kv/"
	case memIndex:
		indexerPath = "/index-mem/"
	}
	// Derive the armored public key from the secret ring; its SHA-1 becomes
	// the search owner ref below.
	entity, err := jsonsign.EntityFromSecring(keyId, secretRing)
	if err != nil {
		return nil, err
	}
	armoredPublicKey, err := jsonsign.ArmoredPublicKey(entity)
	if err != nil {
		return nil, err
	}
	// Without a local blob path, exactly one remote primary store is required.
	nolocaldisk := blobPath == ""
	if nolocaldisk {
		if s3 == "" && googlecloudstorage == "" {
			return nil, errors.New("You need at least one of blobPath (for localdisk) or s3 or googlecloudstorage configured for a blobserver.")
		}
		if s3 != "" && googlecloudstorage != "" {
			return nil, errors.New("Using S3 as a primary storage and Google Cloud Storage as a mirror is not supported for now.")
		}
	}
	if shareHandler && shareHandlerPath == "" {
		shareHandlerPath = "/share/"
	}
	prefixesParams := &configPrefixesParams{
		secretRing:       secretRing,
		keyId:            keyId,
		indexerPath:      indexerPath,
		blobPath:         blobPath,
		packBlobs:        packBlobs,
		searchOwner:      blob.SHA1FromString(armoredPublicKey),
		shareHandlerPath: shareHandlerPath,
		flickr:           flickr,
	}
	prefixes := genLowLevelPrefixes(prefixesParams, ownerName)
	var cacheDir string
	if nolocaldisk {
		// Whether camlistored is run from EC2 or not, we use
		// a temp dir as the cache when primary storage is S3.
		// TODO(mpl): s3CacheBucket
		// See http://code.google.com/p/camlistore/issues/detail?id=85
		cacheDir = filepath.Join(tempDir(), "camli-cache")
	} else {
		cacheDir = filepath.Join(blobPath, "cache")
	}
	if !noMkdir {
		if err := os.MkdirAll(cacheDir, 0700); err != nil {
			return nil, fmt.Errorf("Could not create blobs cache dir %s: %v", cacheDir, err)
		}
	}
	// Publishing requires an index; addPublishedConfig registers one publish
	// handler per configured root and returns their prefixes for the UI.
	published := []interface{}{}
	if len(publish) > 0 {
		if !runIndex {
			return nil, fmt.Errorf("publishing requires an index")
		}
		published, err = addPublishedConfig(prefixes, publish, sourceRoot)
		if err != nil {
			return nil, fmt.Errorf("Could not generate config for published: %v", err)
		}
	}
	if runIndex {
		addUIConfig(prefixes, "/ui/", published, sourceRoot)
	}
	if mysql != "" {
		addMySQLConfig(prefixes, dbname, mysql)
	}
	if postgres != "" {
		addPostgresConfig(prefixes, dbname, postgres)
	}
	if mongo != "" {
		addMongoConfig(prefixes, dbname, mongo)
	}
	if sqliteFile != "" {
		addSQLiteConfig(prefixes, sqliteFile)
	}
	if kvFile != "" {
		addKVConfig(prefixes, kvFile)
	}
	if s3 != "" {
		if err := addS3Config(prefixesParams, prefixes, s3); err != nil {
			return nil, err
		}
	}
	if googledrive != "" {
		if err := addGoogleDriveConfig(prefixes, googledrive); err != nil {
			return nil, err
		}
	}
	if googlecloudstorage != "" {
		if err := addGoogleCloudStorageConfig(prefixes, googlecloudstorage); err != nil {
			return nil, err
		}
	}
	if indexerPath == "/index-mem/" {
		addMemindexConfig(prefixes)
	}
	obj["prefixes"] = (map[string]interface{})(prefixes)
	lowLevelConf = &Config{
		Obj:        obj,
		configPath: conf.configPath,
	}
	return lowLevelConf, nil
}
// numSet reports how many of the given values are "set": a non-empty
// string or a true bool. Any other dynamic type panics (programmer error).
func numSet(vv ...interface{}) (num int) {
	for _, vi := range vv {
		switch v := vi.(type) {
		case bool:
			if v {
				num++
			}
		case string:
			if len(v) > 0 {
				num++
			}
		default:
			panic("unknown type")
		}
	}
	return num
}
// setMap sets a (possibly nested) key in m: the variadic args are a path
// of string keys followed by the final value, so setMap(m, "a", "b", v)
// performs m["a"]["b"] = v. Intermediate values must already be
// map[string]interface{}; fewer than two args panics.
func setMap(m map[string]interface{}, v ...interface{}) {
	if len(v) < 2 {
		panic("too few args")
	}
	cur := m
	i := 0
	for ; i < len(v)-2; i++ {
		cur = cur[v[i].(string)].(map[string]interface{})
	}
	cur[v[i].(string)] = v[i+1]
}
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tpr
import (
"testing"
"k8s.io/kubernetes/pkg/api"
)
// Namespaces used by the tests in this file.
const (
	defaultCtxNS = "defaultTestNS" // Keyer's fallback namespace
	ctxNS        = "testNS"        // namespace carried by the request context
)
// TestKeyRoot checks that the key root comes from the context namespace
// when one is set, and falls back to DefaultNamespace otherwise.
func TestKeyRoot(t *testing.T) {
	ctx := api.NewContext()
	ctx = api.WithNamespace(ctx, ctxNS)
	keyer := Keyer{DefaultNamespace: defaultCtxNS}
	root := keyer.KeyRoot(ctx)
	if root != ctxNS {
		t.Fatalf("key root '%s' wasn't expected '%s'", root, ctxNS)
	}
	// Fresh context with no namespace: expect the default.
	// NOTE(review): this calls NewRoot while the first case calls KeyRoot —
	// confirm NewRoot is the intended method for the fallback path.
	ctx = api.NewContext()
	root = keyer.NewRoot(ctx)
	if root != keyer.DefaultNamespace {
		t.Fatalf("key root '%s' wasn't expected '%s'", root, keyer.DefaultNamespace)
	}
}
// TestKey is not yet implemented.
func TestKey(t *testing.T) {
	t.Skip("TODO")
}
// TestNamespaceAndNameFromKey is not yet implemented.
func TestNamespaceAndNameFromKey(t *testing.T) {
	t.Skip("TODO")
}
adding more tests
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tpr
import (
"testing"
"k8s.io/kubernetes/pkg/api"
)
// Fixtures shared by the tests in this file.
const (
	defaultCtxNS = "defaultTestNS" // Keyer's fallback namespace
	ctxNS        = "testNS"        // namespace carried by the request context
	separator    = "/"             // key segment separator
	resourceName = "myResource"    // resource name under test
)
// TestKeyRoot checks that the key root comes from the context namespace
// when one is set, and falls back to DefaultNamespace otherwise.
func TestKeyRoot(t *testing.T) {
	ctx := api.NewContext()
	ctx = api.WithNamespace(ctx, ctxNS)
	keyer := Keyer{DefaultNamespace: defaultCtxNS}
	root := keyer.KeyRoot(ctx)
	if root != ctxNS {
		t.Fatalf("key root '%s' wasn't expected '%s'", root, ctxNS)
	}
	// Fresh context with no namespace: expect the default.
	// NOTE(review): this calls NewRoot while the first case calls KeyRoot —
	// confirm NewRoot is the intended method for the fallback path.
	ctx = api.NewContext()
	root = keyer.NewRoot(ctx)
	if root != keyer.DefaultNamespace {
		t.Fatalf("key root '%s' wasn't expected '%s'", root, keyer.DefaultNamespace)
	}
}
// TestKey checks that Keyer.Key joins the context namespace and the
// resource name with the configured separator.
func TestKey(t *testing.T) {
	ctx := api.NewContext()
	ctx = api.WithNamespace(ctx, ctxNS)
	keyer := Keyer{Separator: separator, ResourceName: resourceName}
	key := keyer.Key(ctx, resourceName)
	expected := ctxNS + separator + resourceName
	if key != expected {
		// Fix: the format arguments were previously inside the quoted
		// string ("... '%s', key, expected"), so the failure message never
		// showed the actual values (go vet: Fatalf call with arguments but
		// no formatting directives matched).
		t.Fatalf("key was '%s', not expected '%s'", key, expected)
	}
}
// TestNamespaceAndNameFromKey splits "<ns><sep><name>" keys and checks
// both the two-part form and the namespace-only form (which is expected
// to yield an empty name).
func TestNamespaceAndNameFromKey(t *testing.T) {
	const testName = "testName"
	keyer := Keyer{Separator: separator, ResourceName: resourceName}
	key := ctxNS + separator + testName
	ns, name, err := keyer.NamespaceAndNameFromKey(key)
	if err != nil {
		t.Fatalf("unexpected error %s", err)
	}
	if ns != ctxNS {
		t.Fatalf("namespace was '%s', not expected '%s'", ns, ctxNS)
	}
	if name != testName {
		t.Fatalf("name was '%s', not expected '%s'", name, testName)
	}
	// A key with no separator is treated as a bare namespace.
	key = ctxNS
	ns, name, err = keyer.NamespaceAndNameFromKey(key)
	if err != nil {
		t.Fatalf("unexpected error %s", err)
	}
	if ns != ctxNS {
		t.Fatalf("namespace was '%s', not expected '%s'", ns, ctxNS)
	}
	if name != "" {
		t.Fatalf("expected empty name, got '%s'", name)
	}
}
|
package system
// IsProcessAlive returns true if process with a given pid is running.
//
// Not implemented on this platform: liveness is never reported,
// regardless of pid.
func IsProcessAlive(pid int) bool {
	// TODO Windows containerd. Not sure this is needed
	// p, err := os.FindProcess(pid)
	// if err == nil {
	// return true
	// }
	return false
}
// KillProcess force-stops a process.
//
// Not implemented on this platform: the call is a no-op.
func KillProcess(pid int) {
	// TODO Windows containerd. Not sure this is needed
	// p, err := os.FindProcess(pid)
	// if err == nil {
	// p.Kill()
	// }
}
Windows: Remove unused process_windows.go
Signed-off-by: John Howard <50ae662f5fcde39b7d1ed786133b7c61a25411dd@microsoft.com>
|
package system // import "github.com/docker/docker/pkg/system"
import (
"unsafe"
"github.com/sirupsen/logrus"
)
// SID/privilege string constants, kept only for backward compatibility.
const (
	// Deprecated: use github.com/docker/pkg/idtools.SeTakeOwnershipPrivilege
	SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege"
	// Deprecated: use github.com/docker/pkg/idtools.ContainerAdministratorSidString
	ContainerAdministratorSidString = "S-1-5-93-2-1"
	// Deprecated: use github.com/docker/pkg/idtools.ContainerUserSidString
	ContainerUserSidString = "S-1-5-93-2-2"
)

// procGetVersionExW lazily resolves kernel32!GetVersionExW.
var procGetVersionExW = modkernel32.NewProc("GetVersionExW")
// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-osversioninfoexa
// TODO: use golang.org/x/sys/windows.OsVersionInfoEx (needs OSVersionInfoSize to be exported)
type osVersionInfoEx struct {
	OSVersionInfoSize uint32 // must be set to the struct's byte size before calling GetVersionExW
	MajorVersion      uint32
	MinorVersion     uint32
	BuildNumber      uint32
	PlatformID       uint32
	CSDVersion       [128]uint16 // service-pack string, UTF-16
	ServicePackMajor uint16
	ServicePackMinor uint16
	SuiteMask        uint16
	ProductType      byte // e.g. VER_NT_WORKSTATION; checked by IsWindowsClient
	Reserve          byte
}
// IsWindowsClient returns true if the SKU is client. It returns false on
// Windows server, or if an error occurred when making the GetVersionExW
// syscall.
func IsWindowsClient() bool {
	// 284 is the byte size of osVersionInfoEx (5*4 + 128*2 + 3*2 + 2);
	// the API requires OSVersionInfoSize to be pre-set to it.
	osviex := &osVersionInfoEx{OSVersionInfoSize: 284}
	r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex)))
	if r1 == 0 {
		// A zero return means the call failed; err carries the Win32 error.
		logrus.WithError(err).Warn("GetVersionExW failed - assuming server SKU")
		return false
	}
	// VER_NT_WORKSTATION, see https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-osversioninfoexa
	const verNTWorkstation = 0x00000001 // VER_NT_WORKSTATION
	return osviex.ProductType == verNTWorkstation
}
pkg/system: rewrite IsWindowsClient() using golang.org/x/sys/windows
Looks like we should be able to use the implementation from x/sys/windows.
Signed-off-by: Sebastiaan van Stijn <64b2b6d12bfe4baae7dad3d018f8cbf6b0e7a044@gone.nl>
package system // import "github.com/docker/docker/pkg/system"
import "golang.org/x/sys/windows"
// SID/privilege string constants, kept only for backward compatibility.
const (
	// Deprecated: use github.com/docker/pkg/idtools.SeTakeOwnershipPrivilege
	SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege"
	// Deprecated: use github.com/docker/pkg/idtools.ContainerAdministratorSidString
	ContainerAdministratorSidString = "S-1-5-93-2-1"
	// Deprecated: use github.com/docker/pkg/idtools.ContainerUserSidString
	ContainerUserSidString = "S-1-5-93-2-2"
)

// VER_NT_WORKSTATION, see https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-osversioninfoexa
const verNTWorkstation = 0x00000001 // VER_NT_WORKSTATION
// IsWindowsClient returns true if the SKU is client. It returns false on
// Windows server, or if an error occurred when making the GetVersionExW
// syscall.
func IsWindowsClient() bool {
	osv := windows.RtlGetVersion()
	if osv == nil {
		return false
	}
	return osv.ProductType == verNTWorkstation
}
|
package main
import (
"context"
"encoding/json"
"errors"
"flag"
"fmt"
"log"
"net"
"net/http"
"os"
"os/exec"
"strings"
"time"
)
// Listen for Docker image, container, and volume delete events and run a command after a delay.
// Event carries the two fields of a Docker /events message that this
// program inspects: the kind of object and the action performed on it.
type Event struct {
	Type   string
	Action string
}

// String renders the event as "Type: <type>, Action: <action>".
func (ev Event) String() string {
	const layout = "Type: %s, Action: %s"
	return fmt.Sprintf(layout, ev.Type, ev.Action)
}
// DelayedAction runs a function in the future at least once after every call
// to AtLeastOnceMore.
type DelayedAction struct {
c chan interface{}
}
// NewDelayedAction creates a delayed action which guarantees to call f
// at most d after a call to AtLeastOnceMore.
func NewDelayedAction(d time.Duration, f func()) *DelayedAction {
c := make(chan interface{})
go func() {
for {
<-c
time.Sleep(d)
f()
}
}()
return &DelayedAction{
c: c,
}
}
// AtLeastOnceMore guarantees to call f at least once more within the originally
// specified duration.
func (a *DelayedAction) AtLeastOnceMore() {
select {
case a.c <- nil:
// Started a fresh countdown
default:
// There is already a countdown in progress
}
}
// main parses the command line, builds the DelayedAction that runs the user's
// command, and then tails the Docker events stream forever, re-arming the
// action whenever an image, container, or volume is removed.
func main() {
	// Example: after-image-deletes --delay 10s -- /sbin/fstrim /var
	delay := flag.Duration("delay", time.Second*10, "maximum time to wait after an image delete before triggering")
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "%s: run a command after images are deleted by Docker.\n\n", os.Args[0])
		fmt.Fprintf(os.Stderr, "Example usage:\n")
		fmt.Fprintf(os.Stderr, "%s --delay 10s -- /sbin/fstrim /var\n", os.Args[0])
		fmt.Fprintf(os.Stderr, " -- run the command /sbin/fstrim /var at most 10s after an image is deleted.\n")
		fmt.Fprintf(os.Stderr, " This would allow large batches of image deletions to happen and amortise the\n")
		fmt.Fprintf(os.Stderr, " cost of the TRIM operation.\n\n")
		fmt.Fprintf(os.Stderr, "Arguments:\n")
		flag.PrintDefaults()
	}
	flag.Parse()
	// Everything after the flags (conventionally after "--") is the command to run.
	toRun := flag.Args()
	if len(toRun) == 0 {
		log.Fatalf("Please supply a program to run. For usage add -h")
	}
	log.Printf("I will run %s around %.1f seconds after an image is deleted", strings.Join(toRun, " "), delay.Seconds())
	action := NewDelayedAction(*delay, func() {
		cmdline := strings.Join(toRun, " ")
		log.Printf("Running %s", cmdline)
		cmd := exec.Command(toRun[0], toRun[1:]...)
		err := cmd.Run()
		if err != nil {
			// Child exited non-zero: report its stderr and keep serving events.
			if ee, ok := err.(*exec.ExitError); ok {
				log.Printf("%s failed: %s", cmdline, string(ee.Stderr))
				return
			}
			log.Printf("Unexpected failure while running: %s: %#v", cmdline, err)
		}
	})
	// Connect to Docker over the Unix domain socket; the Transport dials the
	// socket regardless of the host named in the request URL.
	httpc := http.Client{
		Transport: &http.Transport{
			DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
				return net.Dial("unix", "/var/run/docker.sock")
			},
		},
	}
RECONNECT:
	// (Re-)connect forever, reading events
	for {
		res, err := httpc.Get("http://unix/v1.24/events")
		if err != nil {
			log.Printf("Failed to connect to the Docker daemon: will retry in 1s")
			time.Sleep(time.Second)
			continue RECONNECT
		}
		// Check the server identifies as Docker. This will provide an early failure
		// if we're pointed at completely the wrong address.
		server := res.Header.Get("Server")
		if !strings.HasPrefix(server, "Docker") {
			log.Printf("Server identified as %s -- is this really Docker?", server)
			panic(errors.New("Remote server is not Docker"))
		}
		log.Printf("(Re-)connected to the Docker daemon")
		// The events endpoint streams one JSON object per event; decode them
		// one at a time off the long-lived response body.
		d := json.NewDecoder(res.Body)
		var event Event
		for {
			err = d.Decode(&event)
			if err != nil {
				// Stream broken (daemon restart, network error): close the
				// body and fall back to the reconnect loop.
				log.Printf("Failed to read event: will retry in 1s")
				res.Body.Close()
				time.Sleep(time.Second)
				continue RECONNECT
			}
			if event.Action == "delete" && event.Type == "image" {
				log.Printf("An image has been removed: will run the action at least once more")
				action.AtLeastOnceMore()
			} else if event.Action == "destroy" && event.Type == "container" {
				log.Printf("A container has been removed: will run the action at least once more")
				action.AtLeastOnceMore()
			} else if event.Action == "destroy" && event.Type == "volume" {
				log.Printf("A volume has been removed: will run the action at least once more")
				action.AtLeastOnceMore()
			}
		}
	}
}
Add more event types to trigger fstrim
Signed-off-by: Anca Iordache <39812d7fd70a462e93ee0815a2f4c75f6a164b2e@docker.com>
package main
import (
"context"
"encoding/json"
"errors"
"flag"
"fmt"
"log"
"net"
"net/http"
"os"
"os/exec"
"strings"
"time"
)
// Listen for Docker image, container, and volume delete events and run a command after a delay.
// Event carries the two fields of a Docker /events message that this
// program inspects: the kind of object and the action performed on it.
type Event struct {
	Type   string
	Action string
}

// String renders the event as "Type: <type>, Action: <action>".
func (ev Event) String() string {
	const layout = "Type: %s, Action: %s"
	return fmt.Sprintf(layout, ev.Type, ev.Action)
}
// DelayedAction runs a function in the future at least once after every call
// to AtLeastOnceMore.
type DelayedAction struct {
c chan interface{}
}
// NewDelayedAction creates a delayed action which guarantees to call f
// at most d after a call to AtLeastOnceMore.
func NewDelayedAction(d time.Duration, f func()) *DelayedAction {
c := make(chan interface{})
go func() {
for {
<-c
time.Sleep(d)
f()
}
}()
return &DelayedAction{
c: c,
}
}
// AtLeastOnceMore guarantees to call f at least once more within the originally
// specified duration.
func (a *DelayedAction) AtLeastOnceMore() {
select {
case a.c <- nil:
// Started a fresh countdown
default:
// There is already a countdown in progress
}
}
// main parses the command line, builds the DelayedAction that runs the user's
// command, and then tails the Docker events stream forever, re-arming the
// action whenever an image, container, volume, or build-cache entry is removed
// or pruned.
func main() {
	// Example: after-image-deletes --delay 10s -- /sbin/fstrim /var
	delay := flag.Duration("delay", time.Second*10, "maximum time to wait after an image delete before triggering")
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "%s: run a command after images are deleted by Docker.\n\n", os.Args[0])
		fmt.Fprintf(os.Stderr, "Example usage:\n")
		fmt.Fprintf(os.Stderr, "%s --delay 10s -- /sbin/fstrim /var\n", os.Args[0])
		fmt.Fprintf(os.Stderr, " -- run the command /sbin/fstrim /var at most 10s after an image is deleted.\n")
		fmt.Fprintf(os.Stderr, " This would allow large batches of image deletions to happen and amortise the\n")
		fmt.Fprintf(os.Stderr, " cost of the TRIM operation.\n\n")
		fmt.Fprintf(os.Stderr, "Arguments:\n")
		flag.PrintDefaults()
	}
	flag.Parse()
	// Everything after the flags (conventionally after "--") is the command to run.
	toRun := flag.Args()
	if len(toRun) == 0 {
		log.Fatalf("Please supply a program to run. For usage add -h")
	}
	log.Printf("I will run %s around %.1f seconds after an image is deleted", strings.Join(toRun, " "), delay.Seconds())
	action := NewDelayedAction(*delay, func() {
		cmdline := strings.Join(toRun, " ")
		log.Printf("Running %s", cmdline)
		cmd := exec.Command(toRun[0], toRun[1:]...)
		err := cmd.Run()
		if err != nil {
			// Child exited non-zero: report its stderr and keep serving events.
			if ee, ok := err.(*exec.ExitError); ok {
				log.Printf("%s failed: %s", cmdline, string(ee.Stderr))
				return
			}
			log.Printf("Unexpected failure while running: %s: %#v", cmdline, err)
		}
	})
	// Connect to Docker over the Unix domain socket; the Transport dials the
	// socket regardless of the host named in the request URL.
	httpc := http.Client{
		Transport: &http.Transport{
			DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
				return net.Dial("unix", "/var/run/docker.sock")
			},
		},
	}
RECONNECT:
	// (Re-)connect forever, reading events
	for {
		res, err := httpc.Get("http://unix/v1.24/events")
		if err != nil {
			log.Printf("Failed to connect to the Docker daemon: will retry in 1s")
			time.Sleep(time.Second)
			continue RECONNECT
		}
		// Check the server identifies as Docker. This will provide an early failure
		// if we're pointed at completely the wrong address.
		server := res.Header.Get("Server")
		if !strings.HasPrefix(server, "Docker") {
			log.Printf("Server identified as %s -- is this really Docker?", server)
			panic(errors.New("Remote server is not Docker"))
		}
		log.Printf("(Re-)connected to the Docker daemon")
		// The events endpoint streams one JSON object per event; decode them
		// one at a time off the long-lived response body.
		d := json.NewDecoder(res.Body)
		var event Event
		for {
			err = d.Decode(&event)
			if err != nil {
				// Stream broken (daemon restart, network error): close the
				// body and fall back to the reconnect loop.
				log.Printf("Failed to read event: will retry in 1s")
				res.Body.Close()
				time.Sleep(time.Second)
				continue RECONNECT
			}
			// Explicit deletions arrive as delete/destroy; bulk cleanup via
			// `docker ... prune` arrives as prune events per object type.
			if event.Action == "delete" && event.Type == "image" {
				log.Printf("An image has been removed: will run the action at least once more")
				action.AtLeastOnceMore()
			} else if event.Action == "destroy" && event.Type == "container" {
				log.Printf("A container has been removed: will run the action at least once more")
				action.AtLeastOnceMore()
			} else if event.Action == "destroy" && event.Type == "volume" {
				log.Printf("A volume has been removed: will run the action at least once more")
				action.AtLeastOnceMore()
			} else if event.Action == "prune" && event.Type == "image" {
				log.Printf("Dangling images have been removed: will run the action at least once more")
				action.AtLeastOnceMore()
			} else if event.Action == "prune" && event.Type == "container" {
				log.Printf("Stopped containers have been removed: will run the action at least once more")
				action.AtLeastOnceMore()
			} else if event.Action == "prune" && event.Type == "volume" {
				log.Printf("Unused volumes have been removed: will run the action at least once more")
				action.AtLeastOnceMore()
			} else if event.Action == "prune" && event.Type == "builder" {
				log.Printf("Dangling build cache has been removed: will run the action at least once more")
				action.AtLeastOnceMore()
			}
		}
	}
}
|
// +build coverage
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package coverage provides tools for coverage-instrumented binaries to collect and
// flush coverage information.
package coverage
import (
"flag"
"fmt"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog"
"os"
"testing"
"time"
)
// coverageFile is the destination coverage profile path, chosen in InitCoverage.
var coverageFile string

// tempCoveragePath returns a temporary file to write coverage information to.
// The file is in the same directory as the destination, ensuring os.Rename will work.
func tempCoveragePath() string {
	const suffix = ".tmp"
	return coverageFile + suffix
}
// InitCoverage is called from the dummy unit test to prepare Go's coverage framework.
// Clients should never need to call it.
//
// name is used to build the default output path when KUBE_COVERAGE_FILE is unset.
// It also starts a goroutine that flushes coverage every flush interval, forever.
func InitCoverage(name string) {
	// We read the coverage destination in from the KUBE_COVERAGE_FILE env var,
	// or if it's empty we just use a default in /tmp
	coverageFile = os.Getenv("KUBE_COVERAGE_FILE")
	if coverageFile == "" {
		coverageFile = "/tmp/k8s-" + name + ".cov"
	}
	fmt.Println("Dumping coverage information to " + coverageFile)
	// Flush every 5s unless overridden by KUBE_COVERAGE_FLUSH_INTERVAL.
	flushInterval := 5 * time.Second
	requestedInterval := os.Getenv("KUBE_COVERAGE_FLUSH_INTERVAL")
	if requestedInterval != "" {
		if duration, err := time.ParseDuration(requestedInterval); err == nil {
			flushInterval = duration
		} else {
			panic("Invalid KUBE_COVERAGE_FLUSH_INTERVAL value; try something like '30s'.")
		}
	}
	// Set up the unit test framework with the required arguments to activate test coverage.
	// NOTE(review): Parse's error is ignored; the arguments here are fixed, so it
	// should not fail — confirm.
	flag.CommandLine.Parse([]string{"-test.coverprofile", tempCoveragePath()})
	// Begin periodic logging
	go wait.Forever(FlushCoverage, flushInterval)
}
// FlushCoverage flushes collected coverage information to disk.
// The destination file is configured at startup and cannot be changed.
// Calling this function also sends a line like "coverage: 5% of statements" to stdout.
func FlushCoverage() {
	// We're not actually going to run any tests, but we need Go to think we did so it writes
	// coverage information to disk. To achieve this, we create a bunch of empty test suites and
	// have it "run" them.
	tests := []testing.InternalTest{}
	benchmarks := []testing.InternalBenchmark{}
	examples := []testing.InternalExample{}
	var deps fakeTestDeps
	dummyRun := testing.MainStart(deps, tests, benchmarks, examples)
	dummyRun.Run()
	// Once it writes to the temporary path, we move it to the intended path.
	// This gets us atomic updates from the perspective of another process trying to access
	// the file.
	if err := os.Rename(tempCoveragePath(), coverageFile); err != nil {
		// The rename is FROM the temp path TO the final path (the arguments were
		// previously swapped in this message); include the error for debugging.
		klog.Errorf("Couldn't move coverage file from %s to %s: %v", tempCoveragePath(), coverageFile, err)
	}
}
[pkg/util/coverage]: group imports for readability
Signed-off-by: Zhou Peng <516b9783fca517eecbd1d064da2d165310b19759@ctriple.cn>
// +build coverage
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package coverage provides tools for coverage-instrumented binaries to collect and
// flush coverage information.
package coverage
import (
"flag"
"fmt"
"os"
"testing"
"time"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog"
)
// coverageFile is the destination coverage profile path, chosen in InitCoverage.
var coverageFile string

// tempCoveragePath returns a temporary file to write coverage information to.
// The file is in the same directory as the destination, ensuring os.Rename will work.
func tempCoveragePath() string {
	const suffix = ".tmp"
	return coverageFile + suffix
}
// InitCoverage is called from the dummy unit test to prepare Go's coverage framework.
// Clients should never need to call it.
//
// name is used to build the default output path when KUBE_COVERAGE_FILE is unset.
// It also starts a goroutine that flushes coverage every flush interval, forever.
func InitCoverage(name string) {
	// We read the coverage destination in from the KUBE_COVERAGE_FILE env var,
	// or if it's empty we just use a default in /tmp
	coverageFile = os.Getenv("KUBE_COVERAGE_FILE")
	if coverageFile == "" {
		coverageFile = "/tmp/k8s-" + name + ".cov"
	}
	fmt.Println("Dumping coverage information to " + coverageFile)
	// Flush every 5s unless overridden by KUBE_COVERAGE_FLUSH_INTERVAL.
	flushInterval := 5 * time.Second
	requestedInterval := os.Getenv("KUBE_COVERAGE_FLUSH_INTERVAL")
	if requestedInterval != "" {
		if duration, err := time.ParseDuration(requestedInterval); err == nil {
			flushInterval = duration
		} else {
			panic("Invalid KUBE_COVERAGE_FLUSH_INTERVAL value; try something like '30s'.")
		}
	}
	// Set up the unit test framework with the required arguments to activate test coverage.
	// NOTE(review): Parse's error is ignored; the arguments here are fixed, so it
	// should not fail — confirm.
	flag.CommandLine.Parse([]string{"-test.coverprofile", tempCoveragePath()})
	// Begin periodic logging
	go wait.Forever(FlushCoverage, flushInterval)
}
// FlushCoverage flushes collected coverage information to disk.
// The destination file is configured at startup and cannot be changed.
// Calling this function also sends a line like "coverage: 5% of statements" to stdout.
func FlushCoverage() {
	// We're not actually going to run any tests, but we need Go to think we did so it writes
	// coverage information to disk. To achieve this, we create a bunch of empty test suites and
	// have it "run" them.
	tests := []testing.InternalTest{}
	benchmarks := []testing.InternalBenchmark{}
	examples := []testing.InternalExample{}
	var deps fakeTestDeps
	dummyRun := testing.MainStart(deps, tests, benchmarks, examples)
	dummyRun.Run()
	// Once it writes to the temporary path, we move it to the intended path.
	// This gets us atomic updates from the perspective of another process trying to access
	// the file.
	if err := os.Rename(tempCoveragePath(), coverageFile); err != nil {
		// The rename is FROM the temp path TO the final path (the arguments were
		// previously swapped in this message); include the error for debugging.
		klog.Errorf("Couldn't move coverage file from %s to %s: %v", tempCoveragePath(), coverageFile, err)
	}
}
|
// +build linux
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mount
import (
"bufio"
"fmt"
"hash/adler32"
"io"
"os"
"os/exec"
"path"
"strconv"
"strings"
"syscall"
"github.com/golang/glog"
utilExec "k8s.io/kubernetes/pkg/util/exec"
)
const (
	// How many times to retry for a consistent read of /proc/mounts.
	maxListTries = 3
	// Number of fields per line in /proc/mounts as per the fstab man page.
	expectedNumFieldsPerLine = 6
	// Location of the mount file to use
	procMountsPath = "/proc/mounts"
)

// fsck(8) exit statuses interpreted by formatAndMount.
const (
	// 'fsck' found errors and corrected them
	fsckErrorsCorrected = 1
	// 'fsck' found errors but exited without correcting them
	fsckErrorsUncorrected = 4
)
// Mounter provides the default implementation of mount.Interface
// for the linux platform. This implementation assumes that the
// kubelet is running in the host's root mount namespace.
type Mounter struct {
	// mounterPath is the mount utility invoked by doMount.
	mounterPath string
	// mounterRootfsPath is a prefix applied to source paths handed to the
	// mount utility (see Mount), for mounters running in a container with the
	// host filesystem available under this path.
	mounterRootfsPath string
}
// Mount mounts source to target as fstype with given options. 'source' and 'fstype' must
// be an empty string in case it's not required, e.g. for remount, or for auto filesystem
// type, where kernel handles fs type for you. The mount 'options' is a list of options,
// currently come from mount(8), e.g. "ro", "remount", "bind", etc. If no more option is
// required, call Mount with an empty string list or nil.
// Update source path to include a root filesystem override to make a containerized mounter (specified via `mounterPath`) work.
func (mounter *Mounter) Mount(source string, target string, fstype string, options []string) error {
	bind, bindRemountOpts := isBind(options)
	if bind {
		// For bind mounts both endpoints are host paths, so both source and
		// target must be resolved against the rootfs override.
		err := doMount(mounter.mounterPath, path.Join(mounter.mounterRootfsPath, source), path.Join(mounter.mounterRootfsPath, target), fstype, []string{"bind"})
		if err != nil {
			return err
		}
		// Bind mounts ignore most options on the initial mount; apply them
		// with a follow-up remount.
		return doMount(mounter.mounterPath, path.Join(mounter.mounterRootfsPath, source), path.Join(mounter.mounterRootfsPath, target), fstype, bindRemountOpts)
	}
	// Non-bind sources (devices, network shares) are not host directory paths
	// and must not be prefixed; only the target directory lives in the rootfs.
	return doMount(mounter.mounterPath, source, path.Join(mounter.mounterRootfsPath, target), fstype, options)
}
// isBind detects whether a bind mount is being requested and makes the remount options to
// use in case of bind mount, due to the fact that bind mount doesn't respect mount options.
// The list equals:
//   options - 'bind' + 'remount' (no duplicate)
func isBind(options []string) (bool, []string) {
	bindRemountOpts := []string{"remount"}
	bind := false
	// Ranging over a nil/empty slice is a no-op, so no length guard is needed.
	// Note: `break` statements inside the switch were redundant (in Go, a case
	// falls out of the switch automatically) and have been removed.
	for _, option := range options {
		switch option {
		case "bind":
			bind = true
		case "remount":
			// Dropped here; "remount" is always the first element of the result.
		default:
			bindRemountOpts = append(bindRemountOpts, option)
		}
	}
	return bind, bindRemountOpts
}
// doMount runs the mount command.
//
// mountCmd is executed directly with arguments assembled by makeMountArgs.
// On failure the combined stdout/stderr of the command is included in both
// the log line and the returned error.
func doMount(mountCmd string, source string, target string, fstype string, options []string) error {
	glog.V(4).Infof("Mounting %s %s %s %v with command: %q", source, target, fstype, options, mountCmd)
	mountArgs := makeMountArgs(source, target, fstype, options)
	glog.V(4).Infof("Mounting cmd (%s) with arguments (%s)", mountCmd, mountArgs)
	command := exec.Command(mountCmd, mountArgs...)
	output, err := command.CombinedOutput()
	if err != nil {
		glog.Errorf("Mount failed: %v\nMounting command: %s\nMounting arguments: %s %s %s %v\nOutput: %s\n", err, mountCmd, source, target, fstype, options, string(output))
		return fmt.Errorf("mount failed: %v\nMounting command: %s\nMounting arguments: %s %s %s %v\nOutput: %s\n",
			err, mountCmd, source, target, fstype, options, string(output))
	}
	// err is nil here (the error case returned above).
	return err
}
// makeMountArgs makes the arguments to the mount(8) command.
// The produced argv has the shape: [-t $fstype] [-o $options] [$source] $target
// where every bracketed group is omitted when its input is empty.
func makeMountArgs(source, target, fstype string, options []string) []string {
	var args []string
	if fstype != "" {
		args = append(args, "-t", fstype)
	}
	if len(options) > 0 {
		args = append(args, "-o", strings.Join(options, ","))
	}
	if source != "" {
		args = append(args, source)
	}
	return append(args, target)
}
// Unmount unmounts the target.
// It shells out to umount(8); on failure the returned error includes the
// command's combined output.
func (mounter *Mounter) Unmount(target string) error {
	glog.V(4).Infof("Unmounting %s", target)
	command := exec.Command("umount", target)
	output, err := command.CombinedOutput()
	if err != nil {
		return fmt.Errorf("Unmount failed: %v\nUnmounting arguments: %s\nOutput: %s\n", err, target, string(output))
	}
	return nil
}
// List returns a list of all mounted filesystems.
// It parses /proc/mounts via listProcMounts.
func (*Mounter) List() ([]MountPoint, error) {
	return listProcMounts(procMountsPath)
}
// IsLikelyNotMountPoint determines if a directory is not a mountpoint.
// It is fast but not necessarily ALWAYS correct. If the path is in fact
// a bind mount from one part of a mount to another it will not be detected.
// mkdir /tmp/a /tmp/b; mount --bind /tmp/a /tmp/b; IsLikelyNotMountPoint("/tmp/b")
// will return true. When in fact /tmp/b is a mount point. If this situation
// is of interest to you, don't use this function...
func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) {
	return IsNotMountPoint(file)
}
// IsNotMountPoint reports whether file does NOT appear to be a mountpoint, by
// comparing the device of file against the device of its parent directory.
// A bind mount within the same filesystem is not detected by this check.
// On stat failure it reports (true, err).
func IsNotMountPoint(file string) (bool, error) {
	fileStat, err := os.Stat(file)
	if err != nil {
		return true, err
	}
	parentStat, err := os.Lstat(file + "/..")
	if err != nil {
		return true, err
	}
	// A mountpoint sits on a different device than its parent directory.
	sameDevice := fileStat.Sys().(*syscall.Stat_t).Dev == parentStat.Sys().(*syscall.Stat_t).Dev
	return sameDevice, nil
}
// DeviceOpened checks if block device in use by calling Open with O_EXCL flag.
// If pathname is not a device, log and return false with nil error.
// If open returns errno EBUSY, return true with nil error.
// If open returns nil, return false with nil error.
// Otherwise, return false with error
func (mounter *Mounter) DeviceOpened(pathname string) (bool, error) {
	return exclusiveOpenFailsOnDevice(pathname)
}
// PathIsDevice uses FileInfo returned from os.Stat to check if path refers
// to a device. A nonexistent path reports (false, nil).
func (mounter *Mounter) PathIsDevice(pathname string) (bool, error) {
	return pathIsDevice(pathname)
}
// exclusiveOpenFailsOnDevice reports whether opening pathname with O_EXCL
// fails with EBUSY, which indicates the block device is already in use.
// Non-device paths report (false, nil); other open errors are returned.
func exclusiveOpenFailsOnDevice(pathname string) (bool, error) {
	isDevice, err := pathIsDevice(pathname)
	if err != nil {
		return false, fmt.Errorf(
			"PathIsDevice failed for path %q: %v",
			pathname,
			err)
	}
	if !isDevice {
		glog.Errorf("Path %q is not refering to a device.", pathname)
		return false, nil
	}
	fd, errno := syscall.Open(pathname, syscall.O_RDONLY|syscall.O_EXCL, 0)
	// If the device is in use, open will return an invalid fd.
	// When this happens, it is expected that Close will fail and throw an error.
	// NOTE(review): this defer also runs when Open failed (fd invalid); the
	// Close error is deliberately ignored in that case.
	defer syscall.Close(fd)
	if errno == nil {
		// device not in use
		return false, nil
	} else if errno == syscall.EBUSY {
		// device is in use
		return true, nil
	}
	// error during call to Open
	return false, errno
}
// pathIsDevice reports whether pathname refers to a device node.
// A nonexistent path is simply "not a device" (false, nil); any other stat
// failure is returned as an error.
func pathIsDevice(pathname string) (bool, error) {
	info, err := os.Stat(pathname)
	switch {
	case os.IsNotExist(err):
		return false, nil
	case err != nil:
		return false, err
	}
	return info.Mode()&os.ModeDevice != 0, nil
}
// GetDeviceNameFromMount: given a mount point, find the device name from its
// global mount point (delegates to the package-level helper).
func (mounter *Mounter) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) {
	return getDeviceNameFromMount(mounter, mountPath, pluginDir)
}
// listProcMounts parses mountFilePath (normally /proc/mounts) into MountPoints.
// Because the kernel can change the mount table between reads, it re-reads the
// file until two consecutive reads produce the same checksum (up to
// maxListTries attempts), guaranteeing a consistent snapshot.
func listProcMounts(mountFilePath string) ([]MountPoint, error) {
	// First read establishes the baseline hash without parsing.
	hash1, err := readProcMounts(mountFilePath, nil)
	if err != nil {
		return nil, err
	}
	for i := 0; i < maxListTries; i++ {
		mps := []MountPoint{}
		hash2, err := readProcMounts(mountFilePath, &mps)
		if err != nil {
			return nil, err
		}
		if hash1 == hash2 {
			// Success
			return mps, nil
		}
		// The table changed mid-read; use the newer hash as the baseline.
		hash1 = hash2
	}
	return nil, fmt.Errorf("failed to get a consistent snapshot of %v after %d tries", mountFilePath, maxListTries)
}
// readProcMounts reads the given mountFilePath (normally /proc/mounts) and produces a hash
// of the contents. If the out argument is not nil, this fills it with MountPoint structs.
func readProcMounts(mountFilePath string, out *[]MountPoint) (uint32, error) {
	f, err := os.Open(mountFilePath)
	if err != nil {
		return 0, err
	}
	defer f.Close()

	return readProcMountsFrom(f, out)
}
// readProcMountsFrom hashes the mount-table text read from file and, when out
// is non-nil, appends one MountPoint per line. It returns the adler32 checksum
// of the bytes consumed, which listProcMounts uses to detect torn reads.
func readProcMountsFrom(file io.Reader, out *[]MountPoint) (uint32, error) {
	hash := adler32.New()
	scanner := bufio.NewReader(file)
	for {
		line, err := scanner.ReadString('\n')
		if err != nil && err != io.EOF {
			// Propagate real read errors instead of silently stopping.
			return 0, err
		}
		// Process whatever bytes we did get — including a final line that is
		// not newline-terminated, which ReadString returns alongside io.EOF
		// and which was previously dropped.
		if line != "" {
			fields := strings.Fields(line)
			if len(fields) != expectedNumFieldsPerLine {
				return 0, fmt.Errorf("wrong number of fields (expected %d, got %d): %s", expectedNumFieldsPerLine, len(fields), line)
			}
			fmt.Fprintf(hash, "%s", line)
			if out != nil {
				mp := MountPoint{
					Device: fields[0],
					Path:   fields[1],
					Type:   fields[2],
					Opts:   strings.Split(fields[3], ","),
				}
				freq, err := strconv.Atoi(fields[4])
				if err != nil {
					return 0, err
				}
				mp.Freq = freq
				pass, err := strconv.Atoi(fields[5])
				if err != nil {
					return 0, err
				}
				mp.Pass = pass
				*out = append(*out, mp)
			}
		}
		if err == io.EOF {
			break
		}
	}
	return hash.Sum32(), nil
}
// formatAndMount uses unix utils to format and mount the given disk.
// Flow: fsck the device, attempt the mount, and if the mount fails because the
// disk looks unformatted, mkfs it (defaulting to ext4) and mount once more.
func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, fstype string, options []string) error {
	// "defaults" is always appended to the caller-supplied mount options.
	options = append(options, "defaults")

	// Run fsck on the disk to fix repairable issues
	glog.V(4).Infof("Checking for issues with fsck on disk: %s", source)
	args := []string{"-a", source}
	cmd := mounter.Runner.Command("fsck", args...)
	out, err := cmd.CombinedOutput()
	if err != nil {
		ee, isExitError := err.(utilExec.ExitError)
		switch {
		case err == utilExec.ErrExecutableNotFound:
			glog.Warningf("'fsck' not found on system; continuing mount without running 'fsck'.")
		case isExitError && ee.ExitStatus() == fsckErrorsCorrected:
			glog.Infof("Device %s has errors which were corrected by fsck.", source)
		case isExitError && ee.ExitStatus() == fsckErrorsUncorrected:
			// Uncorrectable fsck errors are the only fatal outcome here.
			return fmt.Errorf("'fsck' found errors on device %s but could not correct them: %s.", source, string(out))
		case isExitError && ee.ExitStatus() > fsckErrorsUncorrected:
			glog.Infof("`fsck` error %s", string(out))
		}
	}

	// Try to mount the disk
	glog.V(4).Infof("Attempting to mount disk: %s %s %s", fstype, source, target)
	err = mounter.Interface.Mount(source, target, fstype, options)
	if err != nil {
		// It is possible that this disk is not formatted. Double check using diskLooksUnformatted
		// (note: err is intentionally shadowed inside this branch).
		notFormatted, err := mounter.diskLooksUnformatted(source)
		if err == nil && notFormatted {
			args = []string{source}
			// Disk is unformatted so format it.
			// Use 'ext4' as the default
			if len(fstype) == 0 {
				fstype = "ext4"
			}
			if fstype == "ext4" || fstype == "ext3" {
				// Skip lazy init so the volume is fully usable immediately.
				args = []string{"-E", "lazy_itable_init=0,lazy_journal_init=0", "-F", source}
			}
			glog.Infof("Disk %q appears to be unformatted, attempting to format as type: %q with options: %v", source, fstype, args)
			cmd := mounter.Runner.Command("mkfs."+fstype, args...)
			_, err := cmd.CombinedOutput()
			if err == nil {
				// the disk has been formatted successfully try to mount it again.
				glog.Infof("Disk successfully formatted (mkfs): %s - %s %s", fstype, source, target)
				return mounter.Interface.Mount(source, target, fstype, options)
			}
			glog.Errorf("format of disk %q failed: type:(%q) target:(%q) options:(%q)error:(%v)", source, fstype, target, options, err)
			// Returns the mkfs error.
			return err
		}
	}
	// Returns the original mount error (or nil on success).
	return err
}
// diskLooksUnformatted uses 'lsblk' to see if the given disk is unformated:
// an empty FSTYPE column means no filesystem was detected on the device.
func (mounter *SafeFormatAndMount) diskLooksUnformatted(disk string) (bool, error) {
	args := []string{"-nd", "-o", "FSTYPE", disk}
	cmd := mounter.Runner.Command("lsblk", args...)
	glog.V(4).Infof("Attempting to determine if disk %q is formatted using lsblk with args: (%v)", disk, args)
	dataOut, err := cmd.CombinedOutput()
	output := strings.TrimSpace(string(dataOut))
	// TODO (#13212): check if this disk has partitions and return false, and
	// an error if so.
	if err != nil {
		glog.Errorf("Could not determine if disk %q is formatted (%v)", disk, err)
		return false, err
	}
	return output == "", nil
}
Fix source and target path with overridden rootfs in mount utility package
Signed-off-by: Vishnu kannan <8c9a7175bd62235d51889a4697184181a7bcfb30@google.com>
// +build linux
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mount
import (
"bufio"
"fmt"
"hash/adler32"
"io"
"os"
"os/exec"
"path"
"strconv"
"strings"
"syscall"
"github.com/golang/glog"
utilExec "k8s.io/kubernetes/pkg/util/exec"
)
const (
	// How many times to retry for a consistent read of /proc/mounts.
	maxListTries = 3
	// Number of fields per line in /proc/mounts as per the fstab man page.
	expectedNumFieldsPerLine = 6
	// Location of the mount file to use
	procMountsPath = "/proc/mounts"
)

// fsck(8) exit statuses interpreted by formatAndMount.
const (
	// 'fsck' found errors and corrected them
	fsckErrorsCorrected = 1
	// 'fsck' found errors but exited without correcting them
	fsckErrorsUncorrected = 4
)
// Mounter provides the default implementation of mount.Interface
// for the linux platform. This implementation assumes that the
// kubelet is running in the host's root mount namespace.
type Mounter struct {
	// mounterPath is the mount utility invoked by doMount.
	mounterPath string
	// mounterRootfsPath is a prefix applied to host paths handed to the mount
	// utility (see Mount), for mounters running in a container with the host
	// filesystem available under this path.
	mounterRootfsPath string
}
// Mount mounts source to target as fstype with given options. 'source' and 'fstype' must
// be an empty string in case it's not required, e.g. for remount, or for auto filesystem
// type, where kernel handles fs type for you. The mount 'options' is a list of options,
// currently come from mount(8), e.g. "ro", "remount", "bind", etc. If no more option is
// required, call Mount with an empty string list or nil.
// Update source path to include a root filesystem override to make a containerized mounter (specified via `mounterPath`) work.
func (mounter *Mounter) Mount(source string, target string, fstype string, options []string) error {
	bind, bindRemountOpts := isBind(options)
	if bind {
		// For bind mounts both endpoints are host paths, so both source and
		// target are resolved against the rootfs override. Bind mounts ignore
		// most options on the initial mount, hence the follow-up remount.
		err := doMount(mounter.mounterPath, path.Join(mounter.mounterRootfsPath, source), path.Join(mounter.mounterRootfsPath, target), fstype, []string{"bind"})
		if err != nil {
			return err
		}
		return doMount(mounter.mounterPath, path.Join(mounter.mounterRootfsPath, source), path.Join(mounter.mounterRootfsPath, target), fstype, bindRemountOpts)
	} else {
		// Non-bind sources (devices, network shares) are not host directory
		// paths and are not prefixed; only the target lives in the rootfs.
		return doMount(mounter.mounterPath, source, path.Join(mounter.mounterRootfsPath, target), fstype, options)
	}
}
// isBind detects whether a bind mount is being requested and makes the remount options to
// use in case of bind mount, due to the fact that bind mount doesn't respect mount options.
// The list equals:
//   options - 'bind' + 'remount' (no duplicate)
func isBind(options []string) (bool, []string) {
	bindRemountOpts := []string{"remount"}
	bind := false
	// Ranging over a nil/empty slice is a no-op, so no length guard is needed.
	// Note: `break` statements inside the switch were redundant (in Go, a case
	// falls out of the switch automatically) and have been removed.
	for _, option := range options {
		switch option {
		case "bind":
			bind = true
		case "remount":
			// Dropped here; "remount" is always the first element of the result.
		default:
			bindRemountOpts = append(bindRemountOpts, option)
		}
	}
	return bind, bindRemountOpts
}
// doMount runs the mount command.
//
// mountCmd is executed directly with arguments assembled by makeMountArgs.
// On failure the combined stdout/stderr of the command is included in both
// the log line and the returned error.
func doMount(mountCmd string, source string, target string, fstype string, options []string) error {
	glog.V(4).Infof("Mounting %s %s %s %v with command: %q", source, target, fstype, options, mountCmd)
	mountArgs := makeMountArgs(source, target, fstype, options)
	glog.V(4).Infof("Mounting cmd (%s) with arguments (%s)", mountCmd, mountArgs)
	command := exec.Command(mountCmd, mountArgs...)
	output, err := command.CombinedOutput()
	if err != nil {
		glog.Errorf("Mount failed: %v\nMounting command: %s\nMounting arguments: %s %s %s %v\nOutput: %s\n", err, mountCmd, source, target, fstype, options, string(output))
		return fmt.Errorf("mount failed: %v\nMounting command: %s\nMounting arguments: %s %s %s %v\nOutput: %s\n",
			err, mountCmd, source, target, fstype, options, string(output))
	}
	// err is nil here (the error case returned above).
	return err
}
// makeMountArgs makes the arguments to the mount(8) command.
//
// The resulting argument vector has the shape:
//
//	mount [-t $fstype] [-o $options] [$source] $target
//
// Empty fstype/source and empty option lists are simply omitted.
func makeMountArgs(source, target, fstype string, options []string) []string {
	args := []string{}
	if fstype != "" {
		args = append(args, "-t", fstype)
	}
	if len(options) > 0 {
		args = append(args, "-o", strings.Join(options, ","))
	}
	if source != "" {
		args = append(args, source)
	}
	return append(args, target)
}
// Unmount unmounts the target.
//
// It shells out to umount(8); on failure the returned error includes the
// command's combined output for diagnosis.
func (mounter *Mounter) Unmount(target string) error {
	glog.V(4).Infof("Unmounting %s", target)
	out, err := exec.Command("umount", target).CombinedOutput()
	if err == nil {
		return nil
	}
	return fmt.Errorf("Unmount failed: %v\nUnmounting arguments: %s\nOutput: %s\n", err, target, string(out))
}
// List returns a list of all mounted filesystems.
//
// It delegates to listProcMounts on procMountsPath (presumably /proc/mounts
// — confirm against the constant's definition elsewhere in the package).
func (*Mounter) List() ([]MountPoint, error) {
	return listProcMounts(procMountsPath)
}
// IsLikelyNotMountPoint determines if a directory is not a mountpoint.
// It is fast but not necessarily ALWAYS correct. If the path is in fact
// a bind mount from one part of a mount to another it will not be detected.
// mkdir /tmp/a /tmp/b; mount --bind /tmp/a /tmp/b; IsLikelyNotMountPoint("/tmp/b")
// will return true. When in fact /tmp/b is a mount point. If this situation
// is of interest to you, don't use this function...
func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) {
	return IsNotMountPoint(file)
}
// IsNotMountPoint reports whether file is NOT a mountpoint, by comparing the
// device number of file against that of its parent directory: a differing
// device means file is the root of a mount. Bind mounts within the same
// filesystem are not detected by this check. On any stat failure it returns
// (true, err).
func IsNotMountPoint(file string) (bool, error) {
	selfStat, err := os.Stat(file)
	if err != nil {
		return true, err
	}
	parentStat, err := os.Lstat(file + "/..")
	if err != nil {
		return true, err
	}
	selfDev := selfStat.Sys().(*syscall.Stat_t).Dev
	parentDev := parentStat.Sys().(*syscall.Stat_t).Dev
	// Same device as the parent => not a mountpoint.
	return selfDev == parentDev, nil
}
// DeviceOpened checks if block device in use by calling Open with O_EXCL flag.
// If pathname is not a device, log and return false with nil error.
// If open returns errno EBUSY, return true with nil error.
// If open returns nil, return false with nil error.
// Otherwise, return false with error
//
// Thin wrapper over exclusiveOpenFailsOnDevice; see that function for the
// actual probing logic.
func (mounter *Mounter) DeviceOpened(pathname string) (bool, error) {
	return exclusiveOpenFailsOnDevice(pathname)
}
// PathIsDevice uses FileInfo returned from os.Stat to check if path refers
// to a device.
//
// Thin wrapper over the package-level pathIsDevice helper.
func (mounter *Mounter) PathIsDevice(pathname string) (bool, error) {
	return pathIsDevice(pathname)
}
// exclusiveOpenFailsOnDevice reports whether pathname names a device that is
// currently in use, by attempting to open it with O_EXCL.
//
// Returns (false, nil) if pathname is not a device or the device is free,
// (true, nil) if the open fails with EBUSY (device held elsewhere), and
// (false, err) for any other failure.
//
// Fix: the original `defer syscall.Close(fd)` ran even when Open failed, so
// close(2) was issued on an invalid descriptor. The fd is now closed only
// when the open succeeded.
func exclusiveOpenFailsOnDevice(pathname string) (bool, error) {
	isDevice, err := pathIsDevice(pathname)
	if err != nil {
		return false, fmt.Errorf(
			"PathIsDevice failed for path %q: %v",
			pathname,
			err)
	}
	if !isDevice {
		glog.Errorf("Path %q is not refering to a device.", pathname)
		return false, nil
	}
	fd, errno := syscall.Open(pathname, syscall.O_RDONLY|syscall.O_EXCL, 0)
	if errno == nil {
		// Device not in use; release the exclusive handle we just acquired.
		syscall.Close(fd)
		return false, nil
	}
	if errno == syscall.EBUSY {
		// EBUSY from an O_EXCL open means another holder has the device.
		return true, nil
	}
	// Any other errno is a genuine failure to probe the device.
	return false, errno
}
// pathIsDevice reports whether pathname refers to a device node.
// A missing path is not treated as an error: it yields (false, nil).
func pathIsDevice(pathname string) (bool, error) {
	info, err := os.Stat(pathname)
	switch {
	case os.IsNotExist(err):
		// Nonexistent paths are simply "not a device".
		return false, nil
	case err != nil:
		// Any other stat failure is surfaced to the caller.
		return false, err
	}
	// ModeDevice covers both block and character device nodes.
	return info.Mode()&os.ModeDevice != 0, nil
}
//GetDeviceNameFromMount: given a mount point, find the device name from its global mount point
func (mounter *Mounter) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) {
return getDeviceNameFromMount(mounter, mountPath, pluginDir)
}
// listProcMounts parses mountFilePath into MountPoint structs, retrying up
// to maxListTries times until two consecutive reads hash identically — i.e.
// until a consistent snapshot of the (possibly changing) mount table is
// observed.
func listProcMounts(mountFilePath string) ([]MountPoint, error) {
	prevHash, err := readProcMounts(mountFilePath, nil)
	if err != nil {
		return nil, err
	}
	for attempt := 0; attempt < maxListTries; attempt++ {
		mps := []MountPoint{}
		curHash, err := readProcMounts(mountFilePath, &mps)
		if err != nil {
			return nil, err
		}
		if curHash == prevHash {
			// Two identical consecutive reads: snapshot is stable.
			return mps, nil
		}
		prevHash = curHash
	}
	return nil, fmt.Errorf("failed to get a consistent snapshot of %v after %d tries", mountFilePath, maxListTries)
}
// readProcMounts reads the given mountFilePath (normally /proc/mounts) and produces a hash
// of the contents. If the out argument is not nil, this fills it with MountPoint structs.
func readProcMounts(mountFilePath string, out *[]MountPoint) (uint32, error) {
	f, err := os.Open(mountFilePath)
	if err != nil {
		return 0, err
	}
	// Close the table file once parsing is done.
	defer f.Close()
	return readProcMountsFrom(f, out)
}
// readProcMountsFrom parses mount-table lines from file (one mount per line,
// expectedNumFieldsPerLine whitespace-separated fields each) and returns an
// adler32 checksum of the bytes consumed. If out is non-nil, a MountPoint is
// appended to it for every parsed line.
//
// Fix: the original only checked for io.EOF and silently ignored any other
// read error, which could loop forever re-reading a persistently failing
// source; non-EOF errors are now returned.
func readProcMountsFrom(file io.Reader, out *[]MountPoint) (uint32, error) {
	hash := adler32.New()
	scanner := bufio.NewReader(file)
	for {
		line, err := scanner.ReadString('\n')
		if err != nil {
			if err == io.EOF {
				// A trailing partial line (no '\n') is dropped, matching the
				// historical behavior; /proc/mounts is newline-terminated.
				break
			}
			return 0, err
		}
		fields := strings.Fields(line)
		if len(fields) != expectedNumFieldsPerLine {
			return 0, fmt.Errorf("wrong number of fields (expected %d, got %d): %s", expectedNumFieldsPerLine, len(fields), line)
		}
		// Fold the raw line into the snapshot checksum.
		fmt.Fprintf(hash, "%s", line)
		if out != nil {
			mp := MountPoint{
				Device: fields[0],
				Path:   fields[1],
				Type:   fields[2],
				Opts:   strings.Split(fields[3], ","),
			}
			freq, err := strconv.Atoi(fields[4])
			if err != nil {
				return 0, err
			}
			mp.Freq = freq
			pass, err := strconv.Atoi(fields[5])
			if err != nil {
				return 0, err
			}
			mp.Pass = pass
			*out = append(*out, mp)
		}
	}
	return hash.Sum32(), nil
}
// formatAndMount uses unix utils to format and mount the given disk
//
// Sequence: run fsck -a on source (best-effort; only uncorrected errors
// abort), try to mount, and if the mount fails on a disk that lsblk reports
// as unformatted, format it (ext4 by default) and mount once more.
// NOTE(review): "defaults" is appended to the caller's options slice, which
// may mutate the caller's backing array — confirm callers don't reuse it.
func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, fstype string, options []string) error {
	options = append(options, "defaults")
	// Run fsck on the disk to fix repairable issues
	glog.V(4).Infof("Checking for issues with fsck on disk: %s", source)
	args := []string{"-a", source}
	cmd := mounter.Runner.Command("fsck", args...)
	out, err := cmd.CombinedOutput()
	if err != nil {
		// fsck's exit status encodes severity; only "errors left uncorrected"
		// aborts the operation, everything else is logged and ignored.
		ee, isExitError := err.(utilExec.ExitError)
		switch {
		case err == utilExec.ErrExecutableNotFound:
			glog.Warningf("'fsck' not found on system; continuing mount without running 'fsck'.")
		case isExitError && ee.ExitStatus() == fsckErrorsCorrected:
			glog.Infof("Device %s has errors which were corrected by fsck.", source)
		case isExitError && ee.ExitStatus() == fsckErrorsUncorrected:
			return fmt.Errorf("'fsck' found errors on device %s but could not correct them: %s.", source, string(out))
		case isExitError && ee.ExitStatus() > fsckErrorsUncorrected:
			glog.Infof("`fsck` error %s", string(out))
		}
	}
	// Try to mount the disk
	glog.V(4).Infof("Attempting to mount disk: %s %s %s", fstype, source, target)
	err = mounter.Interface.Mount(source, target, fstype, options)
	if err != nil {
		// It is possible that this disk is not formatted. Double check using diskLooksUnformatted
		// NOTE(review): this `err` shadows the outer mount error; the final
		// `return err` below still returns the original mount failure.
		notFormatted, err := mounter.diskLooksUnformatted(source)
		if err == nil && notFormatted {
			args = []string{source}
			// Disk is unformatted so format it.
			// Use 'ext4' as the default
			if len(fstype) == 0 {
				fstype = "ext4"
			}
			if fstype == "ext4" || fstype == "ext3" {
				// Skip lazy init so the volume is immediately fully usable.
				args = []string{"-E", "lazy_itable_init=0,lazy_journal_init=0", "-F", source}
			}
			glog.Infof("Disk %q appears to be unformatted, attempting to format as type: %q with options: %v", source, fstype, args)
			cmd := mounter.Runner.Command("mkfs."+fstype, args...)
			_, err := cmd.CombinedOutput()
			if err == nil {
				// the disk has been formatted successfully try to mount it again.
				glog.Infof("Disk successfully formatted (mkfs): %s - %s %s", fstype, source, target)
				return mounter.Interface.Mount(source, target, fstype, options)
			}
			glog.Errorf("format of disk %q failed: type:(%q) target:(%q) options:(%q)error:(%v)", source, fstype, target, options, err)
			return err
		}
	}
	return err
}
// diskLooksUnformatted uses 'lsblk' to see if the given disk is unformatted:
// an empty FSTYPE column means no filesystem signature was reported.
func (mounter *SafeFormatAndMount) diskLooksUnformatted(disk string) (bool, error) {
	lsblkArgs := []string{"-nd", "-o", "FSTYPE", disk}
	glog.V(4).Infof("Attempting to determine if disk %q is formatted using lsblk with args: (%v)", disk, lsblkArgs)
	dataOut, err := mounter.Runner.Command("lsblk", lsblkArgs...).CombinedOutput()
	// TODO (#13212): check if this disk has partitions and return false, and
	// an error if so.
	if err != nil {
		glog.Errorf("Could not determine if disk %q is formatted (%v)", disk, err)
		return false, err
	}
	return strings.TrimSpace(string(dataOut)) == "", nil
}
|
package main
import (
"crypto/rand"
"fmt"
"github.com/caius/gobot"
"githubstatus"
"io/ioutil"
"math/big"
"net/http"
"regexp"
"signalstatus"
"strconv"
"strings"
)
// GitCommit and BuiltBy identify the build; presumably injected at link time
// (they are read by the "version" handler in main) — TODO confirm the build
// tooling sets them.
var GitCommit string
var BuiltBy string
// main wires up caiusbot for #caius on Freenode: it registers every chat
// trigger on the bot and then blocks in bot.Run().
//
// NOTE(review): patterns wrapped in /slashes/ appear to be regular
// expressions while bare strings are literal triggers — confirm against the
// gobot API.
func main() {
	fmt.Printf("Version: %s\nBuilt by: %s\n", GitCommit, BuiltBy) // FU GO
	bot := gobot.Gobot{Name: "caiusbot", Room: "#caius", Server: "irc.freenode.net:6667"}
	bot.Plugins = make(map[string]func(p gobot.Privmsg))
	// Report the 37signals service status.
	bot.Match("37status", func(privmsg gobot.Privmsg) {
		status, err := signalstatus.Status()
		if err != nil {
			privmsg.Error(err)
			return
		}
		var reply string
		if status.OK() {
			reply = fmt.Sprintf("OK: %s\n", status.Status.Description)
		} else {
			reply = fmt.Sprintf("Uh oh: %s\n", status.Status.Description)
		}
		privmsg.Msg(reply)
	})
	// Report the GitHub service status.
	bot.Match("hubstatus", func(privmsg gobot.Privmsg) {
		status, err := githubstatus.Status()
		if err != nil {
			privmsg.Error(err)
			return
		}
		fmt.Println(status)
		privmsg.Msg(fmt.Sprintf("Github: %s - %s", status.Mood, status.Description))
	})
	bot.Match("hullo", func(privmsg gobot.Privmsg) {
		privmsg.Msg("Oh hai!")
	})
	// List the commands the bot understands.
	bot.Match("/help|commands/", func(privmsg gobot.Privmsg) {
		privmsg.Msg("roll, nextmeet, artme <string>, stab <nick>, seen <nick>, ram, uptime, 37status, boobs, trollface, dywj, dance, mustachify, stats, last, ping")
	})
	bot.Match("meme", func(privmsg gobot.Privmsg) {
		// There are no decent meme web services, nor gems wrapping the shitty ones.
		// -- Caius, 20th Aug 2011
		privmsg.Msg("Y U NO FIX MEME?!")
	})
	// Canned-response handlers; bot.Sample picks one entry at random.
	bot.Match("/troll(face)?/", func(privmsg gobot.Privmsg) {
		response, err := bot.Sample([]string{"http://no.gd/troll.png", "http://no.gd/trolldance.gif", "http://caius.name/images/phone_troll.jpg"})
		if err != nil {
			return
		}
		privmsg.Msg(response)
	})
	bot.Match("boner", func(privmsg gobot.Privmsg) {
		response, err := bot.Sample([]string{"http://files.myopera.com/coxy/albums/106123/trex-boner.jpg", "http://no.gd/badger.gif"})
		if err != nil {
			return
		}
		privmsg.Msg(response)
	})
	bot.Match("badger", func(privmsg gobot.Privmsg) {
		privmsg.Msg("http://no.gd/badger2.gif")
	})
	bot.Match("dywj", func(privmsg gobot.Privmsg) {
		privmsg.Msg("DAMN YOU WILL JESSOP!!!")
	})
	// derp, herp
	bot.Match("/\\b[dh]erp\\b/", func(privmsg gobot.Privmsg) {
		privmsg.Msg("http://caius.name/images/qs/herped-a-derp.png")
	})
	// Tell whoever shouts FFUU... to calm down (special-cased for tomb*).
	bot.Match("/F{2,}U{2,}/", func(privmsg gobot.Privmsg) {
		var response string
		if strings.Contains(strings.ToLower(privmsg.Nick), "tomb") {
			response = "http://no.gd/p/calm-20111107-115310.jpg"
		} else {
			response = fmt.Sprintf("Calm down %s!", privmsg.Nick)
		}
		privmsg.Msg(response)
	})
	bot.Match("nextmeat", func(privmsg gobot.Privmsg) {
		privmsg.Msg("BACNOM")
	})
	bot.Match("/where is (wlll|will)/", func(privmsg gobot.Privmsg) {
		response, err := bot.Sample([]string{"North Tea Power", "home"})
		if err != nil {
			return
		}
		privmsg.Msg(response)
	})
	bot.Match("/^b(oo|ew)bs$/", func(privmsg gobot.Privmsg) {
		response, err := bot.Sample([]string{"(.)(.)", "http://no.gd/boobs.gif"})
		if err != nil {
			return
		}
		privmsg.Msg(response)
	})
	// Report the build metadata carried in GitCommit/BuiltBy.
	bot.Match("version", func(privmsg gobot.Privmsg) {
		reply := "My current version is"
		if GitCommit != "" {
			reply = fmt.Sprintf("%s %s", reply, GitCommit)
		} else {
			reply = fmt.Sprintf("%s unknown", reply)
		}
		if BuiltBy != "" {
			reply = fmt.Sprintf("%s and I was built by %s", reply, BuiltBy)
		}
		privmsg.Msg(reply)
	})
	// Pong plugin
	bot.Match("/^(?:\\.|!?\\.?ping)$/", func(privmsg gobot.Privmsg) {
		privmsg.Msg("pong!")
	})
	bot.Match("/^stats?$/", func(privmsg gobot.Privmsg) {
		privmsg.Msg("http://dev.hentan.caius.name/irc/nwrug.html")
	})
	// Dance: pick one of three canned reactions at random.
	bot.Match("dance", func(privmsg gobot.Privmsg) {
		i, err := bot.Sample([]string{"0", "1", "2"})
		if err != nil {
			return
		}
		switch i {
		case "0":
			privmsg.Msg("EVERYBODY DANCE NOW!") // msg channel, "EVERYBODY DANCE NOW!"
			privmsg.Action("does the funky chicken")
		case "1":
			privmsg.Msg("http://no.gd/caiusboogie.gif")
		case "2":
			privmsg.Msg("http://i.imgur.com/rDDjz.gif")
		}
	})
	// Stabs what he is comanded to. Unless it's himself.
	// `stab blah` => `* gobot stabs blah`
	bot.Match("/stab (.+)/", func(privmsg gobot.Privmsg) {
		msg := privmsg.Message
		stab_regexp := regexp.MustCompile("stab (.+)")
		receiver := stab_regexp.FindStringSubmatch(msg)[1]
		// If they try to stab us, stab them
		if strings.Contains(receiver, "rugbot") {
			receiver = privmsg.Nick
		}
		// TODO: privmsg.Actionf()
		privmsg.Action(fmt.Sprintf("/me stabs %s", receiver))
	})
	// Listens to channel conversation and inserts title of any link posted, following redirects
	// `And then I went to www.caius.name` => `gobot: Caius Durling » Profile`
	bot.Match("/.+/", func(privmsg gobot.Privmsg) {
		msg := privmsg.Message
		// Regexp from http://daringfireball.net/2010/07/improved_regex_for_matching_urls - Ta gruber!
		url_regexp := regexp.MustCompile("(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:'\".,<>?«»“”‘’]))")
		url := url_regexp.FindString(msg)
		if url == "" {
			return
		}
		fmt.Printf("Extracted '%s'\n", url)
		// We might extract `www.google.com` or `bit.ly/something` so we need to prepend http:// in that case
		if !regexp.MustCompile("^https?:\\/\\/").MatchString(url) {
			url = fmt.Sprintf("http://%s", url)
		}
		fmt.Printf("GET %s\n", url)
		// Attempt a GET request to get the page title
		// TODO: handle PDF and non-HTML content
		resp, err := http.Get(url)
		if err != nil {
			privmsg.Error(err)
			return
		}
		defer resp.Body.Close()
		raw_body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			privmsg.Error(err)
			return
		}
		body := string(raw_body)
		title_regexp := regexp.MustCompile("<title>([^<]+)</title>")
		title := title_regexp.FindStringSubmatch(body)
		if title == nil {
			return
		}
		fmt.Printf("title: %s\n", title[1])
		privmsg.Msg(title[1])
	})
	// Dice roll: `roll N` replies with a cryptographically random 1..N.
	bot.Match("/^roll (\\d{1,})$/", func(privmsg gobot.Privmsg) {
		msg := privmsg.Message
		fmt.Printf("!!!!!!!!! Got a roll! %s\n", msg)
		total_sides_string := strings.TrimPrefix(msg, "roll ")
		total_sides, err := strconv.Atoi(total_sides_string)
		if err != nil {
			privmsg.Error(err)
			return
		}
		i, err := rand.Int(rand.Reader, big.NewInt(int64(total_sides)))
		if err != nil {
			privmsg.Error(err)
			return
		}
		// We'll be 0-i, so add 1 to turn into dice faces
		i.Add(i, big.NewInt(1))
		privmsg.Msg(i.String())
	})
	// TODO: last
	// TODO: ACTION pokes .+
	// TODO: nextmeet
	// TODO: ACTION staabs
	// TODO: artme
	// TODO: tasche http
	// TODO: tasche artme
	// TODO: seen
	// TODO: ram
	// TODO: uptime
	// TODO: last poop
	// TODO: twitter status
	// TODO: twitter user
	// TODO: commit me
	bot.Run()
}
Make use of gobot's new API for all existing plugins
package main
import (
"crypto/rand"
"fmt"
"github.com/caius/gobot"
"githubstatus"
"io/ioutil"
"math/big"
"net/http"
"regexp"
"signalstatus"
"strconv"
"strings"
)
// GitCommit and BuiltBy identify the build; presumably injected at link time
// (they are read by the "version" handler in main) — TODO confirm the build
// tooling sets them.
var GitCommit string
var BuiltBy string
// main wires up caiusbot for #caius on Freenode using the newer gobot API
// (constructor + MatchString with bare regex patterns), registers every chat
// trigger, then blocks in bot.Run().
//
// Fix: the "where is (wlll|will)" trigger had kept its old /slash-delimited/
// pattern form when every other handler migrated to plain MatchString
// patterns, so it could only match a literal "/where is .../" — the slashes
// are removed to make it consistent with the rest of this file.
func main() {
	bot := gobot.Gobot()
	bot.Name = "caiusbot"
	bot.Room = "#caius"
	bot.Server = "irc.freenode.net:6667"
	// Report the 37signals service status.
	bot.MatchString("37status", func(privmsg gobot.Privmsg) {
		status, err := signalstatus.Status()
		if err != nil {
			privmsg.Error(err)
			return
		}
		var reply string
		if status.OK() {
			reply = fmt.Sprintf("OK: %s\n", status.Status.Description)
		} else {
			reply = fmt.Sprintf("Uh oh: %s\n", status.Status.Description)
		}
		privmsg.Msg(reply)
	})
	// Report the GitHub service status.
	bot.MatchString("hubstatus", func(privmsg gobot.Privmsg) {
		status, err := githubstatus.Status()
		if err != nil {
			privmsg.Error(err)
			return
		}
		fmt.Println(status)
		privmsg.Msg(fmt.Sprintf("Github: %s - %s", status.Mood, status.Description))
	})
	bot.MatchString("hullo", func(privmsg gobot.Privmsg) {
		privmsg.Msg("Oh hai!")
	})
	// List the commands the bot understands.
	bot.MatchString("help|commands", func(privmsg gobot.Privmsg) {
		privmsg.Msg("roll, nextmeet, artme <string>, stab <nick>, seen <nick>, ram, uptime, 37status, boobs, trollface, dywj, dance, mustachify, stats, last, ping")
	})
	bot.MatchString("meme", func(privmsg gobot.Privmsg) {
		// There are no decent meme web services, nor gems wrapping the shitty ones.
		// -- Caius, 20th Aug 2011
		privmsg.Msg("Y U NO FIX MEME?!")
	})
	// Canned-response handlers; bot.Sample picks one entry at random.
	bot.MatchString("troll(face)?", func(privmsg gobot.Privmsg) {
		response, err := bot.Sample([]string{"http://no.gd/troll.png", "http://no.gd/trolldance.gif", "http://caius.name/images/phone_troll.jpg"})
		if err != nil {
			return
		}
		privmsg.Msg(response)
	})
	bot.MatchString("boner", func(privmsg gobot.Privmsg) {
		response, err := bot.Sample([]string{"http://files.myopera.com/coxy/albums/106123/trex-boner.jpg", "http://no.gd/badger.gif"})
		if err != nil {
			return
		}
		privmsg.Msg(response)
	})
	bot.MatchString("badger", func(privmsg gobot.Privmsg) {
		privmsg.Msg("http://no.gd/badger2.gif")
	})
	bot.MatchString("dywj", func(privmsg gobot.Privmsg) {
		privmsg.Msg("DAMN YOU WILL JESSOP!!!")
	})
	// derp, herp
	bot.Match(regexp.MustCompile("\\b[dh]erp\\b"), func(privmsg gobot.Privmsg) {
		privmsg.Msg("http://caius.name/images/qs/herped-a-derp.png")
	})
	// Tell whoever shouts FFUU... to calm down (special-cased for tomb*).
	bot.MatchString("F{2,}U{2,}", func(privmsg gobot.Privmsg) {
		var response string
		if strings.Contains(strings.ToLower(privmsg.Nick), "tomb") {
			response = "http://no.gd/p/calm-20111107-115310.jpg"
		} else {
			response = fmt.Sprintf("Calm down %s!", privmsg.Nick)
		}
		privmsg.Msg(response)
	})
	bot.MatchString("nextmeat", func(privmsg gobot.Privmsg) {
		privmsg.Msg("BACNOM")
	})
	// Fixed: slashes stripped from the pattern (see doc comment above).
	bot.MatchString("where is (wlll|will)", func(privmsg gobot.Privmsg) {
		response, err := bot.Sample([]string{"North Tea Power", "home"})
		if err != nil {
			return
		}
		privmsg.Msg(response)
	})
	bot.MatchString("b(oo|ew)bs", func(privmsg gobot.Privmsg) {
		response, err := bot.Sample([]string{"(.)(.)", "http://no.gd/boobs.gif"})
		if err != nil {
			return
		}
		privmsg.Msg(response)
	})
	// Report the build metadata carried in GitCommit/BuiltBy.
	bot.MatchString("version", func(privmsg gobot.Privmsg) {
		reply := "My current version is"
		if GitCommit != "" {
			reply = fmt.Sprintf("%s %s", reply, GitCommit)
		} else {
			reply = fmt.Sprintf("%s unknown", reply)
		}
		if BuiltBy != "" {
			reply = fmt.Sprintf("%s and I was built by %s", reply, BuiltBy)
		}
		privmsg.Msg(reply)
	})
	// Pong plugin
	bot.MatchString("(?:\\.|!?\\.?ping)", func(privmsg gobot.Privmsg) {
		privmsg.Msg("pong!")
	})
	bot.MatchString("stats?", func(privmsg gobot.Privmsg) {
		privmsg.Msg("http://dev.hentan.caius.name/irc/nwrug.html")
	})
	// Dance: pick one of three canned reactions at random.
	bot.MatchString("dance", func(privmsg gobot.Privmsg) {
		i, err := bot.Sample([]string{"0", "1", "2"})
		if err != nil {
			return
		}
		switch i {
		case "0":
			privmsg.Msg("EVERYBODY DANCE NOW!") // msg channel, "EVERYBODY DANCE NOW!"
			privmsg.Action("does the funky chicken")
		case "1":
			privmsg.Msg("http://no.gd/caiusboogie.gif")
		case "2":
			privmsg.Msg("http://i.imgur.com/rDDjz.gif")
		}
	})
	// Stabs what he is comanded to. Unless it's himself.
	// `stab blah` => `* gobot stabs blah`
	bot.MatchString("stab (.+)", func(privmsg gobot.Privmsg) {
		msg := privmsg.Message
		stab_regexp := regexp.MustCompile("stab (.+)")
		receiver := stab_regexp.FindStringSubmatch(msg)[1]
		// If they try to stab us, stab them
		if strings.Contains(receiver, "rugbot") {
			receiver = privmsg.Nick
		}
		// TODO: privmsg.Actionf()
		privmsg.Action(fmt.Sprintf("/me stabs %s", receiver))
	})
	// Listens to channel conversation and inserts title of any link posted, following redirects
	// `And then I went to www.caius.name` => `gobot: Caius Durling » Profile`
	bot.MatchString(".", func(privmsg gobot.Privmsg) {
		msg := privmsg.Message
		// Regexp from http://daringfireball.net/2010/07/improved_regex_for_matching_urls - Ta gruber!
		url_regexp := regexp.MustCompile("(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:'\".,<>?«»“”‘’]))")
		url := url_regexp.FindString(msg)
		if url == "" {
			return
		}
		fmt.Printf("Extracted '%s'\n", url)
		// We might extract `www.google.com` or `bit.ly/something` so we need to prepend http:// in that case
		if !regexp.MustCompile("^https?:\\/\\/").MatchString(url) {
			url = fmt.Sprintf("http://%s", url)
		}
		fmt.Printf("GET %s\n", url)
		// Attempt a GET request to get the page title
		// TODO: handle PDF and non-HTML content
		resp, err := http.Get(url)
		if err != nil {
			privmsg.Error(err)
			return
		}
		defer resp.Body.Close()
		raw_body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			privmsg.Error(err)
			return
		}
		body := string(raw_body)
		title_regexp := regexp.MustCompile("<title>([^<]+)</title>")
		title := title_regexp.FindStringSubmatch(body)
		if title == nil {
			return
		}
		fmt.Printf("title: %s\n", title[1])
		privmsg.Msg(title[1])
	})
	// Dice roll: `roll N` replies with a cryptographically random 1..N.
	bot.MatchString("roll (\\d{1,})", func(privmsg gobot.Privmsg) {
		msg := privmsg.Message
		total_sides_string := strings.TrimPrefix(msg, "roll ")
		total_sides, err := strconv.Atoi(total_sides_string)
		if err != nil {
			privmsg.Error(err)
			return
		}
		i, err := rand.Int(rand.Reader, big.NewInt(int64(total_sides)))
		if err != nil {
			privmsg.Error(err)
			return
		}
		// We'll be 0-i, so add 1 to turn into dice faces
		i.Add(i, big.NewInt(1))
		privmsg.Msg(i.String())
	})
	// TODO: last
	// TODO: ACTION pokes .+
	// TODO: nextmeet
	// TODO: ACTION staabs
	// TODO: artme
	// TODO: tasche http
	// TODO: tasche artme
	// TODO: seen
	// TODO: ram
	// TODO: uptime
	// TODO: last poop
	// TODO: twitter status
	// TODO: twitter user
	// TODO: commit me
	bot.Run()
}
|
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws_ebs
import (
"fmt"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
)
// ProbeVolumePlugins is the primary entrypoint for volume plugins: it
// returns the AWS EBS plugin with its host left nil (injected later via Init).
func ProbeVolumePlugins() []volume.VolumePlugin {
	return []volume.VolumePlugin{&awsElasticBlockStorePlugin{nil}}
}
// awsElasticBlockStorePlugin implements the volume plugin operations for AWS
// EBS volumes; host is supplied via Init.
type awsElasticBlockStorePlugin struct {
	host volume.VolumeHost
}

// Compile-time checks that the plugin satisfies the expected interfaces.
var _ volume.VolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.PersistentVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.DeletableVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.ProvisionableVolumePlugin = &awsElasticBlockStorePlugin{}

const (
	awsElasticBlockStorePluginName = "kubernetes.io/aws-ebs" // canonical plugin name
	awsURLNamePrefix               = "aws://"                // prefix of aws-style volume IDs
)
// getPath returns the per-pod directory this plugin mounts volName into,
// using the plugin name (escaped for use as a directory name) as namespace.
func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
	return host.GetPodVolumeDir(uid, kstrings.EscapeQualifiedNameForDisk(awsElasticBlockStorePluginName), volName)
}
// Init stores the volume host for later use; it never fails.
func (plugin *awsElasticBlockStorePlugin) Init(host volume.VolumeHost) error {
	plugin.host = host
	return nil
}
// GetPluginName returns the canonical plugin name ("kubernetes.io/aws-ebs").
func (plugin *awsElasticBlockStorePlugin) GetPluginName() string {
	return awsElasticBlockStorePluginName
}
// GetVolumeName returns the EBS volume ID referenced by spec, or an error if
// spec does not carry an AWS EBS source.
func (plugin *awsElasticBlockStorePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
	src, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}
	return src.VolumeID, nil
}
// CanSupport reports whether spec references an AWS EBS volume, either
// inline in a pod or via a PersistentVolume.
func (plugin *awsElasticBlockStorePlugin) CanSupport(spec *volume.Spec) bool {
	if spec.Volume != nil && spec.Volume.AWSElasticBlockStore != nil {
		return true
	}
	return spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AWSElasticBlockStore != nil
}
// RequiresRemount reports whether mounted volumes need periodic remounting;
// EBS volumes do not.
func (plugin *awsElasticBlockStorePlugin) RequiresRemount() bool {
	return false
}
// GetAccessModes returns the access modes EBS supports: a volume can be
// attached read-write to a single node only.
func (plugin *awsElasticBlockStorePlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
	return []api.PersistentVolumeAccessMode{
		api.ReadWriteOnce,
	}
}
// NewMounter builds a Mounter for spec in the given pod.
func (plugin *awsElasticBlockStorePlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
	// Inject real implementations here, test through the internal function.
	return plugin.newMounterInternal(spec, pod.UID, &AWSDiskUtil{}, plugin.host.GetMounter())
}
// newMounterInternal assembles an awsElasticBlockStoreMounter from spec,
// with the disk manager and mount interface injected for testability.
func (plugin *awsElasticBlockStorePlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Mounter, error) {
	// EBSs used directly in a pod have a ReadOnly flag set by the pod author.
	// EBSs used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
	ebs, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}
	volumeID := aws.KubernetesVolumeID(ebs.VolumeID)
	fsType := ebs.FSType
	// An explicit partition number selects a partition of the attached disk;
	// empty string means "use the whole device".
	partition := ""
	if ebs.Partition != 0 {
		partition = strconv.Itoa(int(ebs.Partition))
	}
	return &awsElasticBlockStoreMounter{
		awsElasticBlockStore: &awsElasticBlockStore{
			podUID:          podUID,
			volName:         spec.Name(),
			volumeID:        volumeID,
			partition:       partition,
			manager:         manager,
			mounter:         mounter,
			plugin:          plugin,
			MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
		},
		fsType:   fsType,
		readOnly: readOnly,
		// diskMounter can format the device before mounting it.
		diskMounter: &mount.SafeFormatAndMount{Interface: plugin.host.GetMounter(), Runner: exec.New()}}, nil
}
// NewUnmounter builds an Unmounter for the named volume of the given pod.
func (plugin *awsElasticBlockStorePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
	// Inject real implementations here, test through the internal function.
	return plugin.newUnmounterInternal(volName, podUID, &AWSDiskUtil{}, plugin.host.GetMounter())
}
// newUnmounterInternal assembles an awsElasticBlockStoreUnmounter, with the
// disk manager and mount interface injected for testability.
func (plugin *awsElasticBlockStorePlugin) newUnmounterInternal(volName string, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Unmounter, error) {
	return &awsElasticBlockStoreUnmounter{&awsElasticBlockStore{
		podUID:          podUID,
		volName:         volName,
		manager:         manager,
		mounter:         mounter,
		plugin:          plugin,
		MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host)),
	}}, nil
}
// NewDeleter builds a Deleter for the EBS volume referenced by spec.
func (plugin *awsElasticBlockStorePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
	return plugin.newDeleterInternal(spec, &AWSDiskUtil{})
}
// newDeleterInternal builds a Deleter for the EBS volume referenced by spec.
// The spec must carry a PersistentVolume with an AWSElasticBlockStore source.
//
// Fix: the original guard only rejected a non-nil PersistentVolume whose EBS
// source was nil, then unconditionally dereferenced spec.PersistentVolume
// below — a spec with a nil PersistentVolume would panic. Both cases now
// return the error.
func (plugin *awsElasticBlockStorePlugin) newDeleterInternal(spec *volume.Spec, manager ebsManager) (volume.Deleter, error) {
	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.AWSElasticBlockStore == nil {
		glog.Errorf("spec.PersistentVolumeSource.AWSElasticBlockStore is nil")
		return nil, fmt.Errorf("spec.PersistentVolumeSource.AWSElasticBlockStore is nil")
	}
	return &awsElasticBlockStoreDeleter{
		awsElasticBlockStore: &awsElasticBlockStore{
			volName:  spec.Name(),
			volumeID: aws.KubernetesVolumeID(spec.PersistentVolume.Spec.AWSElasticBlockStore.VolumeID),
			manager:  manager,
			plugin:   plugin,
		}}, nil
}
// NewProvisioner builds a Provisioner that creates new EBS volumes.
func (plugin *awsElasticBlockStorePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
	return plugin.newProvisionerInternal(options, &AWSDiskUtil{})
}
// newProvisionerInternal assembles an awsElasticBlockStoreProvisioner, with
// the disk manager injected for testability.
func (plugin *awsElasticBlockStorePlugin) newProvisionerInternal(options volume.VolumeOptions, manager ebsManager) (volume.Provisioner, error) {
	return &awsElasticBlockStoreProvisioner{
		awsElasticBlockStore: &awsElasticBlockStore{
			manager: manager,
			plugin:  plugin,
		},
		options: options,
	}, nil
}
// getVolumeSource extracts the EBS source and its read-only flag from spec,
// whether the volume is declared inline in a pod or via a PersistentVolume.
// The read-only flag comes from the inline source itself, or from the spec
// for a PersistentVolume.
func getVolumeSource(
	spec *volume.Spec) (*api.AWSElasticBlockStoreVolumeSource, bool, error) {
	if spec.Volume != nil && spec.Volume.AWSElasticBlockStore != nil {
		ebs := spec.Volume.AWSElasticBlockStore
		return ebs, ebs.ReadOnly, nil
	}
	if spec.PersistentVolume != nil &&
		spec.PersistentVolume.Spec.AWSElasticBlockStore != nil {
		return spec.PersistentVolume.Spec.AWSElasticBlockStore, spec.ReadOnly, nil
	}
	return nil, false, fmt.Errorf("Spec does not reference an AWS EBS volume type")
}
// ConstructVolumeSpec rebuilds a volume.Spec for volName from its mount
// path, recovering the volume ID from the mount table and normalizing it to
// the aws:// URL form where needed.
//
// Fix: the conversion log line used glog.V(4).Info with printf verbs (%q),
// which prints the format string literally; it must be Infof.
func (plugin *awsElasticBlockStorePlugin) ConstructVolumeSpec(volName, mountPath string) (*volume.Spec, error) {
	mounter := plugin.host.GetMounter()
	pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
	volumeID, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
	if err != nil {
		return nil, err
	}
	// This is a workaround to fix the issue in converting aws volume id from globalPDPath
	// There are three aws volume id formats and their volumeID from GetDeviceNameFromMount() are:
	// aws:///vol-1234 (aws/vol-1234)
	// aws://us-east-1/vol-1234 (aws/us-east-1/vol-1234)
	// vol-1234 (vol-1234)
	// This code is for converting volume id to aws style volume id for the first two cases.
	sourceName := volumeID
	if strings.HasPrefix(volumeID, "aws/") {
		names := strings.Split(volumeID, "/")
		length := len(names)
		if length < 2 || length > 3 {
			return nil, fmt.Errorf("Failed to get AWS volume id from mount path %q: invalid volume name format %q", mountPath, volumeID)
		}
		volName := names[length-1]
		if !strings.HasPrefix(volName, "vol-") {
			return nil, fmt.Errorf("Invalid volume name format for AWS volume (%q) retrieved from mount path %q", volName, mountPath)
		}
		if length == 2 {
			sourceName = awsURLNamePrefix + "" + "/" + volName // empty zone label
		}
		if length == 3 {
			sourceName = awsURLNamePrefix + names[1] + "/" + volName // names[1] is the zone label
		}
		glog.V(4).Infof("Convert aws volume name from %q to %q ", volumeID, sourceName)
	}
	awsVolume := &api.Volume{
		Name: volName,
		VolumeSource: api.VolumeSource{
			AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{
				VolumeID: sourceName,
			},
		},
	}
	return volume.NewSpecFromVolume(awsVolume), nil
}
// ebsManager is the abstract interface to PD (persistent disk) operations,
// decoupling the plugin from the real AWS API for testing.
type ebsManager interface {
	// CreateVolume provisions a new EBS volume for the provisioner and
	// returns its ID, size in GB, and any labels to apply.
	CreateVolume(provisioner *awsElasticBlockStoreProvisioner) (volumeID aws.KubernetesVolumeID, volumeSizeGB int, labels map[string]string, err error)
	// Deletes a volume
	DeleteVolume(deleter *awsElasticBlockStoreDeleter) error
}
// awsElasticBlockStore volumes are disk resources provided by Amazon Web Services
// that are attached to the kubelet's host machine and exposed to the pod.
// It is the state shared by the mounter, unmounter, deleter and provisioner.
type awsElasticBlockStore struct {
	// Name of the volume within the pod spec.
	volName string
	// UID of the pod this volume belongs to.
	podUID types.UID
	// Unique id of the PD, used to find the disk resource in the provider.
	volumeID aws.KubernetesVolumeID
	// Specifies the partition to mount
	partition string
	// Utility interface that provides API calls to the provider to attach/detach disks.
	manager ebsManager
	// Mounter interface that provides system calls to mount the global path to the pod local path.
	mounter mount.Interface
	plugin  *awsElasticBlockStorePlugin
	// Embedded provider of usage metrics for the mounted path.
	volume.MetricsProvider
}
// awsElasticBlockStoreMounter mounts an attached EBS disk into a pod's
// volume directory.
type awsElasticBlockStoreMounter struct {
	*awsElasticBlockStore
	// Filesystem type, optional.
	fsType string
	// Specifies whether the disk will be attached as read-only.
	readOnly bool
	// diskMounter provides the interface that is used to mount the actual block device.
	diskMounter *mount.SafeFormatAndMount
}

// Compile-time check that the mounter satisfies volume.Mounter.
var _ volume.Mounter = &awsElasticBlockStoreMounter{}
// GetAttributes describes the mounted volume: SELinux relabeling is
// supported, and a read-only volume is not managed (ownership untouched).
func (b *awsElasticBlockStoreMounter) GetAttributes() volume.Attributes {
	attrs := volume.Attributes{SupportsSELinux: true}
	attrs.ReadOnly = b.readOnly
	attrs.Managed = !b.readOnly
	return attrs
}
// CanMount checks prior to mount operations to verify that the required
// components (binaries, etc.) to mount the volume are available on the
// underlying node. If not, it returns an error. EBS has no such
// prerequisites, so it always succeeds.
func (b *awsElasticBlockStoreMounter) CanMount() error {
	return nil
}
// SetUp attaches the disk and bind mounts to the volume path.
// It is SetUpAt targeting the volume's default pod-local path.
func (b *awsElasticBlockStoreMounter) SetUp(fsGroup *int64) error {
	return b.SetUpAt(b.GetPath(), fsGroup)
}
// SetUpAt attaches the disk and bind mounts to the volume path.
//
// The disk is expected to already be mounted at the per-plugin global path;
// this bind-mounts that path into dir so multiple pods can share one
// attachment. On a failed mount it attempts to undo any partial mount and
// removes dir before returning the original error.
func (b *awsElasticBlockStoreMounter) SetUpAt(dir string, fsGroup *int64) error {
	// TODO: handle failed mounts here.
	notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
	glog.V(4).Infof("PersistentDisk set up: %s %v %v", dir, !notMnt, err)
	if err != nil && !os.IsNotExist(err) {
		glog.Errorf("cannot validate mount point: %s %v", dir, err)
		return err
	}
	// Already mounted: nothing to do.
	if !notMnt {
		return nil
	}
	globalPDPath := makeGlobalPDPath(b.plugin.host, b.volumeID)
	if err := os.MkdirAll(dir, 0750); err != nil {
		return err
	}
	// Perform a bind mount to the full path to allow duplicate mounts of the same PD.
	options := []string{"bind"}
	if b.readOnly {
		options = append(options, "ro")
	}
	err = b.mounter.Mount(globalPDPath, dir, "", options)
	if err != nil {
		// Mount failed: figure out whether anything was actually mounted and
		// roll it back before removing the directory.
		notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
		if mntErr != nil {
			glog.Errorf("IsLikelyNotMountPoint check failed for %s: %v", dir, mntErr)
			return err
		}
		if !notMnt {
			if mntErr = b.mounter.Unmount(dir); mntErr != nil {
				glog.Errorf("failed to unmount %s: %v", dir, mntErr)
				return err
			}
			// Re-check after the unmount to confirm it actually detached.
			notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
			if mntErr != nil {
				glog.Errorf("IsLikelyNotMountPoint check failed for %s: %v", dir, mntErr)
				return err
			}
			if !notMnt {
				// This is very odd, we don't expect it. We'll try again next sync loop.
				glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir)
				return err
			}
		}
		os.Remove(dir)
		glog.Errorf("Mount of disk %s failed: %v", dir, err)
		return err
	}
	// Writable volumes get their ownership adjusted for the pod's fsGroup.
	if !b.readOnly {
		volume.SetVolumeOwnership(b, fsGroup)
	}
	glog.V(4).Infof("Successfully mounted %s", dir)
	return nil
}
func makeGlobalPDPath(host volume.VolumeHost, volumeID aws.KubernetesVolumeID) string {
// Clean up the URI to be more fs-friendly
name := string(volumeID)
name = strings.Replace(name, "://", "/", -1)
return path.Join(host.GetPluginDir(awsElasticBlockStorePluginName), mount.MountsInGlobalPDPath, name)
}
// getVolumeIDFromGlobalMount reverses the mapping done in makeGlobalPDPath:
// given a global mount path it recovers the volume ID, restoring the "aws://"
// prefix that was flattened to "aws/". Paths escaping the plugin base dir
// (containing "../") are rejected.
func getVolumeIDFromGlobalMount(host volume.VolumeHost, globalPath string) (string, error) {
	basePath := path.Join(host.GetPluginDir(awsElasticBlockStorePluginName), mount.MountsInGlobalPDPath)
	rel, err := filepath.Rel(basePath, globalPath)
	if err != nil {
		glog.Errorf("Failed to get volume id from global mount %s - %v", globalPath, err)
		return "", err
	}
	if strings.Contains(rel, "../") {
		glog.Errorf("Unexpected mount path: %s", globalPath)
		// BUG FIX: was fmt.Errorf("unexpected mount path: " + globalPath) —
		// a non-constant format string; a '%' in the path would be treated
		// as a format verb and garble the message. Use %s instead.
		return "", fmt.Errorf("unexpected mount path: %s", globalPath)
	}
	// Reverse the :// replacement done in makeGlobalPDPath
	volumeID := rel
	if strings.HasPrefix(volumeID, "aws/") {
		volumeID = strings.Replace(volumeID, "aws/", "aws://", 1)
	}
	glog.V(2).Info("Mapping mount dir ", globalPath, " to volumeID ", volumeID)
	return volumeID, nil
}
// GetPath returns the pod-local directory this volume is mounted at.
func (ebs *awsElasticBlockStore) GetPath() string {
	return getPath(ebs.podUID, ebs.volName, ebs.plugin.host)
}

// awsElasticBlockStoreUnmounter tears down a pod's bind mount of the volume.
type awsElasticBlockStoreUnmounter struct {
	*awsElasticBlockStore
}

var _ volume.Unmounter = &awsElasticBlockStoreUnmounter{}

// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *awsElasticBlockStoreUnmounter) TearDown() error {
	return c.TearDownAt(c.GetPath())
}
// TearDownAt unmounts the bind mount at dir and removes the directory once it
// is no longer a mount point. Safe to call when dir is not mounted.
func (c *awsElasticBlockStoreUnmounter) TearDownAt(dir string) error {
	notMnt, err := c.mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		glog.V(2).Info("Error checking if mountpoint ", dir, ": ", err)
		return err
	}
	if notMnt {
		// Nothing mounted here; just remove the directory.
		glog.V(2).Info("Not mountpoint, deleting")
		return os.Remove(dir)
	}
	// Unmount the bind-mount inside this pod
	if err := c.mounter.Unmount(dir); err != nil {
		glog.V(2).Info("Error unmounting dir ", dir, ": ", err)
		return err
	}
	notMnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
	if mntErr != nil {
		glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
		// BUG FIX: previously returned the outer `err`, which is nil at this
		// point (it passed the check above), silently swallowing the failure.
		// Return the actual error from the re-check.
		return mntErr
	}
	if notMnt {
		if err := os.Remove(dir); err != nil {
			glog.V(2).Info("Error removing mountpoint ", dir, ": ", err)
			return err
		}
	}
	return nil
}
// awsElasticBlockStoreDeleter deletes the backing EBS volume via the manager.
type awsElasticBlockStoreDeleter struct {
	*awsElasticBlockStore
}

var _ volume.Deleter = &awsElasticBlockStoreDeleter{}

// GetPath returns the pod-local directory for this volume.
func (d *awsElasticBlockStoreDeleter) GetPath() string {
	return getPath(d.podUID, d.volName, d.plugin.host)
}

// Delete removes the backing EBS volume from the provider.
func (d *awsElasticBlockStoreDeleter) Delete() error {
	return d.manager.DeleteVolume(d)
}

// awsElasticBlockStoreProvisioner creates new EBS volumes for dynamic
// provisioning.
type awsElasticBlockStoreProvisioner struct {
	*awsElasticBlockStore
	options   volume.VolumeOptions
	namespace string
}

var _ volume.Provisioner = &awsElasticBlockStoreProvisioner{}
// Provision creates a new EBS volume via the manager and returns a
// PersistentVolume object describing it. The source is hard-coded to ext4,
// partition 0, read-write; provider labels (if any) are merged into the PV.
func (c *awsElasticBlockStoreProvisioner) Provision() (*api.PersistentVolume, error) {
	volumeID, sizeGB, labels, err := c.manager.CreateVolume(c)
	if err != nil {
		glog.Errorf("Provision failed: %v", err)
		return nil, err
	}
	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name:   c.options.PVName,
			Labels: map[string]string{},
			Annotations: map[string]string{
				"kubernetes.io/createdby": "aws-ebs-dynamic-provisioner",
			},
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
			AccessModes:                   c.options.PVC.Spec.AccessModes,
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{
					VolumeID:  string(volumeID),
					FSType:    "ext4",
					Partition: 0,
					ReadOnly:  false,
				},
			},
		},
	}
	// Fall back to the plugin's supported modes when the claim requested none.
	if len(c.options.PVC.Spec.AccessModes) == 0 {
		pv.Spec.AccessModes = c.plugin.GetAccessModes()
	}
	// Merge provider-reported labels into the PV.
	if len(labels) != 0 {
		if pv.Labels == nil {
			pv.Labels = make(map[string]string)
		}
		for k, v := range labels {
			pv.Labels[k] = v
		}
	}
	return pv, nil
}
Update aws_ebs.go
fix typo in glog
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws_ebs
import (
"fmt"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
)
// ProbeVolumePlugins is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
	return []volume.VolumePlugin{&awsElasticBlockStorePlugin{nil}}
}

// awsElasticBlockStorePlugin implements the volume plugin interfaces for
// AWS EBS disks. The host is injected via Init.
type awsElasticBlockStorePlugin struct {
	host volume.VolumeHost
}

// Compile-time checks that the plugin satisfies the expected interfaces.
var _ volume.VolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.PersistentVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.DeletableVolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.ProvisionableVolumePlugin = &awsElasticBlockStorePlugin{}

const (
	awsElasticBlockStorePluginName = "kubernetes.io/aws-ebs"
	awsURLNamePrefix               = "aws://"
)

// getPath returns the pod-local volume directory for the given pod/volume.
func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
	return host.GetPodVolumeDir(uid, kstrings.EscapeQualifiedNameForDisk(awsElasticBlockStorePluginName), volName)
}
// Init stores the VolumeHost for later use.
func (plugin *awsElasticBlockStorePlugin) Init(host volume.VolumeHost) error {
	plugin.host = host
	return nil
}

// GetPluginName returns the registered plugin name.
func (plugin *awsElasticBlockStorePlugin) GetPluginName() string {
	return awsElasticBlockStorePluginName
}

// GetVolumeName returns the EBS volume ID backing the given spec.
func (plugin *awsElasticBlockStorePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}
	return volumeSource.VolumeID, nil
}

// CanSupport reports whether the spec references an AWS EBS volume, either
// directly on the pod or via a PersistentVolume.
func (plugin *awsElasticBlockStorePlugin) CanSupport(spec *volume.Spec) bool {
	return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AWSElasticBlockStore != nil) ||
		(spec.Volume != nil && spec.Volume.AWSElasticBlockStore != nil)
}

// RequiresRemount is always false for this plugin.
func (plugin *awsElasticBlockStorePlugin) RequiresRemount() bool {
	return false
}

// GetAccessModes returns the access modes supported: ReadWriteOnce only.
func (plugin *awsElasticBlockStorePlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
	return []api.PersistentVolumeAccessMode{
		api.ReadWriteOnce,
	}
}

// NewMounter builds a Mounter using the real AWS disk util and host mounter.
func (plugin *awsElasticBlockStorePlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
	// Inject real implementations here, test through the internal function.
	return plugin.newMounterInternal(spec, pod.UID, &AWSDiskUtil{}, plugin.host.GetMounter())
}
// newMounterInternal is the injectable implementation of NewMounter, allowing
// tests to substitute the manager and mounter.
func (plugin *awsElasticBlockStorePlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Mounter, error) {
	// EBSs used directly in a pod have a ReadOnly flag set by the pod author.
	// EBSs used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
	ebs, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}
	volumeID := aws.KubernetesVolumeID(ebs.VolumeID)
	fsType := ebs.FSType
	// Partition 0 means the whole device and is represented as "".
	partition := ""
	if ebs.Partition != 0 {
		partition = strconv.Itoa(int(ebs.Partition))
	}
	return &awsElasticBlockStoreMounter{
		awsElasticBlockStore: &awsElasticBlockStore{
			podUID:          podUID,
			volName:         spec.Name(),
			volumeID:        volumeID,
			partition:       partition,
			manager:         manager,
			mounter:         mounter,
			plugin:          plugin,
			MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
		},
		fsType:      fsType,
		readOnly:    readOnly,
		diskMounter: &mount.SafeFormatAndMount{Interface: plugin.host.GetMounter(), Runner: exec.New()}}, nil
}
// NewUnmounter builds an Unmounter using the real AWS disk util and host mounter.
func (plugin *awsElasticBlockStorePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
	// Inject real implementations here, test through the internal function.
	return plugin.newUnmounterInternal(volName, podUID, &AWSDiskUtil{}, plugin.host.GetMounter())
}

// newUnmounterInternal is the injectable implementation of NewUnmounter.
func (plugin *awsElasticBlockStorePlugin) newUnmounterInternal(volName string, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Unmounter, error) {
	return &awsElasticBlockStoreUnmounter{&awsElasticBlockStore{
		podUID:          podUID,
		volName:         volName,
		manager:         manager,
		mounter:         mounter,
		plugin:          plugin,
		MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host)),
	}}, nil
}
// NewDeleter builds a Deleter using the real AWS disk util.
func (plugin *awsElasticBlockStorePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
	return plugin.newDeleterInternal(spec, &AWSDiskUtil{})
}

// newDeleterInternal is the injectable implementation of NewDeleter.
func (plugin *awsElasticBlockStorePlugin) newDeleterInternal(spec *volume.Spec, manager ebsManager) (volume.Deleter, error) {
	// BUG FIX: also reject a nil PersistentVolume. The previous condition
	// (spec.PersistentVolume != nil && ...AWSElasticBlockStore == nil) let a
	// spec with no PersistentVolume at all fall through and panic on the
	// dereference below.
	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.AWSElasticBlockStore == nil {
		glog.Errorf("spec.PersistentVolumeSource.AWSElasticBlockStore is nil")
		return nil, fmt.Errorf("spec.PersistentVolumeSource.AWSElasticBlockStore is nil")
	}
	return &awsElasticBlockStoreDeleter{
		awsElasticBlockStore: &awsElasticBlockStore{
			volName:  spec.Name(),
			volumeID: aws.KubernetesVolumeID(spec.PersistentVolume.Spec.AWSElasticBlockStore.VolumeID),
			manager:  manager,
			plugin:   plugin,
		}}, nil
}
// NewProvisioner builds a Provisioner using the real AWS disk util.
func (plugin *awsElasticBlockStorePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
	return plugin.newProvisionerInternal(options, &AWSDiskUtil{})
}

// newProvisionerInternal is the injectable implementation of NewProvisioner.
func (plugin *awsElasticBlockStorePlugin) newProvisionerInternal(options volume.VolumeOptions, manager ebsManager) (volume.Provisioner, error) {
	return &awsElasticBlockStoreProvisioner{
		awsElasticBlockStore: &awsElasticBlockStore{
			manager: manager,
			plugin:  plugin,
		},
		options: options,
	}, nil
}
// getVolumeSource extracts the AWS EBS volume source and its read-only flag
// from a spec. Direct pod volumes carry ReadOnly on the source itself;
// PersistentVolumes get it from spec.ReadOnly.
func getVolumeSource(
	spec *volume.Spec) (*api.AWSElasticBlockStoreVolumeSource, bool, error) {
	if spec.Volume != nil && spec.Volume.AWSElasticBlockStore != nil {
		return spec.Volume.AWSElasticBlockStore, spec.Volume.AWSElasticBlockStore.ReadOnly, nil
	} else if spec.PersistentVolume != nil &&
		spec.PersistentVolume.Spec.AWSElasticBlockStore != nil {
		return spec.PersistentVolume.Spec.AWSElasticBlockStore, spec.ReadOnly, nil
	}
	// Error string lowercased per Go convention (was "Spec does not ...").
	return nil, false, fmt.Errorf("spec does not reference an AWS EBS volume type")
}
// ConstructVolumeSpec rebuilds a volume.Spec from a mount path, recovering
// the AWS-style volume ID from the on-disk mount representation.
func (plugin *awsElasticBlockStorePlugin) ConstructVolumeSpec(volName, mountPath string) (*volume.Spec, error) {
	mounter := plugin.host.GetMounter()
	pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
	volumeID, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
	if err != nil {
		return nil, err
	}
	// This is a workaround to fix the issue in converting aws volume id from globalPDPath
	// There are three aws volume id formats and their volumeID from GetDeviceNameFromMount() are:
	// aws:///vol-1234 (aws/vol-1234)
	// aws://us-east-1/vol-1234 (aws/us-east-1/vol-1234)
	// vol-1234 (vol-1234)
	// This code is for converting volume id to aws style volume id for the first two cases.
	sourceName := volumeID
	if strings.HasPrefix(volumeID, "aws/") {
		names := strings.Split(volumeID, "/")
		length := len(names)
		if length < 2 || length > 3 {
			return nil, fmt.Errorf("Failed to get AWS volume id from mount path %q: invalid volume name format %q", mountPath, volumeID)
		}
		// NOTE: volName here shadows the method parameter; it is the "vol-..."
		// component of the path, not the kubelet volume name.
		volName := names[length-1]
		if !strings.HasPrefix(volName, "vol-") {
			return nil, fmt.Errorf("Invalid volume name format for AWS volume (%q) retrieved from mount path %q", volName, mountPath)
		}
		if length == 2 {
			sourceName = awsURLNamePrefix + "" + "/" + volName // empty zone label
		}
		if length == 3 {
			sourceName = awsURLNamePrefix + names[1] + "/" + volName // names[1] is the zone label
		}
		glog.V(4).Infof("Convert aws volume name from %q to %q ", volumeID, sourceName)
	}
	awsVolume := &api.Volume{
		Name: volName,
		VolumeSource: api.VolumeSource{
			AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{
				VolumeID: sourceName,
			},
		},
	}
	return volume.NewSpecFromVolume(awsVolume), nil
}
// Abstract interface to PD operations.
type ebsManager interface {
	// CreateVolume provisions a new EBS volume for the provisioner, returning
	// its ID, size in GB, and any provider labels.
	CreateVolume(provisioner *awsElasticBlockStoreProvisioner) (volumeID aws.KubernetesVolumeID, volumeSizeGB int, labels map[string]string, err error)
	// Deletes a volume
	DeleteVolume(deleter *awsElasticBlockStoreDeleter) error
}

// awsElasticBlockStore volumes are disk resources provided by Amazon Web Services
// that are attached to the kubelet's host machine and exposed to the pod.
type awsElasticBlockStore struct {
	volName string
	podUID  types.UID
	// Unique id of the PD, used to find the disk resource in the provider.
	volumeID aws.KubernetesVolumeID
	// Specifies the partition to mount
	partition string
	// Utility interface that provides API calls to the provider to attach/detach disks.
	manager ebsManager
	// Mounter interface that provides system calls to mount the global path to the pod local path.
	mounter mount.Interface
	plugin  *awsElasticBlockStorePlugin
	volume.MetricsProvider
}

// awsElasticBlockStoreMounter mounts an EBS-backed volume into a pod directory.
type awsElasticBlockStoreMounter struct {
	*awsElasticBlockStore
	// Filesystem type, optional.
	fsType string
	// Specifies whether the disk will be attached as read-only.
	readOnly bool
	// diskMounter provides the interface that is used to mount the actual block device.
	diskMounter *mount.SafeFormatAndMount
}
var _ volume.Mounter = &awsElasticBlockStoreMounter{}

// GetAttributes reports mount attributes: read-only disks are not "managed"
// (no ownership changes applied), and SELinux relabeling is supported.
func (b *awsElasticBlockStoreMounter) GetAttributes() volume.Attributes {
	return volume.Attributes{
		ReadOnly:        b.readOnly,
		Managed:         !b.readOnly,
		SupportsSELinux: true,
	}
}

// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *awsElasticBlockStoreMounter) CanMount() error {
	// EBS has no node-side prerequisites to verify.
	return nil
}

// SetUp attaches the disk and bind mounts to the volume path.
func (b *awsElasticBlockStoreMounter) SetUp(fsGroup *int64) error {
	return b.SetUpAt(b.GetPath(), fsGroup)
}
// SetUpAt attaches the disk and bind mounts to the volume path.
// The device is expected to already be mounted at the global PD path; this
// bind-mounts that global mount into the pod-local dir so the same PD can be
// shared by multiple pods on the node. Idempotent: returns nil if dir is
// already a mount point.
func (b *awsElasticBlockStoreMounter) SetUpAt(dir string, fsGroup *int64) error {
	// TODO: handle failed mounts here.
	notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
	glog.V(4).Infof("PersistentDisk set up: %s %v %v", dir, !notMnt, err)
	if err != nil && !os.IsNotExist(err) {
		glog.Errorf("cannot validate mount point: %s %v", dir, err)
		return err
	}
	// Already mounted — nothing to do.
	if !notMnt {
		return nil
	}
	globalPDPath := makeGlobalPDPath(b.plugin.host, b.volumeID)
	if err := os.MkdirAll(dir, 0750); err != nil {
		return err
	}
	// Perform a bind mount to the full path to allow duplicate mounts of the same PD.
	options := []string{"bind"}
	if b.readOnly {
		options = append(options, "ro")
	}
	err = b.mounter.Mount(globalPDPath, dir, "", options)
	if err != nil {
		// Mount failed — best-effort cleanup (unmount any partial mount,
		// remove dir) so the next sync loop can retry cleanly. The original
		// mount error `err` is what gets returned throughout this branch.
		notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
		if mntErr != nil {
			glog.Errorf("IsLikelyNotMountPoint check failed for %s: %v", dir, mntErr)
			return err
		}
		if !notMnt {
			if mntErr = b.mounter.Unmount(dir); mntErr != nil {
				glog.Errorf("failed to unmount %s: %v", dir, mntErr)
				return err
			}
			notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
			if mntErr != nil {
				glog.Errorf("IsLikelyNotMountPoint check failed for %s: %v", dir, mntErr)
				return err
			}
			if !notMnt {
				// This is very odd, we don't expect it. We'll try again next sync loop.
				glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir)
				return err
			}
		}
		os.Remove(dir)
		glog.Errorf("Mount of disk %s failed: %v", dir, err)
		return err
	}
	if !b.readOnly {
		// Apply fsGroup ownership/permissions only on writable mounts.
		volume.SetVolumeOwnership(b, fsGroup)
	}
	glog.V(4).Infof("Successfully mounted %s", dir)
	return nil
}

// makeGlobalPDPath returns the per-node "global" mount path for the given
// volume ID, under the plugin dir. The "://" in aws:// style IDs is flattened
// to "/" to keep the path filesystem-friendly.
func makeGlobalPDPath(host volume.VolumeHost, volumeID aws.KubernetesVolumeID) string {
	// Clean up the URI to be more fs-friendly
	name := string(volumeID)
	name = strings.Replace(name, "://", "/", -1)
	return path.Join(host.GetPluginDir(awsElasticBlockStorePluginName), mount.MountsInGlobalPDPath, name)
}
// getVolumeIDFromGlobalMount reverses the mapping done in makeGlobalPDPath:
// given a global mount path it recovers the volume ID, restoring the "aws://"
// prefix that was flattened to "aws/". Paths escaping the plugin base dir
// (containing "../") are rejected.
func getVolumeIDFromGlobalMount(host volume.VolumeHost, globalPath string) (string, error) {
	basePath := path.Join(host.GetPluginDir(awsElasticBlockStorePluginName), mount.MountsInGlobalPDPath)
	rel, err := filepath.Rel(basePath, globalPath)
	if err != nil {
		glog.Errorf("Failed to get volume id from global mount %s - %v", globalPath, err)
		return "", err
	}
	if strings.Contains(rel, "../") {
		glog.Errorf("Unexpected mount path: %s", globalPath)
		// BUG FIX: was fmt.Errorf("unexpected mount path: " + globalPath) —
		// a non-constant format string; a '%' in the path would be treated
		// as a format verb and garble the message. Use %s instead.
		return "", fmt.Errorf("unexpected mount path: %s", globalPath)
	}
	// Reverse the :// replacement done in makeGlobalPDPath
	volumeID := rel
	if strings.HasPrefix(volumeID, "aws/") {
		volumeID = strings.Replace(volumeID, "aws/", "aws://", 1)
	}
	glog.V(2).Info("Mapping mount dir ", globalPath, " to volumeID ", volumeID)
	return volumeID, nil
}
// GetPath returns the pod-local directory this volume is mounted at.
func (ebs *awsElasticBlockStore) GetPath() string {
	return getPath(ebs.podUID, ebs.volName, ebs.plugin.host)
}

// awsElasticBlockStoreUnmounter tears down a pod's bind mount of the volume.
type awsElasticBlockStoreUnmounter struct {
	*awsElasticBlockStore
}

var _ volume.Unmounter = &awsElasticBlockStoreUnmounter{}

// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *awsElasticBlockStoreUnmounter) TearDown() error {
	return c.TearDownAt(c.GetPath())
}
// TearDownAt unmounts the bind mount at dir and removes the directory once it
// is no longer a mount point. Safe to call when dir is not mounted.
func (c *awsElasticBlockStoreUnmounter) TearDownAt(dir string) error {
	notMnt, err := c.mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		glog.V(2).Info("Error checking if mountpoint ", dir, ": ", err)
		return err
	}
	if notMnt {
		// Nothing mounted here; just remove the directory.
		glog.V(2).Info("Not mountpoint, deleting")
		return os.Remove(dir)
	}
	// Unmount the bind-mount inside this pod
	if err := c.mounter.Unmount(dir); err != nil {
		glog.V(2).Info("Error unmounting dir ", dir, ": ", err)
		return err
	}
	notMnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
	if mntErr != nil {
		glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
		// BUG FIX: previously returned the outer `err`, which is nil at this
		// point (it passed the check above), silently swallowing the failure.
		// Return the actual error from the re-check.
		return mntErr
	}
	if notMnt {
		if err := os.Remove(dir); err != nil {
			glog.V(2).Info("Error removing mountpoint ", dir, ": ", err)
			return err
		}
	}
	return nil
}
// awsElasticBlockStoreDeleter deletes the backing EBS volume via the manager.
type awsElasticBlockStoreDeleter struct {
	*awsElasticBlockStore
}

var _ volume.Deleter = &awsElasticBlockStoreDeleter{}

// GetPath returns the pod-local directory for this volume.
func (d *awsElasticBlockStoreDeleter) GetPath() string {
	return getPath(d.podUID, d.volName, d.plugin.host)
}

// Delete removes the backing EBS volume from the provider.
func (d *awsElasticBlockStoreDeleter) Delete() error {
	return d.manager.DeleteVolume(d)
}

// awsElasticBlockStoreProvisioner creates new EBS volumes for dynamic
// provisioning.
type awsElasticBlockStoreProvisioner struct {
	*awsElasticBlockStore
	options   volume.VolumeOptions
	namespace string
}

var _ volume.Provisioner = &awsElasticBlockStoreProvisioner{}
// Provision creates a new EBS volume via the manager and returns a
// PersistentVolume object describing it. The source is hard-coded to ext4,
// partition 0, read-write; provider labels (if any) are merged into the PV.
func (c *awsElasticBlockStoreProvisioner) Provision() (*api.PersistentVolume, error) {
	volumeID, sizeGB, labels, err := c.manager.CreateVolume(c)
	if err != nil {
		glog.Errorf("Provision failed: %v", err)
		return nil, err
	}
	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name:   c.options.PVName,
			Labels: map[string]string{},
			Annotations: map[string]string{
				"kubernetes.io/createdby": "aws-ebs-dynamic-provisioner",
			},
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
			AccessModes:                   c.options.PVC.Spec.AccessModes,
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{
					VolumeID:  string(volumeID),
					FSType:    "ext4",
					Partition: 0,
					ReadOnly:  false,
				},
			},
		},
	}
	// Fall back to the plugin's supported modes when the claim requested none.
	if len(c.options.PVC.Spec.AccessModes) == 0 {
		pv.Spec.AccessModes = c.plugin.GetAccessModes()
	}
	// Merge provider-reported labels into the PV.
	if len(labels) != 0 {
		if pv.Labels == nil {
			pv.Labels = make(map[string]string)
		}
		for k, v := range labels {
			pv.Labels[k] = v
		}
	}
	return pv, nil
}
|
package web
import (
"net/http"
"encoding/json"
"io"
"time"
"github.com/Sirupsen/logrus"
"github.com/feedhenry/mcp-standalone/pkg/mobile"
"github.com/feedhenry/mcp-standalone/pkg/mobile/app"
"github.com/feedhenry/mcp-standalone/pkg/web/headers"
"github.com/gorilla/mux"
"github.com/pkg/errors"
)
// BuildHandler exposes HTTP endpoints for creating, running, and retrieving
// mobile app builds.
type BuildHandler struct {
	// buildRepoBuilder creates per-request build repos scoped to a token.
	buildRepoBuilder mobile.BuildRepoBuilder
	buildService     *app.Build
	logger           *logrus.Logger
}

// NewBuildHandler returns a configured build handler
func NewBuildHandler(br mobile.BuildRepoBuilder, buildService *app.Build, logger *logrus.Logger) *BuildHandler {
	return &BuildHandler{
		buildRepoBuilder: br,
		buildService:     buildService,
		logger:           logger,
	}
}
// Create will parse the create request and hand it off to app build service.
// Responds 201 with the created build encoded as JSON.
func (bh *BuildHandler) Create(rw http.ResponseWriter, req *http.Request) {
	token := headers.DefaultTokenRetriever(req.Header)
	buildRepo, err := bh.buildRepoBuilder.WithToken(token).Build()
	if err != nil {
		err = errors.Wrap(err, "build handler failed to create build repo instance")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	var (
		build   = &mobile.BuildConfig{}
		decoder = json.NewDecoder(req.Body)
		encoder = json.NewEncoder(rw)
	)
	if err := decoder.Decode(build); err != nil {
		err = errors.Wrap(err, "build handler failed to decode build payload")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	res, err := bh.buildService.CreateAppBuild(buildRepo, build)
	if err != nil {
		err = errors.Wrap(err, "build handler failed to create app build")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	// Status must be written before the body is encoded.
	rw.WriteHeader(http.StatusCreated)
	if err := encoder.Encode(res); err != nil {
		err = errors.Wrap(err, "failed to encode the build response")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
}
// Build starts the build identified by the buildID path parameter and
// responds 201 on successful start.
func (bh *BuildHandler) Build(rw http.ResponseWriter, req *http.Request) {
	token := headers.DefaultTokenRetriever(req.Header)
	buildRepo, err := bh.buildRepoBuilder.WithToken(token).Build()
	if err != nil {
		err = errors.Wrap(err, "build handler failed to create build repo instance")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	params := mux.Vars(req)
	buildID := params["buildID"]
	if buildID == "" {
		http.Error(rw, "buildID cannot be empty", http.StatusBadRequest)
		// BUG FIX: without this return the handler kept executing with an
		// empty buildID after writing the 400 response.
		return
	}
	if err := bh.buildService.BuildApp(buildRepo, buildID); err != nil {
		err = errors.Wrap(err, "failed to start app build")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	rw.WriteHeader(http.StatusCreated)
	return
}
// GenerateKeys will parse the request and hand it off to the service logic to setup a new public private key pair.
// Responds 201 with a JSON object naming the created key asset.
func (bh *BuildHandler) GenerateKeys(rw http.ResponseWriter, req *http.Request) {
	token := headers.DefaultTokenRetriever(req.Header)
	params := mux.Vars(req)
	buildID := params["buildID"]
	if buildID == "" {
		http.Error(rw, "buildID cannot be empty ", http.StatusBadRequest)
		return
	}
	buildRepo, err := bh.buildRepoBuilder.WithToken(token).Build()
	if err != nil {
		err = errors.Wrap(err, "build handler failed to create build repo instance")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	asset, _, err := bh.buildService.CreateBuildSrcKeySecret(buildRepo, buildID)
	if err != nil {
		err = errors.Wrap(err, "failed to generate keys")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	// Respond with the name of the created key asset.
	res := map[string]string{"name": asset}
	encoder := json.NewEncoder(rw)
	rw.WriteHeader(http.StatusCreated)
	if err := encoder.Encode(res); err != nil {
		err = errors.Wrap(err, "failed to encode response after creating source keys")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
}
// GenerateDownload enables downloading of the artifact for the given buildID
// and responds 201 with the download details encoded as JSON.
func (bh *BuildHandler) GenerateDownload(rw http.ResponseWriter, req *http.Request) {
	token := headers.DefaultTokenRetriever(req.Header)
	params := mux.Vars(req)
	buildID := params["buildID"]
	if buildID == "" {
		http.Error(rw, "buildID cannot be empty ", http.StatusBadRequest)
		return
	}
	buildRepo, err := bh.buildRepoBuilder.WithToken(token).Build()
	if err != nil {
		err = errors.Wrap(err, "build handler failed to create build repo instance")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	download, err := bh.buildService.EnableDownload(buildRepo, buildID)
	if err != nil {
		err = errors.Wrap(err, "build handler failed to create download")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	encoder := json.NewEncoder(rw)
	rw.WriteHeader(http.StatusCreated)
	if err := encoder.Encode(download); err != nil {
		err = errors.Wrap(err, "failed to encode response after creating download url")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
}
// Download streams a built artifact to the client. Access is granted via a
// short-lived download token passed as a query parameter (not the normal auth
// header), so the token and its expiry are validated before any artifact is
// read; the service-account token is used for the repo lookups.
func (bh *BuildHandler) Download(rw http.ResponseWriter, req *http.Request) {
	token := req.URL.Query().Get("token")
	if token == "" {
		http.Error(rw, "token cannot be empty", http.StatusBadRequest)
		return
	}
	params := mux.Vars(req)
	buildID := params["buildID"]
	if buildID == "" {
		http.Error(rw, "buildID cannot be empty ", http.StatusBadRequest)
		return
	}
	buildRepo, err := bh.buildRepoBuilder.UseDefaultSAToken().Build()
	if err != nil {
		err = errors.Wrap(err, "build handler failed to create build repo instance")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	download, err := buildRepo.GetDownload(buildID)
	// BUG FIX: this error was previously ignored, so a failed lookup could
	// dereference a nil/zero download below.
	if err != nil {
		err = errors.Wrap(err, "build handler failed to get download for build")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	// check our download token matched our sent token. We will be using the SAToken after this
	if download.Token != token {
		http.Error(rw, "forbidden", http.StatusForbidden)
		return
	}
	if download.Expires < time.Now().Unix() {
		http.Error(rw, "token expired", http.StatusGone)
		return
	}
	artifactReader, err := bh.buildService.Download(buildRepo, buildID)
	if err != nil {
		err = errors.Wrap(err, "error when attempting to download artifact")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	defer func() {
		if err := artifactReader.Close(); err != nil {
			bh.logger.Error("failed to close file handle. could be leaking resources ", err)
		}
	}()
	rw.Header().Set("content-type", "octet/stream")
	// TODO handle more than apk
	rw.Header().Set("content-disposition", "attachment; filename=\"app.apk\"")
	if _, err := io.Copy(rw, artifactReader); err != nil {
		err = errors.Wrap(err, "failed to write download")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
}
// AddAsset accepts a multipart upload of a build secret (form field "asset",
// optional "password") and stores it via the build service. Responds 201 with
// the stored asset name.
func (bh *BuildHandler) AddAsset(rw http.ResponseWriter, req *http.Request) {
	token := headers.DefaultTokenRetriever(req.Header)
	buildAsset := &mobile.BuildAsset{}
	if err := req.ParseMultipartForm(10 * 1000000); err != nil { //10MB
		err = errors.Wrap(err, "failed parse multipart form when adding asset")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	params := mux.Vars(req)
	file, info, err := req.FormFile("asset")
	if err != nil {
		err = errors.Wrap(err, "getting the form file failed when adding asset")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	defer func() {
		if err := file.Close(); err != nil {
			bh.logger.Error("failed to close file handle. could be leaking resources", err)
		}
	}()
	buildAsset.Name = info.Filename
	buildAsset.Platform = params["platform"]
	buildAsset.Type = mobile.BuildAssetTypeBuildSecret
	buildAsset.Password = req.FormValue("password")
	// Validation failures are reported as a client error (400).
	if err := buildAsset.Validate(mobile.BuildAssetTypeBuildSecret); err != nil {
		err = &mobile.StatusError{Message: err.Error(), Code: http.StatusBadRequest}
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	br, err := bh.buildRepoBuilder.WithToken(token).Build()
	if err != nil {
		err = errors.Wrap(err, "failed to create build repo with token")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	asset, err := bh.buildService.AddBuildAsset(br, file, buildAsset)
	if err != nil {
		err = errors.Wrap(err, "AddAsset failed to add new build resource")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	res := map[string]string{"name": asset}
	encoder := json.NewEncoder(rw)
	rw.WriteHeader(http.StatusCreated)
	if err := encoder.Encode(res); err != nil {
		err = errors.Wrap(err, "failed to encode response after creating build asset")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
}
Return after http error
package web
import (
"net/http"
"encoding/json"
"io"
"time"
"github.com/Sirupsen/logrus"
"github.com/feedhenry/mcp-standalone/pkg/mobile"
"github.com/feedhenry/mcp-standalone/pkg/mobile/app"
"github.com/feedhenry/mcp-standalone/pkg/web/headers"
"github.com/gorilla/mux"
"github.com/pkg/errors"
)
// BuildHandler exposes HTTP endpoints for creating, running, and retrieving
// mobile app builds.
type BuildHandler struct {
	// buildRepoBuilder creates per-request build repos scoped to a token.
	buildRepoBuilder mobile.BuildRepoBuilder
	buildService     *app.Build
	logger           *logrus.Logger
}

// NewBuildHandler returns a configured build handler
func NewBuildHandler(br mobile.BuildRepoBuilder, buildService *app.Build, logger *logrus.Logger) *BuildHandler {
	return &BuildHandler{
		buildRepoBuilder: br,
		buildService:     buildService,
		logger:           logger,
	}
}
// Create will parse the create request and hand it off to app build service.
// Responds 201 with the created build encoded as JSON.
func (bh *BuildHandler) Create(rw http.ResponseWriter, req *http.Request) {
	token := headers.DefaultTokenRetriever(req.Header)
	buildRepo, err := bh.buildRepoBuilder.WithToken(token).Build()
	if err != nil {
		err = errors.Wrap(err, "build handler failed to create build repo instance")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	var (
		build   = &mobile.BuildConfig{}
		decoder = json.NewDecoder(req.Body)
		encoder = json.NewEncoder(rw)
	)
	if err := decoder.Decode(build); err != nil {
		err = errors.Wrap(err, "build handler failed to decode build payload")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	res, err := bh.buildService.CreateAppBuild(buildRepo, build)
	if err != nil {
		err = errors.Wrap(err, "build handler failed to create app build")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	// Status must be written before the body is encoded.
	rw.WriteHeader(http.StatusCreated)
	if err := encoder.Encode(res); err != nil {
		err = errors.Wrap(err, "failed to encode the build response")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
}
// Build starts the build identified by the buildID path parameter and
// responds 201 on successful start. Note the early return after the empty
// buildID check, which stops the handler from continuing past the 400.
func (bh *BuildHandler) Build(rw http.ResponseWriter, req *http.Request) {
	token := headers.DefaultTokenRetriever(req.Header)
	buildRepo, err := bh.buildRepoBuilder.WithToken(token).Build()
	if err != nil {
		err = errors.Wrap(err, "build handler failed to create build repo instance")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	params := mux.Vars(req)
	buildID := params["buildID"]
	if buildID == "" {
		http.Error(rw, "buildID cannot be empty", http.StatusBadRequest)
		return
	}
	if err := bh.buildService.BuildApp(buildRepo, buildID); err != nil {
		err = errors.Wrap(err, "failed to start app build")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	rw.WriteHeader(http.StatusCreated)
	return
}
// GenerateKeys will parse the request and hand it off to the service logic to setup a new public private key pair.
// Responds 201 with a JSON object naming the created key asset.
func (bh *BuildHandler) GenerateKeys(rw http.ResponseWriter, req *http.Request) {
	token := headers.DefaultTokenRetriever(req.Header)
	params := mux.Vars(req)
	buildID := params["buildID"]
	if buildID == "" {
		http.Error(rw, "buildID cannot be empty ", http.StatusBadRequest)
		return
	}
	buildRepo, err := bh.buildRepoBuilder.WithToken(token).Build()
	if err != nil {
		err = errors.Wrap(err, "build handler failed to create build repo instance")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	asset, _, err := bh.buildService.CreateBuildSrcKeySecret(buildRepo, buildID)
	if err != nil {
		err = errors.Wrap(err, "failed to generate keys")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	// Respond with the name of the created key asset.
	res := map[string]string{"name": asset}
	encoder := json.NewEncoder(rw)
	rw.WriteHeader(http.StatusCreated)
	if err := encoder.Encode(res); err != nil {
		err = errors.Wrap(err, "failed to encode response after creating source keys")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
}
// GenerateDownload creates (or refreshes) a time-limited download for the
// build identified by the buildID path parameter and responds 201 Created
// with the download document encoded as JSON.
func (bh *BuildHandler) GenerateDownload(rw http.ResponseWriter, req *http.Request) {
	token := headers.DefaultTokenRetriever(req.Header)
	params := mux.Vars(req)
	buildID := params["buildID"]
	if buildID == "" {
		// Fixed: the message previously carried a stray trailing space,
		// inconsistent with the Build handler's identical check.
		http.Error(rw, "buildID cannot be empty", http.StatusBadRequest)
		return
	}
	buildRepo, err := bh.buildRepoBuilder.WithToken(token).Build()
	if err != nil {
		err = errors.Wrap(err, "build handler failed to create build repo instance")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	download, err := bh.buildService.EnableDownload(buildRepo, buildID)
	if err != nil {
		err = errors.Wrap(err, "build handler failed to create download")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	encoder := json.NewEncoder(rw)
	// The 201 status is committed before encoding; an encode failure below
	// can only be logged, it cannot change the response code.
	rw.WriteHeader(http.StatusCreated)
	if err := encoder.Encode(download); err != nil {
		err = errors.Wrap(err, "failed to encode response after creating download url")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
}
// Download streams the built artifact for buildID to the caller. Access is
// authorised by a one-time token passed as a query parameter and checked
// against the stored download document (value and expiry); repo access
// itself uses the default service-account token.
func (bh *BuildHandler) Download(rw http.ResponseWriter, req *http.Request) {
	token := req.URL.Query().Get("token")
	if token == "" {
		http.Error(rw, "token cannot be empty", http.StatusBadRequest)
		return
	}
	params := mux.Vars(req)
	buildID := params["buildID"]
	if buildID == "" {
		http.Error(rw, "buildID cannot be empty", http.StatusBadRequest)
		return
	}
	buildRepo, err := bh.buildRepoBuilder.UseDefaultSAToken().Build()
	if err != nil {
		err = errors.Wrap(err, "build handler failed to create build repo instance")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	download, err := buildRepo.GetDownload(buildID)
	// BUG FIX: this error was previously never checked, so a failed lookup
	// fell through and dereferenced a nil/zero download below.
	if err != nil {
		err = errors.Wrap(err, "build handler failed to get download for build")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	// check our download token matched our sent token. We will be using the SAToken after this
	if download.Token != token {
		http.Error(rw, "forbidden", http.StatusForbidden)
		return
	}
	if download.Expires < time.Now().Unix() {
		http.Error(rw, "token expired", http.StatusGone)
		return
	}
	artifactReader, err := bh.buildService.Download(buildRepo, buildID)
	if err != nil {
		err = errors.Wrap(err, "error when attempting to download artifact")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	defer func() {
		if err := artifactReader.Close(); err != nil {
			bh.logger.Error("failed to close file handle. could be leaking resources ", err)
		}
	}()
	// NOTE(review): "octet/stream" is not a registered media type;
	// "application/octet-stream" is almost certainly intended — confirm no
	// client depends on the current value before changing it.
	rw.Header().Set("content-type", "octet/stream")
	// TODO handle more than apk
	rw.Header().Set("content-disposition", "attachment; filename=\"app.apk\"")
	if _, err := io.Copy(rw, artifactReader); err != nil {
		err = errors.Wrap(err, "failed to write download")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
}
// AddAsset stores an uploaded file (multipart field "asset", form capped at
// 10MB) as a build secret for the platform given in the URL. On success it
// responds 201 Created with a JSON body {"name": <asset name>}.
func (bh *BuildHandler) AddAsset(rw http.ResponseWriter, req *http.Request) {
	token := headers.DefaultTokenRetriever(req.Header)
	buildAsset := &mobile.BuildAsset{}
	if err := req.ParseMultipartForm(10 * 1000000); err != nil { //10MB
		err = errors.Wrap(err, "failed parse multipart form when adding asset")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	params := mux.Vars(req)
	file, info, err := req.FormFile("asset")
	if err != nil {
		err = errors.Wrap(err, "getting the form file failed when adding asset")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	// Close the upload handle on every exit path below.
	defer func() {
		if err := file.Close(); err != nil {
			bh.logger.Error("failed to close file handle. could be leaking resources", err)
		}
	}()
	// Populate the asset from the upload metadata and form fields.
	buildAsset.Name = info.Filename
	buildAsset.Platform = params["platform"]
	buildAsset.Type = mobile.BuildAssetTypeBuildSecret
	buildAsset.Password = req.FormValue("password")
	// Validate before touching the repo so bad input fails fast with a 400.
	if err := buildAsset.Validate(mobile.BuildAssetTypeBuildSecret); err != nil {
		err = &mobile.StatusError{Message: err.Error(), Code: http.StatusBadRequest}
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	br, err := bh.buildRepoBuilder.WithToken(token).Build()
	if err != nil {
		err = errors.Wrap(err, "failed to create build repo with token")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	asset, err := bh.buildService.AddBuildAsset(br, file, buildAsset)
	if err != nil {
		err = errors.Wrap(err, "AddAsset failed to add new build resource")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
	res := map[string]string{"name": asset}
	encoder := json.NewEncoder(rw)
	// The 201 status is committed before encoding; an encode failure below
	// can only be logged, it cannot change the response code.
	rw.WriteHeader(http.StatusCreated)
	if err := encoder.Encode(res); err != nil {
		err = errors.Wrap(err, "failed to encode response after creating build asset")
		handleCommonErrorCases(err, rw, bh.logger)
		return
	}
}
|
package exec
import (
"archive/tar"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"path"
"time"
"github.com/cozy/cozy-stack/pkg/apps"
"github.com/cozy/cozy-stack/pkg/config"
"github.com/cozy/cozy-stack/pkg/consts"
"github.com/cozy/cozy-stack/pkg/couchdb"
"github.com/cozy/cozy-stack/pkg/instance"
"github.com/cozy/cozy-stack/pkg/jobs"
"github.com/cozy/cozy-stack/pkg/realtime"
"github.com/cozy/cozy-stack/pkg/vfs"
"github.com/sirupsen/logrus"
"github.com/spf13/afero"
)
// konnectorMsg is one JSON-encoded log line read from a konnector's stdout.
type konnectorMsg struct {
	Type string `json:"type"`
	Message string `json:"message"`
}
// konnectorWorker carries the per-execution state shared between the worker
// callbacks (PrepareWorkDir, PrepareCmdEnv, ScanOuput, Error, Commit).
type konnectorWorker struct {
	slug string
	// msg is the decoded job message (konnector slug, account, folder ids…).
	msg map[string]interface{}
	man *apps.KonnManifest
	// messages accumulates everything the konnector logged, so Error() can
	// pick the most relevant one.
	messages []konnectorMsg
}
// konnectorResult stores the result of a konnector execution.
type konnectorResult struct {
	DocID string `json:"_id,omitempty"`
	DocRev string `json:"_rev,omitempty"`
	CreatedAt time.Time `json:"last_execution"`
	LastSuccess time.Time `json:"last_success"`
	Account string `json:"account"`
	State string `json:"state"`
	Error string `json:"error"`
}
// couchdb.Doc implementation for konnectorResult.
func (r *konnectorResult) ID() string { return r.DocID }
func (r *konnectorResult) Rev() string { return r.DocRev }
func (r *konnectorResult) DocType() string { return consts.KonnectorResults }
func (r *konnectorResult) Clone() couchdb.Doc { c := *r; return &c }
func (r *konnectorResult) SetID(id string) { r.DocID = id }
func (r *konnectorResult) SetRev(rev string) { r.DocRev = rev }
// Message types accepted in konnectorMsg.Type, in increasing severity.
const (
	konnectorMsgTypeDebug = "debug"
	konnectorMsgTypeWarning = "warning"
	konnectorMsgTypeError = "error"
	konnectorMsgTypeCritical = "critical"
)
// const konnectorMsgTypeProgress string = "progress"
// PrepareWorkDir unpacks the konnector's tar archive into a fresh temporary
// directory and returns its path. It also resolves — creating it if needed —
// the folder the konnector is allowed to write into, storing that folder's
// id back into the job message under "folder_to_save".
func (w *konnectorWorker) PrepareWorkDir(i *instance.Instance, m jobs.Message) (workDir string, err error) {
	var msg map[string]interface{}
	if err = m.Unmarshal(&msg); err != nil {
		return
	}
	slug, _ := msg["konnector"].(string)
	man, err := apps.GetKonnectorBySlug(i, slug)
	if err != nil {
		return
	}
	// TODO: disallow konnectors on state Installed to be run when we define our
	// workflow to accept permissions changes on konnectors.
	if s := man.State(); s != apps.Ready && s != apps.Installed {
		err = errors.New("Konnector is not ready")
		return
	}
	// Stash the execution context for the later worker callbacks.
	w.slug = slug
	w.msg = msg
	w.man = man
	osFS := afero.NewOsFs()
	workDir, err = afero.TempDir(osFS, "", "konnector-"+slug)
	if err != nil {
		return
	}
	workFS := afero.NewBasePathFs(osFS, workDir)
	fileServer := i.KonnectorsFileServer()
	tarFile, err := fileServer.Open(slug, man.Version(), apps.KonnectorArchiveName)
	if err != nil {
		return
	}
	// BUG FIX: the archive handle was never closed, leaking a descriptor on
	// every konnector run. Close it on all exit paths, surfacing the close
	// error only when nothing else already failed.
	defer func() {
		if cerr := tarFile.Close(); cerr != nil && err == nil {
			err = cerr
		}
	}()
	// Create the folder in which the konnector has the right to write.
	{
		fs := i.VFS()
		folderToSave, _ := msg["folder_to_save"].(string)
		defaultFolderPath, _ := msg["default_folder_path"].(string)
		if folderToSave != "" {
			if defaultFolderPath == "" {
				// NOTE(review): "/???/<slug>" looks like a placeholder —
				// confirm the intended default folder name.
				defaultFolderPath = fmt.Sprintf("/???/%s", slug)
			}
			// NOTE(review): only the not-found case clears folderToSave; any
			// other DirByID error is dropped here — confirm that is intended.
			if _, err = fs.DirByID(folderToSave); os.IsNotExist(err) {
				folderToSave = ""
			}
		}
		if folderToSave == "" {
			var dir *vfs.DirDoc
			dir, err = vfs.MkdirAll(fs, defaultFolderPath, nil)
			if err != nil {
				return
			}
			msg["folder_to_save"] = dir.ID()
		}
	}
	// Extract the archive entry by entry into the work dir.
	tr := tar.NewReader(tarFile)
	for {
		var hdr *tar.Header
		hdr, err = tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return
		}
		dirname := path.Dir(hdr.Name)
		if dirname != "." {
			if err = workFS.MkdirAll(dirname, 0755); err != nil {
				return
			}
		}
		var f afero.File
		f, err = workFS.OpenFile(hdr.Name, os.O_CREATE|os.O_WRONLY, 0640)
		if err != nil {
			return
		}
		_, err = io.Copy(f, tr)
		errc := f.Close()
		if err != nil {
			return
		}
		if errc != nil {
			err = errc
			return
		}
	}
	return workDir, nil
}
// PrepareCmdEnv returns the command to execute, the environment variables
// exposing the instance/job context to the konnector, and the job id.
func (w *konnectorWorker) PrepareCmdEnv(i *instance.Instance, m jobs.Message) (cmd string, env []string, jobID string, err error) {
	// The job id ties this execution back to its konnector/instance pair.
	jobID = fmt.Sprintf("konnector/%s/%s", w.slug, i.Domain)
	// Directly pass the job message as fields parameters
	var fields, params []byte
	if fields, err = json.Marshal(w.msg); err != nil {
		return
	}
	if params, err = json.Marshal(w.man.Parameters); err != nil {
		return
	}
	creds := i.BuildKonnectorToken(w.man)
	cmd = config.GetConfig().Konnectors.Cmd
	env = []string{
		"COZY_URL=" + i.PageURL("/", nil),
		"COZY_CREDENTIALS=" + creds,
		"COZY_FIELDS=" + string(fields),
		"COZY_PARAMETERS=" + string(params),
		"COZY_TYPE=" + w.man.Type,
		"COZY_LOCALE=" + i.Locale,
		"COZY_JOB_ID=" + jobID,
	}
	return
}
// ScanOuput parses one stdout line from the konnector as a konnectorMsg,
// logs it at the matching level, stores it for Error(), and republishes it
// on the realtime hub for live consumers. (The name is "ScanOuput" [sic] —
// presumably fixed by the worker-callback contract; renaming would break it.)
func (w *konnectorWorker) ScanOuput(i *instance.Instance, log *logrus.Entry, line []byte) error {
	var msg konnectorMsg
	if err := json.Unmarshal(line, &msg); err != nil {
		return fmt.Errorf("Could not parse stdout as JSON: %q", string(line))
	}
	// Route to the logger level matching the message type; unknown types are
	// kept but not logged.
	switch msg.Type {
	case konnectorMsgTypeDebug:
		log.Debug(msg.Message)
	case konnectorMsgTypeWarning:
		log.Warn(msg.Message)
	case konnectorMsgTypeError, konnectorMsgTypeCritical:
		log.Error(msg.Message)
	}
	// Keep the message so Error() can later pick the most relevant one.
	w.messages = append(w.messages, msg)
	realtime.GetHub().Publish(&realtime.Event{
		Verb: realtime.EventCreate,
		Doc: couchdb.JSONDoc{Type: consts.JobEvents, M: map[string]interface{}{
			"type": msg.Type,
			"message": msg.Message,
		}},
		Domain: i.Domain,
	})
	return nil
}
// Error selects the error to report for the run. For retro-compatibility, a
// "critical" message wins outright; failing that, the most recent "error"
// message replaces the job error; otherwise the job error is passed through.
func (w *konnectorWorker) Error(i *instance.Instance, err error) error {
	var lastErr error
	for _, m := range w.messages {
		switch m.Type {
		case konnectorMsgTypeCritical:
			return errors.New(m.Message)
		case konnectorMsgTypeError:
			lastErr = errors.New(m.Message)
		}
	}
	if lastErr != nil {
		return lastErr
	}
	return err
}
// Commit records the outcome of the run in a per-slug konnectorResult
// document, creating it on the first execution and updating it afterwards.
// A nil w.msg means PrepareWorkDir never ran, so there is nothing to record.
// The msg parameter is unused here (kept for the worker-callback signature).
func (w *konnectorWorker) Commit(ctx context.Context, msg jobs.Message, errjob error) error {
	if w.msg == nil {
		return nil
	}
	accountID, _ := w.msg["account"].(string)
	// NOTE(review): this assertion panics if the context lacks the domain
	// key — presumably guaranteed by the jobs runner; confirm.
	domain := ctx.Value(jobs.ContextDomainKey).(string)
	inst, err := instance.Get(domain)
	if err != nil {
		return err
	}
	// Fetch the previous result, if any; a missing doc is not an error.
	lastResult := &konnectorResult{}
	err = couchdb.GetDoc(inst, consts.KonnectorResults, w.slug, lastResult)
	if err != nil {
		if !couchdb.IsNotFoundError(err) {
			return err
		}
		lastResult = nil
	}
	var state, errstr string
	var lastSuccess time.Time
	if errjob != nil {
		// On failure, carry forward the previous success timestamp.
		if lastResult != nil {
			lastSuccess = lastResult.LastSuccess
		}
		errstr = errjob.Error()
		state = jobs.Errored
	} else {
		lastSuccess = time.Now()
		state = jobs.Done
	}
	result := &konnectorResult{
		DocID: w.slug,
		Account: accountID,
		CreatedAt: time.Now(),
		LastSuccess: lastSuccess,
		State: state,
		Error: errstr,
	}
	// Create on first run, otherwise update in place with the stored rev.
	if lastResult == nil {
		err = couchdb.CreateNamedDocWithDB(inst, result)
	} else {
		result.SetRev(lastResult.Rev())
		err = couchdb.UpdateDoc(inst, result)
	}
	return err
	// The disabled block below (failure-notification mail) is unreachable
	// after the return above; kept for reference.
	// if err != nil {
	// 	return err
	// }
	// // if it is the first try we do not take into account an error, we bail.
	// if lastResult == nil {
	// 	return nil
	// }
	// // if the job has not errored, or the last one was already errored, we bail.
	// if state != jobs.Errored || lastResult.State == jobs.Errored {
	// 	return nil
	// }
	// konnectorURL := inst.SubDomain(consts.CollectSlug)
	// konnectorURL.Fragment = "/category/all/" + slug
	// mail := mails.Options{
	// 	Mode: mails.ModeNoReply,
	// 	Subject: inst.Translate("Error Konnector execution", domain),
	// 	TemplateName: "konnector_error_" + inst.Locale,
	// 	TemplateValues: map[string]string{
	// 		"KonnectorName": slug,
	// 		"KonnectorPage": konnectorURL.String(),
	// 	},
	// }
	// msg, err := jobs.NewMessage(&mail)
	// if err != nil {
	// 	return err
	// }
	// log := logger.WithDomain(domain)
	// log.Info("Konnector has failed definitively, should send mail.", mail)
	// _, err = globals.GetBroker().PushJob(&jobs.JobRequest{
	// 	Domain: domain,
	// 	WorkerType: "sendmail",
	// 	Message: msg,
	// })
	// return err
}
Set better default folder name
package exec
import (
"archive/tar"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"path"
"time"
"github.com/cozy/cozy-stack/pkg/apps"
"github.com/cozy/cozy-stack/pkg/config"
"github.com/cozy/cozy-stack/pkg/consts"
"github.com/cozy/cozy-stack/pkg/couchdb"
"github.com/cozy/cozy-stack/pkg/instance"
"github.com/cozy/cozy-stack/pkg/jobs"
"github.com/cozy/cozy-stack/pkg/realtime"
"github.com/cozy/cozy-stack/pkg/vfs"
"github.com/sirupsen/logrus"
"github.com/spf13/afero"
)
// konnectorMsg is one JSON-encoded log line read from a konnector's stdout.
type konnectorMsg struct {
	Type string `json:"type"`
	Message string `json:"message"`
}
// konnectorWorker carries the per-execution state shared between the worker
// callbacks (PrepareWorkDir, PrepareCmdEnv, ScanOuput, Error, Commit).
type konnectorWorker struct {
	slug string
	// msg is the decoded job message (konnector slug, account, folder ids…).
	msg map[string]interface{}
	man *apps.KonnManifest
	// messages accumulates everything the konnector logged, so Error() can
	// pick the most relevant one.
	messages []konnectorMsg
}
// konnectorResult stores the result of a konnector execution.
type konnectorResult struct {
	DocID string `json:"_id,omitempty"`
	DocRev string `json:"_rev,omitempty"`
	CreatedAt time.Time `json:"last_execution"`
	LastSuccess time.Time `json:"last_success"`
	Account string `json:"account"`
	State string `json:"state"`
	Error string `json:"error"`
}
// couchdb.Doc implementation for konnectorResult.
func (r *konnectorResult) ID() string { return r.DocID }
func (r *konnectorResult) Rev() string { return r.DocRev }
func (r *konnectorResult) DocType() string { return consts.KonnectorResults }
func (r *konnectorResult) Clone() couchdb.Doc { c := *r; return &c }
func (r *konnectorResult) SetID(id string) { r.DocID = id }
func (r *konnectorResult) SetRev(rev string) { r.DocRev = rev }
// Message types accepted in konnectorMsg.Type, in increasing severity.
const (
	konnectorMsgTypeDebug = "debug"
	konnectorMsgTypeWarning = "warning"
	konnectorMsgTypeError = "error"
	konnectorMsgTypeCritical = "critical"
)
// const konnectorMsgTypeProgress string = "progress"
func (w *konnectorWorker) PrepareWorkDir(i *instance.Instance, m jobs.Message) (workDir string, err error) {
var msg map[string]interface{}
if err = m.Unmarshal(&msg); err != nil {
return
}
slug, _ := msg["konnector"].(string)
man, err := apps.GetKonnectorBySlug(i, slug)
if err != nil {
return
}
// TODO: disallow konnectors on state Installed to be run when we define our
// workflow to accept permissions changes on konnectors.
if s := man.State(); s != apps.Ready && s != apps.Installed {
err = errors.New("Konnector is not ready")
return
}
w.slug = slug
w.msg = msg
w.man = man
osFS := afero.NewOsFs()
workDir, err = afero.TempDir(osFS, "", "konnector-"+slug)
if err != nil {
return
}
workFS := afero.NewBasePathFs(osFS, workDir)
fileServer := i.KonnectorsFileServer()
tarFile, err := fileServer.Open(slug, man.Version(), apps.KonnectorArchiveName)
if err != nil {
return
}
// Create the folder in which the konnector has the right to write.
{
fs := i.VFS()
folderToSave, _ := msg["folder_to_save"].(string)
defaultFolderPath, _ := msg["default_folder_path"].(string)
if folderToSave != "" {
if defaultFolderPath == "" {
name := i.Translate("Tree Administrative")
defaultFolderPath = fmt.Sprintf("/%s/%s", name, slug)
}
if _, err = fs.DirByID(folderToSave); os.IsNotExist(err) {
folderToSave = ""
}
}
if folderToSave == "" {
var dir *vfs.DirDoc
dir, err = vfs.MkdirAll(fs, defaultFolderPath, nil)
if err != nil {
return
}
msg["folder_to_save"] = dir.ID()
}
}
tr := tar.NewReader(tarFile)
for {
var hdr *tar.Header
hdr, err = tr.Next()
if err == io.EOF {
break
}
if err != nil {
return
}
dirname := path.Dir(hdr.Name)
if dirname != "." {
if err = workFS.MkdirAll(dirname, 0755); err != nil {
return
}
}
var f afero.File
f, err = workFS.OpenFile(hdr.Name, os.O_CREATE|os.O_WRONLY, 0640)
if err != nil {
return
}
_, err = io.Copy(f, tr)
errc := f.Close()
if err != nil {
return
}
if errc != nil {
err = errc
return
}
}
return workDir, nil
}
// PrepareCmdEnv returns the command to execute, the environment variables
// exposing the instance/job context to the konnector, and the job id.
func (w *konnectorWorker) PrepareCmdEnv(i *instance.Instance, m jobs.Message) (cmd string, env []string, jobID string, err error) {
	// The job id ties this execution back to its konnector/instance pair.
	jobID = fmt.Sprintf("konnector/%s/%s", w.slug, i.Domain)
	// Directly pass the job message as fields parameters
	var fields, params []byte
	if fields, err = json.Marshal(w.msg); err != nil {
		return
	}
	if params, err = json.Marshal(w.man.Parameters); err != nil {
		return
	}
	creds := i.BuildKonnectorToken(w.man)
	cmd = config.GetConfig().Konnectors.Cmd
	env = []string{
		"COZY_URL=" + i.PageURL("/", nil),
		"COZY_CREDENTIALS=" + creds,
		"COZY_FIELDS=" + string(fields),
		"COZY_PARAMETERS=" + string(params),
		"COZY_TYPE=" + w.man.Type,
		"COZY_LOCALE=" + i.Locale,
		"COZY_JOB_ID=" + jobID,
	}
	return
}
// ScanOuput parses one stdout line from the konnector as a konnectorMsg,
// logs it at the matching level, stores it for Error(), and republishes it
// on the realtime hub for live consumers. (The name is "ScanOuput" [sic] —
// presumably fixed by the worker-callback contract; renaming would break it.)
func (w *konnectorWorker) ScanOuput(i *instance.Instance, log *logrus.Entry, line []byte) error {
	var msg konnectorMsg
	if err := json.Unmarshal(line, &msg); err != nil {
		return fmt.Errorf("Could not parse stdout as JSON: %q", string(line))
	}
	// Route to the logger level matching the message type; unknown types are
	// kept but not logged.
	switch msg.Type {
	case konnectorMsgTypeDebug:
		log.Debug(msg.Message)
	case konnectorMsgTypeWarning:
		log.Warn(msg.Message)
	case konnectorMsgTypeError, konnectorMsgTypeCritical:
		log.Error(msg.Message)
	}
	// Keep the message so Error() can later pick the most relevant one.
	w.messages = append(w.messages, msg)
	realtime.GetHub().Publish(&realtime.Event{
		Verb: realtime.EventCreate,
		Doc: couchdb.JSONDoc{Type: consts.JobEvents, M: map[string]interface{}{
			"type": msg.Type,
			"message": msg.Message,
		}},
		Domain: i.Domain,
	})
	return nil
}
// Error selects the error to report for the run. For retro-compatibility, a
// "critical" message wins outright; failing that, the most recent "error"
// message replaces the job error; otherwise the job error is passed through.
func (w *konnectorWorker) Error(i *instance.Instance, err error) error {
	var lastErr error
	for _, m := range w.messages {
		switch m.Type {
		case konnectorMsgTypeCritical:
			return errors.New(m.Message)
		case konnectorMsgTypeError:
			lastErr = errors.New(m.Message)
		}
	}
	if lastErr != nil {
		return lastErr
	}
	return err
}
// Commit records the outcome of the run in a per-slug konnectorResult
// document, creating it on the first execution and updating it afterwards.
// A nil w.msg means PrepareWorkDir never ran, so there is nothing to record.
// The msg parameter is unused here (kept for the worker-callback signature).
func (w *konnectorWorker) Commit(ctx context.Context, msg jobs.Message, errjob error) error {
	if w.msg == nil {
		return nil
	}
	accountID, _ := w.msg["account"].(string)
	// NOTE(review): this assertion panics if the context lacks the domain
	// key — presumably guaranteed by the jobs runner; confirm.
	domain := ctx.Value(jobs.ContextDomainKey).(string)
	inst, err := instance.Get(domain)
	if err != nil {
		return err
	}
	// Fetch the previous result, if any; a missing doc is not an error.
	lastResult := &konnectorResult{}
	err = couchdb.GetDoc(inst, consts.KonnectorResults, w.slug, lastResult)
	if err != nil {
		if !couchdb.IsNotFoundError(err) {
			return err
		}
		lastResult = nil
	}
	var state, errstr string
	var lastSuccess time.Time
	if errjob != nil {
		// On failure, carry forward the previous success timestamp.
		if lastResult != nil {
			lastSuccess = lastResult.LastSuccess
		}
		errstr = errjob.Error()
		state = jobs.Errored
	} else {
		lastSuccess = time.Now()
		state = jobs.Done
	}
	result := &konnectorResult{
		DocID: w.slug,
		Account: accountID,
		CreatedAt: time.Now(),
		LastSuccess: lastSuccess,
		State: state,
		Error: errstr,
	}
	// Create on first run, otherwise update in place with the stored rev.
	if lastResult == nil {
		err = couchdb.CreateNamedDocWithDB(inst, result)
	} else {
		result.SetRev(lastResult.Rev())
		err = couchdb.UpdateDoc(inst, result)
	}
	return err
	// The disabled block below (failure-notification mail) is unreachable
	// after the return above; kept for reference.
	// if err != nil {
	// 	return err
	// }
	// // if it is the first try we do not take into account an error, we bail.
	// if lastResult == nil {
	// 	return nil
	// }
	// // if the job has not errored, or the last one was already errored, we bail.
	// if state != jobs.Errored || lastResult.State == jobs.Errored {
	// 	return nil
	// }
	// konnectorURL := inst.SubDomain(consts.CollectSlug)
	// konnectorURL.Fragment = "/category/all/" + slug
	// mail := mails.Options{
	// 	Mode: mails.ModeNoReply,
	// 	Subject: inst.Translate("Error Konnector execution", domain),
	// 	TemplateName: "konnector_error_" + inst.Locale,
	// 	TemplateValues: map[string]string{
	// 		"KonnectorName": slug,
	// 		"KonnectorPage": konnectorURL.String(),
	// 	},
	// }
	// msg, err := jobs.NewMessage(&mail)
	// if err != nil {
	// 	return err
	// }
	// log := logger.WithDomain(domain)
	// log.Info("Konnector has failed definitively, should send mail.", mail)
	// _, err = globals.GetBroker().PushJob(&jobs.JobRequest{
	// 	Domain: domain,
	// 	WorkerType: "sendmail",
	// 	Message: msg,
	// })
	// return err
}
|
package main
import "fmt"
// Circle is a circle centred at (x, y) with radius r.
type Circle struct {
	x, y, r float64
}

// main demonstrates three ways of obtaining a Circle value: the zero value,
// new(), and a composite literal.
func main() {
	var zero Circle
	fmt.Println(zero)

	byNew := new(Circle)
	byNew.x = 9
	fmt.Println(byNew)

	byLiteral := Circle{0, 0, 8}
	fmt.Println(byLiteral)
}
Added methods
package main
import "fmt"
import "math"
type Circle struct {
x,y,r float64
}
func area(circle *Circle) float64 {
return math.Pi * circle.r * circle.r
}
func (c *Circle) rArea () float64 {
return math.Pi * c.r * c.r
}
// main exercises the Circle type and both area helpers, printing each
// result. All output strings are kept exactly as before.
func main() {
	var zero Circle
	fmt.Println(zero)

	byNew := new(Circle)
	byNew.x = 9
	byNew.r = 1
	fmt.Println(byNew)

	byLiteral := Circle{0, 0, 8}
	fmt.Println(byLiteral)

	fmt.Println("Area is ", area(&byLiteral))
	fmt.Println(" Are of b is ", area(byNew)) // message kept verbatim [sic]
	fmt.Println("Area from method ", byNew.rArea())
}
|
// Calculate the number of legal 60-card decks in various Magic the Gathering formats.
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"math/big"
"strings"
)
// Legality is one (format, legality-status) pair from a card's legality
// list, e.g. {"Modern", "Legal"}.
type Legality struct {
	Format, Legality string
}
// Card is the subset of the mtgjson AllCards-x schema this program needs.
type Card struct {
	Name string
	Type string
	Legalities []Legality
}
// main prints, for each of the four major constructed formats, the number of
// distinct legal 60-card decks buildable from the card pool.
func main() {
	limits := FormatLimits("AllCards-x.json") // from https://mtgjson.com/json/AllCards-x.json.zip
	for _, format := range []string{"Standard", "Modern", "Legacy", "Vintage"} {
		count := LimitedMultiChoose(60, limits[format])
		fmt.Printf("%8s: %.3g (%v)\n", format, new(big.Float).SetInt(count), count)
	}
}
// FormatLimits reads the mtgjson card dump at mtgJsonFile and returns, per
// format, a slice holding the per-deck copy limit of each card playable in
// that format: 4 for ordinary legal cards, 1 for restricted ones, and an
// effectively unlimited 1000 for basic lands. Cards not playable in a
// format contribute nothing. Panics on read or decode failure.
func FormatLimits(mtgJsonFile string) map[string][]int {
	raw, err := ioutil.ReadFile(mtgJsonFile)
	if err != nil {
		panic(err)
	}
	var cards map[string]Card
	if err := json.Unmarshal(raw, &cards); err != nil {
		panic(err)
	}
	limits := map[string][]int{}
	for _, card := range cards {
		for _, leg := range card.Legalities {
			format := leg.Format
			if _, ok := limits[format]; !ok {
				limits[format] = []int{}
			}
			var lim int
			switch leg.Legality {
			case "Legal":
				if strings.HasPrefix(card.Type, "Basic Land") {
					lim = 1000 // basic lands are exempt from the 4-copy rule
				} else {
					lim = 4
				}
			case "Restricted":
				lim = 1
			}
			if lim > 0 {
				limits[format] = append(limits[format], lim)
			}
		}
	}
	return limits
}
// Cache key for _limitedMultiChoose memoization: because the recursion
// always consumes stock entries from the front, the remaining suffix is
// fully identified by its length.
type key struct {
	numToBuy, numProducts int
}

// LimitedMultiChoose(B, L) returns the number of ways to buy B items from a
// store with len(L) products, where item I has only L[I] in stock.
// For example, LimitedMultiChoose(5, []int{3,4,6}) = 17: the number of ways
// to choose 5 items from 3 products stocked at 3, 4 and 6 units.
func LimitedMultiChoose(numToBuy int, numInStock []int) *big.Int {
	return _limitedMultiChoose(numToBuy, numInStock, map[key]*big.Int{})
}

// _limitedMultiChoose does the counting for LimitedMultiChoose, memoizing
// subproblem results in cache.
func _limitedMultiChoose(numToBuy int, numInStock []int, cache map[key]*big.Int) *big.Int {
	switch {
	case numToBuy == 0:
		return big.NewInt(1) // exactly one way to buy nothing
	case numToBuy < 0, len(numInStock) == 0:
		return big.NewInt(0) // overbought, or nothing left to choose from
	}
	k := key{numToBuy, len(numInStock)}
	if cached, ok := cache[k]; ok {
		return cached
	}
	total := big.NewInt(0)
	// Take i copies of the first product, then fill the rest from the tail.
	for i := 0; i <= numInStock[0]; i++ {
		total.Add(total, _limitedMultiChoose(numToBuy-i, numInStock[1:], cache))
	}
	cache[k] = total
	return total
}
Update code for latest mtgjson data format
// Calculate the number of legal 60-card decks in various Magic the Gathering formats.
//
// Results with 2019-06-03 mtgjson data:
// standard: 7.03e+113 (703157299624545053686771888932520980286428279076474012460151327012433705223062879010429007489792352368415680180400)
// modern: 2.29e+164 (229241017427318043159764934629690835542655152031535534015191438353327502711772302958651348359168786657337832400898840998833277507768943622200054517284702035550051800)
// legacy: 2.48e+174 (2476245970516236803157138976198690342673231699980791266540935465097380031382411336013990929809169770898356715035479025800730182913157595543506016140547900979848605735227225255)
// vintage: 2.91e+174 (2914429382569820774637578150121710890809328668604110727990775935135618637873885636206091853018973867369917756238339541481092956757836236427126456480990881788926023871498707668)
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"math/big"
"strings"
)
// Card is the subset of the mtgjson AllCards schema this program needs;
// Legalities maps a lowercase format name to a status string such as
// "Legal" or "Restricted".
type Card struct {
	Name string
	Type string
	Legalities map[string]string
}
// main prints, for each major constructed format, how many distinct legal
// 60-card decks can be built from the card pool.
func main() {
	limits := FormatLimits("AllCards.json") // from https://mtgjson.com/json/AllCards.json
	formats := []string{"standard", "modern", "legacy", "vintage"}
	for _, format := range formats {
		count := LimitedMultiChoose(60, limits[format])
		fmt.Printf("%8s: %.3g (%v)\n", format, new(big.Float).SetInt(count), count)
	}
}
// FormatLimits reads the mtgjson card dump at mtgJsonFile and returns, per
// format, a slice holding the per-deck copy limit of each card playable in
// that format: 4 for ordinary legal cards, 1 for restricted ones, and an
// effectively unlimited 1000 for basic lands. Cards not playable in a
// format contribute nothing. Panics on read or decode failure.
func FormatLimits(mtgJsonFile string) map[string][]int {
	raw, err := ioutil.ReadFile(mtgJsonFile)
	if err != nil {
		panic(err)
	}
	var cards map[string]Card
	if err := json.Unmarshal(raw, &cards); err != nil {
		panic(err)
	}
	limits := map[string][]int{}
	for _, card := range cards {
		for format, status := range card.Legalities {
			if _, ok := limits[format]; !ok {
				limits[format] = []int{}
			}
			var lim int
			switch status {
			case "Legal":
				if strings.HasPrefix(card.Type, "Basic Land") {
					lim = 1000 // basic lands are exempt from the 4-copy rule
				} else {
					lim = 4
				}
			case "Restricted":
				lim = 1
			}
			if lim > 0 {
				limits[format] = append(limits[format], lim)
			}
		}
	}
	return limits
}
// Cache key for _limitedMultiChoose memoization: because the recursion
// always consumes stock entries from the front, the remaining suffix is
// fully identified by its length.
type key struct {
	numToBuy, numProducts int
}

// LimitedMultiChoose(B, L) returns the number of ways to buy B items from a
// store with len(L) products, where item I has only L[I] in stock.
// For example, LimitedMultiChoose(5, []int{3,4,6}) = 17: the number of ways
// to choose 5 items from 3 products stocked at 3, 4 and 6 units.
func LimitedMultiChoose(numToBuy int, numInStock []int) *big.Int {
	return _limitedMultiChoose(numToBuy, numInStock, map[key]*big.Int{})
}

// _limitedMultiChoose does the counting for LimitedMultiChoose, memoizing
// subproblem results in cache.
func _limitedMultiChoose(numToBuy int, numInStock []int, cache map[key]*big.Int) *big.Int {
	switch {
	case numToBuy == 0:
		return big.NewInt(1) // exactly one way to buy nothing
	case numToBuy < 0, len(numInStock) == 0:
		return big.NewInt(0) // overbought, or nothing left to choose from
	}
	k := key{numToBuy, len(numInStock)}
	if cached, ok := cache[k]; ok {
		return cached
	}
	total := big.NewInt(0)
	// Take i copies of the first product, then fill the rest from the tail.
	for i := 0; i <= numInStock[0]; i++ {
		total.Add(total, _limitedMultiChoose(numToBuy-i, numInStock[1:], cache))
	}
	cache[k] = total
	return total
}
|
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"sort"
"strconv"
"strings"
"time"
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/util/sets"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"
)
// Latency thresholds enforced by the e2e performance checks. Pod startup
// uses a single flat threshold; the LIST-pods and generic API-call
// thresholds are selected by cluster size (see apiCallLatencyThreshold and
// listPodsLatencyThreshold).
const (
	podStartupThreshold time.Duration = 5 * time.Second
	// LIST-pods thresholds for clusters of <=250, <=500, and more nodes.
	listPodLatencySmallThreshold time.Duration = 1 * time.Second
	listPodLatencyMediumThreshold time.Duration = 1 * time.Second
	listPodLatencyLargeThreshold time.Duration = 1 * time.Second
	// Generic API-call thresholds for clusters of <=250, <=500, and more nodes.
	apiCallLatencySmallThreshold time.Duration = 250 * time.Millisecond
	apiCallLatencyMediumThreshold time.Duration = 500 * time.Millisecond
	apiCallLatencyLargeThreshold time.Duration = 1 * time.Second
)
// Dashboard metrics
// LatencyMetric holds the 50th/90th/99th percentile latencies of one
// measured operation.
type LatencyMetric struct {
	Perc50 time.Duration `json:"Perc50"`
	Perc90 time.Duration `json:"Perc90"`
	Perc99 time.Duration `json:"Perc99"`
}
// PodStartupLatency wraps the pod-startup latency percentiles.
type PodStartupLatency struct {
	Latency LatencyMetric `json:"latency"`
}
// SchedulingLatency breaks scheduler latency into the algorithm phase, the
// binding phase, and the end-to-end total.
type SchedulingLatency struct {
	// BUG FIX: the tag was `json:"scheduling:` (unterminated quote), which
	// go vet flags and encoding/json silently ignores, so this field was
	// serialized as "Scheduling" instead of "scheduling".
	Scheduling LatencyMetric `json:"scheduling"`
	Binding LatencyMetric `json:"binding"`
	Total LatencyMetric `json:"total"`
}
// APICall records latency percentiles for one (resource, verb) pair.
type APICall struct {
	Resource string `json:"resource"`
	Verb string `json:"verb"`
	Latency LatencyMetric `json:"latency"`
}
// APIResponsiveness aggregates APICall entries; it implements
// sort.Interface ordered by ascending 99th-percentile latency.
type APIResponsiveness struct {
	APICalls []APICall `json:"apicalls"`
}
func (a APIResponsiveness) Len() int { return len(a.APICalls) }
func (a APIResponsiveness) Swap(i, j int) { a.APICalls[i], a.APICalls[j] = a.APICalls[j], a.APICalls[i] }
func (a APIResponsiveness) Less(i, j int) bool {
	return a.APICalls[i].Latency.Perc99 < a.APICalls[j].Latency.Perc99
}
// addMetric records latency for the given quantile of a (resource, verb)
// pair, updating the existing entry in place when one exists and appending
// a new one otherwise.
// 0 <= quantile <= 1 (e.g. 0.95 is 95%tile, 0.5 is median); only 0.5, 0.9
// and 0.99 are supported.
func (a *APIResponsiveness) addMetric(resource, verb string, quantile float64, latency time.Duration) {
	for idx := range a.APICalls {
		if a.APICalls[idx].Resource != resource || a.APICalls[idx].Verb != verb {
			continue
		}
		a.APICalls[idx] = setQuantileAPICall(a.APICalls[idx], quantile, latency)
		return
	}
	fresh := setQuantileAPICall(APICall{Resource: resource, Verb: verb}, quantile, latency)
	a.APICalls = append(a.APICalls, fresh)
}
// 0 <= quantile <=1 (e.g. 0.95 is 95%tile, 0.5 is median)
// Only 0.5, 0.9 and 0.99 quantiles are supported.
// setQuantileAPICall is the value-semantics wrapper around setQuantile.
func setQuantileAPICall(apicall APICall, quantile float64, latency time.Duration) APICall {
	setQuantile(&apicall.Latency, quantile, latency)
	return apicall
}
// Only 0.5, 0.9 and 0.99 quantiles are supported.
// Any other quantile is silently ignored.
func setQuantile(metric *LatencyMetric, quantile float64, latency time.Duration) {
	switch quantile {
	case 0.5:
		metric.Perc50 = latency
	case 0.9:
		metric.Perc90 = latency
	case 0.99:
		metric.Perc99 = latency
	}
}
// readLatencyMetrics scrapes /metrics from the apiserver and converts the
// apiserver_request_latencies_summary samples into an APIResponsiveness
// value, skipping ignored resources and verbs.
func readLatencyMetrics(c *client.Client) (APIResponsiveness, error) {
	var a APIResponsiveness
	body, err := getMetrics(c)
	if err != nil {
		return a, err
	}
	samples, err := extractMetricSamples(body)
	if err != nil {
		return a, err
	}
	ignoredResources := sets.NewString("events")
	// TODO: figure out why we're getting non-capitalized proxy and fix this.
	ignoredVerbs := sets.NewString("WATCHLIST", "PROXY", "proxy")
	for _, sample := range samples {
		// Example line:
		// apiserver_request_latencies_summary{resource="namespaces",verb="LIST",quantile="0.99"} 908
		if sample.Metric[model.MetricNameLabel] != "apiserver_request_latencies_summary" {
			continue
		}
		resource := string(sample.Metric["resource"])
		verb := string(sample.Metric["verb"])
		if ignoredResources.Has(resource) || ignoredVerbs.Has(verb) {
			continue
		}
		latency := sample.Value
		quantile, err := strconv.ParseFloat(string(sample.Metric[model.QuantileLabel]), 64)
		if err != nil {
			return a, err
		}
		// Summary values are exported in microseconds.
		a.addMetric(resource, verb, quantile, time.Duration(int64(latency))*time.Microsecond)
	}
	// FIX: this previously read `return a, err`, but that err was the outer
	// variable (the loop's := shadows it) and is always nil here — make the
	// success return explicit.
	return a, nil
}
// Returns threshold for API call depending on the size of the cluster.
// In general our goal is 1s, but for smaller clusters, we want to enforce
// smaller limits, to allow noticing regressions.
func apiCallLatencyThreshold(numNodes int) time.Duration {
	switch {
	case numNodes <= 250:
		return apiCallLatencySmallThreshold
	case numNodes <= 500:
		return apiCallLatencyMediumThreshold
	default:
		return apiCallLatencyLargeThreshold
	}
}

// listPodsLatencyThreshold is the LIST-pods analogue of
// apiCallLatencyThreshold, using the listPod thresholds instead.
func listPodsLatencyThreshold(numNodes int) time.Duration {
	switch {
	case numNodes <= 250:
		return listPodLatencySmallThreshold
	case numNodes <= 500:
		return listPodLatencyMediumThreshold
	default:
		return listPodLatencyLargeThreshold
	}
}
// Prints top five summary metrics for request types with latency and returns
// number of such request types above threshold.
func HighLatencyRequests(c *client.Client) (int, error) {
	// The node count picks which latency threshold tier applies.
	nodes, err := c.Nodes().List(api.ListOptions{})
	if err != nil {
		return 0, err
	}
	numNodes := len(nodes.Items)
	metrics, err := readLatencyMetrics(c)
	if err != nil {
		return 0, err
	}
	// Sort descending by 99th percentile so the worst calls come first.
	sort.Sort(sort.Reverse(metrics))
	badMetrics := 0
	top := 5
	for _, metric := range metrics.APICalls {
		threshold := apiCallLatencyThreshold(numNodes)
		// LIST pods gets its own (more lenient on big clusters) threshold.
		if metric.Verb == "LIST" && metric.Resource == "pods" {
			threshold = listPodsLatencyThreshold(numNodes)
		}
		isBad := false
		if metric.Latency.Perc99 > threshold {
			badMetrics++
			isBad = true
		}
		// Log the first five entries plus every over-threshold one; note top
		// keeps decrementing (and can go negative) once bad entries print.
		if top > 0 || isBad {
			top--
			prefix := ""
			if isBad {
				prefix = "WARNING "
			}
			Logf("%vTop latency metric: %+v", prefix, metric)
		}
	}
	Logf("API calls latencies: %s", prettyPrintJSON(metrics))
	return badMetrics, nil
}
// Verifies whether 50, 90 and 99th percentiles of PodStartupLatency are
// within the threshold. Returns an error naming the first percentile that
// exceeds podStartupThreshold, or nil when all are within bounds.
func VerifyPodStartupLatency(latency PodStartupLatency) error {
	Logf("Pod startup latency: %s", prettyPrintJSON(latency))
	if latency.Latency.Perc50 > podStartupThreshold {
		return fmt.Errorf("too high pod startup latency 50th percentile: %v", latency.Latency.Perc50)
	}
	if latency.Latency.Perc90 > podStartupThreshold {
		return fmt.Errorf("too high pod startup latency 90th percentile: %v", latency.Latency.Perc90)
	}
	if latency.Latency.Perc99 > podStartupThreshold {
		// Fixed typo in the message: "percentil" -> "percentile".
		return fmt.Errorf("too high pod startup latency 99th percentile: %v", latency.Latency.Perc99)
	}
	return nil
}
// Resets latency metrics in apiserver.
// It calls the apiserver's /resetMetrics endpoint and verifies the expected
// acknowledgement body.
func resetMetrics(c *client.Client) error {
	Logf("Resetting latency metrics in apiserver...")
	body, err := c.Get().AbsPath("/resetMetrics").DoRaw()
	if err != nil {
		return err
	}
	// The apiserver acknowledges with this exact string; anything else means
	// the reset did not take effect.
	if string(body) != "metrics reset\n" {
		return fmt.Errorf("Unexpected response: %q", string(body))
	}
	return nil
}
// getMetrics fetches the raw /metrics payload from the apiserver and returns
// it as a string.
func getMetrics(c *client.Client) (string, error) {
	raw, err := c.Get().AbsPath("/metrics").DoRaw()
	if err != nil {
		return "", err
	}
	return string(raw), nil
}
// Retrieves scheduler metrics information.
// getSchedulingLatency scrapes the scheduler's /metrics endpoint on the
// master (via SSH, since the port is only reachable locally) and extracts the
// scheduling-algorithm, binding and end-to-end latency summaries.
func getSchedulingLatency() (SchedulingLatency, error) {
	result := SchedulingLatency{}
	cmd := "curl http://localhost:10251/metrics"
	sshResult, err := SSH(cmd, getMasterHost()+":22", testContext.Provider)
	if err != nil || sshResult.Code != 0 {
		return result, fmt.Errorf("unexpected error (code: %d) in ssh connection to master: %#v", sshResult.Code, err)
	}
	samples, err := extractMetricSamples(sshResult.Stdout)
	if err != nil {
		return result, err
	}
	for _, sample := range samples {
		// Map each recognized metric name onto the matching result field;
		// unrecognized metrics are skipped below.
		var metric *LatencyMetric = nil
		switch sample.Metric[model.MetricNameLabel] {
		case "scheduler_scheduling_algorithm_latency_microseconds":
			metric = &result.Scheduling
		case "scheduler_binding_latency_microseconds":
			metric = &result.Binding
		case "scheduler_e2e_scheduling_latency_microseconds":
			metric = &result.Total
		}
		if metric == nil {
			continue
		}
		latency := sample.Value
		quantile, err := strconv.ParseFloat(string(sample.Metric[model.QuantileLabel]), 64)
		if err != nil {
			return result, err
		}
		// Metric values are reported in microseconds (see metric names).
		setQuantile(metric, quantile, time.Duration(int64(latency))*time.Microsecond)
	}
	return result, nil
}
// VerifySchedulerLatency fetches the scheduler latency metrics and logs them.
// It does not (yet) enforce any thresholds.
func VerifySchedulerLatency() error {
	metrics, err := getSchedulingLatency()
	if err != nil {
		return err
	}
	Logf("Scheduling latency: %s", prettyPrintJSON(metrics))
	// TODO: Add some reasonable checks once we know more about the values.
	return nil
}
// prettyPrintJSON renders metrics as indented JSON; it returns the empty
// string if encoding or re-indenting fails.
func prettyPrintJSON(metrics interface{}) string {
	var encoded bytes.Buffer
	if err := json.NewEncoder(&encoded).Encode(metrics); err != nil {
		return ""
	}
	var formatted bytes.Buffer
	if err := json.Indent(&formatted, encoded.Bytes(), "", "  "); err != nil {
		return ""
	}
	return formatted.String()
}
// getDebugInfo fetches pprof debug dumps (block, goroutine, heap,
// threadcreate) from the apiserver and returns them keyed by profile name.
// Profiles that cannot be fetched or read are logged and skipped rather than
// aborting the whole collection.
func getDebugInfo(c *client.Client) (map[string]string, error) {
	data := make(map[string]string)
	for _, key := range []string{"block", "goroutine", "heap", "threadcreate"} {
		resp, err := http.Get(c.Get().AbsPath(fmt.Sprintf("debug/pprof/%s", key)).URL().String() + "?debug=2")
		if err != nil {
			Logf("Warning: Error trying to fetch %s debug data: %v", key, err)
			continue
		}
		body, err := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		if err != nil {
			// Fixed: skip the profile on a read error instead of storing a
			// possibly truncated body.
			Logf("Warning: Error trying to read %s debug data: %v", key, err)
			continue
		}
		data[key] = string(body)
	}
	return data, nil
}
// writePerfData dumps apiserver metrics into "<dirName>/metrics_<postfix>.txt"
// and each pprof debug profile into "<dirName>/<profile>_<postfix>.txt".
func writePerfData(c *client.Client, dirName string, postfix string) error {
	fname := fmt.Sprintf("%s/metrics_%s.txt", dirName, postfix)
	handler, err := os.Create(fname)
	if err != nil {
		return fmt.Errorf("Error creating file '%s': %v", fname, err)
	}
	metrics, err := getMetrics(c)
	if err != nil {
		// Fixed: close the already-opened file on every error path so the
		// descriptor is not leaked.
		handler.Close()
		return fmt.Errorf("Error retrieving metrics: %v", err)
	}
	_, err = handler.WriteString(metrics)
	if err != nil {
		handler.Close()
		return fmt.Errorf("Error writing metrics: %v", err)
	}
	err = handler.Close()
	if err != nil {
		return fmt.Errorf("Error closing '%s': %v", fname, err)
	}
	debug, err := getDebugInfo(c)
	if err != nil {
		return fmt.Errorf("Error retrieving debug information: %v", err)
	}
	for key, value := range debug {
		fname := fmt.Sprintf("%s/%s_%s.txt", dirName, key, postfix)
		handler, err = os.Create(fname)
		if err != nil {
			return fmt.Errorf("Error creating file '%s': %v", fname, err)
		}
		_, err = handler.WriteString(value)
		if err != nil {
			handler.Close()
			return fmt.Errorf("Error writing %s: %v", key, err)
		}
		err = handler.Close()
		if err != nil {
			return fmt.Errorf("Error closing '%s': %v", fname, err)
		}
	}
	return nil
}
// extractMetricSamples parses the prometheus metric samples from the input string.
func extractMetricSamples(metricsBlob string) ([]*model.Sample, error) {
	dec, err := expfmt.NewDecoder(strings.NewReader(metricsBlob), expfmt.FmtText)
	if err != nil {
		return nil, err
	}
	decoder := expfmt.SampleDecoder{
		Dec:  dec,
		Opts: &expfmt.DecodeOptions{},
	}
	var samples []*model.Sample
	for {
		var v model.Vector
		if err = decoder.Decode(&v); err != nil {
			if err == io.EOF {
				// Expected loop termination condition.
				return samples, nil
			}
			return nil, err
		}
		// Each decoded vector contributes zero or more samples.
		samples = append(samples, v...)
	}
}
// logSuspiciousLatency logs metrics/docker errors from all nodes that had slow startup times
// If latencyDataLag is nil then it will be populated from latencyData
func logSuspiciousLatency(latencyData []podLatencyData, latencyDataLag []podLatencyData, nodeCount int, c *client.Client) {
	if latencyDataLag == nil {
		latencyDataLag = latencyData
	}
	for _, l := range latencyData {
		if l.Latency > NodeStartupThreshold {
			HighLatencyKubeletOperations(c, 1*time.Second, l.Node)
		}
	}
	// Fixed: guard against an empty slice, which would panic on the index
	// expression below.
	if len(latencyDataLag) == 0 {
		Logf("No latency data, skipping throughput computation")
		return
	}
	Logf("Approx throughput: %v pods/min",
		float64(nodeCount)/(latencyDataLag[len(latencyDataLag)-1].Latency.Minutes()))
}
// testMaximumLatencyValue fails the test when the highest latency in the
// (sorted) slice exceeds max; the last element is always the highest.
func testMaximumLatencyValue(latencies []podLatencyData, max time.Duration, name string) {
	worst := latencies[len(latencies)-1]
	if worst.Latency > max {
		Failf("%s were not all under %s: %#v", name, max.String(), latencies)
	}
}
// printLatencies logs the slowest ~10% of the (sorted) latency entries and
// the 50/90/99th percentiles extracted from them.
func printLatencies(latencies []podLatencyData, header string) {
	metrics := extractLatencyMetrics(latencies)
	// Slicing from index 9n/10 yields the last (slowest) tenth of the data.
	Logf("10%% %s: %v", header, latencies[(len(latencies)*9)/10:])
	Logf("perc50: %v, perc90: %v, perc99: %v", metrics.Perc50, metrics.Perc90, metrics.Perc99)
}
Relax scalability limits
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"sort"
"strconv"
"strings"
"time"
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/util/sets"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"
)
const (
	// Upper bound applied to each pod-startup latency percentile.
	podStartupThreshold time.Duration = 5 * time.Second
	// Thresholds for LIST pods calls, by cluster size (<=250, <=500, larger).
	// NOTE(review): all three are currently equal; presumably kept separate
	// so they can be tuned independently — confirm before collapsing.
	listPodLatencySmallThreshold  time.Duration = 1 * time.Second
	listPodLatencyMediumThreshold time.Duration = 1 * time.Second
	listPodLatencyLargeThreshold  time.Duration = 1 * time.Second
	// Thresholds for all other API calls, by cluster size.
	// TODO: Decrease the small threshold to 250ms once tests are fixed.
	apiCallLatencySmallThreshold  time.Duration = 500 * time.Millisecond
	apiCallLatencyMediumThreshold time.Duration = 500 * time.Millisecond
	apiCallLatencyLargeThreshold  time.Duration = 1 * time.Second
)
// Dashboard metrics
type LatencyMetric struct {
Perc50 time.Duration `json:"Perc50"`
Perc90 time.Duration `json:"Perc90"`
Perc99 time.Duration `json:"Perc99"`
}
type PodStartupLatency struct {
Latency LatencyMetric `json:"latency"`
}
type SchedulingLatency struct {
Scheduling LatencyMetric `json:"scheduling:`
Binding LatencyMetric `json:"binding"`
Total LatencyMetric `json:"total"`
}
type APICall struct {
Resource string `json:"resource"`
Verb string `json:"verb"`
Latency LatencyMetric `json:"latency"`
}
type APIResponsiveness struct {
APICalls []APICall `json:"apicalls"`
}
// sort.Interface implementation ordering APICalls by 99th-percentile latency,
// ascending.
func (a APIResponsiveness) Len() int      { return len(a.APICalls) }
func (a APIResponsiveness) Swap(i, j int) { a.APICalls[i], a.APICalls[j] = a.APICalls[j], a.APICalls[i] }
func (a APIResponsiveness) Less(i, j int) bool {
	return a.APICalls[i].Latency.Perc99 < a.APICalls[j].Latency.Perc99
}
// 0 <= quantile <=1 (e.g. 0.95 is 95%tile, 0.5 is median)
// Only 0.5, 0.9 and 0.99 quantiles are supported.
// addMetric records latency for the given (resource, verb) pair, updating the
// existing entry if one exists or appending a new one otherwise.
func (a *APIResponsiveness) addMetric(resource, verb string, quantile float64, latency time.Duration) {
	for i, apicall := range a.APICalls {
		if apicall.Resource == resource && apicall.Verb == verb {
			a.APICalls[i] = setQuantileAPICall(apicall, quantile, latency)
			return
		}
	}
	apicall := setQuantileAPICall(APICall{Resource: resource, Verb: verb}, quantile, latency)
	a.APICalls = append(a.APICalls, apicall)
}
// 0 <= quantile <=1 (e.g. 0.95 is 95%tile, 0.5 is median)
// Only 0.5, 0.9 and 0.99 quantiles are supported.
// setQuantileAPICall returns apicall with the given quantile's latency set.
func setQuantileAPICall(apicall APICall, quantile float64, latency time.Duration) APICall {
	setQuantile(&apicall.Latency, quantile, latency)
	return apicall
}
// setQuantile stores latency into the percentile field of metric selected by
// quantile. Only 0.5, 0.9 and 0.99 are recognized; other values are ignored.
func setQuantile(metric *LatencyMetric, quantile float64, latency time.Duration) {
	if quantile == 0.5 {
		metric.Perc50 = latency
	} else if quantile == 0.9 {
		metric.Perc90 = latency
	} else if quantile == 0.99 {
		metric.Perc99 = latency
	}
}
// readLatencyMetrics scrapes the apiserver's request-latency summary metrics
// and aggregates them per (resource, verb) pair, skipping ignored resources
// and verbs.
func readLatencyMetrics(c *client.Client) (APIResponsiveness, error) {
	var a APIResponsiveness
	body, err := getMetrics(c)
	if err != nil {
		return a, err
	}
	samples, err := extractMetricSamples(body)
	if err != nil {
		return a, err
	}
	ignoredResources := sets.NewString("events")
	// TODO: figure out why we're getting non-capitalized proxy and fix this.
	ignoredVerbs := sets.NewString("WATCHLIST", "PROXY", "proxy")
	for _, sample := range samples {
		// Example line:
		// apiserver_request_latencies_summary{resource="namespaces",verb="LIST",quantile="0.99"} 908
		if sample.Metric[model.MetricNameLabel] != "apiserver_request_latencies_summary" {
			continue
		}
		resource := string(sample.Metric["resource"])
		verb := string(sample.Metric["verb"])
		if ignoredResources.Has(resource) || ignoredVerbs.Has(verb) {
			continue
		}
		latency := sample.Value
		quantile, err := strconv.ParseFloat(string(sample.Metric[model.QuantileLabel]), 64)
		if err != nil {
			return a, err
		}
		// Latencies are exported in microseconds (see the metric name).
		a.addMetric(resource, verb, quantile, time.Duration(int64(latency))*time.Microsecond)
	}
	return a, err
}
// apiCallLatencyThreshold returns the API-call latency threshold for the
// given cluster size. In general the goal is 1s, but for smaller clusters we
// enforce smaller limits to allow noticing regressions.
func apiCallLatencyThreshold(numNodes int) time.Duration {
	switch {
	case numNodes <= 250:
		return apiCallLatencySmallThreshold
	case numNodes <= 500:
		return apiCallLatencyMediumThreshold
	default:
		return apiCallLatencyLargeThreshold
	}
}
// listPodsLatencyThreshold returns the LIST-pods latency threshold for the
// given cluster size.
func listPodsLatencyThreshold(numNodes int) time.Duration {
	switch {
	case numNodes <= 250:
		return listPodLatencySmallThreshold
	case numNodes <= 500:
		return listPodLatencyMediumThreshold
	default:
		return listPodLatencyLargeThreshold
	}
}
// Prints top five summary metrics for request types with latency and returns
// number of such request types above threshold.
func HighLatencyRequests(c *client.Client) (int, error) {
	// Thresholds scale with cluster size, so count the nodes first.
	nodes, err := c.Nodes().List(api.ListOptions{})
	if err != nil {
		return 0, err
	}
	numNodes := len(nodes.Items)
	metrics, err := readLatencyMetrics(c)
	if err != nil {
		return 0, err
	}
	// Sort by 99th-percentile latency, slowest first.
	sort.Sort(sort.Reverse(metrics))
	badMetrics := 0
	top := 5
	for _, metric := range metrics.APICalls {
		threshold := apiCallLatencyThreshold(numNodes)
		// LIST pods gets its own (separately tunable) threshold.
		if metric.Verb == "LIST" && metric.Resource == "pods" {
			threshold = listPodsLatencyThreshold(numNodes)
		}
		isBad := false
		if metric.Latency.Perc99 > threshold {
			badMetrics++
			isBad = true
		}
		// Log the five slowest metrics, plus every metric over its threshold.
		if top > 0 || isBad {
			top--
			prefix := ""
			if isBad {
				prefix = "WARNING "
			}
			Logf("%vTop latency metric: %+v", prefix, metric)
		}
	}
	Logf("API calls latencies: %s", prettyPrintJSON(metrics))
	return badMetrics, nil
}
// VerifyPodStartupLatency verifies whether the 50th, 90th and 99th
// percentiles of PodStartupLatency are each within podStartupThreshold,
// returning an error that names the first percentile exceeding it.
func VerifyPodStartupLatency(latency PodStartupLatency) error {
	Logf("Pod startup latency: %s", prettyPrintJSON(latency))
	if latency.Latency.Perc50 > podStartupThreshold {
		return fmt.Errorf("too high pod startup latency 50th percentile: %v", latency.Latency.Perc50)
	}
	if latency.Latency.Perc90 > podStartupThreshold {
		return fmt.Errorf("too high pod startup latency 90th percentile: %v", latency.Latency.Perc90)
	}
	if latency.Latency.Perc99 > podStartupThreshold {
		// Fixed typo in the error message: "percentil" -> "percentile".
		return fmt.Errorf("too high pod startup latency 99th percentile: %v", latency.Latency.Perc99)
	}
	return nil
}
// Resets latency metrics in apiserver.
// It calls the apiserver's /resetMetrics endpoint and verifies the expected
// acknowledgement body.
func resetMetrics(c *client.Client) error {
	Logf("Resetting latency metrics in apiserver...")
	body, err := c.Get().AbsPath("/resetMetrics").DoRaw()
	if err != nil {
		return err
	}
	// The apiserver acknowledges with this exact string; anything else means
	// the reset did not take effect.
	if string(body) != "metrics reset\n" {
		return fmt.Errorf("Unexpected response: %q", string(body))
	}
	return nil
}
// getMetrics fetches the raw /metrics payload from the apiserver and returns
// it as a string.
func getMetrics(c *client.Client) (string, error) {
	raw, err := c.Get().AbsPath("/metrics").DoRaw()
	if err != nil {
		return "", err
	}
	return string(raw), nil
}
// Retrieves scheduler metrics information.
// getSchedulingLatency scrapes the scheduler's /metrics endpoint on the
// master (via SSH, since the port is only reachable locally) and extracts the
// scheduling-algorithm, binding and end-to-end latency summaries.
func getSchedulingLatency() (SchedulingLatency, error) {
	result := SchedulingLatency{}
	cmd := "curl http://localhost:10251/metrics"
	sshResult, err := SSH(cmd, getMasterHost()+":22", testContext.Provider)
	if err != nil || sshResult.Code != 0 {
		return result, fmt.Errorf("unexpected error (code: %d) in ssh connection to master: %#v", sshResult.Code, err)
	}
	samples, err := extractMetricSamples(sshResult.Stdout)
	if err != nil {
		return result, err
	}
	for _, sample := range samples {
		// Map each recognized metric name onto the matching result field;
		// unrecognized metrics are skipped below.
		var metric *LatencyMetric = nil
		switch sample.Metric[model.MetricNameLabel] {
		case "scheduler_scheduling_algorithm_latency_microseconds":
			metric = &result.Scheduling
		case "scheduler_binding_latency_microseconds":
			metric = &result.Binding
		case "scheduler_e2e_scheduling_latency_microseconds":
			metric = &result.Total
		}
		if metric == nil {
			continue
		}
		latency := sample.Value
		quantile, err := strconv.ParseFloat(string(sample.Metric[model.QuantileLabel]), 64)
		if err != nil {
			return result, err
		}
		// Metric values are reported in microseconds (see metric names).
		setQuantile(metric, quantile, time.Duration(int64(latency))*time.Microsecond)
	}
	return result, nil
}
// VerifySchedulerLatency fetches the scheduler latency metrics and logs them.
// It does not (yet) enforce any thresholds.
func VerifySchedulerLatency() error {
	metrics, err := getSchedulingLatency()
	if err != nil {
		return err
	}
	Logf("Scheduling latency: %s", prettyPrintJSON(metrics))
	// TODO: Add some reasonable checks once we know more about the values.
	return nil
}
// prettyPrintJSON renders metrics as indented JSON; it returns the empty
// string if encoding or re-indenting fails.
func prettyPrintJSON(metrics interface{}) string {
	var encoded bytes.Buffer
	if err := json.NewEncoder(&encoded).Encode(metrics); err != nil {
		return ""
	}
	var formatted bytes.Buffer
	if err := json.Indent(&formatted, encoded.Bytes(), "", "  "); err != nil {
		return ""
	}
	return formatted.String()
}
// getDebugInfo fetches pprof debug dumps (block, goroutine, heap,
// threadcreate) from the apiserver and returns them keyed by profile name.
// Profiles that cannot be fetched or read are logged and skipped rather than
// aborting the whole collection.
func getDebugInfo(c *client.Client) (map[string]string, error) {
	data := make(map[string]string)
	for _, key := range []string{"block", "goroutine", "heap", "threadcreate"} {
		resp, err := http.Get(c.Get().AbsPath(fmt.Sprintf("debug/pprof/%s", key)).URL().String() + "?debug=2")
		if err != nil {
			Logf("Warning: Error trying to fetch %s debug data: %v", key, err)
			continue
		}
		body, err := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		if err != nil {
			// Fixed: skip the profile on a read error instead of storing a
			// possibly truncated body.
			Logf("Warning: Error trying to read %s debug data: %v", key, err)
			continue
		}
		data[key] = string(body)
	}
	return data, nil
}
// writePerfData dumps apiserver metrics into "<dirName>/metrics_<postfix>.txt"
// and each pprof debug profile into "<dirName>/<profile>_<postfix>.txt".
func writePerfData(c *client.Client, dirName string, postfix string) error {
	fname := fmt.Sprintf("%s/metrics_%s.txt", dirName, postfix)
	handler, err := os.Create(fname)
	if err != nil {
		return fmt.Errorf("Error creating file '%s': %v", fname, err)
	}
	metrics, err := getMetrics(c)
	if err != nil {
		// Fixed: close the already-opened file on every error path so the
		// descriptor is not leaked.
		handler.Close()
		return fmt.Errorf("Error retrieving metrics: %v", err)
	}
	_, err = handler.WriteString(metrics)
	if err != nil {
		handler.Close()
		return fmt.Errorf("Error writing metrics: %v", err)
	}
	err = handler.Close()
	if err != nil {
		return fmt.Errorf("Error closing '%s': %v", fname, err)
	}
	debug, err := getDebugInfo(c)
	if err != nil {
		return fmt.Errorf("Error retrieving debug information: %v", err)
	}
	for key, value := range debug {
		fname := fmt.Sprintf("%s/%s_%s.txt", dirName, key, postfix)
		handler, err = os.Create(fname)
		if err != nil {
			return fmt.Errorf("Error creating file '%s': %v", fname, err)
		}
		_, err = handler.WriteString(value)
		if err != nil {
			handler.Close()
			return fmt.Errorf("Error writing %s: %v", key, err)
		}
		err = handler.Close()
		if err != nil {
			return fmt.Errorf("Error closing '%s': %v", fname, err)
		}
	}
	return nil
}
// extractMetricSamples parses the prometheus metric samples from the input string.
func extractMetricSamples(metricsBlob string) ([]*model.Sample, error) {
	dec, err := expfmt.NewDecoder(strings.NewReader(metricsBlob), expfmt.FmtText)
	if err != nil {
		return nil, err
	}
	decoder := expfmt.SampleDecoder{
		Dec:  dec,
		Opts: &expfmt.DecodeOptions{},
	}
	var samples []*model.Sample
	for {
		var v model.Vector
		if err = decoder.Decode(&v); err != nil {
			if err == io.EOF {
				// Expected loop termination condition.
				return samples, nil
			}
			return nil, err
		}
		// Each decoded vector contributes zero or more samples.
		samples = append(samples, v...)
	}
}
// logSuspiciousLatency logs metrics/docker errors from all nodes that had slow startup times
// If latencyDataLag is nil then it will be populated from latencyData
func logSuspiciousLatency(latencyData []podLatencyData, latencyDataLag []podLatencyData, nodeCount int, c *client.Client) {
	if latencyDataLag == nil {
		latencyDataLag = latencyData
	}
	for _, l := range latencyData {
		if l.Latency > NodeStartupThreshold {
			HighLatencyKubeletOperations(c, 1*time.Second, l.Node)
		}
	}
	// Fixed: guard against an empty slice, which would panic on the index
	// expression below.
	if len(latencyDataLag) == 0 {
		Logf("No latency data, skipping throughput computation")
		return
	}
	Logf("Approx throughput: %v pods/min",
		float64(nodeCount)/(latencyDataLag[len(latencyDataLag)-1].Latency.Minutes()))
}
// testMaximumLatencyValue fails the test when the highest latency in the
// (sorted) slice exceeds max; the last element is always the highest.
func testMaximumLatencyValue(latencies []podLatencyData, max time.Duration, name string) {
	worst := latencies[len(latencies)-1]
	if worst.Latency > max {
		Failf("%s were not all under %s: %#v", name, max.String(), latencies)
	}
}
// printLatencies logs the slowest ~10% of the (sorted) latency entries and
// the 50/90/99th percentiles extracted from them.
func printLatencies(latencies []podLatencyData, header string) {
	metrics := extractLatencyMetrics(latencies)
	// Slicing from index 9n/10 yields the last (slowest) tenth of the data.
	Logf("10%% %s: %v", header, latencies[(len(latencies)*9)/10:])
	Logf("perc50: %v, perc90: %v, perc99: %v", metrics.Perc50, metrics.Perc90, metrics.Perc99)
}
|
package core
import (
"errors"
"fmt"
"path"
"reflect"
"os"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/MG-RAST/AWE/lib/conf"
"github.com/MG-RAST/AWE/lib/core/cwl"
"github.com/MG-RAST/AWE/lib/logger"
"github.com/MG-RAST/AWE/lib/rwmutex"
shock "github.com/MG-RAST/go-shock-client"
"github.com/davecgh/go-spew/spew"
"github.com/mitchellh/mapstructure"
)
// hierachy (in ideal case without errors):
// 1. TASK_STAT_INIT
// 2. TASK_STAT_PENDING
// 3. TASK_STAT_READY
// 4. TASK_STAT_QUEUED
// 5. TASK_STAT_INPROGRESS
// 6. TASK_STAT_COMPLETED
const (
	TASK_STAT_INIT             = "init"             // initial state on creation of a task
	TASK_STAT_PENDING          = "pending"          // a task that wants to be enqueued (but dependent tasks are not complete)
	TASK_STAT_READY            = "ready"            // a task ready to be enqueued (all dependent tasks are complete , but workunits habe not yet been created)
	TASK_STAT_QUEUED           = "queued"           // a task for which workunits have been created/queued
	TASK_STAT_INPROGRESS       = "in-progress"      // a first workunit has been checkout (this does not guarantee a workunit is running right now)
	TASK_STAT_SUSPEND          = "suspend"          // task is suspended
	TASK_STAT_FAILED           = "failed"           // deprecated ?
	TASK_STAT_FAILED_PERMANENT = "failed-permanent" // on exit code 42
	TASK_STAT_COMPLETED        = "completed"
	TASK_STAT_SKIPPED          = "user_skipped" // deprecated
	TASK_STAT_FAIL_SKIP        = "skipped"      // deprecated
	TASK_STAT_PASSED           = "passed"       // deprecated ?
)

// TASK_STATS_RESET lists the states rolled back when a task is reset.
var TASK_STATS_RESET = []string{TASK_STAT_QUEUED, TASK_STAT_INPROGRESS, TASK_STAT_SUSPEND}

// Task types; TASK_TYPE_SCATTER marks tasks that spawn scatter children.
const (
	TASK_TYPE_UNKNOWN = ""
	TASK_TYPE_SCATTER = "scatter"
	//TASK_TYPE_WORKFLOW = "workflow"
	TASK_TYPE_NORMAL = "normal"
)
// Scatter
// A task of type "scatter" generates multiple scatter children.
// List of children for a scatter task are stored in field "ScatterChildren"
// Each Scatter child points to its Scatter parent
// Scatter child outputs do not go into context object, they only go to scatter parent output array !

// TaskRaw holds the core (IO-independent) state of a task.
type TaskRaw struct {
	rwmutex.RWMutex        `bson:"-" json:"-" mapstructure:"-"`
	Task_Unique_Identifier `bson:",inline" mapstructure:",squash"`
	Id                     string                  `bson:"taskid" json:"taskid" mapstructure:"taskid"` // old-style
	TaskType               string                  `bson:"task_type" json:"task_type" mapstructure:"task_type"`
	Info                   *Info                   `bson:"-" json:"-" mapstructure:"-"` // this is just a pointer to the job.Info
	Cmd                    *Command                `bson:"cmd" json:"cmd" mapstructure:"cmd"`
	Partition              *PartInfo               `bson:"partinfo" json:"-" mapstructure:"partinfo"`
	DependsOn              []string                `bson:"dependsOn" json:"dependsOn" mapstructure:"dependsOn"` // only needed if dependency cannot be inferred from Input.Origin
	TotalWork              int                     `bson:"totalwork" json:"totalwork" mapstructure:"totalwork"`
	MaxWorkSize            int                     `bson:"maxworksize" json:"maxworksize" mapstructure:"maxworksize"`
	RemainWork             int                     `bson:"remainwork" json:"remainwork" mapstructure:"remainwork"`
	ResetTask              bool                    `bson:"resettask" json:"-" mapstructure:"resettask"` // trigged by function - resume, recompute, resubmit
	State                  string                  `bson:"state" json:"state" mapstructure:"state"`
	CreatedDate            time.Time               `bson:"createdDate" json:"createddate" mapstructure:"createdDate"`
	StartedDate            time.Time               `bson:"startedDate" json:"starteddate" mapstructure:"startedDate"`
	CompletedDate          time.Time               `bson:"completedDate" json:"completeddate" mapstructure:"completedDate"`
	ComputeTime            int                     `bson:"computetime" json:"computetime" mapstructure:"computetime"`
	UserAttr               map[string]interface{}  `bson:"userattr" json:"userattr" mapstructure:"userattr"`
	ClientGroups           string                  `bson:"clientgroups" json:"clientgroups" mapstructure:"clientgroups"`
	WorkflowStep           *cwl.WorkflowStep       `bson:"workflowStep" json:"workflowStep" mapstructure:"workflowStep"` // CWL-only
	StepOutputInterface    interface{}             `bson:"stepOutput" json:"stepOutput" mapstructure:"stepOutput"`       // CWL-only
	StepInput              *cwl.Job_document       `bson:"-" json:"-" mapstructure:"-"`                                  // CWL-only
	StepOutput             *cwl.Job_document       `bson:"-" json:"-" mapstructure:"-"`                                  // CWL-only
	//Scatter_task bool `bson:"scatter_task" json:"scatter_task" mapstructure:"scatter_task"` // CWL-only, indicates if this is a scatter_task TODO: compare with TaskType ?
	Scatter_parent      *Task_Unique_Identifier `bson:"scatter_parent" json:"scatter_parent" mapstructure:"scatter_parent"`    // CWL-only, points to scatter parent
	ScatterChildren     []string                `bson:"scatterChildren" json:"scatterChildren" mapstructure:"scatterChildren"` // use simple TaskName , CWL-only, list of all children in a subworkflow task
	ScatterChildren_ptr []*Task                 `bson:"-" json:"-" mapstructure:"-"`                                           // caching only, CWL-only
	Finalizing          bool                    `bson:"-" json:"-" mapstructure:"-"`                                           // CWL-only, a lock mechanism for subworkflows and scatter tasks
	// Fixed struct tag: the original declared the `mapstructure` key twice
	// and had no `json` tag, so JSON used the Go field name "CwlVersion".
	CwlVersion         cwl.CWLVersion `bson:"cwlVersion,omitempty" json:"cwlVersion,omitempty" mapstructure:"cwlVersion,omitempty"`   // CWL-only
	WorkflowInstanceId string         `bson:"workflow_instance_id" json:"workflow_instance_id" mapstructure:"workflow_instance_id"`   // CWL-only
	job                *Job           `bson:"-" mapstructure:"-"`                                                                     // caching only
	//WorkflowParent *Task_Unique_Identifier `bson:"workflow_parent" json:"workflow_parent" mapstructure:"workflow_parent"` // CWL-only parent that created subworkflow
}
// Task is a TaskRaw plus its input/output/predata file descriptors.
type Task struct {
	TaskRaw `bson:",inline" mapstructure:",squash"`
	Inputs  []*IO `bson:"inputs" json:"inputs" mapstructure:"inputs"`
	Outputs []*IO `bson:"outputs" json:"outputs" mapstructure:"outputs"`
	Predata []*IO `bson:"predata" json:"predata" mapstructure:"predata"`
	// NOTE(review): Comment has no bson/json tags, so it serializes under its
	// Go field name — confirm this is intended.
	Comment string
}

// Deprecated JobDep struct uses deprecated TaskDep struct which uses the deprecated IOmap. Maintained for backwards compatibility.
// Jobs that cannot be parsed into the Job struct, but can be parsed into the JobDep struct will be translated to the new Job struct.
// (=deprecated=)
type TaskDep struct {
	TaskRaw `bson:",inline"`
	Inputs  IOmap `bson:"inputs" json:"inputs"`
	Outputs IOmap `bson:"outputs" json:"outputs"`
	Predata IOmap `bson:"predata" json:"predata"`
}

// TaskLog is the compact task representation used in job logs.
type TaskLog struct {
	Id            string     `bson:"taskid" json:"taskid"`
	State         string     `bson:"state" json:"state"`
	TotalWork     int        `bson:"totalwork" json:"totalwork"`
	CompletedDate time.Time  `bson:"completedDate" json:"completeddate"`
	Workunits     []*WorkLog `bson:"workunits" json:"workunits"`
}
// NewTaskRaw creates a TaskRaw from its unique identifier and the (shared)
// job Info pointer. The string form of the identifier becomes the old-style Id.
func NewTaskRaw(task_id Task_Unique_Identifier, info *Info) (tr *TaskRaw, err error) {
	logger.Debug(3, "task_id: %s", task_id)
	logger.Debug(3, "task_id.JobId: %s", task_id.JobId)
	logger.Debug(3, "task_id.TaskName: %s", task_id.TaskName)
	var task_str string
	task_str, err = task_id.String()
	if err != nil {
		err = fmt.Errorf("() task.String returned: %s", err.Error())
		return
	}
	tr = &TaskRaw{
		Task_Unique_Identifier: task_id,
		Id:                     task_str,
		Info:                   info,
		Cmd:                    &Command{},
		Partition:              nil,
		DependsOn:              []string{},
	}
	return
}
// InitRaw initializes/normalizes a TaskRaw that was just created or loaded
// from the database: it fills in ids, state, work counters and CWL-related
// fields. changed reports whether any field was modified, so the caller knows
// whether to persist the task.
func (task *TaskRaw) InitRaw(job *Job, job_id string) (changed bool, err error) {
	changed = false
	if len(task.Id) == 0 {
		err = errors.New("(InitRaw) empty taskid")
		return
	}
	//job_id := job.ID
	if job_id == "" {
		err = fmt.Errorf("(InitRaw) job_id empty")
		return
	}
	if task.JobId == "" {
		task.JobId = job_id
		changed = true
	}
	//logger.Debug(3, "task.TaskName A: %s", task.TaskName)
	job_prefix := job_id + "_"
	// Ids without the "<job_id>_" prefix are not expected any more; the
	// panic guards that assumption.
	if len(task.Id) > 0 && (!strings.HasPrefix(task.Id, job_prefix)) {
		task.TaskName = task.Id
		changed = true
		panic("should not happen 1")
	}
	//logger.Debug(3, "task.TaskName B: %s", task.TaskName)
	//if strings.HasSuffix(task.TaskName, "ERROR") {
	//	err = fmt.Errorf("(InitRaw) taskname is error")
	//	return
	//}
	// Recover the structured identifier from a prefixed Id when TaskName is
	// missing; also believed to be a dead path (see panic).
	if task.TaskName == "" && strings.HasPrefix(task.Id, job_prefix) {
		var tid Task_Unique_Identifier
		tid, err = New_Task_Unique_Identifier_FromString(task.Id)
		if err != nil {
			err = fmt.Errorf("(InitRaw) New_Task_Unique_Identifier_FromString returned: %s", err.Error())
			return
		}
		task.Task_Unique_Identifier = tid
		panic("should not happen 2")
	}
	var task_str string
	task_str, err = task.String()
	if err != nil {
		err = fmt.Errorf("(InitRaw) task.String returned: %s", err.Error())
		return
	}
	task.RWMutex.Init("task_" + task_str)
	// job_id is missing and task_id is only a number (e.g. on submission of old-style AWE)
	if task.TaskName == "" {
		err = fmt.Errorf("(InitRaw) task.TaskName empty")
		return
	}
	// Canonicalize the old-style Id to the current string form.
	if task.Id != task_str {
		task.Id = task_str
		changed = true
	}
	if task.State == "" {
		task.State = TASK_STAT_INIT
		changed = true
	}
	if job != nil {
		if job.Info == nil {
			err = fmt.Errorf("(InitRaw) job.Info empty")
			return
		}
		task.Info = job.Info
	}
	if task.TotalWork <= 0 {
		task.TotalWork = 1
	}
	// For unfinished tasks all work is still remaining.
	if task.State != TASK_STAT_COMPLETED {
		if task.RemainWork != task.TotalWork {
			task.RemainWork = task.TotalWork
			changed = true
		}
	}
	if len(task.Cmd.Environ.Private) > 0 {
		task.Cmd.HasPrivateEnv = true
	}
	//if strings.HasPrefix(task.Id, task.JobId+"_") {
	//	task.Id = strings.TrimPrefix(task.Id, task.JobId+"_")
	//	changed = true
	//}
	//if strings.HasPrefix(task.Id, "_") {
	//	task.Id = strings.TrimPrefix(task.Id, "_")
	//	changed = true
	//}
	// NOTE(review): job is dereferenced below, so nil is rejected here even
	// though earlier steps tolerated it — confirm callers may pass nil at all.
	if job == nil {
		err = fmt.Errorf("(InitRaw) job is nil")
		return
	}
	context := job.WorkflowContext
	if task.StepOutputInterface != nil {
		task.StepOutput, err = cwl.NewJob_documentFromNamedTypes(task.StepOutputInterface, context)
		if err != nil {
			err = fmt.Errorf("(InitRaw) cwl.NewJob_documentFromNamedTypes returned: %s", err.Error())
			return
		}
	}
	// Propagate the job's CWL version onto the task (not flagged as a change).
	CwlVersion := context.CwlVersion
	if CwlVersion != "" {
		if task.CwlVersion != CwlVersion {
			task.CwlVersion = CwlVersion
		}
	}
	if task.WorkflowStep != nil {
		if job != nil {
			if job.WorkflowContext == nil {
				err = fmt.Errorf("(InitRaw) job.WorkflowContext == nil")
				return
			}
			err = task.WorkflowStep.Init(job.WorkflowContext)
			if err != nil {
				err = fmt.Errorf("(InitRaw) task.WorkflowStep.Init returned: %s", err.Error())
				return
			}
		}
	}
	return
}
// Finalize marks the task as finalizing, so that only one goroutine performs
// sub-workflow/scatter completion; this prevents a dead-lock when a
// sub-workflow task finalizes. ok is true only for the first caller that
// manages to flip the flag.
func (task *TaskRaw) Finalize() (ok bool, err error) {
	if err = task.LockNamed("Finalize"); err != nil {
		return
	}
	defer task.Unlock()
	if task.Finalizing {
		// somebody else already flipped the bit
		return false, nil
	}
	task.Finalizing = true
	return true, nil
}
// uuidV4Regexp matches canonical RFC 4122 version-4 UUIDs. Compiled once at
// package level instead of on every call.
var uuidV4Regexp = regexp.MustCompile("^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-[89abAB][a-fA-F0-9]{3}-[a-fA-F0-9]{12}$")

// IsValidUUID reports whether uuid is a canonical version-4 UUID.
// Fixed: the variant character class was written as [8|9|aA|bB], which also
// accepted a literal '|'; it now accepts exactly 8, 9, a/A and b/B.
func IsValidUUID(uuid string) bool {
	if len(uuid) != 36 {
		return false
	}
	return uuidV4Regexp.MatchString(uuid)
}
// populate DependsOn
// CollectDependencies rebuilds task.DependsOn from the explicit entries plus
// the Origin fields of the task's inputs, normalizing every entry to the
// "<job_id>_" prefixed form and de-duplicating. changed reports whether
// DependsOn was rewritten.
func (task *Task) CollectDependencies() (changed bool, err error) {
	deps := make(map[Task_Unique_Identifier]bool)
	deps_changed := false
	jobid, err := task.GetJobId()
	if err != nil {
		return
	}
	if jobid == "" {
		err = fmt.Errorf("(CollectDependencies) jobid is empty")
		return
	}
	job_prefix := jobid + "_"
	// collect explicit dependencies
	for _, deptask := range task.DependsOn {
		if deptask == "" {
			deps_changed = true
			continue
		}
		if !strings.HasPrefix(deptask, job_prefix) {
			deptask = job_prefix + deptask
			deps_changed = true
		} else {
			// Drop entries that are only the prefix with no task name.
			deptask_suffix := strings.TrimPrefix(deptask, job_prefix)
			if deptask_suffix == "" {
				deps_changed = true
				continue
			}
		}
		t, yerr := New_Task_Unique_Identifier_FromString(deptask)
		if yerr != nil {
			err = fmt.Errorf("(CollectDependencies) Cannot parse entry in DependsOn: %s", yerr.Error())
			return
		}
		if t.TaskName == "" {
			// this is to fix a bug
			deps_changed = true
			continue
		}
		deps[t] = true
	}
	// collect dependencies implied by input origins
	for _, input := range task.Inputs {
		deptask := input.Origin
		if deptask == "" {
			deps_changed = true
			continue
		}
		if !strings.HasPrefix(deptask, job_prefix) {
			deptask = job_prefix + deptask
			deps_changed = true
		}
		t, yerr := New_Task_Unique_Identifier_FromString(deptask)
		if yerr != nil {
			err = fmt.Errorf("(CollectDependencies) Cannot parse Origin entry in Input: %s", yerr.Error())
			return
		}
		_, ok := deps[t]
		if !ok {
			// this was not yet in deps
			deps[t] = true
			deps_changed = true
		}
	}
	// write all dependencies if different from before
	if deps_changed {
		task.DependsOn = []string{}
		for deptask, _ := range deps {
			var dep_task_str string
			dep_task_str, err = deptask.String()
			if err != nil {
				err = fmt.Errorf("(CollectDependencies) dep_task.String returned: %s", err.Error())
				return
			}
			task.DependsOn = append(task.DependsOn, dep_task_str)
		}
		changed = true
	}
	return
}
// argument job is optional, but recommended
// Init initializes the task (via InitRaw), collects its dependencies and
// resolves node/host/url information for all input, output and predata files.
// changed reports whether the task was modified and should be persisted.
func (task *Task) Init(job *Job, job_id string) (changed bool, err error) {
	changed, err = task.InitRaw(job, job_id)
	if err != nil {
		return
	}
	dep_changes, err := task.CollectDependencies()
	if err != nil {
		return
	}
	if dep_changes {
		changed = true
	}
	// set node / host / url for files
	for _, io := range task.Inputs {
		if io.Node == "" {
			io.Node = "-"
		}
		_, err = io.DataUrl()
		if err != nil {
			return
		}
		logger.Debug(2, "inittask input: host="+io.Host+", node="+io.Node+", url="+io.Url)
	}
	for _, io := range task.Outputs {
		if io.Node == "" {
			io.Node = "-"
		}
		_, err = io.DataUrl()
		if err != nil {
			return
		}
		logger.Debug(2, "inittask output: host="+io.Host+", node="+io.Node+", url="+io.Url)
	}
	for _, io := range task.Predata {
		if io.Node == "" {
			io.Node = "-"
		}
		_, err = io.DataUrl()
		if err != nil {
			return
		}
		// predata IO can not be empty
		if (io.Url == "") && (io.Node == "-") {
			err = errors.New("Invalid IO, required fields url or host / node missing")
			return
		}
		logger.Debug(2, "inittask predata: host="+io.Host+", node="+io.Node+", url="+io.Url)
	}
	err = task.setTokenForIO(false)
	if err != nil {
		return
	}
	return
}
// task_id_str is without prefix yet
// NewTask creates a Task for the given job inside the given workflow
// instance, deriving its unique identifier from
// "<workflow_instance_id>/<task_id_str>".
func NewTask(job *Job, workflow_instance_id string, task_id_str string) (t *Task, err error) {
	fmt.Printf("(NewTask) new task: %s %s/%s\n", job.ID, workflow_instance_id, task_id_str)
	if task_id_str == "" {
		err = fmt.Errorf("(NewTask) task_id is empty")
		return
	}
	// Fixed: validate workflow_instance_id up front. The old code checked it
	// only after the Task had been constructed, returning a non-nil Task
	// together with the error.
	if workflow_instance_id == "" {
		err = fmt.Errorf("(NewTask) workflow_instance_id empty")
		return
	}
	if strings.HasPrefix(task_id_str, "#main") {
		err = fmt.Errorf("(NewTask) task_id_str prefix wrong: %s", task_id_str)
		return
	}
	if task_id_str != "#main" {
		if !strings.HasPrefix(workflow_instance_id, "#main") {
			err = fmt.Errorf("(NewTask) workflow_instance_id has not #main prefix: %s", workflow_instance_id)
			return
		}
	}
	if job.ID == "" {
		err = fmt.Errorf("(NewTask) jobid is empty!")
		return
	}
	if strings.HasSuffix(task_id_str, "/") {
		err = fmt.Errorf("(NewTask) Suffix in task_id not ok %s", task_id_str)
		return
	}
	task_id_str = strings.TrimSuffix(task_id_str, "/")
	//workflow = strings.TrimSuffix(workflow, "/")
	job_global_task_id_str := workflow_instance_id + "/" + task_id_str
	var tui Task_Unique_Identifier
	tui, err = New_Task_Unique_Identifier(job.ID, job_global_task_id_str)
	if err != nil {
		err = fmt.Errorf("(NewTask) New_Task_Unique_Identifier returns: %s", err.Error())
		return
	}
	var tr *TaskRaw
	tr, err = NewTaskRaw(tui, job.Info)
	if err != nil {
		err = fmt.Errorf("(NewTask) NewTaskRaw returns: %s", err.Error())
		return
	}
	t = &Task{
		TaskRaw: *tr,
		Inputs:  []*IO{},
		Outputs: []*IO{},
		Predata: []*IO{},
	}
	t.TaskRaw.WorkflowInstanceId = workflow_instance_id
	return
}
// GetOutputs returns a shallow copy of the output IO list under a read lock.
func (task *Task) GetOutputs() (outputs []*IO, err error) {
	outputs = []*IO{}
	rlock, xerr := task.RLockNamed("GetOutputs")
	if xerr != nil {
		err = xerr
		return
	}
	defer task.RUnlockNamed(rlock)
	outputs = append(outputs, task.Outputs...)
	return
}
// GetOutput returns the output IO whose FileName matches filename,
// or an error if no such output exists.
func (task *Task) GetOutput(filename string) (output *IO, err error) {
	rlock, xerr := task.RLockNamed("GetOutput")
	if xerr != nil {
		err = xerr
		return
	}
	defer task.RUnlockNamed(rlock)
	for _, candidate := range task.Outputs {
		if candidate.FileName != filename {
			continue
		}
		output = candidate
		return
	}
	err = fmt.Errorf("Output %s not found", filename)
	return
}
// SetScatterChildren persists the list of scatter child task ids and
// stores it on the task. writelock controls whether the write lock is
// taken here; qm is accepted for interface symmetry.
func (task *TaskRaw) SetScatterChildren(qm *ServerMgr, scatterChildren []string, writelock bool) (err error) {
	if writelock {
		if err = task.LockNamed("SetScatterChildren"); err != nil {
			return
		}
		defer task.Unlock()
	}
	// tasks outside a workflow instance live in the job document
	if task.WorkflowInstanceId == "" {
		if err = dbUpdateJobTaskField(task.JobId, task.WorkflowInstanceId, task.Id, "scatterChildren", scatterChildren); err != nil {
			err = fmt.Errorf("(SetScatterChildren) dbUpdateJobTaskField returned: %s", err.Error())
			return
		}
	} else {
		if err = dbUpdateTaskField(task.JobId, task.WorkflowInstanceId, task.Id, "scatterChildren", scatterChildren); err != nil {
			err = fmt.Errorf("(SetScatterChildren) dbUpdateTaskField returned: %s", err.Error())
			return
		}
	}
	task.ScatterChildren = scatterChildren
	return
}
// GetScatterChildren returns the Task objects for this task's scatter
// children, resolving each name through the workflow instance on first
// use and caching the resulting pointer list in ScatterChildren_ptr.
func (task *TaskRaw) GetScatterChildren(wi *WorkflowInstance, qm *ServerMgr) (children []*Task, err error) {
	lock, err := task.RLockNamed("GetScatterChildren")
	if err != nil {
		return
	}
	defer task.RUnlockNamed(lock)
	// fast path: already resolved and cached
	if task.ScatterChildren_ptr != nil {
		children = task.ScatterChildren_ptr // should make a copy....
		return
	}
	children = []*Task{}
	for _, task_id_str := range task.ScatterChildren {
		var child *Task
		var ok bool
		child, ok, err = wi.GetTaskByName(task_id_str, true)
		if err != nil {
			err = fmt.Errorf("(GetScatterChildren) wi.GetTaskByName returned: %s", err.Error())
			return
		}
		if !ok {
			err = fmt.Errorf("(GetScatterChildren) child task %s not found in TaskMap", task_id_str)
			return
		}
		children = append(children, child)
	}
	// NOTE(review): this caches the result while holding only a read
	// lock; concurrent callers may race on ScatterChildren_ptr — confirm
	// whether a write lock (or atomic swap) is needed here.
	task.ScatterChildren_ptr = children
	return
}
// GetWorkflowInstance resolves the workflow instance this task belongs
// to by loading its job and looking up WorkflowInstanceId.
func (task *TaskRaw) GetWorkflowInstance() (wi *WorkflowInstance, ok bool, err error) {
	var job *Job
	if job, err = task.GetJob(); err != nil {
		err = fmt.Errorf("(GetWorkflowInstance) task.GetJob returned: %s", err.Error())
		return
	}
	wiID := task.WorkflowInstanceId
	if wi, ok, err = job.GetWorkflowInstance(wiID, true); err != nil {
		err = fmt.Errorf("(GetWorkflowInstance) job.GetWorkflowInstance returned: %s", err.Error())
		return
	}
	if !ok {
		err = fmt.Errorf("(GetWorkflowInstance) job.GetWorkflowInstance did not find: %s", wiID)
	}
	return
}
// returns name of Parent (without jobid)
// func (task *TaskRaw) GetWorkflowParent() (p Task_Unique_Identifier, ok bool, err error) {
// lock, err := task.RLockNamed("GetParent")
// if err != nil {
// return
// }
// defer task.RUnlockNamed(lock)
// if task.WorkflowParent == nil {
// ok = false
// return
// }
// p = *task.WorkflowParent
// return
// }
// func (task *TaskRaw) GetWorkflowParentStr() (parent_id_str string, err error) {
// lock, err := task.RLockNamed("GetWorkflowParentStr")
// if err != nil {
// return
// }
// defer task.RUnlockNamed(lock)
// parent_id_str = ""
// if task.WorkflowParent != nil {
// parent_id_str, _ = task.WorkflowParent.String()
// }
// return
// }
// GetState returns the current task state under a read lock.
func (task *TaskRaw) GetState() (state string, err error) {
	rlock, xerr := task.RLockNamed("GetState")
	if xerr != nil {
		err = xerr
		return
	}
	defer task.RUnlockNamed(rlock)
	return task.State, nil
}
// GetTaskType returns the task type string under a read lock.
func (task *TaskRaw) GetTaskType() (type_str string, err error) {
	rlock, xerr := task.RLockNamed("GetTaskType")
	if xerr != nil {
		err = xerr
		return
	}
	defer task.RUnlockNamed(rlock)
	return task.TaskType, nil
}
// SetTaskType persists the task type and stores it on the task.
// writelock controls whether the write lock is taken here.
func (task *Task) SetTaskType(type_str string, writelock bool) (err error) {
	if writelock {
		if err = task.LockNamed("SetTaskType"); err != nil {
			return
		}
		defer task.Unlock()
	}
	if task.WorkflowInstanceId == "" {
		if err = dbUpdateJobTaskString(task.JobId, task.WorkflowInstanceId, task.Id, "task_type", type_str); err != nil {
			err = fmt.Errorf("(task/SetTaskType) dbUpdateJobTaskString returned: %s", err.Error())
			return
		}
	} else {
		if err = dbUpdateTaskString(task.JobId, task.WorkflowInstanceId, task.Id, "task_type", type_str); err != nil {
			err = fmt.Errorf("(task/SetTaskType) dbUpdateTaskString returned: %s", err.Error())
			return
		}
	}
	task.TaskType = type_str
	return
}
// SetCreatedDate persists the creation timestamp and stores it on the task.
func (task *TaskRaw) SetCreatedDate(t time.Time) (err error) {
	if err = task.LockNamed("SetCreatedDate"); err != nil {
		return
	}
	defer task.Unlock()
	if task.WorkflowInstanceId == "" {
		if err = dbUpdateJobTaskTime(task.JobId, task.WorkflowInstanceId, task.Id, "createdDate", t); err != nil {
			err = fmt.Errorf("(task/SetCreatedDate) dbUpdateJobTaskTime returned: %s", err.Error())
			return
		}
	} else {
		if err = dbUpdateTaskTime(task.JobId, task.WorkflowInstanceId, task.Id, "createdDate", t); err != nil {
			err = fmt.Errorf("(task/SetCreatedDate) dbUpdateTaskTime returned: %s", err.Error())
			return
		}
	}
	task.CreatedDate = t
	return
}
// SetStartedDate persists the start timestamp and stores it on the task.
func (task *TaskRaw) SetStartedDate(t time.Time) (err error) {
	if err = task.LockNamed("SetStartedDate"); err != nil {
		return
	}
	defer task.Unlock()
	if task.WorkflowInstanceId == "" {
		if err = dbUpdateJobTaskTime(task.JobId, task.WorkflowInstanceId, task.Id, "startedDate", t); err != nil {
			err = fmt.Errorf("(task/SetStartedDate) dbUpdateJobTaskTime returned: %s", err.Error())
			return
		}
	} else {
		if err = dbUpdateTaskTime(task.JobId, task.WorkflowInstanceId, task.Id, "startedDate", t); err != nil {
			err = fmt.Errorf("(task/SetStartedDate) dbUpdateTaskTime returned: %s", err.Error())
			return
		}
	}
	task.StartedDate = t
	return
}
// SetCompletedDate persists the completion timestamp and stores it on
// the task. lock controls whether the write lock is taken here.
func (task *TaskRaw) SetCompletedDate(t time.Time, lock bool) (err error) {
	if lock {
		if err = task.LockNamed("SetCompletedDate"); err != nil {
			return
		}
		defer task.Unlock()
	}
	if task.WorkflowInstanceId == "" {
		if err = dbUpdateJobTaskTime(task.JobId, task.WorkflowInstanceId, task.Id, "completedDate", t); err != nil {
			err = fmt.Errorf("(task/SetCompletedDate) dbUpdateJobTaskTime returned: %s", err.Error())
			return
		}
	} else {
		if err = dbUpdateTaskTime(task.JobId, task.WorkflowInstanceId, task.Id, "completedDate", t); err != nil {
			err = fmt.Errorf("(task/SetCompletedDate) dbUpdateTaskTime returned: %s", err.Error())
			return
		}
	}
	task.CompletedDate = t
	return
}
// SetStepOutput persists the CWL step output document and stores it on
// the task (both typed and interface fields). lock controls whether the
// write lock is taken here.
func (task *TaskRaw) SetStepOutput(jd *cwl.Job_document, lock bool) (err error) {
	if lock {
		if err = task.LockNamed("SetStepOutput"); err != nil {
			return
		}
		defer task.Unlock()
	}
	if task.WorkflowInstanceId == "" {
		if err = dbUpdateJobTaskField(task.JobId, task.WorkflowInstanceId, task.Id, "stepOutput", *jd); err != nil {
			err = fmt.Errorf("(task/SetStepOutput) dbUpdateJobTaskField returned: %s", err.Error())
			return
		}
	} else {
		if err = dbUpdateTaskField(task.JobId, task.WorkflowInstanceId, task.Id, "stepOutput", *jd); err != nil {
			err = fmt.Errorf("(task/SetStepOutput) dbUpdateTaskField returned: %s", err.Error())
			return
		}
	}
	task.StepOutput = jd
	task.StepOutputInterface = jd
	return
}
// GetStateNamed returns the task state under a read lock, tagging the
// lock with the caller's name. Only for debugging purposes.
func (task *TaskRaw) GetStateNamed(name string) (state string, err error) {
	rlock, xerr := task.RLockNamed("GetState/" + name)
	if xerr != nil {
		err = xerr
		return
	}
	defer task.RUnlockNamed(rlock)
	return task.State, nil
}
// GetId returns the task's unique identifier under a read lock; me
// identifies the caller for lock bookkeeping.
func (task *TaskRaw) GetId(me string) (id Task_Unique_Identifier, err error) {
	rlock, xerr := task.RLockNamed("GetId:" + me)
	if xerr != nil {
		err = xerr
		return
	}
	defer task.RUnlockNamed(rlock)
	return task.Task_Unique_Identifier, nil
}
// GetJobId returns the id of the job this task belongs to, under a read lock.
func (task *TaskRaw) GetJobId() (id string, err error) {
	rlock, xerr := task.RLockNamed("GetJobId")
	if xerr != nil {
		err = xerr
		return
	}
	defer task.RUnlockNamed(rlock)
	return task.JobId, nil
}
// GetJob returns the job this task belongs to, using the cached pointer
// when available and loading from the global store otherwise.
func (task *TaskRaw) GetJob() (job *Job, err error) {
	rlock, xerr := task.RLockNamed("GetJob")
	if xerr != nil {
		err = xerr
		return
	}
	defer task.RUnlockNamed(rlock)
	if task.job != nil {
		return task.job, nil
	}
	job, err = GetJob(task.JobId)
	if err != nil {
		err = fmt.Errorf("(TaskRaw/GetJob) global GetJob returned: %s", err.Error())
		return
	}
	// this is writing while we just have a readlock, not so nice
	task.job = job
	return
}
// SetState transitions the task to new_state and persists the change.
// On entering TASK_STAT_COMPLETED it decrements the workflow instance's
// remaining-step counter (if wi is given) and stamps the completed date;
// on leaving TASK_STAT_COMPLETED it clears the date and gives the step
// back. writeLock controls whether the write lock is taken here.
// Equal old and new states are a no-op.
func (task *TaskRaw) SetState(wi *WorkflowInstance, new_state string, writeLock bool) (err error) {
	if writeLock {
		err = task.LockNamed("SetState")
		if err != nil {
			return
		}
		defer task.Unlock()
	}
	old_state := task.State
	taskid := task.Id
	jobid := task.JobId
	if jobid == "" {
		err = fmt.Errorf("task %s has no job id", taskid)
		return
	}
	if old_state == new_state {
		return
	}
	if task.WorkflowInstanceId == "" {
		err = dbUpdateJobTaskString(jobid, task.WorkflowInstanceId, taskid, "state", new_state)
		if err != nil {
			err = fmt.Errorf("(TaskRaw/SetState) dbUpdateJobTaskString returned: %s", err.Error())
			return
		}
	} else {
		err = dbUpdateTaskString(jobid, task.WorkflowInstanceId, taskid, "state", new_state)
		if err != nil {
			err = fmt.Errorf("(TaskRaw/SetState) dbUpdateTaskString returned: %s", err.Error())
			return
		}
	}
	logger.Debug(3, "(Task/SetState) %s new state: \"%s\" (old state \"%s\")", taskid, new_state, old_state)
	task.State = new_state
	if new_state == TASK_STAT_COMPLETED {
		if wi != nil {
			_, err = wi.IncrementRemainSteps(-1, true)
			if err != nil {
				// fixed: message previously named wi.DecreaseRemainSteps,
				// which is not the function being called
				err = fmt.Errorf("(task/SetState) wi.IncrementRemainSteps returned: %s", err.Error())
				return
			}
		}
		err = task.SetCompletedDate(time.Now(), false)
		if err != nil {
			err = fmt.Errorf("(task/SetState) task.SetCompletedDate returned: %s", err.Error())
			return
		}
	} else if old_state == TASK_STAT_COMPLETED {
		// a completed task is being marked as something different:
		// clear the completed date and return the step to the instance
		initTime := time.Time{}
		err = task.SetCompletedDate(initTime, false)
		if err != nil {
			err = fmt.Errorf("(task/SetState) SetCompletedDate returned: %s", err.Error())
			return
		}
		if wi != nil {
			_, err = wi.IncrementRemainSteps(1, true)
			if err != nil {
				err = fmt.Errorf("(task/SetState) wi.IncrementRemainSteps returned: %s", err.Error())
				return
			}
		}
	}
	return
}
// GetDependsOn returns the list of predecessor task ids under a read lock.
func (task *TaskRaw) GetDependsOn() (dep []string, err error) {
	rlock, xerr := task.RLockNamed("GetDependsOn")
	if xerr != nil {
		err = xerr
		return
	}
	defer task.RUnlockNamed(rlock)
	return task.DependsOn, nil
}
// CreateInputIndexes checks and creates shock indices on all input
// nodes where an index is requested.
func (task *Task) CreateInputIndexes() (err error) {
	for _, io := range task.Inputs {
		if _, ierr := io.IndexFile(io.ShockIndex); ierr != nil {
			err = fmt.Errorf("(CreateInputIndexes) failed to create shock index: node=%s, taskid=%s, error=%s", io.Node, task.Id, ierr.Error())
			logger.Error(err.Error())
			return
		}
	}
	return
}
// CreateOutputIndexes checks and creates shock indices on all output
// nodes where an index is requested; if the worker failed to create
// them, this catches it.
func (task *Task) CreateOutputIndexes() (err error) {
	for _, io := range task.Outputs {
		if _, ierr := io.IndexFile(io.ShockIndex); ierr != nil {
			err = fmt.Errorf("(CreateOutputIndexes) failed to create shock index: node=%s, taskid=%s, error=%s", io.Node, task.Id, ierr.Error())
			logger.Error(err.Error())
			return
		}
	}
	return
}
// checkPartIndex validates the partition setup before it is initialized;
// refactored out of InitPartIndex to avoid a potentially long write lock.
// It returns the partition template, the number of index units, and
// isSingle=true when the task must fall back to a single workunit
// (ambiguous inputs or index creation failure — both are non-fatal).
// NOTE(review): assumes task.Inputs is non-empty (Inputs[0] below) —
// confirm callers guarantee this.
func (task *Task) checkPartIndex() (newPartition *PartInfo, totalunits int, isSingle bool, err error) {
	lock, err := task.RLockNamed("checkPartIndex")
	if err != nil {
		return
	}
	defer task.RUnlockNamed(lock)
	inputIO := task.Inputs[0]
	newPartition = &PartInfo{
		Input:         inputIO.FileName,
		MaxPartSizeMB: task.MaxWorkSize,
	}
	if len(task.Inputs) > 1 {
		// with multiple inputs, a partition input must name which file to split
		found := false
		if (task.Partition != nil) && (task.Partition.Input != "") {
			// task submitted with partition input specified, use that
			for _, io := range task.Inputs {
				if io.FileName == task.Partition.Input {
					found = true
					inputIO = io
					newPartition.Input = io.FileName
				}
			}
		}
		if !found {
			// bad state - set as not multi-workunit
			logger.Error("warning: lacking partition info while multiple inputs are specified, taskid=" + task.Id)
			isSingle = true
			return
		}
	}
	// if submitted with partition index use that, otherwise default
	if (task.Partition != nil) && (task.Partition.Index != "") {
		newPartition.Index = task.Partition.Index
	} else {
		newPartition.Index = conf.DEFAULT_INDEX
	}
	idxInfo, err := inputIO.IndexFile(newPartition.Index)
	if err != nil {
		// bad state - set as not multi-workunit; error is swallowed on purpose
		logger.Error("warning: failed to create / retrieve index=%s, taskid=%s, error=%s", newPartition.Index, task.Id, err.Error())
		isSingle = true
		err = nil
		return
	}
	totalunits = int(idxInfo.TotalUnits)
	return
}
// InitPartIndex computes the workunit split based on partition/index
// info; this resets task.Partition when called. Only one task.Inputs
// entry is allowed unless 'partinfo.input' was specified on POST.
// If index info cannot be obtained, the task falls back to a single
// workunit (TotalWork=1, Partition=nil).
func (task *Task) InitPartIndex() (err error) {
	if task.TotalWork == 1 && task.MaxWorkSize == 0 {
		// only 1 workunit requested
		return
	}
	// validation runs under a read lock only (see checkPartIndex)
	newPartition, totalunits, isSingle, err := task.checkPartIndex()
	if err != nil {
		return
	}
	if isSingle {
		// its a single workunit, skip init
		err = task.setSingleWorkunit(true)
		return
	}
	err = task.LockNamed("InitPartIndex")
	if err != nil {
		return
	}
	defer task.Unlock()
	// adjust total work based on needs
	if newPartition.MaxPartSizeMB > 0 {
		// this implementation for chunkrecord indexer only:
		// derive workunit count from max part size (ceiling division)
		chunkmb := int(conf.DEFAULT_CHUNK_SIZE / 1048576)
		var totalwork int
		if totalunits*chunkmb%newPartition.MaxPartSizeMB == 0 {
			totalwork = totalunits * chunkmb / newPartition.MaxPartSizeMB
		} else {
			totalwork = totalunits*chunkmb/newPartition.MaxPartSizeMB + 1
		}
		if totalwork < task.TotalWork {
			// use bigger splits (specified by size or totalwork)
			totalwork = task.TotalWork
		}
		if totalwork != task.TotalWork {
			err = task.setTotalWork(totalwork, false)
			if err != nil {
				return
			}
		}
	}
	// cannot split into more workunits than there are index units
	if totalunits < task.TotalWork {
		err = task.setTotalWork(totalunits, false)
		if err != nil {
			return
		}
	}
	// need only 1 workunit
	if task.TotalWork == 1 {
		err = task.setSingleWorkunit(false)
		return
	}
	// done, set it
	newPartition.TotalIndex = totalunits
	err = task.setPartition(newPartition, false)
	return
}
// setSingleWorkunit collapses the task to a single workunit by setting
// totalwork=1, partition=nil and maxworksize=0 (each only if needed).
func (task *Task) setSingleWorkunit(writelock bool) (err error) {
	if task.TotalWork != 1 {
		if err = task.setTotalWork(1, writelock); err != nil {
			err = fmt.Errorf("(task/setSingleWorkunit) setTotalWork returned: %s", err.Error())
			return
		}
	}
	if task.Partition != nil {
		if err = task.setPartition(nil, writelock); err != nil {
			err = fmt.Errorf("(task/setSingleWorkunit) setPartition returned: %s", err.Error())
			return
		}
	}
	if task.MaxWorkSize != 0 {
		if err = task.setMaxWorkSize(0, writelock); err != nil {
			err = fmt.Errorf("(task/setSingleWorkunit) setMaxWorkSize returned: %s", err.Error())
			return
		}
	}
	return
}
// setTotalWork persists the workunit count, stores it on the task and
// resets the remaining-work counter to the same value.
// writelock controls whether the write lock is taken here.
func (task *Task) setTotalWork(num int, writelock bool) (err error) {
	if writelock {
		err = task.LockNamed("setTotalWork")
		if err != nil {
			return
		}
		defer task.Unlock()
	}
	// consistency fix: like SetTaskType/IncrementRemainWork, persist to
	// the task collection when the task belongs to a workflow instance
	// (the original always used dbUpdateJobTaskInt)
	if task.WorkflowInstanceId == "" {
		err = dbUpdateJobTaskInt(task.JobId, task.WorkflowInstanceId, task.Id, "totalwork", num)
		if err != nil {
			err = fmt.Errorf("(task/setTotalWork) dbUpdateJobTaskInt returned: %s", err.Error())
			return
		}
	} else {
		err = dbUpdateTaskInt(task.JobId, task.WorkflowInstanceId, task.Id, "totalwork", num)
		if err != nil {
			err = fmt.Errorf("(task/setTotalWork) dbUpdateTaskInt returned: %s", err.Error())
			return
		}
	}
	task.TotalWork = num
	// reset remaining work whenever total work reset
	err = task.SetRemainWork(num, false)
	return
}
// setPartition persists the partition info and stores it on the task.
// writelock controls whether the write lock is taken here.
// NOTE(review): unlike SetTaskType/IncrementRemainWork this does not
// branch on WorkflowInstanceId — confirm whether CWL (workflow
// instance) tasks need a dbUpdateTaskPartition variant.
func (task *Task) setPartition(partition *PartInfo, writelock bool) (err error) {
	if writelock {
		err = task.LockNamed("setPartition")
		if err != nil {
			return
		}
		defer task.Unlock()
	}
	err = dbUpdateJobTaskPartition(task.JobId, task.WorkflowInstanceId, task.Id, partition)
	if err != nil {
		err = fmt.Errorf("(task/setPartition) dbUpdateJobTaskPartition returned: %s", err.Error())
		return
	}
	task.Partition = partition
	return
}
// setMaxWorkSize persists the maximum workunit size (MB) and stores it
// on the task. writelock controls whether the write lock is taken here.
func (task *Task) setMaxWorkSize(num int, writelock bool) (err error) {
	if writelock {
		err = task.LockNamed("setMaxWorkSize")
		if err != nil {
			return
		}
		defer task.Unlock()
	}
	// consistency fix: persist to the task collection for workflow
	// instance tasks (the original always used dbUpdateJobTaskInt)
	if task.WorkflowInstanceId == "" {
		err = dbUpdateJobTaskInt(task.JobId, task.WorkflowInstanceId, task.Id, "maxworksize", num)
		if err != nil {
			err = fmt.Errorf("(task/setMaxWorkSize) dbUpdateJobTaskInt returned: %s", err.Error())
			return
		}
	} else {
		err = dbUpdateTaskInt(task.JobId, task.WorkflowInstanceId, task.Id, "maxworksize", num)
		if err != nil {
			err = fmt.Errorf("(task/setMaxWorkSize) dbUpdateTaskInt returned: %s", err.Error())
			return
		}
	}
	task.MaxWorkSize = num
	return
}
// SetRemainWork persists the remaining-work counter and stores it on
// the task. writelock controls whether the write lock is taken here.
func (task *Task) SetRemainWork(num int, writelock bool) (err error) {
	if writelock {
		err = task.LockNamed("SetRemainWork")
		if err != nil {
			return
		}
		defer task.Unlock()
	}
	// consistency fix: mirror IncrementRemainWork, which branches on
	// WorkflowInstanceId for the same "remainwork" field (the original
	// always used dbUpdateJobTaskInt)
	if task.WorkflowInstanceId == "" {
		err = dbUpdateJobTaskInt(task.JobId, task.WorkflowInstanceId, task.Id, "remainwork", num)
		if err != nil {
			err = fmt.Errorf("(task/SetRemainWork) dbUpdateJobTaskInt returned: %s", err.Error())
			return
		}
	} else {
		err = dbUpdateTaskInt(task.JobId, task.WorkflowInstanceId, task.Id, "remainwork", num)
		if err != nil {
			err = fmt.Errorf("(task/SetRemainWork) dbUpdateTaskInt returned: %s", err.Error())
			return
		}
	}
	task.RemainWork = num
	return
}
// IncrementRemainWork adds inc (may be negative) to the remaining-work
// counter, persists the new value and returns it.
func (task *Task) IncrementRemainWork(inc int, writelock bool) (remainwork int, err error) {
	if writelock {
		if err = task.LockNamed("IncrementRemainWork"); err != nil {
			return
		}
		defer task.Unlock()
	}
	remainwork = task.RemainWork + inc
	if task.WorkflowInstanceId == "" {
		if err = dbUpdateJobTaskInt(task.JobId, task.WorkflowInstanceId, task.Id, "remainwork", remainwork); err != nil {
			err = fmt.Errorf("(task/IncrementRemainWork) dbUpdateJobTaskInt returned: %s", err.Error())
			return
		}
	} else {
		if err = dbUpdateTaskInt(task.JobId, task.WorkflowInstanceId, task.Id, "remainwork", remainwork); err != nil {
			err = fmt.Errorf("(task/IncrementRemainWork) dbUpdateTaskInt returned: %s", err.Error())
			return
		}
	}
	task.RemainWork = remainwork
	return
}
// IncrementComputeTime adds inc to the accumulated compute time and
// persists the new value.
func (task *Task) IncrementComputeTime(inc int) (err error) {
	if err = task.LockNamed("IncrementComputeTime"); err != nil {
		return
	}
	defer task.Unlock()
	total := task.ComputeTime + inc
	if task.WorkflowInstanceId == "" {
		if err = dbUpdateJobTaskInt(task.JobId, task.WorkflowInstanceId, task.Id, "computetime", total); err != nil {
			err = fmt.Errorf("(task/IncrementComputeTime) dbUpdateJobTaskInt returned: %s", err.Error())
			return
		}
	} else {
		if err = dbUpdateTaskInt(task.JobId, task.WorkflowInstanceId, task.Id, "computetime", total); err != nil {
			err = fmt.Errorf("(task/IncrementComputeTime) dbUpdateTaskInt returned: %s", err.Error())
			return
		}
	}
	task.ComputeTime = total
	return
}
// ResetTaskTrue flags the task for re-execution: moves it back to
// pending and persists the resettask flag. name tags the lock for
// debugging. A no-op if the flag is already set.
func (task *Task) ResetTaskTrue(name string) (err error) {
	if task.ResetTask {
		return
	}
	if err = task.LockNamed("ResetTaskTrue:" + name); err != nil {
		return
	}
	defer task.Unlock()
	if err = task.SetState(nil, TASK_STAT_PENDING, false); err != nil {
		err = fmt.Errorf("(task/ResetTaskTrue) task.SetState returned: %s", err.Error())
		return
	}
	if task.WorkflowInstanceId == "" {
		if err = dbUpdateJobTaskBoolean(task.JobId, task.WorkflowInstanceId, task.Id, "resettask", true); err != nil {
			err = fmt.Errorf("(task/ResetTaskTrue) dbUpdateJobTaskBoolean returned: %s", err.Error())
			return
		}
	} else {
		if err = dbUpdateTaskBoolean(task.JobId, task.WorkflowInstanceId, task.Id, "resettask", true); err != nil {
			err = fmt.Errorf("(task/ResetTaskTrue) dbUpdateTaskBoolean returned: %s", err.Error())
			return
		}
	}
	task.ResetTask = true
	return
}
// SetResetTask re-initializes a task that previously ran: remaining
// work, compute time, completed date, origin-derived inputs, outputs
// (including deleting their shock nodes) and workunit logs are all
// reset, and the resettask flag is cleared. Called when enqueuing a
// task whose ResetTask flag is set; a no-op otherwise. info replaces
// the in-memory job info pointer.
func (task *Task) SetResetTask(info *Info) (err error) {
	err = task.LockNamed("SetResetTask")
	if err != nil {
		return
	}
	defer task.Unlock()
	// only run if true
	if task.ResetTask == false {
		return
	}
	// in memory pointer
	task.Info = info
	// reset remainwork
	err = task.SetRemainWork(task.TotalWork, false)
	if err != nil {
		err = fmt.Errorf("(task/SetResetTask) task.SetRemainWork returned: %s", err.Error())
		return
	}
	// reset computetime
	if task.WorkflowInstanceId == "" {
		err = dbUpdateJobTaskInt(task.JobId, task.WorkflowInstanceId, task.Id, "computetime", 0)
		if err != nil {
			err = fmt.Errorf("(task/SetResetTask) dbUpdateJobTaskInt returned: %s", err.Error())
			return
		}
	} else {
		err = dbUpdateTaskInt(task.JobId, task.WorkflowInstanceId, task.Id, "computetime", 0)
		if err != nil {
			err = fmt.Errorf("(task/SetResetTask) dbUpdateTaskInt returned: %s", err.Error())
			return
		}
	}
	task.ComputeTime = 0
	// reset completedate
	// fix: this error was previously assigned but never checked
	err = task.SetCompletedDate(time.Time{}, false)
	if err != nil {
		err = fmt.Errorf("(task/SetResetTask) task.SetCompletedDate returned: %s", err.Error())
		return
	}
	// reset inputs
	for _, io := range task.Inputs {
		// skip inputs with no origin (predecessor task)
		if io.Origin == "" {
			continue
		}
		io.Node = "-"
		io.Size = 0
		io.Url = ""
	}
	if task.WorkflowInstanceId == "" {
		err = dbUpdateJobTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "inputs", task.Inputs)
		if err != nil {
			err = fmt.Errorf("(task/SetResetTask) dbUpdateJobTaskIO returned: %s", err.Error())
			return
		}
	} else {
		err = dbUpdateTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "inputs", task.Inputs)
		if err != nil {
			err = fmt.Errorf("(task/SetResetTask) dbUpdateTaskIO returned: %s", err.Error())
			return
		}
	}
	// reset / delete all outputs
	for _, io := range task.Outputs {
		// do not delete update IO
		if io.Type == "update" {
			continue
		}
		if dataUrl, _ := io.DataUrl(); dataUrl != "" {
			// delete dataUrl if is shock node
			if strings.HasSuffix(dataUrl, shock.DATA_SUFFIX) {
				err = shock.ShockDelete(io.Host, io.Node, io.DataToken)
				if err == nil {
					logger.Debug(2, "Deleted node %s from shock", io.Node)
				} else {
					// best effort: log (typo "deleted" fixed) and continue
					logger.Error("(SetResetTask) unable to delete node %s from shock: %s", io.Node, err.Error())
				}
			}
		}
		io.Node = "-"
		io.Size = 0
		io.Url = ""
	}
	if task.WorkflowInstanceId == "" {
		err = dbUpdateJobTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "outputs", task.Outputs)
		if err != nil {
			err = fmt.Errorf("(task/SetResetTask) dbUpdateJobTaskIO returned: %s", err.Error())
			return
		}
	} else {
		err = dbUpdateTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "outputs", task.Outputs)
		if err != nil {
			err = fmt.Errorf("(task/SetResetTask) dbUpdateTaskIO returned: %s", err.Error())
			return
		}
	}
	// delete all workunit logs
	for _, log := range conf.WORKUNIT_LOGS {
		err = task.DeleteLogs(log, false)
		if err != nil {
			return
		}
	}
	// reset the reset
	if task.WorkflowInstanceId == "" {
		err = dbUpdateJobTaskBoolean(task.JobId, task.WorkflowInstanceId, task.Id, "resettask", false)
		if err != nil {
			err = fmt.Errorf("(task/SetResetTask) dbUpdateJobTaskBoolean returned: %s", err.Error())
			return
		}
	} else {
		err = dbUpdateTaskBoolean(task.JobId, task.WorkflowInstanceId, task.Id, "resettask", false)
		if err != nil {
			err = fmt.Errorf("(task/SetResetTask) dbUpdateTaskBoolean returned: %s", err.Error())
			return
		}
	}
	task.ResetTask = false
	return
}
// setTokenForIO copies the job data token onto every input and output
// IO object and persists each list that actually changed. A no-op when
// auth is disabled or no token is set.
func (task *Task) setTokenForIO(writelock bool) (err error) {
	if writelock {
		if err = task.LockNamed("setTokenForIO"); err != nil {
			return
		}
		defer task.Unlock()
	}
	if task.Info == nil {
		err = fmt.Errorf("(setTokenForIO) task.Info empty")
		return
	}
	if !task.Info.Auth || task.Info.DataToken == "" {
		return
	}
	// apply stamps the token on each IO; reports whether anything changed
	apply := func(ios []*IO) bool {
		modified := false
		for _, io := range ios {
			if io.DataToken != task.Info.DataToken {
				io.DataToken = task.Info.DataToken
				modified = true
			}
		}
		return modified
	}
	// update inputs
	if apply(task.Inputs) {
		if task.WorkflowInstanceId == "" {
			if err = dbUpdateJobTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "inputs", task.Inputs); err != nil {
				err = fmt.Errorf("(task/setTokenForIO) dbUpdateJobTaskIO returned: %s", err.Error())
				return
			}
		} else {
			if err = dbUpdateTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "inputs", task.Inputs); err != nil {
				err = fmt.Errorf("(task/setTokenForIO) dbUpdateTaskIO returned: %s", err.Error())
				return
			}
		}
	}
	// update outputs
	if apply(task.Outputs) {
		if task.WorkflowInstanceId == "" {
			if err = dbUpdateJobTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "outputs", task.Outputs); err != nil {
				err = fmt.Errorf("(task/setTokenForIO) dbUpdateJobTaskIO returned: %s", err.Error())
				return
			}
		} else {
			if err = dbUpdateTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "outputs", task.Outputs); err != nil {
				err = fmt.Errorf("(task/setTokenForIO) dbUpdateTaskIO returned: %s", err.Error())
				return
			}
		}
	}
	return
}
// CreateWorkunits builds the workunit list for this task: a task with
// a single workunit gets rank 0, otherwise ranks run 1..TotalWork.
func (task *Task) CreateWorkunits(qm *ServerMgr, job *Job) (wus []*Workunit, err error) {
	if task.TotalWork == 1 {
		var workunit *Workunit
		workunit, err = NewWorkunit(qm, task, 0, job)
		if err != nil {
			err = fmt.Errorf("(CreateWorkunits) (single) NewWorkunit failed: %s", err.Error())
			return
		}
		wus = append(wus, workunit)
		return
	}
	for rank := 1; rank <= task.TotalWork; rank++ {
		var workunit *Workunit
		workunit, err = NewWorkunit(qm, task, rank, job)
		if err != nil {
			err = fmt.Errorf("(CreateWorkunits) (multi) NewWorkunit failed: %s", err.Error())
			return
		}
		wus = append(wus, workunit)
	}
	return
}
// GetTaskLogs assembles a TaskLog snapshot of the task together with a
// WorkLog per workunit (rank 0 for a single workunit, 1..TotalWork otherwise).
func (task *Task) GetTaskLogs() (tlog *TaskLog, err error) {
	tlog = new(TaskLog)
	tlog.Id = task.Id
	tlog.State = task.State
	tlog.TotalWork = task.TotalWork
	tlog.CompletedDate = task.CompletedDate
	workunit_id := New_Workunit_Unique_Identifier(task.Task_Unique_Identifier, 0)
	if task.TotalWork == 1 {
		var wl *WorkLog
		if wl, err = NewWorkLog(workunit_id); err != nil {
			err = fmt.Errorf("(task/GetTaskLogs) NewWorkLog returned: %s", err.Error())
			return
		}
		tlog.Workunits = append(tlog.Workunits, wl)
		return
	}
	for rank := 1; rank <= task.TotalWork; rank++ {
		workunit_id.Rank = rank
		var wl *WorkLog
		if wl, err = NewWorkLog(workunit_id); err != nil {
			err = fmt.Errorf("(task/GetTaskLogs) NewWorkLog returned: %s", err.Error())
			return
		}
		tlog.Workunits = append(tlog.Workunits, wl)
	}
	return
}
// ValidateDependants checks that every predecessor of this task — both
// explicit entries in DependsOn and tasks referenced by input IO
// origins — exists in the task map and has completed.
// A non-empty reason (with err == nil) means a dependency is not yet
// satisfied; err reports identifier/lookup failures.
func (task *Task) ValidateDependants(qm *ServerMgr) (reason string, err error) {
	lock, err := task.RLockNamed("ValidateDependants")
	if err != nil {
		return
	}
	defer task.RUnlockNamed(lock)
	// checkPredecessor verifies that one predecessor exists and is
	// completed (deduplicates the two identical loops of the original)
	checkPredecessor := func(preId Task_Unique_Identifier, preTaskStr string) (reason string, err error) {
		preTask, ok, xerr := qm.TaskMap.Get(preId, true)
		if xerr != nil {
			err = fmt.Errorf("(ValidateDependants) predecessor task %s not found for task %s: %s", preTaskStr, task.Id, xerr.Error())
			return
		}
		if !ok {
			reason = fmt.Sprintf("(ValidateDependants) predecessor task not found: task=%s, pretask=%s", task.Id, preTaskStr)
			logger.Debug(3, reason)
			return
		}
		preTaskState, serr := preTask.GetState()
		if serr != nil {
			err = fmt.Errorf("(ValidateDependants) unable to get state for predecessor task %s: %s", preTaskStr, serr.Error())
			return
		}
		if preTaskState != TASK_STAT_COMPLETED {
			reason = fmt.Sprintf("(ValidateDependants) predecessor task state is not completed: task=%s, pretask=%s, pretask.state=%s", task.Id, preTaskStr, preTaskState)
			logger.Debug(3, reason)
		}
		return
	}
	// validate task states in depends on list
	for _, preTaskStr := range task.DependsOn {
		var preId Task_Unique_Identifier
		preId, err = New_Task_Unique_Identifier_FromString(preTaskStr)
		if err != nil {
			err = fmt.Errorf("(ValidateDependants) New_Task_Unique_Identifier_FromString returns: %s", err.Error())
			return
		}
		reason, err = checkPredecessor(preId, preTaskStr)
		if reason != "" || err != nil {
			return
		}
	}
	// validate task states in input IO origins
	for _, io := range task.Inputs {
		if io.Origin == "" {
			continue
		}
		var preId Task_Unique_Identifier
		preId, err = New_Task_Unique_Identifier(task.JobId, io.Origin)
		if err != nil {
			err = fmt.Errorf("(ValidateDependants) New_Task_Unique_Identifier returns: %s", err.Error())
			return
		}
		var preTaskStr string
		preTaskStr, err = preId.String()
		if err != nil {
			err = fmt.Errorf("(ValidateDependants) task.String returned: %s", err.Error())
			return
		}
		reason, err = checkPredecessor(preId, preTaskStr)
		if reason != "" || err != nil {
			return
		}
	}
	return
}
// ValidateInputs locates every input of the task: resolves node ids
// from predecessor outputs (io.Origin), rebuilds data URLs, refreshes
// file sizes, creates/waits on shock indexes, and finally persists the
// input list.
func (task *Task) ValidateInputs(qm *ServerMgr) (err error) {
	err = task.LockNamed("ValidateInputs")
	if err != nil {
		err = fmt.Errorf("(ValidateInputs) unable to lock task %s: %s", task.Id, err.Error())
		return
	}
	defer task.Unlock()
	for _, io := range task.Inputs {
		if io.Origin != "" {
			// find predecessor task
			var preId Task_Unique_Identifier
			preId, err = New_Task_Unique_Identifier(task.JobId, io.Origin)
			if err != nil {
				err = fmt.Errorf("(ValidateInputs) New_Task_Unique_Identifier returned: %s", err.Error())
				return
			}
			var preTaskStr string
			preTaskStr, err = preId.String()
			if err != nil {
				err = fmt.Errorf("(ValidateInputs) task.String returned: %s", err.Error())
				return
			}
			preTask, ok, xerr := qm.TaskMap.Get(preId, true)
			if xerr != nil {
				err = fmt.Errorf("(ValidateInputs) predecessor task %s not found for task %s: %s", preTaskStr, task.Id, xerr.Error())
				return
			}
			if !ok {
				err = fmt.Errorf("(ValidateInputs) predecessor task %s not found for task %s", preTaskStr, task.Id)
				return
			}
			// test predecessor state
			preTaskState, xerr := preTask.GetState()
			if xerr != nil {
				err = fmt.Errorf("(ValidateInputs) unable to get state for predecessor task %s: %s", preTaskStr, xerr.Error())
				return
			}
			if preTaskState != TASK_STAT_COMPLETED {
				err = fmt.Errorf("(ValidateInputs) predecessor task state is not completed: task=%s, pretask=%s, pretask.state=%s", task.Id, preTaskStr, preTaskState)
				return
			}
			// find predecessor output
			preTaskIO, xerr := preTask.GetOutput(io.FileName)
			if xerr != nil {
				// BUG FIX: original formatted err.Error() here, but err is
				// nil at this point — would panic; the failure is in xerr
				err = fmt.Errorf("(ValidateInputs) unable to get IO for predecessor task %s, file %s: %s", preTaskStr, io.FileName, xerr.Error())
				return
			}
			io.Node = preTaskIO.Node
		}
		// make sure we have node id
		if (io.Node == "") || (io.Node == "-") {
			err = fmt.Errorf("(ValidateInputs) error in locate input for task, no node id found: task=%s, file=%s", task.Id, io.FileName)
			return
		}
		// force build data url
		io.Url = ""
		_, err = io.DataUrl()
		if err != nil {
			err = fmt.Errorf("(ValidateInputs) DataUrl returns: %s", err.Error())
			return
		}
		// force check file exists and get size
		io.Size = 0
		_, err = io.UpdateFileSize()
		if err != nil {
			err = fmt.Errorf("(ValidateInputs) input file %s UpdateFileSize returns: %s", io.FileName, err.Error())
			return
		}
		// create or wait on shock index on input node (if set in workflow document)
		_, err = io.IndexFile(io.ShockIndex)
		if err != nil {
			err = fmt.Errorf("(ValidateInputs) failed to create shock index: task=%s, node=%s: %s", task.Id, io.Node, err.Error())
			return
		}
		logger.Debug(3, "(ValidateInputs) input located: task=%s, file=%s, node=%s, size=%d", task.Id, io.FileName, io.Node, io.Size)
	}
	if task.WorkflowInstanceId == "" {
		err = dbUpdateJobTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "inputs", task.Inputs)
		if err != nil {
			err = fmt.Errorf("(ValidateInputs) unable to save task inputs to mongodb, task=%s: %s", task.Id, err.Error())
			return
		}
	} else {
		err = dbUpdateTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "inputs", task.Inputs)
		if err != nil {
			err = fmt.Errorf("(ValidateInputs) unable to save task inputs to mongodb, task=%s: %s", task.Id, err.Error())
			return
		}
	}
	return
}
// ValidateOutputs rebuilds data URLs, refreshes file sizes and creates
// shock indexes for all task outputs, then persists the output list.
func (task *Task) ValidateOutputs() (err error) {
	err = task.LockNamed("ValidateOutputs")
	if err != nil {
		err = fmt.Errorf("unable to lock task %s: %s", task.Id, err.Error())
		return
	}
	defer task.Unlock()
	for _, io := range task.Outputs {
		// force build data url
		io.Url = ""
		_, err = io.DataUrl()
		if err != nil {
			err = fmt.Errorf("DataUrl returns: %s", err.Error())
			return
		}
		// force check file exists and get size
		io.Size = 0
		_, err = io.UpdateFileSize()
		if err != nil {
			// message fixed: this loop handles OUTPUTS and the call is
			// UpdateFileSize (original said "input file ... GetFileSize")
			err = fmt.Errorf("output file %s UpdateFileSize returns: %s", io.FileName, err.Error())
			return
		}
		// create or wait on shock index on output node (if set in workflow document)
		_, err = io.IndexFile(io.ShockIndex)
		if err != nil {
			err = fmt.Errorf("failed to create shock index: task=%s, node=%s: %s", task.Id, io.Node, err.Error())
			return
		}
	}
	if task.WorkflowInstanceId == "" {
		err = dbUpdateJobTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "outputs", task.Outputs)
		if err != nil {
			err = fmt.Errorf("unable to save task outputs to mongodb, task=%s: %s", task.Id, err.Error())
			return
		}
	} else {
		err = dbUpdateTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "outputs", task.Outputs)
		if err != nil {
			err = fmt.Errorf("unable to save task outputs to mongodb, task=%s: %s", task.Id, err.Error())
			return
		}
	}
	return
}
// ValidatePredata verifies every predata entry that references a shock node:
// it refreshes the file size and fills in a missing data URL. If anything
// changed, the predata list is persisted to mongodb.
// Fix: a DataUrl failure previously set err but did NOT return, so the loop
// kept running (and marked the entry modified) with a stale error.
func (task *Task) ValidatePredata() (err error) {
	err = task.LockNamed("ValidatePreData")
	if err != nil {
		err = fmt.Errorf("unable to lock task %s: %s", task.Id, err.Error())
		return
	}
	defer task.Unlock()
	// locate predata
	var modified bool
	for _, io := range task.Predata {
		// only verify predata that is a shock node
		if (io.Node != "") && (io.Node != "-") {
			// check file size
			mod, xerr := io.UpdateFileSize()
			if xerr != nil {
				err = fmt.Errorf("input file %s GetFileSize returns: %s", io.FileName, xerr.Error())
				return
			}
			if mod {
				modified = true
			}
			// build url if missing
			if io.Url == "" {
				_, err = io.DataUrl()
				if err != nil {
					err = fmt.Errorf("DataUrl returns: %s", err.Error())
					return // bug fix: abort instead of continuing with err set
				}
				modified = true
			}
		}
	}
	if modified {
		// persist; old-style jobs have no workflow instance
		if task.WorkflowInstanceId == "" {
			err = dbUpdateJobTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "predata", task.Predata)
			if err != nil {
				err = fmt.Errorf("unable to save task predata to mongodb, task=%s: %s", task.Id, err.Error())
			}
		} else {
			err = dbUpdateTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "predata", task.Predata)
			if err != nil {
				err = fmt.Errorf("unable to save task predata to mongodb, task=%s: %s", task.Id, err.Error())
			}
		}
	}
	return
}
// DeleteOutput deletes the shock nodes of all outputs flagged io.Delete,
// but only for tasks in a finished state. Node deletion is best-effort
// (failures are logged); modified counts the outputs flagged for deletion.
func (task *Task) DeleteOutput() (modified int) {
	switch task.State {
	case TASK_STAT_COMPLETED, TASK_STAT_SKIPPED, TASK_STAT_FAIL_SKIP:
		for _, out := range task.Outputs {
			if !out.Delete {
				continue
			}
			if derr := out.DeleteNode(); derr != nil {
				logger.Warning("failed to delete shock node %s: %s", out.Node, derr.Error())
			}
			modified++
		}
	}
	return
}
// DeleteInput deletes the shock nodes of all inputs flagged io.Delete,
// but only for tasks in a finished state. Node deletion is best-effort
// (failures are logged); modified counts the inputs flagged for deletion.
func (task *Task) DeleteInput() (modified int) {
	switch task.State {
	case TASK_STAT_COMPLETED, TASK_STAT_SKIPPED, TASK_STAT_FAIL_SKIP:
		for _, in := range task.Inputs {
			if !in.Delete {
				continue
			}
			if derr := in.DeleteNode(); derr != nil {
				logger.Warning("failed to delete shock node %s: %s", in.Node, derr.Error())
			}
			modified++
		}
	}
	return
}
// DeleteLogs removes all per-workunit log files of type logname (e.g.
// "stdout") for this task. writelock controls whether the task write lock
// is taken here.
// Fixes: the lock was named "setTotalWork" (copy-paste leftover), the error
// contexts were garbled ("(task/GetTaDeleteLogsskLogs)"), and os.Remove
// errors were silently ignored.
func (task *Task) DeleteLogs(logname string, writelock bool) (err error) {
	if writelock {
		err = task.LockNamed("DeleteLogs")
		if err != nil {
			return
		}
		defer task.Unlock()
	}
	var logdir string
	logdir, err = getPathByJobId(task.JobId)
	if err != nil {
		err = fmt.Errorf("(task/DeleteLogs) getPathByJobId returned: %s", err.Error())
		return
	}
	// log files are named <taskid>_<workunit>.<logname>
	globpath := fmt.Sprintf("%s/%s_*.%s", logdir, task.Id, logname)
	var logfiles []string
	logfiles, err = filepath.Glob(globpath)
	if err != nil {
		err = fmt.Errorf("(task/DeleteLogs) filepath.Glob returned: %s", err.Error())
		return
	}
	for _, logfile := range logfiles {
		workid := strings.Split(filepath.Base(logfile), ".")[0]
		if rerr := os.Remove(logfile); rerr != nil {
			// best-effort: log and keep deleting the remaining files
			logger.Warning("(task/DeleteLogs) could not remove %s: %s", logfile, rerr.Error())
			continue
		}
		logger.Debug(2, "Deleted %s log for workunit %s", logname, workid)
	}
	return
}
// GetStepOutput looks up a CWL step output by its base name. ok reports
// whether the output exists; reason explains a negative result. A found but
// nil value is an error (taskReady should have guaranteed availability).
func (task *Task) GetStepOutput(name string) (obj cwl.CWLType, ok bool, reason string, err error) {
	if task.StepOutput == nil {
		reason = "task.StepOutput == nil"
		return
	}
	for _, stepOutput := range *task.StepOutput {
		base := path.Base(stepOutput.Id)
		logger.Debug(3, "(task/GetStepOutput) %s vs %s\n", base, name)
		if base != name {
			continue
		}
		obj = stepOutput.Value
		if obj == nil {
			err = fmt.Errorf("(task/GetStepOutput) found %s , but it is nil", name) // this should not happen, taskReady makes sure everything is available
			return
		}
		ok = true
		return
	}
	return
}
// GetStepOutputNames returns the base names of all CWL step outputs.
func (task *Task) GetStepOutputNames() (names []string, err error) {
	if task.StepOutput == nil {
		err = fmt.Errorf("(task/GetStepOutputNames) task.StepOutput == nil")
		return
	}
	names = make([]string, 0, len(*task.StepOutput))
	for _, stepOutput := range *task.StepOutput {
		names = append(names, path.Base(stepOutput.Id))
	}
	return
}
func String2Date(str string) (t time.Time, err error) {
//layout := "2006-01-02T15:04:05.00Z"
// 2018-12-13T22:36:02.96Z
//str := "2014-11-12T11:45:26.371Z"
//t, err = time.Parse(layout, str)
t, err = time.Parse(time.RFC3339, str)
return
}
// FixTimeInMap normalizes original_map[field] to a time.Time: a string
// value is parsed as RFC3339 and replaced in place, an existing time.Time
// is left untouched, and any other type yields an error. A missing field is
// not an error.
func FixTimeInMap(original_map map[string]interface{}, field string) (err error) {
	raw, found := original_map[field]
	if !found {
		return
	}
	switch value := raw.(type) {
	case string:
		var parsed time.Time
		parsed, err = String2Date(value)
		if err != nil {
			err = fmt.Errorf("(FixTimeInMap) Could not parse date: %s", err.Error())
			return
		}
		original_map[field] = parsed
	case time.Time:
		// already the right type, nothing to do
	default:
		err = fmt.Errorf("(FixTimeInMap) time type unknown (%s)", reflect.TypeOf(value))
	}
	return
}
// NewTaskFromInterface decodes a generic (map-based) representation of a
// task into a *Task. Date fields are converted from RFC3339 strings to
// time.Time before mapstructure decoding. The decoded task must carry a
// WorkflowInstanceId.
func NewTaskFromInterface(original interface{}, context *cwl.WorkflowContext) (task *Task, err error) {
	task = &Task{TaskRaw: TaskRaw{}}
	original, err = cwl.MakeStringMap(original, context)
	if err != nil {
		err = fmt.Errorf("(NewTaskFromInterface) MakeStringMap returned: %s", err.Error())
		return
	}
	originalMap := original.(map[string]interface{})
	// normalize all date fields so mapstructure sees time.Time values
	for _, dateField := range []string{"createdDate", "startedDate", "completedDate"} {
		if err = FixTimeInMap(originalMap, dateField); err != nil {
			err = fmt.Errorf("(NewTaskFromInterface) FixTimeInMap returned: %s", err.Error())
			return
		}
	}
	if err = mapstructure.Decode(original, task); err != nil {
		err = fmt.Errorf("(NewTaskFromInterface) mapstructure.Decode returned: %s (%s)", err.Error(), spew.Sdump(originalMap))
		return
	}
	if task.WorkflowInstanceId == "" {
		err = fmt.Errorf("(NewTaskFromInterface) task.WorkflowInstanceId == empty")
		return
	}
	return
}
// NewTasksFromInterface decodes a list of generic task representations into
// a slice of *Task. Only []interface{} input is supported.
func NewTasksFromInterface(original interface{}, context *cwl.WorkflowContext) (tasks []*Task, err error) {
	list, isList := original.([]interface{})
	if !isList {
		err = fmt.Errorf("(NewTasksFromInterface) type not supported: %s", reflect.TypeOf(original))
		return
	}
	tasks = []*Task{}
	for _, element := range list {
		var t *Task
		t, err = NewTaskFromInterface(element, context)
		if err != nil {
			err = fmt.Errorf("(NewTasksFromInterface) NewTaskFromInterface returned: %s", err.Error())
			return
		}
		tasks = append(tasks, t)
	}
	return
}
// NOTE(review): stray line — reads like a commit message ("remove wi.IncrementRemainSteps call"), not Go code; commented out to keep the file parseable.
package core
import (
"errors"
"fmt"
"path"
"reflect"
"os"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/MG-RAST/AWE/lib/conf"
"github.com/MG-RAST/AWE/lib/core/cwl"
"github.com/MG-RAST/AWE/lib/logger"
"github.com/MG-RAST/AWE/lib/rwmutex"
shock "github.com/MG-RAST/go-shock-client"
"github.com/davecgh/go-spew/spew"
"github.com/mitchellh/mapstructure"
)
// hierachy (in ideal case without errors):
// 1. TASK_STAT_INIT
// 2. TASK_STAT_PENDING
// 3. TASK_STAT_READY
// 4. TASK_STAT_QUEUED
// 5. TASK_STAT_INPROGRESS
// 6. TASK_STAT_COMPLETED
// Task states. A task normally advances init → pending → ready → queued →
// in-progress → completed (see the hierarchy comment above); the remaining
// states cover suspension, failure and deprecated behavior.
const (
	TASK_STAT_INIT = "init" // initial state on creation of a task
	TASK_STAT_PENDING = "pending" // a task that wants to be enqueued (but dependent tasks are not complete)
	TASK_STAT_READY = "ready" // a task ready to be enqueued (all dependent tasks are complete , but workunits habe not yet been created)
	TASK_STAT_QUEUED = "queued" // a task for which workunits have been created/queued
	TASK_STAT_INPROGRESS = "in-progress" // a first workunit has been checkout (this does not guarantee a workunit is running right now)
	TASK_STAT_SUSPEND = "suspend"
	TASK_STAT_FAILED = "failed" // deprecated ?
	TASK_STAT_FAILED_PERMANENT = "failed-permanent" // on exit code 42
	TASK_STAT_COMPLETED = "completed"
	TASK_STAT_SKIPPED = "user_skipped" // deprecated
	TASK_STAT_FAIL_SKIP = "skipped" // deprecated
	TASK_STAT_PASSED = "passed" // deprecated ?
)

// TASK_STATS_RESET lists the states in which a task is considered "in
// flight" and therefore subject to reset (resume/recompute/resubmit).
var TASK_STATS_RESET = []string{TASK_STAT_QUEUED, TASK_STAT_INPROGRESS, TASK_STAT_SUSPEND}

// Task types: "scatter" tasks spawn scatter children (see comment below);
// everything else is "normal" or unknown (empty string).
const (
	TASK_TYPE_UNKNOWN = ""
	TASK_TYPE_SCATTER = "scatter"
	//TASK_TYPE_WORKFLOW = "workflow"
	TASK_TYPE_NORMAL = "normal"
)
// Scatter
// A task of type "scatter" generates multiple scatter children.
// List of children for a scatter task are stored in field "ScatterChildren"
// Each Scatter child points to its Scatter parent
// Scatter child outputs do not go into context object, they only go to scatter parent output array !
// TaskRaw holds the task metadata shared by old-style AWE tasks and
// CWL-based tasks: identity, command, dependency list, work accounting,
// state and timestamps. Task embeds it and adds the IO lists.
type TaskRaw struct {
	rwmutex.RWMutex `bson:"-" json:"-" mapstructure:"-"`
	Task_Unique_Identifier `bson:",inline" mapstructure:",squash"`
	Id string `bson:"taskid" json:"taskid" mapstructure:"taskid"` // old-style
	TaskType string `bson:"task_type" json:"task_type" mapstructure:"task_type"`
	Info *Info `bson:"-" json:"-" mapstructure:"-"` // this is just a pointer to the job.Info
	Cmd *Command `bson:"cmd" json:"cmd" mapstructure:"cmd"`
	Partition *PartInfo `bson:"partinfo" json:"-" mapstructure:"partinfo"`
	DependsOn []string `bson:"dependsOn" json:"dependsOn" mapstructure:"dependsOn"` // only needed if dependency cannot be inferred from Input.Origin
	TotalWork int `bson:"totalwork" json:"totalwork" mapstructure:"totalwork"`
	MaxWorkSize int `bson:"maxworksize" json:"maxworksize" mapstructure:"maxworksize"`
	RemainWork int `bson:"remainwork" json:"remainwork" mapstructure:"remainwork"`
	ResetTask bool `bson:"resettask" json:"-" mapstructure:"resettask"` // trigged by function - resume, recompute, resubmit
	State string `bson:"state" json:"state" mapstructure:"state"` // one of the TASK_STAT_* constants
	CreatedDate time.Time `bson:"createdDate" json:"createddate" mapstructure:"createdDate"`
	StartedDate time.Time `bson:"startedDate" json:"starteddate" mapstructure:"startedDate"`
	CompletedDate time.Time `bson:"completedDate" json:"completeddate" mapstructure:"completedDate"`
	ComputeTime int `bson:"computetime" json:"computetime" mapstructure:"computetime"`
	UserAttr map[string]interface{} `bson:"userattr" json:"userattr" mapstructure:"userattr"`
	ClientGroups string `bson:"clientgroups" json:"clientgroups" mapstructure:"clientgroups"`
	WorkflowStep *cwl.WorkflowStep `bson:"workflowStep" json:"workflowStep" mapstructure:"workflowStep"` // CWL-only
	StepOutputInterface interface{} `bson:"stepOutput" json:"stepOutput" mapstructure:"stepOutput"` // CWL-only
	StepInput *cwl.Job_document `bson:"-" json:"-" mapstructure:"-"` // CWL-only
	StepOutput *cwl.Job_document `bson:"-" json:"-" mapstructure:"-"` // CWL-only, decoded view of StepOutputInterface (see InitRaw)
	//Scatter_task bool `bson:"scatter_task" json:"scatter_task" mapstructure:"scatter_task"` // CWL-only, indicates if this is a scatter_task TODO: compare with TaskType ?
	Scatter_parent *Task_Unique_Identifier `bson:"scatter_parent" json:"scatter_parent" mapstructure:"scatter_parent"` // CWL-only, points to scatter parent
	ScatterChildren []string `bson:"scatterChildren" json:"scatterChildren" mapstructure:"scatterChildren"` // use simple TaskName , CWL-only, list of all children in a subworkflow task
	ScatterChildren_ptr []*Task `bson:"-" json:"-" mapstructure:"-"` // caching only, CWL-only
	Finalizing bool `bson:"-" json:"-" mapstructure:"-"` // CWL-only, a lock mechanism for subworkflows and scatter tasks
	// NOTE(review): duplicate mapstructure tag below — the second one wins; looks unintentional, verify before changing
	CwlVersion cwl.CWLVersion `bson:"cwlVersion,omitempty" mapstructure:"cwlVersion,omitempty" mapstructure:"cwlVersion,omitempty"` // CWL-only
	WorkflowInstanceId string `bson:"workflow_instance_id" json:"workflow_instance_id" mapstructure:"workflow_instance_id"` // CWL-only
	job *Job `bson:"-" mapstructure:"-"` // caching only
	//WorkflowParent *Task_Unique_Identifier `bson:"workflow_parent" json:"workflow_parent" mapstructure:"workflow_parent"` // CWL-only parent that created subworkflow
}
// Task is a TaskRaw plus its input/output/predata file (IO) lists. This is
// the representation stored in mongodb and used by the scheduler.
type Task struct {
	TaskRaw `bson:",inline" mapstructure:",squash"`
	Inputs []*IO `bson:"inputs" json:"inputs" mapstructure:"inputs"`
	Outputs []*IO `bson:"outputs" json:"outputs" mapstructure:"outputs"`
	Predata []*IO `bson:"predata" json:"predata" mapstructure:"predata"` // reference data staged before execution
	Comment string
}
// Deprecated JobDep struct uses deprecated TaskDep struct which uses the deprecated IOmap. Maintained for backwards compatibility.
// Jobs that cannot be parsed into the Job struct, but can be parsed into the JobDep struct will be translated to the new Job struct.
// (=deprecated=)
type TaskDep struct {
	TaskRaw `bson:",inline"`
	// legacy IOmap (filename-keyed) representation of the IO lists
	Inputs IOmap `bson:"inputs" json:"inputs"`
	Outputs IOmap `bson:"outputs" json:"outputs"`
	Predata IOmap `bson:"predata" json:"predata"`
}
// TaskLog is a compact, report-oriented view of a task: its state, work
// count, completion time and the logs of its workunits.
type TaskLog struct {
	Id string `bson:"taskid" json:"taskid"`
	State string `bson:"state" json:"state"`
	TotalWork int `bson:"totalwork" json:"totalwork"`
	CompletedDate time.Time `bson:"completedDate" json:"completeddate"`
	Workunits []*WorkLog `bson:"workunits" json:"workunits"`
}
// NewTaskRaw creates a TaskRaw for the given unique identifier, pointing at
// the job's Info and carrying empty command/dependency defaults.
// Fix: the error context was empty "()" — it now names the function.
func NewTaskRaw(task_id Task_Unique_Identifier, info *Info) (tr *TaskRaw, err error) {
	logger.Debug(3, "task_id: %s", task_id)
	logger.Debug(3, "task_id.JobId: %s", task_id.JobId)
	logger.Debug(3, "task_id.TaskName: %s", task_id.TaskName)
	var task_str string
	task_str, err = task_id.String()
	if err != nil {
		err = fmt.Errorf("(NewTaskRaw) task_id.String returned: %s", err.Error())
		return
	}
	tr = &TaskRaw{
		Task_Unique_Identifier: task_id,
		Id:                     task_str,
		Info:                   info,
		Cmd:                    &Command{},
		Partition:              nil,
		DependsOn:              []string{},
	}
	return
}
// InitRaw initializes a TaskRaw after construction or after loading it from
// mongodb: it fills in JobId, reconciles Id/TaskName, initializes the
// mutex, default state and work counters, and decodes the CWL step output.
// changed reports whether any field was modified, so the caller can persist
// the task. The panic calls guard states the author considered impossible.
func (task *TaskRaw) InitRaw(job *Job, job_id string) (changed bool, err error) {
	changed = false
	if len(task.Id) == 0 {
		err = errors.New("(InitRaw) empty taskid")
		return
	}
	//job_id := job.ID
	if job_id == "" {
		err = fmt.Errorf("(InitRaw) job_id empty")
		return
	}
	if task.JobId == "" {
		task.JobId = job_id
		changed = true
	}
	//logger.Debug(3, "task.TaskName A: %s", task.TaskName)
	job_prefix := job_id + "_"
	// old-style id missing the job prefix: treat the whole id as the name
	if len(task.Id) > 0 && (!strings.HasPrefix(task.Id, job_prefix)) {
		task.TaskName = task.Id
		changed = true
		panic("should not happen 1")
	}
	//logger.Debug(3, "task.TaskName B: %s", task.TaskName)
	//if strings.HasSuffix(task.TaskName, "ERROR") {
	//	err = fmt.Errorf("(InitRaw) taskname is error")
	//	return
	//}
	// prefixed id but no name: recover the identifier from the id string
	if task.TaskName == "" && strings.HasPrefix(task.Id, job_prefix) {
		var tid Task_Unique_Identifier
		tid, err = New_Task_Unique_Identifier_FromString(task.Id)
		if err != nil {
			err = fmt.Errorf("(InitRaw) New_Task_Unique_Identifier_FromString returned: %s", err.Error())
			return
		}
		task.Task_Unique_Identifier = tid
		panic("should not happen 2")
	}
	var task_str string
	task_str, err = task.String()
	if err != nil {
		err = fmt.Errorf("(InitRaw) task.String returned: %s", err.Error())
		return
	}
	// name the lock after the task for debuggable lock diagnostics
	task.RWMutex.Init("task_" + task_str)
	// job_id is missing and task_id is only a number (e.g. on submission of old-style AWE)
	if task.TaskName == "" {
		err = fmt.Errorf("(InitRaw) task.TaskName empty")
		return
	}
	// normalize Id to the canonical string form
	if task.Id != task_str {
		task.Id = task_str
		changed = true
	}
	if task.State == "" {
		task.State = TASK_STAT_INIT
		changed = true
	}
	if job != nil {
		if job.Info == nil {
			err = fmt.Errorf("(InitRaw) job.Info empty")
			return
		}
		task.Info = job.Info
	}
	if task.TotalWork <= 0 {
		task.TotalWork = 1
	}
	// an unfinished task always has its full amount of work remaining
	if task.State != TASK_STAT_COMPLETED {
		if task.RemainWork != task.TotalWork {
			task.RemainWork = task.TotalWork
			changed = true
		}
	}
	if len(task.Cmd.Environ.Private) > 0 {
		task.Cmd.HasPrivateEnv = true
	}
	//if strings.HasPrefix(task.Id, task.JobId+"_") {
	//	task.Id = strings.TrimPrefix(task.Id, task.JobId+"_")
	//	changed = true
	//}
	//if strings.HasPrefix(task.Id, "_") {
	//	task.Id = strings.TrimPrefix(task.Id, "_")
	//	changed = true
	//}
	if job == nil {
		err = fmt.Errorf("(InitRaw) job is nil")
		return
	}
	context := job.WorkflowContext
	// decode the raw step output (stored as interface{}) into a Job_document
	if task.StepOutputInterface != nil {
		task.StepOutput, err = cwl.NewJob_documentFromNamedTypes(task.StepOutputInterface, context)
		if err != nil {
			err = fmt.Errorf("(InitRaw) cwl.NewJob_documentFromNamedTypes returned: %s", err.Error())
			return
		}
	}
	// inherit the CWL version from the workflow context if set
	CwlVersion := context.CwlVersion
	if CwlVersion != "" {
		if task.CwlVersion != CwlVersion {
			task.CwlVersion = CwlVersion
		}
	}
	if task.WorkflowStep != nil {
		if job != nil {
			if job.WorkflowContext == nil {
				err = fmt.Errorf("(InitRaw) job.WorkflowContext == nil")
				return
			}
			err = task.WorkflowStep.Init(job.WorkflowContext)
			if err != nil {
				err = fmt.Errorf("(InitRaw) task.WorkflowStep.Init returned: %s", err.Error())
				return
			}
		}
	}
	return
}
// Finalize flips the task's Finalizing flag under lock. Exactly one caller
// wins (ok=true); concurrent callers see ok=false. This prevents a
// dead-lock / double-finalize when a sub-workflow or scatter task completes.
func (task *TaskRaw) Finalize() (ok bool, err error) {
	if err = task.LockNamed("Finalize"); err != nil {
		return
	}
	defer task.Unlock()
	if task.Finalizing {
		// somebody else already flipped the bit
		return
	}
	task.Finalizing = true
	ok = true
	return
}
// IsValidUUID reports whether uuid is an RFC-4122 version-4 UUID in its
// canonical 36-character textual form.
// Fix: the variant character class was written "[8|9|aA|bB]", which also
// matched a literal '|'; it is now "[89aAbB]".
func IsValidUUID(uuid string) bool {
	if len(uuid) != 36 {
		return false
	}
	// version nibble must be 4; variant nibble must be 8, 9, a or b
	r := regexp.MustCompile("^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-[89aAbB][a-fA-F0-9]{3}-[a-fA-F0-9]{12}$")
	return r.MatchString(uuid)
}
// populate DependsOn
// CollectDependencies rebuilds task.DependsOn: it normalizes the explicit
// entries (adds the job prefix, drops empty/broken ones), merges in the
// implicit dependencies derived from each input's Origin, deduplicates, and
// writes the result back. changed reports whether DependsOn was rewritten.
func (task *Task) CollectDependencies() (changed bool, err error) {
	deps := make(map[Task_Unique_Identifier]bool)
	deps_changed := false
	jobid, err := task.GetJobId()
	if err != nil {
		return
	}
	if jobid == "" {
		err = fmt.Errorf("(CollectDependencies) jobid is empty")
		return
	}
	job_prefix := jobid + "_"
	// collect explicit dependencies
	for _, deptask := range task.DependsOn {
		if deptask == "" {
			// drop empty entry
			deps_changed = true
			continue
		}
		if !strings.HasPrefix(deptask, job_prefix) {
			// normalize to the job-prefixed form
			deptask = job_prefix + deptask
			deps_changed = true
		} else {
			deptask_suffix := strings.TrimPrefix(deptask, job_prefix)
			if deptask_suffix == "" {
				// entry was only the prefix; drop it
				deps_changed = true
				continue
			}
		}
		t, yerr := New_Task_Unique_Identifier_FromString(deptask)
		if yerr != nil {
			err = fmt.Errorf("(CollectDependencies) Cannot parse entry in DependsOn: %s", yerr.Error())
			return
		}
		if t.TaskName == "" {
			// this is to fix a bug
			deps_changed = true
			continue
		}
		deps[t] = true
	}
	// collect implicit dependencies: each input's Origin names the producer
	for _, input := range task.Inputs {
		deptask := input.Origin
		if deptask == "" {
			deps_changed = true
			continue
		}
		if !strings.HasPrefix(deptask, job_prefix) {
			deptask = job_prefix + deptask
			deps_changed = true
		}
		t, yerr := New_Task_Unique_Identifier_FromString(deptask)
		if yerr != nil {
			err = fmt.Errorf("(CollectDependencies) Cannot parse Origin entry in Input: %s", yerr.Error())
			return
		}
		_, ok := deps[t]
		if !ok {
			// this was not yet in deps
			deps[t] = true
			deps_changed = true
		}
	}
	// write all dependencies if different from before
	if deps_changed {
		task.DependsOn = []string{}
		for deptask, _ := range deps {
			var dep_task_str string
			dep_task_str, err = deptask.String()
			if err != nil {
				err = fmt.Errorf("(CollectDependencies) dep_task.String returned: %s", err.Error())
				return
			}
			task.DependsOn = append(task.DependsOn, dep_task_str)
		}
		changed = true
	}
	return
}
// argument job is optional, but recommended
// Init fully initializes a Task: runs InitRaw, rebuilds the dependency
// list, ensures every input/output/predata IO has a node ("-" placeholder
// if unset) and a data URL, and fetches data tokens for the IOs. changed
// reports whether the task was modified and should be persisted.
func (task *Task) Init(job *Job, job_id string) (changed bool, err error) {
	changed, err = task.InitRaw(job, job_id)
	if err != nil {
		return
	}
	dep_changes, err := task.CollectDependencies()
	if err != nil {
		return
	}
	if dep_changes {
		changed = true
	}
	// set node / host / url for files
	for _, io := range task.Inputs {
		if io.Node == "" {
			io.Node = "-"
		}
		_, err = io.DataUrl()
		if err != nil {
			return
		}
		logger.Debug(2, "inittask input: host="+io.Host+", node="+io.Node+", url="+io.Url)
	}
	for _, io := range task.Outputs {
		if io.Node == "" {
			io.Node = "-"
		}
		_, err = io.DataUrl()
		if err != nil {
			return
		}
		logger.Debug(2, "inittask output: host="+io.Host+", node="+io.Node+", url="+io.Url)
	}
	for _, io := range task.Predata {
		if io.Node == "" {
			io.Node = "-"
		}
		_, err = io.DataUrl()
		if err != nil {
			return
		}
		// predata IO can not be empty
		if (io.Url == "") && (io.Node == "-") {
			err = errors.New("Invalid IO, required fields url or host / node missing")
			return
		}
		logger.Debug(2, "inittask predata: host="+io.Host+", node="+io.Node+", url="+io.Url)
	}
	err = task.setTokenForIO(false)
	if err != nil {
		return
	}
	return
}
// task_id_str is without prefix yet
// NewTask creates a Task belonging to job inside workflow_instance_id;
// task_id_str is the local task name.
// Fixes: the stdout fmt.Printf debug leftover now uses logger.Debug, and
// workflow_instance_id is validated for emptiness before it is used (it was
// previously only checked after the Task had already been built).
func NewTask(job *Job, workflow_instance_id string, task_id_str string) (t *Task, err error) {
	logger.Debug(3, "(NewTask) new task: %s %s/%s", job.ID, workflow_instance_id, task_id_str)
	if task_id_str == "" {
		err = fmt.Errorf("(NewTask) task_id is empty")
		return
	}
	if workflow_instance_id == "" {
		err = fmt.Errorf("(NewTask) workflow_instance_id empty")
		return
	}
	if strings.HasPrefix(task_id_str, "#main") {
		err = fmt.Errorf("(NewTask) task_id_str prefix wrong: %s", task_id_str)
		return
	}
	if task_id_str != "#main" {
		if !strings.HasPrefix(workflow_instance_id, "#main") {
			err = fmt.Errorf("(NewTask) workflow_instance_id has not #main prefix: %s", workflow_instance_id)
			return
		}
	}
	if job.ID == "" {
		err = fmt.Errorf("(NewTask) jobid is empty!")
		return
	}
	if strings.HasSuffix(task_id_str, "/") {
		err = fmt.Errorf("(NewTask) Suffix in task_id not ok %s", task_id_str)
		return
	}
	task_id_str = strings.TrimSuffix(task_id_str, "/")
	// the job-global task id is "<workflow instance>/<task name>"
	job_global_task_id_str := workflow_instance_id + "/" + task_id_str
	var tui Task_Unique_Identifier
	tui, err = New_Task_Unique_Identifier(job.ID, job_global_task_id_str)
	if err != nil {
		err = fmt.Errorf("(NewTask) New_Task_Unique_Identifier returns: %s", err.Error())
		return
	}
	var tr *TaskRaw
	tr, err = NewTaskRaw(tui, job.Info)
	if err != nil {
		err = fmt.Errorf("(NewTask) NewTaskRaw returns: %s", err.Error())
		return
	}
	t = &Task{
		TaskRaw: *tr,
		Inputs:  []*IO{},
		Outputs: []*IO{},
		Predata: []*IO{},
	}
	t.TaskRaw.WorkflowInstanceId = workflow_instance_id
	return
}
// GetOutputs returns a shallow copy of the task's output IO list,
// taken under a read lock.
func (task *Task) GetOutputs() (outputs []*IO, err error) {
	outputs = []*IO{}
	rlock, rerr := task.RLockNamed("GetOutputs")
	if rerr != nil {
		err = rerr
		return
	}
	defer task.RUnlockNamed(rlock)
	outputs = append(outputs, task.Outputs...)
	return
}
// GetOutput returns the output IO whose FileName equals filename, or an
// error if no such output exists. Taken under a read lock.
func (task *Task) GetOutput(filename string) (output *IO, err error) {
	rlock, rerr := task.RLockNamed("GetOutput")
	if rerr != nil {
		err = rerr
		return
	}
	defer task.RUnlockNamed(rlock)
	for _, candidate := range task.Outputs {
		if candidate.FileName == filename {
			output = candidate
			return
		}
	}
	err = fmt.Errorf("Output %s not found", filename)
	return
}
// SetScatterChildren persists the list of scatter child task names to
// mongodb and stores it on the task. writelock controls whether the task
// write lock is taken here. (qm is unused but kept for interface stability.)
func (task *TaskRaw) SetScatterChildren(qm *ServerMgr, scatterChildren []string, writelock bool) (err error) {
	if writelock {
		if err = task.LockNamed("SetScatterChildren"); err != nil {
			return
		}
		defer task.Unlock()
	}
	// CWL tasks live in a workflow instance; old-style tasks update the job document
	if task.WorkflowInstanceId != "" {
		err = dbUpdateTaskField(task.JobId, task.WorkflowInstanceId, task.Id, "scatterChildren", scatterChildren)
		if err != nil {
			err = fmt.Errorf("(SetScatterChildren) dbUpdateTaskField returned: %s", err.Error())
			return
		}
	} else {
		err = dbUpdateJobTaskField(task.JobId, task.WorkflowInstanceId, task.Id, "scatterChildren", scatterChildren)
		if err != nil {
			err = fmt.Errorf("(SetScatterChildren) dbUpdateJobTaskField returned: %s", err.Error())
			return
		}
	}
	task.ScatterChildren = scatterChildren
	return
}
// GetScatterChildren resolves the task's scatter child names to *Task
// pointers via the workflow instance, caching the result in
// ScatterChildren_ptr. (The cached slice is shared with callers.)
func (task *TaskRaw) GetScatterChildren(wi *WorkflowInstance, qm *ServerMgr) (children []*Task, err error) {
	rlock, rerr := task.RLockNamed("GetScatterChildren")
	if rerr != nil {
		err = rerr
		return
	}
	defer task.RUnlockNamed(rlock)
	if task.ScatterChildren_ptr != nil {
		// cache hit from a previous call
		children = task.ScatterChildren_ptr // should make a copy....
		return
	}
	children = []*Task{}
	for _, childName := range task.ScatterChildren {
		var child *Task
		var found bool
		child, found, err = wi.GetTaskByName(childName, true)
		if err != nil {
			err = fmt.Errorf("(GetScatterChildren) wi.GetTaskByName returned: %s", err.Error())
			return
		}
		if !found {
			err = fmt.Errorf("(GetScatterChildren) child task %s not found in TaskMap", childName)
			return
		}
		children = append(children, child)
	}
	task.ScatterChildren_ptr = children
	return
}
// GetWorkflowInstance resolves the task's WorkflowInstanceId to a
// *WorkflowInstance via the owning job. A missing instance is an error.
func (task *TaskRaw) GetWorkflowInstance() (wi *WorkflowInstance, ok bool, err error) {
	var job *Job
	if job, err = task.GetJob(); err != nil {
		err = fmt.Errorf("(GetWorkflowInstance) task.GetJob returned: %s", err.Error())
		return
	}
	wiID := task.WorkflowInstanceId
	wi, ok, err = job.GetWorkflowInstance(wiID, true)
	if err != nil {
		err = fmt.Errorf("(GetWorkflowInstance) job.GetWorkflowInstance returned: %s", err.Error())
		return
	}
	if !ok {
		err = fmt.Errorf("(GetWorkflowInstance) job.GetWorkflowInstance did not find: %s", wiID)
		return
	}
	return
}
// returns name of Parent (without jobid)
// func (task *TaskRaw) GetWorkflowParent() (p Task_Unique_Identifier, ok bool, err error) {
// lock, err := task.RLockNamed("GetParent")
// if err != nil {
// return
// }
// defer task.RUnlockNamed(lock)
// if task.WorkflowParent == nil {
// ok = false
// return
// }
// p = *task.WorkflowParent
// return
// }
// func (task *TaskRaw) GetWorkflowParentStr() (parent_id_str string, err error) {
// lock, err := task.RLockNamed("GetWorkflowParentStr")
// if err != nil {
// return
// }
// defer task.RUnlockNamed(lock)
// parent_id_str = ""
// if task.WorkflowParent != nil {
// parent_id_str, _ = task.WorkflowParent.String()
// }
// return
// }
// GetState returns the task state under a read lock.
func (task *TaskRaw) GetState() (state string, err error) {
	rlock, rerr := task.RLockNamed("GetState")
	if rerr != nil {
		err = rerr
		return
	}
	defer task.RUnlockNamed(rlock)
	state = task.State
	return
}
// GetTaskType returns the task type (TASK_TYPE_* value) under a read lock.
func (task *TaskRaw) GetTaskType() (type_str string, err error) {
	rlock, rerr := task.RLockNamed("GetTaskType")
	if rerr != nil {
		err = rerr
		return
	}
	defer task.RUnlockNamed(rlock)
	type_str = task.TaskType
	return
}
// SetTaskType persists and sets the task type (TASK_TYPE_* value).
// writelock controls whether the task write lock is taken here.
func (task *Task) SetTaskType(type_str string, writelock bool) (err error) {
	if writelock {
		if err = task.LockNamed("SetTaskType"); err != nil {
			return
		}
		defer task.Unlock()
	}
	// CWL tasks live in a workflow instance; old-style tasks update the job document
	if task.WorkflowInstanceId != "" {
		err = dbUpdateTaskString(task.JobId, task.WorkflowInstanceId, task.Id, "task_type", type_str)
		if err != nil {
			err = fmt.Errorf("(task/SetTaskType) dbUpdateTaskString returned: %s", err.Error())
			return
		}
	} else {
		err = dbUpdateJobTaskString(task.JobId, task.WorkflowInstanceId, task.Id, "task_type", type_str)
		if err != nil {
			err = fmt.Errorf("(task/SetTaskType) dbUpdateJobTaskString returned: %s", err.Error())
			return
		}
	}
	task.TaskType = type_str
	return
}
// SetCreatedDate persists and sets the task creation timestamp.
func (task *TaskRaw) SetCreatedDate(t time.Time) (err error) {
	if err = task.LockNamed("SetCreatedDate"); err != nil {
		return
	}
	defer task.Unlock()
	// CWL tasks live in a workflow instance; old-style tasks update the job document
	if task.WorkflowInstanceId != "" {
		err = dbUpdateTaskTime(task.JobId, task.WorkflowInstanceId, task.Id, "createdDate", t)
		if err != nil {
			err = fmt.Errorf("(task/SetCreatedDate) dbUpdateTaskTime returned: %s", err.Error())
			return
		}
	} else {
		err = dbUpdateJobTaskTime(task.JobId, task.WorkflowInstanceId, task.Id, "createdDate", t)
		if err != nil {
			err = fmt.Errorf("(task/SetCreatedDate) dbUpdateJobTaskTime returned: %s", err.Error())
			return
		}
	}
	task.CreatedDate = t
	return
}
// SetStartedDate persists and sets the task start timestamp.
func (task *TaskRaw) SetStartedDate(t time.Time) (err error) {
	if err = task.LockNamed("SetStartedDate"); err != nil {
		return
	}
	defer task.Unlock()
	// CWL tasks live in a workflow instance; old-style tasks update the job document
	if task.WorkflowInstanceId != "" {
		err = dbUpdateTaskTime(task.JobId, task.WorkflowInstanceId, task.Id, "startedDate", t)
		if err != nil {
			err = fmt.Errorf("(task/SetStartedDate) dbUpdateTaskTime returned: %s", err.Error())
			return
		}
	} else {
		err = dbUpdateJobTaskTime(task.JobId, task.WorkflowInstanceId, task.Id, "startedDate", t)
		if err != nil {
			err = fmt.Errorf("(task/SetStartedDate) dbUpdateJobTaskTime returned: %s", err.Error())
			return
		}
	}
	task.StartedDate = t
	return
}
// SetCompletedDate persists and sets the task completion timestamp.
// lock controls whether the task write lock is taken here.
func (task *TaskRaw) SetCompletedDate(t time.Time, lock bool) (err error) {
	if lock {
		if err = task.LockNamed("SetCompletedDate"); err != nil {
			return
		}
		defer task.Unlock()
	}
	// CWL tasks live in a workflow instance; old-style tasks update the job document
	if task.WorkflowInstanceId != "" {
		err = dbUpdateTaskTime(task.JobId, task.WorkflowInstanceId, task.Id, "completedDate", t)
		if err != nil {
			err = fmt.Errorf("(task/SetCompletedDate) dbUpdateTaskTime returned: %s", err.Error())
			return
		}
	} else {
		err = dbUpdateJobTaskTime(task.JobId, task.WorkflowInstanceId, task.Id, "completedDate", t)
		if err != nil {
			err = fmt.Errorf("(task/SetCompletedDate) dbUpdateJobTaskTime returned: %s", err.Error())
			return
		}
	}
	task.CompletedDate = t
	return
}
// SetStepOutput persists and stores the CWL step output document, keeping
// the decoded (StepOutput) and raw (StepOutputInterface) views in sync.
// lock controls whether the task write lock is taken here.
func (task *TaskRaw) SetStepOutput(jd *cwl.Job_document, lock bool) (err error) {
	if lock {
		if err = task.LockNamed("SetStepOutput"); err != nil {
			return
		}
		defer task.Unlock()
	}
	// CWL tasks live in a workflow instance; old-style tasks update the job document
	if task.WorkflowInstanceId != "" {
		err = dbUpdateTaskField(task.JobId, task.WorkflowInstanceId, task.Id, "stepOutput", *jd)
		if err != nil {
			err = fmt.Errorf("(task/SetStepOutput) dbUpdateTaskField returned: %s", err.Error())
			return
		}
	} else {
		err = dbUpdateJobTaskField(task.JobId, task.WorkflowInstanceId, task.Id, "stepOutput", *jd)
		if err != nil {
			err = fmt.Errorf("(task/SetStepOutput) dbUpdateJobTaskField returned: %s", err.Error())
			return
		}
	}
	task.StepOutput = jd
	task.StepOutputInterface = jd
	return
}
// GetStateNamed returns the task state under a read lock whose label
// includes the caller-supplied name. Only for debugging purposes.
func (task *TaskRaw) GetStateNamed(name string) (state string, err error) {
	rlock, rerr := task.RLockNamed("GetState/" + name)
	if rerr != nil {
		err = rerr
		return
	}
	defer task.RUnlockNamed(rlock)
	state = task.State
	return
}
// GetId returns the task's unique identifier under a read lock; me labels
// the lock with the caller for diagnostics.
func (task *TaskRaw) GetId(me string) (id Task_Unique_Identifier, err error) {
	rlock, rerr := task.RLockNamed("GetId:" + me)
	if rerr != nil {
		err = rerr
		return
	}
	defer task.RUnlockNamed(rlock)
	id = task.Task_Unique_Identifier
	return
}
// GetJobId returns the owning job's id under a read lock.
func (task *TaskRaw) GetJobId() (id string, err error) {
	rlock, rerr := task.RLockNamed("GetJobId")
	if rerr != nil {
		err = rerr
		return
	}
	defer task.RUnlockNamed(rlock)
	id = task.JobId
	return
}
// GetJob returns the job this task belongs to, using the cached pointer
// when available and loading (and caching) it via the global GetJob
// otherwise.
func (task *TaskRaw) GetJob() (job *Job, err error) {
	rlock, rerr := task.RLockNamed("GetJob")
	if rerr != nil {
		err = rerr
		return
	}
	defer task.RUnlockNamed(rlock)
	if task.job != nil {
		job = task.job
		return
	}
	job, err = GetJob(task.JobId)
	if err != nil {
		err = fmt.Errorf("(TaskRaw/GetJob) global GetJob returned: %s", err.Error())
		return
	}
	// this is writing while we just have a readlock, not so nice
	task.job = job
	return
}
// also updates wi.RemainTasks, task.SetCompletedDate
// SetState transitions the task to new_state and persists the change. On
// entering COMPLETED the workflow instance's remaining-step counter is
// decremented (when wi is non-nil) and the completed date is set; on
// leaving COMPLETED the completed date is reset to the zero time.
// Fix: the IncrementRemainSteps error message wrongly named
// "wi.DecreaseRemainSteps", a function that is not called here.
func (task *TaskRaw) SetState(wi *WorkflowInstance, new_state string, writeLock bool) (err error) {
	if writeLock {
		err = task.LockNamed("SetState")
		if err != nil {
			return
		}
		defer task.Unlock()
	}
	old_state := task.State
	taskid := task.Id
	jobid := task.JobId
	if jobid == "" {
		err = fmt.Errorf("task %s has no job id", taskid)
		return
	}
	if old_state == new_state {
		// no transition, nothing to persist
		return
	}
	if task.WorkflowInstanceId == "" {
		err = dbUpdateJobTaskString(jobid, task.WorkflowInstanceId, taskid, "state", new_state)
		if err != nil {
			err = fmt.Errorf("(TaskRaw/SetState) dbUpdateJobTaskString returned: %s", err.Error())
			return
		}
	} else {
		err = dbUpdateTaskString(jobid, task.WorkflowInstanceId, taskid, "state", new_state)
		if err != nil {
			err = fmt.Errorf("(TaskRaw/SetState) dbUpdateTaskString returned: %s", err.Error())
			return
		}
	}
	logger.Debug(3, "(Task/SetState) %s new state: \"%s\" (old state \"%s\")", taskid, new_state, old_state)
	task.State = new_state
	if new_state == TASK_STAT_COMPLETED {
		if wi != nil {
			// one fewer step remains in the enclosing workflow instance
			_, err = wi.IncrementRemainSteps(-1, true)
			if err != nil {
				err = fmt.Errorf("(task/SetState) wi.IncrementRemainSteps returned: %s", err.Error())
				return
			}
		}
		err = task.SetCompletedDate(time.Now(), false)
		if err != nil {
			err = fmt.Errorf("(task/SetState) task.SetCompletedDate returned: %s", err.Error())
			return
		}
	} else if old_state == TASK_STAT_COMPLETED {
		// in case a completed task is marked as something different
		initTime := time.Time{}
		err = task.SetCompletedDate(initTime, false)
		if err != nil {
			err = fmt.Errorf("(task/SetState) SetCompletedDate returned: %s", err.Error())
			return
		}
	}
	return
}
// GetDependsOn returns the task's DependsOn list under a read lock.
func (task *TaskRaw) GetDependsOn() (dep []string, err error) {
	readLock, lockErr := task.RLockNamed("GetDependsOn")
	if lockErr != nil {
		err = lockErr
		return
	}
	defer task.RUnlockNamed(readLock)
	dep = task.DependsOn
	return
}
// CreateInputIndexes checks for and, if needed, creates the configured shock
// index on every input node of the task.
func (task *Task) CreateInputIndexes() (err error) {
	for _, input := range task.Inputs {
		if _, indexErr := input.IndexFile(input.ShockIndex); indexErr != nil {
			err = fmt.Errorf("(CreateInputIndexes) failed to create shock index: node=%s, taskid=%s, error=%s", input.Node, task.Id, indexErr.Error())
			logger.Error(err.Error())
			return
		}
	}
	return
}
// CreateOutputIndexes checks for and, if needed, creates the configured shock
// index on every output node of the task; if a worker failed to do so, this
// catches it on the server side.
func (task *Task) CreateOutputIndexes() (err error) {
	for _, output := range task.Outputs {
		if _, indexErr := output.IndexFile(output.ShockIndex); indexErr != nil {
			err = fmt.Errorf("(CreateOutputIndexes) failed to create shock index: node=%s, taskid=%s, error=%s", output.Node, task.Id, indexErr.Error())
			logger.Error(err.Error())
			return
		}
	}
	return
}
// checkPartIndex validates the partition setup of a task before InitPartIndex
// commits to it. Refactored out of InitPartIndex so only a read lock is held
// during the potentially long index creation. Returns the partition descriptor
// to use, the total number of index units, and isSingle=true when the task
// must fall back to a single workunit.
func (task *Task) checkPartIndex() (newPartition *PartInfo, totalunits int, isSingle bool, err error) {
	lock, err := task.RLockNamed("checkPartIndex")
	if err != nil {
		return
	}
	defer task.RUnlockNamed(lock)
	// default: partition on the first input
	// NOTE(review): assumes task.Inputs is non-empty — confirm callers guarantee this
	inputIO := task.Inputs[0]
	newPartition = &PartInfo{
		Input:         inputIO.FileName,
		MaxPartSizeMB: task.MaxWorkSize,
	}
	if len(task.Inputs) > 1 {
		// with multiple inputs, an explicit partition input must name one of them
		found := false
		if (task.Partition != nil) && (task.Partition.Input != "") {
			// task submitted with partition input specified, use that
			for _, io := range task.Inputs {
				if io.FileName == task.Partition.Input {
					found = true
					inputIO = io
					newPartition.Input = io.FileName
				}
			}
		}
		if !found {
			// bad state - set as not multi-workunit
			logger.Error("warning: lacking partition info while multiple inputs are specified, taskid=" + task.Id)
			isSingle = true
			return
		}
	}
	// if submitted with partition index use that, otherwise default
	if (task.Partition != nil) && (task.Partition.Index != "") {
		newPartition.Index = task.Partition.Index
	} else {
		newPartition.Index = conf.DEFAULT_INDEX
	}
	idxInfo, err := inputIO.IndexFile(newPartition.Index)
	if err != nil {
		// bad state - set as not multi-workunit; the error is deliberately
		// cleared so the task still runs as one workunit
		logger.Error("warning: failed to create / retrieve index=%s, taskid=%s, error=%s", newPartition.Index, task.Id, err.Error())
		isSingle = true
		err = nil
		return
	}
	totalunits = int(idxInfo.TotalUnits)
	return
}
// InitPartIndex computes how many workunits (task.TotalWork) this task should
// be split into, based on partition/index info. It resets task.Partition when
// called. Only 1 task.Inputs is allowed unless 'partinfo.input' was specified
// on POST. If index info cannot be obtained, task.TotalWork is set to 1 and
// task.Partition to nil (single-workunit fallback).
func (task *Task) InitPartIndex() (err error) {
	if task.TotalWork == 1 && task.MaxWorkSize == 0 {
		// only 1 workunit requested
		return
	}
	// validation runs under a read lock only; see checkPartIndex
	newPartition, totalunits, isSingle, err := task.checkPartIndex()
	if err != nil {
		return
	}
	if isSingle {
		// its a single workunit, skip init
		err = task.setSingleWorkunit(true)
		return
	}
	err = task.LockNamed("InitPartIndex")
	if err != nil {
		return
	}
	defer task.Unlock()
	// adjust total work based on needs
	if newPartition.MaxPartSizeMB > 0 {
		// this implementation for chunkrecord indexer only
		chunkmb := int(conf.DEFAULT_CHUNK_SIZE / 1048576)
		// ceil(totalunits*chunkmb / MaxPartSizeMB)
		var totalwork int
		if totalunits*chunkmb%newPartition.MaxPartSizeMB == 0 {
			totalwork = totalunits * chunkmb / newPartition.MaxPartSizeMB
		} else {
			totalwork = totalunits*chunkmb/newPartition.MaxPartSizeMB + 1
		}
		if totalwork < task.TotalWork {
			// use bigger splits (specified by size or totalwork)
			totalwork = task.TotalWork
		}
		if totalwork != task.TotalWork {
			err = task.setTotalWork(totalwork, false)
			if err != nil {
				return
			}
		}
	}
	// cannot split finer than the number of index units
	if totalunits < task.TotalWork {
		err = task.setTotalWork(totalunits, false)
		if err != nil {
			return
		}
	}
	// need only 1 workunit
	if task.TotalWork == 1 {
		err = task.setSingleWorkunit(false)
		return
	}
	// done, set it
	newPartition.TotalIndex = totalunits
	err = task.setPartition(newPartition, false)
	return
}
// setSingleWorkunit collapses the task to a single-workunit configuration:
// totalwork=1, partition=nil, maxworksize=0. Each field is only written (and
// persisted) when it actually differs from that target.
func (task *Task) setSingleWorkunit(writelock bool) (err error) {
	if task.TotalWork != 1 {
		if err = task.setTotalWork(1, writelock); err != nil {
			err = fmt.Errorf("(task/setSingleWorkunit) setTotalWork returned: %s", err.Error())
			return
		}
	}
	if task.Partition != nil {
		if err = task.setPartition(nil, writelock); err != nil {
			err = fmt.Errorf("(task/setSingleWorkunit) setPartition returned: %s", err.Error())
			return
		}
	}
	if task.MaxWorkSize != 0 {
		if err = task.setMaxWorkSize(0, writelock); err != nil {
			err = fmt.Errorf("(task/setSingleWorkunit) setMaxWorkSize returned: %s", err.Error())
			return
		}
	}
	return
}
// setTotalWork persists a new totalwork value and resets RemainWork to match.
func (task *Task) setTotalWork(num int, writelock bool) (err error) {
	if writelock {
		if err = task.LockNamed("setTotalWork"); err != nil {
			return
		}
		defer task.Unlock()
	}
	if dbErr := dbUpdateJobTaskInt(task.JobId, task.WorkflowInstanceId, task.Id, "totalwork", num); dbErr != nil {
		err = fmt.Errorf("(task/setTotalWork) dbUpdateJobTaskInt returned: %s", dbErr.Error())
		return
	}
	task.TotalWork = num
	// reset remaining work whenever total work is reset
	err = task.SetRemainWork(num, false)
	return
}
// setPartition persists the task's partition descriptor (nil clears it).
func (task *Task) setPartition(partition *PartInfo, writelock bool) (err error) {
	if writelock {
		if err = task.LockNamed("setPartition"); err != nil {
			return
		}
		defer task.Unlock()
	}
	if dbErr := dbUpdateJobTaskPartition(task.JobId, task.WorkflowInstanceId, task.Id, partition); dbErr != nil {
		err = fmt.Errorf("(task/setPartition) dbUpdateJobTaskPartition returned: %s", dbErr.Error())
		return
	}
	task.Partition = partition
	return
}
// setMaxWorkSize persists a new maxworksize value for the task.
func (task *Task) setMaxWorkSize(num int, writelock bool) (err error) {
	if writelock {
		if err = task.LockNamed("setMaxWorkSize"); err != nil {
			return
		}
		defer task.Unlock()
	}
	if dbErr := dbUpdateJobTaskInt(task.JobId, task.WorkflowInstanceId, task.Id, "maxworksize", num); dbErr != nil {
		err = fmt.Errorf("(task/setMaxWorkSize) dbUpdateJobTaskInt returned: %s", dbErr.Error())
		return
	}
	task.MaxWorkSize = num
	return
}
// SetRemainWork persists a new remainwork value for the task.
func (task *Task) SetRemainWork(num int, writelock bool) (err error) {
	if writelock {
		if err = task.LockNamed("SetRemainWork"); err != nil {
			return
		}
		defer task.Unlock()
	}
	if dbErr := dbUpdateJobTaskInt(task.JobId, task.WorkflowInstanceId, task.Id, "remainwork", num); dbErr != nil {
		err = fmt.Errorf("(task/SetRemainWork) dbUpdateJobTaskInt returned: %s", dbErr.Error())
		return
	}
	task.RemainWork = num
	return
}
// IncrementRemainWork adds inc (may be negative) to the task's RemainWork,
// persists the new value and returns it.
func (task *Task) IncrementRemainWork(inc int, writelock bool) (remainwork int, err error) {
	if writelock {
		if err = task.LockNamed("IncrementRemainWork"); err != nil {
			return
		}
		defer task.Unlock()
	}
	remainwork = task.RemainWork + inc
	if task.WorkflowInstanceId == "" {
		if dbErr := dbUpdateJobTaskInt(task.JobId, task.WorkflowInstanceId, task.Id, "remainwork", remainwork); dbErr != nil {
			err = fmt.Errorf("(task/IncrementRemainWork) dbUpdateJobTaskInt returned: %s", dbErr.Error())
			return
		}
	} else {
		if dbErr := dbUpdateTaskInt(task.JobId, task.WorkflowInstanceId, task.Id, "remainwork", remainwork); dbErr != nil {
			err = fmt.Errorf("(task/IncrementRemainWork) dbUpdateTaskInt returned: %s", dbErr.Error())
			return
		}
	}
	task.RemainWork = remainwork
	return
}
// IncrementComputeTime adds inc to the task's accumulated compute time and
// persists the new value. Always takes the task's write lock.
func (task *Task) IncrementComputeTime(inc int) (err error) {
	if err = task.LockNamed("IncrementComputeTime"); err != nil {
		return
	}
	defer task.Unlock()
	newComputeTime := task.ComputeTime + inc
	if task.WorkflowInstanceId == "" {
		if dbErr := dbUpdateJobTaskInt(task.JobId, task.WorkflowInstanceId, task.Id, "computetime", newComputeTime); dbErr != nil {
			err = fmt.Errorf("(task/IncrementComputeTime) dbUpdateJobTaskInt returned: %s", dbErr.Error())
			return
		}
	} else {
		if dbErr := dbUpdateTaskInt(task.JobId, task.WorkflowInstanceId, task.Id, "computetime", newComputeTime); dbErr != nil {
			err = fmt.Errorf("(task/IncrementComputeTime) dbUpdateTaskInt returned: %s", dbErr.Error())
			return
		}
	}
	task.ComputeTime = newComputeTime
	return
}
// ResetTaskTrue marks the task for a re-run: moves it back to
// TASK_STAT_PENDING and persists resettask=true. name only labels the lock
// for debugging.
func (task *Task) ResetTaskTrue(name string) (err error) {
	// fast path: already flagged.
	// NOTE(review): this read happens before the lock is taken — presumably a
	// benign race; confirm callers tolerate two concurrent resets.
	if task.ResetTask == true {
		return
	}
	err = task.LockNamed("ResetTaskTrue:" + name)
	if err != nil {
		return
	}
	defer task.Unlock()
	// we already hold the lock, so SetState must not re-lock (writeLock=false)
	err = task.SetState(nil, TASK_STAT_PENDING, false)
	if err != nil {
		err = fmt.Errorf("(task/ResetTaskTrue) task.SetState returned: %s", err.Error())
		return
	}
	if task.WorkflowInstanceId == "" {
		err = dbUpdateJobTaskBoolean(task.JobId, task.WorkflowInstanceId, task.Id, "resettask", true)
		if err != nil {
			err = fmt.Errorf("(task/ResetTaskTrue) dbUpdateJobTaskBoolean returned: %s", err.Error())
			return
		}
	} else {
		err = dbUpdateTaskBoolean(task.JobId, task.WorkflowInstanceId, task.Id, "resettask", true)
		if err != nil {
			err = fmt.Errorf("(task/ResetTaskTrue) dbUpdateTaskBoolean returned: %s", err.Error())
			return
		}
	}
	task.ResetTask = true
	return
}
// SetResetTask re-initializes a previously-run task when it is enqueued
// again: restores RemainWork, zeroes ComputeTime and the completed date,
// wipes derived input node references, deletes shock output nodes and
// workunit logs, and finally clears the resettask flag. No-op unless
// task.ResetTask is true.
func (task *Task) SetResetTask(info *Info) (err error) {
	// called when enqueing a task that previously ran
	err = task.LockNamed("SetResetTask")
	if err != nil {
		return
	}
	defer task.Unlock()
	// only run if true
	if task.ResetTask == false {
		return
	}
	// in memory pointer
	task.Info = info
	// reset remainwork
	err = task.SetRemainWork(task.TotalWork, false)
	if err != nil {
		err = fmt.Errorf("(task/SetResetTask) task.SetRemainWork returned: %s", err.Error())
		return
	}
	// reset computetime
	if task.WorkflowInstanceId == "" {
		err = dbUpdateJobTaskInt(task.JobId, task.WorkflowInstanceId, task.Id, "computetime", 0)
		if err != nil {
			err = fmt.Errorf("(task/SetResetTask) dbUpdateJobTaskInt returned: %s", err.Error())
			return
		}
	} else {
		err = dbUpdateTaskInt(task.JobId, task.WorkflowInstanceId, task.Id, "computetime", 0)
		if err != nil {
			err = fmt.Errorf("(task/SetResetTask) dbUpdateTaskInt returned: %s", err.Error())
			return
		}
	}
	task.ComputeTime = 0
	// reset completedate
	err = task.SetCompletedDate(time.Time{}, false)
	if err != nil {
		// BUG FIX: this error was previously assigned but never checked
		err = fmt.Errorf("(task/SetResetTask) task.SetCompletedDate returned: %s", err.Error())
		return
	}
	// reset inputs
	for _, io := range task.Inputs {
		// skip inputs with no origin (predecessor task)
		if io.Origin == "" {
			continue
		}
		io.Node = "-"
		io.Size = 0
		io.Url = ""
	}
	if task.WorkflowInstanceId == "" {
		err = dbUpdateJobTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "inputs", task.Inputs)
		if err != nil {
			err = fmt.Errorf("(task/SetResetTask) dbUpdateJobTaskIO returned: %s", err.Error())
			return
		}
	} else {
		err = dbUpdateTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "inputs", task.Inputs)
		if err != nil {
			err = fmt.Errorf("(task/SetResetTask) dbUpdateTaskIO returned: %s", err.Error())
			return
		}
	}
	// reset / delete all outputs
	for _, io := range task.Outputs {
		// do not delete update IO
		if io.Type == "update" {
			continue
		}
		if dataUrl, _ := io.DataUrl(); dataUrl != "" {
			// delete dataUrl if is shock node
			if strings.HasSuffix(dataUrl, shock.DATA_SUFFIX) {
				err = shock.ShockDelete(io.Host, io.Node, io.DataToken)
				if err == nil {
					logger.Debug(2, "Deleted node %s from shock", io.Node)
				} else {
					// best effort: node deletion failure does not abort the reset
					logger.Error("(SetResetTask) unable to deleted node %s from shock: %s", io.Node, err.Error())
				}
			}
		}
		io.Node = "-"
		io.Size = 0
		io.Url = ""
	}
	if task.WorkflowInstanceId == "" {
		err = dbUpdateJobTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "outputs", task.Outputs)
		if err != nil {
			err = fmt.Errorf("(task/SetResetTask) dbUpdateJobTaskIO returned: %s", err.Error())
			return
		}
	} else {
		err = dbUpdateTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "outputs", task.Outputs)
		if err != nil {
			err = fmt.Errorf("(task/SetResetTask) dbUpdateTaskIO returned: %s", err.Error())
			return
		}
	}
	// delete all workunit logs
	for _, log := range conf.WORKUNIT_LOGS {
		err = task.DeleteLogs(log, false)
		if err != nil {
			return
		}
	}
	// reset the reset
	if task.WorkflowInstanceId == "" {
		err = dbUpdateJobTaskBoolean(task.JobId, task.WorkflowInstanceId, task.Id, "resettask", false)
		if err != nil {
			err = fmt.Errorf("(task/SetResetTask) dbUpdateJobTaskBoolean returned: %s", err.Error())
			return
		}
	} else {
		err = dbUpdateTaskBoolean(task.JobId, task.WorkflowInstanceId, task.Id, "resettask", false)
		if err != nil {
			err = fmt.Errorf("(task/SetResetTask) dbUpdateTaskBoolean returned: %s", err.Error())
			return
		}
	}
	task.ResetTask = false
	return
}
// setTokenForIO copies the job's data token (task.Info.DataToken) onto every
// input and output IO that does not already carry it, persisting each changed
// list. No-op when auth is disabled or no token is set.
func (task *Task) setTokenForIO(writelock bool) (err error) {
	if writelock {
		err = task.LockNamed("setTokenForIO")
		if err != nil {
			return
		}
		defer task.Unlock()
	}
	if task.Info == nil {
		err = fmt.Errorf("(setTokenForIO) task.Info empty")
		return
	}
	// nothing to propagate
	if !task.Info.Auth || task.Info.DataToken == "" {
		return
	}
	// update inputs
	changed := false
	for _, io := range task.Inputs {
		if io.DataToken != task.Info.DataToken {
			io.DataToken = task.Info.DataToken
			changed = true
		}
	}
	// persist only when something actually changed
	if changed {
		if task.WorkflowInstanceId == "" {
			err = dbUpdateJobTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "inputs", task.Inputs)
			if err != nil {
				err = fmt.Errorf("(task/setTokenForIO) dbUpdateJobTaskIO returned: %s", err.Error())
				return
			}
		} else {
			err = dbUpdateTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "inputs", task.Inputs)
			if err != nil {
				err = fmt.Errorf("(task/setTokenForIO) dbUpdateTaskIO returned: %s", err.Error())
				return
			}
		}
	}
	// update outputs
	changed = false
	for _, io := range task.Outputs {
		if io.DataToken != task.Info.DataToken {
			io.DataToken = task.Info.DataToken
			changed = true
		}
	}
	if changed {
		if task.WorkflowInstanceId == "" {
			err = dbUpdateJobTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "outputs", task.Outputs)
			if err != nil {
				err = fmt.Errorf("(task/setTokenForIO) dbUpdateJobTaskIO returned: %s", err.Error())
				return
			}
		} else {
			err = dbUpdateTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "outputs", task.Outputs)
			if err != nil {
				err = fmt.Errorf("(task/setTokenForIO) dbUpdateTaskIO returned: %s", err.Error())
				return
			}
		}
	}
	return
}
// CreateWorkunits builds the workunit list for this task. A task with a
// single workunit gets rank 0; a task with N>1 workunits gets ranks 1..N.
func (task *Task) CreateWorkunits(qm *ServerMgr, job *Job) (wus []*Workunit, err error) {
	if task.TotalWork == 1 {
		var single *Workunit
		single, err = NewWorkunit(qm, task, 0, job)
		if err != nil {
			err = fmt.Errorf("(CreateWorkunits) (single) NewWorkunit failed: %s", err.Error())
			return
		}
		wus = append(wus, single)
		return
	}
	for rank := 1; rank <= task.TotalWork; rank++ {
		var wu *Workunit
		wu, err = NewWorkunit(qm, task, rank, job)
		if err != nil {
			err = fmt.Errorf("(CreateWorkunits) (multi) NewWorkunit failed: %s", err.Error())
			return
		}
		wus = append(wus, wu)
	}
	return
}
// GetTaskLogs assembles a TaskLog for this task, with one WorkLog per
// workunit (rank 0 for a single-workunit task, ranks 1..N otherwise).
func (task *Task) GetTaskLogs() (tlog *TaskLog, err error) {
	tlog = &TaskLog{
		Id:            task.Id,
		State:         task.State,
		TotalWork:     task.TotalWork,
		CompletedDate: task.CompletedDate,
	}
	workunit_id := New_Workunit_Unique_Identifier(task.Task_Unique_Identifier, 0)
	// single-workunit tasks use rank 0, multi-workunit tasks ranks 1..TotalWork
	ranks := []int{0}
	if task.TotalWork != 1 {
		ranks = ranks[:0]
		for i := 1; i <= task.TotalWork; i++ {
			ranks = append(ranks, i)
		}
	}
	for _, rank := range ranks {
		workunit_id.Rank = rank
		var wl *WorkLog
		wl, err = NewWorkLog(workunit_id)
		if err != nil {
			err = fmt.Errorf("(task/GetTaskLogs) NewWorkLog returned: %s", err.Error())
			return
		}
		tlog.Workunits = append(tlog.Workunits, wl)
	}
	return
}
// ValidateDependants checks that every predecessor of this task (both the
// explicit DependsOn list and the implicit predecessors referenced via input
// IO origins) exists in the task map and is completed. A failed check is not
// an error: it returns a non-empty reason string instead; err is reserved for
// lookup/parsing failures.
func (task *Task) ValidateDependants(qm *ServerMgr) (reason string, err error) {
	lock, err := task.RLockNamed("ValidateDependants")
	if err != nil {
		return
	}
	defer task.RUnlockNamed(lock)
	// validate task states in depends on list
	for _, preTaskStr := range task.DependsOn {
		var preId Task_Unique_Identifier
		preId, err = New_Task_Unique_Identifier_FromString(preTaskStr)
		if err != nil {
			err = fmt.Errorf("(ValidateDependants) New_Task_Unique_Identifier_FromString returns: %s", err.Error())
			return
		}
		preTask, ok, xerr := qm.TaskMap.Get(preId, true)
		if xerr != nil {
			err = fmt.Errorf("(ValidateDependants) predecessor task %s not found for task %s: %s", preTaskStr, task.Id, xerr.Error())
			return
		}
		if !ok {
			reason = fmt.Sprintf("(ValidateDependants) predecessor task not found: task=%s, pretask=%s", task.Id, preTaskStr)
			logger.Debug(3, reason)
			return
		}
		preTaskState, xerr := preTask.GetState()
		if xerr != nil {
			err = fmt.Errorf("(ValidateDependants) unable to get state for predecessor task %s: %s", preTaskStr, xerr.Error())
			return
		}
		if preTaskState != TASK_STAT_COMPLETED {
			reason = fmt.Sprintf("(ValidateDependants) predecessor task state is not completed: task=%s, pretask=%s, pretask.state=%s", task.Id, preTaskStr, preTaskState)
			logger.Debug(3, reason)
			return
		}
	}
	// validate task states in input IO origins
	for _, io := range task.Inputs {
		// inputs without an origin have no predecessor to check
		if io.Origin == "" {
			continue
		}
		var preId Task_Unique_Identifier
		preId, err = New_Task_Unique_Identifier(task.JobId, io.Origin)
		if err != nil {
			err = fmt.Errorf("(ValidateDependants) New_Task_Unique_Identifier returns: %s", err.Error())
			return
		}
		var preTaskStr string
		preTaskStr, err = preId.String()
		if err != nil {
			err = fmt.Errorf("(ValidateDependants) task.String returned: %s", err.Error())
			return
		}
		preTask, ok, xerr := qm.TaskMap.Get(preId, true)
		if xerr != nil {
			err = fmt.Errorf("(ValidateDependants) predecessor task %s not found for task %s: %s", preTaskStr, task.Id, xerr.Error())
			return
		}
		if !ok {
			reason = fmt.Sprintf("(ValidateDependants) predecessor task not found: task=%s, pretask=%s", task.Id, preTaskStr)
			logger.Debug(3, reason)
			return
		}
		preTaskState, xerr := preTask.GetState()
		if xerr != nil {
			err = fmt.Errorf("(ValidateDependants) unable to get state for predecessor task %s: %s", preTaskStr, xerr.Error())
			return
		}
		if preTaskState != TASK_STAT_COMPLETED {
			reason = fmt.Sprintf("(ValidateDependants) predecessor task state is not completed: task=%s, pretask=%s, pretask.state=%s", task.Id, preTaskStr, preTaskState)
			logger.Debug(3, reason)
			return
		}
	}
	return
}
// ValidateInputs resolves and verifies every input of the task: fills in node
// IDs from predecessor outputs, rebuilds data URLs, refreshes file sizes,
// creates/waits on shock indexes, and finally persists the updated inputs.
func (task *Task) ValidateInputs(qm *ServerMgr) (err error) {
	err = task.LockNamed("ValidateInputs")
	if err != nil {
		err = fmt.Errorf("(ValidateInputs) unable to lock task %s: %s", task.Id, err.Error())
		return
	}
	defer task.Unlock()
	for _, io := range task.Inputs {
		if io.Origin != "" {
			// find predecessor task
			var preId Task_Unique_Identifier
			preId, err = New_Task_Unique_Identifier(task.JobId, io.Origin)
			if err != nil {
				err = fmt.Errorf("(ValidateInputs) New_Task_Unique_Identifier returned: %s", err.Error())
				return
			}
			var preTaskStr string
			preTaskStr, err = preId.String()
			if err != nil {
				err = fmt.Errorf("(ValidateInputs) task.String returned: %s", err.Error())
				return
			}
			preTask, ok, xerr := qm.TaskMap.Get(preId, true)
			if xerr != nil {
				err = fmt.Errorf("(ValidateInputs) predecessor task %s not found for task %s: %s", preTaskStr, task.Id, xerr.Error())
				return
			}
			if !ok {
				err = fmt.Errorf("(ValidateInputs) predecessor task %s not found for task %s", preTaskStr, task.Id)
				return
			}
			// test predecessor state
			preTaskState, xerr := preTask.GetState()
			if xerr != nil {
				err = fmt.Errorf("(ValidateInputs) unable to get state for predecessor task %s: %s", preTaskStr, xerr.Error())
				return
			}
			if preTaskState != TASK_STAT_COMPLETED {
				err = fmt.Errorf("(ValidateInputs) predecessor task state is not completed: task=%s, pretask=%s, pretask.state=%s", task.Id, preTaskStr, preTaskState)
				return
			}
			// find predecessor output
			preTaskIO, xerr := preTask.GetOutput(io.FileName)
			if xerr != nil {
				// BUG FIX: the original formatted err.Error() here, but err is
				// nil at this point — the actual failure is xerr
				err = fmt.Errorf("(ValidateInputs) unable to get IO for predecessor task %s, file %s: %s", preTaskStr, io.FileName, xerr.Error())
				return
			}
			io.Node = preTaskIO.Node
		}
		// make sure we have node id
		if (io.Node == "") || (io.Node == "-") {
			err = fmt.Errorf("(ValidateInputs) error in locate input for task, no node id found: task=%s, file=%s", task.Id, io.FileName)
			return
		}
		// force build data url
		io.Url = ""
		_, err = io.DataUrl()
		if err != nil {
			err = fmt.Errorf("(ValidateInputs) DataUrl returns: %s", err.Error())
			return
		}
		// force check file exists and get size
		io.Size = 0
		_, err = io.UpdateFileSize()
		if err != nil {
			err = fmt.Errorf("(ValidateInputs) input file %s UpdateFileSize returns: %s", io.FileName, err.Error())
			return
		}
		// create or wait on shock index on input node (if set in workflow document)
		_, err = io.IndexFile(io.ShockIndex)
		if err != nil {
			err = fmt.Errorf("(ValidateInputs) failed to create shock index: task=%s, node=%s: %s", task.Id, io.Node, err.Error())
			return
		}
		logger.Debug(3, "(ValidateInputs) input located: task=%s, file=%s, node=%s, size=%d", task.Id, io.FileName, io.Node, io.Size)
	}
	if task.WorkflowInstanceId == "" {
		err = dbUpdateJobTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "inputs", task.Inputs)
		if err != nil {
			err = fmt.Errorf("(ValidateInputs) unable to save task inputs to mongodb, task=%s: %s", task.Id, err.Error())
			return
		}
	} else {
		err = dbUpdateTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "inputs", task.Inputs)
		if err != nil {
			err = fmt.Errorf("(ValidateInputs) unable to save task inputs to mongodb, task=%s: %s", task.Id, err.Error())
			return
		}
	}
	return
}
// ValidateOutputs verifies every output of the task: rebuilds data URLs,
// refreshes file sizes, creates/waits on shock indexes, then persists the
// updated outputs.
func (task *Task) ValidateOutputs() (err error) {
	err = task.LockNamed("ValidateOutputs")
	if err != nil {
		err = fmt.Errorf("unable to lock task %s: %s", task.Id, err.Error())
		return
	}
	defer task.Unlock()
	for _, io := range task.Outputs {
		// force build data url
		io.Url = ""
		_, err = io.DataUrl()
		if err != nil {
			err = fmt.Errorf("DataUrl returns: %s", err.Error())
			return
		}
		// force check file exists and get size
		io.Size = 0
		_, err = io.UpdateFileSize()
		if err != nil {
			// BUG FIX: message previously said "input file ... GetFileSize",
			// but this loop handles outputs and calls UpdateFileSize
			err = fmt.Errorf("output file %s UpdateFileSize returns: %s", io.FileName, err.Error())
			return
		}
		// create or wait on shock index on output node (if set in workflow document)
		_, err = io.IndexFile(io.ShockIndex)
		if err != nil {
			err = fmt.Errorf("failed to create shock index: task=%s, node=%s: %s", task.Id, io.Node, err.Error())
			return
		}
	}
	if task.WorkflowInstanceId == "" {
		err = dbUpdateJobTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "outputs", task.Outputs)
		if err != nil {
			err = fmt.Errorf("unable to save task outputs to mongodb, task=%s: %s", task.Id, err.Error())
			return
		}
	} else {
		err = dbUpdateTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "outputs", task.Outputs)
		if err != nil {
			err = fmt.Errorf("unable to save task outputs to mongodb, task=%s: %s", task.Id, err.Error())
			return
		}
	}
	return
}
// ValidatePredata verifies the task's predata entries that are shock nodes:
// refreshes their file sizes and fills in missing data URLs, persisting the
// list when anything changed.
func (task *Task) ValidatePredata() (err error) {
	err = task.LockNamed("ValidatePreData")
	if err != nil {
		err = fmt.Errorf("unable to lock task %s: %s", task.Id, err.Error())
		return
	}
	defer task.Unlock()
	// locate predata
	var modified bool
	for _, io := range task.Predata {
		// only verify predata that is a shock node
		if (io.Node != "") && (io.Node != "-") {
			// check file size
			mod, xerr := io.UpdateFileSize()
			if xerr != nil {
				err = fmt.Errorf("input file %s GetFileSize returns: %s", io.FileName, xerr.Error())
				return
			}
			if mod {
				modified = true
			}
			// build url if missing
			if io.Url == "" {
				_, err = io.DataUrl()
				if err != nil {
					// BUG FIX: the original set err here but did not return,
					// so the failure could be silently overwritten below
					err = fmt.Errorf("DataUrl returns: %s", err.Error())
					return
				}
				modified = true
			}
		}
	}
	if modified {
		if task.WorkflowInstanceId == "" {
			err = fmt.Errorf("") // placeholder removed below
		}
	}
	if modified {
		if task.WorkflowInstanceId == "" {
			err = dbUpdateJobTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "predata", task.Predata)
			if err != nil {
				err = fmt.Errorf("unable to save task predata to mongodb, task=%s: %s", task.Id, err.Error())
			}
		} else {
			err = dbUpdateTaskIO(task.JobId, task.WorkflowInstanceId, task.Id, "predata", task.Predata)
			if err != nil {
				err = fmt.Errorf("unable to save task predata to mongodb, task=%s: %s", task.Id, err.Error())
			}
		}
	}
	return
}
// DeleteOutput deletes the shock nodes of outputs flagged for deletion,
// once the task is in a terminal state; returns the number of flagged IOs.
func (task *Task) DeleteOutput() (modified int) {
	switch task.State {
	case TASK_STAT_COMPLETED, TASK_STAT_SKIPPED, TASK_STAT_FAIL_SKIP:
		for _, output := range task.Outputs {
			if !output.Delete {
				continue
			}
			if delErr := output.DeleteNode(); delErr != nil {
				logger.Warning("failed to delete shock node %s: %s", output.Node, delErr.Error())
			}
			modified++
		}
	}
	return
}
// DeleteInput deletes the shock nodes of inputs flagged for deletion,
// once the task is in a terminal state; returns the number of flagged IOs.
func (task *Task) DeleteInput() (modified int) {
	switch task.State {
	case TASK_STAT_COMPLETED, TASK_STAT_SKIPPED, TASK_STAT_FAIL_SKIP:
		for _, input := range task.Inputs {
			if !input.Delete {
				continue
			}
			if delErr := input.DeleteNode(); delErr != nil {
				logger.Warning("failed to delete shock node %s: %s", input.Node, delErr.Error())
			}
			modified++
		}
	}
	return
}
// DeleteLogs removes all workunit log files of the given kind (logname, e.g.
// an entry of conf.WORKUNIT_LOGS) belonging to this task.
func (task *Task) DeleteLogs(logname string, writelock bool) (err error) {
	if writelock {
		// BUG FIX: the lock name was a copy-paste leftover ("setTotalWork")
		err = task.LockNamed("DeleteLogs")
		if err != nil {
			return
		}
		defer task.Unlock()
	}
	var logdir string
	logdir, err = getPathByJobId(task.JobId)
	if err != nil {
		// BUG FIX: error context was garbled ("GetTaDeleteLogsskLogs")
		err = fmt.Errorf("(task/DeleteLogs) getPathByJobId returned: %s", err.Error())
		return
	}
	globpath := fmt.Sprintf("%s/%s_*.%s", logdir, task.Id, logname)
	var logfiles []string
	logfiles, err = filepath.Glob(globpath)
	if err != nil {
		err = fmt.Errorf("(task/DeleteLogs) filepath.Glob returned: %s", err.Error())
		return
	}
	for _, logfile := range logfiles {
		workid := strings.Split(filepath.Base(logfile), ".")[0]
		// BUG FIX: the os.Remove error was silently discarded; deletion stays
		// best-effort, but failures are now logged
		if rmErr := os.Remove(logfile); rmErr != nil {
			logger.Error("(task/DeleteLogs) unable to remove %s: %s", logfile, rmErr.Error())
			continue
		}
		logger.Debug(2, "Deleted %s log for workunit %s", logname, workid)
	}
	return
}
// GetStepOutput looks up a CWL step output by its base name. A missing output
// is reported via ok=false and reason; err is reserved for a present-but-nil
// value, which taskReady should have prevented.
func (task *Task) GetStepOutput(name string) (obj cwl.CWLType, ok bool, reason string, err error) {
	if task.StepOutput == nil {
		reason = "task.StepOutput == nil"
		return
	}
	for _, namedOutput := range *task.StepOutput {
		base := path.Base(namedOutput.Id)
		logger.Debug(3, "(task/GetStepOutput) %s vs %s\n", base, name)
		if base != name {
			continue
		}
		obj = namedOutput.Value
		if obj == nil {
			// this should not happen, taskReady makes sure everything is available
			err = fmt.Errorf("(task/GetStepOutput) found %s , but it is nil", name)
			return
		}
		ok = true
		return
	}
	return
}
// GetStepOutputNames returns the base names of all CWL step outputs.
func (task *Task) GetStepOutputNames() (names []string, err error) {
	if task.StepOutput == nil {
		err = fmt.Errorf("(task/GetStepOutputNames) task.StepOutput == nil")
		return
	}
	names = make([]string, 0, len(*task.StepOutput))
	for _, namedOutput := range *task.StepOutput {
		names = append(names, path.Base(namedOutput.Id))
	}
	return
}
// String2Date parses an RFC3339 timestamp (e.g. "2018-12-13T22:36:02.96Z")
// into a time.Time.
func String2Date(str string) (t time.Time, err error) {
	return time.Parse(time.RFC3339, str)
}
// FixTimeInMap converts the named field of original_map from its string form
// into a time.Time, in place. An existing time.Time passes through untouched;
// a missing field is a no-op; any other type is an error.
func FixTimeInMap(original_map map[string]interface{}, field string) (err error) {
	raw, exists := original_map[field]
	if !exists {
		return
	}
	switch value := raw.(type) {
	case time.Time:
		// already the right type, nothing to do
	case string:
		var parsed time.Time
		parsed, err = String2Date(value)
		if err != nil {
			err = fmt.Errorf("(FixTimeInMap) Could not parse date: %s", err.Error())
			return
		}
		delete(original_map, field)
		original_map[field] = parsed
	default:
		err = fmt.Errorf("(FixTimeInMap) time type unknown (%s)", reflect.TypeOf(raw))
	}
	return
}
// NewTaskFromInterface decodes a generic task document (as produced by a
// JSON/BSON/YAML parser) into a *Task. Date fields are normalized to
// time.Time before mapstructure decoding, and a non-empty
// WorkflowInstanceId is required.
func NewTaskFromInterface(original interface{}, context *cwl.WorkflowContext) (task *Task, err error) {
	task = &Task{}
	task.TaskRaw = TaskRaw{}
	//spew.Dump(original)
	// normalize map keys to strings
	original, err = cwl.MakeStringMap(original, context)
	if err != nil {
		err = fmt.Errorf("(NewTaskFromInterface) MakeStringMap returned: %s", err.Error())
		return
	}
	original_map := original.(map[string]interface{})
	// convert string-encoded dates to time.Time so mapstructure can decode them
	for _, field := range []string{"createdDate", "startedDate", "completedDate"} {
		err = FixTimeInMap(original_map, field)
		if err != nil {
			err = fmt.Errorf("(NewTaskFromInterface) FixTimeInMap returned: %s", err.Error())
			return
		}
	}
	err = mapstructure.Decode(original, task)
	if err != nil {
		err = fmt.Errorf("(NewTaskFromInterface) mapstructure.Decode returned: %s (%s)", err.Error(), spew.Sdump(original_map))
		return
	}
	if task.WorkflowInstanceId == "" {
		err = fmt.Errorf("(NewTaskFromInterface) task.WorkflowInstanceId == empty")
		return
	}
	// if task.WorkflowInstanceId != "_root" {
	// if task.WorkflowParent == nil {
	// task_id_str, _ := task.String()
	// err = fmt.Errorf("(NewTaskFromInterface) task.WorkflowParent == nil , (%s)", task_id_str)
	// return
	// }
	// }
	return
}
// NewTasksFromInterface decodes an array of generic task documents into
// []*Task; any other input type is an error.
func NewTasksFromInterface(original interface{}, context *cwl.WorkflowContext) (tasks []*Task, err error) {
	original_array, isArray := original.([]interface{})
	if !isArray {
		err = fmt.Errorf("(NewTasksFromInterface) type not supported: %s", reflect.TypeOf(original))
		return
	}
	tasks = []*Task{}
	for i := range original_array {
		var decoded *Task
		decoded, err = NewTaskFromInterface(original_array[i], context)
		if err != nil {
			err = fmt.Errorf("(NewTasksFromInterface) NewTaskFromInterface returned: %s", err.Error())
			return
		}
		tasks = append(tasks, decoded)
	}
	return
}
|
package tablestorageproxy
import (
	"bytes"
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httputil"
	"strconv"
	"strings"
	"time"
)
// GoHaveStorage supplies the Azure storage account credentials used to sign
// table-service requests.
type GoHaveStorage interface {
	// GetKey returns the account key bytes used for HMAC signing.
	GetKey() []byte
	// GetAccount returns the storage account name.
	GetAccount() string
}
// TableStorageProxy issues signed requests against one Azure Table service
// endpoint, derived from the account provided by goHaveStorage.
type TableStorageProxy struct {
	goHaveStorage GoHaveStorage
	baseUrl       string
}
// New creates a TableStorageProxy bound to the table endpoint of the account
// provided by goHaveStorage.
func New(goHaveStorage GoHaveStorage) *TableStorageProxy {
	return &TableStorageProxy{
		goHaveStorage: goHaveStorage,
		baseUrl:       "https://" + goHaveStorage.GetAccount() + ".table.core.windows.net/",
	}
}
// QueryTables requests the list of tables in the storage account.
func (tableStorageProxy *TableStorageProxy) QueryTables() {
	const target = "Tables"
	request, _ := http.NewRequest("GET", tableStorageProxy.baseUrl+target, nil)
	request.Header.Set("Accept", "application/json;odata=nometadata")
	tableStorageProxy.executeRequest(request, &http.Client{}, target)
}
// DeleteTable deletes the named table. The table name is embedded in the
// pre-escaped form Tables%28%27<name>%27%29, i.e. Tables('<name>').
func (tableStorageProxy *TableStorageProxy) DeleteTable(tableName string) {
	target := "Tables%28%27" + tableName + "%27%29"
	request, _ := http.NewRequest("DELETE", tableStorageProxy.baseUrl+target, nil)
	request.Header.Set("Content-Type", "application/atom+xml")
	tableStorageProxy.executeRequest(request, &http.Client{}, target)
}
// CreateTableArgs is the JSON request body for the Create Table operation.
type CreateTableArgs struct {
	TableName string
}
// CreateTable creates a new table named tableName in the storage account.
func (tableStorageProxy *TableStorageProxy) CreateTable(tableName string) {
	var createTableArgs CreateTableArgs
	createTableArgs.TableName = tableName
	jsonBytes, _ := json.Marshal(createTableArgs)
	target := "Tables"
	client := &http.Client{}
	request, _ := http.NewRequest("POST", tableStorageProxy.baseUrl+target, bytes.NewBuffer(jsonBytes))
	request.Header.Set("Accept", "application/json;odata=nometadata")
	request.Header.Set("Content-Type", "application/json")
	// BUG FIX: string(len(jsonBytes)) converts the int to a single rune,
	// not a decimal string; strconv.Itoa produces the correct header value
	request.Header.Set("Content-Length", strconv.Itoa(len(jsonBytes)))
	tableStorageProxy.executeRequest(request, client, target)
}
func (tableStorageProxy *TableStorageProxy) InsertEntity(tableName string, json []byte) {
client := &http.Client{}
request, _ := http.NewRequest("POST", tableStorageProxy.baseUrl+tableName, bytes.NewBuffer(json))
request.Header.Set("Accept", "application/json;odata=nometadata")
request.Header.Set("Content-Type", "application/json")
request.Header.Set("Content-Length", string(len(json)))
tableStorageProxy.executeRequest(request, client, tableName)s
}
// executeRequest signs the request with the account's SharedKeyLite
// authorization, sends it, and dumps both request and response to stdout.
// target is the resource path used in the signature string.
func (tableStorageProxy *TableStorageProxy) executeRequest(request *http.Request, client *http.Client, target string) {
	xmsdate, Authentication := tableStorageProxy.calculateDateAndAuthentication(target)
	request.Header.Set("x-ms-date", xmsdate)
	request.Header.Set("x-ms-version", "2013-08-15")
	request.Header.Set("Authorization", Authentication)
	requestDump, _ := httputil.DumpRequest(request, true)
	fmt.Printf("Request: %s\n", requestDump)
	response, err := client.Do(request)
	if err != nil {
		// BUG FIX: the original discarded this error and passed a nil
		// response to DumpResponse, which panics
		fmt.Printf("Request failed: %s\n", err.Error())
		return
	}
	// BUG FIX: close the body so the transport can reuse the connection
	defer response.Body.Close()
	responseDump, _ := httputil.DumpResponse(response, true)
	fmt.Printf("Response: %s\n", responseDump)
}
// calculateDateAndAuthentication produces the x-ms-date header value (RFC1123
// with "GMT" substituted for "UTC", backdated one minute) and the matching
// SharedKeyLite Authorization header for target.
func (tableStorageProxy *TableStorageProxy) calculateDateAndAuthentication(target string) (string, string) {
	account := tableStorageProxy.goHaveStorage.GetAccount()
	xmsdate := strings.Replace(time.Now().UTC().Add(-time.Minute).Format(time.RFC1123), "UTC", "GMT", -1)
	signatureString := xmsdate + "\n/" + account + "/" + target
	authentication := "SharedKeyLite " + account + ":" + computeHmac256(signatureString, tableStorageProxy.goHaveStorage.GetKey())
	return xmsdate, authentication
}
// computeHmac256 returns the base64-encoded HMAC-SHA256 digest of message
// keyed by key.
func computeHmac256(message string, key []byte) string {
	mac := hmac.New(sha256.New, key)
	mac.Write([]byte(message))
	digest := mac.Sum(nil)
	return base64.StdEncoding.EncodeToString(digest)
}
DRYing
package tablestorageproxy
import (
	"bytes"
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httputil"
	"strconv"
	"strings"
	"time"
)
// GoHaveStorage supplies the Azure storage account credentials used to sign
// requests: the shared key bytes and the account name.
type GoHaveStorage interface {
	GetKey() []byte
	GetAccount() string
}
// TableStorageProxy issues signed REST calls against the Azure Table Storage
// endpoint of a single storage account.
type TableStorageProxy struct {
	goHaveStorage GoHaveStorage // credential source used to sign each request
	baseUrl       string        // "https://<account>.table.core.windows.net/"
}
// New returns a TableStorageProxy targeting the table endpoint of the
// account provided by goHaveStorage.
func New(goHaveStorage GoHaveStorage) *TableStorageProxy {
	return &TableStorageProxy{
		goHaveStorage: goHaveStorage,
		baseUrl:       "https://" + goHaveStorage.GetAccount() + ".table.core.windows.net/",
	}
}
// QueryTables lists the tables in the account via a GET on the Tables
// resource, requesting the nometadata JSON representation.
func (tableStorageProxy *TableStorageProxy) QueryTables() {
	const target = "Tables"
	request, _ := http.NewRequest("GET", tableStorageProxy.baseUrl+target, nil)
	request.Header.Set("Accept", "application/json;odata=nometadata")
	tableStorageProxy.executeRequest(request, &http.Client{}, target)
}
// DeleteTable removes the named table. The target is the pre-escaped form of
// Tables('<tableName>').
func (tableStorageProxy *TableStorageProxy) DeleteTable(tableName string) {
	target := "Tables%28%27" + tableName + "%27%29"
	request, _ := http.NewRequest("DELETE", tableStorageProxy.baseUrl+target, nil)
	request.Header.Set("Content-Type", "application/atom+xml")
	tableStorageProxy.executeRequest(request, &http.Client{}, target)
}
// CreateTableArgs is the JSON request body for the create-table call.
type CreateTableArgs struct {
	TableName string
}
// CreateTable creates a table with the given name by POSTing a JSON body to
// the Tables resource.
func (tableStorageProxy *TableStorageProxy) CreateTable(tableName string) {
	// Name the result jsonBytes rather than json so the encoding/json
	// package is not shadowed by a local variable. The Marshal error is
	// ignored deliberately: encoding a struct with a single string field
	// cannot fail.
	jsonBytes, _ := json.Marshal(CreateTableArgs{TableName: tableName})
	tableStorageProxy.postJson("Tables", jsonBytes)
}
// InsertEntity POSTs the given JSON-encoded entity into the named table.
// The payload is forwarded unmodified; the caller is responsible for
// producing valid entity JSON.
func (tableStorageProxy *TableStorageProxy) InsertEntity(tableName string, json []byte) {
	tableStorageProxy.postJson(tableName, json)
}
// postJson POSTs the given JSON payload to baseUrl+target with the headers
// Azure Table Storage expects, then signs and executes the request.
func (tableStorageProxy *TableStorageProxy) postJson(target string, json []byte) {
	client := &http.Client{}
	request, _ := http.NewRequest("POST", tableStorageProxy.baseUrl+target, bytes.NewBuffer(json))
	request.Header.Set("Accept", "application/json;odata=nometadata")
	request.Header.Set("Content-Type", "application/json")
	// string(len(json)) would interpret the length as a rune code point; the
	// header needs the decimal representation of the byte count.
	request.Header.Set("Content-Length", strconv.Itoa(len(json)))
	tableStorageProxy.executeRequest(request, client, target)
}
// executeRequest signs the request for the given target with the
// SharedKeyLite scheme, sends it, and dumps both request and response to
// stdout for debugging.
func (tableStorageProxy *TableStorageProxy) executeRequest(request *http.Request, client *http.Client, target string) {
	xmsdate, authentication := tableStorageProxy.calculateDateAndAuthentication(target)
	request.Header.Set("x-ms-date", xmsdate)
	request.Header.Set("x-ms-version", "2013-08-15")
	request.Header.Set("Authorization", authentication)
	requestDump, _ := httputil.DumpRequest(request, true)
	fmt.Printf("Request: %s\n", requestDump)
	response, err := client.Do(request)
	if err != nil {
		// Previously the error was discarded and DumpResponse dereferenced a
		// nil response, panicking on any transport failure.
		fmt.Printf("Request failed: %s\n", err)
		return
	}
	// Close the body so the underlying connection can be reused.
	defer response.Body.Close()
	responseDump, _ := httputil.DumpResponse(response, true)
	fmt.Printf("Response: %s\n", responseDump)
}
// calculateDateAndAuthentication returns the x-ms-date header value and the
// SharedKeyLite Authorization header value for the given request target.
// The timestamp is backdated one minute and uses "GMT" in place of "UTC",
// as the service expects RFC1123 dates with a GMT suffix.
func (tableStorageProxy *TableStorageProxy) calculateDateAndAuthentication(target string) (string, string) {
	stamped := time.Now().UTC().Add(-time.Minute).Format(time.RFC1123)
	xmsdate := strings.Replace(stamped, "UTC", "GMT", -1)
	account := tableStorageProxy.goHaveStorage.GetAccount()
	signature := computeHmac256(xmsdate+"\n/"+account+"/"+target, tableStorageProxy.goHaveStorage.GetKey())
	return xmsdate, "SharedKeyLite " + account + ":" + signature
}
// computeHmac256 returns the base64-encoded HMAC-SHA256 digest of message
// keyed by key.
func computeHmac256(message string, key []byte) string {
	mac := hmac.New(sha256.New, key)
	mac.Write([]byte(message))
	digest := mac.Sum(nil)
	return base64.StdEncoding.EncodeToString(digest)
}
|
package firestore
import (
"context"
"fmt"
"sort"
"strings"
"time"
fs "cloud.google.com/go/firestore"
"go.skia.org/infra/go/firestore"
"go.skia.org/infra/go/sklog"
"go.skia.org/infra/go/util"
"go.skia.org/infra/task_scheduler/go/db"
"go.skia.org/infra/task_scheduler/go/types"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
const (
	// COLLECTION_JOBS is the name of the Firestore collection that stores jobs.
	COLLECTION_JOBS = "jobs"
)
// Fix all timestamps for the given job.
//
// Each timestamp field is passed through firestore.FixTimestamp (presumably
// clamping/truncating to the resolution Firestore supports — see that
// helper) so the in-memory job matches what a subsequent read would return.
func fixJobTimestamps(job *types.Job) {
	job.Created = firestore.FixTimestamp(job.Created)
	job.DbModified = firestore.FixTimestamp(job.DbModified)
	job.Finished = firestore.FixTimestamp(job.Finished)
	job.Requested = firestore.FixTimestamp(job.Requested)
}
// jobs returns a reference to the jobs collection (COLLECTION_JOBS) in the
// underlying Firestore client.
func (d *firestoreDB) jobs() *fs.CollectionRef {
	return d.client.Collection(COLLECTION_JOBS)
}
// See documentation for types.JobReader interface.
//
// Returns (nil, nil) when no job with the given ID exists, distinguishing a
// gRPC NotFound status from genuine lookup errors.
func (d *firestoreDB) GetJobById(id string) (*types.Job, error) {
	doc, err := d.client.Get(context.TODO(), d.jobs().Doc(id), DEFAULT_ATTEMPTS, GET_SINGLE_TIMEOUT)
	if st, ok := status.FromError(err); ok && st.Code() == codes.NotFound {
		// A missing document surfaces as a NotFound status, not a nil doc.
		return nil, nil
	} else if err != nil {
		return nil, err
	}
	var rv types.Job
	if err := doc.DataTo(&rv); err != nil {
		return nil, err
	}
	return &rv, nil
}
// See documentation for types.JobReader interface.
//
// Results are gathered in parallel by dateRangeHelper: init sizes one result
// bucket per goroutine, elem appends each decoded document into its
// goroutine's bucket, and the buckets are concatenated and sorted at the end.
func (d *firestoreDB) GetJobsFromDateRange(start, end time.Time, repo string) ([]*types.Job, error) {
	var jobs [][]*types.Job
	init := func(numGoroutines int) {
		jobs = make([][]*types.Job, numGoroutines)
		for i := 0; i < numGoroutines; i++ {
			// Pre-size each bucket from the estimated result count for its
			// share of the time range.
			estResults := estResultSize(end.Sub(start) / time.Duration(numGoroutines))
			jobs[i] = make([]*types.Job, 0, estResults)
		}
	}
	elem := func(idx int, doc *fs.DocumentSnapshot) error {
		var job types.Job
		if err := doc.DataTo(&job); err != nil {
			return err
		}
		if doc.Ref.ID != job.Id {
			// Log and drop inconsistent documents instead of failing the
			// whole query.
			sklog.Errorf("Job %s is stored with ID %s; GetJobById will not be able to find it!", job.Id, doc.Ref.ID)
			return nil
		}
		if repo != "" {
			if job.Repo != repo {
				sklog.Errorf("Query returned job with wrong repo; wanted %q but got %q; job: %+v", repo, job.Repo, job)
				return nil
			}
		}
		jobs[idx] = append(jobs[idx], &job)
		return nil
	}
	q := d.jobs().Query
	if repo != "" {
		q = q.Where(KEY_REPO, "==", repo)
	}
	if err := d.dateRangeHelper("GetJobsFromDateRange", q, start, end, init, elem); err != nil {
		return nil, err
	}
	totalResults := 0
	for _, jobList := range jobs {
		totalResults += len(jobList)
	}
	rv := make([]*types.Job, 0, totalResults)
	for _, jobList := range jobs {
		rv = append(rv, jobList...)
	}
	sort.Sort(types.JobSlice(rv))
	return rv, nil
}
// putJobs sets the contents of the given jobs in Firestore, as part of the
// given transaction. It is used by PutJob and PutJobs.
//
// isNew[i] indicates whether jobs[i] is expected to be absent from the DB;
// prevModified[i] is the DbModified timestamp the caller last observed for
// jobs[i]. Any mismatch between those expectations and the stored documents
// is reported as db.ErrConcurrentUpdate.
func (d *firestoreDB) putJobs(jobs []*types.Job, isNew []bool, prevModified []time.Time, tx *fs.Transaction) (rvErr error) {
	// Find the previous versions of the jobs. Ensure that they weren't
	// updated concurrently.
	refs := make([]*fs.DocumentRef, 0, len(jobs))
	for _, job := range jobs {
		refs = append(refs, d.jobs().Doc(job.Id))
	}
	docs, err := tx.GetAll(refs)
	if err != nil {
		return err
	}
	d.client.CountReadQueryAndRows(d.jobs().Path, len(docs))
	for idx, doc := range docs {
		if !doc.Exists() {
			// This is expected for new jobs.
			if !isNew[idx] {
				sklog.Errorf("Job is not new but wasn't found in the DB: %+v", jobs[idx])
				// If the job is supposed to exist but does not, then
				// we have a problem.
				return db.ErrConcurrentUpdate
			}
		} else if isNew[idx] {
			// If the job is not supposed to exist but does, then
			// we have a problem.
			var old types.Job
			if err := doc.DataTo(&old); err != nil {
				return fmt.Errorf("Job has no DbModified timestamp but already exists in the DB. Failed to decode previous job with: %s", err)
			}
			sklog.Errorf("Job has no DbModified timestamp but already exists in the DB! \"New\" job:\n%+v\nExisting job:\n%+v", jobs[idx], old)
			return db.ErrConcurrentUpdate
		}
		// If the job already exists, check the DbModified timestamp
		// to ensure that someone else didn't update it.
		if !isNew[idx] {
			var old types.Job
			if err := doc.DataTo(&old); err != nil {
				return err
			}
			if old.DbModified != prevModified[idx] {
				sklog.Infof("Concurrent update: Job %s in DB has DbModified %s; cached job has DbModified %s. \"New\" job:\n%+v\nExisting job:\n%+v", old.Id, old.DbModified.Format(time.RFC3339Nano), prevModified[idx].Format(time.RFC3339Nano), jobs[idx], old)
				return db.ErrConcurrentUpdate
			}
		}
	}
	// Set the new contents of the jobs.
	d.client.CountWriteQueryAndRows(d.jobs().Path, len(jobs))
	for _, job := range jobs {
		ref := d.jobs().Doc(job.Id)
		if err := tx.Set(ref, job); err != nil {
			return err
		}
	}
	return nil
}
// See documentation for types.JobDB interface.
//
// PutJob is a convenience wrapper around PutJobs for a single job.
func (d *firestoreDB) PutJob(job *types.Job) error {
	return d.PutJobs([]*types.Job{job})
}
// See documentation for types.JobDB interface.
//
// On success every job carries a fresh DbModified timestamp (and a new ID if
// it had none). On failure the jobs' Id and DbModified fields are restored
// to their previous values so the caller may retry.
func (d *firestoreDB) PutJobs(jobs []*types.Job) (rvErr error) {
	if len(jobs) == 0 {
		return nil
	}
	if len(jobs) > MAX_TRANSACTION_DOCS {
		return fmt.Errorf("Tried to insert %d jobs but Firestore maximum per transaction is %d.", len(jobs), MAX_TRANSACTION_DOCS)
	}
	// Record the previous ID and DbModified timestamp. We'll reset these
	// if we fail to insert the jobs into the DB.
	now := firestore.FixTimestamp(time.Now())
	isNew := make([]bool, len(jobs))
	prevId := make([]string, len(jobs))
	prevModified := make([]time.Time, len(jobs))
	for idx, job := range jobs {
		if util.TimeIsZero(job.Created) {
			return fmt.Errorf("Created not set. Job %s created time is %s. %v", job.Id, job.Created, job)
		}
		// A zero DbModified marks a job that has never been stored.
		isNew[idx] = util.TimeIsZero(job.DbModified)
		prevId[idx] = job.Id
		prevModified[idx] = job.DbModified
	}
	defer func() {
		// Roll back the in-memory mutations if the transaction failed.
		if rvErr != nil {
			for idx, job := range jobs {
				job.Id = prevId[idx]
				job.DbModified = prevModified[idx]
			}
		}
	}()
	// logmsg builds a log message to debug skia:9444.
	var logmsg strings.Builder
	fmt.Fprintf(&logmsg, "Added/updated Jobs with DbModified %s:", now.Format(time.RFC3339Nano))
	// Assign new IDs (where needed) and DbModified timestamps.
	for _, job := range jobs {
		logmsg.WriteRune(' ')
		if job.Id == "" {
			job.Id = firestore.AlphaNumID()
			logmsg.WriteRune('+')
		}
		logmsg.WriteString(job.Id)
		if !now.After(job.DbModified) {
			// We can't use the same DbModified timestamp for two updates,
			// or we risk losing updates. Increment the timestamp if
			// necessary.
			job.DbModified = job.DbModified.Add(firestore.TS_RESOLUTION)
			fmt.Fprintf(&logmsg, "@%s", job.DbModified.Format(time.RFC3339Nano))
		} else {
			job.DbModified = now
		}
		fixJobTimestamps(job)
	}
	// Insert the jobs into the DB.
	if err := d.client.RunTransaction(context.TODO(), "PutJobs", fmt.Sprintf("%d jobs", len(jobs)), DEFAULT_ATTEMPTS, PUT_MULTI_TIMEOUT, func(ctx context.Context, tx *fs.Transaction) error {
		return d.putJobs(jobs, isNew, prevModified, tx)
	}); err != nil {
		return err
	}
	sklog.Debug(logmsg.String())
	return nil
}
// See documentation for types.JobDB interface.
//
// Jobs are written in batches of at most MAX_TRANSACTION_DOCS, the
// per-transaction document limit enforced by PutJobs.
func (d *firestoreDB) PutJobsInChunks(jobs []*types.Job) error {
	return util.ChunkIter(len(jobs), MAX_TRANSACTION_DOCS, func(i, j int) error {
		return d.PutJobs(jobs[i:j])
	})
}
[task_scheduler] Drive-by err check cleanup
Change-Id: I65f20af21f88feb15d2b455dd91a5cddc0888d49
Reviewed-on: https://skia-review.googlesource.com/c/buildbot/+/258557
Commit-Queue: Eric Boren <0e499112533c8544f0505ea0d08394fb5ad7d8fa@google.com>
Reviewed-by: Eric Boren <0e499112533c8544f0505ea0d08394fb5ad7d8fa@google.com>
Auto-Submit: Kevin Lubick <7cdab2cfab351f23814786ba39716e90eed69047@google.com>
package firestore
import (
"context"
"fmt"
"sort"
"strings"
"time"
fs "cloud.google.com/go/firestore"
"go.skia.org/infra/go/firestore"
"go.skia.org/infra/go/sklog"
"go.skia.org/infra/go/util"
"go.skia.org/infra/task_scheduler/go/db"
"go.skia.org/infra/task_scheduler/go/types"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
const (
	// COLLECTION_JOBS is the name of the Firestore collection that stores jobs.
	COLLECTION_JOBS = "jobs"
)
// Fix all timestamps for the given job.
//
// Each timestamp field is passed through firestore.FixTimestamp (presumably
// clamping/truncating to the resolution Firestore supports — see that
// helper) so the in-memory job matches what a subsequent read would return.
func fixJobTimestamps(job *types.Job) {
	job.Created = firestore.FixTimestamp(job.Created)
	job.DbModified = firestore.FixTimestamp(job.DbModified)
	job.Finished = firestore.FixTimestamp(job.Finished)
	job.Requested = firestore.FixTimestamp(job.Requested)
}
// jobs returns a reference to the jobs collection (COLLECTION_JOBS) in the
// underlying Firestore client.
func (d *firestoreDB) jobs() *fs.CollectionRef {
	return d.client.Collection(COLLECTION_JOBS)
}
// See documentation for types.JobReader interface.
//
// Returns (nil, nil) when no job with the given ID exists, distinguishing a
// gRPC NotFound status from genuine lookup errors.
func (d *firestoreDB) GetJobById(id string) (*types.Job, error) {
	doc, err := d.client.Get(context.TODO(), d.jobs().Doc(id), DEFAULT_ATTEMPTS, GET_SINGLE_TIMEOUT)
	if st, ok := status.FromError(err); ok && st.Code() == codes.NotFound {
		// A missing document surfaces as a NotFound status, not a nil doc.
		return nil, nil
	} else if err != nil {
		return nil, err
	}
	var rv types.Job
	if err := doc.DataTo(&rv); err != nil {
		return nil, err
	}
	return &rv, nil
}
// See documentation for types.JobReader interface.
//
// Results are gathered in parallel by dateRangeHelper: init sizes one result
// bucket per goroutine, elem appends each decoded document into its
// goroutine's bucket, and the buckets are concatenated and sorted at the end.
func (d *firestoreDB) GetJobsFromDateRange(start, end time.Time, repo string) ([]*types.Job, error) {
	var jobs [][]*types.Job
	init := func(numGoroutines int) {
		jobs = make([][]*types.Job, numGoroutines)
		for i := 0; i < numGoroutines; i++ {
			// Pre-size each bucket from the estimated result count for its
			// share of the time range.
			estResults := estResultSize(end.Sub(start) / time.Duration(numGoroutines))
			jobs[i] = make([]*types.Job, 0, estResults)
		}
	}
	elem := func(idx int, doc *fs.DocumentSnapshot) error {
		var job types.Job
		if err := doc.DataTo(&job); err != nil {
			return err
		}
		if doc.Ref.ID != job.Id {
			// Log and drop inconsistent documents instead of failing the
			// whole query.
			sklog.Errorf("Job %s is stored with ID %s; GetJobById will not be able to find it!", job.Id, doc.Ref.ID)
			return nil
		}
		if repo != "" {
			if job.Repo != repo {
				sklog.Errorf("Query returned job with wrong repo; wanted %q but got %q; job: %+v", repo, job.Repo, job)
				return nil
			}
		}
		jobs[idx] = append(jobs[idx], &job)
		return nil
	}
	q := d.jobs().Query
	if repo != "" {
		q = q.Where(KEY_REPO, "==", repo)
	}
	if err := d.dateRangeHelper("GetJobsFromDateRange", q, start, end, init, elem); err != nil {
		return nil, err
	}
	totalResults := 0
	for _, jobList := range jobs {
		totalResults += len(jobList)
	}
	rv := make([]*types.Job, 0, totalResults)
	for _, jobList := range jobs {
		rv = append(rv, jobList...)
	}
	sort.Sort(types.JobSlice(rv))
	return rv, nil
}
// putJobs sets the contents of the given jobs in Firestore, as part of the
// given transaction. It is used by PutJob and PutJobs.
//
// isNew[i] indicates whether jobs[i] is expected to be absent from the DB;
// prevModified[i] is the DbModified timestamp the caller last observed for
// jobs[i]. Any mismatch between those expectations and the stored documents
// is reported as db.ErrConcurrentUpdate.
func (d *firestoreDB) putJobs(jobs []*types.Job, isNew []bool, prevModified []time.Time, tx *fs.Transaction) (rvErr error) {
	// Find the previous versions of the jobs. Ensure that they weren't
	// updated concurrently.
	refs := make([]*fs.DocumentRef, 0, len(jobs))
	for _, job := range jobs {
		refs = append(refs, d.jobs().Doc(job.Id))
	}
	docs, err := tx.GetAll(refs)
	if err != nil {
		return err
	}
	d.client.CountReadQueryAndRows(d.jobs().Path, len(docs))
	for idx, doc := range docs {
		if !doc.Exists() {
			// This is expected for new jobs.
			if !isNew[idx] {
				sklog.Errorf("Job is not new but wasn't found in the DB: %+v", jobs[idx])
				// If the job is supposed to exist but does not, then
				// we have a problem.
				return db.ErrConcurrentUpdate
			}
		} else if isNew[idx] {
			// If the job is not supposed to exist but does, then
			// we have a problem.
			var old types.Job
			if err := doc.DataTo(&old); err != nil {
				return fmt.Errorf("Job has no DbModified timestamp but already exists in the DB. Failed to decode previous job with: %s", err)
			}
			sklog.Errorf("Job has no DbModified timestamp but already exists in the DB! \"New\" job:\n%+v\nExisting job:\n%+v", jobs[idx], old)
			return db.ErrConcurrentUpdate
		}
		// If the job already exists, check the DbModified timestamp
		// to ensure that someone else didn't update it.
		if !isNew[idx] {
			var old types.Job
			if err := doc.DataTo(&old); err != nil {
				return err
			}
			if old.DbModified != prevModified[idx] {
				sklog.Infof("Concurrent update: Job %s in DB has DbModified %s; cached job has DbModified %s. \"New\" job:\n%+v\nExisting job:\n%+v", old.Id, old.DbModified.Format(time.RFC3339Nano), prevModified[idx].Format(time.RFC3339Nano), jobs[idx], old)
				return db.ErrConcurrentUpdate
			}
		}
	}
	// Set the new contents of the jobs.
	d.client.CountWriteQueryAndRows(d.jobs().Path, len(jobs))
	for _, job := range jobs {
		ref := d.jobs().Doc(job.Id)
		if err := tx.Set(ref, job); err != nil {
			return err
		}
	}
	return nil
}
// See documentation for types.JobDB interface.
//
// PutJob is a convenience wrapper around PutJobs for a single job.
func (d *firestoreDB) PutJob(job *types.Job) error {
	return d.PutJobs([]*types.Job{job})
}
// See documentation for types.JobDB interface.
//
// On success every job carries a fresh DbModified timestamp (and a new ID if
// it had none). On failure the jobs' Id and DbModified fields are restored
// to their previous values so the caller may retry.
func (d *firestoreDB) PutJobs(jobs []*types.Job) (rvErr error) {
	if len(jobs) == 0 {
		return nil
	}
	if len(jobs) > MAX_TRANSACTION_DOCS {
		return fmt.Errorf("Tried to insert %d jobs but Firestore maximum per transaction is %d.", len(jobs), MAX_TRANSACTION_DOCS)
	}
	// Record the previous ID and DbModified timestamp. We'll reset these
	// if we fail to insert the jobs into the DB.
	now := firestore.FixTimestamp(time.Now())
	isNew := make([]bool, len(jobs))
	prevId := make([]string, len(jobs))
	prevModified := make([]time.Time, len(jobs))
	for idx, job := range jobs {
		if util.TimeIsZero(job.Created) {
			return fmt.Errorf("Created not set. Job %s created time is %s. %v", job.Id, job.Created, job)
		}
		// A zero DbModified marks a job that has never been stored.
		isNew[idx] = util.TimeIsZero(job.DbModified)
		prevId[idx] = job.Id
		prevModified[idx] = job.DbModified
	}
	defer func() {
		// Roll back the in-memory mutations if the transaction failed.
		if rvErr != nil {
			for idx, job := range jobs {
				job.Id = prevId[idx]
				job.DbModified = prevModified[idx]
			}
		}
	}()
	// logmsg builds a log message to debug skia:9444.
	var logmsg strings.Builder
	if _, err := fmt.Fprintf(&logmsg, "Added/updated Jobs with DbModified %s:", now.Format(time.RFC3339Nano)); err != nil {
		sklog.Warningf("Error building log message: %s", err)
	}
	// Assign new IDs (where needed) and DbModified timestamps.
	for _, job := range jobs {
		logmsg.WriteRune(' ')
		if job.Id == "" {
			job.Id = firestore.AlphaNumID()
			logmsg.WriteRune('+')
		}
		logmsg.WriteString(job.Id)
		if !now.After(job.DbModified) {
			// We can't use the same DbModified timestamp for two updates,
			// or we risk losing updates. Increment the timestamp if
			// necessary.
			job.DbModified = job.DbModified.Add(firestore.TS_RESOLUTION)
			if _, err := fmt.Fprintf(&logmsg, "@%s", job.DbModified.Format(time.RFC3339Nano)); err != nil {
				sklog.Warningf("Error building log message: %s", err)
			}
		} else {
			job.DbModified = now
		}
		fixJobTimestamps(job)
	}
	// Insert the jobs into the DB.
	if err := d.client.RunTransaction(context.TODO(), "PutJobs", fmt.Sprintf("%d jobs", len(jobs)), DEFAULT_ATTEMPTS, PUT_MULTI_TIMEOUT, func(ctx context.Context, tx *fs.Transaction) error {
		return d.putJobs(jobs, isNew, prevModified, tx)
	}); err != nil {
		return err
	}
	sklog.Debug(logmsg.String())
	return nil
}
// See documentation for types.JobDB interface.
//
// Jobs are written in batches of at most MAX_TRANSACTION_DOCS, the
// per-transaction document limit enforced by PutJobs.
func (d *firestoreDB) PutJobsInChunks(jobs []*types.Job) error {
	return util.ChunkIter(len(jobs), MAX_TRANSACTION_DOCS, func(i, j int) error {
		return d.PutJobs(jobs[i:j])
	})
}
|
package ecslogs
import (
"encoding/json"
"testing"
)
var (
	// jsonLenTests enumerates values spanning the JSON type system — scalars,
	// strings, byte slices, json.Number, slices, maps, structs (including
	// omitempty tags), and an Event — used to cross-check jsonLen against the
	// length of encoding/json's actual output.
	jsonLenTests = []interface{}{
		nil,
		true,
		false,
		0,
		1,
		42,
		-1,
		-42,
		0.1234,
		"",
		"Hello World!",
		"Hello\nWorld!",
		[]byte(""),
		[]byte("Hello World!"),
		json.Number("0"),
		json.Number("1.2345"),
		[]int{},
		[]int{1, 2, 3},
		[]string{"hello", "world"},
		[]interface{}{nil, true, 42, "hey!"},
		map[string]string{},
		map[string]int{"answer": 42},
		map[string]interface{}{
			"A": nil,
			"B": true,
			"C": 42,
			"D": "hey!",
		},
		struct{}{},
		struct{ Answer int }{42},
		struct {
			A int
			B int
			C int
		}{1, 2, 3},
		struct {
			Question string
			Answer   string
		}{"How are you?", "Well"},
		map[string]interface{}{
			"struct": struct {
				OK bool `json:",omitempty"`
			}{false},
			"what?": struct {
				List   []interface{}
				String string
			}{
				List:   []interface{}{1, 2, 3},
				String: "Hello World!",
			},
		},
		Event{
			Info: EventInfo{Level: DEBUG, Time: Now()},
			Data: EventData{"message": "Hello World!"},
		},
	}
)
// TestJsonLen verifies that jsonLen predicts exactly the number of bytes
// json.Marshal produces for each fixture value.
func TestJsonLen(t *testing.T) {
	for _, test := range jsonLenTests {
		b, err := json.Marshal(test)
		if err != nil {
			// Previously the error was discarded; a marshal failure would
			// silently compare against an empty payload.
			t.Errorf("%#v: json.Marshal failed: %s", test, err)
			continue
		}
		if n := jsonLen(test); n != len(b) {
			t.Errorf("%#v => %d != %d (%s)", test, n, len(b), string(b))
		}
	}
}
// BenchmarkJsonLen measures jsonLen across the shared fixture set.
func BenchmarkJsonLen(b *testing.B) {
	for i := 0; i < b.N; i++ {
		for _, test := range jsonLenTests {
			jsonLen(test)
		}
	}
}
// BenchmarkJsonMarshal measures json.Marshal across the shared fixture set,
// for comparison against BenchmarkJsonLen.
func BenchmarkJsonMarshal(b *testing.B) {
	for i := 0; i < b.N; i++ {
		for _, test := range jsonLenTests {
			json.Marshal(test)
		}
	}
}
add benchmark against json encoder to ioutil.Discard
package ecslogs
import (
"encoding/json"
"io/ioutil"
"testing"
)
var (
	// jsonLenTests enumerates values spanning the JSON type system — scalars,
	// strings, byte slices, json.Number, slices, maps, structs (including
	// omitempty tags), and an Event — used to cross-check jsonLen against the
	// length of encoding/json's actual output.
	jsonLenTests = []interface{}{
		nil,
		true,
		false,
		0,
		1,
		42,
		-1,
		-42,
		0.1234,
		"",
		"Hello World!",
		"Hello\nWorld!",
		[]byte(""),
		[]byte("Hello World!"),
		json.Number("0"),
		json.Number("1.2345"),
		[]int{},
		[]int{1, 2, 3},
		[]string{"hello", "world"},
		[]interface{}{nil, true, 42, "hey!"},
		map[string]string{},
		map[string]int{"answer": 42},
		map[string]interface{}{
			"A": nil,
			"B": true,
			"C": 42,
			"D": "hey!",
		},
		struct{}{},
		struct{ Answer int }{42},
		struct {
			A int
			B int
			C int
		}{1, 2, 3},
		struct {
			Question string
			Answer   string
		}{"How are you?", "Well"},
		map[string]interface{}{
			"struct": struct {
				OK bool `json:",omitempty"`
			}{false},
			"what?": struct {
				List   []interface{}
				String string
			}{
				List:   []interface{}{1, 2, 3},
				String: "Hello World!",
			},
		},
		Event{
			Info: EventInfo{Level: DEBUG, Time: Now()},
			Data: EventData{"message": "Hello World!"},
		},
	}
)
// TestJsonLen verifies that jsonLen predicts exactly the number of bytes
// json.Marshal produces for each fixture value.
func TestJsonLen(t *testing.T) {
	for _, test := range jsonLenTests {
		b, err := json.Marshal(test)
		if err != nil {
			// Previously the error was discarded; a marshal failure would
			// silently compare against an empty payload.
			t.Errorf("%#v: json.Marshal failed: %s", test, err)
			continue
		}
		if n := jsonLen(test); n != len(b) {
			t.Errorf("%#v => %d != %d (%s)", test, n, len(b), string(b))
		}
	}
}
// BenchmarkJsonLen measures jsonLen across the shared fixture set.
func BenchmarkJsonLen(b *testing.B) {
	for i := 0; i < b.N; i++ {
		for _, test := range jsonLenTests {
			jsonLen(test)
		}
	}
}
// BenchmarkJsonMarshal measures json.Marshal across the shared fixture set,
// for comparison against BenchmarkJsonLen.
func BenchmarkJsonMarshal(b *testing.B) {
	for i := 0; i < b.N; i++ {
		for _, test := range jsonLenTests {
			json.Marshal(test)
		}
	}
}
// BenchmarkJsonMarshalDevNull measures streaming JSON encoding of the
// fixture set to ioutil.Discard.
func BenchmarkJsonMarshalDevNull(b *testing.B) {
	// Create the encoder once: json.Encoder is designed to be reused across
	// Encode calls, and allocating a fresh encoder per value made the
	// benchmark measure encoder construction as much as encoding itself.
	e := json.NewEncoder(ioutil.Discard)
	for i := 0; i < b.N; i++ {
		for _, test := range jsonLenTests {
			e.Encode(test)
		}
	}
}
|
package qb
import (
"fmt"
"github.com/nu7hatch/gouuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"testing"
"time"
)
// pUser is a test model mapped to the p_user table; qb tags declare column
// types and constraints, db tags the column names.
type pUser struct {
	ID       string  `qb:"type:uuid; constraints:primary_key, auto_increment" db:"id"`
	Email    string  `qb:"constraints:unique, notnull" db:"email"`
	FullName string  `qb:"constraints:notnull" db:"full_name"`
	Bio      *string `qb:"type:text; constraints:null" db:"bio"`
	Oscars   int     `qb:"constraints:default(0)" db:"oscars"`
}
// pSession is a test model for login sessions, with a foreign-key reference
// to p_user.id.
type pSession struct {
	ID        int64     `qb:"type:bigserial; constraints:primary_key" db:"id"`
	UserID    string    `qb:"type:uuid; constraints:ref(p_user.id)" db:"user_id"`
	AuthToken string    `qb:"type:uuid; constraints:notnull, unique" db:"auth_token"`
	CreatedAt time.Time `qb:"constraints:notnull" db:"created_at"`
	ExpiresAt time.Time `qb:"constraints:notnull" db:"expires_at"`
}
// pFailModel declares an invalid column type so CreateAll/DropAll fail;
// used to exercise the error paths.
type pFailModel struct {
	ID int64 `qb:"type:notype"`
}
// PostgresTestSuite exercises qb end-to-end against a live local Postgres
// instance (user=postgres, dbname=qb_test).
type PostgresTestSuite struct {
	suite.Suite
	metadata *MetaData
	dialect  *Dialect
	engine   *Engine
	session  *Session
}
// SetupTest connects to the local qb_test database and initializes the
// dialect, metadata, and session used by the test.
func (suite *PostgresTestSuite) SetupTest() {
	engine, err := NewEngine("postgres", "user=postgres dbname=qb_test sslmode=disable")
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), engine)
	suite.engine = engine
	suite.dialect = NewDialect(engine.Driver())
	suite.metadata = NewMetaData(engine)
	suite.session = NewSession(suite.metadata)
}
// TestPostgres runs the full CRUD round trip: table creation, inserts via
// dialect/table/session, selects, joins, updates, deletes, expected
// failures, and teardown.
func (suite *PostgresTestSuite) TestPostgres() {
	var err error
	// create tables
	suite.metadata.Add(pUser{})
	suite.metadata.Add(pSession{})
	err = suite.metadata.CreateAll()
	assert.Nil(suite.T(), err)
	fmt.Println()
	// insert user using dialect
	insUserJN := suite.dialect.
		Insert("p_user").Values(
		map[string]interface{}{
			"id":        "b6f8bfe3-a830-441a-a097-1777e6bfae95",
			"email":     "jack@nicholson.com",
			"full_name": "Jack Nicholson",
			"bio":       "Jack Nicholson, an American actor, producer, screen-writer and director, is a three-time Academy Award winner and twelve-time nominee.",
		}).Query()
	fmt.Println(insUserJN.SQL())
	fmt.Println(insUserJN.Bindings())
	fmt.Println()
	_, err = suite.metadata.Engine().Exec(insUserJN)
	assert.Nil(suite.T(), err)
	// insert user using table
	ddlID, _ := uuid.NewV4()
	insUserDDL := suite.metadata.Table("p_user").Insert(
		map[string]interface{}{
			"id":        ddlID.String(),
			"email":     "daniel@day-lewis.com",
			"full_name": "Daniel Day-Lewis",
		}).Query()
	_, err = suite.metadata.Engine().Exec(insUserDDL)
	assert.Nil(suite.T(), err)
	// insert user using session
	rdnID, _ := uuid.NewV4()
	rdn := pUser{
		ID:       rdnID.String(),
		Email:    "robert@de-niro.com",
		FullName: "Robert De Niro",
		Oscars:   3,
	}
	apId, _ := uuid.NewV4()
	ap := pUser{
		ID:       apId.String(),
		Email:    "al@pacino.com",
		FullName: "Al Pacino",
		Oscars:   1,
	}
	suite.session.AddAll(rdn, ap)
	err = suite.session.Commit()
	assert.Nil(suite.T(), err)
	// find user using session
	findRdn := pUser{
		ID: rdn.ID,
	}
	err = suite.session.Find(&findRdn).First(&findRdn)
	assert.Nil(suite.T(), err)
	assert.Equal(suite.T(), findRdn.Email, "robert@de-niro.com")
	assert.Equal(suite.T(), findRdn.FullName, "Robert De Niro")
	assert.Equal(suite.T(), findRdn.Oscars, 3)
	fmt.Println(findRdn)
	// find users using session
	findUsers := []pUser{}
	err = suite.session.Find(&pUser{}).All(&findUsers)
	// Previously the error from All was assigned but never checked.
	assert.Nil(suite.T(), err)
	fmt.Println(findUsers)
	// delete user using session api
	suite.session.Delete(rdn)
	err = suite.session.Commit()
	assert.Nil(suite.T(), err)
	// insert session using dialect
	insSession := suite.dialect.Insert("p_session").Values(
		map[string]interface{}{
			"user_id":    "b6f8bfe3-a830-441a-a097-1777e6bfae95",
			"auth_token": "e4968197-6137-47a4-ba79-690d8c552248",
			"created_at": time.Now(),
			"expires_at": time.Now().Add(24 * time.Hour),
		}).Query()
	_, err = suite.metadata.Engine().Exec(insSession)
	assert.Nil(suite.T(), err)
	// select user
	selUser := suite.dialect.
		Select("id", "email", "full_name", "bio").
		From("p_user").
		Where("p_user.id = ?", "b6f8bfe3-a830-441a-a097-1777e6bfae95").
		Query()
	var user pUser
	suite.metadata.Engine().QueryRow(selUser).Scan(&user.ID, &user.Email, &user.FullName, &user.Bio)
	assert.Equal(suite.T(), user.ID, "b6f8bfe3-a830-441a-a097-1777e6bfae95")
	assert.Equal(suite.T(), user.Email, "jack@nicholson.com")
	assert.Equal(suite.T(), user.FullName, "Jack Nicholson")
	// select sessions
	selSessions := suite.dialect.
		Select("s.id", "s.auth_token", "s.created_at", "s.expires_at").
		From("p_user u").
		InnerJoin("p_session s", "u.id = s.user_id").
		Where("u.id = ?", "b6f8bfe3-a830-441a-a097-1777e6bfae95").
		Query()
	rows, err := suite.metadata.Engine().Query(selSessions)
	assert.Nil(suite.T(), err)
	// The condition was inverted: rows must be closed on SUCCESS. The old
	// code leaked rows when the query worked and deferred Close on a nil
	// rows when it failed.
	if err == nil {
		defer rows.Close()
	}
	sessions := []pSession{}
	for rows.Next() {
		var session pSession
		rows.Scan(&session.ID, &session.AuthToken, &session.CreatedAt, &session.ExpiresAt)
		assert.True(suite.T(), session.ID >= int64(1))
		assert.NotNil(suite.T(), session.CreatedAt)
		assert.NotNil(suite.T(), session.ExpiresAt)
		sessions = append(sessions, session)
	}
	assert.Equal(suite.T(), len(sessions), 1)
	// update session
	query := suite.dialect.
		Update("p_session").
		Set(
			map[string]interface{}{
				"auth_token": "99e591f8-1025-41ef-a833-6904a0f89a38",
			},
		).
		Where("id = ?", 1).Query()
	_, err = suite.metadata.Engine().Exec(query)
	assert.Nil(suite.T(), err)
	// delete session
	delSession := suite.dialect.
		Delete("p_session").
		Where("auth_token = ?", "99e591f8-1025-41ef-a833-6904a0f89a38").
		Query()
	_, err = suite.metadata.Engine().Exec(delSession)
	assert.Nil(suite.T(), err)
	// insert failure
	insFail := suite.dialect.
		Insert("p_user").
		Values(
			map[string]interface{}{
				"invalid_column": "invalid_value",
			}).
		Query()
	_, err = suite.metadata.Engine().Exec(insFail)
	assert.NotNil(suite.T(), err)
	// insert type failure
	insTypeFail := suite.dialect.
		Insert("p_user").
		Values(map[string]interface{}{
			"email": 5,
		}).Query()
	_, err = suite.metadata.Engine().Exec(insTypeFail)
	assert.NotNil(suite.T(), err)
	// drop tables
	err = suite.metadata.DropAll()
	assert.Nil(suite.T(), err)
	// metadata create all fail
	metadata := NewMetaData(suite.engine)
	metadata.Add(pFailModel{})
	assert.NotNil(suite.T(), metadata.CreateAll())
	assert.NotNil(suite.T(), metadata.DropAll())
}
// TestPostgresTestSuite runs the Postgres integration suite.
func TestPostgresTestSuite(t *testing.T) {
	suite.Run(t, new(PostgresTestSuite))
}
remove db tag from postgres test structs
package qb
import (
"fmt"
"github.com/nu7hatch/gouuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"testing"
"time"
)
// pUser is a test model mapped to the p_user table; qb tags declare column
// types and constraints (column names are derived from the field names).
type pUser struct {
	ID       string  `qb:"type:uuid; constraints:primary_key, auto_increment"`
	Email    string  `qb:"constraints:unique, notnull"`
	FullName string  `qb:"constraints:notnull"`
	Bio      *string `qb:"type:text; constraints:null"`
	Oscars   int     `qb:"constraints:default(0)"`
}
// pSession is a test model for login sessions, with a foreign-key reference
// to p_user.id.
type pSession struct {
	ID        int64     `qb:"type:bigserial; constraints:primary_key"`
	UserID    string    `qb:"type:uuid; constraints:ref(p_user.id)"`
	AuthToken string    `qb:"type:uuid; constraints:notnull, unique"`
	CreatedAt time.Time `qb:"constraints:notnull"`
	ExpiresAt time.Time `qb:"constraints:notnull"`
}
// pFailModel declares an invalid column type so CreateAll/DropAll fail;
// used to exercise the error paths.
type pFailModel struct {
	ID int64 `qb:"type:notype"`
}
// PostgresExpressionTestSuite exercises the dialect/expression API against a
// live local Postgres instance (user=postgres, dbname=qb_test).
type PostgresExpressionTestSuite struct {
	suite.Suite
	metadata *MetaData
	dialect  *Dialect
	engine   *Engine
	session  *Session
}
// SetupTest connects to the local qb_test database and initializes the
// dialect, metadata, and session used by the test.
func (suite *PostgresExpressionTestSuite) SetupTest() {
	engine, err := NewEngine("postgres", "user=postgres dbname=qb_test sslmode=disable")
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), engine)
	suite.engine = engine
	suite.dialect = NewDialect(engine.Driver())
	suite.metadata = NewMetaData(engine)
	suite.session = NewSession(suite.metadata)
}
// TestPostgresExpression runs the dialect-level CRUD round trip: table
// creation, inserts, selects, joins, updates, deletes, expected failures,
// and teardown.
func (suite *PostgresExpressionTestSuite) TestPostgresExpression() {
	var err error
	// create tables
	suite.metadata.Add(pUser{})
	suite.metadata.Add(pSession{})
	err = suite.metadata.CreateAll()
	assert.Nil(suite.T(), err)
	fmt.Println()
	// insert user using dialect
	insUserJN := suite.dialect.
		Insert("p_user").Values(
		map[string]interface{}{
			"id":        "b6f8bfe3-a830-441a-a097-1777e6bfae95",
			"email":     "jack@nicholson.com",
			"full_name": "Jack Nicholson",
			"bio":       "Jack Nicholson, an American actor, producer, screen-writer and director, is a three-time Academy Award winner and twelve-time nominee.",
		}).Query()
	fmt.Println(insUserJN.SQL())
	fmt.Println(insUserJN.Bindings())
	fmt.Println()
	_, err = suite.metadata.Engine().Exec(insUserJN)
	assert.Nil(suite.T(), err)
	// insert user using table
	ddlID, _ := uuid.NewV4()
	insUserDDL := suite.metadata.Table("p_user").Insert(
		map[string]interface{}{
			"id":        ddlID.String(),
			"email":     "daniel@day-lewis.com",
			"full_name": "Daniel Day-Lewis",
		}).Query()
	_, err = suite.metadata.Engine().Exec(insUserDDL)
	assert.Nil(suite.T(), err)
	// insert session using dialect
	insSession := suite.dialect.Insert("p_session").Values(
		map[string]interface{}{
			"user_id":    "b6f8bfe3-a830-441a-a097-1777e6bfae95",
			"auth_token": "e4968197-6137-47a4-ba79-690d8c552248",
			"created_at": time.Now(),
			"expires_at": time.Now().Add(24 * time.Hour),
		}).Query()
	_, err = suite.metadata.Engine().Exec(insSession)
	assert.Nil(suite.T(), err)
	// select user using dialect
	selUser := suite.dialect.
		Select("id", "email", "full_name", "bio").
		From("p_user").
		Where("p_user.id = ?", "b6f8bfe3-a830-441a-a097-1777e6bfae95").
		Query()
	var user pUser
	suite.metadata.Engine().QueryRow(selUser).Scan(&user.ID, &user.Email, &user.FullName, &user.Bio)
	assert.Equal(suite.T(), user.ID, "b6f8bfe3-a830-441a-a097-1777e6bfae95")
	assert.Equal(suite.T(), user.Email, "jack@nicholson.com")
	assert.Equal(suite.T(), user.FullName, "Jack Nicholson")
	// select sessions
	selSessions := suite.dialect.
		Select("s.id", "s.auth_token", "s.created_at", "s.expires_at").
		From("p_user u").
		InnerJoin("p_session s", "u.id = s.user_id").
		Where("u.id = ?", "b6f8bfe3-a830-441a-a097-1777e6bfae95").
		Query()
	rows, err := suite.metadata.Engine().Query(selSessions)
	assert.Nil(suite.T(), err)
	// The condition was inverted: rows must be closed on SUCCESS. The old
	// code leaked rows when the query worked and deferred Close on a nil
	// rows when it failed.
	if err == nil {
		defer rows.Close()
	}
	sessions := []pSession{}
	for rows.Next() {
		var session pSession
		rows.Scan(&session.ID, &session.AuthToken, &session.CreatedAt, &session.ExpiresAt)
		assert.True(suite.T(), session.ID >= int64(1))
		assert.NotNil(suite.T(), session.CreatedAt)
		assert.NotNil(suite.T(), session.ExpiresAt)
		sessions = append(sessions, session)
	}
	assert.Equal(suite.T(), len(sessions), 1)
	// update session
	query := suite.dialect.
		Update("p_session").
		Set(
			map[string]interface{}{
				"auth_token": "99e591f8-1025-41ef-a833-6904a0f89a38",
			},
		).
		Where("id = ?", 1).Query()
	_, err = suite.metadata.Engine().Exec(query)
	assert.Nil(suite.T(), err)
	// delete session
	delSession := suite.dialect.
		Delete("p_session").
		Where("auth_token = ?", "99e591f8-1025-41ef-a833-6904a0f89a38").
		Query()
	_, err = suite.metadata.Engine().Exec(delSession)
	assert.Nil(suite.T(), err)
	// insert failure
	insFail := suite.dialect.
		Insert("p_user").
		Values(
			map[string]interface{}{
				"invalid_column": "invalid_value",
			}).
		Query()
	_, err = suite.metadata.Engine().Exec(insFail)
	assert.NotNil(suite.T(), err)
	// insert type failure
	insTypeFail := suite.dialect.
		Insert("p_user").
		Values(map[string]interface{}{
			"email": 5,
		}).Query()
	_, err = suite.metadata.Engine().Exec(insTypeFail)
	assert.NotNil(suite.T(), err)
	// drop tables
	err = suite.metadata.DropAll()
	assert.Nil(suite.T(), err)
	// metadata create all fail
	metadata := NewMetaData(suite.engine)
	metadata.Add(pFailModel{})
	assert.NotNil(suite.T(), metadata.CreateAll())
	assert.NotNil(suite.T(), metadata.DropAll())
}
// PostgresSessionTestSuite exercises the Session API against a live
// PostgreSQL database (connection parameters are in SetupTest).
type PostgresSessionTestSuite struct {
	suite.Suite
	metadata *MetaData // schema registry rebuilt before each test
	engine   *Engine   // live postgres connection
	session  *Session  // unit of work under test
}
// SetupTest opens a fresh engine, metadata registry, and session
// before each test in the suite.
func (suite *PostgresSessionTestSuite) SetupTest() {
	eng, err := NewEngine("postgres", "user=postgres dbname=qb_test sslmode=disable")
	assert.Nil(suite.T(), err)
	assert.NotNil(suite.T(), eng)
	suite.engine = eng
	suite.metadata = NewMetaData(eng)
	suite.session = NewSession(suite.metadata)
}
// TestPostgresSession exercises the session API end to end against a
// live PostgreSQL database: schema creation, bulk insert via AddAll,
// querying with First and All, and delete, committing between steps.
func (suite *PostgresSessionTestSuite) TestPostgresSession() {
	var err error

	// create tables
	suite.metadata.Add(pUser{})
	suite.metadata.Add(pSession{})
	err = suite.metadata.CreateAll()
	assert.Nil(suite.T(), err)

	// insert users using session
	rdnID, _ := uuid.NewV4()
	rdn := pUser{
		ID:       rdnID.String(),
		Email:    "robert@de-niro.com",
		FullName: "Robert De Niro",
		Oscars:   3,
	}
	apId, _ := uuid.NewV4()
	ap := pUser{
		ID:       apId.String(),
		Email:    "al@pacino.com",
		FullName: "Al Pacino",
		Oscars:   1,
	}
	suite.session.AddAll(rdn, ap)
	err = suite.session.Commit()
	assert.Nil(suite.T(), err)

	// find first user using session
	var usr pUser
	err = suite.session.Find(pUser{ID: rdnID.String()}).First(&usr)
	assert.Nil(suite.T(), err)
	assert.Equal(suite.T(), usr.Email, "robert@de-niro.com")
	assert.Equal(suite.T(), usr.FullName, "Robert De Niro")
	assert.Equal(suite.T(), usr.Oscars, 3)
	fmt.Println(usr)

	// find filter by all using session.
	// BUG FIX: the error from All was previously discarded, so a failed
	// query would silently pass the test.
	oneOscarUsers := []pUser{}
	err = suite.session.Find(&pUser{Oscars: 1}).All(&oneOscarUsers)
	assert.Nil(suite.T(), err)
	fmt.Println("One oscar users;")
	fmt.Println(oneOscarUsers)

	// delete user using session api
	suite.session.Delete(rdn)
	err = suite.session.Commit()
	assert.Nil(suite.T(), err)
}
// TestPostgresTestSuite runs all PostgreSQL-backed test suites.
func TestPostgresTestSuite(t *testing.T) {
	for _, s := range []suite.TestingSuite{
		new(PostgresExpressionTestSuite),
		new(PostgresSessionTestSuite),
	} {
		suite.Run(t, s)
	}
}
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.13 && (linux || darwin)
// +build go1.13
// +build linux darwin
package main
import (
"bufio"
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"html"
"html/template"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"regexp"
"runtime"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/google/go-github/github"
"go.opencensus.io/stats"
"golang.org/x/build/cmd/coordinator/internal"
"golang.org/x/build/dashboard"
"golang.org/x/build/internal/coordinator/pool"
"golang.org/x/build/internal/foreach"
"golang.org/x/build/internal/secret"
"golang.org/x/build/kubernetes/api"
"golang.org/x/oauth2"
)
// status

// statusLevel indicates the severity of one health-check message.
type statusLevel int

const (
	// levelInfo is an informational text that's not an error,
	// such as "coordinator just started recently, waiting to
	// start health check"
	levelInfo statusLevel = iota
	// levelWarn is a non-critical error, such as "missing 1 of 50
	// of ARM machines"
	levelWarn
	// levelError is something that should be fixed sooner, such
	// as "all Macs are gone".
	levelError
)
// String returns the level's display name ("Info", "Warn", "Error"),
// or the empty string for an unknown value.
func (l statusLevel) String() string {
	names := [...]string{
		levelInfo:  "Info",
		levelWarn:  "Warn",
		levelError: "Error",
	}
	if l < 0 || int(l) >= len(names) {
		return ""
	}
	return names[l]
}
// levelText pairs one health-check message with its severity level.
type levelText struct {
	Level statusLevel
	Text  string
}
// AsHTML renders the message as HTML colored by severity: plain text
// for info, orange for warnings, bold red for errors. The text is
// always HTML-escaped; unknown levels render as an empty string.
func (lt levelText) AsHTML() template.HTML {
	escaped := html.EscapeString(lt.Text)
	switch lt.Level {
	case levelInfo:
		return template.HTML(escaped)
	case levelWarn:
		return template.HTML("<span style='color: orange'>" + escaped + "</span>")
	case levelError:
		return template.HTML("<span style='color: red'><b>" + escaped + "</b></span>")
	}
	return ""
}
// checkWriter accumulates the output lines of a single health check run.
type checkWriter struct {
	Out []levelText // messages in the order they were recorded
}
// add appends a single message at the given severity level.
func (w *checkWriter) add(level statusLevel, s string) { w.Out = append(w.Out, levelText{level, s}) }

// error records a critical problem; errorf is its Sprintf variant.
func (w *checkWriter) error(s string)                       { w.add(levelError, s) }
func (w *checkWriter) errorf(a string, args ...interface{}) { w.error(fmt.Sprintf(a, args...)) }

// info records an informational message; infof is its Sprintf variant.
func (w *checkWriter) info(s string)                       { w.add(levelInfo, s) }
func (w *checkWriter) infof(a string, args ...interface{}) { w.info(fmt.Sprintf(a, args...)) }

// warn records a non-critical problem; warnf is its Sprintf variant.
func (w *checkWriter) warn(s string)                       { w.add(levelWarn, s) }
func (w *checkWriter) warnf(a string, args ...interface{}) { w.warn(fmt.Sprintf(a, args...)) }
// hasErrors reports whether any recorded message is at levelError.
func (w *checkWriter) hasErrors() bool {
	for i := range w.Out {
		if w.Out[i].Level == levelError {
			return true
		}
	}
	return false
}
// healthChecker describes one named health check, rendered on the main
// status page and served individually at /status/<ID>.
type healthChecker struct {
	ID     string // unique slug; registry key and URL path element
	Title  string // human-readable name shown on the status page
	DocURL string // optional documentation link

	// Check writes the health check status to a checkWriter.
	//
	// It's called when rendering the HTML page, so expensive
	// operations (network calls, etc.) should be done in a
	// separate goroutine and Check should report their results.
	Check func(*checkWriter)
}
// DoCheck runs the health check and returns its accumulated output.
func (hc *healthChecker) DoCheck() *checkWriter {
	w := &checkWriter{}
	hc.Check(w)
	return w
}
// Registry of all health checkers, in registration order and keyed by
// ID. Both are written only via addHealthChecker.
var (
	healthCheckers    []*healthChecker
	healthCheckerByID = map[string]*healthChecker{}
)
// addHealthChecker registers hc in the global registry and installs its
// HTTP status handler at /status/<ID>. It panics on a duplicate ID.
func addHealthChecker(hc *healthChecker) {
	id := hc.ID
	if _, exists := healthCheckerByID[id]; exists {
		panic("duplicate health checker ID " + id)
	}
	healthCheckers = append(healthCheckers, hc)
	healthCheckerByID[id] = hc
	http.Handle("/status/"+id, healthCheckerHandler(hc))
}
// basePinErr is the status of the start-up time basepin disk creation
// in gce.go. It's of type string; no value means no result yet,
// empty string means success, and non-empty means an error.
// It is read by newBasepinChecker's Check function.
var basePinErr atomic.Value
// addHealthCheckers registers every health check the coordinator
// exposes. Call it once at start-up; sc may be nil, in which case the
// GitHub API check disables itself.
func addHealthCheckers(ctx context.Context, sc *secret.Client) {
	addHealthChecker(newMacHealthChecker())
	addHealthChecker(newMacOSARM64Checker())
	addHealthChecker(newScalewayHealthChecker())
	addHealthChecker(newPacketHealthChecker())
	addHealthChecker(newOSUPPC64Checker())
	addHealthChecker(newOSUPPC64leChecker())
	addHealthChecker(newOSUPPC64lePower9Checker())
	addHealthChecker(newBasepinChecker())
	addHealthChecker(newGitMirrorChecker())
	addHealthChecker(newTipGolangOrgChecker(ctx))
	addHealthChecker(newGitHubAPIChecker(ctx, sc))
}
// newBasepinChecker reports the status of the start-up basepin disk
// creation in gce.go, as published via basePinErr: no value means still
// running, empty string means success, non-empty is the error message.
func newBasepinChecker() *healthChecker {
	return &healthChecker{
		ID:     "basepin",
		Title:  "VM snapshots",
		DocURL: "https://golang.org/issue/21305",
		Check: func(w *checkWriter) {
			v := basePinErr.Load()
			if v == nil {
				// Creation hasn't reported a result yet.
				// Was w.warnf; the format variant is unnecessary for a
				// constant message with no verbs.
				w.warn("still running")
				return
			}
			if v == "" {
				// Empty string means success; nothing to report.
				return
			}
			w.error(v.(string))
		},
	}
}
// gitMirrorStatus is the latest known status of the gitmirror service.
// It is written by monitorGitMirror and read by newGitMirrorChecker's
// Check function; the mutex guards both slices.
var gitMirrorStatus = struct {
	sync.Mutex
	Errors   []string
	Warnings []string
}{Warnings: []string{"still checking"}}
// monitorGitMirror polls all running gitmirror instances every 30
// seconds, forever, publishing results into gitMirrorStatus.
// It never returns; run it in its own goroutine.
func monitorGitMirror() {
	for {
		errs, warns := gitMirrorErrors()
		gitMirrorStatus.Lock()
		gitMirrorStatus.Errors, gitMirrorStatus.Warnings = errs, warns
		gitMirrorStatus.Unlock()
		time.Sleep(30 * time.Second)
	}
}
// gitMirrorErrors queries the status pages of all
// running gitmirror instances and reports errors.
//
// It makes use of pool.KubeGoClient() to do the query.
// The whole sweep is bounded by a 20-second timeout.
func gitMirrorErrors() (errs, warns []string) {
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()
	pods, err := pool.KubeGoClient().GetPods(ctx)
	if err != nil {
		log.Println("gitMirrorErrors: goKubeClient.GetPods:", err)
		return []string{"failed to get pods; can't query gitmirror status"}, nil
	}
	// Keep only pods labeled app=gitmirror that are actually running.
	var runningGitMirror []api.Pod
	for _, p := range pods {
		if p.Labels["app"] != "gitmirror" || p.Status.Phase != "Running" {
			continue
		}
		runningGitMirror = append(runningGitMirror, p)
	}
	if len(runningGitMirror) == 0 {
		return []string{"no running gitmirror instances"}, nil
	}
	for _, pod := range runningGitMirror {
		// The gitmirror -http=:8585 status page URL is hardcoded here.
		// If the ReplicationController configuration changes (rare), this
		// health check will begin to fail until it's updated accordingly.
		instErrs, instWarns := gitMirrorInstanceErrors(ctx, fmt.Sprintf("http://%s:8585/", pod.Status.PodIP))
		for _, err := range instErrs {
			errs = append(errs, fmt.Sprintf("instance %s: %s", pod.Name, err))
		}
		for _, warn := range instWarns {
			warns = append(warns, fmt.Sprintf("instance %s: %s", pod.Name, warn))
		}
	}
	return errs, warns
}
// gitMirrorInstanceErrors fetches the status page of a single gitmirror
// instance at url and parses it into errors and warnings. Transport,
// HTTP, and parse failures are all reported as errors.
func gitMirrorInstanceErrors(ctx context.Context, url string) (errs, warns []string) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		// Previously ignored; a malformed URL would have passed a nil
		// request to Do.
		return []string{err.Error()}, nil
	}
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return []string{err.Error()}, nil
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return []string{res.Status}, nil
	}
	// TODO: add a JSON mode to gitmirror so we don't need to parse HTML.
	// This works for now. We control its output.
	bs := bufio.NewScanner(res.Body)
	for bs.Scan() {
		// Lines look like:
		// <html><body><pre><a href='/debug/watcher/arch'>arch</a> - ok
		// or:
		// <a href='/debug/watcher/arch'>arch</a> - ok
		// (See https://farmer.golang.org/debug/watcher/)
		line := bs.Text()
		if strings.HasSuffix(line, " - ok") {
			continue
		}
		m := gitMirrorLineRx.FindStringSubmatch(line)
		if len(m) != 3 {
			if strings.Contains(line, "</html>") {
				break
			}
			return []string{fmt.Sprintf("error parsing line %q", line)}, nil
		}
		if strings.HasPrefix(m[2], "ok; ") {
			// If the status begins with "ok", it can't be that bad.
			warns = append(warns, fmt.Sprintf("repo %s: %s", m[1], m[2]))
			continue
		}
		errs = append(errs, fmt.Sprintf("repo %s: %s", m[1], m[2]))
	}
	if err := bs.Err(); err != nil {
		errs = append(errs, err.Error())
	}
	return errs, warns
}
// gitMirrorLineRx parses one repo line of the gitmirror status page:
// $1 is repo; $2 is error message
var gitMirrorLineRx = regexp.MustCompile(`/debug/watcher/([\w-]+).?>.+</a> - (.*)`)
// newGitMirrorChecker reports the most recent gitmirror poll results
// gathered by monitorGitMirror into gitMirrorStatus.
func newGitMirrorChecker() *healthChecker {
	check := func(w *checkWriter) {
		gitMirrorStatus.Lock()
		errs := gitMirrorStatus.Errors
		warns := gitMirrorStatus.Warnings
		gitMirrorStatus.Unlock()
		for _, e := range errs {
			w.error(e)
		}
		for _, v := range warns {
			w.warn(v)
		}
	}
	return &healthChecker{
		ID:     "gitmirror",
		Title:  "Git mirroring",
		DocURL: "https://github.com/golang/build/tree/master/cmd/gitmirror",
		Check:  check,
	}
}
// newTipGolangOrgChecker reports the health of the tip.golang.org
// website. It starts a background goroutine that refreshes the status
// every 30 seconds; the goroutine now exits when ctx is canceled
// (previously it looped forever), matching the ticker+ctx.Done pattern
// used by newGitHubAPIChecker.
func newTipGolangOrgChecker(ctx context.Context) *healthChecker {
	// tipError is the status of the tip.golang.org website.
	// It's of type string; no value means no result yet,
	// empty string means success, and non-empty means an error.
	var tipError atomic.Value
	go func() {
		t := time.NewTicker(30 * time.Second)
		defer t.Stop()
		for {
			tipError.Store(fetchTipGolangOrgError(ctx))
			select {
			case <-t.C:
			case <-ctx.Done():
				return
			}
		}
	}()
	return &healthChecker{
		ID:     "tip",
		Title:  "tip.golang.org website",
		DocURL: "https://github.com/golang/build/tree/master/cmd/tip",
		Check: func(w *checkWriter) {
			e, ok := tipError.Load().(string)
			if !ok {
				w.warn("still checking")
			} else if e != "" {
				w.error(e)
			}
		},
	}
}
// fetchTipGolangOrgError fetches the error= value from
// https://tip.golang.org/_tipstatus. It returns the empty string on
// success and a description of the problem otherwise. The request is
// bounded by a 10-second timeout on top of ctx.
func fetchTipGolangOrgError(ctx context.Context) string {
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	// Use the one-step context-aware constructor (was NewRequest +
	// WithContext with the error ignored), consistent with
	// gitMirrorInstanceErrors.
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "https://tip.golang.org/_tipstatus", nil)
	if err != nil {
		return err.Error()
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err.Error()
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return resp.Status
	}
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err.Error()
	}
	// Scan for the "error=" line; errFound stops iteration early.
	var e string
	err = foreach.Line(b, func(s []byte) error {
		if !bytes.HasPrefix(s, []byte("error=")) {
			return nil
		}
		e = string(s[len("error="):])
		return errFound
	})
	if err != errFound {
		return "missing error= line"
	} else if e != "<nil>" {
		return "_tipstatus page reports error: " + e
	}
	return ""
}
// errFound is a sentinel returned by the foreach.Line callback in
// fetchTipGolangOrgError to stop iteration once the error= line is found.
var errFound = errors.New("error= line was found")
// newMacHealthChecker reports the health of the physical Mac minis at
// MacStadium and of the makemac daemon that manages their VMs.
func newMacHealthChecker() *healthChecker {
	var hosts []string
	// Physical amd64 Mac minis, not reverse buildlet connections.
	// Reduced from 10 to 8 as amd64 Macs are replaced by arm64 ones;
	// see CL 307731 (updates golang/go#45088).
	const numMacHosts = 8
	for i := 1; i <= numMacHosts; i++ {
		for _, suf := range []string{"a", "b"} {
			name := fmt.Sprintf("macstadium_host%02d%s", i, suf)
			hosts = append(hosts, name)
		}
	}
	checkHosts := reverseHostChecker(hosts)

	// And check that the makemac daemon is listening.
	var makeMac struct {
		sync.Mutex
		lastCheck  time.Time // currently unused
		lastErrors []string
		lastWarns  []string
	}
	setMakeMacStatus := func(errs, warns []string) {
		makeMac.Lock()
		defer makeMac.Unlock()
		makeMac.lastCheck = time.Now()
		makeMac.lastErrors = errs
		makeMac.lastWarns = warns
	}
	// Poll the makemac daemon every 15 seconds, forever.
	go func() {
		for {
			errs, warns := fetchMakeMacStatus()
			setMakeMacStatus(errs, warns)
			time.Sleep(15 * time.Second)
		}
	}()
	return &healthChecker{
		ID:     "macs",
		Title:  "MacStadium Mac VMs",
		DocURL: "https://github.com/golang/build/tree/master/env/darwin/macstadium",
		Check: func(w *checkWriter) {
			// Check hosts.
			checkHosts(w)
			// Check makemac daemon.
			makeMac.Lock()
			defer makeMac.Unlock()
			for _, v := range makeMac.lastWarns {
				w.warnf("makemac daemon: %v", v)
			}
			for _, v := range makeMac.lastErrors {
				w.errorf("makemac daemon: %v", v)
			}
		},
	}
}
// fetchMakeMacStatus fetches the makemac daemon's JSON status endpoint
// and returns its reported errors and warnings. Any transport, HTTP,
// or decoding problem is returned as a single error string.
func fetchMakeMacStatus() (errs, warns []string) {
	client := &http.Client{Timeout: 15 * time.Second}
	res, err := client.Get("http://macstadiumd.golang.org:8713")
	if err != nil {
		return []string{fmt.Sprintf("failed to fetch status: %v", err)}, nil
	}
	defer res.Body.Close()
	switch {
	case res.StatusCode != 200:
		return []string{fmt.Sprintf("HTTP status %v", res.Status)}, nil
	case res.Header.Get("Content-Type") != "application/json":
		return []string{fmt.Sprintf("unexpected content-type %q; want JSON", res.Header.Get("Content-Type"))}, nil
	}
	var status struct {
		Errors   []string
		Warnings []string
	}
	if err := json.NewDecoder(res.Body).Decode(&status); err != nil {
		return []string{fmt.Sprintf("reading status response body: %v", err)}, nil
	}
	return status.Errors, status.Warnings
}
// newMacOSARM64Checker checks that the expected number of macOS ARM64
// (M1 Mac mini) reverse buildlets are connected.
func newMacOSARM64Checker() *healthChecker {
	return &healthChecker{
		ID:     "macos-arm64",
		Title:  "macOS ARM64 (M1 Mac minis)",
		DocURL: "https://golang.org/issue/39782",
		Check:  hostTypeChecker("host-darwin-arm64-11_0-toothrot"),
	}
}
// hostTypeChecker returns a check that compares the number of connected
// reverse buildlets of hostType against the expected count from the
// dashboard configuration, reporting an error when too few are present.
func hostTypeChecker(hostType string) func(cw *checkWriter) {
	want := expectedHosts(hostType)
	return func(cw *checkWriter) {
		if got := pool.ReversePool().SingleHostTypeCount(hostType); got < want {
			cw.errorf("%d connected; want %d", got, want)
		}
	}
}
// expectedHosts returns the configured machine count for hostType,
// panicking if the type is not present in dashboard.Hosts (a
// programmer error: every caller passes a literal host type).
func expectedHosts(hostType string) int {
	conf, ok := dashboard.Hosts[hostType]
	if !ok {
		panic(fmt.Sprintf("unknown host type %q", hostType))
	}
	return conf.ExpectNum
}
// newScalewayHealthChecker checks the expected Scaleway linux/arm
// reverse-buildlet hosts ("scaleway-prod-NN").
func newScalewayHealthChecker() *healthChecker {
	n := expectedHosts("host-linux-arm-scaleway")
	hosts := make([]string, 0, n)
	for i := 1; i <= n; i++ {
		hosts = append(hosts, fmt.Sprintf("scaleway-prod-%02d", i))
	}
	return &healthChecker{
		ID:     "scaleway",
		Title:  "Scaleway linux/arm machines",
		DocURL: "https://github.com/golang/build/tree/master/env/linux-arm/scaleway",
		Check:  reverseHostChecker(hosts),
	}
}
// newPacketHealthChecker checks the expected Packet linux/arm64
// reverse-buildlet hosts ("packetNN").
func newPacketHealthChecker() *healthChecker {
	n := expectedHosts("host-linux-arm64-packet")
	hosts := make([]string, 0, n)
	for i := 1; i <= n; i++ {
		hosts = append(hosts, fmt.Sprintf("packet%02d", i))
	}
	return &healthChecker{
		ID:     "packet",
		Title:  "Packet linux/arm64 machines",
		DocURL: "https://github.com/golang/build/tree/master/env/linux-arm64/packet",
		Check:  reverseHostChecker(hosts),
	}
}
// newOSUPPC64Checker checks the expected OSU linux/ppc64
// reverse-buildlet hosts.
func newOSUPPC64Checker() *healthChecker {
	var hosts []string
	for i := 1; i <= expectedHosts("host-linux-ppc64-osu"); i++ {
		name := fmt.Sprintf("host-linux-ppc64-osu:ppc64_%02d", i)
		hosts = append(hosts, name)
	}
	return &healthChecker{
		ID:     "osuppc64",
		Title:  "OSU linux/ppc64 machines",
		DocURL: "https://github.com/golang/build/tree/master/env/linux-ppc64/osuosl",
		Check:  reverseHostChecker(hosts),
	}
}
// newOSUPPC64leChecker checks the expected OSU linux/ppc64le POWER8
// reverse-buildlet hosts.
func newOSUPPC64leChecker() *healthChecker {
	var hosts []string
	for i := 1; i <= expectedHosts("host-linux-ppc64le-osu"); i++ {
		name := fmt.Sprintf("host-linux-ppc64le-osu:power_%02d", i)
		hosts = append(hosts, name)
	}
	return &healthChecker{
		ID:     "osuppc64le",
		Title:  "OSU linux/ppc64le POWER8 machines",
		DocURL: "https://github.com/golang/build/tree/master/env/linux-ppc64le/osuosl",
		Check:  reverseHostChecker(hosts),
	}
}
// newOSUPPC64lePower9Checker checks the expected OSU linux/ppc64le
// POWER9 reverse-buildlet hosts.
func newOSUPPC64lePower9Checker() *healthChecker {
	var hosts []string
	for i := 1; i <= expectedHosts("host-linux-ppc64le-power9-osu"); i++ {
		name := fmt.Sprintf("host-linux-ppc64le-power9-osu:power_%02d", i)
		hosts = append(hosts, name)
	}
	return &healthChecker{
		ID:     "osuppc64lepower9",
		Title:  "OSU linux/ppc64le POWER9 machines",
		DocURL: "https://github.com/golang/build/tree/master/env/linux-ppc64le/osuosl",
		Check:  reverseHostChecker(hosts),
	}
}
// reverseHostChecker returns a check verifying that every host in hosts
// has a recently connected reverse buildlet, and that no host name is
// connected more than once. Hosts get recentThreshold of allowed
// absence to cover reboots; for the first recentThreshold after process
// start, missing hosts are reported as info rather than warnings.
func reverseHostChecker(hosts []string) func(cw *checkWriter) {
	const recentThreshold = 2 * time.Minute // let VMs be away 2 minutes; assume ~1 minute bootup + slop
	checkStart := time.Now().Add(recentThreshold)
	hostSet := map[string]bool{}
	for _, v := range hosts {
		hostSet[v] = true
	}
	// TODO(amedee): rethink how this is implemented. It has been
	// modified due to golang.org/issues/36841
	// instead of a single lock being held while all of the
	// operations are performed, there is now a lock held
	// during each BuildletLastSeen call and again when
	// the buildlet host names are retrieved.
	return func(cw *checkWriter) {
		p := pool.ReversePool()
		now := time.Now()
		wantGoodSince := now.Add(-recentThreshold)
		numMissing := 0
		numGood := 0
		// Check last good times
		for _, host := range hosts {
			lastGood, ok := p.BuildletLastSeen(host)
			if ok && lastGood.After(wantGoodSince) {
				numGood++
				continue
			}
			if now.Before(checkStart) {
				cw.infof("%s not yet connected", host)
				continue
			}
			if ok {
				// Use the same clock reading as the rest of this pass
				// (was time.Now().Sub(lastGood)).
				cw.warnf("%s missing, not seen for %v", host, now.Sub(lastGood).Round(time.Second))
			} else {
				cw.warnf("%s missing, never seen (at least %v)", host, uptime())
			}
			numMissing++
		}
		if numMissing > 0 {
			sum := numMissing + numGood
			percentMissing := float64(numMissing) / float64(sum)
			msg := fmt.Sprintf("%d machines missing, %.0f%% of capacity", numMissing, percentMissing*100)
			// 15% missing escalates from warning to error.
			if percentMissing >= 0.15 {
				cw.error(msg)
			} else {
				cw.warn(msg)
			}
		}
		// And check that we don't have more than 1
		// connected of any type.
		count := map[string]int{}
		for _, hostname := range p.BuildletHostnames() {
			if hostSet[hostname] {
				count[hostname]++
			}
		}
		for name, n := range count {
			if n > 1 {
				cw.errorf("%q is connected from %v machines", name, n)
			}
		}
	}
}
// newGitHubAPIChecker creates a GitHub API health checker
// that queries the remaining rate limit at regular intervals
// and reports when the hourly quota has been exceeded.
//
// It also records metrics to track remaining rate limit over time.
func newGitHubAPIChecker(ctx context.Context, sc *secret.Client) *healthChecker {
	// githubRate is the status of the GitHub API v3 client.
	// It's of type *github.Rate; no value means no result yet,
	// nil value means no recent result.
	var githubRate atomic.Value
	hc := &healthChecker{
		ID:     "githubapi",
		Title:  "GitHub API Rate Limit",
		DocURL: "https://golang.org/issue/44406",
		Check: func(w *checkWriter) {
			rate, ok := githubRate.Load().(*github.Rate)
			if !ok {
				w.warn("still checking")
			} else if rate == nil {
				w.warn("no recent result")
			} else if rate.Remaining == 0 {
				resetIn := "a minute or so"
				if t := time.Until(rate.Reset.Time); t > time.Minute {
					resetIn = t.Round(time.Second).String()
				}
				w.warnf("hourly GitHub API rate limit exceeded; reset in %s", resetIn)
			}
		},
	}
	// Start measuring and reporting the remaining GitHub API v3 rate limit.
	if sc == nil {
		// No secret client: we can't authenticate, so disable the
		// check rather than report misleading results.
		hc.Check = func(w *checkWriter) {
			w.info("check disabled; credentials were not provided")
		}
		return hc
	}
	token, err := sc.Retrieve(ctx, secret.NameMaintnerGitHubToken)
	if err != nil {
		log.Printf("newGitHubAPIChecker: sc.Retrieve(_, %q) failed, err = %v\n", secret.NameMaintnerGitHubToken, err)
		hc.Check = func(w *checkWriter) {
			// The check is displayed publicly, so don't include details from err.
			w.error("failed to retrieve API token")
		}
		return hc
	}
	gh := github.NewClient(oauth2.NewClient(ctx, oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})))
	go func() {
		t := time.NewTicker(time.Minute)
		defer t.Stop()
		for {
			// Fetch the current rate limit from the GitHub API.
			// This endpoint is special in that it doesn't consume rate limit quota itself.
			var rate *github.Rate
			rateLimitsCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
			rl, _, err := gh.RateLimits(rateLimitsCtx)
			cancel()
			if rle := (*github.RateLimitError)(nil); errors.As(err, &rle) {
				// A rate-limit error still carries the current rate state.
				rate = &rle.Rate
			} else if err != nil {
				log.Println("GitHubAPIChecker: github.RateLimits:", err)
			} else {
				rate = rl.GetCore()
			}
			// Store the result of fetching, and record the current rate limit, if any.
			githubRate.Store(rate)
			if rate != nil {
				stats.Record(ctx, mGitHubAPIRemaining.M(int64(rate.Remaining)))
			}
			select {
			case <-t.C:
			case <-ctx.Done():
				return
			}
		}
	}()
	return hc
}
// healthCheckerHandler serves hc's current status as text/plain:
// HTTP 500 when any error-level message is present, 200 otherwise,
// with one "Level: text" line per message (or "ok" if there are none).
func healthCheckerHandler(hc *healthChecker) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		cw := new(checkWriter)
		hc.Check(cw)
		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
		if cw.hasErrors() {
			w.WriteHeader(http.StatusInternalServerError) // was bare 500
		} else {
			w.WriteHeader(http.StatusOK) // was bare 200
		}
		if len(cw.Out) == 0 {
			io.WriteString(w, "ok\n")
			return
		}
		fmt.Fprintf(w, "# %q status: %s\n", hc.ID, hc.Title)
		if hc.DocURL != "" {
			fmt.Fprintf(w, "# Notes: %v\n", hc.DocURL)
		}
		for _, v := range cw.Out {
			fmt.Fprintf(w, "%s: %s\n", v.Level, v.Text)
		}
	})
}
// uptime returns how long this process has been running, rounded to the second.
func uptime() time.Duration { return time.Since(processStartTime).Round(time.Second) }
// handleStatus renders the coordinator's main status page at "/".
// As the top-level handler it also reroutes gRPC requests to the gRPC
// server and 404s any other path.
func handleStatus(w http.ResponseWriter, r *http.Request) {
	// Support gRPC handlers. handleStatus is our toplevel ("/") handler, so reroute to the gRPC server for
	// matching requests.
	if r.ProtoMajor == 2 && strings.HasPrefix(r.Header.Get("Content-Type"), "application/grpc") {
		grpcServer.ServeHTTP(w, r)
		return
	}
	if r.URL.Path != "/" {
		http.NotFound(w, r)
		return
	}
	// Sample disk usage before taking statusMu: diskFree shells out to df.
	df := diskFree()
	statusMu.Lock()
	data := statusData{
		Total:          len(status),
		Uptime:         uptime(),
		Recent:         append([]*buildStatus{}, statusDone...),
		DiskFree:       df,
		Version:        Version,
		NumFD:          fdCount(),
		NumGoroutine:   runtime.NumGoroutine(),
		HealthCheckers: healthCheckers,
	}
	for _, st := range status {
		if st.HasBuildlet() {
			data.ActiveBuilds++
			data.Active = append(data.Active, st)
			if st.conf.IsReverse() {
				data.ActiveReverse++
			}
		} else {
			data.Pending = append(data.Pending, st)
		}
	}
	// TODO: make this prettier.
	var buf bytes.Buffer
	for _, key := range tryList {
		if ts := tries[key]; ts != nil {
			state := ts.state()
			fmt.Fprintf(&buf, "Change-ID: %v Commit: %v (<a href='/try?commit=%v'>status</a>)\n",
				key.ChangeTriple(), key.Commit, key.Commit[:8])
			fmt.Fprintf(&buf, " Remain: %d, fails: %v\n", state.remain, state.failed)
			for _, bs := range ts.builds {
				fmt.Fprintf(&buf, " %s: running=%v\n", bs.Name, bs.isRunning())
			}
		}
	}
	statusMu.Unlock()
	gce := pool.NewGCEConfiguration()
	data.RemoteBuildlets = template.HTML(remoteBuildletStatus())
	sort.Sort(byAge(data.Active))
	sort.Sort(byAge(data.Pending))
	sort.Sort(sort.Reverse(byAge(data.Recent)))
	if gce.TryDepsErr() != nil {
		data.TrybotsErr = gce.TryDepsErr().Error()
	} else {
		if buf.Len() == 0 {
			data.Trybots = template.HTML("<i>(none)</i>")
		} else {
			data.Trybots = template.HTML("<pre>" + buf.String() + "</pre>")
		}
	}
	buf.Reset()
	gce.BuildletPool().WriteHTMLStatus(&buf)
	data.GCEPoolStatus = template.HTML(buf.String())
	// Note: a second, redundant buf.Reset() used to appear here.
	buf.Reset()
	pool.EC2BuildetPool().WriteHTMLStatus(&buf)
	data.EC2PoolStatus = template.HTML(buf.String())
	buf.Reset()
	pool.KubePool().WriteHTMLStatus(&buf)
	data.KubePoolStatus = template.HTML(buf.String())
	buf.Reset()
	pool.ReversePool().WriteHTMLStatus(&buf)
	data.ReversePoolStatus = template.HTML(buf.String())
	data.SchedState = sched.state()
	// Render to a buffer first so a template error produces a clean 500
	// instead of a half-written page.
	buf.Reset()
	if err := statusTmpl.Execute(&buf, data); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	buf.WriteTo(w)
}
// fdCount returns the number of open file descriptors of this process,
// counted by listing /proc/self/fd, or -1 if that fails (e.g. where
// /proc is unavailable, such as on macOS).
func fdCount() int {
	f, err := os.Open("/proc/self/fd")
	if err != nil {
		return -1
	}
	defer f.Close()
	total := 0
	for {
		names, err := f.Readdirnames(1000)
		total += len(names)
		switch err {
		case nil:
			// More entries may remain; keep reading.
		case io.EOF:
			return total
		default:
			return -1
		}
	}
}
func friendlyDuration(d time.Duration) string {
if d > 10*time.Second {
d2 := ((d + 50*time.Millisecond) / (100 * time.Millisecond)) * (100 * time.Millisecond)
return d2.String()
}
if d > time.Second {
d2 := ((d + 5*time.Millisecond) / (10 * time.Millisecond)) * (10 * time.Millisecond)
return d2.String()
}
d2 := ((d + 50*time.Microsecond) / (100 * time.Microsecond)) * (100 * time.Microsecond)
return d2.String()
}
// diskFree returns the output of `df -h`, or the empty string if the
// command fails; the error is deliberately ignored since this output is
// informational only.
func diskFree() string {
	out, _ := exec.Command("df", "-h").Output()
	return string(out)
}
// statusData is the data that fills out statusTmpl.
// It is assembled by handleStatus on each request.
type statusData struct {
	Total             int // number of total builds (including those waiting for a buildlet)
	ActiveBuilds      int // number of running builds (subset of Total with a buildlet)
	ActiveReverse     int // subset of ActiveBuilds that are reverse buildlets
	NumFD             int // open file descriptors, or -1 if unknown
	NumGoroutine      int
	Uptime            time.Duration
	Active            []*buildStatus // have a buildlet
	Pending           []*buildStatus // waiting on a buildlet
	Recent            []*buildStatus
	TrybotsErr        string
	Trybots           template.HTML
	GCEPoolStatus     template.HTML // TODO: embed template
	EC2PoolStatus     template.HTML // TODO: embed template
	KubePoolStatus    template.HTML // TODO: embed template
	ReversePoolStatus template.HTML // TODO: embed template
	RemoteBuildlets   template.HTML
	SchedState        schedulerState
	DiskFree          string
	Version           string
	HealthCheckers    []*healthChecker
}
// statusTmpl renders the coordinator's main status page ("/"),
// filled in from a statusData value by handleStatus.
var statusTmpl = template.Must(template.New("status").Parse(`
<!DOCTYPE html>
<html>
<head><link rel="stylesheet" href="/style.css"/><title>Go Farmer</title></head>
<body>
<header>
	<h1>Go Build Coordinator</h1>
	<nav>
		<a href="https://build.golang.org">Dashboard</a>
		<a href="/builders">Builders</a>
	</nav>
	<div class="clear"></div>
</header>
<h2>Running</h2>
<p>{{printf "%d" .Total}} total builds; {{printf "%d" .ActiveBuilds}} active ({{.ActiveReverse}} reverse). Uptime {{printf "%s" .Uptime}}. Version {{.Version}}.
<h2 id=health>Health <a href='#health'>¶</a></h2>
<ul>{{range .HealthCheckers}}
	<li><a href="/status/{{.ID}}">{{.Title}}</a>{{if .DocURL}} [<a href="{{.DocURL}}">docs</a>]{{end -}}: {{with .DoCheck.Out}}
		<ul>
			{{- range .}}
			<li>{{ .AsHTML}}</li>
			{{- end}}
		</ul>
	{{else}}ok{{end}}
	</li>
{{end}}</ul>
<h2 id=remote>Remote buildlets <a href='#remote'>¶</a></h2>
{{.RemoteBuildlets}}
<h2 id=trybots>Active Trybot Runs <a href='#trybots'>¶</a></h2>
{{- if .TrybotsErr}}
<b>trybots disabled:</b>: {{.TrybotsErr}}
{{else}}
{{.Trybots}}
{{end}}
<h2 id=sched>Scheduler State <a href='#sched'>¶</a></h2>
<ul>
	{{range .SchedState.HostTypes}}
	<li><b>{{.HostType}}</b>: {{.Total.Count}} waiting (oldest {{.Total.Oldest}}, newest {{.Total.Newest}}{{if .LastProgress}}, progress {{.LastProgress}}{{end}})
		{{if or .Gomote.Count .Try.Count}}<ul>
			{{if .Gomote.Count}}<li>gomote: {{.Gomote.Count}} (oldest {{.Gomote.Oldest}}, newest {{.Gomote.Newest}})</li>{{end}}
			{{if .Try.Count}}<li>try: {{.Try.Count}} (oldest {{.Try.Oldest}}, newest {{.Try.Newest}})</li>{{end}}
		</ul>{{end}}
	</li>
	{{end}}
</ul>
<h2 id=pools>Buildlet pools <a href='#pools'>¶</a></h2>
<ul>
	<li>{{.GCEPoolStatus}}</li>
	<li>{{.EC2PoolStatus}}</li>
	<li>{{.KubePoolStatus}}</li>
	<li>{{.ReversePoolStatus}}</li>
</ul>
<h2 id=active>Active builds <a href='#active'>¶</a></h2>
<ul>
	{{range .Active}}
	<li><pre>{{.HTMLStatusTruncated}}</pre></li>
	{{end}}
</ul>
<h2 id=pending>Pending builds <a href='#pending'>¶</a></h2>
<ul>
	{{range .Pending}}
	<li><span>{{.HTMLStatusLine}}</span></li>
	{{end}}
</ul>
<h2 id=completed>Recently completed <a href='#completed'>¶</a></h2>
<ul>
	{{range .Recent}}
	<li><span>{{.HTMLStatusLine}}</span></li>
	{{end}}
</ul>
<h2 id=disk>Disk Space <a href='#disk'>¶</a></h2>
<pre>{{.DiskFree}}</pre>
<h2 id=fd>File Descriptors <a href='#fd'>¶</a></h2>
<p>{{.NumFD}}</p>
<h2 id=goroutines>Goroutines <a href='#goroutines'>¶</a></h2>
<p>{{.NumGoroutine}} <a href='/debug/goroutines'>goroutines</a></p>
</body>
</html>
`))
// styleCSS holds the contents of style.css, loaded once by loadStatic
// and served by handleStyleCSS.
var styleCSS []byte

// loadStatic loads static resources into memory for serving.
func loadStatic() error {
	path := internal.FilePath("style.css", "cmd/coordinator")
	css, err := ioutil.ReadFile(path)
	if err != nil {
		return fmt.Errorf("ioutil.ReadFile(%q): %w", path, err)
	}
	styleCSS = css
	return nil
}
// handleStyleCSS serves the in-memory style.css, using the process
// start time as the modification time for caching.
func handleStyleCSS(w http.ResponseWriter, r *http.Request) {
	http.ServeContent(w, r, "style.css", processStartTime, bytes.NewReader(styleCSS))
}
cmd/coordinator: update the count for amd64 Mac Hosts
This change updates the number of Mac hosts that are expected to
connect to the coordinator. There is now a distinction between
amd64 and arm64 Mac hosts. We have reduced the count of amd64
hosts to 8 and have been adding arm64 Macs.
Updates golang/go#45088
Change-Id: I97de966c8398ceb0e5cb31d6bbe688de69efc4a5
Reviewed-on: https://go-review.googlesource.com/c/build/+/307731
Trust: Carlos Amedee <ab5e2bca84933118bbc9d48ffaccce3bac4eeb64@golang.org>
Run-TryBot: Carlos Amedee <ab5e2bca84933118bbc9d48ffaccce3bac4eeb64@golang.org>
TryBot-Result: Go Bot <66cb808b70d30c07676d5e946fee83fd561249e5@golang.org>
Reviewed-by: Dmitri Shuralyov <d181b7fea0ec87c86ba5a890ab716db52498e3ba@golang.org>
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.13 && (linux || darwin)
// +build go1.13
// +build linux darwin
package main
import (
"bufio"
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"html"
"html/template"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"regexp"
"runtime"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/google/go-github/github"
"go.opencensus.io/stats"
"golang.org/x/build/cmd/coordinator/internal"
"golang.org/x/build/dashboard"
"golang.org/x/build/internal/coordinator/pool"
"golang.org/x/build/internal/foreach"
"golang.org/x/build/internal/secret"
"golang.org/x/build/kubernetes/api"
"golang.org/x/oauth2"
)
// status

// statusLevel indicates the severity of one health-check message.
type statusLevel int

const (
	// levelInfo is an informational text that's not an error,
	// such as "coordinator just started recently, waiting to
	// start health check"
	levelInfo statusLevel = iota
	// levelWarn is a non-critical error, such as "missing 1 of 50
	// of ARM machines"
	levelWarn
	// levelError is something that should be fixed sooner, such
	// as "all Macs are gone".
	levelError
)

// String returns the level's display name ("Info", "Warn", "Error"),
// or the empty string for an unknown value.
func (l statusLevel) String() string {
	switch l {
	case levelInfo:
		return "Info"
	case levelWarn:
		return "Warn"
	case levelError:
		return "Error"
	}
	return ""
}
// levelText pairs one health-check message with its severity level.
type levelText struct {
	Level statusLevel
	Text  string
}

// AsHTML renders the message as escaped HTML colored by severity:
// plain for info, orange for warnings, bold red for errors.
func (lt levelText) AsHTML() template.HTML {
	switch lt.Level {
	case levelInfo:
		return template.HTML(html.EscapeString(lt.Text))
	case levelWarn:
		return template.HTML(fmt.Sprintf("<span style='color: orange'>%s</span>", html.EscapeString(lt.Text)))
	case levelError:
		return template.HTML(fmt.Sprintf("<span style='color: red'><b>%s</b></span>", html.EscapeString(lt.Text)))
	}
	return ""
}
// checkWriter accumulates the output lines of a single health check run.
type checkWriter struct {
	Out []levelText // messages in the order they were recorded
}

// error records a critical problem; errorf is its Sprintf variant.
func (w *checkWriter) error(s string)                       { w.Out = append(w.Out, levelText{levelError, s}) }
func (w *checkWriter) errorf(a string, args ...interface{}) { w.error(fmt.Sprintf(a, args...)) }

// info records an informational message; infof is its Sprintf variant.
func (w *checkWriter) info(s string)                       { w.Out = append(w.Out, levelText{levelInfo, s}) }
func (w *checkWriter) infof(a string, args ...interface{}) { w.info(fmt.Sprintf(a, args...)) }

// warn records a non-critical problem; warnf is its Sprintf variant.
func (w *checkWriter) warn(s string)                       { w.Out = append(w.Out, levelText{levelWarn, s}) }
func (w *checkWriter) warnf(a string, args ...interface{}) { w.warn(fmt.Sprintf(a, args...)) }

// hasErrors reports whether any recorded message is at levelError.
func (w *checkWriter) hasErrors() bool {
	for _, v := range w.Out {
		if v.Level == levelError {
			return true
		}
	}
	return false
}
// healthChecker describes one named health check, rendered on the main
// status page and served individually at /status/<ID>.
type healthChecker struct {
	ID     string // unique slug; registry key and URL path element
	Title  string // human-readable name shown on the status page
	DocURL string // optional documentation link

	// Check writes the health check status to a checkWriter.
	//
	// It's called when rendering the HTML page, so expensive
	// operations (network calls, etc.) should be done in a
	// separate goroutine and Check should report their results.
	Check func(*checkWriter)
}

// DoCheck runs the health check and returns its accumulated output.
func (hc *healthChecker) DoCheck() *checkWriter {
	cw := new(checkWriter)
	hc.Check(cw)
	return cw
}

// Registry of all health checkers, in registration order and keyed by
// ID. Both are written only via addHealthChecker.
var (
	healthCheckers    []*healthChecker
	healthCheckerByID = map[string]*healthChecker{}
)

// addHealthChecker registers hc and installs its HTTP handler at
// /status/<ID>. It panics if the ID is already registered.
func addHealthChecker(hc *healthChecker) {
	if _, dup := healthCheckerByID[hc.ID]; dup {
		panic("duplicate health checker ID " + hc.ID)
	}
	healthCheckers = append(healthCheckers, hc)
	healthCheckerByID[hc.ID] = hc
	http.Handle("/status/"+hc.ID, healthCheckerHandler(hc))
}
// basePinErr is the status of the start-up time basepin disk creation
// in gce.go. It's of type string; no value means no result yet,
// empty string means success, and non-empty means an error.
var basePinErr atomic.Value
// addHealthCheckers registers all of the coordinator's health checkers.
// sc may be nil; checkers that need credentials handle a nil client
// themselves (see newGitHubAPIChecker).
func addHealthCheckers(ctx context.Context, sc *secret.Client) {
	addHealthChecker(newMacHealthChecker())
	addHealthChecker(newMacOSARM64Checker())
	addHealthChecker(newScalewayHealthChecker())
	addHealthChecker(newPacketHealthChecker())
	addHealthChecker(newOSUPPC64Checker())
	addHealthChecker(newOSUPPC64leChecker())
	addHealthChecker(newOSUPPC64lePower9Checker())
	addHealthChecker(newBasepinChecker())
	addHealthChecker(newGitMirrorChecker())
	addHealthChecker(newTipGolangOrgChecker(ctx))
	addHealthChecker(newGitHubAPIChecker(ctx, sc))
}
// newBasepinChecker reports the status of the start-up time basepin
// disk creation, as published in basePinErr by gce.go.
func newBasepinChecker() *healthChecker {
	return &healthChecker{
		ID:     "basepin",
		Title:  "VM snapshots",
		DocURL: "https://golang.org/issue/21305",
		Check: func(w *checkWriter) {
			switch v := basePinErr.Load(); {
			case v == nil:
				// Nothing stored yet: creation hasn't finished.
				w.warn("still running")
			case v == "":
				// Empty string means success; nothing to report.
			default:
				w.error(v.(string))
			}
		},
	}
}
// gitMirrorStatus is the latest known status of the gitmirror service,
// written by monitorGitMirror and read by the "gitmirror" health check.
var gitMirrorStatus = struct {
	sync.Mutex
	Errors   []string
	Warnings []string
}{Warnings: []string{"still checking"}}
// monitorGitMirror polls gitMirrorErrors every 30 seconds, forever,
// publishing the latest result in gitMirrorStatus for the gitmirror
// health check to read.
func monitorGitMirror() {
	for {
		errs, warns := gitMirrorErrors()
		gitMirrorStatus.Lock()
		gitMirrorStatus.Errors, gitMirrorStatus.Warnings = errs, warns
		gitMirrorStatus.Unlock()
		time.Sleep(30 * time.Second)
	}
}
// gitMirrorErrors queries the status pages of all
// running gitmirror instances and reports errors.
//
// It makes use of pool.KubeGoClient() to do the query.
func gitMirrorErrors() (errs, warns []string) {
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()
	pods, err := pool.KubeGoClient().GetPods(ctx)
	if err != nil {
		log.Println("gitMirrorErrors: goKubeClient.GetPods:", err)
		return []string{"failed to get pods; can't query gitmirror status"}, nil
	}
	// Only pods labeled app=gitmirror and currently Running are queried.
	var runningGitMirror []api.Pod
	for _, p := range pods {
		if p.Labels["app"] != "gitmirror" || p.Status.Phase != "Running" {
			continue
		}
		runningGitMirror = append(runningGitMirror, p)
	}
	if len(runningGitMirror) == 0 {
		return []string{"no running gitmirror instances"}, nil
	}
	for _, pod := range runningGitMirror {
		// The gitmirror -http=:8585 status page URL is hardcoded here.
		// If the ReplicationController configuration changes (rare), this
		// health check will begin to fail until it's updated accordingly.
		instErrs, instWarns := gitMirrorInstanceErrors(ctx, fmt.Sprintf("http://%s:8585/", pod.Status.PodIP))
		// Prefix each instance's messages with the pod name so the
		// combined report identifies which instance is unhealthy.
		for _, err := range instErrs {
			errs = append(errs, fmt.Sprintf("instance %s: %s", pod.Name, err))
		}
		for _, warn := range instWarns {
			warns = append(warns, fmt.Sprintf("instance %s: %s", pod.Name, warn))
		}
	}
	return errs, warns
}
// gitMirrorInstanceErrors fetches the gitmirror status page at url and
// parses it into per-repo errors and warnings. An unreachable,
// non-200, or unparseable page is reported as a single error.
func gitMirrorInstanceErrors(ctx context.Context, url string) (errs, warns []string) {
	// Previously the error from NewRequestWithContext was discarded,
	// which would hand a nil request to Do (and panic) on a bad URL.
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return []string{err.Error()}, nil
	}
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return []string{err.Error()}, nil
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		return []string{res.Status}, nil
	}
	// TODO: add a JSON mode to gitmirror so we don't need to parse HTML.
	// This works for now. We control its output.
	bs := bufio.NewScanner(res.Body)
	for bs.Scan() {
		// Lines look like:
		//   <html><body><pre><a href='/debug/watcher/arch'>arch</a> - ok
		// or:
		//   <a href='/debug/watcher/arch'>arch</a> - ok
		// (See https://farmer.golang.org/debug/watcher/)
		line := bs.Text()
		if strings.HasSuffix(line, " - ok") {
			continue
		}
		m := gitMirrorLineRx.FindStringSubmatch(line)
		if len(m) != 3 {
			if strings.Contains(line, "</html>") {
				break
			}
			return []string{fmt.Sprintf("error parsing line %q", line)}, nil
		}
		if strings.HasPrefix(m[2], "ok; ") {
			// If the status begins with "ok", it can't be that bad.
			warns = append(warns, fmt.Sprintf("repo %s: %s", m[1], m[2]))
			continue
		}
		errs = append(errs, fmt.Sprintf("repo %s: %s", m[1], m[2]))
	}
	if err := bs.Err(); err != nil {
		errs = append(errs, err.Error())
	}
	return errs, warns
}
// gitMirrorLineRx matches one repo status line of the gitmirror status
// page; $1 is repo; $2 is error message.
var gitMirrorLineRx = regexp.MustCompile(`/debug/watcher/([\w-]+).?>.+</a> - (.*)`)
// newGitMirrorChecker reports the latest gitmirror status as published
// in gitMirrorStatus by monitorGitMirror.
func newGitMirrorChecker() *healthChecker {
	return &healthChecker{
		ID:     "gitmirror",
		Title:  "Git mirroring",
		DocURL: "https://github.com/golang/build/tree/master/cmd/gitmirror",
		Check: func(w *checkWriter) {
			// Snapshot the slices under the lock, then report
			// without holding it.
			gitMirrorStatus.Lock()
			errs := gitMirrorStatus.Errors
			warns := gitMirrorStatus.Warnings
			gitMirrorStatus.Unlock()
			for _, e := range errs {
				w.error(e)
			}
			for _, msg := range warns {
				w.warn(msg)
			}
		},
	}
}
// newTipGolangOrgChecker reports the health of the tip.golang.org
// website. It starts a background poller that refreshes the status
// every 30 seconds until ctx is canceled.
func newTipGolangOrgChecker(ctx context.Context) *healthChecker {
	// tipError is the status of the tip.golang.org website.
	// It's of type string; no value means no result yet,
	// empty string means success, and non-empty means an error.
	var tipError atomic.Value
	go func() {
		// Use a ticker plus a select on ctx.Done so this goroutine
		// terminates when ctx is canceled (the same pattern
		// newGitHubAPIChecker uses). Previously the loop slept
		// unconditionally and could never exit.
		t := time.NewTicker(30 * time.Second)
		defer t.Stop()
		for {
			tipError.Store(fetchTipGolangOrgError(ctx))
			select {
			case <-t.C:
			case <-ctx.Done():
				return
			}
		}
	}()
	return &healthChecker{
		ID:     "tip",
		Title:  "tip.golang.org website",
		DocURL: "https://github.com/golang/build/tree/master/cmd/tip",
		Check: func(w *checkWriter) {
			e, ok := tipError.Load().(string)
			if !ok {
				// Nothing stored yet: the first fetch hasn't completed.
				w.warn("still checking")
			} else if e != "" {
				w.error(e)
			}
		},
	}
}
// fetchTipGolangOrgError fetches the error= value from https://tip.golang.org/_tipstatus.
// It returns the empty string on success, and a description of the
// problem otherwise.
func fetchTipGolangOrgError(ctx context.Context) string {
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	// Previously the error from http.NewRequest was discarded; build
	// the request with the context attached and check the error, as
	// done elsewhere in this file.
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "https://tip.golang.org/_tipstatus", nil)
	if err != nil {
		return err.Error()
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err.Error()
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return resp.Status
	}
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err.Error()
	}
	// Scan for a line of the form "error=<value>". errFound is a
	// sentinel used only to stop the iteration early once it's seen.
	var e string
	err = foreach.Line(b, func(s []byte) error {
		if !bytes.HasPrefix(s, []byte("error=")) {
			return nil
		}
		e = string(s[len("error="):])
		return errFound
	})
	if err != errFound {
		return "missing error= line"
	} else if e != "<nil>" {
		return "_tipstatus page reports error: " + e
	}
	return ""
}
// errFound is a sentinel returned by the foreach.Line callback in
// fetchTipGolangOrgError to stop iterating once the error= line is found.
var errFound = errors.New("error= line was found")
// newMacHealthChecker returns the health checker for the physical
// MacStadium hosts and for the makemac daemon that manages their VMs.
func newMacHealthChecker() *healthChecker {
	var hosts []string
	const numMacHosts = 8 // physical Mac Pros, not reverse buildlet connections. M1 Macs will be included in separate checks.
	// Each physical host has an "a" and a "b" side.
	for i := 1; i <= numMacHosts; i++ {
		for _, suf := range []string{"a", "b"} {
			name := fmt.Sprintf("macstadium_host%02d%s", i, suf)
			hosts = append(hosts, name)
		}
	}
	checkHosts := reverseHostChecker(hosts)
	// And check that the makemac daemon is listening.
	var makeMac struct {
		sync.Mutex
		lastCheck  time.Time // currently unused
		lastErrors []string
		lastWarns  []string
	}
	setMakeMacStatus := func(errs, warns []string) {
		makeMac.Lock()
		defer makeMac.Unlock()
		makeMac.lastCheck = time.Now()
		makeMac.lastErrors = errs
		makeMac.lastWarns = warns
	}
	// Poll the makemac status endpoint every 15 seconds; the Check
	// func below only reads the latest cached result.
	// NOTE(review): this goroutine has no stop signal -- it lives for
	// the life of the process.
	go func() {
		for {
			errs, warns := fetchMakeMacStatus()
			setMakeMacStatus(errs, warns)
			time.Sleep(15 * time.Second)
		}
	}()
	return &healthChecker{
		ID:     "macs",
		Title:  "MacStadium Mac VMs",
		DocURL: "https://github.com/golang/build/tree/master/env/darwin/macstadium",
		Check: func(w *checkWriter) {
			// Check hosts.
			checkHosts(w)
			// Check makemac daemon.
			makeMac.Lock()
			defer makeMac.Unlock()
			for _, v := range makeMac.lastWarns {
				w.warnf("makemac daemon: %v", v)
			}
			for _, v := range makeMac.lastErrors {
				w.errorf("makemac daemon: %v", v)
			}
		},
	}
}
// fetchMakeMacStatus queries the makemac daemon's JSON status endpoint
// and returns its reported errors and warnings. Any failure to fetch
// or decode the status is returned as a single error string.
func fetchMakeMacStatus() (errs, warns []string) {
	client := &http.Client{Timeout: 15 * time.Second}
	resp, err := client.Get("http://macstadiumd.golang.org:8713")
	if err != nil {
		return []string{fmt.Sprintf("failed to fetch status: %v", err)}, nil
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return []string{fmt.Sprintf("HTTP status %v", resp.Status)}, nil
	}
	if ct := resp.Header.Get("Content-Type"); ct != "application/json" {
		return []string{fmt.Sprintf("unexpected content-type %q; want JSON", ct)}, nil
	}
	var status struct {
		Errors   []string
		Warnings []string
	}
	if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
		return []string{fmt.Sprintf("reading status response body: %v", err)}, nil
	}
	return status.Errors, status.Warnings
}
// newMacOSARM64Checker reports whether the expected number of M1 Mac
// mini reverse buildlets are connected.
func newMacOSARM64Checker() *healthChecker {
	return &healthChecker{
		ID:     "macos-arm64",
		Title:  "macOS ARM64 (M1 Mac minis)",
		DocURL: "https://golang.org/issue/39782",
		Check:  hostTypeChecker("host-darwin-arm64-11_0-toothrot"),
	}
}
// hostTypeChecker returns a check function that reports an error if
// fewer than the expected number of reverse buildlets of the given
// host type are connected.
func hostTypeChecker(hostType string) func(cw *checkWriter) {
	want := expectedHosts(hostType)
	return func(cw *checkWriter) {
		n := pool.ReversePool().SingleHostTypeCount(hostType)
		if n < want {
			cw.errorf("%d connected; want %d", n, want)
		}
	}
}
// expectedHosts returns the expected number of machines for the given
// host type, panicking if hostType is not in the dashboard host config.
func expectedHosts(hostType string) int {
	hc, ok := dashboard.Hosts[hostType]
	if !ok {
		panic(fmt.Sprintf("unknown host type %q", hostType))
	}
	return hc.ExpectNum
}
// newScalewayHealthChecker reports the health of the Scaleway
// linux/arm reverse buildlet hosts ("scaleway-prod-NN").
func newScalewayHealthChecker() *healthChecker {
	n := expectedHosts("host-linux-arm-scaleway")
	hosts := make([]string, 0, n)
	for i := 1; i <= n; i++ {
		hosts = append(hosts, fmt.Sprintf("scaleway-prod-%02d", i))
	}
	return &healthChecker{
		ID:     "scaleway",
		Title:  "Scaleway linux/arm machines",
		DocURL: "https://github.com/golang/build/tree/master/env/linux-arm/scaleway",
		Check:  reverseHostChecker(hosts),
	}
}
// newPacketHealthChecker reports the health of the Packet linux/arm64
// reverse buildlet hosts ("packetNN").
func newPacketHealthChecker() *healthChecker {
	var hosts []string
	for i := 1; i <= expectedHosts("host-linux-arm64-packet"); i++ {
		name := fmt.Sprintf("packet%02d", i)
		hosts = append(hosts, name)
	}
	return &healthChecker{
		ID:     "packet",
		Title:  "Packet linux/arm64 machines",
		DocURL: "https://github.com/golang/build/tree/master/env/linux-arm64/packet",
		Check:  reverseHostChecker(hosts),
	}
}
// newOSUPPC64Checker reports the health of the OSU linux/ppc64
// reverse buildlet hosts.
func newOSUPPC64Checker() *healthChecker {
	var hosts []string
	for i := 1; i <= expectedHosts("host-linux-ppc64-osu"); i++ {
		name := fmt.Sprintf("host-linux-ppc64-osu:ppc64_%02d", i)
		hosts = append(hosts, name)
	}
	return &healthChecker{
		ID:     "osuppc64",
		Title:  "OSU linux/ppc64 machines",
		DocURL: "https://github.com/golang/build/tree/master/env/linux-ppc64/osuosl",
		Check:  reverseHostChecker(hosts),
	}
}
// newOSUPPC64leChecker reports the health of the OSU linux/ppc64le
// POWER8 reverse buildlet hosts.
func newOSUPPC64leChecker() *healthChecker {
	var hosts []string
	for i := 1; i <= expectedHosts("host-linux-ppc64le-osu"); i++ {
		name := fmt.Sprintf("host-linux-ppc64le-osu:power_%02d", i)
		hosts = append(hosts, name)
	}
	return &healthChecker{
		ID:     "osuppc64le",
		Title:  "OSU linux/ppc64le POWER8 machines",
		DocURL: "https://github.com/golang/build/tree/master/env/linux-ppc64le/osuosl",
		Check:  reverseHostChecker(hosts),
	}
}
// newOSUPPC64lePower9Checker reports the health of the OSU
// linux/ppc64le POWER9 reverse buildlet hosts.
func newOSUPPC64lePower9Checker() *healthChecker {
	var hosts []string
	for i := 1; i <= expectedHosts("host-linux-ppc64le-power9-osu"); i++ {
		name := fmt.Sprintf("host-linux-ppc64le-power9-osu:power_%02d", i)
		hosts = append(hosts, name)
	}
	return &healthChecker{
		ID:     "osuppc64lepower9",
		Title:  "OSU linux/ppc64le POWER9 machines",
		DocURL: "https://github.com/golang/build/tree/master/env/linux-ppc64le/osuosl",
		Check:  reverseHostChecker(hosts),
	}
}
// reverseHostChecker returns a check function that verifies each named
// reverse buildlet host has been seen within the last two minutes, and
// that no host name is connected from more than one machine.
func reverseHostChecker(hosts []string) func(cw *checkWriter) {
	const recentThreshold = 2 * time.Minute // let VMs be away 2 minutes; assume ~1 minute bootup + slop
	checkStart := time.Now().Add(recentThreshold)
	hostSet := map[string]bool{}
	for _, v := range hosts {
		hostSet[v] = true
	}
	// TODO(amedee): rethink how this is implemented. It has been
	// modified due to golang.org/issues/36841
	// instead of a single lock being held while all of the
	// operations are performed, there is now a lock held
	// during each BuildletLastSeen call and again when
	// the buildlet host names are retrieved.
	return func(cw *checkWriter) {
		p := pool.ReversePool()
		now := time.Now()
		wantGoodSince := now.Add(-recentThreshold)
		numMissing := 0
		numGood := 0
		// Check last good times
		for _, host := range hosts {
			lastGood, ok := p.BuildletLastSeen(host)
			if ok && lastGood.After(wantGoodSince) {
				numGood++
				continue
			}
			if now.Before(checkStart) {
				// Grace period right after process start: hosts may
				// simply not have reconnected yet.
				cw.infof("%s not yet connected", host)
				continue
			}
			if ok {
				// Use the already-captured now instead of calling
				// time.Now() again (was time.Now().Sub(lastGood)),
				// keeping the age consistent with wantGoodSince.
				cw.warnf("%s missing, not seen for %v", host, now.Sub(lastGood).Round(time.Second))
			} else {
				cw.warnf("%s missing, never seen (at least %v)", host, uptime())
			}
			numMissing++
		}
		if numMissing > 0 {
			sum := numMissing + numGood
			percentMissing := float64(numMissing) / float64(sum)
			msg := fmt.Sprintf("%d machines missing, %.0f%% of capacity", numMissing, percentMissing*100)
			// 15% or more missing is an error; below that, a warning.
			if percentMissing >= 0.15 {
				cw.error(msg)
			} else {
				cw.warn(msg)
			}
		}
		// And check that we don't have more than 1
		// connected of any type.
		count := map[string]int{}
		for _, hostname := range p.BuildletHostnames() {
			if hostSet[hostname] {
				count[hostname]++
			}
		}
		for name, n := range count {
			if n > 1 {
				cw.errorf("%q is connected from %v machines", name, n)
			}
		}
	}
}
// newGitHubAPIChecker creates a GitHub API health checker
// that queries the remaining rate limit at regular intervals
// and reports when the hourly quota has been exceeded.
//
// It also records metrics to track remaining rate limit over time.
func newGitHubAPIChecker(ctx context.Context, sc *secret.Client) *healthChecker {
	// githubRate is the status of the GitHub API v3 client.
	// It's of type *github.Rate; no value means no result yet,
	// nil value means no recent result.
	var githubRate atomic.Value
	hc := &healthChecker{
		ID:     "githubapi",
		Title:  "GitHub API Rate Limit",
		DocURL: "https://golang.org/issue/44406",
		Check: func(w *checkWriter) {
			rate, ok := githubRate.Load().(*github.Rate)
			if !ok {
				w.warn("still checking")
			} else if rate == nil {
				w.warn("no recent result")
			} else if rate.Remaining == 0 {
				resetIn := "a minute or so"
				if t := time.Until(rate.Reset.Time); t > time.Minute {
					resetIn = t.Round(time.Second).String()
				}
				w.warnf("hourly GitHub API rate limit exceeded; reset in %s", resetIn)
			}
		},
	}
	// Start measuring and reporting the remaining GitHub API v3 rate limit.
	// Without credentials, replace the check rather than poll.
	if sc == nil {
		hc.Check = func(w *checkWriter) {
			w.info("check disabled; credentials were not provided")
		}
		return hc
	}
	token, err := sc.Retrieve(ctx, secret.NameMaintnerGitHubToken)
	if err != nil {
		log.Printf("newGitHubAPIChecker: sc.Retrieve(_, %q) failed, err = %v\n", secret.NameMaintnerGitHubToken, err)
		hc.Check = func(w *checkWriter) {
			// The check is displayed publicly, so don't include details from err.
			w.error("failed to retrieve API token")
		}
		return hc
	}
	gh := github.NewClient(oauth2.NewClient(ctx, oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})))
	// Poll once a minute until ctx is canceled, publishing the latest
	// rate into githubRate for the Check func above to read.
	go func() {
		t := time.NewTicker(time.Minute)
		defer t.Stop()
		for {
			// Fetch the current rate limit from the GitHub API.
			// This endpoint is special in that it doesn't consume rate limit quota itself.
			var rate *github.Rate
			rateLimitsCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
			rl, _, err := gh.RateLimits(rateLimitsCtx)
			cancel()
			if rle := (*github.RateLimitError)(nil); errors.As(err, &rle) {
				// A rate-limit error still tells us the current rate.
				rate = &rle.Rate
			} else if err != nil {
				log.Println("GitHubAPIChecker: github.RateLimits:", err)
			} else {
				rate = rl.GetCore()
			}
			// Store the result of fetching, and record the current rate limit, if any.
			githubRate.Store(rate)
			if rate != nil {
				stats.Record(ctx, mGitHubAPIRemaining.M(int64(rate.Remaining)))
			}
			select {
			case <-t.C:
			case <-ctx.Done():
				return
			}
		}
	}()
	return hc
}
// healthCheckerHandler returns an HTTP handler that runs hc and renders
// its output as plain text, answering 500 if the check reported any
// errors and 200 otherwise.
func healthCheckerHandler(hc *healthChecker) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		cw := new(checkWriter)
		hc.Check(cw)
		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
		// Use the named status constants rather than bare 500/200.
		if cw.hasErrors() {
			w.WriteHeader(http.StatusInternalServerError)
		} else {
			w.WriteHeader(http.StatusOK)
		}
		// No output at all means everything is fine.
		if len(cw.Out) == 0 {
			io.WriteString(w, "ok\n")
			return
		}
		fmt.Fprintf(w, "# %q status: %s\n", hc.ID, hc.Title)
		if hc.DocURL != "" {
			fmt.Fprintf(w, "# Notes: %v\n", hc.DocURL)
		}
		for _, v := range cw.Out {
			fmt.Fprintf(w, "%s: %s\n", v.Level, v.Text)
		}
	})
}
// uptime returns how long this process has been running, rounded to the second.
func uptime() time.Duration { return time.Since(processStartTime).Round(time.Second) }
// handleStatus serves the coordinator's toplevel ("/") status page:
// active/pending/recent builds, health checks, buildlet pool status,
// trybot state, and assorted process stats. gRPC requests arriving on
// "/" are rerouted to the gRPC server.
func handleStatus(w http.ResponseWriter, r *http.Request) {
	// Support gRPC handlers. handleStatus is our toplevel ("/") handler, so reroute to the gRPC server for
	// matching requests.
	if r.ProtoMajor == 2 && strings.HasPrefix(r.Header.Get("Content-Type"), "application/grpc") {
		grpcServer.ServeHTTP(w, r)
		return
	}
	if r.URL.Path != "/" {
		http.NotFound(w, r)
		return
	}
	df := diskFree()
	// Snapshot build state under statusMu.
	statusMu.Lock()
	data := statusData{
		Total:          len(status),
		Uptime:         uptime(),
		Recent:         append([]*buildStatus{}, statusDone...),
		DiskFree:       df,
		Version:        Version,
		NumFD:          fdCount(),
		NumGoroutine:   runtime.NumGoroutine(),
		HealthCheckers: healthCheckers,
	}
	for _, st := range status {
		if st.HasBuildlet() {
			data.ActiveBuilds++
			data.Active = append(data.Active, st)
			if st.conf.IsReverse() {
				data.ActiveReverse++
			}
		} else {
			data.Pending = append(data.Pending, st)
		}
	}
	// TODO: make this prettier.
	var buf bytes.Buffer
	for _, key := range tryList {
		if ts := tries[key]; ts != nil {
			state := ts.state()
			fmt.Fprintf(&buf, "Change-ID: %v Commit: %v (<a href='/try?commit=%v'>status</a>)\n",
				key.ChangeTriple(), key.Commit, key.Commit[:8])
			fmt.Fprintf(&buf, " Remain: %d, fails: %v\n", state.remain, state.failed)
			for _, bs := range ts.builds {
				fmt.Fprintf(&buf, " %s: running=%v\n", bs.Name, bs.isRunning())
			}
		}
	}
	statusMu.Unlock()
	gce := pool.NewGCEConfiguration()
	data.RemoteBuildlets = template.HTML(remoteBuildletStatus())
	sort.Sort(byAge(data.Active))
	sort.Sort(byAge(data.Pending))
	sort.Sort(sort.Reverse(byAge(data.Recent)))
	if gce.TryDepsErr() != nil {
		data.TrybotsErr = gce.TryDepsErr().Error()
	} else {
		if buf.Len() == 0 {
			data.Trybots = template.HTML("<i>(none)</i>")
		} else {
			data.Trybots = template.HTML("<pre>" + buf.String() + "</pre>")
		}
	}
	// Reuse buf for each pool's HTML status. (A stray duplicated
	// buf.Reset() before the EC2 pool was removed.)
	buf.Reset()
	gce.BuildletPool().WriteHTMLStatus(&buf)
	data.GCEPoolStatus = template.HTML(buf.String())
	buf.Reset()
	pool.EC2BuildetPool().WriteHTMLStatus(&buf)
	data.EC2PoolStatus = template.HTML(buf.String())
	buf.Reset()
	pool.KubePool().WriteHTMLStatus(&buf)
	data.KubePoolStatus = template.HTML(buf.String())
	buf.Reset()
	pool.ReversePool().WriteHTMLStatus(&buf)
	data.ReversePoolStatus = template.HTML(buf.String())
	data.SchedState = sched.state()
	// Render to the buffer first so a template error can produce a
	// clean 500 instead of a half-written page.
	buf.Reset()
	if err := statusTmpl.Execute(&buf, data); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	buf.WriteTo(w)
}
// fdCount returns the number of open file descriptors for this process
// by counting the entries of /proc/self/fd, or -1 if that directory
// can't be read (e.g. on non-Linux systems).
func fdCount() int {
	dir, err := os.Open("/proc/self/fd")
	if err != nil {
		return -1
	}
	defer dir.Close()
	count := 0
	for {
		names, err := dir.Readdirnames(1000)
		count += len(names)
		switch err {
		case nil:
			// More entries may remain; keep reading.
		case io.EOF:
			return count
		default:
			return -1
		}
	}
}
func friendlyDuration(d time.Duration) string {
if d > 10*time.Second {
d2 := ((d + 50*time.Millisecond) / (100 * time.Millisecond)) * (100 * time.Millisecond)
return d2.String()
}
if d > time.Second {
d2 := ((d + 5*time.Millisecond) / (10 * time.Millisecond)) * (10 * time.Millisecond)
return d2.String()
}
d2 := ((d + 50*time.Microsecond) / (100 * time.Microsecond)) * (100 * time.Microsecond)
return d2.String()
}
// diskFree returns the output of "df -h" for the status page.
// The error is deliberately ignored: on failure the page simply
// shows an empty disk-space section.
func diskFree() string {
	out, _ := exec.Command("df", "-h").Output()
	return string(out)
}
// statusData is the data that fills out statusTmpl.
type statusData struct {
	Total             int // number of total builds (including those waiting for a buildlet)
	ActiveBuilds      int // number of running builds (subset of Total with a buildlet)
	ActiveReverse     int // subset of ActiveBuilds that are reverse buildlets
	NumFD             int // open file descriptors, or -1 if unknown
	NumGoroutine      int // current goroutine count
	Uptime            time.Duration
	Active            []*buildStatus // have a buildlet
	Pending           []*buildStatus // waiting on a buildlet
	Recent            []*buildStatus // recently completed, newest first
	TrybotsErr        string         // non-empty if trybots are disabled
	Trybots           template.HTML
	GCEPoolStatus     template.HTML // TODO: embed template
	EC2PoolStatus     template.HTML // TODO: embed template
	KubePoolStatus    template.HTML // TODO: embed template
	ReversePoolStatus template.HTML // TODO: embed template
	RemoteBuildlets   template.HTML
	SchedState        schedulerState
	DiskFree          string // "df -h" output
	Version           string
	HealthCheckers    []*healthChecker
}
var statusTmpl = template.Must(template.New("status").Parse(`
<!DOCTYPE html>
<html>
<head><link rel="stylesheet" href="/style.css"/><title>Go Farmer</title></head>
<body>
<header>
<h1>Go Build Coordinator</h1>
<nav>
<a href="https://build.golang.org">Dashboard</a>
<a href="/builders">Builders</a>
</nav>
<div class="clear"></div>
</header>
<h2>Running</h2>
<p>{{printf "%d" .Total}} total builds; {{printf "%d" .ActiveBuilds}} active ({{.ActiveReverse}} reverse). Uptime {{printf "%s" .Uptime}}. Version {{.Version}}.
<h2 id=health>Health <a href='#health'>¶</a></h2>
<ul>{{range .HealthCheckers}}
<li><a href="/status/{{.ID}}">{{.Title}}</a>{{if .DocURL}} [<a href="{{.DocURL}}">docs</a>]{{end -}}: {{with .DoCheck.Out}}
<ul>
{{- range .}}
<li>{{ .AsHTML}}</li>
{{- end}}
</ul>
{{else}}ok{{end}}
</li>
{{end}}</ul>
<h2 id=remote>Remote buildlets <a href='#remote'>¶</a></h2>
{{.RemoteBuildlets}}
<h2 id=trybots>Active Trybot Runs <a href='#trybots'>¶</a></h2>
{{- if .TrybotsErr}}
<b>trybots disabled:</b>: {{.TrybotsErr}}
{{else}}
{{.Trybots}}
{{end}}
<h2 id=sched>Scheduler State <a href='#sched'>¶</a></h2>
<ul>
{{range .SchedState.HostTypes}}
<li><b>{{.HostType}}</b>: {{.Total.Count}} waiting (oldest {{.Total.Oldest}}, newest {{.Total.Newest}}{{if .LastProgress}}, progress {{.LastProgress}}{{end}})
{{if or .Gomote.Count .Try.Count}}<ul>
{{if .Gomote.Count}}<li>gomote: {{.Gomote.Count}} (oldest {{.Gomote.Oldest}}, newest {{.Gomote.Newest}})</li>{{end}}
{{if .Try.Count}}<li>try: {{.Try.Count}} (oldest {{.Try.Oldest}}, newest {{.Try.Newest}})</li>{{end}}
</ul>{{end}}
</li>
{{end}}
</ul>
<h2 id=pools>Buildlet pools <a href='#pools'>¶</a></h2>
<ul>
<li>{{.GCEPoolStatus}}</li>
<li>{{.EC2PoolStatus}}</li>
<li>{{.KubePoolStatus}}</li>
<li>{{.ReversePoolStatus}}</li>
</ul>
<h2 id=active>Active builds <a href='#active'>¶</a></h2>
<ul>
{{range .Active}}
<li><pre>{{.HTMLStatusTruncated}}</pre></li>
{{end}}
</ul>
<h2 id=pending>Pending builds <a href='#pending'>¶</a></h2>
<ul>
{{range .Pending}}
<li><span>{{.HTMLStatusLine}}</span></li>
{{end}}
</ul>
<h2 id=completed>Recently completed <a href='#completed'>¶</a></h2>
<ul>
{{range .Recent}}
<li><span>{{.HTMLStatusLine}}</span></li>
{{end}}
</ul>
<h2 id=disk>Disk Space <a href='#disk'>¶</a></h2>
<pre>{{.DiskFree}}</pre>
<h2 id=fd>File Descriptors <a href='#fd'>¶</a></h2>
<p>{{.NumFD}}</p>
<h2 id=goroutines>Goroutines <a href='#goroutines'>¶</a></h2>
<p>{{.NumGoroutine}} <a href='/debug/goroutines'>goroutines</a></p>
</body>
</html>
`))
// styleCSS holds the contents of style.css, loaded once by loadStatic
// and served by handleStyleCSS.
var styleCSS []byte

// loadStatic loads static resources into memory for serving.
func loadStatic() error {
	path := internal.FilePath("style.css", "cmd/coordinator")
	css, err := ioutil.ReadFile(path)
	if err != nil {
		return fmt.Errorf("ioutil.ReadFile(%q): %w", path, err)
	}
	styleCSS = css
	return nil
}
// handleStyleCSS serves the in-memory style.css loaded by loadStatic,
// using the process start time as the modification time for caching.
func handleStyleCSS(w http.ResponseWriter, r *http.Request) {
	http.ServeContent(w, r, "style.css", processStartTime, bytes.NewReader(styleCSS))
}
|
package entrypoint
import (
"log"
"strings"
amb "github.com/datawire/ambassador/pkg/api/getambassador.io/v2"
"github.com/datawire/ambassador/pkg/kates"
)
// SecretRef is a secret reference -- basically, a namespace/name pair.
// It is used as a map key, so it must stay a comparable value type.
type SecretRef struct {
	Namespace string
	Name      string
}
// ReconcileSecrets figures out which secrets we're actually using,
// since we don't want to send secrets to Ambassador unless we're
// using them, since any secret we send will be saved to disk.
//
// It populates s.Secrets from s.FSSecrets and s.K8sSecrets, keeping
// only secrets referenced by a resource whose ambassador_id matches
// ours (FSSecrets win on conflicts).
func (s *AmbassadorInputs) ReconcileSecrets() {
	// Start by building up a list of all the K8s objects that are
	// allowed to mention secrets. Note that we vet the ambassador_id
	// for all of these before putting them on the list.
	var resources []kates.Object
	// Annotations are straightforward, although honestly we should
	// be filtering annotations by type here (or, even better, unfold
	// them earlier so that we can treat them like any other resource
	// here).
	for _, a := range s.annotations {
		if include(GetAmbId(a)) {
			resources = append(resources, a)
		}
	}
	// Hosts are a little weird, because we have two ways to find the
	// ambassador_id. Sorry about that.
	for _, h := range s.Hosts {
		var id amb.AmbassadorID
		if len(h.Spec.AmbassadorID) > 0 {
			id = h.Spec.AmbassadorID
		} else {
			id = h.Spec.DeprecatedAmbassadorID
		}
		if include(id) {
			resources = append(resources, h)
		}
	}
	// TLSContexts, Modules, and Ingresses are all straightforward.
	for _, t := range s.TLSContexts {
		if include(t.Spec.AmbassadorID) {
			resources = append(resources, t)
		}
	}
	for _, m := range s.Modules {
		if include(m.Spec.AmbassadorID) {
			resources = append(resources, m)
		}
	}
	// NOTE(review): Ingresses are added without an ambassador_id check,
	// unlike every other resource type here -- confirm that's intended.
	for _, i := range s.Ingresses {
		resources = append(resources, i)
	}
	// OK. Once that's done, we can check to see if we should be
	// doing secret namespacing or not -- this requires a look into
	// the Ambassador Module, if it's present.
	//
	// XXX Linear searches suck, but whatever, it's just not gonna
	// be all that many things. We won't bother optimizing this unless
	// a profiler shows that it's a problem.
	secretNamespacing := true
	for _, resource := range resources {
		mod, ok := resource.(*amb.Module)
		// We don't need to recheck ambassador_id on this Module because
		// the Module can't have made it into the resources list without
		// its ambassador_id being checked.
		if ok && mod.GetName() == "ambassador" {
			// XXX ModuleSecrets is a _godawful_ hack. See the comment on
			// ModuleSecrets itself for more.
			secs := ModuleSecrets{}
			err := convert(mod.Spec.Config, &secs)
			if err != nil {
				log.Printf("error parsing module: %v", err)
				continue
			}
			secretNamespacing = secs.Defaults.TLSSecretNamespacing
			break
		}
	}
	// Once we have our list of secrets, go figure out the names of all
	// the secrets we need. We'll use this "refs" map to hold all the names...
	refs := map[SecretRef]bool{}
	// ...and, uh, this "action" function is really just a closure to avoid
	// needing to pass "refs" to findSecretRefs. Shrug. Arguably more
	// complex than needed, but meh.
	action := func(ref SecretRef) {
		refs[ref] = true
	}
	// So. Walk the list of resources...
	for _, resource := range resources {
		// ...and for each resource, dig out any secrets being referenced.
		findSecretRefs(resource, secretNamespacing, action)
	}
	if IsEdgeStack() {
		// For Edge Stack, we _always_ have implicit references to the fallback
		// cert secret and the license secret.
		secretRef(GetAmbassadorNamespace(), "fallback-self-signed-cert", false, action)
		secretRef(GetLicenseSecretNamespace(), GetLicenseSecretName(), false, action)
	}
	// OK! After all that, go copy all the matching secrets from FSSecrets and
	// K8sSecrets to Secrets.
	//
	// The way this works is kind of simple: first we check everything in
	// FSSecrets. Then, when we check K8sSecrets, we skip any secrets that are
	// also in FSSecrets. End result: FSSecrets wins if there are any conflicts.
	s.Secrets = make([]*kates.Secret, 0, len(refs))
	for ref, secret := range s.FSSecrets {
		if refs[ref] {
			log.Printf("Taking FSSecret %#v", ref)
			s.Secrets = append(s.Secrets, secret)
		}
	}
	for _, secret := range s.K8sSecrets {
		ref := SecretRef{secret.GetNamespace(), secret.GetName()}
		_, found := s.FSSecrets[ref]
		if found {
			log.Printf("Conflict! skipping K8sSecret %#v", ref)
			continue
		}
		if refs[ref] {
			log.Printf("Taking K8sSecret %#v", ref)
			s.Secrets = append(s.Secrets, secret)
		}
	}
}
// Should we pay attention to a given AmbassadorID set?
//
// XXX Yes, amb.AmbassadorID is a singular name for a plural type. Sigh.
func include(id amb.AmbassadorID) bool {
	// We always pay attention to the "_automatic_" ID -- it gives us a way
	// to easily always include certain configuration resources for Edge
	// Stack.
	if len(id) == 1 && id[0] == "_automatic_" {
		return true
	}
	// It's not "_automatic_", so we have to actually do the work. Grab
	// our AmbassadorID...
	me := GetAmbassadorId()
	// ...force an empty AmbassadorID to "default", per the documentation...
	if len(id) == 0 {
		id = amb.AmbassadorID{"default"}
	}
	// ...and then see if our AmbassadorID is in the list.
	for _, name := range id {
		if me == name {
			return true
		}
	}
	return false
}
// findSecretRefs finds all the secrets a given Ambassador resource
// references, reporting each one by calling action.
func findSecretRefs(resource kates.Object, secretNamespacing bool, action func(SecretRef)) {
	switch r := resource.(type) {
	case *amb.Host:
		// The Host resource is a little odd. Host.spec.tls, Host.spec.tlsSecret, and
		// host.spec.acmeProvider.privateKeySecret can all refer to secrets.
		if r.Spec == nil {
			return
		}
		if r.Spec.TLS != nil {
			// Host.spec.tls.caSecret is the thing to worry about here.
			secretRef(r.GetNamespace(), r.Spec.TLS.CASecret, secretNamespacing, action)
		}
		// Host.spec.tlsSecret and Host.spec.acmeProvider.privateKeySecret are native-Kubernetes-style
		// `core.v1.LocalObjectReference`s, not Ambassador-style `{name}.{namespace}` strings. If we
		// ever decide that they should support cross-namespace references, we would do it by adding a
		// `namespace:` field (i.e. changing them to `core.v1.SecretReference`s) rather than by
		// adopting the `{name}.{namespace}` notation.
		if r.Spec.TLSSecret != nil && r.Spec.TLSSecret.Name != "" {
			secretRef(r.GetNamespace(), r.Spec.TLSSecret.Name, false, action)
		}
		if r.Spec.AcmeProvider != nil && r.Spec.AcmeProvider.PrivateKeySecret != nil &&
			r.Spec.AcmeProvider.PrivateKeySecret.Name != "" {
			secretRef(r.GetNamespace(), r.Spec.AcmeProvider.PrivateKeySecret.Name, false, action)
		}
	case *amb.TLSContext:
		// TLSContext.spec.secret and TLSContext.spec.ca_secret can both name
		// secrets -- and note well that TLSContexts can override the global
		// secretNamespacing setting.
		if r.Spec.SecretNamespacing != nil {
			secretNamespacing = *r.Spec.SecretNamespacing
		}
		if r.Spec.Secret != "" {
			secretRef(r.GetNamespace(), r.Spec.Secret, secretNamespacing, action)
		}
		// BUGFIX: previously only spec.secret was considered, so a CA secret
		// referenced by a TLSContext was never sent to Ambassador.
		if r.Spec.CASecret != "" {
			secretRef(r.GetNamespace(), r.Spec.CASecret, secretNamespacing, action)
		}
	case *amb.Module:
		// This whole thing is a hack. We probably _should_ check to make sure that
		// this is an Ambassador Module or a TLS Module, but, well, those're the only
		// supported kinds now, anyway...
		//
		// XXX ModuleSecrets is a godawful hack. See its comment for more.
		secs := ModuleSecrets{}
		err := convert(r.Spec.Config, &secs)
		if err != nil {
			// XXX
			log.Printf("error extracting secrets from module: %v", err)
			return
		}
		// XXX Technically, this is wrong -- _any_ element named in the module can
		// refer to a secret. Hmmm.
		if secs.Upstream.Secret != "" {
			secretRef(r.GetNamespace(), secs.Upstream.Secret, secretNamespacing, action)
		}
		if secs.Server.Secret != "" {
			secretRef(r.GetNamespace(), secs.Server.Secret, secretNamespacing, action)
		}
		if secs.Client.Secret != "" {
			secretRef(r.GetNamespace(), secs.Client.Secret, secretNamespacing, action)
		}
	case *kates.Ingress:
		// Ingress is pretty straightforward, too, just look in spec.tls.
		for _, itls := range r.Spec.TLS {
			if itls.SecretName != "" {
				secretRef(r.GetNamespace(), itls.SecretName, secretNamespacing, action)
			}
		}
	}
}
// secretRef reports one referenced secret by invoking action with the
// resolved SecretRef, handling secretNamespacing correctly: when
// namespacing is on, a name of the form "name.namespace" has the text
// after its final dot split off as the namespace.
func secretRef(namespace, name string, secretNamespacing bool, action func(SecretRef)) {
	if secretNamespacing {
		if idx := strings.LastIndex(name, "."); idx >= 0 {
			namespace = name[idx+1:]
			name = name[:idx]
		}
	}
	action(SecretRef{namespace, name})
}
// ModuleSecrets is... a hack. It's sort of a mashup of the chunk of the Ambassador
// Module and the chunk of the TLS Module that are common, because they're able to
// specify secrets. However... first, I don't think the TLS Module actually supported
// tls_secret_namespacing. Second, the Ambassador Module at least supports arbitrary
// origination context names -- _any_ key in the TLS dictionary will get turned into
// an origination context.
//
// I seriously doubt that either of these will actually affect anyone at this remove,
// but... yeah.
type ModuleSecrets struct {
	// Defaults mirrors the Module's "defaults" element.
	Defaults struct {
		TLSSecretNamespacing bool `json:"tls_secret_namespacing"`
	} `json:"defaults"`
	// Upstream, Server, and Client are the TLS origination contexts
	// that can each name a secret.
	Upstream struct {
		Secret string `json:"secret"`
	} `json:"upstream"`
	Server struct {
		Secret string `json:"secret"`
	} `json:"server"`
	Client struct {
		Secret string `json:"secret"`
	} `json:"client"`
}
(from AES) D'oh! TLSContexts can have CASecrets, not just Secrets.
package entrypoint
import (
"log"
"strings"
amb "github.com/datawire/ambassador/pkg/api/getambassador.io/v2"
"github.com/datawire/ambassador/pkg/kates"
)
// SecretRef is a secret reference -- basically, a namespace/name pair.
// It is used as a map key, so it must stay a comparable value type.
type SecretRef struct {
	Namespace string
	Name      string
}
// ReconcileSecrets figures out which secrets we're actually using,
// since we don't want to send secrets to Ambassador unless we're
// using them, since any secret we send will be saved to disk.
//
// The result is written into s.Secrets, built from s.FSSecrets and
// s.K8sSecrets (FSSecrets wins on conflict).
func (s *AmbassadorInputs) ReconcileSecrets() {
	// Start by building up a list of all the K8s objects that are
	// allowed to mention secrets. Note that we vet the ambassador_id
	// for all of these before putting them on the list.
	var resources []kates.Object
	// Annotations are straightforward, although honestly we should
	// be filtering annotations by type here (or, even better, unfold
	// them earlier so that we can treat them like any other resource
	// here).
	for _, a := range s.annotations {
		if include(GetAmbId(a)) {
			resources = append(resources, a)
		}
	}
	// Hosts are a little weird, because we have two ways to find the
	// ambassador_id. Sorry about that.
	for _, h := range s.Hosts {
		var id amb.AmbassadorID
		if len(h.Spec.AmbassadorID) > 0 {
			id = h.Spec.AmbassadorID
		} else {
			id = h.Spec.DeprecatedAmbassadorID
		}
		if include(id) {
			resources = append(resources, h)
		}
	}
	// TLSContexts, Modules, and Ingresses are all straightforward.
	for _, t := range s.TLSContexts {
		if include(t.Spec.AmbassadorID) {
			resources = append(resources, t)
		}
	}
	for _, m := range s.Modules {
		if include(m.Spec.AmbassadorID) {
			resources = append(resources, m)
		}
	}
	// NOTE(review): Ingresses are added unconditionally -- no ambassador_id
	// check happens here, unlike the resource kinds above. Confirm intended.
	for _, i := range s.Ingresses {
		resources = append(resources, i)
	}
	// OK. Once that's done, we can check to see if we should be
	// doing secret namespacing or not -- this requires a look into
	// the Ambassador Module, if it's present.
	//
	// XXX Linear searches suck, but whatever, it's just not gonna
	// be all that many things. We won't bother optimizing this unless
	// a profiler shows that it's a problem.
	secretNamespacing := true
	for _, resource := range resources {
		mod, ok := resource.(*amb.Module)
		// We don't need to recheck ambassador_id on this Module because
		// the Module can't have made it into the resources list without
		// its ambassador_id being checked.
		if ok && mod.GetName() == "ambassador" {
			// XXX ModuleSecrets is a _godawful_ hack. See the comment on
			// ModuleSecrets itself for more.
			secs := ModuleSecrets{}
			err := convert(mod.Spec.Config, &secs)
			if err != nil {
				log.Printf("error parsing module: %v", err)
				continue
			}
			secretNamespacing = secs.Defaults.TLSSecretNamespacing
			break
		}
	}
	// Once we have our list of secrets, go figure out the names of all
	// the secrets we need. We'll use this "refs" map to hold all the names...
	refs := map[SecretRef]bool{}
	// ...and, uh, this "action" function is really just a closure to avoid
	// needing to pass "refs" to findSecretRefs. Shrug. Arguably more
	// complex than needed, but meh.
	action := func(ref SecretRef) {
		refs[ref] = true
	}
	// So. Walk the list of resources...
	for _, resource := range resources {
		// ...and for each resource, dig out any secrets being referenced.
		findSecretRefs(resource, secretNamespacing, action)
	}
	if IsEdgeStack() {
		// For Edge Stack, we _always_ have implicit references to the fallback
		// cert secret and the license secret.
		secretRef(GetAmbassadorNamespace(), "fallback-self-signed-cert", false, action)
		secretRef(GetLicenseSecretNamespace(), GetLicenseSecretName(), false, action)
	}
	// OK! After all that, go copy all the matching secrets from FSSecrets and
	// K8sSecrets to Secrets.
	//
	// The way this works is kind of simple: first we check everything in
	// FSSecrets. Then, when we check K8sSecrets, we skip any secrets that are
	// also in FSSecrets. End result: FSSecrets wins if there are any conflicts.
	s.Secrets = make([]*kates.Secret, 0, len(refs))
	for ref, secret := range s.FSSecrets {
		if refs[ref] {
			log.Printf("Taking FSSecret %#v", ref)
			s.Secrets = append(s.Secrets, secret)
		}
	}
	for _, secret := range s.K8sSecrets {
		ref := SecretRef{secret.GetNamespace(), secret.GetName()}
		_, found := s.FSSecrets[ref]
		if found {
			log.Printf("Conflict! skipping K8sSecret %#v", ref)
			continue
		}
		if refs[ref] {
			log.Printf("Taking K8sSecret %#v", ref)
			s.Secrets = append(s.Secrets, secret)
		}
	}
}
// Should we pay attention to a given AmbassadorID set?
//
// XXX Yes, amb.AmbassadorID is a singular name for a plural type. Sigh.
func include(id amb.AmbassadorID) bool {
	// The "_automatic_" ID is always honored -- it gives us a way to
	// easily always include certain configuration resources for Edge
	// Stack.
	if len(id) == 1 && id[0] == "_automatic_" {
		return true
	}
	// Not "_automatic_", so we have to do the real work. An empty
	// AmbassadorID is forced to "default", per the documentation...
	if len(id) == 0 {
		id = amb.AmbassadorID{"default"}
	}
	// ...and then we match if our own AmbassadorID shows up in the list.
	me := GetAmbassadorId()
	for _, name := range id {
		if name == me {
			return true
		}
	}
	return false
}
// Find all the secrets a given Ambassador resource references.
func findSecretRefs(resource kates.Object, secretNamespacing bool, action func(SecretRef)) {
	switch r := resource.(type) {
	case *amb.Host:
		// Hosts are a little odd: Host.spec.tls, Host.spec.tlsSecret, and
		// Host.spec.acmeProvider.privateKeySecret can all name secrets.
		if r.Spec == nil {
			return
		}
		if r.Spec.TLS != nil {
			// Host.spec.tls.caSecret is the thing to worry about here.
			secretRef(r.GetNamespace(), r.Spec.TLS.CASecret, secretNamespacing, action)
		}
		// Host.spec.tlsSecret and Host.spec.acmeProvider.privateKeySecret are
		// native-Kubernetes-style `core.v1.LocalObjectReference`s, not
		// Ambassador-style `{name}.{namespace}` strings. If we ever decide they
		// should support cross-namespace references, we'd add a `namespace:`
		// field (i.e. change them to `core.v1.SecretReference`s) rather than
		// adopt the dotted notation -- hence secretNamespacing=false below.
		if tlsSecret := r.Spec.TLSSecret; tlsSecret != nil && tlsSecret.Name != "" {
			secretRef(r.GetNamespace(), tlsSecret.Name, false, action)
		}
		if acme := r.Spec.AcmeProvider; acme != nil && acme.PrivateKeySecret != nil && acme.PrivateKeySecret.Name != "" {
			secretRef(r.GetNamespace(), acme.PrivateKeySecret.Name, false, action)
		}
	case *amb.TLSContext:
		// TLSContext.spec.secret and TLSContext.spec.ca_secret are the things to
		// worry about -- and note well that a TLSContext can override the global
		// secretNamespacing setting, so resolve the effective setting up front.
		nsing := secretNamespacing
		if r.Spec.SecretNamespacing != nil {
			nsing = *r.Spec.SecretNamespacing
		}
		if r.Spec.Secret != "" {
			secretRef(r.GetNamespace(), r.Spec.Secret, nsing, action)
		}
		if r.Spec.CASecret != "" {
			secretRef(r.GetNamespace(), r.Spec.CASecret, nsing, action)
		}
	case *amb.Module:
		// This whole thing is a hack. We probably _should_ check that this is an
		// Ambassador Module or a TLS Module, but those are the only supported
		// kinds now anyway...
		//
		// XXX ModuleSecrets is a godawful hack. See its comment for more.
		secs := ModuleSecrets{}
		if err := convert(r.Spec.Config, &secs); err != nil {
			// XXX
			log.Printf("error extracting secrets from module: %v", err)
			return
		}
		// XXX Technically, this is wrong -- _any_ element named in the module
		// can refer to a secret. Hmmm.
		for _, name := range []string{secs.Upstream.Secret, secs.Server.Secret, secs.Client.Secret} {
			if name != "" {
				secretRef(r.GetNamespace(), name, secretNamespacing, action)
			}
		}
	case *kates.Ingress:
		// Ingress is pretty straightforward: just look in spec.tls.
		for _, itls := range r.Spec.TLS {
			if itls.SecretName != "" {
				secretRef(r.GetNamespace(), itls.SecretName, secretNamespacing, action)
			}
		}
	}
}
// Mark a secret as one we reference, handling secretNamespacing correctly.
//
// When secretNamespacing is enabled and the name contains a dot, the text
// after the final dot names the namespace and the rest is the secret name;
// otherwise the name and namespace pass through untouched.
func secretRef(namespace, name string, secretNamespacing bool, action func(SecretRef)) {
	if secretNamespacing {
		if dot := strings.LastIndexByte(name, '.'); dot != -1 {
			namespace, name = name[dot+1:], name[:dot]
		}
	}
	action(SecretRef{namespace, name})
}
// ModuleSecrets is... a hack. It's sort of a mashup of the chunk of the Ambassador
// Module and the chunk of the TLS Module that are common, because they're able to
// specify secrets. However... first, I don't think the TLS Module actually supported
// tls_secret_namespacing. Second, the Ambassador Module at least supports arbitrary
// origination context names -- _any_ key in the TLS dictionary will get turned into
// an origination context.
//
// I seriously doubt that either of these will actually affect anyone at this remove,
// but... yeah.
type ModuleSecrets struct {
	// Defaults mirrors the Module's "defaults" block; only the
	// tls_secret_namespacing flag matters for secret resolution.
	Defaults struct {
		TLSSecretNamespacing bool `json:"tls_secret_namespacing"`
	} `json:"defaults"`
	// Upstream, Server, and Client are the three conventional TLS
	// origination contexts; each may name a secret.
	Upstream struct {
		Secret string `json:"secret"`
	} `json:"upstream"`
	Server struct {
		Secret string `json:"secret"`
	} `json:"server"`
	Client struct {
		Secret string `json:"secret"`
	} `json:"client"`
}
|
package main
import (
"fmt"
"log"
"os"
"os/exec"
"regexp"
"sort"
"strconv"
"strings"
"github.com/go-playground/locales"
"golang.org/x/text/unicode/cldr"
"text/template"
)
const (
	// locDir is the (printf-style) relative directory a generated locale
	// package is written into; %s is the locale/package name.
	locDir = "../%s"
	// locFilename is the path of a generated Go file within locDir;
	// the second %s is the file's base name.
	locFilename = locDir + "/%s.go"
)
var (
	// tfuncs are the helper functions made available to every
	// code-generation template.
	tfuncs = template.FuncMap{
		// is_multibyte reports whether the string is longer than one byte.
		"is_multibyte": func(s string) bool {
			return len([]byte(s)) > 1
		},
		// reverse_bytes returns the Go-syntax literal for the string's
		// bytes in reverse order.
		"reverse_bytes": func(s string) string {
			b := make([]byte, 0, 8)
			for j := len(s) - 1; j >= 0; j-- {
				b = append(b, s[j])
			}
			return fmt.Sprintf("%#v", b)
		},
		// byte_count returns the total byte length of all arguments,
		// formatted as a decimal string.
		"byte_count": func(s ...string) string {
			var count int
			for i := 0; i < len(s); i++ {
				count += len([]byte(s[i]))
			}
			return strconv.Itoa(count)
		},
	}
	// prVarFuncs maps CLDR plural-rule operand names to the Go statements
	// that compute them inside a generated plural function.
	prVarFuncs = map[string]string{
		"n": "n := math.Abs(num)\n",
		"i": "i := int64(n)\n",
		// "v": "v := ...", // inherently available as argument
		"w": "w := locales.W(n, v)\n",
		"f": "f := locales.F(n, v)\n",
		"t": "t := locales.T(n, v)\n",
	}
	translators         = make(map[string]*translator) // every locale, keyed by full locale name
	baseTranslators     = make(map[string]*translator) // base locales only (e.g. "en"), keyed by base name
	globalCurrenciesMap = make(map[string]struct{})    // set of every currency code seen, for mapping to enum
	globCurrencyIdxMap  = make(map[string]int)         // ["USD"] = 0 -- currency code to enum index
	globalCurrencies    = make([]string, 0, 100)       // array of currency codes; index maps to enum
	tmpl                *template.Template             // parsed *.tmpl templates, loaded in main
	// regexes used while parsing CLDR plural rules and number-format patterns
	nModRegex              = regexp.MustCompile("(n%[0-9]+)")
	iModRegex              = regexp.MustCompile("(i%[0-9]+)")
	wModRegex              = regexp.MustCompile("(w%[0-9]+)")
	fModRegex              = regexp.MustCompile("(f%[0-9]+)")
	tModRegex              = regexp.MustCompile("(t%[0-9]+)")
	groupLenRegex          = regexp.MustCompile(",([0-9#]+)\\.")
	groupLenPercentRegex   = regexp.MustCompile(",([0-9#]+)$")
	secondaryGroupLenRegex = regexp.MustCompile(",([0-9#]+),")
	requiredNumRegex       = regexp.MustCompile("([0-9]+)\\.")
	requiredDecimalRegex   = regexp.MustCompile("\\.([0-9]+)")
)
// translator accumulates everything needed to generate one locale package:
// raw CLDR-derived strings, generated function bodies, and format metadata.
// Most string fields hold Go-source snippets (often %#v-rendered literals)
// that are pasted directly into the templates.
type translator struct {
	Locale         string // full locale name, e.g. "en_US"
	BaseLocale     string // base language, e.g. "en"
	Plurals        string
	CardinalFunc   string
	PluralsOrdinal string
	OrdinalFunc    string
	PluralsRange   string
	RangeFunc      string
	Decimal        string // decimal separator symbol
	Group          string // grouping separator symbol
	Minus          string
	Percent        string
	PerMille       string
	TimeSeparator  string
	Infinity       string
	Currencies     string // %#v-rendered []string of currency symbols, indexed by enum
	// FmtNumber vars
	FmtNumberExists            bool
	FmtNumberGroupLen          int
	FmtNumberSecondaryGroupLen int
	FmtNumberMinDecimalLen     int
	// FmtPercent vars
	FmtPercentExists            bool
	FmtPercentGroupLen          int
	FmtPercentSecondaryGroupLen int
	FmtPercentMinDecimalLen     int
	FmtPercentPrefix            string
	FmtPercentSuffix            string
	FmtPercentInPrefix          bool
	FmtPercentLeft              bool
	// FmtCurrency vars
	FmtCurrencyExists            bool
	FmtCurrencyGroupLen          int
	FmtCurrencySecondaryGroupLen int
	FmtCurrencyMinDecimalLen     int
	FmtCurrencyPrefix            string
	FmtCurrencySuffix            string
	FmtCurrencyInPrefix          bool
	FmtCurrencyLeft              bool
	FmtCurrencyNegativeExists    bool
	FmtCurrencyNegativePrefix    string
	FmtCurrencyNegativeSuffix    string
	FmtCurrencyNegativeInPrefix  bool
	FmtCurrencyNegativeLeft      bool
	// Date & Time
	FmtCalendarExists     bool
	FmtMonthsAbbreviated  string
	FmtMonthsNarrow       string
	FmtMonthsWide         string
	FmtDaysAbbreviated    string
	FmtDaysNarrow         string
	FmtDaysShort          string
	FmtDaysWide           string
	FmtPeriodsAbbreviated string
	FmtPeriodsNarrow      string
	FmtPeriodsShort       string
	FmtPeriodsWide        string
	FmtErasAbbreviated    string
	FmtErasNarrow         string
	FmtErasWide           string
	FmtTimezones          string
	// calculation only fields below this point...
	DecimalNumberFormat          string
	PercentNumberFormat          string
	CurrencyNumberFormat         string
	NegativeCurrencyNumberFormat string
	// Dates
	FmtDateFull   string
	FmtDateLong   string
	FmtDateMedium string
	FmtDateShort  string
	// Times
	FmtTimeFull   string
	FmtTimeLong   string
	FmtTimeMedium string
	FmtTimeShort  string
	// timezones per locale by type
	timezones map[string]*zoneAbbrev // key = type eg. America_Eastern zone Abbrev will be long form eg. Eastern Standard Time, Pacific Standard Time.....
}
// zoneAbbrev holds the standard-time and daylight-time names for one metazone.
type zoneAbbrev struct {
	standard string // standard-time name, short or long form depending on the map it's in
	daylight string // daylight-time name; backfilled from standard when absent
}

var timezones = map[string]*zoneAbbrev{} // key = type eg. America_Eastern zone Abbrev eg. EST & EDT
// main loads the code-generation templates and the CLDR data, then emits one
// Go package per locale (plus the shared currency enum), formatting each
// generated file with goimports and gofmt -s.
func main() {
	var err error

	// load template
	tmpl, err = template.New("all").Funcs(tfuncs).ParseGlob("*.tmpl")
	if err != nil {
		log.Fatal(err)
	}

	// load CLDR resources
	var decoder cldr.Decoder
	data, err := decoder.DecodePath("data/core")
	if err != nil {
		panic(err)
	}

	preProcess(data)
	postProcess(data)

	// Build the currency enum body; the first entry carries "Type = iota".
	var currencies string
	for i, curr := range globalCurrencies {
		if i == 0 {
			currencies = curr + " Type = iota\n"
			continue
		}
		currencies += curr + "\n"
	}

	if err = os.MkdirAll(fmt.Sprintf(locDir, "currency"), 0777); err != nil {
		log.Fatal(err)
	}
	writeAndFormat(fmt.Sprintf(locFilename, "currency", "currency"), "currencies", currencies)

	// NOTE(review): locMap is accumulated but never written out inside this
	// function; kept for parity with the original code -- confirm whether a
	// locale-map file write went missing.
	var locMap string
	for _, trans := range translators {
		locMap += `"` + trans.Locale + `" : ` + trans.Locale + `.New,
`
		fmt.Println("Writing Data:", trans.Locale)

		if err = os.MkdirAll(fmt.Sprintf(locDir, trans.Locale), 0777); err != nil {
			log.Fatal(err)
		}
		writeAndFormat(fmt.Sprintf(locFilename, trans.Locale, trans.Locale), "translator", trans)

		// Generate a test file only if one doesn't already exist; existing
		// (possibly hand-edited) tests are never overwritten.
		filename := fmt.Sprintf(locFilename, trans.Locale, trans.Locale+"_test")
		if _, err := os.Stat(filename); err == nil {
			fmt.Println("*************** test file exists, skipping:", filename)
			continue
		}
		writeAndFormat(filename, "tests", trans)
	}
}

// writeAndFormat renders template tmplName with data into filename, closing
// the file before formatting, then runs goimports and gofmt -s on it so the
// generated source is canonical. Any failure aborts the run.
//
// This replaces three copies of the same create/execute/close/format sequence
// that previously used `defer output.Close()` inside a loop -- which kept
// every file descriptor open until main returned and double-closed each file.
func writeAndFormat(filename, tmplName string, data interface{}) {
	output, err := os.Create(filename)
	if err != nil {
		log.Fatal(err)
	}
	if err := tmpl.ExecuteTemplate(output, tmplName, data); err != nil {
		output.Close()
		log.Fatal(err)
	}
	// Close (and check the flush-to-disk error) before the formatters
	// rewrite the file in place.
	if err := output.Close(); err != nil {
		log.Fatal(err)
	}
	// after file written run goimports/gofmt on file to ensure best
	// formatting; gofmt -s applies simplifications goimports has no flag for
	if err := exec.Command("goimports", "-w", filename).Run(); err != nil {
		log.Panic(err)
	}
	if err := exec.Command("gofmt", "-s", "-w", filename).Run(); err != nil {
		log.Panic(err)
	}
}
// ApplyOverrides applies hand-maintained fixups for locales whose CLDR data
// isn't what we want in the generated code.
func ApplyOverrides(trans *translator) {
	switch trans.BaseLocale {
	case "ru":
		// Override the CLDR percent pattern for Russian-based locales.
		trans.PercentNumberFormat = "#,##0%"
	}
}
// postProcess runs after preProcess has gathered every locale: it derives the
// plural-rule functions, inherits any still-empty fields from the base locale,
// builds each locale's currency-symbol table, and finalizes timezone data and
// date/time format code.
func postProcess(cldr *cldr.CLDR) {
	// A metazone with no DST abbreviation reuses the standard one.
	for _, v := range timezones {
		if len(v.daylight) == 0 {
			v.daylight = v.standard
		}
	}

	var base *translator
	var found bool

	for _, trans := range translators {
		fmt.Println("Post Processing:", trans.Locale)

		// cardinal, ordinal, and range plural rules all key off the base locale
		trans.CardinalFunc, trans.Plurals = parseCardinalPluralRuleFunc(cldr, trans.BaseLocale)
		trans.OrdinalFunc, trans.PluralsOrdinal = parseOrdinalPluralRuleFunc(cldr, trans.BaseLocale)
		trans.RangeFunc, trans.PluralsRange = parseRangePluralRuleFunc(cldr, trans.BaseLocale)

		// base locales have nothing to inherit from
		if trans.BaseLocale == trans.Locale {
			found = false
		} else {
			base, found = baseTranslators[trans.BaseLocale]
		}

		// Inherit every still-empty string field from the base locale. This
		// table replaces the previous wall of near-identical if-blocks (and
		// drops their dead `trans.X = ""` no-op branches).
		if found {
			for _, in := range []struct {
				dst *string
				src string
			}{
				// number symbols
				{&trans.Decimal, base.Decimal},
				{&trans.Group, base.Group},
				{&trans.Minus, base.Minus},
				{&trans.Percent, base.Percent},
				{&trans.PerMille, base.PerMille},
				{&trans.TimeSeparator, base.TimeSeparator},
				{&trans.Infinity, base.Infinity},
				// number format patterns
				{&trans.DecimalNumberFormat, base.DecimalNumberFormat},
				{&trans.PercentNumberFormat, base.PercentNumberFormat},
				{&trans.CurrencyNumberFormat, base.CurrencyNumberFormat},
				{&trans.NegativeCurrencyNumberFormat, base.NegativeCurrencyNumberFormat},
				// date values
				{&trans.FmtDateFull, base.FmtDateFull},
				{&trans.FmtDateLong, base.FmtDateLong},
				{&trans.FmtDateMedium, base.FmtDateMedium},
				{&trans.FmtDateShort, base.FmtDateShort},
				// time values
				{&trans.FmtTimeFull, base.FmtTimeFull},
				{&trans.FmtTimeLong, base.FmtTimeLong},
				{&trans.FmtTimeMedium, base.FmtTimeMedium},
				{&trans.FmtTimeShort, base.FmtTimeShort},
				// month values
				{&trans.FmtMonthsAbbreviated, base.FmtMonthsAbbreviated},
				{&trans.FmtMonthsNarrow, base.FmtMonthsNarrow},
				{&trans.FmtMonthsWide, base.FmtMonthsWide},
				// day values
				{&trans.FmtDaysAbbreviated, base.FmtDaysAbbreviated},
				{&trans.FmtDaysNarrow, base.FmtDaysNarrow},
				{&trans.FmtDaysShort, base.FmtDaysShort},
				{&trans.FmtDaysWide, base.FmtDaysWide},
				// period values
				{&trans.FmtPeriodsAbbreviated, base.FmtPeriodsAbbreviated},
				{&trans.FmtPeriodsNarrow, base.FmtPeriodsNarrow},
				{&trans.FmtPeriodsShort, base.FmtPeriodsShort},
				{&trans.FmtPeriodsWide, base.FmtPeriodsWide},
				// era values
				{&trans.FmtErasAbbreviated, base.FmtErasAbbreviated},
				{&trans.FmtErasNarrow, base.FmtErasNarrow},
				{&trans.FmtErasWide, base.FmtErasWide},
			} {
				if len(*in.dst) == 0 {
					*in.dst = in.src
				}
			}
		}

		// Currencies: start from the global currency-code table, then overlay
		// any locale-specific symbols present in the CLDR data.
		currencies := make([]string, len(globalCurrencies))
		for code, idx := range globCurrencyIdxMap {
			currencies[idx] = code
		}

		ldml := cldr.RawLDML(trans.Locale)

		// some just have no data...
		if ldml.Numbers != nil && ldml.Numbers.Currencies != nil {
			for _, currency := range ldml.Numbers.Currencies.Currency {
				if len(currency.Symbol) == 0 {
					continue
				}
				if len(currency.Symbol[0].Data()) == 0 {
					continue
				}
				if len(currency.Type) == 0 {
					continue
				}
				currencies[globCurrencyIdxMap[currency.Type]] = currency.Symbol[0].Data()
			}
		}
		trans.Currencies = fmt.Sprintf("%#v", currencies)

		// timezones: inherit the base locale's map wholesale if we have none...
		if len(trans.timezones) == 0 && found {
			trans.timezones = base.timezones
		}
		// ...and make sure every base timezone is present in the sub-locale's map
		if found {
			for k, v := range base.timezones {
				if _, ok := trans.timezones[k]; !ok {
					trans.timezones[k] = v
				}
			}
		}

		ApplyOverrides(trans)

		parseDecimalNumberFormat(trans)
		parsePercentNumberFormat(trans)
		parseCurrencyNumberFormat(trans)
	}

	for _, trans := range translators {
		fmt.Println("Final Processing:", trans.Locale)

		// if it's still nil at this point, give it an empty map
		if trans.timezones == nil {
			trans.timezones = make(map[string]*zoneAbbrev)
		}

		// tz maps the global abbreviated zone names (EST, EDT, MST, PST...) to
		// this locale's long names (Eastern Standard Time, Pacific Time...),
		// backfilling the locale's map from the global data where needed.
		tz := make(map[string]string)
		for k, v := range timezones {
			ttz, ok := trans.timezones[k]
			if !ok {
				ttz = v
				trans.timezones[k] = v
			}
			tz[v.standard] = ttz.standard
			tz[v.daylight] = ttz.daylight
		}
		trans.FmtTimezones = fmt.Sprintf("%#v", tz)

		if len(trans.TimeSeparator) == 0 {
			trans.TimeSeparator = ":"
		}

		trans.FmtDateShort, trans.FmtDateMedium, trans.FmtDateLong, trans.FmtDateFull = parseDateFormats(trans, trans.FmtDateShort, trans.FmtDateMedium, trans.FmtDateLong, trans.FmtDateFull)
		trans.FmtTimeShort, trans.FmtTimeMedium, trans.FmtTimeLong, trans.FmtTimeFull = parseDateFormats(trans, trans.FmtTimeShort, trans.FmtTimeMedium, trans.FmtTimeLong, trans.FmtTimeFull)
	}
}
// preProcess makes the first pass over the raw CLDR data: it builds the
// translator maps and extracts number symbols, format patterns, currency
// codes, timezone names, and gregorian calendar data for every locale.
// (Maps, arrays etc. just require multiple passes, no choice -- postProcess
// finishes the job.)
func preProcess(cldrVar *cldr.CLDR) {
	for _, l := range cldrVar.Locales() {
		fmt.Println("Pre Processing:", l)

		// "en_US" -> base locale "en"
		split := strings.SplitN(l, "_", 2)
		baseLocale := split[0]

		trans := &translator{
			Locale:     l,
			BaseLocale: baseLocale,
		}

		// a locale with no suffix is itself a base locale
		if len(split) == 1 {
			baseTranslators[baseLocale] = trans
		}
		translators[l] = trans

		// get number, currency and datetime symbols
		ldml := cldrVar.RawLDML(l)

		// some just have no data...
		if ldml.Numbers != nil {
			if len(ldml.Numbers.Symbols) > 0 {
				symbol := ldml.Numbers.Symbols[0]
				if len(symbol.Decimal) > 0 {
					trans.Decimal = symbol.Decimal[0].Data()
				}
				if len(symbol.Group) > 0 {
					trans.Group = symbol.Group[0].Data()
				}
				if len(symbol.MinusSign) > 0 {
					trans.Minus = symbol.MinusSign[0].Data()
				}
				if len(symbol.PercentSign) > 0 {
					trans.Percent = symbol.PercentSign[0].Data()
				}
				if len(symbol.PerMille) > 0 {
					trans.PerMille = symbol.PerMille[0].Data()
				}
				if len(symbol.TimeSeparator) > 0 {
					trans.TimeSeparator = symbol.TimeSeparator[0].Data()
				}
				if len(symbol.Infinity) > 0 {
					trans.Infinity = symbol.Infinity[0].Data()
				}
			}

			// collect every currency code seen anywhere, for the global enum
			if ldml.Numbers.Currencies != nil {
				for _, currency := range ldml.Numbers.Currencies.Currency {
					if len(strings.TrimSpace(currency.Type)) == 0 {
						continue
					}
					globalCurrenciesMap[currency.Type] = struct{}{}
				}
			}

			// the default (untyped) decimal format pattern
			if len(ldml.Numbers.DecimalFormats) > 0 && len(ldml.Numbers.DecimalFormats[0].DecimalFormatLength) > 0 {
				for _, dfl := range ldml.Numbers.DecimalFormats[0].DecimalFormatLength {
					if len(dfl.Type) == 0 {
						trans.DecimalNumberFormat = dfl.DecimalFormat[0].Pattern[0].Data()
						break
					}
				}
			}

			// the default (untyped) percent format pattern
			if len(ldml.Numbers.PercentFormats) > 0 && len(ldml.Numbers.PercentFormats[0].PercentFormatLength) > 0 {
				for _, dfl := range ldml.Numbers.PercentFormats[0].PercentFormatLength {
					if len(dfl.Type) == 0 {
						trans.PercentNumberFormat = dfl.PercentFormat[0].Pattern[0].Data()
						break
					}
				}
			}

			// currency format pattern; a ";"-separated second half, when
			// present and non-empty, is the negative pattern -- otherwise the
			// negative pattern defaults to the positive one
			if len(ldml.Numbers.CurrencyFormats) > 0 && len(ldml.Numbers.CurrencyFormats[0].CurrencyFormatLength) > 0 {
				if len(ldml.Numbers.CurrencyFormats[0].CurrencyFormatLength[0].CurrencyFormat) > 1 {
					split := strings.SplitN(ldml.Numbers.CurrencyFormats[0].CurrencyFormatLength[0].CurrencyFormat[1].Pattern[0].Data(), ";", 2)
					trans.CurrencyNumberFormat = split[0]
					if len(split) > 1 && len(split[1]) > 0 {
						trans.NegativeCurrencyNumberFormat = split[1]
					} else {
						trans.NegativeCurrencyNumberFormat = trans.CurrencyNumberFormat
					}
				} else {
					trans.CurrencyNumberFormat = ldml.Numbers.CurrencyFormats[0].CurrencyFormatLength[0].CurrencyFormat[0].Pattern[0].Data()
					trans.NegativeCurrencyNumberFormat = trans.CurrencyNumberFormat
				}
			}
		}

		if ldml.Dates != nil {
			if ldml.Dates.TimeZoneNames != nil {
				for _, zone := range ldml.Dates.TimeZoneNames.Metazone {
					// short names (EST, EDT, ...) go into the global table
					for _, short := range zone.Short {
						if len(short.Standard) == 0 && len(short.Daylight) == 0 {
							continue
						}
						za, ok := timezones[zone.Type]
						if !ok {
							za = new(zoneAbbrev)
							timezones[zone.Type] = za
						}
						if len(short.Standard) > 0 {
							za.standard = short.Standard[0].Data()
						}
						if len(short.Daylight) > 0 {
							za.daylight = short.Daylight[0].Data()
						}
					}
					// long names are per-locale; a missing daylight name falls
					// back to whatever standard name we have
					for _, long := range zone.Long {
						if trans.timezones == nil {
							trans.timezones = make(map[string]*zoneAbbrev)
						}
						za, ok := trans.timezones[zone.Type]
						if !ok {
							za = new(zoneAbbrev)
							trans.timezones[zone.Type] = za
						}
						if len(long.Standard) > 0 {
							za.standard = long.Standard[0].Data()
						}
						if len(long.Daylight) > 0 {
							za.daylight = long.Daylight[0].Data()
						} else {
							za.daylight = za.standard
						}
					}
				}
			}

			if ldml.Dates.Calendars != nil {
				// we only generate code for the gregorian calendar; like the
				// original, the LAST gregorian entry wins if there are several
				var calendar *cldr.Calendar
				for _, cal := range ldml.Dates.Calendars.Calendar {
					if cal.Type == "gregorian" {
						calendar = cal
					}
				}

				if calendar != nil {
					if calendar.DateFormats != nil {
						for _, datefmt := range calendar.DateFormats.DateFormatLength {
							switch datefmt.Type {
							case "full":
								trans.FmtDateFull = datefmt.DateFormat[0].Pattern[0].Data()
							case "long":
								trans.FmtDateLong = datefmt.DateFormat[0].Pattern[0].Data()
							case "medium":
								trans.FmtDateMedium = datefmt.DateFormat[0].Pattern[0].Data()
							case "short":
								trans.FmtDateShort = datefmt.DateFormat[0].Pattern[0].Data()
							}
						}
					}

					if calendar.TimeFormats != nil {
						for _, datefmt := range calendar.TimeFormats.TimeFormatLength {
							switch datefmt.Type {
							case "full":
								trans.FmtTimeFull = datefmt.TimeFormat[0].Pattern[0].Data()
							case "long":
								trans.FmtTimeLong = datefmt.TimeFormat[0].Pattern[0].Data()
							case "medium":
								trans.FmtTimeMedium = datefmt.TimeFormat[0].Pattern[0].Data()
							case "short":
								trans.FmtTimeShort = datefmt.TimeFormat[0].Pattern[0].Data()
							}
						}
					}

					if calendar.Months != nil {
						// month context starts at 'format', but there is also a
						// 'stand-alone' context; 'stand-alone' is used if, and
						// only if, 'format' provided no value (first write wins)
						var abbrSet, narrSet, wideSet bool
						for _, monthctx := range calendar.Months.MonthContext {
							for _, months := range monthctx.MonthWidth {
								var monthData []string
								for _, m := range months.Month {
									if len(m.Data()) == 0 {
										continue
									}
									// only real month numbers count; anything
									// else is ignored
									switch m.Type {
									case "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12":
										monthData = append(monthData, m.Data())
									}
								}
								if len(monthData) > 0 {
									// prepend an empty entry so array indexes
									// line up with month numbers; an extra
									// empty value is way faster than a switch
									// over all type values at runtime
									monthData = append(make([]string, 1, len(monthData)+1), monthData...)
									switch months.Type {
									case "abbreviated":
										if !abbrSet {
											abbrSet = true
											trans.FmtMonthsAbbreviated = fmt.Sprintf("%#v", monthData)
										}
									case "narrow":
										if !narrSet {
											narrSet = true
											trans.FmtMonthsNarrow = fmt.Sprintf("%#v", monthData)
										}
									case "wide":
										if !wideSet {
											wideSet = true
											trans.FmtMonthsWide = fmt.Sprintf("%#v", monthData)
										}
									}
								}
							}
						}
					}

					if calendar.Days != nil {
						// day context starts at 'format', but there is also a
						// 'stand-alone' context; 'stand-alone' is used if, and
						// only if, 'format' provided no value (first write wins)
						var abbrSet, narrSet, shortSet, wideSet bool
						for _, dayctx := range calendar.Days.DayContext {
							for _, days := range dayctx.DayWidth {
								var dayData []string
								for _, d := range days.Day {
									// only the seven real day codes count
									switch d.Type {
									case "sun", "mon", "tue", "wed", "thu", "fri", "sat":
										dayData = append(dayData, d.Data())
									}
								}
								if len(dayData) > 0 {
									switch days.Type {
									case "abbreviated":
										if !abbrSet {
											abbrSet = true
											trans.FmtDaysAbbreviated = fmt.Sprintf("%#v", dayData)
										}
									case "narrow":
										if !narrSet {
											narrSet = true
											trans.FmtDaysNarrow = fmt.Sprintf("%#v", dayData)
										}
									case "short":
										if !shortSet {
											shortSet = true
											trans.FmtDaysShort = fmt.Sprintf("%#v", dayData)
										}
									case "wide":
										if !wideSet {
											wideSet = true
											trans.FmtDaysWide = fmt.Sprintf("%#v", dayData)
										}
									}
								}
							}
						}
					}

					if calendar.DayPeriods != nil {
						// day period context starts at 'format', but there is
						// also a 'stand-alone' context; 'stand-alone' is used
						// if, and only if, 'format' provided no value
						var abbrSet, narrSet, shortSet, wideSet bool
						for _, ctx := range calendar.DayPeriods.DayPeriodContext {
							for _, width := range ctx.DayPeriodWidth {
								// [0] = AM, [1] = PM
								ampm := make([]string, 2)
								for _, d := range width.DayPeriod {
									if d.Type == "am" {
										ampm[0] = d.Data()
										continue
									}
									if d.Type == "pm" {
										ampm[1] = d.Data()
									}
								}
								switch width.Type {
								case "abbreviated":
									if !abbrSet {
										abbrSet = true
										trans.FmtPeriodsAbbreviated = fmt.Sprintf("%#v", ampm)
									}
								case "narrow":
									if !narrSet {
										narrSet = true
										trans.FmtPeriodsNarrow = fmt.Sprintf("%#v", ampm)
									}
								case "short":
									if !shortSet {
										shortSet = true
										trans.FmtPeriodsShort = fmt.Sprintf("%#v", ampm)
									}
								case "wide":
									if !wideSet {
										wideSet = true
										trans.FmtPeriodsWide = fmt.Sprintf("%#v", ampm)
									}
								}
							}
						}
					}

					if calendar.Eras != nil {
						// [0] = BC-era name, [1] = AD-era name; CLDR lists
						// either 2 entries or 4 (with alternates at odd indexes)
						abbrev := make([]string, 2)
						narr := make([]string, 2)
						wide := make([]string, 2)

						if calendar.Eras.EraAbbr != nil {
							switch len(calendar.Eras.EraAbbr.Era) {
							case 4:
								abbrev[0] = calendar.Eras.EraAbbr.Era[0].Data()
								abbrev[1] = calendar.Eras.EraAbbr.Era[2].Data()
							case 2:
								abbrev[0] = calendar.Eras.EraAbbr.Era[0].Data()
								abbrev[1] = calendar.Eras.EraAbbr.Era[1].Data()
							}
						}
						if calendar.Eras.EraNarrow != nil {
							switch len(calendar.Eras.EraNarrow.Era) {
							case 4:
								narr[0] = calendar.Eras.EraNarrow.Era[0].Data()
								narr[1] = calendar.Eras.EraNarrow.Era[2].Data()
							case 2:
								narr[0] = calendar.Eras.EraNarrow.Era[0].Data()
								narr[1] = calendar.Eras.EraNarrow.Era[1].Data()
							}
						}
						if calendar.Eras.EraNames != nil {
							switch len(calendar.Eras.EraNames.Era) {
							case 4:
								wide[0] = calendar.Eras.EraNames.Era[0].Data()
								wide[1] = calendar.Eras.EraNames.Era[2].Data()
							case 2:
								wide[0] = calendar.Eras.EraNames.Era[0].Data()
								wide[1] = calendar.Eras.EraNames.Era[1].Data()
							}
						}

						trans.FmtErasAbbreviated = fmt.Sprintf("%#v", abbrev)
						trans.FmtErasNarrow = fmt.Sprintf("%#v", narr)
						trans.FmtErasWide = fmt.Sprintf("%#v", wide)
					}
				}
			}
		}
	}

	// build the sorted global currency list and its code -> enum-index map
	for k := range globalCurrenciesMap {
		globalCurrencies = append(globalCurrencies, k)
	}
	sort.Strings(globalCurrencies)
	for i, loc := range globalCurrencies {
		globCurrencyIdxMap[loc] = i
	}
}
// parseDateFormats converts the four CLDR date (or time) patterns of a locale
// into generated-code snippets, one per pattern length. The third argument to
// parseDateTimeFormat is the era score: 2 for short/medium, 1 for long, 0 for
// full, matching the original behavior.
func parseDateFormats(trans *translator, shortFormat, mediumFormat, longFormat, fullFormat string) (short, medium, long, full string) {
	base := trans.BaseLocale
	short = parseDateTimeFormat(base, shortFormat, 2)
	medium = parseDateTimeFormat(base, mediumFormat, 2)
	long = parseDateTimeFormat(base, longFormat, 1)
	full = parseDateTimeFormat(base, fullFormat, 0)
	return short, medium, long, full
}
// parseDateTimeFormat compiles a CLDR date/time pattern (eg. "dd/MM/y HH:mm")
// into Go source code that appends the formatted value of a time.Time `t` to
// a byte slice `b`; the templates splice this source into each locale's
// formatting functions.
//
// baseLocale names the locale variable the generated code indexes for
// translated names (months, days, day periods, eras, timezones).
//
// eraScore selects the era form for the 'G' pattern letter:
// 0 = wide (full format), 1 or 2 = abbreviated (long/medium/short).
//
// Unrecognized characters accumulate as constant text and are emitted
// verbatim; single quotes escape literals per CLDR ('' is an apostrophe).
//
// FIX: the generated 12-hour ('h') code previously used
// `if h > 12 { h -= 12 }`, which leaves midnight as 0; on a 12-hour clock
// the hour runs 1-12, so hour 0 must render as 12.
func parseDateTimeFormat(baseLocale, format string, eraScore uint8) (results string) {

	// rules:
	// y = four digit year
	// yy = two digit year

	// var b []byte

	var inConstantText bool
	var start int

	for i := 0; i < len(format); i++ {

		switch format[i] {

		// time separator
		case ':':
			if inConstantText {
				inConstantText = false
				results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:i])) + "...)\n"
			}

			results += "b = append(b, " + baseLocale + ".timeSeparator...)"

		// quoted literal text
		case '\'':
			i++
			startI := i

			// peek to see if '' (an escaped literal apostrophe)
			if len(format) != i && format[i] == '\'' {

				if inConstantText {
					inConstantText = false
					results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:i-1])) + "...)\n"
				} else {
					inConstantText = true
					start = i
				}
				continue
			}

			// not '' so whatever comes between '' is constant
			if len(format) != i {

				// advance i to the next single quote + 1
				for ; i < len(format); i++ {
					if format[i] == '\'' {

						if inConstantText {
							inConstantText = false
							b := []byte(format[start : startI-1])
							b = append(b, []byte(format[startI:i])...)
							results += "b = append(b, " + fmt.Sprintf("%#v", b) + "...)\n"
						} else {
							results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[startI:i])) + "...)\n"
						}
						break
					}
				}
			}

		// 24 hour
		case 'H':
			if inConstantText {
				inConstantText = false
				results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:i])) + "...)\n"
			}

			// peek
			// two digit hour required?
			if len(format) != i+1 && format[i+1] == 'H' {
				i++
				results += `
if t.Hour() < 10 {
b = append(b, '0')
}
`
			}

			results += "b = strconv.AppendInt(b, int64(t.Hour()), 10)\n"

		// hour (12-hour clock, 1-12)
		case 'h':
			if inConstantText {
				inConstantText = false
				results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:i])) + "...)\n"
			}

			// 0 -> 12 (midnight), 12 -> 12 (noon), 13 -> 1, ...
			results += "\nh := t.Hour() % 12\nif h == 0 {\n\th = 12\n}\n"

			// peek
			// two digit hour required?
			if len(format) != i+1 && format[i+1] == 'h' {
				i++
				results += `
if h < 10 {
b = append(b, '0')
}
`
			}

			results += "b = strconv.AppendInt(b, int64(h), 10)\n"

		// minute
		case 'm':
			if inConstantText {
				inConstantText = false
				results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:i])) + "...)\n"
			}

			// peek
			// two digit minute required?
			if len(format) != i+1 && format[i+1] == 'm' {
				i++
				results += `
if t.Minute() < 10 {
b = append(b, '0')
}
`
			}

			results += "b = strconv.AppendInt(b, int64(t.Minute()), 10)\n"

		// second
		case 's':
			if inConstantText {
				inConstantText = false
				results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:i])) + "...)\n"
			}

			// peek
			// two digit second required?
			if len(format) != i+1 && format[i+1] == 's' {
				i++
				results += `
if t.Second() < 10 {
b = append(b, '0')
}
`
			}

			results += "b = strconv.AppendInt(b, int64(t.Second()), 10)\n"

		// day period
		case 'a':
			if inConstantText {
				inConstantText = false
				results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:i])) + "...)\n"
			}

			// only used with 'h', patterns should not contains 'a' without 'h' so not checking
			// choosing to use abbreviated, didn't see any rules about which should be used with which
			// date format....
			results += `
if t.Hour() < 12 {
b = append(b, ` + baseLocale + `.periodsAbbreviated[0]...)
} else {
b = append(b, ` + baseLocale + `.periodsAbbreviated[1]...)
}
`

		// timezone
		case 'z', 'v':
			if inConstantText {
				inConstantText = false
				results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:i])) + "...)\n"
			}

			// consume multiple, only handling Abbrev tz from time.Time for the moment...
			var count int
			if format[i] == 'z' {
				for j := i; j < len(format); j++ {
					if format[j] == 'z' {
						count++
					} else {
						break
					}
				}
			}

			if format[i] == 'v' {
				for j := i; j < len(format); j++ {
					if format[j] == 'v' {
						count++
					} else {
						break
					}
				}
			}

			i += count - 1

			// using the timezone on the Go time object, eg. EST, EDT, MST.....
			if count < 4 {
				results += `
tz, _ := t.Zone()
b = append(b, tz...)
`
			} else {
				// 4+ letters: long localized name when the locale has one
				results += `
tz, _ := t.Zone()
if btz, ok := ` + baseLocale + `.timezones[tz]; ok {
b = append(b, btz...)
} else {
b = append(b, tz...)
}
`
			}

		// day
		case 'd':
			if inConstantText {
				inConstantText = false
				results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:i])) + "...)\n"
			}

			// peek
			// two digit day required?
			if len(format) != i+1 && format[i+1] == 'd' {
				i++
				results += `
if t.Day() < 10 {
b = append(b, '0')
}
`
			}

			results += "b = strconv.AppendInt(b, int64(t.Day()), 10)\n"

		// month
		case 'M':
			if inConstantText {
				inConstantText = false
				results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:i])) + "...)\n"
			}

			var count int

			// count # of M's
			for j := i; j < len(format); j++ {
				if format[j] == 'M' {
					count++
				} else {
					break
				}
			}

			switch count {

			// Numeric form, at least 1 digit
			case 1:
				results += "b = strconv.AppendInt(b, int64(t.Month()), 10)\n"

			// Number form, at least 2 digits (padding with 0)
			case 2:
				results += `
if t.Month() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Month()), 10)
`

			// Abbreviated form
			case 3:
				results += "b = append(b, " + baseLocale + ".monthsAbbreviated[t.Month()]...)\n"

			// Full/Wide form
			case 4:
				results += "b = append(b, " + baseLocale + ".monthsWide[t.Month()]...)\n"

			// Narrow form - only used in where context makes it clear, such as headers in a calendar.
			// Should be one character wherever possible.
			case 5:
				results += "b = append(b, " + baseLocale + ".monthsNarrow[t.Month()]...)\n"
			}

			// skip over M's
			i += count - 1

		// year
		case 'y':
			if inConstantText {
				inConstantText = false
				results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:i])) + "...)\n"
			}

			// peek
			// two digit year
			if len(format) != i+1 && format[i+1] == 'y' {
				i++
				// NOTE(review): this slices the decimal digits of the year and is
				// only a true two-digit year for 4-digit years; confirm whether
				// t.Year()%100 with zero padding was intended for other ranges.
				results += `
if t.Year() > 9 {
b = append(b, strconv.Itoa(t.Year())[2:]...)
} else {
b = append(b, strconv.Itoa(t.Year())[1:]...)
}
`
			} else {
				// four digit year
				results += "b = strconv.AppendInt(b, int64(t.Year()), 10)\n"
			}

		// weekday
		// I know I only see 'EEEE' in the xml, but just in case handled all posibilities
		case 'E':
			if inConstantText {
				inConstantText = false
				results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:i])) + "...)\n"
			}

			var count int

			// count # of E's
			for j := i; j < len(format); j++ {
				if format[j] == 'E' {
					count++
				} else {
					break
				}
			}

			switch count {

			// Narrow
			case 1:
				results += "b = append(b, " + baseLocale + ".daysNarrow[t.Weekday()]...)\n"

			// Short
			case 2:
				results += "b = append(b, " + baseLocale + ".daysShort[t.Weekday()]...)\n"

			// Abbreviated
			case 3:
				results += "b = append(b, " + baseLocale + ".daysAbbreviated[t.Weekday()]...)\n"

			// Full/Wide
			case 4:
				results += "b = append(b, " + baseLocale + ".daysWide[t.Weekday()]...)\n"
			}

			// skip over E's
			i += count - 1

		// era eg. AD, BC
		case 'G':
			if inConstantText {
				inConstantText = false
				results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:i])) + "...)\n"
			}

			switch eraScore {
			case 0:
				results += `
if t.Year() < 0 {
b = append(b, ` + baseLocale + `.erasWide[0]...)
} else {
b = append(b, ` + baseLocale + `.erasWide[1]...)
}
`
			case 1, 2:
				results += `
if t.Year() < 0 {
b = append(b, ` + baseLocale + `.erasAbbreviated[0]...)
} else {
b = append(b, ` + baseLocale + `.erasAbbreviated[1]...)
}
`
			}

		default:
			// append all non matched text as they are constants
			if !inConstantText {
				inConstantText = true
				start = i
			}
		}
	}

	// if we were inConstantText when the string ended, add what's left.
	if inConstantText {
		results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:])) + "...)\n"
	}

	return
}
// parseCurrencyNumberFormat extracts prefix/suffix, grouping and decimal
// metadata from the locale's CLDR currency number pattern — and from its
// negative variant — into the translator's FmtCurrency* fields, which the
// code templates later consume.
func parseCurrencyNumberFormat(trans *translator) {

	if len(trans.CurrencyNumberFormat) == 0 {
		return
	}

	trans.FmtCurrencyExists = true
	// when the negative pattern equals the positive one, the negative fields
	// are copied from the positive results instead of being re-parsed below
	negativeEqual := trans.CurrencyNumberFormat == trans.NegativeCurrencyNumberFormat

	// primary digit-group length, eg. ",##0." -> 3
	match := groupLenRegex.FindString(trans.CurrencyNumberFormat)
	if len(match) > 0 {
		trans.FmtCurrencyGroupLen = len(match) - 2
	}

	// minimum number of decimal digits, eg. ".00" -> 2
	match = requiredDecimalRegex.FindString(trans.CurrencyNumberFormat)
	if len(match) > 0 {
		trans.FmtCurrencyMinDecimalLen = len(match) - 1
	}

	// secondary digit-group length, eg. ",##," -> 2
	match = secondaryGroupLenRegex.FindString(trans.CurrencyNumberFormat)
	if len(match) > 0 {
		trans.FmtCurrencySecondaryGroupLen = len(match) - 2
	}

	idx := 0

	// prefix: everything before the first digit placeholder ('#' or '0')
	for idx = 0; idx < len(trans.CurrencyNumberFormat); idx++ {
		if trans.CurrencyNumberFormat[idx] == '#' || trans.CurrencyNumberFormat[idx] == '0' {
			trans.FmtCurrencyPrefix = trans.CurrencyNumberFormat[:idx]
			break
		}
	}

	// suffix: everything after the last digit placeholder
	for idx = len(trans.CurrencyNumberFormat) - 1; idx >= 0; idx-- {
		if trans.CurrencyNumberFormat[idx] == '#' || trans.CurrencyNumberFormat[idx] == '0' {
			idx++
			trans.FmtCurrencySuffix = trans.CurrencyNumberFormat[idx:]
			break
		}
	}

	// locate the currency placeholder '¤' in the prefix and strip it.
	// NOTE(review): this compares individual BYTES with '¤' (U+00A4, UTF-8
	// 0xC2 0xA4), so idx matches the symbol's second byte and is never 0 —
	// the idx == 0 branch (FmtCurrencyLeft = true) looks unreachable for a
	// prefix beginning with '¤'; confirm against the templates. The Replace
	// still removes the right rune because string(byte(0xA4)) re-encodes
	// to "¤".
	for idx = 0; idx < len(trans.FmtCurrencyPrefix); idx++ {
		if trans.FmtCurrencyPrefix[idx] == '¤' {

			trans.FmtCurrencyInPrefix = true
			trans.FmtCurrencyPrefix = strings.Replace(trans.FmtCurrencyPrefix, string(trans.FmtCurrencyPrefix[idx]), "", 1)

			if idx == 0 {
				trans.FmtCurrencyLeft = true
			} else {
				trans.FmtCurrencyLeft = false
			}

			break
		}
	}

	// same scan for a '¤' placed after the number (suffix); see NOTE above
	// about the byte-wise comparison.
	for idx = 0; idx < len(trans.FmtCurrencySuffix); idx++ {
		if trans.FmtCurrencySuffix[idx] == '¤' {

			trans.FmtCurrencyInPrefix = false
			trans.FmtCurrencySuffix = strings.Replace(trans.FmtCurrencySuffix, string(trans.FmtCurrencySuffix[idx]), "", 1)

			if idx == 0 {
				trans.FmtCurrencyLeft = true
			} else {
				trans.FmtCurrencyLeft = false
			}

			break
		}
	}

	// if len(trans.FmtCurrencyPrefix) > 0 {
	// 	trans.FmtCurrencyPrefix = fmt.Sprintf("%#v", []byte(trans.FmtCurrencyPrefix))
	// }

	// if len(trans.FmtCurrencySuffix) > 0 {
	// 	trans.FmtCurrencySuffix = fmt.Sprintf("%#v", []byte(trans.FmtCurrencySuffix))
	// }

	// no need to parse again if true....
	if negativeEqual {

		trans.FmtCurrencyNegativePrefix = trans.FmtCurrencyPrefix
		trans.FmtCurrencyNegativeSuffix = trans.FmtCurrencySuffix
		trans.FmtCurrencyNegativeInPrefix = trans.FmtCurrencyInPrefix
		trans.FmtCurrencyNegativeLeft = trans.FmtCurrencyLeft

		return
	}

	trans.FmtCurrencyNegativeExists = true

	// negative pattern prefix: everything before the first digit placeholder
	for idx = 0; idx < len(trans.NegativeCurrencyNumberFormat); idx++ {
		if trans.NegativeCurrencyNumberFormat[idx] == '#' || trans.NegativeCurrencyNumberFormat[idx] == '0' {
			trans.FmtCurrencyNegativePrefix = trans.NegativeCurrencyNumberFormat[:idx]
			break
		}
	}

	// negative pattern suffix: everything after the last digit placeholder
	for idx = len(trans.NegativeCurrencyNumberFormat) - 1; idx >= 0; idx-- {
		if trans.NegativeCurrencyNumberFormat[idx] == '#' || trans.NegativeCurrencyNumberFormat[idx] == '0' {
			idx++
			trans.FmtCurrencyNegativeSuffix = trans.NegativeCurrencyNumberFormat[idx:]
			break
		}
	}

	// strip '¤' from the negative prefix; byte-wise comparison as above.
	for idx = 0; idx < len(trans.FmtCurrencyNegativePrefix); idx++ {
		if trans.FmtCurrencyNegativePrefix[idx] == '¤' {

			trans.FmtCurrencyNegativeInPrefix = true
			trans.FmtCurrencyNegativePrefix = strings.Replace(trans.FmtCurrencyNegativePrefix, string(trans.FmtCurrencyNegativePrefix[idx]), "", 1)

			if idx == 0 {
				trans.FmtCurrencyNegativeLeft = true
			} else {
				trans.FmtCurrencyNegativeLeft = false
			}

			break
		}
	}

	// strip '¤' from the negative suffix; byte-wise comparison as above.
	for idx = 0; idx < len(trans.FmtCurrencyNegativeSuffix); idx++ {
		if trans.FmtCurrencyNegativeSuffix[idx] == '¤' {

			trans.FmtCurrencyNegativeInPrefix = false
			trans.FmtCurrencyNegativeSuffix = strings.Replace(trans.FmtCurrencyNegativeSuffix, string(trans.FmtCurrencyNegativeSuffix[idx]), "", 1)

			if idx == 0 {
				trans.FmtCurrencyNegativeLeft = true
			} else {
				trans.FmtCurrencyNegativeLeft = false
			}

			break
		}
	}

	// if len(trans.FmtCurrencyNegativePrefix) > 0 {
	// 	trans.FmtCurrencyNegativePrefix = fmt.Sprintf("%#v", []byte(trans.FmtCurrencyNegativePrefix))
	// }

	// if len(trans.FmtCurrencyNegativeSuffix) > 0 {
	// 	trans.FmtCurrencyNegativeSuffix = fmt.Sprintf("%#v", []byte(trans.FmtCurrencyNegativeSuffix))
	// }

	return
}
// parsePercentNumberFormat extracts prefix/suffix, grouping and decimal
// metadata from the locale's CLDR percent number pattern into the
// translator's FmtPercent* fields for the code templates.
func parsePercentNumberFormat(trans *translator) {

	format := trans.PercentNumberFormat
	if format == "" {
		return
	}

	trans.FmtPercentExists = true

	// primary group length, eg. ",##0" at end of pattern -> 3
	if m := groupLenPercentRegex.FindString(format); m != "" {
		trans.FmtPercentGroupLen = len(m) - 1
	}

	// minimum number of decimal digits, eg. ".00" -> 2
	if m := requiredDecimalRegex.FindString(format); m != "" {
		trans.FmtPercentMinDecimalLen = len(m) - 1
	}

	// secondary group length, eg. ",##," -> 2
	if m := secondaryGroupLenRegex.FindString(format); m != "" {
		trans.FmtPercentSecondaryGroupLen = len(m) - 2
	}

	// prefix: everything before the first digit placeholder ('#' or '0')
	if i := strings.IndexAny(format, "#0"); i != -1 {
		trans.FmtPercentPrefix = format[:i]
	}

	// suffix: everything after the last digit placeholder
	if i := strings.LastIndexAny(format, "#0"); i != -1 {
		trans.FmtPercentSuffix = format[i+1:]
	}

	// '%' inside the prefix: percent sign precedes the number
	if i := strings.IndexByte(trans.FmtPercentPrefix, '%'); i != -1 {
		trans.FmtPercentInPrefix = true
		trans.FmtPercentLeft = i == 0
		trans.FmtPercentPrefix = strings.Replace(trans.FmtPercentPrefix, "%", "", 1)
	}

	// '%' inside the suffix: percent sign follows the number
	if i := strings.IndexByte(trans.FmtPercentSuffix, '%'); i != -1 {
		trans.FmtPercentInPrefix = false
		trans.FmtPercentLeft = i == 0
		trans.FmtPercentSuffix = strings.Replace(trans.FmtPercentSuffix, "%", "", 1)
	}
}
// parseDecimalNumberFormat extracts grouping and decimal metadata from the
// locale's CLDR decimal number pattern into the translator's FmtNumber*
// fields; only the positive sub-pattern (before any ';') is inspected.
func parseDecimalNumberFormat(trans *translator) {

	if trans.DecimalNumberFormat == "" {
		return
	}

	trans.FmtNumberExists = true

	// only the positive portion of the pattern carries the layout info
	positive := strings.SplitN(trans.DecimalNumberFormat, ";", 2)[0]

	// primary group length, eg. ",##0." -> 3
	if m := groupLenRegex.FindString(positive); m != "" {
		trans.FmtNumberGroupLen = len(m) - 2
	}

	// minimum number of decimal digits, eg. ".00" -> 2
	if m := requiredDecimalRegex.FindString(positive); m != "" {
		trans.FmtNumberMinDecimalLen = len(m) - 1
	}

	// secondary group length, eg. ",##," -> 2
	if m := secondaryGroupLenRegex.FindString(positive); m != "" {
		trans.FmtNumberSecondaryGroupLen = len(m) - 2
	}
}
// sortRank pairs a generated code snippet (Value) with an ordering Rank so
// plural-rule variable declarations can be emitted in a fixed order.
type sortRank struct {
	Rank  uint8  // lower rank sorts first
	Value string // the code snippet to emit
}
// ByRank implements sort.Interface, ordering sortRank entries by
// ascending Rank.
type ByRank []sortRank

func (a ByRank) Len() int           { return len(a) }
func (a ByRank) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a ByRank) Less(i, j int) bool { return a[i].Rank < a[j].Rank }
// ByPluralRule implements sort.Interface, ordering plural rules by their
// ascending enum value.
type ByPluralRule []locales.PluralRule

func (a ByPluralRule) Len() int           { return len(a) }
func (a ByPluralRule) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a ByPluralRule) Less(i, j int) bool { return a[i] < a[j] }
// TODO: refine generated code a bit, some combinations end up with same plural rule,
// could check all at once; but it works and that's step 1 complete

// parseRangePluralRuleFunc builds the Go source of a locale's range
// plural-rule function from CLDR supplemental data (Plurals[1] holds the
// plural ranges). It returns the generated function body (results) and
// plurals, a %#v-rendered, sorted slice of the locales.PluralRule values
// the locale uses ("nil" when the locale defines no range rules).
func parseRangePluralRuleFunc(current *cldr.CLDR, baseLocale string) (results string, plurals string) {

	// anonymous struct mirroring the CLDR <pluralRanges> XML element
	var pluralRange *struct {
		cldr.Common
		Locales     string `xml:"locales,attr"`
		PluralRange []*struct {
			cldr.Common
			Start  string `xml:"start,attr"`
			End    string `xml:"end,attr"`
			Result string `xml:"result,attr"`
		} `xml:"pluralRange"`
	}

	var pluralArr []locales.PluralRule

	// find the <pluralRanges> entry covering this base locale
	for _, pr := range current.Supplemental().Plurals[1].PluralRanges {
		locs := strings.Split(pr.Locales, " ")
		for _, loc := range locs {
			if loc == baseLocale {
				pluralRange = pr
			}
		}
	}

	// no range plural rules for locale
	if pluralRange == nil {
		plurals = "nil"
		results = "return locales.PluralRuleUnknown"
		return
	}

	mp := make(map[string]struct{})

	// pre-process: collect the distinct result rules
	for _, rule := range pluralRange.PluralRange {
		mp[rule.Result] = struct{}{}
	}

	for k := range mp {
		psI := pluralStringToInt(k)
		pluralArr = append(pluralArr, psI)
	}

	// every combination maps to the same rule: emit an unconditional return
	if len(mp) == 1 {
		results += "return locales." + pluralStringToString(pluralRange.PluralRange[0].Result)
		plurals = fmt.Sprintf("%#v", pluralArr)
		return
	}

	multiple := len(pluralRange.PluralRange) > 1

	if multiple {
		results += "start := " + baseLocale + ".CardinalPluralRule(num1, v1)\n"
		results += "end := " + baseLocale + ".CardinalPluralRule(num2, v2)\n\n"
	}

	first := true

	// one if/else-if branch per (start, end) combination; the final entry
	// becomes the fall-through return
	for i, rule := range pluralRange.PluralRange {

		if i == len(pluralRange.PluralRange)-1 {
			if multiple {
				results += "\n\n"
			}
			results += "return locales." + pluralStringToString(rule.Result)
			continue
		}

		if first {
			results += "if"
			first = false
		} else {
			results += "else if"
		}

		results += " start == locales." + pluralStringToString(rule.Start) + " && end == locales." + pluralStringToString(rule.End) + " {\n return locales." + pluralStringToString(rule.Result) + "\n} "
	}

	if multiple {
		results = "\n" + results + "\n"
	}

	if len(pluralArr) == 0 {
		plurals = "nil"
	} else {
		// map iteration order is random, so sort for stable generated output
		// (replaces a hand-rolled round-trip through []int + sort.Ints)
		sort.Slice(pluralArr, func(i, j int) bool { return pluralArr[i] < pluralArr[j] })
		plurals = fmt.Sprintf("%#v", pluralArr)
	}

	return
}
// TODO: cleanup function logic perhaps write a lexer... but it's working right now, and
// I'm already farther down the rabbit hole than I'd like and so pulling the chute here.

// parseOrdinalPluralRuleFunc builds the Go source of a locale's ordinal
// plural-rule function from CLDR supplemental data (Plurals[0] holds the
// ordinal rules). It returns the function body (results) and plurals, a
// %#v-rendered slice of the locales.PluralRule values the locale uses
// ("nil" when the locale defines no ordinal rules).
//
// NOTE(review): this mirrors parseCardinalPluralRuleFunc almost line for
// line; they differ only in which Plurals index is read.
func parseOrdinalPluralRuleFunc(current *cldr.CLDR, baseLocale string) (results string, plurals string) {

	// anonymous struct mirroring the CLDR ordinal <plurals> XML element
	var prOrdinal *struct {
		cldr.Common
		Locales    string "xml:\"locales,attr\""
		PluralRule []*struct {
			cldr.Common
			Count string "xml:\"count,attr\""
		} "xml:\"pluralRule\""
	}

	var pluralArr []locales.PluralRule

	// idx 0 is ordinal rules
	for _, pr := range current.Supplemental().Plurals[0].PluralRules {
		locs := strings.Split(pr.Locales, " ")
		for _, loc := range locs {
			if loc == baseLocale {
				prOrdinal = pr
				// for _, pl := range pr.PluralRule {
				// 	fmt.Println(pl.Count, pl.Common.Data())
				// }
			}
		}
	}

	// no plural rules for locale
	if prOrdinal == nil {
		plurals = "nil"
		results = "return locales.PluralRuleUnknown"
		return
	}

	// vals collects the variable-declaration snippets (from prVarFuncs) that
	// the generated function body will need
	vals := make(map[string]struct{})
	first := true

	// pre parse for variables
	for _, rule := range prOrdinal.PluralRule {

		ps1 := pluralStringToString(rule.Count)
		psI := pluralStringToInt(rule.Count)
		pluralArr = append(pluralArr, psI)

		// keep only the formula before the '@' sample lists and rewrite the
		// CLDR operators into Go: "=" -> "==", "or" -> "||", "and" -> "&&"
		data := strings.Replace(strings.Replace(strings.Replace(strings.TrimSpace(strings.SplitN(rule.Common.Data(), "@", 2)[0]), " = ", " == ", -1), " or ", " || ", -1), " and ", " && ", -1)

		// an empty formula (eg. the "other" rule) becomes the fall-through return
		if len(data) == 0 {
			if len(prOrdinal.PluralRule) == 1 {
				results = "return locales." + ps1
			} else {
				results += "\n\nreturn locales." + ps1
				// results += "else {\nreturn locales." + locales.PluralStringToString(rule.Count) + ", nil\n}"
			}
			continue
		}

		// record which CLDR operands the formula references so their
		// declarations can be prepended later

		// // All need n, so always add
		// if strings.Contains(data, "n") {
		// 	vals[prVarFuncs["n"]] = struct{}{}
		// }

		if strings.Contains(data, "i") {
			vals[prVarFuncs["i"]] = struct{}{}
		}

		// v is inherently avaialable as an argument
		// if strings.Contains(data, "v") {
		// 	vals[prVarFuncs["v"]] = struct{}{}
		// }

		if strings.Contains(data, "w") {
			vals[prVarFuncs["w"]] = struct{}{}
		}

		if strings.Contains(data, "f") {
			vals[prVarFuncs["f"]] = struct{}{}
		}

		if strings.Contains(data, "t") {
			vals[prVarFuncs["t"]] = struct{}{}
		}

		if first {
			results += "if "
			first = false
		} else {
			results += "else if "
		}

		stmt := ""

		// real work here
		//
		// split by 'or' then by 'and' allowing to better
		// determine bracketing for formula

		ors := strings.Split(data, "||")

		for _, or := range ors {

			stmt += "("

			ands := strings.Split(strings.TrimSpace(or), "&&")

			for _, and := range ands {

				inArg := false
				pre := ""
				lft := ""
				preOperator := ""

				args := strings.Split(strings.TrimSpace(and), " ")

				for _, a := range args {

					if inArg {
						// check to see if is a value range 2..9

						multiRange := strings.Count(a, "..") > 1
						cargs := strings.Split(strings.TrimSpace(a), ",")
						hasBracket := len(cargs) > 1
						bracketAdded := false
						lastWasRange := false

						for _, carg := range cargs {

							// expand a range, eg. "x == 2..9" becomes
							// "x >= 2 && x <= 9" (inverted for !=)
							if rng := strings.Split(carg, ".."); len(rng) > 1 {

								if multiRange {
									pre += " ("
								} else {
									pre += " "
								}

								switch preOperator {
								case "==":
									pre += lft + " >= " + rng[0] + " && " + lft + "<=" + rng[1]
								case "!=":
									pre += "(" + lft + " < " + rng[0] + " || " + lft + " > " + rng[1] + ")"
								}

								if multiRange {
									pre += ") || "
								} else {
									pre += " || "
								}

								lastWasRange = true
								continue
							}

							if lastWasRange {
								pre = strings.TrimRight(pre, " || ") + " && "
							}

							lastWasRange = false

							if hasBracket && !bracketAdded {
								pre += "("
								bracketAdded = true
							}

							// single comma separated values
							switch preOperator {
							case "==":
								pre += " " + lft + preOperator + carg + " || "
							case "!=":
								pre += " " + lft + preOperator + carg + " && "
							}
						}

						// strip any dangling joiners left by the loop above
						pre = strings.TrimRight(pre, " || ")
						pre = strings.TrimRight(pre, " && ")
						pre = strings.TrimRight(pre, " || ")

						if hasBracket && bracketAdded {
							pre += ")"
						}

						continue
					}

					// an operator token switches parsing into argument mode
					if strings.Contains(a, "=") || a == ">" || a == "<" {
						inArg = true
						preOperator = a
						continue
					}

					// left-hand side accumulates until an operator is seen
					lft += a
				}

				stmt += pre + " && "
			}

			stmt = strings.TrimRight(stmt, " && ") + ") || "
		}

		stmt = strings.TrimRight(stmt, " || ")

		results += stmt
		results += " {\n"

		// return plural rule here
		results += "return locales." + ps1 + "\n"
		results += "}"
	}

	pre := "\n"

	// always needed
	vals[prVarFuncs["n"]] = struct{}{}

	// emit the collected variable declarations in fixed order (n,i,w,f,t)
	sorted := make([]sortRank, 0, len(vals))
	for k := range vals {
		switch k[:1] {
		case "n":
			sorted = append(sorted, sortRank{
				Value: prVarFuncs["n"],
				Rank:  1,
			})
		case "i":
			sorted = append(sorted, sortRank{
				Value: prVarFuncs["i"],
				Rank:  2,
			})
		case "w":
			sorted = append(sorted, sortRank{
				Value: prVarFuncs["w"],
				Rank:  3,
			})
		case "f":
			sorted = append(sorted, sortRank{
				Value: prVarFuncs["f"],
				Rank:  4,
			})
		case "t":
			sorted = append(sorted, sortRank{
				Value: prVarFuncs["t"],
				Rank:  5,
			})
		}
	}

	sort.Sort(ByRank(sorted))

	for _, k := range sorted {
		pre += k.Value
	}

	if len(results) == 0 {
		results = "return locales.PluralRuleUnknown"
	} else {
		if !strings.HasPrefix(results, "return") {
			// collapse repeated modulus expressions into single variables,
			// then prepend the variable declarations
			results = manyToSingleVars(results)
			// pre += "\n"
			results = pre + results
		}
	}

	if len(pluralArr) == 0 {
		plurals = "nil"
	} else {
		plurals = fmt.Sprintf("%#v", pluralArr)
	}

	return
}
// TODO: cleanup function logic perhaps write a lexer... but it's working right now, and
// I'm already farther down the rabbit hole than I'd like and so pulling the chute here.

// parseCardinalPluralRuleFunc builds the Go source of a locale's cardinal
// plural-rule function from CLDR supplemental data (Plurals[2] holds the
// cardinal rules). It returns the function body (results) and plurals, a
// %#v-rendered slice of the locales.PluralRule values the locale uses
// ("nil" when the locale defines no cardinal rules).
//
// NOTE(review): this mirrors parseOrdinalPluralRuleFunc almost line for
// line; they differ only in which Plurals index is read.
func parseCardinalPluralRuleFunc(current *cldr.CLDR, baseLocale string) (results string, plurals string) {

	// anonymous struct mirroring the CLDR cardinal <plurals> XML element
	var prCardinal *struct {
		cldr.Common
		Locales    string "xml:\"locales,attr\""
		PluralRule []*struct {
			cldr.Common
			Count string "xml:\"count,attr\""
		} "xml:\"pluralRule\""
	}

	var pluralArr []locales.PluralRule

	// idx 2 is cardinal rules
	for _, pr := range current.Supplemental().Plurals[2].PluralRules {
		locs := strings.Split(pr.Locales, " ")
		for _, loc := range locs {
			if loc == baseLocale {
				prCardinal = pr
			}
		}
	}

	// no plural rules for locale
	if prCardinal == nil {
		plurals = "nil"
		results = "return locales.PluralRuleUnknown"
		return
	}

	// vals collects the variable-declaration snippets (from prVarFuncs) that
	// the generated function body will need
	vals := make(map[string]struct{})
	first := true

	// pre parse for variables
	for _, rule := range prCardinal.PluralRule {

		ps1 := pluralStringToString(rule.Count)
		psI := pluralStringToInt(rule.Count)
		pluralArr = append(pluralArr, psI)

		// keep only the formula before the '@' sample lists and rewrite the
		// CLDR operators into Go: "=" -> "==", "or" -> "||", "and" -> "&&"
		data := strings.Replace(strings.Replace(strings.Replace(strings.TrimSpace(strings.SplitN(rule.Common.Data(), "@", 2)[0]), " = ", " == ", -1), " or ", " || ", -1), " and ", " && ", -1)

		// an empty formula (eg. the "other" rule) becomes the fall-through return
		if len(data) == 0 {
			if len(prCardinal.PluralRule) == 1 {
				results = "return locales." + ps1
			} else {
				results += "\n\nreturn locales." + ps1
				// results += "else {\nreturn locales." + locales.PluralStringToString(rule.Count) + ", nil\n}"
			}
			continue
		}

		// record which CLDR operands the formula references so their
		// declarations can be prepended later

		// // All need n, so always add
		// if strings.Contains(data, "n") {
		// 	vals[prVarFuncs["n"]] = struct{}{}
		// }

		if strings.Contains(data, "i") {
			vals[prVarFuncs["i"]] = struct{}{}
		}

		// v is inherently avaialable as an argument
		// if strings.Contains(data, "v") {
		// 	vals[prVarFuncs["v"]] = struct{}{}
		// }

		if strings.Contains(data, "w") {
			vals[prVarFuncs["w"]] = struct{}{}
		}

		if strings.Contains(data, "f") {
			vals[prVarFuncs["f"]] = struct{}{}
		}

		if strings.Contains(data, "t") {
			vals[prVarFuncs["t"]] = struct{}{}
		}

		if first {
			results += "if "
			first = false
		} else {
			results += "else if "
		}

		stmt := ""

		// real work here
		//
		// split by 'or' then by 'and' allowing to better
		// determine bracketing for formula

		ors := strings.Split(data, "||")

		for _, or := range ors {

			stmt += "("

			ands := strings.Split(strings.TrimSpace(or), "&&")

			for _, and := range ands {

				inArg := false
				pre := ""
				lft := ""
				preOperator := ""

				args := strings.Split(strings.TrimSpace(and), " ")

				for _, a := range args {

					if inArg {
						// check to see if is a value range 2..9

						multiRange := strings.Count(a, "..") > 1
						cargs := strings.Split(strings.TrimSpace(a), ",")
						hasBracket := len(cargs) > 1
						bracketAdded := false
						lastWasRange := false

						for _, carg := range cargs {

							// expand a range, eg. "x == 2..9" becomes
							// "x >= 2 && x <= 9" (inverted for !=)
							if rng := strings.Split(carg, ".."); len(rng) > 1 {

								if multiRange {
									pre += " ("
								} else {
									pre += " "
								}

								switch preOperator {
								case "==":
									pre += lft + " >= " + rng[0] + " && " + lft + "<=" + rng[1]
								case "!=":
									pre += "(" + lft + " < " + rng[0] + " || " + lft + " > " + rng[1] + ")"
								}

								if multiRange {
									pre += ") || "
								} else {
									pre += " || "
								}

								lastWasRange = true
								continue
							}

							if lastWasRange {
								pre = strings.TrimRight(pre, " || ") + " && "
							}

							lastWasRange = false

							if hasBracket && !bracketAdded {
								pre += "("
								bracketAdded = true
							}

							// single comma separated values
							switch preOperator {
							case "==":
								pre += " " + lft + preOperator + carg + " || "
							case "!=":
								pre += " " + lft + preOperator + carg + " && "
							}
						}

						// strip any dangling joiners left by the loop above
						pre = strings.TrimRight(pre, " || ")
						pre = strings.TrimRight(pre, " && ")
						pre = strings.TrimRight(pre, " || ")

						if hasBracket && bracketAdded {
							pre += ")"
						}

						continue
					}

					// an operator token switches parsing into argument mode
					if strings.Contains(a, "=") || a == ">" || a == "<" {
						inArg = true
						preOperator = a
						continue
					}

					// left-hand side accumulates until an operator is seen
					lft += a
				}

				stmt += pre + " && "
			}

			stmt = strings.TrimRight(stmt, " && ") + ") || "
		}

		stmt = strings.TrimRight(stmt, " || ")

		results += stmt
		results += " {\n"

		// return plural rule here
		results += "return locales." + ps1 + "\n"
		results += "}"
	}

	pre := "\n"

	// always needed
	vals[prVarFuncs["n"]] = struct{}{}

	// emit the collected variable declarations in fixed order (n,i,w,f,t)
	sorted := make([]sortRank, 0, len(vals))
	for k := range vals {
		switch k[:1] {
		case "n":
			sorted = append(sorted, sortRank{
				Value: prVarFuncs["n"],
				Rank:  1,
			})
		case "i":
			sorted = append(sorted, sortRank{
				Value: prVarFuncs["i"],
				Rank:  2,
			})
		case "w":
			sorted = append(sorted, sortRank{
				Value: prVarFuncs["w"],
				Rank:  3,
			})
		case "f":
			sorted = append(sorted, sortRank{
				Value: prVarFuncs["f"],
				Rank:  4,
			})
		case "t":
			sorted = append(sorted, sortRank{
				Value: prVarFuncs["t"],
				Rank:  5,
			})
		}
	}

	sort.Sort(ByRank(sorted))

	for _, k := range sorted {
		pre += k.Value
	}

	if len(results) == 0 {
		results = "return locales.PluralRuleUnknown"
	} else {
		if !strings.HasPrefix(results, "return") {
			// collapse repeated modulus expressions into single variables,
			// then prepend the variable declarations
			results = manyToSingleVars(results)
			// pre += "\n"
			results = pre + results
		}
	}

	if len(pluralArr) == 0 {
		plurals = "nil"
	} else {
		plurals = fmt.Sprintf("%#v", pluralArr)
	}

	return
}
// manyToSingleVars rewrites repeated modulus expressions (n%10, i%100, ...)
// inside a generated plural-rule body into single variable declarations
// (nMod10 := math.Mod(n, 10), iMod100 := i%100, ...), returning the
// declarations followed by the rewritten body.
//
// n is a float64 in the generated code, so its modulus must go through
// math.Mod; i, w, f and t are integers and keep the raw formula.
//
// FIX: declarations are now emitted in first-appearance order rather than
// map-iteration order, so regenerating a locale yields identical output
// every run.
func manyToSingleVars(input string) (results string) {

	// one pass per operand, in the same order as before: n, i, w, f, t
	subs := []struct {
		re      *regexp.Regexp
		prefix  string
		isFloat bool // true: wrap in math.Mod; false: keep the % expression
	}{
		{nModRegex, "nMod", true},
		{iModRegex, "iMod", false},
		{wModRegex, "wMod", false},
		{fModRegex, "fMod", false},
		{tModRegex, "tMod", false},
	}

	for _, sub := range subs {

		seen := make(map[string]struct{}) // dedupe identical formulas

		for _, formula := range sub.re.FindAllString(input, -1) {

			if _, ok := seen[formula]; ok {
				continue
			}
			seen[formula] = struct{}{}

			// formula is eg. "n%100"; parts[0] = operand, parts[1] = modulus
			parts := strings.SplitN(formula, "%", 2)

			expr := formula
			if sub.isFloat {
				expr = "math.Mod(" + parts[0] + ", " + parts[1] + ")"
			}

			variable := sub.prefix + parts[1]
			results += variable + " := " + expr + "\n"
			input = strings.Replace(input, formula, variable, -1)
		}
	}

	results = results + "\n" + input

	return
}
// pluralStringToInt returns the locales.PluralRule enum value for the given
// CLDR plural keyword; unrecognized keywords map to PluralRuleUnknown.
func pluralStringToInt(plural string) locales.PluralRule {
	lookup := map[string]locales.PluralRule{
		"zero":  locales.PluralRuleZero,
		"one":   locales.PluralRuleOne,
		"two":   locales.PluralRuleTwo,
		"few":   locales.PluralRuleFew,
		"many":  locales.PluralRuleMany,
		"other": locales.PluralRuleOther,
	}
	if rule, ok := lookup[plural]; ok {
		return rule
	}
	return locales.PluralRuleUnknown
}
// pluralStringToString returns the name of the locales.PluralRule constant
// for the given CLDR plural keyword, ignoring surrounding whitespace;
// unrecognized keywords yield "PluralRuleUnknown".
func pluralStringToString(pr string) string {
	lookup := map[string]string{
		"zero":  "PluralRuleZero",
		"one":   "PluralRuleOne",
		"two":   "PluralRuleTwo",
		"few":   "PluralRuleFew",
		"many":  "PluralRuleMany",
		"other": "PluralRuleOther",
	}
	if name, ok := lookup[strings.TrimSpace(pr)]; ok {
		return name
	}
	return "PluralRuleUnknown"
}
Get default numbering system for symbols.
Should resolve #5
package main
import (
"fmt"
"log"
"os"
"os/exec"
"regexp"
"sort"
"strconv"
"strings"
"github.com/go-playground/locales"
"golang.org/x/text/unicode/cldr"
"text/template"
)
// Output locations for the generated locale packages, relative to this
// generator's working directory: locDir is the package directory and
// locFilename the .go file inside it.
const (
	locDir      = "../%s"
	locFilename = locDir + "/%s.go"
)
var (
	// tfuncs are helper functions exposed to the locale templates.
	tfuncs = template.FuncMap{
		// is_multibyte reports whether s occupies more than one byte (UTF-8).
		"is_multibyte": func(s string) bool {
			return len([]byte(s)) > 1
		},
		// reverse_bytes renders the byte-reversed form of s as Go syntax.
		"reverse_bytes": func(s string) string {
			b := make([]byte, 0, 8)
			for j := len(s) - 1; j >= 0; j-- {
				b = append(b, s[j])
			}
			return fmt.Sprintf("%#v", b)
		},
		// byte_count returns the total byte length of all provided strings.
		"byte_count": func(s ...string) string {
			var count int
			for i := 0; i < len(s); i++ {
				count += len([]byte(s[i]))
			}
			return strconv.Itoa(count)
		},
	}

	// prVarFuncs maps a CLDR plural operand to the Go snippet that declares
	// it inside a generated plural-rule function (v is already an argument).
	prVarFuncs = map[string]string{
		"n": "n := math.Abs(num)\n",
		"i": "i := int64(n)\n",
		// "v": "v := ...", // inherently available as argument
		"w": "w := locales.W(n, v)\n",
		"f": "f := locales.F(n, v)\n",
		"t": "t := locales.T(n, v)\n",
	}

	// per-locale and base-locale translator registries
	translators     = make(map[string]*translator)
	baseTranslators = make(map[string]*translator)

	globalCurrenciesMap = make(map[string]struct{}) // ["USD"] = "$" currency code, just all currencies for mapping to enum
	globCurrencyIdxMap  = make(map[string]int)      // ["USD"] = 0
	globalCurrencies    = make([]string, 0, 100)    // array of currency codes index maps to enum

	// tmpl holds all parsed *.tmpl templates; populated in main
	tmpl *template.Template

	// regexes that find repeated modulus formulas in generated plural rules
	// (see manyToSingleVars)
	nModRegex = regexp.MustCompile("(n%[0-9]+)")
	iModRegex = regexp.MustCompile("(i%[0-9]+)")
	wModRegex = regexp.MustCompile("(w%[0-9]+)")
	fModRegex = regexp.MustCompile("(f%[0-9]+)")
	tModRegex = regexp.MustCompile("(t%[0-9]+)")

	// regexes that pick apart CLDR number patterns (group sizes and
	// required decimal digits)
	groupLenRegex          = regexp.MustCompile(",([0-9#]+)\\.")
	groupLenPercentRegex   = regexp.MustCompile(",([0-9#]+)$")
	secondaryGroupLenRegex = regexp.MustCompile(",([0-9#]+),")
	requiredNumRegex       = regexp.MustCompile("([0-9]+)\\.")
	requiredDecimalRegex   = regexp.MustCompile("\\.([0-9]+)")
)
// translator accumulates everything extracted from the CLDR data for a
// single locale; its fields are consumed by the text templates that render
// each generated locale package. String-typed Fmt* fields generally hold
// pre-rendered Go source fragments (often %#v-formatted values).
type translator struct {
	Locale         string
	BaseLocale     string
	Plurals        string
	CardinalFunc   string
	PluralsOrdinal string
	OrdinalFunc    string
	PluralsRange   string
	RangeFunc      string
	Decimal        string
	Group          string
	Minus          string
	Percent        string
	PerMille       string
	TimeSeparator  string
	Infinity       string
	Currencies     string

	// FmtNumber vars
	FmtNumberExists            bool
	FmtNumberGroupLen          int
	FmtNumberSecondaryGroupLen int
	FmtNumberMinDecimalLen     int

	// FmtPercent vars
	FmtPercentExists            bool
	FmtPercentGroupLen          int
	FmtPercentSecondaryGroupLen int
	FmtPercentMinDecimalLen     int
	FmtPercentPrefix            string
	FmtPercentSuffix            string
	FmtPercentInPrefix          bool
	FmtPercentLeft              bool

	// FmtCurrency vars
	FmtCurrencyExists            bool
	FmtCurrencyGroupLen          int
	FmtCurrencySecondaryGroupLen int
	FmtCurrencyMinDecimalLen     int
	FmtCurrencyPrefix            string
	FmtCurrencySuffix            string
	FmtCurrencyInPrefix          bool
	FmtCurrencyLeft              bool
	FmtCurrencyNegativeExists    bool
	FmtCurrencyNegativePrefix    string
	FmtCurrencyNegativeSuffix    string
	FmtCurrencyNegativeInPrefix  bool
	FmtCurrencyNegativeLeft      bool

	// Date & Time
	FmtCalendarExists     bool
	FmtMonthsAbbreviated  string
	FmtMonthsNarrow       string
	FmtMonthsWide         string
	FmtDaysAbbreviated    string
	FmtDaysNarrow         string
	FmtDaysShort          string
	FmtDaysWide           string
	FmtPeriodsAbbreviated string
	FmtPeriodsNarrow      string
	FmtPeriodsShort       string
	FmtPeriodsWide        string
	FmtErasAbbreviated    string
	FmtErasNarrow         string
	FmtErasWide           string
	FmtTimezones          string

	// calculation only fields below this point...
	DecimalNumberFormat          string
	PercentNumberFormat          string
	CurrencyNumberFormat         string
	NegativeCurrencyNumberFormat string

	// Dates
	FmtDateFull   string
	FmtDateLong   string
	FmtDateMedium string
	FmtDateShort  string

	// Times
	FmtTimeFull   string
	FmtTimeLong   string
	FmtTimeMedium string
	FmtTimeShort  string

	// timezones per locale by type
	timezones map[string]*zoneAbbrev // key = type eg. America_Eastern zone Abbrev will be long form eg. Eastern Standard Time, Pacific Standard Time.....
}
// zoneAbbrev pairs the standard-time and daylight-savings names of one
// timezone metazone (short forms like EST/EDT, or their long equivalents).
type zoneAbbrev struct {
	standard string
	daylight string
}

// timezones collects the short (abbreviated) zone names gathered across all
// locales; key = metazone type eg. America_Eastern, value eg. EST & EDT.
var timezones = map[string]*zoneAbbrev{}
// main drives the CLDR code generation: it loads the Go templates and the
// raw CLDR data, runs the pre/post processing passes over every locale, then
// renders the shared currency package plus one package (and test file) per
// locale, formatting every generated file with goimports and gofmt.
func main() {

	var err error

	// load template
	tmpl, err = template.New("all").Funcs(tfuncs).ParseGlob("*.tmpl")
	if err != nil {
		log.Fatal(err)
	}

	// load CLDR resources
	var decoder cldr.Decoder
	cldrData, err := decoder.DecodePath("data/core")
	if err != nil {
		panic(err)
	}

	preProcess(cldrData)
	postProcess(cldrData)

	// build the "Type = iota" enum body listing every known currency code.
	var currencies string
	for i, curr := range globalCurrencies {
		if i == 0 {
			currencies = curr + " Type = iota\n"
			continue
		}
		currencies += curr + "\n"
	}

	if err = os.MkdirAll(fmt.Sprintf(locDir, "currency"), 0777); err != nil {
		log.Fatal(err)
	}

	writeFormattedFile(fmt.Sprintf(locFilename, "currency", "currency"), "currencies", currencies)

	var locMap string

	for _, trans := range translators {

		// NOTE(review): locMap is accumulated but never consumed within this
		// function; kept for behavioral parity — confirm whether it is still
		// needed.
		locMap += `"` + trans.Locale + `" : ` + trans.Locale + `.New,
`

		fmt.Println("Writing Data:", trans.Locale)

		if err = os.MkdirAll(fmt.Sprintf(locDir, trans.Locale), 0777); err != nil {
			log.Fatal(err)
		}

		writeFormattedFile(fmt.Sprintf(locFilename, trans.Locale, trans.Locale), "translator", trans)

		// never overwrite an existing, possibly hand-edited, test file.
		filename := fmt.Sprintf(locFilename, trans.Locale, trans.Locale+"_test")
		if _, err := os.Stat(filename); err == nil {
			fmt.Println("*************** test file exists, skipping:", filename)
			continue
		}

		writeFormattedFile(filename, "tests", trans)
	}
}

// writeFormattedFile renders the named template to filename and then runs
// goimports followed by gofmt -s (a simplification goimports has no option
// for) on the result; any failure aborts the generator. The file is closed
// exactly once, before the formatters run, so they always see fully flushed
// contents — the previous version deferred Close inside main's loop, holding
// every descriptor open until exit and closing each file twice.
func writeFormattedFile(filename, templateName string, data interface{}) {

	output, err := os.Create(filename)
	if err != nil {
		log.Fatal(err)
	}

	if err := tmpl.ExecuteTemplate(output, templateName, data); err != nil {
		output.Close()
		log.Fatal(err)
	}

	output.Close()

	// after file written run goimports/gofmt on file to ensure best formatting
	cmd := exec.Command("goimports", "-w", filename)
	if err := cmd.Run(); err != nil {
		log.Panic(err)
	}

	cmd = exec.Command("gofmt", "-s", "-w", filename)
	if err := cmd.Run(); err != nil {
		log.Panic(err)
	}
}
// ApplyOverrides patches locale data where the generated output must deviate
// from the raw CLDR values; currently only the Russian percent pattern.
func ApplyOverrides(trans *translator) {
	switch trans.BaseLocale {
	case "ru":
		trans.PercentNumberFormat = "#,##0%"
	}
}
// inheritString backfills *dst with src when *dst is empty; an existing
// locale-specific value is never overwritten.
func inheritString(dst *string, src string) {
	if len(*dst) == 0 {
		*dst = src
	}
}

// postProcess runs after preProcess has filled the translators map. It
// resolves the plural-rule functions, backfills every empty field of a
// derived locale (eg. "en_CA") from its base locale ("en"), resolves
// currency symbols, merges timezone data, and finally compiles the raw
// date/time patterns into generated Go code fragments.
func postProcess(cldrVar *cldr.CLDR) {

	// a zone with no separate daylight-savings abbreviation reuses its
	// standard one.
	for _, v := range timezones {
		if len(v.daylight) == 0 {
			v.daylight = v.standard
		}
	}

	var base *translator
	var found bool

	for _, trans := range translators {

		fmt.Println("Post Processing:", trans.Locale)

		// cardinal, ordinal and range plural rules
		trans.CardinalFunc, trans.Plurals = parseCardinalPluralRuleFunc(cldrVar, trans.BaseLocale)
		trans.OrdinalFunc, trans.PluralsOrdinal = parseOrdinalPluralRuleFunc(cldrVar, trans.BaseLocale)
		trans.RangeFunc, trans.PluralsRange = parseRangePluralRuleFunc(cldrVar, trans.BaseLocale)

		// base locales have nothing to inherit from; derived locales inherit
		// every empty field from their base translator.
		if trans.BaseLocale == trans.Locale {
			found = false
		} else {
			base, found = baseTranslators[trans.BaseLocale]
		}

		// src is a zero-value stand-in when no base exists, which makes every
		// inheritString call below a harmless no-op (and avoids dereferencing
		// a nil base).
		src := base
		if !found {
			src = &translator{}
		}

		// number symbols
		inheritString(&trans.Decimal, src.Decimal)
		inheritString(&trans.Group, src.Group)
		inheritString(&trans.Minus, src.Minus)
		inheritString(&trans.Percent, src.Percent)
		inheritString(&trans.PerMille, src.PerMille)
		inheritString(&trans.TimeSeparator, src.TimeSeparator)
		inheritString(&trans.Infinity, src.Infinity)

		// number format patterns
		inheritString(&trans.DecimalNumberFormat, src.DecimalNumberFormat)
		inheritString(&trans.PercentNumberFormat, src.PercentNumberFormat)
		inheritString(&trans.CurrencyNumberFormat, src.CurrencyNumberFormat)
		inheritString(&trans.NegativeCurrencyNumberFormat, src.NegativeCurrencyNumberFormat)

		// date values
		inheritString(&trans.FmtDateFull, src.FmtDateFull)
		inheritString(&trans.FmtDateLong, src.FmtDateLong)
		inheritString(&trans.FmtDateMedium, src.FmtDateMedium)
		inheritString(&trans.FmtDateShort, src.FmtDateShort)

		// time values
		inheritString(&trans.FmtTimeFull, src.FmtTimeFull)
		inheritString(&trans.FmtTimeLong, src.FmtTimeLong)
		inheritString(&trans.FmtTimeMedium, src.FmtTimeMedium)
		inheritString(&trans.FmtTimeShort, src.FmtTimeShort)

		// month values
		inheritString(&trans.FmtMonthsAbbreviated, src.FmtMonthsAbbreviated)
		inheritString(&trans.FmtMonthsNarrow, src.FmtMonthsNarrow)
		inheritString(&trans.FmtMonthsWide, src.FmtMonthsWide)

		// day values
		inheritString(&trans.FmtDaysAbbreviated, src.FmtDaysAbbreviated)
		inheritString(&trans.FmtDaysNarrow, src.FmtDaysNarrow)
		inheritString(&trans.FmtDaysShort, src.FmtDaysShort)
		inheritString(&trans.FmtDaysWide, src.FmtDaysWide)

		// period (AM/PM) values
		inheritString(&trans.FmtPeriodsAbbreviated, src.FmtPeriodsAbbreviated)
		inheritString(&trans.FmtPeriodsNarrow, src.FmtPeriodsNarrow)
		inheritString(&trans.FmtPeriodsShort, src.FmtPeriodsShort)
		inheritString(&trans.FmtPeriodsWide, src.FmtPeriodsWide)

		// era values
		inheritString(&trans.FmtErasAbbreviated, src.FmtErasAbbreviated)
		inheritString(&trans.FmtErasNarrow, src.FmtErasNarrow)
		inheritString(&trans.FmtErasWide, src.FmtErasWide)

		ldml := cldrVar.RawLDML(trans.Locale)

		// start from the plain currency codes, then overlay any symbols this
		// locale defines (eg. "USD" -> "$").
		currencies := make([]string, len(globalCurrencies))
		for k, v := range globCurrencyIdxMap {
			currencies[v] = k
		}

		// some just have no data...
		if ldml.Numbers != nil && ldml.Numbers.Currencies != nil {
			for _, currency := range ldml.Numbers.Currencies.Currency {
				if len(currency.Symbol) == 0 || len(currency.Symbol[0].Data()) == 0 || len(currency.Type) == 0 {
					continue
				}
				currencies[globCurrencyIdxMap[currency.Type]] = currency.Symbol[0].Data()
			}
		}

		trans.Currencies = fmt.Sprintf("%#v", currencies)

		// timezones: inherit the base map wholesale when missing, then make
		// sure all base timezones are part of the sub-locale's timezones.
		if found {
			if len(trans.timezones) == 0 {
				trans.timezones = base.timezones
			}
			for k, v := range base.timezones {
				if _, ok := trans.timezones[k]; !ok {
					trans.timezones[k] = v
				}
			}
		}

		ApplyOverrides(trans)

		parseDecimalNumberFormat(trans)
		parsePercentNumberFormat(trans)
		parseCurrencyNumberFormat(trans)
	}

	for _, trans := range translators {

		fmt.Println("Final Processing:", trans.Locale)

		// if it's still nil, no locale in the inheritance chain had data.
		if trans.timezones == nil {
			trans.timezones = make(map[string]*zoneAbbrev)
		}

		// map abbreviated zone names (EST, EDT, MST, PST...) to this locale's
		// long names, defaulting to the global long names when missing.
		tz := make(map[string]string)
		for k, v := range timezones {
			ttz, ok := trans.timezones[k]
			if !ok {
				ttz = v
				trans.timezones[k] = v
			}
			tz[v.standard] = ttz.standard
			tz[v.daylight] = ttz.daylight
		}

		trans.FmtTimezones = fmt.Sprintf("%#v", tz)

		if len(trans.TimeSeparator) == 0 {
			trans.TimeSeparator = ":"
		}

		trans.FmtDateShort, trans.FmtDateMedium, trans.FmtDateLong, trans.FmtDateFull = parseDateFormats(trans, trans.FmtDateShort, trans.FmtDateMedium, trans.FmtDateLong, trans.FmtDateFull)
		trans.FmtTimeShort, trans.FmtTimeMedium, trans.FmtTimeLong, trans.FmtTimeFull = parseDateFormats(trans, trans.FmtTimeShort, trans.FmtTimeMedium, trans.FmtTimeLong, trans.FmtTimeFull)
	}
}
// preProcess walks every CLDR locale and builds the initial translator for
// each: raw number symbols, number/currency format patterns, timezone names
// and gregorian calendar data. It preprocesses maps, arrays etc... and just
// requires multiple passes, no choice — inheritance and compilation are
// deferred to postProcess.
func preProcess(cldrVar *cldr.CLDR) {

	for _, l := range cldrVar.Locales() {

		fmt.Println("Pre Processing:", l)

		split := strings.SplitN(l, "_", 2)
		baseLocale := split[0]

		trans := &translator{
			Locale:     l,
			BaseLocale: baseLocale,
		}

		// if is a base locale
		if len(split) == 1 {
			baseTranslators[baseLocale] = trans
		}

		translators[l] = trans

		// get number, currency and datetime symbols

		// number values
		ldml := cldrVar.RawLDML(l)

		// some just have no data...
		if ldml.Numbers != nil {

			if len(ldml.Numbers.Symbols) > 0 {

				symbol := ldml.Numbers.Symbols[0]

				// Try to get the default numbering system instead of the first one
				systems := ldml.Numbers.DefaultNumberingSystem

				// There shouldn't really be more than one DefaultNumberingSystem
				if len(systems) > 0 {
					if dns := systems[0].Data(); dns != "" {
						for k := range ldml.Numbers.Symbols {
							if ldml.Numbers.Symbols[k].NumberSystem == dns {
								symbol = ldml.Numbers.Symbols[k]
								break
							}
						}
					}
				}

				if len(symbol.Decimal) > 0 {
					trans.Decimal = symbol.Decimal[0].Data()
				}
				if len(symbol.Group) > 0 {
					trans.Group = symbol.Group[0].Data()
				}
				if len(symbol.MinusSign) > 0 {
					trans.Minus = symbol.MinusSign[0].Data()
				}
				if len(symbol.PercentSign) > 0 {
					trans.Percent = symbol.PercentSign[0].Data()
				}
				if len(symbol.PerMille) > 0 {
					trans.PerMille = symbol.PerMille[0].Data()
				}
				if len(symbol.TimeSeparator) > 0 {
					trans.TimeSeparator = symbol.TimeSeparator[0].Data()
				}
				if len(symbol.Infinity) > 0 {
					trans.Infinity = symbol.Infinity[0].Data()
				}
			}

			// record every currency code seen in any locale, for the global
			// currency enum.
			if ldml.Numbers.Currencies != nil {
				for _, currency := range ldml.Numbers.Currencies.Currency {
					if len(strings.TrimSpace(currency.Type)) == 0 {
						continue
					}
					globalCurrenciesMap[currency.Type] = struct{}{}
				}
			}

			// default (untyped) decimal format pattern
			if len(ldml.Numbers.DecimalFormats) > 0 && len(ldml.Numbers.DecimalFormats[0].DecimalFormatLength) > 0 {
				for _, dfl := range ldml.Numbers.DecimalFormats[0].DecimalFormatLength {
					if len(dfl.Type) == 0 {
						trans.DecimalNumberFormat = dfl.DecimalFormat[0].Pattern[0].Data()
						break
					}
				}
			}

			// default (untyped) percent format pattern
			if len(ldml.Numbers.PercentFormats) > 0 && len(ldml.Numbers.PercentFormats[0].PercentFormatLength) > 0 {
				for _, dfl := range ldml.Numbers.PercentFormats[0].PercentFormatLength {
					if len(dfl.Type) == 0 {
						trans.PercentNumberFormat = dfl.PercentFormat[0].Pattern[0].Data()
						break
					}
				}
			}

			// currency format pattern; a ";"-separated pattern carries a
			// distinct negative form.
			// NOTE(review): when more than one CurrencyFormat exists, index
			// [1] is used — presumably the accounting variant; confirm
			// against the CLDR schema.
			if len(ldml.Numbers.CurrencyFormats) > 0 && len(ldml.Numbers.CurrencyFormats[0].CurrencyFormatLength) > 0 {
				if len(ldml.Numbers.CurrencyFormats[0].CurrencyFormatLength[0].CurrencyFormat) > 1 {
					split := strings.SplitN(ldml.Numbers.CurrencyFormats[0].CurrencyFormatLength[0].CurrencyFormat[1].Pattern[0].Data(), ";", 2)
					trans.CurrencyNumberFormat = split[0]
					if len(split) > 1 && len(split[1]) > 0 {
						trans.NegativeCurrencyNumberFormat = split[1]
					} else {
						trans.NegativeCurrencyNumberFormat = trans.CurrencyNumberFormat
					}
				} else {
					trans.CurrencyNumberFormat = ldml.Numbers.CurrencyFormats[0].CurrencyFormatLength[0].CurrencyFormat[0].Pattern[0].Data()
					trans.NegativeCurrencyNumberFormat = trans.CurrencyNumberFormat
				}
			}
		}

		if ldml.Dates != nil {

			// timezone metazone names: short forms go into the global
			// `timezones` map shared by all locales, long forms into the
			// per-locale trans.timezones map.
			if ldml.Dates.TimeZoneNames != nil {

				for _, zone := range ldml.Dates.TimeZoneNames.Metazone {

					for _, short := range zone.Short {
						if len(short.Standard) > 0 {
							za, ok := timezones[zone.Type]
							if !ok {
								za = new(zoneAbbrev)
								timezones[zone.Type] = za
							}
							za.standard = short.Standard[0].Data()
						}

						if len(short.Daylight) > 0 {
							za, ok := timezones[zone.Type]
							if !ok {
								za = new(zoneAbbrev)
								timezones[zone.Type] = za
							}
							za.daylight = short.Daylight[0].Data()
						}
					}

					for _, long := range zone.Long {

						if trans.timezones == nil {
							trans.timezones = make(map[string]*zoneAbbrev)
						}

						if len(long.Standard) > 0 {
							za, ok := trans.timezones[zone.Type]
							if !ok {
								za = new(zoneAbbrev)
								trans.timezones[zone.Type] = za
							}
							za.standard = long.Standard[0].Data()
						}

						za, ok := trans.timezones[zone.Type]
						if !ok {
							za = new(zoneAbbrev)
							trans.timezones[zone.Type] = za
						}

						// no long daylight name: fall back to the standard one
						if len(long.Daylight) > 0 {
							za.daylight = long.Daylight[0].Data()
						} else {
							za.daylight = za.standard
						}
					}
				}
			}

			// only the gregorian calendar is generated
			if ldml.Dates.Calendars != nil {

				var calendar *cldr.Calendar

				for _, cal := range ldml.Dates.Calendars.Calendar {
					if cal.Type == "gregorian" {
						calendar = cal
					}
				}

				if calendar != nil {

					// raw date patterns per length; compiled later by
					// parseDateFormats.
					if calendar.DateFormats != nil {
						for _, datefmt := range calendar.DateFormats.DateFormatLength {
							switch datefmt.Type {
							case "full":
								trans.FmtDateFull = datefmt.DateFormat[0].Pattern[0].Data()
							case "long":
								trans.FmtDateLong = datefmt.DateFormat[0].Pattern[0].Data()
							case "medium":
								trans.FmtDateMedium = datefmt.DateFormat[0].Pattern[0].Data()
							case "short":
								trans.FmtDateShort = datefmt.DateFormat[0].Pattern[0].Data()
							}
						}
					}

					// raw time patterns per length
					if calendar.TimeFormats != nil {
						for _, datefmt := range calendar.TimeFormats.TimeFormatLength {
							switch datefmt.Type {
							case "full":
								trans.FmtTimeFull = datefmt.TimeFormat[0].Pattern[0].Data()
							case "long":
								trans.FmtTimeLong = datefmt.TimeFormat[0].Pattern[0].Data()
							case "medium":
								trans.FmtTimeMedium = datefmt.TimeFormat[0].Pattern[0].Data()
							case "short":
								trans.FmtTimeShort = datefmt.TimeFormat[0].Pattern[0].Data()
							}
						}
					}

					if calendar.Months != nil {

						// month context starts at 'format', but there is also has 'stand-alone'
						// I'm making the decision to use the 'stand-alone' if, and only if,
						// the value does not exist in the 'format' month context
						var abbrSet, narrSet, wideSet bool

						for _, monthctx := range calendar.Months.MonthContext {

							for _, months := range monthctx.MonthWidth {

								var monthData []string

								// the switch only filters types "1".."12"; it
								// does not sort — assumes CLDR lists months in
								// calendar order (TODO confirm).
								for _, m := range months.Month {

									if len(m.Data()) == 0 {
										continue
									}

									switch m.Type {
									case "1":
										monthData = append(monthData, m.Data())
									case "2":
										monthData = append(monthData, m.Data())
									case "3":
										monthData = append(monthData, m.Data())
									case "4":
										monthData = append(monthData, m.Data())
									case "5":
										monthData = append(monthData, m.Data())
									case "6":
										monthData = append(monthData, m.Data())
									case "7":
										monthData = append(monthData, m.Data())
									case "8":
										monthData = append(monthData, m.Data())
									case "9":
										monthData = append(monthData, m.Data())
									case "10":
										monthData = append(monthData, m.Data())
									case "11":
										monthData = append(monthData, m.Data())
									case "12":
										monthData = append(monthData, m.Data())
									}
								}

								if len(monthData) > 0 {

									// making array indexes line up with month values
									// so I'll have an extra empty value, it's way faster
									// than a switch over all type values...
									monthData = append(make([]string, 1, len(monthData)+1), monthData...)

									// first-wins per width across contexts
									switch months.Type {
									case "abbreviated":
										if !abbrSet {
											abbrSet = true
											trans.FmtMonthsAbbreviated = fmt.Sprintf("%#v", monthData)
										}
									case "narrow":
										if !narrSet {
											narrSet = true
											trans.FmtMonthsNarrow = fmt.Sprintf("%#v", monthData)
										}
									case "wide":
										if !wideSet {
											wideSet = true
											trans.FmtMonthsWide = fmt.Sprintf("%#v", monthData)
										}
									}
								}
							}
						}
					}

					if calendar.Days != nil {

						// day context starts at 'format', but there is also has 'stand-alone'
						// I'm making the decision to use the 'stand-alone' if, and only if,
						// the value does not exist in the 'format' day context
						var abbrSet, narrSet, shortSet, wideSet bool

						for _, dayctx := range calendar.Days.DayContext {

							for _, days := range dayctx.DayWidth {

								var dayData []string

								// filter only; assumes CLDR lists days in
								// sun..sat order matching time.Weekday
								// indexes (TODO confirm).
								for _, d := range days.Day {
									switch d.Type {
									case "sun":
										dayData = append(dayData, d.Data())
									case "mon":
										dayData = append(dayData, d.Data())
									case "tue":
										dayData = append(dayData, d.Data())
									case "wed":
										dayData = append(dayData, d.Data())
									case "thu":
										dayData = append(dayData, d.Data())
									case "fri":
										dayData = append(dayData, d.Data())
									case "sat":
										dayData = append(dayData, d.Data())
									}
								}

								if len(dayData) > 0 {
									// first-wins per width across contexts
									switch days.Type {
									case "abbreviated":
										if !abbrSet {
											abbrSet = true
											trans.FmtDaysAbbreviated = fmt.Sprintf("%#v", dayData)
										}
									case "narrow":
										if !narrSet {
											narrSet = true
											trans.FmtDaysNarrow = fmt.Sprintf("%#v", dayData)
										}
									case "short":
										if !shortSet {
											shortSet = true
											trans.FmtDaysShort = fmt.Sprintf("%#v", dayData)
										}
									case "wide":
										if !wideSet {
											wideSet = true
											trans.FmtDaysWide = fmt.Sprintf("%#v", dayData)
										}
									}
								}
							}
						}
					}

					if calendar.DayPeriods != nil {

						// day periods context starts at 'format', but there is also has 'stand-alone'
						// I'm making the decision to use the 'stand-alone' if, and only if,
						// the value does not exist in the 'format' day period context
						var abbrSet, narrSet, shortSet, wideSet bool

						for _, ctx := range calendar.DayPeriods.DayPeriodContext {

							for _, width := range ctx.DayPeriodWidth {

								// [0] = AM
								// [1] = PM
								ampm := make([]string, 2, 2)

								for _, d := range width.DayPeriod {
									if d.Type == "am" {
										ampm[0] = d.Data()
										continue
									}
									if d.Type == "pm" {
										ampm[1] = d.Data()
									}
								}

								// first-wins per width across contexts
								switch width.Type {
								case "abbreviated":
									if !abbrSet {
										abbrSet = true
										trans.FmtPeriodsAbbreviated = fmt.Sprintf("%#v", ampm)
									}
								case "narrow":
									if !narrSet {
										narrSet = true
										trans.FmtPeriodsNarrow = fmt.Sprintf("%#v", ampm)
									}
								case "short":
									if !shortSet {
										shortSet = true
										trans.FmtPeriodsShort = fmt.Sprintf("%#v", ampm)
									}
								case "wide":
									if !wideSet {
										wideSet = true
										trans.FmtPeriodsWide = fmt.Sprintf("%#v", ampm)
									}
								}
							}
						}
					}

					if calendar.Eras != nil {

						// [0] = BC
						// [1] = AD
						abbrev := make([]string, 2, 2)
						narr := make([]string, 2, 2)
						wide := make([]string, 2, 2)

						// NOTE(review): with 4 era entries, [0] and [2] are
						// taken — presumably skipping variant forms; confirm
						// against the CLDR data.
						if calendar.Eras.EraAbbr != nil {
							if len(calendar.Eras.EraAbbr.Era) == 4 {
								abbrev[0] = calendar.Eras.EraAbbr.Era[0].Data()
								abbrev[1] = calendar.Eras.EraAbbr.Era[2].Data()
							} else if len(calendar.Eras.EraAbbr.Era) == 2 {
								abbrev[0] = calendar.Eras.EraAbbr.Era[0].Data()
								abbrev[1] = calendar.Eras.EraAbbr.Era[1].Data()
							}
						}

						if calendar.Eras.EraNarrow != nil {
							if len(calendar.Eras.EraNarrow.Era) == 4 {
								narr[0] = calendar.Eras.EraNarrow.Era[0].Data()
								narr[1] = calendar.Eras.EraNarrow.Era[2].Data()
							} else if len(calendar.Eras.EraNarrow.Era) == 2 {
								narr[0] = calendar.Eras.EraNarrow.Era[0].Data()
								narr[1] = calendar.Eras.EraNarrow.Era[1].Data()
							}
						}

						if calendar.Eras.EraNames != nil {
							if len(calendar.Eras.EraNames.Era) == 4 {
								wide[0] = calendar.Eras.EraNames.Era[0].Data()
								wide[1] = calendar.Eras.EraNames.Era[2].Data()
							} else if len(calendar.Eras.EraNames.Era) == 2 {
								wide[0] = calendar.Eras.EraNames.Era[0].Data()
								wide[1] = calendar.Eras.EraNames.Era[1].Data()
							}
						}

						trans.FmtErasAbbreviated = fmt.Sprintf("%#v", abbrev)
						trans.FmtErasNarrow = fmt.Sprintf("%#v", narr)
						trans.FmtErasWide = fmt.Sprintf("%#v", wide)
					}
				}
			}
		}
	}

	// finalize the global currency enum: sorted codes, each mapped to its
	// enum index.
	for k := range globalCurrenciesMap {
		globalCurrencies = append(globalCurrencies, k)
	}

	sort.Strings(globalCurrencies)

	for i, loc := range globalCurrencies {
		globCurrencyIdxMap[loc] = i
	}
}
// parseDateFormats compiles the four raw CLDR date (or time) patterns of a
// locale into generated Go code fragments, one per length. The era score
// passed through selects which era names a pattern's 'G' field uses: wide
// for the full format (0), abbreviated for long (1) and medium/short (2).
func parseDateFormats(trans *translator, shortFormat, mediumFormat, longFormat, fullFormat string) (short, medium, long, full string) {
	locale := trans.BaseLocale

	full = parseDateTimeFormat(locale, fullFormat, 0)
	long = parseDateTimeFormat(locale, longFormat, 1)
	medium = parseDateTimeFormat(locale, mediumFormat, 2)
	short = parseDateTimeFormat(locale, shortFormat, 2)

	return short, medium, long, full
}
// parseDateTimeFormat compiles a single CLDR date/time pattern (eg.
// "EEEE, MMMM d, y" or "h:mm:ss a") into a fragment of Go source code that
// appends the formatted value to a byte slice b from a time value t, for
// inclusion in the generated locale package. baseLocale names the generated
// locale variable whose symbol tables (monthsWide, daysNarrow, timezones...)
// the fragment references. eraScore selects the era names used by a 'G'
// field: 0 = wide, 1 or 2 = abbreviated.
func parseDateTimeFormat(baseLocale, format string, eraScore uint8) (results string) {

	// rules:
	// y = four digit year
	// yy = two digit year

	// var b []byte

	// Runs of literal text between recognized pattern letters are tracked
	// via inConstantText/start and flushed as one []byte append the moment
	// the next pattern letter is hit (and once more after the loop).
	var inConstantText bool
	var start int

	for i := 0; i < len(format); i++ {

		switch format[i] {

		// time separator
		case ':':
			if inConstantText {
				inConstantText = false
				results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:i])) + "...)\n"
			}

			// NOTE(review): this is the only emitted statement without a
			// trailing "\n"; the generated file stays valid Go only when the
			// next emitted fragment begins with a newline (the backtick
			// literals below do) — confirm generated output compiles for all
			// patterns.
			results += "b = append(b, " + baseLocale + ".timeSeparator...)"

		// quoted literal text: '' is an escaped single quote, 'abc' emits
		// the literal text abc.
		case '\'':
			i++
			startI := i

			// peek to see if ''
			if len(format) != i && format[i] == '\'' {
				if inConstantText {
					inConstantText = false
					results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:i-1])) + "...)\n"
				} else {
					inConstantText = true
					start = i
				}
				continue
			}

			// not '' so whatever comes between '' is constant
			if len(format) != i {
				// advance i to the next single quote + 1
				for ; i < len(format); i++ {
					if format[i] == '\'' {
						if inConstantText {
							inConstantText = false
							// merge pending constant text with the quoted run
							b := []byte(format[start : startI-1])
							b = append(b, []byte(format[startI:i])...)
							results += "b = append(b, " + fmt.Sprintf("%#v", b) + "...)\n"
						} else {
							results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[startI:i])) + "...)\n"
						}
						break
					}
				}
			}

		// 24 hour
		case 'H':
			if inConstantText {
				inConstantText = false
				results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:i])) + "...)\n"
			}

			// peek
			// two digit hour required?
			if len(format) != i+1 && format[i+1] == 'H' {
				i++
				results += `
if t.Hour() < 10 {
b = append(b, '0')
}
`
			}
			results += "b = strconv.AppendInt(b, int64(t.Hour()), 10)\n"

		// hour (12-hour clock)
		case 'h':
			if inConstantText {
				inConstantText = false
				results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:i])) + "...)\n"
			}

			results += `
h := t.Hour()
if h > 12 {
h -= 12
}
`

			// peek
			// two digit hour required?
			if len(format) != i+1 && format[i+1] == 'h' {
				i++
				results += `
if h < 10 {
b = append(b, '0')
}
`
			}
			results += "b = strconv.AppendInt(b, int64(h), 10)\n"

		// minute
		case 'm':
			if inConstantText {
				inConstantText = false
				results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:i])) + "...)\n"
			}

			// peek
			// two digit minute required?
			if len(format) != i+1 && format[i+1] == 'm' {
				i++
				results += `
if t.Minute() < 10 {
b = append(b, '0')
}
`
			}
			results += "b = strconv.AppendInt(b, int64(t.Minute()), 10)\n"

		// second
		case 's':
			if inConstantText {
				inConstantText = false
				results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:i])) + "...)\n"
			}

			// peek
			// two digit second required?
			if len(format) != i+1 && format[i+1] == 's' {
				i++
				results += `
if t.Second() < 10 {
b = append(b, '0')
}
`
			}
			results += "b = strconv.AppendInt(b, int64(t.Second()), 10)\n"

		// day period
		case 'a':
			if inConstantText {
				inConstantText = false
				results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:i])) + "...)\n"
			}

			// only used with 'h', patterns should not contains 'a' without 'h' so not checking
			// choosing to use abbreviated, didn't see any rules about which should be used with which
			// date format....
			results += `
if t.Hour() < 12 {
b = append(b, ` + baseLocale + `.periodsAbbreviated[0]...)
} else {
b = append(b, ` + baseLocale + `.periodsAbbreviated[1]...)
}
`

		// timezone
		case 'z', 'v':
			if inConstantText {
				inConstantText = false
				results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:i])) + "...)\n"
			}

			// consume multiple, only handling Abbrev tz from time.Time for the moment...
			var count int
			if format[i] == 'z' {
				for j := i; j < len(format); j++ {
					if format[j] == 'z' {
						count++
					} else {
						break
					}
				}
			}

			if format[i] == 'v' {
				for j := i; j < len(format); j++ {
					if format[j] == 'v' {
						count++
					} else {
						break
					}
				}
			}

			i += count - 1

			// using the timezone on the Go time object, eg. EST, EDT, MST.....
			if count < 4 {
				results += `
tz, _ := t.Zone()
b = append(b, tz...)
`
			} else {
				// 4+ letters: long form, looked up via the locale's timezone
				// map with the abbreviation as fallback.
				results += `
tz, _ := t.Zone()
if btz, ok := ` + baseLocale + `.timezones[tz]; ok {
b = append(b, btz...)
} else {
b = append(b, tz...)
}
`
			}

		// day
		case 'd':
			if inConstantText {
				inConstantText = false
				results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:i])) + "...)\n"
			}

			// peek
			// two digit day required?
			if len(format) != i+1 && format[i+1] == 'd' {
				i++
				results += `
if t.Day() < 10 {
b = append(b, '0')
}
`
			}
			results += "b = strconv.AppendInt(b, int64(t.Day()), 10)\n"

		// month
		case 'M':
			if inConstantText {
				inConstantText = false
				results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:i])) + "...)\n"
			}

			var count int

			// count # of M's
			for j := i; j < len(format); j++ {
				if format[j] == 'M' {
					count++
				} else {
					break
				}
			}

			switch count {

			// Numeric form, at least 1 digit
			case 1:
				results += "b = strconv.AppendInt(b, int64(t.Month()), 10)\n"

			// Number form, at least 2 digits (padding with 0)
			case 2:
				results += `
if t.Month() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Month()), 10)
`

			// Abbreviated form
			case 3:
				results += "b = append(b, " + baseLocale + ".monthsAbbreviated[t.Month()]...)\n"

			// Full/Wide form
			case 4:
				results += "b = append(b, " + baseLocale + ".monthsWide[t.Month()]...)\n"

			// Narrow form - only used in where context makes it clear, such as headers in a calendar.
			// Should be one character wherever possible.
			case 5:
				results += "b = append(b, " + baseLocale + ".monthsNarrow[t.Month()]...)\n"
			}

			// skip over M's
			i += count - 1

		// year
		case 'y':
			if inConstantText {
				inConstantText = false
				results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:i])) + "...)\n"
			}

			// peek
			// two digit year
			if len(format) != i+1 && format[i+1] == 'y' {
				i++
				// NOTE(review): for years below 10 the else branch appends
				// Itoa(year)[1:], ie. the empty string — confirm intended for
				// sub-two-digit years.
				results += `
if t.Year() > 9 {
b = append(b, strconv.Itoa(t.Year())[2:]...)
} else {
b = append(b, strconv.Itoa(t.Year())[1:]...)
}
`
			} else {
				// four digit year
				results += "b = strconv.AppendInt(b, int64(t.Year()), 10)\n"
			}

		// weekday
		// I know I only see 'EEEE' in the xml, but just in case handled all posibilities
		case 'E':
			if inConstantText {
				inConstantText = false
				results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:i])) + "...)\n"
			}

			var count int

			// count # of E's
			for j := i; j < len(format); j++ {
				if format[j] == 'E' {
					count++
				} else {
					break
				}
			}

			switch count {

			// Narrow
			case 1:
				results += "b = append(b, " + baseLocale + ".daysNarrow[t.Weekday()]...)\n"

			// Short
			case 2:
				results += "b = append(b, " + baseLocale + ".daysShort[t.Weekday()]...)\n"

			// Abbreviated
			case 3:
				results += "b = append(b, " + baseLocale + ".daysAbbreviated[t.Weekday()]...)\n"

			// Full/Wide
			case 4:
				results += "b = append(b, " + baseLocale + ".daysWide[t.Weekday()]...)\n"
			}

			// skip over E's
			i += count - 1

		// era eg. AD, BC
		case 'G':
			if inConstantText {
				inConstantText = false
				results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:i])) + "...)\n"
			}

			// eraScore 0 (full formats) renders wide era names; 1 and 2
			// (long/medium/short) render abbreviated ones.
			switch eraScore {
			case 0:
				results += `
if t.Year() < 0 {
b = append(b, ` + baseLocale + `.erasWide[0]...)
} else {
b = append(b, ` + baseLocale + `.erasWide[1]...)
}
`
			case 1, 2:
				results += `
if t.Year() < 0 {
b = append(b, ` + baseLocale + `.erasAbbreviated[0]...)
} else {
b = append(b, ` + baseLocale + `.erasAbbreviated[1]...)
}
`
			}

		default:
			// append all non matched text as they are constants
			if !inConstantText {
				inConstantText = true
				start = i
			}
		}
	}

	// if we were inConstantText when the string ended, add what's left.
	if inConstantText {
		// inContantText = false
		results += "b = append(b, " + fmt.Sprintf("%#v", []byte(format[start:])) + "...)\n"
	}

	return
}
// parseCurrencyNumberFormat breaks the locale's raw CLDR currency pattern
// (eg. "¤#,##0.00") down into the metadata the templates consume: grouping
// lengths, minimum decimal digits, the literal prefix/suffix around the
// digit placeholders, and on which side the currency sign '¤' sits. The
// negative pattern is parsed the same way unless identical to the positive.
func parseCurrencyNumberFormat(trans *translator) {

	if len(trans.CurrencyNumberFormat) == 0 {
		return
	}

	trans.FmtCurrencyExists = true
	negativeEqual := trans.CurrencyNumberFormat == trans.NegativeCurrencyNumberFormat

	// grouping sizes and required decimal digits, derived from the pattern's
	// comma/dot structure via the package-level regexes.
	match := groupLenRegex.FindString(trans.CurrencyNumberFormat)
	if len(match) > 0 {
		trans.FmtCurrencyGroupLen = len(match) - 2
	}

	match = requiredDecimalRegex.FindString(trans.CurrencyNumberFormat)
	if len(match) > 0 {
		trans.FmtCurrencyMinDecimalLen = len(match) - 1
	}

	match = secondaryGroupLenRegex.FindString(trans.CurrencyNumberFormat)
	if len(match) > 0 {
		trans.FmtCurrencySecondaryGroupLen = len(match) - 2
	}

	// everything before the first digit placeholder ('#' or '0') is the
	// prefix, everything after the last one is the suffix.
	idx := 0
	for idx = 0; idx < len(trans.CurrencyNumberFormat); idx++ {
		if trans.CurrencyNumberFormat[idx] == '#' || trans.CurrencyNumberFormat[idx] == '0' {
			trans.FmtCurrencyPrefix = trans.CurrencyNumberFormat[:idx]
			break
		}
	}

	for idx = len(trans.CurrencyNumberFormat) - 1; idx >= 0; idx-- {
		if trans.CurrencyNumberFormat[idx] == '#' || trans.CurrencyNumberFormat[idx] == '0' {
			idx++
			trans.FmtCurrencySuffix = trans.CurrencyNumberFormat[idx:]
			break
		}
	}

	// NOTE(review): '¤' is U+00A4 — two bytes (0xC2 0xA4) in UTF-8 — while
	// string indexing yields single bytes, so this equality actually matches
	// the 0xA4 continuation byte. string(prefix[idx]) re-encodes byte value
	// 164 back to "¤" so the Replace still strips the full sign, but idx is
	// the position of the sign's SECOND byte: a leading '¤' yields idx == 1,
	// never 0, so FmtCurrencyLeft stays false. Confirm whether the templates
	// depend on FmtCurrencyLeft before changing this.
	for idx = 0; idx < len(trans.FmtCurrencyPrefix); idx++ {
		if trans.FmtCurrencyPrefix[idx] == '¤' {

			trans.FmtCurrencyInPrefix = true
			trans.FmtCurrencyPrefix = strings.Replace(trans.FmtCurrencyPrefix, string(trans.FmtCurrencyPrefix[idx]), "", 1)

			if idx == 0 {
				trans.FmtCurrencyLeft = true
			} else {
				trans.FmtCurrencyLeft = false
			}

			break
		}
	}

	// same byte-level caveat as above applies to the suffix scan.
	for idx = 0; idx < len(trans.FmtCurrencySuffix); idx++ {
		if trans.FmtCurrencySuffix[idx] == '¤' {

			trans.FmtCurrencyInPrefix = false
			trans.FmtCurrencySuffix = strings.Replace(trans.FmtCurrencySuffix, string(trans.FmtCurrencySuffix[idx]), "", 1)

			if idx == 0 {
				trans.FmtCurrencyLeft = true
			} else {
				trans.FmtCurrencyLeft = false
			}

			break
		}
	}

	// if len(trans.FmtCurrencyPrefix) > 0 {
	// 	trans.FmtCurrencyPrefix = fmt.Sprintf("%#v", []byte(trans.FmtCurrencyPrefix))
	// }

	// if len(trans.FmtCurrencySuffix) > 0 {
	// 	trans.FmtCurrencySuffix = fmt.Sprintf("%#v", []byte(trans.FmtCurrencySuffix))
	// }

	// no need to parse again if true....
	if negativeEqual {
		trans.FmtCurrencyNegativePrefix = trans.FmtCurrencyPrefix
		trans.FmtCurrencyNegativeSuffix = trans.FmtCurrencySuffix
		trans.FmtCurrencyNegativeInPrefix = trans.FmtCurrencyInPrefix
		trans.FmtCurrencyNegativeLeft = trans.FmtCurrencyLeft
		return
	}

	trans.FmtCurrencyNegativeExists = true

	// the distinct negative pattern is parsed with the same logic (and the
	// same '¤' byte-level caveat) as the positive one above.
	for idx = 0; idx < len(trans.NegativeCurrencyNumberFormat); idx++ {
		if trans.NegativeCurrencyNumberFormat[idx] == '#' || trans.NegativeCurrencyNumberFormat[idx] == '0' {
			trans.FmtCurrencyNegativePrefix = trans.NegativeCurrencyNumberFormat[:idx]
			break
		}
	}

	for idx = len(trans.NegativeCurrencyNumberFormat) - 1; idx >= 0; idx-- {
		if trans.NegativeCurrencyNumberFormat[idx] == '#' || trans.NegativeCurrencyNumberFormat[idx] == '0' {
			idx++
			trans.FmtCurrencyNegativeSuffix = trans.NegativeCurrencyNumberFormat[idx:]
			break
		}
	}

	for idx = 0; idx < len(trans.FmtCurrencyNegativePrefix); idx++ {
		if trans.FmtCurrencyNegativePrefix[idx] == '¤' {

			trans.FmtCurrencyNegativeInPrefix = true
			trans.FmtCurrencyNegativePrefix = strings.Replace(trans.FmtCurrencyNegativePrefix, string(trans.FmtCurrencyNegativePrefix[idx]), "", 1)

			if idx == 0 {
				trans.FmtCurrencyNegativeLeft = true
			} else {
				trans.FmtCurrencyNegativeLeft = false
			}

			break
		}
	}

	for idx = 0; idx < len(trans.FmtCurrencyNegativeSuffix); idx++ {
		if trans.FmtCurrencyNegativeSuffix[idx] == '¤' {

			trans.FmtCurrencyNegativeInPrefix = false
			trans.FmtCurrencyNegativeSuffix = strings.Replace(trans.FmtCurrencyNegativeSuffix, string(trans.FmtCurrencyNegativeSuffix[idx]), "", 1)

			if idx == 0 {
				trans.FmtCurrencyNegativeLeft = true
			} else {
				trans.FmtCurrencyNegativeLeft = false
			}

			break
		}
	}

	// if len(trans.FmtCurrencyNegativePrefix) > 0 {
	// 	trans.FmtCurrencyNegativePrefix = fmt.Sprintf("%#v", []byte(trans.FmtCurrencyNegativePrefix))
	// }

	// if len(trans.FmtCurrencyNegativeSuffix) > 0 {
	// 	trans.FmtCurrencyNegativeSuffix = fmt.Sprintf("%#v", []byte(trans.FmtCurrencyNegativeSuffix))
	// }

	return
}
// parsePercentNumberFormat extracts prefix, suffix, grouping and decimal
// metadata from the locale's percent number format pattern and records it
// on the translator. It is a no-op when no pattern is defined.
func parsePercentNumberFormat(trans *translator) {
	if len(trans.PercentNumberFormat) == 0 {
		return
	}

	trans.FmtPercentExists = true

	// Group/decimal lengths are derived from the regex match length minus
	// the surrounding pattern characters captured by each regex.
	match := groupLenPercentRegex.FindString(trans.PercentNumberFormat)
	if len(match) > 0 {
		trans.FmtPercentGroupLen = len(match) - 1
	}

	match = requiredDecimalRegex.FindString(trans.PercentNumberFormat)
	if len(match) > 0 {
		trans.FmtPercentMinDecimalLen = len(match) - 1
	}

	match = secondaryGroupLenRegex.FindString(trans.PercentNumberFormat)
	if len(match) > 0 {
		trans.FmtPercentSecondaryGroupLen = len(match) - 2
	}

	// Everything before the first digit placeholder ('#' or '0') is the
	// prefix; everything after the last placeholder is the suffix.
	idx := 0
	for idx = 0; idx < len(trans.PercentNumberFormat); idx++ {
		if trans.PercentNumberFormat[idx] == '#' || trans.PercentNumberFormat[idx] == '0' {
			trans.FmtPercentPrefix = trans.PercentNumberFormat[:idx]
			break
		}
	}

	for idx = len(trans.PercentNumberFormat) - 1; idx >= 0; idx-- {
		if trans.PercentNumberFormat[idx] == '#' || trans.PercentNumberFormat[idx] == '0' {
			idx++
			trans.FmtPercentSuffix = trans.PercentNumberFormat[idx:]
			break
		}
	}

	// Locate the '%' sign, strip it from the prefix/suffix, and record
	// whether it sits in the prefix and on the left edge.
	for idx = 0; idx < len(trans.FmtPercentPrefix); idx++ {
		if trans.FmtPercentPrefix[idx] == '%' {
			trans.FmtPercentInPrefix = true
			trans.FmtPercentPrefix = strings.Replace(trans.FmtPercentPrefix, string(trans.FmtPercentPrefix[idx]), "", 1)
			if idx == 0 {
				trans.FmtPercentLeft = true
			} else {
				trans.FmtPercentLeft = false
			}
			break
		}
	}

	for idx = 0; idx < len(trans.FmtPercentSuffix); idx++ {
		if trans.FmtPercentSuffix[idx] == '%' {
			trans.FmtPercentInPrefix = false
			trans.FmtPercentSuffix = strings.Replace(trans.FmtPercentSuffix, string(trans.FmtPercentSuffix[idx]), "", 1)
			if idx == 0 {
				trans.FmtPercentLeft = true
			} else {
				trans.FmtPercentLeft = false
			}
			break
		}
	}

	// if len(trans.FmtPercentPrefix) > 0 {
	// 	trans.FmtPercentPrefix = fmt.Sprintf("%#v", []byte(trans.FmtPercentPrefix))
	// }

	// if len(trans.FmtPercentSuffix) > 0 {
	// 	trans.FmtPercentSuffix = fmt.Sprintf("%#v", []byte(trans.FmtPercentSuffix))
	// }

	return
}
// parseDecimalNumberFormat extracts the group length, minimum decimal
// length, and secondary group length from the locale's decimal number
// format pattern, recording them on the translator. It is a no-op when
// no pattern is defined.
func parseDecimalNumberFormat(trans *translator) {
	if len(trans.DecimalNumberFormat) == 0 {
		return
	}

	trans.FmtNumberExists = true

	// Only the positive pattern (the part before any ';') is inspected.
	positive := strings.SplitN(trans.DecimalNumberFormat, ";", 2)[0]

	if m := groupLenRegex.FindString(positive); len(m) > 0 {
		trans.FmtNumberGroupLen = len(m) - 2
	}

	if m := requiredDecimalRegex.FindString(positive); len(m) > 0 {
		trans.FmtNumberMinDecimalLen = len(m) - 1
	}

	if m := secondaryGroupLenRegex.FindString(positive); len(m) > 0 {
		trans.FmtNumberSecondaryGroupLen = len(m) - 2
	}
}
// sortRank pairs a generated code snippet with a numeric rank so that
// plural-rule helper-variable declarations are emitted in a fixed order.
type sortRank struct {
	Rank  uint8  // ordering key; lower values sort first
	Value string // generated code snippet to emit
}
// ByRank implements sort.Interface, ordering sortRank values ascending
// by their Rank field.
type ByRank []sortRank

func (a ByRank) Len() int           { return len(a) }
func (a ByRank) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a ByRank) Less(i, j int) bool { return a[i].Rank < a[j].Rank }
// ByPluralRule implements sort.Interface, ordering locales.PluralRule
// values ascending by their numeric enum value.
type ByPluralRule []locales.PluralRule

func (a ByPluralRule) Len() int           { return len(a) }
func (a ByPluralRule) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a ByPluralRule) Less(i, j int) bool { return a[i] < a[j] }
// TODO: refine generated code a bit, some combinations end up with same plural rule,
// could check all at once; but it works and that's step 1 complete
//
// parseRangePluralRuleFunc generates the Go source for a locale's range
// plural rule function (results) plus the %#v-formatted slice of plural
// rules it can return (plurals), from CLDR supplemental data.
func parseRangePluralRuleFunc(current *cldr.CLDR, baseLocale string) (results string, plurals string) {
	// Anonymous struct mirroring the CLDR pluralRanges XML element.
	var pluralRange *struct {
		cldr.Common
		Locales     string `xml:"locales,attr"`
		PluralRange []*struct {
			cldr.Common
			Start  string `xml:"start,attr"`
			End    string `xml:"end,attr"`
			Result string `xml:"result,attr"`
		} `xml:"pluralRange"`
	}

	var pluralArr []locales.PluralRule

	// Find the range rules covering this base locale.
	// NOTE(review): index 1 is assumed to hold the range plural data —
	// confirm against the cldr package's Supplemental ordering.
	for _, pr := range current.Supplemental().Plurals[1].PluralRanges {
		locs := strings.Split(pr.Locales, " ")
		for _, loc := range locs {
			if loc == baseLocale {
				pluralRange = pr
			}
		}
	}

	// no range plural rules for locale
	if pluralRange == nil {
		plurals = "nil"
		results = "return locales.PluralRuleUnknown"
		return
	}

	mp := make(map[string]struct{})

	// pre-process if all the same
	for _, rule := range pluralRange.PluralRange {
		mp[rule.Result] = struct{}{}
	}

	for k := range mp {
		psI := pluralStringToInt(k)
		pluralArr = append(pluralArr, psI)
	}

	// Single possible result: emit an unconditional return.
	if len(mp) == 1 {
		results += "return locales." + pluralStringToString(pluralRange.PluralRange[0].Result)
		plurals = fmt.Sprintf("%#v", pluralArr)
		return
	}

	multiple := len(pluralRange.PluralRange) > 1
	if multiple {
		results += "start := " + baseLocale + ".CardinalPluralRule(num1, v1)\n"
		results += "end := " + baseLocale + ".CardinalPluralRule(num2, v2)\n\n"
	}

	first := true

	// pre parse for variables
	for i, rule := range pluralRange.PluralRange {
		// The last rule becomes the unconditional fall-through return.
		if i == len(pluralRange.PluralRange)-1 {
			if multiple {
				results += "\n\n"
			}
			results += "return locales." + pluralStringToString(rule.Result)
			continue
		}

		if first {
			results += "if"
			first = false
		} else {
			results += "else if"
		}

		results += " start == locales." + pluralStringToString(rule.Start) + " && end == locales." + pluralStringToString(rule.End) + " {\n return locales." + pluralStringToString(rule.Result) + "\n} "
	}

	if multiple {
		results = "\n" + results + "\n"
	}

	// Sort the collected plural rules numerically for stable output
	// (map iteration above is unordered).
	if len(pluralArr) == 0 {
		plurals = "nil"
	} else {
		ints := make([]int, len(pluralArr))
		for i := 0; i < len(pluralArr); i++ {
			ints[i] = int(pluralArr[i])
		}

		sort.Ints(ints)

		for i := 0; i < len(ints); i++ {
			pluralArr[i] = locales.PluralRule(ints[i])
		}

		plurals = fmt.Sprintf("%#v", pluralArr)
	}

	return
}
// TODO: cleanup function logic perhaps write a lexer... but it's working right now, and
// I'm already farther down the rabbit hole than I'd like and so pulling the chute here.
//
// parseOrdinalPluralRuleFunc generates the Go source for a locale's
// ordinal plural rule function (results) plus the %#v-formatted slice of
// plural rules it can return (plurals), from CLDR supplemental data.
// NOTE(review): nearly identical to parseCardinalPluralRuleFunc — the two
// differ only in which Plurals index they read; candidates for merging.
func parseOrdinalPluralRuleFunc(current *cldr.CLDR, baseLocale string) (results string, plurals string) {
	var prOrdinal *struct {
		cldr.Common
		Locales    string "xml:\"locales,attr\""
		PluralRule []*struct {
			cldr.Common
			Count string "xml:\"count,attr\""
		} "xml:\"pluralRule\""
	}

	var pluralArr []locales.PluralRule

	// idx 0 is ordinal rules
	for _, pr := range current.Supplemental().Plurals[0].PluralRules {
		locs := strings.Split(pr.Locales, " ")
		for _, loc := range locs {
			if loc == baseLocale {
				prOrdinal = pr
				// for _, pl := range pr.PluralRule {
				// 	fmt.Println(pl.Count, pl.Common.Data())
				// }
			}
		}
	}

	// no plural rules for locale
	if prOrdinal == nil {
		plurals = "nil"
		results = "return locales.PluralRuleUnknown"
		return
	}

	// vals collects the helper-variable snippets (from prVarFuncs) that
	// the generated formulas reference.
	vals := make(map[string]struct{})
	first := true

	// pre parse for variables
	for _, rule := range prOrdinal.PluralRule {
		ps1 := pluralStringToString(rule.Count)
		psI := pluralStringToInt(rule.Count)
		pluralArr = append(pluralArr, psI)

		// Normalize CLDR rule text into Go operators ('=' -> '==',
		// 'or' -> '||', 'and' -> '&&'), dropping the sample text after '@'.
		data := strings.Replace(strings.Replace(strings.Replace(strings.TrimSpace(strings.SplitN(rule.Common.Data(), "@", 2)[0]), " = ", " == ", -1), " or ", " || ", -1), " and ", " && ", -1)

		// Empty condition: this category is the unconditional fallback.
		if len(data) == 0 {
			if len(prOrdinal.PluralRule) == 1 {
				results = "return locales." + ps1
			} else {
				results += "\n\nreturn locales." + ps1
				// results += "else {\nreturn locales." + locales.PluralStringToString(rule.Count) + ", nil\n}"
			}
			continue
		}

		// // All need n, so always add
		// if strings.Contains(data, "n") {
		// 	vals[prVarFuncs["n"]] = struct{}{}
		// }

		if strings.Contains(data, "i") {
			vals[prVarFuncs["i"]] = struct{}{}
		}

		// v is inherently avaialable as an argument
		// if strings.Contains(data, "v") {
		// 	vals[prVarFuncs["v"]] = struct{}{}
		// }

		if strings.Contains(data, "w") {
			vals[prVarFuncs["w"]] = struct{}{}
		}

		if strings.Contains(data, "f") {
			vals[prVarFuncs["f"]] = struct{}{}
		}

		if strings.Contains(data, "t") {
			vals[prVarFuncs["t"]] = struct{}{}
		}

		if first {
			results += "if "
			first = false
		} else {
			results += "else if "
		}

		stmt := ""

		// real work here
		//
		// split by 'or' then by 'and' allowing to better
		// determine bracketing for formula

		ors := strings.Split(data, "||")

		for _, or := range ors {
			stmt += "("
			ands := strings.Split(strings.TrimSpace(or), "&&")

			for _, and := range ands {
				inArg := false
				pre := ""
				lft := ""
				preOperator := ""
				args := strings.Split(strings.TrimSpace(and), " ")

				for _, a := range args {
					if inArg {
						// check to see if is a value range 2..9
						multiRange := strings.Count(a, "..") > 1
						cargs := strings.Split(strings.TrimSpace(a), ",")
						hasBracket := len(cargs) > 1
						bracketAdded := false
						lastWasRange := false

						for _, carg := range cargs {
							if rng := strings.Split(carg, ".."); len(rng) > 1 {
								if multiRange {
									pre += " ("
								} else {
									pre += " "
								}

								// A x..y range becomes a >=/<= pair, or its
								// negation for '!='.
								switch preOperator {
								case "==":
									pre += lft + " >= " + rng[0] + " && " + lft + "<=" + rng[1]
								case "!=":
									pre += "(" + lft + " < " + rng[0] + " || " + lft + " > " + rng[1] + ")"
								}

								if multiRange {
									pre += ") || "
								} else {
									pre += " || "
								}

								lastWasRange = true
								continue
							}

							if lastWasRange {
								pre = strings.TrimRight(pre, " || ") + " && "
							}

							lastWasRange = false

							if hasBracket && !bracketAdded {
								pre += "("
								bracketAdded = true
							}

							// single comma separated values
							switch preOperator {
							case "==":
								pre += " " + lft + preOperator + carg + " || "
							case "!=":
								pre += " " + lft + preOperator + carg + " && "
							}
						}

						pre = strings.TrimRight(pre, " || ")
						pre = strings.TrimRight(pre, " && ")
						pre = strings.TrimRight(pre, " || ")

						if hasBracket && bracketAdded {
							pre += ")"
						}

						continue
					}

					// An operator token switches into "argument" mode;
					// anything else accumulates into the left-hand side.
					if strings.Contains(a, "=") || a == ">" || a == "<" {
						inArg = true
						preOperator = a
						continue
					}

					lft += a
				}

				stmt += pre + " && "
			}

			stmt = strings.TrimRight(stmt, " && ") + ") || "
		}

		stmt = strings.TrimRight(stmt, " || ")
		results += stmt
		results += " {\n"

		// return plural rule here
		results += "return locales." + ps1 + "\n"
		results += "}"
	}

	pre := "\n"

	// always needed
	vals[prVarFuncs["n"]] = struct{}{}

	// Emit the helper-variable declarations in a fixed n, i, w, f, t order.
	sorted := make([]sortRank, 0, len(vals))
	for k := range vals {
		switch k[:1] {
		case "n":
			sorted = append(sorted, sortRank{
				Value: prVarFuncs["n"],
				Rank:  1,
			})
		case "i":
			sorted = append(sorted, sortRank{
				Value: prVarFuncs["i"],
				Rank:  2,
			})
		case "w":
			sorted = append(sorted, sortRank{
				Value: prVarFuncs["w"],
				Rank:  3,
			})
		case "f":
			sorted = append(sorted, sortRank{
				Value: prVarFuncs["f"],
				Rank:  4,
			})
		case "t":
			sorted = append(sorted, sortRank{
				Value: prVarFuncs["t"],
				Rank:  5,
			})
		}
	}

	sort.Sort(ByRank(sorted))

	for _, k := range sorted {
		pre += k.Value
	}

	if len(results) == 0 {
		results = "return locales.PluralRuleUnknown"
	} else {
		if !strings.HasPrefix(results, "return") {
			results = manyToSingleVars(results)
			// pre += "\n"
			results = pre + results
		}
	}

	if len(pluralArr) == 0 {
		plurals = "nil"
	} else {
		plurals = fmt.Sprintf("%#v", pluralArr)
	}

	return
}
// TODO: cleanup function logic perhaps write a lexer... but it's working right now, and
// I'm already farther down the rabbit hole than I'd like and so pulling the chute here.
//
// parseCardinalPluralRuleFunc generates the Go source for a locale's
// cardinal plural rule function (results) plus the %#v-formatted slice of
// plural rules it can return (plurals), from CLDR supplemental data.
// NOTE(review): nearly identical to parseOrdinalPluralRuleFunc — the two
// differ only in which Plurals index they read; candidates for merging.
func parseCardinalPluralRuleFunc(current *cldr.CLDR, baseLocale string) (results string, plurals string) {
	var prCardinal *struct {
		cldr.Common
		Locales    string "xml:\"locales,attr\""
		PluralRule []*struct {
			cldr.Common
			Count string "xml:\"count,attr\""
		} "xml:\"pluralRule\""
	}

	var pluralArr []locales.PluralRule

	// idx 2 is cardinal rules
	for _, pr := range current.Supplemental().Plurals[2].PluralRules {
		locs := strings.Split(pr.Locales, " ")
		for _, loc := range locs {
			if loc == baseLocale {
				prCardinal = pr
			}
		}
	}

	// no plural rules for locale
	if prCardinal == nil {
		plurals = "nil"
		results = "return locales.PluralRuleUnknown"
		return
	}

	// vals collects the helper-variable snippets (from prVarFuncs) that
	// the generated formulas reference.
	vals := make(map[string]struct{})
	first := true

	// pre parse for variables
	for _, rule := range prCardinal.PluralRule {
		ps1 := pluralStringToString(rule.Count)
		psI := pluralStringToInt(rule.Count)
		pluralArr = append(pluralArr, psI)

		// Normalize CLDR rule text into Go operators ('=' -> '==',
		// 'or' -> '||', 'and' -> '&&'), dropping the sample text after '@'.
		data := strings.Replace(strings.Replace(strings.Replace(strings.TrimSpace(strings.SplitN(rule.Common.Data(), "@", 2)[0]), " = ", " == ", -1), " or ", " || ", -1), " and ", " && ", -1)

		// Empty condition: this category is the unconditional fallback.
		if len(data) == 0 {
			if len(prCardinal.PluralRule) == 1 {
				results = "return locales." + ps1
			} else {
				results += "\n\nreturn locales." + ps1
				// results += "else {\nreturn locales." + locales.PluralStringToString(rule.Count) + ", nil\n}"
			}
			continue
		}

		// // All need n, so always add
		// if strings.Contains(data, "n") {
		// 	vals[prVarFuncs["n"]] = struct{}{}
		// }

		if strings.Contains(data, "i") {
			vals[prVarFuncs["i"]] = struct{}{}
		}

		// v is inherently avaialable as an argument
		// if strings.Contains(data, "v") {
		// 	vals[prVarFuncs["v"]] = struct{}{}
		// }

		if strings.Contains(data, "w") {
			vals[prVarFuncs["w"]] = struct{}{}
		}

		if strings.Contains(data, "f") {
			vals[prVarFuncs["f"]] = struct{}{}
		}

		if strings.Contains(data, "t") {
			vals[prVarFuncs["t"]] = struct{}{}
		}

		if first {
			results += "if "
			first = false
		} else {
			results += "else if "
		}

		stmt := ""

		// real work here
		//
		// split by 'or' then by 'and' allowing to better
		// determine bracketing for formula

		ors := strings.Split(data, "||")

		for _, or := range ors {
			stmt += "("
			ands := strings.Split(strings.TrimSpace(or), "&&")

			for _, and := range ands {
				inArg := false
				pre := ""
				lft := ""
				preOperator := ""
				args := strings.Split(strings.TrimSpace(and), " ")

				for _, a := range args {
					if inArg {
						// check to see if is a value range 2..9
						multiRange := strings.Count(a, "..") > 1
						cargs := strings.Split(strings.TrimSpace(a), ",")
						hasBracket := len(cargs) > 1
						bracketAdded := false
						lastWasRange := false

						for _, carg := range cargs {
							if rng := strings.Split(carg, ".."); len(rng) > 1 {
								if multiRange {
									pre += " ("
								} else {
									pre += " "
								}

								// A x..y range becomes a >=/<= pair, or its
								// negation for '!='.
								switch preOperator {
								case "==":
									pre += lft + " >= " + rng[0] + " && " + lft + "<=" + rng[1]
								case "!=":
									pre += "(" + lft + " < " + rng[0] + " || " + lft + " > " + rng[1] + ")"
								}

								if multiRange {
									pre += ") || "
								} else {
									pre += " || "
								}

								lastWasRange = true
								continue
							}

							if lastWasRange {
								pre = strings.TrimRight(pre, " || ") + " && "
							}

							lastWasRange = false

							if hasBracket && !bracketAdded {
								pre += "("
								bracketAdded = true
							}

							// single comma separated values
							switch preOperator {
							case "==":
								pre += " " + lft + preOperator + carg + " || "
							case "!=":
								pre += " " + lft + preOperator + carg + " && "
							}
						}

						pre = strings.TrimRight(pre, " || ")
						pre = strings.TrimRight(pre, " && ")
						pre = strings.TrimRight(pre, " || ")

						if hasBracket && bracketAdded {
							pre += ")"
						}

						continue
					}

					// An operator token switches into "argument" mode;
					// anything else accumulates into the left-hand side.
					if strings.Contains(a, "=") || a == ">" || a == "<" {
						inArg = true
						preOperator = a
						continue
					}

					lft += a
				}

				stmt += pre + " && "
			}

			stmt = strings.TrimRight(stmt, " && ") + ") || "
		}

		stmt = strings.TrimRight(stmt, " || ")
		results += stmt
		results += " {\n"

		// return plural rule here
		results += "return locales." + ps1 + "\n"
		results += "}"
	}

	pre := "\n"

	// always needed
	vals[prVarFuncs["n"]] = struct{}{}

	// Emit the helper-variable declarations in a fixed n, i, w, f, t order.
	sorted := make([]sortRank, 0, len(vals))
	for k := range vals {
		switch k[:1] {
		case "n":
			sorted = append(sorted, sortRank{
				Value: prVarFuncs["n"],
				Rank:  1,
			})
		case "i":
			sorted = append(sorted, sortRank{
				Value: prVarFuncs["i"],
				Rank:  2,
			})
		case "w":
			sorted = append(sorted, sortRank{
				Value: prVarFuncs["w"],
				Rank:  3,
			})
		case "f":
			sorted = append(sorted, sortRank{
				Value: prVarFuncs["f"],
				Rank:  4,
			})
		case "t":
			sorted = append(sorted, sortRank{
				Value: prVarFuncs["t"],
				Rank:  5,
			})
		}
	}

	sort.Sort(ByRank(sorted))

	for _, k := range sorted {
		pre += k.Value
	}

	if len(results) == 0 {
		results = "return locales.PluralRuleUnknown"
	} else {
		if !strings.HasPrefix(results, "return") {
			results = manyToSingleVars(results)
			// pre += "\n"
			results = pre + results
		}
	}

	if len(pluralArr) == 0 {
		plurals = "nil"
	} else {
		plurals = fmt.Sprintf("%#v", pluralArr)
	}

	return
}
// manyToSingleVars hoists repeated modulus expressions (n % x, i % x,
// w % x, f % x, t % x) out of the generated plural-rule formula 'input'
// into single variable declarations, returning the declarations followed
// by the rewritten formula.
//
// The original implementation repeated the same ~15-line stanza five
// times (once per operand); it is collapsed here into one local helper.
func manyToSingleVars(input string) (results string) {
	// rewrite replaces every distinct matched mod formula with a variable
	// named <prefix>Mod<divisor>, appending the declaration to results and
	// substituting the variable into input. When wrapMod is true the
	// declaration uses math.Mod, as the generated 'n' value is a float;
	// the other operands are integral and keep the raw '%' expression.
	rewrite := func(matches []string, prefix string, wrapMod bool) {
		mp := make(map[string][]string) // map of formula to [divisor, expression]
		for _, formula := range matches {
			if _, found := mp[formula]; found {
				continue
			}
			split := strings.SplitN(formula, "%", 2)
			expr := formula
			if wrapMod {
				expr = "math.Mod(" + split[0] + ", " + split[1] + ")"
			}
			mp[formula] = []string{split[1], expr}
		}
		for k, v := range mp {
			variable := prefix + "Mod" + v[0]
			results += variable + " := " + v[1] + "\n"
			input = strings.Replace(input, k, variable, -1)
		}
	}

	rewrite(nModRegex.FindAllString(input, -1), "n", true)
	rewrite(iModRegex.FindAllString(input, -1), "i", false)
	rewrite(wModRegex.FindAllString(input, -1), "w", false)
	rewrite(fModRegex.FindAllString(input, -1), "f", false)
	rewrite(tModRegex.FindAllString(input, -1), "t", false)

	results = results + "\n" + input
	return
}
// pluralStringToInt returns the locales.PluralRule enum value for the
// CLDR plural category name 'plural' ("zero", "one", "two", "few",
// "many", "other"), or PluralRuleUnknown for anything else.
// The input is trimmed first, for consistency with pluralStringToString,
// which already tolerates surrounding whitespace.
func pluralStringToInt(plural string) locales.PluralRule {
	switch strings.TrimSpace(plural) {
	case "zero":
		return locales.PluralRuleZero
	case "one":
		return locales.PluralRuleOne
	case "two":
		return locales.PluralRuleTwo
	case "few":
		return locales.PluralRuleFew
	case "many":
		return locales.PluralRuleMany
	case "other":
		return locales.PluralRuleOther
	default:
		return locales.PluralRuleUnknown
	}
}
// pluralStringToString maps a CLDR plural category name ("zero", "one",
// "two", "few", "many", "other") to the name of the corresponding
// locales.PluralRule constant, defaulting to "PluralRuleUnknown" for
// unrecognized input. Surrounding whitespace is ignored.
func pluralStringToString(pr string) string {
	names := map[string]string{
		"zero":  "PluralRuleZero",
		"one":   "PluralRuleOne",
		"two":   "PluralRuleTwo",
		"few":   "PluralRuleFew",
		"many":  "PluralRuleMany",
		"other": "PluralRuleOther",
	}
	if name, ok := names[strings.TrimSpace(pr)]; ok {
		return name
	}
	return "PluralRuleUnknown"
}
|
package main
import (
"testing"
)
// TestGenKeyPairSuccess checks that genKeyPair succeeds for a writable path.
// NOTE(review): uses the fixed path /tmp/foo — assumes /tmp is writable and
// leaves the generated file(s) behind; a temp file would be safer.
func TestGenKeyPairSuccess(t *testing.T) {
	_, err := genKeyPair("/tmp/foo")
	if err != nil {
		t.Fatal(err)
	}
}
More coverage; use a better temp directory.
package main
import (
"io/ioutil"
"os"
"os/user"
"testing"
)
// TestGenKeyPairSuccess checks that genKeyPair succeeds when given a
// writable temporary file path, removing the file afterwards.
// NOTE(review): if genKeyPair also writes a companion file (e.g. a .pub
// next to the key) it is not cleaned up here — confirm and remove it too.
func TestGenKeyPairSuccess(t *testing.T) {
	tmpfile, err := ioutil.TempFile("", "example")
	if err != nil {
		t.Fatal(err)
	}
	defer os.Remove(tmpfile.Name()) // clean up
	_, err = genKeyPair(tmpfile.Name())
	if err != nil {
		t.Fatal(err)
	}
	//TODO: verify genKeyPair File content
}
// TestGenKeyPairFailNoPerms checks that genKeyPair reports an error when
// asked to write to an unwritable location (/proc).
func TestGenKeyPairFailNoPerms(t *testing.T) {
	_, err := genKeyPair("/proc/something")
	if err == nil {
		// The original called t.Logf then t.Fatal(err) with a nil err,
		// which printed "<nil>" and gave no useful failure message.
		t.Fatal("genKeyPair(\"/proc/something\") succeeded; want an error")
	}
}
// TestGetUserHomeDirSuccess checks that getUserHomeDir returns a
// non-empty home directory for the current user.
func TestGetUserHomeDirSuccess(t *testing.T) {
	usr, err := user.Current()
	if err != nil {
		// Fold the original Logf+Fatal pair into a single message with context.
		t.Fatalf("cannot get current user info: %v", err)
	}
	homeDir, err := getUserHomeDir(usr)
	if err != nil {
		t.Fatal(err)
	}
	if homeDir == "" {
		t.Fatal("invalid homedir")
	}
}
|
// Copyright 2017 The Upspin Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"context"
"crypto/sha256"
"fmt"
"io/ioutil"
"net/http"
"os"
"golang.org/x/oauth2/google"
dns "google.golang.org/api/dns/v1"
"google.golang.org/api/googleapi"
"upspin.io/errors"
"upspin.io/upspin"
)
// TODO(adg): make this configurable?
const (
dnsProject = "upspin-prod"
dnsZone = "upspin-services"
dnsDomain = "upspin.services"
)
// userToHost converts an Upspin user name to a fully-qualified domain name
// under the upspin.services domain. The host portion of the name is the
// hex-encoded first 16 bytes of the SHA256 checksum of the user name.
// The security of this service relies on there not being collisions in this
// space, which should be astronomically unlikely.
func userToHost(name upspin.UserName) string {
	// sha256.Sum256 computes the digest in one call, avoiding the
	// hash.Hash allocation and New/Write/Sum dance of the original.
	sum := sha256.Sum256([]byte(name))
	return fmt.Sprintf("%x."+dnsDomain, sum[:16])
}
// setupDNSService loads the credentials for accessing the Cloud DNS service
// and sets the server's dnsSvc with a ready-to-use dns.Service.
//
// Credential resolution order:
//  1. /upspin/serviceaccount.json (present in the Docker image);
//  2. Google application default credentials (for local testing);
// any other read error is returned unchanged.
func (s *server) setupDNSService() error {
	ctx := context.Background()

	var client *http.Client

	// First try to read the serviceaccount.json in the Docker image.
	b, err := ioutil.ReadFile("/upspin/serviceaccount.json")
	if err == nil {
		cfg, err := google.JWTConfigFromJSON(b, dns.CloudPlatformScope)
		if err != nil {
			return err
		}
		client = cfg.Client(ctx)
	} else if os.IsNotExist(err) {
		// Otherwise use the default application credentials,
		// which should work when testing locally.
		client, err = google.DefaultClient(ctx, dns.CloudPlatformScope)
		if err != nil {
			return err
		}
	} else {
		return err
	}

	s.dnsSvc, err = dns.New(client)
	return err
}
// listRecordSets returns the list of record sets for a given host name.
// The trailing dot is appended to form the fully-qualified DNS name.
func (s *server) listRecordSets(host string) ([]*dns.ResourceRecordSet, error) {
	resp, err := s.dnsSvc.ResourceRecordSets.List(dnsProject, dnsZone).Name(host + ".").Do()
	if err != nil {
		return nil, err
	}
	return resp.Rrsets, nil
}
// lookupName returns the IP address and host name for a given user, or a
// NotExist error if there is no host name for that user.
// The first datum of the first non-empty record set is taken as the address.
func (s *server) lookupName(name upspin.UserName) (ip, host string, err error) {
	host = userToHost(name)
	sets, err := s.listRecordSets(host)
	if err != nil {
		return "", "", err
	}
	for _, set := range sets {
		if len(set.Rrdatas) > 0 {
			return set.Rrdatas[0], host, nil
		}
	}
	return "", "", errors.E(errors.NotExist)
}
// updateName creates (or replaces) an A record for the given user's host name
// that points to the given IP address, and returns the user's host name.
func (s *server) updateName(name upspin.UserName, ip string) (host string, err error) {
	host = userToHost(name)
	rrsets, err := s.listRecordSets(host)
	if err != nil {
		return "", err
	}
	// Check whether the appropriate A record already exists, and do
	// nothing if so. This avoids issuing a DNS change (and consuming
	// API quota) when nothing would change.
	if len(rrsets) == 1 && rrsets[0].Type == "A" {
		if ds := rrsets[0].Rrdatas; len(ds) == 1 && ds[0] == ip {
			return host, nil
		}
	}
	// No appropriate A record exists; replace the existing
	// records for this host with a new one.
	change := &dns.Change{
		Additions: []*dns.ResourceRecordSet{{
			Name:    host + ".",
			Rrdatas: []string{ip},
			Ttl:     3600, // 1 hour
			Type:    "A",
		}},
		Deletions: rrsets,
	}
	change, err = s.dnsSvc.Changes.Create(dnsProject, dnsZone, change).Do()
	if err != nil && !googleapi.IsNotModified(err) {
		return "", err
	}
	return host, nil
}
cmd/hostserver-gcp: don't update record if no change has been made
If the requested host/IP is already set then don't make any changes.
Easy to do since we already look up the existing records before making
the change.
Change-Id: I6cd2b4edf6540b1ce058de645931eb6ef20eb3ba
Reviewed-on: https://upspin-review.googlesource.com/12260
Reviewed-by: Rob Pike <4dc7c9ec434ed06502767136789763ec11d2c4b7@golang.org>
// Copyright 2017 The Upspin Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"context"
"crypto/sha256"
"fmt"
"io/ioutil"
"net/http"
"os"
"golang.org/x/oauth2/google"
dns "google.golang.org/api/dns/v1"
"google.golang.org/api/googleapi"
"upspin.io/errors"
"upspin.io/upspin"
)
// TODO(adg): make this configurable?
const (
dnsProject = "upspin-prod"
dnsZone = "upspin-services"
dnsDomain = "upspin.services"
)
// userToHost converts an Upspin user name to a fully-qualified domain name
// under the upspin.services domain. The host portion of the name is the
// hex-encoded first 16 bytes of the SHA256 checksum of the user name.
// The security of this service relies on there not being collisions in this
// space, which should be astronomically unlikely.
func userToHost(name upspin.UserName) string {
	hash := sha256.New()
	hash.Write([]byte(name))
	// %x hex-encodes the truncated digest; dnsDomain supplies the suffix.
	return fmt.Sprintf("%x."+dnsDomain, hash.Sum(nil)[:16])
}
// setupDNSService loads the credentials for accessing the Cloud DNS service
// and sets the server's dnsSvc with a ready-to-use dns.Service.
//
// It prefers the service account JSON baked into the Docker image and
// falls back to Google application default credentials; any other read
// error is returned unchanged.
func (s *server) setupDNSService() error {
	ctx := context.Background()

	var client *http.Client

	// First try to read the serviceaccount.json in the Docker image.
	b, err := ioutil.ReadFile("/upspin/serviceaccount.json")
	if err == nil {
		cfg, err := google.JWTConfigFromJSON(b, dns.CloudPlatformScope)
		if err != nil {
			return err
		}
		client = cfg.Client(ctx)
	} else if os.IsNotExist(err) {
		// Otherwise use the default application credentials,
		// which should work when testing locally.
		client, err = google.DefaultClient(ctx, dns.CloudPlatformScope)
		if err != nil {
			return err
		}
	} else {
		return err
	}

	s.dnsSvc, err = dns.New(client)
	return err
}
// listRecordSets returns the list of record sets for a given host name.
// The host is qualified with a trailing dot before querying the zone.
func (s *server) listRecordSets(host string) ([]*dns.ResourceRecordSet, error) {
	query := s.dnsSvc.ResourceRecordSets.List(dnsProject, dnsZone).Name(host + ".")
	resp, err := query.Do()
	if err != nil {
		return nil, err
	}
	return resp.Rrsets, nil
}
// lookupName returns the IP address and host name for a given user, or a
// NotExist error if there is no host name for that user.
// The first datum of the first non-empty record set is taken as the address.
func (s *server) lookupName(name upspin.UserName) (ip, host string, err error) {
	host = userToHost(name)
	rrsets, err := s.listRecordSets(host)
	if err != nil {
		return "", "", err
	}
	for _, rrs := range rrsets {
		for _, rrd := range rrs.Rrdatas {
			// Return the first datum found.
			return rrd, host, nil
		}
	}
	return "", "", errors.E(errors.NotExist)
}
// updateName creates (or replaces) an A record for the given user's host name
// that points to the given IP address, and returns the user's host name.
// If the record already points at the given IP, no change is issued.
func (s *server) updateName(name upspin.UserName, ip string) (host string, err error) {
	host = userToHost(name)
	rrsets, err := s.listRecordSets(host)
	if err != nil {
		return "", err
	}
	// Check whether the appropriate A record already exists,
	// and do nothing if so.
	if len(rrsets) == 1 && rrsets[0].Type == "A" {
		if ds := rrsets[0].Rrdatas; len(ds) == 1 && ds[0] == ip {
			return host, nil
		}
	}
	// No appropriate A record exists; replace the existing
	// records for this host with a new one.
	change := &dns.Change{
		Additions: []*dns.ResourceRecordSet{{
			Name:    host + ".",
			Rrdatas: []string{ip},
			Ttl:     3600, // 1 hour
			Type:    "A",
		}},
		Deletions: rrsets,
	}
	change, err = s.dnsSvc.Changes.Create(dnsProject, dnsZone, change).Do()
	// IsNotModified is tolerated: the zone already matches the request.
	if err != nil && !googleapi.IsNotModified(err) {
		return "", err
	}
	return host, nil
}
|
package main
import (
"fmt"
"time"
"launchpad.net/gnuflag"
"launchpad.net/juju-core/cmd"
"launchpad.net/juju-core/environs"
"launchpad.net/juju-core/log"
"launchpad.net/juju-core/state"
"launchpad.net/tomb"
// register providers
_ "launchpad.net/juju-core/environs/ec2"
)
// retryDuration is how long Run waits before restarting the provisioner
// after a failure.
var retryDuration = 10 * time.Second
// ProvisioningAgent is a cmd.Command responsible for running a provisioning agent.
type ProvisioningAgent struct {
	Conf AgentConf // agent configuration; supplies flags and the state connection info
}
// Info returns usage information for the command.
func (a *ProvisioningAgent) Info() *cmd.Info {
	// Positional fields — presumably name, args, purpose, doc;
	// NOTE(review): confirm against the cmd.Info declaration.
	return &cmd.Info{"provisioning", "", "run a juju provisioning agent", ""}
}
// Init initializes the command for running.
// It registers the agent's flags, parses args, and validates that no
// unexpected positional arguments remain.
func (a *ProvisioningAgent) Init(f *gnuflag.FlagSet, args []string) error {
	a.Conf.addFlags(f)
	if err := f.Parse(true, args); err != nil {
		return err
	}
	return a.Conf.checkArgs(f.Args())
}
// Run runs a provisioning agent.
// It keeps recreating the Provisioner after errors, sleeping
// retryDuration between attempts; a nil result from Wait signals a
// clean exit and ends the retry loop.
func (a *ProvisioningAgent) Run(_ *cmd.Context) error {
	for {
		p, err := NewProvisioner(&a.Conf.StateInfo)
		if err == nil {
			if err = p.Wait(); err == nil {
				// if Wait returns nil then we consider that a signal
				// that the process should exit the retry logic.
				return nil
			}
		}
		log.Printf("restarting provisioner after error: %v", err)
		time.Sleep(retryDuration)
	}
	// Never reached: the loop above only exits via return.
	panic("unreachable")
}
// Provisioner watches the environment configuration and the machines in
// state, starting and stopping provider instances to match.
type Provisioner struct {
	st              *state.State
	origInfo        *state.Info // state info this provisioner was created with
	info            *state.Info // state info obtained from the environ (see loop)
	environ         environs.Environ
	tomb            tomb.Tomb
	environWatcher  *state.ConfigWatcher
	machinesWatcher *state.MachinesWatcher

	// machine.Id => environs.Instance
	instances map[int]environs.Instance
	// instance.Id => *state.Machine
	machines map[string]*state.Machine
}
// NewProvisioner returns a Provisioner.
// It opens the state with the given info and starts the main loop in a
// goroutine; the caller observes it via Wait and shuts it down via Stop.
func NewProvisioner(info *state.Info) (*Provisioner, error) {
	st, err := state.Open(info)
	if err != nil {
		return nil, err
	}
	p := &Provisioner{
		st:        st,
		origInfo:  info,
		instances: make(map[int]environs.Instance),
		machines:  make(map[string]*state.Machine),
	}
	go p.loop()
	return p, nil
}
// loop is the provisioner's main goroutine. It waits for a valid
// environment configuration and then hands control to innerLoop,
// exiting when the tomb is killed or the config watcher closes.
func (p *Provisioner) loop() {
	defer p.tomb.Done()
	defer p.st.Close()
	p.environWatcher = p.st.WatchEnvironConfig()
	for {
		select {
		case <-p.tomb.Dying():
			return
		case config, ok := <-p.environWatcher.Changes():
			if !ok {
				// Watcher closed: propagate its stop error, if any.
				err := p.environWatcher.Stop()
				if err != nil {
					p.tomb.Kill(err)
				}
				return
			}
			var err error
			p.environ, err = environs.NewEnviron(config.Map())
			if err != nil {
				// Invalid config: keep waiting for a good one.
				log.Printf("provisioner loaded invalid environment configuration: %v", err)
				continue
			}
			log.Printf("provisioner loaded new environment configuration")
			// Get another stateInfo from the environment
			// because on the bootstrap machine the info passed
			// into the agent may not use the correct address.
			p.info, err = p.environ.StateInfo()
			if err != nil {
				p.tomb.Kill(err)
				return
			}
			p.innerLoop()
		}
	}
}
// innerLoop reacts to environment-config and machine changes once a
// valid environ is in place, reconciling machines against instances.
// It returns when the tomb dies or either watcher closes.
func (p *Provisioner) innerLoop() {
	// call processMachines to stop any unknown instances before watching machines.
	if err := p.processMachines(nil, nil); err != nil {
		p.tomb.Kill(err)
		return
	}
	p.machinesWatcher = p.st.WatchMachines()
	for {
		select {
		case <-p.tomb.Dying():
			return
		case change, ok := <-p.environWatcher.Changes():
			if !ok {
				err := p.environWatcher.Stop()
				if err != nil {
					p.tomb.Kill(err)
				}
				return
			}
			config, err := environs.NewConfig(change.Map())
			if err != nil {
				log.Printf("provisioner loaded invalid environment configuration: %v", err)
				continue
			}
			p.environ.SetConfig(config)
			log.Printf("provisioner loaded new environment configuration")
		case machines, ok := <-p.machinesWatcher.Changes():
			if !ok {
				err := p.machinesWatcher.Stop()
				if err != nil {
					p.tomb.Kill(err)
				}
				return
			}
			// TODO(dfc) fire process machines periodically to shut down unknown
			// instances.
			if err := p.processMachines(machines.Added, machines.Deleted); err != nil {
				p.tomb.Kill(err)
			}
		}
	}
}
// Wait waits for the Provisioner to exit and returns the error that
// caused it to stop, if any.
func (p *Provisioner) Wait() error {
	return p.tomb.Wait()
}
// Stop stops the Provisioner and returns any error encountered while
// provisioning.
func (p *Provisioner) Stop() error {
	// Killing with nil marks a clean shutdown; Wait surfaces any prior error.
	p.tomb.Kill(nil)
	return p.tomb.Wait()
}
// processMachines reconciles provider instances with machine state: it
// starts instances for added machines that lack one, and stops instances
// for removed machines plus any instances unknown to the state.
func (p *Provisioner) processMachines(added, removed []*state.Machine) error {
	// step 1. find which of the added machines have not
	// yet been allocated a started instance.
	notstarted, err := p.findNotStarted(added)
	if err != nil {
		return err
	}

	// step 2. start an instance for any machines we found.
	if err := p.startMachines(notstarted); err != nil {
		return err
	}

	// step 3. stop all machines that were removed from the state.
	stopping, err := p.instancesForMachines(removed)
	if err != nil {
		return err
	}

	// step 4. find instances which are running but have no machine
	// associated with them.
	unknown, err := p.findUnknownInstances()
	if err != nil {
		return err
	}

	return p.stopInstances(append(stopping, unknown...))
}
// findUnknownInstances finds instances which are not associated with a machine.
// It lists all provider instances, removes those claimed by a machine in
// state, and returns whatever is left.
func (p *Provisioner) findUnknownInstances() ([]environs.Instance, error) {
	all, err := p.environ.AllInstances()
	if err != nil {
		return nil, err
	}
	instances := make(map[string]environs.Instance)
	for _, i := range all {
		instances[i.Id()] = i
	}

	// TODO(dfc) this is very inefficient, p.machines cache may help.
	machines, err := p.st.AllMachines()
	if err != nil {
		return nil, err
	}
	for _, m := range machines {
		id, err := m.InstanceId()
		if err != nil {
			return nil, err
		}
		if id == "" {
			// TODO(dfc) InstanceId should return an error if the id isn't set.
			continue
		}
		delete(instances, id)
	}
	var unknown []environs.Instance
	for _, i := range instances {
		unknown = append(unknown, i)
	}
	return unknown, nil
}
// findNotStarted finds machines without an InstanceId set, these are
// defined as not started; machines that already have an instance are
// logged and skipped.
func (p *Provisioner) findNotStarted(machines []*state.Machine) ([]*state.Machine, error) {
	var pending []*state.Machine
	for _, machine := range machines {
		instID, err := machine.InstanceId()
		if err != nil {
			return nil, err
		}
		if instID != "" {
			log.Printf("machine %s already started as instance %q", machine, instID)
			continue
		}
		// TODO(dfc) InstanceId should return an error if the id isn't set.
		pending = append(pending, machine)
	}
	return pending, nil
}
// startMachines starts an instance for each given machine, stopping
// at the first failure.
func (p *Provisioner) startMachines(machines []*state.Machine) error {
	for _, mach := range machines {
		if err := p.startMachine(mach); err != nil {
			return err
		}
	}
	return nil
}
// startMachine starts a provider instance for m, records the instance
// id on the machine in state, and updates the local id<->machine caches.
func (p *Provisioner) startMachine(m *state.Machine) error {
	// TODO(dfc) the state.Info passed to environ.StartInstance remains contentious
	// however as the PA only knows one state.Info, and that info is used by MAs and
	// UAs to locate the ZK for this environment, it is logical to use the same
	// state.Info as the PA.
	inst, err := p.environ.StartInstance(m.Id(), p.info)
	if err != nil {
		log.Printf("provisioner can't start machine %s: %v", m, err)
		return err
	}
	// assign the instance id to the machine
	if err := m.SetInstanceId(inst.Id()); err != nil {
		// NOTE(review): the instance was started but its id could not be
		// recorded; it is not stopped here — presumably reaped later by
		// findUnknownInstances. TODO confirm.
		return err
	}
	// populate the local cache
	p.instances[m.Id()] = inst
	p.machines[inst.Id()] = m
	log.Printf("provisioner started machine %s as instance %s", m, inst.Id())
	return nil
}
// stopInstances stops the given instances and evicts them, together
// with their machines, from the local caches.
func (p *Provisioner) stopInstances(instances []environs.Instance) error {
	// Although calling StopInstance with an empty slice should produce no change in the
	// provider, environs like dummy do not consider this a noop.
	if len(instances) == 0 {
		return nil
	}
	if err := p.environ.StopInstances(instances); err != nil {
		return err
	}
	// Drop the stopped instances (and their machines) from the caches.
	for _, inst := range instances {
		m, cached := p.machines[inst.Id()]
		if !cached {
			continue
		}
		delete(p.machines, inst.Id())
		delete(p.instances, m.Id())
	}
	return nil
}
// instanceForMachine returns the environs.Instance that represents
// this machine's instance, consulting the local cache first and the
// environ on a miss.
func (p *Provisioner) instanceForMachine(m *state.Machine) (environs.Instance, error) {
	inst, ok := p.instances[m.Id()]
	if !ok {
		// not cached locally, ask the environ.
		id, err := m.InstanceId()
		if err != nil {
			return nil, err
		}
		if id == "" {
			// TODO(dfc) InstanceId should return an error if the id isn't set.
			return nil, fmt.Errorf("machine %s not found", m)
		}
		// TODO(dfc) this should be batched, or the cache preloaded at startup to
		// avoid N calls to the environ.
		insts, err := p.environ.Instances([]string{id})
		if err != nil {
			// the provider doesn't know about this instance, give up.
			return nil, err
		}
		// Guard against a provider that returns a nil error but no
		// instance for the requested id: indexing insts[0] blindly
		// would panic.
		if len(insts) == 0 {
			return nil, fmt.Errorf("instance %q for machine %s not found", id, m)
		}
		inst = insts[0]
	}
	return inst, nil
}
// instancesForMachines maps each machine to the environs.Instance
// backing it in the provider, preserving input order.
func (p *Provisioner) instancesForMachines(machines []*state.Machine) ([]environs.Instance, error) {
	var result []environs.Instance
	for _, mach := range machines {
		inst, err := p.instanceForMachine(mach)
		if err != nil {
			return nil, err
		}
		result = append(result, inst)
	}
	return result, nil
}
cmd/jujud: don't store original info in Provisioner
package main
import (
"fmt"
"time"
"launchpad.net/gnuflag"
"launchpad.net/juju-core/cmd"
"launchpad.net/juju-core/environs"
"launchpad.net/juju-core/log"
"launchpad.net/juju-core/state"
"launchpad.net/tomb"
// register providers
_ "launchpad.net/juju-core/environs/ec2"
)
// retryDuration is the delay between provisioner restart attempts in
// ProvisioningAgent.Run. Declared as a var rather than a const —
// presumably so tests can shorten it; TODO confirm.
var retryDuration = 10 * time.Second

// ProvisioningAgent is a cmd.Command responsible for running a provisioning agent.
type ProvisioningAgent struct {
	Conf AgentConf // shared agent configuration (flags and state connection info)
}
// Info returns usage information for the command.
func (a *ProvisioningAgent) Info() *cmd.Info {
	// Positional cmd.Info literal: command name, empty args string,
	// short purpose line, empty doc text (field names not visible here).
	return &cmd.Info{"provisioning", "", "run a juju provisioning agent", ""}
}
// Init initializes the command for running: it registers the agent's
// flags, parses the arguments, and validates whatever remains.
func (a *ProvisioningAgent) Init(f *gnuflag.FlagSet, args []string) error {
	a.Conf.addFlags(f)
	err := f.Parse(true, args)
	if err == nil {
		err = a.Conf.checkArgs(f.Args())
	}
	return err
}
// Run runs a provisioning agent, restarting it after a pause on any
// failure until Wait returns nil — the signal to exit cleanly.
func (a *ProvisioningAgent) Run(_ *cmd.Context) error {
	for {
		p, err := NewProvisioner(&a.Conf.StateInfo)
		if err == nil {
			if err = p.Wait(); err == nil {
				// if Wait returns nil then we consider that a signal
				// that the process should exit the retry logic.
				return nil
			}
		}
		log.Printf("restarting provisioner after error: %v", err)
		time.Sleep(retryDuration)
	}
	// The trailing panic("unreachable") was removed: a `for` loop with
	// no break is a terminating statement, so go vet flags any code
	// after it as unreachable.
}
// Provisioner is the provisioning worker: it watches state for machine
// additions and removals and drives the environ to start and stop the
// corresponding provider instances.
type Provisioner struct {
	st      *state.State
	info    *state.Info // state connection info handed to started instances
	environ environs.Environ
	tomb    tomb.Tomb // lifecycle: Kill/Wait/Dying
	environWatcher  *state.ConfigWatcher
	machinesWatcher *state.MachinesWatcher
	// machine.Id => environs.Instance
	instances map[int]environs.Instance
	// instance.Id => *state.Machine
	machines map[string]*state.Machine
}
// NewProvisioner opens a state connection with info and returns a
// Provisioner whose main loop is already running.
func NewProvisioner(info *state.Info) (*Provisioner, error) {
	st, err := state.Open(info)
	if err != nil {
		return nil, err
	}
	prov := &Provisioner{
		st:        st,
		info:      info,
		instances: make(map[int]environs.Instance),
		machines:  make(map[string]*state.Machine),
	}
	// The loop goroutine owns the state connection and closes it on exit.
	go prov.loop()
	return prov, nil
}
// loop is the provisioner's main goroutine: it waits for a valid
// environment configuration, builds the environ from it, refreshes the
// state connection info, then hands control to innerLoop. It exits
// when the tomb is killed or the config watcher closes.
func (p *Provisioner) loop() {
	defer p.tomb.Done()
	defer p.st.Close()
	p.environWatcher = p.st.WatchEnvironConfig()
	for {
		select {
		case <-p.tomb.Dying():
			return
		case config, ok := <-p.environWatcher.Changes():
			if !ok {
				// Watcher channel closed; surface its shutdown error, if any.
				err := p.environWatcher.Stop()
				if err != nil {
					p.tomb.Kill(err)
				}
				return
			}
			var err error
			p.environ, err = environs.NewEnviron(config.Map())
			if err != nil {
				// Not fatal: keep waiting, a later config may be valid.
				log.Printf("provisioner loaded invalid environment configuration: %v", err)
				continue
			}
			log.Printf("provisioner loaded new environment configuration")
			// Get another stateInfo from the environment
			// because on the bootstrap machine the info passed
			// into the agent may not use the correct address.
			info, err := p.environ.StateInfo()
			if err != nil {
				p.tomb.Kill(err)
				return
			}
			p.info = info
			p.innerLoop()
		}
	}
}
// innerLoop runs once a valid environ exists: it first reconciles
// current machines/instances, then reacts to machine additions and
// removals as well as further environment config changes. It returns
// when the tomb dies or a watcher closes.
func (p *Provisioner) innerLoop() {
	// call processMachines to stop any unknown instances before watching machines.
	if err := p.processMachines(nil, nil); err != nil {
		p.tomb.Kill(err)
		return
	}
	p.machinesWatcher = p.st.WatchMachines()
	for {
		select {
		case <-p.tomb.Dying():
			return
		case change, ok := <-p.environWatcher.Changes():
			if !ok {
				err := p.environWatcher.Stop()
				if err != nil {
					p.tomb.Kill(err)
				}
				return
			}
			config, err := environs.NewConfig(change.Map())
			if err != nil {
				// Invalid config is not fatal; keep the current one.
				log.Printf("provisioner loaded invalid environment configuration: %v", err)
				continue
			}
			// Update the existing environ in place rather than rebuilding it.
			p.environ.SetConfig(config)
			log.Printf("provisioner loaded new environment configuration")
		case machines, ok := <-p.machinesWatcher.Changes():
			if !ok {
				err := p.machinesWatcher.Stop()
				if err != nil {
					p.tomb.Kill(err)
				}
				return
			}
			// TODO(dfc) fire process machines periodically to shut down unknown
			// instances.
			if err := p.processMachines(machines.Added, machines.Deleted); err != nil {
				p.tomb.Kill(err)
			}
		}
	}
}
// Wait waits for the Provisioner to exit and returns the error its
// main loop terminated with (nil after a clean Stop).
func (p *Provisioner) Wait() error {
	return p.tomb.Wait()
}
// Stop stops the Provisioner and returns any error encountered while
// provisioning.
func (p *Provisioner) Stop() error {
	// Killing with nil marks a clean shutdown; Wait blocks until the
	// main loop has observed it and finished.
	p.tomb.Kill(nil)
	return p.tomb.Wait()
}
// processMachines reconciles provider instances with the machines
// recorded in state: it starts instances for newly added machines and
// stops instances whose machines have been removed, plus any strays.
func (p *Provisioner) processMachines(added, removed []*state.Machine) error {
	// Start an instance for every added machine that lacks one.
	pending, err := p.findNotStarted(added)
	if err != nil {
		return err
	}
	if err := p.startMachines(pending); err != nil {
		return err
	}
	// Collect the instances backing the removed machines...
	doomed, err := p.instancesForMachines(removed)
	if err != nil {
		return err
	}
	// ...plus any instances the provider reports that have no machine
	// in state, and stop them all in a single provider call.
	stray, err := p.findUnknownInstances()
	if err != nil {
		return err
	}
	return p.stopInstances(append(doomed, stray...))
}
// findUnknownInstances returns the provider instances that have no
// corresponding machine recorded in state.
func (p *Provisioner) findUnknownInstances() ([]environs.Instance, error) {
	all, err := p.environ.AllInstances()
	if err != nil {
		return nil, err
	}
	byID := make(map[string]environs.Instance)
	for _, inst := range all {
		byID[inst.Id()] = inst
	}
	// TODO(dfc) this is very inefficient, p.machines cache may help.
	machines, err := p.st.AllMachines()
	if err != nil {
		return nil, err
	}
	// Discard every instance that state knows about.
	for _, m := range machines {
		instID, err := m.InstanceId()
		if err != nil {
			return nil, err
		}
		// TODO(dfc) InstanceId should return an error if the id isn't set.
		if instID != "" {
			delete(byID, instID)
		}
	}
	var unknown []environs.Instance
	for _, inst := range byID {
		unknown = append(unknown, inst)
	}
	return unknown, nil
}
// findNotStarted filters machines down to those with no instance id
// assigned yet; these are defined as not started.
func (p *Provisioner) findNotStarted(machines []*state.Machine) ([]*state.Machine, error) {
	var pending []*state.Machine
	for _, m := range machines {
		instID, err := m.InstanceId()
		if err != nil {
			return nil, err
		}
		// TODO(dfc) InstanceId should return an error if the id isn't set.
		if instID != "" {
			log.Printf("machine %s already started as instance %q", m, instID)
			continue
		}
		pending = append(pending, m)
	}
	return pending, nil
}
// startMachines starts an instance for each given machine, stopping
// at the first failure.
func (p *Provisioner) startMachines(machines []*state.Machine) error {
	for _, mach := range machines {
		if err := p.startMachine(mach); err != nil {
			return err
		}
	}
	return nil
}
// startMachine starts a provider instance for m, records the instance
// id on the machine in state, and updates the local id<->machine caches.
func (p *Provisioner) startMachine(m *state.Machine) error {
	// TODO(dfc) the state.Info passed to environ.StartInstance remains contentious
	// however as the PA only knows one state.Info, and that info is used by MAs and
	// UAs to locate the ZK for this environment, it is logical to use the same
	// state.Info as the PA.
	inst, err := p.environ.StartInstance(m.Id(), p.info)
	if err != nil {
		log.Printf("provisioner can't start machine %s: %v", m, err)
		return err
	}
	// assign the instance id to the machine
	if err := m.SetInstanceId(inst.Id()); err != nil {
		// NOTE(review): the instance was started but its id could not be
		// recorded; it is not stopped here — presumably reaped later by
		// findUnknownInstances. TODO confirm.
		return err
	}
	// populate the local cache
	p.instances[m.Id()] = inst
	p.machines[inst.Id()] = m
	log.Printf("provisioner started machine %s as instance %s", m, inst.Id())
	return nil
}
// stopInstances stops the given instances and evicts them, together
// with their machines, from the local caches.
func (p *Provisioner) stopInstances(instances []environs.Instance) error {
	// Although calling StopInstance with an empty slice should produce no change in the
	// provider, environs like dummy do not consider this a noop.
	if len(instances) == 0 {
		return nil
	}
	if err := p.environ.StopInstances(instances); err != nil {
		return err
	}
	// Drop the stopped instances (and their machines) from the caches.
	for _, inst := range instances {
		m, cached := p.machines[inst.Id()]
		if !cached {
			continue
		}
		delete(p.machines, inst.Id())
		delete(p.instances, m.Id())
	}
	return nil
}
// instanceForMachine returns the environs.Instance that represents
// this machine's instance, consulting the local cache first and the
// environ on a miss.
func (p *Provisioner) instanceForMachine(m *state.Machine) (environs.Instance, error) {
	inst, ok := p.instances[m.Id()]
	if !ok {
		// not cached locally, ask the environ.
		id, err := m.InstanceId()
		if err != nil {
			return nil, err
		}
		if id == "" {
			// TODO(dfc) InstanceId should return an error if the id isn't set.
			return nil, fmt.Errorf("machine %s not found", m)
		}
		// TODO(dfc) this should be batched, or the cache preloaded at startup to
		// avoid N calls to the environ.
		insts, err := p.environ.Instances([]string{id})
		if err != nil {
			// the provider doesn't know about this instance, give up.
			return nil, err
		}
		// Guard against a provider that returns a nil error but no
		// instance for the requested id: indexing insts[0] blindly
		// would panic.
		if len(insts) == 0 {
			return nil, fmt.Errorf("instance %q for machine %s not found", id, m)
		}
		inst = insts[0]
	}
	return inst, nil
}
// instancesForMachines maps each machine to the environs.Instance
// backing it in the provider, preserving input order.
func (p *Provisioner) instancesForMachines(machines []*state.Machine) ([]environs.Instance, error) {
	var result []environs.Instance
	for _, mach := range machines {
		inst, err := p.instanceForMachine(mach)
		if err != nil {
			return nil, err
		}
		result = append(result, inst)
	}
	return result, nil
}
|
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"encoding/json"
"fmt"
"math"
"net"
"net/url"
"os"
"os/exec"
"os/user"
"runtime"
"strings"
"github.com/blang/semver"
"github.com/docker/machine/libmachine/ssh"
"github.com/golang/glog"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/pkg/errors"
"github.com/shirou/gopsutil/cpu"
gopshost "github.com/shirou/gopsutil/host"
"github.com/spf13/cobra"
"github.com/spf13/viper"
cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil"
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/download"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/kubeconfig"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/mustload"
"k8s.io/minikube/pkg/minikube/node"
"k8s.io/minikube/pkg/minikube/notify"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/out/register"
"k8s.io/minikube/pkg/minikube/reason"
"k8s.io/minikube/pkg/minikube/style"
"k8s.io/minikube/pkg/minikube/registry"
"k8s.io/minikube/pkg/minikube/translate"
"k8s.io/minikube/pkg/util"
"k8s.io/minikube/pkg/version"
)
// Package-level storage for repeatable "start" flags (registry and API
// server identity). Populated by the flag definitions, which are not
// visible in this chunk; registryMirror may also be filled from the
// viper key "registry_mirror" in runStart.
var (
	registryMirror   []string
	insecureRegistry []string
	apiServerNames   []string
	apiServerIPs     []net.IP
)
// init wires up all start-command flag groups and binds them to viper
// so values can also come from config files and the environment.
func init() {
	initMinikubeFlags()
	initKubernetesFlags()
	initDriverFlags()
	initNetworkingFlags()
	// Binding failure is a programming error, so exit immediately.
	if err := viper.BindPFlags(startCmd.Flags()); err != nil {
		exit.Error(reason.InternalBindFlags, "unable to bind flags", err)
	}
}
// startCmd represents the start command; all of the heavy lifting is
// delegated to runStart.
var startCmd = &cobra.Command{
	Use:   "start",
	Short: "Starts a local Kubernetes cluster",
	Long:  "Starts a local Kubernetes cluster",
	Run:   runStart,
}
// platform generates a user-readable platform message describing the
// host OS/distro, appending virtualization and architecture details
// when the environment is unusual.
func platform() string {
	var b strings.Builder

	// Show the distro version if possible
	if hi, err := gopshost.Info(); err == nil {
		b.WriteString(fmt.Sprintf("%s %s", strings.Title(hi.Platform), hi.PlatformVersion))
		glog.Infof("hostinfo: %+v", hi)
	} else {
		glog.Warningf("gopshost.Info returned error: %v", err)
		b.WriteString(runtime.GOOS)
	}

	vsys, vrole, err := gopshost.Virtualization()
	if err != nil {
		glog.Warningf("gopshost.Virtualization returned error: %v", err)
	} else {
		glog.Infof("virtualization: %s %s", vsys, vrole)
	}

	// This environment is exotic, let's output a bit more.
	if vrole == "guest" || runtime.GOARCH != "amd64" {
		suffix := fmt.Sprintf(" (%s)", runtime.GOARCH)
		if vsys != "" {
			suffix = fmt.Sprintf(" (%s/%s)", vsys, runtime.GOARCH)
		}
		b.WriteString(suffix)
	}
	return b.String()
}
// runStart executes the flow of "minikube start": it validates flags
// and profile state, selects a driver (with fallback to alternates),
// provisions the host, starts the node(s), and reports kubectl
// connectivity.
func runStart(cmd *cobra.Command, args []string) {
	register.SetEventLogPath(localpath.EventLog(ClusterFlagValue()))
	out.SetJSON(viper.GetString(startOutput) == "json")
	displayVersion(version.GetVersion())

	// No need to do the update check if no one is going to see it
	if !viper.GetBool(interactive) || !viper.GetBool(dryRun) {
		// Avoid blocking execution on optional HTTP fetches
		go notify.MaybePrintUpdateTextFromGithub()
	}

	displayEnviron(os.Environ())
	if viper.GetBool(force) {
		out.WarningT("minikube skips various validations when --force is supplied; this may lead to unexpected behavior")
	}

	// if --registry-mirror specified when run minikube start,
	// take arg precedence over MINIKUBE_REGISTRY_MIRROR
	// actually this is a hack, because viper 1.0.0 can assign env to variable if StringSliceVar
	// and i can't update it to 1.4.0, it affects too much code
	// other types (like String, Bool) of flag works, so imageRepository, imageMirrorCountry
	// can be configured as MINIKUBE_IMAGE_REPOSITORY and IMAGE_MIRROR_COUNTRY
	// this should be updated to documentation
	if len(registryMirror) == 0 {
		registryMirror = viper.GetStringSlice("registry_mirror")
	}

	if !config.ProfileNameValid(ClusterFlagValue()) {
		out.WarningT("Profile name '{{.name}}' is not valid", out.V{"name": ClusterFlagValue()})
		exit.Message(reason.Usage, "Only alphanumeric and dashes '-' are permitted. Minimum 1 character, starting with alphanumeric.")
	}

	// A load error other than "not exist" is fatal; a missing profile
	// simply means this is a fresh cluster.
	existing, err := config.Load(ClusterFlagValue())
	if err != nil && !config.IsNotExist(err) {
		exit.Message(reason.HostConfigLoad, "Unable to load config: {{.error}}", out.V{"error": err})
	}
	if existing != nil {
		upgradeExistingConfig(existing)
	}

	validateSpecifiedDriver(existing)
	validateKubernetesVersion(existing)

	ds, alts, specified := selectDriver(existing)
	starter, err := provisionWithDriver(cmd, ds, existing)
	if err != nil {
		node.ExitIfFatal(err)
		machine.MaybeDisplayAdvice(err, ds.Name)
		if specified {
			// If the user specified a driver, don't fallback to anything else
			exit.Error(reason.GuestProvision, "error provisioning host", err)
		} else {
			success := false
			// Walk down the rest of the options
			for _, alt := range alts {
				out.WarningT("Startup with {{.old_driver}} driver failed, trying with alternate driver {{.new_driver}}: {{.error}}", out.V{"old_driver": ds.Name, "new_driver": alt.Name, "error": err})
				ds = alt
				// Delete the existing cluster and try again with the next driver on the list
				// NOTE(review): `profile, err :=` re-declares err inside the
				// loop scope, so the warning above and the exit below always
				// report the ORIGINAL provisioning error, never an alternate
				// driver's error. Verify this is intended.
				profile, err := config.LoadProfile(ClusterFlagValue())
				if err != nil {
					glog.Warningf("%s profile does not exist, trying anyways.", ClusterFlagValue())
				}
				err = deleteProfile(profile)
				if err != nil {
					out.WarningT("Failed to delete cluster {{.name}}, proceeding with retry anyway.", out.V{"name": ClusterFlagValue()})
				}
				starter, err = provisionWithDriver(cmd, ds, existing)
				if err != nil {
					continue
				} else {
					// Success!
					success = true
					break
				}
			}
			if !success {
				exit.Error(reason.GuestProvision, "error provisioning host", err)
			}
		}
	}

	if existing != nil && existing.KubernetesConfig.ContainerRuntime == "crio" && driver.IsKIC(existing.Driver) {
		// Stop and start again if it's crio because it's broken above v1.17.3
		out.WarningT("Due to issues with CRI-O post v1.17.3, we need to restart your cluster.")
		out.WarningT("See details at https://github.com/kubernetes/minikube/issues/8861")
		stopProfile(existing.Name)
		starter, err = provisionWithDriver(cmd, ds, existing)
		if err != nil {
			exit.Error(reason.GuestProvision, "error provisioning host", err)
		}
	}

	kubeconfig, err := startWithDriver(cmd, starter, existing)
	if err != nil {
		node.ExitIfFatal(err)
		exit.Error(reason.GuestStart, "failed to start node", err)
	}

	if err := showKubectlInfo(kubeconfig, starter.Node.KubernetesVersion, starter.Cfg.Name); err != nil {
		glog.Errorf("kubectl info: %v", err)
	}
}
// provisionWithDriver validates the chosen driver and flags, ensures
// the driver binary and ISO artifacts are present, generates the
// cluster config, and provisions the host machine, returning a
// node.Starter holding everything node.Start needs.
func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing *config.ClusterConfig) (node.Starter, error) {
	driverName := ds.Name
	glog.Infof("selected driver: %s", driverName)
	validateDriver(ds, existing)
	err := autoSetDriverOptions(cmd, driverName)
	if err != nil {
		// Non-fatal: log and continue with the options as given.
		glog.Errorf("Error autoSetOptions : %v", err)
	}

	validateFlags(cmd, driverName)
	validateUser(driverName)

	// Download & update the driver, even in --download-only mode
	if !viper.GetBool(dryRun) {
		updateDriver(driverName)
	}

	k8sVersion := getKubernetesVersion(existing)
	cc, n, err := generateClusterConfig(cmd, existing, k8sVersion, driverName)
	if err != nil {
		return node.Starter{}, errors.Wrap(err, "Failed to generate config")
	}

	// This is about as far as we can go without overwriting config files
	if viper.GetBool(dryRun) {
		out.T(style.DryRun, `dry-run validation complete!`)
		os.Exit(0)
	}

	// VM drivers boot from the minikube ISO; cache it up front.
	if driver.IsVM(driverName) {
		url, err := download.ISO(viper.GetStringSlice(isoURL), cmd.Flags().Changed(isoURL))
		if err != nil {
			return node.Starter{}, errors.Wrap(err, "Failed to cache ISO")
		}
		cc.MinikubeISO = url
	}

	var existingAddons map[string]bool
	if viper.GetBool(installAddons) {
		existingAddons = map[string]bool{}
		if existing != nil && existing.Addons != nil {
			existingAddons = existing.Addons
		}
	}

	if viper.GetBool(nativeSSH) {
		ssh.SetDefaultClient(ssh.Native)
	} else {
		ssh.SetDefaultClient(ssh.External)
	}

	mRunner, preExists, mAPI, host, err := node.Provision(&cc, &n, true, viper.GetBool(deleteOnFailure))
	if err != nil {
		return node.Starter{}, err
	}

	return node.Starter{
		Runner:         mRunner,
		PreExists:      preExists,
		MachineAPI:     mAPI,
		Host:           host,
		ExistingAddons: existingAddons,
		Cfg:            &cc,
		Node:           &n,
	}, nil
}
// startWithDriver starts the primary control-plane node (deleting and
// retrying once on failure when allowed), then creates or restarts any
// additional nodes, and returns the control plane's kubeconfig.
func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config.ClusterConfig) (*kubeconfig.Settings, error) {
	kubeconfig, err := node.Start(starter, true)
	if err != nil {
		kubeconfig, err = maybeDeleteAndRetry(cmd, *starter.Cfg, *starter.Node, starter.ExistingAddons, err)
		if err != nil {
			return nil, err
		}
	}

	numNodes := viper.GetInt(nodes)
	if existing != nil {
		if numNodes > 1 {
			// We ignore the --nodes parameter if we're restarting an existing cluster
			out.WarningT(`The cluster {{.cluster}} already exists which means the --nodes parameter will be ignored. Use "minikube node add" to add nodes to an existing cluster.`, out.V{"cluster": existing.Name})
		}
		numNodes = len(existing.Nodes)
	}
	if numNodes > 1 {
		if driver.BareMetal(starter.Cfg.Driver) {
			exit.Message(reason.DrvUnsupportedMulti, "The none driver is not compatible with multi-node clusters.")
		} else {
			// Only warn users on first start.
			if existing == nil {
				out.Ln("")
				warnAboutMultiNode()
				// Fresh cluster: create the requested worker nodes.
				for i := 1; i < numNodes; i++ {
					nodeName := node.Name(i + 1)
					n := config.Node{
						Name:              nodeName,
						Worker:            true,
						ControlPlane:      false,
						KubernetesVersion: starter.Cfg.KubernetesConfig.KubernetesVersion,
					}
					out.Ln("") // extra newline for clarity on the command line
					err := node.Add(starter.Cfg, n, viper.GetBool(deleteOnFailure))
					if err != nil {
						return nil, errors.Wrap(err, "adding node")
					}
				}
			} else {
				// Existing cluster: re-add its known worker nodes.
				for _, n := range existing.Nodes {
					if !n.ControlPlane {
						err := node.Add(starter.Cfg, n, viper.GetBool(deleteOnFailure))
						if err != nil {
							return nil, errors.Wrap(err, "adding node")
						}
					}
				}
			}
		}
	}

	return kubeconfig, nil
}
// warnAboutMultiNode tells the user that multi-node support is still
// experimental and where to track progress.
func warnAboutMultiNode() {
	out.WarningT("Multi-node clusters are currently experimental and might exhibit unintended behavior.")
	out.T(style.Documentation, "To track progress on multi-node clusters, see https://github.com/kubernetes/minikube/issues/7538.")
}
// updateDriver installs or updates the named driver binary, warning
// (but never failing) when the minikube version cannot be parsed or
// the update itself fails.
func updateDriver(driverName string) {
	v, err := version.GetSemverVersion()
	if err != nil {
		out.WarningT("Error parsing minikube version: {{.error}}", out.V{"error": err})
		return
	}
	if err := driver.InstallOrUpdate(driverName, localpath.MakeMiniPath("bin"), v, viper.GetBool(interactive), viper.GetBool(autoUpdate)); err != nil {
		out.WarningT("Unable to update {{.driver}} driver: {{.error}}", out.V{"driver": driverName, "error": err})
	}
}
// displayVersion prints the minikube version banner, prefixed with the
// profile name when it is not the default cluster.
func displayVersion(version string) {
	prefix := ""
	if ClusterFlagValue() != constants.DefaultClusterName {
		prefix = fmt.Sprintf("[%s] ", ClusterFlagValue())
	}

	register.Reg.SetStep(register.InitialSetup)
	out.T(style.Happy, "{{.prefix}}minikube {{.version}} on {{.platform}}", out.V{"prefix": prefix, "version": version, "platform": platform()})
}
// displayEnviron makes the user aware of environment variables that
// will affect how minikube operates: it prints every MINIKUBE_* entry
// plus the kubeconfig variable.
func displayEnviron(env []string) {
	for _, kv := range env {
		bits := strings.SplitN(kv, "=", 2)
		// Skip malformed entries without "="; indexing bits[1]
		// unconditionally would panic on such input.
		if len(bits) != 2 {
			continue
		}
		k := bits[0]
		v := bits[1]
		if strings.HasPrefix(k, "MINIKUBE_") || k == constants.KubeconfigEnvVar {
			out.Infof("{{.key}}={{.value}}", out.V{"key": k, "value": v})
		}
	}
}
// showKubectlInfo tells the user how to reach the new cluster with
// kubectl and warns when the local kubectl's version is skewed more
// than one minor version (or any major version) from the cluster's.
func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName string) error {
	register.Reg.SetStep(register.Done)
	if kcs.KeepContext {
		out.T(style.Kubectl, "To connect to this cluster, use: kubectl --context={{.name}}", out.V{"name": kcs.ClusterName})
	} else {
		out.T(style.Ready, `Done! kubectl is now configured to use "{{.name}}"`, out.V{"name": machineName})
	}

	path, err := exec.LookPath("kubectl")
	if err != nil {
		// Missing kubectl is not an error for minikube itself; just advise.
		out.ErrT(style.Kubectl, "Kubectl not found in your path")
		out.ErrT(style.Workaround, "You can use kubectl inside minikube. For more information, visit https://minikube.sigs.k8s.io/docs/handbook/kubectl/")
		out.ErrT(style.Tip, "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/")
		return nil
	}

	gitVersion, err := kubectlVersion(path)
	if err != nil {
		return err
	}

	client, err := semver.Make(strings.TrimPrefix(gitVersion, version.VersionPrefix))
	if err != nil {
		return errors.Wrap(err, "client semver")
	}

	cluster := semver.MustParse(strings.TrimPrefix(k8sVersion, version.VersionPrefix))
	// Absolute difference between client and cluster minor versions.
	minorSkew := int(math.Abs(float64(int(client.Minor) - int(cluster.Minor))))
	glog.Infof("kubectl: %s, cluster: %s (minor skew: %d)", client, cluster, minorSkew)

	if client.Major != cluster.Major || minorSkew > 1 {
		out.Ln("")
		out.WarningT("{{.path}} is version {{.client_version}}, which may be incompatible with Kubernetes {{.cluster_version}}.",
			out.V{"path": path, "client_version": client, "cluster_version": cluster})
		out.ErrT(style.Tip, "You can also use 'minikube kubectl -- get pods' to invoke a matching version",
			out.V{"path": path, "client_version": client})
	}
	return nil
}
// maybeDeleteAndRetry deletes the cluster and retries the start once
// when --delete-on-failure is set; otherwise it returns originalErr
// untouched. On retry it re-provisions and starts every node of the
// regenerated config and returns the control plane's kubeconfig.
func maybeDeleteAndRetry(cmd *cobra.Command, existing config.ClusterConfig, n config.Node, existingAddons map[string]bool, originalErr error) (*kubeconfig.Settings, error) {
	if viper.GetBool(deleteOnFailure) {
		out.WarningT("Node {{.name}} failed to start, deleting and trying again.", out.V{"name": n.Name})
		// Start failed, delete the cluster and try again
		profile, err := config.LoadProfile(existing.Name)
		if err != nil {
			out.ErrT(style.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": existing.Name})
		}

		err = deleteProfile(profile)
		if err != nil {
			out.WarningT("Failed to delete cluster {{.name}}, proceeding with retry anyway.", out.V{"name": existing.Name})
		}

		// Re-generate the cluster config, just in case the failure was related to an old config format
		cc := updateExistingConfigFromFlags(cmd, &existing)
		var kubeconfig *kubeconfig.Settings
		for _, n := range cc.Nodes {
			// The Starter aliases the loop variable via &n, but it is
			// fully consumed by node.Start below before the next
			// iteration, so the aliasing is safe here.
			r, p, m, h, err := node.Provision(&cc, &n, n.ControlPlane, false)
			s := node.Starter{
				Runner:         r,
				PreExists:      p,
				MachineAPI:     m,
				Host:           h,
				Cfg:            &cc,
				Node:           &n,
				ExistingAddons: existingAddons,
			}
			if err != nil {
				// Ok we failed again, let's bail
				return nil, err
			}

			k, err := node.Start(s, n.ControlPlane)
			if n.ControlPlane {
				// Keep the control plane's kubeconfig for the caller.
				kubeconfig = k
			}
			if err != nil {
				// Ok we failed again, let's bail
				return nil, err
			}
		}
		return kubeconfig, nil
	}

	// Don't delete the cluster unless they ask
	return nil, originalErr
}
// kubectlVersion returns the client git version reported by the
// kubectl binary at path, trying JSON output first and falling back
// to the legacy --short format for very old clients.
func kubectlVersion(path string) (string, error) {
	raw, err := exec.Command(path, "version", "--client", "--output=json").Output()
	if err != nil {
		// really old Kubernetes clients did not have the --output parameter
		legacy, lerr := exec.Command(path, "version", "--client", "--short").Output()
		if lerr != nil {
			return "", errors.Wrap(lerr, "exec")
		}
		trimmed := strings.TrimSpace(string(legacy))
		return strings.Replace(trimmed, "Client Version: ", "", 1), nil
	}

	var parsed struct {
		ClientVersion struct {
			GitVersion string `json:"gitVersion"`
		} `json:"clientVersion"`
	}
	if err := json.Unmarshal(raw, &parsed); err != nil {
		return "", errors.Wrap(err, "unmarshal")
	}
	return parsed.ClientVersion.GitVersion, nil
}
// selectDriver decides which driver to use, in priority order:
// existing profile, --driver flag, deprecated --vm-driver flag, then
// auto-detection. It returns the chosen driver, the remaining ranked
// alternates (for fallback), and whether the choice was explicit.
func selectDriver(existing *config.ClusterConfig) (registry.DriverState, []registry.DriverState, bool) {
	// Technically unrelated, but important to perform before detection
	driver.SetLibvirtURI(viper.GetString(kvmQemuURI))
	register.Reg.SetStep(register.SelectingDriver)
	// By default, the driver is whatever we used last time
	if existing != nil {
		old := hostDriver(existing)
		ds := driver.Status(old)
		out.T(style.Sparkle, `Using the {{.driver}} driver based on existing profile`, out.V{"driver": ds.String()})
		return ds, nil, true
	}

	// Default to looking at the new driver parameter
	if d := viper.GetString("driver"); d != "" {
		if vmd := viper.GetString("vm-driver"); vmd != "" {
			// Output a warning
			warning := `Both driver={{.driver}} and vm-driver={{.vmd}} have been set.
Since vm-driver is deprecated, minikube will default to driver={{.driver}}.
If vm-driver is set in the global config, please run "minikube config unset vm-driver" to resolve this warning.
`
			out.WarningT(warning, out.V{"driver": d, "vmd": vmd})
		}
		ds := driver.Status(d)
		if ds.Name == "" {
			exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": d, "os": runtime.GOOS})
		}
		out.T(style.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()})
		return ds, nil, true
	}

	// Fallback to old driver parameter
	if d := viper.GetString("vm-driver"); d != "" {
		ds := driver.Status(viper.GetString("vm-driver"))
		if ds.Name == "" {
			exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": d, "os": runtime.GOOS})
		}
		out.T(style.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()})
		return ds, nil, true
	}

	// No explicit choice: rank the available drivers and pick the best.
	choices := driver.Choices(viper.GetBool("vm"))
	pick, alts, rejects := driver.Suggest(choices)
	if pick.Name == "" {
		out.T(style.ThumbsDown, "Unable to pick a default driver. Here is what was considered, in preference order:")
		for _, r := range rejects {
			out.Infof("{{ .name }}: {{ .rejection }}", out.V{"name": r.Name, "rejection": r.Rejection})
		}
		exit.Message(reason.DrvNotDetected, "No possible driver was detected. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/")
	}

	if len(alts) > 1 {
		altNames := []string{}
		for _, a := range alts {
			altNames = append(altNames, a.String())
		}
		out.T(style.Sparkle, `Automatically selected the {{.driver}} driver. Other choices: {{.alternates}}`, out.V{"driver": pick.Name, "alternates": strings.Join(altNames, ", ")})
	} else {
		out.T(style.Sparkle, `Automatically selected the {{.driver}} driver`, out.V{"driver": pick.String()})
	}
	return pick, alts, false
}
// hostDriver returns the actual driver used by a libmachine host, which can differ from our config.
// It falls back to the value recorded in the cluster config whenever
// the libmachine host cannot be loaded.
func hostDriver(existing *config.ClusterConfig) string {
	if existing == nil {
		return ""
	}
	api, err := machine.NewAPIClient()
	if err != nil {
		glog.Warningf("selectDriver NewAPIClient: %v", err)
		return existing.Driver
	}

	cp, err := config.PrimaryControlPlane(existing)
	if err != nil {
		glog.Warningf("Unable to get control plane from existing config: %v", err)
		return existing.Driver
	}
	machineName := driver.MachineName(*existing, cp)
	h, err := api.Load(machineName)
	if err != nil {
		glog.Warningf("api.Load failed for %s: %v", machineName, err)
		// Prefer the legacy VMDriver field when it is populated.
		if existing.VMDriver != "" {
			return existing.VMDriver
		}
		return existing.Driver
	}
	return h.Driver.DriverName()
}
// validateSpecifiedDriver makes sure that if a user has passed in a
// driver it matches the existing cluster's driver, exiting with
// advice when the two conflict.
func validateSpecifiedDriver(existing *config.ClusterConfig) {
	if existing == nil {
		return
	}

	// --driver takes precedence over the deprecated --vm-driver.
	requested := viper.GetString("driver")
	if requested == "" {
		requested = viper.GetString("vm-driver")
	}
	// Neither --vm-driver nor --driver was specified
	if requested == "" {
		return
	}

	old := hostDriver(existing)
	if requested == old {
		return
	}

	exit.Advice(
		reason.GuestDrvMismatch,
		`The existing "{{.name}}" cluster was created using the "{{.old}}" driver, which is incompatible with requested "{{.new}}" driver.`,
		"Delete the existing '{{.name}}' cluster using: '{{.delcommand}}', or start the existing '{{.name}}' cluster using: '{{.command}} --driver={{.old}}'",
		out.V{
			"name":       existing.Name,
			"new":        requested,
			"old":        old,
			"command":    mustload.ExampleCmd(existing.Name, "start"),
			"delcommand": mustload.ExampleCmd(existing.Name, "delete"),
		},
	)
}
// validateDriver validates that the selected driver appears sane, exits if not.
// Warnings (performance issues) are printed; hard failures (unsupported,
// not installed, not running) terminate the process with advice.
func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) {
	name := ds.Name
	glog.Infof("validating driver %q against %+v", name, existing)
	if !driver.Supported(name) {
		exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": name, "os": runtime.GOOS})
	}

	// if we are only downloading artifacts for a driver, we can stop validation here
	if viper.GetBool("download-only") {
		return
	}

	st := ds.State
	glog.Infof("status for %s: %+v", name, st)

	// A driver that works but could work better only earns a warning.
	if st.NeedsImprovement {
		out.WarnReason(reason.Kind{
			ID:     fmt.Sprintf("PROVIDER_%s_IMPROVEMENT", strings.ToUpper(name)),
			Advice: translate.T(st.Fix),
			Style:  style.Improvement,
		}, `The '{{.driver}}' driver reported a performance issue`, out.V{"driver": name})
	}

	if st.Error == nil {
		return
	}

	if !st.Installed {
		exit.Message(reason.Kind{
			ID:       fmt.Sprintf("PROVIDER_%s_NOT_FOUND", strings.ToUpper(name)),
			Advice:   translate.T(st.Fix),
			ExitCode: reason.ExProviderNotFound,
			URL:      st.Doc,
			Style:    style.Shrug,
		}, `The '{{.driver}}' provider was not found: {{.error}}`, out.V{"driver": name, "error": st.Error})
	}

	// Distinguish "provider not running" from a generic provider error.
	id := fmt.Sprintf("PROVIDER_%s_ERROR", strings.ToUpper(name))
	code := reason.ExProviderUnavailable

	if !st.Running {
		id = fmt.Sprintf("PROVIDER_%s_NOT_RUNNING", strings.ToUpper(name))
		code = reason.ExProviderNotRunning
	}

	exitIfNotForced(reason.Kind{
		ID:       id,
		Advice:   translate.T(st.Fix),
		ExitCode: code,
		URL:      st.Doc,
		Style:    style.Fatal,
	}, st.Error.Error())
}
// selectImageRepository probes the known image mirrors and returns whether a
// reachable repository was found, the repository to use (or the fallback when
// none responded), and an error for an unknown mirror country code.
func selectImageRepository(mirrorCountry string, v semver.Version) (bool, string, error) {
	var candidates []string
	var fallback string
	glog.Infof("selecting image repository for country %s ...", mirrorCountry)

	if mirrorCountry == "" {
		// always make sure global is preferred
		candidates = append(candidates, "global")
		for k := range constants.ImageRepositories {
			if strings.ToLower(k) != "global" {
				candidates = append(candidates, k)
			}
		}
	} else {
		repos, ok := constants.ImageRepositories[mirrorCountry]
		if !ok || len(repos) == 0 {
			return false, "", fmt.Errorf("invalid image mirror country code: %s", mirrorCountry)
		}
		candidates = append(candidates, mirrorCountry)
		// we'll use the first repository as fallback
		// when none of the mirrors in the given location is available
		fallback = repos[0]
	}

	// reachable reports whether the pause image for this Kubernetes version
	// can be resolved from the given repository.
	reachable := func(repo string) error {
		ref, err := name.ParseReference(images.Pause(v, repo), name.WeakValidation)
		if err != nil {
			return err
		}
		_, err = remote.Image(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain))
		return err
	}

	for _, country := range candidates {
		for _, repo := range constants.ImageRepositories[country] {
			if err := reachable(repo); err == nil {
				return true, repo, nil
			}
		}
	}
	return false, fallback, nil
}
// validateUser validates minikube is run by the recommended user (privileged or regular)
func validateUser(drvName string) {
	u, err := user.Current()
	if err != nil {
		glog.Errorf("Error getting the current user: %v", err)
		return
	}

	useForce := viper.GetBool(force)

	// Root-requiring driver run by a non-root user: exit (unless --force).
	if driver.NeedsRoot(drvName) && u.Uid != "0" && !useForce {
		exit.Message(reason.DrvNeedsRoot, `The "{{.driver_name}}" driver requires root privileges. Please run minikube using 'sudo -E minikube start --driver={{.driver_name}}'.`, out.V{"driver_name": drvName})
	}

	// If root is required, or we are not root, exit early
	if driver.NeedsRoot(drvName) || u.Uid != "0" {
		return
	}

	// Past this point: we ARE root, but the driver does not want root.
	out.ErrT(style.Stopped, `The "{{.driver_name}}" driver should not be used with root privileges.`, out.V{"driver_name": drvName})
	out.ErrT(style.Tip, "If you are running minikube within a VM, consider using --driver=none:")
	out.ErrT(style.Documentation, " https://minikube.sigs.k8s.io/docs/reference/drivers/none/")

	cname := ClusterFlagValue()
	_, err = config.Load(cname)
	// Only suggest deleting the root-owned cluster when its config exists
	// (or loading failed for some reason other than "not found").
	if err == nil || !config.IsNotExist(err) {
		out.ErrT(style.Tip, "Tip: To remove this root owned cluster, run: sudo {{.cmd}}", out.V{"cmd": mustload.ExampleCmd(cname, "delete")})
	}

	if !useForce {
		exit.Message(reason.DrvAsRoot, `The "{{.driver_name}}" driver should not be used with root privileges.`, out.V{"driver_name": drvName})
	}
}
// memoryLimits returns the amount of memory allocated to the system and hypervisor, the return value is in MiB
func memoryLimits(drvName string) (int, int, error) {
	info, cpuErr, memErr, diskErr := machine.CachedHostInfo()
	if cpuErr != nil {
		glog.Warningf("could not get system cpu info while verifying memory limits, which might be okay: %v", cpuErr)
	}
	if diskErr != nil {
		glog.Warningf("could not get system disk info while verifying memory limits, which might be okay: %v", diskErr)
	}
	// Memory info is the one piece we cannot proceed without.
	if memErr != nil {
		return -1, -1, memErr
	}

	sysLimit := int(info.Memory)

	// Non-KIC drivers have no separate container daemon limit.
	if !driver.IsKIC(drvName) {
		return sysLimit, 0, nil
	}

	s, err := oci.CachedDaemonInfo(drvName)
	if err != nil {
		return -1, -1, err
	}
	return sysLimit, int(s.TotalMemory / 1024 / 1024), nil
}
// suggestMemoryAllocation calculates the default memory footprint in MiB
func suggestMemoryAllocation(sysLimit int, containerLimit int, nodes int) int {
	// An explicit --memory value always wins.
	if mem := viper.GetInt(memory); mem != 0 {
		return mem
	}

	const fallback = 2200
	maximum := 6000

	if sysLimit > 0 && fallback > sysLimit {
		return sysLimit
	}

	// If there are container limits, add tiny bit of slack for non-minikube components
	if containerLimit > 0 {
		if fallback > containerLimit {
			return containerLimit
		}
		maximum = containerLimit - 48
	}

	// Suggest 25% of RAM, rounded to nearest 100MB. Hyper-V requires an even number!
	suggested := int(float32(sysLimit)/400.0) * 100
	if nodes > 1 {
		suggested /= nodes
	}

	// Clamp the suggestion into [fallback, maximum].
	switch {
	case suggested > maximum:
		return maximum
	case suggested < fallback:
		return fallback
	default:
		return suggested
	}
}
// validateRequestedMemorySize validates the memory size matches the minimum recommended
// (req is the requested allocation in MB). It warns or exits (honoring --force)
// when the request is too small, too large, or exceeds what the host/daemon offers.
func validateRequestedMemorySize(req int, drvName string) {
	// TODO: Fix MB vs MiB confusion
	sysLimit, containerLimit, err := memoryLimits(drvName)
	if err != nil {
		glog.Warningf("Unable to query memory limits: %v", err)
	}

	// Detect if their system doesn't have enough memory to work with.
	if driver.IsKIC(drvName) && containerLimit < minUsableMem {
		if driver.IsDockerDesktop(drvName) {
			if runtime.GOOS == "darwin" {
				exitIfNotForced(reason.RsrcInsufficientDarwinDockerMemory, "Docker Desktop only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes", out.V{"size": containerLimit, "req": minUsableMem, "recommend": "2.25 GB"})
			} else {
				exitIfNotForced(reason.RsrcInsufficientWindowsDockerMemory, "Docker Desktop only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes", out.V{"size": containerLimit, "req": minUsableMem, "recommend": "2.25 GB"})
			}
		}
		exitIfNotForced(reason.RsrcInsufficientContainerMemory, "{{.driver}} only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes", out.V{"size": containerLimit, "driver": drvName, "req": minUsableMem})
	}

	if sysLimit < minUsableMem {
		// BUGFIX: this message previously reported containerLimit as "size";
		// the system-memory message must report the system limit.
		exitIfNotForced(reason.RsrcInsufficientSysMemory, "System only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes", out.V{"size": sysLimit, "driver": drvName, "req": minUsableMem})
	}

	if req < minUsableMem {
		exitIfNotForced(reason.RsrcInsufficientReqMemory, "Requested memory allocation {{.requested}}MiB is less than the usable minimum of {{.minimum_memory}}MB", out.V{"requested": req, "minimum_memory": minUsableMem})
	}
	if req < minRecommendedMem {
		out.WarnReason(reason.RsrcInsufficientReqMemory, "Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommend}}MB. Deployments may fail.", out.V{"requested": req, "recommend": minRecommendedMem})
	}

	if driver.IsDockerDesktop(drvName) && containerLimit < 2997 && sysLimit > 8000 { // for users with more than 8 GB advice 3 GB
		r := reason.RsrcInsufficientDarwinDockerMemory
		// BUGFIX: runtime.GOOS is always lowercase ("windows"); the previous
		// comparison against "Windows" could never match.
		if runtime.GOOS == "windows" {
			r = reason.RsrcInsufficientWindowsDockerMemory
		}
		r.Style = style.Improvement
		out.WarnReason(r, "Docker Desktop has access to only {{.size}}MiB of the {{.sys}}MiB in available system memory. Consider increasing this for improved performance.", out.V{"size": containerLimit, "sys": sysLimit, "recommend": "3 GB"})
	}

	advised := suggestMemoryAllocation(sysLimit, containerLimit, viper.GetInt(nodes))
	if req > sysLimit {
		exitIfNotForced(reason.Kind{ID: "RSRC_OVER_ALLOC_MEM", Advice: "Start minikube with less memory allocated: 'minikube start --memory={{.advised}}mb'"},
			`Requested memory allocation {{.requested}}MB is more than your system limit {{.system_limit}}MB.`,
			out.V{"requested": req, "system_limit": sysLimit, "advised": advised})
	}

	// Recommend 1GB to handle OS/VM overhead
	maxAdvised := sysLimit - 1024
	if req > maxAdvised {
		out.WarnReason(reason.Kind{ID: "RSRC_OVER_ALLOC_MEM", Advice: "Start minikube with less memory allocated: 'minikube start --memory={{.advised}}mb'"},
			`The requested memory allocation of {{.requested}}MiB does not leave room for system overhead (total system memory: {{.system_limit}}MiB). You may face stability issues.`,
			out.V{"requested": req, "system_limit": sysLimit, "advised": advised})
	}
}
// validateCPUCount validates the cpu count matches the minimum recommended,
// checking both the requested count and (for KIC drivers) what the container
// daemon actually exposes. Honors --force via exitIfNotForced.
func validateCPUCount(drvName string) {
	var cpuCount int
	if driver.BareMetal(drvName) {
		// Uses the gopsutil cpu package to count the number of physical cpu cores
		ci, err := cpu.Counts(false)
		if err != nil {
			glog.Warningf("Unable to get CPU info: %v", err)
		} else {
			cpuCount = ci
		}
	} else {
		cpuCount = viper.GetInt(cpus)
	}
	if cpuCount < minimumCPUS {
		exitIfNotForced(reason.RsrcInsufficientCores, "Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}", out.V{"requested_cpus": cpuCount, "minimum_cpus": minimumCPUS})
	}

	// Only KIC drivers have a container daemon whose CPU limit must also be checked.
	if !driver.IsKIC(drvName) {
		return
	}

	si, err := oci.CachedDaemonInfo(drvName)
	if err != nil {
		out.T(style.Confused, "Failed to verify '{{.driver_name}} info' will try again ...", out.V{"driver_name": drvName})
		si, err = oci.DaemonInfo(drvName)
		if err != nil {
			exit.Message(reason.Usage, "Ensure your {{.driver_name}} is running and is healthy.", out.V{"driver_name": driver.FullName(drvName)})
		}
	}

	// looks good
	if si.CPUs >= 2 {
		return
	}

	if drvName == oci.Docker && runtime.GOOS == "darwin" {
		exitIfNotForced(reason.RsrcInsufficientDarwinDockerCores, "Docker Desktop has less than 2 CPUs configured, but Kubernetes requires at least 2 to be available")
	} else if drvName == oci.Docker && runtime.GOOS == "windows" {
		exitIfNotForced(reason.RsrcInsufficientWindowsDockerCores, "Docker Desktop has less than 2 CPUs configured, but Kubernetes requires at least 2 to be available")
	} else {
		// CONSISTENCY FIX: use the validated drvName rather than re-reading the
		// "driver" flag, which can be empty when --vm-driver was used instead.
		exitIfNotForced(reason.RsrcInsufficientCores, "{{.driver_name}} has less than 2 CPUs available, but Kubernetes requires at least 2 to be available", out.V{"driver_name": driver.FullName(drvName)})
	}
}
// validateFlags validates the supplied flags against known bad combinations
func validateFlags(cmd *cobra.Command, drvName string) {
	// Disk size: must parse and meet the minimum.
	if cmd.Flags().Changed(humanReadableDiskSize) {
		diskSizeMB, err := util.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
		if err != nil {
			exitIfNotForced(reason.Usage, "Validation unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err})
		}

		if diskSizeMB < minimumDiskSize {
			exitIfNotForced(reason.RsrcInsufficientStorage, "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}", out.V{"requested_size": diskSizeMB, "minimum_size": minimumDiskSize})
		}
	}

	if cmd.Flags().Changed(cpus) {
		if !driver.HasResourceLimits(drvName) {
			out.WarningT("The '{{.name}}' driver does not respect the --cpus flag", out.V{"name": drvName})
		}
	}

	validateCPUCount(drvName)

	if cmd.Flags().Changed(memory) {
		if !driver.HasResourceLimits(drvName) {
			out.WarningT("The '{{.name}}' driver does not respect the --memory flag", out.V{"name": drvName})
		}
		req, err := util.CalculateSizeInMB(viper.GetString(memory))
		if err != nil {
			exitIfNotForced(reason.Usage, "Unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err})
		}
		validateRequestedMemorySize(req, drvName)
	}

	// Container runtime: must be one of the known runtimes (with "crio"
	// accepted as an alias for "cri-o" and normalized to "crio").
	if cmd.Flags().Changed(containerRuntime) {
		runtime := strings.ToLower(viper.GetString(containerRuntime))

		validOptions := cruntime.ValidRuntimes()
		// `crio` is accepted as an alternative spelling to `cri-o`
		validOptions = append(validOptions, constants.CRIO)

		var validRuntime bool
		for _, option := range validOptions {
			if runtime == option {
				validRuntime = true
			}

			// Convert `cri-o` to `crio` as the K8s config uses the `crio` spelling
			if runtime == "cri-o" {
				viper.Set(containerRuntime, constants.CRIO)
			}
		}

		if !validRuntime {
			exit.Message(reason.Usage, `Invalid Container Runtime: "{{.runtime}}". Valid runtimes are: {{.validOptions}}`, out.V{"runtime": runtime, "validOptions": strings.Join(cruntime.ValidRuntimes(), ", ")})
		}
	}

	// Extra constraints for the bare-metal ("none") driver.
	if driver.BareMetal(drvName) {
		if ClusterFlagValue() != constants.DefaultClusterName {
			exit.Message(reason.DrvUnsupportedProfile, "The '{{.name}} driver does not support multiple profiles: https://minikube.sigs.k8s.io/docs/reference/drivers/none/", out.V{"name": drvName})
		}

		runtime := viper.GetString(containerRuntime)
		if runtime != "docker" {
			out.WarningT("Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!", out.V{"runtime": runtime})
		}

		// conntrack is required starting with Kubernetes 1.18, include the release candidates for completion
		version, _ := util.ParseKubernetesVersion(getKubernetesVersion(nil))
		if version.GTE(semver.MustParse("1.18.0-beta.1")) {
			if _, err := exec.LookPath("conntrack"); err != nil {
				exit.Message(reason.GuestMissingConntrack, "Sorry, Kubernetes {{.k8sVersion}} requires conntrack to be installed in root's path", out.V{"k8sVersion": version.String()})
			}
		}
	}

	// validate kubeadm extra args
	if invalidOpts := bsutil.FindInvalidExtraConfigFlags(config.ExtraOptions); len(invalidOpts) > 0 {
		out.WarningT(
			"These --extra-config parameters are invalid: {{.invalid_extra_opts}}",
			out.V{"invalid_extra_opts": invalidOpts},
		)
		exit.Message(
			reason.Usage,
			"Valid components are: {{.valid_extra_opts}}",
			out.V{"valid_extra_opts": bsutil.KubeadmExtraConfigOpts},
		)
	}

	// check that kubeadm extra args contain only allowed parameters
	for param := range config.ExtraOptions.AsMap().Get(bsutil.Kubeadm) {
		if !config.ContainsParam(bsutil.KubeadmExtraArgsAllowed[bsutil.KubeadmCmdParam], param) &&
			!config.ContainsParam(bsutil.KubeadmExtraArgsAllowed[bsutil.KubeadmConfigParam], param) {
			exit.Message(reason.Usage, "Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config", out.V{"parameter_name": param})
		}
	}

	// --output accepts exactly "text" or "json".
	if s := viper.GetString(startOutput); s != "text" && s != "json" {
		exit.Message(reason.Usage, "Sorry, please set the --output flag to one of the following valid options: [text,json]")
	}

	validateRegistryMirror()
}
// validateRegistryMirror validates that each --registry-mirror argument is an
// http(s) URL with no path component (e.g. http://localhost), exiting with a
// usage error otherwise.
func validateRegistryMirror() {
	for _, loc := range registryMirror {
		URL, err := url.Parse(loc)
		if err != nil {
			glog.Errorln("Error Parsing URL: ", err)
			// BUGFIX: previously execution fell through here and dereferenced
			// the nil URL below, panicking. An unparseable URL is invalid.
			exit.Message(reason.Usage, "Sorry, the url provided with the --registry-mirror flag is invalid: {{.url}}", out.V{"url": loc})
		}
		if (URL.Scheme != "http" && URL.Scheme != "https") || URL.Path != "" {
			exit.Message(reason.Usage, "Sorry, the url provided with the --registry-mirror flag is invalid: {{.url}}", out.V{"url": loc})
		}
	}
}
// createNode returns the cluster config and the control-plane node to start.
// For an existing cluster it reuses the stored nodes, overriding only their
// KubernetesVersion; otherwise it creates a fresh control-plane node.
func createNode(cc config.ClusterConfig, kubeNodeName string, existing *config.ClusterConfig) (config.ClusterConfig, config.Node, error) {
	// Create the initial node, which will necessarily be a control plane
	if existing != nil {
		cp, err := config.PrimaryControlPlane(existing)
		// BUGFIX: check the error before touching cp; previously cp was
		// mutated before the error check, on a possibly zero-valued node.
		if err != nil {
			return cc, config.Node{}, err
		}
		cp.KubernetesVersion = getKubernetesVersion(&cc)

		// Make sure that existing nodes honor if KubernetesVersion gets specified on restart
		// KubernetesVersion is the only attribute that the user can override in the Node object
		nodes := []config.Node{}
		for _, n := range existing.Nodes {
			n.KubernetesVersion = getKubernetesVersion(&cc)
			nodes = append(nodes, n)
		}
		cc.Nodes = nodes

		return cc, cp, nil
	}

	cp := config.Node{
		Port:              cc.KubernetesConfig.NodePort,
		KubernetesVersion: getKubernetesVersion(&cc),
		Name:              kubeNodeName,
		ControlPlane:      true,
		Worker:            true,
	}
	cc.Nodes = []config.Node{cp}
	return cc, cp, nil
}
// autoSetDriverOptions sets the options needed for specific driver automatically.
// It applies the driver's default extra-config entries (unless already set by
// the user) and mirrors the driver's cache/runtime/bootstrapper hints into
// viper when the corresponding flag was not supplied. Returns the first error
// encountered while setting extra options (later options are still attempted).
func autoSetDriverOptions(cmd *cobra.Command, drvName string) error {
	var firstErr error
	hints := driver.FlagDefaults(drvName)

	for _, eo := range hints.ExtraOptions {
		if config.ExtraOptions.Exists(eo) {
			glog.Infof("skipping extra-config %q.", eo)
			continue
		}
		glog.Infof("auto setting extra-config to %q.", eo)
		// BUGFIX: previously a failure here was overwritten (and silently
		// lost) by a later successful Set; keep the first error instead.
		if err := config.ExtraOptions.Set(eo); err != nil && firstErr == nil {
			firstErr = errors.Wrapf(err, "setting extra option %s", eo)
		}
	}

	if !cmd.Flags().Changed(cacheImages) {
		viper.Set(cacheImages, hints.CacheImages)
	}

	if !cmd.Flags().Changed(containerRuntime) && hints.ContainerRuntime != "" {
		viper.Set(containerRuntime, hints.ContainerRuntime)
		glog.Infof("auto set %s to %q.", containerRuntime, hints.ContainerRuntime)
	}

	if !cmd.Flags().Changed(cmdcfg.Bootstrapper) && hints.Bootstrapper != "" {
		viper.Set(cmdcfg.Bootstrapper, hints.Bootstrapper)
		glog.Infof("auto set %s to %q.", cmdcfg.Bootstrapper, hints.Bootstrapper)
	}
	return firstErr
}
// validateKubernetesVersion ensures that the requested version is reasonable
func validateKubernetesVersion(old *config.ClusterConfig) {
	// nvs: the version the user is requesting (resolved via getKubernetesVersion).
	nvs, _ := semver.Make(strings.TrimPrefix(getKubernetesVersion(old), version.VersionPrefix))

	oldestVersion, err := semver.Make(strings.TrimPrefix(constants.OldestKubernetesVersion, version.VersionPrefix))
	if err != nil {
		exit.Message(reason.InternalSemverParse, "Unable to parse oldest Kubernetes version from constants: {{.error}}", out.V{"error": err})
	}
	defaultVersion, err := semver.Make(strings.TrimPrefix(constants.DefaultKubernetesVersion, version.VersionPrefix))
	if err != nil {
		exit.Message(reason.InternalSemverParse, "Unable to parse default Kubernetes version from constants: {{.error}}", out.V{"error": err})
	}

	// Requested version is older than anything we support: exit unless --force.
	if nvs.LT(oldestVersion) {
		out.WarningT("Specified Kubernetes version {{.specified}} is less than the oldest supported version: {{.oldest}}", out.V{"specified": nvs, "oldest": constants.OldestKubernetesVersion})
		if !viper.GetBool(force) {
			out.WarningT("You can force an unsupported Kubernetes version via the --force flag")
		}
		exitIfNotForced(reason.KubernetesTooOld, "Kubernetes {{.version}} is not supported by this release of minikube", out.V{"version": nvs})
	}

	// No existing cluster (or no recorded version): nothing to compare against.
	if old == nil || old.KubernetesConfig.KubernetesVersion == "" {
		return
	}

	ovs, err := semver.Make(strings.TrimPrefix(old.KubernetesConfig.KubernetesVersion, version.VersionPrefix))
	if err != nil {
		glog.Errorf("Error parsing old version %q: %v", old.KubernetesConfig.KubernetesVersion, err)
	}

	// Downgrading an existing cluster is unsafe: refuse outright.
	if nvs.LT(ovs) {
		profileArg := ""
		if old.Name != constants.DefaultClusterName {
			profileArg = fmt.Sprintf(" -p %s", old.Name)
		}

		suggestedName := old.Name + "2"
		exit.Message(reason.KubernetesDowngrade, "Unable to safely downgrade existing Kubernetes v{{.old}} cluster to v{{.new}}",
			out.V{"prefix": version.VersionPrefix, "new": nvs, "old": ovs, "profile": profileArg, "suggestedName": suggestedName})
	}

	// Gently advertise the newer default when the user pinned an older version.
	if defaultVersion.GT(nvs) {
		out.T(style.New, "Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.prefix}}{{.new}}", out.V{"prefix": version.VersionPrefix, "new": defaultVersion})
	}
}
// getKubernetesVersion resolves the Kubernetes version to deploy, preferring
// the --kubernetes-version flag, then the existing cluster's recorded version,
// then the compiled-in default. The aliases "stable" and "latest" map to the
// default and newest supported versions respectively.
func getKubernetesVersion(old *config.ClusterConfig) string {
	requested := viper.GetString(kubernetesVersion)

	// try to load the old version first if the user didn't specify anything
	if requested == "" && old != nil {
		requested = old.KubernetesConfig.KubernetesVersion
	}

	switch {
	case requested == "" || strings.EqualFold(requested, "stable"):
		requested = constants.DefaultKubernetesVersion
	case strings.EqualFold(requested, "latest"):
		requested = constants.NewestKubernetesVersion
	}

	nvs, err := semver.Make(strings.TrimPrefix(requested, version.VersionPrefix))
	if err != nil {
		exit.Message(reason.Usage, `Unable to parse "{{.kubernetes_version}}": {{.error}}`, out.V{"kubernetes_version": requested, "error": err})
	}
	return version.VersionPrefix + nvs.String()
}
// exitIfNotForced exits with the given reason and message unless the user
// supplied --force, in which case the problem is only printed as an error
// and execution continues.
func exitIfNotForced(r reason.Kind, message string, v ...out.V) {
	if !viper.GetBool(force) {
		exit.Message(r, message, v...)
	}
	// --force was set: surface the issue without aborting.
	out.Error(r, message, v...)
}
start: fixed the type value for kubectl version mismatch to warning
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"encoding/json"
"fmt"
"math"
"net"
"net/url"
"os"
"os/exec"
"os/user"
"runtime"
"strings"
"github.com/blang/semver"
"github.com/docker/machine/libmachine/ssh"
"github.com/golang/glog"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/pkg/errors"
"github.com/shirou/gopsutil/cpu"
gopshost "github.com/shirou/gopsutil/host"
"github.com/spf13/cobra"
"github.com/spf13/viper"
cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil"
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/download"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/kubeconfig"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/mustload"
"k8s.io/minikube/pkg/minikube/node"
"k8s.io/minikube/pkg/minikube/notify"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/out/register"
"k8s.io/minikube/pkg/minikube/reason"
"k8s.io/minikube/pkg/minikube/style"
"k8s.io/minikube/pkg/minikube/registry"
"k8s.io/minikube/pkg/minikube/translate"
"k8s.io/minikube/pkg/util"
"k8s.io/minikube/pkg/version"
)
var (
	// registryMirror holds the --registry-mirror values validated by
	// validateRegistryMirror (may also be filled from MINIKUBE_REGISTRY_MIRROR
	// in runStart).
	registryMirror []string
	// insecureRegistry — presumably bound to --insecure-registry; the flag
	// binding is outside this file section. TODO confirm.
	insecureRegistry []string
	// apiServerNames / apiServerIPs — presumably extra SANs for the apiserver
	// certificate, bound in the networking flag setup. TODO confirm.
	apiServerNames []string
	apiServerIPs   []net.IP
)
// init registers all flag groups for the start command and binds them into
// viper so values are readable via viper.Get* as well as cobra.
func init() {
	initMinikubeFlags()
	initKubernetesFlags()
	initDriverFlags()
	initNetworkingFlags()
	// Bind every start flag to viper; failure here is an internal error.
	if err := viper.BindPFlags(startCmd.Flags()); err != nil {
		exit.Error(reason.InternalBindFlags, "unable to bind flags", err)
	}
}
// startCmd represents the start command
var startCmd = &cobra.Command{
	Use:   "start",
	Short: "Starts a local Kubernetes cluster",
	Long:  "Starts a local Kubernetes cluster",
	Run:   runStart, // main entry point for `minikube start`
}
// platform generates a user-readable platform message
func platform() string {
	var b strings.Builder

	// Show the distro version if possible
	if hi, err := gopshost.Info(); err == nil {
		b.WriteString(fmt.Sprintf("%s %s", strings.Title(hi.Platform), hi.PlatformVersion))
		glog.Infof("hostinfo: %+v", hi)
	} else {
		glog.Warningf("gopshost.Info returned error: %v", err)
		b.WriteString(runtime.GOOS)
	}

	vsys, vrole, err := gopshost.Virtualization()
	if err != nil {
		glog.Warningf("gopshost.Virtualization returned error: %v", err)
	} else {
		glog.Infof("virtualization: %s %s", vsys, vrole)
	}

	// This environment is exotic, let's output a bit more.
	if vrole == "guest" || runtime.GOARCH != "amd64" {
		suffix := fmt.Sprintf(" (%s)", runtime.GOARCH)
		if vsys != "" {
			suffix = fmt.Sprintf(" (%s/%s)", vsys, runtime.GOARCH)
		}
		b.WriteString(suffix)
	}
	return b.String()
}
// runStart handles the executes the flow of "minikube start"
func runStart(cmd *cobra.Command, args []string) {
	register.SetEventLogPath(localpath.EventLog(ClusterFlagValue()))

	out.SetJSON(viper.GetString(startOutput) == "json")
	displayVersion(version.GetVersion())

	// No need to do the update check if no one is going to see it
	if !viper.GetBool(interactive) || !viper.GetBool(dryRun) {
		// Avoid blocking execution on optional HTTP fetches
		go notify.MaybePrintUpdateTextFromGithub()
	}

	displayEnviron(os.Environ())
	if viper.GetBool(force) {
		out.WarningT("minikube skips various validations when --force is supplied; this may lead to unexpected behavior")
	}

	// if --registry-mirror specified when run minikube start,
	// take arg precedence over MINIKUBE_REGISTRY_MIRROR
	// actually this is a hack, because viper 1.0.0 can assign env to variable if StringSliceVar
	// and i can't update it to 1.4.0, it affects too much code
	// other types (like String, Bool) of flag works, so imageRepository, imageMirrorCountry
	// can be configured as MINIKUBE_IMAGE_REPOSITORY and IMAGE_MIRROR_COUNTRY
	// this should be updated to documentation
	if len(registryMirror) == 0 {
		registryMirror = viper.GetStringSlice("registry_mirror")
	}

	if !config.ProfileNameValid(ClusterFlagValue()) {
		out.WarningT("Profile name '{{.name}}' is not valid", out.V{"name": ClusterFlagValue()})
		exit.Message(reason.Usage, "Only alphanumeric and dashes '-' are permitted. Minimum 1 character, starting with alphanumeric.")
	}

	// Load any existing cluster config for this profile; "not exist" is fine.
	existing, err := config.Load(ClusterFlagValue())
	if err != nil && !config.IsNotExist(err) {
		exit.Message(reason.HostConfigLoad, "Unable to load config: {{.error}}", out.V{"error": err})
	}

	if existing != nil {
		upgradeExistingConfig(existing)
	}

	validateSpecifiedDriver(existing)
	validateKubernetesVersion(existing)

	// Pick a driver, then provision. On failure, fall back through the
	// alternate drivers unless the user explicitly chose one.
	ds, alts, specified := selectDriver(existing)
	starter, err := provisionWithDriver(cmd, ds, existing)
	if err != nil {
		node.ExitIfFatal(err)
		machine.MaybeDisplayAdvice(err, ds.Name)
		if specified {
			// If the user specified a driver, don't fallback to anything else
			exit.Error(reason.GuestProvision, "error provisioning host", err)
		} else {
			success := false
			// Walk down the rest of the options
			for _, alt := range alts {
				out.WarningT("Startup with {{.old_driver}} driver failed, trying with alternate driver {{.new_driver}}: {{.error}}", out.V{"old_driver": ds.Name, "new_driver": alt.Name, "error": err})
				ds = alt
				// Delete the existing cluster and try again with the next driver on the list
				profile, err := config.LoadProfile(ClusterFlagValue())
				if err != nil {
					glog.Warningf("%s profile does not exist, trying anyways.", ClusterFlagValue())
				}

				err = deleteProfile(profile)
				if err != nil {
					out.WarningT("Failed to delete cluster {{.name}}, proceeding with retry anyway.", out.V{"name": ClusterFlagValue()})
				}
				starter, err = provisionWithDriver(cmd, ds, existing)
				if err != nil {
					continue
				} else {
					// Success!
					success = true
					break
				}
			}
			if !success {
				exit.Error(reason.GuestProvision, "error provisioning host", err)
			}
		}
	}

	if existing != nil && existing.KubernetesConfig.ContainerRuntime == "crio" && driver.IsKIC(existing.Driver) {
		// Stop and start again if it's crio because it's broken above v1.17.3
		out.WarningT("Due to issues with CRI-O post v1.17.3, we need to restart your cluster.")
		out.WarningT("See details at https://github.com/kubernetes/minikube/issues/8861")
		stopProfile(existing.Name)
		starter, err = provisionWithDriver(cmd, ds, existing)
		if err != nil {
			exit.Error(reason.GuestProvision, "error provisioning host", err)
		}
	}

	kubeconfig, err := startWithDriver(cmd, starter, existing)
	if err != nil {
		node.ExitIfFatal(err)
		exit.Error(reason.GuestStart, "failed to start node", err)
	}

	if err := showKubectlInfo(kubeconfig, starter.Node.KubernetesVersion, starter.Cfg.Name); err != nil {
		glog.Errorf("kubectl info: %v", err)
	}
}
// provisionWithDriver validates the chosen driver and flags, generates the
// cluster config, downloads required artifacts, and provisions the machine,
// returning a node.Starter ready for startWithDriver.
func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing *config.ClusterConfig) (node.Starter, error) {
	driverName := ds.Name
	glog.Infof("selected driver: %s", driverName)
	validateDriver(ds, existing)
	err := autoSetDriverOptions(cmd, driverName)
	if err != nil {
		glog.Errorf("Error autoSetOptions : %v", err)
	}

	validateFlags(cmd, driverName)
	validateUser(driverName)

	// Download & update the driver, even in --download-only mode
	if !viper.GetBool(dryRun) {
		updateDriver(driverName)
	}

	k8sVersion := getKubernetesVersion(existing)
	cc, n, err := generateClusterConfig(cmd, existing, k8sVersion, driverName)
	if err != nil {
		return node.Starter{}, errors.Wrap(err, "Failed to generate config")
	}

	// This is about as far as we can go without overwriting config files
	if viper.GetBool(dryRun) {
		out.T(style.DryRun, `dry-run validation complete!`)
		os.Exit(0)
	}

	// VM-based drivers need the minikube ISO cached locally.
	if driver.IsVM(driverName) {
		url, err := download.ISO(viper.GetStringSlice(isoURL), cmd.Flags().Changed(isoURL))
		if err != nil {
			return node.Starter{}, errors.Wrap(err, "Failed to cache ISO")
		}
		cc.MinikubeISO = url
	}

	// Carry over any addons from the existing cluster when reinstalling addons.
	var existingAddons map[string]bool
	if viper.GetBool(installAddons) {
		existingAddons = map[string]bool{}
		if existing != nil && existing.Addons != nil {
			existingAddons = existing.Addons
		}
	}

	if viper.GetBool(nativeSSH) {
		ssh.SetDefaultClient(ssh.Native)
	} else {
		ssh.SetDefaultClient(ssh.External)
	}

	mRunner, preExists, mAPI, host, err := node.Provision(&cc, &n, true, viper.GetBool(deleteOnFailure))
	if err != nil {
		return node.Starter{}, err
	}

	return node.Starter{
		Runner:         mRunner,
		PreExists:      preExists,
		MachineAPI:     mAPI,
		Host:           host,
		ExistingAddons: existingAddons,
		Cfg:            &cc,
		Node:           &n,
	}, nil
}
// startWithDriver starts the primary node (retrying via delete when
// --delete-on-failure is set), then adds any additional worker nodes for
// multi-node clusters. Returns the kubeconfig settings for the cluster.
func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config.ClusterConfig) (*kubeconfig.Settings, error) {
	kubeconfig, err := node.Start(starter, true)
	if err != nil {
		kubeconfig, err = maybeDeleteAndRetry(cmd, *starter.Cfg, *starter.Node, starter.ExistingAddons, err)
		if err != nil {
			return nil, err
		}
	}

	numNodes := viper.GetInt(nodes)
	if existing != nil {
		if numNodes > 1 {
			// We ignore the --nodes parameter if we're restarting an existing cluster
			out.WarningT(`The cluster {{.cluster}} already exists which means the --nodes parameter will be ignored. Use "minikube node add" to add nodes to an existing cluster.`, out.V{"cluster": existing.Name})
		}
		numNodes = len(existing.Nodes)
	}
	if numNodes > 1 {
		if driver.BareMetal(starter.Cfg.Driver) {
			exit.Message(reason.DrvUnsupportedMulti, "The none driver is not compatible with multi-node clusters.")
		} else {
			// Only warn users on first start.
			if existing == nil {
				// Fresh multi-node cluster: create and add the worker nodes.
				out.Ln("")
				warnAboutMultiNode()

				for i := 1; i < numNodes; i++ {
					nodeName := node.Name(i + 1)
					n := config.Node{
						Name:              nodeName,
						Worker:            true,
						ControlPlane:      false,
						KubernetesVersion: starter.Cfg.KubernetesConfig.KubernetesVersion,
					}
					out.Ln("") // extra newline for clarity on the command line
					err := node.Add(starter.Cfg, n, viper.GetBool(deleteOnFailure))
					if err != nil {
						return nil, errors.Wrap(err, "adding node")
					}
				}
			} else {
				// Restart: re-add the previously recorded worker nodes.
				for _, n := range existing.Nodes {
					if !n.ControlPlane {
						err := node.Add(starter.Cfg, n, viper.GetBool(deleteOnFailure))
						if err != nil {
							return nil, errors.Wrap(err, "adding node")
						}
					}
				}
			}
		}
	}

	return kubeconfig, nil
}
// warnAboutMultiNode prints the experimental-feature warning shown on the
// first start of a multi-node cluster.
func warnAboutMultiNode() {
	out.WarningT("Multi-node clusters are currently experimental and might exhibit unintended behavior.")
	out.T(style.Documentation, "To track progress on multi-node clusters, see https://github.com/kubernetes/minikube/issues/7538.")
}
// updateDriver installs or updates the named driver binary under the minikube
// bin directory, emitting warnings (not errors) when this best-effort step fails.
func updateDriver(driverName string) {
	v, err := version.GetSemverVersion()
	if err != nil {
		out.WarningT("Error parsing minikube version: {{.error}}", out.V{"error": err})
		return
	}
	if err := driver.InstallOrUpdate(driverName, localpath.MakeMiniPath("bin"), v, viper.GetBool(interactive), viper.GetBool(autoUpdate)); err != nil {
		out.WarningT("Unable to update {{.driver}} driver: {{.error}}", out.V{"driver": driverName, "error": err})
	}
}
// displayVersion announces the minikube version and host platform, prefixing
// the output with the profile name when a non-default profile is in use.
func displayVersion(version string) {
	prefix := ""
	if cname := ClusterFlagValue(); cname != constants.DefaultClusterName {
		prefix = fmt.Sprintf("[%s] ", cname)
	}

	register.Reg.SetStep(register.InitialSetup)
	out.T(style.Happy, "{{.prefix}}minikube {{.version}} on {{.platform}}", out.V{"prefix": prefix, "version": version, "platform": platform()})
}
// displayEnviron makes the user aware of environment variables that will affect how minikube operates
func displayEnviron(env []string) {
	for _, kv := range env {
		bits := strings.SplitN(kv, "=", 2)
		// ROBUSTNESS: a malformed entry without "=" yields a single element;
		// previously bits[1] was indexed unconditionally and could panic.
		if len(bits) != 2 {
			continue
		}
		k, v := bits[0], bits[1]
		if strings.HasPrefix(k, "MINIKUBE_") || k == constants.KubeconfigEnvVar {
			out.Infof("{{.key}}={{.value}}", out.V{"key": k, "value": v})
		}
	}
}
// showKubectlInfo tells the user how to reach the new cluster with kubectl,
// and warns when the local kubectl client is skewed by more than one minor
// version from the cluster's Kubernetes version.
func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName string) error {
	register.Reg.SetStep(register.Done)
	if kcs.KeepContext {
		out.T(style.Kubectl, "To connect to this cluster, use: kubectl --context={{.name}}", out.V{"name": kcs.ClusterName})
	} else {
		out.T(style.Ready, `Done! kubectl is now configured to use "{{.name}}"`, out.V{"name": machineName})
	}
	// A missing local kubectl is not an error: advise and return nil.
	path, err := exec.LookPath("kubectl")
	if err != nil {
		out.ErrT(style.Kubectl, "Kubectl not found in your path")
		out.ErrT(style.Workaround, "You can use kubectl inside minikube. For more information, visit https://minikube.sigs.k8s.io/docs/handbook/kubectl/")
		out.ErrT(style.Tip, "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/")
		return nil
	}
	gitVersion, err := kubectlVersion(path)
	if err != nil {
		return err
	}
	client, err := semver.Make(strings.TrimPrefix(gitVersion, version.VersionPrefix))
	if err != nil {
		return errors.Wrap(err, "client semver")
	}
	cluster := semver.MustParse(strings.TrimPrefix(k8sVersion, version.VersionPrefix))
	// Absolute distance between client and cluster minor versions.
	minorSkew := int(math.Abs(float64(int(client.Minor) - int(cluster.Minor))))
	glog.Infof("kubectl: %s, cluster: %s (minor skew: %d)", client, cluster, minorSkew)
	// Kubernetes supports a client/server skew of at most one minor version.
	if client.Major != cluster.Major || minorSkew > 1 {
		out.Ln("")
		out.WarningT("{{.path}} is version {{.client_version}}, which may be incompatible with Kubernetes {{.cluster_version}}.",
			out.V{"path": path, "client_version": client, "cluster_version": cluster})
		out.WarningT("You can also use 'minikube kubectl -- get pods' to invoke a matching version",
			out.V{"path": path, "client_version": client})
	}
	return nil
}
// maybeDeleteAndRetry deletes the existing cluster and retries the start once,
// but only when the user opted in via --delete-on-failure. Otherwise it
// returns originalErr unchanged.
func maybeDeleteAndRetry(cmd *cobra.Command, existing config.ClusterConfig, n config.Node, existingAddons map[string]bool, originalErr error) (*kubeconfig.Settings, error) {
	if viper.GetBool(deleteOnFailure) {
		out.WarningT("Node {{.name}} failed to start, deleting and trying again.", out.V{"name": n.Name})
		// Start failed, delete the cluster and try again
		profile, err := config.LoadProfile(existing.Name)
		if err != nil {
			out.ErrT(style.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": existing.Name})
		}
		err = deleteProfile(profile)
		if err != nil {
			out.WarningT("Failed to delete cluster {{.name}}, proceeding with retry anyway.", out.V{"name": existing.Name})
		}
		// Re-generate the cluster config, just in case the failure was related to an old config format
		cc := updateExistingConfigFromFlags(cmd, &existing)
		var kubeconfig *kubeconfig.Settings
		for _, n := range cc.Nodes {
			// NOTE(review): &n aliases the loop variable, which is reused on
			// each iteration (pre-Go 1.22 semantics) — confirm node.Provision
			// and node.Starter do not retain the pointer past the iteration.
			r, p, m, h, err := node.Provision(&cc, &n, n.ControlPlane, false)
			s := node.Starter{
				Runner:         r,
				PreExists:      p,
				MachineAPI:     m,
				Host:           h,
				Cfg:            &cc,
				Node:           &n,
				ExistingAddons: existingAddons,
			}
			if err != nil {
				// Ok we failed again, let's bail
				return nil, err
			}
			k, err := node.Start(s, n.ControlPlane)
			// Keep the kubeconfig produced by the control-plane node.
			if n.ControlPlane {
				kubeconfig = k
			}
			if err != nil {
				// Ok we failed again, let's bail
				return nil, err
			}
		}
		return kubeconfig, nil
	}
	// Don't delete the cluster unless they ask
	return nil, originalErr
}
// kubectlVersion returns the gitVersion reported by the kubectl client binary
// at path, e.g. "v1.18.0".
func kubectlVersion(path string) (string, error) {
	raw, err := exec.Command(path, "version", "--client", "--output=json").Output()
	if err != nil {
		// really old Kubernetes clients did not have the --output parameter
		short, err := exec.Command(path, "version", "--client", "--short").Output()
		if err != nil {
			return "", errors.Wrap(err, "exec")
		}
		trimmed := strings.TrimSpace(string(short))
		return strings.Replace(trimmed, "Client Version: ", "", 1), nil
	}
	var parsed struct {
		ClientVersion struct {
			GitVersion string `json:"gitVersion"`
		} `json:"clientVersion"`
	}
	if err := json.Unmarshal(raw, &parsed); err != nil {
		return "", errors.Wrap(err, "unmarshal")
	}
	return parsed.ClientVersion.GitVersion, nil
}
// selectDriver chooses the driver to use for this cluster. It returns the
// chosen driver's state, any viable alternates, and whether the choice was
// explicit (existing profile or user flag) rather than auto-detected.
func selectDriver(existing *config.ClusterConfig) (registry.DriverState, []registry.DriverState, bool) {
	// Technically unrelated, but important to perform before detection
	driver.SetLibvirtURI(viper.GetString(kvmQemuURI))
	register.Reg.SetStep(register.SelectingDriver)
	// By default, the driver is whatever we used last time
	if existing != nil {
		old := hostDriver(existing)
		ds := driver.Status(old)
		out.T(style.Sparkle, `Using the {{.driver}} driver based on existing profile`, out.V{"driver": ds.String()})
		return ds, nil, true
	}
	// Default to looking at the new driver parameter
	if d := viper.GetString("driver"); d != "" {
		if vmd := viper.GetString("vm-driver"); vmd != "" {
			// Output a warning
			warning := `Both driver={{.driver}} and vm-driver={{.vmd}} have been set.
Since vm-driver is deprecated, minikube will default to driver={{.driver}}.
If vm-driver is set in the global config, please run "minikube config unset vm-driver" to resolve this warning.
`
			out.WarningT(warning, out.V{"driver": d, "vmd": vmd})
		}
		ds := driver.Status(d)
		if ds.Name == "" {
			exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": d, "os": runtime.GOOS})
		}
		out.T(style.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()})
		return ds, nil, true
	}
	// Fallback to old driver parameter
	if d := viper.GetString("vm-driver"); d != "" {
		// Use the already-read flag value rather than re-reading it
		// (was: driver.Status(viper.GetString("vm-driver"))).
		ds := driver.Status(d)
		if ds.Name == "" {
			exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": d, "os": runtime.GOOS})
		}
		out.T(style.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()})
		return ds, nil, true
	}
	// Nothing specified: auto-detect the best available driver.
	choices := driver.Choices(viper.GetBool("vm"))
	pick, alts, rejects := driver.Suggest(choices)
	if pick.Name == "" {
		out.T(style.ThumbsDown, "Unable to pick a default driver. Here is what was considered, in preference order:")
		for _, r := range rejects {
			out.Infof("{{ .name }}: {{ .rejection }}", out.V{"name": r.Name, "rejection": r.Rejection})
		}
		exit.Message(reason.DrvNotDetected, "No possible driver was detected. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/")
	}
	if len(alts) > 1 {
		altNames := []string{}
		for _, a := range alts {
			altNames = append(altNames, a.String())
		}
		out.T(style.Sparkle, `Automatically selected the {{.driver}} driver. Other choices: {{.alternates}}`, out.V{"driver": pick.Name, "alternates": strings.Join(altNames, ", ")})
	} else {
		out.T(style.Sparkle, `Automatically selected the {{.driver}} driver`, out.V{"driver": pick.String()})
	}
	return pick, alts, false
}
// hostDriver returns the actual driver used by a libmachine host, which can differ from our config
func hostDriver(existing *config.ClusterConfig) string {
	if existing == nil {
		return ""
	}
	// On any failure below, fall back to the driver recorded in the config.
	api, err := machine.NewAPIClient()
	if err != nil {
		glog.Warningf("selectDriver NewAPIClient: %v", err)
		return existing.Driver
	}
	cp, err := config.PrimaryControlPlane(existing)
	if err != nil {
		glog.Warningf("Unable to get control plane from existing config: %v", err)
		return existing.Driver
	}
	machineName := driver.MachineName(*existing, cp)
	h, err := api.Load(machineName)
	if err != nil {
		glog.Warningf("api.Load failed for %s: %v", machineName, err)
		// Prefer the legacy VMDriver field when populated (older configs).
		if existing.VMDriver != "" {
			return existing.VMDriver
		}
		return existing.Driver
	}
	// The libmachine host is the authoritative source.
	return h.Driver.DriverName()
}
// validateSpecifiedDriver makes sure that if a user has passed in a driver
// it matches the existing cluster if there is one
func validateSpecifiedDriver(existing *config.ClusterConfig) {
	if existing == nil {
		return
	}
	requested := viper.GetString("driver")
	if requested == "" {
		requested = viper.GetString("vm-driver")
	}
	// Neither --driver nor --vm-driver was specified: nothing to validate.
	if requested == "" {
		return
	}
	// The requested driver matches what the cluster already uses.
	if old := hostDriver(existing); requested != old {
		exit.Advice(
			reason.GuestDrvMismatch,
			`The existing "{{.name}}" cluster was created using the "{{.old}}" driver, which is incompatible with requested "{{.new}}" driver.`,
			"Delete the existing '{{.name}}' cluster using: '{{.delcommand}}', or start the existing '{{.name}}' cluster using: '{{.command}} --driver={{.old}}'",
			out.V{
				"name":       existing.Name,
				"new":        requested,
				"old":        old,
				"command":    mustload.ExampleCmd(existing.Name, "start"),
				"delcommand": mustload.ExampleCmd(existing.Name, "delete"),
			},
		)
	}
}
// validateDriver validates that the selected driver appears sane, exits if not
func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) {
	name := ds.Name
	glog.Infof("validating driver %q against %+v", name, existing)
	if !driver.Supported(name) {
		exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": name, "os": runtime.GOOS})
	}
	// if we are only downloading artifacts for a driver, we can stop validation here
	if viper.GetBool("download-only") {
		return
	}
	st := ds.State
	glog.Infof("status for %s: %+v", name, st)
	// A driver can be usable and still suggest a performance improvement.
	if st.NeedsImprovement {
		out.WarnReason(reason.Kind{
			ID:     fmt.Sprintf("PROVIDER_%s_IMPROVEMENT", strings.ToUpper(name)),
			Advice: translate.T(st.Fix),
			Style:  style.Improvement,
		}, `The '{{.driver}}' driver reported a performance issue`, out.V{"driver": name})
	}
	if st.Error == nil {
		return
	}
	// A missing provider is always fatal (exit.Message is presumably
	// terminal — code below it is only reached when st.Installed is true).
	if !st.Installed {
		exit.Message(reason.Kind{
			ID:       fmt.Sprintf("PROVIDER_%s_NOT_FOUND", strings.ToUpper(name)),
			Advice:   translate.T(st.Fix),
			ExitCode: reason.ExProviderNotFound,
			URL:      st.Doc,
			Style:    style.Shrug,
		}, `The '{{.driver}}' provider was not found: {{.error}}`, out.V{"driver": name, "error": st.Error})
	}
	// Installed but erroring: distinguish "not running" from a generic error.
	id := fmt.Sprintf("PROVIDER_%s_ERROR", strings.ToUpper(name))
	code := reason.ExProviderUnavailable
	if !st.Running {
		id = fmt.Sprintf("PROVIDER_%s_NOT_RUNNING", strings.ToUpper(name))
		code = reason.ExProviderNotRunning
	}
	exitIfNotForced(reason.Kind{
		ID:       id,
		Advice:   translate.T(st.Fix),
		ExitCode: code,
		URL:      st.Doc,
		Style:    style.Fatal,
	}, st.Error.Error())
}
// selectImageRepository returns whether a reachable mirror was found, the
// chosen repository (or the first configured mirror as a fallback when none
// responded), and an error only for an invalid country code.
func selectImageRepository(mirrorCountry string, v semver.Version) (bool, string, error) {
	var tryCountries []string
	var fallback string
	glog.Infof("selecting image repository for country %s ...", mirrorCountry)
	if mirrorCountry != "" {
		localRepos, ok := constants.ImageRepositories[mirrorCountry]
		if !ok || len(localRepos) == 0 {
			return false, "", fmt.Errorf("invalid image mirror country code: %s", mirrorCountry)
		}
		tryCountries = append(tryCountries, mirrorCountry)
		// we'll use the first repository as fallback
		// when none of the mirrors in the given location is available
		fallback = localRepos[0]
	} else {
		// always make sure global is preferred
		tryCountries = append(tryCountries, "global")
		// NOTE(review): map iteration order is random, so the remaining
		// countries are probed in nondeterministic order — confirm intended.
		for k := range constants.ImageRepositories {
			if strings.ToLower(k) != "global" {
				tryCountries = append(tryCountries, k)
			}
		}
	}
	// checkRepository probes a mirror by resolving the pause image remotely.
	checkRepository := func(repo string) error {
		pauseImage := images.Pause(v, repo)
		ref, err := name.ParseReference(pauseImage, name.WeakValidation)
		if err != nil {
			return err
		}
		_, err = remote.Image(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain))
		return err
	}
	// Use the first repository that responds, trying countries in order.
	for _, code := range tryCountries {
		localRepos := constants.ImageRepositories[code]
		for _, repo := range localRepos {
			err := checkRepository(repo)
			if err == nil {
				return true, repo, nil
			}
		}
	}
	return false, fallback, nil
}
// validateUser validates minikube is run by the recommended user (privileged or regular)
func validateUser(drvName string) {
	u, err := user.Current()
	if err != nil {
		glog.Errorf("Error getting the current user: %v", err)
		return
	}
	useForce := viper.GetBool(force)
	// Drivers that require root must be run via sudo (unless --force).
	if driver.NeedsRoot(drvName) && u.Uid != "0" && !useForce {
		exit.Message(reason.DrvNeedsRoot, `The "{{.driver_name}}" driver requires root privileges. Please run minikube using 'sudo -E minikube start --driver={{.driver_name}}'.`, out.V{"driver_name": drvName})
	}
	// If root is required, or we are not root, exit early
	if driver.NeedsRoot(drvName) || u.Uid != "0" {
		return
	}
	// Past this point we are root with a driver that should NOT run as root.
	out.ErrT(style.Stopped, `The "{{.driver_name}}" driver should not be used with root privileges.`, out.V{"driver_name": drvName})
	out.ErrT(style.Tip, "If you are running minikube within a VM, consider using --driver=none:")
	out.ErrT(style.Documentation, " https://minikube.sigs.k8s.io/docs/reference/drivers/none/")
	cname := ClusterFlagValue()
	_, err = config.Load(cname)
	// A loadable (or unexpectedly erroring) config suggests a root-owned
	// cluster already exists; show how to remove it.
	if err == nil || !config.IsNotExist(err) {
		out.ErrT(style.Tip, "Tip: To remove this root owned cluster, run: sudo {{.cmd}}", out.V{"cmd": mustload.ExampleCmd(cname, "delete")})
	}
	if !useForce {
		exit.Message(reason.DrvAsRoot, `The "{{.driver_name}}" driver should not be used with root privileges.`, out.V{"driver_name": drvName})
	}
}
// memoryLimits returns the amount of memory allocated to the system and hypervisor, the return value is in MiB
func memoryLimits(drvName string) (int, int, error) {
	hostInfo, cpuErr, memErr, diskErr := machine.CachedHostInfo()
	// CPU and disk lookup failures are non-fatal for a memory check.
	if cpuErr != nil {
		glog.Warningf("could not get system cpu info while verifying memory limits, which might be okay: %v", cpuErr)
	}
	if diskErr != nil {
		glog.Warningf("could not get system disk info while verifying memory limits, which might be okay: %v", diskErr)
	}
	if memErr != nil {
		return -1, -1, memErr
	}
	system := int(hostInfo.Memory)
	container := 0
	// Container-based (KIC) drivers are additionally limited by the daemon.
	if driver.IsKIC(drvName) {
		d, err := oci.CachedDaemonInfo(drvName)
		if err != nil {
			return -1, -1, err
		}
		container = int(d.TotalMemory / 1024 / 1024)
	}
	return system, container, nil
}
// suggestMemoryAllocation calculates the default memory footprint in MiB
func suggestMemoryAllocation(sysLimit int, containerLimit int, nodes int) int {
	// An explicit --memory flag always wins.
	if mem := viper.GetInt(memory); mem != 0 {
		return mem
	}
	const fallback = 2200
	maximum := 6000
	if sysLimit > 0 && fallback > sysLimit {
		return sysLimit
	}
	// If there are container limits, add tiny bit of slack for non-minikube components
	if containerLimit > 0 {
		if fallback > containerLimit {
			return containerLimit
		}
		maximum = containerLimit - 48
	}
	// Suggest 25% of RAM, rounded to nearest 100MB. Hyper-V requires an even number!
	suggested := int(float32(sysLimit)/400.0) * 100
	if nodes > 1 {
		suggested /= nodes
	}
	switch {
	case suggested > maximum:
		return maximum
	case suggested < fallback:
		return fallback
	default:
		return suggested
	}
}
// validateRequestedMemorySize validates the memory size matches the minimum recommended
func validateRequestedMemorySize(req int, drvName string) {
	// TODO: Fix MB vs MiB confusion
	sysLimit, containerLimit, err := memoryLimits(drvName)
	if err != nil {
		glog.Warningf("Unable to query memory limits: %v", err)
	}
	// Detect if their system doesn't have enough memory to work with.
	if driver.IsKIC(drvName) && containerLimit < minUsableMem {
		if driver.IsDockerDesktop(drvName) {
			if runtime.GOOS == "darwin" {
				exitIfNotForced(reason.RsrcInsufficientDarwinDockerMemory, "Docker Desktop only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes", out.V{"size": containerLimit, "req": minUsableMem, "recommend": "2.25 GB"})
			} else {
				exitIfNotForced(reason.RsrcInsufficientWindowsDockerMemory, "Docker Desktop only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes", out.V{"size": containerLimit, "req": minUsableMem, "recommend": "2.25 GB"})
			}
		}
		exitIfNotForced(reason.RsrcInsufficientContainerMemory, "{{.driver}} only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes", out.V{"size": containerLimit, "driver": drvName, "req": minUsableMem})
	}
	if sysLimit < minUsableMem {
		// Fixed: previously this reported containerLimit as the system size.
		exitIfNotForced(reason.RsrcInsufficientSysMemory, "System only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes", out.V{"size": sysLimit, "driver": drvName, "req": minUsableMem})
	}
	if req < minUsableMem {
		exitIfNotForced(reason.RsrcInsufficientReqMemory, "Requested memory allocation {{.requested}}MiB is less than the usable minimum of {{.minimum_memory}}MB", out.V{"requested": req, "minimum_memory": minUsableMem})
	}
	if req < minRecommendedMem {
		out.WarnReason(reason.RsrcInsufficientReqMemory, "Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommend}}MB. Deployments may fail.", out.V{"requested": req, "recommend": minRecommendedMem})
	}
	if driver.IsDockerDesktop(drvName) && containerLimit < 2997 && sysLimit > 8000 { // for users with more than 8 GB advice 3 GB
		r := reason.RsrcInsufficientDarwinDockerMemory
		// Fixed: runtime.GOOS values are lowercase, so the previous
		// comparison against "Windows" could never match.
		if runtime.GOOS == "windows" {
			r = reason.RsrcInsufficientWindowsDockerMemory
		}
		r.Style = style.Improvement
		out.WarnReason(r, "Docker Desktop has access to only {{.size}}MiB of the {{.sys}}MiB in available system memory. Consider increasing this for improved performance.", out.V{"size": containerLimit, "sys": sysLimit, "recommend": "3 GB"})
	}
	advised := suggestMemoryAllocation(sysLimit, containerLimit, viper.GetInt(nodes))
	if req > sysLimit {
		exitIfNotForced(reason.Kind{ID: "RSRC_OVER_ALLOC_MEM", Advice: "Start minikube with less memory allocated: 'minikube start --memory={{.advised}}mb'"},
			`Requested memory allocation {{.requested}}MB is more than your system limit {{.system_limit}}MB.`,
			out.V{"requested": req, "system_limit": sysLimit, "advised": advised})
	}
	// Recommend 1GB to handle OS/VM overhead
	maxAdvised := sysLimit - 1024
	if req > maxAdvised {
		out.WarnReason(reason.Kind{ID: "RSRC_OVER_ALLOC_MEM", Advice: "Start minikube with less memory allocated: 'minikube start --memory={{.advised}}mb'"},
			`The requested memory allocation of {{.requested}}MiB does not leave room for system overhead (total system memory: {{.system_limit}}MiB). You may face stability issues.`,
			out.V{"requested": req, "system_limit": sysLimit, "advised": advised})
	}
}
// validateCPUCount validates the cpu count matches the minimum recommended
func validateCPUCount(drvName string) {
	var cpuCount int
	if driver.BareMetal(drvName) {
		// Uses the gopsutil cpu package to count the number of physical cpu cores
		ci, err := cpu.Counts(false)
		if err != nil {
			glog.Warningf("Unable to get CPU info: %v", err)
		} else {
			cpuCount = ci
		}
	} else {
		cpuCount = viper.GetInt(cpus)
	}
	if cpuCount < minimumCPUS {
		exitIfNotForced(reason.RsrcInsufficientCores, "Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}", out.V{"requested_cpus": cpuCount, "minimum_cpus": minimumCPUS})
	}
	// Only container-based (KIC) drivers need the daemon CPU checks below.
	if !driver.IsKIC(drvName) {
		return
	}
	si, err := oci.CachedDaemonInfo(drvName)
	if err != nil {
		out.T(style.Confused, "Failed to verify '{{.driver_name}} info' will try again ...", out.V{"driver_name": drvName})
		si, err = oci.DaemonInfo(drvName)
		if err != nil {
			exit.Message(reason.Usage, "Ensure your {{.driver_name}} is running and is healthy.", out.V{"driver_name": driver.FullName(drvName)})
		}
	}
	// looks good
	if si.CPUs >= 2 {
		return
	}
	if drvName == oci.Docker && runtime.GOOS == "darwin" {
		exitIfNotForced(reason.RsrcInsufficientDarwinDockerCores, "Docker Desktop has less than 2 CPUs configured, but Kubernetes requires at least 2 to be available")
	} else if drvName == oci.Docker && runtime.GOOS == "windows" {
		exitIfNotForced(reason.RsrcInsufficientWindowsDockerCores, "Docker Desktop has less than 2 CPUs configured, but Kubernetes requires at least 2 to be available")
	} else {
		// Fixed: use the validated drvName rather than re-reading the
		// "driver" flag, which can be empty when --vm-driver was used.
		exitIfNotForced(reason.RsrcInsufficientCores, "{{.driver_name}} has less than 2 CPUs available, but Kubernetes requires at least 2 to be available", out.V{"driver_name": driver.FullName(drvName)})
	}
}
// validateFlags validates the supplied flags against known bad combinations
func validateFlags(cmd *cobra.Command, drvName string) {
	// Disk size: must parse and meet the minimum.
	if cmd.Flags().Changed(humanReadableDiskSize) {
		diskSizeMB, err := util.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
		if err != nil {
			exitIfNotForced(reason.Usage, "Validation unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err})
		}
		if diskSizeMB < minimumDiskSize {
			exitIfNotForced(reason.RsrcInsufficientStorage, "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}", out.V{"requested_size": diskSizeMB, "minimum_size": minimumDiskSize})
		}
	}
	if cmd.Flags().Changed(cpus) {
		if !driver.HasResourceLimits(drvName) {
			out.WarningT("The '{{.name}}' driver does not respect the --cpus flag", out.V{"name": drvName})
		}
	}
	validateCPUCount(drvName)
	if cmd.Flags().Changed(memory) {
		if !driver.HasResourceLimits(drvName) {
			out.WarningT("The '{{.name}}' driver does not respect the --memory flag", out.V{"name": drvName})
		}
		req, err := util.CalculateSizeInMB(viper.GetString(memory))
		if err != nil {
			exitIfNotForced(reason.Usage, "Unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err})
		}
		validateRequestedMemorySize(req, drvName)
	}
	if cmd.Flags().Changed(containerRuntime) {
		// NOTE: this local shadows the runtime package within this scope.
		runtime := strings.ToLower(viper.GetString(containerRuntime))
		validOptions := cruntime.ValidRuntimes()
		// `crio` is accepted as an alternative spelling to `cri-o`
		validOptions = append(validOptions, constants.CRIO)
		var validRuntime bool
		for _, option := range validOptions {
			if runtime == option {
				validRuntime = true
			}
			// Convert `cri-o` to `crio` as the K8s config uses the `crio` spelling
			// NOTE(review): this check is loop-invariant and runs once per
			// option; harmless (viper.Set is idempotent) but could be hoisted.
			if runtime == "cri-o" {
				viper.Set(containerRuntime, constants.CRIO)
			}
		}
		if !validRuntime {
			exit.Message(reason.Usage, `Invalid Container Runtime: "{{.runtime}}". Valid runtimes are: {{.validOptions}}`, out.V{"runtime": runtime, "validOptions": strings.Join(cruntime.ValidRuntimes(), ", ")})
		}
	}
	if driver.BareMetal(drvName) {
		if ClusterFlagValue() != constants.DefaultClusterName {
			exit.Message(reason.DrvUnsupportedProfile, "The '{{.name}} driver does not support multiple profiles: https://minikube.sigs.k8s.io/docs/reference/drivers/none/", out.V{"name": drvName})
		}
		runtime := viper.GetString(containerRuntime)
		if runtime != "docker" {
			out.WarningT("Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!", out.V{"runtime": runtime})
		}
		// conntrack is required starting with Kubernetes 1.18, include the release candidates for completion
		version, _ := util.ParseKubernetesVersion(getKubernetesVersion(nil))
		if version.GTE(semver.MustParse("1.18.0-beta.1")) {
			if _, err := exec.LookPath("conntrack"); err != nil {
				exit.Message(reason.GuestMissingConntrack, "Sorry, Kubernetes {{.k8sVersion}} requires conntrack to be installed in root's path", out.V{"k8sVersion": version.String()})
			}
		}
	}
	// validate kubeadm extra args
	if invalidOpts := bsutil.FindInvalidExtraConfigFlags(config.ExtraOptions); len(invalidOpts) > 0 {
		out.WarningT(
			"These --extra-config parameters are invalid: {{.invalid_extra_opts}}",
			out.V{"invalid_extra_opts": invalidOpts},
		)
		exit.Message(
			reason.Usage,
			"Valid components are: {{.valid_extra_opts}}",
			out.V{"valid_extra_opts": bsutil.KubeadmExtraConfigOpts},
		)
	}
	// check that kubeadm extra args contain only allowed parameters
	for param := range config.ExtraOptions.AsMap().Get(bsutil.Kubeadm) {
		if !config.ContainsParam(bsutil.KubeadmExtraArgsAllowed[bsutil.KubeadmCmdParam], param) &&
			!config.ContainsParam(bsutil.KubeadmExtraArgsAllowed[bsutil.KubeadmConfigParam], param) {
			exit.Message(reason.Usage, "Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config", out.V{"parameter_name": param})
		}
	}
	if s := viper.GetString(startOutput); s != "text" && s != "json" {
		exit.Message(reason.Usage, "Sorry, please set the --output flag to one of the following valid options: [text,json]")
	}
	validateRegistryMirror()
}
// This function validates if the --registry-mirror
// args match the format of http://localhost
func validateRegistryMirror() {
	for _, loc := range registryMirror {
		URL, err := url.Parse(loc)
		if err != nil {
			glog.Errorln("Error Parsing URL: ", err)
			// Fixed: execution previously fell through and dereferenced the
			// nil URL below; treat an unparseable value as invalid instead.
			exit.Message(reason.Usage, "Sorry, the url provided with the --registry-mirror flag is invalid: {{.url}}", out.V{"url": loc})
		}
		// Only plain http(s) scheme-and-host mirrors are accepted.
		if (URL.Scheme != "http" && URL.Scheme != "https") || URL.Path != "" {
			exit.Message(reason.Usage, "Sorry, the url provided with the --registry-mirror flag is invalid: {{.url}}", out.V{"url": loc})
		}
	}
}
// createNode returns the cluster config and its primary control-plane node,
// creating the initial control-plane node when no existing config is given.
func createNode(cc config.ClusterConfig, kubeNodeName string, existing *config.ClusterConfig) (config.ClusterConfig, config.Node, error) {
	// Create the initial node, which will necessarily be a control plane
	if existing != nil {
		cp, err := config.PrimaryControlPlane(existing)
		// Fixed: check the error before touching cp, which is zero-valued
		// when PrimaryControlPlane fails.
		if err != nil {
			return cc, config.Node{}, err
		}
		cp.KubernetesVersion = getKubernetesVersion(&cc)
		// Make sure that existing nodes honor if KubernetesVersion gets specified on restart
		// KubernetesVersion is the only attribute that the user can override in the Node object
		nodes := []config.Node{}
		for _, n := range existing.Nodes {
			n.KubernetesVersion = getKubernetesVersion(&cc)
			nodes = append(nodes, n)
		}
		cc.Nodes = nodes
		return cc, cp, nil
	}
	cp := config.Node{
		Port:              cc.KubernetesConfig.NodePort,
		KubernetesVersion: getKubernetesVersion(&cc),
		Name:              kubeNodeName,
		ControlPlane:      true,
		Worker:            true,
	}
	cc.Nodes = []config.Node{cp}
	return cc, cp, nil
}
// autoSetDriverOptions sets the options needed for specific driver automatically.
func autoSetDriverOptions(cmd *cobra.Command, drvName string) (err error) {
	hints := driver.FlagDefaults(drvName)
	// Apply each driver-suggested extra-config unless the user set it already.
	// NOTE: as in the original, only the outcome of the last Set call survives
	// in err; a later success clears an earlier failure.
	for _, eo := range hints.ExtraOptions {
		if config.ExtraOptions.Exists(eo) {
			glog.Infof("skipping extra-config %q.", eo)
			continue
		}
		glog.Infof("auto setting extra-config to %q.", eo)
		if err = config.ExtraOptions.Set(eo); err != nil {
			err = errors.Wrapf(err, "setting extra option %s", eo)
		}
	}
	if !cmd.Flags().Changed(cacheImages) {
		viper.Set(cacheImages, hints.CacheImages)
	}
	if !cmd.Flags().Changed(containerRuntime) && hints.ContainerRuntime != "" {
		viper.Set(containerRuntime, hints.ContainerRuntime)
		glog.Infof("auto set %s to %q.", containerRuntime, hints.ContainerRuntime)
	}
	if !cmd.Flags().Changed(cmdcfg.Bootstrapper) && hints.Bootstrapper != "" {
		viper.Set(cmdcfg.Bootstrapper, hints.Bootstrapper)
		glog.Infof("auto set %s to %q.", cmdcfg.Bootstrapper, hints.Bootstrapper)
	}
	return err
}
// validateKubernetesVersion ensures that the requested version is reasonable
func validateKubernetesVersion(old *config.ClusterConfig) {
	nvs, _ := semver.Make(strings.TrimPrefix(getKubernetesVersion(old), version.VersionPrefix))
	oldestVersion, err := semver.Make(strings.TrimPrefix(constants.OldestKubernetesVersion, version.VersionPrefix))
	if err != nil {
		exit.Message(reason.InternalSemverParse, "Unable to parse oldest Kubernetes version from constants: {{.error}}", out.V{"error": err})
	}
	defaultVersion, err := semver.Make(strings.TrimPrefix(constants.DefaultKubernetesVersion, version.VersionPrefix))
	if err != nil {
		exit.Message(reason.InternalSemverParse, "Unable to parse default Kubernetes version from constants: {{.error}}", out.V{"error": err})
	}
	// Versions older than the oldest supported release are fatal unless --force.
	if nvs.LT(oldestVersion) {
		out.WarningT("Specified Kubernetes version {{.specified}} is less than the oldest supported version: {{.oldest}}", out.V{"specified": nvs, "oldest": constants.OldestKubernetesVersion})
		if !viper.GetBool(force) {
			out.WarningT("You can force an unsupported Kubernetes version via the --force flag")
		}
		exitIfNotForced(reason.KubernetesTooOld, "Kubernetes {{.version}} is not supported by this release of minikube", out.V{"version": nvs})
	}
	// Without an existing cluster version there is nothing to compare against.
	if old == nil || old.KubernetesConfig.KubernetesVersion == "" {
		return
	}
	ovs, err := semver.Make(strings.TrimPrefix(old.KubernetesConfig.KubernetesVersion, version.VersionPrefix))
	if err != nil {
		glog.Errorf("Error parsing old version %q: %v", old.KubernetesConfig.KubernetesVersion, err)
	}
	// Downgrading an existing cluster is not supported.
	if nvs.LT(ovs) {
		profileArg := ""
		if old.Name != constants.DefaultClusterName {
			profileArg = fmt.Sprintf(" -p %s", old.Name)
		}
		suggestedName := old.Name + "2"
		exit.Message(reason.KubernetesDowngrade, "Unable to safely downgrade existing Kubernetes v{{.old}} cluster to v{{.new}}",
			out.V{"prefix": version.VersionPrefix, "new": nvs, "old": ovs, "profile": profileArg, "suggestedName": suggestedName})
	}
	// Purely informational: a newer default version is available.
	if defaultVersion.GT(nvs) {
		out.T(style.New, "Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.prefix}}{{.new}}", out.V{"prefix": version.VersionPrefix, "new": defaultVersion})
	}
}
// getKubernetesVersion resolves the Kubernetes version to deploy, honoring
// the flag, then the existing cluster config, then the "stable"/"latest"
// aliases, and returns it normalized with the version prefix.
func getKubernetesVersion(old *config.ClusterConfig) string {
	paramVersion := viper.GetString(kubernetesVersion)
	// try to load the old version first if the user didn't specify anything
	if paramVersion == "" && old != nil {
		paramVersion = old.KubernetesConfig.KubernetesVersion
	}
	switch {
	case paramVersion == "" || strings.EqualFold(paramVersion, "stable"):
		paramVersion = constants.DefaultKubernetesVersion
	case strings.EqualFold(paramVersion, "latest"):
		paramVersion = constants.NewestKubernetesVersion
	}
	nvs, err := semver.Make(strings.TrimPrefix(paramVersion, version.VersionPrefix))
	if err != nil {
		exit.Message(reason.Usage, `Unable to parse "{{.kubernetes_version}}": {{.error}}`, out.V{"kubernetes_version": paramVersion, "error": err})
	}
	return version.VersionPrefix + nvs.String()
}
// exitIfNotForced exits minikube with the given reason and message unless the
// user supplied --force, in which case the message is only printed as an error
// and execution continues.
func exitIfNotForced(r reason.Kind, message string, v ...out.V) {
	if !viper.GetBool(force) {
		exit.Message(r, message, v...)
	}
	out.Error(r, message, v...)
}
|
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"encoding/json"
"fmt"
"math"
"net"
"net/url"
"os"
"os/exec"
"os/user"
"runtime"
"strings"
"github.com/blang/semver"
"github.com/docker/machine/libmachine/ssh"
"github.com/golang/glog"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/pkg/errors"
"github.com/shirou/gopsutil/cpu"
gopshost "github.com/shirou/gopsutil/host"
"github.com/spf13/cobra"
"github.com/spf13/viper"
cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil"
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/download"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/kubeconfig"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/mustload"
"k8s.io/minikube/pkg/minikube/node"
"k8s.io/minikube/pkg/minikube/notify"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/out/register"
"k8s.io/minikube/pkg/minikube/reason"
"k8s.io/minikube/pkg/minikube/style"
"k8s.io/minikube/pkg/minikube/registry"
"k8s.io/minikube/pkg/minikube/translate"
"k8s.io/minikube/pkg/util"
"k8s.io/minikube/pkg/version"
)
var (
	// registryMirror holds the --registry-mirror values (or the
	// MINIKUBE_REGISTRY_MIRROR fallback applied in runStart).
	registryMirror []string
	// insecureRegistry presumably holds --insecure-registry values — confirm
	// against the flag registration (not visible here).
	insecureRegistry []string
	// apiServerNames presumably holds extra apiserver names — confirm
	// against the flag registration (not visible here).
	apiServerNames []string
	// apiServerIPs presumably holds extra apiserver IPs — confirm
	// against the flag registration (not visible here).
	apiServerIPs []net.IP
)
// init registers every flag group for the start command and binds the
// resulting flag set to viper so values are readable via viper.Get*.
func init() {
	initMinikubeFlags()
	initKubernetesFlags()
	initDriverFlags()
	initNetworkingFlags()
	if err := viper.BindPFlags(startCmd.Flags()); err != nil {
		exit.Error(reason.InternalBindFlags, "unable to bind flags", err)
	}
}
// startCmd represents the start command; its flags are registered in init()
// and its implementation lives in runStart.
var startCmd = &cobra.Command{
	Use:   "start",
	Short: "Starts a local Kubernetes cluster",
	Long:  "Starts a local Kubernetes cluster",
	Run:   runStart,
}
// platform generates a user-readable platform message
func platform() string {
	var b strings.Builder
	// Show the distro version if possible
	hi, err := gopshost.Info()
	if err != nil {
		glog.Warningf("gopshost.Info returned error: %v", err)
		b.WriteString(runtime.GOOS)
	} else {
		b.WriteString(fmt.Sprintf("%s %s", strings.Title(hi.Platform), hi.PlatformVersion))
		glog.Infof("hostinfo: %+v", hi)
	}
	vsys, vrole, err := gopshost.Virtualization()
	if err != nil {
		glog.Warningf("gopshost.Virtualization returned error: %v", err)
	} else {
		glog.Infof("virtualization: %s %s", vsys, vrole)
	}
	// This environment is exotic, let's output a bit more.
	if vrole == "guest" || runtime.GOARCH != "amd64" {
		if vsys == "" {
			b.WriteString(fmt.Sprintf(" (%s)", runtime.GOARCH))
		} else {
			b.WriteString(fmt.Sprintf(" (%s/%s)", vsys, runtime.GOARCH))
		}
	}
	return b.String()
}
// runStart implements the flow of "minikube start": setup and validation,
// driver selection, provisioning (with fallback across alternate drivers
// when the driver was auto-detected), and finally node startup plus
// kubectl guidance. Fatal problems terminate the process via exit.*.
func runStart(cmd *cobra.Command, args []string) {
	register.SetEventLogPath(localpath.EventLog(ClusterFlagValue()))
	out.SetJSON(viper.GetString(startOutput) == "json")
	displayVersion(version.GetVersion())

	// No need to do the update check if no one is going to see it
	if !viper.GetBool(interactive) || !viper.GetBool(dryRun) {
		// Avoid blocking execution on optional HTTP fetches
		go notify.MaybePrintUpdateTextFromGithub()
	}

	displayEnviron(os.Environ())
	if viper.GetBool(force) {
		out.WarningT("minikube skips various validations when --force is supplied; this may lead to unexpected behavior")
	}

	// if --registry-mirror specified when run minikube start,
	// take arg precedence over MINIKUBE_REGISTRY_MIRROR
	// actually this is a hack, because viper 1.0.0 can assign env to variable if StringSliceVar
	// and i can't update it to 1.4.0, it affects too much code
	// other types (like String, Bool) of flag works, so imageRepository, imageMirrorCountry
	// can be configured as MINIKUBE_IMAGE_REPOSITORY and IMAGE_MIRROR_COUNTRY
	// this should be updated to documentation
	if len(registryMirror) == 0 {
		registryMirror = viper.GetStringSlice("registry_mirror")
	}

	if !config.ProfileNameValid(ClusterFlagValue()) {
		out.WarningT("Profile name '{{.name}}' is not valid", out.V{"name": ClusterFlagValue()})
		exit.Message(reason.Usage, "Only alphanumeric and dashes '-' are permitted. Minimum 1 character, starting with alphanumeric.")
	}

	// A missing config simply means a fresh cluster; any other load error is fatal.
	existing, err := config.Load(ClusterFlagValue())
	if err != nil && !config.IsNotExist(err) {
		exit.Message(reason.HostConfigLoad, "Unable to load config: {{.error}}", out.V{"error": err})
	}

	if existing != nil {
		upgradeExistingConfig(existing)
	}

	validateSpecifiedDriver(existing)
	validateKubernetesVersion(existing)

	ds, alts, specified := selectDriver(existing)
	starter, err := provisionWithDriver(cmd, ds, existing)
	if err != nil {
		node.ExitIfFatal(err)
		machine.MaybeDisplayAdvice(err, ds.Name)
		if specified {
			// If the user specified a driver, don't fallback to anything else
			exit.Error(reason.GuestProvision, "error provisioning host", err)
		} else {
			success := false
			// Walk down the rest of the options
			for _, alt := range alts {
				out.WarningT("Startup with {{.old_driver}} driver failed, trying with alternate driver {{.new_driver}}: {{.error}}", out.V{"old_driver": ds.Name, "new_driver": alt.Name, "error": err})
				ds = alt
				// Delete the existing cluster and try again with the next driver on the list
				profile, err := config.LoadProfile(ClusterFlagValue())
				if err != nil {
					glog.Warningf("%s profile does not exist, trying anyways.", ClusterFlagValue())
				}

				err = deleteProfile(profile)
				if err != nil {
					out.WarningT("Failed to delete cluster {{.name}}, proceeding with retry anyway.", out.V{"name": ClusterFlagValue()})
				}
				starter, err = provisionWithDriver(cmd, ds, existing)
				if err != nil {
					continue
				} else {
					// Success!
					success = true
					break
				}
			}
			if !success {
				exit.Error(reason.GuestProvision, "error provisioning host", err)
			}
		}
	}

	if existing != nil && existing.KubernetesConfig.ContainerRuntime == "crio" && driver.IsKIC(existing.Driver) {
		// Stop and start again if it's crio because it's broken above v1.17.3
		out.WarningT("Due to issues with CRI-O post v1.17.3, we need to restart your cluster.")
		out.WarningT("See details at https://github.com/kubernetes/minikube/issues/8861")
		stopProfile(existing.Name)
		starter, err = provisionWithDriver(cmd, ds, existing)
		if err != nil {
			exit.Error(reason.GuestProvision, "error provisioning host", err)
		}
	}

	kubeconfig, err := startWithDriver(cmd, starter, existing)
	if err != nil {
		node.ExitIfFatal(err)
		exit.Error(reason.GuestStart, "failed to start node", err)
	}

	if err := showKubectlInfo(kubeconfig, starter.Node.KubernetesVersion, starter.Cfg.Name); err != nil {
		glog.Errorf("kubectl info: %v", err)
	}
}
// provisionWithDriver validates the chosen driver and flags, generates the
// cluster config, caches required artifacts (the ISO, for VM drivers), and
// provisions the primary node, returning a node.Starter ready for node.Start.
// In --dry-run mode it exits the process after validation succeeds.
func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing *config.ClusterConfig) (node.Starter, error) {
	driverName := ds.Name
	glog.Infof("selected driver: %s", driverName)
	validateDriver(ds, existing)
	err := autoSetDriverOptions(cmd, driverName)
	if err != nil {
		glog.Errorf("Error autoSetOptions : %v", err)
	}

	validateFlags(cmd, driverName)
	validateUser(driverName)

	// Download & update the driver, even in --download-only mode
	if !viper.GetBool(dryRun) {
		updateDriver(driverName)
	}

	k8sVersion := getKubernetesVersion(existing)
	cc, n, err := generateClusterConfig(cmd, existing, k8sVersion, driverName)
	if err != nil {
		return node.Starter{}, errors.Wrap(err, "Failed to generate config")
	}

	// This is about as far as we can go without overwriting config files
	if viper.GetBool(dryRun) {
		out.T(style.DryRun, `dry-run validation complete!`)
		os.Exit(0)
	}

	if driver.IsVM(driverName) {
		url, err := download.ISO(viper.GetStringSlice(isoURL), cmd.Flags().Changed(isoURL))
		if err != nil {
			return node.Starter{}, errors.Wrap(err, "Failed to cache ISO")
		}
		cc.MinikubeISO = url
	}

	// Only carry over addon state when --install-addons is enabled.
	var existingAddons map[string]bool
	if viper.GetBool(installAddons) {
		existingAddons = map[string]bool{}
		if existing != nil && existing.Addons != nil {
			existingAddons = existing.Addons
		}
	}

	if viper.GetBool(nativeSSH) {
		ssh.SetDefaultClient(ssh.Native)
	} else {
		ssh.SetDefaultClient(ssh.External)
	}

	mRunner, preExists, mAPI, host, err := node.Provision(&cc, &n, true, viper.GetBool(deleteOnFailure))
	if err != nil {
		return node.Starter{}, err
	}

	return node.Starter{
		Runner:         mRunner,
		PreExists:      preExists,
		MachineAPI:     mAPI,
		Host:           host,
		ExistingAddons: existingAddons,
		Cfg:            &cc,
		Node:           &n,
	}, nil
}
// startWithDriver starts the primary node (falling back to
// maybeDeleteAndRetry on failure), then creates or re-adds any additional
// nodes, and returns the kubeconfig settings for the cluster.
func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config.ClusterConfig) (*kubeconfig.Settings, error) {
	kubeconfig, err := node.Start(starter, true)
	if err != nil {
		kubeconfig, err = maybeDeleteAndRetry(cmd, *starter.Cfg, *starter.Node, starter.ExistingAddons, err)
		if err != nil {
			return nil, err
		}
	}

	numNodes := viper.GetInt(nodes)
	if existing != nil {
		if numNodes > 1 {
			// We ignore the --nodes parameter if we're restarting an existing cluster
			out.WarningT(`The cluster {{.cluster}} already exists which means the --nodes parameter will be ignored. Use "minikube node add" to add nodes to an existing cluster.`, out.V{"cluster": existing.Name})
		}
		numNodes = len(existing.Nodes)
	}
	if numNodes > 1 {
		if driver.BareMetal(starter.Cfg.Driver) {
			exit.Message(reason.DrvUnsupportedMulti, "The none driver is not compatible with multi-node clusters.")
		} else {
			// Only warn users on first start.
			if existing == nil {
				out.Ln("")
				warnAboutMultiNode()

				// Fresh cluster: create numNodes-1 additional worker nodes.
				for i := 1; i < numNodes; i++ {
					nodeName := node.Name(i + 1)
					n := config.Node{
						Name:              nodeName,
						Worker:            true,
						ControlPlane:      false,
						KubernetesVersion: starter.Cfg.KubernetesConfig.KubernetesVersion,
					}
					out.Ln("") // extra newline for clarity on the command line
					err := node.Add(starter.Cfg, n, viper.GetBool(deleteOnFailure))
					if err != nil {
						return nil, errors.Wrap(err, "adding node")
					}
				}
			} else {
				// Restart: re-add each previously recorded worker node.
				for _, n := range existing.Nodes {
					if !n.ControlPlane {
						err := node.Add(starter.Cfg, n, viper.GetBool(deleteOnFailure))
						if err != nil {
							return nil, errors.Wrap(err, "adding node")
						}
					}
				}
			}
		}
	}

	return kubeconfig, nil
}
// warnAboutMultiNode tells the user that multi-node support is still
// experimental and points at the tracking issue.
func warnAboutMultiNode() {
	const trackingMsg = "To track progress on multi-node clusters, see https://github.com/kubernetes/minikube/issues/7538."
	out.WarningT("Multi-node clusters are currently experimental and might exhibit unintended behavior.")
	out.T(style.Documentation, trackingMsg)
}
// updateDriver installs or refreshes the named driver binary under the
// minikube bin directory. Problems are reported as warnings only; the
// start flow continues regardless.
func updateDriver(driverName string) {
	v, err := version.GetSemverVersion()
	if err != nil {
		out.WarningT("Error parsing minikube version: {{.error}}", out.V{"error": err})
		return
	}
	binDir := localpath.MakeMiniPath("bin")
	if err := driver.InstallOrUpdate(driverName, binDir, v, viper.GetBool(interactive), viper.GetBool(autoUpdate)); err != nil {
		out.WarningT("Unable to update {{.driver}} driver: {{.error}}", out.V{"driver": driverName, "error": err})
	}
}
// displayVersion prints the minikube version banner, prefixed with the
// profile name when a non-default profile is active, and marks the
// InitialSetup step for event registration.
func displayVersion(version string) {
	var prefix string
	if name := ClusterFlagValue(); name != constants.DefaultClusterName {
		prefix = fmt.Sprintf("[%s] ", name)
	}

	register.Reg.SetStep(register.InitialSetup)
	out.T(style.Happy, "{{.prefix}}minikube {{.version}} on {{.platform}}", out.V{"prefix": prefix, "version": version, "platform": platform()})
}
// displayEnviron makes the user aware of environment variables that will
// affect how minikube operates: anything prefixed MINIKUBE_, plus the
// kubeconfig override variable.
func displayEnviron(env []string) {
	for _, kv := range env {
		bits := strings.SplitN(kv, "=", 2)
		if len(bits) < 2 {
			// Fix: a malformed entry without "=" would previously panic on
			// bits[1]; skip it instead.
			continue
		}
		k := bits[0]
		v := bits[1]
		if strings.HasPrefix(k, "MINIKUBE_") || k == constants.KubeconfigEnvVar {
			out.Infof("{{.key}}={{.value}}", out.V{"key": k, "value": v})
		}
	}
}
// showKubectlInfo tells the user how to reach the cluster with kubectl and
// warns when the local kubectl's version is skewed by more than one minor
// release (or a different major) from the cluster's Kubernetes version.
// A missing kubectl binary is not an error — minikube bundles its own.
func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName string) error {
	register.Reg.SetStep(register.Done)
	if kcs.KeepContext {
		out.T(style.Kubectl, "To connect to this cluster, use: kubectl --context={{.name}}", out.V{"name": kcs.ClusterName})
	} else {
		out.T(style.Ready, `Done! kubectl is now configured to use "{{.name}}"`, out.V{"name": machineName})
	}

	path, err := exec.LookPath("kubectl")
	if err != nil {
		out.ErrT(style.Kubectl, "Kubectl not found in your path")
		out.ErrT(style.Workaround, "You can use kubectl inside minikube. For more information, visit https://minikube.sigs.k8s.io/docs/handbook/kubectl/")
		out.ErrT(style.Tip, "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/")
		return nil
	}

	gitVersion, err := kubectlVersion(path)
	if err != nil {
		return err
	}

	client, err := semver.Make(strings.TrimPrefix(gitVersion, version.VersionPrefix))
	if err != nil {
		return errors.Wrap(err, "client semver")
	}

	cluster := semver.MustParse(strings.TrimPrefix(k8sVersion, version.VersionPrefix))
	minorSkew := int(math.Abs(float64(int(client.Minor) - int(cluster.Minor))))
	glog.Infof("kubectl: %s, cluster: %s (minor skew: %d)", client, cluster, minorSkew)

	// Warn beyond a one-minor-version skew between client and cluster.
	if client.Major != cluster.Major || minorSkew > 1 {
		out.Ln("")
		out.WarningT("{{.path}} is version {{.client_version}}, which may be incompatible with Kubernetes {{.cluster_version}}.",
			out.V{"path": path, "client_version": client, "cluster_version": cluster})
		out.WarningT("You can also use 'minikube kubectl -- get pods' to invoke a matching version",
			out.V{"path": path, "client_version": client})
	}
	return nil
}
// maybeDeleteAndRetry deletes the cluster and retries the start once when
// --delete-on-failure is set; otherwise it simply returns originalErr.
// On retry it regenerates the cluster config from flags, then re-provisions
// and restarts every node, returning the control-plane node's kubeconfig.
func maybeDeleteAndRetry(cmd *cobra.Command, existing config.ClusterConfig, n config.Node, existingAddons map[string]bool, originalErr error) (*kubeconfig.Settings, error) {
	if viper.GetBool(deleteOnFailure) {
		out.WarningT("Node {{.name}} failed to start, deleting and trying again.", out.V{"name": n.Name})
		// Start failed, delete the cluster and try again
		profile, err := config.LoadProfile(existing.Name)
		if err != nil {
			out.ErrT(style.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": existing.Name})
		}

		err = deleteProfile(profile)
		if err != nil {
			out.WarningT("Failed to delete cluster {{.name}}, proceeding with retry anyway.", out.V{"name": existing.Name})
		}

		// Re-generate the cluster config, just in case the failure was related to an old config format
		cc := updateExistingConfigFromFlags(cmd, &existing)
		var kubeconfig *kubeconfig.Settings
		for _, n := range cc.Nodes {
			// NOTE(review): &n aliases the loop variable; the pointer is used
			// within the same iteration, but confirm Provision/Start do not
			// retain it past the call.
			r, p, m, h, err := node.Provision(&cc, &n, n.ControlPlane, false)
			s := node.Starter{
				Runner:         r,
				PreExists:      p,
				MachineAPI:     m,
				Host:           h,
				Cfg:            &cc,
				Node:           &n,
				ExistingAddons: existingAddons,
			}
			if err != nil {
				// Ok we failed again, let's bail
				return nil, err
			}

			k, err := node.Start(s, n.ControlPlane)
			if n.ControlPlane {
				kubeconfig = k
			}
			if err != nil {
				// Ok we failed again, let's bail
				return nil, err
			}
		}
		return kubeconfig, nil
	}
	// Don't delete the cluster unless they ask
	return nil, originalErr
}
// kubectlVersion returns the git version string reported by the kubectl
// binary at path, supporting both modern (--output=json) and very old
// (--short) client invocations.
func kubectlVersion(path string) (string, error) {
	raw, err := exec.Command(path, "version", "--client", "--output=json").Output()
	if err != nil {
		// really old Kubernetes clients did not have the --output parameter
		legacy, lerr := exec.Command(path, "version", "--client", "--short").Output()
		if lerr != nil {
			return "", errors.Wrap(lerr, "exec")
		}
		trimmed := strings.TrimSpace(string(legacy))
		return strings.Replace(trimmed, "Client Version: ", "", 1), nil
	}

	var cv struct {
		ClientVersion struct {
			GitVersion string `json:"gitVersion"`
		} `json:"clientVersion"`
	}
	if err := json.Unmarshal(raw, &cv); err != nil {
		return "", errors.Wrap(err, "unmarshal")
	}

	return cv.ClientVersion.GitVersion, nil
}
// selectDriver picks the driver to use, in priority order: the existing
// profile's driver, --driver, the deprecated --vm-driver, then
// auto-detection. The returned bool reports whether the choice was
// deterministic (profile or flag); alternates are returned only for the
// auto-detected case, for fallback in runStart.
func selectDriver(existing *config.ClusterConfig) (registry.DriverState, []registry.DriverState, bool) {
	// Technically unrelated, but important to perform before detection
	driver.SetLibvirtURI(viper.GetString(kvmQemuURI))
	register.Reg.SetStep(register.SelectingDriver)
	// By default, the driver is whatever we used last time
	if existing != nil {
		old := hostDriver(existing)
		ds := driver.Status(old)
		out.T(style.Sparkle, `Using the {{.driver}} driver based on existing profile`, out.V{"driver": ds.String()})
		return ds, nil, true
	}

	// Default to looking at the new driver parameter
	if d := viper.GetString("driver"); d != "" {
		if vmd := viper.GetString("vm-driver"); vmd != "" {
			// Output a warning
			warning := `Both driver={{.driver}} and vm-driver={{.vmd}} have been set.
Since vm-driver is deprecated, minikube will default to driver={{.driver}}.
If vm-driver is set in the global config, please run "minikube config unset vm-driver" to resolve this warning.
`
			out.WarningT(warning, out.V{"driver": d, "vmd": vmd})
		}
		ds := driver.Status(d)
		if ds.Name == "" {
			exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": d, "os": runtime.GOOS})
		}
		out.T(style.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()})
		return ds, nil, true
	}

	// Fallback to old driver parameter
	if d := viper.GetString("vm-driver"); d != "" {
		ds := driver.Status(viper.GetString("vm-driver"))
		if ds.Name == "" {
			exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": d, "os": runtime.GOOS})
		}
		out.T(style.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()})
		return ds, nil, true
	}

	// No driver specified anywhere: auto-detect from available choices.
	choices := driver.Choices(viper.GetBool("vm"))
	pick, alts, rejects := driver.Suggest(choices)
	if pick.Name == "" {
		out.T(style.ThumbsDown, "Unable to pick a default driver. Here is what was considered, in preference order:")
		for _, r := range rejects {
			out.Infof("{{ .name }}: {{ .rejection }}", out.V{"name": r.Name, "rejection": r.Rejection})
		}
		exit.Message(reason.DrvNotDetected, "No possible driver was detected. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/")
	}

	if len(alts) > 1 {
		altNames := []string{}
		for _, a := range alts {
			altNames = append(altNames, a.String())
		}
		out.T(style.Sparkle, `Automatically selected the {{.driver}} driver. Other choices: {{.alternates}}`, out.V{"driver": pick.Name, "alternates": strings.Join(altNames, ", ")})
	} else {
		out.T(style.Sparkle, `Automatically selected the {{.driver}} driver`, out.V{"driver": pick.String()})
	}
	return pick, alts, false
}
// hostDriver returns the actual driver used by a libmachine host, which can
// differ from our config. Every failure path falls back to the driver name
// recorded in the existing config (preferring the legacy VMDriver field
// when the machine itself cannot be loaded).
func hostDriver(existing *config.ClusterConfig) string {
	if existing == nil {
		return ""
	}
	api, err := machine.NewAPIClient()
	if err != nil {
		glog.Warningf("selectDriver NewAPIClient: %v", err)
		return existing.Driver
	}

	cp, err := config.PrimaryControlPlane(existing)
	if err != nil {
		glog.Warningf("Unable to get control plane from existing config: %v", err)
		return existing.Driver
	}
	machineName := driver.MachineName(*existing, cp)
	h, err := api.Load(machineName)
	if err != nil {
		glog.Warningf("api.Load failed for %s: %v", machineName, err)
		if existing.VMDriver != "" {
			return existing.VMDriver
		}
		return existing.Driver
	}
	return h.Driver.DriverName()
}
// validateSpecifiedDriver makes sure that if a user has passed in a driver
// it matches the existing cluster if there is one. A mismatch terminates
// with advice on how to delete or restart the existing cluster.
func validateSpecifiedDriver(existing *config.ClusterConfig) {
	if existing == nil {
		return
	}

	// --driver takes precedence over the deprecated --vm-driver.
	requested := viper.GetString("driver")
	if requested == "" {
		requested = viper.GetString("vm-driver")
	}
	// Neither --vm-driver or --driver was specified
	if requested == "" {
		return
	}

	old := hostDriver(existing)
	if requested == old {
		return
	}

	exit.Advice(
		reason.GuestDrvMismatch,
		`The existing "{{.name}}" cluster was created using the "{{.old}}" driver, which is incompatible with requested "{{.new}}" driver.`,
		"Delete the existing '{{.name}}' cluster using: '{{.delcommand}}', or start the existing '{{.name}}' cluster using: '{{.command}} --driver={{.old}}'",
		out.V{
			"name":       existing.Name,
			"new":        requested,
			"old":        old,
			"command":    mustload.ExampleCmd(existing.Name, "start"),
			"delcommand": mustload.ExampleCmd(existing.Name, "delete"),
		},
	)
}
// validateDriver validates that the selected driver appears sane, exits if not.
// It distinguishes: unsupported on this OS, not installed, installed but not
// running, and degraded-but-usable (performance warning only).
func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) {
	name := ds.Name
	glog.Infof("validating driver %q against %+v", name, existing)
	if !driver.Supported(name) {
		exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": name, "os": runtime.GOOS})
	}

	// if we are only downloading artifacts for a driver, we can stop validation here
	if viper.GetBool("download-only") {
		return
	}

	st := ds.State
	glog.Infof("status for %s: %+v", name, st)

	// Usable but degraded: warn without exiting.
	if st.NeedsImprovement {
		out.WarnReason(reason.Kind{
			ID:     fmt.Sprintf("PROVIDER_%s_IMPROVEMENT", strings.ToUpper(name)),
			Advice: translate.T(st.Fix),
			Style:  style.Improvement,
		}, `The '{{.driver}}' driver reported a performance issue`, out.V{"driver": name})
	}

	if st.Error == nil {
		return
	}

	if !st.Installed {
		exit.Message(reason.Kind{
			ID:       fmt.Sprintf("PROVIDER_%s_NOT_FOUND", strings.ToUpper(name)),
			Advice:   translate.T(st.Fix),
			ExitCode: reason.ExProviderNotFound,
			URL:      st.Doc,
			Style:    style.Shrug,
		}, `The '{{.driver}}' provider was not found: {{.error}}`, out.V{"driver": name, "error": st.Error})
	}

	// Installed but erroring: distinguish "not running" from other failures.
	id := fmt.Sprintf("PROVIDER_%s_ERROR", strings.ToUpper(name))
	code := reason.ExProviderUnavailable

	if !st.Running {
		id = fmt.Sprintf("PROVIDER_%s_NOT_RUNNING", strings.ToUpper(name))
		code = reason.ExProviderNotRunning
	}

	exitIfNotForced(reason.Kind{
		ID:       id,
		Advice:   translate.T(st.Fix),
		ExitCode: code,
		URL:      st.Doc,
		Style:    style.Fatal,
	}, st.Error.Error())
}
// selectImageRepository probes the configured image mirrors and returns
// whether a reachable one was found, the repository to use (or the first
// configured mirror as fallback when none responded), and an error for an
// unknown country code. Reachability is checked by fetching the pause
// image manifest for Kubernetes version v.
func selectImageRepository(mirrorCountry string, v semver.Version) (bool, string, error) {
	var tryCountries []string
	var fallback string
	glog.Infof("selecting image repository for country %s ...", mirrorCountry)

	if mirrorCountry != "" {
		localRepos, ok := constants.ImageRepositories[mirrorCountry]
		if !ok || len(localRepos) == 0 {
			return false, "", fmt.Errorf("invalid image mirror country code: %s", mirrorCountry)
		}
		tryCountries = append(tryCountries, mirrorCountry)

		// we'll use the first repository as fallback
		// when none of the mirrors in the given location is available
		fallback = localRepos[0]
	} else {
		// always make sure global is preferred
		tryCountries = append(tryCountries, "global")
		for k := range constants.ImageRepositories {
			if strings.ToLower(k) != "global" {
				tryCountries = append(tryCountries, k)
			}
		}
	}

	// checkRepository reports whether the pause image manifest can be
	// fetched from the given repository (nil error means reachable).
	checkRepository := func(repo string) error {
		pauseImage := images.Pause(v, repo)
		ref, err := name.ParseReference(pauseImage, name.WeakValidation)
		if err != nil {
			return err
		}
		_, err = remote.Image(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain))
		return err
	}

	for _, code := range tryCountries {
		localRepos := constants.ImageRepositories[code]
		for _, repo := range localRepos {
			err := checkRepository(repo)
			if err == nil {
				return true, repo, nil
			}
		}
	}

	return false, fallback, nil
}
// validateUser validates minikube is run by the recommended user
// (privileged or regular): root-requiring drivers must run as root, and
// all other drivers should not run as root. --force downgrades both
// failures to warnings.
func validateUser(drvName string) {
	u, err := user.Current()
	if err != nil {
		glog.Errorf("Error getting the current user: %v", err)
		return
	}

	useForce := viper.GetBool(force)

	if driver.NeedsRoot(drvName) && u.Uid != "0" && !useForce {
		exit.Message(reason.DrvNeedsRoot, `The "{{.driver_name}}" driver requires root privileges. Please run minikube using 'sudo -E minikube start --driver={{.driver_name}}'.`, out.V{"driver_name": drvName})
	}

	// If root is required, or we are not root, exit early
	if driver.NeedsRoot(drvName) || u.Uid != "0" {
		return
	}

	// Past this point we are root with a driver that should not need it.
	out.ErrT(style.Stopped, `The "{{.driver_name}}" driver should not be used with root privileges.`, out.V{"driver_name": drvName})
	out.ErrT(style.Tip, "If you are running minikube within a VM, consider using --driver=none:")
	out.ErrT(style.Documentation, " https://minikube.sigs.k8s.io/docs/reference/drivers/none/")

	cname := ClusterFlagValue()
	_, err = config.Load(cname)
	if err == nil || !config.IsNotExist(err) {
		out.ErrT(style.Tip, "Tip: To remove this root owned cluster, run: sudo {{.cmd}}", out.V{"cmd": mustload.ExampleCmd(cname, "delete")})
	}

	if !useForce {
		exit.Message(reason.DrvAsRoot, `The "{{.driver_name}}" driver should not be used with root privileges.`, out.V{"driver_name": drvName})
	}
}
// memoryLimits returns the amount of memory allocated to the system and
// hypervisor, the return value is in MiB. The container limit is zero for
// non-KIC drivers.
func memoryLimits(drvName string) (int, int, error) {
	hostInfo, cpuErr, memErr, diskErr := machine.CachedHostInfo()
	if cpuErr != nil {
		glog.Warningf("could not get system cpu info while verifying memory limits, which might be okay: %v", cpuErr)
	}
	if diskErr != nil {
		glog.Warningf("could not get system disk info while verifying memory limits, which might be okay: %v", diskErr)
	}
	// CPU/disk lookup problems are tolerable; a memory failure is not.
	if memErr != nil {
		return -1, -1, memErr
	}

	system := int(hostInfo.Memory)
	container := 0
	if driver.IsKIC(drvName) {
		di, err := oci.CachedDaemonInfo(drvName)
		if err != nil {
			return -1, -1, err
		}
		container = int(di.TotalMemory / 1024 / 1024)
	}

	return system, container, nil
}
// suggestMemoryAllocation calculates the default memory footprint in MiB:
// an explicit --memory value wins; otherwise 25% of system RAM (rounded to
// 100 MiB, split across nodes), clamped between a floor of 2200 and a
// ceiling of 6000 (or the container limit minus slack, when one exists).
func suggestMemoryAllocation(sysLimit int, containerLimit int, nodes int) int {
	if mem := viper.GetInt(memory); mem != 0 {
		return mem
	}

	floor := 2200
	ceiling := 6000
	if sysLimit > 0 && floor > sysLimit {
		return sysLimit
	}

	// If there are container limits, add tiny bit of slack for non-minikube components
	if containerLimit > 0 {
		if floor > containerLimit {
			return containerLimit
		}
		ceiling = containerLimit - 48
	}

	// Suggest 25% of RAM, rounded to nearest 100MB. Hyper-V requires an even number!
	suggested := int(float32(sysLimit)/400.0) * 100
	if nodes > 1 {
		suggested /= nodes
	}

	switch {
	case suggested > ceiling:
		return ceiling
	case suggested < floor:
		return floor
	default:
		return suggested
	}
}
// validateRequestedMemorySize validates the memory size matches the minimum
// recommended, and warns about configurations likely to perform poorly
// (Docker Desktop caps, allocations above the system limit, etc.).
func validateRequestedMemorySize(req int, drvName string) {
	// TODO: Fix MB vs MiB confusion
	sysLimit, containerLimit, err := memoryLimits(drvName)
	if err != nil {
		glog.Warningf("Unable to query memory limits: %v", err)
	}

	// Detect if their system doesn't have enough memory to work with.
	if driver.IsKIC(drvName) && containerLimit < minUsableMem {
		if driver.IsDockerDesktop(drvName) {
			if runtime.GOOS == "darwin" {
				exitIfNotForced(reason.RsrcInsufficientDarwinDockerMemory, "Docker Desktop only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes", out.V{"size": containerLimit, "req": minUsableMem, "recommend": "2.25 GB"})
			} else {
				exitIfNotForced(reason.RsrcInsufficientWindowsDockerMemory, "Docker Desktop only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes", out.V{"size": containerLimit, "req": minUsableMem, "recommend": "2.25 GB"})
			}
		}
		exitIfNotForced(reason.RsrcInsufficientContainerMemory, "{{.driver}} only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes", out.V{"size": containerLimit, "driver": drvName, "req": minUsableMem})
	}

	if sysLimit < minUsableMem {
		// Fix: report the system total here — the previous code passed
		// containerLimit as {{.size}}, misreporting the host's memory.
		exitIfNotForced(reason.RsrcInsufficientSysMemory, "System only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes", out.V{"size": sysLimit, "driver": drvName, "req": minUsableMem})
	}

	if req < minUsableMem {
		exitIfNotForced(reason.RsrcInsufficientReqMemory, "Requested memory allocation {{.requested}}MiB is less than the usable minimum of {{.minimum_memory}}MB", out.V{"requested": req, "minimum_memory": minUsableMem})
	}
	if req < minRecommendedMem {
		out.WarnReason(reason.RsrcInsufficientReqMemory, "Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommend}}MB. Deployments may fail.", out.V{"requested": req, "recommend": minRecommendedMem})
	}

	if driver.IsDockerDesktop(drvName) && containerLimit < 2997 && sysLimit > 8000 { // for users with more than 8 GB advice 3 GB
		r := reason.RsrcInsufficientDarwinDockerMemory
		// Fix: runtime.GOOS values are lowercase ("windows"); the previous
		// comparison against "Windows" could never match.
		if runtime.GOOS == "windows" {
			r = reason.RsrcInsufficientWindowsDockerMemory
		}
		r.Style = style.Improvement
		out.WarnReason(r, "Docker Desktop has access to only {{.size}}MiB of the {{.sys}}MiB in available system memory. Consider increasing this for improved performance.", out.V{"size": containerLimit, "sys": sysLimit, "recommend": "3 GB"})
	}

	advised := suggestMemoryAllocation(sysLimit, containerLimit, viper.GetInt(nodes))
	if req > sysLimit {
		exitIfNotForced(reason.Kind{ID: "RSRC_OVER_ALLOC_MEM", Advice: "Start minikube with less memory allocated: 'minikube start --memory={{.advised}}mb'"},
			`Requested memory allocation {{.requested}}MB is more than your system limit {{.system_limit}}MB.`,
			out.V{"requested": req, "system_limit": sysLimit, "advised": advised})
	}

	// Recommend 1GB to handle OS/VM overhead
	maxAdvised := sysLimit - 1024
	if req > maxAdvised {
		out.WarnReason(reason.Kind{ID: "RSRC_OVER_ALLOC_MEM", Advice: "Start minikube with less memory allocated: 'minikube start --memory={{.advised}}mb'"},
			`The requested memory allocation of {{.requested}}MiB does not leave room for system overhead (total system memory: {{.system_limit}}MiB). You may face stability issues.`,
			out.V{"requested": req, "system_limit": sysLimit, "advised": advised})
	}
}
// validateCPUCount validates the cpu count matches the minimum recommended,
// and for container (KIC) drivers additionally checks that the container
// daemon itself has at least 2 CPUs configured.
func validateCPUCount(drvName string) {
	var cpuCount int
	if driver.BareMetal(drvName) {
		// Uses the gopsutil cpu package to count the number of physical cpu cores
		ci, err := cpu.Counts(false)
		if err != nil {
			glog.Warningf("Unable to get CPU info: %v", err)
		} else {
			cpuCount = ci
		}
	} else {
		cpuCount = viper.GetInt(cpus)
	}
	if cpuCount < minimumCPUS {
		exitIfNotForced(reason.RsrcInsufficientCores, "Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}", out.V{"requested_cpus": cpuCount, "minimum_cpus": minimumCPUS})
	}

	// The remaining checks only apply to container-based (KIC) drivers.
	if !driver.IsKIC((drvName)) {
		return
	}

	si, err := oci.CachedDaemonInfo(drvName)
	if err != nil {
		// Cached daemon info may be unavailable; retry with a live query.
		out.T(style.Confused, "Failed to verify '{{.driver_name}} info' will try again ...", out.V{"driver_name": drvName})
		si, err = oci.DaemonInfo(drvName)
		if err != nil {
			exit.Message(reason.Usage, "Ensure your {{.driver_name}} is running and is healthy.", out.V{"driver_name": driver.FullName(drvName)})
		}
	}

	// looks good
	if si.CPUs >= 2 {
		return
	}

	if drvName == oci.Docker && runtime.GOOS == "darwin" {
		exitIfNotForced(reason.RsrcInsufficientDarwinDockerCores, "Docker Desktop has less than 2 CPUs configured, but Kubernetes requires at least 2 to be available")
	} else if drvName == oci.Docker && runtime.GOOS == "windows" {
		exitIfNotForced(reason.RsrcInsufficientWindowsDockerCores, "Docker Desktop has less than 2 CPUs configured, but Kubernetes requires at least 2 to be available")
	} else {
		exitIfNotForced(reason.RsrcInsufficientCores, "{{.driver_name}} has less than 2 CPUs available, but Kubernetes requires at least 2 to be available", out.V{"driver_name": driver.FullName(viper.GetString("driver"))})
	}
}
// validateFlags validates the supplied flags against known bad combinations:
// disk/memory/cpu minimums, container runtime spelling, bare-metal (none
// driver) restrictions, kubeadm extra-config parameters, output format, and
// registry mirror URLs. Fatal problems terminate via exit.* / exitIfNotForced.
func validateFlags(cmd *cobra.Command, drvName string) {
	if cmd.Flags().Changed(humanReadableDiskSize) {
		diskSizeMB, err := util.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
		if err != nil {
			exitIfNotForced(reason.Usage, "Validation unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err})
		}

		if diskSizeMB < minimumDiskSize {
			exitIfNotForced(reason.RsrcInsufficientStorage, "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}", out.V{"requested_size": diskSizeMB, "minimum_size": minimumDiskSize})
		}
	}

	if cmd.Flags().Changed(cpus) {
		if !driver.HasResourceLimits(drvName) {
			out.WarningT("The '{{.name}}' driver does not respect the --cpus flag", out.V{"name": drvName})
		}
	}
	validateCPUCount(drvName)

	if cmd.Flags().Changed(memory) {
		if !driver.HasResourceLimits(drvName) {
			out.WarningT("The '{{.name}}' driver does not respect the --memory flag", out.V{"name": drvName})
		}
		req, err := util.CalculateSizeInMB(viper.GetString(memory))
		if err != nil {
			exitIfNotForced(reason.Usage, "Unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err})
		}
		validateRequestedMemorySize(req, drvName)
	}

	if cmd.Flags().Changed(containerRuntime) {
		runtime := strings.ToLower(viper.GetString(containerRuntime))
		validOptions := cruntime.ValidRuntimes()
		// `crio` is accepted as an alternative spelling to `cri-o`
		validOptions = append(validOptions, constants.CRIO)

		var validRuntime bool
		for _, option := range validOptions {
			if runtime == option {
				validRuntime = true
			}

			// Convert `cri-o` to `crio` as the K8s config uses the `crio` spelling
			if runtime == "cri-o" {
				viper.Set(containerRuntime, constants.CRIO)
			}
		}

		if !validRuntime {
			exit.Message(reason.Usage, `Invalid Container Runtime: "{{.runtime}}". Valid runtimes are: {{.validOptions}}`, out.V{"runtime": runtime, "validOptions": strings.Join(cruntime.ValidRuntimes(), ", ")})
		}
	}

	if driver.BareMetal(drvName) {
		if ClusterFlagValue() != constants.DefaultClusterName {
			exit.Message(reason.DrvUnsupportedProfile, "The '{{.name}} driver does not support multiple profiles: https://minikube.sigs.k8s.io/docs/reference/drivers/none/", out.V{"name": drvName})
		}

		runtime := viper.GetString(containerRuntime)
		if runtime != "docker" {
			out.WarningT("Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!", out.V{"runtime": runtime})
		}

		// conntrack is required starting with Kubernetes 1.18, include the release candidates for completion
		version, _ := util.ParseKubernetesVersion(getKubernetesVersion(nil))
		if version.GTE(semver.MustParse("1.18.0-beta.1")) {
			if _, err := exec.LookPath("conntrack"); err != nil {
				exit.Message(reason.GuestMissingConntrack, "Sorry, Kubernetes {{.k8sVersion}} requires conntrack to be installed in root's path", out.V{"k8sVersion": version.String()})
			}
		}
	}

	// validate kubeadm extra args
	if invalidOpts := bsutil.FindInvalidExtraConfigFlags(config.ExtraOptions); len(invalidOpts) > 0 {
		out.WarningT(
			"These --extra-config parameters are invalid: {{.invalid_extra_opts}}",
			out.V{"invalid_extra_opts": invalidOpts},
		)
		exit.Message(
			reason.Usage,
			"Valid components are: {{.valid_extra_opts}}",
			out.V{"valid_extra_opts": bsutil.KubeadmExtraConfigOpts},
		)
	}

	// check that kubeadm extra args contain only allowed parameters
	for param := range config.ExtraOptions.AsMap().Get(bsutil.Kubeadm) {
		if !config.ContainsParam(bsutil.KubeadmExtraArgsAllowed[bsutil.KubeadmCmdParam], param) &&
			!config.ContainsParam(bsutil.KubeadmExtraArgsAllowed[bsutil.KubeadmConfigParam], param) {
			exit.Message(reason.Usage, "Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config", out.V{"parameter_name": param})
		}
	}

	if s := viper.GetString(startOutput); s != "text" && s != "json" {
		exit.Message(reason.Usage, "Sorry, please set the --output flag to one of the following valid options: [text,json]")
	}

	validateRegistryMirror()
}
// validateRegistryMirror validates that each --registry-mirror argument is
// an http(s) URL with no path component (e.g. http://localhost). Invalid
// entries terminate with a usage error.
func validateRegistryMirror() {
	for _, loc := range registryMirror {
		URL, err := url.Parse(loc)
		if err != nil {
			glog.Errorln("Error Parsing URL: ", err)
			// Fix: previously execution continued after a parse failure and
			// dereferenced the nil *url.URL below, panicking. Treat an
			// unparseable mirror as invalid instead.
			exit.Message(reason.Usage, "Sorry, the url provided with the --registry-mirror flag is invalid: {{.url}}", out.V{"url": loc})
		}
		if (URL.Scheme != "http" && URL.Scheme != "https") || URL.Path != "" {
			exit.Message(reason.Usage, "Sorry, the url provided with the --registry-mirror flag is invalid: {{.url}}", out.V{"url": loc})
		}
	}
}
// createNode returns the cluster config together with the control-plane node
// to start. When restarting an existing cluster, the existing control plane
// is reused and each node's KubernetesVersion is refreshed from the current
// flags/config; otherwise a brand-new control-plane node is created.
func createNode(cc config.ClusterConfig, kubeNodeName string, existing *config.ClusterConfig) (config.ClusterConfig, config.Node, error) {
	// Create the initial node, which will necessarily be a control plane
	if existing != nil {
		cp, err := config.PrimaryControlPlane(existing)
		// Check the error before touching cp: the original code assigned
		// cp.KubernetesVersion first, using a potentially invalid result.
		if err != nil {
			return cc, config.Node{}, err
		}
		cp.KubernetesVersion = getKubernetesVersion(&cc)
		// Make sure that existing nodes honor if KubernetesVersion gets specified on restart
		// KubernetesVersion is the only attribute that the user can override in the Node object
		nodes := []config.Node{}
		for _, n := range existing.Nodes {
			n.KubernetesVersion = getKubernetesVersion(&cc)
			nodes = append(nodes, n)
		}
		cc.Nodes = nodes
		return cc, cp, nil
	}
	cp := config.Node{
		Port:              cc.KubernetesConfig.NodePort,
		KubernetesVersion: getKubernetesVersion(&cc),
		Name:              kubeNodeName,
		ControlPlane:      true,
		Worker:            true,
	}
	cc.Nodes = []config.Node{cp}
	return cc, cp, nil
}
// autoSetDriverOptions applies the selected driver's default settings for any
// flag the user did not set explicitly: extra-config entries, image caching,
// container runtime, and bootstrapper.
func autoSetDriverOptions(cmd *cobra.Command, drvName string) (err error) {
	hints := driver.FlagDefaults(drvName)
	// Ranging over a possibly-empty slice makes an explicit length guard
	// unnecessary.
	for _, opt := range hints.ExtraOptions {
		if config.ExtraOptions.Exists(opt) {
			glog.Infof("skipping extra-config %q.", opt)
			continue
		}
		glog.Infof("auto setting extra-config to %q.", opt)
		err = config.ExtraOptions.Set(opt)
		if err != nil {
			err = errors.Wrapf(err, "setting extra option %s", opt)
		}
	}
	if !cmd.Flags().Changed(cacheImages) {
		viper.Set(cacheImages, hints.CacheImages)
	}
	if hints.ContainerRuntime != "" && !cmd.Flags().Changed(containerRuntime) {
		viper.Set(containerRuntime, hints.ContainerRuntime)
		glog.Infof("auto set %s to %q.", containerRuntime, hints.ContainerRuntime)
	}
	if hints.Bootstrapper != "" && !cmd.Flags().Changed(cmdcfg.Bootstrapper) {
		viper.Set(cmdcfg.Bootstrapper, hints.Bootstrapper)
		glog.Infof("auto set %s to %q.", cmdcfg.Bootstrapper, hints.Bootstrapper)
	}
	return err
}
// validateKubernetesVersion ensures that the requested version is reasonable:
// it warns/exits for versions older than the oldest supported release, hard
// exits on an attempted downgrade of an existing cluster, and hints when a
// newer default version is available.
func validateKubernetesVersion(old *config.ClusterConfig) {
	// Parse error is deliberately ignored: getKubernetesVersion already
	// exits on an unparseable version.
	nvs, _ := semver.Make(strings.TrimPrefix(getKubernetesVersion(old), version.VersionPrefix))
	oldestVersion, err := semver.Make(strings.TrimPrefix(constants.OldestKubernetesVersion, version.VersionPrefix))
	if err != nil {
		exit.Message(reason.InternalSemverParse, "Unable to parse oldest Kubernetes version from constants: {{.error}}", out.V{"error": err})
	}
	defaultVersion, err := semver.Make(strings.TrimPrefix(constants.DefaultKubernetesVersion, version.VersionPrefix))
	if err != nil {
		exit.Message(reason.InternalSemverParse, "Unable to parse default Kubernetes version from constants: {{.error}}", out.V{"error": err})
	}
	if nvs.LT(oldestVersion) {
		out.WarningT("Specified Kubernetes version {{.specified}} is less than the oldest supported version: {{.oldest}}", out.V{"specified": nvs, "oldest": constants.OldestKubernetesVersion})
		if !viper.GetBool(force) {
			out.WarningT("You can force an unsupported Kubernetes version via the --force flag")
		}
		exitIfNotForced(reason.KubernetesTooOld, "Kubernetes {{.version}} is not supported by this release of minikube", out.V{"version": nvs})
	}
	// Downgrade / upgrade checks only apply to an existing cluster with a
	// recorded version.
	if old == nil || old.KubernetesConfig.KubernetesVersion == "" {
		return
	}
	ovs, err := semver.Make(strings.TrimPrefix(old.KubernetesConfig.KubernetesVersion, version.VersionPrefix))
	if err != nil {
		glog.Errorf("Error parsing old version %q: %v", old.KubernetesConfig.KubernetesVersion, err)
	}
	if nvs.LT(ovs) {
		profileArg := ""
		if old.Name != constants.DefaultClusterName {
			profileArg = fmt.Sprintf(" -p %s", old.Name)
		}
		suggestedName := old.Name + "2"
		// The extra template values (prefix/profile/suggestedName) feed the
		// advice text attached to reason.KubernetesDowngrade — TODO confirm.
		exit.Message(reason.KubernetesDowngrade, "Unable to safely downgrade existing Kubernetes v{{.old}} cluster to v{{.new}}",
			out.V{"prefix": version.VersionPrefix, "new": nvs, "old": ovs, "profile": profileArg, "suggestedName": suggestedName})
	}
	if defaultVersion.GT(nvs) {
		out.T(style.New, "Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.prefix}}{{.new}}", out.V{"prefix": version.VersionPrefix, "new": defaultVersion})
	}
}
// getKubernetesVersion resolves the Kubernetes version to use, honoring (in
// order) the --kubernetes-version flag, the existing cluster's version, and
// the built-in default. "stable"/"latest" aliases are expanded, and the
// result is normalized to include the "v" prefix.
func getKubernetesVersion(old *config.ClusterConfig) string {
	requested := viper.GetString(kubernetesVersion)
	// Fall back to the previously-configured version when no flag was given.
	if requested == "" && old != nil {
		requested = old.KubernetesConfig.KubernetesVersion
	}
	switch {
	case requested == "" || strings.EqualFold(requested, "stable"):
		requested = constants.DefaultKubernetesVersion
	case strings.EqualFold(requested, "latest"):
		requested = constants.NewestKubernetesVersion
	}
	nvs, err := semver.Make(strings.TrimPrefix(requested, version.VersionPrefix))
	if err != nil {
		exit.Message(reason.Usage, `Unable to parse "{{.kubernetes_version}}": {{.error}}`, out.V{"kubernetes_version": requested, "error": err})
	}
	return version.VersionPrefix + nvs.String()
}
// exitIfNotForced terminates minikube with the given reason and message
// unless the user supplied --force, in which case the message is printed as
// an error and execution continues.
func exitIfNotForced(r reason.Kind, message string, v ...out.V) {
	if !viper.GetBool(force) {
		// exit.Message does not return, so the out.Error call below is only
		// reached when --force was set.
		exit.Message(r, message, v...)
	}
	out.Error(r, message, v...)
}
Simplify kubectl advice messaging
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"encoding/json"
"fmt"
"math"
"net"
"net/url"
"os"
"os/exec"
"os/user"
"runtime"
"strings"
"github.com/blang/semver"
"github.com/docker/machine/libmachine/ssh"
"github.com/golang/glog"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/pkg/errors"
"github.com/shirou/gopsutil/cpu"
gopshost "github.com/shirou/gopsutil/host"
"github.com/spf13/cobra"
"github.com/spf13/viper"
cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil"
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/download"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/kubeconfig"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/mustload"
"k8s.io/minikube/pkg/minikube/node"
"k8s.io/minikube/pkg/minikube/notify"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/out/register"
"k8s.io/minikube/pkg/minikube/reason"
"k8s.io/minikube/pkg/minikube/style"
"k8s.io/minikube/pkg/minikube/registry"
"k8s.io/minikube/pkg/minikube/translate"
"k8s.io/minikube/pkg/util"
"k8s.io/minikube/pkg/version"
)
// Flag destinations that are bound manually rather than through viper
// (slice and IP-typed flags; see the registry-mirror workaround in runStart).
var (
	registryMirror   []string
	insecureRegistry []string
	apiServerNames   []string
	apiServerIPs     []net.IP
)
// init registers all of the start command's flag groups and binds them to
// viper so values can also be supplied via config file or environment.
func init() {
	initMinikubeFlags()
	initKubernetesFlags()
	initDriverFlags()
	initNetworkingFlags()
	if err := viper.BindPFlags(startCmd.Flags()); err != nil {
		exit.Error(reason.InternalBindFlags, "unable to bind flags", err)
	}
}
// startCmd represents the start command; its heavy lifting is delegated to
// runStart.
var startCmd = &cobra.Command{
	Use:   "start",
	Short: "Starts a local Kubernetes cluster",
	Long:  "Starts a local Kubernetes cluster",
	Run:   runStart,
}
// platform generates a user-readable platform message, e.g.
// "Darwin 10.15" or "Ubuntu 20.04 (kvm/amd64)".
func platform() string {
	var s strings.Builder
	// Show the distro version if possible
	hi, err := gopshost.Info()
	if err == nil {
		// NOTE(review): strings.Title is deprecated in newer Go releases;
		// consider golang.org/x/text/cases if the toolchain is upgraded.
		s.WriteString(fmt.Sprintf("%s %s", strings.Title(hi.Platform), hi.PlatformVersion))
		glog.Infof("hostinfo: %+v", hi)
	} else {
		glog.Warningf("gopshost.Info returned error: %v", err)
		s.WriteString(runtime.GOOS)
	}
	vsys, vrole, err := gopshost.Virtualization()
	if err != nil {
		glog.Warningf("gopshost.Virtualization returned error: %v", err)
	} else {
		glog.Infof("virtualization: %s %s", vsys, vrole)
	}
	// This environment is exotic, let's output a bit more.
	if vrole == "guest" || runtime.GOARCH != "amd64" {
		if vsys != "" {
			s.WriteString(fmt.Sprintf(" (%s/%s)", vsys, runtime.GOARCH))
		} else {
			s.WriteString(fmt.Sprintf(" (%s)", runtime.GOARCH))
		}
	}
	return s.String()
}
// runStart executes the flow of "minikube start": validate flags and profile,
// select a driver (with fallback to alternates), provision the host, start
// the node(s), and report how to use kubectl with the new cluster.
func runStart(cmd *cobra.Command, args []string) {
	register.SetEventLogPath(localpath.EventLog(ClusterFlagValue()))
	out.SetJSON(viper.GetString(startOutput) == "json")
	displayVersion(version.GetVersion())
	// No need to do the update check if no one is going to see it
	if !viper.GetBool(interactive) || !viper.GetBool(dryRun) {
		// Avoid blocking execution on optional HTTP fetches
		go notify.MaybePrintUpdateTextFromGithub()
	}
	displayEnviron(os.Environ())
	if viper.GetBool(force) {
		out.WarningT("minikube skips various validations when --force is supplied; this may lead to unexpected behavior")
	}
	// if --registry-mirror specified when run minikube start,
	// take arg precedence over MINIKUBE_REGISTRY_MIRROR
	// actually this is a hack, because viper 1.0.0 can assign env to variable if StringSliceVar
	// and i can't update it to 1.4.0, it affects too much code
	// other types (like String, Bool) of flag works, so imageRepository, imageMirrorCountry
	// can be configured as MINIKUBE_IMAGE_REPOSITORY and IMAGE_MIRROR_COUNTRY
	// this should be updated to documentation
	if len(registryMirror) == 0 {
		registryMirror = viper.GetStringSlice("registry_mirror")
	}
	if !config.ProfileNameValid(ClusterFlagValue()) {
		out.WarningT("Profile name '{{.name}}' is not valid", out.V{"name": ClusterFlagValue()})
		exit.Message(reason.Usage, "Only alphanumeric and dashes '-' are permitted. Minimum 1 character, starting with alphanumeric.")
	}
	// Load any existing cluster config for this profile; a not-exist error
	// simply means this is a fresh cluster.
	existing, err := config.Load(ClusterFlagValue())
	if err != nil && !config.IsNotExist(err) {
		exit.Message(reason.HostConfigLoad, "Unable to load config: {{.error}}", out.V{"error": err})
	}
	if existing != nil {
		upgradeExistingConfig(existing)
	}
	validateSpecifiedDriver(existing)
	validateKubernetesVersion(existing)
	ds, alts, specified := selectDriver(existing)
	starter, err := provisionWithDriver(cmd, ds, existing)
	if err != nil {
		node.ExitIfFatal(err)
		machine.MaybeDisplayAdvice(err, ds.Name)
		if specified {
			// If the user specified a driver, don't fallback to anything else
			exit.Error(reason.GuestProvision, "error provisioning host", err)
		} else {
			success := false
			// Walk down the rest of the options
			for _, alt := range alts {
				out.WarningT("Startup with {{.old_driver}} driver failed, trying with alternate driver {{.new_driver}}: {{.error}}", out.V{"old_driver": ds.Name, "new_driver": alt.Name, "error": err})
				ds = alt
				// Delete the existing cluster and try again with the next driver on the list
				profile, err := config.LoadProfile(ClusterFlagValue())
				if err != nil {
					glog.Warningf("%s profile does not exist, trying anyways.", ClusterFlagValue())
				}
				err = deleteProfile(profile)
				if err != nil {
					out.WarningT("Failed to delete cluster {{.name}}, proceeding with retry anyway.", out.V{"name": ClusterFlagValue()})
				}
				starter, err = provisionWithDriver(cmd, ds, existing)
				if err != nil {
					continue
				} else {
					// Success!
					success = true
					break
				}
			}
			if !success {
				exit.Error(reason.GuestProvision, "error provisioning host", err)
			}
		}
	}
	if existing != nil && existing.KubernetesConfig.ContainerRuntime == "crio" && driver.IsKIC(existing.Driver) {
		// Stop and start again if it's crio because it's broken above v1.17.3
		out.WarningT("Due to issues with CRI-O post v1.17.3, we need to restart your cluster.")
		out.WarningT("See details at https://github.com/kubernetes/minikube/issues/8861")
		stopProfile(existing.Name)
		starter, err = provisionWithDriver(cmd, ds, existing)
		if err != nil {
			exit.Error(reason.GuestProvision, "error provisioning host", err)
		}
	}
	kubeconfig, err := startWithDriver(cmd, starter, existing)
	if err != nil {
		node.ExitIfFatal(err)
		exit.Error(reason.GuestStart, "failed to start node", err)
	}
	if err := showKubectlInfo(kubeconfig, starter.Node.KubernetesVersion, starter.Cfg.Name); err != nil {
		// Non-fatal: the cluster is already up at this point.
		glog.Errorf("kubectl info: %v", err)
	}
}
// provisionWithDriver validates the chosen driver and flags, generates the
// cluster config, and provisions the machine, returning a node.Starter with
// everything needed to actually start Kubernetes on it. In --dry-run mode it
// exits after validation, before any config files are written.
func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing *config.ClusterConfig) (node.Starter, error) {
	driverName := ds.Name
	glog.Infof("selected driver: %s", driverName)
	validateDriver(ds, existing)
	err := autoSetDriverOptions(cmd, driverName)
	if err != nil {
		glog.Errorf("Error autoSetOptions : %v", err)
	}
	validateFlags(cmd, driverName)
	validateUser(driverName)
	// Download & update the driver, even in --download-only mode
	if !viper.GetBool(dryRun) {
		updateDriver(driverName)
	}
	k8sVersion := getKubernetesVersion(existing)
	cc, n, err := generateClusterConfig(cmd, existing, k8sVersion, driverName)
	if err != nil {
		return node.Starter{}, errors.Wrap(err, "Failed to generate config")
	}
	// This is about as far as we can go without overwriting config files
	if viper.GetBool(dryRun) {
		out.T(style.DryRun, `dry-run validation complete!`)
		os.Exit(0)
	}
	// VM-based drivers need the boot ISO cached locally first.
	if driver.IsVM(driverName) {
		url, err := download.ISO(viper.GetStringSlice(isoURL), cmd.Flags().Changed(isoURL))
		if err != nil {
			return node.Starter{}, errors.Wrap(err, "Failed to cache ISO")
		}
		cc.MinikubeISO = url
	}
	// A nil existingAddons map means "do not install addons" downstream.
	var existingAddons map[string]bool
	if viper.GetBool(installAddons) {
		existingAddons = map[string]bool{}
		if existing != nil && existing.Addons != nil {
			existingAddons = existing.Addons
		}
	}
	if viper.GetBool(nativeSSH) {
		ssh.SetDefaultClient(ssh.Native)
	} else {
		ssh.SetDefaultClient(ssh.External)
	}
	mRunner, preExists, mAPI, host, err := node.Provision(&cc, &n, true, viper.GetBool(deleteOnFailure))
	if err != nil {
		return node.Starter{}, err
	}
	return node.Starter{
		Runner:         mRunner,
		PreExists:      preExists,
		MachineAPI:     mAPI,
		Host:           host,
		ExistingAddons: existingAddons,
		Cfg:            &cc,
		Node:           &n,
	}, nil
}
// startWithDriver starts the primary node (retrying via delete-and-retry when
// enabled), then adds or restarts any additional worker nodes, and returns
// the kubeconfig settings for the cluster.
func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config.ClusterConfig) (*kubeconfig.Settings, error) {
	kubeconfig, err := node.Start(starter, true)
	if err != nil {
		kubeconfig, err = maybeDeleteAndRetry(cmd, *starter.Cfg, *starter.Node, starter.ExistingAddons, err)
		if err != nil {
			return nil, err
		}
	}
	numNodes := viper.GetInt(nodes)
	if existing != nil {
		if numNodes > 1 {
			// We ignore the --nodes parameter if we're restarting an existing cluster
			out.WarningT(`The cluster {{.cluster}} already exists which means the --nodes parameter will be ignored. Use "minikube node add" to add nodes to an existing cluster.`, out.V{"cluster": existing.Name})
		}
		numNodes = len(existing.Nodes)
	}
	if numNodes > 1 {
		if driver.BareMetal(starter.Cfg.Driver) {
			exit.Message(reason.DrvUnsupportedMulti, "The none driver is not compatible with multi-node clusters.")
		} else {
			// Only warn users on first start.
			if existing == nil {
				out.Ln("")
				warnAboutMultiNode()
				// Fresh cluster: create numNodes-1 additional worker nodes.
				for i := 1; i < numNodes; i++ {
					nodeName := node.Name(i + 1)
					n := config.Node{
						Name:              nodeName,
						Worker:            true,
						ControlPlane:      false,
						KubernetesVersion: starter.Cfg.KubernetesConfig.KubernetesVersion,
					}
					out.Ln("") // extra newline for clarity on the command line
					err := node.Add(starter.Cfg, n, viper.GetBool(deleteOnFailure))
					if err != nil {
						return nil, errors.Wrap(err, "adding node")
					}
				}
			} else {
				// Restart: re-add each previously-known worker node.
				for _, n := range existing.Nodes {
					if !n.ControlPlane {
						err := node.Add(starter.Cfg, n, viper.GetBool(deleteOnFailure))
						if err != nil {
							return nil, errors.Wrap(err, "adding node")
						}
					}
				}
			}
		}
	}
	return kubeconfig, nil
}
// warnAboutMultiNode prints the experimental-status warning for multi-node
// clusters along with the tracking-issue link.
func warnAboutMultiNode() {
	out.WarningT("Multi-node clusters are currently experimental and might exhibit unintended behavior.")
	out.T(style.Documentation, "To track progress on multi-node clusters, see https://github.com/kubernetes/minikube/issues/7538.")
}
// updateDriver installs or updates the named driver binary under the minikube
// bin directory. Failures are reported as warnings only, so start can still
// proceed with a previously-installed driver.
func updateDriver(driverName string) {
	v, err := version.GetSemverVersion()
	if err != nil {
		out.WarningT("Error parsing minikube version: {{.error}}", out.V{"error": err})
	} else if err := driver.InstallOrUpdate(driverName, localpath.MakeMiniPath("bin"), v, viper.GetBool(interactive), viper.GetBool(autoUpdate)); err != nil {
		out.WarningT("Unable to update {{.driver}} driver: {{.error}}", out.V{"driver": driverName, "error": err})
	}
}
// displayVersion prints the minikube startup banner, prefixed with the
// profile name whenever a non-default profile is in use.
func displayVersion(version string) {
	var prefix string
	if name := ClusterFlagValue(); name != constants.DefaultClusterName {
		prefix = fmt.Sprintf("[%s] ", name)
	}
	register.Reg.SetStep(register.InitialSetup)
	out.T(style.Happy, "{{.prefix}}minikube {{.version}} on {{.platform}}", out.V{"prefix": prefix, "version": version, "platform": platform()})
}
// displayEnviron makes the user aware of environment variables that will
// affect how minikube operates: MINIKUBE_* variables and KUBECONFIG.
func displayEnviron(env []string) {
	for _, kv := range env {
		bits := strings.SplitN(kv, "=", 2)
		// Skip malformed entries without "=" instead of panicking on
		// bits[1]; os.Environ normally guarantees one, but callers may pass
		// arbitrary slices.
		if len(bits) != 2 {
			continue
		}
		k := bits[0]
		v := bits[1]
		if strings.HasPrefix(k, "MINIKUBE_") || k == constants.KubeconfigEnvVar {
			out.Infof("{{.key}}={{.value}}", out.V{"key": k, "value": v})
		}
	}
}
// showKubectlInfo tells the user how to reach the new cluster with kubectl
// and warns when the locally-installed kubectl version is skewed more than
// one minor version from the cluster. The closing "Done!" message is printed
// via defer so it appears on every exit path.
func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName string) error {
	// To be shown at the end, regardless of exit path
	defer func() {
		register.Reg.SetStep(register.Done)
		if kcs.KeepContext {
			out.T(style.Kubectl, "To connect to this cluster, use: --context={{.name}}", out.V{"name": kcs.ClusterName})
		} else {
			out.T(style.Ready, `Done! kubectl is now configured to use "{{.name}}" by default`, out.V{"name": machineName})
		}
	}()
	path, err := exec.LookPath("kubectl")
	if err != nil {
		// kubectl missing is not an error condition for minikube itself.
		out.T(style.Tip, "kubectl not found. If you need it, try: 'minikube kubectl -- get pods -A'")
		return nil
	}
	gitVersion, err := kubectlVersion(path)
	if err != nil {
		return err
	}
	client, err := semver.Make(strings.TrimPrefix(gitVersion, version.VersionPrefix))
	if err != nil {
		return errors.Wrap(err, "client semver")
	}
	// k8sVersion was produced by getKubernetesVersion, so it is known-valid.
	cluster := semver.MustParse(strings.TrimPrefix(k8sVersion, version.VersionPrefix))
	minorSkew := int(math.Abs(float64(int(client.Minor) - int(cluster.Minor))))
	glog.Infof("kubectl: %s, cluster: %s (minor skew: %d)", client, cluster, minorSkew)
	if client.Major != cluster.Major || minorSkew > 1 {
		out.Ln("")
		out.WarningT("{{.path}} is version {{.client_version}}, which may have incompatibilites with Kubernetes {{.cluster_version}}.",
			out.V{"path": path, "client_version": client, "cluster_version": cluster})
		out.T(style.Tip, "Want kubectl {{.version}}? Try 'minikube kubectl -- get pods -A'", out.V{"version": k8sVersion})
	}
	return nil
}
// maybeDeleteAndRetry deletes the failed cluster and retries the start from
// scratch — but only when --delete-on-failure was set; otherwise the original
// start error is returned untouched.
func maybeDeleteAndRetry(cmd *cobra.Command, existing config.ClusterConfig, n config.Node, existingAddons map[string]bool, originalErr error) (*kubeconfig.Settings, error) {
	if viper.GetBool(deleteOnFailure) {
		out.WarningT("Node {{.name}} failed to start, deleting and trying again.", out.V{"name": n.Name})
		// Start failed, delete the cluster and try again
		profile, err := config.LoadProfile(existing.Name)
		if err != nil {
			out.ErrT(style.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": existing.Name})
		}
		err = deleteProfile(profile)
		if err != nil {
			out.WarningT("Failed to delete cluster {{.name}}, proceeding with retry anyway.", out.V{"name": existing.Name})
		}
		// Re-generate the cluster config, just in case the failure was related to an old config format
		cc := updateExistingConfigFromFlags(cmd, &existing)
		var kubeconfig *kubeconfig.Settings
		// Re-provision and restart every node; the control-plane node's
		// kubeconfig is the one returned to the caller.
		for _, n := range cc.Nodes {
			r, p, m, h, err := node.Provision(&cc, &n, n.ControlPlane, false)
			s := node.Starter{
				Runner:         r,
				PreExists:      p,
				MachineAPI:     m,
				Host:           h,
				Cfg:            &cc,
				Node:           &n,
				ExistingAddons: existingAddons,
			}
			if err != nil {
				// Ok we failed again, let's bail
				return nil, err
			}
			k, err := node.Start(s, n.ControlPlane)
			if n.ControlPlane {
				kubeconfig = k
			}
			if err != nil {
				// Ok we failed again, let's bail
				return nil, err
			}
		}
		return kubeconfig, nil
	}
	// Don't delete the cluster unless they ask
	return nil, originalErr
}
// kubectlVersion returns the client git version reported by the kubectl
// binary at path, e.g. "v1.18.0".
func kubectlVersion(path string) (string, error) {
	jsonOut, err := exec.Command(path, "version", "--client", "--output=json").Output()
	if err != nil {
		// Fall back for very old clients that predate the --output flag.
		raw, legacyErr := exec.Command(path, "version", "--client", "--short").Output()
		if legacyErr != nil {
			return "", errors.Wrap(legacyErr, "exec")
		}
		trimmed := strings.TrimSpace(string(raw))
		return strings.Replace(trimmed, "Client Version: ", "", 1), nil
	}
	var cv struct {
		ClientVersion struct {
			GitVersion string `json:"gitVersion"`
		} `json:"clientVersion"`
	}
	if err := json.Unmarshal(jsonOut, &cv); err != nil {
		return "", errors.Wrap(err, "unmarshal")
	}
	return cv.ClientVersion.GitVersion, nil
}
// selectDriver picks the driver to use, in priority order: the existing
// cluster's driver, the --driver flag, the deprecated --vm-driver flag, then
// automatic detection. It returns the chosen driver, the remaining viable
// alternates (for fallback), and whether the choice was explicit.
func selectDriver(existing *config.ClusterConfig) (registry.DriverState, []registry.DriverState, bool) {
	// Technically unrelated, but important to perform before detection
	driver.SetLibvirtURI(viper.GetString(kvmQemuURI))
	register.Reg.SetStep(register.SelectingDriver)
	// By default, the driver is whatever we used last time
	if existing != nil {
		old := hostDriver(existing)
		ds := driver.Status(old)
		out.T(style.Sparkle, `Using the {{.driver}} driver based on existing profile`, out.V{"driver": ds.String()})
		return ds, nil, true
	}
	// Default to looking at the new driver parameter
	if d := viper.GetString("driver"); d != "" {
		if vmd := viper.GetString("vm-driver"); vmd != "" {
			// Output a warning
			warning := `Both driver={{.driver}} and vm-driver={{.vmd}} have been set.
Since vm-driver is deprecated, minikube will default to driver={{.driver}}.
If vm-driver is set in the global config, please run "minikube config unset vm-driver" to resolve this warning.
`
			out.WarningT(warning, out.V{"driver": d, "vmd": vmd})
		}
		ds := driver.Status(d)
		if ds.Name == "" {
			exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": d, "os": runtime.GOOS})
		}
		out.T(style.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()})
		return ds, nil, true
	}
	// Fallback to old driver parameter
	if d := viper.GetString("vm-driver"); d != "" {
		ds := driver.Status(viper.GetString("vm-driver"))
		if ds.Name == "" {
			exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": d, "os": runtime.GOOS})
		}
		out.T(style.Sparkle, `Using the {{.driver}} driver based on user configuration`, out.V{"driver": ds.String()})
		return ds, nil, true
	}
	// No explicit choice: rank the available drivers and pick the best.
	choices := driver.Choices(viper.GetBool("vm"))
	pick, alts, rejects := driver.Suggest(choices)
	if pick.Name == "" {
		out.T(style.ThumbsDown, "Unable to pick a default driver. Here is what was considered, in preference order:")
		for _, r := range rejects {
			out.Infof("{{ .name }}: {{ .rejection }}", out.V{"name": r.Name, "rejection": r.Rejection})
		}
		exit.Message(reason.DrvNotDetected, "No possible driver was detected. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/")
	}
	if len(alts) > 1 {
		altNames := []string{}
		for _, a := range alts {
			altNames = append(altNames, a.String())
		}
		out.T(style.Sparkle, `Automatically selected the {{.driver}} driver. Other choices: {{.alternates}}`, out.V{"driver": pick.Name, "alternates": strings.Join(altNames, ", ")})
	} else {
		out.T(style.Sparkle, `Automatically selected the {{.driver}} driver`, out.V{"driver": pick.String()})
	}
	return pick, alts, false
}
// hostDriver returns the actual driver used by a libmachine host, which can
// differ from our config. Any lookup failure falls back to the driver (or
// legacy VMDriver) recorded in the config.
func hostDriver(existing *config.ClusterConfig) string {
	if existing == nil {
		return ""
	}
	api, err := machine.NewAPIClient()
	if err != nil {
		glog.Warningf("selectDriver NewAPIClient: %v", err)
		return existing.Driver
	}
	cp, err := config.PrimaryControlPlane(existing)
	if err != nil {
		glog.Warningf("Unable to get control plane from existing config: %v", err)
		return existing.Driver
	}
	machineName := driver.MachineName(*existing, cp)
	h, err := api.Load(machineName)
	if err != nil {
		glog.Warningf("api.Load failed for %s: %v", machineName, err)
		// Prefer the legacy VMDriver field when present — older configs
		// stored the driver there.
		if existing.VMDriver != "" {
			return existing.VMDriver
		}
		return existing.Driver
	}
	return h.Driver.DriverName()
}
// validateSpecifiedDriver makes sure that if a user has passed in a driver it
// matches the existing cluster if there is one; on a mismatch it exits with
// advice to either delete the cluster or restart it with its original driver.
func validateSpecifiedDriver(existing *config.ClusterConfig) {
	if existing == nil {
		return
	}
	requested := viper.GetString("driver")
	if requested == "" {
		requested = viper.GetString("vm-driver")
	}
	// Nothing to validate when neither --driver nor --vm-driver was given.
	if requested == "" {
		return
	}
	current := hostDriver(existing)
	if requested == current {
		return
	}
	exit.Advice(
		reason.GuestDrvMismatch,
		`The existing "{{.name}}" cluster was created using the "{{.old}}" driver, which is incompatible with requested "{{.new}}" driver.`,
		"Delete the existing '{{.name}}' cluster using: '{{.delcommand}}', or start the existing '{{.name}}' cluster using: '{{.command}} --driver={{.old}}'",
		out.V{
			"name":       existing.Name,
			"new":        requested,
			"old":        current,
			"command":    mustload.ExampleCmd(existing.Name, "start"),
			"delcommand": mustload.ExampleCmd(existing.Name, "delete"),
		},
	)
}
// validateDriver validates that the selected driver appears sane, exits if
// not: unsupported OS, missing provider, or a provider that is installed but
// not running (the latter can be overridden with --force).
func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) {
	name := ds.Name
	glog.Infof("validating driver %q against %+v", name, existing)
	if !driver.Supported(name) {
		exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}", out.V{"driver": name, "os": runtime.GOOS})
	}
	// if we are only downloading artifacts for a driver, we can stop validation here
	if viper.GetBool("download-only") {
		return
	}
	st := ds.State
	glog.Infof("status for %s: %+v", name, st)
	// A driver can work but still suggest an improvement (e.g. faster config).
	if st.NeedsImprovement {
		out.WarnReason(reason.Kind{
			ID:     fmt.Sprintf("PROVIDER_%s_IMPROVEMENT", strings.ToUpper(name)),
			Advice: translate.T(st.Fix),
			Style:  style.Improvement,
		}, `The '{{.driver}}' driver reported a performance issue`, out.V{"driver": name})
	}
	if st.Error == nil {
		return
	}
	if !st.Installed {
		exit.Message(reason.Kind{
			ID:       fmt.Sprintf("PROVIDER_%s_NOT_FOUND", strings.ToUpper(name)),
			Advice:   translate.T(st.Fix),
			ExitCode: reason.ExProviderNotFound,
			URL:      st.Doc,
			Style:    style.Shrug,
		}, `The '{{.driver}}' provider was not found: {{.error}}`, out.V{"driver": name, "error": st.Error})
	}
	// Installed but erroring: distinguish "not running" from generic errors.
	id := fmt.Sprintf("PROVIDER_%s_ERROR", strings.ToUpper(name))
	code := reason.ExProviderUnavailable
	if !st.Running {
		id = fmt.Sprintf("PROVIDER_%s_NOT_RUNNING", strings.ToUpper(name))
		code = reason.ExProviderNotRunning
	}
	exitIfNotForced(reason.Kind{
		ID:       id,
		Advice:   translate.T(st.Fix),
		ExitCode: code,
		URL:      st.Doc,
		Style:    style.Fatal,
	}, st.Error.Error())
}
// selectImageRepository probes the known image mirrors for the given country
// code (or all known mirrors, "global" first, when none is given) and returns
// whether a reachable mirror was found, the repository to use, and any
// configuration error. Reachability is tested by pulling the pause image
// manifest over the network.
func selectImageRepository(mirrorCountry string, v semver.Version) (bool, string, error) {
	var tryCountries []string
	var fallback string
	glog.Infof("selecting image repository for country %s ...", mirrorCountry)
	if mirrorCountry != "" {
		localRepos, ok := constants.ImageRepositories[mirrorCountry]
		if !ok || len(localRepos) == 0 {
			return false, "", fmt.Errorf("invalid image mirror country code: %s", mirrorCountry)
		}
		tryCountries = append(tryCountries, mirrorCountry)
		// we'll use the first repository as fallback
		// when none of the mirrors in the given location is available
		fallback = localRepos[0]
	} else {
		// always make sure global is preferred
		tryCountries = append(tryCountries, "global")
		for k := range constants.ImageRepositories {
			if strings.ToLower(k) != "global" {
				tryCountries = append(tryCountries, k)
			}
		}
	}
	// checkRepository verifies the repo serves the pause image for version v.
	checkRepository := func(repo string) error {
		pauseImage := images.Pause(v, repo)
		ref, err := name.ParseReference(pauseImage, name.WeakValidation)
		if err != nil {
			return err
		}
		_, err = remote.Image(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain))
		return err
	}
	for _, code := range tryCountries {
		localRepos := constants.ImageRepositories[code]
		for _, repo := range localRepos {
			err := checkRepository(repo)
			if err == nil {
				return true, repo, nil
			}
		}
	}
	// No mirror was reachable; fallback is "" unless a country was given.
	return false, fallback, nil
}
// validateUser validates minikube is run by the recommended user (privileged
// or regular): drivers that need root must run as root, and all other drivers
// should not run as root (overridable with --force).
func validateUser(drvName string) {
	u, err := user.Current()
	if err != nil {
		glog.Errorf("Error getting the current user: %v", err)
		return
	}
	useForce := viper.GetBool(force)
	if driver.NeedsRoot(drvName) && u.Uid != "0" && !useForce {
		exit.Message(reason.DrvNeedsRoot, `The "{{.driver_name}}" driver requires root privileges. Please run minikube using 'sudo -E minikube start --driver={{.driver_name}}'.`, out.V{"driver_name": drvName})
	}
	// If root is required, or we are not root, exit early
	if driver.NeedsRoot(drvName) || u.Uid != "0" {
		return
	}
	// Past this point: running as root with a driver that should not be.
	out.ErrT(style.Stopped, `The "{{.driver_name}}" driver should not be used with root privileges.`, out.V{"driver_name": drvName})
	out.ErrT(style.Tip, "If you are running minikube within a VM, consider using --driver=none:")
	out.ErrT(style.Documentation, "  https://minikube.sigs.k8s.io/docs/reference/drivers/none/")
	cname := ClusterFlagValue()
	_, err = config.Load(cname)
	// A loadable (or corrupt-but-present) profile means a root-owned cluster
	// already exists; tell the user how to remove it.
	if err == nil || !config.IsNotExist(err) {
		out.ErrT(style.Tip, "Tip: To remove this root owned cluster, run: sudo {{.cmd}}", out.V{"cmd": mustload.ExampleCmd(cname, "delete")})
	}
	if !useForce {
		exit.Message(reason.DrvAsRoot, `The "{{.driver_name}}" driver should not be used with root privileges.`, out.V{"driver_name": drvName})
	}
}
// memoryLimits returns the amount of memory allocated to the system and
// hypervisor/container runtime; the return values are in MiB. The container
// limit is 0 for non-KIC drivers. CPU/disk lookup failures are tolerated;
// only a memory lookup failure is returned as an error.
func memoryLimits(drvName string) (int, int, error) {
	info, cpuErr, memErr, diskErr := machine.CachedHostInfo()
	if cpuErr != nil {
		glog.Warningf("could not get system cpu info while verifying memory limits, which might be okay: %v", cpuErr)
	}
	if diskErr != nil {
		glog.Warningf("could not get system disk info while verifying memory limits, which might be okay: %v", diskErr)
	}
	if memErr != nil {
		return -1, -1, memErr
	}
	sysLimit := int(info.Memory)
	containerLimit := 0
	// KIC drivers (docker/podman) are further capped by the daemon's own
	// memory limit.
	if driver.IsKIC(drvName) {
		s, err := oci.CachedDaemonInfo(drvName)
		if err != nil {
			return -1, -1, err
		}
		containerLimit = int(s.TotalMemory / 1024 / 1024)
	}
	return sysLimit, containerLimit, nil
}
// suggestMemoryAllocation calculates the default memory footprint in MiB:
// an explicit --memory wins; otherwise roughly 25% of system RAM (divided
// across nodes) clamped between a floor of 2200 MiB and a ceiling of 6000 MiB
// or the container runtime's limit minus slack.
func suggestMemoryAllocation(sysLimit int, containerLimit int, nodes int) int {
	if mem := viper.GetInt(memory); mem != 0 {
		// Honor an explicit --memory setting verbatim.
		return mem
	}
	const floor = 2200
	upper := 6000
	if sysLimit > 0 && floor > sysLimit {
		return sysLimit
	}
	// If there are container limits, add tiny bit of slack for non-minikube components
	if containerLimit > 0 {
		if floor > containerLimit {
			return containerLimit
		}
		upper = containerLimit - 48
	}
	// Suggest 25% of RAM, rounded to nearest 100MB. Hyper-V requires an even number!
	suggested := int(float32(sysLimit)/400.0) * 100
	if nodes > 1 {
		suggested /= nodes
	}
	switch {
	case suggested > upper:
		return upper
	case suggested < floor:
		return floor
	default:
		return suggested
	}
}
// validateRequestedMemorySize validates the memory size matches the minimum
// recommended, exiting (unless --force) on unusably low limits and warning on
// marginal ones.
func validateRequestedMemorySize(req int, drvName string) {
	// TODO: Fix MB vs MiB confusion
	sysLimit, containerLimit, err := memoryLimits(drvName)
	if err != nil {
		glog.Warningf("Unable to query memory limits: %v", err)
	}

	// Detect if their system doesn't have enough memory to work with.
	if driver.IsKIC(drvName) && containerLimit < minUsableMem {
		if driver.IsDockerDesktop(drvName) {
			if runtime.GOOS == "darwin" {
				exitIfNotForced(reason.RsrcInsufficientDarwinDockerMemory, "Docker Desktop only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes", out.V{"size": containerLimit, "req": minUsableMem, "recommend": "2.25 GB"})
			} else {
				exitIfNotForced(reason.RsrcInsufficientWindowsDockerMemory, "Docker Desktop only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes", out.V{"size": containerLimit, "req": minUsableMem, "recommend": "2.25 GB"})
			}
		}
		exitIfNotForced(reason.RsrcInsufficientContainerMemory, "{{.driver}} only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes", out.V{"size": containerLimit, "driver": drvName, "req": minUsableMem})
	}
	if sysLimit < minUsableMem {
		// BUG FIX: this message reports the system limit, so pass sysLimit as
		// "size" (previously containerLimit was passed by mistake).
		exitIfNotForced(reason.RsrcInsufficientSysMemory, "System only has {{.size}}MiB available, less than the required {{.req}}MiB for Kubernetes", out.V{"size": sysLimit, "driver": drvName, "req": minUsableMem})
	}

	if req < minUsableMem {
		exitIfNotForced(reason.RsrcInsufficientReqMemory, "Requested memory allocation {{.requested}}MiB is less than the usable minimum of {{.minimum_memory}}MB", out.V{"requested": req, "minimum_memory": minUsableMem})
	}
	if req < minRecommendedMem {
		out.WarnReason(reason.RsrcInsufficientReqMemory, "Requested memory allocation ({{.requested}}MB) is less than the recommended minimum {{.recommend}}MB. Deployments may fail.", out.V{"requested": req, "recommend": minRecommendedMem})
	}

	if driver.IsDockerDesktop(drvName) && containerLimit < 2997 && sysLimit > 8000 { // for users with more than 8 GB advice 3 GB
		r := reason.RsrcInsufficientDarwinDockerMemory
		// BUG FIX: runtime.GOOS values are lowercase ("windows"); the previous
		// comparison against "Windows" could never match.
		if runtime.GOOS == "windows" {
			r = reason.RsrcInsufficientWindowsDockerMemory
		}
		r.Style = style.Improvement
		out.WarnReason(r, "Docker Desktop has access to only {{.size}}MiB of the {{.sys}}MiB in available system memory. Consider increasing this for improved performance.", out.V{"size": containerLimit, "sys": sysLimit, "recommend": "3 GB"})
	}

	advised := suggestMemoryAllocation(sysLimit, containerLimit, viper.GetInt(nodes))
	if req > sysLimit {
		exitIfNotForced(reason.Kind{ID: "RSRC_OVER_ALLOC_MEM", Advice: "Start minikube with less memory allocated: 'minikube start --memory={{.advised}}mb'"},
			`Requested memory allocation {{.requested}}MB is more than your system limit {{.system_limit}}MB.`,
			out.V{"requested": req, "system_limit": sysLimit, "advised": advised})
	}

	// Recommend 1GB to handle OS/VM overhead
	maxAdvised := sysLimit - 1024
	if req > maxAdvised {
		out.WarnReason(reason.Kind{ID: "RSRC_OVER_ALLOC_MEM", Advice: "Start minikube with less memory allocated: 'minikube start --memory={{.advised}}mb'"},
			`The requested memory allocation of {{.requested}}MiB does not leave room for system overhead (total system memory: {{.system_limit}}MiB). You may face stability issues.`,
			out.V{"requested": req, "system_limit": sysLimit, "advised": advised})
	}
}
// validateCPUCount validates the cpu count matches the minimum recommended,
// and for KIC drivers additionally verifies the container daemon exposes at
// least 2 CPUs.
func validateCPUCount(drvName string) {
	var cpuCount int
	if driver.BareMetal(drvName) {
		// Uses the gopsutil cpu package to count the number of physical cpu cores
		ci, err := cpu.Counts(false)
		if err != nil {
			glog.Warningf("Unable to get CPU info: %v", err)
		} else {
			cpuCount = ci
		}
	} else {
		cpuCount = viper.GetInt(cpus)
	}
	if cpuCount < minimumCPUS {
		exitIfNotForced(reason.RsrcInsufficientCores, "Requested cpu count {{.requested_cpus}} is less than the minimum allowed of {{.minimum_cpus}}", out.V{"requested_cpus": cpuCount, "minimum_cpus": minimumCPUS})
	}

	// Only KIC drivers need the daemon-level CPU check below.
	// (idiom fix: dropped the redundant double parentheses around drvName)
	if !driver.IsKIC(drvName) {
		return
	}

	si, err := oci.CachedDaemonInfo(drvName)
	if err != nil {
		out.T(style.Confused, "Failed to verify '{{.driver_name}} info' will try again ...", out.V{"driver_name": drvName})
		si, err = oci.DaemonInfo(drvName)
		if err != nil {
			exit.Message(reason.Usage, "Ensure your {{.driver_name}} is running and is healthy.", out.V{"driver_name": driver.FullName(drvName)})
		}
	}

	// looks good
	if si.CPUs >= 2 {
		return
	}

	if drvName == oci.Docker && runtime.GOOS == "darwin" {
		exitIfNotForced(reason.RsrcInsufficientDarwinDockerCores, "Docker Desktop has less than 2 CPUs configured, but Kubernetes requires at least 2 to be available")
	} else if drvName == oci.Docker && runtime.GOOS == "windows" {
		exitIfNotForced(reason.RsrcInsufficientWindowsDockerCores, "Docker Desktop has less than 2 CPUs configured, but Kubernetes requires at least 2 to be available")
	} else {
		// NOTE(review): this re-reads the driver from viper instead of using
		// drvName; presumably identical — confirm before unifying.
		exitIfNotForced(reason.RsrcInsufficientCores, "{{.driver_name}} has less than 2 CPUs available, but Kubernetes requires at least 2 to be available", out.V{"driver_name": driver.FullName(viper.GetString("driver"))})
	}
}
// validateFlags validates the supplied flags against known bad combinations.
// It exits (or merely warns when --force is set) on invalid values, and as a
// side effect normalizes a few viper keys (e.g. the "cri-o" runtime spelling).
func validateFlags(cmd *cobra.Command, drvName string) {
	if cmd.Flags().Changed(humanReadableDiskSize) {
		diskSizeMB, err := util.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
		if err != nil {
			exitIfNotForced(reason.Usage, "Validation unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err})
		}

		if diskSizeMB < minimumDiskSize {
			exitIfNotForced(reason.RsrcInsufficientStorage, "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}", out.V{"requested_size": diskSizeMB, "minimum_size": minimumDiskSize})
		}
	}

	if cmd.Flags().Changed(cpus) {
		if !driver.HasResourceLimits(drvName) {
			out.WarningT("The '{{.name}}' driver does not respect the --cpus flag", out.V{"name": drvName})
		}
	}
	// CPU count is validated regardless of whether --cpus was passed explicitly.
	validateCPUCount(drvName)

	if cmd.Flags().Changed(memory) {
		if !driver.HasResourceLimits(drvName) {
			out.WarningT("The '{{.name}}' driver does not respect the --memory flag", out.V{"name": drvName})
		}
		req, err := util.CalculateSizeInMB(viper.GetString(memory))
		if err != nil {
			exitIfNotForced(reason.Usage, "Unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err})
		}
		validateRequestedMemorySize(req, drvName)
	}

	if cmd.Flags().Changed(containerRuntime) {
		// NOTE(review): this local shadows the stdlib "runtime" package name
		// within this scope.
		runtime := strings.ToLower(viper.GetString(containerRuntime))

		validOptions := cruntime.ValidRuntimes()
		// `crio` is accepted as an alternative spelling to `cri-o`
		validOptions = append(validOptions, constants.CRIO)

		var validRuntime bool
		for _, option := range validOptions {
			if runtime == option {
				validRuntime = true
			}

			// Convert `cri-o` to `crio` as the K8s config uses the `crio` spelling
			// (the viper.Set runs once per matching iteration; harmless but repeated).
			if runtime == "cri-o" {
				viper.Set(containerRuntime, constants.CRIO)
			}
		}

		if !validRuntime {
			exit.Message(reason.Usage, `Invalid Container Runtime: "{{.runtime}}". Valid runtimes are: {{.validOptions}}`, out.V{"runtime": runtime, "validOptions": strings.Join(cruntime.ValidRuntimes(), ", ")})
		}
	}

	if driver.BareMetal(drvName) {
		if ClusterFlagValue() != constants.DefaultClusterName {
			exit.Message(reason.DrvUnsupportedProfile, "The '{{.name}} driver does not support multiple profiles: https://minikube.sigs.k8s.io/docs/reference/drivers/none/", out.V{"name": drvName})
		}

		runtime := viper.GetString(containerRuntime)
		if runtime != "docker" {
			out.WarningT("Using the '{{.runtime}}' runtime with the 'none' driver is an untested configuration!", out.V{"runtime": runtime})
		}

		// conntrack is required starting with Kubernetes 1.18, include the release candidates for completion
		version, _ := util.ParseKubernetesVersion(getKubernetesVersion(nil))
		if version.GTE(semver.MustParse("1.18.0-beta.1")) {
			if _, err := exec.LookPath("conntrack"); err != nil {
				exit.Message(reason.GuestMissingConntrack, "Sorry, Kubernetes {{.k8sVersion}} requires conntrack to be installed in root's path", out.V{"k8sVersion": version.String()})
			}
		}
	}

	// validate kubeadm extra args
	if invalidOpts := bsutil.FindInvalidExtraConfigFlags(config.ExtraOptions); len(invalidOpts) > 0 {
		out.WarningT(
			"These --extra-config parameters are invalid: {{.invalid_extra_opts}}",
			out.V{"invalid_extra_opts": invalidOpts},
		)
		exit.Message(
			reason.Usage,
			"Valid components are: {{.valid_extra_opts}}",
			out.V{"valid_extra_opts": bsutil.KubeadmExtraConfigOpts},
		)
	}

	// check that kubeadm extra args contain only allowed parameters
	for param := range config.ExtraOptions.AsMap().Get(bsutil.Kubeadm) {
		if !config.ContainsParam(bsutil.KubeadmExtraArgsAllowed[bsutil.KubeadmCmdParam], param) &&
			!config.ContainsParam(bsutil.KubeadmExtraArgsAllowed[bsutil.KubeadmConfigParam], param) {
			exit.Message(reason.Usage, "Sorry, the kubeadm.{{.parameter_name}} parameter is currently not supported by --extra-config", out.V{"parameter_name": param})
		}
	}

	// --output accepts only the two supported renderers.
	if s := viper.GetString(startOutput); s != "text" && s != "json" {
		exit.Message(reason.Usage, "Sorry, please set the --output flag to one of the following valid options: [text,json]")
	}

	validateRegistryMirror()
}
// validateRegistryMirror validates that each --registry-mirror value is a
// bare http(s) origin (scheme + host, no path), e.g. http://localhost.
func validateRegistryMirror() {
	if len(registryMirror) == 0 {
		return
	}
	for _, loc := range registryMirror {
		URL, err := url.Parse(loc)
		if err != nil {
			glog.Errorln("Error Parsing URL: ", err)
			// BUG FIX: previously execution fell through to dereference URL,
			// which is nil when Parse fails. Treat an unparsable value as an
			// invalid mirror instead of panicking.
			exit.Message(reason.Usage, "Sorry, the url provided with the --registry-mirror flag is invalid: {{.url}}", out.V{"url": loc})
		}
		if (URL.Scheme != "http" && URL.Scheme != "https") || URL.Path != "" {
			exit.Message(reason.Usage, "Sorry, the url provided with the --registry-mirror flag is invalid: {{.url}}", out.V{"url": loc})
		}
	}
}
// createNode returns the cluster config and its primary control-plane node.
// For an existing cluster it refreshes every node's KubernetesVersion (the
// only per-node attribute a user may override on restart); otherwise it
// builds the initial control-plane node from the cluster config.
func createNode(cc config.ClusterConfig, kubeNodeName string, existing *config.ClusterConfig) (config.ClusterConfig, config.Node, error) {
	// Create the initial node, which will necessarily be a control plane
	if existing != nil {
		cp, err := config.PrimaryControlPlane(existing)
		// BUG FIX: check the error before touching cp; previously
		// cp.KubernetesVersion was assigned before the err check.
		if err != nil {
			return cc, config.Node{}, err
		}
		cp.KubernetesVersion = getKubernetesVersion(&cc)

		// Make sure that existing nodes honor if KubernetesVersion gets specified on restart
		// KubernetesVersion is the only attribute that the user can override in the Node object
		nodes := []config.Node{}
		for _, n := range existing.Nodes {
			n.KubernetesVersion = getKubernetesVersion(&cc)
			nodes = append(nodes, n)
		}
		cc.Nodes = nodes

		return cc, cp, nil
	}

	cp := config.Node{
		Port:              cc.KubernetesConfig.NodePort,
		KubernetesVersion: getKubernetesVersion(&cc),
		Name:              kubeNodeName,
		ControlPlane:      true,
		Worker:            true,
	}
	cc.Nodes = []config.Node{cp}
	return cc, cp, nil
}
// autoSetDriverOptions sets the options needed for specific driver automatically.
// It applies driver-specific defaults for extra-config, cache-images,
// container-runtime, and bootstrapper unless the user set them explicitly.
func autoSetDriverOptions(cmd *cobra.Command, drvName string) (err error) {
	hints := driver.FlagDefaults(drvName)
	// Ranging directly handles an empty slice; the old `len > 0` guard and the
	// redundant `err = nil` initialization were removed.
	for _, eo := range hints.ExtraOptions {
		if config.ExtraOptions.Exists(eo) {
			glog.Infof("skipping extra-config %q.", eo)
			continue
		}
		glog.Infof("auto setting extra-config to %q.", eo)
		err = config.ExtraOptions.Set(eo)
		if err != nil {
			// NOTE: only the last failure is returned; earlier wrapped errors
			// are overwritten (preserved from the original behavior).
			err = errors.Wrapf(err, "setting extra option %s", eo)
		}
	}

	if !cmd.Flags().Changed(cacheImages) {
		viper.Set(cacheImages, hints.CacheImages)
	}

	if !cmd.Flags().Changed(containerRuntime) && hints.ContainerRuntime != "" {
		viper.Set(containerRuntime, hints.ContainerRuntime)
		glog.Infof("auto set %s to %q.", containerRuntime, hints.ContainerRuntime)
	}

	if !cmd.Flags().Changed(cmdcfg.Bootstrapper) && hints.Bootstrapper != "" {
		viper.Set(cmdcfg.Bootstrapper, hints.Bootstrapper)
		glog.Infof("auto set %s to %q.", cmdcfg.Bootstrapper, hints.Bootstrapper)
	}

	return err
}
// validateKubernetesVersion ensures that the requested version is reasonable:
// not older than the oldest supported release (unless forced), never a
// downgrade of an existing cluster, and it nudges the user when a newer
// default is available.
func validateKubernetesVersion(old *config.ClusterConfig) {
	nvs, _ := semver.Make(strings.TrimPrefix(getKubernetesVersion(old), version.VersionPrefix))

	oldestVersion, err := semver.Make(strings.TrimPrefix(constants.OldestKubernetesVersion, version.VersionPrefix))
	if err != nil {
		exit.Message(reason.InternalSemverParse, "Unable to parse oldest Kubernetes version from constants: {{.error}}", out.V{"error": err})
	}
	defaultVersion, err := semver.Make(strings.TrimPrefix(constants.DefaultKubernetesVersion, version.VersionPrefix))
	if err != nil {
		exit.Message(reason.InternalSemverParse, "Unable to parse default Kubernetes version from constants: {{.error}}", out.V{"error": err})
	}

	if nvs.LT(oldestVersion) {
		out.WarningT("Specified Kubernetes version {{.specified}} is less than the oldest supported version: {{.oldest}}", out.V{"specified": nvs, "oldest": constants.OldestKubernetesVersion})
		if !viper.GetBool(force) {
			out.WarningT("You can force an unsupported Kubernetes version via the --force flag")
		}
		exitIfNotForced(reason.KubernetesTooOld, "Kubernetes {{.version}} is not supported by this release of minikube", out.V{"version": nvs})
	}

	// No existing cluster (or no recorded version): nothing to compare against.
	if old == nil || old.KubernetesConfig.KubernetesVersion == "" {
		return
	}

	ovs, err := semver.Make(strings.TrimPrefix(old.KubernetesConfig.KubernetesVersion, version.VersionPrefix))
	if err != nil {
		// NOTE(review): on parse failure ovs is the zero version, so the
		// downgrade check below silently passes — confirm this is intended.
		glog.Errorf("Error parsing old version %q: %v", old.KubernetesConfig.KubernetesVersion, err)
	}

	// Refuse to downgrade an existing cluster's Kubernetes version.
	if nvs.LT(ovs) {
		profileArg := ""
		if old.Name != constants.DefaultClusterName {
			profileArg = fmt.Sprintf(" -p %s", old.Name)
		}

		suggestedName := old.Name + "2"
		exit.Message(reason.KubernetesDowngrade, "Unable to safely downgrade existing Kubernetes v{{.old}} cluster to v{{.new}}",
			out.V{"prefix": version.VersionPrefix, "new": nvs, "old": ovs, "profile": profileArg, "suggestedName": suggestedName})

	}
	if defaultVersion.GT(nvs) {
		out.T(style.New, "Kubernetes {{.new}} is now available. If you would like to upgrade, specify: --kubernetes-version={{.prefix}}{{.new}}", out.V{"prefix": version.VersionPrefix, "new": defaultVersion})
	}
}
// getKubernetesVersion resolves the Kubernetes version to deploy: the
// --kubernetes-version flag, then an existing cluster's recorded version,
// with "stable"/"" and "latest" mapped to the corresponding constants.
// The result is normalized to include the version prefix.
func getKubernetesVersion(old *config.ClusterConfig) string {
	requested := viper.GetString(kubernetesVersion)

	// try to load the old version first if the user didn't specify anything
	if requested == "" && old != nil {
		requested = old.KubernetesConfig.KubernetesVersion
	}

	switch {
	case requested == "" || strings.EqualFold(requested, "stable"):
		requested = constants.DefaultKubernetesVersion
	case strings.EqualFold(requested, "latest"):
		requested = constants.NewestKubernetesVersion
	}

	nvs, err := semver.Make(strings.TrimPrefix(requested, version.VersionPrefix))
	if err != nil {
		exit.Message(reason.Usage, `Unable to parse "{{.kubernetes_version}}": {{.error}}`, out.V{"kubernetes_version": requested, "error": err})
	}

	return version.VersionPrefix + nvs.String()
}
// exitIfNotForced exits the process with reason r unless --force was given,
// in which case the problem is only reported as an error and execution continues.
func exitIfNotForced(r reason.Kind, message string, v ...out.V) {
	if !viper.GetBool(force) {
		// exit.Message presumably terminates the process, so the out.Error
		// below only runs in the forced case.
		exit.Message(r, message, v...)
	}
	out.Error(r, message, v...)
}
|
package main
import (
"errors"
"fmt"
"net/url"
"os"
"regexp"
"strings"
"time"
"unicode"
"unicode/utf8"
"github.com/Sirupsen/logrus"
"github.com/cerana/cerana/pkg/logrusx"
"github.com/spf13/pflag"
"github.com/spf13/viper"
)
var (
flagSep = regexp.MustCompile(`[\s._-]+`)
specialCaps = regexp.MustCompile("(?i)^(url|ttl|cpu|ip|id)$")
)
// config couples a viper instance with the flag set whose values it binds.
type config struct {
	viper   *viper.Viper   // merged view of flags and config-file values
	flagSet *pflag.FlagSet // command-line flags bound into viper
}
// ConfigData defines the structure of the config data (e.g. in the config file)
type ConfigData struct {
	CoordinatorURL string `json:"coordinatorURL"` // coordinator for information retrieval
	HeartbeatURL   string `json:"heartbeatURL"`   // coordinator for heartbeat registration
	LogLevel       string `json:"logLevel"`       // logrus level name

	// Timeout and TTL values are in seconds
	RequestTimeout uint `json:"requestTimeout"`
	DatasetTTL     uint `json:"datasetTTL"`
	BundleTTL      uint `json:"bundleTTL"`
	NodeTTL        uint `json:"nodeTTL"`
}
// newConfig builds a *config around the supplied flag set and viper instance,
// defaulting to pflag.CommandLine and a fresh viper.Viper when nil. It
// registers all statspusher flags; call loadConfig afterwards to bind them.
func newConfig(flagSet *pflag.FlagSet, v *viper.Viper) *config {
	if flagSet == nil {
		flagSet = pflag.CommandLine
	}
	if v == nil {
		v = viper.New()
	}

	// Set normalization function before adding any flags
	flagSet.SetNormalizeFunc(canonicalFlagName)

	// Update Usage (--help output) to indicate flag
	pflag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
		pflag.PrintDefaults()
		fmt.Fprintln(os.Stderr, "Note: Flags can be used in either fooBar or foo[_-.]bar form.")
	}

	flagSet.StringP("configFile", "c", "", "path to config file")
	flagSet.StringP("coordinatorURL", "u", "", "url of coordinator for information retrieval")
	flagSet.StringP("heartbeatURL", "e", "", "url of coordinator for the heartbeat registering")
	flagSet.StringP("logLevel", "l", "warning", "log level: debug/info/warn/error/fatal/panic")
	flagSet.Uint64P("requestTimeout", "r", 0, "default timeout for requests made (seconds)")
	// BUG FIX: the three TTL flags had help text copy-pasted from
	// requestTimeout; they describe heartbeat TTLs, not request timeouts.
	flagSet.Uint64P("datasetTTL", "d", 0, "dataset heartbeat ttl (seconds)")
	flagSet.Uint64P("bundleTTL", "b", 0, "bundle heartbeat ttl (seconds)")
	flagSet.Uint64P("nodeTTL", "n", 0, "node heartbeat ttl (seconds)")

	return &config{
		viper:   v,
		flagSet: flagSet,
	}
}
// canonicalFlagName translates flag names to camelCase using whitespace,
// periods, underscores, and dashes as word boundaries. All-caps words are
// preserved.
func canonicalFlagName(f *pflag.FlagSet, name string) pflag.NormalizedName {
	// Standardize separators to a single space and trim leading/trailing spaces
	name = strings.TrimSpace(flagSep.ReplaceAllString(name, " "))
	// Convert to title case (lower case with leading caps, preserved all caps)
	name = strings.Title(name)

	// Some words should always be all caps or all lower case (e.g. TTL)
	nameParts := strings.Split(name, " ")
	for i, part := range nameParts {
		caseFn := strings.ToUpper
		if i == 0 {
			// Special words at the start of a name are lowercased (e.g. "url" in "urlFoo")
			caseFn = strings.ToLower
		}
		nameParts[i] = specialCaps.ReplaceAllStringFunc(part, caseFn)
	}

	// Examine the first part (the split already happened above)
	first := nameParts[0]
	if utf8.RuneCountInString(first) == 1 || first != strings.ToUpper(first) {
		// Lowercase the first letter if it is not an all-caps word
		r, n := utf8.DecodeRuneInString(first)
		nameParts[0] = string(unicode.ToLower(r)) + first[n:]
	}

	// Re-join without separators to produce the camelCase flag name
	return pflag.NormalizedName(strings.Join(nameParts, ""))
}
// loadConfig binds the flag set into viper, merges the optional config file,
// and validates the resulting configuration.
func (c *config) loadConfig() error {
	if err := c.viper.BindPFlags(c.flagSet); err != nil {
		logrus.WithFields(logrus.Fields{
			"error": err,
		}).Error("failed to bind flags")
		return err
	}

	configFile := c.viper.GetString("configFile")
	if configFile == "" {
		// No file supplied; flags (and defaults) alone must pass validation.
		return c.validate()
	}

	c.viper.SetConfigFile(configFile)
	if err := c.viper.ReadInConfig(); err != nil {
		logrus.WithFields(logrus.Fields{
			"error":    err,
			"filePath": configFile,
		}).Error("failed to parse config file")
		return err
	}

	return c.validate()
}
// coordinatorURL returns the coordinator url used for information retrieval.
func (c *config) coordinatorURL() *url.URL {
	// Parse errors were already rejected by validate(), so they are ignored here.
	parsed, _ := url.ParseRequestURI(c.viper.GetString("coordinatorURL"))
	return parsed
}
// heartbeatURL returns the coordinator url used for heartbeat registration.
func (c *config) heartbeatURL() *url.URL {
	// Parse errors were already rejected by validate(), so they are ignored here.
	parsed, _ := url.ParseRequestURI(c.viper.GetString("heartbeatURL"))
	return parsed
}
// requestTimeout returns the default request timeout, converted from seconds.
func (c *config) requestTimeout() time.Duration {
	return time.Duration(c.viper.GetInt("requestTimeout")) * time.Second
}
// datasetTTL returns the dataset heartbeat TTL.
func (c *config) datasetTTL() time.Duration {
	return c.getTTL("datasetTTL")
}

// nodeTTL returns the node heartbeat TTL.
func (c *config) nodeTTL() time.Duration {
	return c.getTTL("nodeTTL")
}

// bundleTTL returns the bundle heartbeat TTL.
func (c *config) bundleTTL() time.Duration {
	return c.getTTL("bundleTTL")
}

// getTTL reads an integer seconds value from viper and converts it to a Duration.
func (c *config) getTTL(key string) time.Duration {
	return time.Second * time.Duration(c.viper.GetInt(key))
}
// setupLogging applies the configured log level to the global logrus logger.
func (c *config) setupLogging() error {
	logLevel := c.viper.GetString("logLevel")
	if err := logrusx.SetLevel(logLevel); err != nil {
		logrus.WithFields(logrus.Fields{
			"error": err,
			"level": logLevel,
		}).Error("failed to set up logging")
		return err
	}
	return nil
}
// validate checks that all TTLs and the request timeout are positive and that
// both coordinator URLs are present and well-formed.
func (c *config) validate() error {
	durations := []struct {
		value time.Duration
		msg   string
	}{
		{c.datasetTTL(), "dataset ttl must be > 0"},
		{c.bundleTTL(), "bundle ttl must be > 0"},
		{c.nodeTTL(), "node ttl must be > 0"},
		{c.requestTimeout(), "request timeout must be > 0"},
	}
	for _, d := range durations {
		if d.value <= 0 {
			return errors.New(d.msg)
		}
	}

	for _, name := range []string{"coordinatorURL", "heartbeatURL"} {
		if err := c.validateURL(name); err != nil {
			return err
		}
	}
	return nil
}
// validateURL checks that the named config value is present and parses as a
// request URI, logging details and returning a short error otherwise.
func (c *config) validateURL(name string) error {
	value := c.viper.GetString(name)
	if value == "" {
		return errors.New("missing " + name)
	}
	_, err := url.ParseRequestURI(value)
	if err == nil {
		return nil
	}
	logrus.WithFields(logrus.Fields{
		name:    value,
		"error": err,
	}).Error("invalid config")
	return errors.New("invalid " + name)
}
Fix flag comments for statspusher ttls
package main
import (
"errors"
"fmt"
"net/url"
"os"
"regexp"
"strings"
"time"
"unicode"
"unicode/utf8"
"github.com/Sirupsen/logrus"
"github.com/cerana/cerana/pkg/logrusx"
"github.com/spf13/pflag"
"github.com/spf13/viper"
)
var (
flagSep = regexp.MustCompile(`[\s._-]+`)
specialCaps = regexp.MustCompile("(?i)^(url|ttl|cpu|ip|id)$")
)
// config couples a viper instance with the flag set whose values it binds.
type config struct {
	viper   *viper.Viper   // merged view of flags and config-file values
	flagSet *pflag.FlagSet // command-line flags bound into viper
}

// ConfigData defines the structure of the config data (e.g. in the config file)
type ConfigData struct {
	CoordinatorURL string `json:"coordinatorURL"` // coordinator for information retrieval
	HeartbeatURL   string `json:"heartbeatURL"`   // coordinator for heartbeat registration
	LogLevel       string `json:"logLevel"`       // logrus level name

	// Timeout and TTL values are in seconds
	RequestTimeout uint `json:"requestTimeout"`
	DatasetTTL     uint `json:"datasetTTL"`
	BundleTTL      uint `json:"bundleTTL"`
	NodeTTL        uint `json:"nodeTTL"`
}
// newConfig builds a *config around the supplied flag set and viper instance,
// defaulting to pflag.CommandLine and a fresh viper.Viper when nil. It
// registers all statspusher flags; call loadConfig afterwards to bind them.
func newConfig(flagSet *pflag.FlagSet, v *viper.Viper) *config {
	if flagSet == nil {
		flagSet = pflag.CommandLine
	}
	if v == nil {
		v = viper.New()
	}

	// Set normalization function before adding any flags
	flagSet.SetNormalizeFunc(canonicalFlagName)

	// Update Usage (--help output) to indicate flag
	pflag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
		pflag.PrintDefaults()
		fmt.Fprintln(os.Stderr, "Note: Flags can be used in either fooBar or foo[_-.]bar form.")
	}

	flagSet.StringP("configFile", "c", "", "path to config file")
	flagSet.StringP("coordinatorURL", "u", "", "url of coordinator for information retrieval")
	flagSet.StringP("heartbeatURL", "e", "", "url of coordinator for the heartbeat registering")
	flagSet.StringP("logLevel", "l", "warning", "log level: debug/info/warn/error/fatal/panic")
	flagSet.Uint64P("requestTimeout", "r", 0, "default timeout for requests made (seconds)")
	flagSet.Uint64P("datasetTTL", "d", 0, "dataset heartbeat ttl (seconds)")
	flagSet.Uint64P("bundleTTL", "b", 0, "bundle heartbeat ttl (seconds)")
	flagSet.Uint64P("nodeTTL", "n", 0, "node heartbeat ttl (seconds)")

	return &config{
		viper:   v,
		flagSet: flagSet,
	}
}
// canonicalFlagName translates flag names to camelCase using whitespace,
// periods, underscores, and dashes as word boundaries. All-caps words are
// preserved.
func canonicalFlagName(f *pflag.FlagSet, name string) pflag.NormalizedName {
	// Standardize separators to a single space and trim leading/trailing spaces
	name = strings.TrimSpace(flagSep.ReplaceAllString(name, " "))
	// Convert to title case (lower case with leading caps, preserved all caps)
	name = strings.Title(name)

	// Some words should always be all caps or all lower case (e.g. TTL)
	nameParts := strings.Split(name, " ")
	for i, part := range nameParts {
		caseFn := strings.ToUpper
		if i == 0 {
			// Special words at the start of a name are lowercased
			caseFn = strings.ToLower
		}
		nameParts[i] = specialCaps.ReplaceAllStringFunc(part, caseFn)
	}

	// Examine the first part (the split already happened above)
	first := nameParts[0]
	if utf8.RuneCountInString(first) == 1 || first != strings.ToUpper(first) {
		// Lowercase the first letter if it is not an all-caps word
		r, n := utf8.DecodeRuneInString(first)
		nameParts[0] = string(unicode.ToLower(r)) + first[n:]
	}

	// Re-join without separators to produce the camelCase flag name
	return pflag.NormalizedName(strings.Join(nameParts, ""))
}
// loadConfig binds the flag set into viper, merges the optional config file,
// and validates the resulting configuration.
func (c *config) loadConfig() error {
	if err := c.viper.BindPFlags(c.flagSet); err != nil {
		logrus.WithFields(logrus.Fields{
			"error": err,
		}).Error("failed to bind flags")
		return err
	}

	filePath := c.viper.GetString("configFile")
	if filePath == "" {
		// No file supplied; flags (and defaults) alone must pass validation.
		return c.validate()
	}

	c.viper.SetConfigFile(filePath)
	if err := c.viper.ReadInConfig(); err != nil {
		logrus.WithFields(logrus.Fields{
			"error":    err,
			"filePath": filePath,
		}).Error("failed to parse config file")
		return err
	}

	return c.validate()
}
// coordinatorURL returns the coordinator url used for information retrieval.
func (c *config) coordinatorURL() *url.URL {
	// Error checking has been done during validation
	u, _ := url.ParseRequestURI(c.viper.GetString("coordinatorURL"))
	return u
}

// heartbeatURL returns the coordinator url used for heartbeat registration.
func (c *config) heartbeatURL() *url.URL {
	// Error checking has been done during validation
	u, _ := url.ParseRequestURI(c.viper.GetString("heartbeatURL"))
	return u
}

// requestTimeout returns the default request timeout, converted from seconds.
func (c *config) requestTimeout() time.Duration {
	return time.Second * time.Duration(c.viper.GetInt("requestTimeout"))
}

// datasetTTL returns the dataset heartbeat TTL.
func (c *config) datasetTTL() time.Duration {
	return c.getTTL("datasetTTL")
}

// nodeTTL returns the node heartbeat TTL.
func (c *config) nodeTTL() time.Duration {
	return c.getTTL("nodeTTL")
}

// bundleTTL returns the bundle heartbeat TTL.
func (c *config) bundleTTL() time.Duration {
	return c.getTTL("bundleTTL")
}

// getTTL reads an integer seconds value from viper and converts it to a Duration.
func (c *config) getTTL(key string) time.Duration {
	return time.Second * time.Duration(c.viper.GetInt(key))
}
// setupLogging applies the configured log level to the global logrus logger.
func (c *config) setupLogging() error {
	logLevel := c.viper.GetString("logLevel")
	if err := logrusx.SetLevel(logLevel); err != nil {
		logrus.WithFields(logrus.Fields{
			"error": err,
			"level": logLevel,
		}).Error("failed to set up logging")
		return err
	}
	return nil
}
// validate checks that all TTLs and the request timeout are positive and that
// both coordinator URLs are present and well-formed.
func (c *config) validate() error {
	if c.datasetTTL() <= 0 {
		return errors.New("dataset ttl must be > 0")
	}
	if c.bundleTTL() <= 0 {
		return errors.New("bundle ttl must be > 0")
	}
	if c.nodeTTL() <= 0 {
		return errors.New("node ttl must be > 0")
	}
	if c.requestTimeout() <= 0 {
		return errors.New("request timeout must be > 0")
	}

	if err := c.validateURL("coordinatorURL"); err != nil {
		return err
	}
	if err := c.validateURL("heartbeatURL"); err != nil {
		return err
	}
	return nil
}
// validateURL checks that the named config value is present and parses as a
// request URI, logging details and returning a short error otherwise.
func (c *config) validateURL(name string) error {
	u := c.viper.GetString(name)
	if u == "" {
		return errors.New("missing " + name)
	}
	if _, err := url.ParseRequestURI(u); err != nil {
		logrus.WithFields(logrus.Fields{
			name:    u,
			"error": err,
		}).Error("invalid config")
		return errors.New("invalid " + name)
	}
	return nil
}
|
package integration_test
import (
"os/exec"
"path/filepath"
"github.com/cloudfoundry/libbuildpack/cutlass"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// AppDynamics integration suite for the PHP buildpack.
// BUG FIX: the stray SkipUnlessCflinuxfs2() call in the Describe body was
// removed — Ginkgo only permits Skip from within a running spec node
// (It/BeforeEach), so calling it here panics. The call inside the It block
// below is the correct place and is kept.
var _ = Describe("CF PHP Buildpack", func() {
	var app *cutlass.App

	// RunCf shells out to the cf CLI, streaming output to the Ginkgo writer.
	RunCf := func(args ...string) error {
		command := exec.Command("cf", args...)
		command.Stdout = GinkgoWriter
		command.Stderr = GinkgoWriter
		return command.Run()
	}

	AfterEach(func() {
		app = DestroyApp(app)
		_ = RunCf("delete-service", "-f", "with_appdynamics")
	})

	It("configures appdynamics", func() {
		SkipUnlessCflinuxfs2() // app depends on newrelic, so php5, so cflinuxfs2
		app = cutlass.New(filepath.Join(bpDir, "fixtures", "with_appdynamics"))
		app.SetEnv("BP_DEBUG", "true")

		Expect(app.PushNoStart()).To(Succeed())
		Expect(RunCf("cups", "with_appdynamics", "-p", `{"account-access-key":"fe244dc3-372f-4d36-83b0-379973103c5c","account-name":"customer1","host-name":"testhostname.com","port":"8090","ssl-enabled":"False"}`)).To(Succeed())
		Expect(RunCf("bind-service", app.Name, "with_appdynamics")).To(Succeed())
		Expect(RunCf("start", app.Name)).To(Succeed())
		ConfirmRunning(app)
		Expect(app.ConfirmBuildpack(buildpackVersion)).To(Succeed())

		By("should compile appdynamics agent", func() {
			Eventually(app.Stdout.String).Should(ContainSubstring("AppDynamics service detected, beginning compilation"))
		})

		By("should configure appdynamics agent", func() {
			Eventually(app.Stdout.String).Should(ContainSubstring("Running AppDynamics extension method _configure"))
		})

		By("should set credentials for appdynamics agent", func() {
			Eventually(app.Stdout.String).Should(ContainSubstring("Setting AppDynamics credentials info..."))
		})

		By("should download appdynamics agent", func() {
			Eventually(app.Stdout.String).Should(ContainSubstring("Downloading AppDynamics package..."))
		})

		By("should install appdynamics agent", func() {
			Eventually(app.Stdout.String).Should(ContainSubstring("Installing AppDynamics package..."))
		})
	})
})
Remove Skip in Context block as it causes tests to panic
Signed-off-by: Danny Joyce <656eadb1608d5e3ea5db131e513d97abdf96b075@pivotal.io>
package integration_test
import (
"os/exec"
"path/filepath"
"github.com/cloudfoundry/libbuildpack/cutlass"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// AppDynamics integration suite for the PHP buildpack: pushes a fixture app
// bound to a user-provided "with_appdynamics" service and checks the agent
// is compiled, configured, and installed during staging.
var _ = Describe("CF PHP Buildpack", func() {
	var app *cutlass.App

	// RunCf shells out to the cf CLI, streaming output to the Ginkgo writer.
	RunCf := func(args ...string) error {
		command := exec.Command("cf", args...)
		command.Stdout = GinkgoWriter
		command.Stderr = GinkgoWriter
		return command.Run()
	}

	AfterEach(func() {
		app = DestroyApp(app)
		// Best-effort cleanup; the service may not exist if the spec failed early.
		_ = RunCf("delete-service", "-f", "with_appdynamics")
	})

	It("configures appdynamics", func() {
		// Skip must live inside the It block — calling it at Describe level panics.
		SkipUnlessCflinuxfs2() // app depends on newrelic, so php5, so cflinuxfs2
		app = cutlass.New(filepath.Join(bpDir, "fixtures", "with_appdynamics"))
		app.SetEnv("BP_DEBUG", "true")

		Expect(app.PushNoStart()).To(Succeed())
		Expect(RunCf("cups", "with_appdynamics", "-p", `{"account-access-key":"fe244dc3-372f-4d36-83b0-379973103c5c","account-name":"customer1","host-name":"testhostname.com","port":"8090","ssl-enabled":"False"}`)).To(Succeed())
		Expect(RunCf("bind-service", app.Name, "with_appdynamics")).To(Succeed())
		Expect(RunCf("start", app.Name)).To(Succeed())
		ConfirmRunning(app)
		Expect(app.ConfirmBuildpack(buildpackVersion)).To(Succeed())

		By("should compile appdynamics agent", func() {
			Eventually(app.Stdout.String).Should(ContainSubstring("AppDynamics service detected, beginning compilation"))
		})

		By("should configure appdynamics agent", func() {
			Eventually(app.Stdout.String).Should(ContainSubstring("Running AppDynamics extension method _configure"))
		})

		By("should set credentials for appdynamics agent", func() {
			Eventually(app.Stdout.String).Should(ContainSubstring("Setting AppDynamics credentials info..."))
		})

		By("should download appdynamics agent", func() {
			Eventually(app.Stdout.String).Should(ContainSubstring("Downloading AppDynamics package..."))
		})

		By("should install appdynamics agent", func() {
			Eventually(app.Stdout.String).Should(ContainSubstring("Installing AppDynamics package..."))
		})
	})
})
|
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rt_5_2_aggregate_test
import (
"fmt"
"sort"
"testing"
"time"
"github.com/openconfig/featureprofiles/internal/attrs"
"github.com/openconfig/featureprofiles/internal/deviations"
"github.com/openconfig/featureprofiles/internal/fptest"
"github.com/openconfig/ondatra"
"github.com/openconfig/ondatra/netutil"
"github.com/openconfig/ondatra/telemetry"
"github.com/openconfig/ygot/ygot"
)
// TestMain hands control to the featureprofiles test runner.
func TestMain(m *testing.M) {
	fptest.RunTests(m)
}
// Settings for configuring the aggregate testbed with the test
// topology. IxNetwork flow requires both source and destination
// networks be configured on the ATE. It is not possible to send
// packets to the ether.
//
// The testbed consists of ate:port1 -> dut:port1 and dut:port{2-9} ->
// ate:port{2-9}. The first pair is called the "source" pair, and the
// second aggregate link the "destination" pair.
//
// - Source: ate:port1 -> dut:port1 subnet 192.0.2.0/30 2001:db8::0/126
// - Destination: dut:port{2-9} -> ate:port{2-9}
// subnet 192.0.2.4/30 2001:db8::4/126
//
// Note that the first (.0, .4) and last (.3, .7) IPv4 addresses are
// reserved from the subnet for broadcast, so a /30 leaves exactly 2
// usable addresses. This does not apply to IPv6 which allows /127
// for point to point links, but we use /126 so the numbering is
// consistent with IPv4.
//
// A traffic flow is configured from ate:port1 as source and ate:port{2-9}
// as destination.
const (
	plen4 = 30  // IPv4 prefix length for the point-to-point test subnets
	plen6 = 126 // IPv6 prefix length, chosen to mirror the IPv4 numbering
)
// Interface attributes for the four endpoints of the topology described above:
// the DUT/ATE ends of the source link and of the destination aggregate.
var (
	dutSrc = attrs.Attributes{
		Desc:    "dutsrc",
		IPv4:    "192.0.2.1",
		IPv6:    "2001:db8::1",
		IPv4Len: plen4,
		IPv6Len: plen6,
	}

	ateSrc = attrs.Attributes{
		Name:    "atesrc",
		IPv4:    "192.0.2.2",
		IPv6:    "2001:db8::2",
		IPv4Len: plen4,
		IPv6Len: plen6,
	}

	dutDst = attrs.Attributes{
		Desc:    "dutdst",
		IPv4:    "192.0.2.5",
		IPv6:    "2001:db8::5",
		IPv4Len: plen4,
		IPv6Len: plen6,
	}

	ateDst = attrs.Attributes{
		Name:    "atedst",
		IPv4:    "192.0.2.6",
		IPv6:    "2001:db8::6",
		IPv4Len: plen4,
		IPv6Len: plen6,
	}
)
const (
lagTypeLACP = telemetry.IfAggregate_AggregationType_LACP
lagTypeSTATIC = telemetry.IfAggregate_AggregationType_STATIC
)
type testCase struct {
minlinks uint16
lagType telemetry.E_IfAggregate_AggregationType
dut *ondatra.DUTDevice
ate *ondatra.ATEDevice
top *ondatra.ATETopology
dutPorts []*ondatra.Port
atePorts []*ondatra.Port
aggID string
l3header []ondatra.Header
}
// configSrcDUT populates interface i with the description and IPv4/IPv6
// subinterface addresses taken from a. Enabled leaves are only set when
// the InterfaceEnabled deviation is active.
func (*testCase) configSrcDUT(i *telemetry.Interface, a *attrs.Attributes) {
    i.Description = ygot.String(a.Desc)
    if *deviations.InterfaceEnabled {
        i.Enabled = ygot.Bool(true)
    }

    s := i.GetOrCreateSubinterface(0)
    s4 := s.GetOrCreateIpv4()
    if *deviations.InterfaceEnabled {
        s4.Enabled = ygot.Bool(true)
    }
    a4 := s4.GetOrCreateAddress(a.IPv4)
    a4.PrefixLength = ygot.Uint8(plen4)

    s6 := s.GetOrCreateIpv6()
    if *deviations.InterfaceEnabled {
        s6.Enabled = ygot.Bool(true)
    }
    s6.GetOrCreateAddress(a.IPv6).PrefixLength = ygot.Uint8(plen6)
}
// configDstAggregateDUT configures i as the aggregate (LAG) interface:
// addressing from a, plus the LAG type and min-links from the test case.
func (tc *testCase) configDstAggregateDUT(i *telemetry.Interface, a *attrs.Attributes) {
    tc.configSrcDUT(i, a)
    i.Type = ieee8023adLag
    g := i.GetOrCreateAggregation()
    g.LagType = tc.lagType
    g.MinLinks = ygot.Uint16(tc.minlinks)
}
// configDstMemberDUT configures physical port p as a member of the
// aggregate: description, ethernet type, optional enable, and the
// aggregate ID that binds it to the LAG.
func (tc *testCase) configDstMemberDUT(i *telemetry.Interface, p *ondatra.Port) {
    i.Description = ygot.String(p.String())
    i.Type = ethernetCsmacd
    if *deviations.InterfaceEnabled {
        i.Enabled = ygot.Bool(true)
    }
    e := i.GetOrCreateEthernet()
    e.AggregateId = ygot.String(tc.aggID)
}
// setupAggregateAtomically creates the aggregate interface and attaches
// all member ports to it in a single Update, for targets that cannot
// accept the aggregate and its members in separate transactions.
func (tc *testCase) setupAggregateAtomically(t *testing.T) {
    d := &telemetry.Device{}

    if tc.lagType == lagTypeLACP {
        d.GetOrCreateLacp().GetOrCreateInterface(tc.aggID)
    }

    agg := d.GetOrCreateInterface(tc.aggID)
    agg.GetOrCreateAggregation().LagType = tc.lagType
    agg.Type = ieee8023adLag

    // Every DUT port except the source port (index 0) joins the LAG.
    for _, port := range tc.dutPorts[1:] {
        i := d.GetOrCreateInterface(port.Name())
        i.GetOrCreateEthernet().AggregateId = ygot.String(tc.aggID)
        i.Type = ethernetCsmacd
        if *deviations.InterfaceEnabled {
            i.Enabled = ygot.Bool(true)
        }
    }

    p := tc.dut.Config()
    fptest.LogYgot(t, fmt.Sprintf("%s to Update()", tc.dut), p, d)
    p.Update(t, d)
}
// clearAggregate removes the aggregate's min-links setting and then
// detaches every member port from the aggregate interface.
func (tc *testCase) clearAggregate(t *testing.T) {
    // Drop the min-links threshold from the aggregate first.
    tc.dut.Config().Interface(tc.aggID).Aggregation().MinLinks().Delete(t)

    // Detach each member port (all DUT ports except source port 0).
    members := tc.dutPorts[1:]
    for _, member := range members {
        tc.dut.Config().Interface(member.Name()).Ethernet().AggregateId().Delete(t)
    }
}
// configureDUT pushes the full DUT configuration: LACP settings, the
// aggregate interface, the source interface, and each LAG member port.
func (tc *testCase) configureDUT(t *testing.T) {
    t.Logf("dut ports = %v", tc.dutPorts)
    if len(tc.dutPorts) < 2 {
        t.Fatalf("Testbed requires at least 2 ports, got %d", len(tc.dutPorts))
    }

    d := tc.dut.Config()

    if *deviations.AggregateAtomicUpdate {
        // Some targets need the aggregate and its members created in a
        // single atomic update; clear any stale state first.
        tc.clearAggregate(t)
        tc.setupAggregateAtomically(t)
    }

    // LACP activity: ACTIVE for LACP aggregates, UNSET otherwise.
    lacp := &telemetry.Lacp_Interface{Name: ygot.String(tc.aggID)}
    if tc.lagType == lagTypeLACP {
        lacp.LacpMode = telemetry.Lacp_LacpActivityType_ACTIVE
    } else {
        lacp.LacpMode = telemetry.Lacp_LacpActivityType_UNSET
    }
    lacpPath := d.Lacp().Interface(tc.aggID)
    fptest.LogYgot(t, "LACP", lacpPath, lacp)
    lacpPath.Replace(t, lacp)

    // The aggregate (LAG) interface itself.
    agg := &telemetry.Interface{Name: ygot.String(tc.aggID)}
    tc.configDstAggregateDUT(agg, &dutDst)
    aggPath := d.Interface(tc.aggID)
    fptest.LogYgot(t, tc.aggID, aggPath, agg)
    aggPath.Replace(t, agg)

    // The source interface (port 0), not part of the LAG.
    srcp := tc.dutPorts[0]
    srci := &telemetry.Interface{Name: ygot.String(srcp.Name())}
    tc.configSrcDUT(srci, &dutSrc)
    srci.Type = ethernetCsmacd
    srciPath := d.Interface(srcp.Name())
    fptest.LogYgot(t, srcp.String(), srciPath, srci)
    srciPath.Replace(t, srci)

    // Each member port of the LAG.
    for _, port := range tc.dutPorts[1:] {
        i := &telemetry.Interface{Name: ygot.String(port.Name())}
        i.Type = ethernetCsmacd
        if *deviations.InterfaceEnabled {
            i.Enabled = ygot.Bool(true)
        }
        tc.configDstMemberDUT(i, port)
        iPath := d.Interface(port.Name())
        fptest.LogYgot(t, port.String(), iPath, i)
        iPath.Replace(t, i)
    }
}
// configureATE builds and pushes the ATE topology: a plain source
// interface on port 0 and a LAG over the remaining ports, then starts
// protocols.
func (tc *testCase) configureATE(t *testing.T) {
    if len(tc.atePorts) < 2 {
        t.Fatalf("Testbed requires at least 2 ports, got: %v", tc.atePorts)
    }

    p0 := tc.atePorts[0]
    i0 := tc.top.AddInterface(ateSrc.Name).WithPort(p0)
    i0.IPv4().
        WithAddress(ateSrc.IPv4CIDR()).
        WithDefaultGateway(dutSrc.IPv4)
    i0.IPv6().
        WithAddress(ateSrc.IPv6CIDR()).
        WithDefaultGateway(dutSrc.IPv6)

    // Don't use WithLACPEnabled which is for emulated Ixia LACP.
    agg := tc.top.AddInterface(ateDst.Name)
    lag := tc.top.AddLAG("lag").WithPorts(tc.atePorts[1:]...)
    lag.LACP().WithEnabled(tc.lagType == lagTypeLACP)
    agg.WithLAG(lag)

    // Disable FEC for 100G-FR ports because Novus does not support it.
    if p0.PMD() == ondatra.PMD100GFR {
        i0.Ethernet().FEC().WithEnabled(false)
    }
    // If any LAG member is 100G-FR, disable FEC on the aggregate too.
    is100gfr := false
    for _, p := range tc.atePorts[1:] {
        if p.PMD() == ondatra.PMD100GFR {
            is100gfr = true
        }
    }
    if is100gfr {
        agg.Ethernet().FEC().WithEnabled(false)
    }

    agg.IPv4().
        WithAddress(ateDst.IPv4CIDR()).
        WithDefaultGateway(dutDst.IPv4)
    agg.IPv6().
        WithAddress(ateDst.IPv6CIDR()).
        WithDefaultGateway(dutDst.IPv6)

    tc.top.Push(t).StartProtocols(t)
}
// Shorthands for frequently used telemetry enum values.
const (
    ethernetCsmacd = telemetry.IETFInterfaces_InterfaceType_ethernetCsmacd
    ieee8023adLag  = telemetry.IETFInterfaces_InterfaceType_ieee8023adLag
    adminUp        = telemetry.Interface_AdminStatus_UP
    opUp           = telemetry.Interface_OperStatus_UP
    opDown         = telemetry.Interface_OperStatus_DOWN
    full           = telemetry.Ethernet_DuplexMode_FULL
    dynamic        = telemetry.IfIp_NeighborOrigin_DYNAMIC
)
// verifyLagID asserts that the DUT reports dp as a member of the
// expected aggregate interface.
func (tc *testCase) verifyLagID(t *testing.T, dp *ondatra.Port) {
    iface := tc.dut.Telemetry().Interface(dp.Name()).Get(t)
    got := iface.GetEthernet().GetAggregateId()
    if got != tc.aggID {
        t.Errorf("%s LagID got %v, want %v", dp, got, tc.aggID)
    }
}
// verifyInterfaceDUT checks DUT port dp against the parameters set by
// configureDUT: admin-status must already be UP, and oper-status must
// become UP within a minute. LAG members can come up asynchronously
// after LACP negotiation, so we Await oper-status rather than sampling
// a single snapshot with Get (which flakes while negotiation is still
// in progress).
func (tc *testCase) verifyInterfaceDUT(t *testing.T, dp *ondatra.Port) {
    dip := tc.dut.Telemetry().Interface(dp.Name())
    di := dip.Get(t)
    fptest.LogYgot(t, dp.String()+" before Await", dip, di)
    if got := di.GetAdminStatus(); got != adminUp {
        t.Errorf("%s admin-status got %v, want %v", dp, got, adminUp)
    }
    // Wait for the port to report operationally UP instead of checking a
    // point-in-time sample.
    dip.OperStatus().Await(t, time.Minute, opUp)
}
// verifyDUT checks the telemetry against the parameters set by
// configureDUT(): the source link, every LAG member, and the aggregate
// interface type.
func (tc *testCase) verifyDUT(t *testing.T) {
    // Wait for LAG negotiation to complete and verify the LAG type for
    // the aggregate interface FIRST: inspecting member ports before the
    // aggregate has formed races against negotiation and flakes.
    tc.dut.Telemetry().Interface(tc.aggID).Type().Await(t, time.Minute, ieee8023adLag)
    for n, port := range tc.dutPorts {
        if n < 1 {
            // We designate port 0 as the source link, not part of LAG.
            t.Run("Source Link Verification", func(t *testing.T) {
                tc.verifyInterfaceDUT(t, port)
            })
            continue
        }
        t.Run("Lag ports verification", func(t *testing.T) {
            tc.verifyInterfaceDUT(t, port)
            tc.verifyLagID(t, port)
        })
    }
}
// verifyATE checks the telemetry against the parameters set by
// configureDUT(): the ATE source port must be operationally up.
func (tc *testCase) verifyATE(t *testing.T) {
    srcPort := tc.atePorts[0]
    ifacePath := tc.ate.Telemetry().Interface(srcPort.Name())
    fptest.LogYgot(t, srcPort.String(), ifacePath, ifacePath.Get(t))
    // State for the interface.
    got := ifacePath.OperStatus().Get(t)
    if got != opUp {
        t.Errorf("%s oper-status got %v, want %v", srcPort, got, opUp)
    }
}
// sortPorts sorts the ports in place by testbed port ID and returns the
// same slice for convenience.
func sortPorts(ports []*ondatra.Port) []*ondatra.Port {
    byID := func(a, b int) bool {
        return ports[a].ID() < ports[b].ID()
    }
    sort.SliceStable(ports, byID)
    return ports
}
// verifyMinLinks sets min-links to (#members - 1) and checks the
// aggregate's oper-status as member links are disabled one at a time:
// the LAG must stay UP while at least min-links members are up, and
// report LOWER_LAYER_DOWN once fewer remain.
func (tc *testCase) verifyMinLinks(t *testing.T) {
    totalPorts := len(tc.dutPorts)
    numLagPorts := totalPorts - 1
    minLinks := uint16(numLagPorts - 1)
    tc.dut.Config().Interface(tc.aggID).Aggregation().MinLinks().Replace(t, minLinks)

    tests := []struct {
        desc      string
        downCount int
        want      telemetry.E_Interface_OperStatus
    }{
        {
            desc:      "MinLink + 1",
            downCount: 0,
            want:      opUp,
        },
        {
            desc:      "MinLink",
            downCount: 1,
            want:      opUp,
        },
        {
            desc:      "MinLink - 1",
            downCount: 2,
            want:      telemetry.Interface_OperStatus_LOWER_LAYER_DOWN,
        },
    }
    for _, tf := range tests {
        t.Run(tf.desc, func(t *testing.T) {
            for _, port := range tc.atePorts[1 : 1+tf.downCount] {
                tc.ate.Actions().NewSetPortState().WithPort(port).WithEnabled(false).Send(t)
                // Linked DUT and ATE ports have the same ID.
                dp := tc.dut.Port(t, port.ID())
                dip := tc.dut.Telemetry().Interface(dp.Name())
                t.Logf("Awaiting DUT port down: %v", dp)
                dip.OperStatus().Await(t, time.Minute, opDown)
                t.Log("Port is down.")
            }
            tc.dut.Telemetry().Interface(tc.aggID).OperStatus().Await(t, 1*time.Minute, tf.want)
        })
    }

    // Bring the ATE ports back up so subsequent subtests and the next
    // LAG-type iteration start from a clean state; without this the
    // disabled ports stay down after the test returns.
    for _, port := range tc.atePorts {
        tc.ate.Actions().NewSetPortState().WithPort(port).WithEnabled(true).Send(t)
    }
}
// TestNegotiation exercises LAG formation with both LACP and STATIC
// aggregation, verifying ATE and DUT state and min-links behavior for
// each LAG type in turn.
func TestNegotiation(t *testing.T) {
    dut := ondatra.DUT(t, "dut")
    ate := ondatra.ATE(t, "ate")
    aggID := netutil.NextBundleInterface(t, dut)
    lagTypes := []telemetry.E_IfAggregate_AggregationType{lagTypeLACP, lagTypeSTATIC}
    for _, lagType := range lagTypes {
        top := ate.Topology().New()
        tc := &testCase{
            // min-links is half the port count; half the members may
            // drop before the aggregate goes down.
            minlinks: uint16(len(dut.Ports()) / 2),
            dut:      dut,
            ate:      ate,
            top:      top,
            lagType:  lagType,

            dutPorts: sortPorts(dut.Ports()),
            atePorts: sortPorts(ate.Ports()),
            aggID:    aggID,
            l3header: []ondatra.Header{ondatra.NewIPv4Header()},
        }
        t.Run(fmt.Sprintf("LagType=%s", lagType), func(t *testing.T) {
            tc.configureDUT(t)
            tc.configureATE(t)
            t.Run("VerifyATE", tc.verifyATE)
            t.Run("VerifyDUT", tc.verifyDUT)
            t.Run("MinLinks", tc.verifyMinLinks)
        })
    }
}
aggregate_test: verify DUT oper-status before configuring the ATE (#400)
* aggregate_test: verify DUT oper-status before configuring the ATE
* Clarify the LogYgot call on di, but remove the redundant di check.
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rt_5_2_aggregate_test
import (
"fmt"
"sort"
"testing"
"time"
"github.com/openconfig/featureprofiles/internal/attrs"
"github.com/openconfig/featureprofiles/internal/deviations"
"github.com/openconfig/featureprofiles/internal/fptest"
"github.com/openconfig/ondatra"
"github.com/openconfig/ondatra/netutil"
"github.com/openconfig/ondatra/telemetry"
"github.com/openconfig/ygot/ygot"
)
// TestMain delegates to the featureprofiles test runner.
func TestMain(m *testing.M) {
    fptest.RunTests(m)
}
// Settings for configuring the aggregate testbed with the test
// topology. IxNetwork flow requires both source and destination
// networks be configured on the ATE. It is not possible to send
// packets to the ether.
//
// The testbed consists of ate:port1 -> dut:port1 and dut:port{2-9} ->
// ate:port{2-9}. The first pair is called the "source" pair, and the
// second aggregate link the "destination" pair.
//
// - Source: ate:port1 -> dut:port1 subnet 192.0.2.0/30 2001:db8::0/126
// - Destination: dut:port{2-9} -> ate:port{2-9}
// subnet 192.0.2.4/30 2001:db8::4/126
//
// Note that the first (.0, .4) and last (.3, .7) IPv4 addresses are
// reserved from the subnet for broadcast, so a /30 leaves exactly 2
// usable addresses. This does not apply to IPv6 which allows /127
// for point to point links, but we use /126 so the numbering is
// consistent with IPv4.
//
// A traffic flow is configured from ate:port1 as source and ate:port{2-9}
// as destination.
// IPv4 and IPv6 prefix lengths used on all test subnets.
const (
    plen4 = 30
    plen6 = 126
)

// Endpoint attributes for the source link (ate:port1 <-> dut:port1) and
// the destination aggregate (dut:port{2-9} <-> ate:port{2-9}).
var (
    dutSrc = attrs.Attributes{
        Desc:    "dutsrc",
        IPv4:    "192.0.2.1",
        IPv6:    "2001:db8::1",
        IPv4Len: plen4,
        IPv6Len: plen6,
    }
    ateSrc = attrs.Attributes{
        Name:    "atesrc",
        IPv4:    "192.0.2.2",
        IPv6:    "2001:db8::2",
        IPv4Len: plen4,
        IPv6Len: plen6,
    }
    dutDst = attrs.Attributes{
        Desc:    "dutdst",
        IPv4:    "192.0.2.5",
        IPv6:    "2001:db8::5",
        IPv4Len: plen4,
        IPv6Len: plen6,
    }
    ateDst = attrs.Attributes{
        Name:    "atedst",
        IPv4:    "192.0.2.6",
        IPv6:    "2001:db8::6",
        IPv4Len: plen4,
        IPv6Len: plen6,
    }
)

// Shorthands for the two aggregation types exercised by TestNegotiation.
const (
    lagTypeLACP   = telemetry.IfAggregate_AggregationType_LACP
    lagTypeSTATIC = telemetry.IfAggregate_AggregationType_STATIC
)

// testCase carries all per-run state for one LAG-type iteration:
// devices, topology, sorted port lists, and the aggregate interface name.
type testCase struct {
    lagType telemetry.E_IfAggregate_AggregationType

    dut *ondatra.DUTDevice
    ate *ondatra.ATEDevice
    top *ondatra.ATETopology

    dutPorts []*ondatra.Port // sorted by testbed port ID; [0] is the source link
    atePorts []*ondatra.Port
    aggID    string
    l3header []ondatra.Header
}
// configSrcDUT populates interface i with the description and IPv4/IPv6
// subinterface addresses taken from a. Enabled leaves are only set when
// the InterfaceEnabled deviation is active.
func (*testCase) configSrcDUT(i *telemetry.Interface, a *attrs.Attributes) {
    i.Description = ygot.String(a.Desc)
    if *deviations.InterfaceEnabled {
        i.Enabled = ygot.Bool(true)
    }

    s := i.GetOrCreateSubinterface(0)
    s4 := s.GetOrCreateIpv4()
    if *deviations.InterfaceEnabled {
        s4.Enabled = ygot.Bool(true)
    }
    a4 := s4.GetOrCreateAddress(a.IPv4)
    a4.PrefixLength = ygot.Uint8(plen4)

    s6 := s.GetOrCreateIpv6()
    if *deviations.InterfaceEnabled {
        s6.Enabled = ygot.Bool(true)
    }
    s6.GetOrCreateAddress(a.IPv6).PrefixLength = ygot.Uint8(plen6)
}
// configDstAggregateDUT configures i as the aggregate (LAG) interface:
// addressing from a plus the test case's LAG type.
func (tc *testCase) configDstAggregateDUT(i *telemetry.Interface, a *attrs.Attributes) {
    tc.configSrcDUT(i, a)
    i.Type = ieee8023adLag
    g := i.GetOrCreateAggregation()
    g.LagType = tc.lagType
}
// configDstMemberDUT configures physical port p as a member of the
// aggregate: description, ethernet type, optional enable, and the
// aggregate ID that binds it to the LAG.
func (tc *testCase) configDstMemberDUT(i *telemetry.Interface, p *ondatra.Port) {
    i.Description = ygot.String(p.String())
    i.Type = ethernetCsmacd
    if *deviations.InterfaceEnabled {
        i.Enabled = ygot.Bool(true)
    }
    e := i.GetOrCreateEthernet()
    e.AggregateId = ygot.String(tc.aggID)
}
// setupAggregateAtomically creates the aggregate interface and attaches
// all member ports to it in a single Update, for targets that cannot
// accept the aggregate and its members in separate transactions.
func (tc *testCase) setupAggregateAtomically(t *testing.T) {
    d := &telemetry.Device{}

    if tc.lagType == lagTypeLACP {
        d.GetOrCreateLacp().GetOrCreateInterface(tc.aggID)
    }

    agg := d.GetOrCreateInterface(tc.aggID)
    agg.GetOrCreateAggregation().LagType = tc.lagType
    agg.Type = ieee8023adLag

    // Every DUT port except the source port (index 0) joins the LAG.
    for _, port := range tc.dutPorts[1:] {
        i := d.GetOrCreateInterface(port.Name())
        i.GetOrCreateEthernet().AggregateId = ygot.String(tc.aggID)
        i.Type = ethernetCsmacd
        if *deviations.InterfaceEnabled {
            i.Enabled = ygot.Bool(true)
        }
    }

    p := tc.dut.Config()
    fptest.LogYgot(t, fmt.Sprintf("%s to Update()", tc.dut), p, d)
    p.Update(t, d)
}
// clearAggregate removes the aggregate's min-links setting and then
// detaches every member port from the aggregate interface.
func (tc *testCase) clearAggregate(t *testing.T) {
    // Drop the min-links threshold from the aggregate first.
    tc.dut.Config().Interface(tc.aggID).Aggregation().MinLinks().Delete(t)

    // Detach each member port (all DUT ports except source port 0).
    members := tc.dutPorts[1:]
    for _, member := range members {
        tc.dut.Config().Interface(member.Name()).Ethernet().AggregateId().Delete(t)
    }
}
// configureDUT pushes the full DUT configuration: LACP settings, the
// aggregate interface, the source interface, and each LAG member port.
func (tc *testCase) configureDUT(t *testing.T) {
    t.Logf("dut ports = %v", tc.dutPorts)
    if len(tc.dutPorts) < 2 {
        t.Fatalf("Testbed requires at least 2 ports, got %d", len(tc.dutPorts))
    }

    d := tc.dut.Config()

    if *deviations.AggregateAtomicUpdate {
        // Some targets need the aggregate and its members created in a
        // single atomic update; clear any stale state first.
        tc.clearAggregate(t)
        tc.setupAggregateAtomically(t)
    }

    // LACP activity: ACTIVE for LACP aggregates, UNSET otherwise.
    lacp := &telemetry.Lacp_Interface{Name: ygot.String(tc.aggID)}
    if tc.lagType == lagTypeLACP {
        lacp.LacpMode = telemetry.Lacp_LacpActivityType_ACTIVE
    } else {
        lacp.LacpMode = telemetry.Lacp_LacpActivityType_UNSET
    }
    lacpPath := d.Lacp().Interface(tc.aggID)
    fptest.LogYgot(t, "LACP", lacpPath, lacp)
    lacpPath.Replace(t, lacp)

    // The aggregate (LAG) interface itself.
    agg := &telemetry.Interface{Name: ygot.String(tc.aggID)}
    tc.configDstAggregateDUT(agg, &dutDst)
    aggPath := d.Interface(tc.aggID)
    fptest.LogYgot(t, tc.aggID, aggPath, agg)
    aggPath.Replace(t, agg)

    // The source interface (port 0), not part of the LAG.
    srcp := tc.dutPorts[0]
    srci := &telemetry.Interface{Name: ygot.String(srcp.Name())}
    tc.configSrcDUT(srci, &dutSrc)
    srci.Type = ethernetCsmacd
    srciPath := d.Interface(srcp.Name())
    fptest.LogYgot(t, srcp.String(), srciPath, srci)
    srciPath.Replace(t, srci)

    // Each member port of the LAG.
    for _, port := range tc.dutPorts[1:] {
        i := &telemetry.Interface{Name: ygot.String(port.Name())}
        i.Type = ethernetCsmacd
        if *deviations.InterfaceEnabled {
            i.Enabled = ygot.Bool(true)
        }
        tc.configDstMemberDUT(i, port)
        iPath := d.Interface(port.Name())
        fptest.LogYgot(t, port.String(), iPath, i)
        iPath.Replace(t, i)
    }
}
// configureATE builds and pushes the ATE topology: a plain source
// interface on port 0 and a LAG over the remaining ports, then starts
// protocols.
func (tc *testCase) configureATE(t *testing.T) {
    if len(tc.atePorts) < 2 {
        t.Fatalf("Testbed requires at least 2 ports, got: %v", tc.atePorts)
    }

    p0 := tc.atePorts[0]
    i0 := tc.top.AddInterface(ateSrc.Name).WithPort(p0)
    i0.IPv4().
        WithAddress(ateSrc.IPv4CIDR()).
        WithDefaultGateway(dutSrc.IPv4)
    i0.IPv6().
        WithAddress(ateSrc.IPv6CIDR()).
        WithDefaultGateway(dutSrc.IPv6)

    // Don't use WithLACPEnabled which is for emulated Ixia LACP.
    agg := tc.top.AddInterface(ateDst.Name)
    lag := tc.top.AddLAG("lag").WithPorts(tc.atePorts[1:]...)
    lag.LACP().WithEnabled(tc.lagType == lagTypeLACP)
    agg.WithLAG(lag)

    // Disable FEC for 100G-FR ports because Novus does not support it.
    if p0.PMD() == ondatra.PMD100GFR {
        i0.Ethernet().FEC().WithEnabled(false)
    }
    // If any LAG member is 100G-FR, disable FEC on the aggregate too.
    is100gfr := false
    for _, p := range tc.atePorts[1:] {
        if p.PMD() == ondatra.PMD100GFR {
            is100gfr = true
        }
    }
    if is100gfr {
        agg.Ethernet().FEC().WithEnabled(false)
    }

    agg.IPv4().
        WithAddress(ateDst.IPv4CIDR()).
        WithDefaultGateway(dutDst.IPv4)
    agg.IPv6().
        WithAddress(ateDst.IPv6CIDR()).
        WithDefaultGateway(dutDst.IPv6)

    tc.top.Push(t).StartProtocols(t)
}
// Shorthands for frequently used telemetry enum values.
const (
    ethernetCsmacd = telemetry.IETFInterfaces_InterfaceType_ethernetCsmacd
    ieee8023adLag  = telemetry.IETFInterfaces_InterfaceType_ieee8023adLag
    adminUp        = telemetry.Interface_AdminStatus_UP
    opUp           = telemetry.Interface_OperStatus_UP
    opDown         = telemetry.Interface_OperStatus_DOWN
    full           = telemetry.Ethernet_DuplexMode_FULL
    dynamic        = telemetry.IfIp_NeighborOrigin_DYNAMIC
)
// verifyAggID asserts that the DUT reports dp as a member of the
// expected aggregate interface.
func (tc *testCase) verifyAggID(t *testing.T, dp *ondatra.Port) {
    iface := tc.dut.Telemetry().Interface(dp.Name()).Get(t)
    got := iface.GetEthernet().GetAggregateId()
    if got != tc.aggID {
        t.Errorf("%s LagID got %v, want %v", dp, got, tc.aggID)
    }
}
// verifyInterfaceDUT checks that DUT port dp is administratively up and
// waits up to a minute for it to become operationally up.
func (tc *testCase) verifyInterfaceDUT(t *testing.T, dp *ondatra.Port) {
    dip := tc.dut.Telemetry().Interface(dp.Name())
    di := dip.Get(t)
    fptest.LogYgot(t, dp.String()+" before Await", dip, di)
    if got := di.GetAdminStatus(); got != adminUp {
        t.Errorf("%s admin-status got %v, want %v", dp, got, adminUp)
    }
    // LAG members may fall behind, so wait for them to be up.
    dip.OperStatus().Await(t, time.Minute, opUp)
}
// verifyDUT checks the telemetry against the parameters set by
// configureDUT(): the aggregate LAG type, the source link, and every
// LAG member port.
func (tc *testCase) verifyDUT(t *testing.T) {
    // Wait for LAG negotiation and verify LAG type for the aggregate interface.
    tc.dut.Telemetry().Interface(tc.aggID).Type().Await(t, time.Minute, ieee8023adLag)
    for n, port := range tc.dutPorts {
        if n < 1 {
            // We designate port 0 as the source link, not part of LAG.
            t.Run(fmt.Sprintf("%s [source]", port.ID()), func(t *testing.T) {
                tc.verifyInterfaceDUT(t, port)
            })
            continue
        }
        t.Run(fmt.Sprintf("%s [member]", port.ID()), func(t *testing.T) {
            tc.verifyInterfaceDUT(t, port)
            tc.verifyAggID(t, port)
        })
    }
}
// verifyATE checks the telemetry against the parameters set by
// configureDUT(): the ATE source port must be operationally up.
func (tc *testCase) verifyATE(t *testing.T) {
    srcPort := tc.atePorts[0]
    ifacePath := tc.ate.Telemetry().Interface(srcPort.Name())
    fptest.LogYgot(t, srcPort.String(), ifacePath, ifacePath.Get(t))
    // State for the interface.
    got := ifacePath.OperStatus().Get(t)
    if got != opUp {
        t.Errorf("%s oper-status got %v, want %v", srcPort, got, opUp)
    }
}
// sortPorts sorts the ports in place by testbed port ID and returns the
// same slice for convenience.
func sortPorts(ports []*ondatra.Port) []*ondatra.Port {
    byID := func(a, b int) bool {
        return ports[a].ID() < ports[b].ID()
    }
    sort.SliceStable(ports, byID)
    return ports
}
// verifyMinLinks sets min-links to (#members - 1) and checks the
// aggregate's oper-status as member links are disabled one at a time:
// the LAG must stay UP while at least min-links members are up, and
// report LOWER_LAYER_DOWN once fewer remain. ATE ports are re-enabled
// before returning.
func (tc *testCase) verifyMinLinks(t *testing.T) {
    totalPorts := len(tc.dutPorts)
    numLagPorts := totalPorts - 1
    minLinks := uint16(numLagPorts - 1)
    tc.dut.Config().Interface(tc.aggID).Aggregation().MinLinks().Replace(t, minLinks)

    tests := []struct {
        desc      string
        downCount int
        want      telemetry.E_Interface_OperStatus
    }{
        {
            desc:      "MinLink + 1",
            downCount: 0,
            want:      opUp,
        },
        {
            desc:      "MinLink",
            downCount: 1,
            want:      opUp,
        },
        {
            desc:      "MinLink - 1",
            downCount: 2,
            want:      telemetry.Interface_OperStatus_LOWER_LAYER_DOWN,
        },
    }
    for _, tf := range tests {
        t.Run(tf.desc, func(t *testing.T) {
            for _, port := range tc.atePorts[1 : 1+tf.downCount] {
                tc.ate.Actions().NewSetPortState().WithPort(port).WithEnabled(false).Send(t)
                // Linked DUT and ATE ports have the same ID.
                dp := tc.dut.Port(t, port.ID())
                dip := tc.dut.Telemetry().Interface(dp.Name())
                t.Logf("Awaiting DUT port down: %v", dp)
                dip.OperStatus().Await(t, time.Minute, opDown)
                t.Log("Port is down.")
            }
            tc.dut.Telemetry().Interface(tc.aggID).OperStatus().Await(t, 1*time.Minute, tf.want)
        })
    }
    // Bring the ATE ports back up.
    for _, port := range tc.atePorts {
        tc.ate.Actions().NewSetPortState().WithPort(port).WithEnabled(true).Send(t)
    }
}
// TestNegotiation exercises LAG formation with both LACP and STATIC
// aggregation, verifying DUT and ATE state and min-links behavior for
// each LAG type in turn.
func TestNegotiation(t *testing.T) {
    dut := ondatra.DUT(t, "dut")
    ate := ondatra.ATE(t, "ate")
    aggID := netutil.NextBundleInterface(t, dut)
    lagTypes := []telemetry.E_IfAggregate_AggregationType{lagTypeLACP, lagTypeSTATIC}
    for _, lagType := range lagTypes {
        top := ate.Topology().New()
        tc := &testCase{
            dut:     dut,
            ate:     ate,
            top:     top,
            lagType: lagType,

            dutPorts: sortPorts(dut.Ports()),
            atePorts: sortPorts(ate.Ports()),
            aggID:    aggID,
            l3header: []ondatra.Header{ondatra.NewIPv4Header()},
        }
        t.Run(fmt.Sprintf("LagType=%s", lagType), func(t *testing.T) {
            // Verify the DUT before bringing up the ATE side, so DUT
            // oper-status reflects the DUT config alone.
            tc.configureDUT(t)
            t.Run("VerifyDUT", tc.verifyDUT)
            tc.configureATE(t)
            t.Run("VerifyATE", tc.verifyATE)
            t.Run("MinLinks", tc.verifyMinLinks)
        })
    }
}
|
// Package dht implements a distributed hash table that satisfies the ipfs routing
// interface. This DHT is modeled after kademlia with Coral and S/Kademlia modifications.
package dht
import (
"crypto/rand"
"fmt"
"sync"
"time"
peer "github.com/jbenet/go-ipfs/p2p/peer"
routing "github.com/jbenet/go-ipfs/routing"
u "github.com/jbenet/go-ipfs/util"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
goprocess "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess"
)
// DefaultBootstrapQueries specifies how many queries to run,
// if the user does not specify a different number as an option.
//
// For now, this is set to 16 queries, which is an aggressive number.
// We are currently more interested in ensuring we have a properly formed
// DHT than making sure our dht minimizes traffic. Once we are more certain
// of our implementation's robustness, we should lower this down to 8 or 4.
//
// Note there is also a tradeoff between the bootstrap period and the number
// of queries. We could support a higher period with a smaller number of
// queries.
const DefaultBootstrapQueries = 16
// DefaultBootstrapPeriod specifies how often to periodically run bootstrap,
// if the user does not specify a different number as an option.
//
// For now, this is set to 10 seconds, which is an aggressive period. We are
// currently more interested in ensuring we have a properly formed
// DHT than making sure our dht minimizes traffic. Once we are more certain
// of our implementation's robustness, we should lower this down to 30s or 1m.
//
// Note there is also a tradeoff between the bootstrap period and the number
// of queries. We could support a higher period with a smaller number of
// queries.
const DefaultBootstrapPeriod = time.Duration(10 * time.Second)
// Bootstrap runs bootstrapping once, then calls BootstrapOnSignal with default
// parameters: DefaultBootstrapQueries and DefaultBootstrapPeriod. This allows
// the user to catch an error off the bat if the connections are faulty. It also
// allows BootstrapOnSignal not to run bootstrap at the beginning, which is useful
// for instrumenting it on tests, or delaying bootstrap until the network is online
// and connected to at least a few nodes.
//
// Like PeriodicBootstrap, Bootstrap returns a process, so the user can stop it.
func (dht *IpfsDHT) Bootstrap() (goprocess.Process, error) {
    if err := dht.runBootstrap(dht.Context(), DefaultBootstrapQueries); err != nil {
        return nil, err
    }
    // NOTE(review): time.Tick's underlying ticker is never stopped, so it
    // lives for the lifetime of the program even after the returned
    // process is closed — consider time.NewTicker plus an explicit Stop.
    sig := time.Tick(DefaultBootstrapPeriod)
    return dht.BootstrapOnSignal(DefaultBootstrapQueries, sig)
}
// BootstrapOnSignal ensures the dht routing table remains healthy as peers come and go.
// It builds up a list of peers by requesting random peer IDs. The bootstrap
// process will run a number of queries each time, and run every time signal fires.
// These parameters are configurable.
//
// BootstrapOnSignal returns a process, so the user can stop it.
func (dht *IpfsDHT) BootstrapOnSignal(queries int, signal <-chan time.Time) (goprocess.Process, error) {
    if queries <= 0 {
        return nil, fmt.Errorf("invalid number of queries: %d", queries)
    }

    if signal == nil {
        return nil, fmt.Errorf("invalid signal: %v", signal)
    }

    proc := goprocess.Go(func(worker goprocess.Process) {
        // Run one bootstrap round per signal tick until the process is closed.
        for {
            select {
            case <-worker.Closing():
                log.Debug("dht bootstrapper shutting down")
                return

            case <-signal:
                // it would be useful to be able to send out signals of when we bootstrap, too...
                // maybe this is a good case for whole module event pub/sub?

                ctx := dht.Context()
                if err := dht.runBootstrap(ctx, queries); err != nil {
                    log.Error(err)
                    // A bootstrapping error is important to notice but not fatal.
                    // maybe the client should be able to consume these errors,
                    // though I dont have a clear use case in mind-- what **could**
                    // the client do if one of the bootstrap calls fails?
                    //
                    // This is also related to the core's bootstrap failures.
                    // superviseConnections should perhaps allow clients to detect
                    // bootstrapping problems.
                    //
                    // Anyway, passing errors could be done with a bootstrapper object.
                    // this would imply the client should be able to consume a lot of
                    // other non-fatal dht errors too. providing this functionality
                    // should be done correctly DHT-wide.
                    // NB: whatever the design, clients must ensure they drain errors!
                    // This pattern is common to many things, perhaps long-running services
                    // should have something like an ErrStream that allows clients to consume
                    // periodic errors and take action. It should allow the user to also
                    // ignore all errors with something like an ErrStreamDiscard. We should
                    // study what other systems do for ideas.
                }
            }
        }
    })

    return proc, nil
}
// runBootstrap builds up the routing table by issuing FindPeer queries for
// random peer IDs. routing.ErrNotFound is the expected (successful) outcome
// for a random ID; any other error is collected and returned as a MultiErr.
func (dht *IpfsDHT) runBootstrap(ctx context.Context, queries int) error {
    var merrlk sync.Mutex // guards merr: runQuery may run on several goroutines
    var merr u.MultiErr
    addErr := func(err error) {
        merrlk.Lock()
        merr = append(merr, err)
        merrlk.Unlock()
    }

    randomID := func() peer.ID {
        // 16 random bytes is not a valid peer id. it may be fine becuase
        // the dht will rehash to its own keyspace anyway.
        id := make([]byte, 16)
        if _, err := rand.Read(id); err != nil {
            // crypto/rand should never fail on a healthy system; log loudly
            // rather than silently bootstrapping with zeroed IDs.
            log.Errorf("bootstrap randomID: %s", err)
        }
        return peer.ID(id)
    }

    // runQuery performs one FindPeer lookup for a (random) ID.
    runQuery := func(ctx context.Context, id peer.ID) {
        p, err := dht.FindPeer(ctx, id)
        if err == routing.ErrNotFound {
            // this isn't an error. this is precisely what we expect.
        } else if err != nil {
            addErr(err)
        } else {
            // woah, actually found a peer with that ID? this shouldn't happen normally
            // (as the ID we use is not a real ID). this is an odd error worth logging.
            err := fmt.Errorf("Bootstrap peer error: Actually FOUND peer. (%s, %s)", id, p)
            log.Errorf("%s", err)
            addErr(err)
        }
    }

    // bootstrap sequentially, as results will compound
    sequential := true
    if sequential {
        // these should be parallel normally. but can make them sequential for debugging.
        // note that the core/bootstrap context deadline should be extended too for that.
        for i := 0; i < queries; i++ {
            id := randomID()
            log.Debugf("Bootstrapping query (%d/%d) to random ID: %s", i+1, queries, id)
            runQuery(ctx, id)
        }
    } else {
        // note on parallelism here: the context is passed in to the queries, so they
        // **should** exit when it exceeds, making this function exit on ctx cancel.
        // normally, we should be selecting on ctx.Done() here too, but this gets
        // complicated to do with WaitGroup, and doesnt wait for the children to exit.
        var wg sync.WaitGroup
        for i := 0; i < queries; i++ {
            wg.Add(1)
            // Pass i as an argument: goroutines launched in a loop would
            // otherwise all share the single loop variable (pre-Go 1.22)
            // and log the wrong query index.
            go func(i int) {
                defer wg.Done()
                id := randomID()
                log.Debugf("Bootstrapping query (%d/%d) to random ID: %s", i+1, queries, id)
                runQuery(ctx, id)
            }(i)
        }
        wg.Wait()
    }

    if len(merr) > 0 {
        return merr
    }
    return nil
}
try less aggressive bootstrap
// Package dht implements a distributed hash table that satisfies the ipfs routing
// interface. This DHT is modeled after kademlia with Coral and S/Kademlia modifications.
package dht
import (
"crypto/rand"
"fmt"
"sync"
"time"
peer "github.com/jbenet/go-ipfs/p2p/peer"
routing "github.com/jbenet/go-ipfs/routing"
u "github.com/jbenet/go-ipfs/util"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
goprocess "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess"
)
// DefaultBootstrapQueries specifies how many queries to run,
// if the user does not specify a different number as an option.
//
// For now, this is set to a single query per bootstrap round, to keep
// bootstrap traffic low; a query every DefaultBootstrapPeriod still keeps
// the routing table warm. (An earlier revision used 16 aggressive queries.)
//
// Note there is also a tradeoff between the bootstrap period and the number
// of queries. We could support a higher period with a smaller number of
// queries.
const DefaultBootstrapQueries = 1
// DefaultBootstrapPeriod specifies how often to periodically run bootstrap,
// if the user does not specify a different number as an option.
//
// For now, this is set to 10 seconds, which is an aggressive period. We are
// currently more interested in ensuring we have a properly formed
// DHT than making sure our dht minimizes traffic. Once we are more certain
// of our implementation's robustness, we should lower this down to 30s or 1m.
//
// Note there is also a tradeoff between the bootstrap period and the number
// of queries. We could support a higher period with a smaller number of
// queries.
const DefaultBootstrapPeriod = time.Duration(10 * time.Second)
// Bootstrap runs bootstrapping once, then calls BootstrapOnSignal with default
// parameters: DefaultBootstrapQueries and DefaultBootstrapPeriod. This allows
// the user to catch an error off the bat if the connections are faulty. It also
// allows BootstrapOnSignal not to run bootstrap at the beginning, which is useful
// for instrumenting it on tests, or delaying bootstrap until the network is online
// and connected to at least a few nodes.
//
// Like PeriodicBootstrap, Bootstrap returns a process, so the user can stop it.
func (dht *IpfsDHT) Bootstrap() (goprocess.Process, error) {
    if err := dht.runBootstrap(dht.Context(), DefaultBootstrapQueries); err != nil {
        return nil, err
    }
    // NOTE(review): time.Tick's underlying ticker is never stopped, so it
    // lives for the lifetime of the program even after the returned
    // process is closed — consider time.NewTicker plus an explicit Stop.
    sig := time.Tick(DefaultBootstrapPeriod)
    return dht.BootstrapOnSignal(DefaultBootstrapQueries, sig)
}
// BootstrapOnSignal ensures the dht routing table remains healthy as peers come and go.
// It builds up a list of peers by requesting random peer IDs. The bootstrap
// process will run a number of queries each time, and run every time signal fires.
// These parameters are configurable.
//
// BootstrapOnSignal returns a process, so the user can stop it.
func (dht *IpfsDHT) BootstrapOnSignal(queries int, signal <-chan time.Time) (goprocess.Process, error) {
    if queries <= 0 {
        return nil, fmt.Errorf("invalid number of queries: %d", queries)
    }

    if signal == nil {
        return nil, fmt.Errorf("invalid signal: %v", signal)
    }

    proc := goprocess.Go(func(worker goprocess.Process) {
        // Run one bootstrap round per signal tick until the process is closed.
        for {
            select {
            case <-worker.Closing():
                log.Debug("dht bootstrapper shutting down")
                return

            case <-signal:
                // it would be useful to be able to send out signals of when we bootstrap, too...
                // maybe this is a good case for whole module event pub/sub?

                ctx := dht.Context()
                if err := dht.runBootstrap(ctx, queries); err != nil {
                    log.Error(err)
                    // A bootstrapping error is important to notice but not fatal.
                    // maybe the client should be able to consume these errors,
                    // though I dont have a clear use case in mind-- what **could**
                    // the client do if one of the bootstrap calls fails?
                    //
                    // This is also related to the core's bootstrap failures.
                    // superviseConnections should perhaps allow clients to detect
                    // bootstrapping problems.
                    //
                    // Anyway, passing errors could be done with a bootstrapper object.
                    // this would imply the client should be able to consume a lot of
                    // other non-fatal dht errors too. providing this functionality
                    // should be done correctly DHT-wide.
                    // NB: whatever the design, clients must ensure they drain errors!
                    // This pattern is common to many things, perhaps long-running services
                    // should have something like an ErrStream that allows clients to consume
                    // periodic errors and take action. It should allow the user to also
                    // ignore all errors with something like an ErrStreamDiscard. We should
                    // study what other systems do for ideas.
                }
            }
        }
    })

    return proc, nil
}
// runBootstrap builds up the routing table by issuing FindPeer queries for
// random peer IDs. routing.ErrNotFound is the expected (successful) outcome
// for a random ID; any other error is collected and returned as a MultiErr.
func (dht *IpfsDHT) runBootstrap(ctx context.Context, queries int) error {
    var merrlk sync.Mutex // guards merr: runQuery may run on several goroutines
    var merr u.MultiErr
    addErr := func(err error) {
        merrlk.Lock()
        merr = append(merr, err)
        merrlk.Unlock()
    }

    randomID := func() peer.ID {
        // 16 random bytes is not a valid peer id. it may be fine becuase
        // the dht will rehash to its own keyspace anyway.
        id := make([]byte, 16)
        if _, err := rand.Read(id); err != nil {
            // crypto/rand should never fail on a healthy system; log loudly
            // rather than silently bootstrapping with zeroed IDs.
            log.Errorf("bootstrap randomID: %s", err)
        }
        return peer.ID(id)
    }

    // runQuery performs one FindPeer lookup for a (random) ID.
    runQuery := func(ctx context.Context, id peer.ID) {
        p, err := dht.FindPeer(ctx, id)
        if err == routing.ErrNotFound {
            // this isn't an error. this is precisely what we expect.
        } else if err != nil {
            addErr(err)
        } else {
            // woah, actually found a peer with that ID? this shouldn't happen normally
            // (as the ID we use is not a real ID). this is an odd error worth logging.
            err := fmt.Errorf("Bootstrap peer error: Actually FOUND peer. (%s, %s)", id, p)
            log.Errorf("%s", err)
            addErr(err)
        }
    }

    // bootstrap sequentially, as results will compound
    sequential := true
    if sequential {
        // these should be parallel normally. but can make them sequential for debugging.
        // note that the core/bootstrap context deadline should be extended too for that.
        for i := 0; i < queries; i++ {
            id := randomID()
            log.Debugf("Bootstrapping query (%d/%d) to random ID: %s", i+1, queries, id)
            runQuery(ctx, id)
        }
    } else {
        // note on parallelism here: the context is passed in to the queries, so they
        // **should** exit when it exceeds, making this function exit on ctx cancel.
        // normally, we should be selecting on ctx.Done() here too, but this gets
        // complicated to do with WaitGroup, and doesnt wait for the children to exit.
        var wg sync.WaitGroup
        for i := 0; i < queries; i++ {
            wg.Add(1)
            // Pass i as an argument: goroutines launched in a loop would
            // otherwise all share the single loop variable (pre-Go 1.22)
            // and log the wrong query index.
            go func(i int) {
                defer wg.Done()
                id := randomID()
                log.Debugf("Bootstrapping query (%d/%d) to random ID: %s", i+1, queries, id)
                runQuery(ctx, id)
            }(i)
        }
        wg.Wait()
    }

    if len(merr) > 0 {
        return merr
    }
    return nil
}
|
package cpu6502
import "fmt"
// opcode is a single 6502 machine-code byte.
type opcode byte

// Instruction describes one mnemonic and the opcode byte it uses in each
// addressing mode; a zero (null) field means the mode is not supported.
type Instruction struct {
	Name            string
	Imm             opcode
	ZP, ZPX, ZPY    opcode
	ABS, ABSX, ABSY opcode
	IND, INDX, INDY opcode
	SNGL, BRA       opcode
}
// Optable maps an opcode byte to its decoded operation; absent keys are
// undefined/unimplemented opcodes. Populated in init from Opcodes.
var Optable map[opcode]*Op

// Op is a single decoded operation.
type Op struct {
	F    func(*Cpu, byte) // implementation; receives the resolved operand
	Name string           // mnemonic, e.g. "ADC"
	Mode mode             // addressing mode used to fetch the operand
}
// mode is a 6502 addressing mode.
type mode int

const (
	MODE_IMM  mode = iota // immediate
	MODE_ZP               // zero page
	MODE_ZPX              // zero page,X
	MODE_ZPY              // zero page,Y
	MODE_ABS              // absolute
	MODE_ABSX             // absolute,X
	MODE_ABSY             // absolute,Y
	MODE_IND              // indirect
	MODE_INDX             // (indirect,X)
	MODE_INDY             // (indirect),Y
	MODE_SNGL             // single byte (implied/accumulator)
	MODE_BRA              // relative branch
)
// Cpu models the 6502 registers, status byte, and address space.
type Cpu struct {
	A, X, Y, S, P byte   // accumulator, index registers, stack pointer, status
	PC            uint16 // program counter
	// NOTE(review): a full 64 KiB address space is 0x10000 bytes; as declared,
	// address 0xffff is not addressable — confirm whether that is intended.
	Mem [0xffff]byte
}
// Step fetches, decodes, and executes one instruction at PC. Unknown
// opcodes are silently skipped; unimplemented addressing modes panic.
func (c *Cpu) Step() {
	b := c.Mem[c.PC]
	c.PC++
	op, found := Optable[opcode(b)]
	if !found {
		return
	}
	var operand byte
	switch op.Mode {
	case MODE_IMM:
		// Immediate mode: the operand is the byte following the opcode.
		operand = c.Mem[c.PC]
		c.PC++
	default:
		panic("6502: bad address mode")
	}
	op.F(c, operand)
}
// setNV updates the status register from the result v.
// NOTE(review): despite the name, this sets the Zero (bit 1, 0x02) and
// Negative (bit 7, 0x80) flags — not oVerflow; consider renaming to setNZ.
func (c *Cpu) setNV(v byte) {
	// Zero flag: cleared when v != 0 (0xfd masks off bit 1), set otherwise.
	if v != 0 {
		c.P &= 0xfd
	} else {
		c.P |= 0x02
	}
	// Negative flag mirrors bit 7 of v.
	if v&0x80 != 0 {
		c.P |= 0x80
	} else {
		c.P &= 0x7f
	}
}
// Status-flag helpers: SEC/CLC set and clear Carry; SEV/CLV set and clear
// oVerflow (0xfe and 0xbf are the complements of P_C and P_V).
func (c *Cpu) SEC() { c.P |= P_C }
func (c *Cpu) CLC() { c.P &= 0xfe }
func (c *Cpu) SEV() { c.P |= P_V }
func (c *Cpu) CLV() { c.P &= 0xbf }

// C and V report the Carry and oVerflow flags.
func (c *Cpu) C() bool { return c.p(P_C) }
func (c *Cpu) V() bool { return c.p(P_V) }

// p reports whether any status bit in mask v is set.
func (c *Cpu) p(v byte) bool { return c.P&v != 0 }

// Status register bit masks.
const (
	P_C = 0x01 // carry
	P_V = 0x40 // overflow
)
// Print dumps each register as decimal, hex, and 16-bit binary. The [2]
// argument indexes make every verb after the first reuse argument 2.
func (c *Cpu) Print() {
	const f = "%2s: %5d 0x%04[2]X %016[2]b\n"
	fmt.Printf(f, "A", c.A)
	fmt.Printf(f, "X", c.X)
	fmt.Printf(f, "Y", c.Y)
	fmt.Printf(f, "P", c.P)
	fmt.Printf(f, "PC", c.PC)
}
// init builds the opcode dispatch table from the Opcodes listing.
// Only immediate-mode opcodes are registered so far.
func init() {
	Optable = make(map[opcode]*Op)
	for _, inst := range Opcodes {
		if inst.Imm != null {
			Optable[inst.Imm] = &Op{
				F:    Functions[inst.Name],
				Name: inst.Name,
				Mode: MODE_IMM,
			}
		}
	}
}
// Functions maps a mnemonic to its implementation. Each function receives
// the CPU and the operand already resolved by Step.
var Functions = map[string]func(*Cpu, byte){
	// ADC: add with carry; updates C, V, N, and Z (binary mode only —
	// decimal mode is not handled).
	"ADC": func(c *Cpu, v byte) {
		// Tentatively set V when the operands share a sign (only a
		// same-sign add can overflow)...
		if (c.A^v)&0x80 != 0 {
			c.CLV()
		} else {
			c.SEV()
		}
		a := int(c.A) + int(v)
		if c.C() {
			a++
		}
		// ...then clear V again when the widened sum shows no signed
		// overflow actually happened.
		// NOTE(review): verify these bounds against a reference ADC
		// implementation — TODO confirm.
		if a > 0xff {
			c.SEC()
			if c.V() && a >= 0x180 {
				c.CLV()
			}
		} else {
			c.CLC()
			if c.V() && a < 0x80 {
				c.CLV()
			}
		}
		c.A = byte(a & 0xff)
		c.setNV(c.A)
	},
}
// null marks "mode not supported" in the Opcodes table.
// NOTE(review): BRK's single-byte opcode is legitimately 0x00, which is
// indistinguishable from null — confirm before registering SNGL opcodes.
const null = 0

// Opcodes lists every documented 6502 instruction and the opcode byte it
// uses in each addressing mode.
var Opcodes = []Instruction{
	/* Name, Imm, ZP, ZPX, ZPY, ABS, ABSX, ABSY, IND, INDX, INDY, SNGL, BRA */
	{"ADC", 0x69, 0x65, 0x75, null, 0x6d, 0x7d, 0x79, null, 0x61, 0x71, null, null},
	{"AND", 0x29, 0x25, 0x35, null, 0x2d, 0x3d, 0x39, null, 0x21, 0x31, null, null},
	{"ASL", null, 0x06, 0x16, null, 0x0e, 0x1e, null, null, null, null, 0x0a, null},
	{"BCC", null, null, null, null, null, null, null, null, null, null, null, 0x90},
	{"BCS", null, null, null, null, null, null, null, null, null, null, null, 0xb0},
	{"BEQ", null, null, null, null, null, null, null, null, null, null, null, 0xf0},
	{"BIT", null, 0x24, null, null, 0x2c, null, null, null, null, null, null, null},
	{"BMI", null, null, null, null, null, null, null, null, null, null, null, 0x30},
	{"BNE", null, null, null, null, null, null, null, null, null, null, null, 0xd0},
	{"BPL", null, null, null, null, null, null, null, null, null, null, null, 0x10},
	{"BRK", null, null, null, null, null, null, null, null, null, null, 0x00, null},
	{"BVC", null, null, null, null, null, null, null, null, null, null, null, 0x50},
	{"BVS", null, null, null, null, null, null, null, null, null, null, null, 0x70},
	{"CLC", null, null, null, null, null, null, null, null, null, null, 0x18, null},
	{"CLD", null, null, null, null, null, null, null, null, null, null, 0xd8, null},
	{"CLI", null, null, null, null, null, null, null, null, null, null, 0x58, null},
	{"CLV", null, null, null, null, null, null, null, null, null, null, 0xb8, null},
	{"CMP", 0xc9, 0xc5, 0xd5, null, 0xcd, 0xdd, 0xd9, null, 0xc1, 0xd1, null, null},
	{"CPX", 0xe0, 0xe4, null, null, 0xec, null, null, null, null, null, null, null},
	{"CPY", 0xc0, 0xc4, null, null, 0xcc, null, null, null, null, null, null, null},
	{"DEC", null, 0xc6, 0xd6, null, 0xce, 0xde, null, null, null, null, null, null},
	{"DEX", null, null, null, null, null, null, null, null, null, null, 0xca, null},
	{"DEY", null, null, null, null, null, null, null, null, null, null, 0x88, null},
	{"EOR", 0x49, 0x45, 0x55, null, 0x4d, 0x5d, 0x59, null, 0x41, 0x51, null, null},
	{"INC", null, 0xe6, 0xf6, null, 0xee, 0xfe, null, null, null, null, null, null},
	{"INX", null, null, null, null, null, null, null, null, null, null, 0xe8, null},
	{"INY", null, null, null, null, null, null, null, null, null, null, 0xc8, null},
	{"JMP", null, null, null, null, 0x4c, null, null, 0x6c, null, null, null, null},
	{"JSR", null, null, null, null, 0x20, null, null, null, null, null, null, null},
	{"LDA", 0xa9, 0xa5, 0xb5, null, 0xad, 0xbd, 0xb9, null, 0xa1, 0xb1, null, null},
	{"LDX", 0xa2, 0xa6, null, 0xb6, 0xae, null, 0xbe, null, null, null, null, null},
	{"LDY", 0xa0, 0xa4, 0xb4, null, 0xac, 0xbc, null, null, null, null, null, null},
	{"LSR", null, 0x46, 0x56, null, 0x4e, 0x5e, null, null, null, null, 0x4a, null},
	{"NOP", null, null, null, null, null, null, null, null, null, null, 0xea, null},
	{"ORA", 0x09, 0x05, 0x15, null, 0x0d, 0x1d, 0x19, null, 0x01, 0x11, null, null},
	{"PHA", null, null, null, null, null, null, null, null, null, null, 0x48, null},
	{"PHP", null, null, null, null, null, null, null, null, null, null, 0x08, null},
	{"PLA", null, null, null, null, null, null, null, null, null, null, 0x68, null},
	{"PLP", null, null, null, null, null, null, null, null, null, null, 0x28, null},
	{"ROL", null, 0x26, 0x36, null, 0x2e, 0x3e, null, null, null, null, 0x2a, null},
	{"ROR", null, 0x66, 0x76, null, 0x6e, 0x7e, null, null, null, null, 0x6a, null},
	{"RTI", null, null, null, null, null, null, null, null, null, null, 0x40, null},
	{"RTS", null, null, null, null, null, null, null, null, null, null, 0x60, null},
	{"SBC", 0xe9, 0xe5, 0xf5, null, 0xed, 0xfd, 0xf9, null, 0xe1, 0xf1, null, null},
	{"SEC", null, null, null, null, null, null, null, null, null, null, 0x38, null},
	{"SED", null, null, null, null, null, null, null, null, null, null, 0xf8, null},
	{"SEI", null, null, null, null, null, null, null, null, null, null, 0x78, null},
	{"STA", null, 0x85, 0x95, null, 0x8d, 0x9d, 0x99, null, 0x81, 0x91, null, null},
	{"STX", null, 0x86, null, 0x96, 0x8e, null, null, null, null, null, null, null},
	{"STY", null, 0x84, 0x94, null, 0x8c, null, null, null, null, null, null, null},
	{"TAX", null, null, null, null, null, null, null, null, null, null, 0xaa, null},
	{"TAY", null, null, null, null, null, null, null, null, null, null, 0xa8, null},
	{"TSX", null, null, null, null, null, null, null, null, null, null, 0xba, null},
	{"TXA", null, null, null, null, null, null, null, null, null, null, 0x8a, null},
	{"TXS", null, null, null, null, null, null, null, null, null, null, 0x9a, null},
	{"TYA", null, null, null, null, null, null, null, null, null, null, 0x98, null},
}
Use an array instead of a map for the opcode dispatch table.
package cpu6502
import "fmt"
// Instruction describes one mnemonic and the opcode byte it uses in each
// addressing mode; a zero (null) field means the mode is not supported.
type Instruction struct {
	Name            string
	Imm             byte
	ZP, ZPX, ZPY    byte
	ABS, ABSX, ABSY byte
	IND, INDX, INDY byte
	SNGL, BRA       byte
}
// Optable maps every possible opcode byte to its operation; entries for
// undefined opcodes are nil. Sized 0x100 so that any byte value (0x00-0xff)
// is a valid index — the previous [0xff] array had indices 0-254 only, so
// Step's Optable[inst] would panic on an opcode byte of 0xff.
var Optable [0x100]*Op

// Op is a single decoded operation.
type Op struct {
	F    func(*Cpu, byte) // implementation; receives the resolved operand
	Name string           // mnemonic, e.g. "ADC"
	Mode mode             // addressing mode used to fetch the operand
}
// mode is a 6502 addressing mode.
type mode int

const (
	MODE_IMM  mode = iota // immediate
	MODE_ZP               // zero page
	MODE_ZPX              // zero page,X
	MODE_ZPY              // zero page,Y
	MODE_ABS              // absolute
	MODE_ABSX             // absolute,X
	MODE_ABSY             // absolute,Y
	MODE_IND              // indirect
	MODE_INDX             // (indirect,X)
	MODE_INDY             // (indirect),Y
	MODE_SNGL             // single byte (implied/accumulator)
	MODE_BRA              // relative branch
)

// Cpu models the 6502 registers, status byte, and address space.
type Cpu struct {
	A, X, Y, S, P byte   // accumulator, index registers, stack pointer, status
	PC            uint16 // program counter
	// NOTE(review): a full 64 KiB address space is 0x10000 bytes; as declared,
	// address 0xffff is not addressable — confirm whether that is intended.
	Mem [0xffff]byte
}
// Step fetches, decodes, and executes one instruction at PC. Unknown
// opcodes are silently skipped; unimplemented addressing modes panic.
func (c *Cpu) Step() {
	op := Optable[c.Mem[c.PC]]
	c.PC++
	if op == nil {
		return
	}
	var operand byte
	switch op.Mode {
	case MODE_IMM:
		// Immediate mode: the operand is the byte following the opcode.
		operand = c.Mem[c.PC]
		c.PC++
	default:
		panic("6502: bad address mode")
	}
	op.F(c, operand)
}
// setNV refreshes the status register from the result v. Despite the name,
// this updates the Zero (bit 1) and Negative (bit 7) flags, not oVerflow.
func (c *Cpu) setNV(v byte) {
	// Zero flag: set only when the result is zero.
	if v == 0 {
		c.P |= 0x02
	} else {
		c.P &^= 0x02
	}
	// Negative flag mirrors bit 7 of the result.
	if v&0x80 == 0 {
		c.P &^= 0x80
	} else {
		c.P |= 0x80
	}
}
// Status-flag helpers: SEC/CLC set and clear Carry; SEV/CLV set and clear
// oVerflow (0xfe and 0xbf are the complements of P_C and P_V).
func (c *Cpu) SEC() { c.P |= P_C }
func (c *Cpu) CLC() { c.P &= 0xfe }
func (c *Cpu) SEV() { c.P |= P_V }
func (c *Cpu) CLV() { c.P &= 0xbf }

// C and V report the Carry and oVerflow flags.
func (c *Cpu) C() bool { return c.p(P_C) }
func (c *Cpu) V() bool { return c.p(P_V) }

// p reports whether any status bit in mask v is set.
func (c *Cpu) p(v byte) bool { return c.P&v != 0 }

// Status register bit masks.
const (
	P_C = 0x01 // carry
	P_V = 0x40 // overflow
)
// Print dumps each register as decimal, hex, and 16-bit binary. The [2]
// argument indexes make every verb after the first reuse argument 2.
func (c *Cpu) Print() {
	const f = "%2s: %5d 0x%04[2]X %016[2]b\n"
	fmt.Printf(f, "A", c.A)
	fmt.Printf(f, "X", c.X)
	fmt.Printf(f, "Y", c.Y)
	fmt.Printf(f, "P", c.P)
	fmt.Printf(f, "PC", c.PC)
}
// init builds the opcode dispatch table from the Opcodes listing.
// Only immediate-mode opcodes are registered so far.
func init() {
	for _, inst := range Opcodes {
		if inst.Imm != null {
			Optable[inst.Imm] = &Op{
				F:    Functions[inst.Name],
				Name: inst.Name,
				Mode: MODE_IMM,
			}
		}
	}
}
// Functions maps a mnemonic to its implementation. Each function receives
// the CPU and the operand already resolved by Step.
var Functions = map[string]func(*Cpu, byte){
	// ADC: add with carry; updates C, V, N, and Z (binary mode only —
	// decimal mode is not handled).
	"ADC": func(c *Cpu, v byte) {
		// Tentatively set V when the operands share a sign (only a
		// same-sign add can overflow)...
		if (c.A^v)&0x80 != 0 {
			c.CLV()
		} else {
			c.SEV()
		}
		a := int(c.A) + int(v)
		if c.C() {
			a++
		}
		// ...then clear V again when the widened sum shows no signed
		// overflow actually happened.
		// NOTE(review): verify these bounds against a reference ADC
		// implementation — TODO confirm.
		if a > 0xff {
			c.SEC()
			if c.V() && a >= 0x180 {
				c.CLV()
			}
		} else {
			c.CLC()
			if c.V() && a < 0x80 {
				c.CLV()
			}
		}
		c.A = byte(a & 0xff)
		c.setNV(c.A)
	},
}
// null marks "mode not supported" in the Opcodes table.
// NOTE(review): BRK's single-byte opcode is legitimately 0x00, which is
// indistinguishable from null — confirm before registering SNGL opcodes.
const null = 0

// Opcodes lists every documented 6502 instruction and the opcode byte it
// uses in each addressing mode.
var Opcodes = []Instruction{
	/* Name, Imm, ZP, ZPX, ZPY, ABS, ABSX, ABSY, IND, INDX, INDY, SNGL, BRA */
	{"ADC", 0x69, 0x65, 0x75, null, 0x6d, 0x7d, 0x79, null, 0x61, 0x71, null, null},
	{"AND", 0x29, 0x25, 0x35, null, 0x2d, 0x3d, 0x39, null, 0x21, 0x31, null, null},
	{"ASL", null, 0x06, 0x16, null, 0x0e, 0x1e, null, null, null, null, 0x0a, null},
	{"BCC", null, null, null, null, null, null, null, null, null, null, null, 0x90},
	{"BCS", null, null, null, null, null, null, null, null, null, null, null, 0xb0},
	{"BEQ", null, null, null, null, null, null, null, null, null, null, null, 0xf0},
	{"BIT", null, 0x24, null, null, 0x2c, null, null, null, null, null, null, null},
	{"BMI", null, null, null, null, null, null, null, null, null, null, null, 0x30},
	{"BNE", null, null, null, null, null, null, null, null, null, null, null, 0xd0},
	{"BPL", null, null, null, null, null, null, null, null, null, null, null, 0x10},
	{"BRK", null, null, null, null, null, null, null, null, null, null, 0x00, null},
	{"BVC", null, null, null, null, null, null, null, null, null, null, null, 0x50},
	{"BVS", null, null, null, null, null, null, null, null, null, null, null, 0x70},
	{"CLC", null, null, null, null, null, null, null, null, null, null, 0x18, null},
	{"CLD", null, null, null, null, null, null, null, null, null, null, 0xd8, null},
	{"CLI", null, null, null, null, null, null, null, null, null, null, 0x58, null},
	{"CLV", null, null, null, null, null, null, null, null, null, null, 0xb8, null},
	{"CMP", 0xc9, 0xc5, 0xd5, null, 0xcd, 0xdd, 0xd9, null, 0xc1, 0xd1, null, null},
	{"CPX", 0xe0, 0xe4, null, null, 0xec, null, null, null, null, null, null, null},
	{"CPY", 0xc0, 0xc4, null, null, 0xcc, null, null, null, null, null, null, null},
	{"DEC", null, 0xc6, 0xd6, null, 0xce, 0xde, null, null, null, null, null, null},
	{"DEX", null, null, null, null, null, null, null, null, null, null, 0xca, null},
	{"DEY", null, null, null, null, null, null, null, null, null, null, 0x88, null},
	{"EOR", 0x49, 0x45, 0x55, null, 0x4d, 0x5d, 0x59, null, 0x41, 0x51, null, null},
	{"INC", null, 0xe6, 0xf6, null, 0xee, 0xfe, null, null, null, null, null, null},
	{"INX", null, null, null, null, null, null, null, null, null, null, 0xe8, null},
	{"INY", null, null, null, null, null, null, null, null, null, null, 0xc8, null},
	{"JMP", null, null, null, null, 0x4c, null, null, 0x6c, null, null, null, null},
	{"JSR", null, null, null, null, 0x20, null, null, null, null, null, null, null},
	{"LDA", 0xa9, 0xa5, 0xb5, null, 0xad, 0xbd, 0xb9, null, 0xa1, 0xb1, null, null},
	{"LDX", 0xa2, 0xa6, null, 0xb6, 0xae, null, 0xbe, null, null, null, null, null},
	{"LDY", 0xa0, 0xa4, 0xb4, null, 0xac, 0xbc, null, null, null, null, null, null},
	{"LSR", null, 0x46, 0x56, null, 0x4e, 0x5e, null, null, null, null, 0x4a, null},
	{"NOP", null, null, null, null, null, null, null, null, null, null, 0xea, null},
	{"ORA", 0x09, 0x05, 0x15, null, 0x0d, 0x1d, 0x19, null, 0x01, 0x11, null, null},
	{"PHA", null, null, null, null, null, null, null, null, null, null, 0x48, null},
	{"PHP", null, null, null, null, null, null, null, null, null, null, 0x08, null},
	{"PLA", null, null, null, null, null, null, null, null, null, null, 0x68, null},
	{"PLP", null, null, null, null, null, null, null, null, null, null, 0x28, null},
	{"ROL", null, 0x26, 0x36, null, 0x2e, 0x3e, null, null, null, null, 0x2a, null},
	{"ROR", null, 0x66, 0x76, null, 0x6e, 0x7e, null, null, null, null, 0x6a, null},
	{"RTI", null, null, null, null, null, null, null, null, null, null, 0x40, null},
	{"RTS", null, null, null, null, null, null, null, null, null, null, 0x60, null},
	{"SBC", 0xe9, 0xe5, 0xf5, null, 0xed, 0xfd, 0xf9, null, 0xe1, 0xf1, null, null},
	{"SEC", null, null, null, null, null, null, null, null, null, null, 0x38, null},
	{"SED", null, null, null, null, null, null, null, null, null, null, 0xf8, null},
	{"SEI", null, null, null, null, null, null, null, null, null, null, 0x78, null},
	{"STA", null, 0x85, 0x95, null, 0x8d, 0x9d, 0x99, null, 0x81, 0x91, null, null},
	{"STX", null, 0x86, null, 0x96, 0x8e, null, null, null, null, null, null, null},
	{"STY", null, 0x84, 0x94, null, 0x8c, null, null, null, null, null, null, null},
	{"TAX", null, null, null, null, null, null, null, null, null, null, 0xaa, null},
	{"TAY", null, null, null, null, null, null, null, null, null, null, 0xa8, null},
	{"TSX", null, null, null, null, null, null, null, null, null, null, 0xba, null},
	{"TXA", null, null, null, null, null, null, null, null, null, null, 0x8a, null},
	{"TXS", null, null, null, null, null, null, null, null, null, null, 0x9a, null},
	{"TYA", null, null, null, null, null, null, null, null, null, null, 0x98, null},
}
|
// Copyright 2016 Afshin Darian. All rights reserved.
// Use of this source code is governed by The MIT License
// that can be found in the LICENSE file.
package sleuth
import (
"bytes"
"errors"
"io/ioutil"
"net/http"
"testing"
"time"
"github.com/ursiform/logger"
)
// init switches the package-level group so tests never interact with
// production peers.
func init() {
	// Tests should run using a different group than production.
	group = "SLEUTH-vT"
}
// badWhisperer generates an error when whispering.
type badWhisperer struct{}

// Whisper allows badWhisperer to conform to the whisperer interface. It
// returns an error every time.
func (b *badWhisperer) Whisper(addr string, payload []byte) error {
	return errors.New("bad whisperer error")
}

// goodWhisperer can whisper without errors.
type goodWhisperer struct{}

// Whisper allows goodWhisperer to conform to the whisperer interface. It
// succeeds every time.
func (g *goodWhisperer) Whisper(addr string, payload []byte) error {
	return nil
}
// echoHandler is the handler for the server in the integration test.
type echoHandler struct{}

// ServeHTTP allows echoHandler to conform to the http.Handler interface.
// It echoes the request body back verbatim.
func (*echoHandler) ServeHTTP(res http.ResponseWriter, req *http.Request) {
	// Best-effort read: on error the handler simply echoes nothing.
	body, _ := ioutil.ReadAll(req.Body)
	res.Write(body)
}
// testCodes compares the error codes in an error with a list of wanted codes.
// It assumes err is a *Error; any other error type would panic the test.
func testCodes(t *testing.T, err error, want []int) {
	codes := err.(*Error).Codes
	if len(codes) != len(want) {
		t.Errorf("expected codes length %d to be %d", len(codes), len(want))
		return
	}
	for i, code := range codes {
		if code != want[i] {
			t.Errorf("expected code [%d] to be [%d]", code, want[i])
			return
		}
	}
}
// Test client.go
// TestClientAddBadMember verifies that adding a member with empty service
// and version fields fails with errAdd.
func TestClientAddBadMember(t *testing.T) {
	log, _ := logger.New(logger.Silent)
	c := newClient(nil, log)
	err := c.add(group, "foo", "bar", "", "")
	if err == nil {
		// Message fixed: the old text ("client dispatch ... bad action")
		// was copy-pasted from the dispatch test and misleading here.
		t.Errorf("expected client add to fail on bad member")
		return
	}
	testCodes(t, err, []int{errAdd})
}
// TestClientDispatchBadAction feeds dispatch a correctly-grouped payload
// with an unknown action.
func TestClientDispatchBadAction(t *testing.T) {
	log, _ := logger.New(logger.Silent)
	c := newClient(nil, log)
	err := c.dispatch([]byte(group + "FAIL"))
	if err == nil {
		t.Errorf("expected client dispatch to fail on bad action")
		return
	}
	testCodes(t, err, []int{errDispatchAction})
}

func TestClientDispatchEmpty(t *testing.T) {
	log, _ := logger.New(logger.Silent)
	c := newClient(nil, log)
	err := c.dispatch([]byte{})
	if err == nil {
		t.Errorf("expected client dispatch to fail on empty payload")
		return
	}
	testCodes(t, err, []int{errDispatchHeader})
}

// TestClientDoTimeout registers a service that never answers, so Do must
// fail with errTimeout.
func TestClientDoTimeout(t *testing.T) {
	c, _ := New(nil)
	defer c.Close()
	service := "foo"
	c.add(group, "bar", "baz", service, "")
	req, _ := http.NewRequest("POST", "sleuth://"+service+"/", nil)
	_, err := c.Do(req)
	if err == nil {
		t.Errorf("expected client Do to fail by timing out")
		return
	}
	testCodes(t, err, []int{errTimeout})
}

func TestClientDoUnknownScheme(t *testing.T) {
	log, _ := logger.New(logger.Silent)
	c := newClient(nil, log)
	req, _ := http.NewRequest("POST", "foo://bar/baz", nil)
	_, err := c.Do(req)
	if err == nil {
		t.Errorf("expected client Do to fail on unknown scheme")
		return
	}
	testCodes(t, err, []int{errScheme})
}

func TestClientDoUnknownService(t *testing.T) {
	log, _ := logger.New(logger.Silent)
	c := newClient(nil, log)
	req, _ := http.NewRequest("POST", "sleuth://foo/bar", nil)
	_, err := c.Do(req)
	if err == nil {
		t.Errorf("expected client Do to fail on unknown service")
		return
	}
	testCodes(t, err, []int{errUnknownService})
}

// TestClientReceiveBadHandle strips the group+action header from a
// marshaled response so receive sees a handle with no waiting request.
func TestClientReceiveBadHandle(t *testing.T) {
	log, _ := logger.New(logger.Silent)
	c := newClient(nil, log)
	res := &response{Handle: "foo"}
	err := c.receive(marshalResponse(res)[len(group)+len(recv):])
	if err == nil {
		t.Errorf("expected client receive to fail on bad handle")
		return
	}
	testCodes(t, err, []int{errRECV})
}

func TestClientReceiveBadPayload(t *testing.T) {
	log, _ := logger.New(logger.Silent)
	c := newClient(nil, log)
	err := c.receive([]byte(""))
	if err == nil {
		t.Errorf("expected client receive to fail on bad payload")
		return
	}
	testCodes(t, err, []int{errUnzip, errResUnmarshal, errRECV})
}

func TestClientReplyBadPayload(t *testing.T) {
	log, _ := logger.New(logger.Silent)
	c := newClient(nil, log)
	err := c.reply([]byte(""))
	if err == nil {
		t.Errorf("expected client reply to fail on bad payload")
		return
	}
	testCodes(t, err, []int{errUnzip, errReqUnmarshal, errREPL})
}
// Test error.go
// TestError pins the "sleuth: <message> [<code>]" error format.
func TestError(t *testing.T) {
	code := 1
	message := "test"
	want := "sleuth: test [1]"
	err := newError(code, message)
	if err.Error() != want {
		t.Errorf("expected error to be formatted as: %s", want)
	}
}

// Test request.go
func TestRequestUnmarshalBadJSON(t *testing.T) {
	payload := zip([]byte("{bad json}"))
	_, _, err := unmarshalRequest(payload)
	if err == nil {
		t.Errorf("expected unmarshalRequest to fail on bad json")
		return
	}
	testCodes(t, err, []int{errReqUnmarshalJSON})
}

// Test response.go
func TestResponseUnmarshalBadJSON(t *testing.T) {
	payload := zip([]byte("{bad json}"))
	_, _, err := unmarshalResponse(payload)
	if err == nil {
		t.Errorf("expected unmarshalResponse to fail on bad json")
		return
	}
	testCodes(t, err, []int{errResUnmarshalJSON})
}

// Test sleuth.go
func TestSleuthNewBadInterface(t *testing.T) {
	_, err := New(&Config{Interface: "foo"})
	if err == nil {
		t.Errorf("expected New to fail on start with bad interface")
		return
	}
	testCodes(t, err, []int{errStart, errCreate, errNew})
}

// TestSleuthNewBadLogLevel checks that an unknown level is coerced to debug.
func TestSleuthNewBadLogLevel(t *testing.T) {
	c, _ := New(&Config{LogLevel: "foo"})
	if c.log.Level() != logger.Debug {
		t.Errorf("expected log level 'foo' to be coerced to 'debug'")
		return
	}
}

func TestSleuthNewBadPort(t *testing.T) {
	_, err := New(&Config{Port: 1})
	if err == nil {
		t.Errorf("expected New to fail on start with bad port")
		return
	}
	testCodes(t, err, []int{errStart, errCreate, errNew})
}

func TestSleuthNewBadService(t *testing.T) {
	_, err := New(&Config{Handler: http.FileServer(http.Dir("."))})
	if err == nil {
		t.Errorf("expected New to fail with bad service")
		return
	}
	testCodes(t, err, []int{errService})
}
// Test workers.go
// TestWorkersAddDuplicate relies on add returning the worker count.
func TestWorkersAddDuplicate(t *testing.T) {
	w := newWorkers()
	p := &peer{name: "foo", node: "bar", service: "baz"}
	if n := w.add(p); n != w.add(p) {
		t.Error("expected duplicate addition to be ignored")
	}
}

func TestWorkersAvailable(t *testing.T) {
	w := newWorkers()
	p := &peer{name: "foo", node: "bar", service: "baz"}
	if w.add(p); !w.available() {
		t.Error("expected workers to be available")
	}
}

// TestWorkersNext verifies round-robin rotation through two workers.
func TestWorkersNext(t *testing.T) {
	w := newWorkers()
	w.add(&peer{name: "foo", node: "bar", service: "baz"})
	w.add(&peer{name: "qux", node: "quux", service: "corge"})
	if w.next().name != "foo" {
		t.Error("expected next to return first added item")
	}
	if w.next().name != "qux" {
		t.Error("expected next to return second added item")
	}
	if w.next().name != "foo" {
		t.Error("expected next to loop back to first added item")
	}
}

func TestWorkersNextNonexistent(t *testing.T) {
	w := newWorkers()
	if p := w.next(); p != nil {
		t.Error("expected nonexistent worker to be nil")
	}
}

func TestWorkersRemove(t *testing.T) {
	w := newWorkers()
	w.add(&peer{name: "foo", node: "bar", service: "baz"})
	if _, p := w.remove("foo"); p == nil || p.name != "foo" || w.available() {
		t.Error("expected worker to be removed")
	}
}

func TestWorkersRemoveNonexistent(t *testing.T) {
	w := newWorkers()
	if _, p := w.remove("foo"); p != nil {
		t.Error("expected nonexistent worker removal to return nil")
	}
}
// Test writer.go
// TestWriterWrite checks both the nil error and a positive byte count.
func TestWriterWrite(t *testing.T) {
	data := []byte("foo bar baz")
	w := newWriter(new(goodWhisperer), &destination{node: "qux", handle: "quux"})
	if n, err := w.Write(data); err != nil {
		t.Errorf("expected write to succeed: %s", err.Error())
	} else if n <= 0 {
		t.Errorf("expected (%d) to be greater than 0", n)
	}
}

func TestWriterWriteBadWhisperer(t *testing.T) {
	data := []byte("foo bar baz")
	w := newWriter(new(badWhisperer), &destination{node: "qux", handle: "quux"})
	_, err := w.Write(data)
	if err == nil {
		t.Errorf("expected writer to fail using bad whisperer")
		return
	}
	testCodes(t, err, []int{errResWhisper})
}

// Test zip.go
func TestZipUnzipBadInput(t *testing.T) {
	in := []byte("a value that cannot be unzipped")
	_, err := unzip(in)
	if err == nil {
		t.Errorf("expected unzip to fail with bad input")
		return
	}
	testCodes(t, err, []int{errUnzip})
}

// TestZipUnzip is a round-trip check: unzip(zip(x)) == x.
func TestZipUnzip(t *testing.T) {
	in := []byte("a value that should be zipped")
	zipped := zip(in)
	if out, err := unzip(zipped); err != nil {
		t.Errorf("unzip failed: %s", err.Error())
	} else if string(out) != string(in) {
		t.Errorf("zip failed")
	}
}
// Test integrated package.
// TestIntegratedCycle spins up a real client and echo server, round-trips
// a request through sleuth, and verifies the echoed body.
func TestIntegratedCycle(t *testing.T) {
	// Create client.
	client, err := New(nil)
	if err != nil {
		t.Errorf("client instantiation failed: %s", err.Error())
		return
	}
	defer func(client *Client, t *testing.T) {
		if err := client.Close(); err != nil {
			t.Errorf("client close failed: %s", err.Error())
		}
	}(client, t)
	// Create server.
	addr := "sleuth-test-server-one"
	server, err := New(&Config{
		Handler: new(echoHandler), Service: addr})
	if err != nil {
		t.Errorf("server instantiation failed: %s", err.Error())
		return
	}
	defer func(server *Client, t *testing.T) {
		if err := server.Close(); err != nil {
			t.Errorf("server close failed: %s", err.Error())
		}
	}(server, t)
	// Wait until the server has been added to the client pool.
	client.WaitFor(addr)
	// Set timeout to 10 seconds to accommodate slow test spin-up.
	client.Timeout = time.Second * 10
	// After WaitFor, the service is known, so block must not wait.
	if client.block(addr) {
		t.Errorf("call to block should have returned immediately")
	}
	body := "foo bar baz"
	buffer := bytes.NewBuffer([]byte(body))
	request, err := http.NewRequest("GET", scheme+"://"+addr+"/", buffer)
	if err != nil {
		t.Errorf("request instantiation failed: %s", err.Error())
		return
	}
	response, err := client.Do(request)
	if err != nil {
		t.Errorf("client.Do failed: %s", err.Error())
		return
	}
	if response.StatusCode != http.StatusOK {
		t.Errorf("client.Do expected %d got %d", http.StatusOK, response.StatusCode)
		return
	}
	output, _ := ioutil.ReadAll(response.Body)
	// It's not really necessary to close a sleuth response.
	response.Body.Close()
	if string(output) != body {
		t.Errorf("client.Do expected %s to equal %s", string(output), body)
		return
	}
}
Don't use the word "pool" in the WaitFor comment; describe the server as becoming available instead.
// Copyright 2016 Afshin Darian. All rights reserved.
// Use of this source code is governed by The MIT License
// that can be found in the LICENSE file.
package sleuth
import (
"bytes"
"errors"
"io/ioutil"
"net/http"
"testing"
"time"
"github.com/ursiform/logger"
)
// init switches the package-level group so tests never interact with
// production peers.
func init() {
	// Tests should run using a different group than production.
	group = "SLEUTH-vT"
}
// badWhisperer generates an error when whispering.
type badWhisperer struct{}

// Whisper allows badWhisperer to conform to the whisperer interface. It
// returns an error every time.
func (b *badWhisperer) Whisper(addr string, payload []byte) error {
	return errors.New("bad whisperer error")
}

// goodWhisperer can whisper without errors.
type goodWhisperer struct{}

// Whisper allows goodWhisperer to conform to the whisperer interface. It
// succeeds every time.
func (g *goodWhisperer) Whisper(addr string, payload []byte) error {
	return nil
}

// echoHandler is the handler for the server in the integration test.
type echoHandler struct{}

// ServeHTTP allows echoHandler to conform to the http.Handler interface.
// It echoes the request body back verbatim.
func (*echoHandler) ServeHTTP(res http.ResponseWriter, req *http.Request) {
	// Best-effort read: on error the handler simply echoes nothing.
	body, _ := ioutil.ReadAll(req.Body)
	res.Write(body)
}
// testCodes compares the error codes in an error with a list of wanted codes.
// It assumes err is a *Error; any other error type would panic the test.
func testCodes(t *testing.T, err error, want []int) {
	codes := err.(*Error).Codes
	if len(codes) != len(want) {
		t.Errorf("expected codes length %d to be %d", len(codes), len(want))
		return
	}
	for i, code := range codes {
		if code != want[i] {
			t.Errorf("expected code [%d] to be [%d]", code, want[i])
			return
		}
	}
}
// Test client.go
// TestClientAddBadMember verifies that adding a member with empty service
// and version fields fails with errAdd.
func TestClientAddBadMember(t *testing.T) {
	log, _ := logger.New(logger.Silent)
	c := newClient(nil, log)
	err := c.add(group, "foo", "bar", "", "")
	if err == nil {
		// Message fixed: the old text ("client dispatch ... bad action")
		// was copy-pasted from the dispatch test and misleading here.
		t.Errorf("expected client add to fail on bad member")
		return
	}
	testCodes(t, err, []int{errAdd})
}
// Client dispatch/Do/receive/reply failure paths.
func TestClientDispatchBadAction(t *testing.T) {
	log, _ := logger.New(logger.Silent)
	c := newClient(nil, log)
	err := c.dispatch([]byte(group + "FAIL"))
	if err == nil {
		t.Errorf("expected client dispatch to fail on bad action")
		return
	}
	testCodes(t, err, []int{errDispatchAction})
}

func TestClientDispatchEmpty(t *testing.T) {
	log, _ := logger.New(logger.Silent)
	c := newClient(nil, log)
	err := c.dispatch([]byte{})
	if err == nil {
		t.Errorf("expected client dispatch to fail on empty payload")
		return
	}
	testCodes(t, err, []int{errDispatchHeader})
}

// TestClientDoTimeout registers a service that never answers, so Do must
// fail with errTimeout.
func TestClientDoTimeout(t *testing.T) {
	c, _ := New(nil)
	defer c.Close()
	service := "foo"
	c.add(group, "bar", "baz", service, "")
	req, _ := http.NewRequest("POST", "sleuth://"+service+"/", nil)
	_, err := c.Do(req)
	if err == nil {
		t.Errorf("expected client Do to fail by timing out")
		return
	}
	testCodes(t, err, []int{errTimeout})
}

func TestClientDoUnknownScheme(t *testing.T) {
	log, _ := logger.New(logger.Silent)
	c := newClient(nil, log)
	req, _ := http.NewRequest("POST", "foo://bar/baz", nil)
	_, err := c.Do(req)
	if err == nil {
		t.Errorf("expected client Do to fail on unknown scheme")
		return
	}
	testCodes(t, err, []int{errScheme})
}

func TestClientDoUnknownService(t *testing.T) {
	log, _ := logger.New(logger.Silent)
	c := newClient(nil, log)
	req, _ := http.NewRequest("POST", "sleuth://foo/bar", nil)
	_, err := c.Do(req)
	if err == nil {
		t.Errorf("expected client Do to fail on unknown service")
		return
	}
	testCodes(t, err, []int{errUnknownService})
}

// TestClientReceiveBadHandle strips the group+action header from a
// marshaled response so receive sees a handle with no waiting request.
func TestClientReceiveBadHandle(t *testing.T) {
	log, _ := logger.New(logger.Silent)
	c := newClient(nil, log)
	res := &response{Handle: "foo"}
	err := c.receive(marshalResponse(res)[len(group)+len(recv):])
	if err == nil {
		t.Errorf("expected client receive to fail on bad handle")
		return
	}
	testCodes(t, err, []int{errRECV})
}

func TestClientReceiveBadPayload(t *testing.T) {
	log, _ := logger.New(logger.Silent)
	c := newClient(nil, log)
	err := c.receive([]byte(""))
	if err == nil {
		t.Errorf("expected client receive to fail on bad payload")
		return
	}
	testCodes(t, err, []int{errUnzip, errResUnmarshal, errRECV})
}

func TestClientReplyBadPayload(t *testing.T) {
	log, _ := logger.New(logger.Silent)
	c := newClient(nil, log)
	err := c.reply([]byte(""))
	if err == nil {
		t.Errorf("expected client reply to fail on bad payload")
		return
	}
	testCodes(t, err, []int{errUnzip, errReqUnmarshal, errREPL})
}
// Test error.go
// TestError pins the "sleuth: <message> [<code>]" error format.
func TestError(t *testing.T) {
	code := 1
	message := "test"
	want := "sleuth: test [1]"
	err := newError(code, message)
	if err.Error() != want {
		t.Errorf("expected error to be formatted as: %s", want)
	}
}

// Test request.go
func TestRequestUnmarshalBadJSON(t *testing.T) {
	payload := zip([]byte("{bad json}"))
	_, _, err := unmarshalRequest(payload)
	if err == nil {
		t.Errorf("expected unmarshalRequest to fail on bad json")
		return
	}
	testCodes(t, err, []int{errReqUnmarshalJSON})
}

// Test response.go
func TestResponseUnmarshalBadJSON(t *testing.T) {
	payload := zip([]byte("{bad json}"))
	_, _, err := unmarshalResponse(payload)
	if err == nil {
		t.Errorf("expected unmarshalResponse to fail on bad json")
		return
	}
	testCodes(t, err, []int{errResUnmarshalJSON})
}

// Test sleuth.go
func TestSleuthNewBadInterface(t *testing.T) {
	_, err := New(&Config{Interface: "foo"})
	if err == nil {
		t.Errorf("expected New to fail on start with bad interface")
		return
	}
	testCodes(t, err, []int{errStart, errCreate, errNew})
}

// TestSleuthNewBadLogLevel checks that an unknown level is coerced to debug.
func TestSleuthNewBadLogLevel(t *testing.T) {
	c, _ := New(&Config{LogLevel: "foo"})
	if c.log.Level() != logger.Debug {
		t.Errorf("expected log level 'foo' to be coerced to 'debug'")
		return
	}
}

func TestSleuthNewBadPort(t *testing.T) {
	_, err := New(&Config{Port: 1})
	if err == nil {
		t.Errorf("expected New to fail on start with bad port")
		return
	}
	testCodes(t, err, []int{errStart, errCreate, errNew})
}

func TestSleuthNewBadService(t *testing.T) {
	_, err := New(&Config{Handler: http.FileServer(http.Dir("."))})
	if err == nil {
		t.Errorf("expected New to fail with bad service")
		return
	}
	testCodes(t, err, []int{errService})
}
// Test workers.go
// TestWorkersAddDuplicate relies on add returning the worker count.
func TestWorkersAddDuplicate(t *testing.T) {
	w := newWorkers()
	p := &peer{name: "foo", node: "bar", service: "baz"}
	if n := w.add(p); n != w.add(p) {
		t.Error("expected duplicate addition to be ignored")
	}
}

func TestWorkersAvailable(t *testing.T) {
	w := newWorkers()
	p := &peer{name: "foo", node: "bar", service: "baz"}
	if w.add(p); !w.available() {
		t.Error("expected workers to be available")
	}
}

// TestWorkersNext verifies round-robin rotation through two workers.
func TestWorkersNext(t *testing.T) {
	w := newWorkers()
	w.add(&peer{name: "foo", node: "bar", service: "baz"})
	w.add(&peer{name: "qux", node: "quux", service: "corge"})
	if w.next().name != "foo" {
		t.Error("expected next to return first added item")
	}
	if w.next().name != "qux" {
		t.Error("expected next to return second added item")
	}
	if w.next().name != "foo" {
		t.Error("expected next to loop back to first added item")
	}
}

func TestWorkersNextNonexistent(t *testing.T) {
	w := newWorkers()
	if p := w.next(); p != nil {
		t.Error("expected nonexistent worker to be nil")
	}
}

func TestWorkersRemove(t *testing.T) {
	w := newWorkers()
	w.add(&peer{name: "foo", node: "bar", service: "baz"})
	if _, p := w.remove("foo"); p == nil || p.name != "foo" || w.available() {
		t.Error("expected worker to be removed")
	}
}

func TestWorkersRemoveNonexistent(t *testing.T) {
	w := newWorkers()
	if _, p := w.remove("foo"); p != nil {
		t.Error("expected nonexistent worker removal to return nil")
	}
}
// Test writer.go
func TestWriterWrite(t *testing.T) {
data := []byte("foo bar baz")
w := newWriter(new(goodWhisperer), &destination{node: "qux", handle: "quux"})
if n, err := w.Write(data); err != nil {
t.Errorf("expected write to succeed: %s", err.Error())
} else if n <= 0 {
t.Errorf("expected (%d) to be greater than 0", n)
}
}
func TestWriterWriteBadWhisperer(t *testing.T) {
data := []byte("foo bar baz")
w := newWriter(new(badWhisperer), &destination{node: "qux", handle: "quux"})
_, err := w.Write(data)
if err == nil {
t.Errorf("expected writer to fail using bad whisperer")
return
}
testCodes(t, err, []int{errResWhisper})
}
// Test zip.go
// TestZipUnzipBadInput verifies that unzip rejects non-compressed bytes
// with errUnzip.
func TestZipUnzipBadInput(t *testing.T) {
	garbage := []byte("a value that cannot be unzipped")
	_, err := unzip(garbage)
	if err == nil {
		t.Errorf("expected unzip to fail with bad input")
		return
	}
	testCodes(t, err, []int{errUnzip})
}
func TestZipUnzip(t *testing.T) {
in := []byte("a value that should be zipped")
zipped := zip(in)
if out, err := unzip(zipped); err != nil {
t.Errorf("unzip failed: %s", err.Error())
} else if string(out) != string(in) {
t.Errorf("zip failed")
}
}
// Test integrated package.
// TestIntegratedCycle spins up a sleuth client and an echo server on the
// same mesh, waits for discovery, then round-trips a request body through
// client.Do and checks the echoed body matches.
func TestIntegratedCycle(t *testing.T) {
	// Create client.
	client, err := New(nil)
	if err != nil {
		t.Errorf("client instantiation failed: %s", err.Error())
		return
	}
	defer func(client *Client, t *testing.T) {
		if err := client.Close(); err != nil {
			t.Errorf("client close failed: %s", err.Error())
		}
	}(client, t)
	// Create server.
	addr := "sleuth-test-server-one"
	server, err := New(&Config{
		Handler: new(echoHandler), Service: addr})
	if err != nil {
		t.Errorf("server instantiation failed: %s", err.Error())
		return
	}
	defer func(server *Client, t *testing.T) {
		if err := server.Close(); err != nil {
			t.Errorf("server close failed: %s", err.Error())
		}
	}(server, t)
	// Wait until the server becomes available.
	client.WaitFor(addr)
	// Set timeout to 10 seconds to accommodate slow test spin-up.
	client.Timeout = time.Second * 10
	// block must return immediately: WaitFor already saw the service.
	if client.block(addr) {
		t.Errorf("call to block should have returned immediately")
	}
	body := "foo bar baz"
	buffer := bytes.NewBuffer([]byte(body))
	request, err := http.NewRequest("GET", scheme+"://"+addr+"/", buffer)
	if err != nil {
		t.Errorf("request instantiation failed: %s", err.Error())
		return
	}
	response, err := client.Do(request)
	if err != nil {
		t.Errorf("client.Do failed: %s", err.Error())
		return
	}
	if response.StatusCode != http.StatusOK {
		t.Errorf("client.Do expected %d got %d", http.StatusOK, response.StatusCode)
		return
	}
	output, _ := ioutil.ReadAll(response.Body)
	// It's not really necessary to close a sleuth response.
	response.Body.Close()
	if string(output) != body {
		t.Errorf("client.Do expected %s to equal %s", string(output), body)
		return
	}
}
|
package chromaticity
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"net/http/httputil"
"time"
"github.com/evq/chromaticity/utils"
"github.com/evq/go-restful"
)
var schedulesContainer *restful.Container
// Schedule describes a deferred or recurring command in the schedules API.
// LocalTime is fed to utils.GetNextTimeFrom to compute the next run.
type Schedule struct {
	Name        string  `json:"name"`
	Description string  `json:"description"`
	Command     Command `json:"command"`
	LocalTime   string  `json:"localtime"`
	Time        string  `json:"time"`
	Create      string  `json:"create"`
	Status      string  `json:"status"`
	// Fixed malformed tag: was `json:autdoelete"` (missing opening quote
	// and misspelled), which encoding/json silently ignores, so the field
	// marshaled as "Autodelete" instead of "autodelete".
	Autodelete bool `json:"autodelete"`
}
// Command is the HTTP request a Schedule executes when it fires: Method and
// Address, with Body carried verbatim (RawMessage defers JSON decoding).
type Command struct {
	Address string          `json:"address"`
	Body    json.RawMessage `json:"body"`
	Method  string          `json:"method"`
}
// execute runs the schedule's command for real (test == false), panicking
// if execution fails; it is used as a time.AfterFunc callback.
func (s Schedule) execute() {
	if err := s.executeOptionally(false); err != nil {
		panic(err)
	}
}
// executeOptionally builds the schedule's HTTP command; when test is false
// it also dispatches the request in-process against the shared schedules
// container and re-arms the schedule for its next trigger time. With
// test == true it only validates that the command can be constructed.
func (s Schedule) executeOptionally(test bool) error {
	// execute command
	b, err := s.Command.Body.MarshalJSON()
	if err != nil {
		return err
	}
	req, err := http.NewRequest(s.Command.Method, s.Command.Address, bytes.NewBuffer(b))
	if err != nil {
		return err
	}
	if !test {
		rec := httptest.NewRecorder()
		// Dispatch against the container directly rather than over the network.
		schedulesContainer.Dispatch(rec, req)
		resp := rec.Result()
		dump, err := httputil.DumpResponse(resp, true)
		if err != nil {
			fmt.Println(err)
		}
		fmt.Println(string(dump))
		// reschedule
		t, err := utils.GetNextTimeFrom(s.LocalTime, nil)
		if err != nil {
			// NOTE(review): this runs inside a timer goroutine, so the panic
			// is unrecoverable by any caller — confirm this is intended.
			panic(err)
		}
		if t != nil {
			// if valid, schedule again
			time.AfterFunc(time.Until(*t), s.execute)
		}
	}
	return nil
}
// listSchedules writes the full schedule collection as the response entity.
func (l LightResource) listSchedules(request *restful.Request, response *restful.Response) {
	response.WriteEntity(l.Schedules)
}
// createSchedule decodes a schedule from the request body, validates its
// trigger time, dry-runs its command, arms a timer for the next trigger,
// and echoes the schedule collection back. All validation failures return
// 400 with a plain-text explanation.
func (l LightResource) createSchedule(request *restful.Request, response *restful.Response) {
	s := Schedule{}
	// Previously this error was silently ignored; a malformed body now
	// fails fast instead of producing a zero-valued schedule.
	if err := request.ReadEntity(&s); err != nil {
		response.AddHeader("Content-Type", "text/plain")
		response.WriteErrorString(http.StatusBadRequest, fmt.Sprintf("400: Error reading schedule entity: %s", err.Error()))
		return
	}
	t, err := utils.GetNextTimeFrom(s.LocalTime, nil)
	if err != nil {
		response.AddHeader("Content-Type", "text/plain")
		response.WriteErrorString(http.StatusBadRequest, fmt.Sprintf("400: Error in GetNextTimeFrom: %s", err.Error()))
		return
	}
	if t == nil {
		response.AddHeader("Content-Type", "text/plain")
		response.WriteErrorString(http.StatusBadRequest, fmt.Sprintf("400: No time returned from GetNextTimeFrom"))
		return
	}
	// Dry-run (test == true) to reject schedules whose command cannot be built.
	err = s.executeOptionally(true)
	if err != nil {
		response.AddHeader("Content-Type", "text/plain")
		response.WriteErrorString(http.StatusBadRequest, fmt.Sprintf("400: Error in executeOptionally: %s", err.Error()))
		return
	}
	time.AfterFunc(time.Until(*t), s.execute)
	response.WriteEntity(l.Schedules)
}
// RegisterSchedulesApi mounts the schedules API under
// /api/{api_key}/schedules and retains the container so scheduled commands
// can later be dispatched against it in-process (see executeOptionally).
func (l LightResource) RegisterSchedulesApi(container *restful.Container) {
	utils.RegisterApis(
		container,
		"/api/{api_key}/schedules",
		"Manage Schedules",
		l._RegisterSchedulesApi,
	)
	// Saved into the package-level variable for in-process dispatch.
	schedulesContainer = container
}
// _RegisterSchedulesApi wires the list (GET /) and create (POST /) schedule
// routes onto the given web service.
func (l LightResource) _RegisterSchedulesApi(ws *restful.WebService) {
	ws.Route(ws.GET("/").To(l.listSchedules).
		Doc("list all schedules").
		Param(ws.PathParameter("api_key", "api key").DataType("string")).
		Operation("listSchedules"))
	ws.Route(ws.POST("/").To(l.createSchedule).
		Doc("create schedule").
		Param(ws.PathParameter("api_key", "api key").DataType("string")).
		Operation("createSchedule").
		Reads(Schedule{}).
		Writes([]SuccessResponse{}))
	/*
		ws.Route(ws.GET("/{light-id}").To(l.findLight).
			Doc("get a light").
			Operation("findLight").
			Param(ws.PathParameter("api_key", "api key").DataType("string")).
			Param(ws.PathParameter("light-id", "identifier of the light").DataType("int")))
		ws.Route(ws.PUT("/{light-id}/state").To(l.updateLightState).
			Doc("modify a light's state").
			Operation("updateLightState").
			Param(ws.PathParameter("api_key", "api key").DataType("string")).
			Param(ws.PathParameter("light-id", "identifier of the light").DataType("int")).
			Reads(ColorState{}))
	*/
}
Add a print statement announcing each newly created schedule's next run time in createSchedule
package chromaticity
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"net/http/httputil"
"time"
"github.com/evq/chromaticity/utils"
"github.com/evq/go-restful"
)
var schedulesContainer *restful.Container
// Schedule describes a deferred or recurring command in the schedules API.
// LocalTime is fed to utils.GetNextTimeFrom to compute the next run.
type Schedule struct {
	Name        string  `json:"name"`
	Description string  `json:"description"`
	Command     Command `json:"command"`
	LocalTime   string  `json:"localtime"`
	Time        string  `json:"time"`
	Create      string  `json:"create"`
	Status      string  `json:"status"`
	// Fixed malformed tag: was `json:autdoelete"` (missing opening quote
	// and misspelled), which encoding/json silently ignores, so the field
	// marshaled as "Autodelete" instead of "autodelete".
	Autodelete bool `json:"autodelete"`
}
// Command is the HTTP request a Schedule executes when it fires: Method and
// Address, with Body carried verbatim (RawMessage defers JSON decoding).
type Command struct {
	Address string          `json:"address"`
	Body    json.RawMessage `json:"body"`
	Method  string          `json:"method"`
}
// execute runs the schedule's command for real (test == false), panicking
// if execution fails; it is used as a time.AfterFunc callback.
func (s Schedule) execute() {
	if err := s.executeOptionally(false); err != nil {
		panic(err)
	}
}
// executeOptionally builds the schedule's HTTP command; when test is false
// it also dispatches the request in-process against the shared schedules
// container and re-arms the schedule for its next trigger time. With
// test == true it only validates that the command can be constructed.
func (s Schedule) executeOptionally(test bool) error {
	// execute command
	b, err := s.Command.Body.MarshalJSON()
	if err != nil {
		return err
	}
	req, err := http.NewRequest(s.Command.Method, s.Command.Address, bytes.NewBuffer(b))
	if err != nil {
		return err
	}
	if !test {
		rec := httptest.NewRecorder()
		// Dispatch against the container directly rather than over the network.
		schedulesContainer.Dispatch(rec, req)
		resp := rec.Result()
		dump, err := httputil.DumpResponse(resp, true)
		if err != nil {
			fmt.Println(err)
		}
		fmt.Println(string(dump))
		// reschedule
		t, err := utils.GetNextTimeFrom(s.LocalTime, nil)
		if err != nil {
			// NOTE(review): this runs inside a timer goroutine, so the panic
			// is unrecoverable by any caller — confirm this is intended.
			panic(err)
		}
		if t != nil {
			// if valid, schedule again
			time.AfterFunc(time.Until(*t), s.execute)
		}
	}
	return nil
}
// listSchedules writes the full schedule collection as the response entity.
func (l LightResource) listSchedules(request *restful.Request, response *restful.Response) {
	response.WriteEntity(l.Schedules)
}
// createSchedule decodes a schedule from the request body, validates its
// trigger time, dry-runs its command, arms a timer for the next trigger,
// and echoes the schedule collection back. All validation failures return
// 400 with a plain-text explanation.
func (l LightResource) createSchedule(request *restful.Request, response *restful.Response) {
	s := Schedule{}
	// Previously this error was silently ignored; a malformed body now
	// fails fast instead of producing a zero-valued schedule.
	if err := request.ReadEntity(&s); err != nil {
		response.AddHeader("Content-Type", "text/plain")
		response.WriteErrorString(http.StatusBadRequest, fmt.Sprintf("400: Error reading schedule entity: %s", err.Error()))
		return
	}
	t, err := utils.GetNextTimeFrom(s.LocalTime, nil)
	if err != nil {
		response.AddHeader("Content-Type", "text/plain")
		response.WriteErrorString(http.StatusBadRequest, fmt.Sprintf("400: Error in GetNextTimeFrom: %s", err.Error()))
		return
	}
	if t == nil {
		response.AddHeader("Content-Type", "text/plain")
		response.WriteErrorString(http.StatusBadRequest, fmt.Sprintf("400: No time returned from GetNextTimeFrom"))
		return
	}
	// Dry-run (test == true) to reject schedules whose command cannot be built.
	err = s.executeOptionally(true)
	if err != nil {
		response.AddHeader("Content-Type", "text/plain")
		response.WriteErrorString(http.StatusBadRequest, fmt.Sprintf("400: Error in executeOptionally: %s", err.Error()))
		return
	}
	// Added the missing trailing newline so consecutive log lines don't run together.
	fmt.Printf("Schedule %s will next run at %s\n", s.Name, t.String())
	time.AfterFunc(time.Until(*t), s.execute)
	response.WriteEntity(l.Schedules)
}
// RegisterSchedulesApi mounts the schedules API under
// /api/{api_key}/schedules and retains the container so scheduled commands
// can later be dispatched against it in-process (see executeOptionally).
func (l LightResource) RegisterSchedulesApi(container *restful.Container) {
	utils.RegisterApis(
		container,
		"/api/{api_key}/schedules",
		"Manage Schedules",
		l._RegisterSchedulesApi,
	)
	// Saved into the package-level variable for in-process dispatch.
	schedulesContainer = container
}
// _RegisterSchedulesApi wires the list (GET /) and create (POST /) schedule
// routes onto the given web service.
func (l LightResource) _RegisterSchedulesApi(ws *restful.WebService) {
	ws.Route(ws.GET("/").To(l.listSchedules).
		Doc("list all schedules").
		Param(ws.PathParameter("api_key", "api key").DataType("string")).
		Operation("listSchedules"))
	ws.Route(ws.POST("/").To(l.createSchedule).
		Doc("create schedule").
		Param(ws.PathParameter("api_key", "api key").DataType("string")).
		Operation("createSchedule").
		Reads(Schedule{}).
		Writes([]SuccessResponse{}))
	/*
		ws.Route(ws.GET("/{light-id}").To(l.findLight).
			Doc("get a light").
			Operation("findLight").
			Param(ws.PathParameter("api_key", "api key").DataType("string")).
			Param(ws.PathParameter("light-id", "identifier of the light").DataType("int")))
		ws.Route(ws.PUT("/{light-id}/state").To(l.updateLightState).
			Doc("modify a light's state").
			Operation("updateLightState").
			Param(ws.PathParameter("api_key", "api key").DataType("string")).
			Param(ws.PathParameter("light-id", "identifier of the light").DataType("int")).
			Reads(ColorState{}))
	*/
}
|
package presence
import (
"errors"
"fmt"
"testing"
"time"
)
// initRedisence opens a redis-backed presence Session against the test
// instance at the hard-coded address 192.168.59.103:6381 (db 10, 1s
// presence timeout), failing the test immediately on any error.
func initRedisence(t *testing.T) *Session {
	backend, err := NewRedis("192.168.59.103:6381", 10, time.Second*1)
	if err != nil {
		t.Fatal(err)
	}
	ses, err := New(backend)
	if err != nil {
		t.Fatal(err)
	}
	return ses
}
func TestInitialization(t *testing.T) {
s := initRedisence(t)
defer s.Close()
}
func TestSinglePing(t *testing.T) {
s := initRedisence(t)
defer s.Close()
if err := s.Online("id"); err != nil {
t.Fatal(err)
}
}
func TestMultiPing(t *testing.T) {
s := initRedisence(t)
defer s.Close()
if err := s.Online("id", "id2"); err != nil {
t.Fatal(err)
}
}
func TestOnlineStatus(t *testing.T) {
s := initRedisence(t)
defer s.Close()
id := "id3"
if err := s.Online(id); err != nil {
t.Fatal(err)
}
status, err := s.Status(id)
if err != nil {
t.Fatal(err)
}
if status[0].Status != Online {
t.Fatal(errors.New("user should be active"))
}
}
func TestOfflineStatus(t *testing.T) {
s := initRedisence(t)
defer s.Close()
id := "id4"
if err := s.Online(id); err != nil {
t.Fatal(err)
}
status, err := s.Status("id5")
if err != nil {
t.Fatal(err)
}
if status[0].Status != Offline {
t.Fatal(errors.New("user should be offline"))
}
}
func TestMultiStatusAllOnline(t *testing.T) {
s := initRedisence(t)
defer s.Close()
if err := s.Online("id6", "id7"); err != nil {
t.Fatal(err)
}
status, err := s.Status([]string{"id6", "id7"}...)
if err != nil {
t.Fatal(err)
}
for _, st := range status {
if st.Status != Online {
t.Fatal(errors.New("user should be active"))
}
}
}
func TestMultiStatusAllOffline(t *testing.T) {
s := initRedisence(t)
defer s.Close()
if err := s.Online("id8", "id9"); err != nil {
t.Fatal(err)
}
status, err := s.Status([]string{"id10", "id11"}...)
if err != nil {
t.Fatal(err)
}
for _, st := range status {
if st.Status != Offline {
t.Fatal(errors.New("user should be offline"))
}
}
}
func TestStatusWithTimeout(t *testing.T) {
s := initRedisence(t)
defer s.Close()
id := "12"
if err := s.Online(id); err != nil {
t.Fatal(err)
}
time.Sleep(time.Second * 2)
status, err := s.Status(id)
if err != nil {
t.Fatal(err)
}
if status[0].Status == Online {
t.Fatal(errors.New("user should not be active"))
}
}
func TestSubscriptions(t *testing.T) {
t.Skip("Skipped to travis")
s := initRedisence(t)
// wait for all keys to expire
time.Sleep(time.Second * 1)
id1 := "13"
id2 := "14"
id3 := "15"
time.AfterFunc(time.Second*5, func() {
err := s.Close()
if err != nil {
t.Fatal(err)
}
})
time.AfterFunc(time.Second*1, func() {
err := s.Online(id1, id2, id3)
if err != nil {
t.Fatal(err)
}
// err = s.Offline(id1, id2, id3)
// if err != nil {
// t.Fatal(err)
// }
})
onlineCount := 0
offlineCount := 0
for event := range s.ListenStatusChanges() {
switch event.Status {
case Online:
onlineCount++
case Offline:
offlineCount++
}
}
if onlineCount != 3 {
t.Fatal(
fmt.Errorf("online count should be 3 it is %d", onlineCount),
)
}
if offlineCount != 3 {
t.Fatal(
fmt.Errorf("offline count should be 3 it is %d", offlineCount),
)
}
}
func TestJustMultiOffline(t *testing.T) {
s := initRedisence(t)
defer s.Close()
if err := s.Offline("id16", "id17"); err != nil {
t.Fatal(err)
}
}
func TestMultiOnlineAndOfflineTogether(t *testing.T) {
s := initRedisence(t)
defer s.Close()
if err := s.Online("id18", "id19"); err != nil {
t.Fatal(err)
}
if err := s.Offline("id18", "id19"); err != nil {
t.Fatal(err)
}
}
func TestMultiOfflineWithMultiStatus(t *testing.T) {
s := initRedisence(t)
defer s.Close()
if err := s.Online("id20", "id21"); err != nil {
t.Fatal(err)
}
if err := s.Offline("id20", "id21"); err != nil {
t.Fatal(err)
}
status, err := s.Status([]string{"id20", "id21"}...)
if err != nil {
t.Fatal(err)
}
for _, st := range status {
if st.Status != Offline {
t.Fatal(errors.New("user should be offline"))
}
}
}
Presence: point tests at the local redis instance (localhost:6379) instead of the hard-coded 192.168.59.103:6381
package presence
import (
"errors"
"fmt"
"testing"
"time"
)
// initRedisence opens a redis-backed presence Session against the local
// test instance (localhost:6379, db 10, 1s presence timeout), failing the
// test immediately on any error.
func initRedisence(t *testing.T) *Session {
	backend, err := NewRedis("localhost:6379", 10, time.Second*1)
	if err != nil {
		t.Fatal(err)
	}
	ses, err := New(backend)
	if err != nil {
		t.Fatal(err)
	}
	return ses
}
// TestInitialization checks that a Session can be created and closed.
func TestInitialization(t *testing.T) {
	s := initRedisence(t)
	defer s.Close()
}
// TestSinglePing checks that marking one id online succeeds.
func TestSinglePing(t *testing.T) {
	s := initRedisence(t)
	defer s.Close()
	if err := s.Online("id"); err != nil {
		t.Fatal(err)
	}
}
// TestMultiPing checks that marking several ids online in one call succeeds.
func TestMultiPing(t *testing.T) {
	s := initRedisence(t)
	defer s.Close()
	if err := s.Online("id", "id2"); err != nil {
		t.Fatal(err)
	}
}
// TestOnlineStatus checks that an id just marked online reports Online.
func TestOnlineStatus(t *testing.T) {
	s := initRedisence(t)
	defer s.Close()
	id := "id3"
	if err := s.Online(id); err != nil {
		t.Fatal(err)
	}
	status, err := s.Status(id)
	if err != nil {
		t.Fatal(err)
	}
	if status[0].Status != Online {
		t.Fatal(errors.New("user should be active"))
	}
}
// TestOfflineStatus checks that querying an id that was never marked
// online ("id5") reports Offline, even while another id is online.
func TestOfflineStatus(t *testing.T) {
	s := initRedisence(t)
	defer s.Close()
	id := "id4"
	if err := s.Online(id); err != nil {
		t.Fatal(err)
	}
	// Deliberately query a different, never-seen id.
	status, err := s.Status("id5")
	if err != nil {
		t.Fatal(err)
	}
	if status[0].Status != Offline {
		t.Fatal(errors.New("user should be offline"))
	}
}
// TestMultiStatusAllOnline checks that a multi-id Status query reports
// Online for every id that was marked online.
func TestMultiStatusAllOnline(t *testing.T) {
	s := initRedisence(t)
	defer s.Close()
	if err := s.Online("id6", "id7"); err != nil {
		t.Fatal(err)
	}
	status, err := s.Status([]string{"id6", "id7"}...)
	if err != nil {
		t.Fatal(err)
	}
	for _, st := range status {
		if st.Status != Online {
			t.Fatal(errors.New("user should be active"))
		}
	}
}
// TestMultiStatusAllOffline checks that a multi-id Status query for
// never-seen ids reports Offline for all of them.
func TestMultiStatusAllOffline(t *testing.T) {
	s := initRedisence(t)
	defer s.Close()
	if err := s.Online("id8", "id9"); err != nil {
		t.Fatal(err)
	}
	// Query ids that were never marked online.
	status, err := s.Status([]string{"id10", "id11"}...)
	if err != nil {
		t.Fatal(err)
	}
	for _, st := range status {
		if st.Status != Offline {
			t.Fatal(errors.New("user should be offline"))
		}
	}
}
// TestStatusWithTimeout checks that presence expires: after sleeping past
// the 1s session timeout, a previously online id no longer reports Online.
func TestStatusWithTimeout(t *testing.T) {
	s := initRedisence(t)
	defer s.Close()
	id := "12"
	if err := s.Online(id); err != nil {
		t.Fatal(err)
	}
	// Sleep beyond the 1-second presence timeout set in initRedisence.
	time.Sleep(time.Second * 2)
	status, err := s.Status(id)
	if err != nil {
		t.Fatal(err)
	}
	if status[0].Status == Online {
		t.Fatal(errors.New("user should not be active"))
	}
}
// TestSubscriptions listens for status-change events while three ids go
// online and then expire, expecting three Online and three Offline events.
// Skipped in CI; timers close the session (5s) after the ids are pinged (1s).
func TestSubscriptions(t *testing.T) {
	t.Skip("Skipped to travis")
	s := initRedisence(t)
	// wait for all keys to expire
	time.Sleep(time.Second * 1)
	id1 := "13"
	id2 := "14"
	id3 := "15"
	// Closing the session ends the ListenStatusChanges loop below.
	time.AfterFunc(time.Second*5, func() {
		err := s.Close()
		if err != nil {
			t.Fatal(err)
		}
	})
	time.AfterFunc(time.Second*1, func() {
		err := s.Online(id1, id2, id3)
		if err != nil {
			t.Fatal(err)
		}
		// err = s.Offline(id1, id2, id3)
		// if err != nil {
		// 	t.Fatal(err)
		// }
	})
	onlineCount := 0
	offlineCount := 0
	for event := range s.ListenStatusChanges() {
		switch event.Status {
		case Online:
			onlineCount++
		case Offline:
			offlineCount++
		}
	}
	if onlineCount != 3 {
		t.Fatal(
			fmt.Errorf("online count should be 3 it is %d", onlineCount),
		)
	}
	if offlineCount != 3 {
		t.Fatal(
			fmt.Errorf("offline count should be 3 it is %d", offlineCount),
		)
	}
}
// TestJustMultiOffline checks that marking ids offline without ever
// marking them online succeeds.
func TestJustMultiOffline(t *testing.T) {
	s := initRedisence(t)
	defer s.Close()
	if err := s.Offline("id16", "id17"); err != nil {
		t.Fatal(err)
	}
}
// TestMultiOnlineAndOfflineTogether checks that ids can be marked online
// and then offline back-to-back without error.
func TestMultiOnlineAndOfflineTogether(t *testing.T) {
	s := initRedisence(t)
	defer s.Close()
	if err := s.Online("id18", "id19"); err != nil {
		t.Fatal(err)
	}
	if err := s.Offline("id18", "id19"); err != nil {
		t.Fatal(err)
	}
}
// TestMultiOfflineWithMultiStatus checks that ids marked online and then
// offline report Offline in a subsequent multi-id Status query.
func TestMultiOfflineWithMultiStatus(t *testing.T) {
	s := initRedisence(t)
	defer s.Close()
	if err := s.Online("id20", "id21"); err != nil {
		t.Fatal(err)
	}
	if err := s.Offline("id20", "id21"); err != nil {
		t.Fatal(err)
	}
	status, err := s.Status([]string{"id20", "id21"}...)
	if err != nil {
		t.Fatal(err)
	}
	for _, st := range status {
		if st.Status != Offline {
			t.Fatal(errors.New("user should be offline"))
		}
	}
}
|
// Copyright 2015 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
package prog
import (
"bytes"
"encoding/hex"
"fmt"
"reflect"
"strconv"
"strings"
)
// String generates a very compact program description (mostly for debug
// output): the call names joined with "-".
func (p *Prog) String() string {
	names := make([]string, 0, len(p.Calls))
	for _, c := range p.Calls {
		names = append(names, c.Meta.Name)
	}
	return strings.Join(names, "-")
}
// Serialize returns the compact (non-verbose) textual form of the program.
func (p *Prog) Serialize() []byte {
	return p.serialize(false)
}
// SerializeVerbose returns the textual form of the program with default
// values written out explicitly.
func (p *Prog) SerializeVerbose() []byte {
	return p.serialize(true)
}
// serialize renders every call in the program through a fresh serializer
// and returns the accumulated bytes. verbose controls whether default
// values are elided.
func (p *Prog) serialize(verbose bool) []byte {
	p.debugValidate()
	ctx := &serializer{
		target:  p.Target,
		buf:     new(bytes.Buffer),
		vars:    make(map[*ResultArg]int),
		verbose: verbose,
	}
	for _, c := range p.Calls {
		ctx.call(c)
	}
	return ctx.buf.Bytes()
}
// serializer holds the state of one serialization pass: the output buffer,
// the result-variable numbering (vars/varSeq), and the verbosity flag.
type serializer struct {
	target  *Target
	buf     *bytes.Buffer
	vars    map[*ResultArg]int // result arg -> assigned r<N> id
	varSeq  int                // next id to hand out
	verbose bool
}
// printf appends formatted text to the output buffer.
func (ctx *serializer) printf(text string, args ...interface{}) {
	fmt.Fprintf(ctx.buf, text, args...)
}
// allocVarID assigns the next sequential r<N> id to a result argument and
// records the mapping for later references.
func (ctx *serializer) allocVarID(arg *ResultArg) int {
	next := ctx.varSeq
	ctx.varSeq = next + 1
	ctx.vars[arg] = next
	return next
}
// call serializes one syscall: an optional "rN = " result binding, the call
// name, the argument list (padding args skipped), and a trailing
// "(prop, ...)" group for any non-default call properties.
func (ctx *serializer) call(c *Call) {
	if c.Ret != nil && len(c.Ret.uses) != 0 {
		ctx.printf("r%v = ", ctx.allocVarID(c.Ret))
	}
	ctx.printf("%v(", c.Meta.Name)
	for i, a := range c.Args {
		if IsPad(a.Type()) {
			continue
		}
		if i != 0 {
			ctx.printf(", ")
		}
		ctx.arg(a)
	}
	ctx.printf(")")
	anyChangedProps := false
	c.Props.ForeachProp(func(name, key string, value reflect.Value) {
		// reflect.Value.IsZero is added in go1.13, not available in Appengine SDK.
		if reflect.DeepEqual(value.Interface(), reflect.Zero(value.Type()).Interface()) {
			return
		}
		if !anyChangedProps {
			ctx.printf(" (")
			anyChangedProps = true
		} else {
			ctx.printf(", ")
		}
		ctx.printf(key)
		switch kind := value.Kind(); kind {
		case reflect.Int:
			ctx.printf(": %d", value.Int())
		case reflect.Bool:
			// Bool props are flags: presence of the key alone means true.
		default:
			panic("unable to serialize call prop of type " + kind.String())
		}
	})
	if anyChangedProps {
		ctx.printf(")")
	}
	ctx.printf("\n")
}
// arg serializes a single argument, emitting the literal "nil" for a nil Arg.
func (ctx *serializer) arg(arg Arg) {
	if arg != nil {
		arg.serialize(ctx)
		return
	}
	ctx.printf("nil")
}
// serialize writes a constant argument as a hex literal.
func (a *ConstArg) serialize(ctx *serializer) {
	ctx.printf("0x%x", a.Val)
}
// serialize writes a pointer argument: special pointers as a raw hex value,
// otherwise "&addr" optionally followed by "=pointee" (the pointee is
// elided for default values in non-verbose mode, except for ANY pointers).
func (a *PointerArg) serialize(ctx *serializer) {
	if a.IsSpecial() {
		ctx.printf("0x%x", a.Address)
		return
	}
	target := ctx.target
	ctx.printf("&%v", target.serializeAddr(a))
	if a.Res != nil && !ctx.verbose && isDefault(a.Res) && !target.isAnyPtr(a.Type()) {
		return
	}
	ctx.printf("=")
	if target.isAnyPtr(a.Type()) {
		ctx.printf("ANY=")
	}
	ctx.arg(a.Res)
}
// serialize writes a data (buffer) argument. Out-direction buffers encode
// only as ""/size. Otherwise trailing zero bytes are stripped for
// readability and a "/size" suffix records the original length when it
// differs from what was written.
func (a *DataArg) serialize(ctx *serializer) {
	typ := a.Type().(*BufferType)
	if a.Dir() == DirOut {
		ctx.printf("\"\"/%v", a.Size())
		return
	}
	data := a.Data()
	// Statically typed data will be padded with 0s during deserialization,
	// so we can strip them here for readability always. For variable-size
	// data we strip trailing 0s only if we strip enough of them.
	sz := len(data)
	for len(data) >= 2 && data[len(data)-1] == 0 && data[len(data)-2] == 0 {
		data = data[:len(data)-1]
	}
	if typ.Varlen() && len(data)+8 >= sz {
		// Fewer than 8 bytes saved: restore the full data instead.
		data = data[:sz]
	}
	serializeData(ctx.buf, data, isReadableDataType(typ))
	if typ.Varlen() && sz != len(data) {
		ctx.printf("/%v", sz)
	}
}
// serialize writes a struct ({...}) or array ([...]) argument, skipping
// padding fields and, in non-verbose mode for fixed-size groups, eliding
// the trailing run of default-valued elements.
func (a *GroupArg) serialize(ctx *serializer) {
	var delims []byte
	switch a.Type().(type) {
	case *StructType:
		delims = []byte{'{', '}'}
	case *ArrayType:
		delims = []byte{'[', ']'}
	default:
		panic("unknown group type")
	}
	ctx.buf.WriteByte(delims[0])
	lastNonDefault := len(a.Inner) - 1
	if !ctx.verbose && a.fixedInnerSize() {
		// Walk back over trailing default elements; they are restored
		// automatically during deserialization.
		for ; lastNonDefault >= 0; lastNonDefault-- {
			if !isDefault(a.Inner[lastNonDefault]) {
				break
			}
		}
	}
	for i := 0; i <= lastNonDefault; i++ {
		arg1 := a.Inner[i]
		if arg1 != nil && IsPad(arg1.Type()) {
			continue
		}
		if i != 0 {
			ctx.printf(", ")
		}
		ctx.arg(arg1)
	}
	ctx.buf.WriteByte(delims[1])
}
// serialize writes a union argument as "@field" plus "=value" unless the
// selected option holds its default value in non-verbose mode.
func (a *UnionArg) serialize(ctx *serializer) {
	typ := a.Type().(*UnionType)
	ctx.printf("@%v", typ.Fields[a.Index].Name)
	if !ctx.verbose && isDefault(a.Option) {
		return
	}
	ctx.printf("=")
	ctx.arg(a.Option)
}
// serialize writes a result argument: an optional "<rN=>" binding if this
// result is referenced later, then either a literal hex value or a
// reference "rN" to a previously serialized result, with optional /div
// and +add operations.
func (a *ResultArg) serialize(ctx *serializer) {
	if len(a.uses) != 0 {
		ctx.printf("<r%v=>", ctx.allocVarID(a))
	}
	if a.Res == nil {
		ctx.printf("0x%x", a.Val)
		return
	}
	id, ok := ctx.vars[a.Res]
	if !ok {
		// Referenced result must have been serialized (and numbered) earlier.
		panic("no result")
	}
	ctx.printf("r%v", id)
	if a.OpDiv != 0 {
		ctx.printf("/%v", a.OpDiv)
	}
	if a.OpAdd != 0 {
		ctx.printf("+%v", a.OpAdd)
	}
}
// DeserializeMode selects how strictly Deserialize treats malformed input:
// Strict fails on recoverable problems, NonStrict repairs what it can.
type DeserializeMode int

const (
	Strict    DeserializeMode = iota
	NonStrict DeserializeMode = iota
)
// Deserialize parses the textual program representation back into a Prog,
// validating and sanitizing the result. Any internal panic is re-raised
// with the target, revision, mode, and offending input attached for triage.
func (target *Target) Deserialize(data []byte, mode DeserializeMode) (*Prog, error) {
	defer func() {
		if err := recover(); err != nil {
			panic(fmt.Errorf("%v\ntarget: %v/%v, rev: %v, mode=%v, prog:\n%q",
				err, target.OS, target.Arch, GitRevision, mode, data))
		}
	}()
	p := newParser(target, data, mode == Strict)
	prog, err := p.parseProg()
	// Scanner-level errors take precedence over the parse result.
	if err := p.Err(); err != nil {
		return nil, err
	}
	if err != nil {
		return nil, err
	}
	// This validation is done even in non-debug mode because deserialization
	// procedure does not catch all bugs (e.g. mismatched types).
	// And we can receive bad programs from corpus and hub.
	if err := prog.validate(); err != nil {
		return nil, err
	}
	if p.autos != nil {
		p.fixupAutos(prog)
	}
	if err := prog.sanitize(mode == NonStrict); err != nil {
		return nil, err
	}
	return prog, nil
}
func (p *parser) parseProg() (*Prog, error) {
prog := &Prog{
Target: p.target,
}
for p.Scan() {
if p.EOF() {
if p.comment != "" {
prog.Comments = append(prog.Comments, p.comment)
p.comment = ""
}
continue
}
if p.Char() == '#' {
if p.comment != "" {
prog.Comments = append(prog.Comments, p.comment)
}
p.comment = strings.TrimSpace(p.s[p.i+1:])
continue
}
name := p.Ident()
r := ""
if p.Char() == '=' {
r = name
p.Parse('=')
name = p.Ident()
}
meta := p.target.SyscallMap[name]
if meta == nil {
return nil, fmt.Errorf("unknown syscall %v", name)
}
c := MakeCall(meta, nil)
c.Comment = p.comment
prog.Calls = append(prog.Calls, c)
p.Parse('(')
for i := 0; p.Char() != ')'; i++ {
if i >= len(meta.Args) {
p.eatExcessive(false, "excessive syscall arguments")
break
}
field := meta.Args[i]
if IsPad(field.Type) {
return nil, fmt.Errorf("padding in syscall %v arguments", name)
}
arg, err := p.parseArg(field.Type, DirIn)
if err != nil {
return nil, err
}
c.Args = append(c.Args, arg)
if p.Char() != ')' {
p.Parse(',')
}
}
p.Parse(')')
if !p.EOF() && p.Char() == '(' {
p.Parse('(')
c.Props = p.parseCallProps()
p.Parse(')')
}
if !p.EOF() {
if p.Char() != '#' {
return nil, fmt.Errorf("tailing data (line #%v)", p.l)
}
if c.Comment != "" {
prog.Comments = append(prog.Comments, c.Comment)
}
c.Comment = strings.TrimSpace(p.s[p.i+1:])
}
for i := len(c.Args); i < len(meta.Args); i++ {
p.strictFailf("missing syscall args")
c.Args = append(c.Args, meta.Args[i].DefaultArg(DirIn))
}
if len(c.Args) != len(meta.Args) {
return nil, fmt.Errorf("wrong call arg count: %v, want %v", len(c.Args), len(meta.Args))
}
if r != "" && c.Ret != nil {
p.vars[r] = c.Ret
}
p.comment = ""
}
if p.comment != "" {
prog.Comments = append(prog.Comments, p.comment)
}
return prog, nil
}
func (p *parser) parseCallProps() CallProps {
nameToValue := map[string]reflect.Value{}
callProps := CallProps{}
callProps.ForeachProp(func(_, key string, value reflect.Value) {
nameToValue[key] = value
})
for p.e == nil && p.Char() != ')' {
propName := p.Ident()
value, ok := nameToValue[propName]
if !ok {
p.eatExcessive(true, "unknown call property: %s", propName)
if p.Char() == ',' {
p.Parse(',')
}
continue
}
switch kind := value.Kind(); kind {
case reflect.Int:
p.Parse(':')
strVal := p.Ident()
intV, err := strconv.ParseInt(strVal, 0, 64)
if err != nil {
p.strictFailf("invalid int value: %s", strVal)
} else {
value.SetInt(intV)
}
case reflect.Bool:
value.SetBool(true)
default:
panic("unable to handle call props of type " + kind.String())
}
if p.Char() != ')' {
p.Parse(',')
}
}
return callProps
}
// parseArg parses one argument, handling an optional "<rN=>" result-variable
// binding prefix before delegating to parseArgImpl. A nil parse result is
// replaced with the type's default value; naming a nil argument is an error.
func (p *parser) parseArg(typ Type, dir Dir) (Arg, error) {
	r := ""
	if p.Char() == '<' {
		p.Parse('<')
		r = p.Ident()
		p.Parse('=')
		p.Parse('>')
	}
	arg, err := p.parseArgImpl(typ, dir)
	if err != nil {
		return nil, err
	}
	if arg == nil {
		if typ != nil {
			arg = typ.DefaultArg(dir)
		} else if r != "" {
			return nil, fmt.Errorf("named nil argument")
		}
	}
	if r != "" {
		// Record the binding only for result args; other kinds can't be referenced.
		if res, ok := arg.(*ResultArg); ok {
			p.vars[r] = res
		}
	}
	return arg, nil
}
// parseArgImpl dispatches on the argument's first character to the matching
// parser: 0x literal, rN reference, &pointer, string, {struct}, [array],
// @union, nil, or AUTO.
func (p *parser) parseArgImpl(typ Type, dir Dir) (Arg, error) {
	if typ == nil && p.Char() != 'n' {
		// Only "nil" is acceptable when the expected type is unknown.
		p.eatExcessive(true, "non-nil argument for nil type")
		return nil, nil
	}
	switch p.Char() {
	case '0':
		return p.parseArgInt(typ, dir)
	case 'r':
		return p.parseArgRes(typ, dir)
	case '&':
		return p.parseArgAddr(typ, dir)
	case '"', '\'':
		return p.parseArgString(typ, dir)
	case '{':
		return p.parseArgStruct(typ, dir)
	case '[':
		return p.parseArgArray(typ, dir)
	case '@':
		return p.parseArgUnion(typ, dir)
	case 'n':
		p.Parse('n')
		p.Parse('i')
		p.Parse('l')
		return nil, nil
	case 'A':
		p.Parse('A')
		p.Parse('U')
		p.Parse('T')
		p.Parse('O')
		return p.parseAuto(typ, dir)
	default:
		return nil, fmt.Errorf("failed to parse argument at '%c' (line #%v/%v: %v)",
			p.Char(), p.l, p.i, p.s)
	}
}
// parseArgInt parses an integer literal argument, adapting it to the
// expected type: const-like types get a ConstArg (reset to default for out
// args in strict mode), resources get a ResultArg, and pointer/vma types
// map the value onto a special-pointer index.
func (p *parser) parseArgInt(typ Type, dir Dir) (Arg, error) {
	val := p.Ident()
	v, err := strconv.ParseUint(val, 0, 64)
	if err != nil {
		return nil, fmt.Errorf("wrong arg value '%v': %v", val, err)
	}
	switch typ.(type) {
	case *ConstType, *IntType, *FlagsType, *ProcType, *CsumType:
		arg := Arg(MakeConstArg(typ, dir, v))
		if dir == DirOut && !typ.isDefaultArg(arg) {
			p.strictFailf("out arg %v has non-default value: %v", typ, v)
			arg = typ.DefaultArg(dir)
		}
		return arg, nil
	case *LenType:
		return MakeConstArg(typ, dir, v), nil
	case *ResourceType:
		return MakeResultArg(typ, dir, nil, v), nil
	case *PtrType, *VmaType:
		// Old-style special pointer values; map onto the special-pointer table.
		index := -v % uint64(len(p.target.SpecialPointers))
		return MakeSpecialPointerArg(typ, dir, index), nil
	default:
		p.eatExcessive(true, "wrong int arg %T", typ)
		return typ.DefaultArg(dir), nil
	}
}
// parseAuto handles an AUTO argument: allowed only for const/len/csum types,
// which get a zero ConstArg registered for later auto-fixup.
func (p *parser) parseAuto(typ Type, dir Dir) (Arg, error) {
	switch typ.(type) {
	case *ConstType, *LenType, *CsumType:
		return p.auto(MakeConstArg(typ, dir, 0)), nil
	default:
		return nil, fmt.Errorf("wrong type %T for AUTO", typ)
	}
}
func (p *parser) parseArgRes(typ Type, dir Dir) (Arg, error) {
id := p.Ident()
var div, add uint64
if p.Char() == '/' {
p.Parse('/')
op := p.Ident()
v, err := strconv.ParseUint(op, 0, 64)
if err != nil {
return nil, fmt.Errorf("wrong result div op: '%v'", op)
}
div = v
}
if p.Char() == '+' {
p.Parse('+')
op := p.Ident()
v, err := strconv.ParseUint(op, 0, 64)
if err != nil {
return nil, fmt.Errorf("wrong result add op: '%v'", op)
}
add = v
}
v := p.vars[id]
if v == nil {
p.strictFailf("undeclared variable %v", id)
return typ.DefaultArg(dir), nil
}
arg := MakeResultArg(typ, dir, v, 0)
arg.OpDiv = div
arg.OpAdd = add
return arg, nil
}
func (p *parser) parseArgAddr(typ Type, dir Dir) (Arg, error) {
var elem Type
elemDir := DirInOut
switch t1 := typ.(type) {
case *PtrType:
elem, elemDir = t1.Elem, t1.ElemDir
case *VmaType:
default:
p.eatExcessive(true, "wrong addr arg %T", typ)
return typ.DefaultArg(dir), nil
}
p.Parse('&')
auto := false
var addr, vmaSize uint64
if p.Char() == 'A' {
p.Parse('A')
p.Parse('U')
p.Parse('T')
p.Parse('O')
if elem == nil {
return nil, fmt.Errorf("vma type can't be AUTO")
}
auto = true
} else {
var err error
addr, vmaSize, err = p.parseAddr()
if err != nil {
return nil, err
}
}
var inner Arg
if p.Char() == '=' {
p.Parse('=')
if p.Char() == 'A' {
p.Parse('A')
p.Parse('N')
p.Parse('Y')
p.Parse('=')
anyPtr := p.target.getAnyPtrType(typ.Size())
typ, elem, elemDir = anyPtr, anyPtr.Elem, anyPtr.ElemDir
}
var err error
inner, err = p.parseArg(elem, elemDir)
if err != nil {
return nil, err
}
}
if elem == nil {
if addr%p.target.PageSize != 0 {
p.strictFailf("unaligned vma address 0x%x", addr)
addr &= ^(p.target.PageSize - 1)
}
return MakeVmaPointerArg(typ, dir, addr, vmaSize), nil
}
if inner == nil {
inner = elem.DefaultArg(elemDir)
}
arg := MakePointerArg(typ, dir, addr, inner)
if auto {
p.auto(arg)
}
return arg, nil
}
func (p *parser) parseArgString(t Type, dir Dir) (Arg, error) {
typ, ok := t.(*BufferType)
if !ok {
p.eatExcessive(true, "wrong string arg")
return t.DefaultArg(dir), nil
}
data, err := p.deserializeData()
if err != nil {
return nil, err
}
size := ^uint64(0)
if p.Char() == '/' {
p.Parse('/')
sizeStr := p.Ident()
size, err = strconv.ParseUint(sizeStr, 0, 64)
if err != nil {
return nil, fmt.Errorf("failed to parse buffer size: %q", sizeStr)
}
maxMem := p.target.NumPages * p.target.PageSize
if size > maxMem {
p.strictFailf("too large string argument %v", size)
size = maxMem
}
}
if !typ.Varlen() {
size = typ.Size()
} else if size == ^uint64(0) {
size = uint64(len(data))
}
if dir == DirOut {
return MakeOutDataArg(typ, dir, size), nil
}
if diff := int(size) - len(data); diff > 0 {
data = append(data, make([]byte, diff)...)
}
data = data[:size]
if (typ.Kind == BufferString || typ.Kind == BufferGlob) &&
len(typ.Values) != 0 &&
// AUTOGENERATED will be padded by 0's.
!strings.HasPrefix(typ.Values[0], "AUTOGENERATED") {
matched := false
for _, val := range typ.Values {
if string(data) == val {
matched = true
break
}
}
if !matched {
p.strictFailf("bad string value %q, expect %q", data, typ.Values)
data = []byte(typ.Values[0])
}
}
return MakeDataArg(typ, dir, data), nil
}
func (p *parser) parseArgStruct(typ Type, dir Dir) (Arg, error) {
p.Parse('{')
t1, ok := typ.(*StructType)
if !ok {
p.eatExcessive(false, "wrong struct arg")
p.Parse('}')
return typ.DefaultArg(dir), nil
}
var inner []Arg
for i := 0; p.Char() != '}'; i++ {
if i >= len(t1.Fields) {
p.eatExcessive(false, "excessive struct %v fields", typ.Name())
break
}
field := t1.Fields[i]
if IsPad(field.Type) {
inner = append(inner, MakeConstArg(field.Type, field.Dir(dir), 0))
} else {
arg, err := p.parseArg(field.Type, field.Dir(dir))
if err != nil {
return nil, err
}
inner = append(inner, arg)
if p.Char() != '}' {
p.Parse(',')
}
}
}
p.Parse('}')
for len(inner) < len(t1.Fields) {
field := t1.Fields[len(inner)]
if !IsPad(field.Type) {
p.strictFailf("missing struct %v fields %v/%v", typ.Name(), len(inner), len(t1.Fields))
}
inner = append(inner, field.Type.DefaultArg(field.Dir(dir)))
}
return MakeGroupArg(typ, dir, inner), nil
}
func (p *parser) parseArgArray(typ Type, dir Dir) (Arg, error) {
p.Parse('[')
t1, ok := typ.(*ArrayType)
if !ok {
p.eatExcessive(false, "wrong array arg %T", typ)
p.Parse(']')
return typ.DefaultArg(dir), nil
}
var inner []Arg
for i := 0; p.Char() != ']'; i++ {
arg, err := p.parseArg(t1.Elem, dir)
if err != nil {
return nil, err
}
inner = append(inner, arg)
if p.Char() != ']' {
p.Parse(',')
}
}
p.Parse(']')
if t1.Kind == ArrayRangeLen && t1.RangeBegin == t1.RangeEnd {
for uint64(len(inner)) < t1.RangeBegin {
p.strictFailf("missing array elements")
inner = append(inner, t1.Elem.DefaultArg(dir))
}
inner = inner[:t1.RangeBegin]
}
return MakeGroupArg(typ, dir, inner), nil
}
// parseArgUnion parses a serialized union argument of the form @option=value
// (or just @option when the value is the default one). Unknown options fall
// back to the default argument (an error in strict mode).
func (p *parser) parseArgUnion(typ Type, dir Dir) (Arg, error) {
	t1, ok := typ.(*UnionType)
	if !ok {
		p.eatExcessive(true, "wrong union arg")
		return typ.DefaultArg(dir), nil
	}
	p.Parse('@')
	name := p.Ident()
	var (
		optType Type
		optDir  Dir
	)
	index := -1
	// Look up the union option by field name.
	for i, field := range t1.Fields {
		if name == field.Name {
			optType, index, optDir = field.Type, i, field.Dir(dir)
			break
		}
	}
	if optType == nil {
		p.eatExcessive(true, "wrong union option")
		return typ.DefaultArg(dir), nil
	}
	var opt Arg
	if p.Char() == '=' {
		p.Parse('=')
		var err error
		opt, err = p.parseArg(optType, optDir)
		if err != nil {
			return nil, err
		}
	} else {
		// No value given: use the default for the option type.
		opt = optType.DefaultArg(optDir)
	}
	return MakeUnionArg(typ, dir, opt, index), nil
}
// Eats excessive call arguments and struct fields to recover after description changes.
// It tracks bracket nesting and stops (without consuming) at the closing
// bracket of an outer level, or at a top-level comma if stopAtComma is set.
// Quoted strings are skipped verbatim. The problem is reported via strictFailf.
func (p *parser) eatExcessive(stopAtComma bool, what string, args ...interface{}) {
	p.strictFailf(what, args...)
	paren, brack, brace := 0, 0, 0
	for !p.EOF() && p.e == nil {
		ch := p.Char()
		switch ch {
		case '(':
			paren++
		case ')':
			if paren == 0 {
				return // closing bracket of an enclosing level: stop before it
			}
			paren--
		case '[':
			brack++
		case ']':
			if brack == 0 {
				return
			}
			brack--
		case '{':
			brace++
		case '}':
			if brace == 0 {
				return
			}
			brace--
		case ',':
			if stopAtComma && paren == 0 && brack == 0 && brace == 0 {
				return
			}
		case '\'', '"':
			// Skip the whole quoted string so brackets inside it don't count.
			p.Parse(ch)
			for !p.EOF() && p.Char() != ch {
				p.Parse(p.Char())
			}
			if p.EOF() {
				return
			}
		}
		p.Parse(ch)
	}
}
const (
	// encodingAddrBase is added to all pointer addresses in the serialized
	// form so that they look like plausible userspace addresses.
	encodingAddrBase = 0x7f0000000000
)

// serializeAddr formats a pointer argument address (plus a /size suffix for
// VMAs) in the form parseAddr accepts back.
func (target *Target) serializeAddr(arg *PointerArg) string {
	ssize := ""
	if arg.VmaSize != 0 {
		ssize = fmt.Sprintf("/0x%x", arg.VmaSize)
	}
	return fmt.Sprintf("(0x%x%v)", encodingAddrBase+arg.Address, ssize)
}
// parseAddr parses a serialized address "(0xADDR[+/-off][/size])" and returns
// the de-based address and the page-aligned VMA size (0 when no /size suffix).
func (p *parser) parseAddr() (uint64, uint64, error) {
	p.Parse('(')
	pstr := p.Ident()
	addr, err := strconv.ParseUint(pstr, 0, 64)
	if err != nil {
		return 0, 0, fmt.Errorf("failed to parse addr: %q", pstr)
	}
	if addr < encodingAddrBase {
		return 0, 0, fmt.Errorf("address without base offset: %q", pstr)
	}
	addr -= encodingAddrBase
	// This is not used anymore, but left here to parse old programs.
	if p.Char() == '+' || p.Char() == '-' {
		minus := false
		if p.Char() == '-' {
			minus = true
			p.Parse('-')
		} else {
			p.Parse('+')
		}
		ostr := p.Ident()
		off, err := strconv.ParseUint(ostr, 0, 64)
		if err != nil {
			return 0, 0, fmt.Errorf("failed to parse addr offset: %q", ostr)
		}
		if minus {
			off = -off // unsigned negation wraps, effectively subtracting
		}
		addr += off
	}
	target := p.target
	maxMem := target.NumPages * target.PageSize
	var vmaSize uint64
	if p.Char() == '/' {
		// VMA size suffix: page-align both address and size and clamp them
		// to the target's mapped memory range.
		p.Parse('/')
		pstr := p.Ident()
		size, err := strconv.ParseUint(pstr, 0, 64)
		if err != nil {
			return 0, 0, fmt.Errorf("failed to parse addr size: %q", pstr)
		}
		addr = addr & ^(target.PageSize - 1)
		vmaSize = (size + target.PageSize - 1) & ^(target.PageSize - 1)
		if vmaSize == 0 {
			vmaSize = target.PageSize
		}
		if vmaSize > maxMem {
			vmaSize = maxMem
		}
		if addr > maxMem-vmaSize {
			addr = maxMem - vmaSize
		}
	}
	p.Parse(')')
	return addr, vmaSize, nil
}
// serializeData writes data either as a quoted '...' escaped string (when
// the data is readable or requested to be readable) or as a hex-encoded
// "..." literal otherwise.
func serializeData(buf *bytes.Buffer, data []byte, readable bool) {
	if readable || isReadableData(data) {
		buf.WriteByte('\'')
		encodeData(buf, data, true, false)
		buf.WriteByte('\'')
		return
	}
	fmt.Fprintf(buf, "\"%v\"", hex.EncodeToString(data))
}
func EncodeData(buf *bytes.Buffer, data []byte, readable bool) {
if !readable && isReadableData(data) {
readable = true
}
encodeData(buf, data, readable, true)
}
// encodeData writes data to buf. Non-readable data is emitted as \xNN hex
// escapes. Readable data uses conventional escapes for control/quote
// characters; remaining non-printable bytes are emitted as 3-digit octal
// escapes in cstr mode (safe inside C string literals) and as \xNN otherwise.
func encodeData(buf *bytes.Buffer, data []byte, readable, cstr bool) {
	for _, v := range data {
		if !readable {
			lo, hi := byteToHex(v)
			buf.Write([]byte{'\\', 'x', hi, lo})
			continue
		}
		switch v {
		case '\a':
			buf.Write([]byte{'\\', 'a'})
		case '\b':
			buf.Write([]byte{'\\', 'b'})
		case '\f':
			buf.Write([]byte{'\\', 'f'})
		case '\n':
			buf.Write([]byte{'\\', 'n'})
		case '\r':
			buf.Write([]byte{'\\', 'r'})
		case '\t':
			buf.Write([]byte{'\\', 't'})
		case '\v':
			buf.Write([]byte{'\\', 'v'})
		case '\'':
			buf.Write([]byte{'\\', '\''})
		case '"':
			buf.Write([]byte{'\\', '"'})
		case '\\':
			buf.Write([]byte{'\\', '\\'})
		default:
			if isPrintable(v) {
				buf.WriteByte(v)
			} else {
				if cstr {
					// We would like to use hex encoding with \x,
					// but C's \x is hard to use: it can contain _any_ number of hex digits
					// (not just 2 or 4), so later non-hex encoded chars will glue to \x.
					c0 := (v>>6)&0x7 + '0'
					c1 := (v>>3)&0x7 + '0'
					c2 := (v>>0)&0x7 + '0'
					buf.Write([]byte{'\\', c0, c1, c2})
				} else {
					lo, hi := byteToHex(v)
					buf.Write([]byte{'\\', 'x', hi, lo})
				}
			}
		}
	}
}
// isReadableDataType reports whether buffers of this kind should be
// serialized in human-readable (quoted/escaped) form.
func isReadableDataType(typ *BufferType) bool {
	switch typ.Kind {
	case BufferString, BufferFilename, BufferGlob:
		return true
	}
	return false
}
// isReadableData reports whether data is non-empty and consists only of
// printable ASCII characters plus common control characters (NUL and the
// standard whitespace escapes).
func isReadableData(data []byte) bool {
	if len(data) == 0 {
		return false
	}
	for _, c := range data {
		switch {
		case isPrintable(c):
			// Printable ASCII is fine as-is.
		case c == 0, c == '\a', c == '\b', c == '\f', c == '\n',
			c == '\r', c == '\t', c == '\v':
			// These have conventional escape sequences.
		default:
			return false
		}
	}
	return true
}
// deserializeData parses a data argument payload: either a hex-encoded
// "..." literal or a quoted '...' string with escape sequences. This is the
// inverse of serializeData.
func (p *parser) deserializeData() ([]byte, error) {
	var data []byte
	if p.Char() == '"' {
		// Hex form: "b090..." (possibly empty).
		p.Parse('"')
		val := ""
		if p.Char() != '"' {
			val = p.Ident()
		}
		p.Parse('"')
		var err error
		data, err = hex.DecodeString(val)
		if err != nil {
			return nil, fmt.Errorf("data arg has bad value %q", val)
		}
	} else {
		// Readable form: '...' with \xNN and conventional escapes.
		if p.consume() != '\'' {
			return nil, fmt.Errorf("data arg does not start with \" nor with '")
		}
		for p.Char() != '\'' && p.Char() != 0 {
			v := p.consume()
			if v != '\\' {
				data = append(data, v)
				continue
			}
			v = p.consume()
			switch v {
			case 'x':
				hi := p.consume()
				lo := p.consume()
				b, ok := hexToByte(lo, hi)
				if !ok {
					return nil, fmt.Errorf("invalid hex \\x%v%v in data arg", hi, lo)
				}
				data = append(data, b)
			case 'a':
				data = append(data, '\a')
			case 'b':
				data = append(data, '\b')
			case 'f':
				data = append(data, '\f')
			case 'n':
				data = append(data, '\n')
			case 'r':
				data = append(data, '\r')
			case 't':
				data = append(data, '\t')
			case 'v':
				data = append(data, '\v')
			case '\'':
				data = append(data, '\'')
			case '"':
				data = append(data, '"')
			case '\\':
				data = append(data, '\\')
			default:
				return nil, fmt.Errorf("invalid \\%c escape sequence in data arg", v)
			}
		}
		p.Parse('\'')
	}
	return data, nil
}
// isPrintable reports whether v is a printable ASCII character
// (space through tilde).
func isPrintable(v byte) bool {
	return 0x20 <= v && v <= 0x7e
}
// byteToHex splits v into its low and high hex digit characters.
func byteToHex(v byte) (lo, hi byte) {
	lo = toHexChar(v & 0xf)
	hi = toHexChar(v >> 4)
	return lo, hi
}
// hexToByte combines two hex digit characters into a byte; ok is false if
// either character is not a valid lowercase hex digit.
func hexToByte(lo, hi byte) (byte, bool) {
	h, okHi := fromHexChar(hi)
	l, okLo := fromHexChar(lo)
	return h<<4 | l, okHi && okLo
}
// toHexChar converts a value in [0, 16) to its lowercase hex digit;
// panics on out-of-range input (a programmer error).
func toHexChar(v byte) byte {
	if v >= 16 {
		panic("bad hex char")
	}
	const digits = "0123456789abcdef"
	return digits[v]
}
// fromHexChar converts a lowercase hex digit to its numeric value;
// ok is false for any other character (including uppercase digits).
func fromHexChar(v byte) (byte, bool) {
	switch {
	case '0' <= v && v <= '9':
		return v - '0', true
	case 'a' <= v && v <= 'f':
		return v - 'a' + 10, true
	default:
		return 0, false
	}
}
// parser holds the state of program deserialization: the remaining input,
// the current line with a position in it, and the variable environment.
type parser struct {
	target  *Target
	strict  bool                  // fail on recoverable format errors
	vars    map[string]*ResultArg // rN result variables seen so far
	autos   map[Arg]bool          // args with AUTO values, fixed up after parsing
	comment string                // pending # comment to attach to the next call
	data    []byte                // remaining unscanned input
	s       string                // current line
	i       int                   // position within the current line
	l       int                   // current line number (for error messages)
	e       error                 // sticky first error
}
// newParser creates a parser over data; strict controls whether recoverable
// format deviations are treated as errors.
func newParser(target *Target, data []byte, strict bool) *parser {
	return &parser{
		target: target,
		strict: strict,
		vars:   make(map[string]*ResultArg),
		data:   data,
	}
}
// auto marks arg as carrying an AUTO value whose concrete value must be
// computed later by fixupAutos. Returns arg for call-chaining convenience.
func (p *parser) auto(arg Arg) Arg {
	if p.autos == nil {
		p.autos = make(map[Arg]bool)
	}
	p.autos[arg] = true
	return arg
}
// fixupAutos assigns concrete values to all AUTO arguments (auto-computed
// sizes, const values and pointer addresses) once the whole program has
// been parsed. Panics if any AUTO marker remains unconsumed.
func (p *parser) fixupAutos(prog *Prog) {
	s := analyze(nil, nil, prog, nil)
	for _, c := range prog.Calls {
		p.target.assignSizesArray(c.Args, c.Meta.Args, p.autos)
		ForeachArg(c, func(arg Arg, _ *ArgCtx) {
			if !p.autos[arg] {
				return
			}
			delete(p.autos, arg)
			switch typ := arg.Type().(type) {
			case *ConstType:
				arg.(*ConstArg).Val = typ.Val
				_ = s
			case *PtrType:
				a := arg.(*PointerArg)
				a.Address = s.ma.alloc(nil, a.Res.Size(), a.Res.Type().Alignment())
			default:
				panic(fmt.Sprintf("unsupported auto type %T", typ))
			}
		})
	}
	if len(p.autos) != 0 {
		// Every AUTO marker must have been consumed above.
		// (Message typo fixed: was "leftoever".)
		panic(fmt.Sprintf("leftover autos: %+v", p.autos))
	}
}
// Scan advances the parser to the next input line, returning false after
// an error or when the input is exhausted.
func (p *parser) Scan() bool {
	if p.e != nil || len(p.data) == 0 {
		return false
	}
	line := p.data
	if idx := bytes.IndexByte(p.data, '\n'); idx >= 0 {
		line = p.data[:idx]
		p.data = p.data[idx+1:]
	} else {
		p.data = nil
	}
	p.s = string(line)
	p.i = 0
	p.l++
	return true
}
// Err returns the first error encountered during parsing, if any.
func (p *parser) Err() error {
	return p.e
}

// Str returns the current input line.
func (p *parser) Str() string {
	return p.s
}

// EOF reports whether the current line is fully consumed.
func (p *parser) EOF() bool {
	return p.i == len(p.s)
}
// Char returns the current character without consuming it.
// It returns 0 after a previous error, or records an error and
// returns 0 at end of line.
func (p *parser) Char() byte {
	switch {
	case p.e != nil:
		return 0
	case p.EOF():
		p.failf("unexpected eof")
		return 0
	default:
		return p.s[p.i]
	}
}
// Parse consumes the expected character ch and skips trailing whitespace.
// A mismatch or end of line records an error (subsequent calls become no-ops).
func (p *parser) Parse(ch byte) {
	if p.e != nil {
		return
	}
	if p.EOF() {
		// Quote the wanted char the same way as the mismatch message below
		// (was the unquoted "want %s, got EOF").
		p.failf("want '%v', got EOF", string(ch))
		return
	}
	if p.s[p.i] != ch {
		p.failf("want '%v', got '%v'", string(ch), string(p.s[p.i]))
		return
	}
	p.i++
	p.SkipWs()
}
// consume returns the current character and advances past it.
// It returns 0 after a previous error, or records an error and
// returns 0 at end of line.
func (p *parser) consume() byte {
	switch {
	case p.e != nil:
		return 0
	case p.EOF():
		p.failf("unexpected eof")
		return 0
	}
	c := p.s[p.i]
	p.i++
	return c
}
// SkipWs advances past any spaces and tabs at the current position.
func (p *parser) SkipWs() {
	for ; p.i < len(p.s); p.i++ {
		if c := p.s[p.i]; c != ' ' && c != '\t' {
			break
		}
	}
}
// Ident parses an identifier ([a-zA-Z0-9_$]+) and skips trailing whitespace.
// Records an error and returns "" when no identifier character is present.
func (p *parser) Ident() string {
	isIdentChar := func(c byte) bool {
		return c >= 'a' && c <= 'z' ||
			c >= 'A' && c <= 'Z' ||
			c >= '0' && c <= '9' ||
			c == '_' || c == '$'
	}
	start := p.i
	for p.i < len(p.s) && isIdentChar(p.s[p.i]) {
		p.i++
	}
	if start == p.i {
		p.failf("failed to parse identifier at pos %v", start)
		return ""
	}
	ident := p.s[start:p.i]
	p.SkipWs()
	return ident
}
// failf records a parse error with line/position context. Only the first
// error is kept (it is sticky); later calls are no-ops.
func (p *parser) failf(msg string, args ...interface{}) {
	if p.e == nil {
		p.e = fmt.Errorf("%v\nline #%v:%v: %v", fmt.Sprintf(msg, args...), p.l, p.i, p.s)
	}
}

// strictFailf records an error only in strict mode; in non-strict mode
// the problem is silently recovered from.
func (p *parser) strictFailf(msg string, args ...interface{}) {
	if p.strict {
		p.failf(msg, args...)
	}
}
// CallSet returns a set of all calls in the program.
// It does very conservative parsing and is intended to parse past/future serialization formats.
func CallSet(data []byte) (map[string]struct{}, int, error) {
	calls := make(map[string]struct{})
	ncalls := 0
	for len(data) > 0 {
		line := data
		if idx := bytes.IndexByte(data, '\n'); idx >= 0 {
			line = data[:idx]
			data = data[idx+1:]
		} else {
			data = nil
		}
		if len(line) == 0 || line[0] == '#' {
			continue // blank line or comment
		}
		bracket := bytes.IndexByte(line, '(')
		if bracket == -1 {
			return nil, 0, fmt.Errorf("line does not contain opening bracket")
		}
		call := line[:bracket]
		// Strip an optional "rN = " result-assignment prefix.
		if eq := bytes.IndexByte(call, '='); eq != -1 {
			eq++
			for eq < len(call) && call[eq] == ' ' {
				eq++
			}
			call = call[eq:]
		}
		if len(call) == 0 {
			return nil, 0, fmt.Errorf("call name is empty")
		}
		calls[string(call)] = struct{}{}
		ncalls++
	}
	if len(calls) == 0 {
		return nil, 0, fmt.Errorf("program does not contain any calls")
	}
	return calls, ncalls, nil
}
prog: error if program variable refers to non-resource
Error if program variable refers to non-resource in strict parsing mode.
Such errors are hard to diagnose otherwise since the variable is silently discarded.
// Copyright 2015 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
package prog
import (
"bytes"
"encoding/hex"
"fmt"
"reflect"
"strconv"
"strings"
)
// String generates a very compact program description (mostly for debug output):
// the dash-separated list of call names.
func (p *Prog) String() string {
	names := make([]string, 0, len(p.Calls))
	for _, c := range p.Calls {
		names = append(names, c.Meta.Name)
	}
	return strings.Join(names, "-")
}
// Serialize returns the program in its compact serialized text form.
func (p *Prog) Serialize() []byte {
	return p.serialize(false)
}

// SerializeVerbose is like Serialize but emits all values, including the
// default ones that the compact form omits.
func (p *Prog) SerializeVerbose() []byte {
	return p.serialize(true)
}
// serialize writes all calls of the program into the textual form that
// Target.Deserialize accepts back.
func (p *Prog) serialize(verbose bool) []byte {
	p.debugValidate()
	ctx := &serializer{
		target:  p.Target,
		buf:     new(bytes.Buffer),
		vars:    make(map[*ResultArg]int),
		verbose: verbose,
	}
	for _, c := range p.Calls {
		ctx.call(c)
	}
	return ctx.buf.Bytes()
}
// serializer holds the output buffer and result-variable numbering used
// while serializing a program.
type serializer struct {
	target  *Target
	buf     *bytes.Buffer
	vars    map[*ResultArg]int // result arg -> assigned rN id
	varSeq  int                // next rN id to hand out
	verbose bool               // emit default values too
}

// printf appends formatted text to the output buffer.
func (ctx *serializer) printf(text string, args ...interface{}) {
	fmt.Fprintf(ctx.buf, text, args...)
}

// allocVarID assigns the next sequential rN id to a result argument.
func (ctx *serializer) allocVarID(arg *ResultArg) int {
	id := ctx.varSeq
	ctx.varSeq++
	ctx.vars[arg] = id
	return id
}
// call serializes a single call: an optional "rN = " result binding, the
// call name with its non-padding arguments, and any non-default call
// properties in a trailing parenthesized group.
func (ctx *serializer) call(c *Call) {
	if c.Ret != nil && len(c.Ret.uses) != 0 {
		ctx.printf("r%v = ", ctx.allocVarID(c.Ret))
	}
	ctx.printf("%v(", c.Meta.Name)
	for i, a := range c.Args {
		if IsPad(a.Type()) {
			continue
		}
		if i != 0 {
			ctx.printf(", ")
		}
		ctx.arg(a)
	}
	ctx.printf(")")
	anyChangedProps := false
	c.Props.ForeachProp(func(name, key string, value reflect.Value) {
		// reflect.Value.IsZero is added in go1.13, not available in Appengine SDK.
		if reflect.DeepEqual(value.Interface(), reflect.Zero(value.Type()).Interface()) {
			return
		}
		if !anyChangedProps {
			ctx.printf(" (")
			anyChangedProps = true
		} else {
			ctx.printf(", ")
		}
		// Print the key via %v instead of using it as the format string:
		// a key containing '%' would otherwise be misinterpreted by Fprintf.
		ctx.printf("%v", key)
		switch kind := value.Kind(); kind {
		case reflect.Int:
			ctx.printf(": %d", value.Int())
		case reflect.Bool:
			// Bool props are flags: presence alone means true.
		default:
			panic("unable to serialize call prop of type " + kind.String())
		}
	})
	if anyChangedProps {
		ctx.printf(")")
	}
	ctx.printf("\n")
}
// arg serializes a single argument; a nil argument is emitted as "nil".
func (ctx *serializer) arg(arg Arg) {
	if arg == nil {
		ctx.printf("nil")
		return
	}
	arg.serialize(ctx)
}
// serialize emits a constant as a hex literal.
func (a *ConstArg) serialize(ctx *serializer) {
	ctx.printf("0x%x", a.Val)
}

// serialize emits a pointer as &addr=payload. Special pointers are emitted
// as raw values; default payloads are omitted in compact (non-verbose) mode,
// and squashed any-pointers get an extra "ANY=" marker.
func (a *PointerArg) serialize(ctx *serializer) {
	if a.IsSpecial() {
		ctx.printf("0x%x", a.Address)
		return
	}
	target := ctx.target
	ctx.printf("&%v", target.serializeAddr(a))
	if a.Res != nil && !ctx.verbose && isDefault(a.Res) && !target.isAnyPtr(a.Type()) {
		return
	}
	ctx.printf("=")
	if target.isAnyPtr(a.Type()) {
		ctx.printf("ANY=")
	}
	ctx.arg(a.Res)
}
// serialize emits a data argument. Out args are emitted as ""/size; in args
// go through serializeData, with runs of trailing zeros stripped for
// readability and a /size suffix recording the original length.
func (a *DataArg) serialize(ctx *serializer) {
	typ := a.Type().(*BufferType)
	if a.Dir() == DirOut {
		ctx.printf("\"\"/%v", a.Size())
		return
	}
	data := a.Data()
	// Statically typed data will be padded with 0s during deserialization,
	// so we can strip them here for readability always. For variable-size
	// data we strip trailing 0s only if we strip enough of them.
	sz := len(data)
	for len(data) >= 2 && data[len(data)-1] == 0 && data[len(data)-2] == 0 {
		data = data[:len(data)-1]
	}
	if typ.Varlen() && len(data)+8 >= sz {
		// Fewer than 8 zeros stripped: not worth it, restore the full data.
		data = data[:sz]
	}
	serializeData(ctx.buf, data, isReadableDataType(typ))
	if typ.Varlen() && sz != len(data) {
		ctx.printf("/%v", sz)
	}
}
// serialize emits a struct as {f1, ...} or an array as [e1, ...]. In compact
// mode, trailing default elements of fixed-size groups are omitted; padding
// elements are never emitted.
func (a *GroupArg) serialize(ctx *serializer) {
	var delims []byte
	switch a.Type().(type) {
	case *StructType:
		delims = []byte{'{', '}'}
	case *ArrayType:
		delims = []byte{'[', ']'}
	default:
		panic("unknown group type")
	}
	ctx.buf.WriteByte(delims[0])
	lastNonDefault := len(a.Inner) - 1
	if !ctx.verbose && a.fixedInnerSize() {
		// Find the last element that actually needs to be emitted.
		for ; lastNonDefault >= 0; lastNonDefault-- {
			if !isDefault(a.Inner[lastNonDefault]) {
				break
			}
		}
	}
	for i := 0; i <= lastNonDefault; i++ {
		arg1 := a.Inner[i]
		if arg1 != nil && IsPad(arg1.Type()) {
			continue // padding is implicit and recreated on deserialization
		}
		if i != 0 {
			ctx.printf(", ")
		}
		ctx.arg(arg1)
	}
	ctx.buf.WriteByte(delims[1])
}
// serialize emits a union as @option=value (value omitted in compact mode
// when it is the default).
func (a *UnionArg) serialize(ctx *serializer) {
	typ := a.Type().(*UnionType)
	ctx.printf("@%v", typ.Fields[a.Index].Name)
	if !ctx.verbose && isDefault(a.Option) {
		return
	}
	ctx.printf("=")
	ctx.arg(a.Option)
}

// serialize emits a result argument: an optional <rN=> binding when the
// value is used by later calls, then either a raw value or a reference to
// a previously bound result with optional /div and +add operations.
func (a *ResultArg) serialize(ctx *serializer) {
	if len(a.uses) != 0 {
		ctx.printf("<r%v=>", ctx.allocVarID(a))
	}
	if a.Res == nil {
		ctx.printf("0x%x", a.Val)
		return
	}
	id, ok := ctx.vars[a.Res]
	if !ok {
		// The referenced result must have been serialized earlier.
		panic("no result")
	}
	ctx.printf("r%v", id)
	if a.OpDiv != 0 {
		ctx.printf("/%v", a.OpDiv)
	}
	if a.OpAdd != 0 {
		ctx.printf("+%v", a.OpAdd)
	}
}
// DeserializeMode controls how strictly Deserialize treats deviations from
// the expected serialization format.
type DeserializeMode int

const (
	// Strict fails on any recoverable format error.
	Strict DeserializeMode = iota
	// NonStrict silently recovers from format errors where possible.
	NonStrict
)
// Deserialize parses a serialized program back into its in-memory form.
// In Strict mode any format deviation is an error; in NonStrict mode the
// parser recovers where possible (useful for programs from corpus/hub that
// may predate description changes).
func (target *Target) Deserialize(data []byte, mode DeserializeMode) (*Prog, error) {
	defer func() {
		if err := recover(); err != nil {
			// Re-panic with full context to ease debugging of parser crashes.
			panic(fmt.Errorf("%v\ntarget: %v/%v, rev: %v, mode=%v, prog:\n%q",
				err, target.OS, target.Arch, GitRevision, mode, data))
		}
	}()
	p := newParser(target, data, mode == Strict)
	prog, err := p.parseProg()
	if err := p.Err(); err != nil {
		return nil, err
	}
	if err != nil {
		return nil, err
	}
	// This validation is done even in non-debug mode because deserialization
	// procedure does not catch all bugs (e.g. mismatched types).
	// And we can receive bad programs from corpus and hub.
	if err := prog.validate(); err != nil {
		return nil, err
	}
	if p.autos != nil {
		p.fixupAutos(prog)
	}
	if err := prog.sanitize(mode == NonStrict); err != nil {
		return nil, err
	}
	return prog, nil
}
// parseProg parses the whole serialized program: one call per line of the
// form "[rN = ]name(args...)[ (props)][# comment]", with stand-alone
// # comment lines collected into prog.Comments.
func (p *parser) parseProg() (*Prog, error) {
	prog := &Prog{
		Target: p.target,
	}
	for p.Scan() {
		if p.EOF() {
			// Blank line: flush a pending comment, if any.
			if p.comment != "" {
				prog.Comments = append(prog.Comments, p.comment)
				p.comment = ""
			}
			continue
		}
		if p.Char() == '#' {
			if p.comment != "" {
				prog.Comments = append(prog.Comments, p.comment)
			}
			p.comment = strings.TrimSpace(p.s[p.i+1:])
			continue
		}
		name := p.Ident()
		r := ""
		if p.Char() == '=' {
			// "rN = call(...)" form: the first ident was the result variable.
			r = name
			p.Parse('=')
			name = p.Ident()
		}
		meta := p.target.SyscallMap[name]
		if meta == nil {
			return nil, fmt.Errorf("unknown syscall %v", name)
		}
		c := MakeCall(meta, nil)
		c.Comment = p.comment
		prog.Calls = append(prog.Calls, c)
		p.Parse('(')
		for i := 0; p.Char() != ')'; i++ {
			if i >= len(meta.Args) {
				p.eatExcessive(false, "excessive syscall arguments")
				break
			}
			field := meta.Args[i]
			if IsPad(field.Type) {
				return nil, fmt.Errorf("padding in syscall %v arguments", name)
			}
			arg, err := p.parseArg(field.Type, DirIn)
			if err != nil {
				return nil, err
			}
			c.Args = append(c.Args, arg)
			if p.Char() != ')' {
				p.Parse(',')
			}
		}
		p.Parse(')')
		if !p.EOF() && p.Char() == '(' {
			// Optional call properties, e.g. "(fail_nth: 3)".
			p.Parse('(')
			c.Props = p.parseCallProps()
			p.Parse(')')
		}
		if !p.EOF() {
			if p.Char() != '#' {
				// Message typo fixed: was "tailing data".
				return nil, fmt.Errorf("trailing data (line #%v)", p.l)
			}
			if c.Comment != "" {
				prog.Comments = append(prog.Comments, c.Comment)
			}
			c.Comment = strings.TrimSpace(p.s[p.i+1:])
		}
		// Fill in any missing trailing arguments with defaults
		// (an error in strict mode).
		for i := len(c.Args); i < len(meta.Args); i++ {
			p.strictFailf("missing syscall args")
			c.Args = append(c.Args, meta.Args[i].DefaultArg(DirIn))
		}
		if len(c.Args) != len(meta.Args) {
			return nil, fmt.Errorf("wrong call arg count: %v, want %v", len(c.Args), len(meta.Args))
		}
		if r != "" && c.Ret != nil {
			p.vars[r] = c.Ret
		}
		p.comment = ""
	}
	if p.comment != "" {
		prog.Comments = append(prog.Comments, p.comment)
	}
	return prog, nil
}
// parseCallProps parses the parenthesized call-property list that may follow
// a call, e.g. "(fail_nth: 3, async)". Unknown properties are skipped
// (an error in strict mode).
func (p *parser) parseCallProps() CallProps {
	nameToValue := map[string]reflect.Value{}
	callProps := CallProps{}
	callProps.ForeachProp(func(_, key string, value reflect.Value) {
		nameToValue[key] = value
	})
	for p.e == nil && p.Char() != ')' {
		propName := p.Ident()
		value, ok := nameToValue[propName]
		if !ok {
			p.eatExcessive(true, "unknown call property: %s", propName)
			if p.Char() == ',' {
				p.Parse(',')
			}
			continue
		}
		switch kind := value.Kind(); kind {
		case reflect.Int:
			// Int properties have the form "name: value".
			p.Parse(':')
			strVal := p.Ident()
			intV, err := strconv.ParseInt(strVal, 0, 64)
			if err != nil {
				p.strictFailf("invalid int value: %s", strVal)
			} else {
				value.SetInt(intV)
			}
		case reflect.Bool:
			// Bool properties are flags: mere presence means true.
			value.SetBool(true)
		default:
			panic("unable to handle call props of type " + kind.String())
		}
		if p.Char() != ')' {
			p.Parse(',')
		}
	}
	return callProps
}
// parseArg parses a single argument, optionally prefixed with a "<rN=>"
// result binding. The binding is recorded in p.vars so that later arguments
// can reference it; only resource (result) args may be bound.
func (p *parser) parseArg(typ Type, dir Dir) (Arg, error) {
	r := ""
	if p.Char() == '<' {
		p.Parse('<')
		r = p.Ident()
		p.Parse('=')
		p.Parse('>')
	}
	arg, err := p.parseArgImpl(typ, dir)
	if err != nil {
		return nil, err
	}
	if arg == nil {
		if typ != nil {
			arg = typ.DefaultArg(dir)
		} else if r != "" {
			return nil, fmt.Errorf("named nil argument")
		}
	}
	if r != "" {
		if res, ok := arg.(*ResultArg); ok {
			p.vars[r] = res
		} else {
			// Only resources can be referenced later; anything else means
			// the program is stale or invalid. (Grammar fixed: was
			// "doesn't refers to".)
			p.strictFailf("variable %v doesn't refer to a resource", r)
		}
	}
	return arg, nil
}
// parseArgImpl dispatches on the first character of a serialized argument:
// 0x.. ints, rN results, &addr pointers, quoted data, {structs}, [arrays],
// @unions, nil, and AUTO.
func (p *parser) parseArgImpl(typ Type, dir Dir) (Arg, error) {
	if typ == nil && p.Char() != 'n' {
		p.eatExcessive(true, "non-nil argument for nil type")
		return nil, nil
	}
	switch p.Char() {
	case '0':
		return p.parseArgInt(typ, dir)
	case 'r':
		return p.parseArgRes(typ, dir)
	case '&':
		return p.parseArgAddr(typ, dir)
	case '"', '\'':
		return p.parseArgString(typ, dir)
	case '{':
		return p.parseArgStruct(typ, dir)
	case '[':
		return p.parseArgArray(typ, dir)
	case '@':
		return p.parseArgUnion(typ, dir)
	case 'n':
		// The literal keyword "nil".
		p.Parse('n')
		p.Parse('i')
		p.Parse('l')
		return nil, nil
	case 'A':
		// The literal keyword "AUTO".
		p.Parse('A')
		p.Parse('U')
		p.Parse('T')
		p.Parse('O')
		return p.parseAuto(typ, dir)
	default:
		return nil, fmt.Errorf("failed to parse argument at '%c' (line #%v/%v: %v)",
			p.Char(), p.l, p.i, p.s)
	}
}
// parseArgInt parses an integer literal into the argument kind matching typ.
// Out args with non-default values are reset (an error in strict mode);
// a raw integer for a pointer type selects a special pointer.
func (p *parser) parseArgInt(typ Type, dir Dir) (Arg, error) {
	val := p.Ident()
	v, err := strconv.ParseUint(val, 0, 64)
	if err != nil {
		return nil, fmt.Errorf("wrong arg value '%v': %v", val, err)
	}
	switch typ.(type) {
	case *ConstType, *IntType, *FlagsType, *ProcType, *CsumType:
		arg := Arg(MakeConstArg(typ, dir, v))
		if dir == DirOut && !typ.isDefaultArg(arg) {
			p.strictFailf("out arg %v has non-default value: %v", typ, v)
			arg = typ.DefaultArg(dir)
		}
		return arg, nil
	case *LenType:
		return MakeConstArg(typ, dir, v), nil
	case *ResourceType:
		return MakeResultArg(typ, dir, nil, v), nil
	case *PtrType, *VmaType:
		// Special pointers are serialized as their index negated;
		// unsigned negation wraps, and % maps it back into range.
		index := -v % uint64(len(p.target.SpecialPointers))
		return MakeSpecialPointerArg(typ, dir, index), nil
	default:
		p.eatExcessive(true, "wrong int arg %T", typ)
		return typ.DefaultArg(dir), nil
	}
}
// parseAuto handles an AUTO argument value; the concrete value is computed
// later by fixupAutos. Only const/len/csum types may be AUTO.
func (p *parser) parseAuto(typ Type, dir Dir) (Arg, error) {
	switch typ.(type) {
	case *ConstType, *LenType, *CsumType:
		return p.auto(MakeConstArg(typ, dir, 0)), nil
	default:
		return nil, fmt.Errorf("wrong type %T for AUTO", typ)
	}
}
// parseArgRes parses a reference to a previously bound result variable,
// e.g. "r0/2+1" (with optional /div and +add operations). References to
// undeclared variables fall back to the default value (an error in strict mode).
func (p *parser) parseArgRes(typ Type, dir Dir) (Arg, error) {
	id := p.Ident()
	var div, add uint64
	if p.Char() == '/' {
		p.Parse('/')
		op := p.Ident()
		v, err := strconv.ParseUint(op, 0, 64)
		if err != nil {
			return nil, fmt.Errorf("wrong result div op: '%v'", op)
		}
		div = v
	}
	if p.Char() == '+' {
		p.Parse('+')
		op := p.Ident()
		v, err := strconv.ParseUint(op, 0, 64)
		if err != nil {
			return nil, fmt.Errorf("wrong result add op: '%v'", op)
		}
		add = v
	}
	v := p.vars[id]
	if v == nil {
		p.strictFailf("undeclared variable %v", id)
		return typ.DefaultArg(dir), nil
	}
	arg := MakeResultArg(typ, dir, v, 0)
	arg.OpDiv = div
	arg.OpAdd = add
	return arg, nil
}
// parseArgAddr parses a pointer or vma argument: &ADDR[=payload], &AUTO
// (address assigned later by fixupAutos), or the special &ADDR=ANY=payload
// form for squashed any-pointers.
func (p *parser) parseArgAddr(typ Type, dir Dir) (Arg, error) {
	var elem Type
	elemDir := DirInOut
	switch t1 := typ.(type) {
	case *PtrType:
		elem, elemDir = t1.Elem, t1.ElemDir
	case *VmaType:
		// VMA pointers have no element type (elem stays nil).
	default:
		p.eatExcessive(true, "wrong addr arg %T", typ)
		return typ.DefaultArg(dir), nil
	}
	p.Parse('&')
	auto := false
	var addr, vmaSize uint64
	if p.Char() == 'A' {
		// The literal keyword "AUTO" in place of an address.
		p.Parse('A')
		p.Parse('U')
		p.Parse('T')
		p.Parse('O')
		if elem == nil {
			return nil, fmt.Errorf("vma type can't be AUTO")
		}
		auto = true
	} else {
		var err error
		addr, vmaSize, err = p.parseAddr()
		if err != nil {
			return nil, err
		}
	}
	var inner Arg
	if p.Char() == '=' {
		p.Parse('=')
		if p.Char() == 'A' {
			// "ANY=" switches to the target's catch-all any-pointer type.
			p.Parse('A')
			p.Parse('N')
			p.Parse('Y')
			p.Parse('=')
			anyPtr := p.target.getAnyPtrType(typ.Size())
			typ, elem, elemDir = anyPtr, anyPtr.Elem, anyPtr.ElemDir
		}
		var err error
		inner, err = p.parseArg(elem, elemDir)
		if err != nil {
			return nil, err
		}
	}
	if elem == nil {
		// VMA pointer: the address must be page-aligned.
		if addr%p.target.PageSize != 0 {
			p.strictFailf("unaligned vma address 0x%x", addr)
			addr &= ^(p.target.PageSize - 1)
		}
		return MakeVmaPointerArg(typ, dir, addr, vmaSize), nil
	}
	if inner == nil {
		inner = elem.DefaultArg(elemDir)
	}
	arg := MakePointerArg(typ, dir, addr, inner)
	if auto {
		p.auto(arg)
	}
	return arg, nil
}
// parseArgString parses a data argument (quoted or hex form) with an
// optional /size suffix. The data is zero-padded/truncated to the type size
// and, for enumerated string types, validated against the allowed values.
func (p *parser) parseArgString(t Type, dir Dir) (Arg, error) {
	typ, ok := t.(*BufferType)
	if !ok {
		p.eatExcessive(true, "wrong string arg")
		return t.DefaultArg(dir), nil
	}
	data, err := p.deserializeData()
	if err != nil {
		return nil, err
	}
	size := ^uint64(0) // sentinel: no explicit size suffix given
	if p.Char() == '/' {
		p.Parse('/')
		sizeStr := p.Ident()
		size, err = strconv.ParseUint(sizeStr, 0, 64)
		if err != nil {
			return nil, fmt.Errorf("failed to parse buffer size: %q", sizeStr)
		}
		maxMem := p.target.NumPages * p.target.PageSize
		if size > maxMem {
			p.strictFailf("too large string argument %v", size)
			size = maxMem
		}
	}
	if !typ.Varlen() {
		size = typ.Size()
	} else if size == ^uint64(0) {
		size = uint64(len(data))
	}
	if dir == DirOut {
		return MakeOutDataArg(typ, dir, size), nil
	}
	// Zero-pad (or truncate) the data to the requested size.
	if diff := int(size) - len(data); diff > 0 {
		data = append(data, make([]byte, diff)...)
	}
	data = data[:size]
	if (typ.Kind == BufferString || typ.Kind == BufferGlob) &&
		len(typ.Values) != 0 &&
		// AUTOGENERATED will be padded by 0's.
		!strings.HasPrefix(typ.Values[0], "AUTOGENERATED") {
		matched := false
		for _, val := range typ.Values {
			if string(data) == val {
				matched = true
				break
			}
		}
		if !matched {
			p.strictFailf("bad string value %q, expect %q", data, typ.Values)
			data = []byte(typ.Values[0])
		}
	}
	return MakeDataArg(typ, dir, data), nil
}
// parseArgStruct parses a serialized struct argument of the form {f1, f2, ...}.
// Excessive fields are skipped and missing trailing fields are filled with
// default values (an error in strict mode).
func (p *parser) parseArgStruct(typ Type, dir Dir) (Arg, error) {
	p.Parse('{')
	t1, ok := typ.(*StructType)
	if !ok {
		p.eatExcessive(false, "wrong struct arg")
		p.Parse('}')
		return typ.DefaultArg(dir), nil
	}
	var inner []Arg
	for i := 0; p.Char() != '}'; i++ {
		if i >= len(t1.Fields) {
			p.eatExcessive(false, "excessive struct %v fields", typ.Name())
			break
		}
		field := t1.Fields[i]
		if IsPad(field.Type) {
			// Padding is not serialized, so recreate it implicitly.
			inner = append(inner, MakeConstArg(field.Type, field.Dir(dir), 0))
		} else {
			arg, err := p.parseArg(field.Type, field.Dir(dir))
			if err != nil {
				return nil, err
			}
			inner = append(inner, arg)
			if p.Char() != '}' {
				p.Parse(',')
			}
		}
	}
	p.Parse('}')
	// Fill in any fields that were missing from the serialized form.
	for len(inner) < len(t1.Fields) {
		field := t1.Fields[len(inner)]
		if !IsPad(field.Type) {
			p.strictFailf("missing struct %v fields %v/%v", typ.Name(), len(inner), len(t1.Fields))
		}
		inner = append(inner, field.Type.DefaultArg(field.Dir(dir)))
	}
	return MakeGroupArg(typ, dir, inner), nil
}
// parseArgArray parses a serialized array argument of the form [e1, e2, ...].
// For fixed-size arrays the element count is forced to the declared size:
// missing elements are filled with defaults (an error in strict mode) and
// excessive ones are dropped.
func (p *parser) parseArgArray(typ Type, dir Dir) (Arg, error) {
	p.Parse('[')
	t1, ok := typ.(*ArrayType)
	if !ok {
		p.eatExcessive(false, "wrong array arg %T", typ)
		p.Parse(']')
		return typ.DefaultArg(dir), nil
	}
	var inner []Arg
	for i := 0; p.Char() != ']'; i++ {
		arg, err := p.parseArg(t1.Elem, dir)
		if err != nil {
			return nil, err
		}
		inner = append(inner, arg)
		if p.Char() != ']' {
			p.Parse(',')
		}
	}
	p.Parse(']')
	if t1.Kind == ArrayRangeLen && t1.RangeBegin == t1.RangeEnd {
		// Fixed-size array: force exactly RangeBegin elements.
		for uint64(len(inner)) < t1.RangeBegin {
			p.strictFailf("missing array elements")
			inner = append(inner, t1.Elem.DefaultArg(dir))
		}
		inner = inner[:t1.RangeBegin]
	}
	return MakeGroupArg(typ, dir, inner), nil
}
// parseArgUnion parses a serialized union argument of the form @option=value
// (or just @option when the value is the default one). Unknown options fall
// back to the default argument (an error in strict mode).
func (p *parser) parseArgUnion(typ Type, dir Dir) (Arg, error) {
	t1, ok := typ.(*UnionType)
	if !ok {
		p.eatExcessive(true, "wrong union arg")
		return typ.DefaultArg(dir), nil
	}
	p.Parse('@')
	name := p.Ident()
	var (
		optType Type
		optDir  Dir
	)
	index := -1
	// Look up the union option by field name.
	for i, field := range t1.Fields {
		if name == field.Name {
			optType, index, optDir = field.Type, i, field.Dir(dir)
			break
		}
	}
	if optType == nil {
		p.eatExcessive(true, "wrong union option")
		return typ.DefaultArg(dir), nil
	}
	var opt Arg
	if p.Char() == '=' {
		p.Parse('=')
		var err error
		opt, err = p.parseArg(optType, optDir)
		if err != nil {
			return nil, err
		}
	} else {
		// No value given: use the default for the option type.
		opt = optType.DefaultArg(optDir)
	}
	return MakeUnionArg(typ, dir, opt, index), nil
}
// Eats excessive call arguments and struct fields to recover after description changes.
// It tracks bracket nesting and stops (without consuming) at the closing
// bracket of an outer level, or at a top-level comma if stopAtComma is set.
// Quoted strings are skipped verbatim. The problem is reported via strictFailf.
func (p *parser) eatExcessive(stopAtComma bool, what string, args ...interface{}) {
	p.strictFailf(what, args...)
	paren, brack, brace := 0, 0, 0
	for !p.EOF() && p.e == nil {
		ch := p.Char()
		switch ch {
		case '(':
			paren++
		case ')':
			if paren == 0 {
				return // closing bracket of an enclosing level: stop before it
			}
			paren--
		case '[':
			brack++
		case ']':
			if brack == 0 {
				return
			}
			brack--
		case '{':
			brace++
		case '}':
			if brace == 0 {
				return
			}
			brace--
		case ',':
			if stopAtComma && paren == 0 && brack == 0 && brace == 0 {
				return
			}
		case '\'', '"':
			// Skip the whole quoted string so brackets inside it don't count.
			p.Parse(ch)
			for !p.EOF() && p.Char() != ch {
				p.Parse(p.Char())
			}
			if p.EOF() {
				return
			}
		}
		p.Parse(ch)
	}
}
const (
	// encodingAddrBase is added to all pointer addresses in the serialized
	// form so that they look like plausible userspace addresses.
	encodingAddrBase = 0x7f0000000000
)

// serializeAddr formats a pointer argument address (plus a /size suffix for
// VMAs) in the form parseAddr accepts back.
func (target *Target) serializeAddr(arg *PointerArg) string {
	ssize := ""
	if arg.VmaSize != 0 {
		ssize = fmt.Sprintf("/0x%x", arg.VmaSize)
	}
	return fmt.Sprintf("(0x%x%v)", encodingAddrBase+arg.Address, ssize)
}
// parseAddr parses a serialized address "(0xADDR[+/-off][/size])" and returns
// the de-based address and the page-aligned VMA size (0 when no /size suffix).
func (p *parser) parseAddr() (uint64, uint64, error) {
	p.Parse('(')
	pstr := p.Ident()
	addr, err := strconv.ParseUint(pstr, 0, 64)
	if err != nil {
		return 0, 0, fmt.Errorf("failed to parse addr: %q", pstr)
	}
	if addr < encodingAddrBase {
		return 0, 0, fmt.Errorf("address without base offset: %q", pstr)
	}
	addr -= encodingAddrBase
	// This is not used anymore, but left here to parse old programs.
	if p.Char() == '+' || p.Char() == '-' {
		minus := false
		if p.Char() == '-' {
			minus = true
			p.Parse('-')
		} else {
			p.Parse('+')
		}
		ostr := p.Ident()
		off, err := strconv.ParseUint(ostr, 0, 64)
		if err != nil {
			return 0, 0, fmt.Errorf("failed to parse addr offset: %q", ostr)
		}
		if minus {
			off = -off // unsigned negation wraps, effectively subtracting
		}
		addr += off
	}
	target := p.target
	maxMem := target.NumPages * target.PageSize
	var vmaSize uint64
	if p.Char() == '/' {
		// VMA size suffix: page-align both address and size and clamp them
		// to the target's mapped memory range.
		p.Parse('/')
		pstr := p.Ident()
		size, err := strconv.ParseUint(pstr, 0, 64)
		if err != nil {
			return 0, 0, fmt.Errorf("failed to parse addr size: %q", pstr)
		}
		addr = addr & ^(target.PageSize - 1)
		vmaSize = (size + target.PageSize - 1) & ^(target.PageSize - 1)
		if vmaSize == 0 {
			vmaSize = target.PageSize
		}
		if vmaSize > maxMem {
			vmaSize = maxMem
		}
		if addr > maxMem-vmaSize {
			addr = maxMem - vmaSize
		}
	}
	p.Parse(')')
	return addr, vmaSize, nil
}
// serializeData writes data either as a quoted '...' escaped string (when
// the data is readable or requested to be readable) or as a hex-encoded
// "..." literal otherwise.
func serializeData(buf *bytes.Buffer, data []byte, readable bool) {
	if readable || isReadableData(data) {
		buf.WriteByte('\'')
		encodeData(buf, data, true, false)
		buf.WriteByte('\'')
		return
	}
	fmt.Fprintf(buf, "\"%v\"", hex.EncodeToString(data))
}
func EncodeData(buf *bytes.Buffer, data []byte, readable bool) {
if !readable && isReadableData(data) {
readable = true
}
encodeData(buf, data, readable, true)
}
// encodeData writes data to buf. Non-readable data is emitted as \xNN hex
// escapes. Readable data uses conventional escapes for control/quote
// characters; remaining non-printable bytes are emitted as 3-digit octal
// escapes in cstr mode (safe inside C string literals) and as \xNN otherwise.
func encodeData(buf *bytes.Buffer, data []byte, readable, cstr bool) {
	for _, v := range data {
		if !readable {
			lo, hi := byteToHex(v)
			buf.Write([]byte{'\\', 'x', hi, lo})
			continue
		}
		switch v {
		case '\a':
			buf.Write([]byte{'\\', 'a'})
		case '\b':
			buf.Write([]byte{'\\', 'b'})
		case '\f':
			buf.Write([]byte{'\\', 'f'})
		case '\n':
			buf.Write([]byte{'\\', 'n'})
		case '\r':
			buf.Write([]byte{'\\', 'r'})
		case '\t':
			buf.Write([]byte{'\\', 't'})
		case '\v':
			buf.Write([]byte{'\\', 'v'})
		case '\'':
			buf.Write([]byte{'\\', '\''})
		case '"':
			buf.Write([]byte{'\\', '"'})
		case '\\':
			buf.Write([]byte{'\\', '\\'})
		default:
			if isPrintable(v) {
				buf.WriteByte(v)
			} else {
				if cstr {
					// We would like to use hex encoding with \x,
					// but C's \x is hard to use: it can contain _any_ number of hex digits
					// (not just 2 or 4), so later non-hex encoded chars will glue to \x.
					c0 := (v>>6)&0x7 + '0'
					c1 := (v>>3)&0x7 + '0'
					c2 := (v>>0)&0x7 + '0'
					buf.Write([]byte{'\\', c0, c1, c2})
				} else {
					lo, hi := byteToHex(v)
					buf.Write([]byte{'\\', 'x', hi, lo})
				}
			}
		}
	}
}
// isReadableDataType reports whether buffers of this kind should be
// serialized in human-readable (quoted/escaped) form.
func isReadableDataType(typ *BufferType) bool {
	switch typ.Kind {
	case BufferString, BufferFilename, BufferGlob:
		return true
	}
	return false
}
// isReadableData reports whether data is non-empty and consists only of
// printable ASCII characters plus common control characters (NUL and the
// standard whitespace escapes).
func isReadableData(data []byte) bool {
	if len(data) == 0 {
		return false
	}
	for _, c := range data {
		switch {
		case isPrintable(c):
			// Printable ASCII is fine as-is.
		case c == 0, c == '\a', c == '\b', c == '\f', c == '\n',
			c == '\r', c == '\t', c == '\v':
			// These have conventional escape sequences.
		default:
			return false
		}
	}
	return true
}
// deserializeData parses a data argument payload: either a hex-encoded
// "..." literal or a quoted '...' string with escape sequences. This is the
// inverse of serializeData.
func (p *parser) deserializeData() ([]byte, error) {
	var data []byte
	if p.Char() == '"' {
		// Hex form: "b090..." (possibly empty).
		p.Parse('"')
		val := ""
		if p.Char() != '"' {
			val = p.Ident()
		}
		p.Parse('"')
		var err error
		data, err = hex.DecodeString(val)
		if err != nil {
			return nil, fmt.Errorf("data arg has bad value %q", val)
		}
	} else {
		// Readable form: '...' with \xNN and conventional escapes.
		if p.consume() != '\'' {
			return nil, fmt.Errorf("data arg does not start with \" nor with '")
		}
		for p.Char() != '\'' && p.Char() != 0 {
			v := p.consume()
			if v != '\\' {
				data = append(data, v)
				continue
			}
			v = p.consume()
			switch v {
			case 'x':
				hi := p.consume()
				lo := p.consume()
				b, ok := hexToByte(lo, hi)
				if !ok {
					return nil, fmt.Errorf("invalid hex \\x%v%v in data arg", hi, lo)
				}
				data = append(data, b)
			case 'a':
				data = append(data, '\a')
			case 'b':
				data = append(data, '\b')
			case 'f':
				data = append(data, '\f')
			case 'n':
				data = append(data, '\n')
			case 'r':
				data = append(data, '\r')
			case 't':
				data = append(data, '\t')
			case 'v':
				data = append(data, '\v')
			case '\'':
				data = append(data, '\'')
			case '"':
				data = append(data, '"')
			case '\\':
				data = append(data, '\\')
			default:
				return nil, fmt.Errorf("invalid \\%c escape sequence in data arg", v)
			}
		}
		p.Parse('\'')
	}
	return data, nil
}
// isPrintable reports whether v is a printable ASCII character
// (space through tilde).
func isPrintable(v byte) bool {
	return 0x20 <= v && v <= 0x7e
}
// byteToHex splits v into its low and high hex digit characters.
func byteToHex(v byte) (lo, hi byte) {
	lo = toHexChar(v & 0xf)
	hi = toHexChar(v >> 4)
	return lo, hi
}
// hexToByte combines two hex digit characters into a byte; ok is false if
// either character is not a valid lowercase hex digit.
func hexToByte(lo, hi byte) (byte, bool) {
	h, okHi := fromHexChar(hi)
	l, okLo := fromHexChar(lo)
	return h<<4 | l, okHi && okLo
}
// toHexChar converts a value in [0, 16) to its lowercase hex digit;
// panics on out-of-range input (a programmer error).
func toHexChar(v byte) byte {
	if v >= 16 {
		panic("bad hex char")
	}
	const digits = "0123456789abcdef"
	return digits[v]
}
// fromHexChar converts a lowercase hex digit to its numeric value;
// ok is false for any other character (including uppercase digits).
func fromHexChar(v byte) (byte, bool) {
	switch {
	case '0' <= v && v <= '9':
		return v - '0', true
	case 'a' <= v && v <= 'f':
		return v - 'a' + 10, true
	default:
		return 0, false
	}
}
// parser holds the state of program deserialization: the remaining input,
// the current line with a position in it, and the variable environment.
type parser struct {
	target  *Target
	strict  bool                  // fail on recoverable format errors
	vars    map[string]*ResultArg // rN result variables seen so far
	autos   map[Arg]bool          // args with AUTO values, fixed up after parsing
	comment string                // pending # comment to attach to the next call
	data    []byte                // remaining unscanned input
	s       string                // current line
	i       int                   // position within the current line
	l       int                   // current line number (for error messages)
	e       error                 // sticky first error
}
// newParser creates a parser over data; strict controls whether recoverable
// format deviations are treated as errors.
func newParser(target *Target, data []byte, strict bool) *parser {
	return &parser{
		target: target,
		strict: strict,
		vars:   make(map[string]*ResultArg),
		data:   data,
	}
}
// auto marks arg as carrying an AUTO value that must be assigned a concrete
// value after parsing (see fixupAutos), and returns the arg unchanged.
func (p *parser) auto(arg Arg) Arg {
	autos := p.autos
	if autos == nil {
		autos = make(map[Arg]bool)
		p.autos = autos
	}
	autos[arg] = true
	return arg
}
// fixupAutos assigns concrete values to all args previously marked as AUTO:
// const args receive the value from their type, and pointer args receive a
// fresh address from the memory allocator. Panics if any marked arg is not
// visited (that would mean the marking and traversal disagree — a bug).
// Fixes: "leftoever" typo in the panic message; drops the dead `_ = s`
// statement (s is genuinely used in the PtrType branch).
func (p *parser) fixupAutos(prog *Prog) {
	s := analyze(nil, nil, prog, nil)
	for _, c := range prog.Calls {
		p.target.assignSizesArray(c.Args, c.Meta.Args, p.autos)
		ForeachArg(c, func(arg Arg, _ *ArgCtx) {
			if !p.autos[arg] {
				return
			}
			delete(p.autos, arg)
			switch typ := arg.Type().(type) {
			case *ConstType:
				arg.(*ConstArg).Val = typ.Val
			case *PtrType:
				a := arg.(*PointerArg)
				a.Address = s.ma.alloc(nil, a.Res.Size(), a.Res.Type().Alignment())
			default:
				panic(fmt.Sprintf("unsupported auto type %T", typ))
			}
		})
	}
	if len(p.autos) != 0 {
		panic(fmt.Sprintf("leftover autos: %+v", p.autos))
	}
}
// Scan advances the parser to the next input line and resets the in-line
// position. It returns false once the input is exhausted or after an error
// has been recorded.
func (p *parser) Scan() bool {
	if p.e != nil || len(p.data) == 0 {
		return false
	}
	if idx := bytes.IndexByte(p.data, '\n'); idx != -1 {
		p.s, p.data = string(p.data[:idx]), p.data[idx+1:]
	} else {
		p.s, p.data = string(p.data), nil
	}
	p.i = 0
	p.l++
	return true
}
// Err returns the first error recorded during parsing, if any.
func (p *parser) Err() error {
	return p.e
}
// Str returns the current input line.
func (p *parser) Str() string {
	return p.s
}
// EOF reports whether the current line has been fully consumed.
func (p *parser) EOF() bool {
	return p.i == len(p.s)
}
// Char returns the character at the current position without consuming it.
// It returns 0 (and records an error) at end of line, and 0 silently if an
// error was already recorded.
func (p *parser) Char() byte {
	switch {
	case p.e != nil:
		return 0
	case p.EOF():
		p.failf("unexpected eof")
		return 0
	}
	return p.s[p.i]
}
// Parse consumes the expected character ch plus any trailing whitespace,
// recording an error if the current character is not ch or the line ended.
func (p *parser) Parse(ch byte) {
	if p.e != nil {
		return
	}
	if p.EOF() {
		p.failf("want %s, got EOF", string(ch))
		return
	}
	if got := p.s[p.i]; got != ch {
		p.failf("want '%v', got '%v'", string(ch), string(got))
		return
	}
	p.i++
	p.SkipWs()
}
// consume returns the character at the current position and advances past
// it. It returns 0 (and records an error) at end of line, and 0 silently
// if an error was already recorded.
func (p *parser) consume() byte {
	if p.e != nil {
		return 0
	}
	if p.EOF() {
		p.failf("unexpected eof")
		return 0
	}
	ch := p.s[p.i]
	p.i++
	return ch
}
// SkipWs advances the position past any run of spaces and tabs.
func (p *parser) SkipWs() {
	for p.i < len(p.s) {
		if c := p.s[p.i]; c != ' ' && c != '\t' {
			break
		}
		p.i++
	}
}
// Ident parses an identifier ([a-zA-Z0-9_$]+) at the current position and
// skips trailing whitespace. Records an error and returns "" if no
// identifier character is present.
func (p *parser) Ident() string {
	isIdentChar := func(c byte) bool {
		return c >= 'a' && c <= 'z' ||
			c >= 'A' && c <= 'Z' ||
			c >= '0' && c <= '9' ||
			c == '_' || c == '$'
	}
	start := p.i
	for p.i < len(p.s) && isIdentChar(p.s[p.i]) {
		p.i++
	}
	if p.i == start {
		p.failf("failed to parse identifier at pos %v", start)
		return ""
	}
	res := p.s[start:p.i]
	p.SkipWs()
	return res
}
// failf records a parse error, annotated with the current line number,
// position, and line contents. Only the first error is kept; later
// failures are ignored so the root cause is reported.
func (p *parser) failf(msg string, args ...interface{}) {
	if p.e == nil {
		p.e = fmt.Errorf("%v\nline #%v:%v: %v", fmt.Sprintf(msg, args...), p.l, p.i, p.s)
	}
}
// strictFailf records a parse error only in strict mode; in non-strict
// mode the inconsistency is silently tolerated.
func (p *parser) strictFailf(msg string, args ...interface{}) {
	if p.strict {
		p.failf(msg, args...)
	}
}
// CallSet returns the set of distinct call names in the program along with
// the total number of calls. It does very conservative parsing and is
// intended to parse past/future serialization formats: a line is a call if
// it contains '(', with an optional "rN = " result prefix stripped.
// Empty lines and '#' comments are ignored.
func CallSet(data []byte) (map[string]struct{}, int, error) {
	calls := make(map[string]struct{})
	ncalls := 0
	for len(data) > 0 {
		line := data
		if idx := bytes.IndexByte(data, '\n'); idx != -1 {
			line, data = data[:idx], data[idx+1:]
		} else {
			data = nil
		}
		if len(line) == 0 || line[0] == '#' {
			continue
		}
		paren := bytes.IndexByte(line, '(')
		if paren == -1 {
			return nil, 0, fmt.Errorf("line does not contain opening bracket")
		}
		name := line[:paren]
		// Strip an optional "rN = " assignment prefix.
		if eq := bytes.IndexByte(name, '='); eq != -1 {
			pos := eq + 1
			for pos < len(name) && name[pos] == ' ' {
				pos++
			}
			name = name[pos:]
		}
		if len(name) == 0 {
			return nil, 0, fmt.Errorf("call name is empty")
		}
		calls[string(name)] = struct{}{}
		ncalls++
	}
	if len(calls) == 0 {
		return nil, 0, fmt.Errorf("program does not contain any calls")
	}
	return calls, ncalls, nil
}
|
package backoffs
import (
"time"
)
// ExponentialBackoff computes retry intervals that grow with the number of
// failed requests: Next = Interval * failCount * Multiplier (in seconds).
type ExponentialBackoff struct {
	Interval   int // base interval, in seconds
	Multiplier int // scaling factor applied per failed request
}
// NewExponentialBackoff creates a backoff policy that multiplies the initial
// interval by the failed request count and the specified multiplier.
// Non-positive multipliers fall back to the default of 1: previously only
// negative values were corrected, so a zero multiplier made Next always
// return 0 and disabled backoff entirely.
func NewExponentialBackoff(interval, multiplier int) *ExponentialBackoff {
	if multiplier <= 0 {
		multiplier = 1
	}
	return &ExponentialBackoff{
		Interval:   interval,
		Multiplier: multiplier,
	}
}
// Init returns the initial backoff interval.
func (b *ExponentialBackoff) Init() time.Duration {
	return time.Duration(b.Interval) * time.Second
}
// Next returns the backoff interval for the given number of failed requests:
// failCount * Interval * Multiplier seconds.
func (b *ExponentialBackoff) Next(failCount int) time.Duration {
	seconds := failCount * b.Interval * b.Multiplier
	return time.Duration(seconds) * time.Second
}
Commit: default multiplier fix — the revised version below also replaces a zero multiplier with the default of 1 (previously only negative values were corrected).
package backoffs
import (
"time"
)
// ExponentialBackoff computes retry intervals that grow with the number of
// failed requests: Next = Interval * failCount * Multiplier (in seconds).
type ExponentialBackoff struct {
	Interval   int // base interval, in seconds
	Multiplier int // scaling factor applied per failed request
}
// NewExponentialBackoff creates a backoff policy that multiplies the initial
// interval by the failed request count and the specified multiplier.
// A non-positive multiplier is replaced by the default of 1.
func NewExponentialBackoff(interval, multiplier int) *ExponentialBackoff {
	if multiplier <= 0 {
		multiplier = 1
	}
	return &ExponentialBackoff{
		Interval:   interval,
		Multiplier: multiplier,
	}
}
// Init returns the initial backoff interval (Interval seconds).
func (b *ExponentialBackoff) Init() time.Duration {
	return time.Second * time.Duration(b.Interval)
}
// Next returns the backoff interval for the given number of failed
// requests: failCount * Interval * Multiplier seconds.
func (b *ExponentialBackoff) Next(failCount int) time.Duration {
	return time.Second * time.Duration(failCount*b.Interval*b.Multiplier)
}
|
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testutil
import (
"fmt"
"math"
"reflect"
"testing"
"github.com/google/go-cmp/cmp"
dto "github.com/prometheus/client_model/go"
"k8s.io/component-base/metrics"
"k8s.io/utils/pointer"
)
// samples2Histogram builds a dto.Histogram from raw sample values and
// bucket upper bounds. Bucket counts are cumulative, mirroring the
// Prometheus histogram wire format.
// NOTE(review): samples are bucketed with a strict '<' comparison, while
// Prometheus 'le' buckets are inclusive ('<='); assumed intentional for
// these tests — confirm before reusing outside this file.
func samples2Histogram(samples []float64, upperBounds []float64) Histogram {
	histogram := dto.Histogram{
		SampleCount: uint64Ptr(0),
		SampleSum:   pointer.Float64Ptr(0.0),
	}
	// One cumulative bucket per upper bound, starting empty.
	for _, ub := range upperBounds {
		histogram.Bucket = append(histogram.Bucket, &dto.Bucket{
			CumulativeCount: uint64Ptr(0),
			UpperBound:      pointer.Float64Ptr(ub),
		})
	}
	for _, sample := range samples {
		// Increment every bucket whose bound exceeds the sample
		// (counts are cumulative).
		for i, bucket := range histogram.Bucket {
			if sample < *bucket.UpperBound {
				*histogram.Bucket[i].CumulativeCount++
			}
		}
		*histogram.SampleCount++
		*histogram.SampleSum += sample
	}
	return Histogram{
		&histogram,
	}
}
// TestHistogramQuantile checks Histogram.Quantile estimates against
// histograms built from known sample sets, including a histogram whose
// last bucket has an infinite upper bound.
func TestHistogramQuantile(t *testing.T) {
	tests := []struct {
		name    string
		samples []float64
		bounds  []float64
		q50     float64
		q90     float64
		q99     float64
	}{
		{
			name:    "Repeating numbers",
			samples: []float64{0.5, 0.5, 0.5, 0.5, 1.5, 1.5, 1.5, 1.5, 3, 3, 3, 3, 6, 6, 6, 6},
			bounds:  []float64{1, 2, 4, 8},
			q50:     2,
			q90:     6.4,
			q99:     7.84,
		},
		{
			name:    "Random numbers",
			samples: []float64{11, 67, 61, 21, 40, 36, 52, 63, 8, 3, 67, 35, 61, 1, 36, 58},
			bounds:  []float64{10, 20, 40, 80},
			q50:     40,
			q90:     72,
			q99:     79.2,
		},
		{
			name:    "The last bucket is empty",
			samples: []float64{6, 34, 30, 10, 20, 18, 26, 31, 4, 2, 33, 17, 30, 1, 18, 29},
			bounds:  []float64{10, 20, 40, 80},
			q50:     20,
			q90:     36,
			q99:     39.6,
		},
		{
			name:    "The last bucket has positive infinity upper bound",
			samples: []float64{5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 500},
			bounds:  []float64{10, 20, 40, math.Inf(1)},
			q50:     5.3125,
			q90:     9.5625,
			q99:     40,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			h := samples2Histogram(test.samples, test.bounds)
			q50 := h.Quantile(0.5)
			q90 := h.Quantile(0.9)
			q99 := h.Quantile(0.99)
			q999999 := h.Quantile(0.999999)
			if q50 != test.q50 {
				t.Errorf("Expected q50 to be %v, got %v instead", test.q50, q50)
			}
			if q90 != test.q90 {
				t.Errorf("Expected q90 to be %v, got %v instead", test.q90, q90)
			}
			if q99 != test.q99 {
				t.Errorf("Expected q99 to be %v, got %v instead", test.q99, q99)
			}
			// Even arbitrarily close to q=1, the estimate must stay
			// below the histogram's last (largest) upper bound.
			lastUpperBound := test.bounds[len(test.bounds)-1]
			if !(q999999 < lastUpperBound) {
				t.Errorf("Expected q999999 to be less than %v, got %v instead", lastUpperBound, q999999)
			}
		})
	}
}
// TestHistogramValidate covers Histogram.Validate error cases — nil/empty
// SampleCount and SampleSum, nil buckets, and nil/negative bucket upper
// bounds — plus one fully valid histogram.
func TestHistogramValidate(t *testing.T) {
	tests := []struct {
		name string
		h    Histogram
		err  error
	}{
		{
			name: "nil SampleCount",
			h: Histogram{
				&dto.Histogram{},
			},
			err: fmt.Errorf("nil or empty histogram SampleCount"),
		},
		{
			name: "empty SampleCount",
			h: Histogram{
				&dto.Histogram{
					SampleCount: uint64Ptr(0),
				},
			},
			err: fmt.Errorf("nil or empty histogram SampleCount"),
		},
		{
			name: "nil SampleSum",
			h: Histogram{
				&dto.Histogram{
					SampleCount: uint64Ptr(1),
				},
			},
			err: fmt.Errorf("nil or empty histogram SampleSum"),
		},
		{
			name: "empty SampleSum",
			h: Histogram{
				&dto.Histogram{
					SampleCount: uint64Ptr(1),
					SampleSum:   pointer.Float64Ptr(0.0),
				},
			},
			err: fmt.Errorf("nil or empty histogram SampleSum"),
		},
		{
			name: "nil bucket",
			h: Histogram{
				&dto.Histogram{
					SampleCount: uint64Ptr(1),
					SampleSum:   pointer.Float64Ptr(1.0),
					Bucket: []*dto.Bucket{
						nil,
					},
				},
			},
			err: fmt.Errorf("empty histogram bucket"),
		},
		{
			name: "nil bucket UpperBound",
			h: Histogram{
				&dto.Histogram{
					SampleCount: uint64Ptr(1),
					SampleSum:   pointer.Float64Ptr(1.0),
					Bucket: []*dto.Bucket{
						{},
					},
				},
			},
			err: fmt.Errorf("nil or negative histogram bucket UpperBound"),
		},
		{
			name: "negative bucket UpperBound",
			h: Histogram{
				&dto.Histogram{
					SampleCount: uint64Ptr(1),
					SampleSum:   pointer.Float64Ptr(1.0),
					Bucket: []*dto.Bucket{
						{UpperBound: pointer.Float64Ptr(-1.0)},
					},
				},
			},
			err: fmt.Errorf("nil or negative histogram bucket UpperBound"),
		},
		{
			name: "valid histogram",
			h: samples2Histogram(
				[]float64{0.5, 0.5, 0.5, 0.5, 1.5, 1.5, 1.5, 1.5, 3, 3, 3, 3, 6, 6, 6, 6},
				[]float64{1, 2, 4, 8},
			),
		},
	}
	for _, test := range tests {
		err := test.h.Validate()
		if test.err != nil {
			// Errors are compared by message, not identity.
			if err == nil || err.Error() != test.err.Error() {
				t.Errorf("Expected %q error, got %q instead", test.err, err)
			}
		} else {
			if err != nil {
				t.Errorf("Expected error to be nil, got %q instead", err)
			}
		}
	}
}
// TestLabelsMatch verifies LabelsMatch: a metric matches when every
// label/value pair in labelFilter is present on the metric (the filter may
// be a subset of the metric's labels; an empty filter matches anything).
func TestLabelsMatch(t *testing.T) {
	cases := []struct {
		name          string
		metric        *dto.Metric
		labelFilter   map[string]string
		expectedMatch bool
	}{
		{name: "metric labels and labelFilter have the same labels and values", metric: &dto.Metric{
			Label: []*dto.LabelPair{
				{Name: pointer.StringPtr("a"), Value: pointer.StringPtr("1")},
				{Name: pointer.StringPtr("b"), Value: pointer.StringPtr("2")},
				{Name: pointer.StringPtr("c"), Value: pointer.StringPtr("3")},
			}}, labelFilter: map[string]string{
			"a": "1",
			"b": "2",
			"c": "3",
		}, expectedMatch: true},
		{name: "metric labels contain all labelFilter labels, and labelFilter is a subset of metric labels", metric: &dto.Metric{
			Label: []*dto.LabelPair{
				{Name: pointer.StringPtr("a"), Value: pointer.StringPtr("1")},
				{Name: pointer.StringPtr("b"), Value: pointer.StringPtr("2")},
				{Name: pointer.StringPtr("c"), Value: pointer.StringPtr("3")},
			}}, labelFilter: map[string]string{
			"a": "1",
			"b": "2",
		}, expectedMatch: true},
		{name: "metric labels don't have all labelFilter labels and value", metric: &dto.Metric{
			Label: []*dto.LabelPair{
				{Name: pointer.StringPtr("a"), Value: pointer.StringPtr("1")},
				{Name: pointer.StringPtr("b"), Value: pointer.StringPtr("2")},
			}}, labelFilter: map[string]string{
			"a": "1",
			"b": "2",
			"c": "3",
		}, expectedMatch: false},
		{name: "The intersection of metric labels and labelFilter labels is empty", metric: &dto.Metric{
			Label: []*dto.LabelPair{
				{Name: pointer.StringPtr("aa"), Value: pointer.StringPtr("11")},
				{Name: pointer.StringPtr("bb"), Value: pointer.StringPtr("22")},
				{Name: pointer.StringPtr("cc"), Value: pointer.StringPtr("33")},
			}}, labelFilter: map[string]string{
			"a": "1",
			"b": "2",
			"c": "3",
		}, expectedMatch: false},
		{name: "metric labels have the same labels names but different values with labelFilter labels and value", metric: &dto.Metric{
			Label: []*dto.LabelPair{
				{Name: pointer.StringPtr("a"), Value: pointer.StringPtr("1")},
				{Name: pointer.StringPtr("b"), Value: pointer.StringPtr("2")},
				{Name: pointer.StringPtr("c"), Value: pointer.StringPtr("3")},
			}}, labelFilter: map[string]string{
			"a": "11",
			"b": "2",
			"c": "3",
		}, expectedMatch: false},
		{name: "metric labels contain label name but different values with labelFilter labels and value", metric: &dto.Metric{
			Label: []*dto.LabelPair{
				{Name: pointer.StringPtr("a"), Value: pointer.StringPtr("1")},
				{Name: pointer.StringPtr("b"), Value: pointer.StringPtr("2")},
				{Name: pointer.StringPtr("c"), Value: pointer.StringPtr("33")},
				{Name: pointer.StringPtr("d"), Value: pointer.StringPtr("4")},
			}}, labelFilter: map[string]string{
			"a": "1",
			"b": "2",
			"c": "3",
		}, expectedMatch: false},
		{name: "metric labels is empty and labelFilter is not empty", metric: &dto.Metric{
			Label: []*dto.LabelPair{}}, labelFilter: map[string]string{
			"a": "1",
			"b": "2",
			"c": "3",
		}, expectedMatch: false},
		{name: "metric labels is not empty and labelFilter is empty", metric: &dto.Metric{
			Label: []*dto.LabelPair{
				{Name: pointer.StringPtr("a"), Value: pointer.StringPtr("1")},
				{Name: pointer.StringPtr("b"), Value: pointer.StringPtr("2")},
			}}, labelFilter: map[string]string{}, expectedMatch: true},
	}
	for _, tt := range cases {
		t.Run(tt.name, func(t *testing.T) {
			got := LabelsMatch(tt.metric, tt.labelFilter)
			if got != tt.expectedMatch {
				t.Errorf("Expected %v, got %v instead", tt.expectedMatch, got)
			}
		})
	}
}
// TestHistogramVec_GetAggregatedSampleCount checks that sample counts are
// summed across all histograms in a HistogramVec (nil vec sums to 0).
func TestHistogramVec_GetAggregatedSampleCount(t *testing.T) {
	tests := []struct {
		name string
		vec  HistogramVec
		want uint64
	}{
		{
			name: "nil case",
			want: 0,
		},
		{
			name: "zero case",
			vec: HistogramVec{
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(0), SampleSum: pointer.Float64Ptr(0.0)}},
			},
			want: 0,
		},
		{
			name: "standard case",
			vec: HistogramVec{
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(1), SampleSum: pointer.Float64Ptr(2.0)}},
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(2), SampleSum: pointer.Float64Ptr(4.0)}},
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(4), SampleSum: pointer.Float64Ptr(8.0)}},
			},
			want: 7,
		},
		{
			name: "mixed case",
			vec: HistogramVec{
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(1), SampleSum: pointer.Float64Ptr(2.0)}},
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(0), SampleSum: pointer.Float64Ptr(0.0)}},
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(2), SampleSum: pointer.Float64Ptr(4.0)}},
			},
			want: 3,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := tt.vec.GetAggregatedSampleCount(); got != tt.want {
				t.Errorf("GetAggregatedSampleCount() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestHistogramVec_GetAggregatedSampleSum checks that sample sums are
// summed across all histograms in a HistogramVec (nil vec sums to 0).
func TestHistogramVec_GetAggregatedSampleSum(t *testing.T) {
	tests := []struct {
		name string
		vec  HistogramVec
		want float64
	}{
		{
			name: "nil case",
			want: 0.0,
		},
		{
			name: "zero case",
			vec: HistogramVec{
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(0), SampleSum: pointer.Float64Ptr(0.0)}},
			},
			want: 0.0,
		},
		{
			name: "standard case",
			vec: HistogramVec{
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(1), SampleSum: pointer.Float64Ptr(2.0)}},
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(2), SampleSum: pointer.Float64Ptr(4.0)}},
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(4), SampleSum: pointer.Float64Ptr(8.0)}},
			},
			want: 14.0,
		},
		{
			name: "mixed case",
			vec: HistogramVec{
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(1), SampleSum: pointer.Float64Ptr(2.0)}},
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(0), SampleSum: pointer.Float64Ptr(0.0)}},
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(2), SampleSum: pointer.Float64Ptr(4.0)}},
			},
			want: 6.0,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := tt.vec.GetAggregatedSampleSum(); got != tt.want {
				t.Errorf("GetAggregatedSampleSum() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestHistogramVec_Quantile checks quantile estimation over a vector of
// histograms that share the same bucket bounds, built from known samples.
func TestHistogramVec_Quantile(t *testing.T) {
	tests := []struct {
		name     string
		samples  [][]float64
		bounds   []float64
		quantile float64
		want     []float64
	}{
		{
			name: "duplicated histograms",
			samples: [][]float64{
				{0.5, 0.5, 0.5, 0.5, 1.5, 1.5, 1.5, 1.5, 3, 3, 3, 3, 6, 6, 6, 6},
				{0.5, 0.5, 0.5, 0.5, 1.5, 1.5, 1.5, 1.5, 3, 3, 3, 3, 6, 6, 6, 6},
				{0.5, 0.5, 0.5, 0.5, 1.5, 1.5, 1.5, 1.5, 3, 3, 3, 3, 6, 6, 6, 6},
			},
			bounds: []float64{1, 2, 4, 8},
			want:   []float64{2, 6.4, 7.2, 7.84},
		},
		{
			name: "random numbers",
			samples: [][]float64{
				{8, 35, 47, 61, 56, 69, 66, 74, 35, 69, 5, 38, 58, 40, 36, 12},
				{79, 44, 57, 46, 11, 8, 53, 77, 13, 35, 38, 47, 73, 16, 26, 29},
				{51, 76, 22, 55, 20, 63, 59, 66, 34, 58, 64, 16, 79, 7, 58, 28},
			},
			bounds: []float64{10, 20, 40, 80},
			want:   []float64{44.44, 72.89, 76.44, 79.29},
		},
		{
			name: "single histogram",
			samples: [][]float64{
				{6, 34, 30, 10, 20, 18, 26, 31, 4, 2, 33, 17, 30, 1, 18, 29},
			},
			bounds: []float64{10, 20, 40, 80},
			want:   []float64{20, 36, 38, 39.6},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var vec HistogramVec
			for _, sample := range tt.samples {
				histogram := samples2Histogram(sample, tt.bounds)
				vec = append(vec, &histogram)
			}
			var got []float64
			// Round to two decimals so expectations can be written as
			// short literals.
			for _, q := range []float64{0.5, 0.9, 0.95, 0.99} {
				got = append(got, math.Round(vec.Quantile(q)*100)/100)
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("Quantile() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestHistogramVec_Validate checks HistogramVec.Validate: per-histogram
// validation errors propagate, and histograms within one vec must all
// have the same number of buckets.
func TestHistogramVec_Validate(t *testing.T) {
	tests := []struct {
		name string
		vec  HistogramVec
		want error
	}{
		{
			name: "nil SampleCount",
			vec: HistogramVec{
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(1), SampleSum: pointer.Float64Ptr(1.0)}},
				&Histogram{&dto.Histogram{SampleSum: pointer.Float64Ptr(2.0)}},
			},
			want: fmt.Errorf("nil or empty histogram SampleCount"),
		},
		{
			name: "valid HistogramVec",
			vec: HistogramVec{
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(1), SampleSum: pointer.Float64Ptr(1.0)}},
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(2), SampleSum: pointer.Float64Ptr(2.0)}},
			},
		},
		{
			name: "different bucket size",
			vec: HistogramVec{
				&Histogram{&dto.Histogram{
					SampleCount: uint64Ptr(4),
					SampleSum:   pointer.Float64Ptr(10.0),
					Bucket: []*dto.Bucket{
						{CumulativeCount: uint64Ptr(1), UpperBound: pointer.Float64Ptr(1)},
						{CumulativeCount: uint64Ptr(2), UpperBound: pointer.Float64Ptr(2)},
						{CumulativeCount: uint64Ptr(5), UpperBound: pointer.Float64Ptr(4)},
					},
				}},
				&Histogram{&dto.Histogram{
					SampleCount: uint64Ptr(3),
					SampleSum:   pointer.Float64Ptr(8.0),
					Bucket: []*dto.Bucket{
						{CumulativeCount: uint64Ptr(1), UpperBound: pointer.Float64Ptr(2)},
						{CumulativeCount: uint64Ptr(3), UpperBound: pointer.Float64Ptr(4)},
					},
				}},
			},
			want: fmt.Errorf("found different bucket size: expect 3, but got 2 at index 1"),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Compare errors by their formatted message (handles nil).
			if got := tt.vec.Validate(); fmt.Sprintf("%v", got) != fmt.Sprintf("%v", tt.want) {
				t.Errorf("Validate() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestGetHistogramVecFromGatherer verifies that GetHistogramVecFromGatherer
// returns only the histograms whose label sets match the given label/value
// filter, after observing samples under several label combinations.
func TestGetHistogramVecFromGatherer(t *testing.T) {
	tests := []struct {
		name    string
		lvMap   map[string]string
		wantVec HistogramVec
	}{
		{
			name:  "filter with one label",
			lvMap: map[string]string{"label1": "value1-0"},
			wantVec: HistogramVec{
				&Histogram{&dto.Histogram{
					SampleCount: uint64Ptr(1),
					SampleSum:   pointer.Float64Ptr(1.5),
					Bucket: []*dto.Bucket{
						{CumulativeCount: uint64Ptr(0), UpperBound: pointer.Float64Ptr(0.5)},
						{CumulativeCount: uint64Ptr(1), UpperBound: pointer.Float64Ptr(2.0)},
						{CumulativeCount: uint64Ptr(1), UpperBound: pointer.Float64Ptr(5.0)},
					},
				}},
				&Histogram{&dto.Histogram{
					SampleCount: uint64Ptr(1),
					SampleSum:   pointer.Float64Ptr(2.5),
					Bucket: []*dto.Bucket{
						{CumulativeCount: uint64Ptr(0), UpperBound: pointer.Float64Ptr(0.5)},
						{CumulativeCount: uint64Ptr(0), UpperBound: pointer.Float64Ptr(2.0)},
						{CumulativeCount: uint64Ptr(1), UpperBound: pointer.Float64Ptr(5.0)},
					},
				}},
			},
		},
		{
			name:  "filter with two labels",
			lvMap: map[string]string{"label1": "value1-0", "label2": "value2-1"},
			wantVec: HistogramVec{
				&Histogram{&dto.Histogram{
					SampleCount: uint64Ptr(1),
					SampleSum:   pointer.Float64Ptr(2.5),
					Bucket: []*dto.Bucket{
						{CumulativeCount: uint64Ptr(0), UpperBound: pointer.Float64Ptr(0.5)},
						{CumulativeCount: uint64Ptr(0), UpperBound: pointer.Float64Ptr(2.0)},
						{CumulativeCount: uint64Ptr(1), UpperBound: pointer.Float64Ptr(5.0)},
					},
				}},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			buckets := []float64{.5, 2, 5}
			// HistogramVec has two labels defined.
			labels := []string{"label1", "label2"}
			HistogramOpts := &metrics.HistogramOpts{
				Namespace: "namespace",
				Name:      "metric_test_name",
				Subsystem: "subsystem",
				Help:      "histogram help message",
				Buckets:   buckets,
			}
			vec := metrics.NewHistogramVec(HistogramOpts, labels)
			// Use local registry
			var registry = metrics.NewKubeRegistry()
			var gather metrics.Gatherer = registry
			registry.MustRegister(vec)
			// Reset the local registry when the subtest finishes so the
			// registered vec and its observations cannot leak into any
			// other (sub)test that gathers metrics.
			defer registry.Reset()
			// Observe two metrics with same value for label1 but different value of label2.
			vec.WithLabelValues("value1-0", "value2-0").Observe(1.5)
			vec.WithLabelValues("value1-0", "value2-1").Observe(2.5)
			vec.WithLabelValues("value1-1", "value2-0").Observe(3.5)
			vec.WithLabelValues("value1-1", "value2-1").Observe(4.5)
			metricName := fmt.Sprintf("%s_%s_%s", HistogramOpts.Namespace, HistogramOpts.Subsystem, HistogramOpts.Name)
			histogramVec, _ := GetHistogramVecFromGatherer(gather, metricName, tt.lvMap)
			if diff := cmp.Diff(tt.wantVec, histogramVec); diff != "" {
				t.Errorf("Got unexpected HistogramVec (-want +got):\n%s", diff)
			}
		})
	}
}
Commit: add local registry.Reset() — the revised version resets the local metrics registry when each subtest finishes, so registered metrics do not leak between tests.
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testutil
import (
"fmt"
"math"
"reflect"
"testing"
"github.com/google/go-cmp/cmp"
dto "github.com/prometheus/client_model/go"
"k8s.io/component-base/metrics"
"k8s.io/utils/pointer"
)
// samples2Histogram builds a dto.Histogram from raw sample values and
// bucket upper bounds. Bucket counts are cumulative, mirroring the
// Prometheus histogram wire format.
// NOTE(review): samples are bucketed with a strict '<' comparison, while
// Prometheus 'le' buckets are inclusive ('<='); assumed intentional for
// these tests — confirm before reusing outside this file.
func samples2Histogram(samples []float64, upperBounds []float64) Histogram {
	histogram := dto.Histogram{
		SampleCount: uint64Ptr(0),
		SampleSum:   pointer.Float64Ptr(0.0),
	}
	// One cumulative bucket per upper bound, starting empty.
	for _, ub := range upperBounds {
		histogram.Bucket = append(histogram.Bucket, &dto.Bucket{
			CumulativeCount: uint64Ptr(0),
			UpperBound:      pointer.Float64Ptr(ub),
		})
	}
	for _, sample := range samples {
		// Increment every bucket whose bound exceeds the sample
		// (counts are cumulative).
		for i, bucket := range histogram.Bucket {
			if sample < *bucket.UpperBound {
				*histogram.Bucket[i].CumulativeCount++
			}
		}
		*histogram.SampleCount++
		*histogram.SampleSum += sample
	}
	return Histogram{
		&histogram,
	}
}
// TestHistogramQuantile checks Histogram.Quantile estimates against
// histograms built from known sample sets, including a histogram whose
// last bucket has an infinite upper bound.
func TestHistogramQuantile(t *testing.T) {
	tests := []struct {
		name    string
		samples []float64
		bounds  []float64
		q50     float64
		q90     float64
		q99     float64
	}{
		{
			name:    "Repeating numbers",
			samples: []float64{0.5, 0.5, 0.5, 0.5, 1.5, 1.5, 1.5, 1.5, 3, 3, 3, 3, 6, 6, 6, 6},
			bounds:  []float64{1, 2, 4, 8},
			q50:     2,
			q90:     6.4,
			q99:     7.84,
		},
		{
			name:    "Random numbers",
			samples: []float64{11, 67, 61, 21, 40, 36, 52, 63, 8, 3, 67, 35, 61, 1, 36, 58},
			bounds:  []float64{10, 20, 40, 80},
			q50:     40,
			q90:     72,
			q99:     79.2,
		},
		{
			name:    "The last bucket is empty",
			samples: []float64{6, 34, 30, 10, 20, 18, 26, 31, 4, 2, 33, 17, 30, 1, 18, 29},
			bounds:  []float64{10, 20, 40, 80},
			q50:     20,
			q90:     36,
			q99:     39.6,
		},
		{
			name:    "The last bucket has positive infinity upper bound",
			samples: []float64{5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 500},
			bounds:  []float64{10, 20, 40, math.Inf(1)},
			q50:     5.3125,
			q90:     9.5625,
			q99:     40,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			h := samples2Histogram(test.samples, test.bounds)
			q50 := h.Quantile(0.5)
			q90 := h.Quantile(0.9)
			q99 := h.Quantile(0.99)
			q999999 := h.Quantile(0.999999)
			if q50 != test.q50 {
				t.Errorf("Expected q50 to be %v, got %v instead", test.q50, q50)
			}
			if q90 != test.q90 {
				t.Errorf("Expected q90 to be %v, got %v instead", test.q90, q90)
			}
			if q99 != test.q99 {
				t.Errorf("Expected q99 to be %v, got %v instead", test.q99, q99)
			}
			// Even arbitrarily close to q=1, the estimate must stay
			// below the histogram's last (largest) upper bound.
			lastUpperBound := test.bounds[len(test.bounds)-1]
			if !(q999999 < lastUpperBound) {
				t.Errorf("Expected q999999 to be less than %v, got %v instead", lastUpperBound, q999999)
			}
		})
	}
}
// TestHistogramValidate covers Histogram.Validate error cases — nil/empty
// SampleCount and SampleSum, nil buckets, and nil/negative bucket upper
// bounds — plus one fully valid histogram.
func TestHistogramValidate(t *testing.T) {
	tests := []struct {
		name string
		h    Histogram
		err  error
	}{
		{
			name: "nil SampleCount",
			h: Histogram{
				&dto.Histogram{},
			},
			err: fmt.Errorf("nil or empty histogram SampleCount"),
		},
		{
			name: "empty SampleCount",
			h: Histogram{
				&dto.Histogram{
					SampleCount: uint64Ptr(0),
				},
			},
			err: fmt.Errorf("nil or empty histogram SampleCount"),
		},
		{
			name: "nil SampleSum",
			h: Histogram{
				&dto.Histogram{
					SampleCount: uint64Ptr(1),
				},
			},
			err: fmt.Errorf("nil or empty histogram SampleSum"),
		},
		{
			name: "empty SampleSum",
			h: Histogram{
				&dto.Histogram{
					SampleCount: uint64Ptr(1),
					SampleSum:   pointer.Float64Ptr(0.0),
				},
			},
			err: fmt.Errorf("nil or empty histogram SampleSum"),
		},
		{
			name: "nil bucket",
			h: Histogram{
				&dto.Histogram{
					SampleCount: uint64Ptr(1),
					SampleSum:   pointer.Float64Ptr(1.0),
					Bucket: []*dto.Bucket{
						nil,
					},
				},
			},
			err: fmt.Errorf("empty histogram bucket"),
		},
		{
			name: "nil bucket UpperBound",
			h: Histogram{
				&dto.Histogram{
					SampleCount: uint64Ptr(1),
					SampleSum:   pointer.Float64Ptr(1.0),
					Bucket: []*dto.Bucket{
						{},
					},
				},
			},
			err: fmt.Errorf("nil or negative histogram bucket UpperBound"),
		},
		{
			name: "negative bucket UpperBound",
			h: Histogram{
				&dto.Histogram{
					SampleCount: uint64Ptr(1),
					SampleSum:   pointer.Float64Ptr(1.0),
					Bucket: []*dto.Bucket{
						{UpperBound: pointer.Float64Ptr(-1.0)},
					},
				},
			},
			err: fmt.Errorf("nil or negative histogram bucket UpperBound"),
		},
		{
			name: "valid histogram",
			h: samples2Histogram(
				[]float64{0.5, 0.5, 0.5, 0.5, 1.5, 1.5, 1.5, 1.5, 3, 3, 3, 3, 6, 6, 6, 6},
				[]float64{1, 2, 4, 8},
			),
		},
	}
	for _, test := range tests {
		err := test.h.Validate()
		if test.err != nil {
			// Errors are compared by message, not identity.
			if err == nil || err.Error() != test.err.Error() {
				t.Errorf("Expected %q error, got %q instead", test.err, err)
			}
		} else {
			if err != nil {
				t.Errorf("Expected error to be nil, got %q instead", err)
			}
		}
	}
}
// TestLabelsMatch verifies LabelsMatch: a metric matches when every
// label/value pair in labelFilter is present on the metric (the filter may
// be a subset of the metric's labels; an empty filter matches anything).
func TestLabelsMatch(t *testing.T) {
	cases := []struct {
		name          string
		metric        *dto.Metric
		labelFilter   map[string]string
		expectedMatch bool
	}{
		{name: "metric labels and labelFilter have the same labels and values", metric: &dto.Metric{
			Label: []*dto.LabelPair{
				{Name: pointer.StringPtr("a"), Value: pointer.StringPtr("1")},
				{Name: pointer.StringPtr("b"), Value: pointer.StringPtr("2")},
				{Name: pointer.StringPtr("c"), Value: pointer.StringPtr("3")},
			}}, labelFilter: map[string]string{
			"a": "1",
			"b": "2",
			"c": "3",
		}, expectedMatch: true},
		{name: "metric labels contain all labelFilter labels, and labelFilter is a subset of metric labels", metric: &dto.Metric{
			Label: []*dto.LabelPair{
				{Name: pointer.StringPtr("a"), Value: pointer.StringPtr("1")},
				{Name: pointer.StringPtr("b"), Value: pointer.StringPtr("2")},
				{Name: pointer.StringPtr("c"), Value: pointer.StringPtr("3")},
			}}, labelFilter: map[string]string{
			"a": "1",
			"b": "2",
		}, expectedMatch: true},
		{name: "metric labels don't have all labelFilter labels and value", metric: &dto.Metric{
			Label: []*dto.LabelPair{
				{Name: pointer.StringPtr("a"), Value: pointer.StringPtr("1")},
				{Name: pointer.StringPtr("b"), Value: pointer.StringPtr("2")},
			}}, labelFilter: map[string]string{
			"a": "1",
			"b": "2",
			"c": "3",
		}, expectedMatch: false},
		{name: "The intersection of metric labels and labelFilter labels is empty", metric: &dto.Metric{
			Label: []*dto.LabelPair{
				{Name: pointer.StringPtr("aa"), Value: pointer.StringPtr("11")},
				{Name: pointer.StringPtr("bb"), Value: pointer.StringPtr("22")},
				{Name: pointer.StringPtr("cc"), Value: pointer.StringPtr("33")},
			}}, labelFilter: map[string]string{
			"a": "1",
			"b": "2",
			"c": "3",
		}, expectedMatch: false},
		{name: "metric labels have the same labels names but different values with labelFilter labels and value", metric: &dto.Metric{
			Label: []*dto.LabelPair{
				{Name: pointer.StringPtr("a"), Value: pointer.StringPtr("1")},
				{Name: pointer.StringPtr("b"), Value: pointer.StringPtr("2")},
				{Name: pointer.StringPtr("c"), Value: pointer.StringPtr("3")},
			}}, labelFilter: map[string]string{
			"a": "11",
			"b": "2",
			"c": "3",
		}, expectedMatch: false},
		{name: "metric labels contain label name but different values with labelFilter labels and value", metric: &dto.Metric{
			Label: []*dto.LabelPair{
				{Name: pointer.StringPtr("a"), Value: pointer.StringPtr("1")},
				{Name: pointer.StringPtr("b"), Value: pointer.StringPtr("2")},
				{Name: pointer.StringPtr("c"), Value: pointer.StringPtr("33")},
				{Name: pointer.StringPtr("d"), Value: pointer.StringPtr("4")},
			}}, labelFilter: map[string]string{
			"a": "1",
			"b": "2",
			"c": "3",
		}, expectedMatch: false},
		{name: "metric labels is empty and labelFilter is not empty", metric: &dto.Metric{
			Label: []*dto.LabelPair{}}, labelFilter: map[string]string{
			"a": "1",
			"b": "2",
			"c": "3",
		}, expectedMatch: false},
		{name: "metric labels is not empty and labelFilter is empty", metric: &dto.Metric{
			Label: []*dto.LabelPair{
				{Name: pointer.StringPtr("a"), Value: pointer.StringPtr("1")},
				{Name: pointer.StringPtr("b"), Value: pointer.StringPtr("2")},
			}}, labelFilter: map[string]string{}, expectedMatch: true},
	}
	for _, tt := range cases {
		t.Run(tt.name, func(t *testing.T) {
			got := LabelsMatch(tt.metric, tt.labelFilter)
			if got != tt.expectedMatch {
				t.Errorf("Expected %v, got %v instead", tt.expectedMatch, got)
			}
		})
	}
}
// TestHistogramVec_GetAggregatedSampleCount checks that sample counts are
// summed across all histograms in a HistogramVec (nil vec sums to 0).
func TestHistogramVec_GetAggregatedSampleCount(t *testing.T) {
	tests := []struct {
		name string
		vec  HistogramVec
		want uint64
	}{
		{
			name: "nil case",
			want: 0,
		},
		{
			name: "zero case",
			vec: HistogramVec{
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(0), SampleSum: pointer.Float64Ptr(0.0)}},
			},
			want: 0,
		},
		{
			name: "standard case",
			vec: HistogramVec{
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(1), SampleSum: pointer.Float64Ptr(2.0)}},
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(2), SampleSum: pointer.Float64Ptr(4.0)}},
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(4), SampleSum: pointer.Float64Ptr(8.0)}},
			},
			want: 7,
		},
		{
			name: "mixed case",
			vec: HistogramVec{
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(1), SampleSum: pointer.Float64Ptr(2.0)}},
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(0), SampleSum: pointer.Float64Ptr(0.0)}},
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(2), SampleSum: pointer.Float64Ptr(4.0)}},
			},
			want: 3,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := tt.vec.GetAggregatedSampleCount(); got != tt.want {
				t.Errorf("GetAggregatedSampleCount() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestHistogramVec_GetAggregatedSampleSum checks that sample sums are
// summed across all histograms in a HistogramVec (nil vec sums to 0).
func TestHistogramVec_GetAggregatedSampleSum(t *testing.T) {
	tests := []struct {
		name string
		vec  HistogramVec
		want float64
	}{
		{
			name: "nil case",
			want: 0.0,
		},
		{
			name: "zero case",
			vec: HistogramVec{
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(0), SampleSum: pointer.Float64Ptr(0.0)}},
			},
			want: 0.0,
		},
		{
			name: "standard case",
			vec: HistogramVec{
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(1), SampleSum: pointer.Float64Ptr(2.0)}},
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(2), SampleSum: pointer.Float64Ptr(4.0)}},
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(4), SampleSum: pointer.Float64Ptr(8.0)}},
			},
			want: 14.0,
		},
		{
			name: "mixed case",
			vec: HistogramVec{
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(1), SampleSum: pointer.Float64Ptr(2.0)}},
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(0), SampleSum: pointer.Float64Ptr(0.0)}},
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(2), SampleSum: pointer.Float64Ptr(4.0)}},
			},
			want: 6.0,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := tt.vec.GetAggregatedSampleSum(); got != tt.want {
				t.Errorf("GetAggregatedSampleSum() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestHistogramVec_Quantile checks quantile estimation over a vector of
// histograms that share the same bucket bounds, built from known samples.
func TestHistogramVec_Quantile(t *testing.T) {
	tests := []struct {
		name     string
		samples  [][]float64
		bounds   []float64
		quantile float64
		want     []float64
	}{
		{
			name: "duplicated histograms",
			samples: [][]float64{
				{0.5, 0.5, 0.5, 0.5, 1.5, 1.5, 1.5, 1.5, 3, 3, 3, 3, 6, 6, 6, 6},
				{0.5, 0.5, 0.5, 0.5, 1.5, 1.5, 1.5, 1.5, 3, 3, 3, 3, 6, 6, 6, 6},
				{0.5, 0.5, 0.5, 0.5, 1.5, 1.5, 1.5, 1.5, 3, 3, 3, 3, 6, 6, 6, 6},
			},
			bounds: []float64{1, 2, 4, 8},
			want:   []float64{2, 6.4, 7.2, 7.84},
		},
		{
			name: "random numbers",
			samples: [][]float64{
				{8, 35, 47, 61, 56, 69, 66, 74, 35, 69, 5, 38, 58, 40, 36, 12},
				{79, 44, 57, 46, 11, 8, 53, 77, 13, 35, 38, 47, 73, 16, 26, 29},
				{51, 76, 22, 55, 20, 63, 59, 66, 34, 58, 64, 16, 79, 7, 58, 28},
			},
			bounds: []float64{10, 20, 40, 80},
			want:   []float64{44.44, 72.89, 76.44, 79.29},
		},
		{
			name: "single histogram",
			samples: [][]float64{
				{6, 34, 30, 10, 20, 18, 26, 31, 4, 2, 33, 17, 30, 1, 18, 29},
			},
			bounds: []float64{10, 20, 40, 80},
			want:   []float64{20, 36, 38, 39.6},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var vec HistogramVec
			for _, sample := range tt.samples {
				histogram := samples2Histogram(sample, tt.bounds)
				vec = append(vec, &histogram)
			}
			var got []float64
			// Round to two decimals so expectations can be written as
			// short literals.
			for _, q := range []float64{0.5, 0.9, 0.95, 0.99} {
				got = append(got, math.Round(vec.Quantile(q)*100)/100)
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("Quantile() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestHistogramVec_Validate checks that HistogramVec.Validate rejects
// vectors with a missing SampleCount or inconsistent bucket counts, and
// accepts a well-formed vector (nil error). Errors are compared by their
// formatted string, since fmt.Errorf values are not comparable directly.
func TestHistogramVec_Validate(t *testing.T) {
	tests := []struct {
		name string
		vec  HistogramVec
		want error // nil means the vector is expected to validate cleanly
	}{
		{
			name: "nil SampleCount",
			vec: HistogramVec{
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(1), SampleSum: pointer.Float64Ptr(1.0)}},
				// Second histogram deliberately omits SampleCount.
				&Histogram{&dto.Histogram{SampleSum: pointer.Float64Ptr(2.0)}},
			},
			want: fmt.Errorf("nil or empty histogram SampleCount"),
		},
		{
			name: "valid HistogramVec",
			vec: HistogramVec{
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(1), SampleSum: pointer.Float64Ptr(1.0)}},
				&Histogram{&dto.Histogram{SampleCount: uint64Ptr(2), SampleSum: pointer.Float64Ptr(2.0)}},
			},
		},
		{
			name: "different bucket size",
			vec: HistogramVec{
				// Three buckets here...
				&Histogram{&dto.Histogram{
					SampleCount: uint64Ptr(4),
					SampleSum:   pointer.Float64Ptr(10.0),
					Bucket: []*dto.Bucket{
						{CumulativeCount: uint64Ptr(1), UpperBound: pointer.Float64Ptr(1)},
						{CumulativeCount: uint64Ptr(2), UpperBound: pointer.Float64Ptr(2)},
						{CumulativeCount: uint64Ptr(5), UpperBound: pointer.Float64Ptr(4)},
					},
				}},
				// ...but only two here, which must be reported at index 1.
				&Histogram{&dto.Histogram{
					SampleCount: uint64Ptr(3),
					SampleSum:   pointer.Float64Ptr(8.0),
					Bucket: []*dto.Bucket{
						{CumulativeCount: uint64Ptr(1), UpperBound: pointer.Float64Ptr(2)},
						{CumulativeCount: uint64Ptr(3), UpperBound: pointer.Float64Ptr(4)},
					},
				}},
			},
			want: fmt.Errorf("found different bucket size: expect 3, but got 2 at index 1"),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Compare error messages textually; a nil error formats as "<nil>".
			if got := tt.vec.Validate(); fmt.Sprintf("%v", got) != fmt.Sprintf("%v", tt.want) {
				t.Errorf("Validate() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestGetHistogramVecFromGatherer registers a two-label HistogramVec in a
// local registry, records observations across four label combinations, and
// verifies that GetHistogramVecFromGatherer returns exactly the histograms
// matching the requested label/value filter.
func TestGetHistogramVecFromGatherer(t *testing.T) {
	tests := []struct {
		name    string
		lvMap   map[string]string // label/value filter passed to the gatherer lookup
		wantVec HistogramVec      // histograms expected to survive the filter
	}{
		{
			name:  "filter with one label",
			lvMap: map[string]string{"label1": "value1-0"},
			wantVec: HistogramVec{
				&Histogram{&dto.Histogram{
					SampleCount: uint64Ptr(1),
					SampleSum:   pointer.Float64Ptr(1.5),
					Bucket: []*dto.Bucket{
						{CumulativeCount: uint64Ptr(0), UpperBound: pointer.Float64Ptr(0.5)},
						{CumulativeCount: uint64Ptr(1), UpperBound: pointer.Float64Ptr(2.0)},
						{CumulativeCount: uint64Ptr(1), UpperBound: pointer.Float64Ptr(5.0)},
					},
				}},
				&Histogram{&dto.Histogram{
					SampleCount: uint64Ptr(1),
					SampleSum:   pointer.Float64Ptr(2.5),
					Bucket: []*dto.Bucket{
						{CumulativeCount: uint64Ptr(0), UpperBound: pointer.Float64Ptr(0.5)},
						{CumulativeCount: uint64Ptr(0), UpperBound: pointer.Float64Ptr(2.0)},
						{CumulativeCount: uint64Ptr(1), UpperBound: pointer.Float64Ptr(5.0)},
					},
				}},
			},
		},
		{
			name:  "filter with two labels",
			lvMap: map[string]string{"label1": "value1-0", "label2": "value2-1"},
			wantVec: HistogramVec{
				&Histogram{&dto.Histogram{
					SampleCount: uint64Ptr(1),
					SampleSum:   pointer.Float64Ptr(2.5),
					Bucket: []*dto.Bucket{
						{CumulativeCount: uint64Ptr(0), UpperBound: pointer.Float64Ptr(0.5)},
						{CumulativeCount: uint64Ptr(0), UpperBound: pointer.Float64Ptr(2.0)},
						{CumulativeCount: uint64Ptr(1), UpperBound: pointer.Float64Ptr(5.0)},
					},
				}},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			buckets := []float64{.5, 2, 5}
			// HistogramVec has two labels defined.
			labels := []string{"label1", "label2"}
			histogramOpts := &metrics.HistogramOpts{
				Namespace: "namespace",
				Name:      "metric_test_name",
				Subsystem: "subsystem",
				Help:      "histogram help message",
				Buckets:   buckets,
			}
			vec := metrics.NewHistogramVec(histogramOpts, labels)
			// Use a local registry so the test does not pollute (or depend
			// on) global metrics state; reset it even if the subtest fails.
			var registry = metrics.NewKubeRegistry()
			var gather metrics.Gatherer = registry
			registry.MustRegister(vec)
			defer registry.Reset()
			// Observe two metrics with same value for label1 but different value of label2.
			vec.WithLabelValues("value1-0", "value2-0").Observe(1.5)
			vec.WithLabelValues("value1-0", "value2-1").Observe(2.5)
			vec.WithLabelValues("value1-1", "value2-0").Observe(3.5)
			vec.WithLabelValues("value1-1", "value2-1").Observe(4.5)
			metricName := fmt.Sprintf("%s_%s_%s", histogramOpts.Namespace, histogramOpts.Subsystem, histogramOpts.Name)
			// Previously the error was silently discarded; a gather failure
			// would have surfaced only as a confusing diff below.
			histogramVec, err := GetHistogramVecFromGatherer(gather, metricName, tt.lvMap)
			if err != nil {
				t.Fatalf("GetHistogramVecFromGatherer() unexpected error: %v", err)
			}
			if diff := cmp.Diff(tt.wantVec, histogramVec); diff != "" {
				t.Errorf("Got unexpected HistogramVec (-want +got):\n%s", diff)
			}
		})
	}
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package expvar provides a standardized interface to public variables, such
// as operation counters in servers. It exposes these variables via HTTP at
// /debug/vars in JSON format.
//
// Operations to set or modify these public variables are atomic.
//
// In addition to adding the HTTP handler, this package registers the
// following variables:
//
// cmdline   os.Args
// memstats  runtime.Memstats
//
// The package is sometimes only imported for the side effect of
// registering its HTTP handler and the above variables. To use it
// this way, simply link this package into your program:
// import _ "expvar"
//
package expvar

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"os"
	"runtime"
	"strconv"
	"sync"
)

// Var is an abstract type for all exported variables.
type Var interface {
	// String returns the variable's value; it should be JSON-compatible
	// text so the aggregate /debug/vars output forms a JSON object.
	String() string
}

// Int is a 64-bit integer variable that satisfies the Var interface.
type Int struct {
	i  int64
	mu sync.Mutex
}

// String formats the current value in base 10.
// NOTE(review): reads v.i without holding mu, so it races with concurrent
// Add/Set — confirm whether a stale/torn read is acceptable here.
func (v *Int) String() string { return strconv.FormatInt(v.i, 10) }

// Add adds delta to v.
func (v *Int) Add(delta int64) {
	v.mu.Lock()
	defer v.mu.Unlock()
	v.i += delta
}

// Set sets v to value.
func (v *Int) Set(value int64) {
	v.mu.Lock()
	defer v.mu.Unlock()
	v.i = value
}

// Float is a 64-bit float variable that satisfies the Var interface.
type Float struct {
	f  float64
	mu sync.Mutex
}

// String formats the current value with strconv's shortest 'g' form.
// NOTE(review): like Int.String, this reads v.f without holding mu.
func (v *Float) String() string { return strconv.FormatFloat(v.f, 'g', -1, 64) }

// Add adds delta to v.
func (v *Float) Add(delta float64) {
	v.mu.Lock()
	defer v.mu.Unlock()
	v.f += delta
}

// Set sets v to value.
func (v *Float) Set(value float64) {
	v.mu.Lock()
	defer v.mu.Unlock()
	v.f = value
}

// Map is a string-to-Var map variable that satisfies the Var interface.
type Map struct {
	m  map[string]Var
	mu sync.Mutex
}

// KeyValue represents a single entry in a Map.
type KeyValue struct {
	Key   string
	Value Var
}

// String renders the map as a JSON-style object. It relies on each
// value's String method producing JSON-compatible output.
func (v *Map) String() string {
	v.mu.Lock()
	defer v.mu.Unlock()
	b := new(bytes.Buffer)
	fmt.Fprintf(b, "{")
	first := true
	for key, val := range v.m {
		if !first {
			fmt.Fprintf(b, ", ")
		}
		fmt.Fprintf(b, "\"%s\": %v", key, val)
		first = false
	}
	fmt.Fprintf(b, "}")
	return b.String()
}

// Init (re)initializes the map, discarding any existing entries, and
// returns v for call chaining. NOTE(review): does not take mu — presumably
// only called before the Map is shared; confirm.
func (v *Map) Init() *Map {
	v.m = make(map[string]Var)
	return v
}

// Get returns the Var stored under key, or nil if absent.
func (v *Map) Get(key string) Var {
	v.mu.Lock()
	defer v.mu.Unlock()
	return v.m[key]
}

// Set stores av under key, replacing any existing entry.
func (v *Map) Set(key string, av Var) {
	v.mu.Lock()
	defer v.mu.Unlock()
	v.m[key] = av
}

// Add adds delta to the *Int value stored under key, creating the entry
// as a new Int if it does not exist yet.
func (v *Map) Add(key string, delta int64) {
	v.mu.Lock()
	defer v.mu.Unlock()
	av, ok := v.m[key]
	if !ok {
		av = new(Int)
		v.m[key] = av
	}
	// Add to Int; ignore otherwise.
	if iv, ok := av.(*Int); ok {
		iv.Add(delta)
	}
}

// AddFloat adds delta to the *Float value stored under the given map key.
func (v *Map) AddFloat(key string, delta float64) {
	v.mu.Lock()
	defer v.mu.Unlock()
	av, ok := v.m[key]
	if !ok {
		av = new(Float)
		v.m[key] = av
	}
	// Add to Float; ignore otherwise.
	if iv, ok := av.(*Float); ok {
		iv.Add(delta)
	}
}

// iterate sends every entry down c, then closes it.
// TODO(rsc): Make sure map access in separate thread is safe.
// NOTE(review): ranges over v.m without holding mu.
func (v *Map) iterate(c chan<- KeyValue) {
	for k, v := range v.m {
		c <- KeyValue{k, v}
	}
	close(c)
}

// Iter returns a channel that yields all entries of the map.
func (v *Map) Iter() <-chan KeyValue {
	c := make(chan KeyValue)
	go v.iterate(c)
	return c
}

// String is a string variable, and satisfies the Var interface.
type String struct {
	s string
}

// String returns the value quoted as a Go/JSON string literal.
func (v *String) String() string { return strconv.Quote(v.s) }

// Set replaces the stored string. NOTE(review): unlike Int and Float,
// String has no mutex, so Set races with String — confirm intent.
func (v *String) Set(value string) { v.s = value }

// Func implements Var by calling the function
// and formatting the returned value using JSON.
type Func func() interface{}

// String invokes f and JSON-encodes the result; a Marshal error is
// silently dropped, yielding an empty string.
func (f Func) String() string {
	v, _ := json.Marshal(f())
	return string(v)
}

// All published variables.
var vars map[string]Var = make(map[string]Var)
var mutex sync.Mutex

// Publish declares a named exported variable. This should be called from a
// package's init function when it creates its Vars. If the name is already
// registered then this will log.Panic.
func Publish(name string, v Var) {
	mutex.Lock()
	defer mutex.Unlock()
	if _, existing := vars[name]; existing {
		log.Panicln("Reuse of exported var name:", name)
	}
	vars[name] = v
}

// Get retrieves a named exported variable.
// NOTE(review): reads vars without holding mutex.
func Get(name string) Var {
	return vars[name]
}

// RemoveAll removes all exported variables.
// This is for tests; don't call this on a real server.
func RemoveAll() {
	mutex.Lock()
	defer mutex.Unlock()
	vars = make(map[string]Var)
}

// Convenience functions for creating new exported variables.

// NewInt creates, publishes, and returns a new Int with the given name.
func NewInt(name string) *Int {
	v := new(Int)
	Publish(name, v)
	return v
}

// NewFloat creates, publishes, and returns a new Float with the given name.
func NewFloat(name string) *Float {
	v := new(Float)
	Publish(name, v)
	return v
}

// NewMap creates, publishes, and returns a new initialized Map.
func NewMap(name string) *Map {
	v := new(Map).Init()
	Publish(name, v)
	return v
}

// NewString creates, publishes, and returns a new String with the given name.
func NewString(name string) *String {
	v := new(String)
	Publish(name, v)
	return v
}

// iterate sends every published variable down c, then closes it.
// TODO(rsc): Make sure map access in separate thread is safe.
func iterate(c chan<- KeyValue) {
	for k, v := range vars {
		c <- KeyValue{k, v}
	}
	close(c)
}

// Iter returns a channel yielding all published variables.
func Iter() <-chan KeyValue {
	c := make(chan KeyValue)
	go iterate(c)
	return c
}

// expvarHandler serves the /debug/vars endpoint, emitting every published
// variable as a field of one JSON object.
// NOTE(review): iterates vars without holding mutex, racing with Publish.
func expvarHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	fmt.Fprintf(w, "{\n")
	first := true
	for name, value := range vars {
		if !first {
			fmt.Fprintf(w, ",\n")
		}
		first = false
		fmt.Fprintf(w, "%q: %s", name, value)
	}
	fmt.Fprintf(w, "\n}\n")
}

// cmdline reports this process's command line (os.Args).
func cmdline() interface{} {
	return os.Args
}

// memstats reports runtime.MemStats. NOTE(review): at this vintage of Go,
// runtime.MemStats is a package-level variable — confirm against the
// runtime version in use.
func memstats() interface{} {
	return runtime.MemStats
}

// init registers the HTTP handler and the standard cmdline/memstats vars.
func init() {
	http.Handle("/debug/vars", http.HandlerFunc(expvarHandler))
	Publish("cmdline", Func(cmdline))
	Publish("memstats", Func(memstats))
}
expvar: fix typo in Publish documentation
Found and fixed by bketelsen@gmail.com.
Not worth making him a CONTRIBUTOR to delete one character.
R=golang-dev, gri
CC=golang-dev
https://golang.org/cl/5476054
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package expvar provides a standardized interface to public variables, such
// as operation counters in servers. It exposes these variables via HTTP at
// /debug/vars in JSON format.
//
// Operations to set or modify these public variables are atomic.
//
// In addition to adding the HTTP handler, this package registers the
// following variables:
//
// cmdline   os.Args
// memstats  runtime.Memstats
//
// The package is sometimes only imported for the side effect of
// registering its HTTP handler and the above variables. To use it
// this way, simply link this package into your program:
// import _ "expvar"
//
package expvar

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"os"
	"runtime"
	"strconv"
	"sync"
)

// Var is an abstract type for all exported variables.
type Var interface {
	// String returns the variable's value; it should be JSON-compatible
	// text so the aggregate /debug/vars output forms a JSON object.
	String() string
}

// Int is a 64-bit integer variable that satisfies the Var interface.
type Int struct {
	i  int64
	mu sync.Mutex
}

// String formats the current value in base 10.
// NOTE(review): reads v.i without holding mu, so it races with concurrent
// Add/Set — confirm whether a stale/torn read is acceptable here.
func (v *Int) String() string { return strconv.FormatInt(v.i, 10) }

// Add adds delta to v.
func (v *Int) Add(delta int64) {
	v.mu.Lock()
	defer v.mu.Unlock()
	v.i += delta
}

// Set sets v to value.
func (v *Int) Set(value int64) {
	v.mu.Lock()
	defer v.mu.Unlock()
	v.i = value
}

// Float is a 64-bit float variable that satisfies the Var interface.
type Float struct {
	f  float64
	mu sync.Mutex
}

// String formats the current value with strconv's shortest 'g' form.
// NOTE(review): like Int.String, this reads v.f without holding mu.
func (v *Float) String() string { return strconv.FormatFloat(v.f, 'g', -1, 64) }

// Add adds delta to v.
func (v *Float) Add(delta float64) {
	v.mu.Lock()
	defer v.mu.Unlock()
	v.f += delta
}

// Set sets v to value.
func (v *Float) Set(value float64) {
	v.mu.Lock()
	defer v.mu.Unlock()
	v.f = value
}

// Map is a string-to-Var map variable that satisfies the Var interface.
type Map struct {
	m  map[string]Var
	mu sync.Mutex
}

// KeyValue represents a single entry in a Map.
type KeyValue struct {
	Key   string
	Value Var
}

// String renders the map as a JSON-style object. It relies on each
// value's String method producing JSON-compatible output.
func (v *Map) String() string {
	v.mu.Lock()
	defer v.mu.Unlock()
	b := new(bytes.Buffer)
	fmt.Fprintf(b, "{")
	first := true
	for key, val := range v.m {
		if !first {
			fmt.Fprintf(b, ", ")
		}
		fmt.Fprintf(b, "\"%s\": %v", key, val)
		first = false
	}
	fmt.Fprintf(b, "}")
	return b.String()
}

// Init (re)initializes the map, discarding any existing entries, and
// returns v for call chaining. NOTE(review): does not take mu — presumably
// only called before the Map is shared; confirm.
func (v *Map) Init() *Map {
	v.m = make(map[string]Var)
	return v
}

// Get returns the Var stored under key, or nil if absent.
func (v *Map) Get(key string) Var {
	v.mu.Lock()
	defer v.mu.Unlock()
	return v.m[key]
}

// Set stores av under key, replacing any existing entry.
func (v *Map) Set(key string, av Var) {
	v.mu.Lock()
	defer v.mu.Unlock()
	v.m[key] = av
}

// Add adds delta to the *Int value stored under key, creating the entry
// as a new Int if it does not exist yet.
func (v *Map) Add(key string, delta int64) {
	v.mu.Lock()
	defer v.mu.Unlock()
	av, ok := v.m[key]
	if !ok {
		av = new(Int)
		v.m[key] = av
	}
	// Add to Int; ignore otherwise.
	if iv, ok := av.(*Int); ok {
		iv.Add(delta)
	}
}

// AddFloat adds delta to the *Float value stored under the given map key.
func (v *Map) AddFloat(key string, delta float64) {
	v.mu.Lock()
	defer v.mu.Unlock()
	av, ok := v.m[key]
	if !ok {
		av = new(Float)
		v.m[key] = av
	}
	// Add to Float; ignore otherwise.
	if iv, ok := av.(*Float); ok {
		iv.Add(delta)
	}
}

// iterate sends every entry down c, then closes it.
// TODO(rsc): Make sure map access in separate thread is safe.
// NOTE(review): ranges over v.m without holding mu.
func (v *Map) iterate(c chan<- KeyValue) {
	for k, v := range v.m {
		c <- KeyValue{k, v}
	}
	close(c)
}

// Iter returns a channel that yields all entries of the map.
func (v *Map) Iter() <-chan KeyValue {
	c := make(chan KeyValue)
	go v.iterate(c)
	return c
}

// String is a string variable, and satisfies the Var interface.
type String struct {
	s string
}

// String returns the value quoted as a Go/JSON string literal.
func (v *String) String() string { return strconv.Quote(v.s) }

// Set replaces the stored string. NOTE(review): unlike Int and Float,
// String has no mutex, so Set races with String — confirm intent.
func (v *String) Set(value string) { v.s = value }

// Func implements Var by calling the function
// and formatting the returned value using JSON.
type Func func() interface{}

// String invokes f and JSON-encodes the result; a Marshal error is
// silently dropped, yielding an empty string.
func (f Func) String() string {
	v, _ := json.Marshal(f())
	return string(v)
}

// All published variables.
var vars map[string]Var = make(map[string]Var)
var mutex sync.Mutex

// Publish declares a named exported variable. This should be called from a
// package's init function when it creates its Vars. If the name is already
// registered then this will log.Panic.
func Publish(name string, v Var) {
	mutex.Lock()
	defer mutex.Unlock()
	if _, existing := vars[name]; existing {
		log.Panicln("Reuse of exported var name:", name)
	}
	vars[name] = v
}

// Get retrieves a named exported variable.
// NOTE(review): reads vars without holding mutex.
func Get(name string) Var {
	return vars[name]
}

// RemoveAll removes all exported variables.
// This is for tests; don't call this on a real server.
func RemoveAll() {
	mutex.Lock()
	defer mutex.Unlock()
	vars = make(map[string]Var)
}

// Convenience functions for creating new exported variables.

// NewInt creates, publishes, and returns a new Int with the given name.
func NewInt(name string) *Int {
	v := new(Int)
	Publish(name, v)
	return v
}

// NewFloat creates, publishes, and returns a new Float with the given name.
func NewFloat(name string) *Float {
	v := new(Float)
	Publish(name, v)
	return v
}

// NewMap creates, publishes, and returns a new initialized Map.
func NewMap(name string) *Map {
	v := new(Map).Init()
	Publish(name, v)
	return v
}

// NewString creates, publishes, and returns a new String with the given name.
func NewString(name string) *String {
	v := new(String)
	Publish(name, v)
	return v
}

// iterate sends every published variable down c, then closes it.
// TODO(rsc): Make sure map access in separate thread is safe.
func iterate(c chan<- KeyValue) {
	for k, v := range vars {
		c <- KeyValue{k, v}
	}
	close(c)
}

// Iter returns a channel yielding all published variables.
func Iter() <-chan KeyValue {
	c := make(chan KeyValue)
	go iterate(c)
	return c
}

// expvarHandler serves the /debug/vars endpoint, emitting every published
// variable as a field of one JSON object.
// NOTE(review): iterates vars without holding mutex, racing with Publish.
func expvarHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	fmt.Fprintf(w, "{\n")
	first := true
	for name, value := range vars {
		if !first {
			fmt.Fprintf(w, ",\n")
		}
		first = false
		fmt.Fprintf(w, "%q: %s", name, value)
	}
	fmt.Fprintf(w, "\n}\n")
}

// cmdline reports this process's command line (os.Args).
func cmdline() interface{} {
	return os.Args
}

// memstats reports runtime.MemStats. NOTE(review): at this vintage of Go,
// runtime.MemStats is a package-level variable — confirm against the
// runtime version in use.
func memstats() interface{} {
	return runtime.MemStats
}

// init registers the HTTP handler and the standard cmdline/memstats vars.
func init() {
	http.Handle("/debug/vars", http.HandlerFunc(expvarHandler))
	Publish("cmdline", Func(cmdline))
	Publish("memstats", Func(memstats))
}
|
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package net

import (
	"flag"
	"regexp"
	"runtime"
	"testing"
	"time"
)

// newLocalListener returns a TCP listener bound to an ephemeral port on
// the loopback interface, preferring IPv4 and falling back to IPv6.
// The test is aborted if neither address family is available.
func newLocalListener(t *testing.T) Listener {
	ln, err := Listen("tcp", "127.0.0.1:0")
	if err != nil {
		ln, err = Listen("tcp6", "[::1]:0")
	}
	if err != nil {
		t.Fatal(err)
	}
	return ln
}

// TestDialTimeout checks that DialTimeout reports an error satisfying the
// timeout interface when a connection cannot complete within the deadline.
// The strategy is OS-specific; unsupported systems are skipped.
func TestDialTimeout(t *testing.T) {
	ln := newLocalListener(t)
	defer ln.Close()
	errc := make(chan error)
	numConns := listenerBacklog + 10
	// TODO(bradfitz): It's hard to test this in a portable
	// way. This is unfortunate, but works for now.
	switch runtime.GOOS {
	case "linux":
		// The kernel will start accepting TCP connections before userspace
		// gets a chance to not accept them, so fire off a bunch to fill up
		// the kernel's backlog. Then we test we get a failure after that.
		for i := 0; i < numConns; i++ {
			go func() {
				_, err := DialTimeout("tcp", ln.Addr().String(), 200*time.Millisecond)
				errc <- err
			}()
		}
	case "darwin":
		// At least OS X 10.7 seems to accept any number of
		// connections, ignoring listen's backlog, so resort
		// to connecting to a hopefully-dead 127/8 address.
		// Same for windows.
		//
		// NOTE(review): if anything actually answers 127.0.71.111:80, the
		// dial succeeds and err is nil; since only one goroutine runs in
		// this branch, connected can never reach numConns, and the loop
		// below stalls until the 15s "too slow" failure. Consider asserting
		// that the dial must fail (and using a less plausible port).
		go func() {
			_, err := DialTimeout("tcp", "127.0.71.111:80", 200*time.Millisecond)
			errc <- err
		}()
	default:
		// TODO(bradfitz):
		// OpenBSD may have a reject route to 127/8 except 127.0.0.1/32
		// by default. FreeBSD likely works, but is untested.
		// TODO(rsc):
		// The timeout never happens on Windows. Why? Issue 3016.
		t.Logf("skipping test on %q; untested.", runtime.GOOS)
		return
	}
	// Drain dial results until we see a timeout (pass), all connections
	// succeed (fail), or 15 seconds elapse (fail).
	// NOTE(review): goroutines still blocked sending on the unbuffered errc
	// when the test returns are leaked; likely acceptable for a test.
	connected := 0
	for {
		select {
		case <-time.After(15 * time.Second):
			t.Fatal("too slow")
		case err := <-errc:
			if err == nil {
				connected++
				if connected == numConns {
					t.Fatal("all connections connected; expected some to time out")
				}
			} else {
				terr, ok := err.(timeout)
				if !ok {
					t.Fatalf("got error %q; want error with timeout interface", err)
				}
				if !terr.Timeout() {
					t.Fatalf("got error %q; not a timeout", err)
				}
				// Pass. We saw a timeout error.
				return
			}
		}
	}
}

// TestSelfConnect verifies that Dial never completes a TCP "self-connect"
// (source and destination being the same ephemeral port).
func TestSelfConnect(t *testing.T) {
	if runtime.GOOS == "windows" {
		// TODO(brainman): do not know why it hangs.
		t.Logf("skipping known-broken test on windows")
		return
	}
	// Test that Dial does not honor self-connects.
	// See the comment in DialTCP.
	// Find a port that would be used as a local address.
	l, err := Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	c, err := Dial("tcp", l.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	addr := c.LocalAddr().String()
	c.Close()
	l.Close()
	// Try to connect to that address repeatedly.
	n := 100000
	if testing.Short() {
		n = 1000
	}
	switch runtime.GOOS {
	case "darwin", "freebsd", "openbsd", "windows":
		// Non-Linux systems take a long time to figure
		// out that there is nothing listening on localhost.
		n = 100
	}
	for i := 0; i < n; i++ {
		c, err := Dial("tcp", addr)
		if err == nil {
			c.Close()
			t.Errorf("#%d: Dial %q succeeded", i, addr)
		}
	}
}

// runErrorTest gates TestDialError, which depends on external DNS behavior.
var runErrorTest = flag.Bool("run_error_test", false, "let TestDialError check for dns errors")

// DialErrorTest describes one Dial failure case: the network, the remote
// address, and a regexp the resulting error message must match.
type DialErrorTest struct {
	Net     string
	Raddr   string
	Pattern string
}

// dialErrorTests covers unknown networks, bad ports, unresolvable names,
// and unix-socket path errors.
var dialErrorTests = []DialErrorTest{
	{
		"datakit", "mh/astro/r70",
		"dial datakit mh/astro/r70: unknown network datakit",
	},
	{
		"tcp", "127.0.0.1:☺",
		"dial tcp 127.0.0.1:☺: unknown port tcp/☺",
	},
	{
		"tcp", "no-such-name.google.com.:80",
		"dial tcp no-such-name.google.com.:80: lookup no-such-name.google.com.( on .*)?: no (.*)",
	},
	{
		"tcp", "no-such-name.no-such-top-level-domain.:80",
		"dial tcp no-such-name.no-such-top-level-domain.:80: lookup no-such-name.no-such-top-level-domain.( on .*)?: no (.*)",
	},
	{
		"tcp", "no-such-name:80",
		`dial tcp no-such-name:80: lookup no-such-name\.(.*\.)?( on .*)?: no (.*)`,
	},
	{
		"tcp", "mh/astro/r70:http",
		"dial tcp mh/astro/r70:http: lookup mh/astro/r70: invalid domain name",
	},
	{
		"unix", "/etc/file-not-found",
		"dial unix /etc/file-not-found: no such file or directory",
	},
	{
		"unix", "/etc/",
		"dial unix /etc/: (permission denied|socket operation on non-socket|connection refused)",
	},
	{
		"unixpacket", "/etc/file-not-found",
		"dial unixpacket /etc/file-not-found: no such file or directory",
	},
	{
		"unixpacket", "/etc/",
		"dial unixpacket /etc/: (permission denied|socket operation on non-socket|connection refused)",
	},
}

// duplicateErrorPattern detects an error message that embeds two "dial"
// prefixes, indicating the error was wrapped twice.
var duplicateErrorPattern = `dial (.*) dial (.*)`

// TestDialError checks each failing Dial against its expected error
// pattern and rejects doubly-wrapped dial errors. Disabled by default
// because it depends on the local resolver's behavior.
func TestDialError(t *testing.T) {
	if !*runErrorTest {
		t.Logf("test disabled; use -run_error_test to enable")
		return
	}
	for i, tt := range dialErrorTests {
		c, err := Dial(tt.Net, tt.Raddr)
		if c != nil {
			c.Close()
		}
		if err == nil {
			t.Errorf("#%d: nil error, want match for %#q", i, tt.Pattern)
			continue
		}
		s := err.Error()
		match, _ := regexp.MatchString(tt.Pattern, s)
		if !match {
			t.Errorf("#%d: %q, want match for %#q", i, s, tt.Pattern)
		}
		match, _ = regexp.MatchString(duplicateErrorPattern, s)
		if match {
			t.Errorf("#%d: %q, duplicate error return from Dial", i, s)
		}
	}
}
net: fix TestDialTimeout on windows builder
I don't know what's out there, but something
is answering to 127.0.71.111:80 on our builder,
so use a different port.
Also insert a check that the dial fails, which
would have diagnosed this problem.
Fixes issue 3016.
R=golang-dev, mikioh.mikioh, r
CC=golang-dev
http://codereview.appspot.com/5754062
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package net

import (
	"flag"
	"fmt"
	"regexp"
	"runtime"
	"testing"
	"time"
)

// newLocalListener returns a TCP listener bound to an ephemeral port on
// the loopback interface, preferring IPv4 and falling back to IPv6.
// The test is aborted if neither address family is available.
func newLocalListener(t *testing.T) Listener {
	ln, err := Listen("tcp", "127.0.0.1:0")
	if err != nil {
		ln, err = Listen("tcp6", "[::1]:0")
	}
	if err != nil {
		t.Fatal(err)
	}
	return ln
}

// TestDialTimeout checks that DialTimeout reports an error satisfying the
// timeout interface when a connection cannot complete within the deadline.
// The strategy is OS-specific; unsupported systems are skipped.
func TestDialTimeout(t *testing.T) {
	ln := newLocalListener(t)
	defer ln.Close()
	errc := make(chan error)
	numConns := listenerBacklog + 10
	// TODO(bradfitz): It's hard to test this in a portable
	// way. This is unfortunate, but works for now.
	switch runtime.GOOS {
	case "linux":
		// The kernel will start accepting TCP connections before userspace
		// gets a chance to not accept them, so fire off a bunch to fill up
		// the kernel's backlog. Then we test we get a failure after that.
		for i := 0; i < numConns; i++ {
			go func() {
				_, err := DialTimeout("tcp", ln.Addr().String(), 200*time.Millisecond)
				errc <- err
			}()
		}
	case "darwin", "windows":
		// At least OS X 10.7 seems to accept any number of
		// connections, ignoring listen's backlog, so resort
		// to connecting to a hopefully-dead 127/8 address.
		// Same for windows.
		//
		// Use a bogus port (44444) instead of 80, because
		// on our 386 builder, this Dial succeeds, connecting
		// to an IIS web server somewhere. The data center
		// or VM or firewall must be stealing the TCP connection.
		//
		// An unexpected successful connect is converted into a non-nil
		// error so the loop below fails loudly instead of hanging.
		go func() {
			c, err := DialTimeout("tcp", "127.0.71.111:44444", 200*time.Millisecond)
			if err == nil {
				err = fmt.Errorf("unexpected: connected to %s!", c.RemoteAddr())
				c.Close()
			}
			errc <- err
		}()
	default:
		// TODO(bradfitz):
		// OpenBSD may have a reject route to 127/8 except 127.0.0.1/32
		// by default. FreeBSD likely works, but is untested.
		// TODO(rsc):
		// The timeout never happens on Windows. Why? Issue 3016.
		t.Logf("skipping test on %q; untested.", runtime.GOOS)
		return
	}
	// Drain dial results until we see a timeout (pass), all connections
	// succeed (fail), or 15 seconds elapse (fail).
	// NOTE(review): goroutines still blocked sending on the unbuffered errc
	// when the test returns are leaked; likely acceptable for a test.
	connected := 0
	for {
		select {
		case <-time.After(15 * time.Second):
			t.Fatal("too slow")
		case err := <-errc:
			if err == nil {
				connected++
				if connected == numConns {
					t.Fatal("all connections connected; expected some to time out")
				}
			} else {
				terr, ok := err.(timeout)
				if !ok {
					t.Fatalf("got error %q; want error with timeout interface", err)
				}
				if !terr.Timeout() {
					t.Fatalf("got error %q; not a timeout", err)
				}
				// Pass. We saw a timeout error.
				return
			}
		}
	}
}

// TestSelfConnect verifies that Dial never completes a TCP "self-connect"
// (source and destination being the same ephemeral port).
func TestSelfConnect(t *testing.T) {
	if runtime.GOOS == "windows" {
		// TODO(brainman): do not know why it hangs.
		t.Logf("skipping known-broken test on windows")
		return
	}
	// Test that Dial does not honor self-connects.
	// See the comment in DialTCP.
	// Find a port that would be used as a local address.
	l, err := Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	c, err := Dial("tcp", l.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	addr := c.LocalAddr().String()
	c.Close()
	l.Close()
	// Try to connect to that address repeatedly.
	n := 100000
	if testing.Short() {
		n = 1000
	}
	switch runtime.GOOS {
	case "darwin", "freebsd", "openbsd", "windows":
		// Non-Linux systems take a long time to figure
		// out that there is nothing listening on localhost.
		n = 100
	}
	for i := 0; i < n; i++ {
		c, err := Dial("tcp", addr)
		if err == nil {
			c.Close()
			t.Errorf("#%d: Dial %q succeeded", i, addr)
		}
	}
}

// runErrorTest gates TestDialError, which depends on external DNS behavior.
var runErrorTest = flag.Bool("run_error_test", false, "let TestDialError check for dns errors")

// DialErrorTest describes one Dial failure case: the network, the remote
// address, and a regexp the resulting error message must match.
type DialErrorTest struct {
	Net     string
	Raddr   string
	Pattern string
}

// dialErrorTests covers unknown networks, bad ports, unresolvable names,
// and unix-socket path errors.
var dialErrorTests = []DialErrorTest{
	{
		"datakit", "mh/astro/r70",
		"dial datakit mh/astro/r70: unknown network datakit",
	},
	{
		"tcp", "127.0.0.1:☺",
		"dial tcp 127.0.0.1:☺: unknown port tcp/☺",
	},
	{
		"tcp", "no-such-name.google.com.:80",
		"dial tcp no-such-name.google.com.:80: lookup no-such-name.google.com.( on .*)?: no (.*)",
	},
	{
		"tcp", "no-such-name.no-such-top-level-domain.:80",
		"dial tcp no-such-name.no-such-top-level-domain.:80: lookup no-such-name.no-such-top-level-domain.( on .*)?: no (.*)",
	},
	{
		"tcp", "no-such-name:80",
		`dial tcp no-such-name:80: lookup no-such-name\.(.*\.)?( on .*)?: no (.*)`,
	},
	{
		"tcp", "mh/astro/r70:http",
		"dial tcp mh/astro/r70:http: lookup mh/astro/r70: invalid domain name",
	},
	{
		"unix", "/etc/file-not-found",
		"dial unix /etc/file-not-found: no such file or directory",
	},
	{
		"unix", "/etc/",
		"dial unix /etc/: (permission denied|socket operation on non-socket|connection refused)",
	},
	{
		"unixpacket", "/etc/file-not-found",
		"dial unixpacket /etc/file-not-found: no such file or directory",
	},
	{
		"unixpacket", "/etc/",
		"dial unixpacket /etc/: (permission denied|socket operation on non-socket|connection refused)",
	},
}

// duplicateErrorPattern detects an error message that embeds two "dial"
// prefixes, indicating the error was wrapped twice.
var duplicateErrorPattern = `dial (.*) dial (.*)`

// TestDialError checks each failing Dial against its expected error
// pattern and rejects doubly-wrapped dial errors. Disabled by default
// because it depends on the local resolver's behavior.
func TestDialError(t *testing.T) {
	if !*runErrorTest {
		t.Logf("test disabled; use -run_error_test to enable")
		return
	}
	for i, tt := range dialErrorTests {
		c, err := Dial(tt.Net, tt.Raddr)
		if c != nil {
			c.Close()
		}
		if err == nil {
			t.Errorf("#%d: nil error, want match for %#q", i, tt.Pattern)
			continue
		}
		s := err.Error()
		match, _ := regexp.MatchString(tt.Pattern, s)
		if !match {
			t.Errorf("#%d: %q, want match for %#q", i, s, tt.Pattern)
		}
		match, _ = regexp.MatchString(duplicateErrorPattern, s)
		if match {
			t.Errorf("#%d: %q, duplicate error return from Dial", i, s)
		}
	}
}
|
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Plan 9 implementation of the os package's file operations.
package os

import (
	"runtime"
	"syscall"
)

// File represents an open file descriptor.
type File struct {
	*file
}

// file is the real representation of *File.
// The extra level of indirection ensures that no clients of os
// can overwrite this data, which could cause the finalizer
// to close the wrong file descriptor.
type file struct {
	fd      int
	name    string
	dirinfo *dirInfo // nil unless directory being read
}

// Fd returns the integer Unix file descriptor referencing the open file.
// It returns -1 if the receiver is nil.
func (file *File) Fd() int {
	if file == nil {
		return -1
	}
	return file.fd
}

// NewFile returns a new File with the given file descriptor and name.
// It returns nil for a negative fd. A finalizer is installed so the
// descriptor is closed if the File is garbage collected before Close.
func NewFile(fd int, name string) *File {
	if fd < 0 {
		return nil
	}
	f := &File{&file{fd: fd, name: name}}
	runtime.SetFinalizer(f.file, (*file).close)
	return f
}

// Auxiliary information if the File describes a directory
type dirInfo struct {
	buf  [syscall.STATMAX]byte // buffer for directory I/O
	nbuf int                   // length of buf; return value from Read
	bufp int                   // location of next record in buf.
}

// epipecheck is a no-op on Plan 9; presumably the EPIPE bookkeeping is
// only needed by the Unix implementation — confirm against file_unix.go.
func epipecheck(file *File, e error) {
}

// DevNull is the name of the operating system's ``null device.''
// On Unix-like systems, it is "/dev/null"; on Windows, "NUL".
const DevNull = "/dev/null"
// OpenFile is the generalized open call; most users will use Open
// or Create instead. It opens the named file with specified flag
// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful,
// methods on the returned File can be used for I/O.
// It returns the File and an error, if any.
func OpenFile(name string, flag int, perm uint32) (file *File, err error) {
	var (
		fd     int
		e      error
		create bool
		excl   bool
		trunc  bool
		// Renamed from "append", which shadowed the builtin.
		appendMode bool
	)
	if flag&O_CREATE == O_CREATE {
		flag = flag &^ O_CREATE
		create = true
	}
	if flag&O_EXCL == O_EXCL {
		excl = true
	}
	if flag&O_TRUNC == O_TRUNC {
		trunc = true
	}
	// O_APPEND is emulated on Plan 9 by seeking to EOF after open.
	if flag&O_APPEND == O_APPEND {
		flag = flag &^ O_APPEND
		appendMode = true
	}
	// Hold ForkLock so the descriptor is not inherited by a child
	// process forked concurrently.
	syscall.ForkLock.RLock()
	if (create && trunc) || excl {
		fd, e = syscall.Create(name, flag, perm)
	} else {
		// Try a plain open first; fall back to Create for O_CREATE
		// when the file does not exist yet.
		fd, e = syscall.Open(name, flag)
		if e != nil && create {
			var e1 error
			fd, e1 = syscall.Create(name, flag, perm)
			if e1 == nil {
				e = nil
			}
		}
	}
	syscall.ForkLock.RUnlock()
	if e != nil {
		return nil, &PathError{"open", name, e}
	}
	if appendMode {
		if _, e = syscall.Seek(fd, 0, SEEK_END); e != nil {
			// Close the descriptor so a failed append-seek does not
			// leak the open fd (previously it was leaked here).
			syscall.Close(fd)
			return nil, &PathError{"seek", name, e}
		}
	}
	return NewFile(fd, name), nil
}
// Close closes the File, rendering it unusable for I/O.
// It returns an error, if any.
func (file *File) Close() error {
return file.file.close()
}
func (file *file) close() error {
if file == nil || file.fd < 0 {
return Ebadfd
}
var err error
syscall.ForkLock.RLock()
if e := syscall.Close(file.fd); e != nil {
err = &PathError{"close", file.name, e}
}
syscall.ForkLock.RUnlock()
file.fd = -1 // so it can't be closed again
// no need for a finalizer anymore
runtime.SetFinalizer(file, nil)
return err
}
// Stat returns the FileInfo structure describing file.
// It returns the FileInfo and an error, if any.
func (f *File) Stat() (FileInfo, error) {
d, err := dirstat(f)
if err != nil {
return nil, err
}
return fileInfoFromStat(d), nil
}
// Truncate changes the size of the file.
// It does not change the I/O offset.
func (f *File) Truncate(size int64) error {
var d Dir
d.Null()
d.Length = uint64(size)
if e := syscall.Fwstat(f.fd, pdir(nil, &d)); e != nil {
return &PathError{"truncate", f.name, e}
}
return nil
}
// Chmod changes the mode of the file to mode.
func (f *File) Chmod(mode uint32) error {
var d Dir
var mask = ^uint32(0777)
d.Null()
odir, e := dirstat(f)
if e != nil {
return &PathError{"chmod", f.name, e}
}
d.Mode = (odir.Mode & mask) | (mode &^ mask)
if e := syscall.Fwstat(f.fd, pdir(nil, &d)); e != nil {
return &PathError{"chmod", f.name, e}
}
return nil
}
// Sync commits the current contents of the file to stable storage.
// Typically, this means flushing the file system's in-memory copy
// of recently written data to disk.
func (f *File) Sync() (err error) {
if f == nil {
return EINVAL
}
var d Dir
d.Null()
if e := syscall.Fwstat(f.fd, pdir(nil, &d)); e != nil {
return NewSyscallError("fsync", e)
}
return nil
}
// read reads up to len(b) bytes from the File.
// It returns the number of bytes read and an error, if any.
func (f *File) read(b []byte) (n int, err error) {
return syscall.Read(f.fd, b)
}
// pread reads len(b) bytes from the File starting at byte offset off.
// It returns the number of bytes read and the error, if any.
// EOF is signaled by a zero count with err set to nil.
func (f *File) pread(b []byte, off int64) (n int, err error) {
return syscall.Pread(f.fd, b, off)
}
// write writes len(b) bytes to the File.
// It returns the number of bytes written and an error, if any.
func (f *File) write(b []byte) (n int, err error) {
return syscall.Write(f.fd, b)
}
// pwrite writes len(b) bytes to the File starting at byte offset off.
// It returns the number of bytes written and an error, if any.
func (f *File) pwrite(b []byte, off int64) (n int, err error) {
return syscall.Pwrite(f.fd, b, off)
}
// seek sets the offset for the next Read or Write on file to offset, interpreted
// according to whence: 0 means relative to the origin of the file, 1 means
// relative to the current offset, and 2 means relative to the end.
// It returns the new offset and an error, if any.
func (f *File) seek(offset int64, whence int) (ret int64, err error) {
return syscall.Seek(f.fd, offset, whence)
}
// Truncate changes the size of the named file.
// If the file is a symbolic link, it changes the size of the link's target.
func Truncate(name string, size int64) error {
	// A null Dir with only Length set changes just the file's length.
	var d Dir
	d.Null()
	d.Length = uint64(size)
	if e := syscall.Wstat(name, pdir(nil, &d)); e != nil {
		return &PathError{"truncate", name, e}
	}
	return nil
}
// Remove removes the named file or directory.
// On failure the error is reported as a *PathError.
func Remove(name string) error {
	e := syscall.Remove(name)
	if e == nil {
		return nil
	}
	return &PathError{"remove", name, e}
}
// Rename renames a file.
// On Plan 9 this is done with a wstat that changes only the Name
// field, so it can only rename within the same directory.
func Rename(oldname, newname string) error {
	var d Dir
	d.Null()
	d.Name = newname
	if e := syscall.Wstat(oldname, pdir(nil, &d)); e != nil {
		return &PathError{"rename", oldname, e}
	}
	return nil
}
// Chmod changes the mode of the named file to mode.
func Chmod(name string, mode uint32) error {
	var d Dir
	// mask selects everything except the Unix permission bits (0777).
	var mask = ^uint32(0777)
	d.Null()
	// Read the current mode so non-permission bits are preserved.
	odir, e := dirstat(name)
	if e != nil {
		return &PathError{"chmod", name, e}
	}
	// Keep the old non-permission bits, install the new permission bits.
	d.Mode = (odir.Mode & mask) | (mode &^ mask)
	if e := syscall.Wstat(name, pdir(nil, &d)); e != nil {
		return &PathError{"chmod", name, e}
	}
	return nil
}
// Chtimes changes the access and modification times of the named
// file, similar to the Unix utime() or utimes() functions.
//
// The argument times are in nanoseconds, although the underlying
// filesystem may truncate or round the values to a more
// coarse time unit.
func Chtimes(name string, atimeNs int64, mtimeNs int64) error {
	var d Dir
	d.Null()
	// Plan 9 Dir times are in whole seconds, so sub-second precision
	// is discarded here.
	d.Atime = uint32(atimeNs / 1e9)
	d.Mtime = uint32(mtimeNs / 1e9)
	if e := syscall.Wstat(name, pdir(nil, &d)); e != nil {
		return &PathError{"chtimes", name, e}
	}
	return nil
}
// Pipe returns a connected pair of Files; reads from r return bytes
// written to w. It returns the files and an error, if any.
func Pipe() (r *File, w *File, err error) {
	var p [2]int
	// Hold ForkLock so the new descriptors are not leaked into a
	// child process forked concurrently.
	syscall.ForkLock.RLock()
	if e := syscall.Pipe(p[0:]); e != nil {
		syscall.ForkLock.RUnlock()
		return nil, nil, NewSyscallError("pipe", e)
	}
	syscall.ForkLock.RUnlock()
	return NewFile(p[0], "|0"), NewFile(p[1], "|1"), nil
}
// The following operations are not supported on Plan 9;
// each returns EPLAN9.

// Link creates a hard link.
func Link(oldname, newname string) error {
	return EPLAN9
}

// Symlink creates a symbolic link; unsupported on Plan 9.
func Symlink(oldname, newname string) error {
	return EPLAN9
}

// Readlink reads the target of a symbolic link; unsupported on Plan 9.
func Readlink(name string) (string, error) {
	return "", EPLAN9
}

// Chown changes the owner of the named file; unsupported on Plan 9.
func Chown(name string, uid, gid int) error {
	return EPLAN9
}

// Lchown changes the owner of the named link; unsupported on Plan 9.
func Lchown(name string, uid, gid int) error {
	return EPLAN9
}

// Chown changes the owner of the open file; unsupported on Plan 9.
func (f *File) Chown(uid, gid int) error {
	return EPLAN9
}
// TempDir returns the default directory to use for temporary files.
// The location is fixed; no environment variable is consulted here.
func TempDir() string {
	const tmp = "/tmp"
	return tmp
}
os: fix Plan 9 build after more FileMode changes
This should go in after Brad's CL 5553064.
R=bradfitz, rsc
CC=golang-dev
http://codereview.appspot.com/5555056
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package os
import (
"runtime"
"syscall"
)
// File represents an open file descriptor.
type File struct {
	*file
}

// file is the real representation of *File.
// The extra level of indirection ensures that no clients of os
// can overwrite this data, which could cause the finalizer
// to close the wrong file descriptor.
type file struct {
	fd      int      // underlying descriptor; -1 once closed
	name    string   // name used when the file was opened (for error messages)
	dirinfo *dirInfo // nil unless directory being read
}
// Fd returns the integer Unix file descriptor referencing the open file.
// A nil receiver yields -1.
func (file *File) Fd() int {
	if file != nil {
		return file.fd
	}
	return -1
}
// NewFile returns a new File with the given file descriptor and name.
// A negative descriptor yields nil.
func NewFile(fd int, name string) *File {
	if fd < 0 {
		return nil
	}
	f := &File{&file{fd: fd, name: name}}
	// The finalizer is set on the inner *file so the descriptor is
	// closed when the File becomes unreachable without an explicit Close.
	runtime.SetFinalizer(f.file, (*file).close)
	return f
}
// Auxiliary information if the File describes a directory
type dirInfo struct {
	buf  [syscall.STATMAX]byte // buffer for directory I/O
	nbuf int                   // length of buf; return value from Read
	bufp int                   // location of next record in buf.
}

// epipecheck is a no-op on Plan 9; other platforms use it to react
// to EPIPE write errors.
func epipecheck(file *File, e error) {
}

// DevNull is the name of the operating system's ``null device.''
// On Unix-like systems, it is "/dev/null"; on Windows, "NUL".
const DevNull = "/dev/null"
// syscallMode returns the syscall-specific mode bits from Go's portable mode bits.
// The permission bits pass through; the portable append/exclusive/temporary
// flags map to their Plan 9 DM* equivalents.
func syscallMode(i FileMode) (o uint32) {
	o |= uint32(i.Perm())
	if i&ModeAppend != 0 {
		o |= syscall.DMAPPEND
	}
	if i&ModeExclusive != 0 {
		o |= syscall.DMEXCL
	}
	if i&ModeTemporary != 0 {
		o |= syscall.DMTMP
	}
	return
}
// OpenFile is the generalized open call; most users will use Open
// or Create instead. It opens the named file with specified flag
// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful,
// methods on the returned File can be used for I/O.
// It returns the File and an error, if any.
func OpenFile(name string, flag int, perm FileMode) (file *File, err error) {
	var (
		fd       int
		e        error
		create   bool
		excl     bool
		trunc    bool
		doAppend bool // renamed from "append" so the builtin is not shadowed
	)
	if flag&O_CREATE == O_CREATE {
		flag = flag & ^O_CREATE
		create = true
	}
	if flag&O_EXCL == O_EXCL {
		excl = true
	}
	if flag&O_TRUNC == O_TRUNC {
		trunc = true
	}
	// O_APPEND is emulated on Plan 9 by seeking to EOF after opening.
	if flag&O_APPEND == O_APPEND {
		flag = flag &^ O_APPEND
		doAppend = true
	}
	// Hold ForkLock so the descriptor is not leaked into a concurrent fork.
	syscall.ForkLock.RLock()
	if (create && trunc) || excl {
		fd, e = syscall.Create(name, flag, syscallMode(perm))
	} else {
		fd, e = syscall.Open(name, flag)
		if e != nil && create {
			// Open failed; fall back to creating the file.
			var e1 error
			fd, e1 = syscall.Create(name, flag, syscallMode(perm))
			if e1 == nil {
				e = nil
			}
		}
	}
	syscall.ForkLock.RUnlock()
	if e != nil {
		return nil, &PathError{"open", name, e}
	}
	if doAppend {
		if _, e = syscall.Seek(fd, 0, SEEK_END); e != nil {
			// Fix: close the descriptor on failure so it does not leak;
			// the original returned without releasing fd.
			syscall.Close(fd)
			return nil, &PathError{"seek", name, e}
		}
	}
	return NewFile(fd, name), nil
}
// Close closes the File, rendering it unusable for I/O.
// It returns an error, if any.
func (file *File) Close() error {
	return file.file.close()
}

// close is the finalizer-safe implementation of Close on the inner file.
func (file *file) close() error {
	if file == nil || file.fd < 0 {
		return Ebadfd
	}
	var err error
	syscall.ForkLock.RLock()
	if e := syscall.Close(file.fd); e != nil {
		err = &PathError{"close", file.name, e}
	}
	syscall.ForkLock.RUnlock()
	file.fd = -1 // so it can't be closed again
	// no need for a finalizer anymore
	runtime.SetFinalizer(file, nil)
	return err
}
// Stat returns the FileInfo structure describing file.
// It returns the FileInfo and an error, if any.
func (f *File) Stat() (FileInfo, error) {
	d, err := dirstat(f)
	if err != nil {
		return nil, err
	}
	return fileInfoFromStat(d), nil
}
// Truncate changes the size of the file.
// It does not change the I/O offset.
func (f *File) Truncate(size int64) error {
	// A null Dir with only Length set changes just the file's length.
	var d Dir
	d.Null()
	d.Length = uint64(size)
	if e := syscall.Fwstat(f.fd, pdir(nil, &d)); e != nil {
		return &PathError{"truncate", f.name, e}
	}
	return nil
}
// chmodMask selects the mode bits that Chmod is allowed to change:
// the Plan 9 append/exclusive/temporary flags plus the permission bits.
const chmodMask = uint32(syscall.DMAPPEND | syscall.DMEXCL | syscall.DMTMP | ModePerm)

// Chmod changes the mode of the file to mode.
func (f *File) Chmod(mode FileMode) error {
	var d Dir
	// Read the current mode so bits outside chmodMask are preserved.
	odir, e := dirstat(f)
	if e != nil {
		return &PathError{"chmod", f.name, e}
	}
	d.Null()
	d.Mode = odir.Mode&^chmodMask | syscallMode(mode)&chmodMask
	if e := syscall.Fwstat(f.fd, pdir(nil, &d)); e != nil {
		return &PathError{"chmod", f.name, e}
	}
	return nil
}
// Sync commits the current contents of the file to stable storage.
// Typically, this means flushing the file system's in-memory copy
// of recently written data to disk.
func (f *File) Sync() (err error) {
	if f == nil {
		return EINVAL
	}
	// A wstat with a null (all-"don't change") Dir is the Plan 9 idiom
	// for asking the server to flush the file — see Plan 9 stat(5).
	var d Dir
	d.Null()
	if e := syscall.Fwstat(f.fd, pdir(nil, &d)); e != nil {
		return NewSyscallError("fsync", e)
	}
	return nil
}
// read reads up to len(b) bytes from the File.
// It returns the number of bytes read and an error, if any.
// It is the platform-specific backend for the portable Read method.
func (f *File) read(b []byte) (n int, err error) {
	return syscall.Read(f.fd, b)
}

// pread reads len(b) bytes from the File starting at byte offset off.
// It returns the number of bytes read and the error, if any.
// EOF is signaled by a zero count with err set to nil.
func (f *File) pread(b []byte, off int64) (n int, err error) {
	return syscall.Pread(f.fd, b, off)
}

// write writes len(b) bytes to the File.
// It returns the number of bytes written and an error, if any.
func (f *File) write(b []byte) (n int, err error) {
	return syscall.Write(f.fd, b)
}

// pwrite writes len(b) bytes to the File starting at byte offset off.
// It returns the number of bytes written and an error, if any.
func (f *File) pwrite(b []byte, off int64) (n int, err error) {
	return syscall.Pwrite(f.fd, b, off)
}

// seek sets the offset for the next Read or Write on file to offset, interpreted
// according to whence: 0 means relative to the origin of the file, 1 means
// relative to the current offset, and 2 means relative to the end.
// It returns the new offset and an error, if any.
func (f *File) seek(offset int64, whence int) (ret int64, err error) {
	return syscall.Seek(f.fd, offset, whence)
}
// Truncate changes the size of the named file.
// If the file is a symbolic link, it changes the size of the link's target.
func Truncate(name string, size int64) error {
	// A null Dir with only Length set changes just the file's length.
	var d Dir
	d.Null()
	d.Length = uint64(size)
	if e := syscall.Wstat(name, pdir(nil, &d)); e != nil {
		return &PathError{"truncate", name, e}
	}
	return nil
}
// Remove removes the named file or directory.
// On failure the error is reported as a *PathError.
func Remove(name string) error {
	e := syscall.Remove(name)
	if e == nil {
		return nil
	}
	return &PathError{"remove", name, e}
}
// Rename renames a file.
// On Plan 9 this is done with a wstat that changes only the Name
// field, so it can only rename within the same directory.
func Rename(oldname, newname string) error {
	var d Dir
	d.Null()
	d.Name = newname
	if e := syscall.Wstat(oldname, pdir(nil, &d)); e != nil {
		return &PathError{"rename", oldname, e}
	}
	return nil
}
// Chmod changes the mode of the named file to mode.
func Chmod(name string, mode FileMode) error {
	var d Dir
	// Read the current mode so bits outside chmodMask are preserved.
	odir, e := dirstat(name)
	if e != nil {
		return &PathError{"chmod", name, e}
	}
	d.Null()
	d.Mode = odir.Mode&^chmodMask | syscallMode(mode)&chmodMask
	if e := syscall.Wstat(name, pdir(nil, &d)); e != nil {
		return &PathError{"chmod", name, e}
	}
	return nil
}
// Chtimes changes the access and modification times of the named
// file, similar to the Unix utime() or utimes() functions.
//
// The argument times are in nanoseconds, although the underlying
// filesystem may truncate or round the values to a more
// coarse time unit.
func Chtimes(name string, atimeNs int64, mtimeNs int64) error {
	var d Dir
	d.Null()
	// Plan 9 Dir times are in whole seconds, so sub-second precision
	// is discarded here.
	d.Atime = uint32(atimeNs / 1e9)
	d.Mtime = uint32(mtimeNs / 1e9)
	if e := syscall.Wstat(name, pdir(nil, &d)); e != nil {
		return &PathError{"chtimes", name, e}
	}
	return nil
}
// Pipe returns a connected pair of Files; reads from r return bytes
// written to w. It returns the files and an error, if any.
func Pipe() (r *File, w *File, err error) {
	var p [2]int
	// Hold ForkLock so the new descriptors are not leaked into a
	// child process forked concurrently.
	syscall.ForkLock.RLock()
	if e := syscall.Pipe(p[0:]); e != nil {
		syscall.ForkLock.RUnlock()
		return nil, nil, NewSyscallError("pipe", e)
	}
	syscall.ForkLock.RUnlock()
	return NewFile(p[0], "|0"), NewFile(p[1], "|1"), nil
}
// The following operations are not supported on Plan 9;
// each returns EPLAN9.

// Link creates a hard link.
func Link(oldname, newname string) error {
	return EPLAN9
}

// Symlink creates a symbolic link; unsupported on Plan 9.
func Symlink(oldname, newname string) error {
	return EPLAN9
}

// Readlink reads the target of a symbolic link; unsupported on Plan 9.
func Readlink(name string) (string, error) {
	return "", EPLAN9
}

// Chown changes the owner of the named file; unsupported on Plan 9.
func Chown(name string, uid, gid int) error {
	return EPLAN9
}

// Lchown changes the owner of the named link; unsupported on Plan 9.
func Lchown(name string, uid, gid int) error {
	return EPLAN9
}

// Chown changes the owner of the open file; unsupported on Plan 9.
func (f *File) Chown(uid, gid int) error {
	return EPLAN9
}
// TempDir returns the default directory to use for temporary files.
// The location is fixed; no environment variable is consulted here.
func TempDir() string {
	const tmp = "/tmp"
	return tmp
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package regexp implements regular expression search.
//
// The syntax of the regular expressions accepted is the same
// general syntax used by Perl, Python, and other languages.
// More precisely, it is the syntax accepted by RE2 and described at
// http://code.google.com/p/re2/wiki/Syntax, except for \C.
//
// All characters are UTF-8-encoded code points.
//
// There are 16 methods of Regexp that match a regular expression and identify
// the matched text. Their names are matched by this regular expression:
//
// Find(All)?(String)?(Submatch)?(Index)?
//
// If 'All' is present, the routine matches successive non-overlapping
// matches of the entire expression. Empty matches abutting a preceding
// match are ignored. The return value is a slice containing the successive
// return values of the corresponding non-'All' routine. These routines take
// an extra integer argument, n; if n >= 0, the function returns at most n
// matches/submatches.
//
// If 'String' is present, the argument is a string; otherwise it is a slice
// of bytes; return values are adjusted as appropriate.
//
// If 'Submatch' is present, the return value is a slice identifying the
// successive submatches of the expression. Submatches are matches of
// parenthesized subexpressions within the regular expression, numbered from
// left to right in order of opening parenthesis. Submatch 0 is the match of
// the entire expression, submatch 1 the match of the first parenthesized
// subexpression, and so on.
//
// If 'Index' is present, matches and submatches are identified by byte index
// pairs within the input string: result[2*n:2*n+1] identifies the indexes of
// the nth submatch. The pair for n==0 identifies the match of the entire
// expression. If 'Index' is not present, the match is identified by the
// text of the match/submatch. If an index is negative, it means that
// subexpression did not match any string in the input.
//
// There is also a subset of the methods that can be applied to text read
// from a RuneReader:
//
// MatchReader, FindReaderIndex, FindReaderSubmatchIndex
//
// This set may grow. Note that regular expression matches may need to
// examine text beyond the text returned by a match, so the methods that
// match text from a RuneReader may read arbitrarily far into the input
// before returning.
//
// (There are a few other methods that do not match this pattern.)
//
package regexp
import (
"bytes"
"io"
"regexp/syntax"
"strconv"
"strings"
"sync"
"unicode"
"unicode/utf8"
)
// debug is a package-level tracing switch, presumably consulted by the
// matching engines elsewhere in the package (not referenced in this chunk).
var debug = false

// Regexp is the representation of a compiled regular expression.
// The public interface is entirely through methods.
// A Regexp is safe for concurrent use by multiple goroutines.
type Regexp struct {
	// read-only after Compile
	expr           string         // as passed to Compile
	prog           *syntax.Prog   // compiled program
	prefix         string         // required prefix in unanchored matches
	prefixBytes    []byte         // prefix, as a []byte
	prefixComplete bool           // prefix is the entire regexp
	prefixRune     rune           // first rune in prefix
	cond           syntax.EmptyOp // empty-width conditions required at start of match
	numSubexp      int            // number of capturing groups
	subexpNames    []string       // names of capturing groups; "" when unnamed
	longest        bool           // leftmost-longest (POSIX) semantics when true
	// cache of machines for running regexp
	mu      sync.Mutex // guards machine
	machine []*machine // free list of reusable matching machines
}

// String returns the source text used to compile the regular expression.
func (re *Regexp) String() string {
	return re.expr
}
// Compile parses a regular expression and returns, if successful,
// a Regexp object that can be used to match against text.
//
// When matching against text, the regexp returns a match that
// begins as early as possible in the input (leftmost), and among those
// it chooses the one that a backtracking search would have found first.
// This so-called leftmost-first matching is the same semantics
// that Perl, Python, and other implementations use, although this
// package implements it without the expense of backtracking.
// For POSIX leftmost-longest matching, see CompilePOSIX.
func Compile(expr string) (*Regexp, error) {
	return compile(expr, syntax.Perl, false)
}

// CompilePOSIX is like Compile but restricts the regular expression
// to POSIX ERE (egrep) syntax and changes the match semantics to
// leftmost-longest.
//
// That is, when matching against text, the regexp returns a match that
// begins as early as possible in the input (leftmost), and among those
// it chooses a match that is as long as possible.
// This so-called leftmost-longest matching is the same semantics
// that early regular expression implementations used and that POSIX
// specifies.
//
// However, there can be multiple leftmost-longest matches, with different
// submatch choices, and here this package diverges from POSIX.
// Among the possible leftmost-longest matches, this package chooses
// the one that a backtracking search would have found first, while POSIX
// specifies that the match be chosen to maximize the length of the first
// subexpression, then the second, and so on from left to right.
// The POSIX rule is computationally prohibitive and not even well-defined.
// See http://swtch.com/~rsc/regexp/regexp2.html#posix for details.
func CompilePOSIX(expr string) (*Regexp, error) {
	return compile(expr, syntax.POSIX, true)
}
// compile is the shared implementation of Compile and CompilePOSIX:
// it parses expr under the given syntax flags, simplifies the parse
// tree, compiles it to a program, and precomputes the literal prefix.
func compile(expr string, mode syntax.Flags, longest bool) (*Regexp, error) {
	re, err := syntax.Parse(expr, mode)
	if err != nil {
		return nil, err
	}
	// Capture counts/names are taken before Simplify, which may
	// rewrite the tree.
	maxCap := re.MaxCap()
	capNames := re.CapNames()
	re = re.Simplify()
	prog, err := syntax.Compile(re)
	if err != nil {
		return nil, err
	}
	regexp := &Regexp{
		expr:        expr,
		prog:        prog,
		numSubexp:   maxCap,
		subexpNames: capNames,
		cond:        prog.StartCond(),
		longest:     longest,
	}
	// Precompute the required literal prefix for fast scanning.
	regexp.prefix, regexp.prefixComplete = prog.Prefix()
	if regexp.prefix != "" {
		// TODO(rsc): Remove this allocation by adding
		// IndexString to package bytes.
		regexp.prefixBytes = []byte(regexp.prefix)
		regexp.prefixRune, _ = utf8.DecodeRuneInString(regexp.prefix)
	}
	return regexp, nil
}
// get returns a machine to use for matching re.
// It uses the re's machine cache if possible, to avoid
// unnecessary allocation.
func (re *Regexp) get() *machine {
	re.mu.Lock()
	if n := len(re.machine); n > 0 {
		// Pop the most recently returned machine from the cache.
		z := re.machine[n-1]
		re.machine = re.machine[:n-1]
		re.mu.Unlock()
		return z
	}
	re.mu.Unlock()
	// Cache empty: build a fresh machine outside the lock.
	z := progMachine(re.prog)
	z.re = re
	return z
}
// put returns a machine to the re's machine cache.
// There is no attempt to limit the size of the cache, so it will
// grow to the maximum number of simultaneous matches
// run using re. (The cache empties when re gets garbage collected.)
func (re *Regexp) put(z *machine) {
	re.mu.Lock()
	defer re.mu.Unlock()
	re.machine = append(re.machine, z)
}
// MustCompile is like Compile but panics if the expression cannot be parsed.
// It simplifies safe initialization of global variables holding compiled regular
// expressions.
func MustCompile(str string) *Regexp {
	// Fix: name the error "err" instead of "error", which shadowed the
	// predeclared error type inside this function.
	regexp, err := Compile(str)
	if err != nil {
		panic(`regexp: Compile(` + quote(str) + `): ` + err.Error())
	}
	return regexp
}
// MustCompilePOSIX is like CompilePOSIX but panics if the expression cannot be parsed.
// It simplifies safe initialization of global variables holding compiled regular
// expressions.
func MustCompilePOSIX(str string) *Regexp {
	// Fix: name the error "err" instead of "error", which shadowed the
	// predeclared error type inside this function.
	regexp, err := CompilePOSIX(str)
	if err != nil {
		panic(`regexp: CompilePOSIX(` + quote(str) + `): ` + err.Error())
	}
	return regexp
}
// quote returns s formatted for inclusion in an error message:
// backquoted when the string permits it, otherwise in double-quoted
// Go syntax.
func quote(s string) string {
	if !strconv.CanBackquote(s) {
		return strconv.Quote(s)
	}
	return "`" + s + "`"
}
// NumSubexp returns the number of parenthesized subexpressions in this Regexp.
func (re *Regexp) NumSubexp() int {
	return re.numSubexp
}

// SubexpNames returns the names of the parenthesized subexpressions
// in this Regexp. The name for the first sub-expression is names[1],
// so that if m is a match slice, the name for m[i] is SubexpNames()[i].
// Since the Regexp as a whole cannot be named, names[0] is always
// the empty string. The slice should not be modified.
func (re *Regexp) SubexpNames() []string {
	return re.subexpNames
}

// endOfText is the sentinel rune returned by input.step when no more
// input is available.
const endOfText rune = -1

// input abstracts different representations of the input text. It provides
// one-character lookahead.
type input interface {
	step(pos int) (r rune, width int) // advance one rune
	canCheckPrefix() bool             // can we look ahead without losing info?
	hasPrefix(re *Regexp) bool        // does the input start with re's literal prefix?
	index(re *Regexp, pos int) int    // offset of re's prefix at or after pos, or -1
	context(pos int) syntax.EmptyOp   // empty-width conditions holding at pos
}
// inputString scans a string.
type inputString struct {
	str string
}

// step returns the rune starting at pos and its width in bytes,
// or (endOfText, 0) at end of input.
func (i *inputString) step(pos int) (rune, int) {
	if pos < len(i.str) {
		c := i.str[pos]
		// Fast path for ASCII avoids the full UTF-8 decode.
		if c < utf8.RuneSelf {
			return rune(c), 1
		}
		return utf8.DecodeRuneInString(i.str[pos:])
	}
	return endOfText, 0
}

func (i *inputString) canCheckPrefix() bool {
	return true
}

func (i *inputString) hasPrefix(re *Regexp) bool {
	return strings.HasPrefix(i.str, re.prefix)
}

func (i *inputString) index(re *Regexp, pos int) int {
	return strings.Index(i.str[pos:], re.prefix)
}

// context returns the empty-width conditions (^, $, \b, ...) that hold
// at pos, determined by the runes immediately before and after it.
func (i *inputString) context(pos int) syntax.EmptyOp {
	r1, r2 := endOfText, endOfText
	if pos > 0 && pos <= len(i.str) {
		r1, _ = utf8.DecodeLastRuneInString(i.str[:pos])
	}
	if pos < len(i.str) {
		r2, _ = utf8.DecodeRuneInString(i.str[pos:])
	}
	return syntax.EmptyOpContext(r1, r2)
}
// inputBytes scans a byte slice. It mirrors inputString using the
// bytes package instead of strings.
type inputBytes struct {
	str []byte
}

// step returns the rune starting at pos and its width in bytes,
// or (endOfText, 0) at end of input.
func (i *inputBytes) step(pos int) (rune, int) {
	if pos < len(i.str) {
		c := i.str[pos]
		// Fast path for ASCII avoids the full UTF-8 decode.
		if c < utf8.RuneSelf {
			return rune(c), 1
		}
		return utf8.DecodeRune(i.str[pos:])
	}
	return endOfText, 0
}

func (i *inputBytes) canCheckPrefix() bool {
	return true
}

func (i *inputBytes) hasPrefix(re *Regexp) bool {
	return bytes.HasPrefix(i.str, re.prefixBytes)
}

func (i *inputBytes) index(re *Regexp, pos int) int {
	return bytes.Index(i.str[pos:], re.prefixBytes)
}

// context returns the empty-width conditions (^, $, \b, ...) that hold
// at pos, determined by the runes immediately before and after it.
func (i *inputBytes) context(pos int) syntax.EmptyOp {
	r1, r2 := endOfText, endOfText
	if pos > 0 && pos <= len(i.str) {
		r1, _ = utf8.DecodeLastRune(i.str[:pos])
	}
	if pos < len(i.str) {
		r2, _ = utf8.DecodeRune(i.str[pos:])
	}
	return syntax.EmptyOpContext(r1, r2)
}
// inputReader scans a RuneReader. Because the reader cannot seek,
// prefix checks and indexing are unavailable.
type inputReader struct {
	r     io.RuneReader
	atEOT bool // reader has reported an error/end of text
	pos   int  // byte position of the next rune to be read
}

// step reads the next rune. It only supports strictly sequential
// access: a request for any pos other than the current one returns
// endOfText.
func (i *inputReader) step(pos int) (rune, int) {
	if !i.atEOT && pos != i.pos {
		return endOfText, 0
	}
	r, w, err := i.r.ReadRune()
	if err != nil {
		i.atEOT = true
		return endOfText, 0
	}
	i.pos += w
	return r, w
}

func (i *inputReader) canCheckPrefix() bool {
	return false
}

func (i *inputReader) hasPrefix(re *Regexp) bool {
	return false
}

func (i *inputReader) index(re *Regexp, pos int) int {
	return -1
}

// context always reports no conditions; a stream provides no lookback.
func (i *inputReader) context(pos int) syntax.EmptyOp {
	return 0
}
// LiteralPrefix returns a literal string that must begin any match
// of the regular expression re. It returns the boolean true if the
// literal string comprises the entire regular expression.
func (re *Regexp) LiteralPrefix() (prefix string, complete bool) {
	return re.prefix, re.prefixComplete
}

// MatchReader returns whether the Regexp matches the text read by the
// RuneReader. The return value is a boolean: true for match, false for no
// match.
func (re *Regexp) MatchReader(r io.RuneReader) bool {
	return re.doExecute(r, nil, "", 0, 0) != nil
}

// MatchString returns whether the Regexp matches the string s.
// The return value is a boolean: true for match, false for no match.
func (re *Regexp) MatchString(s string) bool {
	return re.doExecute(nil, nil, s, 0, 0) != nil
}

// Match returns whether the Regexp matches the byte slice b.
// The return value is a boolean: true for match, false for no match.
func (re *Regexp) Match(b []byte) bool {
	return re.doExecute(nil, b, "", 0, 0) != nil
}
// MatchReader checks whether a textual regular expression matches the text
// read by the RuneReader. More complicated queries need to use Compile and
// the full Regexp interface.
func MatchReader(pattern string, r io.RuneReader) (matched bool, error error) {
	re, err := Compile(pattern)
	if err != nil {
		return false, err
	}
	return re.MatchReader(r), nil
}

// MatchString checks whether a textual regular expression
// matches a string. More complicated queries need
// to use Compile and the full Regexp interface.
func MatchString(pattern string, s string) (matched bool, error error) {
	re, err := Compile(pattern)
	if err != nil {
		return false, err
	}
	return re.MatchString(s), nil
}

// Match checks whether a textual regular expression
// matches a byte slice. More complicated queries need
// to use Compile and the full Regexp interface.
func Match(pattern string, b []byte) (matched bool, error error) {
	re, err := Compile(pattern)
	if err != nil {
		return false, err
	}
	return re.Match(b), nil
}
// ReplaceAllString returns a copy of src, replacing matches of the Regexp
// with the replacement string repl. Inside repl, $ signs are interpreted as
// in Expand, so for instance $1 represents the text of the first submatch.
func (re *Regexp) ReplaceAllString(src, repl string) string {
	n := 2
	// Only pay for full submatch tracking when repl can reference
	// submatches. (strings.Contains is the idiomatic form of
	// strings.Index(...) >= 0.)
	if strings.Contains(repl, "$") {
		n = 2 * (re.numSubexp + 1)
	}
	b := re.replaceAll(nil, src, n, func(dst []byte, match []int) []byte {
		return re.expand(dst, repl, nil, src, match)
	})
	return string(b)
}
// ReplaceAllLiteralString returns a copy of src, replacing matches of the Regexp
// with the replacement string repl. The replacement repl is substituted directly,
// without using Expand.
func (re *Regexp) ReplaceAllLiteralString(src, repl string) string {
	return string(re.replaceAll(nil, src, 2, func(dst []byte, match []int) []byte {
		return append(dst, repl...)
	}))
}
// ReplaceAllStringFunc returns a copy of src in which all matches of the
// Regexp have been replaced by the return value of the function repl applied
// to the matched substring. The replacement returned by repl is substituted
// directly, without using Expand.
func (re *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string {
	b := re.replaceAll(nil, src, 2, func(dst []byte, match []int) []byte {
		return append(dst, repl(src[match[0]:match[1]])...)
	})
	return string(b)
}
// replaceAll is the engine behind all the ReplaceAll* variants.
// Exactly one of bsrc (byte-slice input) and src (string input) is used;
// bsrc takes precedence when non-nil. nmatch is how many match indexes
// the caller's repl callback needs; repl appends the replacement for
// match m onto dst and returns the extended slice.
func (re *Regexp) replaceAll(bsrc []byte, src string, nmatch int, repl func(dst []byte, m []int) []byte) []byte {
	lastMatchEnd := 0 // end position of the most recent match
	searchPos := 0    // position where we next look for a match
	var buf []byte
	var endPos int
	if bsrc != nil {
		endPos = len(bsrc)
	} else {
		endPos = len(src)
	}
	for searchPos <= endPos {
		a := re.doExecute(nil, bsrc, src, searchPos, nmatch)
		if len(a) == 0 {
			break // no more matches
		}
		// Copy the unmatched characters before this match.
		if bsrc != nil {
			buf = append(buf, bsrc[lastMatchEnd:a[0]]...)
		} else {
			buf = append(buf, src[lastMatchEnd:a[0]]...)
		}
		// Now insert a copy of the replacement string, but not for a
		// match of the empty string immediately after another match.
		// (Otherwise, we get double replacement for patterns that
		// match both empty and nonempty strings.)
		if a[1] > lastMatchEnd || a[0] == 0 {
			buf = repl(buf, a)
		}
		lastMatchEnd = a[1]
		// Advance past this match; always advance at least one character.
		var width int
		if bsrc != nil {
			_, width = utf8.DecodeRune(bsrc[searchPos:])
		} else {
			_, width = utf8.DecodeRuneInString(src[searchPos:])
		}
		if searchPos+width > a[1] {
			searchPos += width
		} else if searchPos+1 > a[1] {
			// This clause is only needed at the end of the input
			// string. In that case, DecodeRuneInString returns width=0.
			searchPos++
		} else {
			searchPos = a[1]
		}
	}
	// Copy the unmatched characters after the last match.
	if bsrc != nil {
		buf = append(buf, bsrc[lastMatchEnd:]...)
	} else {
		buf = append(buf, src[lastMatchEnd:]...)
	}
	return buf
}
// ReplaceAll returns a copy of src, replacing matches of the Regexp
// with the replacement string repl. Inside repl, $ signs are interpreted as
// in Expand, so for instance $1 represents the text of the first submatch.
func (re *Regexp) ReplaceAll(src, repl []byte) []byte {
	n := 2
	// Only track all submatches when repl can reference them.
	if bytes.IndexByte(repl, '$') >= 0 {
		n = 2 * (re.numSubexp + 1)
	}
	srepl := ""
	b := re.replaceAll(src, "", n, func(dst []byte, match []int) []byte {
		// expand needs repl as a string; convert lazily on the first
		// match and reuse the conversion on subsequent matches.
		if len(srepl) != len(repl) {
			srepl = string(repl)
		}
		return re.expand(dst, srepl, src, "", match)
	})
	return b
}
// ReplaceAllLiteral returns a copy of src, replacing matches of the Regexp
// with the replacement bytes repl. The replacement repl is substituted directly,
// without using Expand.
func (re *Regexp) ReplaceAllLiteral(src, repl []byte) []byte {
	return re.replaceAll(src, "", 2, func(dst []byte, match []int) []byte {
		return append(dst, repl...)
	})
}

// ReplaceAllFunc returns a copy of src in which all matches of the
// Regexp have been replaced by the return value of the function repl applied
// to the matched byte slice. The replacement returned by repl is substituted
// directly, without using Expand.
func (re *Regexp) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte {
	return re.replaceAll(src, "", 2, func(dst []byte, match []int) []byte {
		return append(dst, repl(src[match[0]:match[1]])...)
	})
}
// specialBytes lists the regular-expression metacharacters that
// QuoteMeta must escape.
var specialBytes = []byte(`\.+*?()|[]{}^$`)

// special reports whether b is a regular-expression metacharacter.
func special(b byte) bool {
	return bytes.IndexByte(specialBytes, b) >= 0
}

// QuoteMeta returns a string that quotes all regular expression metacharacters
// inside the argument text; the returned string is a regular expression matching
// the literal text. For example, QuoteMeta(`[foo]`) returns `\[foo\]`.
func QuoteMeta(s string) string {
	// A byte-wise scan is correct because every metacharacter is ASCII;
	// multi-byte UTF-8 sequences pass through untouched.
	quoted := make([]byte, 0, 2*len(s))
	for i := 0; i < len(s); i++ {
		if special(s[i]) {
			quoted = append(quoted, '\\')
		}
		quoted = append(quoted, s[i])
	}
	return string(quoted)
}
// The number of capture values in the program may correspond
// to fewer capturing expressions than are in the regexp.
// For example, "(a){0}" turns into an empty program, so the
// maximum capture in the program is 0 but we need to return
// an expression for \1. Pad appends -1s to the slice a as needed.
func (re *Regexp) pad(a []int) []int {
	if a == nil {
		return nil // no match at all
	}
	// A complete result holds two indexes per group, counting group 0.
	want := (1 + re.numSubexp) * 2
	for len(a) < want {
		a = append(a, -1)
	}
	return a
}
// Find matches in slice b if b is non-nil, otherwise find matches in string s.
// At most n matches are delivered (n < 0 is effectively unlimited because the
// loop index i never reaches it). Each match slice is padded via re.pad
// before delivery.
func (re *Regexp) allMatches(s string, b []byte, n int, deliver func([]int)) {
	var end int
	if b == nil {
		end = len(s)
	} else {
		end = len(b)
	}
	for pos, i, prevMatchEnd := 0, 0, -1; i < n && pos <= end; {
		matches := re.doExecute(nil, b, s, pos, re.prog.NumCap)
		if len(matches) == 0 {
			break
		}
		accept := true
		if matches[1] == pos {
			// We've found an empty match.
			if matches[0] == prevMatchEnd {
				// We don't allow an empty match right
				// after a previous match, so ignore it.
				accept = false
			}
			// Advance past the empty match by one rune so the
			// search makes progress.
			var width int
			// TODO: use step()
			if b == nil {
				_, width = utf8.DecodeRuneInString(s[pos:end])
			} else {
				_, width = utf8.DecodeRune(b[pos:end])
			}
			if width > 0 {
				pos += width
			} else {
				// At end of input: force loop termination.
				pos = end + 1
			}
		} else {
			pos = matches[1]
		}
		prevMatchEnd = matches[1]
		if accept {
			deliver(re.pad(matches))
			i++
		}
	}
}
// Find returns a slice holding the text of the leftmost match in b of the regular expression.
// A return value of nil indicates no match.
func (re *Regexp) Find(b []byte) []byte {
	// nmatch=2 requests only the overall match's two indexes.
	a := re.doExecute(nil, b, "", 0, 2)
	if a == nil {
		return nil
	}
	return b[a[0]:a[1]]
}

// FindIndex returns a two-element slice of integers defining the location of
// the leftmost match in b of the regular expression. The match itself is at
// b[loc[0]:loc[1]].
// A return value of nil indicates no match.
func (re *Regexp) FindIndex(b []byte) (loc []int) {
	a := re.doExecute(nil, b, "", 0, 2)
	if a == nil {
		return nil
	}
	return a[0:2]
}

// FindString returns a string holding the text of the leftmost match in s of the regular
// expression. If there is no match, the return value is an empty string,
// but it will also be empty if the regular expression successfully matches
// an empty string. Use FindStringIndex or FindStringSubmatch if it is
// necessary to distinguish these cases.
func (re *Regexp) FindString(s string) string {
	a := re.doExecute(nil, nil, s, 0, 2)
	if a == nil {
		return ""
	}
	return s[a[0]:a[1]]
}

// FindStringIndex returns a two-element slice of integers defining the
// location of the leftmost match in s of the regular expression. The match
// itself is at s[loc[0]:loc[1]].
// A return value of nil indicates no match.
func (re *Regexp) FindStringIndex(s string) []int {
	a := re.doExecute(nil, nil, s, 0, 2)
	if a == nil {
		return nil
	}
	return a[0:2]
}
// FindReaderIndex returns a two-element slice of integers defining the
// location of the leftmost match of the regular expression in text read from
// the RuneReader. The match itself is at s[loc[0]:loc[1]]. A return
// value of nil indicates no match.
func (re *Regexp) FindReaderIndex(r io.RuneReader) []int {
	a := re.doExecute(r, nil, "", 0, 2)
	if a == nil {
		return nil
	}
	return a[0:2]
}

// FindSubmatch returns a slice of slices holding the text of the leftmost
// match of the regular expression in b and the matches, if any, of its
// subexpressions, as defined by the 'Submatch' descriptions in the package
// comment.
// A return value of nil indicates no match.
func (re *Regexp) FindSubmatch(b []byte) [][]byte {
	a := re.doExecute(nil, b, "", 0, re.prog.NumCap)
	if a == nil {
		return nil
	}
	ret := make([][]byte, 1+re.numSubexp)
	for i := range ret {
		// The program may track fewer captures than the regexp has
		// groups (see pad); unmatched or untracked groups stay nil.
		if 2*i < len(a) && a[2*i] >= 0 {
			ret[i] = b[a[2*i]:a[2*i+1]]
		}
	}
	return ret
}
// Expand appends template to dst and returns the result; during the
// append, Expand replaces variables in the template with corresponding
// matches drawn from src. The match slice should have been returned by
// FindSubmatchIndex.
//
// In the template, a variable is denoted by a substring of the form
// $name or ${name}, where name is a non-empty sequence of letters,
// digits, and underscores. A purely numeric name like $1 refers to
// the submatch with the corresponding index; other names refer to
// capturing parentheses named with the (?P<name>...) syntax. A
// reference to an out of range or unmatched index or a name that is not
// present in the regular expression is replaced with an empty string.
//
// In the $name form, name is taken to be as long as possible: $1x is
// equivalent to ${1x}, not ${1}x, and, $10 is equivalent to ${10}, not ${1}0.
//
// To insert a literal $ in the output, use $$ in the template.
func (re *Regexp) Expand(dst []byte, template []byte, src []byte, match []int) []byte {
	// Delegate to the shared expander, passing src as the []byte source.
	return re.expand(dst, string(template), src, "", match)
}
// ExpandString is like Expand but the template and source are strings.
// It appends to and returns a byte slice in order to give the calling
// code control over allocation.
func (re *Regexp) ExpandString(dst []byte, template string, src string, match []int) []byte {
	// Delegate to the shared expander, passing src as the string source.
	return re.expand(dst, template, nil, src, match)
}
// expand appends template to dst, replacing $name / ${name} variables with
// the corresponding submatch text drawn from bsrc (when non-nil) or src.
// match is an index-pair slice as returned by FindSubmatchIndex.
func (re *Regexp) expand(dst []byte, template string, bsrc []byte, src string, match []int) []byte {
	for len(template) > 0 {
		i := strings.Index(template, "$")
		if i < 0 {
			break
		}
		// Copy the literal text before the '$' through unchanged.
		dst = append(dst, template[:i]...)
		template = template[i:]
		if len(template) > 1 && template[1] == '$' {
			// Treat $$ as $.
			dst = append(dst, '$')
			template = template[2:]
			continue
		}
		name, num, rest, ok := extract(template)
		if !ok {
			// Malformed; treat $ as raw text.
			dst = append(dst, '$')
			template = template[1:]
			continue
		}
		template = rest
		if num >= 0 {
			// Numeric reference; out-of-range indices expand to "".
			if 2*num+1 < len(match) {
				if bsrc != nil {
					dst = append(dst, bsrc[match[2*num]:match[2*num+1]]...)
				} else {
					dst = append(dst, src[match[2*num]:match[2*num+1]]...)
				}
			}
		} else {
			// Named reference: use the first subexpression with this name
			// that participated in the match.
			for i, namei := range re.subexpNames {
				if name == namei && 2*i+1 < len(match) && match[2*i] >= 0 {
					if bsrc != nil {
						dst = append(dst, bsrc[match[2*i]:match[2*i+1]]...)
					} else {
						dst = append(dst, src[match[2*i]:match[2*i+1]]...)
					}
					break
				}
			}
		}
	}
	// Append whatever follows the last variable reference.
	dst = append(dst, template...)
	return dst
}
// extract returns the name from a leading "$name" or "${name}" in str.
// If it is a number, extract returns num set to that number; otherwise num = -1.
func extract(str string) (name string, num int, rest string, ok bool) {
	if len(str) < 2 || str[0] != '$' {
		return
	}
	brace := str[1] == '{'
	if brace {
		str = str[2:]
	} else {
		str = str[1:]
	}
	// Consume the longest run of letters, digits, and underscores.
	end := 0
	for end < len(str) {
		r, size := utf8.DecodeRuneInString(str[end:])
		if !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != '_' {
			break
		}
		end += size
	}
	if end == 0 {
		// empty name is not okay
		return
	}
	name = str[:end]
	if brace {
		if end >= len(str) || str[end] != '}' {
			// missing closing brace
			return
		}
		end++
	}
	// Parse number. Any non-digit, or a value reaching 1e8, makes it a name.
	num = 0
	for j := 0; j < len(name); j++ {
		if name[j] < '0' || '9' < name[j] || num >= 1e8 {
			num = -1
			break
		}
		num = num*10 + int(name[j]) - '0'
	}
	// Disallow leading zeros.
	if name[0] == '0' && len(name) > 1 {
		num = -1
	}
	rest = str[end:]
	ok = true
	return
}
// FindSubmatchIndex returns a slice holding the index pairs identifying the
// leftmost match of the regular expression in b and the matches, if any, of
// its subexpressions, as defined by the 'Submatch' and 'Index' descriptions
// in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindSubmatchIndex(b []byte) []int {
	// pad extends the result with -1 pairs for capture groups the compiled
	// program elided; it passes nil (no match) through unchanged.
	return re.pad(re.doExecute(nil, b, "", 0, re.prog.NumCap))
}
// FindStringSubmatch returns a slice of strings holding the text of the
// leftmost match of the regular expression in s and the matches, if any, of
// its subexpressions, as defined by the 'Submatch' description in the
// package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindStringSubmatch(s string) []string {
	a := re.doExecute(nil, nil, s, 0, re.prog.NumCap)
	if a == nil {
		return nil
	}
	ret := make([]string, 1+re.numSubexp)
	for i := range ret {
		start, end := 2*i, 2*i+1
		// Groups absent from the match keep an empty entry.
		if start < len(a) && a[start] >= 0 {
			ret[i] = s[a[start]:a[end]]
		}
	}
	return ret
}
// FindStringSubmatchIndex returns a slice holding the index pairs
// identifying the leftmost match of the regular expression in s and the
// matches, if any, of its subexpressions, as defined by the 'Submatch' and
// 'Index' descriptions in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindStringSubmatchIndex(s string) []int {
	// pad fills in -1 pairs for capture groups elided by compilation.
	return re.pad(re.doExecute(nil, nil, s, 0, re.prog.NumCap))
}
// FindReaderSubmatchIndex returns a slice holding the index pairs
// identifying the leftmost match of the regular expression of text read by
// the RuneReader, and the matches, if any, of its subexpressions, as defined
// by the 'Submatch' and 'Index' descriptions in the package comment. A
// return value of nil indicates no match.
func (re *Regexp) FindReaderSubmatchIndex(r io.RuneReader) []int {
	// pad fills in -1 pairs for capture groups elided by compilation.
	return re.pad(re.doExecute(r, nil, "", 0, re.prog.NumCap))
}
// startSize is the initial capacity of the result slices built by the
// 'All' routines.
const startSize = 10
// FindAll is the 'All' version of Find; it returns a slice of all successive
// matches of the expression, as defined by the 'All' description in the
// package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAll(b []byte, n int) [][]byte {
	if n < 0 {
		// Negative n means "no limit"; len(b)+1 bounds the match count.
		n = len(b) + 1
	}
	matches := make([][]byte, 0, startSize)
	re.allMatches("", b, n, func(m []int) {
		matches = append(matches, b[m[0]:m[1]])
	})
	if len(matches) == 0 {
		return nil
	}
	return matches
}
// FindAllIndex is the 'All' version of FindIndex; it returns a slice of all
// successive matches of the expression, as defined by the 'All' description
// in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllIndex(b []byte, n int) [][]int {
	if n < 0 {
		// Negative n means "no limit"; len(b)+1 bounds the match count.
		n = len(b) + 1
	}
	matches := make([][]int, 0, startSize)
	re.allMatches("", b, n, func(m []int) {
		matches = append(matches, m[0:2])
	})
	if len(matches) == 0 {
		return nil
	}
	return matches
}
// FindAllString is the 'All' version of FindString; it returns a slice of all
// successive matches of the expression, as defined by the 'All' description
// in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllString(s string, n int) []string {
	if n < 0 {
		// Negative n means "no limit"; len(s)+1 bounds the match count.
		n = len(s) + 1
	}
	matches := make([]string, 0, startSize)
	re.allMatches(s, nil, n, func(m []int) {
		matches = append(matches, s[m[0]:m[1]])
	})
	if len(matches) == 0 {
		return nil
	}
	return matches
}
// FindAllStringIndex is the 'All' version of FindStringIndex; it returns a
// slice of all successive matches of the expression, as defined by the 'All'
// description in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllStringIndex(s string, n int) [][]int {
	if n < 0 {
		// Negative n means "no limit"; len(s)+1 bounds the match count.
		n = len(s) + 1
	}
	matches := make([][]int, 0, startSize)
	re.allMatches(s, nil, n, func(m []int) {
		matches = append(matches, m[0:2])
	})
	if len(matches) == 0 {
		return nil
	}
	return matches
}
// FindAllSubmatch is the 'All' version of FindSubmatch; it returns a slice
// of all successive matches of the expression, as defined by the 'All'
// description in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllSubmatch(b []byte, n int) [][][]byte {
	if n < 0 {
		// Negative n means "no limit"; len(b)+1 bounds the match count.
		n = len(b) + 1
	}
	matches := make([][][]byte, 0, startSize)
	re.allMatches("", b, n, func(m []int) {
		groups := make([][]byte, len(m)/2)
		for j := range groups {
			// Groups absent from this match stay nil.
			if m[2*j] >= 0 {
				groups[j] = b[m[2*j]:m[2*j+1]]
			}
		}
		matches = append(matches, groups)
	})
	if len(matches) == 0 {
		return nil
	}
	return matches
}
// FindAllSubmatchIndex is the 'All' version of FindSubmatchIndex; it returns
// a slice of all successive matches of the expression, as defined by the
// 'All' description in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllSubmatchIndex(b []byte, n int) [][]int {
	if n < 0 {
		// Negative n means "no limit"; len(b)+1 bounds the match count.
		n = len(b) + 1
	}
	matches := make([][]int, 0, startSize)
	re.allMatches("", b, n, func(m []int) {
		matches = append(matches, m)
	})
	if len(matches) == 0 {
		return nil
	}
	return matches
}
// FindAllStringSubmatch is the 'All' version of FindStringSubmatch; it
// returns a slice of all successive matches of the expression, as defined by
// the 'All' description in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllStringSubmatch(s string, n int) [][]string {
	if n < 0 {
		// Negative n means "no limit"; len(s)+1 bounds the match count.
		n = len(s) + 1
	}
	matches := make([][]string, 0, startSize)
	re.allMatches(s, nil, n, func(m []int) {
		groups := make([]string, len(m)/2)
		for j := range groups {
			// Groups absent from this match stay empty.
			if m[2*j] >= 0 {
				groups[j] = s[m[2*j]:m[2*j+1]]
			}
		}
		matches = append(matches, groups)
	})
	if len(matches) == 0 {
		return nil
	}
	return matches
}
// FindAllStringSubmatchIndex is the 'All' version of
// FindStringSubmatchIndex; it returns a slice of all successive matches of
// the expression, as defined by the 'All' description in the package
// comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllStringSubmatchIndex(s string, n int) [][]int {
	if n < 0 {
		// Negative n means "no limit"; len(s)+1 bounds the match count.
		n = len(s) + 1
	}
	matches := make([][]int, 0, startSize)
	re.allMatches(s, nil, n, func(m []int) {
		matches = append(matches, m)
	})
	if len(matches) == 0 {
		return nil
	}
	return matches
}
regexp: name result parameters referenced from docs
Fixes issue 2953
R=golang-dev, rsc
CC=golang-dev
http://codereview.appspot.com/5653051
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package regexp implements regular expression search.
//
// The syntax of the regular expressions accepted is the same
// general syntax used by Perl, Python, and other languages.
// More precisely, it is the syntax accepted by RE2 and described at
// http://code.google.com/p/re2/wiki/Syntax, except for \C.
//
// All characters are UTF-8-encoded code points.
//
// There are 16 methods of Regexp that match a regular expression and identify
// the matched text. Their names are matched by this regular expression:
//
// Find(All)?(String)?(Submatch)?(Index)?
//
// If 'All' is present, the routine matches successive non-overlapping
// matches of the entire expression. Empty matches abutting a preceding
// match are ignored. The return value is a slice containing the successive
// return values of the corresponding non-'All' routine. These routines take
// an extra integer argument, n; if n >= 0, the function returns at most n
// matches/submatches.
//
// If 'String' is present, the argument is a string; otherwise it is a slice
// of bytes; return values are adjusted as appropriate.
//
// If 'Submatch' is present, the return value is a slice identifying the
// successive submatches of the expression. Submatches are matches of
// parenthesized subexpressions within the regular expression, numbered from
// left to right in order of opening parenthesis. Submatch 0 is the match of
// the entire expression, submatch 1 the match of the first parenthesized
// subexpression, and so on.
//
// If 'Index' is present, matches and submatches are identified by byte index
// pairs within the input string: result[2*n:2*n+1] identifies the indexes of
// the nth submatch. The pair for n==0 identifies the match of the entire
// expression. If 'Index' is not present, the match is identified by the
// text of the match/submatch. If an index is negative, it means that
// subexpression did not match any string in the input.
//
// There is also a subset of the methods that can be applied to text read
// from a RuneReader:
//
// MatchReader, FindReaderIndex, FindReaderSubmatchIndex
//
// This set may grow. Note that regular expression matches may need to
// examine text beyond the text returned by a match, so the methods that
// match text from a RuneReader may read arbitrarily far into the input
// before returning.
//
// (There are a few other methods that do not match this pattern.)
//
package regexp
import (
"bytes"
"io"
"regexp/syntax"
"strconv"
"strings"
"sync"
"unicode"
"unicode/utf8"
)
var debug = false
// Regexp is the representation of a compiled regular expression.
// The public interface is entirely through methods.
// A Regexp is safe for concurrent use by multiple goroutines.
type Regexp struct {
// read-only after Compile
expr string // as passed to Compile
prog *syntax.Prog // compiled program
prefix string // required prefix in unanchored matches
prefixBytes []byte // prefix, as a []byte
prefixComplete bool // prefix is the entire regexp
prefixRune rune // first rune in prefix
cond syntax.EmptyOp // empty-width conditions required at start of match
numSubexp int
subexpNames []string
longest bool
// cache of machines for running regexp
mu sync.Mutex
machine []*machine
}
// String returns the source text used to compile the regular expression.
func (re *Regexp) String() string {
return re.expr
}
// Compile parses a regular expression and returns, if successful,
// a Regexp object that can be used to match against text.
//
// When matching against text, the regexp returns a match that
// begins as early as possible in the input (leftmost), and among those
// it chooses the one that a backtracking search would have found first.
// This so-called leftmost-first matching is the same semantics
// that Perl, Python, and other implementations use, although this
// package implements it without the expense of backtracking.
// For POSIX leftmost-longest matching, see CompilePOSIX.
func Compile(expr string) (*Regexp, error) {
return compile(expr, syntax.Perl, false)
}
// CompilePOSIX is like Compile but restricts the regular expression
// to POSIX ERE (egrep) syntax and changes the match semantics to
// leftmost-longest.
//
// That is, when matching against text, the regexp returns a match that
// begins as early as possible in the input (leftmost), and among those
// it chooses a match that is as long as possible.
// This so-called leftmost-longest matching is the same semantics
// that early regular expression implementations used and that POSIX
// specifies.
//
// However, there can be multiple leftmost-longest matches, with different
// submatch choices, and here this package diverges from POSIX.
// Among the possible leftmost-longest matches, this package chooses
// the one that a backtracking search would have found first, while POSIX
// specifies that the match be chosen to maximize the length of the first
// subexpression, then the second, and so on from left to right.
// The POSIX rule is computationally prohibitive and not even well-defined.
// See http://swtch.com/~rsc/regexp/regexp2.html#posix for details.
func CompilePOSIX(expr string) (*Regexp, error) {
return compile(expr, syntax.POSIX, true)
}
func compile(expr string, mode syntax.Flags, longest bool) (*Regexp, error) {
re, err := syntax.Parse(expr, mode)
if err != nil {
return nil, err
}
maxCap := re.MaxCap()
capNames := re.CapNames()
re = re.Simplify()
prog, err := syntax.Compile(re)
if err != nil {
return nil, err
}
regexp := &Regexp{
expr: expr,
prog: prog,
numSubexp: maxCap,
subexpNames: capNames,
cond: prog.StartCond(),
longest: longest,
}
regexp.prefix, regexp.prefixComplete = prog.Prefix()
if regexp.prefix != "" {
// TODO(rsc): Remove this allocation by adding
// IndexString to package bytes.
regexp.prefixBytes = []byte(regexp.prefix)
regexp.prefixRune, _ = utf8.DecodeRuneInString(regexp.prefix)
}
return regexp, nil
}
// get returns a machine to use for matching re.
// It uses the re's machine cache if possible, to avoid
// unnecessary allocation.
func (re *Regexp) get() *machine {
	re.mu.Lock()
	n := len(re.machine)
	if n == 0 {
		// Cache is empty: release the lock and build a fresh machine.
		re.mu.Unlock()
		z := progMachine(re.prog)
		z.re = re
		return z
	}
	// Pop the most recently cached machine.
	z := re.machine[n-1]
	re.machine = re.machine[:n-1]
	re.mu.Unlock()
	return z
}
// put returns a machine to the re's machine cache.
// There is no attempt to limit the size of the cache, so it will
// grow to the maximum number of simultaneous matches
// run using re. (The cache empties when re gets garbage collected.)
func (re *Regexp) put(z *machine) {
	re.mu.Lock()
	defer re.mu.Unlock()
	re.machine = append(re.machine, z)
}
// MustCompile is like Compile but panics if the expression cannot be parsed.
// It simplifies safe initialization of global variables holding compiled regular
// expressions.
func MustCompile(str string) *Regexp {
	// Renamed the local from "error" to "err": the old name shadowed the
	// predeclared error type identifier inside this function.
	regexp, err := Compile(str)
	if err != nil {
		panic(`regexp: Compile(` + quote(str) + `): ` + err.Error())
	}
	return regexp
}
// MustCompilePOSIX is like CompilePOSIX but panics if the expression cannot be parsed.
// It simplifies safe initialization of global variables holding compiled regular
// expressions.
func MustCompilePOSIX(str string) *Regexp {
	// Renamed the local from "error" to "err": the old name shadowed the
	// predeclared error type identifier inside this function.
	regexp, err := CompilePOSIX(str)
	if err != nil {
		panic(`regexp: CompilePOSIX(` + quote(str) + `): ` + err.Error())
	}
	return regexp
}
// quote renders s for inclusion in an error message: backquoted when
// possible, otherwise in double-quoted Go string syntax.
func quote(s string) string {
	if !strconv.CanBackquote(s) {
		return strconv.Quote(s)
	}
	return "`" + s + "`"
}
// NumSubexp returns the number of parenthesized subexpressions in this Regexp.
func (re *Regexp) NumSubexp() int {
return re.numSubexp
}
// SubexpNames returns the names of the parenthesized subexpressions
// in this Regexp. The name for the first sub-expression is names[1],
// so that if m is a match slice, the name for m[i] is SubexpNames()[i].
// Since the Regexp as a whole cannot be named, names[0] is always
// the empty string. The slice should not be modified.
func (re *Regexp) SubexpNames() []string {
return re.subexpNames
}
const endOfText rune = -1
// input abstracts different representations of the input text. It provides
// one-character lookahead.
type input interface {
step(pos int) (r rune, width int) // advance one rune
canCheckPrefix() bool // can we look ahead without losing info?
hasPrefix(re *Regexp) bool
index(re *Regexp, pos int) int
context(pos int) syntax.EmptyOp
}
// inputString scans a string.
type inputString struct {
str string
}
func (i *inputString) step(pos int) (rune, int) {
if pos < len(i.str) {
c := i.str[pos]
if c < utf8.RuneSelf {
return rune(c), 1
}
return utf8.DecodeRuneInString(i.str[pos:])
}
return endOfText, 0
}
func (i *inputString) canCheckPrefix() bool {
return true
}
func (i *inputString) hasPrefix(re *Regexp) bool {
return strings.HasPrefix(i.str, re.prefix)
}
func (i *inputString) index(re *Regexp, pos int) int {
return strings.Index(i.str[pos:], re.prefix)
}
func (i *inputString) context(pos int) syntax.EmptyOp {
r1, r2 := endOfText, endOfText
if pos > 0 && pos <= len(i.str) {
r1, _ = utf8.DecodeLastRuneInString(i.str[:pos])
}
if pos < len(i.str) {
r2, _ = utf8.DecodeRuneInString(i.str[pos:])
}
return syntax.EmptyOpContext(r1, r2)
}
// inputBytes scans a byte slice.
type inputBytes struct {
str []byte
}
func (i *inputBytes) step(pos int) (rune, int) {
if pos < len(i.str) {
c := i.str[pos]
if c < utf8.RuneSelf {
return rune(c), 1
}
return utf8.DecodeRune(i.str[pos:])
}
return endOfText, 0
}
func (i *inputBytes) canCheckPrefix() bool {
return true
}
func (i *inputBytes) hasPrefix(re *Regexp) bool {
return bytes.HasPrefix(i.str, re.prefixBytes)
}
func (i *inputBytes) index(re *Regexp, pos int) int {
return bytes.Index(i.str[pos:], re.prefixBytes)
}
func (i *inputBytes) context(pos int) syntax.EmptyOp {
r1, r2 := endOfText, endOfText
if pos > 0 && pos <= len(i.str) {
r1, _ = utf8.DecodeLastRune(i.str[:pos])
}
if pos < len(i.str) {
r2, _ = utf8.DecodeRune(i.str[pos:])
}
return syntax.EmptyOpContext(r1, r2)
}
// inputReader scans a RuneReader.
type inputReader struct {
r io.RuneReader
atEOT bool
pos int
}
func (i *inputReader) step(pos int) (rune, int) {
if !i.atEOT && pos != i.pos {
return endOfText, 0
}
r, w, err := i.r.ReadRune()
if err != nil {
i.atEOT = true
return endOfText, 0
}
i.pos += w
return r, w
}
func (i *inputReader) canCheckPrefix() bool {
return false
}
func (i *inputReader) hasPrefix(re *Regexp) bool {
return false
}
func (i *inputReader) index(re *Regexp, pos int) int {
return -1
}
func (i *inputReader) context(pos int) syntax.EmptyOp {
return 0
}
// LiteralPrefix returns a literal string that must begin any match
// of the regular expression re. It returns the boolean true if the
// literal string comprises the entire regular expression.
func (re *Regexp) LiteralPrefix() (prefix string, complete bool) {
	// Both values were computed once, during compile, from prog.Prefix().
	return re.prefix, re.prefixComplete
}
// MatchReader returns whether the Regexp matches the text read by the
// RuneReader. The return value is a boolean: true for match, false for no
// match.
func (re *Regexp) MatchReader(r io.RuneReader) bool {
	// Only existence of a match is needed, so no index pairs are requested
	// (final argument 0, versus 2 in Find and NumCap in FindSubmatch).
	return re.doExecute(r, nil, "", 0, 0) != nil
}
// MatchString returns whether the Regexp matches the string s.
// The return value is a boolean: true for match, false for no match.
func (re *Regexp) MatchString(s string) bool {
	// Only existence of a match is needed, so no index pairs are requested.
	return re.doExecute(nil, nil, s, 0, 0) != nil
}
// Match returns whether the Regexp matches the byte slice b.
// The return value is a boolean: true for match, false for no match.
func (re *Regexp) Match(b []byte) bool {
	// Only existence of a match is needed, so no index pairs are requested.
	return re.doExecute(nil, b, "", 0, 0) != nil
}
// MatchReader checks whether a textual regular expression matches the text
// read by the RuneReader. More complicated queries need to use Compile and
// the full Regexp interface.
func MatchReader(pattern string, r io.RuneReader) (matched bool, err error) {
	// The error result was previously named "error", shadowing the
	// predeclared type identifier; renamed to the conventional err.
	re, err := Compile(pattern)
	if err != nil {
		return false, err
	}
	return re.MatchReader(r), nil
}
// MatchString checks whether a textual regular expression
// matches a string. More complicated queries need
// to use Compile and the full Regexp interface.
func MatchString(pattern string, s string) (matched bool, err error) {
	// The error result was previously named "error", shadowing the
	// predeclared type identifier; renamed to the conventional err.
	re, err := Compile(pattern)
	if err != nil {
		return false, err
	}
	return re.MatchString(s), nil
}
// Match checks whether a textual regular expression
// matches a byte slice. More complicated queries need
// to use Compile and the full Regexp interface.
func Match(pattern string, b []byte) (matched bool, err error) {
	// The error result was previously named "error", shadowing the
	// predeclared type identifier; renamed to the conventional err.
	re, err := Compile(pattern)
	if err != nil {
		return false, err
	}
	return re.Match(b), nil
}
// ReplaceAllString returns a copy of src, replacing matches of the Regexp
// with the replacement string repl. Inside repl, $ signs are interpreted as
// in Expand, so for instance $1 represents the text of the first submatch.
func (re *Regexp) ReplaceAllString(src, repl string) string {
	// Only request capture-group indices when the replacement can
	// actually reference them.
	n := 2
	if strings.Contains(repl, "$") {
		n = 2 * (re.numSubexp + 1)
	}
	out := re.replaceAll(nil, src, n, func(dst []byte, match []int) []byte {
		return re.expand(dst, repl, nil, src, match)
	})
	return string(out)
}
// ReplaceAllLiteralString returns a copy of src, replacing matches of the Regexp
// with the replacement string repl. The replacement repl is substituted directly,
// without using Expand.
// (The comment previously misnamed the function "ReplaceAllStringLiteral".)
func (re *Regexp) ReplaceAllLiteralString(src, repl string) string {
	return string(re.replaceAll(nil, src, 2, func(dst []byte, match []int) []byte {
		return append(dst, repl...)
	}))
}
// ReplaceAllStringFunc returns a copy of src in which all matches of the
// Regexp have been replaced by the return value of function repl applied
// to the matched substring. The replacement returned by repl is substituted
// directly, without using Expand.
func (re *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string {
	// Only the overall match bounds (2 indices) are needed, since repl
	// receives the full matched text.
	b := re.replaceAll(nil, src, 2, func(dst []byte, match []int) []byte {
		return append(dst, repl(src[match[0]:match[1]])...)
	})
	return string(b)
}
// replaceAll is the shared implementation of the ReplaceAll* methods.
// It scans bsrc (when non-nil) or src, calls repl to append each match's
// replacement to the output, and copies unmatched text through unchanged.
// nmatch is the number of match indices to request from the engine.
func (re *Regexp) replaceAll(bsrc []byte, src string, nmatch int, repl func(dst []byte, m []int) []byte) []byte {
	lastMatchEnd := 0 // end position of the most recent match
	searchPos := 0    // position where we next look for a match
	var buf []byte
	var endPos int
	if bsrc != nil {
		endPos = len(bsrc)
	} else {
		endPos = len(src)
	}
	for searchPos <= endPos {
		a := re.doExecute(nil, bsrc, src, searchPos, nmatch)
		if len(a) == 0 {
			break // no more matches
		}
		// Copy the unmatched characters before this match.
		if bsrc != nil {
			buf = append(buf, bsrc[lastMatchEnd:a[0]]...)
		} else {
			buf = append(buf, src[lastMatchEnd:a[0]]...)
		}
		// Now insert a copy of the replacement string, but not for a
		// match of the empty string immediately after another match.
		// (Otherwise, we get double replacement for patterns that
		// match both empty and nonempty strings.)
		if a[1] > lastMatchEnd || a[0] == 0 {
			buf = repl(buf, a)
		}
		lastMatchEnd = a[1]
		// Advance past this match; always advance at least one character.
		var width int
		if bsrc != nil {
			_, width = utf8.DecodeRune(bsrc[searchPos:])
		} else {
			_, width = utf8.DecodeRuneInString(src[searchPos:])
		}
		if searchPos+width > a[1] {
			searchPos += width
		} else if searchPos+1 > a[1] {
			// This clause is only needed at the end of the input
			// string. In that case, DecodeRuneInString returns width=0.
			searchPos++
		} else {
			searchPos = a[1]
		}
	}
	// Copy the unmatched characters after the last match.
	if bsrc != nil {
		buf = append(buf, bsrc[lastMatchEnd:]...)
	} else {
		buf = append(buf, src[lastMatchEnd:]...)
	}
	return buf
}
// ReplaceAll returns a copy of src, replacing matches of the Regexp
// with the replacement string repl. Inside repl, $ signs are interpreted as
// in Expand, so for instance $1 represents the text of the first submatch.
func (re *Regexp) ReplaceAll(src, repl []byte) []byte {
	// Only request capture-group indices when the replacement can
	// actually reference them.
	n := 2
	if bytes.IndexByte(repl, '$') >= 0 {
		n = 2 * (re.numSubexp + 1)
	}
	srepl := ""
	b := re.replaceAll(src, "", n, func(dst []byte, match []int) []byte {
		// Convert repl to a string lazily (at most once, and only if a
		// match actually occurs); expand takes a string template.
		if len(srepl) != len(repl) {
			srepl = string(repl)
		}
		return re.expand(dst, srepl, src, "", match)
	})
	return b
}
// ReplaceAllLiteral returns a copy of src, replacing matches of the Regexp
// with the replacement bytes repl. The replacement repl is substituted directly,
// without using Expand.
func (re *Regexp) ReplaceAllLiteral(src, repl []byte) []byte {
	// The replacement ignores the match indices entirely.
	literal := func(dst []byte, match []int) []byte {
		return append(dst, repl...)
	}
	return re.replaceAll(src, "", 2, literal)
}
// ReplaceAllFunc returns a copy of src in which all matches of the
// Regexp have been replaced by the return value of function repl applied
// to the matched byte slice. The replacement returned by repl is substituted
// directly, without using Expand.
func (re *Regexp) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte {
	// Only the overall match bounds (2 indices) are needed, since repl
	// receives the full matched text.
	return re.replaceAll(src, "", 2, func(dst []byte, match []int) []byte {
		return append(dst, repl(src[match[0]:match[1]])...)
	})
}
// specialBytes is the set of bytes that QuoteMeta must escape: the
// regular expression metacharacters.
var specialBytes = []byte(`\.+*?()|[]{}^$`)

// special reports whether b is a regular expression metacharacter.
func special(b byte) bool {
	return bytes.IndexByte(specialBytes, b) >= 0
}
// QuoteMeta returns a string that quotes all regular expression metacharacters
// inside the argument text; the returned string is a regular expression matching
// the literal text. For example, QuoteMeta(`[foo]`) returns `\[foo\]`.
func QuoteMeta(s string) string {
	// A byte loop is correct because all metacharacters are ASCII.
	// Worst case is every byte escaped, hence capacity 2*len(s).
	b := make([]byte, 0, 2*len(s))
	for i := 0; i < len(s); i++ {
		if special(s[i]) {
			b = append(b, '\\')
		}
		b = append(b, s[i])
	}
	return string(b)
}
// The number of capture values in the program may correspond
// to fewer capturing expressions than are in the regexp.
// For example, "(a){0}" turns into an empty program, so the
// maximum capture in the program is 0 but we need to return
// an expression for \1. Pad appends -1s to the slice a as needed.
func (re *Regexp) pad(a []int) []int {
	if a == nil {
		// No match.
		return nil
	}
	want := (1 + re.numSubexp) * 2
	for len(a) < want {
		a = append(a, -1)
	}
	return a
}
// allMatches calls deliver with up to n successive matches found in slice b
// if b is non-nil, otherwise in string s. Each delivered match slice is
// first padded (see pad). Empty matches immediately following a previous
// match are skipped.
func (re *Regexp) allMatches(s string, b []byte, n int, deliver func([]int)) {
	var end int
	if b == nil {
		end = len(s)
	} else {
		end = len(b)
	}
	for pos, i, prevMatchEnd := 0, 0, -1; i < n && pos <= end; {
		matches := re.doExecute(nil, b, s, pos, re.prog.NumCap)
		if len(matches) == 0 {
			break
		}
		accept := true
		if matches[1] == pos {
			// We've found an empty match.
			if matches[0] == prevMatchEnd {
				// We don't allow an empty match right
				// after a previous match, so ignore it.
				accept = false
			}
			var width int
			// TODO: use step()
			if b == nil {
				_, width = utf8.DecodeRuneInString(s[pos:end])
			} else {
				_, width = utf8.DecodeRune(b[pos:end])
			}
			if width > 0 {
				pos += width
			} else {
				// width == 0 means we are at the end of the input;
				// force the loop condition to fail.
				pos = end + 1
			}
		} else {
			pos = matches[1]
		}
		prevMatchEnd = matches[1]
		if accept {
			deliver(re.pad(matches))
			i++
		}
	}
}
// Find returns a slice holding the text of the leftmost match in b of the regular expression.
// A return value of nil indicates no match.
func (re *Regexp) Find(b []byte) []byte {
	if a := re.doExecute(nil, b, "", 0, 2); a != nil {
		return b[a[0]:a[1]]
	}
	return nil
}
// FindIndex returns a two-element slice of integers defining the location of
// the leftmost match in b of the regular expression. The match itself is at
// b[loc[0]:loc[1]].
// A return value of nil indicates no match.
func (re *Regexp) FindIndex(b []byte) (loc []int) {
	// Ask the engine for just the overall match pair (2 indices).
	if a := re.doExecute(nil, b, "", 0, 2); a != nil {
		loc = a[0:2]
	}
	return
}
// FindString returns a string holding the text of the leftmost match in s of the regular
// expression. If there is no match, the return value is an empty string,
// but it will also be empty if the regular expression successfully matches
// an empty string. Use FindStringIndex or FindStringSubmatch if it is
// necessary to distinguish these cases.
func (re *Regexp) FindString(s string) string {
	if a := re.doExecute(nil, nil, s, 0, 2); a != nil {
		return s[a[0]:a[1]]
	}
	return ""
}
// FindStringIndex returns a two-element slice of integers defining the
// location of the leftmost match in s of the regular expression. The match
// itself is at s[loc[0]:loc[1]].
// A return value of nil indicates no match.
func (re *Regexp) FindStringIndex(s string) (loc []int) {
	// Ask the engine for just the overall match pair (2 indices).
	if a := re.doExecute(nil, nil, s, 0, 2); a != nil {
		loc = a[0:2]
	}
	return
}
// FindReaderIndex returns a two-element slice of integers defining the
// location of the leftmost match of the regular expression in text read from
// the RuneReader. The match text was found in the input stream at byte
// offset loc[0] through loc[1]-1.
// A return value of nil indicates no match.
func (re *Regexp) FindReaderIndex(r io.RuneReader) (loc []int) {
	// Ask the engine for just the overall match pair (2 indices).
	if a := re.doExecute(r, nil, "", 0, 2); a != nil {
		loc = a[0:2]
	}
	return
}
// FindSubmatch returns a slice of slices holding the text of the leftmost
// match of the regular expression in b and the matches, if any, of its
// subexpressions, as defined by the 'Submatch' descriptions in the package
// comment.
// A return value of nil indicates no match.
func (re *Regexp) FindSubmatch(b []byte) [][]byte {
	a := re.doExecute(nil, b, "", 0, re.prog.NumCap)
	if a == nil {
		return nil
	}
	ret := make([][]byte, 1+re.numSubexp)
	for i := range ret {
		start, end := 2*i, 2*i+1
		// Groups absent from the match keep a nil entry.
		if start < len(a) && a[start] >= 0 {
			ret[i] = b[a[start]:a[end]]
		}
	}
	return ret
}
// Expand appends template to dst and returns the result; during the
// append, Expand replaces variables in the template with corresponding
// matches drawn from src. The match slice should have been returned by
// FindSubmatchIndex.
//
// In the template, a variable is denoted by a substring of the form
// $name or ${name}, where name is a non-empty sequence of letters,
// digits, and underscores. A purely numeric name like $1 refers to
// the submatch with the corresponding index; other names refer to
// capturing parentheses named with the (?P<name>...) syntax. A
// reference to an out of range or unmatched index or a name that is not
// present in the regular expression is replaced with an empty string.
//
// In the $name form, name is taken to be as long as possible: $1x is
// equivalent to ${1x}, not ${1}x, and, $10 is equivalent to ${10}, not ${1}0.
//
// To insert a literal $ in the output, use $$ in the template.
func (re *Regexp) Expand(dst []byte, template []byte, src []byte, match []int) []byte {
return re.expand(dst, string(template), src, "", match)
}
// ExpandString is like Expand but the template and source are strings.
// It appends to and returns a byte slice in order to give the calling
// code control over allocation.
func (re *Regexp) ExpandString(dst []byte, template string, src string, match []int) []byte {
	// No byte source: pass nil so expand reads from the string form.
	out := re.expand(dst, template, nil, src, match)
	return out
}
// expand is the shared implementation of Expand and ExpandString. It appends
// template to dst, substituting $name/${name} references with the submatch
// text identified by the index pairs in match. Exactly one of bsrc (byte
// source) and src (string source) is consulted.
func (re *Regexp) expand(dst []byte, template string, bsrc []byte, src string, match []int) []byte {
	for len(template) > 0 {
		i := strings.Index(template, "$")
		if i < 0 {
			break
		}
		// Copy the literal text preceding the '$'.
		dst = append(dst, template[:i]...)
		template = template[i:]
		if len(template) > 1 && template[1] == '$' {
			// Treat $$ as $.
			dst = append(dst, '$')
			template = template[2:]
			continue
		}
		name, num, rest, ok := extract(template)
		if !ok {
			// Malformed; treat $ as raw text.
			dst = append(dst, '$')
			template = template[1:]
			continue
		}
		template = rest
		if num >= 0 {
			// Numeric reference. Also require match[2*num] >= 0: an
			// unmatched group has index -1 and slicing with it would panic,
			// whereas out-of-range/unmatched references must expand to "".
			if 2*num+1 < len(match) && match[2*num] >= 0 {
				if bsrc != nil {
					dst = append(dst, bsrc[match[2*num]:match[2*num+1]]...)
				} else {
					dst = append(dst, src[match[2*num]:match[2*num+1]]...)
				}
			}
		} else {
			// Named reference: find the capture group with this name.
			for i, namei := range re.subexpNames {
				if name == namei && 2*i+1 < len(match) && match[2*i] >= 0 {
					if bsrc != nil {
						dst = append(dst, bsrc[match[2*i]:match[2*i+1]]...)
					} else {
						dst = append(dst, src[match[2*i]:match[2*i+1]]...)
					}
					break
				}
			}
		}
	}
	// Append whatever literal text remains after the last reference.
	dst = append(dst, template...)
	return dst
}
// extract returns the name from a leading "$name" or "${name}" in str.
// If the name is purely numeric, num is set to that number; otherwise
// num = -1. rest is the remainder of str after the reference, and ok
// reports whether a well-formed name was found.
func extract(str string) (name string, num int, rest string, ok bool) {
	if len(str) < 2 || str[0] != '$' {
		return
	}
	braced := str[1] == '{'
	if braced {
		str = str[2:]
	} else {
		str = str[1:]
	}
	// Take the longest run of letters, digits, and underscores.
	n := 0
	for n < len(str) {
		r, w := utf8.DecodeRuneInString(str[n:])
		if !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != '_' {
			break
		}
		n += w
	}
	if n == 0 {
		// empty name is not okay
		return
	}
	name = str[:n]
	if braced {
		if n >= len(str) || str[n] != '}' {
			// missing closing brace
			return
		}
		n++
	}
	// Interpret the name as a number when it is all digits.
	num = 0
	for j := 0; j < len(name); j++ {
		c := name[j]
		if c < '0' || c > '9' || num >= 1e8 {
			num = -1
			break
		}
		num = num*10 + int(c-'0')
	}
	// A multi-digit name with a leading zero is not a number.
	if len(name) > 1 && name[0] == '0' {
		num = -1
	}
	rest = str[n:]
	ok = true
	return
}
// FindSubmatchIndex returns a slice holding the index pairs identifying the
// leftmost match of the regular expression in b and the matches, if any, of
// its subexpressions, as defined by the 'Submatch' and 'Index' descriptions
// in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindSubmatchIndex(b []byte) []int {
	match := re.doExecute(nil, b, "", 0, re.prog.NumCap)
	return re.pad(match)
}
// FindStringSubmatch returns a slice of strings holding the text of the
// leftmost match of the regular expression in s and the matches, if any, of
// its subexpressions, as defined by the 'Submatch' description in the
// package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindStringSubmatch(s string) []string {
	match := re.doExecute(nil, nil, s, 0, re.prog.NumCap)
	if match == nil {
		return nil
	}
	ret := make([]string, 1+re.numSubexp)
	for i := 0; i < len(ret); i++ {
		// A negative start index marks a subexpression that did not match.
		if lo := 2 * i; lo < len(match) && match[lo] >= 0 {
			ret[i] = s[match[lo]:match[lo+1]]
		}
	}
	return ret
}
// FindStringSubmatchIndex returns a slice holding the index pairs
// identifying the leftmost match of the regular expression in s and the
// matches, if any, of its subexpressions, as defined by the 'Submatch' and
// 'Index' descriptions in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindStringSubmatchIndex(s string) []int {
	match := re.doExecute(nil, nil, s, 0, re.prog.NumCap)
	return re.pad(match)
}
// FindReaderSubmatchIndex returns a slice holding the index pairs
// identifying the leftmost match of the regular expression of text read by
// the RuneReader, and the matches, if any, of its subexpressions, as defined
// by the 'Submatch' and 'Index' descriptions in the package comment. A
// return value of nil indicates no match.
func (re *Regexp) FindReaderSubmatchIndex(r io.RuneReader) []int {
	match := re.doExecute(r, nil, "", 0, re.prog.NumCap)
	return re.pad(match)
}
const startSize = 10 // The size at which to start a slice in the 'All' routines.
// FindAll is the 'All' version of Find; it returns a slice of all successive
// matches of the expression, as defined by the 'All' description in the
// package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAll(b []byte, n int) [][]byte {
	if n < 0 {
		n = len(b) + 1
	}
	var result [][]byte // stays nil when there are no matches
	re.allMatches("", b, n, func(match []int) {
		result = append(result, b[match[0]:match[1]])
	})
	return result
}
// FindAllIndex is the 'All' version of FindIndex; it returns a slice of all
// successive matches of the expression, as defined by the 'All' description
// in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllIndex(b []byte, n int) [][]int {
	if n < 0 {
		n = len(b) + 1
	}
	var result [][]int // stays nil when there are no matches
	re.allMatches("", b, n, func(match []int) {
		result = append(result, match[:2])
	})
	return result
}
// FindAllString is the 'All' version of FindString; it returns a slice of all
// successive matches of the expression, as defined by the 'All' description
// in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllString(s string, n int) []string {
	if n < 0 {
		n = len(s) + 1
	}
	var result []string // stays nil when there are no matches
	re.allMatches(s, nil, n, func(match []int) {
		result = append(result, s[match[0]:match[1]])
	})
	return result
}
// FindAllStringIndex is the 'All' version of FindStringIndex; it returns a
// slice of all successive matches of the expression, as defined by the 'All'
// description in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllStringIndex(s string, n int) [][]int {
	if n < 0 {
		n = len(s) + 1
	}
	var result [][]int // stays nil when there are no matches
	re.allMatches(s, nil, n, func(match []int) {
		result = append(result, match[:2])
	})
	return result
}
// FindAllSubmatch is the 'All' version of FindSubmatch; it returns a slice
// of all successive matches of the expression, as defined by the 'All'
// description in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllSubmatch(b []byte, n int) [][][]byte {
	if n < 0 {
		n = len(b) + 1
	}
	var result [][][]byte // stays nil when there are no matches
	re.allMatches("", b, n, func(match []int) {
		group := make([][]byte, len(match)/2)
		for j := 0; j < len(group); j++ {
			// A negative index marks an unmatched subexpression.
			if match[2*j] >= 0 {
				group[j] = b[match[2*j]:match[2*j+1]]
			}
		}
		result = append(result, group)
	})
	return result
}
// FindAllSubmatchIndex is the 'All' version of FindSubmatchIndex; it returns
// a slice of all successive matches of the expression, as defined by the
// 'All' description in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllSubmatchIndex(b []byte, n int) [][]int {
	if n < 0 {
		n = len(b) + 1
	}
	var result [][]int // stays nil when there are no matches
	re.allMatches("", b, n, func(match []int) {
		result = append(result, match)
	})
	return result
}
// FindAllStringSubmatch is the 'All' version of FindStringSubmatch; it
// returns a slice of all successive matches of the expression, as defined by
// the 'All' description in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllStringSubmatch(s string, n int) [][]string {
	if n < 0 {
		n = len(s) + 1
	}
	var result [][]string // stays nil when there are no matches
	re.allMatches(s, nil, n, func(match []int) {
		group := make([]string, len(match)/2)
		for j := 0; j < len(group); j++ {
			// A negative index marks an unmatched subexpression.
			if match[2*j] >= 0 {
				group[j] = s[match[2*j]:match[2*j+1]]
			}
		}
		result = append(result, group)
	})
	return result
}
// FindAllStringSubmatchIndex is the 'All' version of
// FindStringSubmatchIndex; it returns a slice of all successive matches of
// the expression, as defined by the 'All' description in the package
// comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllStringSubmatchIndex(s string, n int) [][]int {
	if n < 0 {
		n = len(s) + 1
	}
	var result [][]int // stays nil when there are no matches
	re.allMatches(s, nil, n, func(match []int) {
		result = append(result, match)
	})
	return result
}
|
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package regexp implements a simple regular expression library.
//
// The syntax of the regular expressions accepted is:
//
// regexp:
// concatenation { '|' concatenation }
// concatenation:
// { closure }
// closure:
// term [ '*' | '+' | '?' ]
// term:
// '^'
// '$'
// '.'
// character
// '[' [ '^' ] { character-range } ']'
// '(' regexp ')'
// character-range:
// character [ '-' character ]
//
// All characters are UTF-8-encoded code points. Backslashes escape special
// characters, including inside character classes. The standard Go character
// escapes are also recognized: \a \b \f \n \r \t \v.
//
// There are 16 methods of Regexp that match a regular expression and identify
// the matched text. Their names are matched by this regular expression:
//
// Find(All)?(String)?(Submatch)?(Index)?
//
// If 'All' is present, the routine matches successive non-overlapping
// matches of the entire expression. Empty matches abutting a preceding
// match are ignored. The return value is a slice containing the successive
// return values of the corresponding non-'All' routine. These routines take
// an extra integer argument, n; if n >= 0, the function returns at most n
// matches/submatches.
//
// If 'String' is present, the argument is a string; otherwise it is a slice
// of bytes; return values are adjusted as appropriate.
//
// If 'Submatch' is present, the return value is a slice identifying the
// successive submatches of the expression. Submatches are matches of
// parenthesized subexpressions within the regular expression, numbered from
// left to right in order of opening parenthesis. Submatch 0 is the match of
// the entire expression, submatch 1 the match of the first parenthesized
// subexpression, and so on.
//
// If 'Index' is present, matches and submatches are identified by byte index
// pairs within the input string: result[2*n:2*n+2] identifies the indexes of
// the nth submatch. The pair for n==0 identifies the match of the entire
// expression. If 'Index' is not present, the match is identified by the
// text of the match/submatch. If an index is negative, it means that
// subexpression did not match any string in the input.
//
// There is also a subset of the methods that can be applied to text read
// from a RuneReader:
//
// MatchReader, FindReaderIndex, FindReaderSubmatchIndex
//
// This set may grow. Note that regular expression matches may need to
// examine text beyond the text returned by a match, so the methods that
// match text from a RuneReader may read arbitrarily far into the input
// before returning.
//
// (There are a few other methods that do not match this pattern.)
//
package regexp
import (
"bytes"
"io"
"os"
"strings"
"utf8"
)
// debug, when true, makes the parser and matcher dump their state to
// standard output as they run.
var debug = false
// Error is the local type for a parsing error.
type Error string
// String returns the error text; it satisfies the pre-Go 1 os.Error interface.
func (e Error) String() string {
	return string(e)
}
// Error codes returned by failures to parse an expression.
var (
	ErrInternal            = Error("internal error")
	ErrUnmatchedLpar       = Error("unmatched '('")
	ErrUnmatchedRpar       = Error("unmatched ')'")
	ErrUnmatchedLbkt       = Error("unmatched '['")
	ErrUnmatchedRbkt       = Error("unmatched ']'")
	ErrBadRange            = Error("bad range in character class")
	ErrExtraneousBackslash = Error("extraneous backslash")
	ErrBadClosure          = Error("repeated closure (**, ++, etc.)")
	ErrBareClosure         = Error("closure applies to nothing")
	ErrBadBackslash        = Error("illegal backslash escape")
)
// Opcodes for the instructions of the compiled NFA program.
const (
	iStart     = iota // beginning of program
	iEnd              // end of program: success
	iBOT              // '^' beginning of text
	iEOT              // '$' end of text
	iChar             // 'a' regular character
	iCharClass        // [a-z] character class
	iAny              // '.' any character including newline
	iNotNL            // [^\n] special case: any character but newline
	iBra              // '(' parenthesized expression: 2*braNum for left, 2*braNum+1 for right
	iAlt              // '|' alternation
	iNop              // do nothing; makes it easy to link without patching
)
// An instruction executed by the NFA
type instr struct {
	kind  int    // the type of this instruction: iChar, iAny, etc.
	index int    // used only in debugging; could be eliminated
	next  *instr // the instruction to execute after this one
	// Special fields valid only for some items.
	char   int        // iChar
	braNum int        // iBra, iEbra
	cclass *charClass // iCharClass
	left   *instr     // iAlt, other branch
}
// print writes a human-readable form of the instruction to standard
// output; used by (*Regexp).dump for debugging.
func (i *instr) print() {
	switch i.kind {
	case iStart:
		print("start")
	case iEnd:
		print("end")
	case iBOT:
		print("bot")
	case iEOT:
		print("eot")
	case iChar:
		print("char ", string(i.char))
	case iCharClass:
		i.cclass.print()
	case iAny:
		print("any")
	case iNotNL:
		print("notnl")
	case iBra:
		// Even braNum marks a '(' position, odd marks the matching ')'.
		if i.braNum&1 == 0 {
			print("bra", i.braNum/2)
		} else {
			print("ebra", i.braNum/2)
		}
	case iAlt:
		print("alt(", i.left.index, ")")
	case iNop:
		print("nop")
	}
}
// Regexp is the representation of a compiled regular expression.
// The public interface is entirely through methods.
type Regexp struct {
	expr        string   // the original expression
	prefix      string   // initial plain text string
	prefixBytes []byte   // initial plain text bytes
	inst        []*instr // the compiled instruction list
	start       *instr   // first instruction of machine
	prefixStart *instr   // where to start if there is a prefix
	nbra        int      // number of brackets in expression, for subexpressions
}
// charClass is the range set behind an iCharClass instruction.
type charClass struct {
	negate bool // is character class negated? ([^a-z])
	// slice of int, stored pairwise: [a-z] is (a,z); x is (x,x):
	ranges     []int
	cmin, cmax int // cached bounds over all ranges, for a quick reject
}
// print writes a human-readable form of the class to standard output,
// for debugging.
func (cclass *charClass) print() {
	print("charclass")
	if cclass.negate {
		print(" (negated)")
	}
	for i := 0; i < len(cclass.ranges); i += 2 {
		l := cclass.ranges[i]
		r := cclass.ranges[i+1]
		if l == r {
			print(" [", string(l), "]")
	} else {
			print(" [", string(l), "-", string(r), "]")
		}
	}
}
// addRange records the inclusive range [a, b] in the class and keeps the
// cached cmin/cmax bounds up to date.
func (cclass *charClass) addRange(a, b int) {
	cclass.ranges = append(cclass.ranges, a, b)
	if cclass.cmin > a {
		cclass.cmin = a
	}
	if cclass.cmax < b {
		cclass.cmax = b
	}
}
// matches reports whether c is accepted by the class, honoring negation.
// The cached cmin/cmax bounds give a quick rejection before scanning ranges.
func (cclass *charClass) matches(c int) bool {
	if c >= cclass.cmin && c <= cclass.cmax {
		for i := 0; i < len(cclass.ranges); i += 2 {
			if cclass.ranges[i] <= c && c <= cclass.ranges[i+1] {
				return !cclass.negate
			}
		}
	}
	return cclass.negate
}
// newCharClass allocates an iCharClass instruction holding an empty class
// whose cached bounds are primed so the first addRange always sets them.
func newCharClass() *instr {
	cc := new(charClass)
	cc.ranges = make([]int, 0, 4)
	cc.cmin = 0x10FFFF + 1 // MaxRune + 1: above any real character
	cc.cmax = -1           // below any real character
	return &instr{kind: iCharClass, cclass: cc}
}
// add appends i to the program, assigning it the next instruction index,
// and returns i for convenient chaining.
func (re *Regexp) add(i *instr) *instr {
	i.index = len(re.inst)
	re.inst = append(re.inst, i)
	return i
}
// parser holds the scanning state while compiling an expression.
type parser struct {
	re    *Regexp
	nlpar int // number of unclosed lpars
	pos   int // byte offset of the next character in re.expr
	ch    int // current character; endOfText once input is exhausted
}
// error aborts parsing by panicking; Compile recovers the Error value.
func (p *parser) error(err Error) {
	panic(err)
}
// endOfText is the sentinel character value reported at end of input.
const endOfText = -1
// c returns the current character without consuming it.
func (p *parser) c() int { return p.ch }
// nextc consumes the current character, decodes the next UTF-8 rune from
// the expression, and returns it.
func (p *parser) nextc() int {
	if p.pos >= len(p.re.expr) {
		p.ch = endOfText
	} else {
		c, w := utf8.DecodeRuneInString(p.re.expr[p.pos:])
		p.ch = c
		p.pos += w
	}
	return p.ch
}
// newParser returns a parser for re with the first character preloaded.
func newParser(re *Regexp) *parser {
	p := &parser{re: re}
	p.nextc() // prime p.ch
	return p
}
// special reports whether c is a regexp metacharacter that must be
// backslash-escaped to be matched literally.
// The explicit int conversion is required: under Go 1 and later, ranging
// over a string yields rune values, which cannot be compared to an int
// without conversion (pre-Go 1 range produced int, so both forms agree).
func special(c int) bool {
	for _, r := range `\.+*?()|[]^$` {
		if c == int(r) {
			return true
		}
	}
	return false
}
// ispunct reports whether c is an ASCII punctuation character; these are
// the characters allowed to follow a backslash and stand for themselves.
// The explicit int conversion is required: under Go 1 and later, ranging
// over a string yields rune values, which cannot be compared to an int
// without conversion (pre-Go 1 range produced int, so both forms agree).
func ispunct(c int) bool {
	for _, r := range "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~" {
		if c == int(r) {
			return true
		}
	}
	return false
}
var escapes = []byte("abfnrtv")
var escaped = []byte("\a\b\f\n\r\t\v")
// escape returns the position of c in the escape-letter table escapes, or
// -1 if c is not a recognized escape letter. The corresponding control
// character sits at the same position in escaped.
func escape(c int) int {
	for i := 0; i < len(escapes); i++ {
		if int(escapes[i]) == c {
			return i
		}
	}
	return -1
}
// checkBackslash processes a possible backslash escape at the current
// character, returning the (possibly decoded) character it denotes. The
// parser is left positioned on that character; callers still nextc() past
// it. Panics via p.error on a dangling or illegal escape.
func (p *parser) checkBackslash() int {
	c := p.c()
	if c == '\\' {
		c = p.nextc()
		switch {
		case c == endOfText:
			p.error(ErrExtraneousBackslash)
		case ispunct(c):
			// c is as delivered
		case escape(c) >= 0:
			c = int(escaped[escape(c)])
		default:
			p.error(ErrBadBackslash)
		}
	}
	return c
}
// charClass parses the body of a bracket expression (the '[' has already
// been consumed) up to, but not consuming, the closing ']'. Two common
// cases are lowered to cheaper opcodes: [^\n] becomes iNotNL and a
// single-character class becomes a plain iChar.
func (p *parser) charClass() *instr {
	i := newCharClass()
	cc := i.cclass
	if p.c() == '^' {
		cc.negate = true
		p.nextc()
	}
	left := -1 // pending low end of an a-b range; -1 means none
	for {
		switch c := p.c(); c {
		case ']', endOfText:
			if left >= 0 {
				p.error(ErrBadRange)
			}
			// Is it [^\n]?
			if cc.negate && len(cc.ranges) == 2 &&
				cc.ranges[0] == '\n' && cc.ranges[1] == '\n' {
				nl := &instr{kind: iNotNL}
				p.re.add(nl)
				return nl
			}
			// Special common case: "[a]" -> "a"
			if !cc.negate && len(cc.ranges) == 2 && cc.ranges[0] == cc.ranges[1] {
				c := &instr{kind: iChar, char: cc.ranges[0]}
				p.re.add(c)
				return c
			}
			p.re.add(i)
			return i
		case '-': // do this before backslash processing
			p.error(ErrBadRange)
		default:
			c = p.checkBackslash()
			p.nextc()
			switch {
			case left < 0: // first of pair
				if p.c() == '-' { // range
					p.nextc()
					left = c
				} else { // single char
					cc.addRange(c, c)
				}
			case left <= c: // second of pair
				cc.addRange(left, c)
				left = -1
			default:
				p.error(ErrBadRange)
			}
		}
	}
	panic("unreachable")
}
// term parses one syntactic unit: an anchor, '.', a character class, a
// parenthesized subexpression, or a literal (possibly escaped) character.
// It returns the first and last instructions of the compiled fragment;
// nil start means no term was present (end of alternation or expression).
func (p *parser) term() (start, end *instr) {
	switch c := p.c(); c {
	case '|', endOfText:
		return nil, nil
	case '*', '+', '?':
		p.error(ErrBareClosure)
	case ')':
		if p.nlpar == 0 {
			p.error(ErrUnmatchedRpar)
		}
		return nil, nil
	case ']':
		p.error(ErrUnmatchedRbkt)
	case '^':
		p.nextc()
		start = p.re.add(&instr{kind: iBOT})
		return start, start
	case '$':
		p.nextc()
		start = p.re.add(&instr{kind: iEOT})
		return start, start
	case '.':
		p.nextc()
		start = p.re.add(&instr{kind: iAny})
		return start, start
	case '[':
		p.nextc()
		start = p.charClass()
		if p.c() != ']' {
			p.error(ErrUnmatchedLbkt)
		}
		p.nextc()
		return start, start
	case '(':
		p.nextc()
		p.nlpar++
		p.re.nbra++ // increment first so first subexpr is \1
		nbra := p.re.nbra
		start, end = p.regexp()
		if p.c() != ')' {
			p.error(ErrUnmatchedLpar)
		}
		p.nlpar--
		p.nextc()
		// Bracket the fragment with a pair of iBra instructions so the
		// matcher records the submatch boundaries as it passes them.
		bra := &instr{kind: iBra, braNum: 2 * nbra}
		p.re.add(bra)
		ebra := &instr{kind: iBra, braNum: 2*nbra + 1}
		p.re.add(ebra)
		if start == nil {
			if end == nil {
				p.error(ErrInternal)
				return
			}
			// Empty group: the open bracket links straight to the close.
			start = ebra
		} else {
			end.next = ebra
		}
		bra.next = start
		return bra, ebra
	default:
		c = p.checkBackslash()
		p.nextc()
		start = &instr{kind: iChar, char: c}
		p.re.add(start)
		return start, start
	}
	panic("unreachable")
}
// closure parses a term followed by an optional closure operator
// ('*', '+', '?') and builds the iAlt/iNop wiring that implements it.
func (p *parser) closure() (start, end *instr) {
	start, end = p.term()
	if start == nil {
		return
	}
	switch p.c() {
	case '*':
		// (start,end)*:
		alt := &instr{kind: iAlt}
		p.re.add(alt)
		end.next = alt   // after end, do alt
		alt.left = start // alternate branch: return to start
		start = alt      // alt becomes new (start, end)
		end = alt
	case '+':
		// (start,end)+:
		alt := &instr{kind: iAlt}
		p.re.add(alt)
		end.next = alt   // after end, do alt
		alt.left = start // alternate branch: return to start
		end = alt        // start is unchanged; end is alt
	case '?':
		// (start,end)?:
		alt := &instr{kind: iAlt}
		p.re.add(alt)
		nop := &instr{kind: iNop}
		p.re.add(nop)
		alt.left = start // alternate branch is start
		alt.next = nop   // follow on to nop
		end.next = nop   // after end, go to nop
		start = alt      // start is now alt
		end = nop        // end is nop pointed to by both branches
	default:
		return
	}
	// A second closure character in a row (e.g. "**") is an error.
	switch p.nextc() {
	case '*', '+', '?':
		p.error(ErrBadClosure)
	}
	return
}
// concatenation parses a run of adjacent closures, linking each fragment's
// end to the next fragment's start. An empty concatenation (empty string)
// compiles to a single iNop.
func (p *parser) concatenation() (start, end *instr) {
	for {
		nstart, nend := p.closure()
		switch {
		case nstart == nil: // end of this concatenation
			if start == nil { // this is the empty string
				nop := p.re.add(&instr{kind: iNop})
				return nop, nop
			}
			return
		case start == nil: // this is first element of concatenation
			start, end = nstart, nend
		default:
			end.next = nstart
			end = nend
		}
	}
	panic("unreachable")
}
// regexp parses a full alternation: concatenations separated by '|',
// joined pairwise with an iAlt split and a shared iNop join point.
func (p *parser) regexp() (start, end *instr) {
	start, end = p.concatenation()
	for {
		switch p.c() {
		default:
			return
		case '|':
			p.nextc()
			nstart, nend := p.concatenation()
			alt := &instr{kind: iAlt}
			p.re.add(alt)
			alt.left = start
			alt.next = nstart
			nop := &instr{kind: iNop}
			p.re.add(nop)
			end.next = nop
			nend.next = nop
			start, end = alt, nop
		}
	}
	panic("unreachable")
}
// unNop chases next pointers through any run of iNop instructions and
// returns the first real instruction.
func unNop(i *instr) *instr {
	for {
		if i.kind != iNop {
			return i
		}
		i = i.next
	}
}
// eliminateNops retargets every instruction's outgoing edges past iNop
// placeholders, so the matcher never has to step through them.
func (re *Regexp) eliminateNops() {
	for _, inst := range re.inst {
		if inst.kind == iEnd {
			continue // iEnd has no outgoing edges
		}
		inst.next = unNop(inst.next)
		if inst.kind == iAlt {
			inst.left = unNop(inst.left)
		}
	}
}
// dump prints the compiled program, one instruction per line, for debugging.
func (re *Regexp) dump() {
	print("prefix <", re.prefix, ">\n")
	for _, inst := range re.inst {
		print(inst.index, ": ")
		inst.print()
		if inst.kind != iEnd {
			print(" -> ", inst.next.index)
		}
		print("\n")
	}
}
// doParse compiles re.expr: build the instruction graph, splice out iNop
// links, and precompute the literal prefix. Parse errors arrive as panics
// of type Error, recovered by Compile.
func (re *Regexp) doParse() {
	p := newParser(re)
	start := &instr{kind: iStart}
	re.add(start)
	s, e := p.regexp()
	start.next = s
	re.start = start
	e.next = re.add(&instr{kind: iEnd})
	if debug {
		re.dump()
		println()
	}
	re.eliminateNops()
	if debug {
		re.dump()
		println()
	}
	re.setPrefix()
	if debug {
		re.dump()
		println()
	}
}
// Extract regular text from the beginning of the pattern,
// possibly after a leading iBOT.
// That text can be used by doExecute to speed up matching.
// The prefix is stored both as a string and as bytes so either input
// representation can check it without converting.
func (re *Regexp) setPrefix() {
	var b []byte
	var utf = make([]byte, utf8.UTFMax)
	var inst *instr
	// First instruction is start; skip that. Also skip any initial iBOT.
	inst = re.inst[0].next
	for inst.kind == iBOT {
		inst = inst.next
	}
Loop:
	for ; inst.kind != iEnd; inst = inst.next {
		// stop if this is not a char
		if inst.kind != iChar {
			break
		}
		// stop if this char can be followed by a match for an empty string,
		// which includes closures, ^, and $.
		switch inst.next.kind {
		case iBOT, iEOT, iAlt:
			break Loop
		}
		n := utf8.EncodeRune(utf, inst.char)
		b = append(b, utf[0:n]...)
	}
	// point prefixStart instruction to first non-CHAR after prefix
	re.prefixStart = inst
	re.prefixBytes = b
	re.prefix = string(b)
}
// String returns the source text used to compile the regular expression.
func (re *Regexp) String() string {
	return re.expr
}
// Compile parses a regular expression and returns, if successful, a Regexp
// object that can be used to match against text.
// Parse failures are reported through panic/recover: doParse panics with
// an Error, which is converted here into the returned error value.
func Compile(str string) (regexp *Regexp, error os.Error) {
	regexp = new(Regexp)
	// doParse will panic if there is a parse error.
	defer func() {
		if e := recover(); e != nil {
			regexp = nil
			error = e.(Error) // Will re-panic if error was not an Error, e.g. nil-pointer exception
		}
	}()
	regexp.expr = str
	regexp.inst = make([]*instr, 0, 10)
	regexp.doParse()
	return
}
// MustCompile is like Compile but panics if the expression cannot be parsed.
// It simplifies safe initialization of global variables holding compiled regular
// expressions.
func MustCompile(str string) *Regexp {
	regexp, error := Compile(str)
	if error != nil {
		panic(`regexp: compiling "` + str + `": ` + error.String())
	}
	return regexp
}
// NumSubexp returns the number of parenthesized subexpressions in this Regexp.
func (re *Regexp) NumSubexp() int { return re.nbra }
// The match arena allows us to reduce the garbage generated by tossing
// match vectors away as we execute. Matches are ref counted and returned
// to a free list when no longer active. Increases a simple benchmark by 22X.
type matchArena struct {
	head  *matchVec // free list of reusable vectors
	len   int       // length of match vector
	pos   int       // current byte position in the input
	atBOT bool      // whether we're at beginning of text
	atEOT bool      // whether we're at end of text
}
// matchVec is one reference-counted vector of submatch positions.
type matchVec struct {
	m    []int // pairs of bracketing submatches. 0th is start,end
	ref  int   // reference count; the vector is recycled when it drops to 0
	next *matchVec // link in the arena's free list
}
// new returns a vector from the free list, refilling the list in blocks
// of 10 when it is empty. The result has ref == 0 and an allocated,
// possibly stale, m slice of length a.len.
func (a *matchArena) new() *matchVec {
	if a.head == nil {
		const N = 10
		block := make([]matchVec, N)
		for i := 0; i < N; i++ {
			b := &block[i]
			b.next = a.head
			a.head = b
		}
	}
	m := a.head
	a.head = m.next
	m.ref = 0
	if m.m == nil {
		m.m = make([]int, a.len)
	}
	return m
}
// free drops one reference to m; when the count reaches zero the vector is
// returned to the arena's free list for reuse.
func (a *matchArena) free(m *matchVec) {
	m.ref--
	if m.ref == 0 {
		m.next = a.head
		a.head = m
	}
}
// copy returns a fresh vector (ref == 0) holding a duplicate of m's contents.
func (a *matchArena) copy(m *matchVec) *matchVec {
	m1 := a.new()
	copy(m1.m, m.m)
	return m1
}
// noMatch returns a vector, with one reference held, whose slots are all -1.
func (a *matchArena) noMatch() *matchVec {
	m := a.new()
	for i := range m.m {
		m.m[i] = -1 // no match seen; catches cases like "a(b)?c" on "ac"
	}
	m.ref = 1
	return m
}
// state is one live thread of the NFA: the next instruction to run plus
// the match vector it has accumulated so far.
type state struct {
	inst     *instr // next instruction to execute
	prefixed bool   // this match began with a fixed prefix
	match    *matchVec
}
// Append new state to to-do list. Leftmost-longest wins so avoid
// adding a state that's already active. The matchVec will be inc-ref'ed
// if it is assigned to a state.
func (a *matchArena) addState(s []state, inst *instr, prefixed bool, match *matchVec) []state {
	switch inst.kind {
	case iBOT:
		// Zero-width assertions are resolved here rather than queued.
		if a.atBOT {
			s = a.addState(s, inst.next, prefixed, match)
		}
		return s
	case iEOT:
		if a.atEOT {
			s = a.addState(s, inst.next, prefixed, match)
		}
		return s
	case iBra:
		// Record the bracket position, then continue past it.
		match.m[inst.braNum] = a.pos
		s = a.addState(s, inst.next, prefixed, match)
		return s
	}
	l := len(s)
	// States are inserted in order so it's sufficient to see if we have the same
	// instruction; no need to see if existing match is earlier (it is).
	for i := 0; i < l; i++ {
		if s[i].inst == inst {
			return s
		}
	}
	s = append(s, state{inst, prefixed, match})
	match.ref++
	if inst.kind == iAlt {
		s = a.addState(s, inst.left, prefixed, a.copy(match))
		// give other branch a copy of this match vector
		s = a.addState(s, inst.next, prefixed, a.copy(match))
	}
	return s
}
// input abstracts different representations of the input text. It provides
// one-character lookahead.
type input interface {
	step(pos int) (rune int, width int) // advance one rune
	canCheckPrefix() bool               // can we look ahead without losing info?
	hasPrefix(re *Regexp) bool          // does the input begin with re's literal prefix?
	index(re *Regexp, pos int) int      // offset of re's prefix at or after pos; -1 if absent or unknowable
}
// inputString scans a string.
type inputString struct {
	str string
}
func newInputString(str string) *inputString {
	return &inputString{str: str}
}
// step decodes and returns the rune starting at pos with its byte width;
// it reports endOfText once pos reaches the end of the string.
func (i *inputString) step(pos int) (int, int) {
	if pos >= len(i.str) {
		return endOfText, 0
	}
	return utf8.DecodeRuneInString(i.str[pos:])
}
// canCheckPrefix reports that the whole input is in memory, so prefix
// checks never lose information.
func (i *inputString) canCheckPrefix() bool {
	return true
}
func (i *inputString) hasPrefix(re *Regexp) bool {
	return strings.HasPrefix(i.str, re.prefix)
}
func (i *inputString) index(re *Regexp, pos int) int {
	return strings.Index(i.str[pos:], re.prefix)
}
// inputBytes scans a byte slice.
type inputBytes struct {
	str []byte
}
func newInputBytes(str []byte) *inputBytes {
	return &inputBytes{str: str}
}
// step decodes and returns the rune starting at pos with its byte width;
// it reports endOfText once pos reaches the end of the slice.
func (i *inputBytes) step(pos int) (int, int) {
	if pos >= len(i.str) {
		return endOfText, 0
	}
	return utf8.DecodeRune(i.str[pos:])
}
// canCheckPrefix reports that the whole input is in memory, so prefix
// checks never lose information.
func (i *inputBytes) canCheckPrefix() bool {
	return true
}
func (i *inputBytes) hasPrefix(re *Regexp) bool {
	return bytes.HasPrefix(i.str, re.prefixBytes)
}
func (i *inputBytes) index(re *Regexp, pos int) int {
	return bytes.Index(i.str[pos:], re.prefixBytes)
}
// inputReader scans a RuneReader. Unlike the in-memory inputs, it can only
// move forward one rune at a time, so prefix shortcuts are unavailable.
type inputReader struct {
	r     io.RuneReader
	atEOT bool // set once the reader reports an error (treated as EOF)
	pos   int  // byte position of the next unread rune
}
func newInputReader(r io.RuneReader) *inputReader {
	return &inputReader{r: r}
}
// step reads the next rune. Because the reader cannot seek, any request at
// a position other than the current one reports endOfText.
func (i *inputReader) step(pos int) (int, int) {
	if !i.atEOT && pos != i.pos {
		return endOfText, 0
	}
	r, w, err := i.r.ReadRune()
	if err != nil {
		i.atEOT = true
		return endOfText, 0
	}
	i.pos += w
	return r, w
}
// canCheckPrefix reports false: peeking ahead would consume reader input.
func (i *inputReader) canCheckPrefix() bool {
	return false
}
func (i *inputReader) hasPrefix(re *Regexp) bool {
	return false
}
func (i *inputReader) index(re *Regexp, pos int) int {
	return -1
}
// Search match starting from pos bytes into the input.
// doExecute simulates the NFA with two state lists (current and next),
// advancing one rune at a time, and returns the leftmost-longest match
// vector, or nil if there is no match.
func (re *Regexp) doExecute(i input, pos int) []int {
	var s [2][]state
	s[0] = make([]state, 0, 10)
	s[1] = make([]state, 0, 10)
	in, out := 0, 1
	var final state
	found := false
	anchored := re.inst[0].next.kind == iBOT
	if anchored && pos > 0 {
		return nil
	}
	// fast check for initial plain substring
	if i.canCheckPrefix() && re.prefix != "" {
		advance := 0
		if anchored {
			if !i.hasPrefix(re) {
				return nil
			}
		} else {
			advance = i.index(re, pos)
			if advance == -1 {
				return nil
			}
		}
		pos += advance
	}
	// We look one character ahead so we can match $, which checks whether
	// we are at EOT.
	nextChar, nextWidth := i.step(pos)
	arena := &matchArena{
		len:   2 * (re.nbra + 1),
		pos:   pos,
		atBOT: pos == 0,
		atEOT: nextChar == endOfText,
	}
	for c, startPos := 0, pos; c != endOfText; {
		if !found && (pos == startPos || !anchored) {
			// prime the pump if we haven't seen a match yet
			match := arena.noMatch()
			match.m[0] = pos
			s[out] = arena.addState(s[out], re.start.next, false, match)
			arena.free(match) // if addState saved it, ref was incremented
		} else if len(s[out]) == 0 {
			// machine has completed
			break
		}
		in, out = out, in // old out state is new in state
		// clear out old state
		old := s[out]
		for _, state := range old {
			arena.free(state.match)
		}
		s[out] = old[0:0] // truncate state vector
		c = nextChar
		thisPos := pos
		pos += nextWidth
		nextChar, nextWidth = i.step(pos)
		arena.atEOT = nextChar == endOfText
		arena.atBOT = false
		arena.pos = pos
		// Advance every live thread by the current character c.
		for _, st := range s[in] {
			switch st.inst.kind {
			case iBOT:
			case iEOT:
			case iChar:
				if c == st.inst.char {
					s[out] = arena.addState(s[out], st.inst.next, st.prefixed, st.match)
				}
			case iCharClass:
				if st.inst.cclass.matches(c) {
					s[out] = arena.addState(s[out], st.inst.next, st.prefixed, st.match)
				}
			case iAny:
				if c != endOfText {
					s[out] = arena.addState(s[out], st.inst.next, st.prefixed, st.match)
				}
			case iNotNL:
				if c != endOfText && c != '\n' {
					s[out] = arena.addState(s[out], st.inst.next, st.prefixed, st.match)
				}
			case iBra:
			case iAlt:
			case iEnd:
				// choose leftmost longest
				if !found || // first
					st.match.m[0] < final.match.m[0] || // leftmost
					(st.match.m[0] == final.match.m[0] && thisPos > final.match.m[1]) { // longest
					if final.match != nil {
						arena.free(final.match)
					}
					final = st
					final.match.ref++
					final.match.m[1] = thisPos
				}
				found = true
			default:
				st.inst.print()
				panic("unknown instruction in execute")
			}
		}
	}
	if final.match == nil {
		return nil
	}
	// if match found, back up start of match by width of prefix.
	if final.prefixed && len(final.match.m) > 0 {
		final.match.m[0] -= len(re.prefix)
	}
	return final.match.m
}
// LiteralPrefix returns a literal string that must begin any match
// of the regular expression re. It returns the boolean true if the
// literal string comprises the entire regular expression.
func (re *Regexp) LiteralPrefix() (prefix string, complete bool) {
	c := make([]int, len(re.inst)-2) // minus start and end.
	// First instruction is start; skip that.
	i := 0
	for inst := re.inst[0].next; inst.kind != iEnd; inst = inst.next {
		// stop if this is not a char
		if inst.kind != iChar {
			return string(c[:i]), false
		}
		c[i] = inst.char
		i++
	}
	// Reached iEnd with only plain characters: the expression is literal.
	return string(c[:i]), true
}
// MatchReader returns whether the Regexp matches the text read by the
// RuneReader. The return value is a boolean: true for match, false for no
// match.
func (re *Regexp) MatchReader(r io.RuneReader) bool {
	return len(re.doExecute(newInputReader(r), 0)) > 0
}
// MatchString returns whether the Regexp matches the string s.
// The return value is a boolean: true for match, false for no match.
func (re *Regexp) MatchString(s string) bool { return len(re.doExecute(newInputString(s), 0)) > 0 }
// Match returns whether the Regexp matches the byte slice b.
// The return value is a boolean: true for match, false for no match.
func (re *Regexp) Match(b []byte) bool { return len(re.doExecute(newInputBytes(b), 0)) > 0 }
// MatchReader checks whether a textual regular expression matches the text
// read by the RuneReader. More complicated queries need to use Compile and
// the full Regexp interface.
func MatchReader(pattern string, r io.RuneReader) (matched bool, error os.Error) {
	re, err := Compile(pattern)
	if err != nil {
		return false, err
	}
	return re.MatchReader(r), nil
}
// MatchString checks whether a textual regular expression
// matches a string. More complicated queries need
// to use Compile and the full Regexp interface.
func MatchString(pattern string, s string) (matched bool, error os.Error) {
	re, err := Compile(pattern)
	if err != nil {
		return false, err
	}
	return re.MatchString(s), nil
}
// Match checks whether a textual regular expression
// matches a byte slice. More complicated queries need
// to use Compile and the full Regexp interface.
func Match(pattern string, b []byte) (matched bool, error os.Error) {
	re, err := Compile(pattern)
	if err != nil {
		return false, err
	}
	return re.Match(b), nil
}
// ReplaceAllString returns a copy of src in which all matches for the Regexp
// have been replaced by repl. No support is provided for expressions
// (e.g. \1 or $1) in the replacement string.
func (re *Regexp) ReplaceAllString(src, repl string) string {
	return re.ReplaceAllStringFunc(src, func(string) string { return repl })
}
// ReplaceAllStringFunc returns a copy of src in which all matches for the
// Regexp have been replaced by the return value of function repl (whose
// first argument is the matched string). No support is provided for
// expressions (e.g. \1 or $1) in the replacement string.
func (re *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string {
	lastMatchEnd := 0 // end position of the most recent match
	searchPos := 0    // position where we next look for a match
	buf := new(bytes.Buffer)
	for searchPos <= len(src) {
		a := re.doExecute(newInputString(src), searchPos)
		if len(a) == 0 {
			break // no more matches
		}
		// Copy the unmatched characters before this match.
		io.WriteString(buf, src[lastMatchEnd:a[0]])
		// Now insert a copy of the replacement string, but not for a
		// match of the empty string immediately after another match.
		// (Otherwise, we get double replacement for patterns that
		// match both empty and nonempty strings.)
		if a[1] > lastMatchEnd || a[0] == 0 {
			io.WriteString(buf, repl(src[a[0]:a[1]]))
		}
		lastMatchEnd = a[1]
		// Advance past this match; always advance at least one character.
		_, width := utf8.DecodeRuneInString(src[searchPos:])
		if searchPos+width > a[1] {
			searchPos += width
		} else if searchPos+1 > a[1] {
			// This clause is only needed at the end of the input
			// string. In that case, DecodeRuneInString returns width=0.
			searchPos++
		} else {
			searchPos = a[1]
		}
	}
	// Copy the unmatched characters after the last match.
	io.WriteString(buf, src[lastMatchEnd:])
	return buf.String()
}
// ReplaceAll returns a copy of src in which all matches for the Regexp
// have been replaced by repl. No support is provided for expressions
// (e.g. \1 or $1) in the replacement text.
func (re *Regexp) ReplaceAll(src, repl []byte) []byte {
return re.ReplaceAllFunc(src, func([]byte) []byte { return repl })
}
// ReplaceAllFunc returns a copy of src in which all matches for the
// Regexp have been replaced by the return value of function repl (whose
// first argument is the matched []byte). No support is provided for
// expressions (e.g. \1 or $1) in the replacement string.
func (re *Regexp) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte {
lastMatchEnd := 0 // end position of the most recent match
searchPos := 0 // position where we next look for a match
buf := new(bytes.Buffer)
for searchPos <= len(src) {
// a[0]:a[1] delimits the leftmost match at or after searchPos.
a := re.doExecute(newInputBytes(src), searchPos)
if len(a) == 0 {
break // no more matches
}
// Copy the unmatched characters before this match.
buf.Write(src[lastMatchEnd:a[0]])
// Now insert a copy of the replacement string, but not for a
// match of the empty string immediately after another match.
// (Otherwise, we get double replacement for patterns that
// match both empty and nonempty strings.)
// a[0] == 0 still allows an empty match at the very start of src.
if a[1] > lastMatchEnd || a[0] == 0 {
buf.Write(repl(src[a[0]:a[1]]))
}
lastMatchEnd = a[1]
// Advance past this match; always advance at least one character.
_, width := utf8.DecodeRune(src[searchPos:])
if searchPos+width > a[1] {
searchPos += width
} else if searchPos+1 > a[1] {
// This clause is only needed at the end of the input
// slice. In that case, DecodeRune returns width=0.
searchPos++
} else {
searchPos = a[1]
}
}
// Copy the unmatched characters after the last match.
buf.Write(src[lastMatchEnd:])
return buf.Bytes()
}
// QuoteMeta returns a string that quotes all regular expression metacharacters
// inside the argument text; the returned string is a regular expression matching
// the literal text. For example, QuoteMeta(`[foo]`) returns `\[foo\]`.
func QuoteMeta(s string) string {
	// A byte loop is correct because all metacharacters are ASCII.
	quoted := make([]byte, 0, 2*len(s))
	for i := 0; i < len(s); i++ {
		c := s[i]
		if special(int(c)) {
			quoted = append(quoted, '\\')
		}
		quoted = append(quoted, c)
	}
	return string(quoted)
}
// allMatches finds successive matches in slice b if b is non-nil, otherwise
// in string s, and calls deliver with each match's position vector. At most
// n matches are delivered. An empty match immediately following a previous
// match is skipped, per the 'All' rules in the package comment.
func (re *Regexp) allMatches(s string, b []byte, n int, deliver func([]int)) {
	var end int
	var in input
	// inputString and inputBytes are stateless wrappers, so build the
	// input once instead of reconstructing it on every loop iteration.
	if b == nil {
		end = len(s)
		in = newInputString(s)
	} else {
		end = len(b)
		in = newInputBytes(b)
	}
	for pos, i, prevMatchEnd := 0, 0, -1; i < n && pos <= end; {
		matches := re.doExecute(in, pos)
		if len(matches) == 0 {
			break
		}
		accept := true
		if matches[1] == pos {
			// We've found an empty match.
			if matches[0] == prevMatchEnd {
				// We don't allow an empty match right
				// after a previous match, so ignore it.
				accept = false
			}
			// Advance one rune so the scan always makes progress.
			var width int
			// TODO: use step()
			if b == nil {
				_, width = utf8.DecodeRuneInString(s[pos:end])
			} else {
				_, width = utf8.DecodeRune(b[pos:end])
			}
			if width > 0 {
				pos += width
			} else {
				// At end of input; force the loop to terminate.
				pos = end + 1
			}
		} else {
			pos = matches[1]
		}
		prevMatchEnd = matches[1]
		if accept {
			deliver(matches)
			i++
		}
	}
}
// Find returns a slice holding the text of the leftmost match in b of the regular expression.
// A return value of nil indicates no match.
func (re *Regexp) Find(b []byte) []byte {
	if a := re.doExecute(newInputBytes(b), 0); a != nil {
		return b[a[0]:a[1]]
	}
	return nil
}

// FindIndex returns a two-element slice of integers defining the location of
// the leftmost match in b of the regular expression. The match itself is at
// b[loc[0]:loc[1]].
// A return value of nil indicates no match.
func (re *Regexp) FindIndex(b []byte) (loc []int) {
	if a := re.doExecute(newInputBytes(b), 0); a != nil {
		return a[0:2]
	}
	return nil
}

// FindString returns a string holding the text of the leftmost match in s of the regular
// expression. If there is no match, the return value is an empty string,
// but it will also be empty if the regular expression successfully matches
// an empty string. Use FindStringIndex or FindStringSubmatch if it is
// necessary to distinguish these cases.
func (re *Regexp) FindString(s string) string {
	if a := re.doExecute(newInputString(s), 0); a != nil {
		return s[a[0]:a[1]]
	}
	return ""
}

// FindStringIndex returns a two-element slice of integers defining the
// location of the leftmost match in s of the regular expression. The match
// itself is at s[loc[0]:loc[1]].
// A return value of nil indicates no match.
func (re *Regexp) FindStringIndex(s string) []int {
	if a := re.doExecute(newInputString(s), 0); a != nil {
		return a[0:2]
	}
	return nil
}

// FindReaderIndex returns a two-element slice of integers defining the
// location of the leftmost match of the regular expression in text read from
// the RuneReader. The match itself is at s[loc[0]:loc[1]]. A return
// value of nil indicates no match.
func (re *Regexp) FindReaderIndex(r io.RuneReader) []int {
	if a := re.doExecute(newInputReader(r), 0); a != nil {
		return a[0:2]
	}
	return nil
}
// FindSubmatch returns a slice of slices holding the text of the leftmost
// match of the regular expression in b and the matches, if any, of its
// subexpressions, as defined by the 'Submatch' descriptions in the package
// comment.
// A return value of nil indicates no match.
func (re *Regexp) FindSubmatch(b []byte) [][]byte {
	a := re.doExecute(newInputBytes(b), 0)
	if a == nil {
		return nil
	}
	ret := make([][]byte, len(a)/2)
	for i := 0; i < len(ret); i++ {
		// A negative start index means this subexpression did not match.
		if start := a[2*i]; start >= 0 {
			ret[i] = b[start:a[2*i+1]]
		}
	}
	return ret
}

// FindSubmatchIndex returns a slice holding the index pairs identifying the
// leftmost match of the regular expression in b and the matches, if any, of
// its subexpressions, as defined by the 'Submatch' and 'Index' descriptions
// in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindSubmatchIndex(b []byte) []int {
	return re.doExecute(newInputBytes(b), 0)
}

// FindStringSubmatch returns a slice of strings holding the text of the
// leftmost match of the regular expression in s and the matches, if any, of
// its subexpressions, as defined by the 'Submatch' description in the
// package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindStringSubmatch(s string) []string {
	a := re.doExecute(newInputString(s), 0)
	if a == nil {
		return nil
	}
	ret := make([]string, len(a)/2)
	for i := 0; i < len(ret); i++ {
		// A negative start index means this subexpression did not match.
		if start := a[2*i]; start >= 0 {
			ret[i] = s[start:a[2*i+1]]
		}
	}
	return ret
}

// FindStringSubmatchIndex returns a slice holding the index pairs
// identifying the leftmost match of the regular expression in s and the
// matches, if any, of its subexpressions, as defined by the 'Submatch' and
// 'Index' descriptions in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindStringSubmatchIndex(s string) []int {
	return re.doExecute(newInputString(s), 0)
}

// FindReaderSubmatchIndex returns a slice holding the index pairs
// identifying the leftmost match of the regular expression of text read by
// the RuneReader, and the matches, if any, of its subexpressions, as defined
// by the 'Submatch' and 'Index' descriptions in the package comment. A
// return value of nil indicates no match.
func (re *Regexp) FindReaderSubmatchIndex(r io.RuneReader) []int {
	return re.doExecute(newInputReader(r), 0)
}
// startSize is the initial capacity for the result slices built by the
// 'All' routines.
const startSize = 10

// FindAll is the 'All' version of Find; it returns a slice of all successive
// matches of the expression, as defined by the 'All' description in the
// package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAll(b []byte, n int) [][]byte {
	if n < 0 {
		n = len(b) + 1
	}
	result := make([][]byte, 0, startSize)
	re.allMatches("", b, n, func(match []int) {
		result = append(result, b[match[0]:match[1]])
	})
	if len(result) > 0 {
		return result
	}
	return nil
}

// FindAllIndex is the 'All' version of FindIndex; it returns a slice of all
// successive matches of the expression, as defined by the 'All' description
// in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllIndex(b []byte, n int) [][]int {
	if n < 0 {
		n = len(b) + 1
	}
	result := make([][]int, 0, startSize)
	re.allMatches("", b, n, func(match []int) {
		result = append(result, match[0:2])
	})
	if len(result) > 0 {
		return result
	}
	return nil
}

// FindAllString is the 'All' version of FindString; it returns a slice of all
// successive matches of the expression, as defined by the 'All' description
// in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllString(s string, n int) []string {
	if n < 0 {
		n = len(s) + 1
	}
	result := make([]string, 0, startSize)
	re.allMatches(s, nil, n, func(match []int) {
		result = append(result, s[match[0]:match[1]])
	})
	if len(result) > 0 {
		return result
	}
	return nil
}

// FindAllStringIndex is the 'All' version of FindStringIndex; it returns a
// slice of all successive matches of the expression, as defined by the 'All'
// description in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllStringIndex(s string, n int) [][]int {
	if n < 0 {
		n = len(s) + 1
	}
	result := make([][]int, 0, startSize)
	re.allMatches(s, nil, n, func(match []int) {
		result = append(result, match[0:2])
	})
	if len(result) > 0 {
		return result
	}
	return nil
}
// FindAllSubmatch is the 'All' version of FindSubmatch; it returns a slice
// of all successive matches of the expression, as defined by the 'All'
// description in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllSubmatch(b []byte, n int) [][][]byte {
	if n < 0 {
		n = len(b) + 1
	}
	result := make([][][]byte, 0, startSize)
	re.allMatches("", b, n, func(match []int) {
		slice := make([][]byte, len(match)/2)
		for j := 0; j < len(slice); j++ {
			// A negative index marks a subexpression that did not match.
			if start := match[2*j]; start >= 0 {
				slice[j] = b[start:match[2*j+1]]
			}
		}
		result = append(result, slice)
	})
	if len(result) > 0 {
		return result
	}
	return nil
}

// FindAllSubmatchIndex is the 'All' version of FindSubmatchIndex; it returns
// a slice of all successive matches of the expression, as defined by the
// 'All' description in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllSubmatchIndex(b []byte, n int) [][]int {
	if n < 0 {
		n = len(b) + 1
	}
	result := make([][]int, 0, startSize)
	re.allMatches("", b, n, func(match []int) {
		result = append(result, match)
	})
	if len(result) > 0 {
		return result
	}
	return nil
}

// FindAllStringSubmatch is the 'All' version of FindStringSubmatch; it
// returns a slice of all successive matches of the expression, as defined by
// the 'All' description in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllStringSubmatch(s string, n int) [][]string {
	if n < 0 {
		n = len(s) + 1
	}
	result := make([][]string, 0, startSize)
	re.allMatches(s, nil, n, func(match []int) {
		slice := make([]string, len(match)/2)
		for j := 0; j < len(slice); j++ {
			// A negative index marks a subexpression that did not match.
			if start := match[2*j]; start >= 0 {
				slice[j] = s[start:match[2*j+1]]
			}
		}
		result = append(result, slice)
	})
	if len(result) > 0 {
		return result
	}
	return nil
}

// FindAllStringSubmatchIndex is the 'All' version of
// FindStringSubmatchIndex; it returns a slice of all successive matches of
// the expression, as defined by the 'All' description in the package
// comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllStringSubmatchIndex(s string, n int) [][]int {
	if n < 0 {
		n = len(s) + 1
	}
	result := make([][]int, 0, startSize)
	re.allMatches(s, nil, n, func(match []int) {
		result = append(result, match)
	})
	if len(result) > 0 {
		return result
	}
	return nil
}
regexp: add a package prefix to error strings.
R=r, r
CC=golang-dev
http://codereview.appspot.com/4630041
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package regexp implements a simple regular expression library.
//
// The syntax of the regular expressions accepted is:
//
// regexp:
// concatenation { '|' concatenation }
// concatenation:
// { closure }
// closure:
// term [ '*' | '+' | '?' ]
// term:
// '^'
// '$'
// '.'
// character
// '[' [ '^' ] { character-range } ']'
// '(' regexp ')'
// character-range:
// character [ '-' character ]
//
// All characters are UTF-8-encoded code points. Backslashes escape special
// characters, including inside character classes. The standard Go character
// escapes are also recognized: \a \b \f \n \r \t \v.
//
// There are 16 methods of Regexp that match a regular expression and identify
// the matched text. Their names are matched by this regular expression:
//
// Find(All)?(String)?(Submatch)?(Index)?
//
// If 'All' is present, the routine matches successive non-overlapping
// matches of the entire expression. Empty matches abutting a preceding
// match are ignored. The return value is a slice containing the successive
// return values of the corresponding non-'All' routine. These routines take
// an extra integer argument, n; if n >= 0, the function returns at most n
// matches/submatches.
//
// If 'String' is present, the argument is a string; otherwise it is a slice
// of bytes; return values are adjusted as appropriate.
//
// If 'Submatch' is present, the return value is a slice identifying the
// successive submatches of the expression. Submatches are matches of
// parenthesized subexpressions within the regular expression, numbered from
// left to right in order of opening parenthesis. Submatch 0 is the match of
// the entire expression, submatch 1 the match of the first parenthesized
// subexpression, and so on.
//
// If 'Index' is present, matches and submatches are identified by byte index
// pairs within the input string: result[2*n:2*n+1] identifies the indexes of
// the nth submatch. The pair for n==0 identifies the match of the entire
// expression. If 'Index' is not present, the match is identified by the
// text of the match/submatch. If an index is negative, it means that
// subexpression did not match any string in the input.
//
// There is also a subset of the methods that can be applied to text read
// from a RuneReader:
//
// MatchReader, FindReaderIndex, FindReaderSubmatchIndex
//
// This set may grow. Note that regular expression matches may need to
// examine text beyond the text returned by a match, so the methods that
// match text from a RuneReader may read arbitrarily far into the input
// before returning.
//
// (There are a few other methods that do not match this pattern.)
//
package regexp
import (
"bytes"
"io"
"os"
"strings"
"utf8"
)
// debug enables dumping of the compiled program at each compilation stage.
var debug = false
// Error is the local type for a parsing error.
type Error string
// String returns the error message, satisfying the os.Error interface.
func (e Error) String() string {
return string(e)
}
// Error codes returned by failures to parse an expression.
// Every message carries the "regexp: " package prefix.
var (
ErrInternal = Error("regexp: internal error")
ErrUnmatchedLpar = Error("regexp: unmatched '('")
ErrUnmatchedRpar = Error("regexp: unmatched ')'")
ErrUnmatchedLbkt = Error("regexp: unmatched '['")
ErrUnmatchedRbkt = Error("regexp: unmatched ']'")
ErrBadRange = Error("regexp: bad range in character class")
ErrExtraneousBackslash = Error("regexp: extraneous backslash")
ErrBadClosure = Error("regexp: repeated closure (**, ++, etc.)")
ErrBareClosure = Error("regexp: closure applies to nothing")
ErrBadBackslash = Error("regexp: illegal backslash escape")
)
// Instruction kinds (opcodes) for the compiled NFA program.
const (
iStart = iota // beginning of program
iEnd // end of program: success
iBOT // '^' beginning of text
iEOT // '$' end of text
iChar // 'a' regular character
iCharClass // [a-z] character class
iAny // '.' any character including newline
iNotNL // [^\n] special case: any character but newline
iBra // '(' parenthesized expression: 2*braNum for left, 2*braNum+1 for right
iAlt // '|' alternation
iNop // do nothing; makes it easy to link without patching
)
// An instruction executed by the NFA
type instr struct {
kind int // the type of this instruction: iChar, iAny, etc.
index int // used only in debugging; could be eliminated
next *instr // the instruction to execute after this one
// Special fields valid only for some items.
char int // iChar
braNum int // iBra, iEbra (an ebra is an iBra with odd braNum)
cclass *charClass // iCharClass
left *instr // iAlt, other branch
}
// print writes a human-readable form of the instruction using the print
// builtin; it is a debugging aid used by Regexp.dump.
func (i *instr) print() {
switch i.kind {
case iStart:
print("start")
case iEnd:
print("end")
case iBOT:
print("bot")
case iEOT:
print("eot")
case iChar:
print("char ", string(i.char))
case iCharClass:
i.cclass.print()
case iAny:
print("any")
case iNotNL:
print("notnl")
case iBra:
// Even braNum is an opening bracket, odd is the matching close.
if i.braNum&1 == 0 {
print("bra", i.braNum/2)
} else {
print("ebra", i.braNum/2)
}
case iAlt:
print("alt(", i.left.index, ")")
case iNop:
print("nop")
}
}
// Regexp is the representation of a compiled regular expression.
// The public interface is entirely through methods.
type Regexp struct {
expr string // the original expression
prefix string // initial plain text string
prefixBytes []byte // initial plain text bytes
inst []*instr // all instructions, indexed by instr.index
start *instr // first instruction of machine
prefixStart *instr // where to start if there is a prefix
nbra int // number of brackets in expression, for subexpressions
}
// charClass represents a bracketed character class such as [a-z0-9].
type charClass struct {
negate bool // is character class negated? ([^a-z])
// slice of int, stored pairwise: [a-z] is (a,z); x is (x,x):
ranges []int
cmin, cmax int // cached overall bounds for a fast reject in matches
}
// print writes a human-readable form of the class; debugging aid.
func (cclass *charClass) print() {
print("charclass")
if cclass.negate {
print(" (negated)")
}
for i := 0; i < len(cclass.ranges); i += 2 {
l := cclass.ranges[i]
r := cclass.ranges[i+1]
if l == r {
print(" [", string(l), "]")
} else {
print(" [", string(l), "-", string(r), "]")
}
}
}
// addRange adds the characters a through b inclusive to the class and
// updates the cached cmin/cmax bounds.
func (cclass *charClass) addRange(a, b int) {
// range is a through b inclusive
cclass.ranges = append(cclass.ranges, a, b)
if a < cclass.cmin {
cclass.cmin = a
}
if b > cclass.cmax {
cclass.cmax = b
}
}
// matches reports whether c is accepted by the class, honoring negation.
// Characters outside [cmin, cmax] are rejected without scanning the ranges.
func (cclass *charClass) matches(c int) bool {
if c < cclass.cmin || c > cclass.cmax {
return cclass.negate
}
ranges := cclass.ranges
for i := 0; i < len(ranges); i = i + 2 {
if ranges[i] <= c && c <= ranges[i+1] {
return !cclass.negate
}
}
return cclass.negate
}
// newCharClass returns a fresh iCharClass instruction with an empty class.
// cmin/cmax start inverted (cmin > cmax) so that the first addRange call
// establishes real bounds.
func newCharClass() *instr {
	cc := &charClass{
		ranges: make([]int, 0, 4),
		cmin:   0x10FFFF + 1, // MaxRune + 1
		cmax:   -1,
	}
	return &instr{kind: iCharClass, cclass: cc}
}

// add appends i to the program, assigning it the next instruction index,
// and returns i for convenience.
func (re *Regexp) add(i *instr) *instr {
	n := len(re.inst)
	i.index = n
	re.inst = append(re.inst, i)
	return i
}
// parser holds the scanner state while compiling an expression.
type parser struct {
re *Regexp
nlpar int // number of unclosed lpars
pos int // byte offset of the next character in re.expr
ch int // current character, or endOfText
}
// error aborts the parse by panicking with err; Compile recovers the panic
// and returns it as the os.Error result.
func (p *parser) error(err Error) {
panic(err)
}
// endOfText is the sentinel character reported once the scanner runs off
// the end of the expression.
const endOfText = -1
// c returns the current character without advancing.
func (p *parser) c() int { return p.ch }
// nextc advances the scanner one rune and returns the new current character.
func (p *parser) nextc() int {
if p.pos >= len(p.re.expr) {
p.ch = endOfText
} else {
c, w := utf8.DecodeRuneInString(p.re.expr[p.pos:])
p.ch = c
p.pos += w
}
return p.ch
}
// newParser returns a parser primed with the first character of re.expr.
func newParser(re *Regexp) *parser {
p := new(parser)
p.re = re
p.nextc() // load p.ch
return p
}
// special reports whether c is a regular-expression metacharacter.
func special(c int) bool {
	// A byte scan is correct: every metacharacter is ASCII.
	const meta = `\.+*?()|[]^$`
	for i := 0; i < len(meta); i++ {
		if int(meta[i]) == c {
			return true
		}
	}
	return false
}

// ispunct reports whether c is an ASCII punctuation character, the set a
// backslash is permitted to escape literally.
func ispunct(c int) bool {
	// A byte scan is correct: every punctuation character here is ASCII.
	const punct = "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"
	for i := 0; i < len(punct); i++ {
		if int(punct[i]) == c {
			return true
		}
	}
	return false
}

// escapes holds the escape letters; escaped holds the corresponding
// characters, index-aligned with escapes.
var escapes = []byte("abfnrtv")
var escaped = []byte("\a\b\f\n\r\t\v")

// escape returns the index of c in escapes, or -1 if c is not one of the
// recognized escape letters.
func escape(c int) int {
	for i := 0; i < len(escapes); i++ {
		if int(escapes[i]) == c {
			return i
		}
	}
	return -1
}
// checkBackslash returns the current character, decoding a backslash escape
// if one is present. A trailing backslash panics with
// ErrExtraneousBackslash; an unrecognized escape panics with
// ErrBadBackslash.
func (p *parser) checkBackslash() int {
c := p.c()
if c == '\\' {
c = p.nextc()
switch {
case c == endOfText:
p.error(ErrExtraneousBackslash)
case ispunct(c):
// c is as delivered
case escape(c) >= 0:
c = int(escaped[escape(c)])
default:
p.error(ErrBadBackslash)
}
}
return c
}
// charClass parses a character class, assuming the leading '[' has already
// been consumed, and returns the compiled instruction. Two forms are
// special-cased: [^\n] compiles to the dedicated iNotNL instruction and a
// single-character class like [a] compiles to a plain iChar.
func (p *parser) charClass() *instr {
i := newCharClass()
cc := i.cclass
if p.c() == '^' {
cc.negate = true
p.nextc()
}
// left is the pending lower bound of an a-b range, or -1 if none.
left := -1
for {
switch c := p.c(); c {
case ']', endOfText:
if left >= 0 {
p.error(ErrBadRange)
}
// Is it [^\n]?
if cc.negate && len(cc.ranges) == 2 &&
cc.ranges[0] == '\n' && cc.ranges[1] == '\n' {
nl := &instr{kind: iNotNL}
p.re.add(nl)
return nl
}
// Special common case: "[a]" -> "a"
if !cc.negate && len(cc.ranges) == 2 && cc.ranges[0] == cc.ranges[1] {
c := &instr{kind: iChar, char: cc.ranges[0]}
p.re.add(c)
return c
}
p.re.add(i)
return i
case '-': // do this before backslash processing
p.error(ErrBadRange)
default:
c = p.checkBackslash()
p.nextc()
switch {
case left < 0: // first of pair
if p.c() == '-' { // range
p.nextc()
left = c
} else { // single char
cc.addRange(c, c)
}
case left <= c: // second of pair
cc.addRange(left, c)
left = -1
default:
p.error(ErrBadRange)
}
}
}
panic("unreachable")
}
// term parses a single term of the grammar — an anchor, '.', a literal
// character, a character class, or a parenthesized subexpression — and
// returns the first and last instructions of the compiled fragment.
// A nil start signals an empty term (at '|', a closing ')' or end of text).
func (p *parser) term() (start, end *instr) {
switch c := p.c(); c {
case '|', endOfText:
return nil, nil
case '*', '+', '?':
p.error(ErrBareClosure)
case ')':
if p.nlpar == 0 {
p.error(ErrUnmatchedRpar)
}
return nil, nil
case ']':
p.error(ErrUnmatchedRbkt)
case '^':
p.nextc()
start = p.re.add(&instr{kind: iBOT})
return start, start
case '$':
p.nextc()
start = p.re.add(&instr{kind: iEOT})
return start, start
case '.':
p.nextc()
start = p.re.add(&instr{kind: iAny})
return start, start
case '[':
p.nextc()
start = p.charClass()
if p.c() != ']' {
p.error(ErrUnmatchedLbkt)
}
p.nextc()
return start, start
case '(':
p.nextc()
p.nlpar++
p.re.nbra++ // increment first so first subexpr is \1
nbra := p.re.nbra
start, end = p.regexp()
if p.c() != ')' {
p.error(ErrUnmatchedLpar)
}
p.nlpar--
p.nextc()
// Bracket the subexpression with iBra markers: even braNum opens,
// odd braNum closes.
bra := &instr{kind: iBra, braNum: 2 * nbra}
p.re.add(bra)
ebra := &instr{kind: iBra, braNum: 2*nbra + 1}
p.re.add(ebra)
if start == nil {
if end == nil {
p.error(ErrInternal)
return
}
start = ebra
} else {
end.next = ebra
}
bra.next = start
return bra, ebra
default:
c = p.checkBackslash()
p.nextc()
start = &instr{kind: iChar, char: c}
p.re.add(start)
return start, start
}
panic("unreachable")
}
// closure parses a term followed by an optional closure operator
// ('*', '+' or '?') and wires in the iAlt/iNop instructions that implement
// the repetition. A doubled closure such as ** is an error.
func (p *parser) closure() (start, end *instr) {
start, end = p.term()
if start == nil {
return
}
switch p.c() {
case '*':
// (start,end)*:
alt := &instr{kind: iAlt}
p.re.add(alt)
end.next = alt // after end, do alt
alt.left = start // alternate branch: return to start
start = alt // alt becomes new (start, end)
end = alt
case '+':
// (start,end)+:
alt := &instr{kind: iAlt}
p.re.add(alt)
end.next = alt // after end, do alt
alt.left = start // alternate branch: return to start
end = alt // start is unchanged; end is alt
case '?':
// (start,end)?:
alt := &instr{kind: iAlt}
p.re.add(alt)
nop := &instr{kind: iNop}
p.re.add(nop)
alt.left = start // alternate branch is start
alt.next = nop // follow on to nop
end.next = nop // after end, go to nop
start = alt // start is now alt
end = nop // end is nop pointed to by both branches
default:
return
}
switch p.nextc() {
case '*', '+', '?':
p.error(ErrBadClosure)
}
return
}
// concatenation parses a sequence of closures and links them end to end.
// An empty concatenation compiles to a single iNop.
func (p *parser) concatenation() (start, end *instr) {
for {
nstart, nend := p.closure()
switch {
case nstart == nil: // end of this concatenation
if start == nil { // this is the empty string
nop := p.re.add(&instr{kind: iNop})
return nop, nop
}
return
case start == nil: // this is first element of concatenation
start, end = nstart, nend
default:
end.next = nstart
end = nend
}
}
panic("unreachable")
}
// regexp parses a full expression: a concatenation, possibly followed by
// '|'-separated alternatives joined with iAlt (branch) and iNop (join)
// instructions.
func (p *parser) regexp() (start, end *instr) {
start, end = p.concatenation()
for {
switch p.c() {
default:
return
case '|':
p.nextc()
nstart, nend := p.concatenation()
alt := &instr{kind: iAlt}
p.re.add(alt)
alt.left = start
alt.next = nstart
nop := &instr{kind: iNop}
p.re.add(nop)
end.next = nop
nend.next = nop
start, end = alt, nop
}
}
panic("unreachable")
}
// unNop follows next links through any run of iNop instructions and
// returns the first non-nop instruction reached.
func unNop(i *instr) *instr {
	j := i
	for j.kind == iNop {
		j = j.next
	}
	return j
}

// eliminateNops retargets every next (and, for iAlt, left) pointer past any
// intervening iNop instructions so the executed machine never visits one.
func (re *Regexp) eliminateNops() {
	for _, inst := range re.inst {
		if inst.kind == iEnd {
			continue
		}
		inst.next = unNop(inst.next)
		if inst.kind != iAlt {
			continue
		}
		inst.left = unNop(inst.left)
	}
}
// dump prints the literal prefix and every instruction of the compiled
// program; it is a debugging aid used when debug is enabled.
func (re *Regexp) dump() {
print("prefix <", re.prefix, ">\n")
for _, inst := range re.inst {
print(inst.index, ": ")
inst.print()
if inst.kind != iEnd {
print(" -> ", inst.next.index)
}
print("\n")
}
}
// doParse compiles re.expr into the instruction list: it parses the
// expression, wires the iStart and iEnd instructions, removes nops and
// extracts the literal prefix. Parse errors escape as panics (see
// parser.error); Compile recovers them.
func (re *Regexp) doParse() {
p := newParser(re)
start := &instr{kind: iStart}
re.add(start)
s, e := p.regexp()
start.next = s
re.start = start
e.next = re.add(&instr{kind: iEnd})
if debug {
re.dump()
println()
}
re.eliminateNops()
if debug {
re.dump()
println()
}
re.setPrefix()
if debug {
re.dump()
println()
}
}
// Extract regular text from the beginning of the pattern,
// possibly after a leading iBOT.
// That text can be used by doExecute to speed up matching.
// setPrefix stores the text in re.prefix/re.prefixBytes and records the
// first non-literal instruction in re.prefixStart.
func (re *Regexp) setPrefix() {
var b []byte
var utf = make([]byte, utf8.UTFMax)
var inst *instr
// First instruction is start; skip that. Also skip any initial iBOT.
inst = re.inst[0].next
for inst.kind == iBOT {
inst = inst.next
}
Loop:
for ; inst.kind != iEnd; inst = inst.next {
// stop if this is not a char
if inst.kind != iChar {
break
}
// stop if this char can be followed by a match for an empty string,
// which includes closures, ^, and $.
switch inst.next.kind {
case iBOT, iEOT, iAlt:
break Loop
}
n := utf8.EncodeRune(utf, inst.char)
b = append(b, utf[0:n]...)
}
// point prefixStart instruction to first non-CHAR after prefix
re.prefixStart = inst
re.prefixBytes = b
re.prefix = string(b)
}
// String returns the source text used to compile the regular expression.
func (re *Regexp) String() string {
return re.expr
}
// Compile parses a regular expression and returns, if successful, a Regexp
// object that can be used to match against text.
func Compile(str string) (regexp *Regexp, error os.Error) {
regexp = new(Regexp)
// doParse will panic if there is a parse error.
// The deferred recover converts that panic into the error result.
defer func() {
if e := recover(); e != nil {
regexp = nil
error = e.(Error) // Will re-panic if error was not an Error, e.g. nil-pointer exception
}
}()
regexp.expr = str
regexp.inst = make([]*instr, 0, 10)
regexp.doParse()
return
}
// MustCompile is like Compile but panics if the expression cannot be parsed.
// It simplifies safe initialization of global variables holding compiled regular
// expressions.
func MustCompile(str string) *Regexp {
regexp, error := Compile(str)
if error != nil {
panic(`regexp: compiling "` + str + `": ` + error.String())
}
return regexp
}
// NumSubexp returns the number of parenthesized subexpressions in this Regexp.
func (re *Regexp) NumSubexp() int { return re.nbra }
// The match arena allows us to reduce the garbage generated by tossing
// match vectors away as we execute. Matches are ref counted and returned
// to a free list when no longer active. Increases a simple benchmark by 22X.
type matchArena struct {
head *matchVec // free list of reusable vectors
len int // length of match vector
pos int // current byte position in the input
atBOT bool // whether we're at beginning of text
atEOT bool // whether we're at end of text
}
// matchVec is a reference-counted vector of submatch positions.
type matchVec struct {
m []int // pairs of bracketing submatches. 0th is start,end
ref int // number of states currently referencing this vector
next *matchVec // link in the arena's free list
}
// new returns a match vector from the free list, refilling the list with a
// block of 10 when it is empty. The returned vector's contents are
// unspecified and its reference count is reset to 0.
func (a *matchArena) new() *matchVec {
if a.head == nil {
const N = 10
block := make([]matchVec, N)
for i := 0; i < N; i++ {
b := &block[i]
b.next = a.head
a.head = b
}
}
m := a.head
a.head = m.next
m.ref = 0
if m.m == nil {
m.m = make([]int, a.len)
}
return m
}
// free decrements m's reference count and returns it to the free list once
// no state refers to it any longer.
func (a *matchArena) free(m *matchVec) {
m.ref--
if m.ref == 0 {
m.next = a.head
a.head = m
}
}
// copy returns a fresh vector holding a copy of m's positions.
func (a *matchArena) copy(m *matchVec) *matchVec {
m1 := a.new()
copy(m1.m, m.m)
return m1
}
// noMatch returns a vector with every position set to -1 (no submatch seen)
// and a reference count of 1.
func (a *matchArena) noMatch() *matchVec {
m := a.new()
for i := range m.m {
m.m[i] = -1 // no match seen; catches cases like "a(b)?c" on "ac"
}
m.ref = 1
return m
}
// state is one live thread of the NFA: the instruction to execute next and
// the submatch positions accumulated so far.
type state struct {
inst *instr // next instruction to execute
prefixed bool // this match began with a fixed prefix
match *matchVec
}
// Append new state to to-do list. Leftmost-longest wins so avoid
// adding a state that's already active. The matchVec will be inc-ref'ed
// if it is assigned to a state.
func (a *matchArena) addState(s []state, inst *instr, prefixed bool, match *matchVec) []state {
switch inst.kind {
case iBOT:
if a.atBOT {
s = a.addState(s, inst.next, prefixed, match)
}
return s
case iEOT:
if a.atEOT {
s = a.addState(s, inst.next, prefixed, match)
}
return s
case iBra:
// Record the bracket position and continue past the marker.
match.m[inst.braNum] = a.pos
s = a.addState(s, inst.next, prefixed, match)
return s
}
l := len(s)
// States are inserted in order so it's sufficient to see if we have the same
// instruction; no need to see if existing match is earlier (it is).
for i := 0; i < l; i++ {
if s[i].inst == inst {
return s
}
}
s = append(s, state{inst, prefixed, match})
match.ref++
if inst.kind == iAlt {
s = a.addState(s, inst.left, prefixed, a.copy(match))
// give other branch a copy of this match vector
s = a.addState(s, inst.next, prefixed, a.copy(match))
}
return s
}
// input abstracts different representations of the input text. It provides
// one-character lookahead.
type input interface {
step(pos int) (rune int, width int) // advance one rune
canCheckPrefix() bool // can we look ahead without losing info?
hasPrefix(re *Regexp) bool // does the input start with re's literal prefix?
index(re *Regexp, pos int) int // offset of re's prefix at or after pos, or -1
}
// inputString scans a string.
type inputString struct {
	str string
}

// newInputString wraps str for use as a regexp input.
func newInputString(str string) *inputString {
	return &inputString{str: str}
}

// step returns the rune starting at pos and its width in bytes, or
// (endOfText, 0) once pos reaches the end of the string.
func (i *inputString) step(pos int) (int, int) {
	if pos >= len(i.str) {
		return endOfText, 0
	}
	return utf8.DecodeRuneInString(i.str[pos:])
}

// canCheckPrefix reports true: a string can be scanned ahead freely.
func (i *inputString) canCheckPrefix() bool {
	return true
}

// hasPrefix reports whether the input starts with re's literal prefix.
func (i *inputString) hasPrefix(re *Regexp) bool {
	return strings.HasPrefix(i.str, re.prefix)
}

// index returns the offset of re's literal prefix at or after pos, or -1.
func (i *inputString) index(re *Regexp, pos int) int {
	return strings.Index(i.str[pos:], re.prefix)
}
// inputBytes scans a byte slice.
type inputBytes struct {
	str []byte
}

// newInputBytes wraps str for use as a regexp input.
func newInputBytes(str []byte) *inputBytes {
	return &inputBytes{str: str}
}

// step returns the rune starting at pos and its width in bytes, or
// (endOfText, 0) once pos reaches the end of the slice.
func (i *inputBytes) step(pos int) (int, int) {
	if pos >= len(i.str) {
		return endOfText, 0
	}
	return utf8.DecodeRune(i.str[pos:])
}

// canCheckPrefix reports true: a byte slice can be scanned ahead freely.
func (i *inputBytes) canCheckPrefix() bool {
	return true
}

// hasPrefix reports whether the input starts with re's literal prefix.
func (i *inputBytes) hasPrefix(re *Regexp) bool {
	return bytes.HasPrefix(i.str, re.prefixBytes)
}

// index returns the offset of re's literal prefix at or after pos, or -1.
func (i *inputBytes) index(re *Regexp, pos int) int {
	return bytes.Index(i.str[pos:], re.prefixBytes)
}
// inputReader scans a RuneReader. Reads are strictly sequential: step only
// succeeds when asked for the position immediately after the last rune it
// returned.
type inputReader struct {
r io.RuneReader
atEOT bool // a read error (including EOF) has been seen
pos int // byte position just past the last rune read
}
func newInputReader(r io.RuneReader) *inputReader {
return &inputReader{r: r}
}
// step returns the next rune and its width. Requesting any position other
// than the current one, or reading after an error, yields (endOfText, 0).
func (i *inputReader) step(pos int) (int, int) {
if !i.atEOT && pos != i.pos {
return endOfText, 0
}
r, w, err := i.r.ReadRune()
if err != nil {
i.atEOT = true
return endOfText, 0
}
i.pos += w
return r, w
}
// canCheckPrefix reports false: a reader cannot be scanned ahead without
// consuming input.
func (i *inputReader) canCheckPrefix() bool {
return false
}
// hasPrefix always reports false; prefix checking is unsupported on readers.
func (i *inputReader) hasPrefix(re *Regexp) bool {
return false
}
// index always returns -1; prefix searching is unsupported on readers.
func (i *inputReader) index(re *Regexp, pos int) int {
return -1
}
// Search match starting from pos bytes into the input.
// doExecute runs the compiled NFA over input i using two worklists of
// states (the classic Thompson simulation): s[in] holds states to advance
// on the current character, s[out] collects states for the next one.
// It returns the match vector (pairs of byte offsets, entry 0 being the
// whole match) or nil if there is no match.
func (re *Regexp) doExecute(i input, pos int) []int {
	var s [2][]state
	s[0] = make([]state, 0, 10)
	s[1] = make([]state, 0, 10)
	in, out := 0, 1
	var final state
	found := false
	// An expression starting with '^' can only match at offset 0.
	anchored := re.inst[0].next.kind == iBOT
	if anchored && pos > 0 {
		return nil
	}
	// fast check for initial plain substring
	if i.canCheckPrefix() && re.prefix != "" {
		advance := 0
		if anchored {
			if !i.hasPrefix(re) {
				return nil
			}
		} else {
			advance = i.index(re, pos)
			if advance == -1 {
				return nil
			}
		}
		pos += advance
	}
	// We look one character ahead so we can match $, which checks whether
	// we are at EOT.
	nextChar, nextWidth := i.step(pos)
	arena := &matchArena{
		len: 2 * (re.nbra + 1),
		pos: pos,
		atBOT: pos == 0,
		atEOT: nextChar == endOfText,
	}
	for c, startPos := 0, pos; c != endOfText; {
		if !found && (pos == startPos || !anchored) {
			// prime the pump if we haven't seen a match yet
			match := arena.noMatch()
			match.m[0] = pos
			s[out] = arena.addState(s[out], re.start.next, false, match)
			arena.free(match) // if addState saved it, ref was incremented
		} else if len(s[out]) == 0 {
			// machine has completed
			break
		}
		in, out = out, in // old out state is new in state
		// clear out old state
		old := s[out]
		for _, state := range old {
			arena.free(state.match)
		}
		s[out] = old[0:0] // truncate state vector
		// Consume one character: c is the current rune, nextChar the lookahead.
		c = nextChar
		thisPos := pos
		pos += nextWidth
		nextChar, nextWidth = i.step(pos)
		arena.atEOT = nextChar == endOfText
		arena.atBOT = false
		arena.pos = pos
		for _, st := range s[in] {
			switch st.inst.kind {
			// iBOT/iEOT are handled inside addState (via the arena flags),
			// so they are no-ops here.
			case iBOT:
			case iEOT:
			case iChar:
				if c == st.inst.char {
					s[out] = arena.addState(s[out], st.inst.next, st.prefixed, st.match)
				}
			case iCharClass:
				if st.inst.cclass.matches(c) {
					s[out] = arena.addState(s[out], st.inst.next, st.prefixed, st.match)
				}
			case iAny:
				if c != endOfText {
					s[out] = arena.addState(s[out], st.inst.next, st.prefixed, st.match)
				}
			case iNotNL:
				if c != endOfText && c != '\n' {
					s[out] = arena.addState(s[out], st.inst.next, st.prefixed, st.match)
				}
			// Bracket and alternation bookkeeping also happens in addState.
			case iBra:
			case iAlt:
			case iEnd:
				// choose leftmost longest
				if !found || // first
					st.match.m[0] < final.match.m[0] || // leftmost
					(st.match.m[0] == final.match.m[0] && thisPos > final.match.m[1]) { // longest
					if final.match != nil {
						arena.free(final.match)
					}
					final = st
					final.match.ref++
					final.match.m[1] = thisPos
				}
				found = true
			default:
				st.inst.print()
				panic("unknown instruction in execute")
			}
		}
	}
	if final.match == nil {
		return nil
	}
	// if match found, back up start of match by width of prefix.
	if final.prefixed && len(final.match.m) > 0 {
		final.match.m[0] -= len(re.prefix)
	}
	return final.match.m
}
// LiteralPrefix returns a literal string that must begin any match
// of the regular expression re. It returns the boolean true if the
// literal string comprises the entire regular expression.
//
// It walks the straight-line program from the start instruction,
// collecting consecutive literal-character instructions; the walk stops
// at the first non-char instruction (prefix only) or at iEnd (complete).
func (re *Regexp) LiteralPrefix() (prefix string, complete bool) {
	c := make([]int, len(re.inst)-2) // minus start and end.
	// First instruction is start; skip that.
	i := 0
	for inst := re.inst[0].next; inst.kind != iEnd; inst = inst.next {
		// stop if this is not a char
		if inst.kind != iChar {
			return string(c[:i]), false
		}
		c[i] = inst.char
		i++
	}
	return string(c[:i]), true
}
// MatchReader returns whether the Regexp matches the text read by the
// RuneReader. The return value is a boolean: true for match, false for no
// match.
func (re *Regexp) MatchReader(r io.RuneReader) bool {
	m := re.doExecute(newInputReader(r), 0)
	return len(m) > 0
}

// MatchString returns whether the Regexp matches the string s.
// The return value is a boolean: true for match, false for no match.
func (re *Regexp) MatchString(s string) bool {
	m := re.doExecute(newInputString(s), 0)
	return len(m) > 0
}

// Match returns whether the Regexp matches the byte slice b.
// The return value is a boolean: true for match, false for no match.
func (re *Regexp) Match(b []byte) bool {
	m := re.doExecute(newInputBytes(b), 0)
	return len(m) > 0
}

// MatchReader checks whether a textual regular expression matches the text
// read by the RuneReader. More complicated queries need to use Compile and
// the full Regexp interface.
func MatchReader(pattern string, r io.RuneReader) (matched bool, error os.Error) {
	re, err := Compile(pattern)
	if err != nil {
		return false, err
	}
	matched = re.MatchReader(r)
	return matched, nil
}

// MatchString checks whether a textual regular expression
// matches a string. More complicated queries need
// to use Compile and the full Regexp interface.
func MatchString(pattern string, s string) (matched bool, error os.Error) {
	re, err := Compile(pattern)
	if err != nil {
		return false, err
	}
	matched = re.MatchString(s)
	return matched, nil
}

// Match checks whether a textual regular expression
// matches a byte slice. More complicated queries need
// to use Compile and the full Regexp interface.
func Match(pattern string, b []byte) (matched bool, error os.Error) {
	re, err := Compile(pattern)
	if err != nil {
		return false, err
	}
	matched = re.Match(b)
	return matched, nil
}
// ReplaceAllString returns a copy of src in which all matches for the Regexp
// have been replaced by repl. No support is provided for expressions
// (e.g. \1 or $1) in the replacement string.
func (re *Regexp) ReplaceAllString(src, repl string) string {
	// Delegate to the func-based variant with a constant replacer.
	replacer := func(string) string { return repl }
	return re.ReplaceAllStringFunc(src, replacer)
}
// ReplaceAllStringFunc returns a copy of src in which all matches for the
// Regexp have been replaced by the return value of function repl (whose
// first argument is the matched string). No support is provided for
// expressions (e.g. \1 or $1) in the replacement string.
func (re *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string {
	lastMatchEnd := 0 // end position of the most recent match
	searchPos := 0    // position where we next look for a match
	buf := new(bytes.Buffer)
	for searchPos <= len(src) {
		a := re.doExecute(newInputString(src), searchPos)
		if len(a) == 0 {
			break // no more matches
		}
		// Copy the unmatched characters before this match.
		io.WriteString(buf, src[lastMatchEnd:a[0]])
		// Now insert a copy of the replacement string, but not for a
		// match of the empty string immediately after another match.
		// (Otherwise, we get double replacement for patterns that
		// match both empty and nonempty strings.)
		if a[1] > lastMatchEnd || a[0] == 0 {
			io.WriteString(buf, repl(src[a[0]:a[1]]))
		}
		lastMatchEnd = a[1]
		// Advance past this match; always advance at least one character.
		_, width := utf8.DecodeRuneInString(src[searchPos:])
		if searchPos+width > a[1] {
			searchPos += width
		} else if searchPos+1 > a[1] {
			// This clause is only needed at the end of the input
			// string. In that case, DecodeRuneInString returns width=0.
			searchPos++
		} else {
			searchPos = a[1]
		}
	}
	// Copy the unmatched characters after the last match.
	io.WriteString(buf, src[lastMatchEnd:])
	return buf.String()
}
// ReplaceAll returns a copy of src in which all matches for the Regexp
// have been replaced by repl. No support is provided for expressions
// (e.g. \1 or $1) in the replacement text.
func (re *Regexp) ReplaceAll(src, repl []byte) []byte {
	// Delegate to the func-based variant with a constant replacer.
	replacer := func([]byte) []byte { return repl }
	return re.ReplaceAllFunc(src, replacer)
}
// ReplaceAllFunc returns a copy of src in which all matches for the
// Regexp have been replaced by the return value of function repl (whose
// first argument is the matched []byte). No support is provided for
// expressions (e.g. \1 or $1) in the replacement string.
func (re *Regexp) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte {
	lastMatchEnd := 0 // end position of the most recent match
	searchPos := 0    // position where we next look for a match
	buf := new(bytes.Buffer)
	for searchPos <= len(src) {
		a := re.doExecute(newInputBytes(src), searchPos)
		if len(a) == 0 {
			break // no more matches
		}
		// Copy the unmatched characters before this match.
		buf.Write(src[lastMatchEnd:a[0]])
		// Now insert a copy of the replacement string, but not for a
		// match of the empty string immediately after another match.
		// (Otherwise, we get double replacement for patterns that
		// match both empty and nonempty strings.)
		if a[1] > lastMatchEnd || a[0] == 0 {
			buf.Write(repl(src[a[0]:a[1]]))
		}
		lastMatchEnd = a[1]
		// Advance past this match; always advance at least one character.
		_, width := utf8.DecodeRune(src[searchPos:])
		if searchPos+width > a[1] {
			searchPos += width
		} else if searchPos+1 > a[1] {
			// This clause is only needed at the end of the input
			// slice. In that case, DecodeRune returns width=0.
			searchPos++
		} else {
			searchPos = a[1]
		}
	}
	// Copy the unmatched characters after the last match.
	buf.Write(src[lastMatchEnd:])
	return buf.Bytes()
}
// QuoteMeta returns a string that quotes all regular expression metacharacters
// inside the argument text; the returned string is a regular expression matching
// the literal text. For example, QuoteMeta(`[foo]`) returns `\[foo\]`.
func QuoteMeta(s string) string {
	// A byte loop is correct because all metacharacters are ASCII.
	// Fast path: scan for the first metacharacter; if there is none,
	// return s unchanged and avoid the allocation and copy entirely.
	i := 0
	for ; i < len(s); i++ {
		if special(int(s[i])) {
			break
		}
	}
	if i >= len(s) {
		return s
	}
	// Worst case: every remaining byte needs a backslash.
	b := make([]byte, 2*len(s))
	j := copy(b, s[:i]) // the already-scanned clean head
	for ; i < len(s); i++ {
		if special(int(s[i])) {
			b[j] = '\\'
			j++
		}
		b[j] = s[i]
		j++
	}
	return string(b[0:j])
}
// Find matches in slice b if b is non-nil, otherwise find matches in string s.
// allMatches drives all the 'All' routines: it repeatedly calls doExecute,
// delivering up to n non-overlapping match vectors to the deliver callback.
// Empty matches immediately following a previous match are suppressed so a
// pattern that matches both empty and nonempty strings does not double-count.
func (re *Regexp) allMatches(s string, b []byte, n int, deliver func([]int)) {
	var end int
	if b == nil {
		end = len(s)
	} else {
		end = len(b)
	}
	for pos, i, prevMatchEnd := 0, 0, -1; i < n && pos <= end; {
		var in input
		if b == nil {
			in = newInputString(s)
		} else {
			in = newInputBytes(b)
		}
		matches := re.doExecute(in, pos)
		if len(matches) == 0 {
			break
		}
		accept := true
		if matches[1] == pos {
			// We've found an empty match.
			if matches[0] == prevMatchEnd {
				// We don't allow an empty match right
				// after a previous match, so ignore it.
				accept = false
			}
			// Advance one rune so the scan makes progress.
			var width int
			// TODO: use step()
			if b == nil {
				_, width = utf8.DecodeRuneInString(s[pos:end])
			} else {
				_, width = utf8.DecodeRune(b[pos:end])
			}
			if width > 0 {
				pos += width
			} else {
				// At end of input: force the loop to terminate.
				pos = end + 1
			}
		} else {
			pos = matches[1]
		}
		prevMatchEnd = matches[1]
		if accept {
			deliver(matches)
			i++
		}
	}
}
// Find returns a slice holding the text of the leftmost match in b of the regular expression.
// A return value of nil indicates no match.
func (re *Regexp) Find(b []byte) []byte {
	if m := re.doExecute(newInputBytes(b), 0); m != nil {
		return b[m[0]:m[1]]
	}
	return nil
}

// FindIndex returns a two-element slice of integers defining the location of
// the leftmost match in b of the regular expression. The match itself is at
// b[loc[0]:loc[1]].
// A return value of nil indicates no match.
func (re *Regexp) FindIndex(b []byte) (loc []int) {
	if m := re.doExecute(newInputBytes(b), 0); m != nil {
		loc = m[0:2]
	}
	return
}

// FindString returns a string holding the text of the leftmost match in s of the regular
// expression. If there is no match, the return value is an empty string,
// but it will also be empty if the regular expression successfully matches
// an empty string. Use FindStringIndex or FindStringSubmatch if it is
// necessary to distinguish these cases.
func (re *Regexp) FindString(s string) string {
	if m := re.doExecute(newInputString(s), 0); m != nil {
		return s[m[0]:m[1]]
	}
	return ""
}

// FindStringIndex returns a two-element slice of integers defining the
// location of the leftmost match in s of the regular expression. The match
// itself is at s[loc[0]:loc[1]].
// A return value of nil indicates no match.
func (re *Regexp) FindStringIndex(s string) []int {
	if m := re.doExecute(newInputString(s), 0); m != nil {
		return m[0:2]
	}
	return nil
}

// FindReaderIndex returns a two-element slice of integers defining the
// location of the leftmost match of the regular expression in text read from
// the RuneReader. The match itself is at s[loc[0]:loc[1]]. A return
// value of nil indicates no match.
func (re *Regexp) FindReaderIndex(r io.RuneReader) []int {
	if m := re.doExecute(newInputReader(r), 0); m != nil {
		return m[0:2]
	}
	return nil
}
// FindSubmatch returns a slice of slices holding the text of the leftmost
// match of the regular expression in b and the matches, if any, of its
// subexpressions, as defined by the 'Submatch' descriptions in the package
// comment.
// A return value of nil indicates no match.
func (re *Regexp) FindSubmatch(b []byte) [][]byte {
	m := re.doExecute(newInputBytes(b), 0)
	if m == nil {
		return nil
	}
	groups := make([][]byte, len(m)/2)
	for i := 0; i < len(groups); i++ {
		start, end := m[2*i], m[2*i+1]
		if start >= 0 {
			groups[i] = b[start:end]
		}
	}
	return groups
}

// FindSubmatchIndex returns a slice holding the index pairs identifying the
// leftmost match of the regular expression in b and the matches, if any, of
// its subexpressions, as defined by the 'Submatch' and 'Index' descriptions
// in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindSubmatchIndex(b []byte) []int {
	m := re.doExecute(newInputBytes(b), 0)
	return m
}

// FindStringSubmatch returns a slice of strings holding the text of the
// leftmost match of the regular expression in s and the matches, if any, of
// its subexpressions, as defined by the 'Submatch' description in the
// package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindStringSubmatch(s string) []string {
	m := re.doExecute(newInputString(s), 0)
	if m == nil {
		return nil
	}
	groups := make([]string, len(m)/2)
	for i := 0; i < len(groups); i++ {
		start, end := m[2*i], m[2*i+1]
		if start >= 0 {
			groups[i] = s[start:end]
		}
	}
	return groups
}

// FindStringSubmatchIndex returns a slice holding the index pairs
// identifying the leftmost match of the regular expression in s and the
// matches, if any, of its subexpressions, as defined by the 'Submatch' and
// 'Index' descriptions in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindStringSubmatchIndex(s string) []int {
	m := re.doExecute(newInputString(s), 0)
	return m
}

// FindReaderSubmatchIndex returns a slice holding the index pairs
// identifying the leftmost match of the regular expression of text read by
// the RuneReader, and the matches, if any, of its subexpressions, as defined
// by the 'Submatch' and 'Index' descriptions in the package comment. A
// return value of nil indicates no match.
func (re *Regexp) FindReaderSubmatchIndex(r io.RuneReader) []int {
	m := re.doExecute(newInputReader(r), 0)
	return m
}
const startSize = 10 // The size at which to start a slice in the 'All' routines.
// FindAll is the 'All' version of Find; it returns a slice of all successive
// matches of the expression, as defined by the 'All' description in the
// package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAll(b []byte, n int) [][]byte {
	if n < 0 {
		n = len(b) + 1
	}
	var out [][]byte
	re.allMatches("", b, n, func(loc []int) {
		out = append(out, b[loc[0]:loc[1]])
	})
	if len(out) == 0 {
		return nil
	}
	return out
}

// FindAllIndex is the 'All' version of FindIndex; it returns a slice of all
// successive matches of the expression, as defined by the 'All' description
// in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllIndex(b []byte, n int) [][]int {
	if n < 0 {
		n = len(b) + 1
	}
	var out [][]int
	re.allMatches("", b, n, func(loc []int) {
		out = append(out, loc[0:2])
	})
	if len(out) == 0 {
		return nil
	}
	return out
}

// FindAllString is the 'All' version of FindString; it returns a slice of all
// successive matches of the expression, as defined by the 'All' description
// in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllString(s string, n int) []string {
	if n < 0 {
		n = len(s) + 1
	}
	var out []string
	re.allMatches(s, nil, n, func(loc []int) {
		out = append(out, s[loc[0]:loc[1]])
	})
	if len(out) == 0 {
		return nil
	}
	return out
}

// FindAllStringIndex is the 'All' version of FindStringIndex; it returns a
// slice of all successive matches of the expression, as defined by the 'All'
// description in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllStringIndex(s string, n int) [][]int {
	if n < 0 {
		n = len(s) + 1
	}
	var out [][]int
	re.allMatches(s, nil, n, func(loc []int) {
		out = append(out, loc[0:2])
	})
	if len(out) == 0 {
		return nil
	}
	return out
}
// FindAllSubmatch is the 'All' version of FindSubmatch; it returns a slice
// of all successive matches of the expression, as defined by the 'All'
// description in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllSubmatch(b []byte, n int) [][][]byte {
	if n < 0 {
		n = len(b) + 1
	}
	var out [][][]byte
	re.allMatches("", b, n, func(loc []int) {
		groups := make([][]byte, len(loc)/2)
		for j := 0; j < len(groups); j++ {
			start, end := loc[2*j], loc[2*j+1]
			if start >= 0 {
				groups[j] = b[start:end]
			}
		}
		out = append(out, groups)
	})
	if len(out) == 0 {
		return nil
	}
	return out
}

// FindAllSubmatchIndex is the 'All' version of FindSubmatchIndex; it returns
// a slice of all successive matches of the expression, as defined by the
// 'All' description in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllSubmatchIndex(b []byte, n int) [][]int {
	if n < 0 {
		n = len(b) + 1
	}
	var out [][]int
	re.allMatches("", b, n, func(loc []int) {
		out = append(out, loc)
	})
	if len(out) == 0 {
		return nil
	}
	return out
}

// FindAllStringSubmatch is the 'All' version of FindStringSubmatch; it
// returns a slice of all successive matches of the expression, as defined by
// the 'All' description in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllStringSubmatch(s string, n int) [][]string {
	if n < 0 {
		n = len(s) + 1
	}
	var out [][]string
	re.allMatches(s, nil, n, func(loc []int) {
		groups := make([]string, len(loc)/2)
		for j := 0; j < len(groups); j++ {
			start, end := loc[2*j], loc[2*j+1]
			if start >= 0 {
				groups[j] = s[start:end]
			}
		}
		out = append(out, groups)
	})
	if len(out) == 0 {
		return nil
	}
	return out
}

// FindAllStringSubmatchIndex is the 'All' version of
// FindStringSubmatchIndex; it returns a slice of all successive matches of
// the expression, as defined by the 'All' description in the package
// comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllStringSubmatchIndex(s string, n int) [][]int {
	if n < 0 {
		n = len(s) + 1
	}
	var out [][]int
	re.allMatches(s, nil, n, func(loc []int) {
		out = append(out, loc)
	})
	if len(out) == 0 {
		return nil
	}
	return out
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package regexp implements a simple regular expression library.
//
// The syntax of the regular expressions accepted is:
//
// regexp:
// concatenation { '|' concatenation }
// concatenation:
// { closure }
// closure:
// term [ '*' | '+' | '?' ]
// term:
// '^'
// '$'
// '.'
// character
// '[' [ '^' ] character-ranges ']'
// '(' regexp ')'
//
package regexp
import (
"bytes";
"container/vector";
"io";
"os";
"runtime";
"utf8";
)
// debug, when true, makes the compiler dump each program (see Regexp.dump)
// before and after NOP elimination.
var debug = false;
// Error codes returned by failures to parse an expression.
var (
	ErrInternal = os.NewError("internal error");
	ErrUnmatchedLpar = os.NewError("unmatched '('");
	ErrUnmatchedRpar = os.NewError("unmatched ')'");
	ErrUnmatchedLbkt = os.NewError("unmatched '['");
	ErrUnmatchedRbkt = os.NewError("unmatched ']'");
	ErrBadRange = os.NewError("bad range in character class");
	ErrExtraneousBackslash = os.NewError("extraneous backslash");
	ErrBadClosure = os.NewError("repeated closure (**, ++, etc.)");
	ErrBareClosure = os.NewError("closure applies to nothing");
	ErrBadBackslash = os.NewError("illegal backslash escape");
)
// An instruction executed by the NFA
type instr interface {
	kind() int; // the type of this instruction: _CHAR, _ANY, etc.
	next() instr; // the instruction to execute after this one
	setNext(i instr); // link this instruction to its successor
	index() int; // position of this instruction in the program
	setIndex(i int); // record the program position (set by Regexp.add)
	print(); // debugging dump of this instruction
}
// Fields and methods common to all instructions.
// Each concrete instruction type embeds common to get the linked-list
// plumbing of the instr interface for free.
type common struct {
	_next instr;
	_index int;
}
func (c *common) next() instr { return c._next }
func (c *common) setNext(i instr) { c._next = i }
func (c *common) index() int { return c._index }
func (c *common) setIndex(i int) { c._index = i }
// Regexp is the representation of a compiled regular expression.
// The public interface is entirely through methods.
type Regexp struct {
	expr string; // the original expression
	ch chan<- *Regexp; // reply channel when we're done
	error os.Error; // compile- or run-time error; nil if OK
	inst *vector.Vector; // the compiled program, a vector of instr
	start instr; // first instruction of the program (_Start)
	nbra int; // number of brackets in expression, for subexpressions
}
// Instruction kinds, as returned by instr.kind(). The execution loop
// in doExecute dispatches on these values.
const (
	_START // beginning of program
		= iota;
	_END; // end of program: success
	_BOT; // '^' beginning of text
	_EOT; // '$' end of text
	_CHAR; // 'a' regular character
	_CHARCLASS; // [a-z] character class
	_ANY; // '.' any character including newline
	_NOTNL; // [^\n] special case: any character but newline
	_BRA; // '(' parenthesized expression
	_EBRA; // ')'; end of '(' parenthesized expression
	_ALT; // '|' alternation
	_NOP; // do nothing; makes it easy to link without patching
)
// --- START start of program
type _Start struct {
	common
}
func (start *_Start) kind() int { return _START }
func (start *_Start) print() { print("start") }
// --- END end of program
type _End struct {
	common
}
func (end *_End) kind() int { return _END }
func (end *_End) print() { print("end") }
// --- BOT beginning of text
type _Bot struct {
	common
}
func (bot *_Bot) kind() int { return _BOT }
func (bot *_Bot) print() { print("bot") }
// --- EOT end of text
type _Eot struct {
	common
}
func (eot *_Eot) kind() int { return _EOT }
func (eot *_Eot) print() { print("eot") }
// --- CHAR a regular character
type _Char struct {
	common;
	char int; // the literal rune this instruction matches
}
func (char *_Char) kind() int { return _CHAR }
func (char *_Char) print() { print("char ", string(char.char)) }
// newChar allocates a _Char instruction matching the given rune.
func newChar(char int) *_Char {
	c := new(_Char);
	c.char = char;
	return c;
}
// --- CHARCLASS [a-z]
type _CharClass struct {
	common;
	char int;
	negate bool; // is character class negated? ([^a-z])
	// vector of int, stored pairwise: [a-z] is (a,z); x is (x,x):
	ranges *vector.IntVector;
}
func (cclass *_CharClass) kind() int { return _CHARCLASS }
// print dumps the class ranges for debugging.
func (cclass *_CharClass) print() {
	print("charclass");
	if cclass.negate {
		print(" (negated)");
	}
	for i := 0; i < cclass.ranges.Len(); i += 2 {
		l := cclass.ranges.At(i);
		r := cclass.ranges.At(i+1);
		if l == r {
			print(" [", string(l), "]");
		} else {
			print(" [", string(l), "-", string(r), "]");
		}
	}
}
// addRange appends the pair (a, b) to the class.
func (cclass *_CharClass) addRange(a, b int) {
	// range is a through b inclusive
	cclass.ranges.Push(a);
	cclass.ranges.Push(b);
}
// matches reports whether rune c belongs to the class, honoring negation.
func (cclass *_CharClass) matches(c int) bool {
	for i := 0; i < cclass.ranges.Len(); i = i+2 {
		min := cclass.ranges.At(i);
		max := cclass.ranges.At(i+1);
		if min <= c && c <= max {
			return !cclass.negate
		}
	}
	return cclass.negate
}
// newCharClass allocates an empty character class.
func newCharClass() *_CharClass {
	c := new(_CharClass);
	c.ranges = vector.NewIntVector(0);
	return c;
}
// --- ANY any character
type _Any struct {
	common
}
func (any *_Any) kind() int { return _ANY }
func (any *_Any) print() { print("any") }
// --- NOTNL any character but newline
type _NotNl struct {
	common
}
func (notnl *_NotNl) kind() int { return _NOTNL }
func (notnl *_NotNl) print() { print("notnl") }
// --- BRA parenthesized expression
type _Bra struct {
	common;
	n int; // subexpression number
}
func (bra *_Bra) kind() int { return _BRA }
func (bra *_Bra) print() { print("bra", bra.n); }
// --- EBRA end of parenthesized expression
type _Ebra struct {
	common;
	n int; // subexpression number
}
func (ebra *_Ebra) kind() int { return _EBRA }
func (ebra *_Ebra) print() { print("ebra ", ebra.n); }
// --- ALT alternation
type _Alt struct {
	common;
	left instr; // other branch
}
func (alt *_Alt) kind() int { return _ALT }
func (alt *_Alt) print() { print("alt(", alt.left.index(), ")"); }
// --- NOP no operation
type _Nop struct {
	common
}
func (nop *_Nop) kind() int { return _NOP }
func (nop *_Nop) print() { print("nop") }
// report error and exit compiling/executing goroutine
// setError records err on the Regexp, hands it back to Compile over the
// reply channel, and terminates the compiling goroutine with Goexit.
func (re *Regexp) setError(err os.Error) {
	re.error = err;
	re.ch <- re;
	runtime.Goexit();
}
// add appends instruction i to the program, assigning it the next free
// index, and returns i for convenient chaining by the parser.
func (re *Regexp) add(i instr) instr {
	i.setIndex(re.inst.Len());
	re.inst.Push(i);
	return i;
}
// parser holds the state of a single parse of a regular expression:
// the Regexp being built, the lookahead character, and the byte position.
type parser struct {
	re *Regexp;
	nlpar int; // number of unclosed lpars
	pos int; // byte offset of the next rune in re.expr
	ch int; // current lookahead rune, or endOfFile
}
// endOfFile is the lookahead sentinel once the expression is exhausted.
const endOfFile = -1
// c returns the current lookahead rune without consuming it.
func (p *parser) c() int {
	return p.ch;
}
// nextc consumes the lookahead and loads the next rune of the expression,
// returning it; returns endOfFile at the end of the input.
func (p *parser) nextc() int {
	if p.pos >= len(p.re.expr) {
		p.ch = endOfFile
	} else {
		c, w := utf8.DecodeRuneInString(p.re.expr[p.pos:len(p.re.expr)]);
		p.ch = c;
		p.pos += w;
	}
	return p.ch;
}
// newParser creates a parser for re's expression with the lookahead primed.
func newParser(re *Regexp) *parser {
	p := new(parser);
	p.re = re;
	p.nextc(); // load p.ch
	return p;
}
// iNULL is the nil instruction, used by the parser productions to signal
// "no instruction here".
var iNULL instr
// special reports whether c is a regular-expression metacharacter that
// must be backslash-escaped outside a character class.
func special(c int) bool {
	for _, meta := range `\.+*?()|[]^$` {
		if c == int(meta) {
			return true
		}
	}
	return false
}
// specialcclass reports whether c must be backslash-escaped inside a
// character class (a smaller set than special's).
func specialcclass(c int) bool {
	for _, meta := range `\-[]` {
		if c == int(meta) {
			return true
		}
	}
	return false
}
// charClass parses the body of a '[...]' character class (the caller has
// consumed the '[') and returns the compiled instruction: usually a
// _CharClass, but the special case [^\n] compiles to _NotNl. Parse errors
// abort via setError, which does not return.
func (p *parser) charClass() instr {
	cc := newCharClass();
	if p.c() == '^' {
		cc.negate = true;
		p.nextc();
	}
	// left is the pending lower bound of a range, or -1 if none.
	left := -1;
	for {
		switch c := p.c(); c {
		case ']', endOfFile:
			if left >= 0 {
				// A dangling "x-" with no upper bound.
				p.re.setError(ErrBadRange);
			}
			// Is it [^\n]?
			if cc.negate && cc.ranges.Len() == 2 &&
				cc.ranges.At(0) == '\n' && cc.ranges.At(1) == '\n' {
				nl := new(_NotNl);
				p.re.add(nl);
				return nl;
			}
			p.re.add(cc);
			return cc;
		case '-': // do this before backslash processing
			p.re.setError(ErrBadRange);
		case '\\':
			c = p.nextc();
			switch {
			case c == endOfFile:
				p.re.setError(ErrExtraneousBackslash);
			case c == 'n':
				c = '\n';
			case specialcclass(c):
				// c is as delivered
			default:
				p.re.setError(ErrBadBackslash);
			}
			fallthrough;
		default:
			p.nextc();
			switch {
			case left < 0: // first of pair
				if p.c() == '-' { // range
					p.nextc();
					left = c;
				} else { // single char
					cc.addRange(c, c);
				}
			case left <= c: // second of pair
				cc.addRange(left, c);
				left = -1;
			default:
				p.re.setError(ErrBadRange);
			}
		}
	}
	return iNULL
}
// term parses a single term of the grammar: an anchor, '.', a character
// class, a parenthesized subexpression, an escaped character, or a literal
// character. It returns the first and last instructions of the compiled
// fragment, or (iNULL, iNULL) when the term is empty ('|', ')' or end of
// expression). Parse errors abort via setError, which does not return.
func (p *parser) term() (start, end instr) {
	switch c := p.c(); c {
	case '|', endOfFile:
		return iNULL, iNULL;
	case '*', '+':
		p.re.setError(ErrBareClosure);
	case ')':
		if p.nlpar == 0 {
			p.re.setError(ErrUnmatchedRpar);
		}
		return iNULL, iNULL;
	case ']':
		p.re.setError(ErrUnmatchedRbkt);
	case '^':
		p.nextc();
		start = p.re.add(new(_Bot));
		return start, start;
	case '$':
		p.nextc();
		start = p.re.add(new(_Eot));
		return start, start;
	case '.':
		p.nextc();
		start = p.re.add(new(_Any));
		return start, start;
	case '[':
		p.nextc();
		start = p.charClass();
		if p.c() != ']' {
			p.re.setError(ErrUnmatchedLbkt);
		}
		p.nextc();
		return start, start;
	case '(':
		p.nextc();
		p.nlpar++;
		p.re.nbra++; // increment first so first subexpr is \1
		nbra := p.re.nbra;
		start, end = p.regexp();
		if p.c() != ')' {
			p.re.setError(ErrUnmatchedLpar);
		}
		p.nlpar--;
		p.nextc();
		// Wrap the subexpression in _Bra/_Ebra markers carrying its number.
		bra := new(_Bra);
		p.re.add(bra);
		ebra := new(_Ebra);
		p.re.add(ebra);
		bra.n = nbra;
		ebra.n = nbra;
		if start == iNULL {
			// Empty parentheses: the fragment is just the markers.
			if end == iNULL {
				p.re.setError(ErrInternal)
			}
			start = ebra
		} else {
			end.setNext(ebra);
		}
		bra.setNext(start);
		return bra, ebra;
	case '\\':
		c = p.nextc();
		switch {
		case c == endOfFile:
			p.re.setError(ErrExtraneousBackslash);
		case c == 'n':
			c = '\n';
		case special(c):
			// c is as delivered
		default:
			p.re.setError(ErrBadBackslash);
		}
		fallthrough;
	default:
		p.nextc();
		start = newChar(c);
		p.re.add(start);
		return start, start
	}
	panic("unreachable");
}
// closure parses a term optionally followed by a closure operator
// ('*', '+' or '?'), wiring in the appropriate _Alt/_Nop instructions.
// It returns the first and last instructions of the compiled fragment.
// A doubled closure such as ** or ++ is an error.
func (p *parser) closure() (start, end instr) {
	start, end = p.term();
	if start == iNULL {
		return
	}
	switch p.c() {
	case '*':
		// (start,end)*:
		alt := new(_Alt);
		p.re.add(alt);
		end.setNext(alt); // after end, do alt
		alt.left = start; // alternate branch: return to start
		start = alt; // alt becomes new (start, end)
		end = alt;
	case '+':
		// (start,end)+:
		alt := new(_Alt);
		p.re.add(alt);
		end.setNext(alt); // after end, do alt
		alt.left = start; // alternate branch: return to start
		end = alt; // start is unchanged; end is alt
	case '?':
		// (start,end)?:
		alt := new(_Alt);
		p.re.add(alt);
		nop := new(_Nop);
		p.re.add(nop);
		alt.left = start; // alternate branch is start
		alt.setNext(nop); // follow on to nop
		end.setNext(nop); // after end, go to nop
		start = alt; // start is now alt
		end = nop; // end is nop pointed to by both branches
	default:
		return
	}
	switch p.nextc() {
	case '*', '+', '?':
		p.re.setError(ErrBadClosure);
	}
	return
}
// concatenation parses a sequence of closures, chaining each fragment's
// end to the next fragment's start. An empty concatenation compiles to a
// single _Nop so callers always get a non-nil fragment back.
func (p *parser) concatenation() (start, end instr) {
	start, end = iNULL, iNULL;
	for {
		nstart, nend := p.closure();
		switch {
		case nstart == iNULL: // end of this concatenation
			if start == iNULL { // this is the empty string
				nop := p.re.add(new(_Nop));
				return nop, nop;
			}
			return;
		case start == iNULL: // this is first element of concatenation
			start, end = nstart, nend;
		default:
			end.setNext(nstart);
			end = nend;
		}
	}
	panic("unreachable");
}
// regexp parses the top production: concatenations separated by '|'.
// Each alternation adds an _Alt that forks between the accumulated
// fragment and the new one, with both branches rejoining at a shared _Nop.
func (p *parser) regexp() (start, end instr) {
	start, end = p.concatenation();
	for {
		switch p.c() {
		default:
			return;
		case '|':
			p.nextc();
			nstart, nend := p.concatenation();
			alt := new(_Alt);
			p.re.add(alt);
			alt.left = start;
			alt.setNext(nstart);
			nop := new(_Nop);
			p.re.add(nop);
			end.setNext(nop);
			nend.setNext(nop);
			start, end = alt, nop;
		}
	}
	panic("unreachable");
}
// unNop follows the next pointers of i past any _NOP instructions and
// returns the first instruction that does real work.
func unNop(i instr) instr {
	for {
		if i.kind() != _NOP {
			return i
		}
		i = i.next()
	}
}
// eliminateNops rewrites every instruction's next pointer (and each _Alt's
// left branch) to skip over _NOP placeholders inserted during parsing,
// so the executor never has to step through them.
func (re *Regexp) eliminateNops() {
	for i := 0; i < re.inst.Len(); i++ {
		inst := re.inst.At(i).(instr);
		if inst.kind() == _END {
			// _END has no successor to rewrite.
			continue
		}
		inst.setNext(unNop(inst.next()));
		if inst.kind() == _ALT {
			alt := inst.(*_Alt);
			alt.left = unNop(alt.left);
		}
	}
}
// dump prints the compiled program, one instruction per line with its
// index and successor, for debugging (enabled by the debug flag).
func (re *Regexp) dump() {
	for i := 0; i < re.inst.Len(); i++ {
		inst := re.inst.At(i).(instr);
		print(inst.index(), ": ");
		inst.print();
		if inst.kind() != _END {
			print(" -> ", inst.next().index())
		}
		print("\n");
	}
}
// doParse compiles re.expr into the instruction vector: it parses the
// expression, brackets the program with _Start and _End, and removes the
// _NOP placeholders. Parse errors abort the goroutine via setError.
func (re *Regexp) doParse() {
	p := newParser(re);
	start := new(_Start);
	re.add(start);
	s, e := p.regexp();
	start.setNext(s);
	re.start = start;
	e.setNext(re.add(new(_End)));
	if debug {
		re.dump();
		println();
	}
	re.eliminateNops();
	if debug {
		re.dump();
		println();
	}
}
// compiler is the body of the compiling goroutine: it builds the Regexp
// and sends it back on ch. On a parse error, setError sends the Regexp
// (with error set) and exits the goroutine before the final send here.
func compiler(str string, ch chan *Regexp) {
	re := new(Regexp);
	re.expr = str;
	re.inst = vector.New(0);
	re.ch = ch;
	re.doParse();
	ch <- re;
}
// Compile parses a regular expression and returns, if successful, a Regexp
// object that can be used to match against text.
func Compile(str string) (regexp *Regexp, error os.Error) {
	// Compile in a separate goroutine and wait for the result.
	// The goroutine lets the parser bail out of deep recursion on error
	// with runtime.Goexit rather than threading error returns everywhere.
	ch := make(chan *Regexp);
	go compiler(str, ch);
	re := <-ch;
	return re, re.error
}
// state is one live thread of the NFA simulation: the instruction it will
// execute next and the (sub)match offsets it has recorded so far.
type state struct {
	inst instr; // next instruction to execute
	match []int; // pairs of bracketing submatches. 0th is start,end
}
// Append new state to to-do list. Leftmost-longest wins so avoid
// adding a state that's already active.
func addState(s []state, inst instr, match []int) []state {
	index := inst.index();
	l := len(s);
	pos := match[0];
	// TODO: Once the state is a vector and we can do insert, have inputs always
	// go in order correctly and this "earlier" test is never necessary.
	for i := 0; i < l; i++ {
		if s[i].inst.index() == index && // same instruction
			s[i].match[0] < pos { // earlier match already going; leftmost wins
			return s
		}
	}
	// Grow by doubling when the backing array is full.
	if l == cap(s) {
		s1 := make([]state, 2*l)[0:l];
		for i := 0; i < l; i++ {
			s1[i] = s[i];
		}
		s = s1;
	}
	s = s[0:l+1];
	s[l].inst = inst;
	s[l].match = match;
	return s;
}
// Accepts either string or bytes - the logic is identical either way.
// If bytes == nil, scan str.
// doExecute runs the compiled NFA from pos using two state lists (the
// classic Thompson simulation): s[in] is advanced over the current rune
// into s[out]. Zero-width instructions (_BOT, _EOT, _BRA, _EBRA, _ALT)
// are expanded by appending to s[in] while it is being scanned. The
// returned slice is the match vector of offset pairs, or nil on no match.
func (re *Regexp) doExecute(str string, bytes []byte, pos int) []int {
	var s [2][]state; // TODO: use a vector when state values (not ptrs) can be vector elements
	s[0] = make([]state, 10)[0:0];
	s[1] = make([]state, 10)[0:0];
	in, out := 0, 1;
	var final state;
	found := false;
	end := len(str);
	if bytes != nil {
		end = len(bytes)
	}
	for pos <= end {
		if !found {
			// prime the pump if we haven't seen a match yet
			match := make([]int, 2*(re.nbra+1));
			for i := 0; i < len(match); i++ {
				match[i] = -1; // no match seen; catches cases like "a(b)?c" on "ac"
			}
			match[0] = pos;
			s[out] = addState(s[out], re.start.next(), match);
		}
		in, out = out, in; // old out state is new in state
		s[out] = s[out][0:0]; // clear out state
		if len(s[in]) == 0 {
			// machine has completed
			break;
		}
		// Decode the rune at pos; past the end, c stays endOfFile so the
		// zero-width instructions still get one final pass.
		charwidth := 1;
		c := endOfFile;
		if pos < end {
			if bytes == nil {
				c, charwidth = utf8.DecodeRuneInString(str[pos:end]);
			} else {
				c, charwidth = utf8.DecodeRune(bytes[pos:end]);
			}
		}
		for i := 0; i < len(s[in]); i++ {
			st := s[in][i];
			switch s[in][i].inst.kind() {
			case _BOT:
				if pos == 0 {
					s[in] = addState(s[in], st.inst.next(), st.match)
				}
			case _EOT:
				if pos == end {
					s[in] = addState(s[in], st.inst.next(), st.match)
				}
			case _CHAR:
				if c == st.inst.(*_Char).char {
					s[out] = addState(s[out], st.inst.next(), st.match)
				}
			case _CHARCLASS:
				if st.inst.(*_CharClass).matches(c) {
					s[out] = addState(s[out], st.inst.next(), st.match)
				}
			case _ANY:
				if c != endOfFile {
					s[out] = addState(s[out], st.inst.next(), st.match)
				}
			case _NOTNL:
				if c != endOfFile && c != '\n' {
					s[out] = addState(s[out], st.inst.next(), st.match)
				}
			case _BRA:
				// Record the start of subexpression n.
				n := st.inst.(*_Bra).n;
				st.match[2*n] = pos;
				s[in] = addState(s[in], st.inst.next(), st.match);
			case _EBRA:
				// Record the end of subexpression n.
				n := st.inst.(*_Ebra).n;
				st.match[2*n+1] = pos;
				s[in] = addState(s[in], st.inst.next(), st.match);
			case _ALT:
				s[in] = addState(s[in], st.inst.(*_Alt).left, st.match);
				// give other branch a copy of this match vector
				s1 := make([]int, 2*(re.nbra+1));
				for i := 0; i < len(s1); i++ {
					s1[i] = st.match[i]
				}
				s[in] = addState(s[in], st.inst.next(), s1);
			case _END:
				// choose leftmost longest
				if !found || // first
					st.match[0] < final.match[0] || // leftmost
					(st.match[0] == final.match[0] && pos > final.match[1]) { // longest
					final = st;
					final.match[1] = pos;
				}
				found = true;
			default:
				st.inst.print();
				panic("unknown instruction in execute");
			}
		}
		pos += charwidth;
	}
	return final.match;
}
// ExecuteString matches the Regexp against the string s.
// The return value is an array of integers, in pairs, identifying the positions of
// substrings matched by the expression.
// s[a[0]:a[1]] is the substring matched by the entire expression.
// s[a[2*i]:a[2*i+1]] for i > 0 is the substring matched by the ith parenthesized subexpression.
// A negative value means the subexpression did not match any element of the string.
// An empty array means "no match".
func (re *Regexp) ExecuteString(s string) (a []int) {
	return re.doExecute(s, nil, 0)
}
// Execute matches the Regexp against the byte slice b.
// The return value is an array of integers, in pairs, identifying the positions of
// subslices matched by the expression.
// b[a[0]:a[1]] is the subslice matched by the entire expression.
// b[a[2*i]:a[2*i+1]] for i > 0 is the subslice matched by the ith parenthesized subexpression.
// A negative value means the subexpression did not match any element of the slice.
// An empty array means "no match".
func (re *Regexp) Execute(b []byte) (a []int) {
	return re.doExecute("", b, 0)
}
// MatchString returns whether the Regexp matches the string s.
// The return value is a boolean: true for match, false for no match.
func (re *Regexp) MatchString(s string) bool {
	// doExecute returns nil on no match and len(nil) == 0.
	return len(re.doExecute(s, nil, 0)) > 0
}
// Match returns whether the Regexp matches the byte slice b.
// The return value is a boolean: true for match, false for no match.
func (re *Regexp) Match(b []byte) bool {
	return len(re.doExecute("", b, 0)) > 0
}
// MatchStrings matches the Regexp against the string s.
// The return value is an array of strings matched by the expression.
// a[0] is the substring matched by the entire expression.
// a[i] for i > 0 is the substring matched by the ith parenthesized subexpression.
// An empty array means ``no match''.
func (re *Regexp) MatchStrings(s string) (a []string) {
	r := re.doExecute(s, nil, 0);
	if r == nil {
		return nil
	}
	// r holds offset pairs; convert each matched pair to its substring.
	a = make([]string, len(r)/2);
	for i := 0; i < len(r); i += 2 {
		if r[i] != -1 { // -1 means no match for this subexpression
			a[i/2] = s[r[i] : r[i+1]]
		}
	}
	return
}
// MatchSlices matches the Regexp against the byte slice b.
// The return value is an array of subslices matched by the expression.
// a[0] is the subslice matched by the entire expression.
// a[i] for i > 0 is the subslice matched by the ith parenthesized subexpression.
// An empty array means ``no match''.
func (re *Regexp) MatchSlices(b []byte) (a [][]byte) {
	r := re.doExecute("", b, 0);
	if r == nil {
		return nil
	}
	// Note: the returned subslices alias b; they are views, not copies.
	a = make([][]byte, len(r)/2);
	for i := 0; i < len(r); i += 2 {
		if r[i] != -1 { // -1 means no match for this subexpression
			a[i/2] = b[r[i] : r[i+1]]
		}
	}
	return
}
// MatchString checks whether a textual regular expression
// matches a string. More complicated queries need
// to use Compile and the full Regexp interface.
func MatchString(pattern string, s string) (matched bool, error os.Error) {
	re, err := Compile(pattern);
	if err != nil {
		return false, err
	}
	return re.MatchString(s), nil
}
// Match checks whether a textual regular expression
// matches a byte slice. More complicated queries need
// to use Compile and the full Regexp interface.
func Match(pattern string, b []byte) (matched bool, error os.Error) {
	re, err := Compile(pattern);
	if err != nil {
		return false, err
	}
	return re.Match(b), nil
}
// ReplaceAllString returns a copy of src in which all matches for the Regexp
// have been replaced by repl. No support is provided for expressions
// (e.g. \1 or $1) in the replacement string.
func (re *Regexp) ReplaceAllString(src, repl string) string {
	lastMatchEnd := 0; // end position of the most recent match
	searchPos := 0; // position where we next look for a match
	buf := new(bytes.Buffer);
	for searchPos <= len(src) {
		a := re.doExecute(src, nil, searchPos);
		if len(a) == 0 {
			break; // no more matches
		}
		// Copy the unmatched characters before this match.
		io.WriteString(buf, src[lastMatchEnd:a[0]]);
		// Now insert a copy of the replacement string, but not for a
		// match of the empty string immediately after another match.
		// (Otherwise, we get double replacement for patterns that
		// match both empty and nonempty strings.)
		if a[1] > lastMatchEnd || a[0] == 0 {
			io.WriteString(buf, repl);
		}
		lastMatchEnd = a[1];
		// Advance past this match; always advance at least one character.
		_, width := utf8.DecodeRuneInString(src[searchPos:len(src)]);
		if searchPos + width > a[1] {
			searchPos += width;
		} else if searchPos + 1 > a[1] {
			// This clause is only needed at the end of the input
			// string. In that case, DecodeRuneInString returns width=0.
			searchPos++;
		} else {
			searchPos = a[1];
		}
	}
	// Copy the unmatched characters after the last match.
	io.WriteString(buf, src[lastMatchEnd:len(src)]);
	return buf.String();
}
// ReplaceAll returns a copy of src in which all matches for the Regexp
// have been replaced by repl. No support is provided for expressions
// (e.g. \1 or $1) in the replacement text.
// Byte-slice twin of ReplaceAllString; keep the two in sync.
func (re *Regexp) ReplaceAll(src, repl []byte) []byte {
	lastMatchEnd := 0; // end position of the most recent match
	searchPos := 0; // position where we next look for a match
	buf := new(bytes.Buffer);
	for searchPos <= len(src) {
		a := re.doExecute("", src, searchPos);
		if len(a) == 0 {
			break; // no more matches
		}
		// Copy the unmatched characters before this match.
		buf.Write(src[lastMatchEnd:a[0]]);
		// Now insert a copy of the replacement string, but not for a
		// match of the empty string immediately after another match.
		// (Otherwise, we get double replacement for patterns that
		// match both empty and nonempty strings.)
		if a[1] > lastMatchEnd || a[0] == 0 {
			buf.Write(repl);
		}
		lastMatchEnd = a[1];
		// Advance past this match; always advance at least one character.
		_, width := utf8.DecodeRune(src[searchPos:len(src)]);
		if searchPos + width > a[1] {
			searchPos += width;
		} else if searchPos + 1 > a[1] {
			// This clause is only needed at the end of the input
			// slice. In that case, DecodeRune returns width=0.
			searchPos++;
		} else {
			searchPos = a[1];
		}
	}
	// Copy the unmatched characters after the last match.
	buf.Write(src[lastMatchEnd:len(src)]);
	return buf.Bytes();
}
// QuoteMeta returns a string that quotes all regular expression metacharacters
// inside the argument text; the returned string is a regular expression matching
// the literal text. For example, QuoteMeta(`[foo]`) returns `\[foo\]`.
func QuoteMeta(s string) string {
	// Worst case every byte is escaped, so 2*len(s) always suffices.
	b := make([]byte, 2 * len(s));
	// A byte loop is correct because all metacharacters are ASCII.
	j := 0;
	for i := 0; i < len(s); i++ {
		if special(int(s[i])) {
			b[j] = '\\';
			j++;
		}
		b[j] = s[i];
		j++;
	}
	return string(b[0:j]);
}
// Find matches in slice b if b is non-nil, otherwise find matches in string s.
// deliver is invoked with the [start, end) byte offsets of each accepted
// match, at most n times. Empty matches immediately following a previous
// match are suppressed.
func (re *Regexp) allMatches(s string, b []byte, n int, deliver func(int, int)) {
	var end int;
	if b == nil {
		end = len(s);
	} else {
		end = len(b);
	}
	for pos, i, prevMatchEnd := 0, 0, -1; i < n && pos <= end; {
		matches := re.doExecute(s, b, pos);
		if len(matches) == 0 {
			break;
		}
		accept := true;
		if matches[1] == pos {
			// We've found an empty match.
			if matches[0] == prevMatchEnd {
				// We don't allow an empty match right
				// after a previous match, so ignore it.
				accept = false;
			}
			// Advance one rune so the scan makes progress.
			var width int;
			if b == nil {
				_, width = utf8.DecodeRuneInString(s[pos:end]);
			} else {
				_, width = utf8.DecodeRune(b[pos:end]);
			}
			if width > 0 {
				pos += width;
			} else {
				// At end of input: force loop termination.
				pos = end + 1;
			}
		} else {
			pos = matches[1];
		}
		prevMatchEnd = matches[1];
		if accept {
			deliver(matches[0], matches[1]);
			i++;
		}
	}
}
// AllMatches slices the byte slice b into substrings that are successive
// matches of the Regexp within b. If n > 0, the function returns at most n
// matches. Text that does not match the expression will be skipped. Empty
// matches abutting a preceding match are ignored. The function returns a slice
// containing the matching substrings.
func (re *Regexp) AllMatches(b []byte, n int) [][]byte {
	if n <= 0 {
		n = len(b) + 1; // non-positive n means no limit; len(b)+1 bounds the match count
	}
	result := make([][]byte, n);
	i := 0;
	re.allMatches("", b, n, func(start, end int) {
		result[i] = b[start:end];
		i++;
	});
	return result[0:i];
}
// AllMatchesString slices the string s into substrings that are successive
// matches of the Regexp within s. If n > 0, the function returns at most n
// matches. Text that does not match the expression will be skipped. Empty
// matches abutting a preceding match are ignored. The function returns a slice
// containing the matching substrings.
func (re *Regexp) AllMatchesString(s string, n int) []string {
	if n <= 0 {
		n = len(s) + 1; // non-positive n means no limit
	}
	result := make([]string, n);
	i := 0;
	re.allMatches(s, nil, n, func(start, end int) {
		result[i] = s[start:end];
		i++;
	});
	return result[0:i];
}
// AllMatchesIter slices the byte slice b into substrings that are successive
// matches of the Regexp within b. If n > 0, the function returns at most n
// matches. Text that does not match the expression will be skipped. Empty
// matches abutting a preceding match are ignored. The function returns a
// channel that iterates over the matching substrings.
func (re *Regexp) AllMatchesIter(b []byte, n int) (<-chan []byte) {
	if n <= 0 {
		n = len(b) + 1;
	}
	// Small buffer lets the producing goroutine run ahead of the consumer.
	c := make(chan []byte, 10);
	go func() {
		re.allMatches("", b, n, func(start, end int) {
			c <- b[start:end];
		});
		close(c); // tell the consumer iteration is complete
	}();
	return c;
}
// AllMatchesStringIter slices the string s into substrings that are successive
// matches of the Regexp within s. If n > 0, the function returns at most n
// matches. Text that does not match the expression will be skipped. Empty
// matches abutting a preceding match are ignored. The function returns a
// channel that iterates over the matching substrings.
func (re *Regexp) AllMatchesStringIter(s string, n int) (<-chan string) {
	if n <= 0 {
		n = len(s) + 1;
	}
	c := make(chan string, 10);
	go func() {
		re.allMatches(s, nil, n, func(start, end int) {
			c <- s[start:end];
		});
		close(c); // tell the consumer iteration is complete
	}();
	return c;
}
take goroutines out of regexp so that regexps can be created
during initialization.
R=rsc
CC=go-dev
http://go/go-review/1016023
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package regexp implements a simple regular expression library.
//
// The syntax of the regular expressions accepted is:
//
// regexp:
// concatenation { '|' concatenation }
// concatenation:
// { closure }
// closure:
// term [ '*' | '+' | '?' ]
// term:
// '^'
// '$'
// '.'
// character
// '[' [ '^' ] character-ranges ']'
// '(' regexp ')'
//
package regexp
import (
"bytes";
"container/vector";
"io";
"os";
"utf8";
)
// debug, when true, dumps the compiled program before and after
// nop-elimination (see doParse).
var debug = false;
// Error codes returned by failures to parse an expression.
var (
	ErrInternal = os.NewError("internal error");
	ErrUnmatchedLpar = os.NewError("unmatched '('");
	ErrUnmatchedRpar = os.NewError("unmatched ')'");
	ErrUnmatchedLbkt = os.NewError("unmatched '['");
	ErrUnmatchedRbkt = os.NewError("unmatched ']'");
	ErrBadRange = os.NewError("bad range in character class");
	ErrExtraneousBackslash = os.NewError("extraneous backslash");
	ErrBadClosure = os.NewError("repeated closure (**, ++, etc.)");
	ErrBareClosure = os.NewError("closure applies to nothing");
	ErrBadBackslash = os.NewError("illegal backslash escape");
)
// An instruction executed by the NFA
type instr interface {
	kind() int; // the type of this instruction: _CHAR, _ANY, etc.
	next() instr; // the instruction to execute after this one
	setNext(i instr);
	index() int;
	setIndex(i int);
	print();
}
// Fields and methods common to all instructions
// Embedded by every concrete instruction type to supply the
// next-link and program-index plumbing.
type common struct {
	_next instr;
	_index int;
}
func (c *common) next() instr { return c._next }
func (c *common) setNext(i instr) { c._next = i }
func (c *common) index() int { return c._index }
func (c *common) setIndex(i int) { c._index = i }
// Regexp is the representation of a compiled regular expression.
// The public interface is entirely through methods.
type Regexp struct {
	expr string; // the original expression
	inst *vector.Vector; // the compiled program, a vector of instr
	start instr; // first instruction (_START)
	nbra int; // number of brackets in expression, for subexpressions
}
// Instruction opcodes, returned by instr.kind().
const (
	_START // beginning of program
	= iota;
	_END; // end of program: success
	_BOT; // '^' beginning of text
	_EOT; // '$' end of text
	_CHAR; // 'a' regular character
	_CHARCLASS; // [a-z] character class
	_ANY; // '.' any character including newline
	_NOTNL; // [^\n] special case: any character but newline
	_BRA; // '(' parenthesized expression
	_EBRA; // ')'; end of '(' parenthesized expression
	_ALT; // '|' alternation
	_NOP; // do nothing; makes it easy to link without patching
)
// Concrete instruction types. Each embeds common for the linked-list
// plumbing and implements kind() and print().
// --- START start of program
type _Start struct {
	common
}
func (start *_Start) kind() int { return _START }
func (start *_Start) print() { print("start") }
// --- END end of program
type _End struct {
	common
}
func (end *_End) kind() int { return _END }
func (end *_End) print() { print("end") }
// --- BOT beginning of text
type _Bot struct {
	common
}
func (bot *_Bot) kind() int { return _BOT }
func (bot *_Bot) print() { print("bot") }
// --- EOT end of text
type _Eot struct {
	common
}
func (eot *_Eot) kind() int { return _EOT }
func (eot *_Eot) print() { print("eot") }
// --- CHAR a regular character
type _Char struct {
	common;
	char int; // the rune this instruction matches
}
func (char *_Char) kind() int { return _CHAR }
func (char *_Char) print() { print("char ", string(char.char)) }
func newChar(char int) *_Char {
	c := new(_Char);
	c.char = char;
	return c;
}
// --- CHARCLASS [a-z]
type _CharClass struct {
	common;
	char int;
	negate bool; // is character class negated? ([^a-z])
	// vector of int, stored pairwise: [a-z] is (a,z); x is (x,x):
	ranges *vector.IntVector;
}
func (cclass *_CharClass) kind() int { return _CHARCLASS }
func (cclass *_CharClass) print() {
	print("charclass");
	if cclass.negate {
		print(" (negated)");
	}
	for i := 0; i < cclass.ranges.Len(); i += 2 {
		l := cclass.ranges.At(i);
		r := cclass.ranges.At(i+1);
		if l == r {
			print(" [", string(l), "]");
		} else {
			print(" [", string(l), "-", string(r), "]");
		}
	}
}
func (cclass *_CharClass) addRange(a, b int) {
	// range is a through b inclusive
	cclass.ranges.Push(a);
	cclass.ranges.Push(b);
}
// matches reports whether c falls in any stored range, honoring negation.
func (cclass *_CharClass) matches(c int) bool {
	for i := 0; i < cclass.ranges.Len(); i = i+2 {
		min := cclass.ranges.At(i);
		max := cclass.ranges.At(i+1);
		if min <= c && c <= max {
			return !cclass.negate
		}
	}
	return cclass.negate
}
func newCharClass() *_CharClass {
	c := new(_CharClass);
	c.ranges = vector.NewIntVector(0);
	return c;
}
// --- ANY any character
type _Any struct {
	common
}
func (any *_Any) kind() int { return _ANY }
func (any *_Any) print() { print("any") }
// --- NOTNL any character but newline
type _NotNl struct {
	common
}
func (notnl *_NotNl) kind() int { return _NOTNL }
func (notnl *_NotNl) print() { print("notnl") }
// --- BRA parenthesized expression
type _Bra struct {
	common;
	n int; // subexpression number
}
func (bra *_Bra) kind() int { return _BRA }
func (bra *_Bra) print() { print("bra", bra.n); }
// --- EBRA end of parenthesized expression
type _Ebra struct {
	common;
	n int; // subexpression number
}
func (ebra *_Ebra) kind() int { return _EBRA }
func (ebra *_Ebra) print() { print("ebra ", ebra.n); }
// --- ALT alternation
type _Alt struct {
	common;
	left instr; // other branch
}
func (alt *_Alt) kind() int { return _ALT }
func (alt *_Alt) print() { print("alt(", alt.left.index(), ")"); }
// --- NOP no operation
type _Nop struct {
	common
}
func (nop *_Nop) kind() int { return _NOP }
func (nop *_Nop) print() { print("nop") }
// add appends an instruction to the program, assigning its index.
func (re *Regexp) add(i instr) instr {
	i.setIndex(re.inst.Len());
	re.inst.Push(i);
	return i;
}
// parser holds the state of a single parse of an expression.
type parser struct {
	re *Regexp; // the Regexp being built
	error os.Error; // sticky: first error encountered
	nlpar int; // number of unclosed lpars
	pos int; // byte offset of the next rune in re.expr
	ch int; // current rune, or endOfFile
}
// endOfFile is the sentinel rune returned when the input is exhausted.
const endOfFile = -1
// c returns the current input character without consuming it.
func (p *parser) c() int {
	return p.ch;
}
// nextc advances to and returns the next input rune (endOfFile at the end).
func (p *parser) nextc() int {
	if p.pos >= len(p.re.expr) {
		p.ch = endOfFile
	} else {
		c, w := utf8.DecodeRuneInString(p.re.expr[p.pos:len(p.re.expr)]);
		p.ch = c;
		p.pos += w;
	}
	return p.ch;
}
func newParser(re *Regexp) *parser {
	p := new(parser);
	p.re = re;
	p.nextc(); // load p.ch
	return p;
}
// special reports whether c is a regular expression metacharacter —
// one that must be backslash-escaped to be matched literally.
func special(c int) bool {
	const meta = `\.+*?()|[]^$`;
	for _, r := range meta {
		if int(r) == c {
			return true
		}
	}
	return false
}
// specialcclass reports whether c needs a backslash escape when it
// appears inside a character class ([...]).
func specialcclass(c int) bool {
	const meta = `\-[]`;
	for _, r := range meta {
		if int(r) == c {
			return true
		}
	}
	return false
}
// charClass parses the interior of a character class (after the '[')
// and returns the compiled instruction, or nil after setting p.error.
func (p *parser) charClass() instr {
	cc := newCharClass();
	if p.c() == '^' {
		cc.negate = true;
		p.nextc();
	}
	left := -1; // pending lower bound of an a-b range; -1 means none
	for {
		switch c := p.c(); c {
		case ']', endOfFile:
			// A dangling range lower bound ("[a-]") is an error.
			if left >= 0 {
				p.error = ErrBadRange;
				return nil;
			}
			// Is it [^\n]?
			if cc.negate && cc.ranges.Len() == 2 &&
				cc.ranges.At(0) == '\n' && cc.ranges.At(1) == '\n' {
				nl := new(_NotNl);
				p.re.add(nl);
				return nl;
			}
			p.re.add(cc);
			return cc;
		case '-': // do this before backslash processing
			p.error = ErrBadRange;
			return nil;
		case '\\':
			c = p.nextc();
			switch {
			case c == endOfFile:
				p.error = ErrExtraneousBackslash;
				return nil;
			case c == 'n':
				c = '\n';
			case specialcclass(c):
				// c is as delivered
			default:
				p.error = ErrBadBackslash;
				return nil;
			}
			fallthrough;
		default:
			p.nextc();
			switch {
			case left < 0: // first of pair
				if p.c() == '-' { // range
					p.nextc();
					left = c;
				} else { // single char
					cc.addRange(c, c);
				}
			case left <= c: // second of pair
				cc.addRange(left, c);
				left = -1;
			default: // descending range like [z-a]
				p.error = ErrBadRange;
				return nil;
			}
		}
	}
	return nil
}
// term parses a single term of the grammar ('^', '$', '.', a character,
// a character class, or a parenthesized expression) and returns the
// (start, end) instructions of the compiled fragment. A nil start with
// nil p.error signals the end of the enclosing production.
func (p *parser) term() (start, end instr) {
	// term() is the leaf of the recursion, so it's sufficient to pick off the
	// error state here for early exit.
	// The other functions (closure(), concatenation() etc.) assume
	// it's safe to recur to here.
	if p.error != nil {
		return
	}
	switch c := p.c(); c {
	case '|', endOfFile:
		return nil, nil;
	case '*', '+':
		p.error = ErrBareClosure;
		return;
	case ')':
		if p.nlpar == 0 {
			p.error = ErrUnmatchedRpar;
			return;
		}
		return nil, nil;
	case ']':
		p.error = ErrUnmatchedRbkt;
		return;
	case '^':
		p.nextc();
		start = p.re.add(new(_Bot));
		return start, start;
	case '$':
		p.nextc();
		start = p.re.add(new(_Eot));
		return start, start;
	case '.':
		p.nextc();
		start = p.re.add(new(_Any));
		return start, start;
	case '[':
		p.nextc();
		start = p.charClass();
		if p.error != nil {
			return;
		}
		if p.c() != ']' {
			p.error = ErrUnmatchedLbkt;
			return;
		}
		p.nextc();
		return start, start;
	case '(':
		p.nextc();
		p.nlpar++;
		p.re.nbra++; // increment first so first subexpr is \1
		nbra := p.re.nbra;
		start, end = p.regexp();
		if p.c() != ')' {
			p.error = ErrUnmatchedLpar;
			return;
		}
		p.nlpar--;
		p.nextc();
		// Wrap the fragment in _BRA/_EBRA so execution records submatches.
		bra := new(_Bra);
		p.re.add(bra);
		ebra := new(_Ebra);
		p.re.add(ebra);
		bra.n = nbra;
		ebra.n = nbra;
		if start == nil {
			// Empty parenthesized expression: bra links straight to ebra.
			if end == nil {
				p.error = ErrInternal;
				return;
			}
			start = ebra
		} else {
			end.setNext(ebra);
		}
		bra.setNext(start);
		return bra, ebra;
	case '\\':
		c = p.nextc();
		switch {
		case c == endOfFile:
			p.error = ErrExtraneousBackslash;
			return;
		case c == 'n':
			c = '\n';
		case special(c):
			// c is as delivered
		default:
			p.error = ErrBadBackslash;
			return;
		}
		fallthrough;
	default:
		p.nextc();
		start = newChar(c);
		p.re.add(start);
		return start, start
	}
	panic("unreachable");
}
// closure parses a term optionally followed by a closure operator
// ('*', '+' or '?'), wiring in the _Alt/_Nop instructions that
// implement repetition.
func (p *parser) closure() (start, end instr) {
	start, end = p.term();
	if start == nil || p.error != nil {
		return
	}
	switch p.c() {
	case '*':
		// (start,end)*:
		alt := new(_Alt);
		p.re.add(alt);
		end.setNext(alt); // after end, do alt
		alt.left = start; // alternate branch: return to start
		start = alt; // alt becomes new (start, end)
		end = alt;
	case '+':
		// (start,end)+:
		alt := new(_Alt);
		p.re.add(alt);
		end.setNext(alt); // after end, do alt
		alt.left = start; // alternate branch: return to start
		end = alt; // start is unchanged; end is alt
	case '?':
		// (start,end)?:
		alt := new(_Alt);
		p.re.add(alt);
		nop := new(_Nop);
		p.re.add(nop);
		alt.left = start; // alternate branch is start
		alt.setNext(nop); // follow on to nop
		end.setNext(nop); // after end, go to nop
		start = alt; // start is now alt
		end = nop; // end is nop pointed to by both branches
	default:
		return
	}
	// A second closure operator in a row (e.g. "a**") is an error.
	switch p.nextc() {
	case '*', '+', '?':
		p.error = ErrBadClosure;
	}
	return
}
// concatenation parses a sequence of closures, linking each fragment's
// end to the next fragment's start.
func (p *parser) concatenation() (start, end instr) {
	for {
		nstart, nend := p.closure();
		if p.error != nil {
			return
		}
		switch {
		case nstart == nil: // end of this concatenation
			if start == nil { // this is the empty string
				nop := p.re.add(new(_Nop));
				return nop, nop;
			}
			return;
		case start == nil: // this is first element of concatenation
			start, end = nstart, nend;
		default:
			end.setNext(nstart);
			end = nend;
		}
	}
	panic("unreachable");
}
// regexp parses the top production: concatenations separated by '|',
// joined with _Alt (branch) and _Nop (join) instructions.
func (p *parser) regexp() (start, end instr) {
	start, end = p.concatenation();
	if p.error != nil {
		return
	}
	for {
		switch p.c() {
		default:
			return;
		case '|':
			p.nextc();
			nstart, nend := p.concatenation();
			if p.error != nil {
				return
			}
			alt := new(_Alt);
			p.re.add(alt);
			alt.left = start;
			alt.setNext(nstart);
			nop := new(_Nop);
			p.re.add(nop);
			end.setNext(nop);
			nend.setNext(nop);
			start, end = alt, nop;
		}
	}
	panic("unreachable");
}
// unNop follows next links through any chain of _NOP instructions
// starting at i and returns the first instruction that does real work.
func unNop(i instr) instr {
	if i.kind() != _NOP {
		return i
	}
	return unNop(i.next())
}
// eliminateNops splices _NOP instructions out of every next and
// alt-left link so the executor never has to step through them.
// The _NOPs themselves stay in re.inst; only the links bypass them.
func (re *Regexp) eliminateNops() {
	for i := 0; i < re.inst.Len(); i++ {
		inst := re.inst.At(i).(instr);
		if inst.kind() == _END {
			continue // _END has no next link
		}
		inst.setNext(unNop(inst.next()));
		if inst.kind() == _ALT {
			alt := inst.(*_Alt);
			alt.left = unNop(alt.left);
		}
	}
}
// dump prints the compiled program, one instruction per line, for
// debugging (enabled by the package-level debug flag).
func (re *Regexp) dump() {
	for i := 0; i < re.inst.Len(); i++ {
		inst := re.inst.At(i).(instr);
		print(inst.index(), ": ");
		inst.print();
		if inst.kind() != _END {
			print(" -> ", inst.next().index())
		}
		print("\n");
	}
}
// doParse compiles re.expr into the instruction list, setting re.start.
// It returns the first parse error encountered, or nil on success.
func (re *Regexp) doParse() os.Error{
	p := newParser(re);
	start := new(_Start);
	re.add(start);
	s, e := p.regexp();
	if p.error != nil {
		return p.error;
	}
	start.setNext(s);
	re.start = start;
	e.setNext(re.add(new(_End)));
	if debug {
		re.dump();
		println();
	}
	// Splice the parser's _NOP placeholders out of the links.
	re.eliminateNops();
	if debug {
		re.dump();
		println();
	}
	return p.error;
}
// Compile parses a regular expression and returns, if successful, a Regexp
// object that can be used to match against text.
// Unlike the earlier version, this runs entirely in the caller's
// goroutine, so it is safe to call during initialization.
func Compile(str string) (regexp *Regexp, error os.Error) {
	regexp = new(Regexp);
	regexp.expr = str;
	regexp.inst = vector.New(0);
	error = regexp.doParse();
	return;
}
// MustCompile is like Compile but panics if the expression cannot be parsed.
// It simplifies safe initialization of global variables holding compiled regular
// expressions.
func MustCompile(str string) *Regexp {
	regexp, error := Compile(str);
	if error != nil {
		panicln(`regexp: compiling "`, str, `": `, error);
	}
	return regexp;
}
// state is one live thread of the NFA simulation: the instruction it
// will execute next plus the submatch offsets recorded so far.
type state struct {
	inst instr; // next instruction to execute
	match []int; // pairs of bracketing submatches. 0th is start,end
}
// Append new state to to-do list. Leftmost-longest wins so avoid
// adding a state that's already active.
func addState(s []state, inst instr, match []int) []state {
	index := inst.index();
	l := len(s);
	pos := match[0];
	// TODO: Once the state is a vector and we can do insert, have inputs always
	// go in order correctly and this "earlier" test is never necessary.
	for i := 0; i < l; i++ {
		if s[i].inst.index() == index && // same instruction
			s[i].match[0] < pos { // earlier match already going; leftmost wins
			return s
		}
	}
	// Grow the backing array by hand (doubling); this dialect has no append.
	// NOTE(review): assumes cap(s) > 0 — callers start with capacity 10.
	if l == cap(s) {
		s1 := make([]state, 2*l)[0:l];
		for i := 0; i < l; i++ {
			s1[i] = s[i];
		}
		s = s1;
	}
	s = s[0:l+1];
	s[l].inst = inst;
	s[l].match = match;
	return s;
}
// Accepts either string or bytes - the logic is identical either way.
// If bytes == nil, scan str.
// Returns the winning match vector (pairs of byte offsets, -1 for
// subexpressions that did not match) or nil if there was no match.
func (re *Regexp) doExecute(str string, bytes []byte, pos int) []int {
	var s [2][]state; // TODO: use a vector when state values (not ptrs) can be vector elements
	s[0] = make([]state, 10)[0:0];
	s[1] = make([]state, 10)[0:0];
	in, out := 0, 1;
	var final state; // leftmost-longest match seen so far
	found := false;
	end := len(str);
	if bytes != nil {
		end = len(bytes)
	}
	// One iteration per input position; pos may land past end once to let
	// $ and pending _END states fire at end-of-input.
	for pos <= end {
		if !found {
			// prime the pump if we haven't seen a match yet
			match := make([]int, 2*(re.nbra+1));
			for i := 0; i < len(match); i++ {
				match[i] = -1; // no match seen; catches cases like "a(b)?c" on "ac"
			}
			match[0] = pos;
			s[out] = addState(s[out], re.start.next(), match);
		}
		in, out = out, in; // old out state is new in state
		s[out] = s[out][0:0]; // clear out state
		if len(s[in]) == 0 {
			// machine has completed
			break;
		}
		charwidth := 1;
		c := endOfFile;
		if pos < end {
			if bytes == nil {
				c, charwidth = utf8.DecodeRuneInString(str[pos:end]);
			} else {
				c, charwidth = utf8.DecodeRune(bytes[pos:end]);
			}
		}
		for i := 0; i < len(s[in]); i++ {
			st := s[in][i];
			// Zero-width opcodes feed back into s[in] (processed this
			// position); consuming opcodes feed s[out] (next position).
			switch s[in][i].inst.kind() {
			case _BOT:
				if pos == 0 {
					s[in] = addState(s[in], st.inst.next(), st.match)
				}
			case _EOT:
				if pos == end {
					s[in] = addState(s[in], st.inst.next(), st.match)
				}
			case _CHAR:
				if c == st.inst.(*_Char).char {
					s[out] = addState(s[out], st.inst.next(), st.match)
				}
			case _CHARCLASS:
				if st.inst.(*_CharClass).matches(c) {
					s[out] = addState(s[out], st.inst.next(), st.match)
				}
			case _ANY:
				if c != endOfFile {
					s[out] = addState(s[out], st.inst.next(), st.match)
				}
			case _NOTNL:
				if c != endOfFile && c != '\n' {
					s[out] = addState(s[out], st.inst.next(), st.match)
				}
			case _BRA:
				n := st.inst.(*_Bra).n;
				st.match[2*n] = pos;
				s[in] = addState(s[in], st.inst.next(), st.match);
			case _EBRA:
				n := st.inst.(*_Ebra).n;
				st.match[2*n+1] = pos;
				s[in] = addState(s[in], st.inst.next(), st.match);
			case _ALT:
				s[in] = addState(s[in], st.inst.(*_Alt).left, st.match);
				// give other branch a copy of this match vector
				s1 := make([]int, 2*(re.nbra+1));
				for i := 0; i < len(s1); i++ {
					s1[i] = st.match[i]
				}
				s[in] = addState(s[in], st.inst.next(), s1);
			case _END:
				// choose leftmost longest
				if !found || // first
					st.match[0] < final.match[0] || // leftmost
					(st.match[0] == final.match[0] && pos > final.match[1]) { // longest
					final = st;
					final.match[1] = pos;
				}
				found = true;
			default:
				st.inst.print();
				panic("unknown instruction in execute");
			}
		}
		pos += charwidth;
	}
	// final.match is nil if no _END state was ever reached.
	return final.match;
}
// ExecuteString matches the Regexp against the string s.
// The return value is an array of integers, in pairs, identifying the positions of
// substrings matched by the expression.
// s[a[0]:a[1]] is the substring matched by the entire expression.
// s[a[2*i]:a[2*i+1]] for i > 0 is the substring matched by the ith parenthesized subexpression.
// A negative value means the subexpression did not match any element of the string.
// An empty array means "no match".
func (re *Regexp) ExecuteString(s string) (a []int) {
	return re.doExecute(s, nil, 0)
}
// Execute matches the Regexp against the byte slice b.
// The return value is an array of integers, in pairs, identifying the positions of
// subslices matched by the expression.
// b[a[0]:a[1]] is the subslice matched by the entire expression.
// b[a[2*i]:a[2*i+1]] for i > 0 is the subslice matched by the ith parenthesized subexpression.
// A negative value means the subexpression did not match any element of the slice.
// An empty array means "no match".
func (re *Regexp) Execute(b []byte) (a []int) {
	return re.doExecute("", b, 0)
}
// MatchString returns whether the Regexp matches the string s.
// The return value is a boolean: true for match, false for no match.
func (re *Regexp) MatchString(s string) bool {
	// doExecute returns nil on no match and len(nil) == 0.
	return len(re.doExecute(s, nil, 0)) > 0
}
// Match returns whether the Regexp matches the byte slice b.
// The return value is a boolean: true for match, false for no match.
func (re *Regexp) Match(b []byte) bool {
	return len(re.doExecute("", b, 0)) > 0
}
// MatchStrings matches the Regexp against the string s.
// The return value is an array of strings matched by the expression.
// a[0] is the substring matched by the entire expression.
// a[i] for i > 0 is the substring matched by the ith parenthesized subexpression.
// An empty array means ``no match''.
func (re *Regexp) MatchStrings(s string) (a []string) {
	r := re.doExecute(s, nil, 0);
	if r == nil {
		return nil
	}
	// r holds offset pairs; convert each matched pair to its substring.
	a = make([]string, len(r)/2);
	for i := 0; i < len(r); i += 2 {
		if r[i] != -1 { // -1 means no match for this subexpression
			a[i/2] = s[r[i] : r[i+1]]
		}
	}
	return
}
// MatchSlices matches the Regexp against the byte slice b.
// The return value is an array of subslices matched by the expression.
// a[0] is the subslice matched by the entire expression.
// a[i] for i > 0 is the subslice matched by the ith parenthesized subexpression.
// An empty array means ``no match''.
func (re *Regexp) MatchSlices(b []byte) (a [][]byte) {
	r := re.doExecute("", b, 0);
	if r == nil {
		return nil
	}
	// Note: the returned subslices alias b; they are views, not copies.
	a = make([][]byte, len(r)/2);
	for i := 0; i < len(r); i += 2 {
		if r[i] != -1 { // -1 means no match for this subexpression
			a[i/2] = b[r[i] : r[i+1]]
		}
	}
	return
}
// MatchString checks whether a textual regular expression
// matches a string. More complicated queries need
// to use Compile and the full Regexp interface.
func MatchString(pattern string, s string) (matched bool, error os.Error) {
	re, err := Compile(pattern);
	if err != nil {
		return false, err
	}
	return re.MatchString(s), nil
}
// Match checks whether a textual regular expression
// matches a byte slice. More complicated queries need
// to use Compile and the full Regexp interface.
func Match(pattern string, b []byte) (matched bool, error os.Error) {
	re, err := Compile(pattern);
	if err != nil {
		return false, err
	}
	return re.Match(b), nil
}
// ReplaceAllString returns a copy of src in which all matches for the Regexp
// have been replaced by repl. No support is provided for expressions
// (e.g. \1 or $1) in the replacement string.
func (re *Regexp) ReplaceAllString(src, repl string) string {
	lastMatchEnd := 0; // end position of the most recent match
	searchPos := 0; // position where we next look for a match
	buf := new(bytes.Buffer);
	for searchPos <= len(src) {
		a := re.doExecute(src, nil, searchPos);
		if len(a) == 0 {
			break; // no more matches
		}
		// Copy the unmatched characters before this match.
		io.WriteString(buf, src[lastMatchEnd:a[0]]);
		// Now insert a copy of the replacement string, but not for a
		// match of the empty string immediately after another match.
		// (Otherwise, we get double replacement for patterns that
		// match both empty and nonempty strings.)
		if a[1] > lastMatchEnd || a[0] == 0 {
			io.WriteString(buf, repl);
		}
		lastMatchEnd = a[1];
		// Advance past this match; always advance at least one character.
		_, width := utf8.DecodeRuneInString(src[searchPos:len(src)]);
		if searchPos + width > a[1] {
			searchPos += width;
		} else if searchPos + 1 > a[1] {
			// This clause is only needed at the end of the input
			// string. In that case, DecodeRuneInString returns width=0.
			searchPos++;
		} else {
			searchPos = a[1];
		}
	}
	// Copy the unmatched characters after the last match.
	io.WriteString(buf, src[lastMatchEnd:len(src)]);
	return buf.String();
}
// ReplaceAll returns a copy of src in which all matches for the Regexp
// have been replaced by repl. No support is provided for expressions
// (e.g. \1 or $1) in the replacement text.
// Byte-slice twin of ReplaceAllString; keep the two in sync.
func (re *Regexp) ReplaceAll(src, repl []byte) []byte {
	lastMatchEnd := 0; // end position of the most recent match
	searchPos := 0; // position where we next look for a match
	buf := new(bytes.Buffer);
	for searchPos <= len(src) {
		a := re.doExecute("", src, searchPos);
		if len(a) == 0 {
			break; // no more matches
		}
		// Copy the unmatched characters before this match.
		buf.Write(src[lastMatchEnd:a[0]]);
		// Now insert a copy of the replacement string, but not for a
		// match of the empty string immediately after another match.
		// (Otherwise, we get double replacement for patterns that
		// match both empty and nonempty strings.)
		if a[1] > lastMatchEnd || a[0] == 0 {
			buf.Write(repl);
		}
		lastMatchEnd = a[1];
		// Advance past this match; always advance at least one character.
		_, width := utf8.DecodeRune(src[searchPos:len(src)]);
		if searchPos + width > a[1] {
			searchPos += width;
		} else if searchPos + 1 > a[1] {
			// This clause is only needed at the end of the input
			// slice. In that case, DecodeRune returns width=0.
			searchPos++;
		} else {
			searchPos = a[1];
		}
	}
	// Copy the unmatched characters after the last match.
	buf.Write(src[lastMatchEnd:len(src)]);
	return buf.Bytes();
}
// QuoteMeta returns a string that quotes all regular expression metacharacters
// inside the argument text; the returned string is a regular expression matching
// the literal text. For example, QuoteMeta(`[foo]`) returns `\[foo\]`.
func QuoteMeta(s string) string {
	// Worst case: every byte is a metacharacter and needs a backslash.
	b := make([]byte, 2 * len(s));
	// A byte loop is correct because all metacharacters are ASCII.
	j := 0;
	for i := 0; i < len(s); i++ {
		if special(int(s[i])) {
			b[j] = '\\';
			j++;
		}
		b[j] = s[i];
		j++;
	}
	return string(b[0:j]);
}
// Find matches in slice b if b is non-nil, otherwise find matches in string s.
// At most n matches are reported; each match's [start, end) bounds are passed
// to deliver. An empty match abutting the previous match is skipped.
func (re *Regexp) allMatches(s string, b []byte, n int, deliver func(int, int)) {
	var end int;
	if b == nil {
		end = len(s);
	} else {
		end = len(b);
	}
	for pos, i, prevMatchEnd := 0, 0, -1; i < n && pos <= end; {
		matches := re.doExecute(s, b, pos);
		if len(matches) == 0 {
			break;
		}
		accept := true;
		if matches[1] == pos {
			// We've found an empty match.
			if matches[0] == prevMatchEnd {
				// We don't allow an empty match right
				// after a previous match, so ignore it.
				accept = false;
			}
			// Advance by one rune so the scan always makes progress.
			var width int;
			if b == nil {
				_, width = utf8.DecodeRuneInString(s[pos:end]);
			} else {
				_, width = utf8.DecodeRune(b[pos:end]);
			}
			if width > 0 {
				pos += width;
			} else {
				// At end of input the decoder reports width 0;
				// push pos past end to terminate the loop.
				pos = end + 1;
			}
		} else {
			pos = matches[1];
		}
		prevMatchEnd = matches[1];
		if accept {
			deliver(matches[0], matches[1]);
			i++;
		}
	}
}
// AllMatches slices the byte slice b into substrings that are successive
// matches of the Regexp within b. If n > 0, the function returns at most n
// matches. Text that does not match the expression will be skipped. Empty
// matches abutting a preceding match are ignored. The function returns a slice
// containing the matching substrings.
func (re *Regexp) AllMatches(b []byte, n int) [][]byte {
	if n <= 0 {
		// n <= 0 means "no limit": len(b)+1 exceeds any possible match count.
		n = len(b) + 1;
	}
	result := make([][]byte, n);
	i := 0;
	re.allMatches("", b, n, func(start, end int) {
		// Each result aliases b's backing array; no copies are made.
		result[i] = b[start:end];
		i++;
	});
	return result[0:i];
}
// AllMatchesString slices the string s into substrings that are successive
// matches of the Regexp within s. If n > 0, the function returns at most n
// matches. Text that does not match the expression will be skipped. Empty
// matches abutting a preceding match are ignored. The function returns a slice
// containing the matching substrings.
func (re *Regexp) AllMatchesString(s string, n int) []string {
	if n <= 0 {
		// n <= 0 means "no limit": len(s)+1 exceeds any possible match count.
		n = len(s) + 1;
	}
	result := make([]string, n);
	i := 0;
	re.allMatches(s, nil, n, func(start, end int) {
		result[i] = s[start:end];
		i++;
	});
	return result[0:i];
}
// AllMatchesIter slices the byte slice b into substrings that are successive
// matches of the Regexp within b. If n > 0, the function returns at most n
// matches. Text that does not match the expression will be skipped. Empty
// matches abutting a preceding match are ignored. The function returns a
// channel that iterates over the matching substrings.
// NOTE: the sending goroutine blocks once the 10-slot buffer is full; a
// caller that stops receiving before the channel is closed leaks it.
func (re *Regexp) AllMatchesIter(b []byte, n int) (<-chan []byte) {
	if n <= 0 {
		n = len(b) + 1;
	}
	c := make(chan []byte, 10);
	go func() {
		re.allMatches("", b, n, func(start, end int) {
			c <- b[start:end];
		});
		close(c);
	}();
	return c;
}
// AllMatchesStringIter slices the string s into substrings that are successive
// matches of the Regexp within s. If n > 0, the function returns at most n
// matches. Text that does not match the expression will be skipped. Empty
// matches abutting a preceding match are ignored. The function returns a
// channel that iterates over the matching substrings.
// NOTE: as with AllMatchesIter, abandoning the channel before it is closed
// leaves the sending goroutine blocked.
func (re *Regexp) AllMatchesStringIter(s string, n int) (<-chan string) {
	if n <= 0 {
		n = len(s) + 1;
	}
	c := make(chan string, 10);
	go func() {
		re.allMatches(s, nil, n, func(start, end int) {
			c <- s[start:end];
		});
		close(c);
	}();
	return c;
}
|
package byteutil
import (
"bytes"
// "fmt"
"github.com/shenwei356/bpool"
"unsafe"
)
// ReverseByteSlice returns a new byte slice holding the bytes of s in
// reverse order. The input slice is not modified.
func ReverseByteSlice(s []byte) []byte {
	l := len(s)
	t := make([]byte, l)
	// Fill back-to-front in a single pass instead of copy-then-swap.
	for i := 0; i < l; i++ {
		t[i] = s[l-1-i]
	}
	return t
}
// ReverseByteSliceInplace reverses s in place, swapping bytes from the
// two ends toward the middle.
func ReverseByteSliceInplace(s []byte) {
	left, right := 0, len(s)-1
	for left < right {
		s[left], s[right] = s[right], s[left]
		left++
		right--
	}
}
// WrapByteSlice returns a copy of s with a newline inserted after every
// width bytes. No trailing newline is added. s itself is returned when
// width < 1 or s is empty.
func WrapByteSlice(s []byte, width int) []byte {
	l := len(s)
	if width < 1 || l == 0 {
		return s
	}
	// Number of newlines to be inserted: one per full chunk except the last.
	breaks := (l - 1) / width
	out := bytes.NewBuffer(make([]byte, 0, l+breaks))
	for start := 0; start < l; start += width {
		end := start + width
		if end > l {
			end = l
		}
		// Write the separator before every chunk except the first —
		// equivalent to "after every chunk except the last".
		if start > 0 {
			out.WriteString("\n")
		}
		out.Write(s[start:end])
	}
	return out.Bytes()
}
// BufferedByteSliceWrapper is used to wrap byte slice,
// using a buffer of bytes.Buffer to reduce GC.
// The pool hands out pre-sized buffers; callers return them via Recycle.
type BufferedByteSliceWrapper struct {
	pool *bpool.SizedBufferPool // fixed-size pool of pre-allocated buffers
}
// NewBufferedByteSliceWrapper creates a BufferedByteSliceWrapper backed by
// a pool of size buffers, each pre-allocated to alloc bytes.
// It panics if size or alloc is not positive.
func NewBufferedByteSliceWrapper(size, alloc int) *BufferedByteSliceWrapper {
	switch {
	case size < 1:
		panic("buffer number should be > 0")
	case alloc < 1:
		panic("buffer size should be > 0")
	}
	return &BufferedByteSliceWrapper{pool: bpool.NewSizedBufferPool(size, alloc)}
}
// NewBufferedByteSliceWrapper2 pre-allocates pool buffers sized for
// wrapping a slice of length l at the given width: l bytes plus one byte
// per newline that wrapping will insert.
// It panics if size or l is not positive.
func NewBufferedByteSliceWrapper2(size int, l, width int) *BufferedByteSliceWrapper {
	if size < 1 {
		panic("buffer number should be > 0")
	}
	if l < 1 {
		panic("buffer size should be > 0")
	}
	if width <= 0 {
		// No wrapping: a plain pool of l-byte buffers suffices.
		return NewBufferedByteSliceWrapper(size, l)
	}
	// Newlines inserted by wrapping: one per full chunk except the last.
	lines := (l - 1) / width
	return &BufferedByteSliceWrapper{pool: bpool.NewSizedBufferPool(size, l+lines)}
}
// Recycle returns a buffer obtained from Wrap back to the pool.
// The caller must not use b (or slices returned from it) afterwards.
func (w *BufferedByteSliceWrapper) Recycle(b *bytes.Buffer) {
	w.pool.Put(b)
}
// Wrap inserts a newline after every width bytes of s, writing the result
// into a pooled buffer. The returned byte slice aliases the returned
// buffer — DO NOT FORGET to call Recycle() with that buffer when done.
// When width < 1 or s is empty, s itself is returned with a nil buffer.
func (w *BufferedByteSliceWrapper) Wrap(s []byte, width int) ([]byte, *bytes.Buffer) {
	l := len(s)
	if width < 1 || l == 0 {
		return s, nil
	}
	buffer := w.pool.Get()
	for start := 0; start < l; start += width {
		end := start + width
		if end > l {
			end = l
		}
		// Separator before every chunk except the first — same output as
		// "after every chunk except the last".
		if start > 0 {
			buffer.WriteString("\n")
		}
		buffer.Write(s[start:end])
	}
	return buffer.Bytes(), buffer
}
// WrapByteSliceInplace wraps byte slice in place.
// Sadly, it's too slow. Never use this!
// Each inserted newline re-splices the slice and re-derives the line
// count, so the loop bounds shift while iterating; the statement order
// here is load-bearing.
func WrapByteSliceInplace(s []byte, width int) []byte {
	if width < 1 {
		return s
	}
	var l, lines int
	l = len(s)
	if l%width == 0 {
		lines = l/width - 1
	} else {
		lines = int(l / width)
	}
	var end int
	j := 0 // number of newlines inserted so far; offsets later positions
	for i := 0; i <= lines; i++ {
		end = (i+1)*width + j
		if end >= l {
			break
		}
		// fmt.Printf("len:%d, lines:%d, i:%d, j:%d, end:%d\n", l, lines, i, j, end)
		if i < lines {
			// https://github.com/golang/go/wiki/SliceTricks
			// Sadly, it's too slow
			// s = append(s, []byte(" ")[0])
			// copy(s[end+1:], s[end:])
			// s[end] = []byte("\n")[0]
			// slow too
			s = append(s[:end], append([]byte("\n"), s[end:]...)...)
			// The slice just grew by one byte: recompute length and the
			// remaining line count before the next iteration.
			l = len(s)
			if l%width == 0 {
				lines = l/width - 1
			} else {
				lines = int(l / width)
			}
			j++
		}
	}
	return s
}
// SubSlice returns slice[start:end] with python-style negative indexing,
// with one twist: end == 0 means "through the last element".
// So SubSlice(s, -1, 0) yields the last element and SubSlice(s, 0, 0)
// yields the whole slice.
func SubSlice(slice []byte, start int, end int) []byte {
	if start == 0 && end == 0 {
		return slice
	}
	// An empty range, or a negative start with a positive end, selects nothing.
	if start == end || (start < 0 && end > 0) {
		return []byte{}
	}
	l := len(slice)
	s := start
	if s < 0 {
		if s += l; s < 1 {
			s = 0
		}
	}
	e := end
	if e < 0 {
		if e += l; e < 0 {
			e = 0
		}
	}
	// end == 0 (after normalization) or past-the-end clamps to the length.
	if e == 0 || e > l {
		e = l
	}
	return slice[s:e]
}
// ByteToLower returns the lower-case form of an ASCII upper-case letter;
// every other byte (including non-ASCII bytes) is returned unchanged.
func ByteToLower(b byte) byte {
	// The previous `b <= '\u007F'` guard was redundant: the range check
	// below already confines the conversion to ASCII 'A'..'Z'.
	if 'A' <= b && b <= 'Z' {
		return b + ('a' - 'A')
	}
	return b
}
// ByteToUpper returns the upper-case form of an ASCII lower-case letter;
// every other byte (including non-ASCII bytes) is returned unchanged.
func ByteToUpper(b byte) byte {
	// The previous `b <= '\u007F'` guard was redundant: the range check
	// below already confines the conversion to ASCII 'a'..'z'.
	if 'a' <= b && b <= 'z' {
		return b - ('a' - 'A')
	}
	return b
}
// MakeQuerySlice builds a byte lookup table used to replace a map:
// table[c] == c for every c in letters, and 0 elsewhere.
// see: http://blog.shenwei.me/map-is-not-the-fastest-in-go/
func MakeQuerySlice(letters []byte) []byte {
	max := -1
	for _, c := range letters {
		if v := int(c); v > max {
			max = v
		}
	}
	table := make([]byte, max+1)
	for _, c := range letters {
		table[c] = c
	}
	return table
}
// Split splits a byte slice on any of the given delimiter letters,
// dropping empty fields. It's much faster than regexp.Split.
func Split(slice []byte, letters []byte) [][]byte {
	// Build the delimiter lookup table inline: table[c] == c for delimiters.
	max := -1
	for _, c := range letters {
		if v := int(c); v > max {
			max = v
		}
	}
	query := make([]byte, max+1)
	for _, c := range letters {
		query[c] = c
	}
	size := len(query)
	results := [][]byte{}
	field := []byte{}
	for _, c := range slice {
		// Bytes outside the table, or mapped to 0, are not delimiters.
		if int(c) >= size || query[c] == 0 {
			field = append(field, c)
			continue
		}
		if len(field) > 0 {
			results = append(results, field)
			field = []byte{}
		}
	}
	if len(field) > 0 {
		results = append(results, field)
	}
	return results
}
// Bytes2Str converts a byte slice to a string without allocating/copying.
// The string aliases b's backing array: the caller must not mutate b while
// the string is in use, or string immutability is violated.
func Bytes2Str(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}
fix doc
package byteutil
import (
"bytes"
// "fmt"
"github.com/shenwei356/bpool"
"unsafe"
)
// ReverseByteSlice returns a new byte slice holding the bytes of s in
// reverse order. The input slice is not modified.
func ReverseByteSlice(s []byte) []byte {
	l := len(s)
	t := make([]byte, l)
	// Fill back-to-front in a single pass instead of copy-then-swap.
	for i := 0; i < l; i++ {
		t[i] = s[l-1-i]
	}
	return t
}
// ReverseByteSliceInplace reverses s in place, swapping bytes from the
// two ends toward the middle.
func ReverseByteSliceInplace(s []byte) {
	left, right := 0, len(s)-1
	for left < right {
		s[left], s[right] = s[right], s[left]
		left++
		right--
	}
}
// WrapByteSlice returns a copy of s with a newline inserted after every
// width bytes. No trailing newline is added. s itself is returned when
// width < 1 or s is empty.
func WrapByteSlice(s []byte, width int) []byte {
	l := len(s)
	if width < 1 || l == 0 {
		return s
	}
	// Number of newlines to be inserted: one per full chunk except the last.
	breaks := (l - 1) / width
	out := bytes.NewBuffer(make([]byte, 0, l+breaks))
	for start := 0; start < l; start += width {
		end := start + width
		if end > l {
			end = l
		}
		// Write the separator before every chunk except the first —
		// equivalent to "after every chunk except the last".
		if start > 0 {
			out.WriteString("\n")
		}
		out.Write(s[start:end])
	}
	return out.Bytes()
}
// BufferedByteSliceWrapper is used to wrap byte slice,
// using a buffer of bytes.Buffer to reduce GC.
// The pool hands out pre-sized buffers; callers return them via Recycle.
type BufferedByteSliceWrapper struct {
	pool *bpool.SizedBufferPool // fixed-size pool of pre-allocated buffers
}
// NewBufferedByteSliceWrapper creates a BufferedByteSliceWrapper backed by
// a pool of size buffers, each pre-allocated to alloc bytes.
// It panics if size or alloc is not positive.
func NewBufferedByteSliceWrapper(size, alloc int) *BufferedByteSliceWrapper {
	switch {
	case size < 1:
		panic("buffer number should be > 0")
	case alloc < 1:
		panic("buffer size should be > 0")
	}
	return &BufferedByteSliceWrapper{pool: bpool.NewSizedBufferPool(size, alloc)}
}
// NewBufferedByteSliceWrapper2 pre-allocates pool buffers sized for
// wrapping a slice of the given length at the given width: length bytes
// plus one byte per newline that wrapping will insert.
// It panics if size or length is not positive.
func NewBufferedByteSliceWrapper2(size int, length, width int) *BufferedByteSliceWrapper {
	if size < 1 {
		panic("buffer number should be > 0")
	}
	if length < 1 {
		panic("buffer size should be > 0")
	}
	if width <= 0 {
		// No wrapping: a plain pool of length-byte buffers suffices.
		return NewBufferedByteSliceWrapper(size, length)
	}
	// Newlines inserted by wrapping: one per full chunk except the last.
	lines := (length - 1) / width
	return &BufferedByteSliceWrapper{pool: bpool.NewSizedBufferPool(size, length+lines)}
}
// Recycle returns a buffer obtained from Wrap back to the pool.
// The caller must not use b (or slices returned from it) afterwards.
func (w *BufferedByteSliceWrapper) Recycle(b *bytes.Buffer) {
	w.pool.Put(b)
}
// Wrap inserts a newline after every width bytes of s, writing the result
// into a pooled buffer. The returned byte slice aliases the returned
// buffer — DO NOT FORGET to call Recycle() with that buffer when done.
// When width < 1 or s is empty, s itself is returned with a nil buffer.
func (w *BufferedByteSliceWrapper) Wrap(s []byte, width int) ([]byte, *bytes.Buffer) {
	l := len(s)
	if width < 1 || l == 0 {
		return s, nil
	}
	buffer := w.pool.Get()
	for start := 0; start < l; start += width {
		end := start + width
		if end > l {
			end = l
		}
		// Separator before every chunk except the first — same output as
		// "after every chunk except the last".
		if start > 0 {
			buffer.WriteString("\n")
		}
		buffer.Write(s[start:end])
	}
	return buffer.Bytes(), buffer
}
// WrapByteSliceInplace wraps byte slice in place.
// Sadly, it's too slow. Never use this!
// Each inserted newline re-splices the slice and re-derives the line
// count, so the loop bounds shift while iterating; the statement order
// here is load-bearing.
func WrapByteSliceInplace(s []byte, width int) []byte {
	if width < 1 {
		return s
	}
	var l, lines int
	l = len(s)
	if l%width == 0 {
		lines = l/width - 1
	} else {
		lines = int(l / width)
	}
	var end int
	j := 0 // number of newlines inserted so far; offsets later positions
	for i := 0; i <= lines; i++ {
		end = (i+1)*width + j
		if end >= l {
			break
		}
		// fmt.Printf("len:%d, lines:%d, i:%d, j:%d, end:%d\n", l, lines, i, j, end)
		if i < lines {
			// https://github.com/golang/go/wiki/SliceTricks
			// Sadly, it's too slow
			// s = append(s, []byte(" ")[0])
			// copy(s[end+1:], s[end:])
			// s[end] = []byte("\n")[0]
			// slow too
			s = append(s[:end], append([]byte("\n"), s[end:]...)...)
			// The slice just grew by one byte: recompute length and the
			// remaining line count before the next iteration.
			l = len(s)
			if l%width == 0 {
				lines = l/width - 1
			} else {
				lines = int(l / width)
			}
			j++
		}
	}
	return s
}
// SubSlice returns slice[start:end] with python-style negative indexing,
// with one twist: end == 0 means "through the last element".
// So SubSlice(s, -1, 0) yields the last element and SubSlice(s, 0, 0)
// yields the whole slice.
func SubSlice(slice []byte, start int, end int) []byte {
	if start == 0 && end == 0 {
		return slice
	}
	// An empty range, or a negative start with a positive end, selects nothing.
	if start == end || (start < 0 && end > 0) {
		return []byte{}
	}
	l := len(slice)
	s := start
	if s < 0 {
		if s += l; s < 1 {
			s = 0
		}
	}
	e := end
	if e < 0 {
		if e += l; e < 0 {
			e = 0
		}
	}
	// end == 0 (after normalization) or past-the-end clamps to the length.
	if e == 0 || e > l {
		e = l
	}
	return slice[s:e]
}
// ByteToLower returns the lower-case form of an ASCII upper-case letter;
// every other byte (including non-ASCII bytes) is returned unchanged.
func ByteToLower(b byte) byte {
	// The previous `b <= '\u007F'` guard was redundant: the range check
	// below already confines the conversion to ASCII 'A'..'Z'.
	if 'A' <= b && b <= 'Z' {
		return b + ('a' - 'A')
	}
	return b
}
// ByteToUpper returns the upper-case form of an ASCII lower-case letter;
// every other byte (including non-ASCII bytes) is returned unchanged.
func ByteToUpper(b byte) byte {
	// The previous `b <= '\u007F'` guard was redundant: the range check
	// below already confines the conversion to ASCII 'a'..'z'.
	if 'a' <= b && b <= 'z' {
		return b - ('a' - 'A')
	}
	return b
}
// MakeQuerySlice builds a byte lookup table used to replace a map:
// table[c] == c for every c in letters, and 0 elsewhere.
// see: http://blog.shenwei.me/map-is-not-the-fastest-in-go/
func MakeQuerySlice(letters []byte) []byte {
	max := -1
	for _, c := range letters {
		if v := int(c); v > max {
			max = v
		}
	}
	table := make([]byte, max+1)
	for _, c := range letters {
		table[c] = c
	}
	return table
}
// Split splits a byte slice on any of the given delimiter letters,
// dropping empty fields. It's much faster than regexp.Split.
func Split(slice []byte, letters []byte) [][]byte {
	// Build the delimiter lookup table inline: table[c] == c for delimiters.
	max := -1
	for _, c := range letters {
		if v := int(c); v > max {
			max = v
		}
	}
	query := make([]byte, max+1)
	for _, c := range letters {
		query[c] = c
	}
	size := len(query)
	results := [][]byte{}
	field := []byte{}
	for _, c := range slice {
		// Bytes outside the table, or mapped to 0, are not delimiters.
		if int(c) >= size || query[c] == 0 {
			field = append(field, c)
			continue
		}
		if len(field) > 0 {
			results = append(results, field)
			field = []byte{}
		}
	}
	if len(field) > 0 {
		results = append(results, field)
	}
	return results
}
// Bytes2Str converts a byte slice to a string without allocating/copying.
// The string aliases b's backing array: the caller must not mutate b while
// the string is in use, or string immutability is violated.
func Bytes2Str(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}
|
//go:build functional
// +build functional
package cri_containerd
import (
"context"
"fmt"
"testing"
"github.com/Microsoft/hcsshim/internal/cpugroup"
"github.com/Microsoft/hcsshim/internal/processorinfo"
"github.com/Microsoft/hcsshim/osversion"
"github.com/Microsoft/hcsshim/pkg/annotations"
testutilities "github.com/Microsoft/hcsshim/test/functional/utilities"
runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
)
// Test_Pod_UpdateResources_Memory verifies that a pod sandbox's memory limit
// can be shrunk at runtime via UpdateContainerResources, for both
// hypervisor-isolated WCOW and LCOW runtime handlers.
func Test_Pod_UpdateResources_Memory(t *testing.T) {
	type config struct {
		name             string
		requiredFeatures []string
		runtimeHandler   string
		sandboxImage     string
	}
	tests := []config{
		{
			name:             "WCOW_Hypervisor",
			requiredFeatures: []string{featureWCOWHypervisor},
			runtimeHandler:   wcowHypervisorRuntimeHandler,
			sandboxImage:     imageWindowsNanoserver,
		},
		{
			name:             "LCOW",
			requiredFeatures: []string{featureLCOW},
			runtimeHandler:   lcowRuntimeHandler,
			sandboxImage:     imageLcowK8sPause,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			requireFeatures(t, test.requiredFeatures...)
			// LCOW and WCOW images are pulled through different helpers.
			if test.runtimeHandler == lcowRuntimeHandler {
				pullRequiredLCOWImages(t, []string{test.sandboxImage})
			} else {
				pullRequiredImages(t, []string{test.sandboxImage})
			}
			var startingMemorySize int64 = 768 * 1024 * 1024
			// NOTE(review): the annotation name says MB but the value set
			// here is a byte count — confirm expected units.
			podRequest := getRunPodSandboxRequest(
				t,
				test.runtimeHandler,
				WithSandboxAnnotations(map[string]string{
					annotations.ContainerMemorySizeInMB: fmt.Sprintf("%d", startingMemorySize),
				}),
			)
			client := newTestRuntimeClient(t)
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			podID := runPodSandbox(t, client, ctx, podRequest)
			defer removePodSandbox(t, client, ctx, podID)
			defer stopPodSandbox(t, client, ctx, podID)
			// make request for shrinking memory size
			newMemorySize := startingMemorySize / 2
			updateReq := &runtime.UpdateContainerResourcesRequest{
				ContainerId: podID,
			}
			if test.runtimeHandler == lcowRuntimeHandler {
				updateReq.Linux = &runtime.LinuxContainerResources{
					MemoryLimitInBytes: newMemorySize,
				}
			} else {
				updateReq.Windows = &runtime.WindowsContainerResources{
					MemoryLimitInBytes: newMemorySize,
				}
			}
			if _, err := client.UpdateContainerResources(ctx, updateReq); err != nil {
				t.Fatalf("updating container resources for %s with %v", podID, err)
			}
		})
	}
}
// Test_Pod_UpdateResources_Memory_PA is the fully-physically-backed variant
// of Test_Pod_UpdateResources_Memory: the sandbox is created with physically
// backed memory and its limit is then shrunk at runtime.
func Test_Pod_UpdateResources_Memory_PA(t *testing.T) {
	type config struct {
		name             string
		requiredFeatures []string
		runtimeHandler   string
		sandboxImage     string
	}
	tests := []config{
		{
			name:             "WCOW_Hypervisor",
			requiredFeatures: []string{featureWCOWHypervisor},
			runtimeHandler:   wcowHypervisorRuntimeHandler,
			sandboxImage:     imageWindowsNanoserver,
		},
		{
			name:             "LCOW",
			requiredFeatures: []string{featureLCOW},
			runtimeHandler:   lcowRuntimeHandler,
			sandboxImage:     imageLcowK8sPause,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			requireFeatures(t, test.requiredFeatures...)
			if test.runtimeHandler == lcowRuntimeHandler {
				pullRequiredLCOWImages(t, []string{test.sandboxImage})
			} else {
				pullRequiredImages(t, []string{test.sandboxImage})
			}
			var startingMemorySize int64 = 200 * 1024 * 1024
			// NOTE(review): the annotation name says MB but the value set
			// here is a byte count — confirm expected units.
			podRequest := getRunPodSandboxRequest(
				t,
				test.runtimeHandler,
				WithSandboxAnnotations(map[string]string{
					annotations.FullyPhysicallyBacked:   "true",
					annotations.ContainerMemorySizeInMB: fmt.Sprintf("%d", startingMemorySize),
				}),
			)
			client := newTestRuntimeClient(t)
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			podID := runPodSandbox(t, client, ctx, podRequest)
			defer removePodSandbox(t, client, ctx, podID)
			defer stopPodSandbox(t, client, ctx, podID)
			// make request for shrinking memory size
			newMemorySize := startingMemorySize / 2
			updateReq := &runtime.UpdateContainerResourcesRequest{
				ContainerId: podID,
			}
			if test.runtimeHandler == lcowRuntimeHandler {
				updateReq.Linux = &runtime.LinuxContainerResources{
					MemoryLimitInBytes: newMemorySize,
				}
			} else {
				updateReq.Windows = &runtime.WindowsContainerResources{
					MemoryLimitInBytes: newMemorySize,
				}
			}
			if _, err := client.UpdateContainerResources(ctx, updateReq); err != nil {
				t.Fatalf("updating container resources for %s with %v", podID, err)
			}
		})
	}
}
// Test_Pod_UpdateResources_CPUShares verifies that a pod sandbox's CPU
// shares can be updated at runtime via UpdateContainerResources, for both
// hypervisor-isolated WCOW and LCOW runtime handlers.
func Test_Pod_UpdateResources_CPUShares(t *testing.T) {
	type config struct {
		name             string
		requiredFeatures []string
		runtimeHandler   string
		sandboxImage     string
	}
	tests := []config{
		{
			name:             "WCOW_Hypervisor",
			requiredFeatures: []string{featureWCOWHypervisor},
			runtimeHandler:   wcowHypervisorRuntimeHandler,
			sandboxImage:     imageWindowsNanoserver,
		},
		{
			name:             "LCOW",
			requiredFeatures: []string{featureLCOW},
			runtimeHandler:   lcowRuntimeHandler,
			sandboxImage:     imageLcowK8sPause,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			requireFeatures(t, test.requiredFeatures...)
			if test.runtimeHandler == lcowRuntimeHandler {
				pullRequiredLCOWImages(t, []string{test.sandboxImage})
			} else {
				pullRequiredImages(t, []string{test.sandboxImage})
			}
			podRequest := getRunPodSandboxRequest(t, test.runtimeHandler)
			client := newTestRuntimeClient(t)
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			podID := runPodSandbox(t, client, ctx, podRequest)
			defer removePodSandbox(t, client, ctx, podID)
			defer stopPodSandbox(t, client, ctx, podID)
			// Raise the sandbox's CPU shares to 2000 on the platform-specific
			// resources struct.
			updateReq := &runtime.UpdateContainerResourcesRequest{
				ContainerId: podID,
			}
			if test.runtimeHandler == lcowRuntimeHandler {
				updateReq.Linux = &runtime.LinuxContainerResources{
					CpuShares: 2000,
				}
			} else {
				updateReq.Windows = &runtime.WindowsContainerResources{
					CpuShares: 2000,
				}
			}
			if _, err := client.UpdateContainerResources(ctx, updateReq); err != nil {
				t.Fatalf("updating container resources for %s with %v", podID, err)
			}
		})
	}
}
// Test_Pod_UpdateResources_CPUGroup verifies that a pod can be moved from
// one host CPU group to another at runtime via an UpdateContainerResources
// call carrying the CPUGroupID annotation.
func Test_Pod_UpdateResources_CPUGroup(t *testing.T) {
	testutilities.RequiresBuild(t, osversion.V21H1)
	ctx := context.Background()
	processorTopology, err := processorinfo.HostProcessorInfo(ctx)
	if err != nil {
		t.Fatalf("failed to get host processor information: %s", err)
	}
	// Both test CPU groups span every logical processor on the host.
	lpIndices := make([]uint32, processorTopology.LogicalProcessorCount)
	for i, p := range processorTopology.LogicalProcessors {
		lpIndices[i] = p.LpIndex
	}
	startCPUGroupID := "FA22A12C-36B3-486D-A3E9-BC526C2B450B"
	if err := cpugroup.Create(ctx, startCPUGroupID, lpIndices); err != nil {
		t.Fatalf("failed to create test cpugroup with: %v", err)
	}
	defer func() {
		err := cpugroup.Delete(ctx, startCPUGroupID)
		if err != nil && err != cpugroup.ErrHVStatusInvalidCPUGroupState {
			t.Fatalf("failed to clean up test cpugroup with: %v", err)
		}
	}()
	updateCPUGroupID := "FA22A12C-36B3-486D-A3E9-BC526C2B450C"
	if err := cpugroup.Create(ctx, updateCPUGroupID, lpIndices); err != nil {
		t.Fatalf("failed to create test cpugroup with: %v", err)
	}
	defer func() {
		err := cpugroup.Delete(ctx, updateCPUGroupID)
		if err != nil && err != cpugroup.ErrHVStatusInvalidCPUGroupState {
			t.Fatalf("failed to clean up test cpugroup with: %v", err)
		}
	}()
	type config struct {
		name             string
		requiredFeatures []string
		runtimeHandler   string
		sandboxImage     string
	}
	tests := []config{
		{
			name:             "WCOW_Hypervisor",
			requiredFeatures: []string{featureWCOWHypervisor},
			runtimeHandler:   wcowHypervisorRuntimeHandler,
			sandboxImage:     imageWindowsNanoserver,
		},
		{
			name:             "LCOW",
			requiredFeatures: []string{featureLCOW},
			runtimeHandler:   lcowRuntimeHandler,
			sandboxImage:     imageLcowK8sPause,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			requireFeatures(t, test.requiredFeatures...)
			if test.runtimeHandler == lcowRuntimeHandler {
				pullRequiredLCOWImages(t, []string{test.sandboxImage})
			} else {
				pullRequiredImages(t, []string{test.sandboxImage})
			}
			// Start the pod in the first CPU group.
			podRequest := getRunPodSandboxRequest(t, test.runtimeHandler, WithSandboxAnnotations(map[string]string{
				annotations.CPUGroupID: startCPUGroupID,
			}))
			client := newTestRuntimeClient(t)
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			podID := runPodSandbox(t, client, ctx, podRequest)
			defer removePodSandbox(t, client, ctx, podID)
			defer stopPodSandbox(t, client, ctx, podID)
			// Ask the runtime to move the pod to the second CPU group.
			updateReq := &runtime.UpdateContainerResourcesRequest{
				ContainerId: podID,
				Annotations: map[string]string{
					annotations.CPUGroupID: updateCPUGroupID,
				},
			}
			if test.runtimeHandler == lcowRuntimeHandler {
				updateReq.Linux = &runtime.LinuxContainerResources{}
			} else {
				updateReq.Windows = &runtime.WindowsContainerResources{}
			}
			if _, err := client.UpdateContainerResources(ctx, updateReq); err != nil {
				t.Fatalf("updating container resources for %s with %v", podID, err)
			}
		})
	}
}
Skip test for updating VM cpugroup membership for now
Signed-off-by: Kathryn Baldauf <f9c2fbf13a17fb5274acf31a52eb81f1fe91283d@microsoft.com>
//go:build functional
// +build functional
package cri_containerd
import (
"context"
"fmt"
"testing"
"github.com/Microsoft/hcsshim/internal/cpugroup"
"github.com/Microsoft/hcsshim/internal/processorinfo"
"github.com/Microsoft/hcsshim/pkg/annotations"
runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
)
// Test_Pod_UpdateResources_Memory verifies that a pod sandbox's memory limit
// can be shrunk at runtime via UpdateContainerResources, for both
// hypervisor-isolated WCOW and LCOW runtime handlers.
func Test_Pod_UpdateResources_Memory(t *testing.T) {
	type config struct {
		name             string
		requiredFeatures []string
		runtimeHandler   string
		sandboxImage     string
	}
	tests := []config{
		{
			name:             "WCOW_Hypervisor",
			requiredFeatures: []string{featureWCOWHypervisor},
			runtimeHandler:   wcowHypervisorRuntimeHandler,
			sandboxImage:     imageWindowsNanoserver,
		},
		{
			name:             "LCOW",
			requiredFeatures: []string{featureLCOW},
			runtimeHandler:   lcowRuntimeHandler,
			sandboxImage:     imageLcowK8sPause,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			requireFeatures(t, test.requiredFeatures...)
			// LCOW and WCOW images are pulled through different helpers.
			if test.runtimeHandler == lcowRuntimeHandler {
				pullRequiredLCOWImages(t, []string{test.sandboxImage})
			} else {
				pullRequiredImages(t, []string{test.sandboxImage})
			}
			var startingMemorySize int64 = 768 * 1024 * 1024
			// NOTE(review): the annotation name says MB but the value set
			// here is a byte count — confirm expected units.
			podRequest := getRunPodSandboxRequest(
				t,
				test.runtimeHandler,
				WithSandboxAnnotations(map[string]string{
					annotations.ContainerMemorySizeInMB: fmt.Sprintf("%d", startingMemorySize),
				}),
			)
			client := newTestRuntimeClient(t)
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			podID := runPodSandbox(t, client, ctx, podRequest)
			defer removePodSandbox(t, client, ctx, podID)
			defer stopPodSandbox(t, client, ctx, podID)
			// make request for shrinking memory size
			newMemorySize := startingMemorySize / 2
			updateReq := &runtime.UpdateContainerResourcesRequest{
				ContainerId: podID,
			}
			if test.runtimeHandler == lcowRuntimeHandler {
				updateReq.Linux = &runtime.LinuxContainerResources{
					MemoryLimitInBytes: newMemorySize,
				}
			} else {
				updateReq.Windows = &runtime.WindowsContainerResources{
					MemoryLimitInBytes: newMemorySize,
				}
			}
			if _, err := client.UpdateContainerResources(ctx, updateReq); err != nil {
				t.Fatalf("updating container resources for %s with %v", podID, err)
			}
		})
	}
}
// Test_Pod_UpdateResources_Memory_PA is the fully-physically-backed variant
// of Test_Pod_UpdateResources_Memory: the sandbox is created with physically
// backed memory and its limit is then shrunk at runtime.
func Test_Pod_UpdateResources_Memory_PA(t *testing.T) {
	type config struct {
		name             string
		requiredFeatures []string
		runtimeHandler   string
		sandboxImage     string
	}
	tests := []config{
		{
			name:             "WCOW_Hypervisor",
			requiredFeatures: []string{featureWCOWHypervisor},
			runtimeHandler:   wcowHypervisorRuntimeHandler,
			sandboxImage:     imageWindowsNanoserver,
		},
		{
			name:             "LCOW",
			requiredFeatures: []string{featureLCOW},
			runtimeHandler:   lcowRuntimeHandler,
			sandboxImage:     imageLcowK8sPause,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			requireFeatures(t, test.requiredFeatures...)
			if test.runtimeHandler == lcowRuntimeHandler {
				pullRequiredLCOWImages(t, []string{test.sandboxImage})
			} else {
				pullRequiredImages(t, []string{test.sandboxImage})
			}
			var startingMemorySize int64 = 200 * 1024 * 1024
			// NOTE(review): the annotation name says MB but the value set
			// here is a byte count — confirm expected units.
			podRequest := getRunPodSandboxRequest(
				t,
				test.runtimeHandler,
				WithSandboxAnnotations(map[string]string{
					annotations.FullyPhysicallyBacked:   "true",
					annotations.ContainerMemorySizeInMB: fmt.Sprintf("%d", startingMemorySize),
				}),
			)
			client := newTestRuntimeClient(t)
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			podID := runPodSandbox(t, client, ctx, podRequest)
			defer removePodSandbox(t, client, ctx, podID)
			defer stopPodSandbox(t, client, ctx, podID)
			// make request for shrinking memory size
			newMemorySize := startingMemorySize / 2
			updateReq := &runtime.UpdateContainerResourcesRequest{
				ContainerId: podID,
			}
			if test.runtimeHandler == lcowRuntimeHandler {
				updateReq.Linux = &runtime.LinuxContainerResources{
					MemoryLimitInBytes: newMemorySize,
				}
			} else {
				updateReq.Windows = &runtime.WindowsContainerResources{
					MemoryLimitInBytes: newMemorySize,
				}
			}
			if _, err := client.UpdateContainerResources(ctx, updateReq); err != nil {
				t.Fatalf("updating container resources for %s with %v", podID, err)
			}
		})
	}
}
// Test_Pod_UpdateResources_CPUShares verifies that a pod sandbox's CPU
// shares can be updated at runtime via UpdateContainerResources, for both
// hypervisor-isolated WCOW and LCOW runtime handlers.
func Test_Pod_UpdateResources_CPUShares(t *testing.T) {
	type config struct {
		name             string
		requiredFeatures []string
		runtimeHandler   string
		sandboxImage     string
	}
	tests := []config{
		{
			name:             "WCOW_Hypervisor",
			requiredFeatures: []string{featureWCOWHypervisor},
			runtimeHandler:   wcowHypervisorRuntimeHandler,
			sandboxImage:     imageWindowsNanoserver,
		},
		{
			name:             "LCOW",
			requiredFeatures: []string{featureLCOW},
			runtimeHandler:   lcowRuntimeHandler,
			sandboxImage:     imageLcowK8sPause,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			requireFeatures(t, test.requiredFeatures...)
			if test.runtimeHandler == lcowRuntimeHandler {
				pullRequiredLCOWImages(t, []string{test.sandboxImage})
			} else {
				pullRequiredImages(t, []string{test.sandboxImage})
			}
			podRequest := getRunPodSandboxRequest(t, test.runtimeHandler)
			client := newTestRuntimeClient(t)
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			podID := runPodSandbox(t, client, ctx, podRequest)
			defer removePodSandbox(t, client, ctx, podID)
			defer stopPodSandbox(t, client, ctx, podID)
			// Raise the sandbox's CPU shares to 2000 on the platform-specific
			// resources struct.
			updateReq := &runtime.UpdateContainerResourcesRequest{
				ContainerId: podID,
			}
			if test.runtimeHandler == lcowRuntimeHandler {
				updateReq.Linux = &runtime.LinuxContainerResources{
					CpuShares: 2000,
				}
			} else {
				updateReq.Windows = &runtime.WindowsContainerResources{
					CpuShares: 2000,
				}
			}
			if _, err := client.UpdateContainerResources(ctx, updateReq); err != nil {
				t.Fatalf("updating container resources for %s with %v", podID, err)
			}
		})
	}
}
// Test_Pod_UpdateResources_CPUGroup verifies that a pod can be moved from
// one host CPU group to another at runtime via an UpdateContainerResources
// call carrying the CPUGroupID annotation.
// Currently skipped (see the t.Skip below).
func Test_Pod_UpdateResources_CPUGroup(t *testing.T) {
	t.Skip("Skipping for now")
	ctx := context.Background()
	processorTopology, err := processorinfo.HostProcessorInfo(ctx)
	if err != nil {
		t.Fatalf("failed to get host processor information: %s", err)
	}
	// Both test CPU groups span every logical processor on the host.
	lpIndices := make([]uint32, processorTopology.LogicalProcessorCount)
	for i, p := range processorTopology.LogicalProcessors {
		lpIndices[i] = p.LpIndex
	}
	startCPUGroupID := "FA22A12C-36B3-486D-A3E9-BC526C2B450B"
	if err := cpugroup.Create(ctx, startCPUGroupID, lpIndices); err != nil {
		t.Fatalf("failed to create test cpugroup with: %v", err)
	}
	defer func() {
		err := cpugroup.Delete(ctx, startCPUGroupID)
		if err != nil && err != cpugroup.ErrHVStatusInvalidCPUGroupState {
			t.Fatalf("failed to clean up test cpugroup with: %v", err)
		}
	}()
	updateCPUGroupID := "FA22A12C-36B3-486D-A3E9-BC526C2B450C"
	if err := cpugroup.Create(ctx, updateCPUGroupID, lpIndices); err != nil {
		t.Fatalf("failed to create test cpugroup with: %v", err)
	}
	defer func() {
		err := cpugroup.Delete(ctx, updateCPUGroupID)
		if err != nil && err != cpugroup.ErrHVStatusInvalidCPUGroupState {
			t.Fatalf("failed to clean up test cpugroup with: %v", err)
		}
	}()
	type config struct {
		name             string
		requiredFeatures []string
		runtimeHandler   string
		sandboxImage     string
	}
	tests := []config{
		{
			name:             "WCOW_Hypervisor",
			requiredFeatures: []string{featureWCOWHypervisor},
			runtimeHandler:   wcowHypervisorRuntimeHandler,
			sandboxImage:     imageWindowsNanoserver,
		},
		{
			name:             "LCOW",
			requiredFeatures: []string{featureLCOW},
			runtimeHandler:   lcowRuntimeHandler,
			sandboxImage:     imageLcowK8sPause,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			requireFeatures(t, test.requiredFeatures...)
			if test.runtimeHandler == lcowRuntimeHandler {
				pullRequiredLCOWImages(t, []string{test.sandboxImage})
			} else {
				pullRequiredImages(t, []string{test.sandboxImage})
			}
			// Start the pod in the first CPU group.
			podRequest := getRunPodSandboxRequest(t, test.runtimeHandler, WithSandboxAnnotations(map[string]string{
				annotations.CPUGroupID: startCPUGroupID,
			}))
			client := newTestRuntimeClient(t)
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			podID := runPodSandbox(t, client, ctx, podRequest)
			defer removePodSandbox(t, client, ctx, podID)
			defer stopPodSandbox(t, client, ctx, podID)
			// Ask the runtime to move the pod to the second CPU group.
			updateReq := &runtime.UpdateContainerResourcesRequest{
				ContainerId: podID,
				Annotations: map[string]string{
					annotations.CPUGroupID: updateCPUGroupID,
				},
			}
			if test.runtimeHandler == lcowRuntimeHandler {
				updateReq.Linux = &runtime.LinuxContainerResources{}
			} else {
				updateReq.Windows = &runtime.WindowsContainerResources{}
			}
			if _, err := client.UpdateContainerResources(ctx, updateReq); err != nil {
				t.Fatalf("updating container resources for %s with %v", podID, err)
			}
		})
	}
}
|
/*
Copyright 2020 The cert-manager Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"flag"
)
// Framework holds the flag-configurable settings for this addon's e2e
// framework. It currently defines no options but satisfies the shared
// AddFlags/Validate configuration contract.
type Framework struct {
}

// AddFlags registers this addon's command-line flags on the given flag set.
// No flags are defined yet, so this is a no-op.
func (f *Framework) AddFlags(fs *flag.FlagSet) {
}

// Validate checks the configured values and returns any problems found.
// It always returns nil while Framework has no settings to validate.
func (f *Framework) Validate() []error {
	return nil
}
deleted framework.go
Signed-off-by: RinkiyaKeDad <94438887e1a4c5232e70965941e47fb3ecc9daa8@gmail.com>
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e_node
import (
"fmt"
"net"
"os/exec"
"strings"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
imageutils "k8s.io/kubernetes/test/utils/image"
)
// Security Context node e2e tests: verify sharing of the host PID, IPC and
// network namespaces, and the per-container SecurityContext fields
// runAsUser, readOnlyRootFilesystem, allowPrivilegeEscalation and privileged.
var _ = framework.KubeDescribe("Security Context", func() {
	f := framework.NewDefaultFramework("security-context-test")
	var podClient *framework.PodClient
	BeforeEach(func() {
		podClient = f.PodClient()
	})

	Context("when creating a pod in the host PID namespace", func() {
		// makeHostPidPod builds a never-restarting single-container pod with
		// the requested HostPID setting.
		makeHostPidPod := func(podName, image string, command []string, hostPID bool) *v1.Pod {
			return &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: podName,
				},
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					HostPID:       hostPID,
					Containers: []v1.Container{
						{
							Image:   image,
							Name:    podName,
							Command: command,
						},
					},
				},
			}
		}
		createAndWaitHostPidPod := func(podName string, hostPID bool) {
			podClient.Create(makeHostPidPod(podName,
				busyboxImage,
				// "|| true" keeps the pod exit code zero even when pidof
				// finds nothing; the assertions inspect the logs instead.
				[]string{"sh", "-c", "pidof nginx || true"},
				hostPID,
			))

			podClient.WaitForSuccess(podName, framework.PodStartTimeout)
		}
		nginxPid := ""
		BeforeEach(func() {
			// Start an nginx pod in the host PID namespace and record its
			// pid so the busybox pods below can look for it.
			nginxPodName := "nginx-hostpid-" + string(uuid.NewUUID())
			podClient.CreateSync(makeHostPidPod(nginxPodName,
				imageutils.GetE2EImage(imageutils.NginxSlim),
				nil,
				true,
			))

			output := f.ExecShellInContainer(nginxPodName, nginxPodName,
				"cat /var/run/nginx.pid")
			nginxPid = strings.TrimSpace(output)
		})

		It("should show its pid in the host PID namespace", func() {
			busyboxPodName := "busybox-hostpid-" + string(uuid.NewUUID())
			createAndWaitHostPidPod(busyboxPodName, true)
			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
			if err != nil {
				framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
			}

			pids := strings.TrimSpace(logs)
			framework.Logf("Got nginx's pid %q from pod %q", pids, busyboxPodName)
			if pids == "" {
				framework.Failf("nginx's pid should be seen by hostpid containers")
			}

			pidSets := sets.NewString(strings.Split(pids, " ")...)
			if !pidSets.Has(nginxPid) {
				framework.Failf("nginx's pid should be seen by hostpid containers")
			}
		})

		It("should not show its pid in the non-hostpid containers", func() {
			busyboxPodName := "busybox-non-hostpid-" + string(uuid.NewUUID())
			createAndWaitHostPidPod(busyboxPodName, false)
			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
			if err != nil {
				framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
			}

			pids := strings.TrimSpace(logs)
			framework.Logf("Got nginx's pid %q from pod %q", pids, busyboxPodName)
			pidSets := sets.NewString(strings.Split(pids, " ")...)
			if pidSets.Has(nginxPid) {
				framework.Failf("nginx's pid should not be seen by non-hostpid containers")
			}
		})
	})

	Context("when creating a pod in the host IPC namespace", func() {
		// makeHostIPCPod builds a never-restarting single-container pod with
		// the requested HostIPC setting.
		makeHostIPCPod := func(podName, image string, command []string, hostIPC bool) *v1.Pod {
			return &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: podName,
				},
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					HostIPC:       hostIPC,
					Containers: []v1.Container{
						{
							Image:   image,
							Name:    podName,
							Command: command,
						},
					},
				},
			}
		}
		createAndWaitHostIPCPod := func(podName string, hostNetwork bool) {
			podClient.Create(makeHostIPCPod(podName,
				busyboxImage,
				// List the IDs of all shared-memory segments visible to
				// the container.
				[]string{"sh", "-c", "ipcs -m | awk '{print $2}'"},
				hostNetwork,
			))

			podClient.WaitForSuccess(podName, framework.PodStartTimeout)
		}
		hostSharedMemoryID := ""
		BeforeEach(func() {
			// Create a shared-memory segment on the host; its ID is the
			// marker the assertions below look for.
			output, err := exec.Command("sh", "-c", "ipcmk -M 1M | awk '{print $NF}'").Output()
			if err != nil {
				framework.Failf("Failed to create the shared memory on the host: %v", err)
			}
			hostSharedMemoryID = strings.TrimSpace(string(output))
			framework.Logf("Got host shared memory ID %q", hostSharedMemoryID)
		})

		It("should show the shared memory ID in the host IPC containers", func() {
			busyboxPodName := "busybox-hostipc-" + string(uuid.NewUUID())
			createAndWaitHostIPCPod(busyboxPodName, true)
			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
			if err != nil {
				framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
			}

			podSharedMemoryIDs := strings.TrimSpace(logs)
			framework.Logf("Got shared memory IDs %q from pod %q", podSharedMemoryIDs, busyboxPodName)
			if !strings.Contains(podSharedMemoryIDs, hostSharedMemoryID) {
				framework.Failf("hostIPC container should show shared memory IDs on host")
			}
		})

		It("should not show the shared memory ID in the non-hostIPC containers", func() {
			busyboxPodName := "busybox-non-hostipc-" + string(uuid.NewUUID())
			createAndWaitHostIPCPod(busyboxPodName, false)
			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
			if err != nil {
				framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
			}

			podSharedMemoryIDs := strings.TrimSpace(logs)
			framework.Logf("Got shared memory IDs %q from pod %q", podSharedMemoryIDs, busyboxPodName)
			if strings.Contains(podSharedMemoryIDs, hostSharedMemoryID) {
				framework.Failf("non-hostIPC container should not show shared memory IDs on host")
			}
		})

		AfterEach(func() {
			// Clean up the host shared-memory segment created in BeforeEach.
			if hostSharedMemoryID != "" {
				_, err := exec.Command("sh", "-c", fmt.Sprintf("ipcrm -m %q", hostSharedMemoryID)).Output()
				if err != nil {
					framework.Failf("Failed to remove shared memory %q on the host: %v", hostSharedMemoryID, err)
				}
			}
		})
	})

	Context("when creating a pod in the host network namespace", func() {
		// makeHostNetworkPod builds a never-restarting single-container pod
		// with the requested HostNetwork setting.
		makeHostNetworkPod := func(podName, image string, command []string, hostNetwork bool) *v1.Pod {
			return &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: podName,
				},
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					HostNetwork:   hostNetwork,
					Containers: []v1.Container{
						{
							Image:   image,
							Name:    podName,
							Command: command,
						},
					},
				},
			}
		}
		listListeningPortsCommand := []string{"sh", "-c", "netstat -ln"}
		createAndWaitHostNetworkPod := func(podName string, hostNetwork bool) {
			podClient.Create(makeHostNetworkPod(podName,
				busyboxImage,
				listListeningPortsCommand,
				hostNetwork,
			))

			podClient.WaitForSuccess(podName, framework.PodStartTimeout)
		}
		listeningPort := ""
		var l net.Listener
		var err error
		BeforeEach(func() {
			// Assign with "=" (not ":=") so the outer l is set: shadowing
			// it here left AfterEach with a nil listener, leaking the port
			// across tests and flaking the non-hostnetwork case.
			l, err = net.Listen("tcp", ":0")
			if err != nil {
				framework.Failf("Failed to open a new tcp port: %v", err)
			}
			addr := strings.Split(l.Addr().String(), ":")
			listeningPort = addr[len(addr)-1]
			framework.Logf("Opened a new tcp port %q", listeningPort)
		})

		It("should listen on same port in the host network containers", func() {
			busyboxPodName := "busybox-hostnetwork-" + string(uuid.NewUUID())
			createAndWaitHostNetworkPod(busyboxPodName, true)
			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
			if err != nil {
				framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
			}

			framework.Logf("Got logs for pod %q: %q", busyboxPodName, logs)
			if !strings.Contains(logs, listeningPort) {
				framework.Failf("host-networked container should listening on same port as host")
			}
		})

		It("shouldn't show the same port in the non-hostnetwork containers", func() {
			busyboxPodName := "busybox-non-hostnetwork-" + string(uuid.NewUUID())
			createAndWaitHostNetworkPod(busyboxPodName, false)
			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
			if err != nil {
				framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
			}

			framework.Logf("Got logs for pod %q: %q", busyboxPodName, logs)
			if strings.Contains(logs, listeningPort) {
				framework.Failf("non-hostnetworked container shouldn't show the same port as host")
			}
		})

		AfterEach(func() {
			// Release the host port opened in BeforeEach.
			if l != nil {
				l.Close()
			}
		})
	})

	Context("When creating a container with runAsUser", func() {
		makeUserPod := func(podName, image string, command []string, userid int64) *v1.Pod {
			return &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: podName,
				},
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					Containers: []v1.Container{
						{
							Image:   image,
							Name:    podName,
							Command: command,
							SecurityContext: &v1.SecurityContext{
								RunAsUser: &userid,
							},
						},
					},
				},
			}
		}
		createAndWaitUserPod := func(userid int64) {
			podName := fmt.Sprintf("busybox-user-%d-%s", userid, uuid.NewUUID())
			podClient.Create(makeUserPod(podName,
				busyboxImage,
				// The pod succeeds only if it really runs as the
				// requested uid.
				[]string{"sh", "-c", fmt.Sprintf("test $(id -u) -eq %d", userid)},
				userid,
			))

			podClient.WaitForSuccess(podName, framework.PodStartTimeout)
		}

		It("should run the container with uid 65534", func() {
			createAndWaitUserPod(65534)
		})

		It("should run the container with uid 0", func() {
			createAndWaitUserPod(0)
		})
	})

	Context("When creating a pod with readOnlyRootFilesystem", func() {
		makeUserPod := func(podName, image string, command []string, readOnlyRootFilesystem bool) *v1.Pod {
			return &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: podName,
				},
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					Containers: []v1.Container{
						{
							Image:   image,
							Name:    podName,
							Command: command,
							SecurityContext: &v1.SecurityContext{
								ReadOnlyRootFilesystem: &readOnlyRootFilesystem,
							},
						},
					},
				},
			}
		}
		createAndWaitUserPod := func(readOnlyRootFilesystem bool) string {
			podName := fmt.Sprintf("busybox-readonly-%v-%s", readOnlyRootFilesystem, uuid.NewUUID())
			podClient.Create(makeUserPod(podName,
				imageutils.GetBusyBoxImage(),
				[]string{"sh", "-c", "touch checkfile"},
				readOnlyRootFilesystem,
			))

			// Writing to the rootfs must fail when it is read-only and
			// succeed otherwise.
			if readOnlyRootFilesystem {
				podClient.WaitForFailure(podName, framework.PodStartTimeout)
			} else {
				podClient.WaitForSuccess(podName, framework.PodStartTimeout)
			}

			return podName
		}

		It("should run the container with readonly rootfs when readOnlyRootFilesystem=true", func() {
			createAndWaitUserPod(true)
		})

		It("should run the container with writable rootfs when readOnlyRootFilesystem=false", func() {
			createAndWaitUserPod(false)
		})
	})

	Context("when creating containers with AllowPrivilegeEscalation", func() {
		BeforeEach(func() {
			// no_new_privs support is required for these tests on docker.
			if framework.TestContext.ContainerRuntime == "docker" {
				isSupported, err := isDockerNoNewPrivilegesSupported()
				framework.ExpectNoError(err)
				if !isSupported {
					framework.Skipf("Skipping because no_new_privs is not supported in this docker")
				}
			}
		})

		makeAllowPrivilegeEscalationPod := func(podName string, allowPrivilegeEscalation *bool, uid int64) *v1.Pod {
			return &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: podName,
				},
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					Containers: []v1.Container{
						{
							// The nonewprivs image runs a setuid binary that
							// reports its effective uid.
							Image: imageutils.GetE2EImage(imageutils.Nonewprivs),
							Name:  podName,
							SecurityContext: &v1.SecurityContext{
								AllowPrivilegeEscalation: allowPrivilegeEscalation,
								RunAsUser:                &uid,
							},
						},
					},
				},
			}
		}
		createAndMatchOutput := func(podName, output string, allowPrivilegeEscalation *bool, uid int64) error {
			podClient.Create(makeAllowPrivilegeEscalationPod(podName,
				allowPrivilegeEscalation,
				uid,
			))
			podClient.WaitForSuccess(podName, framework.PodStartTimeout)
			if err := podClient.MatchContainerOutput(podName, podName, output); err != nil {
				return err
			}

			return nil
		}

		It("should allow privilege escalation when not explicitly set and uid != 0", func() {
			podName := "alpine-nnp-nil-" + string(uuid.NewUUID())
			if err := createAndMatchOutput(podName, "Effective uid: 0", nil, 1000); err != nil {
				framework.Failf("Match output for pod %q failed: %v", podName, err)
			}
		})

		It("should not allow privilege escalation when false", func() {
			podName := "alpine-nnp-false-" + string(uuid.NewUUID())
			apeFalse := false
			if err := createAndMatchOutput(podName, "Effective uid: 1000", &apeFalse, 1000); err != nil {
				framework.Failf("Match output for pod %q failed: %v", podName, err)
			}
		})

		It("should allow privilege escalation when true", func() {
			podName := "alpine-nnp-true-" + string(uuid.NewUUID())
			apeTrue := true
			if err := createAndMatchOutput(podName, "Effective uid: 0", &apeTrue, 1000); err != nil {
				framework.Failf("Match output for pod %q failed: %v", podName, err)
			}
		})
	})

	Context("When creating a pod with privileged", func() {
		makeUserPod := func(podName, image string, command []string, privileged bool) *v1.Pod {
			return &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: podName,
				},
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					Containers: []v1.Container{
						{
							Image:   image,
							Name:    podName,
							Command: command,
							SecurityContext: &v1.SecurityContext{
								Privileged: &privileged,
							},
						},
					},
				},
			}
		}
		createAndWaitUserPod := func(privileged bool) string {
			podName := fmt.Sprintf("busybox-privileged-%v-%s", privileged, uuid.NewUUID())
			podClient.Create(makeUserPod(podName,
				busyboxImage,
				// Creating a dummy network device needs CAP_NET_ADMIN;
				// "|| true" keeps the pod successful so logs are checked.
				[]string{"sh", "-c", "ip link add dummy0 type dummy || true"},
				privileged,
			))

			podClient.WaitForSuccess(podName, framework.PodStartTimeout)
			return podName
		}

		It("should run the container as privileged when true", func() {
			podName := createAndWaitUserPod(true)
			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
			if err != nil {
				framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
			}

			framework.Logf("Got logs for pod %q: %q", podName, logs)
			if strings.Contains(logs, "Operation not permitted") {
				framework.Failf("privileged container should be able to create dummy device")
			}
		})

		It("should run the container as unprivileged when false", func() {
			podName := createAndWaitUserPod(false)
			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
			if err != nil {
				framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
			}

			framework.Logf("Got logs for pod %q: %q", podName, logs)
			if !strings.Contains(logs, "Operation not permitted") {
				framework.Failf("unprivileged container shouldn't be able to create dummy device")
			}
		})
	})
})
Fix host network flake tests
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e_node
import (
"fmt"
"net"
"os/exec"
"strings"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
imageutils "k8s.io/kubernetes/test/utils/image"
)
// Node e2e suite for pod/container security settings: host PID, IPC and
// network namespace sharing, runAsUser, readOnlyRootFilesystem,
// allowPrivilegeEscalation, and privileged containers.
var _ = framework.KubeDescribe("Security Context", func() {
	f := framework.NewDefaultFramework("security-context-test")
	var podClient *framework.PodClient
	BeforeEach(func() {
		podClient = f.PodClient()
	})

	// Host PID namespace: a hostPID container must see nginx's host pid,
	// a non-hostPID container must not.
	Context("when creating a pod in the host PID namespace", func() {
		makeHostPidPod := func(podName, image string, command []string, hostPID bool) *v1.Pod {
			return &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: podName,
				},
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					HostPID:       hostPID,
					Containers: []v1.Container{
						{
							Image:   image,
							Name:    podName,
							Command: command,
						},
					},
				},
			}
		}
		createAndWaitHostPidPod := func(podName string, hostPID bool) {
			podClient.Create(makeHostPidPod(podName,
				busyboxImage,
				// "|| true": the pod always succeeds; the logs carry the
				// pidof result for the assertions.
				[]string{"sh", "-c", "pidof nginx || true"},
				hostPID,
			))

			podClient.WaitForSuccess(podName, framework.PodStartTimeout)
		}
		nginxPid := ""
		BeforeEach(func() {
			// Launch a hostPID nginx pod and capture its pid as the marker.
			nginxPodName := "nginx-hostpid-" + string(uuid.NewUUID())
			podClient.CreateSync(makeHostPidPod(nginxPodName,
				imageutils.GetE2EImage(imageutils.NginxSlim),
				nil,
				true,
			))

			output := f.ExecShellInContainer(nginxPodName, nginxPodName,
				"cat /var/run/nginx.pid")
			nginxPid = strings.TrimSpace(output)
		})

		It("should show its pid in the host PID namespace", func() {
			busyboxPodName := "busybox-hostpid-" + string(uuid.NewUUID())
			createAndWaitHostPidPod(busyboxPodName, true)
			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
			if err != nil {
				framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
			}

			pids := strings.TrimSpace(logs)
			framework.Logf("Got nginx's pid %q from pod %q", pids, busyboxPodName)
			if pids == "" {
				framework.Failf("nginx's pid should be seen by hostpid containers")
			}

			pidSets := sets.NewString(strings.Split(pids, " ")...)
			if !pidSets.Has(nginxPid) {
				framework.Failf("nginx's pid should be seen by hostpid containers")
			}
		})

		It("should not show its pid in the non-hostpid containers", func() {
			busyboxPodName := "busybox-non-hostpid-" + string(uuid.NewUUID())
			createAndWaitHostPidPod(busyboxPodName, false)
			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
			if err != nil {
				framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
			}

			pids := strings.TrimSpace(logs)
			framework.Logf("Got nginx's pid %q from pod %q", pids, busyboxPodName)
			pidSets := sets.NewString(strings.Split(pids, " ")...)
			if pidSets.Has(nginxPid) {
				framework.Failf("nginx's pid should not be seen by non-hostpid containers")
			}
		})
	})

	// Host IPC namespace: a shared-memory segment created on the host must
	// be visible only to hostIPC containers.
	Context("when creating a pod in the host IPC namespace", func() {
		makeHostIPCPod := func(podName, image string, command []string, hostIPC bool) *v1.Pod {
			return &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: podName,
				},
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					HostIPC:       hostIPC,
					Containers: []v1.Container{
						{
							Image:   image,
							Name:    podName,
							Command: command,
						},
					},
				},
			}
		}
		createAndWaitHostIPCPod := func(podName string, hostNetwork bool) {
			podClient.Create(makeHostIPCPod(podName,
				busyboxImage,
				// Print the IDs of all visible shared-memory segments.
				[]string{"sh", "-c", "ipcs -m | awk '{print $2}'"},
				hostNetwork,
			))

			podClient.WaitForSuccess(podName, framework.PodStartTimeout)
		}
		hostSharedMemoryID := ""
		BeforeEach(func() {
			// Create a 1M shared-memory segment on the host as the marker.
			output, err := exec.Command("sh", "-c", "ipcmk -M 1M | awk '{print $NF}'").Output()
			if err != nil {
				framework.Failf("Failed to create the shared memory on the host: %v", err)
			}
			hostSharedMemoryID = strings.TrimSpace(string(output))
			framework.Logf("Got host shared memory ID %q", hostSharedMemoryID)
		})

		It("should show the shared memory ID in the host IPC containers", func() {
			busyboxPodName := "busybox-hostipc-" + string(uuid.NewUUID())
			createAndWaitHostIPCPod(busyboxPodName, true)
			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
			if err != nil {
				framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
			}

			podSharedMemoryIDs := strings.TrimSpace(logs)
			framework.Logf("Got shared memory IDs %q from pod %q", podSharedMemoryIDs, busyboxPodName)
			if !strings.Contains(podSharedMemoryIDs, hostSharedMemoryID) {
				framework.Failf("hostIPC container should show shared memory IDs on host")
			}
		})

		It("should not show the shared memory ID in the non-hostIPC containers", func() {
			busyboxPodName := "busybox-non-hostipc-" + string(uuid.NewUUID())
			createAndWaitHostIPCPod(busyboxPodName, false)
			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
			if err != nil {
				framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
			}

			podSharedMemoryIDs := strings.TrimSpace(logs)
			framework.Logf("Got shared memory IDs %q from pod %q", podSharedMemoryIDs, busyboxPodName)
			if strings.Contains(podSharedMemoryIDs, hostSharedMemoryID) {
				framework.Failf("non-hostIPC container should not show shared memory IDs on host")
			}
		})

		AfterEach(func() {
			// Remove the host shared-memory segment created in BeforeEach.
			if hostSharedMemoryID != "" {
				_, err := exec.Command("sh", "-c", fmt.Sprintf("ipcrm -m %q", hostSharedMemoryID)).Output()
				if err != nil {
					framework.Failf("Failed to remove shared memory %q on the host: %v", hostSharedMemoryID, err)
				}
			}
		})
	})

	// Host network namespace: a port opened on the host must appear in
	// netstat output only inside hostNetwork containers.
	Context("when creating a pod in the host network namespace", func() {
		makeHostNetworkPod := func(podName, image string, command []string, hostNetwork bool) *v1.Pod {
			return &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: podName,
				},
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					HostNetwork:   hostNetwork,
					Containers: []v1.Container{
						{
							Image:   image,
							Name:    podName,
							Command: command,
						},
					},
				},
			}
		}
		listListeningPortsCommand := []string{"sh", "-c", "netstat -ln"}
		createAndWaitHostNetworkPod := func(podName string, hostNetwork bool) {
			podClient.Create(makeHostNetworkPod(podName,
				busyboxImage,
				listListeningPortsCommand,
				hostNetwork,
			))

			podClient.WaitForSuccess(podName, framework.PodStartTimeout)
		}
		listeningPort := ""
		var l net.Listener
		// err is declared at Context scope so BeforeEach assigns (not
		// shadows) the outer l, letting AfterEach close the listener.
		var err error
		BeforeEach(func() {
			l, err = net.Listen("tcp", ":0")
			if err != nil {
				framework.Failf("Failed to open a new tcp port: %v", err)
			}
			addr := strings.Split(l.Addr().String(), ":")
			listeningPort = addr[len(addr)-1]
			framework.Logf("Opened a new tcp port %q", listeningPort)
		})

		It("should listen on same port in the host network containers", func() {
			busyboxPodName := "busybox-hostnetwork-" + string(uuid.NewUUID())
			createAndWaitHostNetworkPod(busyboxPodName, true)
			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
			if err != nil {
				framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
			}

			framework.Logf("Got logs for pod %q: %q", busyboxPodName, logs)
			if !strings.Contains(logs, listeningPort) {
				framework.Failf("host-networked container should listening on same port as host")
			}
		})

		It("shouldn't show the same port in the non-hostnetwork containers", func() {
			busyboxPodName := "busybox-non-hostnetwork-" + string(uuid.NewUUID())
			createAndWaitHostNetworkPod(busyboxPodName, false)
			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
			if err != nil {
				framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
			}

			framework.Logf("Got logs for pod %q: %q", busyboxPodName, logs)
			if strings.Contains(logs, listeningPort) {
				framework.Failf("non-hostnetworked container shouldn't show the same port as host")
			}
		})

		AfterEach(func() {
			// Release the host port opened in BeforeEach.
			if l != nil {
				l.Close()
			}
		})
	})

	// runAsUser: the container must run with exactly the requested uid.
	Context("When creating a container with runAsUser", func() {
		makeUserPod := func(podName, image string, command []string, userid int64) *v1.Pod {
			return &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: podName,
				},
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					Containers: []v1.Container{
						{
							Image:   image,
							Name:    podName,
							Command: command,
							SecurityContext: &v1.SecurityContext{
								RunAsUser: &userid,
							},
						},
					},
				},
			}
		}
		createAndWaitUserPod := func(userid int64) {
			podName := fmt.Sprintf("busybox-user-%d-%s", userid, uuid.NewUUID())
			podClient.Create(makeUserPod(podName,
				busyboxImage,
				// Succeeds only when the effective uid matches.
				[]string{"sh", "-c", fmt.Sprintf("test $(id -u) -eq %d", userid)},
				userid,
			))

			podClient.WaitForSuccess(podName, framework.PodStartTimeout)
		}

		It("should run the container with uid 65534", func() {
			createAndWaitUserPod(65534)
		})

		It("should run the container with uid 0", func() {
			createAndWaitUserPod(0)
		})
	})

	// readOnlyRootFilesystem: writing to the rootfs must fail iff the
	// rootfs is read-only.
	Context("When creating a pod with readOnlyRootFilesystem", func() {
		makeUserPod := func(podName, image string, command []string, readOnlyRootFilesystem bool) *v1.Pod {
			return &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: podName,
				},
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					Containers: []v1.Container{
						{
							Image:   image,
							Name:    podName,
							Command: command,
							SecurityContext: &v1.SecurityContext{
								ReadOnlyRootFilesystem: &readOnlyRootFilesystem,
							},
						},
					},
				},
			}
		}
		createAndWaitUserPod := func(readOnlyRootFilesystem bool) string {
			podName := fmt.Sprintf("busybox-readonly-%v-%s", readOnlyRootFilesystem, uuid.NewUUID())
			podClient.Create(makeUserPod(podName,
				imageutils.GetBusyBoxImage(),
				[]string{"sh", "-c", "touch checkfile"},
				readOnlyRootFilesystem,
			))

			if readOnlyRootFilesystem {
				podClient.WaitForFailure(podName, framework.PodStartTimeout)
			} else {
				podClient.WaitForSuccess(podName, framework.PodStartTimeout)
			}

			return podName
		}

		It("should run the container with readonly rootfs when readOnlyRootFilesystem=true", func() {
			createAndWaitUserPod(true)
		})

		It("should run the container with writable rootfs when readOnlyRootFilesystem=false", func() {
			createAndWaitUserPod(false)
		})
	})

	// allowPrivilegeEscalation: check the effective uid reported by a
	// setuid binary under nil/false/true settings.
	Context("when creating containers with AllowPrivilegeEscalation", func() {
		BeforeEach(func() {
			// These tests require no_new_privs support on docker.
			if framework.TestContext.ContainerRuntime == "docker" {
				isSupported, err := isDockerNoNewPrivilegesSupported()
				framework.ExpectNoError(err)
				if !isSupported {
					framework.Skipf("Skipping because no_new_privs is not supported in this docker")
				}
			}
		})

		makeAllowPrivilegeEscalationPod := func(podName string, allowPrivilegeEscalation *bool, uid int64) *v1.Pod {
			return &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: podName,
				},
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					Containers: []v1.Container{
						{
							Image: imageutils.GetE2EImage(imageutils.Nonewprivs),
							Name:  podName,
							SecurityContext: &v1.SecurityContext{
								AllowPrivilegeEscalation: allowPrivilegeEscalation,
								RunAsUser:                &uid,
							},
						},
					},
				},
			}
		}
		createAndMatchOutput := func(podName, output string, allowPrivilegeEscalation *bool, uid int64) error {
			podClient.Create(makeAllowPrivilegeEscalationPod(podName,
				allowPrivilegeEscalation,
				uid,
			))
			podClient.WaitForSuccess(podName, framework.PodStartTimeout)
			if err := podClient.MatchContainerOutput(podName, podName, output); err != nil {
				return err
			}

			return nil
		}

		It("should allow privilege escalation when not explicitly set and uid != 0", func() {
			podName := "alpine-nnp-nil-" + string(uuid.NewUUID())
			if err := createAndMatchOutput(podName, "Effective uid: 0", nil, 1000); err != nil {
				framework.Failf("Match output for pod %q failed: %v", podName, err)
			}
		})

		It("should not allow privilege escalation when false", func() {
			podName := "alpine-nnp-false-" + string(uuid.NewUUID())
			apeFalse := false
			if err := createAndMatchOutput(podName, "Effective uid: 1000", &apeFalse, 1000); err != nil {
				framework.Failf("Match output for pod %q failed: %v", podName, err)
			}
		})

		It("should allow privilege escalation when true", func() {
			podName := "alpine-nnp-true-" + string(uuid.NewUUID())
			apeTrue := true
			if err := createAndMatchOutput(podName, "Effective uid: 0", &apeTrue, 1000); err != nil {
				framework.Failf("Match output for pod %q failed: %v", podName, err)
			}
		})
	})

	// privileged: only privileged containers may create network devices.
	Context("When creating a pod with privileged", func() {
		makeUserPod := func(podName, image string, command []string, privileged bool) *v1.Pod {
			return &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: podName,
				},
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					Containers: []v1.Container{
						{
							Image:   image,
							Name:    podName,
							Command: command,
							SecurityContext: &v1.SecurityContext{
								Privileged: &privileged,
							},
						},
					},
				},
			}
		}
		createAndWaitUserPod := func(privileged bool) string {
			podName := fmt.Sprintf("busybox-privileged-%v-%s", privileged, uuid.NewUUID())
			podClient.Create(makeUserPod(podName,
				busyboxImage,
				// "|| true" keeps the pod successful; the logs tell us
				// whether the operation was permitted.
				[]string{"sh", "-c", "ip link add dummy0 type dummy || true"},
				privileged,
			))

			podClient.WaitForSuccess(podName, framework.PodStartTimeout)
			return podName
		}

		It("should run the container as privileged when true", func() {
			podName := createAndWaitUserPod(true)
			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
			if err != nil {
				framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
			}

			framework.Logf("Got logs for pod %q: %q", podName, logs)
			if strings.Contains(logs, "Operation not permitted") {
				framework.Failf("privileged container should be able to create dummy device")
			}
		})

		It("should run the container as unprivileged when false", func() {
			podName := createAndWaitUserPod(false)
			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
			if err != nil {
				framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
			}

			framework.Logf("Got logs for pod %q: %q", podName, logs)
			if !strings.Contains(logs, "Operation not permitted") {
				framework.Failf("unprivileged container shouldn't be able to create dummy device")
			}
		})
	})
})
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2enode
import (
"fmt"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
// Helper for makeTopologyManagerPod().
// tmCtnAttribute describes one container to generate: its name and the CPU
// request/limit strings used to build its ResourceRequirements.
type tmCtnAttribute struct {
	ctnName    string // container name
	cpuRequest string // CPU request quantity, e.g. "1000m"
	cpuLimit   string // CPU limit quantity, e.g. "1000m"
}
// makeTopologyManagerPod returns a never-restarting pod with one container
// per provided tmCtnAttribute; each container prints the cpuset it is
// allowed to run on and then sleeps so the log stays available.
func makeTopologyManagerPod(podName string, tmCtnAttributes []tmCtnAttribute) *v1.Pod {
	var containers []v1.Container
	for _, ctnAttr := range tmCtnAttributes {
		// Report the allowed cpuset, then keep the container alive.
		// (Plain string literal: fmt.Sprintf with no arguments was a
		// go vet "constant format string" finding.)
		cpusetCmd := "grep Cpus_allowed_list /proc/self/status | cut -f2 && sleep 1d"
		ctn := v1.Container{
			Name:  ctnAttr.ctnName,
			Image: busyboxImage,
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceName(v1.ResourceCPU):    resource.MustParse(ctnAttr.cpuRequest),
					v1.ResourceName(v1.ResourceMemory): resource.MustParse("100Mi"),
				},
				Limits: v1.ResourceList{
					v1.ResourceName(v1.ResourceCPU):    resource.MustParse(ctnAttr.cpuLimit),
					v1.ResourceName(v1.ResourceMemory): resource.MustParse("100Mi"),
				},
			},
			Command: []string{"sh", "-c", cpusetCmd},
		}
		containers = append(containers, ctn)
	}

	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: podName,
		},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers:    containers,
		},
	}
}
// configureTopologyManagerInKubelet reconfigures the Kubelet with the given
// Topology Manager policy (plus the static CPU Manager policy it relies on)
// and blocks until the node reports Ready again.
func configureTopologyManagerInKubelet(f *framework.Framework, policy string) {
	// Configure Topology Manager in Kubelet with policy.
	oldCfg, err := getCurrentKubeletConfig()
	framework.ExpectNoError(err)
	newCfg := oldCfg.DeepCopy()
	if newCfg.FeatureGates == nil {
		newCfg.FeatureGates = make(map[string]bool)
	}
	newCfg.FeatureGates["CPUManager"] = true
	newCfg.FeatureGates["TopologyManager"] = true

	// Drop any stale CPU Manager state so the policy change takes effect.
	deleteStateFile()

	// Set the Topology Manager policy
	newCfg.TopologyManagerPolicy = policy

	// Set the CPU Manager policy to static.
	newCfg.CPUManagerPolicy = string(cpumanager.PolicyStatic)

	// Set the CPU Manager reconcile period to 1 second.
	newCfg.CPUManagerReconcilePeriod = metav1.Duration{Duration: 1 * time.Second}

	// The Kubelet panics if either kube-reserved or system-reserved is not set
	// when CPU Manager is enabled. Set cpu in kube-reserved > 0 so that
	// kubelet doesn't panic.
	if newCfg.KubeReserved == nil {
		newCfg.KubeReserved = map[string]string{}
	}

	if _, ok := newCfg.KubeReserved["cpu"]; !ok {
		newCfg.KubeReserved["cpu"] = "200m"
	}
	// Dump the resulting config for debugging; %+v (not %s) renders the
	// struct with field names instead of %!s noise.
	framework.Logf("New kubelet config is %+v", *newCfg)

	// Update the Kubelet configuration.
	framework.ExpectNoError(setKubeletConfiguration(f, newCfg))

	// Wait for the Kubelet to be ready.
	gomega.Eventually(func() bool {
		nodes, err := e2enode.TotalReady(f.ClientSet)
		framework.ExpectNoError(err)
		return nodes == 1
	}, time.Minute, time.Second).Should(gomega.BeTrue())
}
// runTopologyManagerSuiteTests runs the Topology Manager e2e scenarios:
// non-guaranteed pods, guaranteed pods (single and multiple CPUs, single and
// multiple containers), and mixes thereof, verifying the cpuset assigned to
// each container via /proc/self/status.
func runTopologyManagerSuiteTests(f *framework.Framework) {
	var cpuCap, cpuAlloc int64
	var cpuListString, expAllowedCPUsListRegex string
	var cpuList []int
	var cpu1, cpu2 int
	var cset cpuset.CPUSet
	var err error
	var ctnAttrs []tmCtnAttribute
	var pod, pod1, pod2 *v1.Pod

	cpuCap, cpuAlloc, _ = getLocalNodeCPUDetails(f)

	ginkgo.By("running a non-Gu pod")
	ctnAttrs = []tmCtnAttribute{
		{
			ctnName:    "non-gu-container",
			cpuRequest: "100m",
			cpuLimit:   "200m",
		},
	}
	pod = makeTopologyManagerPod("non-gu-pod", ctnAttrs)
	pod = f.PodClient().CreateSync(pod)

	ginkgo.By("checking if the expected cpuset was assigned")
	// A non-guaranteed pod may run on any of the node's CPUs.
	expAllowedCPUsListRegex = fmt.Sprintf("^0-%d\n$", cpuCap-1)
	err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
		pod.Spec.Containers[0].Name, pod.Name)

	ginkgo.By("by deleting the pods and waiting for container removal")
	deletePods(f, []string{pod.Name})
	waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)

	ginkgo.By("running a Gu pod")
	ctnAttrs = []tmCtnAttribute{
		{
			ctnName:    "gu-container",
			cpuRequest: "1000m",
			cpuLimit:   "1000m",
		},
	}
	pod = makeTopologyManagerPod("gu-pod", ctnAttrs)
	pod = f.PodClient().CreateSync(pod)

	ginkgo.By("checking if the expected cpuset was assigned")
	cpu1 = 1
	if isHTEnabled() {
		// With hyperthreading the sibling of CPU 0 may be assigned; use the
		// actual second logical CPU of core 0.
		cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
		cpu1 = cpuList[1]
	}
	expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu1)
	err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
		pod.Spec.Containers[0].Name, pod.Name)

	ginkgo.By("by deleting the pods and waiting for container removal")
	deletePods(f, []string{pod.Name})
	waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)

	ginkgo.By("running multiple Gu and non-Gu pods")
	ctnAttrs = []tmCtnAttribute{
		{
			ctnName:    "gu-container",
			cpuRequest: "1000m",
			cpuLimit:   "1000m",
		},
	}
	pod1 = makeTopologyManagerPod("gu-pod", ctnAttrs)
	pod1 = f.PodClient().CreateSync(pod1)

	ctnAttrs = []tmCtnAttribute{
		{
			ctnName:    "non-gu-container",
			cpuRequest: "200m",
			cpuLimit:   "300m",
		},
	}
	pod2 = makeTopologyManagerPod("non-gu-pod", ctnAttrs)
	pod2 = f.PodClient().CreateSync(pod2)

	ginkgo.By("checking if the expected cpuset was assigned")
	cpu1 = 1
	if isHTEnabled() {
		cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
		cpu1 = cpuList[1]
	}
	expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu1)
	err = f.PodClient().MatchContainerOutput(pod1.Name, pod1.Spec.Containers[0].Name, expAllowedCPUsListRegex)
	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
		pod1.Spec.Containers[0].Name, pod1.Name)

	// The non-guaranteed pod may use every CPU except the one pinned to the
	// guaranteed pod (when more than 2 CPUs are allocatable).
	cpuListString = "0"
	if cpuAlloc > 2 {
		cset = cpuset.MustParse(fmt.Sprintf("0-%d", cpuCap-1))
		cpuListString = cset.Difference(cpuset.NewCPUSet(cpu1)).String()
	}
	expAllowedCPUsListRegex = fmt.Sprintf("^%s\n$", cpuListString)
	err = f.PodClient().MatchContainerOutput(pod2.Name, pod2.Spec.Containers[0].Name, expAllowedCPUsListRegex)
	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
		pod2.Spec.Containers[0].Name, pod2.Name)

	ginkgo.By("by deleting the pods and waiting for container removal")
	deletePods(f, []string{pod1.Name, pod2.Name})
	waitForContainerRemoval(pod1.Spec.Containers[0].Name, pod1.Name, pod1.Namespace)
	waitForContainerRemoval(pod2.Spec.Containers[0].Name, pod2.Name, pod2.Namespace)

	// Skip rest of the tests if CPU capacity < 3.
	if cpuCap < 3 {
		e2eskipper.Skipf("Skipping rest of the CPU Manager tests since CPU capacity < 3")
	}

	ginkgo.By("running a Gu pod requesting multiple CPUs")
	ctnAttrs = []tmCtnAttribute{
		{
			ctnName:    "gu-container",
			cpuRequest: "2000m",
			cpuLimit:   "2000m",
		},
	}
	pod = makeTopologyManagerPod("gu-pod", ctnAttrs)
	pod = f.PodClient().CreateSync(pod)

	ginkgo.By("checking if the expected cpuset was assigned")
	cpuListString = "1-2"
	if isHTEnabled() {
		cpuListString = "2-3"
		cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
		if cpuList[1] != 1 {
			cset = cpuset.MustParse(getCPUSiblingList(1))
			cpuListString = cset.String()
		}
	}
	expAllowedCPUsListRegex = fmt.Sprintf("^%s\n$", cpuListString)
	err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
		pod.Spec.Containers[0].Name, pod.Name)

	ginkgo.By("by deleting the pods and waiting for container removal")
	deletePods(f, []string{pod.Name})
	waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)

	ginkgo.By("running a Gu pod with multiple containers requesting integer CPUs")
	ctnAttrs = []tmCtnAttribute{
		{
			ctnName:    "gu-container1",
			cpuRequest: "1000m",
			cpuLimit:   "1000m",
		},
		{
			ctnName:    "gu-container2",
			cpuRequest: "1000m",
			cpuLimit:   "1000m",
		},
	}
	pod = makeTopologyManagerPod("gu-pod", ctnAttrs)
	pod = f.PodClient().CreateSync(pod)

	ginkgo.By("checking if the expected cpuset was assigned")
	cpu1, cpu2 = 1, 2
	if isHTEnabled() {
		cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
		if cpuList[1] != 1 {
			cpu1, cpu2 = cpuList[1], 1
		}
	}
	// Group the alternation: "^a|b\n$" parses as (^a)|(b\n$), which would also
	// match unintended outputs such as "12\n". "(a|b)" anchors both options.
	expAllowedCPUsListRegex = fmt.Sprintf("^(%d|%d)\n$", cpu1, cpu2)
	err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
		pod.Spec.Containers[0].Name, pod.Name)
	// Verify the second container as well (previously container 0 was checked twice).
	err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[1].Name, expAllowedCPUsListRegex)
	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
		pod.Spec.Containers[1].Name, pod.Name)

	ginkgo.By("by deleting the pods and waiting for container removal")
	deletePods(f, []string{pod.Name})
	waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
	waitForContainerRemoval(pod.Spec.Containers[1].Name, pod.Name, pod.Namespace)

	ginkgo.By("running multiple Gu pods")
	ctnAttrs = []tmCtnAttribute{
		{
			ctnName:    "gu-container1",
			cpuRequest: "1000m",
			cpuLimit:   "1000m",
		},
	}
	pod1 = makeTopologyManagerPod("gu-pod1", ctnAttrs)
	pod1 = f.PodClient().CreateSync(pod1)

	ctnAttrs = []tmCtnAttribute{
		{
			ctnName:    "gu-container2",
			cpuRequest: "1000m",
			cpuLimit:   "1000m",
		},
	}
	pod2 = makeTopologyManagerPod("gu-pod2", ctnAttrs)
	pod2 = f.PodClient().CreateSync(pod2)

	ginkgo.By("checking if the expected cpuset was assigned")
	cpu1, cpu2 = 1, 2
	if isHTEnabled() {
		cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
		if cpuList[1] != 1 {
			cpu1, cpu2 = cpuList[1], 1
		}
	}
	expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu1)
	err = f.PodClient().MatchContainerOutput(pod1.Name, pod1.Spec.Containers[0].Name, expAllowedCPUsListRegex)
	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
		pod1.Spec.Containers[0].Name, pod1.Name)

	expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu2)
	err = f.PodClient().MatchContainerOutput(pod2.Name, pod2.Spec.Containers[0].Name, expAllowedCPUsListRegex)
	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
		pod2.Spec.Containers[0].Name, pod2.Name)

	ginkgo.By("by deleting the pods and waiting for container removal")
	deletePods(f, []string{pod1.Name, pod2.Name})
	waitForContainerRemoval(pod1.Spec.Containers[0].Name, pod1.Name, pod1.Namespace)
	waitForContainerRemoval(pod2.Spec.Containers[0].Name, pod2.Name, pod2.Namespace)
}
// runTopologyManagerTests registers the Topology Manager suite: it runs the
// scenario tests once per supported policy, then restores the original
// Kubelet configuration.
func runTopologyManagerTests(f *framework.Framework) {
	ginkgo.It("run Topology Manager test suite", func() {
		// Save the running Kubelet configuration so it can be restored after
		// the suite. (Previously oldCfg was declared but never assigned, so a
		// nil configuration was "restored".)
		oldCfg, err := getCurrentKubeletConfig()
		framework.ExpectNoError(err)

		var policies = []string{topologymanager.PolicySingleNumaNode, topologymanager.PolicyRestricted,
			topologymanager.PolicyBestEffort, topologymanager.PolicyNone}

		for _, policy := range policies {
			// Configure Topology Manager with the real policy name instead of
			// the "xxx" placeholder.
			ginkgo.By(fmt.Sprintf("by configuring Topology Manager policy to %s", policy))
			framework.Logf("Configuring topology Manager policy to %s", policy)
			configureTopologyManagerInKubelet(f, policy)
			// Run the tests
			runTopologyManagerSuiteTests(f)
		}

		// restore kubelet config
		setOldKubeletConfig(f, oldCfg)

		// Delete state file to allow repeated runs
		deleteStateFile()
	})
}
// Serial because the test updates kubelet configuration.
// Registers the Topology Manager node e2e suite; the suite reconfigures the
// Kubelet, so it must not run concurrently with other tests on the node.
var _ = SIGDescribe("Topology Manager [Serial] [Feature:TopologyManager][NodeAlphaFeature:TopologyManager]", func() {
	f := framework.NewDefaultFramework("topology-manager-test")
	ginkgo.Context("With kubeconfig updated to static CPU Manager policy run the Topology Manager tests", func() {
		runTopologyManagerTests(f)
	})
})
e2e: topomgr: explicitly save the kubelet config
For the sake of readability, save the old Kubelet config
once.
Signed-off-by: Francesco Romani <c7100b580e6017ef67482ace46578cdd2d2e0dfe@redhat.com>
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2enode
import (
"fmt"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
// Helper for makeTopologyManagerPod().
// tmCtnAttribute describes one test container: its name and its CPU request
// and limit, expressed as Kubernetes quantity strings (e.g. "1000m").
type tmCtnAttribute struct {
	ctnName    string // container name
	cpuRequest string // CPU request quantity, e.g. "100m"
	cpuLimit   string // CPU limit quantity, e.g. "200m"
}
// makeTopologyManagerPod returns a pod with one container per entry in
// tmCtnAttributes; each container prints its allowed cpuset and then sleeps
// so the output can be inspected. (Typo "Manger" fixed in this comment.)
func makeTopologyManagerPod(podName string, tmCtnAttributes []tmCtnAttribute) *v1.Pod {
	// The command is loop-invariant and contains no format directives, so a
	// plain constant replaces the vet-flagged fmt.Sprintf call.
	const cpusetCmd = "grep Cpus_allowed_list /proc/self/status | cut -f2 && sleep 1d"

	var containers []v1.Container
	for _, ctnAttr := range tmCtnAttributes {
		ctn := v1.Container{
			Name:  ctnAttr.ctnName,
			Image: busyboxImage,
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceName(v1.ResourceCPU):    resource.MustParse(ctnAttr.cpuRequest),
					v1.ResourceName(v1.ResourceMemory): resource.MustParse("100Mi"),
				},
				Limits: v1.ResourceList{
					v1.ResourceName(v1.ResourceCPU):    resource.MustParse(ctnAttr.cpuLimit),
					v1.ResourceName(v1.ResourceMemory): resource.MustParse("100Mi"),
				},
			},
			Command: []string{"sh", "-c", cpusetCmd},
		}
		containers = append(containers, ctn)
	}

	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: podName,
		},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers:    containers,
		},
	}
}
// configureTopologyManagerInKubelet derives a new Kubelet configuration from
// the caller-supplied oldCfg with the given Topology Manager policy plus the
// static CPU Manager policy, applies it, and waits for the node to become
// ready again.
func configureTopologyManagerInKubelet(f *framework.Framework, oldCfg *kubeletconfig.KubeletConfiguration, policy string) {
	// Configure Topology Manager in Kubelet with policy.
	newCfg := oldCfg.DeepCopy()
	if newCfg.FeatureGates == nil {
		newCfg.FeatureGates = make(map[string]bool)
	}
	newCfg.FeatureGates["CPUManager"] = true
	newCfg.FeatureGates["TopologyManager"] = true

	// Drop the CPU Manager state file so the new policy starts from a clean slate.
	deleteStateFile()

	// Set the Topology Manager policy.
	newCfg.TopologyManagerPolicy = policy

	// Set the CPU Manager policy to static.
	newCfg.CPUManagerPolicy = string(cpumanager.PolicyStatic)

	// Set the CPU Manager reconcile period to 1 second.
	newCfg.CPUManagerReconcilePeriod = metav1.Duration{Duration: 1 * time.Second}

	// The Kubelet panics if either kube-reserved or system-reserved is not set
	// when CPU Manager is enabled. Set cpu in kube-reserved > 0 so that
	// kubelet doesn't panic.
	if newCfg.KubeReserved == nil {
		newCfg.KubeReserved = map[string]string{}
	}
	if _, ok := newCfg.KubeReserved["cpu"]; !ok {
		newCfg.KubeReserved["cpu"] = "200m"
	}

	// Dump the config for debugging. %+v: the config is a struct, not a string,
	// so %s would render it with %!s noise (and "kublet" typo fixed).
	framework.Logf("New kubelet config is %+v", *newCfg)

	// Update the Kubelet configuration.
	framework.ExpectNoError(setKubeletConfiguration(f, newCfg))

	// Wait for the Kubelet to be ready.
	gomega.Eventually(func() bool {
		nodes, err := e2enode.TotalReady(f.ClientSet)
		framework.ExpectNoError(err)
		return nodes == 1
	}, time.Minute, time.Second).Should(gomega.BeTrue())
}
// runTopologyManagerSuiteTests runs the Topology Manager e2e scenarios:
// non-guaranteed pods, guaranteed pods (single and multiple CPUs, single and
// multiple containers), and mixes thereof, verifying the cpuset assigned to
// each container via /proc/self/status.
func runTopologyManagerSuiteTests(f *framework.Framework) {
	var cpuCap, cpuAlloc int64
	var cpuListString, expAllowedCPUsListRegex string
	var cpuList []int
	var cpu1, cpu2 int
	var cset cpuset.CPUSet
	var err error
	var ctnAttrs []tmCtnAttribute
	var pod, pod1, pod2 *v1.Pod

	cpuCap, cpuAlloc, _ = getLocalNodeCPUDetails(f)

	ginkgo.By("running a non-Gu pod")
	ctnAttrs = []tmCtnAttribute{
		{
			ctnName:    "non-gu-container",
			cpuRequest: "100m",
			cpuLimit:   "200m",
		},
	}
	pod = makeTopologyManagerPod("non-gu-pod", ctnAttrs)
	pod = f.PodClient().CreateSync(pod)

	ginkgo.By("checking if the expected cpuset was assigned")
	// A non-guaranteed pod may run on any of the node's CPUs.
	expAllowedCPUsListRegex = fmt.Sprintf("^0-%d\n$", cpuCap-1)
	err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
		pod.Spec.Containers[0].Name, pod.Name)

	ginkgo.By("by deleting the pods and waiting for container removal")
	deletePods(f, []string{pod.Name})
	waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)

	ginkgo.By("running a Gu pod")
	ctnAttrs = []tmCtnAttribute{
		{
			ctnName:    "gu-container",
			cpuRequest: "1000m",
			cpuLimit:   "1000m",
		},
	}
	pod = makeTopologyManagerPod("gu-pod", ctnAttrs)
	pod = f.PodClient().CreateSync(pod)

	ginkgo.By("checking if the expected cpuset was assigned")
	cpu1 = 1
	if isHTEnabled() {
		// With hyperthreading the sibling of CPU 0 may be assigned; use the
		// actual second logical CPU of core 0.
		cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
		cpu1 = cpuList[1]
	}
	expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu1)
	err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
		pod.Spec.Containers[0].Name, pod.Name)

	ginkgo.By("by deleting the pods and waiting for container removal")
	deletePods(f, []string{pod.Name})
	waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)

	ginkgo.By("running multiple Gu and non-Gu pods")
	ctnAttrs = []tmCtnAttribute{
		{
			ctnName:    "gu-container",
			cpuRequest: "1000m",
			cpuLimit:   "1000m",
		},
	}
	pod1 = makeTopologyManagerPod("gu-pod", ctnAttrs)
	pod1 = f.PodClient().CreateSync(pod1)

	ctnAttrs = []tmCtnAttribute{
		{
			ctnName:    "non-gu-container",
			cpuRequest: "200m",
			cpuLimit:   "300m",
		},
	}
	pod2 = makeTopologyManagerPod("non-gu-pod", ctnAttrs)
	pod2 = f.PodClient().CreateSync(pod2)

	ginkgo.By("checking if the expected cpuset was assigned")
	cpu1 = 1
	if isHTEnabled() {
		cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
		cpu1 = cpuList[1]
	}
	expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu1)
	err = f.PodClient().MatchContainerOutput(pod1.Name, pod1.Spec.Containers[0].Name, expAllowedCPUsListRegex)
	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
		pod1.Spec.Containers[0].Name, pod1.Name)

	// The non-guaranteed pod may use every CPU except the one pinned to the
	// guaranteed pod (when more than 2 CPUs are allocatable).
	cpuListString = "0"
	if cpuAlloc > 2 {
		cset = cpuset.MustParse(fmt.Sprintf("0-%d", cpuCap-1))
		cpuListString = cset.Difference(cpuset.NewCPUSet(cpu1)).String()
	}
	expAllowedCPUsListRegex = fmt.Sprintf("^%s\n$", cpuListString)
	err = f.PodClient().MatchContainerOutput(pod2.Name, pod2.Spec.Containers[0].Name, expAllowedCPUsListRegex)
	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
		pod2.Spec.Containers[0].Name, pod2.Name)

	ginkgo.By("by deleting the pods and waiting for container removal")
	deletePods(f, []string{pod1.Name, pod2.Name})
	waitForContainerRemoval(pod1.Spec.Containers[0].Name, pod1.Name, pod1.Namespace)
	waitForContainerRemoval(pod2.Spec.Containers[0].Name, pod2.Name, pod2.Namespace)

	// Skip rest of the tests if CPU capacity < 3.
	if cpuCap < 3 {
		e2eskipper.Skipf("Skipping rest of the CPU Manager tests since CPU capacity < 3")
	}

	ginkgo.By("running a Gu pod requesting multiple CPUs")
	ctnAttrs = []tmCtnAttribute{
		{
			ctnName:    "gu-container",
			cpuRequest: "2000m",
			cpuLimit:   "2000m",
		},
	}
	pod = makeTopologyManagerPod("gu-pod", ctnAttrs)
	pod = f.PodClient().CreateSync(pod)

	ginkgo.By("checking if the expected cpuset was assigned")
	cpuListString = "1-2"
	if isHTEnabled() {
		cpuListString = "2-3"
		cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
		if cpuList[1] != 1 {
			cset = cpuset.MustParse(getCPUSiblingList(1))
			cpuListString = cset.String()
		}
	}
	expAllowedCPUsListRegex = fmt.Sprintf("^%s\n$", cpuListString)
	err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
		pod.Spec.Containers[0].Name, pod.Name)

	ginkgo.By("by deleting the pods and waiting for container removal")
	deletePods(f, []string{pod.Name})
	waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)

	ginkgo.By("running a Gu pod with multiple containers requesting integer CPUs")
	ctnAttrs = []tmCtnAttribute{
		{
			ctnName:    "gu-container1",
			cpuRequest: "1000m",
			cpuLimit:   "1000m",
		},
		{
			ctnName:    "gu-container2",
			cpuRequest: "1000m",
			cpuLimit:   "1000m",
		},
	}
	pod = makeTopologyManagerPod("gu-pod", ctnAttrs)
	pod = f.PodClient().CreateSync(pod)

	ginkgo.By("checking if the expected cpuset was assigned")
	cpu1, cpu2 = 1, 2
	if isHTEnabled() {
		cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
		if cpuList[1] != 1 {
			cpu1, cpu2 = cpuList[1], 1
		}
	}
	// Group the alternation: "^a|b\n$" parses as (^a)|(b\n$), which would also
	// match unintended outputs such as "12\n". "(a|b)" anchors both options.
	expAllowedCPUsListRegex = fmt.Sprintf("^(%d|%d)\n$", cpu1, cpu2)
	err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
		pod.Spec.Containers[0].Name, pod.Name)
	// Verify the second container as well (previously container 0 was checked twice).
	err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[1].Name, expAllowedCPUsListRegex)
	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
		pod.Spec.Containers[1].Name, pod.Name)

	ginkgo.By("by deleting the pods and waiting for container removal")
	deletePods(f, []string{pod.Name})
	waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
	waitForContainerRemoval(pod.Spec.Containers[1].Name, pod.Name, pod.Namespace)

	ginkgo.By("running multiple Gu pods")
	ctnAttrs = []tmCtnAttribute{
		{
			ctnName:    "gu-container1",
			cpuRequest: "1000m",
			cpuLimit:   "1000m",
		},
	}
	pod1 = makeTopologyManagerPod("gu-pod1", ctnAttrs)
	pod1 = f.PodClient().CreateSync(pod1)

	ctnAttrs = []tmCtnAttribute{
		{
			ctnName:    "gu-container2",
			cpuRequest: "1000m",
			cpuLimit:   "1000m",
		},
	}
	pod2 = makeTopologyManagerPod("gu-pod2", ctnAttrs)
	pod2 = f.PodClient().CreateSync(pod2)

	ginkgo.By("checking if the expected cpuset was assigned")
	cpu1, cpu2 = 1, 2
	if isHTEnabled() {
		cpuList = cpuset.MustParse(getCPUSiblingList(0)).ToSlice()
		if cpuList[1] != 1 {
			cpu1, cpu2 = cpuList[1], 1
		}
	}
	expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu1)
	err = f.PodClient().MatchContainerOutput(pod1.Name, pod1.Spec.Containers[0].Name, expAllowedCPUsListRegex)
	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
		pod1.Spec.Containers[0].Name, pod1.Name)

	expAllowedCPUsListRegex = fmt.Sprintf("^%d\n$", cpu2)
	err = f.PodClient().MatchContainerOutput(pod2.Name, pod2.Spec.Containers[0].Name, expAllowedCPUsListRegex)
	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
		pod2.Spec.Containers[0].Name, pod2.Name)

	ginkgo.By("by deleting the pods and waiting for container removal")
	deletePods(f, []string{pod1.Name, pod2.Name})
	waitForContainerRemoval(pod1.Spec.Containers[0].Name, pod1.Name, pod1.Namespace)
	waitForContainerRemoval(pod2.Spec.Containers[0].Name, pod2.Name, pod2.Namespace)
}
// runTopologyManagerTests registers the Topology Manager suite: it runs the
// scenario tests once per supported policy, then restores the original
// Kubelet configuration.
func runTopologyManagerTests(f *framework.Framework) {
	ginkgo.It("run Topology Manager test suite", func() {
		// Save the running Kubelet configuration once, up front, so the same
		// snapshot is reused for every policy and restored at the end.
		oldCfg, err := getCurrentKubeletConfig()
		framework.ExpectNoError(err)

		var policies = []string{topologymanager.PolicySingleNumaNode, topologymanager.PolicyRestricted,
			topologymanager.PolicyBestEffort, topologymanager.PolicyNone}

		for _, policy := range policies {
			// Configure Topology Manager
			ginkgo.By(fmt.Sprintf("by configuring Topology Manager policy to %s", policy))
			framework.Logf("Configuring topology Manager policy to %s", policy)
			configureTopologyManagerInKubelet(f, oldCfg, policy)
			// Run the tests
			runTopologyManagerSuiteTests(f)
		}

		// restore kubelet config
		setOldKubeletConfig(f, oldCfg)
		// NOTE: a leftover 5-minute debug sleep was removed here; it added
		// five idle minutes to every run with no functional effect.

		// Delete state file to allow repeated runs
		deleteStateFile()
	})
}
// Serial because the test updates kubelet configuration.
// Registers the Topology Manager node e2e suite; the suite reconfigures the
// Kubelet, so it must not run concurrently with other tests on the node.
var _ = SIGDescribe("Topology Manager [Serial] [Feature:TopologyManager][NodeAlphaFeature:TopologyManager]", func() {
	f := framework.NewDefaultFramework("topology-manager-test")
	ginkgo.Context("With kubeconfig updated to static CPU Manager policy run the Topology Manager tests", func() {
		runTopologyManagerTests(f)
	})
})
|
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package drive
import (
"fmt"
"reflect"
"runtime"
"strings"
"testing"
"time"
"google.golang.org/api/googleapi"
)
// callerFilepath reports the source-file path of the function that invoked it.
func callerFilepath() (path string) {
	_, path, _, _ = runtime.Caller(1)
	return
}
// TestRemoteOpToChangerTranslator verifies that remoteOpToChangerTranslator
// maps each kind of Change (add, trash, modify, no-op) to the expected
// Commands method.
func TestRemoteOpToChangerTranslator(t *testing.T) {
	g := &Commands{}
	now := time.Now()

	cases := []struct {
		change   *Change
		name     string
		wantedFn func(*Change) error
	}{
		{change: &Change{Src: nil, Dest: nil}, wantedFn: nil, name: "nil"},
		{change: &Change{Src: &File{}, Dest: &File{}}, wantedFn: nil, name: "nil"},
		{change: &Change{Src: &File{}, Dest: nil}, wantedFn: g.remoteAdd, name: "remoteAdd"},
		{change: &Change{Src: nil, Dest: &File{}}, wantedFn: g.remoteTrash, name: "remoteTrash"},
		{
			change: &Change{
				Dest: &File{ModTime: now},
				Src:  &File{ModTime: now.Add(time.Hour)},
			},
			wantedFn: g.remoteMod, name: "remoteMod",
		},
		{
			change: &Change{
				Dest: &File{ModTime: now},
				Src:  &File{ModTime: now},
			},
			wantedFn: nil, name: "noop",
		},
	}

	for _, tc := range cases {
		got := remoteOpToChangerTranslator(g, tc.change)
		// Function values cannot be compared with ==; compare code pointers.
		vptr1 := reflect.ValueOf(got).Pointer()
		vptr2 := reflect.ValueOf(tc.wantedFn).Pointer()
		if vptr1 != vptr2 {
			// Message fixed: it previously read "expected %q expected (...)".
			t.Errorf("%q: expected (%v) got (%v)", tc.name, tc.wantedFn, got)
		}
	}
}
// TestLocalOpToChangerTranslator verifies that localOpToChangerTranslator
// maps each kind of Change (add, delete, modify, no-op) to the expected
// Commands method.
func TestLocalOpToChangerTranslator(t *testing.T) {
	g := &Commands{}
	now := time.Now()

	cases := []struct {
		change   *Change
		name     string
		wantedFn func(*Change, []string) error
	}{
		{change: &Change{Src: nil, Dest: nil}, wantedFn: nil, name: "nil"},
		{change: &Change{Src: &File{}, Dest: &File{}}, wantedFn: nil, name: "nil"},
		{
			change:   &Change{Src: &File{}, Dest: nil},
			wantedFn: g.localAdd, name: "localAdd",
		},
		{
			change:   &Change{Dest: nil, Src: &File{}},
			wantedFn: g.localAdd, name: "localAdd",
		},
		{
			change:   &Change{Src: nil, Dest: &File{}},
			wantedFn: g.localDelete, name: "localDelete",
		},
		{
			change: &Change{
				Src:  &File{ModTime: now},
				Dest: &File{ModTime: now.Add(time.Hour)},
			},
			wantedFn: g.localMod, name: "localMod",
		},
		{
			change: &Change{
				Dest: &File{ModTime: now},
				Src:  &File{ModTime: now},
			},
			wantedFn: nil, name: "noop",
		},
	}

	for _, tc := range cases {
		got := localOpToChangerTranslator(g, tc.change)
		// Function values cannot be compared with ==; compare code pointers.
		vptr1 := reflect.ValueOf(got).Pointer()
		vptr2 := reflect.ValueOf(tc.wantedFn).Pointer()
		if vptr1 != vptr2 {
			// Message fixed: it previously read "expected %q expected (...)".
			t.Errorf("%q: expected (%v) got (%v)", tc.name, tc.wantedFn, got)
		}
	}
}
// TestRetryableErrorCheck drives retryableErrorCheck with tuples wrapping
// assorted googleapi errors and checks the (success, retryable) decision for
// each; the per-case comment is printed on failure.
func TestRetryableErrorCheck(t *testing.T) {
	cases := []struct {
		value              interface{}
		success, retryable bool
		comment            string
	}{
		{
			value: nil, success: false, retryable: true,
			comment: "a nil tuple is retryable but not successful",
		},
		{
			value: t, success: false, retryable: true,
			comment: "t value is not a tuple, is retryable but not successful",
		},
		{
			value: &tuple{first: nil, last: nil}, success: true, retryable: false,
			comment: "last=nil representing a nil error so success, unretryable",
		},
		{
			value:   &tuple{first: nil, last: fmt.Errorf("flux")},
			success: false, retryable: true,
			comment: "last!=nil, non-familiar error so unsuccessful, retryable",
		},
		{
			value: &tuple{
				first: "",
				last: &googleapi.Error{
					Message: "This is an error",
				},
			},
			success: false, retryable: false,
			// Comment fixed: this vector expects retryable=false, but the
			// message previously claimed "retryable".
			comment: "last!=nil, familiar error so unsuccessful, unretryable:: statusCode undefined",
		},
		{
			value: &tuple{
				first: "",
				last: &googleapi.Error{
					Code:    500,
					Message: "This is an error",
				},
			},
			success: false, retryable: true,
			comment: "last!=nil, familiar error so unsuccessful, retryable:: statusCode 500",
		},
		{
			value: &tuple{
				first: nil,
				last: &googleapi.Error{
					Code:    401,
					Message: "401 right here",
				},
			},
			success: false, retryable: true,
			comment: "last!=nil, 401 must be retryable",
		},
		{
			value: &tuple{
				first: nil,
				last: &googleapi.Error{
					Code:    409,
					Message: "409 right here",
				},
			},
			success: false, retryable: false,
			comment: "last!=nil, 409 is unclassified so unretryable",
		},
		{
			value: &tuple{
				first: nil,
				last: &googleapi.Error{
					Code:    403,
					Message: "403 right here",
				},
			},
			success: false, retryable: true,
			comment: "last!=nil, 403 is retryable",
		},
		{
			value: &tuple{
				first: nil,
				last: &googleapi.Error{
					Code:    500,
					Message: MsgErrFileNotMutable,
				},
			},
			success: false, retryable: false,
			comment: "issue #472 FileNotMutable is unretryable",
		},
		{
			value: &tuple{
				first: nil,
				last: &googleapi.Error{
					Code:    500,
					Message: strings.ToLower(MsgErrFileNotMutable),
				},
			},
			success: false, retryable: false,
			comment: "issue #472 FileNotMutable is unretryable, casefold held",
		},
		{
			value: &tuple{
				first: nil,
				last: &googleapi.Error{
					Code:    501,
					Message: strings.ToUpper(MsgErrFileNotMutable),
				},
			},
			success: false, retryable: false,
			comment: "issue #472 FileNotMutable is unretryable, casefold held",
		},
	}

	for _, tc := range cases {
		success, retryable := retryableErrorCheck(tc.value)
		if success != tc.success {
			t.Errorf("%v success got %v expected %v", tc.value, success, tc.success)
		}
		if retryable != tc.retryable {
			t.Errorf("%v retryable got %v expected %v: %q", tc.value, retryable, tc.retryable, tc.comment)
		}
	}
}
// TestDriveIgnore exercises ignorerByClause with assorted clause sets,
// checking error expectations, whether the returned ignorer is nil, and the
// ignorer's accept/reject decisions for sample paths.
func TestDriveIgnore(t *testing.T) {
	testCases := []struct {
		clauses          []string
		mustErr          bool
		nilIgnorer       bool
		excludesExpected []string
		includesExpected []string
		comment          string
		mustBeIgnored    []string
		mustNotBeIgnored []string
	}{
		{clauses: []string{}, nilIgnorer: true, comment: "no clauses in"},
		{
			clauses: []string{"#this is a comment"}, nilIgnorer: false,
			comment: "plain commented file",
		},
		{
			comment:          "intentionally unescaped '.'",
			clauses:          []string{".git", ".docx$"},
			mustBeIgnored:    []string{"bgits", "frogdocx"},
			mustNotBeIgnored: []string{"", " ", "frogdocxs"},
		},
		{
			comment:          "entirely commented, so all clauses should be skipped",
			clauses:          []string{"^#"},
			mustBeIgnored:    []string{"#patch", "# ", "#", "#Like this one", "#\\.git"},
			mustNotBeIgnored: []string{"", " ", "src/misc_test.go"},
		},
		{
			comment:       "strictly escaped '.'",
			clauses:       []string{"\\.git", "\\.docx$"},
			mustBeIgnored: []string{".git", "drive.docx", ".docx"},
			mustNotBeIgnored: []string{
				"", " ", "frogdocxs", "digit", "drive.docxs",
				"drive.docxx", "drive.", ".drive", ".docx ",
			},
		},
		{
			comment:       "strictly escaped '.'",
			clauses:       []string{"^\\.", "#!\\.driveignore"},
			mustBeIgnored: []string{".git", ".driveignore", ".bashrc"},
			mustNotBeIgnored: []string{
				"", " ", "frogdocxs", "digit", "drive.docxs",
				"drive.docxx", "drive.", " .drive", "a.docx ",
			},
		},
		{
			comment:       "include vs exclude issue #535",
			clauses:       []string{"\\.", "!^\\.docx$", "!\\.bashrc", "#!\\.driveignore"},
			mustBeIgnored: []string{".git", "drive.docx", ".docx ", ".driveignore"},
			mustNotBeIgnored: []string{
				".docx", ".bashrc",
			},
		},
	}

	for _, tt := range testCases {
		ignorer, err := ignorerByClause(tt.clauses...)

		// Error expectation and actual error must agree.
		switch {
		case tt.mustErr && err == nil:
			t.Fatalf("expected to err with clause %v comment %q", tt.clauses, tt.comment)
		case !tt.mustErr && err != nil:
			t.Fatalf("%v should not err. Got %v", tt.clauses, err)
		}

		// Nil-ness of the ignorer must match the expectation.
		switch {
		case tt.nilIgnorer && ignorer != nil:
			t.Fatalf("ignorer for (%v)(%q) expected to be nil, got %p", tt.clauses, tt.comment, ignorer)
		case !tt.nilIgnorer && ignorer == nil:
			t.Fatalf("ignorer not expected to be nil for (%v) %q", tt.clauses, tt.comment)
		}

		if tt.nilIgnorer || ignorer == nil {
			continue
		}

		for _, wantIgnored := range tt.mustBeIgnored {
			if !ignorer(wantIgnored) {
				t.Errorf("%q: %q must be ignored", tt.comment, wantIgnored)
			}
		}
		for _, wantKept := range tt.mustNotBeIgnored {
			if ignorer(wantKept) {
				t.Errorf("%q: %q must not be ignored", tt.comment, wantKept)
			}
		}
	}
}
// TestReadFile feeds this very source file through readCommentedFile and
// checks that //-commented lines are stripped from the result.
func TestReadFile(t *testing.T) {
	ownFilepath := callerFilepath()
	comment := `
// A comment right here intentionally put that will self read and consumed.
+ A follow up right here and now.
`
	clauses, err := readCommentedFile(ownFilepath, "//")
	if err != nil {
		t.Fatalf("%q is currently being run and should be read successfully, instead got err %v", ownFilepath, err)
	}
	if len(clauses) < 1 {
		t.Errorf("expecting at least one line in this file %q", ownFilepath)
	}
	restitched := strings.Join(clauses, "\n")
	// strings.Contains is the idiomatic form of strings.Index(...) != -1.
	if strings.Contains(restitched, comment) {
		t.Errorf("%q should have been ignored as a comment", comment)
	}
}
tests: add more tests for src/misc methods
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package drive
import (
"fmt"
"reflect"
"runtime"
"strings"
"testing"
"time"
"google.golang.org/api/googleapi"
)
// callerFilepath returns the path of the source file containing the call site.
func callerFilepath() string {
	var filePath string
	_, filePath, _, _ = runtime.Caller(1)
	return filePath
}
// TestRemoteOpToChangerTranslator verifies that remoteOpToChangerTranslator
// maps each kind of Change (add, trash, modify, no-op) to the expected
// Commands method.
func TestRemoteOpToChangerTranslator(t *testing.T) {
	g := &Commands{}
	now := time.Now()

	cases := []struct {
		change   *Change
		name     string
		wantedFn func(*Change) error
	}{
		{change: &Change{Src: nil, Dest: nil}, wantedFn: nil, name: "nil"},
		{change: &Change{Src: &File{}, Dest: &File{}}, wantedFn: nil, name: "nil"},
		{change: &Change{Src: &File{}, Dest: nil}, wantedFn: g.remoteAdd, name: "remoteAdd"},
		{change: &Change{Src: nil, Dest: &File{}}, wantedFn: g.remoteTrash, name: "remoteTrash"},
		{
			change: &Change{
				Dest: &File{ModTime: now},
				Src:  &File{ModTime: now.Add(time.Hour)},
			},
			wantedFn: g.remoteMod, name: "remoteMod",
		},
		{
			change: &Change{
				Dest: &File{ModTime: now},
				Src:  &File{ModTime: now},
			},
			wantedFn: nil, name: "noop",
		},
	}

	for _, tc := range cases {
		got := remoteOpToChangerTranslator(g, tc.change)
		// Function values cannot be compared with ==; compare code pointers.
		vptr1 := reflect.ValueOf(got).Pointer()
		vptr2 := reflect.ValueOf(tc.wantedFn).Pointer()
		if vptr1 != vptr2 {
			// Message fixed: it previously read "expected %q expected (...)".
			t.Errorf("%q: expected (%v) got (%v)", tc.name, tc.wantedFn, got)
		}
	}
}
// TestLocalOpToChangerTranslator verifies that each kind of Change is
// translated to the expected local changer method (add/delete/mod), or to
// nil when no local operation is required.
func TestLocalOpToChangerTranslator(t *testing.T) {
	g := &Commands{}
	now := time.Now()
	cases := []struct {
		change   *Change
		name     string
		wantedFn func(*Change, []string) error
	}{
		{change: &Change{Src: nil, Dest: nil}, wantedFn: nil, name: "nil"},
		{change: &Change{Src: &File{}, Dest: &File{}}, wantedFn: nil, name: "nil"},
		// Note: the original listed this case twice with the fields merely
		// reordered; the duplicate was removed.
		{
			change: &Change{Src: &File{}, Dest: nil},
			wantedFn: g.localAdd, name: "localAdd",
		},
		{
			change: &Change{Src: nil, Dest: &File{}},
			wantedFn: g.localDelete, name: "localDelete",
		},
		{
			change: &Change{
				Src:  &File{ModTime: now},
				Dest: &File{ModTime: now.Add(time.Hour)},
			},
			wantedFn: g.localMod, name: "localMod",
		},
		{
			change: &Change{
				Dest: &File{ModTime: now},
				Src:  &File{ModTime: now},
			},
			wantedFn: nil, name: "noop",
		},
	}
	for _, tc := range cases {
		got := localOpToChangerTranslator(g, tc.change)
		// Methods cannot be compared directly; compare their code pointers.
		vptr1 := reflect.ValueOf(got).Pointer()
		vptr2 := reflect.ValueOf(tc.wantedFn).Pointer()
		if vptr1 != vptr2 {
			// Fixed message: original read "expected %q expected (%v) ...".
			t.Errorf("%q: expected (%v) got (%v)", tc.name, tc.wantedFn, got)
		}
	}
}
// TestRetryableErrorCheck exercises retryableErrorCheck's classification of
// (value, error) tuples into (success, retryable) pairs, including the
// special-cased googleapi.Error status codes and the FileNotMutable message
// handling from issue #472.
func TestRetryableErrorCheck(t *testing.T) {
	cases := []struct {
		value              interface{}
		success, retryable bool
		comment            string
	}{
		{
			value: nil, success: false, retryable: true,
			comment: "a nil tuple is retryable but not successful",
		},
		{
			// Any non-tuple value should also be treated as retryable.
			value: t, success: false, retryable: true,
			comment: "t value is not a tuple, is retryable but not successful",
		},
		{
			value: &tuple{first: nil, last: nil}, success: true, retryable: false,
			comment: "last=nil representing a nil error so success, unretryable",
		},
		{
			value:   &tuple{first: nil, last: fmt.Errorf("flux")},
			success: false, retryable: true,
			comment: "last!=nil, non-familiar error so unsuccessful, retryable",
		},
		{
			value: &tuple{
				first: "",
				last: &googleapi.Error{
					Message: "This is an error",
				},
			},
			success: false, retryable: false,
			// Fixed comment: this case asserts retryable == false, so the
			// original text "retryable:: statusCode undefined" was wrong.
			comment: "last!=nil, familiar error so unsuccessful, unretryable:: statusCode undefined",
		},
		{
			value: &tuple{
				first: "",
				last: &googleapi.Error{
					Code:    500,
					Message: "This is an error",
				},
			},
			success: false, retryable: true,
			comment: "last!=nil, familiar error so unsuccessful, retryable:: statusCode 500",
		},
		{
			value: &tuple{
				first: nil,
				last: &googleapi.Error{
					Code:    401,
					Message: "401 right here",
				},
			},
			success: false, retryable: true,
			comment: "last!=nil, 401 must be retryable",
		},
		{
			value: &tuple{
				first: nil,
				last: &googleapi.Error{
					Code:    409,
					Message: "409 right here",
				},
			},
			success: false, retryable: false,
			comment: "last!=nil, 409 is unclassified so unretryable",
		},
		{
			value: &tuple{
				first: nil,
				last: &googleapi.Error{
					Code:    403,
					Message: "403 right here",
				},
			},
			success: false, retryable: true,
			comment: "last!=nil, 403 is retryable",
		},
		{
			value: &tuple{
				first: nil,
				last: &googleapi.Error{
					Code:    500,
					Message: MsgErrFileNotMutable,
				},
			},
			success: false, retryable: false,
			comment: "issue #472 FileNotMutable is unretryable",
		},
		{
			value: &tuple{
				first: nil,
				last: &googleapi.Error{
					Code:    500,
					Message: strings.ToLower(MsgErrFileNotMutable),
				},
			},
			success: false, retryable: false,
			comment: "issue #472 FileNotMutable is unretryable, casefold held",
		},
		{
			value: &tuple{
				first: nil,
				last: &googleapi.Error{
					Code:    501,
					Message: strings.ToUpper(MsgErrFileNotMutable),
				},
			},
			success: false, retryable: false,
			comment: "issue #472 FileNotMutable is unretryable, casefold held",
		},
	}
	for _, tc := range cases {
		success, retryable := retryableErrorCheck(tc.value)
		if success != tc.success {
			t.Errorf("%v success got %v expected %v", tc.value, success, tc.success)
		}
		if retryable != tc.retryable {
			t.Errorf("%v retryable got %v expected %v: %q", tc.value, retryable, tc.retryable, tc.comment)
		}
	}
}
// TestDriveIgnore exercises ignorerByClause: building an ignore predicate
// from .driveignore-style clauses (regexes, "#" comments, "!" include
// overrides) and checking which paths it accepts or rejects.
func TestDriveIgnore(t *testing.T) {
	testCases := []struct {
		clauses          []string
		mustErr          bool
		nilIgnorer       bool // true when no usable clauses means no ignorer at all
		excludesExpected []string
		includesExpected []string
		comment          string
		mustBeIgnored    []string
		mustNotBeIgnored []string
	}{
		{clauses: []string{}, nilIgnorer: true, comment: "no clauses in"},
		{
			clauses: []string{"#this is a comment"}, nilIgnorer: false,
			comment: "plain commented file",
		},
		{
			// Unescaped '.' matches any character, so "bgits" matches ".git".
			comment:          "intentionally unescaped '.'",
			clauses:          []string{".git", ".docx$"},
			mustBeIgnored:    []string{"bgits", "frogdocx"},
			mustNotBeIgnored: []string{"", " ", "frogdocxs"},
		},
		{
			comment:          "entirely commented, so all clauses should be skipped",
			clauses:          []string{"^#"},
			mustBeIgnored:    []string{"#patch", "# ", "#", "#Like this one", "#\\.git"},
			mustNotBeIgnored: []string{"", " ", "src/misc_test.go"},
		},
		{
			comment:       "strictly escaped '.'",
			clauses:       []string{"\\.git", "\\.docx$"},
			mustBeIgnored: []string{".git", "drive.docx", ".docx"},
			mustNotBeIgnored: []string{
				"", " ", "frogdocxs", "digit", "drive.docxs",
				"drive.docxx", "drive.", ".drive", ".docx ",
			},
		},
		{
			// "#!..." is a commented-out include override and must be skipped.
			comment:       "strictly escaped '.'",
			clauses:       []string{"^\\.", "#!\\.driveignore"},
			mustBeIgnored: []string{".git", ".driveignore", ".bashrc"},
			mustNotBeIgnored: []string{
				"", " ", "frogdocxs", "digit", "drive.docxs",
				"drive.docxx", "drive.", " .drive", "a.docx ",
			},
		},
		{
			// "!" clauses re-include paths that an exclude clause matched.
			comment:       "include vs exclude issue #535",
			clauses:       []string{"\\.", "!^\\.docx$", "!\\.bashrc", "#!\\.driveignore"},
			mustBeIgnored: []string{".git", "drive.docx", ".docx ", ".driveignore"},
			mustNotBeIgnored: []string{
				".docx", ".bashrc",
			},
		},
	}
	for _, tc := range testCases {
		ignorer, err := ignorerByClause(tc.clauses...)
		if tc.mustErr {
			if err == nil {
				t.Fatalf("expected to err with clause %v comment %q", tc.clauses, tc.comment)
			}
		} else if err != nil {
			t.Fatalf("%v should not err. Got %v", tc.clauses, err)
		}
		if tc.nilIgnorer {
			if ignorer != nil {
				t.Fatalf("ignorer for (%v)(%q) expected to be nil, got %p", tc.clauses, tc.comment, ignorer)
			}
		} else if ignorer == nil {
			t.Fatalf("ignorer not expected to be nil for (%v) %q", tc.clauses, tc.comment)
		}
		// Only probe paths when we actually have an ignorer to call.
		if !tc.nilIgnorer && ignorer != nil {
			for _, expectedPass := range tc.mustBeIgnored {
				if !ignorer(expectedPass) {
					t.Errorf("%q: %q must be ignored", tc.comment, expectedPass)
				}
			}
			for _, expectedFail := range tc.mustNotBeIgnored {
				if ignorer(expectedFail) {
					t.Errorf("%q: %q must not be ignored", tc.comment, expectedFail)
				}
			}
		}
	}
}
// TestReadFile reads this very source file through readCommentedFile and
// checks that the "//"-commented text below is not present in the returned
// clauses.
func TestReadFile(t *testing.T) {
	ownFilepath := callerFilepath()
	comment := `
// A comment right here intentionally put that will self read and consumed.
+ A follow up right here and now.
`
	clauses, err := readCommentedFile(ownFilepath, "//")
	if err != nil {
		t.Fatalf("%q is currently being run and should be read successfully, instead got err %v", ownFilepath, err)
	}
	if len(clauses) < 1 {
		t.Errorf("expecting at least one line in this file %q", ownFilepath)
	}
	restitched := strings.Join(clauses, "\n")
	// strings.Contains is the idiomatic form of strings.Index(...) != -1.
	if strings.Contains(restitched, comment) {
		t.Errorf("%q should have been ignored as a comment", comment)
	}
}
// TestCustomQuote checks customQuote against plain ASCII, multi-byte
// Unicode, embedded quotes and backslashes.
func TestCustomQuote(t *testing.T) {
	// https://github.com/golang/go/issues/11511
	// https://github.com/odeke-em/drive/issues/250
	cases := []struct {
		sample, want string
	}{
		{sample: "", want: `""`},
		{sample: "全角スペース 含みます/", want: `"全角スペース 含みます/"`},
		{sample: "this is a test", want: `"this is a test"`},
		{sample: "this ' is a test", want: `"this ' is a test"`},
		{sample: "全角スペース\" 含みます/", want: `"全角スペース\" 含みます/"`},
		{sample: "久聞大名 久聞大名", want: `"久聞大名 久聞大名"`},
		{sample: "Go: для начинающих и профессионалов (18:30)", want: `"Go: для начинающих и профессионалов (18:30)"`},
		{sample: "Go:\\ для начинающих и профессионалов (18:30)", want: `"Go:\\ для начинающих и профессионалов (18:30)"`},
	}
	for _, tt := range cases {
		got := customQuote(tt.sample)
		if got != tt.want {
			t.Errorf("given sample: %v, wanted %v, got %v", tt.sample, tt.want, got)
		}
	}
}
// TestHttpOk checks that httpOk accepts exactly the 2xx status range and
// rejects everything else, including negative and zero codes.
func TestHttpOk(t *testing.T) {
	expectations := map[int]bool{
		200: true, 201: true, 210: true, 290: true, 299: true,
		-200: false, -200000000000: false,
		300: false, 403: false, 500: false,
		100: false, 0: false, -1: false,
	}
	for sample, want := range expectations {
		if got := httpOk(sample); got != want {
			t.Errorf("given sample %v, expected %v instead got %v", sample, want, got)
		}
	}
}
// TestCrudToAtoi checks CrudAtoi's parsing of CRUD permission names,
// covering full words, single-letter short forms, mixed case, unknown
// words (ignored) and empty strings.
func TestCrudToAtoi(t *testing.T) {
	cases := []struct {
		specimen []string
		want     CrudValue
		comment  string
	}{
		{
			specimen: []string{"create", "delete", "update", "read"},
			want:     Create | Delete | Update | Read,
		},
		{
			specimen: []string{"c", "r", "u", "d`"},
			want:     Create | Read | Update | Delete,
			comment:  "short forms being used",
		},
		{
			specimen: []string{"trim", "create", "update", "bank`"},
			want:     Create | Update,
		},
		{
			specimen: []string{"interfere", "extrict", "influence", "bank`"},
			want:     None,
		},
		{
			specimen: []string{"", "", "", "ReAd"},
			want:     Read,
		},
		{
			specimen: []string{"", "", "", ""},
			want:     None,
		},
	}
	for _, tt := range cases {
		got := CrudAtoi(tt.specimen...)
		if got != tt.want {
			t.Errorf("given specimen %v, expected %q instead got %q", tt.specimen, tt.want, got)
		}
	}
}
|
package types
import (
"encoding/json"
"errors"
)
// Dependencies is a list of image Dependency entries.
type Dependencies []Dependency

// Dependency describes an image dependency: the app name plus the image
// ID (hash) and labels used to resolve it.
type Dependency struct {
	App     ACName `json:"app"`
	ImageID Hash   `json:"imageID"`
	Labels  Labels `json:"labels"`
}

// dependency is an unexported alias of Dependency used to invoke the
// default JSON (un)marshaling without recursing into the custom methods.
type dependency Dependency
// assertValid checks that the dependency's App name is non-empty.
func (d Dependency) assertValid() error {
	if len(d.App) < 1 {
		// Fixed: the field was renamed from Name to App, but the error
		// message still said "Name".
		return errors.New(`App cannot be empty`)
	}
	return nil
}
// MarshalJSON implements json.Marshaler, refusing to serialize an invalid
// Dependency.
func (d Dependency) MarshalJSON() ([]byte, error) {
	err := d.assertValid()
	if err != nil {
		return nil, err
	}
	return json.Marshal(dependency(d))
}

// UnmarshalJSON implements json.Unmarshaler, rejecting input that decodes
// to an invalid Dependency and leaving the receiver untouched on error.
func (d *Dependency) UnmarshalJSON(data []byte) error {
	var raw dependency
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}
	parsed := Dependency(raw)
	if err := parsed.assertValid(); err != nil {
		return err
	}
	*d = parsed
	return nil
}
schema: fix assertValid error message to say "App" following the Name-to-App field rename.
package types
import (
"encoding/json"
"errors"
)
// Dependencies is a list of image Dependency entries.
type Dependencies []Dependency

// Dependency describes an image dependency: the app name plus the image
// ID (hash) and labels used to resolve it.
type Dependency struct {
	App     ACName `json:"app"`
	ImageID Hash   `json:"imageID"`
	Labels  Labels `json:"labels"`
}

// dependency is an unexported alias of Dependency used to invoke the
// default JSON (un)marshaling without recursing into the custom methods.
type dependency Dependency
// assertValid checks that the dependency's App name is non-empty.
func (d Dependency) assertValid() error {
	if len(d.App) < 1 {
		return errors.New(`App cannot be empty`)
	}
	return nil
}
// MarshalJSON implements json.Marshaler, refusing to serialize an invalid
// Dependency.
func (d Dependency) MarshalJSON() ([]byte, error) {
	err := d.assertValid()
	if err != nil {
		return nil, err
	}
	return json.Marshal(dependency(d))
}

// UnmarshalJSON implements json.Unmarshaler, rejecting input that decodes
// to an invalid Dependency and leaving the receiver untouched on error.
func (d *Dependency) UnmarshalJSON(data []byte) error {
	var decoded dependency
	if err := json.Unmarshal(data, &decoded); err != nil {
		return err
	}
	candidate := Dependency(decoded)
	if err := candidate.assertValid(); err != nil {
		return err
	}
	*d = candidate
	return nil
}
|
package proxy
import (
"context"
"v2ray.com/core/common/net"
)
// key is an unexported type for context keys, preventing collisions with
// values set by other packages.
type key int

// Context keys for the per-connection values stored by this package.
const (
	sourceKey key = iota
	targetKey
	originalTargetKey
	inboundEntryPointKey
	inboundTagKey
	resolvedIPsKey
)
// ContextWithSource returns a new context carrying the given source address.
func ContextWithSource(ctx context.Context, src net.Destination) context.Context {
	return context.WithValue(ctx, sourceKey, src)
}

// SourceFromContext retrieves the source address from the given context.
func SourceFromContext(ctx context.Context) (net.Destination, bool) {
	v, ok := ctx.Value(sourceKey).(net.Destination)
	return v, ok
}

// ContextWithOriginalTarget returns a new context carrying the given original target.
func ContextWithOriginalTarget(ctx context.Context, dest net.Destination) context.Context {
	return context.WithValue(ctx, originalTargetKey, dest)
}

// OriginalTargetFromContext retrieves the original target from the given context.
func OriginalTargetFromContext(ctx context.Context) (net.Destination, bool) {
	v, ok := ctx.Value(originalTargetKey).(net.Destination)
	return v, ok
}

// ContextWithTarget returns a new context carrying the given target destination.
func ContextWithTarget(ctx context.Context, dest net.Destination) context.Context {
	return context.WithValue(ctx, targetKey, dest)
}

// TargetFromContext retrieves the target destination from the given context.
func TargetFromContext(ctx context.Context) (net.Destination, bool) {
	v, ok := ctx.Value(targetKey).(net.Destination)
	return v, ok
}

// ContextWithInboundEntryPoint returns a new context carrying the given inbound entry point.
func ContextWithInboundEntryPoint(ctx context.Context, dest net.Destination) context.Context {
	return context.WithValue(ctx, inboundEntryPointKey, dest)
}

// InboundEntryPointFromContext retrieves the inbound entry point from the given context.
func InboundEntryPointFromContext(ctx context.Context) (net.Destination, bool) {
	v, ok := ctx.Value(inboundEntryPointKey).(net.Destination)
	return v, ok
}

// ContextWithInboundTag returns a new context carrying the given inbound tag.
func ContextWithInboundTag(ctx context.Context, tag string) context.Context {
	return context.WithValue(ctx, inboundTagKey, tag)
}

// InboundTagFromContext retrieves the inbound tag from the given context.
func InboundTagFromContext(ctx context.Context) (string, bool) {
	v, ok := ctx.Value(inboundTagKey).(string)
	return v, ok
}

// ContextWithResolveIPs returns a new context carrying the given resolved IP addresses.
func ContextWithResolveIPs(ctx context.Context, ips []net.Address) context.Context {
	return context.WithValue(ctx, resolvedIPsKey, ips)
}

// ResolvedIPsFromContext retrieves the resolved IP addresses from the given context.
func ResolvedIPsFromContext(ctx context.Context) ([]net.Address, bool) {
	ips, ok := ctx.Value(resolvedIPsKey).([]net.Address)
	return ips, ok
}
proxy: add doc comments to the context helper functions.
package proxy
import (
"context"
"v2ray.com/core/common/net"
)
// key is an unexported type for context keys, preventing collisions with
// values set by other packages.
type key int

// Context keys for the per-connection values stored by this package.
const (
	sourceKey key = iota
	targetKey
	originalTargetKey
	inboundEntryPointKey
	inboundTagKey
	resolvedIPsKey
)
// ContextWithSource creates a new context with given source.
func ContextWithSource(ctx context.Context, src net.Destination) context.Context {
	return context.WithValue(ctx, sourceKey, src)
}

// SourceFromContext retrieves source from the given context.
func SourceFromContext(ctx context.Context) (net.Destination, bool) {
	v, ok := ctx.Value(sourceKey).(net.Destination)
	return v, ok
}

// ContextWithOriginalTarget creates a new context with the given original target.
func ContextWithOriginalTarget(ctx context.Context, dest net.Destination) context.Context {
	return context.WithValue(ctx, originalTargetKey, dest)
}

// OriginalTargetFromContext retrieves the original target from the given context.
func OriginalTargetFromContext(ctx context.Context) (net.Destination, bool) {
	v, ok := ctx.Value(originalTargetKey).(net.Destination)
	return v, ok
}

// ContextWithTarget creates a new context with the given target destination.
func ContextWithTarget(ctx context.Context, dest net.Destination) context.Context {
	return context.WithValue(ctx, targetKey, dest)
}

// TargetFromContext retrieves the target destination from the given context.
func TargetFromContext(ctx context.Context) (net.Destination, bool) {
	v, ok := ctx.Value(targetKey).(net.Destination)
	return v, ok
}

// ContextWithInboundEntryPoint creates a new context with the given inbound entry point.
func ContextWithInboundEntryPoint(ctx context.Context, dest net.Destination) context.Context {
	return context.WithValue(ctx, inboundEntryPointKey, dest)
}

// InboundEntryPointFromContext retrieves the inbound entry point from the given context.
func InboundEntryPointFromContext(ctx context.Context) (net.Destination, bool) {
	v, ok := ctx.Value(inboundEntryPointKey).(net.Destination)
	return v, ok
}

// ContextWithInboundTag creates a new context with the given inbound tag.
func ContextWithInboundTag(ctx context.Context, tag string) context.Context {
	return context.WithValue(ctx, inboundTagKey, tag)
}

// InboundTagFromContext retrieves the inbound tag from the given context.
func InboundTagFromContext(ctx context.Context) (string, bool) {
	v, ok := ctx.Value(inboundTagKey).(string)
	return v, ok
}

// ContextWithResolveIPs creates a new context with the given resolved IP addresses.
func ContextWithResolveIPs(ctx context.Context, ips []net.Address) context.Context {
	return context.WithValue(ctx, resolvedIPsKey, ips)
}

// ResolvedIPsFromContext retrieves the resolved IP addresses from the given context.
func ResolvedIPsFromContext(ctx context.Context) ([]net.Address, bool) {
	ips, ok := ctx.Value(resolvedIPsKey).([]net.Address)
	return ips, ok
}
|
package compute
import (
"encoding/json"
"fmt"
"net/http"
"strings"
)
const (
	// FirewallRuleActionAccept indicates a firewall rule that, if it matches, will accept the packet and stop processing further rules.
	FirewallRuleActionAccept = "ACCEPT_DECISIVELY"

	// FirewallRuleActionDrop indicates a firewall rule that, if it matches, will drop the packet.
	FirewallRuleActionDrop = "DROP"

	// FirewallRuleIPVersion4 indicates a firewall rule that targets IPv4.
	FirewallRuleIPVersion4 = "IPv4"

	// FirewallRuleIPVersion6 indicates a firewall rule that targets IPv6.
	FirewallRuleIPVersion6 = "IPv6"

	// FirewallRuleProtocolIP indicates a firewall rule that targets the Internet Protocol (IP).
	FirewallRuleProtocolIP = "IP"

	// FirewallRuleProtocolTCP indicates a firewall rule that targets the Transmission Control Protocol (TCP).
	FirewallRuleProtocolTCP = "TCP"

	// FirewallRuleProtocolICMP indicates a firewall rule that targets the Internet Control Message Protocol (ICMP).
	FirewallRuleProtocolICMP = "ICMP"

	// FirewallRuleMatchAny indicates a firewall rule value that matches any other value in the same scope.
	FirewallRuleMatchAny = "ANY"
)
// FirewallRule represents a firewall rule.
type FirewallRule struct {
	ID              string            `json:"id"`
	Name            string            `json:"name"`
	Action          string            `json:"action"`
	IPVersion       string            `json:"ipVersion"`
	Protocol        string            `json:"protocol"`
	Source          FirewallRuleScope `json:"source"`
	Destination     FirewallRuleScope `json:"destination"`
	Enabled         bool              `json:"enabled"`
	State           string            `json:"state"`
	NetworkDomainID string            `json:"networkDomainId"`
	DataCenterID    string            `json:"datacenterId"`
	RuleType        string            `json:"ruleType"`
}

// GetID returns the firewall rule's ID.
func (rule *FirewallRule) GetID() string {
	return rule.ID
}

// GetResourceType returns the firewall rule's resource type.
func (rule *FirewallRule) GetResourceType() ResourceType {
	return ResourceTypeFirewallRule
}

// GetName returns the firewall rule's name.
func (rule *FirewallRule) GetName() string {
	return rule.Name
}

// GetState returns the firewall rule's current state.
func (rule *FirewallRule) GetState() string {
	return rule.State
}

// IsDeleted determines whether the firewall rule has been deleted (is nil).
func (rule *FirewallRule) IsDeleted() bool {
	return rule == nil
}

// Compile-time check that *FirewallRule satisfies the Resource interface.
var _ Resource = &FirewallRule{}
// FirewallRuleScope represents a scope (IP and / or port) for firewall configuration (source or destination).
// The address portion is one of IPAddress / AddressList (or neither, meaning "any");
// the port portion is one of Port / PortListID (or neither, meaning "any").
type FirewallRuleScope struct {
	IPAddress   *FirewallRuleIPAddress `json:"ip,omitempty"`
	AddressList *EntityReference       `json:"ipAddressList,omitempty"`
	Port        *FirewallRulePort      `json:"port,omitempty"`
	PortListID  *string                `json:"portListId,omitempty"`
}

// IsScopeHost determines whether the firewall rule scope matches a host
// (an IP address with no prefix size).
func (scope *FirewallRuleScope) IsScopeHost() bool {
	return scope.IPAddress != nil && scope.IPAddress.PrefixSize == nil
}

// IsScopeNetwork determines whether the firewall rule scope matches a network
// (an IP address with a prefix size).
func (scope *FirewallRuleScope) IsScopeNetwork() bool {
	return scope.IPAddress != nil && scope.IPAddress.PrefixSize != nil
}

// IsScopePort determines whether the firewall rule scope matches a single port.
func (scope *FirewallRuleScope) IsScopePort() bool {
	return scope.Port != nil && scope.Port.End == nil
}

// IsScopePortRange determines whether the firewall rule scope matches a port range.
func (scope *FirewallRuleScope) IsScopePortRange() bool {
	return scope.Port != nil && scope.Port.End != nil
}

// IsScopeAddressList determines whether the firewall rule scope matches an IP address list.
func (scope *FirewallRuleScope) IsScopeAddressList() bool {
	return scope.AddressList != nil
}

// IsScopeAny determines whether the firewall rule scope matches anything (i.e. the rule is unscoped).
// NOTE(review): PortListID is not examined here, so a scope with only a
// port list still reports "any" — confirm this is intended.
func (scope *FirewallRuleScope) IsScopeAny() bool {
	return scope.IPAddress == nil && scope.AddressList == nil && scope.Port == nil
}
// Diff captures the differences (if any) between a FirewallRuleScope and another FirewallRuleScope.
// The address portion (host / network / address list) and the port portion
// (single port / port range) are compared independently; each mismatch
// appends a human-readable description to the result.
func (scope FirewallRuleScope) Diff(other FirewallRuleScope) (differences []string) {
	if scope.IsScopeHost() {
		if other.IsScopeHost() {
			if scope.IPAddress.Address != other.IPAddress.Address {
				differences = append(differences, fmt.Sprintf(
					"target hosts do not match ('%s' vs '%s')",
					scope.IPAddress.Address,
					other.IPAddress.Address,
				))
			}
		} else if other.IsScopeNetwork() {
			differences = append(differences, "host scope vs network scope")
		} else if other.IsScopeAddressList() {
			differences = append(differences, "host scope vs address list scope")
		} else {
			differences = append(differences, "host scope vs unknown scope")
		}
	} else if scope.IsScopeNetwork() {
		if other.IsScopeNetwork() {
			scopeNetwork := fmt.Sprintf("%s/%d",
				scope.IPAddress.Address,
				*scope.IPAddress.PrefixSize,
			)
			otherNetwork := fmt.Sprintf("%s/%d",
				other.IPAddress.Address,
				*other.IPAddress.PrefixSize,
			)
			if scopeNetwork != otherNetwork {
				differences = append(differences, fmt.Sprintf(
					"target networks do not match ('%s' vs '%s')",
					scopeNetwork,
					otherNetwork,
				))
			}
		} else if other.IsScopeHost() {
			differences = append(differences, "network scope vs host scope")
		} else if other.IsScopeAddressList() {
			differences = append(differences, "network scope vs address list scope")
		} else {
			differences = append(differences, "network scope vs unknown scope")
		}
	} else if scope.IsScopeAddressList() {
		if other.IsScopeAddressList() {
			if scope.AddressList.ID != other.AddressList.ID {
				differences = append(differences, fmt.Sprintf(
					"address lists do not match ('%s' vs '%s')",
					scope.AddressList.ID,
					other.AddressList.ID,
				))
			}
		} else if other.IsScopeHost() {
			differences = append(differences, "address list scope vs host scope")
		} else if other.IsScopeNetwork() {
			// Fixed: the original re-tested IsScopeAddressList here, which was
			// unreachable (already matched by the first branch above).
			differences = append(differences, "address list scope vs network scope")
		} else {
			differences = append(differences, "address list scope vs unknown scope")
		}
	}

	if scope.IsScopePort() {
		if other.IsScopePort() {
			if scope.Port.Begin != other.Port.Begin {
				differences = append(differences, fmt.Sprintf(
					"ports do not match (%d vs %d)",
					scope.Port.Begin,
					// Fixed: the original printed scope.Port.End here — a *int
					// (formats as a pointer) and the wrong side of the comparison.
					other.Port.Begin,
				))
			}
		} else if other.IsScopePortRange() {
			differences = append(differences, "port scope vs port-range scope")
		} else {
			differences = append(differences, "port scope vs no scope")
		}
	} else if scope.IsScopePortRange() {
		if other.IsScopePortRange() {
			scopeRange := fmt.Sprintf("%d-%d",
				scope.Port.Begin,
				*scope.Port.End,
			)
			otherRange := fmt.Sprintf("%d-%d",
				other.Port.Begin,
				*other.Port.End,
			)
			differences = append(differences, fmt.Sprintf(
				"port ranges do not match ('%s' vs '%s')",
				scopeRange,
				otherRange,
			))
		} else if other.IsScopePort() {
			differences = append(differences, "port-range scope vs port scope")
		} else {
			differences = append(differences, "port-range scope vs no scope")
		}
	}

	return
}
// FirewallRuleIPAddress represents an IP address for firewall configuration.
// A nil PrefixSize means a single host; a non-nil PrefixSize means a network.
type FirewallRuleIPAddress struct {
	Address    string `json:"address"`
	PrefixSize *int   `json:"prefixSize,omitempty"`
}

// FirewallRulePort represents a firewall port configuration.
// A nil End means a single port; a non-nil End means a port range.
type FirewallRulePort struct {
	Begin int  `json:"begin"`
	End   *int `json:"end"`
}

// FirewallRules represents a page of FirewallRule results.
type FirewallRules struct {
	Rules []FirewallRule `json:"firewallRule"`

	PagedResult
}
// FirewallRuleConfiguration represents the configuration for a new firewall rule.
// Use the fluent builder methods (Enable, Accept, IPv4, MatchSourceAddress, ...)
// to populate it.
type FirewallRuleConfiguration struct {
	Name            string                `json:"name"`
	Action          string                `json:"action"`
	Enabled         bool                  `json:"enabled"`
	Placement       FirewallRulePlacement `json:"placement"`
	IPVersion       string                `json:"ipVersion"`
	Protocol        string                `json:"protocol"`
	Source          FirewallRuleScope     `json:"source"`
	Destination     FirewallRuleScope     `json:"destination"`
	NetworkDomainID string                `json:"networkDomainId"`
}
// Enable enables the firewall rule.
// It returns the configuration to allow call-chaining.
func (configuration *FirewallRuleConfiguration) Enable() *FirewallRuleConfiguration {
	configuration.Enabled = true

	return configuration
}

// Disable disables the firewall rule.
// It returns the configuration to allow call-chaining.
func (configuration *FirewallRuleConfiguration) Disable() *FirewallRuleConfiguration {
	configuration.Enabled = false

	return configuration
}

// Accept sets the firewall rule action to FirewallRuleActionAccept.
func (configuration *FirewallRuleConfiguration) Accept() *FirewallRuleConfiguration {
	configuration.Action = FirewallRuleActionAccept

	return configuration
}

// Drop sets the firewall rule action to FirewallRuleActionDrop.
func (configuration *FirewallRuleConfiguration) Drop() *FirewallRuleConfiguration {
	configuration.Action = FirewallRuleActionDrop

	return configuration
}

// IPv4 sets the firewall rule's target IP version to IPv4.
func (configuration *FirewallRuleConfiguration) IPv4() *FirewallRuleConfiguration {
	configuration.IPVersion = FirewallRuleIPVersion4

	return configuration
}
// IPv6 sets the firewall rule's target IP version to IPv6.
// It returns the configuration to allow call-chaining.
func (configuration *FirewallRuleConfiguration) IPv6() *FirewallRuleConfiguration {
	// Fixed copy-paste bug: this previously assigned FirewallRuleIPVersion4.
	configuration.IPVersion = FirewallRuleIPVersion6

	return configuration
}
// IP sets the firewall rule's target protocol to IP.
func (configuration *FirewallRuleConfiguration) IP() *FirewallRuleConfiguration {
	configuration.Protocol = FirewallRuleProtocolIP

	return configuration
}

// TCP sets the firewall rule's target protocol to TCP.
func (configuration *FirewallRuleConfiguration) TCP() *FirewallRuleConfiguration {
	configuration.Protocol = FirewallRuleProtocolTCP

	return configuration
}

// ICMP sets the firewall rule's target protocol to ICMP.
func (configuration *FirewallRuleConfiguration) ICMP() *FirewallRuleConfiguration {
	configuration.Protocol = FirewallRuleProtocolICMP

	return configuration
}

// PlaceFirst modifies the configuration so that the firewall rule will be placed in the first available position.
func (configuration *FirewallRuleConfiguration) PlaceFirst() *FirewallRuleConfiguration {
	configuration.Placement = FirewallRulePlacement{
		Position: "FIRST",
	}

	return configuration
}

// PlaceBefore modifies the configuration so that the firewall rule will be placed before the specified rule.
func (configuration *FirewallRuleConfiguration) PlaceBefore(beforeRuleName string) *FirewallRuleConfiguration {
	configuration.Placement = FirewallRulePlacement{
		Position:           "BEFORE",
		RelativeToRuleName: &beforeRuleName,
	}

	return configuration
}

// PlaceAfter modifies the configuration so that the firewall rule will be placed after the specified rule.
func (configuration *FirewallRuleConfiguration) PlaceAfter(afterRuleName string) *FirewallRuleConfiguration {
	configuration.Placement = FirewallRulePlacement{
		Position:           "AFTER",
		RelativeToRuleName: &afterRuleName,
	}

	return configuration
}
// MatchAnySourceAddress modifies the configuration so that the firewall rule will match source IP address.
func (configuration *FirewallRuleConfiguration) MatchAnySourceAddress() *FirewallRuleConfiguration {
return configuration.MatchSourceAddress(FirewallRuleMatchAny)
}
// MatchSourceAddress modifies the configuration so that the firewall rule will match a specific source IP address.
func (configuration *FirewallRuleConfiguration) MatchSourceAddress(address string) *FirewallRuleConfiguration {
sourceScope := &configuration.Source
sourceScope.IPAddress = &FirewallRuleIPAddress{
Address: strings.ToUpper(address),
}
sourceScope.AddressList = nil
return configuration
}
// MatchSourceNetwork modifies the configuration so that the firewall rule will match any source IP address on the specified network.
func (configuration *FirewallRuleConfiguration) MatchSourceNetwork(baseAddress string, prefixSize int) *FirewallRuleConfiguration {
sourceScope := &configuration.Source
sourceScope.IPAddress = &FirewallRuleIPAddress{
Address: baseAddress,
PrefixSize: &prefixSize,
}
sourceScope.AddressList = nil
return configuration
}
// MatchSourceAddressList modifies the configuration so that the firewall rule will match a specific source IP address list.
func (configuration *FirewallRuleConfiguration) MatchSourceAddressList(addressListID string) *FirewallRuleConfiguration {
sourceScope := &configuration.Source
sourceScope.IPAddress = nil
sourceScope.AddressList = &EntityReference{
ID: addressListID,
}
return configuration
}
// MatchAnySourcePort modifies the configuration so that the firewall rule will match any source port.
func (configuration *FirewallRuleConfiguration) MatchAnySourcePort() *FirewallRuleConfiguration {
sourceScope := &configuration.Source
sourceScope.Port = nil
sourceScope.PortListID = nil
return configuration
}
// MatchSourcePort modifies the configuration so that the firewall rule will match a specific source port.
func (configuration *FirewallRuleConfiguration) MatchSourcePort(port int) *FirewallRuleConfiguration {
sourceScope := &configuration.Source
sourceScope.Port = &FirewallRulePort{
Begin: port,
}
sourceScope.PortListID = nil
return configuration
}
// MatchSourcePortRange modifies the configuration so that the firewall rule will match any source port in the specified range.
func (configuration *FirewallRuleConfiguration) MatchSourcePortRange(beginPort int, endPort int) *FirewallRuleConfiguration {
sourceScope := &configuration.Source
sourceScope.Port = &FirewallRulePort{
Begin: beginPort,
End: &endPort,
}
sourceScope.PortListID = nil
return configuration
}
// MatchSourcePortList modifies the configuration so that the firewall rule will match any source port appearing on the specified port list (or its children).
func (configuration *FirewallRuleConfiguration) MatchSourcePortList(portListID string) *FirewallRuleConfiguration {
sourceScope := &configuration.Source
sourceScope.Port = nil
sourceScope.PortListID = &portListID
return configuration
}
// MatchAnyDestinationAddress modifies the configuration so that the firewall rule will match any destination IP address.
func (configuration *FirewallRuleConfiguration) MatchAnyDestinationAddress() *FirewallRuleConfiguration {
	return configuration.MatchDestinationAddress(FirewallRuleMatchAny)
}

// MatchDestinationAddress modifies the configuration so that the firewall rule will match a specific destination IP address.
func (configuration *FirewallRuleConfiguration) MatchDestinationAddress(address string) *FirewallRuleConfiguration {
	configuration.Destination.AddressList = nil
	configuration.Destination.IPAddress = &FirewallRuleIPAddress{
		Address: strings.ToUpper(address),
	}

	return configuration
}

// MatchDestinationNetwork modifies the configuration so that the firewall rule will match any destination IP address on the specified network.
func (configuration *FirewallRuleConfiguration) MatchDestinationNetwork(baseAddress string, prefixSize int) *FirewallRuleConfiguration {
	configuration.Destination.AddressList = nil
	configuration.Destination.IPAddress = &FirewallRuleIPAddress{
		Address:    baseAddress,
		PrefixSize: &prefixSize,
	}

	return configuration
}

// MatchDestinationAddressList modifies the configuration so that the firewall rule will match a specific destination IP address list (and, optionally, port).
func (configuration *FirewallRuleConfiguration) MatchDestinationAddressList(addressListID string) *FirewallRuleConfiguration {
	configuration.Destination.IPAddress = nil
	configuration.Destination.AddressList = &EntityReference{
		ID: addressListID,
	}

	return configuration
}

// MatchAnyDestinationPort modifies the configuration so that the firewall rule will match any destination port.
func (configuration *FirewallRuleConfiguration) MatchAnyDestinationPort() *FirewallRuleConfiguration {
	configuration.Destination.Port = nil
	configuration.Destination.PortListID = nil

	return configuration
}

// MatchDestinationPort modifies the configuration so that the firewall rule will match a specific destination port.
func (configuration *FirewallRuleConfiguration) MatchDestinationPort(port int) *FirewallRuleConfiguration {
	configuration.Destination.PortListID = nil
	configuration.Destination.Port = &FirewallRulePort{
		Begin: port,
	}

	return configuration
}

// MatchDestinationPortRange modifies the configuration so that the firewall rule will match any destination port in the specified range.
func (configuration *FirewallRuleConfiguration) MatchDestinationPortRange(beginPort int, endPort int) *FirewallRuleConfiguration {
	configuration.Destination.PortListID = nil
	configuration.Destination.Port = &FirewallRulePort{
		Begin: beginPort,
		End:   &endPort,
	}

	return configuration
}

// MatchDestinationPortList modifies the configuration so that the firewall rule will match any destination port appearing on the specified port list (or its children).
func (configuration *FirewallRuleConfiguration) MatchDestinationPortList(portListID string) *FirewallRuleConfiguration {
	configuration.Destination.Port = nil
	configuration.Destination.PortListID = &portListID

	return configuration
}
// ToFirewallRule converts the FirewallRuleConfiguration to a FirewallRule (for use in test scenarios).
// Placement is intentionally not carried over (FirewallRule has no such field).
func (configuration *FirewallRuleConfiguration) ToFirewallRule() FirewallRule {
	rule := FirewallRule{
		Name:            configuration.Name,
		Action:          configuration.Action,
		Enabled:         configuration.Enabled,
		IPVersion:       configuration.IPVersion,
		Protocol:        configuration.Protocol,
		NetworkDomainID: configuration.NetworkDomainID,
	}
	rule.Source = configuration.Source
	rule.Destination = configuration.Destination

	return rule
}
// FirewallRulePlacement describes the placement for a firewall rule.
// Position is "FIRST", "BEFORE", or "AFTER"; RelativeToRuleName is set for
// the latter two.
type FirewallRulePlacement struct {
	Position           string  `json:"position"`
	RelativeToRuleName *string `json:"relativeToRule,omitempty"`
}

// editFirewallRule is the request body used to enable or disable an existing rule.
type editFirewallRule struct {
	ID      string `json:"id"`
	Enabled bool   `json:"enabled"`
}

// deleteFirewallRule is the request body used to delete an existing rule.
type deleteFirewallRule struct {
	ID string `json:"id"`
}
// GetFirewallRule retrieves the Firewall rule with the specified Id.
// Returns nil if no Firewall rule is found with the specified Id.
func (client *Client) GetFirewallRule(id string) (rule *FirewallRule, err error) {
	organizationID, err := client.getOrganizationID()
	if err != nil {
		return nil, err
	}
	requestURI := fmt.Sprintf("%s/network/firewallRule/%s", organizationID, id)
	request, err := client.newRequestV22(requestURI, http.MethodGet, nil)
	if err != nil {
		return nil, err
	}
	responseBody, statusCode, err := client.executeRequest(request)
	if err != nil {
		return nil, err
	}
	if statusCode != http.StatusOK {
		// Non-200 responses carry an API error envelope rather than a rule.
		var apiResponse *APIResponseV2
		apiResponse, err = readAPIResponseAsJSON(responseBody, statusCode)
		if err != nil {
			return nil, err
		}
		if apiResponse.ResponseCode == ResponseCodeResourceNotFound {
			return nil, nil // Not an error, but was not found.
		}
		return nil, apiResponse.ToError("Request to retrieve firewall rule failed with status code %d (%s): %s", statusCode, apiResponse.ResponseCode, apiResponse.Message)
	}
	rule = &FirewallRule{}
	err = json.Unmarshal(responseBody, rule)
	if err != nil {
		return nil, err
	}
	return rule, nil
}
// ListFirewallRules lists all firewall rules that apply to the specified network domain.
//
// paging may be nil; EnsurePaging substitutes default paging in that case.
func (client *Client) ListFirewallRules(networkDomainID string, paging *Paging) (rules *FirewallRules, err error) {
	organizationID, err := client.getOrganizationID()
	if err != nil {
		return nil, err
	}
	requestURI := fmt.Sprintf("%s/network/firewallRule?networkDomainId=%s&%s",
		organizationID,
		networkDomainID,
		paging.EnsurePaging().toQueryParameters(),
	)
	request, err := client.newRequestV22(requestURI, http.MethodGet, nil)
	if err != nil {
		return nil, err
	}
	responseBody, statusCode, err := client.executeRequest(request)
	if err != nil {
		return nil, err
	}
	if statusCode != http.StatusOK {
		// Non-200 responses carry an API error envelope rather than a result page.
		var apiResponse *APIResponseV2
		apiResponse, err = readAPIResponseAsJSON(responseBody, statusCode)
		if err != nil {
			return nil, err
		}
		return nil, apiResponse.ToError("Request to list firewall rules for network domain '%s' failed with status code %d (%s): %s", networkDomainID, statusCode, apiResponse.ResponseCode, apiResponse.Message)
	}
	rules = &FirewallRules{}
	err = json.Unmarshal(responseBody, rules)
	return rules, err
}
// CreateFirewallRule creates a new firewall rule.
//
// Returns the Id of the new rule, as reported by the API in the
// "firewallRuleId" field message of the response.
func (client *Client) CreateFirewallRule(configuration FirewallRuleConfiguration) (firewallRuleID string, err error) {
	organizationID, err := client.getOrganizationID()
	if err != nil {
		return "", err
	}
	requestURI := fmt.Sprintf("%s/network/createFirewallRule", organizationID)
	request, err := client.newRequestV22(requestURI, http.MethodPost, &configuration)
	if err != nil {
		// FIX: this error was previously unchecked, so executeRequest could be
		// called with a nil request.
		return "", err
	}
	responseBody, statusCode, err := client.executeRequest(request)
	if err != nil {
		return "", err
	}
	apiResponse, err := readAPIResponseAsJSON(responseBody, statusCode)
	if err != nil {
		return "", err
	}
	if apiResponse.ResponseCode != ResponseCodeOK {
		return "", apiResponse.ToError("Request to create firewall rule in network domain '%s' failed with unexpected status code %d (%s): %s", configuration.NetworkDomainID, statusCode, apiResponse.ResponseCode, apiResponse.Message)
	}
	// Expected: "info" { "name": "firewallRuleId", "value": "the-Id-of-the-new-firewall-rule" }
	firewallRuleIDMessage := apiResponse.GetFieldMessage("firewallRuleId")
	if firewallRuleIDMessage == nil {
		return "", apiResponse.ToError("Received an unexpected response (missing 'firewallRuleId') with status code %d (%s): %s", statusCode, apiResponse.ResponseCode, apiResponse.Message)
	}
	return *firewallRuleIDMessage, nil
}
// EditFirewallRule updates the configuration for a firewall rule (enable / disable).
// This operation is synchronous.
func (client *Client) EditFirewallRule(id string, enabled bool) error {
	organizationID, err := client.getOrganizationID()
	if err != nil {
		return err
	}
	requestURI := fmt.Sprintf("%s/network/editFirewallRule", organizationID)
	request, err := client.newRequestV22(requestURI, http.MethodPost, &editFirewallRule{
		ID:      id,
		Enabled: enabled,
	})
	if err != nil {
		// FIX: this error was previously unchecked, so executeRequest could be
		// called with a nil request.
		return err
	}
	responseBody, statusCode, err := client.executeRequest(request)
	if err != nil {
		return err
	}
	apiResponse, err := readAPIResponseAsJSON(responseBody, statusCode)
	if err != nil {
		return err
	}
	if apiResponse.ResponseCode != ResponseCodeOK {
		return apiResponse.ToError("Request to edit firewall rule failed with unexpected status code %d (%s): %s", statusCode, apiResponse.ResponseCode, apiResponse.Message)
	}
	return nil
}
// DeleteFirewallRule deletes the specified FirewallRule rule.
func (client *Client) DeleteFirewallRule(id string) error {
	organizationID, err := client.getOrganizationID()
	if err != nil {
		return err
	}
	requestURI := fmt.Sprintf("%s/network/deleteFirewallRule", organizationID)
	request, err := client.newRequestV22(requestURI, http.MethodPost,
		&deleteFirewallRule{id},
	)
	if err != nil {
		// FIX: this error was previously unchecked, so executeRequest could be
		// called with a nil request.
		return err
	}
	responseBody, statusCode, err := client.executeRequest(request)
	if err != nil {
		return err
	}
	apiResponse, err := readAPIResponseAsJSON(responseBody, statusCode)
	if err != nil {
		return err
	}
	if apiResponse.ResponseCode != ResponseCodeOK {
		return apiResponse.ToError("Request to delete firewall rule '%s' failed with unexpected status code %d (%s): %s", id, statusCode, apiResponse.ResponseCode, apiResponse.Message)
	}
	return nil
}
Handle inconsistent naming of IP address list Id in CloudControl API.
package compute
import (
"encoding/json"
"fmt"
"net/http"
"strings"
)
const (
	// FirewallRuleActionAccept indicates a firewall rule that, if it matches, will accept the packet and stop processing further rules.
	FirewallRuleActionAccept = "ACCEPT_DECISIVELY"
	// FirewallRuleActionDrop indicates a firewall rule that, if it matches, will drop the packet.
	FirewallRuleActionDrop = "DROP"
	// FirewallRuleIPVersion4 indicates a firewall rule that targets IPv4
	FirewallRuleIPVersion4 = "IPv4"
	// FirewallRuleIPVersion6 indicates a firewall rule that targets IPv6
	FirewallRuleIPVersion6 = "IPv6"
	// FirewallRuleProtocolIP indicates a firewall rule that targets the Internet Protocol (IP)
	FirewallRuleProtocolIP = "IP"
	// FirewallRuleProtocolTCP indicates a firewall rule that targets the Transmission Control Protocol (TCP)
	FirewallRuleProtocolTCP = "TCP"
	// FirewallRuleProtocolICMP indicates a firewall rule that targets the Internet Control Message Protocol (ICMP)
	FirewallRuleProtocolICMP = "ICMP"
	// FirewallRuleMatchAny indicates a firewall rule value that matches any other value in the same scope.
	FirewallRuleMatchAny = "ANY"
)
// FirewallRule represents a firewall rule.
//
// Source and Destination each scope the rule by IP address / network /
// address list and (optionally) port / port list.
type FirewallRule struct {
	ID              string            `json:"id"`
	Name            string            `json:"name"`
	Action          string            `json:"action"`
	IPVersion       string            `json:"ipVersion"`
	Protocol        string            `json:"protocol"`
	Source          FirewallRuleScope `json:"source"`
	Destination     FirewallRuleScope `json:"destination"`
	Enabled         bool              `json:"enabled"`
	State           string            `json:"state"`
	NetworkDomainID string            `json:"networkDomainId"`
	DataCenterID    string            `json:"datacenterId"`
	RuleType        string            `json:"ruleType"`
}
// GetID returns the firewall rule's Id.
// Part of the Resource interface implementation.
func (rule *FirewallRule) GetID() string {
	return rule.ID
}
// GetResourceType returns the firewall rule's resource type.
// Part of the Resource interface implementation.
func (rule *FirewallRule) GetResourceType() ResourceType {
	return ResourceTypeFirewallRule
}
// GetName returns the firewall rule's name.
// Part of the Resource interface implementation.
func (rule *FirewallRule) GetName() string {
	return rule.Name
}
// GetState returns the firewall rule's current state.
// Part of the Resource interface implementation.
func (rule *FirewallRule) GetState() string {
	return rule.State
}
// IsDeleted determines whether the firewall rule has been deleted (is nil).
// Part of the Resource interface implementation.
func (rule *FirewallRule) IsDeleted() bool {
	return rule == nil
}
var _ Resource = &FirewallRule{}
// FirewallRuleScope represents a scope (IP and / or port) for firewall configuration (source or destination).
//
// Address matching uses either IPAddress or AddressList/AddressListID;
// port matching uses either Port or PortListID. The pairs are mutually
// exclusive in the CloudControl API.
type FirewallRuleScope struct {
	IPAddress     *FirewallRuleIPAddress `json:"ip,omitempty"`
	AddressList   *EntityReference       `json:"ipAddressList,omitempty"`
	AddressListID *string                `json:"ipAddressListId,omitempty"`
	Port          *FirewallRulePort      `json:"port,omitempty"`
	PortListID    *string                `json:"portListId,omitempty"`
}
// IsScopeHost determines whether the firewall rule scope matches a host.
func (scope *FirewallRuleScope) IsScopeHost() bool {
	ip := scope.IPAddress

	return ip != nil && ip.PrefixSize == nil
}
// IsScopeNetwork determines whether the firewall rule scope matches a network.
func (scope *FirewallRuleScope) IsScopeNetwork() bool {
	ip := scope.IPAddress

	return ip != nil && ip.PrefixSize != nil
}
// IsScopePort determines whether the firewall rule scope matches a single port.
func (scope *FirewallRuleScope) IsScopePort() bool {
	port := scope.Port

	return port != nil && port.End == nil
}
// IsScopePortRange determines whether the firewall rule scope matches a port range.
func (scope *FirewallRuleScope) IsScopePortRange() bool {
	port := scope.Port

	return port != nil && port.End != nil
}
// IsScopeAddressList determines whether the firewall rule scope matches an IP address list.
func (scope *FirewallRuleScope) IsScopeAddressList() bool {
	if scope.AddressList != nil {
		return true
	}

	return scope.AddressListID != nil
}
// IsScopeAny determines whether the firewall rule scope matches anything (i.e. the rule is unscoped).
func (scope *FirewallRuleScope) IsScopeAny() bool {
	hasConstraint := scope.IPAddress != nil || scope.AddressList != nil || scope.Port != nil

	return !hasConstraint
}
// Diff captures the differences (if any) between a FirewallRuleScope and another FirewallRuleScope.
//
// Address differences and port differences are evaluated independently, so a
// single call can report one difference from each category.
func (scope FirewallRuleScope) Diff(other FirewallRuleScope) (differences []string) {
	if scope.IsScopeHost() {
		if other.IsScopeHost() {
			if scope.IPAddress.Address != other.IPAddress.Address {
				differences = append(differences, fmt.Sprintf(
					"target hosts do not match ('%s' vs '%s')",
					scope.IPAddress.Address,
					other.IPAddress.Address,
				))
			}
		} else if other.IsScopeNetwork() {
			differences = append(differences, "host scope vs network scope")
		} else if other.IsScopeAddressList() {
			differences = append(differences, "host scope vs address list scope")
		} else {
			differences = append(differences, "host scope vs unknown scope")
		}
	} else if scope.IsScopeNetwork() {
		if other.IsScopeNetwork() {
			scopeNetwork := fmt.Sprintf("%s/%d",
				scope.IPAddress.Address,
				*scope.IPAddress.PrefixSize,
			)
			otherNetwork := fmt.Sprintf("%s/%d",
				other.IPAddress.Address,
				*other.IPAddress.PrefixSize,
			)
			if scopeNetwork != otherNetwork {
				differences = append(differences, fmt.Sprintf(
					"target networks do not match ('%s' vs '%s')",
					scopeNetwork,
					otherNetwork,
				))
			}
		} else if other.IsScopeHost() {
			differences = append(differences, "network scope vs host scope")
		} else if other.IsScopeAddressList() {
			differences = append(differences, "network scope vs address list scope")
		} else {
			differences = append(differences, "network scope vs unknown scope")
		}
	} else if scope.IsScopeAddressList() {
		if other.IsScopeAddressList() {
			// Resolve each side's address list Id (the scope may carry either a
			// raw Id or an entity reference).
			addressListID := scope.AddressListID
			if addressListID == nil {
				addressListID = &scope.AddressList.ID
			}
			otherAddressListID := other.AddressListID
			if otherAddressListID == nil {
				otherAddressListID = &other.AddressList.ID
			}
			// FIX: compare the Id values, not the pointers (pointer comparison
			// was effectively always unequal, falsely reporting a difference).
			if *addressListID != *otherAddressListID {
				// FIX: report the resolved Ids; AddressList may be nil when only
				// AddressListID is populated.
				differences = append(differences, fmt.Sprintf(
					"address lists do not match ('%s' vs '%s')",
					*addressListID,
					*otherAddressListID,
				))
			}
		} else if other.IsScopeHost() {
			differences = append(differences, "address list scope vs host scope")
		} else if other.IsScopeNetwork() {
			differences = append(differences, "address list scope vs network scope")
		} else {
			differences = append(differences, "address list scope vs unknown scope")
		}
	}
	if scope.IsScopePort() {
		if other.IsScopePort() {
			if scope.Port.Begin != other.Port.Begin {
				// FIX: the message previously printed scope.Port.End (nil for a
				// single-port scope) instead of the other scope's port.
				differences = append(differences, fmt.Sprintf(
					"ports do not match (%d vs %d)",
					scope.Port.Begin,
					other.Port.Begin,
				))
			}
		} else if other.IsScopePortRange() {
			differences = append(differences, "port scope vs port-range scope")
		} else {
			differences = append(differences, "port scope vs no scope")
		}
	} else if scope.IsScopePortRange() {
		if other.IsScopePortRange() {
			scopeRange := fmt.Sprintf("%d-%d",
				scope.Port.Begin,
				*scope.Port.End,
			)
			otherRange := fmt.Sprintf("%d-%d",
				other.Port.Begin,
				*other.Port.End,
			)
			// FIX: only report a difference when the ranges actually differ
			// (previously reported unconditionally, even for identical ranges).
			if scopeRange != otherRange {
				differences = append(differences, fmt.Sprintf(
					"port ranges do not match ('%s' vs '%s')",
					scopeRange,
					otherRange,
				))
			}
		} else if other.IsScopePort() {
			differences = append(differences, "port-range scope vs port scope")
		} else {
			differences = append(differences, "port-range scope vs no scope")
		}
	}
	return
}
// FirewallRuleIPAddress represents an IP address for firewall configuration.
type FirewallRuleIPAddress struct {
	Address string `json:"address"`
	// PrefixSize is present for network scopes and nil for host scopes.
	PrefixSize *int `json:"prefixSize,omitempty"`
}

// FirewallRulePort represents a firewall port configuration.
type FirewallRulePort struct {
	Begin int `json:"begin"`
	// End is present for port ranges and nil for a single port.
	End *int `json:"end"`
}

// FirewallRules represents a page of FirewallRule results.
type FirewallRules struct {
	Rules []FirewallRule `json:"firewallRule"`
	PagedResult
}
// FirewallRuleConfiguration represents the configuration for a new firewall rule.
//
// Build it fluently via the Enable/Accept/IPv4/Match* helper methods, each of
// which returns the configuration for chaining.
type FirewallRuleConfiguration struct {
	Name            string                `json:"name"`
	Action          string                `json:"action"`
	Enabled         bool                  `json:"enabled"`
	Placement       FirewallRulePlacement `json:"placement"`
	IPVersion       string                `json:"ipVersion"`
	Protocol        string                `json:"protocol"`
	Source          FirewallRuleScope     `json:"source"`
	Destination     FirewallRuleScope     `json:"destination"`
	NetworkDomainID string                `json:"networkDomainId"`
}
// Enable enables the firewall rule and returns the configuration for chaining.
func (configuration *FirewallRuleConfiguration) Enable() *FirewallRuleConfiguration {
	configuration.Enabled = true

	return configuration
}
// Disable disables the firewall rule and returns the configuration for chaining.
func (configuration *FirewallRuleConfiguration) Disable() *FirewallRuleConfiguration {
	configuration.Enabled = false

	return configuration
}
// Accept sets the firewall rule action to FirewallRuleActionAccept and returns the configuration for chaining.
func (configuration *FirewallRuleConfiguration) Accept() *FirewallRuleConfiguration {
	configuration.Action = FirewallRuleActionAccept

	return configuration
}
// Drop sets the firewall rule action to FirewallRuleActionDrop and returns the configuration for chaining.
func (configuration *FirewallRuleConfiguration) Drop() *FirewallRuleConfiguration {
	configuration.Action = FirewallRuleActionDrop

	return configuration
}
// IPv4 sets the firewall rule's target IP version to IPv4 and returns the configuration for chaining.
func (configuration *FirewallRuleConfiguration) IPv4() *FirewallRuleConfiguration {
	configuration.IPVersion = FirewallRuleIPVersion4

	return configuration
}
// IPv6 sets the firewall rule's target IP version to IPv6.
func (configuration *FirewallRuleConfiguration) IPv6() *FirewallRuleConfiguration {
	// FIX: previously assigned FirewallRuleIPVersion4, silently creating an
	// IPv4 rule when the caller asked for IPv6.
	configuration.IPVersion = FirewallRuleIPVersion6
	return configuration
}
// IP sets the firewall rule's target protocol to IP and returns the configuration for chaining.
func (configuration *FirewallRuleConfiguration) IP() *FirewallRuleConfiguration {
	configuration.Protocol = FirewallRuleProtocolIP

	return configuration
}
// TCP sets the firewall rule's target protocol to TCP and returns the configuration for chaining.
func (configuration *FirewallRuleConfiguration) TCP() *FirewallRuleConfiguration {
	configuration.Protocol = FirewallRuleProtocolTCP

	return configuration
}
// ICMP sets the firewall rule's target protocol to ICMP and returns the configuration for chaining.
func (configuration *FirewallRuleConfiguration) ICMP() *FirewallRuleConfiguration {
	configuration.Protocol = FirewallRuleProtocolICMP

	return configuration
}
// PlaceFirst modifies the configuration so that the firewall rule will be placed in the first available position.
func (configuration *FirewallRuleConfiguration) PlaceFirst() *FirewallRuleConfiguration {
	configuration.Placement = FirewallRulePlacement{Position: "FIRST"}

	return configuration
}
// PlaceBefore modifies the configuration so that the firewall rule will be placed before the specified rule.
func (configuration *FirewallRuleConfiguration) PlaceBefore(beforeRuleName string) *FirewallRuleConfiguration {
	placement := FirewallRulePlacement{
		Position:           "BEFORE",
		RelativeToRuleName: &beforeRuleName,
	}
	configuration.Placement = placement

	return configuration
}
// PlaceAfter modifies the configuration so that the firewall rule will be placed after the specified rule.
func (configuration *FirewallRuleConfiguration) PlaceAfter(afterRuleName string) *FirewallRuleConfiguration {
	placement := FirewallRulePlacement{
		Position:           "AFTER",
		RelativeToRuleName: &afterRuleName,
	}
	configuration.Placement = placement

	return configuration
}
// MatchAnySourceAddress modifies the configuration so that the firewall rule will match any source IP address.
func (configuration *FirewallRuleConfiguration) MatchAnySourceAddress() *FirewallRuleConfiguration {
	// Delegates to MatchSourceAddress with the API's "ANY" sentinel.
	return configuration.MatchSourceAddress(FirewallRuleMatchAny)
}
// MatchSourceAddress modifies the configuration so that the firewall rule will match a specific source IP address.
func (configuration *FirewallRuleConfiguration) MatchSourceAddress(address string) *FirewallRuleConfiguration {
	sourceScope := &configuration.Source
	sourceScope.IPAddress = &FirewallRuleIPAddress{
		Address: strings.ToUpper(address), // normalises "any" to the API's "ANY" sentinel
	}
	// Address matching and address-list matching are mutually exclusive.
	sourceScope.AddressList = nil
	// FIX: also clear AddressListID (previously left set, producing a
	// conflicting scope if MatchSourceAddressList had been called earlier).
	sourceScope.AddressListID = nil
	return configuration
}
// MatchSourceNetwork modifies the configuration so that the firewall rule will match any source IP address on the specified network.
func (configuration *FirewallRuleConfiguration) MatchSourceNetwork(baseAddress string, prefixSize int) *FirewallRuleConfiguration {
	sourceScope := &configuration.Source
	sourceScope.IPAddress = &FirewallRuleIPAddress{
		Address:    baseAddress,
		PrefixSize: &prefixSize,
	}
	// Network matching and address-list matching are mutually exclusive.
	sourceScope.AddressList = nil
	// FIX: also clear AddressListID (previously left set, producing a
	// conflicting scope if MatchSourceAddressList had been called earlier).
	sourceScope.AddressListID = nil
	return configuration
}
// MatchSourceAddressList modifies the configuration so that the firewall rule will match a specific source IP address list.
func (configuration *FirewallRuleConfiguration) MatchSourceAddressList(addressListID string) *FirewallRuleConfiguration {
	// Address-list matching replaces any explicit address / network match.
	configuration.Source.IPAddress = nil
	configuration.Source.AddressList = nil
	configuration.Source.AddressListID = &addressListID

	return configuration
}
// MatchAnySourcePort modifies the configuration so that the firewall rule will match any source port.
func (configuration *FirewallRuleConfiguration) MatchAnySourcePort() *FirewallRuleConfiguration {
	// Clearing both fields leaves the source port unscoped.
	configuration.Source.Port = nil
	configuration.Source.PortListID = nil

	return configuration
}
// MatchSourcePort modifies the configuration so that the firewall rule will match a specific source port.
func (configuration *FirewallRuleConfiguration) MatchSourcePort(port int) *FirewallRuleConfiguration {
	// Matching a single port and matching a port list are mutually exclusive.
	configuration.Source.PortListID = nil
	configuration.Source.Port = &FirewallRulePort{Begin: port}

	return configuration
}
// MatchSourcePortRange modifies the configuration so that the firewall rule will match any source port in the specified range.
func (configuration *FirewallRuleConfiguration) MatchSourcePortRange(beginPort int, endPort int) *FirewallRuleConfiguration {
	// Matching a port range and matching a port list are mutually exclusive.
	configuration.Source.PortListID = nil
	configuration.Source.Port = &FirewallRulePort{
		Begin: beginPort,
		End:   &endPort,
	}

	return configuration
}
// MatchSourcePortList modifies the configuration so that the firewall rule will match any source port appearing on the specified port list (or its children).
func (configuration *FirewallRuleConfiguration) MatchSourcePortList(portListID string) *FirewallRuleConfiguration {
	// Matching a port list and matching an explicit port / range are mutually exclusive.
	configuration.Source.Port = nil
	configuration.Source.PortListID = &portListID

	return configuration
}
// MatchAnyDestinationAddress modifies the configuration so that the firewall rule will match any destination IP address.
func (configuration *FirewallRuleConfiguration) MatchAnyDestinationAddress() *FirewallRuleConfiguration {
	// Delegates to MatchDestinationAddress with the API's "ANY" sentinel.
	return configuration.MatchDestinationAddress(FirewallRuleMatchAny)
}
// MatchDestinationAddress modifies the configuration so that the firewall rule will match a specific destination IP address.
func (configuration *FirewallRuleConfiguration) MatchDestinationAddress(address string) *FirewallRuleConfiguration {
	destinationScope := &configuration.Destination
	destinationScope.IPAddress = &FirewallRuleIPAddress{
		Address: strings.ToUpper(address), // normalises "any" to the API's "ANY" sentinel
	}
	// Address matching and address-list matching are mutually exclusive.
	destinationScope.AddressList = nil
	// FIX: also clear AddressListID (previously left set, producing a
	// conflicting scope if MatchDestinationAddressList had been called earlier).
	destinationScope.AddressListID = nil
	return configuration
}
// MatchDestinationNetwork modifies the configuration so that the firewall rule will match any destination IP address on the specified network.
func (configuration *FirewallRuleConfiguration) MatchDestinationNetwork(baseAddress string, prefixSize int) *FirewallRuleConfiguration {
	destinationScope := &configuration.Destination
	destinationScope.IPAddress = &FirewallRuleIPAddress{
		Address:    baseAddress,
		PrefixSize: &prefixSize,
	}
	// Network matching and address-list matching are mutually exclusive.
	destinationScope.AddressList = nil
	// FIX: also clear AddressListID (previously left set, producing a
	// conflicting scope if MatchDestinationAddressList had been called earlier).
	destinationScope.AddressListID = nil
	return configuration
}
// MatchDestinationAddressList modifies the configuration so that the firewall rule will match a specific destination IP address list (and, optionally, port).
func (configuration *FirewallRuleConfiguration) MatchDestinationAddressList(addressListID string) *FirewallRuleConfiguration {
	destinationScope := &configuration.Destination
	// FIX: clear IPAddress as well (the source-side counterpart,
	// MatchSourceAddressList, does so; leaving it set produced a conflicting scope).
	destinationScope.IPAddress = nil
	destinationScope.AddressList = nil
	destinationScope.AddressListID = &addressListID
	return configuration
}
// MatchAnyDestinationPort modifies the configuration so that the firewall rule will match any destination port.
func (configuration *FirewallRuleConfiguration) MatchAnyDestinationPort() *FirewallRuleConfiguration {
	// Clearing both fields leaves the destination port unscoped.
	configuration.Destination.Port = nil
	configuration.Destination.PortListID = nil

	return configuration
}
// MatchDestinationPort modifies the configuration so that the firewall rule will match a specific destination port.
func (configuration *FirewallRuleConfiguration) MatchDestinationPort(port int) *FirewallRuleConfiguration {
	// Matching a single port and matching a port list are mutually exclusive.
	configuration.Destination.PortListID = nil
	configuration.Destination.Port = &FirewallRulePort{Begin: port}

	return configuration
}
// MatchDestinationPortRange modifies the configuration so that the firewall rule will match any destination port in the specified range.
func (configuration *FirewallRuleConfiguration) MatchDestinationPortRange(beginPort int, endPort int) *FirewallRuleConfiguration {
	// Matching a port range and matching a port list are mutually exclusive.
	configuration.Destination.PortListID = nil
	configuration.Destination.Port = &FirewallRulePort{
		Begin: beginPort,
		End:   &endPort,
	}

	return configuration
}
// MatchDestinationPortList modifies the configuration so that the firewall rule will match any destination port appearing on the specified port list (or its children).
func (configuration *FirewallRuleConfiguration) MatchDestinationPortList(portListID string) *FirewallRuleConfiguration {
	// Matching a port list and matching an explicit port / range are mutually exclusive.
	configuration.Destination.Port = nil
	configuration.Destination.PortListID = &portListID

	return configuration
}
// ToFirewallRule converts the FirewallRuleConfiguration to a FirewallRule (for use in test scenarios).
//
// Only the fields present on the configuration are carried over; ID, State,
// DataCenterID, RuleType, and the placement are left at their zero values.
func (configuration *FirewallRuleConfiguration) ToFirewallRule() FirewallRule {
	return FirewallRule{
		Name:            configuration.Name,
		IPVersion:       configuration.IPVersion,
		Protocol:        configuration.Protocol,
		Source:          configuration.Source,
		Destination:     configuration.Destination,
		Action:          configuration.Action,
		Enabled:         configuration.Enabled,
		NetworkDomainID: configuration.NetworkDomainID,
	}
}
// FirewallRulePlacement describes the placement for a firewall rule.
//
// Position is one of "FIRST", "BEFORE", or "AFTER"; RelativeToRuleName is
// required for the relative positions and omitted for "FIRST".
type FirewallRulePlacement struct {
	Position           string  `json:"position"`
	RelativeToRuleName *string `json:"relativeToRule,omitempty"`
}
// editFirewallRule is the request body for the "editFirewallRule" API operation.
type editFirewallRule struct {
	ID      string `json:"id"`
	Enabled bool   `json:"enabled"`
}

// deleteFirewallRule is the request body for the "deleteFirewallRule" API operation.
type deleteFirewallRule struct {
	ID string `json:"id"`
}
// GetFirewallRule retrieves the Firewall rule with the specified Id.
// Returns nil if no Firewall rule is found with the specified Id.
func (client *Client) GetFirewallRule(id string) (rule *FirewallRule, err error) {
	organizationID, err := client.getOrganizationID()
	if err != nil {
		return nil, err
	}
	requestURI := fmt.Sprintf("%s/network/firewallRule/%s", organizationID, id)
	request, err := client.newRequestV22(requestURI, http.MethodGet, nil)
	if err != nil {
		return nil, err
	}
	responseBody, statusCode, err := client.executeRequest(request)
	if err != nil {
		return nil, err
	}
	if statusCode != http.StatusOK {
		// Non-200 responses carry an API error envelope rather than a rule.
		var apiResponse *APIResponseV2
		apiResponse, err = readAPIResponseAsJSON(responseBody, statusCode)
		if err != nil {
			return nil, err
		}
		if apiResponse.ResponseCode == ResponseCodeResourceNotFound {
			return nil, nil // Not an error, but was not found.
		}
		return nil, apiResponse.ToError("Request to retrieve firewall rule failed with status code %d (%s): %s", statusCode, apiResponse.ResponseCode, apiResponse.Message)
	}
	rule = &FirewallRule{}
	err = json.Unmarshal(responseBody, rule)
	if err != nil {
		return nil, err
	}
	return rule, nil
}
// ListFirewallRules lists all firewall rules that apply to the specified network domain.
//
// paging may be nil; EnsurePaging substitutes default paging in that case.
func (client *Client) ListFirewallRules(networkDomainID string, paging *Paging) (rules *FirewallRules, err error) {
	organizationID, err := client.getOrganizationID()
	if err != nil {
		return nil, err
	}
	requestURI := fmt.Sprintf("%s/network/firewallRule?networkDomainId=%s&%s",
		organizationID,
		networkDomainID,
		paging.EnsurePaging().toQueryParameters(),
	)
	request, err := client.newRequestV22(requestURI, http.MethodGet, nil)
	if err != nil {
		return nil, err
	}
	responseBody, statusCode, err := client.executeRequest(request)
	if err != nil {
		return nil, err
	}
	if statusCode != http.StatusOK {
		// Non-200 responses carry an API error envelope rather than a result page.
		var apiResponse *APIResponseV2
		apiResponse, err = readAPIResponseAsJSON(responseBody, statusCode)
		if err != nil {
			return nil, err
		}
		return nil, apiResponse.ToError("Request to list firewall rules for network domain '%s' failed with status code %d (%s): %s", networkDomainID, statusCode, apiResponse.ResponseCode, apiResponse.Message)
	}
	rules = &FirewallRules{}
	err = json.Unmarshal(responseBody, rules)
	return rules, err
}
// CreateFirewallRule creates a new firewall rule.
//
// Returns the Id of the new rule, as reported by the API in the
// "firewallRuleId" field message of the response.
func (client *Client) CreateFirewallRule(configuration FirewallRuleConfiguration) (firewallRuleID string, err error) {
	organizationID, err := client.getOrganizationID()
	if err != nil {
		return "", err
	}
	requestURI := fmt.Sprintf("%s/network/createFirewallRule", organizationID)
	request, err := client.newRequestV22(requestURI, http.MethodPost, &configuration)
	if err != nil {
		// FIX: this error was previously unchecked, so executeRequest could be
		// called with a nil request.
		return "", err
	}
	responseBody, statusCode, err := client.executeRequest(request)
	if err != nil {
		return "", err
	}
	apiResponse, err := readAPIResponseAsJSON(responseBody, statusCode)
	if err != nil {
		return "", err
	}
	if apiResponse.ResponseCode != ResponseCodeOK {
		return "", apiResponse.ToError("Request to create firewall rule in network domain '%s' failed with unexpected status code %d (%s): %s", configuration.NetworkDomainID, statusCode, apiResponse.ResponseCode, apiResponse.Message)
	}
	// Expected: "info" { "name": "firewallRuleId", "value": "the-Id-of-the-new-firewall-rule" }
	firewallRuleIDMessage := apiResponse.GetFieldMessage("firewallRuleId")
	if firewallRuleIDMessage == nil {
		return "", apiResponse.ToError("Received an unexpected response (missing 'firewallRuleId') with status code %d (%s): %s", statusCode, apiResponse.ResponseCode, apiResponse.Message)
	}
	return *firewallRuleIDMessage, nil
}
// EditFirewallRule updates the configuration for a firewall rule (enable / disable).
// This operation is synchronous.
func (client *Client) EditFirewallRule(id string, enabled bool) error {
	organizationID, err := client.getOrganizationID()
	if err != nil {
		return err
	}
	requestURI := fmt.Sprintf("%s/network/editFirewallRule", organizationID)
	request, err := client.newRequestV22(requestURI, http.MethodPost, &editFirewallRule{
		ID:      id,
		Enabled: enabled,
	})
	if err != nil {
		// FIX: this error was previously unchecked, so executeRequest could be
		// called with a nil request.
		return err
	}
	responseBody, statusCode, err := client.executeRequest(request)
	if err != nil {
		return err
	}
	apiResponse, err := readAPIResponseAsJSON(responseBody, statusCode)
	if err != nil {
		return err
	}
	if apiResponse.ResponseCode != ResponseCodeOK {
		return apiResponse.ToError("Request to edit firewall rule failed with unexpected status code %d (%s): %s", statusCode, apiResponse.ResponseCode, apiResponse.Message)
	}
	return nil
}
// DeleteFirewallRule deletes the specified FirewallRule rule.
func (client *Client) DeleteFirewallRule(id string) error {
	organizationID, err := client.getOrganizationID()
	if err != nil {
		return err
	}
	requestURI := fmt.Sprintf("%s/network/deleteFirewallRule", organizationID)
	request, err := client.newRequestV22(requestURI, http.MethodPost,
		&deleteFirewallRule{id},
	)
	if err != nil {
		// FIX: this error was previously unchecked, so executeRequest could be
		// called with a nil request.
		return err
	}
	responseBody, statusCode, err := client.executeRequest(request)
	if err != nil {
		return err
	}
	apiResponse, err := readAPIResponseAsJSON(responseBody, statusCode)
	if err != nil {
		return err
	}
	if apiResponse.ResponseCode != ResponseCodeOK {
		return apiResponse.ToError("Request to delete firewall rule '%s' failed with unexpected status code %d (%s): %s", id, statusCode, apiResponse.ResponseCode, apiResponse.Message)
	}
	return nil
}
|
package main
import (
"fmt"
"time"
"github.com/ninjasphere/go-ninja/channels"
"github.com/ninjasphere/go-zigbee/gateway"
)
// OnOffChannel exposes a ZigBee device's on/off cluster as a Ninja on/off channel.
type OnOffChannel struct {
	Channel
	// channel is the exported Ninja on/off channel; set by init().
	channel *channels.OnOffChannel
}
// -------- On/Off Protocol --------

// TurnOn switches the device on by sending the ON state to the gateway.
func (c *OnOffChannel) TurnOn() error {
	state := gateway.GwOnOffStateT_ON_STATE.Enum()
	return c.setState(state)
}
// TurnOff switches the device off by sending the OFF state to the gateway.
func (c *OnOffChannel) TurnOff() error {
	state := gateway.GwOnOffStateT_OFF_STATE.Enum()
	return c.setState(state)
}
// ToggleOnOff inverts the device's current on/off state via the gateway's TOGGLE state.
func (c *OnOffChannel) ToggleOnOff() error {
	state := gateway.GwOnOffStateT_TOGGLE_STATE.Enum()
	return c.setState(state)
}
// SetOnOff sets the device's on/off state from a boolean (true = on).
func (c *OnOffChannel) SetOnOff(state bool) error {
	if !state {
		return c.TurnOff()
	}
	return c.TurnOn()
}
// init initialises the on/off channel: exports it over the Ninja connection
// and starts a goroutine that polls the device's on/off state every 10 seconds.
func (c *OnOffChannel) init() error {
	log.Debugf("Initialising on/off channel of device %d", *c.device.deviceInfo.IeeeAddress)
	// Attribute reporting is disabled for now; we poll instead (see goroutine below).
	//clusterID := uint32(0x06)
	/*attributeID := uint32(0)
	minReportInterval := uint32(1)
	maxReportInterval := uint32(120)
	request := &gateway.GwSetAttributeReportingReq{
		DstAddress: &gateway.GwAddressStructT{
			AddressType: gateway.GwAddressTypeT_UNICAST.Enum(),
			IeeeAddr:    c.device.deviceInfo.IeeeAddress,
		},
		ClusterId: &clusterID,
		AttributeReportList: []*gateway.GwAttributeReportT{{
			AttributeId:       &attributeID,
			AttributeType:     gateway.GwZclAttributeDataTypesT_ZCL_DATATYPE_BOOLEAN.Enum(),
			MinReportInterval: &minReportInterval,
			MaxReportInterval: &maxReportInterval,
		}},
	}
	response := &gateway.GwSetAttributeReportingRspInd{}
	err := c.device.driver.gatewayConn.SendAsyncCommand(request, response, 20*time.Second)
	if err != nil {
		log.Errorf("Error enabling on/off reporting: %s", err)
	} else if response.Status.String() != "STATUS_SUCCESS" {
		log.Errorf("Failed to enable on/off reporting. status: %s", response.Status.String())
	}*/
	c.channel = channels.NewOnOffChannel(c)
	// FIX: err was assigned with "=" but never declared (its declaration lives
	// in the commented-out block above), which does not compile.
	err := c.device.driver.Conn.ExportChannel(c.device, c.channel, c.ID)
	if err != nil {
		log.Fatalf("Failed to announce on/off channel: %s", err)
	}
	go func() {
		for {
			log.Debugf("Polling for on/off")
			err := c.fetchState()
			if err != nil {
				log.Errorf("Failed to poll for on/off state %s", err)
			}
			time.Sleep(10 * time.Second)
		}
	}()
	return nil
}
// setState sends the given on/off state to the device via the gateway, then
// re-fetches the device state so the channel reflects reality.
func (c *OnOffChannel) setState(state *gateway.GwOnOffStateT) error {
	request := &gateway.DevSetOnOffStateReq{
		DstAddress: &gateway.GwAddressStructT{
			AddressType: gateway.GwAddressTypeT_UNICAST.Enum(),
			IeeeAddr:    c.device.deviceInfo.IeeeAddress,
		},
		State: state,
	}
	response := &gateway.GwZigbeeGenericRspInd{}
	err := c.device.driver.gatewayConn.SendAsyncCommand(request, response, 2*time.Second)
	if err != nil {
		return fmt.Errorf("Error setting on/off state : %s", err)
	}
	// NOTE(review): guard against a response with no status before calling
	// String() on it — presumably the nil dereference seen in the field; confirm.
	if response.Status == nil {
		return fmt.Errorf("Failed to set on/off state. nil status in response")
	}
	if response.Status.String() != "STATUS_SUCCESS" {
		return fmt.Errorf("Failed to set on/off state. status: %s", response.Status.String())
	}
	return c.fetchState()
}
// fetchState queries the device's current on/off state via the gateway and
// publishes it on the Ninja channel.
func (c *OnOffChannel) fetchState() error {
	request := &gateway.DevGetOnOffStateReq{
		DstAddress: &gateway.GwAddressStructT{
			AddressType: gateway.GwAddressTypeT_UNICAST.Enum(),
			IeeeAddr:    c.device.deviceInfo.IeeeAddress,
		},
	}
	response := &gateway.DevGetOnOffStateRspInd{}
	err := c.device.driver.gatewayConn.SendAsyncCommand(request, response, 10*time.Second)
	if err != nil {
		return fmt.Errorf("Error getting on/off state : %s", err)
	}
	// NOTE(review): guard against a response missing its status before calling
	// String() on it — presumably the nil dereference seen in the field; confirm.
	if response.Status == nil {
		return fmt.Errorf("Failed to get on/off state. nil status in response")
	}
	if response.Status.String() != "STATUS_SUCCESS" {
		return fmt.Errorf("Failed to get on/off state. status: %s", response.Status.String())
	}
	// FIX: StateValue was dereferenced unconditionally; a successful response
	// without a state value would panic.
	if response.StateValue == nil {
		return fmt.Errorf("Failed to get on/off state. nil state value in response")
	}
	c.channel.SendState(*response.StateValue == gateway.GwOnOffStateValueT_ON)
	return nil
}
Add assertions to catch the case that causes this bugsnag error.
https://bugsnag.com/ninja-blocks-1/driver-go-zigbee/errors/54ba7d920eb4aaa7d9721869?event_id=54caa94274d0327e3f00e543
Signed-off-by: Jon Seymour <44f878afe53efc66b76772bd845eb65944ed8232@ninjablocks.com>
package main
import (
"fmt"
"time"
"github.com/ninjasphere/go-ninja/channels"
"github.com/ninjasphere/go-zigbee/gateway"
)
// OnOffChannel exposes a ZigBee device's on/off cluster as a Ninja Sphere
// on/off channel.
type OnOffChannel struct {
	Channel                        // embedded base channel (provides device/ID, defined elsewhere in this file)
	channel *channels.OnOffChannel // exported channel used to publish state
}
// -------- On/Off Protocol --------

// TurnOn switches the device on.
func (c *OnOffChannel) TurnOn() error {
	return c.setState(gateway.GwOnOffStateT_ON_STATE.Enum())
}

// TurnOff switches the device off.
func (c *OnOffChannel) TurnOff() error {
	return c.setState(gateway.GwOnOffStateT_OFF_STATE.Enum())
}

// ToggleOnOff flips the device's current on/off state.
func (c *OnOffChannel) ToggleOnOff() error {
	return c.setState(gateway.GwOnOffStateT_TOGGLE_STATE.Enum())
}

// SetOnOff turns the device on when state is true, off otherwise.
func (c *OnOffChannel) SetOnOff(state bool) error {
	if state {
		return c.TurnOn()
	}
	return c.TurnOff()
}
// init announces the on/off channel for this device over the Ninja
// connection and starts a goroutine that polls the device state every
// 10 seconds for the life of the process.
//
// The attribute-reporting configuration below is intentionally left
// commented out; polling is used instead.
func (c *OnOffChannel) init() error {
	log.Debugf("Initialising on/off channel of device %d", *c.device.deviceInfo.IeeeAddress)
	//clusterID := uint32(0x06)
	/*attributeID := uint32(0)
	minReportInterval := uint32(1)
	maxReportInterval := uint32(120)
	request := &gateway.GwSetAttributeReportingReq{
	DstAddress: &gateway.GwAddressStructT{
	AddressType: gateway.GwAddressTypeT_UNICAST.Enum(),
	IeeeAddr: c.device.deviceInfo.IeeeAddress,
	},
	ClusterId: &clusterID,
	AttributeReportList: []*gateway.GwAttributeReportT{{
	AttributeId: &attributeID,
	AttributeType: gateway.GwZclAttributeDataTypesT_ZCL_DATATYPE_BOOLEAN.Enum(),
	MinReportInterval: &minReportInterval,
	MaxReportInterval: &maxReportInterval,
	}},
	}
	response := &gateway.GwSetAttributeReportingRspInd{}
	err := c.device.driver.gatewayConn.SendAsyncCommand(request, response, 20*time.Second)
	if err != nil {
	log.Errorf("Error enabling on/off reporting: %s", err)
	} else if response.Status.String() != "STATUS_SUCCESS" {
	log.Errorf("Failed to enable on/off reporting. status: %s", response.Status.String())
	}*/
	c.channel = channels.NewOnOffChannel(c)
	// Bug fix: "err" was previously assigned with "=" but its only
	// declaration sits inside the commented-out block above, so the code
	// did not compile. Declare it here with ":=".
	err := c.device.driver.Conn.ExportChannel(c.device, c.channel, c.ID)
	if err != nil {
		log.Fatalf("Failed to announce on/off channel: %s", err)
	}
	// Poll loop: errors are logged and polling continues.
	go func() {
		for {
			log.Debugf("Polling for on/off")
			err := c.fetchState()
			if err != nil {
				log.Errorf("Failed to poll for on/off state %s", err)
			}
			time.Sleep(10 * time.Second)
		}
	}()
	return nil
}
// setState sends a set-on/off-state request to the device (unicast by its
// IEEE address) and, on success, re-reads the state so the exported channel
// stays in sync.
func (c *OnOffChannel) setState(state *gateway.GwOnOffStateT) error {
	request := &gateway.DevSetOnOffStateReq{
		DstAddress: &gateway.GwAddressStructT{
			AddressType: gateway.GwAddressTypeT_UNICAST.Enum(),
			IeeeAddr: c.device.deviceInfo.IeeeAddress,
		},
		State: state,
	}
	response := &gateway.GwZigbeeGenericRspInd{}
	// 2s timeout for the set; the follow-up fetchState uses a longer 10s.
	err := c.device.driver.gatewayConn.SendAsyncCommand(request, response, 2*time.Second)
	if err != nil {
		return fmt.Errorf("Error setting on/off state : %s", err)
	}
	if response.Status.String() != "STATUS_SUCCESS" {
		return fmt.Errorf("Failed to set on/off state. status: %s", response.Status.String())
	}
	return c.fetchState()
}
// fetchState queries the device's current on/off state over the gateway and
// forwards it to the exported channel.
//
// Returns an error when the async command fails, when the gateway reports a
// non-success status, or when the response carries no state value.
func (c *OnOffChannel) fetchState() error {
	request := &gateway.DevGetOnOffStateReq{
		DstAddress: &gateway.GwAddressStructT{
			AddressType: gateway.GwAddressTypeT_UNICAST.Enum(),
			IeeeAddr: c.device.deviceInfo.IeeeAddress,
		},
	}
	response := &gateway.DevGetOnOffStateRspInd{}
	// Assertions added to pinpoint a nil-pointer crash reported in the
	// field (see bugsnag link in the commit message).
	if c.device.driver == nil {
		log.Fatalf("assertion failed: c.device.driver != nil")
	}
	if c.device.driver.gatewayConn == nil {
		log.Fatalf("assertion failed: c.device.driver.gatewayConn != nil")
	}
	err := c.device.driver.gatewayConn.SendAsyncCommand(request, response, 10*time.Second)
	if err != nil {
		return fmt.Errorf("Error getting on/off state : %s", err)
	}
	if response.Status.String() != "STATUS_SUCCESS" {
		return fmt.Errorf("Failed to get on/off state. status: %s", response.Status.String())
	}
	// Guard the dereference below: a success status with a missing
	// StateValue would otherwise panic the driver.
	if response.StateValue == nil {
		return fmt.Errorf("Got success status but no on/off state value in response")
	}
	c.channel.SendState(*response.StateValue == gateway.GwOnOffStateValueT_ON)
	return nil
}
|
// Package canvas is responsible to convert the ascii symbols to hand drawn diagrams.
// It implements the basic canvas drawing operations like moveTo, lineTo, fillText.
// The Draw method signature declared in the Drawer interface implements the method separately on Line and Text struct.
package canvas
import (
"math"
"math/rand"
"github.com/fogleman/gg"
)
// Canvas defines the canvas basic elements.
type Canvas struct {
	*gg.Context         // underlying drawing context
	font string         // path of the font face loaded into the context
	lineWidth float64   // stroke width used when drawing lines
}

// Drawer interface defines the Canvas drawing method.
// Struct needs to implement the Draw method.
type Drawer interface {
	Draw(*Canvas)
}

// CellSize defines symbol's cell size (pixels per grid cell).
const CellSize float64 = 20

// NewCanvas is a constructor method, which instantiates a new Canvas element.
// It panics when the font face cannot be loaded at size 20.
func NewCanvas(ctx *gg.Context, font string, lineWidth float64) *Canvas {
	if err := ctx.LoadFontFace(font, 20); err != nil {
		panic(err)
	}
	ctx.SetLineWidth(lineWidth)
	return &Canvas{ctx, font, lineWidth}
}

// Current pen position shared by moveTo/lineTo.
// NOTE(review): package-level state — concurrent use of multiple Canvas
// values would interleave cursors; confirm single-canvas usage.
var _x0, _y0 float64

// moveTo move the pointer to (x0,y0) position without drawing.
func (ctx *Canvas) moveTo(x0, y0 float64) {
	_x0 = x0
	_y0 = y0
}

// lineTo move the pointer to (x1,y1) position, drawing a shaky line from
// the current pen position on the way.
func (ctx *Canvas) lineTo(x1, y1 float64) {
	ctx.shakyLine(_x0, _y0, x1, y1)
	ctx.moveTo(x1, y1)
}
// shakyLine draw a shaky line between (x0, y0) and (x1, y1).
func (ctx *Canvas) shakyLine(x0, y0, x1, y1 float64) {
	var dx, dy float64
	var k1, k2, l3, l4, x3, y3, x4, y4 float64
	dx = x1 - x0
	dy = y1 - y0
	// l is the segment length; K scales the perpendicular wobble with it.
	// NOTE(review): l == 0 (zero-length segment) makes dy/l below NaN —
	// confirm callers never draw zero-length lines.
	l := math.Sqrt(dx*dx + dy*dy)
	// Pick two random points that are placed on different sides of the line that passes through.
	K := math.Sqrt(l) / 1.5
	k1 = rand.Float64()
	k2 = rand.Float64()
	l3 = rand.Float64() * K
	l4 = rand.Float64() * K
	// Pick a random point near the segment, offset perpendicular by l3.
	x3 = x0 + dx*k1 + dy/l*l3
	y3 = y0 + dy*k1 - dx/l*l3
	// Pick a second random point, offset to the opposite side by l4.
	x4 = x0 + dx*k2 - dy/l*l4
	y4 = y0 + dy*k2 + dx/l*l4
	// Draw a bezier curve through the four selected points.
	ctx.MoveTo(x0, y0)
	ctx.CubicTo(x3, y3, x4, y4, x1, y1)
}
// bulb draws a shaky bulb (used for line endings): three filled circles of
// radius 5 around (x0, y0).
// NOTE(review): fuzziness is computed once, so all three circles share the
// same offset — confirm whether a per-iteration offset was intended.
func (ctx *Canvas) bulb(x0, y0 float64) {
	fuzziness := random()*2 - 1
	for i := 0; i < 3; i++ {
		ctx.DrawArc(x0+fuzziness, y0+fuzziness, 5, 0, math.Pi*2)
		ctx.ClosePath()
		ctx.Fill()
	}
}

// arrowHead draws a shaky arrowhead at the (x1, y1) as an ending
// for the line from (x0, y0) to (x1, y1).
// NOTE(review): math.Atan cannot distinguish quadrants, so for dx < 0 with
// dy != 0 the head is mirrored; math.Atan2(dy, dx) would cover all quadrants
// (including the dy == 0 special cases below) — confirm and fix.
func (ctx *Canvas) arrowHead(x0, y0, x1, y1 float64) {
	// Direction from the tip (x1, y1) back toward the start of the line.
	dx := x0 - x1
	dy := y0 - y1
	alpha := math.Atan(dy / dx)
	if dy == 0 {
		if dx < 0 {
			alpha = -math.Pi
		} else {
			alpha = 0
		}
	}
	// The two barbs sit at +/- 0.5 rad around the incoming direction,
	// each 20 units long, stroked back to the tip.
	alpha3 := alpha + 0.5
	alpha4 := alpha - 0.5
	l3 := float64(20.0)
	x3 := x1 + l3*math.Cos(alpha3)
	y3 := y1 + l3*math.Sin(alpha3)
	ctx.moveTo(x3, y3)
	ctx.lineTo(x1, y1)
	ctx.Stroke()
	l4 := float64(20.0)
	x4 := x1 + l4*math.Cos(alpha4)
	y4 := y1 + l4*math.Sin(alpha4)
	ctx.moveTo(x4, y4)
	ctx.lineTo(x1, y1)
	ctx.Stroke()
}
// fillText fill out the text at pixel position (x0, y0).
func (ctx *Canvas) fillText(text string, x0, y0 float64) {
	ctx.DrawString(text, x0, y0)
}

// Draw draws the text annotation at (x0, y0) with the given color.
// Cell coordinates are converted to pixels via X/Y; the extra 0.5 cell on y
// shifts the baseline down within the cell.
func (text *Text) Draw(ctx *Canvas) {
	ctx.SetHexColor(text.color)
	ctx.fillText(text.text, X(float64(text.x0)), Y(float64(text.y0)+0.5))
}
// Draw draws a line from (x0, y0) to (x1, y1) with the given color,
// then draws the configured ending ("circle" or "arrow") at each end.
func (line *Line) Draw(ctx *Canvas) {
	ctx.SetHexColor(line.color)
	ctx.SetLineWidth(ctx.lineWidth)
	ctx.moveTo(X(float64(line.x0)), Y(float64(line.y0)))
	ctx.lineTo(X(float64(line.x1)), Y(float64(line.y1)))
	ctx.Stroke()
	// Draw given type of ending on the (x1, y1).
	_ending := func(ctx *Canvas, typ string, x0, y0, x1, y1 float64) {
		switch typ {
		case "circle":
			ctx.bulb(x1, y1)
			return
		case "arrow":
			ctx.arrowHead(x0, y0, x1, y1)
			return
		}
	}
	// For the start ending the endpoints are passed swapped on purpose, so
	// an arrowhead at (x0, y0) points away from the line.
	_ending(ctx, line.start, X(float64(line.x1)), Y(float64(line.y1)), X(float64(line.x0)), Y(float64(line.y0)))
	_ending(ctx, line.end, X(float64(line.x0)), Y(float64(line.y0)), X(float64(line.x1)), Y(float64(line.y1)))
}
// X maps a grid column to the pixel x coordinate of the cell's center.
func X(x float64) float64 {
	const half = CellSize / 2
	return x*CellSize + half
}

// Y maps a grid row to the pixel y coordinate of the cell's center.
func Y(y float64) float64 {
	const half = CellSize / 2
	return y*CellSize + half
}
fix typo
// Package canvas is responsible to convert the ascii symbols to hand drawn diagrams.
// It implements the basic canvas drawing operations like moveTo, lineTo, fillText.
// The Draw method signature declared in the Drawer interface implements the method separately on Line and Text struct.
package canvas
import (
"math"
"math/rand"
"github.com/fogleman/gg"
)
// Canvas defines the canvas basic elements.
type Canvas struct {
	*gg.Context         // underlying drawing context
	font string         // path of the font face loaded into the context
	lineWidth float64   // stroke width used when drawing lines
}

// Drawer interface defines the Canvas drawing method.
// Struct needs to implement the Draw method.
type Drawer interface {
	Draw(*Canvas)
}

// CellSize defines symbol's cell size (pixels per grid cell).
const CellSize float64 = 20

// NewCanvas is a constructor method, which instantiates a new Canvas element.
// It panics when the font face cannot be loaded at size 20.
func NewCanvas(ctx *gg.Context, font string, lineWidth float64) *Canvas {
	if err := ctx.LoadFontFace(font, 20); err != nil {
		panic(err)
	}
	ctx.SetLineWidth(lineWidth)
	return &Canvas{ctx, font, lineWidth}
}

// Current pen position shared by moveTo/lineTo.
// NOTE(review): package-level state — concurrent use of multiple Canvas
// values would interleave cursors; confirm single-canvas usage.
var _x0, _y0 float64

// moveTo move the pointer to (x0,y0) position without drawing.
func (ctx *Canvas) moveTo(x0, y0 float64) {
	_x0 = x0
	_y0 = y0
}

// lineTo move the pointer to (x1,y1) position, drawing a shaky line from
// the current pen position on the way.
func (ctx *Canvas) lineTo(x1, y1 float64) {
	ctx.shakyLine(_x0, _y0, x1, y1)
	ctx.moveTo(x1, y1)
}
// shakyLine draw a shaky line between (x0, y0) and (x1, y1).
func (ctx *Canvas) shakyLine(x0, y0, x1, y1 float64) {
	var dx, dy float64
	var k1, k2, l3, l4, x3, y3, x4, y4 float64
	dx = x1 - x0
	dy = y1 - y0
	// l is the segment length; K scales the perpendicular wobble with it.
	// NOTE(review): l == 0 (zero-length segment) makes dy/l below NaN —
	// confirm callers never draw zero-length lines.
	l := math.Sqrt(dx*dx + dy*dy)
	// Pick two random points that are placed on different sides of the line that passes through.
	K := math.Sqrt(l) / 1.5
	k1 = rand.Float64()
	k2 = rand.Float64()
	l3 = rand.Float64() * K
	l4 = rand.Float64() * K
	// Pick a random point near the segment, offset perpendicular by l3.
	x3 = x0 + dx*k1 + dy/l*l3
	y3 = y0 + dy*k1 - dx/l*l3
	// Pick a second random point, offset to the opposite side by l4.
	x4 = x0 + dx*k2 - dy/l*l4
	y4 = y0 + dy*k2 + dx/l*l4
	// Draw a bezier curve through the four selected points.
	ctx.MoveTo(x0, y0)
	ctx.CubicTo(x3, y3, x4, y4, x1, y1)
}
// bulb draws a shaky bulb (used for line endings): three filled circles of
// radius 5 around (x0, y0).
// NOTE(review): fuzziness is computed once, so all three circles share the
// same offset — confirm whether a per-iteration offset was intended.
func (ctx *Canvas) bulb(x0, y0 float64) {
	fuzziness := random()*2 - 1
	for i := 0; i < 3; i++ {
		ctx.DrawArc(x0+fuzziness, y0+fuzziness, 5, 0, math.Pi*2)
		ctx.ClosePath()
		ctx.Fill()
	}
}
// arrowHead draws a shaky arrowhead at the (x1, y1) as an ending
// for the line from (x0, y0) to (x1, y1). The two barbs are stroked back to
// the tip at +/- 0.5 rad around the direction of the incoming line.
func (ctx *Canvas) arrowHead(x0, y0, x1, y1 float64) {
	// Direction from the tip (x1, y1) back toward the start of the line.
	dx := x0 - x1
	dy := y0 - y1
	// Fix: Atan2 resolves the full quadrant. The previous math.Atan(dy/dx)
	// mirrored the head whenever dx < 0 with dy != 0 and produced NaN for a
	// zero-length line. It also subsumes the old dy == 0 special cases:
	// Atan2(0, dx>0) = 0 and Atan2(0, dx<0) = pi, which draws identically
	// to the previous -pi.
	alpha := math.Atan2(dy, dx)
	alpha3 := alpha + 0.5
	alpha4 := alpha - 0.5
	l3 := float64(20.0)
	x3 := x1 + l3*math.Cos(alpha3)
	y3 := y1 + l3*math.Sin(alpha3)
	ctx.moveTo(x3, y3)
	ctx.lineTo(x1, y1)
	ctx.Stroke()
	l4 := float64(20.0)
	x4 := x1 + l4*math.Cos(alpha4)
	y4 := y1 + l4*math.Sin(alpha4)
	ctx.moveTo(x4, y4)
	ctx.lineTo(x1, y1)
	ctx.Stroke()
}
// fillText fill out the text at pixel position (x0, y0).
func (ctx *Canvas) fillText(text string, x0, y0 float64) {
	ctx.DrawString(text, x0, y0)
}

// Draw draws the text annotation at (x0, y0) with the given color.
// Cell coordinates are converted to pixels via X/Y; the extra 0.5 cell on y
// shifts the baseline down within the cell.
func (text *Text) Draw(ctx *Canvas) {
	ctx.SetHexColor(text.color)
	ctx.fillText(text.text, X(float64(text.x0)), Y(float64(text.y0)+0.5))
}
// Draw draws a line from (x0, y0) to (x1, y1) with the given color,
// then draws the configured ending ("circle" or "arrow") at each end.
func (line *Line) Draw(ctx *Canvas) {
	ctx.SetHexColor(line.color)
	ctx.SetLineWidth(ctx.lineWidth)
	ctx.moveTo(X(float64(line.x0)), Y(float64(line.y0)))
	ctx.lineTo(X(float64(line.x1)), Y(float64(line.y1)))
	ctx.Stroke()
	// Draw given type of ending on the (x1, y1).
	_ending := func(ctx *Canvas, typ string, x0, y0, x1, y1 float64) {
		switch typ {
		case "circle":
			ctx.bulb(x1, y1)
			return
		case "arrow":
			ctx.arrowHead(x0, y0, x1, y1)
			return
		}
	}
	// For the start ending the endpoints are passed swapped on purpose, so
	// an arrowhead at (x0, y0) points away from the line.
	_ending(ctx, line.start, X(float64(line.x1)), Y(float64(line.y1)), X(float64(line.x0)), Y(float64(line.y0)))
	_ending(ctx, line.end, X(float64(line.x0)), Y(float64(line.y0)), X(float64(line.x1)), Y(float64(line.y1)))
}
// X maps a grid column to the pixel x coordinate of the cell's center.
func X(x float64) float64 {
	const half = CellSize / 2
	return x*CellSize + half
}

// Y maps a grid row to the pixel y coordinate of the cell's center.
func Y(y float64) float64 {
	const half = CellSize / 2
	return y*CellSize + half
}
|
package google
import (
"fmt"
"log"
"strings"
"time"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"google.golang.org/api/googleapi"
"google.golang.org/api/serviceusage/v1"
)
// These services can only be enabled as a side-effect of enabling other services,
// so don't bother storing them in the config or using them for diffing.
var ignoredProjectServices = []string{"dataproc-control.googleapis.com", "source.googleapis.com", "stackdriverprovisioning.googleapis.com"}
var ignoredProjectServicesSet = golangSetFromStringSlice(ignoredProjectServices)
// Services that can't be user-specified but are otherwise valid. Renamed
// services should be added to this set during major releases.
var bannedProjectServices = []string{"bigquery-json.googleapis.com"}
// Service Renames
// we expect when a service is renamed:
// - both service names will continue to be able to be set
// - setting one will effectively enable the other as a dependent
// - GET will return whichever service name is requested
// - LIST responses will not contain the old service name
// renames may be reverted, though, so we should canonicalise both ways until
// the old service is fully removed from the provider
//
// We handle service renames in the provider by pretending that we've read both
// the old and new service names from the API if we see either, and only setting
// the one(s) that existed in prior state in config (if any). If neither exists,
// we'll set the old service name in state.
// Additionally, in case of service rename rollbacks or unexpected early
// removals of services, if we fail to create or delete a service that's been
// renamed we'll retry using an alternate name.
// We try creation by the user-specified value followed by the other value.
// We try deletion by the old value followed by the new value.
// map from old -> new names of services that have been renamed
// these should be removed during major provider versions. comment here with
// "DEPRECATED FOR {{version}} next to entries slated for removal in {{version}}
// upon removal, we should disallow the old name from being used even if it's
// not gone from the underlying API yet
var renamedServices = map[string]string{
"bigquery-json.googleapis.com": "bigquery.googleapis.com", // DEPRECATED FOR 4.0.0. Originally for 3.0.0, but the migration did not happen server-side yet.
}
// renamedServices in reverse (new -> old)
var renamedServicesByNewServiceNames = reverseStringMap(renamedServices)
// renamedServices expressed as both old -> new and new -> old
var renamedServicesByOldAndNewServiceNames = mergeStringMaps(renamedServices, renamedServicesByNewServiceNames)
// Maximum number of services batched into one Service Usage request.
const maxServiceUsageBatchSize = 20

// resourceGoogleProjectService defines the google_project_service resource:
// enabling/disabling a single API service on a project.
func resourceGoogleProjectService() *schema.Resource {
	return &schema.Resource{
		Create: resourceGoogleProjectServiceCreate,
		Read:   resourceGoogleProjectServiceRead,
		Delete: resourceGoogleProjectServiceDelete,
		Update: resourceGoogleProjectServiceUpdate,
		Importer: &schema.ResourceImporter{
			State: resourceGoogleProjectServiceImport,
		},
		Timeouts: &schema.ResourceTimeout{
			Create: schema.DefaultTimeout(20 * time.Minute),
			Update: schema.DefaultTimeout(20 * time.Minute),
			Read:   schema.DefaultTimeout(10 * time.Minute),
			Delete: schema.DefaultTimeout(20 * time.Minute),
		},
		Schema: map[string]*schema.Schema{
			// Service name, e.g. "compute.googleapis.com". Ignored and
			// banned services are rejected by the ValidateFunc.
			"service": {
				Type:         schema.TypeString,
				Required:     true,
				ForceNew:     true,
				ValidateFunc: StringNotInSlice(append(ignoredProjectServices, bannedProjectServices...), false),
			},
			// Computed from the provider configuration when unset.
			"project": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},
			// When true, disabling this service also disables dependents.
			"disable_dependent_services": {
				Type:     schema.TypeBool,
				Optional: true,
			},
			// When false, destroying the resource leaves the service enabled.
			"disable_on_destroy": {
				Type:     schema.TypeBool,
				Optional: true,
				Default:  true,
			},
		},
	}
}
// resourceGoogleProjectServiceImport parses an import ID of the form
// "{project}/{service}" into the resource's fields.
func resourceGoogleProjectServiceImport(d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) {
	parts := strings.Split(d.Id(), "/")
	if len(parts) != 2 {
		return nil, fmt.Errorf("Invalid google_project_service id format for import, expecting `{project}/{service}`, found %s", d.Id())
	}
	d.Set("project", parts[0])
	d.Set("service", parts[1])
	return []*schema.ResourceData{d}, nil
}

// resourceGoogleProjectServiceCreate enables the requested service on the
// project (via the batched enable helper), unless a batched read shows it is
// already enabled.
func resourceGoogleProjectServiceCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	project, err := getProject(d, config)
	if err != nil {
		return err
	}
	srv := d.Get("service").(string)
	id, err := replaceVars(d, config, "{{project}}/{{service}}")
	if err != nil {
		return fmt.Errorf("unable to construct ID: %s", err)
	}
	// Check if the service has already been enabled
	servicesRaw, err := BatchRequestReadServices(project, d, config)
	if err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("Project Service %s", d.Id()))
	}
	servicesList := servicesRaw.(map[string]struct{})
	if _, ok := servicesList[srv]; ok {
		// Already enabled: just record the resource in state.
		log.Printf("[DEBUG] service %s was already found to be enabled in project %s", srv, project)
		d.SetId(id)
		d.Set("project", project)
		d.Set("service", srv)
		return nil
	}
	err = BatchRequestEnableService(srv, project, d, config)
	if err != nil {
		return err
	}
	d.SetId(id)
	return resourceGoogleProjectServiceRead(d, meta)
}
// resourceGoogleProjectServiceRead refreshes state for the service.
//
// Fix: a failure (e.g. 404) from the project Get was previously returned
// raw, failing the read on a deleted project instead of removing the
// resource from state. Both a Get error and a DELETE_REQUESTED project are
// now routed through handleNotFoundError.
func resourceGoogleProjectServiceRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	project, err := getProject(d, config)
	if err != nil {
		return err
	}
	// Verify project for services still exists
	p, err := config.clientResourceManager.Projects.Get(project).Do()
	if err == nil && p.LifecycleState == "DELETE_REQUESTED" {
		// Construct a 404 error for handleNotFoundError
		err = &googleapi.Error{
			Code:    404,
			Message: "Project deletion was requested",
		}
	}
	if err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("Project Service %s", d.Id()))
	}
	servicesRaw, err := BatchRequestReadServices(project, d, config)
	if err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("Project Service %s", d.Id()))
	}
	servicesList := servicesRaw.(map[string]struct{})
	srv := d.Get("service").(string)
	if _, ok := servicesList[srv]; ok {
		d.Set("project", project)
		d.Set("service", srv)
		return nil
	}
	// The service was not found in enabled services - remove it from state
	log.Printf("[DEBUG] service %s not in enabled services for project %s, removing from state", srv, project)
	d.SetId("")
	return nil
}
// resourceGoogleProjectServiceDelete disables the service on destroy unless
// disable_on_destroy is false, in which case the resource is only removed
// from state.
func resourceGoogleProjectServiceDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	if disable := d.Get("disable_on_destroy"); !(disable.(bool)) {
		log.Printf("[WARN] Project service %q disable_on_destroy is false, skip disabling service", d.Id())
		d.SetId("")
		return nil
	}
	project, err := getProject(d, config)
	if err != nil {
		return err
	}
	service := d.Get("service").(string)
	disableDependencies := d.Get("disable_dependent_services").(bool)
	if err = disableServiceUsageProjectService(service, project, d, config, disableDependencies); err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("Project Service %s", d.Id()))
	}
	d.SetId("")
	return nil
}
// resourceGoogleProjectServiceUpdate is intentionally empty.
func resourceGoogleProjectServiceUpdate(d *schema.ResourceData, meta interface{}) error {
	// This update method is no-op because the only updatable fields
	// are state/config-only, i.e. they aren't sent in requests to the API.
	return nil
}

// Disables a project service.
// disableServiceUsageProjectService calls the Service Usage API to disable
// the service (optionally with dependents), waits for the operation, and
// retries within the resource's delete timeout.
func disableServiceUsageProjectService(service, project string, d *schema.ResourceData, config *Config, disableDependentServices bool) error {
	err := retryTimeDuration(func() error {
		name := fmt.Sprintf("projects/%s/services/%s", project, service)
		sop, err := config.clientServiceUsage.Services.Disable(name, &serviceusage.DisableServiceRequest{
			DisableDependentServices: disableDependentServices,
		}).Do()
		if err != nil {
			return err
		}
		// Wait for the operation to complete
		waitErr := serviceUsageOperationWait(config, sop, project, "api to disable")
		if waitErr != nil {
			return waitErr
		}
		return nil
	}, d.Timeout(schema.TimeoutDelete))
	if err != nil {
		return fmt.Errorf("Error disabling service %q for project %q: %v", service, project, err)
	}
	return nil
}
treat project 404s correctly in project service read (#3354)
package google
import (
"fmt"
"log"
"strings"
"time"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"google.golang.org/api/googleapi"
"google.golang.org/api/serviceusage/v1"
)
// These services can only be enabled as a side-effect of enabling other services,
// so don't bother storing them in the config or using them for diffing.
var ignoredProjectServices = []string{"dataproc-control.googleapis.com", "source.googleapis.com", "stackdriverprovisioning.googleapis.com"}
var ignoredProjectServicesSet = golangSetFromStringSlice(ignoredProjectServices)
// Services that can't be user-specified but are otherwise valid. Renamed
// services should be added to this set during major releases.
var bannedProjectServices = []string{"bigquery-json.googleapis.com"}
// Service Renames
// we expect when a service is renamed:
// - both service names will continue to be able to be set
// - setting one will effectively enable the other as a dependent
// - GET will return whichever service name is requested
// - LIST responses will not contain the old service name
// renames may be reverted, though, so we should canonicalise both ways until
// the old service is fully removed from the provider
//
// We handle service renames in the provider by pretending that we've read both
// the old and new service names from the API if we see either, and only setting
// the one(s) that existed in prior state in config (if any). If neither exists,
// we'll set the old service name in state.
// Additionally, in case of service rename rollbacks or unexpected early
// removals of services, if we fail to create or delete a service that's been
// renamed we'll retry using an alternate name.
// We try creation by the user-specified value followed by the other value.
// We try deletion by the old value followed by the new value.
// map from old -> new names of services that have been renamed
// these should be removed during major provider versions. comment here with
// "DEPRECATED FOR {{version}} next to entries slated for removal in {{version}}
// upon removal, we should disallow the old name from being used even if it's
// not gone from the underlying API yet
var renamedServices = map[string]string{
"bigquery-json.googleapis.com": "bigquery.googleapis.com", // DEPRECATED FOR 4.0.0. Originally for 3.0.0, but the migration did not happen server-side yet.
}
// renamedServices in reverse (new -> old)
var renamedServicesByNewServiceNames = reverseStringMap(renamedServices)
// renamedServices expressed as both old -> new and new -> old
var renamedServicesByOldAndNewServiceNames = mergeStringMaps(renamedServices, renamedServicesByNewServiceNames)
// Maximum number of services batched into one Service Usage request.
const maxServiceUsageBatchSize = 20

// resourceGoogleProjectService defines the google_project_service resource:
// enabling/disabling a single API service on a project.
func resourceGoogleProjectService() *schema.Resource {
	return &schema.Resource{
		Create: resourceGoogleProjectServiceCreate,
		Read:   resourceGoogleProjectServiceRead,
		Delete: resourceGoogleProjectServiceDelete,
		Update: resourceGoogleProjectServiceUpdate,
		Importer: &schema.ResourceImporter{
			State: resourceGoogleProjectServiceImport,
		},
		Timeouts: &schema.ResourceTimeout{
			Create: schema.DefaultTimeout(20 * time.Minute),
			Update: schema.DefaultTimeout(20 * time.Minute),
			Read:   schema.DefaultTimeout(10 * time.Minute),
			Delete: schema.DefaultTimeout(20 * time.Minute),
		},
		Schema: map[string]*schema.Schema{
			// Service name, e.g. "compute.googleapis.com". Ignored and
			// banned services are rejected by the ValidateFunc.
			"service": {
				Type:         schema.TypeString,
				Required:     true,
				ForceNew:     true,
				ValidateFunc: StringNotInSlice(append(ignoredProjectServices, bannedProjectServices...), false),
			},
			// Computed from the provider configuration when unset.
			"project": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},
			// When true, disabling this service also disables dependents.
			"disable_dependent_services": {
				Type:     schema.TypeBool,
				Optional: true,
			},
			// When false, destroying the resource leaves the service enabled.
			"disable_on_destroy": {
				Type:     schema.TypeBool,
				Optional: true,
				Default:  true,
			},
		},
	}
}
// resourceGoogleProjectServiceImport parses an import ID of the form
// "{project}/{service}" into the resource's fields.
func resourceGoogleProjectServiceImport(d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) {
	parts := strings.Split(d.Id(), "/")
	if len(parts) != 2 {
		return nil, fmt.Errorf("Invalid google_project_service id format for import, expecting `{project}/{service}`, found %s", d.Id())
	}
	d.Set("project", parts[0])
	d.Set("service", parts[1])
	return []*schema.ResourceData{d}, nil
}

// resourceGoogleProjectServiceCreate enables the requested service on the
// project (via the batched enable helper), unless a batched read shows it is
// already enabled.
func resourceGoogleProjectServiceCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	project, err := getProject(d, config)
	if err != nil {
		return err
	}
	srv := d.Get("service").(string)
	id, err := replaceVars(d, config, "{{project}}/{{service}}")
	if err != nil {
		return fmt.Errorf("unable to construct ID: %s", err)
	}
	// Check if the service has already been enabled
	servicesRaw, err := BatchRequestReadServices(project, d, config)
	if err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("Project Service %s", d.Id()))
	}
	servicesList := servicesRaw.(map[string]struct{})
	if _, ok := servicesList[srv]; ok {
		// Already enabled: just record the resource in state.
		log.Printf("[DEBUG] service %s was already found to be enabled in project %s", srv, project)
		d.SetId(id)
		d.Set("project", project)
		d.Set("service", srv)
		return nil
	}
	err = BatchRequestEnableService(srv, project, d, config)
	if err != nil {
		return err
	}
	d.SetId(id)
	return resourceGoogleProjectServiceRead(d, meta)
}
// resourceGoogleProjectServiceRead refreshes state for the service. Both a
// failed project Get and a project in DELETE_REQUESTED state are routed
// through handleNotFoundError so a gone project removes the resource from
// state instead of failing the read.
func resourceGoogleProjectServiceRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	project, err := getProject(d, config)
	if err != nil {
		return err
	}
	// Verify project for services still exists
	p, err := config.clientResourceManager.Projects.Get(project).Do()
	if err == nil && p.LifecycleState == "DELETE_REQUESTED" {
		// Construct a 404 error for handleNotFoundError
		err = &googleapi.Error{
			Code:    404,
			Message: "Project deletion was requested",
		}
	}
	if err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("Project Service %s", d.Id()))
	}
	servicesRaw, err := BatchRequestReadServices(project, d, config)
	if err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("Project Service %s", d.Id()))
	}
	servicesList := servicesRaw.(map[string]struct{})
	srv := d.Get("service").(string)
	if _, ok := servicesList[srv]; ok {
		d.Set("project", project)
		d.Set("service", srv)
		return nil
	}
	// The service was not found in enabled services - remove it from state
	log.Printf("[DEBUG] service %s not in enabled services for project %s, removing from state", srv, project)
	d.SetId("")
	return nil
}
// resourceGoogleProjectServiceDelete disables the service on destroy unless
// disable_on_destroy is false, in which case the resource is only removed
// from state.
func resourceGoogleProjectServiceDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	if disable := d.Get("disable_on_destroy"); !(disable.(bool)) {
		log.Printf("[WARN] Project service %q disable_on_destroy is false, skip disabling service", d.Id())
		d.SetId("")
		return nil
	}
	project, err := getProject(d, config)
	if err != nil {
		return err
	}
	service := d.Get("service").(string)
	disableDependencies := d.Get("disable_dependent_services").(bool)
	if err = disableServiceUsageProjectService(service, project, d, config, disableDependencies); err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("Project Service %s", d.Id()))
	}
	d.SetId("")
	return nil
}
// resourceGoogleProjectServiceUpdate is intentionally empty.
func resourceGoogleProjectServiceUpdate(d *schema.ResourceData, meta interface{}) error {
	// This update method is no-op because the only updatable fields
	// are state/config-only, i.e. they aren't sent in requests to the API.
	return nil
}

// Disables a project service.
// disableServiceUsageProjectService calls the Service Usage API to disable
// the service (optionally with dependents), waits for the operation, and
// retries within the resource's delete timeout.
func disableServiceUsageProjectService(service, project string, d *schema.ResourceData, config *Config, disableDependentServices bool) error {
	err := retryTimeDuration(func() error {
		name := fmt.Sprintf("projects/%s/services/%s", project, service)
		sop, err := config.clientServiceUsage.Services.Disable(name, &serviceusage.DisableServiceRequest{
			DisableDependentServices: disableDependentServices,
		}).Do()
		if err != nil {
			return err
		}
		// Wait for the operation to complete
		waitErr := serviceUsageOperationWait(config, sop, project, "api to disable")
		if waitErr != nil {
			return waitErr
		}
		return nil
	}, d.Timeout(schema.TimeoutDelete))
	if err != nil {
		return fmt.Errorf("Error disabling service %q for project %q: %v", service, project, err)
	}
	return nil
}
|
package main
import (
"github.com/thehowl/go-osuapi"
"github.com/bwmarrin/discordgo"
"fmt"
"strings"
"strconv"
)
// checkBeatmapLink looks for exactly one osu! beatmap link in a Discord
// message — a mapset link ("https://osu.ppy.sh/s/<id>") or a difficulty
// link ("https://osu.ppy.sh/b/<id>") — fetches the map through the osu! API
// client and replies with an embed summarising the highest-starred
// difficulty. Intended for use as a discordgo MessageCreate handler.
func checkBeatmapLink(s *discordgo.Session, m *discordgo.MessageCreate) {
	//check if message contains only one beatmap link
	BeatmapSet := strings.Count(m.Content, "https://osu.ppy.sh/s/")
	Beatmap := strings.Count(m.Content, "https://osu.ppy.sh/b/")
	if BeatmapSet + Beatmap != 1 {
		return
	}
	id := 0
	//check if message contains beatmap set
	if BeatmapSet == 1 {
		//get beatmap id in the message: take the text right after the
		//21-byte URL prefix, up to the next space
		tmp_id, err := strconv.Atoi(strings.Split(string(m.Content[strings.Index(m.Content,"https://osu.ppy.sh/s/")+21:])," ")[0])
		if err != nil {
			// NOTE(review): "ocurred"/"fecthing" are misspelled in these
			// log messages; left untouched as they are runtime strings.
			fmt.Println("An error ocurred while getting beatmap id, ",err)
			return
		}
		id = tmp_id
	}
	//check if message contains specific beatmap difficulty
	if Beatmap == 1 {
		//get beatmap id in the message; the extra Split on "?" strips any
		//query parameters from the URL
		tmp_id, err := strconv.Atoi(strings.Split(strings.Split(string(m.Content[strings.Index(m.Content,"https://osu.ppy.sh/b/")+21:])," ")[0],"?")[0])
		if err != nil {
			fmt.Println("An error ocurred while getting beatmap id, ",err)
			return
		}
		id = tmp_id
	}
	//return if there's no beatmap link
	if id == 0 {
		return
	}
	// Query by set id or by single beatmap id depending on the link type.
	var opts osuapi.GetBeatmapsOpts
	if BeatmapSet == 1 {
		opts = osuapi.GetBeatmapsOpts{BeatmapSetID: id}
	} else {
		opts = osuapi.GetBeatmapsOpts{BeatmapID: id}
	}
	//get beatmap info
	beatmaps, err := osu_client.GetBeatmaps(opts)
	if err != nil {
		fmt.Println("An error ocurred while fecthing beatmap, ",err)
		return
	}
	//check for empty list in case of no beatmaps found
	if len(beatmaps) == 0 {
		return
	}
	//get the highest difficulty in mapset
	beatmap := beatmaps[0]
	for i := 1; i < len(beatmaps); i++ {
		if beatmaps[i].DifficultyRating > beatmap.DifficultyRating {
			beatmap = beatmaps[i]
		}
	}
	//create the embed message to send
	message := discordgo.MessageEmbed{
		URL: "https://osu.ppy.sh/s/"+strconv.Itoa(beatmap.BeatmapSetID),
		Title: beatmap.Artist+" - "+beatmap.Title+" ["+beatmap.DiffName+"]",
		Description: "**Mode:** "+beatmap.Mode.String()+" | **Length:** "+parseTime(beatmap.TotalLength)+"\n**Star rating:** "+strconv.FormatFloat(beatmap.DifficultyRating,'f',2,64)+" | **BPM:** "+strconv.FormatFloat(beatmap.BPM,'f',2,64)+"\n**OD:** "+strconv.FormatFloat(beatmap.OverallDifficulty,'f',2,64)+" | **CS:** "+strconv.FormatFloat(beatmap.CircleSize,'f',2,64)+"\n**AR:** "+strconv.FormatFloat(beatmap.ApproachRate,'f',2,64)+" | **HP:** "+strconv.FormatFloat(beatmap.HPDrain,'f',2,64),
		Thumbnail: &discordgo.MessageEmbedThumbnail{
			URL: "https://b.ppy.sh/thumb/"+strconv.Itoa(beatmap.BeatmapSetID)+"l.jpg",
		},
		Color: 16763135, //this should be pink
		Footer: &discordgo.MessageEmbedFooter{
			Text: "Mapped by "+beatmap.Creator+" | Status: "+beatmap.Approved.String(),
		},
	}
	//send the message. finally.
	_, err = s.ChannelMessageSendEmbed(m.ChannelID, &message)
	if err != nil {
		fmt.Println("An error ocurred while sending embed message, ",err)
	}
}
// parseTime formats a duration of s seconds as zero-padded "MM:SS"
// (e.g. 9 -> "00:09", 61 -> "01:01").
//
// Fixes two defects: seconds were not zero-padded ("00:9"), and s == 60
// produced "00:60" instead of "01:00" because the branch tested s < 61.
func parseTime(s int) string {
	return fmt.Sprintf("%02d:%02d", s/60, s%60)
}
Fix beatmap linker length
Should now return 00:09 instead of 00:9
package main
import (
"github.com/thehowl/go-osuapi"
"github.com/bwmarrin/discordgo"
"fmt"
"strings"
"strconv"
)
// checkBeatmapLink looks for exactly one osu! beatmap link in a Discord
// message — a mapset link ("https://osu.ppy.sh/s/<id>") or a difficulty
// link ("https://osu.ppy.sh/b/<id>") — fetches the map through the osu! API
// client and replies with an embed summarising the highest-starred
// difficulty. Intended for use as a discordgo MessageCreate handler.
func checkBeatmapLink(s *discordgo.Session, m *discordgo.MessageCreate) {
	//check if message contains only one beatmap link
	BeatmapSet := strings.Count(m.Content, "https://osu.ppy.sh/s/")
	Beatmap := strings.Count(m.Content, "https://osu.ppy.sh/b/")
	if BeatmapSet + Beatmap != 1 {
		return
	}
	id := 0
	//check if message contains beatmap set
	if BeatmapSet == 1 {
		//get beatmap id in the message: take the text right after the
		//21-byte URL prefix, up to the next space
		tmp_id, err := strconv.Atoi(strings.Split(string(m.Content[strings.Index(m.Content,"https://osu.ppy.sh/s/")+21:])," ")[0])
		if err != nil {
			// NOTE(review): "ocurred"/"fecthing" are misspelled in these
			// log messages; left untouched as they are runtime strings.
			fmt.Println("An error ocurred while getting beatmap id, ",err)
			return
		}
		id = tmp_id
	}
	//check if message contains specific beatmap difficulty
	if Beatmap == 1 {
		//get beatmap id in the message; the extra Split on "?" strips any
		//query parameters from the URL
		tmp_id, err := strconv.Atoi(strings.Split(strings.Split(string(m.Content[strings.Index(m.Content,"https://osu.ppy.sh/b/")+21:])," ")[0],"?")[0])
		if err != nil {
			fmt.Println("An error ocurred while getting beatmap id, ",err)
			return
		}
		id = tmp_id
	}
	//return if there's no beatmap link
	if id == 0 {
		return
	}
	// Query by set id or by single beatmap id depending on the link type.
	var opts osuapi.GetBeatmapsOpts
	if BeatmapSet == 1 {
		opts = osuapi.GetBeatmapsOpts{BeatmapSetID: id}
	} else {
		opts = osuapi.GetBeatmapsOpts{BeatmapID: id}
	}
	//get beatmap info
	beatmaps, err := osu_client.GetBeatmaps(opts)
	if err != nil {
		fmt.Println("An error ocurred while fecthing beatmap, ",err)
		return
	}
	//check for empty list in case of no beatmaps found
	if len(beatmaps) == 0 {
		return
	}
	//get the highest difficulty in mapset
	beatmap := beatmaps[0]
	for i := 1; i < len(beatmaps); i++ {
		if beatmaps[i].DifficultyRating > beatmap.DifficultyRating {
			beatmap = beatmaps[i]
		}
	}
	//create the embed message to send
	message := discordgo.MessageEmbed{
		URL: "https://osu.ppy.sh/s/"+strconv.Itoa(beatmap.BeatmapSetID),
		Title: beatmap.Artist+" - "+beatmap.Title+" ["+beatmap.DiffName+"]",
		Description: "**Mode:** "+beatmap.Mode.String()+" | **Length:** "+parseTime(beatmap.TotalLength)+"\n**Star rating:** "+strconv.FormatFloat(beatmap.DifficultyRating,'f',2,64)+" | **BPM:** "+strconv.FormatFloat(beatmap.BPM,'f',2,64)+"\n**OD:** "+strconv.FormatFloat(beatmap.OverallDifficulty,'f',2,64)+" | **CS:** "+strconv.FormatFloat(beatmap.CircleSize,'f',2,64)+"\n**AR:** "+strconv.FormatFloat(beatmap.ApproachRate,'f',2,64)+" | **HP:** "+strconv.FormatFloat(beatmap.HPDrain,'f',2,64),
		Thumbnail: &discordgo.MessageEmbedThumbnail{
			URL: "https://b.ppy.sh/thumb/"+strconv.Itoa(beatmap.BeatmapSetID)+"l.jpg",
		},
		Color: 16763135, //this should be pink
		Footer: &discordgo.MessageEmbedFooter{
			Text: "Mapped by "+beatmap.Creator+" | Status: "+beatmap.Approved.String(),
		},
	}
	//send the message. finally.
	_, err = s.ChannelMessageSendEmbed(m.ChannelID, &message)
	if err != nil {
		fmt.Println("An error ocurred while sending embed message, ",err)
	}
}
// parseTime converts a duration in whole seconds to a zero-padded "MM:SS"
// string, e.g. 9 -> "00:09", 65 -> "01:05", 600 -> "10:00".
// Fixes two gaps of the previous version: minutes were not zero-padded
// ("1:05" despite the documented MM:SS format) and s == 60 rendered as
// "00:60" instead of "01:00" because of the `s < 61` branch.
func parseTime(s int) string {
	// integer division/modulo split total seconds into minutes and the
	// leftover seconds; %02d pads each component to two digits
	return fmt.Sprintf("%02d:%02d", s/60, s%60)
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package windows
import (
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
)
const (
emptyDirVolumePath = "C:\\test-volume"
hostMapPath = "C:\\tmp"
containerName = "test-container"
volumeName = "test-volume"
)
var (
image = imageutils.GetE2EImage(imageutils.Pause)
)
// Registers the [Feature:Windows] volume-mount suite: verifies that
// containers mounting emptyDir and hostPath volumes as readOnly cannot
// write to them, while a read-write container sharing the volume can.
var _ = SIGDescribe("[Feature:Windows] Windows volume mounts ", func() {
	f := framework.NewDefaultFramework("windows-volumes")
	var (
		// emptyDir volume backed by the node's default storage medium
		emptyDirSource = v1.VolumeSource{
			EmptyDir: &v1.EmptyDirVolumeSource{
				Medium: v1.StorageMediumDefault,
			},
		}
		// hostPath volume at C:\tmp, created on the node if absent
		hostPathDirectoryOrCreate = v1.HostPathDirectoryOrCreate
		hostMapSource = v1.VolumeSource{
			HostPath: &v1.HostPathVolumeSource{
				Path: hostMapPath,
				Type: &hostPathDirectoryOrCreate,
			},
		}
	)
	// these tests only make sense on Windows nodes
	ginkgo.BeforeEach(func() {
		e2eskipper.SkipUnlessNodeOSDistroIs("windows")
	})
	ginkgo.Context("check volume mount permissions", func() {
		ginkgo.It("container should have readOnly permissions on emptyDir", func() {
			ginkgo.By("creating a container with readOnly permissions on emptyDir volume")
			doReadOnlyTest(f, emptyDirSource, emptyDirVolumePath)
			ginkgo.By("creating two containers, one with readOnly permissions the other with read-write permissions on emptyDir volume")
			doReadWriteReadOnlyTest(f, emptyDirSource, emptyDirVolumePath)
		})
		ginkgo.It("container should have readOnly permissions on hostMapPath", func() {
			ginkgo.By("creating a container with readOnly permissions on hostMap volume")
			doReadOnlyTest(f, hostMapSource, hostMapPath)
			ginkgo.By("creating two containers, one with readOnly permissions the other with read-write permissions on hostMap volume")
			doReadWriteReadOnlyTest(f, hostMapSource, hostMapPath)
		})
	})
})
// doReadOnlyTest creates a pod that mounts source readOnly at volumePath on
// a Windows node and verifies that a write through the mount fails with
// "Access is denied.".
func doReadOnlyTest(f *framework.Framework, source v1.VolumeSource, volumePath string) {
	var (
		filePath = volumePath + "\\test-file.txt"
		podName = "pod-" + string(uuid.NewUUID())
		pod = testPodWithROVolume(podName, source, volumePath)
	)
	// the helper does not set a nodeSelector; pin the pod to Windows here
	pod.Spec.NodeSelector = map[string]string{
		"kubernetes.io/os": "windows",
	}
	pod = f.PodClient().CreateSync(pod)
	ginkgo.By("verifying that pod has the correct nodeSelector")
	framework.ExpectEqual(pod.Spec.NodeSelector["kubernetes.io/os"], "windows")
	// attempt to write through the readOnly mount; cmd.exe reports the
	// expected failure on stderr
	cmd := []string{"cmd", "/c", "echo windows-volume-test", ">", filePath}
	_, stderr, _ := f.ExecCommandInContainerWithFullOutput(podName, containerName, cmd...)
	framework.ExpectEqual(stderr, "Access is denied.")
}
// doReadWriteReadOnlyTest creates a pod with two containers sharing the same
// volume: "test-container" mounts it readOnly, "test-container-rw" mounts it
// read-write. It verifies that the read-write container can create a file,
// the readOnly container cannot write, and the readOnly container can read
// the file its sibling wrote.
func doReadWriteReadOnlyTest(f *framework.Framework, source v1.VolumeSource, volumePath string) {
	var (
		// unique file name per run so repeated runs against a shared host
		// path (e.g. C:\tmp) cannot collide with leftover files
		filePath        = volumePath + "\\test-file" + string(uuid.NewUUID())
		podName         = "pod-" + string(uuid.NewUUID())
		pod             = testPodWithROVolume(podName, source, volumePath)
		rwcontainerName = containerName + "-rw"
	)
	// the helper does not set a nodeSelector; pin the pod to Windows here
	pod.Spec.NodeSelector = map[string]string{
		"kubernetes.io/os": "windows",
	}
	// second container sharing the same volume, mounted read-write
	rwcontainer := v1.Container{
		Name:  rwcontainerName,
		Image: image,
		VolumeMounts: []v1.VolumeMount{
			{
				Name:      volumeName,
				MountPath: volumePath,
			},
		},
	}
	pod.Spec.Containers = append(pod.Spec.Containers, rwcontainer)
	pod = f.PodClient().CreateSync(pod)
	ginkgo.By("verifying that pod has the correct nodeSelector")
	framework.ExpectEqual(pod.Spec.NodeSelector["kubernetes.io/os"], "windows")
	writecmd := []string{"cmd", "/c", "echo windows-volume-test", ">", filePath}
	ginkgo.By("verifying that the read-write container can write to the volume")
	stdoutRW, stderrRW, errRW := f.ExecCommandInContainerWithFullOutput(podName, rwcontainerName, writecmd...)
	msg := fmt.Sprintf("cmd: %v, stdout: %q, stderr: %q", writecmd, stdoutRW, stderrRW)
	framework.ExpectNoError(errRW, msg)
	ginkgo.By("verifying that the readOnly container cannot write to the volume")
	_, stderr, _ := f.ExecCommandInContainerWithFullOutput(podName, containerName, writecmd...)
	framework.ExpectEqual(stderr, "Access is denied.")
	ginkgo.By("verifying that the readOnly container can read the file written by its sibling")
	readcmd := []string{"cmd", "/c", "type", filePath}
	readout, readerr, err := f.ExecCommandInContainerWithFullOutput(podName, containerName, readcmd...)
	readmsg := fmt.Sprintf("cmd: %v, stdout: %q, stderr: %q", readcmd, readout, readerr)
	// check the exec error before asserting on its output so a failed exec
	// reports the real failure rather than a confusing content mismatch
	framework.ExpectNoError(err, readmsg)
	framework.ExpectEqual(readout, "windows-volume-test")
}
// testPodWithROVolume makes a minimal pod with a single container that
// mounts the given volume source at path with ReadOnly set. It does NOT set
// a windows nodeSelector itself — callers (doReadOnlyTest,
// doReadWriteReadOnlyTest) are expected to add one before creating the pod.
func testPodWithROVolume(podName string, source v1.VolumeSource, path string) *v1.Pod {
	return &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind: "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: podName,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name: containerName,
					Image: image,
					VolumeMounts: []v1.VolumeMount{
						{
							Name: volumeName,
							MountPath: path,
							// the permission checks rely on this mount
							// being readOnly
							ReadOnly: true,
						},
					},
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
			Volumes: []v1.Volume{
				{
					Name: volumeName,
					VolumeSource: source,
				},
			},
		},
	}
}
Write to a unique file to avoid conflicts
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package windows
import (
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
)
const (
emptyDirVolumePath = "C:\\test-volume"
hostMapPath = "C:\\tmp"
containerName = "test-container"
volumeName = "test-volume"
)
var (
image = imageutils.GetE2EImage(imageutils.Pause)
)
// Registers the [Feature:Windows] volume-mount suite: verifies that
// containers mounting emptyDir and hostPath volumes as readOnly cannot
// write to them, while a read-write container sharing the volume can.
var _ = SIGDescribe("[Feature:Windows] Windows volume mounts ", func() {
	f := framework.NewDefaultFramework("windows-volumes")
	var (
		// emptyDir volume backed by the node's default storage medium
		emptyDirSource = v1.VolumeSource{
			EmptyDir: &v1.EmptyDirVolumeSource{
				Medium: v1.StorageMediumDefault,
			},
		}
		// hostPath volume at C:\tmp, created on the node if absent
		hostPathDirectoryOrCreate = v1.HostPathDirectoryOrCreate
		hostMapSource = v1.VolumeSource{
			HostPath: &v1.HostPathVolumeSource{
				Path: hostMapPath,
				Type: &hostPathDirectoryOrCreate,
			},
		}
	)
	// these tests only make sense on Windows nodes
	ginkgo.BeforeEach(func() {
		e2eskipper.SkipUnlessNodeOSDistroIs("windows")
	})
	ginkgo.Context("check volume mount permissions", func() {
		ginkgo.It("container should have readOnly permissions on emptyDir", func() {
			ginkgo.By("creating a container with readOnly permissions on emptyDir volume")
			doReadOnlyTest(f, emptyDirSource, emptyDirVolumePath)
			ginkgo.By("creating two containers, one with readOnly permissions the other with read-write permissions on emptyDir volume")
			doReadWriteReadOnlyTest(f, emptyDirSource, emptyDirVolumePath)
		})
		ginkgo.It("container should have readOnly permissions on hostMapPath", func() {
			ginkgo.By("creating a container with readOnly permissions on hostMap volume")
			doReadOnlyTest(f, hostMapSource, hostMapPath)
			ginkgo.By("creating two containers, one with readOnly permissions the other with read-write permissions on hostMap volume")
			doReadWriteReadOnlyTest(f, hostMapSource, hostMapPath)
		})
	})
})
// doReadOnlyTest spins up a pod on a Windows node that mounts the given
// volume source as readOnly and checks that writing into the mount is
// rejected with "Access is denied.".
func doReadOnlyTest(f *framework.Framework, source v1.VolumeSource, volumePath string) {
	name := "pod-" + string(uuid.NewUUID())
	target := volumePath + "\\test-file.txt"
	p := testPodWithROVolume(name, source, volumePath)
	// the pod helper leaves the nodeSelector to the caller
	p.Spec.NodeSelector = map[string]string{"kubernetes.io/os": "windows"}
	p = f.PodClient().CreateSync(p)
	ginkgo.By("verifying that pod has the correct nodeSelector")
	framework.ExpectEqual(p.Spec.NodeSelector["kubernetes.io/os"], "windows")
	ginkgo.By("verifying that pod will get an error when writing to a volume that is readonly")
	writeCmd := []string{"cmd", "/c", "echo windows-volume-test", ">", target}
	_, stderr, _ := f.ExecCommandInContainerWithFullOutput(name, containerName, writeCmd...)
	framework.ExpectEqual(stderr, "Access is denied.")
}
// doReadWriteReadOnlyTest creates a pod with two containers sharing the same
// volume: "test-container" mounts it readOnly, "test-container-rw" mounts it
// read-write. It verifies that the read-write container can create a file,
// the readOnly container cannot write, and the readOnly container can read
// the file its sibling wrote.
func doReadWriteReadOnlyTest(f *framework.Framework, source v1.VolumeSource, volumePath string) {
	var (
		// unique file name per run so parallel or repeated runs against a
		// shared host path (e.g. C:\tmp) do not collide
		filePath        = volumePath + "\\test-file" + string(uuid.NewUUID())
		podName         = "pod-" + string(uuid.NewUUID())
		pod             = testPodWithROVolume(podName, source, volumePath)
		rwcontainerName = containerName + "-rw"
	)
	// the pod helper leaves the nodeSelector to the caller
	pod.Spec.NodeSelector = map[string]string{
		"kubernetes.io/os": "windows",
	}
	// second container sharing the same volume, mounted read-write
	rwcontainer := v1.Container{
		Name:  rwcontainerName,
		Image: image,
		VolumeMounts: []v1.VolumeMount{
			{
				Name:      volumeName,
				MountPath: volumePath,
			},
		},
	}
	pod.Spec.Containers = append(pod.Spec.Containers, rwcontainer)
	pod = f.PodClient().CreateSync(pod)
	ginkgo.By("verifying that pod has the correct nodeSelector")
	framework.ExpectEqual(pod.Spec.NodeSelector["kubernetes.io/os"], "windows")
	ginkgo.By("verifying that pod can write to a volume with read/write access")
	writecmd := []string{"cmd", "/c", "echo windows-volume-test", ">", filePath}
	stdoutRW, stderrRW, errRW := f.ExecCommandInContainerWithFullOutput(podName, rwcontainerName, writecmd...)
	msg := fmt.Sprintf("cmd: %v, stdout: %q, stderr: %q", writecmd, stdoutRW, stderrRW)
	framework.ExpectNoError(errRW, msg)
	ginkgo.By("verifying that pod will get an error when writing to a volume that is readonly")
	_, stderr, _ := f.ExecCommandInContainerWithFullOutput(podName, containerName, writecmd...)
	framework.ExpectEqual(stderr, "Access is denied.")
	ginkgo.By("verifying that pod can read from the volume that is readonly")
	readcmd := []string{"cmd", "/c", "type", filePath}
	readout, readerr, err := f.ExecCommandInContainerWithFullOutput(podName, containerName, readcmd...)
	readmsg := fmt.Sprintf("cmd: %v, stdout: %q, stderr: %q", readcmd, readout, readerr)
	// check the exec error before asserting on its output so a failed exec
	// reports the real failure rather than a confusing content mismatch
	framework.ExpectNoError(err, readmsg)
	framework.ExpectEqual(readout, "windows-volume-test")
}
// testPodWithROVolume builds a minimal pod with a single container that
// mounts source at path with ReadOnly set. It does not set a windows
// nodeSelector itself — callers add one before creating the pod.
func testPodWithROVolume(podName string, source v1.VolumeSource, path string) *v1.Pod {
	// the lone container, mounting the volume readOnly
	container := v1.Container{
		Name:  containerName,
		Image: image,
		VolumeMounts: []v1.VolumeMount{{
			Name:      volumeName,
			MountPath: path,
			ReadOnly:  true,
		}},
	}
	// the shared test volume backed by the caller-supplied source
	volume := v1.Volume{
		Name:         volumeName,
		VolumeSource: source,
	}
	return &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: podName,
		},
		Spec: v1.PodSpec{
			Containers:    []v1.Container{container},
			RestartPolicy: v1.RestartPolicyNever,
			Volumes:       []v1.Volume{volume},
		},
	}
}
|
// +build e2e
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"encoding/base64"
"fmt"
"strings"
"testing"
"time"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
"github.com/tektoncd/pipeline/pkg/artifacts"
tb "github.com/tektoncd/pipeline/test/builder"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"knative.dev/pkg/apis"
knativetest "knative.dev/pkg/test"
)
var (
pipelineName = "pipeline"
pipelineRunName = "pipelinerun"
secretName = "secret"
saName = "service-account"
taskName = "task"
task1Name = "task1"
cond1Name = "cond-1"
pipelineRunTimeout = 10 * time.Minute
)
// TestPipelineRun exercises PipelineRun end to end over a table of
// scenarios: a fan-in/fan-out task graph, service-account and
// pipeline-param propagation, and a pipeline whose task is skipped by a
// failing condition. For each scenario it verifies that the expected
// TaskRuns succeed, the expected number of "Succeeded" events is emitted,
// labels/annotations propagate, and the artifact-storage PVC is deleted.
func TestPipelineRun(t *testing.T) {
	t.Parallel()
	type tests struct {
		name                   string
		testSetup              func(t *testing.T, c *clients, namespace string, index int)
		expectedTaskRuns       []string
		expectedNumberOfEvents int
		pipelineRunFunc        func(int, string) *v1alpha1.PipelineRun
	}
	tds := []tests{{
		name: "fan-in and fan-out",
		testSetup: func(t *testing.T, c *clients, namespace string, index int) {
			t.Helper()
			for _, task := range getFanInFanOutTasks(namespace) {
				if _, err := c.TaskClient.Create(task); err != nil {
					t.Fatalf("Failed to create Task `%s`: %s", task.Name, err)
				}
			}
			for _, res := range getFanInFanOutGitResources(namespace) {
				if _, err := c.PipelineResourceClient.Create(res); err != nil {
					// report the resource that actually failed (this used
					// to log the unrelated kanikoResourceName)
					t.Fatalf("Failed to create Pipeline Resource `%s`: %s", res.Name, err)
				}
			}
			if _, err := c.PipelineClient.Create(getFanInFanOutPipeline(index, namespace)); err != nil {
				t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err)
			}
		},
		pipelineRunFunc:  getFanInFanOutPipelineRun,
		expectedTaskRuns: []string{"create-file-kritis", "create-fan-out-1", "create-fan-out-2", "check-fan-in"},
		// 1 from PipelineRun and 4 from Tasks defined in pipelinerun
		expectedNumberOfEvents: 5,
	}, {
		name: "service account propagation and pipeline param",
		testSetup: func(t *testing.T, c *clients, namespace string, index int) {
			t.Helper()
			if _, err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Create(getPipelineRunSecret(index, namespace)); err != nil {
				t.Fatalf("Failed to create secret `%s`: %s", getName(secretName, index), err)
			}
			if _, err := c.KubeClient.Kube.CoreV1().ServiceAccounts(namespace).Create(getPipelineRunServiceAccount(index, namespace)); err != nil {
				t.Fatalf("Failed to create SA `%s`: %s", getName(saName, index), err)
			}
			task := tb.Task(getName(taskName, index), namespace, tb.TaskSpec(
				tb.TaskInputs(tb.InputsParamSpec("path", v1alpha1.ParamTypeString),
					tb.InputsParamSpec("dest", v1alpha1.ParamTypeString)),
				// Reference build: https://github.com/knative/build/tree/master/test/docker-basic
				tb.Step("config-docker", "quay.io/rhpipeline/skopeo:alpine",
					tb.StepCommand("skopeo"),
					// TODO(#1170): This test is using a mix of ${} and $() syntax to make sure both work.
					tb.StepArgs("copy", "${inputs.params.path}", "$(inputs.params.dest)"),
				),
			))
			if _, err := c.TaskClient.Create(task); err != nil {
				t.Fatalf("Failed to create Task `%s`: %s", getName(taskName, index), err)
			}
			if _, err := c.PipelineClient.Create(getHelloWorldPipelineWithSingularTask(index, namespace)); err != nil {
				t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err)
			}
		},
		expectedTaskRuns: []string{task1Name},
		// 1 from PipelineRun and 1 from Tasks defined in pipelinerun
		expectedNumberOfEvents: 2,
		pipelineRunFunc:        getHelloWorldPipelineRun,
	}, {
		name: "pipeline succeeds when task skipped due to failed condition",
		testSetup: func(t *testing.T, c *clients, namespace string, index int) {
			t.Helper()
			cond := getFailingCondition(namespace)
			if _, err := c.ConditionClient.Create(cond); err != nil {
				t.Fatalf("Failed to create Condition `%s`: %s", cond1Name, err)
			}
			task := tb.Task(getName(taskName, index), namespace, tb.TaskSpec(
				tb.Step("echo-hello", "ubuntu",
					tb.StepCommand("/bin/bash"),
					tb.StepArgs("-c", "echo hello, world"),
				),
			))
			if _, err := c.TaskClient.Create(task); err != nil {
				t.Fatalf("Failed to create Task `%s`: %s", getName(taskName, index), err)
			}
			if _, err := c.PipelineClient.Create(getPipelineWithFailingCondition(index, namespace)); err != nil {
				t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err)
			}
		},
		expectedTaskRuns: []string{},
		// 1 from PipelineRun; 0 from taskrun since it should not be executed due to condition failing
		expectedNumberOfEvents: 1,
		pipelineRunFunc:        getConditionalPipelineRun,
	}}
	for i, td := range tds {
		// copy BOTH loop variables before t.Run: the subtest body runs in
		// parallel, after this loop has advanced. The old code copied td
		// inside the closure (which only worked by timing accident) and
		// never copied i at all.
		i, td := i, td
		t.Run(td.name, func(t *testing.T) {
			t.Parallel()
			c, namespace := setup(t)
			knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf)
			defer tearDown(t, c, namespace)
			t.Logf("Setting up test resources for %q test in namespace %s", td.name, namespace)
			td.testSetup(t, c, namespace, i)
			prName := fmt.Sprintf("%s%d", pipelineRunName, i)
			pipelineRun, err := c.PipelineRunClient.Create(td.pipelineRunFunc(i, namespace))
			if err != nil {
				t.Fatalf("Failed to create PipelineRun `%s`: %s", prName, err)
			}
			t.Logf("Waiting for PipelineRun %s in namespace %s to complete", prName, namespace)
			if err := WaitForPipelineRunState(c, prName, pipelineRunTimeout, PipelineRunSucceed(prName), "PipelineRunSuccess"); err != nil {
				t.Fatalf("Error waiting for PipelineRun %s to finish: %s", prName, err)
			}
			t.Logf("Making sure the expected TaskRuns %s were created", td.expectedTaskRuns)
			actualTaskrunList, err := c.TaskRunClient.List(metav1.ListOptions{LabelSelector: fmt.Sprintf("tekton.dev/pipelineRun=%s", prName)})
			if err != nil {
				t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", prName, err)
			}
			expectedTaskRunNames := []string{}
			for _, runName := range td.expectedTaskRuns {
				taskRunName := strings.Join([]string{prName, runName}, "-")
				// check the actual task name starting with prName+runName with a random suffix
				for _, actualTaskRunItem := range actualTaskrunList.Items {
					if strings.HasPrefix(actualTaskRunItem.Name, taskRunName) {
						taskRunName = actualTaskRunItem.Name
					}
				}
				expectedTaskRunNames = append(expectedTaskRunNames, taskRunName)
				r, err := c.TaskRunClient.Get(taskRunName, metav1.GetOptions{})
				if err != nil {
					t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err)
				}
				if !r.Status.GetCondition(apis.ConditionSucceeded).IsTrue() {
					t.Fatalf("Expected TaskRun %s to have succeeded but Status is %v", taskRunName, r.Status)
				}
				t.Logf("Checking that labels were propagated correctly for TaskRun %s", r.Name)
				checkLabelPropagation(t, c, namespace, prName, r)
				t.Logf("Checking that annotations were propagated correctly for TaskRun %s", r.Name)
				checkAnnotationPropagation(t, c, namespace, prName, r)
			}
			matchKinds := map[string][]string{"PipelineRun": {prName}, "TaskRun": expectedTaskRunNames}
			t.Logf("Making sure %d events were created from taskrun and pipelinerun with kinds %v", td.expectedNumberOfEvents, matchKinds)
			events, err := collectMatchingEvents(c.KubeClient, namespace, matchKinds, "Succeeded")
			if err != nil {
				t.Fatalf("Failed to collect matching events: %q", err)
			}
			if len(events) != td.expectedNumberOfEvents {
				t.Fatalf("Expected %d number of successful events from pipelinerun and taskrun but got %d; list of received events : %#v", td.expectedNumberOfEvents, len(events), events)
			}
			// Wait for up to 10 minutes and restart every second to check if
			// the PersistentVolumeClaims has the DeletionTimestamp
			if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
				// Check to make sure the PipelineRun's artifact storage PVC has been "deleted" at the end of the run.
				pvc, errWait := c.KubeClient.Kube.CoreV1().PersistentVolumeClaims(namespace).Get(artifacts.GetPVCName(pipelineRun), metav1.GetOptions{})
				if errWait != nil && !errors.IsNotFound(errWait) {
					return true, fmt.Errorf("Error looking up PVC %s for PipelineRun %s: %s", artifacts.GetPVCName(pipelineRun), prName, errWait)
				}
				// If we are not found then we are okay since it got cleaned up
				if errors.IsNotFound(errWait) {
					return true, nil
				}
				return pvc.DeletionTimestamp != nil, nil
			}); err != nil {
				t.Fatalf("Error while waiting for the PVC to be set as deleted: %s: %s: %s", artifacts.GetPVCName(pipelineRun), err, prName)
			}
			t.Logf("Successfully finished test %q", td.name)
		})
	}
}
// getHelloWorldPipelineWithSingularTask returns a Pipeline with "path" and
// "dest" string params and a single PipelineTask that forwards them to the
// Task created for the same suffix. The two param references deliberately
// mix the deprecated ${} syntax with $() (see TODO below).
func getHelloWorldPipelineWithSingularTask(suffix int, namespace string) *v1alpha1.Pipeline {
	return tb.Pipeline(getName(pipelineName, suffix), namespace, tb.PipelineSpec(
		tb.PipelineParamSpec("path", v1alpha1.ParamTypeString),
		tb.PipelineParamSpec("dest", v1alpha1.ParamTypeString),
		tb.PipelineTask(task1Name, getName(taskName, suffix),
			tb.PipelineTaskParam("path", "${params.path}"),
			// TODO(#1170): This test is using a mix of ${} and $() syntax to make sure both work.
			// In the next release we will remove support for $() entirely.
			tb.PipelineTaskParam("dest", "$(params.dest)")),
	))
}
// getFanInFanOutTasks returns the four Tasks of the fan-in/fan-out graph:
// "create-file" seeds a git workspace with two files, the two
// "check-create-files-exists*" Tasks each verify one seeded file and write a
// new one, and "read-files" (the fan-in) verifies every file written by the
// earlier Tasks. Step args deliberately mix the deprecated ${} syntax with
// $() (see the TODO comments below).
func getFanInFanOutTasks(namespace string) []*v1alpha1.Task {
	// git workspace resource declarations shared by the Tasks below
	inWorkspaceResource := tb.InputsResource("workspace", v1alpha1.PipelineResourceTypeGit)
	outWorkspaceResource := tb.OutputsResource("workspace", v1alpha1.PipelineResourceTypeGit)
	return []*v1alpha1.Task{
		tb.Task("create-file", namespace, tb.TaskSpec(
			tb.TaskInputs(tb.InputsResource("workspace", v1alpha1.PipelineResourceTypeGit,
				tb.ResourceTargetPath("brandnewspace"),
			)),
			tb.TaskOutputs(outWorkspaceResource),
			tb.Step("write-data-task-0-step-0", "ubuntu", tb.StepCommand("/bin/bash"),
				tb.StepArgs("-c", "echo stuff > $(inputs.resources.workspace.path)/stuff"),
			),
			tb.Step("write-data-task-0-step-1", "ubuntu", tb.StepCommand("/bin/bash"),
				// TODO(#1170): This test is using a mix of ${} and $() syntax to make sure both work.
				// In the next release we will remove support for $() entirely.
				tb.StepArgs("-c", "echo other > ${inputs.resources.workspace.path}/other"),
			),
		)),
		tb.Task("check-create-files-exists", namespace, tb.TaskSpec(
			tb.TaskInputs(inWorkspaceResource),
			tb.TaskOutputs(outWorkspaceResource),
			tb.Step("read-from-task-0", "ubuntu", tb.StepCommand("/bin/bash"),
				// TODO(#1170): This test is using a mix of ${} and $() syntax to make sure both work.
				// In the next release we will remove support for $() entirely.
				tb.StepArgs("-c", "[[ stuff == $(cat ${inputs.resources.workspace.path}/stuff) ]]"),
			),
			tb.Step("write-data-task-1", "ubuntu", tb.StepCommand("/bin/bash"),
				tb.StepArgs("-c", "echo something > $(inputs.resources.workspace.path)/something"),
			),
		)),
		tb.Task("check-create-files-exists-2", namespace, tb.TaskSpec(
			tb.TaskInputs(inWorkspaceResource),
			tb.TaskOutputs(outWorkspaceResource),
			tb.Step("read-from-task-0", "ubuntu", tb.StepCommand("/bin/bash"),
				tb.StepArgs("-c", "[[ other == $(cat $(inputs.resources.workspace.path)/other) ]]"),
			),
			tb.Step("write-data-task-1", "ubuntu", tb.StepCommand("/bin/bash"),
				tb.StepArgs("-c", "echo else > $(inputs.resources.workspace.path)/else"),
			),
		)),
		tb.Task("read-files", namespace, tb.TaskSpec(
			tb.TaskInputs(tb.InputsResource("workspace", v1alpha1.PipelineResourceTypeGit,
				tb.ResourceTargetPath("readingspace"),
			)),
			tb.Step("read-from-task-0", "ubuntu", tb.StepCommand("/bin/bash"),
				tb.StepArgs("-c", "[[ stuff == $(cat $(inputs.resources.workspace.path)/stuff) ]]"),
			),
			tb.Step("read-from-task-1", "ubuntu", tb.StepCommand("/bin/bash"),
				tb.StepArgs("-c", "[[ something == $(cat $(inputs.resources.workspace.path)/something) ]]"),
			),
			tb.Step("read-from-task-2", "ubuntu", tb.StepCommand("/bin/bash"),
				// TODO(#1170): This test is using a mix of ${} and $() syntax to make sure both work.
				// In the next release we will remove support for $() entirely.
				tb.StepArgs("-c", "[[ else == $(cat ${inputs.resources.workspace.path}/else) ]]"),
			),
		)),
	}
}
// getFanInFanOutPipeline wires the fan-in/fan-out Tasks into a Pipeline:
// "create-file-kritis" fans out to "create-fan-out-1" and
// "create-fan-out-2" (both consume its workspace output via From), and
// "check-fan-in" fans their outputs back in.
func getFanInFanOutPipeline(suffix int, namespace string) *v1alpha1.Pipeline {
	// every task republishes the shared git workspace as its output
	outGitResource := tb.PipelineTaskOutputResource("workspace", "git-repo")
	return tb.Pipeline(getName(pipelineName, suffix), namespace, tb.PipelineSpec(
		tb.PipelineDeclaredResource("git-repo", "git"),
		tb.PipelineTask("create-file-kritis", "create-file",
			tb.PipelineTaskInputResource("workspace", "git-repo"),
			outGitResource,
		),
		tb.PipelineTask("create-fan-out-1", "check-create-files-exists",
			tb.PipelineTaskInputResource("workspace", "git-repo", tb.From("create-file-kritis")),
			outGitResource,
		),
		tb.PipelineTask("create-fan-out-2", "check-create-files-exists-2",
			tb.PipelineTaskInputResource("workspace", "git-repo", tb.From("create-file-kritis")),
			outGitResource,
		),
		tb.PipelineTask("check-fan-in", "read-files",
			tb.PipelineTaskInputResource("workspace", "git-repo", tb.From("create-fan-out-2", "create-fan-out-1")),
		),
	))
}
// getFanInFanOutGitResources returns the single git PipelineResource (the
// grafeas/kritis repository at master) used as the shared workspace of the
// fan-in/fan-out pipeline.
func getFanInFanOutGitResources(namespace string) []*v1alpha1.PipelineResource {
	return []*v1alpha1.PipelineResource{
		tb.PipelineResource("kritis-resource-git", namespace, tb.PipelineResourceSpec(
			v1alpha1.PipelineResourceTypeGit,
			tb.PipelineResourceSpecParam("Url", "https://github.com/grafeas/kritis"),
			tb.PipelineResourceSpecParam("Revision", "master"),
		)),
	}
}
// getPipelineRunServiceAccount builds the per-suffix test ServiceAccount,
// referencing the secret created by getPipelineRunSecret for the same
// suffix.
func getPipelineRunServiceAccount(suffix int, namespace string) *corev1.ServiceAccount {
	sa := corev1.ServiceAccount{}
	sa.ObjectMeta = metav1.ObjectMeta{
		Namespace: namespace,
		Name:      getName(saName, suffix),
	}
	sa.Secrets = append(sa.Secrets, corev1.ObjectReference{Name: getName(secretName, suffix)})
	return &sa
}
// getFanInFanOutPipelineRun returns a PipelineRun for the fan-in/fan-out
// pipeline of the same suffix, binding its declared "git-repo" resource to
// the "kritis-resource-git" PipelineResource.
func getFanInFanOutPipelineRun(suffix int, namespace string) *v1alpha1.PipelineRun {
	return tb.PipelineRun(getName(pipelineRunName, suffix), namespace,
		tb.PipelineRunSpec(getName(pipelineName, suffix),
			tb.PipelineRunResourceBinding("git-repo", tb.PipelineResourceBindingRef("kritis-resource-git")),
		))
}
func getPipelineRunSecret(suffix int, namespace string) *corev1.Secret {
// Generated by:
// cat /tmp/key.json | base64 -w 0
// This service account is JUST a storage reader on gcr.io/build-crd-testing
encoedDockercred := "ewogICJ0eXBlIjogInNlcnZpY2VfYWNjb3VudCIsCiAgInByb2plY3RfaWQiOiAiYnVpbGQtY3JkLXRlc3RpbmciLAogICJwcml2YXRlX2tleV9pZCI6ICIwNTAyYTQxYTgxMmZiNjRjZTU2YTY4ZWM1ODMyYWIwYmExMWMxMWU2IiwKICAicHJpdmF0ZV9rZXkiOiAiLS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tXG5NSUlFdlFJQkFEQU5CZ2txaGtpRzl3MEJBUUVGQUFTQ0JLY3dnZ1NqQWdFQUFvSUJBUUM5WDRFWU9BUmJ4UU04XG5EMnhYY2FaVGsrZ1k4ZWp1OTh0THFDUXFUckdNVzlSZVQyeE9ZNUF5Z2FsUFArcDd5WEVja3dCRC9IaE0wZ2xJXG43TVRMZGVlS1dyK3JBMUx3SFp5V0ZXN0gwT25mN3duWUhFSExXVW1jM0JDT1JFRHRIUlo3WnJQQmYxSFRBQS8zXG5Nblc1bFpIU045b2p6U1NGdzZBVnU2ajZheGJCSUlKNzU0THJnS2VBWXVyd2ZJUTJSTFR1MjAxazJJcUxZYmhiXG4zbVNWRzVSK3RiS3oxQ3ZNNTNuSENiN0NmdVZlV3NyQThrazd4SHJyTFFLTW1JOXYyc2dSdWd5TUF6d3ovNnpOXG5oNS9pTXh4Z2VxNVc4eGtWeDNKMm5ZOEpKZEhhZi9UNkFHc09ORW80M3B4ZWlRVmpuUmYvS24xMFRDYzJFc0lZXG5TNDlVc1o3QkFnTUJBQUVDZ2dFQUF1cGxkdWtDUVF1RDVVL2dhbUh0N0dnVzNBTVYxOGVxbkhuQ2EyamxhaCtTXG5BZVVHbmhnSmpOdkUrcE1GbFN2NXVmMnAySzRlZC9veEQ2K0NwOVpYRFJqZ3ZmdEl5cWpsemJ3dkZjZ3p3TnVEXG55Z1VrdXA3SGVjRHNEOFR0ZUFvYlQvVnB3cTZ6S01yQndDdk5rdnk2YlZsb0VqNXgzYlhzYXhlOTVETy95cHU2XG53MFc5N3p4d3dESlk2S1FjSVdNamhyR3h2d1g3bmlVQ2VNNGxlV0JEeUd0dzF6ZUpuNGhFYzZOM2FqUWFjWEtjXG4rNFFseGNpYW1ZcVFXYlBudHhXUWhoUXpjSFdMaTJsOWNGYlpENyt1SkxGNGlONnk4bVZOVTNLM0sxYlJZclNEXG5SVXAzYVVWQlhtRmcrWi8ycHVWTCttVTNqM0xMV1l5Qk9rZXZ1T21kZ1FLQmdRRGUzR0lRa3lXSVMxNFRkTU9TXG5CaUtCQ0R5OGg5NmVoTDBIa0RieU9rU3RQS2RGOXB1RXhaeGh5N29qSENJTTVGVnJwUk4yNXA0c0V6d0ZhYyt2XG5KSUZnRXZxN21YZm1YaVhJTmllUG9FUWFDbm54RHhXZ21yMEhVS0VtUzlvTWRnTGNHVStrQ1ZHTnN6N0FPdW0wXG5LcVkzczIyUTlsUTY3Rk95cWl1OFdGUTdRUUtCZ1FEWmlGaFRFWmtQRWNxWmpud0pwVEI1NlpXUDlLVHNsWlA3XG53VTRiemk2eSttZXlmM01KKzRMMlN5SGMzY3BTTWJqdE5PWkN0NDdiOTA4RlVtTFhVR05oY3d1WmpFUXhGZXkwXG5tNDFjUzVlNFA0OWI5bjZ5TEJqQnJCb3FzMldCYWwyZWdkaE5KU3NDV29pWlA4L1pUOGVnWHZoN2I5MWp6b0syXG5xMlBVbUE0RGdRS0JnQVdMMklqdkVJME95eDJTMTFjbi9lM1dKYVRQZ05QVEc5MDNVcGErcW56aE9JeCtNYXFoXG5QRjRXc3VBeTBBb2dHSndnTkpiTjhIdktVc0VUdkE1d3l5TjM5WE43dzBjaGFyRkwzN29zVStXT0F6RGpuamNzXG5BcTVPN0dQR21YdWI2RUJRQlBKaEpQMXd5NHYvSzFmSGcvRjQ3cTRmNDBMQUpPa2FZUkp
ENUh6QkFvR0JBTlVoXG5uSUJQSnFxNElNdlE2Y0M5ZzhCKzF4WURlYTkvWWsxdytTbVBHdndyRVh5M0dLeDRLN2xLcGJQejdtNFgzM3N4XG5zRVUvK1kyVlFtd1JhMXhRbS81M3JLN1YybDVKZi9ENDAwalJtNlpmU0FPdmdEVHJ0Wm5VR0pNcno5RTd1Tnc3XG5sZ1VIM0pyaXZ5Ri9meE1JOHFzelFid1hQMCt4bnlxQXhFQWdkdUtCQW9HQUlNK1BTTllXQ1pYeERwU0hJMThkXG5qS2tvQWJ3Mk1veXdRSWxrZXVBbjFkWEZhZDF6c1hRR2RUcm1YeXY3TlBQKzhHWEJrbkJMaTNjdnhUaWxKSVN5XG51Y05yQ01pcU5BU24vZHE3Y1dERlVBQmdqWDE2SkgyRE5GWi9sL1VWRjNOREFKalhDczFYN3lJSnlYQjZveC96XG5hU2xxbElNVjM1REJEN3F4Unl1S3Nnaz1cbi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS1cbiIsCiAgImNsaWVudF9lbWFpbCI6ICJwdWxsLXNlY3JldC10ZXN0aW5nQGJ1aWxkLWNyZC10ZXN0aW5nLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwKICAiY2xpZW50X2lkIjogIjEwNzkzNTg2MjAzMzAyNTI1MTM1MiIsCiAgImF1dGhfdXJpIjogImh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi9hdXRoIiwKICAidG9rZW5fdXJpIjogImh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi90b2tlbiIsCiAgImF1dGhfcHJvdmlkZXJfeDUwOV9jZXJ0X3VybCI6ICJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9vYXV0aDIvdjEvY2VydHMiLAogICJjbGllbnRfeDUwOV9jZXJ0X3VybCI6ICJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9yb2JvdC92MS9tZXRhZGF0YS94NTA5L3B1bGwtc2VjcmV0LXRlc3RpbmclNDBidWlsZC1jcmQtdGVzdGluZy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIKfQo="
decoded, err := base64.StdEncoding.DecodeString(encoedDockercred)
if err != nil {
return nil
}
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: getName(secretName, suffix),
Annotations: map[string]string{
"tekton.dev/docker-0": "https://us.gcr.io",
"tekton.dev/docker-1": "https://eu.gcr.io",
"tekton.dev/docker-2": "https://asia.gcr.io",
"tekton.dev/docker-3": "https://gcr.io",
},
},
Type: "kubernetes.io/basic-auth",
Data: map[string][]byte{
"username": []byte("_json_key"),
"password": decoded,
},
}
}
// getHelloWorldPipelineRun returns a PipelineRun for the hello-world
// pipeline of the same suffix, wiring in the test ServiceAccount and the
// "path"/"dest" params consumed by the skopeo copy step.
func getHelloWorldPipelineRun(suffix int, namespace string) *v1alpha1.PipelineRun {
	return tb.PipelineRun(getName(pipelineRunName, suffix), namespace,
		tb.PipelineRunLabel("hello-world-key", "hello-world-value"),
		tb.PipelineRunSpec(getName(pipelineName, suffix),
			tb.PipelineRunParam("path", "docker://gcr.io/build-crd-testing/secret-sauce"),
			tb.PipelineRunParam("dest", "dir:///tmp/"),
			// use getName like every other helper here, instead of an
			// inline fmt.Sprintf duplicating its format string
			tb.PipelineRunServiceAccount(getName(saName, suffix)),
		),
	)
}
// getName builds the per-test resource name by appending the numeric suffix
// to the given base name, e.g. ("pipeline", 3) -> "pipeline3".
func getName(base string, suffix int) string {
	return base + fmt.Sprint(suffix)
}
// collectMatchingEvents watches events in namespace for up to 5 seconds and
// returns those that match both
// 1. matchKinds, a map from object Kind to the names of the objects of interest
// 2. reason, the expected event reason
func collectMatchingEvents(kubeClient *knativetest.KubeClient, namespace string, kinds map[string][]string, reason string) ([]*corev1.Event, error) {
	var events []*corev1.Event
	watchEvents, err := kubeClient.Kube.CoreV1().Events(namespace).Watch(metav1.ListOptions{})
	if err != nil {
		return events, err
	}
	// stop the watch only after a successful Watch call: deferring before
	// the error check (as the old code did) panics on a nil watcher
	defer watchEvents.Stop()
	// stop collecting events after 5 seconds
	timer := time.NewTimer(5 * time.Second)
	defer timer.Stop()
	for {
		select {
		case wevent := <-watchEvents.ResultChan():
			// a closed result channel yields zero-value items whose Object
			// is nil; skip them instead of panicking on the assertion
			event, ok := wevent.Object.(*corev1.Event)
			if !ok {
				continue
			}
			if val, ok := kinds[event.InvolvedObject.Kind]; ok {
				for _, expectedName := range val {
					if event.InvolvedObject.Name == expectedName && event.Reason == reason {
						events = append(events, event)
					}
				}
			}
		case <-timer.C:
			return events, nil
		}
	}
}
// checkLabelPropagation checks that labels are correctly propagating from
// Pipelines, PipelineRuns, and Tasks to TaskRuns and Pods.
//
// The expected label set is accumulated top-down (Pipeline -> PipelineRun
// -> TaskRun -> Pod), mirroring how the controllers merge labels; each
// level is asserted to contain everything accumulated so far.
func checkLabelPropagation(t *testing.T, c *clients, namespace string, pipelineRunName string, tr *v1alpha1.TaskRun) {
	// Our controllers add 4 labels automatically. If custom labels are set on
	// the Pipeline, PipelineRun, or Task then the map will have to be resized.
	labels := make(map[string]string, 4)

	// Check label propagation to PipelineRuns.
	pr, err := c.PipelineRunClient.Get(pipelineRunName, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Couldn't get expected PipelineRun for %s: %s", tr.Name, err)
	}
	p, err := c.PipelineClient.Get(pr.Spec.PipelineRef.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Couldn't get expected Pipeline for %s: %s", pr.Name, err)
	}
	for key, val := range p.ObjectMeta.Labels {
		labels[key] = val
	}
	// This label is added to every PipelineRun by the PipelineRun controller
	labels[pipeline.GroupName+pipeline.PipelineLabelKey] = p.Name
	assertLabelsMatch(t, labels, pr.ObjectMeta.Labels)

	// Check label propagation to TaskRuns.
	for key, val := range pr.ObjectMeta.Labels {
		labels[key] = val
	}
	// This label is added to every TaskRun by the PipelineRun controller
	labels[pipeline.GroupName+pipeline.PipelineRunLabelKey] = pr.Name
	if tr.Spec.TaskRef != nil {
		task, err := c.TaskClient.Get(tr.Spec.TaskRef.Name, metav1.GetOptions{})
		if err != nil {
			t.Fatalf("Couldn't get expected Task for %s: %s", tr.Name, err)
		}
		for key, val := range task.ObjectMeta.Labels {
			labels[key] = val
		}
		// This label is added to TaskRuns that reference a Task by the TaskRun controller
		labels[pipeline.GroupName+pipeline.TaskLabelKey] = task.Name
	}
	assertLabelsMatch(t, labels, tr.ObjectMeta.Labels)

	// PodName is "" iff a retry happened and pod is deleted
	// This label is added to every Pod by the TaskRun controller
	if tr.Status.PodName != "" {
		// Check label propagation to Pods.
		pod := getPodForTaskRun(t, c.KubeClient, namespace, tr)
		// This label is added to every Pod by the TaskRun controller
		labels[pipeline.GroupName+pipeline.TaskRunLabelKey] = tr.Name
		assertLabelsMatch(t, labels, pod.ObjectMeta.Labels)
	}
}
// checkAnnotationPropagation checks that annotations are correctly propagating from
// Pipelines, PipelineRuns, and Tasks to TaskRuns and Pods.
//
// The expected annotation set is accumulated top-down (Pipeline ->
// PipelineRun -> TaskRun -> Pod); each level is asserted to contain
// everything accumulated so far.
func checkAnnotationPropagation(t *testing.T, c *clients, namespace string, pipelineRunName string, tr *v1alpha1.TaskRun) {
	annotations := make(map[string]string)

	// Check annotation propagation to PipelineRuns.
	pr, err := c.PipelineRunClient.Get(pipelineRunName, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Couldn't get expected PipelineRun for %s: %s", tr.Name, err)
	}
	p, err := c.PipelineClient.Get(pr.Spec.PipelineRef.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Couldn't get expected Pipeline for %s: %s", pr.Name, err)
	}
	for key, val := range p.ObjectMeta.Annotations {
		annotations[key] = val
	}
	assertAnnotationsMatch(t, annotations, pr.ObjectMeta.Annotations)

	// Check annotation propagation to TaskRuns.
	for key, val := range pr.ObjectMeta.Annotations {
		annotations[key] = val
	}
	if tr.Spec.TaskRef != nil {
		task, err := c.TaskClient.Get(tr.Spec.TaskRef.Name, metav1.GetOptions{})
		if err != nil {
			t.Fatalf("Couldn't get expected Task for %s: %s", tr.Name, err)
		}
		for key, val := range task.ObjectMeta.Annotations {
			annotations[key] = val
		}
	}
	assertAnnotationsMatch(t, annotations, tr.ObjectMeta.Annotations)

	// Check annotation propagation to Pods.
	pod := getPodForTaskRun(t, c.KubeClient, namespace, tr)
	assertAnnotationsMatch(t, annotations, pod.ObjectMeta.Annotations)
}
// getPodForTaskRun returns the single Pod backing the given TaskRun,
// located via the TaskRun-name label (the Pod name itself carries a
// random suffix, so it cannot be looked up directly).
func getPodForTaskRun(t *testing.T, kubeClient *knativetest.KubeClient, namespace string, tr *v1alpha1.TaskRun) *corev1.Pod {
	selector := pipeline.GroupName + pipeline.TaskRunLabelKey + " = " + tr.Name
	pods, err := kubeClient.Kube.CoreV1().Pods(namespace).List(metav1.ListOptions{LabelSelector: selector})
	if err != nil {
		t.Fatalf("Couldn't get expected Pod for %s: %s", tr.Name, err)
	}
	if len(pods.Items) != 1 {
		t.Fatalf("Expected 1 Pod for %s, but got %d Pods", tr.Name, len(pods.Items))
	}
	return &pods.Items[0]
}
// assertLabelsMatch fails the test if any expected label is absent from,
// or carries a different value in, actualLabels. Extra labels in
// actualLabels are ignored.
func assertLabelsMatch(t *testing.T, expectedLabels, actualLabels map[string]string) {
	for key := range expectedLabels {
		want := expectedLabels[key]
		if actualLabels[key] == want {
			continue
		}
		t.Errorf("Expected labels containing %s=%s but labels were %v", key, want, actualLabels)
	}
}
// assertAnnotationsMatch fails the test if any expected annotation is
// absent from, or carries a different value in, actualAnnotations. Extra
// annotations in actualAnnotations are ignored.
func assertAnnotationsMatch(t *testing.T, expectedAnnotations, actualAnnotations map[string]string) {
	for key := range expectedAnnotations {
		want := expectedAnnotations[key]
		if actualAnnotations[key] == want {
			continue
		}
		t.Errorf("Expected annotations containing %s=%s but annotations were %v", key, want, actualAnnotations)
	}
}
// getPipelineWithFailingCondition returns a two-task Pipeline in which
// task1 is gated on cond-1 (which always fails) and task2 runs after it.
func getPipelineWithFailingCondition(suffix int, namespace string) *v1alpha1.Pipeline {
	taskRef := getName(taskName, suffix)
	return tb.Pipeline(getName(pipelineName, suffix), namespace, tb.PipelineSpec(
		tb.PipelineTask(task1Name, taskRef, tb.PipelineTaskCondition(cond1Name)),
		tb.PipelineTask("task2", taskRef, tb.RunAfter(task1Name)),
	))
}
// getFailingCondition returns a Condition whose check simply runs
// `exit 1`, so the condition always fails.
func getFailingCondition(namespace string) *v1alpha1.Condition {
	check := tb.ConditionSpecCheck("", "ubuntu", tb.Command("/bin/bash"), tb.Args("exit 1"))
	return tb.Condition(cond1Name, namespace, tb.ConditionSpec(check))
}
// getConditionalPipelineRun returns a labelled PipelineRun that drives the
// condition-gated pipeline of the given test-case index.
func getConditionalPipelineRun(suffix int, namespace string) *v1alpha1.PipelineRun {
	prName := getName(pipelineRunName, suffix)
	return tb.PipelineRun(prName, namespace,
		tb.PipelineRunLabel("hello-world-key", "hello-world-value"),
		tb.PipelineRunSpec(getName(pipelineName, suffix)),
	)
}
Update fan in / fan out test (no automatic copy) 📋
Now that we don't automatically copy the content of an input to an
output (when the same resource is used as both an input and an output),
this means that:
- Our fan-in / fan-out test will need to explicitly write to the output
path, instead of writing to the input path and assuming it would get
copied over (the very behaviour we're changing in #1188)
- Data previously written to an output that is used as an input, and
then an output later on, will be lost unless explicitly copied. In the
update to the examples this was handled by symlinking the input to the
output, I decided to instead update the test to no longer expect to
see a file that was written by the first task in the graph and to not
copy it explicitly.
Note that there is actually a race between the two tasks fanning out -
if they were writing the same file we would not be able to reliably
predict which would win.
Part of fixing #1188
// +build e2e
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"encoding/base64"
"fmt"
"strings"
"testing"
"time"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
"github.com/tektoncd/pipeline/pkg/artifacts"
tb "github.com/tektoncd/pipeline/test/builder"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"knative.dev/pkg/apis"
knativetest "knative.dev/pkg/test"
)
// Base names for the Kubernetes/Tekton objects created by these tests; a
// per-test-case numeric suffix is appended via getName so that parallel
// test cases do not collide.
var (
	pipelineName    = "pipeline"
	pipelineRunName = "pipelinerun"
	secretName      = "secret"
	saName          = "service-account"
	taskName        = "task"
	task1Name       = "task1"
	cond1Name       = "cond-1"
	// pipelineRunTimeout bounds how long we wait for a PipelineRun to finish.
	pipelineRunTimeout = 10 * time.Minute
)
// TestPipelineRun is an end-to-end test that exercises several PipelineRun
// scenarios (fan-in/fan-out, ServiceAccount/param propagation, and
// condition-gated tasks). For each case it creates the required resources,
// waits for the PipelineRun to succeed, then verifies the resulting
// TaskRuns, emitted events, label/annotation propagation, and cleanup of
// the artifact-storage PVC.
func TestPipelineRun(t *testing.T) {
	t.Parallel()
	type tests struct {
		name                   string
		testSetup              func(t *testing.T, c *clients, namespace string, index int)
		expectedTaskRuns       []string
		expectedNumberOfEvents int
		pipelineRunFunc        func(int, string) *v1alpha1.PipelineRun
	}

	tds := []tests{{
		name: "fan-in and fan-out",
		testSetup: func(t *testing.T, c *clients, namespace string, index int) {
			t.Helper()
			for _, task := range getFanInFanOutTasks(namespace) {
				if _, err := c.TaskClient.Create(task); err != nil {
					t.Fatalf("Failed to create Task `%s`: %s", task.Name, err)
				}
			}
			for _, res := range getFanInFanOutGitResources(namespace) {
				if _, err := c.PipelineResourceClient.Create(res); err != nil {
					// Report the resource we actually failed to create
					// (previously this reported the unrelated kanikoResourceName).
					t.Fatalf("Failed to create Pipeline Resource `%s`: %s", res.Name, err)
				}
			}
			if _, err := c.PipelineClient.Create(getFanInFanOutPipeline(index, namespace)); err != nil {
				t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err)
			}
		},
		pipelineRunFunc:  getFanInFanOutPipelineRun,
		expectedTaskRuns: []string{"create-file-kritis", "create-fan-out-1", "create-fan-out-2", "check-fan-in"},
		// 1 from PipelineRun and 4 from Tasks defined in pipelinerun
		expectedNumberOfEvents: 5,
	}, {
		name: "service account propagation and pipeline param",
		testSetup: func(t *testing.T, c *clients, namespace string, index int) {
			t.Helper()
			if _, err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Create(getPipelineRunSecret(index, namespace)); err != nil {
				t.Fatalf("Failed to create secret `%s`: %s", getName(secretName, index), err)
			}
			if _, err := c.KubeClient.Kube.CoreV1().ServiceAccounts(namespace).Create(getPipelineRunServiceAccount(index, namespace)); err != nil {
				t.Fatalf("Failed to create SA `%s`: %s", getName(saName, index), err)
			}
			task := tb.Task(getName(taskName, index), namespace, tb.TaskSpec(
				tb.TaskInputs(tb.InputsParamSpec("path", v1alpha1.ParamTypeString),
					tb.InputsParamSpec("dest", v1alpha1.ParamTypeString)),
				// Reference build: https://github.com/knative/build/tree/master/test/docker-basic
				tb.Step("config-docker", "quay.io/rhpipeline/skopeo:alpine",
					tb.StepCommand("skopeo"),
					// TODO(#1170): This test is using a mix of ${} and $() syntax to make sure both work.
					tb.StepArgs("copy", "${inputs.params.path}", "$(inputs.params.dest)"),
				),
			))
			if _, err := c.TaskClient.Create(task); err != nil {
				t.Fatalf("Failed to create Task `%s`: %s", getName(taskName, index), err)
			}
			if _, err := c.PipelineClient.Create(getHelloWorldPipelineWithSingularTask(index, namespace)); err != nil {
				t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err)
			}
		},
		expectedTaskRuns: []string{task1Name},
		// 1 from PipelineRun and 1 from Tasks defined in pipelinerun
		expectedNumberOfEvents: 2,
		pipelineRunFunc:        getHelloWorldPipelineRun,
	}, {
		name: "pipeline succeeds when task skipped due to failed condition",
		testSetup: func(t *testing.T, c *clients, namespace string, index int) {
			t.Helper()
			cond := getFailingCondition(namespace)
			if _, err := c.ConditionClient.Create(cond); err != nil {
				t.Fatalf("Failed to create Condition `%s`: %s", cond1Name, err)
			}
			task := tb.Task(getName(taskName, index), namespace, tb.TaskSpec(
				tb.Step("echo-hello", "ubuntu",
					tb.StepCommand("/bin/bash"),
					tb.StepArgs("-c", "echo hello, world"),
				),
			))
			if _, err := c.TaskClient.Create(task); err != nil {
				t.Fatalf("Failed to create Task `%s`: %s", getName(taskName, index), err)
			}
			if _, err := c.PipelineClient.Create(getPipelineWithFailingCondition(index, namespace)); err != nil {
				t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err)
			}
		},
		expectedTaskRuns: []string{},
		// 1 from PipelineRun; 0 from taskrun since it should not be executed due to condition failing
		expectedNumberOfEvents: 1,
		pipelineRunFunc:        getConditionalPipelineRun,
	}}

	for i, td := range tds {
		// Copy the loop variables before t.Run: the subtests run in
		// parallel, so the closures must not share the iteration
		// variables. (Previously td was copied inside the closure but the
		// shared i was read after t.Parallel(), by which time the loop may
		// have advanced.)
		i, td := i, td
		t.Run(td.name, func(t *testing.T) {
			t.Parallel()
			c, namespace := setup(t)

			knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf)
			defer tearDown(t, c, namespace)

			t.Logf("Setting up test resources for %q test in namespace %s", td.name, namespace)
			td.testSetup(t, c, namespace, i)

			prName := fmt.Sprintf("%s%d", pipelineRunName, i)
			pipelineRun, err := c.PipelineRunClient.Create(td.pipelineRunFunc(i, namespace))
			if err != nil {
				t.Fatalf("Failed to create PipelineRun `%s`: %s", prName, err)
			}

			t.Logf("Waiting for PipelineRun %s in namespace %s to complete", prName, namespace)
			if err := WaitForPipelineRunState(c, prName, pipelineRunTimeout, PipelineRunSucceed(prName), "PipelineRunSuccess"); err != nil {
				t.Fatalf("Error waiting for PipelineRun %s to finish: %s", prName, err)
			}

			t.Logf("Making sure the expected TaskRuns %s were created", td.expectedTaskRuns)
			actualTaskrunList, err := c.TaskRunClient.List(metav1.ListOptions{LabelSelector: fmt.Sprintf("tekton.dev/pipelineRun=%s", prName)})
			if err != nil {
				t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", prName, err)
			}
			expectedTaskRunNames := []string{}
			for _, runName := range td.expectedTaskRuns {
				taskRunName := strings.Join([]string{prName, runName}, "-")
				// check the actual task name starting with prName+runName with a random suffix
				for _, actualTaskRunItem := range actualTaskrunList.Items {
					if strings.HasPrefix(actualTaskRunItem.Name, taskRunName) {
						taskRunName = actualTaskRunItem.Name
					}
				}
				expectedTaskRunNames = append(expectedTaskRunNames, taskRunName)
				r, err := c.TaskRunClient.Get(taskRunName, metav1.GetOptions{})
				if err != nil {
					t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err)
				}
				if !r.Status.GetCondition(apis.ConditionSucceeded).IsTrue() {
					t.Fatalf("Expected TaskRun %s to have succeeded but Status is %v", taskRunName, r.Status)
				}
				t.Logf("Checking that labels were propagated correctly for TaskRun %s", r.Name)
				checkLabelPropagation(t, c, namespace, prName, r)
				t.Logf("Checking that annotations were propagated correctly for TaskRun %s", r.Name)
				checkAnnotationPropagation(t, c, namespace, prName, r)
			}

			matchKinds := map[string][]string{"PipelineRun": {prName}, "TaskRun": expectedTaskRunNames}
			t.Logf("Making sure %d events were created from taskrun and pipelinerun with kinds %v", td.expectedNumberOfEvents, matchKinds)
			events, err := collectMatchingEvents(c.KubeClient, namespace, matchKinds, "Succeeded")
			if err != nil {
				t.Fatalf("Failed to collect matching events: %q", err)
			}
			if len(events) != td.expectedNumberOfEvents {
				t.Fatalf("Expected %d number of successful events from pipelinerun and taskrun but got %d; list of received events : %#v", td.expectedNumberOfEvents, len(events), events)
			}

			// Wait for up to 10 minutes and restart every second to check if
			// the PersistentVolumeClaims has the DeletionTimestamp
			if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
				// Check to make sure the PipelineRun's artifact storage PVC has been "deleted" at the end of the run.
				pvc, errWait := c.KubeClient.Kube.CoreV1().PersistentVolumeClaims(namespace).Get(artifacts.GetPVCName(pipelineRun), metav1.GetOptions{})
				if errWait != nil && !errors.IsNotFound(errWait) {
					return true, fmt.Errorf("Error looking up PVC %s for PipelineRun %s: %s", artifacts.GetPVCName(pipelineRun), prName, errWait)
				}
				// If we are not found then we are okay since it got cleaned up
				if errors.IsNotFound(errWait) {
					return true, nil
				}
				return pvc.DeletionTimestamp != nil, nil
			}); err != nil {
				t.Fatalf("Error while waiting for the PVC to be set as deleted: %s: %s: %s", artifacts.GetPVCName(pipelineRun), err, prName)
			}
			t.Logf("Successfully finished test %q", td.name)
		})
	}
}
// getHelloWorldPipelineWithSingularTask returns a Pipeline with two string
// params ("path", "dest") forwarded to a single task. It deliberately
// mixes ${} and $() variable syntax (see TODO #1170).
func getHelloWorldPipelineWithSingularTask(suffix int, namespace string) *v1alpha1.Pipeline {
	spec := tb.PipelineSpec(
		tb.PipelineParamSpec("path", v1alpha1.ParamTypeString),
		tb.PipelineParamSpec("dest", v1alpha1.ParamTypeString),
		tb.PipelineTask(task1Name, getName(taskName, suffix),
			tb.PipelineTaskParam("path", "${params.path}"),
			// TODO(#1170): This test is using a mix of ${} and $() syntax to make sure both work.
			// In the next release we will remove support for $() entirely.
			tb.PipelineTaskParam("dest", "$(params.dest)")),
	)
	return tb.Pipeline(getName(pipelineName, suffix), namespace, spec)
}
// getFanInFanOutTasks returns the four Tasks used by the fan-in/fan-out
// pipeline: one initial writer, two parallel tasks that each verify one of
// the writer's files and write their own, and a final reader that checks
// both fanned-in results. All tasks share a git "workspace" resource.
func getFanInFanOutTasks(namespace string) []*v1alpha1.Task {
	inWorkspaceResource := tb.InputsResource("workspace", v1alpha1.PipelineResourceTypeGit)
	outWorkspaceResource := tb.OutputsResource("workspace", v1alpha1.PipelineResourceTypeGit)
	return []*v1alpha1.Task{
		// Writes "stuff" and "other" to the output workspace.
		tb.Task("create-file", namespace, tb.TaskSpec(
			tb.TaskInputs(tb.InputsResource("workspace", v1alpha1.PipelineResourceTypeGit,
				tb.ResourceTargetPath("brandnewspace"),
			)),
			tb.TaskOutputs(outWorkspaceResource),
			tb.Step("write-data-task-0-step-0", "ubuntu", tb.StepCommand("/bin/bash"),
				tb.StepArgs("-c", "echo stuff > $(outputs.resources.workspace.path)/stuff"),
			),
			tb.Step("write-data-task-0-step-1", "ubuntu", tb.StepCommand("/bin/bash"),
				// TODO(#1170): This test is using a mix of ${} and $() syntax to make sure both work.
				// In the next release we will remove support for $() entirely.
				tb.StepArgs("-c", "echo other > ${outputs.resources.workspace.path}/other"),
			),
		)),
		// Fan-out branch 1: checks "stuff" exists, then writes "something".
		tb.Task("check-create-files-exists", namespace, tb.TaskSpec(
			tb.TaskInputs(inWorkspaceResource),
			tb.TaskOutputs(outWorkspaceResource),
			tb.Step("read-from-task-0", "ubuntu", tb.StepCommand("/bin/bash"),
				// TODO(#1170): This test is using a mix of ${} and $() syntax to make sure both work.
				// In the next release we will remove support for $() entirely.
				tb.StepArgs("-c", "[[ stuff == $(cat ${inputs.resources.workspace.path}/stuff) ]]"),
			),
			tb.Step("write-data-task-1", "ubuntu", tb.StepCommand("/bin/bash"),
				tb.StepArgs("-c", "echo something > $(outputs.resources.workspace.path)/something"),
			),
		)),
		// Fan-out branch 2: checks "other" exists, then writes "else".
		tb.Task("check-create-files-exists-2", namespace, tb.TaskSpec(
			tb.TaskInputs(inWorkspaceResource),
			tb.TaskOutputs(outWorkspaceResource),
			tb.Step("read-from-task-0", "ubuntu", tb.StepCommand("/bin/bash"),
				tb.StepArgs("-c", "[[ other == $(cat $(inputs.resources.workspace.path)/other) ]]"),
			),
			tb.Step("write-data-task-1", "ubuntu", tb.StepCommand("/bin/bash"),
				tb.StepArgs("-c", "echo else > $(outputs.resources.workspace.path)/else"),
			),
		)),
		// Fan-in: verifies both branch outputs are present.
		tb.Task("read-files", namespace, tb.TaskSpec(
			tb.TaskInputs(tb.InputsResource("workspace", v1alpha1.PipelineResourceTypeGit,
				tb.ResourceTargetPath("readingspace"),
			)),
			tb.Step("read-from-task-0", "ubuntu", tb.StepCommand("/bin/bash"),
				tb.StepArgs("-c", "[[ something == $(cat $(inputs.resources.workspace.path)/something) ]]"),
			),
			tb.Step("read-from-task-1", "ubuntu", tb.StepCommand("/bin/bash"),
				// TODO(#1170): This test is using a mix of ${} and $() syntax to make sure both work.
				// In the next release we will remove support for $() entirely.
				tb.StepArgs("-c", "[[ else == $(cat ${inputs.resources.workspace.path}/else) ]]"),
			),
		)),
	}
}
// getFanInFanOutPipeline returns the fan-in/fan-out Pipeline:
// create-file-kritis feeds two parallel tasks (create-fan-out-1/2) whose
// outputs fan back in to check-fan-in, all over the shared "git-repo"
// resource.
func getFanInFanOutPipeline(suffix int, namespace string) *v1alpha1.Pipeline {
	outGitResource := tb.PipelineTaskOutputResource("workspace", "git-repo")
	return tb.Pipeline(getName(pipelineName, suffix), namespace, tb.PipelineSpec(
		tb.PipelineDeclaredResource("git-repo", "git"),
		tb.PipelineTask("create-file-kritis", "create-file",
			tb.PipelineTaskInputResource("workspace", "git-repo"),
			outGitResource,
		),
		tb.PipelineTask("create-fan-out-1", "check-create-files-exists",
			tb.PipelineTaskInputResource("workspace", "git-repo", tb.From("create-file-kritis")),
			outGitResource,
		),
		tb.PipelineTask("create-fan-out-2", "check-create-files-exists-2",
			tb.PipelineTaskInputResource("workspace", "git-repo", tb.From("create-file-kritis")),
			outGitResource,
		),
		// Fan-in: consumes the outputs of both fan-out branches.
		tb.PipelineTask("check-fan-in", "read-files",
			tb.PipelineTaskInputResource("workspace", "git-repo", tb.From("create-fan-out-2", "create-fan-out-1")),
		),
	))
}
// getFanInFanOutGitResources returns the git PipelineResource (the kritis
// repository at master) consumed by the fan-in/fan-out pipeline.
func getFanInFanOutGitResources(namespace string) []*v1alpha1.PipelineResource {
	spec := tb.PipelineResourceSpec(
		v1alpha1.PipelineResourceTypeGit,
		tb.PipelineResourceSpecParam("Url", "https://github.com/grafeas/kritis"),
		tb.PipelineResourceSpecParam("Revision", "master"),
	)
	return []*v1alpha1.PipelineResource{
		tb.PipelineResource("kritis-resource-git", namespace, spec),
	}
}
// getPipelineRunServiceAccount returns a ServiceAccount for the given test
// case, wired to the registry secret created by getPipelineRunSecret.
func getPipelineRunServiceAccount(suffix int, namespace string) *corev1.ServiceAccount {
	sa := corev1.ServiceAccount{}
	sa.ObjectMeta = metav1.ObjectMeta{
		Namespace: namespace,
		Name:      getName(saName, suffix),
	}
	sa.Secrets = []corev1.ObjectReference{{Name: getName(secretName, suffix)}}
	return &sa
}
// getFanInFanOutPipelineRun returns a PipelineRun that binds the
// pipeline's "git-repo" resource to the kritis git PipelineResource.
func getFanInFanOutPipelineRun(suffix int, namespace string) *v1alpha1.PipelineRun {
	binding := tb.PipelineRunResourceBinding("git-repo", tb.PipelineResourceBindingRef("kritis-resource-git"))
	return tb.PipelineRun(getName(pipelineRunName, suffix), namespace,
		tb.PipelineRunSpec(getName(pipelineName, suffix), binding),
	)
}
// getPipelineRunSecret returns a basic-auth Secret for the given test
// case, carrying a base64-decoded GCP service-account key as the docker
// password. The tekton.dev/docker-* annotations tell Tekton which
// registries the credential applies to.
func getPipelineRunSecret(suffix int, namespace string) *corev1.Secret {
	// Generated by:
	// cat /tmp/key.json | base64 -w 0
	// This service account is JUST a storage reader on gcr.io/build-crd-testing
	encoedDockercred := "ewogICJ0eXBlIjogInNlcnZpY2VfYWNjb3VudCIsCiAgInByb2plY3RfaWQiOiAiYnVpbGQtY3JkLXRlc3RpbmciLAogICJwcml2YXRlX2tleV9pZCI6ICIwNTAyYTQxYTgxMmZiNjRjZTU2YTY4ZWM1ODMyYWIwYmExMWMxMWU2IiwKICAicHJpdmF0ZV9rZXkiOiAiLS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tXG5NSUlFdlFJQkFEQU5CZ2txaGtpRzl3MEJBUUVGQUFTQ0JLY3dnZ1NqQWdFQUFvSUJBUUM5WDRFWU9BUmJ4UU04XG5EMnhYY2FaVGsrZ1k4ZWp1OTh0THFDUXFUckdNVzlSZVQyeE9ZNUF5Z2FsUFArcDd5WEVja3dCRC9IaE0wZ2xJXG43TVRMZGVlS1dyK3JBMUx3SFp5V0ZXN0gwT25mN3duWUhFSExXVW1jM0JDT1JFRHRIUlo3WnJQQmYxSFRBQS8zXG5Nblc1bFpIU045b2p6U1NGdzZBVnU2ajZheGJCSUlKNzU0THJnS2VBWXVyd2ZJUTJSTFR1MjAxazJJcUxZYmhiXG4zbVNWRzVSK3RiS3oxQ3ZNNTNuSENiN0NmdVZlV3NyQThrazd4SHJyTFFLTW1JOXYyc2dSdWd5TUF6d3ovNnpOXG5oNS9pTXh4Z2VxNVc4eGtWeDNKMm5ZOEpKZEhhZi9UNkFHc09ORW80M3B4ZWlRVmpuUmYvS24xMFRDYzJFc0lZXG5TNDlVc1o3QkFnTUJBQUVDZ2dFQUF1cGxkdWtDUVF1RDVVL2dhbUh0N0dnVzNBTVYxOGVxbkhuQ2EyamxhaCtTXG5BZVVHbmhnSmpOdkUrcE1GbFN2NXVmMnAySzRlZC9veEQ2K0NwOVpYRFJqZ3ZmdEl5cWpsemJ3dkZjZ3p3TnVEXG55Z1VrdXA3SGVjRHNEOFR0ZUFvYlQvVnB3cTZ6S01yQndDdk5rdnk2YlZsb0VqNXgzYlhzYXhlOTVETy95cHU2XG53MFc5N3p4d3dESlk2S1FjSVdNamhyR3h2d1g3bmlVQ2VNNGxlV0JEeUd0dzF6ZUpuNGhFYzZOM2FqUWFjWEtjXG4rNFFseGNpYW1ZcVFXYlBudHhXUWhoUXpjSFdMaTJsOWNGYlpENyt1SkxGNGlONnk4bVZOVTNLM0sxYlJZclNEXG5SVXAzYVVWQlhtRmcrWi8ycHVWTCttVTNqM0xMV1l5Qk9rZXZ1T21kZ1FLQmdRRGUzR0lRa3lXSVMxNFRkTU9TXG5CaUtCQ0R5OGg5NmVoTDBIa0RieU9rU3RQS2RGOXB1RXhaeGh5N29qSENJTTVGVnJwUk4yNXA0c0V6d0ZhYyt2XG5KSUZnRXZxN21YZm1YaVhJTmllUG9FUWFDbm54RHhXZ21yMEhVS0VtUzlvTWRnTGNHVStrQ1ZHTnN6N0FPdW0wXG5LcVkzczIyUTlsUTY3Rk95cWl1OFdGUTdRUUtCZ1FEWmlGaFRFWmtQRWNxWmpud0pwVEI1NlpXUDlLVHNsWlA3XG53VTRiemk2eSttZXlmM01KKzRMMlN5SGMzY3BTTWJqdE5PWkN0NDdiOTA4RlVtTFhVR05oY3d1WmpFUXhGZXkwXG5tNDFjUzVlNFA0OWI5bjZ5TEJqQnJCb3FzMldCYWwyZWdkaE5KU3NDV29pWlA4L1pUOGVnWHZoN2I5MWp6b0syXG5xMlBVbUE0RGdRS0JnQVdMMklqdkVJME95eDJTMTFjbi9lM1dKYVRQZ05QVEc5MDNVcGErcW56aE9JeCtNYXFoXG5QRjRXc3VBeTBBb2dHSndnTkpiTjhIdktVc0VUdkE1d3l5TjM5WE43dzBjaGFyRkwzN29zVStXT0F6RGpuamNzXG5BcTVPN0dQR21YdWI2RUJRQlBKaEpQMXd5NHYvSzFmSGcvRjQ3cTRmNDBMQUpPa2FZUkp
	ENUh6QkFvR0JBTlVoXG5uSUJQSnFxNElNdlE2Y0M5ZzhCKzF4WURlYTkvWWsxdytTbVBHdndyRVh5M0dLeDRLN2xLcGJQejdtNFgzM3N4XG5zRVUvK1kyVlFtd1JhMXhRbS81M3JLN1YybDVKZi9ENDAwalJtNlpmU0FPdmdEVHJ0Wm5VR0pNcno5RTd1Tnc3XG5sZ1VIM0pyaXZ5Ri9meE1JOHFzelFid1hQMCt4bnlxQXhFQWdkdUtCQW9HQUlNK1BTTllXQ1pYeERwU0hJMThkXG5qS2tvQWJ3Mk1veXdRSWxrZXVBbjFkWEZhZDF6c1hRR2RUcm1YeXY3TlBQKzhHWEJrbkJMaTNjdnhUaWxKSVN5XG51Y05yQ01pcU5BU24vZHE3Y1dERlVBQmdqWDE2SkgyRE5GWi9sL1VWRjNOREFKalhDczFYN3lJSnlYQjZveC96XG5hU2xxbElNVjM1REJEN3F4Unl1S3Nnaz1cbi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS1cbiIsCiAgImNsaWVudF9lbWFpbCI6ICJwdWxsLXNlY3JldC10ZXN0aW5nQGJ1aWxkLWNyZC10ZXN0aW5nLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwKICAiY2xpZW50X2lkIjogIjEwNzkzNTg2MjAzMzAyNTI1MTM1MiIsCiAgImF1dGhfdXJpIjogImh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi9hdXRoIiwKICAidG9rZW5fdXJpIjogImh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi90b2tlbiIsCiAgImF1dGhfcHJvdmlkZXJfeDUwOV9jZXJ0X3VybCI6ICJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9vYXV0aDIvdjEvY2VydHMiLAogICJjbGllbnRfeDUwOV9jZXJ0X3VybCI6ICJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9yb2JvdC92MS9tZXRhZGF0YS94NTA5L3B1bGwtc2VjcmV0LXRlc3RpbmclNDBidWlsZC1jcmQtdGVzdGluZy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIKfQo="
	decoded, err := base64.StdEncoding.DecodeString(encoedDockercred)
	if err != nil {
		// NOTE(review): a decode failure is silently swallowed and nil is
		// returned; callers pass the result straight to Create. The input
		// is a compile-time constant so this cannot fire in practice, but
		// an explicit failure would surface faster if the constant is
		// ever edited — confirm before changing.
		return nil
	}
	return &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      getName(secretName, suffix),
			// Registries this credential applies to.
			Annotations: map[string]string{
				"tekton.dev/docker-0": "https://us.gcr.io",
				"tekton.dev/docker-1": "https://eu.gcr.io",
				"tekton.dev/docker-2": "https://asia.gcr.io",
				"tekton.dev/docker-3": "https://gcr.io",
			},
		},
		Type: "kubernetes.io/basic-auth",
		Data: map[string][]byte{
			"username": []byte("_json_key"),
			"password": decoded,
		},
	}
}
// getHelloWorldPipelineRun returns a labelled PipelineRun that drives the
// skopeo-copy pipeline of the given test-case index, executing under that
// test case's ServiceAccount.
func getHelloWorldPipelineRun(suffix int, namespace string) *v1alpha1.PipelineRun {
	sa := fmt.Sprintf("%s%d", saName, suffix)
	return tb.PipelineRun(getName(pipelineRunName, suffix), namespace,
		tb.PipelineRunLabel("hello-world-key", "hello-world-value"),
		tb.PipelineRunSpec(getName(pipelineName, suffix),
			tb.PipelineRunParam("path", "docker://gcr.io/build-crd-testing/secret-sauce"),
			tb.PipelineRunParam("dest", "dir:///tmp/"),
			tb.PipelineRunServiceAccount(sa),
		),
	)
}
// getName returns a unique object name built from the given base name
// (e.g. "task", "pipeline") and the numeric suffix that identifies the
// test-case instance. The parameter was previously (misleadingly) named
// "namespace" although callers pass base object names, never namespaces.
func getName(base string, suffix int) string {
	return fmt.Sprintf("%s%d", base, suffix)
}
// collectMatchingEvents watches events in the given namespace for up to 5
// seconds and returns the ones that match both filters:
//  1. kinds: a map from involved-object Kind (e.g. "PipelineRun") to the
//     object names of that Kind we care about
//  2. reason: the expected event Reason (e.g. "Succeeded")
func collectMatchingEvents(kubeClient *knativetest.KubeClient, namespace string, kinds map[string][]string, reason string) ([]*corev1.Event, error) {
	var events []*corev1.Event

	watchEvents, err := kubeClient.Kube.CoreV1().Events(namespace).Watch(metav1.ListOptions{})
	if err != nil {
		// Check the error before touching watchEvents: on failure the
		// returned watch is nil, and the previous code deferred
		// watchEvents.Stop() before this check, panicking on error.
		return events, err
	}
	// close watchEvents channel
	defer watchEvents.Stop()

	// create timer to not wait for events longer than 5 seconds
	timer := time.NewTimer(5 * time.Second)
	defer timer.Stop()
	for {
		select {
		case wevent := <-watchEvents.ResultChan():
			// Guard the type assertion: a closed result channel yields a
			// zero watch.Event whose Object is nil.
			event, ok := wevent.Object.(*corev1.Event)
			if !ok {
				continue
			}
			if names, ok := kinds[event.InvolvedObject.Kind]; ok {
				for _, expectedName := range names {
					if event.InvolvedObject.Name == expectedName && event.Reason == reason {
						events = append(events, event)
					}
				}
			}
		case <-timer.C:
			return events, nil
		}
	}
}
// checkLabelPropagation checks that labels are correctly propagating from
// Pipelines, PipelineRuns, and Tasks to TaskRuns and Pods.
//
// The expected label set is accumulated top-down (Pipeline -> PipelineRun
// -> TaskRun -> Pod), mirroring how the controllers merge labels; each
// level is asserted to contain everything accumulated so far.
func checkLabelPropagation(t *testing.T, c *clients, namespace string, pipelineRunName string, tr *v1alpha1.TaskRun) {
	// Our controllers add 4 labels automatically. If custom labels are set on
	// the Pipeline, PipelineRun, or Task then the map will have to be resized.
	labels := make(map[string]string, 4)

	// Check label propagation to PipelineRuns.
	pr, err := c.PipelineRunClient.Get(pipelineRunName, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Couldn't get expected PipelineRun for %s: %s", tr.Name, err)
	}
	p, err := c.PipelineClient.Get(pr.Spec.PipelineRef.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Couldn't get expected Pipeline for %s: %s", pr.Name, err)
	}
	for key, val := range p.ObjectMeta.Labels {
		labels[key] = val
	}
	// This label is added to every PipelineRun by the PipelineRun controller
	labels[pipeline.GroupName+pipeline.PipelineLabelKey] = p.Name
	assertLabelsMatch(t, labels, pr.ObjectMeta.Labels)

	// Check label propagation to TaskRuns.
	for key, val := range pr.ObjectMeta.Labels {
		labels[key] = val
	}
	// This label is added to every TaskRun by the PipelineRun controller
	labels[pipeline.GroupName+pipeline.PipelineRunLabelKey] = pr.Name
	if tr.Spec.TaskRef != nil {
		task, err := c.TaskClient.Get(tr.Spec.TaskRef.Name, metav1.GetOptions{})
		if err != nil {
			t.Fatalf("Couldn't get expected Task for %s: %s", tr.Name, err)
		}
		for key, val := range task.ObjectMeta.Labels {
			labels[key] = val
		}
		// This label is added to TaskRuns that reference a Task by the TaskRun controller
		labels[pipeline.GroupName+pipeline.TaskLabelKey] = task.Name
	}
	assertLabelsMatch(t, labels, tr.ObjectMeta.Labels)

	// PodName is "" iff a retry happened and pod is deleted
	// This label is added to every Pod by the TaskRun controller
	if tr.Status.PodName != "" {
		// Check label propagation to Pods.
		pod := getPodForTaskRun(t, c.KubeClient, namespace, tr)
		// This label is added to every Pod by the TaskRun controller
		labels[pipeline.GroupName+pipeline.TaskRunLabelKey] = tr.Name
		assertLabelsMatch(t, labels, pod.ObjectMeta.Labels)
	}
}
// checkAnnotationPropagation checks that annotations are correctly propagating from
// Pipelines, PipelineRuns, and Tasks to TaskRuns and Pods.
//
// The expected annotation set is accumulated top-down (Pipeline ->
// PipelineRun -> TaskRun -> Pod); each level is asserted to contain
// everything accumulated so far.
func checkAnnotationPropagation(t *testing.T, c *clients, namespace string, pipelineRunName string, tr *v1alpha1.TaskRun) {
	annotations := make(map[string]string)

	// Check annotation propagation to PipelineRuns.
	pr, err := c.PipelineRunClient.Get(pipelineRunName, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Couldn't get expected PipelineRun for %s: %s", tr.Name, err)
	}
	p, err := c.PipelineClient.Get(pr.Spec.PipelineRef.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Couldn't get expected Pipeline for %s: %s", pr.Name, err)
	}
	for key, val := range p.ObjectMeta.Annotations {
		annotations[key] = val
	}
	assertAnnotationsMatch(t, annotations, pr.ObjectMeta.Annotations)

	// Check annotation propagation to TaskRuns.
	for key, val := range pr.ObjectMeta.Annotations {
		annotations[key] = val
	}
	if tr.Spec.TaskRef != nil {
		task, err := c.TaskClient.Get(tr.Spec.TaskRef.Name, metav1.GetOptions{})
		if err != nil {
			t.Fatalf("Couldn't get expected Task for %s: %s", tr.Name, err)
		}
		for key, val := range task.ObjectMeta.Annotations {
			annotations[key] = val
		}
	}
	assertAnnotationsMatch(t, annotations, tr.ObjectMeta.Annotations)

	// Check annotation propagation to Pods.
	pod := getPodForTaskRun(t, c.KubeClient, namespace, tr)
	assertAnnotationsMatch(t, annotations, pod.ObjectMeta.Annotations)
}
// getPodForTaskRun returns the single Pod backing the given TaskRun,
// located via the TaskRun-name label (the Pod name itself carries a
// random suffix, so it cannot be looked up directly).
func getPodForTaskRun(t *testing.T, kubeClient *knativetest.KubeClient, namespace string, tr *v1alpha1.TaskRun) *corev1.Pod {
	selector := pipeline.GroupName + pipeline.TaskRunLabelKey + " = " + tr.Name
	pods, err := kubeClient.Kube.CoreV1().Pods(namespace).List(metav1.ListOptions{LabelSelector: selector})
	if err != nil {
		t.Fatalf("Couldn't get expected Pod for %s: %s", tr.Name, err)
	}
	if len(pods.Items) != 1 {
		t.Fatalf("Expected 1 Pod for %s, but got %d Pods", tr.Name, len(pods.Items))
	}
	return &pods.Items[0]
}
// assertLabelsMatch fails the test if any expected label is absent from,
// or carries a different value in, actualLabels. Extra labels in
// actualLabels are ignored.
func assertLabelsMatch(t *testing.T, expectedLabels, actualLabels map[string]string) {
	for key := range expectedLabels {
		want := expectedLabels[key]
		if actualLabels[key] == want {
			continue
		}
		t.Errorf("Expected labels containing %s=%s but labels were %v", key, want, actualLabels)
	}
}
// assertAnnotationsMatch fails the test if any expected annotation is
// absent from, or carries a different value in, actualAnnotations. Extra
// annotations in actualAnnotations are ignored.
func assertAnnotationsMatch(t *testing.T, expectedAnnotations, actualAnnotations map[string]string) {
	for key := range expectedAnnotations {
		want := expectedAnnotations[key]
		if actualAnnotations[key] == want {
			continue
		}
		t.Errorf("Expected annotations containing %s=%s but annotations were %v", key, want, actualAnnotations)
	}
}
// getPipelineWithFailingCondition returns a two-task Pipeline in which
// task1 is gated on cond-1 (which always fails) and task2 runs after it.
func getPipelineWithFailingCondition(suffix int, namespace string) *v1alpha1.Pipeline {
	taskRef := getName(taskName, suffix)
	return tb.Pipeline(getName(pipelineName, suffix), namespace, tb.PipelineSpec(
		tb.PipelineTask(task1Name, taskRef, tb.PipelineTaskCondition(cond1Name)),
		tb.PipelineTask("task2", taskRef, tb.RunAfter(task1Name)),
	))
}
// getFailingCondition returns a Condition whose check simply runs
// `exit 1`, so the condition always fails.
func getFailingCondition(namespace string) *v1alpha1.Condition {
	check := tb.ConditionSpecCheck("", "ubuntu", tb.Command("/bin/bash"), tb.Args("exit 1"))
	return tb.Condition(cond1Name, namespace, tb.ConditionSpec(check))
}
// getConditionalPipelineRun returns a labelled PipelineRun that drives the
// condition-gated pipeline of the given test-case index.
func getConditionalPipelineRun(suffix int, namespace string) *v1alpha1.PipelineRun {
	prName := getName(pipelineRunName, suffix)
	return tb.PipelineRun(prName, namespace,
		tb.PipelineRunLabel("hello-world-key", "hello-world-value"),
		tb.PipelineRunSpec(getName(pipelineName, suffix)),
	)
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import "unsafe"
// mOS holds the Darwin-specific per-M (OS thread) state embedded in the
// runtime's m struct.
type mOS struct {
	machport uint32 // return address for mach ipc
	waitsema uint32 // semaphore for parking on locks
}

// darwinVersion is set in osinit from the leading digits of the
// kern.osrelease sysctl string (see getDarwinVersion).
var darwinVersion int

// The following functions are implemented in assembly.

//go:noescape
func mach_msg_trap(h unsafe.Pointer, op int32, send_size, rcv_size, rcv_name, timeout, notify uint32) int32

func mach_reply_port() uint32
func mach_task_self() uint32
func mach_thread_self() uint32

//go:noescape
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32

// unimplemented reports a missing facility and then deliberately
// crashes the process by writing through a recognizably bad pointer
// (1231), so the fault stands out in a crash report.
func unimplemented(name string) {
	println(name, "not implemented")
	*(*int)(unsafe.Pointer(uintptr(1231))) = 1231
}

// semawakeup wakes mp, which is (or soon will be) parked on its per-M
// Mach semaphore.
//go:nosplit
func semawakeup(mp *m) {
	mach_semrelease(mp.waitsema)
}

// semacreate lazily allocates mp's Mach semaphore; the allocation runs
// on the system stack.
//go:nosplit
func semacreate(mp *m) {
	if mp.waitsema != 0 {
		return
	}
	systemstack(func() {
		mp.waitsema = mach_semcreate()
	})
}
// BSD interface for threading.

// osinit performs OS-specific startup: CPU count, physical page size,
// and the Darwin kernel version.
func osinit() {
	// pthread_create delayed until end of goenvs so that we
	// can look at the environment first.
	ncpu = getncpu()
	physPageSize = getPageSize()
	darwinVersion = getDarwinVersion()
}

// sysctl MIB identifiers used by the queries below.
const (
	_CTL_KERN = 1
	_CTL_HW   = 6

	_KERN_OSRELEASE = 2

	_HW_NCPU     = 3
	_HW_PAGESIZE = 7
)

// getDarwinVersion returns the major version of the running Darwin
// kernel, parsed from the leading decimal digits of kern.osrelease.
func getDarwinVersion() int {
	// Use sysctl to fetch kern.osrelease
	mib := [2]uint32{_CTL_KERN, _KERN_OSRELEASE}
	var out [32]byte
	nout := unsafe.Sizeof(out)
	ret := sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
	if ret >= 0 {
		ver := 0
		for i := 0; i < int(nout) && out[i] >= '0' && out[i] <= '9'; i++ {
			ver *= 10
			ver += int(out[i] - '0')
		}
		return ver
	}
	return 17 // should not happen: default to a newish version
}

// getncpu returns the CPU count from the hw.ncpu sysctl, or 1 if the
// query fails.
func getncpu() int32 {
	// Use sysctl to fetch hw.ncpu.
	mib := [2]uint32{_CTL_HW, _HW_NCPU}
	out := uint32(0)
	nout := unsafe.Sizeof(out)
	ret := sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
	if ret >= 0 && int32(out) > 0 {
		return int32(out)
	}
	return 1
}

// getPageSize returns the hardware page size from the hw.pagesize
// sysctl, or 0 if the query fails.
func getPageSize() uintptr {
	// Use sysctl to fetch hw.pagesize.
	mib := [2]uint32{_CTL_HW, _HW_PAGESIZE}
	out := uint32(0)
	nout := unsafe.Sizeof(out)
	ret := sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
	if ret >= 0 && int32(out) > 0 {
		return uintptr(out)
	}
	return 0
}

// urandom_dev is the NUL-terminated device path handed to the raw open
// call.
var urandom_dev = []byte("/dev/urandom\x00")

// getRandomData fills r with bytes read from /dev/urandom.
//go:nosplit
func getRandomData(r []byte) {
	fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
	n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
	closefd(fd)
	extendRandom(r, int(n))
}

func goenvs() {
	goenvs_unix()
}
// newosproc creates a new OS thread for mp via pthread_create; the
// thread starts at mstart_stub with all signals blocked during creation
// so it inherits a fully-blocked mask until minit runs.
//
// May run with m.p==nil, so write barriers are not allowed.
//go:nowritebarrierrec
func newosproc(mp *m) {
	stk := unsafe.Pointer(mp.g0.stack.hi)
	if false {
		print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " id=", mp.id, " ostk=", &mp, "\n")
	}

	// Initialize an attribute object.
	var attr pthreadattr
	var err int32
	err = pthread_attr_init(&attr)
	if err != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}

	// Set the stack size we want to use. 64KB for now.
	// TODO: just use OS default size?
	const stackSize = 1 << 16
	if pthread_attr_setstacksize(&attr, stackSize) != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}
	//mSysStatInc(&memstats.stacks_sys, stackSize) //TODO: do this?

	// Tell the pthread library we won't join with this thread.
	if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}

	// Finally, create the thread. It starts at mstart_stub, which does some low-level
	// setup and then calls mstart.
	var oset sigset
	sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
	err = pthread_create(&attr, funcPC(mstart_stub), unsafe.Pointer(mp))
	sigprocmask(_SIG_SETMASK, &oset, nil)
	if err != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}
}

// glue code to call mstart from pthread_create.
func mstart_stub()

// newosproc0 is a version of newosproc that can be called before the runtime
// is initialized.
//
// This function is not safe to use after initialization as it does not pass an M as fnarg.
//
//go:nosplit
func newosproc0(stacksize uintptr, fn uintptr) {
	// Initialize an attribute object.
	var attr pthreadattr
	var err int32
	err = pthread_attr_init(&attr)
	if err != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}

	// Set the stack we want to use.
	if pthread_attr_setstacksize(&attr, stacksize) != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}
	mSysStatInc(&memstats.stacks_sys, stacksize)

	// Tell the pthread library we won't join with this thread.
	if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}

	// Finally, create the thread. It starts at mstart_stub, which does some low-level
	// setup and then calls mstart.
	var oset sigset
	sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
	err = pthread_create(&attr, fn, nil)
	sigprocmask(_SIG_SETMASK, &oset, nil)
	if err != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}
}
// Error messages written with the raw write syscall because these
// failures can occur before the runtime is usable.
var failallocatestack = []byte("runtime: failed to allocate stack for the new OS thread\n")
var failthreadcreate = []byte("runtime: failed to create new OS thread\n")

// Called to do synchronous initialization of Go code built with
// -buildmode=c-archive or -buildmode=c-shared.
// None of the Go runtime is initialized.
//go:nosplit
//go:nowritebarrierrec
func libpreinit() {
	initsig(true)
}

// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m) {
	mp.gsignal = malg(32 * 1024) // OS X wants >= 8K
	mp.gsignal.m = mp
}

// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
	// The alternate signal stack is buggy on arm and arm64.
	// The signal handler handles it directly.
	// The sigaltstack assembly function does nothing.
	if GOARCH != "arm" && GOARCH != "arm64" {
		minitSignalStack()
	}
	minitSignalMask()
}

// Called from dropm to undo the effect of an minit.
//go:nosplit
func unminit() {
	// The alternate signal stack is buggy on arm and arm64.
	// See minit.
	if GOARCH != "arm" && GOARCH != "arm64" {
		unminitSignals()
	}
}
// Mach IPC, to get at semaphores
// Definitions are in /usr/include/mach on a Mac.

// macherror reports a fatal Mach return code r from the call named fn
// and aborts the program.
func macherror(r int32, fn string) {
	print("mach error ", fn, ": ", r, "\n")
	throw("mach error")
}

// _DebugMach enables verbose dumps of Mach message traffic in machcall.
const _DebugMach = false

// zerondr is a zeroed NDR record copied into outgoing MIG requests.
var zerondr machndr
// mach_msgh_bits packs a remote disposition a (low byte) and a local
// disposition b (next byte) into a Mach message-header bits value.
func mach_msgh_bits(a, b uint32) uint32 {
	bits := b << 8
	bits |= a
	return bits
}
// mach_msg performs a combined Mach send/receive through the raw trap.
func mach_msg(h *machheader, op int32, send_size, rcv_size, rcv_name, timeout, notify uint32) int32 {
	// TODO: Loop on interrupt.
	return mach_msg_trap(unsafe.Pointer(h), op, send_size, rcv_size, rcv_name, timeout, notify)
}

// Mach RPC (MIG)

const (
	_MinMachMsg = 48
	_MachReply  = 100
)

// codemsg is the minimal MIG reply shape carrying only a return code.
type codemsg struct {
	h    machheader
	ndr  machndr
	code int32
}

// machcall sends the MIG request at *h and receives the reply into the
// same buffer (at least maxsize bytes). It returns 0 on success or a
// kernel/MIG error code. rxsize is the size of the expected full reply;
// a reply of exactly codemsg size with no complex bit is interpreted as
// a bare return code.
func machcall(h *machheader, maxsize int32, rxsize int32) int32 {
	_g_ := getg()
	port := _g_.m.machport
	if port == 0 {
		// Lazily allocate a reply port for this M.
		port = mach_reply_port()
		_g_.m.machport = port
	}

	h.msgh_bits |= mach_msgh_bits(_MACH_MSG_TYPE_COPY_SEND, _MACH_MSG_TYPE_MAKE_SEND_ONCE)
	h.msgh_local_port = port
	h.msgh_reserved = 0
	id := h.msgh_id

	if _DebugMach {
		// Dump the raw outgoing message, eight words per line.
		p := (*[10000]unsafe.Pointer)(unsafe.Pointer(h))
		print("send:\t")
		var i uint32
		for i = 0; i < h.msgh_size/uint32(unsafe.Sizeof(p[0])); i++ {
			print(" ", p[i])
			if i%8 == 7 {
				print("\n\t")
			}
		}
		if i%8 != 0 {
			print("\n")
		}
	}

	ret := mach_msg(h, _MACH_SEND_MSG|_MACH_RCV_MSG, h.msgh_size, uint32(maxsize), port, 0, 0)
	if ret != 0 {
		if _DebugMach {
			print("mach_msg error ", ret, "\n")
		}
		return ret
	}

	if _DebugMach {
		// Dump the raw reply.
		p := (*[10000]unsafe.Pointer)(unsafe.Pointer(h))
		var i uint32
		for i = 0; i < h.msgh_size/uint32(unsafe.Sizeof(p[0])); i++ {
			print(" ", p[i])
			if i%8 == 7 {
				print("\n\t")
			}
		}
		if i%8 != 0 {
			print("\n")
		}
	}

	if h.msgh_id != id+_MachReply {
		if _DebugMach {
			print("mach_msg _MachReply id mismatch ", h.msgh_id, " != ", id+_MachReply, "\n")
		}
		return -303 // MIG_REPLY_MISMATCH
	}

	// Look for a response giving the return value.
	// Any call can send this back with an error,
	// and some calls only have return values so they
	// send it back on success too. I don't quite see how
	// you know it's one of these and not the full response
	// format, so just look if the message is right.
	c := (*codemsg)(unsafe.Pointer(h))
	if uintptr(h.msgh_size) == unsafe.Sizeof(*c) && h.msgh_bits&_MACH_MSGH_BITS_COMPLEX == 0 {
		if _DebugMach {
			print("mig result ", c.code, "\n")
		}
		return c.code
	}

	if h.msgh_size != uint32(rxsize) {
		if _DebugMach {
			print("mach_msg _MachReply size mismatch ", h.msgh_size, " != ", rxsize, "\n")
		}
		return -307 // MIG_ARRAY_TOO_LARGE
	}

	return 0
}
// Semaphores!

// MIG message IDs for the semaphore calls, plus the kernel return codes
// the retry loops below care about.
const (
	tmach_semcreate = 3418
	rmach_semcreate = tmach_semcreate + _MachReply

	tmach_semdestroy = 3419
	rmach_semdestroy = tmach_semdestroy + _MachReply

	_KERN_ABORTED             = 14
	_KERN_OPERATION_TIMED_OUT = 49
)

// tmach_semcreatemsg is the MIG request for semaphore_create.
type tmach_semcreatemsg struct {
	h   machheader
	ndr machndr

	policy int32
	value  int32
}

// rmach_semcreatemsg is the MIG reply for semaphore_create.
type rmach_semcreatemsg struct {
	h         machheader
	body      machbody
	semaphore machport
}

// tmach_semdestroymsg is the MIG request for semaphore_destroy.
type tmach_semdestroymsg struct {
	h         machheader
	body      machbody
	semaphore machport
}

// mach_semcreate creates a FIFO Mach semaphore with initial value 0 and
// returns its port name, retrying while the call is interrupted.
func mach_semcreate() uint32 {
	// One 256-byte buffer serves as both request and reply.
	var m [256]uint8
	tx := (*tmach_semcreatemsg)(unsafe.Pointer(&m))
	rx := (*rmach_semcreatemsg)(unsafe.Pointer(&m))

	tx.h.msgh_bits = 0
	tx.h.msgh_size = uint32(unsafe.Sizeof(*tx))
	tx.h.msgh_remote_port = mach_task_self()
	tx.h.msgh_id = tmach_semcreate
	tx.ndr = zerondr

	tx.policy = 0 // 0 = SYNC_POLICY_FIFO
	tx.value = 0

	for {
		r := machcall(&tx.h, int32(unsafe.Sizeof(m)), int32(unsafe.Sizeof(*rx)))
		if r == 0 {
			break
		}
		if r == _KERN_ABORTED { // interrupted
			continue
		}
		macherror(r, "semaphore_create")
	}
	if rx.body.msgh_descriptor_count != 1 {
		unimplemented("mach_semcreate desc count")
	}
	return rx.semaphore.name
}

// mach_semdestroy destroys the semaphore named sem, retrying while the
// call is interrupted.
func mach_semdestroy(sem uint32) {
	var m [256]uint8
	tx := (*tmach_semdestroymsg)(unsafe.Pointer(&m))

	tx.h.msgh_bits = _MACH_MSGH_BITS_COMPLEX
	tx.h.msgh_size = uint32(unsafe.Sizeof(*tx))
	tx.h.msgh_remote_port = mach_task_self()
	tx.h.msgh_id = tmach_semdestroy
	tx.body.msgh_descriptor_count = 1
	tx.semaphore.name = sem
	tx.semaphore.disposition = _MACH_MSG_TYPE_MOVE_SEND
	tx.semaphore._type = 0

	for {
		r := machcall(&tx.h, int32(unsafe.Sizeof(m)), 0)
		if r == 0 {
			break
		}
		if r == _KERN_ABORTED { // interrupted
			continue
		}
		macherror(r, "semaphore_destroy")
	}
}

// The other calls have simple system call traps in sys_darwin_{amd64,386}.s

func mach_semaphore_wait(sema uint32) int32
func mach_semaphore_timedwait(sema, sec, nsec uint32) int32
func mach_semaphore_signal(sema uint32) int32
func mach_semaphore_signal_all(sema uint32) int32
// semasleep1 waits on the current M's semaphore for up to ns
// nanoseconds (forever if ns < 0). Returns 0 when awakened and -1 on
// timeout/interruption of a timed wait. Called via systemstack from
// semasleep.
func semasleep1(ns int64) int32 {
	_g_ := getg()

	if ns >= 0 {
		var nsecs int32
		secs := timediv(ns, 1000000000, &nsecs)
		r := mach_semaphore_timedwait(_g_.m.waitsema, uint32(secs), uint32(nsecs))
		if r == _KERN_ABORTED || r == _KERN_OPERATION_TIMED_OUT {
			return -1
		}
		if r != 0 {
			macherror(r, "semaphore_wait")
		}
		return 0
	}

	for {
		r := mach_semaphore_wait(_g_.m.waitsema)
		if r == 0 {
			break
		}
		// Note: We don't know how this call (with no timeout) can get _KERN_OPERATION_TIMED_OUT,
		// but it does reliably, though at a very low rate, on OS X 10.8, 10.9, 10.10, and 10.11.
		// See golang.org/issue/17161.
		if r == _KERN_ABORTED || r == _KERN_OPERATION_TIMED_OUT { // interrupted
			continue
		}
		macherror(r, "semaphore_wait")
	}
	return 0
}

// semasleep parks the current M on its semaphore, running the actual
// wait on the system stack.
//go:nosplit
func semasleep(ns int64) int32 {
	var r int32
	systemstack(func() {
		r = semasleep1(ns)
	})
	return r
}

// mach_semrelease signals the semaphore sem, retrying while the call is
// interrupted.
//go:nosplit
func mach_semrelease(sem uint32) {
	for {
		r := mach_semaphore_signal(sem)
		if r == 0 {
			break
		}
		if r == _KERN_ABORTED { // interrupted
			continue
		}

		// mach_semrelease must be completely nosplit,
		// because it is called from Go code.
		// If we're going to die, start that process on the system stack
		// to avoid a Go stack split.
		systemstack(func() { macherror(r, "semaphore_signal") })
	}
}

// osyield briefly yields the CPU by sleeping one microsecond.
//go:nosplit
func osyield() {
	usleep(1)
}

// Signal-related constants.
const (
	_NSIG        = 32
	_SI_USER     = 0 /* empirically true, but not what headers say */
	_SIG_BLOCK   = 1
	_SIG_UNBLOCK = 2
	_SIG_SETMASK = 3
	_SS_DISABLE  = 4
)
// Signal entry points implemented in assembly.

//go:noescape
func sigprocmask(how int32, new, old *sigset)

//go:noescape
func sigaction(mode uint32, new *sigactiont, old *usigactiont)

//go:noescape
func sigaltstack(new, old *stackt)

// darwin/arm64 uses registers instead of stack-based arguments.
// TODO: does this matter?
func sigtramp(fn uintptr, infostyle, sig uint32, info *siginfo, ctx unsafe.Pointer)

//go:noescape
func setitimer(mode int32, new, old *itimerval)

func raiseproc(sig uint32)

//extern SigTabTT runtime·sigtab[];

// sigset is a 32-bit signal mask; bit i-1 represents signal i (see
// sigaddset/sigdelset).
type sigset uint32

var sigset_all = ^sigset(0)

// setsig installs fn as the handler for signal i, routed through the
// sigtramp assembly trampoline.
//go:nosplit
//go:nowritebarrierrec
func setsig(i uint32, fn uintptr) {
	var sa sigactiont
	sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
	sa.sa_mask = ^uint32(0)
	sa.sa_tramp = unsafe.Pointer(funcPC(sigtramp)) // runtime·sigtramp's job is to call into real handler
	*(*uintptr)(unsafe.Pointer(&sa.__sigaction_u)) = fn
	sigaction(i, &sa, nil)
}

// setsigstack re-registers the existing handler for signal i with
// _SA_ONSTACK added, unless it already has that flag.
//go:nosplit
//go:nowritebarrierrec
func setsigstack(i uint32) {
	var osa usigactiont
	sigaction(i, nil, &osa)
	handler := *(*uintptr)(unsafe.Pointer(&osa.__sigaction_u))
	if osa.sa_flags&_SA_ONSTACK != 0 {
		return
	}
	var sa sigactiont
	*(*uintptr)(unsafe.Pointer(&sa.__sigaction_u)) = handler
	sa.sa_tramp = unsafe.Pointer(funcPC(sigtramp))
	sa.sa_mask = osa.sa_mask
	sa.sa_flags = osa.sa_flags | _SA_ONSTACK
	sigaction(i, &sa, nil)
}

// getsig returns the handler currently installed for signal i.
//go:nosplit
//go:nowritebarrierrec
func getsig(i uint32) uintptr {
	var sa usigactiont
	sigaction(i, nil, &sa)
	return *(*uintptr)(unsafe.Pointer(&sa.__sigaction_u))
}

// setSignaltstackSP sets the ss_sp field of a stackt.
//go:nosplit
func setSignalstackSP(s *stackt, sp uintptr) {
	*(*uintptr)(unsafe.Pointer(&s.ss_sp)) = sp
}

// sigaddset adds signal i to the mask.
//go:nosplit
//go:nowritebarrierrec
func sigaddset(mask *sigset, i int) {
	*mask |= 1 << (uint32(i) - 1)
}

// sigdelset removes signal i from the mask.
func sigdelset(mask *sigset, i int) {
	*mask &^= 1 << (uint32(i) - 1)
}

//go:linkname executablePath os.executablePath
var executablePath string

// sysargs finds the executable path string the kernel places after the
// NULL terminator of the environment block and stores it in
// os.executablePath.
func sysargs(argc int32, argv **byte) {
	// skip over argv, envv and the first string will be the path
	n := argc + 1
	for argv_index(argv, n) != nil {
		n++
	}
	executablePath = gostringnocopy(argv_index(argv, n+1))

	// strip "executable_path=" prefix if available, it's added after OS X 10.11.
	const prefix = "executable_path="
	if len(executablePath) > len(prefix) && executablePath[:len(prefix)] == prefix {
		executablePath = executablePath[len(prefix):]
	}
}
runtime: remove unused darwinVersion and getDarwinVersion
They are unused since CL 114799.
Also remove consts _CTL_KERN and _KERN_OSRELEASE previously used by
getDarwinVersion.
Change-Id: I51b701e8effbe4dd4301b0e6d52e8885469032f4
Reviewed-on: https://go-review.googlesource.com/116955
Run-TryBot: Tobias Klauser <0a68dd4915066ec5d3f81f75a828fee53dcc8822@gmail.com>
TryBot-Result: Gobot Gobot <66cb808b70d30c07676d5e946fee83fd561249e5@golang.org>
Reviewed-by: Brad Fitzpatrick <ae9783c0b0efc69cd85ab025ddd17aa44cdc4aa5@golang.org>
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import "unsafe"
// mOS holds the Darwin-specific per-M (OS thread) state.
type mOS struct {
	machport uint32 // return address for mach ipc
	waitsema uint32 // semaphore for parking on locks
}

// Assembly entry points.

//go:noescape
func mach_msg_trap(h unsafe.Pointer, op int32, send_size, rcv_size, rcv_name, timeout, notify uint32) int32

func mach_reply_port() uint32
func mach_task_self() uint32
func mach_thread_self() uint32

//go:noescape
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32

// unimplemented reports a missing facility and crashes via a
// recognizably bad pointer write.
func unimplemented(name string) {
	println(name, "not implemented")
	*(*int)(unsafe.Pointer(uintptr(1231))) = 1231
}

// semawakeup wakes mp from its per-M semaphore.
//go:nosplit
func semawakeup(mp *m) {
	mach_semrelease(mp.waitsema)
}

// semacreate lazily allocates mp's Mach semaphore on the system stack.
//go:nosplit
func semacreate(mp *m) {
	if mp.waitsema != 0 {
		return
	}
	systemstack(func() {
		mp.waitsema = mach_semcreate()
	})
}

// BSD interface for threading.
func osinit() {
	// pthread_create delayed until end of goenvs so that we
	// can look at the environment first.
	ncpu = getncpu()
	physPageSize = getPageSize()
}

// sysctl MIB identifiers.
const (
	_CTL_HW      = 6
	_HW_NCPU     = 3
	_HW_PAGESIZE = 7
)

// getncpu returns the CPU count from hw.ncpu, or 1 on failure.
func getncpu() int32 {
	// Use sysctl to fetch hw.ncpu.
	mib := [2]uint32{_CTL_HW, _HW_NCPU}
	out := uint32(0)
	nout := unsafe.Sizeof(out)
	ret := sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
	if ret >= 0 && int32(out) > 0 {
		return int32(out)
	}
	return 1
}

// getPageSize returns the hardware page size from hw.pagesize, or 0 on
// failure.
func getPageSize() uintptr {
	// Use sysctl to fetch hw.pagesize.
	mib := [2]uint32{_CTL_HW, _HW_PAGESIZE}
	out := uint32(0)
	nout := unsafe.Sizeof(out)
	ret := sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
	if ret >= 0 && int32(out) > 0 {
		return uintptr(out)
	}
	return 0
}

// urandom_dev is the NUL-terminated device path for the raw open call.
var urandom_dev = []byte("/dev/urandom\x00")

// getRandomData fills r from /dev/urandom.
//go:nosplit
func getRandomData(r []byte) {
	fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
	n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
	closefd(fd)
	extendRandom(r, int(n))
}

func goenvs() {
	goenvs_unix()
}
// newosproc creates a new OS thread for mp via pthread_create, starting
// at mstart_stub with all signals blocked during creation.
//
// May run with m.p==nil, so write barriers are not allowed.
//go:nowritebarrierrec
func newosproc(mp *m) {
	stk := unsafe.Pointer(mp.g0.stack.hi)
	if false {
		print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " id=", mp.id, " ostk=", &mp, "\n")
	}

	// Initialize an attribute object.
	var attr pthreadattr
	var err int32
	err = pthread_attr_init(&attr)
	if err != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}

	// Set the stack size we want to use. 64KB for now.
	// TODO: just use OS default size?
	const stackSize = 1 << 16
	if pthread_attr_setstacksize(&attr, stackSize) != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}
	//mSysStatInc(&memstats.stacks_sys, stackSize) //TODO: do this?

	// Tell the pthread library we won't join with this thread.
	if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}

	// Finally, create the thread. It starts at mstart_stub, which does some low-level
	// setup and then calls mstart.
	var oset sigset
	sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
	err = pthread_create(&attr, funcPC(mstart_stub), unsafe.Pointer(mp))
	sigprocmask(_SIG_SETMASK, &oset, nil)
	if err != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}
}

// glue code to call mstart from pthread_create.
func mstart_stub()

// newosproc0 is a version of newosproc that can be called before the runtime
// is initialized.
//
// This function is not safe to use after initialization as it does not pass an M as fnarg.
//
//go:nosplit
func newosproc0(stacksize uintptr, fn uintptr) {
	// Initialize an attribute object.
	var attr pthreadattr
	var err int32
	err = pthread_attr_init(&attr)
	if err != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}

	// Set the stack we want to use.
	if pthread_attr_setstacksize(&attr, stacksize) != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}
	mSysStatInc(&memstats.stacks_sys, stacksize)

	// Tell the pthread library we won't join with this thread.
	if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}

	// Finally, create the thread. It starts at mstart_stub, which does some low-level
	// setup and then calls mstart.
	var oset sigset
	sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
	err = pthread_create(&attr, fn, nil)
	sigprocmask(_SIG_SETMASK, &oset, nil)
	if err != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}
}

// Raw-write error messages for pre-runtime failures.
var failallocatestack = []byte("runtime: failed to allocate stack for the new OS thread\n")
var failthreadcreate = []byte("runtime: failed to create new OS thread\n")

// Called to do synchronous initialization of Go code built with
// -buildmode=c-archive or -buildmode=c-shared.
// None of the Go runtime is initialized.
//go:nosplit
//go:nowritebarrierrec
func libpreinit() {
	initsig(true)
}

// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m) {
	mp.gsignal = malg(32 * 1024) // OS X wants >= 8K
	mp.gsignal.m = mp
}

// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
	// The alternate signal stack is buggy on arm and arm64.
	// The signal handler handles it directly.
	// The sigaltstack assembly function does nothing.
	if GOARCH != "arm" && GOARCH != "arm64" {
		minitSignalStack()
	}
	minitSignalMask()
}

// Called from dropm to undo the effect of an minit.
//go:nosplit
func unminit() {
	// The alternate signal stack is buggy on arm and arm64.
	// See minit.
	if GOARCH != "arm" && GOARCH != "arm64" {
		unminitSignals()
	}
}
// Mach IPC, to get at semaphores
// Definitions are in /usr/include/mach on a Mac.

// macherror reports a fatal Mach return code r from fn and aborts.
func macherror(r int32, fn string) {
	print("mach error ", fn, ": ", r, "\n")
	throw("mach error")
}

// _DebugMach enables verbose Mach message dumps in machcall.
const _DebugMach = false

// zerondr is a zeroed NDR record copied into outgoing MIG requests.
var zerondr machndr

// mach_msgh_bits packs remote disposition a and local disposition b
// into a message-header bits value.
func mach_msgh_bits(a, b uint32) uint32 {
	return a | b<<8
}

// mach_msg performs a combined Mach send/receive via the raw trap.
func mach_msg(h *machheader, op int32, send_size, rcv_size, rcv_name, timeout, notify uint32) int32 {
	// TODO: Loop on interrupt.
	return mach_msg_trap(unsafe.Pointer(h), op, send_size, rcv_size, rcv_name, timeout, notify)
}

// Mach RPC (MIG)

const (
	_MinMachMsg = 48
	_MachReply  = 100
)

// codemsg is the minimal MIG reply carrying only a return code.
type codemsg struct {
	h    machheader
	ndr  machndr
	code int32
}

// machcall sends the MIG request at *h and receives the reply into the
// same buffer; returns 0 on success or a kernel/MIG error code. A reply
// of exactly codemsg size with no complex bit is treated as a bare
// return code; otherwise the reply size must equal rxsize.
func machcall(h *machheader, maxsize int32, rxsize int32) int32 {
	_g_ := getg()
	port := _g_.m.machport
	if port == 0 {
		// Lazily allocate a per-M reply port.
		port = mach_reply_port()
		_g_.m.machport = port
	}

	h.msgh_bits |= mach_msgh_bits(_MACH_MSG_TYPE_COPY_SEND, _MACH_MSG_TYPE_MAKE_SEND_ONCE)
	h.msgh_local_port = port
	h.msgh_reserved = 0
	id := h.msgh_id

	if _DebugMach {
		p := (*[10000]unsafe.Pointer)(unsafe.Pointer(h))
		print("send:\t")
		var i uint32
		for i = 0; i < h.msgh_size/uint32(unsafe.Sizeof(p[0])); i++ {
			print(" ", p[i])
			if i%8 == 7 {
				print("\n\t")
			}
		}
		if i%8 != 0 {
			print("\n")
		}
	}

	ret := mach_msg(h, _MACH_SEND_MSG|_MACH_RCV_MSG, h.msgh_size, uint32(maxsize), port, 0, 0)
	if ret != 0 {
		if _DebugMach {
			print("mach_msg error ", ret, "\n")
		}
		return ret
	}

	if _DebugMach {
		p := (*[10000]unsafe.Pointer)(unsafe.Pointer(h))
		var i uint32
		for i = 0; i < h.msgh_size/uint32(unsafe.Sizeof(p[0])); i++ {
			print(" ", p[i])
			if i%8 == 7 {
				print("\n\t")
			}
		}
		if i%8 != 0 {
			print("\n")
		}
	}

	if h.msgh_id != id+_MachReply {
		if _DebugMach {
			print("mach_msg _MachReply id mismatch ", h.msgh_id, " != ", id+_MachReply, "\n")
		}
		return -303 // MIG_REPLY_MISMATCH
	}

	// Look for a response giving the return value.
	// Any call can send this back with an error,
	// and some calls only have return values so they
	// send it back on success too. I don't quite see how
	// you know it's one of these and not the full response
	// format, so just look if the message is right.
	c := (*codemsg)(unsafe.Pointer(h))
	if uintptr(h.msgh_size) == unsafe.Sizeof(*c) && h.msgh_bits&_MACH_MSGH_BITS_COMPLEX == 0 {
		if _DebugMach {
			print("mig result ", c.code, "\n")
		}
		return c.code
	}

	if h.msgh_size != uint32(rxsize) {
		if _DebugMach {
			print("mach_msg _MachReply size mismatch ", h.msgh_size, " != ", rxsize, "\n")
		}
		return -307 // MIG_ARRAY_TOO_LARGE
	}

	return 0
}
// Semaphores!

// MIG message IDs for the semaphore calls, plus kernel return codes
// handled by the retry loops below.
const (
	tmach_semcreate = 3418
	rmach_semcreate = tmach_semcreate + _MachReply

	tmach_semdestroy = 3419
	rmach_semdestroy = tmach_semdestroy + _MachReply

	_KERN_ABORTED             = 14
	_KERN_OPERATION_TIMED_OUT = 49
)

// tmach_semcreatemsg is the MIG request for semaphore_create.
type tmach_semcreatemsg struct {
	h   machheader
	ndr machndr

	policy int32
	value  int32
}

// rmach_semcreatemsg is the MIG reply for semaphore_create.
type rmach_semcreatemsg struct {
	h         machheader
	body      machbody
	semaphore machport
}

// tmach_semdestroymsg is the MIG request for semaphore_destroy.
type tmach_semdestroymsg struct {
	h         machheader
	body      machbody
	semaphore machport
}

// mach_semcreate creates a FIFO Mach semaphore with initial value 0 and
// returns its port name, retrying while interrupted.
func mach_semcreate() uint32 {
	// A single 256-byte buffer serves as request and reply.
	var m [256]uint8
	tx := (*tmach_semcreatemsg)(unsafe.Pointer(&m))
	rx := (*rmach_semcreatemsg)(unsafe.Pointer(&m))

	tx.h.msgh_bits = 0
	tx.h.msgh_size = uint32(unsafe.Sizeof(*tx))
	tx.h.msgh_remote_port = mach_task_self()
	tx.h.msgh_id = tmach_semcreate
	tx.ndr = zerondr

	tx.policy = 0 // 0 = SYNC_POLICY_FIFO
	tx.value = 0

	for {
		r := machcall(&tx.h, int32(unsafe.Sizeof(m)), int32(unsafe.Sizeof(*rx)))
		if r == 0 {
			break
		}
		if r == _KERN_ABORTED { // interrupted
			continue
		}
		macherror(r, "semaphore_create")
	}
	if rx.body.msgh_descriptor_count != 1 {
		unimplemented("mach_semcreate desc count")
	}
	return rx.semaphore.name
}

// mach_semdestroy destroys the semaphore named sem, retrying while
// interrupted.
func mach_semdestroy(sem uint32) {
	var m [256]uint8
	tx := (*tmach_semdestroymsg)(unsafe.Pointer(&m))

	tx.h.msgh_bits = _MACH_MSGH_BITS_COMPLEX
	tx.h.msgh_size = uint32(unsafe.Sizeof(*tx))
	tx.h.msgh_remote_port = mach_task_self()
	tx.h.msgh_id = tmach_semdestroy
	tx.body.msgh_descriptor_count = 1
	tx.semaphore.name = sem
	tx.semaphore.disposition = _MACH_MSG_TYPE_MOVE_SEND
	tx.semaphore._type = 0

	for {
		r := machcall(&tx.h, int32(unsafe.Sizeof(m)), 0)
		if r == 0 {
			break
		}
		if r == _KERN_ABORTED { // interrupted
			continue
		}
		macherror(r, "semaphore_destroy")
	}
}

// The other calls have simple system call traps in sys_darwin_{amd64,386}.s

func mach_semaphore_wait(sema uint32) int32
func mach_semaphore_timedwait(sema, sec, nsec uint32) int32
func mach_semaphore_signal(sema uint32) int32
func mach_semaphore_signal_all(sema uint32) int32
// semasleep1 waits on the current M's semaphore for up to ns
// nanoseconds (forever if ns < 0); returns 0 when awakened, -1 on
// timeout/interruption of a timed wait. Runs on the system stack via
// semasleep.
func semasleep1(ns int64) int32 {
	_g_ := getg()

	if ns >= 0 {
		var nsecs int32
		secs := timediv(ns, 1000000000, &nsecs)
		r := mach_semaphore_timedwait(_g_.m.waitsema, uint32(secs), uint32(nsecs))
		if r == _KERN_ABORTED || r == _KERN_OPERATION_TIMED_OUT {
			return -1
		}
		if r != 0 {
			macherror(r, "semaphore_wait")
		}
		return 0
	}

	for {
		r := mach_semaphore_wait(_g_.m.waitsema)
		if r == 0 {
			break
		}
		// Note: We don't know how this call (with no timeout) can get _KERN_OPERATION_TIMED_OUT,
		// but it does reliably, though at a very low rate, on OS X 10.8, 10.9, 10.10, and 10.11.
		// See golang.org/issue/17161.
		if r == _KERN_ABORTED || r == _KERN_OPERATION_TIMED_OUT { // interrupted
			continue
		}
		macherror(r, "semaphore_wait")
	}
	return 0
}

// semasleep parks the current M on its semaphore via semasleep1 on the
// system stack.
//go:nosplit
func semasleep(ns int64) int32 {
	var r int32
	systemstack(func() {
		r = semasleep1(ns)
	})
	return r
}

// mach_semrelease signals sem, retrying while interrupted.
//go:nosplit
func mach_semrelease(sem uint32) {
	for {
		r := mach_semaphore_signal(sem)
		if r == 0 {
			break
		}
		if r == _KERN_ABORTED { // interrupted
			continue
		}

		// mach_semrelease must be completely nosplit,
		// because it is called from Go code.
		// If we're going to die, start that process on the system stack
		// to avoid a Go stack split.
		systemstack(func() { macherror(r, "semaphore_signal") })
	}
}

// osyield briefly yields the CPU by sleeping one microsecond.
//go:nosplit
func osyield() {
	usleep(1)
}

// Signal-related constants.
const (
	_NSIG        = 32
	_SI_USER     = 0 /* empirically true, but not what headers say */
	_SIG_BLOCK   = 1
	_SIG_UNBLOCK = 2
	_SIG_SETMASK = 3
	_SS_DISABLE  = 4
)
// Signal entry points implemented in assembly.

//go:noescape
func sigprocmask(how int32, new, old *sigset)

//go:noescape
func sigaction(mode uint32, new *sigactiont, old *usigactiont)

//go:noescape
func sigaltstack(new, old *stackt)

// darwin/arm64 uses registers instead of stack-based arguments.
// TODO: does this matter?
func sigtramp(fn uintptr, infostyle, sig uint32, info *siginfo, ctx unsafe.Pointer)

//go:noescape
func setitimer(mode int32, new, old *itimerval)

func raiseproc(sig uint32)

//extern SigTabTT runtime·sigtab[];

// sigset is a 32-bit signal mask; bit i-1 represents signal i.
type sigset uint32

var sigset_all = ^sigset(0)

// setsig installs fn as the handler for signal i, routed through the
// sigtramp assembly trampoline.
//go:nosplit
//go:nowritebarrierrec
func setsig(i uint32, fn uintptr) {
	var sa sigactiont
	sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
	sa.sa_mask = ^uint32(0)
	sa.sa_tramp = unsafe.Pointer(funcPC(sigtramp)) // runtime·sigtramp's job is to call into real handler
	*(*uintptr)(unsafe.Pointer(&sa.__sigaction_u)) = fn
	sigaction(i, &sa, nil)
}

// setsigstack re-registers the existing handler for signal i with
// _SA_ONSTACK added, unless already present.
//go:nosplit
//go:nowritebarrierrec
func setsigstack(i uint32) {
	var osa usigactiont
	sigaction(i, nil, &osa)
	handler := *(*uintptr)(unsafe.Pointer(&osa.__sigaction_u))
	if osa.sa_flags&_SA_ONSTACK != 0 {
		return
	}
	var sa sigactiont
	*(*uintptr)(unsafe.Pointer(&sa.__sigaction_u)) = handler
	sa.sa_tramp = unsafe.Pointer(funcPC(sigtramp))
	sa.sa_mask = osa.sa_mask
	sa.sa_flags = osa.sa_flags | _SA_ONSTACK
	sigaction(i, &sa, nil)
}

// getsig returns the handler currently installed for signal i.
//go:nosplit
//go:nowritebarrierrec
func getsig(i uint32) uintptr {
	var sa usigactiont
	sigaction(i, nil, &sa)
	return *(*uintptr)(unsafe.Pointer(&sa.__sigaction_u))
}

// setSignaltstackSP sets the ss_sp field of a stackt.
//go:nosplit
func setSignalstackSP(s *stackt, sp uintptr) {
	*(*uintptr)(unsafe.Pointer(&s.ss_sp)) = sp
}

// sigaddset adds signal i to the mask.
//go:nosplit
//go:nowritebarrierrec
func sigaddset(mask *sigset, i int) {
	*mask |= 1 << (uint32(i) - 1)
}

// sigdelset removes signal i from the mask.
func sigdelset(mask *sigset, i int) {
	*mask &^= 1 << (uint32(i) - 1)
}

//go:linkname executablePath os.executablePath
var executablePath string

// sysargs finds the executable path string the kernel places after the
// NULL terminator of the environment block and stores it in
// os.executablePath.
func sysargs(argc int32, argv **byte) {
	// skip over argv, envv and the first string will be the path
	n := argc + 1
	for argv_index(argv, n) != nil {
		n++
	}
	executablePath = gostringnocopy(argv_index(argv, n+1))

	// strip "executable_path=" prefix if available, it's added after OS X 10.11.
	const prefix = "executable_path="
	if len(executablePath) > len(prefix) && executablePath[:len(prefix)] == prefix {
		executablePath = executablePath[len(prefix):]
	}
}
|
package main
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"fmt"
"testing"
)
// TestKeyGen generates a P-521 ECDSA key, checks sign/verify round-trips
// via test, persists the key, then reloads a known key and prints its HKID.
func TestKeyGen(t *testing.T) {
	c := elliptic.P521()
	priv, err := ecdsa.GenerateKey(c, rand.Reader)
	if err != nil {
		// Fix: was t.Errorf("Error", err) — a printf-style call with no
		// format verbs (rejected by go vet), which also dropped err from
		// the output. Fatalf additionally stops the test before the nil
		// key is dereferenced below.
		t.Fatalf("Error: %v", err)
	}
	fmt.Printf("X = %v\nY = %v\nD = %v\n", priv.PublicKey.X, priv.PublicKey.Y, priv.D)
	//priv.PublicKey.X, priv.PublicKey.Y = c.ScalarBaseMult(priv.D.Bytes())
	//fmt.Printf("X = %v\nY = %v\nD = %v\n", priv.PublicKey.X, priv.PublicKey.Y, priv.D)
	test(priv, t)
	SaveKey(priv)
	SavePrivateKey(priv)
	priv, err = LoadPrivateKey("d032bd47a0d2a40561ff959244f9bcb0d73d9b101e51b1f86398cd96cc0213d6")
	if err != nil {
		// Fix: the load error was previously discarded; a failed load
		// would hand a bad key to GenerateHKID.
		t.Fatalf("LoadPrivateKey: %v", err)
	}
	fmt.Printf("\nhkid:%v\n", GenerateHKID(priv))
}
//func Commitgen_test(t *testing.T) {
//priv, _ := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
//SaveKey(priv)
//fmt.Print("X = %v\nY = %v\nD = %v\n",priv.PublicKey.X,priv.PublicKey.Y,priv.D)
//hkid := GenerateHKID(priv)
//reconstructedkey := LoadKey(hkid)
//reconstructedhkid := GenerateHKID(reconstructedkey)
//fmt.Printf("\n%v\n%v", hkid, reconstructedhkid)
//fmt.Printf("%v", GenerateCommit([]byte("testing"), priv))
//}
// Taggen_test generates a P-521 key and prints a tag built from it.
//
// NOTE(review): the name does not match the TestXxx pattern, so `go
// test` never runs this function — presumably disabled on purpose (like
// the commented-out Commitgen_test above); confirm before renaming.
func Taggen_test(t *testing.T) {
	priv, _ := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
	//test(priv)
	fmt.Printf("%v\n", GenerateTag([]byte("testing"), "blob", "test", priv))
}
func test(priv *ecdsa.PrivateKey, t *testing.T) {
mar := elliptic.Marshal(
priv.PublicKey.Curve,
priv.PublicKey.X,
priv.PublicKey.Y)
x, y := elliptic.Unmarshal(elliptic.P521(), mar)
maredPublicKey := new(ecdsa.PublicKey)
maredPublicKey.Curve = elliptic.P521()
maredPublicKey.X = x
maredPublicKey.Y = y
hashed := []byte("testing")
r, s, _ := ecdsa.Sign(rand.Reader, priv, hashed)
valid := ecdsa.Verify(maredPublicKey, hashed, r, s)
if valid != true {
t.Errorf("failed Verify\n")
}
invalid := ecdsa.Verify(maredPublicKey, []byte("fail testing"), r, s)
if invalid != false {
t.Errorf("failed falsify\n")
}
//fmt.Printf("valid:%v in:%v marsize:%v bits\n\n\n", valid, invalid, len(mar)*8)
}
Removed the old file creation.
package main
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"fmt"
"testing"
)
// TestKeyGen generates a P-521 ECDSA key, checks sign/verify round-trips
// via test, saves it, then reloads a known key and prints its HKID.
func TestKeyGen(t *testing.T) {
	c := elliptic.P521()
	priv, err := ecdsa.GenerateKey(c, rand.Reader)
	if err != nil {
		// Fix: was t.Errorf("Error", err) — a printf-style call with no
		// format verbs (rejected by go vet), which dropped err from the
		// output; Fatalf also stops before the nil key is used below.
		t.Fatalf("Error: %v", err)
	}
	fmt.Printf("X = %v\nY = %v\nD = %v\n", priv.PublicKey.X, priv.PublicKey.Y, priv.D)
	//priv.PublicKey.X, priv.PublicKey.Y = c.ScalarBaseMult(priv.D.Bytes())
	//fmt.Printf("X = %v\nY = %v\nD = %v\n", priv.PublicKey.X, priv.PublicKey.Y, priv.D)
	test(priv, t)
	//SaveKey(priv)
	SavePrivateKey(priv)
	priv, err = LoadPrivateKey("d032bd47a0d2a40561ff959244f9bcb0d73d9b101e51b1f86398cd96cc0213d6")
	if err != nil {
		// Fix: the load error was previously discarded; a failed load
		// would hand a bad key to GenerateHKID.
		t.Fatalf("LoadPrivateKey: %v", err)
	}
	fmt.Printf("\nhkid:%v\n", GenerateHKID(priv))
}
//func Commitgen_test(t *testing.T) {
//priv, _ := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
//SaveKey(priv)
//fmt.Print("X = %v\nY = %v\nD = %v\n",priv.PublicKey.X,priv.PublicKey.Y,priv.D)
//hkid := GenerateHKID(priv)
//reconstructedkey := LoadKey(hkid)
//reconstructedhkid := GenerateHKID(reconstructedkey)
//fmt.Printf("\n%v\n%v", hkid, reconstructedhkid)
//fmt.Printf("%v", GenerateCommit([]byte("testing"), priv))
//}
// TestTaggen exercises GenerateTag with a freshly generated P-521 key and
// prints the result for manual inspection.
// Renamed from Taggen_test: `go test` only runs functions named TestXxx,
// so the original name meant this test was silently never executed.
func TestTaggen(t *testing.T) {
	priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
	if err != nil {
		// Previously ignored; a nil key would make GenerateTag misbehave.
		t.Fatalf("key generation failed: %v", err)
	}
	fmt.Printf("%v\n", GenerateTag([]byte("testing"), "blob", "test", priv))
}
func test(priv *ecdsa.PrivateKey, t *testing.T) {
mar := elliptic.Marshal(
priv.PublicKey.Curve,
priv.PublicKey.X,
priv.PublicKey.Y)
x, y := elliptic.Unmarshal(elliptic.P521(), mar)
maredPublicKey := new(ecdsa.PublicKey)
maredPublicKey.Curve = elliptic.P521()
maredPublicKey.X = x
maredPublicKey.Y = y
hashed := []byte("testing")
r, s, _ := ecdsa.Sign(rand.Reader, priv, hashed)
valid := ecdsa.Verify(maredPublicKey, hashed, r, s)
if valid != true {
t.Errorf("failed Verify\n")
}
invalid := ecdsa.Verify(maredPublicKey, []byte("fail testing"), r, s)
if invalid != false {
t.Errorf("failed falsify\n")
}
//fmt.Printf("valid:%v in:%v marsize:%v bits\n\n\n", valid, invalid, len(mar)*8)
}
|
package main
import (
"flag"
"github.com/BigTong/caspercloud"
_ "github.com/BigTong/caspercloud/ci"
"log"
"net"
"net/http"
_ "net/http/pprof"
"runtime"
)
// kDefaultDownloadDirectory is the local directory served as the HTTP
// file-server root ("/") for downloaded images.
const (
	kDefaultDownloadDirectory = "./images"
)
// main wires the casper submit handler and a static file server for the
// images directory into the default mux, then serves HTTP on the port given
// by the -port flag (default 8000).
func main() {
	runtime.GOMAXPROCS(6)
	log.SetFlags(log.Ldate | log.Ltime | log.Lshortfile)
	port := flag.String("port", "8000", "port number")
	flag.Parse()
	service := caspercloud.NewCasperServer()
	http.Handle("/submit", service)
	http.Handle("/", http.FileServer(http.Dir(kDefaultDownloadDirectory)))
	l, e := net.Listen("tcp", ":"+*port)
	if e != nil {
		log.Fatal("listen error:", e)
	}
	// http.Serve only returns on error; surface it instead of silently
	// discarding it and exiting with status 0.
	if err := http.Serve(l, nil); err != nil {
		log.Fatal("serve error:", err)
	}
}
add site
package main
import (
"flag"
"github.com/xlvector/caspercloud"
_ "github.com/xlvector/caspercloud/ci"
"log"
"net"
"net/http"
_ "net/http/pprof"
"runtime"
)
// main wires the casper submit handler plus static file servers for the
// images and site directories into the default mux, then serves HTTP on the
// port given by the -port flag (default 8000).
func main() {
	runtime.GOMAXPROCS(6)
	log.SetFlags(log.Ldate | log.Ltime | log.Lshortfile)
	port := flag.String("port", "8000", "port number")
	flag.Parse()
	service := caspercloud.NewCasperServer()
	http.Handle("/submit", service)
	http.Handle("/images/", http.StripPrefix("/images/", http.FileServer(http.Dir("./images"))))
	http.Handle("/site/", http.StripPrefix("/site/", http.FileServer(http.Dir("./site"))))
	l, e := net.Listen("tcp", ":"+*port)
	if e != nil {
		log.Fatal("listen error:", e)
	}
	// http.Serve only returns on error; surface it instead of silently
	// discarding it and exiting with status 0.
	if err := http.Serve(l, nil); err != nil {
		log.Fatal("serve error:", err)
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.